compiler: update aro and translate-c to latest; delete clang translate-c

This commit is contained in:
Andrew Kelley 2025-07-17 22:51:23 -07:00
parent 91b0adc4c1
commit f49a54745b
72 changed files with 41141 additions and 43111 deletions

View File

@ -197,7 +197,6 @@ set(ZIG_CPP_SOURCES
# These are planned to stay even when we are self-hosted. # These are planned to stay even when we are self-hosted.
src/zig_llvm.cpp src/zig_llvm.cpp
src/zig_llvm-ar.cpp src/zig_llvm-ar.cpp
src/zig_clang.cpp
src/zig_clang_driver.cpp src/zig_clang_driver.cpp
src/zig_clang_cc1_main.cpp src/zig_clang_cc1_main.cpp
src/zig_clang_cc1as_main.cpp src/zig_clang_cc1as_main.cpp
@ -537,7 +536,6 @@ set(ZIG_STAGE2_SOURCES
src/Value.zig src/Value.zig
src/Zcu.zig src/Zcu.zig
src/Zcu/PerThread.zig src/Zcu/PerThread.zig
src/clang.zig
src/clang_options.zig src/clang_options.zig
src/clang_options_data.zig src/clang_options_data.zig
src/codegen.zig src/codegen.zig
@ -641,7 +639,6 @@ set(ZIG_STAGE2_SOURCES
src/register_manager.zig src/register_manager.zig
src/target.zig src/target.zig
src/tracy.zig src/tracy.zig
src/translate_c.zig
src/libs/wasi_libc.zig src/libs/wasi_libc.zig
) )

View File

@ -732,13 +732,7 @@ fn addCompilerMod(b: *std.Build, options: AddCompilerModOptions) *std.Build.Modu
.root_source_file = b.path("lib/compiler/aro/aro.zig"), .root_source_file = b.path("lib/compiler/aro/aro.zig"),
}); });
const aro_translate_c_mod = b.createModule(.{
.root_source_file = b.path("lib/compiler/aro_translate_c.zig"),
});
aro_translate_c_mod.addImport("aro", aro_mod);
compiler_mod.addImport("aro", aro_mod); compiler_mod.addImport("aro", aro_mod);
compiler_mod.addImport("aro_translate_c", aro_translate_c_mod);
return compiler_mod; return compiler_mod;
} }

View File

@ -1,26 +0,0 @@
<img src="https://aro.vexu.eu/aro-logo.svg" alt="Aro" width="120px"/>
# Aro
A C compiler with the goal of providing fast compilation and low memory usage with good diagnostics.
Aro is included as an alternative C frontend in the [Zig compiler](https://github.com/ziglang/zig)
for `translate-c` and eventually compiling C files by translating them to Zig first.
Aro is developed in https://github.com/Vexu/arocc and the Zig dependency is
updated from there when needed.
Currently most of standard C is supported up to C23 and as are many of the common
extensions from GNU, MSVC, and Clang
Basic code generation is supported for x86-64 linux and can produce a valid hello world:
```sh-session
$ cat hello.c
extern int printf(const char *restrict fmt, ...);
int main(void) {
printf("Hello, world!\n");
return 0;
}
$ zig build && ./zig-out/bin/arocc hello.c -o hello
$ ./hello
Hello, world!
```

View File

@ -5,12 +5,14 @@ pub const Driver = @import("aro/Driver.zig");
pub const Parser = @import("aro/Parser.zig"); pub const Parser = @import("aro/Parser.zig");
pub const Preprocessor = @import("aro/Preprocessor.zig"); pub const Preprocessor = @import("aro/Preprocessor.zig");
pub const Source = @import("aro/Source.zig"); pub const Source = @import("aro/Source.zig");
pub const StringInterner = @import("aro/StringInterner.zig");
pub const target_util = @import("aro/target.zig");
pub const Tokenizer = @import("aro/Tokenizer.zig"); pub const Tokenizer = @import("aro/Tokenizer.zig");
pub const Toolchain = @import("aro/Toolchain.zig"); pub const Toolchain = @import("aro/Toolchain.zig");
pub const Tree = @import("aro/Tree.zig"); pub const Tree = @import("aro/Tree.zig");
pub const Type = @import("aro/Type.zig"); pub const TypeStore = @import("aro/TypeStore.zig");
pub const TypeMapper = @import("aro/StringInterner.zig").TypeMapper; pub const QualType = TypeStore.QualType;
pub const target_util = @import("aro/target.zig"); pub const Type = TypeStore.Type;
pub const Value = @import("aro/Value.zig"); pub const Value = @import("aro/Value.zig");
const backend = @import("backend.zig"); const backend = @import("backend.zig");
@ -18,6 +20,7 @@ pub const Interner = backend.Interner;
pub const Ir = backend.Ir; pub const Ir = backend.Ir;
pub const Object = backend.Object; pub const Object = backend.Object;
pub const CallingConvention = backend.CallingConvention; pub const CallingConvention = backend.CallingConvention;
pub const Assembly = backend.Assembly;
pub const version_str = backend.version_str; pub const version_str = backend.version_str;
pub const version = backend.version; pub const version = backend.version;

View File

@ -6,9 +6,8 @@ const Compilation = @import("Compilation.zig");
const Diagnostics = @import("Diagnostics.zig"); const Diagnostics = @import("Diagnostics.zig");
const Parser = @import("Parser.zig"); const Parser = @import("Parser.zig");
const Tree = @import("Tree.zig"); const Tree = @import("Tree.zig");
const NodeIndex = Tree.NodeIndex;
const TokenIndex = Tree.TokenIndex; const TokenIndex = Tree.TokenIndex;
const Type = @import("Type.zig"); const QualType = @import("TypeStore.zig").QualType;
const Value = @import("Value.zig"); const Value = @import("Value.zig");
const Attribute = @This(); const Attribute = @This();
@ -39,79 +38,53 @@ pub const Kind = enum {
}; };
pub const Iterator = struct { pub const Iterator = struct {
source: union(enum) { source: ?struct {
ty: Type, qt: QualType,
slice: []const Attribute, comp: *const Compilation,
}, },
slice: []const Attribute,
index: usize, index: usize,
pub fn initSlice(slice: ?[]const Attribute) Iterator { pub fn initSlice(slice: []const Attribute) Iterator {
return .{ .source = .{ .slice = slice orelse &.{} }, .index = 0 }; return .{ .source = null, .slice = slice, .index = 0 };
} }
pub fn initType(ty: Type) Iterator { pub fn initType(qt: QualType, comp: *const Compilation) Iterator {
return .{ .source = .{ .ty = ty }, .index = 0 }; return .{ .source = .{ .qt = qt, .comp = comp }, .slice = &.{}, .index = 0 };
} }
/// returns the next attribute as well as its index within the slice or current type /// returns the next attribute as well as its index within the slice or current type
/// The index can be used to determine when a nested type has been recursed into /// The index can be used to determine when a nested type has been recursed into
pub fn next(self: *Iterator) ?struct { Attribute, usize } { pub fn next(self: *Iterator) ?struct { Attribute, usize } {
switch (self.source) { if (self.index < self.slice.len) {
.slice => |slice| { defer self.index += 1;
if (self.index < slice.len) { return .{ self.slice[self.index], self.index };
defer self.index += 1; }
return .{ slice[self.index], self.index }; if (self.source) |*source| {
} var cur = source.qt;
}, if (cur.isInvalid()) {
.ty => |ty| { self.source = null;
switch (ty.specifier) { return null;
.typeof_type => { }
self.* = .{ .source = .{ .ty = ty.data.sub_type.* }, .index = 0 }; while (true) switch (cur.type(source.comp)) {
return self.next(); .typeof => |typeof| cur = typeof.base,
}, .attributed => |attributed| {
.typeof_expr => { self.slice = attributed.attributes;
self.* = .{ .source = .{ .ty = ty.data.expr.ty }, .index = 0 }; self.index = 1;
return self.next(); source.qt = attributed.base;
}, return .{ self.slice[0], 0 };
.attributed => { },
if (self.index < ty.data.attributed.attributes.len) { .typedef => |typedef| cur = typedef.base,
defer self.index += 1; else => {
return .{ ty.data.attributed.attributes[self.index], self.index }; self.source = null;
} break;
self.* = .{ .source = .{ .ty = ty.data.attributed.base }, .index = 0 }; },
return self.next(); };
},
else => {},
}
},
} }
return null; return null;
} }
}; };
pub const ArgumentType = enum {
string,
identifier,
int,
alignment,
float,
complex_float,
expression,
nullptr_t,
pub fn toString(self: ArgumentType) []const u8 {
return switch (self) {
.string => "a string",
.identifier => "an identifier",
.int, .alignment => "an integer constant",
.nullptr_t => "nullptr",
.float => "a floating point number",
.complex_float => "a complex floating point number",
.expression => "an expression",
};
}
};
/// number of required arguments /// number of required arguments
pub fn requiredArgCount(attr: Tag) u32 { pub fn requiredArgCount(attr: Tag) u32 {
switch (attr) { switch (attr) {
@ -211,21 +184,20 @@ pub fn wantsIdentEnum(attr: Tag) bool {
} }
} }
pub fn diagnoseIdent(attr: Tag, arguments: *Arguments, ident: []const u8) ?Diagnostics.Message { pub fn diagnoseIdent(attr: Tag, arguments: *Arguments, ident: TokenIndex, p: *Parser) !bool {
switch (attr) { switch (attr) {
inline else => |tag| { inline else => |tag| {
const fields = @typeInfo(@field(attributes, @tagName(tag))).@"struct".fields; const fields = @typeInfo(@field(attributes, @tagName(tag))).@"struct".fields;
if (fields.len == 0) unreachable; if (fields.len == 0) unreachable;
const Unwrapped = UnwrapOptional(fields[0].type); const Unwrapped = UnwrapOptional(fields[0].type);
if (@typeInfo(Unwrapped) != .@"enum") unreachable; if (@typeInfo(Unwrapped) != .@"enum") unreachable;
if (std.meta.stringToEnum(Unwrapped, normalize(ident))) |enum_val| { if (std.meta.stringToEnum(Unwrapped, normalize(p.tokSlice(ident)))) |enum_val| {
@field(@field(arguments, @tagName(tag)), fields[0].name) = enum_val; @field(@field(arguments, @tagName(tag)), fields[0].name) = enum_val;
return null; return false;
} }
return Diagnostics.Message{
.tag = .unknown_attr_enum, try p.err(ident, .unknown_attr_enum, .{ @tagName(attr), Formatting.choices(attr) });
.extra = .{ .attr_enum = .{ .tag = attr } }, return true;
};
}, },
} }
} }
@ -244,7 +216,7 @@ pub fn wantsAlignment(attr: Tag, idx: usize) bool {
} }
} }
pub fn diagnoseAlignment(attr: Tag, arguments: *Arguments, arg_idx: u32, res: Parser.Result, p: *Parser) !?Diagnostics.Message { pub fn diagnoseAlignment(attr: Tag, arguments: *Arguments, arg_idx: u32, res: Parser.Result, arg_start: TokenIndex, p: *Parser) !bool {
switch (attr) { switch (attr) {
inline else => |tag| { inline else => |tag| {
const arg_fields = @typeInfo(@field(attributes, @tagName(tag))).@"struct".fields; const arg_fields = @typeInfo(@field(attributes, @tagName(tag))).@"struct".fields;
@ -254,17 +226,25 @@ pub fn diagnoseAlignment(attr: Tag, arguments: *Arguments, arg_idx: u32, res: Pa
inline 0...arg_fields.len - 1 => |arg_i| { inline 0...arg_fields.len - 1 => |arg_i| {
if (UnwrapOptional(arg_fields[arg_i].type) != Alignment) unreachable; if (UnwrapOptional(arg_fields[arg_i].type) != Alignment) unreachable;
if (!res.val.is(.int, p.comp)) return Diagnostics.Message{ .tag = .alignas_unavailable }; if (!res.val.is(.int, p.comp)) {
try p.err(arg_start, .alignas_unavailable, .{});
return true;
}
if (res.val.compare(.lt, Value.zero, p.comp)) { if (res.val.compare(.lt, Value.zero, p.comp)) {
return Diagnostics.Message{ .tag = .negative_alignment, .extra = .{ .str = try res.str(p) } }; try p.err(arg_start, .negative_alignment, .{res});
return true;
} }
const requested = res.val.toInt(u29, p.comp) orelse { const requested = res.val.toInt(u29, p.comp) orelse {
return Diagnostics.Message{ .tag = .maximum_alignment, .extra = .{ .str = try res.str(p) } }; try p.err(arg_start, .maximum_alignment, .{res});
return true;
}; };
if (!std.mem.isValidAlign(requested)) return Diagnostics.Message{ .tag = .non_pow2_align }; if (!std.mem.isValidAlign(requested)) {
try p.err(arg_start, .non_pow2_align, .{});
return true;
}
@field(@field(arguments, @tagName(tag)), arg_fields[arg_i].name) = Alignment{ .requested = requested }; @field(@field(arguments, @tagName(tag)), arg_fields[arg_i].name) = .{ .requested = requested };
return null; return false;
}, },
else => unreachable, else => unreachable,
} }
@ -278,102 +258,105 @@ fn diagnoseField(
comptime Wanted: type, comptime Wanted: type,
arguments: *Arguments, arguments: *Arguments,
res: Parser.Result, res: Parser.Result,
arg_start: TokenIndex,
node: Tree.Node, node: Tree.Node,
p: *Parser, p: *Parser,
) !?Diagnostics.Message { ) !bool {
const string = "a string";
const identifier = "an identifier";
const int = "an integer constant";
const alignment = "an integer constant";
const nullptr_t = "nullptr";
const float = "a floating point number";
const complex_float = "a complex floating point number";
const expression = "an expression";
const expected: []const u8 = switch (Wanted) {
Value => string,
Identifier => identifier,
u32 => int,
Alignment => alignment,
CallingConvention => identifier,
else => switch (@typeInfo(Wanted)) {
.@"enum" => if (Wanted.opts.enum_kind == .string) string else identifier,
else => unreachable,
},
};
if (res.val.opt_ref == .none) { if (res.val.opt_ref == .none) {
if (Wanted == Identifier and node.tag == .decl_ref_expr) { if (Wanted == Identifier and node == .decl_ref_expr) {
@field(@field(arguments, decl.name), field.name) = Identifier{ .tok = node.data.decl_ref }; @field(@field(arguments, decl.name), field.name) = .{ .tok = node.decl_ref_expr.name_tok };
return null; return false;
} }
return invalidArgMsg(Wanted, .expression);
try p.err(arg_start, .attribute_arg_invalid, .{ expected, expression });
return true;
} }
const key = p.comp.interner.get(res.val.ref()); const key = p.comp.interner.get(res.val.ref());
switch (key) { switch (key) {
.int => { .int => {
if (@typeInfo(Wanted) == .int) { if (@typeInfo(Wanted) == .int) {
@field(@field(arguments, decl.name), field.name) = res.val.toInt(Wanted, p.comp) orelse return .{ @field(@field(arguments, decl.name), field.name) = res.val.toInt(Wanted, p.comp) orelse {
.tag = .attribute_int_out_of_range, try p.err(arg_start, .attribute_int_out_of_range, .{res});
.extra = .{ .str = try res.str(p) }, return true;
}; };
return null;
return false;
} }
}, },
.bytes => |bytes| { .bytes => |bytes| {
if (Wanted == Value) { if (Wanted == Value) {
if (node.tag != .string_literal_expr or (!node.ty.elemType().is(.char) and !node.ty.elemType().is(.uchar))) { validate: {
return .{ if (node != .string_literal_expr) break :validate;
.tag = .attribute_requires_string, switch (node.string_literal_expr.qt.childType(p.comp).get(p.comp, .int).?) {
.extra = .{ .str = decl.name }, .char, .uchar, .schar => {},
}; else => break :validate,
}
@field(@field(arguments, decl.name), field.name) = try p.removeNull(res.val);
return false;
} }
@field(@field(arguments, decl.name), field.name) = try p.removeNull(res.val);
return null; try p.err(arg_start, .attribute_requires_string, .{decl.name});
return true;
} else if (@typeInfo(Wanted) == .@"enum" and @hasDecl(Wanted, "opts") and Wanted.opts.enum_kind == .string) { } else if (@typeInfo(Wanted) == .@"enum" and @hasDecl(Wanted, "opts") and Wanted.opts.enum_kind == .string) {
const str = bytes[0 .. bytes.len - 1]; const str = bytes[0 .. bytes.len - 1];
if (std.meta.stringToEnum(Wanted, str)) |enum_val| { if (std.meta.stringToEnum(Wanted, str)) |enum_val| {
@field(@field(arguments, decl.name), field.name) = enum_val; @field(@field(arguments, decl.name), field.name) = enum_val;
return null; return false;
} else {
return .{
.tag = .unknown_attr_enum,
.extra = .{ .attr_enum = .{ .tag = std.meta.stringToEnum(Tag, decl.name).? } },
};
} }
try p.err(arg_start, .unknown_attr_enum, .{ decl.name, Formatting.choices(@field(Tag, decl.name)) });
return true;
} }
}, },
else => {}, else => {},
} }
return invalidArgMsg(Wanted, switch (key) {
.int => .int, try p.err(arg_start, .attribute_arg_invalid, .{ expected, switch (key) {
.bytes => .string, .int => int,
.float => .float, .bytes => string,
.complex => .complex_float, .float => float,
.null => .nullptr_t, .complex => complex_float,
.int_ty, .null => nullptr_t,
.float_ty, else => unreachable,
.complex_ty, } });
.ptr_ty, return true;
.noreturn_ty,
.void_ty,
.func_ty,
.array_ty,
.vector_ty,
.record_ty,
=> unreachable,
});
} }
fn invalidArgMsg(comptime Expected: type, actual: ArgumentType) Diagnostics.Message { pub fn diagnose(attr: Tag, arguments: *Arguments, arg_idx: u32, res: Parser.Result, arg_start: TokenIndex, node: Tree.Node, p: *Parser) !bool {
return .{
.tag = .attribute_arg_invalid,
.extra = .{ .attr_arg_type = .{ .expected = switch (Expected) {
Value => .string,
Identifier => .identifier,
u32 => .int,
Alignment => .alignment,
CallingConvention => .identifier,
else => switch (@typeInfo(Expected)) {
.@"enum" => if (Expected.opts.enum_kind == .string) .string else .identifier,
else => unreachable,
},
}, .actual = actual } },
};
}
pub fn diagnose(attr: Tag, arguments: *Arguments, arg_idx: u32, res: Parser.Result, node: Tree.Node, p: *Parser) !?Diagnostics.Message {
switch (attr) { switch (attr) {
inline else => |tag| { inline else => |tag| {
const decl = @typeInfo(attributes).@"struct".decls[@intFromEnum(tag)]; const decl = @typeInfo(attributes).@"struct".decls[@intFromEnum(tag)];
const max_arg_count = comptime maxArgCount(tag); const max_arg_count = comptime maxArgCount(tag);
if (arg_idx >= max_arg_count) return Diagnostics.Message{ if (arg_idx >= max_arg_count) {
.tag = .attribute_too_many_args, try p.err(arg_start, .attribute_too_many_args, .{ @tagName(attr), max_arg_count });
.extra = .{ .attr_arg_count = .{ .attribute = attr, .expected = max_arg_count } }, return true;
}; }
const arg_fields = @typeInfo(@field(attributes, decl.name)).@"struct".fields; const arg_fields = @typeInfo(@field(attributes, decl.name)).@"struct".fields;
switch (arg_idx) { switch (arg_idx) {
inline 0...arg_fields.len - 1 => |arg_i| { inline 0...arg_fields.len - 1 => |arg_i| {
return diagnoseField(decl, arg_fields[arg_i], UnwrapOptional(arg_fields[arg_i].type), arguments, res, node, p); return diagnoseField(decl, arg_fields[arg_i], UnwrapOptional(arg_fields[arg_i].type), arguments, res, arg_start, node, p);
}, },
else => unreachable, else => unreachable,
} }
@ -386,8 +369,8 @@ const EnumTypes = enum {
identifier, identifier,
}; };
pub const Alignment = struct { pub const Alignment = struct {
node: NodeIndex = .none, node: Tree.Node.OptIndex = .null,
requested: u29, requested: u32,
}; };
pub const Identifier = struct { pub const Identifier = struct {
tok: TokenIndex = 0, tok: TokenIndex = 0,
@ -556,6 +539,7 @@ const attributes = struct {
pub const nonstring = struct {}; pub const nonstring = struct {};
pub const noplt = struct {}; pub const noplt = struct {};
pub const @"noreturn" = struct {}; pub const @"noreturn" = struct {};
pub const nothrow = struct {};
// TODO: union args ? // TODO: union args ?
// const optimize = struct { // const optimize = struct {
// // optimize, // u32 | []const u8 -- optimize? // // optimize, // u32 | []const u8 -- optimize?
@ -697,6 +681,39 @@ const attributes = struct {
pub const calling_convention = struct { pub const calling_convention = struct {
cc: CallingConvention, cc: CallingConvention,
}; };
pub const nullability = struct {
kind: enum {
nonnull,
nullable,
nullable_result,
unspecified,
const opts = struct {
const enum_kind = .identifier;
};
},
};
pub const unaligned = struct {};
pub const pcs = struct {
kind: enum {
aapcs,
@"aapcs-vfp",
const opts = struct {
const enum_kind = .string;
};
},
};
pub const riscv_vector_cc = struct {};
pub const aarch64_sve_pcs = struct {};
pub const aarch64_vector_pcs = struct {};
pub const fastcall = struct {};
pub const stdcall = struct {};
pub const vectorcall = struct {};
pub const cdecl = struct {};
pub const thiscall = struct {};
pub const sysv_abi = struct {};
pub const ms_abi = struct {};
}; };
pub const Tag = std.meta.DeclEnum(attributes); pub const Tag = std.meta.DeclEnum(attributes);
@ -786,108 +803,120 @@ fn ignoredAttrErr(p: *Parser, tok: TokenIndex, attr: Attribute.Tag, context: []c
} }
pub const applyParameterAttributes = applyVariableAttributes; pub const applyParameterAttributes = applyVariableAttributes;
pub fn applyVariableAttributes(p: *Parser, ty: Type, attr_buf_start: usize, tag: ?Diagnostics.Tag) !Type { pub fn applyVariableAttributes(p: *Parser, qt: QualType, attr_buf_start: usize, diagnostic: ?Parser.Diagnostic) !QualType {
const attrs = p.attr_buf.items(.attr)[attr_buf_start..]; const attrs = p.attr_buf.items(.attr)[attr_buf_start..];
const toks = p.attr_buf.items(.tok)[attr_buf_start..]; const toks = p.attr_buf.items(.tok)[attr_buf_start..];
p.attr_application_buf.items.len = 0; p.attr_application_buf.items.len = 0;
var base_ty = ty; var base_qt = qt;
var common = false; var common = false;
var nocommon = false; var nocommon = false;
for (attrs, toks) |attr, tok| switch (attr.tag) { for (attrs, toks) |attr, tok| switch (attr.tag) {
// zig fmt: off // zig fmt: off
.alias, .may_alias, .deprecated, .unavailable, .unused, .warn_if_not_aligned, .weak, .used, .alias, .may_alias, .deprecated, .unavailable, .unused, .warn_if_not_aligned, .weak, .used,
.noinit, .retain, .persistent, .section, .mode, .asm_label, .noinit, .retain, .persistent, .section, .mode, .asm_label, .nullability, .unaligned,
=> try p.attr_application_buf.append(p.gpa, attr), => try p.attr_application_buf.append(p.gpa, attr),
// zig fmt: on // zig fmt: on
.common => if (nocommon) { .common => if (nocommon) {
try p.errTok(.ignore_common, tok); try p.err(tok, .ignore_common, .{});
} else { } else {
try p.attr_application_buf.append(p.gpa, attr); try p.attr_application_buf.append(p.gpa, attr);
common = true; common = true;
}, },
.nocommon => if (common) { .nocommon => if (common) {
try p.errTok(.ignore_nocommon, tok); try p.err(tok, .ignore_nocommon, .{});
} else { } else {
try p.attr_application_buf.append(p.gpa, attr); try p.attr_application_buf.append(p.gpa, attr);
nocommon = true; nocommon = true;
}, },
.vector_size => try attr.applyVectorSize(p, tok, &base_ty), .vector_size => try attr.applyVectorSize(p, tok, &base_qt),
.aligned => try attr.applyAligned(p, base_ty, tag), .aligned => try attr.applyAligned(p, base_qt, diagnostic),
.nonstring => if (!base_ty.isArray() or !(base_ty.is(.char) or base_ty.is(.uchar) or base_ty.is(.schar))) { .nonstring => {
try p.errStr(.non_string_ignored, tok, try p.typeStr(ty)); if (base_qt.get(p.comp, .array)) |array_ty| {
if (array_ty.elem.get(p.comp, .int)) |int_ty| switch (int_ty) {
.char, .uchar, .schar => {
try p.attr_application_buf.append(p.gpa, attr);
continue;
},
else => {},
};
}
try p.err(tok, .non_string_ignored, .{qt});
},
.uninitialized => if (p.func.qt == null) {
try p.err(tok, .local_variable_attribute, .{"uninitialized"});
} else { } else {
try p.attr_application_buf.append(p.gpa, attr); try p.attr_application_buf.append(p.gpa, attr);
}, },
.uninitialized => if (p.func.ty == null) { .cleanup => if (p.func.qt == null) {
try p.errStr(.local_variable_attribute, tok, "uninitialized"); try p.err(tok, .local_variable_attribute, .{"cleanup"});
} else {
try p.attr_application_buf.append(p.gpa, attr);
},
.cleanup => if (p.func.ty == null) {
try p.errStr(.local_variable_attribute, tok, "cleanup");
} else { } else {
try p.attr_application_buf.append(p.gpa, attr); try p.attr_application_buf.append(p.gpa, attr);
}, },
.calling_convention => try applyCallingConvention(attr, p, tok, base_qt),
.alloc_size, .alloc_size,
.copy, .copy,
.tls_model, .tls_model,
.visibility, .visibility,
=> |t| try p.errExtra(.attribute_todo, tok, .{ .attribute_todo = .{ .tag = t, .kind = .variables } }), => |t| try p.err(tok, .attribute_todo, .{ @tagName(t), "variables" }),
// There is already an error in Parser for _Noreturn keyword
.noreturn => if (attr.syntax != .keyword) try ignoredAttrErr(p, tok, attr.tag, "variables"),
else => try ignoredAttrErr(p, tok, attr.tag, "variables"), else => try ignoredAttrErr(p, tok, attr.tag, "variables"),
}; };
return base_ty.withAttributes(p.arena, p.attr_application_buf.items); return applySelected(base_qt, p);
} }
pub fn applyFieldAttributes(p: *Parser, field_ty: *Type, attr_buf_start: usize) ![]const Attribute { pub fn applyFieldAttributes(p: *Parser, field_qt: *QualType, attr_buf_start: usize) ![]const Attribute {
const attrs = p.attr_buf.items(.attr)[attr_buf_start..]; const attrs = p.attr_buf.items(.attr)[attr_buf_start..];
const toks = p.attr_buf.items(.tok)[attr_buf_start..]; const toks = p.attr_buf.items(.tok)[attr_buf_start..];
p.attr_application_buf.items.len = 0; p.attr_application_buf.items.len = 0;
for (attrs, toks) |attr, tok| switch (attr.tag) { for (attrs, toks) |attr, tok| switch (attr.tag) {
// zig fmt: off // zig fmt: off
.@"packed", .may_alias, .deprecated, .unavailable, .unused, .warn_if_not_aligned, .mode, .warn_unused_result, .nodiscard, .@"packed", .may_alias, .deprecated, .unavailable, .unused, .warn_if_not_aligned,
.mode, .warn_unused_result, .nodiscard, .nullability, .unaligned,
=> try p.attr_application_buf.append(p.gpa, attr), => try p.attr_application_buf.append(p.gpa, attr),
// zig fmt: on // zig fmt: on
.vector_size => try attr.applyVectorSize(p, tok, field_ty), .vector_size => try attr.applyVectorSize(p, tok, field_qt),
.aligned => try attr.applyAligned(p, field_ty.*, null), .aligned => try attr.applyAligned(p, field_qt.*, null),
.calling_convention => try applyCallingConvention(attr, p, tok, field_qt.*),
else => try ignoredAttrErr(p, tok, attr.tag, "fields"), else => try ignoredAttrErr(p, tok, attr.tag, "fields"),
}; };
if (p.attr_application_buf.items.len == 0) return &[0]Attribute{}; return p.attr_application_buf.items;
return p.arena.dupe(Attribute, p.attr_application_buf.items);
} }
pub fn applyTypeAttributes(p: *Parser, ty: Type, attr_buf_start: usize, tag: ?Diagnostics.Tag) !Type { pub fn applyTypeAttributes(p: *Parser, qt: QualType, attr_buf_start: usize, diagnostic: ?Parser.Diagnostic) !QualType {
const attrs = p.attr_buf.items(.attr)[attr_buf_start..]; const attrs = p.attr_buf.items(.attr)[attr_buf_start..];
const toks = p.attr_buf.items(.tok)[attr_buf_start..]; const toks = p.attr_buf.items(.tok)[attr_buf_start..];
p.attr_application_buf.items.len = 0; p.attr_application_buf.items.len = 0;
var base_ty = ty; var base_qt = qt;
for (attrs, toks) |attr, tok| switch (attr.tag) { for (attrs, toks) |attr, tok| switch (attr.tag) {
// zig fmt: off // zig fmt: off
.@"packed", .may_alias, .deprecated, .unavailable, .unused, .warn_if_not_aligned, .mode, .@"packed", .may_alias, .deprecated, .unavailable, .unused, .warn_if_not_aligned, .mode, .nullability, .unaligned,
=> try p.attr_application_buf.append(p.gpa, attr), => try p.attr_application_buf.append(p.gpa, attr),
// zig fmt: on // zig fmt: on
.transparent_union => try attr.applyTransparentUnion(p, tok, base_ty), .transparent_union => try attr.applyTransparentUnion(p, tok, base_qt),
.vector_size => try attr.applyVectorSize(p, tok, &base_ty), .vector_size => try attr.applyVectorSize(p, tok, &base_qt),
.aligned => try attr.applyAligned(p, base_ty, tag), .aligned => try attr.applyAligned(p, base_qt, diagnostic),
.designated_init => if (base_ty.is(.@"struct")) { .designated_init => if (base_qt.is(p.comp, .@"struct")) {
try p.attr_application_buf.append(p.gpa, attr); try p.attr_application_buf.append(p.gpa, attr);
} else { } else {
try p.errTok(.designated_init_invalid, tok); try p.err(tok, .designated_init_invalid, .{});
}, },
.calling_convention => try applyCallingConvention(attr, p, tok, base_qt),
.alloc_size, .alloc_size,
.copy, .copy,
.scalar_storage_order, .scalar_storage_order,
.nonstring, .nonstring,
=> |t| try p.errExtra(.attribute_todo, tok, .{ .attribute_todo = .{ .tag = t, .kind = .types } }), => |t| try p.err(tok, .attribute_todo, .{ @tagName(t), "types" }),
else => try ignoredAttrErr(p, tok, attr.tag, "types"), else => try ignoredAttrErr(p, tok, attr.tag, "types"),
}; };
return base_ty.withAttributes(p.arena, p.attr_application_buf.items); return applySelected(base_qt, p);
} }
pub fn applyFunctionAttributes(p: *Parser, ty: Type, attr_buf_start: usize) !Type { pub fn applyFunctionAttributes(p: *Parser, qt: QualType, attr_buf_start: usize) !QualType {
const attrs = p.attr_buf.items(.attr)[attr_buf_start..]; const attrs = p.attr_buf.items(.attr)[attr_buf_start..];
const toks = p.attr_buf.items(.tok)[attr_buf_start..]; const toks = p.attr_buf.items(.tok)[attr_buf_start..];
p.attr_application_buf.items.len = 0; p.attr_application_buf.items.len = 0;
var base_ty = ty; var base_qt = qt;
var hot = false; var hot = false;
var cold = false; var cold = false;
var @"noinline" = false; var @"noinline" = false;
@ -897,55 +926,153 @@ pub fn applyFunctionAttributes(p: *Parser, ty: Type, attr_buf_start: usize) !Typ
.noreturn, .unused, .used, .warning, .deprecated, .unavailable, .weak, .pure, .leaf, .noreturn, .unused, .used, .warning, .deprecated, .unavailable, .weak, .pure, .leaf,
.@"const", .warn_unused_result, .section, .returns_nonnull, .returns_twice, .@"error", .@"const", .warn_unused_result, .section, .returns_nonnull, .returns_twice, .@"error",
.externally_visible, .retain, .flatten, .gnu_inline, .alias, .asm_label, .nodiscard, .externally_visible, .retain, .flatten, .gnu_inline, .alias, .asm_label, .nodiscard,
.reproducible, .unsequenced, .reproducible, .unsequenced, .nothrow, .nullability, .unaligned,
=> try p.attr_application_buf.append(p.gpa, attr), => try p.attr_application_buf.append(p.gpa, attr),
// zig fmt: on // zig fmt: on
.hot => if (cold) { .hot => if (cold) {
try p.errTok(.ignore_hot, tok); try p.err(tok, .ignore_hot, .{});
} else { } else {
try p.attr_application_buf.append(p.gpa, attr); try p.attr_application_buf.append(p.gpa, attr);
hot = true; hot = true;
}, },
.cold => if (hot) { .cold => if (hot) {
try p.errTok(.ignore_cold, tok); try p.err(tok, .ignore_cold, .{});
} else { } else {
try p.attr_application_buf.append(p.gpa, attr); try p.attr_application_buf.append(p.gpa, attr);
cold = true; cold = true;
}, },
.always_inline => if (@"noinline") { .always_inline => if (@"noinline") {
try p.errTok(.ignore_always_inline, tok); try p.err(tok, .ignore_always_inline, .{});
} else { } else {
try p.attr_application_buf.append(p.gpa, attr); try p.attr_application_buf.append(p.gpa, attr);
always_inline = true; always_inline = true;
}, },
.@"noinline" => if (always_inline) { .@"noinline" => if (always_inline) {
try p.errTok(.ignore_noinline, tok); try p.err(tok, .ignore_noinline, .{});
} else { } else {
try p.attr_application_buf.append(p.gpa, attr); try p.attr_application_buf.append(p.gpa, attr);
@"noinline" = true; @"noinline" = true;
}, },
.aligned => try attr.applyAligned(p, base_ty, null), .aligned => try attr.applyAligned(p, base_qt, null),
.format => try attr.applyFormat(p, base_ty), .format => try attr.applyFormat(p, base_qt),
.calling_convention => switch (attr.args.calling_convention.cc) { .calling_convention => try applyCallingConvention(attr, p, tok, base_qt),
.C => continue, .fastcall => if (p.comp.target.cpu.arch == .x86) {
.stdcall, .thiscall => switch (p.comp.target.cpu.arch) { try p.attr_application_buf.append(p.gpa, .{
.x86 => try p.attr_application_buf.append(p.gpa, attr), .tag = .calling_convention,
else => try p.errStr(.callconv_not_supported, tok, p.tok_ids[tok].lexeme().?), .args = .{ .calling_convention = .{ .cc = .fastcall } },
}, .syntax = attr.syntax,
.vectorcall => switch (p.comp.target.cpu.arch) { });
.x86, .aarch64, .aarch64_be => try p.attr_application_buf.append(p.gpa, attr), } else {
else => try p.errStr(.callconv_not_supported, tok, p.tok_ids[tok].lexeme().?), try p.err(tok, .callconv_not_supported, .{"fastcall"});
}, },
.stdcall => if (p.comp.target.cpu.arch == .x86) {
try p.attr_application_buf.append(p.gpa, .{
.tag = .calling_convention,
.args = .{ .calling_convention = .{ .cc = .stdcall } },
.syntax = attr.syntax,
});
} else {
try p.err(tok, .callconv_not_supported, .{"stdcall"});
},
.thiscall => if (p.comp.target.cpu.arch == .x86) {
try p.attr_application_buf.append(p.gpa, .{
.tag = .calling_convention,
.args = .{ .calling_convention = .{ .cc = .thiscall } },
.syntax = attr.syntax,
});
} else {
try p.err(tok, .callconv_not_supported, .{"thiscall"});
},
.vectorcall => if (p.comp.target.cpu.arch == .x86 or p.comp.target.cpu.arch.isAARCH64()) {
try p.attr_application_buf.append(p.gpa, .{
.tag = .calling_convention,
.args = .{ .calling_convention = .{ .cc = .vectorcall } },
.syntax = attr.syntax,
});
} else {
try p.err(tok, .callconv_not_supported, .{"vectorcall"});
},
.cdecl => {},
.pcs => if (p.comp.target.cpu.arch.isArm()) {
try p.attr_application_buf.append(p.gpa, .{
.tag = .calling_convention,
.args = .{ .calling_convention = .{ .cc = switch (attr.args.pcs.kind) {
.aapcs => .arm_aapcs,
.@"aapcs-vfp" => .arm_aapcs_vfp,
} } },
.syntax = attr.syntax,
});
} else {
try p.err(tok, .callconv_not_supported, .{"pcs"});
},
.riscv_vector_cc => if (p.comp.target.cpu.arch.isRISCV()) {
try p.attr_application_buf.append(p.gpa, .{
.tag = .calling_convention,
.args = .{ .calling_convention = .{ .cc = .riscv_vector } },
.syntax = attr.syntax,
});
} else {
try p.err(tok, .callconv_not_supported, .{"pcs"});
},
.aarch64_sve_pcs => if (p.comp.target.cpu.arch.isAARCH64()) {
try p.attr_application_buf.append(p.gpa, .{
.tag = .calling_convention,
.args = .{ .calling_convention = .{ .cc = .aarch64_sve_pcs } },
.syntax = attr.syntax,
});
} else {
try p.err(tok, .callconv_not_supported, .{"pcs"});
},
.aarch64_vector_pcs => if (p.comp.target.cpu.arch.isAARCH64()) {
try p.attr_application_buf.append(p.gpa, .{
.tag = .calling_convention,
.args = .{ .calling_convention = .{ .cc = .aarch64_vector_pcs } },
.syntax = attr.syntax,
});
} else {
try p.err(tok, .callconv_not_supported, .{"pcs"});
},
.sysv_abi => if (p.comp.target.cpu.arch == .x86_64 and p.comp.target.os.tag == .windows) {
try p.attr_application_buf.append(p.gpa, .{
.tag = .calling_convention,
.args = .{ .calling_convention = .{ .cc = .x86_64_sysv } },
.syntax = attr.syntax,
});
},
.ms_abi => if (p.comp.target.cpu.arch == .x86_64 and p.comp.target.os.tag != .windows) {
try p.attr_application_buf.append(p.gpa, .{
.tag = .calling_convention,
.args = .{ .calling_convention = .{ .cc = .x86_64_win } },
.syntax = attr.syntax,
});
}, },
.malloc => { .malloc => {
if (base_ty.returnType().isPtr()) { if (base_qt.get(p.comp, .func).?.return_type.isPointer(p.comp)) {
try p.attr_application_buf.append(p.gpa, attr); try p.attr_application_buf.append(p.gpa, attr);
} else { } else {
try ignoredAttrErr(p, tok, attr.tag, "functions that do not return pointers"); try ignoredAttrErr(p, tok, attr.tag, "functions that do not return pointers");
} }
}, },
.alloc_align => {
const func_ty = base_qt.get(p.comp, .func).?;
if (func_ty.return_type.isPointer(p.comp)) {
if (attr.args.alloc_align.position == 0 or attr.args.alloc_align.position > func_ty.params.len) {
try p.err(tok, .attribute_param_out_of_bounds, .{ "alloc_align", 1 });
} else {
const arg_qt = func_ty.params[attr.args.alloc_align.position - 1].qt;
if (arg_qt.isInvalid()) continue;
const arg_sk = arg_qt.scalarKind(p.comp);
if (!arg_sk.isInt() or !arg_sk.isReal()) {
try p.err(tok, .alloc_align_required_int_param, .{});
} else {
try p.attr_application_buf.append(p.gpa, attr);
}
}
} else {
try p.err(tok, .alloc_align_requires_ptr_return, .{});
}
},
.access, .access,
.alloc_align,
.alloc_size, .alloc_size,
.artificial, .artificial,
.assume_aligned, .assume_aligned,
@ -984,13 +1111,13 @@ pub fn applyFunctionAttributes(p: *Parser, ty: Type, attr_buf_start: usize) !Typ
.visibility, .visibility,
.weakref, .weakref,
.zero_call_used_regs, .zero_call_used_regs,
=> |t| try p.errExtra(.attribute_todo, tok, .{ .attribute_todo = .{ .tag = t, .kind = .functions } }), => |t| try p.err(tok, .attribute_todo, .{ @tagName(t), "functions" }),
else => try ignoredAttrErr(p, tok, attr.tag, "functions"), else => try ignoredAttrErr(p, tok, attr.tag, "functions"),
}; };
return ty.withAttributes(p.arena, p.attr_application_buf.items); return applySelected(qt, p);
} }
pub fn applyLabelAttributes(p: *Parser, ty: Type, attr_buf_start: usize) !Type { pub fn applyLabelAttributes(p: *Parser, attr_buf_start: usize) !QualType {
const attrs = p.attr_buf.items(.attr)[attr_buf_start..]; const attrs = p.attr_buf.items(.attr)[attr_buf_start..];
const toks = p.attr_buf.items(.tok)[attr_buf_start..]; const toks = p.attr_buf.items(.tok)[attr_buf_start..];
p.attr_application_buf.items.len = 0; p.attr_application_buf.items.len = 0;
@ -999,41 +1126,48 @@ pub fn applyLabelAttributes(p: *Parser, ty: Type, attr_buf_start: usize) !Type {
for (attrs, toks) |attr, tok| switch (attr.tag) { for (attrs, toks) |attr, tok| switch (attr.tag) {
.unused => try p.attr_application_buf.append(p.gpa, attr), .unused => try p.attr_application_buf.append(p.gpa, attr),
.hot => if (cold) { .hot => if (cold) {
try p.errTok(.ignore_hot, tok); try p.err(tok, .ignore_hot, .{});
} else { } else {
try p.attr_application_buf.append(p.gpa, attr); try p.attr_application_buf.append(p.gpa, attr);
hot = true; hot = true;
}, },
.cold => if (hot) { .cold => if (hot) {
try p.errTok(.ignore_cold, tok); try p.err(tok, .ignore_cold, .{});
} else { } else {
try p.attr_application_buf.append(p.gpa, attr); try p.attr_application_buf.append(p.gpa, attr);
cold = true; cold = true;
}, },
else => try ignoredAttrErr(p, tok, attr.tag, "labels"), else => try ignoredAttrErr(p, tok, attr.tag, "labels"),
}; };
return ty.withAttributes(p.arena, p.attr_application_buf.items); return applySelected(.void, p);
} }
pub fn applyStatementAttributes(p: *Parser, ty: Type, expr_start: TokenIndex, attr_buf_start: usize) !Type { pub fn applyStatementAttributes(p: *Parser, expr_start: TokenIndex, attr_buf_start: usize) !QualType {
const attrs = p.attr_buf.items(.attr)[attr_buf_start..]; const attrs = p.attr_buf.items(.attr)[attr_buf_start..];
const toks = p.attr_buf.items(.tok)[attr_buf_start..]; const toks = p.attr_buf.items(.tok)[attr_buf_start..];
p.attr_application_buf.items.len = 0; p.attr_application_buf.items.len = 0;
for (attrs, toks) |attr, tok| switch (attr.tag) { for (attrs, toks) |attr, tok| switch (attr.tag) {
.fallthrough => if (p.tok_ids[p.tok_i] != .keyword_case and p.tok_ids[p.tok_i] != .keyword_default) { .fallthrough => {
// TODO: this condition is not completely correct; the last statement of a compound for (p.tok_ids[p.tok_i..]) |tok_id| {
// statement is also valid if it precedes a switch label (so intervening '}' are ok, switch (tok_id) {
// but only if they close a compound statement) .keyword_case, .keyword_default, .eof => {
try p.errTok(.invalid_fallthrough, expr_start); try p.attr_application_buf.append(p.gpa, attr);
} else { break;
try p.attr_application_buf.append(p.gpa, attr); },
.r_brace => {},
else => {
try p.err(expr_start, .invalid_fallthrough, .{});
break;
},
}
}
}, },
else => try p.errStr(.cannot_apply_attribute_to_statement, tok, @tagName(attr.tag)), else => try p.err(tok, .cannot_apply_attribute_to_statement, .{@tagName(attr.tag)}),
}; };
return ty.withAttributes(p.arena, p.attr_application_buf.items); return applySelected(.void, p);
} }
pub fn applyEnumeratorAttributes(p: *Parser, ty: Type, attr_buf_start: usize) !Type { pub fn applyEnumeratorAttributes(p: *Parser, qt: QualType, attr_buf_start: usize) !QualType {
const attrs = p.attr_buf.items(.attr)[attr_buf_start..]; const attrs = p.attr_buf.items(.attr)[attr_buf_start..];
const toks = p.attr_buf.items(.tok)[attr_buf_start..]; const toks = p.attr_buf.items(.tok)[attr_buf_start..];
p.attr_application_buf.items.len = 0; p.attr_application_buf.items.len = 0;
@ -1041,80 +1175,118 @@ pub fn applyEnumeratorAttributes(p: *Parser, ty: Type, attr_buf_start: usize) !T
.deprecated, .unavailable => try p.attr_application_buf.append(p.gpa, attr), .deprecated, .unavailable => try p.attr_application_buf.append(p.gpa, attr),
else => try ignoredAttrErr(p, tok, attr.tag, "enums"), else => try ignoredAttrErr(p, tok, attr.tag, "enums"),
}; };
return ty.withAttributes(p.arena, p.attr_application_buf.items); return applySelected(qt, p);
} }
fn applyAligned(attr: Attribute, p: *Parser, ty: Type, tag: ?Diagnostics.Tag) !void { fn applyAligned(attr: Attribute, p: *Parser, qt: QualType, diagnostic: ?Parser.Diagnostic) !void {
const base = ty.canonicalize(.standard);
if (attr.args.aligned.alignment) |alignment| alignas: { if (attr.args.aligned.alignment) |alignment| alignas: {
if (attr.syntax != .keyword) break :alignas; if (attr.syntax != .keyword) break :alignas;
const align_tok = attr.args.aligned.__name_tok; const align_tok = attr.args.aligned.__name_tok;
if (tag) |t| try p.errTok(t, align_tok); if (diagnostic) |d| try p.err(align_tok, d, .{});
const default_align = base.alignof(p.comp); if (qt.isInvalid()) return;
if (ty.isFunc()) { const default_align = qt.base(p.comp).qt.alignof(p.comp);
try p.errTok(.alignas_on_func, align_tok); if (qt.is(p.comp, .func)) {
try p.err(align_tok, .alignas_on_func, .{});
} else if (alignment.requested < default_align) { } else if (alignment.requested < default_align) {
try p.errExtra(.minimum_alignment, align_tok, .{ .unsigned = default_align }); try p.err(align_tok, .minimum_alignment, .{default_align});
} }
} }
try p.attr_application_buf.append(p.gpa, attr); try p.attr_application_buf.append(p.gpa, attr);
} }
fn applyTransparentUnion(attr: Attribute, p: *Parser, tok: TokenIndex, ty: Type) !void { fn applyTransparentUnion(attr: Attribute, p: *Parser, tok: TokenIndex, qt: QualType) !void {
const union_ty = ty.get(.@"union") orelse { const union_ty = qt.get(p.comp, .@"union") orelse {
return p.errTok(.transparent_union_wrong_type, tok); return p.err(tok, .transparent_union_wrong_type, .{});
}; };
// TODO validate union defined at end // TODO validate union defined at end
if (union_ty.data.record.isIncomplete()) return; if (union_ty.layout == null) return;
const fields = union_ty.data.record.fields; if (union_ty.fields.len == 0) {
if (fields.len == 0) { return p.err(tok, .transparent_union_one_field, .{});
return p.errTok(.transparent_union_one_field, tok);
} }
const first_field_size = fields[0].ty.bitSizeof(p.comp).?; const first_field_size = union_ty.fields[0].qt.bitSizeof(p.comp);
for (fields[1..]) |field| { for (union_ty.fields[1..]) |field| {
const field_size = field.ty.bitSizeof(p.comp).?; const field_size = field.qt.bitSizeof(p.comp);
if (field_size == first_field_size) continue; if (field_size == first_field_size) continue;
const mapper = p.comp.string_interner.getSlowTypeMapper();
const str = try std.fmt.allocPrint( try p.err(field.name_tok, .transparent_union_size, .{ field.name.lookup(p.comp), field_size });
p.comp.diagnostics.arena.allocator(), return p.err(union_ty.fields[0].name_tok, .transparent_union_size_note, .{first_field_size});
"'{s}' ({d}",
.{ mapper.lookup(field.name), field_size },
);
try p.errStr(.transparent_union_size, field.name_tok, str);
return p.errExtra(.transparent_union_size_note, fields[0].name_tok, .{ .unsigned = first_field_size });
} }
try p.attr_application_buf.append(p.gpa, attr); try p.attr_application_buf.append(p.gpa, attr);
} }
fn applyVectorSize(attr: Attribute, p: *Parser, tok: TokenIndex, ty: *Type) !void { fn applyVectorSize(attr: Attribute, p: *Parser, tok: TokenIndex, qt: *QualType) !void {
const base = ty.base(); if (qt.isInvalid()) return;
const is_enum = ty.is(.@"enum"); const scalar_kind = qt.scalarKind(p.comp);
if (!(ty.isInt() or ty.isFloat()) or !ty.isReal() or (is_enum and p.comp.langopts.emulate == .gcc)) { if (scalar_kind != .int and scalar_kind != .float) {
try p.errStr(.invalid_vec_elem_ty, tok, try p.typeStr(ty.*)); if (qt.get(p.comp, .@"enum")) |enum_ty| {
if (p.comp.langopts.emulate == .clang and enum_ty.incomplete) {
return; // Clang silently ignores vector_size on incomplete enums.
}
}
try p.err(tok, .invalid_vec_elem_ty, .{qt.*});
return error.ParsingFailed; return error.ParsingFailed;
} }
if (is_enum) return; if (qt.get(p.comp, .bit_int)) |bit_int| {
if (bit_int.bits < 8) {
try p.err(tok, .bit_int_vec_too_small, .{});
return error.ParsingFailed;
} else if (!std.math.isPowerOfTwo(bit_int.bits)) {
try p.err(tok, .bit_int_vec_not_pow2, .{});
return error.ParsingFailed;
}
}
const vec_bytes = attr.args.vector_size.bytes; const vec_bytes = attr.args.vector_size.bytes;
const ty_size = ty.sizeof(p.comp).?; const elem_size = qt.sizeof(p.comp);
if (vec_bytes % ty_size != 0) { if (vec_bytes % elem_size != 0) {
return p.errTok(.vec_size_not_multiple, tok); return p.err(tok, .vec_size_not_multiple, .{});
} }
const vec_size = vec_bytes / ty_size;
const arr_ty = try p.arena.create(Type.Array); qt.* = try p.comp.type_store.put(p.gpa, .{ .vector = .{
arr_ty.* = .{ .elem = ty.*, .len = vec_size }; .elem = qt.*,
base.* = .{ .len = @intCast(vec_bytes / elem_size),
.specifier = .vector, } });
.data = .{ .array = arr_ty },
};
} }
fn applyFormat(attr: Attribute, p: *Parser, ty: Type) !void { fn applyFormat(attr: Attribute, p: *Parser, qt: QualType) !void {
// TODO validate // TODO validate
_ = ty; _ = qt;
try p.attr_application_buf.append(p.gpa, attr); try p.attr_application_buf.append(p.gpa, attr);
} }
fn applyCallingConvention(attr: Attribute, p: *Parser, tok: TokenIndex, qt: QualType) !void {
if (!qt.is(p.comp, .func)) {
return p.err(tok, .callconv_non_func, .{ p.tok_ids[tok].symbol(), qt });
}
switch (attr.args.calling_convention.cc) {
.c => {},
.stdcall, .thiscall, .fastcall, .regcall => switch (p.comp.target.cpu.arch) {
.x86 => try p.attr_application_buf.append(p.gpa, attr),
else => try p.err(tok, .callconv_not_supported, .{p.tok_ids[tok].symbol()}),
},
.vectorcall => switch (p.comp.target.cpu.arch) {
.x86, .aarch64, .aarch64_be => try p.attr_application_buf.append(p.gpa, attr),
else => try p.err(tok, .callconv_not_supported, .{p.tok_ids[tok].symbol()}),
},
.riscv_vector,
.aarch64_sve_pcs,
.aarch64_vector_pcs,
.arm_aapcs,
.arm_aapcs_vfp,
.x86_64_sysv,
.x86_64_win,
=> unreachable, // These can't come from keyword syntax
}
}
fn applySelected(qt: QualType, p: *Parser) !QualType {
if (p.attr_application_buf.items.len == 0) return qt;
if (qt.isInvalid()) return qt;
return (try p.comp.type_store.put(p.gpa, .{ .attributed = .{
.base = qt,
.attributes = p.attr_application_buf.items,
} })).withQualifiers(qt);
}

File diff suppressed because it is too large Load Diff

View File

@ -1,21 +1,23 @@
const std = @import("std"); const std = @import("std");
const Compilation = @import("Compilation.zig"); const Compilation = @import("Compilation.zig");
const Type = @import("Type.zig");
const TypeDescription = @import("Builtins/TypeDescription.zig");
const target_util = @import("target.zig");
const StringId = @import("StringInterner.zig").StringId;
const LangOpts = @import("LangOpts.zig"); const LangOpts = @import("LangOpts.zig");
const Parser = @import("Parser.zig"); const Parser = @import("Parser.zig");
const target_util = @import("target.zig");
const TypeStore = @import("TypeStore.zig");
const QualType = TypeStore.QualType;
const Builder = TypeStore.Builder;
const TypeDescription = @import("Builtins/TypeDescription.zig");
const Properties = @import("Builtins/Properties.zig"); const Properties = @import("Builtins/Properties.zig");
pub const Builtin = @import("Builtins/Builtin.zig").with(Properties); pub const Builtin = @import("Builtins/Builtin.zig").with(Properties);
const Expanded = struct { const Expanded = struct {
ty: Type, qt: QualType,
builtin: Builtin, builtin: Builtin,
}; };
const NameToTypeMap = std.StringHashMapUnmanaged(Type); const NameToTypeMap = std.StringHashMapUnmanaged(QualType);
const Builtins = @This(); const Builtins = @This();
@ -25,38 +27,38 @@ pub fn deinit(b: *Builtins, gpa: std.mem.Allocator) void {
b._name_to_type_map.deinit(gpa); b._name_to_type_map.deinit(gpa);
} }
fn specForSize(comp: *const Compilation, size_bits: u32) Type.Builder.Specifier { fn specForSize(comp: *const Compilation, size_bits: u32) TypeStore.Builder.Specifier {
var ty = Type{ .specifier = .short }; var qt: QualType = .short;
if (ty.sizeof(comp).? * 8 == size_bits) return .short; if (qt.bitSizeof(comp) == size_bits) return .short;
ty.specifier = .int; qt = .int;
if (ty.sizeof(comp).? * 8 == size_bits) return .int; if (qt.bitSizeof(comp) == size_bits) return .int;
ty.specifier = .long; qt = .long;
if (ty.sizeof(comp).? * 8 == size_bits) return .long; if (qt.bitSizeof(comp) == size_bits) return .long;
ty.specifier = .long_long; qt = .long_long;
if (ty.sizeof(comp).? * 8 == size_bits) return .long_long; if (qt.bitSizeof(comp) == size_bits) return .long_long;
unreachable; unreachable;
} }
fn createType(desc: TypeDescription, it: *TypeDescription.TypeIterator, comp: *const Compilation, allocator: std.mem.Allocator) !Type { fn createType(desc: TypeDescription, it: *TypeDescription.TypeIterator, comp: *Compilation) !QualType {
var builder: Type.Builder = .{ .error_on_invalid = true }; var parser: Parser = undefined;
parser.comp = comp;
var builder: TypeStore.Builder = .{ .parser = &parser, .error_on_invalid = true };
var require_native_int32 = false; var require_native_int32 = false;
var require_native_int64 = false; var require_native_int64 = false;
for (desc.prefix) |prefix| { for (desc.prefix) |prefix| {
switch (prefix) { switch (prefix) {
.L => builder.combine(undefined, .long, 0) catch unreachable, .L => builder.combine(.long, 0) catch unreachable,
.LL => { .LL => builder.combine(.long_long, 0) catch unreachable,
builder.combine(undefined, .long, 0) catch unreachable;
builder.combine(undefined, .long, 0) catch unreachable;
},
.LLL => { .LLL => {
switch (builder.specifier) { switch (builder.type) {
.none => builder.specifier = .int128, .none => builder.type = .int128,
.signed => builder.specifier = .sint128, .signed => builder.type = .sint128,
.unsigned => builder.specifier = .uint128, .unsigned => builder.type = .uint128,
else => unreachable, else => unreachable,
} }
}, },
@ -65,239 +67,226 @@ fn createType(desc: TypeDescription, it: *TypeDescription.TypeIterator, comp: *c
.N => { .N => {
std.debug.assert(desc.spec == .i); std.debug.assert(desc.spec == .i);
if (!target_util.isLP64(comp.target)) { if (!target_util.isLP64(comp.target)) {
builder.combine(undefined, .long, 0) catch unreachable; builder.combine(.long, 0) catch unreachable;
} }
}, },
.O => { .O => {
builder.combine(undefined, .long, 0) catch unreachable; builder.combine(.long, 0) catch unreachable;
if (comp.target.os.tag != .opencl) { if (comp.target.os.tag != .opencl) {
builder.combine(undefined, .long, 0) catch unreachable; builder.combine(.long, 0) catch unreachable;
} }
}, },
.S => builder.combine(undefined, .signed, 0) catch unreachable, .S => builder.combine(.signed, 0) catch unreachable,
.U => builder.combine(undefined, .unsigned, 0) catch unreachable, .U => builder.combine(.unsigned, 0) catch unreachable,
.I => { .I => {
// Todo: compile-time constant integer // Todo: compile-time constant integer
}, },
} }
} }
switch (desc.spec) { switch (desc.spec) {
.v => builder.combine(undefined, .void, 0) catch unreachable, .v => builder.combine(.void, 0) catch unreachable,
.b => builder.combine(undefined, .bool, 0) catch unreachable, .b => builder.combine(.bool, 0) catch unreachable,
.c => builder.combine(undefined, .char, 0) catch unreachable, .c => builder.combine(.char, 0) catch unreachable,
.s => builder.combine(undefined, .short, 0) catch unreachable, .s => builder.combine(.short, 0) catch unreachable,
.i => { .i => {
if (require_native_int32) { if (require_native_int32) {
builder.specifier = specForSize(comp, 32); builder.type = specForSize(comp, 32);
} else if (require_native_int64) { } else if (require_native_int64) {
builder.specifier = specForSize(comp, 64); builder.type = specForSize(comp, 64);
} else { } else {
switch (builder.specifier) { switch (builder.type) {
.int128, .sint128, .uint128 => {}, .int128, .sint128, .uint128 => {},
else => builder.combine(undefined, .int, 0) catch unreachable, else => builder.combine(.int, 0) catch unreachable,
} }
} }
}, },
.h => builder.combine(undefined, .fp16, 0) catch unreachable, .h => builder.combine(.fp16, 0) catch unreachable,
.x => builder.combine(undefined, .float16, 0) catch unreachable, .x => builder.combine(.float16, 0) catch unreachable,
.y => { .y => {
// Todo: __bf16 // Todo: __bf16
return .{ .specifier = .invalid }; return .invalid;
}, },
.f => builder.combine(undefined, .float, 0) catch unreachable, .f => builder.combine(.float, 0) catch unreachable,
.d => { .d => {
if (builder.specifier == .long_long) { if (builder.type == .long_long) {
builder.specifier = .float128; builder.type = .float128;
} else { } else {
builder.combine(undefined, .double, 0) catch unreachable; builder.combine(.double, 0) catch unreachable;
} }
}, },
.z => { .z => {
std.debug.assert(builder.specifier == .none); std.debug.assert(builder.type == .none);
builder.specifier = Type.Builder.fromType(comp.types.size); builder.type = Builder.fromType(comp, comp.type_store.size);
}, },
.w => { .w => {
std.debug.assert(builder.specifier == .none); std.debug.assert(builder.type == .none);
builder.specifier = Type.Builder.fromType(comp.types.wchar); builder.type = Builder.fromType(comp, comp.type_store.wchar);
}, },
.F => { .F => {
std.debug.assert(builder.specifier == .none); std.debug.assert(builder.type == .none);
builder.specifier = Type.Builder.fromType(comp.types.ns_constant_string.ty); builder.type = Builder.fromType(comp, comp.type_store.ns_constant_string);
}, },
.G => { .G => {
// Todo: id // Todo: id
return .{ .specifier = .invalid }; return .invalid;
}, },
.H => { .H => {
// Todo: SEL // Todo: SEL
return .{ .specifier = .invalid }; return .invalid;
}, },
.M => { .M => {
// Todo: struct objc_super // Todo: struct objc_super
return .{ .specifier = .invalid }; return .invalid;
}, },
.a => { .a => {
std.debug.assert(builder.specifier == .none); std.debug.assert(builder.type == .none);
std.debug.assert(desc.suffix.len == 0); std.debug.assert(desc.suffix.len == 0);
builder.specifier = Type.Builder.fromType(comp.types.va_list); builder.type = Builder.fromType(comp, comp.type_store.va_list);
}, },
.A => { .A => {
std.debug.assert(builder.specifier == .none); std.debug.assert(builder.type == .none);
std.debug.assert(desc.suffix.len == 0); std.debug.assert(desc.suffix.len == 0);
var va_list = comp.types.va_list; var va_list = comp.type_store.va_list;
if (va_list.isArray()) va_list.decayArray(); std.debug.assert(!va_list.is(comp, .array));
builder.specifier = Type.Builder.fromType(va_list); builder.type = Builder.fromType(comp, va_list);
}, },
.V => |element_count| { .V => |element_count| {
std.debug.assert(desc.suffix.len == 0); std.debug.assert(desc.suffix.len == 0);
const child_desc = it.next().?; const child_desc = it.next().?;
const child_ty = try createType(child_desc, undefined, comp, allocator); const elem_qt = try createType(child_desc, undefined, comp);
const arr_ty = try allocator.create(Type.Array); const vector_qt = try comp.type_store.put(comp.gpa, .{ .vector = .{
arr_ty.* = .{ .elem = elem_qt,
.len = element_count, .len = element_count,
.elem = child_ty, } });
}; builder.type = .{ .other = vector_qt };
const vector_ty: Type = .{ .specifier = .vector, .data = .{ .array = arr_ty } };
builder.specifier = Type.Builder.fromType(vector_ty);
}, },
.q => { .q => {
// Todo: scalable vector // Todo: scalable vector
return .{ .specifier = .invalid }; return .invalid;
}, },
.E => { .E => {
// Todo: ext_vector (OpenCL vector) // Todo: ext_vector (OpenCL vector)
return .{ .specifier = .invalid }; return .invalid;
}, },
.X => |child| { .X => |child| {
builder.combine(undefined, .complex, 0) catch unreachable; builder.combine(.complex, 0) catch unreachable;
switch (child) { switch (child) {
.float => builder.combine(undefined, .float, 0) catch unreachable, .float => builder.combine(.float, 0) catch unreachable,
.double => builder.combine(undefined, .double, 0) catch unreachable, .double => builder.combine(.double, 0) catch unreachable,
.longdouble => { .longdouble => {
builder.combine(undefined, .long, 0) catch unreachable; builder.combine(.long, 0) catch unreachable;
builder.combine(undefined, .double, 0) catch unreachable; builder.combine(.double, 0) catch unreachable;
}, },
} }
}, },
.Y => { .Y => {
std.debug.assert(builder.specifier == .none); std.debug.assert(builder.type == .none);
std.debug.assert(desc.suffix.len == 0); std.debug.assert(desc.suffix.len == 0);
builder.specifier = Type.Builder.fromType(comp.types.ptrdiff); builder.type = Builder.fromType(comp, comp.type_store.ptrdiff);
}, },
.P => { .P => {
std.debug.assert(builder.specifier == .none); std.debug.assert(builder.type == .none);
if (comp.types.file.specifier == .invalid) { if (comp.type_store.file.isInvalid()) {
return comp.types.file; return comp.type_store.file;
} }
builder.specifier = Type.Builder.fromType(comp.types.file); builder.type = Builder.fromType(comp, comp.type_store.file);
}, },
.J => { .J => {
std.debug.assert(builder.specifier == .none); std.debug.assert(builder.type == .none);
std.debug.assert(desc.suffix.len == 0); std.debug.assert(desc.suffix.len == 0);
if (comp.types.jmp_buf.specifier == .invalid) { if (comp.type_store.jmp_buf.isInvalid()) {
return comp.types.jmp_buf; return comp.type_store.jmp_buf;
} }
builder.specifier = Type.Builder.fromType(comp.types.jmp_buf); builder.type = Builder.fromType(comp, comp.type_store.jmp_buf);
}, },
.SJ => { .SJ => {
std.debug.assert(builder.specifier == .none); std.debug.assert(builder.type == .none);
std.debug.assert(desc.suffix.len == 0); std.debug.assert(desc.suffix.len == 0);
if (comp.types.sigjmp_buf.specifier == .invalid) { if (comp.type_store.sigjmp_buf.isInvalid()) {
return comp.types.sigjmp_buf; return comp.type_store.sigjmp_buf;
} }
builder.specifier = Type.Builder.fromType(comp.types.sigjmp_buf); builder.type = Builder.fromType(comp, comp.type_store.sigjmp_buf);
}, },
.K => { .K => {
std.debug.assert(builder.specifier == .none); std.debug.assert(builder.type == .none);
if (comp.types.ucontext_t.specifier == .invalid) { if (comp.type_store.ucontext_t.isInvalid()) {
return comp.types.ucontext_t; return comp.type_store.ucontext_t;
} }
builder.specifier = Type.Builder.fromType(comp.types.ucontext_t); builder.type = Builder.fromType(comp, comp.type_store.ucontext_t);
}, },
.p => { .p => {
std.debug.assert(builder.specifier == .none); std.debug.assert(builder.type == .none);
std.debug.assert(desc.suffix.len == 0); std.debug.assert(desc.suffix.len == 0);
builder.specifier = Type.Builder.fromType(comp.types.pid_t); builder.type = Builder.fromType(comp, comp.type_store.pid_t);
}, },
.@"!" => return .{ .specifier = .invalid }, .@"!" => return .invalid,
} }
for (desc.suffix) |suffix| { for (desc.suffix) |suffix| {
switch (suffix) { switch (suffix) {
.@"*" => |address_space| { .@"*" => |address_space| {
_ = address_space; // TODO: handle address space _ = address_space; // TODO: handle address space
const elem_ty = try allocator.create(Type); const pointer_qt = try comp.type_store.put(comp.gpa, .{ .pointer = .{
elem_ty.* = builder.finish(undefined) catch unreachable; .child = builder.finish() catch unreachable,
const ty = Type{ .decayed = null,
.specifier = .pointer, } });
.data = .{ .sub_type = elem_ty },
}; builder.@"const" = null;
builder.qual = .{}; builder.@"volatile" = null;
builder.specifier = Type.Builder.fromType(ty); builder.restrict = null;
builder.type = .{ .other = pointer_qt };
}, },
.C => builder.qual.@"const" = 0, .C => builder.@"const" = 0,
.D => builder.qual.@"volatile" = 0, .D => builder.@"volatile" = 0,
.R => builder.qual.restrict = 0, .R => builder.restrict = 0,
} }
} }
return builder.finish(undefined) catch unreachable; return builder.finish() catch unreachable;
} }
fn createBuiltin(comp: *const Compilation, builtin: Builtin, type_arena: std.mem.Allocator) !Type { fn createBuiltin(comp: *Compilation, builtin: Builtin) !QualType {
var it = TypeDescription.TypeIterator.init(builtin.properties.param_str); var it = TypeDescription.TypeIterator.init(builtin.properties.param_str);
const ret_ty_desc = it.next().?; const ret_ty_desc = it.next().?;
if (ret_ty_desc.spec == .@"!") { if (ret_ty_desc.spec == .@"!") {
// Todo: handle target-dependent definition // Todo: handle target-dependent definition
} }
const ret_ty = try createType(ret_ty_desc, &it, comp, type_arena); const ret_ty = try createType(ret_ty_desc, &it, comp);
var param_count: usize = 0; var param_count: usize = 0;
var params: [Builtin.max_param_count]Type.Func.Param = undefined; var params: [Builtin.max_param_count]TypeStore.Type.Func.Param = undefined;
while (it.next()) |desc| : (param_count += 1) { while (it.next()) |desc| : (param_count += 1) {
params[param_count] = .{ .name_tok = 0, .ty = try createType(desc, &it, comp, type_arena), .name = .empty }; params[param_count] = .{ .name_tok = 0, .qt = try createType(desc, &it, comp), .name = .empty, .node = .null };
} }
const duped_params = try type_arena.dupe(Type.Func.Param, params[0..param_count]); return comp.type_store.put(comp.gpa, .{ .func = .{
const func = try type_arena.create(Type.Func);
func.* = .{
.return_type = ret_ty, .return_type = ret_ty,
.params = duped_params, .kind = if (builtin.properties.isVarArgs()) .variadic else .normal,
}; .params = params[0..param_count],
return .{ } });
.specifier = if (builtin.properties.isVarArgs()) .var_args_func else .func,
.data = .{ .func = func },
};
} }
/// Asserts that the builtin has already been created /// Asserts that the builtin has already been created
pub fn lookup(b: *const Builtins, name: []const u8) Expanded { pub fn lookup(b: *const Builtins, name: []const u8) Expanded {
const builtin = Builtin.fromName(name).?; const builtin = Builtin.fromName(name).?;
const ty = b._name_to_type_map.get(name).?; const qt = b._name_to_type_map.get(name).?;
return .{ return .{ .builtin = builtin, .qt = qt };
.builtin = builtin,
.ty = ty,
};
} }
pub fn getOrCreate(b: *Builtins, comp: *Compilation, name: []const u8, type_arena: std.mem.Allocator) !?Expanded { pub fn getOrCreate(b: *Builtins, comp: *Compilation, name: []const u8) !?Expanded {
const ty = b._name_to_type_map.get(name) orelse { const qt = b._name_to_type_map.get(name) orelse {
const builtin = Builtin.fromName(name) orelse return null; const builtin = Builtin.fromName(name) orelse return null;
if (!comp.hasBuiltinFunction(builtin)) return null; if (!comp.hasBuiltinFunction(builtin)) return null;
try b._name_to_type_map.ensureUnusedCapacity(comp.gpa, 1); try b._name_to_type_map.ensureUnusedCapacity(comp.gpa, 1);
const ty = try createBuiltin(comp, builtin, type_arena); const qt = try createBuiltin(comp, builtin);
b._name_to_type_map.putAssumeCapacity(name, ty); b._name_to_type_map.putAssumeCapacity(name, qt);
return .{ return .{
.builtin = builtin, .builtin = builtin,
.ty = ty, .qt = qt,
}; };
}; };
const builtin = Builtin.fromName(name).?; const builtin = Builtin.fromName(name).?;
return .{ return .{ .builtin = builtin, .qt = qt };
.builtin = builtin,
.ty = ty,
};
} }
pub const Iterator = struct { pub const Iterator = struct {
@ -350,19 +339,21 @@ test Iterator {
} }
test "All builtins" { test "All builtins" {
var comp = Compilation.init(std.testing.allocator, std.fs.cwd()); var arena_state: std.heap.ArenaAllocator = .init(std.testing.allocator);
defer comp.deinit(); defer arena_state.deinit();
_ = try comp.generateBuiltinMacros(.include_system_defines); const arena = arena_state.allocator();
var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
defer arena.deinit();
const type_arena = arena.allocator(); var comp = Compilation.init(std.testing.allocator, arena, undefined, std.fs.cwd());
defer comp.deinit();
try comp.type_store.initNamedTypes(&comp);
comp.type_store.va_list = try comp.type_store.va_list.decay(&comp);
var builtin_it = Iterator{}; var builtin_it = Iterator{};
while (builtin_it.next()) |entry| { while (builtin_it.next()) |entry| {
const name = try type_arena.dupe(u8, entry.name); const name = try arena.dupe(u8, entry.name);
if (try comp.builtins.getOrCreate(&comp, name, type_arena)) |func_ty| { if (try comp.builtins.getOrCreate(&comp, name)) |func_ty| {
const get_again = (try comp.builtins.getOrCreate(&comp, name, std.testing.failing_allocator)).?; const get_again = (try comp.builtins.getOrCreate(&comp, name)).?;
const found_by_lookup = comp.builtins.lookup(name); const found_by_lookup = comp.builtins.lookup(name);
try std.testing.expectEqual(func_ty.builtin.tag, get_again.builtin.tag); try std.testing.expectEqual(func_ty.builtin.tag, get_again.builtin.tag);
try std.testing.expectEqual(func_ty.builtin.tag, found_by_lookup.builtin.tag); try std.testing.expectEqual(func_ty.builtin.tag, found_by_lookup.builtin.tag);
@ -373,19 +364,19 @@ test "All builtins" {
test "Allocation failures" { test "Allocation failures" {
const Test = struct { const Test = struct {
fn testOne(allocator: std.mem.Allocator) !void { fn testOne(allocator: std.mem.Allocator) !void {
var comp = Compilation.init(allocator, std.fs.cwd()); var arena_state: std.heap.ArenaAllocator = .init(allocator);
defer arena_state.deinit();
const arena = arena_state.allocator();
var comp = Compilation.init(allocator, arena, undefined, std.fs.cwd());
defer comp.deinit(); defer comp.deinit();
_ = try comp.generateBuiltinMacros(.include_system_defines); _ = try comp.generateBuiltinMacros(.include_system_defines);
var arena = std.heap.ArenaAllocator.init(comp.gpa);
defer arena.deinit();
const type_arena = arena.allocator();
const num_builtins = 40; const num_builtins = 40;
var builtin_it = Iterator{}; var builtin_it = Iterator{};
for (0..num_builtins) |_| { for (0..num_builtins) |_| {
const entry = builtin_it.next().?; const entry = builtin_it.next().?;
_ = try comp.builtins.getOrCreate(&comp, entry.name, type_arena); _ = try comp.builtins.getOrCreate(&comp, entry.name);
} }
} }
}; };

File diff suppressed because it is too large Load Diff

View File

@ -5,8 +5,9 @@ const Builtins = @import("../Builtins.zig");
const Builtin = Builtins.Builtin; const Builtin = Builtins.Builtin;
const Parser = @import("../Parser.zig"); const Parser = @import("../Parser.zig");
const Tree = @import("../Tree.zig"); const Tree = @import("../Tree.zig");
const NodeIndex = Tree.NodeIndex; const TypeStore = @import("../TypeStore.zig");
const Type = @import("../Type.zig"); const Type = TypeStore.Type;
const QualType = TypeStore.QualType;
const Value = @import("../Value.zig"); const Value = @import("../Value.zig");
fn makeNan(comptime T: type, str: []const u8) T { fn makeNan(comptime T: type, str: []const u8) T {
@ -22,22 +23,22 @@ fn makeNan(comptime T: type, str: []const u8) T {
return @bitCast(@as(UnsignedSameSize, bits) | @as(UnsignedSameSize, @bitCast(std.math.nan(T)))); return @bitCast(@as(UnsignedSameSize, bits) | @as(UnsignedSameSize, @bitCast(std.math.nan(T))));
} }
pub fn eval(tag: Builtin.Tag, p: *Parser, args: []const NodeIndex) !Value { pub fn eval(tag: Builtin.Tag, p: *Parser, args: []const Tree.Node.Index) !Value {
const builtin = Builtin.fromTag(tag); const builtin = Builtin.fromTag(tag);
if (!builtin.properties.attributes.const_evaluable) return .{}; if (!builtin.properties.attributes.const_evaluable) return .{};
switch (tag) { switch (tag) {
Builtin.tagFromName("__builtin_inff").?, .__builtin_inff,
Builtin.tagFromName("__builtin_inf").?, .__builtin_inf,
Builtin.tagFromName("__builtin_infl").?, .__builtin_infl,
=> { => {
const ty: Type = switch (tag) { const qt: QualType = switch (tag) {
Builtin.tagFromName("__builtin_inff").? => .{ .specifier = .float }, .__builtin_inff => .float,
Builtin.tagFromName("__builtin_inf").? => .{ .specifier = .double }, .__builtin_inf => .double,
Builtin.tagFromName("__builtin_infl").? => .{ .specifier = .long_double }, .__builtin_infl => .long_double,
else => unreachable, else => unreachable,
}; };
const f: Interner.Key.Float = switch (ty.bitSizeof(p.comp).?) { const f: Interner.Key.Float = switch (qt.bitSizeof(p.comp)) {
32 => .{ .f32 = std.math.inf(f32) }, 32 => .{ .f32 = std.math.inf(f32) },
64 => .{ .f64 = std.math.inf(f64) }, 64 => .{ .f64 = std.math.inf(f64) },
80 => .{ .f80 = std.math.inf(f80) }, 80 => .{ .f80 = std.math.inf(f80) },
@ -46,14 +47,14 @@ pub fn eval(tag: Builtin.Tag, p: *Parser, args: []const NodeIndex) !Value {
}; };
return Value.intern(p.comp, .{ .float = f }); return Value.intern(p.comp, .{ .float = f });
}, },
Builtin.tagFromName("__builtin_isinf").? => blk: { .__builtin_isinf => blk: {
if (args.len == 0) break :blk; if (args.len == 0) break :blk;
const val = p.value_map.get(args[0]) orelse break :blk; const val = p.tree.value_map.get(args[0]) orelse break :blk;
return Value.fromBool(val.isInf(p.comp)); return Value.fromBool(val.isInf(p.comp));
}, },
Builtin.tagFromName("__builtin_isinf_sign").? => blk: { .__builtin_isinf_sign => blk: {
if (args.len == 0) break :blk; if (args.len == 0) break :blk;
const val = p.value_map.get(args[0]) orelse break :blk; const val = p.tree.value_map.get(args[0]) orelse break :blk;
switch (val.isInfSign(p.comp)) { switch (val.isInfSign(p.comp)) {
.unknown => {}, .unknown => {},
.finite => return Value.zero, .finite => return Value.zero,
@ -61,17 +62,17 @@ pub fn eval(tag: Builtin.Tag, p: *Parser, args: []const NodeIndex) !Value {
.negative => return Value.int(@as(i64, -1), p.comp), .negative => return Value.int(@as(i64, -1), p.comp),
} }
}, },
Builtin.tagFromName("__builtin_isnan").? => blk: { .__builtin_isnan => blk: {
if (args.len == 0) break :blk; if (args.len == 0) break :blk;
const val = p.value_map.get(args[0]) orelse break :blk; const val = p.tree.value_map.get(args[0]) orelse break :blk;
return Value.fromBool(val.isNan(p.comp)); return Value.fromBool(val.isNan(p.comp));
}, },
Builtin.tagFromName("__builtin_nan").? => blk: { .__builtin_nan => blk: {
if (args.len == 0) break :blk; if (args.len == 0) break :blk;
const val = p.getDecayedStringLiteral(args[0]) orelse break :blk; const val = p.getDecayedStringLiteral(args[0]) orelse break :blk;
const bytes = p.comp.interner.get(val.ref()).bytes; const bytes = p.comp.interner.get(val.ref()).bytes;
const f: Interner.Key.Float = switch ((Type{ .specifier = .double }).bitSizeof(p.comp).?) { const f: Interner.Key.Float = switch (Type.Float.double.bits(p.comp)) {
32 => .{ .f32 = makeNan(f32, bytes) }, 32 => .{ .f32 = makeNan(f32, bytes) },
64 => .{ .f64 = makeNan(f64, bytes) }, 64 => .{ .f64 = makeNan(f64, bytes) },
80 => .{ .f80 = makeNan(f80, bytes) }, 80 => .{ .f80 = makeNan(f80, bytes) },

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -96,7 +96,7 @@ fn findProgramByNamePosix(name: []const u8, path: ?[]const u8, buf: []u8) ?[]con
} }
pub const Filesystem = union(enum) { pub const Filesystem = union(enum) {
real: void, real: std.fs.Dir,
fake: []const Entry, fake: []const Entry,
const Entry = struct { const Entry = struct {
@ -172,8 +172,8 @@ pub const Filesystem = union(enum) {
pub fn exists(fs: Filesystem, path: []const u8) bool { pub fn exists(fs: Filesystem, path: []const u8) bool {
switch (fs) { switch (fs) {
.real => { .real => |cwd| {
std.fs.cwd().access(path, .{}) catch return false; cwd.access(path, .{}) catch return false;
return true; return true;
}, },
.fake => |paths| return existsFake(paths, path), .fake => |paths| return existsFake(paths, path),
@ -210,8 +210,8 @@ pub const Filesystem = union(enum) {
/// Otherwise returns a slice of `buf`. If the file is larger than `buf` partial contents are returned /// Otherwise returns a slice of `buf`. If the file is larger than `buf` partial contents are returned
pub fn readFile(fs: Filesystem, path: []const u8, buf: []u8) ?[]const u8 { pub fn readFile(fs: Filesystem, path: []const u8, buf: []u8) ?[]const u8 {
return switch (fs) { return switch (fs) {
.real => { .real => |cwd| {
const file = std.fs.cwd().openFile(path, .{}) catch return null; const file = cwd.openFile(path, .{}) catch return null;
defer file.close(); defer file.close();
const bytes_read = file.readAll(buf) catch return null; const bytes_read = file.readAll(buf) catch return null;
@ -223,7 +223,7 @@ pub const Filesystem = union(enum) {
pub fn openDir(fs: Filesystem, dir_name: []const u8) std.fs.Dir.OpenError!Dir { pub fn openDir(fs: Filesystem, dir_name: []const u8) std.fs.Dir.OpenError!Dir {
return switch (fs) { return switch (fs) {
.real => .{ .dir = try std.fs.cwd().openDir(dir_name, .{ .access_sub_paths = false, .iterate = true }) }, .real => |cwd| .{ .dir = try cwd.openDir(dir_name, .{ .access_sub_paths = false, .iterate = true }) },
.fake => |entries| .{ .fake = .{ .entries = entries, .path = dir_name } }, .fake => |entries| .{ .fake = .{ .entries = entries, .path = dir_name } },
}; };
} }

View File

@ -1,9 +1,11 @@
const std = @import("std"); const std = @import("std");
const Toolchain = @import("../Toolchain.zig");
const target_util = @import("../target.zig");
const system_defaults = @import("system_defaults"); const system_defaults = @import("system_defaults");
const GCCVersion = @import("GCCVersion.zig"); const GCCVersion = @import("GCCVersion.zig");
const Multilib = @import("Multilib.zig"); const Multilib = @import("Multilib.zig");
const target_util = @import("../target.zig");
const Toolchain = @import("../Toolchain.zig");
const GCCDetector = @This(); const GCCDetector = @This();
@ -50,7 +52,7 @@ fn addDefaultGCCPrefixes(prefixes: *std.ArrayListUnmanaged([]const u8), tc: *con
if (sysroot.len == 0) { if (sysroot.len == 0) {
prefixes.appendAssumeCapacity("/usr"); prefixes.appendAssumeCapacity("/usr");
} else { } else {
var usr_path = try tc.arena.alloc(u8, 4 + sysroot.len); var usr_path = try tc.driver.comp.arena.alloc(u8, 4 + sysroot.len);
@memcpy(usr_path[0..4], "/usr"); @memcpy(usr_path[0..4], "/usr");
@memcpy(usr_path[4..], sysroot); @memcpy(usr_path[4..], sysroot);
prefixes.appendAssumeCapacity(usr_path); prefixes.appendAssumeCapacity(usr_path);
@ -284,11 +286,6 @@ fn collectLibDirsAndTriples(
}, },
.x86 => { .x86 => {
lib_dirs.appendSliceAssumeCapacity(&X86LibDirs); lib_dirs.appendSliceAssumeCapacity(&X86LibDirs);
triple_aliases.appendSliceAssumeCapacity(&X86Triples);
biarch_libdirs.appendSliceAssumeCapacity(&X86_64LibDirs);
biarch_triple_aliases.appendSliceAssumeCapacity(&X86_64Triples);
biarch_libdirs.appendSliceAssumeCapacity(&X32LibDirs);
biarch_triple_aliases.appendSliceAssumeCapacity(&X32Triples);
}, },
.loongarch64 => { .loongarch64 => {
lib_dirs.appendSliceAssumeCapacity(&LoongArch64LibDirs); lib_dirs.appendSliceAssumeCapacity(&LoongArch64LibDirs);
@ -587,6 +584,7 @@ fn scanLibDirForGCCTriple(
) !void { ) !void {
var path_buf: [std.fs.max_path_bytes]u8 = undefined; var path_buf: [std.fs.max_path_bytes]u8 = undefined;
var fib = std.heap.FixedBufferAllocator.init(&path_buf); var fib = std.heap.FixedBufferAllocator.init(&path_buf);
const arena = tc.driver.comp.arena;
for (0..2) |i| { for (0..2) |i| {
if (i == 0 and !gcc_dir_exists) continue; if (i == 0 and !gcc_dir_exists) continue;
if (i == 1 and !gcc_cross_dir_exists) continue; if (i == 1 and !gcc_cross_dir_exists) continue;
@ -619,9 +617,9 @@ fn scanLibDirForGCCTriple(
if (!try self.scanGCCForMultilibs(tc, target, .{ dir_name, version_text }, needs_biarch_suffix)) continue; if (!try self.scanGCCForMultilibs(tc, target, .{ dir_name, version_text }, needs_biarch_suffix)) continue;
self.version = candidate_version; self.version = candidate_version;
self.gcc_triple = try tc.arena.dupe(u8, candidate_triple); self.gcc_triple = try arena.dupe(u8, candidate_triple);
self.install_path = try std.fs.path.join(tc.arena, &.{ lib_dir, lib_suffix, version_text }); self.install_path = try std.fs.path.join(arena, &.{ lib_dir, lib_suffix, version_text });
self.parent_lib_path = try std.fs.path.join(tc.arena, &.{ self.install_path, "..", "..", ".." }); self.parent_lib_path = try std.fs.path.join(arena, &.{ self.install_path, "..", "..", ".." });
self.is_valid = true; self.is_valid = true;
} }
} }

View File

@ -10,8 +10,9 @@
const std = @import("std"); const std = @import("std");
const mem = std.mem; const mem = std.mem;
const Allocator = mem.Allocator; const Allocator = mem.Allocator;
const Source = @import("Source.zig");
const Compilation = @import("Compilation.zig"); const Compilation = @import("Compilation.zig");
const Source = @import("Source.zig");
const Tokenizer = @import("Tokenizer.zig"); const Tokenizer = @import("Tokenizer.zig");
pub const Hideset = @This(); pub const Hideset = @This();
@ -51,10 +52,10 @@ pub const Index = enum(u32) {
_, _,
}; };
map: std.AutoHashMapUnmanaged(Identifier, Index) = .empty, map: std.AutoHashMapUnmanaged(Identifier, Index) = .{},
/// Used for computing union/intersection of two lists; stored here so that allocations can be retained /// Used for computing union/intersection of two lists; stored here so that allocations can be retained
/// until hideset is deinit'ed /// until hideset is deinit'ed
tmp_map: std.AutoHashMapUnmanaged(Identifier, void) = .empty, tmp_map: std.AutoHashMapUnmanaged(Identifier, void) = .{},
linked_list: Item.List = .{}, linked_list: Item.List = .{},
comp: *const Compilation, comp: *const Compilation,

View File

@ -3,17 +3,16 @@
const std = @import("std"); const std = @import("std");
const Allocator = std.mem.Allocator; const Allocator = std.mem.Allocator;
const testing = std.testing; const testing = std.testing;
const Diagnostics = @import("Diagnostics.zig");
const Parser = @import("Parser.zig");
const Tree = @import("Tree.zig"); const Tree = @import("Tree.zig");
const Token = Tree.Token; const Token = Tree.Token;
const TokenIndex = Tree.TokenIndex; const TokenIndex = Tree.TokenIndex;
const NodeIndex = Tree.NodeIndex; const Node = Tree.Node;
const Type = @import("Type.zig");
const Diagnostics = @import("Diagnostics.zig");
const NodeList = std.array_list.Managed(NodeIndex);
const Parser = @import("Parser.zig");
const Item = struct { const Item = struct {
list: InitList = .{}, list: InitList,
index: u64, index: u64,
fn order(_: void, a: Item, b: Item) std.math.Order { fn order(_: void, a: Item, b: Item) std.math.Order {
@ -24,7 +23,7 @@ const Item = struct {
const InitList = @This(); const InitList = @This();
list: std.ArrayListUnmanaged(Item) = .empty, list: std.ArrayListUnmanaged(Item) = .empty,
node: NodeIndex = .none, node: Node.OptIndex = .null,
tok: TokenIndex = 0, tok: TokenIndex = 0,
/// Deinitialize freeing all memory. /// Deinitialize freeing all memory.
@ -34,50 +33,6 @@ pub fn deinit(il: *InitList, gpa: Allocator) void {
il.* = undefined; il.* = undefined;
} }
/// Insert initializer at index, returning previous entry if one exists.
pub fn put(il: *InitList, gpa: Allocator, index: usize, node: NodeIndex, tok: TokenIndex) !?TokenIndex {
const items = il.list.items;
var left: usize = 0;
var right: usize = items.len;
// Append new value to empty list
if (left == right) {
const item = try il.list.addOne(gpa);
item.* = .{
.list = .{ .node = node, .tok = tok },
.index = index,
};
return null;
}
while (left < right) {
// Avoid overflowing in the midpoint calculation
const mid = left + (right - left) / 2;
// Compare the key with the midpoint element
switch (std.math.order(index, items[mid].index)) {
.eq => {
// Replace previous entry.
const prev = items[mid].list.tok;
items[mid].list.deinit(gpa);
items[mid] = .{
.list = .{ .node = node, .tok = tok },
.index = index,
};
return prev;
},
.gt => left = mid + 1,
.lt => right = mid,
}
}
// Insert a new value into a sorted position.
try il.list.insert(gpa, left, .{
.list = .{ .node = node, .tok = tok },
.index = index,
});
return null;
}
/// Find item at index, create new if one does not exist. /// Find item at index, create new if one does not exist.
pub fn find(il: *InitList, gpa: Allocator, index: u64) !*InitList { pub fn find(il: *InitList, gpa: Allocator, index: u64) !*InitList {
const items = il.list.items; const items = il.list.items;
@ -85,13 +40,21 @@ pub fn find(il: *InitList, gpa: Allocator, index: u64) !*InitList {
var right: usize = items.len; var right: usize = items.len;
// Append new value to empty list // Append new value to empty list
if (left == right) { if (il.list.items.len == 0) {
const item = try il.list.addOne(gpa); const item = try il.list.addOne(gpa);
item.* = .{ item.* = .{
.list = .{ .node = .none, .tok = 0 }, .list = .{},
.index = index, .index = index,
}; };
return &item.list; return &item.list;
} else if (il.list.items[il.list.items.len - 1].index < index) {
// Append a new value to the end of the list.
const new = try il.list.addOne(gpa);
new.* = .{
.list = .{},
.index = index,
};
return &new.list;
} }
while (left < right) { while (left < right) {
@ -107,7 +70,7 @@ pub fn find(il: *InitList, gpa: Allocator, index: u64) !*InitList {
// Insert a new value into a sorted position. // Insert a new value into a sorted position.
try il.list.insert(gpa, left, .{ try il.list.insert(gpa, left, .{
.list = .{ .node = .none, .tok = 0 }, .list = .{},
.index = index, .index = index,
}); });
return &il.list.items[left].list; return &il.list.items[left].list;
@ -118,22 +81,6 @@ test "basic usage" {
var il: InitList = .{}; var il: InitList = .{};
defer il.deinit(gpa); defer il.deinit(gpa);
{
var i: usize = 0;
while (i < 5) : (i += 1) {
const prev = try il.put(gpa, i, .none, 0);
try testing.expect(prev == null);
}
}
{
const failing = testing.failing_allocator;
var i: usize = 0;
while (i < 5) : (i += 1) {
_ = try il.find(failing, i);
}
}
{ {
var item = try il.find(gpa, 0); var item = try il.find(gpa, 0);
var i: usize = 1; var i: usize = 1;

View File

@ -1,6 +1,7 @@
const std = @import("std"); const std = @import("std");
const DiagnosticTag = @import("Diagnostics.zig").Tag;
const char_info = @import("char_info.zig"); const char_info = @import("char_info.zig");
const DiagnosticTag = @import("Diagnostics.zig").Tag;
pub const Compiler = enum { pub const Compiler = enum {
clang, clang,
@ -144,14 +145,9 @@ pub fn setStandard(self: *LangOpts, name: []const u8) error{InvalidStandard}!voi
self.standard = Standard.NameMap.get(name) orelse return error.InvalidStandard; self.standard = Standard.NameMap.get(name) orelse return error.InvalidStandard;
} }
pub fn enableMSExtensions(self: *LangOpts) void { pub fn setMSExtensions(self: *LangOpts, enabled: bool) void {
self.declspec_attrs = true; self.declspec_attrs = enabled;
self.ms_extensions = true; self.ms_extensions = enabled;
}
pub fn disableMSExtensions(self: *LangOpts) void {
self.declspec_attrs = false;
self.ms_extensions = true;
} }
pub fn hasChar8_T(self: *const LangOpts) bool { pub fn hasChar8_T(self: *const LangOpts) bool {
@ -164,7 +160,7 @@ pub fn hasDigraphs(self: *const LangOpts) bool {
pub fn setEmulatedCompiler(self: *LangOpts, compiler: Compiler) void { pub fn setEmulatedCompiler(self: *LangOpts, compiler: Compiler) void {
self.emulate = compiler; self.emulate = compiler;
if (compiler == .msvc) self.enableMSExtensions(); self.setMSExtensions(compiler == .msvc);
} }
pub fn setFpEvalMethod(self: *LangOpts, fp_eval_method: FPEvalMethod) void { pub fn setFpEvalMethod(self: *LangOpts, fp_eval_method: FPEvalMethod) void {

File diff suppressed because it is too large Load Diff

2390
lib/compiler/aro/aro/Parser/Diagnostic.zig vendored Normal file

File diff suppressed because it is too large Load Diff

View File

@ -1,7 +1,9 @@
const std = @import("std"); const std = @import("std");
const Compilation = @import("Compilation.zig"); const Compilation = @import("Compilation.zig");
const Preprocessor = @import("Preprocessor.zig"); const Diagnostics = @import("Diagnostics.zig");
const Parser = @import("Parser.zig"); const Parser = @import("Parser.zig");
const Preprocessor = @import("Preprocessor.zig");
const TokenIndex = @import("Tree.zig").TokenIndex; const TokenIndex = @import("Tree.zig").TokenIndex;
pub const Error = Compilation.Error || error{ UnknownPragma, StopPreprocessing }; pub const Error = Compilation.Error || error{ UnknownPragma, StopPreprocessing };
@ -69,7 +71,7 @@ pub fn pasteTokens(pp: *Preprocessor, start_idx: TokenIndex) ![]const u8 {
pub fn shouldPreserveTokens(self: *Pragma, pp: *Preprocessor, start_idx: TokenIndex) bool { pub fn shouldPreserveTokens(self: *Pragma, pp: *Preprocessor, start_idx: TokenIndex) bool {
if (self.preserveTokens) |func| return func(self, pp, start_idx); if (self.preserveTokens) |func| return func(self, pp, start_idx);
return false; return true;
} }
pub fn preprocessorCB(self: *Pragma, pp: *Preprocessor, start_idx: TokenIndex) Error!void { pub fn preprocessorCB(self: *Pragma, pp: *Preprocessor, start_idx: TokenIndex) Error!void {
@ -81,3 +83,128 @@ pub fn parserCB(self: *Pragma, p: *Parser, start_idx: TokenIndex) Compilation.Er
defer std.debug.assert(tok_index == p.tok_i); defer std.debug.assert(tok_index == p.tok_i);
if (self.parserHandler) |func| return func(self, p, start_idx); if (self.parserHandler) |func| return func(self, p, start_idx);
} }
pub const Diagnostic = struct {
fmt: []const u8,
kind: Diagnostics.Message.Kind,
opt: ?Diagnostics.Option = null,
extension: bool = false,
pub const pragma_warning_message: Diagnostic = .{
.fmt = "{s}",
.kind = .warning,
.opt = .@"#pragma-messages",
};
pub const pragma_error_message: Diagnostic = .{
.fmt = "{s}",
.kind = .@"error",
};
pub const pragma_message: Diagnostic = .{
.fmt = "#pragma message: {s}",
.kind = .note,
};
pub const pragma_requires_string_literal: Diagnostic = .{
.fmt = "pragma {s} requires string literal",
.kind = .@"error",
};
pub const poisoned_identifier: Diagnostic = .{
.fmt = "attempt to use a poisoned identifier",
.kind = .@"error",
};
pub const pragma_poison_identifier: Diagnostic = .{
.fmt = "can only poison identifier tokens",
.kind = .@"error",
};
pub const pragma_poison_macro: Diagnostic = .{
.fmt = "poisoning existing macro",
.kind = .warning,
};
pub const unknown_gcc_pragma: Diagnostic = .{
.fmt = "pragma GCC expected 'error', 'warning', 'diagnostic', 'poison'",
.kind = .off,
.opt = .@"unknown-pragmas",
};
pub const unknown_gcc_pragma_directive: Diagnostic = .{
.fmt = "pragma GCC diagnostic expected 'error', 'warning', 'ignored', 'fatal', 'push', or 'pop'",
.kind = .warning,
.opt = .@"unknown-pragmas",
.extension = true,
};
pub const malformed_warning_check: Diagnostic = .{
.fmt = "{s} expected option name (e.g. \"-Wundef\")",
.opt = .@"malformed-warning-check",
.kind = .warning,
.extension = true,
};
pub const pragma_pack_lparen: Diagnostic = .{
.fmt = "missing '(' after '#pragma pack' - ignoring",
.kind = .warning,
.opt = .@"ignored-pragmas",
};
pub const pragma_pack_rparen: Diagnostic = .{
.fmt = "missing ')' after '#pragma pack' - ignoring",
.kind = .warning,
.opt = .@"ignored-pragmas",
};
pub const pragma_pack_unknown_action: Diagnostic = .{
.fmt = "unknown action for '#pragma pack' - ignoring",
.kind = .warning,
.opt = .@"ignored-pragmas",
};
pub const pragma_pack_show: Diagnostic = .{
.fmt = "value of #pragma pack(show) == {d}",
.kind = .warning,
};
pub const pragma_pack_int_ident: Diagnostic = .{
.fmt = "expected integer or identifier in '#pragma pack' - ignored",
.kind = .warning,
.opt = .@"ignored-pragmas",
};
pub const pragma_pack_int: Diagnostic = .{
.fmt = "expected #pragma pack parameter to be '1', '2', '4', '8', or '16'",
.opt = .@"ignored-pragmas",
.kind = .warning,
};
pub const pragma_pack_undefined_pop: Diagnostic = .{
.fmt = "specifying both a name and alignment to 'pop' is undefined",
.kind = .warning,
};
pub const pragma_pack_empty_stack: Diagnostic = .{
.fmt = "#pragma pack(pop, ...) failed: stack empty",
.opt = .@"ignored-pragmas",
.kind = .warning,
};
};
pub fn err(pp: *Preprocessor, tok_i: TokenIndex, diagnostic: Diagnostic, args: anytype) Compilation.Error!void {
var sf = std.heap.stackFallback(1024, pp.gpa);
var allocating: std.Io.Writer.Allocating = .init(sf.get());
defer allocating.deinit();
Diagnostics.formatArgs(&allocating.writer, diagnostic.fmt, args) catch return error.OutOfMemory;
try pp.diagnostics.addWithLocation(pp.comp, .{
.kind = diagnostic.kind,
.opt = diagnostic.opt,
.text = allocating.getWritten(),
.location = pp.tokens.items(.loc)[tok_i].expand(pp.comp),
.extension = diagnostic.extension,
}, pp.expansionSlice(tok_i), true);
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,442 @@
const std = @import("std");
const Diagnostics = @import("../Diagnostics.zig");
const LangOpts = @import("../LangOpts.zig");
const Compilation = @import("../Compilation.zig");
const Diagnostic = @This();
fmt: []const u8,
kind: Diagnostics.Message.Kind,
opt: ?Diagnostics.Option = null,
extension: bool = false,
pub const elif_without_if: Diagnostic = .{
.fmt = "#elif without #if",
.kind = .@"error",
};
pub const elif_after_else: Diagnostic = .{
.fmt = "#elif after #else",
.kind = .@"error",
};
pub const elifdef_without_if: Diagnostic = .{
.fmt = "#elifdef without #if",
.kind = .@"error",
};
pub const elifdef_after_else: Diagnostic = .{
.fmt = "#elifdef after #else",
.kind = .@"error",
};
pub const elifndef_without_if: Diagnostic = .{
.fmt = "#elifndef without #if",
.kind = .@"error",
};
pub const elifndef_after_else: Diagnostic = .{
.fmt = "#elifndef after #else",
.kind = .@"error",
};
pub const else_without_if: Diagnostic = .{
.fmt = "#else without #if",
.kind = .@"error",
};
pub const else_after_else: Diagnostic = .{
.fmt = "#else after #else",
.kind = .@"error",
};
pub const endif_without_if: Diagnostic = .{
.fmt = "#endif without #if",
.kind = .@"error",
};
pub const unknown_pragma: Diagnostic = .{
.fmt = "unknown pragma ignored",
.opt = .@"unknown-pragmas",
.kind = .off,
};
pub const line_simple_digit: Diagnostic = .{
.fmt = "#line directive requires a simple digit sequence",
.kind = .@"error",
};
pub const line_invalid_filename: Diagnostic = .{
.fmt = "invalid filename for #line directive",
.kind = .@"error",
};
pub const unterminated_conditional_directive: Diagnostic = .{
.fmt = "unterminated conditional directive",
.kind = .@"error",
};
pub const invalid_preprocessing_directive: Diagnostic = .{
.fmt = "invalid preprocessing directive",
.kind = .@"error",
};
pub const error_directive: Diagnostic = .{
.fmt = "{s}",
.kind = .@"error",
};
pub const warning_directive: Diagnostic = .{
.fmt = "{s}",
.opt = .@"#warnings",
.kind = .warning,
};
pub const macro_name_missing: Diagnostic = .{
.fmt = "macro name missing",
.kind = .@"error",
};
pub const extra_tokens_directive_end: Diagnostic = .{
.fmt = "extra tokens at end of macro directive",
.kind = .@"error",
};
pub const expected_value_in_expr: Diagnostic = .{
.fmt = "expected value in expression",
.kind = .@"error",
};
pub const defined_as_macro_name: Diagnostic = .{
.fmt = "'defined' cannot be used as a macro name",
.kind = .@"error",
};
pub const macro_name_must_be_identifier: Diagnostic = .{
.fmt = "macro name must be an identifier",
.kind = .@"error",
};
pub const whitespace_after_macro_name: Diagnostic = .{
.fmt = "ISO C99 requires whitespace after the macro name",
.opt = .@"c99-extensions",
.kind = .warning,
.extension = true,
};
pub const hash_hash_at_start: Diagnostic = .{
.fmt = "'##' cannot appear at the start of a macro expansion",
.kind = .@"error",
};
pub const hash_hash_at_end: Diagnostic = .{
.fmt = "'##' cannot appear at the end of a macro expansion",
.kind = .@"error",
};
pub const pasting_formed_invalid: Diagnostic = .{
.fmt = "pasting formed '{s}', an invalid preprocessing token",
.kind = .@"error",
};
pub const missing_paren_param_list: Diagnostic = .{
.fmt = "missing ')' in macro parameter list",
.kind = .@"error",
};
pub const unterminated_macro_param_list: Diagnostic = .{
.fmt = "unterminated macro param list",
.kind = .@"error",
};
pub const invalid_token_param_list: Diagnostic = .{
.fmt = "invalid token in macro parameter list",
.kind = .@"error",
};
pub const expected_comma_param_list: Diagnostic = .{
.fmt = "expected comma in macro parameter list",
.kind = .@"error",
};
pub const hash_not_followed_param: Diagnostic = .{
.fmt = "'#' is not followed by a macro parameter",
.kind = .@"error",
};
pub const expected_filename: Diagnostic = .{
.fmt = "expected \"FILENAME\" or <FILENAME>",
.kind = .@"error",
};
pub const empty_filename: Diagnostic = .{
.fmt = "empty filename",
.kind = .@"error",
};
pub const header_str_closing: Diagnostic = .{
.fmt = "expected closing '>'",
.kind = .@"error",
};
pub const header_str_match: Diagnostic = .{
.fmt = "to match this '<'",
.kind = .note,
};
pub const string_literal_in_pp_expr: Diagnostic = .{
.fmt = "string literal in preprocessor expression",
.kind = .@"error",
};
pub const empty_char_literal_warning: Diagnostic = .{
.fmt = "empty character constant",
.kind = .warning,
.opt = .@"invalid-pp-token",
.extension = true,
};
pub const unterminated_char_literal_warning: Diagnostic = .{
.fmt = "missing terminating ' character",
.kind = .warning,
.opt = .@"invalid-pp-token",
.extension = true,
};
pub const unterminated_string_literal_warning: Diagnostic = .{
.fmt = "missing terminating '\"' character",
.kind = .warning,
.opt = .@"invalid-pp-token",
.extension = true,
};
pub const unterminated_comment: Diagnostic = .{
.fmt = "unterminated comment",
.kind = .@"error",
};
pub const malformed_embed_param: Diagnostic = .{
.fmt = "unexpected token in embed parameter",
.kind = .@"error",
};
pub const malformed_embed_limit: Diagnostic = .{
.fmt = "the limit parameter expects one non-negative integer as a parameter",
.kind = .@"error",
};
pub const duplicate_embed_param: Diagnostic = .{
.fmt = "duplicate embed parameter '{s}'",
.kind = .warning,
.opt = .@"duplicate-embed-param",
};
pub const unsupported_embed_param: Diagnostic = .{
.fmt = "unsupported embed parameter '{s}' embed parameter",
.kind = .warning,
.opt = .@"unsupported-embed-param",
};
// Diagnostics for `__VA_OPT__` syntax and for macro-name lookups in
// preprocessor expressions.

/// `__VA_OPT__` must be immediately followed by `(`.
pub const va_opt_lparen: Diagnostic = .{
    .fmt = "missing '(' following __VA_OPT__",
    .kind = .@"error",
};
/// The `(`-delimited argument of `__VA_OPT__` was never closed.
pub const va_opt_rparen: Diagnostic = .{
    .fmt = "unterminated __VA_OPT__ argument list",
    .kind = .@"error",
};
/// A `#define` shadows a language keyword. Off by default,
/// enabled with `-Wkeyword-macro`.
pub const keyword_macro: Diagnostic = .{
    .fmt = "keyword is hidden by macro definition",
    .kind = .off,
    .opt = .@"keyword-macro",
    .extension = true,
};
/// An undefined identifier in a `#if`/`#elif` expression evaluates to 0;
/// `{s}` is the identifier. Off by default, enabled with `-Wundef`.
pub const undefined_macro: Diagnostic = .{
    .fmt = "'{s}' is not defined, evaluates to 0",
    .kind = .off,
    .opt = .undef,
};
/// An identifier used like a function-like macro in a preprocessor
/// expression is not defined as one; `{s}` is the identifier.
pub const fn_macro_undefined: Diagnostic = .{
    .fmt = "function-like macro '{s}' is not defined",
    .kind = .@"error",
};
// Currently unused diagnostic kept for reference — presumably awaiting
// support for directive-only builtins; TODO confirm before deleting.
// pub const preprocessing_directive_only: Diagnostic = .{
//     .fmt = "'{s}' must be used within a preprocessing directive",
//     .extra = .tok_id_expected,
//     .kind = .@"error",
// };
/// Reported when a builtin macro that takes arguments is not immediately
/// followed by an opening parenthesis; `{s}` is the macro's name.
pub const missing_lparen_after_builtin: Diagnostic = .{
    // Lowercased "Missing" -> "missing": every other diagnostic in this
    // table starts lowercase, matching the common compiler-diagnostic
    // convention (lowercase message, no trailing period).
    .fmt = "missing '(' after built-in macro '{s}'",
    .kind = .@"error",
};
// Diagnostics for `#include` handling, token pasting, `defined` in macro
// expansion, stringification, and `_Pragma`.

/// `#include` recursion exceeded the implementation's nesting limit.
pub const too_many_includes: Diagnostic = .{
    .fmt = "#include nested too deeply",
    .kind = .@"error",
};
/// `#include_next` is a GNU extension. Off by default, enabled with
/// `-Wgnu-include-next`.
pub const include_next: Diagnostic = .{
    .fmt = "#include_next is a language extension",
    .kind = .off,
    .opt = .@"gnu-include-next",
    .extension = true,
};
/// `#include_next` used in the main source file, where there is no
/// "current" include directory to continue from.
pub const include_next_outside_header: Diagnostic = .{
    .fmt = "#include_next in primary source file; will search from start of include path",
    .kind = .warning,
    .opt = .@"include-next-outside-header",
};
/// `, ## __VA_ARGS__` comma deletion is a GNU extension. Off by
/// default, enabled with `-Wgnu-zero-variadic-macro-arguments`.
pub const comma_deletion_va_args: Diagnostic = .{
    .fmt = "token pasting of ',' and __VA_ARGS__ is a GNU extension",
    .kind = .off,
    .opt = .@"gnu-zero-variadic-macro-arguments",
    .extension = true,
};
/// An object-like macro expansion produced the token `defined`,
/// which has undefined behavior in a preprocessor expression.
pub const expansion_to_defined_obj: Diagnostic = .{
    .fmt = "macro expansion producing 'defined' has undefined behavior",
    .kind = .off,
    .opt = .@"expansion-to-defined",
};
/// Same message as above, for function-like macro expansions; kept as a
/// separate entry so it can be flagged as an extension independently.
pub const expansion_to_defined_func: Diagnostic = .{
    .fmt = expansion_to_defined_obj.fmt,
    .kind = .off,
    .opt = .@"expansion-to-defined",
    .extension = true,
};
/// `#` stringification produced a string ending in a lone backslash,
/// which would escape the closing quote; the final `\` is dropped.
pub const invalid_pp_stringify_escape: Diagnostic = .{
    .fmt = "invalid string literal, ignoring final '\\'",
    .kind = .warning,
};
/// `#define M(args...)` named-variadic syntax is a GNU extension.
/// Off by default, enabled with `-Wvariadic-macros`.
pub const gnu_va_macro: Diagnostic = .{
    .fmt = "named variadic macros are a GNU extension",
    .opt = .@"variadic-macros",
    .kind = .off,
    .extension = true,
};
/// `_Pragma(...)` was given anything other than a single string literal.
pub const pragma_operator_string_literal: Diagnostic = .{
    .fmt = "_Pragma requires exactly one string literal token",
    .kind = .@"error",
};
// Diagnostics for preprocessor-expression parsing, macro (re)definition,
// and function-like macro invocation.

/// A `#if`/`#elif` expression began with a token that cannot start one.
pub const invalid_preproc_expr_start: Diagnostic = .{
    .fmt = "invalid token at start of a preprocessor expression",
    .kind = .@"error",
};
/// Source file does not end with a newline. Off by default, enabled
/// with `-Wnewline-eof`.
pub const newline_eof: Diagnostic = .{
    .fmt = "no newline at end of file",
    .opt = .@"newline-eof",
    .kind = .off,
    .extension = true,
};
/// `__has_warning`-style check given something other than an option
/// name; `{s}` is the offending argument.
pub const malformed_warning_check: Diagnostic = .{
    .fmt = "{s} expected option name (e.g. \"-Wundef\")",
    .opt = .@"malformed-warning-check",
    .kind = .warning,
    .extension = true,
};
/// Feature-check macros (e.g. `__has_feature`) require `(identifier)`.
pub const feature_check_requires_identifier: Diagnostic = .{
    .fmt = "builtin feature check macro requires a parenthesized identifier",
    .kind = .@"error",
};
/// A `#define` redefines a compiler-provided builtin macro.
pub const builtin_macro_redefined: Diagnostic = .{
    .fmt = "redefining builtin macro",
    .opt = .@"builtin-macro-redefined",
    .kind = .warning,
    .extension = true,
};
/// A macro is redefined with a different body; `{s}` is its name.
pub const macro_redefined: Diagnostic = .{
    .fmt = "'{s}' macro redefined",
    .opt = .@"macro-redefined",
    .kind = .warning,
    .extension = true,
};
/// Note attached to redefinition/conflict diagnostics, pointing at the
/// earlier definition.
pub const previous_definition: Diagnostic = .{
    .fmt = "previous definition is here",
    .kind = .note,
};
/// A function-like macro call's argument list was never closed.
pub const unterminated_macro_arg_list: Diagnostic = .{
    .fmt = "unterminated function macro argument list",
    .kind = .@"error",
};
/// Note pointing at the `(` that an expected `)` should match.
pub const to_match_paren: Diagnostic = .{
    .fmt = "to match this '('",
    .kind = .note,
};
/// A required closing `)` was not found.
pub const closing_paren: Diagnostic = .{
    .fmt = "expected closing ')'",
    .kind = .@"error",
};
/// Use of an identifier marked with `#pragma GCC poison`.
pub const poisoned_identifier: Diagnostic = .{
    .fmt = "attempt to use a poisoned identifier",
    .kind = .@"error",
};
/// Macro invoked with the wrong argument count; placeholders are the
/// expected and actual counts.
pub const expected_arguments: Diagnostic = .{
    .fmt = "expected {d} argument(s) got {d}",
    .kind = .@"error",
};
/// Variadic macro invoked with too few arguments. Warning rather than
/// error — presumably to tolerate empty variadic argument lists;
/// TODO confirm against the call sites.
pub const expected_at_least_arguments: Diagnostic = .{
    .fmt = "expected at least {d} argument(s) got {d}",
    .kind = .warning,
};
/// A token appeared where a binary operator was expected in a
/// preprocessor subexpression.
pub const invalid_preproc_operator: Diagnostic = .{
    .fmt = "token is not a valid binary operator in a preprocessor subexpression",
    .kind = .@"error",
};
/// A directive that takes a string literal did not get one; `{s}` is
/// the directive's name.
pub const expected_str_literal_in: Diagnostic = .{
    .fmt = "expected string literal in '{s}'",
    .kind = .@"error",
};
/// Reported when the closing `)` of a builtin macro invocation is
/// missing; `{s}` is the builtin's name.
pub const builtin_missing_r_paren: Diagnostic = .{
    // Removed a stray comma: the message previously read
    // "missing ')', after {s}".
    .fmt = "missing ')' after {s}",
    .kind = .@"error",
};
// Miscellaneous preprocessor diagnostics: identifier conversion,
// universal character names, and reproducible-build epoch handling.

/// A token that is not an identifier was supplied where one is
/// required (e.g. to `__identifier`); `{s}` describes the token.
pub const cannot_convert_to_identifier: Diagnostic = .{
    .fmt = "cannot convert {s} to an identifier",
    .kind = .@"error",
};
/// An identifier argument was expected but something else was found.
pub const expected_identifier: Diagnostic = .{
    .fmt = "expected identifier argument",
    .kind = .@"error",
};
/// `\u`/`\U` followed by too few hex digits; the `\` is kept as its own
/// token and the rest is lexed as an identifier. Gated by `-Wunicode`.
pub const incomplete_ucn: Diagnostic = .{
    .fmt = "incomplete universal character name; treating as '\\' followed by identifier",
    .kind = .warning,
    .opt = .unicode,
};
/// SOURCE_DATE_EPOCH (reproducible builds) was set but did not parse as
/// a non-negative integer <= 253402300799 (9999-12-31T23:59:59Z).
pub const invalid_source_epoch: Diagnostic = .{
    .fmt = "environment variable SOURCE_DATE_EPOCH must expand to a non-negative integer less than or equal to 253402300799",
    .kind = .@"error",
};

View File

@ -24,6 +24,20 @@ pub const Location = struct {
pub fn eql(a: Location, b: Location) bool { pub fn eql(a: Location, b: Location) bool {
return a.id == b.id and a.byte_offset == b.byte_offset and a.line == b.line; return a.id == b.id and a.byte_offset == b.byte_offset and a.line == b.line;
} }
pub fn expand(loc: Location, comp: *const @import("Compilation.zig")) ExpandedLocation {
const source = comp.getSource(loc.id);
return source.lineCol(loc);
}
};
pub const ExpandedLocation = struct {
path: []const u8,
line: []const u8,
line_no: u32,
col: u32,
width: u32,
end_with_splice: bool,
}; };
const Source = @This(); const Source = @This();
@ -51,9 +65,7 @@ pub fn physicalLine(source: Source, loc: Location) u32 {
return loc.line + source.numSplicesBefore(loc.byte_offset); return loc.line + source.numSplicesBefore(loc.byte_offset);
} }
const LineCol = struct { line: []const u8, line_no: u32, col: u32, width: u32, end_with_splice: bool }; pub fn lineCol(source: Source, loc: Location) ExpandedLocation {
pub fn lineCol(source: Source, loc: Location) LineCol {
var start: usize = 0; var start: usize = 0;
// find the start of the line which is either a newline or a splice // find the start of the line which is either a newline or a splice
if (std.mem.lastIndexOfScalar(u8, source.buf[0..loc.byte_offset], '\n')) |some| start = some + 1; if (std.mem.lastIndexOfScalar(u8, source.buf[0..loc.byte_offset], '\n')) |some| start = some + 1;
@ -102,6 +114,7 @@ pub fn lineCol(source: Source, loc: Location) LineCol {
nl = source.splice_locs[splice_index]; nl = source.splice_locs[splice_index];
} }
return .{ return .{
.path = source.path,
.line = source.buf[start..nl], .line = source.buf[start..nl],
.line_no = loc.line + splice_index, .line_no = loc.line + splice_index,
.col = col, .col = col,

View File

@ -2,82 +2,34 @@ const std = @import("std");
const mem = std.mem; const mem = std.mem;
const Compilation = @import("Compilation.zig"); const Compilation = @import("Compilation.zig");
const StringToIdMap = std.StringHashMapUnmanaged(StringId);
pub const StringId = enum(u32) {
empty,
_,
};
pub const TypeMapper = struct {
const LookupSpeed = enum {
fast,
slow,
};
data: union(LookupSpeed) {
fast: []const []const u8,
slow: *const StringToIdMap,
},
pub fn lookup(self: TypeMapper, string_id: StringInterner.StringId) []const u8 {
if (string_id == .empty) return "";
switch (self.data) {
.fast => |arr| return arr[@intFromEnum(string_id)],
.slow => |map| {
var it = map.iterator();
while (it.next()) |entry| {
if (entry.value_ptr.* == string_id) return entry.key_ptr.*;
}
unreachable;
},
}
}
pub fn deinit(self: TypeMapper, allocator: mem.Allocator) void {
switch (self.data) {
.slow => {},
.fast => |arr| allocator.free(arr),
}
}
};
const StringInterner = @This(); const StringInterner = @This();
string_table: StringToIdMap = .{}, pub const StringId = enum(u32) {
next_id: StringId = @enumFromInt(@intFromEnum(StringId.empty) + 1), empty = std.math.maxInt(u32),
_,
pub fn deinit(self: *StringInterner, allocator: mem.Allocator) void { pub fn lookup(id: StringId, comp: *const Compilation) []const u8 {
self.string_table.deinit(allocator); if (id == .empty) return "";
return comp.string_interner.table.keys()[@intFromEnum(id)];
}
pub fn lookupExtra(id: StringId, si: StringInterner) []const u8 {
if (id == .empty) return "";
return si.table.keys()[@intFromEnum(id)];
}
};
table: std.StringArrayHashMapUnmanaged(void) = .empty,
pub fn deinit(si: *StringInterner, allocator: mem.Allocator) void {
si.table.deinit(allocator);
si.* = undefined;
} }
pub fn intern(comp: *Compilation, str: []const u8) !StringId { /// Intern externally owned string.
return comp.string_interner.internExtra(comp.gpa, str); pub fn intern(si: *StringInterner, allocator: mem.Allocator, str: []const u8) !StringId {
}
pub fn internExtra(self: *StringInterner, allocator: mem.Allocator, str: []const u8) !StringId {
if (str.len == 0) return .empty; if (str.len == 0) return .empty;
const gop = try self.string_table.getOrPut(allocator, str); const gop = try si.table.getOrPut(allocator, str);
if (gop.found_existing) return gop.value_ptr.*; return @enumFromInt(gop.index);
defer self.next_id = @enumFromInt(@intFromEnum(self.next_id) + 1);
gop.value_ptr.* = self.next_id;
return self.next_id;
}
/// deinit for the returned TypeMapper is a no-op and does not need to be called
pub fn getSlowTypeMapper(self: *const StringInterner) TypeMapper {
return TypeMapper{ .data = .{ .slow = &self.string_table } };
}
/// Caller must call `deinit` on the returned TypeMapper
pub fn getFastTypeMapper(self: *const StringInterner, allocator: mem.Allocator) !TypeMapper {
var strings = try allocator.alloc([]const u8, @intFromEnum(self.next_id));
var it = self.string_table.iterator();
strings[0] = "";
while (it.next()) |entry| {
strings[@intFromEnum(entry.value_ptr.*)] = entry.key_ptr.*;
}
return TypeMapper{ .data = .{ .fast = strings } };
} }

View File

@ -2,22 +2,24 @@ const std = @import("std");
const mem = std.mem; const mem = std.mem;
const Allocator = mem.Allocator; const Allocator = mem.Allocator;
const assert = std.debug.assert; const assert = std.debug.assert;
const Parser = @import("Parser.zig");
const StringId = @import("StringInterner.zig").StringId;
const Tree = @import("Tree.zig"); const Tree = @import("Tree.zig");
const Token = Tree.Token; const Token = Tree.Token;
const TokenIndex = Tree.TokenIndex; const TokenIndex = Tree.TokenIndex;
const NodeIndex = Tree.NodeIndex; const Node = Tree.Node;
const Type = @import("Type.zig"); const QualType = @import("TypeStore.zig").QualType;
const Parser = @import("Parser.zig");
const Value = @import("Value.zig"); const Value = @import("Value.zig");
const StringId = @import("StringInterner.zig").StringId;
const SymbolStack = @This(); const SymbolStack = @This();
pub const Symbol = struct { pub const Symbol = struct {
name: StringId, name: StringId,
ty: Type, qt: QualType,
tok: TokenIndex, tok: TokenIndex,
node: NodeIndex = .none, node: Node.OptIndex = .null,
out_of_scope: bool = false,
kind: Kind, kind: Kind,
val: Value, val: Value,
}; };
@ -33,14 +35,14 @@ pub const Kind = enum {
constexpr, constexpr,
}; };
scopes: std.ArrayListUnmanaged(Scope) = .empty, scopes: std.ArrayListUnmanaged(Scope) = .{},
/// allocations from nested scopes are retained after popping; `active_len` is the number /// allocations from nested scopes are retained after popping; `active_len` is the number
/// of currently-active items in `scopes`. /// of currently-active items in `scopes`.
active_len: usize = 0, active_len: usize = 0,
const Scope = struct { const Scope = struct {
vars: std.AutoHashMapUnmanaged(StringId, Symbol) = .empty, vars: std.AutoHashMapUnmanaged(StringId, Symbol) = .{},
tags: std.AutoHashMapUnmanaged(StringId, Symbol) = .empty, tags: std.AutoHashMapUnmanaged(StringId, Symbol) = .{},
fn deinit(self: *Scope, allocator: Allocator) void { fn deinit(self: *Scope, allocator: Allocator) void {
self.vars.deinit(allocator); self.vars.deinit(allocator);
@ -82,17 +84,17 @@ pub fn findTypedef(s: *SymbolStack, p: *Parser, name: StringId, name_tok: TokenI
.typedef => return prev, .typedef => return prev,
.@"struct" => { .@"struct" => {
if (no_type_yet) return null; if (no_type_yet) return null;
try p.errStr(.must_use_struct, name_tok, p.tokSlice(name_tok)); try p.err(name_tok, .must_use_struct, .{p.tokSlice(name_tok)});
return prev; return prev;
}, },
.@"union" => { .@"union" => {
if (no_type_yet) return null; if (no_type_yet) return null;
try p.errStr(.must_use_union, name_tok, p.tokSlice(name_tok)); try p.err(name_tok, .must_use_union, .{p.tokSlice(name_tok)});
return prev; return prev;
}, },
.@"enum" => { .@"enum" => {
if (no_type_yet) return null; if (no_type_yet) return null;
try p.errStr(.must_use_enum, name_tok, p.tokSlice(name_tok)); try p.err(name_tok, .must_use_enum, .{p.tokSlice(name_tok)});
return prev; return prev;
}, },
else => return null, else => return null,
@ -120,8 +122,8 @@ pub fn findTag(
else => unreachable, else => unreachable,
} }
if (s.get(name, .tags) == null) return null; if (s.get(name, .tags) == null) return null;
try p.errStr(.wrong_tag, name_tok, p.tokSlice(name_tok)); try p.err(name_tok, .wrong_tag, .{p.tokSlice(name_tok)});
try p.errTok(.previous_definition, prev.tok); try p.err(prev.tok, .previous_definition, .{});
return null; return null;
} }
@ -171,23 +173,24 @@ pub fn defineTypedef(
s: *SymbolStack, s: *SymbolStack,
p: *Parser, p: *Parser,
name: StringId, name: StringId,
ty: Type, qt: QualType,
tok: TokenIndex, tok: TokenIndex,
node: NodeIndex, node: Node.Index,
) !void { ) !void {
if (s.get(name, .vars)) |prev| { if (s.get(name, .vars)) |prev| {
switch (prev.kind) { switch (prev.kind) {
.typedef => { .typedef => {
if (!prev.ty.is(.invalid)) { if (!prev.qt.isInvalid() and !qt.eqlQualified(prev.qt, p.comp)) {
if (!ty.eql(prev.ty, p.comp, true)) { if (qt.isInvalid()) return;
try p.errStr(.redefinition_of_typedef, tok, try p.typePairStrExtra(ty, " vs ", prev.ty)); const non_typedef_qt = qt.type(p.comp).typedef.base;
if (prev.tok != 0) try p.errTok(.previous_definition, prev.tok); const non_typedef_prev_qt = prev.qt.type(p.comp).typedef.base;
} try p.err(tok, .redefinition_of_typedef, .{ non_typedef_qt, non_typedef_prev_qt });
if (prev.tok != 0) try p.err(prev.tok, .previous_definition, .{});
} }
}, },
.enumeration, .decl, .def, .constexpr => { .enumeration, .decl, .def, .constexpr => {
try p.errStr(.redefinition_different_sym, tok, p.tokSlice(tok)); try p.err(tok, .redefinition_different_sym, .{p.tokSlice(tok)});
try p.errTok(.previous_definition, prev.tok); try p.err(prev.tok, .previous_definition, .{});
}, },
else => unreachable, else => unreachable,
} }
@ -196,13 +199,8 @@ pub fn defineTypedef(
.kind = .typedef, .kind = .typedef,
.name = name, .name = name,
.tok = tok, .tok = tok,
.ty = .{ .qt = qt,
.name = name, .node = .pack(node),
.specifier = ty.specifier,
.qual = ty.qual,
.data = ty.data,
},
.node = node,
.val = .{}, .val = .{},
}); });
} }
@ -211,31 +209,37 @@ pub fn defineSymbol(
s: *SymbolStack, s: *SymbolStack,
p: *Parser, p: *Parser,
name: StringId, name: StringId,
ty: Type, qt: QualType,
tok: TokenIndex, tok: TokenIndex,
node: NodeIndex, node: Node.Index,
val: Value, val: Value,
constexpr: bool, constexpr: bool,
) !void { ) !void {
if (s.get(name, .vars)) |prev| { if (s.get(name, .vars)) |prev| {
switch (prev.kind) { switch (prev.kind) {
.enumeration => { .enumeration => {
try p.errStr(.redefinition_different_sym, tok, p.tokSlice(tok)); if (qt.isInvalid()) return;
try p.errTok(.previous_definition, prev.tok); try p.err(tok, .redefinition_different_sym, .{p.tokSlice(tok)});
try p.err(prev.tok, .previous_definition, .{});
}, },
.decl => { .decl => {
if (!ty.eql(prev.ty, p.comp, true)) { if (!prev.qt.isInvalid() and !qt.eqlQualified(prev.qt, p.comp)) {
try p.errStr(.redefinition_incompatible, tok, p.tokSlice(tok)); if (qt.isInvalid()) return;
try p.errTok(.previous_definition, prev.tok); try p.err(tok, .redefinition_incompatible, .{p.tokSlice(tok)});
try p.err(prev.tok, .previous_definition, .{});
} else {
if (prev.node.unpack()) |some| p.setTentativeDeclDefinition(some, node);
} }
}, },
.def, .constexpr => { .def, .constexpr => if (!prev.qt.isInvalid()) {
try p.errStr(.redefinition, tok, p.tokSlice(tok)); if (qt.isInvalid()) return;
try p.errTok(.previous_definition, prev.tok); try p.err(tok, .redefinition, .{p.tokSlice(tok)});
try p.err(prev.tok, .previous_definition, .{});
}, },
.typedef => { .typedef => {
try p.errStr(.redefinition_different_sym, tok, p.tokSlice(tok)); if (qt.isInvalid()) return;
try p.errTok(.previous_definition, prev.tok); try p.err(tok, .redefinition_different_sym, .{p.tokSlice(tok)});
try p.err(prev.tok, .previous_definition, .{});
}, },
else => unreachable, else => unreachable,
} }
@ -245,8 +249,8 @@ pub fn defineSymbol(
.kind = if (constexpr) .constexpr else .def, .kind = if (constexpr) .constexpr else .def,
.name = name, .name = name,
.tok = tok, .tok = tok,
.ty = ty, .qt = qt,
.node = node, .node = .pack(node),
.val = val, .val = val,
}); });
} }
@ -264,33 +268,40 @@ pub fn declareSymbol(
s: *SymbolStack, s: *SymbolStack,
p: *Parser, p: *Parser,
name: StringId, name: StringId,
ty: Type, qt: QualType,
tok: TokenIndex, tok: TokenIndex,
node: NodeIndex, node: Node.Index,
) !void { ) !void {
if (s.get(name, .vars)) |prev| { if (s.get(name, .vars)) |prev| {
switch (prev.kind) { switch (prev.kind) {
.enumeration => { .enumeration => {
try p.errStr(.redefinition_different_sym, tok, p.tokSlice(tok)); if (qt.isInvalid()) return;
try p.errTok(.previous_definition, prev.tok); try p.err(tok, .redefinition_different_sym, .{p.tokSlice(tok)});
try p.err(prev.tok, .previous_definition, .{});
}, },
.decl => { .decl => {
if (!ty.eql(prev.ty, p.comp, true)) { if (!prev.qt.isInvalid() and !qt.eqlQualified(prev.qt, p.comp)) {
try p.errStr(.redefinition_incompatible, tok, p.tokSlice(tok)); if (qt.isInvalid()) return;
try p.errTok(.previous_definition, prev.tok); try p.err(tok, .redefinition_incompatible, .{p.tokSlice(tok)});
try p.err(prev.tok, .previous_definition, .{});
} else {
if (prev.node.unpack()) |some| p.setTentativeDeclDefinition(node, some);
} }
}, },
.def, .constexpr => { .def, .constexpr => {
if (!ty.eql(prev.ty, p.comp, true)) { if (!prev.qt.isInvalid() and !qt.eqlQualified(prev.qt, p.comp)) {
try p.errStr(.redefinition_incompatible, tok, p.tokSlice(tok)); if (qt.isInvalid()) return;
try p.errTok(.previous_definition, prev.tok); try p.err(tok, .redefinition_incompatible, .{p.tokSlice(tok)});
try p.err(prev.tok, .previous_definition, .{});
} else { } else {
if (prev.node.unpack()) |some| p.setTentativeDeclDefinition(node, some);
return; return;
} }
}, },
.typedef => { .typedef => {
try p.errStr(.redefinition_different_sym, tok, p.tokSlice(tok)); if (qt.isInvalid()) return;
try p.errTok(.previous_definition, prev.tok); try p.err(tok, .redefinition_different_sym, .{p.tokSlice(tok)});
try p.err(prev.tok, .previous_definition, .{});
}, },
else => unreachable, else => unreachable,
} }
@ -299,34 +310,54 @@ pub fn declareSymbol(
.kind = .decl, .kind = .decl,
.name = name, .name = name,
.tok = tok, .tok = tok,
.ty = ty, .qt = qt,
.node = node, .node = .pack(node),
.val = .{}, .val = .{},
}); });
// Declare out of scope symbol for functions declared in functions.
if (s.active_len > 1 and !p.comp.langopts.standard.atLeast(.c23) and qt.is(p.comp, .func)) {
try s.scopes.items[0].vars.put(p.gpa, name, .{
.kind = .decl,
.name = name,
.tok = tok,
.qt = qt,
.node = .pack(node),
.val = .{},
.out_of_scope = true,
});
}
} }
pub fn defineParam(s: *SymbolStack, p: *Parser, name: StringId, ty: Type, tok: TokenIndex) !void { pub fn defineParam(
s: *SymbolStack,
p: *Parser,
name: StringId,
qt: QualType,
tok: TokenIndex,
node: ?Node.Index,
) !void {
if (s.get(name, .vars)) |prev| { if (s.get(name, .vars)) |prev| {
switch (prev.kind) { switch (prev.kind) {
.enumeration, .decl, .def, .constexpr => { .enumeration, .decl, .def, .constexpr => if (!prev.qt.isInvalid()) {
try p.errStr(.redefinition_of_parameter, tok, p.tokSlice(tok)); if (qt.isInvalid()) return;
try p.errTok(.previous_definition, prev.tok); try p.err(tok, .redefinition_of_parameter, .{p.tokSlice(tok)});
try p.err(prev.tok, .previous_definition, .{});
}, },
.typedef => { .typedef => {
try p.errStr(.redefinition_different_sym, tok, p.tokSlice(tok)); if (qt.isInvalid()) return;
try p.errTok(.previous_definition, prev.tok); try p.err(tok, .redefinition_different_sym, .{p.tokSlice(tok)});
try p.err(prev.tok, .previous_definition, .{});
}, },
else => unreachable, else => unreachable,
} }
} }
if (ty.is(.fp16) and !p.comp.hasHalfPrecisionFloatABI()) {
try p.errStr(.suggest_pointer_for_invalid_fp16, tok, "parameters");
}
try s.define(p.gpa, .{ try s.define(p.gpa, .{
.kind = .def, .kind = .def,
.name = name, .name = name,
.tok = tok, .tok = tok,
.ty = ty, .qt = qt,
.node = .packOpt(node),
.val = .{}, .val = .{},
}); });
} }
@ -342,20 +373,20 @@ pub fn defineTag(
switch (prev.kind) { switch (prev.kind) {
.@"enum" => { .@"enum" => {
if (kind == .keyword_enum) return prev; if (kind == .keyword_enum) return prev;
try p.errStr(.wrong_tag, tok, p.tokSlice(tok)); try p.err(tok, .wrong_tag, .{p.tokSlice(tok)});
try p.errTok(.previous_definition, prev.tok); try p.err(prev.tok, .previous_definition, .{});
return null; return null;
}, },
.@"struct" => { .@"struct" => {
if (kind == .keyword_struct) return prev; if (kind == .keyword_struct) return prev;
try p.errStr(.wrong_tag, tok, p.tokSlice(tok)); try p.err(tok, .wrong_tag, .{p.tokSlice(tok)});
try p.errTok(.previous_definition, prev.tok); try p.err(prev.tok, .previous_definition, .{});
return null; return null;
}, },
.@"union" => { .@"union" => {
if (kind == .keyword_union) return prev; if (kind == .keyword_union) return prev;
try p.errStr(.wrong_tag, tok, p.tokSlice(tok)); try p.err(tok, .wrong_tag, .{p.tokSlice(tok)});
try p.errTok(.previous_definition, prev.tok); try p.err(prev.tok, .previous_definition, .{});
return null; return null;
}, },
else => unreachable, else => unreachable,
@ -366,25 +397,29 @@ pub fn defineEnumeration(
s: *SymbolStack, s: *SymbolStack,
p: *Parser, p: *Parser,
name: StringId, name: StringId,
ty: Type, qt: QualType,
tok: TokenIndex, tok: TokenIndex,
val: Value, val: Value,
node: Node.Index,
) !void { ) !void {
if (s.get(name, .vars)) |prev| { if (s.get(name, .vars)) |prev| {
switch (prev.kind) { switch (prev.kind) {
.enumeration => { .enumeration => if (!prev.qt.isInvalid()) {
try p.errStr(.redefinition, tok, p.tokSlice(tok)); if (qt.isInvalid()) return;
try p.errTok(.previous_definition, prev.tok); try p.err(tok, .redefinition, .{p.tokSlice(tok)});
try p.err(prev.tok, .previous_definition, .{});
return; return;
}, },
.decl, .def, .constexpr => { .decl, .def, .constexpr => {
try p.errStr(.redefinition_different_sym, tok, p.tokSlice(tok)); if (qt.isInvalid()) return;
try p.errTok(.previous_definition, prev.tok); try p.err(tok, .redefinition_different_sym, .{p.tokSlice(tok)});
try p.err(prev.tok, .previous_definition, .{});
return; return;
}, },
.typedef => { .typedef => {
try p.errStr(.redefinition_different_sym, tok, p.tokSlice(tok)); if (qt.isInvalid()) return;
try p.errTok(.previous_definition, prev.tok); try p.err(tok, .redefinition_different_sym, .{p.tokSlice(tok)});
try p.err(prev.tok, .previous_definition, .{});
}, },
else => unreachable, else => unreachable,
} }
@ -393,7 +428,8 @@ pub fn defineEnumeration(
.kind = .enumeration, .kind = .enumeration,
.name = name, .name = name,
.tok = tok, .tok = tok,
.ty = ty, .qt = qt,
.val = val, .val = val,
.node = .pack(node),
}); });
} }

View File

@ -1,8 +1,45 @@
const std = @import("std"); const std = @import("std");
const assert = std.debug.assert; const assert = std.debug.assert;
const Compilation = @import("Compilation.zig"); const Compilation = @import("Compilation.zig");
const Source = @import("Source.zig");
const LangOpts = @import("LangOpts.zig"); const LangOpts = @import("LangOpts.zig");
const Source = @import("Source.zig");
/// Value for valid escapes indicates how many characters to consume, not counting leading backslash
const UCNKind = enum(u8) {
/// Just `\`
none,
/// \u or \U followed by an insufficient number of hex digits
incomplete,
/// `\uxxxx`
hex4 = 5,
/// `\Uxxxxxxxx`
hex8 = 9,
/// In the classification phase we do not care if the escape represents a valid universal character name
/// e.g. \UFFFFFFFF is acceptable.
fn classify(buf: []const u8) UCNKind {
assert(buf[0] == '\\');
if (buf.len == 1) return .none;
switch (buf[1]) {
'u' => {
if (buf.len < 6) return .incomplete;
for (buf[2..6]) |c| {
if (!std.ascii.isHex(c)) return .incomplete;
}
return .hex4;
},
'U' => {
if (buf.len < 10) return .incomplete;
for (buf[2..10]) |c| {
if (!std.ascii.isHex(c)) return .incomplete;
}
return .hex8;
},
else => return .none,
}
}
};
pub const Token = struct { pub const Token = struct {
id: Id, id: Id,
@ -18,7 +55,7 @@ pub const Token = struct {
eof, eof,
/// identifier containing solely basic character set characters /// identifier containing solely basic character set characters
identifier, identifier,
/// identifier with at least one extended character /// identifier with at least one extended character or UCN escape sequence
extended_identifier, extended_identifier,
// string literals with prefixes // string literals with prefixes
@ -147,6 +184,10 @@ pub const Token = struct {
macro_counter, macro_counter,
/// Special token for implementing _Pragma /// Special token for implementing _Pragma
macro_param_pragma_operator, macro_param_pragma_operator,
/// Special token for implementing __identifier (MS extension)
macro_param_ms_identifier,
/// Special token for implementing __pragma (MS extension)
macro_param_ms_pragma,
/// Special identifier for implementing __func__ /// Special identifier for implementing __func__
macro_func, macro_func,
@ -154,6 +195,12 @@ pub const Token = struct {
macro_function, macro_function,
/// Special identifier for implementing __PRETTY_FUNCTION__ /// Special identifier for implementing __PRETTY_FUNCTION__
macro_pretty_func, macro_pretty_func,
/// Special identifier for implementing __DATE__
macro_date,
/// Special identifier for implementing __TIME__
macro_time,
/// Special identifier for implementing __TIMESTAMP__
macro_timestamp,
keyword_auto, keyword_auto,
keyword_auto_type, keyword_auto_type,
@ -290,13 +337,21 @@ pub const Token = struct {
keyword_thiscall2, keyword_thiscall2,
keyword_vectorcall, keyword_vectorcall,
keyword_vectorcall2, keyword_vectorcall2,
keyword_fastcall,
keyword_fastcall2,
keyword_regcall,
keyword_cdecl,
keyword_cdecl2,
keyword_forceinline,
keyword_forceinline2,
keyword_unaligned,
keyword_unaligned2,
// builtins that require special parsing // Type nullability
builtin_choose_expr, keyword_nonnull,
builtin_va_arg, keyword_nullable,
builtin_offsetof, keyword_nullable_result,
builtin_bitoffsetof, keyword_null_unspecified,
builtin_types_compatible_p,
/// Generated by #embed directive /// Generated by #embed directive
/// Decimal value with no prefix or suffix /// Decimal value with no prefix or suffix
@ -323,6 +378,12 @@ pub const Token = struct {
/// A comment token if asked to preserve comments. /// A comment token if asked to preserve comments.
comment, comment,
/// Incomplete universal character name
/// This happens if the source text contains `\u` or `\U` followed by an insufficient number of hex
/// digits. This token id represents just the backslash; the subsequent `u` or `U` will be treated as the
/// leading character of the following identifier token.
incomplete_ucn,
/// Return true if token is identifier or keyword. /// Return true if token is identifier or keyword.
pub fn isMacroIdentifier(id: Id) bool { pub fn isMacroIdentifier(id: Id) bool {
switch (id) { switch (id) {
@ -347,6 +408,9 @@ pub const Token = struct {
.macro_func, .macro_func,
.macro_function, .macro_function,
.macro_pretty_func, .macro_pretty_func,
.macro_date,
.macro_time,
.macro_timestamp,
.keyword_auto, .keyword_auto,
.keyword_auto_type, .keyword_auto_type,
.keyword_break, .keyword_break,
@ -409,11 +473,6 @@ pub const Token = struct {
.keyword_restrict2, .keyword_restrict2,
.keyword_alignof1, .keyword_alignof1,
.keyword_alignof2, .keyword_alignof2,
.builtin_choose_expr,
.builtin_va_arg,
.builtin_offsetof,
.builtin_bitoffsetof,
.builtin_types_compatible_p,
.keyword_attribute1, .keyword_attribute1,
.keyword_attribute2, .keyword_attribute2,
.keyword_extension, .keyword_extension,
@ -444,6 +503,19 @@ pub const Token = struct {
.keyword_thiscall2, .keyword_thiscall2,
.keyword_vectorcall, .keyword_vectorcall,
.keyword_vectorcall2, .keyword_vectorcall2,
.keyword_fastcall,
.keyword_fastcall2,
.keyword_regcall,
.keyword_cdecl,
.keyword_cdecl2,
.keyword_forceinline,
.keyword_forceinline2,
.keyword_unaligned,
.keyword_unaligned2,
.keyword_nonnull,
.keyword_nullable,
.keyword_nullable_result,
.keyword_null_unspecified,
.keyword_bit_int, .keyword_bit_int,
.keyword_c23_alignas, .keyword_c23_alignas,
.keyword_c23_alignof, .keyword_c23_alignof,
@ -547,11 +619,18 @@ pub const Token = struct {
.macro_file, .macro_file,
.macro_line, .macro_line,
.macro_counter, .macro_counter,
.macro_time,
.macro_date,
.macro_timestamp,
.macro_param_pragma_operator, .macro_param_pragma_operator,
.macro_param_ms_identifier,
.macro_param_ms_pragma,
.placemarker, .placemarker,
=> "", => "",
.macro_ws => " ", .macro_ws => " ",
.incomplete_ucn => "\\",
.macro_func => "__func__", .macro_func => "__func__",
.macro_function => "__FUNCTION__", .macro_function => "__FUNCTION__",
.macro_pretty_func => "__PRETTY_FUNCTION__", .macro_pretty_func => "__PRETTY_FUNCTION__",
@ -695,11 +774,6 @@ pub const Token = struct {
.keyword_alignof2 => "__alignof__", .keyword_alignof2 => "__alignof__",
.keyword_typeof1 => "__typeof", .keyword_typeof1 => "__typeof",
.keyword_typeof2 => "__typeof__", .keyword_typeof2 => "__typeof__",
.builtin_choose_expr => "__builtin_choose_expr",
.builtin_va_arg => "__builtin_va_arg",
.builtin_offsetof => "__builtin_offsetof",
.builtin_bitoffsetof => "__builtin_bitoffsetof",
.builtin_types_compatible_p => "__builtin_types_compatible_p",
.keyword_attribute1 => "__attribute", .keyword_attribute1 => "__attribute",
.keyword_attribute2 => "__attribute__", .keyword_attribute2 => "__attribute__",
.keyword_extension => "__extension__", .keyword_extension => "__extension__",
@ -730,6 +804,19 @@ pub const Token = struct {
.keyword_thiscall2 => "_thiscall", .keyword_thiscall2 => "_thiscall",
.keyword_vectorcall => "__vectorcall", .keyword_vectorcall => "__vectorcall",
.keyword_vectorcall2 => "_vectorcall", .keyword_vectorcall2 => "_vectorcall",
.keyword_fastcall => "__fastcall",
.keyword_fastcall2 => "_fastcall",
.keyword_regcall => "__regcall",
.keyword_cdecl => "__cdecl",
.keyword_cdecl2 => "_cdecl",
.keyword_forceinline => "__forceinline",
.keyword_forceinline2 => "_forceinline",
.keyword_unaligned => "__unaligned",
.keyword_unaligned2 => "_unaligned",
.keyword_nonnull => "_Nonnull",
.keyword_nullable => "_Nullable",
.keyword_nullable_result => "_Nullable_result",
.keyword_null_unspecified => "_Null_unspecified",
}; };
} }
@ -742,11 +829,6 @@ pub const Token = struct {
.macro_func, .macro_func,
.macro_function, .macro_function,
.macro_pretty_func, .macro_pretty_func,
.builtin_choose_expr,
.builtin_va_arg,
.builtin_offsetof,
.builtin_bitoffsetof,
.builtin_types_compatible_p,
=> "an identifier", => "an identifier",
.string_literal, .string_literal,
.string_literal_utf_16, .string_literal_utf_16,
@ -763,7 +845,7 @@ pub const Token = struct {
.unterminated_char_literal, .unterminated_char_literal,
.empty_char_literal, .empty_char_literal,
=> "a character literal", => "a character literal",
.pp_num, .embed_byte => "A number", .pp_num, .embed_byte => "a number",
else => id.lexeme().?, else => id.lexeme().?,
}; };
} }
@ -871,6 +953,12 @@ pub const Token = struct {
.keyword_stdcall2, .keyword_stdcall2,
.keyword_thiscall2, .keyword_thiscall2,
.keyword_vectorcall2, .keyword_vectorcall2,
.keyword_fastcall2,
.keyword_cdecl2,
.keyword_forceinline,
.keyword_forceinline2,
.keyword_unaligned,
.keyword_unaligned2,
=> if (langopts.ms_extensions) kw else .identifier, => if (langopts.ms_extensions) kw else .identifier,
else => kw, else => kw,
}; };
@ -1013,13 +1101,21 @@ pub const Token = struct {
.{ "_thiscall", .keyword_thiscall2 }, .{ "_thiscall", .keyword_thiscall2 },
.{ "__vectorcall", .keyword_vectorcall }, .{ "__vectorcall", .keyword_vectorcall },
.{ "_vectorcall", .keyword_vectorcall2 }, .{ "_vectorcall", .keyword_vectorcall2 },
.{ "__fastcall", .keyword_fastcall },
.{ "_fastcall", .keyword_fastcall2 },
.{ "_regcall", .keyword_regcall },
.{ "__cdecl", .keyword_cdecl },
.{ "_cdecl", .keyword_cdecl2 },
.{ "__forceinline", .keyword_forceinline },
.{ "_forceinline", .keyword_forceinline2 },
.{ "__unaligned", .keyword_unaligned },
.{ "_unaligned", .keyword_unaligned2 },
// builtins that require special parsing // Type nullability
.{ "__builtin_choose_expr", .builtin_choose_expr }, .{ "_Nonnull", .keyword_nonnull },
.{ "__builtin_va_arg", .builtin_va_arg }, .{ "_Nullable", .keyword_nullable },
.{ "__builtin_offsetof", .builtin_offsetof }, .{ "_Nullable_result", .keyword_nullable_result },
.{ "__builtin_bitoffsetof", .builtin_bitoffsetof }, .{ "_Null_unspecified", .keyword_null_unspecified },
.{ "__builtin_types_compatible_p", .builtin_types_compatible_p },
}); });
}; };
@ -1099,6 +1195,26 @@ pub fn next(self: *Tokenizer) Token {
'u' => state = .u, 'u' => state = .u,
'U' => state = .U, 'U' => state = .U,
'L' => state = .L, 'L' => state = .L,
'\\' => {
const ucn_kind = UCNKind.classify(self.buf[self.index..]);
switch (ucn_kind) {
.none => {
self.index += 1;
id = .invalid;
break;
},
.incomplete => {
self.index += 1;
id = .incomplete_ucn;
break;
},
.hex4, .hex8 => {
self.index += @intFromEnum(ucn_kind);
id = .extended_identifier;
state = .extended_identifier;
},
}
},
'a'...'t', 'v'...'z', 'A'...'K', 'M'...'T', 'V'...'Z', '_' => state = .identifier, 'a'...'t', 'v'...'z', 'A'...'K', 'M'...'T', 'V'...'Z', '_' => state = .identifier,
'=' => state = .equal, '=' => state = .equal,
'!' => state = .bang, '!' => state = .bang,
@ -1324,6 +1440,20 @@ pub fn next(self: *Tokenizer) Token {
break; break;
}, },
0x80...0xFF => state = .extended_identifier, 0x80...0xFF => state = .extended_identifier,
'\\' => {
const ucn_kind = UCNKind.classify(self.buf[self.index..]);
switch (ucn_kind) {
.none, .incomplete => {
id = if (state == .identifier) Token.getTokenId(self.langopts, self.buf[start..self.index]) else .extended_identifier;
break;
},
.hex4, .hex8 => {
state = .extended_identifier;
self.index += @intFromEnum(ucn_kind);
},
}
},
else => { else => {
id = if (state == .identifier) Token.getTokenId(self.langopts, self.buf[start..self.index]) else .extended_identifier; id = if (state == .identifier) Token.getTokenId(self.langopts, self.buf[start..self.index]) else .extended_identifier;
break; break;
@ -1731,7 +1861,10 @@ pub fn next(self: *Tokenizer) Token {
} }
} else if (self.index == self.buf.len) { } else if (self.index == self.buf.len) {
switch (state) { switch (state) {
.start, .line_comment => {}, .start => {},
.line_comment => if (self.langopts.preserve_comments) {
id = .comment;
},
.u, .u8, .U, .L, .identifier => id = Token.getTokenId(self.langopts, self.buf[start..self.index]), .u, .u8, .U, .L, .identifier => id = Token.getTokenId(self.langopts, self.buf[start..self.index]),
.extended_identifier => id = .extended_identifier, .extended_identifier => id = .extended_identifier,
@ -2105,6 +2238,15 @@ test "comments" {
.hash, .hash,
.identifier, .identifier,
}); });
try expectTokensExtra(
\\//foo
\\void
\\//bar
, &.{
.comment, .nl,
.keyword_void, .nl,
.comment,
}, .{ .preserve_comments = true });
} }
test "extended identifiers" { test "extended identifiers" {
@ -2147,36 +2289,76 @@ test "C23 keywords" {
.keyword_c23_thread_local, .keyword_c23_thread_local,
.keyword_nullptr, .keyword_nullptr,
.keyword_typeof_unqual, .keyword_typeof_unqual,
}, .c23); }, .{ .standard = .c23 });
}
test "Universal character names" {
try expectTokens("\\", &.{.invalid});
try expectTokens("\\g", &.{ .invalid, .identifier });
try expectTokens("\\u", &.{ .incomplete_ucn, .identifier });
try expectTokens("\\ua", &.{ .incomplete_ucn, .identifier });
try expectTokens("\\U9", &.{ .incomplete_ucn, .identifier });
try expectTokens("\\ug", &.{ .incomplete_ucn, .identifier });
try expectTokens("\\uag", &.{ .incomplete_ucn, .identifier });
try expectTokens("\\ ", &.{ .invalid, .eof });
try expectTokens("\\g ", &.{ .invalid, .identifier, .eof });
try expectTokens("\\u ", &.{ .incomplete_ucn, .identifier, .eof });
try expectTokens("\\ua ", &.{ .incomplete_ucn, .identifier, .eof });
try expectTokens("\\U9 ", &.{ .incomplete_ucn, .identifier, .eof });
try expectTokens("\\ug ", &.{ .incomplete_ucn, .identifier, .eof });
try expectTokens("\\uag ", &.{ .incomplete_ucn, .identifier, .eof });
try expectTokens("a\\", &.{ .identifier, .invalid });
try expectTokens("a\\g", &.{ .identifier, .invalid, .identifier });
try expectTokens("a\\u", &.{ .identifier, .incomplete_ucn, .identifier });
try expectTokens("a\\ua", &.{ .identifier, .incomplete_ucn, .identifier });
try expectTokens("a\\U9", &.{ .identifier, .incomplete_ucn, .identifier });
try expectTokens("a\\ug", &.{ .identifier, .incomplete_ucn, .identifier });
try expectTokens("a\\uag", &.{ .identifier, .incomplete_ucn, .identifier });
try expectTokens("a\\ ", &.{ .identifier, .invalid, .eof });
try expectTokens("a\\g ", &.{ .identifier, .invalid, .identifier, .eof });
try expectTokens("a\\u ", &.{ .identifier, .incomplete_ucn, .identifier, .eof });
try expectTokens("a\\ua ", &.{ .identifier, .incomplete_ucn, .identifier, .eof });
try expectTokens("a\\U9 ", &.{ .identifier, .incomplete_ucn, .identifier, .eof });
try expectTokens("a\\ug ", &.{ .identifier, .incomplete_ucn, .identifier, .eof });
try expectTokens("a\\uag ", &.{ .identifier, .incomplete_ucn, .identifier, .eof });
} }
test "Tokenizer fuzz test" { test "Tokenizer fuzz test" {
var comp = Compilation.init(std.testing.allocator, std.fs.cwd()); const Context = struct {
defer comp.deinit(); fn testOne(_: @This(), input_bytes: []const u8) anyerror!void {
var arena: std.heap.ArenaAllocator = .init(std.testing.allocator);
defer arena.deinit();
var comp = Compilation.init(std.testing.allocator, arena.allocator(), undefined, std.fs.cwd());
defer comp.deinit();
const input_bytes = std.testing.fuzzInput(.{}); const source = try comp.addSourceFromBuffer("fuzz.c", input_bytes);
if (input_bytes.len == 0) return;
const source = try comp.addSourceFromBuffer("fuzz.c", input_bytes); var tokenizer: Tokenizer = .{
.buf = source.buf,
var tokenizer: Tokenizer = .{ .source = source.id,
.buf = source.buf, .langopts = comp.langopts,
.source = source.id, };
.langopts = comp.langopts, while (true) {
const prev_index = tokenizer.index;
const tok = tokenizer.next();
if (tok.id == .eof) break;
try std.testing.expect(prev_index < tokenizer.index); // ensure that the tokenizer always makes progress
}
}
}; };
while (true) { return std.testing.fuzz(Context{}, Context.testOne, .{});
const prev_index = tokenizer.index;
const tok = tokenizer.next();
if (tok.id == .eof) break;
try std.testing.expect(prev_index < tokenizer.index); // ensure that the tokenizer always makes progress
}
} }
fn expectTokensExtra(contents: []const u8, expected_tokens: []const Token.Id, standard: ?LangOpts.Standard) !void { fn expectTokensExtra(contents: []const u8, expected_tokens: []const Token.Id, langopts: ?LangOpts) !void {
var comp = Compilation.init(std.testing.allocator, std.fs.cwd()); var arena: std.heap.ArenaAllocator = .init(std.testing.allocator);
defer arena.deinit();
var comp = Compilation.init(std.testing.allocator, arena.allocator(), undefined, std.fs.cwd());
defer comp.deinit(); defer comp.deinit();
if (standard) |provided| { if (langopts) |provided| {
comp.langopts.standard = provided; comp.langopts = provided;
} }
const source = try comp.addSourceFromBuffer("path", contents); const source = try comp.addSourceFromBuffer("path", contents);
var tokenizer = Tokenizer{ var tokenizer = Tokenizer{

View File

@ -1,12 +1,14 @@
const std = @import("std"); const std = @import("std");
const Driver = @import("Driver.zig");
const Compilation = @import("Compilation.zig");
const mem = std.mem; const mem = std.mem;
const system_defaults = @import("system_defaults"); const system_defaults = @import("system_defaults");
const Compilation = @import("Compilation.zig");
const Driver = @import("Driver.zig");
const Filesystem = @import("Driver/Filesystem.zig").Filesystem;
const Multilib = @import("Driver/Multilib.zig");
const target_util = @import("target.zig"); const target_util = @import("target.zig");
const Linux = @import("toolchains/Linux.zig"); const Linux = @import("toolchains/Linux.zig");
const Multilib = @import("Driver/Multilib.zig");
const Filesystem = @import("Driver/Filesystem.zig").Filesystem;
pub const PathList = std.ArrayListUnmanaged([]const u8); pub const PathList = std.ArrayListUnmanaged([]const u8);
@ -48,9 +50,8 @@ const Inner = union(enum) {
const Toolchain = @This(); const Toolchain = @This();
filesystem: Filesystem = .{ .real = {} }, filesystem: Filesystem,
driver: *Driver, driver: *Driver,
arena: mem.Allocator,
/// The list of toolchain specific path prefixes to search for libraries. /// The list of toolchain specific path prefixes to search for libraries.
library_paths: PathList = .{}, library_paths: PathList = .{},
@ -83,7 +84,8 @@ pub fn discover(tc: *Toolchain) !void {
const target = tc.getTarget(); const target = tc.getTarget();
tc.inner = switch (target.os.tag) { tc.inner = switch (target.os.tag) {
.linux => if (target.cpu.arch == .hexagon) .linux,
=> if (target.cpu.arch == .hexagon)
.{ .unknown = {} } // TODO .{ .unknown = {} } // TODO
else if (target.cpu.arch.isMIPS()) else if (target.cpu.arch.isMIPS())
.{ .unknown = {} } // TODO .{ .unknown = {} } // TODO
@ -111,6 +113,11 @@ pub fn deinit(tc: *Toolchain) void {
tc.program_paths.deinit(gpa); tc.program_paths.deinit(gpa);
} }
/// Write assembler path to `buf` and return a slice of it
pub fn getAssemblerPath(tc: *const Toolchain, buf: []u8) ![]const u8 {
return tc.getProgramPath("as", buf);
}
/// Write linker path to `buf` and return a slice of it /// Write linker path to `buf` and return a slice of it
pub fn getLinkerPath(tc: *const Toolchain, buf: []u8) ![]const u8 { pub fn getLinkerPath(tc: *const Toolchain, buf: []u8) ![]const u8 {
// --ld-path= takes precedence over -fuse-ld= and specifies the executable // --ld-path= takes precedence over -fuse-ld= and specifies the executable
@ -149,7 +156,12 @@ pub fn getLinkerPath(tc: *const Toolchain, buf: []u8) ![]const u8 {
// to a relative path is surprising. This is more complex due to priorities // to a relative path is surprising. This is more complex due to priorities
// among -B, COMPILER_PATH and PATH. --ld-path= should be used instead. // among -B, COMPILER_PATH and PATH. --ld-path= should be used instead.
if (mem.indexOfScalar(u8, use_linker, '/') != null) { if (mem.indexOfScalar(u8, use_linker, '/') != null) {
try tc.driver.comp.addDiagnostic(.{ .tag = .fuse_ld_path }, &.{}); try tc.driver.comp.diagnostics.add(.{
.text = "'-fuse-ld=' taking a path is deprecated; use '--ld-path=' instead",
.kind = .off,
.opt = .@"fuse-ld-path",
.location = null,
});
} }
if (std.fs.path.isAbsolute(use_linker)) { if (std.fs.path.isAbsolute(use_linker)) {
@ -205,7 +217,7 @@ pub fn addFilePathLibArgs(tc: *const Toolchain, argv: *std.array_list.Managed([]
for (tc.file_paths.items) |path| { for (tc.file_paths.items) |path| {
bytes_needed += path.len + 2; // +2 for `-L` bytes_needed += path.len + 2; // +2 for `-L`
} }
var bytes = try tc.arena.alloc(u8, bytes_needed); var bytes = try tc.driver.comp.arena.alloc(u8, bytes_needed);
var index: usize = 0; var index: usize = 0;
for (tc.file_paths.items) |path| { for (tc.file_paths.items) |path| {
@memcpy(bytes[index..][0..2], "-L"); @memcpy(bytes[index..][0..2], "-L");
@ -252,6 +264,7 @@ pub fn getFilePath(tc: *const Toolchain, name: []const u8) ![]const u8 {
var path_buf: [std.fs.max_path_bytes]u8 = undefined; var path_buf: [std.fs.max_path_bytes]u8 = undefined;
var fib = std.heap.FixedBufferAllocator.init(&path_buf); var fib = std.heap.FixedBufferAllocator.init(&path_buf);
const allocator = fib.allocator(); const allocator = fib.allocator();
const arena = tc.driver.comp.arena;
const sysroot = tc.getSysroot(); const sysroot = tc.getSysroot();
@ -260,15 +273,15 @@ pub fn getFilePath(tc: *const Toolchain, name: []const u8) ![]const u8 {
const aro_dir = std.fs.path.dirname(tc.driver.aro_name) orelse ""; const aro_dir = std.fs.path.dirname(tc.driver.aro_name) orelse "";
const candidate = try std.fs.path.join(allocator, &.{ aro_dir, "..", name }); const candidate = try std.fs.path.join(allocator, &.{ aro_dir, "..", name });
if (tc.filesystem.exists(candidate)) { if (tc.filesystem.exists(candidate)) {
return tc.arena.dupe(u8, candidate); return arena.dupe(u8, candidate);
} }
if (tc.searchPaths(&fib, sysroot, tc.library_paths.items, name)) |path| { if (tc.searchPaths(&fib, sysroot, tc.library_paths.items, name)) |path| {
return tc.arena.dupe(u8, path); return arena.dupe(u8, path);
} }
if (tc.searchPaths(&fib, sysroot, tc.file_paths.items, name)) |path| { if (tc.searchPaths(&fib, sysroot, tc.file_paths.items, name)) |path| {
return try tc.arena.dupe(u8, path); return try arena.dupe(u8, path);
} }
return name; return name;
@ -299,7 +312,7 @@ const PathKind = enum {
program, program,
}; };
/// Join `components` into a path. If the path exists, dupe it into the toolchain arena and /// Join `components` into a path. If the path exists, dupe it into the Compilation arena and
/// add it to the specified path list. /// add it to the specified path list.
pub fn addPathIfExists(tc: *Toolchain, components: []const []const u8, dest_kind: PathKind) !void { pub fn addPathIfExists(tc: *Toolchain, components: []const []const u8, dest_kind: PathKind) !void {
var path_buf: [std.fs.max_path_bytes]u8 = undefined; var path_buf: [std.fs.max_path_bytes]u8 = undefined;
@ -308,7 +321,7 @@ pub fn addPathIfExists(tc: *Toolchain, components: []const []const u8, dest_kind
const candidate = try std.fs.path.join(fib.allocator(), components); const candidate = try std.fs.path.join(fib.allocator(), components);
if (tc.filesystem.exists(candidate)) { if (tc.filesystem.exists(candidate)) {
const duped = try tc.arena.dupe(u8, candidate); const duped = try tc.driver.comp.arena.dupe(u8, candidate);
const dest = switch (dest_kind) { const dest = switch (dest_kind) {
.library => &tc.library_paths, .library => &tc.library_paths,
.file => &tc.file_paths, .file => &tc.file_paths,
@ -318,10 +331,10 @@ pub fn addPathIfExists(tc: *Toolchain, components: []const []const u8, dest_kind
} }
} }
/// Join `components` using the toolchain arena and add the resulting path to `dest_kind`. Does not check /// Join `components` using the Compilation arena and add the resulting path to `dest_kind`. Does not check
/// whether the path actually exists /// whether the path actually exists
pub fn addPathFromComponents(tc: *Toolchain, components: []const []const u8, dest_kind: PathKind) !void { pub fn addPathFromComponents(tc: *Toolchain, components: []const []const u8, dest_kind: PathKind) !void {
const full_path = try std.fs.path.join(tc.arena, components); const full_path = try std.fs.path.join(tc.driver.comp.arena, components);
const dest = switch (dest_kind) { const dest = switch (dest_kind) {
.library => &tc.library_paths, .library => &tc.library_paths,
.file => &tc.file_paths, .file => &tc.file_paths,
@ -331,7 +344,7 @@ pub fn addPathFromComponents(tc: *Toolchain, components: []const []const u8, des
} }
/// Add linker args to `argv`. Does not add path to linker executable as first item; that must be handled separately /// Add linker args to `argv`. Does not add path to linker executable as first item; that must be handled separately
/// Items added to `argv` will be string literals or owned by `tc.arena` so they must not be individually freed /// Items added to `argv` will be string literals or owned by `tc.driver.comp.arena` so they must not be individually freed
pub fn buildLinkerArgs(tc: *Toolchain, argv: *std.array_list.Managed([]const u8)) !void { pub fn buildLinkerArgs(tc: *Toolchain, argv: *std.array_list.Managed([]const u8)) !void {
return switch (tc.inner) { return switch (tc.inner) {
.uninitialized => unreachable, .uninitialized => unreachable,
@ -396,7 +409,7 @@ fn getUnwindLibKind(tc: *const Toolchain) !UnwindLibKind {
return .libgcc; return .libgcc;
} else if (mem.eql(u8, libname, "libunwind")) { } else if (mem.eql(u8, libname, "libunwind")) {
if (tc.getRuntimeLibKind() == .libgcc) { if (tc.getRuntimeLibKind() == .libgcc) {
try tc.driver.comp.addDiagnostic(.{ .tag = .incompatible_unwindlib }, &.{}); try tc.driver.err("--rtlib=libgcc requires --unwindlib=libgcc", .{});
} }
return .compiler_rt; return .compiler_rt;
} else { } else {
@ -472,7 +485,7 @@ pub fn addRuntimeLibs(tc: *const Toolchain, argv: *std.array_list.Managed([]cons
if (target_util.isKnownWindowsMSVCEnvironment(target)) { if (target_util.isKnownWindowsMSVCEnvironment(target)) {
const rtlib_str = tc.driver.rtlib orelse system_defaults.rtlib; const rtlib_str = tc.driver.rtlib orelse system_defaults.rtlib;
if (!mem.eql(u8, rtlib_str, "platform")) { if (!mem.eql(u8, rtlib_str, "platform")) {
try tc.driver.comp.addDiagnostic(.{ .tag = .unsupported_rtlib_gcc, .extra = .{ .str = "MSVC" } }, &.{}); try tc.driver.err("unsupported runtime library 'libgcc' for platform 'MSVC'", .{});
} }
} else { } else {
try tc.addLibGCC(argv); try tc.addLibGCC(argv);
@ -494,7 +507,7 @@ pub fn defineSystemIncludes(tc: *Toolchain) !void {
const comp = tc.driver.comp; const comp = tc.driver.comp;
if (!tc.driver.nobuiltininc) { if (!tc.driver.nobuiltininc) {
try comp.addBuiltinIncludeDir(tc.driver.aro_name); try comp.addBuiltinIncludeDir(tc.driver.aro_name, tc.driver.resource_dir);
} }
if (!tc.driver.nostdlibinc) { if (!tc.driver.nostdlibinc) {

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

3008
lib/compiler/aro/aro/TypeStore.zig vendored Normal file

File diff suppressed because it is too large Load Diff

View File

@ -2,14 +2,14 @@ const std = @import("std");
const assert = std.debug.assert; const assert = std.debug.assert;
const BigIntConst = std.math.big.int.Const; const BigIntConst = std.math.big.int.Const;
const BigIntMutable = std.math.big.int.Mutable; const BigIntMutable = std.math.big.int.Mutable;
const backend = @import("../backend.zig");
const Interner = backend.Interner; const Interner = @import("../backend.zig").Interner;
const BigIntSpace = Interner.Tag.Int.BigIntSpace; const BigIntSpace = Interner.Tag.Int.BigIntSpace;
const Compilation = @import("Compilation.zig");
const Type = @import("Type.zig");
const target_util = @import("target.zig");
const annex_g = @import("annex_g.zig"); const annex_g = @import("annex_g.zig");
const Writer = std.Io.Writer; const Compilation = @import("Compilation.zig");
const target_util = @import("target.zig");
const QualType = @import("TypeStore.zig").QualType;
const Value = @This(); const Value = @This();
@ -33,11 +33,19 @@ pub fn int(i: anytype, comp: *Compilation) !Value {
} }
} }
pub fn pointer(r: Interner.Key.Pointer, comp: *Compilation) !Value {
return intern(comp, .{ .pointer = r });
}
pub fn ref(v: Value) Interner.Ref { pub fn ref(v: Value) Interner.Ref {
std.debug.assert(v.opt_ref != .none); std.debug.assert(v.opt_ref != .none);
return @enumFromInt(@intFromEnum(v.opt_ref)); return @enumFromInt(@intFromEnum(v.opt_ref));
} }
pub fn fromRef(r: Interner.Ref) Value {
return .{ .opt_ref = @enumFromInt(@intFromEnum(r)) };
}
pub fn is(v: Value, tag: std.meta.Tag(Interner.Key), comp: *const Compilation) bool { pub fn is(v: Value, tag: std.meta.Tag(Interner.Key), comp: *const Compilation) bool {
if (v.opt_ref == .none) return false; if (v.opt_ref == .none) return false;
return comp.interner.get(v.ref()) == tag; return comp.interner.get(v.ref()) == tag;
@ -68,7 +76,11 @@ test "minUnsignedBits" {
} }
}; };
var comp = Compilation.init(std.testing.allocator, std.fs.cwd()); var arena_state: std.heap.ArenaAllocator = .init(std.testing.allocator);
defer arena_state.deinit();
const arena = arena_state.allocator();
var comp = Compilation.init(std.testing.allocator, arena, undefined, std.fs.cwd());
defer comp.deinit(); defer comp.deinit();
const target_query = try std.Target.Query.parse(.{ .arch_os_abi = "x86_64-linux-gnu" }); const target_query = try std.Target.Query.parse(.{ .arch_os_abi = "x86_64-linux-gnu" });
comp.target = try std.zig.system.resolveTargetQuery(target_query); comp.target = try std.zig.system.resolveTargetQuery(target_query);
@ -103,7 +115,11 @@ test "minSignedBits" {
} }
}; };
var comp = Compilation.init(std.testing.allocator, std.fs.cwd()); var arena_state: std.heap.ArenaAllocator = .init(std.testing.allocator);
defer arena_state.deinit();
const arena = arena_state.allocator();
var comp = Compilation.init(std.testing.allocator, arena, undefined, std.fs.cwd());
defer comp.deinit(); defer comp.deinit();
const target_query = try std.Target.Query.parse(.{ .arch_os_abi = "x86_64-linux-gnu" }); const target_query = try std.Target.Query.parse(.{ .arch_os_abi = "x86_64-linux-gnu" });
comp.target = try std.zig.system.resolveTargetQuery(target_query); comp.target = try std.zig.system.resolveTargetQuery(target_query);
@ -133,24 +149,27 @@ pub const FloatToIntChangeKind = enum {
/// Converts the stored value from a float to an integer. /// Converts the stored value from a float to an integer.
/// `.none` value remains unchanged. /// `.none` value remains unchanged.
pub fn floatToInt(v: *Value, dest_ty: Type, comp: *Compilation) !FloatToIntChangeKind { pub fn floatToInt(v: *Value, dest_ty: QualType, comp: *Compilation) !FloatToIntChangeKind {
if (v.opt_ref == .none) return .none; if (v.opt_ref == .none) return .none;
const float_val = v.toFloat(f128, comp); const float_val = v.toFloat(f128, comp);
const was_zero = float_val == 0; const was_zero = float_val == 0;
if (dest_ty.is(.bool)) { if (dest_ty.is(comp, .bool)) {
const was_one = float_val == 1.0; const was_one = float_val == 1.0;
v.* = fromBool(!was_zero); v.* = fromBool(!was_zero);
if (was_zero or was_one) return .none; if (was_zero or was_one) return .none;
return .value_changed; return .value_changed;
} else if (dest_ty.isUnsignedInt(comp) and float_val < 0) { } else if (dest_ty.signedness(comp) == .unsigned and float_val < 0) {
v.* = zero; v.* = zero;
return .out_of_range; return .out_of_range;
} else if (!std.math.isFinite(float_val)) {
v.* = .{};
return .overflow;
} }
const signedness = dest_ty.signedness(comp); const signedness = dest_ty.signedness(comp);
const bits: usize = @intCast(dest_ty.bitSizeof(comp).?); const bits: usize = @intCast(dest_ty.bitSizeof(comp));
var big_int: std.math.big.int.Mutable = .{ var big_int: std.math.big.int.Mutable = .{
.limbs = try comp.gpa.alloc(std.math.big.Limb, @max( .limbs = try comp.gpa.alloc(std.math.big.Limb, @max(
@ -160,6 +179,7 @@ pub fn floatToInt(v: *Value, dest_ty: Type, comp: *Compilation) !FloatToIntChang
.len = undefined, .len = undefined,
.positive = undefined, .positive = undefined,
}; };
defer comp.gpa.free(big_int.limbs);
const had_fraction = switch (big_int.setFloat(float_val, .trunc)) { const had_fraction = switch (big_int.setFloat(float_val, .trunc)) {
.inexact => true, .inexact => true,
.exact => false, .exact => false,
@ -177,11 +197,11 @@ pub fn floatToInt(v: *Value, dest_ty: Type, comp: *Compilation) !FloatToIntChang
/// Converts the stored value from an integer to a float. /// Converts the stored value from an integer to a float.
/// `.none` value remains unchanged. /// `.none` value remains unchanged.
pub fn intToFloat(v: *Value, dest_ty: Type, comp: *Compilation) !void { pub fn intToFloat(v: *Value, dest_ty: QualType, comp: *Compilation) !void {
if (v.opt_ref == .none) return; if (v.opt_ref == .none) return;
if (dest_ty.isComplex()) { if (dest_ty.is(comp, .complex)) {
const bits = dest_ty.bitSizeof(comp).?; const bits = dest_ty.bitSizeof(comp);
const cf: Interner.Key.Complex = switch (bits) { const cf: Interner.Key.Complex = switch (bits) {
32 => .{ .cf16 = .{ v.toFloat(f16, comp), 0 } }, 32 => .{ .cf16 = .{ v.toFloat(f16, comp), 0 } },
64 => .{ .cf32 = .{ v.toFloat(f32, comp), 0 } }, 64 => .{ .cf32 = .{ v.toFloat(f32, comp), 0 } },
@ -193,7 +213,7 @@ pub fn intToFloat(v: *Value, dest_ty: Type, comp: *Compilation) !void {
v.* = try intern(comp, .{ .complex = cf }); v.* = try intern(comp, .{ .complex = cf });
return; return;
} }
const bits = dest_ty.bitSizeof(comp).?; const bits = dest_ty.bitSizeof(comp);
return switch (comp.interner.get(v.ref()).int) { return switch (comp.interner.get(v.ref()).int) {
inline .u64, .i64 => |data| { inline .u64, .i64 => |data| {
const f: Interner.Key.Float = switch (bits) { const f: Interner.Key.Float = switch (bits) {
@ -232,14 +252,16 @@ pub const IntCastChangeKind = enum {
/// Truncates or extends bits based on type. /// Truncates or extends bits based on type.
/// `.none` value remains unchanged. /// `.none` value remains unchanged.
pub fn intCast(v: *Value, dest_ty: Type, comp: *Compilation) !IntCastChangeKind { pub fn intCast(v: *Value, dest_ty: QualType, comp: *Compilation) !IntCastChangeKind {
if (v.opt_ref == .none) return .none; if (v.opt_ref == .none) return .none;
const key = comp.interner.get(v.ref());
if (key == .pointer or key == .bytes) return .none;
const dest_bits: usize = @intCast(dest_ty.bitSizeof(comp).?); const dest_bits: usize = @intCast(dest_ty.bitSizeof(comp));
const dest_signed = dest_ty.signedness(comp) == .signed; const dest_signed = dest_ty.signedness(comp) == .signed;
var space: BigIntSpace = undefined; var space: BigIntSpace = undefined;
const big = v.toBigInt(&space, comp); const big = key.toBigInt(&space);
const value_bits = big.bitCountTwosComp(); const value_bits = big.bitCountTwosComp();
// if big is negative, then is signed. // if big is negative, then is signed.
@ -269,10 +291,10 @@ pub fn intCast(v: *Value, dest_ty: Type, comp: *Compilation) !IntCastChangeKind
/// Converts the stored value to a float of the specified type /// Converts the stored value to a float of the specified type
/// `.none` value remains unchanged. /// `.none` value remains unchanged.
pub fn floatCast(v: *Value, dest_ty: Type, comp: *Compilation) !void { pub fn floatCast(v: *Value, dest_ty: QualType, comp: *Compilation) !void {
if (v.opt_ref == .none) return; if (v.opt_ref == .none) return;
const bits = dest_ty.bitSizeof(comp).?; const bits = dest_ty.bitSizeof(comp);
if (dest_ty.isComplex()) { if (dest_ty.is(comp, .complex)) {
const cf: Interner.Key.Complex = switch (bits) { const cf: Interner.Key.Complex = switch (bits) {
32 => .{ .cf16 = .{ v.toFloat(f16, comp), v.imag(f16, comp) } }, 32 => .{ .cf16 = .{ v.toFloat(f16, comp), v.imag(f16, comp) } },
64 => .{ .cf32 = .{ v.toFloat(f32, comp), v.imag(f32, comp) } }, 64 => .{ .cf32 = .{ v.toFloat(f32, comp), v.imag(f32, comp) } },
@ -370,11 +392,8 @@ fn bigIntToFloat(limbs: []const std.math.big.Limb, positive: bool) f128 {
} }
} }
pub fn toBigInt(val: Value, space: *BigIntSpace, comp: *const Compilation) BigIntConst { fn toBigInt(val: Value, space: *BigIntSpace, comp: *const Compilation) BigIntConst {
return switch (comp.interner.get(val.ref()).int) { return comp.interner.get(val.ref()).toBigInt(space);
inline .u64, .i64 => |x| BigIntMutable.init(&space.limbs, x).toConst(),
.big_int => |b| b,
};
} }
pub fn isZero(v: Value, comp: *const Compilation) bool { pub fn isZero(v: Value, comp: *const Compilation) bool {
@ -398,6 +417,7 @@ pub fn isZero(v: Value, comp: *const Compilation) bool {
inline else => |data| return data[0] == 0.0 and data[1] == 0.0, inline else => |data| return data[0] == 0.0 and data[1] == 0.0,
}, },
.bytes => return false, .bytes => return false,
.pointer => return false,
else => unreachable, else => unreachable,
} }
} }
@ -461,12 +481,19 @@ pub fn toBool(v: Value, comp: *const Compilation) bool {
pub fn toInt(v: Value, comptime T: type, comp: *const Compilation) ?T { pub fn toInt(v: Value, comptime T: type, comp: *const Compilation) ?T {
if (v.opt_ref == .none) return null; if (v.opt_ref == .none) return null;
if (comp.interner.get(v.ref()) != .int) return null; const key = comp.interner.get(v.ref());
if (key != .int) return null;
var space: BigIntSpace = undefined; var space: BigIntSpace = undefined;
const big_int = v.toBigInt(&space, comp); const big_int = key.toBigInt(&space);
return big_int.toInt(T) catch null; return big_int.toInt(T) catch null;
} }
pub fn toBytes(v: Value, comp: *const Compilation) []const u8 {
assert(v.opt_ref != .none);
const key = comp.interner.get(v.ref());
return key.bytes;
}
const ComplexOp = enum { const ComplexOp = enum {
add, add,
sub, sub,
@ -492,10 +519,11 @@ fn complexAddSub(lhs: Value, rhs: Value, comptime T: type, op: ComplexOp, comp:
}; };
} }
pub fn add(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !bool { pub fn add(res: *Value, lhs: Value, rhs: Value, qt: QualType, comp: *Compilation) !bool {
const bits: usize = @intCast(ty.bitSizeof(comp).?); const bits: usize = @intCast(qt.bitSizeof(comp));
if (ty.isFloat()) { const scalar_kind = qt.scalarKind(comp);
if (ty.isComplex()) { if (scalar_kind.isFloat()) {
if (scalar_kind == .complex_float) {
res.* = switch (bits) { res.* = switch (bits) {
32 => try complexAddSub(lhs, rhs, f16, .add, comp), 32 => try complexAddSub(lhs, rhs, f16, .add, comp),
64 => try complexAddSub(lhs, rhs, f32, .add, comp), 64 => try complexAddSub(lhs, rhs, f32, .add, comp),
@ -516,29 +544,60 @@ pub fn add(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !b
}; };
res.* = try intern(comp, .{ .float = f }); res.* = try intern(comp, .{ .float = f });
return false; return false;
} else {
var lhs_space: BigIntSpace = undefined;
var rhs_space: BigIntSpace = undefined;
const lhs_bigint = lhs.toBigInt(&lhs_space, comp);
const rhs_bigint = rhs.toBigInt(&rhs_space, comp);
const limbs = try comp.gpa.alloc(
std.math.big.Limb,
std.math.big.int.calcTwosCompLimbCount(bits),
);
defer comp.gpa.free(limbs);
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
const overflowed = result_bigint.addWrap(lhs_bigint, rhs_bigint, ty.signedness(comp), bits);
res.* = try intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } });
return overflowed;
} }
const lhs_key = comp.interner.get(lhs.ref());
const rhs_key = comp.interner.get(rhs.ref());
if (lhs_key == .bytes or rhs_key == .bytes) {
res.* = .{};
return false;
}
if (lhs_key == .pointer or rhs_key == .pointer) {
const rel, const index = if (lhs_key == .pointer)
.{ lhs_key.pointer, rhs }
else
.{ rhs_key.pointer, lhs };
const elem_size = try int(qt.childType(comp).sizeofOrNull(comp) orelse 1, comp);
var total_offset: Value = undefined;
const mul_overflow = try total_offset.mul(elem_size, index, comp.type_store.ptrdiff, comp);
const old_offset = fromRef(rel.offset);
const add_overflow = try total_offset.add(total_offset, old_offset, comp.type_store.ptrdiff, comp);
_ = try total_offset.intCast(comp.type_store.ptrdiff, comp);
res.* = try pointer(.{ .node = rel.node, .offset = total_offset.ref() }, comp);
return mul_overflow or add_overflow;
}
var lhs_space: BigIntSpace = undefined;
var rhs_space: BigIntSpace = undefined;
const lhs_bigint = lhs_key.toBigInt(&lhs_space);
const rhs_bigint = rhs_key.toBigInt(&rhs_space);
const limbs = try comp.gpa.alloc(
std.math.big.Limb,
std.math.big.int.calcTwosCompLimbCount(bits),
);
defer comp.gpa.free(limbs);
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
const overflowed = result_bigint.addWrap(lhs_bigint, rhs_bigint, qt.signedness(comp), bits);
res.* = try intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } });
return overflowed;
} }
pub fn sub(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !bool { pub fn negate(res: *Value, val: Value, qt: QualType, comp: *Compilation) !bool {
const bits: usize = @intCast(ty.bitSizeof(comp).?); return res.sub(zero, val, qt, undefined, comp);
if (ty.isFloat()) { }
if (ty.isComplex()) {
pub fn decrement(res: *Value, val: Value, qt: QualType, comp: *Compilation) !bool {
return res.sub(val, one, qt, undefined, comp);
}
/// elem_size is only used when subtracting two pointers, so we can scale the result by the size of the element type
pub fn sub(res: *Value, lhs: Value, rhs: Value, qt: QualType, elem_size: u64, comp: *Compilation) !bool {
const bits: usize = @intCast(qt.bitSizeof(comp));
const scalar_kind = qt.scalarKind(comp);
if (scalar_kind.isFloat()) {
if (scalar_kind == .complex_float) {
res.* = switch (bits) { res.* = switch (bits) {
32 => try complexAddSub(lhs, rhs, f16, .sub, comp), 32 => try complexAddSub(lhs, rhs, f16, .sub, comp),
64 => try complexAddSub(lhs, rhs, f32, .sub, comp), 64 => try complexAddSub(lhs, rhs, f32, .sub, comp),
@ -559,29 +618,61 @@ pub fn sub(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !b
}; };
res.* = try intern(comp, .{ .float = f }); res.* = try intern(comp, .{ .float = f });
return false; return false;
} else {
var lhs_space: BigIntSpace = undefined;
var rhs_space: BigIntSpace = undefined;
const lhs_bigint = lhs.toBigInt(&lhs_space, comp);
const rhs_bigint = rhs.toBigInt(&rhs_space, comp);
const limbs = try comp.gpa.alloc(
std.math.big.Limb,
std.math.big.int.calcTwosCompLimbCount(bits),
);
defer comp.gpa.free(limbs);
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
const overflowed = result_bigint.subWrap(lhs_bigint, rhs_bigint, ty.signedness(comp), bits);
res.* = try intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } });
return overflowed;
} }
const lhs_key = comp.interner.get(lhs.ref());
const rhs_key = comp.interner.get(rhs.ref());
if (lhs_key == .bytes or rhs_key == .bytes) {
res.* = .{};
return false;
}
if (lhs_key == .pointer and rhs_key == .pointer) {
const lhs_pointer = lhs_key.pointer;
const rhs_pointer = rhs_key.pointer;
if (lhs_pointer.node != rhs_pointer.node) {
res.* = .{};
return false;
}
const lhs_offset = fromRef(lhs_pointer.offset);
const rhs_offset = fromRef(rhs_pointer.offset);
const overflowed = try res.sub(lhs_offset, rhs_offset, comp.type_store.ptrdiff, undefined, comp);
const rhs_size = try int(elem_size, comp);
_ = try res.div(res.*, rhs_size, comp.type_store.ptrdiff, comp);
return overflowed;
} else if (lhs_key == .pointer) {
const rel = lhs_key.pointer;
const lhs_size = try int(elem_size, comp);
var total_offset: Value = undefined;
const mul_overflow = try total_offset.mul(lhs_size, rhs, comp.type_store.ptrdiff, comp);
const old_offset = fromRef(rel.offset);
const add_overflow = try total_offset.sub(old_offset, total_offset, comp.type_store.ptrdiff, undefined, comp);
_ = try total_offset.intCast(comp.type_store.ptrdiff, comp);
res.* = try pointer(.{ .node = rel.node, .offset = total_offset.ref() }, comp);
return mul_overflow or add_overflow;
}
var lhs_space: BigIntSpace = undefined;
var rhs_space: BigIntSpace = undefined;
const lhs_bigint = lhs_key.toBigInt(&lhs_space);
const rhs_bigint = rhs_key.toBigInt(&rhs_space);
const limbs = try comp.gpa.alloc(
std.math.big.Limb,
std.math.big.int.calcTwosCompLimbCount(bits),
);
defer comp.gpa.free(limbs);
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
const overflowed = result_bigint.subWrap(lhs_bigint, rhs_bigint, qt.signedness(comp), bits);
res.* = try intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } });
return overflowed;
} }
pub fn mul(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !bool { pub fn mul(res: *Value, lhs: Value, rhs: Value, qt: QualType, comp: *Compilation) !bool {
const bits: usize = @intCast(ty.bitSizeof(comp).?); const bits: usize = @intCast(qt.bitSizeof(comp));
if (ty.isFloat()) { const scalar_kind = qt.scalarKind(comp);
if (ty.isComplex()) { if (scalar_kind.isFloat()) {
if (scalar_kind == .complex_float) {
const cf: Interner.Key.Complex = switch (bits) { const cf: Interner.Key.Complex = switch (bits) {
32 => .{ .cf16 = annex_g.complexFloatMul(f16, lhs.toFloat(f16, comp), lhs.imag(f16, comp), rhs.toFloat(f16, comp), rhs.imag(f16, comp)) }, 32 => .{ .cf16 = annex_g.complexFloatMul(f16, lhs.toFloat(f16, comp), lhs.imag(f16, comp), rhs.toFloat(f16, comp), rhs.imag(f16, comp)) },
64 => .{ .cf32 = annex_g.complexFloatMul(f32, lhs.toFloat(f32, comp), lhs.imag(f32, comp), rhs.toFloat(f32, comp), rhs.imag(f32, comp)) }, 64 => .{ .cf32 = annex_g.complexFloatMul(f32, lhs.toFloat(f32, comp), lhs.imag(f32, comp), rhs.toFloat(f32, comp), rhs.imag(f32, comp)) },
@ -624,7 +715,7 @@ pub fn mul(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !b
result_bigint.mul(lhs_bigint, rhs_bigint, limbs_buffer, comp.gpa); result_bigint.mul(lhs_bigint, rhs_bigint, limbs_buffer, comp.gpa);
const signedness = ty.signedness(comp); const signedness = qt.signedness(comp);
const overflowed = !result_bigint.toConst().fitsInTwosComp(signedness, bits); const overflowed = !result_bigint.toConst().fitsInTwosComp(signedness, bits);
if (overflowed) { if (overflowed) {
result_bigint.truncate(result_bigint.toConst(), signedness, bits); result_bigint.truncate(result_bigint.toConst(), signedness, bits);
@ -635,10 +726,11 @@ pub fn mul(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !b
} }
/// caller guarantees rhs != 0 /// caller guarantees rhs != 0
pub fn div(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !bool { pub fn div(res: *Value, lhs: Value, rhs: Value, qt: QualType, comp: *Compilation) !bool {
const bits: usize = @intCast(ty.bitSizeof(comp).?); const bits: usize = @intCast(qt.bitSizeof(comp));
if (ty.isFloat()) { const scalar_kind = qt.scalarKind(comp);
if (ty.isComplex()) { if (scalar_kind.isFloat()) {
if (scalar_kind == .complex_float) {
const cf: Interner.Key.Complex = switch (bits) { const cf: Interner.Key.Complex = switch (bits) {
32 => .{ .cf16 = annex_g.complexFloatDiv(f16, lhs.toFloat(f16, comp), lhs.imag(f16, comp), rhs.toFloat(f16, comp), rhs.imag(f16, comp)) }, 32 => .{ .cf16 = annex_g.complexFloatDiv(f16, lhs.toFloat(f16, comp), lhs.imag(f16, comp), rhs.toFloat(f16, comp), rhs.imag(f16, comp)) },
64 => .{ .cf32 = annex_g.complexFloatDiv(f32, lhs.toFloat(f32, comp), lhs.imag(f32, comp), rhs.toFloat(f32, comp), rhs.imag(f32, comp)) }, 64 => .{ .cf32 = annex_g.complexFloatDiv(f32, lhs.toFloat(f32, comp), lhs.imag(f32, comp), rhs.toFloat(f32, comp), rhs.imag(f32, comp)) },
@ -689,22 +781,21 @@ pub fn div(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !b
result_q.divTrunc(&result_r, lhs_bigint, rhs_bigint, limbs_buffer); result_q.divTrunc(&result_r, lhs_bigint, rhs_bigint, limbs_buffer);
res.* = try intern(comp, .{ .int = .{ .big_int = result_q.toConst() } }); res.* = try intern(comp, .{ .int = .{ .big_int = result_q.toConst() } });
return !result_q.toConst().fitsInTwosComp(ty.signedness(comp), bits); return !result_q.toConst().fitsInTwosComp(qt.signedness(comp), bits);
} }
} }
/// caller guarantees rhs != 0 /// caller guarantees rhs != 0
/// caller guarantees lhs != std.math.minInt(T) OR rhs != -1 /// caller guarantees lhs != std.math.minInt(T) OR rhs != -1
pub fn rem(lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !Value { pub fn rem(lhs: Value, rhs: Value, qt: QualType, comp: *Compilation) !Value {
var lhs_space: BigIntSpace = undefined; var lhs_space: BigIntSpace = undefined;
var rhs_space: BigIntSpace = undefined; var rhs_space: BigIntSpace = undefined;
const lhs_bigint = lhs.toBigInt(&lhs_space, comp); const lhs_bigint = lhs.toBigInt(&lhs_space, comp);
const rhs_bigint = rhs.toBigInt(&rhs_space, comp); const rhs_bigint = rhs.toBigInt(&rhs_space, comp);
const signedness = ty.signedness(comp); if (qt.signedness(comp) == .signed) {
if (signedness == .signed) {
var spaces: [2]BigIntSpace = undefined; var spaces: [2]BigIntSpace = undefined;
const min_val = try Value.minInt(ty, comp); const min_val = try Value.minInt(qt, comp);
const negative = BigIntMutable.init(&spaces[0].limbs, -1).toConst(); const negative = BigIntMutable.init(&spaces[0].limbs, -1).toConst();
const big_one = BigIntMutable.init(&spaces[1].limbs, 1).toConst(); const big_one = BigIntMutable.init(&spaces[1].limbs, 1).toConst();
if (lhs.compare(.eq, min_val, comp) and rhs_bigint.eql(negative)) { if (lhs.compare(.eq, min_val, comp) and rhs_bigint.eql(negative)) {
@ -712,9 +803,9 @@ pub fn rem(lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !Value {
} else if (rhs_bigint.order(big_one).compare(.lt)) { } else if (rhs_bigint.order(big_one).compare(.lt)) {
// lhs - @divTrunc(lhs, rhs) * rhs // lhs - @divTrunc(lhs, rhs) * rhs
var tmp: Value = undefined; var tmp: Value = undefined;
_ = try tmp.div(lhs, rhs, ty, comp); _ = try tmp.div(lhs, rhs, qt, comp);
_ = try tmp.mul(tmp, rhs, ty, comp); _ = try tmp.mul(tmp, rhs, qt, comp);
_ = try tmp.sub(lhs, tmp, ty, comp); _ = try tmp.sub(lhs, tmp, qt, undefined, comp);
return tmp; return tmp;
} }
} }
@ -801,8 +892,8 @@ pub fn bitAnd(lhs: Value, rhs: Value, comp: *Compilation) !Value {
return intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } }); return intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } });
} }
pub fn bitNot(val: Value, ty: Type, comp: *Compilation) !Value { pub fn bitNot(val: Value, qt: QualType, comp: *Compilation) !Value {
const bits: usize = @intCast(ty.bitSizeof(comp).?); const bits: usize = @intCast(qt.bitSizeof(comp));
var val_space: Value.BigIntSpace = undefined; var val_space: Value.BigIntSpace = undefined;
const val_bigint = val.toBigInt(&val_space, comp); const val_bigint = val.toBigInt(&val_space, comp);
@ -813,21 +904,21 @@ pub fn bitNot(val: Value, ty: Type, comp: *Compilation) !Value {
defer comp.gpa.free(limbs); defer comp.gpa.free(limbs);
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
result_bigint.bitNotWrap(val_bigint, ty.signedness(comp), bits); result_bigint.bitNotWrap(val_bigint, qt.signedness(comp), bits);
return intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } }); return intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } });
} }
pub fn shl(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !bool { pub fn shl(res: *Value, lhs: Value, rhs: Value, qt: QualType, comp: *Compilation) !bool {
var lhs_space: Value.BigIntSpace = undefined; var lhs_space: Value.BigIntSpace = undefined;
const lhs_bigint = lhs.toBigInt(&lhs_space, comp); const lhs_bigint = lhs.toBigInt(&lhs_space, comp);
const shift = rhs.toInt(usize, comp) orelse std.math.maxInt(usize); const shift = rhs.toInt(usize, comp) orelse std.math.maxInt(usize);
const bits: usize = @intCast(ty.bitSizeof(comp).?); const bits: usize = @intCast(qt.bitSizeof(comp));
if (shift > bits) { if (shift > bits) {
if (lhs_bigint.positive) { if (lhs_bigint.positive) {
res.* = try Value.maxInt(ty, comp); res.* = try Value.maxInt(qt, comp);
} else { } else {
res.* = try Value.minInt(ty, comp); res.* = try Value.minInt(qt, comp);
} }
return true; return true;
} }
@ -840,7 +931,7 @@ pub fn shl(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !b
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
result_bigint.shiftLeft(lhs_bigint, shift); result_bigint.shiftLeft(lhs_bigint, shift);
const signedness = ty.signedness(comp); const signedness = qt.signedness(comp);
const overflowed = !result_bigint.toConst().fitsInTwosComp(signedness, bits); const overflowed = !result_bigint.toConst().fitsInTwosComp(signedness, bits);
if (overflowed) { if (overflowed) {
result_bigint.truncate(result_bigint.toConst(), signedness, bits); result_bigint.truncate(result_bigint.toConst(), signedness, bits);
@ -849,7 +940,7 @@ pub fn shl(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !b
return overflowed; return overflowed;
} }
pub fn shr(lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !Value { pub fn shr(lhs: Value, rhs: Value, qt: QualType, comp: *Compilation) !Value {
var lhs_space: Value.BigIntSpace = undefined; var lhs_space: Value.BigIntSpace = undefined;
const lhs_bigint = lhs.toBigInt(&lhs_space, comp); const lhs_bigint = lhs.toBigInt(&lhs_space, comp);
const shift = rhs.toInt(usize, comp) orelse return zero; const shift = rhs.toInt(usize, comp) orelse return zero;
@ -865,7 +956,7 @@ pub fn shr(lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !Value {
} }
} }
const bits: usize = @intCast(ty.bitSizeof(comp).?); const bits: usize = @intCast(qt.bitSizeof(comp));
const limbs = try comp.gpa.alloc( const limbs = try comp.gpa.alloc(
std.math.big.Limb, std.math.big.Limb,
std.math.big.int.calcTwosCompLimbCount(bits), std.math.big.int.calcTwosCompLimbCount(bits),
@ -877,8 +968,8 @@ pub fn shr(lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !Value {
return intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } }); return intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } });
} }
pub fn complexConj(val: Value, ty: Type, comp: *Compilation) !Value { pub fn complexConj(val: Value, qt: QualType, comp: *Compilation) !Value {
const bits = ty.bitSizeof(comp).?; const bits = qt.bitSizeof(comp);
const cf: Interner.Key.Complex = switch (bits) { const cf: Interner.Key.Complex = switch (bits) {
32 => .{ .cf16 = .{ val.toFloat(f16, comp), -val.imag(f16, comp) } }, 32 => .{ .cf16 = .{ val.toFloat(f16, comp), -val.imag(f16, comp) } },
64 => .{ .cf32 = .{ val.toFloat(f32, comp), -val.imag(f32, comp) } }, 64 => .{ .cf32 = .{ val.toFloat(f32, comp), -val.imag(f32, comp) } },
@ -890,12 +981,17 @@ pub fn complexConj(val: Value, ty: Type, comp: *Compilation) !Value {
return intern(comp, .{ .complex = cf }); return intern(comp, .{ .complex = cf });
} }
pub fn compare(lhs: Value, op: std.math.CompareOperator, rhs: Value, comp: *const Compilation) bool { fn shallowCompare(lhs: Value, op: std.math.CompareOperator, rhs: Value) ?bool {
if (op == .eq) { if (op == .eq) {
return lhs.opt_ref == rhs.opt_ref; return lhs.opt_ref == rhs.opt_ref;
} else if (lhs.opt_ref == rhs.opt_ref) { } else if (lhs.opt_ref == rhs.opt_ref) {
return std.math.Order.eq.compare(op); return std.math.Order.eq.compare(op);
} }
return null;
}
pub fn compare(lhs: Value, op: std.math.CompareOperator, rhs: Value, comp: *const Compilation) bool {
if (lhs.shallowCompare(op, rhs)) |val| return val;
const lhs_key = comp.interner.get(lhs.ref()); const lhs_key = comp.interner.get(lhs.ref());
const rhs_key = comp.interner.get(rhs.ref()); const rhs_key = comp.interner.get(rhs.ref());
@ -918,10 +1014,33 @@ pub fn compare(lhs: Value, op: std.math.CompareOperator, rhs: Value, comp: *cons
return lhs_bigint.order(rhs_bigint).compare(op); return lhs_bigint.order(rhs_bigint).compare(op);
} }
fn twosCompIntLimit(limit: std.math.big.int.TwosCompIntLimit, ty: Type, comp: *Compilation) !Value { /// Returns null for values that cannot be compared at compile time (e.g. `&x < &y`) for globals `x` and `y`.
const signedness = ty.signedness(comp); pub fn comparePointers(lhs: Value, op: std.math.CompareOperator, rhs: Value, comp: *const Compilation) ?bool {
if (lhs.shallowCompare(op, rhs)) |val| return val;
const lhs_key = comp.interner.get(lhs.ref());
const rhs_key = comp.interner.get(rhs.ref());
if (lhs_key == .pointer and rhs_key == .pointer) {
const lhs_pointer = lhs_key.pointer;
const rhs_pointer = rhs_key.pointer;
switch (op) {
.eq => if (lhs_pointer.node != rhs_pointer.node) return false,
.neq => if (lhs_pointer.node != rhs_pointer.node) return true,
else => if (lhs_pointer.node != rhs_pointer.node) return null,
}
const lhs_offset = fromRef(lhs_pointer.offset);
const rhs_offset = fromRef(rhs_pointer.offset);
return lhs_offset.compare(op, rhs_offset, comp);
}
return null;
}
fn twosCompIntLimit(limit: std.math.big.int.TwosCompIntLimit, qt: QualType, comp: *Compilation) !Value {
const signedness = qt.signedness(comp);
if (limit == .min and signedness == .unsigned) return Value.zero; if (limit == .min and signedness == .unsigned) return Value.zero;
const mag_bits: usize = @intCast(ty.bitSizeof(comp).?); const mag_bits: usize = @intCast(qt.bitSizeof(comp));
switch (mag_bits) { switch (mag_bits) {
inline 8, 16, 32, 64 => |bits| { inline 8, 16, 32, 64 => |bits| {
if (limit == .min) return Value.int(@as(i64, std.math.minInt(std.meta.Int(.signed, bits))), comp); if (limit == .min) return Value.int(@as(i64, std.math.minInt(std.meta.Int(.signed, bits))), comp);
@ -946,44 +1065,63 @@ fn twosCompIntLimit(limit: std.math.big.int.TwosCompIntLimit, ty: Type, comp: *C
return Value.intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } }); return Value.intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } });
} }
pub fn minInt(ty: Type, comp: *Compilation) !Value { pub fn minInt(qt: QualType, comp: *Compilation) !Value {
return twosCompIntLimit(.min, ty, comp); return twosCompIntLimit(.min, qt, comp);
} }
pub fn maxInt(ty: Type, comp: *Compilation) !Value { pub fn maxInt(qt: QualType, comp: *Compilation) !Value {
return twosCompIntLimit(.max, ty, comp); return twosCompIntLimit(.max, qt, comp);
} }
pub fn print(v: Value, ty: Type, comp: *const Compilation, w: *Writer) Writer.Error!void { const NestedPrint = union(enum) {
if (ty.is(.bool)) { pointer: struct {
return w.writeAll(if (v.isZero(comp)) "false" else "true"); node: u32,
offset: Value,
},
};
pub fn printPointer(offset: Value, base: []const u8, comp: *const Compilation, w: *std.Io.Writer) std.Io.Writer.Error!void {
try w.writeByte('&');
try w.writeAll(base);
if (!offset.isZero(comp)) {
const maybe_nested = try offset.print(comp.type_store.ptrdiff, comp, w);
std.debug.assert(maybe_nested == null);
}
}
pub fn print(v: Value, qt: QualType, comp: *const Compilation, w: *std.Io.Writer) std.Io.Writer.Error!?NestedPrint {
if (qt.is(comp, .bool)) {
try w.writeAll(if (v.isZero(comp)) "false" else "true");
return null;
} }
const key = comp.interner.get(v.ref()); const key = comp.interner.get(v.ref());
switch (key) { switch (key) {
.null => return w.writeAll("nullptr_t"), .null => try w.writeAll("nullptr_t"),
.int => |repr| switch (repr) { .int => |repr| switch (repr) {
inline .u64, .i64, .big_int => |x| return w.print("{d}", .{x}), inline else => |x| try w.print("{d}", .{x}),
}, },
.float => |repr| switch (repr) { .float => |repr| switch (repr) {
.f16 => |x| return w.print("{d}", .{@round(@as(f64, @floatCast(x)) * 1000) / 1000}), .f16 => |x| try w.print("{d}", .{@round(@as(f64, @floatCast(x)) * 1000) / 1000}),
.f32 => |x| return w.print("{d}", .{@round(@as(f64, @floatCast(x)) * 1000000) / 1000000}), .f32 => |x| try w.print("{d}", .{@round(@as(f64, @floatCast(x)) * 1000000) / 1000000}),
inline else => |x| return w.print("{d}", .{@as(f64, @floatCast(x))}), inline else => |x| try w.print("{d}", .{@as(f64, @floatCast(x))}),
}, },
.bytes => |b| return printString(b, ty, comp, w), .bytes => |b| try printString(b, qt, comp, w),
.complex => |repr| switch (repr) { .complex => |repr| switch (repr) {
.cf32 => |components| return w.print("{d} + {d}i", .{ @round(@as(f64, @floatCast(components[0])) * 1000000) / 1000000, @round(@as(f64, @floatCast(components[1])) * 1000000) / 1000000 }), .cf32 => |components| try w.print("{d} + {d}i", .{ @round(@as(f64, @floatCast(components[0])) * 1000000) / 1000000, @round(@as(f64, @floatCast(components[1])) * 1000000) / 1000000 }),
inline else => |components| return w.print("{d} + {d}i", .{ @as(f64, @floatCast(components[0])), @as(f64, @floatCast(components[1])) }), inline else => |components| try w.print("{d} + {d}i", .{ @as(f64, @floatCast(components[0])), @as(f64, @floatCast(components[1])) }),
}, },
.pointer => |ptr| return .{ .pointer = .{ .node = ptr.node, .offset = fromRef(ptr.offset) } },
else => unreachable, // not a value else => unreachable, // not a value
} }
return null;
} }
pub fn printString(bytes: []const u8, ty: Type, comp: *const Compilation, w: *Writer) Writer.Error!void { pub fn printString(bytes: []const u8, qt: QualType, comp: *const Compilation, w: *std.Io.Writer) std.Io.Writer.Error!void {
const size: Compilation.CharUnitSize = @enumFromInt(ty.elemType().sizeof(comp).?); const size: Compilation.CharUnitSize = @enumFromInt(qt.childType(comp).sizeof(comp));
const without_null = bytes[0 .. bytes.len - @intFromEnum(size)]; const without_null = bytes[0 .. bytes.len - @intFromEnum(size)];
try w.writeByte('"'); try w.writeByte('"');
switch (size) { switch (size) {
.@"1" => try w.print("{f}", .{std.zig.fmtString(without_null)}), .@"1" => try std.zig.stringEscape(without_null, w),
.@"2" => { .@"2" => {
var items: [2]u16 = undefined; var items: [2]u16 = undefined;
var i: usize = 0; var i: usize = 0;

View File

@ -442,48 +442,48 @@ pub fn isInvisible(codepoint: u21) bool {
} }
/// Checks for identifier characters which resemble non-identifier characters /// Checks for identifier characters which resemble non-identifier characters
pub fn homoglyph(codepoint: u21) ?u21 { pub fn homoglyph(codepoint: u21) ?[]const u8 {
assert(codepoint > 0x7F); assert(codepoint > 0x7F);
return switch (codepoint) { return switch (codepoint) {
0x01c3 => '!', // LATIN LETTER RETROFLEX CLICK 0x01c3 => "!", // LATIN LETTER RETROFLEX CLICK
0x037e => ';', // GREEK QUESTION MARK 0x037e => ";", // GREEK QUESTION MARK
0x2212 => '-', // MINUS SIGN 0x2212 => "-", // MINUS SIGN
0x2215 => '/', // DIVISION SLASH 0x2215 => "/", // DIVISION SLASH
0x2216 => '\\', // SET MINUS 0x2216 => "\\", // SET MINUS
0x2217 => '*', // ASTERISK OPERATOR 0x2217 => "*", // ASTERISK OPERATOR
0x2223 => '|', // DIVIDES 0x2223 => "|", // DIVIDES
0x2227 => '^', // LOGICAL AND 0x2227 => "^", // LOGICAL AND
0x2236 => ':', // RATIO 0x2236 => ":", // RATIO
0x223c => '~', // TILDE OPERATOR 0x223c => "~", // TILDE OPERATOR
0xa789 => ':', // MODIFIER LETTER COLON 0xa789 => ":", // MODIFIER LETTER COLON
0xff01 => '!', // FULLWIDTH EXCLAMATION MARK 0xff01 => "!", // FULLWIDTH EXCLAMATION MARK
0xff03 => '#', // FULLWIDTH NUMBER SIGN 0xff03 => "#", // FULLWIDTH NUMBER SIGN
0xff04 => '$', // FULLWIDTH DOLLAR SIGN 0xff04 => "$", // FULLWIDTH DOLLAR SIGN
0xff05 => '%', // FULLWIDTH PERCENT SIGN 0xff05 => "%", // FULLWIDTH PERCENT SIGN
0xff06 => '&', // FULLWIDTH AMPERSAND 0xff06 => "&", // FULLWIDTH AMPERSAND
0xff08 => '(', // FULLWIDTH LEFT PARENTHESIS 0xff08 => "(", // FULLWIDTH LEFT PARENTHESIS
0xff09 => ')', // FULLWIDTH RIGHT PARENTHESIS 0xff09 => ")", // FULLWIDTH RIGHT PARENTHESIS
0xff0a => '*', // FULLWIDTH ASTERISK 0xff0a => "*", // FULLWIDTH ASTERISK
0xff0b => '+', // FULLWIDTH ASTERISK 0xff0b => "+", // FULLWIDTH ASTERISK
0xff0c => ',', // FULLWIDTH COMMA 0xff0c => ",", // FULLWIDTH COMMA
0xff0d => '-', // FULLWIDTH HYPHEN-MINUS 0xff0d => "-", // FULLWIDTH HYPHEN-MINUS
0xff0e => '.', // FULLWIDTH FULL STOP 0xff0e => ".", // FULLWIDTH FULL STOP
0xff0f => '/', // FULLWIDTH SOLIDUS 0xff0f => "/", // FULLWIDTH SOLIDUS
0xff1a => ':', // FULLWIDTH COLON 0xff1a => ":", // FULLWIDTH COLON
0xff1b => ';', // FULLWIDTH SEMICOLON 0xff1b => ";", // FULLWIDTH SEMICOLON
0xff1c => '<', // FULLWIDTH LESS-THAN SIGN 0xff1c => "<", // FULLWIDTH LESS-THAN SIGN
0xff1d => '=', // FULLWIDTH EQUALS SIGN 0xff1d => "=", // FULLWIDTH EQUALS SIGN
0xff1e => '>', // FULLWIDTH GREATER-THAN SIGN 0xff1e => ">", // FULLWIDTH GREATER-THAN SIGN
0xff1f => '?', // FULLWIDTH QUESTION MARK 0xff1f => "?", // FULLWIDTH QUESTION MARK
0xff20 => '@', // FULLWIDTH COMMERCIAL AT 0xff20 => "@", // FULLWIDTH COMMERCIAL AT
0xff3b => '[', // FULLWIDTH LEFT SQUARE BRACKET 0xff3b => "[", // FULLWIDTH LEFT SQUARE BRACKET
0xff3c => '\\', // FULLWIDTH REVERSE SOLIDUS 0xff3c => "\\", // FULLWIDTH REVERSE SOLIDUS
0xff3d => ']', // FULLWIDTH RIGHT SQUARE BRACKET 0xff3d => "]", // FULLWIDTH RIGHT SQUARE BRACKET
0xff3e => '^', // FULLWIDTH CIRCUMFLEX ACCENT 0xff3e => "^", // FULLWIDTH CIRCUMFLEX ACCENT
0xff5b => '{', // FULLWIDTH LEFT CURLY BRACKET 0xff5b => "{", // FULLWIDTH LEFT CURLY BRACKET
0xff5c => '|', // FULLWIDTH VERTICAL LINE 0xff5c => "|", // FULLWIDTH VERTICAL LINE
0xff5d => '}', // FULLWIDTH RIGHT CURLY BRACKET 0xff5d => "}", // FULLWIDTH RIGHT CURLY BRACKET
0xff5e => '~', // FULLWIDTH TILDE 0xff5e => "~", // FULLWIDTH TILDE
else => null, else => null,
}; };
} }

View File

@ -57,13 +57,13 @@ pub fn hasExtension(comp: *Compilation, ext: []const u8) bool {
// C11 features // C11 features
.c_alignas = true, .c_alignas = true,
.c_alignof = true, .c_alignof = true,
.c_atomic = false, // TODO .c_atomic = true,
.c_generic_selections = true, .c_generic_selections = true,
.c_static_assert = true, .c_static_assert = true,
.c_thread_local = target_util.isTlsSupported(comp.target), .c_thread_local = target_util.isTlsSupported(comp.target),
// misc // misc
.overloadable_unmarked = false, // TODO .overloadable_unmarked = false, // TODO
.statement_attributes_with_gnu_syntax = false, // TODO .statement_attributes_with_gnu_syntax = true,
.gnu_asm = true, .gnu_asm = true,
.gnu_asm_goto_with_outputs = true, .gnu_asm_goto_with_outputs = true,
.matrix_types = false, // TODO .matrix_types = false, // TODO

View File

@ -1,10 +1,11 @@
const std = @import("std"); const std = @import("std");
const mem = std.mem; const mem = std.mem;
const Compilation = @import("../Compilation.zig"); const Compilation = @import("../Compilation.zig");
const Pragma = @import("../Pragma.zig");
const Diagnostics = @import("../Diagnostics.zig"); const Diagnostics = @import("../Diagnostics.zig");
const Preprocessor = @import("../Preprocessor.zig");
const Parser = @import("../Parser.zig"); const Parser = @import("../Parser.zig");
const Pragma = @import("../Pragma.zig");
const Preprocessor = @import("../Preprocessor.zig");
const TokenIndex = @import("../Tree.zig").TokenIndex; const TokenIndex = @import("../Tree.zig").TokenIndex;
const GCC = @This(); const GCC = @This();
@ -18,8 +19,8 @@ pragma: Pragma = .{
.parserHandler = parserHandler, .parserHandler = parserHandler,
.preserveTokens = preserveTokens, .preserveTokens = preserveTokens,
}, },
original_options: Diagnostics.Options = .{}, original_state: Diagnostics.State = .{},
options_stack: std.ArrayListUnmanaged(Diagnostics.Options) = .empty, state_stack: std.ArrayListUnmanaged(Diagnostics.State) = .{},
const Directive = enum { const Directive = enum {
warning, warning,
@ -38,19 +39,19 @@ const Directive = enum {
fn beforePreprocess(pragma: *Pragma, comp: *Compilation) void { fn beforePreprocess(pragma: *Pragma, comp: *Compilation) void {
var self: *GCC = @fieldParentPtr("pragma", pragma); var self: *GCC = @fieldParentPtr("pragma", pragma);
self.original_options = comp.diagnostics.options; self.original_state = comp.diagnostics.state;
} }
fn beforeParse(pragma: *Pragma, comp: *Compilation) void { fn beforeParse(pragma: *Pragma, comp: *Compilation) void {
var self: *GCC = @fieldParentPtr("pragma", pragma); var self: *GCC = @fieldParentPtr("pragma", pragma);
comp.diagnostics.options = self.original_options; comp.diagnostics.state = self.original_state;
self.options_stack.items.len = 0; self.state_stack.items.len = 0;
} }
fn afterParse(pragma: *Pragma, comp: *Compilation) void { fn afterParse(pragma: *Pragma, comp: *Compilation) void {
var self: *GCC = @fieldParentPtr("pragma", pragma); var self: *GCC = @fieldParentPtr("pragma", pragma);
comp.diagnostics.options = self.original_options; comp.diagnostics.state = self.original_state;
self.options_stack.items.len = 0; self.state_stack.items.len = 0;
} }
pub fn init(allocator: mem.Allocator) !*Pragma { pub fn init(allocator: mem.Allocator) !*Pragma {
@ -61,7 +62,7 @@ pub fn init(allocator: mem.Allocator) !*Pragma {
fn deinit(pragma: *Pragma, comp: *Compilation) void { fn deinit(pragma: *Pragma, comp: *Compilation) void {
var self: *GCC = @fieldParentPtr("pragma", pragma); var self: *GCC = @fieldParentPtr("pragma", pragma);
self.options_stack.deinit(comp.gpa); self.state_stack.deinit(comp.gpa);
comp.gpa.destroy(self); comp.gpa.destroy(self);
} }
@ -76,23 +77,14 @@ fn diagnosticHandler(self: *GCC, pp: *Preprocessor, start_idx: TokenIndex) Pragm
.ignored, .warning, .@"error", .fatal => { .ignored, .warning, .@"error", .fatal => {
const str = Pragma.pasteTokens(pp, start_idx + 1) catch |err| switch (err) { const str = Pragma.pasteTokens(pp, start_idx + 1) catch |err| switch (err) {
error.ExpectedStringLiteral => { error.ExpectedStringLiteral => {
return pp.comp.addDiagnostic(.{ return Pragma.err(pp, start_idx, .pragma_requires_string_literal, .{"GCC diagnostic"});
.tag = .pragma_requires_string_literal,
.loc = diagnostic_tok.loc,
.extra = .{ .str = "GCC diagnostic" },
}, pp.expansionSlice(start_idx));
}, },
else => |e| return e, else => |e| return e,
}; };
if (!mem.startsWith(u8, str, "-W")) { if (!mem.startsWith(u8, str, "-W")) {
const next = pp.tokens.get(start_idx + 1); return Pragma.err(pp, start_idx + 1, .malformed_warning_check, .{"GCC diagnostic"});
return pp.comp.addDiagnostic(.{
.tag = .malformed_warning_check,
.loc = next.loc,
.extra = .{ .str = "GCC diagnostic" },
}, pp.expansionSlice(start_idx + 1));
} }
const new_kind: Diagnostics.Kind = switch (diagnostic) { const new_kind: Diagnostics.Message.Kind = switch (diagnostic) {
.ignored => .off, .ignored => .off,
.warning => .warning, .warning => .warning,
.@"error" => .@"error", .@"error" => .@"error",
@ -100,10 +92,10 @@ fn diagnosticHandler(self: *GCC, pp: *Preprocessor, start_idx: TokenIndex) Pragm
else => unreachable, else => unreachable,
}; };
try pp.comp.diagnostics.set(str[2..], new_kind); try pp.diagnostics.set(str[2..], new_kind);
}, },
.push => try self.options_stack.append(pp.comp.gpa, pp.comp.diagnostics.options), .push => try self.state_stack.append(pp.comp.gpa, pp.diagnostics.state),
.pop => pp.comp.diagnostics.options = self.options_stack.pop() orelse self.original_options, .pop => pp.diagnostics.state = self.state_stack.pop() orelse self.original_state,
} }
} }
@ -112,38 +104,24 @@ fn preprocessorHandler(pragma: *Pragma, pp: *Preprocessor, start_idx: TokenIndex
const directive_tok = pp.tokens.get(start_idx + 1); const directive_tok = pp.tokens.get(start_idx + 1);
if (directive_tok.id == .nl) return; if (directive_tok.id == .nl) return;
const gcc_pragma = std.meta.stringToEnum(Directive, pp.expandedSlice(directive_tok)) orelse const gcc_pragma = std.meta.stringToEnum(Directive, pp.expandedSlice(directive_tok)) orelse {
return pp.comp.addDiagnostic(.{ return Pragma.err(pp, start_idx + 1, .unknown_gcc_pragma, .{});
.tag = .unknown_gcc_pragma, };
.loc = directive_tok.loc,
}, pp.expansionSlice(start_idx + 1));
switch (gcc_pragma) { switch (gcc_pragma) {
.warning, .@"error" => { .warning, .@"error" => {
const text = Pragma.pasteTokens(pp, start_idx + 2) catch |err| switch (err) { const text = Pragma.pasteTokens(pp, start_idx + 2) catch |err| switch (err) {
error.ExpectedStringLiteral => { error.ExpectedStringLiteral => {
return pp.comp.addDiagnostic(.{ return Pragma.err(pp, start_idx + 1, .pragma_requires_string_literal, .{@tagName(gcc_pragma)});
.tag = .pragma_requires_string_literal,
.loc = directive_tok.loc,
.extra = .{ .str = @tagName(gcc_pragma) },
}, pp.expansionSlice(start_idx + 1));
}, },
else => |e| return e, else => |e| return e,
}; };
const extra = Diagnostics.Message.Extra{ .str = try pp.comp.diagnostics.arena.allocator().dupe(u8, text) };
const diagnostic_tag: Diagnostics.Tag = if (gcc_pragma == .warning) .pragma_warning_message else .pragma_error_message; return Pragma.err(pp, start_idx + 1, if (gcc_pragma == .warning) .pragma_warning_message else .pragma_error_message, .{text});
return pp.comp.addDiagnostic(
.{ .tag = diagnostic_tag, .loc = directive_tok.loc, .extra = extra },
pp.expansionSlice(start_idx + 1),
);
}, },
.diagnostic => return self.diagnosticHandler(pp, start_idx + 2) catch |err| switch (err) { .diagnostic => return self.diagnosticHandler(pp, start_idx + 2) catch |err| switch (err) {
error.UnknownPragma => { error.UnknownPragma => {
const tok = pp.tokens.get(start_idx + 2); return Pragma.err(pp, start_idx + 2, .unknown_gcc_pragma_directive, .{});
return pp.comp.addDiagnostic(.{
.tag = .unknown_gcc_pragma_directive,
.loc = tok.loc,
}, pp.expansionSlice(start_idx + 2));
}, },
else => |e| return e, else => |e| return e,
}, },
@ -154,17 +132,11 @@ fn preprocessorHandler(pragma: *Pragma, pp: *Preprocessor, start_idx: TokenIndex
if (tok.id == .nl) break; if (tok.id == .nl) break;
if (!tok.id.isMacroIdentifier()) { if (!tok.id.isMacroIdentifier()) {
return pp.comp.addDiagnostic(.{ return Pragma.err(pp, start_idx + i, .pragma_poison_identifier, .{});
.tag = .pragma_poison_identifier,
.loc = tok.loc,
}, pp.expansionSlice(start_idx + i));
} }
const str = pp.expandedSlice(tok); const str = pp.expandedSlice(tok);
if (pp.defines.get(str) != null) { if (pp.defines.get(str) != null) {
try pp.comp.addDiagnostic(.{ try Pragma.err(pp, start_idx + i, .pragma_poison_macro, .{});
.tag = .pragma_poison_macro,
.loc = tok.loc,
}, pp.expansionSlice(start_idx + i));
} }
try pp.poisoned_identifiers.put(str, {}); try pp.poisoned_identifiers.put(str, {});
} }

View File

@ -1,12 +1,13 @@
const std = @import("std"); const std = @import("std");
const mem = std.mem; const mem = std.mem;
const Compilation = @import("../Compilation.zig"); const Compilation = @import("../Compilation.zig");
const Pragma = @import("../Pragma.zig");
const Diagnostics = @import("../Diagnostics.zig"); const Diagnostics = @import("../Diagnostics.zig");
const Preprocessor = @import("../Preprocessor.zig");
const Parser = @import("../Parser.zig"); const Parser = @import("../Parser.zig");
const TokenIndex = @import("../Tree.zig").TokenIndex; const Pragma = @import("../Pragma.zig");
const Preprocessor = @import("../Preprocessor.zig");
const Source = @import("../Source.zig"); const Source = @import("../Source.zig");
const TokenIndex = @import("../Tree.zig").TokenIndex;
const Message = @This(); const Message = @This();
@ -27,24 +28,32 @@ fn deinit(pragma: *Pragma, comp: *Compilation) void {
} }
fn preprocessorHandler(_: *Pragma, pp: *Preprocessor, start_idx: TokenIndex) Pragma.Error!void { fn preprocessorHandler(_: *Pragma, pp: *Preprocessor, start_idx: TokenIndex) Pragma.Error!void {
const message_tok = pp.tokens.get(start_idx);
const message_expansion_locs = pp.expansionSlice(start_idx);
const str = Pragma.pasteTokens(pp, start_idx + 1) catch |err| switch (err) { const str = Pragma.pasteTokens(pp, start_idx + 1) catch |err| switch (err) {
error.ExpectedStringLiteral => { error.ExpectedStringLiteral => {
return pp.comp.addDiagnostic(.{ return Pragma.err(pp, start_idx, .pragma_requires_string_literal, .{"message"});
.tag = .pragma_requires_string_literal,
.loc = message_tok.loc,
.extra = .{ .str = "message" },
}, message_expansion_locs);
}, },
else => |e| return e, else => |e| return e,
}; };
const message_tok = pp.tokens.get(start_idx);
const message_expansion_locs = pp.expansionSlice(start_idx);
const loc = if (message_expansion_locs.len != 0) const loc = if (message_expansion_locs.len != 0)
message_expansion_locs[message_expansion_locs.len - 1] message_expansion_locs[message_expansion_locs.len - 1]
else else
message_tok.loc; message_tok.loc;
const extra = Diagnostics.Message.Extra{ .str = try pp.comp.diagnostics.arena.allocator().dupe(u8, str) };
return pp.comp.addDiagnostic(.{ .tag = .pragma_message, .loc = loc, .extra = extra }, &.{}); const diagnostic: Pragma.Diagnostic = .pragma_message;
var sf = std.heap.stackFallback(1024, pp.gpa);
var allocating: std.Io.Writer.Allocating = .init(sf.get());
defer allocating.deinit();
Diagnostics.formatArgs(&allocating.writer, diagnostic.fmt, .{str}) catch return error.OutOfMemory;
try pp.diagnostics.add(.{
.text = allocating.getWritten(),
.kind = diagnostic.kind,
.opt = diagnostic.opt,
.location = loc.expand(pp.comp),
});
} }

View File

@ -1,12 +1,13 @@
const std = @import("std"); const std = @import("std");
const mem = std.mem; const mem = std.mem;
const Compilation = @import("../Compilation.zig"); const Compilation = @import("../Compilation.zig");
const Pragma = @import("../Pragma.zig");
const Diagnostics = @import("../Diagnostics.zig"); const Diagnostics = @import("../Diagnostics.zig");
const Preprocessor = @import("../Preprocessor.zig");
const Parser = @import("../Parser.zig"); const Parser = @import("../Parser.zig");
const TokenIndex = @import("../Tree.zig").TokenIndex; const Pragma = @import("../Pragma.zig");
const Preprocessor = @import("../Preprocessor.zig");
const Source = @import("../Source.zig"); const Source = @import("../Source.zig");
const TokenIndex = @import("../Tree.zig").TokenIndex;
const Once = @This(); const Once = @This();
@ -14,6 +15,7 @@ pragma: Pragma = .{
.afterParse = afterParse, .afterParse = afterParse,
.deinit = deinit, .deinit = deinit,
.preprocessorHandler = preprocessorHandler, .preprocessorHandler = preprocessorHandler,
.preserveTokens = preserveTokens,
}, },
pragma_once: std.AutoHashMap(Source.Id, void), pragma_once: std.AutoHashMap(Source.Id, void),
preprocess_count: u32 = 0, preprocess_count: u32 = 0,
@ -42,10 +44,13 @@ fn preprocessorHandler(pragma: *Pragma, pp: *Preprocessor, start_idx: TokenIndex
const name_tok = pp.tokens.get(start_idx); const name_tok = pp.tokens.get(start_idx);
const next = pp.tokens.get(start_idx + 1); const next = pp.tokens.get(start_idx + 1);
if (next.id != .nl) { if (next.id != .nl) {
try pp.comp.addDiagnostic(.{ const diagnostic: Preprocessor.Diagnostic = .extra_tokens_directive_end;
.tag = .extra_tokens_directive_end, return pp.diagnostics.addWithLocation(pp.comp, .{
.loc = name_tok.loc, .text = diagnostic.fmt,
}, pp.expansionSlice(start_idx + 1)); .kind = diagnostic.kind,
.opt = diagnostic.opt,
.location = name_tok.loc.expand(pp.comp),
}, pp.expansionSlice(start_idx + 1), true);
} }
const seen = self.preprocess_count == pp.preprocess_count; const seen = self.preprocess_count == pp.preprocess_count;
const prev = try self.pragma_once.fetchPut(name_tok.loc.id, {}); const prev = try self.pragma_once.fetchPut(name_tok.loc.id, {});
@ -54,3 +59,7 @@ fn preprocessorHandler(pragma: *Pragma, pp: *Preprocessor, start_idx: TokenIndex
} }
self.preprocess_count = pp.preprocess_count; self.preprocess_count = pp.preprocess_count;
} }
fn preserveTokens(_: *Pragma, _: *Preprocessor, _: TokenIndex) bool {
return false;
}

View File

@ -1,10 +1,11 @@
const std = @import("std"); const std = @import("std");
const mem = std.mem; const mem = std.mem;
const Compilation = @import("../Compilation.zig"); const Compilation = @import("../Compilation.zig");
const Pragma = @import("../Pragma.zig");
const Diagnostics = @import("../Diagnostics.zig"); const Diagnostics = @import("../Diagnostics.zig");
const Preprocessor = @import("../Preprocessor.zig");
const Parser = @import("../Parser.zig"); const Parser = @import("../Parser.zig");
const Pragma = @import("../Pragma.zig");
const Preprocessor = @import("../Preprocessor.zig");
const Tree = @import("../Tree.zig"); const Tree = @import("../Tree.zig");
const TokenIndex = Tree.TokenIndex; const TokenIndex = Tree.TokenIndex;
@ -13,9 +14,8 @@ const Pack = @This();
pragma: Pragma = .{ pragma: Pragma = .{
.deinit = deinit, .deinit = deinit,
.parserHandler = parserHandler, .parserHandler = parserHandler,
.preserveTokens = preserveTokens,
}, },
stack: std.ArrayListUnmanaged(struct { label: []const u8, val: u8 }) = .empty, stack: std.ArrayListUnmanaged(struct { label: []const u8, val: u8 }) = .{},
pub fn init(allocator: mem.Allocator) !*Pragma { pub fn init(allocator: mem.Allocator) !*Pragma {
var pack = try allocator.create(Pack); var pack = try allocator.create(Pack);
@ -34,10 +34,7 @@ fn parserHandler(pragma: *Pragma, p: *Parser, start_idx: TokenIndex) Compilation
var idx = start_idx + 1; var idx = start_idx + 1;
const l_paren = p.pp.tokens.get(idx); const l_paren = p.pp.tokens.get(idx);
if (l_paren.id != .l_paren) { if (l_paren.id != .l_paren) {
return p.comp.addDiagnostic(.{ return Pragma.err(p.pp, idx, .pragma_pack_lparen, .{});
.tag = .pragma_pack_lparen,
.loc = l_paren.loc,
}, p.pp.expansionSlice(idx));
} }
idx += 1; idx += 1;
@ -54,11 +51,11 @@ fn parserHandler(pragma: *Pragma, p: *Parser, start_idx: TokenIndex) Compilation
pop, pop,
}; };
const action = std.meta.stringToEnum(Action, p.tokSlice(arg)) orelse { const action = std.meta.stringToEnum(Action, p.tokSlice(arg)) orelse {
return p.errTok(.pragma_pack_unknown_action, arg); return Pragma.err(p.pp, arg, .pragma_pack_unknown_action, .{});
}; };
switch (action) { switch (action) {
.show => { .show => {
try p.errExtra(.pragma_pack_show, arg, .{ .unsigned = p.pragma_pack orelse 8 }); return Pragma.err(p.pp, arg, .pragma_pack_show, .{p.pragma_pack orelse 8});
}, },
.push, .pop => { .push, .pop => {
var new_val: ?u8 = null; var new_val: ?u8 = null;
@ -75,11 +72,13 @@ fn parserHandler(pragma: *Pragma, p: *Parser, start_idx: TokenIndex) Compilation
idx += 1; idx += 1;
const int = idx; const int = idx;
idx += 1; idx += 1;
if (tok_ids[int] != .pp_num) return p.errTok(.pragma_pack_int_ident, int); if (tok_ids[int] != .pp_num) {
return Pragma.err(p.pp, int, .pragma_pack_int_ident, .{});
}
new_val = (try packInt(p, int)) orelse return; new_val = (try packInt(p, int)) orelse return;
} }
}, },
else => return p.errTok(.pragma_pack_int_ident, next), else => return Pragma.err(p.pp, next, .pragma_pack_int_ident, .{}),
} }
} }
if (action == .push) { if (action == .push) {
@ -87,9 +86,9 @@ fn parserHandler(pragma: *Pragma, p: *Parser, start_idx: TokenIndex) Compilation
} else { } else {
pack.pop(p, label); pack.pop(p, label);
if (new_val != null) { if (new_val != null) {
try p.errTok(.pragma_pack_undefined_pop, arg); try Pragma.err(p.pp, arg, .pragma_pack_undefined_pop, .{});
} else if (pack.stack.items.len == 0) { } else if (pack.stack.items.len == 0) {
try p.errTok(.pragma_pack_empty_stack, arg); try Pragma.err(p.pp, arg, .pragma_pack_empty_stack, .{});
} }
} }
if (new_val) |some| { if (new_val) |some| {
@ -115,14 +114,14 @@ fn parserHandler(pragma: *Pragma, p: *Parser, start_idx: TokenIndex) Compilation
} }
if (tok_ids[idx] != .r_paren) { if (tok_ids[idx] != .r_paren) {
return p.errTok(.pragma_pack_rparen, idx); return Pragma.err(p.pp, idx, .pragma_pack_rparen, .{});
} }
} }
fn packInt(p: *Parser, tok_i: TokenIndex) Compilation.Error!?u8 { fn packInt(p: *Parser, tok_i: TokenIndex) Compilation.Error!?u8 {
const res = p.parseNumberToken(tok_i) catch |err| switch (err) { const res = p.parseNumberToken(tok_i) catch |err| switch (err) {
error.ParsingFailed => { error.ParsingFailed => {
try p.errTok(.pragma_pack_int, tok_i); try Pragma.err(p.pp, tok_i, .pragma_pack_int, .{});
return null; return null;
}, },
else => |e| return e, else => |e| return e,
@ -131,7 +130,7 @@ fn packInt(p: *Parser, tok_i: TokenIndex) Compilation.Error!?u8 {
switch (int) { switch (int) {
1, 2, 4, 8, 16 => return @intCast(int), 1, 2, 4, 8, 16 => return @intCast(int),
else => { else => {
try p.errTok(.pragma_pack_int, tok_i); try Pragma.err(p.pp, tok_i, .pragma_pack_int, .{});
return null; return null;
}, },
} }
@ -156,9 +155,3 @@ fn pop(pack: *Pack, p: *Parser, maybe_label: ?[]const u8) void {
p.pragma_pack = prev.val; p.pragma_pack = prev.val;
} }
} }
fn preserveTokens(_: *Pragma, pp: *Preprocessor, start_idx: TokenIndex) bool {
_ = pp;
_ = start_idx;
return true;
}

View File

@ -2,15 +2,18 @@
//! Licensed under MIT license: https://github.com/mahkoh/repr-c/tree/master/repc/facade //! Licensed under MIT license: https://github.com/mahkoh/repr-c/tree/master/repc/facade
const std = @import("std"); const std = @import("std");
const Type = @import("Type.zig");
const Attribute = @import("Attribute.zig"); const Attribute = @import("Attribute.zig");
const Compilation = @import("Compilation.zig"); const Compilation = @import("Compilation.zig");
const Parser = @import("Parser.zig"); const Parser = @import("Parser.zig");
const target_util = @import("target.zig");
const TypeStore = @import("TypeStore.zig");
const QualType = TypeStore.QualType;
const Type = TypeStore.Type;
const Record = Type.Record; const Record = Type.Record;
const Field = Record.Field; const Field = Record.Field;
const TypeLayout = Type.TypeLayout; const RecordLayout = Type.Record.Layout;
const FieldLayout = Type.FieldLayout; const FieldLayout = Type.Record.Field.Layout;
const target_util = @import("target.zig");
const BITS_PER_BYTE = 8; const BITS_PER_BYTE = 8;
@ -42,36 +45,33 @@ const SysVContext = struct {
comp: *const Compilation, comp: *const Compilation,
fn init(ty: Type, comp: *const Compilation, pragma_pack: ?u8) SysVContext { fn init(qt: QualType, comp: *const Compilation, pragma_pack: ?u8) SysVContext {
const pack_value: ?u64 = if (pragma_pack) |pak| @as(u64, pak) * BITS_PER_BYTE else null; const pack_value: ?u64 = if (pragma_pack) |pak| @as(u64, pak) * BITS_PER_BYTE else null;
const req_align = @as(u32, (ty.requestedAlignment(comp) orelse 1)) * BITS_PER_BYTE; const req_align = @as(u32, (qt.requestedAlignment(comp) orelse 1)) * BITS_PER_BYTE;
return SysVContext{ return SysVContext{
.attr_packed = ty.hasAttribute(.@"packed"), .attr_packed = qt.hasAttribute(comp, .@"packed"),
.max_field_align_bits = pack_value, .max_field_align_bits = pack_value,
.aligned_bits = req_align, .aligned_bits = req_align,
.is_union = ty.is(.@"union"), .is_union = qt.is(comp, .@"union"),
.size_bits = 0, .size_bits = 0,
.comp = comp, .comp = comp,
.ongoing_bitfield = null, .ongoing_bitfield = null,
}; };
} }
fn layoutFields(self: *SysVContext, rec: *const Record) !void { fn layoutFields(self: *SysVContext, fields: []Type.Record.Field) !void {
for (rec.fields, 0..) |*fld, fld_indx| { for (fields) |*field| {
if (fld.ty.specifier == .invalid) continue; if (field.qt.isInvalid()) continue;
const type_layout = computeLayout(fld.ty, self.comp); const type_layout = computeLayout(field.qt, self.comp);
var field_attrs: ?[]const Attribute = null; const attributes = field.attributes(self.comp);
if (rec.field_attributes) |attrs| {
field_attrs = attrs[fld_indx];
}
if (self.comp.target.isMinGW()) { if (self.comp.target.isMinGW()) {
fld.layout = try self.layoutMinGWField(fld, field_attrs, type_layout); field.layout = try self.layoutMinGWField(field, attributes, type_layout);
} else { } else {
if (fld.isRegularField()) { if (field.bit_width.unpack()) |bit_width| {
fld.layout = try self.layoutRegularField(field_attrs, type_layout); field.layout = try self.layoutBitField(attributes, type_layout, field.name_tok != 0, bit_width);
} else { } else {
fld.layout = try self.layoutBitField(field_attrs, type_layout, fld.isNamed(), fld.specifiedBitWidth()); field.layout = try self.layoutRegularField(attributes, type_layout);
} }
} }
} }
@ -83,7 +83,7 @@ const SysVContext = struct {
/// - the field is a bit-field and the previous field was a non-zero-sized bit-field with the same type size /// - the field is a bit-field and the previous field was a non-zero-sized bit-field with the same type size
/// - the field is a zero-sized bit-field and the previous field was not a non-zero-sized bit-field /// - the field is a zero-sized bit-field and the previous field was not a non-zero-sized bit-field
/// See test case 0068. /// See test case 0068.
fn ignoreTypeAlignment(is_attr_packed: bool, bit_width: ?u32, ongoing_bitfield: ?OngoingBitfield, fld_layout: TypeLayout) bool { fn ignoreTypeAlignment(is_attr_packed: bool, bit_width: ?u32, ongoing_bitfield: ?OngoingBitfield, fld_layout: RecordLayout) bool {
if (is_attr_packed) return true; if (is_attr_packed) return true;
if (bit_width) |width| { if (bit_width) |width| {
if (ongoing_bitfield) |ongoing| { if (ongoing_bitfield) |ongoing| {
@ -98,12 +98,12 @@ const SysVContext = struct {
fn layoutMinGWField( fn layoutMinGWField(
self: *SysVContext, self: *SysVContext,
field: *const Field, field: *const Field,
field_attrs: ?[]const Attribute, field_attrs: []const Attribute,
field_layout: TypeLayout, field_layout: RecordLayout,
) !FieldLayout { ) !FieldLayout {
const annotation_alignment_bits = BITS_PER_BYTE * @as(u32, (Type.annotationAlignment(self.comp, Attribute.Iterator.initSlice(field_attrs)) orelse 1)); const annotation_alignment_bits = BITS_PER_BYTE * (QualType.annotationAlignment(self.comp, Attribute.Iterator.initSlice(field_attrs)) orelse 1);
const is_attr_packed = self.attr_packed or isPacked(field_attrs); const is_attr_packed = self.attr_packed or isPacked(field_attrs);
const ignore_type_alignment = ignoreTypeAlignment(is_attr_packed, field.bit_width, self.ongoing_bitfield, field_layout); const ignore_type_alignment = ignoreTypeAlignment(is_attr_packed, field.bit_width.unpack(), self.ongoing_bitfield, field_layout);
var field_alignment_bits: u64 = field_layout.field_alignment_bits; var field_alignment_bits: u64 = field_layout.field_alignment_bits;
if (ignore_type_alignment) { if (ignore_type_alignment) {
@ -120,16 +120,16 @@ const SysVContext = struct {
// - the field is a non-zero-width bit-field and not packed. // - the field is a non-zero-width bit-field and not packed.
// See test case 0069. // See test case 0069.
const update_record_alignment = const update_record_alignment =
field.isRegularField() or field.bit_width == .null or
(field.specifiedBitWidth() == 0 and self.ongoing_bitfield != null) or (field.bit_width.unpack().? == 0 and self.ongoing_bitfield != null) or
(field.specifiedBitWidth() != 0 and !is_attr_packed); (field.bit_width.unpack().? != 0 and !is_attr_packed);
// If a field affects the alignment of a record, the alignment is calculated in the // If a field affects the alignment of a record, the alignment is calculated in the
// usual way except that __attribute__((packed)) is ignored on a zero-width bit-field. // usual way except that __attribute__((packed)) is ignored on a zero-width bit-field.
// See test case 0068. // See test case 0068.
if (update_record_alignment) { if (update_record_alignment) {
var ty_alignment_bits = field_layout.field_alignment_bits; var ty_alignment_bits = field_layout.field_alignment_bits;
if (is_attr_packed and (field.isRegularField() or field.specifiedBitWidth() != 0)) { if (is_attr_packed and (field.bit_width == .null or field.bit_width.unpack().? != 0)) {
ty_alignment_bits = BITS_PER_BYTE; ty_alignment_bits = BITS_PER_BYTE;
} }
ty_alignment_bits = @max(ty_alignment_bits, annotation_alignment_bits); ty_alignment_bits = @max(ty_alignment_bits, annotation_alignment_bits);
@ -145,10 +145,10 @@ const SysVContext = struct {
// @attr_packed _ { size: 64, alignment: 64 }long long:0, // @attr_packed _ { size: 64, alignment: 64 }long long:0,
// { offset: 8, size: 8 }d { size: 8, alignment: 8 }char, // { offset: 8, size: 8 }d { size: 8, alignment: 8 }char,
// } // }
if (field.isRegularField()) { if (field.bit_width.unpack()) |bit_width| {
return self.layoutRegularFieldMinGW(field_layout.size_bits, field_alignment_bits); return self.layoutBitFieldMinGW(field_layout.size_bits, field_alignment_bits, field.name_tok != 0, bit_width);
} else { } else {
return self.layoutBitFieldMinGW(field_layout.size_bits, field_alignment_bits, field.isNamed(), field.specifiedBitWidth()); return self.layoutRegularFieldMinGW(field_layout.size_bits, field_alignment_bits);
} }
} }
@ -227,8 +227,8 @@ const SysVContext = struct {
fn layoutRegularField( fn layoutRegularField(
self: *SysVContext, self: *SysVContext,
fld_attrs: ?[]const Attribute, fld_attrs: []const Attribute,
fld_layout: TypeLayout, fld_layout: RecordLayout,
) !FieldLayout { ) !FieldLayout {
var fld_align_bits = fld_layout.field_alignment_bits; var fld_align_bits = fld_layout.field_alignment_bits;
@ -240,7 +240,7 @@ const SysVContext = struct {
// The field alignment can be increased by __attribute__((aligned)) annotations on the // The field alignment can be increased by __attribute__((aligned)) annotations on the
// field. See test case 0085. // field. See test case 0085.
if (Type.annotationAlignment(self.comp, Attribute.Iterator.initSlice(fld_attrs))) |anno| { if (QualType.annotationAlignment(self.comp, Attribute.Iterator.initSlice(fld_attrs))) |anno| {
fld_align_bits = @max(fld_align_bits, @as(u32, anno) * BITS_PER_BYTE); fld_align_bits = @max(fld_align_bits, @as(u32, anno) * BITS_PER_BYTE);
} }
@ -268,8 +268,8 @@ const SysVContext = struct {
fn layoutBitField( fn layoutBitField(
self: *SysVContext, self: *SysVContext,
fld_attrs: ?[]const Attribute, fld_attrs: []const Attribute,
fld_layout: TypeLayout, fld_layout: RecordLayout,
is_named: bool, is_named: bool,
bit_width: u64, bit_width: u64,
) !FieldLayout { ) !FieldLayout {
@ -302,7 +302,7 @@ const SysVContext = struct {
const attr_packed = self.attr_packed or isPacked(fld_attrs); const attr_packed = self.attr_packed or isPacked(fld_attrs);
const has_packing_annotation = attr_packed or self.max_field_align_bits != null; const has_packing_annotation = attr_packed or self.max_field_align_bits != null;
const annotation_alignment = if (Type.annotationAlignment(self.comp, Attribute.Iterator.initSlice(fld_attrs))) |anno| @as(u32, anno) * BITS_PER_BYTE else 1; const annotation_alignment = if (QualType.annotationAlignment(self.comp, Attribute.Iterator.initSlice(fld_attrs))) |anno| @as(u32, anno) * BITS_PER_BYTE else 1;
const first_unused_bit: u64 = if (self.is_union) 0 else self.size_bits; const first_unused_bit: u64 = if (self.is_union) 0 else self.size_bits;
var field_align_bits: u64 = 1; var field_align_bits: u64 = 1;
@ -403,9 +403,9 @@ const MsvcContext = struct {
is_union: bool, is_union: bool,
comp: *const Compilation, comp: *const Compilation,
fn init(ty: Type, comp: *const Compilation, pragma_pack: ?u8) MsvcContext { fn init(qt: QualType, comp: *const Compilation, pragma_pack: ?u8) MsvcContext {
var pack_value: ?u32 = null; var pack_value: ?u32 = null;
if (ty.hasAttribute(.@"packed")) { if (qt.hasAttribute(comp, .@"packed")) {
// __attribute__((packed)) behaves like #pragma pack(1) in clang. See test case 0056. // __attribute__((packed)) behaves like #pragma pack(1) in clang. See test case 0056.
pack_value = BITS_PER_BYTE; pack_value = BITS_PER_BYTE;
} }
@ -420,8 +420,8 @@ const MsvcContext = struct {
// The required alignment can be increased by adding a __declspec(align) // The required alignment can be increased by adding a __declspec(align)
// annotation. See test case 0023. // annotation. See test case 0023.
const must_align = @as(u32, (ty.requestedAlignment(comp) orelse 1)) * BITS_PER_BYTE; const must_align = @as(u32, (qt.requestedAlignment(comp) orelse 1)) * BITS_PER_BYTE;
return MsvcContext{ return .{
.req_align_bits = must_align, .req_align_bits = must_align,
.pointer_align_bits = must_align, .pointer_align_bits = must_align,
.field_align_bits = must_align, .field_align_bits = must_align,
@ -429,26 +429,26 @@ const MsvcContext = struct {
.max_field_align_bits = pack_value, .max_field_align_bits = pack_value,
.ongoing_bitfield = null, .ongoing_bitfield = null,
.contains_non_bitfield = false, .contains_non_bitfield = false,
.is_union = ty.is(.@"union"), .is_union = qt.is(comp, .@"union"),
.comp = comp, .comp = comp,
}; };
} }
fn layoutField(self: *MsvcContext, fld: *const Field, fld_attrs: ?[]const Attribute) !FieldLayout { fn layoutField(self: *MsvcContext, fld: *const Field, fld_attrs: []const Attribute) !FieldLayout {
const type_layout = computeLayout(fld.ty, self.comp); const type_layout = computeLayout(fld.qt, self.comp);
// The required alignment of the field is the maximum of the required alignment of the // The required alignment of the field is the maximum of the required alignment of the
// underlying type and the __declspec(align) annotation on the field itself. // underlying type and the __declspec(align) annotation on the field itself.
// See test case 0028. // See test case 0028.
var req_align = type_layout.required_alignment_bits; var req_align = type_layout.required_alignment_bits;
if (Type.annotationAlignment(self.comp, Attribute.Iterator.initSlice(fld_attrs))) |anno| { if (QualType.annotationAlignment(self.comp, Attribute.Iterator.initSlice(fld_attrs))) |anno| {
req_align = @max(@as(u32, anno) * BITS_PER_BYTE, req_align); req_align = @max(@as(u32, anno) * BITS_PER_BYTE, req_align);
} }
// The required alignment of a record is the maximum of the required alignments of its // The required alignment of a record is the maximum of the required alignments of its
// fields except that the required alignment of bitfields is ignored. // fields except that the required alignment of bitfields is ignored.
// See test case 0029. // See test case 0029.
if (fld.isRegularField()) { if (fld.bit_width == .null) {
self.req_align_bits = @max(self.req_align_bits, req_align); self.req_align_bits = @max(self.req_align_bits, req_align);
} }
@ -459,7 +459,7 @@ const MsvcContext = struct {
fld_align_bits = @min(fld_align_bits, max_align); fld_align_bits = @min(fld_align_bits, max_align);
} }
// check the requested alignment of the field type. // check the requested alignment of the field type.
if (fld.ty.requestedAlignment(self.comp)) |type_req_align| { if (fld.qt.requestedAlignment(self.comp)) |type_req_align| {
fld_align_bits = @max(fld_align_bits, type_req_align * 8); fld_align_bits = @max(fld_align_bits, type_req_align * 8);
} }
@ -471,10 +471,10 @@ const MsvcContext = struct {
// __attribute__((packed)) on a field is a clang extension. It behaves as if #pragma // __attribute__((packed)) on a field is a clang extension. It behaves as if #pragma
// pack(1) had been applied only to this field. See test case 0057. // pack(1) had been applied only to this field. See test case 0057.
fld_align_bits = @max(fld_align_bits, req_align); fld_align_bits = @max(fld_align_bits, req_align);
if (fld.isRegularField()) { if (fld.bit_width.unpack()) |bit_width| {
return self.layoutRegularField(type_layout.size_bits, fld_align_bits); return self.layoutBitField(type_layout.size_bits, fld_align_bits, bit_width);
} else { } else {
return self.layoutBitField(type_layout.size_bits, fld_align_bits, fld.specifiedBitWidth()); return self.layoutRegularField(type_layout.size_bits, fld_align_bits);
} }
} }
@ -567,16 +567,16 @@ const MsvcContext = struct {
} }
}; };
pub fn compute(rec: *Type.Record, ty: Type, comp: *const Compilation, pragma_pack: ?u8) Error!void { pub fn compute(fields: []Type.Record.Field, qt: QualType, comp: *const Compilation, pragma_pack: ?u8) Error!Type.Record.Layout {
switch (comp.langopts.emulate) { switch (comp.langopts.emulate) {
.gcc, .clang => { .gcc, .clang => {
var context = SysVContext.init(ty, comp, pragma_pack); var context = SysVContext.init(qt, comp, pragma_pack);
try context.layoutFields(rec); try context.layoutFields(fields);
context.size_bits = try alignForward(context.size_bits, context.aligned_bits); context.size_bits = try alignForward(context.size_bits, context.aligned_bits);
rec.type_layout = .{ return .{
.size_bits = context.size_bits, .size_bits = context.size_bits,
.field_alignment_bits = context.aligned_bits, .field_alignment_bits = context.aligned_bits,
.pointer_alignment_bits = context.aligned_bits, .pointer_alignment_bits = context.aligned_bits,
@ -584,15 +584,10 @@ pub fn compute(rec: *Type.Record, ty: Type, comp: *const Compilation, pragma_pac
}; };
}, },
.msvc => { .msvc => {
var context = MsvcContext.init(ty, comp, pragma_pack); var context = MsvcContext.init(qt, comp, pragma_pack);
for (rec.fields, 0..) |*fld, fld_indx| { for (fields) |*field| {
if (fld.ty.specifier == .invalid) continue; if (field.qt.isInvalid()) continue;
var field_attrs: ?[]const Attribute = null; field.layout = try context.layoutField(field, field.attributes(comp));
if (rec.field_attributes) |attrs| {
field_attrs = attrs[fld_indx];
}
fld.layout = try context.layoutField(fld, field_attrs);
} }
if (context.size_bits == 0) { if (context.size_bits == 0) {
// As an extension, MSVC allows records that only contain zero-sized bitfields and empty // As an extension, MSVC allows records that only contain zero-sized bitfields and empty
@ -601,7 +596,7 @@ pub fn compute(rec: *Type.Record, ty: Type, comp: *const Compilation, pragma_pac
context.handleZeroSizedRecord(); context.handleZeroSizedRecord();
} }
context.size_bits = try alignForward(context.size_bits, context.pointer_align_bits); context.size_bits = try alignForward(context.size_bits, context.pointer_align_bits);
rec.type_layout = .{ return .{
.size_bits = context.size_bits, .size_bits = context.size_bits,
.field_alignment_bits = context.field_align_bits, .field_alignment_bits = context.field_align_bits,
.pointer_alignment_bits = context.pointer_align_bits, .pointer_alignment_bits = context.pointer_align_bits,
@ -611,23 +606,26 @@ pub fn compute(rec: *Type.Record, ty: Type, comp: *const Compilation, pragma_pac
} }
} }
fn computeLayout(ty: Type, comp: *const Compilation) TypeLayout { fn computeLayout(qt: QualType, comp: *const Compilation) RecordLayout {
if (ty.getRecord()) |rec| { switch (qt.base(comp).type) {
const requested = BITS_PER_BYTE * (ty.requestedAlignment(comp) orelse 0); .@"struct", .@"union" => |record| {
return .{ const requested = BITS_PER_BYTE * (qt.requestedAlignment(comp) orelse 0);
.size_bits = rec.type_layout.size_bits, return .{
.pointer_alignment_bits = @max(requested, rec.type_layout.pointer_alignment_bits), .size_bits = record.layout.?.size_bits,
.field_alignment_bits = @max(requested, rec.type_layout.field_alignment_bits), .pointer_alignment_bits = @max(requested, record.layout.?.pointer_alignment_bits),
.required_alignment_bits = rec.type_layout.required_alignment_bits, .field_alignment_bits = @max(requested, record.layout.?.field_alignment_bits),
}; .required_alignment_bits = record.layout.?.required_alignment_bits,
} else { };
const type_align = ty.alignof(comp) * BITS_PER_BYTE; },
return .{ else => {
.size_bits = ty.bitSizeof(comp) orelse 0, const type_align = qt.alignof(comp) * BITS_PER_BYTE;
.pointer_alignment_bits = type_align, return .{
.field_alignment_bits = type_align, .size_bits = qt.bitSizeofOrNull(comp) orelse 0,
.required_alignment_bits = BITS_PER_BYTE, .pointer_alignment_bits = type_align,
}; .field_alignment_bits = type_align,
.required_alignment_bits = BITS_PER_BYTE,
};
},
} }
} }

View File

@ -1,15 +1,18 @@
const std = @import("std"); const std = @import("std");
const backend = @import("../backend.zig");
const LangOpts = @import("LangOpts.zig"); const LangOpts = @import("LangOpts.zig");
const Type = @import("Type.zig");
const TargetSet = @import("Builtins/Properties.zig").TargetSet; const TargetSet = @import("Builtins/Properties.zig").TargetSet;
const QualType = @import("TypeStore.zig").QualType;
/// intmax_t for this target /// intmax_t for this target
pub fn intMaxType(target: std.Target) Type { pub fn intMaxType(target: std.Target) QualType {
switch (target.cpu.arch) { switch (target.cpu.arch) {
.aarch64, .aarch64,
.aarch64_be, .aarch64_be,
.sparc64, .sparc64,
=> if (target.os.tag != .openbsd) return .{ .specifier = .long }, => if (target.os.tag != .openbsd) return .long,
.bpfel, .bpfel,
.bpfeb, .bpfeb,
@ -19,28 +22,28 @@ pub fn intMaxType(target: std.Target) Type {
.powerpc64, .powerpc64,
.powerpc64le, .powerpc64le,
.ve, .ve,
=> return .{ .specifier = .long }, => return .long,
.x86_64 => switch (target.os.tag) { .x86_64 => switch (target.os.tag) {
.windows, .openbsd => {}, .windows, .openbsd => {},
else => switch (target.abi) { else => switch (target.abi) {
.gnux32, .muslx32 => {}, .gnux32, .muslx32 => {},
else => return .{ .specifier = .long }, else => return .long,
}, },
}, },
else => {}, else => {},
} }
return .{ .specifier = .long_long }; return .long_long;
} }
/// intptr_t for this target /// intptr_t for this target
pub fn intPtrType(target: std.Target) Type { pub fn intPtrType(target: std.Target) QualType {
if (target.os.tag == .haiku) return .{ .specifier = .long }; if (target.os.tag == .haiku) return .long;
switch (target.cpu.arch) { switch (target.cpu.arch) {
.aarch64, .aarch64_be => switch (target.os.tag) { .aarch64, .aarch64_be => switch (target.os.tag) {
.windows => return .{ .specifier = .long_long }, .windows => return .long_long,
else => {}, else => {},
}, },
@ -55,28 +58,28 @@ pub fn intPtrType(target: std.Target) Type {
.spirv32, .spirv32,
.arc, .arc,
.avr, .avr,
=> return .{ .specifier = .int }, => return .int,
.sparc => switch (target.os.tag) { .sparc => switch (target.os.tag) {
.netbsd, .openbsd => {}, .netbsd, .openbsd => {},
else => return .{ .specifier = .int }, else => return .int,
}, },
.powerpc, .powerpcle => switch (target.os.tag) { .powerpc, .powerpcle => switch (target.os.tag) {
.linux, .freebsd, .netbsd => return .{ .specifier = .int }, .linux, .freebsd, .netbsd => return .int,
else => {}, else => {},
}, },
// 32-bit x86 Darwin, OpenBSD, and RTEMS use long (the default); others use int // 32-bit x86 Darwin, OpenBSD, and RTEMS use long (the default); others use int
.x86 => switch (target.os.tag) { .x86 => switch (target.os.tag) {
.openbsd, .rtems => {}, .openbsd, .rtems => {},
else => if (!target.os.tag.isDarwin()) return .{ .specifier = .int }, else => if (!target.os.tag.isDarwin()) return .int,
}, },
.x86_64 => switch (target.os.tag) { .x86_64 => switch (target.os.tag) {
.windows => return .{ .specifier = .long_long }, .windows => return .long_long,
else => switch (target.abi) { else => switch (target.abi) {
.gnux32, .muslx32 => return .{ .specifier = .int }, .gnux32, .muslx32 => return .int,
else => {}, else => {},
}, },
}, },
@ -84,29 +87,29 @@ pub fn intPtrType(target: std.Target) Type {
else => {}, else => {},
} }
return .{ .specifier = .long }; return .long;
} }
/// int16_t for this target /// int16_t for this target
pub fn int16Type(target: std.Target) Type { pub fn int16Type(target: std.Target) QualType {
return switch (target.cpu.arch) { return switch (target.cpu.arch) {
.avr => .{ .specifier = .int }, .avr => .int,
else => .{ .specifier = .short }, else => .short,
}; };
} }
/// sig_atomic_t for this target /// sig_atomic_t for this target
pub fn sigAtomicType(target: std.Target) Type { pub fn sigAtomicType(target: std.Target) QualType {
if (target.cpu.arch.isWasm()) return .{ .specifier = .long }; if (target.cpu.arch.isWasm()) return .long;
return switch (target.cpu.arch) { return switch (target.cpu.arch) {
.avr => .{ .specifier = .schar }, .avr => .schar,
.msp430 => .{ .specifier = .long }, .msp430 => .long,
else => .{ .specifier = .int }, else => .int,
}; };
} }
/// int64_t for this target /// int64_t for this target
pub fn int64Type(target: std.Target) Type { pub fn int64Type(target: std.Target) QualType {
switch (target.cpu.arch) { switch (target.cpu.arch) {
.loongarch64, .loongarch64,
.ve, .ve,
@ -116,20 +119,20 @@ pub fn int64Type(target: std.Target) Type {
.powerpc64le, .powerpc64le,
.bpfel, .bpfel,
.bpfeb, .bpfeb,
=> return .{ .specifier = .long }, => return .long,
.sparc64 => return intMaxType(target), .sparc64 => return intMaxType(target),
.x86, .x86_64 => if (!target.os.tag.isDarwin()) return intMaxType(target), .x86, .x86_64 => if (!target.os.tag.isDarwin()) return intMaxType(target),
.aarch64, .aarch64_be => if (!target.os.tag.isDarwin() and target.os.tag != .openbsd and target.os.tag != .windows) return .{ .specifier = .long }, .aarch64, .aarch64_be => if (!target.os.tag.isDarwin() and target.os.tag != .openbsd and target.os.tag != .windows) return .long,
else => {}, else => {},
} }
return .{ .specifier = .long_long }; return .long_long;
} }
pub fn float80Type(target: std.Target) ?Type { pub fn float80Type(target: std.Target) ?QualType {
switch (target.cpu.arch) { switch (target.cpu.arch) {
.x86, .x86_64 => return .{ .specifier = .long_double }, .x86, .x86_64 => return .long_double,
else => {}, else => {},
} }
return null; return null;
@ -165,7 +168,7 @@ pub fn ignoreNonZeroSizedBitfieldTypeAlignment(target: std.Target) bool {
switch (target.cpu.arch) { switch (target.cpu.arch) {
.avr => return true, .avr => return true,
.arm => { .arm => {
if (target.cpu.has(.arm, .has_v7)) { if (std.Target.arm.featureSetHas(target.cpu.features, .has_v7)) {
switch (target.os.tag) { switch (target.os.tag) {
.ios => return true, .ios => return true,
else => return false, else => return false,
@ -188,7 +191,7 @@ pub fn minZeroWidthBitfieldAlignment(target: std.Target) ?u29 {
switch (target.cpu.arch) { switch (target.cpu.arch) {
.avr => return 8, .avr => return 8,
.arm => { .arm => {
if (target.cpu.has(.arm, .has_v7)) { if (std.Target.arm.featureSetHas(target.cpu.features, .has_v7)) {
switch (target.os.tag) { switch (target.os.tag) {
.ios => return 32, .ios => return 32,
else => return null, else => return null,
@ -206,7 +209,7 @@ pub fn unnamedFieldAffectsAlignment(target: std.Target) bool {
return true; return true;
}, },
.armeb => { .armeb => {
if (target.cpu.has(.arm, .has_v7)) { if (std.Target.arm.featureSetHas(target.cpu.features, .has_v7)) {
if (std.Target.Abi.default(target.cpu.arch, target.os.tag) == .eabi) return true; if (std.Target.Abi.default(target.cpu.arch, target.os.tag) == .eabi) return true;
} }
}, },
@ -233,7 +236,7 @@ pub fn defaultAlignment(target: std.Target) u29 {
switch (target.cpu.arch) { switch (target.cpu.arch) {
.avr => return 1, .avr => return 1,
.arm => if (target.abi.isAndroid() or target.os.tag == .ios) return 16 else return 8, .arm => if (target.abi.isAndroid() or target.os.tag == .ios) return 16 else return 8,
.sparc => if (target.cpu.has(.sparc, .v9)) return 16 else return 8, .sparc => if (std.Target.sparc.featureSetHas(target.cpu.features, .v9)) return 16 else return 8,
.mips, .mipsel => switch (target.abi) { .mips, .mipsel => switch (target.abi) {
.none, .gnuabi64 => return 16, .none, .gnuabi64 => return 16,
else => return 8, else => return 8,
@ -245,7 +248,8 @@ pub fn defaultAlignment(target: std.Target) u29 {
pub fn systemCompiler(target: std.Target) LangOpts.Compiler { pub fn systemCompiler(target: std.Target) LangOpts.Compiler {
// Android is linux but not gcc, so these checks go first // Android is linux but not gcc, so these checks go first
// the rest for documentation as fn returns .clang // the rest for documentation as fn returns .clang
if (target.abi.isAndroid() or if (target.os.tag.isDarwin() or
target.abi.isAndroid() or
target.os.tag.isBSD() or target.os.tag.isBSD() or
target.os.tag == .fuchsia or target.os.tag == .fuchsia or
target.os.tag == .solaris or target.os.tag == .solaris or
@ -271,7 +275,7 @@ pub fn systemCompiler(target: std.Target) LangOpts.Compiler {
pub fn hasFloat128(target: std.Target) bool { pub fn hasFloat128(target: std.Target) bool {
if (target.cpu.arch.isWasm()) return true; if (target.cpu.arch.isWasm()) return true;
if (target.os.tag.isDarwin()) return false; if (target.os.tag.isDarwin()) return false;
if (target.cpu.arch.isPowerPC()) return target.cpu.has(.powerpc, .float128); if (target.cpu.arch.isPowerPC()) return std.Target.powerpc.featureSetHas(target.cpu.features, .float128);
return switch (target.os.tag) { return switch (target.os.tag) {
.dragonfly, .dragonfly,
.haiku, .haiku,
@ -339,7 +343,7 @@ pub const FPSemantics = enum {
.spirv32, .spirv32,
.spirv64, .spirv64,
=> return .IEEEHalf, => return .IEEEHalf,
.x86, .x86_64 => if (target.cpu.has(.x86, .sse2)) return .IEEEHalf, .x86, .x86_64 => if (std.Target.x86.featureSetHas(target.cpu.features, .sse2)) return .IEEEHalf,
else => {}, else => {},
} }
return null; return null;
@ -374,6 +378,10 @@ pub fn isCygwinMinGW(target: std.Target) bool {
return target.os.tag == .windows and (target.abi == .gnu or target.abi == .cygnus); return target.os.tag == .windows and (target.abi == .gnu or target.abi == .cygnus);
} }
pub fn isPS(target: std.Target) bool {
return (target.os.tag == .ps4 or target.os.tag == .ps5) and target.cpu.arch == .x86_64;
}
pub fn builtinEnabled(target: std.Target, enabled_for: TargetSet) bool { pub fn builtinEnabled(target: std.Target, enabled_for: TargetSet) bool {
var it = enabled_for.iterator(); var it = enabled_for.iterator();
while (it.next()) |val| { while (it.next()) |val| {
@ -404,7 +412,7 @@ pub fn defaultFpEvalMethod(target: std.Target) LangOpts.FPEvalMethod {
return .double; return .double;
} }
} }
if (target.cpu.has(.x86, .sse)) { if (std.Target.x86.featureSetHas(target.cpu.features, .sse)) {
return .source; return .source;
} }
return .extended; return .extended;
@ -497,6 +505,8 @@ pub fn get32BitArchVariant(target: std.Target) ?std.Target {
.spirv32, .spirv32,
.loongarch32, .loongarch32,
.xtensa, .xtensa,
.propeller,
.or1k,
=> {}, // Already 32 bit => {}, // Already 32 bit
.aarch64 => copy.cpu.arch = .arm, .aarch64 => copy.cpu.arch = .arm,
@ -530,6 +540,8 @@ pub fn get64BitArchVariant(target: std.Target) ?std.Target {
.msp430, .msp430,
.xcore, .xcore,
.xtensa, .xtensa,
.propeller,
.or1k,
=> return null, => return null,
.aarch64, .aarch64,
@ -621,11 +633,14 @@ pub fn toLLVMTriple(target: std.Target, buf: []u8) []const u8 {
.nvptx64 => "nvptx64", .nvptx64 => "nvptx64",
.spirv32 => "spirv32", .spirv32 => "spirv32",
.spirv64 => "spirv64", .spirv64 => "spirv64",
.kalimba => "kalimba",
.lanai => "lanai", .lanai => "lanai",
.wasm32 => "wasm32", .wasm32 => "wasm32",
.wasm64 => "wasm64", .wasm64 => "wasm64",
.ve => "ve", .ve => "ve",
// Note: propeller1, kalimba and or1k are not supported in LLVM; this is the Zig arch name
.kalimba => "kalimba",
.propeller => "propeller",
.or1k => "or1k",
}; };
writer.writeAll(llvm_arch) catch unreachable; writer.writeAll(llvm_arch) catch unreachable;
writer.writeByte('-') catch unreachable; writer.writeByte('-') catch unreachable;
@ -721,64 +736,262 @@ pub fn toLLVMTriple(target: std.Target, buf: []u8) []const u8 {
return writer.buffered(); return writer.buffered();
} }
pub const DefaultPIStatus = enum { yes, no, depends_on_linker };
pub fn isPIEDefault(target: std.Target) DefaultPIStatus {
return switch (target.os.tag) {
.aix,
.haiku,
.macos,
.ios,
.tvos,
.watchos,
.visionos,
.driverkit,
.dragonfly,
.netbsd,
.freebsd,
.solaris,
.cuda,
.amdhsa,
.amdpal,
.mesa3d,
.ps4,
.ps5,
.hurd,
.zos,
=> .no,
.openbsd,
.fuchsia,
=> .yes,
.linux => {
if (target.abi == .ohos)
return .yes;
switch (target.cpu.arch) {
.ve => return .no,
else => return if (target.os.tag == .linux or target.abi.isAndroid() or target.abi.isMusl()) .yes else .no,
}
},
.windows => {
if (target.isMinGW())
return .no;
if (target.abi == .itanium)
return if (target.cpu.arch == .x86_64) .yes else .no;
if (target.abi == .msvc or target.abi == .none)
return .depends_on_linker;
return .no;
},
else => {
switch (target.cpu.arch) {
.hexagon => {
// CLANG_DEFAULT_PIE_ON_LINUX
return if (target.os.tag == .linux or target.abi.isAndroid() or target.abi.isMusl()) .yes else .no;
},
else => return .no,
}
},
};
}
pub fn isPICdefault(target: std.Target) DefaultPIStatus {
return switch (target.os.tag) {
.aix,
.haiku,
.macos,
.ios,
.tvos,
.watchos,
.visionos,
.driverkit,
.amdhsa,
.amdpal,
.mesa3d,
.ps4,
.ps5,
=> .yes,
.fuchsia,
.cuda,
.zos,
=> .no,
.dragonfly,
.openbsd,
.netbsd,
.freebsd,
.solaris,
.hurd,
=> {
return switch (target.cpu.arch) {
.mips64, .mips64el => .yes,
else => .no,
};
},
.linux => {
if (target.abi == .ohos)
return .no;
return switch (target.cpu.arch) {
.mips64, .mips64el => .yes,
else => .no,
};
},
.windows => {
if (target.isMinGW())
return if (target.cpu.arch == .x86_64 or target.cpu.arch == .aarch64) .yes else .no;
if (target.abi == .itanium)
return if (target.cpu.arch == .x86_64) .yes else .no;
if (target.abi == .msvc or target.abi == .none)
return .depends_on_linker;
if (target.ofmt == .macho)
return .yes;
return switch (target.cpu.arch) {
.x86_64, .mips64, .mips64el => .yes,
else => .no,
};
},
else => {
if (target.ofmt == .macho)
return .yes;
return switch (target.cpu.arch) {
.mips64, .mips64el => .yes,
else => .no,
};
},
};
}
pub fn isPICDefaultForced(target: std.Target) DefaultPIStatus {
return switch (target.os.tag) {
.aix, .amdhsa, .amdpal, .mesa3d => .yes,
.haiku,
.dragonfly,
.openbsd,
.netbsd,
.freebsd,
.solaris,
.cuda,
.ps4,
.ps5,
.hurd,
.linux,
.fuchsia,
.zos,
=> .no,
.windows => {
if (target.isMinGW())
return .yes;
if (target.abi == .itanium)
return if (target.cpu.arch == .x86_64) .yes else .no;
// if (bfd) return target.cpu.arch == .x86_64 else target.cpu.arch == .x86_64 or target.cpu.arch == .aarch64;
if (target.abi == .msvc or target.abi == .none)
return .depends_on_linker;
if (target.ofmt == .macho)
return if (target.cpu.arch == .aarch64 or target.cpu.arch == .x86_64) .yes else .no;
return if (target.cpu.arch == .x86_64) .yes else .no;
},
.macos,
.ios,
.tvos,
.watchos,
.visionos,
.driverkit,
=> if (target.cpu.arch == .x86_64 or target.cpu.arch == .aarch64) .yes else .no,
else => {
return switch (target.cpu.arch) {
.hexagon,
.lanai,
.avr,
.riscv32,
.riscv64,
.csky,
.xcore,
.wasm32,
.wasm64,
.ve,
.spirv32,
.spirv64,
=> .no,
.msp430 => .yes,
else => {
if (target.ofmt == .macho)
return if (target.cpu.arch == .aarch64 or target.cpu.arch == .x86_64) .yes else .no;
return .no;
},
};
},
};
}
test "alignment functions - smoke test" { test "alignment functions - smoke test" {
var target: std.Target = undefined; const linux: std.Target.Os = .{ .tag = .linux, .version_range = .{ .none = {} } };
const x86 = std.Target.Cpu.Arch.x86_64; const x86_64_target: std.Target = .{
target.os = std.Target.Os.Tag.defaultVersionRange(.linux, x86, .none); .abi = std.Target.Abi.default(.x86_64, linux.tag),
target.cpu = std.Target.Cpu.baseline(x86, target.os); .cpu = std.Target.Cpu.Model.generic(.x86_64).toCpu(.x86_64),
target.abi = std.Target.Abi.default(x86, target.os.tag); .os = linux,
.ofmt = .elf,
};
try std.testing.expect(isTlsSupported(target)); try std.testing.expect(isTlsSupported(x86_64_target));
try std.testing.expect(!ignoreNonZeroSizedBitfieldTypeAlignment(target)); try std.testing.expect(!ignoreNonZeroSizedBitfieldTypeAlignment(x86_64_target));
try std.testing.expect(minZeroWidthBitfieldAlignment(target) == null); try std.testing.expect(minZeroWidthBitfieldAlignment(x86_64_target) == null);
try std.testing.expect(!unnamedFieldAffectsAlignment(target)); try std.testing.expect(!unnamedFieldAffectsAlignment(x86_64_target));
try std.testing.expect(defaultAlignment(target) == 16); try std.testing.expect(defaultAlignment(x86_64_target) == 16);
try std.testing.expect(!packAllEnums(target)); try std.testing.expect(!packAllEnums(x86_64_target));
try std.testing.expect(systemCompiler(target) == .gcc); try std.testing.expect(systemCompiler(x86_64_target) == .gcc);
const arm = std.Target.Cpu.Arch.arm;
target.os = std.Target.Os.Tag.defaultVersionRange(.ios, arm, .none);
target.cpu = std.Target.Cpu.baseline(arm, target.os);
target.abi = std.Target.Abi.default(arm, target.os.tag);
try std.testing.expect(!isTlsSupported(target));
try std.testing.expect(ignoreNonZeroSizedBitfieldTypeAlignment(target));
try std.testing.expectEqual(@as(?u29, 32), minZeroWidthBitfieldAlignment(target));
try std.testing.expect(unnamedFieldAffectsAlignment(target));
try std.testing.expect(defaultAlignment(target) == 16);
try std.testing.expect(!packAllEnums(target));
try std.testing.expect(systemCompiler(target) == .clang);
} }
test "target size/align tests" { test "target size/align tests" {
var comp: @import("Compilation.zig") = undefined; var comp: @import("Compilation.zig") = undefined;
const x86 = std.Target.Cpu.Arch.x86; const linux: std.Target.Os = .{ .tag = .linux, .version_range = .{ .none = {} } };
comp.target.cpu.arch = x86; const x86_target: std.Target = .{
comp.target.cpu.model = &std.Target.x86.cpu.i586; .abi = std.Target.Abi.default(.x86, linux.tag),
comp.target.os = std.Target.Os.Tag.defaultVersionRange(.linux, x86, .none); .cpu = std.Target.Cpu.Model.generic(.x86).toCpu(.x86),
comp.target.abi = std.Target.Abi.gnu; .os = linux,
.ofmt = .elf,
const tt: Type = .{
.specifier = .long_long,
}; };
comp.target = x86_target;
try std.testing.expectEqual(@as(u64, 8), tt.sizeof(&comp).?); const tt: QualType = .long_long;
try std.testing.expectEqual(@as(u64, 8), tt.sizeof(&comp));
try std.testing.expectEqual(@as(u64, 4), tt.alignof(&comp)); try std.testing.expectEqual(@as(u64, 4), tt.alignof(&comp));
const arm = std.Target.Cpu.Arch.arm;
comp.target.cpu = std.Target.Cpu.Model.toCpu(&std.Target.arm.cpu.cortex_r4, arm);
comp.target.os = std.Target.Os.Tag.defaultVersionRange(.ios, arm, .none);
comp.target.abi = std.Target.Abi.none;
const ct: Type = .{
.specifier = .char,
};
try std.testing.expectEqual(true, comp.target.cpu.has(.arm, .has_v7));
try std.testing.expectEqual(@as(u64, 1), ct.sizeof(&comp).?);
try std.testing.expectEqual(@as(u64, 1), ct.alignof(&comp));
try std.testing.expectEqual(true, ignoreNonZeroSizedBitfieldTypeAlignment(comp.target));
} }
/// The canonical integer representation of nullptr_t. /// The canonical integer representation of nullptr_t.

View File

@ -1,11 +1,13 @@
//! Parsing and classification of string and character literals //! Parsing and classification of string and character literals
const std = @import("std"); const std = @import("std");
const mem = std.mem;
const Compilation = @import("Compilation.zig"); const Compilation = @import("Compilation.zig");
const Type = @import("Type.zig");
const Diagnostics = @import("Diagnostics.zig"); const Diagnostics = @import("Diagnostics.zig");
const Tokenizer = @import("Tokenizer.zig"); const Tokenizer = @import("Tokenizer.zig");
const mem = std.mem; const QualType = @import("TypeStore.zig").QualType;
const Source = @import("Source.zig");
pub const Item = union(enum) { pub const Item = union(enum) {
/// decoded hex or character escape /// decoded hex or character escape
@ -18,11 +20,6 @@ pub const Item = union(enum) {
utf8_text: std.unicode.Utf8View, utf8_text: std.unicode.Utf8View,
}; };
const CharDiagnostic = struct {
tag: Diagnostics.Tag,
extra: Diagnostics.Message.Extra,
};
pub const Kind = enum { pub const Kind = enum {
char, char,
wide, wide,
@ -91,13 +88,13 @@ pub const Kind = enum {
} }
/// The C type of a character literal of this kind /// The C type of a character literal of this kind
pub fn charLiteralType(kind: Kind, comp: *const Compilation) Type { pub fn charLiteralType(kind: Kind, comp: *const Compilation) QualType {
return switch (kind) { return switch (kind) {
.char => Type.int, .char => .int,
.wide => comp.types.wchar, .wide => comp.type_store.wchar,
.utf_8 => .{ .specifier = .uchar }, .utf_8 => .uchar,
.utf_16 => comp.types.uint_least16_t, .utf_16 => comp.type_store.uint_least16_t,
.utf_32 => comp.types.uint_least32_t, .utf_32 => comp.type_store.uint_least32_t,
.unterminated => unreachable, .unterminated => unreachable,
}; };
} }
@ -120,7 +117,7 @@ pub const Kind = enum {
pub fn charUnitSize(kind: Kind, comp: *const Compilation) Compilation.CharUnitSize { pub fn charUnitSize(kind: Kind, comp: *const Compilation) Compilation.CharUnitSize {
return switch (kind) { return switch (kind) {
.char => .@"1", .char => .@"1",
.wide => switch (comp.types.wchar.sizeof(comp).?) { .wide => switch (comp.type_store.wchar.sizeof(comp)) {
2 => .@"2", 2 => .@"2",
4 => .@"4", 4 => .@"4",
else => unreachable, else => unreachable,
@ -140,37 +137,55 @@ pub const Kind = enum {
} }
/// The C type of an element of a string literal of this kind /// The C type of an element of a string literal of this kind
pub fn elementType(kind: Kind, comp: *const Compilation) Type { pub fn elementType(kind: Kind, comp: *const Compilation) QualType {
return switch (kind) { return switch (kind) {
.unterminated => unreachable, .unterminated => unreachable,
.char => .{ .specifier = .char }, .char => .char,
.utf_8 => if (comp.langopts.hasChar8_T()) .{ .specifier = .uchar } else .{ .specifier = .char }, .utf_8 => if (comp.langopts.hasChar8_T()) .uchar else .char,
else => kind.charLiteralType(comp), else => kind.charLiteralType(comp),
}; };
} }
}; };
pub const Ascii = struct {
val: u7,
pub fn init(val: anytype) Ascii {
return .{ .val = @intCast(val) };
}
pub fn format(ctx: Ascii, w: *std.Io.Writer, fmt_str: []const u8) !usize {
const template = "{c}";
const i = std.mem.indexOf(u8, fmt_str, template).?;
try w.writeAll(fmt_str[0..i]);
if (std.ascii.isPrint(ctx.val)) {
try w.writeByte(ctx.val);
} else {
try w.print("x{x:0>2}", .{ctx.val});
}
return i + template.len;
}
};
pub const Parser = struct { pub const Parser = struct {
comp: *const Compilation,
literal: []const u8, literal: []const u8,
i: usize = 0, i: usize = 0,
kind: Kind, kind: Kind,
max_codepoint: u21, max_codepoint: u21,
loc: Source.Location,
/// Offset added to `loc.byte_offset` when emitting an error.
offset: u32 = 0,
expansion_locs: []const Source.Location,
/// We only want to issue a max of 1 error per char literal /// We only want to issue a max of 1 error per char literal
errored: bool = false, errored: bool = false,
errors_buffer: [4]CharDiagnostic, /// Makes incorrect encoding always an error.
errors_len: usize, /// Used when concatenating string literals.
comp: *const Compilation, incorrect_encoding_is_error: bool = false,
/// If this is false, do not issue any diagnostics for incorrect character encoding
pub fn init(literal: []const u8, kind: Kind, max_codepoint: u21, comp: *const Compilation) Parser { /// Incorrect encoding is allowed if we are unescaping an identifier in the preprocessor
return .{ diagnose_incorrect_encoding: bool = true,
.literal = literal,
.comp = comp,
.kind = kind,
.max_codepoint = max_codepoint,
.errors_buffer = undefined,
.errors_len = 0,
};
}
fn prefixLen(self: *const Parser) usize { fn prefixLen(self: *const Parser) usize {
return switch (self.kind) { return switch (self.kind) {
@ -181,65 +196,204 @@ pub const Parser = struct {
}; };
} }
pub fn errors(p: *Parser) []CharDiagnostic { const Diagnostic = struct {
return p.errors_buffer[0..p.errors_len]; fmt: []const u8,
kind: Diagnostics.Message.Kind,
opt: ?Diagnostics.Option = null,
extension: bool = false,
pub const illegal_char_encoding_error: Diagnostic = .{
.fmt = "illegal character encoding in character literal",
.kind = .@"error",
};
pub const illegal_char_encoding_warning: Diagnostic = .{
.fmt = "illegal character encoding in character literal",
.kind = .warning,
.opt = .@"invalid-source-encoding",
};
pub const missing_hex_escape: Diagnostic = .{
.fmt = "\\{c} used with no following hex digits",
.kind = .@"error",
};
pub const escape_sequence_overflow: Diagnostic = .{
.fmt = "escape sequence out of range",
.kind = .@"error",
};
pub const incomplete_universal_character: Diagnostic = .{
.fmt = "incomplete universal character name",
.kind = .@"error",
};
pub const invalid_universal_character: Diagnostic = .{
.fmt = "invalid universal character",
.kind = .@"error",
};
pub const char_too_large: Diagnostic = .{
.fmt = "character too large for enclosing character literal type",
.kind = .@"error",
};
pub const ucn_basic_char_error: Diagnostic = .{
.fmt = "character '{c}' cannot be specified by a universal character name",
.kind = .@"error",
};
pub const ucn_basic_char_warning: Diagnostic = .{
.fmt = "specifying character '{c}' with a universal character name is incompatible with C standards before C23",
.kind = .off,
.opt = .@"pre-c23-compat",
};
pub const ucn_control_char_error: Diagnostic = .{
.fmt = "universal character name refers to a control character",
.kind = .@"error",
};
pub const ucn_control_char_warning: Diagnostic = .{
.fmt = "universal character name referring to a control character is incompatible with C standards before C23",
.kind = .off,
.opt = .@"pre-c23-compat",
};
pub const c89_ucn_in_literal: Diagnostic = .{
.fmt = "universal character names are only valid in C99 or later",
.kind = .warning,
.opt = .unicode,
};
const non_standard_escape_char: Diagnostic = .{
.fmt = "use of non-standard escape character '\\{c}'",
.kind = .off,
.extension = true,
};
pub const unknown_escape_sequence: Diagnostic = .{
.fmt = "unknown escape sequence '\\{c}'",
.kind = .warning,
.opt = .@"unknown-escape-sequence",
};
pub const four_char_char_literal: Diagnostic = .{
.fmt = "multi-character character constant",
.opt = .@"four-char-constants",
.kind = .off,
};
pub const multichar_literal_warning: Diagnostic = .{
.fmt = "multi-character character constant",
.kind = .warning,
.opt = .multichar,
};
pub const invalid_multichar_literal: Diagnostic = .{
.fmt = "{s} character literals may not contain multiple characters",
.kind = .@"error",
};
pub const char_lit_too_wide: Diagnostic = .{
.fmt = "character constant too long for its type",
.kind = .warning,
};
// pub const wide_multichar_literal: Diagnostic = .{
// .fmt = "extraneous characters in character constant ignored",
// .kind = .warning,
// };
};
pub fn err(p: *Parser, diagnostic: Diagnostic, args: anytype) !void {
defer p.offset = 0;
if (p.errored) return;
defer p.errored = true;
try p.warn(diagnostic, args);
} }
pub fn err(self: *Parser, tag: Diagnostics.Tag, extra: Diagnostics.Message.Extra) void { pub fn warn(p: *Parser, diagnostic: Diagnostic, args: anytype) Compilation.Error!void {
if (self.errored) return; defer p.offset = 0;
self.errored = true; if (p.errored) return;
const diagnostic: CharDiagnostic = .{ .tag = tag, .extra = extra }; if (p.comp.diagnostics.effectiveKind(diagnostic) == .off) return;
if (self.errors_len == self.errors_buffer.len) {
self.errors_buffer[self.errors_buffer.len - 1] = diagnostic; var sf = std.heap.stackFallback(1024, p.comp.gpa);
} else { var allocating: std.Io.Writer.Allocating = .init(sf.get());
self.errors_buffer[self.errors_len] = diagnostic; defer allocating.deinit();
self.errors_len += 1;
formatArgs(&allocating.writer, diagnostic.fmt, args) catch return error.OutOfMemory;
var offset_location = p.loc;
offset_location.byte_offset += p.offset;
try p.comp.diagnostics.addWithLocation(p.comp, .{
.kind = diagnostic.kind,
.text = allocating.getWritten(),
.opt = diagnostic.opt,
.extension = diagnostic.extension,
.location = offset_location.expand(p.comp),
}, p.expansion_locs, true);
}
fn formatArgs(w: *std.Io.Writer, fmt: []const u8, args: anytype) !void {
var i: usize = 0;
inline for (std.meta.fields(@TypeOf(args))) |arg_info| {
const arg = @field(args, arg_info.name);
i += switch (@TypeOf(arg)) {
[]const u8 => try Diagnostics.formatString(w, fmt[i..], arg),
Ascii => try arg.format(w, fmt[i..]),
else => switch (@typeInfo(@TypeOf(arg))) {
.int, .comptime_int => try Diagnostics.formatInt(w, fmt[i..], arg),
.pointer => try Diagnostics.formatString(w, fmt[i..], arg),
else => unreachable,
},
};
} }
try w.writeAll(fmt[i..]);
} }
pub fn warn(self: *Parser, tag: Diagnostics.Tag, extra: Diagnostics.Message.Extra) void { pub fn next(p: *Parser) !?Item {
if (self.errored) return; if (p.i >= p.literal.len) return null;
if (self.errors_len < self.errors_buffer.len) {
self.errors_buffer[self.errors_len] = .{ .tag = tag, .extra = extra };
self.errors_len += 1;
}
}
pub fn next(self: *Parser) ?Item { const start = p.i;
if (self.i >= self.literal.len) return null; if (p.literal[start] != '\\') {
p.i = mem.indexOfScalarPos(u8, p.literal, start + 1, '\\') orelse p.literal.len;
const start = self.i; const unescaped_slice = p.literal[start..p.i];
if (self.literal[start] != '\\') {
self.i = mem.indexOfScalarPos(u8, self.literal, start + 1, '\\') orelse self.literal.len;
const unescaped_slice = self.literal[start..self.i];
const view = std.unicode.Utf8View.init(unescaped_slice) catch { const view = std.unicode.Utf8View.init(unescaped_slice) catch {
if (self.kind != .char) { if (!p.diagnose_incorrect_encoding) {
self.err(.illegal_char_encoding_error, .{ .none = {} }); return .{ .improperly_encoded = p.literal[start..p.i] };
}
if (p.incorrect_encoding_is_error) {
try p.warn(.illegal_char_encoding_error, .{});
return .{ .improperly_encoded = p.literal[start..p.i] };
}
if (p.kind != .char) {
try p.err(.illegal_char_encoding_error, .{});
return null; return null;
} }
self.warn(.illegal_char_encoding_warning, .{ .none = {} }); try p.warn(.illegal_char_encoding_warning, .{});
return .{ .improperly_encoded = self.literal[start..self.i] }; return .{ .improperly_encoded = p.literal[start..p.i] };
}; };
return .{ .utf8_text = view }; return .{ .utf8_text = view };
} }
switch (self.literal[start + 1]) { switch (p.literal[start + 1]) {
'u', 'U' => return self.parseUnicodeEscape(), 'u', 'U' => return try p.parseUnicodeEscape(),
else => return self.parseEscapedChar(), else => return try p.parseEscapedChar(),
} }
} }
fn parseUnicodeEscape(self: *Parser) ?Item { fn parseUnicodeEscape(p: *Parser) !?Item {
const start = self.i; const start = p.i;
std.debug.assert(self.literal[self.i] == '\\'); std.debug.assert(p.literal[p.i] == '\\');
const kind = self.literal[self.i + 1]; const kind = p.literal[p.i + 1];
std.debug.assert(kind == 'u' or kind == 'U'); std.debug.assert(kind == 'u' or kind == 'U');
self.i += 2; p.i += 2;
if (self.i >= self.literal.len or !std.ascii.isHex(self.literal[self.i])) { if (p.i >= p.literal.len or !std.ascii.isHex(p.literal[p.i])) {
self.err(.missing_hex_escape, .{ .ascii = @intCast(kind) }); try p.err(.missing_hex_escape, .{Ascii.init(kind)});
return null; return null;
} }
const expected_len: usize = if (kind == 'u') 4 else 8; const expected_len: usize = if (kind == 'u') 4 else 8;
@ -247,66 +401,66 @@ pub const Parser = struct {
var count: usize = 0; var count: usize = 0;
var val: u32 = 0; var val: u32 = 0;
for (self.literal[self.i..], 0..) |c, i| { for (p.literal[p.i..], 0..) |c, i| {
if (i == expected_len) break; if (i == expected_len) break;
const char = std.fmt.charToDigit(c, 16) catch { const char = std.fmt.charToDigit(c, 16) catch break;
break;
};
val, const overflow = @shlWithOverflow(val, 4); val, const overflow = @shlWithOverflow(val, 4);
overflowed = overflowed or overflow != 0; overflowed = overflowed or overflow != 0;
val |= char; val |= char;
count += 1; count += 1;
} }
self.i += expected_len; p.i += expected_len;
if (overflowed) { if (overflowed) {
self.err(.escape_sequence_overflow, .{ .offset = start + self.prefixLen() }); p.offset += @intCast(start + p.prefixLen());
try p.err(.escape_sequence_overflow, .{});
return null; return null;
} }
if (count != expected_len) { if (count != expected_len) {
self.err(.incomplete_universal_character, .{ .none = {} }); try p.err(.incomplete_universal_character, .{});
return null; return null;
} }
if (val > std.math.maxInt(u21) or !std.unicode.utf8ValidCodepoint(@intCast(val))) { if (val > std.math.maxInt(u21) or !std.unicode.utf8ValidCodepoint(@intCast(val))) {
self.err(.invalid_universal_character, .{ .offset = start + self.prefixLen() }); p.offset += @intCast(start + p.prefixLen());
try p.err(.invalid_universal_character, .{});
return null; return null;
} }
if (val > self.max_codepoint) { if (val > p.max_codepoint) {
self.err(.char_too_large, .{ .none = {} }); try p.err(.char_too_large, .{});
return null; return null;
} }
if (val < 0xA0 and (val != '$' and val != '@' and val != '`')) { if (val < 0xA0 and (val != '$' and val != '@' and val != '`')) {
const is_error = !self.comp.langopts.standard.atLeast(.c23); const is_error = !p.comp.langopts.standard.atLeast(.c23);
if (val >= 0x20 and val <= 0x7F) { if (val >= 0x20 and val <= 0x7F) {
if (is_error) { if (is_error) {
self.err(.ucn_basic_char_error, .{ .ascii = @intCast(val) }); try p.err(.ucn_basic_char_error, .{Ascii.init(val)});
} else { } else if (!p.comp.langopts.standard.atLeast(.c23)) {
self.warn(.ucn_basic_char_warning, .{ .ascii = @intCast(val) }); try p.warn(.ucn_basic_char_warning, .{Ascii.init(val)});
} }
} else { } else {
if (is_error) { if (is_error) {
self.err(.ucn_control_char_error, .{ .none = {} }); try p.err(.ucn_control_char_error, .{});
} else { } else if (!p.comp.langopts.standard.atLeast(.c23)) {
self.warn(.ucn_control_char_warning, .{ .none = {} }); try p.warn(.ucn_control_char_warning, .{});
} }
} }
} }
self.warn(.c89_ucn_in_literal, .{ .none = {} }); if (!p.comp.langopts.standard.atLeast(.c99)) try p.warn(.c89_ucn_in_literal, .{});
return .{ .codepoint = @intCast(val) }; return .{ .codepoint = @intCast(val) };
} }
fn parseEscapedChar(self: *Parser) Item { fn parseEscapedChar(p: *Parser) !Item {
self.i += 1; p.i += 1;
const c = self.literal[self.i]; const c = p.literal[p.i];
defer if (c != 'x' and (c < '0' or c > '7')) { defer if (c != 'x' and (c < '0' or c > '7')) {
self.i += 1; p.i += 1;
}; };
switch (c) { switch (c) {
@ -319,36 +473,40 @@ pub const Parser = struct {
'a' => return .{ .value = 0x07 }, 'a' => return .{ .value = 0x07 },
'b' => return .{ .value = 0x08 }, 'b' => return .{ .value = 0x08 },
'e', 'E' => { 'e', 'E' => {
self.warn(.non_standard_escape_char, .{ .invalid_escape = .{ .char = c, .offset = @intCast(self.i) } }); p.offset += @intCast(p.i);
try p.warn(.non_standard_escape_char, .{Ascii.init(c)});
return .{ .value = 0x1B }; return .{ .value = 0x1B };
}, },
'(', '{', '[', '%' => { '(', '{', '[', '%' => {
self.warn(.non_standard_escape_char, .{ .invalid_escape = .{ .char = c, .offset = @intCast(self.i) } }); p.offset += @intCast(p.i);
try p.warn(.non_standard_escape_char, .{Ascii.init(c)});
return .{ .value = c }; return .{ .value = c };
}, },
'f' => return .{ .value = 0x0C }, 'f' => return .{ .value = 0x0C },
'v' => return .{ .value = 0x0B }, 'v' => return .{ .value = 0x0B },
'x' => return .{ .value = self.parseNumberEscape(.hex) }, 'x' => return .{ .value = try p.parseNumberEscape(.hex) },
'0'...'7' => return .{ .value = self.parseNumberEscape(.octal) }, '0'...'7' => return .{ .value = try p.parseNumberEscape(.octal) },
'u', 'U' => unreachable, // handled by parseUnicodeEscape 'u', 'U' => unreachable, // handled by parseUnicodeEscape
else => { else => {
self.warn(.unknown_escape_sequence, .{ .invalid_escape = .{ .char = c, .offset = @intCast(self.i) } }); p.offset += @intCast(p.i);
try p.warn(.unknown_escape_sequence, .{Ascii.init(c)});
return .{ .value = c }; return .{ .value = c };
}, },
} }
} }
fn parseNumberEscape(self: *Parser, base: EscapeBase) u32 { fn parseNumberEscape(p: *Parser, base: EscapeBase) !u32 {
var val: u32 = 0; var val: u32 = 0;
var count: usize = 0; var count: usize = 0;
var overflowed = false; var overflowed = false;
const start = self.i; const start = p.i;
defer self.i += count; defer p.i += count;
const slice = switch (base) { const slice = switch (base) {
.octal => self.literal[self.i..@min(self.literal.len, self.i + 3)], // max 3 chars .octal => p.literal[p.i..@min(p.literal.len, p.i + 3)], // max 3 chars
.hex => blk: { .hex => blk: {
self.i += 1; p.i += 1;
break :blk self.literal[self.i..]; // skip over 'x'; could have an arbitrary number of chars break :blk p.literal[p.i..]; // skip over 'x'; could have an arbitrary number of chars
}, },
}; };
for (slice) |c| { for (slice) |c| {
@ -358,13 +516,14 @@ pub const Parser = struct {
val += char; val += char;
count += 1; count += 1;
} }
if (overflowed or val > self.kind.maxInt(self.comp)) { if (overflowed or val > p.kind.maxInt(p.comp)) {
self.err(.escape_sequence_overflow, .{ .offset = start + self.prefixLen() }); p.offset += @intCast(start + p.prefixLen());
try p.err(.escape_sequence_overflow, .{});
return 0; return 0;
} }
if (count == 0) { if (count == 0) {
std.debug.assert(base == .hex); std.debug.assert(base == .hex);
self.err(.missing_hex_escape, .{ .ascii = 'x' }); try p.err(.missing_hex_escape, .{Ascii.init('x')});
} }
return val; return val;
} }

View File

@ -1,12 +1,14 @@
const std = @import("std"); const std = @import("std");
const mem = std.mem; const mem = std.mem;
const system_defaults = @import("system_defaults");
const Compilation = @import("../Compilation.zig"); const Compilation = @import("../Compilation.zig");
const GCCDetector = @import("../Driver/GCCDetector.zig");
const Toolchain = @import("../Toolchain.zig");
const Driver = @import("../Driver.zig"); const Driver = @import("../Driver.zig");
const Distro = @import("../Driver/Distro.zig"); const Distro = @import("../Driver/Distro.zig");
const GCCDetector = @import("../Driver/GCCDetector.zig");
const target_util = @import("../target.zig"); const target_util = @import("../target.zig");
const system_defaults = @import("system_defaults"); const Toolchain = @import("../Toolchain.zig");
const Linux = @This(); const Linux = @This();
@ -144,7 +146,7 @@ fn getPIE(self: *const Linux, d: *const Driver) bool {
fn getStaticPIE(self: *const Linux, d: *Driver) !bool { fn getStaticPIE(self: *const Linux, d: *Driver) !bool {
_ = self; _ = self;
if (d.static_pie and d.pie != null) { if (d.static_pie and d.pie != null) {
try d.err("cannot specify 'nopie' along with 'static-pie'"); try d.err("cannot specify 'nopie' along with 'static-pie'", .{});
} }
return d.static_pie; return d.static_pie;
} }
@ -195,7 +197,7 @@ pub fn buildLinkerArgs(self: *const Linux, tc: *const Toolchain, argv: *std.arra
if (target_util.ldEmulationOption(d.comp.target, null)) |emulation| { if (target_util.ldEmulationOption(d.comp.target, null)) |emulation| {
try argv.appendSlice(&.{ "-m", emulation }); try argv.appendSlice(&.{ "-m", emulation });
} else { } else {
try d.err("Unknown target triple"); try d.err("Unknown target triple", .{});
return; return;
} }
if (d.comp.target.cpu.arch.isRISCV()) { if (d.comp.target.cpu.arch.isRISCV()) {
@ -214,9 +216,9 @@ pub fn buildLinkerArgs(self: *const Linux, tc: *const Toolchain, argv: *std.arra
const dynamic_linker = d.comp.target.standardDynamicLinkerPath(); const dynamic_linker = d.comp.target.standardDynamicLinkerPath();
// todo: check for --dyld-prefix // todo: check for --dyld-prefix
if (dynamic_linker.get()) |path| { if (dynamic_linker.get()) |path| {
try argv.appendSlice(&.{ "-dynamic-linker", try tc.arena.dupe(u8, path) }); try argv.appendSlice(&.{ "-dynamic-linker", try d.comp.arena.dupe(u8, path) });
} else { } else {
try d.err("Could not find dynamic linker path"); try d.err("Could not find dynamic linker path", .{});
} }
} }
} }
@ -318,7 +320,7 @@ pub fn buildLinkerArgs(self: *const Linux, tc: *const Toolchain, argv: *std.arra
fn getMultiarchTriple(target: std.Target) ?[]const u8 { fn getMultiarchTriple(target: std.Target) ?[]const u8 {
const is_android = target.abi.isAndroid(); const is_android = target.abi.isAndroid();
const is_mips_r6 = target.cpu.has(.mips, .mips32r6); const is_mips_r6 = std.Target.mips.featureSetHas(target.cpu.features, .mips32r6);
return switch (target.cpu.arch) { return switch (target.cpu.arch) {
.arm, .thumb => if (is_android) "arm-linux-androideabi" else if (target.abi == .gnueabihf) "arm-linux-gnueabihf" else "arm-linux-gnueabi", .arm, .thumb => if (is_android) "arm-linux-androideabi" else if (target.abi == .gnueabihf) "arm-linux-gnueabihf" else "arm-linux-gnueabi",
.armeb, .thumbeb => if (target.abi == .gnueabihf) "armeb-linux-gnueabihf" else "armeb-linux-gnueabi", .armeb, .thumbeb => if (target.abi == .gnueabihf) "armeb-linux-gnueabihf" else "armeb-linux-gnueabi",
@ -372,13 +374,13 @@ pub fn defineSystemIncludes(self: *const Linux, tc: *const Toolchain) !void {
// musl prefers /usr/include before builtin includes, so musl targets will add builtins // musl prefers /usr/include before builtin includes, so musl targets will add builtins
// at the end of this function (unless disabled with nostdlibinc) // at the end of this function (unless disabled with nostdlibinc)
if (!tc.driver.nobuiltininc and (!target.abi.isMusl() or tc.driver.nostdlibinc)) { if (!tc.driver.nobuiltininc and (!target.abi.isMusl() or tc.driver.nostdlibinc)) {
try comp.addBuiltinIncludeDir(tc.driver.aro_name); try comp.addBuiltinIncludeDir(tc.driver.aro_name, tc.driver.resource_dir);
} }
if (tc.driver.nostdlibinc) return; if (tc.driver.nostdlibinc) return;
const sysroot = tc.getSysroot(); const sysroot = tc.getSysroot();
const local_include = try std.fmt.allocPrint(comp.gpa, "{s}{s}", .{ sysroot, "/usr/local/include" }); const local_include = try std.fs.path.join(comp.gpa, &.{ sysroot, "/usr/local/include" });
defer comp.gpa.free(local_include); defer comp.gpa.free(local_include);
try comp.addSystemIncludeDir(local_include); try comp.addSystemIncludeDir(local_include);
@ -389,7 +391,7 @@ pub fn defineSystemIncludes(self: *const Linux, tc: *const Toolchain) !void {
} }
if (getMultiarchTriple(target)) |triple| { if (getMultiarchTriple(target)) |triple| {
const joined = try std.fs.path.join(comp.gpa, &.{ sysroot, "usr", "include", triple }); const joined = try std.fs.path.join(comp.gpa, &.{ sysroot, "/usr/include", triple });
defer comp.gpa.free(joined); defer comp.gpa.free(joined);
if (tc.filesystem.exists(joined)) { if (tc.filesystem.exists(joined)) {
try comp.addSystemIncludeDir(joined); try comp.addSystemIncludeDir(joined);
@ -403,7 +405,7 @@ pub fn defineSystemIncludes(self: *const Linux, tc: *const Toolchain) !void {
std.debug.assert(!tc.driver.nostdlibinc); std.debug.assert(!tc.driver.nostdlibinc);
if (!tc.driver.nobuiltininc and target.abi.isMusl()) { if (!tc.driver.nobuiltininc and target.abi.isMusl()) {
try comp.addBuiltinIncludeDir(tc.driver.aro_name); try comp.addBuiltinIncludeDir(tc.driver.aro_name, tc.driver.resource_dir);
} }
} }
@ -414,7 +416,7 @@ test Linux {
defer arena_instance.deinit(); defer arena_instance.deinit();
const arena = arena_instance.allocator(); const arena = arena_instance.allocator();
var comp = Compilation.init(std.testing.allocator, std.fs.cwd()); var comp = Compilation.init(std.testing.allocator, arena, undefined, std.fs.cwd());
defer comp.deinit(); defer comp.deinit();
comp.environment = .{ comp.environment = .{
.path = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", .path = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
@ -426,7 +428,7 @@ test Linux {
comp.target = try std.zig.system.resolveTargetQuery(target_query); comp.target = try std.zig.system.resolveTargetQuery(target_query);
comp.langopts.setEmulatedCompiler(.gcc); comp.langopts.setEmulatedCompiler(.gcc);
var driver: Driver = .{ .comp = &comp }; var driver: Driver = .{ .comp = &comp, .diagnostics = undefined };
defer driver.deinit(); defer driver.deinit();
driver.raw_target_triple = raw_triple; driver.raw_target_triple = raw_triple;
@ -434,7 +436,7 @@ test Linux {
try driver.link_objects.append(driver.comp.gpa, link_obj); try driver.link_objects.append(driver.comp.gpa, link_obj);
driver.temp_file_count += 1; driver.temp_file_count += 1;
var toolchain: Toolchain = .{ .driver = &driver, .arena = arena, .filesystem = .{ .fake = &.{ var toolchain: Toolchain = .{ .driver = &driver, .filesystem = .{ .fake = &.{
.{ .path = "/tmp" }, .{ .path = "/tmp" },
.{ .path = "/usr" }, .{ .path = "/usr" },
.{ .path = "/usr/lib64" }, .{ .path = "/usr/lib64" },

12
lib/compiler/aro/assembly_backend.zig vendored Normal file
View File

@ -0,0 +1,12 @@
const std = @import("std");
const aro = @import("aro");
pub const x86_64 = @import("assembly_backend/x86_64.zig");
/// Dispatch assembly generation to the backend for `target`'s CPU
/// architecture. Panics for architectures that have no assembly backend yet.
pub fn genAsm(target: std.Target, tree: *const aro.Tree) aro.Compilation.Error!aro.Assembly {
    const arch = target.cpu.arch;
    if (arch == .x86_64) return x86_64.genAsm(tree);
    std.debug.panic("genAsm not implemented: {s}", .{@tagName(arch)});
}

View File

@ -0,0 +1,254 @@
const std = @import("std");
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const aro = @import("aro");
const Assembly = aro.Assembly;
const Compilation = aro.Compilation;
const Node = Tree.Node;
const Source = aro.Source;
const Tree = aro.Tree;
const QualType = aro.QualType;
const Value = aro.Value;
const AsmCodeGen = @This();
const Error = aro.Compilation.Error;
/// Parsed and type-checked AST being lowered to assembly.
tree: *const Tree,
/// Compilation that owns `tree`; source of gpa, diagnostics, and target info.
comp: *Compilation,
/// Receives text-section output (genDecls writes the GNU-stack note here).
text: *std.Io.Writer,
/// Receives data-section directives (variables, .file entries).
data: *std.Io.Writer,
/// Assembler storage directive, tagged with the bit width it stores.
const StorageUnit = enum(u8) {
    byte = 8,
    short = 16,
    long = 32,
    quad = 64,

    /// Mask `val` down to only the bits this storage unit can hold.
    fn trunc(self: StorageUnit, val: u64) u64 {
        switch (self) {
            .byte => return @as(u8, @truncate(val)),
            .short => return @as(u16, @truncate(val)),
            .long => return @as(u32, @truncate(val)),
            .quad => return val,
        }
    }
};
/// Emit one assembler data directive (e.g. ` .quad 0x...`) for `value`,
/// truncated to the width of `storage_unit`.
fn serializeInt(value: u64, storage_unit: StorageUnit, w: *std.Io.Writer) !void {
    const truncated = storage_unit.trunc(value);
    try w.print(" .{s} 0x{x}\n", .{ @tagName(storage_unit), truncated });
}
/// Emit the bit pattern of a float as assembler data directives.
/// f128 and f80 are split into 64-bit chunks; other widths are bit-cast to
/// an unsigned integer of the same size and emitted as one directive.
/// NOTE(review): chunks are taken in memory order via asBytes, which assumes
/// a little-endian layout shared by host and target — TODO confirm.
fn serializeFloat(comptime T: type, value: T, w: *std.Io.Writer) !void {
    switch (T) {
        f128 => {
            const bytes = std.mem.asBytes(&value);
            // Low 8 bytes first, then the high 8 bytes.
            const first = std.mem.bytesToValue(u64, bytes[0..8]);
            try serializeInt(first, .quad, w);
            const second = std.mem.bytesToValue(u64, bytes[8..16]);
            return serializeInt(second, .quad, w);
        },
        f80 => {
            const bytes = std.mem.asBytes(&value);
            const first = std.mem.bytesToValue(u64, bytes[0..8]);
            try serializeInt(first, .quad, w);
            // 80-bit value occupies 10 bytes; pad the remaining 6 to fill
            // the 16-byte storage slot.
            const second = std.mem.bytesToValue(u16, bytes[8..10]);
            try serializeInt(second, .short, w);
            return w.writeAll(" .zero 6\n");
        },
        else => {
            const size = @bitSizeOf(T);
            // Only sizes with a matching StorageUnit tag (8/16/32/64) reach here.
            const storage_unit = std.meta.intToEnum(StorageUnit, size) catch unreachable;
            const IntTy = @Type(.{ .int = .{ .signedness = .unsigned, .bits = size } });
            const int_val: IntTy = @bitCast(value);
            return serializeInt(int_val, storage_unit, w);
        },
    }
}
/// Report an unimplemented construct at token `tok` as an error diagnostic
/// and return error.FatalError to abort code generation.
pub fn todo(c: *AsmCodeGen, msg: []const u8, tok: Tree.TokenIndex) Error {
    const loc: Source.Location = c.tree.tokens.items(.loc)[tok];
    // Format short messages on the stack; fall back to the gpa for long ones.
    var sf = std.heap.stackFallback(1024, c.comp.gpa);
    var buf = std.ArrayList(u8).init(sf.get());
    defer buf.deinit();
    try buf.print("TODO: {s}", .{msg});
    try c.comp.diagnostics.add(.{
        .text = buf.items,
        .kind = .@"error",
        .location = loc.expand(c.comp),
    });
    return error.FatalError;
}
/// Emit data for an aggregate initializer. Not implemented yet; always
/// reports a fatal "TODO" diagnostic at the node's token.
fn emitAggregate(c: *AsmCodeGen, qt: QualType, node: Node.Index) !void {
    _ = qt;
    return c.todo("Codegen aggregates", node.tok(c.tree));
}
/// Emit the compile-time value of a single (non-aggregate-list) initializer
/// into the data section. Nodes without an entry in the value map emit nothing.
fn emitSingleValue(c: *AsmCodeGen, qt: QualType, node: Node.Index) !void {
    const value = c.tree.value_map.get(node) orelse return;
    const bit_size = qt.bitSizeof(c.comp);
    const scalar_kind = qt.scalarKind(c.comp);
    if (!scalar_kind.isReal()) {
        // _Complex values not implemented.
        return c.todo("Codegen _Complex values", node.tok(c.tree));
    } else if (scalar_kind.isInt()) {
        // Integers whose width has no StorageUnit tag (e.g. _BitInt) are TODO.
        const storage_unit = std.meta.intToEnum(StorageUnit, bit_size) catch return c.todo("Codegen _BitInt values", node.tok(c.tree));
        try c.data.print(" .{s} ", .{@tagName(storage_unit)});
        _ = try value.print(qt, c.comp, c.data);
        try c.data.writeByte('\n');
    } else if (scalar_kind.isFloat()) {
        // Dispatch on bit width to emit the float's exact bit pattern.
        switch (bit_size) {
            16 => return serializeFloat(f16, value.toFloat(f16, c.comp), c.data),
            32 => return serializeFloat(f32, value.toFloat(f32, c.comp), c.data),
            64 => return serializeFloat(f64, value.toFloat(f64, c.comp), c.data),
            80 => return serializeFloat(f80, value.toFloat(f80, c.comp), c.data),
            128 => return serializeFloat(f128, value.toFloat(f128, c.comp), c.data),
            else => unreachable,
        }
    } else if (scalar_kind.isPointer()) {
        return c.todo("Codegen pointer", node.tok(c.tree));
    } else if (qt.is(c.comp, .array)) {
        // Todo:
        // Handle truncated initializers e.g. char x[3] = "hello";
        // Zero out remaining bytes if initializer is shorter than storage capacity
        // Handle non-char strings
        const bytes = value.toBytes(c.comp);
        // .ascii when the literal already fills storage (no room for an
        // implicit NUL); .string otherwise — GNU as .string appends a NUL.
        const directive = if (bytes.len > bit_size / 8) "ascii" else "string";
        try c.data.print(" .{s} ", .{directive});
        try Value.printString(bytes, qt, c.comp, c.data);
        try c.data.writeByte('\n');
    } else unreachable;
}
/// Emit the value of an initializer expression into the data section.
/// Aggregate initializer lists are not implemented and report a TODO.
fn emitValue(c: *AsmCodeGen, qt: QualType, node: Node.Index) !void {
    const tag = node.get(c.tree);
    return switch (tag) {
        .array_init_expr,
        .struct_init_expr,
        .union_init_expr,
        => c.todo("Codegen multiple inits", node.tok(c.tree)),
        else => c.emitSingleValue(qt, node),
    };
}
/// Entry point for the x86_64 assembly backend: lower `tree` to assembler
/// text. Caller owns both slices of the returned Assembly.
pub fn genAsm(tree: *const Tree) Error!Assembly {
    const gpa = tree.comp.gpa;

    var data_buf: std.Io.Writer.Allocating = .init(gpa);
    defer data_buf.deinit();
    var text_buf: std.Io.Writer.Allocating = .init(gpa);
    defer text_buf.deinit();

    var cg: AsmCodeGen = .{
        .tree = tree,
        .comp = tree.comp,
        .text = &text_buf.writer,
        .data = &data_buf.writer,
    };

    cg.genDecls() catch |err| switch (err) {
        // The allocating writers only fail when allocation fails.
        error.WriteFailed, error.OutOfMemory => return error.OutOfMemory,
        error.FatalError => return error.FatalError,
    };

    const text_slice = try text_buf.toOwnedSlice();
    errdefer gpa.free(text_slice);
    return .{
        .text = text_slice,
        .data = try data_buf.toOwnedSlice(),
    };
}
/// Walk all top-level declarations and emit assembly for functions with
/// bodies and for variables; finishes with a non-executable-stack note.
fn genDecls(c: *AsmCodeGen) !void {
    if (c.tree.comp.code_gen_options.debug) {
        // Emit a .file directive per source for debug info.
        // NOTE(review): the `- 1` suggests source ids start at 1 — confirm.
        const sources = c.tree.comp.sources.values();
        for (sources) |source| {
            try c.data.print(" .file {d} \"{s}\"\n", .{ @intFromEnum(source.id) - 1, source.path });
        }
    }
    for (c.tree.root_decls.items) |decl| {
        switch (decl.get(c.tree)) {
            // Type-level declarations produce no code or data.
            .static_assert,
            .typedef,
            .struct_decl,
            .union_decl,
            .enum_decl,
            => {},
            .function => |function| {
                // Declarations without bodies generate nothing.
                if (function.body == null) continue;
                try c.genFn(function);
            },
            .variable => |variable| try c.genVar(variable),
            else => unreachable,
        }
    }
    // Mark the stack non-executable for GNU toolchains.
    try c.text.writeAll(" .section .note.GNU-stack,\"\",@progbits\n");
}
/// Generate code for a function body. Not implemented yet; reports a fatal
/// "TODO" diagnostic at the function's name token.
fn genFn(c: *AsmCodeGen, function: Node.Function) !void {
    return c.todo("Codegen functions", function.name_tok);
}
/// Emit data-section assembly for one variable declaration: linkage,
/// section selection (tdata/data/tbss/bss, per-symbol when data_sections),
/// alignment, and either its initializer value or zero fill.
fn genVar(c: *AsmCodeGen, variable: Node.Variable) !void {
    const comp = c.comp;
    const qt = variable.qt;
    // A declaration with no initializer is a tentative definition.
    const is_tentative = variable.initializer == null;
    const size = qt.sizeofOrNull(comp) orelse blk: {
        // tentative array definition assumed to have one element
        std.debug.assert(is_tentative and qt.is(c.comp, .array));
        break :blk qt.childType(c.comp).sizeof(comp);
    };
    const name = c.tree.tokSlice(variable.name_tok);
    const nat_align = qt.alignof(comp);
    // Arrays of 16+ bytes get their alignment bumped to at least 16.
    const alignment = if (qt.is(c.comp, .array) and size >= 16) @max(16, nat_align) else nat_align;
    // static => file-local symbol; otherwise external linkage.
    if (variable.storage_class == .static) {
        try c.data.print(" .local \"{s}\"\n", .{name});
    } else {
        try c.data.print(" .globl \"{s}\"\n", .{name});
    }
    // With -fcommon, tentative definitions become common symbols.
    if (is_tentative and comp.code_gen_options.common) {
        try c.data.print(" .comm \"{s}\", {d}, {d}\n", .{ name, size, alignment });
        return;
    }
    if (variable.initializer) |init| {
        // Initialized data: pick the section (thread-local and/or per-symbol),
        // then emit symbol metadata, label, and the value itself.
        if (variable.thread_local and comp.code_gen_options.data_sections) {
            try c.data.print(" .section .tdata.\"{s}\",\"awT\",@progbits\n", .{name});
        } else if (variable.thread_local) {
            try c.data.writeAll(" .section .tdata,\"awT\",@progbits\n");
        } else if (comp.code_gen_options.data_sections) {
            try c.data.print(" .section .data.\"{s}\",\"aw\",@progbits\n", .{name});
        } else {
            try c.data.writeAll(" .data\n");
        }
        try c.data.print(" .type \"{s}\", @object\n", .{name});
        try c.data.print(" .size \"{s}\", {d}\n", .{ name, size });
        try c.data.print(" .align {d}\n", .{alignment});
        try c.data.print("\"{s}\":\n", .{name});
        try c.emitValue(qt, init);
        return;
    }
    // Uninitialized data goes to a bss-style (@nobits) section and is
    // zero-filled by the loader; emit only alignment, label, and size.
    if (variable.thread_local and comp.code_gen_options.data_sections) {
        try c.data.print(" .section .tbss.\"{s}\",\"awT\",@nobits\n", .{name});
    } else if (variable.thread_local) {
        try c.data.writeAll(" .section .tbss,\"awT\",@nobits\n");
    } else if (comp.code_gen_options.data_sections) {
        try c.data.print(" .section .bss.\"{s}\",\"aw\",@nobits\n", .{name});
    } else {
        try c.data.writeAll(" .bss\n");
    }
    try c.data.print(" .align {d}\n", .{alignment});
    try c.data.print("\"{s}\":\n", .{name});
    try c.data.print(" .zero {d}\n", .{size});
}

View File

@ -1,12 +1,23 @@
pub const Assembly = @import("backend/Assembly.zig");
pub const CodeGenOptions = @import("backend/CodeGenOptions.zig");
pub const Interner = @import("backend/Interner.zig"); pub const Interner = @import("backend/Interner.zig");
pub const Ir = @import("backend/Ir.zig"); pub const Ir = @import("backend/Ir.zig");
pub const Object = @import("backend/Object.zig"); pub const Object = @import("backend/Object.zig");
pub const CallingConvention = enum { pub const CallingConvention = enum {
C, c,
stdcall, stdcall,
thiscall, thiscall,
vectorcall, vectorcall,
fastcall,
regcall,
riscv_vector,
aarch64_sve_pcs,
aarch64_vector_pcs,
arm_aapcs,
arm_aapcs_vfp,
x86_64_sysv,
x86_64_win,
}; };
pub const version_str = "aro-zig"; pub const version_str = "aro-zig";

20
lib/compiler/aro/backend/Assembly.zig vendored Normal file
View File

@ -0,0 +1,20 @@
const std = @import("std");
const Allocator = std.mem.Allocator;
/// Data-section contents; written before `text` by writeToFile.
data: []const u8,
/// Text-section contents.
text: []const u8,

const Assembly = @This();
/// Release both owned slices. The Assembly must not be used afterwards.
pub fn deinit(self: *const Assembly, gpa: Allocator) void {
    // Free data first, then text — the two slices are independent.
    for ([_][]const u8{ self.data, self.text }) |slice| {
        gpa.free(slice);
    }
}
/// Write the data section followed by the text section to `file` using a
/// single gathered write.
pub fn writeToFile(self: Assembly, file: std.fs.File) !void {
    // Declared var: writevAll may modify the iovec array while advancing
    // past partial writes.
    var vec: [2]std.posix.iovec_const = .{
        .{ .base = self.data.ptr, .len = self.data.len },
        .{ .base = self.text.ptr, .len = self.text.len },
    };
    return file.writevAll(&vec);
}

View File

@ -0,0 +1,64 @@
const std = @import("std");
/// Place uninitialized global variables in a common block
common: bool,
/// Place each function into its own section in the output file if the target supports arbitrary sections
func_sections: bool,
/// Place each data item into its own section in the output file if the target supports arbitrary sections
data_sections: bool,
/// Degree of position independence to use when generating code
pic_level: PicLevel,
/// Generate position-independent code that can only be linked into executables
is_pie: bool,
/// Requested optimization level (-O)
optimization_level: OptimizationLevel,
/// Generate debug information
debug: bool,
/// Position-independent-code level; values mirror the usual -fpic/-fPIC
/// distinction.
pub const PicLevel = enum(u8) {
    /// Do not generate position-independent code
    none = 0,
    /// Generate position-independent code (PIC) suitable for use in a shared library, if supported for the target machine.
    one = 1,
    /// If supported for the target machine, emit position-independent code, suitable for dynamic linking and avoiding
    /// any limit on the size of the global offset table.
    two = 2,
};
/// Optimization level requested via -O.
pub const OptimizationLevel = enum {
    @"0",
    @"1",
    @"2",
    @"3",
    /// Optimize for size
    s,
    /// Disregard strict standards compliance
    fast,
    /// Optimize debugging experience
    g,
    /// Optimize aggressively for size rather than speed
    z,

    /// Parse the text following -O; returns null if it is not a known level.
    pub fn fromString(str: []const u8) ?OptimizationLevel {
        // Every accepted spelling matches its tag name exactly, so the
        // stdlib lookup replaces the previous hand-rolled StaticStringMap.
        return std.meta.stringToEnum(OptimizationLevel, str);
    }
};
/// Defaults: no common blocks, no per-symbol sections, no PIC/PIE,
/// -O0, and no debug info.
pub const default: @This() = .{
    .common = false,
    .func_sections = false,
    .data_sections = false,
    .pic_level = .none,
    .is_pie = false,
    .optimization_level = .@"0",
    .debug = false,
};

View File

@ -8,14 +8,14 @@ const Limb = std.math.big.Limb;
const Interner = @This(); const Interner = @This();
map: std.AutoArrayHashMapUnmanaged(void, void) = .empty, map: std.AutoArrayHashMapUnmanaged(void, void) = .{},
items: std.MultiArrayList(struct { items: std.MultiArrayList(struct {
tag: Tag, tag: Tag,
data: u32, data: u32,
}) = .{}, }) = .{},
extra: std.ArrayListUnmanaged(u32) = .empty, extra: std.ArrayListUnmanaged(u32) = .{},
limbs: std.ArrayListUnmanaged(Limb) = .empty, limbs: std.ArrayListUnmanaged(Limb) = .{},
strings: std.ArrayListUnmanaged(u8) = .empty, strings: std.ArrayListUnmanaged(u8) = .{},
const KeyAdapter = struct { const KeyAdapter = struct {
interner: *const Interner, interner: *const Interner,
@ -65,6 +65,7 @@ pub const Key = union(enum) {
float: Float, float: Float,
complex: Complex, complex: Complex,
bytes: []const u8, bytes: []const u8,
pointer: Pointer,
pub const Float = union(enum) { pub const Float = union(enum) {
f16: f16, f16: f16,
@ -80,6 +81,12 @@ pub const Key = union(enum) {
cf80: [2]f80, cf80: [2]f80,
cf128: [2]f128, cf128: [2]f128,
}; };
pub const Pointer = struct {
/// NodeIndex of decl or compound literal whose address we are offsetting from
node: u32,
/// Offset in bytes
offset: Ref,
};
pub fn hash(key: Key) u32 { pub fn hash(key: Key) u32 {
var hasher = Hash.init(0); var hasher = Hash.init(0);
@ -199,6 +206,10 @@ pub const Key = union(enum) {
} }
return null; return null;
} }
pub fn toBigInt(key: Key, space: *Tag.Int.BigIntSpace) BigIntConst {
return key.int.toBigInt(space);
}
}; };
pub const Ref = enum(u32) { pub const Ref = enum(u32) {
@ -303,6 +314,8 @@ pub const Tag = enum(u8) {
bytes, bytes,
/// `data` is `Record` /// `data` is `Record`
record_ty, record_ty,
/// `data` is Pointer
pointer,
pub const Array = struct { pub const Array = struct {
len0: u32, len0: u32,
@ -322,6 +335,11 @@ pub const Tag = enum(u8) {
child: Ref, child: Ref,
}; };
pub const Pointer = struct {
node: u32,
offset: Ref,
};
pub const Int = struct { pub const Int = struct {
limbs_index: u32, limbs_index: u32,
limbs_len: u32, limbs_len: u32,
@ -606,6 +624,15 @@ pub fn put(i: *Interner, gpa: Allocator, key: Key) !Ref {
}), }),
}); });
}, },
.pointer => |info| {
i.items.appendAssumeCapacity(.{
.tag = .pointer,
.data = try i.addExtra(gpa, Tag.Pointer{
.node = info.node,
.offset = info.offset,
}),
});
},
.int => |repr| int: { .int => |repr| int: {
var space: Tag.Int.BigIntSpace = undefined; var space: Tag.Int.BigIntSpace = undefined;
const big = repr.toBigInt(&space); const big = repr.toBigInt(&space);
@ -792,6 +819,13 @@ pub fn get(i: *const Interner, ref: Ref) Key {
.child = vector_ty.child, .child = vector_ty.child,
} }; } };
}, },
.pointer => {
const pointer = i.extraData(Tag.Pointer, data);
return .{ .pointer = .{
.node = pointer.node,
.offset = pointer.offset,
} };
},
.u32 => .{ .int = .{ .u64 = data } }, .u32 => .{ .int = .{ .u64 = data } },
.i32 => .{ .int = .{ .i64 = @as(i32, @bitCast(data)) } }, .i32 => .{ .int = .{ .i64 = @as(i32, @bitCast(data)) } },
.int_positive, .int_negative => { .int_positive, .int_negative => {

View File

@ -26,9 +26,9 @@ pub const Builder = struct {
arena: std.heap.ArenaAllocator, arena: std.heap.ArenaAllocator,
interner: *Interner, interner: *Interner,
decls: std.StringArrayHashMapUnmanaged(Decl) = .empty, decls: std.StringArrayHashMapUnmanaged(Decl) = .{},
instructions: std.MultiArrayList(Ir.Inst) = .{}, instructions: std.MultiArrayList(Ir.Inst) = .{},
body: std.ArrayListUnmanaged(Ref) = .empty, body: std.ArrayListUnmanaged(Ref) = .{},
alloc_count: u32 = 0, alloc_count: u32 = 0,
arg_count: u32 = 0, arg_count: u32 = 0,
current_label: Ref = undefined, current_label: Ref = undefined,
@ -382,13 +382,14 @@ const ATTRIBUTE = std.Io.tty.Color.bright_yellow;
const RefMap = std.AutoArrayHashMap(Ref, void); const RefMap = std.AutoArrayHashMap(Ref, void);
pub fn dump(ir: *const Ir, gpa: Allocator, config: std.Io.tty.Config, w: anytype) !void { pub fn dump(ir: *const Ir, gpa: Allocator, config: std.Io.tty.Config, w: *std.Io.Writer) !void {
for (ir.decls.keys(), ir.decls.values()) |name, *decl| { for (ir.decls.keys(), ir.decls.values()) |name, *decl| {
try ir.dumpDecl(decl, gpa, name, config, w); try ir.dumpDecl(decl, gpa, name, config, w);
} }
try w.flush();
} }
fn dumpDecl(ir: *const Ir, decl: *const Decl, gpa: Allocator, name: []const u8, config: std.Io.tty.Config, w: anytype) !void { fn dumpDecl(ir: *const Ir, decl: *const Decl, gpa: Allocator, name: []const u8, config: std.Io.tty.Config, w: *std.Io.Writer) !void {
const tags = decl.instructions.items(.tag); const tags = decl.instructions.items(.tag);
const data = decl.instructions.items(.data); const data = decl.instructions.items(.data);
@ -609,7 +610,7 @@ fn dumpDecl(ir: *const Ir, decl: *const Decl, gpa: Allocator, name: []const u8,
try w.writeAll("}\n\n"); try w.writeAll("}\n\n");
} }
fn writeType(ir: Ir, ty_ref: Interner.Ref, config: std.Io.tty.Config, w: anytype) !void { fn writeType(ir: Ir, ty_ref: Interner.Ref, config: std.Io.tty.Config, w: *std.Io.Writer) !void {
const ty = ir.interner.get(ty_ref); const ty = ir.interner.get(ty_ref);
try config.setColor(w, TYPE); try config.setColor(w, TYPE);
switch (ty) { switch (ty) {
@ -639,7 +640,7 @@ fn writeType(ir: Ir, ty_ref: Interner.Ref, config: std.Io.tty.Config, w: anytype
} }
} }
fn writeValue(ir: Ir, val: Interner.Ref, config: std.Io.tty.Config, w: anytype) !void { fn writeValue(ir: Ir, val: Interner.Ref, config: std.Io.tty.Config, w: *std.Io.Writer) !void {
try config.setColor(w, LITERAL); try config.setColor(w, LITERAL);
const key = ir.interner.get(val); const key = ir.interner.get(val);
switch (key) { switch (key) {
@ -650,12 +651,12 @@ fn writeValue(ir: Ir, val: Interner.Ref, config: std.Io.tty.Config, w: anytype)
.float => |repr| switch (repr) { .float => |repr| switch (repr) {
inline else => |x| return w.print("{d}", .{@as(f64, @floatCast(x))}), inline else => |x| return w.print("{d}", .{@as(f64, @floatCast(x))}),
}, },
.bytes => |b| return std.zig.stringEscape(b, "", .{}, w), .bytes => |b| return std.zig.stringEscape(b, w),
else => unreachable, // not a value else => unreachable, // not a value
} }
} }
fn writeRef(ir: Ir, decl: *const Decl, ref_map: *RefMap, ref: Ref, config: std.Io.tty.Config, w: anytype) !void { fn writeRef(ir: Ir, decl: *const Decl, ref_map: *RefMap, ref: Ref, config: std.Io.tty.Config, w: *std.Io.Writer) !void {
assert(ref != .none); assert(ref != .none);
const index = @intFromEnum(ref); const index = @intFromEnum(ref);
const ty_ref = decl.instructions.items(.ty)[index]; const ty_ref = decl.instructions.items(.ty)[index];
@ -678,7 +679,7 @@ fn writeRef(ir: Ir, decl: *const Decl, ref_map: *RefMap, ref: Ref, config: std.I
try w.print(" %{d}", .{ref_index}); try w.print(" %{d}", .{ref_index});
} }
fn writeNewRef(ir: Ir, decl: *const Decl, ref_map: *RefMap, ref: Ref, config: std.Io.tty.Config, w: anytype) !void { fn writeNewRef(ir: Ir, decl: *const Decl, ref_map: *RefMap, ref: Ref, config: std.Io.tty.Config, w: *std.Io.Writer) !void {
try ref_map.put(ref, {}); try ref_map.put(ref, {});
try w.writeAll(" "); try w.writeAll(" ");
try ir.writeRef(decl, ref_map, ref, config, w); try ir.writeRef(decl, ref_map, ref, config, w);
@ -687,7 +688,7 @@ fn writeNewRef(ir: Ir, decl: *const Decl, ref_map: *RefMap, ref: Ref, config: st
try config.setColor(w, INST); try config.setColor(w, INST);
} }
fn writeLabel(decl: *const Decl, label_map: *RefMap, ref: Ref, config: std.Io.tty.Config, w: anytype) !void { fn writeLabel(decl: *const Decl, label_map: *RefMap, ref: Ref, config: std.Io.tty.Config, w: *std.Io.Writer) !void {
assert(ref != .none); assert(ref != .none);
const index = @intFromEnum(ref); const index = @intFromEnum(ref);
const label = decl.instructions.items(.data)[index].label; const label = decl.instructions.items(.data)[index].label;

View File

@ -65,9 +65,9 @@ pub fn addRelocation(obj: *Object, name: []const u8, section: Section, address:
} }
} }
pub fn finish(obj: *Object, file: std.fs.File) !void { pub fn finish(obj: *Object, w: *std.Io.Writer) !void {
switch (obj.format) { switch (obj.format) {
.elf => return @as(*Elf, @alignCast(@fieldParentPtr("obj", obj))).finish(file), .elf => return @as(*Elf, @alignCast(@fieldParentPtr("obj", obj))).finish(w),
else => unreachable, else => unreachable,
} }
} }

View File

@ -5,7 +5,7 @@ const Object = @import("../Object.zig");
const Section = struct { const Section = struct {
data: std.array_list.Managed(u8), data: std.array_list.Managed(u8),
relocations: std.ArrayListUnmanaged(Relocation) = .empty, relocations: std.ArrayListUnmanaged(Relocation) = .{},
flags: u64, flags: u64,
type: u32, type: u32,
index: u16 = undefined, index: u16 = undefined,
@ -37,9 +37,9 @@ const Elf = @This();
obj: Object, obj: Object,
/// The keys are owned by the Codegen.tree /// The keys are owned by the Codegen.tree
sections: std.StringHashMapUnmanaged(*Section) = .empty, sections: std.StringHashMapUnmanaged(*Section) = .{},
local_symbols: std.StringHashMapUnmanaged(*Symbol) = .empty, local_symbols: std.StringHashMapUnmanaged(*Symbol) = .{},
global_symbols: std.StringHashMapUnmanaged(*Symbol) = .empty, global_symbols: std.StringHashMapUnmanaged(*Symbol) = .{},
unnamed_symbol_mangle: u32 = 0, unnamed_symbol_mangle: u32 = 0,
strtab_len: u64 = strtab_default.len, strtab_len: u64 = strtab_default.len,
arena: std.heap.ArenaAllocator, arena: std.heap.ArenaAllocator,
@ -170,12 +170,8 @@ pub fn addRelocation(elf: *Elf, name: []const u8, section_kind: Object.Section,
/// relocations /// relocations
/// strtab /// strtab
/// section headers /// section headers
pub fn finish(elf: *Elf, file: std.fs.File) !void { pub fn finish(elf: *Elf, w: *std.Io.Writer) !void {
var file_buffer: [1024]u8 = undefined; var num_sections: std.elf.Half = additional_sections;
var file_writer = file.writer(&file_buffer);
const w = &file_writer.interface;
var num_sections: std.elf.Elf64_Half = additional_sections;
var relocations_len: std.elf.Elf64_Off = 0; var relocations_len: std.elf.Elf64_Off = 0;
var sections_len: std.elf.Elf64_Off = 0; var sections_len: std.elf.Elf64_Off = 0;
{ {
@ -196,8 +192,9 @@ pub fn finish(elf: *Elf, file: std.fs.File) !void {
const strtab_offset = rela_offset + relocations_len; const strtab_offset = rela_offset + relocations_len;
const sh_offset = strtab_offset + elf.strtab_len; const sh_offset = strtab_offset + elf.strtab_len;
const sh_offset_aligned = std.mem.alignForward(u64, sh_offset, 16); const sh_offset_aligned = std.mem.alignForward(u64, sh_offset, 16);
const endian = elf.obj.target.cpu.arch.endian();
const elf_header = std.elf.Elf64_Ehdr{ const elf_header: std.elf.Elf64_Ehdr = .{
.e_ident = .{ 0x7F, 'E', 'L', 'F', 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, .e_ident = .{ 0x7F, 'E', 'L', 'F', 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
.e_type = std.elf.ET.REL, // we only produce relocatables .e_type = std.elf.ET.REL, // we only produce relocatables
.e_machine = elf.obj.target.toElfMachine(), .e_machine = elf.obj.target.toElfMachine(),
@ -213,7 +210,7 @@ pub fn finish(elf: *Elf, file: std.fs.File) !void {
.e_shnum = num_sections, .e_shnum = num_sections,
.e_shstrndx = strtab_index, .e_shstrndx = strtab_index,
}; };
try w.writeStruct(elf_header); try w.writeStruct(elf_header, endian);
// write contents of sections // write contents of sections
{ {
@ -222,13 +219,13 @@ pub fn finish(elf: *Elf, file: std.fs.File) !void {
} }
// pad to 8 bytes // pad to 8 bytes
try w.writeByteNTimes(0, @intCast(symtab_offset_aligned - symtab_offset)); try w.splatByteAll(0, @intCast(symtab_offset_aligned - symtab_offset));
var name_offset: u32 = strtab_default.len; var name_offset: u32 = strtab_default.len;
// write symbols // write symbols
{ {
// first symbol must be null // first symbol must be null
try w.writeStruct(std.mem.zeroes(std.elf.Elf64_Sym)); try w.writeStruct(std.mem.zeroes(std.elf.Elf64_Sym), endian);
var sym_index: u16 = 1; var sym_index: u16 = 1;
var it = elf.local_symbols.iterator(); var it = elf.local_symbols.iterator();
@ -241,7 +238,7 @@ pub fn finish(elf: *Elf, file: std.fs.File) !void {
.st_shndx = if (sym.section) |some| some.index else 0, .st_shndx = if (sym.section) |some| some.index else 0,
.st_value = sym.offset, .st_value = sym.offset,
.st_size = sym.size, .st_size = sym.size,
}); }, endian);
sym.index = sym_index; sym.index = sym_index;
sym_index += 1; sym_index += 1;
name_offset += @intCast(entry.key_ptr.len + 1); // +1 for null byte name_offset += @intCast(entry.key_ptr.len + 1); // +1 for null byte
@ -256,7 +253,7 @@ pub fn finish(elf: *Elf, file: std.fs.File) !void {
.st_shndx = if (sym.section) |some| some.index else 0, .st_shndx = if (sym.section) |some| some.index else 0,
.st_value = sym.offset, .st_value = sym.offset,
.st_size = sym.size, .st_size = sym.size,
}); }, endian);
sym.index = sym_index; sym.index = sym_index;
sym_index += 1; sym_index += 1;
name_offset += @intCast(entry.key_ptr.len + 1); // +1 for null byte name_offset += @intCast(entry.key_ptr.len + 1); // +1 for null byte
@ -272,7 +269,7 @@ pub fn finish(elf: *Elf, file: std.fs.File) !void {
.r_offset = rela.offset, .r_offset = rela.offset,
.r_addend = rela.addend, .r_addend = rela.addend,
.r_info = (@as(u64, rela.symbol.index) << 32) | rela.type, .r_info = (@as(u64, rela.symbol.index) << 32) | rela.type,
}); }, endian);
} }
} }
} }
@ -294,13 +291,13 @@ pub fn finish(elf: *Elf, file: std.fs.File) !void {
} }
// pad to 16 bytes // pad to 16 bytes
try w.writeByteNTimes(0, @intCast(sh_offset_aligned - sh_offset)); try w.splatByteAll(0, @intCast(sh_offset_aligned - sh_offset));
// mandatory null header // mandatory null header
try w.writeStruct(std.mem.zeroes(std.elf.Elf64_Shdr)); try w.writeStruct(std.mem.zeroes(std.elf.Elf64_Shdr), endian);
// write strtab section header // write strtab section header
{ {
const sect_header = std.elf.Elf64_Shdr{ const sect_header: std.elf.Elf64_Shdr = .{
.sh_name = strtab_name, .sh_name = strtab_name,
.sh_type = std.elf.SHT_STRTAB, .sh_type = std.elf.SHT_STRTAB,
.sh_flags = 0, .sh_flags = 0,
@ -312,12 +309,12 @@ pub fn finish(elf: *Elf, file: std.fs.File) !void {
.sh_addralign = 1, .sh_addralign = 1,
.sh_entsize = 0, .sh_entsize = 0,
}; };
try w.writeStruct(sect_header); try w.writeStruct(sect_header, endian);
} }
// write symtab section header // write symtab section header
{ {
const sect_header = std.elf.Elf64_Shdr{ const sect_header: std.elf.Elf64_Shdr = .{
.sh_name = symtab_name, .sh_name = symtab_name,
.sh_type = std.elf.SHT_SYMTAB, .sh_type = std.elf.SHT_SYMTAB,
.sh_flags = 0, .sh_flags = 0,
@ -329,7 +326,7 @@ pub fn finish(elf: *Elf, file: std.fs.File) !void {
.sh_addralign = 8, .sh_addralign = 8,
.sh_entsize = @sizeOf(std.elf.Elf64_Sym), .sh_entsize = @sizeOf(std.elf.Elf64_Sym),
}; };
try w.writeStruct(sect_header); try w.writeStruct(sect_header, endian);
} }
// remaining section headers // remaining section headers
@ -352,7 +349,7 @@ pub fn finish(elf: *Elf, file: std.fs.File) !void {
.sh_info = 0, .sh_info = 0,
.sh_addralign = if (sect.flags & std.elf.SHF_EXECINSTR != 0) 16 else 1, .sh_addralign = if (sect.flags & std.elf.SHF_EXECINSTR != 0) 16 else 1,
.sh_entsize = 0, .sh_entsize = 0,
}); }, endian);
if (rela_count != 0) { if (rela_count != 0) {
const size = rela_count * @sizeOf(std.elf.Elf64_Rela); const size = rela_count * @sizeOf(std.elf.Elf64_Rela);
@ -367,7 +364,7 @@ pub fn finish(elf: *Elf, file: std.fs.File) !void {
.sh_info = sect.index, .sh_info = sect.index,
.sh_addralign = 8, .sh_addralign = 8,
.sh_entsize = @sizeOf(std.elf.Elf64_Rela), .sh_entsize = @sizeOf(std.elf.Elf64_Rela),
}); }, endian);
rela_sect_offset += size; rela_sect_offset += size;
} }

80
lib/compiler/aro/main.zig vendored Normal file
View File

@ -0,0 +1,80 @@
const std = @import("std");
const Allocator = mem.Allocator;
const mem = std.mem;
const process = std.process;
const aro = @import("aro");
const Compilation = aro.Compilation;
const Diagnostics = aro.Diagnostics;
const Driver = aro.Driver;
const Toolchain = aro.Toolchain;
const assembly_backend = @import("assembly_backend");
var general_purpose_allocator = std.heap.GeneralPurposeAllocator(.{}){};
/// Entry point for the standalone Aro driver executable.
/// Sets up allocators and diagnostics, runs the driver, and returns the
/// process exit status: 0 when no errors were emitted, 1 otherwise.
pub fn main() u8 {
    // Use the raw C allocator when linking libc; otherwise fall back to the
    // general purpose allocator, which is leak-checked on exit below.
    const gpa = if (@import("builtin").link_libc)
        std.heap.raw_c_allocator
    else
        general_purpose_allocator.allocator();
    defer if (!@import("builtin").link_libc) {
        _ = general_purpose_allocator.deinit();
    };
    // Arena for allocations that live for the whole process (e.g. argv).
    var arena_instance = std.heap.ArenaAllocator.init(gpa);
    defer arena_instance.deinit();
    const arena = arena_instance.allocator();
    // In release modes, terminate via process.exit instead of unwinding,
    // skipping deferred cleanup for a faster shutdown.
    const fast_exit = @import("builtin").mode != .Debug;
    const args = process.argsAlloc(arena) catch {
        std.debug.print("out of memory\n", .{});
        if (fast_exit) process.exit(1);
        return 1;
    };
    // Path of this executable, handed to the Driver as `aro_name`.
    const aro_name = std.fs.selfExePathAlloc(gpa) catch {
        std.debug.print("unable to find Aro executable path\n", .{});
        if (fast_exit) process.exit(1);
        return 1;
    };
    defer gpa.free(aro_name);
    // Diagnostics are rendered to stderr through a small buffered writer,
    // with color support detected from the stderr file handle.
    var stderr_buf: [1024]u8 = undefined;
    var stderr = std.fs.File.stderr().writer(&stderr_buf);
    var diagnostics: Diagnostics = .{
        .output = .{ .to_writer = .{
            .color = .detect(stderr.file),
            .writer = &stderr.interface,
        } },
    };
    var comp = Compilation.initDefault(gpa, arena, &diagnostics, std.fs.cwd()) catch |er| switch (er) {
        error.OutOfMemory => {
            std.debug.print("out of memory\n", .{});
            if (fast_exit) process.exit(1);
            return 1;
        },
    };
    defer comp.deinit();
    var driver: Driver = .{ .comp = &comp, .aro_name = aro_name, .diagnostics = &diagnostics };
    defer driver.deinit();
    var toolchain: Toolchain = .{ .driver = &driver, .filesystem = .{ .real = comp.cwd } };
    defer toolchain.deinit();
    // Run the compilation. On FatalError, only the diagnostics statistics
    // are printed here (the diagnostics themselves go through `diagnostics`).
    driver.main(&toolchain, args, fast_exit, assembly_backend.genAsm) catch |er| switch (er) {
        error.OutOfMemory => {
            std.debug.print("out of memory\n", .{});
            if (fast_exit) process.exit(1);
            return 1;
        },
        error.FatalError => {
            driver.printDiagnosticsStats();
            if (fast_exit) process.exit(1);
            return 1;
        },
    };
    if (fast_exit) process.exit(@intFromBool(comp.diagnostics.errors != 0));
    return @intFromBool(diagnostics.errors != 0);
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,301 @@
const std = @import("std");
/// Mirrors C's `abs`, including the standard C library bug: the most
/// negative `c_int` has no positive counterpart, so it is returned unchanged.
pub inline fn abs(val: c_int) c_int {
    if (val == std.math.minInt(c_int)) return val;
    return @intCast(@abs(val));
}
pub inline fn assume(cond: bool) void {
if (!cond) unreachable;
}
pub inline fn bswap16(val: u16) u16 {
return @byteSwap(val);
}
pub inline fn bswap32(val: u32) u32 {
return @byteSwap(val);
}
pub inline fn bswap64(val: u64) u64 {
return @byteSwap(val);
}
pub inline fn ceilf(val: f32) f32 {
return @ceil(val);
}
pub inline fn ceil(val: f64) f64 {
return @ceil(val);
}
/// Returns the number of leading 0-bits in `val`, starting at the most
/// significant bit position. C leaves `val == 0` undefined; here the result
/// is the full bit width of `c_uint`.
pub inline fn clz(val: c_uint) c_int {
    @setRuntimeSafety(false);
    const leading: c_uint = @clz(val);
    return @bitCast(leading);
}
pub inline fn constant_p(expr: anytype) c_int {
_ = expr;
return @intFromBool(false);
}
pub inline fn cosf(val: f32) f32 {
return @cos(val);
}
pub inline fn cos(val: f64) f64 {
return @cos(val);
}
/// Returns the number of trailing 0-bits in `val`, starting at the least
/// significant bit position. C leaves `val == 0` undefined; here the result
/// is the full bit width of `c_uint`.
pub inline fn ctz(val: c_uint) c_int {
    @setRuntimeSafety(false);
    const trailing: c_uint = @ctz(val);
    return @bitCast(trailing);
}
pub inline fn exp2f(val: f32) f32 {
return @exp2(val);
}
pub inline fn exp2(val: f64) f64 {
return @exp2(val);
}
pub inline fn expf(val: f32) f32 {
return @exp(val);
}
pub inline fn exp(val: f64) f64 {
return @exp(val);
}
/// Implements `__builtin_expect`: evaluates to `expr` unchanged. In C the
/// second argument is a branch-prediction hint for the compiler; this
/// implementation simply discards it.
pub inline fn expect(expr: c_long, c: c_long) c_long {
    _ = c; // the expected-value hint has no effect here
    return expr;
}
pub inline fn fabsf(val: f32) f32 {
return @abs(val);
}
pub inline fn fabs(val: f64) f64 {
return @abs(val);
}
pub inline fn floorf(val: f32) f32 {
return @floor(val);
}
pub inline fn floor(val: f64) f64 {
return @floor(val);
}
pub inline fn has_builtin(func: anytype) c_int {
_ = func;
return @intFromBool(true);
}
pub inline fn huge_valf() f32 {
return std.math.inf(f32);
}
pub inline fn inff() f32 {
return std.math.inf(f32);
}
/// Similar to isinf, except the return value is -1 for an argument of -Inf
/// and 1 for an argument of +Inf; 0 for any finite value or NaN.
pub inline fn isinf_sign(x: anytype) c_int {
    if (std.math.isNegativeInf(x)) return -1;
    if (std.math.isPositiveInf(x)) return 1;
    return 0;
}
pub inline fn isinf(x: anytype) c_int {
return @intFromBool(std.math.isInf(x));
}
pub inline fn isnan(x: anytype) c_int {
return @intFromBool(std.math.isNan(x));
}
/// Standard C Library bug: The absolute value of the most negative integer remains negative.
pub inline fn labs(val: c_long) c_long {
return if (val == std.math.minInt(c_long)) val else @intCast(@abs(val));
}
/// Standard C Library bug: The absolute value of the most negative integer remains negative.
pub inline fn llabs(val: c_longlong) c_longlong {
return if (val == std.math.minInt(c_longlong)) val else @intCast(@abs(val));
}
pub inline fn log10f(val: f32) f32 {
return @log10(val);
}
pub inline fn log10(val: f64) f64 {
return @log10(val);
}
pub inline fn log2f(val: f32) f32 {
return @log2(val);
}
pub inline fn log2(val: f64) f64 {
return @log2(val);
}
pub inline fn logf(val: f32) f32 {
return @log(val);
}
pub inline fn log(val: f64) f64 {
return @log(val);
}
/// Implements `__builtin___memcpy_chk`: a checked memcpy that panics when
/// `len` exceeds the known remaining object size. Returns `dst` like C's
/// memcpy. `dst`/`src` may only be null when `len` is zero.
pub inline fn memcpy_chk(
    noalias dst: ?*anyopaque,
    noalias src: ?*const anyopaque,
    len: usize,
    remaining: usize,
) ?*anyopaque {
    if (len > remaining) @panic("__builtin___memcpy_chk called with len > remaining");
    if (len != 0) {
        const dst_bytes: [*]u8 = @ptrCast(dst.?);
        const src_bytes: [*]const u8 = @ptrCast(src.?);
        @memcpy(dst_bytes[0..len], src_bytes);
    }
    return dst;
}
/// Implements `__builtin_memcpy`. Returns `dst` like C's memcpy.
/// `dst`/`src` may only be null when `len` is zero.
pub inline fn memcpy(
    noalias dst: ?*anyopaque,
    noalias src: ?*const anyopaque,
    len: usize,
) ?*anyopaque {
    if (len != 0) {
        const dst_bytes: [*]u8 = @ptrCast(dst.?);
        const src_bytes: [*]const u8 = @ptrCast(src.?);
        @memcpy(dst_bytes[0..len], src_bytes);
    }
    return dst;
}
/// Implements `__builtin___memset_chk`: a checked memset that panics when
/// `len` exceeds the known remaining object size. The fill value is the low
/// byte of `val`, as in C's memset. Returns `dst`.
pub inline fn memset_chk(
    dst: ?*anyopaque,
    val: c_int,
    len: usize,
    remaining: usize,
) ?*anyopaque {
    if (len > remaining) @panic("__builtin___memset_chk called with len > remaining");
    const fill_byte: u8 = @bitCast(@as(i8, @truncate(val)));
    const dst_bytes: [*c]u8 = @ptrCast(dst);
    @memset(dst_bytes[0..len], fill_byte);
    return dst;
}
/// Implements `__builtin_memset`. The fill value is the low byte of `val`,
/// as in C's memset. Returns `dst`.
pub inline fn memset(dst: ?*anyopaque, val: c_int, len: usize) ?*anyopaque {
    const fill_byte: u8 = @bitCast(@as(i8, @truncate(val)));
    const dst_bytes: [*c]u8 = @ptrCast(dst);
    @memset(dst_bytes[0..len], fill_byte);
    return dst;
}
/// Implements `__builtin_mul_overflow`: stores `a * b` (wrapped on overflow)
/// into `result` and returns 1 if the multiplication overflowed, 0 otherwise.
pub fn mul_overflow(a: anytype, b: anytype, result: *@TypeOf(a, b)) c_int {
    const product, const overflow_bit = @mulWithOverflow(a, b);
    result.* = product;
    return overflow_bit;
}
/// Returns a quiet NaN whose significand encodes `tagp`, mirroring GCC's
/// `__builtin_nanf` as described at
/// https://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html#index-_005f_005fbuiltin_005fnan
/// The string is parsed as by strtol (base recognized from a leading `0` or
/// `0x` prefix), the parsed number is placed in the least significant bits
/// of the significand (truncated to fit), and the result is forced to be a
/// quiet NaN.
///
/// If `tagp` is empty or contains any non-numeric characters, the returned
/// NaN has a zero payload in the parsed bits.
pub inline fn nanf(tagp: []const u8) f32 {
    const parsed_tag = std.fmt.parseUnsigned(c_ulong, tagp, 0) catch 0;
    // Single-precision trailing significand is 23 bits; truncate to fit.
    const significand: u23 = @truncate(parsed_tag);
    const quiet_nan_bits: u32 = @bitCast(std.math.nan(f32));
    return @bitCast(quiet_nan_bits | significand);
}
/// Implements `__builtin_object_size` for the case where the pointed-to
/// object cannot be determined at compile time. Per gcc (and matching clang)
/// semantics — https://gcc.gnu.org/onlinedocs/gcc/Object-Size-Checking.html —
/// the result is then (size_t)-1 for type 0 or 1 and (size_t)0 for type 2 or 3.
pub inline fn object_size(ptr: ?*const anyopaque, ty: c_int) usize {
    _ = ptr;
    return switch (ty) {
        0, 1 => std.math.maxInt(usize),
        2, 3 => 0,
        else => unreachable,
    };
}
/// Implements `__builtin_popcount`. The popcount of a `c_uint` always fits
/// in a `c_int`, so the bitcast below cannot change the value.
pub inline fn popcount(val: c_uint) c_int {
    @setRuntimeSafety(false);
    const set_bits: c_uint = @popCount(val);
    return @bitCast(set_bits);
}
pub inline fn roundf(val: f32) f32 {
return @round(val);
}
pub inline fn round(val: f64) f64 {
return @round(val);
}
pub inline fn signbitf(val: f32) c_int {
return @intFromBool(std.math.signbit(val));
}
pub inline fn signbit(val: f64) c_int {
return @intFromBool(std.math.signbit(val));
}
pub inline fn sinf(val: f32) f32 {
return @sin(val);
}
pub inline fn sin(val: f64) f64 {
return @sin(val);
}
pub inline fn sqrtf(val: f32) f32 {
return @sqrt(val);
}
pub inline fn sqrt(val: f64) f64 {
return @sqrt(val);
}
/// Implements `__builtin_strcmp` over null-terminated strings, returning
/// exactly -1, 0, or 1 (C only guarantees the sign, so this is conforming).
pub inline fn strcmp(s1: [*c]const u8, s2: [*c]const u8) c_int {
    const order = std.mem.orderZ(u8, s1, s2);
    return switch (order) {
        .lt => -1,
        .eq => 0,
        .gt => 1,
    };
}
/// Implements `__builtin_strlen`: length of a null-terminated string,
/// not counting the terminator.
pub inline fn strlen(s: [*c]const u8) usize {
    const bytes = std.mem.sliceTo(s, 0);
    return bytes.len;
}
pub inline fn truncf(val: f32) f32 {
return @trunc(val);
}
pub inline fn trunc(val: f64) f64 {
return @trunc(val);
}
pub inline fn @"unreachable"() noreturn {
unreachable;
}

View File

@ -0,0 +1,413 @@
const std = @import("std");
/// "Usual arithmetic conversions" from C11 standard 6.3.1.8:
/// the common type both operands of a binary arithmetic operator are
/// converted to before the operation is performed.
pub fn ArithmeticConversion(comptime A: type, comptime B: type) type {
    // Floating-point wins over integers; a wider float wins over a narrower one.
    if (A == c_longdouble or B == c_longdouble) return c_longdouble;
    if (A == f80 or B == f80) return f80;
    if (A == f64 or B == f64) return f64;
    if (A == f32 or B == f32) return f32;
    // Both operands are integers: apply integer promotion first.
    const A_Promoted = PromotedIntType(A);
    const B_Promoted = PromotedIntType(B);
    comptime {
        // Promotion guarantees at least the rank of `int` (C11 6.3.1.1).
        std.debug.assert(integerRank(A_Promoted) >= integerRank(c_int));
        std.debug.assert(integerRank(B_Promoted) >= integerRank(c_int));
    }
    if (A_Promoted == B_Promoted) return A_Promoted;
    const a_signed = @typeInfo(A_Promoted).int.signedness == .signed;
    const b_signed = @typeInfo(B_Promoted).int.signedness == .signed;
    if (a_signed == b_signed) {
        // Same signedness: the higher-ranked type wins.
        return if (integerRank(A_Promoted) > integerRank(B_Promoted)) A_Promoted else B_Promoted;
    }
    // Mixed signedness (C11 6.3.1.8 rules):
    const SignedType = if (a_signed) A_Promoted else B_Promoted;
    const UnsignedType = if (!a_signed) A_Promoted else B_Promoted;
    // The unsigned type wins when its rank is at least the signed type's.
    if (integerRank(UnsignedType) >= integerRank(SignedType)) return UnsignedType;
    // The signed type wins when it can represent all unsigned values.
    if (std.math.maxInt(SignedType) >= std.math.maxInt(UnsignedType)) return SignedType;
    // Otherwise: the unsigned counterpart of the signed type.
    return ToUnsigned(SignedType);
}
/// Integer promotion described in C11 6.3.1.1.2: maps a (possibly small)
/// integer type to the type it is promoted to in arithmetic expressions.
/// Types whose rank is already >= `int` are returned unchanged.
fn PromotedIntType(comptime T: type) type {
    return switch (T) {
        bool, c_short => c_int,
        // `unsigned short` promotes to `int` unless it is as wide as `int`,
        // in which case `int` cannot hold all of its values.
        c_ushort => if (@sizeOf(c_ushort) == @sizeOf(c_int)) c_uint else c_int,
        c_int, c_uint, c_long, c_ulong, c_longlong, c_ulonglong => T,
        else => switch (@typeInfo(T)) {
            .comptime_int => @compileError("Cannot promote `" ++ @typeName(T) ++ "`; a fixed-size number type is required"),
            // promote to c_int if it can represent all values of T
            .int => |int_info| if (int_info.bits < @bitSizeOf(c_int))
                c_int
                // otherwise, restore the original C type
            else if (int_info.bits == @bitSizeOf(c_int))
                if (int_info.signedness == .unsigned) c_uint else c_int
            else if (int_info.bits <= @bitSizeOf(c_long))
                if (int_info.signedness == .unsigned) c_ulong else c_long
            else if (int_info.bits <= @bitSizeOf(c_longlong))
                if (int_info.signedness == .unsigned) c_ulonglong else c_longlong
            else
                @compileError("Cannot promote `" ++ @typeName(T) ++ "`; a C ABI type is required"),
            else => @compileError("Attempted to promote invalid type `" ++ @typeName(T) ++ "`"),
        },
    };
}
/// C11 6.3.1.1.1: integer conversion rank. Higher rank wins in the usual
/// arithmetic conversions; only the types reachable after promotion are
/// supported here.
fn integerRank(comptime T: type) u8 {
    if (T == bool) {
        return 0;
    } else if (T == u8 or T == i8) {
        return 1;
    } else if (T == c_short or T == c_ushort) {
        return 2;
    } else if (T == c_int or T == c_uint) {
        return 3;
    } else if (T == c_long or T == c_ulong) {
        return 4;
    } else if (T == c_longlong or T == c_ulonglong) {
        return 5;
    } else {
        @compileError("integer rank not supported for `" ++ @typeName(T) ++ "`");
    }
}
/// Returns the unsigned counterpart of a signed C integer type of rank
/// `int` or higher (the only types this can be reached with from
/// `ArithmeticConversion`).
fn ToUnsigned(comptime T: type) type {
    if (T == c_int) return c_uint;
    if (T == c_long) return c_ulong;
    if (T == c_longlong) return c_ulonglong;
    @compileError("Cannot convert `" ++ @typeName(T) ++ "` to unsigned");
}
/// Constructs a [*c] pointer with the const and volatile annotations
/// from SelfType for pointing to a C flexible array of ElementType.
pub fn FlexibleArrayType(comptime SelfType: type, comptime ElementType: type) type {
switch (@typeInfo(SelfType)) {
.pointer => |ptr| {
return @Type(.{ .pointer = .{
.size = .c,
.is_const = ptr.is_const,
.is_volatile = ptr.is_volatile,
.alignment = @alignOf(ElementType),
.address_space = .generic,
.child = ElementType,
.is_allowzero = true,
.sentinel_ptr = null,
} });
},
else => |info| @compileError("Invalid self type \"" ++ @tagName(info) ++ "\" for flexible array getter: " ++ @typeName(SelfType)),
}
}
/// Promote the type of an integer literal until it fits as C would.
pub fn promoteIntLiteral(
comptime SuffixType: type,
comptime number: comptime_int,
comptime base: CIntLiteralBase,
) PromoteIntLiteralReturnType(SuffixType, number, base) {
return number;
}
const CIntLiteralBase = enum { decimal, octal, hex };
fn PromoteIntLiteralReturnType(comptime SuffixType: type, comptime number: comptime_int, comptime base: CIntLiteralBase) type {
const signed_decimal = [_]type{ c_int, c_long, c_longlong, c_ulonglong };
const signed_oct_hex = [_]type{ c_int, c_uint, c_long, c_ulong, c_longlong, c_ulonglong };
const unsigned = [_]type{ c_uint, c_ulong, c_ulonglong };
const list: []const type = if (@typeInfo(SuffixType).int.signedness == .unsigned)
&unsigned
else if (base == .decimal)
&signed_decimal
else
&signed_oct_hex;
var pos = std.mem.indexOfScalar(type, list, SuffixType).?;
while (pos < list.len) : (pos += 1) {
if (number >= std.math.minInt(list[pos]) and number <= std.math.maxInt(list[pos])) {
return list[pos];
}
}
@compileError("Integer literal is too large");
}
/// Convert a clang `__builtin_shufflevector` index to a Zig `@shuffle` index.
/// clang requires the index arguments to be integer constants; a negative
/// `this_index` means "don't care" (mapped to `undefined` here). Indices at
/// or beyond `source_vector_len` select from the second vector, which Zig
/// encodes as the bitwise complement of the second-vector element index.
/// See https://ziglang.org/documentation/master/#shuffle and
/// https://clang.llvm.org/docs/LanguageExtensions.html#langext-builtin-shufflevector
pub fn shuffleVectorIndex(comptime this_index: c_int, comptime source_vector_len: usize) i32 {
    const unsigned_index = std.math.cast(usize, this_index) orelse return undefined;
    if (unsigned_index < source_vector_len) {
        return @intCast(this_index);
    }
    return ~@as(i32, @intCast(unsigned_index - source_vector_len));
}
/// C `%` operator for signed integers.
/// C requires that `(a/b)*b + a%b == a` whenever `a/b` is representable;
/// the quotient is unrepresentable for `b == 0` or `a == minInt, b == -1`,
/// which is undefined behavior in C and safety-checked undefined behavior
/// here (via `@divTrunc` / `@rem`).
pub fn signedRemainder(numerator: anytype, denominator: anytype) @TypeOf(numerator, denominator) {
    std.debug.assert(@typeInfo(@TypeOf(numerator, denominator)).int.signedness == .signed);
    if (denominator <= 0) {
        // Derive the remainder from the identity above, since @rem requires
        // a positive divisor.
        return numerator - @divTrunc(numerator, denominator) * denominator;
    }
    return @rem(numerator, denominator);
}
/// Given a type and value, cast the value to the type as c would.
/// Dispatches on the destination type first, then on the source type,
/// covering pointer<->integer, integer<->integer, float conversions,
/// union initialization, and bool truthiness. Falls through to a plain
/// coercion for any combination not explicitly handled.
pub fn cast(comptime DestType: type, target: anytype) DestType {
    // this function should behave like transCCast in translate-c, except it's for macros
    const SourceType = @TypeOf(target);
    switch (@typeInfo(DestType)) {
        // Casting to a function type yields a pointer to it.
        .@"fn" => return castToPtr(*const DestType, SourceType, target),
        .pointer => return castToPtr(DestType, SourceType, target),
        .optional => |dest_opt| {
            if (@typeInfo(dest_opt.child) == .pointer) {
                return castToPtr(DestType, SourceType, target);
            } else if (@typeInfo(dest_opt.child) == .@"fn") {
                return castToPtr(?*const dest_opt.child, SourceType, target);
            }
        },
        .int => {
            switch (@typeInfo(SourceType)) {
                // Pointer-to-integer goes through the address value.
                .pointer => {
                    return castInt(DestType, @intFromPtr(target));
                },
                .optional => |opt| {
                    if (@typeInfo(opt.child) == .pointer) {
                        return castInt(DestType, @intFromPtr(target));
                    }
                },
                .int => {
                    return castInt(DestType, target);
                },
                // A bare function casts via the address of the function.
                .@"fn" => {
                    return castInt(DestType, @intFromPtr(&target));
                },
                .bool => {
                    return @intFromBool(target);
                },
                else => {},
            }
        },
        .float => {
            switch (@typeInfo(SourceType)) {
                .int => return @as(DestType, @floatFromInt(target)),
                .float => return @as(DestType, @floatCast(target)),
                .bool => return @as(DestType, @floatFromInt(@intFromBool(target))),
                else => {},
            }
        },
        // Casting to a union initializes the field whose type matches the
        // source type exactly (a GNU C extension used by some macros).
        .@"union" => |info| {
            inline for (info.fields) |field| {
                if (field.type == SourceType) return @unionInit(DestType, field.name, target);
            }
            @compileError("cast to union type '" ++ @typeName(DestType) ++ "' from type '" ++ @typeName(SourceType) ++ "' which is not present in union");
        },
        // C truthiness: anything nonzero (via its usize cast) is true.
        .bool => return cast(usize, target) != 0,
        else => {},
    }
    return @as(DestType, target);
}
/// Integer-to-integer cast with C semantics: the value is truncated (or
/// widened) at the source's signedness, then reinterpreted at the
/// destination's signedness.
fn castInt(comptime DestType: type, target: anytype) DestType {
    const dest_info = @typeInfo(DestType).int;
    const src_info = @typeInfo(@TypeOf(target)).int;
    // Intermediate type: destination width, source signedness.
    const Intermediate = @Type(.{ .int = .{ .bits = dest_info.bits, .signedness = src_info.signedness } });
    if (dest_info.bits < src_info.bits) {
        return @bitCast(@as(Intermediate, @truncate(target)));
    }
    return @bitCast(@as(Intermediate, target));
}
fn castPtr(comptime DestType: type, target: anytype) DestType {
return @constCast(@volatileCast(@alignCast(@ptrCast(target))));
}
fn castToPtr(comptime DestType: type, comptime SourceType: type, target: anytype) DestType {
switch (@typeInfo(SourceType)) {
.int => {
return @as(DestType, @ptrFromInt(castInt(usize, target)));
},
.comptime_int => {
if (target < 0)
return @as(DestType, @ptrFromInt(@as(usize, @bitCast(@as(isize, @intCast(target))))))
else
return @as(DestType, @ptrFromInt(@as(usize, @intCast(target))));
},
.pointer => {
return castPtr(DestType, target);
},
.@"fn" => {
return castPtr(DestType, &target);
},
.optional => |target_opt| {
if (@typeInfo(target_opt.child) == .pointer) {
return castPtr(DestType, target);
}
},
else => {},
}
return @as(DestType, target);
}
/// Given a value returns its size as C's sizeof operator would.
/// `target` may be either a value or a type (macros can pass either).
/// Compile errors are emitted for types C's sizeof could never see
/// (slices, non-pointer optionals, non-anyopaque opaques).
pub fn sizeof(target: anytype) usize {
    const T: type = if (@TypeOf(target) == type) target else @TypeOf(target);
    switch (@typeInfo(T)) {
        .float, .int, .@"struct", .@"union", .array, .bool, .vector => return @sizeOf(T),
        .@"fn" => {
            // sizeof(main) in C returns 1
            return 1;
        },
        // NULL / nullptr is treated as a pointer for sizing purposes.
        .null => return @sizeOf(*anyopaque),
        .void => {
            // Note: sizeof(void) is 1 on clang/gcc and 0 on MSVC.
            return 1;
        },
        .@"opaque" => {
            if (T == anyopaque) {
                // Note: sizeof(void) is 1 on clang/gcc and 0 on MSVC.
                return 1;
            } else {
                @compileError("Cannot use C sizeof on opaque type " ++ @typeName(T));
            }
        },
        .optional => |opt| {
            if (@typeInfo(opt.child) == .pointer) {
                // ?*T has the same size as *T; recurse on the pointer type.
                return sizeof(opt.child);
            } else {
                @compileError("Cannot use C sizeof on non-pointer optional " ++ @typeName(T));
            }
        },
        .pointer => |ptr| {
            if (ptr.size == .slice) {
                @compileError("Cannot use C sizeof on slice type " ++ @typeName(T));
            }
            // for strings, sizeof("a") returns 2.
            // normal pointer decay scenarios from C are handled
            // in the .array case above, but strings remain literals
            // and are therefore always pointers, so they need to be
            // specially handled here.
            if (ptr.size == .one and ptr.is_const and @typeInfo(ptr.child) == .array) {
                const array_info = @typeInfo(ptr.child).array;
                if ((array_info.child == u8 or array_info.child == u16) and array_info.sentinel() == 0) {
                    // length of the string plus one for the null terminator.
                    return (array_info.len + 1) * @sizeOf(array_info.child);
                }
            }
            // When zero sized pointers are removed, this case will no
            // longer be reachable and can be deleted.
            if (@sizeOf(T) == 0) {
                return @sizeOf(*anyopaque);
            }
            return @sizeOf(T);
        },
        .comptime_float => return @sizeOf(f64), // TODO c_double #3999
        .comptime_int => {
            // TODO to get the correct result we have to translate
            // `1073741824 * 4` as `int(1073741824) *% int(4)` since
            // sizeof(1073741824 * 4) != sizeof(4294967296).
            // TODO test if target fits in int, long or long long
            return @sizeOf(c_int);
        },
        else => @compileError("__helpers.sizeof does not support type " ++ @typeName(T)),
    }
}
/// C `/` operator: converts both operands with the usual arithmetic
/// conversions, then performs float division or truncating integer division.
pub fn div(a: anytype, b: anytype) ArithmeticConversion(@TypeOf(a), @TypeOf(b)) {
    const Common = ArithmeticConversion(@TypeOf(a), @TypeOf(b));
    const lhs = cast(Common, a);
    const rhs = cast(Common, b);
    return switch (@typeInfo(Common)) {
        .float => lhs / rhs,
        .int => @divTrunc(lhs, rhs),
        else => unreachable,
    };
}
/// C `%` operator: converts both operands with the usual arithmetic
/// conversions, then takes the remainder. Signed operands go through
/// `signedRemainder` to match C's sign rules; unsigned use plain `%`.
pub fn rem(a: anytype, b: anytype) ArithmeticConversion(@TypeOf(a), @TypeOf(b)) {
    const Common = ArithmeticConversion(@TypeOf(a), @TypeOf(b));
    const lhs = cast(Common, a);
    const rhs = cast(Common, b);
    switch (@typeInfo(Common)) {
        .int => |int_info| {
            if (int_info.signedness == .signed) {
                return signedRemainder(lhs, rhs);
            }
            return lhs % rhs;
        },
        else => unreachable,
    }
}
/// A 2-argument function-like macro defined as #define FOO(A, B) (A)(B)
/// could be either: cast B to A, or call A with the value B.
pub fn CAST_OR_CALL(a: anytype, b: anytype) switch (@typeInfo(@TypeOf(a))) {
.type => a,
.@"fn" => |fn_info| fn_info.return_type orelse void,
else => |info| @compileError("Unexpected argument type: " ++ @tagName(info)),
} {
switch (@typeInfo(@TypeOf(a))) {
.type => return cast(a, b),
.@"fn" => return a(b),
else => unreachable, // return type will be a compile error otherwise
}
}
pub inline fn DISCARD(x: anytype) void {
_ = x;
}
pub fn F_SUFFIX(comptime f: comptime_float) f32 {
return @as(f32, f);
}
fn L_SUFFIX_ReturnType(comptime number: anytype) type {
switch (@typeInfo(@TypeOf(number))) {
.int, .comptime_int => return @TypeOf(promoteIntLiteral(c_long, number, .decimal)),
.float, .comptime_float => return c_longdouble,
else => @compileError("Invalid value for L suffix"),
}
}
pub fn L_SUFFIX(comptime number: anytype) L_SUFFIX_ReturnType(number) {
switch (@typeInfo(@TypeOf(number))) {
.int, .comptime_int => return promoteIntLiteral(c_long, number, .decimal),
.float, .comptime_float => @compileError("TODO: c_longdouble initialization from comptime_float not supported"),
else => @compileError("Invalid value for L suffix"),
}
}
pub fn LL_SUFFIX(comptime n: comptime_int) @TypeOf(promoteIntLiteral(c_longlong, n, .decimal)) {
return promoteIntLiteral(c_longlong, n, .decimal);
}
pub fn U_SUFFIX(comptime n: comptime_int) @TypeOf(promoteIntLiteral(c_uint, n, .decimal)) {
return promoteIntLiteral(c_uint, n, .decimal);
}
pub fn UL_SUFFIX(comptime n: comptime_int) @TypeOf(promoteIntLiteral(c_ulong, n, .decimal)) {
return promoteIntLiteral(c_ulong, n, .decimal);
}
pub fn ULL_SUFFIX(comptime n: comptime_int) @TypeOf(promoteIntLiteral(c_ulonglong, n, .decimal)) {
return promoteIntLiteral(c_ulonglong, n, .decimal);
}
pub fn WL_CONTAINER_OF(ptr: anytype, sample: anytype, comptime member: []const u8) @TypeOf(sample) {
return @fieldParentPtr(member, ptr);
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,288 @@
const std = @import("std");
const mem = std.mem;
const assert = std.debug.assert;
const aro = @import("aro");
const CToken = aro.Tokenizer.Token;
const helpers = @import("helpers.zig");
const Translator = @import("Translator.zig");
const Error = Translator.Error;
pub const MacroProcessingError = Error || error{UnexpectedMacroToken};
const Impl = std.meta.DeclEnum(@import("helpers"));
const Template = struct { []const u8, Impl };
/// Templates must be function-like macros
/// first element is macro source, second element is the name of the function
/// in __helpers which implements it
const templates = [_]Template{
.{ "f_SUFFIX(X) (X ## f)", .F_SUFFIX },
.{ "F_SUFFIX(X) (X ## F)", .F_SUFFIX },
.{ "u_SUFFIX(X) (X ## u)", .U_SUFFIX },
.{ "U_SUFFIX(X) (X ## U)", .U_SUFFIX },
.{ "l_SUFFIX(X) (X ## l)", .L_SUFFIX },
.{ "L_SUFFIX(X) (X ## L)", .L_SUFFIX },
.{ "ul_SUFFIX(X) (X ## ul)", .UL_SUFFIX },
.{ "uL_SUFFIX(X) (X ## uL)", .UL_SUFFIX },
.{ "Ul_SUFFIX(X) (X ## Ul)", .UL_SUFFIX },
.{ "UL_SUFFIX(X) (X ## UL)", .UL_SUFFIX },
.{ "ll_SUFFIX(X) (X ## ll)", .LL_SUFFIX },
.{ "LL_SUFFIX(X) (X ## LL)", .LL_SUFFIX },
.{ "ull_SUFFIX(X) (X ## ull)", .ULL_SUFFIX },
.{ "uLL_SUFFIX(X) (X ## uLL)", .ULL_SUFFIX },
.{ "Ull_SUFFIX(X) (X ## Ull)", .ULL_SUFFIX },
.{ "ULL_SUFFIX(X) (X ## ULL)", .ULL_SUFFIX },
.{ "f_SUFFIX(X) X ## f", .F_SUFFIX },
.{ "F_SUFFIX(X) X ## F", .F_SUFFIX },
.{ "u_SUFFIX(X) X ## u", .U_SUFFIX },
.{ "U_SUFFIX(X) X ## U", .U_SUFFIX },
.{ "l_SUFFIX(X) X ## l", .L_SUFFIX },
.{ "L_SUFFIX(X) X ## L", .L_SUFFIX },
.{ "ul_SUFFIX(X) X ## ul", .UL_SUFFIX },
.{ "uL_SUFFIX(X) X ## uL", .UL_SUFFIX },
.{ "Ul_SUFFIX(X) X ## Ul", .UL_SUFFIX },
.{ "UL_SUFFIX(X) X ## UL", .UL_SUFFIX },
.{ "ll_SUFFIX(X) X ## ll", .LL_SUFFIX },
.{ "LL_SUFFIX(X) X ## LL", .LL_SUFFIX },
.{ "ull_SUFFIX(X) X ## ull", .ULL_SUFFIX },
.{ "uLL_SUFFIX(X) X ## uLL", .ULL_SUFFIX },
.{ "Ull_SUFFIX(X) X ## Ull", .ULL_SUFFIX },
.{ "ULL_SUFFIX(X) X ## ULL", .ULL_SUFFIX },
.{ "CAST_OR_CALL(X, Y) (X)(Y)", .CAST_OR_CALL },
.{ "CAST_OR_CALL(X, Y) ((X)(Y))", .CAST_OR_CALL },
.{
\\wl_container_of(ptr, sample, member) \
\\(__typeof__(sample))((char *)(ptr) - \
\\ offsetof(__typeof__(*sample), member))
,
.WL_CONTAINER_OF,
},
.{ "IGNORE_ME(X) ((void)(X))", .DISCARD },
.{ "IGNORE_ME(X) (void)(X)", .DISCARD },
.{ "IGNORE_ME(X) ((const void)(X))", .DISCARD },
.{ "IGNORE_ME(X) (const void)(X)", .DISCARD },
.{ "IGNORE_ME(X) ((volatile void)(X))", .DISCARD },
.{ "IGNORE_ME(X) (volatile void)(X)", .DISCARD },
.{ "IGNORE_ME(X) ((const volatile void)(X))", .DISCARD },
.{ "IGNORE_ME(X) (const volatile void)(X)", .DISCARD },
.{ "IGNORE_ME(X) ((volatile const void)(X))", .DISCARD },
.{ "IGNORE_ME(X) (volatile const void)(X)", .DISCARD },
};
/// A tokenized template macro paired with the `__helpers` function that
/// implements it. Used to recognize well-known function-like macros and
/// map them directly to a helper instead of translating their bodies.
const Pattern = struct {
    // Tokenized template source; `slicer.tokens` is owned by this Pattern
    // and freed in `deinit`.
    slicer: MacroSlicer,
    // Tag of the __helpers function this template maps to.
    impl: Impl,
    // Tokenizes the template's macro source (`template[0]`) and records the
    // helper tag (`template[1]`).
    fn init(pl: *Pattern, allocator: mem.Allocator, template: Template) Error!void {
        const source = template[0];
        const impl = template[1];
        var tok_list = std.ArrayList(CToken).init(allocator);
        defer tok_list.deinit();
        pl.* = .{
            .slicer = try tokenizeMacro(source, &tok_list),
            .impl = impl,
        };
    }
    fn deinit(pl: *Pattern, allocator: mem.Allocator) void {
        allocator.free(pl.slicer.tokens);
        pl.* = undefined;
    }
    /// This function assumes that `ms` has already been validated to contain a function-like
    /// macro, and that the parsed template macro in `pl` also contains a function-like
    /// macro. Please review this logic carefully if changing that assumption. Two
    /// function-like macros are considered equivalent if and only if they contain the same
    /// list of tokens, modulo parameter names.
    fn matches(pat: Pattern, ms: MacroSlicer) bool {
        // Fast rejections: parameter count and token count must agree.
        if (ms.params != pat.slicer.params) return false;
        if (ms.tokens.len != pat.slicer.tokens.len) return false;
        for (ms.tokens, pat.slicer.tokens) |macro_tok, pat_tok| {
            if (macro_tok.id != pat_tok.id) return false;
            switch (macro_tok.id) {
                .macro_param, .macro_param_no_expand => {
                    // `.end` is the parameter index.
                    if (macro_tok.end != pat_tok.end) return false;
                },
                .identifier, .extended_identifier, .string_literal, .char_literal, .pp_num => {
                    // Tokens with a textual payload must match byte-for-byte.
                    const macro_bytes = ms.slice(macro_tok);
                    const pattern_bytes = pat.slicer.slice(pat_tok);
                    if (!mem.eql(u8, pattern_bytes, macro_bytes)) return false;
                },
                else => {
                    // other tags correspond to keywords and operators that do not contain a "payload"
                    // that can vary
                },
            }
        }
        return true;
    }
};
// This file is itself the `PatternList` type (file-as-struct).
const PatternList = @This();

/// Parsed template patterns; owned by this list (see `init`/`deinit`).
patterns: []Pattern,
/// A macro's source text together with its parsed tokens and the number of
/// parameters of the function-like macro.
pub const MacroSlicer = struct {
    source: []const u8,
    tokens: []const CToken,
    params: u32,

    /// Returns the bytes of `source` that `token` spans.
    fn slice(ms: MacroSlicer, token: CToken) []const u8 {
        const bytes = ms.source;
        return bytes[token.start..token.end];
    }
};
/// Parses every template macro into a ready-to-match `Pattern`.
/// On failure, everything allocated so far is released (the original
/// leaked both the pattern array and any already-initialized token
/// slices when a mid-loop `Pattern.init` failed).
pub fn init(allocator: mem.Allocator) Error!PatternList {
    const patterns = try allocator.alloc(Pattern, templates.len);
    var initialized: usize = 0;
    errdefer {
        // Only patterns that finished `init` own a token slice.
        for (patterns[0..initialized]) |*pattern| pattern.deinit(allocator);
        allocator.free(patterns);
    }
    for (patterns, templates) |*pattern, template| {
        try pattern.init(allocator, template);
        initialized += 1;
    }
    return .{ .patterns = patterns };
}
/// Releases every pattern's token storage and then the pattern array itself.
/// The list must not be used afterwards.
pub fn deinit(pl: *PatternList, allocator: mem.Allocator) void {
    for (pl.patterns) |*pattern| {
        pattern.deinit(allocator);
    }
    allocator.free(pl.patterns);
    pl.* = undefined;
}
/// Returns the `Impl` of the first template pattern equivalent to `ms`,
/// or null when no template matches.
pub fn match(pl: PatternList, ms: MacroSlicer) Error!?Impl {
    for (pl.patterns) |pattern| {
        if (pattern.matches(ms)) {
            return pattern.impl;
        }
    }
    return null;
}
/// Tokenizes a template macro definition of the form `NAME(p1, p2, ...) body`.
/// Identifiers in the body that name a parameter are rewritten to
/// `.macro_param` tokens whose `.end` field holds the parameter index, so
/// patterns can later be compared modulo parameter names. The returned
/// slicer owns the token slice (taken from `tok_list`).
fn tokenizeMacro(source: []const u8, tok_list: *std.ArrayList(CToken)) Error!MacroSlicer {
    var param_count: u32 = 0;
    // Templates use only a few parameters; an overflow past 8 would trip
    // the bounds check in safe builds.
    var param_buf: [8][]const u8 = undefined;
    var tokenizer: aro.Tokenizer = .{
        .buf = source,
        .source = .unused,
        .langopts = .{},
    };
    // Consume the macro name and the opening parenthesis.
    {
        const name_tok = tokenizer.nextNoWS();
        assert(name_tok.id == .identifier);
        const l_paren = tokenizer.nextNoWS();
        assert(l_paren.id == .l_paren);
    }
    // Collect the parameter names up to the closing parenthesis.
    while (true) {
        const param = tokenizer.nextNoWS();
        if (param.id == .r_paren) break;
        assert(param.id == .identifier);
        const slice = source[param.start..param.end];
        param_buf[param_count] = slice;
        param_count += 1;
        const comma = tokenizer.nextNoWS();
        if (comma.id == .r_paren) break;
        assert(comma.id == .comma);
    }
    // Tokenize the macro body, replacing parameter identifiers with
    // `.macro_param` tokens carrying the parameter index.
    outer: while (true) {
        const tok = tokenizer.next();
        switch (tok.id) {
            .whitespace, .comment => continue,
            .identifier => {
                const slice = source[tok.start..tok.end];
                for (param_buf[0..param_count], 0..) |param, i| {
                    if (std.mem.eql(u8, param, slice)) {
                        try tok_list.append(.{
                            .id = .macro_param,
                            .source = .unused,
                            .end = @intCast(i),
                        });
                        continue :outer;
                    }
                }
            },
            .hash_hash => {
                // A parameter immediately left of `##` must not be expanded.
                if (tok_list.items[tok_list.items.len - 1].id == .macro_param) {
                    tok_list.items[tok_list.items.len - 1].id = .macro_param_no_expand;
                }
            },
            .nl, .eof => break,
            else => {},
        }
        try tok_list.append(tok);
    }
    return .{
        .source = source,
        .tokens = try tok_list.toOwnedSlice(),
        .params = param_count,
    };
}
// Verifies that template matching is insensitive to parameter names,
// rejects non-matching bodies, and recognizes every DISCARD/CAST_OR_CALL
// qualifier spelling.
test "Macro matching" {
    const testing = std.testing;
    const helper = struct {
        // Tokenizes `source` and asserts the pattern list resolves it to
        // `expected_match` (or to no match when null).
        fn checkMacro(
            allocator: mem.Allocator,
            pattern_list: PatternList,
            source: []const u8,
            comptime expected_match: ?Impl,
        ) !void {
            var tok_list = std.ArrayList(CToken).init(allocator);
            defer tok_list.deinit();
            const ms = try tokenizeMacro(source, &tok_list);
            defer allocator.free(ms.tokens);
            const matched = try pattern_list.match(ms);
            if (expected_match) |expected| {
                try testing.expectEqual(expected, matched);
            } else {
                try testing.expectEqual(@as(@TypeOf(matched), null), matched);
            }
        }
    };
    const allocator = std.testing.allocator;
    var pattern_list = try PatternList.init(allocator);
    defer pattern_list.deinit(allocator);

    // Suffix templates match even though the parameter is named `Z`, not `X`.
    try helper.checkMacro(allocator, pattern_list, "BAR(Z) (Z ## F)", .F_SUFFIX);
    try helper.checkMacro(allocator, pattern_list, "BAR(Z) (Z ## U)", .U_SUFFIX);
    try helper.checkMacro(allocator, pattern_list, "BAR(Z) (Z ## L)", .L_SUFFIX);
    try helper.checkMacro(allocator, pattern_list, "BAR(Z) (Z ## LL)", .LL_SUFFIX);
    try helper.checkMacro(allocator, pattern_list, "BAR(Z) (Z ## UL)", .UL_SUFFIX);
    try helper.checkMacro(allocator, pattern_list, "BAR(Z) (Z ## ULL)", .ULL_SUFFIX);
    try helper.checkMacro(allocator, pattern_list,
        \\container_of(a, b, c) \
        \\(__typeof__(b))((char *)(a) - \
        \\ offsetof(__typeof__(*b), c))
    , .WL_CONTAINER_OF);

    try helper.checkMacro(allocator, pattern_list, "NO_MATCH(X, Y) (X + Y)", null);
    try helper.checkMacro(allocator, pattern_list, "CAST_OR_CALL(X, Y) (X)(Y)", .CAST_OR_CALL);
    try helper.checkMacro(allocator, pattern_list, "CAST_OR_CALL(X, Y) ((X)(Y))", .CAST_OR_CALL);
    try helper.checkMacro(allocator, pattern_list, "IGNORE_ME(X) (void)(X)", .DISCARD);
    try helper.checkMacro(allocator, pattern_list, "IGNORE_ME(X) ((void)(X))", .DISCARD);
    try helper.checkMacro(allocator, pattern_list, "IGNORE_ME(X) (const void)(X)", .DISCARD);
    try helper.checkMacro(allocator, pattern_list, "IGNORE_ME(X) ((const void)(X))", .DISCARD);
    try helper.checkMacro(allocator, pattern_list, "IGNORE_ME(X) (volatile void)(X)", .DISCARD);
    try helper.checkMacro(allocator, pattern_list, "IGNORE_ME(X) ((volatile void)(X))", .DISCARD);
    try helper.checkMacro(allocator, pattern_list, "IGNORE_ME(X) (const volatile void)(X)", .DISCARD);
    try helper.checkMacro(allocator, pattern_list, "IGNORE_ME(X) ((const volatile void)(X))", .DISCARD);
    try helper.checkMacro(allocator, pattern_list, "IGNORE_ME(X) (volatile const void)(X)", .DISCARD);
    try helper.checkMacro(allocator, pattern_list, "IGNORE_ME(X) ((volatile const void)(X))", .DISCARD);
}

View File

@ -0,0 +1,399 @@
const std = @import("std");
const aro = @import("aro");
const ast = @import("ast.zig");
const Translator = @import("Translator.zig");
const Scope = @This();
/// Maps declaration names to their translated AST nodes.
pub const SymbolTable = std.StringArrayHashMapUnmanaged(ast.Node);

/// Records the (possibly mangled) Zig `alias` chosen for a C `name`.
pub const AliasList = std.ArrayListUnmanaged(struct {
    alias: []const u8,
    name: []const u8,
});

/// Associates a container (structure or union) with its relevant member functions.
pub const ContainerMemberFns = struct {
    container_decl_ptr: *ast.Node,
    member_fns: std.ArrayListUnmanaged(*ast.Payload.Func) = .empty,
};
/// Keyed by the container's qualified type.
pub const ContainerMemberFnsHashMap = std.AutoArrayHashMapUnmanaged(aro.QualType, ContainerMemberFns);
/// Discriminates which concrete scope struct embeds this `Scope` base
/// (recovered via `@fieldParentPtr("base", ...)`).
id: Id,
/// Enclosing scope; null only for the root scope.
parent: ?*Scope,

pub const Id = enum {
    block,
    root,
    condition,
    loop,
    do_loop,
};
/// Used for the scope of condition expressions, for example `if (cond)`.
/// The block is lazily initialized because it is only needed for rare
/// cases of comma operators being used.
pub const Condition = struct {
    base: Scope,
    block: ?Block = null,

    /// Returns the condition's block scope, creating it on first use.
    fn getBlockScope(cond: *Condition, t: *Translator) !*Block {
        if (cond.block == null) {
            cond.block = try Block.init(t, &cond.base, true);
        }
        return &cond.block.?;
    }

    /// Releases the block scope, if one was ever created.
    pub fn deinit(cond: *Condition) void {
        if (cond.block) |*b| b.deinit();
    }
};
/// Represents an in-progress Node.Block. This struct is stack-allocated.
/// When it is deinitialized, it produces an Node.Block which is allocated
/// into the main arena.
pub const Block = struct {
    base: Scope,
    translator: *Translator,
    statements: std.ArrayListUnmanaged(ast.Node),
    variables: AliasList,
    mangle_count: u32 = 0,
    label: ?[]const u8 = null,

    /// By default all variables are discarded, since we do not know in advance if they
    /// will be used. This maps the variable's name to the Discard payload, so that if
    /// the variable is subsequently referenced we can indicate that the discard should
    /// be skipped during the intermediate AST -> Zig AST render step.
    variable_discards: std.StringArrayHashMapUnmanaged(*ast.Payload.Discard),

    /// When the block corresponds to a function, keep track of the return type
    /// so that the return expression can be cast, if necessary
    return_type: ?aro.QualType = null,

    /// C static local variables are wrapped in a block-local struct. The struct
    /// is named `mangle(static_local_ + name)` and the Zig variable within the
    /// struct keeps the name of the C variable.
    pub const static_local_prefix = "static_local";

    /// C extern local variables are wrapped in a block-local struct. The struct
    /// is named `mangle(extern_local + name)` and the Zig variable within the
    /// struct keeps the name of the C variable.
    pub const extern_local_prefix = "extern_local";

    /// Initializes a block scope under `parent`. When `labeled`, a mangled
    /// label based on "blk" is created immediately.
    pub fn init(t: *Translator, parent: *Scope, labeled: bool) !Block {
        var blk: Block = .{
            .base = .{
                .id = .block,
                .parent = parent,
            },
            .translator = t,
            .statements = .empty,
            .variables = .empty,
            .variable_discards = .empty,
        };
        if (labeled) {
            blk.label = try blk.makeMangledName("blk");
        }
        return blk;
    }

    /// Frees the gpa-backed bookkeeping; arena allocations are left to the arena.
    pub fn deinit(block: *Block) void {
        block.statements.deinit(block.translator.gpa);
        block.variables.deinit(block.translator.gpa);
        block.variable_discards.deinit(block.translator.gpa);
        block.* = undefined;
    }

    /// Produces the final `ast.Node` for this block, allocated in the arena.
    pub fn complete(block: *Block) !ast.Node {
        const arena = block.translator.arena;
        if (block.base.parent.?.id == .do_loop) {
            // We reserve 1 extra statement if the parent is a do_loop. This is in case of
            // do while, we want to put `if (cond) break;` at the end.
            // (The branch condition already guarantees the parent is a do_loop, so the
            // former `@intFromBool(...)` term was always 1; it is written plainly here.)
            const alloc_len = block.statements.items.len + 1;
            var stmts = try arena.alloc(ast.Node, alloc_len);
            stmts.len = block.statements.items.len;
            @memcpy(stmts[0..block.statements.items.len], block.statements.items);
            return ast.Node.Tag.block.create(arena, .{
                .label = block.label,
                .stmts = stmts,
            });
        }
        if (block.statements.items.len == 0) return ast.Node.Tag.empty_block.init();
        return ast.Node.Tag.block.create(arena, .{
            .label = block.label,
            .stmts = try arena.dupe(ast.Node, block.statements.items),
        });
    }

    /// Given the desired name, return a name that does not shadow anything from outer scopes.
    /// Inserts the returned name into the scope.
    /// The name will not be visible to callers of getAlias.
    pub fn reserveMangledName(block: *Block, name: []const u8) ![]const u8 {
        return block.createMangledName(name, true, null);
    }

    /// Same as reserveMangledName, but enables the alias immediately.
    pub fn makeMangledName(block: *Block, name: []const u8) ![]const u8 {
        return block.createMangledName(name, false, null);
    }

    /// Core mangling routine behind reserveMangledName/makeMangledName.
    /// `prefix_opt`, when set, is joined to `name` with '_' to form the alias base.
    /// NOTE(review): when no mangling is needed and no prefix is given, the stored
    /// alias is the caller's `name` slice rather than the arena copy — callers
    /// appear to pass arena- or static-lifetime names; confirm before reusing.
    pub fn createMangledName(block: *Block, name: []const u8, reservation: bool, prefix_opt: ?[]const u8) ![]const u8 {
        const arena = block.translator.arena;
        const name_copy = try arena.dupe(u8, name);
        const alias_base = if (prefix_opt) |prefix|
            try std.fmt.allocPrint(arena, "{s}_{s}", .{ prefix, name })
        else
            name;
        var proposed_name = alias_base;
        // Keep appending a counter until the candidate shadows nothing visible.
        while (block.contains(proposed_name)) {
            block.mangle_count += 1;
            proposed_name = try std.fmt.allocPrint(arena, "{s}_{d}", .{ alias_base, block.mangle_count });
        }
        const new_mangle = try block.variables.addOne(block.translator.gpa);
        if (reservation) {
            // A reservation maps the name to itself so getAlias keeps resolving
            // to the unmangled name until the alias is enabled.
            new_mangle.* = .{ .name = name_copy, .alias = name_copy };
        } else {
            new_mangle.* = .{ .name = name_copy, .alias = proposed_name };
        }
        return proposed_name;
    }

    /// Resolves `name` here or, failing that, in the enclosing scope chain.
    fn getAlias(block: *Block, name: []const u8) ?[]const u8 {
        for (block.variables.items) |p| {
            if (std.mem.eql(u8, p.name, name))
                return p.alias;
        }
        return block.base.parent.?.getAlias(name);
    }

    /// True if this block itself declared `name` as an alias.
    fn localContains(block: *Block, name: []const u8) bool {
        for (block.variables.items) |p| {
            if (std.mem.eql(u8, p.alias, name))
                return true;
        }
        return false;
    }

    /// True if `name` is visible in this block or any enclosing scope.
    fn contains(block: *Block, name: []const u8) bool {
        if (block.localContains(name))
            return true;
        return block.base.parent.?.contains(name);
    }

    /// Appends a discard statement for `name` and records it so the discard
    /// can be skipped later if the variable turns out to be referenced.
    pub fn discardVariable(block: *Block, name: []const u8) Translator.Error!void {
        const gpa = block.translator.gpa;
        const arena = block.translator.arena;
        const name_node = try ast.Node.Tag.identifier.create(arena, name);
        const discard = try ast.Node.Tag.discard.create(arena, .{ .should_skip = false, .value = name_node });
        try block.statements.append(gpa, discard);
        try block.variable_discards.putNoClobber(gpa, name, discard.castTag(.discard).?);
    }
};
/// The file-level scope: tracks translated declarations, processed macros,
/// and the mapping from containers to their member functions.
pub const Root = struct {
    base: Scope,
    translator: *Translator,
    sym_table: SymbolTable,
    blank_macros: std.StringArrayHashMapUnmanaged(void),
    nodes: std.ArrayListUnmanaged(ast.Node),
    container_member_fns_map: ContainerMemberFnsHashMap,

    pub fn init(t: *Translator) Root {
        return .{
            .base = .{
                .id = .root,
                .parent = null,
            },
            .translator = t,
            .sym_table = .empty,
            .blank_macros = .empty,
            .nodes = .empty,
            .container_member_fns_map = .empty,
        };
    }

    pub fn deinit(root: *Root) void {
        root.sym_table.deinit(root.translator.gpa);
        root.blank_macros.deinit(root.translator.gpa);
        root.nodes.deinit(root.translator.gpa);
        // Each map value owns a list of member functions; free those first.
        for (root.container_member_fns_map.values()) |*members| {
            members.member_fns.deinit(root.translator.gpa);
        }
        root.container_member_fns_map.deinit(root.translator.gpa);
    }

    /// Check if the global scope contains this name, without looking into the "future", e.g.
    /// ignore the preprocessed decl and macro names.
    pub fn containsNow(root: *Root, name: []const u8) bool {
        return root.sym_table.contains(name);
    }

    /// Check if the global scope contains the name, includes all decls that haven't been translated yet.
    pub fn contains(root: *Root, name: []const u8) bool {
        return root.containsNow(name) or root.translator.global_names.contains(name) or root.translator.weak_global_names.contains(name);
    }

    /// Registers `func` as a member function of the container type of its
    /// first parameter (pointer parameters use the pointed-to type), but only
    /// when that container is tracked in `container_member_fns_map`.
    pub fn addMemberFunction(root: *Root, func_ty: aro.Type.Func, func: *ast.Payload.Func) !void {
        std.debug.assert(func.data.name != null);
        if (func_ty.params.len == 0) return;
        const param1_base = func_ty.params[0].qt.base(root.translator.comp);
        const container_qt = if (param1_base.type == .pointer)
            param1_base.type.pointer.child.base(root.translator.comp).qt
        else
            param1_base.qt;
        if (root.container_member_fns_map.getPtr(container_qt)) |members| {
            try members.member_fns.append(root.translator.gpa, func);
        }
    }

    /// For every tracked container, appends `pub const <name> = <func>;`
    /// aliases for its member functions to the container's decl list, naming
    /// each alias after the last '_'-separated component of the function name
    /// (with a numeric suffix on collisions, including collisions with fields).
    pub fn processContainerMemberFns(root: *Root) !void {
        const gpa = root.translator.gpa;
        const arena = root.translator.arena;
        // Reused per container: maps a candidate alias name to its collision count.
        var member_names: std.StringArrayHashMapUnmanaged(u32) = .empty;
        defer member_names.deinit(gpa);
        for (root.container_member_fns_map.values()) |members| {
            member_names.clearRetainingCapacity();
            const decls_ptr = switch (members.container_decl_ptr.tag()) {
                .@"struct", .@"union" => blk_record: {
                    const payload: *ast.Payload.Container = @alignCast(@fieldParentPtr("base", members.container_decl_ptr.ptr_otherwise));
                    // Avoid duplication with field names
                    for (payload.data.fields) |field| {
                        try member_names.put(gpa, field.name, 0);
                    }
                    break :blk_record &payload.data.decls;
                },
                .opaque_literal => blk_opaque: {
                    // Upgrade the opaque literal to a full opaque decl so it can hold decls.
                    const container_decl = try ast.Node.Tag.@"opaque".create(arena, .{
                        .layout = .none,
                        .fields = &.{},
                        .decls = &.{},
                    });
                    members.container_decl_ptr.* = container_decl;
                    break :blk_opaque &container_decl.castTag(.@"opaque").?.data.decls;
                },
                // NOTE(review): this returns from the whole function, skipping all
                // remaining containers in the map; if other tags can occur here,
                // `continue` may have been intended — confirm.
                else => return,
            };
            const old_decls = decls_ptr.*;
            const new_decls = try arena.alloc(ast.Node, old_decls.len + members.member_fns.items.len);
            @memcpy(new_decls[0..old_decls.len], old_decls);
            // Assume the allocator of payload.data.decls is arena,
            // so don't add arena.free(old_variables).
            const func_ref_vars = new_decls[old_decls.len..];
            var count: u32 = 0;
            for (members.member_fns.items) |func| {
                const func_name = func.data.name.?;
                const last_index = std.mem.lastIndexOf(u8, func_name, "_");
                // Functions without '_' in their name get no alias.
                const last_name = if (last_index) |index| func_name[index + 1 ..] else continue;
                var same_count: u32 = 0;
                const gop = try member_names.getOrPutValue(gpa, last_name, same_count);
                if (gop.found_existing) {
                    gop.value_ptr.* += 1;
                    same_count = gop.value_ptr.*;
                }
                const var_name = if (same_count == 0)
                    last_name
                else
                    try std.fmt.allocPrint(arena, "{s}{d}", .{ last_name, same_count });
                func_ref_vars[count] = try ast.Node.Tag.pub_var_simple.create(arena, .{
                    .name = var_name,
                    .init = try ast.Node.Tag.identifier.create(arena, func_name),
                });
                count += 1;
            }
            // Trim the decl list to the aliases actually emitted.
            decls_ptr.* = new_decls[0 .. old_decls.len + count];
        }
    }
};
/// Walks outward from `inner` to the nearest block scope; condition scopes
/// lazily create their block. Must not be called from the root scope.
pub fn findBlockScope(inner: *Scope, t: *Translator) !*Block {
    var current: *Scope = inner;
    while (true) : (current = current.parent.?) {
        switch (current.id) {
            .block => return @fieldParentPtr("base", current),
            .condition => {
                const cond: *Condition = @fieldParentPtr("base", current);
                return cond.getBlockScope(t);
            },
            .root => unreachable,
            .loop, .do_loop => {},
        }
    }
}
/// Walks outward from `inner` to the nearest block scope that carries a
/// function return type. Must only be called inside a function body.
pub fn findBlockReturnType(inner: *Scope) aro.QualType {
    var current: *Scope = inner;
    while (true) : (current = current.parent.?) {
        if (current.id == .root) unreachable;
        if (current.id != .block) continue;
        const block: *Block = @fieldParentPtr("base", current);
        if (block.return_type) |qt| return qt;
    }
}
/// Resolves `name` to its Zig alias by delegating along the scope chain;
/// yields null once the root is reached without a match.
pub fn getAlias(scope: *Scope, name: []const u8) ?[]const u8 {
    switch (scope.id) {
        .root => return null,
        .block => {
            const block: *Block = @fieldParentPtr("base", scope);
            return block.getAlias(name);
        },
        .loop, .do_loop, .condition => return scope.parent.?.getAlias(name),
    }
}
/// Whether `name` is declared anywhere along this scope chain
/// (the root also consults not-yet-translated global names).
fn contains(scope: *Scope, name: []const u8) bool {
    switch (scope.id) {
        .root => {
            const root: *Root = @fieldParentPtr("base", scope);
            return root.contains(name);
        },
        .block => {
            const block: *Block = @fieldParentPtr("base", scope);
            return block.contains(name);
        },
        .loop, .do_loop, .condition => return scope.parent.?.contains(name),
    }
}
/// Appends a node to the first block scope if inside a function, or to the root tree if not.
pub fn appendNode(inner: *Scope, node: ast.Node) !void {
    var current: *Scope = inner;
    while (true) : (current = current.parent.?) {
        switch (current.id) {
            .root => {
                const root: *Root = @fieldParentPtr("base", current);
                return root.nodes.append(root.translator.gpa, node);
            },
            .block => {
                const block: *Block = @fieldParentPtr("base", current);
                return block.statements.append(block.translator.gpa, node);
            },
            .loop, .do_loop, .condition => {},
        }
    }
}
/// Marks the discard recorded by `Block.discardVariable` for `name` as
/// skippable, in the nearest block that recorded one. Currently a no-op
/// (see the deliberate early-return below).
pub fn skipVariableDiscard(inner: *Scope, name: []const u8) void {
    if (true) {
        // TODO: due to 'local variable is never mutated' errors, we can
        // only skip discards if a variable is used as an lvalue, which
        // we don't currently have detection for in translate-c.
        // Once #17584 is completed, perhaps we can do away with this
        // logic entirely, and instead rely on render to fixup code.
        return;
    }
    // Unreachable for now; retained so the intended behavior is preserved
    // for when the gate above is removed.
    var scope = inner;
    while (true) {
        switch (scope.id) {
            .root => return,
            .block => {
                const block: *Block = @fieldParentPtr("base", scope);
                if (block.variable_discards.get(name)) |discard| {
                    discard.data.should_skip = true;
                    return;
                }
            },
            else => {},
        }
        scope = scope.parent.?;
    }
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,76 @@
const std = @import("std");
const ast = @import("ast.zig");
/// All builtins need to have a source so that macros can reference them
/// but for some it is possible to directly call an equivalent Zig builtin
/// which is preferable.
pub const Builtin = struct {
    /// The name of the builtin in `c_builtins.zig`.
    name: []const u8,
    /// When set, the AST tag for the equivalent Zig builtin, used in
    /// preference to calling the `c_builtins.zig` implementation.
    tag: ?ast.Node.Tag = null,
};
/// Maps a C `__builtin_*` (or `__has_builtin`) name to its translation.
/// Builtins absent from this map are not supported and are demoted.
pub const map = std.StaticStringMap(Builtin).initComptime([_]struct { []const u8, Builtin }{
    .{ "__builtin_abs", .{ .name = "abs" } },
    .{ "__builtin_assume", .{ .name = "assume" } },
    .{ "__builtin_bswap16", .{ .name = "bswap16", .tag = .byte_swap } },
    .{ "__builtin_bswap32", .{ .name = "bswap32", .tag = .byte_swap } },
    .{ "__builtin_bswap64", .{ .name = "bswap64", .tag = .byte_swap } },
    .{ "__builtin_ceilf", .{ .name = "ceilf", .tag = .ceil } },
    .{ "__builtin_ceil", .{ .name = "ceil", .tag = .ceil } },
    .{ "__builtin_clz", .{ .name = "clz" } },
    .{ "__builtin_constant_p", .{ .name = "constant_p" } },
    .{ "__builtin_cosf", .{ .name = "cosf", .tag = .cos } },
    .{ "__builtin_cos", .{ .name = "cos", .tag = .cos } },
    .{ "__builtin_ctz", .{ .name = "ctz" } },
    .{ "__builtin_exp2f", .{ .name = "exp2f", .tag = .exp2 } },
    .{ "__builtin_exp2", .{ .name = "exp2", .tag = .exp2 } },
    .{ "__builtin_expf", .{ .name = "expf", .tag = .exp } },
    .{ "__builtin_exp", .{ .name = "exp", .tag = .exp } },
    .{ "__builtin_expect", .{ .name = "expect" } },
    .{ "__builtin_fabsf", .{ .name = "fabsf", .tag = .abs } },
    .{ "__builtin_fabs", .{ .name = "fabs", .tag = .abs } },
    .{ "__builtin_floorf", .{ .name = "floorf", .tag = .floor } },
    .{ "__builtin_floor", .{ .name = "floor", .tag = .floor } },
    .{ "__builtin_huge_valf", .{ .name = "huge_valf" } },
    .{ "__builtin_inff", .{ .name = "inff" } },
    .{ "__builtin_isinf_sign", .{ .name = "isinf_sign" } },
    .{ "__builtin_isinf", .{ .name = "isinf" } },
    .{ "__builtin_isnan", .{ .name = "isnan" } },
    .{ "__builtin_labs", .{ .name = "labs" } },
    .{ "__builtin_llabs", .{ .name = "llabs" } },
    .{ "__builtin_log10f", .{ .name = "log10f", .tag = .log10 } },
    .{ "__builtin_log10", .{ .name = "log10", .tag = .log10 } },
    .{ "__builtin_log2f", .{ .name = "log2f", .tag = .log2 } },
    .{ "__builtin_log2", .{ .name = "log2", .tag = .log2 } },
    .{ "__builtin_logf", .{ .name = "logf", .tag = .log } },
    .{ "__builtin_log", .{ .name = "log", .tag = .log } },
    .{ "__builtin___memcpy_chk", .{ .name = "memcpy_chk" } },
    .{ "__builtin_memcpy", .{ .name = "memcpy" } },
    .{ "__builtin___memset_chk", .{ .name = "memset_chk" } },
    .{ "__builtin_memset", .{ .name = "memset" } },
    .{ "__builtin_mul_overflow", .{ .name = "mul_overflow" } },
    .{ "__builtin_nanf", .{ .name = "nanf" } },
    .{ "__builtin_object_size", .{ .name = "object_size" } },
    .{ "__builtin_popcount", .{ .name = "popcount" } },
    .{ "__builtin_roundf", .{ .name = "roundf", .tag = .round } },
    .{ "__builtin_round", .{ .name = "round", .tag = .round } },
    .{ "__builtin_signbitf", .{ .name = "signbitf" } },
    .{ "__builtin_signbit", .{ .name = "signbit" } },
    .{ "__builtin_sinf", .{ .name = "sinf", .tag = .sin } },
    .{ "__builtin_sin", .{ .name = "sin", .tag = .sin } },
    .{ "__builtin_sqrtf", .{ .name = "sqrtf", .tag = .sqrt } },
    .{ "__builtin_sqrt", .{ .name = "sqrt", .tag = .sqrt } },
    .{ "__builtin_strcmp", .{ .name = "strcmp" } },
    .{ "__builtin_strlen", .{ .name = "strlen" } },
    .{ "__builtin_truncf", .{ .name = "truncf", .tag = .trunc } },
    .{ "__builtin_trunc", .{ .name = "trunc", .tag = .trunc } },
    .{ "__builtin_unreachable", .{ .name = "unreachable", .tag = .@"unreachable" } },
    .{ "__has_builtin", .{ .name = "has_builtin" } },
    // __builtin_alloca_with_align is not currently implemented.
    // It is used in a run and a translate test to ensure that non-implemented
    // builtins are correctly demoted. If you implement __builtin_alloca_with_align,
    // please update the tests to use a different non-implemented builtin.
});

View File

@ -0,0 +1,327 @@
const std = @import("std");
const builtin = @import("builtin");
const testing = std.testing;
const math = std.math;
const helpers = @import("helpers");
const cast = helpers.cast;
// Exercises helpers.cast across int<->pointer, pointer qualifier/alignment
// erasure, optional pointers, and function-pointer conversions.
test cast {
    var i = @as(i64, 10);

    try testing.expect(cast(*u8, 16) == @as(*u8, @ptrFromInt(16)));
    try testing.expect(cast(*u64, &i).* == @as(u64, 10));
    try testing.expect(cast(*i64, @as(?*align(1) i64, &i)) == &i);

    try testing.expect(cast(?*u8, 2) == @as(*u8, @ptrFromInt(2)));
    try testing.expect(cast(?*i64, @as(*align(1) i64, &i)) == &i);
    try testing.expect(cast(?*i64, @as(?*align(1) i64, &i)) == &i);

    try testing.expectEqual(@as(u32, 4), cast(u32, @as(*u32, @ptrFromInt(4))));
    try testing.expectEqual(@as(u32, 4), cast(u32, @as(?*u32, @ptrFromInt(4))));
    try testing.expectEqual(@as(u32, 10), cast(u32, @as(u64, 10)));

    try testing.expectEqual(@as(i32, @bitCast(@as(u32, 0x8000_0000))), cast(i32, @as(u32, 0x8000_0000)));

    try testing.expectEqual(@as(*u8, @ptrFromInt(2)), cast(*u8, @as(*const u8, @ptrFromInt(2))));
    try testing.expectEqual(@as(*u8, @ptrFromInt(2)), cast(*u8, @as(*volatile u8, @ptrFromInt(2))));

    try testing.expectEqual(@as(?*anyopaque, @ptrFromInt(2)), cast(?*anyopaque, @as(*u8, @ptrFromInt(2))));

    var foo: c_int = -1;
    _ = &foo;
    // Negative ints round-trip through usize via isize bit pattern.
    try testing.expect(cast(*anyopaque, -1) == @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -1))))));
    try testing.expect(cast(*anyopaque, foo) == @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -1))))));
    try testing.expect(cast(?*anyopaque, -1) == @as(?*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -1))))));
    try testing.expect(cast(?*anyopaque, foo) == @as(?*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -1))))));

    const FnPtr = ?*align(1) const fn (*anyopaque) void;
    try testing.expect(cast(FnPtr, 0) == @as(FnPtr, @ptrFromInt(@as(usize, 0))));
    try testing.expect(cast(FnPtr, foo) == @as(FnPtr, @ptrFromInt(@as(usize, @bitCast(@as(isize, -1))))));

    const complexFunction = struct {
        fn f(_: ?*anyopaque, _: c_uint, _: ?*const fn (?*anyopaque) callconv(.c) c_uint, _: ?*anyopaque, _: c_uint, _: [*c]c_uint) callconv(.c) usize {
            return 0;
        }
    }.f;
    const SDL_FunctionPointer = ?*const fn () callconv(.c) void;
    const fn_ptr = cast(SDL_FunctionPointer, complexFunction);
    try testing.expect(fn_ptr != null);
}
const sizeof = helpers.sizeof;

// Exercises helpers.sizeof over types and values: scalars, literals (which
// promote like C), structs, arrays (with/without sentinel), pointers, and
// the C quirk that sizeof(void) == 1.
test sizeof {
    const S = extern struct { a: u32 };

    const ptr_size = @sizeOf(*anyopaque);

    try testing.expect(sizeof(u32) == 4);
    try testing.expect(sizeof(@as(u32, 2)) == 4);
    try testing.expect(sizeof(2) == @sizeOf(c_int));

    try testing.expect(sizeof(2.0) == @sizeOf(f64));

    try testing.expect(sizeof(S) == 4);

    try testing.expect(sizeof([_]u32{ 4, 5, 6 }) == 12);
    try testing.expect(sizeof([3]u32) == 12);
    try testing.expect(sizeof([3:0]u32) == 16);
    try testing.expect(sizeof(&[_]u32{ 4, 5, 6 }) == ptr_size);

    try testing.expect(sizeof(*u32) == ptr_size);
    try testing.expect(sizeof([*]u32) == ptr_size);
    try testing.expect(sizeof([*c]u32) == ptr_size);
    try testing.expect(sizeof(?*u32) == ptr_size);
    try testing.expect(sizeof(?[*]u32) == ptr_size);
    try testing.expect(sizeof(*anyopaque) == ptr_size);
    try testing.expect(sizeof(*void) == ptr_size);
    try testing.expect(sizeof(null) == ptr_size);

    // String literals include the null terminator, as in C.
    try testing.expect(sizeof("foobar") == 7);
    try testing.expect(sizeof(&[_:0]u16{ 'f', 'o', 'o', 'b', 'a', 'r' }) == 14);
    try testing.expect(sizeof(*const [4:0]u8) == 5);
    try testing.expect(sizeof(*[4:0]u8) == ptr_size);
    try testing.expect(sizeof([*]const [4:0]u8) == ptr_size);
    try testing.expect(sizeof(*const *const [4:0]u8) == ptr_size);
    try testing.expect(sizeof(*const [4]u8) == ptr_size);

    if (false) { // TODO
        try testing.expect(sizeof(&sizeof) == @sizeOf(@TypeOf(&sizeof)));
        try testing.expect(sizeof(sizeof) == 1);
    }
    try testing.expect(sizeof(void) == 1);
    try testing.expect(sizeof(anyopaque) == 1);
}
const promoteIntLiteral = helpers.promoteIntLiteral;

// C literal promotion: hex literals may promote to unsigned types,
// decimal literals only to wider signed types.
test promoteIntLiteral {
    const signed_hex = promoteIntLiteral(c_int, math.maxInt(c_int) + 1, .hex);
    try testing.expectEqual(c_uint, @TypeOf(signed_hex));

    // Remaining checks need a long long wider than int.
    if (math.maxInt(c_longlong) == math.maxInt(c_int)) return;

    const signed_decimal = promoteIntLiteral(c_int, math.maxInt(c_int) + 1, .decimal);
    const unsigned = promoteIntLiteral(c_uint, math.maxInt(c_uint) + 1, .hex);

    if (math.maxInt(c_long) > math.maxInt(c_int)) {
        try testing.expectEqual(c_long, @TypeOf(signed_decimal));
        try testing.expectEqual(c_ulong, @TypeOf(unsigned));
    } else {
        try testing.expectEqual(c_longlong, @TypeOf(signed_decimal));
        try testing.expectEqual(c_ulonglong, @TypeOf(unsigned));
    }
}

const shuffleVectorIndex = helpers.shuffleVectorIndex;

// Indexes into the first vector map to 0..len-1; indexes into the second
// vector map to negative values, matching @shuffle's convention.
test shuffleVectorIndex {
    const vector_len: usize = 4;

    _ = shuffleVectorIndex(-1, vector_len);

    try testing.expect(shuffleVectorIndex(0, vector_len) == 0);
    try testing.expect(shuffleVectorIndex(1, vector_len) == 1);
    try testing.expect(shuffleVectorIndex(2, vector_len) == 2);
    try testing.expect(shuffleVectorIndex(3, vector_len) == 3);

    try testing.expect(shuffleVectorIndex(4, vector_len) == -1);
    try testing.expect(shuffleVectorIndex(5, vector_len) == -2);
    try testing.expect(shuffleVectorIndex(6, vector_len) == -3);
    try testing.expect(shuffleVectorIndex(7, vector_len) == -4);
}

const FlexibleArrayType = helpers.FlexibleArrayType;

// The element pointer type inherits the container pointer's qualifiers.
test FlexibleArrayType {
    const Container = extern struct {
        size: usize,
    };

    try testing.expectEqual(FlexibleArrayType(*Container, c_int), [*c]c_int);
    try testing.expectEqual(FlexibleArrayType(*const Container, c_int), [*c]const c_int);
    try testing.expectEqual(FlexibleArrayType(*volatile Container, c_int), [*c]volatile c_int);
    try testing.expectEqual(FlexibleArrayType(*const volatile Container, c_int), [*c]const volatile c_int);
}

const signedRemainder = helpers.signedRemainder;

test signedRemainder {
    // TODO add test
    return error.SkipZigTest;
}
const ArithmeticConversion = helpers.ArithmeticConversion;

// Checks C's usual arithmetic conversions; expectations assume the
// x86_64-linux C ABI and the test is skipped elsewhere.
test ArithmeticConversion {
    // Promotions not necessarily the same for other platforms
    if (builtin.target.cpu.arch != .x86_64 or builtin.target.os.tag != .linux) return error.SkipZigTest;

    const Test = struct {
        /// Order of operands should not matter for arithmetic conversions
        fn checkPromotion(comptime A: type, comptime B: type, comptime Expected: type) !void {
            try std.testing.expect(ArithmeticConversion(A, B) == Expected);
            try std.testing.expect(ArithmeticConversion(B, A) == Expected);
        }
    };

    try Test.checkPromotion(c_longdouble, c_int, c_longdouble);
    try Test.checkPromotion(c_int, f64, f64);
    try Test.checkPromotion(f32, bool, f32);

    try Test.checkPromotion(bool, c_short, c_int);
    try Test.checkPromotion(c_int, c_int, c_int);
    try Test.checkPromotion(c_short, c_int, c_int);

    try Test.checkPromotion(c_int, c_long, c_long);

    try Test.checkPromotion(c_ulonglong, c_uint, c_ulonglong);

    try Test.checkPromotion(c_uint, c_int, c_uint);

    try Test.checkPromotion(c_uint, c_long, c_long);

    try Test.checkPromotion(c_ulong, c_longlong, c_ulonglong);

    // stdint.h
    try Test.checkPromotion(u8, i8, c_int);
    try Test.checkPromotion(u16, i16, c_int);
    try Test.checkPromotion(i32, c_int, c_int);
    try Test.checkPromotion(u32, c_int, c_uint);
    try Test.checkPromotion(i64, c_int, c_long);
    try Test.checkPromotion(u64, c_int, c_ulong);
    try Test.checkPromotion(isize, c_int, c_long);
    try Test.checkPromotion(usize, c_int, c_ulong);
}
const F_SUFFIX = helpers.F_SUFFIX;

test F_SUFFIX {
    try testing.expect(@TypeOf(F_SUFFIX(1)) == f32);
}

const U_SUFFIX = helpers.U_SUFFIX;

// `u` literals start at c_uint and widen to the first unsigned type that fits.
test U_SUFFIX {
    try testing.expect(@TypeOf(U_SUFFIX(1)) == c_uint);
    if (math.maxInt(c_ulong) > math.maxInt(c_uint)) {
        try testing.expect(@TypeOf(U_SUFFIX(math.maxInt(c_uint) + 1)) == c_ulong);
    }
    if (math.maxInt(c_ulonglong) > math.maxInt(c_ulong)) {
        try testing.expect(@TypeOf(U_SUFFIX(math.maxInt(c_ulong) + 1)) == c_ulonglong);
    }
}

const L_SUFFIX = helpers.L_SUFFIX;

// `l` literals start at c_long and widen to the first signed type that fits.
test L_SUFFIX {
    try testing.expect(@TypeOf(L_SUFFIX(1)) == c_long);
    if (math.maxInt(c_long) > math.maxInt(c_int)) {
        try testing.expect(@TypeOf(L_SUFFIX(math.maxInt(c_int) + 1)) == c_long);
    }
    if (math.maxInt(c_longlong) > math.maxInt(c_long)) {
        try testing.expect(@TypeOf(L_SUFFIX(math.maxInt(c_long) + 1)) == c_longlong);
    }
}

const UL_SUFFIX = helpers.UL_SUFFIX;

test UL_SUFFIX {
    try testing.expect(@TypeOf(UL_SUFFIX(1)) == c_ulong);
    if (math.maxInt(c_ulonglong) > math.maxInt(c_ulong)) {
        try testing.expect(@TypeOf(UL_SUFFIX(math.maxInt(c_ulong) + 1)) == c_ulonglong);
    }
}

const LL_SUFFIX = helpers.LL_SUFFIX;

test LL_SUFFIX {
    try testing.expect(@TypeOf(LL_SUFFIX(1)) == c_longlong);
}

const ULL_SUFFIX = helpers.ULL_SUFFIX;

test ULL_SUFFIX {
    try testing.expect(@TypeOf(ULL_SUFFIX(1)) == c_ulonglong);
}
test "Extended C ABI casting" {
if (math.maxInt(c_long) > math.maxInt(c_char)) {
try testing.expect(@TypeOf(L_SUFFIX(@as(c_char, math.maxInt(c_char) - 1))) == c_long); // c_char
}
if (math.maxInt(c_long) > math.maxInt(c_short)) {
try testing.expect(@TypeOf(L_SUFFIX(@as(c_short, math.maxInt(c_short) - 1))) == c_long); // c_short
}
if (math.maxInt(c_long) > math.maxInt(c_ushort)) {
try testing.expect(@TypeOf(L_SUFFIX(@as(c_ushort, math.maxInt(c_ushort) - 1))) == c_long); //c_ushort
}
if (math.maxInt(c_long) > math.maxInt(c_int)) {
try testing.expect(@TypeOf(L_SUFFIX(@as(c_int, math.maxInt(c_int) - 1))) == c_long); // c_int
}
if (math.maxInt(c_long) > math.maxInt(c_uint)) {
try testing.expect(@TypeOf(L_SUFFIX(@as(c_uint, math.maxInt(c_uint) - 1))) == c_long); // c_uint
try testing.expect(@TypeOf(L_SUFFIX(math.maxInt(c_uint) + 1)) == c_long); // comptime_int -> c_long
}
if (math.maxInt(c_longlong) > math.maxInt(c_long)) {
try testing.expect(@TypeOf(L_SUFFIX(@as(c_long, math.maxInt(c_long) - 1))) == c_long); // c_long
try testing.expect(@TypeOf(L_SUFFIX(math.maxInt(c_long) + 1)) == c_longlong); // comptime_int -> c_longlong
}
}
// Local alias so the test below can name the helper under test directly.
const WL_CONTAINER_OF = helpers.WL_CONTAINER_OF;
// Given a pointer to a field, recover a pointer to the struct containing it,
// in the style of Wayland's WL_CONTAINER_OF macro.
test WL_CONTAINER_OF {
    const Record = struct {
        a: u32 = 0,
        b: u32 = 0,
    };
    const target: Record = .{};
    const sample: Record = .{};
    // `&sample` only supplies the container type; the pointer arithmetic is
    // performed on `&target.b`.
    const container = WL_CONTAINER_OF(&target.b, &sample, "b");
    try testing.expectEqual(&target, container);
}
// Local alias so the tests below can name the helper under test directly.
const CAST_OR_CALL = helpers.CAST_OR_CALL;
// When the first argument is a type, CAST_OR_CALL behaves like `cast`.
test "CAST_OR_CALL casting" {
    // Integer narrowing matches `cast` exactly.
    const value: c_int = 1000;
    const narrowed = CAST_OR_CALL(u8, value);
    try testing.expectEqual(cast(u8, value), narrowed);
    // Pointer reinterpretation matches `cast` exactly.
    const Wrapper = struct {
        x: u32 = 0,
    };
    var wrapper: Wrapper = .{};
    const reinterpreted = CAST_OR_CALL(*u8, &wrapper);
    try testing.expectEqual(cast(*u8, &wrapper), reinterpreted);
}
// When the first argument is a function, CAST_OR_CALL forwards the second
// argument as a call and yields whatever the callee returns.
test "CAST_OR_CALL calling" {
    const Callbacks = struct {
        var observed: bool = false;
        fn record(flag: bool) void {
            observed = flag;
        }
        fn isPositive(value: f32) bool {
            return value > 0;
        }
        fn passthrough(n: c_uint) c_uint {
            return n;
        }
    };
    // A void-returning callee is invoked purely for its side effect.
    CAST_OR_CALL(Callbacks.record, true);
    try testing.expectEqual(true, Callbacks.observed);
    CAST_OR_CALL(Callbacks.record, false);
    try testing.expectEqual(false, Callbacks.observed);
    // Value-returning callees behave exactly like a direct call.
    try testing.expectEqual(Callbacks.isPositive(1), CAST_OR_CALL(Callbacks.isPositive, @as(f32, 1)));
    try testing.expectEqual(Callbacks.isPositive(-1), CAST_OR_CALL(Callbacks.isPositive, @as(f32, -1)));
    try testing.expectEqual(Callbacks.passthrough(@as(c_uint, 100)), CAST_OR_CALL(Callbacks.passthrough, @as(c_uint, 100)));
}

View File

@ -0,0 +1,251 @@
const std = @import("std");
const assert = std.debug.assert;
const mem = std.mem;
const process = std.process;
const aro = @import("aro");
const Translator = @import("Translator.zig");
const fast_exit = @import("builtin").mode != .Debug;
// Process-wide GPA; its deferred leak check only runs on the normal return
// path (fast_exit paths call process.exit and skip all defers).
var general_purpose_allocator: std.heap.GeneralPurposeAllocator(.{}) = .init;
// Entry point. Returns 0 on success and 1 on failure; in non-Debug builds
// (`fast_exit`) the process exits immediately instead of unwinding cleanup.
pub fn main() u8 {
    const gpa = general_purpose_allocator.allocator();
    defer _ = general_purpose_allocator.deinit();
    // Arena for argv and other allocations that live for the whole run.
    var arena_instance = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer arena_instance.deinit();
    const arena = arena_instance.allocator();
    const args = process.argsAlloc(arena) catch {
        std.debug.print("ran out of memory allocating arguments\n", .{});
        if (fast_exit) process.exit(1);
        return 1;
    };
    // All Aro diagnostics are rendered straight to stderr, with color
    // detection based on the stderr file handle.
    var stderr_buf: [1024]u8 = undefined;
    var stderr = std.fs.File.stderr().writer(&stderr_buf);
    var diagnostics: aro.Diagnostics = .{
        .output = .{ .to_writer = .{
            .color = .detect(stderr.file),
            .writer = &stderr.interface,
        } },
    };
    var comp = aro.Compilation.initDefault(gpa, arena, &diagnostics, std.fs.cwd()) catch |err| switch (err) {
        error.OutOfMemory => {
            std.debug.print("ran out of memory initializing C compilation\n", .{});
            if (fast_exit) process.exit(1);
            return 1;
        },
    };
    defer comp.deinit();
    // The driver identifies itself by executable path in its output.
    // NOTE: freed after driver.deinit() by defer (LIFO) ordering.
    const exe_name = std.fs.selfExePathAlloc(gpa) catch {
        std.debug.print("unable to find translate-c executable path\n", .{});
        if (fast_exit) process.exit(1);
        return 1;
    };
    defer gpa.free(exe_name);
    var driver: aro.Driver = .{ .comp = &comp, .diagnostics = &diagnostics, .aro_name = exe_name };
    defer driver.deinit();
    var toolchain: aro.Toolchain = .{ .driver = &driver, .filesystem = .{ .real = comp.cwd } };
    defer toolchain.deinit();
    translate(&driver, &toolchain, args) catch |err| switch (err) {
        error.OutOfMemory => {
            std.debug.print("ran out of memory translating\n", .{});
            if (fast_exit) process.exit(1);
            return 1;
        },
        // No message printed here — presumably already reported through
        // `diagnostics` by the driver (d.fatal); confirm against aro.Driver.
        error.FatalError => {
            if (fast_exit) process.exit(1);
            return 1;
        },
        error.WriteFailed => {
            std.debug.print("unable to write to stdout\n", .{});
            if (fast_exit) process.exit(1);
            return 1;
        },
    };
    // Even when translate() returned successfully, non-fatal errors may have
    // been emitted; reflect them in the exit code.
    if (fast_exit) process.exit(@intFromBool(comp.diagnostics.errors != 0));
    return @intFromBool(comp.diagnostics.errors != 0);
}
/// Command-line help text printed for `--help`. The `{s}` placeholder is
/// filled with the program name (args[0]). Options not listed here are
/// forwarded to the Aro driver as CC-style flags (see `translate`).
pub const usage =
    \\Usage {s}: [options] file [CC options]
    \\
    \\Options:
    \\  --help                  Print this message
    \\  --version               Print translate-c version
    \\  -fmodule-libs           Import libraries as modules
    \\  -fno-module-libs        (default) Install libraries next to output file
    \\
    \\
;
/// Drives one translate-c invocation: strips the flags this tool handles
/// itself out of `args`, hands the remainder to the Aro driver as CC-style
/// options, preprocesses and parses the single input file, translates it to
/// Zig source, and writes the result to `-o <path>` (or stdout).
fn translate(d: *aro.Driver, tc: *aro.Toolchain, args: [][:0]u8) !void {
    const gpa = d.comp.gpa;
    var module_libs = false;
    // Compact `args` in place: our own flags are consumed here, everything
    // else (including args[0], the program name) is kept for the Aro driver.
    const aro_args = args: {
        var i: usize = 0;
        for (args) |arg| {
            args[i] = arg;
            if (mem.eql(u8, arg, "--help")) {
                var stdout_buf: [512]u8 = undefined;
                var stdout = std.fs.File.stdout().writer(&stdout_buf);
                try stdout.interface.print(usage, .{args[0]});
                try stdout.interface.flush();
                return;
            } else if (mem.eql(u8, arg, "--version")) {
                var stdout_buf: [512]u8 = undefined;
                var stdout = std.fs.File.stdout().writer(&stdout_buf);
                // TODO add version
                try stdout.interface.writeAll("0.0.0-dev\n");
                try stdout.interface.flush();
                return;
            } else if (mem.eql(u8, arg, "-fmodule-libs")) {
                module_libs = true;
            } else if (mem.eql(u8, arg, "-fno-module-libs")) {
                module_libs = false;
            } else {
                // Not one of ours: keep it (slot already written above).
                i += 1;
            }
        }
        break :args args[0..i];
    };
    // Collect any -D/-U style macro definitions from the command line into a
    // synthetic "<command line>" source file, prefixed with __TRANSLATE_C__.
    const user_macros = macros: {
        var macro_buf: std.ArrayListUnmanaged(u8) = .empty;
        defer macro_buf.deinit(gpa);
        try macro_buf.appendSlice(gpa, "#define __TRANSLATE_C__ 1\n");
        // parseArgs wants a writer for its own output; we discard it. Its
        // `true` return (fully-handled invocation) cannot happen here since
        // --help/--version were already stripped above — hence the assert.
        var discard_buf: [256]u8 = undefined;
        var discarding: std.io.Writer.Discarding = .init(&discard_buf);
        assert(!try d.parseArgs(&discarding.writer, &macro_buf, aro_args));
        if (macro_buf.items.len > std.math.maxInt(u32)) {
            return d.fatal("user provided macro source exceeded max size", .{});
        }
        // Ownership of `content` transfers to the compilation on success.
        const content = try macro_buf.toOwnedSlice(gpa);
        errdefer gpa.free(content);
        break :macros try d.comp.addSourceFromOwnedBuffer("<command line>", content, .user);
    };
    if (d.inputs.items.len != 1) {
        return d.fatal("expected exactly one input file", .{});
    }
    const source = d.inputs.items[0];
    // Toolchain discovery locates system headers/libraries for the target.
    tc.discover() catch |er| switch (er) {
        error.OutOfMemory => return error.OutOfMemory,
        error.TooManyMultilibs => return d.fatal("found more than one multilib with the same priority", .{}),
    };
    tc.defineSystemIncludes() catch |er| switch (er) {
        error.OutOfMemory => return error.OutOfMemory,
        error.AroIncludeNotFound => return d.fatal("unable to find Aro builtin headers", .{}),
    };
    const builtin_macros = d.comp.generateBuiltinMacros(.include_system_defines) catch |err| switch (err) {
        error.FileTooBig => return d.fatal("builtin macro source exceeded max size", .{}),
        else => |e| return e,
    };
    var pp = try aro.Preprocessor.initDefault(d.comp);
    defer pp.deinit();
    try pp.preprocessSources(&.{ source, builtin_macros, user_macros });
    var c_tree = try pp.parse();
    defer c_tree.deinit();
    // Any parse errors were already rendered via diagnostics; bail out.
    if (d.diagnostics.errors != 0) {
        if (fast_exit) process.exit(1);
        return error.FatalError;
    }
    // Translate the C AST into rendered Zig source. Caller of Translator
    // owns the returned slice.
    const rendered_zig = try Translator.translate(.{
        .gpa = gpa,
        .comp = d.comp,
        .pp = &pp,
        .tree = &c_tree,
        .module_libs = module_libs,
    });
    defer gpa.free(rendered_zig);
    // Resolve the output destination: stdout by default, or the -o path
    // ("-" also means stdout), creating parent directories as needed.
    var close_out_file = false;
    var out_file_path: []const u8 = "<stdout>";
    var out_file: std.fs.File = .stdout();
    defer if (close_out_file) out_file.close();
    if (d.output_name) |path| blk: {
        if (std.mem.eql(u8, path, "-")) break :blk;
        if (std.fs.path.dirname(path)) |dirname| {
            std.fs.cwd().makePath(dirname) catch |err|
                return d.fatal("failed to create path to '{s}': {s}", .{ path, aro.Driver.errorDescription(err) });
        }
        out_file = std.fs.cwd().createFile(path, .{}) catch |err| {
            return d.fatal("failed to create output file '{s}': {s}", .{ path, aro.Driver.errorDescription(err) });
        };
        close_out_file = true;
        out_file_path = path;
    }
    var out_buf: [4096]u8 = undefined;
    var out_writer = out_file.writer(&out_buf);
    out_writer.interface.writeAll(rendered_zig) catch
        return d.fatal("failed to write result to '{s}': {s}", .{ out_file_path, aro.Driver.errorDescription(out_writer.err.?) });
    // Unless libraries are imported as modules, copy the runtime support
    // files next to the output so the generated code's imports resolve.
    if (!module_libs) {
        const dest_path = if (d.output_name) |path| std.fs.path.dirname(path) else null;
        installLibs(d, dest_path) catch |err|
            return d.fatal("failed to install library files: {s}", .{aro.Driver.errorDescription(err)});
    }
    if (fast_exit) process.exit(0);
}
/// Locates the `lib` directory alongside (or in an ancestor of) the running
/// executable's directory and copies the translate-c support files
/// (`c_builtins.zig`, `helpers.zig`) into `dest_path`, or into the current
/// working directory when `dest_path` is null.
/// Returns `error.FileNotFound` when no ancestor contains a suitable `lib`.
fn installLibs(d: *aro.Driver, dest_path: ?[]const u8) !void {
    const gpa = d.comp.gpa;
    const cwd = std.fs.cwd();
    const self_exe_path = try std.fs.selfExePathAlloc(gpa);
    defer gpa.free(self_exe_path);
    // Walk upward from the executable's location looking for a `lib`
    // directory containing the support files. Directories that cannot be
    // opened are simply skipped (the per-iteration defers still run).
    var cur_dir: []const u8 = self_exe_path;
    while (std.fs.path.dirname(cur_dir)) |dirname| : (cur_dir = dirname) {
        var base_dir = cwd.openDir(dirname, .{}) catch continue;
        defer base_dir.close();
        var lib_dir = base_dir.openDir("lib", .{}) catch continue;
        defer lib_dir.close();
        // Probe one known file to confirm this is the right `lib` directory.
        lib_dir.access("c_builtins.zig", .{}) catch continue;
        // Copy each support file; path.join skips the empty component when
        // dest_path is null, so the copy lands in the cwd.
        for ([_][]const u8{ "c_builtins.zig", "helpers.zig" }) |basename| {
            const install_path = try std.fs.path.join(gpa, &.{ dest_path orelse "", basename });
            defer gpa.free(install_path);
            try lib_dir.copyFile(basename, cwd, install_path, .{});
        }
        return;
    }
    return error.FileNotFound;
}
// When building the test binary, reference the sibling modules so that
// their `test` declarations are discovered and run by the test runner.
comptime {
    if (@import("builtin").is_test) {
        _ = Translator;
        _ = @import("helpers.zig");
        _ = @import("PatternList.zig");
    }
}

View File

@ -5657,149 +5657,10 @@ pub const CImportResult = struct {
/// Caller owns returned memory. /// Caller owns returned memory.
pub fn cImport(comp: *Compilation, c_src: []const u8, owner_mod: *Package.Module) !CImportResult { pub fn cImport(comp: *Compilation, c_src: []const u8, owner_mod: *Package.Module) !CImportResult {
dev.check(.translate_c_command); dev.check(.translate_c_command);
_ = comp;
const tracy_trace = trace(@src()); _ = c_src;
defer tracy_trace.end(); _ = owner_mod;
@panic("TODO execute 'zig translate-c' as a sub process and use the results");
const cimport_zig_basename = "cimport.zig";
var man = comp.obtainCObjectCacheManifest(owner_mod);
defer man.deinit();
man.hash.add(@as(u16, 0xb945)); // Random number to distinguish translate-c from compiling C objects
man.hash.addBytes(c_src);
man.hash.add(comp.config.c_frontend);
// If the previous invocation resulted in clang errors, we will see a hit
// here with 0 files in the manifest, in which case it is actually a miss.
// We need to "unhit" in this case, to keep the digests matching.
const prev_hash_state = man.hash.peekBin();
const actual_hit = hit: {
_ = try man.hit();
if (man.files.entries.len == 0) {
man.unhit(prev_hash_state, 0);
break :hit false;
}
break :hit true;
};
const digest = if (!actual_hit) digest: {
var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa);
defer arena_allocator.deinit();
const arena = arena_allocator.allocator();
const tmp_digest = man.hash.peek();
const tmp_dir_sub_path = try fs.path.join(arena, &[_][]const u8{ "o", &tmp_digest });
var zig_cache_tmp_dir = try comp.dirs.local_cache.handle.makeOpenPath(tmp_dir_sub_path, .{});
defer zig_cache_tmp_dir.close();
const cimport_basename = "cimport.h";
const out_h_path = try comp.dirs.local_cache.join(arena, &[_][]const u8{
tmp_dir_sub_path, cimport_basename,
});
const out_dep_path = try std.fmt.allocPrint(arena, "{s}.d", .{out_h_path});
try zig_cache_tmp_dir.writeFile(.{ .sub_path = cimport_basename, .data = c_src });
if (comp.verbose_cimport) {
log.info("C import source: {s}", .{out_h_path});
}
var argv = std.array_list.Managed([]const u8).init(comp.gpa);
defer argv.deinit();
try argv.append(@tagName(comp.config.c_frontend)); // argv[0] is program name, actual args start at [1]
try comp.addTranslateCCArgs(arena, &argv, .c, out_dep_path, owner_mod);
try argv.append(out_h_path);
if (comp.verbose_cc) {
dump_argv(argv.items);
}
var tree = switch (comp.config.c_frontend) {
.aro => tree: {
if (true) @panic("TODO");
break :tree undefined;
},
.clang => tree: {
if (!build_options.have_llvm) unreachable;
const translate_c = @import("translate_c.zig");
// Convert to null terminated args.
const new_argv_with_sentinel = try arena.alloc(?[*:0]const u8, argv.items.len + 1);
new_argv_with_sentinel[argv.items.len] = null;
const new_argv = new_argv_with_sentinel[0..argv.items.len :null];
for (argv.items, 0..) |arg, i| {
new_argv[i] = try arena.dupeZ(u8, arg);
}
const c_headers_dir_path_z = try comp.dirs.zig_lib.joinZ(arena, &.{"include"});
var errors = std.zig.ErrorBundle.empty;
errdefer errors.deinit(comp.gpa);
break :tree translate_c.translate(
comp.gpa,
new_argv.ptr,
new_argv.ptr + new_argv.len,
&errors,
c_headers_dir_path_z,
) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.SemanticAnalyzeFail => {
return CImportResult{
.digest = undefined,
.cache_hit = actual_hit,
.errors = errors,
};
},
};
},
};
defer tree.deinit(comp.gpa);
if (comp.verbose_cimport) {
log.info("C import .d file: {s}", .{out_dep_path});
}
const dep_basename = fs.path.basename(out_dep_path);
try man.addDepFilePost(zig_cache_tmp_dir, dep_basename);
switch (comp.cache_use) {
.whole => |whole| if (whole.cache_manifest) |whole_cache_manifest| {
whole.cache_manifest_mutex.lock();
defer whole.cache_manifest_mutex.unlock();
try whole_cache_manifest.addDepFilePost(zig_cache_tmp_dir, dep_basename);
},
.incremental, .none => {},
}
const bin_digest = man.finalBin();
const hex_digest = Cache.binToHex(bin_digest);
const o_sub_path = "o" ++ fs.path.sep_str ++ hex_digest;
var o_dir = try comp.dirs.local_cache.handle.makeOpenPath(o_sub_path, .{});
defer o_dir.close();
var out_zig_file = try o_dir.createFile(cimport_zig_basename, .{});
defer out_zig_file.close();
const formatted = try tree.renderAlloc(comp.gpa);
defer comp.gpa.free(formatted);
try out_zig_file.writeAll(formatted);
break :digest bin_digest;
} else man.finalBin();
if (man.have_exclusive_lock) {
// Write the updated manifest. This is a no-op if the manifest is not dirty. Note that it is
// possible we had a hit and the manifest is dirty, for example if the file mtime changed but
// the contents were the same, we hit the cache but the manifest is dirty and we need to update
// it to prevent doing a full file content comparison the next time around.
man.writeManifest() catch |err| {
log.warn("failed to write cache manifest for C import: {s}", .{@errorName(err)});
};
}
return CImportResult{
.digest = digest,
.cache_hit = actual_hit,
.errors = std.zig.ErrorBundle.empty,
};
} }
fn workerUpdateCObject( fn workerUpdateCObject(
@ -6739,20 +6600,6 @@ pub fn tmpFilePath(comp: Compilation, ally: Allocator, suffix: []const u8) error
} }
} }
pub fn addTranslateCCArgs(
comp: *Compilation,
arena: Allocator,
argv: *std.array_list.Managed([]const u8),
ext: FileExt,
out_dep_path: ?[]const u8,
owner_mod: *Package.Module,
) !void {
try argv.appendSlice(&.{ "-x", "c" });
try comp.addCCArgs(arena, argv, ext, out_dep_path, owner_mod);
// This gives us access to preprocessing entities, presumably at the cost of performance.
try argv.appendSlice(&.{ "-Xclang", "-detailed-preprocessing-record" });
}
/// Add common C compiler args between translate-c and C object compilation. /// Add common C compiler args between translate-c and C object compilation.
pub fn addCCArgs( pub fn addCCArgs(
comp: *const Compilation, comp: *const Compilation,

View File

@ -32,7 +32,6 @@ const Sema = @import("Sema.zig");
const target_util = @import("target.zig"); const target_util = @import("target.zig");
const build_options = @import("build_options"); const build_options = @import("build_options");
const isUpDir = @import("introspect.zig").isUpDir; const isUpDir = @import("introspect.zig").isUpDir;
const clang = @import("clang.zig");
const InternPool = @import("InternPool.zig"); const InternPool = @import("InternPool.zig");
const Alignment = InternPool.Alignment; const Alignment = InternPool.Alignment;
const AnalUnit = InternPool.AnalUnit; const AnalUnit = InternPool.AnalUnit;

File diff suppressed because it is too large Load Diff

View File

@ -296,7 +296,11 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
}); });
const aro = @import("aro"); const aro = @import("aro");
var aro_comp = aro.Compilation.init(gpa, std.fs.cwd()); var diagnostics: aro.Diagnostics = .{
.output = .{ .to_list = .{ .arena = .init(gpa) } },
};
defer diagnostics.deinit();
var aro_comp = aro.Compilation.init(gpa, arena, &diagnostics, std.fs.cwd());
defer aro_comp.deinit(); defer aro_comp.deinit();
aro_comp.target = target.*; aro_comp.target = target.*;
@ -316,17 +320,22 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
const builtin_macros = try aro_comp.generateBuiltinMacros(.include_system_defines); const builtin_macros = try aro_comp.generateBuiltinMacros(.include_system_defines);
const def_file_source = try aro_comp.addSourceFromPath(def_file_path); const def_file_source = try aro_comp.addSourceFromPath(def_file_path);
var pp = aro.Preprocessor.init(&aro_comp); var pp = aro.Preprocessor.init(&aro_comp, .{ .provided = 0 });
defer pp.deinit(); defer pp.deinit();
pp.linemarkers = .none; pp.linemarkers = .none;
pp.preserve_whitespace = true; pp.preserve_whitespace = true;
try pp.preprocessSources(&.{ def_file_source, builtin_macros }); try pp.preprocessSources(&.{ def_file_source, builtin_macros });
for (aro_comp.diagnostics.list.items) |diagnostic| { if (aro_comp.diagnostics.output.to_list.messages.items.len != 0) {
if (diagnostic.kind == .@"fatal error" or diagnostic.kind == .@"error") { var buffer: [64]u8 = undefined;
aro.Diagnostics.render(&aro_comp, std.Io.tty.detectConfig(std.fs.File.stderr())); const w = std.debug.lockStderrWriter(&buffer);
return error.AroPreprocessorFailed; defer std.debug.unlockStderrWriter();
for (aro_comp.diagnostics.output.to_list.messages.items) |msg| {
if (msg.kind == .@"fatal error" or msg.kind == .@"error") {
aro.Diagnostics.writeToWriter(msg, w, std.io.tty.detectConfig(std.fs.File.stderr())) catch {};
return error.AroPreprocessorFailed;
}
} }
} }
@ -335,9 +344,9 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
const def_final_file = try o_dir.createFile(final_def_basename, .{ .truncate = true }); const def_final_file = try o_dir.createFile(final_def_basename, .{ .truncate = true });
defer def_final_file.close(); defer def_final_file.close();
var buffer: [1024]u8 = undefined; var buffer: [1024]u8 = undefined;
var def_final_file_writer = def_final_file.writer(&buffer); var file_writer = def_final_file.writer(&buffer);
try pp.prettyPrintTokens(&def_final_file_writer.interface, .result_only); try pp.prettyPrintTokens(&file_writer.interface, .result_only);
try def_final_file_writer.interface.flush(); try file_writer.interface.flush();
} }
const lib_final_path = try std.fs.path.join(gpa, &.{ "o", &digest, final_lib_basename }); const lib_final_path = try std.fs.path.join(gpa, &.{ "o", &digest, final_lib_basename });

View File

@ -204,17 +204,6 @@ pub fn main() anyerror!void {
return mainArgs(gpa, arena, args); return mainArgs(gpa, arena, args);
} }
/// Check that LLVM and Clang have been linked properly so that they are using the same
/// libc++ and can safely share objects with pointers to static variables in libc++
fn verifyLibcxxCorrectlyLinked() void {
if (build_options.have_llvm and ZigClangIsLLVMUsingSeparateLibcxx()) {
fatal(
\\Zig was built/linked incorrectly: LLVM and Clang have separate copies of libc++
\\ If you are dynamically linking LLVM, make sure you dynamically link libc++ too
, .{});
}
}
fn mainArgs(gpa: Allocator, arena: Allocator, args: []const []const u8) !void { fn mainArgs(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
const tr = tracy.trace(@src()); const tr = tracy.trace(@src());
defer tr.end(); defer tr.end();
@ -350,13 +339,9 @@ fn mainArgs(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
} else if (mem.eql(u8, cmd, "version")) { } else if (mem.eql(u8, cmd, "version")) {
dev.check(.version_command); dev.check(.version_command);
try fs.File.stdout().writeAll(build_options.version ++ "\n"); try fs.File.stdout().writeAll(build_options.version ++ "\n");
// Check libc++ linkage to make sure Zig was built correctly, but only return;
// for "env" and "version" to avoid affecting the startup time for
// build-critical commands (check takes about ~10 μs)
return verifyLibcxxCorrectlyLinked();
} else if (mem.eql(u8, cmd, "env")) { } else if (mem.eql(u8, cmd, "env")) {
dev.check(.env_command); dev.check(.env_command);
verifyLibcxxCorrectlyLinked();
var stdout_writer = fs.File.stdout().writer(&stdout_buffer); var stdout_writer = fs.File.stdout().writer(&stdout_buffer);
try @import("print_env.zig").cmdEnv( try @import("print_env.zig").cmdEnv(
arena, arena,
@ -4551,179 +4536,24 @@ fn cmdTranslateC(
prog_node: std.Progress.Node, prog_node: std.Progress.Node,
) !void { ) !void {
dev.check(.translate_c_command); dev.check(.translate_c_command);
_ = file_system_inputs;
_ = fancy_output;
const color: Color = .auto;
assert(comp.c_source_files.len == 1); assert(comp.c_source_files.len == 1);
const c_source_file = comp.c_source_files[0]; const c_source_file = comp.c_source_files[0];
const translated_zig_basename = try std.fmt.allocPrint(arena, "{s}.zig", .{comp.root_name}); var argv: std.ArrayListUnmanaged([]const u8) = .empty;
try argv.append(arena, c_source_file.src_path);
var man: Cache.Manifest = comp.obtainCObjectCacheManifest(comp.root_mod); if (comp.verbose_cc) Compilation.dump_argv(argv.items);
man.want_shared_lock = false;
defer man.deinit();
man.hash.add(@as(u16, 0xb945)); // Random number to distinguish translate-c from compiling C objects try jitCmd(comp.gpa, arena, argv.items, .{
man.hash.add(comp.config.c_frontend); .cmd_name = "translate-c",
Compilation.cache_helpers.hashCSource(&man, c_source_file) catch |err| { .root_src_path = "translate-c/src/main.zig",
fatal("unable to process '{s}': {s}", .{ c_source_file.src_path, @errorName(err) }); .depend_on_aro = true,
}; .progress_node = prog_node,
});
if (fancy_output) |p| p.cache_hit = true; return cleanExit();
const bin_digest, const hex_digest = if (try man.hit()) digest: {
if (file_system_inputs) |buf| try man.populateFileSystemInputs(buf);
const bin_digest = man.finalBin();
const hex_digest = Cache.binToHex(bin_digest);
break :digest .{ bin_digest, hex_digest };
} else digest: {
if (fancy_output) |p| p.cache_hit = false;
var argv = std.array_list.Managed([]const u8).init(arena);
switch (comp.config.c_frontend) {
.aro => {},
.clang => {
// argv[0] is program name, actual args start at [1]
try argv.append(@tagName(comp.config.c_frontend));
},
}
var zig_cache_tmp_dir = try comp.dirs.local_cache.handle.makeOpenPath("tmp", .{});
defer zig_cache_tmp_dir.close();
const ext = Compilation.classifyFileExt(c_source_file.src_path);
const out_dep_path: ?[]const u8 = blk: {
if (comp.config.c_frontend == .aro or comp.disable_c_depfile or !ext.clangSupportsDepFile())
break :blk null;
const c_src_basename = fs.path.basename(c_source_file.src_path);
const dep_basename = try std.fmt.allocPrint(arena, "{s}.d", .{c_src_basename});
const out_dep_path = try comp.tmpFilePath(arena, dep_basename);
break :blk out_dep_path;
};
// TODO
if (comp.config.c_frontend != .aro)
try comp.addTranslateCCArgs(arena, &argv, ext, out_dep_path, comp.root_mod);
try argv.append(c_source_file.src_path);
if (comp.verbose_cc) {
Compilation.dump_argv(argv.items);
}
const Result = union(enum) {
success: []const u8,
error_bundle: std.zig.ErrorBundle,
};
const result: Result = switch (comp.config.c_frontend) {
.aro => f: {
var stdout: []u8 = undefined;
try jitCmd(comp.gpa, arena, argv.items, .{
.cmd_name = "aro_translate_c",
.root_src_path = "aro_translate_c.zig",
.depend_on_aro = true,
.capture = &stdout,
.progress_node = prog_node,
});
break :f .{ .success = stdout };
},
.clang => f: {
if (!build_options.have_llvm) unreachable;
const translate_c = @import("translate_c.zig");
// Convert to null terminated args.
const clang_args_len = argv.items.len + c_source_file.extra_flags.len;
const new_argv_with_sentinel = try arena.alloc(?[*:0]const u8, clang_args_len + 1);
new_argv_with_sentinel[clang_args_len] = null;
const new_argv = new_argv_with_sentinel[0..clang_args_len :null];
for (argv.items, 0..) |arg, i| {
new_argv[i] = try arena.dupeZ(u8, arg);
}
for (c_source_file.extra_flags, 0..) |arg, i| {
new_argv[argv.items.len + i] = try arena.dupeZ(u8, arg);
}
const c_headers_dir_path_z = try comp.dirs.zig_lib.joinZ(arena, &.{"include"});
var errors = std.zig.ErrorBundle.empty;
var tree = translate_c.translate(
comp.gpa,
new_argv.ptr,
new_argv.ptr + new_argv.len,
&errors,
c_headers_dir_path_z,
) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.SemanticAnalyzeFail => break :f .{ .error_bundle = errors },
};
defer tree.deinit(comp.gpa);
break :f .{ .success = try tree.renderAlloc(arena) };
},
};
if (out_dep_path) |dep_file_path| add_deps: {
const dep_basename = fs.path.basename(dep_file_path);
// Add the files depended on to the cache system.
man.addDepFilePost(zig_cache_tmp_dir, dep_basename) catch |err| switch (err) {
error.FileNotFound => {
// Clang didn't emit the dep file; nothing to add to the manifest.
break :add_deps;
},
else => |e| return e,
};
// Just to save disk space, we delete the file because it is never needed again.
zig_cache_tmp_dir.deleteFile(dep_basename) catch |err| {
warn("failed to delete '{s}': {s}", .{ dep_file_path, @errorName(err) });
};
}
const formatted = switch (result) {
.success => |formatted| formatted,
.error_bundle => |eb| {
if (file_system_inputs) |buf| try man.populateFileSystemInputs(buf);
if (fancy_output) |p| {
p.errors = eb;
return;
} else {
eb.renderToStdErr(color.renderOptions());
process.exit(1);
}
},
};
const bin_digest = man.finalBin();
const hex_digest = Cache.binToHex(bin_digest);
const o_sub_path = try fs.path.join(arena, &[_][]const u8{ "o", &hex_digest });
var o_dir = try comp.dirs.local_cache.handle.makeOpenPath(o_sub_path, .{});
defer o_dir.close();
var zig_file = try o_dir.createFile(translated_zig_basename, .{});
defer zig_file.close();
try zig_file.writeAll(formatted);
man.writeManifest() catch |err| warn("failed to write cache manifest: {t}", .{err});
if (file_system_inputs) |buf| try man.populateFileSystemInputs(buf);
break :digest .{ bin_digest, hex_digest };
};
if (fancy_output) |p| {
p.digest = bin_digest;
p.errors = std.zig.ErrorBundle.empty;
} else {
const out_zig_path = try fs.path.join(arena, &.{ "o", &hex_digest, translated_zig_basename });
const zig_file = comp.dirs.local_cache.handle.openFile(out_zig_path, .{}) catch |err| {
const path = comp.dirs.local_cache.path orelse ".";
fatal("unable to open cached translated zig file '{s}{s}{s}': {s}", .{ path, fs.path.sep_str, out_zig_path, @errorName(err) });
};
defer zig_file.close();
var stdout_writer = fs.File.stdout().writer(&stdout_buffer);
var file_reader = zig_file.reader(&.{});
_ = try stdout_writer.interface.sendFileAll(&file_reader, .unlimited);
try stdout_writer.interface.flush();
return cleanExit();
}
} }
const usage_init = const usage_init =

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff