sync Aro dependency

ref: adfd13c6ffb563b1379052b92f6ae4148b91cc12
This commit is contained in:
Veikka Tuominen 2024-09-08 14:23:03 +03:00
parent 9e6d167bb7
commit de8cece6e7
35 changed files with 2284 additions and 797 deletions

View File

@ -23,6 +23,7 @@ pub const version_str = backend.version_str;
pub const version = backend.version;
test {
_ = @import("aro/annex_g.zig");
_ = @import("aro/Builtins.zig");
_ = @import("aro/char_info.zig");
_ = @import("aro/Compilation.zig");

View File

@ -38,12 +38,64 @@ pub const Kind = enum {
}
};
pub const Iterator = struct {
    /// Where attributes are being drawn from: either a plain slice,
    /// or a type whose wrappers are unwrapped lazily during iteration.
    source: union(enum) {
        ty: Type,
        slice: []const Attribute,
    },
    index: usize,

    /// Iterate over an optional attribute slice; `null` behaves like an empty slice.
    pub fn initSlice(slice: ?[]const Attribute) Iterator {
        return .{ .source = .{ .slice = slice orelse &.{} }, .index = 0 };
    }

    /// Iterate over every attribute reachable from `ty`, including those on
    /// nested typeof/attributed wrappers.
    pub fn initType(ty: Type) Iterator {
        return .{ .source = .{ .ty = ty }, .index = 0 };
    }

    /// returns the next attribute as well as its index within the slice or current type
    /// The index can be used to determine when a nested type has been recursed into
    pub fn next(self: *Iterator) ?struct { Attribute, usize } {
        // Iterative form: descend through type wrappers until an attribute
        // is produced or the chain is exhausted.
        while (true) {
            switch (self.source) {
                .slice => |slice| {
                    if (self.index >= slice.len) return null;
                    const current = self.index;
                    self.index = current + 1;
                    return .{ slice[current], current };
                },
                .ty => |ty| switch (ty.specifier) {
                    // Unwrap typeof wrappers and keep scanning the underlying type.
                    .typeof_type => self.* = .{ .source = .{ .ty = ty.data.sub_type.* }, .index = 0 },
                    .typeof_expr => self.* = .{ .source = .{ .ty = ty.data.expr.ty }, .index = 0 },
                    .attributed => {
                        const attrs = ty.data.attributed.attributes;
                        if (self.index < attrs.len) {
                            const current = self.index;
                            self.index = current + 1;
                            return .{ attrs[current], current };
                        }
                        // This level is exhausted; continue with the base type.
                        self.* = .{ .source = .{ .ty = ty.data.attributed.base }, .index = 0 };
                    },
                    else => return null,
                },
            }
        }
    }
};
pub const ArgumentType = enum {
string,
identifier,
int,
alignment,
float,
complex_float,
expression,
nullptr_t,
@ -54,6 +106,7 @@ pub const ArgumentType = enum {
.int, .alignment => "an integer constant",
.nullptr_t => "nullptr",
.float => "a floating point number",
.complex_float => "a complex floating point number",
.expression => "an expression",
};
}
@ -65,7 +118,7 @@ pub fn requiredArgCount(attr: Tag) u32 {
inline else => |tag| {
comptime var needed = 0;
comptime {
const fields = std.meta.fields(@field(attributes, @tagName(tag)));
const fields = @typeInfo(@field(attributes, @tagName(tag))).@"struct".fields;
for (fields) |arg_field| {
if (!mem.eql(u8, arg_field.name, "__name_tok") and @typeInfo(arg_field.type) != .optional) needed += 1;
}
@ -81,7 +134,7 @@ pub fn maxArgCount(attr: Tag) u32 {
inline else => |tag| {
comptime var max = 0;
comptime {
const fields = std.meta.fields(@field(attributes, @tagName(tag)));
const fields = @typeInfo(@field(attributes, @tagName(tag))).@"struct".fields;
for (fields) |arg_field| {
if (!mem.eql(u8, arg_field.name, "__name_tok")) max += 1;
}
@ -106,7 +159,7 @@ pub const Formatting = struct {
switch (attr) {
.calling_convention => unreachable,
inline else => |tag| {
const fields = std.meta.fields(@field(attributes, @tagName(tag)));
const fields = @typeInfo(@field(attributes, @tagName(tag))).@"struct".fields;
if (fields.len == 0) unreachable;
const Unwrapped = UnwrapOptional(fields[0].type);
@ -123,14 +176,13 @@ pub const Formatting = struct {
switch (attr) {
.calling_convention => unreachable,
inline else => |tag| {
const fields = std.meta.fields(@field(attributes, @tagName(tag)));
const fields = @typeInfo(@field(attributes, @tagName(tag))).@"struct".fields;
if (fields.len == 0) unreachable;
const Unwrapped = UnwrapOptional(fields[0].type);
if (@typeInfo(Unwrapped) != .@"enum") unreachable;
const enum_fields = @typeInfo(Unwrapped).@"enum".fields;
@setEvalBranchQuota(3000);
const quote = comptime quoteChar(@enumFromInt(@intFromEnum(tag)));
comptime var values: []const u8 = quote ++ enum_fields[0].name ++ quote;
inline for (enum_fields[1..]) |enum_field| {
@ -148,7 +200,7 @@ pub fn wantsIdentEnum(attr: Tag) bool {
switch (attr) {
.calling_convention => return false,
inline else => |tag| {
const fields = std.meta.fields(@field(attributes, @tagName(tag)));
const fields = @typeInfo(@field(attributes, @tagName(tag))).@"struct".fields;
if (fields.len == 0) return false;
const Unwrapped = UnwrapOptional(fields[0].type);
@ -162,7 +214,7 @@ pub fn wantsIdentEnum(attr: Tag) bool {
pub fn diagnoseIdent(attr: Tag, arguments: *Arguments, ident: []const u8) ?Diagnostics.Message {
switch (attr) {
inline else => |tag| {
const fields = std.meta.fields(@field(attributes, @tagName(tag)));
const fields = @typeInfo(@field(attributes, @tagName(tag))).@"struct".fields;
if (fields.len == 0) unreachable;
const Unwrapped = UnwrapOptional(fields[0].type);
if (@typeInfo(Unwrapped) != .@"enum") unreachable;
@ -181,7 +233,7 @@ pub fn diagnoseIdent(attr: Tag, arguments: *Arguments, ident: []const u8) ?Diagn
pub fn wantsAlignment(attr: Tag, idx: usize) bool {
switch (attr) {
inline else => |tag| {
const fields = std.meta.fields(@field(attributes, @tagName(tag)));
const fields = @typeInfo(@field(attributes, @tagName(tag))).@"struct".fields;
if (fields.len == 0) return false;
return switch (idx) {
@ -195,7 +247,7 @@ pub fn wantsAlignment(attr: Tag, idx: usize) bool {
pub fn diagnoseAlignment(attr: Tag, arguments: *Arguments, arg_idx: u32, res: Parser.Result, p: *Parser) !?Diagnostics.Message {
switch (attr) {
inline else => |tag| {
const arg_fields = std.meta.fields(@field(attributes, @tagName(tag)));
const arg_fields = @typeInfo(@field(attributes, @tagName(tag))).@"struct".fields;
if (arg_fields.len == 0) unreachable;
switch (arg_idx) {
@ -249,8 +301,7 @@ fn diagnoseField(
},
.bytes => |bytes| {
if (Wanted == Value) {
std.debug.assert(node.tag == .string_literal_expr);
if (!node.ty.elemType().is(.char) and !node.ty.elemType().is(.uchar)) {
if (node.tag != .string_literal_expr or (!node.ty.elemType().is(.char) and !node.ty.elemType().is(.uchar))) {
return .{
.tag = .attribute_requires_string,
.extra = .{ .str = decl.name },
@ -264,7 +315,6 @@ fn diagnoseField(
@field(@field(arguments, decl.name), field.name) = enum_val;
return null;
} else {
@setEvalBranchQuota(3000);
return .{
.tag = .unknown_attr_enum,
.extra = .{ .attr_enum = .{ .tag = std.meta.stringToEnum(Tag, decl.name).? } },
@ -278,8 +328,19 @@ fn diagnoseField(
.int => .int,
.bytes => .string,
.float => .float,
.complex => .complex_float,
.null => .nullptr_t,
else => unreachable,
.int_ty,
.float_ty,
.complex_ty,
.ptr_ty,
.noreturn_ty,
.void_ty,
.func_ty,
.array_ty,
.vector_ty,
.record_ty,
=> unreachable,
});
}
@ -309,7 +370,7 @@ pub fn diagnose(attr: Tag, arguments: *Arguments, arg_idx: u32, res: Parser.Resu
.tag = .attribute_too_many_args,
.extra = .{ .attr_arg_count = .{ .attribute = attr, .expected = max_arg_count } },
};
const arg_fields = std.meta.fields(@field(attributes, decl.name));
const arg_fields = @typeInfo(@field(attributes, decl.name)).@"struct".fields;
switch (arg_idx) {
inline 0...arg_fields.len - 1 => |arg_i| {
return diagnoseField(decl, arg_fields[arg_i], UnwrapOptional(arg_fields[arg_i].type), arguments, res, node, p);
@ -645,7 +706,7 @@ pub const Arguments = blk: {
var union_fields: [decls.len]ZigType.UnionField = undefined;
for (decls, &union_fields) |decl, *field| {
field.* = .{
.name = decl.name ++ "",
.name = decl.name,
.type = @field(attributes, decl.name),
.alignment = 0,
};
@ -730,7 +791,6 @@ pub fn applyVariableAttributes(p: *Parser, ty: Type, attr_buf_start: usize, tag:
const toks = p.attr_buf.items(.tok)[attr_buf_start..];
p.attr_application_buf.items.len = 0;
var base_ty = ty;
if (base_ty.specifier == .attributed) base_ty = base_ty.data.attributed.base;
var common = false;
var nocommon = false;
for (attrs, toks) |attr, tok| switch (attr.tag) {
@ -772,15 +832,10 @@ pub fn applyVariableAttributes(p: *Parser, ty: Type, attr_buf_start: usize, tag:
.copy,
.tls_model,
.visibility,
=> std.debug.panic("apply variable attribute {s}", .{@tagName(attr.tag)}),
=> |t| try p.errExtra(.attribute_todo, tok, .{ .attribute_todo = .{ .tag = t, .kind = .variables } }),
else => try ignoredAttrErr(p, tok, attr.tag, "variables"),
};
const existing = ty.getAttributes();
if (existing.len == 0 and p.attr_application_buf.items.len == 0) return base_ty;
if (existing.len == 0) return base_ty.withAttributes(p.arena, p.attr_application_buf.items);
const attributed_type = try Type.Attributed.create(p.arena, base_ty, existing, p.attr_application_buf.items);
return Type{ .specifier = .attributed, .data = .{ .attributed = attributed_type } };
return base_ty.withAttributes(p.arena, p.attr_application_buf.items);
}
pub fn applyFieldAttributes(p: *Parser, field_ty: *Type, attr_buf_start: usize) ![]const Attribute {
@ -789,7 +844,7 @@ pub fn applyFieldAttributes(p: *Parser, field_ty: *Type, attr_buf_start: usize)
p.attr_application_buf.items.len = 0;
for (attrs, toks) |attr, tok| switch (attr.tag) {
// zig fmt: off
.@"packed", .may_alias, .deprecated, .unavailable, .unused, .warn_if_not_aligned, .mode,
.@"packed", .may_alias, .deprecated, .unavailable, .unused, .warn_if_not_aligned, .mode, .warn_unused_result, .nodiscard,
=> try p.attr_application_buf.append(p.gpa, attr),
// zig fmt: on
.vector_size => try attr.applyVectorSize(p, tok, field_ty),
@ -805,7 +860,6 @@ pub fn applyTypeAttributes(p: *Parser, ty: Type, attr_buf_start: usize, tag: ?Di
const toks = p.attr_buf.items(.tok)[attr_buf_start..];
p.attr_application_buf.items.len = 0;
var base_ty = ty;
if (base_ty.specifier == .attributed) base_ty = base_ty.data.attributed.base;
for (attrs, toks) |attr, tok| switch (attr.tag) {
// zig fmt: off
.@"packed", .may_alias, .deprecated, .unavailable, .unused, .warn_if_not_aligned, .mode,
@ -823,22 +877,10 @@ pub fn applyTypeAttributes(p: *Parser, ty: Type, attr_buf_start: usize, tag: ?Di
.copy,
.scalar_storage_order,
.nonstring,
=> std.debug.panic("apply type attribute {s}", .{@tagName(attr.tag)}),
=> |t| try p.errExtra(.attribute_todo, tok, .{ .attribute_todo = .{ .tag = t, .kind = .types } }),
else => try ignoredAttrErr(p, tok, attr.tag, "types"),
};
const existing = ty.getAttributes();
// TODO: the alignment annotation on a type should override
// the decl it refers to. This might not be true for others. Maybe bug.
// if there are annotations on this type def use those.
if (p.attr_application_buf.items.len > 0) {
return try base_ty.withAttributes(p.arena, p.attr_application_buf.items);
} else if (existing.len > 0) {
// else use the ones on the typedef decl we were refering to.
return try base_ty.withAttributes(p.arena, existing);
}
return base_ty;
return base_ty.withAttributes(p.arena, p.attr_application_buf.items);
}
pub fn applyFunctionAttributes(p: *Parser, ty: Type, attr_buf_start: usize) !Type {
@ -846,7 +888,6 @@ pub fn applyFunctionAttributes(p: *Parser, ty: Type, attr_buf_start: usize) !Typ
const toks = p.attr_buf.items(.tok)[attr_buf_start..];
p.attr_application_buf.items.len = 0;
var base_ty = ty;
if (base_ty.specifier == .attributed) base_ty = base_ty.data.attributed.base;
var hot = false;
var cold = false;
var @"noinline" = false;
@ -896,6 +937,13 @@ pub fn applyFunctionAttributes(p: *Parser, ty: Type, attr_buf_start: usize) !Typ
else => try p.errStr(.callconv_not_supported, tok, p.tok_ids[tok].lexeme().?),
},
},
.malloc => {
if (base_ty.returnType().isPtr()) {
try p.attr_application_buf.append(p.gpa, attr);
} else {
try ignoredAttrErr(p, tok, attr.tag, "functions that do not return pointers");
}
},
.access,
.alloc_align,
.alloc_size,
@ -908,7 +956,6 @@ pub fn applyFunctionAttributes(p: *Parser, ty: Type, attr_buf_start: usize) !Typ
.ifunc,
.interrupt,
.interrupt_handler,
.malloc,
.no_address_safety_analysis,
.no_icf,
.no_instrument_function,
@ -937,7 +984,7 @@ pub fn applyFunctionAttributes(p: *Parser, ty: Type, attr_buf_start: usize) !Typ
.visibility,
.weakref,
.zero_call_used_regs,
=> std.debug.panic("apply type attribute {s}", .{@tagName(attr.tag)}),
=> |t| try p.errExtra(.attribute_todo, tok, .{ .attribute_todo = .{ .tag = t, .kind = .functions } }),
else => try ignoredAttrErr(p, tok, attr.tag, "functions"),
};
return ty.withAttributes(p.arena, p.attr_application_buf.items);
@ -1043,11 +1090,14 @@ fn applyTransparentUnion(attr: Attribute, p: *Parser, tok: TokenIndex, ty: Type)
}
fn applyVectorSize(attr: Attribute, p: *Parser, tok: TokenIndex, ty: *Type) !void {
if (!(ty.isInt() or ty.isFloat()) or !ty.isReal()) {
const orig_ty = try p.typeStr(ty.*);
ty.* = Type.invalid;
return p.errStr(.invalid_vec_elem_ty, tok, orig_ty);
const base = ty.base();
const is_enum = ty.is(.@"enum");
if (!(ty.isInt() or ty.isFloat()) or !ty.isReal() or (is_enum and p.comp.langopts.emulate == .gcc)) {
try p.errStr(.invalid_vec_elem_ty, tok, try p.typeStr(ty.*));
return error.ParsingFailed;
}
if (is_enum) return;
const vec_bytes = attr.args.vector_size.bytes;
const ty_size = ty.sizeof(p.comp).?;
if (vec_bytes % ty_size != 0) {
@ -1057,7 +1107,7 @@ fn applyVectorSize(attr: Attribute, p: *Parser, tok: TokenIndex, ty: *Type) !voi
const arr_ty = try p.arena.create(Type.Array);
arr_ty.* = .{ .elem = ty.*, .len = vec_size };
ty.* = Type{
base.* = .{
.specifier = .vector,
.data = .{ .array = arr_ty },
};

View File

@ -69,6 +69,7 @@ pub const longest_name = 30;
/// If found, returns the index of the node within the `dafsa` array.
/// Otherwise, returns `null`.
pub fn findInList(first_child_index: u16, char: u8) ?u16 {
@setEvalBranchQuota(206);
var index = first_child_index;
while (true) {
if (dafsa[index].char == char) return index;
@ -787,7 +788,7 @@ const dafsa = [_]Node{
.{ .char = 'i', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 215 },
};
pub const data = blk: {
@setEvalBranchQuota(103);
@setEvalBranchQuota(721);
break :blk [_]@This(){
// access
.{ .tag = @enumFromInt(0), .properties = .{ .tag = .access, .gnu = true } },

View File

@ -350,7 +350,7 @@ test Iterator {
}
test "All builtins" {
var comp = Compilation.init(std.testing.allocator);
var comp = Compilation.init(std.testing.allocator, std.fs.cwd());
defer comp.deinit();
_ = try comp.generateBuiltinMacros(.include_system_defines);
var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
@ -373,7 +373,7 @@ test "All builtins" {
test "Allocation failures" {
const Test = struct {
fn testOne(allocator: std.mem.Allocator) !void {
var comp = Compilation.init(allocator);
var comp = Compilation.init(allocator, std.fs.cwd());
defer comp.deinit();
_ = try comp.generateBuiltinMacros(.include_system_defines);
var arena = std.heap.ArenaAllocator.init(comp.gpa);

View File

@ -71,6 +71,7 @@ pub const longest_name = 43;
/// If found, returns the index of the node within the `dafsa` array.
/// Otherwise, returns `null`.
pub fn findInList(first_child_index: u16, char: u8) ?u16 {
@setEvalBranchQuota(7972);
var index = first_child_index;
while (true) {
if (dafsa[index].char == char) return index;
@ -5165,7 +5166,7 @@ const dafsa = [_]Node{
.{ .char = 'e', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 4913 },
};
pub const data = blk: {
@setEvalBranchQuota(30_000);
@setEvalBranchQuota(27902);
break :blk [_]@This(){
// _Block_object_assign
.{ .tag = @enumFromInt(0), .properties = .{ .param_str = "vv*vC*iC", .header = .blocks, .attributes = .{ .lib_function_without_prefix = true } } },

View File

@ -0,0 +1,86 @@
const std = @import("std");
const backend = @import("../../backend.zig");
const Interner = backend.Interner;
const Builtins = @import("../Builtins.zig");
const Builtin = Builtins.Builtin;
const Parser = @import("../Parser.zig");
const Tree = @import("../Tree.zig");
const NodeIndex = Tree.NodeIndex;
const Type = @import("../Type.zig");
const Value = @import("../Value.zig");
/// Builds a NaN of float type `T` whose mantissa payload is parsed from `str`.
/// `str` is the raw bytes of the string-literal argument to `__builtin_nan*`;
/// the final byte is stripped before parsing (presumably a sentinel/terminator
/// from the interner — confirm against the caller). An empty or unparsable
/// payload yields the default (zero-payload) NaN.
fn makeNan(comptime T: type, str: []const u8) T {
    const UnsignedSameSize = std.meta.Int(.unsigned, @bitSizeOf(T));
    // Guard the empty slice: `str.len - 1` would underflow and panic.
    const payload_str = if (str.len == 0) str else str[0 .. str.len - 1];
    // Base 0 auto-detects decimal/hex/octal/binary prefixes.
    const parsed = std.fmt.parseUnsigned(UnsignedSameSize, payload_str, 0) catch 0;
    // Mantissa payload width for each supported IEEE-754 format.
    const bits: switch (T) {
        f32 => u23,
        f64 => u52,
        f80 => u63,
        f128 => u112,
        else => @compileError("Invalid type for makeNan"),
    } = @truncate(parsed);
    // OR the payload into the canonical NaN bit pattern.
    return @bitCast(@as(UnsignedSameSize, bits) | @as(UnsignedSameSize, @bitCast(std.math.nan(T))));
}
/// Attempts to constant-evaluate the builtin call `tag` with argument nodes `args`.
/// Returns the computed Value, or an empty Value (`.{}`) when the builtin is not
/// const-evaluable or its arguments' values are not known to the parser.
pub fn eval(tag: Builtin.Tag, p: *Parser, args: []const NodeIndex) !Value {
const builtin = Builtin.fromTag(tag);
// Builtins not marked const_evaluable can never be folded here.
if (!builtin.properties.attributes.const_evaluable) return .{};
switch (tag) {
// __builtin_inf{f,,l}: produce +infinity of the float type implied by the suffix.
Builtin.tagFromName("__builtin_inff").?,
Builtin.tagFromName("__builtin_inf").?,
Builtin.tagFromName("__builtin_infl").?,
=> {
const ty: Type = switch (tag) {
Builtin.tagFromName("__builtin_inff").? => .{ .specifier = .float },
Builtin.tagFromName("__builtin_inf").? => .{ .specifier = .double },
Builtin.tagFromName("__builtin_infl").? => .{ .specifier = .long_double },
else => unreachable,
};
// Pick the interner float variant matching the target's bit size for `ty`.
const f: Interner.Key.Float = switch (ty.bitSizeof(p.comp).?) {
32 => .{ .f32 = std.math.inf(f32) },
64 => .{ .f64 = std.math.inf(f64) },
80 => .{ .f80 = std.math.inf(f80) },
128 => .{ .f128 = std.math.inf(f128) },
else => unreachable,
};
return Value.intern(p.comp, .{ .float = f });
},
// __builtin_isinf: fold only if the argument's value is already known.
Builtin.tagFromName("__builtin_isinf").? => blk: {
if (args.len == 0) break :blk;
const val = p.value_map.get(args[0]) orelse break :blk;
return Value.fromBool(val.isInf(p.comp));
},
// __builtin_isinf_sign: -1 / 0 / 1 for -inf / finite / +inf; bail on unknown.
Builtin.tagFromName("__builtin_isinf_sign").? => blk: {
if (args.len == 0) break :blk;
const val = p.value_map.get(args[0]) orelse break :blk;
switch (val.isInfSign(p.comp)) {
.unknown => {},
.finite => return Value.zero,
.positive => return Value.one,
.negative => return Value.int(@as(i64, -1), p.comp),
}
},
// __builtin_isnan: fold only if the argument's value is already known.
Builtin.tagFromName("__builtin_isnan").? => blk: {
if (args.len == 0) break :blk;
const val = p.value_map.get(args[0]) orelse break :blk;
return Value.fromBool(val.isNan(p.comp));
},
// __builtin_nan: build a NaN whose payload comes from the string-literal argument.
Builtin.tagFromName("__builtin_nan").? => blk: {
if (args.len == 0) break :blk;
const val = p.getDecayedStringLiteral(args[0]) orelse break :blk;
const bytes = p.comp.interner.get(val.ref()).bytes;
// Result type is `double`; dispatch on its target bit size.
const f: Interner.Key.Float = switch ((Type{ .specifier = .double }).bitSizeof(p.comp).?) {
32 => .{ .f32 = makeNan(f32, bytes) },
64 => .{ .f64 = makeNan(f64, bytes) },
80 => .{ .f80 = makeNan(f80, bytes) },
128 => .{ .f128 = makeNan(f128, bytes) },
else => unreachable,
};
return Value.intern(p.comp, .{ .float = f });
},
else => {},
}
// Not foldable: no value.
return .{};
}

View File

@ -127,22 +127,27 @@ types: struct {
} = .{},
string_interner: StrInt = .{},
interner: Interner = .{},
/// If this is not null, the directory containing the specified Source will be searched for includes
/// Used by MS extensions which allow searching for includes relative to the directory of the main source file.
ms_cwd_source_id: ?Source.Id = null,
cwd: std.fs.Dir,
pub fn init(gpa: Allocator) Compilation {
pub fn init(gpa: Allocator, cwd: std.fs.Dir) Compilation {
return .{
.gpa = gpa,
.diagnostics = Diagnostics.init(gpa),
.cwd = cwd,
};
}
/// Initialize Compilation with default environment,
/// pragma handlers and emulation mode set to target.
pub fn initDefault(gpa: Allocator) !Compilation {
pub fn initDefault(gpa: Allocator, cwd: std.fs.Dir) !Compilation {
var comp: Compilation = .{
.gpa = gpa,
.environment = try Environment.loadAll(gpa),
.diagnostics = Diagnostics.init(gpa),
.cwd = cwd,
};
errdefer comp.deinit();
try comp.addDefaultPragmaHandlers();
@ -534,7 +539,7 @@ pub fn generateBuiltinMacros(comp: *Compilation, system_defines_mode: SystemDefi
if (system_defines_mode == .include_system_defines) {
try buf.appendSlice(
\\#define __VERSION__ "Aro
++ @import("../backend.zig").version_str ++ "\"\n" ++
++ " " ++ @import("../backend.zig").version_str ++ "\"\n" ++
\\#define __Aro__
\\
);
@ -550,6 +555,9 @@ pub fn generateBuiltinMacros(comp: *Compilation, system_defines_mode: SystemDefi
\\#define __STDC_NO_VLA__ 1
\\#define __STDC_UTF_16__ 1
\\#define __STDC_UTF_32__ 1
\\#define __STDC_EMBED_NOT_FOUND__ 0
\\#define __STDC_EMBED_FOUND__ 1
\\#define __STDC_EMBED_EMPTY__ 2
\\
);
if (comp.langopts.standard.StdCVersionMacro()) |stdc_version| {
@ -719,8 +727,13 @@ fn generateBuiltinTypes(comp: *Compilation) !void {
try comp.generateNsConstantStringType();
}
/// Returns the target's `__float80` type, or null when not emulating GCC
/// (the code only exposes this type under GCC emulation).
pub fn float80Type(comp: *const Compilation) ?Type {
    return if (comp.langopts.emulate == .gcc) target_util.float80Type(comp.target) else null;
}
/// Smallest integer type with at least N bits
fn intLeastN(comp: *const Compilation, bits: usize, signedness: std.builtin.Signedness) Type {
pub fn intLeastN(comp: *const Compilation, bits: usize, signedness: std.builtin.Signedness) Type {
if (bits == 64 and (comp.target.isDarwin() or comp.target.isWasm())) {
// WebAssembly and Darwin use `long long` for `int_least64_t` and `int_fast64_t`.
return .{ .specifier = if (signedness == .signed) .long_long else .ulong_long };
@ -903,7 +916,7 @@ fn generateNsConstantStringType(comp: *Compilation) !void {
comp.types.ns_constant_string.fields[2] = .{ .name = try StrInt.intern(comp, "str"), .ty = const_char_ptr };
comp.types.ns_constant_string.fields[3] = .{ .name = try StrInt.intern(comp, "length"), .ty = .{ .specifier = .long } };
comp.types.ns_constant_string.ty = .{ .specifier = .@"struct", .data = .{ .record = &comp.types.ns_constant_string.record } };
record_layout.compute(&comp.types.ns_constant_string.record, comp.types.ns_constant_string.ty, comp, null);
record_layout.compute(&comp.types.ns_constant_string.record, comp.types.ns_constant_string.ty, comp, null) catch unreachable;
}
fn generateVaListType(comp: *Compilation) !Type {
@ -911,12 +924,12 @@ fn generateVaListType(comp: *Compilation) !Type {
const kind: Kind = switch (comp.target.cpu.arch) {
.aarch64 => switch (comp.target.os.tag) {
.windows => @as(Kind, .char_ptr),
.ios, .macos, .tvos, .watchos, .visionos => .char_ptr,
.ios, .macos, .tvos, .watchos => .char_ptr,
else => .aarch64_va_list,
},
.sparc, .wasm32, .wasm64, .bpfel, .bpfeb, .riscv32, .riscv64, .avr, .spirv32, .spirv64 => .void_ptr,
.powerpc => switch (comp.target.os.tag) {
.ios, .macos, .tvos, .watchos, .visionos, .aix => @as(Kind, .char_ptr),
.ios, .macos, .tvos, .watchos, .aix => @as(Kind, .char_ptr),
else => return Type{ .specifier = .void }, // unknown
},
.x86, .msp430 => .char_ptr,
@ -951,7 +964,7 @@ fn generateVaListType(comp: *Compilation) !Type {
record_ty.fields[3] = .{ .name = try StrInt.intern(comp, "__gr_offs"), .ty = .{ .specifier = .int } };
record_ty.fields[4] = .{ .name = try StrInt.intern(comp, "__vr_offs"), .ty = .{ .specifier = .int } };
ty = .{ .specifier = .@"struct", .data = .{ .record = record_ty } };
record_layout.compute(record_ty, ty, comp, null);
record_layout.compute(record_ty, ty, comp, null) catch unreachable;
},
.x86_64_va_list => {
const record_ty = try arena.create(Type.Record);
@ -969,7 +982,7 @@ fn generateVaListType(comp: *Compilation) !Type {
record_ty.fields[2] = .{ .name = try StrInt.intern(comp, "overflow_arg_area"), .ty = void_ptr };
record_ty.fields[3] = .{ .name = try StrInt.intern(comp, "reg_save_area"), .ty = void_ptr };
ty = .{ .specifier = .@"struct", .data = .{ .record = record_ty } };
record_layout.compute(record_ty, ty, comp, null);
record_layout.compute(record_ty, ty, comp, null) catch unreachable;
},
}
if (kind == .char_ptr or kind == .void_ptr) {
@ -988,13 +1001,28 @@ fn generateVaListType(comp: *Compilation) !Type {
/// Writes `#define __<name>_MAX__ <value><suffix>` to `w` for integer type `ty`,
/// where the suffix is the type's integer-constant suffix (from `intValueSuffix`).
/// NOTE: the span as shown contained merged-diff residue — both the old
/// `const max = if (bit_count == 128) ...` computation and its replacement
/// switch, i.e. two conflicting declarations of `max`. Only the replacement
/// switch is kept here.
fn generateIntMax(comp: *const Compilation, w: anytype, name: []const u8, ty: Type) !void {
    const bit_count: u8 = @intCast(ty.sizeof(comp).? * 8);
    const unsigned = ty.isUnsignedInt(comp);
    // Select the maximum representable value for the type's width and signedness.
    const max: u128 = switch (bit_count) {
        8 => if (unsigned) std.math.maxInt(u8) else std.math.maxInt(i8),
        16 => if (unsigned) std.math.maxInt(u16) else std.math.maxInt(i16),
        32 => if (unsigned) std.math.maxInt(u32) else std.math.maxInt(i32),
        64 => if (unsigned) std.math.maxInt(u64) else std.math.maxInt(i64),
        128 => if (unsigned) std.math.maxInt(u128) else std.math.maxInt(i128),
        else => unreachable,
    };
    try w.print("#define __{s}_MAX__ {d}{s}\n", .{ name, max, ty.intValueSuffix(comp) });
}
/// Largest value that can be stored in wchar_t
pub fn wcharMax(comp: *const Compilation) u32 {
    const signed = !comp.types.wchar.isUnsignedInt(comp);
    // wchar_t is 8, 16, or 32 bits wide; anything else is unreachable.
    return switch (comp.types.wchar.bitSizeof(comp).?) {
        8 => if (signed) std.math.maxInt(i8) else std.math.maxInt(u8),
        16 => if (signed) std.math.maxInt(i16) else std.math.maxInt(u16),
        32 => if (signed) std.math.maxInt(i32) else std.math.maxInt(u32),
        else => unreachable,
    };
}
fn generateExactWidthIntMax(comp: *const Compilation, w: anytype, specifier: Type.Specifier) !void {
var ty = Type{ .specifier = specifier };
const bit_count: u8 = @intCast(ty.sizeof(comp).? * 8);
@ -1039,6 +1067,12 @@ pub fn nextLargestIntSameSign(comp: *const Compilation, ty: Type) ?Type {
return null;
}
/// Maximum size of an array, in bytes.
/// Computed as 2^min(61, pointer-bit-width) - 1.
pub fn maxArrayBytes(comp: *const Compilation) u64 {
    const bits = @min(61, comp.target.ptrBitWidth());
    // bits <= 61, so the truncated shift amount always fits and never overflows u64.
    const limit = (@as(u64, 1) << @truncate(bits)) - 1;
    return limit;
}
/// If `enum E { ... }` syntax has a fixed underlying integer type regardless of the presence of
/// __attribute__((packed)) or the range of values of the corresponding enumerator constants,
/// specify it here.
@ -1060,7 +1094,7 @@ pub fn getCharSignedness(comp: *const Compilation) std.builtin.Signedness {
pub fn addBuiltinIncludeDir(comp: *Compilation, aro_dir: []const u8) !void {
var search_path = aro_dir;
while (std.fs.path.dirname(search_path)) |dirname| : (search_path = dirname) {
var base_dir = std.fs.cwd().openDir(dirname, .{}) catch continue;
var base_dir = comp.cwd.openDir(dirname, .{}) catch continue;
defer base_dir.close();
base_dir.access("include/stddef.h", .{}) catch continue;
@ -1266,7 +1300,7 @@ fn addSourceFromPathExtra(comp: *Compilation, path: []const u8, kind: Source.Kin
return error.FileNotFound;
}
const file = try std.fs.cwd().openFile(path, .{});
const file = try comp.cwd.openFile(path, .{});
defer file.close();
const contents = file.readToEndAlloc(comp.gpa, std.math.maxInt(u32)) catch |err| switch (err) {
@ -1349,10 +1383,9 @@ pub fn hasInclude(
return false;
}
const cwd = std.fs.cwd();
if (std.fs.path.isAbsolute(filename)) {
if (which == .next) return false;
return !std.meta.isError(cwd.access(filename, .{}));
return !std.meta.isError(comp.cwd.access(filename, .{}));
}
const cwd_source_id = switch (include_type) {
@ -1372,7 +1405,7 @@ pub fn hasInclude(
while (try it.nextWithFile(filename, sf_allocator)) |found| {
defer sf_allocator.free(found.path);
if (!std.meta.isError(cwd.access(found.path, .{}))) return true;
if (!std.meta.isError(comp.cwd.access(found.path, .{}))) return true;
}
return false;
}
@ -1392,7 +1425,7 @@ fn getFileContents(comp: *Compilation, path: []const u8, limit: ?u32) ![]const u
return error.FileNotFound;
}
const file = try std.fs.cwd().openFile(path, .{});
const file = try comp.cwd.openFile(path, .{});
defer file.close();
var buf = std.ArrayList(u8).init(comp.gpa);
@ -1571,6 +1604,17 @@ pub fn hasBuiltinFunction(comp: *const Compilation, builtin: Builtin) bool {
}
}
/// Returns the source bytes of the single token starting at `loc`.
/// Runs a throwaway tokenizer from the location's byte offset and
/// slices the token's extent out of the source buffer.
pub fn locSlice(comp: *const Compilation, loc: Source.Location) []const u8 {
    var tokenizer = Tokenizer{
        .buf = comp.getSource(loc.id).buf,
        .index = loc.byte_offset,
        .langopts = comp.langopts,
        .source = .generated,
    };
    const token = tokenizer.next();
    return tokenizer.buf[token.start..token.end];
}
pub const CharUnitSize = enum(u32) {
@"1" = 1,
@"2" = 2,
@ -1590,7 +1634,7 @@ pub const addDiagnostic = Diagnostics.add;
test "addSourceFromReader" {
const Test = struct {
fn addSourceFromReader(str: []const u8, expected: []const u8, warning_count: u32, splices: []const u32) !void {
var comp = Compilation.init(std.testing.allocator);
var comp = Compilation.init(std.testing.allocator, std.fs.cwd());
defer comp.deinit();
var buf_reader = std.io.fixedBufferStream(str);
@ -1602,7 +1646,7 @@ test "addSourceFromReader" {
}
fn withAllocationFailures(allocator: std.mem.Allocator) !void {
var comp = Compilation.init(allocator);
var comp = Compilation.init(allocator, std.fs.cwd());
defer comp.deinit();
_ = try comp.addSourceFromBuffer("path", "spliced\\\nbuffer\n");
@ -1644,7 +1688,7 @@ test "addSourceFromReader - exhaustive check for carriage return elimination" {
const alen = alphabet.len;
var buf: [alphabet.len]u8 = [1]u8{alphabet[0]} ** alen;
var comp = Compilation.init(std.testing.allocator);
var comp = Compilation.init(std.testing.allocator, std.fs.cwd());
defer comp.deinit();
var source_count: u32 = 0;
@ -1672,7 +1716,7 @@ test "ignore BOM at beginning of file" {
const Test = struct {
fn run(buf: []const u8) !void {
var comp = Compilation.init(std.testing.allocator);
var comp = Compilation.init(std.testing.allocator, std.fs.cwd());
defer comp.deinit();
var buf_reader = std.io.fixedBufferStream(buf);

View File

@ -47,6 +47,10 @@ pub const Message = struct {
tag: Attribute.Tag,
specifier: enum { @"struct", @"union", @"enum" },
},
attribute_todo: struct {
tag: Attribute.Tag,
kind: enum { variables, fields, types, functions },
},
builtin_with_header: struct {
builtin: Builtin.Tag,
header: Header,
@ -210,6 +214,9 @@ pub const Options = struct {
normalized: Kind = .default,
@"shift-count-negative": Kind = .default,
@"shift-count-overflow": Kind = .default,
@"constant-conversion": Kind = .default,
@"sign-conversion": Kind = .default,
nonnull: Kind = .default,
};
const Diagnostics = @This();
@ -222,14 +229,14 @@ errors: u32 = 0,
macro_backtrace_limit: u32 = 6,
/// Returns true if `name` matches one of the warning option fields of `Options`.
/// NOTE: the span as shown contained merged-diff residue — both the old
/// `std.meta.fields(Options)` loop header and its `@typeInfo` replacement
/// appeared back-to-back. Only the replacement form is kept here.
pub fn warningExists(name: []const u8) bool {
    inline for (@typeInfo(Options).@"struct".fields) |f| {
        if (mem.eql(u8, f.name, name)) return true;
    }
    return false;
}
pub fn set(d: *Diagnostics, name: []const u8, to: Kind) !void {
inline for (std.meta.fields(Options)) |f| {
inline for (@typeInfo(Options).@"struct".fields) |f| {
if (mem.eql(u8, f.name, name)) {
@field(d.options, f.name) = to;
return;
@ -422,6 +429,10 @@ pub fn renderMessage(comp: *Compilation, m: anytype, msg: Message) void {
@tagName(msg.extra.ignored_record_attr.tag),
@tagName(msg.extra.ignored_record_attr.specifier),
}),
.attribute_todo => printRt(m, prop.msg, .{ "{s}", "{s}" }, .{
@tagName(msg.extra.attribute_todo.tag),
@tagName(msg.extra.attribute_todo.kind),
}),
.builtin_with_header => printRt(m, prop.msg, .{ "{s}", "{s}" }, .{
@tagName(msg.extra.builtin_with_header.header),
Builtin.nameFromTag(msg.extra.builtin_with_header.builtin).span(),

View File

@ -107,6 +107,9 @@ pub const Tag = enum {
multiple_default,
previous_case,
expected_arguments,
callee_with_static_array,
array_argument_too_small,
non_null_argument,
expected_arguments_old,
expected_at_least_arguments,
invalid_static_star,
@ -214,6 +217,7 @@ pub const Tag = enum {
pre_c23_compat,
unbound_vla,
array_too_large,
record_too_large,
incompatible_ptr_init,
incompatible_ptr_init_sign,
incompatible_ptr_assign,
@ -349,6 +353,8 @@ pub const Tag = enum {
non_standard_escape_char,
invalid_pp_stringify_escape,
vla,
int_value_changed,
sign_conversion,
float_overflow_conversion,
float_out_of_range,
float_zero_conversion,
@ -425,7 +431,8 @@ pub const Tag = enum {
bit_int,
unsigned_bit_int_too_small,
signed_bit_int_too_small,
bit_int_too_big,
unsigned_bit_int_too_big,
signed_bit_int_too_big,
keyword_macro,
ptr_arithmetic_incomplete,
callconv_not_supported,
@ -509,6 +516,9 @@ pub const Tag = enum {
complex_conj,
overflow_builtin_requires_int,
overflow_result_requires_ptr,
attribute_todo,
invalid_type_underlying_enum,
auto_type_self_initialized,
pub fn property(tag: Tag) Properties {
return named_data[@intFromEnum(tag)];
@ -613,6 +623,9 @@ pub const Tag = enum {
.{ .msg = "multiple default cases in the same switch", .kind = .@"error" },
.{ .msg = "previous case defined here", .kind = .note },
.{ .msg = expected_arguments, .extra = .arguments, .kind = .@"error" },
.{ .msg = "callee declares array parameter as static here", .kind = .note },
.{ .msg = "array argument is too small; contains {d} elements, callee requires at least {d}", .extra = .arguments, .kind = .warning, .opt = W("array-bounds") },
.{ .msg = "null passed to a callee that requires a non-null argument", .kind = .warning, .opt = W("nonnull") },
.{ .msg = expected_arguments, .extra = .arguments, .kind = .warning },
.{ .msg = "expected at least {d} argument(s) got {d}", .extra = .arguments, .kind = .warning },
.{ .msg = "'static' may not be used with an unspecified variable length array size", .kind = .@"error" },
@ -720,6 +733,7 @@ pub const Tag = enum {
.{ .msg = "{s} is incompatible with C standards before C23", .extra = .str, .kind = .off, .suppress_unless_version = .c23, .opt = W("pre-c23-compat") },
.{ .msg = "variable length array must be bound in function definition", .kind = .@"error" },
.{ .msg = "array is too large", .kind = .@"error" },
.{ .msg = "type '{s}' is too large", .kind = .@"error", .extra = .str },
.{ .msg = "incompatible pointer types initializing {s}", .extra = .str, .opt = W("incompatible-pointer-types"), .kind = .warning },
.{ .msg = "incompatible pointer types initializing {s}" ++ pointer_sign_message, .extra = .str, .opt = W("pointer-sign"), .kind = .warning },
.{ .msg = "incompatible pointer types assigning to {s}", .extra = .str, .opt = W("incompatible-pointer-types"), .kind = .warning },
@ -855,6 +869,8 @@ pub const Tag = enum {
.{ .msg = "use of non-standard escape character '\\{s}'", .kind = .off, .opt = W("pedantic"), .extra = .invalid_escape },
.{ .msg = "invalid string literal, ignoring final '\\'", .kind = .warning },
.{ .msg = "variable length array used", .kind = .off, .opt = W("vla") },
.{ .msg = "implicit conversion from {s}", .extra = .str, .kind = .warning, .opt = W("constant-conversion") },
.{ .msg = "implicit conversion changes signedness: {s}", .extra = .str, .kind = .off, .opt = W("sign-conversion") },
.{ .msg = "implicit conversion of non-finite value from {s} is undefined", .extra = .str, .kind = .off, .opt = W("float-overflow-conversion") },
.{ .msg = "implicit conversion of out of range value from {s} is undefined", .extra = .str, .kind = .warning, .opt = W("literal-conversion") },
.{ .msg = "implicit conversion from {s}", .extra = .str, .kind = .off, .opt = W("float-zero-conversion") },
@ -929,9 +945,10 @@ pub const Tag = enum {
.{ .msg = "this declarator", .kind = .note },
.{ .msg = "{s} is not supported on this target", .extra = .str, .kind = .@"error" },
.{ .msg = "'_BitInt' in C17 and earlier is a Clang extension'", .kind = .off, .pedantic = true, .opt = W("bit-int-extension"), .suppress_version = .c23 },
.{ .msg = "{s} must have a bit size of at least 1", .extra = .str, .kind = .@"error" },
.{ .msg = "{s} must have a bit size of at least 2", .extra = .str, .kind = .@"error" },
.{ .msg = "{s} of bit sizes greater than " ++ std.fmt.comptimePrint("{d}", .{Properties.max_bits}) ++ " not supported", .extra = .str, .kind = .@"error" },
.{ .msg = "{s}unsigned _BitInt must have a bit size of at least 1", .extra = .str, .kind = .@"error" },
.{ .msg = "{s}signed _BitInt must have a bit size of at least 2", .extra = .str, .kind = .@"error" },
.{ .msg = "{s}unsigned _BitInt of bit sizes greater than " ++ std.fmt.comptimePrint("{d}", .{Properties.max_bits}) ++ " not supported", .extra = .str, .kind = .@"error" },
.{ .msg = "{s}signed _BitInt of bit sizes greater than " ++ std.fmt.comptimePrint("{d}", .{Properties.max_bits}) ++ " not supported", .extra = .str, .kind = .@"error" },
.{ .msg = "keyword is hidden by macro definition", .kind = .off, .pedantic = true, .opt = W("keyword-macro") },
.{ .msg = "arithmetic on a pointer to an incomplete type '{s}'", .extra = .str, .kind = .@"error" },
.{ .msg = "'{s}' calling convention is not supported for this target", .extra = .str, .opt = W("ignored-attributes"), .kind = .warning },
@ -1015,6 +1032,9 @@ pub const Tag = enum {
.{ .msg = "ISO C does not support '~' for complex conjugation of '{s}'", .opt = W("pedantic"), .extra = .str, .kind = .off },
.{ .msg = "operand argument to overflow builtin must be an integer ('{s}' invalid)", .extra = .str, .kind = .@"error" },
.{ .msg = "result argument to overflow builtin must be a pointer to a non-const integer ('{s}' invalid)", .extra = .str, .kind = .@"error" },
.{ .msg = "TODO: implement '{s}' attribute for {s}", .extra = .attribute_todo, .kind = .@"error" },
.{ .msg = "non-integral type '{s}' is an invalid underlying type", .extra = .str, .kind = .@"error" },
.{ .msg = "variable '{s}' declared with deduced type '__auto_type' cannot appear in its own initializer", .extra = .str, .kind = .@"error" },
};
};
};

View File

@ -47,6 +47,20 @@ color: ?bool = null,
nobuiltininc: bool = false,
nostdinc: bool = false,
nostdlibinc: bool = false,
debug_dump_letters: packed struct(u3) {
d: bool = false,
m: bool = false,
n: bool = false,
/// According to GCC, specifying letters whose behavior conflicts is undefined.
/// We follow clang in that `-dM` always takes precedence over `-dD`
pub fn getPreprocessorDumpMode(self: @This()) Preprocessor.DumpMode {
if (self.m) return .macros_only;
if (self.d) return .macros_and_result;
if (self.n) return .macro_names_and_result;
return .result_only;
}
} = .{},
/// Full path to the aro executable
aro_name: []const u8 = "",
@ -92,6 +106,9 @@ pub const usage =
\\
\\Compile options:
\\ -c, --compile Only run preprocess, compile, and assemble steps
\\ -dM Output #define directives for all the macros defined during the execution of the preprocessor
\\ -dD Like -dM except that it outputs both the #define directives and the result of preprocessing
\\ -dN Like -dD, but emit only the macro names, not their expansions.
\\ -D <macro>=<value> Define <macro> to <value> (defaults to 1)
\\ -E Only run the preprocessor
\\ -fchar8_t Enable char8_t (enabled by default in C23 and later)
@ -234,6 +251,12 @@ pub fn parseArgs(
d.system_defines = .no_system_defines;
} else if (mem.eql(u8, arg, "-c") or mem.eql(u8, arg, "--compile")) {
d.only_compile = true;
} else if (mem.eql(u8, arg, "-dD")) {
d.debug_dump_letters.d = true;
} else if (mem.eql(u8, arg, "-dM")) {
d.debug_dump_letters.m = true;
} else if (mem.eql(u8, arg, "-dN")) {
d.debug_dump_letters.n = true;
} else if (mem.eql(u8, arg, "-E")) {
d.only_preprocess = true;
} else if (mem.eql(u8, arg, "-P") or mem.eql(u8, arg, "--no-line-commands")) {
@ -636,13 +659,17 @@ fn processSource(
if (d.comp.langopts.ms_extensions) {
d.comp.ms_cwd_source_id = source.id;
}
const dump_mode = d.debug_dump_letters.getPreprocessorDumpMode();
if (d.verbose_pp) pp.verbose = true;
if (d.only_preprocess) {
pp.preserve_whitespace = true;
if (d.line_commands) {
pp.linemarkers = if (d.use_line_directives) .line_directives else .numeric_directives;
}
switch (dump_mode) {
.macros_and_result, .macro_names_and_result => pp.store_macro_tokens = true,
.result_only, .macros_only => {},
}
}
try pp.preprocessSources(&.{ source, builtin, user_macros });
@ -663,7 +690,8 @@ fn processSource(
defer if (d.output_name != null) file.close();
var buf_w = std.io.bufferedWriter(file.writer());
pp.prettyPrintTokens(buf_w.writer()) catch |er|
pp.prettyPrintTokens(buf_w.writer(), dump_mode) catch |er|
return d.fatal("unable to write result: {s}", .{errorDescription(er)});
buf_w.flush() catch |er|

View File

@ -56,7 +56,7 @@ fn existsFake(entries: []const Filesystem.Entry, path: []const u8) bool {
}
fn canExecutePosix(path: []const u8) bool {
std.os.access(path, std.os.X_OK) catch return false;
std.posix.access(path, std.posix.X_OK) catch return false;
// Todo: ensure path is not a directory
return true;
}
@ -173,7 +173,7 @@ pub const Filesystem = union(enum) {
pub fn exists(fs: Filesystem, path: []const u8) bool {
switch (fs) {
.real => {
std.os.access(path, std.os.F_OK) catch return false;
std.fs.cwd().access(path, .{}) catch return false;
return true;
},
.fake => |paths| return existsFake(paths, path),

View File

@ -46,15 +46,15 @@ const Item = struct {
const List = std.MultiArrayList(Item);
};
const Index = enum(u32) {
pub const Index = enum(u32) {
none = std.math.maxInt(u32),
_,
};
map: std.AutoHashMapUnmanaged(Identifier, Index) = .{},
/// Used for computing intersection of two lists; stored here so that allocations can be retained
/// Used for computing union/intersection of two lists; stored here so that allocations can be retained
/// until hideset is deinit'ed
intersection_map: std.AutoHashMapUnmanaged(Identifier, void) = .{},
tmp_map: std.AutoHashMapUnmanaged(Identifier, void) = .{},
linked_list: Item.List = .{},
comp: *const Compilation,
@ -72,7 +72,7 @@ const Iterator = struct {
pub fn deinit(self: *Hideset) void {
self.map.deinit(self.comp.gpa);
self.intersection_map.deinit(self.comp.gpa);
self.tmp_map.deinit(self.comp.gpa);
self.linked_list.deinit(self.comp.gpa);
}
@ -83,7 +83,7 @@ pub fn clearRetainingCapacity(self: *Hideset) void {
pub fn clearAndFree(self: *Hideset) void {
self.map.clearAndFree(self.comp.gpa);
self.intersection_map.clearAndFree(self.comp.gpa);
self.tmp_map.clearAndFree(self.comp.gpa);
self.linked_list.shrinkAndFree(self.comp.gpa, 0);
}
@ -109,8 +109,13 @@ fn ensureUnusedCapacity(self: *Hideset, new_size: usize) !void {
/// Creates a one-item list with contents `identifier`
fn createNodeAssumeCapacity(self: *Hideset, identifier: Identifier) Index {
return self.createNodeAssumeCapacityExtra(identifier, .none);
}
/// Creates a one-item list with contents `identifier`
fn createNodeAssumeCapacityExtra(self: *Hideset, identifier: Identifier, next: Index) Index {
const next_idx = self.linked_list.len;
self.linked_list.appendAssumeCapacity(.{ .identifier = identifier });
self.linked_list.appendAssumeCapacity(.{ .identifier = identifier, .next = next });
return @enumFromInt(next_idx);
}
@ -121,24 +126,24 @@ pub fn prepend(self: *Hideset, loc: Source.Location, tail: Index) !Index {
return @enumFromInt(new_idx);
}
/// Copy a, then attach b at the end
/// Attach elements of `b` to the front of `a` (if they're not in `a`)
pub fn @"union"(self: *Hideset, a: Index, b: Index) !Index {
var cur: Index = .none;
if (a == .none) return b;
if (b == .none) return a;
self.tmp_map.clearRetainingCapacity();
var it = self.iterator(b);
while (it.next()) |identifier| {
try self.tmp_map.put(self.comp.gpa, identifier, {});
}
var head: Index = b;
try self.ensureUnusedCapacity(self.len(a));
var it = self.iterator(a);
it = self.iterator(a);
while (it.next()) |identifier| {
const new_idx = self.createNodeAssumeCapacity(identifier);
if (head == b) {
head = new_idx;
if (!self.tmp_map.contains(identifier)) {
head = self.createNodeAssumeCapacityExtra(identifier, head);
}
if (cur != .none) {
self.linked_list.items(.next)[@intFromEnum(cur)] = new_idx;
}
cur = new_idx;
}
if (cur != .none) {
self.linked_list.items(.next)[@intFromEnum(cur)] = b;
}
return head;
}
@ -163,20 +168,20 @@ fn len(self: *const Hideset, list: Index) usize {
pub fn intersection(self: *Hideset, a: Index, b: Index) !Index {
if (a == .none or b == .none) return .none;
self.intersection_map.clearRetainingCapacity();
self.tmp_map.clearRetainingCapacity();
var cur: Index = .none;
var head: Index = .none;
var it = self.iterator(a);
var a_len: usize = 0;
while (it.next()) |identifier| : (a_len += 1) {
try self.intersection_map.put(self.comp.gpa, identifier, {});
try self.tmp_map.put(self.comp.gpa, identifier, {});
}
try self.ensureUnusedCapacity(@min(a_len, self.len(b)));
it = self.iterator(b);
while (it.next()) |identifier| {
if (self.intersection_map.contains(identifier)) {
if (self.tmp_map.contains(identifier)) {
const new_idx = self.createNodeAssumeCapacity(identifier);
if (head == .none) {
head = new_idx;

File diff suppressed because it is too large Load Diff

View File

@ -97,6 +97,11 @@ poisoned_identifiers: std.StringHashMap(void),
/// Map from Source.Id to macro name in the `#ifndef` condition which guards the source, if any
include_guards: std.AutoHashMapUnmanaged(Source.Id, []const u8) = .{},
/// Store `keyword_define` and `keyword_undef` tokens.
/// Used to implement preprocessor debug dump options
/// Must be false unless in -E mode (parser does not handle those token types)
store_macro_tokens: bool = false,
/// Memory is retained to avoid allocation on every single token.
top_expansion_buf: ExpandBuf,
@ -622,9 +627,12 @@ fn preprocessExtra(pp: *Preprocessor, source: Source) MacroError!TokenWithExpans
}
if_level -= 1;
},
.keyword_define => try pp.define(&tokenizer),
.keyword_define => try pp.define(&tokenizer, directive),
.keyword_undef => {
const macro_name = (try pp.expectMacroName(&tokenizer)) orelse continue;
if (pp.store_macro_tokens) {
try pp.addToken(tokFromRaw(directive));
}
_ = pp.defines.remove(macro_name);
try pp.expectNl(&tokenizer);
@ -975,7 +983,7 @@ fn expr(pp: *Preprocessor, tokenizer: *Tokenizer) MacroError!bool {
.tok_i = @intCast(token_state.tokens_len),
.arena = pp.arena.allocator(),
.in_macro = true,
.strings = std.ArrayList(u8).init(pp.comp.gpa),
.strings = std.ArrayListAligned(u8, 4).init(pp.comp.gpa),
.data = undefined,
.value_map = undefined,
@ -1328,19 +1336,41 @@ fn stringify(pp: *Preprocessor, tokens: []const TokenWithExpansionLocs) !void {
try pp.char_buf.append(c);
}
}
if (pp.char_buf.items[pp.char_buf.items.len - 1] == '\\') {
try pp.char_buf.ensureUnusedCapacity(2);
if (pp.char_buf.items[pp.char_buf.items.len - 1] != '\\') {
pp.char_buf.appendSliceAssumeCapacity("\"\n");
return;
}
pp.char_buf.appendAssumeCapacity('"');
var tokenizer: Tokenizer = .{
.buf = pp.char_buf.items,
.index = 0,
.source = .generated,
.langopts = pp.comp.langopts,
.line = 0,
};
const item = tokenizer.next();
if (item.id == .unterminated_string_literal) {
const tok = tokens[tokens.len - 1];
try pp.comp.addDiagnostic(.{
.tag = .invalid_pp_stringify_escape,
.loc = tok.loc,
}, tok.expansionSlice());
pp.char_buf.items.len -= 1;
pp.char_buf.items.len -= 2; // erase unpaired backslash and appended end quote
pp.char_buf.appendAssumeCapacity('"');
}
try pp.char_buf.appendSlice("\"\n");
pp.char_buf.appendAssumeCapacity('\n');
}
fn reconstructIncludeString(pp: *Preprocessor, param_toks: []const TokenWithExpansionLocs, embed_args: ?*[]const TokenWithExpansionLocs, first: TokenWithExpansionLocs) !?[]const u8 {
assert(param_toks.len != 0);
if (param_toks.len == 0) {
try pp.comp.addDiagnostic(.{
.tag = .expected_filename,
.loc = first.loc,
}, first.expansionSlice());
return null;
}
const char_top = pp.char_buf.items.len;
defer pp.char_buf.items.len = char_top;
@ -1539,11 +1569,13 @@ fn getPasteArgs(args: []const TokenWithExpansionLocs) []const TokenWithExpansion
fn expandFuncMacro(
pp: *Preprocessor,
loc: Source.Location,
macro_tok: TokenWithExpansionLocs,
func_macro: *const Macro,
args: *const MacroArguments,
expanded_args: *const MacroArguments,
hideset_arg: Hideset.Index,
) MacroError!ExpandBuf {
var hideset = hideset_arg;
var buf = ExpandBuf.init(pp.gpa);
try buf.ensureTotalCapacity(func_macro.tokens.len);
errdefer buf.deinit();
@ -1594,16 +1626,21 @@ fn expandFuncMacro(
},
else => &[1]TokenWithExpansionLocs{tokFromRaw(raw_next)},
};
try pp.pasteTokens(&buf, next);
if (next.len != 0) break;
},
.macro_param_no_expand => {
if (tok_i + 1 < func_macro.tokens.len and func_macro.tokens[tok_i + 1].id == .hash_hash) {
hideset = pp.hideset.get(tokFromRaw(func_macro.tokens[tok_i + 1]).loc);
}
const slice = getPasteArgs(args.items[raw.end]);
const raw_loc = Source.Location{ .id = raw.source, .byte_offset = raw.start, .line = raw.line };
try bufCopyTokens(&buf, slice, &.{raw_loc});
},
.macro_param => {
if (tok_i + 1 < func_macro.tokens.len and func_macro.tokens[tok_i + 1].id == .hash_hash) {
hideset = pp.hideset.get(tokFromRaw(func_macro.tokens[tok_i + 1]).loc);
}
const arg = expanded_args.items[raw.end];
const raw_loc = Source.Location{ .id = raw.source, .byte_offset = raw.start, .line = raw.line };
try bufCopyTokens(&buf, arg, &.{raw_loc});
@ -1642,9 +1679,9 @@ fn expandFuncMacro(
const arg = expanded_args.items[0];
const result = if (arg.len == 0) blk: {
const extra = Diagnostics.Message.Extra{ .arguments = .{ .expected = 1, .actual = 0 } };
try pp.comp.addDiagnostic(.{ .tag = .expected_arguments, .loc = loc, .extra = extra }, &.{});
try pp.comp.addDiagnostic(.{ .tag = .expected_arguments, .loc = macro_tok.loc, .extra = extra }, &.{});
break :blk false;
} else try pp.handleBuiltinMacro(raw.id, arg, loc);
} else try pp.handleBuiltinMacro(raw.id, arg, macro_tok.loc);
const start = pp.comp.generated_buf.items.len;
const w = pp.comp.generated_buf.writer(pp.gpa);
try w.print("{}\n", .{@intFromBool(result)});
@ -1655,7 +1692,7 @@ fn expandFuncMacro(
const not_found = "0\n";
const result = if (arg.len == 0) blk: {
const extra = Diagnostics.Message.Extra{ .arguments = .{ .expected = 1, .actual = 0 } };
try pp.comp.addDiagnostic(.{ .tag = .expected_arguments, .loc = loc, .extra = extra }, &.{});
try pp.comp.addDiagnostic(.{ .tag = .expected_arguments, .loc = macro_tok.loc, .extra = extra }, &.{});
break :blk not_found;
} else res: {
var invalid: ?TokenWithExpansionLocs = null;
@ -1687,7 +1724,7 @@ fn expandFuncMacro(
if (vendor_ident != null and attr_ident == null) {
invalid = vendor_ident;
} else if (attr_ident == null and invalid == null) {
invalid = .{ .id = .eof, .loc = loc };
invalid = .{ .id = .eof, .loc = macro_tok.loc };
}
if (invalid) |some| {
try pp.comp.addDiagnostic(
@ -1731,7 +1768,7 @@ fn expandFuncMacro(
const not_found = "0\n";
const result = if (arg.len == 0) blk: {
const extra = Diagnostics.Message.Extra{ .arguments = .{ .expected = 1, .actual = 0 } };
try pp.comp.addDiagnostic(.{ .tag = .expected_arguments, .loc = loc, .extra = extra }, &.{});
try pp.comp.addDiagnostic(.{ .tag = .expected_arguments, .loc = macro_tok.loc, .extra = extra }, &.{});
break :blk not_found;
} else res: {
var embed_args: []const TokenWithExpansionLocs = &.{};
@ -1877,11 +1914,11 @@ fn expandFuncMacro(
break;
},
};
if (string == null and invalid == null) invalid = .{ .loc = loc, .id = .eof };
if (string == null and invalid == null) invalid = .{ .loc = macro_tok.loc, .id = .eof };
if (invalid) |some| try pp.comp.addDiagnostic(
.{ .tag = .pragma_operator_string_literal, .loc = some.loc },
some.expansionSlice(),
) else try pp.pragmaOperator(string.?, loc);
) else try pp.pragmaOperator(string.?, macro_tok.loc);
},
.comma => {
if (tok_i + 2 < func_macro.tokens.len and func_macro.tokens[tok_i + 1].id == .hash_hash) {
@ -1930,6 +1967,15 @@ fn expandFuncMacro(
}
removePlacemarkers(&buf);
const macro_expansion_locs = macro_tok.expansionSlice();
for (buf.items) |*tok| {
try tok.addExpansionLocation(pp.gpa, &.{macro_tok.loc});
try tok.addExpansionLocation(pp.gpa, macro_expansion_locs);
const tok_hidelist = pp.hideset.get(tok.loc);
const new_hidelist = try pp.hideset.@"union"(tok_hidelist, hideset);
try pp.hideset.put(tok.loc, new_hidelist);
}
return buf;
}
@ -2207,8 +2253,10 @@ fn expandMacroExhaustive(
else => |e| return e,
};
assert(r_paren.id == .r_paren);
var free_arg_expansion_locs = false;
defer {
for (args.items) |item| {
if (free_arg_expansion_locs) for (item) |tok| TokenWithExpansionLocs.free(tok.expansion_locs, pp.gpa);
pp.gpa.free(item);
}
args.deinit();
@ -2234,6 +2282,7 @@ fn expandMacroExhaustive(
.arguments = .{ .expected = @intCast(macro.params.len), .actual = args_count },
};
if (macro.var_args and args_count < macro.params.len) {
free_arg_expansion_locs = true;
try pp.comp.addDiagnostic(
.{ .tag = .expected_at_least_arguments, .loc = buf.items[idx].loc, .extra = extra },
buf.items[idx].expansionSlice(),
@ -2243,6 +2292,7 @@ fn expandMacroExhaustive(
continue;
}
if (!macro.var_args and args_count != macro.params.len) {
free_arg_expansion_locs = true;
try pp.comp.addDiagnostic(
.{ .tag = .expected_arguments, .loc = buf.items[idx].loc, .extra = extra },
buf.items[idx].expansionSlice(),
@ -2264,19 +2314,9 @@ fn expandMacroExhaustive(
expanded_args.appendAssumeCapacity(try expand_buf.toOwnedSlice());
}
var res = try pp.expandFuncMacro(macro_tok.loc, macro, &args, &expanded_args);
var res = try pp.expandFuncMacro(macro_tok, macro, &args, &expanded_args, hs);
defer res.deinit();
const tokens_added = res.items.len;
const macro_expansion_locs = macro_tok.expansionSlice();
for (res.items) |*tok| {
try tok.addExpansionLocation(pp.gpa, &.{macro_tok.loc});
try tok.addExpansionLocation(pp.gpa, macro_expansion_locs);
const tok_hidelist = pp.hideset.get(tok.loc);
const new_hidelist = try pp.hideset.@"union"(tok_hidelist, hs);
try pp.hideset.put(tok.loc, new_hidelist);
}
const tokens_removed = macro_scan_idx - idx + 1;
for (buf.items[idx .. idx + tokens_removed]) |tok| TokenWithExpansionLocs.free(tok.expansion_locs, pp.gpa);
try buf.replaceRange(idx, tokens_removed, res.items);
@ -2476,7 +2516,7 @@ fn makeGeneratedToken(pp: *Preprocessor, start: usize, id: Token.Id, source: Tok
}
/// Defines a new macro and warns if it is a duplicate
fn defineMacro(pp: *Preprocessor, name_tok: RawToken, macro: Macro) Error!void {
fn defineMacro(pp: *Preprocessor, define_tok: RawToken, name_tok: RawToken, macro: Macro) Error!void {
const name_str = pp.tokSlice(name_tok);
const gop = try pp.defines.getOrPut(pp.gpa, name_str);
if (gop.found_existing and !gop.value_ptr.eql(macro, pp)) {
@ -2497,11 +2537,14 @@ fn defineMacro(pp: *Preprocessor, name_tok: RawToken, macro: Macro) Error!void {
if (pp.verbose) {
pp.verboseLog(name_tok, "macro {s} defined", .{name_str});
}
if (pp.store_macro_tokens) {
try pp.addToken(tokFromRaw(define_tok));
}
gop.value_ptr.* = macro;
}
/// Handle a #define directive.
fn define(pp: *Preprocessor, tokenizer: *Tokenizer) Error!void {
fn define(pp: *Preprocessor, tokenizer: *Tokenizer, define_tok: RawToken) Error!void {
// Get macro name and validate it.
const macro_name = tokenizer.nextNoWS();
if (macro_name.id == .keyword_defined) {
@ -2524,7 +2567,7 @@ fn define(pp: *Preprocessor, tokenizer: *Tokenizer) Error!void {
// Check for function macros and empty defines.
var first = tokenizer.next();
switch (first.id) {
.nl, .eof => return pp.defineMacro(macro_name, .{
.nl, .eof => return pp.defineMacro(define_tok, macro_name, .{
.params = &.{},
.tokens = &.{},
.var_args = false,
@ -2532,7 +2575,7 @@ fn define(pp: *Preprocessor, tokenizer: *Tokenizer) Error!void {
.is_func = false,
}),
.whitespace => first = tokenizer.next(),
.l_paren => return pp.defineFn(tokenizer, macro_name, first),
.l_paren => return pp.defineFn(tokenizer, define_tok, macro_name, first),
else => try pp.err(first, .whitespace_after_macro_name),
}
if (first.id == .hash_hash) {
@ -2591,7 +2634,7 @@ fn define(pp: *Preprocessor, tokenizer: *Tokenizer) Error!void {
}
const list = try pp.arena.allocator().dupe(RawToken, pp.token_buf.items);
try pp.defineMacro(macro_name, .{
try pp.defineMacro(define_tok, macro_name, .{
.loc = tokFromRaw(macro_name).loc,
.tokens = list,
.params = undefined,
@ -2601,7 +2644,7 @@ fn define(pp: *Preprocessor, tokenizer: *Tokenizer) Error!void {
}
/// Handle a function like #define directive.
fn defineFn(pp: *Preprocessor, tokenizer: *Tokenizer, macro_name: RawToken, l_paren: RawToken) Error!void {
fn defineFn(pp: *Preprocessor, tokenizer: *Tokenizer, define_tok: RawToken, macro_name: RawToken, l_paren: RawToken) Error!void {
assert(macro_name.id.isMacroIdentifier());
var params = std.ArrayList([]const u8).init(pp.gpa);
defer params.deinit();
@ -2778,7 +2821,7 @@ fn defineFn(pp: *Preprocessor, tokenizer: *Tokenizer, macro_name: RawToken, l_pa
const param_list = try pp.arena.allocator().dupe([]const u8, params.items);
const token_list = try pp.arena.allocator().dupe(RawToken, pp.token_buf.items);
try pp.defineMacro(macro_name, .{
try pp.defineMacro(define_tok, macro_name, .{
.is_func = true,
.params = param_list,
.var_args = var_args or gnu_var_args.len != 0,
@ -3241,8 +3284,78 @@ fn printLinemarker(
// After how many empty lines are needed to replace them with linemarkers.
const collapse_newlines = 8;
pub const DumpMode = enum {
/// Standard preprocessor output; no macros
result_only,
/// Output only #define directives for all the macros defined during the execution of the preprocessor
/// Only macros which are still defined at the end of preprocessing are printed.
/// Only the most recent definition is printed
/// Defines are printed in arbitrary order
macros_only,
/// Standard preprocessor output; but additionally output #define's and #undef's for macros as they are encountered
macros_and_result,
/// Same as macros_and_result, except only the macro name is printed for #define's
macro_names_and_result,
};
/// Pretty-print the macro define or undef at location `loc`.
/// We re-tokenize the directive because we are printing a macro that may have the same name as one in
/// `pp.defines` but a different definition (due to being #undef'ed and then redefined)
fn prettyPrintMacro(pp: *Preprocessor, w: anytype, loc: Source.Location, parts: enum { name_only, name_and_body }) !void {
const source = pp.comp.getSource(loc.id);
var tokenizer: Tokenizer = .{
.buf = source.buf,
.langopts = pp.comp.langopts,
.source = source.id,
.index = loc.byte_offset,
};
var prev_ws = false; // avoid printing multiple whitespace if /* */ comments are within the macro def
var saw_name = false; // do not print comments before the name token is seen.
while (true) {
const tok = tokenizer.next();
switch (tok.id) {
.comment => {
if (saw_name) {
prev_ws = false;
try w.print("{s}", .{pp.tokSlice(tok)});
}
},
.nl, .eof => break,
.whitespace => {
if (!prev_ws) {
try w.writeByte(' ');
prev_ws = true;
}
},
else => {
prev_ws = false;
try w.print("{s}", .{pp.tokSlice(tok)});
},
}
if (tok.id == .identifier or tok.id == .extended_identifier) {
if (parts == .name_only) break;
saw_name = true;
}
}
}
fn prettyPrintMacrosOnly(pp: *Preprocessor, w: anytype) !void {
var it = pp.defines.valueIterator();
while (it.next()) |macro| {
if (macro.is_builtin) continue;
try w.writeAll("#define ");
try pp.prettyPrintMacro(w, macro.loc, .name_and_body);
try w.writeByte('\n');
}
}
/// Pretty print tokens and try to preserve whitespace.
pub fn prettyPrintTokens(pp: *Preprocessor, w: anytype) !void {
pub fn prettyPrintTokens(pp: *Preprocessor, w: anytype, macro_dump_mode: DumpMode) !void {
if (macro_dump_mode == .macros_only) {
return pp.prettyPrintMacrosOnly(w);
}
const tok_ids = pp.tokens.items(.id);
var i: u32 = 0;
@ -3334,6 +3447,17 @@ pub fn prettyPrintTokens(pp: *Preprocessor, w: anytype) !void {
try pp.printLinemarker(w, line_col.line_no, source, .@"resume");
last_nl = true;
},
.keyword_define, .keyword_undef => {
switch (macro_dump_mode) {
.macros_and_result, .macro_names_and_result => {
try w.writeByte('#');
try pp.prettyPrintMacro(w, cur.loc, if (macro_dump_mode == .macros_and_result) .name_and_body else .name_only);
last_nl = false;
},
.result_only => unreachable, // `pp.store_macro_tokens` should be false for standard preprocessor output
.macros_only => unreachable, // handled by prettyPrintMacrosOnly
}
},
else => {
const slice = pp.expandedSlice(cur);
try w.writeAll(slice);
@ -3350,7 +3474,7 @@ test "Preserve pragma tokens sometimes" {
var buf = std.ArrayList(u8).init(allocator);
defer buf.deinit();
var comp = Compilation.init(allocator);
var comp = Compilation.init(allocator, std.fs.cwd());
defer comp.deinit();
try comp.addDefaultPragmaHandlers();
@ -3364,7 +3488,7 @@ test "Preserve pragma tokens sometimes" {
const test_runner_macros = try comp.addSourceFromBuffer("<test_runner>", source_text);
const eof = try pp.preprocess(test_runner_macros);
try pp.addToken(eof);
try pp.prettyPrintTokens(buf.writer());
try pp.prettyPrintTokens(buf.writer(), .result_only);
return allocator.dupe(u8, buf.items);
}
@ -3410,7 +3534,7 @@ test "destringify" {
try std.testing.expectEqualStrings(destringified, pp.char_buf.items);
}
};
var comp = Compilation.init(allocator);
var comp = Compilation.init(allocator, std.fs.cwd());
defer comp.deinit();
var pp = Preprocessor.init(&comp);
defer pp.deinit();
@ -3468,7 +3592,7 @@ test "Include guards" {
}
fn testIncludeGuard(allocator: std.mem.Allocator, comptime template: []const u8, tok_id: RawToken.Id, expected_guards: u32) !void {
var comp = Compilation.init(allocator);
var comp = Compilation.init(allocator, std.fs.cwd());
defer comp.deinit();
var pp = Preprocessor.init(&comp);
defer pp.deinit();

View File

@ -75,7 +75,17 @@ pub fn lineCol(source: Source, loc: Location) LineCol {
i += 1;
continue;
};
const cp = std.unicode.utf8Decode(source.buf[i..][0..len]) catch {
const slice = source.buf[i..];
if (len > slice.len) {
break;
}
const cp = switch (len) {
1 => slice[0],
2 => std.unicode.utf8Decode2(slice[0..2].*),
3 => std.unicode.utf8Decode3(slice[0..3].*),
4 => std.unicode.utf8Decode4(slice[0..4].*),
else => unreachable,
} catch {
i += 1;
continue;
};

View File

@ -178,9 +178,11 @@ pub fn defineTypedef(
if (s.get(name, .vars)) |prev| {
switch (prev.kind) {
.typedef => {
if (!ty.eql(prev.ty, p.comp, true)) {
try p.errStr(.redefinition_of_typedef, tok, try p.typePairStrExtra(ty, " vs ", prev.ty));
if (prev.tok != 0) try p.errTok(.previous_definition, prev.tok);
if (!prev.ty.is(.invalid)) {
if (!ty.eql(prev.ty, p.comp, true)) {
try p.errStr(.redefinition_of_typedef, tok, try p.typePairStrExtra(ty, " vs ", prev.ty));
if (prev.tok != 0) try p.errTok(.previous_definition, prev.tok);
}
}
},
.enumeration, .decl, .def, .constexpr => {
@ -194,7 +196,12 @@ pub fn defineTypedef(
.kind = .typedef,
.name = name,
.tok = tok,
.ty = ty,
.ty = .{
.name = name,
.specifier = ty.specifier,
.qual = ty.qual,
.data = ty.data,
},
.node = node,
.val = .{},
});

View File

@ -178,6 +178,8 @@ pub const Token = struct {
keyword_return,
keyword_short,
keyword_signed,
keyword_signed1,
keyword_signed2,
keyword_sizeof,
keyword_static,
keyword_struct,
@ -258,7 +260,6 @@ pub const Token = struct {
keyword_asm,
keyword_asm1,
keyword_asm2,
keyword_float80,
/// _Float128
keyword_float128_1,
/// __float128
@ -369,6 +370,8 @@ pub const Token = struct {
.keyword_return,
.keyword_short,
.keyword_signed,
.keyword_signed1,
.keyword_signed2,
.keyword_sizeof,
.keyword_static,
.keyword_struct,
@ -417,7 +420,6 @@ pub const Token = struct {
.keyword_asm,
.keyword_asm1,
.keyword_asm2,
.keyword_float80,
.keyword_float128_1,
.keyword_float128_2,
.keyword_int128,
@ -627,6 +629,8 @@ pub const Token = struct {
.keyword_return => "return",
.keyword_short => "short",
.keyword_signed => "signed",
.keyword_signed1 => "__signed",
.keyword_signed2 => "__signed__",
.keyword_sizeof => "sizeof",
.keyword_static => "static",
.keyword_struct => "struct",
@ -702,7 +706,6 @@ pub const Token = struct {
.keyword_asm => "asm",
.keyword_asm1 => "__asm",
.keyword_asm2 => "__asm__",
.keyword_float80 => "__float80",
.keyword_float128_1 => "_Float128",
.keyword_float128_2 => "__float128",
.keyword_int128 => "__int128",
@ -732,7 +735,8 @@ pub const Token = struct {
pub fn symbol(id: Id) []const u8 {
return switch (id) {
.macro_string, .invalid => unreachable,
.macro_string => unreachable,
.invalid => "invalid bytes",
.identifier,
.extended_identifier,
.macro_func,
@ -873,10 +877,7 @@ pub const Token = struct {
}
const all_kws = std.StaticStringMap(Id).initComptime(.{
.{ "auto", auto: {
@setEvalBranchQuota(3000);
break :auto .keyword_auto;
} },
.{ "auto", .keyword_auto },
.{ "break", .keyword_break },
.{ "case", .keyword_case },
.{ "char", .keyword_char },
@ -898,6 +899,8 @@ pub const Token = struct {
.{ "return", .keyword_return },
.{ "short", .keyword_short },
.{ "signed", .keyword_signed },
.{ "__signed", .keyword_signed1 },
.{ "__signed__", .keyword_signed2 },
.{ "sizeof", .keyword_sizeof },
.{ "static", .keyword_static },
.{ "struct", .keyword_struct },
@ -982,7 +985,6 @@ pub const Token = struct {
.{ "asm", .keyword_asm },
.{ "__asm", .keyword_asm1 },
.{ "__asm__", .keyword_asm2 },
.{ "__float80", .keyword_float80 },
.{ "_Float128", .keyword_float128_1 },
.{ "__float128", .keyword_float128_2 },
.{ "__int128", .keyword_int128 },
@ -1300,11 +1302,17 @@ pub fn next(self: *Tokenizer) Token {
else => {},
},
.char_escape_sequence => switch (c) {
'\r', '\n' => unreachable, // removed by line splicing
'\r', '\n' => {
id = .unterminated_char_literal;
break;
},
else => state = .char_literal,
},
.string_escape_sequence => switch (c) {
'\r', '\n' => unreachable, // removed by line splicing
'\r', '\n' => {
id = .unterminated_string_literal;
break;
},
else => state = .string_literal,
},
.identifier, .extended_identifier => switch (c) {
@ -1792,7 +1800,7 @@ pub fn nextNoWSComments(self: *Tokenizer) Token {
/// Try to tokenize a '::' even if not supported by the current language standard.
pub fn colonColon(self: *Tokenizer) Token {
var tok = self.nextNoWS();
if (tok.id == .colon and self.buf[self.index] == ':') {
if (tok.id == .colon and self.index < self.buf.len and self.buf[self.index] == ':') {
self.index += 1;
tok.id = .colon_colon;
}
@ -2142,8 +2150,30 @@ test "C23 keywords" {
}, .c23);
}
test "Tokenizer fuzz test" {
var comp = Compilation.init(std.testing.allocator, std.fs.cwd());
defer comp.deinit();
const input_bytes = std.testing.fuzzInput(.{});
if (input_bytes.len == 0) return;
const source = try comp.addSourceFromBuffer("fuzz.c", input_bytes);
var tokenizer: Tokenizer = .{
.buf = source.buf,
.source = source.id,
.langopts = comp.langopts,
};
while (true) {
const prev_index = tokenizer.index;
const tok = tokenizer.next();
if (tok.id == .eof) break;
try std.testing.expect(prev_index < tokenizer.index); // ensure that the tokenizer always makes progress
}
}
fn expectTokensExtra(contents: []const u8, expected_tokens: []const Token.Id, standard: ?LangOpts.Standard) !void {
var comp = Compilation.init(std.testing.allocator);
var comp = Compilation.init(std.testing.allocator, std.fs.cwd());
defer comp.deinit();
if (standard) |provided| {
comp.langopts.standard = provided;

View File

@ -137,15 +137,22 @@ pub const Node = struct {
tag: Tag,
ty: Type = .{ .specifier = .void },
data: Data,
loc: Loc = .none,
pub const Range = struct { start: u32, end: u32 };
pub const Loc = enum(u32) {
none = std.math.maxInt(u32),
_,
};
pub const Data = union {
decl: struct {
name: TokenIndex,
node: NodeIndex = .none,
},
decl_ref: TokenIndex,
two: [2]NodeIndex,
range: Range,
if3: struct {
cond: NodeIndex,
@ -277,7 +284,8 @@ pub const Tag = enum(u8) {
// ====== Decl ======
// _Static_assert
/// _Static_assert
/// loc is token index of _Static_assert
static_assert,
// function prototype
@ -303,17 +311,18 @@ pub const Tag = enum(u8) {
threadlocal_static_var,
/// __asm__("...") at file scope
/// loc is token index of __asm__ keyword
file_scope_asm,
// typedef declaration
typedef,
// container declarations
/// { lhs; rhs; }
/// { two[0]; two[1]; }
struct_decl_two,
/// { lhs; rhs; }
/// { two[0]; two[1]; }
union_decl_two,
/// { lhs, rhs, }
/// { two[0], two[1], }
enum_decl_two,
/// { range }
struct_decl,
@ -339,7 +348,7 @@ pub const Tag = enum(u8) {
// ====== Stmt ======
labeled_stmt,
/// { first; second; } first and second may be null
/// { two[0]; two[1]; } first and second may be null
compound_stmt_two,
/// { data }
compound_stmt,
@ -476,7 +485,7 @@ pub const Tag = enum(u8) {
real_expr,
/// lhs[rhs] lhs is pointer/array type, rhs is integer type
array_access_expr,
/// first(second) second may be 0
/// two[0](two[1]) two[1] may be 0
call_expr_one,
/// data[0](data[1..])
call_expr,
@ -515,7 +524,7 @@ pub const Tag = enum(u8) {
sizeof_expr,
/// _Alignof(un?)
alignof_expr,
/// _Generic(controlling lhs, chosen rhs)
/// _Generic(controlling two[0], chosen two[1])
generic_expr_one,
/// _Generic(controlling range[0], chosen range[1], rest range[2..])
generic_expr,
@ -534,28 +543,34 @@ pub const Tag = enum(u8) {
// ====== Initializer expressions ======
/// { lhs, rhs }
/// { two[0], two[1] }
array_init_expr_two,
/// { range }
array_init_expr,
/// { lhs, rhs }
/// { two[0], two[1] }
struct_init_expr_two,
/// { range }
struct_init_expr,
/// { union_init }
union_init_expr,
/// (ty){ un }
/// loc is token index of l_paren
compound_literal_expr,
/// (static ty){ un }
/// loc is token index of l_paren
static_compound_literal_expr,
/// (thread_local ty){ un }
/// loc is token index of l_paren
thread_local_compound_literal_expr,
/// (static thread_local ty){ un }
/// loc is token index of l_paren
static_thread_local_compound_literal_expr,
/// Inserted at the end of a function body if no return stmt is found.
/// ty is the functions return type
/// data is return_zero which is true if the function is called "main" and ty is compatible with int
/// loc is token index of closing r_brace of function
implicit_return,
/// Inserted in array_init_expr to represent unspecified elements.
@ -608,6 +623,57 @@ pub fn bitfieldWidth(tree: *const Tree, node: NodeIndex, inspect_lval: bool) ?u3
}
}
const CallableResultUsage = struct {
/// name token of the thing being called, for diagnostics
tok: TokenIndex,
/// true if `nodiscard` attribute present
nodiscard: bool,
/// true if `warn_unused_result` attribute present
warn_unused_result: bool,
};
pub fn callableResultUsage(tree: *const Tree, node: NodeIndex) ?CallableResultUsage {
const data = tree.nodes.items(.data);
var cur_node = node;
while (true) switch (tree.nodes.items(.tag)[@intFromEnum(cur_node)]) {
.decl_ref_expr => {
const tok = data[@intFromEnum(cur_node)].decl_ref;
const fn_ty = tree.nodes.items(.ty)[@intFromEnum(node)].elemType();
return .{
.tok = tok,
.nodiscard = fn_ty.hasAttribute(.nodiscard),
.warn_unused_result = fn_ty.hasAttribute(.warn_unused_result),
};
},
.paren_expr => cur_node = data[@intFromEnum(cur_node)].un,
.comma_expr => cur_node = data[@intFromEnum(cur_node)].bin.rhs,
.explicit_cast, .implicit_cast => cur_node = data[@intFromEnum(cur_node)].cast.operand,
.addr_of_expr, .deref_expr => cur_node = data[@intFromEnum(cur_node)].un,
.call_expr_one => cur_node = data[@intFromEnum(cur_node)].two[0],
.call_expr => cur_node = tree.data[data[@intFromEnum(cur_node)].range.start],
.member_access_expr, .member_access_ptr_expr => {
const member = data[@intFromEnum(cur_node)].member;
var ty = tree.nodes.items(.ty)[@intFromEnum(member.lhs)];
if (ty.isPtr()) ty = ty.elemType();
const record = ty.getRecord().?;
const field = record.fields[member.index];
const attributes = if (record.field_attributes) |attrs| attrs[member.index] else &.{};
return .{
.tok = field.name_tok,
.nodiscard = for (attributes) |attr| {
if (attr.tag == .nodiscard) break true;
} else false,
.warn_unused_result = for (attributes) |attr| {
if (attr.tag == .warn_unused_result) break true;
} else false,
};
},
else => return null,
};
}
pub fn isLval(tree: *const Tree, node: NodeIndex) bool {
var is_const: bool = undefined;
return tree.isLvalExtra(node, &is_const);
@ -672,17 +738,66 @@ pub fn isLvalExtra(tree: *const Tree, node: NodeIndex, is_const: *bool) bool {
}
}
/// This should only be used for node tags that represent AST nodes which have an arbitrary number of children
/// It particular it should *not* be used for nodes with .un or .bin data types
///
/// For call expressions, child_nodes[0] is the function pointer being called and child_nodes[1..]
/// are the arguments
///
/// For generic selection expressions, child_nodes[0] is the controlling expression,
/// child_nodes[1] is the chosen expression (it is a syntax error for there to be no chosen expression),
/// and child_nodes[2..] are the remaining expressions.
pub fn childNodes(tree: *const Tree, node: NodeIndex) []const NodeIndex {
const tags = tree.nodes.items(.tag);
const data = tree.nodes.items(.data);
switch (tags[@intFromEnum(node)]) {
.compound_stmt_two,
.array_init_expr_two,
.struct_init_expr_two,
.enum_decl_two,
.struct_decl_two,
.union_decl_two,
.call_expr_one,
.generic_expr_one,
=> {
const index: u32 = @intFromEnum(node);
const end = std.mem.indexOfScalar(NodeIndex, &data[index].two, .none) orelse 2;
return data[index].two[0..end];
},
.compound_stmt,
.array_init_expr,
.struct_init_expr,
.enum_decl,
.struct_decl,
.union_decl,
.call_expr,
.generic_expr,
=> {
const range = data[@intFromEnum(node)].range;
return tree.data[range.start..range.end];
},
else => unreachable,
}
}
pub fn tokSlice(tree: *const Tree, tok_i: TokenIndex) []const u8 {
if (tree.tokens.items(.id)[tok_i].lexeme()) |some| return some;
const loc = tree.tokens.items(.loc)[tok_i];
var tmp_tokenizer = Tokenizer{
.buf = tree.comp.getSource(loc.id).buf,
.langopts = tree.comp.langopts,
.index = loc.byte_offset,
.source = .generated,
return tree.comp.locSlice(loc);
}
pub fn nodeTok(tree: *const Tree, node: NodeIndex) ?TokenIndex {
std.debug.assert(node != .none);
const loc = tree.nodes.items(.loc)[@intFromEnum(node)];
return switch (loc) {
.none => null,
else => |tok_i| @intFromEnum(tok_i),
};
const tok = tmp_tokenizer.next();
return tmp_tokenizer.buf[tok.start..tok.end];
}
pub fn nodeLoc(tree: *const Tree, node: NodeIndex) ?Source.Location {
const tok_i = tree.nodeTok(node) orelse return null;
return tree.tokens.items(.loc)[@intFromEnum(tok_i)];
}
pub fn dump(tree: *const Tree, config: std.io.tty.Config, writer: anytype) !void {
@ -766,6 +881,10 @@ fn dumpNode(
}
try config.setColor(w, TYPE);
try w.writeByte('\'');
const name = ty.getName();
if (name != .empty) {
try w.print("{s}': '", .{mapper.lookup(name)});
}
try ty.dump(mapper, tree.comp.langopts, w);
try w.writeByte('\'');
@ -794,7 +913,9 @@ fn dumpNode(
if (ty.specifier == .attributed) {
try config.setColor(w, ATTRIBUTE);
for (ty.data.attributed.attributes) |attr| {
var it = Attribute.Iterator.initType(ty);
while (it.next()) |item| {
const attr, _ = item;
try w.writeByteNTimes(' ', level + half);
try w.print("attr: {s}", .{@tagName(attr.tag)});
try tree.dumpAttribute(attr, w);
@ -900,20 +1021,6 @@ fn dumpNode(
.enum_decl,
.struct_decl,
.union_decl,
=> {
const maybe_field_attributes = if (ty.getRecord()) |record| record.field_attributes else null;
for (tree.data[data.range.start..data.range.end], 0..) |stmt, i| {
if (i != 0) try w.writeByte('\n');
try tree.dumpNode(stmt, level + delta, mapper, config, w);
if (maybe_field_attributes) |field_attributes| {
if (field_attributes[i].len == 0) continue;
try config.setColor(w, ATTRIBUTE);
try tree.dumpFieldAttributes(field_attributes[i], level + delta + half, w);
try config.setColor(w, .reset);
}
}
},
.compound_stmt_two,
.array_init_expr_two,
.struct_init_expr_two,
@ -921,22 +1028,16 @@ fn dumpNode(
.struct_decl_two,
.union_decl_two,
=> {
var attr_array = [2][]const Attribute{ &.{}, &.{} };
const empty: [][]const Attribute = &attr_array;
const field_attributes = if (ty.getRecord()) |record| (record.field_attributes orelse empty.ptr) else empty.ptr;
if (data.bin.lhs != .none) {
try tree.dumpNode(data.bin.lhs, level + delta, mapper, config, w);
if (field_attributes[0].len > 0) {
const child_nodes = tree.childNodes(node);
const maybe_field_attributes = if (ty.getRecord()) |record| record.field_attributes else null;
for (child_nodes, 0..) |stmt, i| {
if (i != 0) try w.writeByte('\n');
try tree.dumpNode(stmt, level + delta, mapper, config, w);
if (maybe_field_attributes) |field_attributes| {
if (field_attributes[i].len == 0) continue;
try config.setColor(w, ATTRIBUTE);
try tree.dumpFieldAttributes(field_attributes[0], level + delta + half, w);
try config.setColor(w, .reset);
}
}
if (data.bin.rhs != .none) {
try tree.dumpNode(data.bin.rhs, level + delta, mapper, config, w);
if (field_attributes[1].len > 0) {
try config.setColor(w, ATTRIBUTE);
try tree.dumpFieldAttributes(field_attributes[1], level + delta + half, w);
try tree.dumpFieldAttributes(field_attributes[i], level + delta + half, w);
try config.setColor(w, .reset);
}
}
@ -1130,23 +1231,21 @@ fn dumpNode(
try tree.dumpNode(data.un, level + delta, mapper, config, w);
}
},
.call_expr => {
try w.writeByteNTimes(' ', level + half);
try w.writeAll("lhs:\n");
try tree.dumpNode(tree.data[data.range.start], level + delta, mapper, config, w);
.call_expr, .call_expr_one => {
const child_nodes = tree.childNodes(node);
const fn_ptr = child_nodes[0];
const args = child_nodes[1..];
try w.writeByteNTimes(' ', level + half);
try w.writeAll("args:\n");
for (tree.data[data.range.start + 1 .. data.range.end]) |arg| try tree.dumpNode(arg, level + delta, mapper, config, w);
},
.call_expr_one => {
try w.writeByteNTimes(' ', level + half);
try w.writeAll("lhs:\n");
try tree.dumpNode(data.bin.lhs, level + delta, mapper, config, w);
if (data.bin.rhs != .none) {
try tree.dumpNode(fn_ptr, level + delta, mapper, config, w);
if (args.len > 0) {
try w.writeByteNTimes(' ', level + half);
try w.writeAll("arg:\n");
try tree.dumpNode(data.bin.rhs, level + delta, mapper, config, w);
try w.writeAll("args:\n");
for (args) |arg| {
try tree.dumpNode(arg, level + delta, mapper, config, w);
}
}
},
.builtin_call_expr => {
@ -1295,28 +1394,25 @@ fn dumpNode(
try tree.dumpNode(data.un, level + delta, mapper, config, w);
}
},
.generic_expr_one => {
.generic_expr, .generic_expr_one => {
const child_nodes = tree.childNodes(node);
const controlling = child_nodes[0];
const chosen = child_nodes[1];
const rest = child_nodes[2..];
try w.writeByteNTimes(' ', level + 1);
try w.writeAll("controlling:\n");
try tree.dumpNode(data.bin.lhs, level + delta, mapper, config, w);
try w.writeByteNTimes(' ', level + 1);
if (data.bin.rhs != .none) {
try w.writeAll("chosen:\n");
try tree.dumpNode(data.bin.rhs, level + delta, mapper, config, w);
}
},
.generic_expr => {
const nodes = tree.data[data.range.start..data.range.end];
try w.writeByteNTimes(' ', level + 1);
try w.writeAll("controlling:\n");
try tree.dumpNode(nodes[0], level + delta, mapper, config, w);
try tree.dumpNode(controlling, level + delta, mapper, config, w);
try w.writeByteNTimes(' ', level + 1);
try w.writeAll("chosen:\n");
try tree.dumpNode(nodes[1], level + delta, mapper, config, w);
try w.writeByteNTimes(' ', level + 1);
try w.writeAll("rest:\n");
for (nodes[2..]) |expr| {
try tree.dumpNode(expr, level + delta, mapper, config, w);
try tree.dumpNode(chosen, level + delta, mapper, config, w);
if (rest.len > 0) {
try w.writeByteNTimes(' ', level + 1);
try w.writeAll("rest:\n");
for (rest) |expr| {
try tree.dumpNode(expr, level + delta, mapper, config, w);
}
}
},
.generic_association_expr, .generic_default_expr, .stmt_expr, .imaginary_literal => {

View File

@ -74,8 +74,8 @@ pub const Suffix = enum {
// float and imaginary float
F, IF,
// _Float16
F16,
// _Float16 and imaginary _Float16
F16, IF16,
// __float80
W,
@ -129,6 +129,7 @@ pub const Suffix = enum {
.{ .I, &.{"I"} },
.{ .IL, &.{ "I", "L" } },
.{ .IF16, &.{ "I", "F16" } },
.{ .IF, &.{ "I", "F" } },
.{ .IW, &.{ "I", "W" } },
.{ .IF128, &.{ "I", "F128" } },
@ -161,7 +162,7 @@ pub const Suffix = enum {
pub fn isImaginary(suffix: Suffix) bool {
return switch (suffix) {
.I, .IL, .IF, .IU, .IUL, .ILL, .IULL, .IWB, .IUWB, .IF128, .IQ, .IW => true,
.I, .IL, .IF, .IU, .IUL, .ILL, .IULL, .IWB, .IUWB, .IF128, .IQ, .IW, .IF16 => true,
.None, .L, .F16, .F, .U, .UL, .LL, .ULL, .WB, .UWB, .F128, .Q, .W => false,
};
}
@ -170,7 +171,7 @@ pub const Suffix = enum {
return switch (suffix) {
.None, .L, .LL, .I, .IL, .ILL, .WB, .IWB => true,
.U, .UL, .ULL, .IU, .IUL, .IULL, .UWB, .IUWB => false,
.F, .IF, .F16, .F128, .IF128, .Q, .IQ, .W, .IW => unreachable,
.F, .IF, .F16, .F128, .IF128, .Q, .IQ, .W, .IW, .IF16 => unreachable,
};
}
@ -184,4 +185,8 @@ pub const Suffix = enum {
else => false,
};
}
pub fn isFloat80(suffix: Suffix) bool {
return suffix == .W or suffix == .IW;
}
};

View File

@ -146,17 +146,14 @@ pub const Attributed = struct {
attributes: []Attribute,
base: Type,
pub fn create(allocator: std.mem.Allocator, base: Type, existing_attributes: []const Attribute, attributes: []const Attribute) !*Attributed {
pub fn create(allocator: std.mem.Allocator, base_ty: Type, attributes: []const Attribute) !*Attributed {
const attributed_type = try allocator.create(Attributed);
errdefer allocator.destroy(attributed_type);
const all_attrs = try allocator.alloc(Attribute, existing_attributes.len + attributes.len);
@memcpy(all_attrs[0..existing_attributes.len], existing_attributes);
@memcpy(all_attrs[existing_attributes.len..], attributes);
const duped = try allocator.dupe(Attribute, attributes);
attributed_type.* = .{
.attributes = all_attrs,
.base = base,
.attributes = duped,
.base = base_ty,
};
return attributed_type;
}
@ -190,13 +187,10 @@ pub const Enum = struct {
}
};
// might not need all 4 of these when finished,
// but currently it helps having all 4 when diff-ing
// the rust code.
pub const TypeLayout = struct {
/// The size of the type in bits.
///
/// This is the value returned by `sizeof` and C and `std::mem::size_of` in Rust
/// This is the value returned by `sizeof` in C
/// (but in bits instead of bytes). This is a multiple of `pointer_alignment_bits`.
size_bits: u64,
/// The alignment of the type, in bits, when used as a field in a record.
@ -205,9 +199,7 @@ pub const TypeLayout = struct {
/// cases in GCC where `_Alignof` returns a smaller value.
field_alignment_bits: u32,
/// The alignment, in bits, of valid pointers to this type.
///
/// This is the value returned by `std::mem::align_of` in Rust
/// (but in bits instead of bytes). `size_bits` is a multiple of this value.
/// `size_bits` is a multiple of this value.
pointer_alignment_bits: u32,
/// The required alignment of the type in bits.
///
@ -301,6 +293,15 @@ pub const Record = struct {
}
return false;
}
pub fn hasField(self: *const Record, name: StringId) bool {
std.debug.assert(!self.isIncomplete());
for (self.fields) |f| {
if (f.isAnonymousRecord() and f.ty.getRecord().?.hasField(name)) return true;
if (name == f.name) return true;
}
return false;
}
};
pub const Specifier = enum {
@ -354,12 +355,11 @@ pub const Specifier = enum {
float,
double,
long_double,
float80,
float128,
complex_float16,
complex_float,
complex_double,
complex_long_double,
complex_float80,
complex_float128,
// data.sub_type
@ -422,6 +422,8 @@ data: union {
specifier: Specifier,
qual: Qualifiers = .{},
decayed: bool = false,
/// typedef name, if any
name: StringId = .empty,
pub const int = Type{ .specifier = .int };
pub const invalid = Type{ .specifier = .invalid };
@ -435,8 +437,8 @@ pub fn is(ty: Type, specifier: Specifier) bool {
pub fn withAttributes(self: Type, allocator: std.mem.Allocator, attributes: []const Attribute) !Type {
if (attributes.len == 0) return self;
const attributed_type = try Type.Attributed.create(allocator, self, self.getAttributes(), attributes);
return Type{ .specifier = .attributed, .data = .{ .attributed = attributed_type }, .decayed = self.decayed };
const attributed_type = try Type.Attributed.create(allocator, self, attributes);
return .{ .specifier = .attributed, .data = .{ .attributed = attributed_type }, .decayed = self.decayed };
}
pub fn isCallable(ty: Type) ?Type {
@ -470,6 +472,23 @@ pub fn isArray(ty: Type) bool {
};
}
/// Must only be used to set the length of an incomplete array as determined by its initializer
pub fn setIncompleteArrayLen(ty: *Type, len: u64) void {
switch (ty.specifier) {
.incomplete_array => {
// Modifying .data is exceptionally allowed for .incomplete_array.
ty.data.array.len = len;
ty.specifier = .array;
},
.typeof_type => ty.data.sub_type.setIncompleteArrayLen(len),
.typeof_expr => ty.data.expr.ty.setIncompleteArrayLen(len),
.attributed => ty.data.attributed.base.setIncompleteArrayLen(len),
else => unreachable,
}
}
/// Whether the type is promoted if used as a variadic argument or as an argument to a function with no prototype
fn undergoesDefaultArgPromotion(ty: Type, comp: *const Compilation) bool {
return switch (ty.specifier) {
@ -536,7 +555,7 @@ pub fn isFloat(ty: Type) bool {
return switch (ty.specifier) {
// zig fmt: off
.float, .double, .long_double, .complex_float, .complex_double, .complex_long_double,
.fp16, .float16, .float80, .float128, .complex_float80, .complex_float128 => true,
.fp16, .float16, .float128, .complex_float128, .complex_float16 => true,
// zig fmt: on
.typeof_type => ty.data.sub_type.isFloat(),
.typeof_expr => ty.data.expr.ty.isFloat(),
@ -548,11 +567,11 @@ pub fn isFloat(ty: Type) bool {
pub fn isReal(ty: Type) bool {
return switch (ty.specifier) {
// zig fmt: off
.complex_float, .complex_double, .complex_long_double, .complex_float80,
.complex_float, .complex_double, .complex_long_double,
.complex_float128, .complex_char, .complex_schar, .complex_uchar, .complex_short,
.complex_ushort, .complex_int, .complex_uint, .complex_long, .complex_ulong,
.complex_long_long, .complex_ulong_long, .complex_int128, .complex_uint128,
.complex_bit_int => false,
.complex_bit_int, .complex_float16 => false,
// zig fmt: on
.typeof_type => ty.data.sub_type.isReal(),
.typeof_expr => ty.data.expr.ty.isReal(),
@ -564,11 +583,11 @@ pub fn isReal(ty: Type) bool {
pub fn isComplex(ty: Type) bool {
return switch (ty.specifier) {
// zig fmt: off
.complex_float, .complex_double, .complex_long_double, .complex_float80,
.complex_float, .complex_double, .complex_long_double,
.complex_float128, .complex_char, .complex_schar, .complex_uchar, .complex_short,
.complex_ushort, .complex_int, .complex_uint, .complex_long, .complex_ulong,
.complex_long_long, .complex_ulong_long, .complex_int128, .complex_uint128,
.complex_bit_int => true,
.complex_bit_int, .complex_float16 => true,
// zig fmt: on
.typeof_type => ty.data.sub_type.isComplex(),
.typeof_expr => ty.data.expr.ty.isComplex(),
@ -671,11 +690,11 @@ pub fn elemType(ty: Type) Type {
.attributed => ty.data.attributed.base.elemType(),
.invalid => Type.invalid,
// zig fmt: off
.complex_float, .complex_double, .complex_long_double, .complex_float80,
.complex_float, .complex_double, .complex_long_double,
.complex_float128, .complex_char, .complex_schar, .complex_uchar, .complex_short,
.complex_ushort, .complex_int, .complex_uint, .complex_long, .complex_ulong,
.complex_long_long, .complex_ulong_long, .complex_int128, .complex_uint128,
.complex_bit_int => ty.makeReal(),
.complex_bit_int, .complex_float16 => ty.makeReal(),
// zig fmt: on
else => unreachable,
};
@ -703,6 +722,16 @@ pub fn params(ty: Type) []Func.Param {
};
}
/// Returns true if the return value or any param of `ty` is `.invalid`
/// Asserts that ty is a function type
pub fn isInvalidFunc(ty: Type) bool {
if (ty.returnType().is(.invalid)) return true;
for (ty.params()) |param| {
if (param.ty.is(.invalid)) return true;
}
return false;
}
pub fn arrayLen(ty: Type) ?u64 {
return switch (ty.specifier) {
.array, .static_array => ty.data.array.len,
@ -726,15 +755,6 @@ pub fn anyQual(ty: Type) bool {
};
}
pub fn getAttributes(ty: Type) []const Attribute {
return switch (ty.specifier) {
.attributed => ty.data.attributed.attributes,
.typeof_type => ty.data.sub_type.getAttributes(),
.typeof_expr => ty.data.expr.ty.getAttributes(),
else => &.{},
};
}
pub fn getRecord(ty: Type) ?*const Type.Record {
return switch (ty.specifier) {
.attributed => ty.data.attributed.base.getRecord(),
@ -795,8 +815,8 @@ fn realIntegerConversion(a: Type, b: Type, comp: *const Compilation) Type {
pub fn makeIntegerUnsigned(ty: Type) Type {
// TODO discards attributed/typeof
var base = ty.canonicalize(.standard);
switch (base.specifier) {
var base_ty = ty.canonicalize(.standard);
switch (base_ty.specifier) {
// zig fmt: off
.uchar, .ushort, .uint, .ulong, .ulong_long, .uint128,
.complex_uchar, .complex_ushort, .complex_uint, .complex_ulong, .complex_ulong_long, .complex_uint128,
@ -804,21 +824,21 @@ pub fn makeIntegerUnsigned(ty: Type) Type {
// zig fmt: on
.char, .complex_char => {
base.specifier = @enumFromInt(@intFromEnum(base.specifier) + 2);
return base;
base_ty.specifier = @enumFromInt(@intFromEnum(base_ty.specifier) + 2);
return base_ty;
},
// zig fmt: off
.schar, .short, .int, .long, .long_long, .int128,
.complex_schar, .complex_short, .complex_int, .complex_long, .complex_long_long, .complex_int128 => {
base.specifier = @enumFromInt(@intFromEnum(base.specifier) + 1);
return base;
base_ty.specifier = @enumFromInt(@intFromEnum(base_ty.specifier) + 1);
return base_ty;
},
// zig fmt: on
.bit_int, .complex_bit_int => {
base.data.int.signedness = .unsigned;
return base;
base_ty.data.int.signedness = .unsigned;
return base_ty;
},
else => unreachable,
}
@ -837,6 +857,8 @@ pub fn integerPromotion(ty: Type, comp: *Compilation) Type {
switch (specifier) {
.@"enum" => {
if (ty.hasIncompleteSize()) return .{ .specifier = .int };
if (ty.data.@"enum".fixed) return ty.data.@"enum".tag_ty.integerPromotion(comp);
specifier = ty.data.@"enum".tag_ty.specifier;
},
.bit_int, .complex_bit_int => return .{ .specifier = specifier, .data = ty.data },
@ -915,53 +937,7 @@ pub fn hasUnboundVLA(ty: Type) bool {
}
pub fn hasField(ty: Type, name: StringId) bool {
switch (ty.specifier) {
.@"struct" => {
std.debug.assert(!ty.data.record.isIncomplete());
for (ty.data.record.fields) |f| {
if (f.isAnonymousRecord() and f.ty.hasField(name)) return true;
if (name == f.name) return true;
}
},
.@"union" => {
std.debug.assert(!ty.data.record.isIncomplete());
for (ty.data.record.fields) |f| {
if (f.isAnonymousRecord() and f.ty.hasField(name)) return true;
if (name == f.name) return true;
}
},
.typeof_type => return ty.data.sub_type.hasField(name),
.typeof_expr => return ty.data.expr.ty.hasField(name),
.attributed => return ty.data.attributed.base.hasField(name),
.invalid => return false,
else => unreachable,
}
return false;
}
// TODO handle bitints
pub fn minInt(ty: Type, comp: *const Compilation) i64 {
std.debug.assert(ty.isInt());
if (ty.isUnsignedInt(comp)) return 0;
return switch (ty.sizeof(comp).?) {
1 => std.math.minInt(i8),
2 => std.math.minInt(i16),
4 => std.math.minInt(i32),
8 => std.math.minInt(i64),
else => unreachable,
};
}
// TODO handle bitints
pub fn maxInt(ty: Type, comp: *const Compilation) u64 {
std.debug.assert(ty.isInt());
return switch (ty.sizeof(comp).?) {
1 => if (ty.isUnsignedInt(comp)) @as(u64, std.math.maxInt(u8)) else std.math.maxInt(i8),
2 => if (ty.isUnsignedInt(comp)) @as(u64, std.math.maxInt(u16)) else std.math.maxInt(i16),
4 => if (ty.isUnsignedInt(comp)) @as(u64, std.math.maxInt(u32)) else std.math.maxInt(i32),
8 => if (ty.isUnsignedInt(comp)) @as(u64, std.math.maxInt(u64)) else std.math.maxInt(i64),
else => unreachable,
};
return ty.getRecord().?.hasField(name);
}
const TypeSizeOrder = enum {
@ -1004,16 +980,15 @@ pub fn sizeof(ty: Type, comp: *const Compilation) ?u64 {
.fp16, .float16 => 2,
.float => comp.target.cTypeByteSize(.float),
.double => comp.target.cTypeByteSize(.double),
.float80 => 16,
.float128 => 16,
.bit_int => {
return std.mem.alignForward(u64, (ty.data.int.bits + 7) / 8, ty.alignof(comp));
return std.mem.alignForward(u64, (@as(u32, ty.data.int.bits) + 7) / 8, ty.alignof(comp));
},
// zig fmt: off
.complex_char, .complex_schar, .complex_uchar, .complex_short, .complex_ushort, .complex_int,
.complex_uint, .complex_long, .complex_ulong, .complex_long_long, .complex_ulong_long,
.complex_int128, .complex_uint128, .complex_float, .complex_double,
.complex_long_double, .complex_float80, .complex_float128, .complex_bit_int,
.complex_long_double, .complex_float128, .complex_bit_int, .complex_float16,
=> return 2 * ty.makeReal().sizeof(comp).?,
// zig fmt: on
.pointer => unreachable,
@ -1050,7 +1025,6 @@ pub fn bitSizeof(ty: Type, comp: *const Compilation) ?u64 {
.attributed => ty.data.attributed.base.bitSizeof(comp),
.bit_int => return ty.data.int.bits,
.long_double => comp.target.cTypeBitSize(.longdouble),
.float80 => return 80,
else => 8 * (ty.sizeof(comp) orelse return null),
};
}
@ -1100,7 +1074,7 @@ pub fn alignof(ty: Type, comp: *const Compilation) u29 {
.complex_char, .complex_schar, .complex_uchar, .complex_short, .complex_ushort, .complex_int,
.complex_uint, .complex_long, .complex_ulong, .complex_long_long, .complex_ulong_long,
.complex_int128, .complex_uint128, .complex_float, .complex_double,
.complex_long_double, .complex_float80, .complex_float128, .complex_bit_int,
.complex_long_double, .complex_float128, .complex_bit_int, .complex_float16,
=> return ty.makeReal().alignof(comp),
// zig fmt: on
@ -1114,10 +1088,15 @@ pub fn alignof(ty: Type, comp: *const Compilation) u29 {
.long_long => comp.target.cTypeAlignment(.longlong),
.ulong_long => comp.target.cTypeAlignment(.ulonglong),
.bit_int => @min(
std.math.ceilPowerOfTwoPromote(u16, (ty.data.int.bits + 7) / 8),
16, // comp.target.maxIntAlignment(), please use your own logic for this value as it is implementation-defined
),
.bit_int => {
// https://www.open-std.org/jtc1/sc22/wg14/www/docs/n2709.pdf
// _BitInt(N) types align with existing calling conventions. They have the same size and alignment as the
// smallest basic type that can contain them. Types that are larger than __int64_t are conceptually treated
// as struct of register size chunks. The number of chunks is the smallest number that can contain the type.
if (ty.data.int.bits > 64) return 8;
const basic_type = comp.intLeastN(ty.data.int.bits, ty.data.int.signedness);
return basic_type.alignof(comp);
},
.float => comp.target.cTypeAlignment(.float),
.double => comp.target.cTypeAlignment(.double),
@ -1126,7 +1105,7 @@ pub fn alignof(ty: Type, comp: *const Compilation) u29 {
.int128, .uint128 => if (comp.target.cpu.arch == .s390x and comp.target.os.tag == .linux and comp.target.isGnu()) 8 else 16,
.fp16, .float16 => 2,
.float80, .float128 => 16,
.float128 => 16,
.pointer,
.static_array,
.nullptr_t,
@ -1142,7 +1121,11 @@ pub fn alignof(ty: Type, comp: *const Compilation) u29 {
};
}
pub const QualHandling = enum { standard, preserve_quals };
// This enum should be kept public because it is used by the downstream zig translate-c
pub const QualHandling = enum {
standard,
preserve_quals,
};
/// Canonicalize a possibly-typeof() type. If the type is not a typeof() type, simply
/// return it. Otherwise, determine the actual qualified type.
@ -1151,17 +1134,12 @@ pub const QualHandling = enum { standard, preserve_quals };
/// arrays and pointers.
pub fn canonicalize(ty: Type, qual_handling: QualHandling) Type {
var cur = ty;
if (cur.specifier == .attributed) {
cur = cur.data.attributed.base;
cur.decayed = ty.decayed;
}
if (!cur.isTypeof()) return cur;
var qual = cur.qual;
while (true) {
switch (cur.specifier) {
.typeof_type => cur = cur.data.sub_type.*,
.typeof_expr => cur = cur.data.expr.ty,
.attributed => cur = cur.data.attributed.base,
else => break,
}
qual = qual.mergeAll(cur.qual);
@ -1189,7 +1167,7 @@ pub fn requestedAlignment(ty: Type, comp: *const Compilation) ?u29 {
return switch (ty.specifier) {
.typeof_type => ty.data.sub_type.requestedAlignment(comp),
.typeof_expr => ty.data.expr.ty.requestedAlignment(comp),
.attributed => annotationAlignment(comp, ty.data.attributed.attributes),
.attributed => annotationAlignment(comp, Attribute.Iterator.initType(ty)),
else => null,
};
}
@ -1199,12 +1177,27 @@ pub fn enumIsPacked(ty: Type, comp: *const Compilation) bool {
return comp.langopts.short_enums or target_util.packAllEnums(comp.target) or ty.hasAttribute(.@"packed");
}
pub fn annotationAlignment(comp: *const Compilation, attrs: ?[]const Attribute) ?u29 {
const a = attrs orelse return null;
pub fn getName(ty: Type) StringId {
return switch (ty.specifier) {
.typeof_type => if (ty.name == .empty) ty.data.sub_type.getName() else ty.name,
.typeof_expr => if (ty.name == .empty) ty.data.expr.ty.getName() else ty.name,
.attributed => if (ty.name == .empty) ty.data.attributed.base.getName() else ty.name,
else => ty.name,
};
}
pub fn annotationAlignment(comp: *const Compilation, attrs: Attribute.Iterator) ?u29 {
var it = attrs;
var max_requested: ?u29 = null;
for (a) |attribute| {
var last_aligned_index: ?usize = null;
while (it.next()) |item| {
const attribute, const index = item;
if (attribute.tag != .aligned) continue;
if (last_aligned_index) |aligned_index| {
// once we recurse into a new type, after an `aligned` attribute was found, we're done
if (index <= aligned_index) break;
}
last_aligned_index = index;
const requested = if (attribute.args.aligned.alignment) |alignment| alignment.requested else target_util.defaultAlignment(comp.target);
if (max_requested == null or max_requested.? < requested) {
max_requested = requested;
@ -1225,6 +1218,10 @@ pub fn eql(a_param: Type, b_param: Type, comp: *const Compilation, check_qualifi
if (!b.isFunc()) return false;
} else if (a.isArray()) {
if (!b.isArray()) return false;
} else if (a.specifier == .@"enum" and b.specifier != .@"enum") {
return a.data.@"enum".tag_ty.eql(b, comp, check_qualifiers);
} else if (b.specifier == .@"enum" and a.specifier != .@"enum") {
return a.eql(b.data.@"enum".tag_ty, comp, check_qualifiers);
} else if (a.specifier != b.specifier) return false;
if (a.qual.atomic != b.qual.atomic) return false;
@ -1315,6 +1312,12 @@ pub fn integerRank(ty: Type, comp: *const Compilation) usize {
.long_long, .ulong_long => 6 + (ty.bitSizeof(comp).? << 3),
.int128, .uint128 => 7 + (ty.bitSizeof(comp).? << 3),
.typeof_type => ty.data.sub_type.integerRank(comp),
.typeof_expr => ty.data.expr.ty.integerRank(comp),
.attributed => ty.data.attributed.base.integerRank(comp),
.@"enum" => real.data.@"enum".tag_ty.integerRank(comp),
else => unreachable,
});
}
@ -1322,25 +1325,26 @@ pub fn integerRank(ty: Type, comp: *const Compilation) usize {
/// Returns true if `a` and `b` are integer types that differ only in sign
pub fn sameRankDifferentSign(a: Type, b: Type, comp: *const Compilation) bool {
    // Both operands must be complete integer types before ranks are comparable.
    if (!(a.isInt() and b.isInt())) return false;
    if (a.hasIncompleteSize() or b.hasIncompleteSize()) return false;
    // Equal rank with opposite signedness is exactly "differ only in sign".
    return a.integerRank(comp) == b.integerRank(comp) and
        a.isUnsignedInt(comp) != b.isUnsignedInt(comp);
}
pub fn makeReal(ty: Type) Type {
// TODO discards attributed/typeof
var base = ty.canonicalize(.standard);
switch (base.specifier) {
.complex_float, .complex_double, .complex_long_double, .complex_float80, .complex_float128 => {
base.specifier = @enumFromInt(@intFromEnum(base.specifier) - 5);
return base;
var base_ty = ty.canonicalize(.standard);
switch (base_ty.specifier) {
.complex_float16, .complex_float, .complex_double, .complex_long_double, .complex_float128 => {
base_ty.specifier = @enumFromInt(@intFromEnum(base_ty.specifier) - 5);
return base_ty;
},
.complex_char, .complex_schar, .complex_uchar, .complex_short, .complex_ushort, .complex_int, .complex_uint, .complex_long, .complex_ulong, .complex_long_long, .complex_ulong_long, .complex_int128, .complex_uint128 => {
base.specifier = @enumFromInt(@intFromEnum(base.specifier) - 13);
return base;
base_ty.specifier = @enumFromInt(@intFromEnum(base_ty.specifier) - 13);
return base_ty;
},
.complex_bit_int => {
base.specifier = .bit_int;
return base;
base_ty.specifier = .bit_int;
return base_ty;
},
else => return ty,
}
@ -1348,19 +1352,19 @@ pub fn makeReal(ty: Type) Type {
pub fn makeComplex(ty: Type) Type {
// TODO discards attributed/typeof
var base = ty.canonicalize(.standard);
switch (base.specifier) {
.float, .double, .long_double, .float80, .float128 => {
base.specifier = @enumFromInt(@intFromEnum(base.specifier) + 5);
return base;
var base_ty = ty.canonicalize(.standard);
switch (base_ty.specifier) {
.float, .double, .long_double, .float128 => {
base_ty.specifier = @enumFromInt(@intFromEnum(base_ty.specifier) + 5);
return base_ty;
},
.char, .schar, .uchar, .short, .ushort, .int, .uint, .long, .ulong, .long_long, .ulong_long, .int128, .uint128 => {
base.specifier = @enumFromInt(@intFromEnum(base.specifier) + 13);
return base;
base_ty.specifier = @enumFromInt(@intFromEnum(base_ty.specifier) + 13);
return base_ty;
},
.bit_int => {
base.specifier = .complex_bit_int;
return base;
base_ty.specifier = .complex_bit_int;
return base_ty;
},
else => return ty,
}
@ -1541,13 +1545,12 @@ pub const Builder = struct {
float,
double,
long_double,
float80,
float128,
complex,
complex_float16,
complex_float,
complex_double,
complex_long_double,
complex_float80,
complex_float128,
pointer: *Type,
@ -1613,9 +1616,6 @@ pub const Builder = struct {
.int128 => "__int128",
.sint128 => "signed __int128",
.uint128 => "unsigned __int128",
.bit_int => "_BitInt",
.sbit_int => "signed _BitInt",
.ubit_int => "unsigned _BitInt",
.complex_char => "_Complex char",
.complex_schar => "_Complex signed char",
.complex_uchar => "_Complex unsigned char",
@ -1645,22 +1645,18 @@ pub const Builder = struct {
.complex_int128 => "_Complex __int128",
.complex_sint128 => "_Complex signed __int128",
.complex_uint128 => "_Complex unsigned __int128",
.complex_bit_int => "_Complex _BitInt",
.complex_sbit_int => "_Complex signed _BitInt",
.complex_ubit_int => "_Complex unsigned _BitInt",
.fp16 => "__fp16",
.float16 => "_Float16",
.float => "float",
.double => "double",
.long_double => "long double",
.float80 => "__float80",
.float128 => "__float128",
.complex => "_Complex",
.complex_float16 => "_Complex _Float16",
.complex_float => "_Complex float",
.complex_double => "_Complex double",
.complex_long_double => "_Complex long double",
.complex_float80 => "_Complex __float80",
.complex_float128 => "_Complex __float128",
.attributed => |attributed| Builder.fromType(attributed.base).str(langopts),
@ -1757,19 +1753,20 @@ pub const Builder = struct {
.complex_uint128 => ty.specifier = .complex_uint128,
.bit_int, .sbit_int, .ubit_int, .complex_bit_int, .complex_ubit_int, .complex_sbit_int => |bits| {
const unsigned = b.specifier == .ubit_int or b.specifier == .complex_ubit_int;
const complex_str = if (b.complex_tok != null) "_Complex " else "";
if (unsigned) {
if (bits < 1) {
try p.errStr(.unsigned_bit_int_too_small, b.bit_int_tok.?, b.specifier.str(p.comp.langopts).?);
try p.errStr(.unsigned_bit_int_too_small, b.bit_int_tok.?, complex_str);
return Type.invalid;
}
} else {
if (bits < 2) {
try p.errStr(.signed_bit_int_too_small, b.bit_int_tok.?, b.specifier.str(p.comp.langopts).?);
try p.errStr(.signed_bit_int_too_small, b.bit_int_tok.?, complex_str);
return Type.invalid;
}
}
if (bits > Compilation.bit_int_max_bits) {
try p.errStr(.bit_int_too_big, b.bit_int_tok.?, b.specifier.str(p.comp.langopts).?);
try p.errStr(if (unsigned) .unsigned_bit_int_too_big else .signed_bit_int_too_big, b.bit_int_tok.?, complex_str);
return Type.invalid;
}
ty.specifier = if (b.complex_tok != null) .complex_bit_int else .bit_int;
@ -1784,12 +1781,11 @@ pub const Builder = struct {
.float => ty.specifier = .float,
.double => ty.specifier = .double,
.long_double => ty.specifier = .long_double,
.float80 => ty.specifier = .float80,
.float128 => ty.specifier = .float128,
.complex_float16 => ty.specifier = .complex_float16,
.complex_float => ty.specifier = .complex_float,
.complex_double => ty.specifier = .complex_double,
.complex_long_double => ty.specifier = .complex_long_double,
.complex_float80 => ty.specifier = .complex_float80,
.complex_float128 => ty.specifier = .complex_float128,
.complex => {
try p.errTok(.plain_complex, p.tok_i - 1);
@ -1907,6 +1903,7 @@ pub const Builder = struct {
/// Try to combine type from typedef, returns true if successful.
pub fn combineTypedef(b: *Builder, p: *Parser, typedef_ty: Type, name_tok: TokenIndex) bool {
if (typedef_ty.is(.invalid)) return false;
b.error_on_invalid = true;
defer b.error_on_invalid = false;
@ -2094,6 +2091,7 @@ pub const Builder = struct {
},
.long => b.specifier = switch (b.specifier) {
.none => .long,
.double => .long_double,
.long => .long_long,
.unsigned => .ulong,
.signed => .long,
@ -2106,6 +2104,7 @@ pub const Builder = struct {
.complex_long => .complex_long_long,
.complex_slong => .complex_slong_long,
.complex_ulong => .complex_ulong_long,
.complex_double => .complex_long_double,
else => return b.cannotCombine(p, source_tok),
},
.int128 => b.specifier = switch (b.specifier) {
@ -2140,6 +2139,7 @@ pub const Builder = struct {
},
.float16 => b.specifier = switch (b.specifier) {
.none => .float16,
.complex => .complex_float16,
else => return b.cannotCombine(p, source_tok),
},
.float => b.specifier = switch (b.specifier) {
@ -2154,11 +2154,6 @@ pub const Builder = struct {
.complex => .complex_double,
else => return b.cannotCombine(p, source_tok),
},
.float80 => b.specifier = switch (b.specifier) {
.none => .float80,
.complex => .complex_float80,
else => return b.cannotCombine(p, source_tok),
},
.float128 => b.specifier = switch (b.specifier) {
.none => .float128,
.complex => .complex_float128,
@ -2166,10 +2161,10 @@ pub const Builder = struct {
},
.complex => b.specifier = switch (b.specifier) {
.none => .complex,
.float16 => .complex_float16,
.float => .complex_float,
.double => .complex_double,
.long_double => .complex_long_double,
.float80 => .complex_float80,
.float128 => .complex_float128,
.char => .complex_char,
.schar => .complex_schar,
@ -2207,7 +2202,6 @@ pub const Builder = struct {
.complex_float,
.complex_double,
.complex_long_double,
.complex_float80,
.complex_float128,
.complex_char,
.complex_schar,
@ -2294,13 +2288,12 @@ pub const Builder = struct {
.float16 => .float16,
.float => .float,
.double => .double,
.float80 => .float80,
.float128 => .float128,
.long_double => .long_double,
.complex_float16 => .complex_float16,
.complex_float => .complex_float,
.complex_double => .complex_double,
.complex_long_double => .complex_long_double,
.complex_float80 => .complex_float80,
.complex_float128 => .complex_float128,
.pointer => .{ .pointer = ty.data.sub_type },
@ -2350,22 +2343,30 @@ pub const Builder = struct {
}
};
/// Use with caution
/// Walks through typeof/attributed wrappers and returns a pointer to the
/// innermost underlying type.
pub fn base(ty: *Type) *Type {
    var cur = ty;
    // Iterative equivalent of recursing through each wrapper specifier.
    while (true) switch (cur.specifier) {
        .typeof_type => cur = cur.data.sub_type,
        .typeof_expr => cur = &cur.data.expr.ty,
        .attributed => cur = &cur.data.attributed.base,
        else => return cur,
    };
}
pub fn getAttribute(ty: Type, comptime tag: Attribute.Tag) ?Attribute.ArgumentsForTag(tag) {
switch (ty.specifier) {
.typeof_type => return ty.data.sub_type.getAttribute(tag),
.typeof_expr => return ty.data.expr.ty.getAttribute(tag),
.attributed => {
for (ty.data.attributed.attributes) |attribute| {
if (attribute.tag == tag) return @field(attribute.args, @tagName(tag));
}
return null;
},
else => return null,
if (tag == .aligned) @compileError("use requestedAlignment");
var it = Attribute.Iterator.initType(ty);
while (it.next()) |item| {
const attribute, _ = item;
if (attribute.tag == tag) return @field(attribute.args, @tagName(tag));
}
return null;
}
pub fn hasAttribute(ty: Type, tag: Attribute.Tag) bool {
for (ty.getAttributes()) |attr| {
var it = Attribute.Iterator.initType(ty);
while (it.next()) |item| {
const attr, _ = item;
if (attr.tag == tag) return true;
}
return false;
@ -2489,6 +2490,8 @@ fn printPrologue(ty: Type, mapper: StringInterner.TypeMapper, langopts: LangOpts
_ = try elem_ty.printPrologue(mapper, langopts, w);
try w.writeAll("' values)");
},
.bit_int => try w.print("{s} _BitInt({d})", .{ @tagName(ty.data.int.signedness), ty.data.int.bits }),
.complex_bit_int => try w.print("_Complex {s} _BitInt({d})", .{ @tagName(ty.data.int.signedness), ty.data.int.bits }),
else => try w.writeAll(Builder.fromType(ty).str(langopts).?),
}
return true;
@ -2644,15 +2647,12 @@ pub fn dump(ty: Type, mapper: StringInterner.TypeMapper, langopts: LangOpts, w:
.attributed => {
if (ty.isDecayed()) try w.writeAll("*d:");
try w.writeAll("attributed(");
try ty.data.attributed.base.dump(mapper, langopts, w);
try ty.data.attributed.base.canonicalize(.standard).dump(mapper, langopts, w);
try w.writeAll(")");
},
else => {
try w.writeAll(Builder.fromType(ty).str(langopts).?);
if (ty.specifier == .bit_int or ty.specifier == .complex_bit_int) {
try w.print("({d})", .{ty.data.int.bits});
}
},
.bit_int => try w.print("{s} _BitInt({d})", .{ @tagName(ty.data.int.signedness), ty.data.int.bits }),
.complex_bit_int => try w.print("_Complex {s} _BitInt({d})", .{ @tagName(ty.data.int.signedness), ty.data.int.bits }),
else => try w.writeAll(Builder.fromType(ty).str(langopts).?),
}
}

View File

@ -8,6 +8,7 @@ const BigIntSpace = Interner.Tag.Int.BigIntSpace;
const Compilation = @import("Compilation.zig");
const Type = @import("Type.zig");
const target_util = @import("target.zig");
const annex_g = @import("annex_g.zig");
const Value = @This();
@ -41,6 +42,14 @@ pub fn is(v: Value, tag: std.meta.Tag(Interner.Key), comp: *const Compilation) b
return comp.interner.get(v.ref()) == tag;
}
/// Whether `v` holds an arithmetic constant (integer, real float, or
/// complex float). An absent (`.none`) value is not arithmetic.
pub fn isArithmetic(v: Value, comp: *const Compilation) bool {
    if (v.opt_ref == .none) return false;
    switch (comp.interner.get(v.ref())) {
        .int, .float, .complex => return true,
        else => return false,
    }
}
/// Number of bits needed to hold `v`.
/// Asserts that `v` is not negative
pub fn minUnsignedBits(v: Value, comp: *const Compilation) usize {
@ -58,7 +67,7 @@ test "minUnsignedBits" {
}
};
var comp = Compilation.init(std.testing.allocator);
var comp = Compilation.init(std.testing.allocator, std.fs.cwd());
defer comp.deinit();
const target_query = try std.Target.Query.parse(.{ .arch_os_abi = "x86_64-linux-gnu" });
comp.target = try std.zig.system.resolveTargetQuery(target_query);
@ -93,7 +102,7 @@ test "minSignedBits" {
}
};
var comp = Compilation.init(std.testing.allocator);
var comp = Compilation.init(std.testing.allocator, std.fs.cwd());
defer comp.deinit();
const target_query = try std.Target.Query.parse(.{ .arch_os_abi = "x86_64-linux-gnu" });
comp.target = try std.zig.system.resolveTargetQuery(target_query);
@ -134,7 +143,7 @@ pub fn floatToInt(v: *Value, dest_ty: Type, comp: *Compilation) !FloatToIntChang
v.* = fromBool(!was_zero);
if (was_zero or was_one) return .none;
return .value_changed;
} else if (dest_ty.isUnsignedInt(comp) and v.compare(.lt, zero, comp)) {
} else if (dest_ty.isUnsignedInt(comp) and float_val < 0) {
v.* = zero;
return .out_of_range;
}
@ -154,7 +163,7 @@ pub fn floatToInt(v: *Value, dest_ty: Type, comp: *Compilation) !FloatToIntChang
};
// The float is reduced in rational.setFloat, so we assert that denominator is equal to one
const big_one = std.math.big.int.Const{ .limbs = &.{1}, .positive = true };
const big_one = BigIntConst{ .limbs = &.{1}, .positive = true };
assert(rational.q.toConst().eqlAbs(big_one));
if (is_negative) {
@ -179,6 +188,20 @@ pub fn floatToInt(v: *Value, dest_ty: Type, comp: *Compilation) !FloatToIntChang
/// `.none` value remains unchanged.
pub fn intToFloat(v: *Value, dest_ty: Type, comp: *Compilation) !void {
if (v.opt_ref == .none) return;
if (dest_ty.isComplex()) {
const bits = dest_ty.bitSizeof(comp).?;
const cf: Interner.Key.Complex = switch (bits) {
32 => .{ .cf16 = .{ v.toFloat(f16, comp), 0 } },
64 => .{ .cf32 = .{ v.toFloat(f32, comp), 0 } },
128 => .{ .cf64 = .{ v.toFloat(f64, comp), 0 } },
160 => .{ .cf80 = .{ v.toFloat(f80, comp), 0 } },
256 => .{ .cf128 = .{ v.toFloat(f128, comp), 0 } },
else => unreachable,
};
v.* = try intern(comp, .{ .complex = cf });
return;
}
const bits = dest_ty.bitSizeof(comp).?;
return switch (comp.interner.get(v.ref()).int) {
inline .u64, .i64 => |data| {
@ -207,40 +230,89 @@ pub fn intToFloat(v: *Value, dest_ty: Type, comp: *Compilation) !void {
};
}
/// Describes how (if at all) a value was altered by `intCast` when
/// converting to the destination integer type.
pub const IntCastChangeKind = enum {
    /// value did not change
    none,
    /// Truncation occurred (e.g., i32 to i16)
    truncated,
    /// Sign conversion occurred (e.g., i32 to u32)
    sign_changed,
};
/// Truncates or extends bits based on type.
/// `.none` value remains unchanged.
pub fn intCast(v: *Value, dest_ty: Type, comp: *Compilation) !void {
if (v.opt_ref == .none) return;
const bits: usize = @intCast(dest_ty.bitSizeof(comp).?);
pub fn intCast(v: *Value, dest_ty: Type, comp: *Compilation) !IntCastChangeKind {
if (v.opt_ref == .none) return .none;
const dest_bits: usize = @intCast(dest_ty.bitSizeof(comp).?);
const dest_signed = dest_ty.signedness(comp) == .signed;
var space: BigIntSpace = undefined;
const big = v.toBigInt(&space, comp);
const value_bits = big.bitCountTwosComp();
// if big is negative, then is signed.
const src_signed = !big.positive;
const sign_change = src_signed != dest_signed;
const limbs = try comp.gpa.alloc(
std.math.big.Limb,
std.math.big.int.calcTwosCompLimbCount(@max(big.bitCountTwosComp(), bits)),
std.math.big.int.calcTwosCompLimbCount(@max(value_bits, dest_bits)),
);
defer comp.gpa.free(limbs);
var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined };
result_bigint.truncate(big, dest_ty.signedness(comp), bits);
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
result_bigint.truncate(big, dest_ty.signedness(comp), dest_bits);
v.* = try intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } });
const truncation_occurred = value_bits > dest_bits;
if (truncation_occurred) {
return .truncated;
} else if (sign_change) {
return .sign_changed;
} else {
return .none;
}
}
/// Converts the stored value to a float of the specified type
/// `.none` value remains unchanged.
pub fn floatCast(v: *Value, dest_ty: Type, comp: *Compilation) !void {
if (v.opt_ref == .none) return;
// TODO complex values
const bits = dest_ty.makeReal().bitSizeof(comp).?;
const f: Interner.Key.Float = switch (bits) {
16 => .{ .f16 = v.toFloat(f16, comp) },
32 => .{ .f32 = v.toFloat(f32, comp) },
64 => .{ .f64 = v.toFloat(f64, comp) },
80 => .{ .f80 = v.toFloat(f80, comp) },
128 => .{ .f128 = v.toFloat(f128, comp) },
const bits = dest_ty.bitSizeof(comp).?;
if (dest_ty.isComplex()) {
const cf: Interner.Key.Complex = switch (bits) {
32 => .{ .cf16 = .{ v.toFloat(f16, comp), v.imag(f16, comp) } },
64 => .{ .cf32 = .{ v.toFloat(f32, comp), v.imag(f32, comp) } },
128 => .{ .cf64 = .{ v.toFloat(f64, comp), v.imag(f64, comp) } },
160 => .{ .cf80 = .{ v.toFloat(f80, comp), v.imag(f80, comp) } },
256 => .{ .cf128 = .{ v.toFloat(f128, comp), v.imag(f128, comp) } },
else => unreachable,
};
v.* = try intern(comp, .{ .complex = cf });
} else {
const f: Interner.Key.Float = switch (bits) {
16 => .{ .f16 = v.toFloat(f16, comp) },
32 => .{ .f32 = v.toFloat(f32, comp) },
64 => .{ .f64 = v.toFloat(f64, comp) },
80 => .{ .f80 = v.toFloat(f80, comp) },
128 => .{ .f128 = v.toFloat(f128, comp) },
else => unreachable,
};
v.* = try intern(comp, .{ .float = f });
}
}
pub fn imag(v: Value, comptime T: type, comp: *const Compilation) T {
return switch (comp.interner.get(v.ref())) {
.int => 0.0,
.float => 0.0,
.complex => |repr| switch (repr) {
inline else => |components| return @floatCast(components[1]),
},
else => unreachable,
};
v.* = try intern(comp, .{ .float = f });
}
pub fn toFloat(v: Value, comptime T: type, comp: *const Compilation) T {
@ -252,6 +324,39 @@ pub fn toFloat(v: Value, comptime T: type, comp: *const Compilation) T {
.float => |repr| switch (repr) {
inline else => |data| @floatCast(data),
},
.complex => |repr| switch (repr) {
inline else => |components| @floatCast(components[0]),
},
else => unreachable,
};
}
/// Extract the real component of `v` as an interned float value.
/// Integer and real float values are returned unchanged; a `.none`
/// value is passed through as-is.
pub fn realPart(v: Value, comp: *Compilation) !Value {
    if (v.opt_ref == .none) return v;
    switch (comp.interner.get(v.ref())) {
        // Already purely real; nothing to extract.
        .int, .float => return v,
        .complex => |repr| {
            // Component [0] is the real part of the stored pair.
            const f: Interner.Key.Float = switch (repr) {
                .cf16 => |c| .{ .f16 = c[0] },
                .cf32 => |c| .{ .f32 = c[0] },
                .cf64 => |c| .{ .f64 = c[0] },
                .cf80 => |c| .{ .f80 = c[0] },
                .cf128 => |c| .{ .f128 = c[0] },
            };
            return Value.intern(comp, .{ .float = f });
        },
        else => unreachable,
    }
}
/// Extract the imaginary component of `v` as an interned float value.
/// Integer and real float values have an imaginary part of zero; a
/// `.none` value is passed through as-is.
pub fn imaginaryPart(v: Value, comp: *Compilation) !Value {
    if (v.opt_ref == .none) return v;
    switch (comp.interner.get(v.ref())) {
        // Purely real values have no imaginary component.
        .int, .float => return Value.zero,
        .complex => |repr| {
            // Component [1] is the imaginary part of the stored pair.
            const f: Interner.Key.Float = switch (repr) {
                .cf16 => |c| .{ .f16 = c[1] },
                .cf32 => |c| .{ .f32 = c[1] },
                .cf64 => |c| .{ .f64 = c[1] },
                .cf80 => |c| .{ .f80 = c[1] },
                .cf128 => |c| .{ .f128 = c[1] },
            };
            return Value.intern(comp, .{ .float = f });
        },
        else => unreachable,
    }
}
@ -298,11 +403,56 @@ pub fn isZero(v: Value, comp: *const Compilation) bool {
inline .i64, .u64 => |data| return data == 0,
.big_int => |data| return data.eqlZero(),
},
.complex => |repr| switch (repr) {
inline else => |data| return data[0] == 0.0 and data[1] == 0.0,
},
.bytes => return false,
else => unreachable,
}
}
/// Classification result of `isInfSign`. Backed by i32 so the signed
/// infinities carry their sign as -1/+1, with `unknown` kept out of the
/// -1..1 range for non-float or absent values.
const IsInfKind = enum(i32) {
    negative = -1,
    finite = 0,
    positive = 1,
    unknown = std.math.maxInt(i32),
};
/// Classify a real float value as positive infinity, negative infinity,
/// or finite. Non-float and absent values yield `.unknown`.
pub fn isInfSign(v: Value, comp: *const Compilation) IsInfKind {
    if (v.opt_ref == .none) return .unknown;
    switch (comp.interner.get(v.ref())) {
        .float => |repr| switch (repr) {
            inline else => |data| {
                if (std.math.isPositiveInf(data)) return .positive;
                if (std.math.isNegativeInf(data)) return .negative;
                return .finite;
            },
        },
        else => return .unknown,
    }
}
/// True if `v` is a floating value containing an infinity; for complex
/// values either component counts. Non-float and absent values are not
/// considered infinite.
pub fn isInf(v: Value, comp: *const Compilation) bool {
    if (v.opt_ref == .none) return false;
    switch (comp.interner.get(v.ref())) {
        .float => |repr| switch (repr) {
            inline else => |data| return std.math.isInf(data),
        },
        .complex => |repr| switch (repr) {
            inline else => |parts| return std.math.isInf(parts[0]) or std.math.isInf(parts[1]),
        },
        else => return false,
    }
}
/// True if `v` is a floating value containing a NaN; for complex values
/// either component counts. Non-float and absent values are never NaN.
pub fn isNan(v: Value, comp: *const Compilation) bool {
    if (v.opt_ref == .none) return false;
    switch (comp.interner.get(v.ref())) {
        .float => |repr| switch (repr) {
            inline else => |data| return std.math.isNan(data),
        },
        .complex => |repr| switch (repr) {
            inline else => |parts| return std.math.isNan(parts[0]) or std.math.isNan(parts[1]),
        },
        else => return false,
    }
}
/// Converts value to zero or one;
/// `.none` value remains unchanged.
pub fn boolCast(v: *Value, comp: *const Compilation) void {
@ -326,9 +476,45 @@ pub fn toInt(v: Value, comptime T: type, comp: *const Compilation) ?T {
return big_int.to(T) catch null;
}
/// Arithmetic operation selector used by `complexAddSub`.
const ComplexOp = enum {
    add,
    sub,
};
/// Component-wise complex addition/subtraction of `lhs` and `rhs` at
/// floating-point precision `T`, interned as the matching complex key.
fn complexAddSub(lhs: Value, rhs: Value, comptime T: type, op: ComplexOp, comp: *Compilation) !Value {
    const lhs_re = lhs.toFloat(T, comp);
    const lhs_im = lhs.imag(T, comp);
    const rhs_re = rhs.toFloat(T, comp);
    const rhs_im = rhs.imag(T, comp);
    // Real and imaginary parts are combined independently.
    const re, const im = switch (op) {
        .add => .{ lhs_re + rhs_re, lhs_im + rhs_im },
        .sub => .{ lhs_re - rhs_re, lhs_im - rhs_im },
    };
    const cf: Interner.Key.Complex = switch (T) {
        f16 => .{ .cf16 = .{ re, im } },
        f32 => .{ .cf32 = .{ re, im } },
        f64 => .{ .cf64 = .{ re, im } },
        f80 => .{ .cf80 = .{ re, im } },
        f128 => .{ .cf128 = .{ re, im } },
        else => unreachable,
    };
    return intern(comp, .{ .complex = cf });
}
pub fn add(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !bool {
const bits: usize = @intCast(ty.bitSizeof(comp).?);
if (ty.isFloat()) {
if (ty.isComplex()) {
res.* = switch (bits) {
32 => try complexAddSub(lhs, rhs, f16, .add, comp),
64 => try complexAddSub(lhs, rhs, f32, .add, comp),
128 => try complexAddSub(lhs, rhs, f64, .add, comp),
160 => try complexAddSub(lhs, rhs, f80, .add, comp),
256 => try complexAddSub(lhs, rhs, f128, .add, comp),
else => unreachable,
};
return false;
}
const f: Interner.Key.Float = switch (bits) {
16 => .{ .f16 = lhs.toFloat(f16, comp) + rhs.toFloat(f16, comp) },
32 => .{ .f32 = lhs.toFloat(f32, comp) + rhs.toFloat(f32, comp) },
@ -350,7 +536,7 @@ pub fn add(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !b
std.math.big.int.calcTwosCompLimbCount(bits),
);
defer comp.gpa.free(limbs);
var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined };
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
const overflowed = result_bigint.addWrap(lhs_bigint, rhs_bigint, ty.signedness(comp), bits);
res.* = try intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } });
@ -361,6 +547,17 @@ pub fn add(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !b
pub fn sub(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !bool {
const bits: usize = @intCast(ty.bitSizeof(comp).?);
if (ty.isFloat()) {
if (ty.isComplex()) {
res.* = switch (bits) {
32 => try complexAddSub(lhs, rhs, f16, .sub, comp),
64 => try complexAddSub(lhs, rhs, f32, .sub, comp),
128 => try complexAddSub(lhs, rhs, f64, .sub, comp),
160 => try complexAddSub(lhs, rhs, f80, .sub, comp),
256 => try complexAddSub(lhs, rhs, f128, .sub, comp),
else => unreachable,
};
return false;
}
const f: Interner.Key.Float = switch (bits) {
16 => .{ .f16 = lhs.toFloat(f16, comp) - rhs.toFloat(f16, comp) },
32 => .{ .f32 = lhs.toFloat(f32, comp) - rhs.toFloat(f32, comp) },
@ -382,7 +579,7 @@ pub fn sub(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !b
std.math.big.int.calcTwosCompLimbCount(bits),
);
defer comp.gpa.free(limbs);
var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined };
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
const overflowed = result_bigint.subWrap(lhs_bigint, rhs_bigint, ty.signedness(comp), bits);
res.* = try intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } });
@ -393,6 +590,18 @@ pub fn sub(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !b
pub fn mul(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !bool {
const bits: usize = @intCast(ty.bitSizeof(comp).?);
if (ty.isFloat()) {
if (ty.isComplex()) {
const cf: Interner.Key.Complex = switch (bits) {
32 => .{ .cf16 = annex_g.complexFloatMul(f16, lhs.toFloat(f16, comp), lhs.imag(f16, comp), rhs.toFloat(f16, comp), rhs.imag(f16, comp)) },
64 => .{ .cf32 = annex_g.complexFloatMul(f32, lhs.toFloat(f32, comp), lhs.imag(f32, comp), rhs.toFloat(f32, comp), rhs.imag(f32, comp)) },
128 => .{ .cf64 = annex_g.complexFloatMul(f64, lhs.toFloat(f64, comp), lhs.imag(f64, comp), rhs.toFloat(f64, comp), rhs.imag(f64, comp)) },
160 => .{ .cf80 = annex_g.complexFloatMul(f80, lhs.toFloat(f80, comp), lhs.imag(f80, comp), rhs.toFloat(f80, comp), rhs.imag(f80, comp)) },
256 => .{ .cf128 = annex_g.complexFloatMul(f128, lhs.toFloat(f128, comp), lhs.imag(f128, comp), rhs.toFloat(f128, comp), rhs.imag(f128, comp)) },
else => unreachable,
};
res.* = try intern(comp, .{ .complex = cf });
return false;
}
const f: Interner.Key.Float = switch (bits) {
16 => .{ .f16 = lhs.toFloat(f16, comp) * rhs.toFloat(f16, comp) },
32 => .{ .f32 = lhs.toFloat(f32, comp) * rhs.toFloat(f32, comp) },
@ -438,6 +647,18 @@ pub fn mul(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !b
pub fn div(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !bool {
const bits: usize = @intCast(ty.bitSizeof(comp).?);
if (ty.isFloat()) {
if (ty.isComplex()) {
const cf: Interner.Key.Complex = switch (bits) {
32 => .{ .cf16 = annex_g.complexFloatDiv(f16, lhs.toFloat(f16, comp), lhs.imag(f16, comp), rhs.toFloat(f16, comp), rhs.imag(f16, comp)) },
64 => .{ .cf32 = annex_g.complexFloatDiv(f32, lhs.toFloat(f32, comp), lhs.imag(f32, comp), rhs.toFloat(f32, comp), rhs.imag(f32, comp)) },
128 => .{ .cf64 = annex_g.complexFloatDiv(f64, lhs.toFloat(f64, comp), lhs.imag(f64, comp), rhs.toFloat(f64, comp), rhs.imag(f64, comp)) },
160 => .{ .cf80 = annex_g.complexFloatDiv(f80, lhs.toFloat(f80, comp), lhs.imag(f80, comp), rhs.toFloat(f80, comp), rhs.imag(f80, comp)) },
256 => .{ .cf128 = annex_g.complexFloatDiv(f128, lhs.toFloat(f128, comp), lhs.imag(f128, comp), rhs.toFloat(f128, comp), rhs.imag(f128, comp)) },
else => unreachable,
};
res.* = try intern(comp, .{ .complex = cf });
return false;
}
const f: Interner.Key.Float = switch (bits) {
16 => .{ .f16 = lhs.toFloat(f16, comp) / rhs.toFloat(f16, comp) },
32 => .{ .f32 = lhs.toFloat(f32, comp) / rhs.toFloat(f32, comp) },
@ -491,11 +712,11 @@ pub fn rem(lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !Value {
const signedness = ty.signedness(comp);
if (signedness == .signed) {
var spaces: [3]BigIntSpace = undefined;
const min_val = BigIntMutable.init(&spaces[0].limbs, ty.minInt(comp)).toConst();
const negative = BigIntMutable.init(&spaces[1].limbs, -1).toConst();
const big_one = BigIntMutable.init(&spaces[2].limbs, 1).toConst();
if (lhs_bigint.eql(min_val) and rhs_bigint.eql(negative)) {
var spaces: [2]BigIntSpace = undefined;
const min_val = try Value.minInt(ty, comp);
const negative = BigIntMutable.init(&spaces[0].limbs, -1).toConst();
const big_one = BigIntMutable.init(&spaces[1].limbs, 1).toConst();
if (lhs.compare(.eq, min_val, comp) and rhs_bigint.eql(negative)) {
return .{};
} else if (rhs_bigint.order(big_one).compare(.lt)) {
// lhs - @divTrunc(lhs, rhs) * rhs
@ -542,7 +763,7 @@ pub fn bitOr(lhs: Value, rhs: Value, comp: *Compilation) !Value {
@max(lhs_bigint.limbs.len, rhs_bigint.limbs.len),
);
defer comp.gpa.free(limbs);
var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined };
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
result_bigint.bitOr(lhs_bigint, rhs_bigint);
return intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } });
@ -554,12 +775,13 @@ pub fn bitXor(lhs: Value, rhs: Value, comp: *Compilation) !Value {
const lhs_bigint = lhs.toBigInt(&lhs_space, comp);
const rhs_bigint = rhs.toBigInt(&rhs_space, comp);
const extra = @intFromBool(lhs_bigint.positive != rhs_bigint.positive);
const limbs = try comp.gpa.alloc(
std.math.big.Limb,
@max(lhs_bigint.limbs.len, rhs_bigint.limbs.len),
@max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + extra,
);
defer comp.gpa.free(limbs);
var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined };
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
result_bigint.bitXor(lhs_bigint, rhs_bigint);
return intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } });
@ -571,12 +793,18 @@ pub fn bitAnd(lhs: Value, rhs: Value, comp: *Compilation) !Value {
const lhs_bigint = lhs.toBigInt(&lhs_space, comp);
const rhs_bigint = rhs.toBigInt(&rhs_space, comp);
const limbs = try comp.gpa.alloc(
std.math.big.Limb,
@max(lhs_bigint.limbs.len, rhs_bigint.limbs.len),
);
const limb_count = if (lhs_bigint.positive and rhs_bigint.positive)
@min(lhs_bigint.limbs.len, rhs_bigint.limbs.len)
else if (lhs_bigint.positive)
lhs_bigint.limbs.len
else if (rhs_bigint.positive)
rhs_bigint.limbs.len
else
@max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1;
const limbs = try comp.gpa.alloc(std.math.big.Limb, limb_count);
defer comp.gpa.free(limbs);
var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined };
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
result_bigint.bitAnd(lhs_bigint, rhs_bigint);
return intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } });
@ -592,7 +820,7 @@ pub fn bitNot(val: Value, ty: Type, comp: *Compilation) !Value {
std.math.big.int.calcTwosCompLimbCount(bits),
);
defer comp.gpa.free(limbs);
var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined };
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
result_bigint.bitNotWrap(val_bigint, ty.signedness(comp), bits);
return intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } });
@ -606,9 +834,9 @@ pub fn shl(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !b
const bits: usize = @intCast(ty.bitSizeof(comp).?);
if (shift > bits) {
if (lhs_bigint.positive) {
res.* = try intern(comp, .{ .int = .{ .u64 = ty.maxInt(comp) } });
res.* = try Value.maxInt(ty, comp);
} else {
res.* = try intern(comp, .{ .int = .{ .i64 = ty.minInt(comp) } });
res.* = try Value.minInt(ty, comp);
}
return true;
}
@ -618,7 +846,7 @@ pub fn shl(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !b
lhs_bigint.limbs.len + (shift / (@sizeOf(std.math.big.Limb) * 8)) + 1,
);
defer comp.gpa.free(limbs);
var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined };
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
result_bigint.shiftLeft(lhs_bigint, shift);
const signedness = ty.signedness(comp);
@ -652,12 +880,25 @@ pub fn shr(lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !Value {
std.math.big.int.calcTwosCompLimbCount(bits),
);
defer comp.gpa.free(limbs);
var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined };
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
result_bigint.shiftRight(lhs_bigint, shift);
return intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } });
}
/// Complex conjugate of `val`: the real component is kept and the
/// imaginary component negated. `ty`'s bit size selects the complex width.
pub fn complexConj(val: Value, ty: Type, comp: *Compilation) !Value {
    const conjugated: Interner.Key.Complex = switch (ty.bitSizeof(comp).?) {
        32 => .{ .cf16 = .{ val.toFloat(f16, comp), -val.imag(f16, comp) } },
        64 => .{ .cf32 = .{ val.toFloat(f32, comp), -val.imag(f32, comp) } },
        128 => .{ .cf64 = .{ val.toFloat(f64, comp), -val.imag(f64, comp) } },
        160 => .{ .cf80 = .{ val.toFloat(f80, comp), -val.imag(f80, comp) } },
        256 => .{ .cf128 = .{ val.toFloat(f128, comp), -val.imag(f128, comp) } },
        else => unreachable,
    };
    return intern(comp, .{ .complex = conjugated });
}
pub fn compare(lhs: Value, op: std.math.CompareOperator, rhs: Value, comp: *const Compilation) bool {
if (op == .eq) {
return lhs.opt_ref == rhs.opt_ref;
@ -672,6 +913,12 @@ pub fn compare(lhs: Value, op: std.math.CompareOperator, rhs: Value, comp: *cons
const rhs_f128 = rhs.toFloat(f128, comp);
return std.math.compare(lhs_f128, op, rhs_f128);
}
if (lhs_key == .complex or rhs_key == .complex) {
assert(op == .neq);
const real_equal = std.math.compare(lhs.toFloat(f128, comp), .eq, rhs.toFloat(f128, comp));
const imag_equal = std.math.compare(lhs.imag(f128, comp), .eq, rhs.imag(f128, comp));
return !real_equal or !imag_equal;
}
var lhs_bigint_space: BigIntSpace = undefined;
var rhs_bigint_space: BigIntSpace = undefined;
@ -680,6 +927,42 @@ pub fn compare(lhs: Value, op: std.math.CompareOperator, rhs: Value, comp: *cons
return lhs_bigint.order(rhs_bigint).compare(op);
}
/// Builds the two's-complement `limit` (`.min` or `.max`) of integer type `ty`
/// as an interned `Value`. Allocates temporary big-int limbs for widths that
/// do not fit a native integer.
fn twosCompIntLimit(limit: std.math.big.int.TwosCompIntLimit, ty: Type, comp: *Compilation) !Value {
    const signedness = ty.signedness(comp);
    // The minimum of any unsigned type is zero; no computation needed.
    if (limit == .min and signedness == .unsigned) return Value.zero;
    const mag_bits: usize = @intCast(ty.bitSizeof(comp).?);
    switch (mag_bits) {
        // Fast path: common widths are materialized at comptime via
        // std.meta.Int, avoiding any big-int allocation.
        inline 8, 16, 32, 64 => |bits| {
            if (limit == .min) return Value.int(@as(i64, std.math.minInt(std.meta.Int(.signed, bits))), comp);
            return switch (signedness) {
                inline else => |sign| Value.int(std.math.maxInt(std.meta.Int(sign, bits)), comp),
            };
        },
        else => {},
    }
    // Slow path: arbitrary bit widths go through std.math.big.
    const sign_bits = @intFromBool(signedness == .signed);
    // NOTE(review): limb space is sized for mag_bits + 1 when signed, while
    // setTwosCompIntLimit below is called with mag_bits; the extra bit
    // over-allocates slightly but looks harmless — confirm intent.
    const total_bits = mag_bits + sign_bits;
    const limbs = try comp.gpa.alloc(
        std.math.big.Limb,
        std.math.big.int.calcTwosCompLimbCount(total_bits),
    );
    defer comp.gpa.free(limbs);
    // positive/len are undefined because setTwosCompIntLimit fully
    // initializes the mutable big int.
    var result_bigint: BigIntMutable = .{ .limbs = limbs, .positive = undefined, .len = undefined };
    result_bigint.setTwosCompIntLimit(limit, signedness, mag_bits);
    return Value.intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } });
}
/// Smallest representable value of integer type `ty`, as an interned Value.
pub fn minInt(ty: Type, comp: *Compilation) !Value {
    return twosCompIntLimit(.min, ty, comp);
}
/// Largest representable value of integer type `ty`, as an interned Value.
pub fn maxInt(ty: Type, comp: *Compilation) !Value {
    return twosCompIntLimit(.max, ty, comp);
}
pub fn print(v: Value, ty: Type, comp: *const Compilation, w: anytype) @TypeOf(w).Error!void {
if (ty.is(.bool)) {
return w.writeAll(if (v.isZero(comp)) "false" else "true");
@ -696,6 +979,10 @@ pub fn print(v: Value, ty: Type, comp: *const Compilation, w: anytype) @TypeOf(w
inline else => |x| return w.print("{d}", .{@as(f64, @floatCast(x))}),
},
.bytes => |b| return printString(b, ty, comp, w),
.complex => |repr| switch (repr) {
.cf32 => |components| return w.print("{d} + {d}i", .{ @round(@as(f64, @floatCast(components[0])) * 1000000) / 1000000, @round(@as(f64, @floatCast(components[1])) * 1000000) / 1000000 }),
inline else => |components| return w.print("{d} + {d}i", .{ @as(f64, @floatCast(components[0])), @as(f64, @floatCast(components[1])) }),
},
else => unreachable, // not a value
}
}
@ -703,26 +990,44 @@ pub fn print(v: Value, ty: Type, comp: *const Compilation, w: anytype) @TypeOf(w
pub fn printString(bytes: []const u8, ty: Type, comp: *const Compilation, w: anytype) @TypeOf(w).Error!void {
const size: Compilation.CharUnitSize = @enumFromInt(ty.elemType().sizeof(comp).?);
const without_null = bytes[0 .. bytes.len - @intFromEnum(size)];
try w.writeByte('"');
switch (size) {
inline .@"1", .@"2" => |sz| {
const data_slice: []const sz.Type() = @alignCast(std.mem.bytesAsSlice(sz.Type(), without_null));
const formatter = if (sz == .@"1") std.zig.fmtEscapes(data_slice) else std.unicode.fmtUtf16Le(data_slice);
try w.print("\"{}\"", .{formatter});
},
.@"4" => {
try w.writeByte('"');
const data_slice = std.mem.bytesAsSlice(u32, without_null);
var buf: [4]u8 = undefined;
for (data_slice) |item| {
if (item <= std.math.maxInt(u21) and std.unicode.utf8ValidCodepoint(@intCast(item))) {
const codepoint: u21 = @intCast(item);
const written = std.unicode.utf8Encode(codepoint, &buf) catch unreachable;
try w.print("{s}", .{buf[0..written]});
.@"1" => try w.print("{}", .{std.zig.fmtEscapes(without_null)}),
.@"2" => {
var items: [2]u16 = undefined;
var i: usize = 0;
while (i < without_null.len) {
@memcpy(std.mem.sliceAsBytes(items[0..1]), without_null[i..][0..2]);
i += 2;
const is_surrogate = std.unicode.utf16IsHighSurrogate(items[0]);
if (is_surrogate and i < without_null.len) {
@memcpy(std.mem.sliceAsBytes(items[1..2]), without_null[i..][0..2]);
if (std.unicode.utf16DecodeSurrogatePair(&items)) |decoded| {
i += 2;
try w.print("{u}", .{decoded});
} else |_| {
try w.print("\\x{x}", .{items[0]});
}
} else if (is_surrogate) {
try w.print("\\x{x}", .{items[0]});
} else {
try w.print("\\x{x}", .{item});
try w.print("{u}", .{items[0]});
}
}
},
.@"4" => {
var item: [1]u32 = undefined;
const data_slice = std.mem.sliceAsBytes(item[0..1]);
for (0..@divExact(without_null.len, 4)) |n| {
@memcpy(data_slice, without_null[n * 4 ..][0..4]);
if (item[0] <= std.math.maxInt(u21) and std.unicode.utf8ValidCodepoint(@intCast(item[0]))) {
const codepoint: u21 = @intCast(item[0]);
try w.print("{u}", .{codepoint});
} else {
try w.print("\\x{x}", .{item[0]});
}
}
try w.writeByte('"');
},
}
try w.writeByte('"');
}

View File

@ -0,0 +1,118 @@
//! Complex arithmetic algorithms from C99 Annex G
const std = @import("std");
const copysign = std.math.copysign;
const ilogb = std.math.ilogb;
const inf = std.math.inf;
const isFinite = std.math.isFinite;
const isInf = std.math.isInf;
const isNan = std.math.isNan;
const isPositiveZero = std.math.isPositiveZero;
const scalbn = std.math.scalbn;
/// Computes the complex floating point product z*w per C99 Annex G, where
/// a_param, b_param are the real and imaginary parts of z and c_param,
/// d_param are the real and imaginary parts of w.
/// Unlike the naive formula, infinite operands and products that overflow to
/// infinity are recovered instead of yielding NaN + NaNi.
pub fn complexFloatMul(comptime T: type, a_param: T, b_param: T, c_param: T, d_param: T) [2]T {
    var a = a_param;
    var b = b_param;
    var c = c_param;
    var d = d_param;
    const ac = a * c;
    const bd = b * d;
    const ad = a * d;
    const bc = b * c;
    var x = ac - bd;
    var y = ad + bc;
    // Only when both parts are NaN do we attempt the Annex G recovery.
    if (isNan(x) and isNan(y)) {
        var recalc = false;
        if (isInf(a) or isInf(b)) {
            // lhs infinite.
            // Box the infinity (to ±1) and change NaNs in the other factor to 0.
            a = copysign(if (isInf(a)) @as(T, 1.0) else @as(T, 0.0), a);
            b = copysign(if (isInf(b)) @as(T, 1.0) else @as(T, 0.0), b);
            if (isNan(c)) c = copysign(@as(T, 0.0), c);
            if (isNan(d)) d = copysign(@as(T, 0.0), d);
            recalc = true;
        }
        if (isInf(c) or isInf(d)) {
            // rhs infinite.
            // Box the infinity and change NaNs in the other factor to 0.
            c = copysign(if (isInf(c)) @as(T, 1.0) else @as(T, 0.0), c);
            d = copysign(if (isInf(d)) @as(T, 1.0) else @as(T, 0.0), d);
            if (isNan(a)) a = copysign(@as(T, 0.0), a);
            if (isNan(b)) b = copysign(@as(T, 0.0), b);
            recalc = true;
        }
        if (!recalc and (isInf(ac) or isInf(bd) or isInf(ad) or isInf(bc))) {
            // Recover infinities from overflow by changing NaNs to 0.
            if (isNan(a)) a = copysign(@as(T, 0.0), a);
            if (isNan(b)) b = copysign(@as(T, 0.0), b);
            if (isNan(c)) c = copysign(@as(T, 0.0), c);
            if (isNan(d)) d = copysign(@as(T, 0.0), d);
            // FIX: the recompute flag must be set here; previously the zeroed
            // NaNs above were dead stores and the function still returned
            // NaN + NaNi for overflowed products, contrary to C99 Annex G
            // (see compiler-rt __muldc3).
            recalc = true;
        }
        if (recalc) {
            x = inf(T) * (a * c - b * d);
            y = inf(T) * (a * d + b * c);
        }
    }
    return .{ x, y };
}
/// Computes the complex floating point quotient z / w per C99 Annex G, where
/// a_param, b_param are the real and imaginary parts of z and c_param,
/// d_param are the real and imaginary parts of w.
pub fn complexFloatDiv(comptime T: type, a_param: T, b_param: T, c_param: T, d_param: T) [2]T {
    var a = a_param;
    var b = b_param;
    var c = c_param;
    var d = d_param;
    // Scale the denominator by a power of two so that c*c + d*d is less
    // likely to overflow/underflow; the scale is undone via scalbn below.
    var denom_logb: i32 = 0;
    const max_cd = @max(@abs(c), @abs(d));
    if (isFinite(max_cd)) {
        if (max_cd == 0) {
            // ilogb(0) is not usable; substitute the most negative scale.
            denom_logb = std.math.minInt(i32) + 1;
            c = 0;
            d = 0;
        } else {
            denom_logb = ilogb(max_cd);
            c = scalbn(c, -denom_logb);
            d = scalbn(d, -denom_logb);
        }
    }
    const denom = c * c + d * d;
    var x = scalbn((a * c + b * d) / denom, -denom_logb);
    var y = scalbn((b * c - a * d) / denom, -denom_logb);
    // Recover the special cases that the formula turned into NaN + NaNi.
    if (isNan(x) and isNan(y)) {
        if (isPositiveZero(denom) and (!isNan(a) or !isNan(b))) {
            // Nonzero / zero: infinity carrying the sign of the real denominator.
            x = copysign(inf(T), c) * a;
            y = copysign(inf(T), c) * b;
        } else if ((isInf(a) or isInf(b)) and isFinite(c) and isFinite(d)) {
            // Infinite numerator / finite denominator: box infinities to ±1.
            a = copysign(if (isInf(a)) @as(T, 1.0) else @as(T, 0.0), a);
            b = copysign(if (isInf(b)) @as(T, 1.0) else @as(T, 0.0), b);
            x = inf(T) * (a * c + b * d);
            y = inf(T) * (b * c - a * d);
        } else if (isInf(max_cd) and isFinite(a) and isFinite(b)) {
            // Finite numerator / infinite denominator: result tends to zero.
            c = copysign(if (isInf(c)) @as(T, 1.0) else @as(T, 0.0), c);
            d = copysign(if (isInf(d)) @as(T, 1.0) else @as(T, 0.0), d);
            x = 0.0 * (a * c + b * d);
            y = 0.0 * (b * c - a * d);
        }
    }
    return .{ x, y };
}
// (inf + NaN·i) * (2 + 0·i): Annex G boxes the infinity, so the real part
// must come out infinite rather than NaN.
test complexFloatMul {
    // Naive algorithm would produce NaN + NaNi instead of inf + NaNi
    const result = complexFloatMul(f64, inf(f64), std.math.nan(f64), 2, 0);
    try std.testing.expect(isInf(result[0]));
    try std.testing.expect(isNan(result[1]));
}
// Division special cases: an infinite numerator survives as inf + NaNi, and
// dividing a finite nonzero value by zero yields an infinite result.
test complexFloatDiv {
    // Naive algorithm would produce NaN + NaNi instead of inf + NaNi
    var result = complexFloatDiv(f64, inf(f64), std.math.nan(f64), 2, 0);
    try std.testing.expect(isInf(result[0]));
    try std.testing.expect(isNan(result[1]));
    result = complexFloatDiv(f64, 2.0, 2.0, 0.0, 0.0);
    try std.testing.expect(isInf(result[0]));
    try std.testing.expect(isInf(result[1]));
}

View File

@ -45,7 +45,7 @@ pub fn hasFeature(comp: *Compilation, ext: []const u8) bool {
.c_static_assert = comp.langopts.standard.atLeast(.c11),
.c_thread_local = comp.langopts.standard.atLeast(.c11) and target_util.isTlsSupported(comp.target),
};
inline for (std.meta.fields(@TypeOf(list))) |f| {
inline for (@typeInfo(@TypeOf(list)).@"struct".fields) |f| {
if (std.mem.eql(u8, f.name, ext)) return @field(list, f.name);
}
return false;
@ -69,7 +69,7 @@ pub fn hasExtension(comp: *Compilation, ext: []const u8) bool {
.matrix_types = false, // TODO
.matrix_types_scalar_division = false, // TODO
};
inline for (std.meta.fields(@TypeOf(list))) |f| {
inline for (@typeInfo(@TypeOf(list)).@"struct".fields) |f| {
if (std.mem.eql(u8, f.name, ext)) return @field(list, f.name);
}
return false;

View File

@ -19,6 +19,13 @@ const OngoingBitfield = struct {
unused_size_bits: u64,
};
pub const Error = error{Overflow};
/// Rounds `addr` up to the next multiple of `alignment`.
/// Unlike std.mem.alignForward, this reports error.Overflow when the
/// intermediate sum does not fit in a u64 instead of silently wrapping.
fn alignForward(addr: u64, alignment: u64) !u64 {
    const bumped = try std.math.add(u64, addr, alignment - 1);
    return std.mem.alignBackward(u64, bumped, alignment);
}
const SysVContext = struct {
/// Does the record have an __attribute__((packed)) annotation.
attr_packed: bool,
@ -36,14 +43,8 @@ const SysVContext = struct {
comp: *const Compilation,
fn init(ty: Type, comp: *const Compilation, pragma_pack: ?u8) SysVContext {
var pack_value: ?u64 = null;
if (pragma_pack) |pak| {
pack_value = pak * BITS_PER_BYTE;
}
var req_align: u29 = BITS_PER_BYTE;
if (ty.requestedAlignment(comp)) |aln| {
req_align = aln * BITS_PER_BYTE;
}
const pack_value: ?u64 = if (pragma_pack) |pak| @as(u64, pak) * BITS_PER_BYTE else null;
const req_align = @as(u32, (ty.requestedAlignment(comp) orelse 1)) * BITS_PER_BYTE;
return SysVContext{
.attr_packed = ty.hasAttribute(.@"packed"),
.max_field_align_bits = pack_value,
@ -55,7 +56,7 @@ const SysVContext = struct {
};
}
fn layoutFields(self: *SysVContext, rec: *const Record) void {
fn layoutFields(self: *SysVContext, rec: *const Record) !void {
for (rec.fields, 0..) |*fld, fld_indx| {
if (fld.ty.specifier == .invalid) continue;
const type_layout = computeLayout(fld.ty, self.comp);
@ -65,12 +66,12 @@ const SysVContext = struct {
field_attrs = attrs[fld_indx];
}
if (self.comp.target.isMinGW()) {
fld.layout = self.layoutMinGWField(fld, field_attrs, type_layout);
fld.layout = try self.layoutMinGWField(fld, field_attrs, type_layout);
} else {
if (fld.isRegularField()) {
fld.layout = self.layoutRegularField(field_attrs, type_layout);
fld.layout = try self.layoutRegularField(field_attrs, type_layout);
} else {
fld.layout = self.layoutBitField(field_attrs, type_layout, fld.isNamed(), fld.specifiedBitWidth());
fld.layout = try self.layoutBitField(field_attrs, type_layout, fld.isNamed(), fld.specifiedBitWidth());
}
}
}
@ -99,8 +100,8 @@ const SysVContext = struct {
field: *const Field,
field_attrs: ?[]const Attribute,
field_layout: TypeLayout,
) FieldLayout {
const annotation_alignment_bits = BITS_PER_BYTE * (Type.annotationAlignment(self.comp, field_attrs) orelse 1);
) !FieldLayout {
const annotation_alignment_bits = BITS_PER_BYTE * @as(u32, (Type.annotationAlignment(self.comp, Attribute.Iterator.initSlice(field_attrs)) orelse 1));
const is_attr_packed = self.attr_packed or isPacked(field_attrs);
const ignore_type_alignment = ignoreTypeAlignment(is_attr_packed, field.bit_width, self.ongoing_bitfield, field_layout);
@ -157,7 +158,7 @@ const SysVContext = struct {
field_alignment_bits: u64,
is_named: bool,
width: u64,
) FieldLayout {
) !FieldLayout {
std.debug.assert(width <= ty_size_bits); // validated in parser
// In a union, the size of the underlying type does not affect the size of the union.
@ -194,8 +195,8 @@ const SysVContext = struct {
.unused_size_bits = ty_size_bits - width,
};
}
const offset_bits = std.mem.alignForward(u64, self.size_bits, field_alignment_bits);
self.size_bits = if (width == 0) offset_bits else offset_bits + ty_size_bits;
const offset_bits = try alignForward(self.size_bits, field_alignment_bits);
self.size_bits = if (width == 0) offset_bits else try std.math.add(u64, offset_bits, ty_size_bits);
if (!is_named) return .{};
return .{
.offset_bits = offset_bits,
@ -207,16 +208,16 @@ const SysVContext = struct {
self: *SysVContext,
ty_size_bits: u64,
field_alignment_bits: u64,
) FieldLayout {
) !FieldLayout {
self.ongoing_bitfield = null;
// A struct field starts at the next offset in the struct that is properly
// aligned with respect to the start of the struct. See test case 0033.
// A union field always starts at offset 0.
const offset_bits = if (self.is_union) 0 else std.mem.alignForward(u64, self.size_bits, field_alignment_bits);
const offset_bits = if (self.is_union) 0 else try alignForward(self.size_bits, field_alignment_bits);
// Set the size of the record to the maximum of the current size and the end of
// the field. See test case 0034.
self.size_bits = @max(self.size_bits, offset_bits + ty_size_bits);
self.size_bits = @max(self.size_bits, try std.math.add(u64, offset_bits, ty_size_bits));
return .{
.offset_bits = offset_bits,
@ -228,7 +229,7 @@ const SysVContext = struct {
self: *SysVContext,
fld_attrs: ?[]const Attribute,
fld_layout: TypeLayout,
) FieldLayout {
) !FieldLayout {
var fld_align_bits = fld_layout.field_alignment_bits;
// If the struct or the field is packed, then the alignment of the underlying type is
@ -239,8 +240,8 @@ const SysVContext = struct {
// The field alignment can be increased by __attribute__((aligned)) annotations on the
// field. See test case 0085.
if (Type.annotationAlignment(self.comp, fld_attrs)) |anno| {
fld_align_bits = @max(fld_align_bits, anno * BITS_PER_BYTE);
if (Type.annotationAlignment(self.comp, Attribute.Iterator.initSlice(fld_attrs))) |anno| {
fld_align_bits = @max(fld_align_bits, @as(u32, anno) * BITS_PER_BYTE);
}
// #pragma pack takes precedence over all other attributes. See test cases 0084 and
@ -251,12 +252,12 @@ const SysVContext = struct {
// A struct field starts at the next offset in the struct that is properly
// aligned with respect to the start of the struct.
const offset_bits = if (self.is_union) 0 else std.mem.alignForward(u64, self.size_bits, fld_align_bits);
const offset_bits = if (self.is_union) 0 else try alignForward(self.size_bits, fld_align_bits);
const size_bits = fld_layout.size_bits;
// The alignment of a record is the maximum of its field alignments. See test cases
// 0084, 0085, 0086.
self.size_bits = @max(self.size_bits, offset_bits + size_bits);
self.size_bits = @max(self.size_bits, try std.math.add(u64, offset_bits, size_bits));
self.aligned_bits = @max(self.aligned_bits, fld_align_bits);
return .{
@ -271,7 +272,7 @@ const SysVContext = struct {
fld_layout: TypeLayout,
is_named: bool,
bit_width: u64,
) FieldLayout {
) !FieldLayout {
const ty_size_bits = fld_layout.size_bits;
var ty_fld_algn_bits: u32 = fld_layout.field_alignment_bits;
@ -301,7 +302,7 @@ const SysVContext = struct {
const attr_packed = self.attr_packed or isPacked(fld_attrs);
const has_packing_annotation = attr_packed or self.max_field_align_bits != null;
const annotation_alignment: u32 = if (Type.annotationAlignment(self.comp, fld_attrs)) |anno| anno * BITS_PER_BYTE else 1;
const annotation_alignment = if (Type.annotationAlignment(self.comp, Attribute.Iterator.initSlice(fld_attrs))) |anno| @as(u32, anno) * BITS_PER_BYTE else 1;
const first_unused_bit: u64 = if (self.is_union) 0 else self.size_bits;
var field_align_bits: u64 = 1;
@ -322,7 +323,7 @@ const SysVContext = struct {
// - the alignment of the type is larger than its size,
// then it is aligned to the type's field alignment. See test case 0083.
if (!has_packing_annotation) {
const start_bit = std.mem.alignForward(u64, first_unused_bit, field_align_bits);
const start_bit = try alignForward(first_unused_bit, field_align_bits);
const does_field_cross_boundary = start_bit % ty_fld_algn_bits + bit_width > ty_size_bits;
@ -349,8 +350,8 @@ const SysVContext = struct {
}
}
const offset_bits = std.mem.alignForward(u64, first_unused_bit, field_align_bits);
self.size_bits = @max(self.size_bits, offset_bits + bit_width);
const offset_bits = try alignForward(first_unused_bit, field_align_bits);
self.size_bits = @max(self.size_bits, try std.math.add(u64, offset_bits, bit_width));
// Unnamed fields do not contribute to the record alignment except on a few targets.
// See test case 0079.
@ -419,10 +420,7 @@ const MsvcContext = struct {
// The required alignment can be increased by adding a __declspec(align)
// annotation. See test case 0023.
var must_align: u29 = BITS_PER_BYTE;
if (ty.requestedAlignment(comp)) |req_align| {
must_align = req_align * BITS_PER_BYTE;
}
const must_align = @as(u32, (ty.requestedAlignment(comp) orelse 1)) * BITS_PER_BYTE;
return MsvcContext{
.req_align_bits = must_align,
.pointer_align_bits = must_align,
@ -436,15 +434,15 @@ const MsvcContext = struct {
};
}
fn layoutField(self: *MsvcContext, fld: *const Field, fld_attrs: ?[]const Attribute) FieldLayout {
fn layoutField(self: *MsvcContext, fld: *const Field, fld_attrs: ?[]const Attribute) !FieldLayout {
const type_layout = computeLayout(fld.ty, self.comp);
// The required alignment of the field is the maximum of the required alignment of the
// underlying type and the __declspec(align) annotation on the field itself.
// See test case 0028.
var req_align = type_layout.required_alignment_bits;
if (Type.annotationAlignment(self.comp, fld_attrs)) |anno| {
req_align = @max(anno * BITS_PER_BYTE, req_align);
if (Type.annotationAlignment(self.comp, Attribute.Iterator.initSlice(fld_attrs))) |anno| {
req_align = @max(@as(u32, anno) * BITS_PER_BYTE, req_align);
}
// The required alignment of a record is the maximum of the required alignments of its
@ -480,7 +478,7 @@ const MsvcContext = struct {
}
}
fn layoutBitField(self: *MsvcContext, ty_size_bits: u64, field_align: u32, bit_width: u32) FieldLayout {
fn layoutBitField(self: *MsvcContext, ty_size_bits: u64, field_align: u32, bit_width: u32) !FieldLayout {
if (bit_width == 0) {
// A zero-sized bit-field that does not follow a non-zero-sized bit-field does not affect
// the overall layout of the record. Even in a union where the order would otherwise
@ -522,7 +520,7 @@ const MsvcContext = struct {
self.pointer_align_bits = @max(self.pointer_align_bits, p_align);
self.field_align_bits = @max(self.field_align_bits, field_align);
const offset_bits = std.mem.alignForward(u64, self.size_bits, field_align);
const offset_bits = try alignForward(self.size_bits, field_align);
self.size_bits = if (bit_width == 0) offset_bits else offset_bits + ty_size_bits;
break :bits offset_bits;
@ -534,7 +532,7 @@ const MsvcContext = struct {
return .{ .offset_bits = offset_bits, .size_bits = bit_width };
}
fn layoutRegularField(self: *MsvcContext, size_bits: u64, field_align: u32) FieldLayout {
fn layoutRegularField(self: *MsvcContext, size_bits: u64, field_align: u32) !FieldLayout {
self.contains_non_bitfield = true;
self.ongoing_bitfield = null;
// The alignment of the field affects both the pointer alignment and the field
@ -543,7 +541,7 @@ const MsvcContext = struct {
self.field_align_bits = @max(self.field_align_bits, field_align);
const offset_bits = switch (self.is_union) {
true => 0,
false => std.mem.alignForward(u64, self.size_bits, field_align),
false => try alignForward(self.size_bits, field_align),
};
self.size_bits = @max(self.size_bits, offset_bits + size_bits);
return .{ .offset_bits = offset_bits, .size_bits = size_bits };
@ -569,14 +567,14 @@ const MsvcContext = struct {
}
};
pub fn compute(rec: *Type.Record, ty: Type, comp: *const Compilation, pragma_pack: ?u8) void {
pub fn compute(rec: *Type.Record, ty: Type, comp: *const Compilation, pragma_pack: ?u8) Error!void {
switch (comp.langopts.emulate) {
.gcc, .clang => {
var context = SysVContext.init(ty, comp, pragma_pack);
context.layoutFields(rec);
try context.layoutFields(rec);
context.size_bits = std.mem.alignForward(u64, context.size_bits, context.aligned_bits);
context.size_bits = try alignForward(context.size_bits, context.aligned_bits);
rec.type_layout = .{
.size_bits = context.size_bits,
@ -594,7 +592,7 @@ pub fn compute(rec: *Type.Record, ty: Type, comp: *const Compilation, pragma_pac
field_attrs = attrs[fld_indx];
}
fld.layout = context.layoutField(fld, field_attrs);
fld.layout = try context.layoutField(fld, field_attrs);
}
if (context.size_bits == 0) {
// As an extension, MSVC allows records that only contain zero-sized bitfields and empty
@ -602,7 +600,7 @@ pub fn compute(rec: *Type.Record, ty: Type, comp: *const Compilation, pragma_pac
// ensure that there are no zero-sized records.
context.handleZeroSizedRecord();
}
context.size_bits = std.mem.alignForward(u64, context.size_bits, context.pointer_align_bits);
context.size_bits = try alignForward(context.size_bits, context.pointer_align_bits);
rec.type_layout = .{
.size_bits = context.size_bits,
.field_alignment_bits = context.field_align_bits,

View File

@ -35,10 +35,7 @@ pub fn intMaxType(target: std.Target) Type {
/// intptr_t for this target
pub fn intPtrType(target: std.Target) Type {
switch (target.os.tag) {
.haiku => return .{ .specifier = .long },
else => {},
}
if (target.os.tag == .haiku) return .{ .specifier = .long };
switch (target.cpu.arch) {
.aarch64, .aarch64_be => switch (target.os.tag) {
@ -127,6 +124,14 @@ pub fn int64Type(target: std.Target) Type {
return .{ .specifier = .long_long };
}
/// Returns the C type that backs an 80-bit float on `target`, or null when
/// the target has no 80-bit float type.
pub fn float80Type(target: std.Target) ?Type {
    return switch (target.cpu.arch) {
        // Only x86 family targets carry an x87 80-bit long double here.
        .x86, .x86_64 => .{ .specifier = .long_double },
        else => null,
    };
}
/// This function returns 1 if function alignment is not observable or settable.
pub fn defaultFunctionAlignment(target: std.Target) u8 {
return switch (target.cpu.arch) {
@ -474,6 +479,7 @@ pub fn get32BitArchVariant(target: std.Target) ?std.Target {
.kalimba,
.lanai,
.wasm32,
.spirv,
.spirv32,
.loongarch32,
.dxil,
@ -544,6 +550,7 @@ pub fn get64BitArchVariant(target: std.Target) ?std.Target {
.powerpcle => copy.cpu.arch = .powerpc64le,
.riscv32 => copy.cpu.arch = .riscv64,
.sparc => copy.cpu.arch = .sparc64,
.spirv => copy.cpu.arch = .spirv64,
.spirv32 => copy.cpu.arch = .spirv64,
.thumb => copy.cpu.arch = .aarch64,
.thumbeb => copy.cpu.arch = .aarch64_be,
@ -599,6 +606,7 @@ pub fn toLLVMTriple(target: std.Target, buf: []u8) []const u8 {
.xtensa => "xtensa",
.nvptx => "nvptx",
.nvptx64 => "nvptx64",
.spirv => "spirv",
.spirv32 => "spirv32",
.spirv64 => "spirv64",
.kalimba => "kalimba",
@ -646,9 +654,10 @@ pub fn toLLVMTriple(target: std.Target, buf: []u8) []const u8 {
.ios => "ios",
.tvos => "tvos",
.watchos => "watchos",
.visionos => "xros",
.driverkit => "driverkit",
.shadermodel => "shadermodel",
.visionos => "xros",
.serenity => "serenity",
.opencl,
.opengl,
.vulkan,
@ -707,6 +716,7 @@ pub fn toLLVMTriple(target: std.Target, buf: []u8) []const u8 {
.callable => "callable",
.mesh => "mesh",
.amplification => "amplification",
.ohos => "openhos",
};
writer.writeAll(llvm_abi) catch unreachable;
return stream.getWritten();

View File

@ -71,7 +71,7 @@ pub const Kind = enum {
pub fn maxCodepoint(kind: Kind, comp: *const Compilation) u21 {
return @intCast(switch (kind) {
.char => std.math.maxInt(u7),
.wide => @min(0x10FFFF, comp.types.wchar.maxInt(comp)),
.wide => @min(0x10FFFF, comp.wcharMax()),
.utf_8 => std.math.maxInt(u7),
.utf_16 => std.math.maxInt(u16),
.utf_32 => 0x10FFFF,
@ -83,7 +83,7 @@ pub const Kind = enum {
pub fn maxInt(kind: Kind, comp: *const Compilation) u32 {
return @intCast(switch (kind) {
.char, .utf_8 => std.math.maxInt(u8),
.wide => comp.types.wchar.maxInt(comp),
.wide => comp.wcharMax(),
.utf_16 => std.math.maxInt(u16),
.utf_32 => std.math.maxInt(u32),
.unterminated => unreachable,

View File

@ -423,7 +423,7 @@ test Linux {
defer arena_instance.deinit();
const arena = arena_instance.allocator();
var comp = Compilation.init(std.testing.allocator);
var comp = Compilation.init(std.testing.allocator, std.fs.cwd());
defer comp.deinit();
comp.environment = .{
.path = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",

View File

@ -34,6 +34,7 @@ const KeyAdapter = struct {
pub const Key = union(enum) {
int_ty: u16,
float_ty: u16,
complex_ty: u16,
ptr_ty,
noreturn_ty,
void_ty,
@ -62,6 +63,7 @@ pub const Key = union(enum) {
}
},
float: Float,
complex: Complex,
bytes: []const u8,
pub const Float = union(enum) {
@ -71,6 +73,13 @@ pub const Key = union(enum) {
f80: f80,
f128: f128,
};
/// A complex constant stored as a `{ real, imag }` pair, tagged by the
/// precision of each component.
pub const Complex = union(enum) {
    cf16: [2]f16,
    cf32: [2]f32,
    cf64: [2]f64,
    cf80: [2]f80,
    cf128: [2]f128,
};
pub fn hash(key: Key) u32 {
var hasher = Hash.init(0);
@ -89,6 +98,12 @@ pub const Key = union(enum) {
@as(std.meta.Int(.unsigned, @bitSizeOf(@TypeOf(data))), @bitCast(data)),
),
},
.complex => |repr| switch (repr) {
inline else => |data| std.hash.autoHash(
&hasher,
@as(std.meta.Int(.unsigned, @bitSizeOf(@TypeOf(data))), @bitCast(data)),
),
},
.int => |repr| {
var space: Tag.Int.BigIntSpace = undefined;
const big = repr.toBigInt(&space);
@ -154,6 +169,14 @@ pub const Key = union(enum) {
128 => return .f128,
else => unreachable,
},
.complex_ty => |bits| switch (bits) {
16 => return .cf16,
32 => return .cf32,
64 => return .cf64,
80 => return .cf80,
128 => return .cf128,
else => unreachable,
},
.ptr_ty => return .ptr,
.func_ty => return .func,
.noreturn_ty => return .noreturn,
@ -199,6 +222,11 @@ pub const Ref = enum(u32) {
zero = max - 16,
one = max - 17,
null = max - 18,
cf16 = max - 19,
cf32 = max - 20,
cf64 = max - 21,
cf80 = max - 22,
cf128 = max - 23,
_,
};
@ -224,6 +252,11 @@ pub const OptRef = enum(u32) {
zero = max - 16,
one = max - 17,
null = max - 18,
cf16 = max - 19,
cf32 = max - 20,
cf64 = max - 21,
cf80 = max - 22,
cf128 = max - 23,
_,
};
@ -232,6 +265,8 @@ pub const Tag = enum(u8) {
int_ty,
/// `data` is `u16`
float_ty,
/// `data` is `u16`
complex_ty,
/// `data` is index to `Array`
array_ty,
/// `data` is index to `Vector`
@ -254,6 +289,16 @@ pub const Tag = enum(u8) {
f80,
/// `data` is `F128`
f128,
/// `data` is `CF16`
cf16,
/// `data` is `CF32`
cf32,
/// `data` is `CF64`
cf64,
/// `data` is `CF80`
cf80,
/// `data` is `CF128`
cf128,
/// `data` is `Bytes`
bytes,
/// `data` is `Record`
@ -354,6 +399,134 @@ pub const Tag = enum(u8) {
}
};
/// Complex f16 constant packed into a single u32: the real half occupies the
/// high 16 bits and the imaginary half the low 16 bits.
pub const CF16 = struct {
    piece0: u32,

    /// Unpack as `{ real, imag }`.
    pub fn get(self: CF16) [2]f16 {
        const real_bits: u16 = @truncate(self.piece0 >> 16);
        const imag_bits: u16 = @truncate(self.piece0);
        return .{ @bitCast(real_bits), @bitCast(imag_bits) };
    }

    fn pack(val: [2]f16) CF16 {
        const real_bits: u16 = @bitCast(val[0]);
        const imag_bits: u16 = @bitCast(val[1]);
        return .{ .piece0 = (@as(u32, real_bits) << 16) | imag_bits };
    }
};
/// Complex f32 constant split across two u32 pieces: real first, then
/// imaginary, each stored as its raw bit pattern.
pub const CF32 = struct {
    piece0: u32,
    piece1: u32,

    /// Unpack as `{ real, imag }`.
    pub fn get(self: CF32) [2]f32 {
        const real: f32 = @bitCast(self.piece0);
        const imag: f32 = @bitCast(self.piece1);
        return .{ real, imag };
    }

    fn pack(val: [2]f32) CF32 {
        const real_bits: u32 = @bitCast(val[0]);
        const imag_bits: u32 = @bitCast(val[1]);
        return .{ .piece0 = real_bits, .piece1 = imag_bits };
    }
};
/// Complex f64 constant split across four u32 pieces: piece0/piece1 hold the
/// real component and piece2/piece3 the imaginary one, each pair in F64's
/// packing.
pub const CF64 = struct {
    piece0: u32,
    piece1: u32,
    piece2: u32,
    piece3: u32,
    /// Unpack as `{ real, imag }`.
    pub fn get(self: CF64) [2]f64 {
        return .{
            (F64{ .piece0 = self.piece0, .piece1 = self.piece1 }).get(),
            (F64{ .piece0 = self.piece2, .piece1 = self.piece3 }).get(),
        };
    }
    fn pack(val: [2]f64) CF64 {
        // Delegate per-component packing to F64 and splice the pieces.
        const real = F64.pack(val[0]);
        const imag = F64.pack(val[1]);
        return .{
            .piece0 = real.piece0,
            .piece1 = real.piece1,
            .piece2 = imag.piece0,
            .piece3 = imag.piece1,
        };
    }
};
/// Complex f80 constant split across six u32 pieces: piece0..piece2 hold the
/// real component and piece3..piece5 the imaginary one, each in F80's
/// packing (the `u16 part` pieces carry only 16 significant bits).
/// TODO pack into 5 pieces
pub const CF80 = struct {
    piece0: u32,
    piece1: u32,
    piece2: u32, // u16 part, top bits
    piece3: u32,
    piece4: u32,
    piece5: u32, // u16 part, top bits
    /// Unpack as `{ real, imag }`.
    pub fn get(self: CF80) [2]f80 {
        return .{
            (F80{ .piece0 = self.piece0, .piece1 = self.piece1, .piece2 = self.piece2 }).get(),
            (F80{ .piece0 = self.piece3, .piece1 = self.piece4, .piece2 = self.piece5 }).get(),
        };
    }
    fn pack(val: [2]f80) CF80 {
        // Delegate per-component packing to F80 and splice the pieces.
        const real = F80.pack(val[0]);
        const imag = F80.pack(val[1]);
        return .{
            .piece0 = real.piece0,
            .piece1 = real.piece1,
            .piece2 = real.piece2,
            .piece3 = imag.piece0,
            .piece4 = imag.piece1,
            .piece5 = imag.piece2,
        };
    }
};
/// Complex f128 constant split across eight u32 pieces: piece0..piece3 hold
/// the real component and piece4..piece7 the imaginary one, each quad in
/// F128's packing.
pub const CF128 = struct {
    piece0: u32,
    piece1: u32,
    piece2: u32,
    piece3: u32,
    piece4: u32,
    piece5: u32,
    piece6: u32,
    piece7: u32,
    /// Unpack as `{ real, imag }`.
    pub fn get(self: CF128) [2]f128 {
        return .{
            (F128{ .piece0 = self.piece0, .piece1 = self.piece1, .piece2 = self.piece2, .piece3 = self.piece3 }).get(),
            (F128{ .piece0 = self.piece4, .piece1 = self.piece5, .piece2 = self.piece6, .piece3 = self.piece7 }).get(),
        };
    }
    fn pack(val: [2]f128) CF128 {
        // Delegate per-component packing to F128 and splice the pieces.
        const real = F128.pack(val[0]);
        const imag = F128.pack(val[1]);
        return .{
            .piece0 = real.piece0,
            .piece1 = real.piece1,
            .piece2 = real.piece2,
            .piece3 = real.piece3,
            .piece4 = imag.piece0,
            .piece5 = imag.piece1,
            .piece6 = imag.piece2,
            .piece7 = imag.piece3,
        };
    }
};
pub const Bytes = struct {
strings_index: u32,
len: u32,
@ -407,6 +580,12 @@ pub fn put(i: *Interner, gpa: Allocator, key: Key) !Ref {
.data = bits,
});
},
.complex_ty => |bits| {
i.items.appendAssumeCapacity(.{
.tag = .complex_ty,
.data = bits,
});
},
.array_ty => |info| {
const split_len = PackedU64.init(info.len);
i.items.appendAssumeCapacity(.{
@ -493,6 +672,28 @@ pub fn put(i: *Interner, gpa: Allocator, key: Key) !Ref {
.data = try i.addExtra(gpa, Tag.F128.pack(data)),
}),
},
.complex => |repr| switch (repr) {
.cf16 => |data| i.items.appendAssumeCapacity(.{
.tag = .cf16,
.data = try i.addExtra(gpa, Tag.CF16.pack(data)),
}),
.cf32 => |data| i.items.appendAssumeCapacity(.{
.tag = .cf32,
.data = try i.addExtra(gpa, Tag.CF32.pack(data)),
}),
.cf64 => |data| i.items.appendAssumeCapacity(.{
.tag = .cf64,
.data = try i.addExtra(gpa, Tag.CF64.pack(data)),
}),
.cf80 => |data| i.items.appendAssumeCapacity(.{
.tag = .cf80,
.data = try i.addExtra(gpa, Tag.CF80.pack(data)),
}),
.cf128 => |data| i.items.appendAssumeCapacity(.{
.tag = .cf128,
.data = try i.addExtra(gpa, Tag.CF128.pack(data)),
}),
},
.bytes => |bytes| {
const strings_index: u32 = @intCast(i.strings.items.len);
try i.strings.appendSlice(gpa, bytes);
@ -564,6 +765,10 @@ pub fn get(i: *const Interner, ref: Ref) Key {
.zero => return .{ .int = .{ .u64 = 0 } },
.one => return .{ .int = .{ .u64 = 1 } },
.null => return .null,
.cf16 => return .{ .complex_ty = 16 },
.cf32 => return .{ .complex_ty = 32 },
.cf64 => return .{ .complex_ty = 64 },
.cf80 => return .{ .complex_ty = 80 },
else => {},
}
@ -572,6 +777,7 @@ pub fn get(i: *const Interner, ref: Ref) Key {
return switch (item.tag) {
.int_ty => .{ .int_ty = @intCast(data) },
.float_ty => .{ .float_ty = @intCast(data) },
.complex_ty => .{ .complex_ty = @intCast(data) },
.array_ty => {
const array_ty = i.extraData(Tag.Array, data);
return .{ .array_ty = .{
@ -612,6 +818,26 @@ pub fn get(i: *const Interner, ref: Ref) Key {
const float = i.extraData(Tag.F128, data);
return .{ .float = .{ .f128 = float.get() } };
},
.cf16 => {
const components = i.extraData(Tag.CF16, data);
return .{ .complex = .{ .cf16 = components.get() } };
},
.cf32 => {
const components = i.extraData(Tag.CF32, data);
return .{ .complex = .{ .cf32 = components.get() } };
},
.cf64 => {
const components = i.extraData(Tag.CF64, data);
return .{ .complex = .{ .cf64 = components.get() } };
},
.cf80 => {
const components = i.extraData(Tag.CF80, data);
return .{ .complex = .{ .cf80 = components.get() } };
},
.cf128 => {
const components = i.extraData(Tag.CF128, data);
return .{ .complex = .{ .cf128 = components.get() } };
},
.bytes => {
const bytes = i.extraData(Tag.Bytes, data);
return .{ .bytes = i.strings.items[bytes.strings_index..][0..bytes.len] };

View File

@ -37,6 +37,7 @@ pub const Builder = struct {
for (b.decls.values()) |*decl| {
decl.deinit(b.gpa);
}
b.decls.deinit(b.gpa);
b.arena.deinit();
b.instructions.deinit(b.gpa);
b.body.deinit(b.gpa);

View File

@ -16,7 +16,7 @@ pub fn create(gpa: Allocator, target: std.Target) !*Object {
pub fn deinit(obj: *Object) void {
switch (obj.format) {
.elf => @as(*Elf, @fieldParentPtr("obj", obj)).deinit(),
.elf => @as(*Elf, @alignCast(@fieldParentPtr("obj", obj))).deinit(),
else => unreachable,
}
}
@ -32,7 +32,7 @@ pub const Section = union(enum) {
pub fn getSection(obj: *Object, section: Section) !*std.ArrayList(u8) {
switch (obj.format) {
.elf => return @as(*Elf, @fieldParentPtr("obj", obj)).getSection(section),
.elf => return @as(*Elf, @alignCast(@fieldParentPtr("obj", obj))).getSection(section),
else => unreachable,
}
}
@ -53,21 +53,21 @@ pub fn declareSymbol(
size: u64,
) ![]const u8 {
switch (obj.format) {
.elf => return @as(*Elf, @fieldParentPtr("obj", obj)).declareSymbol(section, name, linkage, @"type", offset, size),
.elf => return @as(*Elf, @alignCast(@fieldParentPtr("obj", obj))).declareSymbol(section, name, linkage, @"type", offset, size),
else => unreachable,
}
}
pub fn addRelocation(obj: *Object, name: []const u8, section: Section, address: u64, addend: i64) !void {
switch (obj.format) {
.elf => return @as(*Elf, @fieldParentPtr("obj", obj)).addRelocation(name, section, address, addend),
.elf => return @as(*Elf, @alignCast(@fieldParentPtr("obj", obj))).addRelocation(name, section, address, addend),
else => unreachable,
}
}
pub fn finish(obj: *Object, file: std.fs.File) !void {
switch (obj.format) {
.elf => return @as(*Elf, @fieldParentPtr("obj", obj)).finish(file),
.elf => return @as(*Elf, @alignCast(@fieldParentPtr("obj", obj))).finish(file),
else => unreachable,
}
}

View File

@ -731,7 +731,6 @@ fn transType(c: *Context, scope: *Scope, raw_ty: Type, qual_handling: Type.QualH
.float => return ZigTag.type.create(c.arena, "f32"),
.double => return ZigTag.type.create(c.arena, "f64"),
.long_double => return ZigTag.type.create(c.arena, "c_longdouble"),
.float80 => return ZigTag.type.create(c.arena, "f80"),
.float128 => return ZigTag.type.create(c.arena, "f128"),
.@"enum" => {
const enum_decl = ty.data.@"enum";
@ -1799,7 +1798,7 @@ pub fn main() !void {
const args = try std.process.argsAlloc(arena);
var aro_comp = aro.Compilation.init(gpa);
var aro_comp = aro.Compilation.init(gpa, std.fs.cwd());
defer aro_comp.deinit();
var tree = translate(gpa, &aro_comp, args) catch |err| switch (err) {

View File

@ -126,7 +126,7 @@ pub fn main() !void {
defer aro_arena_state.deinit();
const aro_arena = aro_arena_state.allocator();
var comp = aro.Compilation.init(aro_arena);
var comp = aro.Compilation.init(aro_arena, std.fs.cwd());
defer comp.deinit();
var argv = std.ArrayList([]const u8).init(comp.gpa);

View File

@ -59,7 +59,7 @@ pub fn preprocess(
if (hasAnyErrors(comp)) return error.PreprocessError;
try pp.prettyPrintTokens(writer);
try pp.prettyPrintTokens(writer, .result_only);
if (maybe_dependencies_list) |dependencies_list| {
for (comp.sources.values()) |comp_source| {

View File

@ -230,7 +230,7 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
};
const aro = @import("aro");
var aro_comp = aro.Compilation.init(comp.gpa);
var aro_comp = aro.Compilation.init(comp.gpa, std.fs.cwd());
defer aro_comp.deinit();
const include_dir = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libc", "mingw", "def-include" });
@ -268,7 +268,7 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
// new scope to ensure definition file is written before passing the path to WriteImportLibrary
const def_final_file = try o_dir.createFile(final_def_basename, .{ .truncate = true });
defer def_final_file.close();
try pp.prettyPrintTokens(def_final_file.writer());
try pp.prettyPrintTokens(def_final_file.writer(), .result_only);
}
const lib_final_path = try comp.global_cache_directory.join(comp.gpa, &[_][]const u8{

View File

@ -5258,7 +5258,7 @@ fn getMacroText(unit: *const clang.ASTUnit, c: *const Context, macro: *const cla
const end_c = c.source_manager.getCharacterData(end_loc);
const slice_len = @intFromPtr(end_c) - @intFromPtr(begin_c);
var comp = aro.Compilation.init(c.gpa);
var comp = aro.Compilation.init(c.gpa, std.fs.cwd());
defer comp.deinit();
const result = comp.addSourceFromBuffer("", begin_c[0..slice_len]) catch return error.OutOfMemory;