Merge pull request #7920 from ziglang/ast-memory-layout

Rework AST memory layout for better memory usage and performance
This commit is contained in:
Andrew Kelley 2021-02-24 18:49:07 -08:00 committed by GitHub
commit d7049fc8e0
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
42 changed files with 20594 additions and 16367 deletions

View File

@ -370,7 +370,6 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/lib/std/heap.zig"
"${CMAKE_SOURCE_DIR}/lib/std/heap/arena_allocator.zig"
"${CMAKE_SOURCE_DIR}/lib/std/io.zig"
"${CMAKE_SOURCE_DIR}/lib/std/io/auto_indenting_stream.zig"
"${CMAKE_SOURCE_DIR}/lib/std/io/buffered_atomic_file.zig"
"${CMAKE_SOURCE_DIR}/lib/std/io/buffered_writer.zig"
"${CMAKE_SOURCE_DIR}/lib/std/io/change_detection_stream.zig"
@ -408,6 +407,7 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/lib/std/meta.zig"
"${CMAKE_SOURCE_DIR}/lib/std/meta/trailer_flags.zig"
"${CMAKE_SOURCE_DIR}/lib/std/meta/trait.zig"
"${CMAKE_SOURCE_DIR}/lib/std/multi_array_list.zig"
"${CMAKE_SOURCE_DIR}/lib/std/os.zig"
"${CMAKE_SOURCE_DIR}/lib/std/os/bits.zig"
"${CMAKE_SOURCE_DIR}/lib/std/os/bits/linux.zig"
@ -573,6 +573,7 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/src/target.zig"
"${CMAKE_SOURCE_DIR}/src/tracy.zig"
"${CMAKE_SOURCE_DIR}/src/translate_c.zig"
"${CMAKE_SOURCE_DIR}/src/translate_c/ast.zig"
"${CMAKE_SOURCE_DIR}/src/type.zig"
"${CMAKE_SOURCE_DIR}/src/value.zig"
"${CMAKE_SOURCE_DIR}/src/windows_sdk.zig"

View File

@ -5,7 +5,7 @@ A general-purpose programming language and toolchain for maintaining
## Resources
* [Introduction](https://ziglang.org/#Introduction)
* [Introduction](https://ziglang.org/learn/#introduction)
* [Download & Documentation](https://ziglang.org/download)
* [Chapter 0 - Getting Started | ZigLearn.org](https://ziglearn.org/)
* [Community](https://github.com/ziglang/zig/wiki/Community)

View File

@ -77,10 +77,12 @@ pub fn build(b: *Builder) !void {
const tracy = b.option([]const u8, "tracy", "Enable Tracy integration. Supply path to Tracy source");
const link_libc = b.option(bool, "force-link-libc", "Force self-hosted compiler to link libc") orelse enable_llvm;
const strip = b.option(bool, "strip", "Omit debug information") orelse false;
const main_file = if (is_stage1) "src/stage1.zig" else "src/main.zig";
var exe = b.addExecutable("zig", main_file);
exe.strip = strip;
exe.install();
exe.setBuildMode(mode);
exe.setTarget(target);

View File

@ -781,106 +781,119 @@ fn tokenizeAndPrintRaw(docgen_tokenizer: *Tokenizer, out: anytype, source_token:
next_tok_is_fn = false;
const token = tokenizer.next();
try writeEscaped(out, src[index..token.loc.start]);
switch (token.id) {
.Eof => break,
if (mem.indexOf(u8, src[index..token.loc.start], "//")) |comment_start_off| {
// render one comment
const comment_start = index + comment_start_off;
const comment_end_off = mem.indexOf(u8, src[comment_start .. token.loc.start], "\n");
const comment_end = if (comment_end_off) |o| comment_start + o else token.loc.start;
.Keyword_align,
.Keyword_and,
.Keyword_asm,
.Keyword_async,
.Keyword_await,
.Keyword_break,
.Keyword_catch,
.Keyword_comptime,
.Keyword_const,
.Keyword_continue,
.Keyword_defer,
.Keyword_else,
.Keyword_enum,
.Keyword_errdefer,
.Keyword_error,
.Keyword_export,
.Keyword_extern,
.Keyword_for,
.Keyword_if,
.Keyword_inline,
.Keyword_noalias,
.Keyword_noinline,
.Keyword_nosuspend,
.Keyword_opaque,
.Keyword_or,
.Keyword_orelse,
.Keyword_packed,
.Keyword_anyframe,
.Keyword_pub,
.Keyword_resume,
.Keyword_return,
.Keyword_linksection,
.Keyword_callconv,
.Keyword_struct,
.Keyword_suspend,
.Keyword_switch,
.Keyword_test,
.Keyword_threadlocal,
.Keyword_try,
.Keyword_union,
.Keyword_unreachable,
.Keyword_usingnamespace,
.Keyword_var,
.Keyword_volatile,
.Keyword_allowzero,
.Keyword_while,
.Keyword_anytype,
try writeEscaped(out, src[index..comment_start]);
try out.writeAll("<span class=\"tok-comment\">");
try writeEscaped(out, src[comment_start .. comment_end]);
try out.writeAll("</span>");
index = comment_end;
tokenizer.index = index;
continue;
}
try writeEscaped(out, src[index..token.loc.start]);
switch (token.tag) {
.eof => break,
.keyword_align,
.keyword_and,
.keyword_asm,
.keyword_async,
.keyword_await,
.keyword_break,
.keyword_catch,
.keyword_comptime,
.keyword_const,
.keyword_continue,
.keyword_defer,
.keyword_else,
.keyword_enum,
.keyword_errdefer,
.keyword_error,
.keyword_export,
.keyword_extern,
.keyword_for,
.keyword_if,
.keyword_inline,
.keyword_noalias,
.keyword_noinline,
.keyword_nosuspend,
.keyword_opaque,
.keyword_or,
.keyword_orelse,
.keyword_packed,
.keyword_anyframe,
.keyword_pub,
.keyword_resume,
.keyword_return,
.keyword_linksection,
.keyword_callconv,
.keyword_struct,
.keyword_suspend,
.keyword_switch,
.keyword_test,
.keyword_threadlocal,
.keyword_try,
.keyword_union,
.keyword_unreachable,
.keyword_usingnamespace,
.keyword_var,
.keyword_volatile,
.keyword_allowzero,
.keyword_while,
.keyword_anytype,
=> {
try out.writeAll("<span class=\"tok-kw\">");
try writeEscaped(out, src[token.loc.start..token.loc.end]);
try out.writeAll("</span>");
},
.Keyword_fn => {
.keyword_fn => {
try out.writeAll("<span class=\"tok-kw\">");
try writeEscaped(out, src[token.loc.start..token.loc.end]);
try out.writeAll("</span>");
next_tok_is_fn = true;
},
.Keyword_undefined,
.Keyword_null,
.Keyword_true,
.Keyword_false,
.keyword_undefined,
.keyword_null,
.keyword_true,
.keyword_false,
=> {
try out.writeAll("<span class=\"tok-null\">");
try writeEscaped(out, src[token.loc.start..token.loc.end]);
try out.writeAll("</span>");
},
.StringLiteral,
.MultilineStringLiteralLine,
.CharLiteral,
.string_literal,
.multiline_string_literal_line,
.char_literal,
=> {
try out.writeAll("<span class=\"tok-str\">");
try writeEscaped(out, src[token.loc.start..token.loc.end]);
try out.writeAll("</span>");
},
.Builtin => {
.builtin => {
try out.writeAll("<span class=\"tok-builtin\">");
try writeEscaped(out, src[token.loc.start..token.loc.end]);
try out.writeAll("</span>");
},
.LineComment,
.DocComment,
.ContainerDocComment,
.ShebangLine,
.doc_comment,
.container_doc_comment,
=> {
try out.writeAll("<span class=\"tok-comment\">");
try writeEscaped(out, src[token.loc.start..token.loc.end]);
try out.writeAll("</span>");
},
.Identifier => {
.identifier => {
if (prev_tok_was_fn) {
try out.writeAll("<span class=\"tok-fn\">");
try writeEscaped(out, src[token.loc.start..token.loc.end]);
@ -908,71 +921,71 @@ fn tokenizeAndPrintRaw(docgen_tokenizer: *Tokenizer, out: anytype, source_token:
}
},
.IntegerLiteral,
.FloatLiteral,
.integer_literal,
.float_literal,
=> {
try out.writeAll("<span class=\"tok-number\">");
try writeEscaped(out, src[token.loc.start..token.loc.end]);
try out.writeAll("</span>");
},
.Bang,
.Pipe,
.PipePipe,
.PipeEqual,
.Equal,
.EqualEqual,
.EqualAngleBracketRight,
.BangEqual,
.LParen,
.RParen,
.Semicolon,
.Percent,
.PercentEqual,
.LBrace,
.RBrace,
.LBracket,
.RBracket,
.Period,
.PeriodAsterisk,
.Ellipsis2,
.Ellipsis3,
.Caret,
.CaretEqual,
.Plus,
.PlusPlus,
.PlusEqual,
.PlusPercent,
.PlusPercentEqual,
.Minus,
.MinusEqual,
.MinusPercent,
.MinusPercentEqual,
.Asterisk,
.AsteriskEqual,
.AsteriskAsterisk,
.AsteriskPercent,
.AsteriskPercentEqual,
.Arrow,
.Colon,
.Slash,
.SlashEqual,
.Comma,
.Ampersand,
.AmpersandEqual,
.QuestionMark,
.AngleBracketLeft,
.AngleBracketLeftEqual,
.AngleBracketAngleBracketLeft,
.AngleBracketAngleBracketLeftEqual,
.AngleBracketRight,
.AngleBracketRightEqual,
.AngleBracketAngleBracketRight,
.AngleBracketAngleBracketRightEqual,
.Tilde,
.bang,
.pipe,
.pipe_pipe,
.pipe_equal,
.equal,
.equal_equal,
.equal_angle_bracket_right,
.bang_equal,
.l_paren,
.r_paren,
.semicolon,
.percent,
.percent_equal,
.l_brace,
.r_brace,
.l_bracket,
.r_bracket,
.period,
.period_asterisk,
.ellipsis2,
.ellipsis3,
.caret,
.caret_equal,
.plus,
.plus_plus,
.plus_equal,
.plus_percent,
.plus_percent_equal,
.minus,
.minus_equal,
.minus_percent,
.minus_percent_equal,
.asterisk,
.asterisk_equal,
.asterisk_asterisk,
.asterisk_percent,
.asterisk_percent_equal,
.arrow,
.colon,
.slash,
.slash_equal,
.comma,
.ampersand,
.ampersand_equal,
.question_mark,
.angle_bracket_left,
.angle_bracket_left_equal,
.angle_bracket_angle_bracket_left,
.angle_bracket_angle_bracket_left_equal,
.angle_bracket_right,
.angle_bracket_right_equal,
.angle_bracket_angle_bracket_right,
.angle_bracket_angle_bracket_right_equal,
.tilde,
=> try writeEscaped(out, src[token.loc.start..token.loc.end]),
.Invalid, .Invalid_ampersands, .Invalid_periodasterisks => return parseError(
.invalid, .invalid_ampersands, .invalid_periodasterisks => return parseError(
docgen_tokenizer,
source_token,
"syntax error",

View File

@ -98,7 +98,7 @@
//! in a `std.HashMap` using the backing allocator.
const std = @import("std");
const log = std.log.scoped(.std);
const log = std.log.scoped(.gpa);
const math = std.math;
const assert = std.debug.assert;
const mem = std.mem;
@ -162,6 +162,9 @@ pub const Config = struct {
/// logged error messages with stack trace details. The downside is that every allocation
/// will be leaked!
never_unmap: bool = false,
/// Enables emitting info messages with the size and address of every allocation.
verbose_log: bool = false,
};
pub fn GeneralPurposeAllocator(comptime config: Config) type {
@ -454,10 +457,19 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
const result_len = try self.backing_allocator.resizeFn(self.backing_allocator, old_mem, old_align, new_size, len_align, ret_addr);
if (result_len == 0) {
if (config.verbose_log) {
log.info("large free {d} bytes at {*}", .{ old_mem.len, old_mem.ptr });
}
self.large_allocations.removeAssertDiscard(@ptrToInt(old_mem.ptr));
return 0;
}
if (config.verbose_log) {
log.info("large resize {d} bytes at {*} to {d}", .{
old_mem.len, old_mem.ptr, new_size,
});
}
entry.value.bytes = old_mem.ptr[0..result_len];
collectStackTrace(ret_addr, &entry.value.stack_addresses);
return result_len;
@ -568,6 +580,9 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
} else {
@memset(old_mem.ptr, undefined, old_mem.len);
}
if (config.verbose_log) {
log.info("small free {d} bytes at {*}", .{ old_mem.len, old_mem.ptr });
}
return @as(usize, 0);
}
const new_aligned_size = math.max(new_size, old_align);
@ -576,6 +591,11 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
if (old_mem.len > new_size) {
@memset(old_mem.ptr + new_size, undefined, old_mem.len - new_size);
}
if (config.verbose_log) {
log.info("small resize {d} bytes at {*} to {d}", .{
old_mem.len, old_mem.ptr, new_size,
});
}
return new_size;
}
return error.OutOfMemory;
@ -623,6 +643,9 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
gop.entry.value.bytes = slice;
collectStackTrace(ret_addr, &gop.entry.value.stack_addresses);
if (config.verbose_log) {
log.info("large alloc {d} bytes at {*}", .{ slice.len, slice.ptr });
}
return slice;
}
@ -632,6 +655,9 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
const new_size_class = math.ceilPowerOfTwoAssert(usize, new_aligned_size);
const ptr = try self.allocSlot(new_size_class, ret_addr);
if (config.verbose_log) {
log.info("small alloc {d} bytes at {*}", .{ len, ptr });
}
return ptr[0..len];
}

View File

@ -142,9 +142,6 @@ pub const bitReader = @import("io/bit_reader.zig").bitReader;
pub const BitWriter = @import("io/bit_writer.zig").BitWriter;
pub const bitWriter = @import("io/bit_writer.zig").bitWriter;
pub const AutoIndentingStream = @import("io/auto_indenting_stream.zig").AutoIndentingStream;
pub const autoIndentingStream = @import("io/auto_indenting_stream.zig").autoIndentingStream;
pub const ChangeDetectionStream = @import("io/change_detection_stream.zig").ChangeDetectionStream;
pub const changeDetectionStream = @import("io/change_detection_stream.zig").changeDetectionStream;

View File

@ -1,154 +0,0 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2015-2021 Zig Contributors
// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
// The MIT license requires this copyright notice to be included in all copies
// and substantial portions of the software.
const std = @import("../std.zig");
const io = std.io;
const mem = std.mem;
const assert = std.debug.assert;
/// Automatically inserts indentation of written data by keeping
/// track of the current indentation level
pub fn AutoIndentingStream(comptime UnderlyingWriter: type) type {
return struct {
const Self = @This();
pub const Error = UnderlyingWriter.Error;
pub const Writer = io.Writer(*Self, Error, write);
underlying_writer: UnderlyingWriter,
indent_count: usize = 0,
indent_delta: usize,
current_line_empty: bool = true,
indent_one_shot_count: usize = 0, // automatically popped when applied
applied_indent: usize = 0, // the most recently applied indent
indent_next_line: usize = 0, // not used until the next line
pub fn writer(self: *Self) Writer {
return .{ .context = self };
}
pub fn write(self: *Self, bytes: []const u8) Error!usize {
if (bytes.len == 0)
return @as(usize, 0);
try self.applyIndent();
return self.writeNoIndent(bytes);
}
// Change the indent delta without changing the final indentation level
pub fn setIndentDelta(self: *Self, indent_delta: usize) void {
if (self.indent_delta == indent_delta) {
return;
} else if (self.indent_delta > indent_delta) {
assert(self.indent_delta % indent_delta == 0);
self.indent_count = self.indent_count * (self.indent_delta / indent_delta);
} else {
// assert that the current indentation (in spaces) in a multiple of the new delta
assert((self.indent_count * self.indent_delta) % indent_delta == 0);
self.indent_count = self.indent_count / (indent_delta / self.indent_delta);
}
self.indent_delta = indent_delta;
}
fn writeNoIndent(self: *Self, bytes: []const u8) Error!usize {
if (bytes.len == 0)
return @as(usize, 0);
try self.underlying_writer.writeAll(bytes);
if (bytes[bytes.len - 1] == '\n')
self.resetLine();
return bytes.len;
}
pub fn insertNewline(self: *Self) Error!void {
_ = try self.writeNoIndent("\n");
}
fn resetLine(self: *Self) void {
self.current_line_empty = true;
self.indent_next_line = 0;
}
/// Insert a newline unless the current line is blank
pub fn maybeInsertNewline(self: *Self) Error!void {
if (!self.current_line_empty)
try self.insertNewline();
}
/// Push default indentation
pub fn pushIndent(self: *Self) void {
// Doesn't actually write any indentation.
// Just primes the stream to be able to write the correct indentation if it needs to.
self.indent_count += 1;
}
/// Push an indent that is automatically popped after being applied
pub fn pushIndentOneShot(self: *Self) void {
self.indent_one_shot_count += 1;
self.pushIndent();
}
/// Turns all one-shot indents into regular indents
/// Returns number of indents that must now be manually popped
pub fn lockOneShotIndent(self: *Self) usize {
var locked_count = self.indent_one_shot_count;
self.indent_one_shot_count = 0;
return locked_count;
}
/// Push an indent that should not take effect until the next line
pub fn pushIndentNextLine(self: *Self) void {
self.indent_next_line += 1;
self.pushIndent();
}
pub fn popIndent(self: *Self) void {
assert(self.indent_count != 0);
self.indent_count -= 1;
if (self.indent_next_line > 0)
self.indent_next_line -= 1;
}
/// Writes ' ' bytes if the current line is empty
fn applyIndent(self: *Self) Error!void {
const current_indent = self.currentIndent();
if (self.current_line_empty and current_indent > 0) {
try self.underlying_writer.writeByteNTimes(' ', current_indent);
self.applied_indent = current_indent;
}
self.indent_count -= self.indent_one_shot_count;
self.indent_one_shot_count = 0;
self.current_line_empty = false;
}
/// Checks to see if the most recent indentation exceeds the currently pushed indents
pub fn isLineOverIndented(self: *Self) bool {
if (self.current_line_empty) return false;
return self.applied_indent > self.currentIndent();
}
fn currentIndent(self: *Self) usize {
var indent_current: usize = 0;
if (self.indent_count > 0) {
const indent_count = self.indent_count - self.indent_next_line;
indent_current = indent_count * self.indent_delta;
}
return indent_current;
}
};
}
pub fn autoIndentingStream(
indent_delta: usize,
underlying_writer: anytype,
) AutoIndentingStream(@TypeOf(underlying_writer)) {
return AutoIndentingStream(@TypeOf(underlying_writer)){
.underlying_writer = underlying_writer,
.indent_delta = indent_delta,
};
}

View File

@ -0,0 +1,446 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2015-2021 Zig Contributors
// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
// The MIT license requires this copyright notice to be included in all copies
// and substantial portions of the software.
const std = @import("std.zig");
const assert = std.debug.assert;
const meta = std.meta;
const mem = std.mem;
const Allocator = mem.Allocator;
pub fn MultiArrayList(comptime S: type) type {
return struct {
bytes: [*]align(@alignOf(S)) u8 = undefined,
len: usize = 0,
capacity: usize = 0,
pub const Elem = S;
pub const Field = meta.FieldEnum(S);
pub const Slice = struct {
/// This array is indexed by the field index which can be obtained
/// by using @enumToInt() on the Field enum
ptrs: [fields.len][*]u8,
len: usize,
capacity: usize,
pub fn items(self: Slice, comptime field: Field) []FieldType(field) {
const byte_ptr = self.ptrs[@enumToInt(field)];
const F = FieldType(field);
const casted_ptr = @ptrCast([*]F, @alignCast(@alignOf(F), byte_ptr));
return casted_ptr[0..self.len];
}
pub fn toMultiArrayList(self: Slice) Self {
if (self.ptrs.len == 0) {
return .{};
}
const unaligned_ptr = self.ptrs[sizes.fields[0]];
const aligned_ptr = @alignCast(@alignOf(S), unaligned_ptr);
const casted_ptr = @ptrCast([*]align(@alignOf(S)) u8, aligned_ptr);
return .{
.bytes = casted_ptr,
.len = self.len,
.capacity = self.capacity,
};
}
pub fn deinit(self: *Slice, gpa: *Allocator) void {
var other = self.toMultiArrayList();
other.deinit(gpa);
self.* = undefined;
}
};
const Self = @This();
const fields = meta.fields(S);
/// `sizes.bytes` is an array of @sizeOf each S field. Sorted by alignment, descending.
/// `sizes.fields` is an array mapping from `sizes.bytes` array index to field index.
const sizes = blk: {
const Data = struct {
size: usize,
size_index: usize,
alignment: usize,
};
var data: [fields.len]Data = undefined;
for (fields) |field_info, i| {
data[i] = .{
.size = @sizeOf(field_info.field_type),
.size_index = i,
.alignment = field_info.alignment,
};
}
const Sort = struct {
fn lessThan(trash: *i32, lhs: Data, rhs: Data) bool {
return lhs.alignment >= rhs.alignment;
}
};
var trash: i32 = undefined; // workaround for stage1 compiler bug
std.sort.sort(Data, &data, &trash, Sort.lessThan);
var sizes_bytes: [fields.len]usize = undefined;
var field_indexes: [fields.len]usize = undefined;
for (data) |elem, i| {
sizes_bytes[i] = elem.size;
field_indexes[i] = elem.size_index;
}
break :blk .{
.bytes = sizes_bytes,
.fields = field_indexes,
};
};
/// Release all allocated memory.
pub fn deinit(self: *Self, gpa: *Allocator) void {
gpa.free(self.allocatedBytes());
self.* = undefined;
}
/// The caller owns the returned memory. Empties this MultiArrayList.
pub fn toOwnedSlice(self: *Self) Slice {
const result = self.slice();
self.* = .{};
return result;
}
pub fn slice(self: Self) Slice {
var result: Slice = .{
.ptrs = undefined,
.len = self.len,
.capacity = self.capacity,
};
var ptr: [*]u8 = self.bytes;
for (sizes.bytes) |field_size, i| {
result.ptrs[sizes.fields[i]] = ptr;
ptr += field_size * self.capacity;
}
return result;
}
pub fn items(self: Self, comptime field: Field) []FieldType(field) {
return self.slice().items(field);
}
/// Overwrite one array element with new data.
pub fn set(self: *Self, index: usize, elem: S) void {
const slices = self.slice();
inline for (fields) |field_info, i| {
slices.items(@intToEnum(Field, i))[index] = @field(elem, field_info.name);
}
}
/// Obtain all the data for one array element.
pub fn get(self: *Self, index: usize) S {
const slices = self.slice();
var result: S = undefined;
inline for (fields) |field_info, i| {
@field(elem, field_info.name) = slices.items(@intToEnum(Field, i))[index];
}
return result;
}
/// Extend the list by 1 element. Allocates more memory as necessary.
pub fn append(self: *Self, gpa: *Allocator, elem: S) !void {
try self.ensureCapacity(gpa, self.len + 1);
self.appendAssumeCapacity(elem);
}
/// Extend the list by 1 element, but asserting `self.capacity`
/// is sufficient to hold an additional item.
pub fn appendAssumeCapacity(self: *Self, elem: S) void {
assert(self.len < self.capacity);
self.len += 1;
self.set(self.len - 1, elem);
}
/// Adjust the list's length to `new_len`.
/// Does not initialize added items, if any.
pub fn resize(self: *Self, gpa: *Allocator, new_len: usize) !void {
try self.ensureCapacity(gpa, new_len);
self.len = new_len;
}
/// Attempt to reduce allocated capacity to `new_len`.
/// If `new_len` is greater than zero, this may fail to reduce the capacity,
/// but the data remains intact and the length is updated to new_len.
pub fn shrinkAndFree(self: *Self, gpa: *Allocator, new_len: usize) void {
if (new_len == 0) {
gpa.free(self.allocatedBytes());
self.* = .{};
return;
}
assert(new_len <= self.capacity);
assert(new_len <= self.len);
const other_bytes = gpa.allocAdvanced(
u8,
@alignOf(S),
capacityInBytes(new_len),
.exact,
) catch {
const self_slice = self.slice();
inline for (fields) |field_info, i| {
const field = @intToEnum(Field, i);
const dest_slice = self_slice.items(field)[new_len..];
const byte_count = dest_slice.len * @sizeOf(field_info.field_type);
// We use memset here for more efficient codegen in safety-checked,
// valgrind-enabled builds. Otherwise the valgrind client request
// will be repeated for every element.
@memset(@ptrCast([*]u8, dest_slice.ptr), undefined, byte_count);
}
self.len = new_len;
return;
};
var other = Self{
.bytes = other_bytes.ptr,
.capacity = new_len,
.len = new_len,
};
self.len = new_len;
const self_slice = self.slice();
const other_slice = other.slice();
inline for (fields) |field_info, i| {
const field = @intToEnum(Field, i);
// TODO we should be able to use std.mem.copy here but it causes a
// test failure on aarch64 with -OReleaseFast
const src_slice = mem.sliceAsBytes(self_slice.items(field));
const dst_slice = mem.sliceAsBytes(other_slice.items(field));
@memcpy(dst_slice.ptr, src_slice.ptr, src_slice.len);
}
gpa.free(self.allocatedBytes());
self.* = other;
}
/// Reduce length to `new_len`.
/// Invalidates pointers to elements `items[new_len..]`.
/// Keeps capacity the same.
pub fn shrinkRetainingCapacity(self: *Self, new_len: usize) void {
self.len = new_len;
}
/// Modify the array so that it can hold at least `new_capacity` items.
/// Implements super-linear growth to achieve amortized O(1) append operations.
/// Invalidates pointers if additional memory is needed.
pub fn ensureCapacity(self: *Self, gpa: *Allocator, new_capacity: usize) !void {
var better_capacity = self.capacity;
if (better_capacity >= new_capacity) return;
while (true) {
better_capacity += better_capacity / 2 + 8;
if (better_capacity >= new_capacity) break;
}
return self.setCapacity(gpa, better_capacity);
}
/// Modify the array so that it can hold exactly `new_capacity` items.
/// Invalidates pointers if additional memory is needed.
/// `new_capacity` must be greater or equal to `len`.
pub fn setCapacity(self: *Self, gpa: *Allocator, new_capacity: usize) !void {
assert(new_capacity >= self.len);
const new_bytes = try gpa.allocAdvanced(
u8,
@alignOf(S),
capacityInBytes(new_capacity),
.exact,
);
if (self.len == 0) {
self.bytes = new_bytes.ptr;
self.capacity = new_capacity;
return;
}
var other = Self{
.bytes = new_bytes.ptr,
.capacity = new_capacity,
.len = self.len,
};
const self_slice = self.slice();
const other_slice = other.slice();
inline for (fields) |field_info, i| {
const field = @intToEnum(Field, i);
// TODO we should be able to use std.mem.copy here but it causes a
// test failure on aarch64 with -OReleaseFast
const src_slice = mem.sliceAsBytes(self_slice.items(field));
const dst_slice = mem.sliceAsBytes(other_slice.items(field));
@memcpy(dst_slice.ptr, src_slice.ptr, src_slice.len);
}
gpa.free(self.allocatedBytes());
self.* = other;
}
fn capacityInBytes(capacity: usize) usize {
const sizes_vector: std.meta.Vector(sizes.bytes.len, usize) = sizes.bytes;
const capacity_vector = @splat(sizes.bytes.len, capacity);
return @reduce(.Add, capacity_vector * sizes_vector);
}
fn allocatedBytes(self: Self) []align(@alignOf(S)) u8 {
return self.bytes[0..capacityInBytes(self.capacity)];
}
fn FieldType(field: Field) type {
return meta.fieldInfo(S, field).field_type;
}
};
}
test "basic usage" {
const testing = std.testing;
const ally = testing.allocator;
const Foo = struct {
a: u32,
b: []const u8,
c: u8,
};
var list = MultiArrayList(Foo){};
defer list.deinit(ally);
try list.ensureCapacity(ally, 2);
list.appendAssumeCapacity(.{
.a = 1,
.b = "foobar",
.c = 'a',
});
list.appendAssumeCapacity(.{
.a = 2,
.b = "zigzag",
.c = 'b',
});
testing.expectEqualSlices(u32, list.items(.a), &[_]u32{ 1, 2 });
testing.expectEqualSlices(u8, list.items(.c), &[_]u8{ 'a', 'b' });
testing.expectEqual(@as(usize, 2), list.items(.b).len);
testing.expectEqualStrings("foobar", list.items(.b)[0]);
testing.expectEqualStrings("zigzag", list.items(.b)[1]);
try list.append(ally, .{
.a = 3,
.b = "fizzbuzz",
.c = 'c',
});
testing.expectEqualSlices(u32, list.items(.a), &[_]u32{ 1, 2, 3 });
testing.expectEqualSlices(u8, list.items(.c), &[_]u8{ 'a', 'b', 'c' });
testing.expectEqual(@as(usize, 3), list.items(.b).len);
testing.expectEqualStrings("foobar", list.items(.b)[0]);
testing.expectEqualStrings("zigzag", list.items(.b)[1]);
testing.expectEqualStrings("fizzbuzz", list.items(.b)[2]);
// Add 6 more things to force a capacity increase.
var i: usize = 0;
while (i < 6) : (i += 1) {
try list.append(ally, .{
.a = @intCast(u32, 4 + i),
.b = "whatever",
.c = @intCast(u8, 'd' + i),
});
}
testing.expectEqualSlices(
u32,
&[_]u32{ 1, 2, 3, 4, 5, 6, 7, 8, 9 },
list.items(.a),
);
testing.expectEqualSlices(
u8,
&[_]u8{ 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i' },
list.items(.c),
);
list.shrinkAndFree(ally, 3);
testing.expectEqualSlices(u32, list.items(.a), &[_]u32{ 1, 2, 3 });
testing.expectEqualSlices(u8, list.items(.c), &[_]u8{ 'a', 'b', 'c' });
testing.expectEqual(@as(usize, 3), list.items(.b).len);
testing.expectEqualStrings("foobar", list.items(.b)[0]);
testing.expectEqualStrings("zigzag", list.items(.b)[1]);
testing.expectEqualStrings("fizzbuzz", list.items(.b)[2]);
}
// This was observed to fail on aarch64 with LLVM 11, when the capacityInBytes
// function used the @reduce code path.
test "regression test for @reduce bug" {
const ally = std.testing.allocator;
var list = MultiArrayList(struct {
tag: std.zig.Token.Tag,
start: u32,
}){};
defer list.deinit(ally);
try list.ensureCapacity(ally, 20);
try list.append(ally, .{ .tag = .keyword_const, .start = 0 });
try list.append(ally, .{ .tag = .identifier, .start = 6 });
try list.append(ally, .{ .tag = .equal, .start = 10 });
try list.append(ally, .{ .tag = .builtin, .start = 12 });
try list.append(ally, .{ .tag = .l_paren, .start = 19 });
try list.append(ally, .{ .tag = .string_literal, .start = 20 });
try list.append(ally, .{ .tag = .r_paren, .start = 25 });
try list.append(ally, .{ .tag = .semicolon, .start = 26 });
try list.append(ally, .{ .tag = .keyword_pub, .start = 29 });
try list.append(ally, .{ .tag = .keyword_fn, .start = 33 });
try list.append(ally, .{ .tag = .identifier, .start = 36 });
try list.append(ally, .{ .tag = .l_paren, .start = 40 });
try list.append(ally, .{ .tag = .r_paren, .start = 41 });
try list.append(ally, .{ .tag = .identifier, .start = 43 });
try list.append(ally, .{ .tag = .bang, .start = 51 });
try list.append(ally, .{ .tag = .identifier, .start = 52 });
try list.append(ally, .{ .tag = .l_brace, .start = 57 });
try list.append(ally, .{ .tag = .identifier, .start = 63 });
try list.append(ally, .{ .tag = .period, .start = 66 });
try list.append(ally, .{ .tag = .identifier, .start = 67 });
try list.append(ally, .{ .tag = .period, .start = 70 });
try list.append(ally, .{ .tag = .identifier, .start = 71 });
try list.append(ally, .{ .tag = .l_paren, .start = 75 });
try list.append(ally, .{ .tag = .string_literal, .start = 76 });
try list.append(ally, .{ .tag = .comma, .start = 113 });
try list.append(ally, .{ .tag = .period, .start = 115 });
try list.append(ally, .{ .tag = .l_brace, .start = 116 });
try list.append(ally, .{ .tag = .r_brace, .start = 117 });
try list.append(ally, .{ .tag = .r_paren, .start = 118 });
try list.append(ally, .{ .tag = .semicolon, .start = 119 });
try list.append(ally, .{ .tag = .r_brace, .start = 121 });
try list.append(ally, .{ .tag = .eof, .start = 123 });
const tags = list.items(.tag);
std.testing.expectEqual(tags[1], .identifier);
std.testing.expectEqual(tags[2], .equal);
std.testing.expectEqual(tags[3], .builtin);
std.testing.expectEqual(tags[4], .l_paren);
std.testing.expectEqual(tags[5], .string_literal);
std.testing.expectEqual(tags[6], .r_paren);
std.testing.expectEqual(tags[7], .semicolon);
std.testing.expectEqual(tags[8], .keyword_pub);
std.testing.expectEqual(tags[9], .keyword_fn);
std.testing.expectEqual(tags[10], .identifier);
std.testing.expectEqual(tags[11], .l_paren);
std.testing.expectEqual(tags[12], .r_paren);
std.testing.expectEqual(tags[13], .identifier);
std.testing.expectEqual(tags[14], .bang);
std.testing.expectEqual(tags[15], .identifier);
std.testing.expectEqual(tags[16], .l_brace);
std.testing.expectEqual(tags[17], .identifier);
std.testing.expectEqual(tags[18], .period);
std.testing.expectEqual(tags[19], .identifier);
std.testing.expectEqual(tags[20], .period);
std.testing.expectEqual(tags[21], .identifier);
std.testing.expectEqual(tags[22], .l_paren);
std.testing.expectEqual(tags[23], .string_literal);
std.testing.expectEqual(tags[24], .comma);
std.testing.expectEqual(tags[25], .period);
std.testing.expectEqual(tags[26], .l_brace);
std.testing.expectEqual(tags[27], .r_brace);
std.testing.expectEqual(tags[28], .r_paren);
std.testing.expectEqual(tags[29], .semicolon);
std.testing.expectEqual(tags[30], .r_brace);
std.testing.expectEqual(tags[31], .eof);
}

View File

@ -20,6 +20,7 @@ pub const ComptimeStringMap = @import("comptime_string_map.zig").ComptimeStringM
pub const DynLib = @import("dynamic_library.zig").DynLib;
pub const HashMap = hash_map.HashMap;
pub const HashMapUnmanaged = hash_map.HashMapUnmanaged;
pub const MultiArrayList = @import("multi_array_list.zig").MultiArrayList;
pub const PackedIntArray = @import("packed_int_array.zig").PackedIntArray;
pub const PackedIntArrayEndian = @import("packed_int_array.zig").PackedIntArrayEndian;
pub const PackedIntSlice = @import("packed_int_array.zig").PackedIntSlice;

View File

@ -12,7 +12,6 @@ pub const fmtId = @import("zig/fmt.zig").fmtId;
pub const fmtEscapes = @import("zig/fmt.zig").fmtEscapes;
pub const parse = @import("zig/parse.zig").parse;
pub const parseStringLiteral = @import("zig/string_literal.zig").parse;
pub const render = @import("zig/render.zig").render;
pub const ast = @import("zig/ast.zig");
pub const system = @import("zig/system.zig");
pub const CrossTarget = @import("zig/cross_target.zig").CrossTarget;

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

844
src/BuiltinFn.zig Normal file
View File

@ -0,0 +1,844 @@
const std = @import("std");
pub const Tag = enum {
add_with_overflow,
align_cast,
align_of,
as,
async_call,
atomic_load,
atomic_rmw,
atomic_store,
bit_cast,
bit_offset_of,
bool_to_int,
bit_size_of,
breakpoint,
mul_add,
byte_swap,
bit_reverse,
byte_offset_of,
call,
c_define,
c_import,
c_include,
clz,
cmpxchg_strong,
cmpxchg_weak,
compile_error,
compile_log,
ctz,
c_undef,
div_exact,
div_floor,
div_trunc,
embed_file,
enum_to_int,
error_name,
error_return_trace,
error_to_int,
err_set_cast,
@"export",
fence,
field,
field_parent_ptr,
float_cast,
float_to_int,
frame,
Frame,
frame_address,
frame_size,
has_decl,
has_field,
import,
int_cast,
int_to_enum,
int_to_error,
int_to_float,
int_to_ptr,
memcpy,
memset,
wasm_memory_size,
wasm_memory_grow,
mod,
mul_with_overflow,
panic,
pop_count,
ptr_cast,
ptr_to_int,
rem,
return_address,
set_align_stack,
set_cold,
set_eval_branch_quota,
set_float_mode,
set_runtime_safety,
shl_exact,
shl_with_overflow,
shr_exact,
shuffle,
size_of,
splat,
reduce,
src,
sqrt,
sin,
cos,
exp,
exp2,
log,
log2,
log10,
fabs,
floor,
ceil,
trunc,
round,
sub_with_overflow,
tag_name,
This,
truncate,
Type,
type_info,
type_name,
TypeOf,
union_init,
};
tag: Tag,
/// `true` if the builtin call can take advantage of a result location pointer.
needs_mem_loc: bool = false,
/// `true` if the builtin call can be the left-hand side of an expression (assigned to).
allows_lvalue: bool = false,
/// The number of parameters to this builtin function. `null` means variable number
/// of parameters.
param_count: ?u8,
pub const list = list: {
@setEvalBranchQuota(3000);
break :list std.ComptimeStringMap(@This(), .{
.{
"@addWithOverflow",
.{
.tag = .add_with_overflow,
.param_count = 4,
},
},
.{
"@alignCast",
.{
.tag = .align_cast,
.param_count = 1,
},
},
.{
"@alignOf",
.{
.tag = .align_of,
.param_count = 1,
},
},
.{
"@as",
.{
.tag = .as,
.needs_mem_loc = true,
.param_count = 2,
},
},
.{
"@asyncCall",
.{
.tag = .async_call,
.param_count = null,
},
},
.{
"@atomicLoad",
.{
.tag = .atomic_load,
.param_count = 3,
},
},
.{
"@atomicRmw",
.{
.tag = .atomic_rmw,
.param_count = 5,
},
},
.{
"@atomicStore",
.{
.tag = .atomic_store,
.param_count = 4,
},
},
.{
"@bitCast",
.{
.tag = .bit_cast,
.needs_mem_loc = true,
.param_count = 2,
},
},
.{
"@bitOffsetOf",
.{
.tag = .bit_offset_of,
.param_count = 2,
},
},
.{
"@boolToInt",
.{
.tag = .bool_to_int,
.param_count = 1,
},
},
.{
"@bitSizeOf",
.{
.tag = .bit_size_of,
.param_count = 1,
},
},
.{
"@breakpoint",
.{
.tag = .breakpoint,
.param_count = 0,
},
},
.{
"@mulAdd",
.{
.tag = .mul_add,
.param_count = 4,
},
},
.{
"@byteSwap",
.{
.tag = .byte_swap,
.param_count = 2,
},
},
.{
"@bitReverse",
.{
.tag = .bit_reverse,
.param_count = 2,
},
},
.{
"@byteOffsetOf",
.{
.tag = .byte_offset_of,
.param_count = 2,
},
},
.{
"@call",
.{
.tag = .call,
.needs_mem_loc = true,
.param_count = 3,
},
},
.{
"@cDefine",
.{
.tag = .c_define,
.param_count = 2,
},
},
.{
"@cImport",
.{
.tag = .c_import,
.param_count = 1,
},
},
.{
"@cInclude",
.{
.tag = .c_include,
.param_count = 1,
},
},
.{
"@clz",
.{
.tag = .clz,
.param_count = 2,
},
},
.{
"@cmpxchgStrong",
.{
.tag = .cmpxchg_strong,
.param_count = 6,
},
},
.{
"@cmpxchgWeak",
.{
.tag = .cmpxchg_weak,
.param_count = 6,
},
},
.{
"@compileError",
.{
.tag = .compile_error,
.param_count = 1,
},
},
.{
"@compileLog",
.{
.tag = .compile_log,
.param_count = null,
},
},
.{
"@ctz",
.{
.tag = .ctz,
.param_count = 2,
},
},
.{
"@cUndef",
.{
.tag = .c_undef,
.param_count = 1,
},
},
.{
"@divExact",
.{
.tag = .div_exact,
.param_count = 2,
},
},
.{
"@divFloor",
.{
.tag = .div_floor,
.param_count = 2,
},
},
.{
"@divTrunc",
.{
.tag = .div_trunc,
.param_count = 2,
},
},
.{
"@embedFile",
.{
.tag = .embed_file,
.param_count = 1,
},
},
.{
"@enumToInt",
.{
.tag = .enum_to_int,
.param_count = 1,
},
},
.{
"@errorName",
.{
.tag = .error_name,
.param_count = 1,
},
},
.{
"@errorReturnTrace",
.{
.tag = .error_return_trace,
.param_count = 0,
},
},
.{
"@errorToInt",
.{
.tag = .error_to_int,
.param_count = 1,
},
},
.{
"@errSetCast",
.{
.tag = .err_set_cast,
.param_count = 2,
},
},
.{
"@export",
.{
.tag = .@"export",
.param_count = 2,
},
},
.{
"@fence",
.{
.tag = .fence,
.param_count = 0,
},
},
.{
"@field",
.{
.tag = .field,
.needs_mem_loc = true,
.param_count = 2,
.allows_lvalue = true,
},
},
.{
"@fieldParentPtr",
.{
.tag = .field_parent_ptr,
.param_count = 3,
},
},
.{
"@floatCast",
.{
.tag = .float_cast,
.param_count = 1,
},
},
.{
"@floatToInt",
.{
.tag = .float_to_int,
.param_count = 1,
},
},
.{
"@frame",
.{
.tag = .frame,
.param_count = 0,
},
},
.{
"@Frame",
.{
.tag = .Frame,
.param_count = 1,
},
},
.{
"@frameAddress",
.{
.tag = .frame_address,
.param_count = 0,
},
},
.{
"@frameSize",
.{
.tag = .frame_size,
.param_count = 1,
},
},
.{
"@hasDecl",
.{
.tag = .has_decl,
.param_count = 2,
},
},
.{
"@hasField",
.{
.tag = .has_field,
.param_count = 2,
},
},
.{
"@import",
.{
.tag = .import,
.param_count = 1,
},
},
.{
"@intCast",
.{
.tag = .int_cast,
.param_count = 1,
},
},
.{
"@intToEnum",
.{
.tag = .int_to_enum,
.param_count = 1,
},
},
.{
"@intToError",
.{
.tag = .int_to_error,
.param_count = 1,
},
},
.{
"@intToFloat",
.{
.tag = .int_to_float,
.param_count = 1,
},
},
.{
"@intToPtr",
.{
.tag = .int_to_ptr,
.param_count = 2,
},
},
.{
"@memcpy",
.{
.tag = .memcpy,
.param_count = 3,
},
},
.{
"@memset",
.{
.tag = .memset,
.param_count = 3,
},
},
.{
"@wasmMemorySize",
.{
.tag = .wasm_memory_size,
.param_count = 1,
},
},
.{
"@wasmMemoryGrow",
.{
.tag = .wasm_memory_grow,
.param_count = 2,
},
},
.{
"@mod",
.{
.tag = .mod,
.param_count = 2,
},
},
.{
"@mulWithOverflow",
.{
.tag = .mul_with_overflow,
.param_count = 4,
},
},
.{
"@panic",
.{
.tag = .panic,
.param_count = 1,
},
},
.{
"@popCount",
.{
.tag = .pop_count,
.param_count = 2,
},
},
.{
"@ptrCast",
.{
.tag = .ptr_cast,
.param_count = 2,
},
},
.{
"@ptrToInt",
.{
.tag = .ptr_to_int,
.param_count = 1,
},
},
.{
"@rem",
.{
.tag = .rem,
.param_count = 2,
},
},
.{
"@returnAddress",
.{
.tag = .return_address,
.param_count = 0,
},
},
.{
"@setAlignStack",
.{
.tag = .set_align_stack,
.param_count = 1,
},
},
.{
"@setCold",
.{
.tag = .set_cold,
.param_count = 1,
},
},
.{
"@setEvalBranchQuota",
.{
.tag = .set_eval_branch_quota,
.param_count = 1,
},
},
.{
"@setFloatMode",
.{
.tag = .set_float_mode,
.param_count = 1,
},
},
.{
"@setRuntimeSafety",
.{
.tag = .set_runtime_safety,
.param_count = 1,
},
},
.{
"@shlExact",
.{
.tag = .shl_exact,
.param_count = 2,
},
},
.{
"@shlWithOverflow",
.{
.tag = .shl_with_overflow,
.param_count = 4,
},
},
.{
"@shrExact",
.{
.tag = .shr_exact,
.param_count = 2,
},
},
.{
"@shuffle",
.{
.tag = .shuffle,
.param_count = 4,
},
},
.{
"@sizeOf",
.{
.tag = .size_of,
.param_count = 1,
},
},
.{
"@splat",
.{
.tag = .splat,
.needs_mem_loc = true,
.param_count = 2,
},
},
.{
"@reduce",
.{
.tag = .reduce,
.param_count = 2,
},
},
.{
"@src",
.{
.tag = .src,
.needs_mem_loc = true,
.param_count = 0,
},
},
.{
"@sqrt",
.{
.tag = .sqrt,
.param_count = 1,
},
},
.{
"@sin",
.{
.tag = .sin,
.param_count = 1,
},
},
.{
"@cos",
.{
.tag = .cos,
.param_count = 1,
},
},
.{
"@exp",
.{
.tag = .exp,
.param_count = 1,
},
},
.{
"@exp2",
.{
.tag = .exp2,
.param_count = 1,
},
},
.{
"@log",
.{
.tag = .log,
.param_count = 1,
},
},
.{
"@log2",
.{
.tag = .log2,
.param_count = 1,
},
},
.{
"@log10",
.{
.tag = .log10,
.param_count = 1,
},
},
.{
"@fabs",
.{
.tag = .fabs,
.param_count = 1,
},
},
.{
"@floor",
.{
.tag = .floor,
.param_count = 1,
},
},
.{
"@ceil",
.{
.tag = .ceil,
.param_count = 1,
},
},
.{
"@trunc",
.{
.tag = .trunc,
.param_count = 1,
},
},
.{
"@round",
.{
.tag = .round,
.param_count = 1,
},
},
.{
"@subWithOverflow",
.{
.tag = .sub_with_overflow,
.param_count = 4,
},
},
.{
"@tagName",
.{
.tag = .tag_name,
.param_count = 1,
},
},
.{
"@This",
.{
.tag = .This,
.param_count = 0,
},
},
.{
"@truncate",
.{
.tag = .truncate,
.param_count = 2,
},
},
.{
"@Type",
.{
.tag = .Type,
.param_count = 1,
},
},
.{
"@typeInfo",
.{
.tag = .type_info,
.param_count = 1,
},
},
.{
"@typeName",
.{
.tag = .type_name,
.param_count = 1,
},
},
.{
"@TypeOf",
.{
.tag = .TypeOf,
.param_count = null,
},
},
.{
"@unionInit",
.{
.tag = .union_init,
.needs_mem_loc = true,
.param_count = 3,
},
},
});
};

View File

@ -921,7 +921,7 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
// TODO this is duped so it can be freed in Container.deinit
.sub_file_path = try gpa.dupe(u8, root_pkg.root_src_path),
.source = .{ .unloaded = {} },
.contents = .{ .not_available = {} },
.tree = undefined,
.status = .never_loaded,
.pkg = root_pkg,
.root_container = .{
@ -1334,7 +1334,7 @@ pub fn update(self: *Compilation) !void {
self.c_object_work_queue.writeItemAssumeCapacity(entry.key);
}
const use_stage1 = build_options.is_stage1 and self.bin_file.options.use_llvm;
const use_stage1 = build_options.omit_stage2 or build_options.is_stage1 and self.bin_file.options.use_llvm;
if (!use_stage1) {
if (self.bin_file.options.module) |module| {
module.compile_log_text.shrinkAndFree(module.gpa, 0);
@ -1884,7 +1884,7 @@ pub fn cImport(comp: *Compilation, c_src: []const u8) !CImportResult {
const c_headers_dir_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{"include"});
const c_headers_dir_path_z = try arena.dupeZ(u8, c_headers_dir_path);
var clang_errors: []translate_c.ClangErrMsg = &[0]translate_c.ClangErrMsg{};
const tree = translate_c.translate(
var tree = translate_c.translate(
comp.gpa,
new_argv.ptr,
new_argv.ptr + new_argv.len,
@ -1903,7 +1903,7 @@ pub fn cImport(comp: *Compilation, c_src: []const u8) !CImportResult {
};
},
};
defer tree.deinit();
defer tree.deinit(comp.gpa);
if (comp.verbose_cimport) {
log.info("C import .d file: {s}", .{out_dep_path});
@ -1921,9 +1921,10 @@ pub fn cImport(comp: *Compilation, c_src: []const u8) !CImportResult {
var out_zig_file = try o_dir.createFile(cimport_zig_basename, .{});
defer out_zig_file.close();
var bos = std.io.bufferedWriter(out_zig_file.writer());
_ = try std.zig.render(comp.gpa, bos.writer(), tree);
try bos.flush();
const formatted = try tree.render(comp.gpa);
defer comp.gpa.free(formatted);
try out_zig_file.writeAll(formatted);
man.writeManifest() catch |err| {
log.warn("failed to write cache manifest for C import: {s}", .{@errorName(err)});
@ -1936,7 +1937,7 @@ pub fn cImport(comp: *Compilation, c_src: []const u8) !CImportResult {
"o", &digest, cimport_zig_basename,
});
if (comp.verbose_cimport) {
log.info("C import output: {s}\n", .{out_zig_path});
log.info("C import output: {s}", .{out_zig_path});
}
return CImportResult{
.out_zig_path = out_zig_path,
@ -3000,7 +3001,7 @@ pub fn updateSubCompilation(sub_compilation: *Compilation) !void {
for (errors.list) |full_err_msg| {
switch (full_err_msg) {
.src => |src| {
log.err("{s}:{d}:{d}: {s}\n", .{
log.err("{s}:{d}:{d}: {s}", .{
src.src_path,
src.line + 1,
src.column + 1,

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -127,6 +127,9 @@ pub const APSInt = opaque {
pub const getNumWords = ZigClangAPSInt_getNumWords;
extern fn ZigClangAPSInt_getNumWords(*const APSInt) c_uint;
pub const lessThanEqual = ZigClangAPSInt_lessThanEqual;
extern fn ZigClangAPSInt_lessThanEqual(*const APSInt, rhs: u64) bool;
};
pub const ASTContext = opaque {
@ -270,12 +273,12 @@ pub const CompoundAssignOperator = opaque {
pub const CompoundStmt = opaque {
pub const body_begin = ZigClangCompoundStmt_body_begin;
extern fn ZigClangCompoundStmt_body_begin(*const CompoundStmt) const_body_iterator;
extern fn ZigClangCompoundStmt_body_begin(*const CompoundStmt) ConstBodyIterator;
pub const body_end = ZigClangCompoundStmt_body_end;
extern fn ZigClangCompoundStmt_body_end(*const CompoundStmt) const_body_iterator;
extern fn ZigClangCompoundStmt_body_end(*const CompoundStmt) ConstBodyIterator;
pub const const_body_iterator = [*]const *Stmt;
pub const ConstBodyIterator = [*]const *Stmt;
};
pub const ConditionalOperator = opaque {};
@ -407,7 +410,7 @@ pub const Expr = opaque {
pub const getBeginLoc = ZigClangExpr_getBeginLoc;
extern fn ZigClangExpr_getBeginLoc(*const Expr) SourceLocation;
pub const EvaluateAsConstantExpr = ZigClangExpr_EvaluateAsConstantExpr;
pub const evaluateAsConstantExpr = ZigClangExpr_EvaluateAsConstantExpr;
extern fn ZigClangExpr_EvaluateAsConstantExpr(*const Expr, *ExprEvalResult, Expr_ConstExprUsage, *const ASTContext) bool;
};
@ -694,8 +697,6 @@ pub const ReturnStmt = opaque {
extern fn ZigClangReturnStmt_getRetValue(*const ReturnStmt) ?*const Expr;
};
pub const SkipFunctionBodiesScope = opaque {};
pub const SourceManager = opaque {
pub const getSpellingLoc = ZigClangSourceManager_getSpellingLoc;
extern fn ZigClangSourceManager_getSpellingLoc(*const SourceManager, Loc: SourceLocation) SourceLocation;

View File

@ -451,11 +451,16 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
const src_data: struct { lbrace_src: usize, rbrace_src: usize, source: []const u8 } = blk: {
const container_scope = module_fn.owner_decl.container;
const tree = container_scope.file_scope.contents.tree;
const fn_proto = tree.root_node.decls()[module_fn.owner_decl.src_index].castTag(.FnProto).?;
const block = fn_proto.getBodyNode().?.castTag(.Block).?;
const lbrace_src = tree.token_locs[block.lbrace].start;
const rbrace_src = tree.token_locs[block.rbrace].start;
const tree = container_scope.file_scope.tree;
const node_tags = tree.nodes.items(.tag);
const node_datas = tree.nodes.items(.data);
const token_starts = tree.tokens.items(.start);
const fn_decl = tree.rootDecls()[module_fn.owner_decl.src_index];
assert(node_tags[fn_decl] == .fn_decl);
const block = node_datas[fn_decl].rhs;
const lbrace_src = token_starts[tree.firstToken(block)];
const rbrace_src = token_starts[tree.lastToken(block)];
break :blk .{
.lbrace_src = lbrace_src,
.rbrace_src = rbrace_src,

View File

@ -1,4 +1,5 @@
const std = @import("std");
const assert = std.debug.assert;
const mem = std.mem;
const log = std.log.scoped(.c);
@ -42,7 +43,7 @@ pub const Object = struct {
next_arg_index: usize = 0,
next_local_index: usize = 0,
next_block_index: usize = 0,
indent_writer: std.io.AutoIndentingStream(std.ArrayList(u8).Writer),
indent_writer: IndentWriter(std.ArrayList(u8).Writer),
fn resolveInst(o: *Object, inst: *Inst) !CValue {
if (inst.value()) |_| {
@ -63,7 +64,7 @@ pub const Object = struct {
return local_value;
}
fn writer(o: *Object) std.io.AutoIndentingStream(std.ArrayList(u8).Writer).Writer {
fn writer(o: *Object) IndentWriter(std.ArrayList(u8).Writer).Writer {
return o.indent_writer.writer();
}
@ -796,3 +797,56 @@ fn genAsm(o: *Object, as: *Inst.Assembly) !CValue {
return o.dg.fail(o.dg.decl.src(), "TODO: C backend: inline asm expression result used", .{});
}
fn IndentWriter(comptime UnderlyingWriter: type) type {
return struct {
const Self = @This();
pub const Error = UnderlyingWriter.Error;
pub const Writer = std.io.Writer(*Self, Error, write);
pub const indent_delta = 4;
underlying_writer: UnderlyingWriter,
indent_count: usize = 0,
current_line_empty: bool = true,
pub fn writer(self: *Self) Writer {
return .{ .context = self };
}
pub fn write(self: *Self, bytes: []const u8) Error!usize {
if (bytes.len == 0) return @as(usize, 0);
const current_indent = self.indent_count * Self.indent_delta;
if (self.current_line_empty and current_indent > 0) {
try self.underlying_writer.writeByteNTimes(' ', current_indent);
}
self.current_line_empty = false;
return self.writeNoIndent(bytes);
}
pub fn insertNewline(self: *Self) Error!void {
_ = try self.writeNoIndent("\n");
}
pub fn pushIndent(self: *Self) void {
self.indent_count += 1;
}
pub fn popIndent(self: *Self) void {
assert(self.indent_count != 0);
self.indent_count -= 1;
}
fn writeNoIndent(self: *Self, bytes: []const u8) Error!usize {
if (bytes.len == 0) return @as(usize, 0);
try self.underlying_writer.writeAll(bytes);
if (bytes[bytes.len - 1] == '\n') {
self.current_line_empty = true;
}
return bytes.len;
}
};
}

View File

@ -317,6 +317,7 @@ pub const Inst = struct {
pub const base_tag = Tag.arg;
base: Inst,
/// This exists to be emitted into debug info.
name: [*:0]const u8,
pub fn operandCount(self: *const Arg) usize {

View File

@ -550,11 +550,11 @@ pub const File = struct {
id_symlink_basename,
&prev_digest_buf,
) catch |err| b: {
log.debug("archive new_digest={} readFile error: {s}", .{ digest, @errorName(err) });
log.debug("archive new_digest={x} readFile error: {s}", .{ digest, @errorName(err) });
break :b prev_digest_buf[0..0];
};
if (mem.eql(u8, prev_digest, &digest)) {
log.debug("archive digest={} match - skipping invocation", .{digest});
log.debug("archive digest={x} match - skipping invocation", .{digest});
base.lock = man.toOwnedLock();
return;
}

View File

@ -97,7 +97,7 @@ pub fn updateDecl(self: *C, module: *Module, decl: *Module.Decl) !void {
.value_map = codegen.CValueMap.init(module.gpa),
.indent_writer = undefined, // set later so we can get a pointer to object.code
};
object.indent_writer = std.io.autoIndentingStream(4, object.code.writer());
object.indent_writer = .{ .underlying_writer = object.code.writer() };
defer object.value_map.deinit();
defer object.code.deinit();
defer object.dg.fwd_decl.deinit();

View File

@ -892,17 +892,17 @@ fn linkWithLLD(self: *Coff, comp: *Compilation) !void {
id_symlink_basename,
&prev_digest_buf,
) catch |err| blk: {
log.debug("COFF LLD new_digest={} error: {s}", .{ digest, @errorName(err) });
log.debug("COFF LLD new_digest={x} error: {s}", .{ digest, @errorName(err) });
// Handle this as a cache miss.
break :blk prev_digest_buf[0..0];
};
if (mem.eql(u8, prev_digest, &digest)) {
log.debug("COFF LLD digest={} match - skipping invocation", .{digest});
log.debug("COFF LLD digest={x} match - skipping invocation", .{digest});
// Hot diggity dog! The output binary is already there.
self.base.lock = man.toOwnedLock();
return;
}
log.debug("COFF LLD prev_digest={} new_digest={}", .{ prev_digest, digest });
log.debug("COFF LLD prev_digest={x} new_digest={x}", .{ prev_digest, digest });
// We are about to change the output file to be different, so we invalidate the build hash now.
directory.handle.deleteFile(id_symlink_basename) catch |err| switch (err) {

View File

@ -1365,17 +1365,17 @@ fn linkWithLLD(self: *Elf, comp: *Compilation) !void {
id_symlink_basename,
&prev_digest_buf,
) catch |err| blk: {
log.debug("ELF LLD new_digest={} error: {s}", .{ digest, @errorName(err) });
log.debug("ELF LLD new_digest={x} error: {s}", .{ digest, @errorName(err) });
// Handle this as a cache miss.
break :blk prev_digest_buf[0..0];
};
if (mem.eql(u8, prev_digest, &digest)) {
log.debug("ELF LLD digest={} match - skipping invocation", .{digest});
log.debug("ELF LLD digest={x} match - skipping invocation", .{digest});
// Hot diggity dog! The output binary is already there.
self.base.lock = man.toOwnedLock();
return;
}
log.debug("ELF LLD prev_digest={} new_digest={}", .{ prev_digest, digest });
log.debug("ELF LLD prev_digest={x} new_digest={x}", .{ prev_digest, digest });
// We are about to change the output file to be different, so we invalidate the build hash now.
directory.handle.deleteFile(id_symlink_basename) catch |err| switch (err) {
@ -2223,13 +2223,19 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void {
try dbg_line_buffer.ensureCapacity(26);
const line_off: u28 = blk: {
const tree = decl.container.file_scope.contents.tree;
const file_ast_decls = tree.root_node.decls();
const tree = decl.container.file_scope.tree;
const node_tags = tree.nodes.items(.tag);
const node_datas = tree.nodes.items(.data);
const token_starts = tree.tokens.items(.start);
const file_ast_decls = tree.rootDecls();
// TODO Look into improving the performance here by adding a token-index-to-line
// lookup table. Currently this involves scanning over the source code for newlines.
const fn_proto = file_ast_decls[decl.src_index].castTag(.FnProto).?;
const block = fn_proto.getBodyNode().?.castTag(.Block).?;
const line_delta = std.zig.lineDelta(tree.source, 0, tree.token_locs[block.lbrace].start);
const fn_decl = file_ast_decls[decl.src_index];
assert(node_tags[fn_decl] == .fn_decl);
const block = node_datas[fn_decl].rhs;
const lbrace = tree.firstToken(block);
const line_delta = std.zig.lineDelta(tree.source, 0, token_starts[lbrace]);
break :blk @intCast(u28, line_delta);
};
@ -2744,13 +2750,19 @@ pub fn updateDeclLineNumber(self: *Elf, module: *Module, decl: *const Module.Dec
if (self.llvm_ir_module) |_| return;
const tree = decl.container.file_scope.contents.tree;
const file_ast_decls = tree.root_node.decls();
const tree = decl.container.file_scope.tree;
const node_tags = tree.nodes.items(.tag);
const node_datas = tree.nodes.items(.data);
const token_starts = tree.tokens.items(.start);
const file_ast_decls = tree.rootDecls();
// TODO Look into improving the performance here by adding a token-index-to-line
// lookup table. Currently this involves scanning over the source code for newlines.
const fn_proto = file_ast_decls[decl.src_index].castTag(.FnProto).?;
const block = fn_proto.getBodyNode().?.castTag(.Block).?;
const line_delta = std.zig.lineDelta(tree.source, 0, tree.token_locs[block.lbrace].start);
const fn_decl = file_ast_decls[decl.src_index];
assert(node_tags[fn_decl] == .fn_decl);
const block = node_datas[fn_decl].rhs;
const lbrace = tree.firstToken(block);
const line_delta = std.zig.lineDelta(tree.source, 0, token_starts[lbrace]);
const casted_line_off = @intCast(u28, line_delta);
const shdr = &self.sections.items[self.debug_line_section_index.?];
@ -3025,7 +3037,7 @@ const min_nop_size = 2;
/// Writes to the file a buffer, prefixed and suffixed by the specified number of
/// bytes of NOPs. Asserts each padding size is at least `min_nop_size` and total padding bytes
/// are less than 126,976 bytes (if this limit is ever reached, this function can be
/// are less than 1044480 bytes (if this limit is ever reached, this function can be
/// improved to make more than one pwritev call, or the limit can be raised by a fixed
/// amount by increasing the length of `vecs`).
fn pwriteDbgLineNops(
@ -3040,7 +3052,7 @@ fn pwriteDbgLineNops(
const page_of_nops = [1]u8{DW.LNS_negate_stmt} ** 4096;
const three_byte_nop = [3]u8{ DW.LNS_advance_pc, 0b1000_0000, 0 };
var vecs: [32]std.os.iovec_const = undefined;
var vecs: [256]std.os.iovec_const = undefined;
var vec_index: usize = 0;
{
var padding_left = prev_padding_size;

View File

@ -556,17 +556,17 @@ fn linkWithLLD(self: *MachO, comp: *Compilation) !void {
id_symlink_basename,
&prev_digest_buf,
) catch |err| blk: {
log.debug("MachO LLD new_digest={} error: {s}", .{ digest, @errorName(err) });
log.debug("MachO LLD new_digest={x} error: {s}", .{ digest, @errorName(err) });
// Handle this as a cache miss.
break :blk prev_digest_buf[0..0];
};
if (mem.eql(u8, prev_digest, &digest)) {
log.debug("MachO LLD digest={} match - skipping invocation", .{digest});
log.debug("MachO LLD digest={x} match - skipping invocation", .{digest});
// Hot diggity dog! The output binary is already there.
self.base.lock = man.toOwnedLock();
return;
}
log.debug("MachO LLD prev_digest={} new_digest={}", .{ prev_digest, digest });
log.debug("MachO LLD prev_digest={x} new_digest={x}", .{ prev_digest, digest });
// We are about to change the output file to be different, so we invalidate the build hash now.
directory.handle.deleteFile(id_symlink_basename) catch |err| switch (err) {

View File

@ -904,13 +904,19 @@ pub fn updateDeclLineNumber(self: *DebugSymbols, module: *Module, decl: *const M
const tracy = trace(@src());
defer tracy.end();
const tree = decl.container.file_scope.contents.tree;
const file_ast_decls = tree.root_node.decls();
const tree = decl.container.file_scope.tree;
const node_tags = tree.nodes.items(.tag);
const node_datas = tree.nodes.items(.data);
const token_starts = tree.tokens.items(.start);
const file_ast_decls = tree.rootDecls();
// TODO Look into improving the performance here by adding a token-index-to-line
// lookup table. Currently this involves scanning over the source code for newlines.
const fn_proto = file_ast_decls[decl.src_index].castTag(.FnProto).?;
const block = fn_proto.getBodyNode().?.castTag(.Block).?;
const line_delta = std.zig.lineDelta(tree.source, 0, tree.token_locs[block.lbrace].start);
const fn_decl = file_ast_decls[decl.src_index];
assert(node_tags[fn_decl] == .fn_decl);
const block = node_datas[fn_decl].rhs;
const lbrace = tree.firstToken(block);
const line_delta = std.zig.lineDelta(tree.source, 0, token_starts[lbrace]);
const casted_line_off = @intCast(u28, line_delta);
const dwarf_segment = &self.load_commands.items[self.dwarf_segment_cmd_index.?].Segment;
@ -948,13 +954,19 @@ pub fn initDeclDebugBuffers(
try dbg_line_buffer.ensureCapacity(26);
const line_off: u28 = blk: {
const tree = decl.container.file_scope.contents.tree;
const file_ast_decls = tree.root_node.decls();
const tree = decl.container.file_scope.tree;
const node_tags = tree.nodes.items(.tag);
const node_datas = tree.nodes.items(.data);
const token_starts = tree.tokens.items(.start);
const file_ast_decls = tree.rootDecls();
// TODO Look into improving the performance here by adding a token-index-to-line
// lookup table. Currently this involves scanning over the source code for newlines.
const fn_proto = file_ast_decls[decl.src_index].castTag(.FnProto).?;
const block = fn_proto.getBodyNode().?.castTag(.Block).?;
const line_delta = std.zig.lineDelta(tree.source, 0, tree.token_locs[block.lbrace].start);
const fn_decl = file_ast_decls[decl.src_index];
assert(node_tags[fn_decl] == .fn_decl);
const block = node_datas[fn_decl].rhs;
const lbrace = tree.firstToken(block);
const line_delta = std.zig.lineDelta(tree.source, 0, token_starts[lbrace]);
break :blk @intCast(u28, line_delta);
};

View File

@ -391,17 +391,17 @@ fn linkWithLLD(self: *Wasm, comp: *Compilation) !void {
id_symlink_basename,
&prev_digest_buf,
) catch |err| blk: {
log.debug("WASM LLD new_digest={} error: {s}", .{ digest, @errorName(err) });
log.debug("WASM LLD new_digest={x} error: {s}", .{ digest, @errorName(err) });
// Handle this as a cache miss.
break :blk prev_digest_buf[0..0];
};
if (mem.eql(u8, prev_digest, &digest)) {
log.debug("WASM LLD digest={} match - skipping invocation", .{digest});
log.debug("WASM LLD digest={x} match - skipping invocation", .{digest});
// Hot diggity dog! The output binary is already there.
self.base.lock = man.toOwnedLock();
return;
}
log.debug("WASM LLD prev_digest={} new_digest={}", .{ prev_digest, digest });
log.debug("WASM LLD prev_digest={x} new_digest={x}", .{ prev_digest, digest });
// We are about to change the output file to be different, so we invalidate the build hash now.
directory.handle.deleteFile(id_symlink_basename) catch |err| switch (err) {

View File

@ -2158,7 +2158,7 @@ fn cmdTranslateC(comp: *Compilation, arena: *Allocator, enable_cache: bool) !voi
const c_headers_dir_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{"include"});
const c_headers_dir_path_z = try arena.dupeZ(u8, c_headers_dir_path);
var clang_errors: []translate_c.ClangErrMsg = &[0]translate_c.ClangErrMsg{};
const tree = translate_c.translate(
var tree = translate_c.translate(
comp.gpa,
new_argv.ptr,
new_argv.ptr + new_argv.len,
@ -2179,7 +2179,7 @@ fn cmdTranslateC(comp: *Compilation, arena: *Allocator, enable_cache: bool) !voi
process.exit(1);
},
};
defer tree.deinit();
defer tree.deinit(comp.gpa);
if (out_dep_path) |dep_file_path| {
const dep_basename = std.fs.path.basename(dep_file_path);
@ -2193,16 +2193,21 @@ fn cmdTranslateC(comp: *Compilation, arena: *Allocator, enable_cache: bool) !voi
const digest = man.final();
const o_sub_path = try fs.path.join(arena, &[_][]const u8{ "o", &digest });
var o_dir = try comp.local_cache_directory.handle.makeOpenPath(o_sub_path, .{});
defer o_dir.close();
var zig_file = try o_dir.createFile(translated_zig_basename, .{});
defer zig_file.close();
var bw = io.bufferedWriter(zig_file.writer());
_ = try std.zig.render(comp.gpa, bw.writer(), tree);
try bw.flush();
const formatted = try tree.render(comp.gpa);
defer comp.gpa.free(formatted);
man.writeManifest() catch |err| warn("failed to write cache manifest: {s}", .{@errorName(err)});
try zig_file.writeAll(formatted);
man.writeManifest() catch |err| warn("failed to write cache manifest: {s}", .{
@errorName(err),
});
break :digest digest;
};
@ -2689,10 +2694,10 @@ pub fn cmdFmt(gpa: *Allocator, args: []const []const u8) !void {
const source_code = try stdin.readAllAlloc(gpa, max_src_size);
defer gpa.free(source_code);
const tree = std.zig.parse(gpa, source_code) catch |err| {
var tree = std.zig.parse(gpa, source_code) catch |err| {
fatal("error parsing stdin: {s}", .{err});
};
defer tree.deinit();
defer tree.deinit(gpa);
for (tree.errors) |parse_error| {
try printErrMsgToFile(gpa, parse_error, tree, "<stdin>", stderr_file, color);
@ -2700,16 +2705,15 @@ pub fn cmdFmt(gpa: *Allocator, args: []const []const u8) !void {
if (tree.errors.len != 0) {
process.exit(1);
}
const formatted = try tree.render(gpa);
defer gpa.free(formatted);
if (check_flag) {
const anything_changed = try std.zig.render(gpa, io.null_writer, tree);
const code = if (anything_changed) @as(u8, 1) else @as(u8, 0);
const code: u8 = @boolToInt(mem.eql(u8, formatted, source_code));
process.exit(code);
}
var bw = io.bufferedWriter(io.getStdOut().writer());
_ = try std.zig.render(gpa, bw.writer(), tree);
try bw.flush();
return;
return io.getStdOut().writeAll(formatted);
}
if (input_files.items.len == 0) {
@ -2846,8 +2850,8 @@ fn fmtPathFile(
// Add to set after no longer possible to get error.IsDir.
if (try fmt.seen.fetchPut(stat.inode, {})) |_| return;
const tree = try std.zig.parse(fmt.gpa, source_code);
defer tree.deinit();
var tree = try std.zig.parse(fmt.gpa, source_code);
defer tree.deinit(fmt.gpa);
for (tree.errors) |parse_error| {
try printErrMsgToFile(fmt.gpa, parse_error, tree, file_path, std.io.getStdErr(), fmt.color);
@ -2857,22 +2861,19 @@ fn fmtPathFile(
return;
}
if (check_mode) {
const anything_changed = try std.zig.render(fmt.gpa, io.null_writer, tree);
if (anything_changed) {
const stdout = io.getStdOut().writer();
try stdout.print("{s}\n", .{file_path});
fmt.any_error = true;
}
} else {
// As a heuristic, we make enough capacity for the same as the input source.
try fmt.out_buffer.ensureCapacity(source_code.len);
fmt.out_buffer.items.len = 0;
const writer = fmt.out_buffer.writer();
const anything_changed = try std.zig.render(fmt.gpa, writer, tree);
if (!anything_changed)
return; // Good thing we didn't waste any file system access on this.
// As a heuristic, we make enough capacity for the same as the input source.
fmt.out_buffer.shrinkRetainingCapacity(0);
try fmt.out_buffer.ensureCapacity(source_code.len);
try tree.renderToArrayList(&fmt.out_buffer);
if (mem.eql(u8, fmt.out_buffer.items, source_code))
return;
if (check_mode) {
const stdout = io.getStdOut().writer();
try stdout.print("{s}\n", .{file_path});
fmt.any_error = true;
} else {
var af = try dir.atomicFile(sub_path, .{ .mode = stat.mode });
defer af.deinit();
@ -2886,7 +2887,7 @@ fn fmtPathFile(
fn printErrMsgToFile(
gpa: *mem.Allocator,
parse_error: ast.Error,
tree: *ast.Tree,
tree: ast.Tree,
path: []const u8,
file: fs.File,
color: Color,
@ -2896,19 +2897,17 @@ fn printErrMsgToFile(
.on => true,
.off => false,
};
const lok_token = parse_error.loc();
const span_first = lok_token;
const span_last = lok_token;
const lok_token = parse_error.token;
const first_token = tree.token_locs[span_first];
const last_token = tree.token_locs[span_last];
const start_loc = tree.tokenLocationLoc(0, first_token);
const end_loc = tree.tokenLocationLoc(first_token.end, last_token);
const token_starts = tree.tokens.items(.start);
const token_tags = tree.tokens.items(.tag);
const first_token_start = token_starts[lok_token];
const start_loc = tree.tokenLocation(0, lok_token);
var text_buf = std.ArrayList(u8).init(gpa);
defer text_buf.deinit();
const writer = text_buf.writer();
try parse_error.render(tree.token_ids, writer);
try tree.renderError(parse_error, writer);
const text = text_buf.items;
const stream = file.writer();
@ -2925,8 +2924,12 @@ fn printErrMsgToFile(
}
try stream.writeByte('\n');
try stream.writeByteNTimes(' ', start_loc.column);
try stream.writeByteNTimes('~', last_token.end - first_token.start);
try stream.writeByte('\n');
if (token_tags[lok_token].lexeme()) |lexeme| {
try stream.writeByteNTimes('~', lexeme.len);
try stream.writeByte('\n');
} else {
try stream.writeAll("^\n");
}
}
pub const info_zen =

View File

@ -155,7 +155,7 @@ pub const TestContext = struct {
self.updates.append(.{
.src = src,
.case = .{ .Header = result },
}) catch unreachable;
}) catch @panic("out of memory");
}
/// Adds a subcase in which the module is updated with `src`, compiled,
@ -164,7 +164,7 @@ pub const TestContext = struct {
self.updates.append(.{
.src = src,
.case = .{ .Execution = result },
}) catch unreachable;
}) catch @panic("out of memory");
}
/// Adds a subcase in which the module is updated with `src`, compiled,
@ -173,7 +173,7 @@ pub const TestContext = struct {
self.updates.append(.{
.src = src,
.case = .{ .CompareObjectFile = result },
}) catch unreachable;
}) catch @panic("out of memory");
}
/// Adds a subcase in which the module is updated with `src`, which
@ -181,7 +181,7 @@ pub const TestContext = struct {
/// for the expected reasons, given in sequential order in `errors` in
/// the form `:line:column: error: message`.
pub fn addError(self: *Case, src: [:0]const u8, errors: []const []const u8) void {
var array = self.updates.allocator.alloc(ErrorMsg, errors.len) catch unreachable;
var array = self.updates.allocator.alloc(ErrorMsg, errors.len) catch @panic("out of memory");
for (errors) |err_msg_line, i| {
if (std.mem.startsWith(u8, err_msg_line, "error: ")) {
array[i] = .{
@ -224,7 +224,7 @@ pub const TestContext = struct {
},
};
}
self.updates.append(.{ .src = src, .case = .{ .Error = array } }) catch unreachable;
self.updates.append(.{ .src = src, .case = .{ .Error = array } }) catch @panic("out of memory");
}
/// Adds a subcase in which the module is updated with `src`, and
@ -247,7 +247,7 @@ pub const TestContext = struct {
.output_mode = .Exe,
.extension = extension,
.files = std.ArrayList(File).init(ctx.cases.allocator),
}) catch unreachable;
}) catch @panic("out of memory");
return &ctx.cases.items[ctx.cases.items.len - 1];
}
@ -262,15 +262,17 @@ pub const TestContext = struct {
}
pub fn exeFromCompiledC(ctx: *TestContext, name: []const u8, target: CrossTarget) *Case {
const prefixed_name = std.fmt.allocPrint(ctx.cases.allocator, "CBE: {s}", .{name}) catch
@panic("out of memory");
ctx.cases.append(Case{
.name = name,
.name = prefixed_name,
.target = target,
.updates = std.ArrayList(Update).init(ctx.cases.allocator),
.output_mode = .Exe,
.extension = .Zig,
.object_format = .c,
.files = std.ArrayList(File).init(ctx.cases.allocator),
}) catch unreachable;
}) catch @panic("out of memory");
return &ctx.cases.items[ctx.cases.items.len - 1];
}
@ -285,7 +287,7 @@ pub const TestContext = struct {
.extension = .Zig,
.files = std.ArrayList(File).init(ctx.cases.allocator),
.llvm_backend = true,
}) catch unreachable;
}) catch @panic("out of memory");
return &ctx.cases.items[ctx.cases.items.len - 1];
}
@ -302,7 +304,7 @@ pub const TestContext = struct {
.output_mode = .Obj,
.extension = extension,
.files = std.ArrayList(File).init(ctx.cases.allocator),
}) catch unreachable;
}) catch @panic("out of memory");
return &ctx.cases.items[ctx.cases.items.len - 1];
}
@ -326,7 +328,7 @@ pub const TestContext = struct {
.extension = ext,
.object_format = .c,
.files = std.ArrayList(File).init(ctx.cases.allocator),
}) catch unreachable;
}) catch @panic("out of memory");
return &ctx.cases.items[ctx.cases.items.len - 1];
}

File diff suppressed because it is too large Load Diff

2529
src/translate_c/ast.zig Normal file

File diff suppressed because it is too large Load Diff

View File

@ -28,6 +28,8 @@ pub const Type = extern union {
.i32,
.u64,
.i64,
.u128,
.i128,
.usize,
.isize,
.c_short,
@ -357,6 +359,8 @@ pub const Type = extern union {
.i32,
.u64,
.i64,
.u128,
.i128,
.usize,
.isize,
.c_short,
@ -506,6 +510,8 @@ pub const Type = extern union {
.i32,
.u64,
.i64,
.u128,
.i128,
.usize,
.isize,
.c_short,
@ -772,6 +778,8 @@ pub const Type = extern union {
.i32,
.u64,
.i64,
.u128,
.i128,
.usize,
.isize,
.c_short,
@ -868,6 +876,7 @@ pub const Type = extern union {
.i16, .u16 => return 2,
.i32, .u32 => return 4,
.i64, .u64 => return 8,
.u128, .i128 => return 16,
.isize,
.usize,
@ -1010,6 +1019,7 @@ pub const Type = extern union {
.i16, .u16 => return 2,
.i32, .u32 => return 4,
.i64, .u64 => return 8,
.u128, .i128 => return 16,
.@"anyframe", .anyframe_T, .isize, .usize => return @divExact(target.cpu.arch.ptrBitWidth(), 8),
@ -1109,6 +1119,8 @@ pub const Type = extern union {
.i32,
.u64,
.i64,
.u128,
.i128,
.usize,
.isize,
.c_short,
@ -1191,6 +1203,8 @@ pub const Type = extern union {
.i32,
.u64,
.i64,
.u128,
.i128,
.usize,
.isize,
.c_short,
@ -1278,6 +1292,8 @@ pub const Type = extern union {
.i32,
.u64,
.i64,
.u128,
.i128,
.usize,
.isize,
.c_short,
@ -1359,6 +1375,8 @@ pub const Type = extern union {
.i32,
.u64,
.i64,
.u128,
.i128,
.usize,
.isize,
.c_short,
@ -1440,6 +1458,8 @@ pub const Type = extern union {
.i32,
.u64,
.i64,
.u128,
.i128,
.usize,
.isize,
.c_short,
@ -1522,6 +1542,8 @@ pub const Type = extern union {
.i32,
.u64,
.i64,
.u128,
.i128,
.usize,
.isize,
.c_short,
@ -1660,6 +1682,8 @@ pub const Type = extern union {
.i32 => unreachable,
.u64 => unreachable,
.i64 => unreachable,
.u128 => unreachable,
.i128 => unreachable,
.usize => unreachable,
.isize => unreachable,
.c_short => unreachable,
@ -1776,6 +1800,8 @@ pub const Type = extern union {
.i32,
.u64,
.i64,
.u128,
.i128,
.usize,
.isize,
.c_short,
@ -1856,6 +1882,8 @@ pub const Type = extern union {
.i32,
.u64,
.i64,
.u128,
.i128,
.usize,
.isize,
.c_short,
@ -2009,6 +2037,8 @@ pub const Type = extern union {
.i16,
.i32,
.i64,
.u128,
.i128,
=> true,
};
}
@ -2061,6 +2091,8 @@ pub const Type = extern union {
.i16,
.i32,
.i64,
.u128,
.i128,
.optional,
.optional_single_mut_pointer,
.optional_single_const_pointer,
@ -2167,6 +2199,8 @@ pub const Type = extern union {
.i32 => .{ .signedness = .signed, .bits = 32 },
.u64 => .{ .signedness = .unsigned, .bits = 64 },
.i64 => .{ .signedness = .signed, .bits = 64 },
.u128 => .{ .signedness = .unsigned, .bits = 128 },
.i128 => .{ .signedness = .signed, .bits = 128 },
.usize => .{ .signedness = .unsigned, .bits = target.cpu.arch.ptrBitWidth() },
.isize => .{ .signedness = .signed, .bits = target.cpu.arch.ptrBitWidth() },
.c_short => .{ .signedness = .signed, .bits = CType.short.sizeInBits(target) },
@ -2227,6 +2261,8 @@ pub const Type = extern union {
.i32,
.u64,
.i64,
.u128,
.i128,
.optional,
.optional_single_mut_pointer,
.optional_single_const_pointer,
@ -2333,6 +2369,8 @@ pub const Type = extern union {
.i32,
.u64,
.i64,
.u128,
.i128,
.usize,
.isize,
.c_short,
@ -2417,6 +2455,8 @@ pub const Type = extern union {
.i32,
.u64,
.i64,
.u128,
.i128,
.usize,
.isize,
.c_short,
@ -2500,6 +2540,8 @@ pub const Type = extern union {
.i32,
.u64,
.i64,
.u128,
.i128,
.usize,
.isize,
.c_short,
@ -2583,6 +2625,8 @@ pub const Type = extern union {
.i32,
.u64,
.i64,
.u128,
.i128,
.usize,
.isize,
.c_short,
@ -2663,6 +2707,8 @@ pub const Type = extern union {
.i32,
.u64,
.i64,
.u128,
.i128,
.usize,
.isize,
.c_short,
@ -2743,6 +2789,8 @@ pub const Type = extern union {
.i32,
.u64,
.i64,
.u128,
.i128,
.usize,
.isize,
.c_short,
@ -2793,6 +2841,8 @@ pub const Type = extern union {
.i32,
.u64,
.i64,
.u128,
.i128,
.usize,
.isize,
.c_short,
@ -2874,6 +2924,8 @@ pub const Type = extern union {
.i32,
.u64,
.i64,
.u128,
.i128,
.usize,
.isize,
.c_short,
@ -2971,6 +3023,8 @@ pub const Type = extern union {
.i32,
.u64,
.i64,
.u128,
.i128,
.usize,
.isize,
.c_short,
@ -3060,6 +3114,8 @@ pub const Type = extern union {
.i32,
.u64,
.i64,
.u128,
.i128,
.usize,
.isize,
.c_short,
@ -3193,6 +3249,8 @@ pub const Type = extern union {
i32,
u64,
i64,
u128,
i128,
usize,
isize,
c_short,
@ -3277,6 +3335,8 @@ pub const Type = extern union {
.i32,
.u64,
.i64,
.u128,
.i128,
.usize,
.isize,
.c_short,
@ -3352,6 +3412,11 @@ pub const Type = extern union {
};
}
pub fn init(comptime t: Tag) Type {
comptime std.debug.assert(@enumToInt(t) < Tag.no_payload_count);
return .{ .tag_if_small_enough = @enumToInt(t) };
}
pub fn create(comptime t: Tag, ally: *Allocator, data: Data(t)) error{OutOfMemory}!Type {
const ptr = try ally.create(t.Type());
ptr.* = .{

View File

@ -2244,6 +2244,11 @@ unsigned ZigClangAPSInt_getNumWords(const ZigClangAPSInt *self) {
return casted->getNumWords();
}
bool ZigClangAPSInt_lessThanEqual(const ZigClangAPSInt *self, uint64_t rhs) {
auto casted = reinterpret_cast<const llvm::APSInt *>(self);
return casted->ule(rhs);
}
uint64_t ZigClangAPInt_getLimitedValue(const ZigClangAPInt *self, uint64_t limit) {
auto casted = reinterpret_cast<const llvm::APInt *>(self);
return casted->getLimitedValue(limit);

View File

@ -1097,6 +1097,7 @@ ZIG_EXTERN_C const struct ZigClangAPSInt *ZigClangAPSInt_negate(const struct Zig
ZIG_EXTERN_C void ZigClangAPSInt_free(const struct ZigClangAPSInt *self);
ZIG_EXTERN_C const uint64_t *ZigClangAPSInt_getRawData(const struct ZigClangAPSInt *self);
ZIG_EXTERN_C unsigned ZigClangAPSInt_getNumWords(const struct ZigClangAPSInt *self);
ZIG_EXTERN_C bool ZigClangAPSInt_lessThanEqual(const struct ZigClangAPSInt *self, uint64_t rhs);
ZIG_EXTERN_C uint64_t ZigClangAPInt_getLimitedValue(const struct ZigClangAPInt *self, uint64_t limit);

View File

@ -53,6 +53,9 @@ pub const Inst = struct {
indexable_ptr_len,
/// Function parameter value. These must be first in a function's main block,
/// in respective order with the parameters.
/// TODO make this instruction implicit; after we transition to having ZIR
/// instructions be same sized and referenced by index, the first N indexes
/// will implicitly be references to the parameters of the function.
arg,
/// Type coercion.
as,
@ -169,8 +172,10 @@ pub const Inst = struct {
floatcast,
/// Declare a function body.
@"fn",
/// Returns a function type.
fntype,
/// Returns a function type, assuming unspecified calling convention.
fn_type,
/// Returns a function type, with a calling convention instruction operand.
fn_type_cc,
/// @import(operand)
import,
/// Integer literal.
@ -340,6 +345,8 @@ pub const Inst = struct {
void_value,
/// A switch expression.
switchbr,
/// Same as `switchbr` but the target is a pointer to the value being switched on.
switchbr_ref,
/// A range in a switch case, `lhs...rhs`.
/// Only checks that `lhs >= rhs` if they are ints, everything else is
/// validated by the .switch instruction.
@ -450,6 +457,8 @@ pub const Inst = struct {
.block_comptime_flat,
=> Block,
.switchbr, .switchbr_ref => SwitchBr,
.arg => Arg,
.array_type_sentinel => ArrayTypeSentinel,
.@"break" => Break,
@ -471,7 +480,8 @@ pub const Inst = struct {
.@"export" => Export,
.param_type => ParamType,
.primitive => Primitive,
.fntype => FnType,
.fn_type => FnType,
.fn_type_cc => FnTypeCc,
.elem_ptr, .elem_val => Elem,
.condbr => CondBr,
.ptr_type => PtrType,
@ -485,7 +495,6 @@ pub const Inst = struct {
.enum_type => EnumType,
.union_type => UnionType,
.struct_type => StructType,
.switchbr => SwitchBr,
};
}
@ -546,7 +555,8 @@ pub const Inst = struct {
.field_ptr_named,
.field_val_named,
.@"fn",
.fntype,
.fn_type,
.fn_type_cc,
.int,
.intcast,
.int_type,
@ -614,7 +624,6 @@ pub const Inst = struct {
.struct_type,
.void_value,
.switch_range,
.switchbr,
=> false,
.@"break",
@ -629,6 +638,8 @@ pub const Inst = struct {
.container_field_named,
.container_field_typed,
.container_field,
.switchbr,
.switchbr_ref,
=> true,
};
}
@ -689,6 +700,8 @@ pub const Inst = struct {
base: Inst,
positionals: struct {
/// This exists to be passed to the arg TZIR instruction, which
/// needs it for debug info.
name: []const u8,
},
kw_args: struct {},
@ -725,6 +738,8 @@ pub const Inst = struct {
kw_args: struct {},
};
// TODO break this into multiple call instructions to avoid paying the cost
// of the calling convention field most of the time.
pub const Call = struct {
pub const base_tag = Tag.call;
base: Inst,
@ -732,10 +747,9 @@ pub const Inst = struct {
positionals: struct {
func: *Inst,
args: []*Inst,
},
kw_args: struct {
modifier: std.builtin.CallOptions.Modifier = .auto,
},
kw_args: struct {},
};
pub const DeclRef = struct {
@ -849,8 +863,8 @@ pub const Inst = struct {
kw_args: struct {
@"volatile": bool = false,
output: ?*Inst = null,
inputs: []*Inst = &[0]*Inst{},
clobbers: []*Inst = &[0]*Inst{},
inputs: []const []const u8 = &.{},
clobbers: []const []const u8 = &.{},
args: []*Inst = &[0]*Inst{},
},
};
@ -867,7 +881,18 @@ pub const Inst = struct {
};
pub const FnType = struct {
pub const base_tag = Tag.fntype;
pub const base_tag = Tag.fn_type;
base: Inst,
positionals: struct {
param_types: []*Inst,
return_type: *Inst,
},
kw_args: struct {},
};
pub const FnTypeCc = struct {
pub const base_tag = Tag.fn_type_cc;
base: Inst,
positionals: struct {
@ -1167,20 +1192,12 @@ pub const Inst = struct {
},
kw_args: struct {
init_inst: ?*Inst = null,
init_kind: InitKind = .none,
has_enum_token: bool,
layout: std.builtin.TypeInfo.ContainerLayout = .Auto,
},
// TODO error: values of type '(enum literal)' must be comptime known
pub const InitKind = enum {
enum_type,
tag_type,
none,
};
};
pub const SwitchBr = struct {
pub const base_tag = Tag.switchbr;
base: Inst,
positionals: struct {
@ -1189,14 +1206,12 @@ pub const Inst = struct {
items: []*Inst,
cases: []Case,
else_body: Body,
},
kw_args: struct {
/// Pointer to first range if such exists.
range: ?*Inst = null,
special_prong: SpecialProng = .none,
},
kw_args: struct {},
// Not anonymous due to stage1 limitations
pub const SpecialProng = enum {
none,
@"else",
@ -1391,6 +1406,7 @@ const Writer = struct {
}
switch (@TypeOf(param)) {
*Inst => return self.writeInstParamToStream(stream, param),
?*Inst => return self.writeInstParamToStream(stream, param.?),
[]*Inst => {
try stream.writeByte('[');
for (param) |inst, i| {
@ -1458,7 +1474,7 @@ const Writer = struct {
const name = self.loop_table.get(param).?;
return stream.print("\"{}\"", .{std.zig.fmtEscapes(name)});
},
[][]const u8 => {
[][]const u8, []const []const u8 => {
try stream.writeByte('[');
for (param) |str, i| {
if (i != 0) {
@ -1586,6 +1602,7 @@ const DumpTzir = struct {
.unreach,
.breakpoint,
.dbg_stmt,
.arg,
=> {},
.ref,
@ -1630,8 +1647,6 @@ const DumpTzir = struct {
try dtz.findConst(bin_op.rhs);
},
.arg => {},
.br => {
const br = inst.castTag(.br).?;
try dtz.findConst(&br.block.base);

View File

@ -91,7 +91,8 @@ pub fn analyzeInst(mod: *Module, scope: *Scope, old_inst: *zir.Inst) InnerError!
.@"fn" => return zirFn(mod, scope, old_inst.castTag(.@"fn").?),
.@"export" => return zirExport(mod, scope, old_inst.castTag(.@"export").?),
.primitive => return zirPrimitive(mod, scope, old_inst.castTag(.primitive).?),
.fntype => return zirFnType(mod, scope, old_inst.castTag(.fntype).?),
.fn_type => return zirFnType(mod, scope, old_inst.castTag(.fn_type).?),
.fn_type_cc => return zirFnTypeCc(mod, scope, old_inst.castTag(.fn_type_cc).?),
.intcast => return zirIntcast(mod, scope, old_inst.castTag(.intcast).?),
.bitcast => return zirBitcast(mod, scope, old_inst.castTag(.bitcast).?),
.floatcast => return zirFloatcast(mod, scope, old_inst.castTag(.floatcast).?),
@ -154,7 +155,8 @@ pub fn analyzeInst(mod: *Module, scope: *Scope, old_inst: *zir.Inst) InnerError!
.bool_and => return zirBoolOp(mod, scope, old_inst.castTag(.bool_and).?),
.bool_or => return zirBoolOp(mod, scope, old_inst.castTag(.bool_or).?),
.void_value => return mod.constVoid(scope, old_inst.src),
.switchbr => return zirSwitchBr(mod, scope, old_inst.castTag(.switchbr).?),
.switchbr => return zirSwitchBr(mod, scope, old_inst.castTag(.switchbr).?, false),
.switchbr_ref => return zirSwitchBr(mod, scope, old_inst.castTag(.switchbr_ref).?, true),
.switch_range => return zirSwitchRange(mod, scope, old_inst.castTag(.switch_range).?),
.container_field_named,
@ -957,11 +959,11 @@ fn zirCall(mod: *Module, scope: *Scope, inst: *zir.Inst.Call) InnerError!*Inst {
);
}
if (inst.kw_args.modifier == .compile_time) {
if (inst.positionals.modifier == .compile_time) {
return mod.fail(scope, inst.base.src, "TODO implement comptime function calls", .{});
}
if (inst.kw_args.modifier != .auto) {
return mod.fail(scope, inst.base.src, "TODO implement call with modifier {}", .{inst.kw_args.modifier});
if (inst.positionals.modifier != .auto) {
return mod.fail(scope, inst.base.src, "TODO implement call with modifier {}", .{inst.positionals.modifier});
}
// TODO handle function calls of generic functions
@ -979,8 +981,8 @@ fn zirCall(mod: *Module, scope: *Scope, inst: *zir.Inst.Call) InnerError!*Inst {
const ret_type = func.ty.fnReturnType();
const b = try mod.requireFunctionBlock(scope, inst.base.src);
const is_comptime_call = b.is_comptime or inst.kw_args.modifier == .compile_time;
const is_inline_call = is_comptime_call or inst.kw_args.modifier == .always_inline or
const is_comptime_call = b.is_comptime or inst.positionals.modifier == .compile_time;
const is_inline_call = is_comptime_call or inst.positionals.modifier == .always_inline or
func.ty.fnCallingConvention() == .Inline;
if (is_inline_call) {
const func_val = try mod.resolveConstValue(scope, func);
@ -1294,34 +1296,69 @@ fn zirEnsureErrPayloadVoid(mod: *Module, scope: *Scope, unwrap: *zir.Inst.UnOp)
fn zirFnType(mod: *Module, scope: *Scope, fntype: *zir.Inst.FnType) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
const return_type = try resolveType(mod, scope, fntype.positionals.return_type);
return fnTypeCommon(
mod,
scope,
&fntype.base,
fntype.positionals.param_types,
fntype.positionals.return_type,
.Unspecified,
);
}
fn zirFnTypeCc(mod: *Module, scope: *Scope, fntype: *zir.Inst.FnTypeCc) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
const cc_tv = try resolveInstConst(mod, scope, fntype.positionals.cc);
// TODO once we're capable of importing and analyzing decls from
// std.builtin, this needs to change
const cc_str = cc_tv.val.castTag(.enum_literal).?.data;
const cc = std.meta.stringToEnum(std.builtin.CallingConvention, cc_str) orelse
return mod.fail(scope, fntype.positionals.cc.src, "Unknown calling convention {s}", .{cc_str});
return fnTypeCommon(
mod,
scope,
&fntype.base,
fntype.positionals.param_types,
fntype.positionals.return_type,
cc,
);
}
fn fnTypeCommon(
mod: *Module,
scope: *Scope,
zir_inst: *zir.Inst,
zir_param_types: []*zir.Inst,
zir_return_type: *zir.Inst,
cc: std.builtin.CallingConvention,
) InnerError!*Inst {
const return_type = try resolveType(mod, scope, zir_return_type);
// Hot path for some common function types.
if (fntype.positionals.param_types.len == 0) {
if (zir_param_types.len == 0) {
if (return_type.zigTypeTag() == .NoReturn and cc == .Unspecified) {
return mod.constType(scope, fntype.base.src, Type.initTag(.fn_noreturn_no_args));
return mod.constType(scope, zir_inst.src, Type.initTag(.fn_noreturn_no_args));
}
if (return_type.zigTypeTag() == .Void and cc == .Unspecified) {
return mod.constType(scope, fntype.base.src, Type.initTag(.fn_void_no_args));
return mod.constType(scope, zir_inst.src, Type.initTag(.fn_void_no_args));
}
if (return_type.zigTypeTag() == .NoReturn and cc == .Naked) {
return mod.constType(scope, fntype.base.src, Type.initTag(.fn_naked_noreturn_no_args));
return mod.constType(scope, zir_inst.src, Type.initTag(.fn_naked_noreturn_no_args));
}
if (return_type.zigTypeTag() == .Void and cc == .C) {
return mod.constType(scope, fntype.base.src, Type.initTag(.fn_ccc_void_no_args));
return mod.constType(scope, zir_inst.src, Type.initTag(.fn_ccc_void_no_args));
}
}
const arena = scope.arena();
const param_types = try arena.alloc(Type, fntype.positionals.param_types.len);
for (fntype.positionals.param_types) |param_type, i| {
const param_types = try arena.alloc(Type, zir_param_types.len);
for (zir_param_types) |param_type, i| {
const resolved = try resolveType(mod, scope, param_type);
// TODO skip for comptime params
if (!resolved.isValidVarType(false)) {
@ -1335,7 +1372,7 @@ fn zirFnType(mod: *Module, scope: *Scope, fntype: *zir.Inst.FnType) InnerError!*
.return_type = return_type,
.cc = cc,
});
return mod.constType(scope, fntype.base.src, fn_ty);
return mod.constType(scope, zir_inst.src, fn_ty);
}
fn zirPrimitive(mod: *Module, scope: *Scope, primitive: *zir.Inst.Primitive) InnerError!*Inst {
@ -1554,10 +1591,15 @@ fn zirSwitchRange(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) InnerError
return mod.constVoid(scope, inst.base.src);
}
fn zirSwitchBr(mod: *Module, scope: *Scope, inst: *zir.Inst.SwitchBr) InnerError!*Inst {
fn zirSwitchBr(mod: *Module, scope: *Scope, inst: *zir.Inst.SwitchBr, ref: bool) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
const target = try resolveInst(mod, scope, inst.positionals.target);
const target_ptr = try resolveInst(mod, scope, inst.positionals.target);
const target = if (ref)
try mod.analyzeDeref(scope, inst.base.src, target_ptr, inst.positionals.target.src)
else
target_ptr;
try validateSwitch(mod, scope, target, inst);
if (try mod.resolveDefinedValue(scope, target)) |target_val| {
@ -1626,13 +1668,13 @@ fn zirSwitchBr(mod: *Module, scope: *Scope, inst: *zir.Inst.SwitchBr) InnerError
fn validateSwitch(mod: *Module, scope: *Scope, target: *Inst, inst: *zir.Inst.SwitchBr) InnerError!void {
// validate usage of '_' prongs
if (inst.kw_args.special_prong == .underscore and target.ty.zigTypeTag() != .Enum) {
if (inst.positionals.special_prong == .underscore and target.ty.zigTypeTag() != .Enum) {
return mod.fail(scope, inst.base.src, "'_' prong only allowed when switching on non-exhaustive enums", .{});
// TODO notes "'_' prong here" inst.positionals.cases[last].src
}
// check that target type supports ranges
if (inst.kw_args.range) |range_inst| {
if (inst.positionals.range) |range_inst| {
switch (target.ty.zigTypeTag()) {
.Int, .ComptimeInt => {},
else => {
@ -1683,14 +1725,14 @@ fn validateSwitch(mod: *Module, scope: *Scope, target: *Inst, inst: *zir.Inst.Sw
const start = try target.ty.minInt(&arena, mod.getTarget());
const end = try target.ty.maxInt(&arena, mod.getTarget());
if (try range_set.spans(start, end)) {
if (inst.kw_args.special_prong == .@"else") {
if (inst.positionals.special_prong == .@"else") {
return mod.fail(scope, inst.base.src, "unreachable else prong, all cases already handled", .{});
}
return;
}
}
if (inst.kw_args.special_prong != .@"else") {
if (inst.positionals.special_prong != .@"else") {
return mod.fail(scope, inst.base.src, "switch must handle all possibilities", .{});
}
},
@ -1710,15 +1752,15 @@ fn validateSwitch(mod: *Module, scope: *Scope, target: *Inst, inst: *zir.Inst.Sw
return mod.fail(scope, item.src, "duplicate switch value", .{});
}
}
if ((true_count + false_count < 2) and inst.kw_args.special_prong != .@"else") {
if ((true_count + false_count < 2) and inst.positionals.special_prong != .@"else") {
return mod.fail(scope, inst.base.src, "switch must handle all possibilities", .{});
}
if ((true_count + false_count == 2) and inst.kw_args.special_prong == .@"else") {
if ((true_count + false_count == 2) and inst.positionals.special_prong == .@"else") {
return mod.fail(scope, inst.base.src, "unreachable else prong, all cases already handled", .{});
}
},
.EnumLiteral, .Void, .Fn, .Pointer, .Type => {
if (inst.kw_args.special_prong != .@"else") {
if (inst.positionals.special_prong != .@"else") {
return mod.fail(scope, inst.base.src, "else prong required when switching on type '{}'", .{target.ty});
}
@ -1981,19 +2023,21 @@ fn zirDeref(mod: *Module, scope: *Scope, deref: *zir.Inst.UnOp) InnerError!*Inst
fn zirAsm(mod: *Module, scope: *Scope, assembly: *zir.Inst.Asm) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
const return_type = try resolveType(mod, scope, assembly.positionals.return_type);
const asm_source = try resolveConstString(mod, scope, assembly.positionals.asm_source);
const output = if (assembly.kw_args.output) |o| try resolveConstString(mod, scope, o) else null;
const inputs = try scope.arena().alloc([]const u8, assembly.kw_args.inputs.len);
const clobbers = try scope.arena().alloc([]const u8, assembly.kw_args.clobbers.len);
const args = try scope.arena().alloc(*Inst, assembly.kw_args.args.len);
const arena = scope.arena();
const inputs = try arena.alloc([]const u8, assembly.kw_args.inputs.len);
const clobbers = try arena.alloc([]const u8, assembly.kw_args.clobbers.len);
const args = try arena.alloc(*Inst, assembly.kw_args.args.len);
for (inputs) |*elem, i| {
elem.* = try resolveConstString(mod, scope, assembly.kw_args.inputs[i]);
elem.* = try arena.dupe(u8, assembly.kw_args.inputs[i]);
}
for (clobbers) |*elem, i| {
elem.* = try resolveConstString(mod, scope, assembly.kw_args.clobbers[i]);
elem.* = try arena.dupe(u8, assembly.kw_args.clobbers[i]);
}
for (args) |*elem, i| {
const arg = try resolveInst(mod, scope, assembly.kw_args.args[i]);

View File

@ -3,6 +3,33 @@ const tests = @import("tests.zig");
const nl = std.cstr.line_sep;
pub fn addCases(cases: *tests.RunTranslatedCContext) void {
cases.add("use global scope for record/enum/typedef type transalation if needed",
\\void bar(void);
\\void baz(void);
\\struct foo { int x; };
\\void bar() {
\\ struct foo tmp;
\\}
\\
\\void baz() {
\\ struct foo tmp;
\\}
\\
\\int main(void) {
\\ bar();
\\ baz();
\\ return 0;
\\}
, "");
cases.add("failed macros are only declared once",
\\#define FOO =
\\#define FOO =
\\#define PtrToPtr64(p) ((void *POINTER_64) p)
\\#define STRUC_ALIGNED_STACK_COPY(t,s) ((CONST t *)(s))
\\int main(void) {}
, "");
cases.add("parenthesized string literal",
\\void foo(const char *s) {}
\\int main(void) {
@ -922,4 +949,13 @@ pub fn addCases(cases: *tests.RunTranslatedCContext) void {
\\ return 0;
\\}
, "");
cases.add("Use correct break label for statement expression in nested scope",
\\#include <stdlib.h>
\\int main(void) {
\\ int x = ({1, ({2; 3;});});
\\ if (x != 3) abort();
\\ return 0;
\\}
, "");
}

View File

@ -1088,7 +1088,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ _ = foo;
\\}
\\extern var foo;
, &[_][]const u8{":4:1: error: unable to infer variable type"});
, &[_][]const u8{":4:8: error: unable to infer variable type"});
}
{
@ -1194,12 +1194,12 @@ pub fn addCases(ctx: *TestContext) !void {
\\comptime {
\\ foo: while (true) {}
\\}
, &[_][]const u8{":2:5: error: unused while label"});
, &[_][]const u8{":2:5: error: unused while loop label"});
case.addError(
\\comptime {
\\ foo: for ("foo") |_| {}
\\}
, &[_][]const u8{":2:5: error: unused for label"});
, &[_][]const u8{":2:5: error: unused for loop label"});
case.addError(
\\comptime {
\\ blk: {blk: {}}
@ -1294,6 +1294,10 @@ pub fn addCases(ctx: *TestContext) !void {
,
"",
);
// TODO this should be :8:21 not :8:19. we need to improve source locations
// to be relative to the containing Decl so that they can survive when the byte
// offset of a previous Decl changes. Here the change from 7 to 999 introduces
// +2 to the byte offset and makes the error location wrong by 2 bytes.
case.addError(
\\export fn _start() noreturn {
\\ const y = fibonacci(999);
@ -1314,7 +1318,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ );
\\ unreachable;
\\}
, &[_][]const u8{":8:10: error: evaluation exceeded 1000 backwards branches"});
, &[_][]const u8{":8:19: error: evaluation exceeded 1000 backwards branches"});
}
{
var case = ctx.exe("orelse at comptime", linux_x64);

File diff suppressed because it is too large Load Diff