Mirror of https://github.com/ziglang/zig.git
stage2: skeleton codegen for x64 ADD
also rework Module to take advantage of the new hash map implementation.
parent ad2ed457dd
commit 8be8ebd698
@ -20,8 +20,8 @@ const ast = std.zig.ast;
|
||||
const trace = @import("tracy.zig").trace;
|
||||
const liveness = @import("liveness.zig");
|
||||
|
||||
/// General-purpose allocator.
|
||||
allocator: *Allocator,
|
||||
/// General-purpose allocator. Used for both temporary and long-term storage.
|
||||
gpa: *Allocator,
|
||||
/// Pointer to externally managed resource.
|
||||
root_pkg: *Package,
|
||||
/// Module owns this resource.
|
||||
@ -33,7 +33,7 @@ bin_file_path: []const u8,
|
||||
/// It's rare for a decl to be exported, so we save memory by having a sparse map of
|
||||
/// Decl pointers to details about them being exported.
|
||||
/// The Export memory is owned by the `export_owners` table; the slice itself is owned by this table.
|
||||
decl_exports: std.AutoHashMap(*Decl, []*Export),
|
||||
decl_exports: std.AutoHashMapUnmanaged(*Decl, []*Export) = .{},
|
||||
/// We track which export is associated with the given symbol name for quick
|
||||
/// detection of symbol collisions.
|
||||
symbol_exports: std.StringHashMap(*Export),
|
||||
@ -41,9 +41,9 @@ symbol_exports: std.StringHashMap(*Export),
|
||||
/// is modified. Note that the key of this table is not the Decl being exported, but the Decl that
|
||||
/// is performing the export of another Decl.
|
||||
/// This table owns the Export memory.
|
||||
export_owners: std.AutoHashMap(*Decl, []*Export),
|
||||
export_owners: std.AutoHashMapUnmanaged(*Decl, []*Export) = .{},
|
||||
/// Maps fully qualified namespaced names to the Decl struct for them.
|
||||
decl_table: DeclTable,
|
||||
decl_table: std.HashMapUnmanaged(Scope.NameHash, *Decl, Scope.name_hash_hash, Scope.name_hash_eql, false) = .{},
|
||||
|
||||
optimize_mode: std.builtin.Mode,
|
||||
link_error_flags: link.ElfFile.ErrorFlags = .{},
|
||||
@ -55,13 +55,13 @@ work_queue: std.fifo.LinearFifo(WorkItem, .Dynamic),
|
||||
/// The ErrorMsg memory is owned by the decl, using Module's allocator.
|
||||
/// Note that a Decl can succeed but the Fn it represents can fail. In this case,
|
||||
/// a Decl can have a failed_decls entry but have analysis status of success.
|
||||
failed_decls: std.AutoHashMap(*Decl, *ErrorMsg),
|
||||
failed_decls: std.AutoHashMapUnmanaged(*Decl, *ErrorMsg) = .{},
|
||||
/// Using a map here for consistency with the other fields here.
|
||||
/// The ErrorMsg memory is owned by the `Scope`, using Module's allocator.
|
||||
failed_files: std.AutoHashMap(*Scope, *ErrorMsg),
|
||||
failed_files: std.AutoHashMapUnmanaged(*Scope, *ErrorMsg) = .{},
|
||||
/// Using a map here for consistency with the other fields here.
|
||||
/// The ErrorMsg memory is owned by the `Export`, using Module's allocator.
|
||||
failed_exports: std.AutoHashMap(*Export, *ErrorMsg),
|
||||
failed_exports: std.AutoHashMapUnmanaged(*Export, *ErrorMsg) = .{},
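The tables above switch from std.AutoHashMap to std.AutoHashMapUnmanaged: the map no longer stores its allocator, it default-initializes with `= .{}`, and every allocating or freeing call now takes `gpa` explicitly, which is the pattern the rest of this diff applies throughout Module. A minimal sketch of that usage, assuming the std.hash_map API of this era; the container and field below are illustrative, not the real Module:

const std = @import("std");

const Export = struct {};

const Example = struct {
    // Unmanaged: no stored allocator, so `.{}` is a valid default value.
    decl_exports: std.AutoHashMapUnmanaged(u32, []*Export) = .{},

    fn deinit(self: *Example, gpa: *std.mem.Allocator) void {
        // The allocator is supplied at every call that allocates or frees.
        self.decl_exports.deinit(gpa);
    }
};

test "unmanaged map takes the allocator per call" {
    var example: Example = .{};
    defer example.deinit(std.testing.allocator);

    try example.decl_exports.ensureCapacity(std.testing.allocator, 1);
    example.decl_exports.putAssumeCapacityNoClobber(42, &[0]*Export{});
    std.testing.expect(example.decl_exports.items().len == 1);
}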
/// Incrementing integer used to compare against the corresponding Decl
|
||||
/// field to determine whether a Decl's status applies to an ongoing update, or a
|
||||
@ -76,8 +76,6 @@ deletion_set: std.ArrayListUnmanaged(*Decl) = .{},
|
||||
|
||||
keep_source_files_loaded: bool,
|
||||
|
||||
const DeclTable = std.HashMap(Scope.NameHash, *Decl, Scope.name_hash_hash, Scope.name_hash_eql, false);
|
||||
|
||||
const WorkItem = union(enum) {
|
||||
/// Write the machine code for a Decl to the output file.
|
||||
codegen_decl: *Decl,
|
||||
@ -176,19 +174,23 @@ pub const Decl = struct {
|
||||
|
||||
/// The shallow set of other decls whose typed_value could possibly change if this Decl's
|
||||
/// typed_value is modified.
|
||||
dependants: ArrayListUnmanaged(*Decl) = ArrayListUnmanaged(*Decl){},
|
||||
dependants: DepsTable = .{},
|
||||
/// The shallow set of other decls whose typed_value changing indicates that this Decl's
|
||||
/// typed_value may need to be regenerated.
|
||||
dependencies: ArrayListUnmanaged(*Decl) = ArrayListUnmanaged(*Decl){},
|
||||
dependencies: DepsTable = .{},
|
||||
|
||||
pub fn destroy(self: *Decl, allocator: *Allocator) void {
|
||||
allocator.free(mem.spanZ(self.name));
|
||||
/// The reason this is not `std.AutoHashMapUnmanaged` is a workaround for
|
||||
/// stage1 compiler giving me: `error: struct 'Module.Decl' depends on itself`
|
||||
pub const DepsTable = std.HashMapUnmanaged(*Decl, void, std.hash_map.getAutoHashFn(*Decl), std.hash_map.getAutoEqlFn(*Decl), false);
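DepsTable above replaces the old ArrayListUnmanaged(*Decl) dependency lists with a void-valued hash map, i.e. a set: duplicate inserts collapse into a single entry and removal is a table lookup rather than the linear scan removeDependant/removeDependency used to do. A minimal sketch of the set idiom, reusing the hash/eql helpers this declaration passes to std.HashMapUnmanaged (the Decl type here is a stand-in):

const std = @import("std");

const Decl = struct { name: []const u8 };

// Same shape as DepsTable: a void-valued map used as a set of *Decl.
const DeclSet = std.HashMapUnmanaged(
    *Decl,
    void,
    std.hash_map.getAutoHashFn(*Decl),
    std.hash_map.getAutoEqlFn(*Decl),
    false,
);

test "void-valued hash map used as a dependency set" {
    const gpa = std.testing.allocator;
    var set: DeclSet = .{};
    defer set.deinit(gpa);

    var a = Decl{ .name = "a" };

    // Reserve space, then insert without error handling, as declareDeclDependency now does.
    try set.ensureCapacity(gpa, set.items().len + 1);
    set.putAssumeCapacity(&a, {});
    set.putAssumeCapacity(&a, {}); // inserting an existing key leaves one entry
    std.testing.expect(set.items().len == 1);

    // removeAssertDiscard asserts the key was present, as removeDependant relies on.
    set.removeAssertDiscard(&a);
    std.testing.expect(set.items().len == 0);
}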
pub fn destroy(self: *Decl, gpa: *Allocator) void {
|
||||
gpa.free(mem.spanZ(self.name));
|
||||
if (self.typedValueManaged()) |tvm| {
|
||||
tvm.deinit(allocator);
|
||||
tvm.deinit(gpa);
|
||||
}
|
||||
self.dependants.deinit(allocator);
|
||||
self.dependencies.deinit(allocator);
|
||||
allocator.destroy(self);
|
||||
self.dependants.deinit(gpa);
|
||||
self.dependencies.deinit(gpa);
|
||||
gpa.destroy(self);
|
||||
}
|
||||
|
||||
pub fn src(self: Decl) usize {
|
||||
@ -247,23 +249,11 @@ pub const Decl = struct {
|
||||
}
|
||||
|
||||
fn removeDependant(self: *Decl, other: *Decl) void {
|
||||
for (self.dependants.items) |item, i| {
|
||||
if (item == other) {
|
||||
_ = self.dependants.swapRemove(i);
|
||||
return;
|
||||
}
|
||||
}
|
||||
unreachable;
|
||||
self.dependants.removeAssertDiscard(other);
|
||||
}
|
||||
|
||||
fn removeDependency(self: *Decl, other: *Decl) void {
|
||||
for (self.dependencies.items) |item, i| {
|
||||
if (item == other) {
|
||||
_ = self.dependencies.swapRemove(i);
|
||||
return;
|
||||
}
|
||||
}
|
||||
unreachable;
|
||||
self.dependencies.removeAssertDiscard(other);
|
||||
}
|
||||
};
|
||||
|
||||
@ -390,10 +380,10 @@ pub const Scope = struct {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn unload(base: *Scope, allocator: *Allocator) void {
|
||||
pub fn unload(base: *Scope, gpa: *Allocator) void {
|
||||
switch (base.tag) {
|
||||
.file => return @fieldParentPtr(File, "base", base).unload(allocator),
|
||||
.zir_module => return @fieldParentPtr(ZIRModule, "base", base).unload(allocator),
|
||||
.file => return @fieldParentPtr(File, "base", base).unload(gpa),
|
||||
.zir_module => return @fieldParentPtr(ZIRModule, "base", base).unload(gpa),
|
||||
.block => unreachable,
|
||||
.gen_zir => unreachable,
|
||||
.decl => unreachable,
|
||||
@ -422,17 +412,17 @@ pub const Scope = struct {
|
||||
}
|
||||
|
||||
/// Asserts the scope is a File or ZIRModule and deinitializes it, then deallocates it.
|
||||
pub fn destroy(base: *Scope, allocator: *Allocator) void {
|
||||
pub fn destroy(base: *Scope, gpa: *Allocator) void {
|
||||
switch (base.tag) {
|
||||
.file => {
|
||||
const scope_file = @fieldParentPtr(File, "base", base);
|
||||
scope_file.deinit(allocator);
|
||||
allocator.destroy(scope_file);
|
||||
scope_file.deinit(gpa);
|
||||
gpa.destroy(scope_file);
|
||||
},
|
||||
.zir_module => {
|
||||
const scope_zir_module = @fieldParentPtr(ZIRModule, "base", base);
|
||||
scope_zir_module.deinit(allocator);
|
||||
allocator.destroy(scope_zir_module);
|
||||
scope_zir_module.deinit(gpa);
|
||||
gpa.destroy(scope_zir_module);
|
||||
},
|
||||
.block => unreachable,
|
||||
.gen_zir => unreachable,
|
||||
@ -483,7 +473,7 @@ pub const Scope = struct {
|
||||
/// Direct children of the file.
|
||||
decls: ArrayListUnmanaged(*Decl),
|
||||
|
||||
pub fn unload(self: *File, allocator: *Allocator) void {
|
||||
pub fn unload(self: *File, gpa: *Allocator) void {
|
||||
switch (self.status) {
|
||||
.never_loaded,
|
||||
.unloaded_parse_failure,
|
||||
@ -497,16 +487,16 @@ pub const Scope = struct {
|
||||
}
|
||||
switch (self.source) {
|
||||
.bytes => |bytes| {
|
||||
allocator.free(bytes);
|
||||
gpa.free(bytes);
|
||||
self.source = .{ .unloaded = {} };
|
||||
},
|
||||
.unloaded => {},
|
||||
}
|
||||
}
|
||||
|
||||
pub fn deinit(self: *File, allocator: *Allocator) void {
|
||||
self.decls.deinit(allocator);
|
||||
self.unload(allocator);
|
||||
pub fn deinit(self: *File, gpa: *Allocator) void {
|
||||
self.decls.deinit(gpa);
|
||||
self.unload(gpa);
|
||||
self.* = undefined;
|
||||
}
|
||||
|
||||
@ -528,7 +518,7 @@ pub const Scope = struct {
|
||||
switch (self.source) {
|
||||
.unloaded => {
|
||||
const source = try module.root_pkg.root_src_dir.readFileAllocOptions(
|
||||
module.allocator,
|
||||
module.gpa,
|
||||
self.sub_file_path,
|
||||
std.math.maxInt(u32),
|
||||
1,
|
||||
@ -576,7 +566,7 @@ pub const Scope = struct {
|
||||
/// not this one.
|
||||
decls: ArrayListUnmanaged(*Decl),
|
||||
|
||||
pub fn unload(self: *ZIRModule, allocator: *Allocator) void {
|
||||
pub fn unload(self: *ZIRModule, gpa: *Allocator) void {
|
||||
switch (self.status) {
|
||||
.never_loaded,
|
||||
.unloaded_parse_failure,
|
||||
@ -585,30 +575,30 @@ pub const Scope = struct {
|
||||
=> {},
|
||||
|
||||
.loaded_success => {
|
||||
self.contents.module.deinit(allocator);
|
||||
allocator.destroy(self.contents.module);
|
||||
self.contents.module.deinit(gpa);
|
||||
gpa.destroy(self.contents.module);
|
||||
self.contents = .{ .not_available = {} };
|
||||
self.status = .unloaded_success;
|
||||
},
|
||||
.loaded_sema_failure => {
|
||||
self.contents.module.deinit(allocator);
|
||||
allocator.destroy(self.contents.module);
|
||||
self.contents.module.deinit(gpa);
|
||||
gpa.destroy(self.contents.module);
|
||||
self.contents = .{ .not_available = {} };
|
||||
self.status = .unloaded_sema_failure;
|
||||
},
|
||||
}
|
||||
switch (self.source) {
|
||||
.bytes => |bytes| {
|
||||
allocator.free(bytes);
|
||||
gpa.free(bytes);
|
||||
self.source = .{ .unloaded = {} };
|
||||
},
|
||||
.unloaded => {},
|
||||
}
|
||||
}
|
||||
|
||||
pub fn deinit(self: *ZIRModule, allocator: *Allocator) void {
|
||||
self.decls.deinit(allocator);
|
||||
self.unload(allocator);
|
||||
pub fn deinit(self: *ZIRModule, gpa: *Allocator) void {
|
||||
self.decls.deinit(gpa);
|
||||
self.unload(gpa);
|
||||
self.* = undefined;
|
||||
}
|
||||
|
||||
@ -630,7 +620,7 @@ pub const Scope = struct {
|
||||
switch (self.source) {
|
||||
.unloaded => {
|
||||
const source = try module.root_pkg.root_src_dir.readFileAllocOptions(
|
||||
module.allocator,
|
||||
module.gpa,
|
||||
self.sub_file_path,
|
||||
std.math.maxInt(u32),
|
||||
1,
|
||||
@ -701,8 +691,8 @@ pub const AllErrors = struct {
|
||||
msg: []const u8,
|
||||
};
|
||||
|
||||
pub fn deinit(self: *AllErrors, allocator: *Allocator) void {
|
||||
self.arena.promote(allocator).deinit();
|
||||
pub fn deinit(self: *AllErrors, gpa: *Allocator) void {
|
||||
self.arena.promote(gpa).deinit();
|
||||
}
|
||||
|
||||
fn add(
|
||||
@ -772,20 +762,14 @@ pub fn init(gpa: *Allocator, options: InitOptions) !Module {
|
||||
};
|
||||
|
||||
return Module{
|
||||
.allocator = gpa,
|
||||
.gpa = gpa,
|
||||
.root_pkg = options.root_pkg,
|
||||
.root_scope = root_scope,
|
||||
.bin_file_dir = bin_file_dir,
|
||||
.bin_file_path = options.bin_file_path,
|
||||
.bin_file = bin_file,
|
||||
.optimize_mode = options.optimize_mode,
|
||||
.decl_table = DeclTable.init(gpa),
|
||||
.decl_exports = std.AutoHashMap(*Decl, []*Export).init(gpa),
|
||||
.symbol_exports = std.StringHashMap(*Export).init(gpa),
|
||||
.export_owners = std.AutoHashMap(*Decl, []*Export).init(gpa),
|
||||
.failed_decls = std.AutoHashMap(*Decl, *ErrorMsg).init(gpa),
|
||||
.failed_files = std.AutoHashMap(*Scope, *ErrorMsg).init(gpa),
|
||||
.failed_exports = std.AutoHashMap(*Export, *ErrorMsg).init(gpa),
|
||||
.work_queue = std.fifo.LinearFifo(WorkItem, .Dynamic).init(gpa),
|
||||
.keep_source_files_loaded = options.keep_source_files_loaded,
|
||||
};
|
||||
@ -793,51 +777,51 @@ pub fn init(gpa: *Allocator, options: InitOptions) !Module {
|
||||
|
||||
pub fn deinit(self: *Module) void {
|
||||
self.bin_file.deinit();
|
||||
const allocator = self.allocator;
|
||||
self.deletion_set.deinit(allocator);
|
||||
const gpa = self.gpa;
|
||||
self.deletion_set.deinit(gpa);
|
||||
self.work_queue.deinit();
|
||||
|
||||
for (self.decl_table.items()) |entry| {
|
||||
entry.value.destroy(allocator);
|
||||
entry.value.destroy(gpa);
|
||||
}
|
||||
self.decl_table.deinit();
|
||||
self.decl_table.deinit(gpa);
|
||||
|
||||
for (self.failed_decls.items()) |entry| {
|
||||
entry.value.destroy(allocator);
|
||||
entry.value.destroy(gpa);
|
||||
}
|
||||
self.failed_decls.deinit();
|
||||
self.failed_decls.deinit(gpa);
|
||||
|
||||
for (self.failed_files.items()) |entry| {
|
||||
entry.value.destroy(allocator);
|
||||
entry.value.destroy(gpa);
|
||||
}
|
||||
self.failed_files.deinit();
|
||||
self.failed_files.deinit(gpa);
|
||||
|
||||
for (self.failed_exports.items()) |entry| {
|
||||
entry.value.destroy(allocator);
|
||||
entry.value.destroy(gpa);
|
||||
}
|
||||
self.failed_exports.deinit();
|
||||
self.failed_exports.deinit(gpa);
|
||||
|
||||
for (self.decl_exports.items()) |entry| {
|
||||
const export_list = entry.value;
|
||||
allocator.free(export_list);
|
||||
gpa.free(export_list);
|
||||
}
|
||||
self.decl_exports.deinit();
|
||||
self.decl_exports.deinit(gpa);
|
||||
|
||||
for (self.export_owners.items()) |entry| {
|
||||
freeExportList(allocator, entry.value);
|
||||
freeExportList(gpa, entry.value);
|
||||
}
|
||||
self.export_owners.deinit();
|
||||
self.export_owners.deinit(gpa);
|
||||
|
||||
self.symbol_exports.deinit();
|
||||
self.root_scope.destroy(allocator);
|
||||
self.root_scope.destroy(gpa);
|
||||
self.* = undefined;
|
||||
}
|
||||
|
||||
fn freeExportList(allocator: *Allocator, export_list: []*Export) void {
|
||||
fn freeExportList(gpa: *Allocator, export_list: []*Export) void {
|
||||
for (export_list) |exp| {
|
||||
allocator.destroy(exp);
|
||||
gpa.destroy(exp);
|
||||
}
|
||||
allocator.free(export_list);
|
||||
gpa.free(export_list);
|
||||
}
|
||||
|
||||
pub fn target(self: Module) std.Target {
|
||||
@ -855,7 +839,7 @@ pub fn update(self: *Module) !void {
|
||||
// Until then we simulate a full cache miss. Source files could have been loaded for any reason;
|
||||
// to force a refresh we unload now.
|
||||
if (self.root_scope.cast(Scope.File)) |zig_file| {
|
||||
zig_file.unload(self.allocator);
|
||||
zig_file.unload(self.gpa);
|
||||
self.analyzeRootSrcFile(zig_file) catch |err| switch (err) {
|
||||
error.AnalysisFail => {
|
||||
assert(self.totalErrorCount() != 0);
|
||||
@ -863,7 +847,7 @@ pub fn update(self: *Module) !void {
|
||||
else => |e| return e,
|
||||
};
|
||||
} else if (self.root_scope.cast(Scope.ZIRModule)) |zir_module| {
|
||||
zir_module.unload(self.allocator);
|
||||
zir_module.unload(self.gpa);
|
||||
self.analyzeRootZIRModule(zir_module) catch |err| switch (err) {
|
||||
error.AnalysisFail => {
|
||||
assert(self.totalErrorCount() != 0);
|
||||
@ -876,7 +860,7 @@ pub fn update(self: *Module) !void {
|
||||
|
||||
// Process the deletion set.
|
||||
while (self.deletion_set.popOrNull()) |decl| {
|
||||
if (decl.dependants.items.len != 0) {
|
||||
if (decl.dependants.items().len != 0) {
|
||||
decl.deletion_flag = false;
|
||||
continue;
|
||||
}
|
||||
@ -889,7 +873,7 @@ pub fn update(self: *Module) !void {
|
||||
// to report error messages. Otherwise we unload all source files to save memory.
|
||||
if (self.totalErrorCount() == 0) {
|
||||
if (!self.keep_source_files_loaded) {
|
||||
self.root_scope.unload(self.allocator);
|
||||
self.root_scope.unload(self.gpa);
|
||||
}
|
||||
try self.bin_file.flush();
|
||||
}
|
||||
@ -915,10 +899,10 @@ pub fn totalErrorCount(self: *Module) usize {
|
||||
}
|
||||
|
||||
pub fn getAllErrorsAlloc(self: *Module) !AllErrors {
|
||||
var arena = std.heap.ArenaAllocator.init(self.allocator);
|
||||
var arena = std.heap.ArenaAllocator.init(self.gpa);
|
||||
errdefer arena.deinit();
|
||||
|
||||
var errors = std.ArrayList(AllErrors.Message).init(self.allocator);
|
||||
var errors = std.ArrayList(AllErrors.Message).init(self.gpa);
|
||||
defer errors.deinit();
|
||||
|
||||
for (self.failed_files.items()) |entry| {
|
||||
@ -989,9 +973,9 @@ pub fn performAllTheWork(self: *Module) error{OutOfMemory}!void {
|
||||
}
|
||||
// Here we tack on additional allocations to the Decl's arena. The allocations are
|
||||
// lifetime annotations in the ZIR.
|
||||
var decl_arena = decl.typed_value.most_recent.arena.?.promote(self.allocator);
|
||||
var decl_arena = decl.typed_value.most_recent.arena.?.promote(self.gpa);
|
||||
defer decl.typed_value.most_recent.arena.?.* = decl_arena.state;
|
||||
try liveness.analyze(self.allocator, &decl_arena.allocator, payload.func.analysis.success);
|
||||
try liveness.analyze(self.gpa, &decl_arena.allocator, payload.func.analysis.success);
|
||||
}
|
||||
|
||||
assert(decl.typed_value.most_recent.typed_value.ty.hasCodeGenBits());
|
||||
@ -1002,9 +986,9 @@ pub fn performAllTheWork(self: *Module) error{OutOfMemory}!void {
|
||||
decl.analysis = .dependency_failure;
|
||||
},
|
||||
else => {
|
||||
try self.failed_decls.ensureCapacity(self.failed_decls.items().len + 1);
|
||||
try self.failed_decls.ensureCapacity(self.gpa, self.failed_decls.items().len + 1);
|
||||
self.failed_decls.putAssumeCapacityNoClobber(decl, try ErrorMsg.create(
|
||||
self.allocator,
|
||||
self.gpa,
|
||||
decl.src(),
|
||||
"unable to codegen: {}",
|
||||
.{@errorName(err)},
|
||||
@ -1048,16 +1032,17 @@ fn ensureDeclAnalyzed(self: *Module, decl: *Decl) InnerError!void {
|
||||
// prior to re-analysis.
|
||||
self.deleteDeclExports(decl);
|
||||
// Dependencies will be re-discovered, so we remove them here prior to re-analysis.
|
||||
for (decl.dependencies.items) |dep| {
|
||||
for (decl.dependencies.items()) |entry| {
|
||||
const dep = entry.key;
|
||||
dep.removeDependant(decl);
|
||||
if (dep.dependants.items.len == 0 and !dep.deletion_flag) {
|
||||
if (dep.dependants.items().len == 0 and !dep.deletion_flag) {
|
||||
// We don't perform a deletion here, because this Decl or another one
|
||||
// may end up referencing it before the update is complete.
|
||||
dep.deletion_flag = true;
|
||||
try self.deletion_set.append(self.allocator, dep);
|
||||
try self.deletion_set.append(self.gpa, dep);
|
||||
}
|
||||
}
|
||||
decl.dependencies.shrink(self.allocator, 0);
|
||||
decl.dependencies.clearRetainingCapacity();
|
||||
|
||||
break :blk true;
|
||||
},
|
||||
@ -1072,9 +1057,9 @@ fn ensureDeclAnalyzed(self: *Module, decl: *Decl) InnerError!void {
|
||||
error.OutOfMemory => return error.OutOfMemory,
|
||||
error.AnalysisFail => return error.AnalysisFail,
|
||||
else => {
|
||||
try self.failed_decls.ensureCapacity(self.failed_decls.items().len + 1);
|
||||
try self.failed_decls.ensureCapacity(self.gpa, self.failed_decls.items().len + 1);
|
||||
self.failed_decls.putAssumeCapacityNoClobber(decl, try ErrorMsg.create(
|
||||
self.allocator,
|
||||
self.gpa,
|
||||
decl.src(),
|
||||
"unable to analyze: {}",
|
||||
.{@errorName(err)},
|
||||
@ -1088,7 +1073,8 @@ fn ensureDeclAnalyzed(self: *Module, decl: *Decl) InnerError!void {
|
||||
// We may need to chase the dependants and re-analyze them.
|
||||
// However, if the decl is a function, and the type is the same, we do not need to.
|
||||
if (type_changed or decl.typed_value.most_recent.typed_value.val.tag() != .function) {
|
||||
for (decl.dependants.items) |dep| {
|
||||
for (decl.dependants.items()) |entry| {
|
||||
const dep = entry.key;
|
||||
switch (dep.analysis) {
|
||||
.unreferenced => unreachable,
|
||||
.in_progress => unreachable,
|
||||
@ -1127,8 +1113,8 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool {
|
||||
// to complete the Decl analysis.
|
||||
var fn_type_scope: Scope.GenZIR = .{
|
||||
.decl = decl,
|
||||
.arena = std.heap.ArenaAllocator.init(self.allocator),
|
||||
.instructions = std.ArrayList(*zir.Inst).init(self.allocator),
|
||||
.arena = std.heap.ArenaAllocator.init(self.gpa),
|
||||
.instructions = std.ArrayList(*zir.Inst).init(self.gpa),
|
||||
};
|
||||
defer fn_type_scope.arena.deinit();
|
||||
defer fn_type_scope.instructions.deinit();
|
||||
@ -1178,7 +1164,7 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool {
|
||||
_ = try self.addZIRInst(&fn_type_scope.base, fn_src, zir.Inst.Return, .{ .operand = fn_type_inst }, .{});
|
||||
|
||||
// We need the memory for the Type to go into the arena for the Decl
|
||||
var decl_arena = std.heap.ArenaAllocator.init(self.allocator);
|
||||
var decl_arena = std.heap.ArenaAllocator.init(self.gpa);
|
||||
errdefer decl_arena.deinit();
|
||||
const decl_arena_state = try decl_arena.allocator.create(std.heap.ArenaAllocator.State);
|
||||
|
||||
@ -1189,7 +1175,7 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool {
|
||||
.instructions = .{},
|
||||
.arena = &decl_arena.allocator,
|
||||
};
|
||||
defer block_scope.instructions.deinit(self.allocator);
|
||||
defer block_scope.instructions.deinit(self.gpa);
|
||||
|
||||
const fn_type = try self.analyzeBodyValueAsType(&block_scope, .{
|
||||
.instructions = fn_type_scope.instructions.items,
|
||||
@ -1202,8 +1188,8 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool {
|
||||
// pass completes, and semantic analysis of it completes.
|
||||
var gen_scope: Scope.GenZIR = .{
|
||||
.decl = decl,
|
||||
.arena = std.heap.ArenaAllocator.init(self.allocator),
|
||||
.instructions = std.ArrayList(*zir.Inst).init(self.allocator),
|
||||
.arena = std.heap.ArenaAllocator.init(self.gpa),
|
||||
.instructions = std.ArrayList(*zir.Inst).init(self.gpa),
|
||||
};
|
||||
errdefer gen_scope.arena.deinit();
|
||||
defer gen_scope.instructions.deinit();
|
||||
@ -1235,7 +1221,7 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool {
|
||||
prev_type_has_bits = tvm.typed_value.ty.hasCodeGenBits();
|
||||
type_changed = !tvm.typed_value.ty.eql(fn_type);
|
||||
|
||||
tvm.deinit(self.allocator);
|
||||
tvm.deinit(self.gpa);
|
||||
}
|
||||
|
||||
decl_arena_state.* = decl_arena.state;
|
||||
@ -1626,40 +1612,31 @@ fn getSimplePrimitiveValue(name: []const u8) ?TypedValue {
|
||||
}
|
||||
|
||||
fn declareDeclDependency(self: *Module, depender: *Decl, dependee: *Decl) !void {
|
||||
try depender.dependencies.ensureCapacity(self.allocator, depender.dependencies.items.len + 1);
|
||||
try dependee.dependants.ensureCapacity(self.allocator, dependee.dependants.items.len + 1);
|
||||
try depender.dependencies.ensureCapacity(self.gpa, depender.dependencies.items().len + 1);
|
||||
try dependee.dependants.ensureCapacity(self.gpa, dependee.dependants.items().len + 1);
|
||||
|
||||
for (depender.dependencies.items) |item| {
|
||||
if (item == dependee) break; // Already in the set.
|
||||
} else {
|
||||
depender.dependencies.appendAssumeCapacity(dependee);
|
||||
}
|
||||
|
||||
for (dependee.dependants.items) |item| {
|
||||
if (item == depender) break; // Already in the set.
|
||||
} else {
|
||||
dependee.dependants.appendAssumeCapacity(depender);
|
||||
}
|
||||
depender.dependencies.putAssumeCapacity(dependee, {});
|
||||
dependee.dependants.putAssumeCapacity(depender, {});
|
||||
}
|
||||
|
||||
fn getSrcModule(self: *Module, root_scope: *Scope.ZIRModule) !*zir.Module {
|
||||
switch (root_scope.status) {
|
||||
.never_loaded, .unloaded_success => {
|
||||
try self.failed_files.ensureCapacity(self.failed_files.items().len + 1);
|
||||
try self.failed_files.ensureCapacity(self.gpa, self.failed_files.items().len + 1);
|
||||
|
||||
const source = try root_scope.getSource(self);
|
||||
|
||||
var keep_zir_module = false;
|
||||
const zir_module = try self.allocator.create(zir.Module);
|
||||
defer if (!keep_zir_module) self.allocator.destroy(zir_module);
|
||||
const zir_module = try self.gpa.create(zir.Module);
|
||||
defer if (!keep_zir_module) self.gpa.destroy(zir_module);
|
||||
|
||||
zir_module.* = try zir.parse(self.allocator, source);
|
||||
defer if (!keep_zir_module) zir_module.deinit(self.allocator);
|
||||
zir_module.* = try zir.parse(self.gpa, source);
|
||||
defer if (!keep_zir_module) zir_module.deinit(self.gpa);
|
||||
|
||||
if (zir_module.error_msg) |src_err_msg| {
|
||||
self.failed_files.putAssumeCapacityNoClobber(
|
||||
&root_scope.base,
|
||||
try ErrorMsg.create(self.allocator, src_err_msg.byte_offset, "{}", .{src_err_msg.msg}),
|
||||
try ErrorMsg.create(self.gpa, src_err_msg.byte_offset, "{}", .{src_err_msg.msg}),
|
||||
);
|
||||
root_scope.status = .unloaded_parse_failure;
|
||||
return error.AnalysisFail;
|
||||
@ -1686,22 +1663,22 @@ fn getAstTree(self: *Module, root_scope: *Scope.File) !*ast.Tree {
|
||||
|
||||
switch (root_scope.status) {
|
||||
.never_loaded, .unloaded_success => {
|
||||
try self.failed_files.ensureCapacity(self.failed_files.items().len + 1);
|
||||
try self.failed_files.ensureCapacity(self.gpa, self.failed_files.items().len + 1);
|
||||
|
||||
const source = try root_scope.getSource(self);
|
||||
|
||||
var keep_tree = false;
|
||||
const tree = try std.zig.parse(self.allocator, source);
|
||||
const tree = try std.zig.parse(self.gpa, source);
|
||||
defer if (!keep_tree) tree.deinit();
|
||||
|
||||
if (tree.errors.len != 0) {
|
||||
const parse_err = tree.errors[0];
|
||||
|
||||
var msg = std.ArrayList(u8).init(self.allocator);
|
||||
var msg = std.ArrayList(u8).init(self.gpa);
|
||||
defer msg.deinit();
|
||||
|
||||
try parse_err.render(tree.token_ids, msg.outStream());
|
||||
const err_msg = try self.allocator.create(ErrorMsg);
|
||||
const err_msg = try self.gpa.create(ErrorMsg);
|
||||
err_msg.* = .{
|
||||
.msg = msg.toOwnedSlice(),
|
||||
.byte_offset = tree.token_locs[parse_err.loc()].start,
|
||||
@ -1732,11 +1709,11 @@ fn analyzeRootSrcFile(self: *Module, root_scope: *Scope.File) !void {
|
||||
const decls = tree.root_node.decls();
|
||||
|
||||
try self.work_queue.ensureUnusedCapacity(decls.len);
|
||||
try root_scope.decls.ensureCapacity(self.allocator, decls.len);
|
||||
try root_scope.decls.ensureCapacity(self.gpa, decls.len);
|
||||
|
||||
// Keep track of the decls that we expect to see in this file so that
|
||||
// we know which ones have been deleted.
|
||||
var deleted_decls = std.AutoHashMap(*Decl, void).init(self.allocator);
|
||||
var deleted_decls = std.AutoHashMap(*Decl, void).init(self.gpa);
|
||||
defer deleted_decls.deinit();
|
||||
try deleted_decls.ensureCapacity(root_scope.decls.items.len);
|
||||
for (root_scope.decls.items) |file_decl| {
|
||||
@ -1760,9 +1737,9 @@ fn analyzeRootSrcFile(self: *Module, root_scope: *Scope.File) !void {
|
||||
decl.src_index = decl_i;
|
||||
if (deleted_decls.remove(decl) == null) {
|
||||
decl.analysis = .sema_failure;
|
||||
const err_msg = try ErrorMsg.create(self.allocator, tree.token_locs[name_tok].start, "redefinition of '{}'", .{decl.name});
|
||||
errdefer err_msg.destroy(self.allocator);
|
||||
try self.failed_decls.putNoClobber(decl, err_msg);
|
||||
const err_msg = try ErrorMsg.create(self.gpa, tree.token_locs[name_tok].start, "redefinition of '{}'", .{decl.name});
|
||||
errdefer err_msg.destroy(self.gpa);
|
||||
try self.failed_decls.putNoClobber(self.gpa, decl, err_msg);
|
||||
} else {
|
||||
if (!srcHashEql(decl.contents_hash, contents_hash)) {
|
||||
try self.markOutdatedDecl(decl);
|
||||
@ -1796,14 +1773,14 @@ fn analyzeRootZIRModule(self: *Module, root_scope: *Scope.ZIRModule) !void {
|
||||
const src_module = try self.getSrcModule(root_scope);
|
||||
|
||||
try self.work_queue.ensureUnusedCapacity(src_module.decls.len);
|
||||
try root_scope.decls.ensureCapacity(self.allocator, src_module.decls.len);
|
||||
try root_scope.decls.ensureCapacity(self.gpa, src_module.decls.len);
|
||||
|
||||
var exports_to_resolve = std.ArrayList(*zir.Decl).init(self.allocator);
|
||||
var exports_to_resolve = std.ArrayList(*zir.Decl).init(self.gpa);
|
||||
defer exports_to_resolve.deinit();
|
||||
|
||||
// Keep track of the decls that we expect to see in this file so that
|
||||
// we know which ones have been deleted.
|
||||
var deleted_decls = std.AutoHashMap(*Decl, void).init(self.allocator);
|
||||
var deleted_decls = std.AutoHashMap(*Decl, void).init(self.gpa);
|
||||
defer deleted_decls.deinit();
|
||||
try deleted_decls.ensureCapacity(self.decl_table.items().len);
|
||||
for (self.decl_table.items()) |entry| {
|
||||
@ -1845,7 +1822,7 @@ fn analyzeRootZIRModule(self: *Module, root_scope: *Scope.ZIRModule) !void {
|
||||
}
|
||||
|
||||
fn deleteDecl(self: *Module, decl: *Decl) !void {
|
||||
try self.deletion_set.ensureCapacity(self.allocator, self.deletion_set.items.len + decl.dependencies.items.len);
|
||||
try self.deletion_set.ensureCapacity(self.gpa, self.deletion_set.items.len + decl.dependencies.items().len);
|
||||
|
||||
// Remove from the namespace it resides in. In the case of an anonymous Decl it will
|
||||
// not be present in the set, and this does nothing.
|
||||
@ -1855,9 +1832,10 @@ fn deleteDecl(self: *Module, decl: *Decl) !void {
|
||||
const name_hash = decl.fullyQualifiedNameHash();
|
||||
self.decl_table.removeAssertDiscard(name_hash);
|
||||
// Remove itself from its dependencies, because we are about to destroy the decl pointer.
|
||||
for (decl.dependencies.items) |dep| {
|
||||
for (decl.dependencies.items()) |entry| {
|
||||
const dep = entry.key;
|
||||
dep.removeDependant(decl);
|
||||
if (dep.dependants.items.len == 0 and !dep.deletion_flag) {
|
||||
if (dep.dependants.items().len == 0 and !dep.deletion_flag) {
|
||||
// We don't recursively perform a deletion here, because during the update,
|
||||
// another reference to it may turn up.
|
||||
dep.deletion_flag = true;
|
||||
@ -1865,7 +1843,8 @@ fn deleteDecl(self: *Module, decl: *Decl) !void {
|
||||
}
|
||||
}
|
||||
// Anything that depends on this deleted decl certainly needs to be re-analyzed.
|
||||
for (decl.dependants.items) |dep| {
|
||||
for (decl.dependants.items()) |entry| {
|
||||
const dep = entry.key;
|
||||
dep.removeDependency(decl);
|
||||
if (dep.analysis != .outdated) {
|
||||
// TODO Move this failure possibility to the top of the function.
|
||||
@ -1873,11 +1852,11 @@ fn deleteDecl(self: *Module, decl: *Decl) !void {
|
||||
}
|
||||
}
|
||||
if (self.failed_decls.remove(decl)) |entry| {
|
||||
entry.value.destroy(self.allocator);
|
||||
entry.value.destroy(self.gpa);
|
||||
}
|
||||
self.deleteDeclExports(decl);
|
||||
self.bin_file.freeDecl(decl);
|
||||
decl.destroy(self.allocator);
|
||||
decl.destroy(self.gpa);
|
||||
}
|
||||
|
||||
/// Delete all the Export objects that are caused by this Decl. Re-analysis of
|
||||
@ -1899,7 +1878,7 @@ fn deleteDeclExports(self: *Module, decl: *Decl) void {
|
||||
i += 1;
|
||||
}
|
||||
}
|
||||
decl_exports_kv.value = self.allocator.shrink(list, new_len);
|
||||
decl_exports_kv.value = self.gpa.shrink(list, new_len);
|
||||
if (new_len == 0) {
|
||||
self.decl_exports.removeAssertDiscard(exp.exported_decl);
|
||||
}
|
||||
@ -1907,12 +1886,12 @@ fn deleteDeclExports(self: *Module, decl: *Decl) void {
|
||||
|
||||
self.bin_file.deleteExport(exp.link);
|
||||
if (self.failed_exports.remove(exp)) |entry| {
|
||||
entry.value.destroy(self.allocator);
|
||||
entry.value.destroy(self.gpa);
|
||||
}
|
||||
_ = self.symbol_exports.remove(exp.options.name);
|
||||
self.allocator.destroy(exp);
|
||||
self.gpa.destroy(exp);
|
||||
}
|
||||
self.allocator.free(kv.value);
|
||||
self.gpa.free(kv.value);
|
||||
}
|
||||
|
||||
fn analyzeFnBody(self: *Module, decl: *Decl, func: *Fn) !void {
|
||||
@ -1920,7 +1899,7 @@ fn analyzeFnBody(self: *Module, decl: *Decl, func: *Fn) !void {
|
||||
defer tracy.end();
|
||||
|
||||
// Use the Decl's arena for function memory.
|
||||
var arena = decl.typed_value.most_recent.arena.?.promote(self.allocator);
|
||||
var arena = decl.typed_value.most_recent.arena.?.promote(self.gpa);
|
||||
defer decl.typed_value.most_recent.arena.?.* = arena.state;
|
||||
var inner_block: Scope.Block = .{
|
||||
.parent = null,
|
||||
@ -1929,10 +1908,10 @@ fn analyzeFnBody(self: *Module, decl: *Decl, func: *Fn) !void {
|
||||
.instructions = .{},
|
||||
.arena = &arena.allocator,
|
||||
};
|
||||
defer inner_block.instructions.deinit(self.allocator);
|
||||
defer inner_block.instructions.deinit(self.gpa);
|
||||
|
||||
const fn_zir = func.analysis.queued;
|
||||
defer fn_zir.arena.promote(self.allocator).deinit();
|
||||
defer fn_zir.arena.promote(self.gpa).deinit();
|
||||
func.analysis = .{ .in_progress = {} };
|
||||
//std.debug.warn("set {} to in_progress\n", .{decl.name});
|
||||
|
||||
@ -1947,7 +1926,7 @@ fn markOutdatedDecl(self: *Module, decl: *Decl) !void {
|
||||
//std.debug.warn("mark {} outdated\n", .{decl.name});
|
||||
try self.work_queue.writeItem(.{ .analyze_decl = decl });
|
||||
if (self.failed_decls.remove(decl)) |entry| {
|
||||
entry.value.destroy(self.allocator);
|
||||
entry.value.destroy(self.gpa);
|
||||
}
|
||||
decl.analysis = .outdated;
|
||||
}
|
||||
@ -1958,7 +1937,7 @@ fn allocateNewDecl(
|
||||
src_index: usize,
|
||||
contents_hash: std.zig.SrcHash,
|
||||
) !*Decl {
|
||||
const new_decl = try self.allocator.create(Decl);
|
||||
const new_decl = try self.gpa.create(Decl);
|
||||
new_decl.* = .{
|
||||
.name = "",
|
||||
.scope = scope.namespace(),
|
||||
@ -1981,10 +1960,10 @@ fn createNewDecl(
|
||||
name_hash: Scope.NameHash,
|
||||
contents_hash: std.zig.SrcHash,
|
||||
) !*Decl {
|
||||
try self.decl_table.ensureCapacity(self.decl_table.items().len + 1);
|
||||
try self.decl_table.ensureCapacity(self.gpa, self.decl_table.items().len + 1);
|
||||
const new_decl = try self.allocateNewDecl(scope, src_index, contents_hash);
|
||||
errdefer self.allocator.destroy(new_decl);
|
||||
new_decl.name = try mem.dupeZ(self.allocator, u8, decl_name);
|
||||
errdefer self.gpa.destroy(new_decl);
|
||||
new_decl.name = try mem.dupeZ(self.gpa, u8, decl_name);
|
||||
self.decl_table.putAssumeCapacityNoClobber(name_hash, new_decl);
|
||||
return new_decl;
|
||||
}
|
||||
@ -1992,7 +1971,7 @@ fn createNewDecl(
|
||||
fn analyzeZirDecl(self: *Module, decl: *Decl, src_decl: *zir.Decl) InnerError!bool {
|
||||
var decl_scope: Scope.DeclAnalysis = .{
|
||||
.decl = decl,
|
||||
.arena = std.heap.ArenaAllocator.init(self.allocator),
|
||||
.arena = std.heap.ArenaAllocator.init(self.gpa),
|
||||
};
|
||||
errdefer decl_scope.arena.deinit();
|
||||
|
||||
@ -2008,7 +1987,7 @@ fn analyzeZirDecl(self: *Module, decl: *Decl, src_decl: *zir.Decl) InnerError!bo
|
||||
prev_type_has_bits = tvm.typed_value.ty.hasCodeGenBits();
|
||||
type_changed = !tvm.typed_value.ty.eql(typed_value.ty);
|
||||
|
||||
tvm.deinit(self.allocator);
|
||||
tvm.deinit(self.gpa);
|
||||
}
|
||||
|
||||
arena_state.* = decl_scope.arena.state;
|
||||
@ -2146,11 +2125,11 @@ fn analyzeExport(self: *Module, scope: *Scope, src: usize, symbol_name: []const
|
||||
else => return self.fail(scope, src, "unable to export type '{}'", .{typed_value.ty}),
|
||||
}
|
||||
|
||||
try self.decl_exports.ensureCapacity(self.decl_exports.items().len + 1);
|
||||
try self.export_owners.ensureCapacity(self.export_owners.items().len + 1);
|
||||
try self.decl_exports.ensureCapacity(self.gpa, self.decl_exports.items().len + 1);
|
||||
try self.export_owners.ensureCapacity(self.gpa, self.export_owners.items().len + 1);
|
||||
|
||||
const new_export = try self.allocator.create(Export);
|
||||
errdefer self.allocator.destroy(new_export);
|
||||
const new_export = try self.gpa.create(Export);
|
||||
errdefer self.gpa.destroy(new_export);
|
||||
|
||||
const owner_decl = scope.decl().?;
|
||||
|
||||
@ -2164,27 +2143,27 @@ fn analyzeExport(self: *Module, scope: *Scope, src: usize, symbol_name: []const
|
||||
};
|
||||
|
||||
// Add to export_owners table.
|
||||
const eo_gop = self.export_owners.getOrPut(owner_decl) catch unreachable;
|
||||
const eo_gop = self.export_owners.getOrPut(self.gpa, owner_decl) catch unreachable;
|
||||
if (!eo_gop.found_existing) {
|
||||
eo_gop.entry.value = &[0]*Export{};
|
||||
}
|
||||
eo_gop.entry.value = try self.allocator.realloc(eo_gop.entry.value, eo_gop.entry.value.len + 1);
|
||||
eo_gop.entry.value = try self.gpa.realloc(eo_gop.entry.value, eo_gop.entry.value.len + 1);
|
||||
eo_gop.entry.value[eo_gop.entry.value.len - 1] = new_export;
|
||||
errdefer eo_gop.entry.value = self.allocator.shrink(eo_gop.entry.value, eo_gop.entry.value.len - 1);
|
||||
errdefer eo_gop.entry.value = self.gpa.shrink(eo_gop.entry.value, eo_gop.entry.value.len - 1);
|
||||
|
||||
// Add to exported_decl table.
|
||||
const de_gop = self.decl_exports.getOrPut(exported_decl) catch unreachable;
|
||||
const de_gop = self.decl_exports.getOrPut(self.gpa, exported_decl) catch unreachable;
|
||||
if (!de_gop.found_existing) {
|
||||
de_gop.entry.value = &[0]*Export{};
|
||||
}
|
||||
de_gop.entry.value = try self.allocator.realloc(de_gop.entry.value, de_gop.entry.value.len + 1);
|
||||
de_gop.entry.value = try self.gpa.realloc(de_gop.entry.value, de_gop.entry.value.len + 1);
|
||||
de_gop.entry.value[de_gop.entry.value.len - 1] = new_export;
|
||||
errdefer de_gop.entry.value = self.allocator.shrink(de_gop.entry.value, de_gop.entry.value.len - 1);
|
||||
errdefer de_gop.entry.value = self.gpa.shrink(de_gop.entry.value, de_gop.entry.value.len - 1);
|
||||
|
||||
if (self.symbol_exports.get(symbol_name)) |_| {
|
||||
try self.failed_exports.ensureCapacity(self.failed_exports.items().len + 1);
|
||||
try self.failed_exports.ensureCapacity(self.gpa, self.failed_exports.items().len + 1);
|
||||
self.failed_exports.putAssumeCapacityNoClobber(new_export, try ErrorMsg.create(
|
||||
self.allocator,
|
||||
self.gpa,
|
||||
src,
|
||||
"exported symbol collision: {}",
|
||||
.{symbol_name},
|
||||
@ -2198,9 +2177,9 @@ fn analyzeExport(self: *Module, scope: *Scope, src: usize, symbol_name: []const
|
||||
self.bin_file.updateDeclExports(self, exported_decl, de_gop.entry.value) catch |err| switch (err) {
|
||||
error.OutOfMemory => return error.OutOfMemory,
|
||||
else => {
|
||||
try self.failed_exports.ensureCapacity(self.failed_exports.items().len + 1);
|
||||
try self.failed_exports.ensureCapacity(self.gpa, self.failed_exports.items().len + 1);
|
||||
self.failed_exports.putAssumeCapacityNoClobber(new_export, try ErrorMsg.create(
|
||||
self.allocator,
|
||||
self.gpa,
|
||||
src,
|
||||
"unable to export: {}",
|
||||
.{@errorName(err)},
|
||||
@ -2224,13 +2203,13 @@ fn addNewInstArgs(
|
||||
}
|
||||
|
||||
fn newZIRInst(
|
||||
allocator: *Allocator,
|
||||
gpa: *Allocator,
|
||||
src: usize,
|
||||
comptime T: type,
|
||||
positionals: std.meta.fieldInfo(T, "positionals").field_type,
|
||||
kw_args: std.meta.fieldInfo(T, "kw_args").field_type,
|
||||
) !*zir.Inst {
|
||||
const inst = try allocator.create(T);
|
||||
const inst = try gpa.create(T);
|
||||
inst.* = .{
|
||||
.base = .{
|
||||
.tag = T.base_tag,
|
||||
@ -2273,7 +2252,7 @@ fn addNewInst(self: *Module, block: *Scope.Block, src: usize, ty: Type, comptime
|
||||
},
|
||||
.args = undefined,
|
||||
};
|
||||
try block.instructions.append(self.allocator, &inst.base);
|
||||
try block.instructions.append(self.gpa, &inst.base);
|
||||
return inst;
|
||||
}
|
||||
|
||||
@ -2433,7 +2412,7 @@ fn analyzeInst(self: *Module, scope: *Scope, old_inst: *zir.Inst) InnerError!*In
|
||||
fn analyzeInstStr(self: *Module, scope: *Scope, str_inst: *zir.Inst.Str) InnerError!*Inst {
|
||||
// The bytes references memory inside the ZIR module, which can get deallocated
|
||||
// after semantic analysis is complete. We need the memory to be in the new anonymous Decl's arena.
|
||||
var new_decl_arena = std.heap.ArenaAllocator.init(self.allocator);
|
||||
var new_decl_arena = std.heap.ArenaAllocator.init(self.gpa);
|
||||
const arena_bytes = try new_decl_arena.allocator.dupe(u8, str_inst.positionals.bytes);
|
||||
|
||||
const ty_payload = try scope.arena().create(Type.Payload.Array_u8_Sentinel0);
|
||||
@ -2457,8 +2436,8 @@ fn createAnonymousDecl(
|
||||
) !*Decl {
|
||||
const name_index = self.getNextAnonNameIndex();
|
||||
const scope_decl = scope.decl().?;
|
||||
const name = try std.fmt.allocPrint(self.allocator, "{}${}", .{ scope_decl.name, name_index });
|
||||
defer self.allocator.free(name);
|
||||
const name = try std.fmt.allocPrint(self.gpa, "{}${}", .{ scope_decl.name, name_index });
|
||||
defer self.gpa.free(name);
|
||||
const name_hash = scope.namespace().fullyQualifiedNameHash(name);
|
||||
const src_hash: std.zig.SrcHash = undefined;
|
||||
const new_decl = try self.createNewDecl(scope, name, scope_decl.src_index, name_hash, src_hash);
|
||||
@ -2554,8 +2533,8 @@ fn analyzeInstBlock(self: *Module, scope: *Scope, inst: *zir.Inst.Block) InnerEr
|
||||
};
|
||||
const label = &child_block.label.?;
|
||||
|
||||
defer child_block.instructions.deinit(self.allocator);
|
||||
defer label.results.deinit(self.allocator);
|
||||
defer child_block.instructions.deinit(self.gpa);
|
||||
defer label.results.deinit(self.gpa);
|
||||
|
||||
try self.analyzeBody(&child_block.base, inst.positionals.body);
|
||||
|
||||
@ -2567,7 +2546,7 @@ fn analyzeInstBlock(self: *Module, scope: *Scope, inst: *zir.Inst.Block) InnerEr
|
||||
// No need to add the Block instruction; we can add the instructions to the parent block directly.
|
||||
// Blocks are terminated with a noreturn instruction which we do not want to include.
|
||||
const instrs = child_block.instructions.items;
|
||||
try parent_block.instructions.appendSlice(self.allocator, instrs[0 .. instrs.len - 1]);
|
||||
try parent_block.instructions.appendSlice(self.gpa, instrs[0 .. instrs.len - 1]);
|
||||
if (label.results.items.len == 1) {
|
||||
return label.results.items[0];
|
||||
} else {
|
||||
@ -2577,7 +2556,7 @@ fn analyzeInstBlock(self: *Module, scope: *Scope, inst: *zir.Inst.Block) InnerEr
|
||||
|
||||
// Need to set the type and emit the Block instruction. This allows machine code generation
|
||||
// to emit a jump instruction to after the block when it encounters the break.
|
||||
try parent_block.instructions.append(self.allocator, &block_inst.base);
|
||||
try parent_block.instructions.append(self.gpa, &block_inst.base);
|
||||
block_inst.base.ty = try self.resolvePeerTypes(scope, label.results.items);
|
||||
block_inst.args.body = .{ .instructions = try parent_block.arena.dupe(*Inst, child_block.instructions.items) };
|
||||
return &block_inst.base;
|
||||
@ -2596,7 +2575,7 @@ fn analyzeInstBreakVoid(self: *Module, scope: *Scope, inst: *zir.Inst.BreakVoid)
|
||||
while (opt_block) |block| {
|
||||
if (block.label) |*label| {
|
||||
if (mem.eql(u8, label.name, label_name)) {
|
||||
try label.results.append(self.allocator, void_inst);
|
||||
try label.results.append(self.gpa, void_inst);
|
||||
return self.constNoReturn(scope, inst.base.src);
|
||||
}
|
||||
}
|
||||
@ -2719,8 +2698,8 @@ fn analyzeInstCall(self: *Module, scope: *Scope, inst: *zir.Inst.Call) InnerErro
|
||||
|
||||
// TODO handle function calls of generic functions
|
||||
|
||||
const fn_param_types = try self.allocator.alloc(Type, fn_params_len);
|
||||
defer self.allocator.free(fn_param_types);
|
||||
const fn_param_types = try self.gpa.alloc(Type, fn_params_len);
|
||||
defer self.gpa.free(fn_param_types);
|
||||
func.ty.fnParamTypes(fn_param_types);
|
||||
|
||||
const casted_args = try scope.arena().alloc(*Inst, fn_params_len);
|
||||
@ -2739,7 +2718,7 @@ fn analyzeInstCall(self: *Module, scope: *Scope, inst: *zir.Inst.Call) InnerErro
|
||||
fn analyzeInstFn(self: *Module, scope: *Scope, fn_inst: *zir.Inst.Fn) InnerError!*Inst {
|
||||
const fn_type = try self.resolveType(scope, fn_inst.positionals.fn_type);
|
||||
const fn_zir = blk: {
|
||||
var fn_arena = std.heap.ArenaAllocator.init(self.allocator);
|
||||
var fn_arena = std.heap.ArenaAllocator.init(self.gpa);
|
||||
errdefer fn_arena.deinit();
|
||||
|
||||
const fn_zir = try scope.arena().create(Fn.ZIR);
|
||||
@ -3120,7 +3099,7 @@ fn analyzeInstCondBr(self: *Module, scope: *Scope, inst: *zir.Inst.CondBr) Inner
|
||||
.instructions = .{},
|
||||
.arena = parent_block.arena,
|
||||
};
|
||||
defer true_block.instructions.deinit(self.allocator);
|
||||
defer true_block.instructions.deinit(self.gpa);
|
||||
try self.analyzeBody(&true_block.base, inst.positionals.true_body);
|
||||
|
||||
var false_block: Scope.Block = .{
|
||||
@ -3130,7 +3109,7 @@ fn analyzeInstCondBr(self: *Module, scope: *Scope, inst: *zir.Inst.CondBr) Inner
|
||||
.instructions = .{},
|
||||
.arena = parent_block.arena,
|
||||
};
|
||||
defer false_block.instructions.deinit(self.allocator);
|
||||
defer false_block.instructions.deinit(self.gpa);
|
||||
try self.analyzeBody(&false_block.base, inst.positionals.false_body);
|
||||
|
||||
return self.addNewInstArgs(parent_block, inst.base.src, Type.initTag(.void), Inst.CondBr, Inst.Args(Inst.CondBr){
|
||||
@ -3284,7 +3263,7 @@ fn cmpNumeric(
|
||||
return self.constUndef(scope, src, Type.initTag(.bool));
|
||||
const is_unsigned = if (lhs_is_float) x: {
|
||||
var bigint_space: Value.BigIntSpace = undefined;
|
||||
var bigint = try lhs_val.toBigInt(&bigint_space).toManaged(self.allocator);
|
||||
var bigint = try lhs_val.toBigInt(&bigint_space).toManaged(self.gpa);
|
||||
defer bigint.deinit();
|
||||
const zcmp = lhs_val.orderAgainstZero();
|
||||
if (lhs_val.floatHasFraction()) {
|
||||
@ -3319,7 +3298,7 @@ fn cmpNumeric(
|
||||
return self.constUndef(scope, src, Type.initTag(.bool));
|
||||
const is_unsigned = if (rhs_is_float) x: {
|
||||
var bigint_space: Value.BigIntSpace = undefined;
|
||||
var bigint = try rhs_val.toBigInt(&bigint_space).toManaged(self.allocator);
|
||||
var bigint = try rhs_val.toBigInt(&bigint_space).toManaged(self.gpa);
|
||||
defer bigint.deinit();
|
||||
const zcmp = rhs_val.orderAgainstZero();
|
||||
if (rhs_val.floatHasFraction()) {
|
||||
@ -3457,7 +3436,7 @@ fn coerceArrayPtrToSlice(self: *Module, scope: *Scope, dest_type: Type, inst: *I
|
||||
|
||||
fn fail(self: *Module, scope: *Scope, src: usize, comptime format: []const u8, args: var) InnerError {
|
||||
@setCold(true);
|
||||
const err_msg = try ErrorMsg.create(self.allocator, src, format, args);
|
||||
const err_msg = try ErrorMsg.create(self.gpa, src, format, args);
|
||||
return self.failWithOwnedErrorMsg(scope, src, err_msg);
|
||||
}
|
||||
|
||||
@ -3487,9 +3466,9 @@ fn failNode(
|
||||
|
||||
fn failWithOwnedErrorMsg(self: *Module, scope: *Scope, src: usize, err_msg: *ErrorMsg) InnerError {
|
||||
{
|
||||
errdefer err_msg.destroy(self.allocator);
|
||||
try self.failed_decls.ensureCapacity(self.failed_decls.items().len + 1);
|
||||
try self.failed_files.ensureCapacity(self.failed_files.items().len + 1);
|
||||
errdefer err_msg.destroy(self.gpa);
|
||||
try self.failed_decls.ensureCapacity(self.gpa, self.failed_decls.items().len + 1);
|
||||
try self.failed_files.ensureCapacity(self.gpa, self.failed_files.items().len + 1);
|
||||
}
|
||||
switch (scope.tag) {
|
||||
.decl => {
|
||||
@ -3542,28 +3521,28 @@ pub const ErrorMsg = struct {
|
||||
byte_offset: usize,
|
||||
msg: []const u8,
|
||||
|
||||
pub fn create(allocator: *Allocator, byte_offset: usize, comptime format: []const u8, args: var) !*ErrorMsg {
|
||||
const self = try allocator.create(ErrorMsg);
|
||||
errdefer allocator.destroy(self);
|
||||
self.* = try init(allocator, byte_offset, format, args);
|
||||
pub fn create(gpa: *Allocator, byte_offset: usize, comptime format: []const u8, args: var) !*ErrorMsg {
|
||||
const self = try gpa.create(ErrorMsg);
|
||||
errdefer gpa.destroy(self);
|
||||
self.* = try init(gpa, byte_offset, format, args);
|
||||
return self;
|
||||
}
|
||||
|
||||
/// Assumes the ErrorMsg struct and msg were both allocated with allocator.
|
||||
pub fn destroy(self: *ErrorMsg, allocator: *Allocator) void {
|
||||
self.deinit(allocator);
|
||||
allocator.destroy(self);
|
||||
pub fn destroy(self: *ErrorMsg, gpa: *Allocator) void {
|
||||
self.deinit(gpa);
|
||||
gpa.destroy(self);
|
||||
}
|
||||
|
||||
pub fn init(allocator: *Allocator, byte_offset: usize, comptime format: []const u8, args: var) !ErrorMsg {
|
||||
pub fn init(gpa: *Allocator, byte_offset: usize, comptime format: []const u8, args: var) !ErrorMsg {
|
||||
return ErrorMsg{
|
||||
.byte_offset = byte_offset,
|
||||
.msg = try std.fmt.allocPrint(allocator, format, args),
|
||||
.msg = try std.fmt.allocPrint(gpa, format, args),
|
||||
};
|
||||
}
|
||||
|
||||
pub fn deinit(self: *ErrorMsg, allocator: *Allocator) void {
|
||||
allocator.free(self.msg);
|
||||
pub fn deinit(self: *ErrorMsg, gpa: *Allocator) void {
|
||||
gpa.free(self.msg);
|
||||
self.* = undefined;
|
||||
}
|
||||
};
|
||||
|
||||
@ -46,7 +46,14 @@ pub fn generateSymbol(
|
||||
var mc_args = try std.ArrayList(Function.MCValue).initCapacity(bin_file.allocator, param_types.len);
|
||||
defer mc_args.deinit();
|
||||
|
||||
var next_stack_offset: u64 = 0;
|
||||
var branch_stack = std.ArrayList(Function.Branch).init(bin_file.allocator);
|
||||
defer {
|
||||
assert(branch_stack.items.len == 1);
|
||||
branch_stack.items[0].deinit(bin_file.allocator);
|
||||
branch_stack.deinit();
|
||||
}
|
||||
const branch = try branch_stack.addOne();
|
||||
branch.* = .{};
|
||||
|
||||
switch (fn_type.fnCallingConvention()) {
|
||||
.Naked => assert(mc_args.items.len == 0),
|
||||
@ -61,8 +68,8 @@ pub fn generateSymbol(
|
||||
switch (param_type.zigTypeTag()) {
|
||||
.Bool, .Int => {
|
||||
if (next_int_reg >= integer_registers.len) {
|
||||
try mc_args.append(.{ .stack_offset = next_stack_offset });
|
||||
next_stack_offset += param_type.abiSize(bin_file.options.target);
|
||||
try mc_args.append(.{ .stack_offset = branch.next_stack_offset });
|
||||
branch.next_stack_offset += @intCast(u32, param_type.abiSize(bin_file.options.target));
|
||||
} else {
|
||||
try mc_args.append(.{ .register = @enumToInt(integer_registers[next_int_reg]) });
|
||||
next_int_reg += 1;
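In this hunk the running stack offset moves into the per-function Branch (branch.next_stack_offset) so later stack allocations can continue from it. A small illustrative model of the classification loop, assuming three available integer registers and 8-byte integer arguments; both numbers are stand-ins, not values taken from this diff:

const std = @import("std");

const ArgLoc = union(enum) {
    register: usize,
    stack_offset: u32,
};

// Integer arguments fill the available integer registers first, then spill to
// the stack, advancing the running offset by each argument's ABI size.
fn classifyIntArgs(sizes: []const u32, locs: []ArgLoc) u32 {
    const int_reg_count: usize = 3;
    var next_int_reg: usize = 0;
    var next_stack_offset: u32 = 0;
    for (sizes) |size, i| {
        if (next_int_reg >= int_reg_count) {
            locs[i] = ArgLoc{ .stack_offset = next_stack_offset };
            next_stack_offset += size;
        } else {
            locs[i] = ArgLoc{ .register = next_int_reg };
            next_int_reg += 1;
        }
    }
    // In the diff, this running offset becomes branch.max_end_stack.
    return next_stack_offset;
}

test "five 8-byte args: three in registers, two on the stack" {
    var locs: [5]ArgLoc = undefined;
    const end = classifyIntArgs(&[_]u32{ 8, 8, 8, 8, 8 }, &locs);
    std.testing.expectEqual(@as(u32, 16), end);
    std.testing.expectEqual(@as(u32, 0), locs[3].stack_offset);
    std.testing.expectEqual(@as(u32, 8), locs[4].stack_offset);
}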
@ -100,23 +107,17 @@ pub fn generateSymbol(
|
||||
}
|
||||
|
||||
var function = Function{
|
||||
.gpa = bin_file.allocator,
|
||||
.target = &bin_file.options.target,
|
||||
.bin_file = bin_file,
|
||||
.mod_fn = module_fn,
|
||||
.code = code,
|
||||
.err_msg = null,
|
||||
.args = mc_args.items,
|
||||
.branch_stack = .{},
|
||||
.branch_stack = &branch_stack,
|
||||
};
|
||||
defer {
|
||||
assert(function.branch_stack.items.len == 1);
|
||||
function.branch_stack.items[0].inst_table.deinit();
|
||||
function.branch_stack.deinit(bin_file.allocator);
|
||||
}
|
||||
try function.branch_stack.append(bin_file.allocator, .{
|
||||
.inst_table = std.AutoHashMap(*ir.Inst, Function.MCValue).init(bin_file.allocator),
|
||||
});
|
||||
|
||||
branch.max_end_stack = branch.next_stack_offset;
|
||||
function.gen() catch |err| switch (err) {
|
||||
error.CodegenFail => return Result{ .fail = function.err_msg.? },
|
||||
else => |e| return e,
|
||||
@ -218,6 +219,7 @@ pub fn generateSymbol(
|
||||
}
|
||||
|
||||
const Function = struct {
|
||||
gpa: *Allocator,
|
||||
bin_file: *link.ElfFile,
|
||||
target: *const std.Target,
|
||||
mod_fn: *const Module.Fn,
|
||||
@ -232,10 +234,37 @@ const Function = struct {
|
||||
/// within different branches. Special consideration is needed when a branch
|
||||
/// joins with its parent, to make sure all instructions have the same MCValue
|
||||
/// across each runtime branch upon joining.
|
||||
branch_stack: std.ArrayListUnmanaged(Branch),
|
||||
branch_stack: *std.ArrayList(Branch),
|
||||
|
||||
const Branch = struct {
|
||||
inst_table: std.AutoHashMap(*ir.Inst, MCValue),
|
||||
inst_table: std.AutoHashMapUnmanaged(*ir.Inst, MCValue) = .{},
|
||||
|
||||
/// The key is an enum value of an arch-specific register.
|
||||
registers: std.AutoHashMapUnmanaged(usize, RegisterAllocation) = .{},
|
||||
|
||||
/// Maps offset to what is stored there.
|
||||
stack: std.AutoHashMapUnmanaged(usize, StackAllocation) = .{},
|
||||
/// Offset from the stack base, representing the end of the stack frame.
|
||||
max_end_stack: u32 = 0,
|
||||
/// Represents the current end stack offset. If there is no existing slot
|
||||
/// to place a new stack allocation, it goes here, and then bumps `max_end_stack`.
|
||||
next_stack_offset: u32 = 0,
|
||||
|
||||
fn deinit(self: *Branch, gpa: *Allocator) void {
|
||||
self.inst_table.deinit(gpa);
|
||||
self.registers.deinit(gpa);
|
||||
self.stack.deinit(gpa);
|
||||
self.* = undefined;
|
||||
}
|
||||
};
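Branch now owns its unmanaged tables plus the stack bookkeeping, and generateSymbol (earlier in this diff) creates the branch stack, pushes the root branch, and tears its tables down when codegen finishes, while Function only borrows a pointer to that stack. A minimal sketch of the lifecycle with simplified stand-in key/value types (the real ones are *ir.Inst and MCValue):

const std = @import("std");

const Branch = struct {
    inst_table: std.AutoHashMapUnmanaged(usize, u8) = .{},
    next_stack_offset: u32 = 0,
    max_end_stack: u32 = 0,

    fn deinit(self: *Branch, gpa: *std.mem.Allocator) void {
        self.inst_table.deinit(gpa);
        self.* = undefined;
    }
};

test "caller-owned branch stack lifecycle" {
    const gpa = std.testing.allocator;

    var branch_stack = std.ArrayList(Branch).init(gpa);
    defer {
        std.debug.assert(branch_stack.items.len == 1);
        branch_stack.items[0].deinit(gpa);
        branch_stack.deinit();
    }

    const branch = try branch_stack.addOne();
    branch.* = .{};

    // Per-branch instruction results; the allocator travels with each call.
    try branch.inst_table.putNoClobber(gpa, 0x1000, 42);
    std.testing.expect(branch.inst_table.items().len == 1);
}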
const RegisterAllocation = struct {
|
||||
inst: *ir.Inst,
|
||||
};
|
||||
|
||||
const StackAllocation = struct {
|
||||
inst: *ir.Inst,
|
||||
size: u32,
|
||||
};
|
||||
|
||||
const MCValue = union(enum) {
|
||||
@ -256,6 +285,13 @@ const Function = struct {
|
||||
memory: u64,
|
||||
/// The value is one of the stack variables.
|
||||
stack_offset: u64,
|
||||
|
||||
fn isMemory(mcv: MCValue) bool {
|
||||
return switch (mcv) {
|
||||
.embedded_in_code, .memory, .stack_offset => true,
|
||||
else => false,
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
fn gen(self: *Function) !void {
|
||||
@ -318,7 +354,7 @@ const Function = struct {
|
||||
const inst_table = &self.branch_stack.items[0].inst_table;
|
||||
for (self.mod_fn.analysis.success.instructions) |inst| {
|
||||
const new_inst = try self.genFuncInst(inst, arch);
|
||||
try inst_table.putNoClobber(inst, new_inst);
|
||||
try inst_table.putNoClobber(self.gpa, inst, new_inst);
|
||||
}
|
||||
}
|
||||
|
||||
@ -344,19 +380,99 @@ const Function = struct {
|
||||
}
|
||||
|
||||
fn genAdd(self: *Function, inst: *ir.Inst.Add, comptime arch: std.Target.Cpu.Arch) !MCValue {
const lhs = try self.resolveInst(inst.args.lhs);
const rhs = try self.resolveInst(inst.args.rhs);
// No side effects, so if it's unreferenced, do nothing.
if (inst.base.isUnused())
return MCValue.dead;
switch (arch) {
.i386, .x86_64 => {
// const lhs_reg = try self.instAsReg(lhs);
// const rhs_reg = try self.instAsReg(rhs);
// const result = try self.allocateReg();
.x86_64 => {
// Biggest encoding of ADD is 8 bytes.
try self.code.ensureCapacity(self.code.items.len + 8);

// try self.code.append(??);
// In x86, ADD has 2 operands, destination and source.
// Either one, but not both, can be a memory operand.
// Source operand can be an immediate, 8 bits or 32 bits.
// So, if either one of the operands dies with this instruction, we can use it
// as the result MCValue.
var dst_mcv: MCValue = undefined;
var src_mcv: MCValue = undefined;
if (inst.base.operandDies(0)) {
// LHS dies; use it as the destination.
dst_mcv = try self.resolveInst(inst.args.lhs);
// Both operands cannot be memory.
if (dst_mcv.isMemory()) {
src_mcv = try self.resolveInstImmOrReg(inst.args.rhs);
} else {
src_mcv = try self.resolveInst(inst.args.rhs);
}
} else if (inst.base.operandDies(1)) {
// RHS dies; use it as the destination.
dst_mcv = try self.resolveInst(inst.args.rhs);
// Both operands cannot be memory.
if (dst_mcv.isMemory()) {
src_mcv = try self.resolveInstImmOrReg(inst.args.lhs);
} else {
src_mcv = try self.resolveInst(inst.args.lhs);
}
} else {
const lhs = try self.resolveInst(inst.args.lhs);
const rhs = try self.resolveInst(inst.args.rhs);
if (lhs.isMemory()) {
dst_mcv = try self.copyToNewRegister(inst.base.src, lhs);
src_mcv = rhs;
} else {
dst_mcv = try self.copyToNewRegister(inst.base.src, rhs);
src_mcv = lhs;
}
}
// x86 ADD supports only signed 32-bit immediates at most. If the immediate
// value is larger than this, we put it in a register.
// A potential opportunity for future optimization here would be keeping track
// of the fact that the instruction is available both as an immediate
// and as a register.
switch (src_mcv) {
.immediate => |imm| {
if (imm > std.math.maxInt(u31)) {
src_mcv = try self.copyToNewRegister(inst.base.src, src_mcv);
}
},
else => {},
}

// lhs_reg.release();
// rhs_reg.release();
return self.fail(inst.base.src, "TODO implement register allocation", .{});
switch (dst_mcv) {
.none => unreachable,
.dead, .unreach, .immediate => unreachable,
.register => |dst_reg_usize| {
const dst_reg = @intToEnum(Reg(arch), @intCast(@TagType(Reg(arch)), dst_reg_usize));
switch (src_mcv) {
.none => unreachable,
.dead, .unreach => unreachable,
.register => |src_reg_usize| {
const src_reg = @intToEnum(Reg(arch), @intCast(@TagType(Reg(arch)), src_reg_usize));
self.rex(.{ .b = dst_reg.isExtended(), .r = src_reg.isExtended(), .w = dst_reg.size() == 64 });
self.code.appendSliceAssumeCapacity(&[_]u8{ 0x1, 0xC0 | (@as(u8, src_reg.id() & 0b111) << 3) | @as(u8, dst_reg.id() & 0b111) });
},
.immediate => |imm| {
const imm32 = @intCast(u31, imm); // We handle this case above.
// 81 /0 id
if (imm32 <= std.math.maxInt(u7)) {
self.rex(.{ .b = dst_reg.isExtended(), .w = dst_reg.size() == 64 });
self.code.appendSliceAssumeCapacity(&[_]u8{ 0x83, 0xC0 | @as(u8, dst_reg.id() & 0b111), @intCast(u8, imm32)});
} else {
self.rex(.{ .b = dst_reg.isExtended(), .w = dst_reg.size() == 64 });
self.code.appendSliceAssumeCapacity(&[_]u8{ 0x81, 0xC0 | @as(u8, dst_reg.id() & 0b111) });
std.mem.writeIntLittle(u32, self.code.addManyAsArrayAssumeCapacity(4), imm32);
}
},
.embedded_in_code, .memory, .stack_offset => {
return self.fail(inst.base.src, "TODO implement x86 add source memory", .{});
},
}
},
.embedded_in_code, .memory, .stack_offset => {
return self.fail(inst.base.src, "TODO implement x86 add destination memory", .{});
},
}
return dst_mcv;
},
else => return self.fail(inst.base.src, "TODO implement add for {}", .{self.target.cpu.arch}),
}
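
As a rough illustration of the encoding path above (a sketch, not part of the commit; concrete registers chosen arbitrarily), the three forms it emits look like this:

// add rcx, rax: REX.W (0x48), opcode 0x01, ModRM = 0xC0 | (rax << 3) | rcx
const add_rcx_rax = [_]u8{ 0x48, 0x01, 0xC1 };
// add rax, 1: small immediates use the sign-extended imm8 form, 0x83 /0 ib
const add_rax_1 = [_]u8{ 0x48, 0x83, 0xC0, 0x01 };
// add rax, 0x12345678: larger immediates use 0x81 /0 id with a little-endian imm32
const add_rax_imm32 = [_]u8{ 0x48, 0x81, 0xC0, 0x78, 0x56, 0x34, 0x12 };
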
@ -526,23 +642,23 @@ const Function = struct {
/// resulting REX is meaningful, but will remain the same if it is not.
/// * Deliberately inserting a "meaningless REX" requires explicit usage of
/// 0x40, and cannot be done via this function.
fn REX(self: *Function, arg: struct { B: bool = false, W: bool = false, X: bool = false, R: bool = false }) !void {
fn rex(self: *Function, arg: struct { b: bool = false, w: bool = false, x: bool = false, r: bool = false }) void {
// From section 2.2.1.2 of the manual, REX is encoded as b0100WRXB.
var value: u8 = 0x40;
if (arg.B) {
if (arg.b) {
value |= 0x1;
}
if (arg.X) {
if (arg.x) {
value |= 0x2;
}
if (arg.R) {
if (arg.r) {
value |= 0x4;
}
if (arg.W) {
if (arg.w) {
value |= 0x8;
}
if (value != 0x40) {
try self.code.append(value);
self.code.appendAssumeCapacity(value);
}
}

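A few concrete values this produces (illustrative only, not part of the commit):

const REX_W: u8 = 0x48; // .{ .w = true }: 64-bit operand size
const REX_WB: u8 = 0x49; // .{ .w = true, .b = true }: 64-bit, extended r/m register
const REX_RB: u8 = 0x45; // .{ .r = true, .b = true }: both register fields extended
// .{} stays at 0x40, which is meaningless here, so no byte is appended.
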
@ -570,11 +686,11 @@ const Function = struct {
// If we're accessing e.g. r8d, we need to use a REX prefix before the actual operation. Since
// this is a 32-bit operation, the W flag is set to zero. X is also zero, as we're not using a SIB.
// Both R and B are set, as we're extending, in effect, the register bits *and* the operand.
try self.REX(.{ .R = reg.isExtended(), .B = reg.isExtended() });
try self.code.ensureCapacity(self.code.items.len + 3);
self.rex(.{ .r = reg.isExtended(), .b = reg.isExtended() });
const id = @as(u8, reg.id() & 0b111);
return self.code.appendSlice(&[_]u8{
0x31, 0xC0 | id << 3 | id,
});
self.code.appendSliceAssumeCapacity(&[_]u8{ 0x31, 0xC0 | id << 3 | id });
return;
}
if (x <= std.math.maxInt(u32)) {
// Next best case: if we set the lower four bytes, the upper four will be zeroed.
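
For example (a sketch, not part of the commit), the self-xor zeroing idiom encodes as:

// xor eax, eax: no REX needed
const zero_eax = [_]u8{ 0x31, 0xC0 };
// xor r8d, r8d: REX with R and B set (0x45), then the same 0x31 /r pair
const zero_r8d = [_]u8{ 0x45, 0x31, 0xC0 };
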
@ -607,9 +723,9 @@ const Function = struct {
// Since we always need a REX here, let's just check if we also need to set REX.B.
//
// In this case, the encoding of the REX byte is 0b0100100B

try self.REX(.{ .W = true, .B = reg.isExtended() });
try self.code.resize(self.code.items.len + 9);
try self.code.ensureCapacity(self.code.items.len + 10);
self.rex(.{ .w = true, .b = reg.isExtended() });
self.code.items.len += 9;
self.code.items[self.code.items.len - 9] = 0xB8 | @as(u8, reg.id() & 0b111);
const imm_ptr = self.code.items[self.code.items.len - 8 ..][0..8];
mem.writeIntLittle(u64, imm_ptr, x);
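
For instance (illustrative only, not part of the commit), loading a full 64-bit constant into rax is the 10-byte sequence that the capacity reservation above accounts for:

// movabs rax, 0x1122334455667788: REX.W, 0xB8 | rax, then the imm64 little-endian
const mov_rax_imm64 = [_]u8{ 0x48, 0xB8, 0x88, 0x77, 0x66, 0x55, 0x44, 0x33, 0x22, 0x11 };
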
@ -620,13 +736,13 @@ const Function = struct {
}
// We need the offset from RIP in a signed i32 twos complement.
// The instruction is 7 bytes long and RIP points to the next instruction.
//
try self.code.ensureCapacity(self.code.items.len + 7);
// 64-bit LEA is encoded as REX.W 8D /r. If the register is extended, the REX byte is modified,
// but the operation size is unchanged. Since we're using a disp32, we want mode 0 and lower three
// bits as five.
// REX 0x8D 0b00RRR101, where RRR is the lower three bits of the id.
try self.REX(.{ .W = true, .B = reg.isExtended() });
try self.code.resize(self.code.items.len + 6);
self.rex(.{ .w = true, .b = reg.isExtended() });
self.code.items.len += 6;
const rip = self.code.items.len;
const big_offset = @intCast(i64, code_offset) - @intCast(i64, rip);
const offset = @intCast(i32, big_offset);
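
As a concrete sketch (not part of the commit): since RIP points past the 7-byte instruction, an LEA of rax targeting the instruction's own first byte would carry a disp32 of -7:

// lea rax, [rip - 7]: REX.W, 0x8D, ModRM = 0b00_000_101, then the disp32 little-endian
const lea_rax_self = [_]u8{ 0x48, 0x8D, 0x05, 0xF9, 0xFF, 0xFF, 0xFF };
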
@ -646,9 +762,10 @@ const Function = struct {
// If the *source* is extended, the B field must be 1.
// Since the register is being accessed directly, the R/M mode is three. The reg field (the middle
// three bits) contains the destination, and the R/M field (the lower three bits) contains the source.
try self.REX(.{ .W = true, .R = reg.isExtended(), .B = src_reg.isExtended() });
try self.code.ensureCapacity(self.code.items.len + 3);
self.rex(.{ .w = true, .r = reg.isExtended(), .b = src_reg.isExtended() });
const R = 0xC0 | (@as(u8, reg.id() & 0b111) << 3) | @as(u8, src_reg.id() & 0b111);
try self.code.appendSlice(&[_]u8{ 0x8B, R });
self.code.appendSliceAssumeCapacity(&[_]u8{ 0x8B, R });
},
.memory => |x| {
if (reg.size() != 64) {
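
For example (a sketch, not part of the commit), a 64-bit register-to-register move encodes as:

// mov rax, rcx: REX.W, 0x8B, ModRM = 0xC0 | (rax << 3) | rcx
const mov_rax_rcx = [_]u8{ 0x48, 0x8B, 0xC1 };
// mov r9, rax: the destination is an extended register, so REX.W|R = 0x4C
const mov_r9_rax = [_]u8{ 0x4C, 0x8B, 0xC8 };
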
@ -662,14 +779,14 @@ const Function = struct {
// The SIB must be 0x25, to indicate a disp32 with no scaled index.
// 0b00RRR100, where RRR is the lower three bits of the register ID.
// The instruction is thus eight bytes; REX 0x8B 0b00RRR100 0x25 followed by a four-byte disp32.
try self.REX(.{ .W = true, .B = reg.isExtended() });
try self.code.resize(self.code.items.len + 7);
const r = 0x04 | (@as(u8, reg.id() & 0b111) << 3);
self.code.items[self.code.items.len - 7] = 0x8B;
self.code.items[self.code.items.len - 6] = r;
self.code.items[self.code.items.len - 5] = 0x25;
const imm_ptr = self.code.items[self.code.items.len - 4 ..][0..4];
mem.writeIntLittle(u32, imm_ptr, @intCast(u32, x));
try self.code.ensureCapacity(self.code.items.len + 8);
self.rex(.{ .w = true, .b = reg.isExtended() });
self.code.appendSliceAssumeCapacity(&[_]u8{
0x8B,
0x04 | (@as(u8, reg.id() & 0b111) << 3), // R
0x25,
});
mem.writeIntLittle(u32, self.code.addManyAsArrayAssumeCapacity(4), @intCast(u32, x));
} else {
// If this is RAX, we can use a direct load; otherwise, we need to load the address, then indirectly load
// the value.
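
A worked example of that eight-byte form (illustrative only, not part of the commit):

// mov rax, [0x10000]: REX.W, 0x8B, ModRM = 0x04, SIB = 0x25, then the disp32 little-endian
const mov_rax_abs = [_]u8{ 0x48, 0x8B, 0x04, 0x25, 0x00, 0x00, 0x01, 0x00 };
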
@ -700,15 +817,15 @@ const Function = struct {
// Currently, we're only allowing 64-bit registers, so we need the `REX.W 8B /r` variant.
// TODO: determine whether to allow other sized registers, and if so, handle them properly.
// This operation requires three bytes: REX 0x8B R/M
//
try self.code.ensureCapacity(self.code.items.len + 3);
// For this operation, we want R/M mode *zero* (use register indirectly), and the two register
// values must match. Thus, it's 00ABCABC where ABC is the lower three bits of the register ID.
//
// Furthermore, if this is an extended register, both B and R must be set in the REX byte, as *both*
// register operands need to be marked as extended.
try self.REX(.{ .W = true, .B = reg.isExtended(), .R = reg.isExtended() });
self.rex(.{ .w = true, .b = reg.isExtended(), .r = reg.isExtended() });
const RM = (@as(u8, reg.id() & 0b111) << 3) | @truncate(u3, reg.id());
try self.code.appendSlice(&[_]u8{ 0x8B, RM });
self.code.appendSliceAssumeCapacity(&[_]u8{ 0x8B, RM });
}
}
},
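
For instance (a sketch, not part of the commit), dereferencing a register into itself encodes as:

// mov rax, [rax]: REX.W, 0x8B, ModRM = 0b00_000_000
const deref_rax = [_]u8{ 0x48, 0x8B, 0x00 };
// mov r9, [r9]: both register fields are extended, so REX.W|R|B = 0x4D
const deref_r9 = [_]u8{ 0x4D, 0x8B, 0x09 };
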
@ -731,36 +848,40 @@ const Function = struct {
}

fn resolveInst(self: *Function, inst: *ir.Inst) !MCValue {
if (self.inst_table.get(inst)) |mcv| {
return mcv;
}
// Constants have static lifetimes, so they are always memoized in the outermost table.
if (inst.cast(ir.Inst.Constant)) |const_inst| {
const branch = &self.branch_stack.items[0];
const gop = try branch.inst_table.getOrPut(inst);
const gop = try branch.inst_table.getOrPut(self.gpa, inst);
if (!gop.found_existing) {
const mcv = try self.genTypedValue(inst.src, .{ .ty = inst.ty, .val = const_inst.val });
try branch.inst_table.putNoClobber(inst, mcv);
gop.kv.value = mcv;
try branch.inst_table.putNoClobber(self.gpa, inst, mcv);
gop.entry.value = mcv;
return mcv;
}
return gop.kv.value;
return gop.entry.value;
}

// Treat each stack item as a "layer" on top of the previous one.
var i: usize = self.branch_stack.items.len;
while (true) {
i -= 1;
if (self.branch_stack.items[i].inst_table.getValue(inst)) |mcv| {
if (self.branch_stack.items[i].inst_table.get(inst)) |mcv| {
return mcv;
}
}
}

fn resolveInstImmOrReg(self: *Function, inst: *ir.Inst) !MCValue {
return self.fail(inst.src, "TODO implement resolveInstImmOrReg", .{});
}

fn copyToNewRegister(self: *Function, src: usize, mcv: MCValue) !MCValue {
return self.fail(src, "TODO implement copyToNewRegister", .{});
}

fn genTypedValue(self: *Function, src: usize, typed_value: TypedValue) !MCValue {
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
const allocator = self.code.allocator;
switch (typed_value.ty.zigTypeTag()) {
.Pointer => {
if (typed_value.val.cast(Value.Payload.DeclRef)) |payload| {
@ -787,7 +908,7 @@ const Function = struct {
fn fail(self: *Function, src: usize, comptime format: []const u8, args: var) error{ CodegenFail, OutOfMemory } {
@setCold(true);
assert(self.err_msg == null);
self.err_msg = try ErrorMsg.create(self.code.allocator, src, format, args);
self.err_msg = try ErrorMsg.create(self.bin_file.allocator, src, format, args);
return error.CodegenFail;
}
};

@ -2,6 +2,7 @@ const std = @import("std");
const Value = @import("value.zig").Value;
const Type = @import("type.zig").Type;
const Module = @import("Module.zig");
const assert = std.debug.assert;

/// These are in-memory, analyzed instructions. See `zir.Inst` for the representation
/// of instructions that correspond to the ZIR text format.
@ -12,18 +13,28 @@ pub const Inst = struct {
tag: Tag,
/// Each bit represents the index of an `Inst` parameter in the `args` field.
/// If a bit is set, it marks the end of the lifetime of the corresponding
/// instruction parameter. For example, 0b00000101 means that the first and
/// instruction parameter. For example, 0b000_00101 means that the first and
/// third `Inst` parameters' lifetimes end after this instruction, and will
/// not have any more following references.
/// The most significant bit being set means that the instruction itself is
/// never referenced, in other words its lifetime ends as soon as it finishes.
/// If the byte is `0xff`, it means this is a special case and this data is
/// encoded elsewhere.
deaths: u8 = 0xff,
/// If bit 7 (0b1xxx_xxxx) is set, it means this instruction itself is unreferenced.
/// If bit 6 (0bx1xx_xxxx) is set, it means this is a special case and the
/// lifetimes of operands are encoded elsewhere.
deaths: u8 = undefined,
ty: Type,
/// Byte offset into the source.
src: usize,

pub fn isUnused(self: Inst) bool {
return (self.deaths & 0b1000_0000) != 0;
}

pub fn operandDies(self: Inst, index: u3) bool {
assert(index < 6);
return @truncate(u1, self.deaths >> index) != 0;
}

pub const Tag = enum {
add,
arg,
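
To make the bit layout concrete (an illustrative sketch, not part of the commit): a deaths byte of 0b1000_0101 means operands 0 and 2 have no further references after this instruction, while bit 7 marks the instruction's own result as unused.

const example_deaths: u8 = 0b1000_0101;
// isUnused() -> true (bit 7 set)
// operandDies(0) -> true, operandDies(1) -> false, operandDies(2) -> true
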
@ -240,4 +251,4 @@ pub const Inst = struct {

pub const Body = struct {
instructions: []*Inst,
};
};

@ -1007,7 +1007,7 @@ pub const ElfFile = struct {
.appended => code_buffer.items,
.fail => |em| {
decl.analysis = .codegen_failure;
_ = try module.failed_decls.put(decl, em);
_ = try module.failed_decls.put(module.gpa, decl, em);
return;
},
};
@ -1093,7 +1093,7 @@ pub const ElfFile = struct {
for (exports) |exp| {
if (exp.options.section) |section_name| {
if (!mem.eql(u8, section_name, ".text")) {
try module.failed_exports.ensureCapacity(module.failed_exports.items().len + 1);
try module.failed_exports.ensureCapacity(module.gpa, module.failed_exports.items().len + 1);
module.failed_exports.putAssumeCapacityNoClobber(
exp,
try Module.ErrorMsg.create(self.allocator, 0, "Unimplemented: ExportOptions.section", .{}),
@ -1111,7 +1111,7 @@ pub const ElfFile = struct {
},
.Weak => elf.STB_WEAK,
.LinkOnce => {
try module.failed_exports.ensureCapacity(module.failed_exports.items().len + 1);
try module.failed_exports.ensureCapacity(module.gpa, module.failed_exports.items().len + 1);
module.failed_exports.putAssumeCapacityNoClobber(
exp,
try Module.ErrorMsg.create(self.allocator, 0, "Unimplemented: GlobalLinkage.LinkOnce", .{}),

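These call sites show the pattern behind the switch to the unmanaged hash maps: the map no longer carries an allocator, so every allocating call passes `module.gpa` (or `self.gpa`) explicitly. A minimal sketch of the idiom (not from the commit; the key/value types and the exact std signatures of this era are assumptions):

const std = @import("std");

fn example(gpa: *std.mem.Allocator) !void {
    // Unmanaged map: default-initialized, allocator supplied per call (assumed API).
    var map: std.AutoHashMapUnmanaged(u32, u32) = .{};
    defer map.deinit(gpa);

    try map.ensureCapacity(gpa, 4); // allocating calls take the allocator...
    map.putAssumeCapacityNoClobber(1, 10); // ...capacity-assuming calls do not
    const gop = try map.getOrPut(gpa, 2);
    if (!gop.found_existing) gop.entry.value = 20;
    std.debug.assert(map.get(1).? == 10);
}
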
@ -123,7 +123,7 @@ fn analyzeInst(arena: *std.mem.Allocator, table: *std.AutoHashMap(*ir.Inst, void
if (arg_index >= 6) {
@compileError("out of bits to mark deaths of operands");
}
const prev = try table.put(@field(inst.args, field.name), {});
const prev = try table.fetchPut(@field(inst.args, field.name), {});
if (prev == null) {
// Death.
inst.base.deaths |= 1 << arg_index;
@ -131,4 +131,4 @@ fn analyzeInst(arena: *std.mem.Allocator, table: *std.AutoHashMap(*ir.Inst, void
arg_index += 1;
}
}
}
}

@ -502,7 +502,7 @@ fn updateModule(gpa: *Allocator, module: *Module, zir_out_path: ?[]const u8) !vo
const update_nanos = timer.read();

var errors = try module.getAllErrorsAlloc();
defer errors.deinit(module.allocator);
defer errors.deinit(module.gpa);

if (errors.list.len != 0) {
for (errors.list) |full_err_msg| {