self-hosted: fix codegen and resolve some analysis bugs

Andrew Kelley 2020-05-14 13:20:27 -04:00
parent fb947c365e
commit 0986dcf1cf
7 changed files with 302 additions and 79 deletions

View File

@@ -191,8 +191,8 @@ pub fn LinearFifo(
}
/// Read the next item from the fifo
pub fn readItem(self: *Self) !T {
if (self.count == 0) return error.EndOfStream;
pub fn readItem(self: *Self) ?T {
if (self.count == 0) return null;
const c = self.buf[self.head];
self.discard(1);
@@ -282,7 +282,10 @@ pub fn LinearFifo(
/// Write a single item to the fifo
pub fn writeItem(self: *Self, item: T) !void {
try self.ensureUnusedCapacity(1);
return self.writeItemAssumeCapacity(item);
}
pub fn writeItemAssumeCapacity(self: *Self, item: T) void {
var tail = self.head + self.count;
if (powers_of_two) {
tail &= self.buf.len - 1;
@@ -342,10 +345,10 @@ pub fn LinearFifo(
}
}
/// Peek at the item at `offset`
pub fn peekItem(self: Self, offset: usize) error{EndOfStream}!T {
if (offset >= self.count)
return error.EndOfStream;
/// Returns the item at `offset`.
/// Asserts offset is within bounds.
pub fn peekItem(self: Self, offset: usize) T {
assert(offset < self.count);
var index = self.head + offset;
if (powers_of_two) {
@@ -369,18 +372,18 @@ test "LinearFifo(u8, .Dynamic)" {
{
var i: usize = 0;
while (i < 5) : (i += 1) {
try fifo.write(&[_]u8{try fifo.peekItem(i)});
try fifo.write(&[_]u8{fifo.peekItem(i)});
}
testing.expectEqual(@as(usize, 10), fifo.readableLength());
testing.expectEqualSlices(u8, "HELLOHELLO", fifo.readableSlice(0));
}
{
testing.expectEqual(@as(u8, 'H'), try fifo.readItem());
testing.expectEqual(@as(u8, 'E'), try fifo.readItem());
testing.expectEqual(@as(u8, 'L'), try fifo.readItem());
testing.expectEqual(@as(u8, 'L'), try fifo.readItem());
testing.expectEqual(@as(u8, 'O'), try fifo.readItem());
testing.expectEqual(@as(u8, 'H'), fifo.readItem().?);
testing.expectEqual(@as(u8, 'E'), fifo.readItem().?);
testing.expectEqual(@as(u8, 'L'), fifo.readItem().?);
testing.expectEqual(@as(u8, 'L'), fifo.readItem().?);
testing.expectEqual(@as(u8, 'O'), fifo.readItem().?);
}
testing.expectEqual(@as(usize, 5), fifo.readableLength());
@@ -451,11 +454,11 @@ test "LinearFifo" {
testing.expectEqual(@as(usize, 5), fifo.readableLength());
{
testing.expectEqual(@as(T, 0), try fifo.readItem());
testing.expectEqual(@as(T, 1), try fifo.readItem());
testing.expectEqual(@as(T, 1), try fifo.readItem());
testing.expectEqual(@as(T, 0), try fifo.readItem());
testing.expectEqual(@as(T, 1), try fifo.readItem());
testing.expectEqual(@as(T, 0), fifo.readItem().?);
testing.expectEqual(@as(T, 1), fifo.readItem().?);
testing.expectEqual(@as(T, 1), fifo.readItem().?);
testing.expectEqual(@as(T, 0), fifo.readItem().?);
testing.expectEqual(@as(T, 1), fifo.readItem().?);
testing.expectEqual(@as(usize, 0), fifo.readableLength());
}
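The LinearFifo change above makes an empty fifo read return null instead of error.EndOfStream, makes peekItem assert that the offset is in bounds, and splits writeItem into an allocating front end plus writeItemAssumeCapacity. A minimal sketch of the new calling convention (illustrative only, not part of this commit; the helper name is hypothetical):

const std = @import("std");

fn echoHello(fifo: *std.fifo.LinearFifo(u8, .Dynamic)) !void {
    // Reserve space once, then use the non-failing write variant.
    try fifo.ensureUnusedCapacity(5);
    for ("hello") |byte| fifo.writeItemAssumeCapacity(byte);
    // Drain by unwrapping optionals rather than catching EndOfStream.
    while (fifo.readItem()) |byte| {
        std.debug.warn("{c}", .{byte});
    }
}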

View File

@@ -9,7 +9,18 @@ const link = @import("link.zig");
const Target = std.Target;
const Allocator = mem.Allocator;
pub fn generateSymbol(bin_file: *link.ElfFile, typed_value: TypedValue, code: *std.ArrayList(u8)) !?*ir.ErrorMsg {
pub const Result = union(enum) {
/// This value might or might not alias the `code` parameter passed to `generateSymbol`.
ok: []const u8,
fail: *ir.ErrorMsg,
};
pub fn generateSymbol(
bin_file: *link.ElfFile,
src: usize,
typed_value: TypedValue,
code: *std.ArrayList(u8),
) error{OutOfMemory}!Result {
switch (typed_value.ty.zigTypeTag()) {
.Fn => {
const module_fn = typed_value.val.cast(Value.Payload.Function).?.func;
@@ -18,25 +29,77 @@ pub fn generateSymbol(bin_file: *link.ElfFile, typed_value: TypedValue, code: *s
.target = &bin_file.options.target,
.mod_fn = module_fn,
.code = code,
.inst_table = std.AutoHashMap(*ir.Inst, Function.MCValue).init(code.allocator),
.inst_table = std.AutoHashMap(*ir.Inst, Function.MCValue).init(bin_file.allocator),
.err_msg = null,
};
defer function.inst_table.deinit();
for (module_fn.analysis.success.instructions) |inst| {
const new_inst = function.genFuncInst(inst) catch |err| switch (err) {
error.CodegenFail => {
assert(function.err_msg != null);
break;
},
error.CodegenFail => return Result{ .fail = function.err_msg.? },
else => |e| return e,
};
try function.inst_table.putNoClobber(inst, new_inst);
}
return function.err_msg;
if (function.err_msg) |em| {
return Result{ .fail = em };
} else {
return Result{ .ok = code.items };
}
},
.Array => {
if (typed_value.val.cast(Value.Payload.Bytes)) |payload| {
return Result{ .ok = payload.data };
}
return Result{
.fail = try ir.ErrorMsg.create(
bin_file.allocator,
src,
"TODO implement generateSymbol for more kinds of arrays",
.{},
),
};
},
.Pointer => {
if (typed_value.val.cast(Value.Payload.DeclRef)) |payload| {
const decl = payload.decl;
assert(decl.link.local_sym_index != 0);
// TODO handle the dependency of this symbol on the decl's vaddr.
// If the decl changes vaddr, then this symbol needs to get regenerated.
const vaddr = bin_file.symbols.items[decl.link.local_sym_index].st_value;
const endian = bin_file.options.target.cpu.arch.endian();
switch (bin_file.ptr_width) {
.p32 => {
try code.resize(4);
mem.writeInt(u32, code.items[0..4], @intCast(u32, vaddr), endian);
},
.p64 => {
try code.resize(8);
mem.writeInt(u64, code.items[0..8], vaddr, endian);
},
}
return Result{ .ok = code.items };
}
return Result{
.fail = try ir.ErrorMsg.create(
bin_file.allocator,
src,
"TODO implement generateSymbol for pointer {}",
.{typed_value.val},
),
};
},
else => |t| {
return Result{
.fail = try ir.ErrorMsg.create(
bin_file.allocator,
src,
"TODO implement generateSymbol for type '{}'",
.{@tagName(t)},
),
};
},
else => @panic("TODO implement generateSymbol for non-function decls"),
}
}
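generateSymbol now takes the source location and reports success or failure through the Result union instead of returning an optional *ErrorMsg. A sketch of the calling pattern (illustrative only; it mirrors the ElfFile.updateDecl change later in this commit, with `self`, `module`, and `decl` assumed to be in scope):

var code_buffer = std.ArrayList(u8).init(self.allocator);
defer code_buffer.deinit();

const typed_value = decl.typed_value.most_recent.typed_value;
const code = switch (try codegen.generateSymbol(self, decl.src, typed_value, &code_buffer)) {
    // The .ok slice may alias code_buffer.items, so keep the buffer
    // alive for as long as `code` is in use.
    .ok => |machine_code| machine_code,
    .fail => |err_msg| {
        decl.analysis = .codegen_failure;
        _ = try module.failed_decls.put(decl, err_msg);
        return;
    },
};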
@@ -390,14 +453,18 @@ const Function = struct {
}
fn genTypedValue(self: *Function, src: usize, typed_value: TypedValue) !MCValue {
const allocator = self.code.allocator;
switch (typed_value.ty.zigTypeTag()) {
.Pointer => {
const ptr_elem_type = typed_value.ty.elemType();
switch (ptr_elem_type.zigTypeTag()) {
.Array => {
// TODO more checks to make sure this can be emitted as a string literal
const bytes = try typed_value.val.toAllocatedBytes(self.code.allocator);
defer self.code.allocator.free(bytes);
const bytes = typed_value.val.toAllocatedBytes(allocator) catch |err| switch (err) {
error.AnalysisFail => unreachable,
else => |e| return e,
};
defer allocator.free(bytes);
const smaller_len = std.math.cast(u32, bytes.len) catch
return self.fail(src, "TODO handle a larger string constant", .{});

View File

@@ -191,7 +191,7 @@ pub const Module = struct {
optimize_mode: std.builtin.Mode,
link_error_flags: link.ElfFile.ErrorFlags = link.ElfFile.ErrorFlags{},
work_stack: ArrayListUnmanaged(WorkItem) = ArrayListUnmanaged(WorkItem){},
work_queue: std.fifo.LinearFifo(WorkItem, .Dynamic),
/// We optimize memory usage for a compilation with no compile errors by storing the
/// error messages and mapping outside of `Decl`.
@@ -333,6 +333,15 @@ pub const Module = struct {
return (try self.typedValue()).val;
}
pub fn dump(self: *Decl) void {
self.scope.dumpSrc(self.src);
std.debug.warn(" name={} status={}", .{ mem.spanZ(self.name), @tagName(self.analysis) });
if (self.typedValueManaged()) |tvm| {
std.debug.warn(" ty={} val={}", .{ tvm.typed_value.ty, tvm.typed_value.val });
}
std.debug.warn("\n", .{});
}
fn typedValueManaged(self: *Decl) ?*TypedValue.Managed {
switch (self.analysis) {
.initial_in_progress,
@@ -359,7 +368,10 @@ pub const Module = struct {
queued: *text.Inst.Fn,
in_progress: *Analysis,
/// There will be a corresponding ErrorMsg in Module.failed_decls
failure,
sema_failure,
/// This Fn might be OK but it depends on another Decl which did not successfully complete
/// semantic analysis.
dependency_failure,
success: Body,
},
@@ -390,7 +402,7 @@ pub const Module = struct {
switch (self.tag) {
.block => return self.cast(Block).?.arena,
.decl => return &self.cast(DeclAnalysis).?.arena.allocator,
.zir_module => unreachable,
.zir_module => return &self.cast(ZIRModule).?.contents.module.arena.allocator,
}
}
@@ -414,6 +426,18 @@ pub const Module = struct {
}
}
pub fn dumpInst(self: *Scope, inst: *Inst) void {
const zir_module = self.namespace();
const loc = std.zig.findLineColumn(zir_module.source.bytes, inst.src);
std.debug.warn("{}:{}:{}: {}: ty={}\n", .{
zir_module.sub_file_path,
loc.line + 1,
loc.column + 1,
@tagName(inst.tag),
inst.ty,
});
}
pub const Tag = enum {
zir_module,
block,
@@ -438,6 +462,7 @@ pub const Module = struct {
unloaded,
unloaded_parse_failure,
loaded_parse_failure,
loaded_sema_failure,
loaded_success,
},
@@ -446,7 +471,7 @@ pub const Module = struct {
.unloaded,
.unloaded_parse_failure,
=> {},
.loaded_success => {
.loaded_success, .loaded_sema_failure => {
allocator.free(self.source.bytes);
self.contents.module.deinit(allocator);
},
@@ -456,6 +481,11 @@ pub const Module = struct {
}
self.* = undefined;
}
pub fn dumpSrc(self: *ZIRModule, src: usize) void {
const loc = std.zig.findLineColumn(self.source.bytes, src);
std.debug.warn("{}:{}:{}\n", .{ self.sub_file_path, loc.line + 1, loc.column + 1 });
}
};
/// This is a temporary structure, references to it are valid only
@@ -520,7 +550,7 @@ pub const Module = struct {
pub fn deinit(self: *Module) void {
const allocator = self.allocator;
self.work_stack.deinit(allocator);
self.work_queue.deinit();
{
var it = self.decl_table.iterator();
while (it.next()) |kv| {
@@ -587,6 +617,8 @@ pub const Module = struct {
try self.performAllTheWork();
// TODO unload all the source files from memory
try self.bin_file.flush();
self.link_error_flags = self.bin_file.error_flags;
}
@@ -654,7 +686,7 @@ pub const Module = struct {
const InnerError = error{ OutOfMemory, AnalysisFail };
pub fn performAllTheWork(self: *Module) error{OutOfMemory}!void {
while (self.work_stack.popOrNull()) |work_item| switch (work_item) {
while (self.work_queue.readItem()) |work_item| switch (work_item) {
.codegen_decl => |decl| switch (decl.analysis) {
.initial_in_progress,
.repeat_in_progress,
@@ -671,14 +703,22 @@ pub const Module = struct {
if (decl.typed_value.most_recent.typed_value.val.cast(Value.Payload.Function)) |payload| {
switch (payload.func.analysis) {
.queued => self.analyzeFnBody(decl, payload.func) catch |err| switch (err) {
error.AnalysisFail => continue,
error.AnalysisFail => {
if (payload.func.analysis == .queued) {
payload.func.analysis = .dependency_failure;
}
continue;
},
else => |e| return e,
},
.in_progress => unreachable,
.failure => continue,
.sema_failure, .dependency_failure => continue,
.success => {},
}
}
if (!decl.typed_value.most_recent.typed_value.ty.hasCodeGenBits())
continue;
self.bin_file.updateDecl(self, decl) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
else => {
@@ -739,7 +779,10 @@ pub const Module = struct {
return zir_module;
},
.unloaded_parse_failure, .loaded_parse_failure => return error.AnalysisFail,
.unloaded_parse_failure,
.loaded_parse_failure,
.loaded_sema_failure,
=> return error.AnalysisFail,
.loaded_success => return root_scope.contents.module,
}
}
@@ -756,14 +799,11 @@ pub const Module = struct {
// Here we ensure enough queue capacity to store all the decls, so that later we can use
// writeItemAssumeCapacity.
try self.work_stack.ensureCapacity(
self.allocator,
self.work_stack.items.len + src_module.decls.len,
);
try self.work_queue.ensureUnusedCapacity(src_module.decls.len);
for (src_module.decls) |decl| {
if (decl.cast(text.Inst.Export)) |export_inst| {
try analyzeExport(self, &root_scope.base, export_inst);
_ = try self.resolveDecl(&root_scope.base, &export_inst.base);
}
}
}
@@ -825,10 +865,19 @@ pub const Module = struct {
};
errdefer decl_scope.arena.deinit();
const typed_value = self.analyzeInstConst(&decl_scope.base, old_inst) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.AnalysisFail => {
switch (new_decl.analysis) {
.initial_in_progress => new_decl.analysis = .initial_dependency_failure,
.repeat_in_progress => new_decl.analysis = .repeat_dependency_failure,
else => {},
}
return error.AnalysisFail;
},
};
const arena_state = try decl_scope.arena.allocator.create(std.heap.ArenaAllocator.State);
const typed_value = try self.analyzeInstConst(&decl_scope.base, old_inst);
arena_state.* = decl_scope.arena.state;
new_decl.typed_value = .{
@@ -839,7 +888,7 @@ pub const Module = struct {
};
new_decl.analysis = .complete;
// We ensureCapacity when scanning for decls.
self.work_stack.appendAssumeCapacity(.{ .codegen_decl = new_decl });
self.work_queue.writeItemAssumeCapacity(.{ .codegen_decl = new_decl });
return new_decl;
}
}
@@ -1021,11 +1070,8 @@ pub const Module = struct {
}
fn constStr(self: *Module, scope: *Scope, src: usize, str: []const u8) !*Inst {
const array_payload = try scope.arena().create(Type.Payload.Array_u8_Sentinel0);
array_payload.* = .{ .len = str.len };
const ty_payload = try scope.arena().create(Type.Payload.SingleConstPointer);
ty_payload.* = .{ .pointee_type = Type.initPayload(&array_payload.base) };
const ty_payload = try scope.arena().create(Type.Payload.Array_u8_Sentinel0);
ty_payload.* = .{ .len = str.len };
const bytes_payload = try scope.arena().create(Value.Payload.Bytes);
bytes_payload.* = .{ .data = str };
@@ -1150,6 +1196,7 @@ pub const Module = struct {
return self.constVoid(scope, old_inst.src);
},
.primitive => return self.analyzeInstPrimitive(scope, old_inst.cast(text.Inst.Primitive).?),
.ref => return self.analyzeInstRef(scope, old_inst.cast(text.Inst.Ref).?),
.fntype => return self.analyzeInstFnType(scope, old_inst.cast(text.Inst.FnType).?),
.intcast => return self.analyzeInstIntCast(scope, old_inst.cast(text.Inst.IntCast).?),
.bitcast => return self.analyzeInstBitCast(scope, old_inst.cast(text.Inst.BitCast).?),
@@ -1167,12 +1214,34 @@ pub const Module = struct {
return self.addNewInstArgs(b, inst.base.src, Type.initTag(.void), Inst.Breakpoint, Inst.Args(Inst.Breakpoint){});
}
fn analyzeInstRef(self: *Module, scope: *Scope, inst: *text.Inst.Ref) InnerError!*Inst {
const decl = try self.resolveCompleteDecl(scope, inst.positionals.operand);
return self.analyzeDeclRef(scope, inst.base.src, decl);
}
fn analyzeInstDeclRef(self: *Module, scope: *Scope, inst: *text.Inst.DeclRef) InnerError!*Inst {
return self.fail(scope, inst.base.src, "TODO implement analyzeInstDeclFef", .{});
const decl_name = try self.resolveConstString(scope, inst.positionals.name);
// This will need to get more fleshed out when there are proper structs & namespaces.
const zir_module = scope.namespace();
for (zir_module.contents.module.decls) |src_decl| {
if (mem.eql(u8, src_decl.name, decl_name)) {
const decl = try self.resolveCompleteDecl(scope, src_decl);
return self.analyzeDeclRef(scope, inst.base.src, decl);
}
}
return self.fail(scope, inst.positionals.name.src, "use of undeclared identifier '{}'", .{decl_name});
}
fn analyzeDeclRef(self: *Module, scope: *Scope, src: usize, decl: *Decl) InnerError!*Inst {
return self.fail(scope, src, "TODO implement analyzeDeclRef", .{});
const decl_tv = try decl.typedValue();
const ty_payload = try scope.arena().create(Type.Payload.SingleConstPointer);
ty_payload.* = .{ .pointee_type = decl_tv.ty };
const val_payload = try scope.arena().create(Value.Payload.DeclRef);
val_payload.* = .{ .decl = decl };
return self.constInst(scope, src, .{
.ty = Type.initPayload(&ty_payload.base),
.val = Value.initPayload(&val_payload.base),
});
}
fn analyzeInstCall(self: *Module, scope: *Scope, inst: *text.Inst.Call) InnerError!*Inst {
@@ -1929,6 +1998,7 @@ pub const Module = struct {
fn fail(self: *Module, scope: *Scope, src: usize, comptime format: []const u8, args: var) InnerError {
@setCold(true);
try self.failed_decls.ensureCapacity(self.failed_decls.size + 1);
try self.failed_files.ensureCapacity(self.failed_files.size + 1);
const err_msg = try ErrorMsg.create(self.allocator, src, format, args);
switch (scope.tag) {
.decl => {
@@ -1942,10 +2012,14 @@ pub const Module = struct {
},
.block => {
const block = scope.cast(Scope.Block).?;
block.func.analysis = .failure;
block.func.analysis = .sema_failure;
self.failed_decls.putAssumeCapacityNoClobber(block.decl, err_msg);
},
.zir_module => unreachable,
.zir_module => {
const zir_module = scope.cast(Scope.ZIRModule).?;
zir_module.status = .loaded_sema_failure;
self.failed_files.putAssumeCapacityNoClobber(zir_module, err_msg);
},
}
return error.AnalysisFail;
}
@@ -2044,6 +2118,7 @@ pub fn main() anyerror!void {
.failed_decls = std.AutoHashMap(*Module.Decl, *ErrorMsg).init(allocator),
.failed_files = std.AutoHashMap(*Module.Scope.ZIRModule, *ErrorMsg).init(allocator),
.failed_exports = std.AutoHashMap(*Module.Export, *ErrorMsg).init(allocator),
.work_queue = std.fifo.LinearFifo(Module.WorkItem, .Dynamic).init(allocator),
};
};
defer module.deinit();

View File

@@ -24,8 +24,7 @@ pub const Inst = struct {
breakpoint,
call,
/// Represents a reference to a global decl by name.
/// Canonicalized ZIR will not have any of these. The
/// syntax `@foo` is equivalent to `declref("foo")`.
/// The syntax `@foo` is equivalent to `declref("foo")`.
declref,
str,
int,
@@ -39,6 +38,7 @@ pub const Inst = struct {
@"fn",
@"export",
primitive,
ref,
fntype,
intcast,
bitcast,
@@ -67,6 +67,7 @@ pub const Inst = struct {
.@"fn" => Fn,
.@"export" => Export,
.primitive => Primitive,
.ref => Ref,
.fntype => FnType,
.intcast => IntCast,
.bitcast => BitCast,
@@ -234,6 +235,16 @@ pub const Inst = struct {
kw_args: struct {},
};
pub const Ref = struct {
pub const base_tag = Tag.ref;
base: Inst,
positionals: struct {
operand: *Inst,
},
kw_args: struct {},
};
pub const Primitive = struct {
pub const base_tag = Tag.primitive;
base: Inst,
@@ -407,7 +418,7 @@ pub const ErrorMsg = struct {
pub const Module = struct {
decls: []*Inst,
arena: std.heap.ArenaAllocator.State,
arena: std.heap.ArenaAllocator,
error_msg: ?ErrorMsg = null,
pub const Body = struct {
@@ -416,7 +427,7 @@ pub const Module = struct {
pub fn deinit(self: *Module, allocator: *Allocator) void {
allocator.free(self.decls);
self.arena.promote(allocator).deinit();
self.arena.deinit();
self.* = undefined;
}
@@ -475,6 +486,7 @@ pub const Module = struct {
.@"return" => return self.writeInstToStreamGeneric(stream, .@"return", decl, inst_table),
.@"fn" => return self.writeInstToStreamGeneric(stream, .@"fn", decl, inst_table),
.@"export" => return self.writeInstToStreamGeneric(stream, .@"export", decl, inst_table),
.ref => return self.writeInstToStreamGeneric(stream, .ref, decl, inst_table),
.primitive => return self.writeInstToStreamGeneric(stream, .primitive, decl, inst_table),
.fntype => return self.writeInstToStreamGeneric(stream, .fntype, decl, inst_table),
.intcast => return self.writeInstToStreamGeneric(stream, .intcast, decl, inst_table),
@@ -591,7 +603,7 @@ pub fn parse(allocator: *Allocator, source: [:0]const u8) Allocator.Error!Module
return Module{
.decls = parser.decls.toOwnedSlice(allocator),
.arena = parser.arena.state,
.arena = parser.arena,
.error_msg = parser.error_msg,
};
}
@@ -630,7 +642,7 @@ const Parser = struct {
skipSpace(self);
try requireEatBytes(self, "=");
skipSpace(self);
const inst = try parseInstruction(self, &body_context, ident[1..]);
const inst = try parseInstruction(self, &body_context, ident);
const ident_index = body_context.instructions.items.len;
if (try body_context.name_map.put(ident, ident_index)) |_| {
return self.fail("redefinition of identifier '{}'", .{ident});
@@ -716,7 +728,7 @@ const Parser = struct {
skipSpace(self);
try requireEatBytes(self, "=");
skipSpace(self);
const inst = try parseInstruction(self, null, ident[1..]);
const inst = try parseInstruction(self, null, ident);
const ident_index = self.decls.items.len;
if (try self.global_name_map.put(ident, ident_index)) |_| {
return self.fail("redefinition of identifier '{}'", .{ident});
@@ -987,7 +999,7 @@ pub fn emit_zir(allocator: *Allocator, old_module: ir.Module) !Module {
return Module{
.decls = ctx.decls.toOwnedSlice(allocator),
.arena = ctx.arena.state,
.arena = ctx.arena,
};
}
@@ -1056,6 +1068,7 @@ const EmitZIR = struct {
}
fn emitTypedValue(self: *EmitZIR, src: usize, typed_value: TypedValue) Allocator.Error!*Inst {
const allocator = &self.arena.allocator;
switch (typed_value.ty.zigTypeTag()) {
.Pointer => {
const ptr_elem_type = typed_value.ty.elemType();
@@ -1067,7 +1080,10 @@ const EmitZIR = struct {
// ptr_elem_type.hasSentinel(Value.initTag(.zero)))
//{
//}
const bytes = try typed_value.val.toAllocatedBytes(&self.arena.allocator);
const bytes = typed_value.val.toAllocatedBytes(allocator) catch |err| switch (err) {
error.AnalysisFail => unreachable,
else => |e| return e,
};
return self.emitStringLiteral(src, bytes);
},
else => |t| std.debug.panic("TODO implement emitTypedValue for pointer to {}", .{@tagName(t)}),

View File

@@ -33,9 +33,11 @@ pub fn openBinFilePath(
options: Options,
) !ElfFile {
const file = try dir.createFile(sub_path, .{ .truncate = false, .read = true, .mode = determineMode(options) });
defer file.close();
errdefer file.close();
return openBinFile(allocator, file, options);
var bin_file = try openBinFile(allocator, file, options);
bin_file.owns_file_handle = true;
return bin_file;
}
/// Atomically overwrites the old file, if present.
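The new owns_file_handle flag records whether the ElfFile opened the file itself, so deinit knows whether to close it. A hedged sketch of the resulting ownership rules (illustrative only; the exact parameter lists and the link.Options name are assumed from context):

const std = @import("std");
const link = @import("link.zig"); // assumed import path

fn openBoth(allocator: *std.mem.Allocator, existing_file: std.fs.File, options: link.Options) !void {
    // Opened by path: the ElfFile takes ownership, so deinit() also closes the file.
    var by_path = try link.openBinFilePath(allocator, std.fs.cwd(), "example.o", options);
    defer by_path.deinit();

    // Wrapping an existing handle: owns_file_handle stays false, so the
    // caller remains responsible for closing existing_file.
    var wrapped = try link.openBinFile(allocator, existing_file, options);
    defer wrapped.deinit();
}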
@@ -89,6 +91,7 @@ pub fn openBinFile(allocator: *Allocator, file: fs.File, options: Options) !ElfF
pub const ElfFile = struct {
allocator: *Allocator,
file: fs.File,
owns_file_handle: bool,
options: Options,
ptr_width: enum { p32, p64 },
@@ -162,6 +165,8 @@ pub const ElfFile = struct {
self.shstrtab.deinit(self.allocator);
self.symbols.deinit(self.allocator);
self.offset_table.deinit(self.allocator);
if (self.owns_file_handle)
self.file.close();
}
// `alloc_num / alloc_den` is the factor of padding when allocation
@@ -685,7 +690,7 @@ pub const ElfFile = struct {
// TODO Also detect virtual address collisions.
const text_capacity = self.allocatedSize(shdr.sh_offset);
// TODO instead of looping here, maintain a free list and a pointer to the end.
var last_start: u64 = 0;
var last_start: u64 = phdr.p_vaddr;
var last_size: u64 = 0;
for (self.symbols.items) |sym| {
if (sym.st_value > last_start) {
@@ -738,19 +743,21 @@ pub const ElfFile = struct {
}
pub fn updateDecl(self: *ElfFile, module: *ir.Module, decl: *ir.Module.Decl) !void {
var code = std.ArrayList(u8).init(self.allocator);
defer code.deinit();
var code_buffer = std.ArrayList(u8).init(self.allocator);
defer code_buffer.deinit();
const typed_value = decl.typed_value.most_recent.typed_value;
const err_msg = try codegen.generateSymbol(self, typed_value, &code);
if (err_msg) |em| {
decl.analysis = .codegen_failure;
_ = try module.failed_decls.put(decl, em);
return;
}
const code = switch (try codegen.generateSymbol(self, decl.src, typed_value, &code_buffer)) {
.ok => |x| x,
.fail => |em| {
decl.analysis = .codegen_failure;
_ = try module.failed_decls.put(decl, em);
return;
},
};
const file_offset = blk: {
const code_size = code.items.len;
const code_size = code.len;
const stt_bits: u8 = switch (typed_value.ty.zigTypeTag()) {
.Fn => elf.STT_FUNC,
else => elf.STT_OBJECT,
@@ -793,11 +800,13 @@ pub const ElfFile = struct {
errdefer self.symbols.shrink(self.allocator, self.symbols.items.len - 1);
self.offset_table.appendAssumeCapacity(new_block.vaddr);
errdefer self.offset_table.shrink(self.allocator, self.offset_table.items.len - 1);
try self.writeSymbol(local_sym_index);
try self.writeOffsetTableEntry(offset_table_index);
self.symbol_count_dirty = true;
self.offset_table_count_dirty = true;
try self.writeSymbol(local_sym_index);
try self.writeOffsetTableEntry(offset_table_index);
decl.link = .{
.local_sym_index = @intCast(u32, local_sym_index),
.offset_table_index = @intCast(u32, offset_table_index),
@ -807,7 +816,7 @@ pub const ElfFile = struct {
}
};
try self.file.pwriteAll(code.items, file_offset);
try self.file.pwriteAll(code, file_offset);
// Since we updated the vaddr and the size, each corresponding export symbol also needs to be updated.
const decl_exports = module.decl_exports.getValue(decl) orelse &[0]*ir.Module.Export{};
@@ -823,7 +832,7 @@ pub const ElfFile = struct {
) !void {
try self.symbols.ensureCapacity(self.allocator, self.symbols.items.len + exports.len);
const typed_value = decl.typed_value.most_recent.typed_value;
assert(decl.link.local_sym_index != 0);
if (decl.link.local_sym_index == 0) return;
const decl_sym = self.symbols.items[decl.link.local_sym_index];
for (exports) |exp| {
@@ -1112,6 +1121,7 @@ pub fn createElfFile(allocator: *Allocator, file: fs.File, options: Options) !El
else => return error.UnsupportedELFArchitecture,
},
.shdr_table_dirty = true,
.owns_file_handle = false,
};
errdefer self.deinit();
@@ -1161,6 +1171,7 @@ fn openBinFileInner(allocator: *Allocator, file: fs.File, options: Options) !Elf
var self: ElfFile = .{
.allocator = allocator,
.file = file,
.owns_file_handle = false,
.options = options,
.ptr_width = switch (options.target.cpu.arch.ptrBitWidth()) {
32 => .p32,

View File

@@ -262,6 +262,50 @@ pub const Type = extern union {
}
}
pub fn hasCodeGenBits(self: Type) bool {
return switch (self.tag()) {
.u8,
.i8,
.isize,
.usize,
.c_short,
.c_ushort,
.c_int,
.c_uint,
.c_long,
.c_ulong,
.c_longlong,
.c_ulonglong,
.c_longdouble,
.f16,
.f32,
.f64,
.f128,
.bool,
.anyerror,
.fn_noreturn_no_args,
.fn_naked_noreturn_no_args,
.fn_ccc_void_no_args,
.single_const_pointer_to_comptime_int,
.const_slice_u8, // See last_no_payload_tag below.
.array_u8_sentinel_0,
.array,
.single_const_pointer,
.int_signed,
.int_unsigned,
=> true,
.c_void,
.void,
.type,
.comptime_int,
.comptime_float,
.noreturn,
.@"null",
=> false,
};
}
pub fn isSinglePointer(self: Type) bool {
return switch (self.tag()) {
.u8,
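The new Type.hasCodeGenBits is what lets performAllTheWork skip updateDecl for decls whose type occupies no bits at runtime. An illustrative test of the switch above (not part of this commit; the relative import path is assumed):

const std = @import("std");
const Type = @import("type.zig").Type;

test "hasCodeGenBits" {
    // Runtime-sized types need a symbol emitted for them...
    std.testing.expect(Type.initTag(.usize).hasCodeGenBits());
    std.testing.expect(Type.initTag(.bool).hasCodeGenBits());
    // ...while zero-bit and comptime-only types do not.
    std.testing.expect(!Type.initTag(.void).hasCodeGenBits());
    std.testing.expect(!Type.initTag(.comptime_int).hasCodeGenBits());
}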

View File

@@ -180,10 +180,17 @@ pub const Value = extern union {
/// Asserts that the value is representable as an array of bytes.
/// Copies the value into a freshly allocated slice of memory, which is owned by the caller.
pub fn toAllocatedBytes(self: Value, allocator: *Allocator) Allocator.Error![]u8 {
pub fn toAllocatedBytes(self: Value, allocator: *Allocator) ![]u8 {
if (self.cast(Payload.Bytes)) |bytes| {
return std.mem.dupe(allocator, u8, bytes.data);
}
if (self.cast(Payload.Repeated)) |repeated| {
@panic("TODO implement toAllocatedBytes for this Value tag");
}
if (self.cast(Payload.DeclRef)) |declref| {
const val = try declref.decl.value();
return val.toAllocatedBytes(allocator);
}
unreachable;
}
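Because toAllocatedBytes can now chase a DeclRef (and decl.value() may fail analysis), its error set is inferred rather than fixed to Allocator.Error. A hypothetical wrapper showing the call-site pattern used by genTypedValue and EmitZIR.emitTypedValue earlier in this commit (illustrative only; the import path is assumed):

const std = @import("std");
const Value = @import("value.zig").Value;

// At codegen time the referenced Decl is already fully analyzed,
// so error.AnalysisFail is ruled out explicitly.
fn valueToOwnedBytes(allocator: *std.mem.Allocator, val: Value) ![]u8 {
    return val.toAllocatedBytes(allocator) catch |err| switch (err) {
        error.AnalysisFail => unreachable,
        else => |e| return e,
    };
}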