Mirror of https://github.com/ziglang/zig.git (synced 2025-12-24 15:13:08 +00:00)
backends: port to new std.io.BufferedWriter API
This commit is contained in:
parent a21e7ab64f
commit f3d0fc7a66

Changed file: lib/compiler/aro/aro/Compilation.zig (vendored), 2 lines changed
@@ -546,7 +546,7 @@ pub fn generateBuiltinMacros(comp: *Compilation, system_defines_mode: SystemDefi
 }

 try buf.appendSlice("#define __STDC__ 1\n");
-try buf.writer().print("#define __STDC_HOSTED__ {d}\n", .{@intFromBool(comp.target.os.tag != .freestanding)});
+try buf.print("#define __STDC_HOSTED__ {d}\n", .{@intFromBool(comp.target.os.tag != .freestanding)});

 // standard macros
 try buf.appendSlice(
@@ -695,7 +695,10 @@ fn runStepNames(

 if (run.summary != .none) {
 var bw = std.debug.lockStdErr2(&stdio_buffer);
-defer std.debug.unlockStdErr();
+defer {
+bw.flush() catch {};
+std.debug.unlockStdErr();
+}

 const total_count = success_count + failure_count + pending_count + skipped_count;
 ttyconf.setColor(&bw, .cyan) catch {};
@@ -710,7 +713,7 @@ fn runStepNames(
 if (test_fail_count > 0) bw.print("; {d} failed", .{test_fail_count}) catch {};
 if (test_leak_count > 0) bw.print("; {d} leaked", .{test_leak_count}) catch {};

-bw.writeAll("\n") catch {};
+bw.writeByte('\n') catch {};

 // Print a fancy tree with build results.
 var step_stack_copy = try step_stack.clone(gpa);
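The hunk above makes the build runner flush its buffered stderr writer before releasing the lock. A minimal sketch of that pattern, assuming (as the hunk suggests) that `std.debug.lockStdErr2` takes a caller-provided buffer and returns a `std.io.BufferedWriter`; the buffer size and the printed counters are illustrative only:

    var stderr_buffer: [256]u8 = undefined;
    var bw = std.debug.lockStdErr2(&stderr_buffer);
    defer {
        // Push any buffered bytes out before the lock is released.
        bw.flush() catch {};
        std.debug.unlockStdErr();
    }
    bw.print("{d} of {d} steps succeeded\n", .{ success_count, total_count }) catch {};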
@@ -133,11 +133,11 @@ pub fn makePath(p: Path, sub_path: []const u8) !void {
 }

 pub fn toString(p: Path, allocator: Allocator) Allocator.Error![]u8 {
-return std.fmt.allocPrint(allocator, "{}", .{p});
+return std.fmt.allocPrint(allocator, "{f}", .{p});
 }

 pub fn toStringZ(p: Path, allocator: Allocator) Allocator.Error![:0]u8 {
-return std.fmt.allocPrintZ(allocator, "{}", .{p});
+return std.fmt.allocPrintZ(allocator, "{f}", .{p});
 }

 pub fn format(
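The `{}` to `{f}` change above recurs throughout this commit; `{f}` appears to be the specifier that routes a value to its `format` method under the new API. A hedged sketch, where `p` stands for any `Path` value:

    const text = try std.fmt.allocPrint(allocator, "{f}", .{p});
    defer allocator.free(text);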
@@ -469,7 +469,7 @@ pub fn evalZigProcess(
 // This is intentionally printed for failure on the first build but not for
 // subsequent rebuilds.
 if (s.result_error_bundle.errorMessageCount() > 0) {
-return s.fail("the following command failed with {d} compilation errors:\n{s}", .{
+return s.fail("the following command failed with {d} compilation errors:\n{s}\n", .{
 s.result_error_bundle.errorMessageCount(),
 try allocPrintCmd(arena, null, argv),
 });
@@ -689,7 +689,7 @@ pub inline fn handleChildProcUnsupported(
 ) error{ OutOfMemory, MakeFailed }!void {
 if (!std.process.can_spawn) {
 return s.fail(
-"unable to execute the following command: host cannot spawn child processes\n{s}",
+"unable to execute the following command: host cannot spawn child processes\n{s}\n",
 .{try allocPrintCmd(s.owner.allocator, opt_cwd, argv)},
 );
 }
@@ -706,14 +706,14 @@ pub fn handleChildProcessTerm(
 .Exited => |code| {
 if (code != 0) {
 return s.fail(
-"the following command exited with error code {d}:\n{s}",
+"the following command exited with error code {d}:\n{s}\n",
 .{ code, try allocPrintCmd(arena, opt_cwd, argv) },
 );
 }
 },
 .Signal, .Stopped, .Unknown => {
 return s.fail(
-"the following command terminated unexpectedly:\n{s}",
+"the following command terminated unexpectedly:\n{s}\n",
 .{try allocPrintCmd(arena, opt_cwd, argv)},
 );
 },
@@ -1523,11 +1523,11 @@ const MachODumper = struct {
 ) !void {
 const size = try br.takeLeb128(u64);
 if (size > 0) {
-const flags = try br.takeLeb128(u64);
+const flags = try br.takeLeb128(u8);
 switch (flags) {
 macho.EXPORT_SYMBOL_FLAGS_REEXPORT => {
 const ord = try br.takeLeb128(u64);
-const name = try br.takeDelimiterConclusive(0);
+const name = try br.takeSentinel(0);
 try exports.append(.{
 .name = if (name.len > 0) name else prefix,
 .tag = .reexport,
@@ -1568,8 +1568,8 @@ const MachODumper = struct {

 const nedges = try br.takeByte();
 for (0..nedges) |_| {
-const label = try br.takeDelimiterConclusive(0);
-const off = try br.takeLeb128(u64);
+const label = try br.takeSentinel(0);
+const off = try br.takeLeb128(usize);
 const prefix_label = try std.fmt.allocPrint(arena, "{s}{s}", .{ prefix, label });
 const seek = br.seek;
 br.seek = off;
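A small sketch of the reader calls used above, assuming `br` is the dumper's `*std.io.BufferedReader`; `takeSentinel(0)` replaces the old `takeDelimiterConclusive(0)` and returns a slice terminated by a 0 sentinel:

    const size = try br.takeLeb128(u64); // unsigned LEB128 value
    const flags = try br.takeLeb128(u8);
    const name = try br.takeSentinel(0); // bytes up to, but not counting, the 0 byte
    _ = .{ size, flags, name };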
@@ -301,28 +301,23 @@ pub const Os = struct {

 /// This function is defined to serialize a Zig source code representation of this
 /// type, that, when parsed, will deserialize into the same data.
-pub fn format(
-ver: WindowsVersion,
-comptime fmt_str: []const u8,
-_: std.fmt.FormatOptions,
-writer: *std.io.BufferedWriter,
-) anyerror!void {
+pub fn format(ver: WindowsVersion, bw: *std.io.BufferedWriter, comptime fmt_str: []const u8) anyerror!void {
 const maybe_name = std.enums.tagName(WindowsVersion, ver);
 if (comptime std.mem.eql(u8, fmt_str, "s")) {
 if (maybe_name) |name|
-try writer.print(".{s}", .{name})
+try bw.print(".{s}", .{name})
 else
-try writer.print(".{d}", .{@intFromEnum(ver)});
+try bw.print(".{d}", .{@intFromEnum(ver)});
 } else if (comptime std.mem.eql(u8, fmt_str, "c")) {
 if (maybe_name) |name|
-try writer.print(".{s}", .{name})
+try bw.print(".{s}", .{name})
 else
-try writer.print("@enumFromInt(0x{X:0>8})", .{@intFromEnum(ver)});
+try bw.print("@enumFromInt(0x{X:0>8})", .{@intFromEnum(ver)});
 } else if (fmt_str.len == 0) {
 if (maybe_name) |name|
-try writer.print("WindowsVersion.{s}", .{name})
+try bw.print("WindowsVersion.{s}", .{name})
 else
-try writer.print("WindowsVersion(0x{X:0>8})", .{@intFromEnum(ver)});
+try bw.print("WindowsVersion(0x{X:0>8})", .{@intFromEnum(ver)});
 } else std.fmt.invalidFmtError(fmt_str, ver);
 }
 };
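The rewritten signature above illustrates the new `format` convention: the value comes first, then the `*std.io.BufferedWriter`, then the comptime format string, and `std.fmt.FormatOptions` is no longer a parameter. A hedged sketch for a hypothetical `MyType` with an integer field `n`:

    pub fn format(self: MyType, bw: *std.io.BufferedWriter, comptime fmt_str: []const u8) anyerror!void {
        if (fmt_str.len != 0) std.fmt.invalidFmtError(fmt_str, self);
        try bw.print("MyType({d})", .{self.n});
    }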
@@ -236,44 +236,44 @@ pub const WriteToStreamOptions = struct {
 port: bool = true,
 };

-pub fn writeToStream(uri: Uri, options: WriteToStreamOptions, writer: *std.io.BufferedWriter) anyerror!void {
+pub fn writeToStream(uri: Uri, options: WriteToStreamOptions, bw: *std.io.BufferedWriter) anyerror!void {
 if (options.scheme) {
-try writer.print("{s}:", .{uri.scheme});
+try bw.print("{s}:", .{uri.scheme});
 if (options.authority and uri.host != null) {
-try writer.writeAll("//");
+try bw.writeAll("//");
 }
 }
 if (options.authority) {
 if (options.authentication and uri.host != null) {
 if (uri.user) |user| {
-try writer.print("{fuser}", .{user});
+try bw.print("{fuser}", .{user});
 if (uri.password) |password| {
-try writer.print(":{fpassword}", .{password});
+try bw.print(":{fpassword}", .{password});
 }
-try writer.writeByte('@');
+try bw.writeByte('@');
 }
 }
 if (uri.host) |host| {
-try writer.print("{fhost}", .{host});
+try bw.print("{fhost}", .{host});
 if (options.port) {
-if (uri.port) |port| try writer.print(":{d}", .{port});
+if (uri.port) |port| try bw.print(":{d}", .{port});
 }
 }
 }
 if (options.path) {
-try writer.print("{fpath}", .{
+try bw.print("{fpath}", .{
 if (uri.path.isEmpty()) Uri.Component{ .percent_encoded = "/" } else uri.path,
 });
 if (options.query) {
-if (uri.query) |query| try writer.print("?{fquery}", .{query});
+if (uri.query) |query| try bw.print("?{fquery}", .{query});
 }
 if (options.fragment) {
-if (uri.fragment) |fragment| try writer.print("#{ffragment}", .{fragment});
+if (uri.fragment) |fragment| try bw.print("#{ffragment}", .{fragment});
 }
 }
 }

-pub fn format(uri: Uri, comptime fmt: []const u8, _: std.fmt.Options, writer: *std.io.BufferedWriter) anyerror!void {
+pub fn format(uri: Uri, bw: *std.io.BufferedWriter, comptime fmt: []const u8) anyerror!void {
 const scheme = comptime std.mem.indexOfScalar(u8, fmt, ';') != null or fmt.len == 0;
 const authentication = comptime std.mem.indexOfScalar(u8, fmt, '@') != null or fmt.len == 0;
 const authority = comptime std.mem.indexOfScalar(u8, fmt, '+') != null or fmt.len == 0;
@@ -288,7 +288,7 @@ pub fn format(uri: Uri, comptime fmt: []const u8, _: std.fmt.Options, writer: *s
 .path = path,
 .query = query,
 .fragment = fragment,
-}, writer);
+}, bw);
 }

 /// Parses the URI or returns an error.
@@ -531,11 +531,7 @@ pub fn Formatter(comptime formatFn: anytype) type {
 const Data = @typeInfo(@TypeOf(formatFn)).@"fn".params[0].type.?;
 return struct {
 data: Data,
-pub fn format(
-self: @This(),
-writer: *std.io.BufferedWriter,
-comptime fmt: []const u8,
-) anyerror!void {
+pub fn format(self: @This(), writer: *std.io.BufferedWriter, comptime fmt: []const u8) anyerror!void {
 try formatFn(self.data, writer, fmt);
 }
 };
@@ -201,8 +201,8 @@ pub fn toss(br: *BufferedReader, n: usize) void {

 /// Equivalent to `peek` + `toss`.
 pub fn take(br: *BufferedReader, n: usize) anyerror![]u8 {
-const result = try peek(br, n);
-toss(br, n);
+const result = try br.peek(n);
+br.toss(n);
 return result;
 }

@@ -218,7 +218,7 @@ pub fn take(br: *BufferedReader, n: usize) anyerror![]u8 {
 /// See also:
 /// * `take`
 pub fn takeArray(br: *BufferedReader, comptime n: usize) anyerror!*[n]u8 {
-return (try take(br, n))[0..n];
+return (try br.take(n))[0..n];
 }

 /// Skips the next `n` bytes from the stream, advancing the seek position.
@@ -232,7 +232,7 @@ pub fn takeArray(br: *BufferedReader, comptime n: usize) anyerror!*[n]u8 {
 /// * `discardUntilEnd`
 /// * `discardUpTo`
 pub fn discard(br: *BufferedReader, n: usize) anyerror!void {
-if ((try discardUpTo(br, n)) != n) return error.EndOfStream;
+if ((try br.discardUpTo(n)) != n) return error.EndOfStream;
 }

 /// Skips the next `n` bytes from the stream, advancing the seek position.
@@ -325,6 +325,34 @@ pub fn partialRead(br: *BufferedReader, buffer: []u8) anyerror!usize {
 @panic("TODO");
 }

+/// Returns a slice of the next bytes of buffered data from the stream until
+/// `sentinel` is found, advancing the seek position.
+///
+/// Returned slice has a sentinel.
+///
+/// If the stream ends before the sentinel is found, `error.EndOfStream` is
+/// returned.
+///
+/// If the sentinel is not found within a number of bytes matching the
+/// capacity of the `BufferedReader`, `error.StreamTooLong` is returned.
+///
+/// Invalidates previously returned values from `peek`.
+///
+/// See also:
+/// * `peekSentinel`
+/// * `takeDelimiterExclusive`
+/// * `takeDelimiterInclusive`
+pub fn takeSentinel(br: *BufferedReader, comptime sentinel: u8) anyerror![:sentinel]u8 {
+const result = try br.peekSentinel(sentinel);
+br.toss(result.len + 1);
+return result;
+}
+
+pub fn peekSentinel(br: *BufferedReader, comptime sentinel: u8) anyerror![:sentinel]u8 {
+const result = try br.takeDelimiterInclusive(sentinel);
+return result[0 .. result.len - 1 :sentinel];
+}
+
 /// Returns a slice of the next bytes of buffered data from the stream until
 /// `delimiter` is found, advancing the seek position.
 ///
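A sketch of how the new `takeSentinel` reads delimited records, modeled on the `std.net` hunks later in this commit; `file` and `handleLine` are placeholders:

    var line_buf: [512]u8 = undefined;
    var br = file.reader().buffered(&line_buf);
    while (br.takeSentinel('\n')) |line| {
        // `line` is a [:'\n']u8 slice; the newline is consumed but not counted in line.len.
        handleLine(line);
    } else |err| switch (err) {
        error.EndOfStream => {},
        else => |e| return e,
    }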
@@ -339,36 +367,17 @@ pub fn partialRead(br: *BufferedReader, buffer: []u8) anyerror!usize {
 /// Invalidates previously returned values from `peek`.
 ///
 /// See also:
-/// * `takeDelimiterConclusive`
+/// * `takeSentinel`
+/// * `takeDelimiterExclusive`
 /// * `peekDelimiterInclusive`
 pub fn takeDelimiterInclusive(br: *BufferedReader, delimiter: u8) anyerror![]u8 {
-const result = try peekDelimiterInclusive(br, delimiter);
-toss(result.len);
+const result = try br.peekDelimiterInclusive(delimiter);
+br.toss(result.len);
 return result;
 }

 pub fn peekDelimiterInclusive(br: *BufferedReader, delimiter: u8) anyerror![]u8 {
-const storage = &br.storage;
-const buffer = storage.buffer[0..storage.end];
-const seek = br.seek;
-if (std.mem.indexOfScalarPos(u8, buffer, seek, delimiter)) |end| {
-@branchHint(.likely);
-return buffer[seek .. end + 1];
-}
-const remainder = buffer[seek..];
-std.mem.copyForwards(u8, buffer[0..remainder.len], remainder);
-var i = remainder.len;
-storage.end = i;
-br.seek = 0;
-while (i < storage.buffer.len) {
-const status = try br.unbuffered_reader.read(storage, .none);
-if (std.mem.indexOfScalarPos(u8, storage.buffer[0..storage.end], i, delimiter)) |end| {
-return storage.buffer[0 .. end + 1];
-}
-if (status.end) return error.EndOfStream;
-i = storage.end;
-}
-return error.StreamTooLong;
+return (try br.peekDelimiterInclusiveUnlessEnd(delimiter)) orelse error.EndOfStream;
 }

 /// Returns a slice of the next bytes of buffered data from the stream until
@@ -384,21 +393,32 @@ pub fn peekDelimiterInclusive(br: *BufferedReader, delimiter: u8) anyerror![]u8
 /// Invalidates previously returned values from `peek`.
 ///
 /// See also:
+/// * `takeSentinel`
 /// * `takeDelimiterInclusive`
-/// * `peekDelimiterConclusive`
-pub fn takeDelimiterConclusive(br: *BufferedReader, delimiter: u8) anyerror![]u8 {
-const result = try peekDelimiterConclusive(br, delimiter);
+/// * `peekDelimiterExclusive`
+pub fn takeDelimiterExclusive(br: *BufferedReader, delimiter: u8) anyerror![]u8 {
+const result_unless_end = try br.peekDelimiterInclusiveUnlessEnd(delimiter);
+const result = result_unless_end orelse {
+br.toss(br.storage.end);
+return br.storage.buffer[0..br.storage.end];
+};
 br.toss(result.len);
-return result;
+return result[0 .. result.len - 1];
 }

-pub fn peekDelimiterConclusive(br: *BufferedReader, delimiter: u8) anyerror![]u8 {
+pub fn peekDelimiterExclusive(br: *BufferedReader, delimiter: u8) anyerror![]u8 {
+const result_unless_end = try br.peekDelimiterInclusiveUnlessEnd(delimiter);
+const result = result_unless_end orelse return br.storage.buffer[0..br.storage.end];
+return result[0 .. result.len - 1];
+}
+
+fn peekDelimiterInclusiveUnlessEnd(br: *BufferedReader, delimiter: u8) anyerror!?[]u8 {
 const storage = &br.storage;
 const buffer = storage.buffer[0..storage.end];
 const seek = br.seek;
 if (std.mem.indexOfScalarPos(u8, buffer, seek, delimiter)) |end| {
 @branchHint(.likely);
-return buffer[seek..end];
+return buffer[seek .. end + 1];
 }
 const remainder = buffer[seek..];
 std.mem.copyForwards(u8, buffer[0..remainder.len], remainder);
@@ -407,10 +427,8 @@ pub fn peekDelimiterConclusive(br: *BufferedReader, delimiter: u8) anyerror![]u8
 br.seek = 0;
 while (i < storage.buffer.len) {
 const status = try br.unbuffered_reader.read(storage, .unlimited);
-if (std.mem.indexOfScalarPos(u8, storage.buffer[0..storage.end], i, delimiter)) |end| {
-return storage.buffer[0 .. end + 1];
-}
-if (status.end) return storage.buffer[0..storage.end];
+if (std.mem.indexOfScalarPos(u8, storage.buffer[0..storage.end], i, delimiter)) |end| return storage.buffer[0 .. end + 1];
+if (status.end) return null;
 i = storage.end;
 }
 return error.StreamTooLong;
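The renaming above also pins down end-of-stream behavior: per the new bodies, `takeDelimiterExclusive` hands back the remaining buffered bytes when the stream ends before the delimiter, while `takeDelimiterInclusive` (and `takeSentinel`) still return `error.EndOfStream`. A hedged one-line sketch:

    // Returns the bytes before ',' and consumes the delimiter; at end of
    // stream it appears to yield whatever is left instead of error.EndOfStream.
    const field = try br.takeDelimiterExclusive(',');
    _ = field;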
@@ -436,7 +454,7 @@ pub fn streamReadDelimiter(br: *BufferedReader, bw: *std.io.BufferedWriter, deli
 ///
 /// Returns number of bytes streamed as well as whether the input reached the end.
 /// The end is not signaled to the writer.
-pub fn streamReadDelimiterConclusive(
+pub fn streamReadDelimiterExclusive(
 br: *BufferedReader,
 bw: *std.io.BufferedWriter,
 delimiter: u8,
@@ -468,7 +486,7 @@ pub fn streamReadDelimiterLimited(
 /// including the delimiter.
 ///
 /// If end of stream is found, this function succeeds.
-pub fn discardDelimiterConclusive(br: *BufferedReader, delimiter: u8) anyerror!void {
+pub fn discardDelimiterExclusive(br: *BufferedReader, delimiter: u8) anyerror!void {
 _ = br;
 _ = delimiter;
 @panic("TODO");
@@ -517,7 +535,7 @@ pub fn takeByte(br: *BufferedReader) anyerror!u8 {
 const seek = br.seek;
 if (seek >= buffer.len) {
 @branchHint(.unlikely);
-try fill(br, 1);
+try br.fill(1);
 }
 br.seek = seek + 1;
 return buffer[seek];
@@ -531,20 +549,20 @@ pub fn takeByteSigned(br: *BufferedReader) anyerror!i8 {
 /// Asserts the buffer was initialized with a capacity at least `@sizeOf(T)`.
 pub inline fn takeInt(br: *BufferedReader, comptime T: type, endian: std.builtin.Endian) anyerror!T {
 const n = @divExact(@typeInfo(T).int.bits, 8);
-return std.mem.readInt(T, try takeArray(br, n), endian);
+return std.mem.readInt(T, try br.takeArray(n), endian);
 }

 /// Asserts the buffer was initialized with a capacity at least `n`.
 pub fn takeVarInt(br: *BufferedReader, comptime Int: type, endian: std.builtin.Endian, n: usize) anyerror!Int {
 assert(n <= @sizeOf(Int));
-return std.mem.readVarInt(Int, try take(br, n), endian);
+return std.mem.readVarInt(Int, try br.take(n), endian);
 }

 /// Asserts the buffer was initialized with a capacity at least `@sizeOf(T)`.
 pub fn takeStruct(br: *BufferedReader, comptime T: type) anyerror!*align(1) T {
 // Only extern and packed structs have defined in-memory layout.
 comptime assert(@typeInfo(T).@"struct".layout != .auto);
-return @ptrCast(try takeArray(br, @sizeOf(T)));
+return @ptrCast(try br.takeArray(@sizeOf(T)));
 }

 /// Asserts the buffer was initialized with a capacity at least `@sizeOf(T)`.
@@ -561,7 +579,7 @@ pub fn takeStructEndian(br: *BufferedReader, comptime T: type, endian: std.built
 /// Asserts the buffer was initialized with a capacity at least `@sizeOf(Enum)`.
 pub fn takeEnum(br: *BufferedReader, comptime Enum: type, endian: std.builtin.Endian) anyerror!Enum {
 const Tag = @typeInfo(Enum).@"enum".tag_type;
-const int = try takeInt(br, Tag, endian);
+const int = try br.takeInt(Tag, endian);
 return std.meta.intToEnum(Enum, int);
 }

@@ -588,10 +606,12 @@ fn takeMultipleOf7Leb128(br: *BufferedReader, comptime Result: type) anyerror!Re
 const buffer: []const packed struct(u8) { bits: u7, more: bool } = @ptrCast(try br.peekAll(1));
 for (buffer, 1..) |byte, len| {
 if (remaining_bits > 0) {
-result = @shlExact(@as(UnsignedResult, byte.bits), result_info.bits - 7) | if (result_info.bits > 7) @shrExact(result, 7) else 0;
+result = @shlExact(@as(UnsignedResult, byte.bits), result_info.bits - 7) |
+if (result_info.bits > 7) @shrExact(result, 7) else 0;
 remaining_bits -= 7;
 } else if (fits) fits = switch (result_info.signedness) {
-.signed => @as(i7, @bitCast(byte.bits)) == @as(i7, @truncate(@as(Result, @bitCast(result)) >> (result_info.bits - 1))),
+.signed => @as(i7, @bitCast(byte.bits)) ==
+@as(i7, @truncate(@as(Result, @bitCast(result)) >> (result_info.bits - 1))),
 .unsigned => byte.bits == 0,
 };
 if (byte.more) continue;
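A short sketch of the typed-read helpers above in their new method-call form; `Header` and `Kind` are hypothetical extern-struct and enum types, and `br` is a `*std.io.BufferedReader`:

    const magic = try br.takeInt(u32, .little);
    const header = try br.takeStruct(Header); // *align(1) Header pointing into the reader's buffer
    const kind = try br.takeEnum(Kind, .little);
    _ = .{ magic, header, kind };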
@@ -652,6 +672,14 @@ test read {
 return error.Unimplemented;
 }

+test takeSentinel {
+return error.Unimplemented;
+}
+
+test peekSentinel {
+return error.Unimplemented;
+}
+
 test takeDelimiterInclusive {
 return error.Unimplemented;
 }
@@ -660,11 +688,11 @@ test peekDelimiterInclusive {
 return error.Unimplemented;
 }

-test takeDelimiterConclusive {
+test takeDelimiterExclusive {
 return error.Unimplemented;
 }

-test peekDelimiterConclusive {
+test peekDelimiterExclusive {
 return error.Unimplemented;
 }

@@ -672,7 +700,7 @@ test streamReadDelimiter {
 return error.Unimplemented;
 }

-test streamReadDelimiterConclusive {
+test streamReadDelimiterExclusive {
 return error.Unimplemented;
 }

@@ -680,7 +708,7 @@ test streamReadDelimiterLimited {
 return error.Unimplemented;
 }

-test discardDelimiterConclusive {
+test discardDelimiterExclusive {
 return error.Unimplemented;
 }
@@ -92,7 +92,7 @@ pub fn writableSlice(bw: *BufferedWriter, minimum_length: usize) anyerror![]u8 {
 return cap_slice;
 }
 const buffer = bw.buffer[0..bw.end];
-const n = try bw.unbuffered_writer.write(buffer);
+const n = try bw.unbuffered_writer.writev(&.{buffer});
 if (n == buffer.len) {
 @branchHint(.likely);
 bw.end = 0;
@@ -306,7 +306,7 @@ pub fn write(bw: *BufferedWriter, bytes: []const u8) anyerror!usize {
 /// transferred.
 pub fn writeAll(bw: *BufferedWriter, bytes: []const u8) anyerror!void {
 var index: usize = 0;
-while (index < bytes.len) index += try write(bw, bytes[index..]);
+while (index < bytes.len) index += try bw.write(bytes[index..]);
 }

 pub fn print(bw: *BufferedWriter, comptime format: []const u8, args: anytype) anyerror!void {
@@ -354,7 +354,7 @@ pub fn writeByte(bw: *BufferedWriter, byte: u8) anyerror!void {
 /// many times as necessary.
 pub fn splatByteAll(bw: *BufferedWriter, byte: u8, n: usize) anyerror!void {
 var remaining: usize = n;
-while (remaining > 0) remaining -= try splatByte(bw, byte, remaining);
+while (remaining > 0) remaining -= try bw.splatByte(byte, remaining);
 }

 /// Writes the same byte many times, allowing short writes.
@@ -368,11 +368,11 @@ pub fn splatByte(bw: *BufferedWriter, byte: u8, n: usize) anyerror!usize {
 /// many times as necessary.
 pub fn splatBytesAll(bw: *BufferedWriter, bytes: []const u8, splat: usize) anyerror!void {
 var remaining_bytes: usize = bytes.len * splat;
-remaining_bytes -= try splatBytes(bw, bytes, splat);
+remaining_bytes -= try bw.splatBytes(bytes, splat);
 while (remaining_bytes > 0) {
 const leftover = remaining_bytes % bytes.len;
 const buffers: [2][]const u8 = .{ bytes[bytes.len - leftover ..], bytes };
-remaining_bytes -= try splatBytes(bw, &buffers, splat);
+remaining_bytes -= try bw.splatBytes(&buffers, splat);
 }
 }
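With the port, formatted output goes through `*std.io.BufferedWriter` methods directly. A minimal sketch, assuming `bw` is such a writer and `name` is any `[]const u8`, using only calls that appear in this commit:

    try bw.writeAll("name = ");
    try bw.print("{s}\n", .{name});
    try bw.splatByteAll('-', 40); // repeat a single byte
    try bw.writeByte('\n');
    try bw.flush();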
@@ -519,7 +519,7 @@ pub fn writeFileAll(bw: *BufferedWriter, file: std.fs.File, options: WriteFileOp
 const headers_and_trailers = options.headers_and_trailers;
 const headers = headers_and_trailers[0..options.headers_len];
 switch (options.limit) {
-.nothing => return writevAll(bw, headers_and_trailers),
+.nothing => return bw.writevAll(headers_and_trailers),
 .unlimited => {
 // When reading the whole file, we cannot include the trailers in the
 // call that reads from the file handle, because we have no way to
@@ -528,7 +528,7 @@ pub fn writeFileAll(bw: *BufferedWriter, file: std.fs.File, options: WriteFileOp
 var i: usize = 0;
 var offset = options.offset;
 while (true) {
-var n = try writeFile(bw, file, offset, .unlimited, headers[i..], headers.len - i);
+var n = try bw.writeFile(file, offset, .unlimited, headers[i..], headers.len - i);
 while (i < headers.len and n >= headers[i].len) {
 n -= headers[i].len;
 i += 1;
@@ -546,7 +546,7 @@ pub fn writeFileAll(bw: *BufferedWriter, file: std.fs.File, options: WriteFileOp
 var i: usize = 0;
 var offset = options.offset;
 while (true) {
-var n = try writeFile(bw, file, offset, .limited(len), headers_and_trailers[i..], headers.len - i);
+var n = try bw.writeFile(file, offset, .limited(len), headers_and_trailers[i..], headers.len - i);
 while (i < headers.len and n >= headers[i].len) {
 n -= headers[i].len;
 i += 1;
@@ -564,7 +564,7 @@ pub fn writeFileAll(bw: *BufferedWriter, file: std.fs.File, options: WriteFileOp
 if (i >= headers_and_trailers.len) return;
 }
 headers_and_trailers[i] = headers_and_trailers[i][n..];
-return writevAll(bw, headers_and_trailers[i..]);
+return bw.writevAll(headers_and_trailers[i..]);
 }
 offset = offset.advance(n);
 len -= n;
@@ -605,7 +605,7 @@ pub fn alignBuffer(
 }

 pub fn alignBufferOptions(bw: *BufferedWriter, buffer: []const u8, options: std.fmt.Options) anyerror!void {
-return alignBuffer(bw, buffer, options.width orelse buffer.len, options.alignment, options.fill);
+return bw.alignBuffer(buffer, options.width orelse buffer.len, options.alignment, options.fill);
 }

 pub fn printAddress(bw: *BufferedWriter, value: anytype) anyerror!void {
@@ -614,15 +614,15 @@ pub fn printAddress(bw: *BufferedWriter, value: anytype) anyerror!void {
 .pointer => |info| {
 try bw.writeAll(@typeName(info.child) ++ "@");
 if (info.size == .slice)
-try printIntOptions(bw, @intFromPtr(value.ptr), 16, .lower, .{})
+try bw.printIntOptions(@intFromPtr(value.ptr), 16, .lower, .{})
 else
-try printIntOptions(bw, @intFromPtr(value), 16, .lower, .{});
+try bw.printIntOptions(@intFromPtr(value), 16, .lower, .{});
 return;
 },
 .optional => |info| {
 if (@typeInfo(info.child) == .pointer) {
 try bw.writeAll(@typeName(info.child) ++ "@");
-try printIntOptions(bw, @intFromPtr(value), 16, .lower, .{});
+try bw.printIntOptions(@intFromPtr(value), 16, .lower, .{});
 return;
 }
 },
@@ -648,7 +648,7 @@ pub fn printValue(
 } else fmt;

 if (comptime std.mem.eql(u8, actual_fmt, "*")) {
-return printAddress(bw, value);
+return bw.printAddress(value);
 }

 if (std.meta.hasMethod(T, "format")) {
@@ -661,24 +661,24 @@ pub fn printValue(
 }

 switch (@typeInfo(T)) {
-.float, .comptime_float => return printFloat(bw, actual_fmt, options, value),
-.int, .comptime_int => return printInt(bw, actual_fmt, options, value),
+.float, .comptime_float => return bw.printFloat(actual_fmt, options, value),
+.int, .comptime_int => return bw.printInt(actual_fmt, options, value),
 .bool => {
 if (actual_fmt.len != 0) invalidFmtError(fmt, value);
-return alignBufferOptions(bw, if (value) "true" else "false", options);
+return bw.alignBufferOptions(if (value) "true" else "false", options);
 },
 .void => {
 if (actual_fmt.len != 0) invalidFmtError(fmt, value);
-return alignBufferOptions(bw, "void", options);
+return bw.alignBufferOptions("void", options);
 },
 .optional => {
 if (actual_fmt.len == 0 or actual_fmt[0] != '?')
 @compileError("cannot print optional without a specifier (i.e. {?} or {any})");
 const remaining_fmt = comptime stripOptionalOrErrorUnionSpec(actual_fmt);
 if (value) |payload| {
-return printValue(bw, remaining_fmt, options, payload, max_depth);
+return bw.printValue(remaining_fmt, options, payload, max_depth);
 } else {
-return alignBufferOptions(bw, "null", options);
+return bw.alignBufferOptions("null", options);
 }
 },
 .error_union => {
@@ -686,9 +686,9 @@ pub fn printValue(
 @compileError("cannot format error union without a specifier (i.e. {!} or {any})");
 const remaining_fmt = comptime stripOptionalOrErrorUnionSpec(actual_fmt);
 if (value) |payload| {
-return printValue(bw, remaining_fmt, options, payload, max_depth);
+return bw.printValue(remaining_fmt, options, payload, max_depth);
 } else |err| {
-return printValue(bw, "", options, err, max_depth);
+return bw.printValue("", options, err, max_depth);
 }
 },
 .error_set => {
@@ -721,14 +721,14 @@ pub fn printValue(
 }

 try bw.writeByte('(');
-try printValue(bw, actual_fmt, options, @intFromEnum(value), max_depth);
+try bw.printValue(actual_fmt, options, @intFromEnum(value), max_depth);
 try bw.writeByte(')');
 },
 .@"union" => |info| {
 if (actual_fmt.len != 0) invalidFmtError(fmt, value);
 try bw.writeAll(@typeName(T));
 if (max_depth == 0) {
-bw.writeAll("{ ... }");
+try bw.writeAll("{ ... }");
 return;
 }
 if (info.tag_type) |UnionTagType| {
@@ -737,13 +737,13 @@ pub fn printValue(
 try bw.writeAll(" = ");
 inline for (info.fields) |u_field| {
 if (value == @field(UnionTagType, u_field.name)) {
-try printValue(bw, ANY, options, @field(value, u_field.name), max_depth - 1);
+try bw.printValue(ANY, options, @field(value, u_field.name), max_depth - 1);
 }
 }
 try bw.writeAll(" }");
 } else {
 try bw.writeByte('@');
-try bw.printIntOptions(@intFromPtr(&value), 16, .lower);
+try bw.printIntOptions(@intFromPtr(&value), 16, .lower, options);
 }
 },
 .@"struct" => |info| {
@@ -761,7 +761,7 @@ pub fn printValue(
 } else {
 try bw.writeAll(", ");
 }
-try printValue(bw, ANY, options, @field(value, f.name), max_depth - 1);
+try bw.printValue(ANY, options, @field(value, f.name), max_depth - 1);
 }
 try bw.writeAll(" }");
 return;
@@ -780,19 +780,19 @@ pub fn printValue(
 }
 try bw.writeAll(f.name);
 try bw.writeAll(" = ");
-try printValue(bw, ANY, options, @field(value, f.name), max_depth - 1);
+try bw.printValue(ANY, options, @field(value, f.name), max_depth - 1);
 }
 try bw.writeAll(" }");
 },
 .pointer => |ptr_info| switch (ptr_info.size) {
 .one => switch (@typeInfo(ptr_info.child)) {
 .array, .@"enum", .@"union", .@"struct" => {
-return printValue(bw, actual_fmt, options, value.*, max_depth);
+return bw.printValue(actual_fmt, options, value.*, max_depth);
 },
 else => {
 var buffers: [2][]const u8 = .{ @typeName(ptr_info.child), "@" };
-try writevAll(bw, &buffers);
-try printIntOptions(bw, @intFromPtr(value), 16, .lower, options);
+try bw.writevAll(&buffers);
+try bw.printIntOptions(@intFromPtr(value), 16, .lower, options);
 return;
 },
 },
@@ -800,10 +800,10 @@ pub fn printValue(
 if (actual_fmt.len == 0)
 @compileError("cannot format pointer without a specifier (i.e. {s} or {*})");
 if (ptr_info.sentinel() != null) {
-return printValue(bw, actual_fmt, options, std.mem.span(value), max_depth);
+return bw.printValue(actual_fmt, options, std.mem.span(value), max_depth);
 }
 if (actual_fmt[0] == 's' and ptr_info.child == u8) {
-return alignBufferOptions(bw, std.mem.span(value), options);
+return bw.alignBufferOptions(std.mem.span(value), options);
 }
 invalidFmtError(fmt, value);
 },
@@ -815,19 +815,19 @@ pub fn printValue(
 }
 if (ptr_info.child == u8) switch (actual_fmt.len) {
 1 => switch (actual_fmt[0]) {
-'s' => return alignBufferOptions(bw, value, options),
-'x' => return printHex(bw, value, .lower),
-'X' => return printHex(bw, value, .upper),
+'s' => return bw.alignBufferOptions(value, options),
+'x' => return bw.printHex(value, .lower),
+'X' => return bw.printHex(value, .upper),
 else => {},
 },
 3 => if (actual_fmt[0] == 'b' and actual_fmt[1] == '6' and actual_fmt[2] == '4') {
-return printBase64(bw, value);
+return bw.printBase64(value);
 },
 else => {},
 };
 try bw.writeAll("{ ");
 for (value, 0..) |elem, i| {
-try printValue(bw, actual_fmt, options, elem, max_depth - 1);
+try bw.printValue(actual_fmt, options, elem, max_depth - 1);
 if (i != value.len - 1) {
 try bw.writeAll(", ");
 }
@@ -843,16 +843,16 @@ pub fn printValue(
 }
 if (info.child == u8) {
 if (actual_fmt[0] == 's') {
-return alignBufferOptions(bw, &value, options);
+return bw.alignBufferOptions(&value, options);
 } else if (actual_fmt[0] == 'x') {
-return printHex(bw, &value, .lower);
+return bw.printHex(&value, .lower);
 } else if (actual_fmt[0] == 'X') {
-return printHex(bw, &value, .upper);
+return bw.printHex(&value, .upper);
 }
 }
 try bw.writeAll("{ ");
 for (value, 0..) |elem, i| {
-try printValue(bw, actual_fmt, options, elem, max_depth - 1);
+try bw.printValue(actual_fmt, options, elem, max_depth - 1);
 if (i < value.len - 1) {
 try bw.writeAll(", ");
 }
@@ -866,7 +866,7 @@ pub fn printValue(
 try bw.writeAll("{ ");
 var i: usize = 0;
 while (i < info.len) : (i += 1) {
-try printValue(bw, actual_fmt, options, value[i], max_depth - 1);
+try bw.printValue(actual_fmt, options, value[i], max_depth - 1);
 if (i < info.len - 1) {
 try bw.writeAll(", ");
 }
@@ -876,16 +876,16 @@ pub fn printValue(
 .@"fn" => @compileError("unable to format function body type, use '*const " ++ @typeName(T) ++ "' for a function pointer type"),
 .type => {
 if (actual_fmt.len != 0) invalidFmtError(fmt, value);
-return alignBufferOptions(bw, @typeName(value), options);
+return bw.alignBufferOptions(@typeName(value), options);
 },
 .enum_literal => {
 if (actual_fmt.len != 0) invalidFmtError(fmt, value);
 const buffer = [_]u8{'.'} ++ @tagName(value);
-return alignBufferOptions(bw, buffer, options);
+return bw.alignBufferOptions(buffer, options);
 },
 .null => {
 if (actual_fmt.len != 0) invalidFmtError(fmt, value);
-return alignBufferOptions(bw, "null", options);
+return bw.alignBufferOptions("null", options);
 },
 else => @compileError("unable to format type '" ++ @typeName(T) ++ "'"),
 }
@@ -903,34 +903,34 @@ pub fn printInt(
 } else value;

 switch (fmt.len) {
-0 => return printIntOptions(bw, int_value, 10, .lower, options),
+0 => return bw.printIntOptions(int_value, 10, .lower, options),
 1 => switch (fmt[0]) {
-'d' => return printIntOptions(bw, int_value, 10, .lower, options),
+'d' => return bw.printIntOptions(int_value, 10, .lower, options),
 'c' => {
 if (@typeInfo(@TypeOf(int_value)).int.bits <= 8) {
-return printAsciiChar(bw, @as(u8, int_value), options);
+return bw.printAsciiChar(@as(u8, int_value), options);
 } else {
 @compileError("cannot print integer that is larger than 8 bits as an ASCII character");
 }
 },
 'u' => {
 if (@typeInfo(@TypeOf(int_value)).int.bits <= 21) {
-return printUnicodeCodepoint(bw, @as(u21, int_value), options);
+return bw.printUnicodeCodepoint(@as(u21, int_value), options);
 } else {
 @compileError("cannot print integer that is larger than 21 bits as an UTF-8 sequence");
 }
 },
-'b' => return printIntOptions(bw, int_value, 2, .lower, options),
-'x' => return printIntOptions(bw, int_value, 16, .lower, options),
-'X' => return printIntOptions(bw, int_value, 16, .upper, options),
-'o' => return printIntOptions(bw, int_value, 8, .lower, options),
-'B' => return printByteSize(bw, int_value, .decimal, options),
-'D' => return printDuration(bw, int_value, options),
+'b' => return bw.printIntOptions(int_value, 2, .lower, options),
+'x' => return bw.printIntOptions(int_value, 16, .lower, options),
+'X' => return bw.printIntOptions(int_value, 16, .upper, options),
+'o' => return bw.printIntOptions(int_value, 8, .lower, options),
+'B' => return bw.printByteSize(int_value, .decimal, options),
+'D' => return bw.printDuration(int_value, options),
 else => invalidFmtError(fmt, value),
 },
 2 => {
 if (fmt[0] == 'B' and fmt[1] == 'i') {
-return printByteSize(bw, int_value, .binary, options);
+return bw.printByteSize(int_value, .binary, options);
 } else {
 invalidFmtError(fmt, value);
 }
@@ -941,17 +941,17 @@ pub fn printInt(
 }

 pub fn printAsciiChar(bw: *BufferedWriter, c: u8, options: std.fmt.Options) anyerror!void {
-return alignBufferOptions(bw, @as(*const [1]u8, &c), options);
+return bw.alignBufferOptions(@as(*const [1]u8, &c), options);
 }

 pub fn printAscii(bw: *BufferedWriter, bytes: []const u8, options: std.fmt.Options) anyerror!void {
-return alignBufferOptions(bw, bytes, options);
+return bw.alignBufferOptions(bytes, options);
 }

 pub fn printUnicodeCodepoint(bw: *BufferedWriter, c: u21, options: std.fmt.Options) anyerror!void {
 var buf: [4]u8 = undefined;
 const len = try std.unicode.utf8Encode(c, &buf);
-return alignBufferOptions(bw, buf[0..len], options);
+return bw.alignBufferOptions(buf[0..len], options);
 }

 pub fn printIntOptions(
@@ -1019,7 +1019,7 @@ pub fn printIntOptions(
 }
 }

-return alignBufferOptions(bw, buf[index..], options);
+return bw.alignBufferOptions(buf[index..], options);
 }

 pub fn printFloat(
@@ -1036,19 +1036,19 @@ pub fn printFloat(
 const s = std.fmt.float.render(&buf, value, .{ .mode = .scientific, .precision = options.precision }) catch |err| switch (err) {
 error.BufferTooSmall => "(float)",
 };
-return alignBufferOptions(bw, s, options);
+return bw.alignBufferOptions(s, options);
 },
 'd' => {
 const s = std.fmt.float.render(&buf, value, .{ .mode = .decimal, .precision = options.precision }) catch |err| switch (err) {
 error.BufferTooSmall => "(float)",
 };
-return alignBufferOptions(bw, s, options);
+return bw.alignBufferOptions(s, options);
 },
 'x' => {
 var sub_bw: BufferedWriter = undefined;
 sub_bw.initFixed(&buf);
 sub_bw.printFloatHexadecimal(value, options.precision) catch unreachable;
-return alignBufferOptions(bw, sub_bw.getWritten(), options);
+return bw.alignBufferOptions(sub_bw.getWritten(), options);
 },
 else => invalidFmtError(fmt, value),
 }
@@ -1150,7 +1150,7 @@ pub fn printFloatHexadecimal(bw: *BufferedWriter, value: anytype, opt_precision:
 try bw.splatByteAll('0', precision - trimmed.len);
 };
 try bw.writeAll("p");
-try printIntOptions(bw, exponent - exponent_bias, 10, .lower, .{});
+try bw.printIntOptions(exponent - exponent_bias, 10, .lower, .{});
 }

 pub const ByteSizeUnits = enum {
@@ -1169,7 +1169,7 @@ pub fn printByteSize(
 comptime units: ByteSizeUnits,
 options: std.fmt.Options,
 ) anyerror!void {
-if (value == 0) return alignBufferOptions(bw, "0B", options);
+if (value == 0) return bw.alignBufferOptions("0B", options);
 // The worst case in terms of space needed is 32 bytes + 3 for the suffix.
 var buf: [std.fmt.float.min_buffer_size + 3]u8 = undefined;

@@ -1213,7 +1213,7 @@ pub fn printByteSize(
 },
 }

-return alignBufferOptions(bw, buf[0..i], options);
+return bw.alignBufferOptions(buf[0..i], options);
 }

 // This ANY const is a workaround for: https://github.com/ziglang/zig/issues/7948
@@ -1250,7 +1250,7 @@ pub fn invalidFmtError(comptime fmt: []const u8, value: anytype) noreturn {

 pub fn printDurationSigned(bw: *BufferedWriter, ns: i64) anyerror!void {
 if (ns < 0) try bw.writeByte('-');
-return printDurationUnsigned(bw, @abs(ns));
+return bw.printDurationUnsigned(@abs(ns));
 }

 pub fn printDurationUnsigned(bw: *BufferedWriter, ns: u64) anyerror!void {
@@ -1296,7 +1296,7 @@ pub fn printDurationUnsigned(bw: *BufferedWriter, ns: u64) anyerror!void {
 }
 }

-try printIntOptions(bw, ns_remaining, 10, .lower, .{});
+try bw.printIntOptions(ns_remaining, 10, .lower, .{});
 try bw.writeAll("ns");
 }

@@ -1312,7 +1312,7 @@ pub fn printDuration(bw: *BufferedWriter, nanoseconds: anytype, options: std.fmt
 .signed => sub_bw.printDurationSigned(nanoseconds) catch unreachable,
 .unsigned => sub_bw.printDurationUnsigned(nanoseconds) catch unreachable,
 }
-return alignBufferOptions(bw, sub_bw.getWritten(), options);
+return bw.alignBufferOptions(sub_bw.getWritten(), options);
 }

 pub fn printHex(bw: *BufferedWriter, bytes: []const u8, case: std.fmt.Case) anyerror!void {
@@ -1321,8 +1321,8 @@ pub fn printHex(bw: *BufferedWriter, bytes: []const u8, case: std.fmt.Case) anye
 .lower => "0123456789abcdef",
 };
 for (bytes) |c| {
-try writeByte(bw, charset[c >> 4]);
-try writeByte(bw, charset[c & 15]);
+try bw.writeByte(charset[c >> 4]);
+try bw.writeByte(charset[c & 15]);
 }
 }
@@ -1334,50 +1334,67 @@ pub fn printBase64(bw: *BufferedWriter, bytes: []const u8) anyerror!void {
 }
 }

-/// Write a single unsigned integer as unsigned LEB128 to the given writer.
-pub fn writeUleb128(bw: *std.io.BufferedWriter, arg: anytype) anyerror!void {
-const Arg = @TypeOf(arg);
-const Int = switch (Arg) {
-comptime_int => std.math.IntFittingRange(arg, arg),
-else => Arg,
-};
-const Value = if (@typeInfo(Int).int.bits < 8) u8 else Int;
-var value: Value = arg;
-
-while (true) {
-const byte: u8 = @truncate(value & 0x7f);
-value >>= 7;
-if (value == 0) {
-try bw.writeByte(byte);
-return;
-} else {
-try bw.writeByte(byte | 0x80);
-}
-}
+/// Write a single unsigned integer as LEB128 to the given writer.
+pub fn writeUleb128(bw: *BufferedWriter, value: anytype) anyerror!void {
+try bw.writeLeb128(switch (@typeInfo(@TypeOf(value))) {
+.comptime_int => @as(std.math.IntFittingRange(0, @abs(value)), value),
+.int => |value_info| switch (value_info.signedness) {
+.signed => @as(@Type(.{ .int = .{ .signedness = .unsigned, .bits = value_info.bits -| 1 } }), @intCast(value)),
+.unsigned => value,
+},
+else => comptime unreachable,
+});
 }

-/// Write a single signed integer as signed LEB128 to the given writer.
-pub fn writeIleb128(bw: *std.io.BufferedWriter, arg: anytype) anyerror!void {
-const Arg = @TypeOf(arg);
-const Int = switch (Arg) {
-comptime_int => std.math.IntFittingRange(-@abs(arg), @abs(arg)),
-else => Arg,
-};
-const Signed = if (@typeInfo(Int).int.bits < 8) i8 else Int;
-const Unsigned = std.meta.Int(.unsigned, @typeInfo(Signed).int.bits);
-var value: Signed = arg;
+/// Write a single signed integer as LEB128 to the given writer.
+pub fn writeSleb128(bw: *BufferedWriter, value: anytype) anyerror!void {
+try bw.writeLeb128(switch (@typeInfo(@TypeOf(value))) {
+.comptime_int => @as(std.math.IntFittingRange(@min(value, -1), @max(0, value)), value),
+.int => |value_info| switch (value_info.signedness) {
+.signed => value,
+.unsigned => @as(@Type(.{ .int = .{ .signedness = .signed, .bits = value_info.bits + 1 } }), value),
+},
+else => comptime unreachable,
+});
+}
+
+/// Write a single integer as LEB128 to the given writer.
+pub fn writeLeb128(bw: *BufferedWriter, value: anytype) anyerror!void {
+const value_info = @typeInfo(@TypeOf(value)).int;
+try bw.writeMultipleOf7Leb128(@as(@Type(.{ .int = .{
+.signedness = value_info.signedness,
+.bits = std.mem.alignForwardAnyAlign(u16, value_info.bits, 7),
+} }), value));
+}
+
+fn writeMultipleOf7Leb128(bw: *BufferedWriter, value: anytype) anyerror!void {
+const value_info = @typeInfo(@TypeOf(value)).int;
+comptime assert(value_info.bits % 7 == 0);
+var remaining = value;
 while (true) {
-const unsigned: Unsigned = @bitCast(value);
-const byte: u8 = @truncate(unsigned);
-value >>= 6;
-if (value == -1 or value == 0) {
-try bw.writeByte(byte & 0x7F);
-return;
-} else {
-value >>= 1;
-try bw.writeByte(byte | 0x80);
+const buffer: []packed struct(u8) { bits: u7, more: bool } = @ptrCast(try bw.writableSlice(1));
+for (buffer, 1..) |*byte, len| {
+const more = switch (value_info.signedness) {
+.signed => remaining >> 6 != remaining >> (value_info.bits - 1),
+.unsigned => remaining > std.math.maxInt(u7),
+};
+byte.* = if (@inComptime()) @typeInfo(@TypeOf(buffer)).pointer.child{
+.bits = @bitCast(@as(@Type(.{ .int = .{
+.signedness = value_info.signedness,
+.bits = 7,
+} }), @truncate(remaining))),
+.more = more,
+} else .{
+.bits = @bitCast(@as(@Type(.{ .int = .{
+.signedness = value_info.signedness,
+.bits = 7,
+} }), @truncate(remaining))),
+.more = more,
+};
+if (value_info.bits > 7) remaining >>= 7;
+if (!more) return bw.advance(len);
 }
+bw.advance(buffer.len);
 }
 }
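The rewrite above splits LEB128 output into `writeUleb128`, `writeSleb128`, and a shared `writeLeb128` that picks the encoding from the operand's signedness. A hedged usage sketch against a `*std.io.BufferedWriter`:

    try bw.writeUleb128(@as(u64, 624485)); // unsigned LEB128
    try bw.writeSleb128(@as(i64, -123456)); // signed LEB128
    try bw.writeLeb128(@as(u21, 0x10FFFF)); // signedness taken from the integer type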
@@ -55,10 +55,10 @@ test writeUnsignedFixed {
 }

 /// This is an "advanced" function. It allows one to use a fixed amount of memory to store an
-/// ILEB128. This defeats the entire purpose of using this data encoding; it will no longer use
+/// SLEB128. This defeats the entire purpose of using this data encoding; it will no longer use
 /// fewer bytes to store smaller numbers. The advantage of using a fixed width is that it makes
 /// fields have a predictable size and so depending on the use case this tradeoff can be worthwhile.
-/// An example use case of this is in emitting DWARF info where one wants to make a ILEB128 field
+/// An example use case of this is in emitting DWARF info where one wants to make a SLEB128 field
 /// "relocatable", meaning that it becomes possible to later go back and patch the number to be a
 /// different value without shifting all the following code.
 pub fn writeSignedFixed(comptime l: usize, ptr: *[l]u8, int: std.meta.Int(.signed, l * 7)) void {
@@ -2322,13 +2322,7 @@ pub const Const = struct {
 /// this function will fail to print the string, printing "(BigInt)" instead of a number.
 /// This is because the rendering algorithm requires reversing a string, which requires O(N) memory.
 /// See `toString` and `toStringAlloc` for a way to print big integers without failure.
-pub fn format(
-self: Const,
-comptime fmt: []const u8,
-options: std.fmt.FormatOptions,
-out_stream: anytype,
-) !void {
-_ = options;
+pub fn format(self: Const, bw: *std.io.BufferedWriter, comptime fmt: []const u8) anyerror!void {
 comptime var base = 10;
 comptime var case: std.fmt.Case = .lower;

@@ -2348,19 +2342,21 @@ pub const Const = struct {
 std.fmt.invalidFmtError(fmt, self);
 }

-const available_len = 64;
-if (self.limbs.len > available_len)
-return out_stream.writeAll("(BigInt)");
-
-var limbs: [calcToStringLimbsBufferLen(available_len, base)]Limb = undefined;
-
-const biggest: Const = .{
-.limbs = &([1]Limb{comptime math.maxInt(Limb)} ** available_len),
-.positive = false,
-};
-var buf: [biggest.sizeInBaseUpperBound(base)]u8 = undefined;
-const len = self.toString(&buf, base, case, &limbs);
-return out_stream.writeAll(buf[0..len]);
+const max_str_len = self.sizeInBaseUpperBound(base);
+const limbs_len = calcToStringLimbsBufferLen(self.limbs.len, base);
+if (bw.writableSlice(max_str_len + @alignOf(Limb) - 1 + @sizeOf(Limb) * limbs_len)) |buf| {
+const limbs: [*]Limb = @alignCast(@ptrCast(std.mem.alignPointer(buf[max_str_len..].ptr, @alignOf(Limb))));
+bw.advance(self.toString(buf[0..max_str_len], base, case, limbs[0..limbs_len]));
+return;
+} else |_| if (bw.writableSlice(max_str_len)) |buf| {
+const available_len = 64;
+var limbs: [calcToStringLimbsBufferLen(available_len, base)]Limb = undefined;
+if (limbs.len >= limbs_len) {
+bw.advance(self.toString(buf, base, case, &limbs));
+return;
+}
+} else |_| {}
+try bw.writeAll("(BigInt)");
 }

 /// Converts self to a string in the requested base.
@@ -1358,7 +1358,7 @@ fn linuxLookupNameFromHosts(

 var line_buf: [512]u8 = undefined;
 var br = file.reader().buffered(&line_buf);
-while (br.takeDelimiterConclusive('\n')) |line| {
+while (br.takeSentinel('\n')) |line| {
 var split_it = mem.splitScalar(u8, line, '#');
 const no_comment_line = split_it.first();

@@ -1550,7 +1550,7 @@ fn getResolvConf(allocator: mem.Allocator, rc: *ResolvConf) !void {

 var line_buf: [512]u8 = undefined;
 var br = file.reader().buffered(&line_buf);
-while (br.takeDelimiterConclusive('\n')) |line_with_comment| {
+while (br.takeSentinel('\n')) |line_with_comment| {
 const line = line: {
 var split = mem.splitScalar(u8, line_with_comment, '#');
 break :line split.first();
@@ -544,9 +544,9 @@ pub fn readSourceFileToEndAlloc(gpa: Allocator, input: std.fs.File, size_hint: u
 var buffer: std.ArrayListAlignedUnmanaged(u8, .@"2") = .empty;
 defer buffer.deinit(gpa);

-try buffer.ensureUnusedCapacity(size_hint);
+try buffer.ensureUnusedCapacity(gpa, size_hint);

-input.readIntoArrayList(gpa, .init(max_src_size), .@"2", &buffer) catch |err| switch (err) {
+input.readIntoArrayList(gpa, .limited(max_src_size), .@"2", &buffer) catch |err| switch (err) {
 error.ConnectionResetByPeer => unreachable,
 error.ConnectionTimedOut => unreachable,
 error.NotOpenForReading => unreachable,
@@ -568,7 +568,7 @@ pub fn readSourceFileToEndAlloc(gpa: Allocator, input: std.fs.File, size_hint: u
 // If the file starts with a UTF-16 little endian BOM, translate it to UTF-8
 if (std.mem.startsWith(u8, buffer.items, "\xff\xfe")) {
 if (buffer.items.len % 2 != 0) return error.InvalidEncoding;
-return std.unicode.utf16LeToUtf8AllocZ(gpa, buffer.items) catch |err| switch (err) {
+return std.unicode.utf16LeToUtf8AllocZ(gpa, @ptrCast(buffer.items)) catch |err| switch (err) {
 error.DanglingSurrogateHalf => error.UnsupportedEncoding,
 error.ExpectedSecondSurrogateHalf => error.UnsupportedEncoding,
 error.UnexpectedSecondSurrogateHalf => error.UnsupportedEncoding,
@@ -576,7 +576,7 @@ pub fn readSourceFileToEndAlloc(gpa: Allocator, input: std.fs.File, size_hint: u
 };
 }

-return buffer.toOwnedSliceSentinel(0);
+return buffer.toOwnedSliceSentinel(gpa, 0);
 }

 pub fn printAstErrorsToStderr(gpa: Allocator, tree: Ast, path: []const u8, color: Color) !void {
@@ -562,7 +562,7 @@ pub fn renderError(tree: Ast, parse_error: Error, bw: *std.io.BufferedWriter) an

 .invalid_byte => {
 const tok_slice = tree.source[tree.tokens.items(.start)[parse_error.token]..];
-return bw.print("{s} contains invalid byte: '{'}'", .{
+return bw.print("{s} contains invalid byte: '{f'}'", .{
 switch (tok_slice[0]) {
 '\'' => "character literal",
 '"', '\\' => "string literal",
@@ -11465,7 +11465,7 @@ fn failWithStrLitError(
 astgen,
 token,
 @intCast(offset + err.offset()),
-"{}",
+"{f}",
 .{err.fmt(raw_string)},
 );
 }
@@ -13898,8 +13898,9 @@ fn lowerAstErrors(astgen: *AstGen) error{OutOfMemory}!void {
 assert(tree.errors.len > 0);

 var msg: std.io.AllocatingWriter = undefined;
-const msg_writer = msg.init(gpa);
+msg.init(gpa);
 defer msg.deinit();
+const msg_bw = &msg.buffered_writer;

 var notes: std.ArrayListUnmanaged(u32) = .empty;
 defer notes.deinit(gpa);
@@ -13933,19 +13934,19 @@ fn lowerAstErrors(astgen: *AstGen) error{OutOfMemory}!void {
 .extra = .{ .offset = bad_off },
 };
 msg.clearRetainingCapacity();
-tree.renderError(err, msg_writer) catch |e| return @errorCast(e); // TODO try @errorCast(...)
+tree.renderError(err, msg_bw) catch |e| return @errorCast(e); // TODO try @errorCast(...)
 return try astgen.appendErrorTokNotesOff(tok, bad_off, "{s}", .{msg.getWritten()}, notes.items);
 }

 var cur_err = tree.errors[0];
 for (tree.errors[1..]) |err| {
 if (err.is_note) {
-tree.renderError(err, msg_writer) catch |e| return @errorCast(e); // TODO try @errorCast(...)
+tree.renderError(err, msg_bw) catch |e| return @errorCast(e); // TODO try @errorCast(...)
 try notes.append(gpa, try astgen.errNoteTok(err.token, "{s}", .{msg.getWritten()}));
 } else {
 // Flush error
 const extra_offset = tree.errorOffset(cur_err);
-tree.renderError(cur_err, msg_writer) catch |e| return @errorCast(e); // TODO try @errorCast(...)
+tree.renderError(cur_err, msg_bw) catch |e| return @errorCast(e); // TODO try @errorCast(...)
 try astgen.appendErrorTokNotesOff(cur_err.token, extra_offset, "{s}", .{msg.getWritten()}, notes.items);
 notes.clearRetainingCapacity();
 cur_err = err;
@@ -13959,7 +13960,7 @@ fn lowerAstErrors(astgen: *AstGen) error{OutOfMemory}!void {

 // Flush error
 const extra_offset = tree.errorOffset(cur_err);
-tree.renderError(cur_err, msg_writer) catch |e| return @errorCast(e); // TODO try @errorCast(...)
+tree.renderError(cur_err, msg_bw) catch |e| return @errorCast(e); // TODO try @errorCast(...)
 try astgen.appendErrorTokNotesOff(cur_err.token, extra_offset, "{s}", .{msg.getWritten()}, notes.items);
 }
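The AstGen changes above show the new `std.io.AllocatingWriter` initialization pattern: `init` no longer returns a writer; callers go through the embedded `buffered_writer` instead. A sketch, reusing only calls that appear in this commit (the printed message is illustrative):

    var msg: std.io.AllocatingWriter = undefined;
    msg.init(gpa);
    defer msg.deinit();
    const msg_bw = &msg.buffered_writer;
    try msg_bw.print("expected {d} arguments", .{3});
    const rendered = msg.getWritten();
    _ = rendered;
    msg.clearRetainingCapacity();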
@@ -43,7 +43,7 @@ pub fn parse(
 }
 }

-const contents = try std.fs.cwd().readFileAlloc(allocator, libc_file, std.math.maxInt(usize));
+const contents = try std.fs.cwd().readFileAlloc(libc_file, allocator, .unlimited);
 defer allocator.free(contents);

 var it = std.mem.tokenizeScalar(u8, contents, '\n');
@@ -778,7 +778,7 @@ fn lowerStrLitError(
 zg,
 token,
 @intCast(offset + err.offset()),
-"{}",
+"{f}",
 .{err.fmt(raw_string)},
 );
 }
@@ -885,8 +885,9 @@ fn lowerAstErrors(zg: *ZonGen) Allocator.Error!void {
 assert(tree.errors.len > 0);

 var msg: std.io.AllocatingWriter = undefined;
-const msg_bw = msg.init(gpa);
+msg.init(gpa);
 defer msg.deinit();
+const msg_bw = &msg.buffered_writer;

 var notes: std.ArrayListUnmanaged(Zoir.CompileError.Note) = .empty;
 defer notes.deinit(gpa);
@ -1,6 +1,6 @@
allocator: std.mem.Allocator,
record_arena: std.heap.ArenaAllocator.State,
reader: std.io.AnyReader,
br: *std.io.BufferedReader,
keep_names: bool,
bit_buffer: u32,
bit_offset: u5,
@ -93,14 +93,14 @@ pub const Record = struct {
};

pub const InitOptions = struct {
reader: std.io.AnyReader,
br: *std.io.BufferedReader,
keep_names: bool = false,
};
pub fn init(allocator: std.mem.Allocator, options: InitOptions) BitcodeReader {
return .{
.allocator = allocator,
.record_arena = .{},
.reader = options.reader,
.br = options.br,
.keep_names = options.keep_names,
.bit_buffer = 0,
.bit_offset = 0,
@ -170,9 +170,9 @@ pub fn next(bc: *BitcodeReader) !?Item {
}
}

pub fn skipBlock(bc: *BitcodeReader, block: Block) !void {
pub fn skipBlock(bc: *BitcodeReader, block: Block) anyerror!void {
assert(bc.bit_offset == 0);
try bc.reader.skipBytes(@as(u34, block.len) * 4, .{});
try bc.br.discard(4 * @as(u34, block.len));
try bc.endBlock();
}

@ -369,21 +369,21 @@ fn align32Bits(bc: *BitcodeReader) void {
bc.bit_offset = 0;
}

fn read32Bits(bc: *BitcodeReader) !u32 {
fn read32Bits(bc: *BitcodeReader) anyerror!u32 {
assert(bc.bit_offset == 0);
return bc.reader.readInt(u32, .little);
return bc.br.takeInt(u32, .little);
}

fn readBytes(bc: *BitcodeReader, bytes: []u8) !void {
fn readBytes(bc: *BitcodeReader, bytes: []u8) anyerror!void {
assert(bc.bit_offset == 0);
try bc.reader.readNoEof(bytes);
try bc.br.read(bytes);

const trailing_bytes = bytes.len % 4;
if (trailing_bytes > 0) {
var bit_buffer = [1]u8{0} ** 4;
try bc.reader.readNoEof(bit_buffer[trailing_bytes..]);
var bit_buffer: [4]u8 = @splat(0);
try bc.br.read(bit_buffer[trailing_bytes..]);
bc.bit_buffer = std.mem.readInt(u32, &bit_buffer, .little);
bc.bit_offset = @intCast(trailing_bytes * 8);
bc.bit_offset = @intCast(8 * trailing_bytes);
}
}

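The BitcodeReader hunks summarize the reader-side migration: type-erased `std.io.AnyReader` calls (`readInt`, `readNoEof`, `skipBytes`) are replaced by direct `*std.io.BufferedReader` calls (`takeInt`, `read`, `discard`). A hedged sketch using only the calls introduced above; it illustrates the call shapes, not the actual bitstream logic:

// Sketch only: the BufferedReader calls this diff switches to.
fn readHeaderWord(br: *std.io.BufferedReader, words_to_skip: u32) anyerror!u32 {
    const word = try br.takeInt(u32, .little); // was reader.readInt(u32, .little)
    try br.discard(4 * @as(u34, words_to_skip)); // was reader.skipBytes(n, .{})
    var tail: [4]u8 = @splat(0);
    try br.read(tail[0..]); // was reader.readNoEof(...)
    return word;
}
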
File diff suppressed because it is too large
@ -77,7 +77,7 @@ const Render = struct {

pub fn renderTree(gpa: Allocator, bw: *std.io.BufferedWriter, tree: Ast, fixups: Fixups) anyerror!void {
assert(tree.errors.len == 0); // Cannot render an invalid tree.
var auto_indenting_stream: AutoIndentingStream = .init(bw, indent_delta);
var auto_indenting_stream: AutoIndentingStream = .init(gpa, bw, indent_delta);
defer auto_indenting_stream.deinit();
var r: Render = .{
.gpa = gpa,
@ -2135,13 +2135,13 @@ fn renderArrayInit(
|
||||
const section_exprs = row_exprs[0..section_end];
|
||||
|
||||
var sub_expr_buffer: std.io.AllocatingWriter = undefined;
|
||||
const sub_expr_buffer_writer = sub_expr_buffer.init(gpa);
|
||||
sub_expr_buffer.init(gpa);
|
||||
defer sub_expr_buffer.deinit();
|
||||
|
||||
const sub_expr_buffer_starts = try gpa.alloc(usize, section_exprs.len + 1);
|
||||
defer gpa.free(sub_expr_buffer_starts);
|
||||
|
||||
var auto_indenting_stream: AutoIndentingStream = .init(sub_expr_buffer_writer, indent_delta);
|
||||
var auto_indenting_stream: AutoIndentingStream = .init(gpa, &sub_expr_buffer.buffered_writer, indent_delta);
|
||||
defer auto_indenting_stream.deinit();
|
||||
var sub_render: Render = .{
|
||||
.gpa = r.gpa,
|
||||
@ -2160,8 +2160,9 @@ fn renderArrayInit(
|
||||
|
||||
if (i + 1 < section_exprs.len) {
|
||||
try renderExpression(&sub_render, expr, .none);
|
||||
const width = sub_expr_buffer.getWritten().len - start;
|
||||
const this_contains_newline = mem.indexOfScalar(u8, sub_expr_buffer.getWritten()[start..], '\n') != null;
|
||||
const written = sub_expr_buffer.getWritten();
|
||||
const width = written.len - start;
|
||||
const this_contains_newline = mem.indexOfScalar(u8, written[start..], '\n') != null;
|
||||
contains_newline = contains_newline or this_contains_newline;
|
||||
expr_widths[i] = width;
|
||||
expr_newlines[i] = this_contains_newline;
|
||||
@ -2183,8 +2184,9 @@ fn renderArrayInit(
|
||||
try renderExpression(&sub_render, expr, .comma);
|
||||
ais.popSpace();
|
||||
|
||||
const width = sub_expr_buffer.items.len - start - 2;
|
||||
const this_contains_newline = mem.indexOfScalar(u8, sub_expr_buffer.getWritten()[start .. sub_expr_buffer.items.len - 1], '\n') != null;
|
||||
const written = sub_expr_buffer.getWritten();
|
||||
const width = written.len - start - 2;
|
||||
const this_contains_newline = mem.indexOfScalar(u8, written[start .. written.len - 1], '\n') != null;
|
||||
contains_newline = contains_newline or this_contains_newline;
|
||||
expr_widths[i] = width;
|
||||
expr_newlines[i] = contains_newline;
|
||||
@ -2682,7 +2684,7 @@ fn renderTokenOverrideSpaceMode(r: *Render, token_index: Ast.TokenIndex, space:
|
||||
const tree = r.tree;
|
||||
const ais = r.ais;
|
||||
const lexeme = tokenSliceForRender(tree, token_index);
|
||||
try ais.writer().writeAll(lexeme);
|
||||
try ais.writeAll(lexeme);
|
||||
ais.enableSpaceMode(override_space);
|
||||
defer ais.disableSpaceMode();
|
||||
try renderSpace(r, token_index, lexeme.len, space);
|
||||
@ -3259,6 +3261,14 @@ fn rowSize(tree: Ast, exprs: []const Ast.Node.Index, rtoken: Ast.TokenIndex) usi
|
||||
const AutoIndentingStream = struct {
|
||||
underlying_writer: *std.io.BufferedWriter,
|
||||
|
||||
/// Offset into the source at which formatting has been disabled with
|
||||
/// a `zig fmt: off` comment.
|
||||
///
|
||||
/// If non-null, the AutoIndentingStream will not write any bytes
|
||||
/// to the underlying writer. It will however continue to track the
|
||||
/// indentation level.
|
||||
disabled_offset: ?usize = null,
|
||||
|
||||
indent_count: usize = 0,
|
||||
indent_delta: usize,
|
||||
indent_stack: std.ArrayList(StackElem),
|
||||
@ -3284,12 +3294,12 @@ const AutoIndentingStream = struct {
indent_count: usize,
};

pub fn init(buffer: *std.ArrayList(u8), indent_delta_: usize) AutoIndentingStream {
pub fn init(gpa: Allocator, bw: *std.io.BufferedWriter, indent_delta_: usize) AutoIndentingStream {
return .{
.underlying_writer = buffer.writer(),
.underlying_writer = bw,
.indent_delta = indent_delta_,
.indent_stack = std.ArrayList(StackElem).init(buffer.allocator),
.space_stack = std.ArrayList(SpaceElem).init(buffer.allocator),
.indent_stack = .init(gpa),
.space_stack = .init(gpa),
};
}

@ -3477,7 +3487,7 @@ const AutoIndentingStream = struct {
const current_indent = ais.currentIndent();
if (ais.current_line_empty and current_indent > 0) {
if (ais.disabled_offset == null) {
try ais.underlying_writer.writeByteNTimes(' ', current_indent);
try ais.underlying_writer.splatByteAll(' ', current_indent);
}
ais.applied_indent = current_indent;
}

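Two smaller substitutions recur across the renderer hunks: the indenting stream now borrows a caller-supplied `*std.io.BufferedWriter` plus an allocator instead of owning an `ArrayList(u8)`, and repeated-byte output moves from `writeByteNTimes` to `splatByteAll`. A hedged sketch of the new construction; `AutoIndentingStream` and `indent_delta` are names internal to render.zig, shown only as used above:

// Sketch: constructing the renderer's indenting stream over a BufferedWriter.
var ais: AutoIndentingStream = .init(gpa, bw, indent_delta); // was .init(buffer, indent_delta)
defer ais.deinit();
try ais.underlying_writer.splatByteAll(' ', 8); // was writeByteNTimes(' ', 8)
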
@ -44,50 +44,44 @@ pub const Error = union(enum) {
|
||||
raw_string: []const u8,
|
||||
};
|
||||
|
||||
fn formatMessage(
|
||||
self: FormatMessage,
|
||||
comptime f: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
fn formatMessage(self: FormatMessage, bw: *std.io.BufferedWriter, comptime f: []const u8) anyerror!void {
|
||||
_ = f;
|
||||
_ = options;
|
||||
switch (self.err) {
|
||||
.invalid_escape_character => |bad_index| try writer.print(
|
||||
.invalid_escape_character => |bad_index| try bw.print(
|
||||
"invalid escape character: '{c}'",
|
||||
.{self.raw_string[bad_index]},
|
||||
),
|
||||
.expected_hex_digit => |bad_index| try writer.print(
|
||||
.expected_hex_digit => |bad_index| try bw.print(
|
||||
"expected hex digit, found '{c}'",
|
||||
.{self.raw_string[bad_index]},
|
||||
),
|
||||
.empty_unicode_escape_sequence => try writer.writeAll(
|
||||
.empty_unicode_escape_sequence => try bw.writeAll(
|
||||
"empty unicode escape sequence",
|
||||
),
|
||||
.expected_hex_digit_or_rbrace => |bad_index| try writer.print(
|
||||
.expected_hex_digit_or_rbrace => |bad_index| try bw.print(
|
||||
"expected hex digit or '}}', found '{c}'",
|
||||
.{self.raw_string[bad_index]},
|
||||
),
|
||||
.invalid_unicode_codepoint => try writer.writeAll(
|
||||
.invalid_unicode_codepoint => try bw.writeAll(
|
||||
"unicode escape does not correspond to a valid unicode scalar value",
|
||||
),
|
||||
.expected_lbrace => |bad_index| try writer.print(
|
||||
.expected_lbrace => |bad_index| try bw.print(
|
||||
"expected '{{', found '{c}'",
|
||||
.{self.raw_string[bad_index]},
|
||||
),
|
||||
.expected_rbrace => |bad_index| try writer.print(
|
||||
.expected_rbrace => |bad_index| try bw.print(
|
||||
"expected '}}', found '{c}'",
|
||||
.{self.raw_string[bad_index]},
|
||||
),
|
||||
.expected_single_quote => |bad_index| try writer.print(
|
||||
.expected_single_quote => |bad_index| try bw.print(
|
||||
"expected single quote ('), found '{c}'",
|
||||
.{self.raw_string[bad_index]},
|
||||
),
|
||||
.invalid_character => |bad_index| try writer.print(
|
||||
.invalid_character => |bad_index| try bw.print(
|
||||
"invalid byte in string or character literal: '{c}'",
|
||||
.{self.raw_string[bad_index]},
|
||||
),
|
||||
.empty_char_literal => try writer.writeAll(
|
||||
.empty_char_literal => try bw.writeAll(
|
||||
"empty character literal",
|
||||
),
|
||||
}
|
||||
@ -363,13 +357,13 @@ pub fn parseWrite(writer: *std.io.BufferedWriter, bytes: []const u8) anyerror!Re
|
||||
/// Higher level API. Does not return extra info about parse errors.
|
||||
/// Caller owns returned memory.
|
||||
pub fn parseAlloc(allocator: std.mem.Allocator, bytes: []const u8) ParseError![]u8 {
|
||||
var buf: std.io.AllocatingWriter = undefined;
|
||||
const bw = buf.init(allocator);
|
||||
defer buf.deinit();
|
||||
var aw: std.io.AllocatingWriter = undefined;
|
||||
aw.init(allocator);
|
||||
defer aw.deinit();
|
||||
// TODO try @errorCast(...)
|
||||
const result = parseWrite(bw, bytes) catch |err| return @errorCast(err);
|
||||
const result = parseWrite(&aw.buffered_writer, bytes) catch |err| return @errorCast(err);
|
||||
switch (result) {
|
||||
.success => return buf.toOwnedSlice(),
|
||||
.success => return aw.toOwnedSlice(),
|
||||
.failure => return error.InvalidLiteral,
|
||||
}
|
||||
}
|
||||
|
||||
@ -583,7 +583,7 @@ pub const Serializer = struct {
|
||||
|
||||
/// Serialize an integer.
|
||||
pub fn int(self: *Serializer, val: anytype) anyerror!void {
|
||||
try std.fmt.formatInt(val, 10, .lower, .{}, self.writer);
|
||||
try self.writer.printIntOptions(val, 10, .lower, .{});
|
||||
}
|
||||
|
||||
/// Serialize a float.
|
||||
@ -613,7 +613,7 @@ pub const Serializer = struct {
|
||||
///
|
||||
/// Escapes the identifier if necessary.
|
||||
pub fn ident(self: *Serializer, name: []const u8) anyerror!void {
|
||||
try self.writer.print(".{p_}", .{std.zig.fmtId(name)});
|
||||
try self.writer.print(".{fp_}", .{std.zig.fmtId(name)});
|
||||
}
|
||||
|
||||
/// Serialize `val` as a Unicode codepoint.
|
||||
@ -626,7 +626,7 @@ pub const Serializer = struct {
|
||||
var buf: [8]u8 = undefined;
|
||||
const len = std.unicode.utf8Encode(val, &buf) catch return error.InvalidCodepoint;
|
||||
const str = buf[0..len];
|
||||
try std.fmt.format(self.writer, "'{'}'", .{std.zig.fmtEscapes(str)});
|
||||
try std.fmt.format(self.writer, "'{f'}'", .{std.zig.fmtEscapes(str)});
|
||||
}
|
||||
|
||||
/// Like `value`, but always serializes `val` as a tuple.
|
||||
@ -684,7 +684,7 @@ pub const Serializer = struct {
|
||||
|
||||
/// Like `value`, but always serializes `val` as a string.
|
||||
pub fn string(self: *Serializer, val: []const u8) anyerror!void {
|
||||
try std.fmt.format(self.writer, "\"{}\"", .{std.zig.fmtEscapes(val)});
|
||||
try std.fmt.format(self.writer, "\"{f}\"", .{std.zig.fmtEscapes(val)});
|
||||
}
|
||||
|
||||
/// Options for formatting multiline strings.
|
||||
@ -758,7 +758,7 @@ pub const Serializer = struct {
|
||||
|
||||
fn indent(self: *Serializer) anyerror!void {
|
||||
if (self.options.whitespace) {
|
||||
try self.writer.writeByteNTimes(' ', 4 * self.indent_level);
|
||||
try self.writer.splatByteAll(' ', 4 * self.indent_level);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
13
src/Air.zig
@ -957,18 +957,13 @@ pub const Inst = struct {
return index.unwrap().target;
}

pub fn format(
index: Index,
comptime _: []const u8,
_: std.fmt.Options,
writer: *std.io.BufferedWriter,
) anyerror!void {
try writer.writeByte('%');
pub fn format(index: Index, bw: *std.io.BufferedWriter, comptime _: []const u8) anyerror!void {
try bw.writeByte('%');
switch (index.unwrap()) {
.ref => {},
.target => try writer.writeByte('t'),
.target => try bw.writeByte('t'),
}
try writer.print("{d}", .{@as(u31, @truncate(@intFromEnum(index)))});
try bw.print("{d}", .{@as(u31, @truncate(@intFromEnum(index)))});
}
};

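The `format` rewrite above is the shape applied to every custom formatter in this commit: the old four-parameter method (format string, `std.fmt.FormatOptions`, `anytype` writer) becomes `(value, bw: *std.io.BufferedWriter, comptime fmt: []const u8) anyerror!void`, and call sites switch from `{}` to `{f}` to invoke it. A hedged sketch of a formatter in the new shape; the `Id` wrapper and `%` prefix are illustrative only:

// Sketch: a custom formatter following the post-migration signature used in this diff.
const Id = struct {
    n: u32,

    pub fn format(id: Id, bw: *std.io.BufferedWriter, comptime fmt: []const u8) anyerror!void {
        comptime std.debug.assert(fmt.len == 0); // no custom specifier supported
        try bw.writeByte('%');
        try bw.print("{d}", .{id.n});
    }
};
// Call sites select the method with the new specifier: try bw.print("{f}", .{id});
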
@ -1323,7 +1323,7 @@ fn analyzeOperands(
|
||||
const mask = @as(Bpi, 1) << @as(OperandInt, @intCast(i));
|
||||
|
||||
if ((try data.live_set.fetchPut(gpa, operand, {})) == null) {
|
||||
log.debug("[{}] %{}: added %{} to live set (operand dies here)", .{ pass, @intFromEnum(inst), operand });
|
||||
log.debug("[{}] %{}: added %{f} to live set (operand dies here)", .{ pass, @intFromEnum(inst), operand });
|
||||
tomb_bits |= mask;
|
||||
}
|
||||
}
|
||||
@ -1462,19 +1462,19 @@ fn analyzeInstBlock(
|
||||
},
|
||||
|
||||
.main_analysis => {
|
||||
log.debug("[{}] %{}: block live set is {}", .{ pass, inst, fmtInstSet(&data.live_set) });
|
||||
log.debug("[{}] %{f}: block live set is {f}", .{ pass, inst, fmtInstSet(&data.live_set) });
|
||||
// We can move the live set because the body should have a noreturn
|
||||
// instruction which overrides the set.
|
||||
try data.block_scopes.put(gpa, inst, .{
|
||||
.live_set = data.live_set.move(),
|
||||
});
|
||||
defer {
|
||||
log.debug("[{}] %{}: popped block scope", .{ pass, inst });
|
||||
log.debug("[{}] %{f}: popped block scope", .{ pass, inst });
|
||||
var scope = data.block_scopes.fetchRemove(inst).?.value;
|
||||
scope.live_set.deinit(gpa);
|
||||
}
|
||||
|
||||
log.debug("[{}] %{}: pushed new block scope", .{ pass, inst });
|
||||
log.debug("[{}] %{f}: pushed new block scope", .{ pass, inst });
|
||||
try analyzeBody(a, pass, data, body);
|
||||
|
||||
// If the block is noreturn, block deaths not only aren't useful, they're impossible to
|
||||
@ -1501,7 +1501,7 @@ fn analyzeInstBlock(
|
||||
}
|
||||
assert(measured_num == num_deaths); // post-live-set should be a subset of pre-live-set
|
||||
try a.special.put(gpa, inst, extra_index);
|
||||
log.debug("[{}] %{}: block deaths are {}", .{
|
||||
log.debug("[{}] %{f}: block deaths are {f}", .{
|
||||
pass,
|
||||
inst,
|
||||
fmtInstList(@ptrCast(a.extra.items[extra_index + 1 ..][0..num_deaths])),
|
||||
@ -1538,7 +1538,7 @@ fn writeLoopInfo(
|
||||
const block_inst = key.*;
|
||||
a.extra.appendAssumeCapacity(@intFromEnum(block_inst));
|
||||
}
|
||||
log.debug("[{}] %{}: includes breaks to {}", .{ LivenessPass.loop_analysis, inst, fmtInstSet(&data.breaks) });
|
||||
log.debug("[{}] %{f}: includes breaks to {f}", .{ LivenessPass.loop_analysis, inst, fmtInstSet(&data.breaks) });
|
||||
|
||||
// Now we put the live operands from the loop body in too
|
||||
const num_live = data.live_set.count();
|
||||
@ -1550,7 +1550,7 @@ fn writeLoopInfo(
|
||||
const alive = key.*;
|
||||
a.extra.appendAssumeCapacity(@intFromEnum(alive));
|
||||
}
|
||||
log.debug("[{}] %{}: maintain liveness of {}", .{ LivenessPass.loop_analysis, inst, fmtInstSet(&data.live_set) });
|
||||
log.debug("[{}] %{f}: maintain liveness of {f}", .{ LivenessPass.loop_analysis, inst, fmtInstSet(&data.live_set) });
|
||||
|
||||
try a.special.put(gpa, inst, extra_index);
|
||||
|
||||
@ -1591,7 +1591,7 @@ fn resolveLoopLiveSet(
|
||||
try data.live_set.ensureUnusedCapacity(gpa, @intCast(loop_live.len));
|
||||
for (loop_live) |alive| data.live_set.putAssumeCapacity(alive, {});
|
||||
|
||||
log.debug("[{}] %{}: block live set is {}", .{ LivenessPass.main_analysis, inst, fmtInstSet(&data.live_set) });
|
||||
log.debug("[{}] %{f}: block live set is {f}", .{ LivenessPass.main_analysis, inst, fmtInstSet(&data.live_set) });
|
||||
|
||||
for (breaks) |block_inst| {
|
||||
// We might break to this block, so include every operand that the block needs alive
|
||||
@ -1604,7 +1604,7 @@ fn resolveLoopLiveSet(
|
||||
}
|
||||
}
|
||||
|
||||
log.debug("[{}] %{}: loop live set is {}", .{ LivenessPass.main_analysis, inst, fmtInstSet(&data.live_set) });
|
||||
log.debug("[{}] %{f}: loop live set is {f}", .{ LivenessPass.main_analysis, inst, fmtInstSet(&data.live_set) });
|
||||
}
|
||||
|
||||
fn analyzeInstLoop(
|
||||
@ -1642,7 +1642,7 @@ fn analyzeInstLoop(
|
||||
.live_set = data.live_set.move(),
|
||||
});
|
||||
defer {
|
||||
log.debug("[{}] %{}: popped loop block scop", .{ pass, inst });
|
||||
log.debug("[{}] %{f}: popped loop block scop", .{ pass, inst });
|
||||
var scope = data.block_scopes.fetchRemove(inst).?.value;
|
||||
scope.live_set.deinit(gpa);
|
||||
}
|
||||
@ -1743,13 +1743,13 @@ fn analyzeInstCondBr(
|
||||
}
|
||||
}
|
||||
|
||||
log.debug("[{}] %{}: 'then' branch mirrored deaths are {}", .{ pass, inst, fmtInstList(then_mirrored_deaths.items) });
|
||||
log.debug("[{}] %{}: 'else' branch mirrored deaths are {}", .{ pass, inst, fmtInstList(else_mirrored_deaths.items) });
|
||||
log.debug("[{}] %{f}: 'then' branch mirrored deaths are {f}", .{ pass, inst, fmtInstList(then_mirrored_deaths.items) });
|
||||
log.debug("[{}] %{f}: 'else' branch mirrored deaths are {f}", .{ pass, inst, fmtInstList(else_mirrored_deaths.items) });
|
||||
|
||||
data.live_set.deinit(gpa);
|
||||
data.live_set = then_live.move(); // Really the union of both live sets
|
||||
|
||||
log.debug("[{}] %{}: new live set is {}", .{ pass, inst, fmtInstSet(&data.live_set) });
|
||||
log.debug("[{}] %{f}: new live set is {f}", .{ pass, inst, fmtInstSet(&data.live_set) });
|
||||
|
||||
// Write the mirrored deaths to `extra`
|
||||
const then_death_count = @as(u32, @intCast(then_mirrored_deaths.items.len));
|
||||
@ -1817,7 +1817,7 @@ fn analyzeInstSwitchBr(
|
||||
});
|
||||
}
|
||||
defer if (is_dispatch_loop) {
|
||||
log.debug("[{}] %{}: popped loop block scop", .{ pass, inst });
|
||||
log.debug("[{}] %{f}: popped loop block scop", .{ pass, inst });
|
||||
var scope = data.block_scopes.fetchRemove(inst).?.value;
|
||||
scope.live_set.deinit(gpa);
|
||||
};
|
||||
@ -1875,13 +1875,13 @@ fn analyzeInstSwitchBr(
|
||||
}
|
||||
|
||||
for (mirrored_deaths, 0..) |mirrored, i| {
|
||||
log.debug("[{}] %{}: case {} mirrored deaths are {}", .{ pass, inst, i, fmtInstList(mirrored.items) });
|
||||
log.debug("[{}] %{f}: case {} mirrored deaths are {f}", .{ pass, inst, i, fmtInstList(mirrored.items) });
|
||||
}
|
||||
|
||||
data.live_set.deinit(gpa);
|
||||
data.live_set = all_alive.move();
|
||||
|
||||
log.debug("[{}] %{}: new live set is {}", .{ pass, inst, fmtInstSet(&data.live_set) });
|
||||
log.debug("[{}] %{f}: new live set is {f}", .{ pass, inst, fmtInstSet(&data.live_set) });
|
||||
}
|
||||
|
||||
const else_death_count = @as(u32, @intCast(mirrored_deaths[ncases].items.len));
|
||||
@ -1980,7 +1980,7 @@ fn AnalyzeBigOperands(comptime pass: LivenessPass) type {
|
||||
|
||||
.main_analysis => {
|
||||
if ((try big.data.live_set.fetchPut(gpa, operand, {})) == null) {
|
||||
log.debug("[{}] %{}: added %{} to live set (operand dies here)", .{ pass, big.inst, operand });
|
||||
log.debug("[{}] %{f}: added %{f} to live set (operand dies here)", .{ pass, big.inst, operand });
|
||||
big.extra_tombs[extra_byte] |= @as(u32, 1) << extra_bit;
|
||||
}
|
||||
},
|
||||
@ -2036,15 +2036,15 @@ fn fmtInstSet(set: *const std.AutoHashMapUnmanaged(Air.Inst.Index, void)) FmtIns
|
||||
const FmtInstSet = struct {
|
||||
set: *const std.AutoHashMapUnmanaged(Air.Inst.Index, void),
|
||||
|
||||
pub fn format(val: FmtInstSet, comptime _: []const u8, _: std.fmt.FormatOptions, w: anytype) !void {
|
||||
pub fn format(val: FmtInstSet, bw: *std.io.BufferedWriter, comptime _: []const u8) anyerror!void {
|
||||
if (val.set.count() == 0) {
|
||||
try w.writeAll("[no instructions]");
|
||||
try bw.writeAll("[no instructions]");
|
||||
return;
|
||||
}
|
||||
var it = val.set.keyIterator();
|
||||
try w.print("%{}", .{it.next().?.*});
|
||||
try bw.print("%{f}", .{it.next().?.*});
|
||||
while (it.next()) |key| {
|
||||
try w.print(" %{}", .{key.*});
|
||||
try bw.print(" %{f}", .{key.*});
|
||||
}
|
||||
}
|
||||
};
|
||||
@ -2056,14 +2056,14 @@ fn fmtInstList(list: []const Air.Inst.Index) FmtInstList {
|
||||
const FmtInstList = struct {
|
||||
list: []const Air.Inst.Index,
|
||||
|
||||
pub fn format(val: FmtInstList, comptime _: []const u8, _: std.fmt.FormatOptions, w: anytype) !void {
|
||||
pub fn format(val: FmtInstList, bw: *std.io.BufferedWriter, comptime _: []const u8) anyerror!void {
|
||||
if (val.list.len == 0) {
|
||||
try w.writeAll("[no instructions]");
|
||||
try bw.writeAll("[no instructions]");
|
||||
return;
|
||||
}
|
||||
try w.print("%{}", .{val.list[0]});
|
||||
try bw.print("%{f}", .{val.list[0]});
|
||||
for (val.list[1..]) |inst| {
|
||||
try w.print(" %{}", .{inst});
|
||||
try bw.print(" %{f}", .{inst});
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
@ -73,7 +73,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
|
||||
.trap, .unreach => {
|
||||
try self.verifyInstOperands(inst, .{ .none, .none, .none });
|
||||
// This instruction terminates the function, so everything should be dead
|
||||
if (self.live.count() > 0) return invalid("%{}: instructions still alive", .{inst});
|
||||
if (self.live.count() > 0) return invalid("%{f}: instructions still alive", .{inst});
|
||||
},
|
||||
|
||||
// unary
|
||||
@ -166,7 +166,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
|
||||
const un_op = data[@intFromEnum(inst)].un_op;
|
||||
try self.verifyInstOperands(inst, .{ un_op, .none, .none });
|
||||
// This instruction terminates the function, so everything should be dead
|
||||
if (self.live.count() > 0) return invalid("%{}: instructions still alive", .{inst});
|
||||
if (self.live.count() > 0) return invalid("%{f}: instructions still alive", .{inst});
|
||||
},
|
||||
.dbg_var_ptr,
|
||||
.dbg_var_val,
|
||||
@ -450,7 +450,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
|
||||
.repeat => {
|
||||
const repeat = data[@intFromEnum(inst)].repeat;
|
||||
const expected_live = self.loops.get(repeat.loop_inst) orelse
|
||||
return invalid("%{}: loop %{} not in scope", .{ @intFromEnum(inst), @intFromEnum(repeat.loop_inst) });
|
||||
return invalid("%{d}: loop %{d} not in scope", .{ @intFromEnum(inst), @intFromEnum(repeat.loop_inst) });
|
||||
|
||||
try self.verifyMatchingLiveness(repeat.loop_inst, expected_live);
|
||||
},
|
||||
@ -460,7 +460,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
|
||||
try self.verifyOperand(inst, br.operand, self.liveness.operandDies(inst, 0));
|
||||
|
||||
const expected_live = self.loops.get(br.block_inst) orelse
|
||||
return invalid("%{}: loop %{} not in scope", .{ @intFromEnum(inst), @intFromEnum(br.block_inst) });
|
||||
return invalid("%{d}: loop %{d} not in scope", .{ @intFromEnum(inst), @intFromEnum(br.block_inst) });
|
||||
|
||||
try self.verifyMatchingLiveness(br.block_inst, expected_live);
|
||||
},
|
||||
@ -601,9 +601,9 @@ fn verifyOperand(self: *Verify, inst: Air.Inst.Index, op_ref: Air.Inst.Ref, dies
|
||||
return;
|
||||
};
|
||||
if (dies) {
|
||||
if (!self.live.remove(operand)) return invalid("%{}: dead operand %{} reused and killed again", .{ inst, operand });
|
||||
if (!self.live.remove(operand)) return invalid("%{f}: dead operand %{f} reused and killed again", .{ inst, operand });
|
||||
} else {
|
||||
if (!self.live.contains(operand)) return invalid("%{}: dead operand %{} reused", .{ inst, operand });
|
||||
if (!self.live.contains(operand)) return invalid("%{f}: dead operand %{f} reused", .{ inst, operand });
|
||||
}
|
||||
}
|
||||
|
||||
@ -628,9 +628,9 @@ fn verifyInst(self: *Verify, inst: Air.Inst.Index) Error!void {
|
||||
}
|
||||
|
||||
fn verifyMatchingLiveness(self: *Verify, block: Air.Inst.Index, live: LiveMap) Error!void {
|
||||
if (self.live.count() != live.count()) return invalid("%{}: different deaths across branches", .{block});
|
||||
if (self.live.count() != live.count()) return invalid("%{f}: different deaths across branches", .{block});
|
||||
var live_it = self.live.keyIterator();
|
||||
while (live_it.next()) |live_inst| if (!live.contains(live_inst.*)) return invalid("%{}: different deaths across branches", .{block});
|
||||
while (live_it.next()) |live_inst| if (!live.contains(live_inst.*)) return invalid("%{f}: different deaths across branches", .{block});
|
||||
}
|
||||
|
||||
fn invalid(comptime fmt: []const u8, args: anytype) error{LivenessInvalid} {
|
||||
|
||||
@ -101,7 +101,7 @@ const Writer = struct {
|
||||
fn writeInst(w: *Writer, s: *std.io.BufferedWriter, inst: Air.Inst.Index) anyerror!void {
|
||||
const tag = w.air.instructions.items(.tag)[@intFromEnum(inst)];
|
||||
try s.splatByteAll(' ', w.indent);
|
||||
try s.print("{}{c}= {s}(", .{
|
||||
try s.print("{f}{c}= {s}(", .{
|
||||
inst,
|
||||
@as(u8, if (if (w.liveness) |liveness| liveness.isUnused(inst) else false) '!' else ' '),
|
||||
@tagName(tag),
|
||||
@ -416,7 +416,7 @@ const Writer = struct {
|
||||
try s.writeAll("}");
|
||||
|
||||
for (liveness_block.deaths) |operand| {
|
||||
try s.print(" {}!", .{operand});
|
||||
try s.print(" {f}!", .{operand});
|
||||
}
|
||||
}
|
||||
|
||||
@ -708,7 +708,7 @@ const Writer = struct {
|
||||
}
|
||||
}
|
||||
const asm_source = std.mem.sliceAsBytes(w.air.extra.items[extra_i..])[0..extra.data.source_len];
|
||||
try s.print(", \"{}\"", .{std.zig.fmtEscapes(asm_source)});
|
||||
try s.print(", \"{f}\"", .{std.zig.fmtEscapes(asm_source)});
|
||||
}
|
||||
|
||||
fn writeDbgStmt(w: *Writer, s: *std.io.BufferedWriter, inst: Air.Inst.Index) anyerror!void {
|
||||
@ -720,7 +720,7 @@ const Writer = struct {
|
||||
const pl_op = w.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
|
||||
try w.writeOperand(s, inst, 0, pl_op.operand);
|
||||
const name: Air.NullTerminatedString = @enumFromInt(pl_op.payload);
|
||||
try s.print(", \"{}\"", .{std.zig.fmtEscapes(name.toSlice(w.air))});
|
||||
try s.print(", \"{f}\"", .{std.zig.fmtEscapes(name.toSlice(w.air))});
|
||||
}
|
||||
|
||||
fn writeCall(w: *Writer, s: *std.io.BufferedWriter, inst: Air.Inst.Index) anyerror!void {
|
||||
@ -767,7 +767,7 @@ const Writer = struct {
|
||||
try s.splatByteAll(' ', w.indent);
|
||||
for (liveness_condbr.else_deaths, 0..) |operand, i| {
|
||||
if (i != 0) try s.writeAll(" ");
|
||||
try s.print("{}!", .{operand});
|
||||
try s.print("{f}!", .{operand});
|
||||
}
|
||||
try s.writeAll("\n");
|
||||
}
|
||||
@ -778,7 +778,7 @@ const Writer = struct {
|
||||
try s.writeAll("}");
|
||||
|
||||
for (liveness_condbr.then_deaths) |operand| {
|
||||
try s.print(" {}!", .{operand});
|
||||
try s.print(" {f}!", .{operand});
|
||||
}
|
||||
}
|
||||
|
||||
@ -804,7 +804,7 @@ const Writer = struct {
|
||||
try s.splatByteAll(' ', w.indent);
|
||||
for (liveness_condbr.else_deaths, 0..) |operand, i| {
|
||||
if (i != 0) try s.writeAll(" ");
|
||||
try s.print("{}!", .{operand});
|
||||
try s.print("{f}!", .{operand});
|
||||
}
|
||||
try s.writeAll("\n");
|
||||
}
|
||||
@ -815,7 +815,7 @@ const Writer = struct {
|
||||
try s.writeAll("}");
|
||||
|
||||
for (liveness_condbr.then_deaths) |operand| {
|
||||
try s.print(" {}!", .{operand});
|
||||
try s.print(" {f}!", .{operand});
|
||||
}
|
||||
}
|
||||
|
||||
@ -846,7 +846,7 @@ const Writer = struct {
|
||||
try s.splatByteAll(' ', w.indent);
|
||||
for (liveness_condbr.then_deaths, 0..) |operand, i| {
|
||||
if (i != 0) try s.writeAll(" ");
|
||||
try s.print("{}!", .{operand});
|
||||
try s.print("{f}!", .{operand});
|
||||
}
|
||||
try s.writeAll("\n");
|
||||
}
|
||||
@ -866,7 +866,7 @@ const Writer = struct {
|
||||
try s.splatByteAll(' ', w.indent);
|
||||
for (liveness_condbr.else_deaths, 0..) |operand, i| {
|
||||
if (i != 0) try s.writeAll(" ");
|
||||
try s.print("{}!", .{operand});
|
||||
try s.print("{f}!", .{operand});
|
||||
}
|
||||
try s.writeAll("\n");
|
||||
}
|
||||
@ -923,7 +923,7 @@ const Writer = struct {
|
||||
try s.splatByteAll(' ', w.indent);
|
||||
for (deaths, 0..) |operand, i| {
|
||||
if (i != 0) try s.writeAll(" ");
|
||||
try s.print("{}!", .{operand});
|
||||
try s.print("{f}!", .{operand});
|
||||
}
|
||||
try s.writeAll("\n");
|
||||
}
|
||||
@ -949,7 +949,7 @@ const Writer = struct {
|
||||
try s.splatByteAll(' ', w.indent);
|
||||
for (deaths, 0..) |operand, i| {
|
||||
if (i != 0) try s.writeAll(" ");
|
||||
try s.print("{}!", .{operand});
|
||||
try s.print("{f}!", .{operand});
|
||||
}
|
||||
try s.writeAll("\n");
|
||||
}
|
||||
@ -1017,7 +1017,7 @@ const Writer = struct {
|
||||
} else if (operand.toInterned()) |ip_index| {
|
||||
const pt = w.pt;
|
||||
const ty = Type.fromInterned(pt.zcu.intern_pool.indexToKey(ip_index).typeOf());
|
||||
try s.print("<{}, {}>", .{
|
||||
try s.print("<{f}, {f}>", .{
|
||||
ty.fmt(pt),
|
||||
Value.fromInterned(ip_index).fmtValue(pt),
|
||||
});
|
||||
@ -1033,7 +1033,7 @@ const Writer = struct {
|
||||
dies: bool,
|
||||
) anyerror!void {
|
||||
_ = w;
|
||||
try s.print("{}", .{inst});
|
||||
try s.print("{f}", .{inst});
|
||||
if (dies) try s.writeByte('!');
|
||||
}
|
||||
|
||||
|
||||
@ -57,18 +57,18 @@ pub fn append(opts: @This(), buffer: *std.ArrayList(u8)) Allocator.Error!void {
|
||||
\\/// feature detection (i.e. with `@hasDecl` or `@hasField`) over version checks.
|
||||
\\pub const zig_version = std.SemanticVersion.parse(zig_version_string) catch unreachable;
|
||||
\\pub const zig_version_string = "{s}";
|
||||
\\pub const zig_backend = std.builtin.CompilerBackend.{p_};
|
||||
\\pub const zig_backend = std.builtin.CompilerBackend.{fp_};
|
||||
\\
|
||||
\\pub const output_mode: std.builtin.OutputMode = .{p_};
|
||||
\\pub const link_mode: std.builtin.LinkMode = .{p_};
|
||||
\\pub const unwind_tables: std.builtin.UnwindTables = .{p_};
|
||||
\\pub const output_mode: std.builtin.OutputMode = .{fp_};
|
||||
\\pub const link_mode: std.builtin.LinkMode = .{fp_};
|
||||
\\pub const unwind_tables: std.builtin.UnwindTables = .{fp_};
|
||||
\\pub const is_test = {};
|
||||
\\pub const single_threaded = {};
|
||||
\\pub const abi: std.Target.Abi = .{p_};
|
||||
\\pub const abi: std.Target.Abi = .{fp_};
|
||||
\\pub const cpu: std.Target.Cpu = .{{
|
||||
\\ .arch = .{p_},
|
||||
\\ .model = &std.Target.{p_}.cpu.{p_},
|
||||
\\ .features = std.Target.{p_}.featureSet(&.{{
|
||||
\\ .arch = .{fp_},
|
||||
\\ .model = &std.Target.{fp_}.cpu.{fp_},
|
||||
\\ .features = std.Target.{fp_}.featureSet(&.{{
|
||||
\\
|
||||
, .{
|
||||
build_options.version,
|
||||
@ -89,14 +89,14 @@ pub fn append(opts: @This(), buffer: *std.ArrayList(u8)) Allocator.Error!void {
|
||||
const index = @as(std.Target.Cpu.Feature.Set.Index, @intCast(index_usize));
|
||||
const is_enabled = target.cpu.features.isEnabled(index);
|
||||
if (is_enabled) {
|
||||
try buffer.print(" .{p_},\n", .{std.zig.fmtId(feature.name)});
|
||||
try buffer.print(" .{fp_},\n", .{std.zig.fmtId(feature.name)});
|
||||
}
|
||||
}
|
||||
try buffer.print(
|
||||
\\ }}),
|
||||
\\}};
|
||||
\\pub const os: std.Target.Os = .{{
|
||||
\\ .tag = .{p_},
|
||||
\\ .tag = .{fp_},
|
||||
\\ .version_range = .{{
|
||||
,
|
||||
.{std.zig.fmtId(@tagName(target.os.tag))},
|
||||
@ -200,8 +200,8 @@ pub fn append(opts: @This(), buffer: *std.ArrayList(u8)) Allocator.Error!void {
|
||||
}),
|
||||
.windows => |windows| try buffer.print(
|
||||
\\ .windows = .{{
|
||||
\\ .min = {c},
|
||||
\\ .max = {c},
|
||||
\\ .min = {fc},
|
||||
\\ .max = {fc},
|
||||
\\ }}}},
|
||||
\\
|
||||
, .{ windows.min, windows.max }),
|
||||
@ -238,8 +238,8 @@ pub fn append(opts: @This(), buffer: *std.ArrayList(u8)) Allocator.Error!void {
|
||||
const link_libc = opts.link_libc;
|
||||
|
||||
try buffer.print(
|
||||
\\pub const object_format: std.Target.ObjectFormat = .{p_};
|
||||
\\pub const mode: std.builtin.OptimizeMode = .{p_};
|
||||
\\pub const object_format: std.Target.ObjectFormat = .{fp_};
|
||||
\\pub const mode: std.builtin.OptimizeMode = .{fp_};
|
||||
\\pub const link_libc = {};
|
||||
\\pub const link_libcpp = {};
|
||||
\\pub const have_error_return_tracing = {};
|
||||
@ -249,7 +249,7 @@ pub fn append(opts: @This(), buffer: *std.ArrayList(u8)) Allocator.Error!void {
|
||||
\\pub const position_independent_code = {};
|
||||
\\pub const position_independent_executable = {};
|
||||
\\pub const strip_debug_info = {};
|
||||
\\pub const code_model: std.builtin.CodeModel = .{p_};
|
||||
\\pub const code_model: std.builtin.CodeModel = .{fp_};
|
||||
\\pub const omit_frame_pointer = {};
|
||||
\\
|
||||
, .{
|
||||
@ -270,7 +270,7 @@ pub fn append(opts: @This(), buffer: *std.ArrayList(u8)) Allocator.Error!void {
|
||||
|
||||
if (target.os.tag == .wasi) {
|
||||
try buffer.print(
|
||||
\\pub const wasi_exec_model: std.builtin.WasiExecModel = .{p_};
|
||||
\\pub const wasi_exec_model: std.builtin.WasiExecModel = .{fp_};
|
||||
\\
|
||||
, .{std.zig.fmtId(@tagName(opts.wasi_exec_model))});
|
||||
}
|
||||
@ -317,7 +317,7 @@ pub fn updateFileOnDisk(file: *File, comp: *Compilation) !void {
|
||||
if (root_dir.statFile(sub_path)) |stat| {
|
||||
if (stat.size != file.source.?.len) {
|
||||
std.log.warn(
|
||||
"the cached file '{}' had the wrong size. Expected {d}, found {d}. " ++
|
||||
"the cached file '{f}{s}' had the wrong size. Expected {d}, found {d}. " ++
|
||||
"Overwriting with correct file contents now",
|
||||
.{ file.path.fmt(comp), file.source.?.len, stat.size },
|
||||
);
|
||||
|
||||
@ -1068,11 +1068,12 @@ pub const CObject = struct {
}
};

var buffer: [1024]u8 = undefined;
const file = try std.fs.cwd().openFile(path, .{});
defer file.close();
var br = std.io.bufferedReader(file.reader());
const reader = br.reader();
var bc = std.zig.llvm.BitcodeReader.init(gpa, .{ .reader = reader.any() });
var br: std.io.BufferedReader = undefined;
br.init(file.reader(), &buffer);
var bc = std.zig.llvm.BitcodeReader.init(gpa, .{ .br = &br });
defer bc.deinit();

var file_names: std.AutoArrayHashMapUnmanaged(u32, []const u8) = .empty;

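This hunk is the caller-side counterpart of the BitcodeReader change earlier in the commit: rather than wrapping `file.reader()` with `std.io.bufferedReader` and erasing it to `AnyReader`, the caller owns a fixed buffer, initializes a `std.io.BufferedReader` in place, and hands the pointer to the consumer. A hedged end-to-end sketch using only calls shown in this diff; the path, buffer size, and `gpa` are illustrative:

// Sketch: feeding the migrated BitcodeReader from a file.
var buffer: [1024]u8 = undefined;
const file = try std.fs.cwd().openFile("example.bc", .{}); // hypothetical path
defer file.close();
var br: std.io.BufferedReader = undefined;
br.init(file.reader(), &buffer);
var bc = std.zig.llvm.BitcodeReader.init(gpa, .{ .br = &br, .keep_names = true });
defer bc.deinit();
while (try bc.next()) |item| {
    _ = item; // block/record handling elided
}
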
@ -2709,7 +2710,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
|
||||
const prefix = man.cache.prefixes()[pp.prefix];
|
||||
return comp.setMiscFailure(
|
||||
.check_whole_cache,
|
||||
"failed to check cache: '{}{s}' {s} {s}",
|
||||
"failed to check cache: '{f}{s}' {s} {s}",
|
||||
.{ prefix, pp.sub_path, @tagName(man.diagnostic), @errorName(op.err) },
|
||||
);
|
||||
},
|
||||
@ -2926,7 +2927,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
|
||||
renameTmpIntoCache(comp.dirs.local_cache, tmp_dir_sub_path, o_sub_path) catch |err| {
|
||||
return comp.setMiscFailure(
|
||||
.rename_results,
|
||||
"failed to rename compilation results ('{}{s}') into local cache ('{}{s}'): {s}",
|
||||
"failed to rename compilation results ('{f}{s}') into local cache ('{f}{s}'): {s}",
|
||||
.{
|
||||
comp.dirs.local_cache, tmp_dir_sub_path,
|
||||
comp.dirs.local_cache, o_sub_path,
|
||||
@ -4847,7 +4848,7 @@ fn docsCopyFallible(comp: *Compilation) anyerror!void {
|
||||
var out_dir = docs_path.root_dir.handle.makeOpenPath(docs_path.sub_path, .{}) catch |err| {
|
||||
return comp.lockAndSetMiscFailure(
|
||||
.docs_copy,
|
||||
"unable to create output directory '{}': {s}",
|
||||
"unable to create output directory '{f}': {s}",
|
||||
.{ docs_path, @errorName(err) },
|
||||
);
|
||||
};
|
||||
@ -4867,7 +4868,7 @@ fn docsCopyFallible(comp: *Compilation) anyerror!void {
|
||||
var tar_file = out_dir.createFile("sources.tar", .{}) catch |err| {
|
||||
return comp.lockAndSetMiscFailure(
|
||||
.docs_copy,
|
||||
"unable to create '{}/sources.tar': {s}",
|
||||
"unable to create '{f}/sources.tar': {s}",
|
||||
.{ docs_path, @errorName(err) },
|
||||
);
|
||||
};
|
||||
@ -4896,7 +4897,7 @@ fn docsCopyModule(comp: *Compilation, module: *Package.Module, name: []const u8,
|
||||
const root_dir, const sub_path = root.openInfo(comp.dirs);
|
||||
break :d root_dir.openDir(sub_path, .{ .iterate = true });
|
||||
} catch |err| {
|
||||
return comp.lockAndSetMiscFailure(.docs_copy, "unable to open directory '{}': {s}", .{
|
||||
return comp.lockAndSetMiscFailure(.docs_copy, "unable to open directory '{f}': {s}", .{
|
||||
root.fmt(comp), @errorName(err),
|
||||
});
|
||||
};
|
||||
@ -5142,7 +5143,7 @@ fn workerUpdateBuiltinFile(comp: *Compilation, file: *Zcu.File) void {
|
||||
defer comp.mutex.unlock();
|
||||
comp.setMiscFailure(
|
||||
.write_builtin_zig,
|
||||
"unable to write '{}': {s}",
|
||||
"unable to write '{f}': {s}",
|
||||
.{ file.path.fmt(comp), @errorName(err) },
|
||||
);
|
||||
};
|
||||
@ -5863,7 +5864,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: std.Pr
|
||||
|
||||
try child.spawn();
|
||||
|
||||
const stderr = try child.stderr.?.reader().readAllAlloc(arena, std.math.maxInt(usize));
|
||||
const stderr = try child.stderr.?.readToEndAlloc(arena, .unlimited);
|
||||
|
||||
const term = child.wait() catch |err| {
|
||||
return comp.failCObj(c_object, "failed to spawn zig clang {s}: {s}", .{ argv.items[0], @errorName(err) });
|
||||
@ -6023,13 +6024,12 @@ fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32
|
||||
|
||||
// In .rc files, a " within a quoted string is escaped as ""
|
||||
const fmtRcEscape = struct {
|
||||
fn formatRcEscape(bytes: []const u8, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
|
||||
fn formatRcEscape(bytes: []const u8, bw: *std.io.BufferedWriter, comptime fmt: []const u8) anyerror!void {
|
||||
_ = fmt;
|
||||
_ = options;
|
||||
for (bytes) |byte| switch (byte) {
|
||||
'"' => try writer.writeAll("\"\""),
|
||||
'\\' => try writer.writeAll("\\\\"),
|
||||
else => try writer.writeByte(byte),
|
||||
'"' => try bw.writeAll("\"\""),
|
||||
'\\' => try bw.writeAll("\\\\"),
|
||||
else => try bw.writeByte(byte),
|
||||
};
|
||||
}
|
||||
|
||||
@ -6047,7 +6047,7 @@ fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32
|
||||
// 24 is RT_MANIFEST
|
||||
const resource_type = 24;
|
||||
|
||||
const input = try std.fmt.allocPrint(arena, "{} {} \"{s}\"", .{ resource_id, resource_type, fmtRcEscape(src_path) });
|
||||
const input = try std.fmt.allocPrint(arena, "{} {} \"{f}\"", .{ resource_id, resource_type, fmtRcEscape(src_path) });
|
||||
|
||||
try o_dir.writeFile(.{ .sub_path = rc_basename, .data = input });
|
||||
|
||||
@ -6227,13 +6227,10 @@ fn spawnZigRc(
|
||||
const stdout = poller.fifo(.stdout);
|
||||
|
||||
poll: while (true) {
|
||||
while (stdout.readableLength() < @sizeOf(std.zig.Server.Message.Header)) {
|
||||
if (!(try poller.poll())) break :poll;
|
||||
}
|
||||
const header = stdout.reader().readStruct(std.zig.Server.Message.Header) catch unreachable;
|
||||
while (stdout.readableLength() < header.bytes_len) {
|
||||
if (!(try poller.poll())) break :poll;
|
||||
}
|
||||
while (stdout.readableLength() < @sizeOf(std.zig.Server.Message.Header)) if (!try poller.poll()) break :poll;
|
||||
var header: std.zig.Server.Message.Header = undefined;
|
||||
assert(stdout.read(std.mem.asBytes(&header)) == @sizeOf(std.zig.Server.Message.Header));
|
||||
while (stdout.readableLength() < header.bytes_len) if (!try poller.poll()) break :poll;
|
||||
const body = stdout.readableSliceOfLen(header.bytes_len);
|
||||
|
||||
switch (header.tag) {
|
||||
|
||||
@ -1888,17 +1888,12 @@ pub const NullTerminatedString = enum(u32) {
string: NullTerminatedString,
ip: *const InternPool,
};
fn format(
data: FormatData,
comptime specifier: []const u8,
_: std.fmt.FormatOptions,
writer: anytype,
) @TypeOf(writer).Error!void {
fn format(data: FormatData, bw: *std.io.BufferedWriter, comptime specifier: []const u8) anyerror!void {
const slice = data.string.toSlice(data.ip);
if (comptime std.mem.eql(u8, specifier, "")) {
try writer.writeAll(slice);
try bw.writeAll(slice);
} else if (comptime std.mem.eql(u8, specifier, "i")) {
try writer.print("{p}", .{std.zig.fmtId(slice)});
try bw.print("{fp}", .{std.zig.fmtId(slice)});
} else @compileError("invalid format string '" ++ specifier ++ "' for '" ++ @typeName(NullTerminatedString) ++ "'");
}

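Nested formatter specifiers gain the same `f` prefix as the plain `{}` call sites: `{p}` becomes `{fp}` here, and elsewhere in the commit `{p_}`, `{q}`, and `{c}` become `{fp_}`, `{fq}`, and `{fc}`. A hedged one-liner showing the `std.zig.fmtId` call-site change as it appears in these hunks:

// Sketch: escaped-identifier printing with the f-prefixed specifier.
fn emitField(bw: *std.io.BufferedWriter, name: []const u8) anyerror!void {
    try bw.print(".{fp_} = true,\n", .{std.zig.fmtId(name)}); // was ".{p_} = true,\n"
}
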
@ -9758,7 +9753,7 @@ fn finishFuncInstance(
|
||||
const fn_namespace = fn_owner_nav.analysis.?.namespace;
|
||||
|
||||
// TODO: improve this name
|
||||
const nav_name = try ip.getOrPutStringFmt(gpa, tid, "{}__anon_{d}", .{
|
||||
const nav_name = try ip.getOrPutStringFmt(gpa, tid, "{f}__anon_{d}", .{
|
||||
fn_owner_nav.name.fmt(ip), @intFromEnum(func_index),
|
||||
}, .no_embedded_nulls);
|
||||
const nav_index = try ip.createNav(gpa, tid, .{
|
||||
@ -11415,12 +11410,12 @@ pub fn dumpGenericInstancesFallible(ip: *const InternPool, allocator: Allocator)
|
||||
var it = instances.iterator();
|
||||
while (it.next()) |entry| {
|
||||
const generic_fn_owner_nav = ip.getNav(ip.funcDeclInfo(entry.key_ptr.*).owner_nav);
|
||||
try bw.print("{} ({}): \n", .{ generic_fn_owner_nav.name.fmt(ip), entry.value_ptr.items.len });
|
||||
try bw.print("{f} ({}): \n", .{ generic_fn_owner_nav.name.fmt(ip), entry.value_ptr.items.len });
|
||||
for (entry.value_ptr.items) |index| {
|
||||
const unwrapped_index = index.unwrap(ip);
|
||||
const func = ip.extraFuncInstance(unwrapped_index.tid, unwrapped_index.getExtra(ip), unwrapped_index.getData(ip));
|
||||
const owner_nav = ip.getNav(func.owner_nav);
|
||||
try bw.print(" {}: (", .{owner_nav.name.fmt(ip)});
|
||||
try bw.print(" {f}: (", .{owner_nav.name.fmt(ip)});
|
||||
for (func.comptime_args.get(ip)) |arg| {
|
||||
if (arg != .none) {
|
||||
const key = ip.indexToKey(arg);
|
||||
|
||||
@ -134,7 +134,7 @@ pub const Hash = struct {
|
||||
}
|
||||
var bin_digest: [Algo.digest_length]u8 = undefined;
|
||||
Algo.hash(sub_path, &bin_digest, .{});
|
||||
_ = std.fmt.bufPrint(result.bytes[i..], "{}", .{std.fmt.fmtSliceHexLower(&bin_digest)}) catch unreachable;
|
||||
_ = std.fmt.bufPrint(result.bytes[i..], "{x}", .{&bin_digest}) catch unreachable;
|
||||
return result;
|
||||
}
|
||||
};
|
||||
|
||||
@ -185,7 +185,7 @@ pub const JobQueue = struct {
|
||||
const hash_slice = hash.toSlice();
|
||||
|
||||
try buf.print(
|
||||
\\ pub const {} = struct {{
|
||||
\\ pub const {f} = struct {{
|
||||
\\
|
||||
, .{std.zig.fmtId(hash_slice)});
|
||||
|
||||
@ -211,13 +211,13 @@ pub const JobQueue = struct {
|
||||
}
|
||||
|
||||
try buf.print(
|
||||
\\ pub const build_root = "{q}";
|
||||
\\ pub const build_root = "{fq}";
|
||||
\\
|
||||
, .{fetch.package_root});
|
||||
|
||||
if (fetch.has_build_zig) {
|
||||
try buf.print(
|
||||
\\ pub const build_zig = @import("{}");
|
||||
\\ pub const build_zig = @import("{f}");
|
||||
\\
|
||||
, .{std.zig.fmtEscapes(hash_slice)});
|
||||
}
|
||||
@ -230,7 +230,7 @@ pub const JobQueue = struct {
|
||||
for (manifest.dependencies.keys(), manifest.dependencies.values()) |name, dep| {
|
||||
const h = depDigest(fetch.package_root, jq.global_cache, dep) orelse continue;
|
||||
try buf.print(
|
||||
" .{{ \"{}\", \"{}\" }},\n",
|
||||
" .{{ \"{f}\", \"{f}\" }},\n",
|
||||
.{ std.zig.fmtEscapes(name), std.zig.fmtEscapes(h.toSlice()) },
|
||||
);
|
||||
}
|
||||
@ -262,7 +262,7 @@ pub const JobQueue = struct {
|
||||
for (root_manifest.dependencies.keys(), root_manifest.dependencies.values()) |name, dep| {
|
||||
const h = depDigest(root_fetch.package_root, jq.global_cache, dep) orelse continue;
|
||||
try buf.print(
|
||||
" .{{ \"{}\", \"{}\" }},\n",
|
||||
" .{{ \"{f}\", \"{f}\" }},\n",
|
||||
.{ std.zig.fmtEscapes(name), std.zig.fmtEscapes(h.toSlice()) },
|
||||
);
|
||||
}
|
||||
@ -353,7 +353,7 @@ pub fn run(f: *Fetch) RunError!void {
|
||||
if (!std.mem.startsWith(u8, pkg_root.sub_path, expected_prefix)) {
|
||||
return f.fail(
|
||||
f.location_tok,
|
||||
try eb.printString("dependency path outside project: '{}'", .{pkg_root}),
|
||||
try eb.printString("dependency path outside project: '{f}'", .{pkg_root}),
|
||||
);
|
||||
}
|
||||
}
|
||||
@ -604,7 +604,7 @@ pub fn computedPackageHash(f: *const Fetch) Package.Hash {
|
||||
const saturated_size = std.math.cast(u32, f.computed_hash.total_size) orelse std.math.maxInt(u32);
|
||||
if (f.manifest) |man| {
|
||||
var version_buffer: [32]u8 = undefined;
|
||||
const version: []const u8 = std.fmt.bufPrint(&version_buffer, "{}", .{man.version}) catch &version_buffer;
|
||||
const version: []const u8 = std.fmt.bufPrint(&version_buffer, "{f}", .{man.version}) catch &version_buffer;
|
||||
return .init(f.computed_hash.digest, man.name, version, man.id, saturated_size);
|
||||
}
|
||||
// In the future build.zig.zon fields will be added to allow overriding these values
|
||||
@ -622,7 +622,7 @@ fn checkBuildFileExistence(f: *Fetch) RunError!void {
|
||||
error.FileNotFound => {},
|
||||
else => |e| {
|
||||
try eb.addRootErrorMessage(.{
|
||||
.msg = try eb.printString("unable to access '{}{s}': {s}", .{
|
||||
.msg = try eb.printString("unable to access '{f}{s}': {s}", .{
|
||||
f.package_root, Package.build_zig_basename, @errorName(e),
|
||||
}),
|
||||
});
|
||||
@ -636,9 +636,9 @@ fn loadManifest(f: *Fetch, pkg_root: Cache.Path) RunError!void {
|
||||
const eb = &f.error_bundle;
|
||||
const arena = f.arena.allocator();
|
||||
const manifest_bytes = pkg_root.root_dir.handle.readFileAllocOptions(
|
||||
arena,
|
||||
try fs.path.join(arena, &.{ pkg_root.sub_path, Manifest.basename }),
|
||||
Manifest.max_bytes,
|
||||
arena,
|
||||
.limited(Manifest.max_bytes),
|
||||
null,
|
||||
.@"1",
|
||||
0,
|
||||
@ -647,7 +647,7 @@ fn loadManifest(f: *Fetch, pkg_root: Cache.Path) RunError!void {
|
||||
else => |e| {
|
||||
const file_path = try pkg_root.join(arena, Manifest.basename);
|
||||
try eb.addRootErrorMessage(.{
|
||||
.msg = try eb.printString("unable to load package manifest '{}': {s}", .{
|
||||
.msg = try eb.printString("unable to load package manifest '{f}': {s}", .{
|
||||
file_path, @errorName(e),
|
||||
}),
|
||||
});
|
||||
@ -659,7 +659,7 @@ fn loadManifest(f: *Fetch, pkg_root: Cache.Path) RunError!void {
|
||||
ast.* = try std.zig.Ast.parse(arena, manifest_bytes, .zon);
|
||||
|
||||
if (ast.errors.len > 0) {
|
||||
const file_path = try std.fmt.allocPrint(arena, "{}" ++ fs.path.sep_str ++ Manifest.basename, .{pkg_root});
|
||||
const file_path = try std.fmt.allocPrint(arena, "{f}" ++ fs.path.sep_str ++ Manifest.basename, .{pkg_root});
|
||||
try std.zig.putAstErrorsIntoBundle(arena, ast.*, file_path, eb);
|
||||
return error.FetchFailed;
|
||||
}
|
||||
@ -672,7 +672,7 @@ fn loadManifest(f: *Fetch, pkg_root: Cache.Path) RunError!void {
|
||||
const manifest = &f.manifest.?;
|
||||
|
||||
if (manifest.errors.len > 0) {
|
||||
const src_path = try eb.printString("{}" ++ fs.path.sep_str ++ "{s}", .{ pkg_root, Manifest.basename });
|
||||
const src_path = try eb.printString("{f}" ++ fs.path.sep_str ++ "{s}", .{ pkg_root, Manifest.basename });
|
||||
try manifest.copyErrorsIntoBundle(ast.*, src_path, eb);
|
||||
return error.FetchFailed;
|
||||
}
|
||||
@ -827,7 +827,7 @@ fn srcLoc(
|
||||
const ast = f.parent_manifest_ast orelse return .none;
|
||||
const eb = &f.error_bundle;
|
||||
const start_loc = ast.tokenLocation(0, tok);
|
||||
const src_path = try eb.printString("{}" ++ fs.path.sep_str ++ Manifest.basename, .{f.parent_package_root});
|
||||
const src_path = try eb.printString("{f}" ++ fs.path.sep_str ++ Manifest.basename, .{f.parent_package_root});
|
||||
const msg_off = 0;
|
||||
return eb.addSourceLocation(.{
|
||||
.src_path = src_path,
|
||||
@ -1512,7 +1512,7 @@ fn computeHash(f: *Fetch, pkg_path: Cache.Path, filter: Filter) RunError!Compute
|
||||
|
||||
while (walker.next() catch |err| {
|
||||
try eb.addRootErrorMessage(.{ .msg = try eb.printString(
|
||||
"unable to walk temporary directory '{}': {s}",
|
||||
"unable to walk temporary directory '{f}': {s}",
|
||||
.{ pkg_path, @errorName(err) },
|
||||
) });
|
||||
return error.FetchFailed;
|
||||
|
||||
@ -119,15 +119,9 @@ pub const Oid = union(Format) {
|
||||
} else error.InvalidOid;
|
||||
}
|
||||
|
||||
pub fn format(
|
||||
oid: Oid,
|
||||
comptime fmt: []const u8,
|
||||
options: std.fmt.Options,
|
||||
writer: *std.io.BufferedWriter,
|
||||
) anyerror!void {
|
||||
pub fn format(oid: Oid, bw: *std.io.BufferedWriter, comptime fmt: []const u8) anyerror!void {
|
||||
_ = fmt;
|
||||
_ = options;
|
||||
try writer.print("{x}", .{oid.slice()});
|
||||
try bw.print("{x}", .{oid.slice()});
|
||||
}
|
||||
|
||||
pub fn slice(oid: *const Oid) []const u8 {
|
||||
|
||||
@ -401,7 +401,7 @@ const Parse = struct {
|
||||
return fail(p, main_token, "name must be a valid bare zig identifier (hint: switch from string to enum literal)", .{});
|
||||
|
||||
if (name.len > max_name_len)
|
||||
return fail(p, main_token, "name '{}' exceeds max length of {d}", .{
|
||||
return fail(p, main_token, "name '{f}' exceeds max length of {d}", .{
|
||||
std.zig.fmtId(name), max_name_len,
|
||||
});
|
||||
|
||||
@ -416,7 +416,7 @@ const Parse = struct {
|
||||
return fail(p, main_token, "name must be a valid bare zig identifier", .{});
|
||||
|
||||
if (ident_name.len > max_name_len)
|
||||
return fail(p, main_token, "name '{}' exceeds max length of {d}", .{
|
||||
return fail(p, main_token, "name '{f}' exceeds max length of {d}", .{
|
||||
std.zig.fmtId(ident_name), max_name_len,
|
||||
});
|
||||
|
||||
|
||||
835
src/Sema.zig
File diff suppressed because it is too large
@ -338,7 +338,7 @@ fn failUnsupportedResultType(
|
||||
const gpa = sema.gpa;
|
||||
const pt = sema.pt;
|
||||
return sema.failWithOwnedErrorMsg(self.block, msg: {
|
||||
const msg = try sema.errMsg(self.import_loc, "type '{}' is not available in ZON", .{ty.fmt(pt)});
|
||||
const msg = try sema.errMsg(self.import_loc, "type '{f}' is not available in ZON", .{ty.fmt(pt)});
|
||||
errdefer msg.destroy(gpa);
|
||||
if (opt_note) |n| try sema.errNote(self.import_loc, msg, "{s}", .{n});
|
||||
break :msg msg;
|
||||
@ -362,7 +362,7 @@ fn lowerExprKnownResTy(self: *LowerZon, node: Zoir.Node.Index, res_ty: Type) Com
|
||||
return self.lowerExprKnownResTyInner(node, res_ty) catch |err| switch (err) {
|
||||
error.WrongType => return self.fail(
|
||||
node,
|
||||
"expected type '{}'",
|
||||
"expected type '{f}'",
|
||||
.{res_ty.fmt(pt)},
|
||||
),
|
||||
else => |e| return e,
|
||||
@ -428,7 +428,7 @@ fn lowerExprKnownResTyInner(
|
||||
.frame,
|
||||
.@"anyframe",
|
||||
.void,
|
||||
=> return self.fail(node, "type '{}' not available in ZON", .{res_ty.fmt(pt)}),
|
||||
=> return self.fail(node, "type '{f}' not available in ZON", .{res_ty.fmt(pt)}),
|
||||
}
|
||||
}
|
||||
|
||||
@ -458,7 +458,7 @@ fn lowerInt(
|
||||
// If lhs is unsigned and rhs is less than 0, we're out of bounds
|
||||
if (lhs_info.signedness == .unsigned and rhs < 0) return self.fail(
|
||||
node,
|
||||
"type '{}' cannot represent integer value '{}'",
|
||||
"type '{f}' cannot represent integer value '{}'",
|
||||
.{ res_ty.fmt(self.sema.pt), rhs },
|
||||
);
|
||||
|
||||
@ -478,7 +478,7 @@ fn lowerInt(
|
||||
if (rhs < min_int or rhs > max_int) {
|
||||
return self.fail(
|
||||
node,
|
||||
"type '{}' cannot represent integer value '{}'",
|
||||
"type '{f}' cannot represent integer value '{}'",
|
||||
.{ res_ty.fmt(self.sema.pt), rhs },
|
||||
);
|
||||
}
|
||||
@ -496,7 +496,7 @@ fn lowerInt(
|
||||
if (!val.fitsInTwosComp(int_info.signedness, int_info.bits)) {
|
||||
return self.fail(
|
||||
node,
|
||||
"type '{}' cannot represent integer value '{}'",
|
||||
"type '{f}' cannot represent integer value '{f}'",
|
||||
.{ res_ty.fmt(self.sema.pt), val },
|
||||
);
|
||||
}
|
||||
@ -517,7 +517,7 @@ fn lowerInt(
|
||||
switch (big_int.setFloat(val, .trunc)) {
|
||||
.inexact => return self.fail(
|
||||
node,
|
||||
"fractional component prevents float value '{}' from coercion to type '{}'",
|
||||
"fractional component prevents float value '{}' from coercion to type '{f}'",
|
||||
.{ val, res_ty.fmt(self.sema.pt) },
|
||||
),
|
||||
.exact => {},
|
||||
@ -528,7 +528,7 @@ fn lowerInt(
|
||||
if (!big_int.toConst().fitsInTwosComp(int_info.signedness, int_info.bits)) {
|
||||
return self.fail(
|
||||
node,
|
||||
"type '{}' cannot represent integer value '{}'",
|
||||
"type '{}' cannot represent integer value '{f}'",
|
||||
.{ val, res_ty.fmt(self.sema.pt) },
|
||||
);
|
||||
}
|
||||
@ -550,7 +550,7 @@ fn lowerInt(
|
||||
if (val >= out_of_range) {
|
||||
return self.fail(
|
||||
node,
|
||||
"type '{}' cannot represent integer value '{}'",
|
||||
"type '{f}' cannot represent integer value '{}'",
|
||||
.{ res_ty.fmt(self.sema.pt), val },
|
||||
);
|
||||
}
|
||||
@ -584,7 +584,7 @@ fn lowerFloat(
|
||||
.pos_inf => b: {
|
||||
if (res_ty.toIntern() == .comptime_float_type) return self.fail(
|
||||
node,
|
||||
"expected type '{}'",
|
||||
"expected type '{f}'",
|
||||
.{res_ty.fmt(self.sema.pt)},
|
||||
);
|
||||
break :b try self.sema.pt.floatValue(res_ty, std.math.inf(f128));
|
||||
@ -592,7 +592,7 @@ fn lowerFloat(
|
||||
.neg_inf => b: {
|
||||
if (res_ty.toIntern() == .comptime_float_type) return self.fail(
|
||||
node,
|
||||
"expected type '{}'",
|
||||
"expected type '{f}'",
|
||||
.{res_ty.fmt(self.sema.pt)},
|
||||
);
|
||||
break :b try self.sema.pt.floatValue(res_ty, -std.math.inf(f128));
|
||||
@ -600,7 +600,7 @@ fn lowerFloat(
|
||||
.nan => b: {
|
||||
if (res_ty.toIntern() == .comptime_float_type) return self.fail(
|
||||
node,
|
||||
"expected type '{}'",
|
||||
"expected type '{f}'",
|
||||
.{res_ty.fmt(self.sema.pt)},
|
||||
);
|
||||
break :b try self.sema.pt.floatValue(res_ty, std.math.nan(f128));
|
||||
@ -661,7 +661,7 @@ fn lowerEnum(self: *LowerZon, node: Zoir.Node.Index, res_ty: Type) !InternPool.I
|
||||
const field_index = res_ty.enumFieldIndex(field_name_interned, self.sema.pt.zcu) orelse {
|
||||
return self.fail(
|
||||
node,
|
||||
"enum {} has no member named '{}'",
|
||||
"enum {f} has no member named '{f}'",
|
||||
.{
|
||||
res_ty.fmt(self.sema.pt),
|
||||
std.zig.fmtId(field_name.get(self.file.zoir.?)),
|
||||
@ -795,7 +795,7 @@ fn lowerStruct(self: *LowerZon, node: Zoir.Node.Index, res_ty: Type) !InternPool
|
||||
const field_node = fields.vals.at(@intCast(i));
|
||||
|
||||
const name_index = struct_info.nameIndex(ip, field_name) orelse {
|
||||
return self.fail(field_node, "unexpected field '{}'", .{field_name.fmt(ip)});
|
||||
return self.fail(field_node, "unexpected field '{f}'", .{field_name.fmt(ip)});
|
||||
};
|
||||
|
||||
const field_type: Type = .fromInterned(struct_info.field_types.get(ip)[name_index]);
|
||||
@ -816,7 +816,7 @@ fn lowerStruct(self: *LowerZon, node: Zoir.Node.Index, res_ty: Type) !InternPool
|
||||
|
||||
const field_names = struct_info.field_names.get(ip);
|
||||
for (field_values, field_names) |*value, name| {
|
||||
if (value.* == .none) return self.fail(node, "missing field '{}'", .{name.fmt(ip)});
|
||||
if (value.* == .none) return self.fail(node, "missing field '{f}'", .{name.fmt(ip)});
|
||||
}
|
||||
|
||||
return self.sema.pt.intern(.{ .aggregate = .{
|
||||
@ -934,7 +934,7 @@ fn lowerUnion(self: *LowerZon, node: Zoir.Node.Index, res_ty: Type) !InternPool.
|
||||
.struct_literal => b: {
|
||||
const fields: @FieldType(Zoir.Node, "struct_literal") = switch (node.get(self.file.zoir.?)) {
|
||||
.struct_literal => |fields| fields,
|
||||
else => return self.fail(node, "expected type '{}'", .{res_ty.fmt(self.sema.pt)}),
|
||||
else => return self.fail(node, "expected type '{f}'", .{res_ty.fmt(self.sema.pt)}),
|
||||
};
|
||||
if (fields.names.len != 1) {
|
||||
return error.WrongType;
|
||||
|
||||
158
src/Type.zig
@ -142,9 +142,9 @@ const FormatContext = struct {
pt: Zcu.PerThread,
};

fn format2(ctx: FormatContext, bw: *std.io.BufferedWriter, comptime f: []const u8) anyerror!usize {
fn format2(ctx: FormatContext, bw: *std.io.BufferedWriter, comptime f: []const u8) anyerror!void {
comptime assert(f.len == 0);
return print(ctx.ty, bw, ctx.pt);
try print(ctx.ty, bw, ctx.pt);
}

pub fn fmtDebug(ty: Type) std.fmt.Formatter(dump) {
@ -153,20 +153,14 @@ pub fn fmtDebug(ty: Type) std.fmt.Formatter(dump) {

/// This is a debug function. In order to print types in a meaningful way
/// we also need access to the module.
pub fn dump(
start_type: Type,
comptime unused_format_string: []const u8,
options: std.fmt.FormatOptions,
writer: anytype,
) @TypeOf(writer).Error!void {
_ = options;
pub fn dump(start_type: Type, bw: *std.io.BufferedWriter, comptime unused_format_string: []const u8) anyerror!void {
comptime assert(unused_format_string.len == 0);
return writer.print("{any}", .{start_type.ip_index});
return bw.print("{any}", .{start_type.ip_index});
}

/// Prints a name suitable for `@typeName`.
/// TODO: take an `opt_sema` to pass to `fmtValue` when printing sentinels.
pub fn print(ty: Type, bw: *std.io.BufferedWriter, pt: Zcu.PerThread) anyerror!usize {
pub fn print(ty: Type, bw: *std.io.BufferedWriter, pt: Zcu.PerThread) anyerror!void {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
switch (ip.indexToKey(ty.toIntern())) {
@ -176,23 +170,22 @@ pub fn print(ty: Type, bw: *std.io.BufferedWriter, pt: Zcu.PerThread) anyerror!u
.signed => 'i',
.unsigned => 'u',
};
return bw.print("{c}{d}", .{ sign_char, int_type.bits });
try bw.print("{c}{d}", .{ sign_char, int_type.bits });
},
.ptr_type => {
var n: usize = 0;
const info = ty.ptrInfo(zcu);

if (info.sentinel != .none) switch (info.flags.size) {
.one, .c => unreachable,
.many => n += try bw.print("[*:{}]", .{Value.fromInterned(info.sentinel).fmtValue(pt)}),
.slice => n += try bw.print("[:{}]", .{Value.fromInterned(info.sentinel).fmtValue(pt)}),
.many => try bw.print("[*:{f}]", .{Value.fromInterned(info.sentinel).fmtValue(pt)}),
.slice => try bw.print("[:{f}]", .{Value.fromInterned(info.sentinel).fmtValue(pt)}),
} else switch (info.flags.size) {
.one => n += try bw.writeAll("*"),
.many => n += try bw.writeAll("[*]"),
.c => n += try bw.writeAll("[*c]"),
.slice => n += try bw.writeAll("[]"),
.one => try bw.writeAll("*"),
.many => try bw.writeAll("[*]"),
.c => try bw.writeAll("[*c]"),
.slice => try bw.writeAll("[]"),
}
if (info.flags.is_allowzero and info.flags.size != .c) n += try bw.writeAll("allowzero ");
if (info.flags.is_allowzero and info.flags.size != .c) try bw.writeAll("allowzero ");
if (info.flags.alignment != .none or
info.packed_offset.host_size != 0 or
info.flags.vector_index != .none)
@ -201,83 +194,72 @@ pub fn print(ty: Type, bw: *std.io.BufferedWriter, pt: Zcu.PerThread) anyerror!u
info.flags.alignment
else
Type.fromInterned(info.child).abiAlignment(pt.zcu);
n += try bw.print("align({d}", .{alignment.toByteUnits() orelse 0});
try bw.print("align({d}", .{alignment.toByteUnits() orelse 0});

if (info.packed_offset.bit_offset != 0 or info.packed_offset.host_size != 0) {
n += try bw.print(":{d}:{d}", .{
try bw.print(":{d}:{d}", .{
info.packed_offset.bit_offset, info.packed_offset.host_size,
});
}
if (info.flags.vector_index == .runtime) {
n += try bw.writeAll(":?");
try bw.writeAll(":?");
} else if (info.flags.vector_index != .none) {
n += try bw.print(":{d}", .{@intFromEnum(info.flags.vector_index)});
try bw.print(":{d}", .{@intFromEnum(info.flags.vector_index)});
}
n += try bw.writeAll(") ");
try bw.writeAll(") ");
}
if (info.flags.address_space != .generic) {
n += try bw.print("addrspace(.{s}) ", .{@tagName(info.flags.address_space)});
try bw.print("addrspace(.{s}) ", .{@tagName(info.flags.address_space)});
}
if (info.flags.is_const) n += try bw.writeAll("const ");
if (info.flags.is_volatile) n += try bw.writeAll("volatile ");
if (info.flags.is_const) try bw.writeAll("const ");
if (info.flags.is_volatile) try bw.writeAll("volatile ");

n += try print(Type.fromInterned(info.child), bw, pt);
return n;
try print(Type.fromInterned(info.child), bw, pt);
},
.array_type => |array_type| {
var n: usize = 0;
if (array_type.sentinel == .none) {
n += try bw.print("[{d}]", .{array_type.len});
n += try print(Type.fromInterned(array_type.child), bw, pt);
try bw.print("[{d}]", .{array_type.len});
try print(Type.fromInterned(array_type.child), bw, pt);
} else {
n += try bw.print("[{d}:{}]", .{
try bw.print("[{d}:{f}]", .{
array_type.len,
Value.fromInterned(array_type.sentinel).fmtValue(pt),
});
n += try print(Type.fromInterned(array_type.child), bw, pt);
try print(Type.fromInterned(array_type.child), bw, pt);
}
return n;
},
.vector_type => |vector_type| {
var n: usize = 0;
n += try bw.print("@Vector({d}, ", .{vector_type.len});
n += try print(Type.fromInterned(vector_type.child), bw, pt);
n += try bw.writeAll(")");
return n;
try bw.print("@Vector({d}, ", .{vector_type.len});
try print(Type.fromInterned(vector_type.child), bw, pt);
try bw.writeAll(")");
},
.opt_type => |child| {
var n: usize = 0;
n += try bw.writeByte('?');
n += try print(Type.fromInterned(child), bw, pt);
return n;
try bw.writeByte('?');
try print(Type.fromInterned(child), bw, pt);
},
.error_union_type => |error_union_type| {
var n: usize = 0;
n += try print(Type.fromInterned(error_union_type.error_set_type), bw, pt);
n += try bw.writeByte('!');
try print(Type.fromInterned(error_union_type.error_set_type), bw, pt);
try bw.writeByte('!');
if (error_union_type.payload_type == .generic_poison_type) {
n += try bw.writeAll("anytype");
try bw.writeAll("anytype");
} else {
n += try print(Type.fromInterned(error_union_type.payload_type), bw, pt);
try print(Type.fromInterned(error_union_type.payload_type), bw, pt);
}
return n;
},
.inferred_error_set_type => |func_index| {
const func_nav = ip.getNav(zcu.funcInfo(func_index).owner_nav);
return bw.print("@typeInfo(@typeInfo(@TypeOf({})).@\"fn\".return_type.?).error_union.error_set", .{
return bw.print("@typeInfo(@typeInfo(@TypeOf({f})).@\"fn\".return_type.?).error_union.error_set", .{
func_nav.fqn.fmt(ip),
});
},
.error_set_type => |error_set_type| {
var n: usize = 0;
const names = error_set_type.names;
n += try bw.writeAll("error{");
try bw.writeAll("error{");
for (names.get(ip), 0..) |name, i| {
if (i != 0) n += try bw.writeByte(',');
n += try bw.print("{}", .{name.fmt(ip)});
if (i != 0) try bw.writeByte(',');
try bw.print("{f}", .{name.fmt(ip)});
}
n += try bw.writeAll("}");
return n;
try bw.writeAll("}");
},
.simple_type => |s| switch (s) {
.f16,
@ -318,91 +300,85 @@ pub fn print(ty: Type, bw: *std.io.BufferedWriter, pt: Zcu.PerThread) anyerror!u
},
.struct_type => {
const name = ip.loadStructType(ty.toIntern()).name;
return bw.print("{}", .{name.fmt(ip)});
return bw.print("{f}", .{name.fmt(ip)});
},
.tuple_type => |tuple| {
if (tuple.types.len == 0) {
return bw.writeAll("@TypeOf(.{})");
}
var n: usize = 0;
n += try bw.writeAll("struct {");
try bw.writeAll("struct {");
for (tuple.types.get(ip), tuple.values.get(ip), 0..) |field_ty, val, i| {
n += try bw.writeAll(if (i == 0) " " else ", ");
if (val != .none) n += try bw.writeAll("comptime ");
n += try print(Type.fromInterned(field_ty), bw, pt);
if (val != .none) n += try bw.print(" = {}", .{Value.fromInterned(val).fmtValue(pt)});
try bw.writeAll(if (i == 0) " " else ", ");
if (val != .none) try bw.writeAll("comptime ");
try print(Type.fromInterned(field_ty), bw, pt);
if (val != .none) try bw.print(" = {f}", .{Value.fromInterned(val).fmtValue(pt)});
}
n += try bw.writeAll(" }");
return n;
try bw.writeAll(" }");
},

.union_type => {
const name = ip.loadUnionType(ty.toIntern()).name;
return bw.print("{}", .{name.fmt(ip)});
return bw.print("{f}", .{name.fmt(ip)});
},
.opaque_type => {
const name = ip.loadOpaqueType(ty.toIntern()).name;
return bw.print("{}", .{name.fmt(ip)});
return bw.print("{f}", .{name.fmt(ip)});
},
.enum_type => {
const name = ip.loadEnumType(ty.toIntern()).name;
return bw.print("{}", .{name.fmt(ip)});
return bw.print("{f}", .{name.fmt(ip)});
},
.func_type => |fn_info| {
var n: usize = 0;
if (fn_info.is_noinline) {
n += try bw.writeAll("noinline ");
try bw.writeAll("noinline ");
}
n += try bw.writeAll("fn (");
try bw.writeAll("fn (");
const param_types = fn_info.param_types.get(&zcu.intern_pool);
for (param_types, 0..) |param_ty, i| {
if (i != 0) n += try bw.writeAll(", ");
if (i != 0) try bw.writeAll(", ");
if (std.math.cast(u5, i)) |index| {
if (fn_info.paramIsComptime(index)) {
n += try bw.writeAll("comptime ");
try bw.writeAll("comptime ");
}
if (fn_info.paramIsNoalias(index)) {
n += try bw.writeAll("noalias ");
try bw.writeAll("noalias ");
}
}
if (param_ty == .generic_poison_type) {
n += try bw.writeAll("anytype");
try bw.writeAll("anytype");
} else {
n += try print(Type.fromInterned(param_ty), bw, pt);
try print(Type.fromInterned(param_ty), bw, pt);
}
}
if (fn_info.is_var_args) {
if (param_types.len != 0) {
n += try bw.writeAll(", ");
try bw.writeAll(", ");
}
n += try bw.writeAll("...");
try bw.writeAll("...");
}
n += try bw.writeAll(") ");
try bw.writeAll(") ");
if (fn_info.cc != .auto) print_cc: {
if (zcu.getTarget().cCallingConvention()) |ccc| {
if (fn_info.cc.eql(ccc)) {
n += try bw.writeAll("callconv(.c) ");
try bw.writeAll("callconv(.c) ");
break :print_cc;
}
}
switch (fn_info.cc) {
.auto, .@"async", .naked, .@"inline" => n += try bw.print("callconv(.{}) ", .{std.zig.fmtId(@tagName(fn_info.cc))}),
else => n += try bw.print("callconv({any}) ", .{fn_info.cc}),
.auto, .@"async", .naked, .@"inline" => try bw.print("callconv(.{f}) ", .{std.zig.fmtId(@tagName(fn_info.cc))}),
else => try bw.print("callconv({any}) ", .{fn_info.cc}),
}
}
if (fn_info.return_type == .generic_poison_type) {
n += try bw.writeAll("anytype");
try bw.writeAll("anytype");
} else {
n += try print(Type.fromInterned(fn_info.return_type), bw, pt);
try print(Type.fromInterned(fn_info.return_type), bw, pt);
}
return n;
},
.anyframe_type => |child| {
if (child == .none) return bw.writeAll("anyframe");
var n: usize = 0;
n += try bw.writeAll("anyframe->");
n += print(Type.fromInterned(child), bw, pt);
return n;
try bw.writeAll("anyframe->");
try print(Type.fromInterned(child), bw, pt);
},

// values, not types

123
src/Zcu.zig
@ -862,7 +862,7 @@ pub const Namespace = struct {
try ns.fileScope(zcu).renderFullyQualifiedDebugName(writer);
break :sep ':';
};
if (name != .empty) try writer.print("{c}{}", .{ sep, name.fmt(&zcu.intern_pool) });
if (name != .empty) try writer.print("{c}{f}", .{ sep, name.fmt(&zcu.intern_pool) });
}

pub fn internFullyQualifiedName(
@ -874,7 +874,7 @@ pub const Namespace = struct {
) !InternPool.NullTerminatedString {
const ns_name = Type.fromInterned(ns.owner_type).containerTypeName(ip);
if (name == .empty) return ns_name;
return ip.getOrPutStringFmt(gpa, tid, "{}.{}", .{ ns_name.fmt(ip), name.fmt(ip) }, .no_embedded_nulls);
return ip.getOrPutStringFmt(gpa, tid, "{f}.{f}", .{ ns_name.fmt(ip), name.fmt(ip) }, .no_embedded_nulls);
}
};

@ -1101,11 +1101,11 @@ pub const File = struct {
const gpa = pt.zcu.gpa;
const ip = &pt.zcu.intern_pool;
const strings = ip.getLocal(pt.tid).getMutableStrings(gpa);
const slice = try strings.addManyAsSlice(file.fullyQualifiedNameLen());
var fbs = std.io.fixedBufferStream(slice[0]);
file.renderFullyQualifiedName(fbs.writer()) catch unreachable;
assert(fbs.pos == slice[0].len);
return ip.getOrPutTrailingString(gpa, pt.tid, @intCast(slice[0].len), .no_embedded_nulls);
var bw: std.io.BufferedWriter = undefined;
bw.initFixed((try strings.addManyAsSlice(file.fullyQualifiedNameLen()))[0]);
file.renderFullyQualifiedName(&bw) catch unreachable;
assert(bw.end == bw.buffer.len);
return ip.getOrPutTrailingString(gpa, pt.tid, @intCast(bw.end), .no_embedded_nulls);
}

pub const Index = InternPool.FileIndex;
@ -1194,13 +1194,8 @@ pub const ErrorMsg = struct {
gpa.destroy(err_msg);
}

pub fn init(
gpa: Allocator,
src_loc: LazySrcLoc,
comptime format: []const u8,
args: anytype,
) !ErrorMsg {
return ErrorMsg{
pub fn init(gpa: Allocator, src_loc: LazySrcLoc, comptime format: []const u8, args: anytype) !ErrorMsg {
return .{
.src_loc = src_loc,
.msg = try std.fmt.allocPrint(gpa, format, args),
};
@ -2822,7 +2817,9 @@ comptime {
}

pub fn loadZirCache(gpa: Allocator, cache_file: std.fs.File) !Zir {
return loadZirCacheBody(gpa, try cache_file.reader().readStruct(Zir.Header), cache_file);
var header: Zir.Header = undefined;
if (try cache_file.readAll(std.mem.asBytes(&header)) < @sizeOf(Zir.Header)) return error.EndOfStream;
return loadZirCacheBody(gpa, header, cache_file);
}

pub fn loadZirCacheBody(gpa: Allocator, header: Zir.Header, cache_file: std.fs.File) !Zir {
@ -3082,7 +3079,7 @@ pub fn markDependeeOutdated(
marked_po: enum { not_marked_po, marked_po },
dependee: InternPool.Dependee,
) !void {
log.debug("outdated dependee: {}", .{zcu.fmtDependee(dependee)});
log.debug("outdated dependee: {f}", .{zcu.fmtDependee(dependee)});
var it = zcu.intern_pool.dependencyIterator(dependee);
while (it.next()) |depender| {
if (zcu.outdated.getPtr(depender)) |po_dep_count| {
@ -3090,9 +3087,9 @@ pub fn markDependeeOutdated(
.not_marked_po => {},
.marked_po => {
po_dep_count.* -= 1;
log.debug("outdated {} => already outdated {} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender), po_dep_count.* });
log.debug("outdated {f} => already outdated {f} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender), po_dep_count.* });
if (po_dep_count.* == 0) {
log.debug("outdated ready: {}", .{zcu.fmtAnalUnit(depender)});
log.debug("outdated ready: {f}", .{zcu.fmtAnalUnit(depender)});
try zcu.outdated_ready.put(zcu.gpa, depender, {});
}
},
@ -3113,9 +3110,9 @@ pub fn markDependeeOutdated(
depender,
new_po_dep_count,
);
log.debug("outdated {} => new outdated {} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender), new_po_dep_count });
log.debug("outdated {f} => new outdated {f} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender), new_po_dep_count });
if (new_po_dep_count == 0) {
log.debug("outdated ready: {}", .{zcu.fmtAnalUnit(depender)});
log.debug("outdated ready: {f}", .{zcu.fmtAnalUnit(depender)});
try zcu.outdated_ready.put(zcu.gpa, depender, {});
}
// If this is a Decl and was not previously PO, we must recursively
@ -3128,16 +3125,16 @@ pub fn markDependeeOutdated(
}

pub fn markPoDependeeUpToDate(zcu: *Zcu, dependee: InternPool.Dependee) !void {
log.debug("up-to-date dependee: {}", .{zcu.fmtDependee(dependee)});
log.debug("up-to-date dependee: {f}", .{zcu.fmtDependee(dependee)});
var it = zcu.intern_pool.dependencyIterator(dependee);
while (it.next()) |depender| {
if (zcu.outdated.getPtr(depender)) |po_dep_count| {
// This depender is already outdated, but it now has one
// less PO dependency!
po_dep_count.* -= 1;
log.debug("up-to-date {} => {} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender), po_dep_count.* });
log.debug("up-to-date {f} => {f} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender), po_dep_count.* });
if (po_dep_count.* == 0) {
log.debug("outdated ready: {}", .{zcu.fmtAnalUnit(depender)});
log.debug("outdated ready: {f}", .{zcu.fmtAnalUnit(depender)});
try zcu.outdated_ready.put(zcu.gpa, depender, {});
}
continue;
@ -3151,11 +3148,11 @@ pub fn markPoDependeeUpToDate(zcu: *Zcu, dependee: InternPool.Dependee) !void {
};
if (ptr.* > 1) {
ptr.* -= 1;
log.debug("up-to-date {} => {} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender), ptr.* });
log.debug("up-to-date {f} => {f} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender), ptr.* });
continue;
}

log.debug("up-to-date {} => {} po_deps=0 (up-to-date)", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender) });
log.debug("up-to-date {f} => {f} po_deps=0 (up-to-date)", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender) });

// This dependency is no longer PO, i.e. is known to be up-to-date.
assert(zcu.potentially_outdated.swapRemove(depender));
@ -3184,7 +3181,7 @@ fn markTransitiveDependersPotentiallyOutdated(zcu: *Zcu, maybe_outdated: AnalUni
.func => |func_index| .{ .interned = func_index }, // IES
.memoized_state => |stage| .{ .memoized_state = stage },
};
log.debug("potentially outdated dependee: {}", .{zcu.fmtDependee(dependee)});
log.debug("potentially outdated dependee: {f}", .{zcu.fmtDependee(dependee)});
var it = ip.dependencyIterator(dependee);
while (it.next()) |po| {
if (zcu.outdated.getPtr(po)) |po_dep_count| {
@ -3194,17 +3191,17 @@ fn markTransitiveDependersPotentiallyOutdated(zcu: *Zcu, maybe_outdated: AnalUni
_ = zcu.outdated_ready.swapRemove(po);
}
po_dep_count.* += 1;
log.debug("po {} => {} [outdated] po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(po), po_dep_count.* });
log.debug("po {f} => {f} [outdated] po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(po), po_dep_count.* });
continue;
}
if (zcu.potentially_outdated.getPtr(po)) |n| {
// There is now one more PO dependency.
n.* += 1;
log.debug("po {} => {} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(po), n.* });
log.debug("po {f} => {f} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(po), n.* });
continue;
}
try zcu.potentially_outdated.putNoClobber(zcu.gpa, po, 1);
log.debug("po {} => {} po_deps=1", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(po) });
log.debug("po {f} => {f} po_deps=1", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(po) });
// This AnalUnit was not already PO, so we must recursively mark its dependers as also PO.
try zcu.markTransitiveDependersPotentiallyOutdated(po);
}
@ -3233,7 +3230,7 @@ pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?AnalUnit {

if (zcu.outdated_ready.count() > 0) {
const unit = zcu.outdated_ready.keys()[0];
log.debug("findOutdatedToAnalyze: trivial {}", .{zcu.fmtAnalUnit(unit)});
log.debug("findOutdatedToAnalyze: trivial {f}", .{zcu.fmtAnalUnit(unit)});
return unit;
}

@ -3284,7 +3281,7 @@ pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?AnalUnit {
}
}

log.debug("findOutdatedToAnalyze: heuristic returned '{}' ({d} dependers)", .{
log.debug("findOutdatedToAnalyze: heuristic returned '{f}' ({d} dependers)", .{
zcu.fmtAnalUnit(chosen_unit.?),
chosen_unit_dependers,
});
@ -4094,7 +4091,7 @@ fn resolveReferencesInner(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolv
const referencer = kv.value;
try checked_types.putNoClobber(gpa, ty, {});

log.debug("handle type '{}'", .{Type.fromInterned(ty).containerTypeName(ip).fmt(ip)});
log.debug("handle type '{f}'", .{Type.fromInterned(ty).containerTypeName(ip).fmt(ip)});

// If this type undergoes type resolution, the corresponding `AnalUnit` is automatically referenced.
const has_resolution: bool = switch (ip.indexToKey(ty)) {
@ -4130,7 +4127,7 @@ fn resolveReferencesInner(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolv
// `comptime` decls are always analyzed.
const unit: AnalUnit = .wrap(.{ .@"comptime" = cu });
if (!result.contains(unit)) {
log.debug("type '{}': ref comptime %{}", .{
log.debug("type '{f}': ref comptime %{}", .{
Type.fromInterned(ty).containerTypeName(ip).fmt(ip),
@intFromEnum(ip.getComptimeUnit(cu).zir_index.resolve(ip) orelse continue),
});
@ -4162,7 +4159,7 @@ fn resolveReferencesInner(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolv
},
};
if (want_analysis) {
log.debug("type '{}': ref test %{}", .{
log.debug("type '{f}': ref test %{}", .{
Type.fromInterned(ty).containerTypeName(ip).fmt(ip),
@intFromEnum(inst_info.inst),
});
@ -4181,7 +4178,7 @@ fn resolveReferencesInner(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolv
if (decl.linkage == .@"export") {
const unit: AnalUnit = .wrap(.{ .nav_val = nav });
if (!result.contains(unit)) {
log.debug("type '{}': ref named %{}", .{
log.debug("type '{f}': ref named %{}", .{
Type.fromInterned(ty).containerTypeName(ip).fmt(ip),
@intFromEnum(inst_info.inst),
});
@ -4197,7 +4194,7 @@ fn resolveReferencesInner(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolv
if (decl.linkage == .@"export") {
const unit: AnalUnit = .wrap(.{ .nav_val = nav });
if (!result.contains(unit)) {
log.debug("type '{}': ref named %{}", .{
log.debug("type '{f}': ref named %{}", .{
Type.fromInterned(ty).containerTypeName(ip).fmt(ip),
@intFromEnum(inst_info.inst),
});
@ -4232,7 +4229,7 @@ fn resolveReferencesInner(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolv
try unit_queue.put(gpa, other, kv.value); // same reference location
}

log.debug("handle unit '{}'", .{zcu.fmtAnalUnit(unit)});
log.debug("handle unit '{f}'", .{zcu.fmtAnalUnit(unit)});

if (zcu.reference_table.get(unit)) |first_ref_idx| {
assert(first_ref_idx != std.math.maxInt(u32));
@ -4240,7 +4237,7 @@ fn resolveReferencesInner(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolv
while (ref_idx != std.math.maxInt(u32)) {
const ref = zcu.all_references.items[ref_idx];
if (!result.contains(ref.referenced)) {
log.debug("unit '{}': ref unit '{}'", .{
log.debug("unit '{f}': ref unit '{f}'", .{
zcu.fmtAnalUnit(unit),
zcu.fmtAnalUnit(ref.referenced),
});
@ -4259,7 +4256,7 @@ fn resolveReferencesInner(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolv
while (ref_idx != std.math.maxInt(u32)) {
const ref = zcu.all_type_references.items[ref_idx];
if (!checked_types.contains(ref.referenced)) {
log.debug("unit '{}': ref type '{}'", .{
log.debug("unit '{f}': ref type '{f}'", .{
zcu.fmtAnalUnit(unit),
Type.fromInterned(ref.referenced).containerTypeName(ip).fmt(ip),
});
@ -4347,8 +4344,8 @@ pub fn fmtDependee(zcu: *Zcu, d: InternPool.Dependee) std.fmt.Formatter(formatDe
return .{ .data = .{ .dependee = d, .zcu = zcu } };
}

fn formatAnalUnit(data: struct { unit: AnalUnit, zcu: *Zcu }, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
_ = .{ fmt, options };
fn formatAnalUnit(data: struct { unit: AnalUnit, zcu: *Zcu }, bw: *std.io.BufferedWriter, comptime fmt: []const u8) anyerror!void {
_ = fmt;
const zcu = data.zcu;
const ip = &zcu.intern_pool;
switch (data.unit.unwrap()) {
@ -4356,69 +4353,69 @@ fn formatAnalUnit(data: struct { unit: AnalUnit, zcu: *Zcu }, comptime fmt: []co
const cu = ip.getComptimeUnit(cu_id);
if (cu.zir_index.resolveFull(ip)) |resolved| {
const file_path = zcu.fileByIndex(resolved.file).path;
return writer.print("comptime(inst=('{}', %{}) [{}])", .{ file_path.fmt(zcu.comp), @intFromEnum(resolved.inst), @intFromEnum(cu_id) });
return bw.print("comptime(inst=('{f}', %{}) [{}])", .{ file_path.fmt(zcu.comp), @intFromEnum(resolved.inst), @intFromEnum(cu_id) });
} else {
return writer.print("comptime(inst=<lost> [{}])", .{@intFromEnum(cu_id)});
return bw.print("comptime(inst=<lost> [{}])", .{@intFromEnum(cu_id)});
}
},
.nav_val => |nav| return writer.print("nav_val('{}' [{}])", .{ ip.getNav(nav).fqn.fmt(ip), @intFromEnum(nav) }),
.nav_ty => |nav| return writer.print("nav_ty('{}' [{}])", .{ ip.getNav(nav).fqn.fmt(ip), @intFromEnum(nav) }),
.type => |ty| return writer.print("ty('{}' [{}])", .{ Type.fromInterned(ty).containerTypeName(ip).fmt(ip), @intFromEnum(ty) }),
.nav_val => |nav| return bw.print("nav_val('{f}' [{}])", .{ ip.getNav(nav).fqn.fmt(ip), @intFromEnum(nav) }),
.nav_ty => |nav| return bw.print("nav_ty('{f}' [{}])", .{ ip.getNav(nav).fqn.fmt(ip), @intFromEnum(nav) }),
.type => |ty| return bw.print("ty('{f}' [{}])", .{ Type.fromInterned(ty).containerTypeName(ip).fmt(ip), @intFromEnum(ty) }),
.func => |func| {
const nav = zcu.funcInfo(func).owner_nav;
return writer.print("func('{}' [{}])", .{ ip.getNav(nav).fqn.fmt(ip), @intFromEnum(func) });
return bw.print("func('{f}' [{}])", .{ ip.getNav(nav).fqn.fmt(ip), @intFromEnum(func) });
},
.memoized_state => return writer.writeAll("memoized_state"),
.memoized_state => return bw.writeAll("memoized_state"),
}
}
fn formatDependee(data: struct { dependee: InternPool.Dependee, zcu: *Zcu }, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
_ = .{ fmt, options };
fn formatDependee(data: struct { dependee: InternPool.Dependee, zcu: *Zcu }, bw: *std.io.BufferedWriter, comptime fmt: []const u8) anyerror!void {
_ = fmt;
const zcu = data.zcu;
const ip = &zcu.intern_pool;
switch (data.dependee) {
.src_hash => |ti| {
const info = ti.resolveFull(ip) orelse {
return writer.writeAll("inst(<lost>)");
return bw.writeAll("inst(<lost>)");
};
const file_path = zcu.fileByIndex(info.file).path;
return writer.print("inst('{}', %{d})", .{ file_path.fmt(zcu.comp), @intFromEnum(info.inst) });
return bw.print("inst('{f}', %{d})", .{ file_path.fmt(zcu.comp), @intFromEnum(info.inst) });
},
.nav_val => |nav| {
const fqn = ip.getNav(nav).fqn;
return writer.print("nav_val('{}')", .{fqn.fmt(ip)});
return bw.print("nav_val('{f}')", .{fqn.fmt(ip)});
},
.nav_ty => |nav| {
const fqn = ip.getNav(nav).fqn;
return writer.print("nav_ty('{}')", .{fqn.fmt(ip)});
return bw.print("nav_ty('{f}')", .{fqn.fmt(ip)});
},
.interned => |ip_index| switch (ip.indexToKey(ip_index)) {
.struct_type, .union_type, .enum_type => return writer.print("type('{}')", .{Type.fromInterned(ip_index).containerTypeName(ip).fmt(ip)}),
.func => |f| return writer.print("ies('{}')", .{ip.getNav(f.owner_nav).fqn.fmt(ip)}),
.struct_type, .union_type, .enum_type => return bw.print("type('{f}')", .{Type.fromInterned(ip_index).containerTypeName(ip).fmt(ip)}),
.func => |f| return bw.print("ies('{f}')", .{ip.getNav(f.owner_nav).fqn.fmt(ip)}),
else => unreachable,
},
.zon_file => |file| {
const file_path = zcu.fileByIndex(file).path;
return writer.print("zon_file('{}')", .{file_path.fmt(zcu.comp)});
return bw.print("zon_file('{f}')", .{file_path.fmt(zcu.comp)});
},
.embed_file => |ef_idx| {
const ef = ef_idx.get(zcu);
return writer.print("embed_file('{}')", .{ef.path.fmt(zcu.comp)});
return bw.print("embed_file('{f}')", .{ef.path.fmt(zcu.comp)});
},
.namespace => |ti| {
const info = ti.resolveFull(ip) orelse {
return writer.writeAll("namespace(<lost>)");
return bw.writeAll("namespace(<lost>)");
};
const file_path = zcu.fileByIndex(info.file).path;
return writer.print("namespace('{}', %{d})", .{ file_path.fmt(zcu.comp), @intFromEnum(info.inst) });
return bw.print("namespace('{f}', %{d})", .{ file_path.fmt(zcu.comp), @intFromEnum(info.inst) });
},
.namespace_name => |k| {
const info = k.namespace.resolveFull(ip) orelse {
return writer.print("namespace(<lost>, '{}')", .{k.name.fmt(ip)});
return bw.print("namespace(<lost>, '{f}')", .{k.name.fmt(ip)});
};
const file_path = zcu.fileByIndex(info.file).path;
return writer.print("namespace('{}', %{d}, '{}')", .{ file_path.fmt(zcu.comp), @intFromEnum(info.inst), k.name.fmt(ip) });
return bw.print("namespace('{f}', %{d}, '{f}')", .{ file_path.fmt(zcu.comp), @intFromEnum(info.inst), k.name.fmt(ip) });
},
.memoized_state => return writer.writeAll("memoized_state"),
.memoized_state => return bw.writeAll("memoized_state"),
}
}

@ -190,7 +190,7 @@ pub fn updateFile(
// failure was a race, or ENOENT, indicating deletion of the
// directory of our open handle.
if (builtin.os.tag != .macos) {
std.process.fatal("cache directory '{}' unexpectedly removed during compiler execution", .{
std.process.fatal("cache directory '{f}' unexpectedly removed during compiler execution", .{
cache_directory,
});
}
@ -202,7 +202,7 @@ pub fn updateFile(
}) catch |excl_err| switch (excl_err) {
error.PathAlreadyExists => continue,
error.FileNotFound => {
std.process.fatal("cache directory '{}' unexpectedly removed during compiler execution", .{
std.process.fatal("cache directory '{f}' unexpectedly removed during compiler execution", .{
cache_directory,
});
},
@ -646,7 +646,7 @@ pub fn ensureMemoizedStateUpToDate(pt: Zcu.PerThread, stage: InternPool.Memoized
// If this unit caused the error, it would have an entry in `failed_analysis`.
// Since it does not, this must be a transitive failure.
try zcu.transitive_failed_analysis.put(gpa, unit, {});
log.debug("mark transitive analysis failure for {}", .{zcu.fmtAnalUnit(unit)});
log.debug("mark transitive analysis failure for {f}", .{zcu.fmtAnalUnit(unit)});
}
break :res .{ !prev_failed, true };
},
@ -751,7 +751,7 @@ pub fn ensureComptimeUnitUpToDate(pt: Zcu.PerThread, cu_id: InternPool.ComptimeU

const anal_unit: AnalUnit = .wrap(.{ .@"comptime" = cu_id });

log.debug("ensureComptimeUnitUpToDate {}", .{zcu.fmtAnalUnit(anal_unit)});
log.debug("ensureComptimeUnitUpToDate {f}", .{zcu.fmtAnalUnit(anal_unit)});

assert(!zcu.analysis_in_progress.contains(anal_unit));

@ -802,7 +802,7 @@ pub fn ensureComptimeUnitUpToDate(pt: Zcu.PerThread, cu_id: InternPool.ComptimeU
// If this unit caused the error, it would have an entry in `failed_analysis`.
// Since it does not, this must be a transitive failure.
try zcu.transitive_failed_analysis.put(gpa, anal_unit, {});
log.debug("mark transitive analysis failure for {}", .{zcu.fmtAnalUnit(anal_unit)});
log.debug("mark transitive analysis failure for {f}", .{zcu.fmtAnalUnit(anal_unit)});
}
return error.AnalysisFail;
},
@ -832,7 +832,7 @@ fn analyzeComptimeUnit(pt: Zcu.PerThread, cu_id: InternPool.ComptimeUnit.Id) Zcu
const anal_unit: AnalUnit = .wrap(.{ .@"comptime" = cu_id });
const comptime_unit = ip.getComptimeUnit(cu_id);

log.debug("analyzeComptimeUnit {}", .{zcu.fmtAnalUnit(anal_unit)});
log.debug("analyzeComptimeUnit {f}", .{zcu.fmtAnalUnit(anal_unit)});

const inst_resolved = comptime_unit.zir_index.resolveFull(ip) orelse return error.AnalysisFail;
const file = zcu.fileByIndex(inst_resolved.file);
@ -878,7 +878,7 @@ fn analyzeComptimeUnit(pt: Zcu.PerThread, cu_id: InternPool.ComptimeUnit.Id) Zcu
.r = .{ .simple = .comptime_keyword },
} },
.src_base_inst = comptime_unit.zir_index,
.type_name_ctx = try ip.getOrPutStringFmt(gpa, pt.tid, "{}.comptime", .{
.type_name_ctx = try ip.getOrPutStringFmt(gpa, pt.tid, "{f}.comptime", .{
Type.fromInterned(zcu.namespacePtr(comptime_unit.namespace).owner_type).containerTypeName(ip).fmt(ip),
}, .no_embedded_nulls),
};
@ -930,7 +930,7 @@ pub fn ensureNavValUpToDate(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Zcu
const anal_unit: AnalUnit = .wrap(.{ .nav_val = nav_id });
const nav = ip.getNav(nav_id);

log.debug("ensureNavValUpToDate {}", .{zcu.fmtAnalUnit(anal_unit)});
log.debug("ensureNavValUpToDate {f}", .{zcu.fmtAnalUnit(anal_unit)});

// Determine whether or not this `Nav`'s value is outdated. This also includes checking if the
// status is `.unresolved`, which indicates that the value is outdated because it has *never*
@ -988,7 +988,7 @@ pub fn ensureNavValUpToDate(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Zcu
// If this unit caused the error, it would have an entry in `failed_analysis`.
// Since it does not, this must be a transitive failure.
try zcu.transitive_failed_analysis.put(gpa, anal_unit, {});
log.debug("mark transitive analysis failure for {}", .{zcu.fmtAnalUnit(anal_unit)});
log.debug("mark transitive analysis failure for {f}", .{zcu.fmtAnalUnit(anal_unit)});
}
break :res .{ !prev_failed, true };
},
@ -1059,7 +1059,7 @@ fn analyzeNavVal(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Zcu.CompileErr
const anal_unit: AnalUnit = .wrap(.{ .nav_val = nav_id });
const old_nav = ip.getNav(nav_id);

log.debug("analyzeNavVal {}", .{zcu.fmtAnalUnit(anal_unit)});
log.debug("analyzeNavVal {f}", .{zcu.fmtAnalUnit(anal_unit)});

const inst_resolved = old_nav.analysis.?.zir_index.resolveFull(ip) orelse return error.AnalysisFail;
const file = zcu.fileByIndex(inst_resolved.file);
@ -1240,10 +1240,10 @@ fn analyzeNavVal(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Zcu.CompileErr
// TODO: this is jank. If #20663 is rejected, let's think about how to better model `usingnamespace`.
if (zir_decl.kind == .@"usingnamespace") {
if (nav_ty.toIntern() != .type_type) {
return sema.fail(&block, ty_src, "expected type, found {}", .{nav_ty.fmt(pt)});
return sema.fail(&block, ty_src, "expected type, found {f}", .{nav_ty.fmt(pt)});
}
if (nav_val.toType().getNamespace(zcu) == .none) {
return sema.fail(&block, ty_src, "type {} has no namespace", .{nav_val.toType().fmt(pt)});
return sema.fail(&block, ty_src, "type {f} has no namespace", .{nav_val.toType().fmt(pt)});
}
ip.resolveNavValue(nav_id, .{
.val = nav_val.toIntern(),
@ -1339,7 +1339,7 @@ pub fn ensureNavTypeUpToDate(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Zc
const anal_unit: AnalUnit = .wrap(.{ .nav_ty = nav_id });
const nav = ip.getNav(nav_id);

log.debug("ensureNavTypeUpToDate {}", .{zcu.fmtAnalUnit(anal_unit)});
log.debug("ensureNavTypeUpToDate {f}", .{zcu.fmtAnalUnit(anal_unit)});

const type_resolved_by_value: bool = from_val: {
const analysis = nav.analysis orelse break :from_val false;
@ -1409,7 +1409,7 @@ pub fn ensureNavTypeUpToDate(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Zc
// If this unit caused the error, it would have an entry in `failed_analysis`.
// Since it does not, this must be a transitive failure.
try zcu.transitive_failed_analysis.put(gpa, anal_unit, {});
log.debug("mark transitive analysis failure for {}", .{zcu.fmtAnalUnit(anal_unit)});
log.debug("mark transitive analysis failure for {f}", .{zcu.fmtAnalUnit(anal_unit)});
}
break :res .{ !prev_failed, true };
},
@ -1451,7 +1451,7 @@ fn analyzeNavType(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Zcu.CompileEr
const anal_unit: AnalUnit = .wrap(.{ .nav_ty = nav_id });
const old_nav = ip.getNav(nav_id);

log.debug("analyzeNavType {}", .{zcu.fmtAnalUnit(anal_unit)});
log.debug("analyzeNavType {f}", .{zcu.fmtAnalUnit(anal_unit)});

const inst_resolved = old_nav.analysis.?.zir_index.resolveFull(ip) orelse return error.AnalysisFail;
const file = zcu.fileByIndex(inst_resolved.file);
@ -1582,7 +1582,7 @@ pub fn ensureFuncBodyUpToDate(pt: Zcu.PerThread, maybe_coerced_func_index: Inter
const func_index = ip.unwrapCoercedFunc(maybe_coerced_func_index);
const anal_unit: AnalUnit = .wrap(.{ .func = func_index });

log.debug("ensureFuncBodyUpToDate {}", .{zcu.fmtAnalUnit(anal_unit)});
log.debug("ensureFuncBodyUpToDate {f}", .{zcu.fmtAnalUnit(anal_unit)});

const func = zcu.funcInfo(maybe_coerced_func_index);

@ -1626,7 +1626,7 @@ pub fn ensureFuncBodyUpToDate(pt: Zcu.PerThread, maybe_coerced_func_index: Inter
// If this function caused the error, it would have an entry in `failed_analysis`.
// Since it does not, this must be a transitive failure.
try zcu.transitive_failed_analysis.put(gpa, anal_unit, {});
log.debug("mark transitive analysis failure for {}", .{zcu.fmtAnalUnit(anal_unit)});
log.debug("mark transitive analysis failure for {f}", .{zcu.fmtAnalUnit(anal_unit)});
}
// We consider the IES to be outdated if the function previously succeeded analysis; in this case,
// we need to re-analyze dependants to ensure they hit a transitive error here, rather than reporting
@ -1696,7 +1696,7 @@ fn analyzeFuncBody(
else
.none;

log.debug("analyze and generate fn body {}", .{zcu.fmtAnalUnit(anal_unit)});
log.debug("analyze and generate fn body {f}", .{zcu.fmtAnalUnit(anal_unit)});

var air = try pt.analyzeFnBodyInner(func_index);
errdefer air.deinit(gpa);
@ -2615,7 +2615,7 @@ const ScanDeclIter = struct {
var gop = try iter.seen_decls.getOrPut(gpa, name);
var next_suffix: u32 = 0;
while (gop.found_existing) {
name = try ip.getOrPutStringFmt(gpa, pt.tid, "{}_{d}", .{ name.fmt(ip), next_suffix }, .no_embedded_nulls);
name = try ip.getOrPutStringFmt(gpa, pt.tid, "{f}_{d}", .{ name.fmt(ip), next_suffix }, .no_embedded_nulls);
gop = try iter.seen_decls.getOrPut(gpa, name);
next_suffix += 1;
}
@ -2764,7 +2764,7 @@ const ScanDeclIter = struct {

if (existing_unit == null and (want_analysis or decl.linkage == .@"export")) {
log.debug(
"scanDecl queue analyze_comptime_unit file='{s}' unit={}",
"scanDecl queue analyze_comptime_unit file='{s}' unit={f}",
.{ namespace.fileScope(zcu).sub_file_path, zcu.fmtAnalUnit(unit) },
);
try comp.queueJob(.{ .analyze_comptime_unit = unit });
@ -3182,7 +3182,7 @@ fn processExportsInner(
if (gop.found_existing) {
new_export.status = .failed_retryable;
try zcu.failed_exports.ensureUnusedCapacity(gpa, 1);
const msg = try Zcu.ErrorMsg.create(gpa, new_export.src, "exported symbol collision: {}", .{
const msg = try Zcu.ErrorMsg.create(gpa, new_export.src, "exported symbol collision: {f}", .{
new_export.opts.name.fmt(ip),
});
errdefer msg.destroy(gpa);

@ -1011,7 +1011,7 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
}

const abi_size = math.cast(u32, elem_ty.abiSize(zcu)) orelse {
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(pt)});
return self.fail("type '{f}' too big to fit into stack frame", .{elem_ty.fmt(pt)});
};
// TODO swap this for inst.ty.ptrAlign
const abi_align = elem_ty.abiAlignment(zcu);
@ -1022,7 +1022,7 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
fn allocRegOrMem(self: *Self, elem_ty: Type, reg_ok: bool, maybe_inst: ?Air.Inst.Index) !MCValue {
const pt = self.pt;
const abi_size = math.cast(u32, elem_ty.abiSize(pt.zcu)) orelse {
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(pt)});
return self.fail("type '{f}' too big to fit into stack frame", .{elem_ty.fmt(pt)});
};
const abi_align = elem_ty.abiAlignment(pt.zcu);

@ -4636,7 +4636,7 @@ fn airDbgVar(self: *Self, inst: Air.Inst.Index) InnerError!void {
const mcv = try self.resolveInst(operand);
const name: Air.NullTerminatedString = @enumFromInt(pl_op.payload);

log.debug("airDbgVar: %{d}: {}, {}", .{ inst, ty.fmtDebug(), mcv });
log.debug("airDbgVar: %{f}: {f}, {}", .{ inst, ty.fmtDebug(), mcv });

try self.dbg_info_relocs.append(self.gpa, .{
.tag = tag,

@ -70,9 +70,11 @@ const BranchType = enum {
}
};

pub fn emitMir(
emit: *Emit,
) !void {
pub fn emitMir(emit: *Emit) InnerError!void {
return @errorCast(emit.emitMirInner());
}

fn emitMirInner(emit: *Emit) anyerror!void {
const mir_tags = emit.mir.instructions.items(.tag);

// Find smallest lowerings for branch instructions
@ -439,7 +441,7 @@ fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError {
return error.EmitFail;
}

fn dbgAdvancePCAndLine(emit: *Emit, line: u32, column: u32) InnerError!void {
fn dbgAdvancePCAndLine(emit: *Emit, line: u32, column: u32) anyerror!void {
const delta_line = @as(i33, line) - @as(i33, emit.prev_di_line);
const delta_pc: usize = emit.code.items.len - emit.prev_di_pc;
log.debug("  (advance pc={d} and line={d})", .{ delta_pc, delta_line });
@ -454,25 +456,20 @@ fn dbgAdvancePCAndLine(emit: *Emit, line: u32, column: u32) InnerError!void {
.plan9 => |dbg_out| {
if (delta_pc <= 0) return; // only do this when the pc changes

var aw: std.io.AllocatingWriter = undefined;
const bw = aw.fromArrayList(emit.bin_file.comp.gpa, &dbg_out.dbg_line);
defer dbg_out.dbg_line = aw.toArrayList();

// increasing the line number
try link.File.Plan9.changeLine(&dbg_out.dbg_line, @intCast(delta_line));
try link.File.Plan9.changeLine(bw, @intCast(delta_line));
// increasing the pc
const d_pc_p9 = @as(i64, @intCast(delta_pc)) - dbg_out.pc_quanta;
if (d_pc_p9 > 0) {
// minus one because if its the last one, we want to leave space to change the line which is one pc quanta
var diff = @divExact(d_pc_p9, dbg_out.pc_quanta) - dbg_out.pc_quanta;
while (diff > 0) {
if (diff < 64) {
try dbg_out.dbg_line.append(@intCast(diff + 128));
diff = 0;
} else {
try dbg_out.dbg_line.append(@intCast(64 + 128));
diff -= 64;
}
}
if (dbg_out.pcop_change_index) |pci|
dbg_out.dbg_line.items[pci] += 1;
dbg_out.pcop_change_index = @intCast(dbg_out.dbg_line.items.len - 1);
try bw.writeByte(@as(u8, @intCast(@divExact(d_pc_p9, dbg_out.pc_quanta) + 128)) - dbg_out.pc_quanta);
const dbg_line = aw.getWritten();
if (dbg_out.pcop_change_index) |pci| dbg_line[pci] += 1;
dbg_out.pcop_change_index = @intCast(dbg_line.len - 1);
} else if (d_pc_p9 == 0) {
// we don't need to do anything, because adding the pc quanta does it for us
} else unreachable;

@ -997,7 +997,7 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
}

const abi_size = math.cast(u32, elem_ty.abiSize(zcu)) orelse {
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(pt)});
return self.fail("type '{f}' too big to fit into stack frame", .{elem_ty.fmt(pt)});
};
// TODO swap this for inst.ty.ptrAlign
const abi_align = elem_ty.abiAlignment(zcu);
@ -1008,7 +1008,7 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
fn allocRegOrMem(self: *Self, elem_ty: Type, reg_ok: bool, maybe_inst: ?Air.Inst.Index) !MCValue {
const pt = self.pt;
const abi_size = math.cast(u32, elem_ty.abiSize(pt.zcu)) orelse {
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(pt)});
return self.fail("type '{f}' too big to fit into stack frame", .{elem_ty.fmt(pt)});
};
const abi_align = elem_ty.abiAlignment(pt.zcu);

@ -4609,7 +4609,7 @@ fn airDbgVar(self: *Self, inst: Air.Inst.Index) !void {
const mcv = try self.resolveInst(operand);
const name: Air.NullTerminatedString = @enumFromInt(pl_op.payload);

log.debug("airDbgVar: %{d}: {}, {}", .{ inst, ty.fmtDebug(), mcv });
log.debug("airDbgVar: %{f}: {f}, {}", .{ inst, ty.fmtDebug(), mcv });

try self.dbg_info_relocs.append(self.gpa, .{
.tag = tag,

@ -67,9 +67,11 @@ const BranchType = enum {
}
};

pub fn emitMir(
emit: *Emit,
) !void {
pub fn emitMir(emit: *Emit) InnerError!void {
return @errorCast(emit.emitMirInner());
}

fn emitMirInner(emit: *Emit) anyerror!void {
const mir_tags = emit.mir.instructions.items(.tag);

// Find smallest lowerings for branch instructions
@ -370,16 +372,20 @@ fn dbgAdvancePCAndLine(self: *Emit, line: u32, column: u32) !void {
.plan9 => |dbg_out| {
if (delta_pc <= 0) return; // only do this when the pc changes

var aw: std.io.AllocatingWriter = undefined;
const bw = aw.fromArrayList(self.bin_file.comp.gpa, &dbg_out.dbg_line);
defer dbg_out.dbg_line = aw.toArrayList();

// increasing the line number
try link.File.Plan9.changeLine(&dbg_out.dbg_line, delta_line);
try link.File.Plan9.changeLine(bw, delta_line);
// increasing the pc
const d_pc_p9 = @as(i64, @intCast(delta_pc)) - dbg_out.pc_quanta;
if (d_pc_p9 > 0) {
// minus one because if its the last one, we want to leave space to change the line which is one pc quanta
try dbg_out.dbg_line.append(@as(u8, @intCast(@divExact(d_pc_p9, dbg_out.pc_quanta) + 128)) - dbg_out.pc_quanta);
if (dbg_out.pcop_change_index) |pci|
dbg_out.dbg_line.items[pci] += 1;
dbg_out.pcop_change_index = @as(u32, @intCast(dbg_out.dbg_line.items.len - 1));
try bw.writeByte(@as(u8, @intCast(@divExact(d_pc_p9, dbg_out.pc_quanta) + 128)) - dbg_out.pc_quanta);
const dbg_line = aw.getWritten();
if (dbg_out.pcop_change_index) |pci| dbg_line[pci] += 1;
dbg_out.pcop_change_index = @intCast(dbg_line.len - 1);
} else if (d_pc_p9 == 0) {
// we don't need to do anything, because adding the pc quanta does it for us
} else unreachable;

@ -401,7 +401,7 @@ const InstTracking = struct {
.reserved_frame => |index| inst_tracking.long = .{ .load_frame = .{ .index = index } },
else => unreachable,
}
tracking_log.debug("spill %{d} from {} to {}", .{ inst, inst_tracking.short, inst_tracking.long });
tracking_log.debug("spill %{f} from {} to {}", .{ inst, inst_tracking.short, inst_tracking.long });
try function.genCopy(function.typeOfIndex(inst), inst_tracking.long, inst_tracking.short);
}

@ -435,7 +435,7 @@ const InstTracking = struct {
fn trackSpill(inst_tracking: *InstTracking, function: *Func, inst: Air.Inst.Index) !void {
try function.freeValue(inst_tracking.short);
inst_tracking.reuseFrame();
tracking_log.debug("%{d} => {} (spilled)", .{ inst, inst_tracking.* });
tracking_log.debug("%{f} => {f} (spilled)", .{ inst, inst_tracking.* });
}

fn verifyMaterialize(inst_tracking: InstTracking, target: InstTracking) void {
@ -499,14 +499,14 @@ const InstTracking = struct {
else => target.long,
} else target.long;
inst_tracking.short = target.short;
tracking_log.debug("%{d} => {} (materialize)", .{ inst, inst_tracking.* });
tracking_log.debug("%{f} => {f} (materialize)", .{ inst, inst_tracking.* });
}

fn resurrect(inst_tracking: *InstTracking, inst: Air.Inst.Index, scope_generation: u32) void {
switch (inst_tracking.short) {
.dead => |die_generation| if (die_generation >= scope_generation) {
inst_tracking.reuseFrame();
tracking_log.debug("%{d} => {} (resurrect)", .{ inst, inst_tracking.* });
tracking_log.debug("%{f} => {f} (resurrect)", .{ inst, inst_tracking.* });
},
else => {},
}
@ -516,7 +516,7 @@ const InstTracking = struct {
if (inst_tracking.short == .dead) return;
try function.freeValue(inst_tracking.short);
inst_tracking.short = .{ .dead = function.scope_generation };
tracking_log.debug("%{d} => {} (death)", .{ inst, inst_tracking.* });
tracking_log.debug("%{f} => {f} (death)", .{ inst, inst_tracking.* });
}

fn reuse(
@ -527,15 +527,15 @@ const InstTracking = struct {
) void {
inst_tracking.short = .{ .dead = function.scope_generation };
if (new_inst) |inst|
tracking_log.debug("%{d} => {} (reuse %{d})", .{ inst, inst_tracking.*, old_inst })
tracking_log.debug("%{f} => {f} (reuse %{f})", .{ inst, inst_tracking.*, old_inst })
else
tracking_log.debug("tmp => {} (reuse %{d})", .{ inst_tracking.*, old_inst });
tracking_log.debug("tmp => {f} (reuse %{f})", .{ inst_tracking.*, old_inst });
}

fn liveOut(inst_tracking: *InstTracking, function: *Func, inst: Air.Inst.Index) void {
for (inst_tracking.getRegs()) |reg| {
if (function.register_manager.isRegFree(reg)) {
tracking_log.debug("%{d} => {} (live-out)", .{ inst, inst_tracking.* });
tracking_log.debug("%{f} => {f} (live-out)", .{ inst, inst_tracking.* });
continue;
}

@ -562,18 +562,13 @@ const InstTracking = struct {
// Perform side-effects of freeValue manually.
function.register_manager.freeReg(reg);

tracking_log.debug("%{d} => {} (live-out %{d})", .{ inst, inst_tracking.*, tracked_inst });
tracking_log.debug("%{f} => {f} (live-out %{f})", .{ inst, inst_tracking.*, tracked_inst });
}
}

pub fn format(
inst_tracking: InstTracking,
comptime _: []const u8,
_: std.fmt.FormatOptions,
writer: anytype,
) @TypeOf(writer).Error!void {
if (!std.meta.eql(inst_tracking.long, inst_tracking.short)) try writer.print("|{}| ", .{inst_tracking.long});
try writer.print("{}", .{inst_tracking.short});
pub fn format(inst_tracking: InstTracking, bw: *std.io.BufferedWriter, comptime _: []const u8) anyerror!void {
if (!std.meta.eql(inst_tracking.long, inst_tracking.short)) try bw.print("|{}| ", .{inst_tracking.long});
try bw.print("{}", .{inst_tracking.short});
}
};

@ -802,7 +797,7 @@ pub fn generate(
function.mir_instructions.deinit(gpa);
}

wip_mir_log.debug("{}:", .{fmtNav(func.owner_nav, ip)});
wip_mir_log.debug("{f}:", .{fmtNav(func.owner_nav, ip)});

try function.frame_allocs.resize(gpa, FrameIndex.named_count);
function.frame_allocs.set(
@ -937,12 +932,7 @@ const FormatWipMirData = struct {
func: *Func,
inst: Mir.Inst.Index,
};
fn formatWipMir(
data: FormatWipMirData,
comptime _: []const u8,
_: std.fmt.FormatOptions,
writer: anytype,
) @TypeOf(writer).Error!void {
fn formatWipMir(data: FormatWipMirData, bw: *std.io.BufferedWriter, comptime _: []const u8) anyerror!void {
const pt = data.func.pt;
const comp = pt.zcu.comp;
var lower: Lower = .{
@ -965,11 +955,11 @@ fn formatWipMir(
lower.err_msg.?.deinit(data.func.gpa);
lower.err_msg = null;
}
try writer.writeAll(lower.err_msg.?.msg);
try bw.writeAll(lower.err_msg.?.msg);
return;
},
error.OutOfMemory, error.InvalidInstruction => |e| {
try writer.writeAll(switch (e) {
try bw.writeAll(switch (e) {
error.OutOfMemory => "Out of memory",
error.InvalidInstruction => "CodeGen failed to find a viable instruction.",
});
@ -977,8 +967,8 @@ fn formatWipMir(
},
else => |e| return e,
}).insts) |lowered_inst| {
if (!first) try writer.writeAll("\ndebug(wip_mir): ");
try writer.print("  | {}", .{lowered_inst});
if (!first) try bw.writeAll("\ndebug(wip_mir): ");
try bw.print("  | {}", .{lowered_inst});
first = false;
}
}
@ -990,13 +980,8 @@ const FormatNavData = struct {
ip: *const InternPool,
nav_index: InternPool.Nav.Index,
};
fn formatNav(
data: FormatNavData,
comptime _: []const u8,
_: std.fmt.FormatOptions,
writer: anytype,
) @TypeOf(writer).Error!void {
try writer.print("{}", .{data.ip.getNav(data.nav_index).fqn.fmt(data.ip)});
fn formatNav(data: FormatNavData, bw: *std.io.BufferedWriter, comptime _: []const u8) anyerror!void {
try bw.print("{f}", .{data.ip.getNav(data.nav_index).fqn.fmt(data.ip)});
}
fn fmtNav(nav_index: InternPool.Nav.Index, ip: *const InternPool) std.fmt.Formatter(formatNav) {
return .{ .data = .{
@ -1009,12 +994,7 @@ const FormatAirData = struct {
func: *Func,
inst: Air.Inst.Index,
};
fn formatAir(
data: FormatAirData,
comptime _: []const u8,
_: std.fmt.FormatOptions,
writer: anytype,
) @TypeOf(writer).Error!void {
fn formatAir(data: FormatAirData, _: *std.io.BufferedWriter, comptime _: []const u8) anyerror!void {
data.func.air.dumpInst(data.inst, data.func.pt, data.func.liveness);
}
fn fmtAir(func: *Func, inst: Air.Inst.Index) std.fmt.Formatter(formatAir) {
@ -1024,14 +1004,9 @@ fn fmtAir(func: *Func, inst: Air.Inst.Index) std.fmt.Formatter(formatAir) {
const FormatTrackingData = struct {
func: *Func,
};
fn formatTracking(
data: FormatTrackingData,
comptime _: []const u8,
_: std.fmt.FormatOptions,
writer: anytype,
) @TypeOf(writer).Error!void {
fn formatTracking(data: FormatTrackingData, bw: *std.io.BufferedWriter, comptime _: []const u8) anyerror!void {
var it = data.func.inst_tracking.iterator();
while (it.next()) |entry| try writer.print("\n%{d} = {}", .{ entry.key_ptr.*, entry.value_ptr.* });
while (it.next()) |entry| try bw.print("\n%{d} = {f}", .{ entry.key_ptr.*, entry.value_ptr.* });
}
fn fmtTracking(func: *Func) std.fmt.Formatter(formatTracking) {
return .{ .data = .{ .func = func } };
@ -1049,7 +1024,7 @@ fn addInst(func: *Func, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index {
.pseudo_dbg_epilogue_begin,
.pseudo_dead,
=> false,
}) wip_mir_log.debug("{}", .{func.fmtWipMir(result_index)});
}) wip_mir_log.debug("{f}", .{func.fmtWipMir(result_index)});
return result_index;
}

@ -1172,7 +1147,7 @@ fn gen(func: *Func) !void {
func.ret_mcv.long.address().offset(-func.ret_mcv.short.indirect.off),
);
func.ret_mcv.long = .{ .load_frame = .{ .index = frame_index } };
tracking_log.debug("spill {} to {}", .{ func.ret_mcv.long, frame_index });
tracking_log.debug("spill {} to {f}", .{ func.ret_mcv.long, frame_index });
},
else => unreachable,
}
@ -1303,7 +1278,7 @@ fn genLazy(func: *Func, lazy_sym: link.File.LazySymbol) InnerError!void {
switch (Type.fromInterned(lazy_sym.ty).zigTypeTag(zcu)) {
.@"enum" => {
const enum_ty = Type.fromInterned(lazy_sym.ty);
wip_mir_log.debug("{}.@tagName:", .{enum_ty.fmt(pt)});
wip_mir_log.debug("{f}.@tagName:", .{enum_ty.fmt(pt)});

const param_regs = abi.Registers.Integer.function_arg_regs;
const ret_reg = param_regs[0];
@ -1385,7 +1360,7 @@ fn genLazy(func: *Func, lazy_sym: link.File.LazySymbol) InnerError!void {
});
},
else => return func.fail(
"TODO implement {s} for {}",
"TODO implement {s} for {f}",
.{ @tagName(lazy_sym.kind), Type.fromInterned(lazy_sym.ty).fmt(pt) },
),
}
@ -1399,8 +1374,8 @@ fn genBody(func: *Func, body: []const Air.Inst.Index) InnerError!void {

for (body) |inst| {
if (func.liveness.isUnused(inst) and !func.air.mustLower(inst, ip)) continue;
wip_mir_log.debug("{}", .{func.fmtAir(inst)});
verbose_tracking_log.debug("{}", .{func.fmtTracking()});
wip_mir_log.debug("{f}", .{func.fmtAir(inst)});
verbose_tracking_log.debug("{f}", .{func.fmtTracking()});

const old_air_bookkeeping = func.air_bookkeeping;
try func.ensureProcessDeathCapacity(Air.Liveness.bpi);
@ -1679,18 +1654,18 @@ fn genBody(func: *Func, body: []const Air.Inst.Index) InnerError!void {
var it = func.register_manager.free_registers.iterator(.{ .kind = .unset });
while (it.next()) |index| {
const tracked_inst = func.register_manager.registers[index];
tracking_log.debug("tracked inst: {}", .{tracked_inst});
tracking_log.debug("tracked inst: {f}", .{tracked_inst});
const tracking = func.getResolvedInstValue(tracked_inst);
for (tracking.getRegs()) |reg| {
if (RegisterManager.indexOfRegIntoTracked(reg).? == index) break;
} else return std.debug.panic(
\\%{} takes up these regs: {any}, however this regs {any}, don't use it
\\%{f} takes up these regs: {any}, however this regs {any}, don't use it
|
||||
, .{ tracked_inst, tracking.getRegs(), RegisterManager.regAtTrackedIndex(@intCast(index)) });
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
verbose_tracking_log.debug("{}", .{func.fmtTracking()});
|
||||
verbose_tracking_log.debug("{f}", .{func.fmtTracking()});
|
||||
}
|
||||
|
||||
fn getValue(func: *Func, value: MCValue, inst: ?Air.Inst.Index) !void {
|
||||
@ -1713,7 +1688,7 @@ fn freeValue(func: *Func, value: MCValue) !void {
|
||||
|
||||
fn feed(func: *Func, bt: *Air.Liveness.BigTomb, operand: Air.Inst.Ref) !void {
|
||||
if (bt.feed()) if (operand.toIndex()) |inst| {
|
||||
log.debug("feed inst: %{}", .{inst});
|
||||
log.debug("feed inst: %{f}", .{inst});
|
||||
try func.processDeath(inst);
|
||||
};
|
||||
}
|
||||
@ -1907,7 +1882,7 @@ fn splitType(func: *Func, ty: Type) ![2]Type {
|
||||
else => return func.fail("TODO: splitType class {}", .{class}),
|
||||
};
|
||||
} else if (parts[0].abiSize(zcu) + parts[1].abiSize(zcu) == ty.abiSize(zcu)) return parts;
|
||||
return func.fail("TODO implement splitType for {}", .{ty.fmt(func.pt)});
|
||||
return func.fail("TODO implement splitType for {f}", .{ty.fmt(func.pt)});
|
||||
}
|
||||
|
||||
/// Truncates the value in the register in place.
|
||||
@ -2008,7 +1983,7 @@ fn allocFrameIndex(func: *Func, alloc: FrameAlloc) !FrameIndex {
|
||||
}
|
||||
const frame_index: FrameIndex = @enumFromInt(func.frame_allocs.len);
|
||||
try func.frame_allocs.append(func.gpa, alloc);
|
||||
log.debug("allocated frame {}", .{frame_index});
|
||||
log.debug("allocated frame {f}", .{frame_index});
|
||||
return frame_index;
|
||||
}
|
||||
|
||||
@ -2020,7 +1995,7 @@ fn allocMemPtr(func: *Func, inst: Air.Inst.Index) !FrameIndex {
|
||||
const val_ty = ptr_ty.childType(zcu);
|
||||
return func.allocFrameIndex(FrameAlloc.init(.{
|
||||
.size = math.cast(u32, val_ty.abiSize(zcu)) orelse {
|
||||
return func.fail("type '{}' too big to fit into stack frame", .{val_ty.fmt(pt)});
|
||||
return func.fail("type '{f}' too big to fit into stack frame", .{val_ty.fmt(pt)});
|
||||
},
|
||||
.alignment = ptr_ty.ptrAlignment(zcu).max(.@"1"),
|
||||
}));
|
||||
@ -2160,7 +2135,7 @@ pub fn spillRegisters(func: *Func, comptime registers: []const Register) !void {
|
||||
/// allocated. A second call to `copyToTmpRegister` may return the same register.
|
||||
/// This can have a side effect of spilling instructions to the stack to free up a register.
|
||||
fn copyToTmpRegister(func: *Func, ty: Type, mcv: MCValue) !Register {
|
||||
log.debug("copyToTmpRegister ty: {}", .{ty.fmt(func.pt)});
|
||||
log.debug("copyToTmpRegister ty: {f}", .{ty.fmt(func.pt)});
|
||||
const reg = try func.register_manager.allocReg(null, func.regTempClassForType(ty));
|
||||
try func.genSetReg(ty, reg, mcv);
|
||||
return reg;
|
||||
@ -2245,7 +2220,7 @@ fn airIntCast(func: *Func, inst: Air.Inst.Index) !void {
|
||||
break :result null; // TODO
|
||||
|
||||
break :result dst_mcv;
|
||||
} orelse return func.fail("TODO: implement airIntCast from {} to {}", .{
|
||||
} orelse return func.fail("TODO: implement airIntCast from {f} to {f}", .{
|
||||
src_ty.fmt(pt), dst_ty.fmt(pt),
|
||||
});
|
||||
|
||||
@ -2633,7 +2608,7 @@ fn genBinOp(
|
||||
.add_sat,
|
||||
=> {
|
||||
if (bit_size != 64 or !is_unsigned)
|
||||
return func.fail("TODO: genBinOp ty: {}", .{lhs_ty.fmt(pt)});
|
||||
return func.fail("TODO: genBinOp ty: {f}", .{lhs_ty.fmt(pt)});
|
||||
|
||||
const tmp_reg = try func.copyToTmpRegister(rhs_ty, .{ .register = rhs_reg });
|
||||
const tmp_lock = func.register_manager.lockRegAssumeUnused(tmp_reg);
|
||||
@ -4065,7 +4040,7 @@ fn airGetUnionTag(func: *Func, inst: Air.Inst.Index) !void {
|
||||
);
|
||||
} else {
|
||||
return func.fail(
|
||||
"TODO implement get_union_tag for ABI larger than 8 bytes and operand {}, tag {}",
|
||||
"TODO implement get_union_tag for ABI larger than 8 bytes and operand {}, tag {f}",
|
||||
.{ frame_mcv, tag_ty.fmt(pt) },
|
||||
);
|
||||
}
|
||||
@ -4186,7 +4161,7 @@ fn airAbs(func: *Func, inst: Air.Inst.Index) !void {
|
||||
|
||||
switch (scalar_ty.zigTypeTag(zcu)) {
|
||||
.int => if (ty.zigTypeTag(zcu) == .vector) {
|
||||
return func.fail("TODO implement airAbs for {}", .{ty.fmt(pt)});
|
||||
return func.fail("TODO implement airAbs for {f}", .{ty.fmt(pt)});
|
||||
} else {
|
||||
const int_info = scalar_ty.intInfo(zcu);
|
||||
const int_bits = int_info.bits;
|
||||
@ -4267,7 +4242,7 @@ fn airAbs(func: *Func, inst: Air.Inst.Index) !void {
|
||||
|
||||
break :result return_mcv;
|
||||
},
|
||||
else => return func.fail("TODO: implement airAbs {}", .{scalar_ty.fmt(pt)}),
|
||||
else => return func.fail("TODO: implement airAbs {f}", .{scalar_ty.fmt(pt)}),
|
||||
}
|
||||
|
||||
break :result .unreach;
|
||||
@ -4331,7 +4306,7 @@ fn airByteSwap(func: *Func, inst: Air.Inst.Index) !void {
|
||||
|
||||
break :result dest_mcv;
|
||||
},
|
||||
else => return func.fail("TODO: airByteSwap {}", .{ty.fmt(pt)}),
|
||||
else => return func.fail("TODO: airByteSwap {f}", .{ty.fmt(pt)}),
|
||||
}
|
||||
};
|
||||
return func.finishAir(inst, result, .{ ty_op.operand, .none, .none });
|
||||
@ -4397,7 +4372,7 @@ fn airUnaryMath(func: *Func, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
|
||||
else => return func.fail("TODO: airUnaryMath Float {s}", .{@tagName(tag)}),
|
||||
}
|
||||
},
|
||||
else => return func.fail("TODO: airUnaryMath ty: {}", .{ty.fmt(pt)}),
|
||||
else => return func.fail("TODO: airUnaryMath ty: {f}", .{ty.fmt(pt)}),
|
||||
}
|
||||
|
||||
break :result MCValue{ .register = dst_reg };
|
||||
@ -4497,7 +4472,7 @@ fn load(func: *Func, dst_mcv: MCValue, ptr_mcv: MCValue, ptr_ty: Type) InnerErro
|
||||
const zcu = pt.zcu;
|
||||
const dst_ty = ptr_ty.childType(zcu);
|
||||
|
||||
log.debug("loading {}:{} into {}", .{ ptr_mcv, ptr_ty.fmt(pt), dst_mcv });
|
||||
log.debug("loading {}:{f} into {}", .{ ptr_mcv, ptr_ty.fmt(pt), dst_mcv });
|
||||
|
||||
switch (ptr_mcv) {
|
||||
.none,
|
||||
@ -4550,7 +4525,7 @@ fn airStore(func: *Func, inst: Air.Inst.Index, safety: bool) !void {
|
||||
fn store(func: *Func, ptr_mcv: MCValue, src_mcv: MCValue, ptr_ty: Type) !void {
|
||||
const zcu = func.pt.zcu;
|
||||
const src_ty = ptr_ty.childType(zcu);
|
||||
log.debug("storing {}:{} in {}:{}", .{ src_mcv, src_ty.fmt(func.pt), ptr_mcv, ptr_ty.fmt(func.pt) });
|
||||
log.debug("storing {}:{f} in {}:{f}", .{ src_mcv, src_ty.fmt(func.pt), ptr_mcv, ptr_ty.fmt(func.pt) });
|
||||
|
||||
switch (ptr_mcv) {
|
||||
.none => unreachable,
|
||||
@ -7305,7 +7280,7 @@ fn airBitCast(func: *Func, inst: Air.Inst.Index) !void {
|
||||
const bit_size = dst_ty.bitSize(zcu);
|
||||
if (abi_size * 8 <= bit_size) break :result dst_mcv;
|
||||
|
||||
return func.fail("TODO: airBitCast {} to {}", .{ src_ty.fmt(pt), dst_ty.fmt(pt) });
|
||||
return func.fail("TODO: airBitCast {f} to {f}", .{ src_ty.fmt(pt), dst_ty.fmt(pt) });
|
||||
};
|
||||
return func.finishAir(inst, result, .{ ty_op.operand, .none, .none });
|
||||
}
|
||||
@ -8121,7 +8096,7 @@ fn airAggregateInit(func: *Func, inst: Air.Inst.Index) !void {
|
||||
);
|
||||
break :result .{ .load_frame = .{ .index = frame_index } };
|
||||
},
|
||||
else => return func.fail("TODO: airAggregate {}", .{result_ty.fmt(pt)}),
|
||||
else => return func.fail("TODO: airAggregate {f}", .{result_ty.fmt(pt)}),
|
||||
}
|
||||
};
|
||||
|
||||
@ -8322,7 +8297,7 @@ fn resolveCallingConventionValues(
|
||||
};
|
||||
|
||||
result.return_value = switch (ret_tracking_i) {
|
||||
else => return func.fail("ty {} took {} tracking return indices", .{ ret_ty.fmt(pt), ret_tracking_i }),
|
||||
else => return func.fail("ty {f} took {} tracking return indices", .{ ret_ty.fmt(pt), ret_tracking_i }),
|
||||
1 => ret_tracking[0],
|
||||
2 => InstTracking.init(.{ .register_pair = .{
|
||||
ret_tracking[0].short.register, ret_tracking[1].short.register,
|
||||
@ -8377,7 +8352,7 @@ fn resolveCallingConventionValues(
|
||||
else => return func.fail("TODO: C calling convention arg class {}", .{class}),
|
||||
} else {
|
||||
arg.* = switch (arg_mcv_i) {
|
||||
else => return func.fail("ty {} took {} tracking arg indices", .{ ty.fmt(pt), arg_mcv_i }),
|
||||
else => return func.fail("ty {f} took {} tracking arg indices", .{ ty.fmt(pt), arg_mcv_i }),
|
||||
1 => arg_mcv[0],
|
||||
2 => .{ .register_pair = .{ arg_mcv[0].register, arg_mcv[1].register } },
|
||||
};
|
||||
|
||||
@ -18,20 +18,28 @@ pub const Error = Lower.Error || error{
|
||||
};
|
||||
|
||||
pub fn emitMir(emit: *Emit) Error!void {
|
||||
return @errorCast(emit.emitMirInner());
|
||||
}
|
||||
|
||||
fn emitMirInner(emit: *Emit) anyerror!void {
|
||||
const gpa = emit.bin_file.comp.gpa;
|
||||
var aw: std.io.AllocatingWriter = undefined;
|
||||
const bw = aw.fromArrayList(gpa, emit.code);
|
||||
defer emit.code.* = aw.toArrayList();
|
||||
|
||||
log.debug("mir instruction len: {}", .{emit.lower.mir.instructions.len});
|
||||
for (0..emit.lower.mir.instructions.len) |mir_i| {
|
||||
const mir_index: Mir.Inst.Index = @intCast(mir_i);
|
||||
try emit.code_offset_mapping.putNoClobber(
|
||||
emit.lower.allocator,
|
||||
mir_index,
|
||||
@intCast(emit.code.items.len),
|
||||
@intCast(bw.count),
|
||||
);
|
||||
const lowered = try emit.lower.lowerMir(mir_index, .{ .allow_frame_locs = true });
|
||||
var lowered_relocs = lowered.relocs;
|
||||
for (lowered.insts, 0..) |lowered_inst, lowered_index| {
|
||||
const start_offset: u32 = @intCast(emit.code.items.len);
|
||||
try lowered_inst.encode(emit.code.writer(gpa));
|
||||
const start_offset: u32 = @intCast(bw.count);
|
||||
try lowered_inst.encode(bw);
|
||||
|
||||
while (lowered_relocs.len > 0 and
|
||||
lowered_relocs[0].lowered_inst_index == lowered_index) : ({
|
||||
@ -123,7 +131,7 @@ pub fn emitMir(emit: *Emit) Error!void {
|
||||
log.debug("mirDbgPrologueEnd (line={d}, col={d})", .{
|
||||
emit.prev_di_line, emit.prev_di_column,
|
||||
});
|
||||
try emit.dbgAdvancePCAndLine(emit.prev_di_line, emit.prev_di_column);
|
||||
try emit.dbgAdvancePCAndLine(emit.prev_di_line, emit.prev_di_column, bw.count);
|
||||
},
|
||||
.plan9 => {},
|
||||
.none => {},
|
||||
@ -132,6 +140,7 @@ pub fn emitMir(emit: *Emit) Error!void {
|
||||
.pseudo_dbg_line_column => try emit.dbgAdvancePCAndLine(
|
||||
mir_inst.data.pseudo_dbg_line_column.line,
|
||||
mir_inst.data.pseudo_dbg_line_column.column,
|
||||
bw.count,
|
||||
),
|
||||
.pseudo_dbg_epilogue_begin => {
|
||||
switch (emit.debug_output) {
|
||||
@ -140,7 +149,7 @@ pub fn emitMir(emit: *Emit) Error!void {
|
||||
log.debug("mirDbgEpilogueBegin (line={d}, col={d})", .{
|
||||
emit.prev_di_line, emit.prev_di_column,
|
||||
});
|
||||
try emit.dbgAdvancePCAndLine(emit.prev_di_line, emit.prev_di_column);
|
||||
try emit.dbgAdvancePCAndLine(emit.prev_di_line, emit.prev_di_column, bw.count);
|
||||
},
|
||||
.plan9 => {},
|
||||
.none => {},
|
||||
@ -150,7 +159,7 @@ pub fn emitMir(emit: *Emit) Error!void {
|
||||
}
|
||||
}
|
||||
}
|
||||
try emit.fixupRelocs();
|
||||
try emit.fixupRelocs(aw.getWritten());
|
||||
}
|
||||
|
||||
pub fn deinit(emit: *Emit) void {
|
||||
@ -170,14 +179,14 @@ const Reloc = struct {
|
||||
fmt: encoding.Lir.Format,
|
||||
};
|
||||
|
||||
fn fixupRelocs(emit: *Emit) Error!void {
|
||||
fn fixupRelocs(emit: *Emit, written: []u8) Error!void {
|
||||
for (emit.relocs.items) |reloc| {
|
||||
log.debug("target inst: {}", .{emit.lower.mir.instructions.get(reloc.target)});
|
||||
log.debug("target inst: {f}", .{emit.lower.mir.instructions.get(reloc.target)});
|
||||
const target = emit.code_offset_mapping.get(reloc.target) orelse
|
||||
return emit.fail("relocation target not found!", .{});
|
||||
|
||||
const disp = @as(i32, @intCast(target)) - @as(i32, @intCast(reloc.source));
|
||||
const code: *[4]u8 = emit.code.items[reloc.source + reloc.offset ..][0..4];
|
||||
const code: *[4]u8 = written[reloc.source + reloc.offset ..][0..4];
|
||||
|
||||
switch (reloc.fmt) {
|
||||
.J => riscv_util.writeInstJ(code, @bitCast(disp)),
|
||||
@ -187,9 +196,9 @@ fn fixupRelocs(emit: *Emit) Error!void {
|
||||
}
|
||||
}
|
||||
|
||||
fn dbgAdvancePCAndLine(emit: *Emit, line: u32, column: u32) Error!void {
|
||||
fn dbgAdvancePCAndLine(emit: *Emit, line: u32, column: u32, pc: usize) Error!void {
|
||||
const delta_line = @as(i33, line) - @as(i33, emit.prev_di_line);
|
||||
const delta_pc: usize = emit.code.items.len - emit.prev_di_pc;
|
||||
const delta_pc = pc - emit.prev_di_pc;
|
||||
log.debug(" (advance pc={d} and line={d})", .{ delta_pc, delta_line });
|
||||
switch (emit.debug_output) {
|
||||
.dwarf => |dw| {
|
||||
|
||||
@ -61,7 +61,7 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index, options: struct {
|
||||
defer lower.result_relocs_len = undefined;
|
||||
|
||||
const inst = lower.mir.instructions.get(index);
|
||||
log.debug("lowerMir {}", .{inst});
|
||||
log.debug("lowerMir {f}", .{inst});
|
||||
switch (inst.tag) {
|
||||
else => try lower.generic(inst),
|
||||
.pseudo_dbg_line_column,
|
||||
|
||||
@ -92,14 +92,9 @@ pub const Inst = struct {
|
||||
},
|
||||
};
|
||||
|
||||
pub fn format(
|
||||
inst: Inst,
|
||||
comptime fmt: []const u8,
|
||||
_: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
pub fn format(inst: Inst, bw: *std.io.BufferedWriter, comptime fmt: []const u8) anyerror!void {
|
||||
assert(fmt.len == 0);
|
||||
try writer.print("Tag: {s}, Data: {s}", .{ @tagName(inst.tag), @tagName(inst.data) });
|
||||
try bw.print("Tag: {s}, Data: {s}", .{ @tagName(inst.tag), @tagName(inst.data) });
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
@ -256,21 +256,12 @@ pub const FrameIndex = enum(u32) {
|
||||
return @intFromEnum(fi) < named_count;
|
||||
}
|
||||
|
||||
pub fn format(
|
||||
fi: FrameIndex,
|
||||
comptime fmt: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) @TypeOf(writer).Error!void {
|
||||
try writer.writeAll("FrameIndex");
|
||||
if (fi.isNamed()) {
|
||||
try writer.writeByte('.');
|
||||
try writer.writeAll(@tagName(fi));
|
||||
} else {
|
||||
try writer.writeByte('(');
|
||||
try std.fmt.formatType(@intFromEnum(fi), fmt, options, writer, 0);
|
||||
try writer.writeByte(')');
|
||||
}
|
||||
pub fn format(fi: FrameIndex, bw: *std.io.BufferedWriter, comptime _: []const u8) anyerror!void {
|
||||
try bw.writeAll("FrameIndex");
|
||||
if (fi.isNamed())
|
||||
try bw.print(".{s}", .{@tagName(fi)})
|
||||
else
|
||||
try bw.print("({d})", .{@intFromEnum(fi)});
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
@ -1001,7 +1001,7 @@ fn airArg(self: *Self, inst: Air.Inst.Index) InnerError!void {
|
||||
switch (self.args[arg_index]) {
|
||||
.stack_offset => |off| {
|
||||
const abi_size = math.cast(u32, ty.abiSize(zcu)) orelse {
|
||||
return self.fail("type '{}' too big to fit into stack frame", .{ty.fmt(pt)});
|
||||
return self.fail("type '{f}' too big to fit into stack frame", .{ty.fmt(pt)});
|
||||
};
|
||||
const offset = off + abi_size;
|
||||
break :blk .{ .stack_offset = offset };
|
||||
@ -2748,7 +2748,7 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
|
||||
}
|
||||
|
||||
const abi_size = math.cast(u32, elem_ty.abiSize(zcu)) orelse {
|
||||
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(pt)});
|
||||
return self.fail("type '{f}' too big to fit into stack frame", .{elem_ty.fmt(pt)});
|
||||
};
|
||||
// TODO swap this for inst.ty.ptrAlign
|
||||
const abi_align = elem_ty.abiAlignment(zcu);
|
||||
@ -2760,7 +2760,7 @@ fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
|
||||
const zcu = pt.zcu;
|
||||
const elem_ty = self.typeOfIndex(inst);
|
||||
const abi_size = math.cast(u32, elem_ty.abiSize(zcu)) orelse {
|
||||
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(pt)});
|
||||
return self.fail("type '{f}' too big to fit into stack frame", .{elem_ty.fmt(pt)});
|
||||
};
|
||||
const abi_align = elem_ty.abiAlignment(zcu);
|
||||
self.stack_align = self.stack_align.max(abi_align);
|
||||
@ -4111,7 +4111,7 @@ fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue {
|
||||
while (true) {
|
||||
i -= 1;
|
||||
if (self.branch_stack.items[i].inst_table.get(inst)) |mcv| {
|
||||
log.debug("getResolvedInstValue %{} => {}", .{ inst, mcv });
|
||||
log.debug("getResolvedInstValue %{f} => {}", .{ inst, mcv });
|
||||
assert(mcv != .dead);
|
||||
return mcv;
|
||||
}
|
||||
@ -4382,7 +4382,7 @@ fn processDeath(self: *Self, inst: Air.Inst.Index) void {
|
||||
const prev_value = self.getResolvedInstValue(inst);
|
||||
const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
|
||||
branch.inst_table.putAssumeCapacity(inst, .dead);
|
||||
log.debug("%{} death: {} -> .dead", .{ inst, prev_value });
|
||||
log.debug("%{f} death: {} -> .dead", .{ inst, prev_value });
|
||||
switch (prev_value) {
|
||||
.register => |reg| {
|
||||
self.register_manager.freeReg(reg);
|
||||
|
||||
@ -1463,7 +1463,7 @@ fn allocStack(cg: *CodeGen, ty: Type) !WValue {
|
||||
}
|
||||
|
||||
const abi_size = std.math.cast(u32, ty.abiSize(zcu)) orelse {
|
||||
return cg.fail("Type {} with ABI size of {d} exceeds stack frame size", .{
|
||||
return cg.fail("Type {f} with ABI size of {d} exceeds stack frame size", .{
|
||||
ty.fmt(pt), ty.abiSize(zcu),
|
||||
});
|
||||
};
|
||||
@ -1497,7 +1497,7 @@ fn allocStackPtr(cg: *CodeGen, inst: Air.Inst.Index) !WValue {
|
||||
|
||||
const abi_alignment = ptr_ty.ptrAlignment(zcu);
|
||||
const abi_size = std.math.cast(u32, pointee_ty.abiSize(zcu)) orelse {
|
||||
return cg.fail("Type {} with ABI size of {d} exceeds stack frame size", .{
|
||||
return cg.fail("Type {f} with ABI size of {d} exceeds stack frame size", .{
|
||||
pointee_ty.fmt(pt), pointee_ty.abiSize(zcu),
|
||||
});
|
||||
};
|
||||
@ -2404,7 +2404,7 @@ fn store(cg: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerErr
|
||||
try cg.memcpy(lhs, rhs, .{ .imm32 = @as(u32, @intCast(ty.abiSize(zcu))) });
|
||||
},
|
||||
else => if (abi_size > 8) {
|
||||
return cg.fail("TODO: `store` for type `{}` with abisize `{d}`", .{
|
||||
return cg.fail("TODO: `store` for type `{f}` with abisize `{d}`", .{
|
||||
ty.fmt(pt),
|
||||
abi_size,
|
||||
});
|
||||
@ -2597,7 +2597,7 @@ fn binOp(cg: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WV
|
||||
return cg.binOpBigInt(lhs, rhs, ty, op);
|
||||
} else {
|
||||
return cg.fail(
|
||||
"TODO: Implement binary operation for type: {}",
|
||||
"TODO: Implement binary operation for type: {f}",
|
||||
.{ty.fmt(pt)},
|
||||
);
|
||||
}
|
||||
@ -2817,7 +2817,7 @@ fn airAbs(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
|
||||
|
||||
switch (scalar_ty.zigTypeTag(zcu)) {
|
||||
.int => if (ty.zigTypeTag(zcu) == .vector) {
|
||||
return cg.fail("TODO implement airAbs for {}", .{ty.fmt(pt)});
|
||||
return cg.fail("TODO implement airAbs for {f}", .{ty.fmt(pt)});
|
||||
} else {
|
||||
const int_bits = ty.intInfo(zcu).bits;
|
||||
const wasm_bits = toWasmBits(int_bits) orelse {
|
||||
@ -3244,7 +3244,7 @@ fn lowerConstant(cg: *CodeGen, val: Value, ty: Type) InnerError!WValue {
|
||||
return .{ .imm32 = @intFromBool(!val.isNull(zcu)) };
|
||||
},
|
||||
.aggregate => switch (ip.indexToKey(ty.ip_index)) {
|
||||
.array_type => return cg.fail("Wasm TODO: LowerConstant for {}", .{ty.fmt(pt)}),
|
||||
.array_type => return cg.fail("Wasm TODO: LowerConstant for {f}", .{ty.fmt(pt)}),
|
||||
.vector_type => {
|
||||
assert(determineSimdStoreStrategy(ty, zcu, cg.target) == .direct);
|
||||
var buf: [16]u8 = undefined;
|
||||
@ -3608,7 +3608,7 @@ fn airNot(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
|
||||
} else {
|
||||
const int_info = operand_ty.intInfo(zcu);
|
||||
const wasm_bits = toWasmBits(int_info.bits) orelse {
|
||||
return cg.fail("TODO: Implement binary NOT for {}", .{operand_ty.fmt(pt)});
|
||||
return cg.fail("TODO: Implement binary NOT for {f}", .{operand_ty.fmt(pt)});
|
||||
};
|
||||
|
||||
switch (wasm_bits) {
|
||||
@ -3874,7 +3874,7 @@ fn airStructFieldVal(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
|
||||
},
|
||||
else => result: {
|
||||
const offset = std.math.cast(u32, struct_ty.structFieldOffset(field_index, zcu)) orelse {
|
||||
return cg.fail("Field type '{}' too big to fit into stack frame", .{field_ty.fmt(pt)});
|
||||
return cg.fail("Field type '{f}' too big to fit into stack frame", .{field_ty.fmt(pt)});
|
||||
};
|
||||
if (isByRef(field_ty, zcu, cg.target)) {
|
||||
switch (operand) {
|
||||
@ -4360,7 +4360,7 @@ fn isNull(cg: *CodeGen, operand: WValue, optional_ty: Type, opcode: std.wasm.Opc
|
||||
// a pointer to the stack value
|
||||
if (payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
|
||||
const offset = std.math.cast(u32, payload_ty.abiSize(zcu)) orelse {
|
||||
return cg.fail("Optional type {} too big to fit into stack frame", .{optional_ty.fmt(pt)});
|
||||
return cg.fail("Optional type {f} too big to fit into stack frame", .{optional_ty.fmt(pt)});
|
||||
};
|
||||
try cg.addMemArg(.i32_load8_u, .{ .offset = operand.offset() + offset, .alignment = 1 });
|
||||
}
|
||||
@ -4430,7 +4430,7 @@ fn airOptionalPayloadPtrSet(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void
|
||||
}
|
||||
|
||||
const offset = std.math.cast(u32, payload_ty.abiSize(zcu)) orelse {
|
||||
return cg.fail("Optional type {} too big to fit into stack frame", .{opt_ty.fmt(pt)});
|
||||
return cg.fail("Optional type {f} too big to fit into stack frame", .{opt_ty.fmt(pt)});
|
||||
};
|
||||
|
||||
try cg.emitWValue(operand);
|
||||
@ -4462,7 +4462,7 @@ fn airWrapOptional(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
|
||||
break :result cg.reuseOperand(ty_op.operand, operand);
|
||||
}
|
||||
const offset = std.math.cast(u32, payload_ty.abiSize(zcu)) orelse {
|
||||
return cg.fail("Optional type {} too big to fit into stack frame", .{op_ty.fmt(pt)});
|
||||
return cg.fail("Optional type {f} too big to fit into stack frame", .{op_ty.fmt(pt)});
|
||||
};
|
||||
|
||||
// Create optional type, set the non-null bit, and store the operand inside the optional type
|
||||
@ -6196,7 +6196,7 @@ fn airMulWithOverflow(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
|
||||
_ = try cg.load(overflow_ret, Type.i32, 0);
|
||||
try cg.addLocal(.local_set, overflow_bit.local.value);
|
||||
break :blk res;
|
||||
} else return cg.fail("TODO: @mulWithOverflow for {}", .{ty.fmt(pt)});
|
||||
} else return cg.fail("TODO: @mulWithOverflow for {f}", .{ty.fmt(pt)});
|
||||
var bin_op_local = try mul.toLocal(cg, ty);
|
||||
defer bin_op_local.free(cg);
|
||||
|
||||
@ -6749,7 +6749,7 @@ fn airMod(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
|
||||
const add = try cg.binOp(rem, rhs, ty, .add);
|
||||
break :result try cg.binOp(add, rhs, ty, .rem);
|
||||
}
|
||||
return cg.fail("TODO: @mod for {}", .{ty.fmt(pt)});
|
||||
return cg.fail("TODO: @mod for {f}", .{ty.fmt(pt)});
|
||||
};
|
||||
|
||||
return cg.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
|
||||
@ -6767,7 +6767,7 @@ fn airSatMul(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
|
||||
const lhs = try cg.resolveInst(bin_op.lhs);
|
||||
const rhs = try cg.resolveInst(bin_op.rhs);
|
||||
const wasm_bits = toWasmBits(int_info.bits) orelse {
|
||||
return cg.fail("TODO: mul_sat for {}", .{ty.fmt(pt)});
|
||||
return cg.fail("TODO: mul_sat for {f}", .{ty.fmt(pt)});
|
||||
};
|
||||
|
||||
switch (wasm_bits) {
|
||||
@ -6804,7 +6804,7 @@ fn airSatMul(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
|
||||
},
|
||||
64 => {
|
||||
if (!(int_info.bits == 64 and int_info.signedness == .signed)) {
|
||||
return cg.fail("TODO: mul_sat for {}", .{ty.fmt(pt)});
|
||||
return cg.fail("TODO: mul_sat for {f}", .{ty.fmt(pt)});
|
||||
}
|
||||
const overflow_ret = try cg.allocStack(Type.i32);
|
||||
_ = try cg.callIntrinsic(
|
||||
@ -6822,7 +6822,7 @@ fn airSatMul(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
|
||||
},
|
||||
128 => {
|
||||
if (!(int_info.bits == 128 and int_info.signedness == .signed)) {
|
||||
return cg.fail("TODO: mul_sat for {}", .{ty.fmt(pt)});
|
||||
return cg.fail("TODO: mul_sat for {f}", .{ty.fmt(pt)});
|
||||
}
|
||||
const overflow_ret = try cg.allocStack(Type.i32);
|
||||
const ret = try cg.callIntrinsic(
|
||||
|
||||
@ -14,16 +14,20 @@ const codegen = @import("../../codegen.zig");
|
||||
|
||||
mir: Mir,
|
||||
wasm: *Wasm,
|
||||
/// The binary representation that will be emitted by this module.
|
||||
code: *std.ArrayListUnmanaged(u8),
|
||||
/// The binary representation of this module is written here.
|
||||
bw: *std.io.BufferedWriter,
|
||||
|
||||
pub const Error = error{
|
||||
OutOfMemory,
|
||||
};
|
||||
|
||||
pub fn lowerToCode(emit: *Emit) Error!void {
|
||||
return @errorCast(emit.lowerToCodeInner());
|
||||
}
|
||||
|
||||
fn lowerToCodeInner(emit: *Emit) anyerror!void {
|
||||
const mir = &emit.mir;
|
||||
const code = emit.code;
|
||||
const bw = emit.bw;
|
||||
const wasm = emit.wasm;
|
||||
const comp = wasm.base.comp;
|
||||
const gpa = comp.gpa;
|
||||
@ -41,18 +45,19 @@ pub fn lowerToCode(emit: *Emit) Error!void {
|
||||
},
|
||||
.block, .loop => {
|
||||
const block_type = datas[inst].block_type;
|
||||
try code.ensureUnusedCapacity(gpa, 2);
|
||||
code.appendAssumeCapacity(@intFromEnum(tags[inst]));
|
||||
code.appendAssumeCapacity(@intFromEnum(block_type));
|
||||
try bw.writeAll(&.{
|
||||
@intFromEnum(tags[inst]),
|
||||
@intFromEnum(block_type),
|
||||
});
|
||||
|
||||
inst += 1;
|
||||
continue :loop tags[inst];
|
||||
},
|
||||
.uav_ref => {
|
||||
if (is_obj) {
|
||||
try uavRefObj(wasm, code, datas[inst].ip_index, 0, is_wasm32);
|
||||
try uavRefObj(wasm, bw, datas[inst].ip_index, 0, is_wasm32);
|
||||
} else {
|
||||
try uavRefExe(wasm, code, datas[inst].ip_index, 0, is_wasm32);
|
||||
try uavRefExe(wasm, bw, datas[inst].ip_index, 0, is_wasm32);
|
||||
}
|
||||
inst += 1;
|
||||
continue :loop tags[inst];
|
||||
@ -60,20 +65,20 @@ pub fn lowerToCode(emit: *Emit) Error!void {
|
||||
.uav_ref_off => {
|
||||
const extra = mir.extraData(Mir.UavRefOff, datas[inst].payload).data;
|
||||
if (is_obj) {
|
||||
try uavRefObj(wasm, code, extra.value, extra.offset, is_wasm32);
|
||||
try uavRefObj(wasm, bw, extra.value, extra.offset, is_wasm32);
|
||||
} else {
|
||||
try uavRefExe(wasm, code, extra.value, extra.offset, is_wasm32);
|
||||
try uavRefExe(wasm, bw, extra.value, extra.offset, is_wasm32);
|
||||
}
|
||||
inst += 1;
|
||||
continue :loop tags[inst];
|
||||
},
|
||||
.nav_ref => {
|
||||
try navRefOff(wasm, code, .{ .nav_index = datas[inst].nav_index, .offset = 0 }, is_wasm32);
|
||||
try navRefOff(wasm, bw, .{ .nav_index = datas[inst].nav_index, .offset = 0 }, is_wasm32);
|
||||
inst += 1;
|
||||
continue :loop tags[inst];
|
||||
},
|
||||
.nav_ref_off => {
|
||||
try navRefOff(wasm, code, mir.extraData(Mir.NavRefOff, datas[inst].payload).data, is_wasm32);
|
||||
try navRefOff(wasm, bw, mir.extraData(Mir.NavRefOff, datas[inst].payload).data, is_wasm32);
|
||||
inst += 1;
|
||||
continue :loop tags[inst];
|
||||
},
|
||||
@ -81,11 +86,11 @@ pub fn lowerToCode(emit: *Emit) Error!void {
|
||||
const indirect_func_idx: Wasm.ZcuIndirectFunctionSetIndex = @enumFromInt(
|
||||
wasm.zcu_indirect_function_set.getIndex(datas[inst].nav_index).?,
|
||||
);
|
||||
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_const));
|
||||
try bw.writeByte(@intFromEnum(std.wasm.Opcode.i32_const));
|
||||
if (is_obj) {
|
||||
@panic("TODO");
|
||||
} else {
|
||||
leb.writeUleb128(code.fixedWriter(), 1 + @intFromEnum(indirect_func_idx)) catch unreachable;
|
||||
try bw.writeLeb128(1 + @intFromEnum(indirect_func_idx));
|
||||
}
|
||||
inst += 1;
|
||||
continue :loop tags[inst];
|
||||
@ -95,52 +100,48 @@ pub fn lowerToCode(emit: *Emit) Error!void {
|
||||
continue :loop tags[inst];
|
||||
},
|
||||
.errors_len => {
|
||||
try code.ensureUnusedCapacity(gpa, 6);
|
||||
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_const));
|
||||
try bw.writeByte(@intFromEnum(std.wasm.Opcode.i32_const));
|
||||
// MIR is lowered during flush, so there is indeed only one thread at this time.
|
||||
const errors_len = 1 + comp.zcu.?.intern_pool.global_error_set.getNamesFromMainThread().len;
|
||||
leb.writeIleb128(code.fixedWriter(), errors_len) catch unreachable;
|
||||
const errors_len: u32 = @intCast(1 + comp.zcu.?.intern_pool.global_error_set.getNamesFromMainThread().len);
|
||||
try bw.writeLeb128(@as(i32, @bitCast(errors_len)));
|
||||
|
||||
inst += 1;
|
||||
continue :loop tags[inst];
|
||||
},
|
||||
.error_name_table_ref => {
|
||||
wasm.error_name_table_ref_count += 1;
|
||||
try code.ensureUnusedCapacity(gpa, 11);
|
||||
const opcode: std.wasm.Opcode = if (is_wasm32) .i32_const else .i64_const;
|
||||
code.appendAssumeCapacity(@intFromEnum(opcode));
|
||||
try bw.writeByte(@intFromEnum(opcode));
|
||||
if (is_obj) {
|
||||
try wasm.out_relocs.append(gpa, .{
|
||||
.offset = @intCast(code.items.len),
|
||||
.offset = @intCast(bw.count),
|
||||
.pointee = .{ .symbol_index = try wasm.errorNameTableSymbolIndex() },
|
||||
.tag = if (is_wasm32) .memory_addr_leb else .memory_addr_leb64,
|
||||
.addend = 0,
|
||||
});
|
||||
code.appendNTimesAssumeCapacity(0, if (is_wasm32) 5 else 10);
|
||||
try bw.splatByteAll(0, if (is_wasm32) 5 else 10);
|
||||
|
||||
inst += 1;
|
||||
continue :loop tags[inst];
|
||||
} else {
|
||||
const addr: u32 = wasm.errorNameTableAddr();
|
||||
leb.writeIleb128(code.fixedWriter(), addr) catch unreachable;
|
||||
try bw.writeLeb128(@as(i32, @bitCast(addr)));
|
||||
|
||||
inst += 1;
|
||||
continue :loop tags[inst];
|
||||
}
|
||||
},
|
||||
.br_if, .br, .memory_grow, .memory_size => {
|
||||
try code.ensureUnusedCapacity(gpa, 11);
|
||||
code.appendAssumeCapacity(@intFromEnum(tags[inst]));
|
||||
leb.writeUleb128(code.fixedWriter(), datas[inst].label) catch unreachable;
|
||||
try bw.writeByte(@intFromEnum(tags[inst]));
|
||||
try bw.writeLeb128(datas[inst].label);
|
||||
|
||||
inst += 1;
|
||||
continue :loop tags[inst];
|
||||
},
|
||||
|
||||
.local_get, .local_set, .local_tee => {
|
||||
try code.ensureUnusedCapacity(gpa, 11);
|
||||
code.appendAssumeCapacity(@intFromEnum(tags[inst]));
|
||||
leb.writeUleb128(code.fixedWriter(), datas[inst].local) catch unreachable;
|
||||
try bw.writeByte(@intFromEnum(tags[inst]));
|
||||
try bw.writeLeb128(datas[inst].local);
|
||||
|
||||
inst += 1;
|
||||
continue :loop tags[inst];
|
||||
@ -150,29 +151,27 @@ pub fn lowerToCode(emit: *Emit) Error!void {
|
||||
const extra_index = datas[inst].payload;
|
||||
const extra = mir.extraData(Mir.JumpTable, extra_index);
|
||||
const labels = mir.extra[extra.end..][0..extra.data.length];
|
||||
try code.ensureUnusedCapacity(gpa, 11 + 10 * labels.len);
|
||||
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.br_table));
|
||||
try bw.writeByte(@intFromEnum(std.wasm.Opcode.br_table));
|
||||
// -1 because default label is not part of length/depth.
|
||||
leb.writeUleb128(code.fixedWriter(), extra.data.length - 1) catch unreachable;
|
||||
for (labels) |label| leb.writeUleb128(code.fixedWriter(), label) catch unreachable;
|
||||
try bw.writeLeb128(extra.data.length - 1);
|
||||
for (labels) |label| try bw.writeLeb128(label);
|
||||
|
||||
inst += 1;
|
||||
continue :loop tags[inst];
|
||||
},
|
||||
|
||||
.call_nav => {
|
||||
try code.ensureUnusedCapacity(gpa, 6);
|
||||
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.call));
|
||||
try bw.writeByte(@intFromEnum(std.wasm.Opcode.call));
|
||||
if (is_obj) {
|
||||
try wasm.out_relocs.append(gpa, .{
|
||||
.offset = @intCast(code.items.len),
|
||||
.offset = @intCast(bw.count),
|
||||
.pointee = .{ .symbol_index = try wasm.navSymbolIndex(datas[inst].nav_index) },
|
||||
.tag = .function_index_leb,
|
||||
.addend = 0,
|
||||
});
|
||||
code.appendNTimesAssumeCapacity(0, 5);
|
||||
try bw.splatByteAll(0, 5);
|
||||
} else {
|
||||
appendOutputFunctionIndex(code, .fromIpNav(wasm, datas[inst].nav_index));
|
||||
try appendOutputFunctionIndex(bw, .fromIpNav(wasm, datas[inst].nav_index));
|
||||
}
|
||||
|
||||
inst += 1;
|
||||
@ -180,7 +179,6 @@ pub fn lowerToCode(emit: *Emit) Error!void {
|
||||
},
|
||||
|
||||
.call_indirect => {
|
||||
try code.ensureUnusedCapacity(gpa, 11);
|
||||
const fn_info = comp.zcu.?.typeToFunc(.fromInterned(datas[inst].ip_index)).?;
|
||||
const func_ty_index = wasm.getExistingFunctionType(
|
||||
fn_info.cc,
|
||||
@ -188,38 +186,37 @@ pub fn lowerToCode(emit: *Emit) Error!void {
|
||||
.fromInterned(fn_info.return_type),
|
||||
target,
|
||||
).?;
|
||||
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.call_indirect));
|
||||
try bw.writeByte(@intFromEnum(std.wasm.Opcode.call_indirect));
|
||||
if (is_obj) {
|
||||
try wasm.out_relocs.append(gpa, .{
|
||||
.offset = @intCast(code.items.len),
|
||||
.offset = @intCast(bw.count),
|
||||
.pointee = .{ .type_index = func_ty_index },
|
||||
.tag = .type_index_leb,
|
||||
.addend = 0,
|
||||
});
|
||||
code.appendNTimesAssumeCapacity(0, 5);
|
||||
try bw.splatByteAll(0, 5);
|
||||
} else {
|
||||
const index: Wasm.Flush.FuncTypeIndex = .fromTypeIndex(func_ty_index, &wasm.flush_buffer);
|
||||
leb.writeUleb128(code.fixedWriter(), @intFromEnum(index)) catch unreachable;
|
||||
try bw.writeLeb128(@intFromEnum(index));
|
||||
}
|
||||
leb.writeUleb128(code.fixedWriter(), @as(u32, 0)) catch unreachable; // table index
|
||||
try bw.writeUleb128(0); // table index
|
||||
|
||||
inst += 1;
|
||||
continue :loop tags[inst];
|
||||
},
|
||||
|
||||
.call_tag_name => {
|
||||
try code.ensureUnusedCapacity(gpa, 6);
|
||||
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.call));
|
||||
try bw.writeByte(@intFromEnum(std.wasm.Opcode.call));
|
||||
if (is_obj) {
|
||||
try wasm.out_relocs.append(gpa, .{
|
||||
.offset = @intCast(code.items.len),
|
||||
.offset = @intCast(bw.count),
|
||||
.pointee = .{ .symbol_index = try wasm.tagNameSymbolIndex(datas[inst].ip_index) },
|
||||
.tag = .function_index_leb,
|
||||
.addend = 0,
|
||||
});
|
||||
code.appendNTimesAssumeCapacity(0, 5);
|
||||
try bw.splatByteAll(0, 5);
|
||||
} else {
|
||||
appendOutputFunctionIndex(code, .fromTagNameType(wasm, datas[inst].ip_index));
|
||||
try appendOutputFunctionIndex(bw, .fromTagNameType(wasm, datas[inst].ip_index));
|
||||
}
|
||||
|
||||
inst += 1;
|
||||
@ -232,18 +229,17 @@ pub fn lowerToCode(emit: *Emit) Error!void {
|
||||
// table initialized based on the `Mir.Intrinsic` enum.
|
||||
const symbol_name = try wasm.internString(@tagName(datas[inst].intrinsic));
|
||||
|
||||
try code.ensureUnusedCapacity(gpa, 6);
|
||||
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.call));
|
||||
try bw.writeByte(@intFromEnum(std.wasm.Opcode.call));
|
||||
if (is_obj) {
|
||||
try wasm.out_relocs.append(gpa, .{
|
||||
.offset = @intCast(code.items.len),
|
||||
.offset = @intCast(bw.count),
|
||||
.pointee = .{ .symbol_index = try wasm.symbolNameIndex(symbol_name) },
|
||||
.tag = .function_index_leb,
|
||||
.addend = 0,
|
||||
});
|
||||
code.appendNTimesAssumeCapacity(0, 5);
|
||||
try bw.splatByteAll(0, 5);
|
||||
} else {
|
||||
appendOutputFunctionIndex(code, .fromSymbolName(wasm, symbol_name));
|
||||
try appendOutputFunctionIndex(bw, .fromSymbolName(wasm, symbol_name));
|
||||
}
|
||||
|
||||
inst += 1;
|
||||
@ -251,19 +247,17 @@ pub fn lowerToCode(emit: *Emit) Error!void {
|
||||
},
|
||||
|
||||
.global_set_sp => {
|
||||
try code.ensureUnusedCapacity(gpa, 6);
|
||||
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.global_set));
|
||||
try bw.writeByte(@intFromEnum(std.wasm.Opcode.global_set));
|
||||
if (is_obj) {
|
||||
try wasm.out_relocs.append(gpa, .{
|
||||
.offset = @intCast(code.items.len),
|
||||
.offset = @intCast(bw.count),
|
||||
.pointee = .{ .symbol_index = try wasm.stackPointerSymbolIndex() },
|
||||
.tag = .global_index_leb,
|
||||
.addend = 0,
|
||||
});
|
||||
code.appendNTimesAssumeCapacity(0, 5);
|
||||
try bw.splatByteAll(0, 5);
|
||||
} else {
|
||||
const sp_global: Wasm.GlobalIndex = .stack_pointer;
|
||||
std.leb.writeULEB128(code.fixedWriter(), @intFromEnum(sp_global)) catch unreachable;
|
||||
try bw.writeLeb128(@intFromEnum(Wasm.GlobalIndex.stack_pointer));
|
||||
}
|
||||
|
||||
inst += 1;
|
||||
@ -271,36 +265,32 @@ pub fn lowerToCode(emit: *Emit) Error!void {
|
||||
},
|
||||
|
||||
.f32_const => {
|
||||
try code.ensureUnusedCapacity(gpa, 5);
|
||||
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.f32_const));
|
||||
std.mem.writeInt(u32, code.addManyAsArrayAssumeCapacity(4), @bitCast(datas[inst].float32), .little);
|
||||
try bw.writeByte(@intFromEnum(std.wasm.Opcode.f32_const));
|
||||
try bw.writeInt(u32, @bitCast(datas[inst].float32), .little);
|
||||
|
||||
inst += 1;
|
||||
continue :loop tags[inst];
|
||||
},
|
||||
|
||||
.f64_const => {
|
||||
try code.ensureUnusedCapacity(gpa, 9);
|
||||
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.f64_const));
|
||||
try bw.writeByte(@intFromEnum(std.wasm.Opcode.f64_const));
|
||||
const float64 = mir.extraData(Mir.Float64, datas[inst].payload).data;
|
||||
std.mem.writeInt(u64, code.addManyAsArrayAssumeCapacity(8), float64.toInt(), .little);
|
||||
try bw.writeInt(u64, float64.toInt(), .little);
|
||||
|
||||
inst += 1;
|
||||
continue :loop tags[inst];
|
||||
},
|
||||
.i32_const => {
|
||||
try code.ensureUnusedCapacity(gpa, 6);
|
||||
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_const));
|
||||
leb.writeIleb128(code.fixedWriter(), datas[inst].imm32) catch unreachable;
|
||||
try bw.writeByte(@intFromEnum(std.wasm.Opcode.i32_const));
|
||||
try bw.writeLeb128(datas[inst].imm32);
|
||||
|
||||
inst += 1;
|
||||
continue :loop tags[inst];
|
||||
},
|
||||
.i64_const => {
|
||||
try code.ensureUnusedCapacity(gpa, 11);
|
||||
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i64_const));
|
||||
try bw.writeByte(@intFromEnum(std.wasm.Opcode.i64_const));
|
||||
const int64: i64 = @bitCast(mir.extraData(Mir.Imm64, datas[inst].payload).data.toInt());
|
||||
leb.writeIleb128(code.fixedWriter(), int64) catch unreachable;
|
||||
try bw.writeLeb128(int64);
|
||||
|
||||
inst += 1;
|
||||
continue :loop tags[inst];
|
||||
@ -330,9 +320,8 @@ pub fn lowerToCode(emit: *Emit) Error!void {
|
||||
.i64_store16,
|
||||
.i64_store32,
|
||||
=> {
|
||||
try code.ensureUnusedCapacity(gpa, 1 + 20);
|
||||
code.appendAssumeCapacity(@intFromEnum(tags[inst]));
|
||||
encodeMemArg(code, mir.extraData(Mir.MemArg, datas[inst].payload).data);
|
||||
try bw.writeByte(@intFromEnum(tags[inst]));
|
||||
try encodeMemArg(bw, mir.extraData(Mir.MemArg, datas[inst].payload).data);
|
||||
inst += 1;
|
||||
continue :loop tags[inst];
|
||||
},
|
||||
@ -466,43 +455,42 @@ pub fn lowerToCode(emit: *Emit) Error!void {
|
||||
.i64_clz,
|
||||
.i64_ctz,
|
||||
=> {
|
||||
try code.append(gpa, @intFromEnum(tags[inst]));
|
||||
try bw.writeByte(@intFromEnum(tags[inst]));
|
||||
inst += 1;
|
||||
continue :loop tags[inst];
|
||||
},
|
||||
|
||||
.misc_prefix => {
|
||||
try code.ensureUnusedCapacity(gpa, 6 + 6);
|
||||
const extra_index = datas[inst].payload;
|
||||
const opcode = mir.extra[extra_index];
|
||||
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.misc_prefix));
|
||||
leb.writeUleb128(code.fixedWriter(), opcode) catch unreachable;
|
||||
switch (@as(std.wasm.MiscOpcode, @enumFromInt(opcode))) {
|
||||
const opcode: std.wasm.MiscOpcode = @enumFromInt(mir.extra[extra_index]);
|
||||
try bw.writeByte(@intFromEnum(std.wasm.Opcode.misc_prefix));
|
||||
try bw.writeLeb128(@intFromEnum(opcode));
|
||||
switch (opcode) {
|
||||
// bulk-memory opcodes
|
||||
.data_drop => {
|
||||
const segment = mir.extra[extra_index + 1];
|
||||
leb.writeUleb128(code.fixedWriter(), segment) catch unreachable;
|
||||
try bw.writeLeb128(segment);
|
||||
|
||||
inst += 1;
|
||||
continue :loop tags[inst];
|
||||
},
|
||||
.memory_init => {
|
||||
const segment = mir.extra[extra_index + 1];
|
||||
leb.writeUleb128(code.fixedWriter(), segment) catch unreachable;
|
||||
leb.writeUleb128(code.fixedWriter(), @as(u32, 0)) catch unreachable; // memory index
|
||||
try bw.writeLeb128(segment);
|
||||
try bw.writeByte(0); // memory index
|
||||
|
||||
inst += 1;
|
||||
continue :loop tags[inst];
|
||||
},
|
||||
.memory_fill => {
|
||||
leb.writeUleb128(code.fixedWriter(), @as(u32, 0)) catch unreachable; // memory index
|
||||
try bw.writeByte(0); // memory index
|
||||
|
||||
inst += 1;
|
||||
continue :loop tags[inst];
|
||||
},
|
||||
.memory_copy => {
|
||||
leb.writeUleb128(code.fixedWriter(), @as(u32, 0)) catch unreachable; // dst memory index
|
||||
leb.writeUleb128(code.fixedWriter(), @as(u32, 0)) catch unreachable; // src memory index
|
||||
try bw.writeByte(0); // dst memory index
|
||||
try bw.writeByte(0); // src memory index
|
||||
|
||||
inst += 1;
|
||||
continue :loop tags[inst];
|
||||
@ -534,12 +522,11 @@ pub fn lowerToCode(emit: *Emit) Error!void {
|
||||
comptime unreachable;
|
||||
},
|
||||
.simd_prefix => {
|
||||
try code.ensureUnusedCapacity(gpa, 6 + 20);
|
||||
const extra_index = datas[inst].payload;
|
||||
const opcode = mir.extra[extra_index];
|
||||
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.simd_prefix));
|
||||
leb.writeUleb128(code.fixedWriter(), opcode) catch unreachable;
|
||||
switch (@as(std.wasm.SimdOpcode, @enumFromInt(opcode))) {
|
||||
const opcode: std.wasm.SimdOpcode = @enumFromInt(mir.extra[extra_index]);
|
||||
try bw.writeByte(@intFromEnum(std.wasm.Opcode.simd_prefix));
|
||||
try bw.writeLeb128(@intFromEnum(opcode));
|
||||
switch (opcode) {
|
||||
.v128_store,
|
||||
.v128_load,
|
||||
.v128_load8_splat,
|
||||
@ -547,12 +534,12 @@ pub fn lowerToCode(emit: *Emit) Error!void {
|
||||
.v128_load32_splat,
|
||||
.v128_load64_splat,
|
||||
=> {
|
||||
encodeMemArg(code, mir.extraData(Mir.MemArg, extra_index + 1).data);
|
||||
try encodeMemArg(bw, mir.extraData(Mir.MemArg, extra_index + 1).data);
|
||||
inst += 1;
|
||||
continue :loop tags[inst];
|
||||
},
|
||||
.v128_const, .i8x16_shuffle => {
|
||||
code.appendSliceAssumeCapacity(std.mem.asBytes(mir.extra[extra_index + 1 ..][0..4]));
|
||||
try bw.writeAll(std.mem.asBytes(mir.extra[extra_index + 1 ..][0..4]));
|
||||
inst += 1;
|
||||
continue :loop tags[inst];
|
||||
},
|
||||
@ -571,7 +558,7 @@ pub fn lowerToCode(emit: *Emit) Error!void {
|
||||
.f64x2_extract_lane,
|
||||
.f64x2_replace_lane,
|
||||
=> {
|
||||
code.appendAssumeCapacity(@intCast(mir.extra[extra_index + 1]));
|
||||
try bw.writeByte(@intCast(mir.extra[extra_index + 1]));
|
||||
inst += 1;
|
||||
continue :loop tags[inst];
|
||||
},
|
||||
@ -819,13 +806,11 @@ pub fn lowerToCode(emit: *Emit) Error!void {
|
||||
comptime unreachable;
|
||||
},
|
||||
.atomics_prefix => {
|
||||
try code.ensureUnusedCapacity(gpa, 6 + 20);
|
||||
|
||||
const extra_index = datas[inst].payload;
|
||||
const opcode = mir.extra[extra_index];
|
||||
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.atomics_prefix));
|
||||
leb.writeUleb128(code.fixedWriter(), opcode) catch unreachable;
|
||||
switch (@as(std.wasm.AtomicsOpcode, @enumFromInt(opcode))) {
|
||||
const opcode: std.wasm.AtomicsOpcode = @enumFromInt(mir.extra[extra_index]);
|
||||
try bw.writeByte(@intFromEnum(std.wasm.Opcode.atomics_prefix));
|
||||
try bw.writeLeb128(@intFromEnum(opcode));
|
||||
switch (opcode) {
|
||||
.i32_atomic_load,
|
||||
.i64_atomic_load,
|
||||
.i32_atomic_load8_u,
|
||||
@ -892,15 +877,12 @@ pub fn lowerToCode(emit: *Emit) Error!void {
|
||||
.i64_atomic_rmw32_cmpxchg_u,
|
||||
=> {
|
||||
const mem_arg = mir.extraData(Mir.MemArg, extra_index + 1).data;
|
||||
encodeMemArg(code, mem_arg);
|
||||
try encodeMemArg(bw, mem_arg);
|
||||
inst += 1;
|
||||
continue :loop tags[inst];
|
||||
},
|
||||
.atomic_fence => {
|
||||
// Hard-codes memory index 0 since multi-memory proposal is
|
||||
// not yet accepted nor implemented.
|
||||
const memory_index: u32 = 0;
|
||||
leb.writeUleb128(code.fixedWriter(), memory_index) catch unreachable;
|
||||
try bw.writeByte(0); // memory index
|
||||
inst += 1;
|
||||
continue :loop tags[inst];
|
||||
},
|
||||
@ -915,44 +897,36 @@ pub fn lowerToCode(emit: *Emit) Error!void {
|
||||
}
|
||||
|
||||
/// Asserts 20 unused capacity.
|
||||
fn encodeMemArg(code: *std.ArrayListUnmanaged(u8), mem_arg: Mir.MemArg) void {
|
||||
assert(code.unusedCapacitySlice().len >= 20);
|
||||
// Wasm encodes alignment as power of 2, rather than natural alignment.
|
||||
const encoded_alignment = @ctz(mem_arg.alignment);
|
||||
leb.writeUleb128(code.fixedWriter(), encoded_alignment) catch unreachable;
|
||||
leb.writeUleb128(code.fixedWriter(), mem_arg.offset) catch unreachable;
|
||||
fn encodeMemArg(bw: *std.io.BufferedWriter, mem_arg: Mir.MemArg) anyerror!void {
|
||||
try bw.writeLeb128(Wasm.Alignment.fromNonzeroByteUnits(mem_arg.alignment).toLog2Units());
|
||||
try bw.writeLeb128(mem_arg.offset);
|
||||
}
|
||||
|
||||
fn uavRefObj(wasm: *Wasm, code: *std.ArrayListUnmanaged(u8), value: InternPool.Index, offset: i32, is_wasm32: bool) !void {
|
||||
fn uavRefObj(wasm: *Wasm, bw: *std.io.BufferedWriter, value: InternPool.Index, offset: i32, is_wasm32: bool) !void {
|
||||
const comp = wasm.base.comp;
|
||||
const gpa = comp.gpa;
|
||||
const opcode: std.wasm.Opcode = if (is_wasm32) .i32_const else .i64_const;
|
||||
|
||||
try code.ensureUnusedCapacity(gpa, 11);
|
||||
code.appendAssumeCapacity(@intFromEnum(opcode));
|
||||
try bw.writeByte(@intFromEnum(opcode));
|
||||
|
||||
try wasm.out_relocs.append(gpa, .{
|
||||
.offset = @intCast(code.items.len),
|
||||
.offset = @intCast(bw.count),
|
||||
.pointee = .{ .symbol_index = try wasm.uavSymbolIndex(value) },
|
||||
.tag = if (is_wasm32) .memory_addr_leb else .memory_addr_leb64,
|
||||
.addend = offset,
|
||||
});
|
||||
code.appendNTimesAssumeCapacity(0, if (is_wasm32) 5 else 10);
|
||||
try bw.splatByteAll(0, if (is_wasm32) 5 else 10);
|
||||
}
|
||||
|
||||
fn uavRefExe(wasm: *Wasm, code: *std.ArrayListUnmanaged(u8), value: InternPool.Index, offset: i32, is_wasm32: bool) !void {
|
||||
const comp = wasm.base.comp;
|
||||
const gpa = comp.gpa;
|
||||
fn uavRefExe(wasm: *Wasm, bw: *std.io.BufferedWriter, value: InternPool.Index, offset: i32, is_wasm32: bool) !void {
|
||||
const opcode: std.wasm.Opcode = if (is_wasm32) .i32_const else .i64_const;
|
||||
|
||||
try code.ensureUnusedCapacity(gpa, 11);
|
||||
code.appendAssumeCapacity(@intFromEnum(opcode));
|
||||
try bw.writeByte(@intFromEnum(opcode));
|
||||
|
||||
const addr = wasm.uavAddr(value);
|
||||
leb.writeUleb128(code.fixedWriter(), @as(u32, @intCast(@as(i64, addr) + offset))) catch unreachable;
|
||||
try bw.writeLeb128(@as(u32, @intCast(@as(i64, addr) + offset)));
|
||||
}
|
||||
|
||||
fn navRefOff(wasm: *Wasm, code: *std.ArrayListUnmanaged(u8), data: Mir.NavRefOff, is_wasm32: bool) !void {
|
||||
fn navRefOff(wasm: *Wasm, bw: *std.io.BufferedWriter, data: Mir.NavRefOff, is_wasm32: bool) !void {
|
||||
const comp = wasm.base.comp;
|
||||
const zcu = comp.zcu.?;
|
||||
const ip = &zcu.intern_pool;
|
||||
@ -961,24 +935,22 @@ fn navRefOff(wasm: *Wasm, code: *std.ArrayListUnmanaged(u8), data: Mir.NavRefOff
|
||||
const nav_ty = ip.getNav(data.nav_index).typeOf(ip);
|
||||
assert(!ip.isFunctionType(nav_ty));
|
||||
|
||||
try code.ensureUnusedCapacity(gpa, 11);
|
||||
|
||||
const opcode: std.wasm.Opcode = if (is_wasm32) .i32_const else .i64_const;
|
||||
code.appendAssumeCapacity(@intFromEnum(opcode));
|
||||
try bw.writeByte(@intFromEnum(opcode));
|
||||
if (is_obj) {
|
||||
try wasm.out_relocs.append(gpa, .{
|
||||
.offset = @intCast(code.items.len),
|
||||
.offset = @intCast(bw.count),
|
||||
.pointee = .{ .symbol_index = try wasm.navSymbolIndex(data.nav_index) },
|
||||
.tag = if (is_wasm32) .memory_addr_leb else .memory_addr_leb64,
|
||||
.addend = data.offset,
|
||||
});
|
||||
code.appendNTimesAssumeCapacity(0, if (is_wasm32) 5 else 10);
|
||||
try bw.splatByteAll(0, if (is_wasm32) 5 else 10);
|
||||
} else {
|
||||
const addr = wasm.navAddr(data.nav_index);
|
||||
leb.writeUleb128(code.fixedWriter(), @as(u32, @intCast(@as(i64, addr) + data.offset))) catch unreachable;
|
||||
try bw.writeLeb128(@as(i32, @bitCast(@as(u32, @intCast(@as(i64, addr) + data.offset)))));
|
||||
}
|
||||
}
|
||||
|
||||
fn appendOutputFunctionIndex(code: *std.ArrayListUnmanaged(u8), i: Wasm.OutputFunctionIndex) void {
|
||||
leb.writeUleb128(code.fixedWriter(), @intFromEnum(i)) catch unreachable;
|
||||
fn appendOutputFunctionIndex(bw: *std.io.BufferedWriter, i: Wasm.OutputFunctionIndex) anyerror!void {
|
||||
return bw.writeLeb128(@intFromEnum(i));
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@ -31,10 +31,11 @@ pub fn init(code: []const u8) Disassembler {
|
||||
}
|
||||
|
||||
pub fn next(dis: *Disassembler) Error!?Instruction {
|
||||
const prefixes = dis.parsePrefixes() catch |err| switch (err) {
|
||||
error.EndOfStream => return null,
|
||||
else => |e| return e,
|
||||
};
|
||||
return @errorCast(dis.nextInner());
|
||||
}
|
||||
|
||||
fn nextInner(dis: *Disassembler) anyerror!?Instruction {
|
||||
const prefixes = try dis.parsePrefixes();
|
||||
|
||||
const enc = try dis.parseEncoding(prefixes) orelse return error.UnknownOpcode;
|
||||
switch (enc.data.op_en) {
|
||||
@ -283,66 +284,53 @@ const Prefixes = struct {
|
||||
|
||||
fn parsePrefixes(dis: *Disassembler) !Prefixes {
|
||||
const rex_prefix_mask: u4 = 0b0100;
|
||||
var stream = std.io.fixedBufferStream(dis.code[dis.pos..]);
|
||||
const reader = stream.reader();
|
||||
|
||||
var res: Prefixes = .{};
|
||||
for (dis.code[dis.pos..], dis.pos..) |byte, pos| switch (byte) {
|
||||
0xf0, 0xf2, 0xf3, 0x2e, 0x36, 0x26, 0x64, 0x65, 0x3e, 0x66, 0x67 => {
|
||||
// Legacy prefix
|
||||
if (res.rex.present) return error.LegacyPrefixAfterRex;
switch (byte) {
0xf0 => res.legacy.prefix_f0 = true,
0xf2 => res.legacy.prefix_f2 = true,
0xf3 => res.legacy.prefix_f3 = true,
0x2e => res.legacy.prefix_2e = true,
0x36 => res.legacy.prefix_36 = true,
0x26 => res.legacy.prefix_26 = true,
0x64 => res.legacy.prefix_64 = true,
0x65 => res.legacy.prefix_65 = true,
0x3e => res.legacy.prefix_3e = true,
0x66 => res.legacy.prefix_66 = true,
0x67 => res.legacy.prefix_67 = true,
else => unreachable,
}
},
else => {
if (rex_prefix_mask == @as(u4, @truncate(byte >> 4))) {
// REX prefix
res.rex.w = byte & 0b1000 != 0;
res.rex.r = byte & 0b100 != 0;
res.rex.x = byte & 0b10 != 0;
res.rex.b = byte & 0b1 != 0;
res.rex.present = true;
continue;
}

while (true) {
const next_byte = try reader.readByte();
dis.pos += 1;

switch (next_byte) {
0xf0, 0xf2, 0xf3, 0x2e, 0x36, 0x26, 0x64, 0x65, 0x3e, 0x66, 0x67 => {
// Legacy prefix
if (res.rex.present) return error.LegacyPrefixAfterRex;
switch (next_byte) {
0xf0 => res.legacy.prefix_f0 = true,
0xf2 => res.legacy.prefix_f2 = true,
0xf3 => res.legacy.prefix_f3 = true,
0x2e => res.legacy.prefix_2e = true,
0x36 => res.legacy.prefix_36 = true,
0x26 => res.legacy.prefix_26 = true,
0x64 => res.legacy.prefix_64 = true,
0x65 => res.legacy.prefix_65 = true,
0x3e => res.legacy.prefix_3e = true,
0x66 => res.legacy.prefix_66 = true,
0x67 => res.legacy.prefix_67 = true,
else => unreachable,
}
},
else => {
if (rex_prefix_mask == @as(u4, @truncate(next_byte >> 4))) {
// REX prefix
res.rex.w = next_byte & 0b1000 != 0;
res.rex.r = next_byte & 0b100 != 0;
res.rex.x = next_byte & 0b10 != 0;
res.rex.b = next_byte & 0b1 != 0;
res.rex.present = true;
continue;
}

// TODO VEX prefix

dis.pos -= 1;
break;
},
}
}
// TODO VEX prefix

dis.pos = pos;
break;
},
};
return res;
}

fn parseEncoding(dis: *Disassembler, prefixes: Prefixes) !?Encoding {
const o_mask: u8 = 0b1111_1000;

var opcode: [3]u8 = .{ 0, 0, 0 };
var stream = std.io.fixedBufferStream(dis.code[dis.pos..]);
const reader = stream.reader();

comptime var opc_count = 0;
inline while (opc_count < 3) : (opc_count += 1) {
const byte = try reader.readByte();
const byte = dis.code[dis.pos];
opcode[opc_count] = byte;
dis.pos += 1;

@ -387,30 +375,27 @@ fn parseGpRegister(low_enc: u3, is_extended: bool, rex: Rex, bit_size: u64) Regi
};
}

fn parseImm(dis: *Disassembler, kind: Encoding.Op) !Immediate {
var stream = std.io.fixedBufferStream(dis.code[dis.pos..]);
var creader = std.io.countingReader(stream.reader());
const reader = creader.reader();
const imm = switch (kind) {
.imm8s, .rel8 => Immediate.s(try reader.readInt(i8, .little)),
.imm16s, .rel16 => Immediate.s(try reader.readInt(i16, .little)),
.imm32s, .rel32 => Immediate.s(try reader.readInt(i32, .little)),
.imm8 => Immediate.u(try reader.readInt(u8, .little)),
.imm16 => Immediate.u(try reader.readInt(u16, .little)),
.imm32 => Immediate.u(try reader.readInt(u32, .little)),
.imm64 => Immediate.u(try reader.readInt(u64, .little)),
fn parseImm(dis: *Disassembler, kind: Encoding.Op) anyerror!Immediate {
var br: std.io.BufferedReader = undefined;
br.initFixed(dis.code[dis.pos..]);
defer dis.pos += br.seek;
return switch (kind) {
.imm8s, .rel8 => .s(try br.takeInt(i8, .little)),
.imm16s, .rel16 => .s(try br.takeInt(i16, .little)),
.imm32s, .rel32 => .s(try br.takeInt(i32, .little)),
.imm8 => .u(try br.takeInt(u8, .little)),
.imm16 => .u(try br.takeInt(u16, .little)),
.imm32 => .u(try br.takeInt(u32, .little)),
.imm64 => .u(try br.takeInt(u64, .little)),
else => unreachable,
};
dis.pos += std.math.cast(usize, creader.bytes_read) orelse return error.Overflow;
return imm;
}

fn parseOffset(dis: *Disassembler) !u64 {
var stream = std.io.fixedBufferStream(dis.code[dis.pos..]);
const reader = stream.reader();
const offset = try reader.readInt(u64, .little);
dis.pos += 8;
return offset;
fn parseOffset(dis: *Disassembler) anyerror!u64 {
var br: std.io.BufferedReader = undefined;
br.initFixed(dis.code[dis.pos..]);
defer dis.pos += br.seek;
return br.takeInt(u64, .little);
}
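
// Illustrative sketch (not part of the patch itself) of the fixed-buffer reader pattern used
// by the reworked parseImm and parseOffset above, assuming `std.io.BufferedReader.initFixed`,
// `takeInt`, and the `seek` field behave as they are used in this change:
fn takeLeU32(code: []const u8, pos: *usize) anyerror!u32 {
    var br: std.io.BufferedReader = undefined;
    br.initFixed(code[pos.*..]); // read directly out of the in-memory code slice
    defer pos.* += br.seek; // advance the caller's cursor by however many bytes were consumed
    return br.takeInt(u32, .little);
}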

const ModRm = packed struct {
@ -482,26 +467,22 @@ fn parseSibByte(dis: *Disassembler) !Sib {
return Sib{ .scale = scale, .index = index, .base = base };
}

fn parseDisplacement(dis: *Disassembler, modrm: ModRm, sib: ?Sib) !i32 {
var stream = std.io.fixedBufferStream(dis.code[dis.pos..]);
var creader = std.io.countingReader(stream.reader());
const reader = creader.reader();
const disp = disp: {
if (sib) |info| {
if (info.base == 0b101 and modrm.mod == 0) {
break :disp try reader.readInt(i32, .little);
}
fn parseDisplacement(dis: *Disassembler, modrm: ModRm, sib: ?Sib) anyerror!i32 {
var br: std.io.BufferedReader = undefined;
br.initFixed(dis.code[dis.pos..]);
defer dis.pos += br.seek;
if (sib) |info| {
if (info.base == 0b101 and modrm.mod == 0) {
return br.takeInt(i32, .little);
}
if (modrm.rip()) {
break :disp try reader.readInt(i32, .little);
}
break :disp switch (modrm.mod) {
0b00 => 0,
0b01 => try reader.readInt(i8, .little),
0b10 => try reader.readInt(i32, .little),
0b11 => unreachable,
};
}
if (modrm.rip()) {
return br.takeInt(i32, .little);
}
return switch (modrm.mod) {
0b00 => 0,
0b01 => try br.takeInt(i8, .little),
0b10 => try br.takeInt(i32, .little),
0b11 => unreachable,
};
dis.pos += std.math.cast(usize, creader.bytes_read) orelse return error.Overflow;
return disp;
}

@ -424,19 +424,19 @@ pub fn emitMir(emit: *Emit) Error!void {
.line = mir_inst.data.line_column.line,
.column = mir_inst.data.line_column.column,
.is_stmt = true,
}),
}, emit.code.items.len),
.pseudo_dbg_line_line_column => try emit.dbgAdvancePCAndLine(.{
.line = mir_inst.data.line_column.line,
.column = mir_inst.data.line_column.column,
.is_stmt = false,
}),
}, emit.code.items.len),
.pseudo_dbg_epilogue_begin_none => switch (emit.debug_output) {
.dwarf => |dwarf| {
try dwarf.setEpilogueBegin();
log.debug("mirDbgEpilogueBegin (line={d}, col={d})", .{
emit.prev_di_loc.line, emit.prev_di_loc.column,
});
try emit.dbgAdvancePCAndLine(emit.prev_di_loc);
try emit.dbgAdvancePCAndLine(emit.prev_di_loc, emit.code.items.len);
},
.plan9 => {},
.none => {},
@ -909,9 +909,9 @@ const Loc = struct {
is_stmt: bool,
};

fn dbgAdvancePCAndLine(emit: *Emit, loc: Loc) Error!void {
fn dbgAdvancePCAndLine(emit: *Emit, loc: Loc, pc: usize) anyerror!void {
const delta_line = @as(i33, loc.line) - @as(i33, emit.prev_di_loc.line);
const delta_pc: usize = emit.code.items.len - emit.prev_di_pc;
const delta_pc = pc - emit.prev_di_pc;
log.debug(" (advance pc={d} and line={d})", .{ delta_pc, delta_line });
switch (emit.debug_output) {
.dwarf => |dwarf| {
@ -919,30 +919,25 @@ fn dbgAdvancePCAndLine(emit: *Emit, loc: Loc) Error!void {
if (loc.column != emit.prev_di_loc.column) try dwarf.setColumn(loc.column);
try dwarf.advancePCAndLine(delta_line, delta_pc);
emit.prev_di_loc = loc;
emit.prev_di_pc = emit.code.items.len;
emit.prev_di_pc = pc;
},
.plan9 => |dbg_out| {
if (delta_pc <= 0) return; // only do this when the pc changes

var aw: std.io.AllocatingWriter = undefined;
const bw = aw.fromArrayList(emit.lower.bin_file.comp.gpa, &dbg_out.dbg_line);
defer dbg_out.dbg_line = aw.toArrayList();

// increasing the line number
try link.File.Plan9.changeLine(&dbg_out.dbg_line, @intCast(delta_line));
try link.File.Plan9.changeLine(bw, @intCast(delta_line));
// increasing the pc
const d_pc_p9 = @as(i64, @intCast(delta_pc)) - dbg_out.pc_quanta;
if (d_pc_p9 > 0) {
// minus one because if its the last one, we want to leave space to change the line which is one pc quanta
var diff = @divExact(d_pc_p9, dbg_out.pc_quanta) - dbg_out.pc_quanta;
while (diff > 0) {
if (diff < 64) {
try dbg_out.dbg_line.append(@intCast(diff + 128));
diff = 0;
} else {
try dbg_out.dbg_line.append(@intCast(64 + 128));
diff -= 64;
}
}
if (dbg_out.pcop_change_index) |pci|
dbg_out.dbg_line.items[pci] += 1;
dbg_out.pcop_change_index = @intCast(dbg_out.dbg_line.items.len - 1);
try bw.writeByte(@as(u8, @intCast(@divExact(d_pc_p9, dbg_out.pc_quanta) + 128)) - dbg_out.pc_quanta);
const dbg_line = aw.getWritten();
if (dbg_out.pcop_change_index) |pci| dbg_line[pci] += 1;
dbg_out.pcop_change_index = @intCast(dbg_line.len - 1);
} else if (d_pc_p9 == 0) {
// we don't need to do anything, because adding the pc quanta does it for us
} else unreachable;
@ -951,7 +946,7 @@ fn dbgAdvancePCAndLine(emit: *Emit, loc: Loc) Error!void {
dbg_out.end_line = loc.line;
// only do this if the pc changed
emit.prev_di_loc = loc;
emit.prev_di_pc = emit.code.items.len;
emit.prev_di_pc = pc;
},
.none => {},
}

@ -158,20 +158,14 @@ pub fn modRmExt(encoding: Encoding) u3 {
};
}

pub fn format(
encoding: Encoding,
comptime fmt: []const u8,
options: std.fmt.FormatOptions,
writer: anytype,
) !void {
_ = options;
pub fn format(encoding: Encoding, bw: *std.io.BufferedWriter, comptime fmt: []const u8) anyerror!void {
_ = fmt;

var opc = encoding.opcode();
if (encoding.data.mode.isVex()) {
try writer.writeAll("VEX.");
try bw.writeAll("VEX.");

try writer.writeAll(switch (encoding.data.mode) {
try bw.writeAll(switch (encoding.data.mode) {
.vex_128_w0, .vex_128_w1, .vex_128_wig => "128",
.vex_256_w0, .vex_256_w1, .vex_256_wig => "256",
.vex_lig_w0, .vex_lig_w1, .vex_lig_wig => "LIG",
@ -182,25 +176,25 @@ pub fn format(
switch (opc[0]) {
else => {},
0x66, 0xf3, 0xf2 => {
try writer.print(".{X:0>2}", .{opc[0]});
try bw.print(".{X:0>2}", .{opc[0]});
opc = opc[1..];
},
}

try writer.print(".{}", .{std.fmt.fmtSliceHexUpper(opc[0 .. opc.len - 1])});
try bw.print(".{X}", .{opc[0 .. opc.len - 1]});
opc = opc[opc.len - 1 ..];

try writer.writeAll(".W");
try writer.writeAll(switch (encoding.data.mode) {
try bw.writeAll(".W");
try bw.writeAll(switch (encoding.data.mode) {
.vex_128_w0, .vex_256_w0, .vex_lig_w0, .vex_lz_w0 => "0",
.vex_128_w1, .vex_256_w1, .vex_lig_w1, .vex_lz_w1 => "1",
.vex_128_wig, .vex_256_wig, .vex_lig_wig, .vex_lz_wig => "IG",
else => unreachable,
});

try writer.writeByte(' ');
} else if (encoding.data.mode.isLong()) try writer.writeAll("REX.W + ");
for (opc) |byte| try writer.print("{x:0>2} ", .{byte});
try bw.writeByte(' ');
} else if (encoding.data.mode.isLong()) try bw.writeAll("REX.W + ");
for (opc) |byte| try bw.print("{x:0>2} ", .{byte});

switch (encoding.data.op_en) {
.z, .fd, .td, .i, .zi, .ii, .d => {},
@ -217,10 +211,10 @@ pub fn format(
.r64 => "rd",
else => unreachable,
};
try writer.print("+{s} ", .{tag});
try bw.print("+{s} ", .{tag});
},
.ia, .m, .mi, .m1, .mc, .vm, .vmi => try writer.print("/{d} ", .{encoding.modRmExt()}),
.mr, .rm, .rmi, .mri, .mrc, .rm0, .rvm, .rvmr, .rvmi, .mvr, .rmv => try writer.writeAll("/r "),
.ia, .m, .mi, .m1, .mc, .vm, .vmi => try bw.print("/{d} ", .{encoding.modRmExt()}),
.mr, .rm, .rmi, .mri, .mrc, .rm0, .rvm, .rvmr, .rvmi, .mvr, .rmv => try bw.writeAll("/r "),
}

switch (encoding.data.op_en) {
@ -249,24 +243,24 @@ pub fn format(
.rel32 => "cd",
else => unreachable,
};
try writer.print("{s} ", .{tag});
try bw.print("{s} ", .{tag});
},
.rvmr => try writer.writeAll("/is4 "),
.rvmr => try bw.writeAll("/is4 "),
.z, .fd, .td, .o, .zo, .oz, .m, .m1, .mc, .mr, .rm, .mrc, .rm0, .vm, .rvm, .mvr, .rmv => {},
}

try writer.print("{s} ", .{@tagName(encoding.mnemonic)});
try bw.print("{s} ", .{@tagName(encoding.mnemonic)});

for (encoding.data.ops) |op| switch (op) {
.none => break,
else => try writer.print("{s} ", .{@tagName(op)}),
else => try bw.print("{s} ", .{@tagName(op)}),
};

const op_en = switch (encoding.data.op_en) {
.zi => .i,
else => |op_en| op_en,
};
try writer.print("{s}", .{@tagName(op_en)});
try bw.print("{s}", .{@tagName(op_en)});
}

pub const Mnemonic = enum {
@ -1014,19 +1008,21 @@ pub const Feature = enum {
};

fn estimateInstructionLength(prefix: Prefix, encoding: Encoding, ops: []const Operand) usize {
var inst = Instruction{
var inst: Instruction = .{
.prefix = prefix,
.encoding = encoding,
.ops = @splat(.none),
};
@memcpy(inst.ops[0..ops.len], ops);

var cwriter = std.io.countingWriter(std.io.null_writer);
inst.encode(cwriter.writer(), .{
var buf: [15]u8 = undefined;
var bw: std.io.BufferedWriter = undefined;
bw.initFixed(&buf);
inst.encode(&bw, .{
.allow_frame_locs = true,
.allow_symbols = true,
}) catch unreachable; // Not allowed to fail here unless OOM.
return @as(usize, @intCast(cwriter.bytes_written));
}) catch unreachable;
return @intCast(bw.end);
}
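
// A minimal sketch of the fixed-buffer counting idiom estimateInstructionLength above now
// relies on; `initFixed` and the `end` field are assumptions based on how they are used in
// this change, not a documented API reference:
fn countWritten(payload: []const u8) usize {
    var buf: [15]u8 = undefined; // an x86_64 instruction never exceeds 15 bytes
    var bw: std.io.BufferedWriter = undefined;
    bw.initFixed(&buf);
    bw.writeAll(payload) catch unreachable; // caller guarantees payload.len <= 15 here
    return bw.end; // bytes written into the fixed buffer so far
}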

const mnemonic_to_encodings_map = init: {

@ -728,21 +728,12 @@ pub const FrameIndex = enum(u32) {
return @intFromEnum(fi) < named_count;
}

pub fn format(
fi: FrameIndex,
comptime fmt: []const u8,
options: std.fmt.FormatOptions,
writer: anytype,
) @TypeOf(writer).Error!void {
try writer.writeAll("FrameIndex");
if (fi.isNamed()) {
try writer.writeByte('.');
try writer.writeAll(@tagName(fi));
} else {
try writer.writeByte('(');
try std.fmt.formatType(@intFromEnum(fi), fmt, options, writer, 0);
try writer.writeByte(')');
}
pub fn format(fi: FrameIndex, bw: *std.io.BufferedWriter, comptime _: []const u8) anyerror!void {
try bw.writeAll("FrameIndex");
if (fi.isNamed())
try bw.print(".{s}", .{@tagName(fi)})
else
try bw.print("({d})", .{@intFromEnum(fi)});
}
};

@ -844,21 +835,13 @@ pub const Memory = struct {
};
}

pub fn format(
s: Size,
comptime _: []const u8,
_: std.fmt.FormatOptions,
writer: anytype,
) @TypeOf(writer).Error!void {
pub fn format(s: Size, bw: *std.io.BufferedWriter, comptime _: []const u8) anyerror!void {
if (s == .none) return;
try writer.writeAll(@tagName(s));
try bw.writeAll(@tagName(s));
switch (s) {
.none => unreachable,
.ptr, .gpr => {},
else => {
try writer.writeByte(' ');
try writer.writeAll("ptr");
},
else => try bw.writeAll(" ptr"),
}
}
};

@ -226,16 +226,10 @@ pub const Instruction = struct {
};
}

fn format(
op: Operand,
comptime unused_format_string: []const u8,
options: std.fmt.FormatOptions,
writer: anytype,
) !void {
fn format(op: Operand, bw: *std.io.BufferedWriter, comptime unused_format_string: []const u8) anyerror!void {
_ = op;
_ = bw;
_ = unused_format_string;
_ = options;
_ = writer;
@compileError("do not format Operand directly; use fmt() instead");
}

@ -244,78 +238,72 @@ pub const Instruction = struct {
enc_op: Encoding.Op,
};

fn fmtContext(
ctx: FormatContext,
comptime unused_format_string: []const u8,
options: std.fmt.FormatOptions,
writer: anytype,
) @TypeOf(writer).Error!void {
fn fmtContext(ctx: FormatContext, bw: *std.io.BufferedWriter, comptime unused_format_string: []const u8) anyerror!void {
_ = unused_format_string;
_ = options;
const op = ctx.op;
const enc_op = ctx.enc_op;
switch (op) {
.none => {},
.reg => |reg| try writer.writeAll(@tagName(reg)),
.reg => |reg| try bw.writeAll(@tagName(reg)),
.mem => |mem| switch (mem) {
.rip => |rip| {
try writer.print("{} [rip", .{rip.ptr_size});
if (rip.disp != 0) try writer.print(" {c} 0x{x}", .{
try bw.print("{f} [rip", .{rip.ptr_size});
if (rip.disp != 0) try bw.print(" {c} 0x{x}", .{
@as(u8, if (rip.disp < 0) '-' else '+'),
@abs(rip.disp),
});
try writer.writeByte(']');
try bw.writeByte(']');
},
.sib => |sib| {
try writer.print("{} ", .{sib.ptr_size});
try bw.print("{f} ", .{sib.ptr_size});

if (mem.isSegmentRegister()) {
return writer.print("{s}:0x{x}", .{ @tagName(sib.base.reg), sib.disp });
return bw.print("{s}:0x{x}", .{ @tagName(sib.base.reg), sib.disp });
}

try writer.writeByte('[');
try bw.writeByte('[');

var any = true;
switch (sib.base) {
.none => any = false,
.reg => |reg| try writer.print("{s}", .{@tagName(reg)}),
.frame => |frame_index| try writer.print("{}", .{frame_index}),
.table => try writer.print("Table", .{}),
.rip_inst => |inst_index| try writer.print("RipInst({d})", .{inst_index}),
.nav => |nav| try writer.print("Nav({d})", .{@intFromEnum(nav)}),
.uav => |uav| try writer.print("Uav({d})", .{@intFromEnum(uav.val)}),
.lazy_sym => |lazy_sym| try writer.print("LazySym({s}, {d})", .{
.reg => |reg| try bw.print("{s}", .{@tagName(reg)}),
.frame => |frame_index| try bw.print("{}", .{frame_index}),
.table => try bw.print("Table", .{}),
.rip_inst => |inst_index| try bw.print("RipInst({d})", .{inst_index}),
.nav => |nav| try bw.print("Nav({d})", .{@intFromEnum(nav)}),
.uav => |uav| try bw.print("Uav({d})", .{@intFromEnum(uav.val)}),
.lazy_sym => |lazy_sym| try bw.print("LazySym({s}, {d})", .{
@tagName(lazy_sym.kind),
@intFromEnum(lazy_sym.ty),
}),
.extern_func => |extern_func| try writer.print("ExternFunc({d})", .{@intFromEnum(extern_func)}),
.extern_func => |extern_func| try bw.print("ExternFunc({d})", .{@intFromEnum(extern_func)}),
}
if (mem.scaleIndex()) |si| {
if (any) try writer.writeAll(" + ");
try writer.print("{s} * {d}", .{ @tagName(si.index), si.scale });
if (any) try bw.writeAll(" + ");
try bw.print("{s} * {d}", .{ @tagName(si.index), si.scale });
any = true;
}
if (sib.disp != 0 or !any) {
if (any)
try writer.print(" {c} ", .{@as(u8, if (sib.disp < 0) '-' else '+')})
try bw.print(" {c} ", .{@as(u8, if (sib.disp < 0) '-' else '+')})
else if (sib.disp < 0)
try writer.writeByte('-');
try writer.print("0x{x}", .{@abs(sib.disp)});
try bw.writeByte('-');
try bw.print("0x{x}", .{@abs(sib.disp)});
any = true;
}

try writer.writeByte(']');
try bw.writeByte(']');
},
.moffs => |moffs| try writer.print("{s}:0x{x}", .{
.moffs => |moffs| try bw.print("{s}:0x{x}", .{
@tagName(moffs.seg),
moffs.offset,
}),
},
.imm => |imm| if (enc_op.isSigned()) {
const imms = imm.asSigned(enc_op.immBitSize());
if (imms < 0) try writer.writeByte('-');
try writer.print("0x{x}", .{@abs(imms)});
} else try writer.print("0x{x}", .{imm.asUnsigned(enc_op.immBitSize())}),
if (imms < 0) try bw.writeByte('-');
try bw.print("0x{x}", .{@abs(imms)});
} else try bw.print("0x{x}", .{imm.asUnsigned(enc_op.immBitSize())}),
.bytes => unreachable,
}
}
@ -361,7 +349,7 @@ pub const Instruction = struct {
},
},
};
log.debug("selected encoding: {}", .{encoding});
log.debug("selected encoding: {f}", .{encoding});

var inst: Instruction = .{
.prefix = prefix,
@ -372,30 +360,23 @@ pub const Instruction = struct {
return inst;
}

pub fn format(
inst: Instruction,
comptime unused_format_string: []const u8,
options: std.fmt.FormatOptions,
writer: anytype,
) @TypeOf(writer).Error!void {
pub fn format(inst: Instruction, bw: *std.io.BufferedWriter, comptime unused_format_string: []const u8) anyerror!void {
_ = unused_format_string;
_ = options;
switch (inst.prefix) {
.none, .directive => {},
else => try writer.print("{s} ", .{@tagName(inst.prefix)}),
else => try bw.print("{s} ", .{@tagName(inst.prefix)}),
}
try writer.print("{s}", .{@tagName(inst.encoding.mnemonic)});
try bw.print("{s}", .{@tagName(inst.encoding.mnemonic)});
for (inst.ops, inst.encoding.data.ops, 0..) |op, enc, i| {
if (op == .none) break;
if (i > 0) try writer.writeByte(',');
try writer.writeByte(' ');
try writer.print("{}", .{op.fmt(enc)});
if (i > 0) try bw.writeByte(',');
try bw.print(" {f}", .{op.fmt(enc)});
}
}

pub fn encode(inst: Instruction, writer: anytype, comptime opts: Options) !void {
pub fn encode(inst: Instruction, bw: *std.io.BufferedWriter, comptime opts: Options) !void {
assert(inst.prefix != .directive);
const encoder = Encoder(@TypeOf(writer), opts){ .writer = writer };
const encoder: Encoder(opts) = .{ .bw = bw };
const enc = inst.encoding;
const data = enc.data;
|
||||
|
||||
@ -801,9 +782,9 @@ pub const LegacyPrefixes = packed struct {
|
||||
|
||||
pub const Options = struct { allow_frame_locs: bool = false, allow_symbols: bool = false };
|
||||
|
||||
fn Encoder(comptime T: type, comptime opts: Options) type {
|
||||
fn Encoder(comptime opts: Options) type {
|
||||
return struct {
|
||||
writer: T,
|
||||
bw: *std.io.BufferedWriter,
|
||||
|
||||
const Self = @This();
|
||||
pub const options = opts;
|
||||
@ -813,44 +794,44 @@ fn Encoder(comptime T: type, comptime opts: Options) type {
|
||||
// --------
|
||||
|
||||
/// Encodes legacy prefixes
|
||||
pub fn legacyPrefixes(self: Self, prefixes: LegacyPrefixes) !void {
|
||||
pub fn legacyPrefixes(self: Self, prefixes: LegacyPrefixes) anyerror!void {
|
||||
if (@as(u16, @bitCast(prefixes)) != 0) {
|
||||
// Hopefully this path isn't taken very often, so we'll do it the slow way for now
|
||||
|
||||
// LOCK
|
||||
if (prefixes.prefix_f0) try self.writer.writeByte(0xf0);
|
||||
if (prefixes.prefix_f0) try self.bw.writeByte(0xf0);
|
||||
// REPNZ, REPNE, REP, Scalar Double-precision
|
||||
if (prefixes.prefix_f2) try self.writer.writeByte(0xf2);
|
||||
if (prefixes.prefix_f2) try self.bw.writeByte(0xf2);
|
||||
// REPZ, REPE, REP, Scalar Single-precision
|
||||
if (prefixes.prefix_f3) try self.writer.writeByte(0xf3);
|
||||
if (prefixes.prefix_f3) try self.bw.writeByte(0xf3);
|
||||
|
||||
// CS segment override or Branch not taken
|
||||
if (prefixes.prefix_2e) try self.writer.writeByte(0x2e);
|
||||
if (prefixes.prefix_2e) try self.bw.writeByte(0x2e);
|
||||
// DS segment override
|
||||
if (prefixes.prefix_36) try self.writer.writeByte(0x36);
|
||||
if (prefixes.prefix_36) try self.bw.writeByte(0x36);
|
||||
// ES segment override
|
||||
if (prefixes.prefix_26) try self.writer.writeByte(0x26);
|
||||
if (prefixes.prefix_26) try self.bw.writeByte(0x26);
|
||||
// FS segment override
|
||||
if (prefixes.prefix_64) try self.writer.writeByte(0x64);
|
||||
if (prefixes.prefix_64) try self.bw.writeByte(0x64);
|
||||
// GS segment override
|
||||
if (prefixes.prefix_65) try self.writer.writeByte(0x65);
|
||||
if (prefixes.prefix_65) try self.bw.writeByte(0x65);
|
||||
|
||||
// Branch taken
|
||||
if (prefixes.prefix_3e) try self.writer.writeByte(0x3e);
|
||||
if (prefixes.prefix_3e) try self.bw.writeByte(0x3e);
|
||||
|
||||
// Operand size override
|
||||
if (prefixes.prefix_66) try self.writer.writeByte(0x66);
|
||||
if (prefixes.prefix_66) try self.bw.writeByte(0x66);
|
||||
|
||||
// Address size override
|
||||
if (prefixes.prefix_67) try self.writer.writeByte(0x67);
|
||||
if (prefixes.prefix_67) try self.bw.writeByte(0x67);
|
||||
}
|
||||
}
|
||||
|
||||
/// Use 16 bit operand size
|
||||
///
|
||||
/// Note that this flag is overridden by REX.W, if both are present.
|
||||
pub fn prefix16BitMode(self: Self) !void {
|
||||
try self.writer.writeByte(0x66);
|
||||
pub fn prefix16BitMode(self: Self) anyerror!void {
|
||||
try self.bw.writeByte(0x66);
|
||||
}
|
||||
|
||||
/// Encodes a REX prefix byte given all the fields
|
||||
@ -859,7 +840,7 @@ fn Encoder(comptime T: type, comptime opts: Options) type {
|
||||
/// or one of reg, index, r/m, base, or opcode-reg might be extended.
|
||||
///
|
||||
/// See struct `Rex` for a description of each field.
|
||||
pub fn rex(self: Self, fields: Rex) !void {
|
||||
pub fn rex(self: Self, fields: Rex) anyerror!void {
|
||||
if (!fields.present and !fields.isSet()) return;
|
||||
|
||||
var byte: u8 = 0b0100_0000;
|
||||
@ -869,32 +850,32 @@ fn Encoder(comptime T: type, comptime opts: Options) type {
|
||||
if (fields.x) byte |= 0b0010;
|
||||
if (fields.b) byte |= 0b0001;
|
||||
|
||||
try self.writer.writeByte(byte);
|
||||
try self.bw.writeByte(byte);
|
||||
}
|
||||
|
||||
/// Encodes a VEX prefix given all the fields
|
||||
///
|
||||
/// See struct `Vex` for a description of each field.
|
||||
pub fn vex(self: Self, fields: Vex) !void {
|
||||
pub fn vex(self: Self, fields: Vex) anyerror!void {
|
||||
if (fields.is3Byte()) {
|
||||
try self.writer.writeByte(0b1100_0100);
|
||||
try self.bw.writeByte(0b1100_0100);
|
||||
|
||||
try self.writer.writeByte(
|
||||
try self.bw.writeByte(
|
||||
@as(u8, ~@intFromBool(fields.r)) << 7 |
|
||||
@as(u8, ~@intFromBool(fields.x)) << 6 |
|
||||
@as(u8, ~@intFromBool(fields.b)) << 5 |
|
||||
@as(u8, @intFromEnum(fields.m)) << 0,
|
||||
);
|
||||
|
||||
try self.writer.writeByte(
|
||||
try self.bw.writeByte(
|
||||
@as(u8, @intFromBool(fields.w)) << 7 |
|
||||
@as(u8, ~@as(u4, @intCast(fields.v.enc()))) << 3 |
|
||||
@as(u8, @intFromBool(fields.l)) << 2 |
|
||||
@as(u8, @intFromEnum(fields.p)) << 0,
|
||||
);
|
||||
} else {
|
||||
try self.writer.writeByte(0b1100_0101);
|
||||
try self.writer.writeByte(
|
||||
try self.bw.writeByte(0b1100_0101);
|
||||
try self.bw.writeByte(
|
||||
@as(u8, ~@intFromBool(fields.r)) << 7 |
|
||||
@as(u8, ~@as(u4, @intCast(fields.v.enc()))) << 3 |
|
||||
@as(u8, @intFromBool(fields.l)) << 2 |
|
||||
@ -908,8 +889,8 @@ fn Encoder(comptime T: type, comptime opts: Options) type {
|
||||
// ------
|
||||
|
||||
/// Encodes a 1 byte opcode
|
||||
pub fn opcode_1byte(self: Self, opcode: u8) !void {
|
||||
try self.writer.writeByte(opcode);
|
||||
pub fn opcode_1byte(self: Self, opcode: u8) anyerror!void {
|
||||
try self.bw.writeByte(opcode);
|
||||
}
|
||||
|
||||
/// Encodes a 2 byte opcode
|
||||
@ -917,8 +898,8 @@ fn Encoder(comptime T: type, comptime opts: Options) type {
|
||||
/// e.g. IMUL has the opcode 0x0f 0xaf, so you use
|
||||
///
|
||||
/// encoder.opcode_2byte(0x0f, 0xaf);
|
||||
pub fn opcode_2byte(self: Self, prefix: u8, opcode: u8) !void {
|
||||
try self.writer.writeAll(&.{ prefix, opcode });
|
||||
pub fn opcode_2byte(self: Self, prefix: u8, opcode: u8) anyerror!void {
|
||||
try self.bw.writeAll(&.{ prefix, opcode });
|
||||
}
|
||||
|
||||
/// Encodes a 3 byte opcode
|
||||
@ -926,16 +907,16 @@ fn Encoder(comptime T: type, comptime opts: Options) type {
|
||||
/// e.g. MOVSD has the opcode 0xf2 0x0f 0x10
|
||||
///
|
||||
/// encoder.opcode_3byte(0xf2, 0x0f, 0x10);
|
||||
pub fn opcode_3byte(self: Self, prefix_1: u8, prefix_2: u8, opcode: u8) !void {
|
||||
try self.writer.writeAll(&.{ prefix_1, prefix_2, opcode });
|
||||
pub fn opcode_3byte(self: Self, prefix_1: u8, prefix_2: u8, opcode: u8) anyerror!void {
|
||||
try self.bw.writeAll(&.{ prefix_1, prefix_2, opcode });
|
||||
}
|
||||
|
||||
/// Encodes a 1 byte opcode with a reg field
|
||||
///
|
||||
/// Remember to add a REX prefix byte if reg is extended!
|
||||
pub fn opcode_withReg(self: Self, opcode: u8, reg: u3) !void {
|
||||
pub fn opcode_withReg(self: Self, opcode: u8, reg: u3) anyerror!void {
|
||||
assert(opcode & 0b111 == 0);
|
||||
try self.writer.writeByte(opcode | reg);
|
||||
try self.bw.writeByte(opcode | reg);
|
||||
}
|
||||
|
||||
// ------
|
||||
@ -945,8 +926,8 @@ fn Encoder(comptime T: type, comptime opts: Options) type {
|
||||
/// Construct a ModR/M byte given all the fields
|
||||
///
|
||||
/// Remember to add a REX prefix byte if reg or rm are extended!
|
||||
pub fn modRm(self: Self, mod: u2, reg_or_opx: u3, rm: u3) !void {
|
||||
try self.writer.writeByte(@as(u8, mod) << 6 | @as(u8, reg_or_opx) << 3 | rm);
|
||||
pub fn modRm(self: Self, mod: u2, reg_or_opx: u3, rm: u3) anyerror!void {
|
||||
try self.bw.writeByte(@as(u8, mod) << 6 | @as(u8, reg_or_opx) << 3 | rm);
|
||||
}
|
||||
|
||||
/// Construct a ModR/M byte using direct r/m addressing
|
||||
@ -954,7 +935,7 @@ fn Encoder(comptime T: type, comptime opts: Options) type {
|
||||
///
|
||||
/// Note reg's effective address is always just reg for the ModR/M byte.
|
||||
/// Remember to add a REX prefix byte if reg or rm are extended!
|
||||
pub fn modRm_direct(self: Self, reg_or_opx: u3, rm: u3) !void {
|
||||
pub fn modRm_direct(self: Self, reg_or_opx: u3, rm: u3) anyerror!void {
|
||||
try self.modRm(0b11, reg_or_opx, rm);
|
||||
}
|
||||
|
||||
@ -963,7 +944,7 @@ fn Encoder(comptime T: type, comptime opts: Options) type {
|
||||
///
|
||||
/// Note reg's effective address is always just reg for the ModR/M byte.
|
||||
/// Remember to add a REX prefix byte if reg or rm are extended!
|
||||
pub fn modRm_indirectDisp0(self: Self, reg_or_opx: u3, rm: u3) !void {
|
||||
pub fn modRm_indirectDisp0(self: Self, reg_or_opx: u3, rm: u3) anyerror!void {
|
||||
assert(rm != 4 and rm != 5);
|
||||
try self.modRm(0b00, reg_or_opx, rm);
|
||||
}
|
||||
@ -973,7 +954,7 @@ fn Encoder(comptime T: type, comptime opts: Options) type {
|
||||
///
|
||||
/// Note reg's effective address is always just reg for the ModR/M byte.
|
||||
/// Remember to add a REX prefix byte if reg or rm are extended!
|
||||
pub fn modRm_SIBDisp0(self: Self, reg_or_opx: u3) !void {
|
||||
pub fn modRm_SIBDisp0(self: Self, reg_or_opx: u3) anyerror!void {
|
||||
try self.modRm(0b00, reg_or_opx, 0b100);
|
||||
}
|
||||
|
||||
@ -982,7 +963,7 @@ fn Encoder(comptime T: type, comptime opts: Options) type {
|
||||
///
|
||||
/// Note reg's effective address is always just reg for the ModR/M byte.
|
||||
/// Remember to add a REX prefix byte if reg or rm are extended!
|
||||
pub fn modRm_RIPDisp32(self: Self, reg_or_opx: u3) !void {
|
||||
pub fn modRm_RIPDisp32(self: Self, reg_or_opx: u3) anyerror!void {
|
||||
try self.modRm(0b00, reg_or_opx, 0b101);
|
||||
}
|
||||
|
||||
@ -991,7 +972,7 @@ fn Encoder(comptime T: type, comptime opts: Options) type {
|
||||
///
|
||||
/// Note reg's effective address is always just reg for the ModR/M byte.
|
||||
/// Remember to add a REX prefix byte if reg or rm are extended!
|
||||
pub fn modRm_indirectDisp8(self: Self, reg_or_opx: u3, rm: u3) !void {
|
||||
pub fn modRm_indirectDisp8(self: Self, reg_or_opx: u3, rm: u3) anyerror!void {
|
||||
assert(rm != 4);
|
||||
try self.modRm(0b01, reg_or_opx, rm);
|
||||
}
|
||||
@ -1001,7 +982,7 @@ fn Encoder(comptime T: type, comptime opts: Options) type {
|
||||
///
|
||||
/// Note reg's effective address is always just reg for the ModR/M byte.
|
||||
/// Remember to add a REX prefix byte if reg or rm are extended!
|
||||
pub fn modRm_SIBDisp8(self: Self, reg_or_opx: u3) !void {
|
||||
pub fn modRm_SIBDisp8(self: Self, reg_or_opx: u3) anyerror!void {
|
||||
try self.modRm(0b01, reg_or_opx, 0b100);
|
||||
}
|
||||
|
||||
@ -1010,7 +991,7 @@ fn Encoder(comptime T: type, comptime opts: Options) type {
|
||||
///
|
||||
/// Note reg's effective address is always just reg for the ModR/M byte.
|
||||
/// Remember to add a REX prefix byte if reg or rm are extended!
|
||||
pub fn modRm_indirectDisp32(self: Self, reg_or_opx: u3, rm: u3) !void {
|
||||
pub fn modRm_indirectDisp32(self: Self, reg_or_opx: u3, rm: u3) anyerror!void {
|
||||
assert(rm != 4);
|
||||
try self.modRm(0b10, reg_or_opx, rm);
|
||||
}
|
||||
@ -1020,7 +1001,7 @@ fn Encoder(comptime T: type, comptime opts: Options) type {
|
||||
///
|
||||
/// Note reg's effective address is always just reg for the ModR/M byte.
|
||||
/// Remember to add a REX prefix byte if reg or rm are extended!
|
||||
pub fn modRm_SIBDisp32(self: Self, reg_or_opx: u3) !void {
|
||||
pub fn modRm_SIBDisp32(self: Self, reg_or_opx: u3) anyerror!void {
|
||||
try self.modRm(0b10, reg_or_opx, 0b100);
|
||||
}
|
||||
|
||||
@ -1031,15 +1012,15 @@ fn Encoder(comptime T: type, comptime opts: Options) type {
|
||||
/// Construct a SIB byte given all the fields
|
||||
///
|
||||
/// Remember to add a REX prefix byte if index or base are extended!
|
||||
pub fn sib(self: Self, scale: u2, index: u3, base: u3) !void {
|
||||
try self.writer.writeByte(@as(u8, scale) << 6 | @as(u8, index) << 3 | base);
|
||||
pub fn sib(self: Self, scale: u2, index: u3, base: u3) anyerror!void {
|
||||
try self.bw.writeByte(@as(u8, scale) << 6 | @as(u8, index) << 3 | base);
|
||||
}
|
||||
|
||||
/// Construct a SIB byte with scale * index + base, no frills.
|
||||
/// r/m effective address: [base + scale * index]
|
||||
///
|
||||
/// Remember to add a REX prefix byte if index or base are extended!
|
||||
pub fn sib_scaleIndexBase(self: Self, scale: u2, index: u3, base: u3) !void {
|
||||
pub fn sib_scaleIndexBase(self: Self, scale: u2, index: u3, base: u3) anyerror!void {
|
||||
assert(base != 5);
|
||||
|
||||
try self.sib(scale, index, base);
|
||||
@ -1049,7 +1030,7 @@ fn Encoder(comptime T: type, comptime opts: Options) type {
|
||||
/// r/m effective address: [scale * index + disp32]
|
||||
///
|
||||
/// Remember to add a REX prefix byte if index or base are extended!
|
||||
pub fn sib_scaleIndexDisp32(self: Self, scale: u2, index: u3) !void {
|
||||
pub fn sib_scaleIndexDisp32(self: Self, scale: u2, index: u3) anyerror!void {
|
||||
// scale is actually ignored
|
||||
// index = 4 means no index if and only if we haven't extended the register
|
||||
// TODO enforce this
|
||||
@ -1061,7 +1042,7 @@ fn Encoder(comptime T: type, comptime opts: Options) type {
|
||||
/// r/m effective address: [base]
|
||||
///
|
||||
/// Remember to add a REX prefix byte if index or base are extended!
|
||||
pub fn sib_base(self: Self, base: u3) !void {
|
||||
pub fn sib_base(self: Self, base: u3) anyerror!void {
|
||||
assert(base != 5);
|
||||
|
||||
// scale is actually ignored
|
||||
@ -1073,7 +1054,7 @@ fn Encoder(comptime T: type, comptime opts: Options) type {
|
||||
/// r/m effective address: [disp32]
|
||||
///
|
||||
/// Remember to add a REX prefix byte if index or base are extended!
|
||||
pub fn sib_disp32(self: Self) !void {
|
||||
pub fn sib_disp32(self: Self) anyerror!void {
|
||||
// scale is actually ignored
|
||||
// index = 4 means no index
|
||||
// base = 5 means no base, if mod == 0.
|
||||
@ -1084,7 +1065,7 @@ fn Encoder(comptime T: type, comptime opts: Options) type {
|
||||
/// r/m effective address: [base + scale * index + disp8]
|
||||
///
|
||||
/// Remember to add a REX prefix byte if index or base are extended!
|
||||
pub fn sib_scaleIndexBaseDisp8(self: Self, scale: u2, index: u3, base: u3) !void {
|
||||
pub fn sib_scaleIndexBaseDisp8(self: Self, scale: u2, index: u3, base: u3) anyerror!void {
|
||||
try self.sib(scale, index, base);
|
||||
}
|
||||
|
||||
@ -1092,7 +1073,7 @@ fn Encoder(comptime T: type, comptime opts: Options) type {
|
||||
/// r/m effective address: [base + disp8]
|
||||
///
|
||||
/// Remember to add a REX prefix byte if index or base are extended!
|
||||
pub fn sib_baseDisp8(self: Self, base: u3) !void {
|
||||
pub fn sib_baseDisp8(self: Self, base: u3) anyerror!void {
|
||||
// scale is ignored
|
||||
// index = 4 means no index
|
||||
try self.sib(0, 4, base);
|
||||
@ -1102,7 +1083,7 @@ fn Encoder(comptime T: type, comptime opts: Options) type {
|
||||
/// r/m effective address: [base + scale * index + disp32]
|
||||
///
|
||||
/// Remember to add a REX prefix byte if index or base are extended!
|
||||
pub fn sib_scaleIndexBaseDisp32(self: Self, scale: u2, index: u3, base: u3) !void {
|
||||
pub fn sib_scaleIndexBaseDisp32(self: Self, scale: u2, index: u3, base: u3) anyerror!void {
|
||||
try self.sib(scale, index, base);
|
||||
}
|
||||
|
||||
@ -1110,7 +1091,7 @@ fn Encoder(comptime T: type, comptime opts: Options) type {
|
||||
/// r/m effective address: [base + disp32]
|
||||
///
|
||||
/// Remember to add a REX prefix byte if index or base are extended!
|
||||
pub fn sib_baseDisp32(self: Self, base: u3) !void {
|
||||
pub fn sib_baseDisp32(self: Self, base: u3) anyerror!void {
|
||||
// scale is ignored
|
||||
// index = 4 means no index
|
||||
try self.sib(0, 4, base);
|
||||
@ -1123,43 +1104,43 @@ fn Encoder(comptime T: type, comptime opts: Options) type {
|
||||
/// Encode an 8 bit displacement
|
||||
///
|
||||
/// It is sign-extended to 64 bits by the cpu.
|
||||
pub fn disp8(self: Self, disp: i8) !void {
|
||||
try self.writer.writeByte(@as(u8, @bitCast(disp)));
|
||||
pub fn disp8(self: Self, disp: i8) anyerror!void {
|
||||
try self.bw.writeByte(@as(u8, @bitCast(disp)));
|
||||
}
|
||||
|
||||
/// Encode an 32 bit displacement
|
||||
///
|
||||
/// It is sign-extended to 64 bits by the cpu.
|
||||
pub fn disp32(self: Self, disp: i32) !void {
|
||||
try self.writer.writeInt(i32, disp, .little);
|
||||
pub fn disp32(self: Self, disp: i32) anyerror!void {
|
||||
try self.bw.writeInt(i32, disp, .little);
|
||||
}
|
||||
|
||||
/// Encode an 8 bit immediate
|
||||
///
|
||||
/// It is sign-extended to 64 bits by the cpu.
|
||||
pub fn imm8(self: Self, imm: u8) !void {
|
||||
try self.writer.writeByte(imm);
|
||||
pub fn imm8(self: Self, imm: u8) anyerror!void {
|
||||
try self.bw.writeByte(imm);
|
||||
}
|
||||
|
||||
/// Encode an 16 bit immediate
|
||||
///
|
||||
/// It is sign-extended to 64 bits by the cpu.
|
||||
pub fn imm16(self: Self, imm: u16) !void {
|
||||
try self.writer.writeInt(u16, imm, .little);
|
||||
pub fn imm16(self: Self, imm: u16) anyerror!void {
|
||||
try self.bw.writeInt(u16, imm, .little);
|
||||
}
|
||||
|
||||
/// Encode an 32 bit immediate
|
||||
///
|
||||
/// It is sign-extended to 64 bits by the cpu.
|
||||
pub fn imm32(self: Self, imm: u32) !void {
|
||||
try self.writer.writeInt(u32, imm, .little);
|
||||
pub fn imm32(self: Self, imm: u32) anyerror!void {
|
||||
try self.bw.writeInt(u32, imm, .little);
|
||||
}
|
||||
|
||||
/// Encode an 64 bit immediate
|
||||
///
|
||||
/// It is sign-extended to 64 bits by the cpu.
|
||||
pub fn imm64(self: Self, imm: u64) !void {
|
||||
try self.writer.writeInt(u64, imm, .little);
|
||||
pub fn imm64(self: Self, imm: u64) anyerror!void {
|
||||
try self.bw.writeInt(u64, imm, .little);
|
||||
}
|
||||
};
|
||||
}
|
||||
@ -2217,10 +2198,10 @@ const Assembler = struct {
|
||||
};
|
||||
}
|
||||
|
||||
pub fn assemble(as: *Assembler, writer: anytype) !void {
|
||||
pub fn assemble(as: *Assembler, bw: *std.io.BufferedWriter) !void {
|
||||
while (try as.next()) |parsed_inst| {
|
||||
const inst: Instruction = try .new(.none, parsed_inst.mnemonic, &parsed_inst.ops);
|
||||
try inst.encode(writer, .{});
|
||||
try inst.encode(bw, .{});
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
276
src/codegen.zig
276
src/codegen.zig
@ -225,14 +225,6 @@ pub fn generateLazyFunction(
}
}

fn writeFloat(comptime F: type, f: F, target: *const std.Target, endian: std.builtin.Endian, code: []u8) void {
_ = target;
const bits = @typeInfo(F).float.bits;
const Int = @Type(.{ .int = .{ .signedness = .unsigned, .bits = bits } });
const int: Int = @bitCast(f);
mem.writeInt(Int, code[0..@divExact(bits, 8)], int, endian);
}

pub fn generateLazySymbol(
bin_file: *link.File,
pt: Zcu.PerThread,
@ -256,7 +248,7 @@ pub fn generateLazySymbol(
const target = &comp.root_mod.resolved_target.result;
const endian = target.cpu.arch.endian();

log.debug("generateLazySymbol: kind = {s}, ty = {}", .{
log.debug("generateLazySymbol: kind = {s}, ty = {f}", .{
@tagName(lazy_sym.kind),
Type.fromInterned(lazy_sym.ty).fmt(pt),
});
@ -296,7 +288,7 @@ pub fn generateLazySymbol(
code.appendAssumeCapacity(0);
}
} else {
return zcu.codegenFailType(lazy_sym.ty, "TODO implement generateLazySymbol for {s} {}", .{
return zcu.codegenFailType(lazy_sym.ty, "TODO implement generateLazySymbol for {s} {f}", .{
@tagName(lazy_sym.kind), Type.fromInterned(lazy_sym.ty).fmt(pt),
});
}
@ -321,19 +313,31 @@ pub fn generateSymbol(
const tracy = trace(@src());
defer tracy.end();

var aw: std.io.AllocatingWriter = undefined;
const bw = aw.fromArrayList(pt.zcu.gpa, code);
defer code.* = aw.toArrayList();
return @errorCast(generateSymbolInner(bin_file, pt, src_loc, val, bw, reloc_parent));
}
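
// Hedged sketch of the ArrayList-backed writer adapter used by generateSymbol above;
// `std.io.AllocatingWriter.fromArrayList` and `toArrayList` are assumed to behave as they
// are used in this change (borrow the list's buffer as a *BufferedWriter, then hand it back):
fn appendFormatted(gpa: std.mem.Allocator, list: *std.ArrayListUnmanaged(u8), n: u32) anyerror!void {
    var aw: std.io.AllocatingWriter = undefined;
    const bw = aw.fromArrayList(gpa, list);
    defer list.* = aw.toArrayList(); // give the (possibly reallocated) buffer back to the caller
    try bw.print("value = {d}\n", .{n});
}
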
pub fn generateSymbolInner(
bin_file: *link.File,
pt: Zcu.PerThread,
src_loc: Zcu.LazySrcLoc,
val: Value,
bw: *std.io.BufferedWriter,
reloc_parent: link.File.RelocInfo.Parent,
) anyerror!void {
const zcu = pt.zcu;
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
const ty = val.typeOf(zcu);

const target = zcu.getTarget();
const endian = target.cpu.arch.endian();

log.debug("generateSymbol: val = {}", .{val.fmtValue(pt)});
log.debug("generateSymbol: val = {f}", .{val.fmtValue(pt)});

if (val.isUndefDeep(zcu)) {
const abi_size = math.cast(usize, ty.abiSize(zcu)) orelse return error.Overflow;
try code.appendNTimes(gpa, 0xaa, abi_size);
try bw.splatByteAll(0xaa, abi_size);
return;
}

@ -363,7 +367,7 @@ pub fn generateSymbol(
.null => unreachable, // non-runtime value
.@"unreachable" => unreachable, // non-runtime value
.empty_tuple => return,
.false, .true => try code.append(gpa, switch (simple_value) {
.false, .true => try bw.writeByte(switch (simple_value) {
.false => 0,
.true => 1,
else => unreachable,
@ -379,11 +383,12 @@ pub fn generateSymbol(
const abi_size = math.cast(usize, ty.abiSize(zcu)) orelse return error.Overflow;
var space: Value.BigIntSpace = undefined;
const int_val = val.toBigInt(&space, zcu);
int_val.writeTwosComplement(try code.addManyAsSlice(gpa, abi_size), endian);
int_val.writeTwosComplement((try bw.writableSlice(abi_size))[0..abi_size], endian);
bw.advance(abi_size);
},
.err => |err| {
const int = try pt.getErrorValue(err.name);
try code.writer(gpa).writeInt(u16, @intCast(int), endian);
try bw.writeInt(u16, @intCast(int), endian);
},
.error_union => |error_union| {
const payload_ty = ty.errorUnionPayload(zcu);
@ -393,7 +398,7 @@ pub fn generateSymbol(
};

if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
try code.writer(gpa).writeInt(u16, err_val, endian);
try bw.writeInt(u16, err_val, endian);
return;
}

@ -403,57 +408,49 @@ pub fn generateSymbol(

// error value first when its type is larger than the error union's payload
if (error_align.order(payload_align) == .gt) {
try code.writer(gpa).writeInt(u16, err_val, endian);
try bw.writeInt(u16, err_val, endian);
}

// emit payload part of the error union
{
const begin = code.items.len;
try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(switch (error_union.val) {
const begin = bw.count;
try generateSymbolInner(bin_file, pt, src_loc, .fromInterned(switch (error_union.val) {
.err_name => try pt.intern(.{ .undef = payload_ty.toIntern() }),
.payload => |payload| payload,
}), code, reloc_parent);
const unpadded_end = code.items.len - begin;
}), bw, reloc_parent);
const unpadded_end = bw.count - begin;
const padded_end = abi_align.forward(unpadded_end);
const padding = math.cast(usize, padded_end - unpadded_end) orelse return error.Overflow;

if (padding > 0) {
try code.appendNTimes(gpa, 0, padding);
}
try bw.splatByteAll(0, math.cast(usize, padded_end - unpadded_end) orelse return error.Overflow);
}

// Payload size is larger than error set, so emit our error set last
if (error_align.compare(.lte, payload_align)) {
const begin = code.items.len;
try code.writer(gpa).writeInt(u16, err_val, endian);
const unpadded_end = code.items.len - begin;
const begin = bw.count;
try bw.writeInt(u16, err_val, endian);
const unpadded_end = bw.count - begin;
const padded_end = abi_align.forward(unpadded_end);
const padding = math.cast(usize, padded_end - unpadded_end) orelse return error.Overflow;

if (padding > 0) {
try code.appendNTimes(gpa, 0, padding);
}
try bw.splatByteAll(0, math.cast(usize, padded_end - unpadded_end) orelse return error.Overflow);
}
},
.enum_tag => |enum_tag| {
const int_tag_ty = ty.intTagType(zcu);
try generateSymbol(bin_file, pt, src_loc, try pt.getCoerced(Value.fromInterned(enum_tag.int), int_tag_ty), code, reloc_parent);
try generateSymbolInner(bin_file, pt, src_loc, try pt.getCoerced(.fromInterned(enum_tag.int), int_tag_ty), bw, reloc_parent);
},
.float => |float| switch (float.storage) {
.f16 => |f16_val| writeFloat(f16, f16_val, target, endian, try code.addManyAsArray(gpa, 2)),
.f32 => |f32_val| writeFloat(f32, f32_val, target, endian, try code.addManyAsArray(gpa, 4)),
.f64 => |f64_val| writeFloat(f64, f64_val, target, endian, try code.addManyAsArray(gpa, 8)),
.f16 => |f16_val| try bw.writeInt(u16, @bitCast(f16_val), endian),
.f32 => |f32_val| try bw.writeInt(u32, @bitCast(f32_val), endian),
.f64 => |f64_val| try bw.writeInt(u64, @bitCast(f64_val), endian),
.f80 => |f80_val| {
writeFloat(f80, f80_val, target, endian, try code.addManyAsArray(gpa, 10));
try bw.writeInt(u80, @bitCast(f80_val), endian);
const abi_size = math.cast(usize, ty.abiSize(zcu)) orelse return error.Overflow;
try code.appendNTimes(gpa, 0, abi_size - 10);
try bw.splatByteAll(0, abi_size - 10);
},
.f128 => |f128_val| writeFloat(f128, f128_val, target, endian, try code.addManyAsArray(gpa, 16)),
.f128 => |f128_val| try bw.writeInt(u128, @bitCast(f128_val), endian),
},
|
||||
.ptr => try lowerPtr(bin_file, pt, src_loc, val.toIntern(), code, reloc_parent, 0),
|
||||
.ptr => try lowerPtr(bin_file, pt, src_loc, val.toIntern(), bw, reloc_parent, 0),
|
||||
.slice => |slice| {
|
||||
try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(slice.ptr), code, reloc_parent);
|
||||
try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(slice.len), code, reloc_parent);
|
||||
try generateSymbolInner(bin_file, pt, src_loc, .fromInterned(slice.ptr), bw, reloc_parent);
|
||||
try generateSymbolInner(bin_file, pt, src_loc, .fromInterned(slice.len), bw, reloc_parent);
|
||||
},
|
||||
.opt => {
|
||||
const payload_type = ty.optionalChild(zcu);
|
||||
@ -462,44 +459,44 @@ pub fn generateSymbol(
|
||||
|
||||
if (ty.optionalReprIsPayload(zcu)) {
|
||||
if (payload_val) |value| {
|
||||
try generateSymbol(bin_file, pt, src_loc, value, code, reloc_parent);
|
||||
try generateSymbolInner(bin_file, pt, src_loc, value, bw, reloc_parent);
|
||||
} else {
|
||||
try code.appendNTimes(gpa, 0, abi_size);
|
||||
try bw.splatByteAll(0, abi_size);
|
||||
}
|
||||
} else {
|
||||
const padding = abi_size - (math.cast(usize, payload_type.abiSize(zcu)) orelse return error.Overflow) - 1;
|
||||
if (payload_type.hasRuntimeBits(zcu)) {
|
||||
const value = payload_val orelse Value.fromInterned(try pt.intern(.{
|
||||
const value: Value = payload_val orelse .fromInterned(try pt.intern(.{
|
||||
.undef = payload_type.toIntern(),
|
||||
}));
|
||||
try generateSymbol(bin_file, pt, src_loc, value, code, reloc_parent);
|
||||
try generateSymbolInner(bin_file, pt, src_loc, value, bw, reloc_parent);
|
||||
}
|
||||
try code.writer(gpa).writeByte(@intFromBool(payload_val != null));
|
||||
try code.appendNTimes(gpa, 0, padding);
|
||||
try bw.writeByte(@intFromBool(payload_val != null));
|
||||
try bw.splatByteAll(0, padding);
|
||||
}
|
||||
},
|
||||
.aggregate => |aggregate| switch (ip.indexToKey(ty.toIntern())) {
|
||||
.array_type => |array_type| switch (aggregate.storage) {
|
||||
.bytes => |bytes| try code.appendSlice(gpa, bytes.toSlice(array_type.lenIncludingSentinel(), ip)),
|
||||
.bytes => |bytes| try bw.writeAll(bytes.toSlice(array_type.lenIncludingSentinel(), ip)),
|
||||
.elems, .repeated_elem => {
|
||||
var index: u64 = 0;
|
||||
while (index < array_type.lenIncludingSentinel()) : (index += 1) {
|
||||
try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(switch (aggregate.storage) {
|
||||
try generateSymbolInner(bin_file, pt, src_loc, .fromInterned(switch (aggregate.storage) {
|
||||
.bytes => unreachable,
|
||||
.elems => |elems| elems[@intCast(index)],
|
||||
.repeated_elem => |elem| if (index < array_type.len)
|
||||
elem
|
||||
else
|
||||
array_type.sentinel,
|
||||
}), code, reloc_parent);
|
||||
}), bw, reloc_parent);
|
||||
}
|
||||
},
|
||||
},
|
||||
.vector_type => |vector_type| {
|
||||
const abi_size = math.cast(usize, ty.abiSize(zcu)) orelse return error.Overflow;
|
||||
if (vector_type.child == .bool_type) {
|
||||
const bytes = try code.addManyAsSlice(gpa, abi_size);
|
||||
@memset(bytes, 0xaa);
|
||||
const buffer = (try bw.writableSlice(abi_size))[0..abi_size];
|
||||
@memset(buffer, 0xaa);
|
||||
var index: usize = 0;
|
||||
const len = math.cast(usize, vector_type.len) orelse return error.Overflow;
|
||||
while (index < len) : (index += 1) {
|
||||
@ -507,7 +504,7 @@ pub fn generateSymbol(
|
||||
.big => len - 1 - index,
|
||||
.little => index,
|
||||
};
|
||||
const byte = &bytes[bit_index / 8];
|
||||
const byte = &buffer[bit_index / 8];
|
||||
const mask = @as(u8, 1) << @truncate(bit_index);
|
||||
if (switch (switch (aggregate.storage) {
|
||||
.bytes => unreachable,
|
||||
@ -535,31 +532,31 @@ pub fn generateSymbol(
|
||||
},
|
||||
}) byte.* |= mask else byte.* &= ~mask;
|
||||
}
|
||||
bw.advance(abi_size);
|
||||
} else {
|
||||
switch (aggregate.storage) {
|
||||
.bytes => |bytes| try code.appendSlice(gpa, bytes.toSlice(vector_type.len, ip)),
|
||||
.bytes => |bytes| try bw.writeAll(bytes.toSlice(vector_type.len, ip)),
|
||||
.elems, .repeated_elem => {
|
||||
var index: u64 = 0;
|
||||
while (index < vector_type.len) : (index += 1) {
|
||||
try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(switch (aggregate.storage) {
|
||||
try generateSymbolInner(bin_file, pt, src_loc, .fromInterned(switch (aggregate.storage) {
|
||||
.bytes => unreachable,
|
||||
.elems => |elems| elems[
|
||||
math.cast(usize, index) orelse return error.Overflow
|
||||
],
|
||||
.repeated_elem => |elem| elem,
|
||||
}), code, reloc_parent);
|
||||
}), bw, reloc_parent);
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
const padding = abi_size -
|
||||
try bw.splatByteAll(0, abi_size -
|
||||
(math.cast(usize, Type.fromInterned(vector_type.child).abiSize(zcu) * vector_type.len) orelse
|
||||
return error.Overflow);
|
||||
if (padding > 0) try code.appendNTimes(gpa, 0, padding);
|
||||
return error.Overflow));
|
||||
}
|
||||
},
|
||||
.tuple_type => |tuple| {
|
||||
const struct_begin = code.items.len;
|
||||
const struct_begin = bw.count;
|
||||
for (
|
||||
tuple.types.get(ip),
|
||||
tuple.values.get(ip),
|
||||
@ -577,17 +574,13 @@ pub fn generateSymbol(
|
||||
.repeated_elem => |elem| elem,
|
||||
};
|
||||
|
||||
try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(field_val), code, reloc_parent);
|
||||
const unpadded_field_end = code.items.len - struct_begin;
|
||||
try generateSymbolInner(bin_file, pt, src_loc, .fromInterned(field_val), bw, reloc_parent);
|
||||
const unpadded_field_end = bw.count - struct_begin;
|
||||
|
||||
// Pad struct members if required
|
||||
const padded_field_end = ty.structFieldOffset(index + 1, zcu);
|
||||
const padding = math.cast(usize, padded_field_end - unpadded_field_end) orelse
|
||||
return error.Overflow;
|
||||
|
||||
if (padding > 0) {
|
||||
try code.appendNTimes(gpa, 0, padding);
|
||||
}
|
||||
try bw.splatByteAll(0, math.cast(usize, padded_field_end - unpadded_field_end) orelse
|
||||
return error.Overflow);
|
||||
}
|
||||
},
|
||||
.struct_type => {
|
||||
@ -595,8 +588,9 @@ pub fn generateSymbol(
|
||||
switch (struct_type.layout) {
|
||||
.@"packed" => {
|
||||
const abi_size = math.cast(usize, ty.abiSize(zcu)) orelse return error.Overflow;
|
||||
const current_pos = code.items.len;
|
||||
try code.appendNTimes(gpa, 0, abi_size);
|
||||
const current_end, const current_count = .{ bw.end, bw.count };
|
||||
const buffer = (try bw.writableSlice(abi_size))[0..abi_size];
|
||||
@memset(buffer, 0);
|
||||
var bits: u16 = 0;
|
||||
|
||||
for (struct_type.field_types.get(ip), 0..) |field_ty, index| {
|
||||
@ -616,22 +610,25 @@ pub fn generateSymbol(
|
||||
error.DivisionByZero => unreachable,
|
||||
error.UnexpectedRemainder => return error.RelocationNotByteAligned,
|
||||
};
|
||||
code.items.len = current_pos + field_offset;
|
||||
// TODO: code.lockPointers();
|
||||
bw.end = current_end + field_offset;
|
||||
bw.count = current_count + field_offset;
|
||||
defer {
|
||||
assert(code.items.len == current_pos + field_offset + @divExact(target.ptrBitWidth(), 8));
|
||||
// TODO: code.unlockPointers();
|
||||
code.items.len = current_pos + abi_size;
|
||||
const field_size = @divExact(target.ptrBitWidth(), 8);
|
||||
assert(bw.end == current_end + field_offset + field_size);
|
||||
assert(bw.count == current_count + field_offset + field_size);
|
||||
bw.end = current_end;
|
||||
bw.count = current_count;
|
||||
}
|
||||
try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(field_val), code, reloc_parent);
|
||||
try generateSymbolInner(bin_file, pt, src_loc, .fromInterned(field_val), bw, reloc_parent);
|
||||
} else {
|
||||
Value.fromInterned(field_val).writeToPackedMemory(Type.fromInterned(field_ty), pt, code.items[current_pos..], bits) catch unreachable;
|
||||
Value.fromInterned(field_val).writeToPackedMemory(Type.fromInterned(field_ty), pt, buffer, bits) catch unreachable;
|
||||
}
|
||||
bits += @intCast(Type.fromInterned(field_ty).bitSize(zcu));
|
||||
}
|
||||
bw.advance(abi_size);
|
||||
},
|
||||
.auto, .@"extern" => {
|
||||
const struct_begin = code.items.len;
|
||||
const struct_begin = bw.count;
|
||||
const field_types = struct_type.field_types.get(ip);
|
||||
const offsets = struct_type.offsets.get(ip);
|
||||
|
||||
@ -649,24 +646,22 @@ pub fn generateSymbol(
|
||||
.repeated_elem => |elem| elem,
|
||||
};
|
||||
|
||||
const padding = math.cast(
|
||||
try bw.splatByteAll(0, math.cast(
|
||||
usize,
|
||||
offsets[field_index] - (code.items.len - struct_begin),
|
||||
) orelse return error.Overflow;
|
||||
if (padding > 0) try code.appendNTimes(gpa, 0, padding);
|
||||
offsets[field_index] - (bw.count - struct_begin),
|
||||
) orelse return error.Overflow);
|
||||
|
||||
try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(field_val), code, reloc_parent);
|
||||
try generateSymbolInner(bin_file, pt, src_loc, .fromInterned(field_val), bw, reloc_parent);
|
||||
}
|
||||
|
||||
const size = struct_type.sizeUnordered(ip);
|
||||
const alignment = struct_type.flagsUnordered(ip).alignment.toByteUnits().?;
|
||||
|
||||
const padding = math.cast(
|
||||
try bw.splatByteAll(0, math.cast(
|
||||
usize,
|
||||
std.mem.alignForward(u64, size, @max(alignment, 1)) -
|
||||
(code.items.len - struct_begin),
|
||||
) orelse return error.Overflow;
|
||||
if (padding > 0) try code.appendNTimes(gpa, 0, padding);
|
||||
(bw.count - struct_begin),
|
||||
) orelse return error.Overflow);
|
||||
},
|
||||
}
|
||||
},
|
||||
@ -676,38 +671,31 @@ pub fn generateSymbol(
|
||||
const layout = ty.unionGetLayout(zcu);
|
||||
|
||||
if (layout.payload_size == 0) {
|
||||
return generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.tag), code, reloc_parent);
|
||||
return generateSymbolInner(bin_file, pt, src_loc, .fromInterned(un.tag), bw, reloc_parent);
|
||||
}
|
||||
|
||||
// Check if we should store the tag first.
|
||||
if (layout.tag_size > 0 and layout.tag_align.compare(.gte, layout.payload_align)) {
|
||||
try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.tag), code, reloc_parent);
|
||||
try generateSymbolInner(bin_file, pt, src_loc, .fromInterned(un.tag), bw, reloc_parent);
|
||||
}
|
||||
|
||||
const union_obj = zcu.typeToUnion(ty).?;
|
||||
if (un.tag != .none) {
|
||||
const field_index = ty.unionTagFieldIndex(Value.fromInterned(un.tag), zcu).?;
|
||||
const field_index = ty.unionTagFieldIndex(.fromInterned(un.tag), zcu).?;
|
||||
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
|
||||
if (!field_ty.hasRuntimeBits(zcu)) {
|
||||
try code.appendNTimes(gpa, 0xaa, math.cast(usize, layout.payload_size) orelse return error.Overflow);
|
||||
try bw.splatByteAll(0xaa, math.cast(usize, layout.payload_size) orelse return error.Overflow);
|
||||
} else {
|
||||
try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.val), code, reloc_parent);
|
||||
|
||||
const padding = math.cast(usize, layout.payload_size - field_ty.abiSize(zcu)) orelse return error.Overflow;
|
||||
if (padding > 0) {
|
||||
try code.appendNTimes(gpa, 0, padding);
|
||||
}
|
||||
try generateSymbolInner(bin_file, pt, src_loc, .fromInterned(un.val), bw, reloc_parent);
|
||||
try bw.splatByteAll(0, math.cast(usize, layout.payload_size - field_ty.abiSize(zcu)) orelse return error.Overflow);
|
||||
}
|
||||
} else {
|
||||
try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.val), code, reloc_parent);
|
||||
try generateSymbolInner(bin_file, pt, src_loc, .fromInterned(un.val), bw, reloc_parent);
|
||||
}
|
||||
|
||||
if (layout.tag_size > 0 and layout.tag_align.compare(.lt, layout.payload_align)) {
|
||||
try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.tag), code, reloc_parent);
|
||||
|
||||
if (layout.padding > 0) {
|
||||
try code.appendNTimes(gpa, 0, layout.padding);
|
||||
}
|
||||
try generateSymbolInner(bin_file, pt, src_loc, .fromInterned(un.tag), bw, reloc_parent);
|
||||
try bw.splatByteAll(0, layout.padding);
|
||||
}
|
||||
},
|
||||
.memoized_call => unreachable,
|
||||
@ -719,32 +707,32 @@ fn lowerPtr(
|
||||
pt: Zcu.PerThread,
|
||||
src_loc: Zcu.LazySrcLoc,
|
||||
ptr_val: InternPool.Index,
|
||||
code: *std.ArrayListUnmanaged(u8),
|
||||
bw: *std.io.BufferedWriter,
|
||||
reloc_parent: link.File.RelocInfo.Parent,
|
||||
prev_offset: u64,
|
||||
) GenerateSymbolError!void {
|
||||
) anyerror!void {
|
||||
const zcu = pt.zcu;
|
||||
const ptr = zcu.intern_pool.indexToKey(ptr_val).ptr;
|
||||
const offset: u64 = prev_offset + ptr.byte_offset;
|
||||
return switch (ptr.base_addr) {
|
||||
.nav => |nav| try lowerNavRef(bin_file, pt, nav, code, reloc_parent, offset),
|
||||
.uav => |uav| try lowerUavRef(bin_file, pt, src_loc, uav, code, reloc_parent, offset),
|
||||
.int => try generateSymbol(bin_file, pt, src_loc, try pt.intValue(Type.usize, offset), code, reloc_parent),
|
||||
.nav => |nav| try lowerNavRef(bin_file, pt, nav, bw, reloc_parent, offset),
|
||||
.uav => |uav| try lowerUavRef(bin_file, pt, src_loc, uav, bw, reloc_parent, offset),
|
||||
.int => try generateSymbolInner(bin_file, pt, src_loc, try pt.intValue(Type.usize, offset), bw, reloc_parent),
|
||||
.eu_payload => |eu_ptr| try lowerPtr(
|
||||
bin_file,
|
||||
pt,
|
||||
src_loc,
|
||||
eu_ptr,
|
||||
code,
|
||||
bw,
|
||||
reloc_parent,
|
||||
offset + errUnionPayloadOffset(
|
||||
Value.fromInterned(eu_ptr).typeOf(zcu).childType(zcu).errorUnionPayload(zcu),
|
||||
zcu,
|
||||
),
|
||||
),
|
||||
.opt_payload => |opt_ptr| try lowerPtr(bin_file, pt, src_loc, opt_ptr, code, reloc_parent, offset),
|
||||
.opt_payload => |opt_ptr| try lowerPtr(bin_file, pt, src_loc, opt_ptr, bw, reloc_parent, offset),
|
||||
.field => |field| {
|
||||
const base_ptr = Value.fromInterned(field.base);
|
||||
const base_ptr: Value = .fromInterned(field.base);
|
||||
const base_ty = base_ptr.typeOf(zcu).childType(zcu);
|
||||
const field_off: u64 = switch (base_ty.zigTypeTag(zcu)) {
|
||||
.pointer => off: {
|
||||
@ -761,7 +749,7 @@ fn lowerPtr(
|
||||
},
|
||||
else => unreachable,
|
||||
};
|
||||
return lowerPtr(bin_file, pt, src_loc, field.base, code, reloc_parent, offset + field_off);
|
||||
return lowerPtr(bin_file, pt, src_loc, field.base, bw, reloc_parent, offset + field_off);
|
||||
},
|
||||
.arr_elem, .comptime_field, .comptime_alloc => unreachable,
|
||||
};
|
||||
@ -772,12 +760,11 @@ fn lowerUavRef(
|
||||
pt: Zcu.PerThread,
|
||||
src_loc: Zcu.LazySrcLoc,
|
||||
uav: InternPool.Key.Ptr.BaseAddr.Uav,
|
||||
code: *std.ArrayListUnmanaged(u8),
|
||||
bw: *std.io.BufferedWriter,
|
||||
reloc_parent: link.File.RelocInfo.Parent,
|
||||
offset: u64,
|
||||
) GenerateSymbolError!void {
|
||||
) anyerror!void {
|
||||
const zcu = pt.zcu;
|
||||
const gpa = zcu.gpa;
|
||||
const ip = &zcu.intern_pool;
|
||||
const comp = lf.comp;
|
||||
const target = &comp.root_mod.resolved_target.result;
|
||||
@ -786,13 +773,9 @@ fn lowerUavRef(
|
||||
const uav_ty = Type.fromInterned(ip.typeOf(uav_val));
|
||||
const is_fn_body = uav_ty.zigTypeTag(zcu) == .@"fn";
|
||||
|
||||
log.debug("lowerUavRef: ty = {}", .{uav_ty.fmt(pt)});
|
||||
try code.ensureUnusedCapacity(gpa, ptr_width_bytes);
|
||||
log.debug("lowerUavRef: ty = {f}", .{uav_ty.fmt(pt)});
|
||||
|
||||
if (!is_fn_body and !uav_ty.hasRuntimeBits(zcu)) {
|
||||
code.appendNTimesAssumeCapacity(0xaa, ptr_width_bytes);
|
||||
return;
|
||||
}
|
||||
if (!is_fn_body and !uav_ty.hasRuntimeBits(zcu)) return bw.splatByteAll(0xaa, ptr_width_bytes);
|
||||
|
||||
switch (lf.tag) {
|
||||
.c => unreachable,
|
||||
@ -801,9 +784,8 @@ fn lowerUavRef(
|
||||
dev.check(link.File.Tag.wasm.devFeature());
|
||||
const wasm = lf.cast(.wasm).?;
|
||||
assert(reloc_parent == .none);
|
||||
try wasm.addUavReloc(code.items.len, uav.val, uav.orig_ty, @intCast(offset));
|
||||
code.appendNTimesAssumeCapacity(0, ptr_width_bytes);
|
||||
return;
|
||||
try wasm.addUavReloc(bw.count, uav.val, uav.orig_ty, @intCast(offset));
|
||||
return bw.splatByteAll(0, ptr_width_bytes);
|
||||
},
|
||||
else => {},
|
||||
}
|
||||
@ -816,14 +798,14 @@ fn lowerUavRef(
|
||||
|
||||
const vaddr = try lf.getUavVAddr(uav_val, .{
|
||||
.parent = reloc_parent,
|
||||
.offset = code.items.len,
|
||||
.offset = bw.count,
|
||||
.addend = @intCast(offset),
|
||||
});
|
||||
const endian = target.cpu.arch.endian();
|
||||
switch (ptr_width_bytes) {
|
||||
2 => mem.writeInt(u16, code.addManyAsArrayAssumeCapacity(2), @intCast(vaddr), endian),
|
||||
4 => mem.writeInt(u32, code.addManyAsArrayAssumeCapacity(4), @intCast(vaddr), endian),
|
||||
8 => mem.writeInt(u64, code.addManyAsArrayAssumeCapacity(8), vaddr, endian),
|
||||
2 => try bw.writeInt(u16, @intCast(vaddr), endian),
|
||||
4 => try bw.writeInt(u32, @intCast(vaddr), endian),
|
||||
8 => try bw.writeInt(u64, vaddr, endian),
|
||||
else => unreachable,
|
||||
}
|
||||
}
|
||||
@ -832,10 +814,10 @@ fn lowerNavRef(
|
||||
lf: *link.File,
|
||||
pt: Zcu.PerThread,
|
||||
nav_index: InternPool.Nav.Index,
|
||||
code: *std.ArrayListUnmanaged(u8),
|
||||
bw: *std.io.BufferedWriter,
|
||||
reloc_parent: link.File.RelocInfo.Parent,
|
||||
offset: u64,
|
||||
) GenerateSymbolError!void {
|
||||
) anyerror!void {
|
||||
const zcu = pt.zcu;
|
||||
const gpa = zcu.gpa;
|
||||
const ip = &zcu.intern_pool;
|
||||
@ -845,12 +827,9 @@ fn lowerNavRef(
|
||||
const nav_ty = Type.fromInterned(ip.getNav(nav_index).typeOf(ip));
|
||||
const is_fn_body = nav_ty.zigTypeTag(zcu) == .@"fn";
|
||||
|
||||
try code.ensureUnusedCapacity(gpa, ptr_width_bytes);
|
||||
log.debug("lowerNavRef: ty = {f}", .{nav_ty.fmt(pt)});
|
||||
|
||||
if (!is_fn_body and !nav_ty.hasRuntimeBits(zcu)) {
|
||||
code.appendNTimesAssumeCapacity(0xaa, ptr_width_bytes);
|
||||
return;
|
||||
}
|
||||
if (!is_fn_body and !nav_ty.hasRuntimeBits(zcu)) return bw.splatByteAll(0xaa, ptr_width_bytes);
|
||||
|
||||
switch (lf.tag) {
|
||||
.c => unreachable,
|
||||
@ -867,13 +846,13 @@ fn lowerNavRef(
|
||||
} else {
|
||||
try wasm.func_table_fixups.append(gpa, .{
|
||||
.table_index = @enumFromInt(gop.index),
|
||||
.offset = @intCast(code.items.len),
|
||||
.offset = @intCast(bw.count),
|
||||
});
|
||||
}
|
||||
} else {
|
||||
if (is_obj) {
|
||||
try wasm.out_relocs.append(gpa, .{
|
||||
.offset = @intCast(code.items.len),
|
||||
.offset = @intCast(bw.count),
|
||||
.pointee = .{ .symbol_index = try wasm.navSymbolIndex(nav_index) },
|
||||
.tag = if (ptr_width_bytes == 4) .memory_addr_i32 else .memory_addr_i64,
|
||||
.addend = @intCast(offset),
|
||||
@ -882,27 +861,26 @@ fn lowerNavRef(
|
||||
try wasm.nav_fixups.ensureUnusedCapacity(gpa, 1);
|
||||
wasm.nav_fixups.appendAssumeCapacity(.{
|
||||
.navs_exe_index = try wasm.refNavExe(nav_index),
|
||||
.offset = @intCast(code.items.len),
|
||||
.offset = @intCast(bw.count),
|
||||
.addend = @intCast(offset),
|
||||
});
|
||||
}
|
||||
}
|
||||
code.appendNTimesAssumeCapacity(0, ptr_width_bytes);
|
||||
return;
|
||||
return bw.splatByteAll(0, ptr_width_bytes);
|
||||
},
|
||||
else => {},
|
||||
}
|
||||
|
||||
const vaddr = lf.getNavVAddr(pt, nav_index, .{
|
||||
.parent = reloc_parent,
|
||||
.offset = code.items.len,
|
||||
.offset = bw.count,
|
||||
.addend = @intCast(offset),
|
||||
}) catch @panic("TODO rework getNavVAddr");
|
||||
const endian = target.cpu.arch.endian();
|
||||
switch (ptr_width_bytes) {
|
||||
2 => mem.writeInt(u16, code.addManyAsArrayAssumeCapacity(2), @intCast(vaddr), endian),
|
||||
4 => mem.writeInt(u32, code.addManyAsArrayAssumeCapacity(4), @intCast(vaddr), endian),
|
||||
8 => mem.writeInt(u64, code.addManyAsArrayAssumeCapacity(8), vaddr, endian),
|
||||
2 => try bw.writeInt(u16, @intCast(vaddr), endian),
|
||||
4 => try bw.writeInt(u32, @intCast(vaddr), endian),
|
||||
8 => try bw.writeInt(u64, vaddr, endian),
|
||||
else => unreachable,
|
||||
}
|
||||
}
|
||||
@ -1084,7 +1062,7 @@ pub fn lowerValue(pt: Zcu.PerThread, val: Value, target: *const std.Target) Allo
|
||||
const ip = &zcu.intern_pool;
|
||||
const ty = val.typeOf(zcu);
|
||||
|
||||
log.debug("lowerValue(@as({}, {}))", .{ ty.fmt(pt), val.fmtValue(pt) });
|
||||
log.debug("lowerValue(@as({f}, {f}))", .{ ty.fmt(pt), val.fmtValue(pt) });
|
||||
|
||||
if (val.isUndef(zcu)) return .undef;
|
||||
|
||||
|
||||
@ -746,12 +746,14 @@ pub const Object = struct {
|
||||
try wip.finish();
|
||||
}
|
||||
|
||||
fn genModuleLevelAssembly(object: *Object) !void {
|
||||
const writer = object.builder.setModuleAsm();
|
||||
fn genModuleLevelAssembly(object: *Object) Allocator.Error!void {
|
||||
var aw: std.io.AllocatingWriter = undefined;
|
||||
const bw = object.builder.setModuleAsm(&aw);
|
||||
errdefer aw.deinit();
|
||||
for (object.pt.zcu.global_assembly.values()) |assembly| {
|
||||
try writer.print("{s}\n", .{assembly});
|
||||
bw.print("{s}\n", .{assembly}) catch |err| return @errorCast(err);
|
||||
}
|
||||
try object.builder.finishModuleAsm();
|
||||
try object.builder.finishModuleAsm(&aw);
|
||||
}
|
||||
|
||||
pub const EmitOptions = struct {
|
||||
@ -939,7 +941,7 @@ pub const Object = struct {
|
||||
if (std.mem.eql(u8, path, "-")) {
|
||||
o.builder.dump();
|
||||
} else {
|
||||
_ = try o.builder.printToFile(path);
|
||||
_ = o.builder.printToFile(path);
|
||||
}
|
||||
}
|
||||
|
||||
@ -2677,9 +2679,9 @@ pub const Object = struct {
|
||||
|
||||
fn allocTypeName(o: *Object, ty: Type) Allocator.Error![:0]const u8 {
|
||||
var aw: std.io.AllocatingWriter = undefined;
|
||||
const bw = aw.init(o.gpa);
|
||||
aw.init(o.gpa);
|
||||
defer aw.deinit();
|
||||
try ty.print(bw, o.pt);
|
||||
ty.print(&aw.buffered_writer, o.pt) catch |err| return @errorCast(err);
|
||||
return aw.toOwnedSliceSentinel(0);
|
||||
}
|
||||
|
||||
@ -4479,7 +4481,7 @@ pub const Object = struct {
|
||||
const target = &zcu.root_mod.resolved_target.result;
|
||||
const function_index = try o.builder.addFunction(
|
||||
try o.builder.fnType(ret_ty, &.{try o.lowerType(Type.fromInterned(enum_type.tag_ty))}, .normal),
|
||||
try o.builder.strtabStringFmt("__zig_tag_name_{}", .{enum_type.name.fmt(ip)}),
|
||||
try o.builder.strtabStringFmt("__zig_tag_name_{f}", .{enum_type.name.fmt(ip)}),
|
||||
toLlvmAddressSpace(.generic, target),
|
||||
);
|
||||
|
||||
@ -4630,7 +4632,7 @@ pub const NavGen = struct {
|
||||
if (zcu.getTarget().cpu.arch.isWasm() and ty.zigTypeTag(zcu) == .@"fn") {
|
||||
if (lib_name.toSlice(ip)) |lib_name_slice| {
|
||||
if (!std.mem.eql(u8, lib_name_slice, "c")) {
|
||||
break :decl_name try o.builder.strtabStringFmt("{}|{s}", .{ nav.name.fmt(ip), lib_name_slice });
|
||||
break :decl_name try o.builder.strtabStringFmt("{f}|{s}", .{ nav.name.fmt(ip), lib_name_slice });
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -7469,7 +7471,7 @@ pub const FuncGen = struct {
|
||||
llvm_param_types[llvm_param_i] = llvm_elem_ty;
|
||||
}
|
||||
|
||||
try llvm_constraints.writer(self.gpa).print(",{d}", .{output_index});
|
||||
try llvm_constraints.print(self.gpa, ",{d}", .{output_index});
|
||||
|
||||
// In the case of indirect inputs, LLVM requires the callsite to have
|
||||
// an elementtype(<ty>) attribute.
|
||||
@ -7570,7 +7572,7 @@ pub const FuncGen = struct {
|
||||
// we should validate the assembly in Sema; by now it is too late
|
||||
return self.todo("unknown input or output name: '{s}'", .{name});
|
||||
};
|
||||
try rendered_template.writer().print("{d}", .{index});
|
||||
try rendered_template.print("{d}", .{index});
|
||||
if (byte == ':') {
|
||||
try rendered_template.append(':');
|
||||
modifier_start = i + 1;
|
||||
@ -10377,7 +10379,7 @@ pub const FuncGen = struct {
|
||||
const target = &zcu.root_mod.resolved_target.result;
|
||||
const function_index = try o.builder.addFunction(
|
||||
try o.builder.fnType(.i1, &.{try o.lowerType(Type.fromInterned(enum_type.tag_ty))}, .normal),
|
||||
try o.builder.strtabStringFmt("__zig_is_named_enum_value_{}", .{enum_type.name.fmt(ip)}),
|
||||
try o.builder.strtabStringFmt("__zig_is_named_enum_value_{f}", .{enum_type.name.fmt(ip)}),
|
||||
toLlvmAddressSpace(.generic, target),
|
||||
);
|
||||
|
||||
|
||||
@ -817,7 +817,7 @@ const NavGen = struct {
|
||||
const result_ty_id = try self.resolveType(ty, repr);
|
||||
const ip = &zcu.intern_pool;
|
||||
|
||||
log.debug("lowering constant: ty = {}, val = {}, key = {s}", .{ ty.fmt(pt), val.fmtValue(pt), @tagName(ip.indexToKey(val.toIntern())) });
|
||||
log.debug("lowering constant: ty = {f}, val = {f}, key = {s}", .{ ty.fmt(pt), val.fmtValue(pt), @tagName(ip.indexToKey(val.toIntern())) });
|
||||
if (val.isUndefDeep(zcu)) {
|
||||
return self.spv.constUndef(result_ty_id);
|
||||
}
|
||||
@ -1147,7 +1147,7 @@ const NavGen = struct {
|
||||
return result_ptr_id;
|
||||
}
|
||||
|
||||
return self.fail("cannot perform pointer cast: '{}' to '{}'", .{
|
||||
return self.fail("cannot perform pointer cast: '{f}' to '{f}'", .{
|
||||
parent_ptr_ty.fmt(pt),
|
||||
oac.new_ptr_ty.fmt(pt),
|
||||
});
|
||||
@ -1259,11 +1259,11 @@ const NavGen = struct {
|
||||
}
|
||||
|
||||
// Turn a Zig type's name into a cache reference.
|
||||
fn resolveTypeName(self: *NavGen, ty: Type) ![]const u8 {
|
||||
var name = std.ArrayList(u8).init(self.gpa);
|
||||
defer name.deinit();
|
||||
try ty.print(name.writer(), self.pt);
|
||||
return try name.toOwnedSlice();
|
||||
fn resolveTypeName(self: *NavGen, ty: Type) Allocator.Error![]const u8 {
|
||||
var aw: std.io.AllocatingWriter = undefined;
|
||||
aw.init(self.gpa);
|
||||
ty.print(&aw.buffered_writer, self.pt) catch |err| return @errorCast(err);
|
||||
return aw.toOwnedSlice();
|
||||
}
|
||||
|
||||
/// Create an integer type suitable for storing at least 'bits' bits.
|
||||
@ -1462,7 +1462,7 @@ const NavGen = struct {
|
||||
const pt = self.pt;
|
||||
const zcu = pt.zcu;
|
||||
const ip = &zcu.intern_pool;
|
||||
log.debug("resolveType: ty = {}", .{ty.fmt(pt)});
|
||||
log.debug("resolveType: ty = {f}", .{ty.fmt(pt)});
|
||||
const target = self.spv.target;
|
||||
|
||||
const section = &self.spv.sections.types_globals_constants;
|
||||
@ -3068,7 +3068,7 @@ const NavGen = struct {
|
||||
try self.func.body.emit(self.spv.gpa, .OpFunctionEnd, {});
|
||||
try self.spv.addFunction(spv_decl_index, self.func);
|
||||
|
||||
try self.spv.debugNameFmt(initializer_id, "initializer of {}", .{nav.fqn.fmt(ip)});
|
||||
try self.spv.debugNameFmt(initializer_id, "initializer of {f}", .{nav.fqn.fmt(ip)});
|
||||
|
||||
try self.spv.sections.types_globals_constants.emit(self.spv.gpa, .OpExtInst, .{
|
||||
.id_result_type = ptr_ty_id,
|
||||
|
||||
@ -18,15 +18,10 @@ pub const IdResult = enum(Word) {
|
||||
none,
|
||||
_,
|
||||
|
||||
pub fn format(
|
||||
self: IdResult,
|
||||
comptime _: []const u8,
|
||||
_: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) @TypeOf(writer).Error!void {
|
||||
pub fn format(self: IdResult, bw: *std.io.BufferedWriter, comptime _: []const u8) anyerror!void {
|
||||
switch (self) {
|
||||
.none => try writer.writeAll("(none)"),
|
||||
else => try writer.print("%{}", .{@intFromEnum(self)}),
|
||||
.none => try bw.writeAll("(none)"),
|
||||
else => try bw.print("%{}", .{@intFromEnum(self)}),
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
@ -80,18 +80,18 @@ fn dumpStatusReport() !void {
|
||||
var fba = std.heap.FixedBufferAllocator.init(&crash_heap);
|
||||
const allocator = fba.allocator();
|
||||
|
||||
const stderr = std.fs.File.stderr.writer().unbuffered();
|
||||
var stderr = std.fs.File.stderr().writer().unbuffered();
|
||||
const block: *Sema.Block = anal.block;
|
||||
const zcu = anal.sema.pt.zcu;
|
||||
|
||||
const file, const src_base_node = Zcu.LazySrcLoc.resolveBaseNode(block.src_base_inst, zcu) orelse {
|
||||
const file = zcu.fileByIndex(block.src_base_inst.resolveFile(&zcu.intern_pool));
|
||||
try stderr.print("Analyzing lost instruction in file '{}'. This should not happen!\n\n", .{file.path.fmt(zcu.comp)});
|
||||
try stderr.print("Analyzing lost instruction in file '{f}'. This should not happen!\n\n", .{file.path.fmt(zcu.comp)});
|
||||
return;
|
||||
};
|
||||
|
||||
try stderr.writeAll("Analyzing ");
|
||||
try stderr.print("Analyzing '{}'\n", .{file.path.fmt(zcu.comp)});
|
||||
try stderr.print("Analyzing '{f}'\n", .{file.path.fmt(zcu.comp)});
|
||||
|
||||
print_zir.renderInstructionContext(
|
||||
allocator,
|
||||
@ -100,14 +100,14 @@ fn dumpStatusReport() !void {
|
||||
file,
|
||||
src_base_node,
|
||||
6, // indent
|
||||
stderr,
|
||||
&stderr,
|
||||
) catch |err| switch (err) {
|
||||
error.OutOfMemory => try stderr.writeAll(" <out of memory dumping zir>\n"),
|
||||
else => |e| return e,
|
||||
};
|
||||
try stderr.print(
|
||||
\\ For full context, use the command
|
||||
\\ zig ast-check -t {}
|
||||
\\ zig ast-check -t {f}
|
||||
\\
|
||||
\\
|
||||
, .{file.path.fmt(zcu.comp)});
|
||||
@ -116,7 +116,7 @@ fn dumpStatusReport() !void {
|
||||
while (parent) |curr| {
|
||||
fba.reset();
|
||||
const cur_block_file = zcu.fileByIndex(curr.block.src_base_inst.resolveFile(&zcu.intern_pool));
|
||||
try stderr.print(" in {}\n", .{cur_block_file.path.fmt(zcu.comp)});
|
||||
try stderr.print(" in {f}\n", .{cur_block_file.path.fmt(zcu.comp)});
|
||||
_, const cur_block_src_base_node = Zcu.LazySrcLoc.resolveBaseNode(curr.block.src_base_inst, zcu) orelse {
|
||||
try stderr.writeAll(" > [lost instruction; this should not happen]\n");
|
||||
parent = curr.parent;
|
||||
@ -129,7 +129,7 @@ fn dumpStatusReport() !void {
|
||||
cur_block_file,
|
||||
cur_block_src_base_node,
|
||||
6, // indent
|
||||
stderr,
|
||||
&stderr,
|
||||
) catch |err| switch (err) {
|
||||
error.OutOfMemory => try stderr.writeAll(" <out of memory dumping zir>\n"),
|
||||
else => |e| return e,
|
||||
@ -139,7 +139,7 @@ fn dumpStatusReport() !void {
|
||||
parent = curr.parent;
|
||||
}
|
||||
|
||||
try stderr.writeAll("\n");
|
||||
try stderr.writeByte('\n');
|
||||
}
|
||||
|
||||
var crash_heap: [16 * 4096]u8 = undefined;
|
||||
@ -268,7 +268,8 @@ const StackContext = union(enum) {
|
||||
debug.dumpCurrentStackTrace(ct.ret_addr);
|
||||
},
|
||||
.exception => |context| {
|
||||
debug.dumpStackTraceFromBase(context);
|
||||
var stderr = std.fs.File.stderr().writer().unbuffered();
|
||||
debug.dumpStackTraceFromBase(context, &stderr);
|
||||
},
|
||||
.not_supported => {
|
||||
std.fs.File.stderr().writeAll("Stack trace not supported on this platform.\n") catch {};
|
||||
@ -378,7 +379,7 @@ const PanicSwitch = struct {
|
||||
|
||||
state.recover_stage = .release_mutex;
|
||||
|
||||
const stderr = std.fs.File.stderr().writer().unbuffered();
|
||||
var stderr = std.fs.File.stderr().writer().unbuffered();
|
||||
if (builtin.single_threaded) {
|
||||
stderr.print("panic: ", .{}) catch goTo(releaseMutex, .{state});
|
||||
} else {
|
||||
@ -405,7 +406,7 @@ const PanicSwitch = struct {
|
||||
recover(state, trace, stack, msg);
|
||||
|
||||
state.recover_stage = .release_mutex;
|
||||
const stderr = std.fs.File.stderr().writer().unbuffered();
|
||||
var stderr = std.fs.File.stderr().writer().unbuffered();
|
||||
stderr.writeAll("\nOriginal Error:\n") catch {};
|
||||
goTo(reportStack, .{state});
|
||||
}
|
||||
@ -521,7 +522,7 @@ const PanicSwitch = struct {
|
||||
var stderr = std.fs.File.stderr().writer().unbuffered();
|
||||
stderr.writeAll("\nPanicked while dumping inner panic stack: ") catch {};
|
||||
stderr.writeAll(msg) catch {};
|
||||
stderr.writeAll("\n") catch {};
|
||||
stderr.writeByte('\n') catch {};
|
||||
|
||||
// If we succeed, restore all the way to dumping the stack.
|
||||
state.recover_verbosity = .message_and_stack;
|
||||
|
||||
@ -89,7 +89,7 @@ pub fn run(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
|
||||
fatal("cannot use --stdin with positional arguments", .{});
|
||||
}
|
||||
|
||||
const source_code = std.zig.readSourceFileToEndAlloc(gpa, .stdin(), null) catch |err| {
|
||||
const source_code = std.zig.readSourceFileToEndAlloc(gpa, .stdin(), 0) catch |err| {
|
||||
fatal("unable to read stdin: {}", .{err});
|
||||
};
|
||||
defer gpa.free(source_code);
|
||||
@ -134,9 +134,9 @@ pub fn run(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
|
||||
process.exit(2);
|
||||
}
|
||||
var aw: std.io.AllocatingWriter = undefined;
|
||||
const bw = aw.init(gpa);
|
||||
aw.init(gpa);
|
||||
defer aw.deinit();
|
||||
try tree.render(gpa, bw, .{});
|
||||
try tree.render(gpa, &aw.buffered_writer, .{});
|
||||
const formatted = aw.getWritten();
|
||||
|
||||
if (check_flag) {
|
||||
|
||||
@ -736,13 +736,13 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
|
||||
.lt => continue,
|
||||
.gt => {
|
||||
// TODO Expose via compile error mechanism instead of log.
|
||||
log.warn("invalid target glibc version: {}", .{target_version});
|
||||
log.warn("invalid target glibc version: {f}", .{target_version});
|
||||
return error.InvalidTargetGLibCVersion;
|
||||
},
|
||||
}
|
||||
} else blk: {
|
||||
const latest_index = metadata.all_versions.len - 1;
|
||||
log.warn("zig cannot build new glibc version {}; providing instead {}", .{
|
||||
log.warn("zig cannot build new glibc version {f}; providing instead {f}", .{
|
||||
target_version, metadata.all_versions[latest_index],
|
||||
});
|
||||
break :blk latest_index;
|
||||
@ -752,9 +752,9 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
|
||||
var map_contents = std.ArrayList(u8).init(arena);
|
||||
for (metadata.all_versions[0 .. target_ver_index + 1]) |ver| {
|
||||
if (ver.patch == 0) {
|
||||
try map_contents.writer().print("GLIBC_{d}.{d} {{ }};\n", .{ ver.major, ver.minor });
|
||||
try map_contents.print("GLIBC_{d}.{d} {{ }};\n", .{ ver.major, ver.minor });
|
||||
} else {
|
||||
try map_contents.writer().print("GLIBC_{d}.{d}.{d} {{ }};\n", .{ ver.major, ver.minor, ver.patch });
|
||||
try map_contents.print("GLIBC_{d}.{d}.{d} {{ }};\n", .{ ver.major, ver.minor, ver.patch });
|
||||
}
|
||||
}
|
||||
try o_directory.handle.writeFile(.{ .sub_path = all_map_basename, .data = map_contents.items });
|
||||
@ -773,7 +773,6 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
|
||||
try stubs_asm.appendSlice(".text\n");
|
||||
|
||||
var sym_i: usize = 0;
|
||||
var sym_name_buf = std.ArrayList(u8).init(arena);
|
||||
var opt_symbol_name: ?[]const u8 = null;
|
||||
var versions_buffer: [32]u8 = undefined;
|
||||
var versions_len: usize = undefined;
|
||||
@ -794,24 +793,20 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
|
||||
// twice, which causes a "duplicate symbol" assembler error.
|
||||
var versions_written = std.AutoArrayHashMap(Version, void).init(arena);
|
||||
|
||||
var inc_fbs = std.io.fixedBufferStream(metadata.inclusions);
|
||||
var inc_reader = inc_fbs.reader();
|
||||
var inc_br: std.io.BufferedReader = undefined;
|
||||
inc_br.initFixed(metadata.inclusions);
|
||||
|
||||
const fn_inclusions_len = try inc_reader.readInt(u16, .little);
|
||||
const fn_inclusions_len = try inc_br.takeInt(u16, .little);
|
||||
|
||||
while (sym_i < fn_inclusions_len) : (sym_i += 1) {
|
||||
const sym_name = opt_symbol_name orelse n: {
|
||||
sym_name_buf.clearRetainingCapacity();
|
||||
try inc_reader.streamUntilDelimiter(sym_name_buf.writer(), 0, null);
|
||||
|
||||
opt_symbol_name = sym_name_buf.items;
|
||||
opt_symbol_name = try inc_br.takeSentinel(0);
|
||||
versions_buffer = undefined;
|
||||
versions_len = 0;
|
||||
|
||||
break :n sym_name_buf.items;
|
||||
break :n opt_symbol_name.?;
|
||||
};
|
||||
const targets = try std.leb.readUleb128(u64, inc_reader);
|
||||
var lib_index = try inc_reader.readByte();
|
||||
const targets = try inc_br.takeLeb128(u64);
|
||||
var lib_index = try inc_br.takeByte();
|
||||
|
||||
const is_terminal = (lib_index & (1 << 7)) != 0;
|
||||
if (is_terminal) {
|
||||
@ -825,7 +820,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
|
||||
((targets & (@as(u64, 1) << @as(u6, @intCast(target_targ_index)))) != 0);
|
||||
|
||||
while (true) {
|
||||
const byte = try inc_reader.readByte();
|
||||
const byte = try inc_br.takeByte();
|
||||
const last = (byte & 0b1000_0000) != 0;
|
||||
const ver_i = @as(u7, @truncate(byte));
|
||||
if (ok_lib_and_target and ver_i <= target_ver_index) {
|
||||
@ -880,7 +875,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
|
||||
"{s}_{d}_{d}",
|
||||
.{ sym_name, ver.major, ver.minor },
|
||||
);
|
||||
try stubs_asm.writer().print(
|
||||
try stubs_asm.print(
|
||||
\\.balign {d}
|
||||
\\.globl {s}
|
||||
\\.type {s}, %function
|
||||
@ -905,7 +900,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
|
||||
"{s}_{d}_{d}_{d}",
|
||||
.{ sym_name, ver.major, ver.minor, ver.patch },
|
||||
);
|
||||
try stubs_asm.writer().print(
|
||||
try stubs_asm.print(
|
||||
\\.balign {d}
|
||||
\\.globl {s}
|
||||
\\.type {s}, %function
|
||||
@ -950,7 +945,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
|
||||
// versions where the symbol didn't exist. We only care about modern glibc versions, so use
|
||||
// a strong reference.
|
||||
if (std.mem.eql(u8, lib.name, "c")) {
|
||||
try stubs_asm.writer().print(
|
||||
try stubs_asm.print(
|
||||
\\.balign {d}
|
||||
\\.globl _IO_stdin_used
|
||||
\\{s} _IO_stdin_used
|
||||
@ -963,7 +958,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
|
||||
|
||||
try stubs_asm.appendSlice(".data\n");
|
||||
|
||||
const obj_inclusions_len = try inc_reader.readInt(u16, .little);
|
||||
const obj_inclusions_len = try inc_br.takeInt(u16, .little);
|
||||
|
||||
var sizes = try arena.alloc(u16, metadata.all_versions.len);
|
||||
|
||||
@ -973,18 +968,14 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
|
||||
versions_len = undefined;
|
||||
while (sym_i < obj_inclusions_len) : (sym_i += 1) {
|
||||
const sym_name = opt_symbol_name orelse n: {
|
||||
sym_name_buf.clearRetainingCapacity();
|
||||
try inc_reader.streamUntilDelimiter(sym_name_buf.writer(), 0, null);
|
||||
|
||||
opt_symbol_name = sym_name_buf.items;
|
||||
opt_symbol_name = try inc_br.takeSentinel(0);
|
||||
versions_buffer = undefined;
|
||||
versions_len = 0;
|
||||
|
||||
break :n sym_name_buf.items;
|
||||
break :n opt_symbol_name.?;
|
||||
};
|
||||
const targets = try std.leb.readUleb128(u64, inc_reader);
|
||||
const size = try std.leb.readUleb128(u16, inc_reader);
|
||||
var lib_index = try inc_reader.readByte();
|
||||
const targets = try inc_br.takeLeb128(u64);
|
||||
const size = try inc_br.takeLeb128(u16);
|
||||
var lib_index = try inc_br.takeByte();
|
||||
|
||||
const is_terminal = (lib_index & (1 << 7)) != 0;
|
||||
if (is_terminal) {
|
||||
@ -998,7 +989,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
|
||||
((targets & (@as(u64, 1) << @as(u6, @intCast(target_targ_index)))) != 0);
|
||||
|
||||
while (true) {
|
||||
const byte = try inc_reader.readByte();
|
||||
const byte = try inc_br.takeByte();
|
||||
const last = (byte & 0b1000_0000) != 0;
|
||||
const ver_i = @as(u7, @truncate(byte));
|
||||
if (ok_lib_and_target and ver_i <= target_ver_index) {
|
||||
@ -1055,7 +1046,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
|
||||
"{s}_{d}_{d}",
|
||||
.{ sym_name, ver.major, ver.minor },
|
||||
);
|
||||
try stubs_asm.writer().print(
|
||||
try stubs_asm.print(
|
||||
\\.balign {d}
|
||||
\\.globl {s}
|
||||
\\.type {s}, %object
|
||||
@ -1083,7 +1074,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
|
||||
"{s}_{d}_{d}_{d}",
|
||||
.{ sym_name, ver.major, ver.minor, ver.patch },
|
||||
);
|
||||
try stubs_asm.writer().print(
|
||||
try stubs_asm.print(
|
||||
\\.balign {d}
|
||||
\\.globl {s}
|
||||
\\.type {s}, %object
|
||||
|
||||
@ -401,7 +401,7 @@ fn findDef(
|
||||
};
|
||||
|
||||
var override_path: std.io.AllocatingWriter = undefined;
|
||||
const override_path_writer = override_path.init(gpa);
|
||||
override_path.init(gpa);
|
||||
defer override_path.deinit();
|
||||
|
||||
const s = path.sep_str;
|
||||
@ -410,9 +410,9 @@ fn findDef(
|
||||
// Try the archtecture-specific path first.
|
||||
const fmt_path = "libc" ++ s ++ "mingw" ++ s ++ "{s}" ++ s ++ "{s}.def";
|
||||
if (zig_lib_directory.path) |p| {
|
||||
try override_path_writer.print("{s}" ++ s ++ fmt_path, .{ p, lib_path, lib_name });
|
||||
try override_path.buffered_writer.print("{s}" ++ s ++ fmt_path, .{ p, lib_path, lib_name });
|
||||
} else {
|
||||
try override_path_writer.print(fmt_path, .{ lib_path, lib_name });
|
||||
try override_path.buffered_writer.print(fmt_path, .{ lib_path, lib_name });
|
||||
}
|
||||
if (std.fs.cwd().access(override_path.getWritten(), .{})) |_| {
|
||||
return override_path.toOwnedSlice();
|
||||
@ -427,9 +427,9 @@ fn findDef(
|
||||
override_path.clearRetainingCapacity();
|
||||
const fmt_path = "libc" ++ s ++ "mingw" ++ s ++ "lib-common" ++ s ++ "{s}.def";
|
||||
if (zig_lib_directory.path) |p| {
|
||||
try override_path_writer.print("{s}" ++ s ++ fmt_path, .{ p, lib_name });
|
||||
try override_path.buffered_writer.print("{s}" ++ s ++ fmt_path, .{ p, lib_name });
|
||||
} else {
|
||||
try override_path_writer.print(fmt_path, .{lib_name});
|
||||
try override_path.buffered_writer.print(fmt_path, .{lib_name});
|
||||
}
|
||||
if (std.fs.cwd().access(override_path.getWritten(), .{})) |_| {
|
||||
return override_path.toOwnedSlice();
|
||||
@ -444,9 +444,9 @@ fn findDef(
|
||||
override_path.clearRetainingCapacity();
|
||||
const fmt_path = "libc" ++ s ++ "mingw" ++ s ++ "lib-common" ++ s ++ "{s}.def.in";
|
||||
if (zig_lib_directory.path) |p| {
|
||||
try override_path_writer.print("{s}" ++ s ++ fmt_path, .{ p, lib_name });
|
||||
try override_path.buffered_writer.print("{s}" ++ s ++ fmt_path, .{ p, lib_name });
|
||||
} else {
|
||||
try override_path_writer.print(fmt_path, .{lib_name});
|
||||
try override_path.buffered_writer.print(fmt_path, .{lib_name});
|
||||
}
|
||||
if (std.fs.cwd().access(override_path.getWritten(), .{})) |_| {
|
||||
return override_path.toOwnedSlice();
|
||||
|
||||
@ -115,7 +115,8 @@ pub fn buildCrtFile(comp: *Compilation, in_crt_file: CrtFile, prog_node: std.Pro
|
||||
var c_source_files = std.ArrayList(Compilation.CSourceFile).init(comp.gpa);
|
||||
defer c_source_files.deinit();
|
||||
|
||||
var override_path = std.ArrayList(u8).init(comp.gpa);
|
||||
var override_path: std.io.AllocatingWriter = undefined;
|
||||
override_path.init(comp.gpa);
|
||||
defer override_path.deinit();
|
||||
|
||||
const s = path.sep_str;
|
||||
@ -139,26 +140,23 @@ pub fn buildCrtFile(comp: *Compilation, in_crt_file: CrtFile, prog_node: std.Pro
|
||||
}
|
||||
if (!is_arch_specific) {
|
||||
// Look for an arch specific override.
|
||||
override_path.shrinkRetainingCapacity(0);
|
||||
try override_path.writer().print("{s}" ++ s ++ "{s}" ++ s ++ "{s}.s", .{
|
||||
override_path.clearRetainingCapacity();
|
||||
try override_path.buffered_writer.print("{s}" ++ s ++ "{s}" ++ s ++ "{s}.s", .{
|
||||
dirname, arch_name, noextbasename,
|
||||
});
|
||||
if (source_table.contains(override_path.items))
|
||||
continue;
|
||||
if (source_table.contains(override_path.getWritten())) continue;
|
||||
|
||||
override_path.shrinkRetainingCapacity(0);
|
||||
try override_path.writer().print("{s}" ++ s ++ "{s}" ++ s ++ "{s}.S", .{
|
||||
override_path.clearRetainingCapacity();
|
||||
try override_path.buffered_writer.print("{s}" ++ s ++ "{s}" ++ s ++ "{s}.S", .{
|
||||
dirname, arch_name, noextbasename,
|
||||
});
|
||||
if (source_table.contains(override_path.items))
|
||||
continue;
|
||||
if (source_table.contains(override_path.getWritten())) continue;
|
||||
|
||||
override_path.shrinkRetainingCapacity(0);
|
||||
try override_path.writer().print("{s}" ++ s ++ "{s}" ++ s ++ "{s}.c", .{
|
||||
override_path.clearRetainingCapacity();
|
||||
try override_path.buffered_writer.print("{s}" ++ s ++ "{s}" ++ s ++ "{s}.c", .{
|
||||
dirname, arch_name, noextbasename,
|
||||
});
|
||||
if (source_table.contains(override_path.items))
|
||||
continue;
|
||||
if (source_table.contains(override_path.getWritten())) continue;
|
||||
}
|
||||
|
||||
var args = std.ArrayList([]const u8).init(arena);
|
||||
|
||||
50
src/link.zig
50
src/link.zig
@ -323,7 +323,7 @@ pub const Diags = struct {
|
||||
const main_msg = try m;
|
||||
errdefer gpa.free(main_msg);
|
||||
try diags.msgs.ensureUnusedCapacity(gpa, 1);
|
||||
const note = try std.fmt.allocPrint(gpa, "while parsing {}", .{path});
|
||||
const note = try std.fmt.allocPrint(gpa, "while parsing {f}", .{path});
|
||||
errdefer gpa.free(note);
|
||||
const notes = try gpa.create([1]Msg);
|
||||
errdefer gpa.destroy(notes);
|
||||
@ -838,7 +838,7 @@ pub const File = struct {
|
||||
const cached_pp_file_path = the_key.status.success.object_path;
|
||||
cached_pp_file_path.root_dir.handle.copyFile(cached_pp_file_path.sub_path, emit.root_dir.handle, emit.sub_path, .{}) catch |err| {
|
||||
const diags = &base.comp.link_diags;
|
||||
return diags.fail("failed to copy '{'}' to '{'}': {s}", .{
|
||||
return diags.fail("failed to copy '{f'}' to '{f'}': {s}", .{
|
||||
@as(Path, cached_pp_file_path), @as(Path, emit), @errorName(err),
|
||||
});
|
||||
};
|
||||
@ -1351,7 +1351,7 @@ pub fn doPrelinkTask(comp: *Compilation, task: PrelinkTask) void {
|
||||
.search_strategy = .paths_first,
|
||||
}) catch |archive_err| switch (archive_err) {
|
||||
error.LinkFailure => return, // error reported via diags
|
||||
else => |e| diags.addParseError(dso_path, "failed to parse archive {}: {s}", .{ archive_path, @errorName(e) }),
|
||||
else => |e| diags.addParseError(dso_path, "failed to parse archive {f}: {s}", .{ archive_path, @errorName(e) }),
|
||||
};
|
||||
},
|
||||
error.LinkFailure => return, // error reported via diags
|
||||
@ -1874,7 +1874,7 @@ pub fn resolveInputs(
|
||||
)) |lib_result| {
|
||||
switch (lib_result) {
|
||||
.ok => {},
|
||||
.no_match => fatal("{}: file not found", .{pq.path}),
|
||||
.no_match => fatal("{f}: file not found", .{pq.path}),
|
||||
}
|
||||
}
|
||||
continue;
|
||||
@ -1928,10 +1928,10 @@ fn resolveLibInput(
|
||||
.root_dir = lib_directory,
|
||||
.sub_path = try std.fmt.allocPrint(arena, "lib{s}.tbd", .{lib_name}),
|
||||
};
|
||||
try checked_paths.print(gpa, "\n {}", .{test_path});
|
||||
try checked_paths.print(gpa, "\n {f}", .{test_path});
|
||||
var file = test_path.root_dir.handle.openFile(test_path.sub_path, .{}) catch |err| switch (err) {
|
||||
error.FileNotFound => break :tbd,
|
||||
else => |e| fatal("unable to search for tbd library '{}': {s}", .{ test_path, @errorName(e) }),
|
||||
else => |e| fatal("unable to search for tbd library '{f}': {s}", .{ test_path, @errorName(e) }),
|
||||
};
|
||||
errdefer file.close();
|
||||
return finishResolveLibInput(resolved_inputs, test_path, file, link_mode, name_query.query);
|
||||
@ -1947,7 +1947,7 @@ fn resolveLibInput(
|
||||
},
|
||||
}),
|
||||
};
|
||||
try checked_paths.print(gpa, "\n {}", .{test_path});
|
||||
try checked_paths.print(gpa, "\n {f}", .{test_path});
|
||||
switch (try resolvePathInputLib(gpa, arena, unresolved_inputs, resolved_inputs, ld_script_bytes, target, .{
|
||||
.path = test_path,
|
||||
.query = name_query.query,
|
||||
@ -1964,10 +1964,10 @@ fn resolveLibInput(
|
||||
.root_dir = lib_directory,
|
||||
.sub_path = try std.fmt.allocPrint(arena, "lib{s}.so", .{lib_name}),
|
||||
};
|
||||
try checked_paths.print(gpa, "\n {}", .{test_path});
|
||||
try checked_paths.print(gpa, "\n {f}", .{test_path});
|
||||
var file = test_path.root_dir.handle.openFile(test_path.sub_path, .{}) catch |err| switch (err) {
|
||||
error.FileNotFound => break :so,
|
||||
else => |e| fatal("unable to search for so library '{}': {s}", .{
|
||||
else => |e| fatal("unable to search for so library '{f}': {s}", .{
|
||||
test_path, @errorName(e),
|
||||
}),
|
||||
};
|
||||
@ -1982,10 +1982,10 @@ fn resolveLibInput(
|
||||
.root_dir = lib_directory,
|
||||
.sub_path = try std.fmt.allocPrint(arena, "lib{s}.a", .{lib_name}),
|
||||
};
|
||||
try checked_paths.print(gpa, "\n {}", .{test_path});
|
||||
try checked_paths.print(gpa, "\n {f}", .{test_path});
|
||||
var file = test_path.root_dir.handle.openFile(test_path.sub_path, .{}) catch |err| switch (err) {
|
||||
error.FileNotFound => break :mingw,
|
||||
else => |e| fatal("unable to search for static library '{}': {s}", .{ test_path, @errorName(e) }),
|
||||
else => |e| fatal("unable to search for static library '{f}': {s}", .{ test_path, @errorName(e) }),
|
||||
};
|
||||
errdefer file.close();
|
||||
return finishResolveLibInput(resolved_inputs, test_path, file, link_mode, name_query.query);
|
||||
@ -2037,7 +2037,7 @@ fn resolvePathInput(
|
||||
.shared_library => return try resolvePathInputLib(gpa, arena, unresolved_inputs, resolved_inputs, ld_script_bytes, target, pq, .dynamic, color),
|
||||
.object => {
|
||||
var file = pq.path.root_dir.handle.openFile(pq.path.sub_path, .{}) catch |err|
|
||||
fatal("failed to open object {}: {s}", .{ pq.path, @errorName(err) });
|
||||
fatal("failed to open object {f}: {s}", .{ pq.path, @errorName(err) });
|
||||
errdefer file.close();
|
||||
try resolved_inputs.append(gpa, .{ .object = .{
|
||||
.path = pq.path,
|
||||
@ -2049,7 +2049,7 @@ fn resolvePathInput(
|
||||
},
|
||||
.res => {
|
||||
var file = pq.path.root_dir.handle.openFile(pq.path.sub_path, .{}) catch |err|
|
||||
fatal("failed to open windows resource {}: {s}", .{ pq.path, @errorName(err) });
|
||||
fatal("failed to open windows resource {f}: {s}", .{ pq.path, @errorName(err) });
|
||||
errdefer file.close();
|
||||
try resolved_inputs.append(gpa, .{ .res = .{
|
||||
.path = pq.path,
|
||||
@ -2057,7 +2057,7 @@ fn resolvePathInput(
|
||||
} });
|
||||
return null;
|
||||
},
|
||||
else => fatal("{}: unrecognized file extension", .{pq.path}),
|
||||
else => fatal("{f}: unrecognized file extension", .{pq.path}),
|
||||
}
|
||||
}
|
||||
|
||||
@ -2086,13 +2086,13 @@ fn resolvePathInputLib(
|
||||
}) {
|
||||
var file = test_path.root_dir.handle.openFile(test_path.sub_path, .{}) catch |err| switch (err) {
|
||||
error.FileNotFound => return .no_match,
|
||||
else => |e| fatal("unable to search for {s} library '{'}': {s}", .{
|
||||
else => |e| fatal("unable to search for {s} library '{f'}': {s}", .{
|
||||
@tagName(link_mode), test_path, @errorName(e),
|
||||
}),
|
||||
};
|
||||
errdefer file.close();
|
||||
try ld_script_bytes.resize(gpa, @max(std.elf.MAGIC.len, std.elf.ARMAG.len));
|
||||
const n = file.preadAll(ld_script_bytes.items, 0) catch |err| fatal("failed to read '{'}': {s}", .{
|
||||
const n = file.preadAll(ld_script_bytes.items, 0) catch |err| fatal("failed to read '{f'}': {s}", .{
|
||||
test_path, @errorName(err),
|
||||
});
|
||||
const buf = ld_script_bytes.items[0..n];
|
||||
@ -2101,14 +2101,14 @@ fn resolvePathInputLib(
|
||||
return finishResolveLibInput(resolved_inputs, test_path, file, link_mode, pq.query);
|
||||
}
|
||||
const stat = file.stat() catch |err|
|
||||
fatal("failed to stat {}: {s}", .{ test_path, @errorName(err) });
|
||||
fatal("failed to stat {f}: {s}", .{ test_path, @errorName(err) });
|
||||
const size = std.math.cast(u32, stat.size) orelse
|
||||
fatal("{}: linker script too big", .{test_path});
|
||||
fatal("{f}: linker script too big", .{test_path});
|
||||
try ld_script_bytes.resize(gpa, size);
|
||||
const buf2 = ld_script_bytes.items[n..];
|
||||
const n2 = file.preadAll(buf2, n) catch |err|
|
||||
fatal("failed to read {}: {s}", .{ test_path, @errorName(err) });
|
||||
if (n2 != buf2.len) fatal("failed to read {}: unexpected end of file", .{test_path});
|
||||
fatal("failed to read {f}: {s}", .{ test_path, @errorName(err) });
|
||||
if (n2 != buf2.len) fatal("failed to read {f}: unexpected end of file", .{test_path});
|
||||
var diags = Diags.init(gpa);
|
||||
defer diags.deinit();
|
||||
const ld_script_result = LdScript.parse(gpa, &diags, test_path, ld_script_bytes.items);
|
||||
@ -2128,7 +2128,7 @@ fn resolvePathInputLib(
|
||||
}
|
||||
|
||||
var ld_script = ld_script_result catch |err|
|
||||
fatal("{}: failed to parse linker script: {s}", .{ test_path, @errorName(err) });
|
||||
fatal("{f}: failed to parse linker script: {s}", .{ test_path, @errorName(err) });
|
||||
defer ld_script.deinit(gpa);
|
||||
|
||||
try unresolved_inputs.ensureUnusedCapacity(gpa, ld_script.args.len);
|
||||
@ -2159,7 +2159,7 @@ fn resolvePathInputLib(
|
||||
|
||||
var file = test_path.root_dir.handle.openFile(test_path.sub_path, .{}) catch |err| switch (err) {
|
||||
error.FileNotFound => return .no_match,
|
||||
else => |e| fatal("unable to search for {s} library {}: {s}", .{
|
||||
else => |e| fatal("unable to search for {s} library {f}: {s}", .{
|
||||
@tagName(link_mode), test_path, @errorName(e),
|
||||
}),
|
||||
};
|
||||
@ -2192,19 +2192,19 @@ pub fn openDso(path: Path, needed: bool, weak: bool, reexport: bool) !Input.Dso
|
||||
|
||||
pub fn openObjectInput(diags: *Diags, path: Path) error{LinkFailure}!Input {
|
||||
return .{ .object = openObject(path, false, false) catch |err| {
|
||||
return diags.failParse(path, "failed to open {}: {s}", .{ path, @errorName(err) });
|
||||
return diags.failParse(path, "failed to open {f}: {s}", .{ path, @errorName(err) });
|
||||
} };
|
||||
}
|
||||
|
||||
pub fn openArchiveInput(diags: *Diags, path: Path, must_link: bool, hidden: bool) error{LinkFailure}!Input {
|
||||
return .{ .archive = openObject(path, must_link, hidden) catch |err| {
|
||||
return diags.failParse(path, "failed to open {}: {s}", .{ path, @errorName(err) });
|
||||
return diags.failParse(path, "failed to open {f}: {s}", .{ path, @errorName(err) });
|
||||
} };
|
||||
}
|
||||
|
||||
pub fn openDsoInput(diags: *Diags, path: Path, needed: bool, weak: bool, reexport: bool) error{LinkFailure}!Input {
|
||||
return .{ .dso = openDso(path, needed, weak, reexport) catch |err| {
|
||||
return diags.failParse(path, "failed to open {}: {s}", .{ path, @errorName(err) });
|
||||
return diags.failParse(path, "failed to open {f}: {s}", .{ path, @errorName(err) });
|
||||
} };
|
||||
}
|
||||
|
||||
|
||||
@ -1213,7 +1213,7 @@ fn updateLazySymbolAtom(
|
||||
var code_buffer: std.ArrayListUnmanaged(u8) = .empty;
|
||||
defer code_buffer.deinit(gpa);
|
||||
|
||||
const name = try allocPrint(gpa, "__lazy_{s}_{}", .{
|
||||
const name = try allocPrint(gpa, "__lazy_{s}_{f}", .{
|
||||
@tagName(sym.kind),
|
||||
Type.fromInterned(sym.ty).fmt(pt),
|
||||
});
|
||||
@ -1333,7 +1333,7 @@ fn updateNavCode(
|
||||
const ip = &zcu.intern_pool;
|
||||
const nav = ip.getNav(nav_index);
|
||||
|
||||
log.debug("updateNavCode {} 0x{x}", .{ nav.fqn.fmt(ip), nav_index });
|
||||
log.debug("updateNavCode {f} 0x{x}", .{ nav.fqn.fmt(ip), nav_index });
|
||||
|
||||
const target = &zcu.navFileScope(nav_index).mod.?.resolved_target.result;
|
||||
const required_alignment = switch (pt.navAlignment(nav_index)) {
|
||||
@ -1361,7 +1361,7 @@ fn updateNavCode(
|
||||
error.OutOfMemory => return error.OutOfMemory,
|
||||
else => |e| return coff.base.cgFail(nav_index, "failed to grow atom: {s}", .{@errorName(e)}),
|
||||
};
|
||||
log.debug("growing {} from 0x{x} to 0x{x}", .{ nav.fqn.fmt(ip), sym.value, vaddr });
|
||||
log.debug("growing {f} from 0x{x} to 0x{x}", .{ nav.fqn.fmt(ip), sym.value, vaddr });
|
||||
log.debug(" (required alignment 0x{x}", .{required_alignment});
|
||||
|
||||
if (vaddr != sym.value) {
|
||||
@ -1389,7 +1389,7 @@ fn updateNavCode(
|
||||
else => |e| return coff.base.cgFail(nav_index, "failed to allocate atom: {s}", .{@errorName(e)}),
|
||||
};
|
||||
errdefer coff.freeAtom(atom_index);
|
||||
log.debug("allocated atom for {} at 0x{x}", .{ nav.fqn.fmt(ip), vaddr });
|
||||
log.debug("allocated atom for {f} at 0x{x}", .{ nav.fqn.fmt(ip), vaddr });
|
||||
coff.getAtomPtr(atom_index).size = code_len;
|
||||
sym.value = vaddr;
|
||||
|
||||
@ -1454,7 +1454,7 @@ pub fn updateExports(
|
||||
|
||||
for (export_indices) |export_idx| {
|
||||
const exp = export_idx.ptr(zcu);
|
||||
log.debug("adding new export '{}'", .{exp.opts.name.fmt(&zcu.intern_pool)});
|
||||
log.debug("adding new export '{f}'", .{exp.opts.name.fmt(&zcu.intern_pool)});
|
||||
|
||||
if (exp.opts.section.toSlice(&zcu.intern_pool)) |section_name| {
|
||||
if (!mem.eql(u8, section_name, ".text")) {
|
||||
@ -1530,7 +1530,7 @@ pub fn deleteExport(
|
||||
const gpa = coff.base.comp.gpa;
|
||||
const sym_loc = SymbolWithLoc{ .sym_index = sym_index.*, .file = null };
|
||||
const sym = coff.getSymbolPtr(sym_loc);
|
||||
log.debug("deleting export '{}'", .{name.fmt(&zcu.intern_pool)});
|
||||
log.debug("deleting export '{f}'", .{name.fmt(&zcu.intern_pool)});
|
||||
assert(sym.storage_class == .EXTERNAL and sym.section_number != .UNDEFINED);
|
||||
sym.* = .{
|
||||
.name = [_]u8{0} ** 8,
|
||||
@ -1748,7 +1748,7 @@ pub fn getNavVAddr(
|
||||
const zcu = pt.zcu;
|
||||
const ip = &zcu.intern_pool;
|
||||
const nav = ip.getNav(nav_index);
|
||||
log.debug("getNavVAddr {}({d})", .{ nav.fqn.fmt(ip), nav_index });
|
||||
log.debug("getNavVAddr {f}({d})", .{ nav.fqn.fmt(ip), nav_index });
|
||||
const sym_index = if (nav.getExtern(ip)) |e|
|
||||
try coff.getGlobalSymbol(nav.name.toSlice(ip), e.lib_name.toSlice(ip))
|
||||
else
|
||||
@ -2175,15 +2175,14 @@ fn writeDataDirectoriesHeaders(coff: *Coff) !void {
|
||||
fn writeHeader(coff: *Coff) !void {
|
||||
const target = &coff.base.comp.root_mod.resolved_target.result;
|
||||
const gpa = coff.base.comp.gpa;
|
||||
var buffer = std.ArrayList(u8).init(gpa);
|
||||
defer buffer.deinit();
|
||||
const writer = buffer.writer();
|
||||
var bw: std.io.BufferedWriter = undefined;
|
||||
bw.initFixed(try gpa.alloc(u8, coff.getSizeOfHeaders()));
|
||||
defer gpa.free(bw.buffer);
|
||||
|
||||
try buffer.ensureTotalCapacity(coff.getSizeOfHeaders());
|
||||
writer.writeAll(&msdos_stub) catch unreachable;
|
||||
mem.writeInt(u32, buffer.items[0x3c..][0..4], msdos_stub.len, .little);
|
||||
bw.writeAll(&msdos_stub) catch unreachable;
|
||||
mem.writeInt(u32, bw.buffer[0x3c..][0..4], msdos_stub.len, .little);
|
||||
|
||||
writer.writeAll("PE\x00\x00") catch unreachable;
|
||||
bw.writeAll("PE\x00\x00") catch unreachable;
|
||||
var flags = coff_util.CoffHeaderFlags{
|
||||
.EXECUTABLE_IMAGE = 1,
|
||||
.DEBUG_STRIPPED = 1, // TODO
|
||||
@ -2208,7 +2207,7 @@ fn writeHeader(coff: *Coff) !void {
|
||||
.flags = flags,
|
||||
};
|
||||
|
||||
writer.writeAll(mem.asBytes(&coff_header)) catch unreachable;
|
||||
bw.writeAll(mem.asBytes(&coff_header)) catch unreachable;
|
||||
|
||||
const dll_flags: coff_util.DllFlags = .{
|
||||
.HIGH_ENTROPY_VA = 1, // TODO do we want to permit non-PIE builds at all?
|
||||
@ -2271,7 +2270,7 @@ fn writeHeader(coff: *Coff) !void {
|
||||
.loader_flags = 0,
|
||||
.number_of_rva_and_sizes = @intCast(coff.data_directories.len),
|
||||
};
|
||||
writer.writeAll(mem.asBytes(&opt_header)) catch unreachable;
|
||||
bw.writeAll(mem.asBytes(&opt_header)) catch unreachable;
|
||||
},
|
||||
.p64 => {
|
||||
var opt_header = coff_util.OptionalHeaderPE64{
|
||||
@ -2305,11 +2304,12 @@ fn writeHeader(coff: *Coff) !void {
|
||||
.loader_flags = 0,
|
||||
.number_of_rva_and_sizes = @intCast(coff.data_directories.len),
|
||||
};
|
||||
writer.writeAll(mem.asBytes(&opt_header)) catch unreachable;
|
||||
bw.writeAll(mem.asBytes(&opt_header)) catch unreachable;
|
||||
},
|
||||
}
|
||||
|
||||
try coff.pwriteAll(buffer.items, 0);
|
||||
assert(bw.end == bw.buffer.len);
|
||||
try coff.pwriteAll(bw.buffer, 0);
|
||||
}
|
||||
|
||||
pub fn padToIdeal(actual_size: anytype) @TypeOf(actual_size) {
|
||||
@ -2605,7 +2605,7 @@ fn logSymtab(coff: *Coff) void {
|
||||
}
|
||||
|
||||
log.debug("GOT entries:", .{});
|
||||
log.debug("{}", .{coff.got_table});
|
||||
log.debug("{f}", .{coff.got_table});
|
||||
}
|
||||
|
||||
fn logSections(coff: *Coff) void {
|
||||
@ -2625,7 +2625,7 @@ fn logImportTables(coff: *const Coff) void {
|
||||
log.debug("import tables:", .{});
|
||||
for (coff.import_tables.keys(), 0..) |off, i| {
|
||||
const itable = coff.import_tables.values()[i];
|
||||
log.debug("{}", .{itable.fmtDebug(.{
|
||||
log.debug("{f}", .{itable.fmtDebug(.{
|
||||
.coff = coff,
|
||||
.index = i,
|
||||
.name_off = off,
|
||||
@ -3066,27 +3066,20 @@ const ImportTable = struct {
|
||||
ctx: Context,
|
||||
};
|
||||
|
||||
fn format(itab: ImportTable, comptime unused_format_string: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
|
||||
fn format(itab: ImportTable, bw: *std.io.BufferedWriter, comptime unused_format_string: []const u8) anyerror!void {
|
||||
_ = itab;
|
||||
_ = bw;
|
||||
_ = unused_format_string;
|
||||
_ = options;
|
||||
_ = writer;
|
||||
@compileError("do not format ImportTable directly; use itab.fmtDebug()");
|
||||
}
|
||||
|
||||
fn format2(
|
||||
fmt_ctx: FormatContext,
|
||||
comptime unused_format_string: []const u8,
|
||||
options: fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) @TypeOf(writer).Error!void {
|
||||
_ = options;
|
||||
fn format2(fmt_ctx: FormatContext, bw: *std.io.BufferedWriter, comptime unused_format_string: []const u8) anyerror!void {
|
||||
comptime assert(unused_format_string.len == 0);
|
||||
const lib_name = fmt_ctx.ctx.coff.temp_strtab.getAssumeExists(fmt_ctx.ctx.name_off);
|
||||
const base_vaddr = getBaseAddress(fmt_ctx.ctx);
|
||||
try writer.print("IAT({s}.dll) @{x}:", .{ lib_name, base_vaddr });
|
||||
try bw.print("IAT({s}.dll) @{x}:", .{ lib_name, base_vaddr });
|
||||
for (fmt_ctx.itab.entries.items, 0..) |entry, i| {
|
||||
try writer.print("\n {d}@{?x} => {s}", .{
|
||||
try bw.print("\n {d}@{?x} => {s}", .{
|
||||
i,
|
||||
fmt_ctx.itab.getImportAddress(entry, fmt_ctx.ctx),
|
||||
fmt_ctx.ctx.coff.getSymbolName(entry),
|
||||
|
||||
1492
src/link/Dwarf.zig
1492
src/link/Dwarf.zig
File diff suppressed because it is too large
Load Diff
275
src/link/Elf.zig
275
src/link/Elf.zig
@ -702,7 +702,7 @@ pub fn allocateChunk(self: *Elf, args: struct {
|
||||
shdr.sh_addr + res.value,
|
||||
shdr.sh_offset + res.value,
|
||||
});
|
||||
log.debug(" placement {}, {s}", .{
|
||||
log.debug(" placement {f}, {s}", .{
|
||||
res.placement,
|
||||
if (self.atom(res.placement)) |atom_ptr| atom_ptr.name(self) else "",
|
||||
});
|
||||
@ -869,7 +869,7 @@ fn flushInner(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id) !void {
|
||||
// Dump the state for easy debugging.
|
||||
// State can be dumped via `--debug-log link_state`.
|
||||
if (build_options.enable_logging) {
|
||||
state_log.debug("{}", .{self.dumpState()});
|
||||
state_log.debug("{f}", .{self.dumpState()});
|
||||
}
|
||||
|
||||
// Beyond this point, everything has been allocated a virtual address and we can resolve
|
||||
@ -1849,7 +1849,7 @@ pub fn updateMergeSectionSizes(self: *Elf) !void {
|
||||
|
||||
pub fn writeMergeSections(self: *Elf) !void {
|
||||
const gpa = self.base.comp.gpa;
|
||||
var buffer = std.ArrayList(u8).init(gpa);
|
||||
var buffer: std.ArrayList(u8) = .init(gpa);
|
||||
defer buffer.deinit();
|
||||
|
||||
for (self.merge_sections.items) |*msec| {
|
||||
@ -2996,7 +2996,7 @@ fn allocateSpecialPhdrs(self: *Elf) void {
|
||||
}
|
||||
}
|
||||
|
||||
fn writeAtoms(self: *Elf) !void {
|
||||
fn writeAtoms(self: *Elf) anyerror!void {
|
||||
const gpa = self.base.comp.gpa;
|
||||
|
||||
var undefs: std.AutoArrayHashMap(SymbolResolver.Index, std.ArrayList(Ref)) = .init(gpa);
|
||||
@ -3005,7 +3005,7 @@ fn writeAtoms(self: *Elf) !void {
|
||||
undefs.deinit();
|
||||
}
|
||||
|
||||
var buffer = std.ArrayList(u8).init(gpa);
|
||||
var buffer: std.ArrayList(u8) = .init(gpa);
|
||||
defer buffer.deinit();
|
||||
|
||||
const slice = self.sections.slice();
|
||||
@ -3028,14 +3028,14 @@ fn writeAtoms(self: *Elf) !void {
|
||||
|
||||
if (self.requiresThunks()) {
|
||||
for (self.thunks.items) |th| {
|
||||
const thunk_size = th.size(self);
|
||||
try buffer.ensureUnusedCapacity(thunk_size);
|
||||
try buffer.resize(th.size(self));
|
||||
var bw: std.io.BufferedWriter = undefined;
|
||||
bw.initFixed(buffer.items);
|
||||
const shdr = slice.items(.shdr)[th.output_section_index];
|
||||
const offset = @as(u64, @intCast(th.value)) + shdr.sh_offset;
|
||||
try th.write(self, buffer.writer());
|
||||
assert(buffer.items.len == thunk_size);
|
||||
try self.pwriteAll(buffer.items, offset);
|
||||
buffer.clearRetainingCapacity();
|
||||
try th.write(self, &bw);
|
||||
assert(bw.end == bw.buffer.len);
|
||||
try self.pwriteAll(bw.buffer, offset);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -3130,32 +3130,36 @@ pub fn updateSymtabSize(self: *Elf) !void {
|
||||
strtab.sh_size = strsize + 1;
|
||||
}
|
||||
|
||||
fn writeSyntheticSections(self: *Elf) !void {
|
||||
fn writeSyntheticSections(self: *Elf) anyerror!void {
|
||||
const gpa = self.base.comp.gpa;
|
||||
const slice = self.sections.slice();
|
||||
|
||||
var buffer: std.ArrayListUnmanaged(u8) = .empty;
|
||||
defer buffer.deinit(gpa);
|
||||
var bw: std.io.BufferedWriter = undefined;
|
||||
|
||||
if (self.section_indexes.interp) |shndx| {
|
||||
var buffer: [256]u8 = undefined;
|
||||
const interp = self.getTarget().dynamic_linker.get().?;
|
||||
@memcpy(buffer[0..interp.len], interp);
|
||||
buffer[interp.len] = 0;
|
||||
const contents = buffer[0 .. interp.len + 1];
|
||||
const shdr = slice.items(.shdr)[shndx];
|
||||
assert(shdr.sh_size == contents.len);
|
||||
try self.pwriteAll(contents, shdr.sh_offset);
|
||||
const interp = self.getTarget().dynamic_linker.get().?;
|
||||
assert(shdr.sh_size == interp.len + 1);
|
||||
try buffer.resize(gpa, shdr.sh_size);
|
||||
@memcpy(buffer.items[0..interp.len], interp);
|
||||
buffer.items[interp.len] = 0;
|
||||
try self.pwriteAll(buffer.items, shdr.sh_offset);
|
||||
}
|
||||
|
||||
if (self.section_indexes.hash) |shndx| {
|
||||
const shdr = slice.items(.shdr)[shndx];
|
||||
try self.pwriteAll(self.hash.buffer.items, shdr.sh_offset);
|
||||
try self.pwriteAll(@ptrCast(self.hash.buffer), shdr.sh_offset);
|
||||
}
|
||||
|
||||
if (self.section_indexes.gnu_hash) |shndx| {
|
||||
const shdr = slice.items(.shdr)[shndx];
|
||||
var buffer = try std.ArrayList(u8).initCapacity(gpa, self.gnu_hash.size());
|
||||
defer buffer.deinit();
|
||||
try self.gnu_hash.write(self, buffer.writer());
|
||||
try self.pwriteAll(buffer.items, shdr.sh_offset);
|
||||
try buffer.resize(gpa, self.gnu_hash.size());
|
||||
bw.initFixed(buffer.items);
|
||||
try self.gnu_hash.write(self, &bw);
|
||||
assert(bw.end == bw.buffer.len);
|
||||
try self.pwriteAll(bw.buffer, shdr.sh_offset);
|
||||
}
|
||||
|
||||
if (self.section_indexes.versym) |shndx| {
|
||||
@ -3165,26 +3169,29 @@ fn writeSyntheticSections(self: *Elf) !void {
|
||||
|
||||
if (self.section_indexes.verneed) |shndx| {
|
||||
const shdr = slice.items(.shdr)[shndx];
|
||||
var buffer = try std.ArrayList(u8).initCapacity(gpa, self.verneed.size());
|
||||
defer buffer.deinit();
|
||||
try self.verneed.write(buffer.writer());
|
||||
try self.pwriteAll(buffer.items, shdr.sh_offset);
|
||||
try buffer.resize(gpa, self.verneed.size());
|
||||
bw.initFixed(buffer.items);
|
||||
try self.verneed.write(&bw);
|
||||
assert(bw.end == bw.buffer.len);
|
||||
try self.pwriteAll(bw.buffer, shdr.sh_offset);
|
||||
}
|
||||
|
||||
if (self.section_indexes.dynamic) |shndx| {
|
||||
const shdr = slice.items(.shdr)[shndx];
|
||||
var buffer = try std.ArrayList(u8).initCapacity(gpa, self.dynamic.size(self));
|
||||
defer buffer.deinit();
|
||||
try self.dynamic.write(self, buffer.writer());
|
||||
try self.pwriteAll(buffer.items, shdr.sh_offset);
|
||||
try buffer.resize(gpa, self.dynamic.size(self));
|
||||
bw.initFixed(buffer.items);
|
||||
try self.dynamic.write(self, &bw);
|
||||
assert(bw.end == bw.buffer.len);
|
||||
try self.pwriteAll(bw.buffer, shdr.sh_offset);
|
||||
}
|
||||
|
||||
if (self.section_indexes.dynsymtab) |shndx| {
|
||||
const shdr = slice.items(.shdr)[shndx];
|
||||
var buffer = try std.ArrayList(u8).initCapacity(gpa, self.dynsym.size());
|
||||
defer buffer.deinit();
|
||||
try self.dynsym.write(self, buffer.writer());
|
||||
try self.pwriteAll(buffer.items, shdr.sh_offset);
|
||||
try buffer.resize(gpa, self.dynsym.size());
|
||||
bw.initFixed(buffer.items);
|
||||
try self.dynsym.write(self, &bw);
|
||||
assert(bw.end == bw.buffer.len);
|
||||
try self.pwriteAll(bw.buffer, shdr.sh_offset);
|
||||
}
|
||||
|
||||
if (self.section_indexes.dynstrtab) |shndx| {
|
||||
@ -3200,28 +3207,30 @@ fn writeSyntheticSections(self: *Elf) !void {
|
||||
};
|
||||
const shdr = slice.items(.shdr)[shndx];
|
||||
const sh_size = try self.cast(usize, shdr.sh_size);
|
||||
var buffer = try std.ArrayList(u8).initCapacity(gpa, @intCast(sh_size - existing_size));
|
||||
defer buffer.deinit();
|
||||
try eh_frame.writeEhFrame(self, buffer.writer());
|
||||
assert(buffer.items.len == sh_size - existing_size);
|
||||
try self.pwriteAll(buffer.items, shdr.sh_offset + existing_size);
|
||||
try buffer.resize(gpa, @intCast(sh_size - existing_size));
|
||||
bw.initFixed(buffer.items);
|
||||
try eh_frame.writeEhFrame(self, &bw);
|
||||
assert(bw.end == bw.buffer.len);
|
||||
try self.pwriteAll(bw.buffer, shdr.sh_offset + existing_size);
|
||||
}
|
||||
|
||||
if (self.section_indexes.eh_frame_hdr) |shndx| {
|
||||
const shdr = slice.items(.shdr)[shndx];
|
||||
const sh_size = try self.cast(usize, shdr.sh_size);
|
||||
var buffer = try std.ArrayList(u8).initCapacity(gpa, sh_size);
|
||||
defer buffer.deinit();
|
||||
try eh_frame.writeEhFrameHdr(self, buffer.writer());
|
||||
try self.pwriteAll(buffer.items, shdr.sh_offset);
|
||||
try buffer.resize(gpa, sh_size);
|
||||
bw.initFixed(buffer.items);
|
||||
try eh_frame.writeEhFrameHdr(self, &bw);
|
||||
assert(bw.end == bw.buffer.len);
|
||||
try self.pwriteAll(bw.buffer, shdr.sh_offset);
|
||||
}
|
||||
|
||||
if (self.section_indexes.got) |index| {
|
||||
const shdr = slice.items(.shdr)[index];
|
||||
var buffer = try std.ArrayList(u8).initCapacity(gpa, self.got.size(self));
|
||||
defer buffer.deinit();
|
||||
try self.got.write(self, buffer.writer());
|
||||
try self.pwriteAll(buffer.items, shdr.sh_offset);
|
||||
try buffer.resize(gpa, self.got.size(self));
|
||||
bw.initFixed(buffer.items);
|
||||
try self.got.write(self, &bw);
|
||||
assert(bw.end == bw.buffer.len);
|
||||
try self.pwriteAll(bw.buffer, shdr.sh_offset);
|
||||
}
|
||||
|
||||
if (self.section_indexes.rela_dyn) |shndx| {
|
||||
@ -3234,26 +3243,29 @@ fn writeSyntheticSections(self: *Elf) !void {
|
||||
|
||||
if (self.section_indexes.plt) |shndx| {
|
||||
const shdr = slice.items(.shdr)[shndx];
|
||||
var buffer = try std.ArrayList(u8).initCapacity(gpa, self.plt.size(self));
|
||||
defer buffer.deinit();
|
||||
try self.plt.write(self, buffer.writer());
|
||||
try self.pwriteAll(buffer.items, shdr.sh_offset);
|
||||
try buffer.resize(gpa, self.plt.size(self));
|
||||
bw.initFixed(buffer.items);
|
||||
try self.plt.write(self, &bw);
|
||||
assert(bw.end == bw.buffer.len);
|
||||
try self.pwriteAll(bw.buffer, shdr.sh_offset);
|
||||
}
|
||||
|
||||
if (self.section_indexes.got_plt) |shndx| {
|
||||
const shdr = slice.items(.shdr)[shndx];
|
||||
var buffer = try std.ArrayList(u8).initCapacity(gpa, self.got_plt.size(self));
|
||||
defer buffer.deinit();
|
||||
try self.got_plt.write(self, buffer.writer());
|
||||
try self.pwriteAll(buffer.items, shdr.sh_offset);
|
||||
try buffer.resize(gpa, self.got_plt.size(self));
|
||||
bw.initFixed(buffer.items);
|
||||
try self.got_plt.write(self, &bw);
|
||||
assert(bw.end == bw.buffer.len);
|
||||
try self.pwriteAll(bw.buffer, shdr.sh_offset);
|
||||
}
|
||||
|
||||
if (self.section_indexes.plt_got) |shndx| {
|
||||
const shdr = slice.items(.shdr)[shndx];
|
||||
var buffer = try std.ArrayList(u8).initCapacity(gpa, self.plt_got.size(self));
|
||||
defer buffer.deinit();
|
||||
try self.plt_got.write(self, buffer.writer());
|
||||
try self.pwriteAll(buffer.items, shdr.sh_offset);
|
||||
try buffer.resize(gpa, self.plt_got.size(self));
|
||||
bw.initFixed(buffer.items);
|
||||
try self.plt_got.write(self, &bw);
|
||||
assert(bw.end == bw.buffer.len);
|
||||
try self.pwriteAll(bw.buffer, shdr.sh_offset);
|
||||
}
|
||||
|
||||
if (self.section_indexes.rela_plt) |shndx| {
|
||||
@ -3544,7 +3556,7 @@ pub fn addRelaDyn(self: *Elf, opts: RelaDyn) !void {
|
||||
}
|
||||
|
||||
pub fn addRelaDynAssumeCapacity(self: *Elf, opts: RelaDyn) void {
|
||||
relocs_log.debug(" {s}: [{x} => {d}({s})] + {x}", .{
|
||||
relocs_log.debug(" {f}: [{x} => {d}({s})] + {x}", .{
|
||||
relocation.fmtRelocType(opts.type, self.getTarget().cpu.arch),
|
||||
opts.offset,
|
||||
opts.sym,
|
||||
@ -3754,9 +3766,8 @@ fn shString(

pub fn insertShString(self: *Elf, name: [:0]const u8) error{OutOfMemory}!u32 {
const gpa = self.base.comp.gpa;
const off = @as(u32, @intCast(self.shstrtab.items.len));
try self.shstrtab.ensureUnusedCapacity(gpa, name.len + 1);
self.shstrtab.writer(gpa).print("{s}\x00", .{name}) catch unreachable;
const off: u32 = @intCast(self.shstrtab.items.len);
try self.shstrtab.print(gpa, "{s}\x00", .{name});
return off;
}

@ -3769,7 +3780,7 @@ pub fn insertDynString(self: *Elf, name: []const u8) error{OutOfMemory}!u32 {
const gpa = self.base.comp.gpa;
const off = @as(u32, @intCast(self.dynstrtab.items.len));
try self.dynstrtab.ensureUnusedCapacity(gpa, name.len + 1);
self.dynstrtab.writer(gpa).print("{s}\x00", .{name}) catch unreachable;
self.dynstrtab.print(gpa, "{s}\x00", .{name}) catch unreachable;
return off;
}
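Both string-table helpers above drop the ensureUnusedCapacity plus writer(gpa).print pair in favour of printing straight into the unmanaged list. A small sketch of that idiom, assuming the allocator-taking ArrayListUnmanaged(u8).print helper this commit relies on; appendName is a hypothetical stand-in:

    // Sketch only: `print(gpa, fmt, args)` on ArrayListUnmanaged(u8) is the
    // dev-branch helper used by insertShString/insertDynString above.
    fn appendName(strtab: *std.ArrayListUnmanaged(u8), gpa: std.mem.Allocator, name: []const u8) error{OutOfMemory}!u32 {
        const off: u32 = @intCast(strtab.items.len);
        try strtab.print(gpa, "{s}\x00", .{name}); // was: strtab.writer(gpa).print(...)
        return off;
    }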
|
||||
|
||||
@ -3791,7 +3802,7 @@ fn reportUndefinedSymbols(self: *Elf, undefs: anytype) !void {
|
||||
for (refs.items[0..nrefs]) |ref| {
|
||||
const atom_ptr = self.atom(ref).?;
|
||||
const file_ptr = atom_ptr.file(self).?;
|
||||
err.addNote("referenced by {s}:{s}", .{ file_ptr.fmtPath(), atom_ptr.name(self) });
|
||||
err.addNote("referenced by {f}:{s}", .{ file_ptr.fmtPath(), atom_ptr.name(self) });
|
||||
}
|
||||
|
||||
if (refs.items.len > max_notes) {
|
||||
@ -3813,12 +3824,12 @@ fn reportDuplicates(self: *Elf, dupes: anytype) error{ HasDuplicates, OutOfMemor
|
||||
|
||||
var err = try diags.addErrorWithNotes(nnotes + 1);
|
||||
try err.addMsg("duplicate symbol definition: {s}", .{sym.name(self)});
|
||||
err.addNote("defined by {}", .{sym.file(self).?.fmtPath()});
|
||||
err.addNote("defined by {f}", .{sym.file(self).?.fmtPath()});
|
||||
|
||||
var inote: usize = 0;
|
||||
while (inote < @min(notes.items.len, max_notes)) : (inote += 1) {
|
||||
const file_ptr = self.file(notes.items[inote]).?;
|
||||
err.addNote("defined by {}", .{file_ptr.fmtPath()});
|
||||
err.addNote("defined by {f}", .{file_ptr.fmtPath()});
|
||||
}
|
||||
|
||||
if (notes.items.len > max_notes) {
|
||||
@ -3847,7 +3858,7 @@ pub fn addFileError(
|
||||
const diags = &self.base.comp.link_diags;
|
||||
var err = try diags.addErrorWithNotes(1);
|
||||
try err.addMsg(format, args);
|
||||
err.addNote("while parsing {}", .{self.file(file_index).?.fmtPath()});
|
||||
err.addNote("while parsing {f}", .{self.file(file_index).?.fmtPath()});
|
||||
}
|
||||
|
||||
pub fn failFile(
|
||||
@ -3872,16 +3883,10 @@ fn fmtShdr(self: *Elf, shdr: elf.Elf64_Shdr) std.fmt.Formatter(formatShdr) {
|
||||
} };
|
||||
}
|
||||
|
||||
fn formatShdr(
|
||||
ctx: FormatShdrCtx,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
_ = options;
|
||||
fn formatShdr(ctx: FormatShdrCtx, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
|
||||
_ = unused_fmt_string;
|
||||
const shdr = ctx.shdr;
|
||||
try writer.print("{s} : @{x} ({x}) : align({x}) : size({x}) : entsize({x}) : flags({})", .{
|
||||
try bw.print("{s} : @{x} ({x}) : align({x}) : size({x}) : entsize({x}) : flags({f})", .{
|
||||
ctx.elf_file.getShString(shdr.sh_name), shdr.sh_offset,
|
||||
shdr.sh_addr, shdr.sh_addralign,
|
||||
shdr.sh_size, shdr.sh_entsize,
|
||||
@ -3893,55 +3898,49 @@ pub fn fmtShdrFlags(sh_flags: u64) std.fmt.Formatter(formatShdrFlags) {
return .{ .data = sh_flags };
}

fn formatShdrFlags(
sh_flags: u64,
comptime unused_fmt_string: []const u8,
options: std.fmt.FormatOptions,
writer: anytype,
) !void {
fn formatShdrFlags(sh_flags: u64, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) !void {
_ = unused_fmt_string;
_ = options;
if (elf.SHF_WRITE & sh_flags != 0) {
|
||||
try writer.writeAll("W");
|
||||
try bw.writeByte('W');
|
||||
}
|
||||
if (elf.SHF_ALLOC & sh_flags != 0) {
|
||||
try writer.writeAll("A");
|
||||
try bw.writeByte('A');
|
||||
}
|
||||
if (elf.SHF_EXECINSTR & sh_flags != 0) {
|
||||
try writer.writeAll("X");
|
||||
try bw.writeByte('X');
|
||||
}
|
||||
if (elf.SHF_MERGE & sh_flags != 0) {
|
||||
try writer.writeAll("M");
|
||||
try bw.writeByte('M');
|
||||
}
|
||||
if (elf.SHF_STRINGS & sh_flags != 0) {
|
||||
try writer.writeAll("S");
|
||||
try bw.writeByte('S');
|
||||
}
|
||||
if (elf.SHF_INFO_LINK & sh_flags != 0) {
|
||||
try writer.writeAll("I");
|
||||
try bw.writeByte('I');
|
||||
}
|
||||
if (elf.SHF_LINK_ORDER & sh_flags != 0) {
|
||||
try writer.writeAll("L");
|
||||
try bw.writeByte('L');
|
||||
}
|
||||
if (elf.SHF_EXCLUDE & sh_flags != 0) {
|
||||
try writer.writeAll("E");
|
||||
try bw.writeByte('E');
|
||||
}
|
||||
if (elf.SHF_COMPRESSED & sh_flags != 0) {
|
||||
try writer.writeAll("C");
|
||||
try bw.writeByte('C');
|
||||
}
|
||||
if (elf.SHF_GROUP & sh_flags != 0) {
|
||||
try writer.writeAll("G");
|
||||
try bw.writeByte('G');
|
||||
}
|
||||
if (elf.SHF_OS_NONCONFORMING & sh_flags != 0) {
|
||||
try writer.writeAll("O");
|
||||
try bw.writeByte('O');
|
||||
}
|
||||
if (elf.SHF_TLS & sh_flags != 0) {
|
||||
try writer.writeAll("T");
|
||||
try bw.writeByte('T');
|
||||
}
|
||||
if (elf.SHF_X86_64_LARGE & sh_flags != 0) {
|
||||
try writer.writeAll("l");
|
||||
try bw.writeByte('l');
|
||||
}
|
||||
if (elf.SHF_MIPS_ADDR & sh_flags != 0 or elf.SHF_ARM_PURECODE & sh_flags != 0) {
try writer.writeAll("p");
try bw.writeByte('p');
}
}
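The formatter rewrites in this file all follow the signature change visible here: the callback now receives a *std.io.BufferedWriter plus the comptime format string, FormatOptions is gone, and values routed through std.fmt.Formatter are printed with {f}. A sketch of the new shape under those assumptions; Thing, formatThing, and fmtThing are hypothetical:

    // Sketch only: mirrors the formatter shape this diff ports to.
    const Thing = struct { id: u32 };

    fn formatThing(thing: Thing, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
        _ = unused_fmt_string; // there is no FormatOptions parameter anymore
        try bw.print("thing({d})", .{thing.id});
    }

    pub fn fmtThing(thing: Thing) std.fmt.Formatter(formatThing) {
        return .{ .data = thing };
    }

    // usage: try bw.print("{f}\n", .{fmtThing(.{ .id = 7 })});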
|
||||
|
||||
@ -3959,11 +3958,9 @@ fn fmtPhdr(self: *Elf, phdr: elf.Elf64_Phdr) std.fmt.Formatter(formatPhdr) {
|
||||
|
||||
fn formatPhdr(
|
||||
ctx: FormatPhdrCtx,
|
||||
bw: *std.io.BufferedWriter,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
_ = options;
|
||||
_ = unused_fmt_string;
|
||||
const phdr = ctx.phdr;
|
||||
const write = phdr.p_flags & elf.PF_W != 0;
|
||||
@ -3985,7 +3982,7 @@ fn formatPhdr(
|
||||
elf.PT_NOTE => "NOTE",
|
||||
else => "UNKNOWN",
|
||||
};
|
||||
try writer.print("{s} : {s} : @{x} ({x}) : align({x}) : filesz({x}) : memsz({x})", .{
|
||||
try bw.print("{s} : {s} : @{x} ({x}) : align({x}) : filesz({x}) : memsz({x})", .{
|
||||
p_type, flags, phdr.p_offset, phdr.p_vaddr,
|
||||
phdr.p_align, phdr.p_filesz, phdr.p_memsz,
|
||||
});
|
||||
@ -3997,30 +3994,28 @@ pub fn dumpState(self: *Elf) std.fmt.Formatter(fmtDumpState) {
|
||||
|
||||
fn fmtDumpState(
|
||||
self: *Elf,
|
||||
bw: *std.io.BufferedWriter,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
_ = unused_fmt_string;
|
||||
_ = options;
|
||||
|
||||
const shared_objects = self.shared_objects.values();
|
||||
|
||||
if (self.zigObjectPtr()) |zig_object| {
|
||||
try writer.print("zig_object({d}) : {s}\n", .{ zig_object.index, zig_object.basename });
|
||||
try writer.print("{}{}", .{
|
||||
try bw.print("zig_object({d}) : {s}\n", .{ zig_object.index, zig_object.basename });
|
||||
try bw.print("{f}{f}", .{
|
||||
zig_object.fmtAtoms(self),
|
||||
zig_object.fmtSymtab(self),
|
||||
});
|
||||
try writer.writeByte('\n');
|
||||
try bw.writeByte('\n');
|
||||
}
|
||||
|
||||
for (self.objects.items) |index| {
|
||||
const object = self.file(index).?.object;
|
||||
try writer.print("object({d}) : {}", .{ index, object.fmtPath() });
|
||||
if (!object.alive) try writer.writeAll(" : [*]");
|
||||
try writer.writeByte('\n');
|
||||
try writer.print("{}{}{}{}{}\n", .{
|
||||
try bw.print("object({d}) : {f}", .{ index, object.fmtPath() });
|
||||
if (!object.alive) try bw.writeAll(" : [*]");
|
||||
try bw.writeByte('\n');
|
||||
try bw.print("{f}{f}{f}{f}{f}\n", .{
|
||||
object.fmtAtoms(self),
|
||||
object.fmtCies(self),
|
||||
object.fmtFdes(self),
|
||||
@ -4031,59 +4026,59 @@ fn fmtDumpState(
|
||||
|
||||
for (shared_objects) |index| {
|
||||
const shared_object = self.file(index).?.shared_object;
|
||||
try writer.print("shared_object({d}) : {} : needed({})", .{
|
||||
try bw.print("shared_object({d}) : {f} : needed({})", .{
|
||||
index, shared_object.path, shared_object.needed,
|
||||
});
|
||||
if (!shared_object.alive) try writer.writeAll(" : [*]");
|
||||
try writer.writeByte('\n');
|
||||
try writer.print("{}\n", .{shared_object.fmtSymtab(self)});
|
||||
if (!shared_object.alive) try bw.writeAll(" : [*]");
|
||||
try bw.writeByte('\n');
|
||||
try bw.print("{f}\n", .{shared_object.fmtSymtab(self)});
|
||||
}
|
||||
|
||||
if (self.linker_defined_index) |index| {
|
||||
const linker_defined = self.file(index).?.linker_defined;
|
||||
try writer.print("linker_defined({d}) : (linker defined)\n", .{index});
|
||||
try writer.print("{}\n", .{linker_defined.fmtSymtab(self)});
|
||||
try bw.print("linker_defined({d}) : (linker defined)\n", .{index});
|
||||
try bw.print("{f}\n", .{linker_defined.fmtSymtab(self)});
|
||||
}
|
||||
|
||||
const slice = self.sections.slice();
|
||||
{
|
||||
try writer.writeAll("atom lists\n");
|
||||
try bw.writeAll("atom lists\n");
|
||||
for (slice.items(.shdr), slice.items(.atom_list_2), 0..) |shdr, atom_list, shndx| {
|
||||
try writer.print("shdr({d}) : {s} : {}\n", .{ shndx, self.getShString(shdr.sh_name), atom_list.fmt(self) });
|
||||
try bw.print("shdr({d}) : {s} : {f}\n", .{ shndx, self.getShString(shdr.sh_name), atom_list.fmt(self) });
|
||||
}
|
||||
}
|
||||
|
||||
if (self.requiresThunks()) {
|
||||
try writer.writeAll("thunks\n");
|
||||
try bw.writeAll("thunks\n");
|
||||
for (self.thunks.items, 0..) |th, index| {
|
||||
try writer.print("thunk({d}) : {}\n", .{ index, th.fmt(self) });
|
||||
try bw.print("thunk({d}) : {f}\n", .{ index, th.fmt(self) });
|
||||
}
|
||||
}
|
||||
|
||||
try writer.print("{}\n", .{self.got.fmt(self)});
|
||||
try writer.print("{}\n", .{self.plt.fmt(self)});
|
||||
try bw.print("{f}\n", .{self.got.fmt(self)});
|
||||
try bw.print("{f}\n", .{self.plt.fmt(self)});
|
||||
|
||||
try writer.writeAll("Output groups\n");
|
||||
try bw.writeAll("Output groups\n");
|
||||
for (self.group_sections.items) |cg| {
|
||||
try writer.print(" shdr({d}) : GROUP({})\n", .{ cg.shndx, cg.cg_ref });
|
||||
try bw.print(" shdr({d}) : GROUP({f})\n", .{ cg.shndx, cg.cg_ref });
|
||||
}
|
||||
|
||||
try writer.writeAll("\nOutput merge sections\n");
|
||||
try bw.writeAll("\nOutput merge sections\n");
|
||||
for (self.merge_sections.items) |msec| {
|
||||
try writer.print(" shdr({d}) : {}\n", .{ msec.output_section_index, msec.fmt(self) });
|
||||
try bw.print(" shdr({d}) : {f}\n", .{ msec.output_section_index, msec.fmt(self) });
|
||||
}
|
||||
|
||||
try writer.writeAll("\nOutput shdrs\n");
|
||||
try bw.writeAll("\nOutput shdrs\n");
|
||||
for (slice.items(.shdr), slice.items(.phndx), 0..) |shdr, phndx, shndx| {
|
||||
try writer.print(" shdr({d}) : phdr({?d}) : {}\n", .{
|
||||
try bw.print(" shdr({d}) : phdr({?d}) : {f}\n", .{
|
||||
shndx,
|
||||
phndx,
|
||||
self.fmtShdr(shdr),
|
||||
});
|
||||
}
|
||||
try writer.writeAll("\nOutput phdrs\n");
|
||||
try bw.writeAll("\nOutput phdrs\n");
|
||||
for (self.phdrs.items, 0..) |phdr, phndx| {
|
||||
try writer.print(" phdr({d}) : {}\n", .{ phndx, self.fmtPhdr(phdr) });
|
||||
try bw.print(" phdr({d}) : {f}\n", .{ phndx, self.fmtPhdr(phdr) });
|
||||
}
|
||||
}
|
||||
|
||||
@ -4221,15 +4216,9 @@ pub const Ref = struct {
|
||||
return ref.index == other.index and ref.file == other.file;
|
||||
}
|
||||
|
||||
pub fn format(
|
||||
ref: Ref,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
pub fn format(ref: Ref, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
|
||||
_ = unused_fmt_string;
|
||||
_ = options;
|
||||
try writer.print("ref({},{})", .{ ref.index, ref.file });
|
||||
try bw.print("ref({},{})", .{ ref.index, ref.file });
|
||||
}
|
||||
};
|
||||
|
||||
@ -4424,7 +4413,7 @@ fn createThunks(elf_file: *Elf, atom_list: *AtomList) !void {
|
||||
for (atom_list.atoms.keys()[start..i]) |ref| {
|
||||
const atom_ptr = elf_file.atom(ref).?;
|
||||
const file_ptr = atom_ptr.file(elf_file).?;
|
||||
log.debug("atom({}) {s}", .{ ref, atom_ptr.name(elf_file) });
|
||||
log.debug("atom({f}) {s}", .{ ref, atom_ptr.name(elf_file) });
|
||||
for (atom_ptr.relocs(elf_file)) |rel| {
|
||||
const is_reachable = switch (cpu_arch) {
|
||||
.aarch64 => r: {
|
||||
@ -4453,7 +4442,7 @@ fn createThunks(elf_file: *Elf, atom_list: *AtomList) !void {
|
||||
|
||||
thunk_ptr.value = try advance(atom_list, thunk_ptr.size(elf_file), Atom.Alignment.fromNonzeroByteUnits(2));
|
||||
|
||||
log.debug("thunk({d}) : {}", .{ thunk_index, thunk_ptr.fmt(elf_file) });
|
||||
log.debug("thunk({d}) : {f}", .{ thunk_index, thunk_ptr.fmt(elf_file) });
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -44,7 +44,7 @@ pub fn parse(
|
||||
pos += @sizeOf(elf.ar_hdr);
|
||||
|
||||
if (!mem.eql(u8, &hdr.ar_fmag, elf.ARFMAG)) {
|
||||
return diags.failParse(path, "invalid archive header delimiter: {s}", .{
|
||||
return diags.failParse(path, "invalid archive header delimiter: {f}", .{
|
||||
std.fmt.fmtSliceEscapeLower(&hdr.ar_fmag),
|
||||
});
|
||||
}
|
||||
@ -83,8 +83,8 @@ pub fn parse(
|
||||
.alive = false,
|
||||
};
|
||||
|
||||
log.debug("extracting object '{}' from archive '{}'", .{
|
||||
@as(Path, object.path), @as(Path, path),
|
||||
log.debug("extracting object '{f}' from archive '{f}'", .{
|
||||
object.path, path,
|
||||
});
|
||||
|
||||
try objects.append(gpa, object);
|
||||
@ -110,33 +110,16 @@ pub fn setArHdr(opts: struct {
|
||||
},
|
||||
size: usize,
|
||||
}) elf.ar_hdr {
|
||||
var hdr: elf.ar_hdr = .{
|
||||
.ar_name = undefined,
|
||||
.ar_date = undefined,
|
||||
.ar_uid = undefined,
|
||||
.ar_gid = undefined,
|
||||
.ar_mode = undefined,
|
||||
.ar_size = undefined,
|
||||
.ar_fmag = undefined,
|
||||
};
@memset(mem.asBytes(&hdr), 0x20);
var hdr: elf.ar_hdr = undefined;
@memset(mem.asBytes(&hdr), ' ');
@memcpy(&hdr.ar_fmag, elf.ARFMAG);

{
var stream = std.io.fixedBufferStream(&hdr.ar_name);
const writer = stream.writer();
switch (opts.name) {
.symtab => writer.print("{s}", .{elf.SYM64NAME}) catch unreachable,
.strtab => writer.print("//", .{}) catch unreachable,
.name => |x| writer.print("{s}/", .{x}) catch unreachable,
.name_off => |x| writer.print("/{d}", .{x}) catch unreachable,
}
switch (opts.name) {
.symtab => _ = std.fmt.bufPrint(&hdr.ar_name, "{s}", .{elf.SYM64NAME}) catch unreachable,
.strtab => _ = std.fmt.bufPrint(&hdr.ar_name, "//", .{}) catch unreachable,
.name => |x| _ = std.fmt.bufPrint(&hdr.ar_name, "{s}/", .{x}) catch unreachable,
.name_off => |x| _ = std.fmt.bufPrint(&hdr.ar_name, "/{d}", .{x}) catch unreachable,
}
{
var stream = std.io.fixedBufferStream(&hdr.ar_size);
stream.writer().print("{d}", .{opts.size}) catch unreachable;
}

_ = std.fmt.bufPrint(&hdr.ar_size, "{d}", .{opts.size}) catch unreachable;
return hdr;
}
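Rather than wrapping each fixed-width ar_hdr field in a fixedBufferStream, the new setArHdr space-fills the whole header and prints into the field arrays with std.fmt.bufPrint, which errors (here unreachable) if the text would not fit. A self-contained illustration of that padding behaviour using stable std APIs; the 16-byte field width is arbitrary:

    const std = @import("std");

    test "bufPrint into a fixed, space-padded field" {
        var field: [16]u8 = undefined;
        @memset(&field, ' '); // ar(5) header fields are space padded, not NUL terminated
        _ = try std.fmt.bufPrint(&field, "/{d}", .{42});
        try std.testing.expectEqualStrings("/42", field[0..3]);
        try std.testing.expectEqual(@as(u8, ' '), field[3]); // padding is left intact
    }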
|
||||
|
||||
@ -201,16 +184,10 @@ pub const ArSymtab = struct {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn format(
|
||||
ar: ArSymtab,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
pub fn format(ar: ArSymtab, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
|
||||
_ = ar;
|
||||
_ = bw;
|
||||
_ = unused_fmt_string;
|
||||
_ = options;
|
||||
_ = writer;
|
||||
@compileError("do not format ar symtab directly; use fmt instead");
|
||||
}
|
||||
|
||||
@ -226,20 +203,14 @@ pub const ArSymtab = struct {
|
||||
} };
|
||||
}
|
||||
|
||||
fn format2(
|
||||
ctx: FormatContext,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
fn format2(ctx: FormatContext, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
|
||||
_ = unused_fmt_string;
|
||||
_ = options;
|
||||
const ar = ctx.ar;
|
||||
const elf_file = ctx.elf_file;
|
||||
for (ar.symtab.items, 0..) |entry, i| {
|
||||
const name = ar.strtab.getAssumeExists(entry.off);
|
||||
const file = elf_file.file(entry.file_index).?;
|
||||
try writer.print(" {d}: {s} in file({d})({})\n", .{ i, name, entry.file_index, file.fmtPath() });
|
||||
try bw.print(" {d}: {s} in file({d})({f})\n", .{ i, name, entry.file_index, file.fmtPath() });
|
||||
}
|
||||
}
|
||||
|
||||
@ -264,9 +235,9 @@ pub const ArStrtab = struct {
|
||||
ar.buffer.deinit(allocator);
|
||||
}
|
||||
|
||||
pub fn insert(ar: *ArStrtab, allocator: Allocator, name: []const u8) error{OutOfMemory}!u32 {
|
||||
const off = @as(u32, @intCast(ar.buffer.items.len));
|
||||
try ar.buffer.writer(allocator).print("{s}/{c}", .{ name, strtab_delimiter });
|
||||
pub fn insert(ar: *ArStrtab, gpa: Allocator, name: []const u8) error{OutOfMemory}!u32 {
|
||||
const off: u32 = @intCast(ar.buffer.items.len);
|
||||
try ar.buffer.print(gpa, "{s}/{c}", .{ name, strtab_delimiter });
|
||||
return off;
|
||||
}
|
||||
|
||||
@ -280,15 +251,9 @@ pub const ArStrtab = struct {
|
||||
try writer.writeAll(ar.buffer.items);
|
||||
}
|
||||
|
||||
pub fn format(
|
||||
ar: ArStrtab,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
pub fn format(ar: ArStrtab, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
|
||||
_ = unused_fmt_string;
|
||||
_ = options;
|
||||
try writer.print("{s}", .{std.fmt.fmtSliceEscapeLower(ar.buffer.items)});
|
||||
try bw.print("{f}", .{std.fmt.fmtSliceEscapeLower(ar.buffer.items)});
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
@ -142,7 +142,7 @@ pub fn freeListEligible(self: Atom, elf_file: *Elf) bool {
|
||||
}
|
||||
|
||||
pub fn free(self: *Atom, elf_file: *Elf) void {
|
||||
log.debug("freeAtom atom({}) ({s})", .{ self.ref(), self.name(elf_file) });
|
||||
log.debug("freeAtom atom({f}) ({s})", .{ self.ref(), self.name(elf_file) });
|
||||
|
||||
const comp = elf_file.base.comp;
|
||||
const gpa = comp.gpa;
|
||||
@ -243,7 +243,7 @@ pub fn writeRelocs(self: Atom, elf_file: *Elf, out_relocs: *std.ArrayList(elf.El
|
||||
},
|
||||
}
|
||||
|
||||
relocs_log.debug(" {s}: [{x} => {d}({s})] + {x}", .{
|
||||
relocs_log.debug(" {f}: [{x} => {d}({s})] + {x}", .{
|
||||
relocation.fmtRelocType(rel.r_type(), cpu_arch),
|
||||
r_offset,
|
||||
r_sym,
|
||||
@ -316,7 +316,7 @@ pub fn scanRelocs(self: Atom, elf_file: *Elf, code: ?[]const u8, undefs: anytype
|
||||
};
|
||||
// Violation of One Definition Rule for COMDATs.
|
||||
// TODO convert into an error
|
||||
log.debug("{}: {s}: {s} refers to a discarded COMDAT section", .{
|
||||
log.debug("{f}: {s}: {s} refers to a discarded COMDAT section", .{
|
||||
file_ptr.fmtPath(),
|
||||
self.name(elf_file),
|
||||
sym_name,
|
||||
@ -519,11 +519,11 @@ fn dataType(symbol: *const Symbol, elf_file: *Elf) u2 {
|
||||
fn reportUnhandledRelocError(self: Atom, rel: elf.Elf64_Rela, elf_file: *Elf) RelocError!void {
|
||||
const diags = &elf_file.base.comp.link_diags;
|
||||
var err = try diags.addErrorWithNotes(1);
|
||||
try err.addMsg("fatal linker error: unhandled relocation type {} at offset 0x{x}", .{
|
||||
try err.addMsg("fatal linker error: unhandled relocation type {f} at offset 0x{x}", .{
|
||||
relocation.fmtRelocType(rel.r_type(), elf_file.getTarget().cpu.arch),
|
||||
rel.r_offset,
|
||||
});
|
||||
err.addNote("in {}:{s}", .{ self.file(elf_file).?.fmtPath(), self.name(elf_file) });
|
||||
err.addNote("in {f}:{s}", .{ self.file(elf_file).?.fmtPath(), self.name(elf_file) });
|
||||
return error.RelocFailure;
|
||||
}
|
||||
|
||||
@ -539,7 +539,7 @@ fn reportTextRelocError(
|
||||
rel.r_offset,
|
||||
symbol.name(elf_file),
|
||||
});
|
||||
err.addNote("in {}:{s}", .{ self.file(elf_file).?.fmtPath(), self.name(elf_file) });
|
||||
err.addNote("in {f}:{s}", .{ self.file(elf_file).?.fmtPath(), self.name(elf_file) });
|
||||
return error.RelocFailure;
|
||||
}
|
||||
|
||||
@ -555,7 +555,7 @@ fn reportPicError(
|
||||
rel.r_offset,
|
||||
symbol.name(elf_file),
|
||||
});
|
||||
err.addNote("in {}:{s}", .{ self.file(elf_file).?.fmtPath(), self.name(elf_file) });
|
||||
err.addNote("in {f}:{s}", .{ self.file(elf_file).?.fmtPath(), self.name(elf_file) });
|
||||
err.addNote("recompile with -fPIC", .{});
|
||||
return error.RelocFailure;
|
||||
}
|
||||
@ -572,7 +572,7 @@ fn reportNoPicError(
|
||||
rel.r_offset,
|
||||
symbol.name(elf_file),
|
||||
});
|
||||
err.addNote("in {}:{s}", .{ self.file(elf_file).?.fmtPath(), self.name(elf_file) });
|
||||
err.addNote("in {f}:{s}", .{ self.file(elf_file).?.fmtPath(), self.name(elf_file) });
|
||||
err.addNote("recompile with -fno-PIC", .{});
|
||||
return error.RelocFailure;
|
||||
}
|
||||
@ -621,7 +621,9 @@ pub fn resolveRelocsAlloc(self: Atom, elf_file: *Elf, code: []u8) RelocError!voi
|
||||
|
||||
const cpu_arch = elf_file.getTarget().cpu.arch;
|
||||
const file_ptr = self.file(elf_file).?;
|
||||
var stream = std.io.fixedBufferStream(code);
|
||||
|
||||
var bw: std.io.BufferedWriter = undefined;
|
||||
bw.initFixed(code);
|
||||
|
||||
const rels = self.relocs(elf_file);
|
||||
var it = RelocsIterator{ .relocs = rels };
|
||||
@ -652,7 +654,7 @@ pub fn resolveRelocsAlloc(self: Atom, elf_file: *Elf, code: []u8) RelocError!voi
|
||||
// Address of the dynamic thread pointer.
|
||||
const DTP = elf_file.dtpAddress();
|
||||
|
||||
relocs_log.debug(" {s}: {x}: [{x} => {x}] GOT({x}) ({s})", .{
|
||||
relocs_log.debug(" {f}: {x}: [{x} => {x}] GOT({x}) ({s})", .{
|
||||
relocation.fmtRelocType(rel.r_type(), cpu_arch),
|
||||
r_offset,
|
||||
P,
|
||||
@ -661,32 +663,32 @@ pub fn resolveRelocsAlloc(self: Atom, elf_file: *Elf, code: []u8) RelocError!voi
|
||||
target.name(elf_file),
|
||||
});

try stream.seekTo(r_offset);

const args = ResolveArgs{ P, A, S, GOT, G, TP, DTP };

bw.end = r_offset;
bw.count = 0;
switch (cpu_arch) {
|
||||
.x86_64 => x86_64.resolveRelocAlloc(self, elf_file, rel, target, args, &it, code, &stream) catch |err| switch (err) {
|
||||
.x86_64 => x86_64.resolveRelocAlloc(self, elf_file, rel, target, args, &it, &bw) catch |err| switch (err) {
|
||||
error.RelocFailure,
|
||||
error.RelaxFailure,
|
||||
error.InvalidInstruction,
|
||||
error.CannotEncode,
|
||||
=> has_reloc_errors = true,
|
||||
else => |e| return e,
|
||||
else => |e| return @errorCast(e),
|
||||
},
|
||||
.aarch64 => aarch64.resolveRelocAlloc(self, elf_file, rel, target, args, &it, code, &stream) catch |err| switch (err) {
|
||||
.aarch64 => aarch64.resolveRelocAlloc(self, elf_file, rel, target, args, &it, &bw) catch |err| switch (err) {
|
||||
error.RelocFailure,
|
||||
error.RelaxFailure,
|
||||
error.UnexpectedRemainder,
|
||||
error.DivisionByZero,
|
||||
=> has_reloc_errors = true,
|
||||
else => |e| return e,
|
||||
else => |e| return @errorCast(e),
|
||||
},
|
||||
.riscv64 => riscv.resolveRelocAlloc(self, elf_file, rel, target, args, &it, code, &stream) catch |err| switch (err) {
|
||||
.riscv64 => riscv.resolveRelocAlloc(self, elf_file, rel, target, args, &it, &bw) catch |err| switch (err) {
|
||||
error.RelocFailure,
|
||||
error.RelaxFailure,
|
||||
=> has_reloc_errors = true,
|
||||
else => |e| return e,
|
||||
else => |e| return @errorCast(e),
|
||||
},
|
||||
else => return error.UnsupportedCpuArch,
|
||||
}
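Where the old code seeked a fixedBufferStream to the relocation offset, the ported code repositions the fixed BufferedWriter directly by resetting its end and count fields before each per-arch resolver runs. A sketch of that idiom under the same assumptions about the dev-branch writer fields; patchWord, code, r_offset, and disp are hypothetical:

    // Sketch only: `end` is taken to be the write cursor into the fixed buffer
    // and `count` the running byte count, as this commit uses them.
    fn patchWord(code: []u8, r_offset: usize, disp: i32) !void {
        var bw: std.io.BufferedWriter = undefined;
        bw.initFixed(code); // writer over the atom's code bytes
        bw.end = r_offset; // was: try stream.seekTo(r_offset)
        bw.count = 0; // start counting this relocation's bytes from zero
        try bw.writeInt(i32, disp, .little); // overwrites 4 bytes at code[r_offset..]
    }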
|
||||
@ -804,7 +806,9 @@ pub fn resolveRelocsNonAlloc(self: Atom, elf_file: *Elf, code: []u8, undefs: any
|
||||
|
||||
const cpu_arch = elf_file.getTarget().cpu.arch;
|
||||
const file_ptr = self.file(elf_file).?;
|
||||
var stream = std.io.fixedBufferStream(code);
|
||||
|
||||
var bw: std.io.BufferedWriter = undefined;
|
||||
bw.initFixed(code);
|
||||
|
||||
const rels = self.relocs(elf_file);
|
||||
var has_reloc_errors = false;
|
||||
@ -823,7 +827,7 @@ pub fn resolveRelocsNonAlloc(self: Atom, elf_file: *Elf, code: []u8, undefs: any
|
||||
};
|
||||
// Violation of One Definition Rule for COMDATs.
|
||||
// TODO convert into an error
|
||||
log.debug("{}: {s}: {s} refers to a discarded COMDAT section", .{
|
||||
log.debug("{f}: {s}: {s} refers to a discarded COMDAT section", .{
|
||||
file_ptr.fmtPath(),
|
||||
self.name(elf_file),
|
||||
sym_name,
|
||||
@ -855,7 +859,7 @@ pub fn resolveRelocsNonAlloc(self: Atom, elf_file: *Elf, code: []u8, undefs: any
|
||||
|
||||
const args = ResolveArgs{ P, A, S, GOT, 0, 0, DTP };
|
||||
|
||||
relocs_log.debug(" {}: {x}: [{x} => {x}] ({s})", .{
|
||||
relocs_log.debug(" {f}: {x}: [{x} => {x}] ({s})", .{
|
||||
relocation.fmtRelocType(rel.r_type(), cpu_arch),
|
||||
rel.r_offset,
|
||||
P,
|
||||
@ -863,18 +867,18 @@ pub fn resolveRelocsNonAlloc(self: Atom, elf_file: *Elf, code: []u8, undefs: any
|
||||
target.name(elf_file),
|
||||
});
|
||||
|
||||
try stream.seekTo(r_offset);
|
||||
|
||||
bw.end = r_offset;
|
||||
bw.count = 0;
|
||||
switch (cpu_arch) {
|
||||
.x86_64 => x86_64.resolveRelocNonAlloc(self, elf_file, rel, target, args, &it, code, &stream) catch |err| switch (err) {
|
||||
.x86_64 => x86_64.resolveRelocNonAlloc(self, elf_file, rel, target, args, &bw) catch |err| switch (err) {
|
||||
error.RelocFailure => has_reloc_errors = true,
|
||||
else => |e| return e,
|
||||
},
|
||||
.aarch64 => aarch64.resolveRelocNonAlloc(self, elf_file, rel, target, args, &it, code, &stream) catch |err| switch (err) {
|
||||
.aarch64 => aarch64.resolveRelocNonAlloc(self, elf_file, rel, target, args, &bw) catch |err| switch (err) {
|
||||
error.RelocFailure => has_reloc_errors = true,
|
||||
else => |e| return e,
|
||||
},
|
||||
.riscv64 => riscv.resolveRelocNonAlloc(self, elf_file, rel, target, args, &it, code, &stream) catch |err| switch (err) {
|
||||
.riscv64 => riscv.resolveRelocNonAlloc(self, elf_file, rel, target, args, &bw) catch |err| switch (err) {
|
||||
error.RelocFailure => has_reloc_errors = true,
|
||||
else => |e| return e,
|
||||
},
|
||||
@ -904,16 +908,10 @@ pub fn setExtra(atom: Atom, extras: Extra, elf_file: *Elf) void {
|
||||
atom.file(elf_file).?.setAtomExtra(atom.extra_index, extras);
|
||||
}
|
||||
|
||||
pub fn format(
|
||||
atom: Atom,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
pub fn format(atom: Atom, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
|
||||
_ = atom;
|
||||
_ = bw;
|
||||
_ = unused_fmt_string;
|
||||
_ = options;
|
||||
_ = writer;
|
||||
@compileError("do not format Atom directly");
|
||||
}
|
||||
|
||||
@ -929,17 +927,11 @@ const FormatContext = struct {
|
||||
elf_file: *Elf,
|
||||
};
|
||||
|
||||
fn format2(
|
||||
ctx: FormatContext,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
_ = options;
|
||||
fn format2(ctx: FormatContext, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
|
||||
_ = unused_fmt_string;
|
||||
const atom = ctx.atom;
|
||||
const elf_file = ctx.elf_file;
|
||||
try writer.print("atom({d}) : {s} : @{x} : shdr({d}) : align({x}) : size({x}) : prev({}) : next({})", .{
|
||||
try bw.print("atom({d}) : {s} : @{x} : shdr({d}) : align({x}) : size({x}) : prev({f}) : next({f})", .{
|
||||
atom.atom_index, atom.name(elf_file), atom.address(elf_file),
|
||||
atom.output_section_index, atom.alignment.toByteUnits() orelse 0, atom.size,
|
||||
atom.prev_atom_ref, atom.next_atom_ref,
|
||||
@ -947,20 +939,20 @@ fn format2(
|
||||
if (atom.file(elf_file)) |atom_file| switch (atom_file) {
|
||||
.object => |object| {
|
||||
if (atom.fdes(object).len > 0) {
|
||||
try writer.writeAll(" : fdes{ ");
|
||||
try bw.writeAll(" : fdes{ ");
|
||||
const extras = atom.extra(elf_file);
|
||||
for (atom.fdes(object), extras.fde_start..) |fde, i| {
|
||||
try writer.print("{d}", .{i});
|
||||
if (!fde.alive) try writer.writeAll("([*])");
|
||||
if (i - extras.fde_start < extras.fde_count - 1) try writer.writeAll(", ");
|
||||
try bw.print("{d}", .{i});
|
||||
if (!fde.alive) try bw.writeAll("([*])");
|
||||
if (i - extras.fde_start < extras.fde_count - 1) try bw.writeAll(", ");
|
||||
}
|
||||
try writer.writeAll(" }");
|
||||
try bw.writeAll(" }");
|
||||
}
|
||||
},
|
||||
else => {},
|
||||
};
|
||||
if (!atom.alive) {
|
||||
try writer.writeAll(" : [*]");
|
||||
try bw.writeAll(" : [*]");
|
||||
}
|
||||
}
|
||||
|
||||
@ -1087,16 +1079,12 @@ const x86_64 = struct {
|
||||
target: *const Symbol,
|
||||
args: ResolveArgs,
|
||||
it: *RelocsIterator,
|
||||
code: []u8,
|
||||
stream: anytype,
|
||||
) (error{ InvalidInstruction, CannotEncode } || RelocError)!void {
|
||||
bw: *std.io.BufferedWriter,
|
||||
) anyerror!void {
|
||||
dev.check(.x86_64_backend);
|
||||
const t = &elf_file.base.comp.root_mod.resolved_target.result;
|
||||
const diags = &elf_file.base.comp.link_diags;
|
||||
const r_type: elf.R_X86_64 = @enumFromInt(rel.r_type());
|
||||
const r_offset = std.math.cast(usize, rel.r_offset) orelse return error.Overflow;
|
||||
|
||||
const cwriter = stream.writer();
|
||||
|
||||
const P, const A, const S, const GOT, const G, const TP, const DTP = args;
|
||||
|
||||
@ -1109,58 +1097,54 @@ const x86_64 = struct {
|
||||
rel,
|
||||
dynAbsRelocAction(target, elf_file),
|
||||
elf_file,
|
||||
cwriter,
|
||||
bw,
|
||||
);
|
||||
},
|
||||
|
||||
.PLT32 => try cwriter.writeInt(i32, @as(i32, @intCast(S + A - P)), .little),
|
||||
.PC32 => try cwriter.writeInt(i32, @as(i32, @intCast(S + A - P)), .little),
|
||||
.PLT32 => try bw.writeInt(i32, @as(i32, @intCast(S + A - P)), .little),
|
||||
.PC32 => try bw.writeInt(i32, @as(i32, @intCast(S + A - P)), .little),
|
||||
|
||||
.GOTPCREL => try cwriter.writeInt(i32, @as(i32, @intCast(G + GOT + A - P)), .little),
|
||||
.GOTPC32 => try cwriter.writeInt(i32, @as(i32, @intCast(GOT + A - P)), .little),
|
||||
.GOTPC64 => try cwriter.writeInt(i64, GOT + A - P, .little),
|
||||
.GOTPCREL => try bw.writeInt(i32, @as(i32, @intCast(G + GOT + A - P)), .little),
|
||||
.GOTPC32 => try bw.writeInt(i32, @as(i32, @intCast(GOT + A - P)), .little),
|
||||
.GOTPC64 => try bw.writeInt(i64, GOT + A - P, .little),
|
||||
|
||||
.GOTPCRELX => {
|
||||
if (!target.flags.import and !target.isIFunc(elf_file) and !target.isAbs(elf_file)) blk: {
|
||||
x86_64.relaxGotpcrelx(code[r_offset - 2 ..], t) catch break :blk;
|
||||
try cwriter.writeInt(i32, @as(i32, @intCast(S + A - P)), .little);
|
||||
return;
|
||||
}
|
||||
try cwriter.writeInt(i32, @as(i32, @intCast(G + GOT + A - P)), .little);
|
||||
.GOTPCRELX => if (!target.flags.import and !target.isIFunc(elf_file) and !target.isAbs(elf_file)) blk: {
|
||||
x86_64.relaxGotpcrelx(bw.buffer[bw.end - 2 ..], t) catch break :blk;
|
||||
try bw.writeInt(i32, @as(i32, @intCast(S + A - P)), .little);
|
||||
} else {
|
||||
try bw.writeInt(i32, @as(i32, @intCast(G + GOT + A - P)), .little);
|
||||
},
|
||||
|
||||
.REX_GOTPCRELX => {
|
||||
if (!target.flags.import and !target.isIFunc(elf_file) and !target.isAbs(elf_file)) blk: {
|
||||
x86_64.relaxRexGotpcrelx(code[r_offset - 3 ..], t) catch break :blk;
|
||||
try cwriter.writeInt(i32, @as(i32, @intCast(S + A - P)), .little);
|
||||
return;
|
||||
}
|
||||
try cwriter.writeInt(i32, @as(i32, @intCast(G + GOT + A - P)), .little);
|
||||
.REX_GOTPCRELX => if (!target.flags.import and !target.isIFunc(elf_file) and !target.isAbs(elf_file)) blk: {
|
||||
x86_64.relaxRexGotpcrelx(bw.buffer[bw.end - 3 ..], t) catch break :blk;
|
||||
try bw.writeInt(i32, @as(i32, @intCast(S + A - P)), .little);
|
||||
} else {
|
||||
try bw.writeInt(i32, @as(i32, @intCast(G + GOT + A - P)), .little);
|
||||
},
|
||||
|
||||
.@"32" => try cwriter.writeInt(u32, @as(u32, @truncate(@as(u64, @intCast(S + A)))), .little),
|
||||
.@"32S" => try cwriter.writeInt(i32, @as(i32, @truncate(S + A)), .little),
|
||||
.@"32" => try bw.writeInt(u32, @as(u32, @truncate(@as(u64, @intCast(S + A)))), .little),
|
||||
.@"32S" => try bw.writeInt(i32, @as(i32, @truncate(S + A)), .little),
|
||||
|
||||
.TPOFF32 => try cwriter.writeInt(i32, @as(i32, @truncate(S + A - TP)), .little),
|
||||
.TPOFF64 => try cwriter.writeInt(i64, S + A - TP, .little),
|
||||
.TPOFF32 => try bw.writeInt(i32, @as(i32, @truncate(S + A - TP)), .little),
|
||||
.TPOFF64 => try bw.writeInt(i64, S + A - TP, .little),
|
||||
|
||||
.DTPOFF32 => try cwriter.writeInt(i32, @as(i32, @truncate(S + A - DTP)), .little),
|
||||
.DTPOFF64 => try cwriter.writeInt(i64, S + A - DTP, .little),
|
||||
.DTPOFF32 => try bw.writeInt(i32, @as(i32, @truncate(S + A - DTP)), .little),
|
||||
.DTPOFF64 => try bw.writeInt(i64, S + A - DTP, .little),
|
||||
|
||||
.TLSGD => {
|
||||
if (target.flags.has_tlsgd) {
|
||||
const S_ = target.tlsGdAddress(elf_file);
|
||||
try cwriter.writeInt(i32, @as(i32, @intCast(S_ + A - P)), .little);
|
||||
try bw.writeInt(i32, @as(i32, @intCast(S_ + A - P)), .little);
|
||||
} else if (target.flags.has_gottp) {
|
||||
const S_ = target.gotTpAddress(elf_file);
|
||||
try x86_64.relaxTlsGdToIe(atom, &.{ rel, it.next().? }, @intCast(S_ - P), elf_file, stream);
|
||||
try x86_64.relaxTlsGdToIe(atom, &.{ rel, it.next().? }, @intCast(S_ - P), elf_file, bw);
|
||||
} else {
|
||||
try x86_64.relaxTlsGdToLe(
|
||||
atom,
|
||||
&.{ rel, it.next().? },
|
||||
@as(i32, @intCast(S - TP)),
|
||||
elf_file,
|
||||
stream,
|
||||
bw,
|
||||
);
|
||||
}
|
||||
},
|
||||
@ -1169,14 +1153,14 @@ const x86_64 = struct {
|
||||
if (elf_file.got.tlsld_index) |entry_index| {
|
||||
const tlsld_entry = elf_file.got.entries.items[entry_index];
|
||||
const S_ = tlsld_entry.address(elf_file);
|
||||
try cwriter.writeInt(i32, @as(i32, @intCast(S_ + A - P)), .little);
|
||||
try bw.writeInt(i32, @as(i32, @intCast(S_ + A - P)), .little);
|
||||
} else {
|
||||
try x86_64.relaxTlsLdToLe(
|
||||
atom,
|
||||
&.{ rel, it.next().? },
|
||||
@as(i32, @intCast(TP - elf_file.tlsAddress())),
|
||||
elf_file,
|
||||
stream,
|
||||
bw,
|
||||
);
|
||||
}
|
||||
},
|
||||
@ -1184,38 +1168,38 @@ const x86_64 = struct {
|
||||
.GOTPC32_TLSDESC => {
|
||||
if (target.flags.has_tlsdesc) {
|
||||
const S_ = target.tlsDescAddress(elf_file);
|
||||
try cwriter.writeInt(i32, @as(i32, @intCast(S_ + A - P)), .little);
|
||||
try bw.writeInt(i32, @as(i32, @intCast(S_ + A - P)), .little);
|
||||
} else {
|
||||
x86_64.relaxGotPcTlsDesc(code[r_offset - 3 ..], t) catch {
|
||||
x86_64.relaxGotPcTlsDesc(bw.buffer[bw.end - 3 ..], t) catch {
|
||||
var err = try diags.addErrorWithNotes(1);
|
||||
try err.addMsg("could not relax {s}", .{@tagName(r_type)});
|
||||
err.addNote("in {}:{s} at offset 0x{x}", .{
|
||||
err.addNote("in {f}:{s} at offset 0x{x}", .{
|
||||
atom.file(elf_file).?.fmtPath(),
|
||||
atom.name(elf_file),
|
||||
rel.r_offset,
|
||||
});
|
||||
return error.RelaxFailure;
|
||||
};
|
||||
try cwriter.writeInt(i32, @as(i32, @intCast(S - TP)), .little);
|
||||
try bw.writeInt(i32, @as(i32, @intCast(S - TP)), .little);
|
||||
}
|
||||
},
|
||||
|
||||
.TLSDESC_CALL => if (!target.flags.has_tlsdesc) {
|
||||
// call -> nop
|
||||
try cwriter.writeAll(&.{ 0x66, 0x90 });
|
||||
try bw.writeAll(&.{ 0x66, 0x90 });
|
||||
},
|
||||
|
||||
.GOTTPOFF => {
|
||||
if (target.flags.has_gottp) {
|
||||
const S_ = target.gotTpAddress(elf_file);
|
||||
try cwriter.writeInt(i32, @as(i32, @intCast(S_ + A - P)), .little);
|
||||
try bw.writeInt(i32, @as(i32, @intCast(S_ + A - P)), .little);
|
||||
} else {
|
||||
x86_64.relaxGotTpOff(code[r_offset - 3 ..], t);
|
||||
try cwriter.writeInt(i32, @as(i32, @intCast(S - TP)), .little);
|
||||
x86_64.relaxGotTpOff(bw.buffer[bw.end - 3 ..], t);
|
||||
try bw.writeInt(i32, @as(i32, @intCast(S - TP)), .little);
|
||||
}
|
||||
},
|
||||
|
||||
.GOT32 => try cwriter.writeInt(i32, @as(i32, @intCast(G + A)), .little),
|
||||
.GOT32 => try bw.writeInt(i32, @as(i32, @intCast(G + A)), .little),
|
||||
|
||||
else => try atom.reportUnhandledRelocError(rel, elf_file),
|
||||
}
|
||||
@ -1227,45 +1211,40 @@ const x86_64 = struct {
|
||||
rel: elf.Elf64_Rela,
|
||||
target: *const Symbol,
|
||||
args: ResolveArgs,
|
||||
it: *RelocsIterator,
|
||||
code: []u8,
|
||||
stream: anytype,
|
||||
) !void {
|
||||
bw: *std.io.BufferedWriter,
|
||||
) anyerror!void {
|
||||
dev.check(.x86_64_backend);
|
||||
_ = code;
|
||||
_ = it;
|
||||
const r_type: elf.R_X86_64 = @enumFromInt(rel.r_type());
|
||||
const cwriter = stream.writer();
|
||||
|
||||
const r_type: elf.R_X86_64 = @enumFromInt(rel.r_type());
|
||||
_, const A, const S, const GOT, _, _, const DTP = args;
|
||||
|
||||
switch (r_type) {
|
||||
.NONE => unreachable,
|
||||
.@"8" => try cwriter.writeInt(u8, @as(u8, @bitCast(@as(i8, @intCast(S + A)))), .little),
|
||||
.@"16" => try cwriter.writeInt(u16, @as(u16, @bitCast(@as(i16, @intCast(S + A)))), .little),
|
||||
.@"32" => try cwriter.writeInt(u32, @as(u32, @bitCast(@as(i32, @intCast(S + A)))), .little),
|
||||
.@"32S" => try cwriter.writeInt(i32, @as(i32, @intCast(S + A)), .little),
|
||||
.@"8" => try bw.writeInt(u8, @as(u8, @bitCast(@as(i8, @intCast(S + A)))), .little),
|
||||
.@"16" => try bw.writeInt(u16, @as(u16, @bitCast(@as(i16, @intCast(S + A)))), .little),
|
||||
.@"32" => try bw.writeInt(u32, @as(u32, @bitCast(@as(i32, @intCast(S + A)))), .little),
|
||||
.@"32S" => try bw.writeInt(i32, @as(i32, @intCast(S + A)), .little),
|
||||
.@"64" => if (atom.debugTombstoneValue(target.*, elf_file)) |value|
|
||||
try cwriter.writeInt(u64, value, .little)
|
||||
try bw.writeInt(u64, value, .little)
|
||||
else
|
||||
try cwriter.writeInt(i64, S + A, .little),
|
||||
try bw.writeInt(i64, S + A, .little),
|
||||
.DTPOFF32 => if (atom.debugTombstoneValue(target.*, elf_file)) |value|
|
||||
try cwriter.writeInt(u64, value, .little)
|
||||
try bw.writeInt(u64, value, .little)
|
||||
else
|
||||
try cwriter.writeInt(i32, @as(i32, @intCast(S + A - DTP)), .little),
|
||||
try bw.writeInt(i32, @as(i32, @intCast(S + A - DTP)), .little),
|
||||
.DTPOFF64 => if (atom.debugTombstoneValue(target.*, elf_file)) |value|
|
||||
try cwriter.writeInt(u64, value, .little)
|
||||
try bw.writeInt(u64, value, .little)
|
||||
else
|
||||
try cwriter.writeInt(i64, S + A - DTP, .little),
|
||||
.GOTOFF64 => try cwriter.writeInt(i64, S + A - GOT, .little),
|
||||
.GOTPC64 => try cwriter.writeInt(i64, GOT + A, .little),
|
||||
try bw.writeInt(i64, S + A - DTP, .little),
|
||||
.GOTOFF64 => try bw.writeInt(i64, S + A - GOT, .little),
|
||||
.GOTPC64 => try bw.writeInt(i64, GOT + A, .little),
|
||||
.SIZE32 => {
|
||||
const size = @as(i64, @intCast(target.elfSym(elf_file).st_size));
|
||||
try cwriter.writeInt(u32, @bitCast(@as(i32, @intCast(size + A))), .little);
|
||||
try bw.writeInt(u32, @bitCast(@as(i32, @intCast(size + A))), .little);
|
||||
},
|
||||
.SIZE64 => {
|
||||
const size = @as(i64, @intCast(target.elfSym(elf_file).st_size));
|
||||
try cwriter.writeInt(i64, @intCast(size + A), .little);
|
||||
try bw.writeInt(i64, @intCast(size + A), .little);
|
||||
},
|
||||
else => try atom.reportUnhandledRelocError(rel, elf_file),
|
||||
}
|
||||
@ -1285,7 +1264,7 @@ const x86_64 = struct {
|
||||
}, t),
|
||||
else => return error.RelaxFailure,
|
||||
};
|
||||
relocs_log.debug(" relaxing {} => {}", .{ old_inst.encoding, inst.encoding });
|
||||
relocs_log.debug(" relaxing {f} => {f}", .{ old_inst.encoding, inst.encoding });
|
||||
const nop: Instruction = try .new(.none, .nop, &.{}, t);
|
||||
try encode(&.{ nop, inst }, code);
|
||||
}
|
||||
@ -1296,7 +1275,7 @@ const x86_64 = struct {
|
||||
switch (old_inst.encoding.mnemonic) {
|
||||
.mov => {
|
||||
const inst: Instruction = try .new(old_inst.prefix, .lea, &old_inst.ops, t);
|
||||
relocs_log.debug(" relaxing {} => {}", .{ old_inst.encoding, inst.encoding });
|
||||
relocs_log.debug(" relaxing {f} => {f}", .{ old_inst.encoding, inst.encoding });
|
||||
try encode(&.{inst}, code);
|
||||
},
|
||||
else => return error.RelaxFailure,
|
||||
@ -1308,12 +1287,11 @@ const x86_64 = struct {
|
||||
rels: []const elf.Elf64_Rela,
|
||||
value: i32,
|
||||
elf_file: *Elf,
|
||||
stream: anytype,
|
||||
bw: *std.io.BufferedWriter,
|
||||
) !void {
|
||||
dev.check(.x86_64_backend);
|
||||
assert(rels.len == 2);
|
||||
const diags = &elf_file.base.comp.link_diags;
|
||||
const writer = stream.writer();
|
||||
const rel: elf.R_X86_64 = @enumFromInt(rels[1].r_type());
|
||||
switch (rel) {
|
||||
.PC32,
|
||||
@ -1324,17 +1302,17 @@ const x86_64 = struct {
|
||||
0x48, 0x03, 0x05, 0, 0, 0, 0, // add foo@gottpoff(%rip), %rax
|
||||
};
|
||||
std.mem.writeInt(i32, insts[12..][0..4], value - 12, .little);
|
||||
try stream.seekBy(-4);
|
||||
try writer.writeAll(&insts);
|
||||
bw.end -= 4;
|
||||
try bw.writeAll(&insts);
|
||||
},
|
||||
|
||||
else => {
|
||||
var err = try diags.addErrorWithNotes(1);
|
||||
try err.addMsg("TODO: rewrite {} when followed by {}", .{
|
||||
try err.addMsg("TODO: rewrite {f} when followed by {f}", .{
|
||||
relocation.fmtRelocType(rels[0].r_type(), .x86_64),
|
||||
relocation.fmtRelocType(rels[1].r_type(), .x86_64),
|
||||
});
|
||||
err.addNote("in {}:{s} at offset 0x{x}", .{
|
||||
err.addNote("in {f}:{s} at offset 0x{x}", .{
|
||||
self.file(elf_file).?.fmtPath(),
|
||||
self.name(elf_file),
|
||||
rels[0].r_offset,
|
||||
@ -1349,12 +1327,11 @@ const x86_64 = struct {
|
||||
rels: []const elf.Elf64_Rela,
|
||||
value: i32,
|
||||
elf_file: *Elf,
|
||||
stream: anytype,
|
||||
bw: *std.io.BufferedWriter,
|
||||
) !void {
|
||||
dev.check(.x86_64_backend);
|
||||
assert(rels.len == 2);
|
||||
const diags = &elf_file.base.comp.link_diags;
|
||||
const writer = stream.writer();
|
||||
const rel: elf.R_X86_64 = @enumFromInt(rels[1].r_type());
|
||||
switch (rel) {
|
||||
.PC32,
|
||||
@ -1366,8 +1343,8 @@ const x86_64 = struct {
|
||||
0x48, 0x2d, 0, 0, 0, 0, // sub $tls_size, %rax
|
||||
};
|
||||
std.mem.writeInt(i32, insts[8..][0..4], value, .little);
|
||||
try stream.seekBy(-3);
|
||||
try writer.writeAll(&insts);
|
||||
bw.end -= 3;
|
||||
try bw.writeAll(&insts);
|
||||
},
|
||||
|
||||
.GOTPCREL,
|
||||
@ -1380,17 +1357,17 @@ const x86_64 = struct {
|
||||
0x90, // nop
|
||||
};
|
||||
std.mem.writeInt(i32, insts[8..][0..4], value, .little);
|
||||
try stream.seekBy(-3);
|
||||
try writer.writeAll(&insts);
|
||||
bw.end -= 3;
|
||||
try bw.writeAll(&insts);
|
||||
},
|
||||
|
||||
else => {
|
||||
var err = try diags.addErrorWithNotes(1);
|
||||
try err.addMsg("TODO: rewrite {} when followed by {}", .{
|
||||
try err.addMsg("TODO: rewrite {f} when followed by {f}", .{
|
||||
relocation.fmtRelocType(rels[0].r_type(), .x86_64),
|
||||
relocation.fmtRelocType(rels[1].r_type(), .x86_64),
|
||||
});
|
||||
err.addNote("in {}:{s} at offset 0x{x}", .{
|
||||
err.addNote("in {f}:{s} at offset 0x{x}", .{
|
||||
self.file(elf_file).?.fmtPath(),
|
||||
self.name(elf_file),
|
||||
rels[0].r_offset,
|
||||
@ -1410,7 +1387,12 @@ const x86_64 = struct {
|
||||
// TODO: hack to force imm32s in the assembler
|
||||
.{ .imm = .s(-129) },
|
||||
}, t) catch return false;
|
||||
inst.encode(std.io.null_writer, .{}) catch return false;
|
||||
var buf: [std.atomic.cache_line]u8 = undefined;
|
||||
var bw: std.io.BufferedWriter = .{
|
||||
.unbuffered_writer = .null,
|
||||
.buffer = &buf,
|
||||
};
|
||||
inst.encode(&bw, .{}) catch return false;
|
||||
return true;
|
||||
},
|
||||
else => return false,
|
||||
@ -1427,7 +1409,7 @@ const x86_64 = struct {
|
||||
// TODO: hack to force imm32s in the assembler
|
||||
.{ .imm = .s(-129) },
|
||||
}, t) catch unreachable;
|
||||
relocs_log.debug(" relaxing {} => {}", .{ old_inst.encoding, inst.encoding });
|
||||
relocs_log.debug(" relaxing {f} => {f}", .{ old_inst.encoding, inst.encoding });
|
||||
encode(&.{inst}, code) catch unreachable;
|
||||
},
|
||||
else => unreachable,
|
||||
@ -1444,7 +1426,7 @@ const x86_64 = struct {
|
||||
// TODO: hack to force imm32s in the assembler
|
||||
.{ .imm = .s(-129) },
|
||||
}, target);
|
||||
relocs_log.debug(" relaxing {} => {}", .{ old_inst.encoding, inst.encoding });
|
||||
relocs_log.debug(" relaxing {f} => {f}", .{ old_inst.encoding, inst.encoding });
|
||||
try encode(&.{inst}, code);
|
||||
},
|
||||
else => return error.RelaxFailure,
|
||||
@ -1456,12 +1438,11 @@ const x86_64 = struct {
|
||||
rels: []const elf.Elf64_Rela,
|
||||
value: i32,
|
||||
elf_file: *Elf,
|
||||
stream: anytype,
|
||||
bw: *std.io.BufferedWriter,
|
||||
) !void {
|
||||
dev.check(.x86_64_backend);
|
||||
assert(rels.len == 2);
|
||||
const diags = &elf_file.base.comp.link_diags;
|
||||
const writer = stream.writer();
|
||||
const rel: elf.R_X86_64 = @enumFromInt(rels[1].r_type());
|
||||
switch (rel) {
|
||||
.PC32,
|
||||
@ -1474,9 +1455,9 @@ const x86_64 = struct {
|
||||
0x48, 0x81, 0xc0, 0, 0, 0, 0, // add $tp_offset, %rax
|
||||
};
|
||||
std.mem.writeInt(i32, insts[12..][0..4], value, .little);
|
||||
try stream.seekBy(-4);
|
||||
try writer.writeAll(&insts);
|
||||
relocs_log.debug(" relaxing {} and {}", .{
|
||||
bw.end -= 4;
|
||||
try bw.writeAll(&insts);
|
||||
relocs_log.debug(" relaxing {f} and {f}", .{
|
||||
relocation.fmtRelocType(rels[0].r_type(), .x86_64),
|
||||
relocation.fmtRelocType(rels[1].r_type(), .x86_64),
|
||||
});
|
||||
@ -1484,11 +1465,11 @@ const x86_64 = struct {
|
||||
|
||||
else => {
|
||||
var err = try diags.addErrorWithNotes(1);
|
||||
try err.addMsg("fatal linker error: rewrite {} when followed by {}", .{
|
||||
try err.addMsg("fatal linker error: rewrite {f} when followed by {f}", .{
|
||||
relocation.fmtRelocType(rels[0].r_type(), .x86_64),
|
||||
relocation.fmtRelocType(rels[1].r_type(), .x86_64),
|
||||
});
|
||||
err.addNote("in {}:{s} at offset 0x{x}", .{
|
||||
err.addNote("in {f}:{s} at offset 0x{x}", .{
|
||||
self.file(elf_file).?.fmtPath(),
|
||||
self.name(elf_file),
|
||||
rels[0].r_offset,
|
||||
@ -1505,11 +1486,9 @@ const x86_64 = struct {
|
||||
}
|
||||
|
||||
fn encode(insts: []const Instruction, code: []u8) !void {
var stream = std.io.fixedBufferStream(code);
const writer = stream.writer();
for (insts) |inst| {
try inst.encode(writer, .{});
}
var bw: std.io.BufferedWriter = undefined;
bw.initFixed(code);
for (insts) |inst| try inst.encode(&bw, .{});
}
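encode() above shows the fixed-buffer variant, and the encodability check earlier in this hunk shows the probe variant that replaces std.io.null_writer: a BufferedWriter whose unbuffered_writer is .null and whose buffer is a small stack array, used only to see whether an instruction encodes at all. Both are sketched below under the same assumptions about the dev-branch BufferedWriter initializers; Instruction is the x86_64 encoder type already imported in this file:

    // Sketch only: `.unbuffered_writer = .null` and `initFixed` are the
    // dev-branch fields this commit relies on.
    fn encodeInto(insts: []const Instruction, code: []u8) !void {
        var bw: std.io.BufferedWriter = undefined;
        bw.initFixed(code); // errors if `code` cannot hold the encoded bytes
        for (insts) |inst| try inst.encode(&bw, .{});
    }

    fn encodes(inst: Instruction) bool {
        var buf: [std.atomic.cache_line]u8 = undefined;
        var bw: std.io.BufferedWriter = .{
            .unbuffered_writer = .null, // discard the bytes; only success matters
            .buffer = &buf,
        };
        inst.encode(&bw, .{}) catch return false;
        return true;
    }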
|
||||
|
||||
const bits = @import("../../arch/x86_64/bits.zig");
|
||||
@ -1613,16 +1592,14 @@ const aarch64 = struct {
|
||||
target: *const Symbol,
|
||||
args: ResolveArgs,
|
||||
it: *RelocsIterator,
|
||||
code_buffer: []u8,
|
||||
stream: anytype,
|
||||
) (error{ UnexpectedRemainder, DivisionByZero } || RelocError)!void {
|
||||
bw: *std.io.BufferedWriter,
|
||||
) anyerror!void {
|
||||
_ = it;
|
||||
|
||||
const diags = &elf_file.base.comp.link_diags;
|
||||
const r_type: elf.R_AARCH64 = @enumFromInt(rel.r_type());
|
||||
const r_offset = std.math.cast(usize, rel.r_offset) orelse return error.Overflow;
|
||||
const cwriter = stream.writer();
|
||||
const code = code_buffer[r_offset..][0..4];
|
||||
const code = (try bw.writableSlice(4))[0..4];
|
||||
const file_ptr = atom.file(elf_file).?;
|
||||
|
||||
const P, const A, const S, const GOT, const G, const TP, const DTP = args;
|
||||
@ -1636,7 +1613,7 @@ const aarch64 = struct {
|
||||
rel,
|
||||
dynAbsRelocAction(target, elf_file),
|
||||
elf_file,
|
||||
cwriter,
|
||||
bw,
|
||||
);
|
||||
},
|
||||
|
||||
@ -1649,17 +1626,17 @@ const aarch64 = struct {
|
||||
const S_ = th.targetAddress(target_index, elf_file);
|
||||
break :blk math.cast(i28, S_ + A - P) orelse return error.Overflow;
|
||||
};
|
||||
aarch64_util.writeBranchImm(disp, code);
|
||||
aarch64_util.writeBranchImm(disp, (try bw.writableSlice(4))[0..4]);
|
||||
},
|
||||
|
||||
.PREL32 => {
|
||||
const value = math.cast(i32, S + A - P) orelse return error.Overflow;
|
||||
mem.writeInt(u32, code, @bitCast(value), .little);
|
||||
try bw.writeInt(u32, @bitCast(value), .little);
|
||||
},
|
||||
|
||||
.PREL64 => {
|
||||
const value = S + A - P;
|
||||
mem.writeInt(u64, code_buffer[r_offset..][0..8], @bitCast(value), .little);
|
||||
try bw.writeInt(u64, @bitCast(value), .little);
|
||||
},
|
||||
|
||||
.ADR_PREL_PG_HI21 => {
|
||||
@ -1675,7 +1652,7 @@ const aarch64 = struct {
|
||||
// TODO: relax
|
||||
var err = try diags.addErrorWithNotes(1);
|
||||
try err.addMsg("TODO: relax ADR_GOT_PAGE", .{});
|
||||
err.addNote("in {}:{s} at offset 0x{x}", .{
|
||||
err.addNote("in {f}:{s} at offset 0x{x}", .{
|
||||
atom.file(elf_file).?.fmtPath(),
|
||||
atom.name(elf_file),
|
||||
r_offset,
|
||||
@ -1818,25 +1795,18 @@ const aarch64 = struct {
|
||||
rel: elf.Elf64_Rela,
|
||||
target: *const Symbol,
|
||||
args: ResolveArgs,
|
||||
it: *RelocsIterator,
|
||||
code: []u8,
|
||||
stream: anytype,
|
||||
bw: *std.io.BufferedWriter,
|
||||
) !void {
|
||||
_ = it;
|
||||
_ = code;
|
||||
|
||||
const r_type: elf.R_AARCH64 = @enumFromInt(rel.r_type());
|
||||
const cwriter = stream.writer();
|
||||
|
||||
_, const A, const S, _, _, _, _ = args;

switch (r_type) {
.NONE => unreachable,
.ABS32 => try cwriter.writeInt(i32, @as(i32, @intCast(S + A)), .little),
.ABS32 => try bw.writeInt(i32, @as(i32, @intCast(S + A)), .little),
.ABS64 => if (atom.debugTombstoneValue(target.*, elf_file)) |value|
try cwriter.writeInt(u64, value, .little)
try bw.writeInt(u64, value, .little)
else
try cwriter.writeInt(i64, S + A, .little),
try bw.writeInt(i64, S + A, .little),
else => try atom.reportUnhandledRelocError(rel, elf_file),
}
}
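The aarch64 and riscv resolvers above also lean on writableSlice for fixed-width instruction words: instead of indexing code[r_offset..][0..4], the resolver asks the writer for the next few bytes and patches them in place. A sketch of that pattern; the writableSlice semantics (return the next n buffer bytes and advance past them) are inferred from this diff, and patchInstWord is hypothetical:

    // Sketch only: assumes writableSlice(n) yields the next n bytes of the
    // fixed buffer, as the HI20/CALL_PLT cases above use it.
    fn patchInstWord(bw: *std.io.BufferedWriter, value: u32) !void {
        const word = (try bw.writableSlice(4))[0..4]; // was: code[r_offset..][0..4]
        std.mem.writeInt(u32, word, value, .little);
    }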
|
||||
@ -1898,13 +1868,10 @@ const riscv = struct {
target: *const Symbol,
args: ResolveArgs,
it: *RelocsIterator,
code: []u8,
stream: anytype,
bw: *std.io.BufferedWriter,
) !void {
const diags = &elf_file.base.comp.link_diags;
const r_type: elf.R_RISCV = @enumFromInt(rel.r_type());
const r_offset = std.math.cast(usize, rel.r_offset) orelse return error.Overflow;
const cwriter = stream.writer();

const P, const A, const S, const GOT, const G, const TP, const DTP = args;
_ = TP;
@ -1913,7 +1880,7 @@ const riscv = struct {
switch (r_type) {
.NONE => unreachable,

.@"32" => try cwriter.writeInt(u32, @as(u32, @truncate(@as(u64, @intCast(S + A)))), .little),
.@"32" => try bw.writeInt(u32, @as(u32, @truncate(@as(u64, @intCast(S + A)))), .little),

.@"64" => {
try atom.resolveDynAbsReloc(
@ -1921,34 +1888,35 @@ const riscv = struct {
rel,
dynAbsRelocAction(target, elf_file),
elf_file,
cwriter,
bw,
);
},

.ADD32 => riscv_util.writeAddend(i32, .add, code[r_offset..][0..4], S + A),
.SUB32 => riscv_util.writeAddend(i32, .sub, code[r_offset..][0..4], S + A),
.ADD32 => try riscv_util.writeAddend(i32, .add, S + A, bw),
.SUB32 => try riscv_util.writeAddend(i32, .sub, S + A, bw),

.HI20 => {
const value: u32 = @bitCast(math.cast(i32, S + A) orelse return error.Overflow);
riscv_util.writeInstU(code[r_offset..][0..4], value);
riscv_util.writeInstU((try bw.writableSlice(4))[0..4], value);
},

.GOT_HI20 => {
assert(target.flags.has_got);
const disp: u32 = @bitCast(math.cast(i32, G + GOT + A - P) orelse return error.Overflow);
riscv_util.writeInstU(code[r_offset..][0..4], disp);
riscv_util.writeInstU((try bw.writableSlice(4))[0..4], disp);
},

.CALL_PLT => {
// TODO: relax
const disp: u32 = @bitCast(math.cast(i32, S + A - P) orelse return error.Overflow);
riscv_util.writeInstU(code[r_offset..][0..4], disp); // auipc
riscv_util.writeInstI(code[r_offset + 4 ..][0..4], disp); // jalr
const code = (try bw.writableSlice(8))[0..8];
riscv_util.writeInstU(code[0..4], disp); // auipc
riscv_util.writeInstI(code[4..8], disp); // jalr
},

.PCREL_HI20 => {
const disp: u32 = @bitCast(math.cast(i32, S + A - P) orelse return error.Overflow);
riscv_util.writeInstU(code[r_offset..][0..4], disp);
riscv_util.writeInstU((try bw.writableSlice(4))[0..4], disp);
},

.PCREL_LO12_I,
@ -1965,7 +1933,7 @@ const riscv = struct {
// TODO: implement searching forward
var err = try diags.addErrorWithNotes(1);
try err.addMsg("TODO: find HI20 paired reloc scanning forward", .{});
err.addNote("in {}:{s} at offset 0x{x}", .{
err.addNote("in {f}:{s} at offset 0x{x}", .{
atom.file(elf_file).?.fmtPath(),
atom.name(elf_file),
rel.r_offset,
@ -1986,8 +1954,8 @@ const riscv = struct {
};
relocs_log.debug(" [{x} => {x}]", .{ P_, disp + P_ });
switch (r_type) {
.PCREL_LO12_I => riscv_util.writeInstI(code[r_offset..][0..4], @bitCast(disp)),
.PCREL_LO12_S => riscv_util.writeInstS(code[r_offset..][0..4], @bitCast(disp)),
.PCREL_LO12_I => riscv_util.writeInstI((try bw.writableSlice(4))[0..4], @bitCast(disp)),
.PCREL_LO12_S => riscv_util.writeInstS((try bw.writableSlice(4))[0..4], @bitCast(disp)),
else => unreachable,
}
},
@ -1997,8 +1965,8 @@ const riscv = struct {
=> {
const disp: u32 = @bitCast(math.cast(i32, S + A) orelse return error.Overflow);
switch (r_type) {
.LO12_I => riscv_util.writeInstI(code[r_offset..][0..4], disp),
.LO12_S => riscv_util.writeInstS(code[r_offset..][0..4], disp),
.LO12_I => riscv_util.writeInstI((try bw.writableSlice(4))[0..4], disp),
.LO12_S => riscv_util.writeInstS((try bw.writableSlice(4))[0..4], disp),
else => unreachable,
}
},
@ -2006,7 +1974,7 @@ const riscv = struct {
.TPREL_HI20 => {
const target_addr: u32 = @intCast(target.address(.{}, elf_file));
const val: i32 = @intCast(S + A - target_addr);
riscv_util.writeInstU(code[r_offset..][0..4], @bitCast(val));
riscv_util.writeInstU((try bw.writableSlice(4))[0..4], @bitCast(val));
},

.TPREL_LO12_I,
@ -2015,8 +1983,8 @@ const riscv = struct {
const target_addr: u32 = @intCast(target.address(.{}, elf_file));
const val: i32 = @intCast(S + A - target_addr);
switch (r_type) {
.TPREL_LO12_I => riscv_util.writeInstI(code[r_offset..][0..4], @bitCast(val)),
.TPREL_LO12_S => riscv_util.writeInstS(code[r_offset..][0..4], @bitCast(val)),
.TPREL_LO12_I => riscv_util.writeInstI((try bw.writableSlice(4))[0..4], @bitCast(val)),
.TPREL_LO12_S => riscv_util.writeInstS((try bw.writableSlice(4))[0..4], @bitCast(val)),
else => unreachable,
}
},
@ -2035,15 +2003,9 @@ const riscv = struct {
rel: elf.Elf64_Rela,
target: *const Symbol,
args: ResolveArgs,
it: *RelocsIterator,
code: []u8,
stream: anytype,
) !void {
_ = it;

bw: *std.io.BufferedWriter,
) anyerror!void {
const r_type: elf.R_RISCV = @enumFromInt(rel.r_type());
const r_offset = std.math.cast(usize, rel.r_offset) orelse return error.Overflow;
const cwriter = stream.writer();

_, const A, const S, const GOT, _, _, const DTP = args;
_ = GOT;
@ -2052,30 +2014,30 @@ const riscv = struct {
switch (r_type) {
.NONE => unreachable,

.@"32" => try cwriter.writeInt(i32, @as(i32, @intCast(S + A)), .little),
.@"32" => try bw.writeInt(i32, @as(i32, @intCast(S + A)), .little),
.@"64" => if (atom.debugTombstoneValue(target.*, elf_file)) |value|
try cwriter.writeInt(u64, value, .little)
try bw.writeInt(u64, value, .little)
else
try cwriter.writeInt(i64, S + A, .little),
try bw.writeInt(i64, S + A, .little),

.ADD8 => riscv_util.writeAddend(i8, .add, code[r_offset..][0..1], S + A),
.SUB8 => riscv_util.writeAddend(i8, .sub, code[r_offset..][0..1], S + A),
.ADD16 => riscv_util.writeAddend(i16, .add, code[r_offset..][0..2], S + A),
.SUB16 => riscv_util.writeAddend(i16, .sub, code[r_offset..][0..2], S + A),
.ADD32 => riscv_util.writeAddend(i32, .add, code[r_offset..][0..4], S + A),
.SUB32 => riscv_util.writeAddend(i32, .sub, code[r_offset..][0..4], S + A),
.ADD64 => riscv_util.writeAddend(i64, .add, code[r_offset..][0..8], S + A),
.SUB64 => riscv_util.writeAddend(i64, .sub, code[r_offset..][0..8], S + A),
.ADD8 => try riscv_util.writeAddend(i8, .add, S + A, bw),
.SUB8 => try riscv_util.writeAddend(i8, .sub, S + A, bw),
.ADD16 => try riscv_util.writeAddend(i16, .add, S + A, bw),
.SUB16 => try riscv_util.writeAddend(i16, .sub, S + A, bw),
.ADD32 => try riscv_util.writeAddend(i32, .add, S + A, bw),
.SUB32 => try riscv_util.writeAddend(i32, .sub, S + A, bw),
.ADD64 => try riscv_util.writeAddend(i64, .add, S + A, bw),
.SUB64 => try riscv_util.writeAddend(i64, .sub, S + A, bw),

.SET8 => mem.writeInt(i8, code[r_offset..][0..1], @as(i8, @truncate(S + A)), .little),
.SET16 => mem.writeInt(i16, code[r_offset..][0..2], @as(i16, @truncate(S + A)), .little),
.SET32 => mem.writeInt(i32, code[r_offset..][0..4], @as(i32, @truncate(S + A)), .little),
.SET8 => try bw.writeInt(i8, @truncate(S + A), .little),
.SET16 => try bw.writeInt(i16, @truncate(S + A), .little),
.SET32 => try bw.writeInt(i32, @truncate(S + A), .little),

.SET6 => riscv_util.writeSetSub6(.set, code[r_offset..][0..1], S + A),
.SUB6 => riscv_util.writeSetSub6(.sub, code[r_offset..][0..1], S + A),
.SET6 => try riscv_util.writeSetSub6(.set, S + A, bw),
.SUB6 => try riscv_util.writeSetSub6(.sub, S + A, bw),

.SET_ULEB128 => try riscv_util.writeSetSubUleb(.set, stream, S + A),
.SUB_ULEB128 => try riscv_util.writeSetSubUleb(.sub, stream, S - A),
.SET_ULEB128 => try riscv_util.writeSetSubUleb(.set, S + A, bw),
.SUB_ULEB128 => try riscv_util.writeSetSubUleb(.sub, S - A, bw),

else => try atom.reportUnhandledRelocError(rel, elf_file),
}

@ -108,7 +108,7 @@ pub fn write(list: AtomList, buffer: *std.ArrayList(u8), undefs: anytype, elf_fi
const off = math.cast(usize, atom_ptr.value - list.value) orelse return error.Overflow;
const size = math.cast(usize, atom_ptr.size) orelse return error.Overflow;

log.debug("  atom({}) at 0x{x}", .{ ref, list.offset(elf_file) + off });
log.debug("  atom({f}) at 0x{x}", .{ ref, list.offset(elf_file) + off });

const object = atom_ptr.file(elf_file).?.object;
const code = try object.codeDecompressAlloc(elf_file, ref.index);
@ -144,7 +144,7 @@ pub fn writeRelocatable(list: AtomList, buffer: *std.ArrayList(u8), elf_file: *E
const off = math.cast(usize, atom_ptr.value - list.value) orelse return error.Overflow;
const size = math.cast(usize, atom_ptr.size) orelse return error.Overflow;

log.debug("  atom({}) at 0x{x}", .{ ref, list.offset(elf_file) + off });
log.debug("  atom({f}) at 0x{x}", .{ ref, list.offset(elf_file) + off });

const object = atom_ptr.file(elf_file).?.object;
const code = try object.codeDecompressAlloc(elf_file, ref.index);
@ -167,16 +167,10 @@ pub fn lastAtom(list: AtomList, elf_file: *Elf) *Atom {
return elf_file.atom(list.atoms.keys()[list.atoms.keys().len - 1]).?;
}

pub fn format(
list: AtomList,
comptime unused_fmt_string: []const u8,
options: std.fmt.FormatOptions,
writer: anytype,
) !void {
pub fn format(list: AtomList, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
_ = list;
_ = bw;
_ = unused_fmt_string;
_ = options;
_ = writer;
@compileError("do not format AtomList directly");
}

@ -186,25 +180,19 @@ pub fn fmt(list: AtomList, elf_file: *Elf) std.fmt.Formatter(format2) {
|
||||
return .{ .data = .{ list, elf_file } };
|
||||
}
|
||||
|
||||
fn format2(
|
||||
ctx: FormatCtx,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
fn format2(ctx: FormatCtx, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
|
||||
_ = unused_fmt_string;
|
||||
_ = options;
|
||||
const list, const elf_file = ctx;
|
||||
try writer.print("list : @{x} : shdr({d}) : align({x}) : size({x})", .{
|
||||
try bw.print("list : @{x} : shdr({d}) : align({x}) : size({x})", .{
|
||||
list.address(elf_file), list.output_section_index,
|
||||
list.alignment.toByteUnits() orelse 0, list.size,
|
||||
});
|
||||
try writer.writeAll(" : atoms{ ");
|
||||
try bw.writeAll(" : atoms{ ");
|
||||
for (list.atoms.keys(), 0..) |ref, i| {
|
||||
try writer.print("{}", .{ref});
|
||||
if (i < list.atoms.keys().len - 1) try writer.writeAll(", ");
|
||||
try bw.print("{f}", .{ref});
|
||||
if (i < list.atoms.keys().len - 1) try bw.writeAll(", ");
|
||||
}
|
||||
try writer.writeAll(" }");
|
||||
try bw.writeAll(" }");
|
||||
}
|
||||
|
||||
const assert = std.debug.assert;
|
||||
|
||||
@ -449,23 +449,17 @@ const FormatContext = struct {
|
||||
elf_file: *Elf,
|
||||
};
|
||||
|
||||
fn formatSymtab(
|
||||
ctx: FormatContext,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
fn formatSymtab(ctx: FormatContext, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
|
||||
_ = unused_fmt_string;
|
||||
_ = options;
|
||||
const self = ctx.self;
|
||||
const elf_file = ctx.elf_file;
|
||||
try writer.writeAll(" globals\n");
|
||||
try bw.writeAll(" globals\n");
|
||||
for (self.symbols.items, 0..) |sym, i| {
|
||||
const ref = self.resolveSymbol(@intCast(i), elf_file);
|
||||
if (elf_file.symbol(ref)) |ref_sym| {
|
||||
try writer.print(" {}\n", .{ref_sym.fmt(elf_file)});
|
||||
try bw.print(" {f}\n", .{ref_sym.fmt(elf_file)});
|
||||
} else {
|
||||
try writer.print(" {s} : unclaimed\n", .{sym.name(elf_file)});
|
||||
try bw.print(" {s} : unclaimed\n", .{sym.name(elf_file)});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -157,16 +157,10 @@ pub const Section = struct {
|
||||
}
|
||||
};
|
||||
|
||||
pub fn format(
|
||||
msec: Section,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
pub fn format(msec: Section, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
|
||||
_ = msec;
|
||||
_ = bw;
|
||||
_ = unused_fmt_string;
|
||||
_ = options;
|
||||
_ = writer;
|
||||
@compileError("do not format directly");
|
||||
}
|
||||
|
||||
@ -182,17 +176,11 @@ pub const Section = struct {
|
||||
elf_file: *Elf,
|
||||
};
|
||||
|
||||
pub fn format2(
|
||||
ctx: FormatContext,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
_ = options;
|
||||
pub fn format2(ctx: FormatContext, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
|
||||
_ = unused_fmt_string;
|
||||
const msec = ctx.msec;
|
||||
const elf_file = ctx.elf_file;
|
||||
try writer.print("{s} : @{x} : size({x}) : align({x}) : entsize({x}) : type({x}) : flags({x})\n", .{
|
||||
try bw.print("{s} : @{x} : size({x}) : align({x}) : entsize({x}) : type({x}) : flags({x})\n", .{
|
||||
msec.name(elf_file),
|
||||
msec.address(elf_file),
|
||||
msec.size,
|
||||
@ -202,7 +190,7 @@ pub const Section = struct {
|
||||
msec.flags,
|
||||
});
|
||||
for (msec.subsections.items) |msub| {
|
||||
try writer.print(" {}\n", .{msub.fmt(elf_file)});
|
||||
try bw.print(" {f}\n", .{msub.fmt(elf_file)});
|
||||
}
|
||||
}
|
||||
|
||||
@ -231,16 +219,10 @@ pub const Subsection = struct {
|
||||
return msec.bytes.items[msub.string_index..][0..msub.size];
|
||||
}
|
||||
|
||||
pub fn format(
|
||||
msub: Subsection,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
pub fn format(msub: Subsection, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
|
||||
_ = msub;
|
||||
_ = bw;
|
||||
_ = unused_fmt_string;
|
||||
_ = options;
|
||||
_ = writer;
|
||||
@compileError("do not format directly");
|
||||
}
|
||||
|
||||
@ -256,22 +238,16 @@ pub const Subsection = struct {
|
||||
elf_file: *Elf,
|
||||
};
|
||||
|
||||
pub fn format2(
|
||||
ctx: FormatContext,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
_ = options;
|
||||
pub fn format2(ctx: FormatContext, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
|
||||
_ = unused_fmt_string;
|
||||
const msub = ctx.msub;
|
||||
const elf_file = ctx.elf_file;
|
||||
try writer.print("@{x} : align({x}) : size({x})", .{
|
||||
try bw.print("@{x} : align({x}) : size({x})", .{
|
||||
msub.address(elf_file),
|
||||
msub.alignment,
|
||||
msub.size,
|
||||
});
|
||||
if (!msub.alive) try writer.writeAll(" : [*]");
|
||||
if (!msub.alive) try bw.writeAll(" : [*]");
|
||||
}
|
||||
|
||||
pub const Index = u32;
|
||||
|
||||
@ -281,7 +281,7 @@ fn initAtoms(
|
||||
elf.SHT_GROUP => {
|
||||
if (shdr.sh_info >= self.symtab.items.len) {
|
||||
// TODO convert into an error
|
||||
log.debug("{}: invalid symbol index in sh_info", .{self.fmtPath()});
|
||||
log.debug("{f}: invalid symbol index in sh_info", .{self.fmtPath()});
|
||||
continue;
|
||||
}
|
||||
const group_info_sym = self.symtab.items[shdr.sh_info];
|
||||
@ -448,7 +448,8 @@ fn parseEhFrame(
|
||||
const fdes_start = self.fdes.items.len;
|
||||
const cies_start = self.cies.items.len;
|
||||
|
||||
var it = eh_frame.Iterator{ .data = raw };
|
||||
var it: eh_frame.Iterator = undefined;
|
||||
it.br.initFixed(raw);
|
||||
while (try it.next()) |rec| {
|
||||
const rel_range = filterRelocs(self.relocs.items[rel_start..][0..relocs.len], rec.offset, rec.size + 4);
|
||||
switch (rec.tag) {
|
||||
@ -488,7 +489,7 @@ fn parseEhFrame(
|
||||
if (cie.offset == cie_ptr) break @as(u32, @intCast(cie_index));
|
||||
} else {
|
||||
// TODO convert into an error
|
||||
log.debug("{s}: no matching CIE found for FDE at offset {x}", .{
|
||||
log.debug("{f}: no matching CIE found for FDE at offset {x}", .{
|
||||
self.fmtPath(),
|
||||
fde.offset,
|
||||
});
|
||||
@ -582,7 +583,7 @@ pub fn scanRelocs(self: *Object, elf_file: *Elf, undefs: anytype) !void {
if (sym.flags.import) {
if (sym.type(elf_file) != elf.STT_FUNC)
// TODO convert into an error
log.debug("{s}: {s}: CIE referencing external data reference", .{
log.debug("{fs}: {s}: CIE referencing external data reference", .{
|
||||
self.fmtPath(), sym.name(elf_file),
|
||||
});
|
||||
sym.flags.needs_plt = true;
|
||||
@ -796,7 +797,7 @@ pub fn initInputMergeSections(self: *Object, elf_file: *Elf) !void {
|
||||
if (!isNull(data[end .. end + sh_entsize])) {
|
||||
var err = try diags.addErrorWithNotes(1);
|
||||
try err.addMsg("string not null terminated", .{});
|
||||
err.addNote("in {}:{s}", .{ self.fmtPath(), atom_ptr.name(elf_file) });
|
||||
err.addNote("in {f}:{s}", .{ self.fmtPath(), atom_ptr.name(elf_file) });
|
||||
return error.LinkFailure;
|
||||
}
|
||||
end += sh_entsize;
|
||||
@ -811,7 +812,7 @@ pub fn initInputMergeSections(self: *Object, elf_file: *Elf) !void {
|
||||
if (shdr.sh_size % sh_entsize != 0) {
|
||||
var err = try diags.addErrorWithNotes(1);
|
||||
try err.addMsg("size not a multiple of sh_entsize", .{});
|
||||
err.addNote("in {}:{s}", .{ self.fmtPath(), atom_ptr.name(elf_file) });
|
||||
err.addNote("in {f}:{s}", .{ self.fmtPath(), atom_ptr.name(elf_file) });
|
||||
return error.LinkFailure;
|
||||
}
|
||||
|
||||
@ -889,7 +890,7 @@ pub fn resolveMergeSubsections(self: *Object, elf_file: *Elf) error{
|
||||
var err = try diags.addErrorWithNotes(2);
|
||||
try err.addMsg("invalid symbol value: {x}", .{esym.st_value});
|
||||
err.addNote("for symbol {s}", .{sym.name(elf_file)});
|
||||
err.addNote("in {}", .{self.fmtPath()});
|
||||
err.addNote("in {f}", .{self.fmtPath()});
|
||||
return error.LinkFailure;
|
||||
};
|
||||
|
||||
@ -914,7 +915,7 @@ pub fn resolveMergeSubsections(self: *Object, elf_file: *Elf) error{
|
||||
const res = imsec.findSubsection(@intCast(@as(i64, @intCast(esym.st_value)) + rel.r_addend)) orelse {
|
||||
var err = try diags.addErrorWithNotes(1);
|
||||
try err.addMsg("invalid relocation at offset 0x{x}", .{rel.r_offset});
|
||||
err.addNote("in {}:{s}", .{ self.fmtPath(), atom_ptr.name(elf_file) });
|
||||
err.addNote("in {f}:{s}", .{ self.fmtPath(), atom_ptr.name(elf_file) });
|
||||
return error.LinkFailure;
|
||||
};
|
||||
|
||||
@ -952,7 +953,7 @@ pub fn convertCommonSymbols(self: *Object, elf_file: *Elf) !void {
|
||||
const is_tls = sym.type(elf_file) == elf.STT_TLS;
|
||||
const name = if (is_tls) ".tls_common" else ".common";
|
||||
const name_offset = @as(u32, @intCast(self.strtab.items.len));
|
||||
try self.strtab.writer(gpa).print("{s}\x00", .{name});
|
||||
try self.strtab.print(gpa, "{s}\x00", .{name});
|
||||
|
||||
var sh_flags: u32 = elf.SHF_ALLOC | elf.SHF_WRITE;
|
||||
if (is_tls) sh_flags |= elf.SHF_TLS;
|
||||
@ -1191,28 +1192,26 @@ pub fn codeDecompressAlloc(self: *Object, elf_file: *Elf, atom_index: Atom.Index
|
||||
const atom_ptr = self.atom(atom_index).?;
|
||||
const shdr = atom_ptr.inputShdr(elf_file);
|
||||
const handle = elf_file.fileHandle(self.file_handle);
|
||||
const data = try self.preadShdrContentsAlloc(gpa, handle, atom_ptr.input_section_index);
|
||||
defer if (shdr.sh_flags & elf.SHF_COMPRESSED != 0) gpa.free(data);
|
||||
var br: std.io.BufferedReader = undefined;
|
||||
br.initFixed(try self.preadShdrContentsAlloc(gpa, handle, atom_ptr.input_section_index));
|
||||
defer if (shdr.sh_flags & elf.SHF_COMPRESSED != 0) gpa.free(br.storageBuffer());
|
||||
|
||||
if (shdr.sh_flags & elf.SHF_COMPRESSED != 0) {
|
||||
const chdr = @as(*align(1) const elf.Elf64_Chdr, @ptrCast(data.ptr)).*;
|
||||
const chdr = (try br.takeStruct(elf.Elf64_Chdr)).*;
|
||||
switch (chdr.ch_type) {
|
||||
.ZLIB => {
|
||||
var stream = std.io.fixedBufferStream(data[@sizeOf(elf.Elf64_Chdr)..]);
|
||||
var zlib_stream = std.compress.zlib.decompressor(stream.reader());
|
||||
const size = std.math.cast(usize, chdr.ch_size) orelse return error.Overflow;
|
||||
const decomp = try gpa.alloc(u8, size);
|
||||
const nread = zlib_stream.reader().readAll(decomp) catch return error.InputOutput;
|
||||
if (nread != decomp.len) {
|
||||
return error.InputOutput;
|
||||
}
|
||||
return decomp;
|
||||
var bw: std.io.BufferedWriter = undefined;
|
||||
bw.initFixed(try gpa.alloc(u8, std.math.cast(usize, chdr.ch_size) orelse return error.Overflow));
|
||||
errdefer gpa.free(bw.buffer);
|
||||
try std.compress.zlib.decompress(&br, &bw);
|
||||
if (bw.end != bw.buffer.len) return error.InputOutput;
|
||||
return bw.buffer;
|
||||
},
|
||||
else => @panic("TODO unhandled compression scheme"),
|
||||
}
|
||||
}
|
||||
|
||||
return data;
|
||||
return br.storageBuffer();
|
||||
}
|
||||
|
||||
fn locals(self: *Object) []Symbol {
|
||||
@ -1432,16 +1431,10 @@ pub fn group(self: *Object, index: Elf.Group.Index) *Elf.Group {
|
||||
return &self.groups.items[index];
|
||||
}
|
||||
|
||||
pub fn format(
|
||||
self: *Object,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
pub fn format(self: *Object, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
|
||||
_ = self;
|
||||
_ = bw;
|
||||
_ = unused_fmt_string;
|
||||
_ = options;
|
||||
_ = writer;
|
||||
@compileError("do not format objects directly");
|
||||
}
|
||||
|
||||
@ -1457,28 +1450,22 @@ const FormatContext = struct {
|
||||
elf_file: *Elf,
|
||||
};
|
||||
|
||||
fn formatSymtab(
|
||||
ctx: FormatContext,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
fn formatSymtab(ctx: FormatContext, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
|
||||
_ = unused_fmt_string;
|
||||
_ = options;
|
||||
const object = ctx.object;
|
||||
const elf_file = ctx.elf_file;
|
||||
try writer.writeAll(" locals\n");
|
||||
try bw.writeAll(" locals\n");
|
||||
for (object.locals()) |sym| {
|
||||
try writer.print(" {}\n", .{sym.fmt(elf_file)});
|
||||
try bw.print(" {f}\n", .{sym.fmt(elf_file)});
|
||||
}
|
||||
try writer.writeAll(" globals\n");
|
||||
try bw.writeAll(" globals\n");
|
||||
for (object.globals(), 0..) |sym, i| {
|
||||
const first_global = object.first_global.?;
|
||||
const ref = object.resolveSymbol(@intCast(i + first_global), elf_file);
|
||||
if (elf_file.symbol(ref)) |ref_sym| {
|
||||
try writer.print(" {}\n", .{ref_sym.fmt(elf_file)});
|
||||
try bw.print(" {f}\n", .{ref_sym.fmt(elf_file)});
|
||||
} else {
|
||||
try writer.print(" {s} : unclaimed\n", .{sym.name(elf_file)});
|
||||
try bw.print(" {s} : unclaimed\n", .{sym.name(elf_file)});
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1490,19 +1477,13 @@ pub fn fmtAtoms(self: *Object, elf_file: *Elf) std.fmt.Formatter(formatAtoms) {
|
||||
} };
|
||||
}
|
||||
|
||||
fn formatAtoms(
|
||||
ctx: FormatContext,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
fn formatAtoms(ctx: FormatContext, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
|
||||
_ = unused_fmt_string;
|
||||
_ = options;
|
||||
const object = ctx.object;
|
||||
try writer.writeAll(" atoms\n");
|
||||
try bw.writeAll(" atoms\n");
|
||||
for (object.atoms_indexes.items) |atom_index| {
|
||||
const atom_ptr = object.atom(atom_index) orelse continue;
|
||||
try writer.print(" {}\n", .{atom_ptr.fmt(ctx.elf_file)});
|
||||
try bw.print(" {f}\n", .{atom_ptr.fmt(ctx.elf_file)});
|
||||
}
|
||||
}
|
||||
|
||||
@ -1513,18 +1494,12 @@ pub fn fmtCies(self: *Object, elf_file: *Elf) std.fmt.Formatter(formatCies) {
|
||||
} };
|
||||
}
|
||||
|
||||
fn formatCies(
|
||||
ctx: FormatContext,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
fn formatCies(ctx: FormatContext, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
|
||||
_ = unused_fmt_string;
|
||||
_ = options;
|
||||
const object = ctx.object;
|
||||
try writer.writeAll(" cies\n");
|
||||
try bw.writeAll(" cies\n");
|
||||
for (object.cies.items, 0..) |cie, i| {
|
||||
try writer.print(" cie({d}) : {}\n", .{ i, cie.fmt(ctx.elf_file) });
|
||||
try bw.print(" cie({d}) : {f}\n", .{ i, cie.fmt(ctx.elf_file) });
|
||||
}
|
||||
}
|
||||
|
||||
@ -1535,18 +1510,12 @@ pub fn fmtFdes(self: *Object, elf_file: *Elf) std.fmt.Formatter(formatFdes) {
|
||||
} };
|
||||
}
|
||||
|
||||
fn formatFdes(
|
||||
ctx: FormatContext,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
fn formatFdes(ctx: FormatContext, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
|
||||
_ = unused_fmt_string;
|
||||
_ = options;
|
||||
const object = ctx.object;
|
||||
try writer.writeAll(" fdes\n");
|
||||
try bw.writeAll(" fdes\n");
|
||||
for (object.fdes.items, 0..) |fde, i| {
|
||||
try writer.print(" fde({d}) : {}\n", .{ i, fde.fmt(ctx.elf_file) });
|
||||
try bw.print(" fde({d}) : {f}\n", .{ i, fde.fmt(ctx.elf_file) });
|
||||
}
|
||||
}
|
||||
|
||||
@ -1557,26 +1526,20 @@ pub fn fmtGroups(self: *Object, elf_file: *Elf) std.fmt.Formatter(formatGroups)
|
||||
} };
|
||||
}
|
||||
|
||||
fn formatGroups(
|
||||
ctx: FormatContext,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
fn formatGroups(ctx: FormatContext, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
|
||||
_ = unused_fmt_string;
|
||||
_ = options;
|
||||
const object = ctx.object;
|
||||
const elf_file = ctx.elf_file;
|
||||
try writer.writeAll(" groups\n");
|
||||
try bw.writeAll(" groups\n");
|
||||
for (object.groups.items, 0..) |g, g_index| {
|
||||
try writer.print(" {s}({d})", .{ if (g.is_comdat) "COMDAT" else "GROUP", g_index });
|
||||
if (!g.alive) try writer.writeAll(" : [*]");
|
||||
try writer.writeByte('\n');
|
||||
try bw.print(" {s}({d})", .{ if (g.is_comdat) "COMDAT" else "GROUP", g_index });
|
||||
if (!g.alive) try bw.writeAll(" : [*]");
|
||||
try bw.writeByte('\n');
|
||||
const g_members = g.members(elf_file);
|
||||
for (g_members) |shndx| {
|
||||
const atom_index = object.atoms_indexes.items[shndx];
|
||||
const atom_ptr = object.atom(atom_index) orelse continue;
|
||||
try writer.print(" atom({d}) : {s}\n", .{ atom_index, atom_ptr.name(elf_file) });
|
||||
try bw.print(" atom({d}) : {s}\n", .{ atom_index, atom_ptr.name(elf_file) });
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1585,18 +1548,12 @@ pub fn fmtPath(self: Object) std.fmt.Formatter(formatPath) {
|
||||
return .{ .data = self };
|
||||
}
|
||||
|
||||
fn formatPath(
|
||||
object: Object,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
fn formatPath(object: Object, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
|
||||
_ = unused_fmt_string;
|
||||
_ = options;
|
||||
if (object.archive) |ar| {
|
||||
try writer.print("{}({})", .{ ar.path, object.path });
|
||||
try bw.print("{f}({f})", .{ ar.path, object.path });
|
||||
} else {
|
||||
try writer.print("{}", .{object.path});
|
||||
try bw.print("{f}", .{object.path});
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -509,16 +509,10 @@ pub fn setSymbolExtra(self: *SharedObject, index: u32, extra: Symbol.Extra) void
|
||||
}
|
||||
}
|
||||
|
||||
pub fn format(
|
||||
self: SharedObject,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
pub fn format(self: SharedObject, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
|
||||
_ = self;
|
||||
_ = bw;
|
||||
_ = unused_fmt_string;
|
||||
_ = options;
|
||||
_ = writer;
|
||||
@compileError("unreachable");
|
||||
}
|
||||
|
||||
@ -534,23 +528,17 @@ const FormatContext = struct {
|
||||
elf_file: *Elf,
|
||||
};
|
||||
|
||||
fn formatSymtab(
|
||||
ctx: FormatContext,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
fn formatSymtab(ctx: FormatContext, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
|
||||
_ = unused_fmt_string;
|
||||
_ = options;
|
||||
const shared = ctx.shared;
|
||||
const elf_file = ctx.elf_file;
|
||||
try writer.writeAll(" globals\n");
|
||||
try bw.writeAll(" globals\n");
|
||||
for (shared.symbols.items, 0..) |sym, i| {
|
||||
const ref = shared.resolveSymbol(@intCast(i), elf_file);
|
||||
if (elf_file.symbol(ref)) |ref_sym| {
|
||||
try writer.print(" {}\n", .{ref_sym.fmt(elf_file)});
|
||||
try bw.print(" {f}\n", .{ref_sym.fmt(elf_file)});
|
||||
} else {
|
||||
try writer.print(" {s} : unclaimed\n", .{sym.name(elf_file)});
|
||||
try bw.print(" {s} : unclaimed\n", .{sym.name(elf_file)});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -316,16 +316,10 @@ pub fn setOutputSym(symbol: Symbol, elf_file: *Elf, out: *elf.Elf64_Sym) void {
|
||||
out.st_size = esym.st_size;
|
||||
}
|
||||
|
||||
pub fn format(
|
||||
symbol: Symbol,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
pub fn format(symbol: Symbol, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
|
||||
_ = symbol;
|
||||
_ = bw;
|
||||
_ = unused_fmt_string;
|
||||
_ = options;
|
||||
_ = writer;
|
||||
@compileError("do not format Symbol directly");
|
||||
}
|
||||
|
||||
@ -341,24 +335,18 @@ pub fn fmtName(symbol: Symbol, elf_file: *Elf) std.fmt.Formatter(formatName) {
|
||||
} };
|
||||
}
|
||||
|
||||
fn formatName(
|
||||
ctx: FormatContext,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
_ = options;
|
||||
fn formatName(ctx: FormatContext, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
|
||||
_ = unused_fmt_string;
|
||||
const elf_file = ctx.elf_file;
|
||||
const symbol = ctx.symbol;
|
||||
try writer.writeAll(symbol.name(elf_file));
|
||||
try bw.writeAll(symbol.name(elf_file));
|
||||
switch (symbol.version_index.VERSION) {
|
||||
@intFromEnum(elf.VER_NDX.LOCAL), @intFromEnum(elf.VER_NDX.GLOBAL) => {},
|
||||
else => {
|
||||
const file_ptr = symbol.file(elf_file).?;
|
||||
assert(file_ptr == .shared_object);
|
||||
const shared_object = file_ptr.shared_object;
|
||||
try writer.print("@{s}", .{shared_object.versionString(symbol.version_index)});
|
||||
try bw.print("@{s}", .{shared_object.versionString(symbol.version_index)});
|
||||
},
|
||||
}
|
||||
}
|
||||
@ -370,17 +358,11 @@ pub fn fmt(symbol: Symbol, elf_file: *Elf) std.fmt.Formatter(format2) {
|
||||
} };
|
||||
}
|
||||
|
||||
fn format2(
|
||||
ctx: FormatContext,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
_ = options;
|
||||
fn format2(ctx: FormatContext, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
|
||||
_ = unused_fmt_string;
|
||||
const symbol = ctx.symbol;
|
||||
const elf_file = ctx.elf_file;
|
||||
try writer.print("%{d} : {s} : @{x}", .{
|
||||
try bw.print("%{d} : {f} : @{x}", .{
|
||||
symbol.esym_index,
|
||||
symbol.fmtName(elf_file),
|
||||
symbol.address(.{ .plt = false, .trampoline = false }, elf_file),
|
||||
@ -388,25 +370,25 @@ fn format2(
|
||||
if (symbol.file(elf_file)) |file_ptr| {
|
||||
if (symbol.isAbs(elf_file)) {
|
||||
if (symbol.elfSym(elf_file).st_shndx == elf.SHN_UNDEF) {
|
||||
try writer.writeAll(" : undef");
|
||||
try bw.writeAll(" : undef");
|
||||
} else {
|
||||
try writer.writeAll(" : absolute");
|
||||
try bw.writeAll(" : absolute");
|
||||
}
|
||||
} else if (symbol.outputShndx(elf_file)) |shndx| {
|
||||
try writer.print(" : shdr({d})", .{shndx});
|
||||
try bw.print(" : shdr({d})", .{shndx});
|
||||
}
|
||||
if (symbol.atom(elf_file)) |atom_ptr| {
|
||||
try writer.print(" : atom({d})", .{atom_ptr.atom_index});
|
||||
try bw.print(" : atom({d})", .{atom_ptr.atom_index});
|
||||
}
|
||||
var buf: [2]u8 = .{'_'} ** 2;
|
||||
if (symbol.flags.@"export") buf[0] = 'E';
|
||||
if (symbol.flags.import) buf[1] = 'I';
|
||||
try writer.print(" : {s}", .{&buf});
|
||||
if (symbol.flags.weak) try writer.writeAll(" : weak");
|
||||
try bw.print(" : {s}", .{&buf});
|
||||
if (symbol.flags.weak) try bw.writeAll(" : weak");
|
||||
switch (file_ptr) {
|
||||
inline else => |x| try writer.print(" : {s}({d})", .{ @tagName(file_ptr), x.index }),
|
||||
inline else => |x| try bw.print(" : {s}({d})", .{ @tagName(file_ptr), x.index }),
|
||||
}
|
||||
} else try writer.writeAll(" : unresolved");
|
||||
} else try bw.writeAll(" : unresolved");
|
||||
}
|
||||
|
||||
pub const Flags = packed struct {
|
||||
|
||||
@ -65,16 +65,10 @@ fn trampolineSize(cpu_arch: std.Target.Cpu.Arch) usize {
|
||||
};
|
||||
}
|
||||
|
||||
pub fn format(
|
||||
thunk: Thunk,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
pub fn format(thunk: Thunk, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
|
||||
_ = thunk;
|
||||
_ = bw;
|
||||
_ = unused_fmt_string;
|
||||
_ = options;
|
||||
_ = writer;
|
||||
@compileError("do not format Thunk directly");
|
||||
}
|
||||
|
||||
@ -90,20 +84,14 @@ const FormatContext = struct {
|
||||
elf_file: *Elf,
|
||||
};
|
||||
|
||||
fn format2(
|
||||
ctx: FormatContext,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
_ = options;
|
||||
fn format2(ctx: FormatContext, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
|
||||
_ = unused_fmt_string;
|
||||
const thunk = ctx.thunk;
|
||||
const elf_file = ctx.elf_file;
|
||||
try writer.print("@{x} : size({x})\n", .{ thunk.value, thunk.size(elf_file) });
|
||||
try bw.print("@{x} : size({x})\n", .{ thunk.value, thunk.size(elf_file) });
|
||||
for (thunk.symbols.keys()) |ref| {
|
||||
const sym = elf_file.symbol(ref).?;
|
||||
try writer.print(" {} : {s} : @{x}\n", .{ ref, sym.name(elf_file), sym.value });
|
||||
try bw.print(" {f} : {s} : @{x}\n", .{ ref, sym.name(elf_file), sym.value });
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -35,9 +35,6 @@ lazy_syms: LazySymbolTable = .{},
|
||||
/// Table of tracked `Nav`s.
|
||||
navs: NavTable = .{},
|
||||
|
||||
/// TLS variables indexed by Atom.Index.
|
||||
tls_variables: TlsTable = .{},
|
||||
|
||||
/// Table of tracked `Uav`s.
|
||||
uavs: UavTable = .{},
|
||||
|
||||
@ -257,7 +254,6 @@ pub fn deinit(self: *ZigObject, allocator: Allocator) void {
|
||||
meta.exports.deinit(allocator);
|
||||
}
|
||||
self.uavs.deinit(allocator);
|
||||
self.tls_variables.deinit(allocator);
|
||||
|
||||
if (self.dwarf) |*dwarf| {
|
||||
dwarf.deinit();
|
||||
@ -925,7 +921,7 @@ pub fn getNavVAddr(
|
||||
const zcu = pt.zcu;
|
||||
const ip = &zcu.intern_pool;
|
||||
const nav = ip.getNav(nav_index);
|
||||
log.debug("getNavVAddr {}({d})", .{ nav.fqn.fmt(ip), nav_index });
|
||||
log.debug("getNavVAddr {f}({d})", .{ nav.fqn.fmt(ip), nav_index });
|
||||
const this_sym_index = if (nav.getExtern(ip)) |@"extern"| try self.getGlobalSymbol(
|
||||
elf_file,
|
||||
nav.name.toSlice(ip),
|
||||
@ -1268,7 +1264,7 @@ fn updateNavCode(
|
||||
const ip = &zcu.intern_pool;
|
||||
const nav = ip.getNav(nav_index);
|
||||
|
||||
log.debug("updateNavCode {}({d})", .{ nav.fqn.fmt(ip), nav_index });
|
||||
log.debug("updateNavCode {f}({d})", .{ nav.fqn.fmt(ip), nav_index });
|
||||
|
||||
const target = &zcu.navFileScope(nav_index).mod.?.resolved_target.result;
|
||||
const required_alignment = switch (pt.navAlignment(nav_index)) {
|
||||
@ -1302,7 +1298,7 @@ fn updateNavCode(
|
||||
self.allocateAtom(atom_ptr, true, elf_file) catch |err|
|
||||
return elf_file.base.cgFail(nav_index, "failed to allocate atom: {s}", .{@errorName(err)});
|
||||
|
||||
log.debug("growing {} from 0x{x} to 0x{x}", .{ nav.fqn.fmt(ip), old_vaddr, atom_ptr.value });
|
||||
log.debug("growing {f} from 0x{x} to 0x{x}", .{ nav.fqn.fmt(ip), old_vaddr, atom_ptr.value });
|
||||
if (old_vaddr != atom_ptr.value) {
|
||||
sym.value = 0;
|
||||
esym.st_value = 0;
|
||||
@ -1347,7 +1343,7 @@ fn updateNavCode(
|
||||
const file_offset = atom_ptr.offset(elf_file);
|
||||
elf_file.base.file.?.pwriteAll(code, file_offset) catch |err|
|
||||
return elf_file.base.cgFail(nav_index, "failed to write to output file: {s}", .{@errorName(err)});
|
||||
log.debug("writing {} from 0x{x} to 0x{x}", .{ nav.fqn.fmt(ip), file_offset, file_offset + code.len });
|
||||
log.debug("writing {f} from 0x{x} to 0x{x}", .{ nav.fqn.fmt(ip), file_offset, file_offset + code.len });
|
||||
}
|
||||
}
|
||||
|
||||
@ -1365,7 +1361,7 @@ fn updateTlv(
|
||||
const gpa = zcu.gpa;
|
||||
const nav = ip.getNav(nav_index);
|
||||
|
||||
log.debug("updateTlv {}({d})", .{ nav.fqn.fmt(ip), nav_index });
|
||||
log.debug("updateTlv {f}({d})", .{ nav.fqn.fmt(ip), nav_index });
|
||||
|
||||
const required_alignment = pt.navAlignment(nav_index);
|
||||
|
||||
@ -1386,9 +1382,6 @@ fn updateTlv(
|
||||
atom_ptr.alignment = required_alignment;
|
||||
atom_ptr.size = code.len;
|
||||
|
||||
const gop = try self.tls_variables.getOrPut(gpa, atom_ptr.atom_index);
|
||||
assert(!gop.found_existing); // TODO incremental updates
|
||||
|
||||
self.allocateAtom(atom_ptr, true, elf_file) catch |err|
|
||||
return elf_file.base.cgFail(nav_index, "failed to allocate atom: {s}", .{@errorName(err)});
|
||||
sym.value = 0;
|
||||
@ -1424,7 +1417,7 @@ pub fn updateFunc(
|
||||
const gpa = elf_file.base.comp.gpa;
|
||||
const func = zcu.funcInfo(func_index);
|
||||
|
||||
log.debug("updateFunc {}({d})", .{ ip.getNav(func.owner_nav).fqn.fmt(ip), func.owner_nav });
|
||||
log.debug("updateFunc {f}({d})", .{ ip.getNav(func.owner_nav).fqn.fmt(ip), func.owner_nav });
|
||||
|
||||
const sym_index = try self.getOrCreateMetadataForNav(zcu, func.owner_nav);
|
||||
self.atom(self.symbol(sym_index).ref.index).?.freeRelocs(self);
|
||||
@ -1447,7 +1440,7 @@ pub fn updateFunc(
|
||||
const code = code_buffer.items;
|
||||
|
||||
const shndx = try self.getNavShdrIndex(elf_file, zcu, func.owner_nav, sym_index, code);
|
||||
log.debug("setting shdr({x},{s}) for {}", .{
|
||||
log.debug("setting shdr({x},{s}) for {f}", .{
|
||||
shndx,
|
||||
elf_file.getShString(elf_file.sections.items(.shdr)[shndx].sh_name),
|
||||
ip.getNav(func.owner_nav).fqn.fmt(ip),
|
||||
@ -1529,7 +1522,7 @@ pub fn updateNav(
|
||||
const ip = &zcu.intern_pool;
|
||||
const nav = ip.getNav(nav_index);
|
||||
|
||||
log.debug("updateNav {}({d})", .{ nav.fqn.fmt(ip), nav_index });
|
||||
log.debug("updateNav {f}({d})", .{ nav.fqn.fmt(ip), nav_index });
|
||||
|
||||
const nav_init = switch (ip.indexToKey(nav.status.fully_resolved.val)) {
|
||||
.func => .none,
|
||||
@ -1546,7 +1539,6 @@ pub fn updateNav(
|
||||
defer debug_wip_nav.deinit();
|
||||
dwarf.finishWipNav(pt, nav_index, &debug_wip_nav) catch |err| switch (err) {
|
||||
error.OutOfMemory => return error.OutOfMemory,
|
||||
error.Overflow => return error.Overflow,
|
||||
else => |e| return elf_file.base.cgFail(nav_index, "failed to finish dwarf nav: {s}", .{@errorName(e)}),
|
||||
};
|
||||
}
|
||||
@ -1576,7 +1568,7 @@ pub fn updateNav(
|
||||
const code = code_buffer.items;
|
||||
|
||||
const shndx = try self.getNavShdrIndex(elf_file, zcu, nav_index, sym_index, code);
|
||||
log.debug("setting shdr({x},{s}) for {}", .{
|
||||
log.debug("setting shdr({x},{s}) for {f}", .{
|
||||
shndx,
|
||||
elf_file.getShString(elf_file.sections.items(.shdr)[shndx].sh_name),
|
||||
nav.fqn.fmt(ip),
|
||||
@ -1588,7 +1580,6 @@ pub fn updateNav(
|
||||
|
||||
if (debug_wip_nav) |*wip_nav| self.dwarf.?.finishWipNav(pt, nav_index, wip_nav) catch |err| switch (err) {
|
||||
error.OutOfMemory => return error.OutOfMemory,
|
||||
error.Overflow => return error.Overflow,
|
||||
else => |e| return elf_file.base.cgFail(nav_index, "failed to finish dwarf nav: {s}", .{@errorName(e)}),
|
||||
};
|
||||
} else if (self.dwarf) |*dwarf| try dwarf.updateComptimeNav(pt, nav_index);
|
||||
@ -1622,7 +1613,7 @@ fn updateLazySymbol(
|
||||
defer code_buffer.deinit(gpa);
|
||||
|
||||
const name_str_index = blk: {
|
||||
const name = try std.fmt.allocPrint(gpa, "__lazy_{s}_{}", .{
|
||||
const name = try std.fmt.allocPrint(gpa, "__lazy_{s}_{f}", .{
|
||||
@tagName(sym.kind),
|
||||
Type.fromInterned(sym.ty).fmt(pt),
|
||||
});
|
||||
@ -2207,25 +2198,19 @@ const FormatContext = struct {
|
||||
elf_file: *Elf,
|
||||
};
|
||||
|
||||
fn formatSymtab(
|
||||
ctx: FormatContext,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
fn formatSymtab(ctx: FormatContext, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
|
||||
_ = unused_fmt_string;
|
||||
_ = options;
|
||||
const self = ctx.self;
|
||||
const elf_file = ctx.elf_file;
|
||||
try writer.writeAll(" locals\n");
|
||||
try bw.writeAll(" locals\n");
|
||||
for (self.local_symbols.items) |index| {
|
||||
const local = self.symbols.items[index];
|
||||
try writer.print(" {}\n", .{local.fmt(elf_file)});
|
||||
try bw.print(" {f}\n", .{local.fmt(elf_file)});
|
||||
}
|
||||
try writer.writeAll(" globals\n");
|
||||
try bw.writeAll(" globals\n");
|
||||
for (ctx.self.global_symbols.items) |index| {
|
||||
const global = self.symbols.items[index];
|
||||
try writer.print(" {}\n", .{global.fmt(elf_file)});
|
||||
try bw.print(" {f}\n", .{global.fmt(elf_file)});
|
||||
}
|
||||
}
|
||||
|
||||
@ -2236,18 +2221,12 @@ pub fn fmtAtoms(self: *ZigObject, elf_file: *Elf) std.fmt.Formatter(formatAtoms)
|
||||
} };
|
||||
}
|
||||
|
||||
fn formatAtoms(
|
||||
ctx: FormatContext,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
fn formatAtoms(ctx: FormatContext, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
|
||||
_ = unused_fmt_string;
|
||||
_ = options;
|
||||
try writer.writeAll(" atoms\n");
|
||||
try bw.writeAll(" atoms\n");
|
||||
for (ctx.self.atoms_indexes.items) |atom_index| {
|
||||
const atom_ptr = ctx.self.atom(atom_index) orelse continue;
|
||||
try writer.print(" {}\n", .{atom_ptr.fmt(ctx.elf_file)});
|
||||
try bw.print(" {f}\n", .{atom_ptr.fmt(ctx.elf_file)});
|
||||
}
|
||||
}
|
||||
|
||||
@ -2285,7 +2264,7 @@ fn checkNavAllocated(pt: Zcu.PerThread, index: InternPool.Nav.Index, meta: AvMet
|
||||
const zcu = pt.zcu;
|
||||
const ip = &zcu.intern_pool;
|
||||
const nav = ip.getNav(index);
|
||||
log.err("NAV {}({d}) assigned symbol {d} but not allocated!", .{
|
||||
log.err("NAV {f}({d}) assigned symbol {d} but not allocated!", .{
|
||||
nav.fqn.fmt(ip),
|
||||
index,
|
||||
meta.symbol_index,
|
||||
@ -2298,7 +2277,7 @@ fn checkUavAllocated(pt: Zcu.PerThread, index: InternPool.Index, meta: AvMetadat
|
||||
const zcu = pt.zcu;
|
||||
const uav = Value.fromInterned(index);
|
||||
const ty = uav.typeOf(zcu);
|
||||
log.err("UAV {}({d}) assigned symbol {d} but not allocated!", .{
|
||||
log.err("UAV {f}({d}) assigned symbol {d} but not allocated!", .{
|
||||
ty.fmt(pt),
|
||||
index,
|
||||
meta.symbol_index,
|
||||
|
||||
@ -49,14 +49,12 @@ pub const Fde = struct {
|
||||
|
||||
pub fn format(
|
||||
fde: Fde,
|
||||
bw: *std.io.BufferedWriter,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: *std.io.BufferedWriter,
|
||||
) !void {
|
||||
_ = fde;
|
||||
_ = unused_fmt_string;
|
||||
_ = options;
|
||||
_ = writer;
|
||||
_ = bw;
|
||||
@compileError("do not format FDEs directly");
|
||||
}
|
||||
|
||||
@ -74,24 +72,22 @@ pub const Fde = struct {
|
||||
|
||||
fn format2(
|
||||
ctx: FdeFormatContext,
|
||||
bw: *std.io.BufferedWriter,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: *std.io.BufferedWriter,
|
||||
) !void {
|
||||
_ = unused_fmt_string;
|
||||
_ = options;
|
||||
const fde = ctx.fde;
|
||||
const elf_file = ctx.elf_file;
|
||||
const base_addr = fde.address(elf_file);
|
||||
const object = elf_file.file(fde.file_index).?.object;
|
||||
const atom_name = fde.atom(object).name(elf_file);
|
||||
try writer.print("@{x} : size({x}) : cie({d}) : {s}", .{
|
||||
try bw.print("@{x} : size({x}) : cie({d}) : {s}", .{
|
||||
base_addr + fde.out_offset,
|
||||
fde.calcSize(),
|
||||
fde.cie_index,
|
||||
atom_name,
|
||||
});
|
||||
if (!fde.alive) try writer.writeAll(" : [*]");
|
||||
if (!fde.alive) try bw.writeAll(" : [*]");
|
||||
}
|
||||
};
|
||||
|
||||
@ -152,14 +148,12 @@ pub const Cie = struct {
|
||||
|
||||
pub fn format(
|
||||
cie: Cie,
|
||||
bw: *std.io.BufferedWriter,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: *std.io.BufferedWriter,
|
||||
) !void {
|
||||
_ = cie;
|
||||
_ = unused_fmt_string;
|
||||
_ = options;
|
||||
_ = writer;
|
||||
_ = bw;
|
||||
@compileError("do not format CIEs directly");
|
||||
}
|
||||
|
||||
@ -177,26 +171,23 @@ pub const Cie = struct {
|
||||
|
||||
fn format2(
|
||||
ctx: CieFormatContext,
|
||||
bw: *std.io.BufferedWriter,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: *std.io.BufferedWriter,
|
||||
) !void {
|
||||
_ = unused_fmt_string;
|
||||
_ = options;
|
||||
const cie = ctx.cie;
|
||||
const elf_file = ctx.elf_file;
|
||||
const base_addr = cie.address(elf_file);
|
||||
try writer.print("@{x} : size({x})", .{
|
||||
try bw.print("@{x} : size({x})", .{
|
||||
base_addr + cie.out_offset,
|
||||
cie.calcSize(),
|
||||
});
|
||||
if (!cie.alive) try writer.writeAll(" : [*]");
|
||||
if (!cie.alive) try bw.writeAll(" : [*]");
|
||||
}
|
||||
};
|
||||
|
||||
pub const Iterator = struct {
data: []const u8,
pos: usize = 0,
br: std.io.BufferedReader,

pub const Record = struct {
tag: enum { fde, cie },
@ -205,22 +196,18 @@ pub const Iterator = struct {
};

pub fn next(it: *Iterator) !?Record {
if (it.pos >= it.data.len) return null;
if (it.br.seek >= it.br.storageBuffer().len) return null;

var stream = std.io.fixedBufferStream(it.data[it.pos..]);
const reader = stream.reader();
const size = try it.br.takeInt(u32, .little);
if (size == 0xFFFFFFFF) @panic("DWARF CFI is 32bit on macOS");

const size = try reader.readInt(u32, .little);
if (size == 0) return null;
if (size == 0xFFFFFFFF) @panic("TODO");

const id = try reader.readInt(u32, .little);
const record = Record{
const id = try it.br.takeInt(u32, .little);
const record: Record = .{
.tag = if (id == 0) .cie else .fde,
.offset = it.pos,
.offset = it.br.seek,
.size = size,
};
it.pos += size + 4;
try it.br.discard(size);

return record;
}
@ -316,7 +303,7 @@ fn resolveReloc(rec: anytype, sym: *const Symbol, rel: elf.Elf64_Rela, elf_file:
|
||||
const S = math.cast(i64, sym.address(.{}, elf_file)) orelse return error.Overflow;
|
||||
const A = rel.r_addend;
|
||||
|
||||
relocs_log.debug(" {s}: {x}: [{x} => {x}] ({s})", .{
|
||||
relocs_log.debug(" {f}: {x}: [{x} => {x}] ({s})", .{
|
||||
relocation.fmtRelocType(rel.r_type(), cpu_arch),
|
||||
offset,
|
||||
P,
|
||||
@ -332,7 +319,7 @@ fn resolveReloc(rec: anytype, sym: *const Symbol, rel: elf.Elf64_Rela, elf_file:
|
||||
}
|
||||
}
|
||||
|
||||
pub fn writeEhFrame(elf_file: *Elf, writer: *std.io.BufferedWriter) !void {
|
||||
pub fn writeEhFrame(elf_file: *Elf, bw: *std.io.BufferedWriter) !void {
|
||||
relocs_log.debug("{x}: .eh_frame", .{
|
||||
elf_file.sections.items(.shdr)[elf_file.section_indexes.eh_frame.?].sh_addr,
|
||||
});
|
||||
@ -356,7 +343,7 @@ pub fn writeEhFrame(elf_file: *Elf, writer: *std.io.BufferedWriter) !void {
|
||||
};
|
||||
}
|
||||
|
||||
try writer.writeAll(contents);
|
||||
try bw.writeAll(contents);
|
||||
}
|
||||
}
|
||||
|
||||
@ -384,22 +371,22 @@ pub fn writeEhFrame(elf_file: *Elf, writer: *std.io.BufferedWriter) !void {
|
||||
};
|
||||
}
|
||||
|
||||
try writer.writeAll(contents);
|
||||
try bw.writeAll(contents);
|
||||
}
|
||||
}
|
||||
|
||||
try writer.writeInt(u32, 0, .little);
|
||||
try bw.writeInt(u32, 0, .little);
|
||||
|
||||
if (has_reloc_errors) return error.RelocFailure;
|
||||
}
|
||||
|
||||
pub fn writeEhFrameRelocatable(elf_file: *Elf, writer: *std.io.BufferedWriter) !void {
|
||||
pub fn writeEhFrameRelocatable(elf_file: *Elf, bw: *std.io.BufferedWriter) !void {
|
||||
for (elf_file.objects.items) |index| {
|
||||
const object = elf_file.file(index).?.object;
|
||||
|
||||
for (object.cies.items) |cie| {
|
||||
if (!cie.alive) continue;
|
||||
try writer.writeAll(cie.data(elf_file));
|
||||
try bw.writeAll(cie.data(elf_file));
|
||||
}
|
||||
}
|
||||
|
||||
@ -418,7 +405,7 @@ pub fn writeEhFrameRelocatable(elf_file: *Elf, writer: *std.io.BufferedWriter) !
|
||||
.little,
|
||||
);
|
||||
|
||||
try writer.writeAll(contents);
|
||||
try bw.writeAll(contents);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -438,7 +425,7 @@ fn emitReloc(elf_file: *Elf, r_offset: u64, sym: *const Symbol, rel: elf.Elf64_R
|
||||
},
|
||||
}
|
||||
|
||||
relocs_log.debug(" {s}: [{x} => {d}({s})] + {x}", .{
|
||||
relocs_log.debug(" {f}: [{x} => {d}({s})] + {x}", .{
|
||||
relocation.fmtRelocType(r_type, cpu_arch),
|
||||
r_offset,
|
||||
r_sym,
|
||||
@ -495,14 +482,14 @@ pub fn writeEhFrameRelocs(elf_file: *Elf, relocs: *std.ArrayList(elf.Elf64_Rela)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn writeEhFrameHdr(elf_file: *Elf, writer: *std.io.BufferedWriter) !void {
|
||||
pub fn writeEhFrameHdr(elf_file: *Elf, bw: *std.io.BufferedWriter) !void {
|
||||
const comp = elf_file.base.comp;
|
||||
const gpa = comp.gpa;
|
||||
|
||||
try writer.writeByte(1); // version
|
||||
try writer.writeByte(DW_EH_PE.pcrel | DW_EH_PE.sdata4);
|
||||
try writer.writeByte(DW_EH_PE.udata4);
|
||||
try writer.writeByte(DW_EH_PE.datarel | DW_EH_PE.sdata4);
|
||||
try bw.writeByte(1); // version
|
||||
try bw.writeByte(DW_EH_PE.pcrel | DW_EH_PE.sdata4);
|
||||
try bw.writeByte(DW_EH_PE.udata4);
|
||||
try bw.writeByte(DW_EH_PE.datarel | DW_EH_PE.sdata4);
|
||||
|
||||
const shdrs = elf_file.sections.items(.shdr);
|
||||
const eh_frame_shdr = shdrs[elf_file.section_indexes.eh_frame.?];
|
||||
@ -513,7 +500,7 @@ pub fn writeEhFrameHdr(elf_file: *Elf, writer: *std.io.BufferedWriter) !void {
|
||||
const sym = zo.symbol(zo.eh_frame_index orelse break :existing_size 0);
|
||||
break :existing_size sym.atom(elf_file).?.size;
|
||||
};
|
||||
try writer.writeInt(
|
||||
try bw.writeInt(
|
||||
u32,
|
||||
@as(u32, @bitCast(@as(
|
||||
i32,
|
||||
@ -521,7 +508,7 @@ pub fn writeEhFrameHdr(elf_file: *Elf, writer: *std.io.BufferedWriter) !void {
|
||||
))),
|
||||
.little,
|
||||
);
|
||||
try writer.writeInt(u32, num_fdes, .little);
|
||||
try bw.writeInt(u32, num_fdes, .little);
|
||||
|
||||
const Entry = struct {
|
||||
init_addr: u32,
|
||||
@ -561,7 +548,7 @@ pub fn writeEhFrameHdr(elf_file: *Elf, writer: *std.io.BufferedWriter) !void {
|
||||
}
|
||||
|
||||
std.mem.sort(Entry, entries.items, {}, Entry.lessThan);
|
||||
try writer.writeAll(std.mem.sliceAsBytes(entries.items));
|
||||
try bw.writeAll(std.mem.sliceAsBytes(entries.items));
|
||||
}
|
||||
|
||||
const eh_frame_hdr_header_size: usize = 12;
|
||||
@ -607,11 +594,11 @@ const riscv = struct {
|
||||
fn reportInvalidReloc(rec: anytype, elf_file: *Elf, rel: elf.Elf64_Rela) !void {
|
||||
const diags = &elf_file.base.comp.link_diags;
|
||||
var err = try diags.addErrorWithNotes(1);
|
||||
try err.addMsg("invalid relocation type {} at offset 0x{x}", .{
|
||||
try err.addMsg("invalid relocation type {f} at offset 0x{x}", .{
|
||||
relocation.fmtRelocType(rel.r_type(), elf_file.getTarget().cpu.arch),
|
||||
rel.r_offset,
|
||||
});
|
||||
err.addNote("in {}:.eh_frame", .{elf_file.file(rec.file_index).?.fmtPath()});
|
||||
err.addNote("in {f}:.eh_frame", .{elf_file.file(rec.file_index).?.fmtPath()});
|
||||
return error.RelocFailure;
|
||||
}
|
||||
|
||||
|
||||
@ -14,19 +14,13 @@ pub const File = union(enum) {
|
||||
return .{ .data = file };
|
||||
}
|
||||
|
||||
fn formatPath(
|
||||
file: File,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
fn formatPath(file: File, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
|
||||
_ = unused_fmt_string;
|
||||
_ = options;
|
||||
switch (file) {
|
||||
.zig_object => |zo| try writer.writeAll(zo.basename),
|
||||
.linker_defined => try writer.writeAll("(linker defined)"),
|
||||
.object => |x| try writer.print("{}", .{x.fmtPath()}),
|
||||
.shared_object => |x| try writer.print("{}", .{@as(Path, x.path)}),
|
||||
.zig_object => |zo| try bw.writeAll(zo.basename),
|
||||
.linker_defined => try bw.writeAll("(linker defined)"),
|
||||
.object => |x| try bw.print("{f}", .{x.fmtPath()}),
|
||||
.shared_object => |x| try bw.print("{f}", .{x.path}),
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -111,7 +111,7 @@ fn markLive(atom: *Atom, elf_file: *Elf) void {
|
||||
const target_sym = elf_file.symbol(ref) orelse continue;
|
||||
const target_atom = target_sym.atom(elf_file) orelse continue;
|
||||
target_atom.alive = true;
|
||||
gc_track_live_log.debug("{}marking live atom({d})", .{ track_live_level, target_atom.atom_index });
|
||||
gc_track_live_log.debug("{f}marking live atom({d})", .{ track_live_level, target_atom.atom_index });
|
||||
if (markAtom(target_atom)) markLive(target_atom, elf_file);
|
||||
}
|
||||
}
|
||||
@ -128,7 +128,7 @@ fn markLive(atom: *Atom, elf_file: *Elf) void {
|
||||
}
|
||||
const target_atom = target_sym.atom(elf_file) orelse continue;
|
||||
target_atom.alive = true;
|
||||
gc_track_live_log.debug("{}marking live atom({d})", .{ track_live_level, target_atom.atom_index });
|
||||
gc_track_live_log.debug("{f}marking live atom({d})", .{ track_live_level, target_atom.atom_index });
|
||||
if (markAtom(target_atom)) markLive(target_atom, elf_file);
|
||||
}
|
||||
}
|
||||
@ -170,7 +170,7 @@ pub fn dumpPrunedAtoms(elf_file: *Elf) !void {
|
||||
for (file.atoms()) |atom_index| {
|
||||
const atom = file.atom(atom_index) orelse continue;
|
||||
if (!atom.alive)
|
||||
try stderr.print("link: removing unused section '{s}' in file '{}'\n", .{
|
||||
try stderr.print("link: removing unused section '{s}' in file '{f}'\n", .{
|
||||
atom.name(elf_file),
|
||||
atom.file(elf_file).?.fmtPath(),
|
||||
});
|
||||
@ -185,15 +185,9 @@ const Level = struct {
|
||||
self.value += 1;
|
||||
}
|
||||
|
||||
pub fn format(
|
||||
self: *const @This(),
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
pub fn format(self: *const @This(), bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
|
||||
_ = unused_fmt_string;
|
||||
_ = options;
|
||||
try writer.writeByteNTimes(' ', self.value);
|
||||
try bw.splatByteAll(' ', self.value);
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
@ -31,7 +31,7 @@ pub fn flushStaticLib(elf_file: *Elf, comp: *Compilation) !void {
|
||||
try elf_file.allocateNonAllocSections();
|
||||
|
||||
if (build_options.enable_logging) {
|
||||
state_log.debug("{}", .{elf_file.dumpState()});
|
||||
state_log.debug("{f}", .{elf_file.dumpState()});
|
||||
}
|
||||
|
||||
try elf_file.writeMergeSections();
|
||||
@ -96,36 +96,35 @@ pub fn flushStaticLib(elf_file: *Elf, comp: *Compilation) !void {
};

if (build_options.enable_logging) {
state_log.debug("ar_symtab\n{}\n", .{ar_symtab.fmt(elf_file)});
state_log.debug("ar_strtab\n{}\n", .{ar_strtab});
state_log.debug("ar_symtab\n{f}\n", .{ar_symtab.fmt(elf_file)});
state_log.debug("ar_strtab\n{f}\n", .{ar_strtab});
}

var buffer = std.ArrayList(u8).init(gpa);
defer buffer.deinit();
try buffer.ensureTotalCapacityPrecise(total_size);
var bw: std.io.BufferedWriter = undefined;
bw.initFixed(try gpa.alloc(u8, total_size));
defer gpa.free(bw.buffer);

// Write magic
try buffer.writer().writeAll(elf.ARMAG);
try bw.writeAll(elf.ARMAG);

// Write symtab
try ar_symtab.write(.p64, elf_file, buffer.writer());
try ar_symtab.write(.p64, elf_file, &bw);

// Write strtab
if (ar_strtab.size() > 0) {
if (!mem.isAligned(buffer.items.len, 2)) try buffer.writer().writeByte(0);
try ar_strtab.write(buffer.writer());
if (!mem.isAligned(bw.count, 2)) try bw.writeByte(0);
try ar_strtab.write(&bw);
}

// Write object files
for (files.items) |index| {
if (!mem.isAligned(buffer.items.len, 2)) try buffer.writer().writeByte(0);
try elf_file.file(index).?.writeAr(elf_file, buffer.writer());
if (!mem.isAligned(bw.count, 2)) try bw.writeByte(0);
try elf_file.file(index).?.writeAr(elf_file, &bw);
}

assert(buffer.items.len == total_size);

assert(bw.end == bw.buffer.len);
try elf_file.base.file.?.setEndPos(total_size);
try elf_file.base.file.?.pwriteAll(buffer.items, 0);
try elf_file.base.file.?.pwriteAll(bw.buffer, 0);

if (diags.hasErrors()) return error.LinkFailure;
}
@ -170,7 +169,7 @@ pub fn flushObject(elf_file: *Elf, comp: *Compilation) !void {
try elf_file.allocateNonAllocSections();

if (build_options.enable_logging) {
state_log.debug("{}", .{elf_file.dumpState()});
state_log.debug("{f}", .{elf_file.dumpState()});
}

try writeAtoms(elf_file);
@ -407,17 +406,16 @@ fn writeSyntheticSections(elf_file: *Elf) !void {
};
const shdr = slice.items(.shdr)[shndx];
const sh_size = math.cast(usize, shdr.sh_size) orelse return error.Overflow;
var buffer: std.io.AllocatingWriter = undefined;
const bw = buffer.init(gpa);
defer buffer.deinit();
try buffer.ensureTotalCapacity(gpa, sh_size - existing_size);
try eh_frame.writeEhFrameRelocatable(elf_file, bw);
var bw: std.io.BufferedWriter = undefined;
bw.initFixed(try gpa.alloc(u8, sh_size - existing_size));
defer gpa.free(bw.buffer);
try eh_frame.writeEhFrameRelocatable(elf_file, &bw);
log.debug("writing .eh_frame from 0x{x} to 0x{x}", .{
shdr.sh_offset + existing_size,
shdr.sh_offset + sh_size,
});
assert(buffer.getWritten().len == sh_size - existing_size);
try elf_file.base.file.?.pwriteAll(buffer.getWritten(), shdr.sh_offset + existing_size);
assert(bw.end == bw.buffer.len);
try elf_file.base.file.?.pwriteAll(bw.buffer, shdr.sh_offset + existing_size);
}
if (elf_file.section_indexes.eh_frame_rela) |shndx| {
const shdr = slice.items(.shdr)[shndx];

@ -148,19 +148,13 @@ pub fn fmtRelocType(r_type: u32, cpu_arch: std.Target.Cpu.Arch) std.fmt.Formatte
} };
}

fn formatRelocType(
ctx: FormatRelocTypeCtx,
comptime unused_fmt_string: []const u8,
options: std.fmt.FormatOptions,
writer: anytype,
) !void {
fn formatRelocType(ctx: FormatRelocTypeCtx, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
_ = unused_fmt_string;
_ = options;
const r_type = ctx.r_type;
switch (ctx.cpu_arch) {
.x86_64 => try writer.print("R_X86_64_{s}", .{@tagName(@as(elf.R_X86_64, @enumFromInt(r_type)))}),
.aarch64 => try writer.print("R_AARCH64_{s}", .{@tagName(@as(elf.R_AARCH64, @enumFromInt(r_type)))}),
.riscv64 => try writer.print("R_RISCV_{s}", .{@tagName(@as(elf.R_RISCV, @enumFromInt(r_type)))}),
.x86_64 => try bw.print("R_X86_64_{s}", .{@tagName(@as(elf.R_X86_64, @enumFromInt(r_type)))}),
.aarch64 => try bw.print("R_AARCH64_{s}", .{@tagName(@as(elf.R_AARCH64, @enumFromInt(r_type)))}),
.riscv64 => try bw.print("R_RISCV_{s}", .{@tagName(@as(elf.R_RISCV, @enumFromInt(r_type)))}),
else => unreachable,
}
}

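The `{}` to `{f}` changes above go hand in hand with this signature change: a value printed with `{f}` is routed to a format function that takes the buffered writer directly. A minimal sketch of the std.fmt.Formatter pattern as it is used in this commit (specifier semantics assumed from this diff, not from a released std):

const std = @import("std");

fn formatRelocName(r_type: u32, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
    _ = unused_fmt_string;
    try bw.print("R_X86_64_{s}", .{@tagName(@as(std.elf.R_X86_64, @enumFromInt(r_type)))});
}

pub fn fmtReloc(r_type: u32) std.fmt.Formatter(formatRelocName) {
    return .{ .data = r_type };
}

// usage: log.debug("{f}", .{fmtReloc(r_type)});
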
@ -94,115 +94,115 @@ pub const DynamicSection = struct {
|
||||
return nentries * @sizeOf(elf.Elf64_Dyn);
|
||||
}
|
||||
|
||||
pub fn write(dt: DynamicSection, elf_file: *Elf, writer: anytype) !void {
|
||||
pub fn write(dt: DynamicSection, elf_file: *Elf, bw: *std.io.BufferedWriter) anyerror!void {
|
||||
const shdrs = elf_file.sections.items(.shdr);
|
||||
|
||||
// NEEDED
|
||||
for (dt.needed.items) |off| {
|
||||
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_NEEDED, .d_val = off });
|
||||
try bw.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_NEEDED, .d_val = off });
|
||||
}
|
||||
|
||||
if (dt.soname) |off| {
|
||||
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_SONAME, .d_val = off });
|
||||
try bw.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_SONAME, .d_val = off });
|
||||
}
|
||||
|
||||
// RUNPATH
|
||||
// TODO add option in Options to revert to old RPATH tag
|
||||
if (dt.rpath > 0) {
|
||||
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_RUNPATH, .d_val = dt.rpath });
|
||||
try bw.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_RUNPATH, .d_val = dt.rpath });
|
||||
}
|
||||
|
||||
// INIT
|
||||
if (elf_file.sectionByName(".init")) |shndx| {
|
||||
const addr = shdrs[shndx].sh_addr;
|
||||
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_INIT, .d_val = addr });
|
||||
try bw.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_INIT, .d_val = addr });
|
||||
}
|
||||
|
||||
// FINI
|
||||
if (elf_file.sectionByName(".fini")) |shndx| {
|
||||
const addr = shdrs[shndx].sh_addr;
|
||||
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_FINI, .d_val = addr });
|
||||
try bw.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_FINI, .d_val = addr });
|
||||
}
|
||||
|
||||
// INIT_ARRAY
|
||||
if (elf_file.sectionByName(".init_array")) |shndx| {
|
||||
const shdr = shdrs[shndx];
|
||||
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_INIT_ARRAY, .d_val = shdr.sh_addr });
|
||||
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_INIT_ARRAYSZ, .d_val = shdr.sh_size });
|
||||
try bw.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_INIT_ARRAY, .d_val = shdr.sh_addr });
|
||||
try bw.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_INIT_ARRAYSZ, .d_val = shdr.sh_size });
|
||||
}
|
||||
|
||||
// FINI_ARRAY
|
||||
if (elf_file.sectionByName(".fini_array")) |shndx| {
|
||||
const shdr = shdrs[shndx];
|
||||
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_FINI_ARRAY, .d_val = shdr.sh_addr });
|
||||
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_FINI_ARRAYSZ, .d_val = shdr.sh_size });
|
||||
try bw.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_FINI_ARRAY, .d_val = shdr.sh_addr });
|
||||
try bw.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_FINI_ARRAYSZ, .d_val = shdr.sh_size });
|
||||
}
|
||||
|
||||
// RELA
|
||||
if (elf_file.section_indexes.rela_dyn) |shndx| {
|
||||
const shdr = shdrs[shndx];
|
||||
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_RELA, .d_val = shdr.sh_addr });
|
||||
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_RELASZ, .d_val = shdr.sh_size });
|
||||
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_RELAENT, .d_val = shdr.sh_entsize });
|
||||
try bw.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_RELA, .d_val = shdr.sh_addr });
|
||||
try bw.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_RELASZ, .d_val = shdr.sh_size });
|
||||
try bw.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_RELAENT, .d_val = shdr.sh_entsize });
|
||||
}
|
||||
|
||||
// JMPREL
|
||||
if (elf_file.section_indexes.rela_plt) |shndx| {
|
||||
const shdr = shdrs[shndx];
|
||||
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_JMPREL, .d_val = shdr.sh_addr });
|
||||
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_PLTRELSZ, .d_val = shdr.sh_size });
|
||||
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_PLTREL, .d_val = elf.DT_RELA });
|
||||
try bw.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_JMPREL, .d_val = shdr.sh_addr });
|
||||
try bw.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_PLTRELSZ, .d_val = shdr.sh_size });
|
||||
try bw.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_PLTREL, .d_val = elf.DT_RELA });
|
||||
}
|
||||
|
||||
// PLTGOT
|
||||
if (elf_file.section_indexes.got_plt) |shndx| {
|
||||
const addr = shdrs[shndx].sh_addr;
|
||||
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_PLTGOT, .d_val = addr });
|
||||
try bw.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_PLTGOT, .d_val = addr });
|
||||
}
|
||||
|
||||
{
|
||||
assert(elf_file.section_indexes.hash != null);
|
||||
const addr = shdrs[elf_file.section_indexes.hash.?].sh_addr;
|
||||
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_HASH, .d_val = addr });
|
||||
try bw.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_HASH, .d_val = addr });
|
||||
}
|
||||
|
||||
if (elf_file.section_indexes.gnu_hash) |shndx| {
|
||||
const addr = shdrs[shndx].sh_addr;
|
||||
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_GNU_HASH, .d_val = addr });
|
||||
try bw.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_GNU_HASH, .d_val = addr });
|
||||
}
|
||||
|
||||
// TEXTREL
|
||||
if (elf_file.has_text_reloc) {
|
||||
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_TEXTREL, .d_val = 0 });
|
||||
try bw.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_TEXTREL, .d_val = 0 });
|
||||
}
|
||||
|
||||
// SYMTAB + SYMENT
|
||||
{
|
||||
assert(elf_file.section_indexes.dynsymtab != null);
|
||||
const shdr = shdrs[elf_file.section_indexes.dynsymtab.?];
|
||||
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_SYMTAB, .d_val = shdr.sh_addr });
|
||||
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_SYMENT, .d_val = shdr.sh_entsize });
|
||||
try bw.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_SYMTAB, .d_val = shdr.sh_addr });
|
||||
try bw.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_SYMENT, .d_val = shdr.sh_entsize });
|
||||
}
|
||||
|
||||
// STRTAB + STRSZ
|
||||
{
|
||||
assert(elf_file.section_indexes.dynstrtab != null);
|
||||
const shdr = shdrs[elf_file.section_indexes.dynstrtab.?];
|
||||
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_STRTAB, .d_val = shdr.sh_addr });
|
||||
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_STRSZ, .d_val = shdr.sh_size });
|
||||
try bw.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_STRTAB, .d_val = shdr.sh_addr });
|
||||
try bw.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_STRSZ, .d_val = shdr.sh_size });
|
||||
}
|
||||
|
||||
// VERSYM
|
||||
if (elf_file.section_indexes.versym) |shndx| {
|
||||
const addr = shdrs[shndx].sh_addr;
|
||||
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_VERSYM, .d_val = addr });
|
||||
try bw.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_VERSYM, .d_val = addr });
|
||||
}
|
||||
|
||||
// VERNEED + VERNEEDNUM
|
||||
if (elf_file.section_indexes.verneed) |shndx| {
|
||||
const addr = shdrs[shndx].sh_addr;
|
||||
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_VERNEED, .d_val = addr });
|
||||
try writer.writeStruct(elf.Elf64_Dyn{
|
||||
try bw.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_VERNEED, .d_val = addr });
|
||||
try bw.writeStruct(elf.Elf64_Dyn{
|
||||
.d_tag = elf.DT_VERNEEDNUM,
|
||||
.d_val = elf_file.verneed.verneed.items.len,
|
||||
});
|
||||
@ -210,18 +210,18 @@ pub const DynamicSection = struct {
|
||||
|
||||
// FLAGS
|
||||
if (dt.getFlags(elf_file)) |flags| {
|
||||
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_FLAGS, .d_val = flags });
|
||||
try bw.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_FLAGS, .d_val = flags });
|
||||
}
|
||||
// FLAGS_1
|
||||
if (dt.getFlags1(elf_file)) |flags_1| {
|
||||
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_FLAGS_1, .d_val = flags_1 });
|
||||
try bw.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_FLAGS_1, .d_val = flags_1 });
|
||||
}
|
||||
|
||||
// DEBUG
|
||||
if (!elf_file.isEffectivelyDynLib()) try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_DEBUG, .d_val = 0 });
|
||||
if (!elf_file.isEffectivelyDynLib()) try bw.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_DEBUG, .d_val = 0 });
|
||||
|
||||
// NULL
|
||||
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_NULL, .d_val = 0 });
|
||||
try bw.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_NULL, .d_val = 0 });
|
||||
}
|
||||
};
|
||||
|
||||
@ -360,7 +360,7 @@ pub const GotSection = struct {
|
||||
return s;
|
||||
}
|
||||
|
||||
pub fn write(got: GotSection, elf_file: *Elf, writer: anytype) !void {
|
||||
pub fn write(got: GotSection, elf_file: *Elf, bw: *std.io.BufferedWriter) anyerror!void {
|
||||
const comp = elf_file.base.comp;
|
||||
const is_dyn_lib = elf_file.isEffectivelyDynLib();
|
||||
const apply_relocs = true; // TODO add user option for this
|
||||
@ -381,47 +381,47 @@ pub const GotSection = struct {
|
||||
}
|
||||
break :blk value;
|
||||
};
|
||||
try writeInt(value, elf_file, writer);
|
||||
try writeInt(value, elf_file, bw);
|
||||
},
|
||||
.tlsld => {
|
||||
try writeInt(if (is_dyn_lib) @as(u64, 0) else 1, elf_file, writer);
|
||||
try writeInt(0, elf_file, writer);
|
||||
try writeInt(if (is_dyn_lib) @as(u64, 0) else 1, elf_file, bw);
|
||||
try writeInt(0, elf_file, bw);
|
||||
},
|
||||
.tlsgd => {
|
||||
if (symbol.?.flags.import) {
|
||||
try writeInt(0, elf_file, writer);
|
||||
try writeInt(0, elf_file, writer);
|
||||
try writeInt(0, elf_file, bw);
|
||||
try writeInt(0, elf_file, bw);
|
||||
} else {
|
||||
try writeInt(if (is_dyn_lib) @as(u64, 0) else 1, elf_file, writer);
|
||||
try writeInt(if (is_dyn_lib) @as(u64, 0) else 1, elf_file, bw);
|
||||
const offset = symbol.?.address(.{}, elf_file) - elf_file.dtpAddress();
|
||||
try writeInt(offset, elf_file, writer);
|
||||
try writeInt(offset, elf_file, bw);
|
||||
}
|
||||
},
|
||||
.gottp => {
|
||||
if (symbol.?.flags.import) {
|
||||
try writeInt(0, elf_file, writer);
|
||||
try writeInt(0, elf_file, bw);
|
||||
} else if (is_dyn_lib) {
|
||||
const offset = if (apply_relocs)
|
||||
symbol.?.address(.{}, elf_file) - elf_file.tlsAddress()
|
||||
else
|
||||
0;
|
||||
try writeInt(offset, elf_file, writer);
|
||||
try writeInt(offset, elf_file, bw);
|
||||
} else {
|
||||
const offset = symbol.?.address(.{}, elf_file) - elf_file.tpAddress();
|
||||
try writeInt(offset, elf_file, writer);
|
||||
try writeInt(offset, elf_file, bw);
|
||||
}
|
||||
},
|
||||
.tlsdesc => {
|
||||
if (symbol.?.flags.import) {
|
||||
try writeInt(0, elf_file, writer);
|
||||
try writeInt(0, elf_file, writer);
|
||||
try writeInt(0, elf_file, bw);
|
||||
try writeInt(0, elf_file, bw);
|
||||
} else {
|
||||
try writeInt(0, elf_file, writer);
|
||||
try writeInt(0, elf_file, bw);
|
||||
const offset = if (apply_relocs)
|
||||
symbol.?.address(.{}, elf_file) - elf_file.tlsAddress()
|
||||
else
|
||||
0;
|
||||
try writeInt(offset, elf_file, writer);
|
||||
try writeInt(offset, elf_file, bw);
|
||||
}
|
||||
},
|
||||
}
|
||||
@ -615,20 +615,14 @@ pub const GotSection = struct {
|
||||
return .{ .data = .{ .got = got, .elf_file = elf_file } };
|
||||
}
|
||||
|
||||
pub fn format2(
|
||||
ctx: FormatCtx,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
_ = options;
|
||||
pub fn format2(ctx: FormatCtx, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
|
||||
_ = unused_fmt_string;
|
||||
const got = ctx.got;
|
||||
const elf_file = ctx.elf_file;
|
||||
try writer.writeAll("GOT\n");
|
||||
try bw.writeAll("GOT\n");
|
||||
for (got.entries.items) |entry| {
|
||||
const symbol = elf_file.symbol(entry.ref).?;
|
||||
try writer.print(" {d}@0x{x} => {}@0x{x} ({s})\n", .{
|
||||
try bw.print(" {d}@0x{x} => {f}@0x{x} ({s})\n", .{
|
||||
entry.cell_index,
|
||||
entry.address(elf_file),
|
||||
entry.ref,
|
||||
@ -678,11 +672,11 @@ pub const PltSection = struct {
|
||||
};
|
||||
}
|
||||
|
||||
pub fn write(plt: PltSection, elf_file: *Elf, writer: anytype) !void {
|
||||
pub fn write(plt: PltSection, elf_file: *Elf, bw: *std.io.BufferedWriter) anyerror!void {
|
||||
const cpu_arch = elf_file.getTarget().cpu.arch;
|
||||
switch (cpu_arch) {
|
||||
.x86_64 => try x86_64.write(plt, elf_file, writer),
|
||||
.aarch64 => try aarch64.write(plt, elf_file, writer),
|
||||
.x86_64 => try x86_64.write(plt, elf_file, bw),
|
||||
.aarch64 => try aarch64.write(plt, elf_file, bw),
|
||||
else => return error.UnsupportedCpuArch,
|
||||
}
|
||||
}
|
||||
@ -703,7 +697,7 @@ pub const PltSection = struct {
|
||||
const r_sym: u64 = extra.dynamic;
|
||||
const r_type = relocation.encode(.jump_slot, cpu_arch);
|
||||
|
||||
relocs_log.debug(" {s}: [{x} => {d}({s})] + 0", .{
|
||||
relocs_log.debug(" {f}: [{x} => {d}({s})] + 0", .{
|
||||
relocation.fmtRelocType(r_type, cpu_arch),
|
||||
r_offset,
|
||||
r_sym,
|
||||
@ -758,20 +752,14 @@ pub const PltSection = struct {
|
||||
return .{ .data = .{ .plt = plt, .elf_file = elf_file } };
|
||||
}
|
||||
|
||||
pub fn format2(
|
||||
ctx: FormatCtx,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
_ = options;
|
||||
pub fn format2(ctx: FormatCtx, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
|
||||
_ = unused_fmt_string;
|
||||
const plt = ctx.plt;
|
||||
const elf_file = ctx.elf_file;
|
||||
try writer.writeAll("PLT\n");
|
||||
try bw.writeAll("PLT\n");
|
||||
for (plt.symbols.items, 0..) |ref, i| {
|
||||
const symbol = elf_file.symbol(ref).?;
|
||||
try writer.print(" {d}@0x{x} => {}@0x{x} ({s})\n", .{
|
||||
try bw.print(" {d}@0x{x} => {f}@0x{x} ({s})\n", .{
|
||||
i,
|
||||
symbol.pltAddress(elf_file),
|
||||
ref,
|
||||
@ -782,7 +770,7 @@ pub const PltSection = struct {
|
||||
}
|
||||
|
||||
const x86_64 = struct {
|
||||
fn write(plt: PltSection, elf_file: *Elf, writer: anytype) !void {
|
||||
fn write(plt: PltSection, elf_file: *Elf, bw: *std.io.BufferedWriter) anyerror!void {
|
||||
const shdrs = elf_file.sections.items(.shdr);
|
||||
const plt_addr = shdrs[elf_file.section_indexes.plt.?].sh_addr;
|
||||
const got_plt_addr = shdrs[elf_file.section_indexes.got_plt.?].sh_addr;
|
||||
@ -796,8 +784,8 @@ pub const PltSection = struct {
|
||||
mem.writeInt(i32, preamble[8..][0..4], @as(i32, @intCast(disp)), .little);
|
||||
disp = @as(i64, @intCast(got_plt_addr + 16)) - @as(i64, @intCast(plt_addr + 14)) - 4;
|
||||
mem.writeInt(i32, preamble[14..][0..4], @as(i32, @intCast(disp)), .little);
|
||||
try writer.writeAll(&preamble);
|
||||
try writer.writeByteNTimes(0xcc, preambleSize(.x86_64) - preamble.len);
|
||||
try bw.writeAll(&preamble);
|
||||
try bw.splatByteAll(0xcc, preambleSize(.x86_64) - preamble.len);
|
||||
|
||||
for (plt.symbols.items, 0..) |ref, i| {
|
||||
const sym = elf_file.symbol(ref).?;
|
||||
@ -811,13 +799,13 @@ pub const PltSection = struct {
|
||||
};
|
||||
mem.writeInt(i32, entry[6..][0..4], @as(i32, @intCast(i)), .little);
|
||||
mem.writeInt(i32, entry[12..][0..4], @as(i32, @intCast(disp)), .little);
|
||||
try writer.writeAll(&entry);
|
||||
try bw.writeAll(&entry);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const aarch64 = struct {
|
||||
fn write(plt: PltSection, elf_file: *Elf, writer: anytype) !void {
|
||||
fn write(plt: PltSection, elf_file: *Elf, bw: *std.io.BufferedWriter) anyerror!void {
|
||||
{
|
||||
const shdrs = elf_file.sections.items(.shdr);
|
||||
const plt_addr: i64 = @intCast(shdrs[elf_file.section_indexes.plt.?].sh_addr);
|
||||
@ -845,7 +833,7 @@ pub const PltSection = struct {
|
||||
};
|
||||
comptime assert(preamble.len == 8);
|
||||
for (preamble) |inst| {
|
||||
try writer.writeInt(u32, inst.toU32(), .little);
|
||||
try bw.writeInt(u32, inst.toU32(), .little);
|
||||
}
|
||||
}
|
||||
|
||||
@ -864,7 +852,7 @@ pub const PltSection = struct {
|
||||
};
|
||||
comptime assert(insts.len == 4);
|
||||
for (insts) |inst| {
|
||||
try writer.writeInt(u32, inst.toU32(), .little);
|
||||
try bw.writeInt(u32, inst.toU32(), .little);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -883,22 +871,22 @@ pub const GotPltSection = struct {
|
||||
return preamble_size + elf_file.plt.symbols.items.len * 8;
|
||||
}
|
||||
|
||||
pub fn write(got_plt: GotPltSection, elf_file: *Elf, writer: anytype) !void {
|
||||
pub fn write(got_plt: GotPltSection, elf_file: *Elf, bw: *std.io.BufferedWriter) anyerror!void {
|
||||
_ = got_plt;
|
||||
{
|
||||
// [0]: _DYNAMIC
|
||||
const symbol = elf_file.linkerDefinedPtr().?.dynamicSymbol(elf_file).?;
|
||||
try writer.writeInt(u64, @intCast(symbol.address(.{}, elf_file)), .little);
|
||||
try bw.writeInt(u64, @intCast(symbol.address(.{}, elf_file)), .little);
|
||||
}
|
||||
// [1]: 0x0
|
||||
// [2]: 0x0
|
||||
try writer.writeInt(u64, 0x0, .little);
|
||||
try writer.writeInt(u64, 0x0, .little);
|
||||
try bw.writeInt(u64, 0x0, .little);
|
||||
try bw.writeInt(u64, 0x0, .little);
|
||||
if (elf_file.section_indexes.plt) |shndx| {
|
||||
const plt_addr = elf_file.sections.items(.shdr)[shndx].sh_addr;
|
||||
for (0..elf_file.plt.symbols.items.len) |_| {
|
||||
// [N]: .plt
|
||||
try writer.writeInt(u64, plt_addr, .little);
|
||||
try bw.writeInt(u64, plt_addr, .little);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -934,11 +922,11 @@ pub const PltGotSection = struct {
|
||||
};
|
||||
}
|
||||
|
||||
pub fn write(plt_got: PltGotSection, elf_file: *Elf, writer: anytype) !void {
|
||||
pub fn write(plt_got: PltGotSection, elf_file: *Elf, bw: *std.io.BufferedWriter) anyerror!void {
|
||||
const cpu_arch = elf_file.getTarget().cpu.arch;
|
||||
switch (cpu_arch) {
|
||||
.x86_64 => try x86_64.write(plt_got, elf_file, writer),
|
||||
.aarch64 => try aarch64.write(plt_got, elf_file, writer),
|
||||
.x86_64 => try x86_64.write(plt_got, elf_file, bw),
|
||||
.aarch64 => try aarch64.write(plt_got, elf_file, bw),
|
||||
else => return error.UnsupportedCpuArch,
|
||||
}
|
||||
}
|
||||
@ -970,7 +958,7 @@ pub const PltGotSection = struct {
|
||||
}
|
||||
|
||||
const x86_64 = struct {
|
||||
pub fn write(plt_got: PltGotSection, elf_file: *Elf, writer: anytype) !void {
|
||||
pub fn write(plt_got: PltGotSection, elf_file: *Elf, bw: *std.io.BufferedWriter) anyerror!void {
|
||||
for (plt_got.symbols.items) |ref| {
|
||||
const sym = elf_file.symbol(ref).?;
|
||||
const target_addr = sym.gotAddress(elf_file);
|
||||
@ -982,13 +970,13 @@ pub const PltGotSection = struct {
|
||||
0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
|
||||
};
|
||||
mem.writeInt(i32, entry[6..][0..4], @as(i32, @intCast(disp)), .little);
|
||||
try writer.writeAll(&entry);
|
||||
try bw.writeAll(&entry);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const aarch64 = struct {
|
||||
fn write(plt_got: PltGotSection, elf_file: *Elf, writer: anytype) !void {
|
||||
fn write(plt_got: PltGotSection, elf_file: *Elf, bw: *std.io.BufferedWriter) anyerror!void {
|
||||
for (plt_got.symbols.items) |ref| {
|
||||
const sym = elf_file.symbol(ref).?;
|
||||
const target_addr = sym.gotAddress(elf_file);
|
||||
@ -1003,7 +991,7 @@ pub const PltGotSection = struct {
|
||||
};
|
||||
comptime assert(insts.len == 4);
|
||||
for (insts) |inst| {
|
||||
try writer.writeInt(u32, inst.toU32(), .little);
|
||||
try bw.writeInt(u32, inst.toU32(), .little);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1167,23 +1155,23 @@ pub const DynsymSection = struct {
return @as(u32, @intCast(dynsym.entries.items.len + 1));
}

pub fn write(dynsym: DynsymSection, elf_file: *Elf, writer: anytype) !void {
try writer.writeStruct(Elf.null_sym);
pub fn write(dynsym: DynsymSection, elf_file: *Elf, bw: *std.io.BufferedWriter) anyerror!void {
try bw.writeStruct(Elf.null_sym);
for (dynsym.entries.items) |entry| {
const sym = elf_file.symbol(entry.ref).?;
var out_sym: elf.Elf64_Sym = Elf.null_sym;
sym.setOutputSym(elf_file, &out_sym);
out_sym.st_name = entry.off;
try writer.writeStruct(out_sym);
try bw.writeStruct(out_sym);
}
}
};

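Fixed-layout ELF records are now emitted with writeStruct on the buffered writer rather than through an anytype writer. A small hedged sketch (BufferedWriter API as shown above; the std.elf types come from the standard library):

fn terminateDynamicSection(bw: *std.io.BufferedWriter) !void {
    // DT_NULL marks the end of the _DYNAMIC array; writeStruct emits the
    // extern struct byte-for-byte.
    try bw.writeStruct(std.elf.Elf64_Dyn{ .d_tag = std.elf.DT_NULL, .d_val = 0 });
}
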
pub const HashSection = struct {
buffer: std.ArrayListUnmanaged(u8) = .empty,
buffer: []u32 = &.{},

pub fn deinit(hs: *HashSection, allocator: Allocator) void {
hs.buffer.deinit(allocator);
pub fn deinit(hs: *HashSection, gpa: Allocator) void {
gpa.free(hs.buffer);
}

pub fn generate(hs: *HashSection, elf_file: *Elf) !void {
@ -1193,30 +1181,25 @@ pub const HashSection = struct {
const gpa = comp.gpa;
const nsyms = elf_file.dynsym.count();

var buckets = try gpa.alloc(u32, nsyms);
defer gpa.free(buckets);
@memset(buckets, 0);
assert(hs.buffer.len == 0);
hs.buffer = try gpa.alloc(u32, 2 * (1 + nsyms));

var chains = try gpa.alloc(u32, nsyms);
defer gpa.free(chains);
@memset(hs.buffer[0..2], std.mem.nativeToLittle(u32, @intCast(nsyms)));
const buckets = hs.buffer[2..][0..nsyms];
@memset(buckets, 0);
const chains = hs.buffer[2 + nsyms ..][0..nsyms];
@memset(chains, 0);

for (elf_file.dynsym.entries.items, 1..) |entry, i| {
const name = elf_file.getDynString(entry.off);
const hash = hasher(name) % buckets.len;
chains[@as(u32, @intCast(i))] = buckets[hash];
buckets[hash] = @as(u32, @intCast(i));
const hash = hasher(name) % nsyms;
chains[i] = buckets[hash];
buckets[hash] = std.mem.nativeToLittle(u32, @intCast(i));
}

try hs.buffer.ensureTotalCapacityPrecise(gpa, (2 + nsyms * 2) * 4);
hs.buffer.writer(gpa).writeInt(u32, @as(u32, @intCast(nsyms)), .little) catch unreachable;
hs.buffer.writer(gpa).writeInt(u32, @as(u32, @intCast(nsyms)), .little) catch unreachable;
hs.buffer.writer(gpa).writeAll(mem.sliceAsBytes(buckets)) catch unreachable;
hs.buffer.writer(gpa).writeAll(mem.sliceAsBytes(chains)) catch unreachable;
}

pub inline fn size(hs: HashSection) usize {
return hs.buffer.items.len;
return @sizeOf(u32) * hs.buffer.len;
}

pub fn hasher(name: [:0]const u8) u32 {
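The hash section now builds its image directly as a []u32 that is already little-endian, instead of appending to a byte list, so writing it out is a single sliceAsBytes copy. A hedged reconstruction of that layout with illustrative names (the real code hashes dynsym entries; the classic SysV hash is reproduced here only for self-containment):

const std = @import("std");

fn sysvHash(name: [:0]const u8) u32 {
    var h: u32 = 0;
    for (name) |c| {
        h = (h << 4) +% c;
        const g = h & 0xf0000000;
        if (g != 0) h ^= g >> 24;
        h &= ~g;
    }
    return h;
}

fn buildSysvHash(gpa: std.mem.Allocator, names: []const [:0]const u8) ![]u32 {
    const nsyms: u32 = @intCast(names.len + 1); // index 0 is the reserved null symbol
    const buf = try gpa.alloc(u32, 2 * (1 + nsyms)); // nbucket, nchain, buckets, chains
    @memset(buf[0..2], std.mem.nativeToLittle(u32, nsyms));
    const buckets = buf[2..][0..nsyms];
    @memset(buckets, 0);
    const chains = buf[2 + nsyms ..][0..nsyms];
    @memset(chains, 0);
    for (names, 1..) |name, i| {
        const h = sysvHash(name) % nsyms;
        chains[i] = buckets[h];
        buckets[h] = std.mem.nativeToLittle(u32, @intCast(i));
    }
    return buf; // written to disk later via mem.sliceAsBytes(buf)
}
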
@ -1266,17 +1249,14 @@ pub const GnuHashSection = struct {
return header_size + hash.num_bloom * 8 + hash.num_buckets * 4 + hash.num_exports * 4;
}

pub fn write(hash: GnuHashSection, elf_file: *Elf, writer: anytype) !void {
pub fn write(hash: GnuHashSection, elf_file: *Elf, br: *std.io.BufferedWriter) !void {
const exports = getExports(elf_file);
const export_off = elf_file.dynsym.count() - hash.num_exports;

var counting = std.io.countingWriter(writer);
const cwriter = counting.writer();

try cwriter.writeInt(u32, hash.num_buckets, .little);
try cwriter.writeInt(u32, export_off, .little);
try cwriter.writeInt(u32, hash.num_bloom, .little);
try cwriter.writeInt(u32, bloom_shift, .little);
try br.writeInt(u32, hash.num_buckets, .little);
try br.writeInt(u32, export_off, .little);
try br.writeInt(u32, hash.num_bloom, .little);
try br.writeInt(u32, bloom_shift, .little);

const comp = elf_file.base.comp;
const gpa = comp.gpa;
@ -1300,7 +1280,7 @@ pub const GnuHashSection = struct {
bloom[idx] |= @as(u64, 1) << @as(u6, @intCast((h >> bloom_shift) % 64));
}

try cwriter.writeAll(mem.sliceAsBytes(bloom));
try br.writeAll(mem.sliceAsBytes(bloom));

// Fill in the hash bucket indices
const buckets = try gpa.alloc(u32, hash.num_buckets);
@ -1313,7 +1293,7 @@ pub const GnuHashSection = struct {
}
}

try cwriter.writeAll(mem.sliceAsBytes(buckets));
try br.writeAll(mem.sliceAsBytes(buckets));

// Finally, write the hash table
const table = try gpa.alloc(u32, hash.num_exports);
@ -1329,9 +1309,9 @@ pub const GnuHashSection = struct {
}
}

try cwriter.writeAll(mem.sliceAsBytes(table));
try br.writeAll(mem.sliceAsBytes(table));

assert(counting.bytes_written == hash.size());
assert(br.count == hash.size());
}

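Size accounting that previously needed a std.io.countingWriter wrapper now reads the buffered writer's own count field. A hedged micro-example using only calls visible in this diff:

fn writeGnuHashHeader(bw: *std.io.BufferedWriter, num_buckets: u32, symoffset: u32, num_bloom: u32, bloom_shift: u32) !void {
    const start = bw.count;
    try bw.writeInt(u32, num_buckets, .little);
    try bw.writeInt(u32, symoffset, .little);
    try bw.writeInt(u32, num_bloom, .little);
    try bw.writeInt(u32, bloom_shift, .little);
    std.debug.assert(bw.count - start == 16); // header is four u32s; no counting wrapper needed
}
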
pub fn hasher(name: [:0]const u8) u32 {
@ -1478,9 +1458,9 @@ pub const VerneedSection = struct {
return vern.verneed.items.len * @sizeOf(elf.Elf64_Verneed) + vern.vernaux.items.len * @sizeOf(elf.Vernaux);
}

pub fn write(vern: VerneedSection, writer: anytype) !void {
try writer.writeAll(mem.sliceAsBytes(vern.verneed.items));
try writer.writeAll(mem.sliceAsBytes(vern.vernaux.items));
pub fn write(vern: VerneedSection, bw: *std.io.BufferedWriter) anyerror!void {
try bw.writeAll(mem.sliceAsBytes(vern.verneed.items));
try bw.writeAll(mem.sliceAsBytes(vern.vernaux.items));
}
};

@ -1506,11 +1486,11 @@ pub const GroupSection = struct {
return (members.len + 1) * @sizeOf(u32);
}

pub fn write(cgs: GroupSection, elf_file: *Elf, writer: anytype) !void {
pub fn write(cgs: GroupSection, elf_file: *Elf, bw: *std.io.BufferedWriter) !void {
const cg = cgs.group(elf_file);
const object = cg.file(elf_file).object;
const members = cg.members(elf_file);
try writer.writeInt(u32, if (cg.is_comdat) elf.GRP_COMDAT else 0, .little);
try bw.writeInt(u32, if (cg.is_comdat) elf.GRP_COMDAT else 0, .little);
for (members) |shndx| {
const shdr = object.shdrs.items[shndx];
switch (shdr.sh_type) {
@ -1522,26 +1502,26 @@ pub const GroupSection = struct {
atom.output_section_index == rela_shdr.sh_info)
break rela_shndx;
} else unreachable;
try writer.writeInt(u32, @intCast(rela_shndx), .little);
try bw.writeInt(u32, @intCast(rela_shndx), .little);
},
else => {
const atom_index = object.atoms_indexes.items[shndx];
const atom = object.atom(atom_index).?;
try writer.writeInt(u32, atom.output_section_index, .little);
try bw.writeInt(u32, atom.output_section_index, .little);
},
}
}
}
};

fn writeInt(value: anytype, elf_file: *Elf, writer: anytype) !void {
fn writeInt(value: anytype, elf_file: *Elf, bw: *std.io.BufferedWriter) anyerror!void {
const entry_size = elf_file.archPtrWidthBytes();
const target = elf_file.getTarget();
const endian = target.cpu.arch.endian();
switch (entry_size) {
2 => try writer.writeInt(u16, @intCast(value), endian),
4 => try writer.writeInt(u32, @intCast(value), endian),
8 => try writer.writeInt(u64, @intCast(value), endian),
2 => try bw.writeInt(u16, @intCast(value), endian),
4 => try bw.writeInt(u32, @intCast(value), endian),
8 => try bw.writeInt(u64, @intCast(value), endian),
else => unreachable,
}
}

@ -41,7 +41,7 @@ pub fn parse(
|
||||
try line_col.append(gpa, .{ .line = line, .column = column });
|
||||
switch (tok.id) {
|
||||
.invalid => {
|
||||
return diags.failParse(path, "invalid token in LD script: '{s}' ({d}:{d})", .{
|
||||
return diags.failParse(path, "invalid token in LD script: '{f}' ({d}:{d})", .{
|
||||
std.fmt.fmtSliceEscapeLower(tok.get(data)), line, column,
|
||||
});
|
||||
},
|
||||
|
||||
@ -41,9 +41,9 @@ data_in_code_cmd: macho.linkedit_data_command = .{ .cmd = .DATA_IN_CODE },
|
||||
uuid_cmd: macho.uuid_command = .{ .uuid = [_]u8{0} ** 16 },
|
||||
codesig_cmd: macho.linkedit_data_command = .{ .cmd = .CODE_SIGNATURE },
|
||||
|
||||
pagezero_seg_index: ?u8 = null,
|
||||
text_seg_index: ?u8 = null,
|
||||
linkedit_seg_index: ?u8 = null,
|
||||
pagezero_seg_index: ?u4 = null,
|
||||
text_seg_index: ?u4 = null,
|
||||
linkedit_seg_index: ?u4 = null,
|
||||
text_sect_index: ?u8 = null,
|
||||
data_sect_index: ?u8 = null,
|
||||
got_sect_index: ?u8 = null,
|
||||
@ -76,10 +76,10 @@ unwind_info: UnwindInfo = .{},
|
||||
data_in_code: DataInCode = .{},
|
||||
|
||||
/// Tracked loadable segments during incremental linking.
|
||||
zig_text_seg_index: ?u8 = null,
|
||||
zig_const_seg_index: ?u8 = null,
|
||||
zig_data_seg_index: ?u8 = null,
|
||||
zig_bss_seg_index: ?u8 = null,
|
||||
zig_text_seg_index: ?u4 = null,
|
||||
zig_const_seg_index: ?u4 = null,
|
||||
zig_data_seg_index: ?u4 = null,
|
||||
zig_bss_seg_index: ?u4 = null,
|
||||
|
||||
/// Tracked section headers with incremental updates to Zig object.
|
||||
zig_text_sect_index: ?u8 = null,
|
||||
@ -543,7 +543,7 @@ pub fn flush(
|
||||
self.allocateSyntheticSymbols();
|
||||
|
||||
if (build_options.enable_logging) {
|
||||
state_log.debug("{}", .{self.dumpState()});
|
||||
state_log.debug("{f}", .{self.dumpState()});
|
||||
}
|
||||
|
||||
// Beyond this point, everything has been allocated a virtual address and we can resolve
|
||||
@ -591,6 +591,7 @@ pub fn flush(
|
||||
error.NoSpaceLeft => unreachable,
|
||||
error.OutOfMemory => return error.OutOfMemory,
|
||||
error.LinkFailure => return error.LinkFailure,
|
||||
else => unreachable,
|
||||
};
|
||||
try self.writeHeader(ncmds, sizeofcmds);
|
||||
self.writeUuid(uuid_cmd_offset, self.requiresCodeSig()) catch |err| switch (err) {
|
||||
@ -677,12 +678,12 @@ fn dumpArgv(self: *MachO, comp: *Compilation) !void {
|
||||
|
||||
try argv.append("-platform_version");
|
||||
try argv.append(@tagName(self.platform.os_tag));
|
||||
try argv.append(try std.fmt.allocPrint(arena, "{}", .{self.platform.version}));
|
||||
try argv.append(try std.fmt.allocPrint(arena, "{f}", .{self.platform.version}));
|
||||
|
||||
if (self.sdk_version) |ver| {
|
||||
try argv.append(try std.fmt.allocPrint(arena, "{d}.{d}", .{ ver.major, ver.minor }));
|
||||
} else {
|
||||
try argv.append(try std.fmt.allocPrint(arena, "{}", .{self.platform.version}));
|
||||
try argv.append(try std.fmt.allocPrint(arena, "{f}", .{self.platform.version}));
|
||||
}
|
||||
|
||||
if (comp.sysroot) |syslibroot| {
|
||||
@ -863,7 +864,7 @@ pub fn classifyInputFile(self: *MachO, input: link.Input) !void {
|
||||
|
||||
const path, const file = input.pathAndFile().?;
|
||||
// TODO don't classify now, it's too late. The input file has already been classified
|
||||
log.debug("classifying input file {}", .{path});
|
||||
log.debug("classifying input file {f}", .{path});
|
||||
|
||||
const fh = try self.addFileHandle(file);
|
||||
var buffer: [Archive.SARMAG]u8 = undefined;
|
||||
@ -1074,7 +1075,7 @@ fn accessLibPath(
|
||||
|
||||
for (&[_][]const u8{ ".tbd", ".dylib", "" }) |ext| {
|
||||
test_path.clearRetainingCapacity();
|
||||
try test_path.writer().print("{s}" ++ sep ++ "lib{s}{s}", .{ search_dir, name, ext });
|
||||
try test_path.print("{s}" ++ sep ++ "lib{s}{s}", .{ search_dir, name, ext });
|
||||
try checked_paths.append(try arena.dupe(u8, test_path.items));
|
||||
fs.cwd().access(test_path.items, .{}) catch |err| switch (err) {
|
||||
error.FileNotFound => continue,
|
||||
@ -1097,7 +1098,7 @@ fn accessFrameworkPath(
|
||||
|
||||
for (&[_][]const u8{ ".tbd", ".dylib", "" }) |ext| {
|
||||
test_path.clearRetainingCapacity();
|
||||
try test_path.writer().print("{s}" ++ sep ++ "{s}.framework" ++ sep ++ "{s}{s}", .{
|
||||
try test_path.print("{s}" ++ sep ++ "{s}.framework" ++ sep ++ "{s}{s}", .{
|
||||
search_dir,
|
||||
name,
|
||||
name,
|
||||
@ -1178,9 +1179,9 @@ fn parseDependentDylibs(self: *MachO) !void {
|
||||
for (&[_][]const u8{ ".tbd", ".dylib", "" }) |ext| {
|
||||
test_path.clearRetainingCapacity();
|
||||
if (self.base.comp.sysroot) |root| {
|
||||
try test_path.writer().print("{s}" ++ fs.path.sep_str ++ "{s}{s}", .{ root, path, ext });
|
||||
try test_path.print("{s}" ++ fs.path.sep_str ++ "{s}{s}", .{ root, path, ext });
|
||||
} else {
|
||||
try test_path.writer().print("{s}{s}", .{ path, ext });
|
||||
try test_path.print("{s}{s}", .{ path, ext });
|
||||
}
|
||||
try checked_paths.append(try arena.dupe(u8, test_path.items));
|
||||
fs.cwd().access(test_path.items, .{}) catch |err| switch (err) {
|
||||
@ -1591,7 +1592,7 @@ fn reportUndefs(self: *MachO) !void {
|
||||
const ref = refs.items[inote];
|
||||
const file = self.getFile(ref.file).?;
|
||||
const atom = ref.getAtom(self).?;
|
||||
err.addNote("referenced by {}:{s}", .{ file.fmtPath(), atom.getName(self) });
|
||||
err.addNote("referenced by {f}:{s}", .{ file.fmtPath(), atom.getName(self) });
|
||||
}
|
||||
|
||||
if (refs.items.len > max_notes) {
|
||||
@ -2131,7 +2132,7 @@ fn initSegments(self: *MachO) !void {
|
||||
|
||||
mem.sort(Entry, entries.items, self, Entry.lessThan);
|
||||
|
||||
const backlinks = try gpa.alloc(u8, entries.items.len);
|
||||
const backlinks = try gpa.alloc(u4, entries.items.len);
|
||||
defer gpa.free(backlinks);
|
||||
for (entries.items, 0..) |entry, i| {
|
||||
backlinks[entry.index] = @intCast(i);
|
||||
@ -2145,7 +2146,7 @@ fn initSegments(self: *MachO) !void {
|
||||
self.segments.appendAssumeCapacity(segments[sorted.index]);
|
||||
}
|
||||
|
||||
for (&[_]*?u8{
|
||||
for (&[_]*?u4{
|
||||
&self.pagezero_seg_index,
|
||||
&self.text_seg_index,
|
||||
&self.linkedit_seg_index,
|
||||
@ -2163,7 +2164,7 @@ fn initSegments(self: *MachO) !void {
|
||||
for (slice.items(.header), slice.items(.segment_id)) |header, *seg_id| {
|
||||
const segname = header.segName();
|
||||
const segment_id = self.getSegmentByName(segname) orelse blk: {
|
||||
const segment_id = @as(u8, @intCast(self.segments.items.len));
|
||||
const segment_id: u4 = @intCast(self.segments.items.len);
|
||||
const protection = getSegmentProt(segname);
|
||||
try self.segments.append(gpa, .{
|
||||
.cmdsize = @sizeOf(macho.segment_command_64),
|
||||
@ -2526,10 +2527,9 @@ fn writeThunkWorker(self: *MachO, thunk: Thunk) void {

const doWork = struct {
fn doWork(th: Thunk, buffer: []u8, macho_file: *MachO) !void {
const off = try macho_file.cast(usize, th.value);
const size = th.size();
var stream = std.io.fixedBufferStream(buffer[off..][0..size]);
try th.write(macho_file, stream.writer());
var bw: std.io.BufferedWriter = undefined;
bw.initFixed(buffer[try macho_file.cast(usize, th.value)..][0..th.size()]);
try th.write(macho_file, &bw);
}
}.doWork;
const out = self.sections.items(.out)[thunk.out_n_sect].items;
@ -2556,15 +2556,16 @@ fn writeSyntheticSectionWorker(self: *MachO, sect_id: u8, out: []u8) void {

const doWork = struct {
fn doWork(macho_file: *MachO, tag: Tag, buffer: []u8) !void {
var stream = std.io.fixedBufferStream(buffer);
var bw: std.io.BufferedWriter = undefined;
bw.initFixed(buffer);
switch (tag) {
.eh_frame => eh_frame.write(macho_file, buffer),
.unwind_info => try macho_file.unwind_info.write(macho_file, buffer),
.got => try macho_file.got.write(macho_file, stream.writer()),
.stubs => try macho_file.stubs.write(macho_file, stream.writer()),
.la_symbol_ptr => try macho_file.la_symbol_ptr.write(macho_file, stream.writer()),
.tlv_ptr => try macho_file.tlv_ptr.write(macho_file, stream.writer()),
.objc_stubs => try macho_file.objc_stubs.write(macho_file, stream.writer()),
.unwind_info => try macho_file.unwind_info.write(macho_file, &bw),
.got => try macho_file.got.write(macho_file, &bw),
.stubs => try macho_file.stubs.write(macho_file, &bw),
.la_symbol_ptr => try macho_file.la_symbol_ptr.write(macho_file, &bw),
.tlv_ptr => try macho_file.tlv_ptr.write(macho_file, &bw),
.objc_stubs => try macho_file.objc_stubs.write(macho_file, &bw),
}
}
}.doWork;
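Here the destination is a preallocated section slice, so initFixed wraps the caller-owned bytes directly and nothing extra is allocated, replacing std.io.fixedBufferStream plus stream.writer(). A hedged sketch (assumes payload.len <= out.len):

fn fillSection(out: []u8, payload: []const u8) !void {
    var bw: std.io.BufferedWriter = undefined;
    bw.initFixed(out); // writes land in the section's own bytes
    try bw.writeAll(payload);
    try bw.splatByteAll(0, out.len - payload.len); // zero-pad the tail
}
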
@ -2605,8 +2606,9 @@ fn updateLazyBindSizeWorker(self: *MachO) void {
try macho_file.lazy_bind_section.updateSize(macho_file);
const sect_id = macho_file.stubs_helper_sect_index.?;
const out = &macho_file.sections.items(.out)[sect_id];
var stream = std.io.fixedBufferStream(out.items);
try macho_file.stubs_helper.write(macho_file, stream.writer());
var bw: std.io.BufferedWriter = undefined;
bw.initFixed(out.items);
try macho_file.stubs_helper.write(macho_file, &bw);
}
}.doWork;
doWork(self) catch |err|
@ -2665,23 +2667,21 @@ fn writeDyldInfo(self: *MachO) !void {
needed_size += cmd.lazy_bind_size;
needed_size += cmd.export_size;

const buffer = try gpa.alloc(u8, needed_size);
defer gpa.free(buffer);
@memset(buffer, 0);
var bw: std.io.BufferedWriter = undefined;
bw.initFixed(try gpa.alloc(u8, needed_size));
defer gpa.free(bw.buffer);
@memset(bw.buffer, 0);

var stream = std.io.fixedBufferStream(buffer);
const writer = stream.writer();

try self.rebase_section.write(writer);
try stream.seekTo(cmd.bind_off - base_off);
try self.bind_section.write(writer);
try stream.seekTo(cmd.weak_bind_off - base_off);
try self.weak_bind_section.write(writer);
try stream.seekTo(cmd.lazy_bind_off - base_off);
try self.lazy_bind_section.write(writer);
try stream.seekTo(cmd.export_off - base_off);
try self.export_trie.write(writer);
try self.pwriteAll(buffer, cmd.rebase_off);
try self.rebase_section.write(&bw);
bw.end = cmd.bind_off - base_off;
try self.bind_section.write(&bw);
bw.end = cmd.weak_bind_off - base_off;
try self.weak_bind_section.write(&bw);
bw.end = cmd.lazy_bind_off - base_off;
try self.lazy_bind_section.write(&bw);
bw.end = cmd.export_off - base_off;
try self.export_trie.write(&bw);
try self.pwriteAll(bw.buffer, cmd.rebase_off);
}

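With a fixed backing buffer, repositioning becomes an assignment to bw.end instead of a seekTo call on a stream. A hedged sketch of writing two records at known offsets inside one zeroed allocation (offsets and names are illustrative; the bw.end semantics are assumed from the hunk above):

fn writeTwoRecords(gpa: std.mem.Allocator, first: []const u8, second: []const u8, second_off: usize) ![]u8 {
    var bw: std.io.BufferedWriter = undefined;
    bw.initFixed(try gpa.alloc(u8, second_off + second.len));
    @memset(bw.buffer, 0);
    try bw.writeAll(first); // record one starts at offset 0 (assumes first.len <= second_off)
    bw.end = second_off; // was: try stream.seekTo(second_off)
    try bw.writeAll(second);
    return bw.buffer; // caller frees
}
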
pub fn writeDataInCode(self: *MachO) !void {
@ -2689,22 +2689,30 @@ pub fn writeDataInCode(self: *MachO) !void {
defer tracy.end();
const gpa = self.base.comp.gpa;
const cmd = self.data_in_code_cmd;
var buffer = try std.ArrayList(u8).initCapacity(gpa, self.data_in_code.size());
defer buffer.deinit();
try self.data_in_code.write(self, buffer.writer());
try self.pwriteAll(buffer.items, cmd.dataoff);

var bw: std.io.BufferedWriter = undefined;
bw.initFixed(try gpa.alloc(u8, self.data_in_code.size()));
defer gpa.free(bw.buffer);

try self.data_in_code.write(self, &bw);
assert(bw.end == bw.buffer.len);
try self.pwriteAll(bw.buffer, cmd.dataoff);
}

fn writeIndsymtab(self: *MachO) !void {
const tracy = trace(@src());
defer tracy.end();

const gpa = self.base.comp.gpa;
const cmd = self.dysymtab_cmd;
const needed_size = cmd.nindirectsyms * @sizeOf(u32);
var buffer = try std.ArrayList(u8).initCapacity(gpa, needed_size);
defer buffer.deinit();
try self.indsymtab.write(self, buffer.writer());
try self.pwriteAll(buffer.items, cmd.indirectsymoff);

var bw: std.io.BufferedWriter = undefined;
bw.initFixed(try gpa.alloc(u8, @sizeOf(u32) * cmd.nindirectsyms));
defer gpa.free(bw.buffer);

try self.indsymtab.write(self, &bw);
assert(bw.end == bw.buffer.len);
try self.pwriteAll(bw.buffer, cmd.indirectsymoff);
}

pub fn writeSymtabToFile(self: *MachO) !void {
@ -2814,15 +2822,13 @@ fn calcSymtabSize(self: *MachO) !void {
|
||||
}
|
||||
}
|
||||
|
||||
fn writeLoadCommands(self: *MachO) !struct { usize, usize, u64 } {
|
||||
fn writeLoadCommands(self: *MachO) anyerror!struct { usize, usize, u64 } {
|
||||
const comp = self.base.comp;
|
||||
const gpa = comp.gpa;
|
||||
const needed_size = try load_commands.calcLoadCommandsSize(self, false);
|
||||
const buffer = try gpa.alloc(u8, needed_size);
|
||||
defer gpa.free(buffer);
|
||||
|
||||
var stream = std.io.fixedBufferStream(buffer);
|
||||
const writer = stream.writer();
|
||||
var bw: std.io.BufferedWriter = undefined;
|
||||
bw.initFixed(try gpa.alloc(u8, try load_commands.calcLoadCommandsSize(self, false)));
|
||||
defer gpa.free(bw.buffer);
|
||||
|
||||
var ncmds: usize = 0;
|
||||
|
||||
@ -2831,26 +2837,26 @@ fn writeLoadCommands(self: *MachO) !struct { usize, usize, u64 } {
|
||||
const slice = self.sections.slice();
|
||||
var sect_id: usize = 0;
|
||||
for (self.segments.items) |seg| {
|
||||
try writer.writeStruct(seg);
|
||||
try bw.writeStruct(seg);
|
||||
for (slice.items(.header)[sect_id..][0..seg.nsects]) |header| {
|
||||
try writer.writeStruct(header);
|
||||
try bw.writeStruct(header);
|
||||
}
|
||||
sect_id += seg.nsects;
|
||||
}
|
||||
ncmds += self.segments.items.len;
|
||||
}
|
||||
|
||||
try writer.writeStruct(self.dyld_info_cmd);
|
||||
try bw.writeStruct(self.dyld_info_cmd);
|
||||
ncmds += 1;
|
||||
try writer.writeStruct(self.function_starts_cmd);
|
||||
try bw.writeStruct(self.function_starts_cmd);
|
||||
ncmds += 1;
|
||||
try writer.writeStruct(self.data_in_code_cmd);
|
||||
try bw.writeStruct(self.data_in_code_cmd);
|
||||
ncmds += 1;
|
||||
try writer.writeStruct(self.symtab_cmd);
|
||||
try bw.writeStruct(self.symtab_cmd);
|
||||
ncmds += 1;
|
||||
try writer.writeStruct(self.dysymtab_cmd);
|
||||
try bw.writeStruct(self.dysymtab_cmd);
|
||||
ncmds += 1;
|
||||
try load_commands.writeDylinkerLC(writer);
|
||||
try load_commands.writeDylinkerLC(&bw);
|
||||
ncmds += 1;
|
||||
|
||||
if (self.getInternalObject()) |obj| {
|
||||
@ -2861,7 +2867,7 @@ fn writeLoadCommands(self: *MachO) !struct { usize, usize, u64 } {
|
||||
0
|
||||
else
|
||||
@as(u32, @intCast(sym.getAddress(.{ .stubs = true }, self) - seg.vmaddr));
|
||||
try writer.writeStruct(macho.entry_point_command{
|
||||
try bw.writeStruct(macho.entry_point_command{
|
||||
.entryoff = entryoff,
|
||||
.stacksize = self.base.stack_size,
|
||||
});
|
||||
@ -2870,35 +2876,35 @@ fn writeLoadCommands(self: *MachO) !struct { usize, usize, u64 } {
|
||||
}
|
||||
|
||||
if (self.base.isDynLib()) {
|
||||
try load_commands.writeDylibIdLC(self, writer);
|
||||
try load_commands.writeDylibIdLC(self, &bw);
|
||||
ncmds += 1;
|
||||
}
|
||||
|
||||
for (self.rpath_list) |rpath| {
|
||||
try load_commands.writeRpathLC(rpath, writer);
|
||||
try load_commands.writeRpathLC(&bw, rpath);
|
||||
ncmds += 1;
|
||||
}
|
||||
if (comp.config.any_sanitize_thread) {
|
||||
const path = try comp.tsan_lib.?.full_object_path.toString(gpa);
|
||||
defer gpa.free(path);
|
||||
const rpath = std.fs.path.dirname(path) orelse ".";
|
||||
try load_commands.writeRpathLC(rpath, writer);
|
||||
try load_commands.writeRpathLC(&bw, rpath);
|
||||
ncmds += 1;
|
||||
}
|
||||
|
||||
try writer.writeStruct(macho.source_version_command{ .version = 0 });
|
||||
try bw.writeStruct(macho.source_version_command{ .version = 0 });
|
||||
ncmds += 1;
|
||||
|
||||
if (self.platform.isBuildVersionCompatible()) {
|
||||
try load_commands.writeBuildVersionLC(self.platform, self.sdk_version, writer);
|
||||
try load_commands.writeBuildVersionLC(&bw, self.platform, self.sdk_version);
|
||||
ncmds += 1;
|
||||
} else {
|
||||
try load_commands.writeVersionMinLC(self.platform, self.sdk_version, writer);
|
||||
try load_commands.writeVersionMinLC(&bw, self.platform, self.sdk_version);
|
||||
ncmds += 1;
|
||||
}
|
||||
|
||||
const uuid_cmd_offset = @sizeOf(macho.mach_header_64) + stream.pos;
|
||||
try writer.writeStruct(self.uuid_cmd);
|
||||
const uuid_cmd_offset = @sizeOf(macho.mach_header_64) + bw.count;
|
||||
try bw.writeStruct(self.uuid_cmd);
|
||||
ncmds += 1;
|
||||
|
||||
for (self.dylibs.items) |index| {
|
||||
@ -2916,20 +2922,19 @@ fn writeLoadCommands(self: *MachO) !struct { usize, usize, u64 } {
|
||||
.timestamp = dylib_id.timestamp,
|
||||
.current_version = dylib_id.current_version,
|
||||
.compatibility_version = dylib_id.compatibility_version,
|
||||
}, writer);
|
||||
}, &bw);
|
||||
ncmds += 1;
|
||||
}
|
||||
|
||||
if (self.requiresCodeSig()) {
|
||||
try writer.writeStruct(self.codesig_cmd);
|
||||
try bw.writeStruct(self.codesig_cmd);
|
||||
ncmds += 1;
|
||||
}
|
||||
|
||||
assert(stream.pos == needed_size);
|
||||
assert(bw.end == bw.buffer.len);
|
||||
try self.pwriteAll(bw.buffer, @sizeOf(macho.mach_header_64));
|
||||
|
||||
try self.pwriteAll(buffer, @sizeOf(macho.mach_header_64));
|
||||
|
||||
return .{ ncmds, buffer.len, uuid_cmd_offset };
|
||||
return .{ ncmds, bw.end, uuid_cmd_offset };
|
||||
}
|
||||
|
||||
fn writeHeader(self: *MachO, ncmds: usize, sizeofcmds: usize) !void {
|
||||
@ -3012,27 +3017,28 @@ pub fn writeCodeSignaturePadding(self: *MachO, code_sig: *CodeSignature) !void {
}

pub fn writeCodeSignature(self: *MachO, code_sig: *CodeSignature) !void {
const gpa = self.base.comp.gpa;
const seg = self.getTextSegment();
const offset = self.codesig_cmd.dataoff;

var buffer = std.ArrayList(u8).init(self.base.comp.gpa);
defer buffer.deinit();
try buffer.ensureTotalCapacityPrecise(code_sig.size());
var bw: std.io.BufferedWriter = undefined;
bw.initFixed(try gpa.alloc(u8, code_sig.size()));
defer gpa.free(bw.buffer);
try code_sig.writeAdhocSignature(self, .{
.file = self.base.file.?,
.exec_seg_base = seg.fileoff,
.exec_seg_limit = seg.filesize,
.file_size = offset,
.dylib = self.base.isDynLib(),
}, buffer.writer());
assert(buffer.items.len == code_sig.size());
}, &bw);

log.debug("writing code signature from 0x{x} to 0x{x}", .{
offset,
offset + buffer.items.len,
offset + bw.end,
});

try self.pwriteAll(buffer.items, offset);
assert(bw.end == bw.buffer.len);
try self.pwriteAll(bw.buffer, offset);
}

pub fn updateFunc(
|
||||
@ -3341,7 +3347,7 @@ fn initMetadata(self: *MachO, options: InitMetadataOptions) !void {
|
||||
}
|
||||
|
||||
const appendSect = struct {
|
||||
fn appendSect(macho_file: *MachO, sect_id: u8, seg_id: u8) void {
|
||||
fn appendSect(macho_file: *MachO, sect_id: u8, seg_id: u4) void {
|
||||
const sect = &macho_file.sections.items(.header)[sect_id];
|
||||
const seg = macho_file.segments.items[seg_id];
|
||||
sect.addr = seg.vmaddr;
|
||||
@ -3600,7 +3606,7 @@ inline fn requiresThunks(self: MachO) bool {
|
||||
}
|
||||
|
||||
pub fn isZigSegment(self: MachO, seg_id: u8) bool {
|
||||
inline for (&[_]?u8{
|
||||
inline for (&[_]?u4{
|
||||
self.zig_text_seg_index,
|
||||
self.zig_const_seg_index,
|
||||
self.zig_data_seg_index,
|
||||
@ -3648,9 +3654,9 @@ pub fn addSegment(self: *MachO, name: []const u8, opts: struct {
|
||||
fileoff: u64 = 0,
|
||||
filesize: u64 = 0,
|
||||
prot: macho.vm_prot_t = macho.PROT.NONE,
|
||||
}) error{OutOfMemory}!u8 {
|
||||
}) error{OutOfMemory}!u4 {
|
||||
const gpa = self.base.comp.gpa;
|
||||
const index = @as(u8, @intCast(self.segments.items.len));
|
||||
const index: u4 = @intCast(self.segments.items.len);
|
||||
try self.segments.append(gpa, .{
|
||||
.segname = makeStaticString(name),
|
||||
.vmaddr = opts.vmaddr,
|
||||
@ -3700,9 +3706,9 @@ pub fn makeStaticString(bytes: []const u8) [16]u8 {
|
||||
return buf;
|
||||
}
|
||||
|
||||
pub fn getSegmentByName(self: MachO, segname: []const u8) ?u8 {
|
||||
pub fn getSegmentByName(self: MachO, segname: []const u8) ?u4 {
|
||||
for (self.segments.items, 0..) |seg, i| {
|
||||
if (mem.eql(u8, segname, seg.segName())) return @as(u8, @intCast(i));
|
||||
if (mem.eql(u8, segname, seg.segName())) return @intCast(i);
|
||||
} else return null;
|
||||
}
|
||||
|
||||
@ -3791,7 +3797,7 @@ pub fn reportParseError2(
|
||||
const diags = &self.base.comp.link_diags;
|
||||
var err = try diags.addErrorWithNotes(1);
|
||||
try err.addMsg(format, args);
|
||||
err.addNote("while parsing {}", .{self.getFile(file_index).?.fmtPath()});
|
||||
err.addNote("while parsing {f}", .{self.getFile(file_index).?.fmtPath()});
|
||||
}
|
||||
|
||||
fn reportMissingDependencyError(
|
||||
@ -3806,7 +3812,7 @@ fn reportMissingDependencyError(
|
||||
var err = try diags.addErrorWithNotes(2 + checked_paths.len);
|
||||
try err.addMsg(format, args);
|
||||
err.addNote("while resolving {s}", .{path});
|
||||
err.addNote("a dependency of {}", .{self.getFile(parent).?.fmtPath()});
|
||||
err.addNote("a dependency of {f}", .{self.getFile(parent).?.fmtPath()});
|
||||
for (checked_paths) |p| {
|
||||
err.addNote("tried {s}", .{p});
|
||||
}
|
||||
@ -3823,7 +3829,7 @@ fn reportDependencyError(
|
||||
var err = try diags.addErrorWithNotes(2);
|
||||
try err.addMsg(format, args);
|
||||
err.addNote("while parsing {s}", .{path});
|
||||
err.addNote("a dependency of {}", .{self.getFile(parent).?.fmtPath()});
|
||||
err.addNote("a dependency of {f}", .{self.getFile(parent).?.fmtPath()});
|
||||
}
|
||||
|
||||
fn reportDuplicates(self: *MachO) error{ HasDuplicates, OutOfMemory }!void {
|
||||
@ -3853,12 +3859,12 @@ fn reportDuplicates(self: *MachO) error{ HasDuplicates, OutOfMemory }!void {
|
||||
|
||||
var err = try diags.addErrorWithNotes(nnotes + 1);
|
||||
try err.addMsg("duplicate symbol definition: {s}", .{sym.getName(self)});
|
||||
err.addNote("defined by {}", .{sym.getFile(self).?.fmtPath()});
|
||||
err.addNote("defined by {f}", .{sym.getFile(self).?.fmtPath()});
|
||||
|
||||
var inote: usize = 0;
|
||||
while (inote < @min(notes.items.len, max_notes)) : (inote += 1) {
|
||||
const file = self.getFile(notes.items[inote]).?;
|
||||
err.addNote("defined by {}", .{file.fmtPath()});
|
||||
err.addNote("defined by {f}", .{file.fmtPath()});
|
||||
}
|
||||
|
||||
if (notes.items.len > max_notes) {
|
||||
@ -3904,31 +3910,25 @@ pub fn dumpState(self: *MachO) std.fmt.Formatter(fmtDumpState) {
|
||||
return .{ .data = self };
|
||||
}
|
||||
|
||||
fn fmtDumpState(
|
||||
self: *MachO,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
_ = options;
|
||||
fn fmtDumpState(self: *MachO, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
|
||||
_ = unused_fmt_string;
|
||||
if (self.getZigObject()) |zo| {
|
||||
try writer.print("zig_object({d}) : {s}\n", .{ zo.index, zo.basename });
|
||||
try writer.print("{}{}\n", .{
|
||||
try bw.print("zig_object({d}) : {s}\n", .{ zo.index, zo.basename });
|
||||
try bw.print("{f}{f}\n", .{
|
||||
zo.fmtAtoms(self),
|
||||
zo.fmtSymtab(self),
|
||||
});
|
||||
}
|
||||
for (self.objects.items) |index| {
|
||||
const object = self.getFile(index).?.object;
|
||||
try writer.print("object({d}) : {} : has_debug({})", .{
|
||||
try bw.print("object({d}) : {f} : has_debug({})", .{
|
||||
index,
|
||||
object.fmtPath(),
|
||||
object.hasDebugInfo(),
|
||||
});
|
||||
if (!object.alive) try writer.writeAll(" : ([*])");
|
||||
try writer.writeByte('\n');
|
||||
try writer.print("{}{}{}{}{}\n", .{
|
||||
if (!object.alive) try bw.writeAll(" : ([*])");
|
||||
try bw.writeByte('\n');
|
||||
try bw.print("{f}{f}{f}{f}{f}\n", .{
|
||||
object.fmtAtoms(self),
|
||||
object.fmtCies(self),
|
||||
object.fmtFdes(self),
|
||||
@ -3938,48 +3938,42 @@ fn fmtDumpState(
|
||||
}
|
||||
for (self.dylibs.items) |index| {
|
||||
const dylib = self.getFile(index).?.dylib;
|
||||
try writer.print("dylib({d}) : {} : needed({}) : weak({})", .{
|
||||
try bw.print("dylib({d}) : {f} : needed({}) : weak({})", .{
|
||||
index,
|
||||
@as(Path, dylib.path),
|
||||
dylib.needed,
|
||||
dylib.weak,
|
||||
});
|
||||
if (!dylib.isAlive(self)) try writer.writeAll(" : ([*])");
|
||||
try writer.writeByte('\n');
|
||||
try writer.print("{}\n", .{dylib.fmtSymtab(self)});
|
||||
if (!dylib.isAlive(self)) try bw.writeAll(" : ([*])");
|
||||
try bw.writeByte('\n');
|
||||
try bw.print("{f}\n", .{dylib.fmtSymtab(self)});
|
||||
}
|
||||
if (self.getInternalObject()) |internal| {
|
||||
try writer.print("internal({d}) : internal\n", .{internal.index});
|
||||
try writer.print("{}{}\n", .{ internal.fmtAtoms(self), internal.fmtSymtab(self) });
|
||||
try bw.print("internal({d}) : internal\n", .{internal.index});
|
||||
try bw.print("{f}{f}\n", .{ internal.fmtAtoms(self), internal.fmtSymtab(self) });
|
||||
}
|
||||
try writer.writeAll("thunks\n");
|
||||
try bw.writeAll("thunks\n");
|
||||
for (self.thunks.items, 0..) |thunk, index| {
|
||||
try writer.print("thunk({d}) : {}\n", .{ index, thunk.fmt(self) });
|
||||
try bw.print("thunk({d}) : {f}\n", .{ index, thunk.fmt(self) });
|
||||
}
|
||||
try writer.print("stubs\n{}\n", .{self.stubs.fmt(self)});
|
||||
try writer.print("objc_stubs\n{}\n", .{self.objc_stubs.fmt(self)});
|
||||
try writer.print("got\n{}\n", .{self.got.fmt(self)});
|
||||
try writer.print("tlv_ptr\n{}\n", .{self.tlv_ptr.fmt(self)});
|
||||
try writer.writeByte('\n');
|
||||
try writer.print("sections\n{}\n", .{self.fmtSections()});
|
||||
try writer.print("segments\n{}\n", .{self.fmtSegments()});
|
||||
try bw.print("stubs\n{f}\n", .{self.stubs.fmt(self)});
|
||||
try bw.print("objc_stubs\n{f}\n", .{self.objc_stubs.fmt(self)});
|
||||
try bw.print("got\n{f}\n", .{self.got.fmt(self)});
|
||||
try bw.print("tlv_ptr\n{f}\n", .{self.tlv_ptr.fmt(self)});
|
||||
try bw.writeByte('\n');
|
||||
try bw.print("sections\n{f}\n", .{self.fmtSections()});
|
||||
try bw.print("segments\n{f}\n", .{self.fmtSegments()});
|
||||
}
|
||||
|
||||
fn fmtSections(self: *MachO) std.fmt.Formatter(formatSections) {
|
||||
return .{ .data = self };
|
||||
}
|
||||
|
||||
fn formatSections(
|
||||
self: *MachO,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
_ = options;
|
||||
fn formatSections(self: *MachO, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
|
||||
_ = unused_fmt_string;
|
||||
const slice = self.sections.slice();
|
||||
for (slice.items(.header), slice.items(.segment_id), 0..) |header, seg_id, i| {
|
||||
try writer.print(
|
||||
try bw.print(
|
||||
"sect({d}) : seg({d}) : {s},{s} : @{x} ({x}) : align({x}) : size({x}) : relocs({x};{d})\n",
|
||||
.{
|
||||
i, seg_id, header.segName(), header.sectName(), header.addr, header.offset,
|
||||
@ -3993,16 +3987,10 @@ fn fmtSegments(self: *MachO) std.fmt.Formatter(formatSegments) {
|
||||
return .{ .data = self };
|
||||
}
|
||||
|
||||
fn formatSegments(
|
||||
self: *MachO,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
_ = options;
|
||||
fn formatSegments(self: *MachO, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
|
||||
_ = unused_fmt_string;
|
||||
for (self.segments.items, 0..) |seg, i| {
|
||||
try writer.print("seg({d}) : {s} : @{x}-{x} ({x}-{x})\n", .{
|
||||
try bw.print("seg({d}) : {s} : @{x}-{x} ({x}-{x})\n", .{
|
||||
i, seg.segName(), seg.vmaddr, seg.vmaddr + seg.vmsize,
|
||||
seg.fileoff, seg.fileoff + seg.filesize,
|
||||
});
|
||||
@ -4013,13 +4001,7 @@ pub fn fmtSectType(tt: u8) std.fmt.Formatter(formatSectType) {
|
||||
return .{ .data = tt };
|
||||
}
|
||||
|
||||
fn formatSectType(
|
||||
tt: u8,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
_ = options;
|
||||
fn formatSectType(tt: u8, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
|
||||
_ = unused_fmt_string;
|
||||
const name = switch (tt) {
|
||||
macho.S_REGULAR => "REGULAR",
|
||||
@ -4044,9 +4026,9 @@ fn formatSectType(
|
||||
macho.S_THREAD_LOCAL_VARIABLE_POINTERS => "THREAD_LOCAL_VARIABLE_POINTERS",
|
||||
macho.S_THREAD_LOCAL_INIT_FUNCTION_POINTERS => "THREAD_LOCAL_INIT_FUNCTION_POINTERS",
|
||||
macho.S_INIT_FUNC_OFFSETS => "INIT_FUNC_OFFSETS",
|
||||
else => |x| return writer.print("UNKNOWN({x})", .{x}),
|
||||
else => |x| return bw.print("UNKNOWN({x})", .{x}),
|
||||
};
|
||||
try writer.print("{s}", .{name});
|
||||
try bw.print("{s}", .{name});
|
||||
}
|
||||
|
||||
const is_hot_update_compatible = switch (builtin.target.os.tag) {
|
||||
@ -4058,7 +4040,7 @@ const default_entry_symbol_name = "_main";
|
||||
|
||||
const Section = struct {
|
||||
header: macho.section_64,
|
||||
segment_id: u8,
|
||||
segment_id: u4,
|
||||
atoms: std.ArrayListUnmanaged(Ref) = .empty,
|
||||
free_list: std.ArrayListUnmanaged(Atom.Index) = .empty,
|
||||
last_atom_index: Atom.Index = 0,
|
||||
@ -4288,17 +4270,11 @@ pub const Platform = struct {
|
||||
cpu_arch: std.Target.Cpu.Arch,
|
||||
};
|
||||
|
||||
pub fn formatTarget(
|
||||
ctx: FmtCtx,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
pub fn formatTarget(ctx: FmtCtx, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
|
||||
_ = unused_fmt_string;
|
||||
_ = options;
|
||||
try writer.print("{s}-{s}", .{ @tagName(ctx.cpu_arch), @tagName(ctx.platform.os_tag) });
|
||||
try bw.print("{s}-{s}", .{ @tagName(ctx.cpu_arch), @tagName(ctx.platform.os_tag) });
|
||||
if (ctx.platform.abi != .none) {
|
||||
try writer.print("-{s}", .{@tagName(ctx.platform.abi)});
|
||||
try bw.print("-{s}", .{@tagName(ctx.platform.abi)});
|
||||
}
|
||||
}
|
||||
|
||||
@ -4390,7 +4366,7 @@ fn inferSdkVersion(comp: *Compilation, sdk_layout: SdkLayout) ?std.SemanticVersi
|
||||
// The file/property is also available with vendored libc.
|
||||
fn readSdkVersionFromSettings(arena: Allocator, dir: []const u8) ![]const u8 {
|
||||
const sdk_path = try fs.path.join(arena, &.{ dir, "SDKSettings.json" });
|
||||
const contents = try fs.cwd().readFileAlloc(arena, sdk_path, std.math.maxInt(u16));
|
||||
const contents = try fs.cwd().readFileAlloc(sdk_path, arena, .limited(std.math.maxInt(u16)));
|
||||
const parsed = try std.json.parseFromSlice(std.json.Value, arena, contents, .{});
|
||||
if (parsed.value.object.get("MinimalDisplayName")) |ver| return ver.string;
|
||||
return error.SdkVersionFailure;
|
||||
@ -4406,7 +4382,7 @@ fn parseSdkVersion(raw: []const u8) ?std.SemanticVersion {
|
||||
};
|
||||
|
||||
const parseNext = struct {
|
||||
fn parseNext(it: anytype) ?u16 {
|
||||
fn parseNext(it: *std.mem.SplitIterator(u8, .any)) ?u16 {
|
||||
const nn = it.next() orelse return null;
|
||||
return std.fmt.parseInt(u16, nn, 10) catch null;
|
||||
}
|
||||
@ -4507,15 +4483,9 @@ pub const Ref = struct {
|
||||
};
|
||||
}
|
||||
|
||||
pub fn format(
|
||||
ref: Ref,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
pub fn format(ref: Ref, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
|
||||
_ = unused_fmt_string;
|
||||
_ = options;
|
||||
try writer.print("%{d} in file({d})", .{ ref.index, ref.file });
|
||||
try bw.print("%{d} in file({d})", .{ ref.index, ref.file });
|
||||
}
|
||||
};
|
||||
|
||||
@ -5315,7 +5285,7 @@ fn createThunks(macho_file: *MachO, sect_id: u8) !void {
|
||||
try scanThunkRelocs(thunk_index, gpa, atoms[start..i], macho_file);
|
||||
thunk.value = advanceSection(header, thunk.size(), .@"4");
|
||||
|
||||
log.debug("thunk({d}) : {}", .{ thunk_index, thunk.fmt(macho_file) });
|
||||
log.debug("thunk({d}) : {f}", .{ thunk_index, thunk.fmt(macho_file) });
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -29,7 +29,7 @@ pub fn unpack(self: *Archive, macho_file: *MachO, path: Path, handle_index: File
|
||||
pos += @sizeOf(ar_hdr);
|
||||
|
||||
if (!mem.eql(u8, &hdr.ar_fmag, ARFMAG)) {
|
||||
return diags.failParse(path, "invalid header delimiter: expected '{s}', found '{s}'", .{
|
||||
return diags.failParse(path, "invalid header delimiter: expected '{f}', found '{f}'", .{
|
||||
std.fmt.fmtSliceEscapeLower(ARFMAG), std.fmt.fmtSliceEscapeLower(&hdr.ar_fmag),
|
||||
});
|
||||
}
|
||||
@ -71,53 +71,29 @@ pub fn unpack(self: *Archive, macho_file: *MachO, path: Path, handle_index: File
|
||||
.mtime = hdr.date() catch 0,
|
||||
};
|
||||
|
||||
log.debug("extracting object '{}' from archive '{}'", .{ object.path, path });
|
||||
log.debug("extracting object '{f}' from archive '{f}'", .{ object.path, path });
|
||||
|
||||
try self.objects.append(gpa, object);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn writeHeader(
|
||||
bw: *std.io.BufferedWriter,
|
||||
object_name: []const u8,
|
||||
object_size: usize,
|
||||
format: Format,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
var hdr: ar_hdr = .{
|
||||
.ar_name = undefined,
|
||||
.ar_date = undefined,
|
||||
.ar_uid = undefined,
|
||||
.ar_gid = undefined,
|
||||
.ar_mode = undefined,
|
||||
.ar_size = undefined,
|
||||
.ar_fmag = undefined,
|
||||
};
|
||||
@memset(mem.asBytes(&hdr), 0x20);
|
||||
inline for (@typeInfo(ar_hdr).@"struct".fields) |field| {
|
||||
var stream = std.io.fixedBufferStream(&@field(hdr, field.name));
|
||||
stream.writer().print("0", .{}) catch unreachable;
|
||||
}
|
||||
) anyerror!void {
|
||||
var hdr: ar_hdr = undefined;
|
||||
@memset(mem.asBytes(&hdr), ' ');
|
||||
inline for (@typeInfo(ar_hdr).@"struct".fields) |field| @field(hdr, field.name)[0] = '0';
|
||||
@memcpy(&hdr.ar_fmag, ARFMAG);
|
||||
|
||||
const object_name_len = mem.alignForward(usize, object_name.len + 1, ptrWidth(format));
|
||||
_ = std.fmt.bufPrint(&hdr.ar_name, "#1/{d}", .{object_name_len}) catch unreachable;
|
||||
const total_object_size = object_size + object_name_len;
|
||||
|
||||
{
|
||||
var stream = std.io.fixedBufferStream(&hdr.ar_name);
|
||||
stream.writer().print("#1/{d}", .{object_name_len}) catch unreachable;
|
||||
}
|
||||
{
|
||||
var stream = std.io.fixedBufferStream(&hdr.ar_size);
|
||||
stream.writer().print("{d}", .{total_object_size}) catch unreachable;
|
||||
}
|
||||
|
||||
try writer.writeAll(mem.asBytes(&hdr));
|
||||
try writer.print("{s}\x00", .{object_name});
|
||||
|
||||
const padding = object_name_len - object_name.len - 1;
|
||||
if (padding > 0) {
|
||||
try writer.writeByteNTimes(0, padding);
|
||||
}
|
||||
_ = std.fmt.bufPrint(&hdr.ar_size, "{d}", .{total_object_size}) catch unreachable;
|
||||
try bw.writeStruct(hdr);
|
||||
try bw.writeAll(object_name);
|
||||
try bw.splatByteAll(0, object_name_len - object_name.len);
|
||||
}
|
||||
|
||||
// Archive files start with the ARMAG identifying string. Then follows a
|
||||
@ -201,12 +177,12 @@ pub const ArSymtab = struct {
|
||||
return ptr_width + ar.entries.items.len * 2 * ptr_width + ptr_width + mem.alignForward(usize, ar.strtab.buffer.items.len, ptr_width);
|
||||
}
|
||||
|
||||
pub fn write(ar: ArSymtab, format: Format, macho_file: *MachO, writer: anytype) !void {
|
||||
pub fn write(ar: ArSymtab, bw: *std.io.BufferedWriter, format: Format, macho_file: *MachO) anyerror!void {
|
||||
const ptr_width = ptrWidth(format);
|
||||
// Header
|
||||
try writeHeader(SYMDEF, ar.size(format), format, writer);
|
||||
try writeHeader(bw, SYMDEF, ar.size(format), format);
|
||||
// Symtab size
|
||||
try writeInt(format, ar.entries.items.len * 2 * ptr_width, writer);
|
||||
try writeInt(bw, format, ar.entries.items.len * 2 * ptr_width);
|
||||
// Symtab entries
|
||||
for (ar.entries.items) |entry| {
|
||||
const file_off = switch (macho_file.getFile(entry.file).?) {
|
||||
@ -215,19 +191,16 @@ pub const ArSymtab = struct {
|
||||
else => unreachable,
|
||||
};
|
||||
// Name offset
|
||||
try writeInt(format, entry.off, writer);
|
||||
try writeInt(bw, format, entry.off);
|
||||
// File offset
|
||||
try writeInt(format, file_off, writer);
|
||||
try writeInt(bw, format, file_off);
|
||||
}
|
||||
// Strtab size
|
||||
const strtab_size = mem.alignForward(usize, ar.strtab.buffer.items.len, ptr_width);
|
||||
const padding = strtab_size - ar.strtab.buffer.items.len;
|
||||
try writeInt(format, strtab_size, writer);
|
||||
try writeInt(bw, format, strtab_size);
|
||||
// Strtab
|
||||
try writer.writeAll(ar.strtab.buffer.items);
|
||||
if (padding > 0) {
|
||||
try writer.writeByteNTimes(0, padding);
|
||||
}
|
||||
try bw.writeAll(ar.strtab.buffer.items);
|
||||
try bw.splatByteAll(0, strtab_size - ar.strtab.buffer.items.len);
|
||||
}
|
||||
|
||||
const FormatContext = struct {
|
||||
@ -239,20 +212,14 @@ pub const ArSymtab = struct {
|
||||
return .{ .data = .{ .ar = ar, .macho_file = macho_file } };
|
||||
}
|
||||
|
||||
fn format2(
|
||||
ctx: FormatContext,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
fn format2(ctx: FormatContext, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
|
||||
_ = unused_fmt_string;
|
||||
_ = options;
|
||||
const ar = ctx.ar;
|
||||
const macho_file = ctx.macho_file;
|
||||
for (ar.entries.items, 0..) |entry, i| {
|
||||
const name = ar.strtab.getAssumeExists(entry.off);
|
||||
const file = macho_file.getFile(entry.file).?;
|
||||
try writer.print(" {d}: {s} in file({d})({})\n", .{ i, name, entry.file, file.fmtPath() });
|
||||
try bw.print(" {d}: {s} in file({d})({f})\n", .{ i, name, entry.file, file.fmtPath() });
|
||||
}
|
||||
}
|
||||
|
||||
@ -282,10 +249,10 @@ pub fn ptrWidth(format: Format) usize {
|
||||
};
|
||||
}
|
||||
|
||||
pub fn writeInt(format: Format, value: u64, writer: anytype) !void {
|
||||
pub fn writeInt(bw: *std.io.BufferedWriter, format: Format, value: u64) anyerror!void {
|
||||
switch (format) {
|
||||
.p32 => try writer.writeInt(u32, std.math.cast(u32, value) orelse return error.Overflow, .little),
|
||||
.p64 => try writer.writeInt(u64, value, .little),
|
||||
.p32 => try bw.writeInt(u32, std.math.cast(u32, value) orelse return error.Overflow, .little),
|
||||
.p64 => try bw.writeInt(u64, value, .little),
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -580,8 +580,10 @@ pub fn resolveRelocs(self: Atom, macho_file: *MachO, buffer: []u8) !void {
|
||||
|
||||
relocs_log.debug("{x}: {s}", .{ self.value, name });
|
||||
|
||||
var bw: std.io.BufferedWriter = undefined;
|
||||
bw.initFixed(buffer);
|
||||
|
||||
var has_error = false;
|
||||
var stream = std.io.fixedBufferStream(buffer);
|
||||
var i: usize = 0;
|
||||
while (i < relocs.len) : (i += 1) {
|
||||
const rel = relocs[i];
|
||||
@ -592,30 +594,28 @@ pub fn resolveRelocs(self: Atom, macho_file: *MachO, buffer: []u8) !void {
|
||||
if (rel.getTargetSymbol(self, macho_file).getFile(macho_file) == null) continue;
|
||||
}
|
||||
|
||||
try stream.seekTo(rel_offset);
|
||||
self.resolveRelocInner(rel, subtractor, buffer, macho_file, stream.writer()) catch |err| {
|
||||
switch (err) {
|
||||
error.RelaxFail => {
|
||||
const target = switch (rel.tag) {
|
||||
.@"extern" => rel.getTargetSymbol(self, macho_file).getName(macho_file),
|
||||
.local => rel.getTargetAtom(self, macho_file).getName(macho_file),
|
||||
};
|
||||
try macho_file.reportParseError2(
|
||||
file.getIndex(),
|
||||
"{s}: 0x{x}: 0x{x}: failed to relax relocation: type {}, target {s}",
|
||||
.{
|
||||
name,
|
||||
self.getAddress(macho_file),
|
||||
rel.offset,
|
||||
rel.fmtPretty(macho_file.getTarget().cpu.arch),
|
||||
target,
|
||||
},
|
||||
);
|
||||
has_error = true;
|
||||
},
|
||||
error.RelaxFailUnexpectedInstruction => has_error = true,
|
||||
else => |e| return e,
|
||||
}
|
||||
bw.end = std.math.cast(usize, rel_offset) orelse return error.Overflow;
|
||||
self.resolveRelocInner(rel, subtractor, buffer, macho_file, &bw) catch |err| switch (@as(ResolveError, @errorCast(err))) {
|
||||
error.RelaxFail => {
|
||||
const target = switch (rel.tag) {
|
||||
.@"extern" => rel.getTargetSymbol(self, macho_file).getName(macho_file),
|
||||
.local => rel.getTargetAtom(self, macho_file).getName(macho_file),
|
||||
};
|
||||
try macho_file.reportParseError2(
|
||||
file.getIndex(),
|
||||
"{s}: 0x{x}: 0x{x}: failed to relax relocation: type {f}, target {s}",
|
||||
.{
|
||||
name,
|
||||
self.getAddress(macho_file),
|
||||
rel.offset,
|
||||
rel.fmtPretty(macho_file.getTarget().cpu.arch),
|
||||
target,
|
||||
},
|
||||
);
|
||||
has_error = true;
|
||||
},
|
||||
error.RelaxFailUnexpectedInstruction => has_error = true,
|
||||
else => |e| return e,
|
||||
};
|
||||
}
|
||||
|
||||
@ -638,8 +638,8 @@ fn resolveRelocInner(
|
||||
subtractor: ?Relocation,
|
||||
code: []u8,
|
||||
macho_file: *MachO,
|
||||
writer: anytype,
|
||||
) ResolveError!void {
|
||||
bw: *std.io.BufferedWriter,
|
||||
) anyerror!void {
|
||||
const t = &macho_file.base.comp.root_mod.resolved_target.result;
|
||||
const cpu_arch = t.cpu.arch;
|
||||
const rel_offset = math.cast(usize, rel.offset - self.off) orelse return error.Overflow;
|
||||
@ -653,7 +653,7 @@ fn resolveRelocInner(
|
||||
const divExact = struct {
|
||||
fn divExact(atom: Atom, r: Relocation, num: u12, den: u12, ctx: *MachO) !u12 {
|
||||
return math.divExact(u12, num, den) catch {
|
||||
try ctx.reportParseError2(atom.getFile(ctx).getIndex(), "{s}: unexpected remainder when resolving {s} at offset 0x{x}", .{
|
||||
try ctx.reportParseError2(atom.getFile(ctx).getIndex(), "{s}: unexpected remainder when resolving {f} at offset 0x{x}", .{
|
||||
atom.getName(ctx),
|
||||
r.fmtPretty(ctx.getTarget().cpu.arch),
|
||||
r.offset,
|
||||
@ -664,14 +664,14 @@ fn resolveRelocInner(
|
||||
}.divExact;
|
||||
|
||||
switch (rel.tag) {
|
||||
.local => relocs_log.debug(" {x}<+{d}>: {}: [=> {x}] atom({d})", .{
|
||||
.local => relocs_log.debug(" {x}<+{d}>: {f}: [=> {x}] atom({d})", .{
|
||||
P,
|
||||
rel_offset,
|
||||
rel.fmtPretty(cpu_arch),
|
||||
S + A - SUB,
|
||||
rel.getTargetAtom(self, macho_file).atom_index,
|
||||
}),
|
||||
.@"extern" => relocs_log.debug(" {x}<+{d}>: {}: [=> {x}] G({x}) ({s})", .{
|
||||
.@"extern" => relocs_log.debug(" {x}<+{d}>: {f}: [=> {x}] G({x}) ({s})", .{
|
||||
P,
|
||||
rel_offset,
|
||||
rel.fmtPretty(cpu_arch),
|
||||
@ -690,14 +690,14 @@ fn resolveRelocInner(
|
||||
if (rel.tag == .@"extern") {
|
||||
const sym = rel.getTargetSymbol(self, macho_file);
|
||||
if (sym.isTlvInit(macho_file)) {
|
||||
try writer.writeInt(u64, @intCast(S - TLS), .little);
|
||||
try bw.writeInt(u64, @intCast(S - TLS), .little);
|
||||
return;
|
||||
}
|
||||
if (sym.flags.import) return;
|
||||
}
|
||||
try writer.writeInt(u64, @bitCast(S + A - SUB), .little);
|
||||
try bw.writeInt(u64, @bitCast(S + A - SUB), .little);
|
||||
} else if (rel.meta.length == 2) {
|
||||
try writer.writeInt(u32, @bitCast(@as(i32, @truncate(S + A - SUB))), .little);
|
||||
try bw.writeInt(u32, @bitCast(@as(i32, @truncate(S + A - SUB))), .little);
|
||||
} else unreachable;
|
||||
},
|
||||
|
||||
@ -705,7 +705,7 @@ fn resolveRelocInner(
|
||||
assert(rel.tag == .@"extern");
|
||||
assert(rel.meta.length == 2);
|
||||
assert(rel.meta.pcrel);
|
||||
try writer.writeInt(i32, @intCast(G + A - P), .little);
|
||||
try bw.writeInt(i32, @intCast(G + A - P), .little);
|
||||
},
|
||||
|
||||
.branch => {
|
||||
@ -714,7 +714,7 @@ fn resolveRelocInner(
|
||||
assert(rel.tag == .@"extern");
|
||||
|
||||
switch (cpu_arch) {
|
||||
.x86_64 => try writer.writeInt(i32, @intCast(S + A - P), .little),
|
||||
.x86_64 => try bw.writeInt(i32, @intCast(S + A - P), .little),
|
||||
.aarch64 => {
|
||||
const disp: i28 = math.cast(i28, S + A - P) orelse blk: {
|
||||
const thunk = self.getThunk(macho_file);
|
||||
@ -732,10 +732,10 @@ fn resolveRelocInner(
|
||||
assert(rel.meta.length == 2);
|
||||
assert(rel.meta.pcrel);
|
||||
if (rel.getTargetSymbol(self, macho_file).getSectionFlags().has_got) {
|
||||
try writer.writeInt(i32, @intCast(G + A - P), .little);
|
||||
try bw.writeInt(i32, @intCast(G + A - P), .little);
|
||||
} else {
|
||||
try x86_64.relaxGotLoad(self, code[rel_offset - 3 ..], rel, macho_file);
|
||||
try writer.writeInt(i32, @intCast(S + A - P), .little);
|
||||
try bw.writeInt(i32, @intCast(S + A - P), .little);
|
||||
}
|
||||
},
|
||||
|
||||
@ -746,17 +746,17 @@ fn resolveRelocInner(
|
||||
const sym = rel.getTargetSymbol(self, macho_file);
|
||||
if (sym.getSectionFlags().tlv_ptr) {
|
||||
const S_: i64 = @intCast(sym.getTlvPtrAddress(macho_file));
|
||||
try writer.writeInt(i32, @intCast(S_ + A - P), .little);
|
||||
try bw.writeInt(i32, @intCast(S_ + A - P), .little);
|
||||
} else {
|
||||
try x86_64.relaxTlv(code[rel_offset - 3 ..], t);
|
||||
try writer.writeInt(i32, @intCast(S + A - P), .little);
|
||||
try bw.writeInt(i32, @intCast(S + A - P), .little);
|
||||
}
|
||||
},
|
||||
|
||||
.signed, .signed1, .signed2, .signed4 => {
|
||||
assert(rel.meta.length == 2);
|
||||
assert(rel.meta.pcrel);
|
||||
try writer.writeInt(i32, @intCast(S + A - P), .little);
|
||||
try bw.writeInt(i32, @intCast(S + A - P), .little);
|
||||
},
|
||||
|
||||
.page,
|
||||
@ -808,7 +808,7 @@ fn resolveRelocInner(
|
||||
2 => try divExact(self, rel, @truncate(target), 4, macho_file),
|
||||
3 => try divExact(self, rel, @truncate(target), 8, macho_file),
|
||||
};
|
||||
try writer.writeInt(u32, inst.toU32(), .little);
|
||||
try bw.writeInt(u32, inst.toU32(), .little);
|
||||
}
|
||||
},
|
||||
|
||||
@ -886,7 +886,7 @@ fn resolveRelocInner(
|
||||
.sf = @as(u1, @truncate(reg_info.size)),
|
||||
},
|
||||
};
|
||||
try writer.writeInt(u32, inst.toU32(), .little);
|
||||
try bw.writeInt(u32, inst.toU32(), .little);
|
||||
},
|
||||
}
|
||||
}
|
||||
@ -900,19 +900,19 @@ const x86_64 = struct {
|
||||
switch (old_inst.encoding.mnemonic) {
|
||||
.mov => {
|
||||
const inst = Instruction.new(old_inst.prefix, .lea, &old_inst.ops, t) catch return error.RelaxFail;
|
||||
relocs_log.debug(" relaxing {} => {}", .{ old_inst.encoding, inst.encoding });
|
||||
relocs_log.debug(" relaxing {f} => {f}", .{ old_inst.encoding, inst.encoding });
|
||||
encode(&.{inst}, code) catch return error.RelaxFail;
|
||||
},
|
||||
else => |x| {
|
||||
var err = try diags.addErrorWithNotes(2);
|
||||
try err.addMsg("{s}: 0x{x}: 0x{x}: failed to relax relocation of type {}", .{
|
||||
try err.addMsg("{s}: 0x{x}: 0x{x}: failed to relax relocation of type {f}", .{
|
||||
self.getName(macho_file),
|
||||
self.getAddress(macho_file),
|
||||
rel.offset,
|
||||
rel.fmtPretty(.x86_64),
|
||||
});
|
||||
err.addNote("expected .mov instruction but found .{s}", .{@tagName(x)});
|
||||
err.addNote("while parsing {}", .{self.getFile(macho_file).fmtPath()});
|
||||
err.addNote("while parsing {f}", .{self.getFile(macho_file).fmtPath()});
|
||||
return error.RelaxFailUnexpectedInstruction;
|
||||
},
|
||||
}
|
||||
@ -924,7 +924,7 @@ const x86_64 = struct {
|
||||
switch (old_inst.encoding.mnemonic) {
|
||||
.mov => {
|
||||
const inst = Instruction.new(old_inst.prefix, .lea, &old_inst.ops, t) catch return error.RelaxFail;
|
||||
relocs_log.debug(" relaxing {} => {}", .{ old_inst.encoding, inst.encoding });
|
||||
relocs_log.debug(" relaxing {f} => {f}", .{ old_inst.encoding, inst.encoding });
|
||||
encode(&.{inst}, code) catch return error.RelaxFail;
|
||||
},
|
||||
else => return error.RelaxFail,
|
||||
@ -938,11 +938,9 @@ const x86_64 = struct {
|
||||
}
|
||||
|
||||
fn encode(insts: []const Instruction, code: []u8) !void {
|
||||
var stream = std.io.fixedBufferStream(code);
|
||||
const writer = stream.writer();
|
||||
for (insts) |inst| {
|
||||
try inst.encode(writer, .{});
|
||||
}
|
||||
var bw: std.io.BufferedWriter = undefined;
|
||||
bw.initFixed(code);
|
||||
for (insts) |inst| try inst.encode(&bw, .{});
|
||||
}
|
||||
|
||||
const bits = @import("../../arch/x86_64/bits.zig");
|
||||
@ -1003,7 +1001,7 @@ pub fn writeRelocs(self: Atom, macho_file: *MachO, code: []u8, buffer: []macho.r
|
||||
}
|
||||
|
||||
switch (rel.tag) {
|
||||
.local => relocs_log.debug(" {}: [{x} => {d}({s},{s})] + {x}", .{
|
||||
.local => relocs_log.debug(" {f}: [{x} => {d}({s},{s})] + {x}", .{
|
||||
rel.fmtPretty(cpu_arch),
|
||||
r_address,
|
||||
r_symbolnum,
|
||||
@ -1011,7 +1009,7 @@ pub fn writeRelocs(self: Atom, macho_file: *MachO, code: []u8, buffer: []macho.r
|
||||
macho_file.sections.items(.header)[r_symbolnum - 1].sectName(),
|
||||
addend,
|
||||
}),
|
||||
.@"extern" => relocs_log.debug(" {}: [{x} => {d}({s})] + {x}", .{
|
||||
.@"extern" => relocs_log.debug(" {f}: [{x} => {d}({s})] + {x}", .{
|
||||
rel.fmtPretty(cpu_arch),
|
||||
r_address,
|
||||
r_symbolnum,
|
||||
@ -1142,33 +1140,27 @@ const FormatContext = struct {
|
||||
macho_file: *MachO,
|
||||
};
|
||||
|
||||
fn format2(
|
||||
ctx: FormatContext,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
_ = options;
|
||||
fn format2(ctx: FormatContext, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
|
||||
_ = unused_fmt_string;
|
||||
const atom = ctx.atom;
|
||||
const macho_file = ctx.macho_file;
|
||||
const file = atom.getFile(macho_file);
|
||||
try writer.print("atom({d}) : {s} : @{x} : sect({d}) : align({x}) : size({x}) : nreloc({d}) : thunk({d})", .{
|
||||
try bw.print("atom({d}) : {s} : @{x} : sect({d}) : align({x}) : size({x}) : nreloc({d}) : thunk({d})", .{
|
||||
atom.atom_index, atom.getName(macho_file), atom.getAddress(macho_file),
|
||||
atom.out_n_sect, atom.alignment, atom.size,
|
||||
atom.getRelocs(macho_file).len, atom.getExtra(macho_file).thunk,
|
||||
});
|
||||
if (!atom.isAlive()) try writer.writeAll(" : [*]");
|
||||
if (!atom.isAlive()) try bw.writeAll(" : [*]");
|
||||
if (atom.getUnwindRecords(macho_file).len > 0) {
|
||||
try writer.writeAll(" : unwind{ ");
|
||||
try bw.writeAll(" : unwind{ ");
|
||||
const extra = atom.getExtra(macho_file);
|
||||
for (atom.getUnwindRecords(macho_file), extra.unwind_index..) |index, i| {
|
||||
const rec = file.object.getUnwindRecord(index);
|
||||
try writer.print("{d}", .{index});
|
||||
if (!rec.alive) try writer.writeAll("([*])");
|
||||
if (i < extra.unwind_index + extra.unwind_count - 1) try writer.writeAll(", ");
|
||||
try bw.print("{d}", .{index});
|
||||
if (!rec.alive) try bw.writeAll("([*])");
|
||||
if (i < extra.unwind_index + extra.unwind_count - 1) try bw.writeAll(", ");
|
||||
}
|
||||
try writer.writeAll(" }");
|
||||
try bw.writeAll(" }");
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -247,7 +247,7 @@ pub fn deinit(self: *CodeSignature, allocator: Allocator) void {
|
||||
pub fn addEntitlements(self: *CodeSignature, allocator: Allocator, path: []const u8) !void {
|
||||
const file = try fs.cwd().openFile(path, .{});
|
||||
defer file.close();
|
||||
const inner = try file.readToEndAlloc(allocator, std.math.maxInt(u32));
|
||||
const inner = try file.readToEndAlloc(allocator, .unlimited);
|
||||
self.entitlements = .{ .inner = inner };
|
||||
}
|
||||
|
||||
@ -304,10 +304,12 @@ pub fn writeAdhocSignature(
|
||||
var hash: [hash_size]u8 = undefined;
|
||||
|
||||
if (self.requirements) |*req| {
|
||||
var buf = std.ArrayList(u8).init(allocator);
|
||||
defer buf.deinit();
|
||||
try req.write(buf.writer());
|
||||
Sha256.hash(buf.items, &hash, .{});
|
||||
var aw: std.io.AllocatingWriter = undefined;
|
||||
aw.init(allocator);
|
||||
defer aw.deinit();
|
||||
|
||||
try req.write(&aw.buffered_writer);
|
||||
Sha256.hash(aw.getWritten(), &hash, .{});
|
||||
self.code_directory.addSpecialHash(req.slotType(), hash);
|
||||
|
||||
try blobs.append(.{ .requirements = req });
|
||||
@ -316,10 +318,12 @@ pub fn writeAdhocSignature(
|
||||
}
|
||||
|
||||
if (self.entitlements) |*ents| {
|
||||
var buf = std.ArrayList(u8).init(allocator);
|
||||
defer buf.deinit();
|
||||
try ents.write(buf.writer());
|
||||
Sha256.hash(buf.items, &hash, .{});
|
||||
var aw: std.io.AllocatingWriter = undefined;
|
||||
aw.init(allocator);
|
||||
defer aw.deinit();
|
||||
|
||||
try ents.write(&aw.buffered_writer);
|
||||
Sha256.hash(aw.getWritten(), &hash, .{});
|
||||
self.code_directory.addSpecialHash(ents.slotType(), hash);
|
||||
|
||||
try blobs.append(.{ .entitlements = ents });
|
||||
|
||||
@ -269,18 +269,15 @@ fn finalizeDwarfSegment(self: *DebugSymbols, macho_file: *MachO) void {
|
||||
|
||||
fn writeLoadCommands(self: *DebugSymbols, macho_file: *MachO) !struct { usize, usize } {
|
||||
const gpa = self.allocator;
|
||||
const needed_size = load_commands.calcLoadCommandsSizeDsym(macho_file, self);
|
||||
const buffer = try gpa.alloc(u8, needed_size);
|
||||
defer gpa.free(buffer);
|
||||
|
||||
var stream = std.io.fixedBufferStream(buffer);
|
||||
const writer = stream.writer();
|
||||
var bw: std.io.BufferedWriter = undefined;
|
||||
bw.initFixed(try gpa.alloc(u8, load_commands.calcLoadCommandsSizeDsym(macho_file, self)));
|
||||
defer gpa.free(bw.buffer);
|
||||
|
||||
var ncmds: usize = 0;
|
||||
|
||||
// UUID comes first presumably to speed up lookup by the consumer like lldb.
|
||||
@memcpy(&self.uuid_cmd.uuid, &macho_file.uuid_cmd.uuid);
|
||||
try writer.writeStruct(self.uuid_cmd);
|
||||
try bw.writeStruct(self.uuid_cmd);
|
||||
ncmds += 1;
|
||||
|
||||
// Segment and section load commands
|
||||
@ -293,11 +290,11 @@ fn writeLoadCommands(self: *DebugSymbols, macho_file: *MachO) !struct { usize, u
|
||||
var out_seg = seg;
|
||||
out_seg.fileoff = 0;
|
||||
out_seg.filesize = 0;
|
||||
try writer.writeStruct(out_seg);
|
||||
try bw.writeStruct(out_seg);
|
||||
for (slice.items(.header)[sect_id..][0..seg.nsects]) |header| {
|
||||
var out_header = header;
|
||||
out_header.offset = 0;
|
||||
try writer.writeStruct(out_header);
|
||||
try bw.writeStruct(out_header);
|
||||
}
|
||||
sect_id += seg.nsects;
|
||||
}
|
||||
@ -306,23 +303,22 @@ fn writeLoadCommands(self: *DebugSymbols, macho_file: *MachO) !struct { usize, u
|
||||
// Next, commit DSYM's __LINKEDIT and __DWARF segments headers.
|
||||
sect_id = 0;
|
||||
for (self.segments.items) |seg| {
|
||||
try writer.writeStruct(seg);
|
||||
try bw.writeStruct(seg);
|
||||
for (self.sections.items[sect_id..][0..seg.nsects]) |header| {
|
||||
try writer.writeStruct(header);
|
||||
try bw.writeStruct(header);
|
||||
}
|
||||
sect_id += seg.nsects;
|
||||
}
|
||||
ncmds += self.segments.items.len;
|
||||
}
|
||||
|
||||
try writer.writeStruct(self.symtab_cmd);
|
||||
try bw.writeStruct(self.symtab_cmd);
|
||||
ncmds += 1;
|
||||
|
||||
assert(stream.pos == needed_size);
|
||||
assert(bw.end == bw.buffer.len);
|
||||
try self.file.?.pwriteAll(bw.buffer, @sizeOf(macho.mach_header_64));
|
||||
|
||||
try self.file.?.pwriteAll(buffer, @sizeOf(macho.mach_header_64));
|
||||
|
||||
return .{ ncmds, buffer.len };
|
||||
return .{ ncmds, bw.end };
|
||||
}
|
||||
|
||||
fn writeHeader(self: *DebugSymbols, macho_file: *MachO, ncmds: usize, sizeofcmds: usize) !void {
|
||||
|
||||
@ -81,7 +81,7 @@ pub const InfoReader = struct {
|
||||
.dwarf64 => 12,
|
||||
} + cuh_length;
|
||||
while (p.pos < end_pos) {
|
||||
const di_code = try p.readUleb128(u64);
|
||||
const di_code = try p.readLeb128(u64);
|
||||
if (di_code == 0) return error.UnexpectedEndOfFile;
|
||||
if (di_code == code) return;
|
||||
|
||||
@ -174,14 +174,14 @@ pub const InfoReader = struct {
|
||||
dw.FORM.block1 => try p.readByte(),
|
||||
dw.FORM.block2 => try p.readInt(u16),
|
||||
dw.FORM.block4 => try p.readInt(u32),
|
||||
dw.FORM.block => try p.readUleb128(u64),
|
||||
dw.FORM.block => try p.readLeb128(u64),
|
||||
else => unreachable,
|
||||
};
|
||||
return p.readNBytes(len);
|
||||
}
|
||||
|
||||
pub fn readExprLoc(p: *InfoReader) ![]const u8 {
|
||||
const len: u64 = try p.readUleb128(u64);
|
||||
const len: u64 = try p.readLeb128(u64);
|
||||
return p.readNBytes(len);
|
||||
}
|
||||
|
||||
@ -191,8 +191,8 @@ pub const InfoReader = struct {
|
||||
dw.FORM.data2, dw.FORM.ref2 => try p.readInt(u16),
|
||||
dw.FORM.data4, dw.FORM.ref4 => try p.readInt(u32),
|
||||
dw.FORM.data8, dw.FORM.ref8, dw.FORM.ref_sig8 => try p.readInt(u64),
|
||||
dw.FORM.udata, dw.FORM.ref_udata => try p.readUleb128(u64),
|
||||
dw.FORM.sdata => @bitCast(try p.readIleb128(i64)),
|
||||
dw.FORM.udata, dw.FORM.ref_udata => try p.readLeb128(u64),
|
||||
dw.FORM.sdata => @bitCast(try p.readLeb128(i64)),
|
||||
else => return error.UnhandledConstantForm,
|
||||
};
|
||||
}
|
||||
@ -203,7 +203,7 @@ pub const InfoReader = struct {
|
||||
dw.FORM.strx2, dw.FORM.addrx2 => try p.readInt(u16),
|
||||
dw.FORM.strx3, dw.FORM.addrx3 => error.UnhandledForm,
|
||||
dw.FORM.strx4, dw.FORM.addrx4 => try p.readInt(u32),
|
||||
dw.FORM.strx, dw.FORM.addrx => try p.readUleb128(u64),
|
||||
dw.FORM.strx, dw.FORM.addrx => try p.readLeb128(u64),
|
||||
else => return error.UnhandledIndexForm,
|
||||
};
|
||||
}
|
||||
@ -272,20 +272,11 @@ pub const InfoReader = struct {
|
||||
};
|
||||
}
|
||||
|
||||
pub fn readUleb128(p: *InfoReader, comptime Type: type) !Type {
|
||||
var stream = std.io.fixedBufferStream(p.bytes()[p.pos..]);
|
||||
var creader = std.io.countingReader(stream.reader());
|
||||
const value: Type = try leb.readUleb128(Type, creader.reader());
|
||||
p.pos += math.cast(usize, creader.bytes_read) orelse return error.Overflow;
|
||||
return value;
|
||||
}
|
||||
|
||||
pub fn readIleb128(p: *InfoReader, comptime Type: type) !Type {
|
||||
var stream = std.io.fixedBufferStream(p.bytes()[p.pos..]);
|
||||
var creader = std.io.countingReader(stream.reader());
|
||||
const value: Type = try leb.readIleb128(Type, creader.reader());
|
||||
p.pos += math.cast(usize, creader.bytes_read) orelse return error.Overflow;
|
||||
return value;
|
||||
pub fn readLeb128(p: *InfoReader, comptime Type: type) !Type {
|
||||
var br: std.io.BufferedReader = undefined;
|
||||
br.initFixed(p.bytes()[p.pos..]);
|
||||
defer p.pos += br.seek;
|
||||
return br.takeLeb128(Type);
|
||||
}
|
||||
|
||||
pub fn seekTo(p: *InfoReader, off: u64) !void {
|
||||
@ -307,10 +298,10 @@ pub const AbbrevReader = struct {
|
||||
|
||||
pub fn readDecl(p: *AbbrevReader) !?AbbrevDecl {
|
||||
const pos = p.pos;
|
||||
const code = try p.readUleb128(Code);
|
||||
const code = try p.readLeb128(Code);
|
||||
if (code == 0) return null;
|
||||
|
||||
const tag = try p.readUleb128(Tag);
|
||||
const tag = try p.readLeb128(Tag);
|
||||
const has_children = (try p.readByte()) > 0;
|
||||
return .{
|
||||
.code = code,
|
||||
@ -323,8 +314,8 @@ pub const AbbrevReader = struct {
|
||||
|
||||
pub fn readAttr(p: *AbbrevReader) !?AbbrevAttr {
|
||||
const pos = p.pos;
|
||||
const at = try p.readUleb128(At);
|
||||
const form = try p.readUleb128(Form);
|
||||
const at = try p.readLeb128(At);
|
||||
const form = try p.readLeb128(Form);
|
||||
return if (at == 0 and form == 0) null else .{
|
||||
.at = at,
|
||||
.form = form,
|
||||
@ -339,12 +330,11 @@ pub const AbbrevReader = struct {
|
||||
return p.bytes()[p.pos];
|
||||
}
|
||||
|
||||
pub fn readUleb128(p: *AbbrevReader, comptime Type: type) !Type {
|
||||
var stream = std.io.fixedBufferStream(p.bytes()[p.pos..]);
|
||||
var creader = std.io.countingReader(stream.reader());
|
||||
const value: Type = try leb.readUleb128(Type, creader.reader());
|
||||
p.pos += math.cast(usize, creader.bytes_read) orelse return error.Overflow;
|
||||
return value;
|
||||
pub fn readLeb128(p: *AbbrevReader, comptime Type: type) !Type {
|
||||
var br: std.io.BufferedReader = undefined;
|
||||
br.initFixed(p.bytes()[p.pos..]);
|
||||
defer p.pos += br.seek;
|
||||
return br.takeLeb128(Type);
|
||||
}
|
||||
|
||||
pub fn seekTo(p: *AbbrevReader, off: u64) !void {
|
||||
|
||||
@ -61,7 +61,7 @@ fn parseBinary(self: *Dylib, macho_file: *MachO) !void {
|
||||
const file = macho_file.getFileHandle(self.file_handle);
|
||||
const offset = self.offset;
|
||||
|
||||
log.debug("parsing dylib from binary: {}", .{@as(Path, self.path)});
|
||||
log.debug("parsing dylib from binary: {f}", .{@as(Path, self.path)});
|
||||
|
||||
var header_buffer: [@sizeOf(macho.mach_header_64)]u8 = undefined;
|
||||
{
|
||||
@ -140,7 +140,7 @@ fn parseBinary(self: *Dylib, macho_file: *MachO) !void {
|
||||
|
||||
if (self.platform) |platform| {
|
||||
if (!macho_file.platform.eqlTarget(platform)) {
|
||||
try macho_file.reportParseError2(self.index, "invalid platform: {}", .{
|
||||
try macho_file.reportParseError2(self.index, "invalid platform: {f}", .{
|
||||
platform.fmtTarget(macho_file.getTarget().cpu.arch),
|
||||
});
|
||||
return error.InvalidTarget;
|
||||
@ -148,7 +148,7 @@ fn parseBinary(self: *Dylib, macho_file: *MachO) !void {
|
||||
// TODO: this can cause the CI to fail so I'm commenting this check out so that
|
||||
// I can work out the rest of the changes first
|
||||
// if (macho_file.platform.version.order(platform.version) == .lt) {
|
||||
// try macho_file.reportParseError2(self.index, "object file built for newer platform: {}: {} < {}", .{
|
||||
// try macho_file.reportParseError2(self.index, "object file built for newer platform: {f}: {f} < {f}", .{
|
||||
// macho_file.platform.fmtTarget(macho_file.getTarget().cpu.arch),
|
||||
// macho_file.platform.version,
|
||||
// platform.version,
|
||||
@ -158,46 +158,6 @@ fn parseBinary(self: *Dylib, macho_file: *MachO) !void {
|
||||
}
|
||||
}
|
||||
|
||||
const TrieIterator = struct {
|
||||
data: []const u8,
|
||||
pos: usize = 0,
|
||||
|
||||
fn getStream(it: *TrieIterator) std.io.FixedBufferStream([]const u8) {
|
||||
return std.io.fixedBufferStream(it.data[it.pos..]);
|
||||
}
|
||||
|
||||
fn readUleb128(it: *TrieIterator) !u64 {
|
||||
var stream = it.getStream();
|
||||
var creader = std.io.countingReader(stream.reader());
|
||||
const reader = creader.reader();
|
||||
const value = try std.leb.readUleb128(u64, reader);
|
||||
it.pos += math.cast(usize, creader.bytes_read) orelse return error.Overflow;
|
||||
return value;
|
||||
}
|
||||
|
||||
fn readString(it: *TrieIterator) ![:0]const u8 {
|
||||
var stream = it.getStream();
|
||||
const reader = stream.reader();
|
||||
|
||||
var count: usize = 0;
|
||||
while (true) : (count += 1) {
|
||||
const byte = try reader.readByte();
|
||||
if (byte == 0) break;
|
||||
}
|
||||
|
||||
const str = @as([*:0]const u8, @ptrCast(it.data.ptr + it.pos))[0..count :0];
|
||||
it.pos += count + 1;
|
||||
return str;
|
||||
}
|
||||
|
||||
fn readByte(it: *TrieIterator) !u8 {
|
||||
var stream = it.getStream();
|
||||
const value = try stream.reader().readByte();
|
||||
it.pos += 1;
|
||||
return value;
|
||||
}
|
||||
};
|
||||
|
||||
pub fn addExport(self: *Dylib, allocator: Allocator, name: []const u8, flags: Export.Flags) !void {
|
||||
try self.exports.append(allocator, .{
|
||||
.name = try self.addString(allocator, name),
|
||||
@ -207,16 +167,16 @@ pub fn addExport(self: *Dylib, allocator: Allocator, name: []const u8, flags: Ex
|
||||
|
||||
fn parseTrieNode(
|
||||
self: *Dylib,
|
||||
it: *TrieIterator,
|
||||
br: *std.io.BufferedReader,
|
||||
allocator: Allocator,
|
||||
arena: Allocator,
|
||||
prefix: []const u8,
|
||||
) !void {
|
||||
const tracy = trace(@src());
|
||||
defer tracy.end();
|
||||
const size = try it.readUleb128();
|
||||
const size = try br.takeLeb128(u64);
|
||||
if (size > 0) {
|
||||
const flags = try it.readUleb128();
|
||||
const flags = try br.takeLeb128(u8);
|
||||
const kind = flags & macho.EXPORT_SYMBOL_FLAGS_KIND_MASK;
|
||||
const out_flags = Export.Flags{
|
||||
.abs = kind == macho.EXPORT_SYMBOL_FLAGS_KIND_ABSOLUTE,
|
||||
@ -224,29 +184,28 @@ fn parseTrieNode(
|
||||
.weak = flags & macho.EXPORT_SYMBOL_FLAGS_WEAK_DEFINITION != 0,
|
||||
};
|
||||
if (flags & macho.EXPORT_SYMBOL_FLAGS_REEXPORT != 0) {
|
||||
_ = try it.readUleb128(); // dylib ordinal
|
||||
const name = try it.readString();
|
||||
_ = try br.takeLeb128(u64); // dylib ordinal
|
||||
const name = try br.takeSentinel(0);
|
||||
try self.addExport(allocator, if (name.len > 0) name else prefix, out_flags);
|
||||
} else if (flags & macho.EXPORT_SYMBOL_FLAGS_STUB_AND_RESOLVER != 0) {
|
||||
_ = try it.readUleb128(); // stub offset
|
||||
_ = try it.readUleb128(); // resolver offset
|
||||
_ = try br.takeLeb128(u64); // stub offset
|
||||
_ = try br.takeLeb128(u64); // resolver offset
|
||||
try self.addExport(allocator, prefix, out_flags);
|
||||
} else {
|
||||
_ = try it.readUleb128(); // VM offset
|
||||
_ = try br.takeLeb128(u64); // VM offset
|
||||
try self.addExport(allocator, prefix, out_flags);
|
||||
}
|
||||
}
|
||||
|
||||
const nedges = try it.readByte();
|
||||
|
||||
const nedges = try br.takeByte();
|
||||
for (0..nedges) |_| {
|
||||
const label = try it.readString();
|
||||
const off = try it.readUleb128();
|
||||
const label = try br.takeSentinel(0);
|
||||
const off = try br.takeLeb128(usize);
|
||||
const prefix_label = try std.fmt.allocPrint(arena, "{s}{s}", .{ prefix, label });
|
||||
const curr = it.pos;
|
||||
it.pos = math.cast(usize, off) orelse return error.Overflow;
|
||||
try self.parseTrieNode(it, allocator, arena, prefix_label);
|
||||
it.pos = curr;
|
||||
const seek = br.seek;
|
||||
br.seek = off;
|
||||
try self.parseTrieNode(br, allocator, arena, prefix_label);
|
||||
br.seek = seek;
|
||||
}
|
||||
}
|
||||
|
||||
@ -257,8 +216,9 @@ fn parseTrie(self: *Dylib, data: []const u8, macho_file: *MachO) !void {
|
||||
var arena = std.heap.ArenaAllocator.init(gpa);
|
||||
defer arena.deinit();
|
||||
|
||||
var it: TrieIterator = .{ .data = data };
|
||||
try self.parseTrieNode(&it, gpa, arena.allocator(), "");
|
||||
var br: std.io.BufferedReader = undefined;
|
||||
br.initFixed(data);
|
||||
try self.parseTrieNode(&br, gpa, arena.allocator(), "");
|
||||
}
|
||||
|
||||
fn parseTbd(self: *Dylib, macho_file: *MachO) !void {
|
||||
@ -267,7 +227,7 @@ fn parseTbd(self: *Dylib, macho_file: *MachO) !void {
|
||||
|
||||
const gpa = macho_file.base.comp.gpa;
|
||||
|
||||
log.debug("parsing dylib from stub: {}", .{self.path});
|
||||
log.debug("parsing dylib from stub: {f}", .{self.path});
|
||||
|
||||
const file = macho_file.getFileHandle(self.file_handle);
|
||||
var lib_stub = LibStub.loadFromFile(gpa, file) catch |err| {
|
||||
@ -716,24 +676,18 @@ const FormatContext = struct {
|
||||
macho_file: *MachO,
|
||||
};
|
||||
|
||||
fn formatSymtab(
|
||||
ctx: FormatContext,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
fn formatSymtab(ctx: FormatContext, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
|
||||
_ = unused_fmt_string;
|
||||
_ = options;
|
||||
const dylib = ctx.dylib;
|
||||
const macho_file = ctx.macho_file;
|
||||
try writer.writeAll(" globals\n");
|
||||
try bw.writeAll(" globals\n");
|
||||
for (dylib.symbols.items, 0..) |sym, i| {
|
||||
const ref = dylib.getSymbolRef(@intCast(i), macho_file);
|
||||
if (ref.getFile(macho_file) == null) {
|
||||
// TODO any better way of handling this?
|
||||
try writer.print(" {s} : unclaimed\n", .{sym.getName(macho_file)});
|
||||
try bw.print(" {s} : unclaimed\n", .{sym.getName(macho_file)});
|
||||
} else {
|
||||
try writer.print(" {}\n", .{ref.getSymbol(macho_file).?.fmt(macho_file)});
|
||||
try bw.print(" {f}\n", .{ref.getSymbol(macho_file).?.fmt(macho_file)});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -261,7 +261,7 @@ fn addObjcMethnameSection(self: *InternalObject, methname: []const u8, macho_fil
|
||||
|
||||
sect.offset = @intCast(self.objc_methnames.items.len);
|
||||
try self.objc_methnames.ensureUnusedCapacity(gpa, methname.len + 1);
|
||||
self.objc_methnames.writer(gpa).print("{s}\x00", .{methname}) catch unreachable;
|
||||
self.objc_methnames.print(gpa, "{s}\x00", .{methname}) catch unreachable;
|
||||
|
||||
const name_str = try self.addString(gpa, "ltmp");
|
||||
const sym_index = try self.addSymbol(gpa);
|
||||
@ -848,18 +848,12 @@ pub fn fmtAtoms(self: *InternalObject, macho_file: *MachO) std.fmt.Formatter(for
|
||||
} };
|
||||
}
|
||||
|
||||
fn formatAtoms(
|
||||
ctx: FormatContext,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
fn formatAtoms(ctx: FormatContext, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
|
||||
_ = unused_fmt_string;
|
||||
_ = options;
|
||||
try writer.writeAll(" atoms\n");
|
||||
try bw.writeAll(" atoms\n");
|
||||
for (ctx.self.getAtoms()) |atom_index| {
|
||||
const atom = ctx.self.getAtom(atom_index) orelse continue;
|
||||
try writer.print(" {}\n", .{atom.fmt(ctx.macho_file)});
|
||||
try bw.print(" {f}\n", .{atom.fmt(ctx.macho_file)});
|
||||
}
|
||||
}
|
||||
|
||||
@ -870,24 +864,18 @@ pub fn fmtSymtab(self: *InternalObject, macho_file: *MachO) std.fmt.Formatter(fo
|
||||
} };
|
||||
}
|
||||
|
||||
fn formatSymtab(
|
||||
ctx: FormatContext,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
fn formatSymtab(ctx: FormatContext, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
|
||||
_ = unused_fmt_string;
|
||||
_ = options;
|
||||
const macho_file = ctx.macho_file;
|
||||
const self = ctx.self;
|
||||
try writer.writeAll(" symbols\n");
|
||||
try bw.writeAll(" symbols\n");
|
||||
for (self.symbols.items, 0..) |sym, i| {
|
||||
const ref = self.getSymbolRef(@intCast(i), macho_file);
|
||||
if (ref.getFile(macho_file) == null) {
|
||||
// TODO any better way of handling this?
|
||||
try writer.print(" {s} : unclaimed\n", .{sym.getName(macho_file)});
|
||||
try bw.print(" {s} : unclaimed\n", .{sym.getName(macho_file)});
|
||||
} else {
|
||||
try writer.print(" {}\n", .{ref.getSymbol(macho_file).?.fmt(macho_file)});
|
||||
try bw.print(" {f}\n", .{ref.getSymbol(macho_file).?.fmt(macho_file)});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -72,7 +72,7 @@ pub fn parse(self: *Object, macho_file: *MachO) !void {
|
||||
const tracy = trace(@src());
|
||||
defer tracy.end();
|
||||
|
||||
log.debug("parsing {}", .{self.fmtPath()});
|
||||
log.debug("parsing {f}", .{self.fmtPath()});
|
||||
|
||||
const gpa = macho_file.base.comp.gpa;
|
||||
const handle = macho_file.getFileHandle(self.file_handle);
|
||||
@ -239,7 +239,7 @@ pub fn parse(self: *Object, macho_file: *MachO) !void {
|
||||
|
||||
if (self.platform) |platform| {
|
||||
if (!macho_file.platform.eqlTarget(platform)) {
|
||||
try macho_file.reportParseError2(self.index, "invalid platform: {}", .{
|
||||
try macho_file.reportParseError2(self.index, "invalid platform: {f}", .{
|
||||
platform.fmtTarget(cpu_arch),
|
||||
});
|
||||
return error.InvalidTarget;
|
||||
@ -247,7 +247,7 @@ pub fn parse(self: *Object, macho_file: *MachO) !void {
|
||||
// TODO: this causes the CI to fail so I'm commenting this check out so that
|
||||
// I can work out the rest of the changes first
|
||||
// if (macho_file.platform.version.order(platform.version) == .lt) {
|
||||
// try macho_file.reportParseError2(self.index, "object file built for newer platform: {}: {} < {}", .{
|
||||
// try macho_file.reportParseError2(self.index, "object file built for newer platform: {f}: {f} < {f}", .{
|
||||
// macho_file.platform.fmtTarget(macho_file.getTarget().cpu.arch),
|
||||
// macho_file.platform.version,
|
||||
// platform.version,
|
||||
@ -1065,7 +1065,8 @@ fn initEhFrameRecords(self: *Object, allocator: Allocator, sect_id: u8, file: Fi
|
||||
}
|
||||
}
|
||||
|
||||
var it = eh_frame.Iterator{ .data = self.eh_frame_data.items };
|
||||
var it: eh_frame.Iterator = undefined;
|
||||
it.br.initFixed(self.eh_frame_data.items);
|
||||
while (try it.next()) |rec| {
|
||||
switch (rec.tag) {
|
||||
.cie => try self.cies.append(allocator, .{
|
||||
@ -1694,11 +1695,11 @@ pub fn updateArSize(self: *Object, macho_file: *MachO) !void {
|
||||
};
|
||||
}
|
||||
|
||||
pub fn writeAr(self: Object, ar_format: Archive.Format, macho_file: *MachO, writer: anytype) !void {
|
||||
pub fn writeAr(self: Object, bw: *std.io.BufferedWriter, ar_format: Archive.Format, macho_file: *MachO) !void {
|
||||
// Header
|
||||
const size = try macho_file.cast(usize, self.output_ar_state.size);
|
||||
const basename = std.fs.path.basename(self.path.sub_path);
|
||||
try Archive.writeHeader(basename, size, ar_format, writer);
|
||||
try Archive.writeHeader(bw, basename, size, ar_format);
|
||||
// Data
|
||||
const file = macho_file.getFileHandle(self.file_handle);
|
||||
// TODO try using copyRangeAll
|
||||
@ -1707,7 +1708,7 @@ pub fn writeAr(self: Object, ar_format: Archive.Format, macho_file: *MachO, writ
|
||||
defer gpa.free(data);
|
||||
const amt = try file.preadAll(data, self.offset);
|
||||
if (amt != size) return error.InputOutput;
|
||||
try writer.writeAll(data);
|
||||
try bw.writeAll(data);
|
||||
}
|
||||
|
||||
pub fn calcSymtabSize(self: *Object, macho_file: *MachO) void {
|
||||
@ -1861,7 +1862,7 @@ pub fn writeAtomsRelocatable(self: *Object, macho_file: *MachO) !void {
|
||||
}
|
||||
gpa.free(sections_data);
|
||||
}
|
||||
@memset(sections_data, &[0]u8{});
|
||||
@memset(sections_data, &.{});
|
||||
const file = macho_file.getFileHandle(self.file_handle);
|
||||
|
||||
for (headers, 0..) |header, n_sect| {
|
||||
@ -2512,16 +2513,10 @@ pub fn readSectionData(self: Object, allocator: Allocator, file: File.Handle, n_
|
||||
return data;
|
||||
}
|
||||
|
||||
pub fn format(
|
||||
self: *Object,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
pub fn format(self: *Object, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
|
||||
_ = self;
|
||||
_ = bw;
|
||||
_ = unused_fmt_string;
|
||||
_ = options;
|
||||
_ = writer;
|
||||
@compileError("do not format objects directly");
|
||||
}
|
||||
|
||||
@ -2537,20 +2532,14 @@ pub fn fmtAtoms(self: *Object, macho_file: *MachO) std.fmt.Formatter(formatAtoms
|
||||
} };
|
||||
}
|
||||
|
||||
fn formatAtoms(
|
||||
ctx: FormatContext,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
fn formatAtoms(ctx: FormatContext, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
|
||||
_ = unused_fmt_string;
|
||||
_ = options;
|
||||
const object = ctx.object;
|
||||
const macho_file = ctx.macho_file;
|
||||
try writer.writeAll(" atoms\n");
|
||||
try bw.writeAll(" atoms\n");
|
||||
for (object.getAtoms()) |atom_index| {
|
||||
const atom = object.getAtom(atom_index) orelse continue;
|
||||
try writer.print(" {}\n", .{atom.fmt(macho_file)});
|
||||
try bw.print(" {f}\n", .{atom.fmt(macho_file)});
|
||||
}
|
||||
}
|
||||
|
||||
@ -2561,18 +2550,12 @@ pub fn fmtCies(self: *Object, macho_file: *MachO) std.fmt.Formatter(formatCies)
|
||||
} };
|
||||
}
|
||||
|
||||
fn formatCies(
|
||||
ctx: FormatContext,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
fn formatCies(ctx: FormatContext, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
|
||||
_ = unused_fmt_string;
|
||||
_ = options;
|
||||
const object = ctx.object;
|
||||
try writer.writeAll(" cies\n");
|
||||
try bw.writeAll(" cies\n");
|
||||
for (object.cies.items, 0..) |cie, i| {
|
||||
try writer.print(" cie({d}) : {}\n", .{ i, cie.fmt(ctx.macho_file) });
|
||||
try bw.print(" cie({d}) : {f}\n", .{ i, cie.fmt(ctx.macho_file) });
|
||||
}
|
||||
}
|
||||
|
||||
@ -2583,18 +2566,12 @@ pub fn fmtFdes(self: *Object, macho_file: *MachO) std.fmt.Formatter(formatFdes)
|
||||
} };
|
||||
}
|
||||
|
||||
fn formatFdes(
|
||||
ctx: FormatContext,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
fn formatFdes(ctx: FormatContext, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
|
||||
_ = unused_fmt_string;
|
||||
_ = options;
|
||||
const object = ctx.object;
|
||||
try writer.writeAll(" fdes\n");
|
||||
try bw.writeAll(" fdes\n");
|
||||
for (object.fdes.items, 0..) |fde, i| {
|
||||
try writer.print(" fde({d}) : {}\n", .{ i, fde.fmt(ctx.macho_file) });
|
||||
try bw.print(" fde({d}) : {f}\n", .{ i, fde.fmt(ctx.macho_file) });
|
||||
}
|
||||
}
|
||||
|
||||
@ -2605,19 +2582,13 @@ pub fn fmtUnwindRecords(self: *Object, macho_file: *MachO) std.fmt.Formatter(for
|
||||
} };
|
||||
}
|
||||
|
||||
fn formatUnwindRecords(
|
||||
ctx: FormatContext,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
fn formatUnwindRecords(ctx: FormatContext, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
|
||||
_ = unused_fmt_string;
|
||||
_ = options;
|
||||
const object = ctx.object;
|
||||
const macho_file = ctx.macho_file;
|
||||
try writer.writeAll(" unwind records\n");
|
||||
try bw.writeAll(" unwind records\n");
|
||||
for (object.unwind_records_indexes.items) |rec| {
|
||||
try writer.print(" rec({d}) : {}\n", .{ rec, object.getUnwindRecord(rec).fmt(macho_file) });
|
||||
try bw.print(" rec({d}) : {f}\n", .{ rec, object.getUnwindRecord(rec).fmt(macho_file) });
|
||||
}
|
||||
}
|
||||
|
||||
@ -2628,34 +2599,28 @@ pub fn fmtSymtab(self: *Object, macho_file: *MachO) std.fmt.Formatter(formatSymt
|
||||
} };
|
||||
}
|
||||
|
||||
fn formatSymtab(
|
||||
ctx: FormatContext,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
fn formatSymtab(ctx: FormatContext, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
|
||||
_ = unused_fmt_string;
|
||||
_ = options;
|
||||
const object = ctx.object;
|
||||
const macho_file = ctx.macho_file;
|
||||
try writer.writeAll(" symbols\n");
|
||||
try bw.writeAll(" symbols\n");
|
||||
for (object.symbols.items, 0..) |sym, i| {
|
||||
const ref = object.getSymbolRef(@intCast(i), macho_file);
|
||||
if (ref.getFile(macho_file) == null) {
|
||||
// TODO any better way of handling this?
|
||||
try writer.print(" {s} : unclaimed\n", .{sym.getName(macho_file)});
|
||||
try bw.print(" {s} : unclaimed\n", .{sym.getName(macho_file)});
|
||||
} else {
|
||||
try writer.print(" {}\n", .{ref.getSymbol(macho_file).?.fmt(macho_file)});
|
||||
try bw.print(" {f}\n", .{ref.getSymbol(macho_file).?.fmt(macho_file)});
|
||||
}
|
||||
}
|
||||
for (object.stab_files.items) |sf| {
|
||||
try writer.print(" stabs({s},{s},{s})\n", .{
|
||||
try bw.print(" stabs({s},{s},{s})\n", .{
|
||||
sf.getCompDir(object.*),
|
||||
sf.getTuName(object.*),
|
||||
sf.getOsoPath(object.*),
|
||||
});
|
||||
for (sf.stabs.items) |stab| {
|
||||
try writer.print(" {}", .{stab.fmt(object.*)});
|
||||
try bw.print(" {f}", .{stab.fmt(object.*)});
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -2664,20 +2629,14 @@ pub fn fmtPath(self: Object) std.fmt.Formatter(formatPath) {
|
||||
return .{ .data = self };
|
||||
}
|
||||
|
||||
fn formatPath(
|
||||
object: Object,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
fn formatPath(object: Object, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
|
||||
_ = unused_fmt_string;
|
||||
_ = options;
|
||||
if (object.in_archive) |ar| {
|
||||
try writer.print("{}({s})", .{
|
||||
@as(Path, ar.path), object.path.basename(),
|
||||
try bw.print("{f}({s})", .{
|
||||
ar.path, object.path.basename(),
|
||||
});
|
||||
} else {
|
||||
try writer.print("{}", .{@as(Path, object.path)});
|
||||
try bw.print("{f}", .{object.path});
|
||||
}
|
||||
}
|
||||
|
||||
@ -2731,16 +2690,10 @@ const StabFile = struct {
|
||||
return object.symbols.items[index];
|
||||
}
|
||||
|
||||
pub fn format(
|
||||
stab: Stab,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
pub fn format(stab: Stab, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
|
||||
_ = stab;
|
||||
_ = bw;
|
||||
_ = unused_fmt_string;
|
||||
_ = options;
|
||||
_ = writer;
|
||||
@compileError("do not format stabs directly");
|
||||
}
|
||||
|
||||
@ -2750,22 +2703,16 @@ const StabFile = struct {
|
||||
return .{ .data = .{ stab, object } };
|
||||
}
|
||||
|
||||
fn format2(
|
||||
ctx: StabFormatContext,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
fn format2(ctx: StabFormatContext, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
|
||||
_ = unused_fmt_string;
|
||||
_ = options;
|
||||
const stab, const object = ctx;
|
||||
const sym = stab.getSymbol(object).?;
|
||||
if (stab.is_func) {
|
||||
try writer.print("func({d})", .{stab.index.?});
|
||||
try bw.print("func({d})", .{stab.index.?});
|
||||
} else if (sym.visibility == .global) {
|
||||
try writer.print("gsym({d})", .{stab.index.?});
|
||||
try bw.print("gsym({d})", .{stab.index.?});
|
||||
} else {
|
||||
try writer.print("stsym({d})", .{stab.index.?});
|
||||
try bw.print("stsym({d})", .{stab.index.?});
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
@ -76,16 +76,10 @@ pub fn fmtPretty(rel: Relocation, cpu_arch: std.Target.Cpu.Arch) std.fmt.Formatt
|
||||
return .{ .data = .{ rel, cpu_arch } };
|
||||
}
|
||||
|
||||
fn formatPretty(
|
||||
ctx: FormatCtx,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
_ = options;
|
||||
fn formatPretty(ctx: FormatCtx, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
|
||||
_ = unused_fmt_string;
|
||||
const rel, const cpu_arch = ctx;
|
||||
const str = switch (rel.type) {
|
||||
try bw.writeAll(switch (rel.type) {
|
||||
.signed => "X86_64_RELOC_SIGNED",
|
||||
.signed1 => "X86_64_RELOC_SIGNED_1",
|
||||
.signed2 => "X86_64_RELOC_SIGNED_2",
|
||||
@ -118,8 +112,7 @@ fn formatPretty(
|
||||
.aarch64 => "ARM64_RELOC_UNSIGNED",
|
||||
else => unreachable,
|
||||
},
|
||||
};
|
||||
try writer.writeAll(str);
|
||||
});
|
||||
}
|
||||
|
||||
pub const Type = enum {
|
||||
|
||||
@ -286,16 +286,10 @@ pub fn setOutputSym(symbol: Symbol, macho_file: *MachO, out: *macho.nlist_64) vo
}
}

pub fn format(
symbol: Symbol,
comptime unused_fmt_string: []const u8,
options: std.fmt.FormatOptions,
writer: anytype,
) !void {
pub fn format(symbol: Symbol, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
_ = symbol;
_ = bw;
_ = unused_fmt_string;
_ = options;
_ = writer;
@compileError("do not format symbols directly");
}

@ -311,26 +305,20 @@ pub fn fmt(symbol: Symbol, macho_file: *MachO) std.fmt.Formatter(format2) {
} };
}

fn format2(
ctx: FormatContext,
comptime unused_fmt_string: []const u8,
options: std.fmt.FormatOptions,
writer: anytype,
) !void {
_ = options;
fn format2(ctx: FormatContext, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
_ = unused_fmt_string;
const symbol = ctx.symbol;
try writer.print("%{d} : {s} : @{x}", .{
try bw.print("%{d} : {s} : @{x}", .{
symbol.nlist_idx,
symbol.getName(ctx.macho_file),
symbol.getAddress(.{}, ctx.macho_file),
});
if (symbol.getFile(ctx.macho_file)) |file| {
if (symbol.getOutputSectionIndex(ctx.macho_file) != 0) {
try writer.print(" : sect({d})", .{symbol.getOutputSectionIndex(ctx.macho_file)});
try bw.print(" : sect({d})", .{symbol.getOutputSectionIndex(ctx.macho_file)});
}
if (symbol.getAtom(ctx.macho_file)) |atom| {
try writer.print(" : atom({d})", .{atom.atom_index});
try bw.print(" : atom({d})", .{atom.atom_index});
}
var buf: [3]u8 = .{'_'} ** 3;
if (symbol.flags.@"export") buf[0] = 'E';
@ -340,16 +328,16 @@ fn format2(
.hidden => buf[2] = 'H',
.global => buf[2] = 'G',
}
try writer.print(" : {s}", .{&buf});
if (symbol.flags.weak) try writer.writeAll(" : weak");
if (symbol.isSymbolStab(ctx.macho_file)) try writer.writeAll(" : stab");
try bw.print(" : {s}", .{&buf});
if (symbol.flags.weak) try bw.writeAll(" : weak");
if (symbol.isSymbolStab(ctx.macho_file)) try bw.writeAll(" : stab");
switch (file) {
.zig_object => |x| try writer.print(" : zig_object({d})", .{x.index}),
.internal => |x| try writer.print(" : internal({d})", .{x.index}),
.object => |x| try writer.print(" : object({d})", .{x.index}),
.dylib => |x| try writer.print(" : dylib({d})", .{x.index}),
.zig_object => |x| try bw.print(" : zig_object({d})", .{x.index}),
.internal => |x| try bw.print(" : internal({d})", .{x.index}),
.object => |x| try bw.print(" : object({d})", .{x.index}),
.dylib => |x| try bw.print(" : dylib({d})", .{x.index}),
}
} else try writer.writeAll(" : unresolved");
} else try bw.writeAll(" : unresolved");
}

pub const Flags = packed struct {

@ -20,16 +20,16 @@ pub fn getTargetAddress(thunk: Thunk, ref: MachO.Ref, macho_file: *MachO) u64 {
return thunk.getAddress(macho_file) + thunk.symbols.getIndex(ref).? * trampoline_size;
}

pub fn write(thunk: Thunk, macho_file: *MachO, writer: anytype) !void {
pub fn write(thunk: Thunk, macho_file: *MachO, bw: *std.io.BufferedWriter) !void {
for (thunk.symbols.keys(), 0..) |ref, i| {
const sym = ref.getSymbol(macho_file).?;
const saddr = thunk.getAddress(macho_file) + i * trampoline_size;
const taddr = sym.getAddress(.{}, macho_file);
const pages = try aarch64.calcNumberOfPages(@intCast(saddr), @intCast(taddr));
try writer.writeInt(u32, aarch64.Instruction.adrp(.x16, pages).toU32(), .little);
try bw.writeInt(u32, aarch64.Instruction.adrp(.x16, pages).toU32(), .little);
const off: u12 = @truncate(taddr);
try writer.writeInt(u32, aarch64.Instruction.add(.x16, .x16, off, false).toU32(), .little);
try writer.writeInt(u32, aarch64.Instruction.br(.x16).toU32(), .little);
try bw.writeInt(u32, aarch64.Instruction.add(.x16, .x16, off, false).toU32(), .little);
try bw.writeInt(u32, aarch64.Instruction.br(.x16).toU32(), .little);
}
}

@ -61,16 +61,10 @@ pub fn writeSymtab(thunk: Thunk, macho_file: *MachO, ctx: anytype) void {
}
}

pub fn format(
thunk: Thunk,
comptime unused_fmt_string: []const u8,
options: std.fmt.FormatOptions,
writer: anytype,
) !void {
pub fn format(thunk: Thunk, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
_ = thunk;
_ = bw;
_ = unused_fmt_string;
_ = options;
_ = writer;
@compileError("do not format Thunk directly");
}

@ -86,20 +80,14 @@ const FormatContext = struct {
macho_file: *MachO,
};

fn format2(
ctx: FormatContext,
comptime unused_fmt_string: []const u8,
options: std.fmt.FormatOptions,
writer: anytype,
) !void {
_ = options;
fn format2(ctx: FormatContext, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
_ = unused_fmt_string;
const thunk = ctx.thunk;
const macho_file = ctx.macho_file;
try writer.print("@{x} : size({x})\n", .{ thunk.value, thunk.size() });
try bw.print("@{x} : size({x})\n", .{ thunk.value, thunk.size() });
for (thunk.symbols.keys()) |ref| {
const sym = ref.getSymbol(macho_file).?;
try writer.print("  {} : {s} : @{x}\n", .{ ref, sym.getName(macho_file), sym.value });
try bw.print("  {f} : {s} : @{x}\n", .{ ref, sym.getName(macho_file), sym.value });
}
}


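Note: Thunk.write above shows the other half of the port: functions that previously accepted a generic writer: anytype now take the concrete *std.io.BufferedWriter and emit little-endian words through bw.writeInt, and nested Formatter values switch from {} to {f} (the "  {f} : {s} : @{x}" line). A minimal sketch of that writer signature, restricted to calls visible in this diff; emitWords and its words parameter are hypothetical:

const std = @import("std");

// Hypothetical helper mirroring the shape of Thunk.write: emit 32-bit words
// through the concrete buffered writer in little-endian order.
fn emitWords(bw: *std.io.BufferedWriter, words: []const u32) anyerror!void {
    for (words) |word| {
        try bw.writeInt(u32, word, .little);
    }
}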
@ -133,7 +133,7 @@ pub fn generate(info: *UnwindInfo, macho_file: *MachO) !void {
for (info.records.items) |ref| {
const rec = ref.getUnwindRecord(macho_file);
const atom = rec.getAtom(macho_file);
log.debug("@{x}-{x} : {s} : rec({d}) : object({d}) : {}", .{
log.debug("@{x}-{x} : {s} : rec({d}) : object({d}) : {f}", .{
rec.getAtomAddress(macho_file),
rec.getAtomAddress(macho_file) + rec.length,
atom.getName(macho_file),
@ -202,7 +202,7 @@ pub fn generate(info: *UnwindInfo, macho_file: *MachO) !void {
if (i >= max_common_encodings) break;
if (slice[i].count < 2) continue;
info.appendCommonEncoding(slice[i].enc);
log.debug("adding common encoding: {d} => {}", .{ i, slice[i].enc });
log.debug("adding common encoding: {d} => {f}", .{ i, slice[i].enc });
}
}

@ -255,7 +255,7 @@ pub fn generate(info: *UnwindInfo, macho_file: *MachO) !void {
page.kind = .compressed;
}

log.debug("{}", .{page.fmt(info.*)});
log.debug("{f}", .{page.fmt(info.*)});

try info.pages.append(gpa, page);
}
@ -289,13 +289,10 @@ pub fn calcSize(info: UnwindInfo) usize {
return total_size;
}

pub fn write(info: UnwindInfo, macho_file: *MachO, buffer: []u8) !void {
pub fn write(info: UnwindInfo, macho_file: *MachO, bw: *std.io.BufferedWriter) anyerror!void {
const seg = macho_file.getTextSegment();
const header = macho_file.sections.items(.header)[macho_file.unwind_info_sect_index.?];

var stream = std.io.fixedBufferStream(buffer);
const writer = stream.writer();

const common_encodings_offset: u32 = @sizeOf(macho.unwind_info_section_header);
const common_encodings_count: u32 = info.common_encodings_count;
const personalities_offset: u32 = common_encodings_offset + common_encodings_count * @sizeOf(u32);
@ -303,7 +300,7 @@ pub fn write(info: UnwindInfo, macho_file: *MachO, buffer: []u8) !void {
const indexes_offset: u32 = personalities_offset + personalities_count * @sizeOf(u32);
const indexes_count: u32 = @as(u32, @intCast(info.pages.items.len + 1));

try writer.writeStruct(macho.unwind_info_section_header{
try bw.writeStruct(macho.unwind_info_section_header{
.commonEncodingsArraySectionOffset = common_encodings_offset,
.commonEncodingsArrayCount = common_encodings_count,
.personalityArraySectionOffset = personalities_offset,
@ -312,11 +309,11 @@ pub fn write(info: UnwindInfo, macho_file: *MachO, buffer: []u8) !void {
.indexCount = indexes_count,
});

try writer.writeAll(mem.sliceAsBytes(info.common_encodings[0..info.common_encodings_count]));
try bw.writeAll(mem.sliceAsBytes(info.common_encodings[0..info.common_encodings_count]));

for (info.personalities[0..info.personalities_count]) |ref| {
const sym = ref.getSymbol(macho_file).?;
try writer.writeInt(u32, @intCast(sym.getGotAddress(macho_file) - seg.vmaddr), .little);
try bw.writeInt(u32, @intCast(sym.getGotAddress(macho_file) - seg.vmaddr), .little);
}

const pages_base_offset = @as(u32, @intCast(header.size - (info.pages.items.len * second_level_page_bytes)));
@ -325,7 +322,7 @@ pub fn write(info: UnwindInfo, macho_file: *MachO, buffer: []u8) !void {
for (info.pages.items, 0..) |page, i| {
assert(page.count > 0);
const rec = info.records.items[page.start].getUnwindRecord(macho_file);
try writer.writeStruct(macho.unwind_info_section_header_index_entry{
try bw.writeStruct(macho.unwind_info_section_header_index_entry{
.functionOffset = @as(u32, @intCast(rec.getAtomAddress(macho_file) - seg.vmaddr)),
.secondLevelPagesSectionOffset = @as(u32, @intCast(pages_base_offset + i * second_level_page_bytes)),
.lsdaIndexArraySectionOffset = lsda_base_offset +
@ -335,7 +332,7 @@ pub fn write(info: UnwindInfo, macho_file: *MachO, buffer: []u8) !void {

const last_rec = info.records.items[info.records.items.len - 1].getUnwindRecord(macho_file);
const sentinel_address = @as(u32, @intCast(last_rec.getAtomAddress(macho_file) + last_rec.length - seg.vmaddr));
try writer.writeStruct(macho.unwind_info_section_header_index_entry{
try bw.writeStruct(macho.unwind_info_section_header_index_entry{
.functionOffset = sentinel_address,
.secondLevelPagesSectionOffset = 0,
.lsdaIndexArraySectionOffset = lsda_base_offset +
@ -344,23 +341,20 @@ pub fn write(info: UnwindInfo, macho_file: *MachO, buffer: []u8) !void {

for (info.lsdas.items) |index| {
const rec = info.records.items[index].getUnwindRecord(macho_file);
try writer.writeStruct(macho.unwind_info_section_header_lsda_index_entry{
try bw.writeStruct(macho.unwind_info_section_header_lsda_index_entry{
.functionOffset = @as(u32, @intCast(rec.getAtomAddress(macho_file) - seg.vmaddr)),
.lsdaOffset = @as(u32, @intCast(rec.getLsdaAddress(macho_file) - seg.vmaddr)),
});
}

for (info.pages.items) |page| {
const start = stream.pos;
try page.write(info, macho_file, writer);
const nwritten = stream.pos - start;
if (nwritten < second_level_page_bytes) {
const padding = math.cast(usize, second_level_page_bytes - nwritten) orelse return error.Overflow;
try writer.writeByteNTimes(0, padding);
}
const start = bw.count;
try page.write(info, macho_file, bw);
const nwritten = bw.count - start;
try bw.splatByteAll(0, math.cast(usize, second_level_page_bytes - nwritten) orelse return error.Overflow);
}

@memset(buffer[stream.pos..], 0);
@memset(bw.unusedCapacitySlice(), 0);
}

fn getOrPutPersonalityFunction(info: *UnwindInfo, ref: MachO.Ref) error{TooManyPersonalities}!u2 {
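Note: in the UnwindInfo.write hunk above, the intermediate fixedBufferStream disappears: the write position now comes from bw.count, fixed-size second-level pages are zero-padded with a single splatByteAll call, and the leftover capacity is cleared through bw.unusedCapacitySlice(). A minimal sketch of that padding idiom, using only calls shown above; writeRecordPadded, payload, and record_size are hypothetical:

const std = @import("std");

// Hypothetical helper showing the padding idiom from UnwindInfo.write:
// measure what was written via bw.count, then zero-fill up to a fixed size.
fn writeRecordPadded(bw: *std.io.BufferedWriter, payload: []const u8, record_size: usize) anyerror!void {
    std.debug.assert(payload.len <= record_size);
    const start = bw.count;
    try bw.writeAll(payload);
    const nwritten = bw.count - start;
    try bw.splatByteAll(0, record_size - nwritten);
}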
@ -455,15 +449,9 @@ pub const Encoding = extern struct {
return enc.enc == other.enc;
}

pub fn format(
enc: Encoding,
comptime unused_fmt_string: []const u8,
options: std.fmt.FormatOptions,
writer: anytype,
) !void {
pub fn format(enc: Encoding, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
_ = unused_fmt_string;
_ = options;
try writer.print("0x{x:0>8}", .{enc.enc});
try bw.print("0x{x:0>8}", .{enc.enc});
}
};

@ -517,16 +505,10 @@ pub const Record = struct {
return lsda.getAddress(macho_file) + rec.lsda_offset;
}

pub fn format(
rec: Record,
comptime unused_fmt_string: []const u8,
options: std.fmt.FormatOptions,
writer: anytype,
) !void {
pub fn format(rec: Record, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
_ = rec;
_ = bw;
_ = unused_fmt_string;
_ = options;
_ = writer;
@compileError("do not format UnwindInfo.Records directly");
}

@ -542,22 +524,16 @@ pub const Record = struct {
macho_file: *MachO,
};

fn format2(
ctx: FormatContext,
comptime unused_fmt_string: []const u8,
options: std.fmt.FormatOptions,
writer: anytype,
) !void {
fn format2(ctx: FormatContext, bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8) anyerror!void {
_ = unused_fmt_string;
_ = options;
const rec = ctx.rec;
const macho_file = ctx.macho_file;
try writer.print("{x} : len({x})", .{
try bw.print("{x} : len({x})", .{
rec.enc.enc, rec.length,
});
if (rec.enc.isDwarf(macho_file)) try writer.print(" : fde({d})", .{rec.fde});
try writer.print(" : {s}", .{rec.getAtom(macho_file).getName(macho_file)});
if (!rec.alive) try writer.writeAll(" : [*]");
if (rec.enc.isDwarf(macho_file)) try bw.print(" : fde({d})", .{rec.fde});
try bw.print(" : {s}", .{rec.getAtom(macho_file).getName(macho_file)});
if (!rec.alive) try bw.writeAll(" : [*]");
}

pub const Index = u32;
@ -613,16 +589,10 @@ const Page = struct {
return null;
}

fn format(
page: *const Page,
comptime unused_format_string: []const u8,
options: std.fmt.FormatOptions,
writer: anytype,
) !void {
fn format(page: *const Page, bw: *std.io.BufferedWriter, comptime unused_format_string: []const u8) anyerror!void {
_ = page;
_ = bw;
_ = unused_format_string;
_ = options;
_ = writer;
@compileError("do not format Page directly; use page.fmt()");
}

@ -631,23 +601,17 @@ const Page = struct {
info: UnwindInfo,
};

fn format2(
ctx: FormatPageContext,
comptime unused_format_string: []const u8,
options: std.fmt.FormatOptions,
writer: anytype,
) @TypeOf(writer).Error!void {
_ = options;
fn format2(ctx: FormatPageContext, bw: *std.io.BufferedWriter, comptime unused_format_string: []const u8) anyerror!void {
_ = unused_format_string;
try writer.writeAll("Page:\n");
try writer.print("  kind: {s}\n", .{@tagName(ctx.page.kind)});
try writer.print("  entries: {d} - {d}\n", .{
try bw.writeAll("Page:\n");
try bw.print("  kind: {s}\n", .{@tagName(ctx.page.kind)});
try bw.print("  entries: {d} - {d}\n", .{
ctx.page.start,
ctx.page.start + ctx.page.count,
});
try writer.print("  encodings (count = {d})\n", .{ctx.page.page_encodings_count});
try bw.print("  encodings (count = {d})\n", .{ctx.page.page_encodings_count});
for (ctx.page.page_encodings[0..ctx.page.page_encodings_count], 0..) |enc, i| {
try writer.print("    {d}: {}\n", .{ ctx.info.common_encodings_count + i, enc });
try bw.print("    {d}: {f}\n", .{ ctx.info.common_encodings_count + i, enc });
}
}

Some files were not shown because too many files have changed in this diff.