Merge pull request #25036 from ziglang/GenericWriter

std.Io: delete GenericWriter, AnyWriter, and null_writer

Commit: 4b948e8556
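The change is mechanical across the vendored files below: writer parameters go from `w: anytype` to the concrete `*std.Io.Writer`, and `@TypeOf(w).Error` becomes the fixed `std.Io.Writer.Error` set. A minimal sketch of the new calling convention, not taken from the commit itself (`writeGreeting` is a made-up helper; the writer calls mirror the hunks below):

const std = @import("std");

// Callees take the concrete writer interface instead of `w: anytype`.
fn writeGreeting(w: *std.Io.Writer, name: []const u8) std.Io.Writer.Error!void {
    try w.print("hello, {s}\n", .{name});
}

test writeGreeting {
    var buf: [32]u8 = undefined;
    var w: std.Io.Writer = .fixed(&buf);
    try writeGreeting(&w, "zig");
    try std.testing.expectEqualStrings("hello, zig\n", w.buffered());
}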
lib/compiler/aro/aro/Attribute.zig (vendored), 2 changed lines

@@ -780,7 +780,7 @@ fn ignoredAttrErr(p: *Parser, tok: TokenIndex, attr: Attribute.Tag, context: []c
 const strings_top = p.strings.items.len;
 defer p.strings.items.len = strings_top;

-try p.strings.writer().print("attribute '{s}' ignored on {s}", .{ @tagName(attr), context });
+try p.strings.print("attribute '{s}' ignored on {s}", .{ @tagName(attr), context });
 const str = try p.comp.diagnostics.arena.allocator().dupe(u8, p.strings.items[strings_top..]);
 try p.errStr(.ignored_attribute, tok, str);
 }
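The Attribute.zig hunk above, like several Parser.zig hunks later, drops the ArrayList `.writer()` adapter and formats straight into the list. A small sketch of that pattern, assuming the managed array list `print` helper behaves as the new line uses it:

const std = @import("std");

test "format directly into a managed byte list" {
    var strings = std.array_list.Managed(u8).init(std.testing.allocator);
    defer strings.deinit();
    // was: try strings.writer().print(...)
    try strings.print("attribute '{s}' ignored", .{"packed"});
    try std.testing.expectEqualStrings("attribute 'packed' ignored", strings.items);
}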
lib/compiler/aro/aro/Builtins/Builtin.zig (vendored), 5 changed lines

@@ -119,8 +119,7 @@ pub fn nameFromUniqueIndex(index: u16, buf: []u8) []u8 {

 var node_index: u16 = 0;
 var count: u16 = index;
-var fbs = std.io.fixedBufferStream(buf);
-const w = fbs.writer();
+var w: std.Io.Writer = .fixed(buf);

 while (true) {
 var sibling_index = dafsa[node_index].child_index;

@@ -142,7 +141,7 @@ pub fn nameFromUniqueIndex(index: u16, buf: []u8) []u8 {
 if (count == 0) break;
 }

-return fbs.getWritten();
+return w.buffered();
 }

 /// We're 1 bit shy of being able to fit this in a u32:
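In isolation: a caller-provided byte buffer is wrapped directly as a writer via `.fixed`, and the bytes written so far come from `buffered()` instead of `fixedBufferStream`'s `getWritten()`. A sketch using only the calls visible in the hunk above:

const std = @import("std");

test "fixed-buffer writing without fixedBufferStream" {
    var buf: [16]u8 = undefined;
    // was: var fbs = std.io.fixedBufferStream(&buf); const w = fbs.writer();
    var w: std.Io.Writer = .fixed(&buf);
    try w.writeAll("abc");
    try w.writeByte('!');
    // was: fbs.getWritten()
    try std.testing.expectEqualStrings("abc!", w.buffered());
}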
lib/compiler/aro/aro/Compilation.zig (vendored), 64 changed lines

@@ -16,6 +16,7 @@ const Pragma = @import("Pragma.zig");
 const StrInt = @import("StringInterner.zig");
 const record_layout = @import("record_layout.zig");
 const target_util = @import("target.zig");
+const Writer = std.Io.Writer;

 pub const Error = error{
 /// A fatal error has ocurred and compilation has stopped.

@@ -199,7 +200,7 @@ fn getTimestamp(comp: *Compilation) !u47 {
 return @intCast(std.math.clamp(timestamp, 0, max_timestamp));
 }

-fn generateDateAndTime(w: anytype, timestamp: u47) !void {
+fn generateDateAndTime(w: *Writer, timestamp: u47) !void {
 const epoch_seconds = EpochSeconds{ .secs = timestamp };
 const epoch_day = epoch_seconds.getEpochDay();
 const day_seconds = epoch_seconds.getDaySeconds();

@@ -242,7 +243,7 @@ pub const SystemDefinesMode = enum {
 include_system_defines,
 };

-fn generateSystemDefines(comp: *Compilation, w: anytype) !void {
+fn generateSystemDefines(comp: *Compilation, w: *Writer) !void {
 const ptr_width = comp.target.ptrBitWidth();

 if (comp.langopts.gnuc_version > 0) {

@@ -533,11 +534,20 @@ fn generateSystemDefines(comp: *Compilation, w: anytype) !void {
 pub fn generateBuiltinMacros(comp: *Compilation, system_defines_mode: SystemDefinesMode) !Source {
 try comp.generateBuiltinTypes();

-var buf = std.array_list.Managed(u8).init(comp.gpa);
-defer buf.deinit();
+var allocating: std.Io.Writer.Allocating = .init(comp.gpa);
+defer allocating.deinit();
+
+generateBuiltinMacrosWriter(comp, system_defines_mode, &allocating.writer) catch |err| switch (err) {
+error.WriteFailed => return error.OutOfMemory,
+else => |e| return e,
+};
+
+return comp.addSourceFromBuffer("<builtin>", allocating.written());
+}
+
+pub fn generateBuiltinMacrosWriter(comp: *Compilation, system_defines_mode: SystemDefinesMode, buf: *Writer) !void {
 if (system_defines_mode == .include_system_defines) {
-try buf.appendSlice(
+try buf.writeAll(
 \\#define __VERSION__ "Aro
 ++ " " ++ @import("../backend.zig").version_str ++ "\"\n" ++
 \\#define __Aro__

@@ -545,11 +555,11 @@ pub fn generateBuiltinMacros(comp: *Compilation, system_defines_mode: SystemDefi
 );
 }

-try buf.appendSlice("#define __STDC__ 1\n");
-try buf.writer().print("#define __STDC_HOSTED__ {d}\n", .{@intFromBool(comp.target.os.tag != .freestanding)});
+try buf.writeAll("#define __STDC__ 1\n");
+try buf.print("#define __STDC_HOSTED__ {d}\n", .{@intFromBool(comp.target.os.tag != .freestanding)});

 // standard macros
-try buf.appendSlice(
+try buf.writeAll(
 \\#define __STDC_NO_COMPLEX__ 1
 \\#define __STDC_NO_THREADS__ 1
 \\#define __STDC_NO_VLA__ 1

@@ -561,23 +571,21 @@ pub fn generateBuiltinMacros(comp: *Compilation, system_defines_mode: SystemDefi
 \\
 );
 if (comp.langopts.standard.StdCVersionMacro()) |stdc_version| {
-try buf.appendSlice("#define __STDC_VERSION__ ");
-try buf.appendSlice(stdc_version);
-try buf.append('\n');
+try buf.writeAll("#define __STDC_VERSION__ ");
+try buf.writeAll(stdc_version);
+try buf.writeByte('\n');
 }

 // timestamps
 const timestamp = try comp.getTimestamp();
-try generateDateAndTime(buf.writer(), timestamp);
+try generateDateAndTime(buf, timestamp);

 if (system_defines_mode == .include_system_defines) {
-try comp.generateSystemDefines(buf.writer());
+try comp.generateSystemDefines(buf);
 }

-return comp.addSourceFromBuffer("<builtin>", buf.items);
 }

-fn generateFloatMacros(w: anytype, prefix: []const u8, semantics: target_util.FPSemantics, ext: []const u8) !void {
+fn generateFloatMacros(w: *Writer, prefix: []const u8, semantics: target_util.FPSemantics, ext: []const u8) !void {
 const denormMin = semantics.chooseValue(
 []const u8,
 .{

@@ -656,7 +664,7 @@ fn generateFloatMacros(w: anytype, prefix: []const u8, semantics: target_util.FP
 try w.print("#define {s}MIN__ {s}{s}\n", .{ prefix_slice, min, ext });
 }

-fn generateTypeMacro(w: anytype, mapper: StrInt.TypeMapper, name: []const u8, ty: Type, langopts: LangOpts) !void {
+fn generateTypeMacro(w: *Writer, mapper: StrInt.TypeMapper, name: []const u8, ty: Type, langopts: LangOpts) !void {
 try w.print("#define {s} ", .{name});
 try ty.print(mapper, langopts, w);
 try w.writeByte('\n');

@@ -762,7 +770,7 @@ fn generateFastOrLeastType(
 bits: usize,
 kind: enum { least, fast },
 signedness: std.builtin.Signedness,
-w: anytype,
+w: *Writer,
 mapper: StrInt.TypeMapper,
 ) !void {
 const ty = comp.intLeastN(bits, signedness); // defining the fast types as the least types is permitted

@@ -793,7 +801,7 @@ fn generateFastOrLeastType(
 try comp.generateFmt(prefix, w, ty);
 }

-fn generateFastAndLeastWidthTypes(comp: *Compilation, w: anytype, mapper: StrInt.TypeMapper) !void {
+fn generateFastAndLeastWidthTypes(comp: *Compilation, w: *Writer, mapper: StrInt.TypeMapper) !void {
 const sizes = [_]usize{ 8, 16, 32, 64 };
 for (sizes) |size| {
 try comp.generateFastOrLeastType(size, .least, .signed, w, mapper);

@@ -803,7 +811,7 @@ fn generateFastAndLeastWidthTypes(comp: *Compilation, w: anytype, mapper: StrInt
 }
 }

-fn generateExactWidthTypes(comp: *const Compilation, w: anytype, mapper: StrInt.TypeMapper) !void {
+fn generateExactWidthTypes(comp: *const Compilation, w: *Writer, mapper: StrInt.TypeMapper) !void {
 try comp.generateExactWidthType(w, mapper, .schar);

 if (comp.intSize(.short) > comp.intSize(.char)) {

@@ -851,7 +859,7 @@ fn generateExactWidthTypes(comp: *const Compilation, w: anytype, mapper: StrInt.
 }
 }

-fn generateFmt(comp: *const Compilation, prefix: []const u8, w: anytype, ty: Type) !void {
+fn generateFmt(comp: *const Compilation, prefix: []const u8, w: *Writer, ty: Type) !void {
 const unsigned = ty.isUnsignedInt(comp);
 const modifier = ty.formatModifier();
 const formats = if (unsigned) "ouxX" else "di";

@@ -860,7 +868,7 @@ fn generateFmt(comp: *const Compilation, prefix: []const u8, w: anytype, ty: Typ
 }
 }

-fn generateSuffixMacro(comp: *const Compilation, prefix: []const u8, w: anytype, ty: Type) !void {
+fn generateSuffixMacro(comp: *const Compilation, prefix: []const u8, w: *Writer, ty: Type) !void {
 return w.print("#define {s}_C_SUFFIX__ {s}\n", .{ prefix, ty.intValueSuffix(comp) });
 }

@@ -868,7 +876,7 @@ fn generateSuffixMacro(comp: *const Compilation, prefix: []const u8, w: anytype,
 /// Name macro (e.g. #define __UINT32_TYPE__ unsigned int)
 /// Format strings (e.g. #define __UINT32_FMTu__ "u")
 /// Suffix macro (e.g. #define __UINT32_C_SUFFIX__ U)
-fn generateExactWidthType(comp: *const Compilation, w: anytype, mapper: StrInt.TypeMapper, specifier: Type.Specifier) !void {
+fn generateExactWidthType(comp: *const Compilation, w: *Writer, mapper: StrInt.TypeMapper, specifier: Type.Specifier) !void {
 var ty = Type{ .specifier = specifier };
 const width = 8 * ty.sizeof(comp).?;
 const unsigned = ty.isUnsignedInt(comp);

@@ -998,7 +1006,7 @@ fn generateVaListType(comp: *Compilation) !Type {
 return ty;
 }

-fn generateIntMax(comp: *const Compilation, w: anytype, name: []const u8, ty: Type) !void {
+fn generateIntMax(comp: *const Compilation, w: *Writer, name: []const u8, ty: Type) !void {
 const bit_count: u8 = @intCast(ty.sizeof(comp).? * 8);
 const unsigned = ty.isUnsignedInt(comp);
 const max: u128 = switch (bit_count) {

@@ -1023,7 +1031,7 @@ pub fn wcharMax(comp: *const Compilation) u32 {
 };
 }

-fn generateExactWidthIntMax(comp: *const Compilation, w: anytype, specifier: Type.Specifier) !void {
+fn generateExactWidthIntMax(comp: *const Compilation, w: *Writer, specifier: Type.Specifier) !void {
 var ty = Type{ .specifier = specifier };
 const bit_count: u8 = @intCast(ty.sizeof(comp).? * 8);
 const unsigned = ty.isUnsignedInt(comp);

@@ -1040,16 +1048,16 @@ fn generateExactWidthIntMax(comp: *const Compilation, w: anytype, specifier: Typ
 return comp.generateIntMax(w, name, ty);
 }

-fn generateIntWidth(comp: *Compilation, w: anytype, name: []const u8, ty: Type) !void {
+fn generateIntWidth(comp: *Compilation, w: *Writer, name: []const u8, ty: Type) !void {
 try w.print("#define __{s}_WIDTH__ {d}\n", .{ name, 8 * ty.sizeof(comp).? });
 }

-fn generateIntMaxAndWidth(comp: *Compilation, w: anytype, name: []const u8, ty: Type) !void {
+fn generateIntMaxAndWidth(comp: *Compilation, w: *Writer, name: []const u8, ty: Type) !void {
 try comp.generateIntMax(w, name, ty);
 try comp.generateIntWidth(w, name, ty);
 }

-fn generateSizeofType(comp: *Compilation, w: anytype, name: []const u8, ty: Type) !void {
+fn generateSizeofType(comp: *Compilation, w: *Writer, name: []const u8, ty: Type) !void {
 try w.print("#define {s} {d}\n", .{ name, ty.sizeof(comp).? });
 }
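Where the output has to grow, the hunks above replace a managed `ArrayList(u8)` plus `.writer()` with `std.Io.Writer.Allocating`, translating the writer's only error, `error.WriteFailed`, back into `error.OutOfMemory` at the boundary. A sketch of that shape (`buildDefines` is an invented name; the `Allocating` calls are the ones used above):

const std = @import("std");

fn buildDefines(gpa: std.mem.Allocator) ![]u8 {
    var allocating: std.Io.Writer.Allocating = .init(gpa);
    defer allocating.deinit();

    // On an allocating writer, an allocation failure surfaces as error.WriteFailed.
    allocating.writer.print("#define __STDC__ {d}\n", .{1}) catch |err| switch (err) {
        error.WriteFailed => return error.OutOfMemory,
    };
    return gpa.dupe(u8, allocating.written());
}

test buildDefines {
    const s = try buildDefines(std.testing.allocator);
    defer std.testing.allocator.free(s);
    try std.testing.expectEqualStrings("#define __STDC__ 1\n", s);
}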
lib/compiler/aro/aro/Parser.zig (vendored), 166 changed lines

@@ -101,7 +101,7 @@ value_map: Tree.ValueMap,

 // buffers used during compilation
 syms: SymbolStack = .{},
-strings: std.array_list.AlignedManaged(u8, .@"4"),
+strings: std.array_list.Managed(u8),
 labels: std.array_list.Managed(Label),
 list_buf: NodeList,
 decl_buf: NodeList,

@@ -447,7 +447,17 @@ pub fn typeStr(p: *Parser, ty: Type) ![]const u8 {
 defer p.strings.items.len = strings_top;

 const mapper = p.comp.string_interner.getSlowTypeMapper();
-try ty.print(mapper, p.comp.langopts, p.strings.writer());
+{
+var unmanaged = p.strings.moveToUnmanaged();
+var allocating: std.Io.Writer.Allocating = .fromArrayList(p.comp.gpa, &unmanaged);
+defer {
+unmanaged = allocating.toArrayList();
+p.strings = unmanaged.toManaged(p.comp.gpa);
+}
+ty.print(mapper, p.comp.langopts, &allocating.writer) catch |e| switch (e) {
+error.WriteFailed => return error.OutOfMemory,
+};
+}
 return try p.comp.diagnostics.arena.allocator().dupe(u8, p.strings.items[strings_top..]);
 }

@@ -455,7 +465,7 @@ pub fn typePairStr(p: *Parser, a: Type, b: Type) ![]const u8 {
 return p.typePairStrExtra(a, " and ", b);
 }

-pub fn typePairStrExtra(p: *Parser, a: Type, msg: []const u8, b: Type) ![]const u8 {
+pub fn typePairStrExtra(p: *Parser, a: Type, msg: []const u8, b: Type) Error![]const u8 {
 if (@import("builtin").mode != .Debug) {
 if (a.is(.invalid) or b.is(.invalid)) {
 return "Tried to render invalid type - this is an aro bug.";

@@ -466,29 +476,60 @@ pub fn typePairStrExtra(p: *Parser, a: Type, msg: []const u8, b: Type) ![]const

 try p.strings.append('\'');
 const mapper = p.comp.string_interner.getSlowTypeMapper();
-try a.print(mapper, p.comp.langopts, p.strings.writer());
+{
+var unmanaged = p.strings.moveToUnmanaged();
+var allocating: std.Io.Writer.Allocating = .fromArrayList(p.comp.gpa, &unmanaged);
+defer {
+unmanaged = allocating.toArrayList();
+p.strings = unmanaged.toManaged(p.comp.gpa);
+}
+a.print(mapper, p.comp.langopts, &allocating.writer) catch |e| switch (e) {
+error.WriteFailed => return error.OutOfMemory,
+};
+}
 try p.strings.append('\'');
 try p.strings.appendSlice(msg);
 try p.strings.append('\'');
-try b.print(mapper, p.comp.langopts, p.strings.writer());
+{
+var unmanaged = p.strings.moveToUnmanaged();
+var allocating: std.Io.Writer.Allocating = .fromArrayList(p.comp.gpa, &unmanaged);
+defer {
+unmanaged = allocating.toArrayList();
+p.strings = unmanaged.toManaged(p.comp.gpa);
+}
+b.print(mapper, p.comp.langopts, &allocating.writer) catch |e| switch (e) {
+error.WriteFailed => return error.OutOfMemory,
+};
+}
 try p.strings.append('\'');
 return try p.comp.diagnostics.arena.allocator().dupe(u8, p.strings.items[strings_top..]);
 }

-pub fn valueChangedStr(p: *Parser, res: *Result, old_value: Value, int_ty: Type) ![]const u8 {
+pub fn valueChangedStr(p: *Parser, res: *Result, old_value: Value, int_ty: Type) Error![]const u8 {
 const strings_top = p.strings.items.len;
 defer p.strings.items.len = strings_top;

-var w = p.strings.writer();
 const type_pair_str = try p.typePairStrExtra(res.ty, " to ", int_ty);
-try w.writeAll(type_pair_str);
+{
+var unmanaged = p.strings.moveToUnmanaged();
+var allocating: std.Io.Writer.Allocating = .fromArrayList(p.comp.gpa, &unmanaged);
+defer {
+unmanaged = allocating.toArrayList();
+p.strings = unmanaged.toManaged(p.comp.gpa);
+}
+allocating.writer.writeAll(type_pair_str) catch return error.OutOfMemory;

-try w.writeAll(" changes ");
-if (res.val.isZero(p.comp)) try w.writeAll("non-zero ");
-try w.writeAll("value from ");
-try old_value.print(res.ty, p.comp, w);
-try w.writeAll(" to ");
-try res.val.print(int_ty, p.comp, w);
+allocating.writer.writeAll(" changes ") catch return error.OutOfMemory;
+if (res.val.isZero(p.comp)) allocating.writer.writeAll("non-zero ") catch return error.OutOfMemory;
+allocating.writer.writeAll("value from ") catch return error.OutOfMemory;
+old_value.print(res.ty, p.comp, &allocating.writer) catch |e| switch (e) {
+error.WriteFailed => return error.OutOfMemory,
+};
+allocating.writer.writeAll(" to ") catch return error.OutOfMemory;
+res.val.print(int_ty, p.comp, &allocating.writer) catch |e| switch (e) {
+error.WriteFailed => return error.OutOfMemory,
+};
+}

 return try p.comp.diagnostics.arena.allocator().dupe(u8, p.strings.items[strings_top..]);
 }

@@ -498,9 +539,8 @@ fn checkDeprecatedUnavailable(p: *Parser, ty: Type, usage_tok: TokenIndex, decl_
 const strings_top = p.strings.items.len;
 defer p.strings.items.len = strings_top;

-const w = p.strings.writer();
 const msg_str = p.comp.interner.get(@"error".msg.ref()).bytes;
-try w.print("call to '{s}' declared with attribute error: {f}", .{
+try p.strings.print("call to '{s}' declared with attribute error: {f}", .{
 p.tokSlice(@"error".__name_tok), std.zig.fmtString(msg_str),
 });
 const str = try p.comp.diagnostics.arena.allocator().dupe(u8, p.strings.items[strings_top..]);

@@ -510,9 +550,8 @@ fn checkDeprecatedUnavailable(p: *Parser, ty: Type, usage_tok: TokenIndex, decl_
 const strings_top = p.strings.items.len;
 defer p.strings.items.len = strings_top;

-const w = p.strings.writer();
 const msg_str = p.comp.interner.get(warning.msg.ref()).bytes;
-try w.print("call to '{s}' declared with attribute warning: {f}", .{
+try p.strings.print("call to '{s}' declared with attribute warning: {f}", .{
 p.tokSlice(warning.__name_tok), std.zig.fmtString(msg_str),
 });
 const str = try p.comp.diagnostics.arena.allocator().dupe(u8, p.strings.items[strings_top..]);

@@ -532,17 +571,16 @@ fn errDeprecated(p: *Parser, tag: Diagnostics.Tag, tok_i: TokenIndex, msg: ?Valu
 const strings_top = p.strings.items.len;
 defer p.strings.items.len = strings_top;

-const w = p.strings.writer();
-try w.print("'{s}' is ", .{p.tokSlice(tok_i)});
+try p.strings.print("'{s}' is ", .{p.tokSlice(tok_i)});
 const reason: []const u8 = switch (tag) {
 .unavailable => "unavailable",
 .deprecated_declarations => "deprecated",
 else => unreachable,
 };
-try w.writeAll(reason);
+try p.strings.appendSlice(reason);
 if (msg) |m| {
 const str = p.comp.interner.get(m.ref()).bytes;
-try w.print(": {f}", .{std.zig.fmtString(str)});
+try p.strings.print(": {f}", .{std.zig.fmtString(str)});
 }
 const str = try p.comp.diagnostics.arena.allocator().dupe(u8, p.strings.items[strings_top..]);
 return p.errStr(tag, tok_i, str);

@@ -681,7 +719,7 @@ fn diagnoseIncompleteDefinitions(p: *Parser) !void {
 }

 /// root : (decl | assembly ';' | staticAssert)*
-pub fn parse(pp: *Preprocessor) Compilation.Error!Tree {
+pub fn parse(pp: *Preprocessor) Error!Tree {
 assert(pp.linemarkers == .none);
 pp.comp.pragmaEvent(.before_parse);

@@ -693,7 +731,7 @@ pub fn parse(pp: *Preprocessor) Compilation.Error!Tree {
 .gpa = pp.comp.gpa,
 .arena = arena.allocator(),
 .tok_ids = pp.tokens.items(.id),
-.strings = std.array_list.AlignedManaged(u8, .@"4").init(pp.comp.gpa),
+.strings = std.array_list.Managed(u8).init(pp.comp.gpa),
 .value_map = Tree.ValueMap.init(pp.comp.gpa),
 .data = NodeList.init(pp.comp.gpa),
 .labels = std.array_list.Managed(Label).init(pp.comp.gpa),

@@ -1218,38 +1256,46 @@ fn decl(p: *Parser) Error!bool {
 return true;
 }

-fn staticAssertMessage(p: *Parser, cond_node: NodeIndex, message: Result) !?[]const u8 {
+fn staticAssertMessage(p: *Parser, cond_node: NodeIndex, message: Result) Error!?[]const u8 {
 const cond_tag = p.nodes.items(.tag)[@intFromEnum(cond_node)];
 if (cond_tag != .builtin_types_compatible_p and message.node == .none) return null;

-var buf = std.array_list.Managed(u8).init(p.gpa);
-defer buf.deinit();
+var allocating: std.Io.Writer.Allocating = .init(p.gpa);
+defer allocating.deinit();
+
+const buf = &allocating.writer;

 if (cond_tag == .builtin_types_compatible_p) {
 const mapper = p.comp.string_interner.getSlowTypeMapper();
 const data = p.nodes.items(.data)[@intFromEnum(cond_node)].bin;

-try buf.appendSlice("'__builtin_types_compatible_p(");
+buf.writeAll("'__builtin_types_compatible_p(") catch return error.OutOfMemory;

 const lhs_ty = p.nodes.items(.ty)[@intFromEnum(data.lhs)];
-try lhs_ty.print(mapper, p.comp.langopts, buf.writer());
-try buf.appendSlice(", ");
+lhs_ty.print(mapper, p.comp.langopts, buf) catch |e| switch (e) {
+error.WriteFailed => return error.OutOfMemory,
+};
+buf.writeAll(", ") catch return error.OutOfMemory;

 const rhs_ty = p.nodes.items(.ty)[@intFromEnum(data.rhs)];
-try rhs_ty.print(mapper, p.comp.langopts, buf.writer());
+rhs_ty.print(mapper, p.comp.langopts, buf) catch |e| switch (e) {
+error.WriteFailed => return error.OutOfMemory,
+};

-try buf.appendSlice(")'");
+buf.writeAll(")'") catch return error.OutOfMemory;
 }
 if (message.node != .none) {
 assert(p.nodes.items(.tag)[@intFromEnum(message.node)] == .string_literal_expr);
-if (buf.items.len > 0) {
-try buf.append(' ');
+if (buf.buffered().len > 0) {
+buf.writeByte(' ') catch return error.OutOfMemory;
 }
 const bytes = p.comp.interner.get(message.val.ref()).bytes;
-try buf.ensureUnusedCapacity(bytes.len);
-try Value.printString(bytes, message.ty, p.comp, buf.writer());
+try allocating.ensureUnusedCapacity(bytes.len);
+Value.printString(bytes, message.ty, p.comp, buf) catch |e| switch (e) {
+error.WriteFailed => return error.OutOfMemory,
+};
 }
-return try p.comp.diagnostics.arena.allocator().dupe(u8, buf.items);
+return try p.comp.diagnostics.arena.allocator().dupe(u8, allocating.written());
 }

 /// staticAssert

@@ -4981,7 +5027,7 @@ const CallExpr = union(enum) {
 return true;
 }

-fn checkVarArg(self: CallExpr, p: *Parser, first_after: TokenIndex, param_tok: TokenIndex, arg: *Result, arg_idx: u32) !void {
+fn checkVarArg(self: CallExpr, p: *Parser, first_after: TokenIndex, param_tok: TokenIndex, arg: *Result, arg_idx: u32) Error!void {
 if (self == .standard) return;

 const builtin_tok = p.nodes.items(.data)[@intFromEnum(self.builtin.node)].decl.name;

@@ -5183,7 +5229,17 @@ pub const Result = struct {
 const strings_top = p.strings.items.len;
 defer p.strings.items.len = strings_top;

-try res.val.print(res.ty, p.comp, p.strings.writer());
+{
+var unmanaged = p.strings.moveToUnmanaged();
+var allocating: std.Io.Writer.Allocating = .fromArrayList(p.comp.gpa, &unmanaged);
+defer {
+unmanaged = allocating.toArrayList();
+p.strings = unmanaged.toManaged(p.comp.gpa);
+}
+res.val.print(res.ty, p.comp, &allocating.writer) catch |e| switch (e) {
+error.WriteFailed => return error.OutOfMemory,
+};
+}
 return try p.comp.diagnostics.arena.allocator().dupe(u8, p.strings.items[strings_top..]);
 }

@@ -5347,7 +5403,7 @@ pub const Result = struct {
 conditional,
 add,
 sub,
-}) !bool {
+}) Error!bool {
 if (b.ty.specifier == .invalid) {
 try a.saveValue(p);
 a.ty = Type.invalid;

@@ -5643,7 +5699,7 @@ pub const Result = struct {
 }
 }

-fn floatToIntWarning(res: *Result, p: *Parser, int_ty: Type, old_value: Value, change_kind: Value.FloatToIntChangeKind, tok: TokenIndex) !void {
+fn floatToIntWarning(res: *Result, p: *Parser, int_ty: Type, old_value: Value, change_kind: Value.FloatToIntChangeKind, tok: TokenIndex) Error!void {
 switch (change_kind) {
 .none => return p.errStr(.float_to_int, tok, try p.typePairStrExtra(res.ty, " to ", int_ty)),
 .out_of_range => return p.errStr(.float_out_of_range, tok, try p.typePairStrExtra(res.ty, " to ", int_ty)),

@@ -5866,7 +5922,7 @@ pub const Result = struct {
 res.val = .{};
 }

-fn castType(res: *Result, p: *Parser, to: Type, operand_tok: TokenIndex, l_paren: TokenIndex) !void {
+fn castType(res: *Result, p: *Parser, to: Type, operand_tok: TokenIndex, l_paren: TokenIndex) Error!void {
 var cast_kind: Tree.CastKind = undefined;

 if (to.is(.void)) {

@@ -7595,9 +7651,19 @@ fn validateFieldAccess(p: *Parser, record_ty: *const Type.Record, expr_ty: Type,

 p.strings.items.len = 0;

-try p.strings.writer().print("'{s}' in '", .{p.tokSlice(field_name_tok)});
+try p.strings.print("'{s}' in '", .{p.tokSlice(field_name_tok)});
 const mapper = p.comp.string_interner.getSlowTypeMapper();
-try expr_ty.print(mapper, p.comp.langopts, p.strings.writer());
+{
+var unmanaged = p.strings.moveToUnmanaged();
+var allocating: std.Io.Writer.Allocating = .fromArrayList(p.comp.gpa, &unmanaged);
+defer {
+unmanaged = allocating.toArrayList();
+p.strings = unmanaged.toManaged(p.comp.gpa);
+}
+expr_ty.print(mapper, p.comp.langopts, &allocating.writer) catch |e| switch (e) {
+error.WriteFailed => return error.OutOfMemory,
+};
+}
 try p.strings.append('\'');

 const duped = try p.comp.diagnostics.arena.allocator().dupe(u8, p.strings.items);

@@ -8016,7 +8082,17 @@ fn primaryExpr(p: *Parser) Error!Result {
 defer p.strings.items.len = strings_top;

 const mapper = p.comp.string_interner.getSlowTypeMapper();
-try Type.printNamed(func_ty, p.tokSlice(p.func.name), mapper, p.comp.langopts, p.strings.writer());
+{
+var unmanaged = p.strings.moveToUnmanaged();
+var allocating: std.Io.Writer.Allocating = .fromArrayList(p.comp.gpa, &unmanaged);
+defer {
+unmanaged = allocating.toArrayList();
+p.strings = unmanaged.toManaged(p.comp.gpa);
+}
+Type.printNamed(func_ty, p.tokSlice(p.func.name), mapper, p.comp.langopts, &allocating.writer) catch |e| switch (e) {
+error.WriteFailed => return error.OutOfMemory,
+};
+}
 try p.strings.append(0);
 const predef = try p.makePredefinedIdentifier(strings_top);
 ty = predef.ty;
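Several Parser.zig hunks above repeat one pattern: temporarily move the managed `p.strings` list into an `Allocating` writer, print through it, and hand the (possibly reallocated) buffer back on scope exit. A standalone sketch of that round trip, using only the calls the hunks introduce (`moveToUnmanaged`, `fromArrayList`, `toArrayList`, `toManaged`):

const std = @import("std");

test "borrow a managed list as a *std.Io.Writer" {
    const gpa = std.testing.allocator;
    var strings = std.array_list.Managed(u8).init(gpa);
    defer strings.deinit();

    {
        var unmanaged = strings.moveToUnmanaged();
        var allocating: std.Io.Writer.Allocating = .fromArrayList(gpa, &unmanaged);
        // Give the buffer back to `strings` when the block ends.
        defer {
            unmanaged = allocating.toArrayList();
            strings = unmanaged.toManaged(gpa);
        }
        allocating.writer.writeAll("'int'") catch return error.OutOfMemory;
    }

    try std.testing.expectEqualStrings("'int'", strings.items);
}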
lib/compiler/aro/aro/Preprocessor.zig (vendored), 29 changed lines

@@ -15,6 +15,7 @@ const TokenWithExpansionLocs = Tree.TokenWithExpansionLocs;
 const Attribute = @import("Attribute.zig");
 const features = @import("features.zig");
 const Hideset = @import("Hideset.zig");
+const Writer = std.Io.Writer;

 const DefineMap = std.StringHashMapUnmanaged(Macro);
 const RawTokenList = std.array_list.Managed(RawToken);

@@ -982,7 +983,7 @@ fn expr(pp: *Preprocessor, tokenizer: *Tokenizer) MacroError!bool {
 .tok_i = @intCast(token_state.tokens_len),
 .arena = pp.arena.allocator(),
 .in_macro = true,
-.strings = std.array_list.AlignedManaged(u8, .@"4").init(pp.comp.gpa),
+.strings = std.array_list.Managed(u8).init(pp.comp.gpa),

 .data = undefined,
 .value_map = undefined,

@@ -1193,24 +1194,21 @@ fn expandObjMacro(pp: *Preprocessor, simple_macro: *const Macro) Error!ExpandBuf
 .macro_file => {
 const start = pp.comp.generated_buf.items.len;
 const source = pp.comp.getSource(pp.expansion_source_loc.id);
-const w = pp.comp.generated_buf.writer(pp.gpa);
-try w.print("\"{s}\"\n", .{source.path});
+try pp.comp.generated_buf.print(pp.gpa, "\"{s}\"\n", .{source.path});

 buf.appendAssumeCapacity(try pp.makeGeneratedToken(start, .string_literal, tok));
 },
 .macro_line => {
 const start = pp.comp.generated_buf.items.len;
 const source = pp.comp.getSource(pp.expansion_source_loc.id);
-const w = pp.comp.generated_buf.writer(pp.gpa);
-try w.print("{d}\n", .{source.physicalLine(pp.expansion_source_loc)});
+try pp.comp.generated_buf.print(pp.gpa, "{d}\n", .{source.physicalLine(pp.expansion_source_loc)});

 buf.appendAssumeCapacity(try pp.makeGeneratedToken(start, .pp_num, tok));
 },
 .macro_counter => {
 defer pp.counter += 1;
 const start = pp.comp.generated_buf.items.len;
-const w = pp.comp.generated_buf.writer(pp.gpa);
-try w.print("{d}\n", .{pp.counter});
+try pp.comp.generated_buf.print(pp.gpa, "{d}\n", .{pp.counter});

 buf.appendAssumeCapacity(try pp.makeGeneratedToken(start, .pp_num, tok));
 },

@@ -1682,8 +1680,7 @@ fn expandFuncMacro(
 break :blk false;
 } else try pp.handleBuiltinMacro(raw.id, arg, macro_tok.loc);
 const start = pp.comp.generated_buf.items.len;
-const w = pp.comp.generated_buf.writer(pp.gpa);
-try w.print("{}\n", .{@intFromBool(result)});
+try pp.comp.generated_buf.print(pp.gpa, "{}\n", .{@intFromBool(result)});
 try buf.append(try pp.makeGeneratedToken(start, .pp_num, tokFromRaw(raw)));
 },
 .macro_param_has_c_attribute => {

@@ -2988,18 +2985,16 @@ fn embed(pp: *Preprocessor, tokenizer: *Tokenizer) MacroError!void {
 // TODO: We currently only support systems with CHAR_BIT == 8
 // If the target's CHAR_BIT is not 8, we need to write out correctly-sized embed_bytes
 // and correctly account for the target's endianness
-const writer = pp.comp.generated_buf.writer(pp.gpa);
-
 {
 const byte = embed_bytes[0];
 const start = pp.comp.generated_buf.items.len;
-try writer.print("{d}", .{byte});
+try pp.comp.generated_buf.print(pp.gpa, "{d}", .{byte});
 pp.addTokenAssumeCapacity(try pp.makeGeneratedToken(start, .embed_byte, filename_tok));
 }

 for (embed_bytes[1..]) |byte| {
 const start = pp.comp.generated_buf.items.len;
-try writer.print(",{d}", .{byte});
+try pp.comp.generated_buf.print(pp.gpa, ",{d}", .{byte});
 pp.addTokenAssumeCapacity(.{ .id = .comma, .loc = .{ .id = .generated, .byte_offset = @intCast(start) } });
 pp.addTokenAssumeCapacity(try pp.makeGeneratedToken(start + 1, .embed_byte, filename_tok));
 }

@@ -3241,7 +3236,7 @@ fn findIncludeSource(pp: *Preprocessor, tokenizer: *Tokenizer, first: RawToken,

 fn printLinemarker(
 pp: *Preprocessor,
-w: anytype,
+w: *Writer,
 line_no: u32,
 source: Source,
 start_resume: enum(u8) { start, @"resume", none },

@@ -3301,7 +3296,7 @@ pub const DumpMode = enum {
 /// Pretty-print the macro define or undef at location `loc`.
 /// We re-tokenize the directive because we are printing a macro that may have the same name as one in
 /// `pp.defines` but a different definition (due to being #undef'ed and then redefined)
-fn prettyPrintMacro(pp: *Preprocessor, w: anytype, loc: Source.Location, parts: enum { name_only, name_and_body }) !void {
+fn prettyPrintMacro(pp: *Preprocessor, w: *Writer, loc: Source.Location, parts: enum { name_only, name_and_body }) !void {
 const source = pp.comp.getSource(loc.id);
 var tokenizer: Tokenizer = .{
 .buf = source.buf,

@@ -3339,7 +3334,7 @@ fn prettyPrintMacro(pp: *Preprocessor, w: anytype, loc: Source.Location, parts:
 }
 }

-fn prettyPrintMacrosOnly(pp: *Preprocessor, w: anytype) !void {
+fn prettyPrintMacrosOnly(pp: *Preprocessor, w: *Writer) !void {
 var it = pp.defines.valueIterator();
 while (it.next()) |macro| {
 if (macro.is_builtin) continue;

@@ -3351,7 +3346,7 @@ fn prettyPrintMacrosOnly(pp: *Preprocessor, w: anytype) !void {
 }

 /// Pretty print tokens and try to preserve whitespace.
-pub fn prettyPrintTokens(pp: *Preprocessor, w: anytype, macro_dump_mode: DumpMode) !void {
+pub fn prettyPrintTokens(pp: *Preprocessor, w: *Writer, macro_dump_mode: DumpMode) !void {
 if (macro_dump_mode == .macros_only) {
 return pp.prettyPrintMacrosOnly(w);
 }
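The Preprocessor hunks above likewise drop `generated_buf.writer(pp.gpa)` and print into the list with an explicit allocator argument. A sketch of that call shape; the container type here is an assumption (the diff never shows `generated_buf`'s declaration), only the `print(gpa, fmt, args)` form is taken from the new lines:

const std = @import("std");

test "print into an unmanaged byte list with an explicit allocator" {
    const gpa = std.testing.allocator;
    var generated_buf: std.ArrayList(u8) = .empty;
    defer generated_buf.deinit(gpa);

    // was: try generated_buf.writer(gpa).print("{d}\n", .{line});
    try generated_buf.print(gpa, "{d}\n", .{42});
    try std.testing.expectEqualStrings("42\n", generated_buf.items);
}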
lib/compiler/aro/aro/Type.zig (vendored), 17 changed lines

@@ -9,6 +9,7 @@ const StringInterner = @import("StringInterner.zig");
 const StringId = StringInterner.StringId;
 const target_util = @import("target.zig");
 const LangOpts = @import("LangOpts.zig");
+const Writer = std.Io.Writer;

 pub const Qualifiers = packed struct {
 @"const": bool = false,

@@ -23,7 +24,7 @@ pub const Qualifiers = packed struct {
 return quals.@"const" or quals.restrict or quals.@"volatile" or quals.atomic;
 }

-pub fn dump(quals: Qualifiers, w: anytype) !void {
+pub fn dump(quals: Qualifiers, w: *Writer) !void {
 if (quals.@"const") try w.writeAll("const ");
 if (quals.atomic) try w.writeAll("_Atomic ");
 if (quals.@"volatile") try w.writeAll("volatile ");

@@ -2411,12 +2412,12 @@ pub fn intValueSuffix(ty: Type, comp: *const Compilation) []const u8 {
 }

 /// Print type in C style
-pub fn print(ty: Type, mapper: StringInterner.TypeMapper, langopts: LangOpts, w: anytype) @TypeOf(w).Error!void {
+pub fn print(ty: Type, mapper: StringInterner.TypeMapper, langopts: LangOpts, w: *Writer) Writer.Error!void {
 _ = try ty.printPrologue(mapper, langopts, w);
 try ty.printEpilogue(mapper, langopts, w);
 }

-pub fn printNamed(ty: Type, name: []const u8, mapper: StringInterner.TypeMapper, langopts: LangOpts, w: anytype) @TypeOf(w).Error!void {
+pub fn printNamed(ty: Type, name: []const u8, mapper: StringInterner.TypeMapper, langopts: LangOpts, w: *Writer) Writer.Error!void {
 const simple = try ty.printPrologue(mapper, langopts, w);
 if (simple) try w.writeByte(' ');
 try w.writeAll(name);

@@ -2426,7 +2427,7 @@ pub fn printNamed(ty: Type, name: []const u8, mapper: StringInterner.TypeMapper,
 const StringGetter = fn (TokenIndex) []const u8;

 /// return true if `ty` is simple
-fn printPrologue(ty: Type, mapper: StringInterner.TypeMapper, langopts: LangOpts, w: anytype) @TypeOf(w).Error!bool {
+fn printPrologue(ty: Type, mapper: StringInterner.TypeMapper, langopts: LangOpts, w: *Writer) Writer.Error!bool {
 if (ty.qual.atomic) {
 var non_atomic_ty = ty;
 non_atomic_ty.qual.atomic = false;

@@ -2497,7 +2498,7 @@ fn printPrologue(ty: Type, mapper: StringInterner.TypeMapper, langopts: LangOpts
 return true;
 }

-fn printEpilogue(ty: Type, mapper: StringInterner.TypeMapper, langopts: LangOpts, w: anytype) @TypeOf(w).Error!void {
+fn printEpilogue(ty: Type, mapper: StringInterner.TypeMapper, langopts: LangOpts, w: *Writer) Writer.Error!void {
 if (ty.qual.atomic) return;
 if (ty.isPtr()) {
 const elem_ty = ty.elemType();

@@ -2564,7 +2565,7 @@ fn printEpilogue(ty: Type, mapper: StringInterner.TypeMapper, langopts: LangOpts
 const dump_detailed_containers = false;

 // Print as Zig types since those are actually readable
-pub fn dump(ty: Type, mapper: StringInterner.TypeMapper, langopts: LangOpts, w: anytype) @TypeOf(w).Error!void {
+pub fn dump(ty: Type, mapper: StringInterner.TypeMapper, langopts: LangOpts, w: *Writer) Writer.Error!void {
 try ty.qual.dump(w);
 switch (ty.specifier) {
 .invalid => try w.writeAll("invalid"),

@@ -2656,7 +2657,7 @@ pub fn dump(ty: Type, mapper: StringInterner.TypeMapper, langopts: LangOpts, w:
 }
 }

-fn dumpEnum(@"enum": *Enum, mapper: StringInterner.TypeMapper, w: anytype) @TypeOf(w).Error!void {
+fn dumpEnum(@"enum": *Enum, mapper: StringInterner.TypeMapper, w: *Writer) Writer.Error!void {
 try w.writeAll(" {");
 for (@"enum".fields) |field| {
 try w.print(" {s} = {d},", .{ mapper.lookup(field.name), field.value });

@@ -2664,7 +2665,7 @@ fn dumpEnum(@"enum": *Enum, mapper: StringInterner.TypeMapper, w: anytype) @Type
 try w.writeAll(" }");
 }

-fn dumpRecord(record: *Record, mapper: StringInterner.TypeMapper, langopts: LangOpts, w: anytype) @TypeOf(w).Error!void {
+fn dumpRecord(record: *Record, mapper: StringInterner.TypeMapper, langopts: LangOpts, w: *Writer) Writer.Error!void {
 try w.writeAll(" {");
 for (record.fields) |field| {
 try w.writeByte(' ');
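Note the error-set change riding along with the Type.zig signatures above: `@TypeOf(w).Error` only made sense while `w` was `anytype`; with a concrete `*Writer` these functions return the fixed `Writer.Error` set and are no longer generic. A tiny sketch (`dumpQuals` is an invented name):

const std = @import("std");
const Writer = std.Io.Writer;

fn dumpQuals(is_const: bool, w: *Writer) Writer.Error!void {
    if (is_const) try w.writeAll("const ");
}

test dumpQuals {
    var buf: [8]u8 = undefined;
    var w: Writer = .fixed(&buf);
    try dumpQuals(true, &w);
    try std.testing.expectEqualStrings("const ", w.buffered());
}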
lib/compiler/aro/aro/Value.zig (vendored), 5 changed lines

@@ -9,6 +9,7 @@ const Compilation = @import("Compilation.zig");
 const Type = @import("Type.zig");
 const target_util = @import("target.zig");
 const annex_g = @import("annex_g.zig");
+const Writer = std.Io.Writer;

 const Value = @This();

@@ -953,7 +954,7 @@ pub fn maxInt(ty: Type, comp: *Compilation) !Value {
 return twosCompIntLimit(.max, ty, comp);
 }

-pub fn print(v: Value, ty: Type, comp: *const Compilation, w: anytype) @TypeOf(w).Error!void {
+pub fn print(v: Value, ty: Type, comp: *const Compilation, w: *Writer) Writer.Error!void {
 if (ty.is(.bool)) {
 return w.writeAll(if (v.isZero(comp)) "false" else "true");
 }

@@ -977,7 +978,7 @@ pub fn print(v: Value, ty: Type, comp: *const Compilation, w: anytype) @TypeOf(w
 }
 }

-pub fn printString(bytes: []const u8, ty: Type, comp: *const Compilation, w: anytype) @TypeOf(w).Error!void {
+pub fn printString(bytes: []const u8, ty: Type, comp: *const Compilation, w: *Writer) Writer.Error!void {
 const size: Compilation.CharUnitSize = @enumFromInt(ty.elemType().sizeof(comp).?);
 const without_null = bytes[0 .. bytes.len - @intFromEnum(size)];
 try w.writeByte('"');
@@ -116,15 +116,17 @@ pub fn translate(
 var driver: aro.Driver = .{ .comp = comp };
 defer driver.deinit();

-var macro_buf = std.array_list.Managed(u8).init(gpa);
+var macro_buf: std.Io.Writer.Allocating = .init(gpa);
 defer macro_buf.deinit();

-assert(!try driver.parseArgs(std.io.null_writer, macro_buf.writer(), args));
+var trash: [64]u8 = undefined;
+var discarding: std.Io.Writer.Discarding = .init(&trash);
+assert(!try driver.parseArgs(&discarding.writer, &macro_buf.writer, args));
 assert(driver.inputs.items.len == 1);
 const source = driver.inputs.items[0];

 const builtin_macros = try comp.generateBuiltinMacros(.include_system_defines);
-const user_macros = try comp.addSourceFromBuffer("<command line>", macro_buf.items);
+const user_macros = try comp.addSourceFromBuffer("<command line>", macro_buf.written());

 var pp = try aro.Preprocessor.initDefault(comp);
 defer pp.deinit();
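This hunk covers the third deletion in the commit title: with `std.io.null_writer` gone, callers that want to discard output construct a `std.Io.Writer.Discarding` over a small scratch buffer and pass its `.writer`. A sketch mirroring the two new lines above:

const std = @import("std");

test "discard output without std.io.null_writer" {
    var trash: [64]u8 = undefined;
    var discarding: std.Io.Writer.Discarding = .init(&trash);
    // Everything written here is dropped; `trash` is only scratch space.
    try discarding.writer.print("ignored: {d}\n", .{123});
}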
@@ -698,11 +700,10 @@ fn transEnumDecl(c: *Context, scope: *Scope, enum_decl: *const Type.Enum, field_
 }

 fn getTypeStr(c: *Context, ty: Type) ![]const u8 {
-var buf: std.ArrayListUnmanaged(u8) = .empty;
-defer buf.deinit(c.gpa);
-const w = buf.writer(c.gpa);
-try ty.print(c.mapper, c.comp.langopts, w);
-return c.arena.dupe(u8, buf.items);
+var allocating: std.Io.Writer.Allocating = .init(c.gpa);
+defer allocating.deinit();
+ty.print(c.mapper, c.comp.langopts, &allocating.writer) catch return error.OutOfMemory;
+return c.arena.dupe(u8, allocating.written());
 }

 fn transType(c: *Context, scope: *Scope, raw_ty: Type, qual_handling: Type.QualHandling, source_loc: TokenIndex) TypeError!ZigNode {

@@ -1820,6 +1821,7 @@ pub fn main() !void {
 var tree = translate(gpa, &aro_comp, args) catch |err| switch (err) {
 error.ParsingFailed, error.FatalError => renderErrorsAndExit(&aro_comp),
 error.OutOfMemory => return error.OutOfMemory,
+error.WriteFailed => return error.WriteFailed,
 error.StreamTooLong => std.process.fatal("An input file was larger than 4GiB", .{}),
 };
 defer tree.deinit(gpa);

@@ -832,7 +832,7 @@ const Context = struct {

 fn addTokenFmt(c: *Context, tag: TokenTag, comptime format: []const u8, args: anytype) Allocator.Error!TokenIndex {
 const start_index = c.buf.items.len;
-try c.buf.writer().print(format ++ " ", args);
+try c.buf.print(format ++ " ", args);

 try c.tokens.append(c.gpa, .{
 .tag = tag,
@@ -16,31 +16,31 @@ const std = @import("std");

 const AF_ICON: u32 = 1;

-pub fn isAnimatedIcon(reader: anytype) bool {
+pub fn isAnimatedIcon(reader: *std.Io.Reader) bool {
 const flags = getAniheaderFlags(reader) catch return false;
 return flags & AF_ICON == AF_ICON;
 }

-fn getAniheaderFlags(reader: anytype) !u32 {
-const riff_header = try reader.readBytesNoEof(4);
-if (!std.mem.eql(u8, &riff_header, "RIFF")) return error.InvalidFormat;
+fn getAniheaderFlags(reader: *std.Io.Reader) !u32 {
+const riff_header = try reader.takeArray(4);
+if (!std.mem.eql(u8, riff_header, "RIFF")) return error.InvalidFormat;

-_ = try reader.readInt(u32, .little); // size of RIFF chunk
+_ = try reader.takeInt(u32, .little); // size of RIFF chunk

-const form_type = try reader.readBytesNoEof(4);
-if (!std.mem.eql(u8, &form_type, "ACON")) return error.InvalidFormat;
+const form_type = try reader.takeArray(4);
+if (!std.mem.eql(u8, form_type, "ACON")) return error.InvalidFormat;

 while (true) {
-const chunk_id = try reader.readBytesNoEof(4);
-const chunk_len = try reader.readInt(u32, .little);
-if (!std.mem.eql(u8, &chunk_id, "anih")) {
+const chunk_id = try reader.takeArray(4);
+const chunk_len = try reader.takeInt(u32, .little);
+if (!std.mem.eql(u8, chunk_id, "anih")) {
 // TODO: Move file cursor instead of skipBytes
-try reader.skipBytes(chunk_len, .{});
+try reader.discardAll(chunk_len);
 continue;
 }

-const aniheader = try reader.readStruct(ANIHEADER);
-return std.mem.nativeToLittle(u32, aniheader.flags);
+const aniheader = try reader.takeStruct(ANIHEADER, .little);
+return aniheader.flags;
 }
 }
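On the reading side the migration is parallel: `anytype` readers become `*std.Io.Reader`, and `readBytesNoEof`/`readInt`/`skipBytes`/`readStruct` become `takeArray`/`takeInt`/`discardAll`/`takeStruct`. A sketch built from those calls; the byte layout is invented for the example:

const std = @import("std");

test "std.Io.Reader take/discard methods" {
    const data = "RIFF" ++ "\x04\x00\x00\x00" ++ "\x01\x02\x03\x04" ++ "rest";
    var r: std.Io.Reader = .fixed(data);

    const tag = try r.takeArray(4); // was: readBytesNoEof(4)
    try std.testing.expectEqualSlices(u8, "RIFF", tag);

    const len = try r.takeInt(u32, .little); // was: readInt(u32, .little)
    try std.testing.expectEqual(@as(u32, 4), len);

    try r.discardAll(len); // was: skipBytes(len, .{})
    try std.testing.expectEqualSlices(u8, "rest", try r.takeArray(4));
}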
@@ -22,13 +22,13 @@ pub const Tree = struct {
 return @alignCast(@fieldParentPtr("base", self.node));
 }

-pub fn dump(self: *Tree, writer: anytype) @TypeOf(writer).Error!void {
+pub fn dump(self: *Tree, writer: *std.io.Writer) !void {
 try self.node.dump(self, writer, 0);
 }
 };

 pub const CodePageLookup = struct {
-lookup: std.ArrayListUnmanaged(SupportedCodePage) = .empty,
+lookup: std.ArrayList(SupportedCodePage) = .empty,
 allocator: Allocator,
 default_code_page: SupportedCodePage,

@@ -726,10 +726,10 @@ pub const Node = struct {
 pub fn dump(
 node: *const Node,
 tree: *const Tree,
-writer: anytype,
+writer: *std.io.Writer,
 indent: usize,
-) @TypeOf(writer).Error!void {
-try writer.writeByteNTimes(' ', indent);
+) std.io.Writer.Error!void {
+try writer.splatByteAll(' ', indent);
 try writer.writeAll(@tagName(node.id));
 switch (node.id) {
 .root => {

@@ -768,11 +768,11 @@ pub const Node = struct {
 .grouped_expression => {
 const grouped: *const Node.GroupedExpression = @alignCast(@fieldParentPtr("base", node));
 try writer.writeAll("\n");
-try writer.writeByteNTimes(' ', indent);
+try writer.splatByteAll(' ', indent);
 try writer.writeAll(grouped.open_token.slice(tree.source));
 try writer.writeAll("\n");
 try grouped.expression.dump(tree, writer, indent + 1);
-try writer.writeByteNTimes(' ', indent);
+try writer.splatByteAll(' ', indent);
 try writer.writeAll(grouped.close_token.slice(tree.source));
 try writer.writeAll("\n");
 },

@@ -790,13 +790,13 @@ pub const Node = struct {
 for (accelerators.optional_statements) |statement| {
 try statement.dump(tree, writer, indent + 1);
 }
-try writer.writeByteNTimes(' ', indent);
+try writer.splatByteAll(' ', indent);
 try writer.writeAll(accelerators.begin_token.slice(tree.source));
 try writer.writeAll("\n");
 for (accelerators.accelerators) |accelerator| {
 try accelerator.dump(tree, writer, indent + 1);
 }
-try writer.writeByteNTimes(' ', indent);
+try writer.splatByteAll(' ', indent);
 try writer.writeAll(accelerators.end_token.slice(tree.source));
 try writer.writeAll("\n");
 },

@@ -815,25 +815,25 @@ pub const Node = struct {
 const dialog: *const Node.Dialog = @alignCast(@fieldParentPtr("base", node));
 try writer.print(" {s} {s} [{d} common_resource_attributes]\n", .{ dialog.id.slice(tree.source), dialog.type.slice(tree.source), dialog.common_resource_attributes.len });
 inline for (.{ "x", "y", "width", "height" }) |arg| {
-try writer.writeByteNTimes(' ', indent + 1);
+try writer.splatByteAll(' ', indent + 1);
 try writer.writeAll(arg ++ ":\n");
 try @field(dialog, arg).dump(tree, writer, indent + 2);
 }
 if (dialog.help_id) |help_id| {
-try writer.writeByteNTimes(' ', indent + 1);
+try writer.splatByteAll(' ', indent + 1);
 try writer.writeAll("help_id:\n");
 try help_id.dump(tree, writer, indent + 2);
 }
 for (dialog.optional_statements) |statement| {
 try statement.dump(tree, writer, indent + 1);
 }
-try writer.writeByteNTimes(' ', indent);
+try writer.splatByteAll(' ', indent);
 try writer.writeAll(dialog.begin_token.slice(tree.source));
 try writer.writeAll("\n");
 for (dialog.controls) |control| {
 try control.dump(tree, writer, indent + 1);
 }
-try writer.writeByteNTimes(' ', indent);
+try writer.splatByteAll(' ', indent);
 try writer.writeAll(dialog.end_token.slice(tree.source));
 try writer.writeAll("\n");
 },

@@ -845,30 +845,30 @@ pub const Node = struct {
 }
 try writer.writeByte('\n');
 if (control.class) |class| {
-try writer.writeByteNTimes(' ', indent + 1);
+try writer.splatByteAll(' ', indent + 1);
 try writer.writeAll("class:\n");
 try class.dump(tree, writer, indent + 2);
 }
 inline for (.{ "id", "x", "y", "width", "height" }) |arg| {
-try writer.writeByteNTimes(' ', indent + 1);
+try writer.splatByteAll(' ', indent + 1);
 try writer.writeAll(arg ++ ":\n");
 try @field(control, arg).dump(tree, writer, indent + 2);
 }
 inline for (.{ "style", "exstyle", "help_id" }) |arg| {
 if (@field(control, arg)) |val_node| {
-try writer.writeByteNTimes(' ', indent + 1);
+try writer.splatByteAll(' ', indent + 1);
 try writer.writeAll(arg ++ ":\n");
 try val_node.dump(tree, writer, indent + 2);
 }
 }
 if (control.extra_data_begin != null) {
-try writer.writeByteNTimes(' ', indent);
+try writer.splatByteAll(' ', indent);
 try writer.writeAll(control.extra_data_begin.?.slice(tree.source));
 try writer.writeAll("\n");
 for (control.extra_data) |data_node| {
 try data_node.dump(tree, writer, indent + 1);
 }
-try writer.writeByteNTimes(' ', indent);
+try writer.splatByteAll(' ', indent);
 try writer.writeAll(control.extra_data_end.?.slice(tree.source));
 try writer.writeAll("\n");
 }

@@ -877,17 +877,17 @@ pub const Node = struct {
 const toolbar: *const Node.Toolbar = @alignCast(@fieldParentPtr("base", node));
 try writer.print(" {s} {s} [{d} common_resource_attributes]\n", .{ toolbar.id.slice(tree.source), toolbar.type.slice(tree.source), toolbar.common_resource_attributes.len });
 inline for (.{ "button_width", "button_height" }) |arg| {
-try writer.writeByteNTimes(' ', indent + 1);
+try writer.splatByteAll(' ', indent + 1);
 try writer.writeAll(arg ++ ":\n");
 try @field(toolbar, arg).dump(tree, writer, indent + 2);
 }
-try writer.writeByteNTimes(' ', indent);
+try writer.splatByteAll(' ', indent);
 try writer.writeAll(toolbar.begin_token.slice(tree.source));
 try writer.writeAll("\n");
 for (toolbar.buttons) |button_or_sep| {
 try button_or_sep.dump(tree, writer, indent + 1);
 }
-try writer.writeByteNTimes(' ', indent);
+try writer.splatByteAll(' ', indent);
 try writer.writeAll(toolbar.end_token.slice(tree.source));
 try writer.writeAll("\n");
 },

@@ -898,17 +898,17 @@ pub const Node = struct {
 try statement.dump(tree, writer, indent + 1);
 }
 if (menu.help_id) |help_id| {
-try writer.writeByteNTimes(' ', indent + 1);
+try writer.splatByteAll(' ', indent + 1);
 try writer.writeAll("help_id:\n");
 try help_id.dump(tree, writer, indent + 2);
 }
-try writer.writeByteNTimes(' ', indent);
+try writer.splatByteAll(' ', indent);
 try writer.writeAll(menu.begin_token.slice(tree.source));
 try writer.writeAll("\n");
 for (menu.items) |item| {
 try item.dump(tree, writer, indent + 1);
 }
-try writer.writeByteNTimes(' ', indent);
+try writer.splatByteAll(' ', indent);
 try writer.writeAll(menu.end_token.slice(tree.source));
 try writer.writeAll("\n");
 },

@@ -926,7 +926,7 @@ pub const Node = struct {
 try writer.print(" {s} {s}\n", .{ menu_item.menuitem.slice(tree.source), menu_item.text.slice(tree.source) });
 inline for (.{ "id", "type", "state" }) |arg| {
 if (@field(menu_item, arg)) |val_node| {
-try writer.writeByteNTimes(' ', indent + 1);
+try writer.splatByteAll(' ', indent + 1);
 try writer.writeAll(arg ++ ":\n");
 try val_node.dump(tree, writer, indent + 2);
 }

@@ -935,13 +935,13 @@ pub const Node = struct {
 .popup => {
 const popup: *const Node.Popup = @alignCast(@fieldParentPtr("base", node));
 try writer.print(" {s} {s} [{d} options]\n", .{ popup.popup.slice(tree.source), popup.text.slice(tree.source), popup.option_list.len });
-try writer.writeByteNTimes(' ', indent);
+try writer.splatByteAll(' ', indent);
 try writer.writeAll(popup.begin_token.slice(tree.source));
 try writer.writeAll("\n");
 for (popup.items) |item| {
 try item.dump(tree, writer, indent + 1);
 }
-try writer.writeByteNTimes(' ', indent);
+try writer.splatByteAll(' ', indent);
 try writer.writeAll(popup.end_token.slice(tree.source));
 try writer.writeAll("\n");
 },

@@ -950,18 +950,18 @@ pub const Node = struct {
 try writer.print(" {s} {s}\n", .{ popup.popup.slice(tree.source), popup.text.slice(tree.source) });
 inline for (.{ "id", "type", "state", "help_id" }) |arg| {
 if (@field(popup, arg)) |val_node| {
-try writer.writeByteNTimes(' ', indent + 1);
+try writer.splatByteAll(' ', indent + 1);
 try writer.writeAll(arg ++ ":\n");
 try val_node.dump(tree, writer, indent + 2);
 }
 }
-try writer.writeByteNTimes(' ', indent);
+try writer.splatByteAll(' ', indent);
 try writer.writeAll(popup.begin_token.slice(tree.source));
 try writer.writeAll("\n");
 for (popup.items) |item| {
 try item.dump(tree, writer, indent + 1);
 }
-try writer.writeByteNTimes(' ', indent);
+try writer.splatByteAll(' ', indent);
 try writer.writeAll(popup.end_token.slice(tree.source));
 try writer.writeAll("\n");
 },

@@ -971,13 +971,13 @@ pub const Node = struct {
 for (version_info.fixed_info) |fixed_info| {
 try fixed_info.dump(tree, writer, indent + 1);
 }
-try writer.writeByteNTimes(' ', indent);
+try writer.splatByteAll(' ', indent);
 try writer.writeAll(version_info.begin_token.slice(tree.source));
 try writer.writeAll("\n");
 for (version_info.block_statements) |block| {
 try block.dump(tree, writer, indent + 1);
 }
-try writer.writeByteNTimes(' ', indent);
+try writer.splatByteAll(' ', indent);
 try writer.writeAll(version_info.end_token.slice(tree.source));
 try writer.writeAll("\n");
 },

@@ -994,13 +994,13 @@ pub const Node = struct {
 for (block.values) |value| {
 try value.dump(tree, writer, indent + 1);
 }
-try writer.writeByteNTimes(' ', indent);
+try writer.splatByteAll(' ', indent);
 try writer.writeAll(block.begin_token.slice(tree.source));
 try writer.writeAll("\n");
 for (block.children) |child| {
 try child.dump(tree, writer, indent + 1);
 }
-try writer.writeByteNTimes(' ', indent);
+try writer.splatByteAll(' ', indent);
 try writer.writeAll(block.end_token.slice(tree.source));
 try writer.writeAll("\n");
 },

@@ -1025,13 +1025,13 @@ pub const Node = struct {
 for (string_table.optional_statements) |statement| {
 try statement.dump(tree, writer, indent + 1);
 }
-try writer.writeByteNTimes(' ', indent);
+try writer.splatByteAll(' ', indent);
 try writer.writeAll(string_table.begin_token.slice(tree.source));
 try writer.writeAll("\n");
 for (string_table.strings) |string| {
 try string.dump(tree, writer, indent + 1);
 }
-try writer.writeByteNTimes(' ', indent);
+try writer.splatByteAll(' ', indent);
 try writer.writeAll(string_table.end_token.slice(tree.source));
 try writer.writeAll("\n");
 },

@@ -1039,7 +1039,7 @@ pub const Node = struct {
 try writer.writeAll("\n");
 const string: *const Node.StringTableString = @alignCast(@fieldParentPtr("base", node));
 try string.id.dump(tree, writer, indent + 1);
-try writer.writeByteNTimes(' ', indent + 1);
+try writer.splatByteAll(' ', indent + 1);
 try writer.print("{s}\n", .{string.string.slice(tree.source)});
 },
 .language_statement => {

@@ -1051,12 +1051,12 @@ pub const Node = struct {
 .font_statement => {
 const font: *const Node.FontStatement = @alignCast(@fieldParentPtr("base", node));
 try writer.print(" {s} typeface: {s}\n", .{ font.identifier.slice(tree.source), font.typeface.slice(tree.source) });
-try writer.writeByteNTimes(' ', indent + 1);
+try writer.splatByteAll(' ', indent + 1);
 try writer.writeAll("point_size:\n");
 try font.point_size.dump(tree, writer, indent + 2);
 inline for (.{ "weight", "italic", "char_set" }) |arg| {
 if (@field(font, arg)) |arg_node| {
-try writer.writeByteNTimes(' ', indent + 1);
+try writer.splatByteAll(' ', indent + 1);
 try writer.writeAll(arg ++ ":\n");
 try arg_node.dump(tree, writer, indent + 2);
 }

@@ -1071,7 +1071,7 @@ pub const Node = struct {
 const invalid: *const Node.Invalid = @alignCast(@fieldParentPtr("base", node));
 try writer.print(" context.len: {}\n", .{invalid.context.len});
 for (invalid.context) |context_token| {
-try writer.writeByteNTimes(' ', indent + 1);
+try writer.splatByteAll(' ', indent + 1);
 try writer.print("{s}:{s}", .{ @tagName(context_token.id), context_token.slice(tree.source) });
 try writer.writeByte('\n');
 }
@ -27,6 +27,7 @@ pub const windows_format_id = std.mem.readInt(u16, "BM", native_endian);
pub const file_header_len = 14;

pub const ReadError = error{
ReadFailed,
UnexpectedEOF,
InvalidFileHeader,
ImpossiblePixelDataOffset,
@ -94,9 +95,12 @@ pub const BitmapInfo = struct {
}
};

pub fn read(reader: anytype, max_size: u64) ReadError!BitmapInfo {
pub fn read(reader: *std.Io.Reader, max_size: u64) ReadError!BitmapInfo {
var bitmap_info: BitmapInfo = undefined;
const file_header = reader.readBytesNoEof(file_header_len) catch return error.UnexpectedEOF;
const file_header = reader.takeArray(file_header_len) catch |err| switch (err) {
error.EndOfStream => return error.UnexpectedEOF,
else => |e| return e,
};

const id = std.mem.readInt(u16, file_header[0..2], native_endian);
if (id != windows_format_id) return error.InvalidFileHeader;
@ -104,14 +108,17 @@ pub fn read(reader: anytype, max_size: u64) ReadError!BitmapInfo {
bitmap_info.pixel_data_offset = std.mem.readInt(u32, file_header[10..14], .little);
if (bitmap_info.pixel_data_offset > max_size) return error.ImpossiblePixelDataOffset;

bitmap_info.dib_header_size = reader.readInt(u32, .little) catch return error.UnexpectedEOF;
bitmap_info.dib_header_size = reader.takeInt(u32, .little) catch return error.UnexpectedEOF;
if (bitmap_info.pixel_data_offset < file_header_len + bitmap_info.dib_header_size) return error.ImpossiblePixelDataOffset;
const dib_version = BitmapHeader.Version.get(bitmap_info.dib_header_size);
switch (dib_version) {
.@"nt3.1", .@"nt4.0", .@"nt5.0" => {
var dib_header_buf: [@sizeOf(BITMAPINFOHEADER)]u8 align(@alignOf(BITMAPINFOHEADER)) = undefined;
std.mem.writeInt(u32, dib_header_buf[0..4], bitmap_info.dib_header_size, .little);
reader.readNoEof(dib_header_buf[4..]) catch return error.UnexpectedEOF;
reader.readSliceAll(dib_header_buf[4..]) catch |err| switch (err) {
error.EndOfStream => return error.UnexpectedEOF,
error.ReadFailed => |e| return e,
};
var dib_header: *BITMAPINFOHEADER = @ptrCast(&dib_header_buf);
structFieldsLittleToNative(BITMAPINFOHEADER, dib_header);

@ -126,7 +133,10 @@ pub fn read(reader: anytype, max_size: u64) ReadError!BitmapInfo {
.@"win2.0" => {
var dib_header_buf: [@sizeOf(BITMAPCOREHEADER)]u8 align(@alignOf(BITMAPCOREHEADER)) = undefined;
std.mem.writeInt(u32, dib_header_buf[0..4], bitmap_info.dib_header_size, .little);
reader.readNoEof(dib_header_buf[4..]) catch return error.UnexpectedEOF;
reader.readSliceAll(dib_header_buf[4..]) catch |err| switch (err) {
error.EndOfStream => return error.UnexpectedEOF,
error.ReadFailed => |e| return e,
};
const dib_header: *BITMAPCOREHEADER = @ptrCast(&dib_header_buf);
structFieldsLittleToNative(BITMAPCOREHEADER, dib_header);

@ -238,26 +248,26 @@ fn structFieldsLittleToNative(comptime T: type, x: *T) void {

test "read" {
var bmp_data = "BM<\x00\x00\x00\x00\x00\x00\x006\x00\x00\x00(\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x01\x00\x10\x00\x00\x00\x00\x00\x06\x00\x00\x00\x12\x0b\x00\x00\x12\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\x7f\x00\x00\x00\x00".*;
var fbs = std.io.fixedBufferStream(&bmp_data);
var fbs: std.Io.Reader = .fixed(&bmp_data);

{
const bitmap = try read(fbs.reader(), bmp_data.len);
const bitmap = try read(&fbs, bmp_data.len);
try std.testing.expectEqual(@as(u32, BitmapHeader.Version.@"nt3.1".len()), bitmap.dib_header_size);
}

{
fbs.reset();
fbs.seek = 0;
bmp_data[file_header_len] = 11;
try std.testing.expectError(error.UnknownBitmapVersion, read(fbs.reader(), bmp_data.len));
try std.testing.expectError(error.UnknownBitmapVersion, read(&fbs, bmp_data.len));

// restore
bmp_data[file_header_len] = BitmapHeader.Version.@"nt3.1".len();
}

{
fbs.reset();
fbs.seek = 0;
bmp_data[0] = 'b';
try std.testing.expectError(error.InvalidFileHeader, read(fbs.reader(), bmp_data.len));
try std.testing.expectError(error.InvalidFileHeader, read(&fbs, bmp_data.len));

// restore
bmp_data[0] = 'B';
@ -265,13 +275,13 @@ test "read" {

{
const cutoff_len = file_header_len + BitmapHeader.Version.@"nt3.1".len() - 1;
var dib_cutoff_fbs = std.io.fixedBufferStream(bmp_data[0..cutoff_len]);
try std.testing.expectError(error.UnexpectedEOF, read(dib_cutoff_fbs.reader(), bmp_data.len));
var dib_cutoff_fbs: std.Io.Reader = .fixed(bmp_data[0..cutoff_len]);
try std.testing.expectError(error.UnexpectedEOF, read(&dib_cutoff_fbs, bmp_data.len));
}

{
const cutoff_len = file_header_len - 1;
var bmp_cutoff_fbs = std.io.fixedBufferStream(bmp_data[0..cutoff_len]);
try std.testing.expectError(error.UnexpectedEOF, read(bmp_cutoff_fbs.reader(), bmp_data.len));
var bmp_cutoff_fbs: std.Io.Reader = .fixed(bmp_data[0..cutoff_len]);
try std.testing.expectError(error.UnexpectedEOF, read(&bmp_cutoff_fbs, bmp_data.len));
}
}

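The bmp.zig changes above replace `std.io.fixedBufferStream` and the GenericReader helpers with `std.Io.Reader`: `.fixed` over a byte slice, `takeInt`/`takeArray` instead of `readInt`/`readBytesNoEof`, and rewinding by resetting `seek`. A small sketch of that reading pattern under the same assumptions (the byte values are made up for illustration):

const std = @import("std");

test "std.Io.Reader basics (sketch)" {
    const data = [_]u8{ 0x2a, 0x00, 0x00, 0x00, 0xaa, 0xbb };
    var r: std.Io.Reader = .fixed(&data);
    // Replaces `reader.readInt(u32, .little)`.
    try std.testing.expectEqual(@as(u32, 0x2a), try r.takeInt(u32, .little));
    // Replaces `reader.readBytesNoEof(2)`; yields the next 2 bytes.
    const pair = try r.takeArray(2);
    try std.testing.expectEqual(@as(u8, 0xaa), pair[0]);
    // Rewind a fixed reader the same way the updated test does.
    r.seek = 0;
}
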
@ -80,20 +80,20 @@ pub const usage_string_after_command_name =
\\
;

pub fn writeUsage(writer: anytype, command_name: []const u8) !void {
pub fn writeUsage(writer: *std.Io.Writer, command_name: []const u8) !void {
try writer.writeAll("Usage: ");
try writer.writeAll(command_name);
try writer.writeAll(usage_string_after_command_name);
}

pub const Diagnostics = struct {
errors: std.ArrayListUnmanaged(ErrorDetails) = .empty,
errors: std.ArrayList(ErrorDetails) = .empty,
allocator: Allocator,

pub const ErrorDetails = struct {
arg_index: usize,
arg_span: ArgSpan = .{},
msg: std.ArrayListUnmanaged(u8) = .empty,
msg: std.ArrayList(u8) = .empty,
type: Type = .err,
print_args: bool = true,

@ -148,7 +148,7 @@ pub const Options = struct {
allocator: Allocator,
input_source: IoSource = .{ .filename = &[_]u8{} },
output_source: IoSource = .{ .filename = &[_]u8{} },
extra_include_paths: std.ArrayListUnmanaged([]const u8) = .empty,
extra_include_paths: std.ArrayList([]const u8) = .empty,
ignore_include_env_var: bool = false,
preprocess: Preprocess = .yes,
default_language_id: ?u16 = null,
@ -295,7 +295,7 @@ pub const Options = struct {
}
}

pub fn dumpVerbose(self: *const Options, writer: anytype) !void {
pub fn dumpVerbose(self: *const Options, writer: *std.Io.Writer) !void {
const input_source_name = switch (self.input_source) {
.stdio => "<stdin>",
.filename => |filename| filename,
@ -520,8 +520,7 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
// - or / on its own is an error
else => {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = arg.optionAndAfterSpan() };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.print("invalid option: {s}", .{arg.prefixSlice()});
try err_details.msg.print(allocator, "invalid option: {s}", .{arg.prefixSlice()});
try diagnostics.append(err_details);
arg_i += 1;
continue :next_arg;
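
The parse() hunks above and below repeat one mechanical rewrite: `ErrorDetails.msg` is now an unmanaged `std.ArrayList(u8)`, so messages are built with `msg.print(allocator, ...)` and `msg.appendSlice(allocator, ...)` directly instead of going through a `msg.writer(allocator)` adapter. A minimal standalone sketch of the new pattern (not the cli.zig types themselves):

const std = @import("std");

test "building a message in an unmanaged ArrayList (sketch)" {
    const allocator = std.testing.allocator;
    var msg: std.ArrayList(u8) = .empty;
    defer msg.deinit(allocator);
    // Replaces `msg.writer(allocator).print(...)`.
    try msg.print(allocator, "invalid option: {s}", .{"/x"});
    // Replaces `msg.writer(allocator).writeAll(...)`.
    try msg.appendSlice(allocator, " (ignored)");
    try std.testing.expectEqualStrings("invalid option: /x (ignored)", msg.items);
}
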
@ -532,8 +531,7 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
const args_remaining = args.len - arg_i;
if (args_remaining <= 2 and arg.looksLikeFilepath()) {
var err_details = Diagnostics.ErrorDetails{ .type = .note, .print_args = true, .arg_index = arg_i };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.writeAll("this argument was inferred to be a filepath, so argument parsing was terminated");
try err_details.msg.appendSlice(allocator, "this argument was inferred to be a filepath, so argument parsing was terminated");
try diagnostics.append(err_details);

break;
@ -550,16 +548,14 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
} else if (std.ascii.startsWithIgnoreCase(arg_name, ":output-format")) {
const value = arg.value(":output-format".len, arg_i, args) catch {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = arg.missingSpan() };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.print("missing value after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(":output-format".len) });
try err_details.msg.print(allocator, "missing value after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(":output-format".len) });
try diagnostics.append(err_details);
arg_i += 1;
break :next_arg;
};
output_format = std.meta.stringToEnum(Options.OutputFormat, value.slice) orelse blk: {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = value.argSpan(arg) };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.print("invalid output format setting: {s} ", .{value.slice});
try err_details.msg.print(allocator, "invalid output format setting: {s} ", .{value.slice});
try diagnostics.append(err_details);
break :blk output_format;
};
@ -569,16 +565,14 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
} else if (std.ascii.startsWithIgnoreCase(arg_name, ":auto-includes")) {
const value = arg.value(":auto-includes".len, arg_i, args) catch {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = arg.missingSpan() };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.print("missing value after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(":auto-includes".len) });
try err_details.msg.print(allocator, "missing value after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(":auto-includes".len) });
try diagnostics.append(err_details);
arg_i += 1;
break :next_arg;
};
options.auto_includes = std.meta.stringToEnum(Options.AutoIncludes, value.slice) orelse blk: {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = value.argSpan(arg) };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.print("invalid auto includes setting: {s} ", .{value.slice});
try err_details.msg.print(allocator, "invalid auto includes setting: {s} ", .{value.slice});
try diagnostics.append(err_details);
break :blk options.auto_includes;
};
@ -587,16 +581,14 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
} else if (std.ascii.startsWithIgnoreCase(arg_name, ":input-format")) {
const value = arg.value(":input-format".len, arg_i, args) catch {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = arg.missingSpan() };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.print("missing value after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(":input-format".len) });
try err_details.msg.print(allocator, "missing value after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(":input-format".len) });
try diagnostics.append(err_details);
arg_i += 1;
break :next_arg;
};
input_format = std.meta.stringToEnum(Options.InputFormat, value.slice) orelse blk: {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = value.argSpan(arg) };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.print("invalid input format setting: {s} ", .{value.slice});
try err_details.msg.print(allocator, "invalid input format setting: {s} ", .{value.slice});
try diagnostics.append(err_details);
break :blk input_format;
};
@ -606,16 +598,14 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
} else if (std.ascii.startsWithIgnoreCase(arg_name, ":depfile-fmt")) {
const value = arg.value(":depfile-fmt".len, arg_i, args) catch {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = arg.missingSpan() };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.print("missing value after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(":depfile-fmt".len) });
try err_details.msg.print(allocator, "missing value after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(":depfile-fmt".len) });
try diagnostics.append(err_details);
arg_i += 1;
break :next_arg;
};
options.depfile_fmt = std.meta.stringToEnum(Options.DepfileFormat, value.slice) orelse blk: {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = value.argSpan(arg) };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.print("invalid depfile format setting: {s} ", .{value.slice});
try err_details.msg.print(allocator, "invalid depfile format setting: {s} ", .{value.slice});
try diagnostics.append(err_details);
break :blk options.depfile_fmt;
};
@ -624,8 +614,7 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
} else if (std.ascii.startsWithIgnoreCase(arg_name, ":depfile")) {
const value = arg.value(":depfile".len, arg_i, args) catch {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = arg.missingSpan() };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.print("missing value after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(":depfile".len) });
try err_details.msg.print(allocator, "missing value after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(":depfile".len) });
try diagnostics.append(err_details);
arg_i += 1;
break :next_arg;
@ -643,8 +632,7 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
} else if (std.ascii.startsWithIgnoreCase(arg_name, ":target")) {
const value = arg.value(":target".len, arg_i, args) catch {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = arg.missingSpan() };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.print("missing value after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(":target".len) });
try err_details.msg.print(allocator, "missing value after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(":target".len) });
try diagnostics.append(err_details);
arg_i += 1;
break :next_arg;
@ -655,8 +643,7 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
const arch_str = target_it.first();
const arch = cvtres.supported_targets.Arch.fromStringIgnoreCase(arch_str) orelse {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = value.argSpan(arg) };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.print("invalid or unsupported target architecture: {s}", .{arch_str});
try err_details.msg.print(allocator, "invalid or unsupported target architecture: {s}", .{arch_str});
try diagnostics.append(err_details);
arg_i += value.index_increment;
continue :next_arg;
@ -680,13 +667,11 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
.prefix_len = arg.prefixSlice().len,
.value_offset = arg.name_offset + 3,
} };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.print("missing value for {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(3) });
try err_details.msg.print(allocator, "missing value for {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(3) });
try diagnostics.append(err_details);
}
var err_details = Diagnostics.ErrorDetails{ .type = .err, .arg_index = arg_i, .arg_span = arg.optionAndAfterSpan() };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.print("the {s}{s} option is unsupported", .{ arg.prefixSlice(), arg.optionWithoutPrefix(3) });
try err_details.msg.print(allocator, "the {s}{s} option is unsupported", .{ arg.prefixSlice(), arg.optionWithoutPrefix(3) });
try diagnostics.append(err_details);
arg_i += 1;
continue :next_arg;
@ -695,16 +680,14 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
else if (std.ascii.startsWithIgnoreCase(arg_name, "tn")) {
const value = arg.value(2, arg_i, args) catch no_value: {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = arg.missingSpan() };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.print("missing value after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(2) });
try err_details.msg.print(allocator, "missing value after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(2) });
try diagnostics.append(err_details);
// dummy zero-length slice starting where the value would have been
const value_start = arg.name_offset + 2;
break :no_value Arg.Value{ .slice = arg.full[value_start..value_start] };
};
var err_details = Diagnostics.ErrorDetails{ .type = .err, .arg_index = arg_i, .arg_span = arg.optionAndAfterSpan() };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.print("the {s}{s} option is unsupported", .{ arg.prefixSlice(), arg.optionWithoutPrefix(2) });
try err_details.msg.print(allocator, "the {s}{s} option is unsupported", .{ arg.prefixSlice(), arg.optionWithoutPrefix(2) });
try diagnostics.append(err_details);
arg_i += value.index_increment;
continue :next_arg;
@ -716,16 +699,14 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
{
const value = arg.value(2, arg_i, args) catch no_value: {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = arg.missingSpan() };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.print("missing value after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(2) });
try err_details.msg.print(allocator, "missing value after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(2) });
try diagnostics.append(err_details);
// dummy zero-length slice starting where the value would have been
const value_start = arg.name_offset + 2;
break :no_value Arg.Value{ .slice = arg.full[value_start..value_start] };
};
var err_details = Diagnostics.ErrorDetails{ .type = .err, .arg_index = arg_i, .arg_span = arg.optionAndAfterSpan() };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.print("the {s}{s} option is unsupported", .{ arg.prefixSlice(), arg.optionWithoutPrefix(2) });
try err_details.msg.print(allocator, "the {s}{s} option is unsupported", .{ arg.prefixSlice(), arg.optionWithoutPrefix(2) });
try diagnostics.append(err_details);
arg_i += value.index_increment;
continue :next_arg;
@ -733,8 +714,7 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
// Unsupported MUI options that do not need a value
else if (std.ascii.startsWithIgnoreCase(arg_name, "g1")) {
var err_details = Diagnostics.ErrorDetails{ .type = .err, .arg_index = arg_i, .arg_span = arg.optionSpan(2) };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.print("the {s}{s} option is unsupported", .{ arg.prefixSlice(), arg.optionWithoutPrefix(2) });
try err_details.msg.print(allocator, "the {s}{s} option is unsupported", .{ arg.prefixSlice(), arg.optionWithoutPrefix(2) });
try diagnostics.append(err_details);
arg.name_offset += 2;
}
@ -747,15 +727,13 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
std.ascii.startsWithIgnoreCase(arg_name, "ta"))
{
var err_details = Diagnostics.ErrorDetails{ .type = .err, .arg_index = arg_i, .arg_span = arg.optionSpan(2) };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.print("the {s}{s} option is unsupported", .{ arg.prefixSlice(), arg.optionWithoutPrefix(2) });
try err_details.msg.print(allocator, "the {s}{s} option is unsupported", .{ arg.prefixSlice(), arg.optionWithoutPrefix(2) });
try diagnostics.append(err_details);
arg.name_offset += 2;
} else if (std.ascii.startsWithIgnoreCase(arg_name, "fo")) {
const value = arg.value(2, arg_i, args) catch {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = arg.missingSpan() };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.print("missing output path after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(2) });
try err_details.msg.print(allocator, "missing output path after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(2) });
try diagnostics.append(err_details);
arg_i += 1;
break :next_arg;
@ -767,8 +745,7 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
} else if (std.ascii.startsWithIgnoreCase(arg_name, "sl")) {
const value = arg.value(2, arg_i, args) catch {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = arg.missingSpan() };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.print("missing language tag after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(2) });
try err_details.msg.print(allocator, "missing language tag after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(2) });
try diagnostics.append(err_details);
arg_i += 1;
break :next_arg;
@ -776,24 +753,20 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
const percent_str = value.slice;
const percent: u32 = parsePercent(percent_str) catch {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = value.argSpan(arg) };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.print("invalid percent format '{s}'", .{percent_str});
try err_details.msg.print(allocator, "invalid percent format '{s}'", .{percent_str});
try diagnostics.append(err_details);
var note_details = Diagnostics.ErrorDetails{ .type = .note, .print_args = false, .arg_index = arg_i };
var note_writer = note_details.msg.writer(allocator);
try note_writer.writeAll("string length percent must be an integer between 1 and 100 (inclusive)");
try note_details.msg.appendSlice(allocator, "string length percent must be an integer between 1 and 100 (inclusive)");
try diagnostics.append(note_details);
arg_i += value.index_increment;
continue :next_arg;
};
if (percent == 0 or percent > 100) {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = value.argSpan(arg) };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.print("percent out of range: {} (parsed from '{s}')", .{ percent, percent_str });
try err_details.msg.print(allocator, "percent out of range: {} (parsed from '{s}')", .{ percent, percent_str });
try diagnostics.append(err_details);
var note_details = Diagnostics.ErrorDetails{ .type = .note, .print_args = false, .arg_index = arg_i };
var note_writer = note_details.msg.writer(allocator);
try note_writer.writeAll("string length percent must be an integer between 1 and 100 (inclusive)");
try note_details.msg.appendSlice(allocator, "string length percent must be an integer between 1 and 100 (inclusive)");
try diagnostics.append(note_details);
arg_i += value.index_increment;
continue :next_arg;
@ -805,8 +778,7 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
} else if (std.ascii.startsWithIgnoreCase(arg_name, "ln")) {
const value = arg.value(2, arg_i, args) catch {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = arg.missingSpan() };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.print("missing language tag after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(2) });
try err_details.msg.print(allocator, "missing language tag after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(2) });
try diagnostics.append(err_details);
arg_i += 1;
break :next_arg;
@ -814,16 +786,14 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
const tag = value.slice;
options.default_language_id = lang.tagToInt(tag) catch {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = value.argSpan(arg) };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.print("invalid language tag: {s}", .{tag});
try err_details.msg.print(allocator, "invalid language tag: {s}", .{tag});
try diagnostics.append(err_details);
arg_i += value.index_increment;
continue :next_arg;
};
if (options.default_language_id.? == lang.LOCALE_CUSTOM_UNSPECIFIED) {
var err_details = Diagnostics.ErrorDetails{ .type = .warning, .arg_index = arg_i, .arg_span = value.argSpan(arg) };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.print("language tag '{s}' does not have an assigned ID so it will be resolved to LOCALE_CUSTOM_UNSPECIFIED (id=0x{x})", .{ tag, lang.LOCALE_CUSTOM_UNSPECIFIED });
try err_details.msg.print(allocator, "language tag '{s}' does not have an assigned ID so it will be resolved to LOCALE_CUSTOM_UNSPECIFIED (id=0x{x})", .{ tag, lang.LOCALE_CUSTOM_UNSPECIFIED });
try diagnostics.append(err_details);
}
arg_i += value.index_increment;
@ -831,8 +801,7 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
} else if (std.ascii.startsWithIgnoreCase(arg_name, "l")) {
const value = arg.value(1, arg_i, args) catch {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = arg.missingSpan() };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.print("missing language ID after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(1) });
try err_details.msg.print(allocator, "missing language ID after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(1) });
try diagnostics.append(err_details);
arg_i += 1;
break :next_arg;
@ -840,8 +809,7 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
const num_str = value.slice;
options.default_language_id = lang.parseInt(num_str) catch {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = value.argSpan(arg) };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.print("invalid language ID: {s}", .{num_str});
try err_details.msg.print(allocator, "invalid language ID: {s}", .{num_str});
try diagnostics.append(err_details);
arg_i += value.index_increment;
continue :next_arg;
@ -860,16 +828,14 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
{
const value = arg.value(1, arg_i, args) catch no_value: {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = arg.missingSpan() };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.print("missing value after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(1) });
try err_details.msg.print(allocator, "missing value after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(1) });
try diagnostics.append(err_details);
// dummy zero-length slice starting where the value would have been
const value_start = arg.name_offset + 1;
break :no_value Arg.Value{ .slice = arg.full[value_start..value_start] };
};
var err_details = Diagnostics.ErrorDetails{ .type = .err, .arg_index = arg_i, .arg_span = arg.optionAndAfterSpan() };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.print("the {s}{s} option is unsupported", .{ arg.prefixSlice(), arg.optionWithoutPrefix(1) });
try err_details.msg.print(allocator, "the {s}{s} option is unsupported", .{ arg.prefixSlice(), arg.optionWithoutPrefix(1) });
try diagnostics.append(err_details);
arg_i += value.index_increment;
continue :next_arg;
@ -882,16 +848,14 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
{
const value = arg.value(1, arg_i, args) catch no_value: {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = arg.missingSpan() };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.print("missing value after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(1) });
try err_details.msg.print(allocator, "missing value after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(1) });
try diagnostics.append(err_details);
// dummy zero-length slice starting where the value would have been
const value_start = arg.name_offset + 1;
break :no_value Arg.Value{ .slice = arg.full[value_start..value_start] };
};
var err_details = Diagnostics.ErrorDetails{ .type = .err, .arg_index = arg_i, .arg_span = arg.optionAndAfterSpan() };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.print("the {s}{s} option is unsupported", .{ arg.prefixSlice(), arg.optionWithoutPrefix(1) });
try err_details.msg.print(allocator, "the {s}{s} option is unsupported", .{ arg.prefixSlice(), arg.optionWithoutPrefix(1) });
try diagnostics.append(err_details);
arg_i += value.index_increment;
continue :next_arg;
@ -899,15 +863,13 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
// 1 char unsupported LCX/LCE options that do not need a value
else if (std.ascii.startsWithIgnoreCase(arg_name, "t")) {
var err_details = Diagnostics.ErrorDetails{ .type = .err, .arg_index = arg_i, .arg_span = arg.optionSpan(1) };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.print("the {s}{s} option is unsupported", .{ arg.prefixSlice(), arg.optionWithoutPrefix(1) });
try err_details.msg.print(allocator, "the {s}{s} option is unsupported", .{ arg.prefixSlice(), arg.optionWithoutPrefix(1) });
try diagnostics.append(err_details);
arg.name_offset += 1;
} else if (std.ascii.startsWithIgnoreCase(arg_name, "c")) {
const value = arg.value(1, arg_i, args) catch {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = arg.missingSpan() };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.print("missing code page ID after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(1) });
try err_details.msg.print(allocator, "missing code page ID after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(1) });
try diagnostics.append(err_details);
arg_i += 1;
break :next_arg;
@ -915,8 +877,7 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
const num_str = value.slice;
const code_page_id = std.fmt.parseUnsigned(u16, num_str, 10) catch {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = value.argSpan(arg) };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.print("invalid code page ID: {s}", .{num_str});
try err_details.msg.print(allocator, "invalid code page ID: {s}", .{num_str});
try diagnostics.append(err_details);
arg_i += value.index_increment;
continue :next_arg;
@ -924,16 +885,14 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
options.default_code_page = code_pages.getByIdentifierEnsureSupported(code_page_id) catch |err| switch (err) {
error.InvalidCodePage => {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = value.argSpan(arg) };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.print("invalid or unknown code page ID: {}", .{code_page_id});
try err_details.msg.print(allocator, "invalid or unknown code page ID: {}", .{code_page_id});
try diagnostics.append(err_details);
arg_i += value.index_increment;
continue :next_arg;
},
error.UnsupportedCodePage => {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = value.argSpan(arg) };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.print("unsupported code page: {s} (id={})", .{
try err_details.msg.print(allocator, "unsupported code page: {s} (id={})", .{
@tagName(code_pages.getByIdentifier(code_page_id) catch unreachable),
code_page_id,
});
@ -957,8 +916,7 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
} else if (std.ascii.startsWithIgnoreCase(arg_name, "i")) {
const value = arg.value(1, arg_i, args) catch {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = arg.missingSpan() };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.print("missing include path after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(1) });
try err_details.msg.print(allocator, "missing include path after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(1) });
try diagnostics.append(err_details);
arg_i += 1;
break :next_arg;
@ -986,15 +944,13 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
// Undocumented option with unknown function
// TODO: More investigation to figure out what it does (if anything)
var err_details = Diagnostics.ErrorDetails{ .type = .warning, .arg_index = arg_i, .arg_span = arg.optionSpan(1) };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.print("option {s}{s} has no effect (it is undocumented and its function is unknown in the Win32 RC compiler)", .{ arg.prefixSlice(), arg.optionWithoutPrefix(1) });
try err_details.msg.print(allocator, "option {s}{s} has no effect (it is undocumented and its function is unknown in the Win32 RC compiler)", .{ arg.prefixSlice(), arg.optionWithoutPrefix(1) });
try diagnostics.append(err_details);
arg.name_offset += 1;
} else if (std.ascii.startsWithIgnoreCase(arg_name, "d")) {
const value = arg.value(1, arg_i, args) catch {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = arg.missingSpan() };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.print("missing symbol to define after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(1) });
try err_details.msg.print(allocator, "missing symbol to define after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(1) });
try diagnostics.append(err_details);
arg_i += 1;
break :next_arg;
@ -1009,8 +965,7 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
try options.define(symbol, symbol_value);
} else {
var err_details = Diagnostics.ErrorDetails{ .type = .warning, .arg_index = arg_i, .arg_span = value.argSpan(arg) };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.print("symbol \"{s}\" is not a valid identifier and therefore cannot be defined", .{symbol});
try err_details.msg.print(allocator, "symbol \"{s}\" is not a valid identifier and therefore cannot be defined", .{symbol});
try diagnostics.append(err_details);
}
arg_i += value.index_increment;
@ -1018,8 +973,7 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
} else if (std.ascii.startsWithIgnoreCase(arg_name, "u")) {
const value = arg.value(1, arg_i, args) catch {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = arg.missingSpan() };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.print("missing symbol to undefine after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(1) });
try err_details.msg.print(allocator, "missing symbol to undefine after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(1) });
try diagnostics.append(err_details);
arg_i += 1;
break :next_arg;
@ -1029,16 +983,14 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
try options.undefine(symbol);
} else {
var err_details = Diagnostics.ErrorDetails{ .type = .warning, .arg_index = arg_i, .arg_span = value.argSpan(arg) };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.print("symbol \"{s}\" is not a valid identifier and therefore cannot be undefined", .{symbol});
try err_details.msg.print(allocator, "symbol \"{s}\" is not a valid identifier and therefore cannot be undefined", .{symbol});
try diagnostics.append(err_details);
}
arg_i += value.index_increment;
continue :next_arg;
} else {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = arg.optionAndAfterSpan() };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.print("invalid option: {s}{s}", .{ arg.prefixSlice(), arg.name() });
try err_details.msg.print(allocator, "invalid option: {s}{s}", .{ arg.prefixSlice(), arg.name() });
try diagnostics.append(err_details);
arg_i += 1;
continue :next_arg;
@ -1055,16 +1007,14 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn

if (positionals.len == 0) {
var err_details = Diagnostics.ErrorDetails{ .print_args = false, .arg_index = arg_i };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.writeAll("missing input filename");
try err_details.msg.appendSlice(allocator, "missing input filename");
try diagnostics.append(err_details);

if (args.len > 0) {
const last_arg = args[args.len - 1];
if (arg_i > 0 and last_arg.len > 0 and last_arg[0] == '/' and isSupportedInputExtension(std.fs.path.extension(last_arg))) {
var note_details = Diagnostics.ErrorDetails{ .type = .note, .print_args = true, .arg_index = arg_i - 1 };
var note_writer = note_details.msg.writer(allocator);
try note_writer.writeAll("if this argument was intended to be the input filename, adding -- in front of it will exclude it from option parsing");
try note_details.msg.appendSlice(allocator, "if this argument was intended to be the input filename, adding -- in front of it will exclude it from option parsing");
try diagnostics.append(note_details);
}
}
@ -1099,16 +1049,14 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
if (positionals.len > 1) {
if (output_filename != null) {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i + 1 };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.writeAll("output filename already specified");
try err_details.msg.appendSlice(allocator, "output filename already specified");
try diagnostics.append(err_details);
var note_details = Diagnostics.ErrorDetails{
.type = .note,
.arg_index = output_filename_context.arg.index,
.arg_span = output_filename_context.arg.value.argSpan(output_filename_context.arg.arg),
};
var note_writer = note_details.msg.writer(allocator);
try note_writer.writeAll("output filename previously specified here");
try note_details.msg.appendSlice(allocator, "output filename previously specified here");
try diagnostics.append(note_details);
} else {
output_filename = positionals[1];
@ -1173,16 +1121,15 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
var print_output_format_source_note: bool = false;
if (options.depfile_path != null and (options.input_format == .res or options.output_format == .rcpp)) {
var err_details = Diagnostics.ErrorDetails{ .type = .warning, .arg_index = depfile_context.index, .arg_span = depfile_context.value.argSpan(depfile_context.arg) };
var msg_writer = err_details.msg.writer(allocator);
if (options.input_format == .res) {
try msg_writer.print("the {s}{s} option was ignored because the input format is '{s}'", .{
try err_details.msg.print(allocator, "the {s}{s} option was ignored because the input format is '{s}'", .{
depfile_context.arg.prefixSlice(),
depfile_context.arg.optionWithoutPrefix(depfile_context.option_len),
@tagName(options.input_format),
});
print_input_format_source_note = true;
} else if (options.output_format == .rcpp) {
try msg_writer.print("the {s}{s} option was ignored because the output format is '{s}'", .{
try err_details.msg.print(allocator, "the {s}{s} option was ignored because the output format is '{s}'", .{
depfile_context.arg.prefixSlice(),
depfile_context.arg.optionWithoutPrefix(depfile_context.option_len),
@tagName(options.output_format),
@ -1193,16 +1140,14 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
}
if (!isSupportedTransformation(options.input_format, options.output_format)) {
var err_details = Diagnostics.ErrorDetails{ .arg_index = input_filename_arg_i, .print_args = false };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.print("input format '{s}' cannot be converted to output format '{s}'", .{ @tagName(options.input_format), @tagName(options.output_format) });
try err_details.msg.print(allocator, "input format '{s}' cannot be converted to output format '{s}'", .{ @tagName(options.input_format), @tagName(options.output_format) });
try diagnostics.append(err_details);
print_input_format_source_note = true;
print_output_format_source_note = true;
}
if (options.preprocess == .only and options.output_format != .rcpp) {
var err_details = Diagnostics.ErrorDetails{ .arg_index = preprocess_only_context.index };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.print("the {s}{s} option cannot be used with output format '{s}'", .{
try err_details.msg.print(allocator, "the {s}{s} option cannot be used with output format '{s}'", .{
preprocess_only_context.arg.prefixSlice(),
preprocess_only_context.arg.optionWithoutPrefix(preprocess_only_context.option_len),
@tagName(options.output_format),
@ -1214,8 +1159,7 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
switch (input_format_source) {
.inferred_from_input_filename => {
var err_details = Diagnostics.ErrorDetails{ .type = .note, .arg_index = input_filename_arg_i };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.writeAll("the input format was inferred from the input filename");
try err_details.msg.appendSlice(allocator, "the input format was inferred from the input filename");
try diagnostics.append(err_details);
},
.input_format_arg => {
@ -1224,8 +1168,7 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
.arg_index = input_format_context.index,
.arg_span = input_format_context.value.argSpan(input_format_context.arg),
};
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.writeAll("the input format was specified here");
try err_details.msg.appendSlice(allocator, "the input format was specified here");
try diagnostics.append(err_details);
},
}
@ -1234,11 +1177,10 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
switch (output_format_source) {
.inferred_from_input_filename, .unable_to_infer_from_input_filename => {
var err_details = Diagnostics.ErrorDetails{ .type = .note, .arg_index = input_filename_arg_i };
var msg_writer = err_details.msg.writer(allocator);
if (output_format_source == .inferred_from_input_filename) {
try msg_writer.writeAll("the output format was inferred from the input filename");
try err_details.msg.appendSlice(allocator, "the output format was inferred from the input filename");
} else {
try msg_writer.writeAll("the output format was unable to be inferred from the input filename, so the default was used");
try err_details.msg.appendSlice(allocator, "the output format was unable to be inferred from the input filename, so the default was used");
}
try diagnostics.append(err_details);
},
@ -1248,11 +1190,10 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
.arg => |ctx| .{ .type = .note, .arg_index = ctx.index, .arg_span = ctx.value.argSpan(ctx.arg) },
.unspecified => unreachable,
};
var msg_writer = err_details.msg.writer(allocator);
if (output_format_source == .inferred_from_output_filename) {
try msg_writer.writeAll("the output format was inferred from the output filename");
try err_details.msg.appendSlice(allocator, "the output format was inferred from the output filename");
} else {
try msg_writer.writeAll("the output format was unable to be inferred from the output filename, so the default was used");
try err_details.msg.appendSlice(allocator, "the output format was unable to be inferred from the output filename, so the default was used");
}
try diagnostics.append(err_details);
},
@ -1262,14 +1203,12 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
.arg_index = output_format_context.index,
.arg_span = output_format_context.value.argSpan(output_format_context.arg),
};
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.writeAll("the output format was specified here");
try err_details.msg.appendSlice(allocator, "the output format was specified here");
try diagnostics.append(err_details);
},
.inferred_from_preprocess_only => {
var err_details = Diagnostics.ErrorDetails{ .type = .note, .arg_index = preprocess_only_context.index };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.print("the output format was inferred from the usage of the {s}{s} option", .{
try err_details.msg.print(allocator, "the output format was inferred from the usage of the {s}{s} option", .{
preprocess_only_context.arg.prefixSlice(),
preprocess_only_context.arg.optionWithoutPrefix(preprocess_only_context.option_len),
});
@ -1291,19 +1230,19 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
}

pub fn filepathWithExtension(allocator: Allocator, path: []const u8, ext: []const u8) ![]const u8 {
var buf = std.array_list.Managed(u8).init(allocator);
errdefer buf.deinit();
var buf: std.ArrayList(u8) = .empty;
errdefer buf.deinit(allocator);
if (std.fs.path.dirname(path)) |dirname| {
var end_pos = dirname.len;
// We want to ensure that we write a path separator at the end, so if the dirname
// doesn't end with a path sep then include the char after the dirname
// which must be a path sep.
if (!std.fs.path.isSep(dirname[dirname.len - 1])) end_pos += 1;
try buf.appendSlice(path[0..end_pos]);
try buf.appendSlice(allocator, path[0..end_pos]);
}
try buf.appendSlice(std.fs.path.stem(path));
try buf.appendSlice(ext);
return try buf.toOwnedSlice();
try buf.appendSlice(allocator, std.fs.path.stem(path));
try buf.appendSlice(allocator, ext);
return try buf.toOwnedSlice(allocator);
}

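filepathWithExtension above shows the same ArrayList migration applied to an owned result: the list starts as `.empty`, every growing call takes the allocator, and `toOwnedSlice(allocator)` hands the buffer to the caller. A short sketch of that ownership pattern (the strings are placeholders, not from the commit):

const std = @import("std");

test "toOwnedSlice with an unmanaged list (sketch)" {
    const allocator = std.testing.allocator;
    var buf: std.ArrayList(u8) = .empty;
    errdefer buf.deinit(allocator);
    try buf.appendSlice(allocator, "resource");
    try buf.appendSlice(allocator, ".res");
    // Transfers ownership of the bytes to `owned`; the list is left empty.
    const owned = try buf.toOwnedSlice(allocator);
    defer allocator.free(owned);
    try std.testing.expectEqualStrings("resource.res", owned);
}
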
pub fn isSupportedInputExtension(ext: []const u8) bool {
@ -1537,7 +1476,7 @@ fn testParseOutput(args: []const []const u8, expected_output: []const u8) !?Opti
var options = parse(std.testing.allocator, args, &diagnostics) catch |err| switch (err) {
error.ParseError => {
try diagnostics.renderToWriter(args, &output.writer, .no_color);
try std.testing.expectEqualStrings(expected_output, output.getWritten());
try std.testing.expectEqualStrings(expected_output, output.written());
return null;
},
else => |e| return e,
@ -1545,7 +1484,7 @@ fn testParseOutput(args: []const []const u8, expected_output: []const u8) !?Opti
errdefer options.deinit();

try diagnostics.renderToWriter(args, &output.writer, .no_color);
try std.testing.expectEqualStrings(expected_output, output.getWritten());
try std.testing.expectEqualStrings(expected_output, output.written());
return options;
}

File diff suppressed because it is too large
@ -43,7 +43,7 @@ pub const Resource = struct {
};

pub const ParsedResources = struct {
list: std.ArrayListUnmanaged(Resource) = .empty,
list: std.ArrayList(Resource) = .empty,
allocator: Allocator,

pub fn init(allocator: Allocator) ParsedResources {
@ -157,7 +157,7 @@ pub fn parseNameOrOrdinal(allocator: Allocator, reader: *std.Io.Reader) !NameOrO
const ordinal_value = try reader.takeInt(u16, .little);
return .{ .ordinal = ordinal_value };
}
var name_buf = try std.ArrayListUnmanaged(u16).initCapacity(allocator, 16);
var name_buf = try std.ArrayList(u16).initCapacity(allocator, 16);
errdefer name_buf.deinit(allocator);
var code_unit = first_code_unit;
while (code_unit != 0) {
@ -373,7 +373,7 @@ pub fn writeCoff(allocator: Allocator, writer: *std.Io.Writer, resources: []cons
try writer.writeAll(string_table.bytes.items);
}

fn writeSymbol(writer: anytype, symbol: std.coff.Symbol) !void {
fn writeSymbol(writer: *std.Io.Writer, symbol: std.coff.Symbol) !void {
try writer.writeAll(&symbol.name);
try writer.writeInt(u32, symbol.value, .little);
try writer.writeInt(u16, @intFromEnum(symbol.section_number), .little);
@ -383,7 +383,7 @@ fn writeSymbol(writer: anytype, symbol: std.coff.Symbol) !void {
try writer.writeInt(u8, symbol.number_of_aux_symbols, .little);
}

fn writeSectionDefinition(writer: anytype, def: std.coff.SectionDefinition) !void {
fn writeSectionDefinition(writer: *std.Io.Writer, def: std.coff.SectionDefinition) !void {
try writer.writeInt(u32, def.length, .little);
try writer.writeInt(u16, def.number_of_relocations, .little);
try writer.writeInt(u16, def.number_of_linenumbers, .little);
@ -417,7 +417,7 @@ pub const ResourceDirectoryEntry = extern struct {
to_subdirectory: bool,
},

pub fn writeCoff(self: ResourceDirectoryEntry, writer: anytype) !void {
pub fn writeCoff(self: ResourceDirectoryEntry, writer: *std.Io.Writer) !void {
try writer.writeInt(u32, @bitCast(self.entry), .little);
try writer.writeInt(u32, @bitCast(self.offset), .little);
}
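
The COFF serialization helpers above (writeSymbol, writeSectionDefinition, writeCoff) now take a concrete `*std.Io.Writer` instead of `anytype` and keep using `writeInt` for the little-endian fields. A minimal sketch of that call shape, assuming a fixed buffer as the destination (values are illustrative):

const std = @import("std");

test "little-endian fields via *std.Io.Writer (sketch)" {
    var buf: [8]u8 = undefined;
    var w: std.Io.Writer = .fixed(&buf);
    try w.writeInt(u32, 0x11223344, .little);
    try w.writeInt(u16, 7, .little);
    // Little-endian: the first byte written is the least significant one.
    try std.testing.expectEqual(@as(u8, 0x44), w.buffered()[0]);
}
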
@ -435,7 +435,7 @@ const ResourceTree = struct {
type_to_name_map: std.ArrayHashMapUnmanaged(NameOrOrdinal, NameToLanguageMap, NameOrOrdinalHashContext, true),
rsrc_string_table: std.ArrayHashMapUnmanaged(NameOrOrdinal, void, NameOrOrdinalHashContext, true),
deduplicated_data: std.StringArrayHashMapUnmanaged(u32),
data_offsets: std.ArrayListUnmanaged(u32),
data_offsets: std.ArrayList(u32),
rsrc02_len: u32,
coff_options: CoffOptions,
allocator: Allocator,
@ -675,13 +675,13 @@ const ResourceTree = struct {
return &.{};
}

var level2_list: std.ArrayListUnmanaged(*const NameToLanguageMap) = .empty;
var level2_list: std.ArrayList(*const NameToLanguageMap) = .empty;
defer level2_list.deinit(allocator);

var level3_list: std.ArrayListUnmanaged(*const LanguageToResourceMap) = .empty;
var level3_list: std.ArrayList(*const LanguageToResourceMap) = .empty;
defer level3_list.deinit(allocator);

var resources_list: std.ArrayListUnmanaged(*const RelocatableResource) = .empty;
var resources_list: std.ArrayList(*const RelocatableResource) = .empty;
defer resources_list.deinit(allocator);

var relocations = Relocations.init(allocator);
@ -896,7 +896,7 @@ const ResourceTree = struct {
return symbols;
}

fn writeRelocation(writer: anytype, relocation: std.coff.Relocation) !void {
fn writeRelocation(writer: *std.Io.Writer, relocation: std.coff.Relocation) !void {
try writer.writeInt(u32, relocation.virtual_address, .little);
try writer.writeInt(u32, relocation.symbol_table_index, .little);
try writer.writeInt(u16, relocation.type, .little);
@ -928,7 +928,7 @@ const Relocation = struct {

const Relocations = struct {
allocator: Allocator,
list: std.ArrayListUnmanaged(Relocation) = .empty,
list: std.ArrayList(Relocation) = .empty,
cur_symbol_index: u32 = 5,

pub fn init(allocator: Allocator) Relocations {
@ -952,7 +952,7 @@ const Relocations = struct {
/// Does not do deduplication (only because there's no chance of duplicate strings in this
/// instance).
const StringTable = struct {
bytes: std.ArrayListUnmanaged(u8) = .empty,
bytes: std.ArrayList(u8) = .empty,

pub fn deinit(self: *StringTable, allocator: Allocator) void {
self.bytes.deinit(allocator);

@ -15,10 +15,10 @@ const builtin = @import("builtin");
const native_endian = builtin.cpu.arch.endian();

pub const Diagnostics = struct {
errors: std.ArrayListUnmanaged(ErrorDetails) = .empty,
errors: std.ArrayList(ErrorDetails) = .empty,
/// Append-only, cannot handle removing strings.
/// Expects to own all strings within the list.
strings: std.ArrayListUnmanaged([]const u8) = .empty,
strings: std.ArrayList([]const u8) = .empty,
allocator: std.mem.Allocator,

pub fn init(allocator: std.mem.Allocator) Diagnostics {
@ -256,7 +256,7 @@ pub const ErrorDetails = struct {
.{ "literal", "unquoted literal" },
});

pub fn writeCommaSeparated(self: ExpectedTypes, writer: anytype) !void {
pub fn writeCommaSeparated(self: ExpectedTypes, writer: *std.Io.Writer) !void {
const struct_info = @typeInfo(ExpectedTypes).@"struct";
const num_real_fields = struct_info.fields.len - 1;
const num_padding_bits = @bitSizeOf(ExpectedTypes) - num_real_fields;
@ -441,7 +441,7 @@ pub const ErrorDetails = struct {
} };
}

pub fn render(self: ErrorDetails, writer: anytype, source: []const u8, strings: []const []const u8) !void {
pub fn render(self: ErrorDetails, writer: *std.Io.Writer, source: []const u8, strings: []const []const u8) !void {
switch (self.err) {
.unfinished_string_literal => {
return writer.print("unfinished string literal at '{f}', expected closing '\"'", .{self.fmtToken(source)});
@ -987,12 +987,14 @@ pub fn renderErrorMessage(writer: *std.io.Writer, tty_config: std.io.tty.Config,
if (corresponding_span != null and corresponding_file != null) {
var worth_printing_lines: bool = true;
var initial_lines_err: ?anyerror = null;
var file_reader_buf: [max_source_line_bytes * 2]u8 = undefined;
var corresponding_lines: ?CorrespondingLines = CorrespondingLines.init(
cwd,
err_details,
source_line_for_display.line,
corresponding_span.?,
corresponding_file.?,
&file_reader_buf,
) catch |err| switch (err) {
error.NotWorthPrintingLines => blk: {
worth_printing_lines = false;
@ -1078,10 +1080,17 @@ const CorrespondingLines = struct {
at_eof: bool = false,
span: SourceMappings.CorrespondingSpan,
file: std.fs.File,
buffered_reader: std.fs.File.Reader,
file_reader: std.fs.File.Reader,
code_page: SupportedCodePage,

pub fn init(cwd: std.fs.Dir, err_details: ErrorDetails, line_for_comparison: []const u8, corresponding_span: SourceMappings.CorrespondingSpan, corresponding_file: []const u8) !CorrespondingLines {
pub fn init(
cwd: std.fs.Dir,
err_details: ErrorDetails,
line_for_comparison: []const u8,
corresponding_span: SourceMappings.CorrespondingSpan,
corresponding_file: []const u8,
file_reader_buf: []u8,
) !CorrespondingLines {
// We don't do line comparison for this error, so don't print the note if the line
// number is different
if (err_details.err == .string_literal_too_long and err_details.token.line_number != corresponding_span.start_line) {
@ -1096,18 +1105,14 @@ const CorrespondingLines = struct {
var corresponding_lines = CorrespondingLines{
.span = corresponding_span,
.file = try utils.openFileNotDir(cwd, corresponding_file, .{}),
.buffered_reader = undefined,
.code_page = err_details.code_page,
.file_reader = undefined,
};
corresponding_lines.buffered_reader = corresponding_lines.file.reader(&.{});
corresponding_lines.file_reader = corresponding_lines.file.reader(file_reader_buf);
errdefer corresponding_lines.deinit();

var fbs = std.io.fixedBufferStream(&corresponding_lines.line_buf);
const writer = fbs.writer();

try corresponding_lines.writeLineFromStreamVerbatim(
writer,
corresponding_lines.buffered_reader.interface.adaptToOldInterface(),
&corresponding_lines.file_reader.interface,
corresponding_span.start_line,
);

@ -1145,12 +1150,8 @@ const CorrespondingLines = struct {
|
||||
self.line_len = 0;
|
||||
self.visual_line_len = 0;
|
||||
|
||||
var fbs = std.io.fixedBufferStream(&self.line_buf);
|
||||
const writer = fbs.writer();
|
||||
|
||||
try self.writeLineFromStreamVerbatim(
|
||||
writer,
|
||||
self.buffered_reader.interface.adaptToOldInterface(),
|
||||
&self.file_reader.interface,
|
||||
self.line_num,
|
||||
);
|
||||
|
||||
@ -1164,7 +1165,7 @@ const CorrespondingLines = struct {
|
||||
return visual_line;
|
||||
}
|
||||
|
||||
fn writeLineFromStreamVerbatim(self: *CorrespondingLines, writer: anytype, input: anytype, line_num: usize) !void {
|
||||
fn writeLineFromStreamVerbatim(self: *CorrespondingLines, input: *std.Io.Reader, line_num: usize) !void {
|
||||
while (try readByteOrEof(input)) |byte| {
|
||||
switch (byte) {
|
||||
'\n', '\r' => {
|
||||
@ -1184,13 +1185,9 @@ const CorrespondingLines = struct {
|
||||
}
|
||||
},
|
||||
else => {
|
||||
if (self.line_num == line_num) {
|
||||
if (writer.writeByte(byte)) {
|
||||
self.line_len += 1;
|
||||
} else |err| switch (err) {
|
||||
error.NoSpaceLeft => {},
|
||||
else => |e| return e,
|
||||
}
|
||||
if (self.line_num == line_num and self.line_len < self.line_buf.len) {
|
||||
self.line_buf[self.line_len] = byte;
|
||||
self.line_len += 1;
|
||||
}
|
||||
},
|
||||
}
|
||||
@ -1201,8 +1198,8 @@ const CorrespondingLines = struct {
|
||||
self.line_num += 1;
|
||||
}
|
||||
|
||||
fn readByteOrEof(reader: anytype) !?u8 {
|
||||
return reader.readByte() catch |err| switch (err) {
|
||||
fn readByteOrEof(reader: *std.Io.Reader) !?u8 {
|
||||
return reader.takeByte() catch |err| switch (err) {
|
||||
error.EndOfStream => return null,
|
||||
else => |e| return e,
|
||||
};
|
||||
|
||||
@ -8,80 +8,66 @@ const std = @import("std");
|
||||
const builtin = @import("builtin");
|
||||
const native_endian = builtin.cpu.arch.endian();
|
||||
|
||||
pub const ReadError = std.mem.Allocator.Error || error{ InvalidHeader, InvalidImageType, ImpossibleDataSize, UnexpectedEOF, ReadError };
|
||||
pub const ReadError = std.mem.Allocator.Error || error{ InvalidHeader, InvalidImageType, ImpossibleDataSize, UnexpectedEOF, ReadFailed };
|
||||
|
||||
pub fn read(allocator: std.mem.Allocator, reader: anytype, max_size: u64) ReadError!IconDir {
|
||||
// Some Reader implementations have an empty ReadError error set which would
|
||||
// cause 'unreachable else' if we tried to use an else in the switch, so we
|
||||
// need to detect this case and not try to translate to ReadError
|
||||
const anyerror_reader_errorset = @TypeOf(reader).Error == anyerror;
|
||||
const empty_reader_errorset = @typeInfo(@TypeOf(reader).Error).error_set == null or @typeInfo(@TypeOf(reader).Error).error_set.?.len == 0;
|
||||
if (empty_reader_errorset and !anyerror_reader_errorset) {
|
||||
return readAnyError(allocator, reader, max_size) catch |err| switch (err) {
|
||||
error.EndOfStream => error.UnexpectedEOF,
|
||||
else => |e| return e,
|
||||
};
|
||||
} else {
|
||||
return readAnyError(allocator, reader, max_size) catch |err| switch (err) {
|
||||
error.OutOfMemory,
|
||||
error.InvalidHeader,
|
||||
error.InvalidImageType,
|
||||
error.ImpossibleDataSize,
|
||||
=> |e| return e,
|
||||
error.EndOfStream => error.UnexpectedEOF,
|
||||
// The remaining errors are dependent on the `reader`, so
|
||||
// we just translate them all to generic ReadError
|
||||
else => error.ReadError,
|
||||
};
|
||||
}
|
||||
pub fn read(allocator: std.mem.Allocator, reader: *std.Io.Reader, max_size: u64) ReadError!IconDir {
|
||||
return readInner(allocator, reader, max_size) catch |err| switch (err) {
|
||||
error.OutOfMemory,
|
||||
error.InvalidHeader,
|
||||
error.InvalidImageType,
|
||||
error.ImpossibleDataSize,
|
||||
error.ReadFailed,
|
||||
=> |e| return e,
|
||||
error.EndOfStream => error.UnexpectedEOF,
|
||||
};
|
||||
}
|
||||
|
||||
// TODO: This seems like a somewhat strange pattern, could be a better way
|
||||
// to do this. Maybe it makes more sense to handle the translation
|
||||
// at the call site instead of having a helper function here.
|
||||
pub fn readAnyError(allocator: std.mem.Allocator, reader: anytype, max_size: u64) !IconDir {
|
||||
const reserved = try reader.readInt(u16, .little);
|
||||
fn readInner(allocator: std.mem.Allocator, reader: *std.Io.Reader, max_size: u64) !IconDir {
|
||||
const reserved = try reader.takeInt(u16, .little);
|
||||
if (reserved != 0) {
|
||||
return error.InvalidHeader;
|
||||
}
|
||||
|
||||
const image_type = reader.readEnum(ImageType, .little) catch |err| switch (err) {
|
||||
error.InvalidValue => return error.InvalidImageType,
|
||||
const image_type = reader.takeEnum(ImageType, .little) catch |err| switch (err) {
|
||||
error.InvalidEnumTag => return error.InvalidImageType,
|
||||
else => |e| return e,
|
||||
};
|
||||
|
||||
const num_images = try reader.readInt(u16, .little);
|
||||
const num_images = try reader.takeInt(u16, .little);
|
||||
|
||||
// To avoid over-allocation in the case of a file that says it has way more
|
||||
// entries than it actually does, we use an ArrayList with a conservatively
|
||||
// limited initial capacity instead of allocating the entire slice at once.
|
||||
const initial_capacity = @min(num_images, 8);
|
||||
var entries = try std.array_list.Managed(Entry).initCapacity(allocator, initial_capacity);
|
||||
errdefer entries.deinit();
|
||||
var entries = try std.ArrayList(Entry).initCapacity(allocator, initial_capacity);
|
||||
errdefer entries.deinit(allocator);
|
||||
|
||||
var i: usize = 0;
|
||||
while (i < num_images) : (i += 1) {
|
||||
var entry: Entry = undefined;
|
||||
entry.width = try reader.readByte();
|
||||
entry.height = try reader.readByte();
|
||||
entry.num_colors = try reader.readByte();
|
||||
entry.reserved = try reader.readByte();
|
||||
entry.width = try reader.takeByte();
|
||||
entry.height = try reader.takeByte();
|
||||
entry.num_colors = try reader.takeByte();
|
||||
entry.reserved = try reader.takeByte();
|
||||
switch (image_type) {
|
||||
.icon => {
|
||||
entry.type_specific_data = .{ .icon = .{
|
||||
.color_planes = try reader.readInt(u16, .little),
|
||||
.bits_per_pixel = try reader.readInt(u16, .little),
|
||||
.color_planes = try reader.takeInt(u16, .little),
|
||||
.bits_per_pixel = try reader.takeInt(u16, .little),
|
||||
} };
|
||||
},
|
||||
.cursor => {
|
||||
entry.type_specific_data = .{ .cursor = .{
|
||||
.hotspot_x = try reader.readInt(u16, .little),
|
||||
.hotspot_y = try reader.readInt(u16, .little),
|
||||
.hotspot_x = try reader.takeInt(u16, .little),
|
||||
.hotspot_y = try reader.takeInt(u16, .little),
|
||||
} };
|
||||
},
|
||||
}
|
||||
entry.data_size_in_bytes = try reader.readInt(u32, .little);
|
||||
entry.data_offset_from_start_of_file = try reader.readInt(u32, .little);
|
||||
entry.data_size_in_bytes = try reader.takeInt(u32, .little);
|
||||
entry.data_offset_from_start_of_file = try reader.takeInt(u32, .little);
|
||||
// Validate that the offset/data size is feasible
|
||||
if (@as(u64, entry.data_offset_from_start_of_file) + entry.data_size_in_bytes > max_size) {
|
||||
return error.ImpossibleDataSize;
|
||||
@ -101,12 +87,12 @@ pub fn readAnyError(allocator: std.mem.Allocator, reader: anytype, max_size: u64
|
||||
if (entry.data_size_in_bytes < 16) {
|
||||
return error.ImpossibleDataSize;
|
||||
}
|
||||
try entries.append(entry);
|
||||
try entries.append(allocator, entry);
|
||||
}
|
||||
|
||||
return .{
|
||||
.image_type = image_type,
|
||||
.entries = try entries.toOwnedSlice(),
|
||||
.entries = try entries.toOwnedSlice(allocator),
|
||||
.allocator = allocator,
|
||||
};
|
||||
}
|
||||
@ -135,7 +121,7 @@ pub const IconDir = struct {
|
||||
return @intCast(IconDir.res_header_byte_len + self.entries.len * Entry.res_byte_len);
|
||||
}
|
||||
|
||||
pub fn writeResData(self: IconDir, writer: anytype, first_image_id: u16) !void {
|
||||
pub fn writeResData(self: IconDir, writer: *std.Io.Writer, first_image_id: u16) !void {
|
||||
try writer.writeInt(u16, 0, .little);
|
||||
try writer.writeInt(u16, @intFromEnum(self.image_type), .little);
|
||||
// We know that entries.len must fit into a u16
|
||||
@ -173,7 +159,7 @@ pub const Entry = struct {
|
||||
|
||||
pub const res_byte_len = 14;
|
||||
|
||||
pub fn writeResData(self: Entry, writer: anytype, id: u16) !void {
|
||||
pub fn writeResData(self: Entry, writer: *std.Io.Writer, id: u16) !void {
|
||||
switch (self.type_specific_data) {
|
||||
.icon => |icon_data| {
|
||||
try writer.writeInt(u8, @as(u8, @truncate(self.width)), .little);
|
||||
@ -198,8 +184,8 @@ pub const Entry = struct {
|
||||
|
||||
test "icon" {
|
||||
const data = "\x00\x00\x01\x00\x01\x00\x10\x10\x00\x00\x01\x00\x10\x00\x10\x00\x00\x00\x16\x00\x00\x00" ++ [_]u8{0} ** 16;
|
||||
var fbs = std.io.fixedBufferStream(data);
|
||||
const icon = try read(std.testing.allocator, fbs.reader(), data.len);
|
||||
var fbs: std.Io.Reader = .fixed(data);
|
||||
const icon = try read(std.testing.allocator, &fbs, data.len);
|
||||
defer icon.deinit();
|
||||
|
||||
try std.testing.expectEqual(ImageType.icon, icon.image_type);
|
||||
@ -211,26 +197,26 @@ test "icon too many images" {
|
||||
// it's not possible to hit EOF when looking for more RESDIR structures, since they are
|
||||
// themselves 16 bytes long, so we'll always hit ImpossibleDataSize instead.
|
||||
const data = "\x00\x00\x01\x00\x02\x00\x10\x10\x00\x00\x01\x00\x10\x00\x10\x00\x00\x00\x16\x00\x00\x00" ++ [_]u8{0} ** 16;
|
||||
var fbs = std.io.fixedBufferStream(data);
|
||||
try std.testing.expectError(error.ImpossibleDataSize, read(std.testing.allocator, fbs.reader(), data.len));
|
||||
var fbs: std.Io.Reader = .fixed(data);
|
||||
try std.testing.expectError(error.ImpossibleDataSize, read(std.testing.allocator, &fbs, data.len));
|
||||
}
|
||||
|
||||
test "icon data size past EOF" {
|
||||
const data = "\x00\x00\x01\x00\x01\x00\x10\x10\x00\x00\x01\x00\x10\x00\x10\x01\x00\x00\x16\x00\x00\x00" ++ [_]u8{0} ** 16;
|
||||
var fbs = std.io.fixedBufferStream(data);
|
||||
try std.testing.expectError(error.ImpossibleDataSize, read(std.testing.allocator, fbs.reader(), data.len));
|
||||
var fbs: std.Io.Reader = .fixed(data);
|
||||
try std.testing.expectError(error.ImpossibleDataSize, read(std.testing.allocator, &fbs, data.len));
|
||||
}
|
||||
|
||||
test "icon data offset past EOF" {
|
||||
const data = "\x00\x00\x01\x00\x01\x00\x10\x10\x00\x00\x01\x00\x10\x00\x10\x00\x00\x00\x17\x00\x00\x00" ++ [_]u8{0} ** 16;
|
||||
var fbs = std.io.fixedBufferStream(data);
|
||||
try std.testing.expectError(error.ImpossibleDataSize, read(std.testing.allocator, fbs.reader(), data.len));
|
||||
var fbs: std.Io.Reader = .fixed(data);
|
||||
try std.testing.expectError(error.ImpossibleDataSize, read(std.testing.allocator, &fbs, data.len));
|
||||
}
|
||||
|
||||
test "icon data size too small" {
|
||||
const data = "\x00\x00\x01\x00\x01\x00\x10\x10\x00\x00\x01\x00\x10\x00\x0F\x00\x00\x00\x16\x00\x00\x00";
|
||||
var fbs = std.io.fixedBufferStream(data);
|
||||
try std.testing.expectError(error.ImpossibleDataSize, read(std.testing.allocator, fbs.reader(), data.len));
|
||||
var fbs: std.Io.Reader = .fixed(data);
|
||||
try std.testing.expectError(error.ImpossibleDataSize, read(std.testing.allocator, &fbs, data.len));
|
||||
}
|
||||
|
||||
pub const ImageFormat = enum(u2) {
|
||||
|
||||
@ -119,6 +119,7 @@ test tagToId {
|
||||
}
|
||||
|
||||
test "exhaustive tagToId" {
|
||||
@setEvalBranchQuota(2000);
|
||||
inline for (@typeInfo(LanguageId).@"enum".fields) |field| {
|
||||
const id = tagToId(field.name) catch |err| {
|
||||
std.debug.print("tag: {s}\n", .{field.name});
|
||||
@ -131,8 +132,8 @@ test "exhaustive tagToId" {
|
||||
}
|
||||
var buf: [32]u8 = undefined;
|
||||
inline for (valid_alternate_sorts) |parsed_sort| {
|
||||
var fbs = std.io.fixedBufferStream(&buf);
|
||||
const writer = fbs.writer();
|
||||
var fbs: std.Io.Writer = .fixed(&buf);
|
||||
const writer = &fbs;
|
||||
writer.writeAll(parsed_sort.language_code) catch unreachable;
|
||||
writer.writeAll("-") catch unreachable;
|
||||
writer.writeAll(parsed_sort.country_code.?) catch unreachable;
|
||||
@ -146,12 +147,12 @@ test "exhaustive tagToId" {
|
||||
break :field name_buf;
|
||||
};
|
||||
const expected = @field(LanguageId, &expected_field_name);
|
||||
const id = tagToId(fbs.getWritten()) catch |err| {
|
||||
std.debug.print("tag: {s}\n", .{fbs.getWritten()});
|
||||
const id = tagToId(fbs.buffered()) catch |err| {
|
||||
std.debug.print("tag: {s}\n", .{fbs.buffered()});
|
||||
return err;
|
||||
};
|
||||
try std.testing.expectEqual(expected, id orelse {
|
||||
std.debug.print("tag: {s}, expected: {}, got null\n", .{ fbs.getWritten(), expected });
|
||||
std.debug.print("tag: {s}, expected: {}, got null\n", .{ fbs.buffered(), expected });
|
||||
return error.TestExpectedEqual;
|
||||
});
|
||||
}
|
||||
|
||||
@ -469,8 +469,8 @@ pub fn parseQuotedString(
|
||||
const T = if (literal_type == .ascii) u8 else u16;
|
||||
std.debug.assert(bytes.slice.len >= 2); // must at least have 2 double quote chars
|
||||
|
||||
var buf = try std.array_list.Managed(T).initCapacity(allocator, bytes.slice.len);
|
||||
errdefer buf.deinit();
|
||||
var buf = try std.ArrayList(T).initCapacity(allocator, bytes.slice.len);
|
||||
errdefer buf.deinit(allocator);
|
||||
|
||||
var iterative_parser = IterativeStringParser.init(bytes, options);
|
||||
|
||||
@ -480,13 +480,13 @@ pub fn parseQuotedString(
|
||||
.ascii => switch (options.output_code_page) {
|
||||
.windows1252 => {
|
||||
if (parsed.from_escaped_integer) {
|
||||
try buf.append(@truncate(c));
|
||||
try buf.append(allocator, @truncate(c));
|
||||
} else if (windows1252.bestFitFromCodepoint(c)) |best_fit| {
|
||||
try buf.append(best_fit);
|
||||
try buf.append(allocator, best_fit);
|
||||
} else if (c < 0x10000 or c == code_pages.Codepoint.invalid) {
|
||||
try buf.append('?');
|
||||
try buf.append(allocator, '?');
|
||||
} else {
|
||||
try buf.appendSlice("??");
|
||||
try buf.appendSlice(allocator, "??");
|
||||
}
|
||||
},
|
||||
.utf8 => {
|
||||
@ -500,35 +500,35 @@ pub fn parseQuotedString(
|
||||
}
|
||||
var utf8_buf: [4]u8 = undefined;
|
||||
const utf8_len = std.unicode.utf8Encode(codepoint_to_encode, &utf8_buf) catch unreachable;
|
||||
try buf.appendSlice(utf8_buf[0..utf8_len]);
|
||||
try buf.appendSlice(allocator, utf8_buf[0..utf8_len]);
|
||||
},
|
||||
},
|
||||
.wide => {
|
||||
// Parsing any string type as a wide string is handled separately, see parseQuotedStringAsWideString
|
||||
std.debug.assert(iterative_parser.declared_string_type == .wide);
|
||||
if (parsed.from_escaped_integer) {
|
||||
try buf.append(std.mem.nativeToLittle(u16, @truncate(c)));
|
||||
try buf.append(allocator, std.mem.nativeToLittle(u16, @truncate(c)));
|
||||
} else if (c == code_pages.Codepoint.invalid) {
|
||||
try buf.append(std.mem.nativeToLittle(u16, '�'));
|
||||
try buf.append(allocator, std.mem.nativeToLittle(u16, '�'));
|
||||
} else if (c < 0x10000) {
|
||||
const short: u16 = @intCast(c);
|
||||
try buf.append(std.mem.nativeToLittle(u16, short));
|
||||
try buf.append(allocator, std.mem.nativeToLittle(u16, short));
|
||||
} else {
|
||||
if (!parsed.escaped_surrogate_pair) {
|
||||
const high = @as(u16, @intCast((c - 0x10000) >> 10)) + 0xD800;
|
||||
try buf.append(std.mem.nativeToLittle(u16, high));
|
||||
try buf.append(allocator, std.mem.nativeToLittle(u16, high));
|
||||
}
|
||||
const low = @as(u16, @intCast(c & 0x3FF)) + 0xDC00;
|
||||
try buf.append(std.mem.nativeToLittle(u16, low));
|
||||
try buf.append(allocator, std.mem.nativeToLittle(u16, low));
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
if (literal_type == .wide) {
|
||||
return buf.toOwnedSliceSentinel(0);
|
||||
return buf.toOwnedSliceSentinel(allocator, 0);
|
||||
} else {
|
||||
return buf.toOwnedSlice();
|
||||
return buf.toOwnedSlice(allocator);
|
||||
}
|
||||
}
|
||||
|
||||
@ -564,8 +564,8 @@ pub fn parseQuotedStringAsWideString(allocator: std.mem.Allocator, bytes: Source
|
||||
// Note: We're only handling the case of parsing an ASCII string into a wide string from here on out.
|
||||
// TODO: The logic below is similar to that in AcceleratorKeyCodepointTranslator, might be worth merging the two
|
||||
|
||||
var buf = try std.array_list.Managed(u16).initCapacity(allocator, bytes.slice.len);
|
||||
errdefer buf.deinit();
|
||||
var buf = try std.ArrayList(u16).initCapacity(allocator, bytes.slice.len);
|
||||
errdefer buf.deinit(allocator);
|
||||
|
||||
var iterative_parser = IterativeStringParser.init(bytes, options);
|
||||
|
||||
@ -578,23 +578,23 @@ pub fn parseQuotedStringAsWideString(allocator: std.mem.Allocator, bytes: Source
|
||||
.windows1252 => windows1252.toCodepoint(byte_to_interpret),
|
||||
.utf8 => if (byte_to_interpret > 0x7F) '�' else byte_to_interpret,
|
||||
};
|
||||
try buf.append(std.mem.nativeToLittle(u16, code_unit_to_encode));
|
||||
try buf.append(allocator, std.mem.nativeToLittle(u16, code_unit_to_encode));
|
||||
} else if (c == code_pages.Codepoint.invalid) {
|
||||
try buf.append(std.mem.nativeToLittle(u16, '�'));
|
||||
try buf.append(allocator, std.mem.nativeToLittle(u16, '�'));
|
||||
} else if (c < 0x10000) {
|
||||
const short: u16 = @intCast(c);
|
||||
try buf.append(std.mem.nativeToLittle(u16, short));
|
||||
try buf.append(allocator, std.mem.nativeToLittle(u16, short));
|
||||
} else {
|
||||
if (!parsed.escaped_surrogate_pair) {
|
||||
const high = @as(u16, @intCast((c - 0x10000) >> 10)) + 0xD800;
|
||||
try buf.append(std.mem.nativeToLittle(u16, high));
|
||||
try buf.append(allocator, std.mem.nativeToLittle(u16, high));
|
||||
}
|
||||
const low = @as(u16, @intCast(c & 0x3FF)) + 0xDC00;
|
||||
try buf.append(std.mem.nativeToLittle(u16, low));
|
||||
try buf.append(allocator, std.mem.nativeToLittle(u16, low));
|
||||
}
|
||||
}
|
||||
|
||||
return buf.toOwnedSliceSentinel(0);
|
||||
return buf.toOwnedSliceSentinel(allocator, 0);
|
||||
}
|
||||
|
||||
test "parse quoted ascii string" {
|
||||
|
||||
@ -3,6 +3,7 @@ const builtin = @import("builtin");
|
||||
const removeComments = @import("comments.zig").removeComments;
|
||||
const parseAndRemoveLineCommands = @import("source_mapping.zig").parseAndRemoveLineCommands;
|
||||
const compile = @import("compile.zig").compile;
|
||||
const Dependencies = @import("compile.zig").Dependencies;
|
||||
const Diagnostics = @import("errors.zig").Diagnostics;
|
||||
const cli = @import("cli.zig");
|
||||
const preprocess = @import("preprocess.zig");
|
||||
@ -13,8 +14,6 @@ const hasDisjointCodePage = @import("disjoint_code_page.zig").hasDisjointCodePag
|
||||
const fmtResourceType = @import("res.zig").NameOrOrdinal.fmtResourceType;
|
||||
const aro = @import("aro");
|
||||
|
||||
var stdout_buffer: [1024]u8 = undefined;
|
||||
|
||||
pub fn main() !void {
|
||||
var gpa: std.heap.GeneralPurposeAllocator(.{}) = .init;
|
||||
defer std.debug.assert(gpa.deinit() == .ok);
|
||||
@ -43,11 +42,13 @@ pub fn main() !void {
|
||||
cli_args = args[3..];
|
||||
}
|
||||
|
||||
var stdout_writer2 = std.fs.File.stdout().writer(&stdout_buffer);
|
||||
var stdout_buffer: [1024]u8 = undefined;
|
||||
var stdout_writer = std.fs.File.stdout().writer(&stdout_buffer);
|
||||
const stdout = &stdout_writer.interface;
|
||||
var error_handler: ErrorHandler = switch (zig_integration) {
|
||||
true => .{
|
||||
.server = .{
|
||||
.out = &stdout_writer2.interface,
|
||||
.out = stdout,
|
||||
.in = undefined, // won't be receiving messages
|
||||
},
|
||||
},
|
||||
@ -83,28 +84,23 @@ pub fn main() !void {
|
||||
defer options.deinit();
|
||||
|
||||
if (options.print_help_and_exit) {
|
||||
const stdout = std.fs.File.stdout();
|
||||
try cli.writeUsage(stdout.deprecatedWriter(), "zig rc");
|
||||
try cli.writeUsage(stdout, "zig rc");
|
||||
try stdout.flush();
|
||||
return;
|
||||
}
|
||||
|
||||
// Don't allow verbose when integrating with Zig via stdout
|
||||
options.verbose = false;
|
||||
|
||||
const stdout_writer = std.fs.File.stdout().deprecatedWriter();
|
||||
if (options.verbose) {
|
||||
try options.dumpVerbose(stdout_writer);
|
||||
try stdout_writer.writeByte('\n');
|
||||
try options.dumpVerbose(stdout);
|
||||
try stdout.writeByte('\n');
|
||||
try stdout.flush();
|
||||
}
|
||||
|
||||
var dependencies_list = std.array_list.Managed([]const u8).init(allocator);
|
||||
defer {
|
||||
for (dependencies_list.items) |item| {
|
||||
allocator.free(item);
|
||||
}
|
||||
dependencies_list.deinit();
|
||||
}
|
||||
const maybe_dependencies_list: ?*std.array_list.Managed([]const u8) = if (options.depfile_path != null) &dependencies_list else null;
|
||||
var dependencies = Dependencies.init(allocator);
|
||||
defer dependencies.deinit();
|
||||
const maybe_dependencies: ?*Dependencies = if (options.depfile_path != null) &dependencies else null;
|
||||
|
||||
var include_paths = LazyIncludePaths{
|
||||
.arena = arena,
|
||||
@ -115,7 +111,7 @@ pub fn main() !void {
|
||||
|
||||
const full_input = full_input: {
|
||||
if (options.input_format == .rc and options.preprocess != .no) {
|
||||
var preprocessed_buf = std.array_list.Managed(u8).init(allocator);
|
||||
var preprocessed_buf: std.Io.Writer.Allocating = .init(allocator);
|
||||
errdefer preprocessed_buf.deinit();
|
||||
|
||||
// We're going to throw away everything except the final preprocessed output anyway,
|
||||
@ -127,26 +123,27 @@ pub fn main() !void {
|
||||
var comp = aro.Compilation.init(aro_arena, std.fs.cwd());
|
||||
defer comp.deinit();
|
||||
|
||||
var argv = std.array_list.Managed([]const u8).init(comp.gpa);
|
||||
defer argv.deinit();
|
||||
var argv: std.ArrayList([]const u8) = .empty;
|
||||
defer argv.deinit(aro_arena);
|
||||
|
||||
try argv.append("arocc"); // dummy command name
|
||||
try argv.append(aro_arena, "arocc"); // dummy command name
|
||||
const resolved_include_paths = try include_paths.get(&error_handler);
|
||||
try preprocess.appendAroArgs(aro_arena, &argv, options, resolved_include_paths);
|
||||
try argv.append(switch (options.input_source) {
|
||||
try argv.append(aro_arena, switch (options.input_source) {
|
||||
.stdio => "-",
|
||||
.filename => |filename| filename,
|
||||
});
|
||||
|
||||
if (options.verbose) {
|
||||
try stdout_writer.writeAll("Preprocessor: arocc (built-in)\n");
|
||||
try stdout.writeAll("Preprocessor: arocc (built-in)\n");
|
||||
for (argv.items[0 .. argv.items.len - 1]) |arg| {
|
||||
try stdout_writer.print("{s} ", .{arg});
|
||||
try stdout.print("{s} ", .{arg});
|
||||
}
|
||||
try stdout_writer.print("{s}\n\n", .{argv.items[argv.items.len - 1]});
|
||||
try stdout.print("{s}\n\n", .{argv.items[argv.items.len - 1]});
|
||||
try stdout.flush();
|
||||
}
|
||||
|
||||
preprocess.preprocess(&comp, preprocessed_buf.writer(), argv.items, maybe_dependencies_list) catch |err| switch (err) {
|
||||
preprocess.preprocess(&comp, &preprocessed_buf.writer, argv.items, maybe_dependencies) catch |err| switch (err) {
|
||||
error.GeneratedSourceError => {
|
||||
try error_handler.emitAroDiagnostics(allocator, "failed during preprocessor setup (this is always a bug):", &comp);
|
||||
std.process.exit(1);
|
||||
@ -249,14 +246,15 @@ pub fn main() !void {
|
||||
defer diagnostics.deinit();
|
||||
|
||||
var output_buffer: [4096]u8 = undefined;
|
||||
var res_stream_writer = res_stream.source.writer(allocator).adaptToNewApi(&output_buffer);
|
||||
const output_buffered_stream = &res_stream_writer.new_interface;
|
||||
var res_stream_writer = res_stream.source.writer(allocator, &output_buffer);
|
||||
defer res_stream_writer.deinit(&res_stream.source);
|
||||
const output_buffered_stream = res_stream_writer.interface();
|
||||
|
||||
compile(allocator, final_input, output_buffered_stream, .{
|
||||
.cwd = std.fs.cwd(),
|
||||
.diagnostics = &diagnostics,
|
||||
.source_mappings = &mapping_results.mappings,
|
||||
.dependencies_list = maybe_dependencies_list,
|
||||
.dependencies = maybe_dependencies,
|
||||
.ignore_include_env_var = options.ignore_include_env_var,
|
||||
.extra_include_paths = options.extra_include_paths.items,
|
||||
.system_include_paths = try include_paths.get(&error_handler),
|
||||
@ -303,7 +301,7 @@ pub fn main() !void {
|
||||
};
|
||||
|
||||
try write_stream.beginArray();
|
||||
for (dependencies_list.items) |dep_path| {
|
||||
for (dependencies.list.items) |dep_path| {
|
||||
try write_stream.write(dep_path);
|
||||
}
|
||||
try write_stream.endArray();
|
||||
@ -342,10 +340,10 @@ pub fn main() !void {
|
||||
defer coff_stream.deinit(allocator);
|
||||
|
||||
var coff_output_buffer: [4096]u8 = undefined;
|
||||
var coff_output_buffered_stream = coff_stream.source.writer(allocator).adaptToNewApi(&coff_output_buffer);
|
||||
var coff_output_buffered_stream = coff_stream.source.writer(allocator, &coff_output_buffer);
|
||||
|
||||
var cvtres_diagnostics: cvtres.Diagnostics = .{ .none = {} };
|
||||
cvtres.writeCoff(allocator, &coff_output_buffered_stream.new_interface, resources.list.items, options.coff_options, &cvtres_diagnostics) catch |err| {
|
||||
cvtres.writeCoff(allocator, coff_output_buffered_stream.interface(), resources.list.items, options.coff_options, &cvtres_diagnostics) catch |err| {
|
||||
switch (err) {
|
||||
error.DuplicateResource => {
|
||||
const duplicate_resource = resources.list.items[cvtres_diagnostics.duplicate_resource];
|
||||
@ -382,7 +380,7 @@ pub fn main() !void {
|
||||
std.process.exit(1);
|
||||
};
|
||||
|
||||
try coff_output_buffered_stream.new_interface.flush();
|
||||
try coff_output_buffered_stream.interface().flush();
|
||||
}
|
||||
|
||||
const IoStream = struct {
|
||||
@ -425,7 +423,7 @@ const IoStream = struct {
|
||||
pub const Source = union(enum) {
|
||||
file: std.fs.File,
|
||||
stdio: std.fs.File,
|
||||
memory: std.ArrayListUnmanaged(u8),
|
||||
memory: std.ArrayList(u8),
|
||||
/// The source has been closed and any usage of the Source in this state is illegal (except deinit).
|
||||
closed: void,
|
||||
|
||||
@ -472,26 +470,34 @@ const IoStream = struct {
|
||||
};
|
||||
}
|
||||
|
||||
pub const WriterContext = struct {
|
||||
self: *Source,
|
||||
allocator: std.mem.Allocator,
|
||||
};
|
||||
pub const WriteError = std.mem.Allocator.Error || std.fs.File.WriteError;
|
||||
pub const Writer = std.io.GenericWriter(WriterContext, WriteError, write);
|
||||
pub const Writer = union(enum) {
|
||||
file: std.fs.File.Writer,
|
||||
allocating: std.Io.Writer.Allocating,
|
||||
|
||||
pub fn write(ctx: WriterContext, bytes: []const u8) WriteError!usize {
|
||||
switch (ctx.self.*) {
|
||||
inline .file, .stdio => |file| return file.write(bytes),
|
||||
.memory => |*list| {
|
||||
try list.appendSlice(ctx.allocator, bytes);
|
||||
return bytes.len;
|
||||
},
|
||||
.closed => unreachable,
|
||||
pub const Error = std.mem.Allocator.Error || std.fs.File.WriteError;
|
||||
|
||||
pub fn interface(this: *@This()) *std.Io.Writer {
|
||||
return switch (this.*) {
|
||||
.file => |*fw| &fw.interface,
|
||||
.allocating => |*a| &a.writer,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
pub fn writer(self: *Source, allocator: std.mem.Allocator) Writer {
|
||||
return .{ .context = .{ .self = self, .allocator = allocator } };
|
||||
pub fn deinit(this: *@This(), source: *Source) void {
|
||||
switch (this.*) {
|
||||
.file => {},
|
||||
.allocating => |*a| source.memory = a.toArrayList(),
|
||||
}
|
||||
this.* = undefined;
|
||||
}
|
||||
};
|
||||
|
||||
pub fn writer(source: *Source, allocator: std.mem.Allocator, buffer: []u8) Writer {
|
||||
return switch (source.*) {
|
||||
.file, .stdio => |file| .{ .file = file.writer(buffer) },
|
||||
.memory => |*list| .{ .allocating = .fromArrayList(allocator, list) },
|
||||
.closed => unreachable,
|
||||
};
|
||||
}
|
||||
};
|
||||
};
|
||||
@ -721,7 +727,7 @@ fn cliDiagnosticsToErrorBundle(
|
||||
});
|
||||
|
||||
var cur_err: ?ErrorBundle.ErrorMessage = null;
|
||||
var cur_notes: std.ArrayListUnmanaged(ErrorBundle.ErrorMessage) = .empty;
|
||||
var cur_notes: std.ArrayList(ErrorBundle.ErrorMessage) = .empty;
|
||||
defer cur_notes.deinit(gpa);
|
||||
for (diagnostics.errors.items) |err_details| {
|
||||
switch (err_details.type) {
|
||||
@ -763,10 +769,10 @@ fn diagnosticsToErrorBundle(
|
||||
try bundle.init(gpa);
|
||||
errdefer bundle.deinit();
|
||||
|
||||
var msg_buf: std.ArrayListUnmanaged(u8) = .empty;
|
||||
defer msg_buf.deinit(gpa);
|
||||
var msg_buf: std.Io.Writer.Allocating = .init(gpa);
|
||||
defer msg_buf.deinit();
|
||||
var cur_err: ?ErrorBundle.ErrorMessage = null;
|
||||
var cur_notes: std.ArrayListUnmanaged(ErrorBundle.ErrorMessage) = .empty;
|
||||
var cur_notes: std.ArrayList(ErrorBundle.ErrorMessage) = .empty;
|
||||
defer cur_notes.deinit(gpa);
|
||||
for (diagnostics.errors.items) |err_details| {
|
||||
switch (err_details.type) {
|
||||
@ -789,7 +795,7 @@ fn diagnosticsToErrorBundle(
|
||||
const column = err_details.token.calculateColumn(source, 1, source_line_start) + 1;
|
||||
|
||||
msg_buf.clearRetainingCapacity();
|
||||
try err_details.render(msg_buf.writer(gpa), source, diagnostics.strings.items);
|
||||
try err_details.render(&msg_buf.writer, source, diagnostics.strings.items);
|
||||
|
||||
const src_loc = src_loc: {
|
||||
var src_loc: ErrorBundle.SourceLocation = .{
|
||||
@ -817,7 +823,7 @@ fn diagnosticsToErrorBundle(
|
||||
try flushErrorMessageIntoBundle(&bundle, err, cur_notes.items);
|
||||
}
|
||||
cur_err = .{
|
||||
.msg = try bundle.addString(msg_buf.items),
|
||||
.msg = try bundle.addString(msg_buf.written()),
|
||||
.src_loc = src_loc,
|
||||
};
|
||||
cur_notes.clearRetainingCapacity();
|
||||
@ -825,7 +831,7 @@ fn diagnosticsToErrorBundle(
|
||||
.note => {
|
||||
cur_err.?.notes_len += 1;
|
||||
try cur_notes.append(gpa, .{
|
||||
.msg = try bundle.addString(msg_buf.items),
|
||||
.msg = try bundle.addString(msg_buf.written()),
|
||||
.src_loc = src_loc,
|
||||
});
|
||||
},
|
||||
@ -876,7 +882,7 @@ fn aroDiagnosticsToErrorBundle(
|
||||
var msg_writer = MsgWriter.init(gpa);
|
||||
defer msg_writer.deinit();
|
||||
var cur_err: ?ErrorBundle.ErrorMessage = null;
|
||||
var cur_notes: std.ArrayListUnmanaged(ErrorBundle.ErrorMessage) = .empty;
|
||||
var cur_notes: std.ArrayList(ErrorBundle.ErrorMessage) = .empty;
|
||||
defer cur_notes.deinit(gpa);
|
||||
for (comp.diagnostics.list.items) |msg| {
|
||||
switch (msg.kind) {
|
||||
@ -971,11 +977,11 @@ const MsgWriter = struct {
|
||||
}
|
||||
|
||||
pub fn print(m: *MsgWriter, comptime fmt: []const u8, args: anytype) void {
|
||||
m.buf.writer().print(fmt, args) catch {};
|
||||
m.buf.print(fmt, args) catch {};
|
||||
}
|
||||
|
||||
pub fn write(m: *MsgWriter, msg: []const u8) void {
|
||||
m.buf.writer().writeAll(msg) catch {};
|
||||
m.buf.appendSlice(msg) catch {};
|
||||
}
|
||||
|
||||
pub fn setColor(m: *MsgWriter, color: std.io.tty.Color) void {
|
||||
|
||||
@ -82,8 +82,8 @@ pub const Parser = struct {
|
||||
}
|
||||
|
||||
fn parseRoot(self: *Self) Error!*Node {
|
||||
var statements = std.array_list.Managed(*Node).init(self.state.allocator);
|
||||
defer statements.deinit();
|
||||
var statements: std.ArrayList(*Node) = .empty;
|
||||
defer statements.deinit(self.state.allocator);
|
||||
|
||||
try self.parseStatements(&statements);
|
||||
try self.check(.eof);
|
||||
@ -95,7 +95,7 @@ pub const Parser = struct {
|
||||
return &node.base;
|
||||
}
|
||||
|
||||
fn parseStatements(self: *Self, statements: *std.array_list.Managed(*Node)) Error!void {
|
||||
fn parseStatements(self: *Self, statements: *std.ArrayList(*Node)) Error!void {
|
||||
while (true) {
|
||||
try self.nextToken(.whitespace_delimiter_only);
|
||||
if (self.state.token.id == .eof) break;
|
||||
@ -105,7 +105,7 @@ pub const Parser = struct {
|
||||
// (usually it will end up with bogus things like 'file
|
||||
// not found: {')
|
||||
const statement = try self.parseStatement();
|
||||
try statements.append(statement);
|
||||
try statements.append(self.state.allocator, statement);
|
||||
}
|
||||
}
|
||||
|
||||
@ -115,7 +115,7 @@ pub const Parser = struct {
|
||||
/// current token is unchanged.
|
||||
/// The returned slice is allocated by the parser's arena
|
||||
fn parseCommonResourceAttributes(self: *Self) ![]Token {
|
||||
var common_resource_attributes: std.ArrayListUnmanaged(Token) = .empty;
|
||||
var common_resource_attributes: std.ArrayList(Token) = .empty;
|
||||
while (true) {
|
||||
const maybe_common_resource_attribute = try self.lookaheadToken(.normal);
|
||||
if (maybe_common_resource_attribute.id == .literal and rc.CommonResourceAttributes.map.has(maybe_common_resource_attribute.slice(self.lexer.buffer))) {
|
||||
@ -135,7 +135,7 @@ pub const Parser = struct {
|
||||
/// current token is unchanged.
|
||||
/// The returned slice is allocated by the parser's arena
|
||||
fn parseOptionalStatements(self: *Self, resource: ResourceType) ![]*Node {
|
||||
var optional_statements: std.ArrayListUnmanaged(*Node) = .empty;
|
||||
var optional_statements: std.ArrayList(*Node) = .empty;
|
||||
|
||||
const num_statement_types = @typeInfo(rc.OptionalStatements).@"enum".fields.len;
|
||||
var statement_type_has_duplicates = [_]bool{false} ** num_statement_types;
|
||||
@ -355,8 +355,8 @@ pub const Parser = struct {
|
||||
const begin_token = self.state.token;
|
||||
try self.check(.begin);
|
||||
|
||||
var strings = std.array_list.Managed(*Node).init(self.state.allocator);
|
||||
defer strings.deinit();
|
||||
var strings: std.ArrayList(*Node) = .empty;
|
||||
defer strings.deinit(self.state.allocator);
|
||||
while (true) {
|
||||
const maybe_end_token = try self.lookaheadToken(.normal);
|
||||
switch (maybe_end_token.id) {
|
||||
@ -392,7 +392,7 @@ pub const Parser = struct {
|
||||
.maybe_comma = comma_token,
|
||||
.string = self.state.token,
|
||||
};
|
||||
try strings.append(&string_node.base);
|
||||
try strings.append(self.state.allocator, &string_node.base);
|
||||
}
|
||||
|
||||
if (strings.items.len == 0) {
|
||||
@ -501,7 +501,7 @@ pub const Parser = struct {
|
||||
const begin_token = self.state.token;
|
||||
try self.check(.begin);
|
||||
|
||||
var accelerators: std.ArrayListUnmanaged(*Node) = .empty;
|
||||
var accelerators: std.ArrayList(*Node) = .empty;
|
||||
|
||||
while (true) {
|
||||
const lookahead = try self.lookaheadToken(.normal);
|
||||
@ -519,7 +519,7 @@ pub const Parser = struct {
|
||||
|
||||
const idvalue = try self.parseExpression(.{ .allowed_types = .{ .number = true } });
|
||||
|
||||
var type_and_options: std.ArrayListUnmanaged(Token) = .empty;
|
||||
var type_and_options: std.ArrayList(Token) = .empty;
|
||||
while (true) {
|
||||
if (!(try self.parseOptionalToken(.comma))) break;
|
||||
|
||||
@ -584,7 +584,7 @@ pub const Parser = struct {
|
||||
const begin_token = self.state.token;
|
||||
try self.check(.begin);
|
||||
|
||||
var controls: std.ArrayListUnmanaged(*Node) = .empty;
|
||||
var controls: std.ArrayList(*Node) = .empty;
|
||||
defer controls.deinit(self.state.allocator);
|
||||
while (try self.parseControlStatement(resource)) |control_node| {
|
||||
// The number of controls must fit in a u16 in order for it to
|
||||
@ -643,7 +643,7 @@ pub const Parser = struct {
|
||||
const begin_token = self.state.token;
|
||||
try self.check(.begin);
|
||||
|
||||
var buttons: std.ArrayListUnmanaged(*Node) = .empty;
|
||||
var buttons: std.ArrayList(*Node) = .empty;
|
||||
defer buttons.deinit(self.state.allocator);
|
||||
while (try self.parseToolbarButtonStatement()) |button_node| {
|
||||
// The number of buttons must fit in a u16 in order for it to
|
||||
@ -701,7 +701,7 @@ pub const Parser = struct {
|
||||
const begin_token = self.state.token;
|
||||
try self.check(.begin);
|
||||
|
||||
var items: std.ArrayListUnmanaged(*Node) = .empty;
|
||||
var items: std.ArrayList(*Node) = .empty;
|
||||
defer items.deinit(self.state.allocator);
|
||||
while (try self.parseMenuItemStatement(resource, id_token, 1)) |item_node| {
|
||||
try items.append(self.state.allocator, item_node);
|
||||
@ -735,7 +735,7 @@ pub const Parser = struct {
|
||||
// common resource attributes must all be contiguous and come before optional-statements
|
||||
const common_resource_attributes = try self.parseCommonResourceAttributes();
|
||||
|
||||
var fixed_info: std.ArrayListUnmanaged(*Node) = .empty;
|
||||
var fixed_info: std.ArrayList(*Node) = .empty;
|
||||
while (try self.parseVersionStatement()) |version_statement| {
|
||||
try fixed_info.append(self.state.arena, version_statement);
|
||||
}
|
||||
@ -744,7 +744,7 @@ pub const Parser = struct {
|
||||
const begin_token = self.state.token;
|
||||
try self.check(.begin);
|
||||
|
||||
var block_statements: std.ArrayListUnmanaged(*Node) = .empty;
|
||||
var block_statements: std.ArrayList(*Node) = .empty;
|
||||
while (try self.parseVersionBlockOrValue(id_token, 1)) |block_node| {
|
||||
try block_statements.append(self.state.arena, block_node);
|
||||
}
|
||||
@ -852,8 +852,8 @@ pub const Parser = struct {
|
||||
/// Expects the current token to be a begin token.
|
||||
/// After return, the current token will be the end token.
|
||||
fn parseRawDataBlock(self: *Self) Error![]*Node {
|
||||
var raw_data = std.array_list.Managed(*Node).init(self.state.allocator);
|
||||
defer raw_data.deinit();
|
||||
var raw_data: std.ArrayList(*Node) = .empty;
|
||||
defer raw_data.deinit(self.state.allocator);
|
||||
while (true) {
|
||||
const maybe_end_token = try self.lookaheadToken(.normal);
|
||||
switch (maybe_end_token.id) {
|
||||
@ -888,7 +888,7 @@ pub const Parser = struct {
|
||||
else => {},
|
||||
}
|
||||
const expression = try self.parseExpression(.{ .allowed_types = .{ .number = true, .string = true } });
|
||||
try raw_data.append(expression);
|
||||
try raw_data.append(self.state.allocator, expression);
|
||||
|
||||
if (expression.isNumberExpression()) {
|
||||
const maybe_close_paren = try self.lookaheadToken(.normal);
|
||||
@ -1125,7 +1125,7 @@ pub const Parser = struct {
|
||||
|
||||
_ = try self.parseOptionalToken(.comma);
|
||||
|
||||
var options: std.ArrayListUnmanaged(Token) = .empty;
|
||||
var options: std.ArrayList(Token) = .empty;
|
||||
while (true) {
|
||||
const option_token = try self.lookaheadToken(.normal);
|
||||
if (!rc.MenuItem.Option.map.has(option_token.slice(self.lexer.buffer))) {
|
||||
@ -1160,7 +1160,7 @@ pub const Parser = struct {
|
||||
}
|
||||
try self.skipAnyCommas();
|
||||
|
||||
var options: std.ArrayListUnmanaged(Token) = .empty;
|
||||
var options: std.ArrayList(Token) = .empty;
|
||||
while (true) {
|
||||
const option_token = try self.lookaheadToken(.normal);
|
||||
if (!rc.MenuItem.Option.map.has(option_token.slice(self.lexer.buffer))) {
|
||||
@ -1175,7 +1175,7 @@ pub const Parser = struct {
|
||||
const begin_token = self.state.token;
|
||||
try self.check(.begin);
|
||||
|
||||
var items: std.ArrayListUnmanaged(*Node) = .empty;
|
||||
var items: std.ArrayList(*Node) = .empty;
|
||||
while (try self.parseMenuItemStatement(resource, top_level_menu_id_token, nesting_level + 1)) |item_node| {
|
||||
try items.append(self.state.arena, item_node);
|
||||
}
|
||||
@ -1245,7 +1245,7 @@ pub const Parser = struct {
|
||||
const begin_token = self.state.token;
|
||||
try self.check(.begin);
|
||||
|
||||
var items: std.ArrayListUnmanaged(*Node) = .empty;
|
||||
var items: std.ArrayList(*Node) = .empty;
|
||||
while (try self.parseMenuItemStatement(resource, top_level_menu_id_token, nesting_level + 1)) |item_node| {
|
||||
try items.append(self.state.arena, item_node);
|
||||
}
|
||||
@ -1322,7 +1322,7 @@ pub const Parser = struct {
|
||||
switch (statement_type) {
|
||||
.file_version, .product_version => {
|
||||
var parts_buffer: [4]*Node = undefined;
|
||||
var parts = std.ArrayListUnmanaged(*Node).initBuffer(&parts_buffer);
|
||||
var parts = std.ArrayList(*Node).initBuffer(&parts_buffer);
|
||||
|
||||
while (true) {
|
||||
const value = try self.parseExpression(.{ .allowed_types = .{ .number = true } });
|
||||
@ -1402,7 +1402,7 @@ pub const Parser = struct {
|
||||
const begin_token = self.state.token;
|
||||
try self.check(.begin);
|
||||
|
||||
var children: std.ArrayListUnmanaged(*Node) = .empty;
|
||||
var children: std.ArrayList(*Node) = .empty;
|
||||
while (try self.parseVersionBlockOrValue(top_level_version_id_token, nesting_level + 1)) |value_node| {
|
||||
try children.append(self.state.arena, value_node);
|
||||
}
|
||||
@ -1435,7 +1435,7 @@ pub const Parser = struct {
|
||||
}
|
||||
|
||||
fn parseBlockValuesList(self: *Self, had_comma_before_first_value: bool) Error![]*Node {
|
||||
var values: std.ArrayListUnmanaged(*Node) = .empty;
|
||||
var values: std.ArrayList(*Node) = .empty;
|
||||
var seen_number: bool = false;
|
||||
var first_string_value: ?*Node = null;
|
||||
while (true) {
|
||||
|
||||
@ -2,28 +2,32 @@ const std = @import("std");
|
||||
const builtin = @import("builtin");
|
||||
const Allocator = std.mem.Allocator;
|
||||
const cli = @import("cli.zig");
|
||||
const Dependencies = @import("compile.zig").Dependencies;
|
||||
const aro = @import("aro");
|
||||
|
||||
const PreprocessError = error{ ArgError, GeneratedSourceError, PreprocessError, StreamTooLong, OutOfMemory };
|
||||
|
||||
pub fn preprocess(
|
||||
comp: *aro.Compilation,
|
||||
writer: anytype,
|
||||
writer: *std.Io.Writer,
|
||||
/// Expects argv[0] to be the command name
|
||||
argv: []const []const u8,
|
||||
maybe_dependencies_list: ?*std.array_list.Managed([]const u8),
|
||||
maybe_dependencies: ?*Dependencies,
|
||||
) PreprocessError!void {
|
||||
try comp.addDefaultPragmaHandlers();
|
||||
|
||||
var driver: aro.Driver = .{ .comp = comp, .aro_name = "arocc" };
|
||||
defer driver.deinit();
|
||||
|
||||
var macro_buf = std.array_list.Managed(u8).init(comp.gpa);
|
||||
var macro_buf: std.Io.Writer.Allocating = .init(comp.gpa);
|
||||
defer macro_buf.deinit();
|
||||
|
||||
_ = driver.parseArgs(std.io.null_writer, macro_buf.writer(), argv) catch |err| switch (err) {
|
||||
var trash: [64]u8 = undefined;
|
||||
var discarding: std.Io.Writer.Discarding = .init(&trash);
|
||||
_ = driver.parseArgs(&discarding.writer, ¯o_buf.writer, argv) catch |err| switch (err) {
|
||||
error.FatalError => return error.ArgError,
|
||||
error.OutOfMemory => |e| return e,
|
||||
error.WriteFailed => return error.OutOfMemory,
|
||||
};
|
||||
|
||||
if (hasAnyErrors(comp)) return error.ArgError;
|
||||
@ -33,7 +37,7 @@ pub fn preprocess(
|
||||
error.FatalError => return error.GeneratedSourceError,
|
||||
else => |e| return e,
|
||||
};
|
||||
const user_macros = comp.addSourceFromBuffer("<command line>", macro_buf.items) catch |err| switch (err) {
|
||||
const user_macros = comp.addSourceFromBuffer("<command line>", macro_buf.written()) catch |err| switch (err) {
|
||||
error.FatalError => return error.GeneratedSourceError,
|
||||
else => |e| return e,
|
||||
};
|
||||
@ -59,15 +63,17 @@ pub fn preprocess(
|
||||
|
||||
if (hasAnyErrors(comp)) return error.PreprocessError;
|
||||
|
||||
try pp.prettyPrintTokens(writer, .result_only);
|
||||
pp.prettyPrintTokens(writer, .result_only) catch |err| switch (err) {
|
||||
error.WriteFailed => return error.OutOfMemory,
|
||||
};
|
||||
|
||||
if (maybe_dependencies_list) |dependencies_list| {
|
||||
if (maybe_dependencies) |dependencies| {
|
||||
for (comp.sources.values()) |comp_source| {
|
||||
if (comp_source.id == builtin_macros.id or comp_source.id == user_macros.id) continue;
|
||||
if (comp_source.id == .unused or comp_source.id == .generated) continue;
|
||||
const duped_path = try dependencies_list.allocator.dupe(u8, comp_source.path);
|
||||
errdefer dependencies_list.allocator.free(duped_path);
|
||||
try dependencies_list.append(duped_path);
|
||||
const duped_path = try dependencies.allocator.dupe(u8, comp_source.path);
|
||||
errdefer dependencies.allocator.free(duped_path);
|
||||
try dependencies.list.append(dependencies.allocator, duped_path);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -87,8 +93,8 @@ fn hasAnyErrors(comp: *aro.Compilation) bool {
|
||||
|
||||
/// `arena` is used for temporary -D argument strings and the INCLUDE environment variable.
|
||||
/// The arena should be kept alive at least as long as `argv`.
|
||||
pub fn appendAroArgs(arena: Allocator, argv: *std.array_list.Managed([]const u8), options: cli.Options, system_include_paths: []const []const u8) !void {
|
||||
try argv.appendSlice(&.{
|
||||
pub fn appendAroArgs(arena: Allocator, argv: *std.ArrayList([]const u8), options: cli.Options, system_include_paths: []const []const u8) !void {
|
||||
try argv.appendSlice(arena, &.{
|
||||
"-E",
|
||||
"--comments",
|
||||
"-fuse-line-directives",
|
||||
@ -99,13 +105,13 @@ pub fn appendAroArgs(arena: Allocator, argv: *std.array_list.Managed([]const u8)
|
||||
"-D_WIN32", // undocumented, but defined by default
|
||||
});
|
||||
for (options.extra_include_paths.items) |extra_include_path| {
|
||||
try argv.append("-I");
|
||||
try argv.append(extra_include_path);
|
||||
try argv.append(arena, "-I");
|
||||
try argv.append(arena, extra_include_path);
|
||||
}
|
||||
|
||||
for (system_include_paths) |include_path| {
|
||||
try argv.append("-isystem");
|
||||
try argv.append(include_path);
|
||||
try argv.append(arena, "-isystem");
|
||||
try argv.append(arena, include_path);
|
||||
}
|
||||
|
||||
if (!options.ignore_include_env_var) {
|
||||
@ -119,8 +125,8 @@ pub fn appendAroArgs(arena: Allocator, argv: *std.array_list.Managed([]const u8)
|
||||
};
|
||||
var it = std.mem.tokenizeScalar(u8, INCLUDE, delimiter);
|
||||
while (it.next()) |include_path| {
|
||||
try argv.append("-isystem");
|
||||
try argv.append(include_path);
|
||||
try argv.append(arena, "-isystem");
|
||||
try argv.append(arena, include_path);
|
||||
}
|
||||
}
|
||||
|
||||
@ -128,13 +134,13 @@ pub fn appendAroArgs(arena: Allocator, argv: *std.array_list.Managed([]const u8)
|
||||
while (symbol_it.next()) |entry| {
|
||||
switch (entry.value_ptr.*) {
|
||||
.define => |value| {
|
||||
try argv.append("-D");
|
||||
try argv.append(arena, "-D");
|
||||
const define_arg = try std.fmt.allocPrint(arena, "{s}={s}", .{ entry.key_ptr.*, value });
|
||||
try argv.append(define_arg);
|
||||
try argv.append(arena, define_arg);
|
||||
},
|
||||
.undefine => {
|
||||
try argv.append("-U");
|
||||
try argv.append(entry.key_ptr.*);
|
||||
try argv.append(arena, "-U");
|
||||
try argv.append(arena, entry.key_ptr.*);
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
@ -258,7 +258,7 @@ pub const NameOrOrdinal = union(enum) {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn write(self: NameOrOrdinal, writer: anytype) !void {
|
||||
pub fn write(self: NameOrOrdinal, writer: *std.Io.Writer) !void {
|
||||
switch (self) {
|
||||
.name => |name| {
|
||||
try writer.writeAll(std.mem.sliceAsBytes(name[0 .. name.len + 1]));
|
||||
@ -270,7 +270,7 @@ pub const NameOrOrdinal = union(enum) {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn writeEmpty(writer: anytype) !void {
|
||||
pub fn writeEmpty(writer: *std.Io.Writer) !void {
|
||||
try writer.writeInt(u16, 0, .little);
|
||||
}
|
||||
|
||||
@ -283,8 +283,8 @@ pub const NameOrOrdinal = union(enum) {
|
||||
|
||||
pub fn nameFromString(allocator: Allocator, bytes: SourceBytes) !NameOrOrdinal {
|
||||
// Names have a limit of 256 UTF-16 code units + null terminator
|
||||
var buf = try std.array_list.Managed(u16).initCapacity(allocator, @min(257, bytes.slice.len));
|
||||
errdefer buf.deinit();
|
||||
var buf = try std.ArrayList(u16).initCapacity(allocator, @min(257, bytes.slice.len));
|
||||
errdefer buf.deinit(allocator);
|
||||
|
||||
var i: usize = 0;
|
||||
while (bytes.code_page.codepointAt(i, bytes.slice)) |codepoint| : (i += codepoint.byte_len) {
|
||||
@ -292,27 +292,27 @@ pub const NameOrOrdinal = union(enum) {
|
||||
|
||||
const c = codepoint.value;
|
||||
if (c == Codepoint.invalid) {
|
||||
try buf.append(std.mem.nativeToLittle(u16, '�'));
|
||||
try buf.append(allocator, std.mem.nativeToLittle(u16, '�'));
|
||||
} else if (c < 0x7F) {
|
||||
// ASCII chars in names are always converted to uppercase
|
||||
try buf.append(std.mem.nativeToLittle(u16, std.ascii.toUpper(@intCast(c))));
|
||||
try buf.append(allocator, std.mem.nativeToLittle(u16, std.ascii.toUpper(@intCast(c))));
|
||||
} else if (c < 0x10000) {
|
||||
const short: u16 = @intCast(c);
|
||||
try buf.append(std.mem.nativeToLittle(u16, short));
|
||||
try buf.append(allocator, std.mem.nativeToLittle(u16, short));
|
||||
} else {
|
||||
const high = @as(u16, @intCast((c - 0x10000) >> 10)) + 0xD800;
|
||||
try buf.append(std.mem.nativeToLittle(u16, high));
|
||||
try buf.append(allocator, std.mem.nativeToLittle(u16, high));
|
||||
|
||||
// Note: This can cut-off in the middle of a UTF-16 surrogate pair,
|
||||
// i.e. it can make the string end with an unpaired high surrogate
|
||||
if (buf.items.len == 256) break;
|
||||
|
||||
const low = @as(u16, @intCast(c & 0x3FF)) + 0xDC00;
|
||||
try buf.append(std.mem.nativeToLittle(u16, low));
|
||||
try buf.append(allocator, std.mem.nativeToLittle(u16, low));
|
||||
}
|
||||
}
|
||||
|
||||
return NameOrOrdinal{ .name = try buf.toOwnedSliceSentinel(0) };
|
||||
return NameOrOrdinal{ .name = try buf.toOwnedSliceSentinel(allocator, 0) };
|
||||
}
|
||||
|
||||
/// Returns `null` if the bytes do not form a valid number.
|
||||
@ -1079,7 +1079,7 @@ pub const FixedFileInfo = struct {
|
||||
}
|
||||
};
|
||||
|
||||
pub fn write(self: FixedFileInfo, writer: anytype) !void {
|
||||
pub fn write(self: FixedFileInfo, writer: *std.Io.Writer) !void {
|
||||
try writer.writeInt(u32, signature, .little);
|
||||
try writer.writeInt(u32, version, .little);
|
||||
try writer.writeInt(u32, self.file_version.mostSignificantCombinedParts(), .little);
|
||||
|
||||
@ -10,7 +10,7 @@ pub const ParseLineCommandsResult = struct {
|
||||
|
||||
const CurrentMapping = struct {
|
||||
line_num: usize = 1,
|
||||
filename: std.ArrayListUnmanaged(u8) = .empty,
|
||||
filename: std.ArrayList(u8) = .empty,
|
||||
pending: bool = true,
|
||||
ignore_contents: bool = false,
|
||||
};
|
||||
@ -574,8 +574,8 @@ fn parseFilename(allocator: Allocator, str: []const u8) error{ OutOfMemory, Inva
|
||||
escape_u,
|
||||
};
|
||||
|
||||
var filename = try std.array_list.Managed(u8).initCapacity(allocator, str.len);
|
||||
errdefer filename.deinit();
|
||||
var filename = try std.ArrayList(u8).initCapacity(allocator, str.len);
|
||||
errdefer filename.deinit(allocator);
|
||||
var state: State = .string;
|
||||
var index: usize = 0;
|
||||
var escape_len: usize = undefined;
|
||||
@ -693,7 +693,7 @@ fn parseFilename(allocator: Allocator, str: []const u8) error{ OutOfMemory, Inva
|
||||
}
|
||||
}
|
||||
|
||||
return filename.toOwnedSlice();
|
||||
return filename.toOwnedSlice(allocator);
|
||||
}
|
||||
|
||||
fn testParseFilename(expected: []const u8, input: []const u8) !void {
|
||||
@ -927,7 +927,7 @@ test "SourceMappings collapse" {
|
||||
|
||||
/// Same thing as StringTable in Zig's src/Wasm.zig
|
||||
pub const StringTable = struct {
|
||||
data: std.ArrayListUnmanaged(u8) = .empty,
|
||||
data: std.ArrayList(u8) = .empty,
|
||||
map: std.HashMapUnmanaged(u32, void, std.hash_map.StringIndexContext, std.hash_map.default_max_load_percentage) = .empty,
|
||||
|
||||
pub fn deinit(self: *StringTable, allocator: Allocator) void {
|
||||
|
||||
@ -1,36 +1,5 @@
|
||||
const std = @import("std");
|
||||
|
||||
pub fn windows1252ToUtf8Stream(writer: anytype, reader: anytype) !usize {
|
||||
var bytes_written: usize = 0;
|
||||
var utf8_buf: [3]u8 = undefined;
|
||||
while (true) {
|
||||
const c = reader.readByte() catch |err| switch (err) {
|
||||
error.EndOfStream => return bytes_written,
|
||||
else => |e| return e,
|
||||
};
|
||||
const codepoint = toCodepoint(c);
|
||||
if (codepoint <= 0x7F) {
|
||||
try writer.writeByte(c);
|
||||
bytes_written += 1;
|
||||
} else {
|
||||
const utf8_len = std.unicode.utf8Encode(codepoint, &utf8_buf) catch unreachable;
|
||||
try writer.writeAll(utf8_buf[0..utf8_len]);
|
||||
bytes_written += utf8_len;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the number of code units written to the writer
|
||||
pub fn windows1252ToUtf16AllocZ(allocator: std.mem.Allocator, win1252_str: []const u8) ![:0]u16 {
|
||||
// Guaranteed to need exactly the same number of code units as Windows-1252 bytes
|
||||
var utf16_slice = try allocator.allocSentinel(u16, win1252_str.len, 0);
|
||||
errdefer allocator.free(utf16_slice);
|
||||
for (win1252_str, 0..) |c, i| {
|
||||
utf16_slice[i] = toCodepoint(c);
|
||||
}
|
||||
return utf16_slice;
|
||||
}
|
||||
|
||||
/// https://www.unicode.org/Public/MAPPINGS/VENDORS/MICSFT/WindowsBestFit/bestfit1252.txt
|
||||
pub fn toCodepoint(c: u8) u16 {
|
||||
return switch (c) {
|
||||
@ -572,17 +541,3 @@ pub fn bestFitFromCodepoint(codepoint: u21) ?u8 {
|
||||
else => null,
|
||||
};
|
||||
}
|
||||
|
||||
test "windows-1252 to utf8" {
|
||||
var buf = std.array_list.Managed(u8).init(std.testing.allocator);
|
||||
defer buf.deinit();
|
||||
|
||||
const input_windows1252 = "\x81pqrstuvwxyz{|}~\x80\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8e\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9e\x9f\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff";
|
||||
const expected_utf8 = "\xc2\x81pqrstuvwxyz{|}~€‚ƒ„…†‡ˆ‰Š‹ŒŽ‘’“”•–—˜™š›œžŸ¡¢£¤¥¦§¨©ª«¬®¯°±²³´µ¶·¸¹º»¼½¾¿ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖרÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõö÷øùúûüýþÿ";
|
||||
|
||||
var fbs = std.io.fixedBufferStream(input_windows1252);
|
||||
const bytes_written = try windows1252ToUtf8Stream(buf.writer(), fbs.reader());
|
||||
|
||||
try std.testing.expectEqualStrings(expected_utf8, buf.items);
|
||||
try std.testing.expectEqual(expected_utf8.len, bytes_written);
|
||||
}
|
||||
@ -6,6 +6,7 @@ const gpa = std.heap.wasm_allocator;
const assert = std.debug.assert;
const log = std.log;
const Oom = error{OutOfMemory};
const ArrayList = std.ArrayList;

ast_node: Ast.Node.Index,
file: Walk.File.Index,
@ -189,7 +190,7 @@ pub fn lookup(decl: *const Decl, name: []const u8) ?Decl.Index {
}

/// Appends the fully qualified name to `out`.
pub fn fqn(decl: *const Decl, out: *std.ArrayListUnmanaged(u8)) Oom!void {
pub fn fqn(decl: *const Decl, out: *ArrayList(u8)) Oom!void {
try decl.append_path(out);
if (decl.parent != .none) {
try append_parent_ns(out, decl.parent);
@ -199,12 +200,12 @@ pub fn fqn(decl: *const Decl, out: *std.ArrayListUnmanaged(u8)) Oom!void {
}
}

pub fn reset_with_path(decl: *const Decl, list: *std.ArrayListUnmanaged(u8)) Oom!void {
pub fn reset_with_path(decl: *const Decl, list: *ArrayList(u8)) Oom!void {
list.clearRetainingCapacity();
try append_path(decl, list);
}

pub fn append_path(decl: *const Decl, list: *std.ArrayListUnmanaged(u8)) Oom!void {
pub fn append_path(decl: *const Decl, list: *ArrayList(u8)) Oom!void {
const start = list.items.len;
// Prefer the module name alias.
for (Walk.modules.keys(), Walk.modules.values()) |pkg_name, pkg_file| {
@ -230,7 +231,7 @@ pub fn append_path(decl: *const Decl, list: *std.ArrayListUnmanaged(u8)) Oom!voi
}
}

pub fn append_parent_ns(list: *std.ArrayListUnmanaged(u8), parent: Decl.Index) Oom!void {
pub fn append_parent_ns(list: *ArrayList(u8), parent: Decl.Index) Oom!void {
assert(parent != .none);
const decl = parent.get();
if (decl.parent != .none) {
@ -1,6 +1,8 @@
|
||||
const std = @import("std");
|
||||
const Ast = std.zig.Ast;
|
||||
const assert = std.debug.assert;
|
||||
const ArrayList = std.ArrayList;
|
||||
const Writer = std.Io.Writer;
|
||||
|
||||
const Walk = @import("Walk");
|
||||
const Decl = Walk.Decl;
|
||||
@ -30,7 +32,7 @@ pub const Annotation = struct {
|
||||
|
||||
pub fn fileSourceHtml(
|
||||
file_index: Walk.File.Index,
|
||||
out: *std.ArrayListUnmanaged(u8),
|
||||
out: *ArrayList(u8),
|
||||
root_node: Ast.Node.Index,
|
||||
options: RenderSourceOptions,
|
||||
) !void {
|
||||
@ -38,7 +40,7 @@ pub fn fileSourceHtml(
|
||||
const file = file_index.get();
|
||||
|
||||
const g = struct {
|
||||
var field_access_buffer: std.ArrayListUnmanaged(u8) = .empty;
|
||||
var field_access_buffer: ArrayList(u8) = .empty;
|
||||
};
|
||||
|
||||
const start_token = ast.firstToken(root_node);
|
||||
@ -88,7 +90,7 @@ pub fn fileSourceHtml(
|
||||
if (next_annotate_index >= options.source_location_annotations.len) break;
|
||||
const next_annotation = options.source_location_annotations[next_annotate_index];
|
||||
if (cursor <= next_annotation.file_byte_offset) break;
|
||||
try out.writer(gpa).print("<span id=\"{s}{d}\"></span>", .{
|
||||
try out.print(gpa, "<span id=\"{s}{d}\"></span>", .{
|
||||
options.annotation_prefix, next_annotation.dom_id,
|
||||
});
|
||||
next_annotate_index += 1;
|
||||
@ -318,7 +320,7 @@ pub fn fileSourceHtml(
|
||||
}
|
||||
}
|
||||
|
||||
fn appendUnindented(out: *std.ArrayListUnmanaged(u8), s: []const u8, indent: usize) !void {
|
||||
fn appendUnindented(out: *ArrayList(u8), s: []const u8, indent: usize) !void {
|
||||
var it = std.mem.splitScalar(u8, s, '\n');
|
||||
var is_first_line = true;
|
||||
while (it.next()) |line| {
|
||||
@ -332,7 +334,7 @@ fn appendUnindented(out: *std.ArrayListUnmanaged(u8), s: []const u8, indent: usi
|
||||
}
|
||||
}
|
||||
|
||||
pub fn appendEscaped(out: *std.ArrayListUnmanaged(u8), s: []const u8) !void {
|
||||
pub fn appendEscaped(out: *ArrayList(u8), s: []const u8) !void {
|
||||
for (s) |c| {
|
||||
try out.ensureUnusedCapacity(gpa, 6);
|
||||
switch (c) {
|
||||
@ -347,7 +349,7 @@ pub fn appendEscaped(out: *std.ArrayListUnmanaged(u8), s: []const u8) !void {
|
||||
|
||||
fn walkFieldAccesses(
|
||||
file_index: Walk.File.Index,
|
||||
out: *std.ArrayListUnmanaged(u8),
|
||||
out: *ArrayList(u8),
|
||||
node: Ast.Node.Index,
|
||||
) Oom!void {
|
||||
const ast = file_index.get_ast();
|
||||
@ -371,7 +373,7 @@ fn walkFieldAccesses(
|
||||
|
||||
fn resolveIdentLink(
|
||||
file_index: Walk.File.Index,
|
||||
out: *std.ArrayListUnmanaged(u8),
|
||||
out: *ArrayList(u8),
|
||||
ident_token: Ast.TokenIndex,
|
||||
) Oom!void {
|
||||
const decl_index = file_index.get().lookup_token(ident_token);
|
||||
@ -391,7 +393,7 @@ fn unindent(s: []const u8, indent: usize) []const u8 {
|
||||
return s[indent_idx..];
|
||||
}
|
||||
|
||||
pub fn resolveDeclLink(decl_index: Decl.Index, out: *std.ArrayListUnmanaged(u8)) Oom!void {
|
||||
pub fn resolveDeclLink(decl_index: Decl.Index, out: *ArrayList(u8)) Oom!void {
|
||||
const decl = decl_index.get();
|
||||
switch (decl.categorize()) {
|
||||
.alias => |alias_decl| try alias_decl.get().fqn(out),
|
||||
|
||||
@ -5,6 +5,8 @@ const Ast = std.zig.Ast;
|
||||
const Walk = @import("Walk");
|
||||
const markdown = @import("markdown.zig");
|
||||
const Decl = Walk.Decl;
|
||||
const ArrayList = std.ArrayList;
|
||||
const Writer = std.Io.Writer;
|
||||
|
||||
const fileSourceHtml = @import("html_render.zig").fileSourceHtml;
|
||||
const appendEscaped = @import("html_render.zig").appendEscaped;
|
||||
@ -66,8 +68,8 @@ export fn unpack(tar_ptr: [*]u8, tar_len: usize) void {
|
||||
};
|
||||
}
|
||||
|
||||
var query_string: std.ArrayListUnmanaged(u8) = .empty;
|
||||
var query_results: std.ArrayListUnmanaged(Decl.Index) = .empty;
|
||||
var query_string: ArrayList(u8) = .empty;
|
||||
var query_results: ArrayList(Decl.Index) = .empty;
|
||||
|
||||
/// Resizes the query string to be the correct length; returns the pointer to
|
||||
/// the query string.
|
||||
@ -99,11 +101,11 @@ fn query_exec_fallible(query: []const u8, ignore_case: bool) !void {
|
||||
segments: u16,
|
||||
};
|
||||
const g = struct {
|
||||
var full_path_search_text: std.ArrayListUnmanaged(u8) = .empty;
|
||||
var full_path_search_text_lower: std.ArrayListUnmanaged(u8) = .empty;
|
||||
var doc_search_text: std.ArrayListUnmanaged(u8) = .empty;
|
||||
var full_path_search_text: ArrayList(u8) = .empty;
|
||||
var full_path_search_text_lower: ArrayList(u8) = .empty;
|
||||
var doc_search_text: ArrayList(u8) = .empty;
|
||||
/// Each element matches a corresponding query_results element.
|
||||
var scores: std.ArrayListUnmanaged(Score) = .empty;
|
||||
var scores: ArrayList(Score) = .empty;
|
||||
};
|
||||
|
||||
// First element stores the size of the list.
|
||||
@ -234,7 +236,7 @@ const ErrorIdentifier = packed struct(u64) {
|
||||
return ast.tokenTag(token_index - 1) == .doc_comment;
|
||||
}
|
||||
|
||||
fn html(ei: ErrorIdentifier, base_decl: Decl.Index, out: *std.ArrayListUnmanaged(u8)) Oom!void {
|
||||
fn html(ei: ErrorIdentifier, base_decl: Decl.Index, out: *ArrayList(u8)) Oom!void {
|
||||
const decl_index = ei.decl_index;
|
||||
const ast = decl_index.get().file.get_ast();
|
||||
const name = ast.tokenSlice(ei.token_index);
|
||||
@ -260,7 +262,7 @@ const ErrorIdentifier = packed struct(u64) {
|
||||
}
|
||||
};
|
||||
|
||||
var string_result: std.ArrayListUnmanaged(u8) = .empty;
|
||||
var string_result: ArrayList(u8) = .empty;
|
||||
var error_set_result: std.StringArrayHashMapUnmanaged(ErrorIdentifier) = .empty;
|
||||
|
||||
export fn decl_error_set(decl_index: Decl.Index) Slice(ErrorIdentifier) {
|
||||
@ -411,7 +413,7 @@ fn decl_fields_fallible(decl_index: Decl.Index) ![]Ast.Node.Index {
|
||||
|
||||
fn ast_decl_fields_fallible(ast: *Ast, ast_index: Ast.Node.Index) ![]Ast.Node.Index {
|
||||
const g = struct {
|
||||
var result: std.ArrayListUnmanaged(Ast.Node.Index) = .empty;
|
||||
var result: ArrayList(Ast.Node.Index) = .empty;
|
||||
};
|
||||
g.result.clearRetainingCapacity();
|
||||
var buf: [2]Ast.Node.Index = undefined;
|
||||
@ -429,7 +431,7 @@ fn ast_decl_fields_fallible(ast: *Ast, ast_index: Ast.Node.Index) ![]Ast.Node.In
|
||||
|
||||
fn decl_params_fallible(decl_index: Decl.Index) ![]Ast.Node.Index {
|
||||
const g = struct {
|
||||
var result: std.ArrayListUnmanaged(Ast.Node.Index) = .empty;
|
||||
var result: ArrayList(Ast.Node.Index) = .empty;
|
||||
};
|
||||
g.result.clearRetainingCapacity();
|
||||
const decl = decl_index.get();
|
||||
@ -460,7 +462,7 @@ export fn decl_param_html(decl_index: Decl.Index, param_node: Ast.Node.Index) St
|
||||
}
|
||||
|
||||
fn decl_field_html_fallible(
|
||||
out: *std.ArrayListUnmanaged(u8),
|
||||
out: *ArrayList(u8),
|
||||
decl_index: Decl.Index,
|
||||
field_node: Ast.Node.Index,
|
||||
) !void {
|
||||
@ -480,7 +482,7 @@ fn decl_field_html_fallible(
|
||||
}
|
||||
|
||||
fn decl_param_html_fallible(
|
||||
out: *std.ArrayListUnmanaged(u8),
|
||||
out: *ArrayList(u8),
|
||||
decl_index: Decl.Index,
|
||||
param_node: Ast.Node.Index,
|
||||
) !void {
|
||||
@ -649,7 +651,7 @@ export fn decl_docs_html(decl_index: Decl.Index, short: bool) String {
|
||||
}
|
||||
|
||||
fn collect_docs(
|
||||
list: *std.ArrayListUnmanaged(u8),
|
||||
list: *ArrayList(u8),
|
||||
ast: *const Ast,
|
||||
first_doc_comment: Ast.TokenIndex,
|
||||
) Oom!void {
|
||||
@ -667,7 +669,7 @@ fn collect_docs(
|
||||
}
|
||||
|
||||
fn render_docs(
|
||||
out: *std.ArrayListUnmanaged(u8),
|
||||
out: *ArrayList(u8),
|
||||
decl_index: Decl.Index,
|
||||
first_doc_comment: Ast.TokenIndex,
|
||||
short: bool,
|
||||
@ -691,11 +693,10 @@ fn render_docs(
|
||||
defer parsed_doc.deinit(gpa);
|
||||
|
||||
const g = struct {
|
||||
var link_buffer: std.ArrayListUnmanaged(u8) = .empty;
|
||||
var link_buffer: ArrayList(u8) = .empty;
|
||||
};
|
||||
|
||||
const Writer = std.ArrayListUnmanaged(u8).Writer;
|
||||
const Renderer = markdown.Renderer(Writer, Decl.Index);
|
||||
const Renderer = markdown.Renderer(Decl.Index);
|
||||
const renderer: Renderer = .{
|
||||
.context = decl_index,
|
||||
.renderFn = struct {
|
||||
@ -703,8 +704,8 @@ fn render_docs(
|
||||
r: Renderer,
|
||||
doc: markdown.Document,
|
||||
node: markdown.Document.Node.Index,
|
||||
writer: Writer,
|
||||
) !void {
|
||||
writer: *Writer,
|
||||
) Writer.Error!void {
|
||||
const data = doc.nodes.items(.data)[@intFromEnum(node)];
|
||||
switch (doc.nodes.items(.tag)[@intFromEnum(node)]) {
|
||||
.code_span => {
|
||||
@ -712,7 +713,7 @@ fn render_docs(
|
||||
const content = doc.string(data.text.content);
|
||||
if (resolve_decl_path(r.context, content)) |resolved_decl_index| {
|
||||
g.link_buffer.clearRetainingCapacity();
|
||||
try resolveDeclLink(resolved_decl_index, &g.link_buffer);
|
||||
resolveDeclLink(resolved_decl_index, &g.link_buffer) catch return error.WriteFailed;
|
||||
|
||||
try writer.writeAll("<a href=\"#");
|
||||
_ = missing_feature_url_escape;
|
||||
@ -730,7 +731,12 @@ fn render_docs(
|
||||
}
|
||||
}.render,
|
||||
};
|
||||
try renderer.render(parsed_doc, out.writer(gpa));
|
||||
|
||||
var allocating = Writer.Allocating.fromArrayList(gpa, out);
|
||||
defer out.* = allocating.toArrayList();
|
||||
renderer.render(parsed_doc, &allocating.writer) catch |err| switch (err) {
|
||||
error.WriteFailed => return error.OutOfMemory,
|
||||
};
|
||||
}
|
||||
|
||||
fn resolve_decl_path(decl_index: Decl.Index, path: []const u8) ?Decl.Index {
|
||||
@ -827,7 +833,7 @@ export fn find_module_root(pkg: Walk.ModuleIndex) Decl.Index {
|
||||
}
|
||||
|
||||
/// Set by `set_input_string`.
|
||||
var input_string: std.ArrayListUnmanaged(u8) = .empty;
|
||||
var input_string: ArrayList(u8) = .empty;
|
||||
|
||||
export fn set_input_string(len: usize) [*]u8 {
|
||||
input_string.resize(gpa, len) catch @panic("OOM");
|
||||
@ -849,7 +855,7 @@ export fn find_decl() Decl.Index {
|
||||
if (result != .none) return result;
|
||||
|
||||
const g = struct {
|
||||
var match_fqn: std.ArrayListUnmanaged(u8) = .empty;
|
||||
var match_fqn: ArrayList(u8) = .empty;
|
||||
};
|
||||
for (Walk.decls.items, 0..) |*decl, decl_index| {
|
||||
g.match_fqn.clearRetainingCapacity();
|
||||
@ -905,7 +911,7 @@ export fn type_fn_members(parent: Decl.Index, include_private: bool) Slice(Decl.
|
||||
|
||||
export fn namespace_members(parent: Decl.Index, include_private: bool) Slice(Decl.Index) {
|
||||
const g = struct {
|
||||
var members: std.ArrayListUnmanaged(Decl.Index) = .empty;
|
||||
var members: ArrayList(Decl.Index) = .empty;
|
||||
};
|
||||
|
||||
g.members.clearRetainingCapacity();
|
||||
|
||||
@ -2,25 +2,26 @@ const std = @import("std");
const Document = @import("Document.zig");
const Node = Document.Node;
const assert = std.debug.assert;
const Writer = std.Io.Writer;

/// A Markdown document renderer.
///
/// Each concrete `Renderer` type has a `renderDefault` function, with the
/// intention that custom `renderFn` implementations can call `renderDefault`
/// for node types for which they require no special rendering.
pub fn Renderer(comptime Writer: type, comptime Context: type) type {
pub fn Renderer(comptime Context: type) type {
return struct {
renderFn: *const fn (
r: Self,
doc: Document,
node: Node.Index,
writer: Writer,
writer: *Writer,
) Writer.Error!void = renderDefault,
context: Context,

const Self = @This();

pub fn render(r: Self, doc: Document, writer: Writer) Writer.Error!void {
pub fn render(r: Self, doc: Document, writer: *Writer) Writer.Error!void {
try r.renderFn(r, doc, .root, writer);
}

@ -28,7 +29,7 @@ pub fn Renderer(comptime Writer: type, comptime Context: type) type {
r: Self,
doc: Document,
node: Node.Index,
writer: Writer,
writer: *Writer,
) Writer.Error!void {
const data = doc.nodes.items(.data)[@intFromEnum(node)];
switch (doc.nodes.items(.tag)[@intFromEnum(node)]) {
@ -188,8 +189,8 @@ pub fn Renderer(comptime Writer: type, comptime Context: type) type {
pub fn renderInlineNodeText(
doc: Document,
node: Node.Index,
writer: anytype,
) @TypeOf(writer).Error!void {
writer: *Writer,
) Writer.Error!void {
const data = doc.nodes.items(.data)[@intFromEnum(node)];
switch (doc.nodes.items(.tag)[@intFromEnum(node)]) {
.root,
@ -234,14 +235,12 @@ pub fn fmtHtml(bytes: []const u8) std.fmt.Formatter([]const u8, formatHtml) {
return .{ .data = bytes };
}

fn formatHtml(bytes: []const u8, writer: *std.io.Writer) std.io.Writer.Error!void {
for (bytes) |b| {
switch (b) {
'<' => try writer.writeAll("&lt;"),
'>' => try writer.writeAll("&gt;"),
'&' => try writer.writeAll("&amp;"),
'"' => try writer.writeAll("&quot;"),
else => try writer.writeByte(b),
}
}
fn formatHtml(bytes: []const u8, w: *Writer) Writer.Error!void {
for (bytes) |b| switch (b) {
'<' => try w.writeAll("&lt;"),
'>' => try w.writeAll("&gt;"),
'&' => try w.writeAll("&amp;"),
'"' => try w.writeAll("&quot;"),
else => try w.writeByte(b),
};
}

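The `Renderer` factory now takes only the context type and hands implementations a `*Writer`. A minimal sketch of a custom renderer under the new signature follows; the `void` context, the CSS class, and the special-cased node tag are illustrative assumptions rather than code from this diff.

const MyRenderer = Renderer(void);
const my_renderer: MyRenderer = .{
    .context = {},
    .renderFn = struct {
        fn render(r: MyRenderer, doc: Document, node: Node.Index, writer: *Writer) Writer.Error!void {
            switch (doc.nodes.items(.tag)[@intFromEnum(node)]) {
                .code_span => {
                    // Illustrative special case; HTML escaping is omitted for brevity.
                    const data = doc.nodes.items(.data)[@intFromEnum(node)];
                    try writer.writeAll("<code class=\"docs-code\">");
                    try writer.writeAll(doc.string(data.text.content));
                    try writer.writeAll("</code>");
                },
                // Everything else falls back to the stock rendering.
                else => try MyRenderer.renderDefault(r, doc, node, writer),
            }
        }
    }.render,
};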
@ -257,7 +257,7 @@ const Check = struct {
|
||||
fn dumpSection(allocator: Allocator, name: [:0]const u8) Check {
|
||||
var check = Check.create(allocator, .dump_section);
|
||||
const off: u32 = @intCast(check.data.items.len);
|
||||
check.data.writer().print("{s}\x00", .{name}) catch @panic("OOM");
|
||||
check.data.print("{s}\x00", .{name}) catch @panic("OOM");
|
||||
check.payload = .{ .dump_section = off };
|
||||
return check;
|
||||
}
|
||||
@ -1320,7 +1320,8 @@ const MachODumper = struct {
|
||||
}
|
||||
bindings.deinit();
|
||||
}
|
||||
try ctx.parseBindInfo(data, &bindings);
|
||||
var data_reader: std.Io.Reader = .fixed(data);
|
||||
try ctx.parseBindInfo(&data_reader, &bindings);
|
||||
mem.sort(Binding, bindings.items, {}, Binding.lessThan);
|
||||
for (bindings.items) |binding| {
|
||||
try writer.print("0x{x} [addend: {d}]", .{ binding.address, binding.addend });
|
||||
@ -1335,11 +1336,7 @@ const MachODumper = struct {
|
||||
}
|
||||
}
|
||||
|
||||
fn parseBindInfo(ctx: ObjectContext, data: []const u8, bindings: *std.array_list.Managed(Binding)) !void {
|
||||
var stream = std.io.fixedBufferStream(data);
|
||||
var creader = std.io.countingReader(stream.reader());
|
||||
const reader = creader.reader();
|
||||
|
||||
fn parseBindInfo(ctx: ObjectContext, reader: *std.Io.Reader, bindings: *std.array_list.Managed(Binding)) !void {
|
||||
var seg_id: ?u8 = null;
|
||||
var tag: Binding.Tag = .self;
|
||||
var ordinal: u16 = 0;
|
||||
@ -1350,7 +1347,7 @@ const MachODumper = struct {
|
||||
defer name_buf.deinit();
|
||||
|
||||
while (true) {
|
||||
const byte = reader.readByte() catch break;
|
||||
const byte = reader.takeByte() catch break;
|
||||
const opc = byte & macho.BIND_OPCODE_MASK;
|
||||
const imm = byte & macho.BIND_IMMEDIATE_MASK;
|
||||
switch (opc) {
|
||||
@ -1371,18 +1368,17 @@ const MachODumper = struct {
|
||||
},
|
||||
macho.BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB => {
|
||||
seg_id = imm;
|
||||
offset = try std.leb.readUleb128(u64, reader);
|
||||
offset = try reader.takeLeb128(u64);
|
||||
},
|
||||
macho.BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM => {
|
||||
name_buf.clearRetainingCapacity();
|
||||
try reader.readUntilDelimiterArrayList(&name_buf, 0, std.math.maxInt(u32));
|
||||
try name_buf.append(0);
|
||||
try name_buf.appendSlice(try reader.takeDelimiterInclusive(0));
|
||||
},
|
||||
macho.BIND_OPCODE_SET_ADDEND_SLEB => {
|
||||
addend = try std.leb.readIleb128(i64, reader);
|
||||
addend = try reader.takeLeb128(i64);
|
||||
},
|
||||
macho.BIND_OPCODE_ADD_ADDR_ULEB => {
|
||||
const x = try std.leb.readUleb128(u64, reader);
|
||||
const x = try reader.takeLeb128(u64);
|
||||
offset = @intCast(@as(i64, @intCast(offset)) + @as(i64, @bitCast(x)));
|
||||
},
|
||||
macho.BIND_OPCODE_DO_BIND,
|
||||
@ -1397,14 +1393,14 @@ const MachODumper = struct {
|
||||
switch (opc) {
|
||||
macho.BIND_OPCODE_DO_BIND => {},
|
||||
macho.BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB => {
|
||||
add_addr = try std.leb.readUleb128(u64, reader);
|
||||
add_addr = try reader.takeLeb128(u64);
|
||||
},
|
||||
macho.BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED => {
|
||||
add_addr = imm * @sizeOf(u64);
|
||||
},
|
||||
macho.BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB => {
|
||||
count = try std.leb.readUleb128(u64, reader);
|
||||
skip = try std.leb.readUleb128(u64, reader);
|
||||
count = try reader.takeLeb128(u64);
|
||||
skip = try reader.takeLeb128(u64);
|
||||
},
|
||||
else => unreachable,
|
||||
}
|
||||
@ -1621,8 +1617,9 @@ const MachODumper = struct {
|
||||
var ctx = ObjectContext{ .gpa = gpa, .data = bytes, .header = hdr };
|
||||
try ctx.parse();
|
||||
|
||||
var output = std.array_list.Managed(u8).init(gpa);
|
||||
const writer = output.writer();
|
||||
var output: std.Io.Writer.Allocating = .init(gpa);
|
||||
defer output.deinit();
|
||||
const writer = &output.writer;
|
||||
|
||||
switch (check.kind) {
|
||||
.headers => {
|
||||
@ -1787,8 +1784,9 @@ const ElfDumper = struct {
|
||||
try ctx.objects.append(gpa, .{ .name = name, .off = stream.pos, .len = size });
|
||||
}
|
||||
|
||||
var output = std.array_list.Managed(u8).init(gpa);
|
||||
const writer = output.writer();
|
||||
var output: std.Io.Writer.Allocating = .init(gpa);
|
||||
defer output.deinit();
|
||||
const writer = &output.writer;
|
||||
|
||||
switch (check.kind) {
|
||||
.archive_symtab => if (ctx.symtab.items.len > 0) {
|
||||
@ -1944,8 +1942,9 @@ const ElfDumper = struct {
|
||||
else => {},
|
||||
};
|
||||
|
||||
var output = std.array_list.Managed(u8).init(gpa);
|
||||
const writer = output.writer();
|
||||
var output: std.Io.Writer.Allocating = .init(gpa);
|
||||
defer output.deinit();
|
||||
const writer = &output.writer;
|
||||
|
||||
switch (check.kind) {
|
||||
.headers => {
|
||||
@ -2398,10 +2397,10 @@ const WasmDumper = struct {
|
||||
return error.UnsupportedWasmVersion;
|
||||
}
|
||||
|
||||
var output = std.array_list.Managed(u8).init(gpa);
|
||||
var output: std.Io.Writer.Allocating = .init(gpa);
|
||||
defer output.deinit();
|
||||
parseAndDumpInner(step, check, bytes, &fbs, &output) catch |err| switch (err) {
|
||||
error.EndOfStream => try output.appendSlice("\n<UnexpectedEndOfStream>"),
|
||||
parseAndDumpInner(step, check, bytes, &fbs, &output.writer) catch |err| switch (err) {
|
||||
error.EndOfStream => try output.writer.writeAll("\n<UnexpectedEndOfStream>"),
|
||||
else => |e| return e,
|
||||
};
|
||||
return output.toOwnedSlice();
|
||||
@ -2412,10 +2411,9 @@ const WasmDumper = struct {
|
||||
check: Check,
|
||||
bytes: []const u8,
|
||||
fbs: *std.io.FixedBufferStream([]const u8),
|
||||
output: *std.array_list.Managed(u8),
|
||||
writer: *std.Io.Writer,
|
||||
) !void {
|
||||
const reader = fbs.reader();
|
||||
const writer = output.writer();
|
||||
|
||||
switch (check.kind) {
|
||||
.headers => {
|
||||
|
||||
163
lib/std/Io.zig
163
lib/std/Io.zig
@ -144,61 +144,6 @@ pub fn GenericReader(
|
||||
return @errorCast(self.any().readAllAlloc(allocator, max_size));
|
||||
}
|
||||
|
||||
pub inline fn readUntilDelimiterArrayList(
|
||||
self: Self,
|
||||
array_list: *std.array_list.Managed(u8),
|
||||
delimiter: u8,
|
||||
max_size: usize,
|
||||
) (NoEofError || Allocator.Error || error{StreamTooLong})!void {
|
||||
return @errorCast(self.any().readUntilDelimiterArrayList(
|
||||
array_list,
|
||||
delimiter,
|
||||
max_size,
|
||||
));
|
||||
}
|
||||
|
||||
pub inline fn readUntilDelimiterAlloc(
|
||||
self: Self,
|
||||
allocator: Allocator,
|
||||
delimiter: u8,
|
||||
max_size: usize,
|
||||
) (NoEofError || Allocator.Error || error{StreamTooLong})![]u8 {
|
||||
return @errorCast(self.any().readUntilDelimiterAlloc(
|
||||
allocator,
|
||||
delimiter,
|
||||
max_size,
|
||||
));
|
||||
}
|
||||
|
||||
pub inline fn readUntilDelimiter(
|
||||
self: Self,
|
||||
buf: []u8,
|
||||
delimiter: u8,
|
||||
) (NoEofError || error{StreamTooLong})![]u8 {
|
||||
return @errorCast(self.any().readUntilDelimiter(buf, delimiter));
|
||||
}
|
||||
|
||||
pub inline fn readUntilDelimiterOrEofAlloc(
|
||||
self: Self,
|
||||
allocator: Allocator,
|
||||
delimiter: u8,
|
||||
max_size: usize,
|
||||
) (Error || Allocator.Error || error{StreamTooLong})!?[]u8 {
|
||||
return @errorCast(self.any().readUntilDelimiterOrEofAlloc(
|
||||
allocator,
|
||||
delimiter,
|
||||
max_size,
|
||||
));
|
||||
}
|
||||
|
||||
pub inline fn readUntilDelimiterOrEof(
|
||||
self: Self,
|
||||
buf: []u8,
|
||||
delimiter: u8,
|
||||
) (Error || error{StreamTooLong})!?[]u8 {
|
||||
return @errorCast(self.any().readUntilDelimiterOrEof(buf, delimiter));
|
||||
}
|
||||
|
||||
pub inline fn streamUntilDelimiter(
|
||||
self: Self,
|
||||
writer: anytype,
|
||||
@ -326,103 +271,8 @@ pub fn GenericReader(
|
||||
};
|
||||
}
|
||||
|
||||
/// Deprecated in favor of `Writer`.
|
||||
pub fn GenericWriter(
|
||||
comptime Context: type,
|
||||
comptime WriteError: type,
|
||||
comptime writeFn: fn (context: Context, bytes: []const u8) WriteError!usize,
|
||||
) type {
|
||||
return struct {
|
||||
context: Context,
|
||||
|
||||
const Self = @This();
|
||||
pub const Error = WriteError;
|
||||
|
||||
pub inline fn write(self: Self, bytes: []const u8) Error!usize {
|
||||
return writeFn(self.context, bytes);
|
||||
}
|
||||
|
||||
pub inline fn writeAll(self: Self, bytes: []const u8) Error!void {
|
||||
return @errorCast(self.any().writeAll(bytes));
|
||||
}
|
||||
|
||||
pub inline fn print(self: Self, comptime format: []const u8, args: anytype) Error!void {
|
||||
return @errorCast(self.any().print(format, args));
|
||||
}
|
||||
|
||||
pub inline fn writeByte(self: Self, byte: u8) Error!void {
|
||||
return @errorCast(self.any().writeByte(byte));
|
||||
}
|
||||
|
||||
pub inline fn writeByteNTimes(self: Self, byte: u8, n: usize) Error!void {
|
||||
return @errorCast(self.any().writeByteNTimes(byte, n));
|
||||
}
|
||||
|
||||
pub inline fn writeBytesNTimes(self: Self, bytes: []const u8, n: usize) Error!void {
|
||||
return @errorCast(self.any().writeBytesNTimes(bytes, n));
|
||||
}
|
||||
|
||||
pub inline fn writeInt(self: Self, comptime T: type, value: T, endian: std.builtin.Endian) Error!void {
|
||||
return @errorCast(self.any().writeInt(T, value, endian));
|
||||
}
|
||||
|
||||
pub inline fn writeStruct(self: Self, value: anytype) Error!void {
|
||||
return @errorCast(self.any().writeStruct(value));
|
||||
}
|
||||
|
||||
pub inline fn writeStructEndian(self: Self, value: anytype, endian: std.builtin.Endian) Error!void {
|
||||
return @errorCast(self.any().writeStructEndian(value, endian));
|
||||
}
|
||||
|
||||
pub inline fn any(self: *const Self) AnyWriter {
|
||||
return .{
|
||||
.context = @ptrCast(&self.context),
|
||||
.writeFn = typeErasedWriteFn,
|
||||
};
|
||||
}
|
||||
|
||||
fn typeErasedWriteFn(context: *const anyopaque, bytes: []const u8) anyerror!usize {
|
||||
const ptr: *const Context = @ptrCast(@alignCast(context));
|
||||
return writeFn(ptr.*, bytes);
|
||||
}
|
||||
|
||||
/// Helper for bridging to the new `Writer` API while upgrading.
|
||||
pub fn adaptToNewApi(self: *const Self, buffer: []u8) Adapter {
|
||||
return .{
|
||||
.derp_writer = self.*,
|
||||
.new_interface = .{
|
||||
.buffer = buffer,
|
||||
.vtable = &.{ .drain = Adapter.drain },
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
pub const Adapter = struct {
|
||||
derp_writer: Self,
|
||||
new_interface: Writer,
|
||||
err: ?Error = null,
|
||||
|
||||
fn drain(w: *std.io.Writer, data: []const []const u8, splat: usize) std.io.Writer.Error!usize {
|
||||
_ = splat;
|
||||
const a: *@This() = @alignCast(@fieldParentPtr("new_interface", w));
|
||||
const buffered = w.buffered();
|
||||
if (buffered.len != 0) return w.consume(a.derp_writer.write(buffered) catch |err| {
|
||||
a.err = err;
|
||||
return error.WriteFailed;
|
||||
});
|
||||
return a.derp_writer.write(data[0]) catch |err| {
|
||||
a.err = err;
|
||||
return error.WriteFailed;
|
||||
};
|
||||
}
|
||||
};
|
||||
};
|
||||
}
|
||||
|
||||
/// Deprecated in favor of `Reader`.
|
||||
pub const AnyReader = @import("Io/DeprecatedReader.zig");
|
||||
/// Deprecated in favor of `Writer`.
|
||||
pub const AnyWriter = @import("Io/DeprecatedWriter.zig");
|
||||
/// Deprecated in favor of `Reader`.
|
||||
pub const FixedBufferStream = @import("Io/fixed_buffer_stream.zig").FixedBufferStream;
|
||||
/// Deprecated in favor of `Reader`.
|
||||
@ -434,19 +284,6 @@ pub const countingReader = @import("Io/counting_reader.zig").countingReader;
|
||||
|
||||
pub const tty = @import("Io/tty.zig");
|
||||
|
||||
/// Deprecated in favor of `Writer.Discarding`.
|
||||
pub const null_writer: NullWriter = .{ .context = {} };
|
||||
/// Deprecated in favor of `Writer.Discarding`.
|
||||
pub const NullWriter = GenericWriter(void, error{}, dummyWrite);
|
||||
fn dummyWrite(context: void, data: []const u8) error{}!usize {
|
||||
_ = context;
|
||||
return data.len;
|
||||
}
|
||||
|
||||
test null_writer {
|
||||
null_writer.writeAll("yay" ** 10) catch |err| switch (err) {};
|
||||
}
|
||||
|
||||
pub fn poll(
|
||||
gpa: Allocator,
|
||||
comptime StreamEnum: type,
|
||||
|
||||
@ -93,100 +93,6 @@ pub fn readAllAlloc(self: Self, allocator: mem.Allocator, max_size: usize) anyer
|
||||
return try array_list.toOwnedSlice();
|
||||
}
|
||||
|
||||
/// Deprecated: use `streamUntilDelimiter` with ArrayList's writer instead.
|
||||
/// Replaces the `std.array_list.Managed` contents by reading from the stream until `delimiter` is found.
|
||||
/// Does not include the delimiter in the result.
|
||||
/// If the `std.array_list.Managed` length would exceed `max_size`, `error.StreamTooLong` is returned and the
|
||||
/// `std.array_list.Managed` is populated with `max_size` bytes from the stream.
|
||||
pub fn readUntilDelimiterArrayList(
|
||||
self: Self,
|
||||
array_list: *std.array_list.Managed(u8),
|
||||
delimiter: u8,
|
||||
max_size: usize,
|
||||
) anyerror!void {
|
||||
array_list.shrinkRetainingCapacity(0);
|
||||
try self.streamUntilDelimiter(array_list.writer(), delimiter, max_size);
|
||||
}
|
||||
|
||||
/// Deprecated: use `streamUntilDelimiter` with ArrayList's writer instead.
|
||||
/// Allocates enough memory to read until `delimiter`. If the allocated
|
||||
/// memory would be greater than `max_size`, returns `error.StreamTooLong`.
|
||||
/// Caller owns returned memory.
|
||||
/// If this function returns an error, the contents from the stream read so far are lost.
|
||||
pub fn readUntilDelimiterAlloc(
|
||||
self: Self,
|
||||
allocator: mem.Allocator,
|
||||
delimiter: u8,
|
||||
max_size: usize,
|
||||
) anyerror![]u8 {
|
||||
var array_list = std.array_list.Managed(u8).init(allocator);
|
||||
defer array_list.deinit();
|
||||
try self.streamUntilDelimiter(array_list.writer(), delimiter, max_size);
|
||||
return try array_list.toOwnedSlice();
|
||||
}
|
||||
|
||||
/// Deprecated: use `streamUntilDelimiter` with FixedBufferStream's writer instead.
|
||||
/// Reads from the stream until specified byte is found. If the buffer is not
|
||||
/// large enough to hold the entire contents, `error.StreamTooLong` is returned.
|
||||
/// If end-of-stream is found, `error.EndOfStream` is returned.
|
||||
/// Returns a slice of the stream data, with ptr equal to `buf.ptr`. The
|
||||
/// delimiter byte is written to the output buffer but is not included
|
||||
/// in the returned slice.
|
||||
pub fn readUntilDelimiter(self: Self, buf: []u8, delimiter: u8) anyerror![]u8 {
|
||||
var fbs = std.io.fixedBufferStream(buf);
|
||||
try self.streamUntilDelimiter(fbs.writer(), delimiter, fbs.buffer.len);
|
||||
const output = fbs.getWritten();
|
||||
buf[output.len] = delimiter; // emulating old behaviour
|
||||
return output;
|
||||
}
|
||||
|
||||
/// Deprecated: use `streamUntilDelimiter` with ArrayList's (or any other's) writer instead.
|
||||
/// Allocates enough memory to read until `delimiter` or end-of-stream.
|
||||
/// If the allocated memory would be greater than `max_size`, returns
|
||||
/// `error.StreamTooLong`. If end-of-stream is found, returns the rest
|
||||
/// of the stream. If this function is called again after that, returns
|
||||
/// null.
|
||||
/// Caller owns returned memory.
|
||||
/// If this function returns an error, the contents from the stream read so far are lost.
|
||||
pub fn readUntilDelimiterOrEofAlloc(
|
||||
self: Self,
|
||||
allocator: mem.Allocator,
|
||||
delimiter: u8,
|
||||
max_size: usize,
|
||||
) anyerror!?[]u8 {
|
||||
var array_list = std.array_list.Managed(u8).init(allocator);
|
||||
defer array_list.deinit();
|
||||
self.streamUntilDelimiter(array_list.writer(), delimiter, max_size) catch |err| switch (err) {
|
||||
error.EndOfStream => if (array_list.items.len == 0) {
|
||||
return null;
|
||||
},
|
||||
else => |e| return e,
|
||||
};
|
||||
return try array_list.toOwnedSlice();
|
||||
}
|
||||
|
||||
/// Deprecated: use `streamUntilDelimiter` with FixedBufferStream's writer instead.
|
||||
/// Reads from the stream until specified byte is found. If the buffer is not
|
||||
/// large enough to hold the entire contents, `error.StreamTooLong` is returned.
|
||||
/// If end-of-stream is found, returns the rest of the stream. If this
|
||||
/// function is called again after that, returns null.
|
||||
/// Returns a slice of the stream data, with ptr equal to `buf.ptr`. The
|
||||
/// delimiter byte is written to the output buffer but is not included
|
||||
/// in the returned slice.
|
||||
pub fn readUntilDelimiterOrEof(self: Self, buf: []u8, delimiter: u8) anyerror!?[]u8 {
|
||||
var fbs = std.io.fixedBufferStream(buf);
|
||||
self.streamUntilDelimiter(fbs.writer(), delimiter, fbs.buffer.len) catch |err| switch (err) {
|
||||
error.EndOfStream => if (fbs.getWritten().len == 0) {
|
||||
return null;
|
||||
},
|
||||
|
||||
else => |e| return e,
|
||||
};
|
||||
const output = fbs.getWritten();
|
||||
buf[output.len] = delimiter; // emulating old behaviour
|
||||
return output;
|
||||
}
|
||||
|
||||
/// Appends to the `writer` contents by reading from the stream until `delimiter` is found.
|
||||
/// Does not write the delimiter itself.
|
||||
/// If `optional_max_size` is not null and amount of written bytes exceeds `optional_max_size`,
|
||||
@ -384,7 +290,3 @@ const mem = std.mem;
|
||||
const testing = std.testing;
|
||||
const native_endian = @import("builtin").target.cpu.arch.endian();
|
||||
const Alignment = std.mem.Alignment;
|
||||
|
||||
test {
|
||||
_ = @import("Reader/test.zig");
|
||||
}
|
||||
|
||||
@ -1,114 +0,0 @@
|
||||
const std = @import("../std.zig");
|
||||
const assert = std.debug.assert;
|
||||
const mem = std.mem;
|
||||
const native_endian = @import("builtin").target.cpu.arch.endian();
|
||||
|
||||
context: *const anyopaque,
|
||||
writeFn: *const fn (context: *const anyopaque, bytes: []const u8) anyerror!usize,
|
||||
|
||||
const Self = @This();
|
||||
pub const Error = anyerror;
|
||||
|
||||
pub fn write(self: Self, bytes: []const u8) anyerror!usize {
|
||||
return self.writeFn(self.context, bytes);
|
||||
}
|
||||
|
||||
pub fn writeAll(self: Self, bytes: []const u8) anyerror!void {
|
||||
var index: usize = 0;
|
||||
while (index != bytes.len) {
|
||||
index += try self.write(bytes[index..]);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn print(self: Self, comptime format: []const u8, args: anytype) anyerror!void {
|
||||
return std.fmt.format(self, format, args);
|
||||
}
|
||||
|
||||
pub fn writeByte(self: Self, byte: u8) anyerror!void {
|
||||
const array = [1]u8{byte};
|
||||
return self.writeAll(&array);
|
||||
}
|
||||
|
||||
pub fn writeByteNTimes(self: Self, byte: u8, n: usize) anyerror!void {
|
||||
var bytes: [256]u8 = undefined;
|
||||
@memset(bytes[0..], byte);
|
||||
|
||||
var remaining: usize = n;
|
||||
while (remaining > 0) {
|
||||
const to_write = @min(remaining, bytes.len);
|
||||
try self.writeAll(bytes[0..to_write]);
|
||||
remaining -= to_write;
|
||||
}
|
||||
}
|
||||
|
||||
pub fn writeBytesNTimes(self: Self, bytes: []const u8, n: usize) anyerror!void {
|
||||
var i: usize = 0;
|
||||
while (i < n) : (i += 1) {
|
||||
try self.writeAll(bytes);
|
||||
}
|
||||
}
|
||||
|
||||
pub inline fn writeInt(self: Self, comptime T: type, value: T, endian: std.builtin.Endian) anyerror!void {
|
||||
var bytes: [@divExact(@typeInfo(T).int.bits, 8)]u8 = undefined;
|
||||
mem.writeInt(std.math.ByteAlignedInt(@TypeOf(value)), &bytes, value, endian);
|
||||
return self.writeAll(&bytes);
|
||||
}
|
||||
|
||||
pub fn writeStruct(self: Self, value: anytype) anyerror!void {
|
||||
// Only extern and packed structs have defined in-memory layout.
|
||||
comptime assert(@typeInfo(@TypeOf(value)).@"struct".layout != .auto);
|
||||
return self.writeAll(mem.asBytes(&value));
|
||||
}
|
||||
|
||||
pub fn writeStructEndian(self: Self, value: anytype, endian: std.builtin.Endian) anyerror!void {
|
||||
// TODO: make sure this value is not a reference type
|
||||
if (native_endian == endian) {
|
||||
return self.writeStruct(value);
|
||||
} else {
|
||||
var copy = value;
|
||||
mem.byteSwapAllFields(@TypeOf(value), ©);
|
||||
return self.writeStruct(copy);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn writeFile(self: Self, file: std.fs.File) anyerror!void {
|
||||
// TODO: figure out how to adjust std lib abstractions so that this ends up
|
||||
// doing sendfile or maybe even copy_file_range under the right conditions.
|
||||
var buf: [4000]u8 = undefined;
|
||||
while (true) {
|
||||
const n = try file.readAll(&buf);
|
||||
try self.writeAll(buf[0..n]);
|
||||
if (n < buf.len) return;
|
||||
}
|
||||
}
|
||||
|
||||
/// Helper for bridging to the new `Writer` API while upgrading.
|
||||
pub fn adaptToNewApi(self: *const Self, buffer: []u8) Adapter {
|
||||
return .{
|
||||
.derp_writer = self.*,
|
||||
.new_interface = .{
|
||||
.buffer = buffer,
|
||||
.vtable = &.{ .drain = Adapter.drain },
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
pub const Adapter = struct {
|
||||
derp_writer: Self,
|
||||
new_interface: std.io.Writer,
|
||||
err: ?Error = null,
|
||||
|
||||
fn drain(w: *std.io.Writer, data: []const []const u8, splat: usize) std.io.Writer.Error!usize {
|
||||
_ = splat;
|
||||
const a: *@This() = @alignCast(@fieldParentPtr("new_interface", w));
|
||||
const buffered = w.buffered();
|
||||
if (buffered.len != 0) return w.consume(a.derp_writer.write(buffered) catch |err| {
|
||||
a.err = err;
|
||||
return error.WriteFailed;
|
||||
});
|
||||
return a.derp_writer.write(data[0]) catch |err| {
|
||||
a.err = err;
|
||||
return error.WriteFailed;
|
||||
};
|
||||
}
|
||||
};
|
||||
@ -143,8 +143,8 @@ pub const failing: Reader = .{

/// This is generally safe to `@constCast` because it has an empty buffer, so
/// there is not really a way to accidentally attempt mutation of these fields.
const ending_state: Reader = .fixed(&.{});
pub const ending: *Reader = @constCast(&ending_state);
pub const ending_instance: Reader = .fixed(&.{});
pub const ending: *Reader = @constCast(&ending_instance);

pub fn limited(r: *Reader, limit: Limit, buffer: []u8) Limited {
return .init(r, limit, buffer);
@ -784,7 +784,7 @@ pub fn peekDelimiterInclusive(r: *Reader, delimiter: u8) DelimiterError![]u8 {
}

/// Returns a slice of the next bytes of buffered data from the stream until
/// `delimiter` is found, advancing the seek position.
/// `delimiter` is found, advancing the seek position up to the delimiter.
///
/// Returned slice excludes the delimiter. End-of-stream is treated equivalent
/// to a delimiter, unless it would result in a length 0 return value, in which
@ -814,6 +814,37 @@ pub fn takeDelimiterExclusive(r: *Reader, delimiter: u8) DelimiterError![]u8 {
return result[0 .. result.len - 1];
}

/// Returns a slice of the next bytes of buffered data from the stream until
/// `delimiter` is found, advancing the seek position past the delimiter.
///
/// Returned slice excludes the delimiter. End-of-stream is treated equivalent
/// to a delimiter, unless it would result in a length 0 return value, in which
/// case `null` is returned instead.
///
/// If the delimiter is not found within a number of bytes matching the
/// capacity of this `Reader`, `error.StreamTooLong` is returned. In
/// such case, the stream state is unmodified as if this function was never
/// called.
///
/// Invalidates previously returned values from `peek`.
///
/// See also:
/// * `takeDelimiterInclusive`
/// * `takeDelimiterExclusive`
pub fn takeDelimiter(r: *Reader, delimiter: u8) error{ ReadFailed, StreamTooLong }!?[]u8 {
const result = r.peekDelimiterInclusive(delimiter) catch |err| switch (err) {
error.EndOfStream => {
const remaining = r.buffer[r.seek..r.end];
if (remaining.len == 0) return null;
r.toss(remaining.len);
return remaining;
},
else => |e| return e,
};
r.toss(result.len + 1);
return result[0 .. result.len - 1];
}

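A small usage sketch of `takeDelimiter` as documented above, iterating delimiter-separated tokens until the stream is exhausted; the fixed input and the debug print are illustrative assumptions.

// Illustrative only: read '\n'-separated tokens from a fixed buffer.
var r: Reader = .fixed("one\ntwo\nthree");
while (try r.takeDelimiter('\n')) |token| {
    std.debug.print("{s}\n", .{token});
}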
/// Returns a slice of the next bytes of buffered data from the stream until
|
||||
/// `delimiter` is found, without advancing the seek position.
|
||||
///
|
||||
@ -846,6 +877,8 @@ pub fn peekDelimiterExclusive(r: *Reader, delimiter: u8) DelimiterError![]u8 {
|
||||
/// Appends to `w` contents by reading from the stream until `delimiter` is
|
||||
/// found. Does not write the delimiter itself.
|
||||
///
|
||||
/// Does not discard the delimiter from the `Reader`.
|
||||
///
|
||||
/// Returns number of bytes streamed, which may be zero, or error.EndOfStream
|
||||
/// if the delimiter was not found.
|
||||
///
|
||||
@ -899,6 +932,8 @@ pub const StreamDelimiterLimitError = error{
|
||||
/// Appends to `w` contents by reading from the stream until `delimiter` is found.
|
||||
/// Does not write the delimiter itself.
|
||||
///
|
||||
/// Does not discard the delimiter from the `Reader`.
|
||||
///
|
||||
/// Returns number of bytes streamed, which may be zero. End of stream can be
|
||||
/// detected by checking if the next byte in the stream is the delimiter.
|
||||
///
|
||||
@ -1128,7 +1163,11 @@ pub inline fn takeStruct(r: *Reader, comptime T: type, endian: std.builtin.Endia
|
||||
.@"struct" => |info| switch (info.layout) {
|
||||
.auto => @compileError("ill-defined memory layout"),
|
||||
.@"extern" => {
|
||||
var res = (try r.takeStructPointer(T)).*;
|
||||
// This code works around https://github.com/ziglang/zig/issues/25067
|
||||
// by avoiding a call to `peekStructPointer`.
|
||||
const struct_bytes = try r.takeArray(@sizeOf(T));
|
||||
var res: T = undefined;
|
||||
@memcpy(@as([]u8, @ptrCast(&res)), struct_bytes);
|
||||
if (native_endian != endian) std.mem.byteSwapAllFields(T, &res);
|
||||
return res;
|
||||
},
|
||||
@ -1153,7 +1192,11 @@ pub inline fn peekStruct(r: *Reader, comptime T: type, endian: std.builtin.Endia
|
||||
.@"struct" => |info| switch (info.layout) {
|
||||
.auto => @compileError("ill-defined memory layout"),
|
||||
.@"extern" => {
|
||||
var res = (try r.peekStructPointer(T)).*;
|
||||
// This code works around https://github.com/ziglang/zig/issues/25067
|
||||
// by avoiding a call to `peekStructPointer`.
|
||||
const struct_bytes = try r.peekArray(@sizeOf(T));
|
||||
var res: T = undefined;
|
||||
@memcpy(@as([]u8, @ptrCast(&res)), struct_bytes);
|
||||
if (native_endian != endian) std.mem.byteSwapAllFields(T, &res);
|
||||
return res;
|
||||
},
|
||||
|
||||
@ -1,351 +0,0 @@
|
||||
const builtin = @import("builtin");
|
||||
const std = @import("../../std.zig");
|
||||
const testing = std.testing;
|
||||
|
||||
test "Reader" {
|
||||
var buf = "a\x02".*;
|
||||
var fis = std.io.fixedBufferStream(&buf);
|
||||
const reader = fis.reader();
|
||||
try testing.expect((try reader.readByte()) == 'a');
|
||||
try testing.expect((try reader.readEnum(enum(u8) {
|
||||
a = 0,
|
||||
b = 99,
|
||||
c = 2,
|
||||
d = 3,
|
||||
}, builtin.cpu.arch.endian())) == .c);
|
||||
try testing.expectError(error.EndOfStream, reader.readByte());
|
||||
}
|
||||
|
||||
test "isBytes" {
|
||||
var fis = std.io.fixedBufferStream("foobar");
|
||||
const reader = fis.reader();
|
||||
try testing.expectEqual(true, try reader.isBytes("foo"));
|
||||
try testing.expectEqual(false, try reader.isBytes("qux"));
|
||||
}
|
||||
|
||||
test "skipBytes" {
|
||||
var fis = std.io.fixedBufferStream("foobar");
|
||||
const reader = fis.reader();
|
||||
try reader.skipBytes(3, .{});
|
||||
try testing.expect(try reader.isBytes("bar"));
|
||||
try reader.skipBytes(0, .{});
|
||||
try testing.expectError(error.EndOfStream, reader.skipBytes(1, .{}));
|
||||
}
|
||||
|
||||
test "readUntilDelimiterArrayList returns ArrayLists with bytes read until the delimiter, then EndOfStream" {
|
||||
const a = std.testing.allocator;
|
||||
var list = std.array_list.Managed(u8).init(a);
|
||||
defer list.deinit();
|
||||
|
||||
var fis = std.io.fixedBufferStream("0000\n1234\n");
|
||||
const reader = fis.reader();
|
||||
|
||||
try reader.readUntilDelimiterArrayList(&list, '\n', 5);
|
||||
try std.testing.expectEqualStrings("0000", list.items);
|
||||
try reader.readUntilDelimiterArrayList(&list, '\n', 5);
|
||||
try std.testing.expectEqualStrings("1234", list.items);
|
||||
try std.testing.expectError(error.EndOfStream, reader.readUntilDelimiterArrayList(&list, '\n', 5));
|
||||
}
|
||||
|
||||
test "readUntilDelimiterArrayList returns an empty ArrayList" {
|
||||
const a = std.testing.allocator;
|
||||
var list = std.array_list.Managed(u8).init(a);
|
||||
defer list.deinit();
|
||||
|
||||
var fis = std.io.fixedBufferStream("\n");
|
||||
const reader = fis.reader();
|
||||
|
||||
try reader.readUntilDelimiterArrayList(&list, '\n', 5);
|
||||
try std.testing.expectEqualStrings("", list.items);
|
||||
}
|
||||
|
||||
test "readUntilDelimiterArrayList returns StreamTooLong, then an ArrayList with bytes read until the delimiter" {
|
||||
const a = std.testing.allocator;
|
||||
var list = std.array_list.Managed(u8).init(a);
|
||||
defer list.deinit();
|
||||
|
||||
var fis = std.io.fixedBufferStream("1234567\n");
|
||||
const reader = fis.reader();
|
||||
|
||||
try std.testing.expectError(error.StreamTooLong, reader.readUntilDelimiterArrayList(&list, '\n', 5));
|
||||
try std.testing.expectEqualStrings("12345", list.items);
|
||||
try reader.readUntilDelimiterArrayList(&list, '\n', 5);
|
||||
try std.testing.expectEqualStrings("67", list.items);
|
||||
}
|
||||
|
||||
test "readUntilDelimiterArrayList returns EndOfStream" {
|
||||
const a = std.testing.allocator;
|
||||
var list = std.array_list.Managed(u8).init(a);
|
||||
defer list.deinit();
|
||||
|
||||
var fis = std.io.fixedBufferStream("1234");
|
||||
const reader = fis.reader();
|
||||
|
||||
try std.testing.expectError(error.EndOfStream, reader.readUntilDelimiterArrayList(&list, '\n', 5));
|
||||
try std.testing.expectEqualStrings("1234", list.items);
|
||||
}
|
||||
|
||||
test "readUntilDelimiterAlloc returns ArrayLists with bytes read until the delimiter, then EndOfStream" {
|
||||
const a = std.testing.allocator;
|
||||
|
||||
var fis = std.io.fixedBufferStream("0000\n1234\n");
|
||||
const reader = fis.reader();
|
||||
|
||||
{
|
||||
const result = try reader.readUntilDelimiterAlloc(a, '\n', 5);
|
||||
defer a.free(result);
|
||||
try std.testing.expectEqualStrings("0000", result);
|
||||
}
|
||||
|
||||
{
|
||||
const result = try reader.readUntilDelimiterAlloc(a, '\n', 5);
|
||||
defer a.free(result);
|
||||
try std.testing.expectEqualStrings("1234", result);
|
||||
}
|
||||
|
||||
try std.testing.expectError(error.EndOfStream, reader.readUntilDelimiterAlloc(a, '\n', 5));
|
||||
}
|
||||
|
||||
test "readUntilDelimiterAlloc returns an empty ArrayList" {
|
||||
const a = std.testing.allocator;
|
||||
|
||||
var fis = std.io.fixedBufferStream("\n");
|
||||
const reader = fis.reader();
|
||||
|
||||
{
|
||||
const result = try reader.readUntilDelimiterAlloc(a, '\n', 5);
|
||||
defer a.free(result);
|
||||
try std.testing.expectEqualStrings("", result);
|
||||
}
|
||||
}
|
||||
|
||||
test "readUntilDelimiterAlloc returns StreamTooLong, then an ArrayList with bytes read until the delimiter" {
|
||||
const a = std.testing.allocator;
|
||||
|
||||
var fis = std.io.fixedBufferStream("1234567\n");
|
||||
const reader = fis.reader();
|
||||
|
||||
try std.testing.expectError(error.StreamTooLong, reader.readUntilDelimiterAlloc(a, '\n', 5));
|
||||
|
||||
const result = try reader.readUntilDelimiterAlloc(a, '\n', 5);
|
||||
defer a.free(result);
|
||||
try std.testing.expectEqualStrings("67", result);
|
||||
}
|
||||
|
||||
test "readUntilDelimiterAlloc returns EndOfStream" {
|
||||
const a = std.testing.allocator;
|
||||
|
||||
var fis = std.io.fixedBufferStream("1234");
|
||||
const reader = fis.reader();
|
||||
|
||||
try std.testing.expectError(error.EndOfStream, reader.readUntilDelimiterAlloc(a, '\n', 5));
|
||||
}
|
||||
|
||||
test "readUntilDelimiter returns bytes read until the delimiter" {
|
||||
var buf: [5]u8 = undefined;
|
||||
var fis = std.io.fixedBufferStream("0000\n1234\n");
|
||||
const reader = fis.reader();
|
||||
try std.testing.expectEqualStrings("0000", try reader.readUntilDelimiter(&buf, '\n'));
|
||||
try std.testing.expectEqualStrings("1234", try reader.readUntilDelimiter(&buf, '\n'));
|
||||
}
|
||||
|
||||
test "readUntilDelimiter returns an empty string" {
|
||||
var buf: [5]u8 = undefined;
|
||||
var fis = std.io.fixedBufferStream("\n");
|
||||
const reader = fis.reader();
|
||||
try std.testing.expectEqualStrings("", try reader.readUntilDelimiter(&buf, '\n'));
|
||||
}
|
||||
|
||||
test "readUntilDelimiter returns StreamTooLong, then an empty string" {
|
||||
var buf: [5]u8 = undefined;
|
||||
var fis = std.io.fixedBufferStream("12345\n");
|
||||
const reader = fis.reader();
|
||||
try std.testing.expectError(error.StreamTooLong, reader.readUntilDelimiter(&buf, '\n'));
|
||||
try std.testing.expectEqualStrings("", try reader.readUntilDelimiter(&buf, '\n'));
|
||||
}
|
||||
|
||||
test "readUntilDelimiter returns StreamTooLong, then bytes read until the delimiter" {
|
||||
var buf: [5]u8 = undefined;
|
||||
var fis = std.io.fixedBufferStream("1234567\n");
|
||||
const reader = fis.reader();
|
||||
try std.testing.expectError(error.StreamTooLong, reader.readUntilDelimiter(&buf, '\n'));
|
||||
try std.testing.expectEqualStrings("67", try reader.readUntilDelimiter(&buf, '\n'));
|
||||
}
|
||||
|
||||
test "readUntilDelimiter returns EndOfStream" {
|
||||
{
|
||||
var buf: [5]u8 = undefined;
|
||||
var fis = std.io.fixedBufferStream("");
|
||||
const reader = fis.reader();
|
||||
try std.testing.expectError(error.EndOfStream, reader.readUntilDelimiter(&buf, '\n'));
|
||||
}
|
||||
{
|
||||
var buf: [5]u8 = undefined;
|
||||
var fis = std.io.fixedBufferStream("1234");
|
||||
const reader = fis.reader();
|
||||
try std.testing.expectError(error.EndOfStream, reader.readUntilDelimiter(&buf, '\n'));
|
||||
}
|
||||
}
|
||||
|
||||
test "readUntilDelimiter returns bytes read until delimiter, then EndOfStream" {
|
||||
var buf: [5]u8 = undefined;
|
||||
var fis = std.io.fixedBufferStream("1234\n");
|
||||
const reader = fis.reader();
|
||||
try std.testing.expectEqualStrings("1234", try reader.readUntilDelimiter(&buf, '\n'));
|
||||
try std.testing.expectError(error.EndOfStream, reader.readUntilDelimiter(&buf, '\n'));
|
||||
}
|
||||
|
||||
test "readUntilDelimiter returns StreamTooLong, then EndOfStream" {
|
||||
var buf: [5]u8 = undefined;
|
||||
var fis = std.io.fixedBufferStream("12345");
|
||||
const reader = fis.reader();
|
||||
try std.testing.expectError(error.StreamTooLong, reader.readUntilDelimiter(&buf, '\n'));
|
||||
try std.testing.expectError(error.EndOfStream, reader.readUntilDelimiter(&buf, '\n'));
|
||||
}
|
||||
|
||||
test "readUntilDelimiter writes all bytes read to the output buffer" {
|
||||
var buf: [5]u8 = undefined;
|
||||
var fis = std.io.fixedBufferStream("0000\n12345");
|
||||
const reader = fis.reader();
|
||||
_ = try reader.readUntilDelimiter(&buf, '\n');
|
||||
try std.testing.expectEqualStrings("0000\n", &buf);
|
||||
try std.testing.expectError(error.StreamTooLong, reader.readUntilDelimiter(&buf, '\n'));
|
||||
try std.testing.expectEqualStrings("12345", &buf);
|
||||
}
|
||||
|
||||
test "readUntilDelimiterOrEofAlloc returns ArrayLists with bytes read until the delimiter, then EndOfStream" {
|
||||
const a = std.testing.allocator;
|
||||
|
||||
var fis = std.io.fixedBufferStream("0000\n1234\n");
|
||||
const reader = fis.reader();
|
||||
|
||||
{
|
||||
const result = (try reader.readUntilDelimiterOrEofAlloc(a, '\n', 5)).?;
|
||||
defer a.free(result);
|
||||
try std.testing.expectEqualStrings("0000", result);
|
||||
}
|
||||
|
||||
{
|
||||
const result = (try reader.readUntilDelimiterOrEofAlloc(a, '\n', 5)).?;
|
||||
defer a.free(result);
|
||||
try std.testing.expectEqualStrings("1234", result);
|
||||
}
|
||||
|
||||
try std.testing.expect((try reader.readUntilDelimiterOrEofAlloc(a, '\n', 5)) == null);
|
||||
}
|
||||
|
||||
test "readUntilDelimiterOrEofAlloc returns an empty ArrayList" {
|
||||
const a = std.testing.allocator;
|
||||
|
||||
var fis = std.io.fixedBufferStream("\n");
|
||||
const reader = fis.reader();
|
||||
|
||||
{
|
||||
const result = (try reader.readUntilDelimiterOrEofAlloc(a, '\n', 5)).?;
|
||||
defer a.free(result);
|
||||
try std.testing.expectEqualStrings("", result);
|
||||
}
|
||||
}
|
||||
|
||||
test "readUntilDelimiterOrEofAlloc returns StreamTooLong, then an ArrayList with bytes read until the delimiter" {
|
||||
const a = std.testing.allocator;
|
||||
|
||||
var fis = std.io.fixedBufferStream("1234567\n");
|
||||
const reader = fis.reader();
|
||||
|
||||
try std.testing.expectError(error.StreamTooLong, reader.readUntilDelimiterOrEofAlloc(a, '\n', 5));
|
||||
|
||||
const result = (try reader.readUntilDelimiterOrEofAlloc(a, '\n', 5)).?;
|
||||
defer a.free(result);
|
||||
try std.testing.expectEqualStrings("67", result);
|
||||
}
|
||||
|
||||
test "readUntilDelimiterOrEof returns bytes read until the delimiter" {
|
||||
var buf: [5]u8 = undefined;
|
||||
var fis = std.io.fixedBufferStream("0000\n1234\n");
|
||||
const reader = fis.reader();
|
||||
try std.testing.expectEqualStrings("0000", (try reader.readUntilDelimiterOrEof(&buf, '\n')).?);
|
||||
try std.testing.expectEqualStrings("1234", (try reader.readUntilDelimiterOrEof(&buf, '\n')).?);
|
||||
}
|
||||
|
||||
test "readUntilDelimiterOrEof returns an empty string" {
|
||||
var buf: [5]u8 = undefined;
|
||||
var fis = std.io.fixedBufferStream("\n");
|
||||
const reader = fis.reader();
|
||||
try std.testing.expectEqualStrings("", (try reader.readUntilDelimiterOrEof(&buf, '\n')).?);
|
||||
}
|
||||
|
||||
test "readUntilDelimiterOrEof returns StreamTooLong, then an empty string" {
|
||||
var buf: [5]u8 = undefined;
|
||||
var fis = std.io.fixedBufferStream("12345\n");
|
||||
const reader = fis.reader();
|
||||
try std.testing.expectError(error.StreamTooLong, reader.readUntilDelimiterOrEof(&buf, '\n'));
|
||||
try std.testing.expectEqualStrings("", (try reader.readUntilDelimiterOrEof(&buf, '\n')).?);
|
||||
}
|
||||
|
||||
test "readUntilDelimiterOrEof returns StreamTooLong, then bytes read until the delimiter" {
|
||||
var buf: [5]u8 = undefined;
|
||||
var fis = std.io.fixedBufferStream("1234567\n");
|
||||
const reader = fis.reader();
|
||||
try std.testing.expectError(error.StreamTooLong, reader.readUntilDelimiterOrEof(&buf, '\n'));
|
||||
try std.testing.expectEqualStrings("67", (try reader.readUntilDelimiterOrEof(&buf, '\n')).?);
|
||||
}
|
||||
|
||||
test "readUntilDelimiterOrEof returns null" {
|
||||
var buf: [5]u8 = undefined;
|
||||
var fis = std.io.fixedBufferStream("");
|
||||
const reader = fis.reader();
|
||||
try std.testing.expect((try reader.readUntilDelimiterOrEof(&buf, '\n')) == null);
|
||||
}
|
||||
|
||||
test "readUntilDelimiterOrEof returns bytes read until delimiter, then null" {
|
||||
var buf: [5]u8 = undefined;
|
||||
var fis = std.io.fixedBufferStream("1234\n");
|
||||
const reader = fis.reader();
|
||||
try std.testing.expectEqualStrings("1234", (try reader.readUntilDelimiterOrEof(&buf, '\n')).?);
|
||||
try std.testing.expect((try reader.readUntilDelimiterOrEof(&buf, '\n')) == null);
|
||||
}
|
||||
|
||||
test "readUntilDelimiterOrEof returns bytes read until end-of-stream" {
|
||||
var buf: [5]u8 = undefined;
|
||||
var fis = std.io.fixedBufferStream("1234");
|
||||
const reader = fis.reader();
|
||||
try std.testing.expectEqualStrings("1234", (try reader.readUntilDelimiterOrEof(&buf, '\n')).?);
|
||||
}
|
||||
|
||||
test "readUntilDelimiterOrEof returns StreamTooLong, then bytes read until end-of-stream" {
|
||||
var buf: [5]u8 = undefined;
|
||||
var fis = std.io.fixedBufferStream("1234567");
|
||||
const reader = fis.reader();
|
||||
try std.testing.expectError(error.StreamTooLong, reader.readUntilDelimiterOrEof(&buf, '\n'));
|
||||
try std.testing.expectEqualStrings("67", (try reader.readUntilDelimiterOrEof(&buf, '\n')).?);
|
||||
}
|
||||
|
||||
test "readUntilDelimiterOrEof writes all bytes read to the output buffer" {
|
||||
var buf: [5]u8 = undefined;
|
||||
var fis = std.io.fixedBufferStream("0000\n12345");
|
||||
const reader = fis.reader();
|
||||
_ = try reader.readUntilDelimiterOrEof(&buf, '\n');
|
||||
try std.testing.expectEqualStrings("0000\n", &buf);
|
||||
try std.testing.expectError(error.StreamTooLong, reader.readUntilDelimiterOrEof(&buf, '\n'));
|
||||
try std.testing.expectEqualStrings("12345", &buf);
|
||||
}
|
||||
|
||||
test "streamUntilDelimiter writes all bytes without delimiter to the output" {
|
||||
const input_string = "some_string_with_delimiter!";
|
||||
var input_fbs = std.io.fixedBufferStream(input_string);
|
||||
const reader = input_fbs.reader();
|
||||
|
||||
var output: [input_string.len]u8 = undefined;
|
||||
var output_fbs = std.io.fixedBufferStream(&output);
|
||||
const writer = output_fbs.writer();
|
||||
|
||||
try reader.streamUntilDelimiter(writer, '!', input_fbs.buffer.len);
|
||||
try std.testing.expectEqualStrings("some_string_with_delimiter", output_fbs.getWritten());
|
||||
try std.testing.expectError(error.EndOfStream, reader.streamUntilDelimiter(writer, '!', input_fbs.buffer.len));
|
||||
|
||||
input_fbs.reset();
|
||||
output_fbs.reset();
|
||||
|
||||
try std.testing.expectError(error.StreamTooLong, reader.streamUntilDelimiter(writer, '!', 5));
|
||||
}
|
||||
@ -8,6 +8,7 @@ const Limit = std.Io.Limit;
|
||||
const File = std.fs.File;
|
||||
const testing = std.testing;
|
||||
const Allocator = std.mem.Allocator;
|
||||
const ArrayList = std.ArrayList;
|
||||
|
||||
vtable: *const VTable,
|
||||
/// If this has length zero, the writer is unbuffered, and `flush` is a no-op.
|
||||
@ -2374,6 +2375,29 @@ pub fn unreachableRebase(w: *Writer, preserve: usize, capacity: usize) Error!voi
|
||||
unreachable;
|
||||
}
|
||||
|
||||
pub fn fromArrayList(array_list: *ArrayList(u8)) Writer {
|
||||
defer array_list.* = .empty;
|
||||
return .{
|
||||
.vtable = &.{
|
||||
.drain = fixedDrain,
|
||||
.flush = noopFlush,
|
||||
.rebase = failingRebase,
|
||||
},
|
||||
.buffer = array_list.allocatedSlice(),
|
||||
.end = array_list.items.len,
|
||||
};
|
||||
}
|
||||
|
||||
pub fn toArrayList(w: *Writer) ArrayList(u8) {
|
||||
const result: ArrayList(u8) = .{
|
||||
.items = w.buffer[0..w.end],
|
||||
.capacity = w.buffer.len,
|
||||
};
|
||||
w.buffer = &.{};
|
||||
w.end = 0;
|
||||
return result;
|
||||
}
|
||||
|
||||
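The new `fromArrayList`/`toArrayList` pair moves an `ArrayList(u8)`'s allocation into a fixed `Writer` and back without copying; note that `fromArrayList` leaves the list `.empty`, so ownership transfers to the writer until `toArrayList` hands it back. A rough usage sketch built only from the functions added above:

const std = @import("std");

test "round-trip ArrayList(u8) through a fixed Writer (sketch)" {
    const gpa = std.testing.allocator;
    var list: std.ArrayList(u8) = .empty;
    defer list.deinit(gpa);
    try list.ensureTotalCapacity(gpa, 32);
    list.appendSliceAssumeCapacity("header: ");

    var w = std.Io.Writer.fromArrayList(&list); // takes ownership; list is now .empty
    try w.writeAll("body"); // fixed buffer: overfilling it reports error.WriteFailed
    list = w.toArrayList(); // take the bytes back; w is reset to empty

    try std.testing.expectEqualStrings("header: body", list.items);
}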
/// Provides a `Writer` implementation based on calling `Hasher.update`, sending
|
||||
/// all data also to an underlying `Writer`.
|
||||
///
|
||||
@ -2546,7 +2570,7 @@ pub const Allocating = struct {
|
||||
}
|
||||
|
||||
/// Replaces `array_list` with empty, taking ownership of the memory.
|
||||
pub fn fromArrayList(allocator: Allocator, array_list: *std.ArrayListUnmanaged(u8)) Allocating {
|
||||
pub fn fromArrayList(allocator: Allocator, array_list: *ArrayList(u8)) Allocating {
|
||||
defer array_list.* = .empty;
|
||||
return .{
|
||||
.allocator = allocator,
|
||||
@ -2572,9 +2596,9 @@ pub const Allocating = struct {
|
||||
|
||||
/// Returns an array list that takes ownership of the allocated memory.
|
||||
/// Resets the `Allocating` to an empty state.
|
||||
pub fn toArrayList(a: *Allocating) std.ArrayListUnmanaged(u8) {
|
||||
pub fn toArrayList(a: *Allocating) ArrayList(u8) {
|
||||
const w = &a.writer;
|
||||
const result: std.ArrayListUnmanaged(u8) = .{
|
||||
const result: ArrayList(u8) = .{
|
||||
.items = w.buffer[0..w.end],
|
||||
.capacity = w.buffer.len,
|
||||
};
|
||||
@ -2603,7 +2627,7 @@ pub const Allocating = struct {
|
||||
|
||||
pub fn toOwnedSliceSentinel(a: *Allocating, comptime sentinel: u8) error{OutOfMemory}![:sentinel]u8 {
|
||||
const gpa = a.allocator;
|
||||
var list = toArrayList(a);
|
||||
var list = @This().toArrayList(a);
|
||||
defer a.setArrayList(list);
|
||||
return list.toOwnedSliceSentinel(gpa, sentinel);
|
||||
}
|
||||
@ -2670,7 +2694,7 @@ pub const Allocating = struct {
|
||||
list.ensureUnusedCapacity(gpa, minimum_len) catch return error.WriteFailed;
|
||||
}
|
||||
|
||||
fn setArrayList(a: *Allocating, list: std.ArrayListUnmanaged(u8)) void {
|
||||
fn setArrayList(a: *Allocating, list: ArrayList(u8)) void {
|
||||
a.writer.buffer = list.allocatedSlice();
|
||||
a.writer.end = list.items.len;
|
||||
}
|
||||
|
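For callers that previously grew an `ArrayList(u8)` through its writer, the growing replacement is `std.Io.Writer.Allocating`, which the updated `fromArrayList`/`toArrayList` signatures above convert to and from. A minimal sketch of the common pattern used elsewhere in this commit:

const std = @import("std");

test "Writer.Allocating collects formatted output (sketch)" {
    var aw: std.Io.Writer.Allocating = .init(std.testing.allocator);
    defer aw.deinit();

    try aw.writer.print("x: {d}\n", .{42});
    try std.testing.expectEqualStrings("x: 42\n", aw.written());
}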
||||
@ -17,7 +17,6 @@ pub fn FixedBufferStream(comptime Buffer: type) type {
|
||||
pub const GetSeekPosError = error{};
|
||||
|
||||
pub const Reader = io.GenericReader(*Self, ReadError, read);
|
||||
pub const Writer = io.GenericWriter(*Self, WriteError, write);
|
||||
|
||||
const Self = @This();
|
||||
|
||||
@ -25,10 +24,6 @@ pub fn FixedBufferStream(comptime Buffer: type) type {
|
||||
return .{ .context = self };
|
||||
}
|
||||
|
||||
pub fn writer(self: *Self) Writer {
|
||||
return .{ .context = self };
|
||||
}
|
||||
|
||||
pub fn read(self: *Self, dest: []u8) ReadError!usize {
|
||||
const size = @min(dest.len, self.buffer.len - self.pos);
|
||||
const end = self.pos + size;
|
||||
@ -39,23 +34,6 @@ pub fn FixedBufferStream(comptime Buffer: type) type {
|
||||
return size;
|
||||
}
|
||||
|
||||
/// If the returned number of bytes written is less than requested, the
|
||||
/// buffer is full. Returns `error.NoSpaceLeft` when no bytes would be written.
|
||||
/// Note: `error.NoSpaceLeft` matches the corresponding error from
|
||||
/// `std.fs.File.WriteError`.
|
||||
pub fn write(self: *Self, bytes: []const u8) WriteError!usize {
|
||||
if (bytes.len == 0) return 0;
|
||||
if (self.pos >= self.buffer.len) return error.NoSpaceLeft;
|
||||
|
||||
const n = @min(self.buffer.len - self.pos, bytes.len);
|
||||
@memcpy(self.buffer[self.pos..][0..n], bytes[0..n]);
|
||||
self.pos += n;
|
||||
|
||||
if (n == 0) return error.NoSpaceLeft;
|
||||
|
||||
return n;
|
||||
}
|
||||
|
||||
pub fn seekTo(self: *Self, pos: u64) SeekError!void {
|
||||
self.pos = @min(std.math.lossyCast(usize, pos), self.buffer.len);
|
||||
}
|
||||
@ -84,10 +62,6 @@ pub fn FixedBufferStream(comptime Buffer: type) type {
|
||||
return self.pos;
|
||||
}
|
||||
|
||||
pub fn getWritten(self: Self) Buffer {
|
||||
return self.buffer[0..self.pos];
|
||||
}
|
||||
|
||||
pub fn reset(self: *Self) void {
|
||||
self.pos = 0;
|
||||
}
|
||||
@ -117,49 +91,6 @@ fn Slice(comptime T: type) type {
|
||||
}
|
||||
}
|
||||
|
||||
test "output" {
|
||||
var buf: [255]u8 = undefined;
|
||||
var fbs = fixedBufferStream(&buf);
|
||||
const stream = fbs.writer();
|
||||
|
||||
try stream.print("{s}{s}!", .{ "Hello", "World" });
|
||||
try testing.expectEqualSlices(u8, "HelloWorld!", fbs.getWritten());
|
||||
}
|
||||
|
||||
test "output at comptime" {
|
||||
comptime {
|
||||
var buf: [255]u8 = undefined;
|
||||
var fbs = fixedBufferStream(&buf);
|
||||
const stream = fbs.writer();
|
||||
|
||||
try stream.print("{s}{s}!", .{ "Hello", "World" });
|
||||
try testing.expectEqualSlices(u8, "HelloWorld!", fbs.getWritten());
|
||||
}
|
||||
}
|
||||
|
||||
test "output 2" {
|
||||
var buffer: [10]u8 = undefined;
|
||||
var fbs = fixedBufferStream(&buffer);
|
||||
|
||||
try fbs.writer().writeAll("Hello");
|
||||
try testing.expect(mem.eql(u8, fbs.getWritten(), "Hello"));
|
||||
|
||||
try fbs.writer().writeAll("world");
|
||||
try testing.expect(mem.eql(u8, fbs.getWritten(), "Helloworld"));
|
||||
|
||||
try testing.expectError(error.NoSpaceLeft, fbs.writer().writeAll("!"));
|
||||
try testing.expect(mem.eql(u8, fbs.getWritten(), "Helloworld"));
|
||||
|
||||
fbs.reset();
|
||||
try testing.expect(fbs.getWritten().len == 0);
|
||||
|
||||
try testing.expectError(error.NoSpaceLeft, fbs.writer().writeAll("Hello world!"));
|
||||
try testing.expect(mem.eql(u8, fbs.getWritten(), "Hello worl"));
|
||||
|
||||
try fbs.seekTo((try fbs.getEndPos()) + 1);
|
||||
try testing.expectError(error.NoSpaceLeft, fbs.writer().writeAll("H"));
|
||||
}
|
||||
|
||||
test "input" {
|
||||
const bytes = [_]u8{ 1, 2, 3, 4, 5, 6, 7 };
|
||||
var fbs = fixedBufferStream(&bytes);
|
||||
|
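With `FixedBufferStream.writer()` gone, writing into a caller-provided buffer goes through a fixed `std.Io.Writer`, the same substitution made in `Builtin.zig` earlier in this commit. A sketch of the replacement for the deleted "output" tests:

const std = @import("std");

test "fixed Writer replaces fixedBufferStream(&buf).writer() (sketch)" {
    var buf: [255]u8 = undefined;
    var w: std.Io.Writer = .fixed(&buf);

    try w.print("{s}{s}!", .{ "Hello", "World" });
    try std.testing.expectEqualStrings("HelloWorld!", w.buffered());
    // Running out of space now surfaces as error.WriteFailed rather than error.NoSpaceLeft.
}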
||||
@ -167,7 +167,7 @@ pub fn setName(self: Thread, name: []const u8) SetNameError!void {
|
||||
const file = try std.fs.cwd().openFile(path, .{ .mode = .write_only });
|
||||
defer file.close();
|
||||
|
||||
try file.deprecatedWriter().writeAll(name);
|
||||
try file.writeAll(name);
|
||||
return;
|
||||
},
|
||||
.windows => {
|
||||
|
||||
@ -336,39 +336,6 @@ pub fn AlignedManaged(comptime T: type, comptime alignment: ?mem.Alignment) type
|
||||
try unmanaged.print(gpa, fmt, args);
|
||||
}
|
||||
|
||||
pub const Writer = if (T != u8) void else std.io.GenericWriter(*Self, Allocator.Error, appendWrite);
|
||||
|
||||
/// Initializes a Writer which will append to the list.
|
||||
pub fn writer(self: *Self) Writer {
|
||||
return .{ .context = self };
|
||||
}
|
||||
|
||||
/// Same as `append` except it returns the number of bytes written, which is always the same
|
||||
/// as `m.len`. The purpose of this function existing is to match `std.io.GenericWriter` API.
|
||||
/// Invalidates element pointers if additional memory is needed.
|
||||
fn appendWrite(self: *Self, m: []const u8) Allocator.Error!usize {
|
||||
try self.appendSlice(m);
|
||||
return m.len;
|
||||
}
|
||||
|
||||
pub const FixedWriter = std.io.GenericWriter(*Self, Allocator.Error, appendWriteFixed);
|
||||
|
||||
/// Initializes a Writer which will append to the list but will return
|
||||
/// `error.OutOfMemory` rather than increasing capacity.
|
||||
pub fn fixedWriter(self: *Self) FixedWriter {
|
||||
return .{ .context = self };
|
||||
}
|
||||
|
||||
/// The purpose of this function existing is to match `std.io.GenericWriter` API.
|
||||
fn appendWriteFixed(self: *Self, m: []const u8) error{OutOfMemory}!usize {
|
||||
const available_capacity = self.capacity - self.items.len;
|
||||
if (m.len > available_capacity)
|
||||
return error.OutOfMemory;
|
||||
|
||||
self.appendSliceAssumeCapacity(m);
|
||||
return m.len;
|
||||
}
|
||||
|
||||
/// Append a value to the list `n` times.
|
||||
/// Allocates more memory as necessary.
|
||||
/// Invalidates element pointers if additional memory is needed.
|
||||
@ -1083,48 +1050,6 @@ pub fn Aligned(comptime T: type, comptime alignment: ?mem.Alignment) type {
|
||||
self.items.len += w.end;
|
||||
}
|
||||
|
||||
/// Deprecated in favor of `print` or `std.io.Writer.Allocating`.
|
||||
pub const WriterContext = struct {
|
||||
self: *Self,
|
||||
allocator: Allocator,
|
||||
};
|
||||
|
||||
/// Deprecated in favor of `print` or `std.io.Writer.Allocating`.
|
||||
pub const Writer = if (T != u8)
|
||||
@compileError("The Writer interface is only defined for ArrayList(u8) " ++
|
||||
"but the given type is ArrayList(" ++ @typeName(T) ++ ")")
|
||||
else
|
||||
std.io.GenericWriter(WriterContext, Allocator.Error, appendWrite);
|
||||
|
||||
/// Deprecated in favor of `print` or `std.io.Writer.Allocating`.
|
||||
pub fn writer(self: *Self, gpa: Allocator) Writer {
|
||||
return .{ .context = .{ .self = self, .allocator = gpa } };
|
||||
}
|
||||
|
||||
/// Deprecated in favor of `print` or `std.io.Writer.Allocating`.
|
||||
fn appendWrite(context: WriterContext, m: []const u8) Allocator.Error!usize {
|
||||
try context.self.appendSlice(context.allocator, m);
|
||||
return m.len;
|
||||
}
|
||||
|
||||
/// Deprecated in favor of `print` or `std.io.Writer.Allocating`.
|
||||
pub const FixedWriter = std.io.GenericWriter(*Self, Allocator.Error, appendWriteFixed);
|
||||
|
||||
/// Deprecated in favor of `print` or `std.io.Writer.Allocating`.
|
||||
pub fn fixedWriter(self: *Self) FixedWriter {
|
||||
return .{ .context = self };
|
||||
}
|
||||
|
||||
/// Deprecated in favor of `print` or `std.io.Writer.Allocating`.
|
||||
fn appendWriteFixed(self: *Self, m: []const u8) error{OutOfMemory}!usize {
|
||||
const available_capacity = self.capacity - self.items.len;
|
||||
if (m.len > available_capacity)
|
||||
return error.OutOfMemory;
|
||||
|
||||
self.appendSliceAssumeCapacity(m);
|
||||
return m.len;
|
||||
}
|
||||
|
||||
/// Append a value to the list `n` times.
|
||||
/// Allocates more memory as necessary.
|
||||
/// Invalidates element pointers if additional memory is needed.
|
||||
@ -2116,60 +2041,6 @@ test "Managed(T) of struct T" {
|
||||
}
|
||||
}
|
||||
|
||||
test "Managed(u8) implements writer" {
|
||||
const a = testing.allocator;
|
||||
|
||||
{
|
||||
var buffer = Managed(u8).init(a);
|
||||
defer buffer.deinit();
|
||||
|
||||
const x: i32 = 42;
|
||||
const y: i32 = 1234;
|
||||
try buffer.writer().print("x: {}\ny: {}\n", .{ x, y });
|
||||
|
||||
try testing.expectEqualSlices(u8, "x: 42\ny: 1234\n", buffer.items);
|
||||
}
|
||||
{
|
||||
var list = AlignedManaged(u8, .@"2").init(a);
|
||||
defer list.deinit();
|
||||
|
||||
const writer = list.writer();
|
||||
try writer.writeAll("a");
|
||||
try writer.writeAll("bc");
|
||||
try writer.writeAll("d");
|
||||
try writer.writeAll("efg");
|
||||
|
||||
try testing.expectEqualSlices(u8, list.items, "abcdefg");
|
||||
}
|
||||
}
|
||||
|
||||
test "ArrayList(u8) implements writer" {
|
||||
const a = testing.allocator;
|
||||
|
||||
{
|
||||
var buffer: ArrayList(u8) = .empty;
|
||||
defer buffer.deinit(a);
|
||||
|
||||
const x: i32 = 42;
|
||||
const y: i32 = 1234;
|
||||
try buffer.writer(a).print("x: {}\ny: {}\n", .{ x, y });
|
||||
|
||||
try testing.expectEqualSlices(u8, "x: 42\ny: 1234\n", buffer.items);
|
||||
}
|
||||
{
|
||||
var list: Aligned(u8, .@"2") = .empty;
|
||||
defer list.deinit(a);
|
||||
|
||||
const writer = list.writer(a);
|
||||
try writer.writeAll("a");
|
||||
try writer.writeAll("bc");
|
||||
try writer.writeAll("d");
|
||||
try writer.writeAll("efg");
|
||||
|
||||
try testing.expectEqualSlices(u8, list.items, "abcdefg");
|
||||
}
|
||||
}
|
||||
|
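These tests exercised the deprecated GenericWriter-based `writer()` on `ArrayList(u8)` and are deleted. The deprecation comments above point at `print` or `std.Io.Writer.Allocating` as replacements; a sketch of the `print` form on the unmanaged list, assuming the `print(gpa, fmt, args)` helper referenced there:

const std = @import("std");

test "ArrayList(u8).print replaces writer().print (sketch)" {
    const gpa = std.testing.allocator;
    var buffer: std.ArrayList(u8) = .empty;
    defer buffer.deinit(gpa);

    try buffer.print(gpa, "x: {d}\ny: {d}\n", .{ 42, 1234 });
    try std.testing.expectEqualSlices(u8, "x: 42\ny: 1234\n", buffer.items);
}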
||||
test "shrink still sets length when resizing is disabled" {
|
||||
var failing_allocator = testing.FailingAllocator.init(testing.allocator, .{ .resize_fail_index = 0 });
|
||||
const a = failing_allocator.allocator();
|
||||
|
||||
@ -108,8 +108,7 @@ pub const Base64Encoder = struct {
|
||||
}
|
||||
}
|
||||
|
||||
// dest must be compatible with std.io.GenericWriter's writeAll interface
|
||||
pub fn encodeWriter(encoder: *const Base64Encoder, dest: anytype, source: []const u8) !void {
|
||||
pub fn encodeWriter(encoder: *const Base64Encoder, dest: *std.Io.Writer, source: []const u8) !void {
|
||||
var chunker = window(u8, source, 3, 3);
|
||||
while (chunker.next()) |chunk| {
|
||||
var temp: [5]u8 = undefined;
|
||||
|
||||
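`encodeWriter` now takes a concrete `*std.Io.Writer` instead of `anytype`. A rough call-site sketch, assuming `std.base64.standard.Encoder` as the encoder instance:

const std = @import("std");

test "base64 encodeWriter into a fixed Writer (sketch)" {
    var buf: [16]u8 = undefined;
    var w: std.Io.Writer = .fixed(&buf);
    try std.base64.standard.Encoder.encodeWriter(&w, "zig");
    try std.testing.expectEqualStrings("emln", w.buffered());
}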
@ -801,18 +801,6 @@ fn AegisMac(comptime T: type) type {
|
||||
ctx.update(msg);
|
||||
ctx.final(out);
|
||||
}
|
||||
|
||||
pub const Error = error{};
|
||||
pub const Writer = std.io.GenericWriter(*Mac, Error, write);
|
||||
|
||||
fn write(self: *Mac, bytes: []const u8) Error!usize {
|
||||
self.update(bytes);
|
||||
return bytes.len;
|
||||
}
|
||||
|
||||
pub fn writer(self: *Mac) Writer {
|
||||
return .{ .context = self };
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
|
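This pattern — an empty `Error` set plus a GenericWriter-based `writer()` — is deleted from every hash and MAC type touched by this commit. The replacement, used by the Kyber KAT test further down, is `std.Io.Writer.Hashing(Hasher)`, which routes everything written through `Hasher.update`. A minimal sketch with Sha256:

const std = @import("std");

test "Writer.Hashing replaces the per-hash writer() (sketch)" {
    var hashing: std.Io.Writer.Hashing(std.crypto.hash.sha2.Sha256) = .init(&.{});
    try hashing.writer.print("count = {d}\n", .{1});

    var digest: [32]u8 = undefined;
    hashing.hasher.final(&digest);
    _ = digest; // use the digest
}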
||||
@ -185,18 +185,6 @@ pub fn Blake2s(comptime out_bits: usize) type {
|
||||
r.* ^= v[i] ^ v[i + 8];
|
||||
}
|
||||
}
|
||||
|
||||
pub const Error = error{};
|
||||
pub const Writer = std.io.GenericWriter(*Self, Error, write);
|
||||
|
||||
fn write(self: *Self, bytes: []const u8) Error!usize {
|
||||
self.update(bytes);
|
||||
return bytes.len;
|
||||
}
|
||||
|
||||
pub fn writer(self: *Self) Writer {
|
||||
return .{ .context = self };
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
|
||||
@ -474,18 +474,6 @@ pub const Blake3 = struct {
|
||||
}
|
||||
output.rootOutputBytes(out_slice);
|
||||
}
|
||||
|
||||
pub const Error = error{};
|
||||
pub const Writer = std.io.GenericWriter(*Blake3, Error, write);
|
||||
|
||||
fn write(self: *Blake3, bytes: []const u8) Error!usize {
|
||||
self.update(bytes);
|
||||
return bytes.len;
|
||||
}
|
||||
|
||||
pub fn writer(self: *Blake3) Writer {
|
||||
return .{ .context = self };
|
||||
}
|
||||
};
|
||||
|
||||
// Use named type declarations to workaround crash with anonymous structs (issue #4373).
|
||||
|
||||
@ -4,6 +4,12 @@
|
||||
//! Laid out in memory like:
|
||||
//! capacity |--------------------------|
|
||||
//! data |-------------|
|
||||
|
||||
const std = @import("std");
|
||||
const Allocator = std.mem.Allocator;
|
||||
const assert = std.debug.assert;
|
||||
const testing = std.testing;
|
||||
|
||||
data: []u8,
|
||||
capacity: usize,
|
||||
allocator: Allocator,
|
||||
@ -45,12 +51,6 @@ pub fn prependSlice(self: *ArrayListReverse, data: []const u8) Error!void {
|
||||
self.data.ptr = begin;
|
||||
}
|
||||
|
||||
pub const Writer = std.io.GenericWriter(*ArrayListReverse, Error, prependSliceSize);
|
||||
/// Warning: This writer writes backwards. `fn print` will NOT work as expected.
|
||||
pub fn writer(self: *ArrayListReverse) Writer {
|
||||
return .{ .context = self };
|
||||
}
|
||||
|
||||
fn prependSliceSize(self: *ArrayListReverse, data: []const u8) Error!usize {
|
||||
try self.prependSlice(data);
|
||||
return data.len;
|
||||
@ -77,11 +77,6 @@ pub fn toOwnedSlice(self: *ArrayListReverse) Error![]u8 {
|
||||
return new_memory;
|
||||
}
|
||||
|
||||
const std = @import("std");
|
||||
const Allocator = std.mem.Allocator;
|
||||
const assert = std.debug.assert;
|
||||
const testing = std.testing;
|
||||
|
||||
test ArrayListReverse {
|
||||
var b = ArrayListReverse.init(testing.allocator);
|
||||
defer b.deinit();
|
||||
|
||||
@ -1721,53 +1721,55 @@ test "Test happy flow" {
|
||||
|
||||
// Code to test NIST Known Answer Tests (KAT), see PQCgenKAT.c.
|
||||
|
||||
const sha2 = crypto.hash.sha2;
|
||||
test "NIST KAT test d00.Kyber512" {
|
||||
try testNistKat(d00.Kyber512, "e9c2bd37133fcb40772f81559f14b1f58dccd1c816701be9ba6214d43baf4547");
|
||||
}
|
||||
|
||||
test "NIST KAT test" {
|
||||
inline for (.{
|
||||
.{ d00.Kyber512, "e9c2bd37133fcb40772f81559f14b1f58dccd1c816701be9ba6214d43baf4547" },
|
||||
.{ d00.Kyber1024, "89248f2f33f7f4f7051729111f3049c409a933ec904aedadf035f30fa5646cd5" },
|
||||
.{ d00.Kyber768, "a1e122cad3c24bc51622e4c242d8b8acbcd3f618fee4220400605ca8f9ea02c2" },
|
||||
}) |modeHash| {
|
||||
const mode = modeHash[0];
|
||||
var seed: [48]u8 = undefined;
|
||||
for (&seed, 0..) |*s, i| {
|
||||
s.* = @as(u8, @intCast(i));
|
||||
}
|
||||
var f = sha2.Sha256.init(.{});
|
||||
const fw = f.writer();
|
||||
var g = NistDRBG.init(seed);
|
||||
try std.fmt.format(fw, "# {s}\n\n", .{mode.name});
|
||||
for (0..100) |i| {
|
||||
g.fill(&seed);
|
||||
try std.fmt.format(fw, "count = {}\n", .{i});
|
||||
try std.fmt.format(fw, "seed = {X}\n", .{&seed});
|
||||
var g2 = NistDRBG.init(seed);
|
||||
test "NIST KAT test d00.Kyber1024" {
|
||||
try testNistKat(d00.Kyber1024, "89248f2f33f7f4f7051729111f3049c409a933ec904aedadf035f30fa5646cd5");
|
||||
}
|
||||
|
||||
// This is not equivalent to g2.fill(kseed[:]). As the reference
|
||||
// implementation calls randombytes twice when generating the keypair,
|
||||
// we have to do that as well.
|
||||
var kseed: [64]u8 = undefined;
|
||||
var eseed: [32]u8 = undefined;
|
||||
g2.fill(kseed[0..32]);
|
||||
g2.fill(kseed[32..64]);
|
||||
g2.fill(&eseed);
|
||||
const kp = try mode.KeyPair.generateDeterministic(kseed);
|
||||
const e = kp.public_key.encaps(eseed);
|
||||
const ss2 = try kp.secret_key.decaps(&e.ciphertext);
|
||||
try testing.expectEqual(ss2, e.shared_secret);
|
||||
try std.fmt.format(fw, "pk = {X}\n", .{&kp.public_key.toBytes()});
|
||||
try std.fmt.format(fw, "sk = {X}\n", .{&kp.secret_key.toBytes()});
|
||||
try std.fmt.format(fw, "ct = {X}\n", .{&e.ciphertext});
|
||||
try std.fmt.format(fw, "ss = {X}\n\n", .{&e.shared_secret});
|
||||
}
|
||||
test "NIST KAT test d00.Kyber768" {
|
||||
try testNistKat(d00.Kyber768, "a1e122cad3c24bc51622e4c242d8b8acbcd3f618fee4220400605ca8f9ea02c2");
|
||||
}
|
||||
|
||||
var out: [32]u8 = undefined;
|
||||
f.final(&out);
|
||||
var outHex: [64]u8 = undefined;
|
||||
_ = try std.fmt.bufPrint(&outHex, "{x}", .{&out});
|
||||
try testing.expectEqual(outHex, modeHash[1].*);
|
||||
fn testNistKat(mode: type, hash: []const u8) !void {
|
||||
var seed: [48]u8 = undefined;
|
||||
for (&seed, 0..) |*s, i| {
|
||||
s.* = @as(u8, @intCast(i));
|
||||
}
|
||||
var fw: std.Io.Writer.Hashing(crypto.hash.sha2.Sha256) = .init(&.{});
|
||||
var g = NistDRBG.init(seed);
|
||||
try fw.writer.print("# {s}\n\n", .{mode.name});
|
||||
for (0..100) |i| {
|
||||
g.fill(&seed);
|
||||
try fw.writer.print("count = {}\n", .{i});
|
||||
try fw.writer.print("seed = {X}\n", .{&seed});
|
||||
var g2 = NistDRBG.init(seed);
|
||||
|
||||
// This is not equivalent to g2.fill(kseed[:]). As the reference
|
||||
// implementation calls randombytes twice when generating the keypair,
|
||||
// we have to do that as well.
|
||||
var kseed: [64]u8 = undefined;
|
||||
var eseed: [32]u8 = undefined;
|
||||
g2.fill(kseed[0..32]);
|
||||
g2.fill(kseed[32..64]);
|
||||
g2.fill(&eseed);
|
||||
const kp = try mode.KeyPair.generateDeterministic(kseed);
|
||||
const e = kp.public_key.encaps(eseed);
|
||||
const ss2 = try kp.secret_key.decaps(&e.ciphertext);
|
||||
try testing.expectEqual(ss2, e.shared_secret);
|
||||
try fw.writer.print("pk = {X}\n", .{&kp.public_key.toBytes()});
|
||||
try fw.writer.print("sk = {X}\n", .{&kp.secret_key.toBytes()});
|
||||
try fw.writer.print("ct = {X}\n", .{&e.ciphertext});
|
||||
try fw.writer.print("ss = {X}\n\n", .{&e.shared_secret});
|
||||
}
|
||||
|
||||
var out: [32]u8 = undefined;
|
||||
fw.hasher.final(&out);
|
||||
var outHex: [64]u8 = undefined;
|
||||
_ = try std.fmt.bufPrint(&outHex, "{x}", .{&out});
|
||||
try testing.expectEqualStrings(&outHex, hash);
|
||||
}
|
||||
|
||||
const NistDRBG = struct {
|
||||
|
||||
@ -304,31 +304,34 @@ const crypt_format = struct {
|
||||
|
||||
/// Serialize parameters into a string in modular crypt format.
|
||||
pub fn serialize(params: anytype, str: []u8) EncodingError![]const u8 {
|
||||
var buf = io.fixedBufferStream(str);
|
||||
try serializeTo(params, buf.writer());
|
||||
return buf.getWritten();
|
||||
var w: std.Io.Writer = .fixed(str);
|
||||
serializeTo(params, &w) catch |err| switch (err) {
|
||||
error.WriteFailed => return error.NoSpaceLeft,
|
||||
else => |e| return e,
|
||||
};
|
||||
return w.buffered();
|
||||
}
|
||||
|
||||
/// Compute the number of bytes required to serialize `params`
|
||||
pub fn calcSize(params: anytype) usize {
|
||||
var trash: [128]u8 = undefined;
|
||||
var d: std.Io.Writer.Discarding = .init(&trash);
|
||||
serializeTo(params, &d) catch unreachable;
|
||||
serializeTo(params, &d.writer) catch unreachable;
|
||||
return @intCast(d.fullCount());
|
||||
}
|
||||
|
||||
fn serializeTo(params: anytype, out: anytype) !void {
|
||||
fn serializeTo(params: anytype, w: *std.Io.Writer) !void {
|
||||
var header: [14]u8 = undefined;
|
||||
header[0..3].* = prefix.*;
|
||||
Codec.intEncode(header[3..4], params.ln);
|
||||
Codec.intEncode(header[4..9], params.r);
|
||||
Codec.intEncode(header[9..14], params.p);
|
||||
try out.writeAll(&header);
|
||||
try out.writeAll(params.salt);
|
||||
try out.writeAll("$");
|
||||
try w.writeAll(&header);
|
||||
try w.writeAll(params.salt);
|
||||
try w.writeAll("$");
|
||||
var buf: [@TypeOf(params.hash).max_encoded_length]u8 = undefined;
|
||||
const hash_str = try params.hash.toB64(&buf);
|
||||
try out.writeAll(hash_str);
|
||||
try w.writeAll(hash_str);
|
||||
}
|
||||
|
||||
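`calcSize` now measures the serialized length by running `serializeTo` against a `std.Io.Writer.Discarding`, which counts bytes instead of storing them. The counting idiom on its own, following the usage above (the `writer` field and `fullCount()` are taken from that code):

const std = @import("std");

test "Discarding writer counts bytes (sketch)" {
    var trash: [64]u8 = undefined;
    var d: std.Io.Writer.Discarding = .init(&trash);
    try d.writer.print("ln={d},r={d},p={d}", .{ 15, 8, 1 });

    const size: usize = @intCast(d.fullCount());
    try std.testing.expectEqual("ln=15,r=8,p=1".len, size);
}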
/// Custom codec that maps 6 bits into 8 like regular Base64, but uses its own alphabet,
|
||||
|
||||
@ -373,18 +373,6 @@ fn Sha2x32(comptime iv: Iv32, digest_bits: comptime_int) type {
|
||||
|
||||
for (&d.s, v) |*dv, vv| dv.* +%= vv;
|
||||
}
|
||||
|
||||
pub const Error = error{};
|
||||
pub const Writer = std.io.GenericWriter(*Self, Error, write);
|
||||
|
||||
fn write(self: *Self, bytes: []const u8) Error!usize {
|
||||
self.update(bytes);
|
||||
return bytes.len;
|
||||
}
|
||||
|
||||
pub fn writer(self: *Self) Writer {
|
||||
return .{ .context = self };
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
|
||||
@ -80,18 +80,6 @@ pub fn Keccak(comptime f: u11, comptime output_bits: u11, comptime default_delim
|
||||
self.st.pad();
|
||||
self.st.squeeze(out[0..]);
|
||||
}
|
||||
|
||||
pub const Error = error{};
|
||||
pub const Writer = std.io.GenericWriter(*Self, Error, write);
|
||||
|
||||
fn write(self: *Self, bytes: []const u8) Error!usize {
|
||||
self.update(bytes);
|
||||
return bytes.len;
|
||||
}
|
||||
|
||||
pub fn writer(self: *Self) Writer {
|
||||
return .{ .context = self };
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@ -191,18 +179,6 @@ fn ShakeLike(comptime security_level: u11, comptime default_delim: u8, comptime
|
||||
pub fn fillBlock(self: *Self) void {
|
||||
self.st.fillBlock();
|
||||
}
|
||||
|
||||
pub const Error = error{};
|
||||
pub const Writer = std.io.GenericWriter(*Self, Error, write);
|
||||
|
||||
fn write(self: *Self, bytes: []const u8) Error!usize {
|
||||
self.update(bytes);
|
||||
return bytes.len;
|
||||
}
|
||||
|
||||
pub fn writer(self: *Self) Writer {
|
||||
return .{ .context = self };
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@ -284,18 +260,6 @@ fn CShakeLike(comptime security_level: u11, comptime default_delim: u8, comptime
|
||||
pub fn fillBlock(self: *Self) void {
|
||||
self.shaker.fillBlock();
|
||||
}
|
||||
|
||||
pub const Error = error{};
|
||||
pub const Writer = std.io.GenericWriter(*Self, Error, write);
|
||||
|
||||
fn write(self: *Self, bytes: []const u8) Error!usize {
|
||||
self.update(bytes);
|
||||
return bytes.len;
|
||||
}
|
||||
|
||||
pub fn writer(self: *Self) Writer {
|
||||
return .{ .context = self };
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@ -390,18 +354,6 @@ fn KMacLike(comptime security_level: u11, comptime default_delim: u8, comptime r
|
||||
ctx.update(msg);
|
||||
ctx.final(out);
|
||||
}
|
||||
|
||||
pub const Error = error{};
|
||||
pub const Writer = std.io.GenericWriter(*Self, Error, write);
|
||||
|
||||
fn write(self: *Self, bytes: []const u8) Error!usize {
|
||||
self.update(bytes);
|
||||
return bytes.len;
|
||||
}
|
||||
|
||||
pub fn writer(self: *Self) Writer {
|
||||
return .{ .context = self };
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@ -482,18 +434,6 @@ fn TupleHashLike(comptime security_level: u11, comptime default_delim: u8, compt
|
||||
}
|
||||
self.cshaker.squeeze(out);
|
||||
}
|
||||
|
||||
pub const Error = error{};
|
||||
pub const Writer = std.io.GenericWriter(*Self, Error, write);
|
||||
|
||||
fn write(self: *Self, bytes: []const u8) Error!usize {
|
||||
self.update(bytes);
|
||||
return bytes.len;
|
||||
}
|
||||
|
||||
pub fn writer(self: *Self) Writer {
|
||||
return .{ .context = self };
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
|
||||
@ -238,18 +238,6 @@ fn SipHash(comptime T: type, comptime c_rounds: usize, comptime d_rounds: usize)
|
||||
pub fn toInt(msg: []const u8, key: *const [key_length]u8) T {
|
||||
return State.hash(msg, key);
|
||||
}
|
||||
|
||||
pub const Error = error{};
|
||||
pub const Writer = std.io.GenericWriter(*Self, Error, write);
|
||||
|
||||
fn write(self: *Self, bytes: []const u8) Error!usize {
|
||||
self.update(bytes);
|
||||
return bytes.len;
|
||||
}
|
||||
|
||||
pub fn writer(self: *Self) Writer {
|
||||
return .{ .context = self };
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
|
||||
@ -8,6 +8,8 @@ const OP = std.dwarf.OP;
|
||||
const abi = std.debug.Dwarf.abi;
|
||||
const mem = std.mem;
|
||||
const assert = std.debug.assert;
|
||||
const testing = std.testing;
|
||||
const Writer = std.Io.Writer;
|
||||
|
||||
/// Expressions can be evaluated in different contexts, each requiring its own set of inputs.
|
||||
/// Callers should specify all the fields relevant to their context. If a field is required
|
||||
@ -782,7 +784,7 @@ pub fn Builder(comptime options: Options) type {
|
||||
|
||||
return struct {
|
||||
/// Zero-operand instructions
|
||||
pub fn writeOpcode(writer: anytype, comptime opcode: u8) !void {
|
||||
pub fn writeOpcode(writer: *Writer, comptime opcode: u8) !void {
|
||||
if (options.call_frame_context and !comptime isOpcodeValidInCFA(opcode)) return error.InvalidCFAOpcode;
|
||||
switch (opcode) {
|
||||
OP.dup,
|
||||
@ -823,14 +825,14 @@ pub fn Builder(comptime options: Options) type {
|
||||
}
|
||||
|
||||
// 2.5.1.1: Literal Encodings
|
||||
pub fn writeLiteral(writer: anytype, literal: u8) !void {
|
||||
pub fn writeLiteral(writer: *Writer, literal: u8) !void {
|
||||
switch (literal) {
|
||||
0...31 => |n| try writer.writeByte(n + OP.lit0),
|
||||
else => return error.InvalidLiteral,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn writeConst(writer: anytype, comptime T: type, value: T) !void {
|
||||
pub fn writeConst(writer: *Writer, comptime T: type, value: T) !void {
|
||||
if (@typeInfo(T) != .int) @compileError("Constants must be integers");
|
||||
|
||||
switch (T) {
|
||||
@ -852,7 +854,7 @@ pub fn Builder(comptime options: Options) type {
|
||||
else => switch (@typeInfo(T).int.signedness) {
|
||||
.unsigned => {
|
||||
try writer.writeByte(OP.constu);
|
||||
try leb.writeUleb128(writer, value);
|
||||
try writer.writeUleb128(value);
|
||||
},
|
||||
.signed => {
|
||||
try writer.writeByte(OP.consts);
|
||||
@ -862,105 +864,105 @@ pub fn Builder(comptime options: Options) type {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn writeConstx(writer: anytype, debug_addr_offset: anytype) !void {
|
||||
pub fn writeConstx(writer: *Writer, debug_addr_offset: anytype) !void {
|
||||
try writer.writeByte(OP.constx);
|
||||
try leb.writeUleb128(writer, debug_addr_offset);
|
||||
try writer.writeUleb128(debug_addr_offset);
|
||||
}
|
||||
|
||||
pub fn writeConstType(writer: anytype, die_offset: anytype, value_bytes: []const u8) !void {
|
||||
pub fn writeConstType(writer: *Writer, die_offset: anytype, value_bytes: []const u8) !void {
|
||||
if (options.call_frame_context) return error.InvalidCFAOpcode;
|
||||
if (value_bytes.len > 0xff) return error.InvalidTypeLength;
|
||||
try writer.writeByte(OP.const_type);
|
||||
try leb.writeUleb128(writer, die_offset);
|
||||
try writer.writeUleb128(die_offset);
|
||||
try writer.writeByte(@intCast(value_bytes.len));
|
||||
try writer.writeAll(value_bytes);
|
||||
}
|
||||
|
||||
pub fn writeAddr(writer: anytype, value: addr_type) !void {
|
||||
pub fn writeAddr(writer: *Writer, value: addr_type) !void {
|
||||
try writer.writeByte(OP.addr);
|
||||
try writer.writeInt(addr_type, value, options.endian);
|
||||
}
|
||||
|
||||
pub fn writeAddrx(writer: anytype, debug_addr_offset: anytype) !void {
|
||||
pub fn writeAddrx(writer: *Writer, debug_addr_offset: anytype) !void {
|
||||
if (options.call_frame_context) return error.InvalidCFAOpcode;
|
||||
try writer.writeByte(OP.addrx);
|
||||
try leb.writeUleb128(writer, debug_addr_offset);
|
||||
try writer.writeUleb128(debug_addr_offset);
|
||||
}
|
||||
|
||||
// 2.5.1.2: Register Values
|
||||
pub fn writeFbreg(writer: anytype, offset: anytype) !void {
|
||||
pub fn writeFbreg(writer: *Writer, offset: anytype) !void {
|
||||
try writer.writeByte(OP.fbreg);
|
||||
try leb.writeIleb128(writer, offset);
|
||||
}
|
||||
|
||||
pub fn writeBreg(writer: anytype, register: u8, offset: anytype) !void {
|
||||
pub fn writeBreg(writer: *Writer, register: u8, offset: anytype) !void {
|
||||
if (register > 31) return error.InvalidRegister;
|
||||
try writer.writeByte(OP.breg0 + register);
|
||||
try leb.writeIleb128(writer, offset);
|
||||
}
|
||||
|
||||
pub fn writeBregx(writer: anytype, register: anytype, offset: anytype) !void {
|
||||
pub fn writeBregx(writer: *Writer, register: anytype, offset: anytype) !void {
|
||||
try writer.writeByte(OP.bregx);
|
||||
try leb.writeUleb128(writer, register);
|
||||
try writer.writeUleb128(register);
|
||||
try leb.writeIleb128(writer, offset);
|
||||
}
|
||||
|
||||
pub fn writeRegvalType(writer: anytype, register: anytype, offset: anytype) !void {
|
||||
pub fn writeRegvalType(writer: *Writer, register: anytype, offset: anytype) !void {
|
||||
if (options.call_frame_context) return error.InvalidCFAOpcode;
|
||||
try writer.writeByte(OP.regval_type);
|
||||
try leb.writeUleb128(writer, register);
|
||||
try leb.writeUleb128(writer, offset);
|
||||
try writer.writeUleb128(register);
|
||||
try writer.writeUleb128(offset);
|
||||
}
|
||||
|
||||
// 2.5.1.3: Stack Operations
|
||||
pub fn writePick(writer: anytype, index: u8) !void {
|
||||
pub fn writePick(writer: *Writer, index: u8) !void {
|
||||
try writer.writeByte(OP.pick);
|
||||
try writer.writeByte(index);
|
||||
}
|
||||
|
||||
pub fn writeDerefSize(writer: anytype, size: u8) !void {
|
||||
pub fn writeDerefSize(writer: *Writer, size: u8) !void {
|
||||
try writer.writeByte(OP.deref_size);
|
||||
try writer.writeByte(size);
|
||||
}
|
||||
|
||||
pub fn writeXDerefSize(writer: anytype, size: u8) !void {
|
||||
pub fn writeXDerefSize(writer: *Writer, size: u8) !void {
|
||||
try writer.writeByte(OP.xderef_size);
|
||||
try writer.writeByte(size);
|
||||
}
|
||||
|
||||
pub fn writeDerefType(writer: anytype, size: u8, die_offset: anytype) !void {
|
||||
pub fn writeDerefType(writer: *Writer, size: u8, die_offset: anytype) !void {
|
||||
if (options.call_frame_context) return error.InvalidCFAOpcode;
|
||||
try writer.writeByte(OP.deref_type);
|
||||
try writer.writeByte(size);
|
||||
try leb.writeUleb128(writer, die_offset);
|
||||
try writer.writeUleb128(die_offset);
|
||||
}
|
||||
|
||||
pub fn writeXDerefType(writer: anytype, size: u8, die_offset: anytype) !void {
|
||||
pub fn writeXDerefType(writer: *Writer, size: u8, die_offset: anytype) !void {
|
||||
try writer.writeByte(OP.xderef_type);
|
||||
try writer.writeByte(size);
|
||||
try leb.writeUleb128(writer, die_offset);
|
||||
try writer.writeUleb128(die_offset);
|
||||
}
|
||||
|
||||
// 2.5.1.4: Arithmetic and Logical Operations
|
||||
|
||||
pub fn writePlusUconst(writer: anytype, uint_value: anytype) !void {
|
||||
pub fn writePlusUconst(writer: *Writer, uint_value: anytype) !void {
|
||||
try writer.writeByte(OP.plus_uconst);
|
||||
try leb.writeUleb128(writer, uint_value);
|
||||
try writer.writeUleb128(uint_value);
|
||||
}
|
||||
|
||||
// 2.5.1.5: Control Flow Operations
|
||||
|
||||
pub fn writeSkip(writer: anytype, offset: i16) !void {
|
||||
pub fn writeSkip(writer: *Writer, offset: i16) !void {
|
||||
try writer.writeByte(OP.skip);
|
||||
try writer.writeInt(i16, offset, options.endian);
|
||||
}
|
||||
|
||||
pub fn writeBra(writer: anytype, offset: i16) !void {
|
||||
pub fn writeBra(writer: *Writer, offset: i16) !void {
|
||||
try writer.writeByte(OP.bra);
|
||||
try writer.writeInt(i16, offset, options.endian);
|
||||
}
|
||||
|
||||
pub fn writeCall(writer: anytype, comptime T: type, offset: T) !void {
|
||||
pub fn writeCall(writer: *Writer, comptime T: type, offset: T) !void {
|
||||
if (options.call_frame_context) return error.InvalidCFAOpcode;
|
||||
switch (T) {
|
||||
u16 => try writer.writeByte(OP.call2),
|
||||
@ -971,45 +973,45 @@ pub fn Builder(comptime options: Options) type {
|
||||
try writer.writeInt(T, offset, options.endian);
|
||||
}
|
||||
|
||||
pub fn writeCallRef(writer: anytype, comptime is_64: bool, value: if (is_64) u64 else u32) !void {
|
||||
pub fn writeCallRef(writer: *Writer, comptime is_64: bool, value: if (is_64) u64 else u32) !void {
|
||||
if (options.call_frame_context) return error.InvalidCFAOpcode;
|
||||
try writer.writeByte(OP.call_ref);
|
||||
try writer.writeInt(if (is_64) u64 else u32, value, options.endian);
|
||||
}
|
||||
|
||||
pub fn writeConvert(writer: anytype, die_offset: anytype) !void {
|
||||
pub fn writeConvert(writer: *Writer, die_offset: anytype) !void {
|
||||
if (options.call_frame_context) return error.InvalidCFAOpcode;
|
||||
try writer.writeByte(OP.convert);
|
||||
try leb.writeUleb128(writer, die_offset);
|
||||
try writer.writeUleb128(die_offset);
|
||||
}
|
||||
|
||||
pub fn writeReinterpret(writer: anytype, die_offset: anytype) !void {
|
||||
pub fn writeReinterpret(writer: *Writer, die_offset: anytype) !void {
|
||||
if (options.call_frame_context) return error.InvalidCFAOpcode;
|
||||
try writer.writeByte(OP.reinterpret);
|
||||
try leb.writeUleb128(writer, die_offset);
|
||||
try writer.writeUleb128(die_offset);
|
||||
}
|
||||
|
||||
// 2.5.1.7: Special Operations
|
||||
|
||||
pub fn writeEntryValue(writer: anytype, expression: []const u8) !void {
|
||||
pub fn writeEntryValue(writer: *Writer, expression: []const u8) !void {
|
||||
try writer.writeByte(OP.entry_value);
|
||||
try leb.writeUleb128(writer, expression.len);
|
||||
try writer.writeUleb128(expression.len);
|
||||
try writer.writeAll(expression);
|
||||
}
|
||||
|
||||
// 2.6: Location Descriptions
|
||||
pub fn writeReg(writer: anytype, register: u8) !void {
|
||||
pub fn writeReg(writer: *Writer, register: u8) !void {
|
||||
try writer.writeByte(OP.reg0 + register);
|
||||
}
|
||||
|
||||
pub fn writeRegx(writer: anytype, register: anytype) !void {
|
||||
pub fn writeRegx(writer: *Writer, register: anytype) !void {
|
||||
try writer.writeByte(OP.regx);
|
||||
try leb.writeUleb128(writer, register);
|
||||
try writer.writeUleb128(register);
|
||||
}
|
||||
|
||||
pub fn writeImplicitValue(writer: anytype, value_bytes: []const u8) !void {
|
||||
pub fn writeImplicitValue(writer: *Writer, value_bytes: []const u8) !void {
|
||||
try writer.writeByte(OP.implicit_value);
|
||||
try leb.writeUleb128(writer, value_bytes.len);
|
||||
try writer.writeUleb128(value_bytes.len);
|
||||
try writer.writeAll(value_bytes);
|
||||
}
|
||||
};
|
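Throughout the builder, free-function `leb.writeUleb128(writer, x)` calls become `writer.writeUleb128(x)` methods on the concrete `*Writer`. The same method works on any writer, for example a fixed one; a small sketch of the opcode-plus-ULEB128 shape that `writePlusUconst` emits (0x23 is DW_OP_plus_uconst):

const std = @import("std");

test "writeUleb128 as a Writer method (sketch)" {
    var buf: [8]u8 = undefined;
    var w: std.Io.Writer = .fixed(&buf);
    try w.writeByte(0x23); // opcode byte
    try w.writeUleb128(@as(u32, 300)); // ULEB128 operand: 0xac 0x02
    try std.testing.expectEqualSlices(u8, &.{ 0x23, 0xac, 0x02 }, w.buffered());
}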
||||
@ -1042,8 +1044,7 @@ fn isOpcodeRegisterLocation(opcode: u8) bool {
|
||||
};
|
||||
}
|
||||
|
||||
const testing = std.testing;
|
||||
test "DWARF expressions" {
|
||||
test "basics" {
|
||||
const allocator = std.testing.allocator;
|
||||
|
||||
const options = Options{};
|
||||
@ -1052,10 +1053,10 @@ test "DWARF expressions" {
|
||||
|
||||
const b = Builder(options);
|
||||
|
||||
var program = std.array_list.Managed(u8).init(allocator);
|
||||
var program: std.Io.Writer.Allocating = .init(allocator);
|
||||
defer program.deinit();
|
||||
|
||||
const writer = program.writer();
|
||||
const writer = &program.writer;
|
||||
|
||||
// Literals
|
||||
{
|
||||
@ -1064,7 +1065,7 @@ test "DWARF expressions" {
|
||||
try b.writeLiteral(writer, @intCast(i));
|
||||
}
|
||||
|
||||
_ = try stack_machine.run(program.items, allocator, context, 0);
|
||||
_ = try stack_machine.run(program.written(), allocator, context, 0);
|
||||
|
||||
for (0..32) |i| {
|
||||
const expected = 31 - i;
|
||||
@ -1108,16 +1109,16 @@ test "DWARF expressions" {
|
||||
var mock_compile_unit: std.debug.Dwarf.CompileUnit = undefined;
|
||||
mock_compile_unit.addr_base = 1;
|
||||
|
||||
var mock_debug_addr = std.array_list.Managed(u8).init(allocator);
|
||||
var mock_debug_addr: std.Io.Writer.Allocating = .init(allocator);
|
||||
defer mock_debug_addr.deinit();
|
||||
|
||||
try mock_debug_addr.writer().writeInt(u16, 0, native_endian);
|
||||
try mock_debug_addr.writer().writeInt(usize, input[11], native_endian);
|
||||
try mock_debug_addr.writer().writeInt(usize, input[12], native_endian);
|
||||
try mock_debug_addr.writer.writeInt(u16, 0, native_endian);
|
||||
try mock_debug_addr.writer.writeInt(usize, input[11], native_endian);
|
||||
try mock_debug_addr.writer.writeInt(usize, input[12], native_endian);
|
||||
|
||||
const context = Context{
|
||||
const context: Context = .{
|
||||
.compile_unit = &mock_compile_unit,
|
||||
.debug_addr = mock_debug_addr.items,
|
||||
.debug_addr = mock_debug_addr.written(),
|
||||
};
|
||||
|
||||
try b.writeConstx(writer, @as(usize, 1));
|
||||
@ -1127,7 +1128,7 @@ test "DWARF expressions" {
|
||||
const type_bytes: []const u8 = &.{ 1, 2, 3, 4 };
|
||||
try b.writeConstType(writer, die_offset, type_bytes);
|
||||
|
||||
_ = try stack_machine.run(program.items, allocator, context, 0);
|
||||
_ = try stack_machine.run(program.written(), allocator, context, 0);
|
||||
|
||||
const const_type = stack_machine.stack.pop().?.const_type;
|
||||
try testing.expectEqual(die_offset, const_type.type_offset);
|
||||
@ -1185,7 +1186,7 @@ test "DWARF expressions" {
|
||||
try b.writeBregx(writer, abi.ipRegNum(native_arch).?, @as(usize, 300));
|
||||
try b.writeRegvalType(writer, @as(u8, 0), @as(usize, 400));
|
||||
|
||||
_ = try stack_machine.run(program.items, allocator, context, 0);
|
||||
_ = try stack_machine.run(program.written(), allocator, context, 0);
|
||||
|
||||
const regval_type = stack_machine.stack.pop().?.regval_type;
|
||||
try testing.expectEqual(@as(usize, 400), regval_type.type_offset);
|
||||
@ -1214,7 +1215,7 @@ test "DWARF expressions" {
|
||||
program.clearRetainingCapacity();
|
||||
try b.writeConst(writer, u8, 1);
|
||||
try b.writeOpcode(writer, OP.dup);
|
||||
_ = try stack_machine.run(program.items, allocator, context, null);
|
||||
_ = try stack_machine.run(program.written(), allocator, context, null);
|
||||
try testing.expectEqual(@as(usize, 1), stack_machine.stack.pop().?.generic);
|
||||
try testing.expectEqual(@as(usize, 1), stack_machine.stack.pop().?.generic);
|
||||
|
||||
@ -1222,7 +1223,7 @@ test "DWARF expressions" {
|
||||
program.clearRetainingCapacity();
|
||||
try b.writeConst(writer, u8, 1);
|
||||
try b.writeOpcode(writer, OP.drop);
|
||||
_ = try stack_machine.run(program.items, allocator, context, null);
|
||||
_ = try stack_machine.run(program.written(), allocator, context, null);
|
||||
try testing.expect(stack_machine.stack.pop() == null);
|
||||
|
||||
stack_machine.reset();
|
||||
@ -1231,7 +1232,7 @@ test "DWARF expressions" {
|
||||
try b.writeConst(writer, u8, 5);
|
||||
try b.writeConst(writer, u8, 6);
|
||||
try b.writePick(writer, 2);
|
||||
_ = try stack_machine.run(program.items, allocator, context, null);
|
||||
_ = try stack_machine.run(program.written(), allocator, context, null);
|
||||
try testing.expectEqual(@as(usize, 4), stack_machine.stack.pop().?.generic);
|
||||
|
||||
stack_machine.reset();
|
||||
@ -1240,7 +1241,7 @@ test "DWARF expressions" {
|
||||
try b.writeConst(writer, u8, 5);
|
||||
try b.writeConst(writer, u8, 6);
|
||||
try b.writeOpcode(writer, OP.over);
|
||||
_ = try stack_machine.run(program.items, allocator, context, null);
|
||||
_ = try stack_machine.run(program.written(), allocator, context, null);
|
||||
try testing.expectEqual(@as(usize, 5), stack_machine.stack.pop().?.generic);
|
||||
|
||||
stack_machine.reset();
|
||||
@ -1248,7 +1249,7 @@ test "DWARF expressions" {
|
||||
try b.writeConst(writer, u8, 5);
|
||||
try b.writeConst(writer, u8, 6);
|
||||
try b.writeOpcode(writer, OP.swap);
|
||||
_ = try stack_machine.run(program.items, allocator, context, null);
|
||||
_ = try stack_machine.run(program.written(), allocator, context, null);
|
||||
try testing.expectEqual(@as(usize, 5), stack_machine.stack.pop().?.generic);
|
||||
try testing.expectEqual(@as(usize, 6), stack_machine.stack.pop().?.generic);
|
||||
|
||||
@ -1258,7 +1259,7 @@ test "DWARF expressions" {
|
||||
try b.writeConst(writer, u8, 5);
|
||||
try b.writeConst(writer, u8, 6);
|
||||
try b.writeOpcode(writer, OP.rot);
|
||||
_ = try stack_machine.run(program.items, allocator, context, null);
|
||||
_ = try stack_machine.run(program.written(), allocator, context, null);
|
||||
try testing.expectEqual(@as(usize, 5), stack_machine.stack.pop().?.generic);
|
||||
try testing.expectEqual(@as(usize, 4), stack_machine.stack.pop().?.generic);
|
||||
try testing.expectEqual(@as(usize, 6), stack_machine.stack.pop().?.generic);
|
||||
@ -1269,7 +1270,7 @@ test "DWARF expressions" {
|
||||
program.clearRetainingCapacity();
|
||||
try b.writeAddr(writer, @intFromPtr(&deref_target));
|
||||
try b.writeOpcode(writer, OP.deref);
|
||||
_ = try stack_machine.run(program.items, allocator, context, null);
|
||||
_ = try stack_machine.run(program.written(), allocator, context, null);
|
||||
try testing.expectEqual(deref_target, stack_machine.stack.pop().?.generic);
|
||||
|
||||
stack_machine.reset();
|
||||
@ -1277,14 +1278,14 @@ test "DWARF expressions" {
|
||||
try b.writeLiteral(writer, 0);
|
||||
try b.writeAddr(writer, @intFromPtr(&deref_target));
|
||||
try b.writeOpcode(writer, OP.xderef);
|
||||
_ = try stack_machine.run(program.items, allocator, context, null);
|
||||
_ = try stack_machine.run(program.written(), allocator, context, null);
|
||||
try testing.expectEqual(deref_target, stack_machine.stack.pop().?.generic);
|
||||
|
||||
stack_machine.reset();
|
||||
program.clearRetainingCapacity();
|
||||
try b.writeAddr(writer, @intFromPtr(&deref_target));
|
||||
try b.writeDerefSize(writer, 1);
|
||||
_ = try stack_machine.run(program.items, allocator, context, null);
|
||||
_ = try stack_machine.run(program.written(), allocator, context, null);
|
||||
try testing.expectEqual(@as(usize, @as(*const u8, @ptrCast(&deref_target)).*), stack_machine.stack.pop().?.generic);
|
||||
|
||||
stack_machine.reset();
|
||||
@ -1292,7 +1293,7 @@ test "DWARF expressions" {
|
||||
try b.writeLiteral(writer, 0);
|
||||
try b.writeAddr(writer, @intFromPtr(&deref_target));
|
||||
try b.writeXDerefSize(writer, 1);
|
||||
_ = try stack_machine.run(program.items, allocator, context, null);
|
||||
_ = try stack_machine.run(program.written(), allocator, context, null);
|
||||
try testing.expectEqual(@as(usize, @as(*const u8, @ptrCast(&deref_target)).*), stack_machine.stack.pop().?.generic);
|
||||
|
||||
const type_offset: usize = @truncate(0xaabbaabb_aabbaabb);
|
||||
@ -1301,7 +1302,7 @@ test "DWARF expressions" {
|
||||
program.clearRetainingCapacity();
|
||||
try b.writeAddr(writer, @intFromPtr(&deref_target));
|
||||
try b.writeDerefType(writer, 1, type_offset);
|
||||
_ = try stack_machine.run(program.items, allocator, context, null);
|
||||
_ = try stack_machine.run(program.written(), allocator, context, null);
|
||||
const deref_type = stack_machine.stack.pop().?.regval_type;
|
||||
try testing.expectEqual(type_offset, deref_type.type_offset);
|
||||
try testing.expectEqual(@as(u8, 1), deref_type.type_size);
|
||||
@ -1312,7 +1313,7 @@ test "DWARF expressions" {
|
||||
try b.writeLiteral(writer, 0);
|
||||
try b.writeAddr(writer, @intFromPtr(&deref_target));
|
||||
try b.writeXDerefType(writer, 1, type_offset);
|
||||
_ = try stack_machine.run(program.items, allocator, context, null);
|
||||
_ = try stack_machine.run(program.written(), allocator, context, null);
|
||||
const xderef_type = stack_machine.stack.pop().?.regval_type;
|
||||
try testing.expectEqual(type_offset, xderef_type.type_offset);
|
||||
try testing.expectEqual(@as(u8, 1), xderef_type.type_size);
|
||||
@ -1323,7 +1324,7 @@ test "DWARF expressions" {
|
||||
stack_machine.reset();
|
||||
program.clearRetainingCapacity();
|
||||
try b.writeOpcode(writer, OP.push_object_address);
|
||||
_ = try stack_machine.run(program.items, allocator, context, null);
|
||||
_ = try stack_machine.run(program.written(), allocator, context, null);
|
||||
try testing.expectEqual(@as(usize, @intFromPtr(context.object_address.?)), stack_machine.stack.pop().?.generic);
|
||||
|
||||
// TODO: Test OP.form_tls_address
|
||||
@ -1333,7 +1334,7 @@ test "DWARF expressions" {
|
||||
stack_machine.reset();
|
||||
program.clearRetainingCapacity();
|
||||
try b.writeOpcode(writer, OP.call_frame_cfa);
|
||||
_ = try stack_machine.run(program.items, allocator, context, null);
|
||||
_ = try stack_machine.run(program.written(), allocator, context, null);
|
||||
try testing.expectEqual(context.cfa.?, stack_machine.stack.pop().?.generic);
|
||||
}
|
||||
|
||||
@ -1345,7 +1346,7 @@ test "DWARF expressions" {
|
||||
program.clearRetainingCapacity();
|
||||
try b.writeConst(writer, i16, -4096);
|
||||
try b.writeOpcode(writer, OP.abs);
|
||||
_ = try stack_machine.run(program.items, allocator, context, null);
|
||||
_ = try stack_machine.run(program.written(), allocator, context, null);
|
||||
try testing.expectEqual(@as(usize, 4096), stack_machine.stack.pop().?.generic);
|
||||
|
||||
stack_machine.reset();
|
||||
@ -1353,7 +1354,7 @@ test "DWARF expressions" {
|
||||
try b.writeConst(writer, u16, 0xff0f);
|
||||
try b.writeConst(writer, u16, 0xf0ff);
|
||||
try b.writeOpcode(writer, OP.@"and");
|
||||
_ = try stack_machine.run(program.items, allocator, context, null);
|
||||
_ = try stack_machine.run(program.written(), allocator, context, null);
|
||||
try testing.expectEqual(@as(usize, 0xf00f), stack_machine.stack.pop().?.generic);
|
||||
|
||||
stack_machine.reset();
|
||||
@ -1361,7 +1362,7 @@ test "DWARF expressions" {
|
||||
try b.writeConst(writer, i16, -404);
|
||||
try b.writeConst(writer, i16, 100);
|
||||
try b.writeOpcode(writer, OP.div);
|
||||
_ = try stack_machine.run(program.items, allocator, context, null);
|
||||
_ = try stack_machine.run(program.written(), allocator, context, null);
|
||||
try testing.expectEqual(@as(isize, -404 / 100), @as(isize, @bitCast(stack_machine.stack.pop().?.generic)));
|
||||
|
||||
stack_machine.reset();
|
||||
@ -1369,7 +1370,7 @@ test "DWARF expressions" {
|
||||
try b.writeConst(writer, u16, 200);
|
||||
try b.writeConst(writer, u16, 50);
|
||||
try b.writeOpcode(writer, OP.minus);
|
||||
_ = try stack_machine.run(program.items, allocator, context, null);
|
||||
_ = try stack_machine.run(program.written(), allocator, context, null);
|
||||
try testing.expectEqual(@as(usize, 150), stack_machine.stack.pop().?.generic);
|
||||
|
||||
stack_machine.reset();
|
||||
@ -1377,7 +1378,7 @@ test "DWARF expressions" {
|
||||
try b.writeConst(writer, u16, 123);
|
||||
try b.writeConst(writer, u16, 100);
|
||||
try b.writeOpcode(writer, OP.mod);
|
||||
_ = try stack_machine.run(program.items, allocator, context, null);
|
||||
_ = try stack_machine.run(program.written(), allocator, context, null);
|
||||
try testing.expectEqual(@as(usize, 23), stack_machine.stack.pop().?.generic);
|
||||
|
||||
stack_machine.reset();
|
||||
@ -1385,7 +1386,7 @@ test "DWARF expressions" {
|
||||
try b.writeConst(writer, u16, 0xff);
|
||||
try b.writeConst(writer, u16, 0xee);
|
||||
try b.writeOpcode(writer, OP.mul);
|
||||
_ = try stack_machine.run(program.items, allocator, context, null);
|
||||
_ = try stack_machine.run(program.written(), allocator, context, null);
|
||||
try testing.expectEqual(@as(usize, 0xed12), stack_machine.stack.pop().?.generic);
|
||||
|
||||
stack_machine.reset();
|
||||
@ -1394,7 +1395,7 @@ test "DWARF expressions" {
|
||||
try b.writeOpcode(writer, OP.neg);
|
||||
try b.writeConst(writer, i16, -6);
|
||||
try b.writeOpcode(writer, OP.neg);
|
||||
_ = try stack_machine.run(program.items, allocator, context, null);
|
||||
_ = try stack_machine.run(program.written(), allocator, context, null);
|
||||
try testing.expectEqual(@as(usize, 6), stack_machine.stack.pop().?.generic);
|
||||
try testing.expectEqual(@as(isize, -5), @as(isize, @bitCast(stack_machine.stack.pop().?.generic)));
|
||||
|
||||
@ -1402,7 +1403,7 @@ test "DWARF expressions" {
|
||||
program.clearRetainingCapacity();
|
||||
try b.writeConst(writer, u16, 0xff0f);
|
||||
try b.writeOpcode(writer, OP.not);
|
||||
_ = try stack_machine.run(program.items, allocator, context, null);
|
||||
_ = try stack_machine.run(program.written(), allocator, context, null);
|
||||
try testing.expectEqual(~@as(usize, 0xff0f), stack_machine.stack.pop().?.generic);
|
||||
|
||||
stack_machine.reset();
|
||||
@ -1410,7 +1411,7 @@ test "DWARF expressions" {
|
||||
try b.writeConst(writer, u16, 0xff0f);
|
||||
try b.writeConst(writer, u16, 0xf0ff);
|
||||
try b.writeOpcode(writer, OP.@"or");
|
||||
_ = try stack_machine.run(program.items, allocator, context, null);
|
||||
_ = try stack_machine.run(program.written(), allocator, context, null);
|
||||
try testing.expectEqual(@as(usize, 0xffff), stack_machine.stack.pop().?.generic);
|
||||
|
||||
stack_machine.reset();
|
||||
@ -1418,14 +1419,14 @@ test "DWARF expressions" {
|
||||
try b.writeConst(writer, i16, 402);
|
||||
try b.writeConst(writer, i16, 100);
|
||||
try b.writeOpcode(writer, OP.plus);
|
||||
_ = try stack_machine.run(program.items, allocator, context, null);
|
||||
_ = try stack_machine.run(program.written(), allocator, context, null);
|
||||
try testing.expectEqual(@as(usize, 502), stack_machine.stack.pop().?.generic);
|
||||
|
||||
stack_machine.reset();
|
||||
program.clearRetainingCapacity();
|
||||
try b.writeConst(writer, u16, 4096);
|
||||
try b.writePlusUconst(writer, @as(usize, 8192));
|
||||
_ = try stack_machine.run(program.items, allocator, context, null);
|
||||
_ = try stack_machine.run(program.written(), allocator, context, null);
|
||||
try testing.expectEqual(@as(usize, 4096 + 8192), stack_machine.stack.pop().?.generic);
|
||||
|
||||
stack_machine.reset();
|
||||
@ -1433,7 +1434,7 @@ test "DWARF expressions" {
|
||||
try b.writeConst(writer, u16, 0xfff);
|
||||
try b.writeConst(writer, u16, 1);
|
||||
try b.writeOpcode(writer, OP.shl);
|
||||
_ = try stack_machine.run(program.items, allocator, context, null);
|
||||
_ = try stack_machine.run(program.written(), allocator, context, null);
|
||||
try testing.expectEqual(@as(usize, 0xfff << 1), stack_machine.stack.pop().?.generic);
|
||||
|
||||
stack_machine.reset();
|
||||
@ -1441,7 +1442,7 @@ test "DWARF expressions" {
|
||||
try b.writeConst(writer, u16, 0xfff);
|
||||
try b.writeConst(writer, u16, 1);
|
||||
try b.writeOpcode(writer, OP.shr);
|
||||
_ = try stack_machine.run(program.items, allocator, context, null);
|
||||
_ = try stack_machine.run(program.written(), allocator, context, null);
|
||||
try testing.expectEqual(@as(usize, 0xfff >> 1), stack_machine.stack.pop().?.generic);
|
||||
|
||||
stack_machine.reset();
|
||||
@ -1449,7 +1450,7 @@ test "DWARF expressions" {
|
||||
try b.writeConst(writer, u16, 0xfff);
|
||||
try b.writeConst(writer, u16, 1);
|
||||
try b.writeOpcode(writer, OP.shr);
|
||||
_ = try stack_machine.run(program.items, allocator, context, null);
|
||||
_ = try stack_machine.run(program.written(), allocator, context, null);
|
||||
try testing.expectEqual(@as(usize, @bitCast(@as(isize, 0xfff) >> 1)), stack_machine.stack.pop().?.generic);
|
||||
|
||||
stack_machine.reset();
|
||||
@ -1457,7 +1458,7 @@ test "DWARF expressions" {
|
||||
try b.writeConst(writer, u16, 0xf0ff);
|
||||
try b.writeConst(writer, u16, 0xff0f);
|
||||
try b.writeOpcode(writer, OP.xor);
|
||||
_ = try stack_machine.run(program.items, allocator, context, null);
|
||||
_ = try stack_machine.run(program.written(), allocator, context, null);
|
||||
try testing.expectEqual(@as(usize, 0x0ff0), stack_machine.stack.pop().?.generic);
|
||||
}
|
||||
|
||||
@ -1486,7 +1487,7 @@ test "DWARF expressions" {
|
||||
try b.writeConst(writer, u16, 1);
|
||||
try b.writeConst(writer, u16, 0);
|
||||
try b.writeOpcode(writer, e[0]);
_ = try stack_machine.run(program.items, allocator, context, null);
_ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(usize, e[3]), stack_machine.stack.pop().?.generic);
try testing.expectEqual(@as(usize, e[2]), stack_machine.stack.pop().?.generic);
try testing.expectEqual(@as(usize, e[1]), stack_machine.stack.pop().?.generic);
@ -1497,7 +1498,7 @@ test "DWARF expressions" {
try b.writeLiteral(writer, 2);
try b.writeSkip(writer, 1);
try b.writeLiteral(writer, 3);
_ = try stack_machine.run(program.items, allocator, context, null);
_ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(usize, 2), stack_machine.stack.pop().?.generic);

stack_machine.reset();
@ -1509,7 +1510,7 @@ test "DWARF expressions" {
try b.writeBra(writer, 1);
try b.writeLiteral(writer, 4);
try b.writeLiteral(writer, 5);
_ = try stack_machine.run(program.items, allocator, context, null);
_ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(usize, 5), stack_machine.stack.pop().?.generic);
try testing.expectEqual(@as(usize, 4), stack_machine.stack.pop().?.generic);
try testing.expect(stack_machine.stack.pop() == null);
@ -1535,7 +1536,7 @@ test "DWARF expressions" {
program.clearRetainingCapacity();
try b.writeConstType(writer, @as(usize, 0), &value_bytes);
try b.writeConvert(writer, @as(usize, 0));
_ = try stack_machine.run(program.items, allocator, context, null);
_ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(value, stack_machine.stack.pop().?.generic);

// Reinterpret to generic type
@ -1543,7 +1544,7 @@ test "DWARF expressions" {
program.clearRetainingCapacity();
try b.writeConstType(writer, @as(usize, 0), &value_bytes);
try b.writeReinterpret(writer, @as(usize, 0));
_ = try stack_machine.run(program.items, allocator, context, null);
_ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(value, stack_machine.stack.pop().?.generic);

// Reinterpret to new type
@ -1553,7 +1554,7 @@ test "DWARF expressions" {
program.clearRetainingCapacity();
try b.writeConstType(writer, @as(usize, 0), &value_bytes);
try b.writeReinterpret(writer, die_offset);
_ = try stack_machine.run(program.items, allocator, context, null);
_ = try stack_machine.run(program.written(), allocator, context, null);
const const_type = stack_machine.stack.pop().?.const_type;
try testing.expectEqual(die_offset, const_type.type_offset);

@ -1561,7 +1562,7 @@ test "DWARF expressions" {
program.clearRetainingCapacity();
try b.writeLiteral(writer, 0);
try b.writeReinterpret(writer, die_offset);
_ = try stack_machine.run(program.items, allocator, context, null);
_ = try stack_machine.run(program.written(), allocator, context, null);
const regval_type = stack_machine.stack.pop().?.regval_type;
try testing.expectEqual(die_offset, regval_type.type_offset);
}
@ -1573,20 +1574,20 @@ test "DWARF expressions" {
stack_machine.reset();
program.clearRetainingCapacity();
try b.writeOpcode(writer, OP.nop);
_ = try stack_machine.run(program.items, allocator, context, null);
_ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expect(stack_machine.stack.pop() == null);

// Sub-expression
{
var sub_program = std.array_list.Managed(u8).init(allocator);
var sub_program: std.Io.Writer.Allocating = .init(allocator);
defer sub_program.deinit();
const sub_writer = sub_program.writer();
const sub_writer = &sub_program.writer;
try b.writeLiteral(sub_writer, 3);

stack_machine.reset();
program.clearRetainingCapacity();
try b.writeEntryValue(writer, sub_program.items);
_ = try stack_machine.run(program.items, allocator, context, null);
try b.writeEntryValue(writer, sub_program.written());
_ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(usize, 3), stack_machine.stack.pop().?.generic);
}

@ -1605,15 +1606,15 @@ test "DWARF expressions" {
if (abi.regBytes(&thread_context, 0, reg_context)) |reg_bytes| {
mem.writeInt(usize, reg_bytes[0..@sizeOf(usize)], 0xee, native_endian);

var sub_program = std.array_list.Managed(u8).init(allocator);
var sub_program: std.Io.Writer.Allocating = .init(allocator);
defer sub_program.deinit();
const sub_writer = sub_program.writer();
const sub_writer = &sub_program.writer;
try b.writeReg(sub_writer, 0);

stack_machine.reset();
program.clearRetainingCapacity();
try b.writeEntryValue(writer, sub_program.items);
_ = try stack_machine.run(program.items, allocator, context, null);
try b.writeEntryValue(writer, sub_program.written());
_ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(usize, 0xee), stack_machine.stack.pop().?.generic);
} else |err| {
switch (err) {

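For reference, the replacement pattern used throughout this change swaps `std.array_list.Managed(u8)` plus `.writer()` for `std.Io.Writer.Allocating`. A minimal sketch of that usage, relying only on the API visible in this diff (`init`, `deinit`, the `writer` field, `written`, `toOwnedSlice`); the function name and values are illustrative, not taken from the commit:

const std = @import("std");

fn buildProgram(gpa: std.mem.Allocator) ![]u8 {
    // Accumulate bytes in memory with the allocating writer.
    var program: std.Io.Writer.Allocating = .init(gpa);
    defer program.deinit();
    const w = &program.writer;

    try w.writeByte(0x11); // e.g. an opcode
    try w.print("{d}", .{42});

    // program.written() borrows the bytes; toOwnedSlice() hands them to the caller.
    return program.toOwnedSlice();
}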
@ -2,10 +2,11 @@ const std = @import("../std.zig");
|
||||
const File = std.fs.File;
|
||||
const Allocator = std.mem.Allocator;
|
||||
const pdb = std.pdb;
|
||||
const assert = std.debug.assert;
|
||||
|
||||
const Pdb = @This();
|
||||
|
||||
in_file: File,
|
||||
file_reader: *File.Reader,
|
||||
msf: Msf,
|
||||
allocator: Allocator,
|
||||
string_table: ?*MsfStream,
|
||||
@ -35,39 +36,38 @@ pub const Module = struct {
|
||||
}
|
||||
};
|
||||
|
||||
pub fn init(allocator: Allocator, path: []const u8) !Pdb {
|
||||
const file = try std.fs.cwd().openFile(path, .{});
|
||||
errdefer file.close();
|
||||
|
||||
pub fn init(gpa: Allocator, file_reader: *File.Reader) !Pdb {
|
||||
return .{
|
||||
.in_file = file,
|
||||
.allocator = allocator,
|
||||
.file_reader = file_reader,
|
||||
.allocator = gpa,
|
||||
.string_table = null,
|
||||
.dbi = null,
|
||||
.msf = try Msf.init(allocator, file),
|
||||
.modules = &[_]Module{},
|
||||
.sect_contribs = &[_]pdb.SectionContribEntry{},
|
||||
.msf = try Msf.init(gpa, file_reader),
|
||||
.modules = &.{},
|
||||
.sect_contribs = &.{},
|
||||
.guid = undefined,
|
||||
.age = undefined,
|
||||
};
|
||||
}
|
||||
|
||||
pub fn deinit(self: *Pdb) void {
|
||||
self.in_file.close();
|
||||
self.msf.deinit(self.allocator);
|
||||
const gpa = self.allocator;
|
||||
self.msf.deinit(gpa);
|
||||
for (self.modules) |*module| {
|
||||
module.deinit(self.allocator);
|
||||
module.deinit(gpa);
|
||||
}
|
||||
self.allocator.free(self.modules);
|
||||
self.allocator.free(self.sect_contribs);
|
||||
gpa.free(self.modules);
|
||||
gpa.free(self.sect_contribs);
|
||||
}
|
||||
|
||||
pub fn parseDbiStream(self: *Pdb) !void {
|
||||
var stream = self.getStream(pdb.StreamType.dbi) orelse
|
||||
return error.InvalidDebugInfo;
|
||||
const reader = stream.reader();
|
||||
|
||||
const header = try reader.readStruct(std.pdb.DbiStreamHeader);
|
||||
const gpa = self.allocator;
|
||||
const reader = &stream.interface;
|
||||
|
||||
const header = try reader.takeStruct(std.pdb.DbiStreamHeader, .little);
|
||||
if (header.version_header != 19990903) // V70, only value observed by LLVM team
|
||||
return error.UnknownPDBVersion;
|
||||
// if (header.Age != age)
|
||||
@ -76,22 +76,28 @@ pub fn parseDbiStream(self: *Pdb) !void {
|
||||
const mod_info_size = header.mod_info_size;
|
||||
const section_contrib_size = header.section_contribution_size;
|
||||
|
||||
var modules = std.array_list.Managed(Module).init(self.allocator);
|
||||
var modules = std.array_list.Managed(Module).init(gpa);
|
||||
errdefer modules.deinit();
|
||||
|
||||
// Module Info Substream
|
||||
var mod_info_offset: usize = 0;
|
||||
while (mod_info_offset != mod_info_size) {
|
||||
const mod_info = try reader.readStruct(pdb.ModInfo);
|
||||
const mod_info = try reader.takeStruct(pdb.ModInfo, .little);
|
||||
var this_record_len: usize = @sizeOf(pdb.ModInfo);
|
||||
|
||||
const module_name = try reader.readUntilDelimiterAlloc(self.allocator, 0, 1024);
|
||||
errdefer self.allocator.free(module_name);
|
||||
this_record_len += module_name.len + 1;
|
||||
var module_name: std.Io.Writer.Allocating = .init(gpa);
|
||||
defer module_name.deinit();
|
||||
this_record_len += try reader.streamDelimiterLimit(&module_name.writer, 0, .limited(1024));
|
||||
assert(reader.buffered()[0] == 0); // TODO change streamDelimiterLimit API
|
||||
reader.toss(1);
|
||||
this_record_len += 1;
|
||||
|
||||
const obj_file_name = try reader.readUntilDelimiterAlloc(self.allocator, 0, 1024);
|
||||
errdefer self.allocator.free(obj_file_name);
|
||||
this_record_len += obj_file_name.len + 1;
|
||||
var obj_file_name: std.Io.Writer.Allocating = .init(gpa);
|
||||
defer obj_file_name.deinit();
|
||||
this_record_len += try reader.streamDelimiterLimit(&obj_file_name.writer, 0, .limited(1024));
|
||||
assert(reader.buffered()[0] == 0); // TODO change streamDelimiterLimit API
|
||||
reader.toss(1);
|
||||
this_record_len += 1;
|
||||
|
||||
if (this_record_len % 4 != 0) {
|
||||
const round_to_next_4 = (this_record_len | 0x3) + 1;
|
||||
@ -100,10 +106,10 @@ pub fn parseDbiStream(self: *Pdb) !void {
|
||||
this_record_len += march_forward_bytes;
|
||||
}
|
||||
|
||||
try modules.append(Module{
|
||||
try modules.append(.{
|
||||
.mod_info = mod_info,
|
||||
.module_name = module_name,
|
||||
.obj_file_name = obj_file_name,
|
||||
.module_name = try module_name.toOwnedSlice(),
|
||||
.obj_file_name = try obj_file_name.toOwnedSlice(),
|
||||
|
||||
.populated = false,
|
||||
.symbols = undefined,
|
||||
@ -117,21 +123,21 @@ pub fn parseDbiStream(self: *Pdb) !void {
|
||||
}
|
||||
|
||||
// Section Contribution Substream
|
||||
var sect_contribs = std.array_list.Managed(pdb.SectionContribEntry).init(self.allocator);
|
||||
var sect_contribs = std.array_list.Managed(pdb.SectionContribEntry).init(gpa);
|
||||
errdefer sect_contribs.deinit();
|
||||
|
||||
var sect_cont_offset: usize = 0;
|
||||
if (section_contrib_size != 0) {
|
||||
const version = reader.readEnum(std.pdb.SectionContrSubstreamVersion, .little) catch |err| switch (err) {
|
||||
error.InvalidValue => return error.InvalidDebugInfo,
|
||||
else => |e| return e,
|
||||
const version = reader.takeEnum(std.pdb.SectionContrSubstreamVersion, .little) catch |err| switch (err) {
|
||||
error.InvalidEnumTag, error.EndOfStream => return error.InvalidDebugInfo,
|
||||
error.ReadFailed => return error.ReadFailed,
|
||||
};
|
||||
_ = version;
|
||||
sect_cont_offset += @sizeOf(u32);
|
||||
}
|
||||
while (sect_cont_offset != section_contrib_size) {
|
||||
const entry = try sect_contribs.addOne();
|
||||
entry.* = try reader.readStruct(pdb.SectionContribEntry);
|
||||
entry.* = try reader.takeStruct(pdb.SectionContribEntry, .little);
|
||||
sect_cont_offset += @sizeOf(pdb.SectionContribEntry);
|
||||
|
||||
if (sect_cont_offset > section_contrib_size)
|
||||
@ -143,29 +149,28 @@ pub fn parseDbiStream(self: *Pdb) !void {
|
||||
}
|
||||
|
||||
pub fn parseInfoStream(self: *Pdb) !void {
|
||||
var stream = self.getStream(pdb.StreamType.pdb) orelse
|
||||
return error.InvalidDebugInfo;
|
||||
const reader = stream.reader();
|
||||
var stream = self.getStream(pdb.StreamType.pdb) orelse return error.InvalidDebugInfo;
|
||||
const reader = &stream.interface;
|
||||
|
||||
// Parse the InfoStreamHeader.
|
||||
const version = try reader.readInt(u32, .little);
|
||||
const signature = try reader.readInt(u32, .little);
|
||||
const version = try reader.takeInt(u32, .little);
|
||||
const signature = try reader.takeInt(u32, .little);
|
||||
_ = signature;
|
||||
const age = try reader.readInt(u32, .little);
|
||||
const guid = try reader.readBytesNoEof(16);
|
||||
const age = try reader.takeInt(u32, .little);
|
||||
const guid = try reader.takeArray(16);
|
||||
|
||||
if (version != 20000404) // VC70, only value observed by LLVM team
|
||||
return error.UnknownPDBVersion;
|
||||
|
||||
self.guid = guid;
|
||||
self.guid = guid.*;
|
||||
self.age = age;
|
||||
|
||||
const gpa = self.allocator;
|
||||
|
||||
// Find the string table.
|
||||
const string_table_index = str_tab_index: {
|
||||
const name_bytes_len = try reader.readInt(u32, .little);
|
||||
const name_bytes = try self.allocator.alloc(u8, name_bytes_len);
|
||||
defer self.allocator.free(name_bytes);
|
||||
try reader.readNoEof(name_bytes);
|
||||
const name_bytes_len = try reader.takeInt(u32, .little);
|
||||
const name_bytes = try reader.readAlloc(gpa, name_bytes_len);
|
||||
|
||||
const HashTableHeader = extern struct {
|
||||
size: u32,
|
||||
@ -175,23 +180,23 @@ pub fn parseInfoStream(self: *Pdb) !void {
|
||||
return cap * 2 / 3 + 1;
|
||||
}
|
||||
};
|
||||
const hash_tbl_hdr = try reader.readStruct(HashTableHeader);
|
||||
const hash_tbl_hdr = try reader.takeStruct(HashTableHeader, .little);
|
||||
if (hash_tbl_hdr.capacity == 0)
|
||||
return error.InvalidDebugInfo;
|
||||
|
||||
if (hash_tbl_hdr.size > HashTableHeader.maxLoad(hash_tbl_hdr.capacity))
|
||||
return error.InvalidDebugInfo;
|
||||
|
||||
const present = try readSparseBitVector(&reader, self.allocator);
|
||||
defer self.allocator.free(present);
|
||||
const present = try readSparseBitVector(reader, gpa);
|
||||
defer gpa.free(present);
|
||||
if (present.len != hash_tbl_hdr.size)
|
||||
return error.InvalidDebugInfo;
|
||||
const deleted = try readSparseBitVector(&reader, self.allocator);
|
||||
defer self.allocator.free(deleted);
|
||||
const deleted = try readSparseBitVector(reader, gpa);
|
||||
defer gpa.free(deleted);
|
||||
|
||||
for (present) |_| {
|
||||
const name_offset = try reader.readInt(u32, .little);
|
||||
const name_index = try reader.readInt(u32, .little);
|
||||
const name_offset = try reader.takeInt(u32, .little);
|
||||
const name_index = try reader.takeInt(u32, .little);
|
||||
if (name_offset > name_bytes.len)
|
||||
return error.InvalidDebugInfo;
|
||||
const name = std.mem.sliceTo(name_bytes[name_offset..], 0);
|
||||
@ -233,6 +238,7 @@ pub fn getSymbolName(self: *Pdb, module: *Module, address: u64) ?[]const u8 {
|
||||
pub fn getLineNumberInfo(self: *Pdb, module: *Module, address: u64) !std.debug.SourceLocation {
|
||||
std.debug.assert(module.populated);
|
||||
const subsect_info = module.subsect_info;
|
||||
const gpa = self.allocator;
|
||||
|
||||
var sect_offset: usize = 0;
|
||||
var skip_len: usize = undefined;
|
||||
@ -287,7 +293,16 @@ pub fn getLineNumberInfo(self: *Pdb, module: *Module, address: u64) !std.debug.S
|
||||
const chksum_hdr: *align(1) pdb.FileChecksumEntryHeader = @ptrCast(&module.subsect_info[subsect_index]);
|
||||
const strtab_offset = @sizeOf(pdb.StringTableHeader) + chksum_hdr.file_name_offset;
|
||||
try self.string_table.?.seekTo(strtab_offset);
|
||||
const source_file_name = try self.string_table.?.reader().readUntilDelimiterAlloc(self.allocator, 0, 1024);
|
||||
const source_file_name = s: {
|
||||
const string_reader = &self.string_table.?.interface;
|
||||
var source_file_name: std.Io.Writer.Allocating = .init(gpa);
|
||||
defer source_file_name.deinit();
|
||||
_ = try string_reader.streamDelimiterLimit(&source_file_name.writer, 0, .limited(1024));
|
||||
assert(string_reader.buffered()[0] == 0); // TODO change streamDelimiterLimit API
|
||||
string_reader.toss(1);
|
||||
break :s try source_file_name.toOwnedSlice();
|
||||
};
|
||||
errdefer gpa.free(source_file_name);
|
||||
|
||||
const line_entry_idx = line_i - 1;
|
||||
|
||||
@ -341,19 +356,16 @@ pub fn getModule(self: *Pdb, index: usize) !?*Module {
|
||||
|
||||
const stream = self.getStreamById(mod.mod_info.module_sym_stream) orelse
|
||||
return error.MissingDebugInfo;
|
||||
const reader = stream.reader();
|
||||
const reader = &stream.interface;
|
||||
|
||||
const signature = try reader.readInt(u32, .little);
|
||||
const signature = try reader.takeInt(u32, .little);
|
||||
if (signature != 4)
|
||||
return error.InvalidDebugInfo;
|
||||
|
||||
mod.symbols = try self.allocator.alloc(u8, mod.mod_info.sym_byte_size - 4);
|
||||
errdefer self.allocator.free(mod.symbols);
|
||||
try reader.readNoEof(mod.symbols);
|
||||
const gpa = self.allocator;
|
||||
|
||||
mod.subsect_info = try self.allocator.alloc(u8, mod.mod_info.c13_byte_size);
|
||||
errdefer self.allocator.free(mod.subsect_info);
|
||||
try reader.readNoEof(mod.subsect_info);
|
||||
mod.symbols = try reader.readAlloc(gpa, mod.mod_info.sym_byte_size - 4);
|
||||
mod.subsect_info = try reader.readAlloc(gpa, mod.mod_info.c13_byte_size);
|
||||
|
||||
var sect_offset: usize = 0;
|
||||
var skip_len: usize = undefined;
|
||||
@ -379,8 +391,7 @@ pub fn getModule(self: *Pdb, index: usize) !?*Module {
|
||||
}
|
||||
|
||||
pub fn getStreamById(self: *Pdb, id: u32) ?*MsfStream {
|
||||
if (id >= self.msf.streams.len)
|
||||
return null;
|
||||
if (id >= self.msf.streams.len) return null;
|
||||
return &self.msf.streams[id];
|
||||
}
|
||||
|
||||
@ -394,17 +405,14 @@ const Msf = struct {
|
||||
directory: MsfStream,
|
||||
streams: []MsfStream,
|
||||
|
||||
fn init(allocator: Allocator, file: File) !Msf {
|
||||
const in = file.deprecatedReader();
|
||||
fn init(gpa: Allocator, file_reader: *File.Reader) !Msf {
|
||||
const superblock = try file_reader.interface.takeStruct(pdb.SuperBlock, .little);
|
||||
|
||||
const superblock = try in.readStruct(pdb.SuperBlock);
|
||||
|
||||
// Sanity checks
|
||||
if (!std.mem.eql(u8, &superblock.file_magic, pdb.SuperBlock.expect_magic))
|
||||
return error.InvalidDebugInfo;
|
||||
if (superblock.free_block_map_block != 1 and superblock.free_block_map_block != 2)
|
||||
return error.InvalidDebugInfo;
|
||||
const file_len = try file.getEndPos();
|
||||
const file_len = try file_reader.getSize();
|
||||
if (superblock.num_blocks * superblock.block_size != file_len)
|
||||
return error.InvalidDebugInfo;
|
||||
switch (superblock.block_size) {
|
||||
@ -417,163 +425,182 @@ const Msf = struct {
|
||||
if (dir_block_count > superblock.block_size / @sizeOf(u32))
|
||||
return error.UnhandledBigDirectoryStream; // cf. BlockMapAddr comment.
|
||||
|
||||
try file.seekTo(superblock.block_size * superblock.block_map_addr);
|
||||
const dir_blocks = try allocator.alloc(u32, dir_block_count);
|
||||
try file_reader.seekTo(superblock.block_size * superblock.block_map_addr);
|
||||
const dir_blocks = try gpa.alloc(u32, dir_block_count);
|
||||
for (dir_blocks) |*b| {
|
||||
b.* = try in.readInt(u32, .little);
|
||||
b.* = try file_reader.interface.takeInt(u32, .little);
|
||||
}
|
||||
var directory = MsfStream.init(
|
||||
superblock.block_size,
|
||||
file,
|
||||
dir_blocks,
|
||||
);
|
||||
var directory_buffer: [64]u8 = undefined;
|
||||
var directory = MsfStream.init(superblock.block_size, file_reader, dir_blocks, &directory_buffer);
|
||||
|
||||
const begin = directory.pos;
|
||||
const stream_count = try directory.reader().readInt(u32, .little);
|
||||
const stream_sizes = try allocator.alloc(u32, stream_count);
|
||||
defer allocator.free(stream_sizes);
|
||||
const begin = directory.logicalPos();
|
||||
const stream_count = try directory.interface.takeInt(u32, .little);
|
||||
const stream_sizes = try gpa.alloc(u32, stream_count);
|
||||
defer gpa.free(stream_sizes);
|
||||
|
||||
// Microsoft's implementation uses @as(u32, -1) for inexistent streams.
|
||||
// These streams are not used, but still participate in the file
|
||||
// and must be taken into account when resolving stream indices.
|
||||
const Nil = 0xFFFFFFFF;
|
||||
const nil_size = 0xFFFFFFFF;
|
||||
for (stream_sizes) |*s| {
|
||||
const size = try directory.reader().readInt(u32, .little);
|
||||
s.* = if (size == Nil) 0 else blockCountFromSize(size, superblock.block_size);
|
||||
const size = try directory.interface.takeInt(u32, .little);
|
||||
s.* = if (size == nil_size) 0 else blockCountFromSize(size, superblock.block_size);
|
||||
}
|
||||
|
||||
const streams = try allocator.alloc(MsfStream, stream_count);
|
||||
const streams = try gpa.alloc(MsfStream, stream_count);
|
||||
errdefer gpa.free(streams);
|
||||
|
||||
for (streams, 0..) |*stream, i| {
|
||||
const size = stream_sizes[i];
|
||||
if (size == 0) {
|
||||
stream.* = MsfStream{
|
||||
.blocks = &[_]u32{},
|
||||
};
|
||||
stream.* = .empty;
|
||||
} else {
|
||||
var blocks = try allocator.alloc(u32, size);
|
||||
var j: u32 = 0;
|
||||
while (j < size) : (j += 1) {
|
||||
const block_id = try directory.reader().readInt(u32, .little);
|
||||
const blocks = try gpa.alloc(u32, size);
|
||||
errdefer gpa.free(blocks);
|
||||
for (blocks) |*block| {
|
||||
const block_id = try directory.interface.takeInt(u32, .little);
|
||||
const n = (block_id % superblock.block_size);
|
||||
// 0 is for pdb.SuperBlock, 1 and 2 for FPMs.
|
||||
if (block_id == 0 or n == 1 or n == 2 or block_id * superblock.block_size > file_len)
|
||||
return error.InvalidBlockIndex;
|
||||
blocks[j] = block_id;
|
||||
block.* = block_id;
|
||||
}
|
||||
|
||||
stream.* = MsfStream.init(
|
||||
superblock.block_size,
|
||||
file,
|
||||
blocks,
|
||||
);
|
||||
const buffer = try gpa.alloc(u8, 64);
|
||||
errdefer gpa.free(buffer);
|
||||
stream.* = .init(superblock.block_size, file_reader, blocks, buffer);
|
||||
}
|
||||
}
|
||||
|
||||
const end = directory.pos;
|
||||
const end = directory.logicalPos();
|
||||
if (end - begin != superblock.num_directory_bytes)
|
||||
return error.InvalidStreamDirectory;
|
||||
|
||||
return Msf{
|
||||
return .{
|
||||
.directory = directory,
|
||||
.streams = streams,
|
||||
};
|
||||
}
|
||||
|
||||
fn deinit(self: *Msf, allocator: Allocator) void {
|
||||
allocator.free(self.directory.blocks);
|
||||
fn deinit(self: *Msf, gpa: Allocator) void {
|
||||
gpa.free(self.directory.blocks);
|
||||
for (self.streams) |*stream| {
|
||||
allocator.free(stream.blocks);
|
||||
gpa.free(stream.interface.buffer);
|
||||
gpa.free(stream.blocks);
|
||||
}
|
||||
allocator.free(self.streams);
|
||||
gpa.free(self.streams);
|
||||
}
|
||||
};
|
||||
|
||||
const MsfStream = struct {
|
||||
in_file: File = undefined,
|
||||
pos: u64 = undefined,
|
||||
blocks: []u32 = undefined,
|
||||
block_size: u32 = undefined,
|
||||
file_reader: *File.Reader,
|
||||
next_read_pos: u64,
|
||||
blocks: []u32,
|
||||
block_size: u32,
|
||||
interface: std.Io.Reader,
|
||||
err: ?Error,
|
||||
|
||||
pub const Error = @typeInfo(@typeInfo(@TypeOf(read)).@"fn".return_type.?).error_union.error_set;
|
||||
const Error = File.Reader.SeekError;
|
||||
|
||||
fn init(block_size: u32, file: File, blocks: []u32) MsfStream {
|
||||
const stream = MsfStream{
|
||||
.in_file = file,
|
||||
.pos = 0,
|
||||
const empty: MsfStream = .{
|
||||
.file_reader = undefined,
|
||||
.next_read_pos = 0,
|
||||
.blocks = &.{},
|
||||
.block_size = undefined,
|
||||
.interface = .ending_instance,
|
||||
.err = null,
|
||||
};
|
||||
|
||||
fn init(block_size: u32, file_reader: *File.Reader, blocks: []u32, buffer: []u8) MsfStream {
|
||||
return .{
|
||||
.file_reader = file_reader,
|
||||
.next_read_pos = 0,
|
||||
.blocks = blocks,
|
||||
.block_size = block_size,
|
||||
.interface = .{
|
||||
.vtable = &.{ .stream = stream },
|
||||
.buffer = buffer,
|
||||
.seek = 0,
|
||||
.end = 0,
|
||||
},
|
||||
.err = null,
|
||||
};
|
||||
|
||||
return stream;
|
||||
}
|
||||
|
||||
fn read(self: *MsfStream, buffer: []u8) !usize {
|
||||
var block_id = @as(usize, @intCast(self.pos / self.block_size));
|
||||
if (block_id >= self.blocks.len) return 0; // End of Stream
|
||||
var block = self.blocks[block_id];
|
||||
var offset = self.pos % self.block_size;
|
||||
fn stream(r: *std.Io.Reader, w: *std.Io.Writer, limit: std.Io.Limit) std.Io.Reader.StreamError!usize {
|
||||
const ms: *MsfStream = @alignCast(@fieldParentPtr("interface", r));
|
||||
|
||||
try self.in_file.seekTo(block * self.block_size + offset);
|
||||
const in = self.in_file.deprecatedReader();
|
||||
var block_id: usize = @intCast(ms.next_read_pos / ms.block_size);
|
||||
if (block_id >= ms.blocks.len) return error.EndOfStream;
|
||||
var block = ms.blocks[block_id];
|
||||
var offset = ms.next_read_pos % ms.block_size;
|
||||
|
||||
var size: usize = 0;
|
||||
var rem_buffer = buffer;
|
||||
while (size < buffer.len) {
|
||||
const size_to_read = @min(self.block_size - offset, rem_buffer.len);
|
||||
size += try in.read(rem_buffer[0..size_to_read]);
|
||||
rem_buffer = buffer[size..];
|
||||
offset += size_to_read;
|
||||
ms.file_reader.seekTo(block * ms.block_size + offset) catch |err| {
|
||||
ms.err = err;
|
||||
return error.ReadFailed;
|
||||
};
|
||||
|
||||
var remaining = @intFromEnum(limit);
|
||||
while (remaining != 0) {
|
||||
const stream_len: usize = @min(remaining, ms.block_size - offset);
|
||||
const n = try ms.file_reader.interface.stream(w, .limited(stream_len));
|
||||
remaining -= n;
|
||||
offset += n;
|
||||
|
||||
// If we're at the end of a block, go to the next one.
|
||||
if (offset == self.block_size) {
|
||||
if (offset == ms.block_size) {
|
||||
offset = 0;
|
||||
block_id += 1;
|
||||
if (block_id >= self.blocks.len) break; // End of Stream
|
||||
block = self.blocks[block_id];
|
||||
try self.in_file.seekTo(block * self.block_size);
|
||||
if (block_id >= ms.blocks.len) break; // End of Stream
|
||||
block = ms.blocks[block_id];
|
||||
ms.file_reader.seekTo(block * ms.block_size) catch |err| {
|
||||
ms.err = err;
|
||||
return error.ReadFailed;
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
self.pos += buffer.len;
|
||||
return buffer.len;
|
||||
const total = @intFromEnum(limit) - remaining;
|
||||
ms.next_read_pos += total;
|
||||
return total;
|
||||
}
|
||||
|
||||
pub fn seekBy(self: *MsfStream, len: i64) !void {
|
||||
self.pos = @as(u64, @intCast(@as(i64, @intCast(self.pos)) + len));
|
||||
if (self.pos >= self.blocks.len * self.block_size)
|
||||
return error.EOF;
|
||||
pub fn logicalPos(ms: *const MsfStream) u64 {
|
||||
return ms.next_read_pos - ms.interface.bufferedLen();
|
||||
}
|
||||
|
||||
pub fn seekTo(self: *MsfStream, len: u64) !void {
|
||||
self.pos = len;
|
||||
if (self.pos >= self.blocks.len * self.block_size)
|
||||
return error.EOF;
|
||||
pub fn seekBy(ms: *MsfStream, len: i64) !void {
|
||||
ms.next_read_pos = @as(u64, @intCast(@as(i64, @intCast(ms.logicalPos())) + len));
|
||||
if (ms.next_read_pos >= ms.blocks.len * ms.block_size) return error.EOF;
|
||||
ms.interface.tossBuffered();
|
||||
}
|
||||
|
||||
fn getSize(self: *const MsfStream) u64 {
|
||||
return self.blocks.len * self.block_size;
|
||||
pub fn seekTo(ms: *MsfStream, len: u64) !void {
|
||||
ms.next_read_pos = len;
|
||||
if (ms.next_read_pos >= ms.blocks.len * ms.block_size) return error.EOF;
|
||||
ms.interface.tossBuffered();
|
||||
}
|
||||
|
||||
fn getFilePos(self: MsfStream) u64 {
|
||||
const block_id = self.pos / self.block_size;
|
||||
const block = self.blocks[block_id];
|
||||
const offset = self.pos % self.block_size;
|
||||
|
||||
return block * self.block_size + offset;
|
||||
fn getSize(ms: *const MsfStream) u64 {
|
||||
return ms.blocks.len * ms.block_size;
|
||||
}
|
||||
|
||||
pub fn reader(self: *MsfStream) std.io.GenericReader(*MsfStream, Error, read) {
|
||||
return .{ .context = self };
|
||||
fn getFilePos(ms: *const MsfStream) u64 {
|
||||
const pos = ms.logicalPos();
|
||||
const block_id = pos / ms.block_size;
|
||||
const block = ms.blocks[block_id];
|
||||
const offset = pos % ms.block_size;
|
||||
|
||||
return block * ms.block_size + offset;
|
||||
}
|
||||
};
|
||||
|
||||
fn readSparseBitVector(stream: anytype, allocator: Allocator) ![]u32 {
|
||||
const num_words = try stream.readInt(u32, .little);
|
||||
fn readSparseBitVector(reader: *std.Io.Reader, allocator: Allocator) ![]u32 {
|
||||
const num_words = try reader.takeInt(u32, .little);
|
||||
var list = std.array_list.Managed(u32).init(allocator);
|
||||
errdefer list.deinit();
|
||||
var word_i: u32 = 0;
|
||||
while (word_i != num_words) : (word_i += 1) {
|
||||
const word = try stream.readInt(u32, .little);
|
||||
const word = try reader.takeInt(u32, .little);
|
||||
var bit_i: u5 = 0;
|
||||
while (true) : (bit_i += 1) {
|
||||
if (word & (@as(u32, 1) << bit_i) != 0) {
|
||||
|
||||
@ -713,22 +713,26 @@ pub const Module = switch (native_os) {
|
||||
},
|
||||
.uefi, .windows => struct {
|
||||
base_address: usize,
|
||||
pdb: ?Pdb = null,
|
||||
dwarf: ?Dwarf = null,
|
||||
pdb: ?Pdb,
|
||||
dwarf: ?Dwarf,
|
||||
coff_image_base: u64,
|
||||
|
||||
/// Only used if pdb is non-null
|
||||
coff_section_headers: []coff.SectionHeader,
|
||||
|
||||
pub fn deinit(self: *@This(), allocator: Allocator) void {
|
||||
pub fn deinit(self: *@This(), gpa: Allocator) void {
|
||||
if (self.dwarf) |*dwarf| {
|
||||
dwarf.deinit(allocator);
|
||||
dwarf.deinit(gpa);
|
||||
}
|
||||
|
||||
if (self.pdb) |*p| {
|
||||
gpa.free(p.file_reader.interface.buffer);
|
||||
gpa.destroy(p.file_reader);
|
||||
p.deinit();
|
||||
allocator.free(self.coff_section_headers);
|
||||
gpa.free(self.coff_section_headers);
|
||||
}
|
||||
|
||||
self.* = undefined;
|
||||
}
|
||||
|
||||
fn getSymbolFromPdb(self: *@This(), relocated_address: usize) !?std.debug.Symbol {
|
||||
@ -970,23 +974,25 @@ fn readMachODebugInfo(allocator: Allocator, macho_file: File) !Module {
|
||||
};
|
||||
}
|
||||
|
||||
fn readCoffDebugInfo(allocator: Allocator, coff_obj: *coff.Coff) !Module {
|
||||
fn readCoffDebugInfo(gpa: Allocator, coff_obj: *coff.Coff) !Module {
|
||||
nosuspend {
|
||||
var di: Module = .{
|
||||
.base_address = undefined,
|
||||
.coff_image_base = coff_obj.getImageBase(),
|
||||
.coff_section_headers = undefined,
|
||||
.pdb = null,
|
||||
.dwarf = null,
|
||||
};
|
||||
|
||||
if (coff_obj.getSectionByName(".debug_info")) |_| {
|
||||
// This coff file has embedded DWARF debug info
|
||||
var sections: Dwarf.SectionArray = Dwarf.null_section_array;
|
||||
errdefer for (sections) |section| if (section) |s| if (s.owned) allocator.free(s.data);
|
||||
errdefer for (sections) |section| if (section) |s| if (s.owned) gpa.free(s.data);
|
||||
|
||||
inline for (@typeInfo(Dwarf.Section.Id).@"enum".fields, 0..) |section, i| {
|
||||
sections[i] = if (coff_obj.getSectionByName("." ++ section.name)) |section_header| blk: {
|
||||
break :blk .{
|
||||
.data = try coff_obj.getSectionDataAlloc(section_header, allocator),
|
||||
.data = try coff_obj.getSectionDataAlloc(section_header, gpa),
|
||||
.virtual_address = section_header.virtual_address,
|
||||
.owned = true,
|
||||
};
|
||||
@ -999,7 +1005,7 @@ fn readCoffDebugInfo(allocator: Allocator, coff_obj: *coff.Coff) !Module {
|
||||
.is_macho = false,
|
||||
};
|
||||
|
||||
try Dwarf.open(&dwarf, allocator);
|
||||
try Dwarf.open(&dwarf, gpa);
|
||||
di.dwarf = dwarf;
|
||||
}
|
||||
|
||||
@ -1008,20 +1014,31 @@ fn readCoffDebugInfo(allocator: Allocator, coff_obj: *coff.Coff) !Module {
|
||||
if (fs.path.isAbsolute(raw_path)) {
|
||||
break :blk raw_path;
|
||||
} else {
|
||||
const self_dir = try fs.selfExeDirPathAlloc(allocator);
|
||||
defer allocator.free(self_dir);
|
||||
break :blk try fs.path.join(allocator, &.{ self_dir, raw_path });
|
||||
const self_dir = try fs.selfExeDirPathAlloc(gpa);
|
||||
defer gpa.free(self_dir);
|
||||
break :blk try fs.path.join(gpa, &.{ self_dir, raw_path });
|
||||
}
|
||||
};
|
||||
defer if (path.ptr != raw_path.ptr) allocator.free(path);
|
||||
defer if (path.ptr != raw_path.ptr) gpa.free(path);
|
||||
|
||||
di.pdb = Pdb.init(allocator, path) catch |err| switch (err) {
|
||||
const pdb_file = std.fs.cwd().openFile(path, .{}) catch |err| switch (err) {
|
||||
error.FileNotFound, error.IsDir => {
|
||||
if (di.dwarf == null) return error.MissingDebugInfo;
|
||||
return di;
|
||||
},
|
||||
else => return err,
|
||||
else => |e| return e,
|
||||
};
|
||||
errdefer pdb_file.close();
|
||||
|
||||
const pdb_file_reader_buffer = try gpa.alloc(u8, 4096);
|
||||
errdefer gpa.free(pdb_file_reader_buffer);
|
||||
|
||||
const pdb_file_reader = try gpa.create(File.Reader);
|
||||
errdefer gpa.destroy(pdb_file_reader);
|
||||
|
||||
pdb_file_reader.* = pdb_file.reader(pdb_file_reader_buffer);
|
||||
|
||||
di.pdb = try Pdb.init(gpa, pdb_file_reader);
|
||||
try di.pdb.?.parseInfoStream();
|
||||
try di.pdb.?.parseDbiStream();
|
||||
|
||||
@ -1029,8 +1046,8 @@ fn readCoffDebugInfo(allocator: Allocator, coff_obj: *coff.Coff) !Module {
|
||||
return error.InvalidDebugInfo;
|
||||
|
||||
// Only used by the pdb path
|
||||
di.coff_section_headers = try coff_obj.getSectionHeadersAlloc(allocator);
|
||||
errdefer allocator.free(di.coff_section_headers);
|
||||
di.coff_section_headers = try coff_obj.getSectionHeadersAlloc(gpa);
|
||||
errdefer gpa.free(di.coff_section_headers);
|
||||
|
||||
return di;
|
||||
}
|
||||
|
||||
@ -1097,14 +1097,6 @@ pub fn deprecatedReader(file: File) DeprecatedReader {
return .{ .context = file };
}

/// Deprecated in favor of `Writer`.
pub const DeprecatedWriter = io.GenericWriter(File, WriteError, write);

/// Deprecated in favor of `Writer`.
pub fn deprecatedWriter(file: File) DeprecatedWriter {
return .{ .context = file };
}

/// Memoizes key information about a file handle such as:
/// * The size from calling stat, or the error that occurred therein.
/// * The current seek position.

@ -6,7 +6,7 @@
//! The high-level `parseFromSlice` and `parseFromTokenSource` deserialize a JSON document into a Zig type.
//! Parse into a dynamically-typed `Value` to load any JSON value for runtime inspection.
//!
//! The low-level `writeStream` emits syntax-conformant JSON tokens to a `std.io.GenericWriter`.
//! The low-level `writeStream` emits syntax-conformant JSON tokens to a `std.Io.Writer`.
//! The high-level `stringify` serializes a Zig or `Value` type into JSON.

const builtin = @import("builtin");

@ -33,28 +33,6 @@ pub fn readUleb128(comptime T: type, reader: anytype) !T {
|
||||
return @as(T, @truncate(value));
|
||||
}
|
||||
|
||||
/// Write a single unsigned integer as unsigned LEB128 to the given writer.
|
||||
pub fn writeUleb128(writer: anytype, arg: anytype) !void {
|
||||
const Arg = @TypeOf(arg);
|
||||
const Int = switch (Arg) {
|
||||
comptime_int => std.math.IntFittingRange(arg, arg),
|
||||
else => Arg,
|
||||
};
|
||||
const Value = if (@typeInfo(Int).int.bits < 8) u8 else Int;
|
||||
var value: Value = arg;
|
||||
|
||||
while (true) {
|
||||
const byte: u8 = @truncate(value & 0x7f);
|
||||
value >>= 7;
|
||||
if (value == 0) {
|
||||
try writer.writeByte(byte);
|
||||
break;
|
||||
} else {
|
||||
try writer.writeByte(byte | 0x80);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Read a single signed LEB128 value from the given reader as type T,
|
||||
/// or error.Overflow if the value cannot fit.
|
||||
pub fn readIleb128(comptime T: type, reader: anytype) !T {
|
||||
@ -374,84 +352,3 @@ test "deserialize unsigned LEB128" {
|
||||
// Decode sequence of ULEB128 values
|
||||
try test_read_uleb128_seq(u64, 4, "\x81\x01\x3f\x80\x7f\x80\x80\x80\x00");
|
||||
}
|
||||
|
||||
fn test_write_leb128(value: anytype) !void {
|
||||
const T = @TypeOf(value);
|
||||
const signedness = @typeInfo(T).int.signedness;
|
||||
const t_signed = signedness == .signed;
|
||||
|
||||
const writeStream = if (t_signed) writeIleb128 else writeUleb128;
|
||||
const readStream = if (t_signed) readIleb128 else readUleb128;
|
||||
|
||||
// decode to a larger bit size too, to ensure sign extension
|
||||
// is working as expected
|
||||
const larger_type_bits = ((@typeInfo(T).int.bits + 8) / 8) * 8;
|
||||
const B = std.meta.Int(signedness, larger_type_bits);
|
||||
|
||||
const bytes_needed = bn: {
|
||||
if (@typeInfo(T).int.bits <= 7) break :bn @as(u16, 1);
|
||||
|
||||
const unused_bits = if (value < 0) @clz(~value) else @clz(value);
|
||||
const used_bits: u16 = (@typeInfo(T).int.bits - unused_bits) + @intFromBool(t_signed);
|
||||
if (used_bits <= 7) break :bn @as(u16, 1);
|
||||
break :bn ((used_bits + 6) / 7);
|
||||
};
|
||||
|
||||
const max_groups = if (@typeInfo(T).int.bits == 0) 1 else (@typeInfo(T).int.bits + 6) / 7;
|
||||
|
||||
var buf: [max_groups]u8 = undefined;
|
||||
var fbs = std.io.fixedBufferStream(&buf);
|
||||
|
||||
// stream write
|
||||
try writeStream(fbs.writer(), value);
|
||||
const w1_pos = fbs.pos;
|
||||
try testing.expect(w1_pos == bytes_needed);
|
||||
|
||||
// stream read
|
||||
fbs.pos = 0;
|
||||
const sr = try readStream(T, fbs.reader());
|
||||
try testing.expect(fbs.pos == w1_pos);
|
||||
try testing.expect(sr == value);
|
||||
|
||||
// bigger type stream read
|
||||
fbs.pos = 0;
|
||||
const bsr = try readStream(B, fbs.reader());
|
||||
try testing.expect(fbs.pos == w1_pos);
|
||||
try testing.expect(bsr == value);
|
||||
}
|
||||
|
||||
test "serialize unsigned LEB128" {
|
||||
if (builtin.cpu.arch == .x86 and builtin.abi == .musl and builtin.link_mode == .dynamic) return error.SkipZigTest;
|
||||
|
||||
const max_bits = 18;
|
||||
|
||||
comptime var t = 0;
|
||||
inline while (t <= max_bits) : (t += 1) {
|
||||
const T = std.meta.Int(.unsigned, t);
|
||||
const min = std.math.minInt(T);
|
||||
const max = std.math.maxInt(T);
|
||||
var i = @as(std.meta.Int(.unsigned, @typeInfo(T).int.bits + 1), min);
|
||||
|
||||
while (i <= max) : (i += 1) try test_write_leb128(@as(T, @intCast(i)));
|
||||
}
|
||||
}
|
||||
|
||||
test "serialize signed LEB128" {
|
||||
if (builtin.cpu.arch == .x86 and builtin.abi == .musl and builtin.link_mode == .dynamic) return error.SkipZigTest;
|
||||
|
||||
// explicitly test i0 because starting `t` at 0
|
||||
// will break the while loop
|
||||
try test_write_leb128(@as(i0, 0));
|
||||
|
||||
const max_bits = 18;
|
||||
|
||||
comptime var t = 1;
|
||||
inline while (t <= max_bits) : (t += 1) {
|
||||
const T = std.meta.Int(.signed, t);
|
||||
const min = std.math.minInt(T);
|
||||
const max = std.math.maxInt(T);
|
||||
var i = @as(std.meta.Int(.signed, @typeInfo(T).int.bits + 1), min);
|
||||
|
||||
while (i <= max) : (i += 1) try test_write_leb128(@as(T, @intCast(i)));
|
||||
}
|
||||
}
|
||||
|
||||
@ -1883,10 +1883,8 @@ pub const GenericBlob = extern struct {
pub const data_in_code_entry = extern struct {
/// From mach_header to start of data range.
offset: u32,

/// Number of bytes in data range.
length: u16,

/// A DICE_KIND value.
kind: u16,
};

@ -683,11 +683,11 @@ test "mmap" {
const file = try tmp.dir.createFile(test_out_file, .{});
defer file.close();

const stream = file.deprecatedWriter();
var stream = file.writer(&.{});

var i: u32 = 0;
while (i < alloc_size / @sizeOf(u32)) : (i += 1) {
try stream.writeInt(u32, i, .little);
try stream.interface.writeInt(u32, i, .little);
}
}

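The file-writing side follows the same shape: `deprecatedWriter()` goes away in favor of `File.writer`, which takes a caller-supplied buffer and exposes the new `std.Io.Writer` as its `interface` field. A rough sketch of buffered use; the buffer size and the explicit `flush` call are assumptions rather than something this diff shows (the mmap test above writes unbuffered via `&.{}`):

const std = @import("std");

fn writeCounters(file: std.fs.File) !void {
    var buf: [4096]u8 = undefined;
    var file_writer = file.writer(&buf);
    const w = &file_writer.interface;

    var i: u32 = 0;
    while (i < 16) : (i += 1) {
        try w.writeInt(u32, i, .little);
    }
    try w.flush(); // flush whatever is still sitting in the buffer
}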
@ -1,6 +1,12 @@
|
||||
const std = @import("std.zig");
|
||||
//! The Time Zone Information Format (TZif)
|
||||
//! https://datatracker.ietf.org/doc/html/rfc8536
|
||||
|
||||
const builtin = @import("builtin");
|
||||
|
||||
const std = @import("std.zig");
|
||||
const Reader = std.Io.Reader;
|
||||
const Allocator = std.mem.Allocator;
|
||||
|
||||
pub const Transition = struct {
|
||||
ts: i64,
|
||||
timetype: *Timetype,
|
||||
@ -34,7 +40,7 @@ pub const Leapsecond = struct {
|
||||
};
|
||||
|
||||
pub const Tz = struct {
|
||||
allocator: std.mem.Allocator,
|
||||
allocator: Allocator,
|
||||
transitions: []const Transition,
|
||||
timetypes: []const Timetype,
|
||||
leapseconds: []const Leapsecond,
|
||||
@ -54,34 +60,30 @@ pub const Tz = struct {
|
||||
},
|
||||
};
|
||||
|
||||
pub fn parse(allocator: std.mem.Allocator, reader: anytype) !Tz {
|
||||
var legacy_header = try reader.readStruct(Header);
|
||||
pub fn parse(allocator: Allocator, reader: *Reader) !Tz {
|
||||
const legacy_header = try reader.takeStruct(Header, .big);
|
||||
if (!std.mem.eql(u8, &legacy_header.magic, "TZif")) return error.BadHeader;
|
||||
if (legacy_header.version != 0 and legacy_header.version != '2' and legacy_header.version != '3') return error.BadVersion;
|
||||
if (legacy_header.version != 0 and legacy_header.version != '2' and legacy_header.version != '3')
|
||||
return error.BadVersion;
|
||||
|
||||
if (builtin.target.cpu.arch.endian() != std.builtin.Endian.big) {
|
||||
std.mem.byteSwapAllFields(@TypeOf(legacy_header.counts), &legacy_header.counts);
|
||||
}
|
||||
|
||||
if (legacy_header.version == 0) {
|
||||
if (legacy_header.version == 0)
|
||||
return parseBlock(allocator, reader, legacy_header, true);
|
||||
} else {
|
||||
// If the format is modern, just skip over the legacy data
|
||||
const skipv = legacy_header.counts.timecnt * 5 + legacy_header.counts.typecnt * 6 + legacy_header.counts.charcnt + legacy_header.counts.leapcnt * 8 + legacy_header.counts.isstdcnt + legacy_header.counts.isutcnt;
|
||||
try reader.skipBytes(skipv, .{});
|
||||
|
||||
var header = try reader.readStruct(Header);
|
||||
if (!std.mem.eql(u8, &header.magic, "TZif")) return error.BadHeader;
|
||||
if (header.version != '2' and header.version != '3') return error.BadVersion;
|
||||
if (builtin.target.cpu.arch.endian() != std.builtin.Endian.big) {
|
||||
std.mem.byteSwapAllFields(@TypeOf(header.counts), &header.counts);
|
||||
}
|
||||
// If the format is modern, just skip over the legacy data
|
||||
const skip_n = legacy_header.counts.timecnt * 5 +
|
||||
legacy_header.counts.typecnt * 6 +
|
||||
legacy_header.counts.charcnt + legacy_header.counts.leapcnt * 8 +
|
||||
legacy_header.counts.isstdcnt + legacy_header.counts.isutcnt;
|
||||
try reader.discardAll(skip_n);
|
||||
|
||||
return parseBlock(allocator, reader, header, false);
|
||||
}
|
||||
var header = try reader.takeStruct(Header, .big);
|
||||
if (!std.mem.eql(u8, &header.magic, "TZif")) return error.BadHeader;
|
||||
if (header.version != '2' and header.version != '3') return error.BadVersion;
|
||||
|
||||
return parseBlock(allocator, reader, header, false);
|
||||
}
|
||||
|
||||
fn parseBlock(allocator: std.mem.Allocator, reader: anytype, header: Header, legacy: bool) !Tz {
|
||||
fn parseBlock(allocator: Allocator, reader: *Reader, header: Header, legacy: bool) !Tz {
|
||||
if (header.counts.isstdcnt != 0 and header.counts.isstdcnt != header.counts.typecnt) return error.Malformed; // rfc8536: isstdcnt [...] MUST either be zero or equal to "typecnt"
|
||||
if (header.counts.isutcnt != 0 and header.counts.isutcnt != header.counts.typecnt) return error.Malformed; // rfc8536: isutcnt [...] MUST either be zero or equal to "typecnt"
|
||||
if (header.counts.typecnt == 0) return error.Malformed; // rfc8536: typecnt [...] MUST NOT be zero
|
||||
@ -98,12 +100,12 @@ pub const Tz = struct {
|
||||
// Parse transition types
|
||||
var i: usize = 0;
|
||||
while (i < header.counts.timecnt) : (i += 1) {
|
||||
transitions[i].ts = if (legacy) try reader.readInt(i32, .big) else try reader.readInt(i64, .big);
|
||||
transitions[i].ts = if (legacy) try reader.takeInt(i32, .big) else try reader.takeInt(i64, .big);
|
||||
}
|
||||
|
||||
i = 0;
|
||||
while (i < header.counts.timecnt) : (i += 1) {
|
||||
const tt = try reader.readByte();
|
||||
const tt = try reader.takeByte();
|
||||
if (tt >= timetypes.len) return error.Malformed; // rfc8536: Each type index MUST be in the range [0, "typecnt" - 1]
|
||||
transitions[i].timetype = &timetypes[tt];
|
||||
}
|
||||
@ -111,11 +113,11 @@ pub const Tz = struct {
|
||||
// Parse time types
|
||||
i = 0;
|
||||
while (i < header.counts.typecnt) : (i += 1) {
|
||||
const offset = try reader.readInt(i32, .big);
|
||||
const offset = try reader.takeInt(i32, .big);
|
||||
if (offset < -2147483648) return error.Malformed; // rfc8536: utoff [...] MUST NOT be -2**31
|
||||
const dst = try reader.readByte();
|
||||
const dst = try reader.takeByte();
|
||||
if (dst != 0 and dst != 1) return error.Malformed; // rfc8536: (is)dst [...] The value MUST be 0 or 1.
|
||||
const idx = try reader.readByte();
|
||||
const idx = try reader.takeByte();
|
||||
if (idx > header.counts.charcnt - 1) return error.Malformed; // rfc8536: (desig)idx [...] Each index MUST be in the range [0, "charcnt" - 1]
|
||||
timetypes[i] = .{
|
||||
.offset = offset,
|
||||
@ -128,7 +130,7 @@ pub const Tz = struct {
|
||||
}
|
||||
|
||||
var designators_data: [256 + 6]u8 = undefined;
|
||||
try reader.readNoEof(designators_data[0..header.counts.charcnt]);
|
||||
try reader.readSliceAll(designators_data[0..header.counts.charcnt]);
|
||||
const designators = designators_data[0..header.counts.charcnt];
|
||||
if (designators[designators.len - 1] != 0) return error.Malformed; // rfc8536: charcnt [...] includes the trailing NUL (0x00) octet
|
||||
|
||||
@ -144,12 +146,12 @@ pub const Tz = struct {
|
||||
// Parse leap seconds
|
||||
i = 0;
|
||||
while (i < header.counts.leapcnt) : (i += 1) {
|
||||
const occur: i64 = if (legacy) try reader.readInt(i32, .big) else try reader.readInt(i64, .big);
|
||||
const occur: i64 = if (legacy) try reader.takeInt(i32, .big) else try reader.takeInt(i64, .big);
|
||||
if (occur < 0) return error.Malformed; // rfc8536: occur [...] MUST be nonnegative
|
||||
if (i > 0 and leapseconds[i - 1].occurrence + 2419199 > occur) return error.Malformed; // rfc8536: occur [...] each later value MUST be at least 2419199 greater than the previous value
|
||||
if (occur > std.math.maxInt(i48)) return error.Malformed; // Unreasonably far into the future
|
||||
|
||||
const corr = try reader.readInt(i32, .big);
|
||||
const corr = try reader.takeInt(i32, .big);
|
||||
if (i == 0 and corr != -1 and corr != 1) return error.Malformed; // rfc8536: The correction value in the first leap-second record, if present, MUST be either one (1) or minus one (-1)
|
||||
if (i > 0 and leapseconds[i - 1].correction != corr + 1 and leapseconds[i - 1].correction != corr - 1) return error.Malformed; // rfc8536: The correction values in adjacent leap-second records MUST differ by exactly one (1)
|
||||
if (corr > std.math.maxInt(i16)) return error.Malformed; // Unreasonably large correction
|
||||
@ -163,7 +165,7 @@ pub const Tz = struct {
|
||||
// Parse standard/wall indicators
|
||||
i = 0;
|
||||
while (i < header.counts.isstdcnt) : (i += 1) {
|
||||
const stdtime = try reader.readByte();
|
||||
const stdtime = try reader.takeByte();
|
||||
if (stdtime == 1) {
|
||||
timetypes[i].flags |= 0x02;
|
||||
}
|
||||
@ -172,7 +174,7 @@ pub const Tz = struct {
|
||||
// Parse UT/local indicators
|
||||
i = 0;
|
||||
while (i < header.counts.isutcnt) : (i += 1) {
|
||||
const ut = try reader.readByte();
|
||||
const ut = try reader.takeByte();
|
||||
if (ut == 1) {
|
||||
timetypes[i].flags |= 0x04;
|
||||
if (!timetypes[i].standardTimeIndicator()) return error.Malformed; // rfc8536: standard/wall value MUST be one (1) if the UT/local value is one (1)
|
||||
@ -182,9 +184,8 @@ pub const Tz = struct {
|
||||
// Footer
|
||||
var footer: ?[]u8 = null;
|
||||
if (!legacy) {
|
||||
if ((try reader.readByte()) != '\n') return error.Malformed; // An rfc8536 footer must start with a newline
|
||||
var footerdata_buf: [128]u8 = undefined;
|
||||
const footer_mem = reader.readUntilDelimiter(&footerdata_buf, '\n') catch |err| switch (err) {
|
||||
if ((try reader.takeByte()) != '\n') return error.Malformed; // An rfc8536 footer must start with a newline
|
||||
const footer_mem = reader.takeSentinel('\n') catch |err| switch (err) {
|
||||
error.StreamTooLong => return error.OverlargeFooter, // Read more than 128 bytes, much larger than any reasonable POSIX TZ string
|
||||
else => return err,
|
||||
};
|
||||
@ -194,7 +195,7 @@ pub const Tz = struct {
|
||||
}
|
||||
errdefer if (footer) |ft| allocator.free(ft);
|
||||
|
||||
return Tz{
|
||||
return .{
|
||||
.allocator = allocator,
|
||||
.transitions = transitions,
|
||||
.timetypes = timetypes,
|
||||
@ -215,9 +216,9 @@ pub const Tz = struct {
|
||||
|
||||
test "slim" {
|
||||
const data = @embedFile("tz/asia_tokyo.tzif");
|
||||
var in_stream = std.io.fixedBufferStream(data);
|
||||
var in_stream: Reader = .fixed(data);
|
||||
|
||||
var tz = try std.Tz.parse(std.testing.allocator, in_stream.reader());
|
||||
var tz = try std.Tz.parse(std.testing.allocator, &in_stream);
|
||||
defer tz.deinit();
|
||||
|
||||
try std.testing.expectEqual(tz.transitions.len, 9);
|
||||
@ -228,9 +229,9 @@ test "slim" {
|
||||
|
||||
test "fat" {
|
||||
const data = @embedFile("tz/antarctica_davis.tzif");
|
||||
var in_stream = std.io.fixedBufferStream(data);
|
||||
var in_stream: Reader = .fixed(data);
|
||||
|
||||
var tz = try std.Tz.parse(std.testing.allocator, in_stream.reader());
|
||||
var tz = try std.Tz.parse(std.testing.allocator, &in_stream);
|
||||
defer tz.deinit();
|
||||
|
||||
try std.testing.expectEqual(tz.transitions.len, 8);
|
||||
@ -241,9 +242,9 @@ test "fat" {
|
||||
test "legacy" {
|
||||
// Taken from Slackware 8.0, from 2001
|
||||
const data = @embedFile("tz/europe_vatican.tzif");
|
||||
var in_stream = std.io.fixedBufferStream(data);
|
||||
var in_stream: Reader = .fixed(data);
|
||||
|
||||
var tz = try std.Tz.parse(std.testing.allocator, in_stream.reader());
|
||||
var tz = try std.Tz.parse(std.testing.allocator, &in_stream);
|
||||
defer tz.deinit();
|
||||
|
||||
try std.testing.expectEqual(tz.transitions.len, 170);
|
||||
|
||||
@ -5893,15 +5893,16 @@ fn buildGlibcCrtFile(comp: *Compilation, crt_file: glibc.CrtFile, prog_node: std

fn buildGlibcSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) void {
defer comp.link_task_queue.finishPrelinkItem(comp);
if (glibc.buildSharedObjects(comp, prog_node)) |_| {
// The job should no longer be queued up since it succeeded.
comp.queued_jobs.glibc_shared_objects = false;
} else |err| switch (err) {
error.AlreadyReported => return,
else => comp.lockAndSetMiscFailure(.glibc_shared_objects, "unable to build glibc shared objects: {s}", .{
@errorName(err),
}),
}
glibc.buildSharedObjects(comp, prog_node) catch unreachable;
//if (glibc.buildSharedObjects(comp, prog_node)) |_| {
// // The job should no longer be queued up since it succeeded.
// comp.queued_jobs.glibc_shared_objects = false;
//} else |err| switch (err) {
// error.AlreadyReported => return,
// else => comp.lockAndSetMiscFailure(.glibc_shared_objects, "unable to build glibc shared objects: {s}", .{
// @errorName(err),
// }),
//}
}

fn buildFreeBSDCrtFile(comp: *Compilation, crt_file: freebsd.CrtFile, prog_node: std.Progress.Node) void {

@ -76,7 +76,9 @@ fn runThread(ids: *IncrementalDebugServer) void {
ids.mutex.lock();
}
defer ids.mutex.unlock();
handleCommand(ids.zcu, &text_out, cmd, arg) catch @panic("IncrementalDebugServer: out of memory");
var allocating: std.Io.Writer.Allocating = .fromArrayList(gpa, &text_out);
defer text_out = allocating.toArrayList();
handleCommand(ids.zcu, &allocating.writer, cmd, arg) catch @panic("IncrementalDebugServer: out of memory");
}
text_out.append(gpa, '\n') catch @panic("IncrementalDebugServer: out of memory");
conn.stream.writeAll(text_out.items) catch @panic("IncrementalDebugServer: failed to write");
@ -119,10 +121,8 @@ const help_str: []const u8 =
\\
;

fn handleCommand(zcu: *Zcu, output: *std.ArrayListUnmanaged(u8), cmd_str: []const u8, arg_str: []const u8) Allocator.Error!void {
fn handleCommand(zcu: *Zcu, w: *std.Io.Writer, cmd_str: []const u8, arg_str: []const u8) error{ WriteFailed, OutOfMemory }!void {
const ip = &zcu.intern_pool;
const gpa = zcu.gpa;
const w = output.writer(gpa);
if (std.mem.eql(u8, cmd_str, "help")) {
try w.writeAll(help_str);
} else if (std.mem.eql(u8, cmd_str, "summary")) {

@ -200,7 +200,7 @@ pub const JobQueue = struct {

const hash_slice = hash.toSlice();

try buf.writer().print(
try buf.print(
\\ pub const {f} = struct {{
\\
, .{std.zig.fmtId(hash_slice)});
@ -226,13 +226,13 @@ pub const JobQueue = struct {
}
}

try buf.writer().print(
try buf.print(
\\ pub const build_root = "{f}";
\\
, .{std.fmt.alt(fetch.package_root, .formatEscapeString)});

if (fetch.has_build_zig) {
try buf.writer().print(
try buf.print(
\\ pub const build_zig = @import("{f}");
\\
, .{std.zig.fmtString(hash_slice)});
@ -245,7 +245,7 @@ pub const JobQueue = struct {
);
for (manifest.dependencies.keys(), manifest.dependencies.values()) |name, dep| {
const h = depDigest(fetch.package_root, jq.global_cache, dep) orelse continue;
try buf.writer().print(
try buf.print(
" .{{ \"{f}\", \"{f}\" }},\n",
.{ std.zig.fmtString(name), std.zig.fmtString(h.toSlice()) },
);
@ -277,7 +277,7 @@ pub const JobQueue = struct {

for (root_manifest.dependencies.keys(), root_manifest.dependencies.values()) |name, dep| {
const h = depDigest(root_fetch.package_root, jq.global_cache, dep) orelse continue;
try buf.writer().print(
try buf.print(
" .{{ \"{f}\", \"{f}\" }},\n",
.{ std.zig.fmtString(name), std.zig.fmtString(h.toSlice()) },
);

@ -31,7 +31,7 @@ pub fn emitMir(emit: *Emit) Error!void {
var lowered_relocs = lowered.relocs;
for (lowered.insts, 0..) |lowered_inst, lowered_index| {
const start_offset: u32 = @intCast(emit.code.items.len);
try lowered_inst.encode(emit.code.writer(gpa));
std.mem.writeInt(u32, try emit.code.addManyAsArray(gpa, 4), lowered_inst.toU32(), .little);

while (lowered_relocs.len > 0 and
lowered_relocs[0].lowered_inst_index == lowered_index) : ({

@ -518,7 +518,7 @@ pub const Instruction = union(Lir.Format) {
};
}

pub fn encode(inst: Instruction, writer: anytype) !void {
pub fn encode(inst: Instruction, writer: *std.Io.Writer) !void {
try writer.writeInt(u32, inst.toU32(), .little);
}

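Since `Instruction.encode` now takes a `*std.Io.Writer`, a caller that only wants the raw bytes can hand it a fixed writer over a stack buffer. A small sketch under that assumption (`inst` stands in for any value with the `encode` method above; the helper name is illustrative):

const std = @import("std");

fn encodeToBytes(inst: anytype, buf: []u8) ![]u8 {
    var w: std.Io.Writer = .fixed(buf);
    try inst.encode(&w);
    return w.buffered(); // bytes written so far
}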
|
||||
@ -3,7 +3,7 @@ const Emit = @This();
|
||||
const std = @import("std");
|
||||
const assert = std.debug.assert;
|
||||
const Allocator = std.mem.Allocator;
|
||||
const leb = std.leb;
|
||||
const ArrayList = std.ArrayList;
|
||||
|
||||
const Wasm = link.File.Wasm;
|
||||
const Mir = @import("Mir.zig");
|
||||
@ -15,7 +15,7 @@ const codegen = @import("../../codegen.zig");
|
||||
mir: Mir,
|
||||
wasm: *Wasm,
|
||||
/// The binary representation that will be emitted by this module.
|
||||
code: *std.ArrayListUnmanaged(u8),
|
||||
code: *ArrayList(u8),
|
||||
|
||||
pub const Error = error{
|
||||
OutOfMemory,
|
||||
@ -85,7 +85,7 @@ pub fn lowerToCode(emit: *Emit) Error!void {
|
||||
if (is_obj) {
|
||||
@panic("TODO");
|
||||
} else {
|
||||
leb.writeUleb128(code.fixedWriter(), 1 + @intFromEnum(indirect_func_idx)) catch unreachable;
|
||||
writeUleb128(code, 1 + @intFromEnum(indirect_func_idx));
|
||||
}
|
||||
inst += 1;
|
||||
continue :loop tags[inst];
|
||||
@ -99,7 +99,7 @@ pub fn lowerToCode(emit: *Emit) Error!void {
|
||||
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_const));
|
||||
// MIR is lowered during flush, so there is indeed only one thread at this time.
|
||||
const errors_len = 1 + comp.zcu.?.intern_pool.global_error_set.getNamesFromMainThread().len;
|
||||
leb.writeIleb128(code.fixedWriter(), errors_len) catch unreachable;
|
||||
writeSleb128(code, errors_len);
|
||||
|
||||
inst += 1;
|
||||
continue :loop tags[inst];
|
||||
@ -122,7 +122,7 @@ pub fn lowerToCode(emit: *Emit) Error!void {
|
||||
continue :loop tags[inst];
|
||||
} else {
|
||||
const addr: u32 = wasm.errorNameTableAddr();
|
||||
leb.writeIleb128(code.fixedWriter(), addr) catch unreachable;
|
||||
writeSleb128(code, addr);
|
||||
|
||||
inst += 1;
|
||||
continue :loop tags[inst];
|
||||
@ -131,7 +131,7 @@ pub fn lowerToCode(emit: *Emit) Error!void {
|
||||
.br_if, .br, .memory_grow, .memory_size => {
|
||||
try code.ensureUnusedCapacity(gpa, 11);
|
||||
code.appendAssumeCapacity(@intFromEnum(tags[inst]));
|
||||
leb.writeUleb128(code.fixedWriter(), datas[inst].label) catch unreachable;
|
||||
writeUleb128(code, datas[inst].label);
|
||||
|
||||
inst += 1;
|
||||
continue :loop tags[inst];
|
||||
@ -140,7 +140,7 @@ pub fn lowerToCode(emit: *Emit) Error!void {
|
||||
.local_get, .local_set, .local_tee => {
|
||||
try code.ensureUnusedCapacity(gpa, 11);
|
||||
code.appendAssumeCapacity(@intFromEnum(tags[inst]));
|
||||
leb.writeUleb128(code.fixedWriter(), datas[inst].local) catch unreachable;
|
||||
writeUleb128(code, datas[inst].local);
|
||||
|
||||
inst += 1;
|
||||
continue :loop tags[inst];
|
||||
@ -153,8 +153,8 @@ pub fn lowerToCode(emit: *Emit) Error!void {
|
||||
try code.ensureUnusedCapacity(gpa, 11 + 10 * labels.len);
|
||||
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.br_table));
|
||||
// -1 because default label is not part of length/depth.
|
||||
leb.writeUleb128(code.fixedWriter(), extra.data.length - 1) catch unreachable;
|
||||
for (labels) |label| leb.writeUleb128(code.fixedWriter(), label) catch unreachable;
|
||||
writeUleb128(code, extra.data.length - 1);
|
||||
for (labels) |label| writeUleb128(code, label);
|
||||
|
||||
inst += 1;
|
||||
continue :loop tags[inst];
|
||||
@ -199,9 +199,9 @@ pub fn lowerToCode(emit: *Emit) Error!void {
|
||||
code.appendNTimesAssumeCapacity(0, 5);
|
||||
} else {
|
||||
const index: Wasm.Flush.FuncTypeIndex = .fromTypeIndex(func_ty_index, &wasm.flush_buffer);
|
||||
leb.writeUleb128(code.fixedWriter(), @intFromEnum(index)) catch unreachable;
|
||||
writeUleb128(code, @intFromEnum(index));
|
||||
}
|
||||
leb.writeUleb128(code.fixedWriter(), @as(u32, 0)) catch unreachable; // table index
|
||||
writeUleb128(code, @as(u32, 0)); // table index
|
||||
|
||||
inst += 1;
|
||||
continue :loop tags[inst];
|
||||
@ -263,7 +263,7 @@ pub fn lowerToCode(emit: *Emit) Error!void {
|
||||
code.appendNTimesAssumeCapacity(0, 5);
|
||||
} else {
|
||||
const sp_global: Wasm.GlobalIndex = .stack_pointer;
|
||||
std.leb.writeUleb128(code.fixedWriter(), @intFromEnum(sp_global)) catch unreachable;
|
||||
writeUleb128(code, @intFromEnum(sp_global));
|
||||
}
|
||||
|
||||
inst += 1;
|
||||
@ -291,7 +291,7 @@ pub fn lowerToCode(emit: *Emit) Error!void {
|
||||
.i32_const => {
|
||||
try code.ensureUnusedCapacity(gpa, 6);
|
||||
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_const));
|
||||
leb.writeIleb128(code.fixedWriter(), datas[inst].imm32) catch unreachable;
|
||||
writeSleb128(code, datas[inst].imm32);
|
||||
|
||||
inst += 1;
|
||||
continue :loop tags[inst];
|
||||
@ -300,7 +300,7 @@ pub fn lowerToCode(emit: *Emit) Error!void {
|
||||
try code.ensureUnusedCapacity(gpa, 11);
|
||||
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i64_const));
|
||||
const int64: i64 = @bitCast(mir.extraData(Mir.Imm64, datas[inst].payload).data.toInt());
|
||||
leb.writeIleb128(code.fixedWriter(), int64) catch unreachable;
|
||||
writeSleb128(code, int64);
|
||||
|
||||
inst += 1;
|
||||
continue :loop tags[inst];
|
||||
@ -476,33 +476,33 @@ pub fn lowerToCode(emit: *Emit) Error!void {
|
||||
const extra_index = datas[inst].payload;
|
||||
const opcode = mir.extra[extra_index];
|
||||
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.misc_prefix));
|
||||
leb.writeUleb128(code.fixedWriter(), opcode) catch unreachable;
|
||||
writeUleb128(code, opcode);
|
||||
switch (@as(std.wasm.MiscOpcode, @enumFromInt(opcode))) {
|
||||
// bulk-memory opcodes
|
||||
.data_drop => {
|
||||
const segment = mir.extra[extra_index + 1];
|
||||
leb.writeUleb128(code.fixedWriter(), segment) catch unreachable;
|
||||
writeUleb128(code, segment);
|
||||
|
||||
inst += 1;
|
||||
continue :loop tags[inst];
|
||||
},
|
||||
.memory_init => {
|
||||
const segment = mir.extra[extra_index + 1];
|
||||
leb.writeUleb128(code.fixedWriter(), segment) catch unreachable;
|
||||
leb.writeUleb128(code.fixedWriter(), @as(u32, 0)) catch unreachable; // memory index
|
||||
writeUleb128(code, segment);
|
||||
writeUleb128(code, @as(u32, 0)); // memory index
|
||||
|
||||
inst += 1;
|
||||
continue :loop tags[inst];
|
||||
},
|
||||
.memory_fill => {
|
||||
leb.writeUleb128(code.fixedWriter(), @as(u32, 0)) catch unreachable; // memory index
|
||||
writeUleb128(code, @as(u32, 0)); // memory index
|
||||
|
||||
inst += 1;
|
||||
continue :loop tags[inst];
|
||||
},
|
||||
.memory_copy => {
|
||||
leb.writeUleb128(code.fixedWriter(), @as(u32, 0)) catch unreachable; // dst memory index
|
||||
leb.writeUleb128(code.fixedWriter(), @as(u32, 0)) catch unreachable; // src memory index
|
||||
writeUleb128(code, @as(u32, 0)); // dst memory index
|
||||
writeUleb128(code, @as(u32, 0)); // src memory index
|
||||
|
||||
inst += 1;
|
||||
continue :loop tags[inst];
|
||||
@ -538,7 +538,7 @@ pub fn lowerToCode(emit: *Emit) Error!void {
|
||||
const extra_index = datas[inst].payload;
|
||||
const opcode = mir.extra[extra_index];
|
||||
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.simd_prefix));
|
||||
leb.writeUleb128(code.fixedWriter(), opcode) catch unreachable;
|
||||
writeUleb128(code, opcode);
|
||||
switch (@as(std.wasm.SimdOpcode, @enumFromInt(opcode))) {
|
||||
.v128_store,
|
||||
.v128_load,
|
||||
@ -824,7 +824,7 @@ pub fn lowerToCode(emit: *Emit) Error!void {
|
||||
const extra_index = datas[inst].payload;
|
||||
const opcode = mir.extra[extra_index];
|
||||
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.atomics_prefix));
|
||||
leb.writeUleb128(code.fixedWriter(), opcode) catch unreachable;
|
||||
writeUleb128(code, opcode);
|
||||
switch (@as(std.wasm.AtomicsOpcode, @enumFromInt(opcode))) {
|
||||
.i32_atomic_load,
|
||||
.i64_atomic_load,
|
||||
@ -900,7 +900,7 @@ pub fn lowerToCode(emit: *Emit) Error!void {
|
||||
// Hard-codes memory index 0 since multi-memory proposal is
|
||||
// not yet accepted nor implemented.
|
||||
const memory_index: u32 = 0;
|
||||
leb.writeUleb128(code.fixedWriter(), memory_index) catch unreachable;
|
||||
writeUleb128(code, memory_index);
|
||||
inst += 1;
|
||||
continue :loop tags[inst];
|
||||
},
|
||||
@ -915,15 +915,15 @@ pub fn lowerToCode(emit: *Emit) Error!void {
|
||||
}
|
||||
|
||||
/// Asserts 20 unused capacity.
|
||||
fn encodeMemArg(code: *std.ArrayListUnmanaged(u8), mem_arg: Mir.MemArg) void {
|
||||
fn encodeMemArg(code: *ArrayList(u8), mem_arg: Mir.MemArg) void {
|
||||
assert(code.unusedCapacitySlice().len >= 20);
|
||||
// Wasm encodes alignment as power of 2, rather than natural alignment.
|
||||
const encoded_alignment = @ctz(mem_arg.alignment);
|
||||
leb.writeUleb128(code.fixedWriter(), encoded_alignment) catch unreachable;
|
||||
leb.writeUleb128(code.fixedWriter(), mem_arg.offset) catch unreachable;
|
||||
writeUleb128(code, encoded_alignment);
|
||||
writeUleb128(code, mem_arg.offset);
|
||||
}
|
||||
|
||||
fn uavRefObj(wasm: *Wasm, code: *std.ArrayListUnmanaged(u8), value: InternPool.Index, offset: i32, is_wasm32: bool) !void {
|
||||
fn uavRefObj(wasm: *Wasm, code: *ArrayList(u8), value: InternPool.Index, offset: i32, is_wasm32: bool) !void {
|
||||
const comp = wasm.base.comp;
|
||||
const gpa = comp.gpa;
|
||||
const opcode: std.wasm.Opcode = if (is_wasm32) .i32_const else .i64_const;
|
||||
@ -940,7 +940,7 @@ fn uavRefObj(wasm: *Wasm, code: *std.ArrayListUnmanaged(u8), value: InternPool.I
|
||||
code.appendNTimesAssumeCapacity(0, if (is_wasm32) 5 else 10);
|
||||
}
|
||||
|
||||
fn uavRefExe(wasm: *Wasm, code: *std.ArrayListUnmanaged(u8), value: InternPool.Index, offset: i32, is_wasm32: bool) !void {
|
||||
fn uavRefExe(wasm: *Wasm, code: *ArrayList(u8), value: InternPool.Index, offset: i32, is_wasm32: bool) !void {
|
||||
const comp = wasm.base.comp;
|
||||
const gpa = comp.gpa;
|
||||
const opcode: std.wasm.Opcode = if (is_wasm32) .i32_const else .i64_const;
|
||||
@ -949,10 +949,10 @@ fn uavRefExe(wasm: *Wasm, code: *std.ArrayListUnmanaged(u8), value: InternPool.I
|
||||
code.appendAssumeCapacity(@intFromEnum(opcode));
|
||||
|
||||
const addr = wasm.uavAddr(value);
|
||||
leb.writeUleb128(code.fixedWriter(), @as(u32, @intCast(@as(i64, addr) + offset))) catch unreachable;
|
||||
writeUleb128(code, @as(u32, @intCast(@as(i64, addr) + offset)));
|
||||
}
|
||||
|
||||
fn navRefOff(wasm: *Wasm, code: *std.ArrayListUnmanaged(u8), data: Mir.NavRefOff, is_wasm32: bool) !void {
|
||||
fn navRefOff(wasm: *Wasm, code: *ArrayList(u8), data: Mir.NavRefOff, is_wasm32: bool) !void {
|
||||
const comp = wasm.base.comp;
|
||||
const zcu = comp.zcu.?;
|
||||
const ip = &zcu.intern_pool;
|
||||
@ -975,10 +975,22 @@ fn navRefOff(wasm: *Wasm, code: *std.ArrayListUnmanaged(u8), data: Mir.NavRefOff
|
||||
code.appendNTimesAssumeCapacity(0, if (is_wasm32) 5 else 10);
|
||||
} else {
|
||||
const addr = wasm.navAddr(data.nav_index);
|
||||
leb.writeUleb128(code.fixedWriter(), @as(u32, @intCast(@as(i64, addr) + data.offset))) catch unreachable;
|
||||
writeUleb128(code, @as(u32, @intCast(@as(i64, addr) + data.offset)));
|
||||
}
|
||||
}
|
||||
|
||||
fn appendOutputFunctionIndex(code: *std.ArrayListUnmanaged(u8), i: Wasm.OutputFunctionIndex) void {
|
||||
leb.writeUleb128(code.fixedWriter(), @intFromEnum(i)) catch unreachable;
|
||||
fn appendOutputFunctionIndex(code: *ArrayList(u8), i: Wasm.OutputFunctionIndex) void {
|
||||
writeUleb128(code, @intFromEnum(i));
|
||||
}
|
||||
|
||||
fn writeUleb128(code: *ArrayList(u8), arg: anytype) void {
var w: std.Io.Writer = .fixed(code.unusedCapacitySlice());
w.writeUleb128(arg) catch unreachable;
code.items.len += w.end;
}

fn writeSleb128(code: *ArrayList(u8), arg: anytype) void {
var w: std.Io.Writer = .fixed(code.unusedCapacitySlice());
w.writeSleb128(arg) catch unreachable;
code.items.len += w.end;
}
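
As an aside, here is a minimal sketch (not part of the patch) of the reserve-then-write pattern these helpers rely on: capacity is ensured up front, the fixed writer targets unusedCapacitySlice(), and items.len is bumped by w.end. It assumes std.ArrayList here is the unmanaged variant used elsewhere in this patch; the test scaffolding and example value are illustrative only.

    const std = @import("std");

    test "uleb128 into reserved ArrayList capacity" {
        const gpa = std.testing.allocator;
        var code: std.ArrayList(u8) = .empty;
        defer code.deinit(gpa);

        // A u32 needs at most 5 LEB128 bytes, so reserve that much before
        // handing the unused capacity to a fixed writer.
        try code.ensureUnusedCapacity(gpa, 5);
        var w: std.Io.Writer = .fixed(code.unusedCapacitySlice());
        w.writeUleb128(@as(u32, 624485)) catch unreachable; // cannot fail: capacity reserved
        code.items.len += w.end;

        // 624485 encodes as e5 8e 26 (the classic ULEB128 example).
        try std.testing.expectEqualSlices(u8, &.{ 0xe5, 0x8e, 0x26 }, code.items);
    }
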
|
||||
|
||||
@ -675,10 +675,13 @@ pub fn lower(mir: *const Mir, wasm: *Wasm, code: *std.ArrayListUnmanaged(u8)) st
|
||||
// Write the locals in the prologue of the function body.
|
||||
try code.ensureUnusedCapacity(gpa, 5 + mir.locals.len * 6 + 38);
|
||||
|
||||
std.leb.writeUleb128(code.fixedWriter(), @as(u32, @intCast(mir.locals.len))) catch unreachable;
|
||||
var w: std.Io.Writer = .fixed(code.unusedCapacitySlice());
|
||||
|
||||
w.writeLeb128(@as(u32, @intCast(mir.locals.len))) catch unreachable;
|
||||
|
||||
for (mir.locals) |local| {
|
||||
std.leb.writeUleb128(code.fixedWriter(), @as(u32, 1)) catch unreachable;
|
||||
code.appendAssumeCapacity(@intFromEnum(local));
|
||||
w.writeLeb128(@as(u32, 1)) catch unreachable;
|
||||
w.writeByte(@intFromEnum(local)) catch unreachable;
|
||||
}
|
||||
|
||||
// Stack management section of function prologue.
|
||||
@ -686,33 +689,35 @@ pub fn lower(mir: *const Mir, wasm: *Wasm, code: *std.ArrayListUnmanaged(u8)) st
|
||||
if (stack_alignment.toByteUnits()) |align_bytes| {
|
||||
const sp_global: Wasm.GlobalIndex = .stack_pointer;
|
||||
// load stack pointer
|
||||
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.global_get));
|
||||
std.leb.writeUleb128(code.fixedWriter(), @intFromEnum(sp_global)) catch unreachable;
|
||||
w.writeByte(@intFromEnum(std.wasm.Opcode.global_get)) catch unreachable;
|
||||
w.writeUleb128(@intFromEnum(sp_global)) catch unreachable;
|
||||
// store stack pointer so we can restore it when we return from the function
|
||||
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.local_tee));
|
||||
leb.writeUleb128(code.fixedWriter(), mir.prologue.sp_local) catch unreachable;
|
||||
w.writeByte(@intFromEnum(std.wasm.Opcode.local_tee)) catch unreachable;
|
||||
w.writeUleb128(mir.prologue.sp_local) catch unreachable;
|
||||
// get the total stack size
|
||||
const aligned_stack: i32 = @intCast(stack_alignment.forward(mir.prologue.stack_size));
|
||||
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_const));
|
||||
leb.writeIleb128(code.fixedWriter(), aligned_stack) catch unreachable;
|
||||
w.writeByte(@intFromEnum(std.wasm.Opcode.i32_const)) catch unreachable;
|
||||
w.writeSleb128(aligned_stack) catch unreachable;
|
||||
// subtract it from the current stack pointer
|
||||
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_sub));
|
||||
w.writeByte(@intFromEnum(std.wasm.Opcode.i32_sub)) catch unreachable;
|
||||
// Get negative stack alignment
|
||||
const neg_stack_align = @as(i32, @intCast(align_bytes)) * -1;
|
||||
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_const));
|
||||
leb.writeIleb128(code.fixedWriter(), neg_stack_align) catch unreachable;
|
||||
w.writeByte(@intFromEnum(std.wasm.Opcode.i32_const)) catch unreachable;
|
||||
w.writeSleb128(neg_stack_align) catch unreachable;
|
||||
// Bitwise-and the value to get the new stack pointer to ensure the
|
||||
// pointers are aligned with the abi alignment.
|
||||
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_and));
|
||||
w.writeByte(@intFromEnum(std.wasm.Opcode.i32_and)) catch unreachable;
|
||||
// The bottom will be used to calculate all stack pointer offsets.
|
||||
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.local_tee));
|
||||
leb.writeUleb128(code.fixedWriter(), mir.prologue.bottom_stack_local) catch unreachable;
|
||||
w.writeByte(@intFromEnum(std.wasm.Opcode.local_tee)) catch unreachable;
|
||||
w.writeUleb128(mir.prologue.bottom_stack_local) catch unreachable;
|
||||
// Store the current stack pointer value into the global stack pointer so other function calls will
|
||||
// start from this value instead and not overwrite the current stack.
|
||||
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.global_set));
|
||||
std.leb.writeUleb128(code.fixedWriter(), @intFromEnum(sp_global)) catch unreachable;
|
||||
w.writeByte(@intFromEnum(std.wasm.Opcode.global_set)) catch unreachable;
|
||||
w.writeUleb128(@intFromEnum(sp_global)) catch unreachable;
|
||||
}
|
||||
|
||||
code.items.len += w.end;
|
||||
|
||||
var emit: Emit = .{
|
||||
.mir = mir.*,
|
||||
.wasm = wasm,
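
A small worked example (illustrative only, with invented numbers) of the arithmetic the prologue above encodes with i32.sub, i32.const and i32.and: masking with the negated alignment rounds the new stack pointer down to an ABI-aligned address.

    const std = @import("std");

    test "stack pointer rounds down to the ABI alignment" {
        const sp: i32 = 1000; // hypothetical incoming stack pointer
        const stack_size: i32 = 20; // hypothetical frame size
        const alignment: i32 = 16;

        // Subtract the frame size, then bitwise-and with -alignment to round
        // the result down to a multiple of the alignment.
        const bottom = (sp - stack_size) & -alignment;
        try std.testing.expectEqual(@as(i32, 976), bottom);
    }
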
|
||||
|
||||
@ -6,6 +6,7 @@ const link = @import("link.zig");
const log = std.log.scoped(.codegen);
const mem = std.mem;
const math = std.math;
const ArrayList = std.ArrayList;
const target_util = @import("target.zig");
const trace = @import("tracy.zig").trace;
|
||||
|
||||
@ -179,7 +180,7 @@ pub fn emitFunction(
|
||||
src_loc: Zcu.LazySrcLoc,
|
||||
func_index: InternPool.Index,
|
||||
any_mir: *const AnyMir,
|
||||
code: *std.ArrayListUnmanaged(u8),
|
||||
code: *ArrayList(u8),
|
||||
debug_output: link.File.DebugInfoOutput,
|
||||
) CodeGenError!void {
|
||||
const zcu = pt.zcu;
|
||||
@ -204,7 +205,7 @@ pub fn generateLazyFunction(
|
||||
pt: Zcu.PerThread,
|
||||
src_loc: Zcu.LazySrcLoc,
|
||||
lazy_sym: link.File.LazySymbol,
|
||||
code: *std.ArrayListUnmanaged(u8),
|
||||
code: *ArrayList(u8),
|
||||
debug_output: link.File.DebugInfoOutput,
|
||||
) CodeGenError!void {
|
||||
const zcu = pt.zcu;
|
||||
@ -236,7 +237,7 @@ pub fn generateLazySymbol(
|
||||
lazy_sym: link.File.LazySymbol,
|
||||
// TODO don't use an "out" parameter like this; put it in the result instead
|
||||
alignment: *Alignment,
|
||||
code: *std.ArrayListUnmanaged(u8),
|
||||
code: *ArrayList(u8),
|
||||
debug_output: link.File.DebugInfoOutput,
|
||||
reloc_parent: link.File.RelocInfo.Parent,
|
||||
) CodeGenError!void {
|
||||
@ -311,7 +312,7 @@ pub fn generateSymbol(
|
||||
pt: Zcu.PerThread,
|
||||
src_loc: Zcu.LazySrcLoc,
|
||||
val: Value,
|
||||
code: *std.ArrayListUnmanaged(u8),
|
||||
code: *ArrayList(u8),
|
||||
reloc_parent: link.File.RelocInfo.Parent,
|
||||
) GenerateSymbolError!void {
|
||||
const tracy = trace(@src());
|
||||
@ -379,7 +380,7 @@ pub fn generateSymbol(
|
||||
},
|
||||
.err => |err| {
|
||||
const int = try pt.getErrorValue(err.name);
|
||||
try code.writer(gpa).writeInt(u16, @intCast(int), endian);
|
||||
mem.writeInt(u16, try code.addManyAsArray(gpa, 2), @intCast(int), endian);
|
||||
},
|
||||
.error_union => |error_union| {
|
||||
const payload_ty = ty.errorUnionPayload(zcu);
|
||||
@ -389,7 +390,7 @@ pub fn generateSymbol(
|
||||
};
|
||||
|
||||
if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
|
||||
try code.writer(gpa).writeInt(u16, err_val, endian);
|
||||
mem.writeInt(u16, try code.addManyAsArray(gpa, 2), err_val, endian);
|
||||
return;
|
||||
}
|
||||
|
||||
@ -399,7 +400,7 @@ pub fn generateSymbol(
|
||||
|
||||
// error value first when its type is larger than the error union's payload
|
||||
if (error_align.order(payload_align) == .gt) {
|
||||
try code.writer(gpa).writeInt(u16, err_val, endian);
|
||||
mem.writeInt(u16, try code.addManyAsArray(gpa, 2), err_val, endian);
|
||||
}
|
||||
|
||||
// emit payload part of the error union
|
||||
@ -421,7 +422,7 @@ pub fn generateSymbol(
|
||||
// Payload size is larger than error set, so emit our error set last
|
||||
if (error_align.compare(.lte, payload_align)) {
|
||||
const begin = code.items.len;
|
||||
try code.writer(gpa).writeInt(u16, err_val, endian);
|
||||
mem.writeInt(u16, try code.addManyAsArray(gpa, 2), err_val, endian);
|
||||
const unpadded_end = code.items.len - begin;
|
||||
const padded_end = abi_align.forward(unpadded_end);
|
||||
const padding = math.cast(usize, padded_end - unpadded_end) orelse return error.Overflow;
|
||||
@ -476,7 +477,7 @@ pub fn generateSymbol(
|
||||
}));
|
||||
try generateSymbol(bin_file, pt, src_loc, value, code, reloc_parent);
|
||||
}
|
||||
try code.writer(gpa).writeByte(@intFromBool(payload_val != null));
|
||||
try code.append(gpa, @intFromBool(payload_val != null));
|
||||
try code.appendNTimes(gpa, 0, padding);
|
||||
}
|
||||
},
|
||||
@ -721,7 +722,7 @@ fn lowerPtr(
|
||||
pt: Zcu.PerThread,
|
||||
src_loc: Zcu.LazySrcLoc,
|
||||
ptr_val: InternPool.Index,
|
||||
code: *std.ArrayListUnmanaged(u8),
|
||||
code: *ArrayList(u8),
|
||||
reloc_parent: link.File.RelocInfo.Parent,
|
||||
prev_offset: u64,
|
||||
) GenerateSymbolError!void {
|
||||
@ -774,7 +775,7 @@ fn lowerUavRef(
|
||||
pt: Zcu.PerThread,
|
||||
src_loc: Zcu.LazySrcLoc,
|
||||
uav: InternPool.Key.Ptr.BaseAddr.Uav,
|
||||
code: *std.ArrayListUnmanaged(u8),
|
||||
code: *ArrayList(u8),
|
||||
reloc_parent: link.File.RelocInfo.Parent,
|
||||
offset: u64,
|
||||
) GenerateSymbolError!void {
|
||||
@ -834,7 +835,7 @@ fn lowerNavRef(
|
||||
lf: *link.File,
|
||||
pt: Zcu.PerThread,
|
||||
nav_index: InternPool.Nav.Index,
|
||||
code: *std.ArrayListUnmanaged(u8),
|
||||
code: *ArrayList(u8),
|
||||
reloc_parent: link.File.RelocInfo.Parent,
|
||||
offset: u64,
|
||||
) GenerateSymbolError!void {
|
||||
|
||||
@ -512,7 +512,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
|
||||
{
|
||||
var map_contents = std.array_list.Managed(u8).init(arena);
|
||||
for (metadata.all_versions[0 .. target_ver_index + 1]) |ver| {
|
||||
try map_contents.writer().print("FBSD_{d}.{d} {{ }};\n", .{ ver.major, ver.minor });
|
||||
try map_contents.print("FBSD_{d}.{d} {{ }};\n", .{ ver.major, ver.minor });
|
||||
}
|
||||
try o_directory.handle.writeFile(.{ .sub_path = all_map_basename, .data = map_contents.items });
|
||||
map_contents.deinit();
|
||||
@ -524,20 +524,17 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
|
||||
for (libs, 0..) |lib, lib_i| {
|
||||
stubs_asm.shrinkRetainingCapacity(0);
|
||||
|
||||
const stubs_writer = stubs_asm.writer();
|
||||
|
||||
try stubs_writer.writeAll(".text\n");
|
||||
try stubs_asm.appendSlice(".text\n");
|
||||
|
||||
var sym_i: usize = 0;
|
||||
var sym_name_buf = std.array_list.Managed(u8).init(arena);
|
||||
var sym_name_buf: std.Io.Writer.Allocating = .init(arena);
|
||||
var opt_symbol_name: ?[]const u8 = null;
|
||||
var versions = try std.DynamicBitSetUnmanaged.initEmpty(arena, metadata.all_versions.len);
|
||||
var weak_linkages = try std.DynamicBitSetUnmanaged.initEmpty(arena, metadata.all_versions.len);
|
||||
|
||||
var inc_fbs = std.io.fixedBufferStream(metadata.inclusions);
var inc_reader = inc_fbs.reader();
var inc_reader: std.Io.Reader = .fixed(metadata.inclusions);

const fn_inclusions_len = try inc_reader.readInt(u16, .little);
const fn_inclusions_len = try inc_reader.takeInt(u16, .little);
|
||||
|
||||
// Pick the default symbol version:
|
||||
// - If there are no versions, don't emit it
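
For readers unfamiliar with the new reader API, a self-contained sketch of the pattern the rewritten loops use in the following hunks: a fixed reader over a byte slice, takeInt/takeLeb128 for scalars, and streamDelimiter plus toss(1) for NUL-terminated names. This is not part of the patch, and the sample blob is invented for illustration.

    const std = @import("std");

    test "parsing a fixed byte slice with std.Io.Reader" {
        // Hypothetical blob: a little-endian u16 count, a NUL-terminated name,
        // then a ULEB128 value -- loosely shaped like the inclusions stream.
        const blob = [_]u8{ 0x01, 0x00, 'e', 'x', 'i', 't', 0, 0x2a };
        var reader: std.Io.Reader = .fixed(&blob);

        const count = try reader.takeInt(u16, .little);
        try std.testing.expectEqual(@as(u16, 1), count);

        var name: std.Io.Writer.Allocating = .init(std.testing.allocator);
        defer name.deinit();
        _ = try reader.streamDelimiter(&name.writer, 0);
        reader.toss(1); // streamDelimiter leaves the delimiter buffered
        try std.testing.expectEqualStrings("exit", name.written());

        const value = try reader.takeLeb128(u64);
        try std.testing.expectEqual(@as(u64, 42), value);
    }
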
|
||||
@ -550,19 +547,21 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
|
||||
while (sym_i < fn_inclusions_len) : (sym_i += 1) {
|
||||
const sym_name = opt_symbol_name orelse n: {
|
||||
sym_name_buf.clearRetainingCapacity();
|
||||
try inc_reader.streamUntilDelimiter(sym_name_buf.writer(), 0, null);
|
||||
_ = try inc_reader.streamDelimiter(&sym_name_buf.writer, 0);
|
||||
assert(inc_reader.buffered()[0] == 0); // TODO change streamDelimiter API
|
||||
inc_reader.toss(1);
|
||||
|
||||
opt_symbol_name = sym_name_buf.items;
|
||||
opt_symbol_name = sym_name_buf.written();
|
||||
versions.unsetAll();
|
||||
weak_linkages.unsetAll();
|
||||
chosen_def_ver_index = 255;
|
||||
chosen_unversioned_ver_index = 255;
|
||||
|
||||
break :n sym_name_buf.items;
|
||||
break :n sym_name_buf.written();
|
||||
};
|
||||
{
|
||||
const targets = try std.leb.readUleb128(u64, inc_reader);
|
||||
var lib_index = try inc_reader.readByte();
|
||||
const targets = try inc_reader.takeLeb128(u64);
|
||||
var lib_index = try inc_reader.takeByte();
|
||||
|
||||
const is_unversioned = (lib_index & (1 << 5)) != 0;
|
||||
const is_weak = (lib_index & (1 << 6)) != 0;
|
||||
@ -576,7 +575,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
|
||||
((targets & (@as(u64, 1) << @as(u6, @intCast(target_targ_index)))) != 0);
|
||||
|
||||
while (true) {
|
||||
const byte = try inc_reader.readByte();
|
||||
const byte = try inc_reader.takeByte();
|
||||
const last = (byte & 0b1000_0000) != 0;
|
||||
const ver_i = @as(u7, @truncate(byte));
|
||||
if (ok_lib_and_target and ver_i <= target_ver_index) {
|
||||
@ -608,7 +607,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
|
||||
// .globl _Exit
|
||||
// .type _Exit, %function
|
||||
// _Exit: .long 0
|
||||
try stubs_writer.print(
|
||||
try stubs_asm.print(
|
||||
\\.balign {d}
|
||||
\\.{s} {s}
|
||||
\\.type {s}, %function
|
||||
@ -640,7 +639,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
|
||||
.{ sym_name, ver.major, ver.minor },
|
||||
);
|
||||
|
||||
try stubs_writer.print(
|
||||
try stubs_asm.print(
|
||||
\\.balign {d}
|
||||
\\.{s} {s}
|
||||
\\.type {s}, %function
|
||||
@ -665,14 +664,14 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
|
||||
}
|
||||
}
|
||||
|
||||
try stubs_writer.writeAll(".data\n");
|
||||
try stubs_asm.appendSlice(".data\n");
|
||||
|
||||
// FreeBSD's `libc.so.7` contains strong references to `__progname` and `environ` which are
|
||||
// defined in the statically-linked startup code. Those references cause the linker to put
|
||||
// the symbols in the dynamic symbol table. We need to create dummy references to them here
|
||||
// to get the same effect.
|
||||
if (std.mem.eql(u8, lib.name, "c")) {
|
||||
try stubs_writer.print(
|
||||
try stubs_asm.print(
|
||||
\\.balign {d}
|
||||
\\.globl __progname
|
||||
\\.globl environ
|
||||
@ -686,7 +685,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
|
||||
});
|
||||
}
|
||||
|
||||
const obj_inclusions_len = try inc_reader.readInt(u16, .little);
|
||||
const obj_inclusions_len = try inc_reader.takeInt(u16, .little);
|
||||
|
||||
var sizes = try arena.alloc(u16, metadata.all_versions.len);
|
||||
|
||||
@ -696,21 +695,23 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
|
||||
while (sym_i < obj_inclusions_len) : (sym_i += 1) {
|
||||
const sym_name = opt_symbol_name orelse n: {
|
||||
sym_name_buf.clearRetainingCapacity();
|
||||
try inc_reader.streamUntilDelimiter(sym_name_buf.writer(), 0, null);
|
||||
_ = try inc_reader.streamDelimiter(&sym_name_buf.writer, 0);
|
||||
assert(inc_reader.buffered()[0] == 0); // TODO change streamDelimiter API
|
||||
inc_reader.toss(1);
|
||||
|
||||
opt_symbol_name = sym_name_buf.items;
|
||||
opt_symbol_name = sym_name_buf.written();
|
||||
versions.unsetAll();
|
||||
weak_linkages.unsetAll();
|
||||
chosen_def_ver_index = 255;
|
||||
chosen_unversioned_ver_index = 255;
|
||||
|
||||
break :n sym_name_buf.items;
|
||||
break :n sym_name_buf.written();
|
||||
};
|
||||
|
||||
{
|
||||
const targets = try std.leb.readUleb128(u64, inc_reader);
|
||||
const size = try std.leb.readUleb128(u16, inc_reader);
|
||||
var lib_index = try inc_reader.readByte();
|
||||
const targets = try inc_reader.takeLeb128(u64);
|
||||
const size = try inc_reader.takeLeb128(u16);
|
||||
var lib_index = try inc_reader.takeByte();
|
||||
|
||||
const is_unversioned = (lib_index & (1 << 5)) != 0;
|
||||
const is_weak = (lib_index & (1 << 6)) != 0;
|
||||
@ -724,7 +725,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
|
||||
((targets & (@as(u64, 1) << @as(u6, @intCast(target_targ_index)))) != 0);
|
||||
|
||||
while (true) {
|
||||
const byte = try inc_reader.readByte();
|
||||
const byte = try inc_reader.takeByte();
|
||||
const last = (byte & 0b1000_0000) != 0;
|
||||
const ver_i = @as(u7, @truncate(byte));
|
||||
if (ok_lib_and_target and ver_i <= target_ver_index) {
|
||||
@ -758,7 +759,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
|
||||
// .type malloc_conf, %object
|
||||
// .size malloc_conf, 4
|
||||
// malloc_conf: .fill 4, 1, 0
|
||||
try stubs_writer.print(
|
||||
try stubs_asm.print(
|
||||
\\.balign {d}
|
||||
\\.{s} {s}
|
||||
\\.type {s}, %object
|
||||
@ -794,7 +795,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
|
||||
.{ sym_name, ver.major, ver.minor },
|
||||
);
|
||||
|
||||
try stubs_asm.writer().print(
|
||||
try stubs_asm.print(
|
||||
\\.balign {d}
|
||||
\\.{s} {s}
|
||||
\\.type {s}, %object
|
||||
@ -822,9 +823,9 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
|
||||
}
|
||||
}
|
||||
|
||||
try stubs_writer.writeAll(".tdata\n");
|
||||
try stubs_asm.appendSlice(".tdata\n");
|
||||
|
||||
const tls_inclusions_len = try inc_reader.readInt(u16, .little);
|
||||
const tls_inclusions_len = try inc_reader.takeInt(u16, .little);
|
||||
|
||||
sym_i = 0;
|
||||
opt_symbol_name = null;
|
||||
@ -832,21 +833,23 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
|
||||
while (sym_i < tls_inclusions_len) : (sym_i += 1) {
|
||||
const sym_name = opt_symbol_name orelse n: {
|
||||
sym_name_buf.clearRetainingCapacity();
|
||||
try inc_reader.streamUntilDelimiter(sym_name_buf.writer(), 0, null);
|
||||
_ = try inc_reader.streamDelimiter(&sym_name_buf.writer, 0);
|
||||
assert(inc_reader.buffered()[0] == 0); // TODO change streamDelimiter API
|
||||
inc_reader.toss(1);
|
||||
|
||||
opt_symbol_name = sym_name_buf.items;
|
||||
opt_symbol_name = sym_name_buf.written();
|
||||
versions.unsetAll();
|
||||
weak_linkages.unsetAll();
|
||||
chosen_def_ver_index = 255;
|
||||
chosen_unversioned_ver_index = 255;
|
||||
|
||||
break :n sym_name_buf.items;
|
||||
break :n sym_name_buf.written();
|
||||
};
|
||||
|
||||
{
|
||||
const targets = try std.leb.readUleb128(u64, inc_reader);
|
||||
const size = try std.leb.readUleb128(u16, inc_reader);
|
||||
var lib_index = try inc_reader.readByte();
|
||||
const targets = try inc_reader.takeLeb128(u64);
|
||||
const size = try inc_reader.takeLeb128(u16);
|
||||
var lib_index = try inc_reader.takeByte();
|
||||
|
||||
const is_unversioned = (lib_index & (1 << 5)) != 0;
|
||||
const is_weak = (lib_index & (1 << 6)) != 0;
|
||||
@ -860,7 +863,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
|
||||
((targets & (@as(u64, 1) << @as(u6, @intCast(target_targ_index)))) != 0);
|
||||
|
||||
while (true) {
|
||||
const byte = try inc_reader.readByte();
|
||||
const byte = try inc_reader.takeByte();
|
||||
const last = (byte & 0b1000_0000) != 0;
|
||||
const ver_i = @as(u7, @truncate(byte));
|
||||
if (ok_lib_and_target and ver_i <= target_ver_index) {
|
||||
@ -894,7 +897,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
|
||||
// .type _ThreadRuneLocale, %object
|
||||
// .size _ThreadRuneLocale, 4
|
||||
// _ThreadRuneLocale: .fill 4, 1, 0
|
||||
try stubs_writer.print(
|
||||
try stubs_asm.print(
|
||||
\\.balign {d}
|
||||
\\.{s} {s}
|
||||
\\.type {s}, %tls_object
|
||||
@ -930,7 +933,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
|
||||
.{ sym_name, ver.major, ver.minor },
|
||||
);
|
||||
|
||||
try stubs_writer.print(
|
||||
try stubs_asm.print(
|
||||
\\.balign {d}
|
||||
\\.{s} {s}
|
||||
\\.type {s}, %tls_object
|
||||
|
||||
@ -752,9 +752,9 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
|
||||
var map_contents = std.array_list.Managed(u8).init(arena);
|
||||
for (metadata.all_versions[0 .. target_ver_index + 1]) |ver| {
|
||||
if (ver.patch == 0) {
|
||||
try map_contents.writer().print("GLIBC_{d}.{d} {{ }};\n", .{ ver.major, ver.minor });
|
||||
try map_contents.print("GLIBC_{d}.{d} {{ }};\n", .{ ver.major, ver.minor });
|
||||
} else {
|
||||
try map_contents.writer().print("GLIBC_{d}.{d}.{d} {{ }};\n", .{ ver.major, ver.minor, ver.patch });
|
||||
try map_contents.print("GLIBC_{d}.{d}.{d} {{ }};\n", .{ ver.major, ver.minor, ver.patch });
|
||||
}
|
||||
}
|
||||
try o_directory.handle.writeFile(.{ .sub_path = all_map_basename, .data = map_contents.items });
|
||||
@ -773,7 +773,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
|
||||
try stubs_asm.appendSlice(".text\n");
|
||||
|
||||
var sym_i: usize = 0;
|
||||
var sym_name_buf = std.array_list.Managed(u8).init(arena);
|
||||
var sym_name_buf: std.Io.Writer.Allocating = .init(arena);
|
||||
var opt_symbol_name: ?[]const u8 = null;
|
||||
var versions_buffer: [32]u8 = undefined;
|
||||
var versions_len: usize = undefined;
|
||||
@ -794,24 +794,25 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
|
||||
// twice, which causes a "duplicate symbol" assembler error.
|
||||
var versions_written = std.AutoArrayHashMap(Version, void).init(arena);
|
||||
|
||||
var inc_fbs = std.io.fixedBufferStream(metadata.inclusions);
|
||||
var inc_reader = inc_fbs.reader();
|
||||
var inc_reader: std.Io.Reader = .fixed(metadata.inclusions);
|
||||
|
||||
const fn_inclusions_len = try inc_reader.readInt(u16, .little);
|
||||
const fn_inclusions_len = try inc_reader.takeInt(u16, .little);
|
||||
|
||||
while (sym_i < fn_inclusions_len) : (sym_i += 1) {
|
||||
const sym_name = opt_symbol_name orelse n: {
|
||||
sym_name_buf.clearRetainingCapacity();
|
||||
try inc_reader.streamUntilDelimiter(sym_name_buf.writer(), 0, null);
|
||||
_ = try inc_reader.streamDelimiter(&sym_name_buf.writer, 0);
|
||||
assert(inc_reader.buffered()[0] == 0); // TODO change streamDelimiter API
|
||||
inc_reader.toss(1);
|
||||
|
||||
opt_symbol_name = sym_name_buf.items;
|
||||
opt_symbol_name = sym_name_buf.written();
|
||||
versions_buffer = undefined;
|
||||
versions_len = 0;
|
||||
|
||||
break :n sym_name_buf.items;
|
||||
break :n sym_name_buf.written();
|
||||
};
|
||||
const targets = try std.leb.readUleb128(u64, inc_reader);
|
||||
var lib_index = try inc_reader.readByte();
|
||||
const targets = try inc_reader.takeLeb128(u64);
|
||||
var lib_index = try inc_reader.takeByte();
|
||||
|
||||
const is_terminal = (lib_index & (1 << 7)) != 0;
|
||||
if (is_terminal) {
|
||||
@ -825,7 +826,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
|
||||
((targets & (@as(u64, 1) << @as(u6, @intCast(target_targ_index)))) != 0);
|
||||
|
||||
while (true) {
|
||||
const byte = try inc_reader.readByte();
|
||||
const byte = try inc_reader.takeByte();
|
||||
const last = (byte & 0b1000_0000) != 0;
|
||||
const ver_i = @as(u7, @truncate(byte));
|
||||
if (ok_lib_and_target and ver_i <= target_ver_index) {
|
||||
@ -880,7 +881,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
|
||||
"{s}_{d}_{d}",
|
||||
.{ sym_name, ver.major, ver.minor },
|
||||
);
|
||||
try stubs_asm.writer().print(
|
||||
try stubs_asm.print(
|
||||
\\.balign {d}
|
||||
\\.globl {s}
|
||||
\\.type {s}, %function
|
||||
@ -905,7 +906,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
|
||||
"{s}_{d}_{d}_{d}",
|
||||
.{ sym_name, ver.major, ver.minor, ver.patch },
|
||||
);
|
||||
try stubs_asm.writer().print(
|
||||
try stubs_asm.print(
|
||||
\\.balign {d}
|
||||
\\.globl {s}
|
||||
\\.type {s}, %function
|
||||
@ -950,7 +951,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
|
||||
// versions where the symbol didn't exist. We only care about modern glibc versions, so use
|
||||
// a strong reference.
|
||||
if (std.mem.eql(u8, lib.name, "c")) {
|
||||
try stubs_asm.writer().print(
|
||||
try stubs_asm.print(
|
||||
\\.balign {d}
|
||||
\\.globl _IO_stdin_used
|
||||
\\{s} _IO_stdin_used
|
||||
@ -963,7 +964,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
|
||||
|
||||
try stubs_asm.appendSlice(".data\n");
|
||||
|
||||
const obj_inclusions_len = try inc_reader.readInt(u16, .little);
|
||||
const obj_inclusions_len = try inc_reader.takeInt(u16, .little);
|
||||
|
||||
var sizes = try arena.alloc(u16, metadata.all_versions.len);
|
||||
|
||||
@ -974,17 +975,19 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
|
||||
while (sym_i < obj_inclusions_len) : (sym_i += 1) {
|
||||
const sym_name = opt_symbol_name orelse n: {
|
||||
sym_name_buf.clearRetainingCapacity();
|
||||
try inc_reader.streamUntilDelimiter(sym_name_buf.writer(), 0, null);
|
||||
_ = try inc_reader.streamDelimiter(&sym_name_buf.writer, 0);
|
||||
assert(inc_reader.buffered()[0] == 0); // TODO change streamDelimiter API
|
||||
inc_reader.toss(1);
|
||||
|
||||
opt_symbol_name = sym_name_buf.items;
|
||||
opt_symbol_name = sym_name_buf.written();
|
||||
versions_buffer = undefined;
|
||||
versions_len = 0;
|
||||
|
||||
break :n sym_name_buf.items;
|
||||
break :n sym_name_buf.written();
|
||||
};
|
||||
const targets = try std.leb.readUleb128(u64, inc_reader);
|
||||
const size = try std.leb.readUleb128(u16, inc_reader);
|
||||
var lib_index = try inc_reader.readByte();
|
||||
const targets = try inc_reader.takeLeb128(u64);
|
||||
const size = try inc_reader.takeLeb128(u16);
|
||||
var lib_index = try inc_reader.takeByte();
|
||||
|
||||
const is_terminal = (lib_index & (1 << 7)) != 0;
|
||||
if (is_terminal) {
|
||||
@ -998,7 +1001,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
|
||||
((targets & (@as(u64, 1) << @as(u6, @intCast(target_targ_index)))) != 0);
|
||||
|
||||
while (true) {
|
||||
const byte = try inc_reader.readByte();
|
||||
const byte = try inc_reader.takeByte();
|
||||
const last = (byte & 0b1000_0000) != 0;
|
||||
const ver_i = @as(u7, @truncate(byte));
|
||||
if (ok_lib_and_target and ver_i <= target_ver_index) {
|
||||
@ -1055,7 +1058,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
|
||||
"{s}_{d}_{d}",
|
||||
.{ sym_name, ver.major, ver.minor },
|
||||
);
|
||||
try stubs_asm.writer().print(
|
||||
try stubs_asm.print(
|
||||
\\.balign {d}
|
||||
\\.globl {s}
|
||||
\\.type {s}, %object
|
||||
@ -1083,7 +1086,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
|
||||
"{s}_{d}_{d}_{d}",
|
||||
.{ sym_name, ver.major, ver.minor, ver.patch },
|
||||
);
|
||||
try stubs_asm.writer().print(
|
||||
try stubs_asm.print(
|
||||
\\.balign {d}
|
||||
\\.globl {s}
|
||||
\\.type {s}, %object
|
||||
|
||||
@ -304,9 +304,8 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
const include_dir = try comp.dirs.zig_lib.join(arena, &.{ "libc", "mingw", "def-include" });

if (comp.verbose_cc) print: {
std.debug.lockStdErr();
defer std.debug.unlockStdErr();
const stderr = std.fs.File.stderr().deprecatedWriter();
var stderr = std.debug.lockStderrWriter(&.{});
defer std.debug.unlockStderrWriter();
nosuspend stderr.print("def file: {s}\n", .{def_file_path}) catch break :print;
nosuspend stderr.print("include dir: {s}\n", .{include_dir}) catch break :print;
nosuspend stderr.print("output path: {s}\n", .{def_final_path}) catch break :print;
|
||||
@ -335,7 +334,10 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
// new scope to ensure definition file is written before passing the path to WriteImportLibrary
const def_final_file = try o_dir.createFile(final_def_basename, .{ .truncate = true });
defer def_final_file.close();
try pp.prettyPrintTokens(def_final_file.deprecatedWriter(), .result_only);
var buffer: [1024]u8 = undefined;
var def_final_file_writer = def_final_file.writer(&buffer);
try pp.prettyPrintTokens(&def_final_file_writer.interface, .result_only);
try def_final_file_writer.interface.flush();
}
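
A minimal sketch (not part of the patch) of the buffered file-writer pattern this hunk switches to: the caller supplies the buffer, formatted output goes through the generic interface, and an explicit flush pushes anything still buffered before the writer goes out of scope. The file name and contents are hypothetical.

    const std = @import("std");

    pub fn main() !void {
        const file = try std.fs.cwd().createFile("example.def", .{ .truncate = true });
        defer file.close();

        var buffer: [1024]u8 = undefined;
        var file_writer = file.writer(&buffer);
        // All writes go through the *std.Io.Writer exposed as .interface.
        try file_writer.interface.print("LIBRARY {s}\n", .{"example"});
        try file_writer.interface.flush();
    }
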
|
||||
|
||||
const lib_final_path = try std.fs.path.join(gpa, &.{ "o", &digest, final_lib_basename });
|
||||
@ -410,9 +412,9 @@ fn findDef(
|
||||
// Try the architecture-specific path first.
|
||||
const fmt_path = "libc" ++ s ++ "mingw" ++ s ++ "{s}" ++ s ++ "{s}.def";
|
||||
if (zig_lib_directory.path) |p| {
|
||||
try override_path.writer().print("{s}" ++ s ++ fmt_path, .{ p, lib_path, lib_name });
|
||||
try override_path.print("{s}" ++ s ++ fmt_path, .{ p, lib_path, lib_name });
|
||||
} else {
|
||||
try override_path.writer().print(fmt_path, .{ lib_path, lib_name });
|
||||
try override_path.print(fmt_path, .{ lib_path, lib_name });
|
||||
}
|
||||
if (std.fs.cwd().access(override_path.items, .{})) |_| {
|
||||
return override_path.toOwnedSlice();
|
||||
@ -427,9 +429,9 @@ fn findDef(
|
||||
override_path.shrinkRetainingCapacity(0);
|
||||
const fmt_path = "libc" ++ s ++ "mingw" ++ s ++ "lib-common" ++ s ++ "{s}.def";
|
||||
if (zig_lib_directory.path) |p| {
|
||||
try override_path.writer().print("{s}" ++ s ++ fmt_path, .{ p, lib_name });
|
||||
try override_path.print("{s}" ++ s ++ fmt_path, .{ p, lib_name });
|
||||
} else {
|
||||
try override_path.writer().print(fmt_path, .{lib_name});
|
||||
try override_path.print(fmt_path, .{lib_name});
|
||||
}
|
||||
if (std.fs.cwd().access(override_path.items, .{})) |_| {
|
||||
return override_path.toOwnedSlice();
|
||||
@ -444,9 +446,9 @@ fn findDef(
|
||||
override_path.shrinkRetainingCapacity(0);
|
||||
const fmt_path = "libc" ++ s ++ "mingw" ++ s ++ "lib-common" ++ s ++ "{s}.def.in";
|
||||
if (zig_lib_directory.path) |p| {
|
||||
try override_path.writer().print("{s}" ++ s ++ fmt_path, .{ p, lib_name });
|
||||
try override_path.print("{s}" ++ s ++ fmt_path, .{ p, lib_name });
|
||||
} else {
|
||||
try override_path.writer().print(fmt_path, .{lib_name});
|
||||
try override_path.print(fmt_path, .{lib_name});
|
||||
}
|
||||
if (std.fs.cwd().access(override_path.items, .{})) |_| {
|
||||
return override_path.toOwnedSlice();
|
||||
|
||||
@ -140,21 +140,21 @@ pub fn buildCrtFile(comp: *Compilation, in_crt_file: CrtFile, prog_node: std.Pro
|
||||
if (!is_arch_specific) {
|
||||
// Look for an arch specific override.
|
||||
override_path.shrinkRetainingCapacity(0);
|
||||
try override_path.writer().print("{s}" ++ s ++ "{s}" ++ s ++ "{s}.s", .{
|
||||
try override_path.print("{s}" ++ s ++ "{s}" ++ s ++ "{s}.s", .{
|
||||
dirname, arch_name, noextbasename,
|
||||
});
|
||||
if (source_table.contains(override_path.items))
|
||||
continue;
|
||||
|
||||
override_path.shrinkRetainingCapacity(0);
|
||||
try override_path.writer().print("{s}" ++ s ++ "{s}" ++ s ++ "{s}.S", .{
|
||||
try override_path.print("{s}" ++ s ++ "{s}" ++ s ++ "{s}.S", .{
|
||||
dirname, arch_name, noextbasename,
|
||||
});
|
||||
if (source_table.contains(override_path.items))
|
||||
continue;
|
||||
|
||||
override_path.shrinkRetainingCapacity(0);
|
||||
try override_path.writer().print("{s}" ++ s ++ "{s}" ++ s ++ "{s}.c", .{
|
||||
try override_path.print("{s}" ++ s ++ "{s}" ++ s ++ "{s}.c", .{
|
||||
dirname, arch_name, noextbasename,
|
||||
});
|
||||
if (source_table.contains(override_path.items))
|
||||
|
||||
@ -460,18 +460,15 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
|
||||
for (libs, 0..) |lib, lib_i| {
|
||||
stubs_asm.shrinkRetainingCapacity(0);
|
||||
|
||||
const stubs_writer = stubs_asm.writer();
|
||||
|
||||
try stubs_writer.writeAll(".text\n");
|
||||
try stubs_asm.appendSlice(".text\n");
|
||||
|
||||
var sym_i: usize = 0;
|
||||
var sym_name_buf = std.array_list.Managed(u8).init(arena);
|
||||
var sym_name_buf: std.Io.Writer.Allocating = .init(arena);
|
||||
var opt_symbol_name: ?[]const u8 = null;
|
||||
|
||||
var inc_fbs = std.io.fixedBufferStream(metadata.inclusions);
|
||||
var inc_reader = inc_fbs.reader();
|
||||
var inc_reader: std.Io.Reader = .fixed(metadata.inclusions);
|
||||
|
||||
const fn_inclusions_len = try inc_reader.readInt(u16, .little);
|
||||
const fn_inclusions_len = try inc_reader.takeInt(u16, .little);
|
||||
|
||||
var chosen_ver_index: usize = 255;
|
||||
var chosen_is_weak: bool = undefined;
|
||||
@ -479,17 +476,19 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
|
||||
while (sym_i < fn_inclusions_len) : (sym_i += 1) {
|
||||
const sym_name = opt_symbol_name orelse n: {
|
||||
sym_name_buf.clearRetainingCapacity();
|
||||
try inc_reader.streamUntilDelimiter(sym_name_buf.writer(), 0, null);
|
||||
_ = try inc_reader.streamDelimiter(&sym_name_buf.writer, 0);
|
||||
assert(inc_reader.buffered()[0] == 0); // TODO change streamDelimiter API
|
||||
inc_reader.toss(1);
|
||||
|
||||
opt_symbol_name = sym_name_buf.items;
|
||||
opt_symbol_name = sym_name_buf.written();
|
||||
chosen_ver_index = 255;
|
||||
|
||||
break :n sym_name_buf.items;
|
||||
break :n sym_name_buf.written();
|
||||
};
|
||||
|
||||
{
|
||||
const targets = try std.leb.readUleb128(u64, inc_reader);
|
||||
var lib_index = try inc_reader.readByte();
|
||||
const targets = try inc_reader.takeLeb128(u64);
|
||||
var lib_index = try inc_reader.takeByte();
|
||||
|
||||
const is_weak = (lib_index & (1 << 6)) != 0;
|
||||
const is_terminal = (lib_index & (1 << 7)) != 0;
|
||||
@ -502,7 +501,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
|
||||
((targets & (@as(u64, 1) << @as(u6, @intCast(target_targ_index)))) != 0);
|
||||
|
||||
while (true) {
|
||||
const byte = try inc_reader.readByte();
|
||||
const byte = try inc_reader.takeByte();
|
||||
const last = (byte & 0b1000_0000) != 0;
|
||||
const ver_i = @as(u7, @truncate(byte));
|
||||
if (ok_lib_and_target and ver_i <= target_ver_index and
|
||||
@ -525,7 +524,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
|
||||
// .globl _Exit
|
||||
// .type _Exit, %function
|
||||
// _Exit: .long 0
|
||||
try stubs_writer.print(
|
||||
try stubs_asm.print(
|
||||
\\.balign {d}
|
||||
\\.{s} {s}
|
||||
\\.type {s}, %function
|
||||
@ -542,9 +541,9 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
|
||||
}
|
||||
}
|
||||
|
||||
try stubs_writer.writeAll(".data\n");
|
||||
try stubs_asm.appendSlice(".data\n");
|
||||
|
||||
const obj_inclusions_len = try inc_reader.readInt(u16, .little);
|
||||
const obj_inclusions_len = try inc_reader.takeInt(u16, .little);
|
||||
|
||||
sym_i = 0;
|
||||
opt_symbol_name = null;
|
||||
@ -554,18 +553,20 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
|
||||
while (sym_i < obj_inclusions_len) : (sym_i += 1) {
|
||||
const sym_name = opt_symbol_name orelse n: {
|
||||
sym_name_buf.clearRetainingCapacity();
|
||||
try inc_reader.streamUntilDelimiter(sym_name_buf.writer(), 0, null);
|
||||
_ = try inc_reader.streamDelimiter(&sym_name_buf.writer, 0);
|
||||
assert(inc_reader.buffered()[0] == 0); // TODO change streamDelimiter API
|
||||
inc_reader.toss(1);
|
||||
|
||||
opt_symbol_name = sym_name_buf.items;
|
||||
opt_symbol_name = sym_name_buf.written();
|
||||
chosen_ver_index = 255;
|
||||
|
||||
break :n sym_name_buf.items;
|
||||
break :n sym_name_buf.written();
|
||||
};
|
||||
|
||||
{
|
||||
const targets = try std.leb.readUleb128(u64, inc_reader);
|
||||
const size = try std.leb.readUleb128(u16, inc_reader);
|
||||
var lib_index = try inc_reader.readByte();
|
||||
const targets = try inc_reader.takeLeb128(u64);
|
||||
const size = try inc_reader.takeLeb128(u16);
|
||||
var lib_index = try inc_reader.takeByte();
|
||||
|
||||
const is_weak = (lib_index & (1 << 6)) != 0;
|
||||
const is_terminal = (lib_index & (1 << 7)) != 0;
|
||||
@ -578,7 +579,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
|
||||
((targets & (@as(u64, 1) << @as(u6, @intCast(target_targ_index)))) != 0);
|
||||
|
||||
while (true) {
|
||||
const byte = try inc_reader.readByte();
|
||||
const byte = try inc_reader.takeByte();
|
||||
const last = (byte & 0b1000_0000) != 0;
|
||||
const ver_i = @as(u7, @truncate(byte));
|
||||
if (ok_lib_and_target and ver_i <= target_ver_index and
|
||||
@ -603,7 +604,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
|
||||
// .type malloc_conf, %object
|
||||
// .size malloc_conf, 4
|
||||
// malloc_conf: .fill 4, 1, 0
|
||||
try stubs_writer.print(
|
||||
try stubs_asm.print(
|
||||
\\.balign {d}
|
||||
\\.{s} {s}
|
||||
\\.type {s}, %object
|
||||
|
||||
@ -1976,7 +1976,7 @@ fn resolveLibInput(
|
||||
.root_dir = lib_directory,
|
||||
.sub_path = try std.fmt.allocPrint(arena, "lib{s}.tbd", .{lib_name}),
|
||||
};
|
||||
try checked_paths.writer(gpa).print("\n {f}", .{test_path});
|
||||
try checked_paths.print(gpa, "\n {f}", .{test_path});
|
||||
var file = test_path.root_dir.handle.openFile(test_path.sub_path, .{}) catch |err| switch (err) {
|
||||
error.FileNotFound => break :tbd,
|
||||
else => |e| fatal("unable to search for tbd library '{f}': {s}", .{ test_path, @errorName(e) }),
|
||||
@ -1995,7 +1995,7 @@ fn resolveLibInput(
|
||||
},
|
||||
}),
|
||||
};
|
||||
try checked_paths.writer(gpa).print("\n {f}", .{test_path});
|
||||
try checked_paths.print(gpa, "\n {f}", .{test_path});
|
||||
switch (try resolvePathInputLib(gpa, arena, unresolved_inputs, resolved_inputs, ld_script_bytes, target, .{
|
||||
.path = test_path,
|
||||
.query = name_query.query,
|
||||
@ -2012,7 +2012,7 @@ fn resolveLibInput(
|
||||
.root_dir = lib_directory,
|
||||
.sub_path = try std.fmt.allocPrint(arena, "lib{s}.so", .{lib_name}),
|
||||
};
|
||||
try checked_paths.writer(gpa).print("\n {f}", .{test_path});
|
||||
try checked_paths.print(gpa, "\n {f}", .{test_path});
|
||||
var file = test_path.root_dir.handle.openFile(test_path.sub_path, .{}) catch |err| switch (err) {
|
||||
error.FileNotFound => break :so,
|
||||
else => |e| fatal("unable to search for so library '{f}': {s}", .{
|
||||
@ -2030,7 +2030,7 @@ fn resolveLibInput(
|
||||
.root_dir = lib_directory,
|
||||
.sub_path = try std.fmt.allocPrint(arena, "lib{s}.a", .{lib_name}),
|
||||
};
|
||||
try checked_paths.writer(gpa).print("\n {f}", .{test_path});
|
||||
try checked_paths.print(gpa, "\n {f}", .{test_path});
|
||||
var file = test_path.root_dir.handle.openFile(test_path.sub_path, .{}) catch |err| switch (err) {
|
||||
error.FileNotFound => break :mingw,
|
||||
else => |e| fatal("unable to search for static library '{f}': {s}", .{ test_path, @errorName(e) }),
|
||||
|
||||
@ -2179,13 +2179,13 @@ fn writeDataDirectoriesHeaders(coff: *Coff) !void {
|
||||
fn writeHeader(coff: *Coff) !void {
|
||||
const target = &coff.base.comp.root_mod.resolved_target.result;
|
||||
const gpa = coff.base.comp.gpa;
|
||||
var buffer = std.array_list.Managed(u8).init(gpa);
|
||||
var buffer: std.Io.Writer.Allocating = .init(gpa);
|
||||
defer buffer.deinit();
|
||||
const writer = buffer.writer();
|
||||
const writer = &buffer.writer;
|
||||
|
||||
try buffer.ensureTotalCapacity(coff.getSizeOfHeaders());
|
||||
writer.writeAll(&msdos_stub) catch unreachable;
|
||||
mem.writeInt(u32, buffer.items[0x3c..][0..4], msdos_stub.len, .little);
|
||||
mem.writeInt(u32, buffer.writer.buffer[0x3c..][0..4], msdos_stub.len, .little);
|
||||
|
||||
writer.writeAll("PE\x00\x00") catch unreachable;
|
||||
var flags = coff_util.CoffHeaderFlags{
|
||||
@ -2313,7 +2313,7 @@ fn writeHeader(coff: *Coff) !void {
|
||||
},
|
||||
}
|
||||
|
||||
try coff.pwriteAll(buffer.items, 0);
|
||||
try coff.pwriteAll(buffer.written(), 0);
|
||||
}
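
A short sketch (illustrative only) of std.Io.Writer.Allocating as it is now used for the header buffer: writes go through &buffer.writer, written() exposes the bytes accumulated so far, and for this writer allocation failure surfaces as error.WriteFailed.

    const std = @import("std");

    test "building a byte buffer with std.Io.Writer.Allocating" {
        var buffer: std.Io.Writer.Allocating = .init(std.testing.allocator);
        defer buffer.deinit();
        const writer = &buffer.writer;

        // Formatted and raw writes accumulate in the allocating buffer.
        try writer.writeAll("PE");
        try writer.print(", {d} sections", .{3});
        try std.testing.expectEqualStrings("PE, 3 sections", buffer.written());
    }
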
|
||||
|
||||
pub fn padToIdeal(actual_size: anytype) @TypeOf(actual_size) {
|
||||
|
||||
@ -811,10 +811,6 @@ fn flushInner(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id) !void {
|
||||
|
||||
if (self.base.gc_sections) {
|
||||
try gc.gcAtoms(self);
|
||||
|
||||
if (self.base.print_gc_sections) {
|
||||
try gc.dumpPrunedAtoms(self);
|
||||
}
|
||||
}
|
||||
|
||||
self.checkDuplicates() catch |err| switch (err) {
|
||||
@ -3005,7 +3001,7 @@ fn writeAtoms(self: *Elf) !void {
|
||||
undefs.deinit();
|
||||
}
|
||||
|
||||
var buffer = std.array_list.Managed(u8).init(gpa);
|
||||
var buffer: std.Io.Writer.Allocating = .init(gpa);
|
||||
defer buffer.deinit();
|
||||
|
||||
const slice = self.sections.slice();
|
||||
@ -3032,9 +3028,9 @@ fn writeAtoms(self: *Elf) !void {
|
||||
try buffer.ensureUnusedCapacity(thunk_size);
|
||||
const shdr = slice.items(.shdr)[th.output_section_index];
|
||||
const offset = @as(u64, @intCast(th.value)) + shdr.sh_offset;
|
||||
try th.write(self, buffer.writer());
|
||||
assert(buffer.items.len == thunk_size);
|
||||
try self.pwriteAll(buffer.items, offset);
|
||||
try th.write(self, &buffer.writer);
|
||||
assert(buffer.written().len == thunk_size);
|
||||
try self.pwriteAll(buffer.written(), offset);
|
||||
buffer.clearRetainingCapacity();
|
||||
}
|
||||
}
|
||||
@ -3166,26 +3162,26 @@ fn writeSyntheticSections(self: *Elf) !void {
|
||||
|
||||
if (self.section_indexes.verneed) |shndx| {
|
||||
const shdr = slice.items(.shdr)[shndx];
|
||||
var buffer = try std.array_list.Managed(u8).initCapacity(gpa, self.verneed.size());
|
||||
var buffer = try std.Io.Writer.Allocating.initCapacity(gpa, self.verneed.size());
|
||||
defer buffer.deinit();
|
||||
try self.verneed.write(buffer.writer());
|
||||
try self.pwriteAll(buffer.items, shdr.sh_offset);
|
||||
try self.verneed.write(&buffer.writer);
|
||||
try self.pwriteAll(buffer.written(), shdr.sh_offset);
|
||||
}
|
||||
|
||||
if (self.section_indexes.dynamic) |shndx| {
|
||||
const shdr = slice.items(.shdr)[shndx];
|
||||
var buffer = try std.array_list.Managed(u8).initCapacity(gpa, self.dynamic.size(self));
|
||||
var buffer = try std.Io.Writer.Allocating.initCapacity(gpa, self.dynamic.size(self));
|
||||
defer buffer.deinit();
|
||||
try self.dynamic.write(self, buffer.writer());
|
||||
try self.pwriteAll(buffer.items, shdr.sh_offset);
|
||||
try self.dynamic.write(self, &buffer.writer);
|
||||
try self.pwriteAll(buffer.written(), shdr.sh_offset);
|
||||
}
|
||||
|
||||
if (self.section_indexes.dynsymtab) |shndx| {
|
||||
const shdr = slice.items(.shdr)[shndx];
|
||||
var buffer = try std.array_list.Managed(u8).initCapacity(gpa, self.dynsym.size());
|
||||
var buffer = try std.Io.Writer.Allocating.initCapacity(gpa, self.dynsym.size());
|
||||
defer buffer.deinit();
|
||||
try self.dynsym.write(self, buffer.writer());
|
||||
try self.pwriteAll(buffer.items, shdr.sh_offset);
|
||||
try self.dynsym.write(self, &buffer.writer);
|
||||
try self.pwriteAll(buffer.written(), shdr.sh_offset);
|
||||
}
|
||||
|
||||
if (self.section_indexes.dynstrtab) |shndx| {
|
||||
@ -3201,28 +3197,28 @@ fn writeSyntheticSections(self: *Elf) !void {
|
||||
};
|
||||
const shdr = slice.items(.shdr)[shndx];
|
||||
const sh_size = try self.cast(usize, shdr.sh_size);
|
||||
var buffer = try std.array_list.Managed(u8).initCapacity(gpa, @intCast(sh_size - existing_size));
|
||||
var buffer = try std.Io.Writer.Allocating.initCapacity(gpa, @intCast(sh_size - existing_size));
|
||||
defer buffer.deinit();
|
||||
try eh_frame.writeEhFrame(self, buffer.writer());
|
||||
assert(buffer.items.len == sh_size - existing_size);
|
||||
try self.pwriteAll(buffer.items, shdr.sh_offset + existing_size);
|
||||
try eh_frame.writeEhFrame(self, &buffer.writer);
|
||||
assert(buffer.written().len == sh_size - existing_size);
|
||||
try self.pwriteAll(buffer.written(), shdr.sh_offset + existing_size);
|
||||
}
|
||||
|
||||
if (self.section_indexes.eh_frame_hdr) |shndx| {
|
||||
const shdr = slice.items(.shdr)[shndx];
|
||||
const sh_size = try self.cast(usize, shdr.sh_size);
|
||||
var buffer = try std.array_list.Managed(u8).initCapacity(gpa, sh_size);
|
||||
var buffer = try std.Io.Writer.Allocating.initCapacity(gpa, sh_size);
|
||||
defer buffer.deinit();
|
||||
try eh_frame.writeEhFrameHdr(self, buffer.writer());
|
||||
try self.pwriteAll(buffer.items, shdr.sh_offset);
|
||||
try eh_frame.writeEhFrameHdr(self, &buffer.writer);
|
||||
try self.pwriteAll(buffer.written(), shdr.sh_offset);
|
||||
}
|
||||
|
||||
if (self.section_indexes.got) |index| {
|
||||
const shdr = slice.items(.shdr)[index];
|
||||
var buffer = try std.array_list.Managed(u8).initCapacity(gpa, self.got.size(self));
|
||||
var buffer = try std.Io.Writer.Allocating.initCapacity(gpa, self.got.size(self));
|
||||
defer buffer.deinit();
|
||||
try self.got.write(self, buffer.writer());
|
||||
try self.pwriteAll(buffer.items, shdr.sh_offset);
|
||||
try self.got.write(self, &buffer.writer);
|
||||
try self.pwriteAll(buffer.written(), shdr.sh_offset);
|
||||
}
|
||||
|
||||
if (self.section_indexes.rela_dyn) |shndx| {
|
||||
@ -3235,26 +3231,26 @@ fn writeSyntheticSections(self: *Elf) !void {
|
||||
|
||||
if (self.section_indexes.plt) |shndx| {
|
||||
const shdr = slice.items(.shdr)[shndx];
|
||||
var buffer = try std.array_list.Managed(u8).initCapacity(gpa, self.plt.size(self));
|
||||
var buffer = try std.Io.Writer.Allocating.initCapacity(gpa, self.plt.size(self));
|
||||
defer buffer.deinit();
|
||||
try self.plt.write(self, buffer.writer());
|
||||
try self.pwriteAll(buffer.items, shdr.sh_offset);
|
||||
try self.plt.write(self, &buffer.writer);
|
||||
try self.pwriteAll(buffer.written(), shdr.sh_offset);
|
||||
}
|
||||
|
||||
if (self.section_indexes.got_plt) |shndx| {
|
||||
const shdr = slice.items(.shdr)[shndx];
|
||||
var buffer = try std.array_list.Managed(u8).initCapacity(gpa, self.got_plt.size(self));
|
||||
var buffer = try std.Io.Writer.Allocating.initCapacity(gpa, self.got_plt.size(self));
|
||||
defer buffer.deinit();
|
||||
try self.got_plt.write(self, buffer.writer());
|
||||
try self.pwriteAll(buffer.items, shdr.sh_offset);
|
||||
try self.got_plt.write(self, &buffer.writer);
|
||||
try self.pwriteAll(buffer.written(), shdr.sh_offset);
|
||||
}
|
||||
|
||||
if (self.section_indexes.plt_got) |shndx| {
|
||||
const shdr = slice.items(.shdr)[shndx];
|
||||
var buffer = try std.array_list.Managed(u8).initCapacity(gpa, self.plt_got.size(self));
|
||||
var buffer = try std.Io.Writer.Allocating.initCapacity(gpa, self.plt_got.size(self));
|
||||
defer buffer.deinit();
|
||||
try self.plt_got.write(self, buffer.writer());
|
||||
try self.pwriteAll(buffer.items, shdr.sh_offset);
|
||||
try self.plt_got.write(self, &buffer.writer);
|
||||
try self.pwriteAll(buffer.written(), shdr.sh_offset);
|
||||
}
|
||||
|
||||
if (self.section_indexes.rela_plt) |shndx| {
|
||||
@ -3757,7 +3753,7 @@ pub fn insertShString(self: *Elf, name: [:0]const u8) error{OutOfMemory}!u32 {
|
||||
const gpa = self.base.comp.gpa;
|
||||
const off = @as(u32, @intCast(self.shstrtab.items.len));
|
||||
try self.shstrtab.ensureUnusedCapacity(gpa, name.len + 1);
|
||||
self.shstrtab.writer(gpa).print("{s}\x00", .{name}) catch unreachable;
|
||||
self.shstrtab.print(gpa, "{s}\x00", .{name}) catch unreachable;
|
||||
return off;
|
||||
}
|
||||
|
||||
@ -3770,7 +3766,7 @@ pub fn insertDynString(self: *Elf, name: []const u8) error{OutOfMemory}!u32 {
|
||||
const gpa = self.base.comp.gpa;
|
||||
const off = @as(u32, @intCast(self.dynstrtab.items.len));
|
||||
try self.dynstrtab.ensureUnusedCapacity(gpa, name.len + 1);
|
||||
self.dynstrtab.writer(gpa).print("{s}\x00", .{name}) catch unreachable;
|
||||
self.dynstrtab.print(gpa, "{s}\x00", .{name}) catch unreachable;
|
||||
return off;
|
||||
}
|
||||
|
||||
|
||||
@ -123,8 +123,7 @@ pub fn setArHdr(opts: struct {
|
||||
@memcpy(&hdr.ar_fmag, elf.ARFMAG);
|
||||
|
||||
{
|
||||
var stream = std.io.fixedBufferStream(&hdr.ar_name);
|
||||
const writer = stream.writer();
|
||||
var writer: std.Io.Writer = .fixed(&hdr.ar_name);
|
||||
switch (opts.name) {
|
||||
.symtab => writer.print("{s}", .{elf.SYM64NAME}) catch unreachable,
|
||||
.strtab => writer.print("//", .{}) catch unreachable,
|
||||
@ -133,8 +132,8 @@ pub fn setArHdr(opts: struct {
|
||||
}
|
||||
}
|
||||
{
|
||||
var stream = std.io.fixedBufferStream(&hdr.ar_size);
|
||||
stream.writer().print("{d}", .{opts.size}) catch unreachable;
|
||||
var writer: std.Io.Writer = .fixed(&hdr.ar_size);
|
||||
writer.print("{d}", .{opts.size}) catch unreachable;
|
||||
}
|
||||
|
||||
return hdr;
|
||||
@ -246,7 +245,7 @@ pub const ArStrtab = struct {
|
||||
|
||||
pub fn insert(ar: *ArStrtab, allocator: Allocator, name: []const u8) error{OutOfMemory}!u32 {
|
||||
const off = @as(u32, @intCast(ar.buffer.items.len));
|
||||
try ar.buffer.writer(allocator).print("{s}/{c}", .{ name, strtab_delimiter });
|
||||
try ar.buffer.print(allocator, "{s}/{c}", .{ name, strtab_delimiter });
|
||||
return off;
|
||||
}
|
||||
|
||||
|
||||
@ -621,7 +621,6 @@ pub fn resolveRelocsAlloc(self: Atom, elf_file: *Elf, code: []u8) RelocError!voi
|
||||
|
||||
const cpu_arch = elf_file.getTarget().cpu.arch;
|
||||
const file_ptr = self.file(elf_file).?;
|
||||
var stream = std.io.fixedBufferStream(code);
|
||||
|
||||
const rels = self.relocs(elf_file);
|
||||
var it = RelocsIterator{ .relocs = rels };
|
||||
@ -661,20 +660,16 @@ pub fn resolveRelocsAlloc(self: Atom, elf_file: *Elf, code: []u8) RelocError!voi
target.name(elf_file),
});

try stream.seekTo(r_offset);

const args = ResolveArgs{ P, A, S, GOT, G, TP, DTP };

switch (cpu_arch) {
.x86_64 => x86_64.resolveRelocAlloc(self, elf_file, rel, target, args, &it, code, &stream) catch |err| switch (err) {
.x86_64 => x86_64.resolveRelocAlloc(self, elf_file, rel, target, args, &it, code) catch |err| switch (err) {
error.RelocFailure,
error.RelaxFailure,
error.InvalidInstruction,
error.CannotEncode,
=> has_reloc_errors = true,
else => |e| return e,
},
.aarch64, .aarch64_be => aarch64.resolveRelocAlloc(self, elf_file, rel, target, args, &it, code, &stream) catch |err| switch (err) {
.aarch64, .aarch64_be => aarch64.resolveRelocAlloc(self, elf_file, rel, target, args, &it, code) catch |err| switch (err) {
error.RelocFailure,
error.RelaxFailure,
error.UnexpectedRemainder,
@ -682,7 +677,7 @@ pub fn resolveRelocsAlloc(self: Atom, elf_file: *Elf, code: []u8) RelocError!voi
=> has_reloc_errors = true,
else => |e| return e,
},
.riscv64, .riscv64be => riscv.resolveRelocAlloc(self, elf_file, rel, target, args, &it, code, &stream) catch |err| switch (err) {
.riscv64, .riscv64be => riscv.resolveRelocAlloc(self, elf_file, rel, target, args, &it, code) catch |err| switch (err) {
error.RelocFailure,
error.RelaxFailure,
=> has_reloc_errors = true,
@ -701,7 +696,8 @@ fn resolveDynAbsReloc(
rel: elf.Elf64_Rela,
action: RelocAction,
elf_file: *Elf,
writer: anytype,
code: []u8,
r_offset: usize,
) !void {
const comp = elf_file.base.comp;
const gpa = comp.gpa;
@ -726,7 +722,7 @@ fn resolveDynAbsReloc(
.copyrel,
.cplt,
.none,
=> try writer.writeInt(i64, S + A, .little),
=> mem.writeInt(i64, code[r_offset..][0..8], S + A, .little),

.dyn_copyrel => {
if (is_writeable or elf_file.z_nocopyreloc) {
@ -737,9 +733,9 @@ fn resolveDynAbsReloc(
.addend = A,
.target = target,
});
try applyDynamicReloc(A, elf_file, writer);
applyDynamicReloc(A, code, r_offset);
} else {
try writer.writeInt(i64, S + A, .little);
mem.writeInt(i64, code[r_offset..][0..8], S + A, .little);
}
},

@ -752,9 +748,9 @@ fn resolveDynAbsReloc(
.addend = A,
.target = target,
});
try applyDynamicReloc(A, elf_file, writer);
applyDynamicReloc(A, code, r_offset);
} else {
try writer.writeInt(i64, S + A, .little);
mem.writeInt(i64, code[r_offset..][0..8], S + A, .little);
}
},

@ -766,7 +762,7 @@ fn resolveDynAbsReloc(
.addend = A,
.target = target,
});
try applyDynamicReloc(A, elf_file, writer);
applyDynamicReloc(A, code, r_offset);
},

.baserel => {
@ -776,7 +772,7 @@ fn resolveDynAbsReloc(
.addend = S + A,
.target = target,
});
try applyDynamicReloc(S + A, elf_file, writer);
applyDynamicReloc(S + A, code, r_offset);
},

.ifunc => {
@ -787,16 +783,13 @@ fn resolveDynAbsReloc(
.addend = S_ + A,
.target = target,
});
try applyDynamicReloc(S_ + A, elf_file, writer);
applyDynamicReloc(S_ + A, code, r_offset);
},
}
}

fn applyDynamicReloc(value: i64, elf_file: *Elf, writer: anytype) !void {
_ = elf_file;
// if (elf_file.options.apply_dynamic_relocs) {
try writer.writeInt(i64, value, .little);
// }
fn applyDynamicReloc(value: i64, code: []u8, r_offset: usize) void {
mem.writeInt(i64, code[r_offset..][0..8], value, .little);
}

pub fn resolveRelocsNonAlloc(self: Atom, elf_file: *Elf, code: []u8, undefs: anytype) !void {
@ -804,7 +797,6 @@ pub fn resolveRelocsNonAlloc(self: Atom, elf_file: *Elf, code: []u8, undefs: any

const cpu_arch = elf_file.getTarget().cpu.arch;
const file_ptr = self.file(elf_file).?;
var stream = std.io.fixedBufferStream(code);

const rels = self.relocs(elf_file);
var has_reloc_errors = false;
@ -863,18 +855,16 @@ pub fn resolveRelocsNonAlloc(self: Atom, elf_file: *Elf, code: []u8, undefs: any
target.name(elf_file),
});

try stream.seekTo(r_offset);

switch (cpu_arch) {
.x86_64 => x86_64.resolveRelocNonAlloc(self, elf_file, rel, target, args, &it, code, &stream) catch |err| switch (err) {
.x86_64 => x86_64.resolveRelocNonAlloc(self, elf_file, rel, target, args, code[r_offset..]) catch |err| switch (err) {
error.RelocFailure => has_reloc_errors = true,
else => |e| return e,
},
.aarch64, .aarch64_be => aarch64.resolveRelocNonAlloc(self, elf_file, rel, target, args, &it, code, &stream) catch |err| switch (err) {
.aarch64, .aarch64_be => aarch64.resolveRelocNonAlloc(self, elf_file, rel, target, args, code[r_offset..]) catch |err| switch (err) {
error.RelocFailure => has_reloc_errors = true,
else => |e| return e,
},
.riscv64, .riscv64be => riscv.resolveRelocNonAlloc(self, elf_file, rel, target, args, &it, code, &stream) catch |err| switch (err) {
.riscv64, .riscv64be => riscv.resolveRelocNonAlloc(self, elf_file, rel, target, args, code[r_offset..]) catch |err| switch (err) {
error.RelocFailure => has_reloc_errors = true,
else => |e| return e,
},
@ -915,7 +905,7 @@ const Format = struct {
atom: Atom,
elf_file: *Elf,

fn default(f: Format, w: *std.io.Writer) std.io.Writer.Error!void {
fn default(f: Format, w: *Writer) Writer.Error!void {
const atom = f.atom;
const elf_file = f.elf_file;
try w.print("atom({d}) : {s} : @{x} : shdr({d}) : align({x}) : size({x}) : prev({f}) : next({f})", .{
@ -1068,16 +1058,13 @@ const x86_64 = struct {
args: ResolveArgs,
it: *RelocsIterator,
code: []u8,
stream: anytype,
) (error{ InvalidInstruction, CannotEncode } || RelocError)!void {
) !void {
dev.check(.x86_64_backend);
const t = &elf_file.base.comp.root_mod.resolved_target.result;
const diags = &elf_file.base.comp.link_diags;
const r_type: elf.R_X86_64 = @enumFromInt(rel.r_type());
const r_offset = std.math.cast(usize, rel.r_offset) orelse return error.Overflow;

const cwriter = stream.writer();

const P, const A, const S, const GOT, const G, const TP, const DTP = args;

switch (r_type) {
@ -1089,58 +1076,60 @@ const x86_64 = struct {
rel,
dynAbsRelocAction(target, elf_file),
elf_file,
cwriter,
code,
r_offset,
);
},

.PLT32 => try cwriter.writeInt(i32, @as(i32, @intCast(S + A - P)), .little),
.PC32 => try cwriter.writeInt(i32, @as(i32, @intCast(S + A - P)), .little),
.PLT32 => mem.writeInt(i32, code[r_offset..][0..4], @as(i32, @intCast(S + A - P)), .little),
.PC32 => mem.writeInt(i32, code[r_offset..][0..4], @as(i32, @intCast(S + A - P)), .little),

.GOTPCREL => try cwriter.writeInt(i32, @as(i32, @intCast(G + GOT + A - P)), .little),
.GOTPC32 => try cwriter.writeInt(i32, @as(i32, @intCast(GOT + A - P)), .little),
.GOTPC64 => try cwriter.writeInt(i64, GOT + A - P, .little),
.GOTPCREL => mem.writeInt(i32, code[r_offset..][0..4], @as(i32, @intCast(G + GOT + A - P)), .little),
.GOTPC32 => mem.writeInt(i32, code[r_offset..][0..4], @as(i32, @intCast(GOT + A - P)), .little),
.GOTPC64 => mem.writeInt(i64, code[r_offset..][0..8], GOT + A - P, .little),

.GOTPCRELX => {
if (!target.flags.import and !target.isIFunc(elf_file) and !target.isAbs(elf_file)) blk: {
x86_64.relaxGotpcrelx(code[r_offset - 2 ..], t) catch break :blk;
try cwriter.writeInt(i32, @as(i32, @intCast(S + A - P)), .little);
mem.writeInt(i32, code[r_offset..][0..4], @as(i32, @intCast(S + A - P)), .little);
return;
}
try cwriter.writeInt(i32, @as(i32, @intCast(G + GOT + A - P)), .little);
mem.writeInt(i32, code[r_offset..][0..4], @as(i32, @intCast(G + GOT + A - P)), .little);
},

.REX_GOTPCRELX => {
if (!target.flags.import and !target.isIFunc(elf_file) and !target.isAbs(elf_file)) blk: {
x86_64.relaxRexGotpcrelx(code[r_offset - 3 ..], t) catch break :blk;
try cwriter.writeInt(i32, @as(i32, @intCast(S + A - P)), .little);
mem.writeInt(i32, code[r_offset..][0..4], @as(i32, @intCast(S + A - P)), .little);
return;
}
try cwriter.writeInt(i32, @as(i32, @intCast(G + GOT + A - P)), .little);
mem.writeInt(i32, code[r_offset..][0..4], @as(i32, @intCast(G + GOT + A - P)), .little);
},

.@"32" => try cwriter.writeInt(u32, @as(u32, @truncate(@as(u64, @intCast(S + A)))), .little),
.@"32S" => try cwriter.writeInt(i32, @as(i32, @truncate(S + A)), .little),
.@"32" => mem.writeInt(u32, code[r_offset..][0..4], @as(u32, @truncate(@as(u64, @intCast(S + A)))), .little),
.@"32S" => mem.writeInt(i32, code[r_offset..][0..4], @as(i32, @truncate(S + A)), .little),

.TPOFF32 => try cwriter.writeInt(i32, @as(i32, @truncate(S + A - TP)), .little),
.TPOFF64 => try cwriter.writeInt(i64, S + A - TP, .little),
.TPOFF32 => mem.writeInt(i32, code[r_offset..][0..4], @as(i32, @truncate(S + A - TP)), .little),
.TPOFF64 => mem.writeInt(i64, code[r_offset..][0..8], S + A - TP, .little),

.DTPOFF32 => try cwriter.writeInt(i32, @as(i32, @truncate(S + A - DTP)), .little),
.DTPOFF64 => try cwriter.writeInt(i64, S + A - DTP, .little),
.DTPOFF32 => mem.writeInt(i32, code[r_offset..][0..4], @as(i32, @truncate(S + A - DTP)), .little),
.DTPOFF64 => mem.writeInt(i64, code[r_offset..][0..8], S + A - DTP, .little),

.TLSGD => {
if (target.flags.has_tlsgd) {
const S_ = target.tlsGdAddress(elf_file);
try cwriter.writeInt(i32, @as(i32, @intCast(S_ + A - P)), .little);
mem.writeInt(i32, code[r_offset..][0..4], @as(i32, @intCast(S_ + A - P)), .little);
} else if (target.flags.has_gottp) {
const S_ = target.gotTpAddress(elf_file);
try x86_64.relaxTlsGdToIe(atom, &.{ rel, it.next().? }, @intCast(S_ - P), elf_file, stream);
try x86_64.relaxTlsGdToIe(atom, &.{ rel, it.next().? }, @intCast(S_ - P), elf_file, code, r_offset);
} else {
try x86_64.relaxTlsGdToLe(
atom,
&.{ rel, it.next().? },
@as(i32, @intCast(S - TP)),
elf_file,
stream,
code,
r_offset,
);
}
},
@ -1149,14 +1138,15 @@ const x86_64 = struct {
if (elf_file.got.tlsld_index) |entry_index| {
const tlsld_entry = elf_file.got.entries.items[entry_index];
const S_ = tlsld_entry.address(elf_file);
try cwriter.writeInt(i32, @as(i32, @intCast(S_ + A - P)), .little);
mem.writeInt(i32, code[r_offset..][0..4], @as(i32, @intCast(S_ + A - P)), .little);
} else {
try x86_64.relaxTlsLdToLe(
atom,
&.{ rel, it.next().? },
@as(i32, @intCast(TP - elf_file.tlsAddress())),
elf_file,
stream,
code,
r_offset,
);
}
},
@ -1164,7 +1154,7 @@ const x86_64 = struct {
.GOTPC32_TLSDESC => {
if (target.flags.has_tlsdesc) {
const S_ = target.tlsDescAddress(elf_file);
try cwriter.writeInt(i32, @as(i32, @intCast(S_ + A - P)), .little);
mem.writeInt(i32, code[r_offset..][0..4], @as(i32, @intCast(S_ + A - P)), .little);
} else {
x86_64.relaxGotPcTlsDesc(code[r_offset - 3 ..], t) catch {
var err = try diags.addErrorWithNotes(1);
@ -1176,26 +1166,26 @@ const x86_64 = struct {
});
return error.RelaxFailure;
};
try cwriter.writeInt(i32, @as(i32, @intCast(S - TP)), .little);
mem.writeInt(i32, code[r_offset..][0..4], @as(i32, @intCast(S - TP)), .little);
}
},

.TLSDESC_CALL => if (!target.flags.has_tlsdesc) {
// call -> nop
try cwriter.writeAll(&.{ 0x66, 0x90 });
code[r_offset..][0..2].* = .{ 0x66, 0x90 };
},

.GOTTPOFF => {
if (target.flags.has_gottp) {
const S_ = target.gotTpAddress(elf_file);
try cwriter.writeInt(i32, @as(i32, @intCast(S_ + A - P)), .little);
mem.writeInt(i32, code[r_offset..][0..4], @as(i32, @intCast(S_ + A - P)), .little);
} else {
x86_64.relaxGotTpOff(code[r_offset - 3 ..], t);
try cwriter.writeInt(i32, @as(i32, @intCast(S - TP)), .little);
mem.writeInt(i32, code[r_offset..][0..4], @as(i32, @intCast(S - TP)), .little);
}
},

.GOT32 => try cwriter.writeInt(i32, @as(i32, @intCast(G + A)), .little),
.GOT32 => mem.writeInt(i32, code[r_offset..][0..4], @as(i32, @intCast(G + A)), .little),

else => try atom.reportUnhandledRelocError(rel, elf_file),
}
@ -1207,45 +1197,42 @@ const x86_64 = struct {
rel: elf.Elf64_Rela,
target: *const Symbol,
args: ResolveArgs,
it: *RelocsIterator,
code: []u8,
stream: anytype,
) !void {
dev.check(.x86_64_backend);
_ = code;
_ = it;
const r_type: elf.R_X86_64 = @enumFromInt(rel.r_type());
const cwriter = stream.writer();

_, const A, const S, const GOT, _, _, const DTP = args;

var writer: Writer = .fixed(code);

switch (r_type) {
.NONE => unreachable,
.@"8" => try cwriter.writeInt(u8, @as(u8, @bitCast(@as(i8, @intCast(S + A)))), .little),
.@"16" => try cwriter.writeInt(u16, @as(u16, @bitCast(@as(i16, @intCast(S + A)))), .little),
.@"32" => try cwriter.writeInt(u32, @as(u32, @bitCast(@as(i32, @intCast(S + A)))), .little),
.@"32S" => try cwriter.writeInt(i32, @as(i32, @intCast(S + A)), .little),
.@"8" => try writer.writeInt(u8, @as(u8, @bitCast(@as(i8, @intCast(S + A)))), .little),
.@"16" => try writer.writeInt(u16, @as(u16, @bitCast(@as(i16, @intCast(S + A)))), .little),
.@"32" => try writer.writeInt(u32, @as(u32, @bitCast(@as(i32, @intCast(S + A)))), .little),
.@"32S" => try writer.writeInt(i32, @as(i32, @intCast(S + A)), .little),
.@"64" => if (atom.debugTombstoneValue(target.*, elf_file)) |value|
try cwriter.writeInt(u64, value, .little)
try writer.writeInt(u64, value, .little)
else
try cwriter.writeInt(i64, S + A, .little),
try writer.writeInt(i64, S + A, .little),
.DTPOFF32 => if (atom.debugTombstoneValue(target.*, elf_file)) |value|
try cwriter.writeInt(u64, value, .little)
try writer.writeInt(u64, value, .little)
else
try cwriter.writeInt(i32, @as(i32, @intCast(S + A - DTP)), .little),
try writer.writeInt(i32, @as(i32, @intCast(S + A - DTP)), .little),
.DTPOFF64 => if (atom.debugTombstoneValue(target.*, elf_file)) |value|
try cwriter.writeInt(u64, value, .little)
try writer.writeInt(u64, value, .little)
else
try cwriter.writeInt(i64, S + A - DTP, .little),
.GOTOFF64 => try cwriter.writeInt(i64, S + A - GOT, .little),
.GOTPC64 => try cwriter.writeInt(i64, GOT + A, .little),
try writer.writeInt(i64, S + A - DTP, .little),
.GOTOFF64 => try writer.writeInt(i64, S + A - GOT, .little),
.GOTPC64 => try writer.writeInt(i64, GOT + A, .little),
.SIZE32 => {
const size = @as(i64, @intCast(target.elfSym(elf_file).st_size));
try cwriter.writeInt(u32, @bitCast(@as(i32, @intCast(size + A))), .little);
try writer.writeInt(u32, @bitCast(@as(i32, @intCast(size + A))), .little);
},
.SIZE64 => {
const size = @as(i64, @intCast(target.elfSym(elf_file).st_size));
try cwriter.writeInt(i64, @intCast(size + A), .little);
try writer.writeInt(i64, @intCast(size + A), .little);
},
else => try atom.reportUnhandledRelocError(rel, elf_file),
}
@ -1288,12 +1275,12 @@ const x86_64 = struct {
rels: []const elf.Elf64_Rela,
value: i32,
elf_file: *Elf,
stream: anytype,
code: []u8,
r_offset: usize,
) !void {
dev.check(.x86_64_backend);
assert(rels.len == 2);
const diags = &elf_file.base.comp.link_diags;
const writer = stream.writer();
const rel: elf.R_X86_64 = @enumFromInt(rels[1].r_type());
switch (rel) {
.PC32,
@ -1304,8 +1291,7 @@ const x86_64 = struct {
0x48, 0x03, 0x05, 0, 0, 0, 0, // add foo@gottpoff(%rip), %rax
};
std.mem.writeInt(i32, insts[12..][0..4], value - 12, .little);
try stream.seekBy(-4);
try writer.writeAll(&insts);
@memcpy(code[r_offset - 4 ..][0..insts.len], &insts);
},

else => {
@ -1329,12 +1315,12 @@ const x86_64 = struct {
rels: []const elf.Elf64_Rela,
value: i32,
elf_file: *Elf,
stream: anytype,
code: []u8,
r_offset: usize,
) !void {
dev.check(.x86_64_backend);
assert(rels.len == 2);
const diags = &elf_file.base.comp.link_diags;
const writer = stream.writer();
const rel: elf.R_X86_64 = @enumFromInt(rels[1].r_type());
switch (rel) {
.PC32,
@ -1346,8 +1332,7 @@ const x86_64 = struct {
0x48, 0x2d, 0, 0, 0, 0, // sub $tls_size, %rax
};
std.mem.writeInt(i32, insts[8..][0..4], value, .little);
try stream.seekBy(-3);
try writer.writeAll(&insts);
@memcpy(code[r_offset - 3 ..][0..insts.len], &insts);
},

.GOTPCREL,
@ -1360,8 +1345,7 @@ const x86_64 = struct {
0x90, // nop
};
std.mem.writeInt(i32, insts[8..][0..4], value, .little);
try stream.seekBy(-3);
try writer.writeAll(&insts);
@memcpy(code[r_offset - 3 ..][0..insts.len], &insts);
},

else => {
@ -1390,7 +1374,7 @@ const x86_64 = struct {
// TODO: hack to force imm32s in the assembler
.{ .imm = .s(-129) },
}, t) catch return false;
var trash: std.io.Writer.Discarding = .init(&.{});
var trash: Writer.Discarding = .init(&.{});
inst.encode(&trash.writer, .{}) catch return false;
return true;
},
@ -1437,12 +1421,12 @@ const x86_64 = struct {
rels: []const elf.Elf64_Rela,
value: i32,
elf_file: *Elf,
stream: anytype,
code: []u8,
r_offset: usize,
) !void {
dev.check(.x86_64_backend);
assert(rels.len == 2);
const diags = &elf_file.base.comp.link_diags;
const writer = stream.writer();
const rel: elf.R_X86_64 = @enumFromInt(rels[1].r_type());
switch (rel) {
.PC32,
@ -1455,8 +1439,7 @@ const x86_64 = struct {
0x48, 0x81, 0xc0, 0, 0, 0, 0, // add $tp_offset, %rax
};
std.mem.writeInt(i32, insts[12..][0..4], value, .little);
try stream.seekBy(-4);
try writer.writeAll(&insts);
@memcpy(code[r_offset - 4 ..][0..insts.len], &insts);
relocs_log.debug(" relaxing {f} and {f}", .{
relocation.fmtRelocType(rels[0].r_type(), .x86_64),
relocation.fmtRelocType(rels[1].r_type(), .x86_64),
@ -1486,8 +1469,8 @@ const x86_64 = struct {
}

fn encode(insts: []const Instruction, code: []u8) !void {
var stream: std.io.Writer = .fixed(code);
for (insts) |inst| try inst.encode(&stream, .{});
var writer: Writer = .fixed(code);
for (insts) |inst| try inst.encode(&writer, .{});
}

const bits = @import("../../arch/x86_64/bits.zig");
@ -1592,14 +1575,12 @@ const aarch64 = struct {
args: ResolveArgs,
it: *RelocsIterator,
code_buffer: []u8,
stream: anytype,
) (error{ UnexpectedRemainder, DivisionByZero } || RelocError)!void {
_ = it;

const diags = &elf_file.base.comp.link_diags;
const r_type: elf.R_AARCH64 = @enumFromInt(rel.r_type());
const r_offset = std.math.cast(usize, rel.r_offset) orelse return error.Overflow;
const cwriter = stream.writer();
const code = code_buffer[r_offset..][0..4];
const file_ptr = atom.file(elf_file).?;

@ -1614,7 +1595,8 @@ const aarch64 = struct {
rel,
dynAbsRelocAction(target, elf_file),
elf_file,
cwriter,
code_buffer,
r_offset,
);
},

@ -1782,25 +1764,20 @@ const aarch64 = struct {
rel: elf.Elf64_Rela,
target: *const Symbol,
args: ResolveArgs,
it: *RelocsIterator,
code: []u8,
stream: anytype,
) !void {
_ = it;
_ = code;

const r_type: elf.R_AARCH64 = @enumFromInt(rel.r_type());
const cwriter = stream.writer();

_, const A, const S, _, _, _, _ = args;

var writer: Writer = .fixed(code);
switch (r_type) {
.NONE => unreachable,
.ABS32 => try cwriter.writeInt(i32, @as(i32, @intCast(S + A)), .little),
.ABS32 => try writer.writeInt(i32, @as(i32, @intCast(S + A)), .little),
.ABS64 => if (atom.debugTombstoneValue(target.*, elf_file)) |value|
try cwriter.writeInt(u64, value, .little)
try writer.writeInt(u64, value, .little)
else
try cwriter.writeInt(i64, S + A, .little),
try writer.writeInt(i64, S + A, .little),
else => try atom.reportUnhandledRelocError(rel, elf_file),
}
}
@ -1861,12 +1838,10 @@ const riscv = struct {
args: ResolveArgs,
it: *RelocsIterator,
code: []u8,
stream: anytype,
) !void {
const diags = &elf_file.base.comp.link_diags;
const r_type: elf.R_RISCV = @enumFromInt(rel.r_type());
const r_offset = std.math.cast(usize, rel.r_offset) orelse return error.Overflow;
const cwriter = stream.writer();

const P, const A, const S, const GOT, const G, const TP, const DTP = args;
_ = TP;
@ -1875,7 +1850,7 @@ const riscv = struct {
switch (r_type) {
.NONE => unreachable,

.@"32" => try cwriter.writeInt(u32, @as(u32, @truncate(@as(u64, @intCast(S + A)))), .little),
.@"32" => mem.writeInt(u32, code[r_offset..][0..4], @as(u32, @truncate(@as(u64, @intCast(S + A)))), .little),

.@"64" => {
try atom.resolveDynAbsReloc(
@ -1883,7 +1858,8 @@ const riscv = struct {
rel,
dynAbsRelocAction(target, elf_file),
elf_file,
cwriter,
code,
r_offset,
);
},

@ -1997,15 +1973,9 @@ const riscv = struct {
rel: elf.Elf64_Rela,
target: *const Symbol,
args: ResolveArgs,
it: *RelocsIterator,
code: []u8,
stream: anytype,
) !void {
_ = it;

const r_type: elf.R_RISCV = @enumFromInt(rel.r_type());
const r_offset = std.math.cast(usize, rel.r_offset) orelse return error.Overflow;
const cwriter = stream.writer();

_, const A, const S, const GOT, _, _, const DTP = args;
_ = GOT;
@ -2014,30 +1984,29 @@ const riscv = struct {
switch (r_type) {
.NONE => unreachable,

.@"32" => try cwriter.writeInt(i32, @as(i32, @intCast(S + A)), .little),
.@"32" => mem.writeInt(i32, code[0..4], @intCast(S + A), .little),
.@"64" => if (atom.debugTombstoneValue(target.*, elf_file)) |value|
try cwriter.writeInt(u64, value, .little)
mem.writeInt(u64, code[0..8], value, .little)
else
try cwriter.writeInt(i64, S + A, .little),
mem.writeInt(i64, code[0..8], S + A, .little),
.ADD8 => riscv_util.writeAddend(i8, .add, code[0..1], S + A),
.SUB8 => riscv_util.writeAddend(i8, .sub, code[0..1], S + A),
.ADD16 => riscv_util.writeAddend(i16, .add, code[0..2], S + A),
.SUB16 => riscv_util.writeAddend(i16, .sub, code[0..2], S + A),
.ADD32 => riscv_util.writeAddend(i32, .add, code[0..4], S + A),
.SUB32 => riscv_util.writeAddend(i32, .sub, code[0..4], S + A),
.ADD64 => riscv_util.writeAddend(i64, .add, code[0..8], S + A),
.SUB64 => riscv_util.writeAddend(i64, .sub, code[0..8], S + A),

.ADD8 => riscv_util.writeAddend(i8, .add, code[r_offset..][0..1], S + A),
.SUB8 => riscv_util.writeAddend(i8, .sub, code[r_offset..][0..1], S + A),
.ADD16 => riscv_util.writeAddend(i16, .add, code[r_offset..][0..2], S + A),
.SUB16 => riscv_util.writeAddend(i16, .sub, code[r_offset..][0..2], S + A),
.ADD32 => riscv_util.writeAddend(i32, .add, code[r_offset..][0..4], S + A),
.SUB32 => riscv_util.writeAddend(i32, .sub, code[r_offset..][0..4], S + A),
.ADD64 => riscv_util.writeAddend(i64, .add, code[r_offset..][0..8], S + A),
.SUB64 => riscv_util.writeAddend(i64, .sub, code[r_offset..][0..8], S + A),
.SET8 => mem.writeInt(i8, code[0..1], @as(i8, @truncate(S + A)), .little),
.SET16 => mem.writeInt(i16, code[0..2], @as(i16, @truncate(S + A)), .little),
.SET32 => mem.writeInt(i32, code[0..4], @as(i32, @truncate(S + A)), .little),

.SET8 => mem.writeInt(i8, code[r_offset..][0..1], @as(i8, @truncate(S + A)), .little),
.SET16 => mem.writeInt(i16, code[r_offset..][0..2], @as(i16, @truncate(S + A)), .little),
.SET32 => mem.writeInt(i32, code[r_offset..][0..4], @as(i32, @truncate(S + A)), .little),
.SET6 => riscv_util.writeSetSub6(.set, code[0..1], S + A),
.SUB6 => riscv_util.writeSetSub6(.sub, code[0..1], S + A),

.SET6 => riscv_util.writeSetSub6(.set, code[r_offset..][0..1], S + A),
.SUB6 => riscv_util.writeSetSub6(.sub, code[r_offset..][0..1], S + A),

.SET_ULEB128 => try riscv_util.writeSetSubUleb(.set, stream, S + A),
.SUB_ULEB128 => try riscv_util.writeSetSubUleb(.sub, stream, S - A),
.SET_ULEB128 => riscv_util.writeSetUleb(code, S + A),
.SUB_ULEB128 => riscv_util.writeSubUleb(code, S - A),

else => try atom.reportUnhandledRelocError(rel, elf_file),
}
@ -2108,14 +2077,16 @@ pub const Extra = struct {
const std = @import("std");
const assert = std.debug.assert;
const elf = std.elf;
const eh_frame = @import("eh_frame.zig");
const log = std.log.scoped(.link);
const math = std.math;
const mem = std.mem;
const relocs_log = std.log.scoped(.link_relocs);
const Allocator = mem.Allocator;
const Writer = std.Io.Writer;

const eh_frame = @import("eh_frame.zig");
const relocation = @import("relocation.zig");

const Allocator = mem.Allocator;
const Atom = @This();
const Elf = @import("../Elf.zig");
const Fde = eh_frame.Fde;

@ -89,7 +89,7 @@ pub fn allocate(list: *AtomList, elf_file: *Elf) !void {
list.dirty = false;
}

pub fn write(list: AtomList, buffer: *std.array_list.Managed(u8), undefs: anytype, elf_file: *Elf) !void {
pub fn write(list: AtomList, buffer: *std.Io.Writer.Allocating, undefs: anytype, elf_file: *Elf) !void {
const gpa = elf_file.base.comp.gpa;
const osec = elf_file.sections.items(.shdr)[list.output_section_index];
assert(osec.sh_type != elf.SHT_NOBITS);
@ -98,8 +98,7 @@ pub fn write(list: AtomList, buffer: *std.array_list.Managed(u8), undefs: anytyp
log.debug("writing atoms in section '{s}'", .{elf_file.getShString(osec.sh_name)});

const list_size = math.cast(usize, list.size) orelse return error.Overflow;
try buffer.ensureUnusedCapacity(list_size);
buffer.appendNTimesAssumeCapacity(0, list_size);
try buffer.writer.splatByteAll(0, list_size);

for (list.atoms.keys()) |ref| {
const atom_ptr = elf_file.atom(ref).?;
@ -113,7 +112,7 @@ pub fn write(list: AtomList, buffer: *std.array_list.Managed(u8), undefs: anytyp
const object = atom_ptr.file(elf_file).?.object;
const code = try object.codeDecompressAlloc(elf_file, ref.index);
defer gpa.free(code);
const out_code = buffer.items[off..][0..size];
const out_code = buffer.written()[off..][0..size];
@memcpy(out_code, code);

if (osec.sh_flags & elf.SHF_ALLOC == 0)
@ -122,7 +121,7 @@ pub fn write(list: AtomList, buffer: *std.array_list.Managed(u8), undefs: anytyp
try atom_ptr.resolveRelocsAlloc(elf_file, out_code);
}

try elf_file.base.file.?.pwriteAll(buffer.items, list.offset(elf_file));
try elf_file.base.file.?.pwriteAll(buffer.written(), list.offset(elf_file));
buffer.clearRetainingCapacity();
}


@ -952,7 +952,7 @@ pub fn convertCommonSymbols(self: *Object, elf_file: *Elf) !void {
const is_tls = sym.type(elf_file) == elf.STT_TLS;
const name = if (is_tls) ".tls_common" else ".common";
const name_offset = @as(u32, @intCast(self.strtab.items.len));
try self.strtab.writer(gpa).print("{s}\x00", .{name});
try self.strtab.print(gpa, "{s}\x00", .{name});

var sh_flags: u32 = elf.SHF_ALLOC | elf.SHF_WRITE;
if (is_tls) sh_flags |= elf.SHF_TLS;

@ -162,22 +162,6 @@ fn prune(elf_file: *Elf) void {
}
}

pub fn dumpPrunedAtoms(elf_file: *Elf) !void {
const stderr = std.fs.File.stderr().deprecatedWriter();
for (elf_file.objects.items) |index| {
const file = elf_file.file(index).?;
for (file.atoms()) |atom_index| {
const atom = file.atom(atom_index) orelse continue;
if (!atom.alive)
// TODO should we simply print to stderr?
try stderr.print("link: removing unused section '{s}' in file '{f}'\n", .{
atom.name(elf_file),
atom.file(elf_file).?.fmtPath(),
});
}
}
}

const Level = struct {
value: usize = 0,


@ -100,32 +100,33 @@ pub fn flushStaticLib(elf_file: *Elf, comp: *Compilation) !void {
state_log.debug("ar_strtab\n{f}\n", .{ar_strtab});
}

var buffer = std.array_list.Managed(u8).init(gpa);
defer buffer.deinit();
try buffer.ensureTotalCapacityPrecise(total_size);
const buffer = try gpa.alloc(u8, total_size);
defer gpa.free(buffer);

var writer: std.Io.Writer = .fixed(buffer);

// Write magic
try buffer.writer().writeAll(elf.ARMAG);
try writer.writeAll(elf.ARMAG);

// Write symtab
try ar_symtab.write(.p64, elf_file, buffer.writer());
try ar_symtab.write(.p64, elf_file, &writer);

// Write strtab
if (ar_strtab.size() > 0) {
if (!mem.isAligned(buffer.items.len, 2)) try buffer.writer().writeByte(0);
try ar_strtab.write(buffer.writer());
if (!mem.isAligned(writer.end, 2)) try writer.writeByte(0);
try ar_strtab.write(&writer);
}

// Write object files
for (files.items) |index| {
if (!mem.isAligned(buffer.items.len, 2)) try buffer.writer().writeByte(0);
try elf_file.file(index).?.writeAr(elf_file, buffer.writer());
if (!mem.isAligned(writer.end, 2)) try writer.writeByte(0);
try elf_file.file(index).?.writeAr(elf_file, &writer);
}

assert(buffer.items.len == total_size);
assert(writer.buffered().len == total_size);

try elf_file.base.file.?.setEndPos(total_size);
try elf_file.base.file.?.pwriteAll(buffer.items, 0);
try elf_file.base.file.?.pwriteAll(writer.buffered(), 0);

if (diags.hasErrors()) return error.LinkFailure;
}
@ -407,15 +408,16 @@ fn writeSyntheticSections(elf_file: *Elf) !void {
};
const shdr = slice.items(.shdr)[shndx];
const sh_size = math.cast(usize, shdr.sh_size) orelse return error.Overflow;
var buffer = try std.array_list.Managed(u8).initCapacity(gpa, @intCast(sh_size - existing_size));
defer buffer.deinit();
try eh_frame.writeEhFrameRelocatable(elf_file, buffer.writer());
const buffer = try gpa.alloc(u8, @intCast(sh_size - existing_size));
defer gpa.free(buffer);
var writer: std.Io.Writer = .fixed(buffer);
try eh_frame.writeEhFrameRelocatable(elf_file, &writer);
log.debug("writing .eh_frame from 0x{x} to 0x{x}", .{
shdr.sh_offset + existing_size,
shdr.sh_offset + sh_size,
});
assert(buffer.items.len == sh_size - existing_size);
try elf_file.base.file.?.pwriteAll(buffer.items, shdr.sh_offset + existing_size);
assert(writer.buffered().len == sh_size - existing_size);
try elf_file.base.file.?.pwriteAll(writer.buffered(), shdr.sh_offset + existing_size);
}
if (elf_file.section_indexes.eh_frame_rela) |shndx| {
const shdr = slice.items(.shdr)[shndx];
@ -446,15 +448,16 @@ fn writeGroups(elf_file: *Elf) !void {
for (elf_file.group_sections.items) |cgs| {
const shdr = elf_file.sections.items(.shdr)[cgs.shndx];
const sh_size = math.cast(usize, shdr.sh_size) orelse return error.Overflow;
var buffer = try std.array_list.Managed(u8).initCapacity(gpa, sh_size);
defer buffer.deinit();
try cgs.write(elf_file, buffer.writer());
assert(buffer.items.len == sh_size);
const buffer = try gpa.alloc(u8, sh_size);
defer gpa.free(buffer);
var writer: std.Io.Writer = .fixed(buffer);
try cgs.write(elf_file, &writer);
assert(writer.buffered().len == sh_size);
log.debug("writing group from 0x{x} to 0x{x}", .{
shdr.sh_offset,
shdr.sh_offset + shdr.sh_size,
});
try elf_file.base.file.?.pwriteAll(buffer.items, shdr.sh_offset);
try elf_file.base.file.?.pwriteAll(writer.buffered(), shdr.sh_offset);
}
}


@ -94,134 +94,134 @@ pub const DynamicSection = struct {
|
||||
return nentries * @sizeOf(elf.Elf64_Dyn);
|
||||
}
|
||||
|
||||
pub fn write(dt: DynamicSection, elf_file: *Elf, writer: anytype) !void {
|
||||
pub fn write(dt: DynamicSection, elf_file: *Elf, writer: *std.Io.Writer) !void {
|
||||
const shdrs = elf_file.sections.items(.shdr);
|
||||
|
||||
// NEEDED
|
||||
for (dt.needed.items) |off| {
|
||||
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_NEEDED, .d_val = off });
|
||||
try writer.writeStruct(@as(elf.Elf64_Dyn, .{ .d_tag = elf.DT_NEEDED, .d_val = off }), .little);
|
||||
}
|
||||
|
||||
if (dt.soname) |off| {
|
||||
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_SONAME, .d_val = off });
|
||||
try writer.writeStruct(@as(elf.Elf64_Dyn, .{ .d_tag = elf.DT_SONAME, .d_val = off }), .little);
|
||||
}
|
||||
|
||||
// RUNPATH
|
||||
// TODO add option in Options to revert to old RPATH tag
|
||||
if (dt.rpath > 0) {
|
||||
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_RUNPATH, .d_val = dt.rpath });
|
||||
try writer.writeStruct(@as(elf.Elf64_Dyn, .{ .d_tag = elf.DT_RUNPATH, .d_val = dt.rpath }), .little);
|
||||
}
|
||||
|
||||
// INIT
|
||||
if (elf_file.sectionByName(".init")) |shndx| {
|
||||
const addr = shdrs[shndx].sh_addr;
|
||||
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_INIT, .d_val = addr });
|
||||
try writer.writeStruct(@as(elf.Elf64_Dyn, .{ .d_tag = elf.DT_INIT, .d_val = addr }), .little);
|
||||
}
|
||||
|
||||
// FINI
|
||||
if (elf_file.sectionByName(".fini")) |shndx| {
|
||||
const addr = shdrs[shndx].sh_addr;
|
||||
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_FINI, .d_val = addr });
|
||||
try writer.writeStruct(@as(elf.Elf64_Dyn, .{ .d_tag = elf.DT_FINI, .d_val = addr }), .little);
|
||||
}
|
||||
|
||||
// INIT_ARRAY
|
||||
if (elf_file.sectionByName(".init_array")) |shndx| {
|
||||
const shdr = shdrs[shndx];
|
||||
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_INIT_ARRAY, .d_val = shdr.sh_addr });
|
||||
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_INIT_ARRAYSZ, .d_val = shdr.sh_size });
|
||||
try writer.writeStruct(@as(elf.Elf64_Dyn, .{ .d_tag = elf.DT_INIT_ARRAY, .d_val = shdr.sh_addr }), .little);
|
||||
try writer.writeStruct(@as(elf.Elf64_Dyn, .{ .d_tag = elf.DT_INIT_ARRAYSZ, .d_val = shdr.sh_size }), .little);
|
||||
}
|
||||
|
||||
// FINI_ARRAY
|
||||
if (elf_file.sectionByName(".fini_array")) |shndx| {
|
||||
const shdr = shdrs[shndx];
|
||||
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_FINI_ARRAY, .d_val = shdr.sh_addr });
|
||||
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_FINI_ARRAYSZ, .d_val = shdr.sh_size });
|
||||
try writer.writeStruct(@as(elf.Elf64_Dyn, .{ .d_tag = elf.DT_FINI_ARRAY, .d_val = shdr.sh_addr }), .little);
|
||||
try writer.writeStruct(@as(elf.Elf64_Dyn, .{ .d_tag = elf.DT_FINI_ARRAYSZ, .d_val = shdr.sh_size }), .little);
|
||||
}
|
||||
|
||||
// RELA
|
||||
if (elf_file.section_indexes.rela_dyn) |shndx| {
|
||||
const shdr = shdrs[shndx];
|
||||
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_RELA, .d_val = shdr.sh_addr });
|
||||
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_RELASZ, .d_val = shdr.sh_size });
|
||||
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_RELAENT, .d_val = shdr.sh_entsize });
|
||||
try writer.writeStruct(@as(elf.Elf64_Dyn, .{ .d_tag = elf.DT_RELA, .d_val = shdr.sh_addr }), .little);
|
||||
try writer.writeStruct(@as(elf.Elf64_Dyn, .{ .d_tag = elf.DT_RELASZ, .d_val = shdr.sh_size }), .little);
|
||||
try writer.writeStruct(@as(elf.Elf64_Dyn, .{ .d_tag = elf.DT_RELAENT, .d_val = shdr.sh_entsize }), .little);
|
||||
}
|
||||
|
||||
// JMPREL
|
||||
if (elf_file.section_indexes.rela_plt) |shndx| {
|
||||
const shdr = shdrs[shndx];
|
||||
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_JMPREL, .d_val = shdr.sh_addr });
|
||||
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_PLTRELSZ, .d_val = shdr.sh_size });
|
||||
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_PLTREL, .d_val = elf.DT_RELA });
|
||||
try writer.writeStruct(@as(elf.Elf64_Dyn, .{ .d_tag = elf.DT_JMPREL, .d_val = shdr.sh_addr }), .little);
|
||||
try writer.writeStruct(@as(elf.Elf64_Dyn, .{ .d_tag = elf.DT_PLTRELSZ, .d_val = shdr.sh_size }), .little);
|
||||
try writer.writeStruct(@as(elf.Elf64_Dyn, .{ .d_tag = elf.DT_PLTREL, .d_val = elf.DT_RELA }), .little);
|
||||
}
|
||||
|
||||
// PLTGOT
|
||||
if (elf_file.section_indexes.got_plt) |shndx| {
|
||||
const addr = shdrs[shndx].sh_addr;
|
||||
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_PLTGOT, .d_val = addr });
|
||||
try writer.writeStruct(@as(elf.Elf64_Dyn, .{ .d_tag = elf.DT_PLTGOT, .d_val = addr }), .little);
|
||||
}
|
||||
|
||||
{
|
||||
assert(elf_file.section_indexes.hash != null);
|
||||
const addr = shdrs[elf_file.section_indexes.hash.?].sh_addr;
|
||||
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_HASH, .d_val = addr });
|
||||
try writer.writeStruct(@as(elf.Elf64_Dyn, .{ .d_tag = elf.DT_HASH, .d_val = addr }), .little);
|
||||
}
|
||||
|
||||
if (elf_file.section_indexes.gnu_hash) |shndx| {
|
||||
const addr = shdrs[shndx].sh_addr;
|
||||
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_GNU_HASH, .d_val = addr });
|
||||
try writer.writeStruct(@as(elf.Elf64_Dyn, .{ .d_tag = elf.DT_GNU_HASH, .d_val = addr }), .little);
|
||||
}
|
||||
|
||||
// TEXTREL
|
||||
if (elf_file.has_text_reloc) {
|
||||
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_TEXTREL, .d_val = 0 });
|
||||
try writer.writeStruct(@as(elf.Elf64_Dyn, .{ .d_tag = elf.DT_TEXTREL, .d_val = 0 }), .little);
|
||||
}
|
||||
|
||||
// SYMTAB + SYMENT
|
||||
{
|
||||
assert(elf_file.section_indexes.dynsymtab != null);
|
||||
const shdr = shdrs[elf_file.section_indexes.dynsymtab.?];
|
||||
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_SYMTAB, .d_val = shdr.sh_addr });
|
||||
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_SYMENT, .d_val = shdr.sh_entsize });
|
||||
try writer.writeStruct(@as(elf.Elf64_Dyn, .{ .d_tag = elf.DT_SYMTAB, .d_val = shdr.sh_addr }), .little);
|
||||
try writer.writeStruct(@as(elf.Elf64_Dyn, .{ .d_tag = elf.DT_SYMENT, .d_val = shdr.sh_entsize }), .little);
|
||||
}
|
||||
|
||||
// STRTAB + STRSZ
|
||||
{
|
||||
assert(elf_file.section_indexes.dynstrtab != null);
|
||||
const shdr = shdrs[elf_file.section_indexes.dynstrtab.?];
|
||||
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_STRTAB, .d_val = shdr.sh_addr });
|
||||
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_STRSZ, .d_val = shdr.sh_size });
|
||||
try writer.writeStruct(@as(elf.Elf64_Dyn, .{ .d_tag = elf.DT_STRTAB, .d_val = shdr.sh_addr }), .little);
|
||||
try writer.writeStruct(@as(elf.Elf64_Dyn, .{ .d_tag = elf.DT_STRSZ, .d_val = shdr.sh_size }), .little);
|
||||
}
|
||||
|
||||
// VERSYM
|
||||
if (elf_file.section_indexes.versym) |shndx| {
|
||||
const addr = shdrs[shndx].sh_addr;
|
||||
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_VERSYM, .d_val = addr });
|
||||
try writer.writeStruct(@as(elf.Elf64_Dyn, .{ .d_tag = elf.DT_VERSYM, .d_val = addr }), .little);
|
||||
}
|
||||
|
||||
// VERNEED + VERNEEDNUM
|
||||
if (elf_file.section_indexes.verneed) |shndx| {
|
||||
const addr = shdrs[shndx].sh_addr;
|
||||
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_VERNEED, .d_val = addr });
|
||||
try writer.writeStruct(elf.Elf64_Dyn{
|
||||
try writer.writeStruct(@as(elf.Elf64_Dyn, .{ .d_tag = elf.DT_VERNEED, .d_val = addr }), .little);
|
||||
try writer.writeStruct(@as(elf.Elf64_Dyn, .{
|
||||
.d_tag = elf.DT_VERNEEDNUM,
|
||||
.d_val = elf_file.verneed.verneed.items.len,
|
||||
});
|
||||
}), .little);
|
||||
}
|
||||
|
||||
// FLAGS
|
||||
if (dt.getFlags(elf_file)) |flags| {
|
||||
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_FLAGS, .d_val = flags });
|
||||
try writer.writeStruct(@as(elf.Elf64_Dyn, .{ .d_tag = elf.DT_FLAGS, .d_val = flags }), .little);
|
||||
}
|
||||
// FLAGS_1
|
||||
if (dt.getFlags1(elf_file)) |flags_1| {
|
||||
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_FLAGS_1, .d_val = flags_1 });
|
||||
try writer.writeStruct(@as(elf.Elf64_Dyn, .{ .d_tag = elf.DT_FLAGS_1, .d_val = flags_1 }), .little);
|
||||
}
|
||||
|
||||
// DEBUG
|
||||
if (!elf_file.isEffectivelyDynLib()) try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_DEBUG, .d_val = 0 });
|
||||
if (!elf_file.isEffectivelyDynLib()) try writer.writeStruct(@as(elf.Elf64_Dyn, .{ .d_tag = elf.DT_DEBUG, .d_val = 0 }), .little);
|
||||
|
||||
// NULL
|
||||
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_NULL, .d_val = 0 });
|
||||
try writer.writeStruct(@as(elf.Elf64_Dyn, .{ .d_tag = elf.DT_NULL, .d_val = 0 }), .little);
|
||||
}
|
||||
};
|
||||
|
||||
@ -360,7 +360,7 @@ pub const GotSection = struct {
|
||||
return s;
|
||||
}
|
||||
|
||||
pub fn write(got: GotSection, elf_file: *Elf, writer: anytype) !void {
|
||||
pub fn write(got: GotSection, elf_file: *Elf, writer: *std.Io.Writer) !void {
|
||||
const comp = elf_file.base.comp;
|
||||
const is_dyn_lib = elf_file.isEffectivelyDynLib();
|
||||
const apply_relocs = true; // TODO add user option for this
|
||||
@ -666,7 +666,7 @@ pub const PltSection = struct {
|
||||
};
|
||||
}
|
||||
|
||||
pub fn write(plt: PltSection, elf_file: *Elf, writer: anytype) !void {
|
||||
pub fn write(plt: PltSection, elf_file: *Elf, writer: *std.Io.Writer) !void {
|
||||
const cpu_arch = elf_file.getTarget().cpu.arch;
|
||||
switch (cpu_arch) {
|
||||
.x86_64 => try x86_64.write(plt, elf_file, writer),
|
||||
@ -763,7 +763,7 @@ pub const PltSection = struct {
|
||||
}
|
||||
|
||||
const x86_64 = struct {
|
||||
fn write(plt: PltSection, elf_file: *Elf, writer: anytype) !void {
|
||||
fn write(plt: PltSection, elf_file: *Elf, writer: *std.Io.Writer) !void {
|
||||
const shdrs = elf_file.sections.items(.shdr);
|
||||
const plt_addr = shdrs[elf_file.section_indexes.plt.?].sh_addr;
|
||||
const got_plt_addr = shdrs[elf_file.section_indexes.got_plt.?].sh_addr;
|
||||
@ -778,7 +778,7 @@ pub const PltSection = struct {
|
||||
disp = @as(i64, @intCast(got_plt_addr + 16)) - @as(i64, @intCast(plt_addr + 14)) - 4;
|
||||
mem.writeInt(i32, preamble[14..][0..4], @as(i32, @intCast(disp)), .little);
|
||||
try writer.writeAll(&preamble);
|
||||
try writer.writeByteNTimes(0xcc, preambleSize(.x86_64) - preamble.len);
|
||||
try writer.splatByteAll(0xcc, preambleSize(.x86_64) - preamble.len);
|
||||
|
||||
for (plt.symbols.items, 0..) |ref, i| {
|
||||
const sym = elf_file.symbol(ref).?;
|
||||
@ -798,7 +798,7 @@ pub const PltSection = struct {
|
||||
};
|
||||
|
||||
const aarch64 = struct {
|
||||
fn write(plt: PltSection, elf_file: *Elf, writer: anytype) !void {
|
||||
fn write(plt: PltSection, elf_file: *Elf, writer: *std.Io.Writer) !void {
|
||||
{
|
||||
const shdrs = elf_file.sections.items(.shdr);
|
||||
const plt_addr: i64 = @intCast(shdrs[elf_file.section_indexes.plt.?].sh_addr);
|
||||
@ -853,7 +853,7 @@ pub const GotPltSection = struct {
|
||||
return preamble_size + elf_file.plt.symbols.items.len * 8;
|
||||
}
|
||||
|
||||
pub fn write(got_plt: GotPltSection, elf_file: *Elf, writer: anytype) !void {
|
||||
pub fn write(got_plt: GotPltSection, elf_file: *Elf, writer: *std.Io.Writer) !void {
|
||||
_ = got_plt;
|
||||
{
|
||||
// [0]: _DYNAMIC
|
||||
@ -904,7 +904,7 @@ pub const PltGotSection = struct {
|
||||
};
|
||||
}
|
||||
|
||||
pub fn write(plt_got: PltGotSection, elf_file: *Elf, writer: anytype) !void {
|
||||
pub fn write(plt_got: PltGotSection, elf_file: *Elf, writer: *std.Io.Writer) !void {
|
||||
const cpu_arch = elf_file.getTarget().cpu.arch;
|
||||
switch (cpu_arch) {
|
||||
.x86_64 => try x86_64.write(plt_got, elf_file, writer),
|
||||
@ -940,7 +940,7 @@ pub const PltGotSection = struct {
|
||||
}
|
||||
|
||||
const x86_64 = struct {
|
||||
pub fn write(plt_got: PltGotSection, elf_file: *Elf, writer: anytype) !void {
|
||||
pub fn write(plt_got: PltGotSection, elf_file: *Elf, writer: *std.Io.Writer) !void {
|
||||
for (plt_got.symbols.items) |ref| {
|
||||
const sym = elf_file.symbol(ref).?;
|
||||
const target_addr = sym.gotAddress(elf_file);
|
||||
@ -958,7 +958,7 @@ pub const PltGotSection = struct {
|
||||
};
|
||||
|
||||
const aarch64 = struct {
|
||||
fn write(plt_got: PltGotSection, elf_file: *Elf, writer: anytype) !void {
|
||||
fn write(plt_got: PltGotSection, elf_file: *Elf, writer: *std.Io.Writer) !void {
|
||||
for (plt_got.symbols.items) |ref| {
|
||||
const sym = elf_file.symbol(ref).?;
|
||||
const target_addr = sym.gotAddress(elf_file);
|
||||
@ -1133,14 +1133,14 @@ pub const DynsymSection = struct {
|
||||
return @as(u32, @intCast(dynsym.entries.items.len + 1));
|
||||
}
|
||||
|
||||
pub fn write(dynsym: DynsymSection, elf_file: *Elf, writer: anytype) !void {
|
||||
try writer.writeStruct(Elf.null_sym);
|
||||
pub fn write(dynsym: DynsymSection, elf_file: *Elf, writer: *std.Io.Writer) !void {
|
||||
try writer.writeStruct(Elf.null_sym, .little);
|
||||
for (dynsym.entries.items) |entry| {
|
||||
const sym = elf_file.symbol(entry.ref).?;
|
||||
var out_sym: elf.Elf64_Sym = Elf.null_sym;
|
||||
sym.setOutputSym(elf_file, &out_sym);
|
||||
out_sym.st_name = entry.off;
|
||||
try writer.writeStruct(out_sym);
|
||||
try writer.writeStruct(out_sym, .little);
|
||||
}
|
||||
}
|
||||
};
|
||||
@ -1175,10 +1175,12 @@ pub const HashSection = struct {
|
||||
}
|
||||
|
||||
try hs.buffer.ensureTotalCapacityPrecise(gpa, (2 + nsyms * 2) * 4);
|
||||
hs.buffer.writer(gpa).writeInt(u32, @as(u32, @intCast(nsyms)), .little) catch unreachable;
|
||||
hs.buffer.writer(gpa).writeInt(u32, @as(u32, @intCast(nsyms)), .little) catch unreachable;
|
||||
hs.buffer.writer(gpa).writeAll(mem.sliceAsBytes(buckets)) catch unreachable;
|
||||
hs.buffer.writer(gpa).writeAll(mem.sliceAsBytes(chains)) catch unreachable;
|
||||
var w: std.Io.Writer = .fixed(hs.buffer.unusedCapacitySlice());
|
||||
w.writeInt(u32, @as(u32, @intCast(nsyms)), .little) catch unreachable;
|
||||
w.writeInt(u32, @as(u32, @intCast(nsyms)), .little) catch unreachable;
|
||||
w.writeAll(@ptrCast(buckets)) catch unreachable;
|
||||
w.writeAll(@ptrCast(chains)) catch unreachable;
|
||||
hs.buffer.items.len += w.end;
|
||||
}
|
||||
|
||||
pub inline fn size(hs: HashSection) usize {
|
||||
@ -1439,7 +1441,7 @@ pub const VerneedSection = struct {
|
||||
return vern.verneed.items.len * @sizeOf(elf.Elf64_Verneed) + vern.vernaux.items.len * @sizeOf(elf.Vernaux);
|
||||
}
|
||||
|
||||
pub fn write(vern: VerneedSection, writer: anytype) !void {
|
||||
pub fn write(vern: VerneedSection, writer: *std.Io.Writer) !void {
|
||||
try writer.writeAll(mem.sliceAsBytes(vern.verneed.items));
|
||||
try writer.writeAll(mem.sliceAsBytes(vern.vernaux.items));
|
||||
}
|
||||
@ -1467,7 +1469,7 @@ pub const GroupSection = struct {
|
||||
return (members.len + 1) * @sizeOf(u32);
|
||||
}
|
||||
|
||||
pub fn write(cgs: GroupSection, elf_file: *Elf, writer: anytype) !void {
|
||||
pub fn write(cgs: GroupSection, elf_file: *Elf, writer: *std.Io.Writer) !void {
|
||||
const cg = cgs.group(elf_file);
|
||||
const object = cg.file(elf_file).object;
|
||||
const members = cg.members(elf_file);
|
||||
@ -1495,7 +1497,7 @@ pub const GroupSection = struct {
|
||||
}
|
||||
};
|
||||
|
||||
fn writeInt(value: anytype, elf_file: *Elf, writer: anytype) !void {
|
||||
fn writeInt(value: anytype, elf_file: *Elf, writer: *std.Io.Writer) !void {
|
||||
const entry_size = elf_file.archPtrWidthBytes();
|
||||
const target = elf_file.getTarget();
|
||||
const endian = target.cpu.arch.endian();
|
||||
|
||||
@ -589,7 +589,7 @@ pub fn flush(
|
||||
);
|
||||
|
||||
const ncmds, const sizeofcmds, const uuid_cmd_offset = self.writeLoadCommands() catch |err| switch (err) {
|
||||
error.NoSpaceLeft => unreachable,
|
||||
error.WriteFailed => unreachable,
|
||||
error.OutOfMemory => return error.OutOfMemory,
|
||||
error.LinkFailure => return error.LinkFailure,
|
||||
};
|
||||
@ -1074,7 +1074,7 @@ fn accessLibPath(
|
||||
|
||||
for (&[_][]const u8{ ".tbd", ".dylib", "" }) |ext| {
|
||||
test_path.clearRetainingCapacity();
|
||||
try test_path.writer().print("{s}" ++ sep ++ "lib{s}{s}", .{ search_dir, name, ext });
|
||||
try test_path.print("{s}" ++ sep ++ "lib{s}{s}", .{ search_dir, name, ext });
|
||||
try checked_paths.append(try arena.dupe(u8, test_path.items));
|
||||
fs.cwd().access(test_path.items, .{}) catch |err| switch (err) {
|
||||
error.FileNotFound => continue,
|
||||
@ -1097,7 +1097,7 @@ fn accessFrameworkPath(
|
||||
|
||||
for (&[_][]const u8{ ".tbd", ".dylib", "" }) |ext| {
|
||||
test_path.clearRetainingCapacity();
|
||||
try test_path.writer().print("{s}" ++ sep ++ "{s}.framework" ++ sep ++ "{s}{s}", .{
|
||||
try test_path.print("{s}" ++ sep ++ "{s}.framework" ++ sep ++ "{s}{s}", .{
|
||||
search_dir,
|
||||
name,
|
||||
name,
|
||||
@ -1178,9 +1178,9 @@ fn parseDependentDylibs(self: *MachO) !void {
|
||||
for (&[_][]const u8{ ".tbd", ".dylib", "" }) |ext| {
|
||||
test_path.clearRetainingCapacity();
|
||||
if (self.base.comp.sysroot) |root| {
|
||||
try test_path.writer().print("{s}" ++ fs.path.sep_str ++ "{s}{s}", .{ root, path, ext });
|
||||
try test_path.print("{s}" ++ fs.path.sep_str ++ "{s}{s}", .{ root, path, ext });
|
||||
} else {
|
||||
try test_path.writer().print("{s}{s}", .{ path, ext });
|
||||
try test_path.print("{s}{s}", .{ path, ext });
|
||||
}
|
||||
try checked_paths.append(try arena.dupe(u8, test_path.items));
|
||||
fs.cwd().access(test_path.items, .{}) catch |err| switch (err) {
|
||||
@ -2528,8 +2528,8 @@ fn writeThunkWorker(self: *MachO, thunk: Thunk) void {
|
||||
fn doWork(th: Thunk, buffer: []u8, macho_file: *MachO) !void {
|
||||
const off = try macho_file.cast(usize, th.value);
|
||||
const size = th.size();
|
||||
var stream = std.io.fixedBufferStream(buffer[off..][0..size]);
|
||||
try th.write(macho_file, stream.writer());
|
||||
var stream: Writer = .fixed(buffer[off..][0..size]);
|
||||
try th.write(macho_file, &stream);
|
||||
}
|
||||
}.doWork;
|
||||
const out = self.sections.items(.out)[thunk.out_n_sect].items;
|
||||
@ -2556,15 +2556,15 @@ fn writeSyntheticSectionWorker(self: *MachO, sect_id: u8, out: []u8) void {
|
||||
|
||||
const doWork = struct {
|
||||
fn doWork(macho_file: *MachO, tag: Tag, buffer: []u8) !void {
|
||||
var stream = std.io.fixedBufferStream(buffer);
|
||||
var stream: Writer = .fixed(buffer);
|
||||
switch (tag) {
|
||||
.eh_frame => eh_frame.write(macho_file, buffer),
|
||||
.unwind_info => try macho_file.unwind_info.write(macho_file, buffer),
|
||||
.got => try macho_file.got.write(macho_file, stream.writer()),
|
||||
.stubs => try macho_file.stubs.write(macho_file, stream.writer()),
|
||||
.la_symbol_ptr => try macho_file.la_symbol_ptr.write(macho_file, stream.writer()),
|
||||
.tlv_ptr => try macho_file.tlv_ptr.write(macho_file, stream.writer()),
|
||||
.objc_stubs => try macho_file.objc_stubs.write(macho_file, stream.writer()),
|
||||
.got => try macho_file.got.write(macho_file, &stream),
|
||||
.stubs => try macho_file.stubs.write(macho_file, &stream),
|
||||
.la_symbol_ptr => try macho_file.la_symbol_ptr.write(macho_file, &stream),
|
||||
.tlv_ptr => try macho_file.tlv_ptr.write(macho_file, &stream),
|
||||
.objc_stubs => try macho_file.objc_stubs.write(macho_file, &stream),
|
||||
}
|
||||
}
|
||||
}.doWork;
|
||||
@ -2605,8 +2605,8 @@ fn updateLazyBindSizeWorker(self: *MachO) void {
|
||||
try macho_file.lazy_bind_section.updateSize(macho_file);
|
||||
const sect_id = macho_file.stubs_helper_sect_index.?;
|
||||
const out = &macho_file.sections.items(.out)[sect_id];
|
||||
var stream = std.io.fixedBufferStream(out.items);
|
||||
try macho_file.stubs_helper.write(macho_file, stream.writer());
|
||||
var stream: Writer = .fixed(out.items);
|
||||
try macho_file.stubs_helper.write(macho_file, &stream);
|
||||
}
|
||||
}.doWork;
|
||||
doWork(self) catch |err|
|
||||
@ -2669,18 +2669,17 @@ fn writeDyldInfo(self: *MachO) !void {
|
||||
defer gpa.free(buffer);
|
||||
@memset(buffer, 0);
|
||||
|
||||
var stream = std.io.fixedBufferStream(buffer);
|
||||
const writer = stream.writer();
|
||||
var writer: Writer = .fixed(buffer);
|
||||
|
||||
try self.rebase_section.write(writer);
|
||||
try stream.seekTo(cmd.bind_off - base_off);
|
||||
try self.bind_section.write(writer);
|
||||
try stream.seekTo(cmd.weak_bind_off - base_off);
|
||||
try self.weak_bind_section.write(writer);
|
||||
try stream.seekTo(cmd.lazy_bind_off - base_off);
|
||||
try self.lazy_bind_section.write(writer);
|
||||
try stream.seekTo(cmd.export_off - base_off);
|
||||
try self.export_trie.write(writer);
|
||||
try self.rebase_section.write(&writer);
|
||||
writer.end = @intCast(cmd.bind_off - base_off);
|
||||
try self.bind_section.write(&writer);
|
||||
writer.end = @intCast(cmd.weak_bind_off - base_off);
|
||||
try self.weak_bind_section.write(&writer);
|
||||
writer.end = @intCast(cmd.lazy_bind_off - base_off);
|
||||
try self.lazy_bind_section.write(&writer);
|
||||
writer.end = @intCast(cmd.export_off - base_off);
|
||||
try self.export_trie.write(&writer);
|
||||
try self.pwriteAll(buffer, cmd.rebase_off);
|
||||
}
|
||||
|
||||
@ -2689,10 +2688,10 @@ pub fn writeDataInCode(self: *MachO) !void {
|
||||
defer tracy.end();
|
||||
const gpa = self.base.comp.gpa;
|
||||
const cmd = self.data_in_code_cmd;
|
||||
var buffer = try std.array_list.Managed(u8).initCapacity(gpa, self.data_in_code.size());
|
||||
var buffer = try std.Io.Writer.Allocating.initCapacity(gpa, self.data_in_code.size());
|
||||
defer buffer.deinit();
|
||||
try self.data_in_code.write(self, buffer.writer());
|
||||
try self.pwriteAll(buffer.items, cmd.dataoff);
|
||||
self.data_in_code.write(self, &buffer.writer) catch return error.OutOfMemory;
|
||||
try self.pwriteAll(buffer.written(), cmd.dataoff);
|
||||
}
|
||||
|
||||
fn writeIndsymtab(self: *MachO) !void {
|
||||
@ -2701,10 +2700,11 @@ fn writeIndsymtab(self: *MachO) !void {
|
||||
const gpa = self.base.comp.gpa;
|
||||
const cmd = self.dysymtab_cmd;
|
||||
const needed_size = cmd.nindirectsyms * @sizeOf(u32);
|
||||
var buffer = try std.array_list.Managed(u8).initCapacity(gpa, needed_size);
|
||||
defer buffer.deinit();
|
||||
try self.indsymtab.write(self, buffer.writer());
|
||||
try self.pwriteAll(buffer.items, cmd.indirectsymoff);
|
||||
const buffer = try gpa.alloc(u8, needed_size);
|
||||
defer gpa.free(buffer);
|
||||
var writer: Writer = .fixed(buffer);
|
||||
try self.indsymtab.write(self, &writer);
|
||||
try self.pwriteAll(buffer, cmd.indirectsymoff);
|
||||
}
|
||||
|
||||
pub fn writeSymtabToFile(self: *MachO) !void {
|
||||
@ -2821,8 +2821,7 @@ fn writeLoadCommands(self: *MachO) !struct { usize, usize, u64 } {
|
||||
const buffer = try gpa.alloc(u8, needed_size);
|
||||
defer gpa.free(buffer);
|
||||
|
||||
var stream = std.io.fixedBufferStream(buffer);
|
||||
const writer = stream.writer();
|
||||
var writer: Writer = .fixed(buffer);
|
||||
|
||||
var ncmds: usize = 0;
|
||||
|
||||
@ -2831,26 +2830,26 @@ fn writeLoadCommands(self: *MachO) !struct { usize, usize, u64 } {
const slice = self.sections.slice();
var sect_id: usize = 0;
for (self.segments.items) |seg| {
try writer.writeStruct(seg);
try writer.writeStruct(seg, .little);
for (slice.items(.header)[sect_id..][0..seg.nsects]) |header| {
try writer.writeStruct(header);
try writer.writeStruct(header, .little);
}
sect_id += seg.nsects;
}
ncmds += self.segments.items.len;
}

try writer.writeStruct(self.dyld_info_cmd);
try writer.writeStruct(self.dyld_info_cmd, .little);
ncmds += 1;
try writer.writeStruct(self.function_starts_cmd);
try writer.writeStruct(self.function_starts_cmd, .little);
ncmds += 1;
try writer.writeStruct(self.data_in_code_cmd);
try writer.writeStruct(self.data_in_code_cmd, .little);
ncmds += 1;
try writer.writeStruct(self.symtab_cmd);
try writer.writeStruct(self.symtab_cmd, .little);
ncmds += 1;
try writer.writeStruct(self.dysymtab_cmd);
try writer.writeStruct(self.dysymtab_cmd, .little);
ncmds += 1;
try load_commands.writeDylinkerLC(writer);
try load_commands.writeDylinkerLC(&writer);
ncmds += 1;

if (self.getInternalObject()) |obj| {
@ -2861,44 +2860,44 @@ fn writeLoadCommands(self: *MachO) !struct { usize, usize, u64 } {
0
else
@as(u32, @intCast(sym.getAddress(.{ .stubs = true }, self) - seg.vmaddr));
try writer.writeStruct(macho.entry_point_command{
try writer.writeStruct(@as(macho.entry_point_command, .{
.entryoff = entryoff,
.stacksize = self.base.stack_size,
});
}), .little);
ncmds += 1;
}
}

if (self.base.isDynLib()) {
try load_commands.writeDylibIdLC(self, writer);
try load_commands.writeDylibIdLC(self, &writer);
ncmds += 1;
}

for (self.rpath_list) |rpath| {
try load_commands.writeRpathLC(rpath, writer);
try load_commands.writeRpathLC(rpath, &writer);
ncmds += 1;
}
if (comp.config.any_sanitize_thread) {
const path = try comp.tsan_lib.?.full_object_path.toString(gpa);
defer gpa.free(path);
const rpath = std.fs.path.dirname(path) orelse ".";
try load_commands.writeRpathLC(rpath, writer);
try load_commands.writeRpathLC(rpath, &writer);
ncmds += 1;
}

try writer.writeStruct(macho.source_version_command{ .version = 0 });
try writer.writeStruct(@as(macho.source_version_command, .{ .version = 0 }), .little);
ncmds += 1;

if (self.platform.isBuildVersionCompatible()) {
try load_commands.writeBuildVersionLC(self.platform, self.sdk_version, writer);
try load_commands.writeBuildVersionLC(self.platform, self.sdk_version, &writer);
ncmds += 1;
} else {
try load_commands.writeVersionMinLC(self.platform, self.sdk_version, writer);
try load_commands.writeVersionMinLC(self.platform, self.sdk_version, &writer);
ncmds += 1;
}

const uuid_cmd_offset = @sizeOf(macho.mach_header_64) + stream.pos;
try writer.writeStruct(self.uuid_cmd);
const uuid_cmd_offset = @sizeOf(macho.mach_header_64) + writer.end;
try writer.writeStruct(self.uuid_cmd, .little);
ncmds += 1;

for (self.dylibs.items) |index| {
@ -2916,16 +2915,16 @@ fn writeLoadCommands(self: *MachO) !struct { usize, usize, u64 } {
.timestamp = dylib_id.timestamp,
.current_version = dylib_id.current_version,
.compatibility_version = dylib_id.compatibility_version,
}, writer);
}, &writer);
ncmds += 1;
}

if (self.requiresCodeSig()) {
try writer.writeStruct(self.codesig_cmd);
try writer.writeStruct(self.codesig_cmd, .little);
ncmds += 1;
}

assert(stream.pos == needed_size);
assert(writer.end == needed_size);

try self.pwriteAll(buffer, @sizeOf(macho.mach_header_64));

@ -3014,25 +3013,32 @@ pub fn writeCodeSignaturePadding(self: *MachO, code_sig: *CodeSignature) !void {
pub fn writeCodeSignature(self: *MachO, code_sig: *CodeSignature) !void {
const seg = self.getTextSegment();
const offset = self.codesig_cmd.dataoff;
const gpa = self.base.comp.gpa;

var buffer = std.array_list.Managed(u8).init(self.base.comp.gpa);
var buffer: std.Io.Writer.Allocating = .init(gpa);
defer buffer.deinit();
try buffer.ensureTotalCapacityPrecise(code_sig.size());
try code_sig.writeAdhocSignature(self, .{
// The writeAdhocSignature function internally changes code_sig.size()
// during the execution.
try buffer.ensureUnusedCapacity(code_sig.size());

code_sig.writeAdhocSignature(self, .{
.file = self.base.file.?,
.exec_seg_base = seg.fileoff,
.exec_seg_limit = seg.filesize,
.file_size = offset,
.dylib = self.base.isDynLib(),
}, buffer.writer());
assert(buffer.items.len == code_sig.size());
}, &buffer.writer) catch |err| switch (err) {
error.WriteFailed => return error.OutOfMemory,
else => |e| return e,
};
assert(buffer.written().len == code_sig.size());

log.debug("writing code signature from 0x{x} to 0x{x}", .{
offset,
offset + buffer.items.len,
offset + buffer.written().len,
});

try self.pwriteAll(buffer.items, offset);
try self.pwriteAll(buffer.written(), offset);
}

pub fn updateFunc(
@ -5372,7 +5378,7 @@ const macho = std.macho;
const math = std.math;
const mem = std.mem;
const meta = std.meta;
const Writer = std.io.Writer;
const Writer = std.Io.Writer;

const aarch64 = codegen.aarch64.encoding;
const bind = @import("MachO/dyld_info/bind.zig");

@ -81,34 +81,20 @@ pub fn writeHeader(
object_name: []const u8,
object_size: usize,
format: Format,
writer: anytype,
writer: *Writer,
) !void {
var hdr: ar_hdr = .{
.ar_name = undefined,
.ar_date = undefined,
.ar_uid = undefined,
.ar_gid = undefined,
.ar_mode = undefined,
.ar_size = undefined,
.ar_fmag = undefined,
};
@memset(mem.asBytes(&hdr), 0x20);
inline for (@typeInfo(ar_hdr).@"struct".fields) |field| {
var stream = std.io.fixedBufferStream(&@field(hdr, field.name));
stream.writer().print("0", .{}) catch unreachable;
}
@memcpy(&hdr.ar_fmag, ARFMAG);
var hdr: ar_hdr = .{};

const object_name_len = mem.alignForward(usize, object_name.len + 1, ptrWidth(format));
const total_object_size = object_size + object_name_len;

{
var stream = std.io.fixedBufferStream(&hdr.ar_name);
stream.writer().print("#1/{d}", .{object_name_len}) catch unreachable;
var stream: Writer = .fixed(&hdr.ar_name);
stream.print("#1/{d}", .{object_name_len}) catch unreachable;
}
{
var stream = std.io.fixedBufferStream(&hdr.ar_size);
stream.writer().print("{d}", .{total_object_size}) catch unreachable;
var stream: Writer = .fixed(&hdr.ar_size);
stream.print("{d}", .{total_object_size}) catch unreachable;
}

try writer.writeAll(mem.asBytes(&hdr));
@ -116,7 +102,7 @@ pub fn writeHeader(

const padding = object_name_len - object_name.len - 1;
if (padding > 0) {
try writer.writeByteNTimes(0, padding);
try writer.splatByteAll(0, padding);
}
}

@ -138,25 +124,19 @@ pub const SYMDEF64_SORTED = "__.SYMDEF_64 SORTED";

pub const ar_hdr = extern struct {
/// Member file name, sometimes / terminated.
ar_name: [16]u8,

ar_name: [16]u8 = "0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20".*,
/// File date, decimal seconds since Epoch.
ar_date: [12]u8,

ar_date: [12]u8 = "0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20".*,
/// User ID, in ASCII format.
ar_uid: [6]u8,

ar_uid: [6]u8 = "0\x20\x20\x20\x20\x20".*,
/// Group ID, in ASCII format.
ar_gid: [6]u8,

ar_gid: [6]u8 = "0\x20\x20\x20\x20\x20".*,
/// File mode, in ASCII octal.
ar_mode: [8]u8,

ar_mode: [8]u8 = "0\x20\x20\x20\x20\x20\x20\x20".*,
/// File size, in ASCII decimal.
ar_size: [10]u8,

ar_size: [10]u8 = "0\x20\x20\x20\x20\x20\x20\x20\x20\x20".*,
/// Always contains ARFMAG.
ar_fmag: [2]u8,
ar_fmag: [2]u8 = ARFMAG.*,

fn date(self: ar_hdr) !u64 {
const value = mem.trimEnd(u8, &self.ar_date, &[_]u8{@as(u8, 0x20)});
@ -201,7 +181,7 @@ pub const ArSymtab = struct {
return ptr_width + ar.entries.items.len * 2 * ptr_width + ptr_width + mem.alignForward(usize, ar.strtab.buffer.items.len, ptr_width);
}

pub fn write(ar: ArSymtab, format: Format, macho_file: *MachO, writer: anytype) !void {
pub fn write(ar: ArSymtab, format: Format, macho_file: *MachO, writer: *Writer) !void {
const ptr_width = ptrWidth(format);
// Header
try writeHeader(SYMDEF, ar.size(format), format, writer);
@ -226,7 +206,7 @@ pub const ArSymtab = struct {
// Strtab
try writer.writeAll(ar.strtab.buffer.items);
if (padding > 0) {
try writer.writeByteNTimes(0, padding);
try writer.splatByteAll(0, padding);
}
}

@ -275,7 +255,7 @@ pub fn ptrWidth(format: Format) usize {
};
}

pub fn writeInt(format: Format, value: u64, writer: anytype) !void {
pub fn writeInt(format: Format, value: u64, writer: *Writer) !void {
switch (format) {
.p32 => try writer.writeInt(u32, std.math.cast(u32, value) orelse return error.Overflow, .little),
.p64 => try writer.writeInt(u64, value, .little),
@ -299,7 +279,7 @@ const mem = std.mem;
const std = @import("std");
const Allocator = std.mem.Allocator;
const Path = std.Build.Cache.Path;
const Writer = std.io.Writer;
const Writer = std.Io.Writer;

const Archive = @This();
const File = @import("file.zig").File;

@ -581,19 +581,19 @@ pub fn resolveRelocs(self: Atom, macho_file: *MachO, buffer: []u8) !void {
relocs_log.debug("{x}: {s}", .{ self.value, name });

var has_error = false;
var stream = std.io.fixedBufferStream(buffer);
var stream: Writer = .fixed(buffer);
var i: usize = 0;
while (i < relocs.len) : (i += 1) {
const rel = relocs[i];
const rel_offset = rel.offset - self.off;
const rel_offset: usize = @intCast(rel.offset - self.off);
const subtractor = if (rel.meta.has_subtractor) relocs[i - 1] else null;

if (rel.tag == .@"extern") {
if (rel.getTargetSymbol(self, macho_file).getFile(macho_file) == null) continue;
}

try stream.seekTo(rel_offset);
self.resolveRelocInner(rel, subtractor, buffer, macho_file, stream.writer()) catch |err| {
stream.end = rel_offset;
self.resolveRelocInner(rel, subtractor, buffer, macho_file, &stream) catch |err| {
switch (err) {
error.RelaxFail => {
const target = switch (rel.tag) {
@ -630,6 +630,7 @@ const ResolveError = error{
UnexpectedRemainder,
Overflow,
OutOfMemory,
WriteFailed,
};

fn resolveRelocInner(
@ -638,7 +639,7 @@ fn resolveRelocInner(
subtractor: ?Relocation,
code: []u8,
macho_file: *MachO,
writer: anytype,
writer: *Writer,
) ResolveError!void {
const t = &macho_file.base.comp.root_mod.resolved_target.result;
const cpu_arch = t.cpu.arch;
@ -1147,7 +1148,7 @@ const math = std.math;
const mem = std.mem;
const log = std.log.scoped(.link);
const relocs_log = std.log.scoped(.link_relocs);
const Writer = std.io.Writer;
const Writer = std.Io.Writer;
const Allocator = mem.Allocator;
const AtomicBool = std.atomic.Value(bool);

@ -263,7 +263,7 @@ pub fn writeAdhocSignature(
self: *CodeSignature,
macho_file: *MachO,
opts: WriteOpts,
writer: anytype,
writer: *std.Io.Writer,
) !void {
const tracy = trace(@src());
defer tracy.end();
@ -304,10 +304,10 @@ pub fn writeAdhocSignature(
var hash: [hash_size]u8 = undefined;

if (self.requirements) |*req| {
var buf = std.array_list.Managed(u8).init(allocator);
defer buf.deinit();
try req.write(buf.writer());
Sha256.hash(buf.items, &hash, .{});
var a: std.Io.Writer.Allocating = .init(allocator);
defer a.deinit();
try req.write(&a.writer);
Sha256.hash(a.written(), &hash, .{});
self.code_directory.addSpecialHash(req.slotType(), hash);

try blobs.append(.{ .requirements = req });
@ -316,10 +316,10 @@ pub fn writeAdhocSignature(
}

if (self.entitlements) |*ents| {
var buf = std.array_list.Managed(u8).init(allocator);
defer buf.deinit();
try ents.write(buf.writer());
Sha256.hash(buf.items, &hash, .{});
var a: std.Io.Writer.Allocating = .init(allocator);
defer a.deinit();
try ents.write(&a.writer);
Sha256.hash(a.written(), &hash, .{});
self.code_directory.addSpecialHash(ents.slotType(), hash);

try blobs.append(.{ .entitlements = ents });

@ -273,14 +273,13 @@ fn writeLoadCommands(self: *DebugSymbols, macho_file: *MachO) !struct { usize, u
const buffer = try gpa.alloc(u8, needed_size);
defer gpa.free(buffer);

var stream = std.io.fixedBufferStream(buffer);
const writer = stream.writer();
var writer: Writer = .fixed(buffer);

var ncmds: usize = 0;

// UUID comes first presumably to speed up lookup by the consumer like lldb.
@memcpy(&self.uuid_cmd.uuid, &macho_file.uuid_cmd.uuid);
try writer.writeStruct(self.uuid_cmd);
try writer.writeStruct(self.uuid_cmd, .little);
ncmds += 1;

// Segment and section load commands
@ -293,11 +292,11 @@ fn writeLoadCommands(self: *DebugSymbols, macho_file: *MachO) !struct { usize, u
var out_seg = seg;
out_seg.fileoff = 0;
out_seg.filesize = 0;
try writer.writeStruct(out_seg);
try writer.writeStruct(out_seg, .little);
for (slice.items(.header)[sect_id..][0..seg.nsects]) |header| {
var out_header = header;
out_header.offset = 0;
try writer.writeStruct(out_header);
try writer.writeStruct(out_header, .little);
}
sect_id += seg.nsects;
}
@ -306,19 +305,19 @@ fn writeLoadCommands(self: *DebugSymbols, macho_file: *MachO) !struct { usize, u
// Next, commit DSYM's __LINKEDIT and __DWARF segments headers.
sect_id = 0;
for (self.segments.items) |seg| {
try writer.writeStruct(seg);
try writer.writeStruct(seg, .little);
for (self.sections.items[sect_id..][0..seg.nsects]) |header| {
try writer.writeStruct(header);
try writer.writeStruct(header, .little);
}
sect_id += seg.nsects;
}
ncmds += self.segments.items.len;
}

try writer.writeStruct(self.symtab_cmd);
try writer.writeStruct(self.symtab_cmd, .little);
ncmds += 1;

assert(stream.pos == needed_size);
assert(writer.end == needed_size);

try self.file.?.pwriteAll(buffer, @sizeOf(macho.mach_header_64));

@ -460,7 +459,7 @@ const math = std.math;
const mem = std.mem;
const padToIdeal = MachO.padToIdeal;
const trace = @import("../../tracy.zig").trace;
const Writer = std.io.Writer;
const Writer = std.Io.Writer;

const Allocator = mem.Allocator;
const MachO = @import("../MachO.zig");

@ -261,7 +261,7 @@ fn addObjcMethnameSection(self: *InternalObject, methname: []const u8, macho_fil

sect.offset = @intCast(self.objc_methnames.items.len);
try self.objc_methnames.ensureUnusedCapacity(gpa, methname.len + 1);
self.objc_methnames.writer(gpa).print("{s}\x00", .{methname}) catch unreachable;
self.objc_methnames.print(gpa, "{s}\x00", .{methname}) catch unreachable;

const name_str = try self.addString(gpa, "ltmp");
const sym_index = try self.addSymbol(gpa);

@ -293,8 +293,7 @@ pub fn write(info: UnwindInfo, macho_file: *MachO, buffer: []u8) !void {
const seg = macho_file.getTextSegment();
const header = macho_file.sections.items(.header)[macho_file.unwind_info_sect_index.?];

var stream = std.io.fixedBufferStream(buffer);
const writer = stream.writer();
var writer: Writer = .fixed(buffer);

const common_encodings_offset: u32 = @sizeOf(macho.unwind_info_section_header);
const common_encodings_count: u32 = info.common_encodings_count;
@ -303,14 +302,14 @@ pub fn write(info: UnwindInfo, macho_file: *MachO, buffer: []u8) !void {
const indexes_offset: u32 = personalities_offset + personalities_count * @sizeOf(u32);
const indexes_count: u32 = @as(u32, @intCast(info.pages.items.len + 1));

try writer.writeStruct(macho.unwind_info_section_header{
try writer.writeStruct(@as(macho.unwind_info_section_header, .{
.commonEncodingsArraySectionOffset = common_encodings_offset,
.commonEncodingsArrayCount = common_encodings_count,
.personalityArraySectionOffset = personalities_offset,
.personalityArrayCount = personalities_count,
.indexSectionOffset = indexes_offset,
.indexCount = indexes_count,
});
}), .little);

try writer.writeAll(mem.sliceAsBytes(info.common_encodings[0..info.common_encodings_count]));

@ -325,42 +324,42 @@ pub fn write(info: UnwindInfo, macho_file: *MachO, buffer: []u8) !void {
for (info.pages.items, 0..) |page, i| {
assert(page.count > 0);
const rec = info.records.items[page.start].getUnwindRecord(macho_file);
try writer.writeStruct(macho.unwind_info_section_header_index_entry{
try writer.writeStruct(@as(macho.unwind_info_section_header_index_entry, .{
.functionOffset = @as(u32, @intCast(rec.getAtomAddress(macho_file) - seg.vmaddr)),
.secondLevelPagesSectionOffset = @as(u32, @intCast(pages_base_offset + i * second_level_page_bytes)),
.lsdaIndexArraySectionOffset = lsda_base_offset +
info.lsdas_lookup.items[page.start] * @sizeOf(macho.unwind_info_section_header_lsda_index_entry),
});
}), .little);
}

const last_rec = info.records.items[info.records.items.len - 1].getUnwindRecord(macho_file);
const sentinel_address = @as(u32, @intCast(last_rec.getAtomAddress(macho_file) + last_rec.length - seg.vmaddr));
try writer.writeStruct(macho.unwind_info_section_header_index_entry{
try writer.writeStruct(@as(macho.unwind_info_section_header_index_entry, .{
.functionOffset = sentinel_address,
.secondLevelPagesSectionOffset = 0,
.lsdaIndexArraySectionOffset = lsda_base_offset +
@as(u32, @intCast(info.lsdas.items.len)) * @sizeOf(macho.unwind_info_section_header_lsda_index_entry),
});
}), .little);

for (info.lsdas.items) |index| {
const rec = info.records.items[index].getUnwindRecord(macho_file);
try writer.writeStruct(macho.unwind_info_section_header_lsda_index_entry{
try writer.writeStruct(@as(macho.unwind_info_section_header_lsda_index_entry, .{
.functionOffset = @as(u32, @intCast(rec.getAtomAddress(macho_file) - seg.vmaddr)),
.lsdaOffset = @as(u32, @intCast(rec.getLsdaAddress(macho_file) - seg.vmaddr)),
});
}), .little);
}

for (info.pages.items) |page| {
const start = stream.pos;
try page.write(info, macho_file, writer);
const nwritten = stream.pos - start;
const start = writer.end;
try page.write(info, macho_file, &writer);
const nwritten = writer.end - start;
if (nwritten < second_level_page_bytes) {
const padding = math.cast(usize, second_level_page_bytes - nwritten) orelse return error.Overflow;
try writer.writeByteNTimes(0, padding);
try writer.splatByteAll(0, padding);
}
}

@memset(buffer[stream.pos..], 0);
@memset(buffer[writer.end..], 0);
}

fn getOrPutPersonalityFunction(info: *UnwindInfo, ref: MachO.Ref) error{TooManyPersonalities}!u2 {
@ -611,33 +610,33 @@ const Page = struct {
} };
}

fn write(page: Page, info: UnwindInfo, macho_file: *MachO, writer: anytype) !void {
fn write(page: Page, info: UnwindInfo, macho_file: *MachO, writer: *Writer) !void {
const seg = macho_file.getTextSegment();

switch (page.kind) {
.regular => {
try writer.writeStruct(macho.unwind_info_regular_second_level_page_header{
try writer.writeStruct(@as(macho.unwind_info_regular_second_level_page_header, .{
.entryPageOffset = @sizeOf(macho.unwind_info_regular_second_level_page_header),
.entryCount = page.count,
});
}), .little);

for (info.records.items[page.start..][0..page.count]) |ref| {
const rec = ref.getUnwindRecord(macho_file);
try writer.writeStruct(macho.unwind_info_regular_second_level_entry{
try writer.writeStruct(@as(macho.unwind_info_regular_second_level_entry, .{
.functionOffset = @as(u32, @intCast(rec.getAtomAddress(macho_file) - seg.vmaddr)),
.encoding = rec.enc.enc,
});
}), .little);
}
},
.compressed => {
const entry_offset = @sizeOf(macho.unwind_info_compressed_second_level_page_header) +
@as(u16, @intCast(page.page_encodings_count)) * @sizeOf(u32);
try writer.writeStruct(macho.unwind_info_compressed_second_level_page_header{
try writer.writeStruct(@as(macho.unwind_info_compressed_second_level_page_header, .{
.entryPageOffset = entry_offset,
.entryCount = page.count,
.encodingsPageOffset = @sizeOf(macho.unwind_info_compressed_second_level_page_header),
.encodingsCount = page.page_encodings_count,
});
}), .little);

for (page.page_encodings[0..page.page_encodings_count]) |enc| {
try writer.writeInt(u32, enc.enc, .little);
@ -656,7 +655,7 @@ const Page = struct {
.funcOffset = @as(u24, @intCast(rec.getAtomAddress(macho_file) - first_rec.getAtomAddress(macho_file))),
.encodingIndex = @as(u8, @intCast(enc_index)),
};
try writer.writeStruct(compressed);
try writer.writeStruct(compressed, .little);
}
},
}
@ -673,7 +672,7 @@ const macho = std.macho;
const math = std.math;
const mem = std.mem;
const trace = @import("../../tracy.zig").trace;
const Writer = std.io.Writer;
const Writer = std.Io.Writer;

const Allocator = mem.Allocator;
const Atom = @import("Atom.zig");

@ -110,12 +110,14 @@ pub fn updateSize(rebase: *Rebase, macho_file: *MachO) !void {
fn finalize(rebase: *Rebase, gpa: Allocator) !void {
if (rebase.entries.items.len == 0) return;

const writer = rebase.buffer.writer(gpa);

log.debug("rebase opcodes", .{});

std.mem.sort(Entry, rebase.entries.items, {}, Entry.lessThan);

var allocating: std.Io.Writer.Allocating = .fromArrayList(gpa, &rebase.buffer);
defer rebase.buffer = allocating.toArrayList();
const writer = &allocating.writer;

try setTypePointer(writer);

var start: usize = 0;
@ -226,13 +228,13 @@ fn setTypePointer(writer: anytype) !void {
fn setSegmentOffset(segment_id: u8, offset: u64, writer: anytype) !void {
log.debug(">>> set segment: {d} and offset: {x}", .{ segment_id, offset });
try writer.writeByte(macho.REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | @as(u4, @truncate(segment_id)));
try std.leb.writeUleb128(writer, offset);
try writer.writeUleb128(offset);
}

fn rebaseAddAddr(addr: u64, writer: anytype) !void {
log.debug(">>> rebase with add: {x}", .{addr});
try writer.writeByte(macho.REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB);
try std.leb.writeUleb128(writer, addr);
try writer.writeUleb128(addr);
}

fn rebaseTimes(count: usize, writer: anytype) !void {
@ -241,15 +243,15 @@ fn rebaseTimes(count: usize, writer: anytype) !void {
try writer.writeByte(macho.REBASE_OPCODE_DO_REBASE_IMM_TIMES | @as(u4, @truncate(count)));
} else {
try writer.writeByte(macho.REBASE_OPCODE_DO_REBASE_ULEB_TIMES);
try std.leb.writeUleb128(writer, count);
try writer.writeUleb128(count);
}
}

fn rebaseTimesSkip(count: usize, skip: u64, writer: anytype) !void {
log.debug(">>> rebase with count: {d} and skip: {x}", .{ count, skip });
try writer.writeByte(macho.REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB);
try std.leb.writeUleb128(writer, count);
try std.leb.writeUleb128(writer, skip);
try writer.writeUleb128(count);
try writer.writeUleb128(skip);
}

fn addAddr(addr: u64, writer: anytype) !void {
@ -262,7 +264,7 @@ fn addAddr(addr: u64, writer: anytype) !void {
}
}
try writer.writeByte(macho.REBASE_OPCODE_ADD_ADDR_ULEB);
try std.leb.writeUleb128(writer, addr);
try writer.writeUleb128(addr);
}

fn done(writer: anytype) !void {
@ -649,7 +651,6 @@ test "rebase - composite" {

const std = @import("std");
const assert = std.debug.assert;
const leb = std.leb;
const log = std.log.scoped(.link_dyld_info);
const macho = std.macho;
const mem = std.mem;

@ -170,8 +170,13 @@ fn finalize(self: *Trie, allocator: Allocator) !void {
}

try self.buffer.ensureTotalCapacityPrecise(allocator, size);

var allocating: std.Io.Writer.Allocating = .fromArrayList(allocator, &self.buffer);
defer self.buffer = allocating.toArrayList();
const writer = &allocating.writer;

for (ordered_nodes.items) |node_index| {
try self.writeNode(node_index, self.buffer.writer(allocator));
try self.writeNode(node_index, writer);
}
}

@ -232,7 +237,7 @@ pub fn deinit(self: *Trie, allocator: Allocator) void {
self.buffer.deinit(allocator);
}

pub fn write(self: Trie, writer: anytype) !void {
pub fn write(self: Trie, writer: *std.Io.Writer) !void {
if (self.buffer.items.len == 0) return;
try writer.writeAll(self.buffer.items);
}
@ -243,7 +248,7 @@ pub fn write(self: Trie, writer: anytype) !void {
/// iterate over `Trie.ordered_nodes` and call this method on each node.
/// This is one of the requirements of the MachO.
/// Panics if `finalize` was not called before calling this method.
fn writeNode(self: *Trie, node_index: Node.Index, writer: anytype) !void {
fn writeNode(self: *Trie, node_index: Node.Index, writer: *std.Io.Writer) !void {
const slice = self.nodes.slice();
const edges = slice.items(.edges)[node_index];
const is_terminal = slice.items(.is_terminal)[node_index];
@ -253,21 +258,21 @@ fn writeNode(self: *Trie, node_index: Node.Index, writer: anytype) !void {
if (is_terminal) {
// Terminal node info: encode export flags and vmaddr offset of this symbol.
var info_buf: [@sizeOf(u64) * 2]u8 = undefined;
var info_stream = std.io.fixedBufferStream(&info_buf);
var info_stream: std.Io.Writer = .fixed(&info_buf);
// TODO Implement for special flags.
assert(export_flags & macho.EXPORT_SYMBOL_FLAGS_REEXPORT == 0 and
export_flags & macho.EXPORT_SYMBOL_FLAGS_STUB_AND_RESOLVER == 0);
try leb.writeUleb128(info_stream.writer(), export_flags);
try leb.writeUleb128(info_stream.writer(), vmaddr_offset);
try info_stream.writeUleb128(export_flags);
try info_stream.writeUleb128(vmaddr_offset);

// Encode the size of the terminal node info.
var size_buf: [@sizeOf(u64)]u8 = undefined;
var size_stream = std.io.fixedBufferStream(&size_buf);
try leb.writeUleb128(size_stream.writer(), info_stream.pos);
var size_stream: std.Io.Writer = .fixed(&size_buf);
try size_stream.writeUleb128(info_stream.end);

// Now, write them to the output stream.
try writer.writeAll(size_buf[0..size_stream.pos]);
try writer.writeAll(info_buf[0..info_stream.pos]);
try writer.writeAll(size_buf[0..size_stream.end]);
try writer.writeAll(info_buf[0..info_stream.end]);
} else {
// Non-terminal node is delimited by 0 byte.
try writer.writeByte(0);
@ -280,7 +285,7 @@ fn writeNode(self: *Trie, node_index: Node.Index, writer: anytype) !void {
// Write edge label and offset to next node in trie.
try writer.writeAll(edge.label);
try writer.writeByte(0);
try leb.writeUleb128(writer, slice.items(.trie_offset)[edge.node]);
try writer.writeUleb128(slice.items(.trie_offset)[edge.node]);
}
}

@ -414,7 +419,6 @@ test "ordering bug" {
}

const assert = std.debug.assert;
const leb = std.leb;
const log = std.log.scoped(.macho);
const macho = std.macho;
const mem = std.mem;

@ -132,12 +132,14 @@ pub const Bind = struct {
fn finalize(self: *Self, gpa: Allocator, ctx: *MachO) !void {
if (self.entries.items.len == 0) return;

const writer = self.buffer.writer(gpa);

log.debug("bind opcodes", .{});

std.mem.sort(Entry, self.entries.items, ctx, Entry.lessThan);

var allocating: std.Io.Writer.Allocating = .fromArrayList(gpa, &self.buffer);
defer self.buffer = allocating.toArrayList();
const writer = &allocating.writer;

var start: usize = 0;
var seg_id: ?u8 = null;
for (self.entries.items, 0..) |entry, i| {
@ -151,7 +153,7 @@ pub const Bind = struct {
try done(writer);
}

fn finalizeSegment(entries: []const Entry, ctx: *MachO, writer: anytype) !void {
fn finalizeSegment(entries: []const Entry, ctx: *MachO, writer: *std.Io.Writer) !void {
if (entries.len == 0) return;

const seg_id = entries[0].segment_id;
@ -263,7 +265,7 @@ pub const Bind = struct {
}
}

pub fn write(self: Self, writer: anytype) !void {
pub fn write(self: Self, writer: *std.Io.Writer) !void {
try writer.writeAll(self.buffer.items);
}
};
@ -385,12 +387,14 @@ pub const WeakBind = struct {
fn finalize(self: *Self, gpa: Allocator, ctx: *MachO) !void {
if (self.entries.items.len == 0) return;

const writer = self.buffer.writer(gpa);

log.debug("weak bind opcodes", .{});

std.mem.sort(Entry, self.entries.items, ctx, Entry.lessThan);

var allocating: std.Io.Writer.Allocating = .fromArrayList(gpa, &self.buffer);
defer self.buffer = allocating.toArrayList();
const writer = &allocating.writer;

var start: usize = 0;
var seg_id: ?u8 = null;
for (self.entries.items, 0..) |entry, i| {
@ -404,7 +408,7 @@ pub const WeakBind = struct {
try done(writer);
}

fn finalizeSegment(entries: []const Entry, ctx: *MachO, writer: anytype) !void {
fn finalizeSegment(entries: []const Entry, ctx: *MachO, writer: *std.Io.Writer) !void {
if (entries.len == 0) return;

const seg_id = entries[0].segment_id;
@ -505,7 +509,7 @@ pub const WeakBind = struct {
}
}

pub fn write(self: Self, writer: anytype) !void {
pub fn write(self: Self, writer: *std.Io.Writer) !void {
try writer.writeAll(self.buffer.items);
}
};
@ -555,8 +559,6 @@ pub const LazyBind = struct {
fn finalize(self: *Self, gpa: Allocator, ctx: *MachO) !void {
try self.offsets.ensureTotalCapacityPrecise(gpa, self.entries.items.len);

const writer = self.buffer.writer(gpa);

log.debug("lazy bind opcodes", .{});

var addend: i64 = 0;
@ -578,6 +580,9 @@ pub const LazyBind = struct {
break :ord macho.BIND_SPECIAL_DYLIB_SELF;
};

var allocating: std.Io.Writer.Allocating = .fromArrayList(gpa, &self.buffer);
defer self.buffer = allocating.toArrayList();
const writer = &allocating.writer;
try setSegmentOffset(entry.segment_id, entry.offset, writer);
try setSymbol(name, flags, writer);
try setDylibOrdinal(ordinal, writer);
@ -592,30 +597,30 @@ pub const LazyBind = struct {
}
}

pub fn write(self: Self, writer: anytype) !void {
pub fn write(self: Self, writer: *std.Io.Writer) !void {
try writer.writeAll(self.buffer.items);
}
};

fn setSegmentOffset(segment_id: u8, offset: u64, writer: anytype) !void {
fn setSegmentOffset(segment_id: u8, offset: u64, writer: *std.Io.Writer) !void {
log.debug(">>> set segment: {d} and offset: {x}", .{ segment_id, offset });
try writer.writeByte(macho.BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | @as(u4, @truncate(segment_id)));
try std.leb.writeUleb128(writer, offset);
try writer.writeUleb128(offset);
}

fn setSymbol(name: []const u8, flags: u8, writer: anytype) !void {
fn setSymbol(name: []const u8, flags: u8, writer: *std.Io.Writer) !void {
log.debug(">>> set symbol: {s} with flags: {x}", .{ name, flags });
try writer.writeByte(macho.BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM | @as(u4, @truncate(flags)));
try writer.writeAll(name);
try writer.writeByte(0);
}

fn setTypePointer(writer: anytype) !void {
fn setTypePointer(writer: *std.Io.Writer) !void {
log.debug(">>> set type: {d}", .{macho.BIND_TYPE_POINTER});
try writer.writeByte(macho.BIND_OPCODE_SET_TYPE_IMM | @as(u4, @truncate(macho.BIND_TYPE_POINTER)));
}

fn setDylibOrdinal(ordinal: i16, writer: anytype) !void {
fn setDylibOrdinal(ordinal: i16, writer: *std.Io.Writer) !void {
if (ordinal <= 0) {
switch (ordinal) {
macho.BIND_SPECIAL_DYLIB_SELF,
@ -634,23 +639,23 @@ fn setDylibOrdinal(ordinal: i16, writer: anytype) !void {
try writer.writeByte(macho.BIND_OPCODE_SET_DYLIB_ORDINAL_IMM | @as(u4, @truncate(cast)));
} else {
try writer.writeByte(macho.BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB);
try std.leb.writeUleb128(writer, cast);
try writer.writeUleb128(cast);
}
}
}

fn setAddend(addend: i64, writer: anytype) !void {
fn setAddend(addend: i64, writer: *std.Io.Writer) !void {
log.debug(">>> set addend: {x}", .{addend});
try writer.writeByte(macho.BIND_OPCODE_SET_ADDEND_SLEB);
try std.leb.writeIleb128(writer, addend);
}

fn doBind(writer: anytype) !void {
fn doBind(writer: *std.Io.Writer) !void {
log.debug(">>> bind", .{});
try writer.writeByte(macho.BIND_OPCODE_DO_BIND);
}

fn doBindAddAddr(addr: u64, writer: anytype) !void {
fn doBindAddAddr(addr: u64, writer: *std.Io.Writer) !void {
log.debug(">>> bind with add: {x}", .{addr});
if (std.mem.isAlignedGeneric(u64, addr, @sizeOf(u64))) {
const imm = @divExact(addr, @sizeOf(u64));
@ -662,29 +667,28 @@ fn doBindAddAddr(addr: u64, writer: anytype) !void {
}
}
try writer.writeByte(macho.BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB);
try std.leb.writeUleb128(writer, addr);
try writer.writeUleb128(addr);
}

fn doBindTimesSkip(count: usize, skip: u64, writer: anytype) !void {
fn doBindTimesSkip(count: usize, skip: u64, writer: *std.Io.Writer) !void {
log.debug(">>> bind with count: {d} and skip: {x}", .{ count, skip });
try writer.writeByte(macho.BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB);
try std.leb.writeUleb128(writer, count);
try std.leb.writeUleb128(writer, skip);
try writer.writeUleb128(count);
try writer.writeUleb128(skip);
}

fn addAddr(addr: u64, writer: anytype) !void {
fn addAddr(addr: u64, writer: *std.Io.Writer) !void {
log.debug(">>> add: {x}", .{addr});
try writer.writeByte(macho.BIND_OPCODE_ADD_ADDR_ULEB);
try std.leb.writeUleb128(writer, addr);
try writer.writeUleb128(addr);
}

fn done(writer: anytype) !void {
fn done(writer: *std.Io.Writer) !void {
log.debug(">>> done", .{});
try writer.writeByte(macho.BIND_OPCODE_DONE);
}

const assert = std.debug.assert;
const leb = std.leb;
const log = std.log.scoped(.link_dyld_info);
const macho = std.macho;
const mem = std.mem;

@ -3,9 +3,9 @@ const assert = std.debug.assert;
const log = std.log.scoped(.link);
const macho = std.macho;
const mem = std.mem;
const Writer = std.io.Writer;
const Writer = std.Io.Writer;
const Allocator = std.mem.Allocator;

const Allocator = mem.Allocator;
const DebugSymbols = @import("DebugSymbols.zig");
const Dylib = @import("Dylib.zig");
const MachO = @import("../MachO.zig");
@ -181,22 +181,22 @@ pub fn calcMinHeaderPadSize(macho_file: *MachO) !u32 {
return offset;
}

pub fn writeDylinkerLC(writer: anytype) !void {
pub fn writeDylinkerLC(writer: *Writer) !void {
const name_len = mem.sliceTo(default_dyld_path, 0).len;
const cmdsize = @as(u32, @intCast(mem.alignForward(
u64,
@sizeOf(macho.dylinker_command) + name_len,
@sizeOf(u64),
)));
try writer.writeStruct(macho.dylinker_command{
try writer.writeStruct(@as(macho.dylinker_command, .{
.cmd = .LOAD_DYLINKER,
.cmdsize = cmdsize,
.name = @sizeOf(macho.dylinker_command),
});
}), .little);
try writer.writeAll(mem.sliceTo(default_dyld_path, 0));
const padding = cmdsize - @sizeOf(macho.dylinker_command) - name_len;
if (padding > 0) {
try writer.writeByteNTimes(0, padding);
try writer.splatByteAll(0, padding);
}
}

@ -208,14 +208,14 @@ const WriteDylibLCCtx = struct {
compatibility_version: u32 = 0x10000,
};

pub fn writeDylibLC(ctx: WriteDylibLCCtx, writer: anytype) !void {
pub fn writeDylibLC(ctx: WriteDylibLCCtx, writer: *Writer) !void {
const name_len = ctx.name.len + 1;
const cmdsize = @as(u32, @intCast(mem.alignForward(
u64,
@sizeOf(macho.dylib_command) + name_len,
@sizeOf(u64),
)));
try writer.writeStruct(macho.dylib_command{
try writer.writeStruct(@as(macho.dylib_command, .{
.cmd = ctx.cmd,
.cmdsize = cmdsize,
.dylib = .{
@ -224,16 +224,16 @@ pub fn writeDylibLC(ctx: WriteDylibLCCtx, writer: anytype) !void {
.current_version = ctx.current_version,
.compatibility_version = ctx.compatibility_version,
},
});
}), .little);
try writer.writeAll(ctx.name);
try writer.writeByte(0);
const padding = cmdsize - @sizeOf(macho.dylib_command) - name_len;
if (padding > 0) {
try writer.writeByteNTimes(0, padding);
try writer.splatByteAll(0, padding);
}
}

pub fn writeDylibIdLC(macho_file: *MachO, writer: anytype) !void {
pub fn writeDylibIdLC(macho_file: *MachO, writer: *Writer) !void {
const comp = macho_file.base.comp;
const gpa = comp.gpa;
assert(comp.config.output_mode == .Lib and comp.config.link_mode == .dynamic);
@ -259,26 +259,26 @@ pub fn writeDylibIdLC(macho_file: *MachO, writer: anytype) !void {
}, writer);
}

pub fn writeRpathLC(rpath: []const u8, writer: anytype) !void {
pub fn writeRpathLC(rpath: []const u8, writer: *Writer) !void {
const rpath_len = rpath.len + 1;
const cmdsize = @as(u32, @intCast(mem.alignForward(
u64,
@sizeOf(macho.rpath_command) + rpath_len,
@sizeOf(u64),
)));
try writer.writeStruct(macho.rpath_command{
try writer.writeStruct(@as(macho.rpath_command, .{
.cmdsize = cmdsize,
.path = @sizeOf(macho.rpath_command),
});
}), .little);
try writer.writeAll(rpath);
try writer.writeByte(0);
const padding = cmdsize - @sizeOf(macho.rpath_command) - rpath_len;
if (padding > 0) {
try writer.writeByteNTimes(0, padding);
try writer.splatByteAll(0, padding);
}
}

pub fn writeVersionMinLC(platform: MachO.Platform, sdk_version: ?std.SemanticVersion, writer: anytype) !void {
pub fn writeVersionMinLC(platform: MachO.Platform, sdk_version: ?std.SemanticVersion, writer: *Writer) !void {
const cmd: macho.LC = switch (platform.os_tag) {
.macos => .VERSION_MIN_MACOSX,
.ios => .VERSION_MIN_IPHONEOS,
@ -296,9 +296,9 @@ pub fn writeVersionMinLC(platform: MachO.Platform, sdk_version: ?std.SemanticVer
}));
}

pub fn writeBuildVersionLC(platform: MachO.Platform, sdk_version: ?std.SemanticVersion, writer: anytype) !void {
pub fn writeBuildVersionLC(platform: MachO.Platform, sdk_version: ?std.SemanticVersion, writer: *Writer) !void {
const cmdsize = @sizeOf(macho.build_version_command) + @sizeOf(macho.build_tool_version);
try writer.writeStruct(macho.build_version_command{
try writer.writeStruct(@as(macho.build_version_command, .{
.cmdsize = cmdsize,
.platform = platform.toApplePlatform(),
.minos = platform.toAppleVersion(),
@ -307,7 +307,7 @@ pub fn writeBuildVersionLC(platform: MachO.Platform, sdk_version: ?std.SemanticV
else
platform.toAppleVersion(),
.ntools = 1,
});
}), .little);
try writer.writeAll(mem.asBytes(&macho.build_tool_version{
.tool = .ZIG,
.version = 0x0,

@ -205,35 +205,32 @@ pub fn flushStaticLib(macho_file: *MachO, comp: *Compilation, module_obj_path: ?
state_log.debug("ar_symtab\n{f}\n", .{ar_symtab.fmt(macho_file)});
}

var buffer = std.array_list.Managed(u8).init(gpa);
defer buffer.deinit();
try buffer.ensureTotalCapacityPrecise(total_size);
const writer = buffer.writer();
const buffer = try gpa.alloc(u8, total_size);
defer gpa.free(buffer);
var writer: Writer = .fixed(buffer);

// Write magic
try writer.writeAll(Archive.ARMAG);
writer.writeAll(Archive.ARMAG) catch unreachable;

// Write symtab
ar_symtab.write(format, macho_file, writer) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
else => |e| return diags.fail("failed to write archive symbol table: {s}", .{@errorName(e)}),
};
ar_symtab.write(format, macho_file, &writer) catch |err|
return diags.fail("failed to write archive symbol table: {t}", .{err});

// Write object files
for (files.items) |index| {
const aligned = mem.alignForward(usize, buffer.items.len, 2);
const padding = aligned - buffer.items.len;
const aligned = mem.alignForward(usize, writer.end, 2);
const padding = aligned - writer.end;
if (padding > 0) {
try writer.writeByteNTimes(0, padding);
writer.splatByteAll(0, padding) catch unreachable;
}
macho_file.getFile(index).?.writeAr(format, macho_file, writer) catch |err|
return diags.fail("failed to write archive: {s}", .{@errorName(err)});
macho_file.getFile(index).?.writeAr(format, macho_file, &writer) catch |err|
return diags.fail("failed to write archive: {t}", .{err});
}

assert(buffer.items.len == total_size);
assert(writer.end == total_size);

try macho_file.setEndPos(total_size);
try macho_file.pwriteAll(buffer.items, 0);
try macho_file.pwriteAll(writer.buffered(), 0);

if (diags.hasErrors()) return error.LinkFailure;
}
@ -693,8 +690,7 @@ fn writeLoadCommands(macho_file: *MachO) error{ LinkFailure, OutOfMemory }!struc
const buffer = try gpa.alloc(u8, needed_size);
defer gpa.free(buffer);

var stream = std.io.fixedBufferStream(buffer);
const writer = stream.writer();
var writer: Writer = .fixed(buffer);

var ncmds: usize = 0;

@ -702,43 +698,43 @@ fn writeLoadCommands(macho_file: *MachO) error{ LinkFailure, OutOfMemory }!struc
{
assert(macho_file.segments.items.len == 1);
const seg = macho_file.segments.items[0];
writer.writeStruct(seg) catch |err| switch (err) {
error.NoSpaceLeft => unreachable,
writer.writeStruct(seg, .little) catch |err| switch (err) {
error.WriteFailed => unreachable,
};
for (macho_file.sections.items(.header)) |header| {
writer.writeStruct(header) catch |err| switch (err) {
error.NoSpaceLeft => unreachable,
writer.writeStruct(header, .little) catch |err| switch (err) {
error.WriteFailed => unreachable,
};
}
ncmds += 1;
}

writer.writeStruct(macho_file.data_in_code_cmd) catch |err| switch (err) {
error.NoSpaceLeft => unreachable,
writer.writeStruct(macho_file.data_in_code_cmd, .little) catch |err| switch (err) {
error.WriteFailed => unreachable,
};
ncmds += 1;
writer.writeStruct(macho_file.symtab_cmd) catch |err| switch (err) {
error.NoSpaceLeft => unreachable,
writer.writeStruct(macho_file.symtab_cmd, .little) catch |err| switch (err) {
error.WriteFailed => unreachable,
};
ncmds += 1;
writer.writeStruct(macho_file.dysymtab_cmd) catch |err| switch (err) {
error.NoSpaceLeft => unreachable,
writer.writeStruct(macho_file.dysymtab_cmd, .little) catch |err| switch (err) {
error.WriteFailed => unreachable,
};
ncmds += 1;

if (macho_file.platform.isBuildVersionCompatible()) {
load_commands.writeBuildVersionLC(macho_file.platform, macho_file.sdk_version, writer) catch |err| switch (err) {
error.NoSpaceLeft => unreachable,
load_commands.writeBuildVersionLC(macho_file.platform, macho_file.sdk_version, &writer) catch |err| switch (err) {
error.WriteFailed => unreachable,
};
ncmds += 1;
} else {
load_commands.writeVersionMinLC(macho_file.platform, macho_file.sdk_version, writer) catch |err| switch (err) {
error.NoSpaceLeft => unreachable,
load_commands.writeVersionMinLC(macho_file.platform, macho_file.sdk_version, &writer) catch |err| switch (err) {
error.WriteFailed => unreachable,
};
ncmds += 1;
}

assert(stream.pos == needed_size);
assert(writer.end == needed_size);

try macho_file.pwriteAll(buffer, @sizeOf(macho.mach_header_64));

@ -27,7 +27,7 @@ pub const GotSection = struct {
return got.symbols.items.len * @sizeOf(u64);
}

pub fn write(got: GotSection, macho_file: *MachO, writer: anytype) !void {
pub fn write(got: GotSection, macho_file: *MachO, writer: *Writer) !void {
const tracy = trace(@src());
defer tracy.end();
for (got.symbols.items) |ref| {
@ -89,7 +89,7 @@ pub const StubsSection = struct {
return stubs.symbols.items.len * header.reserved2;
}

pub fn write(stubs: StubsSection, macho_file: *MachO, writer: anytype) !void {
pub fn write(stubs: StubsSection, macho_file: *MachO, writer: *Writer) !void {
const tracy = trace(@src());
defer tracy.end();
const cpu_arch = macho_file.getTarget().cpu.arch;
@ -174,7 +174,7 @@ pub const StubsHelperSection = struct {
return s;
}

pub fn write(stubs_helper: StubsHelperSection, macho_file: *MachO, writer: anytype) !void {
pub fn write(stubs_helper: StubsHelperSection, macho_file: *MachO, writer: *Writer) !void {
const tracy = trace(@src());
defer tracy.end();

@ -217,7 +217,7 @@ pub const StubsHelperSection = struct {
}
}

fn writePreamble(stubs_helper: StubsHelperSection, macho_file: *MachO, writer: anytype) !void {
fn writePreamble(stubs_helper: StubsHelperSection, macho_file: *MachO, writer: *Writer) !void {
_ = stubs_helper;
const obj = macho_file.getInternalObject().?;
const cpu_arch = macho_file.getTarget().cpu.arch;
@ -273,7 +273,7 @@ pub const LaSymbolPtrSection = struct {
return macho_file.stubs.symbols.items.len * @sizeOf(u64);
}

pub fn write(laptr: LaSymbolPtrSection, macho_file: *MachO, writer: anytype) !void {
pub fn write(laptr: LaSymbolPtrSection, macho_file: *MachO, writer: *Writer) !void {
const tracy = trace(@src());
defer tracy.end();
_ = laptr;
@ -323,7 +323,7 @@ pub const TlvPtrSection = struct {
return tlv.symbols.items.len * @sizeOf(u64);
}

pub fn write(tlv: TlvPtrSection, macho_file: *MachO, writer: anytype) !void {
pub fn write(tlv: TlvPtrSection, macho_file: *MachO, writer: *Writer) !void {
const tracy = trace(@src());
defer tracy.end();

@ -394,7 +394,7 @@ pub const ObjcStubsSection = struct {
return objc.symbols.items.len * entrySize(macho_file.getTarget().cpu.arch);
}

pub fn write(objc: ObjcStubsSection, macho_file: *MachO, writer: anytype) !void {
pub fn write(objc: ObjcStubsSection, macho_file: *MachO, writer: *Writer) !void {
const tracy = trace(@src());
defer tracy.end();

@ -487,7 +487,7 @@ pub const Indsymtab = struct {
macho_file.dysymtab_cmd.nindirectsyms = ind.nsyms(macho_file);
}

pub fn write(ind: Indsymtab, macho_file: *MachO, writer: anytype) !void {
pub fn write(ind: Indsymtab, macho_file: *MachO, writer: *Writer) !void {
const tracy = trace(@src());
defer tracy.end();

@ -564,7 +564,7 @@ pub const DataInCode = struct {
macho_file.data_in_code_cmd.datasize = math.cast(u32, dice.size()) orelse return error.Overflow;
}

pub fn write(dice: DataInCode, macho_file: *MachO, writer: anytype) !void {
pub fn write(dice: DataInCode, macho_file: *MachO, writer: *Writer) !void {
const base_address = if (!macho_file.base.isRelocatable())
macho_file.getTextSegment().vmaddr
else
@ -572,11 +572,11 @@ pub const DataInCode = struct {
for (dice.entries.items) |entry| {
const atom_address = entry.atom_ref.getAtom(macho_file).?.getAddress(macho_file);
const offset = atom_address + entry.offset - base_address;
try writer.writeStruct(macho.data_in_code_entry{
try writer.writeStruct(@as(macho.data_in_code_entry, .{
.offset = @intCast(offset),
.length = entry.length,
.kind = entry.kind,
});
}), .little);
}
}

@ -594,7 +594,7 @@ const assert = std.debug.assert;
const macho = std.macho;
const math = std.math;
const Allocator = std.mem.Allocator;
const Writer = std.io.Writer;
const Writer = std.Io.Writer;

const trace = @import("../../tracy.zig").trace;
const MachO = @import("../MachO.zig");

@ -19,6 +19,7 @@ const mem = std.mem;
|
||||
const leb = std.leb;
|
||||
const log = std.log.scoped(.link);
|
||||
const assert = std.debug.assert;
|
||||
const ArrayList = std.ArrayList;
|
||||
|
||||
/// Ordered list of data segments that will appear in the final binary.
|
||||
/// When sorted, to-be-merged segments will be made adjacent.
|
||||
@ -27,9 +28,9 @@ data_segments: std.AutoArrayHashMapUnmanaged(Wasm.DataSegmentId, u32) = .empty,
|
||||
/// Each time a `data_segment` offset equals zero it indicates a new group, and
|
||||
/// the next element in this array will contain the total merged segment size.
|
||||
/// Value is the virtual memory address of the end of the segment.
|
||||
data_segment_groups: std.ArrayListUnmanaged(DataSegmentGroup) = .empty,
|
||||
data_segment_groups: ArrayList(DataSegmentGroup) = .empty,
|
||||
|
||||
binary_bytes: std.ArrayListUnmanaged(u8) = .empty,
|
||||
binary_bytes: ArrayList(u8) = .empty,
|
||||
missing_exports: std.AutoArrayHashMapUnmanaged(String, void) = .empty,
|
||||
function_imports: std.AutoArrayHashMapUnmanaged(String, Wasm.FunctionImportId) = .empty,
|
||||
global_imports: std.AutoArrayHashMapUnmanaged(String, Wasm.GlobalImportId) = .empty,
|
||||
@ -563,8 +564,6 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void {
|
||||
try binary_bytes.appendSlice(gpa, &std.wasm.magic ++ &std.wasm.version);
|
||||
assert(binary_bytes.items.len == 8);
|
||||
|
||||
const binary_writer = binary_bytes.writer(gpa);
|
||||
|
||||
// Type section.
|
||||
for (f.function_imports.values()) |id| {
|
||||
try f.func_types.put(gpa, id.functionType(wasm), {});
|
||||
@ -576,16 +575,16 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void {
|
||||
const header_offset = try reserveVecSectionHeader(gpa, binary_bytes);
|
||||
for (f.func_types.keys()) |func_type_index| {
|
||||
const func_type = func_type_index.ptr(wasm);
|
||||
try leb.writeUleb128(binary_writer, std.wasm.function_type);
|
||||
try appendLeb128(gpa, binary_bytes, std.wasm.function_type);
|
||||
const params = func_type.params.slice(wasm);
|
||||
try leb.writeUleb128(binary_writer, @as(u32, @intCast(params.len)));
|
||||
try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(params.len)));
|
||||
for (params) |param_ty| {
|
||||
try leb.writeUleb128(binary_writer, @intFromEnum(param_ty));
|
||||
try appendLeb128(gpa, binary_bytes, @intFromEnum(param_ty));
|
||||
}
|
||||
const returns = func_type.returns.slice(wasm);
|
||||
try leb.writeUleb128(binary_writer, @as(u32, @intCast(returns.len)));
|
||||
try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(returns.len)));
|
||||
for (returns) |ret_ty| {
|
||||
try leb.writeUleb128(binary_writer, @intFromEnum(ret_ty));
|
||||
try appendLeb128(gpa, binary_bytes, @intFromEnum(ret_ty));
|
||||
}
|
||||
}
|
||||
replaceVecSectionHeader(binary_bytes, header_offset, .type, @intCast(f.func_types.entries.len));
|
||||
@ -605,31 +604,31 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void {

for (f.function_imports.values()) |id| {
const module_name = id.moduleName(wasm).slice(wasm).?;
try leb.writeUleb128(binary_writer, @as(u32, @intCast(module_name.len)));
try binary_writer.writeAll(module_name);
try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(module_name.len)));
try binary_bytes.appendSlice(gpa, module_name);

const name = id.importName(wasm).slice(wasm);
try leb.writeUleb128(binary_writer, @as(u32, @intCast(name.len)));
try binary_writer.writeAll(name);
try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(name.len)));
try binary_bytes.appendSlice(gpa, name);

try binary_writer.writeByte(@intFromEnum(std.wasm.ExternalKind.function));
try binary_bytes.append(gpa, @intFromEnum(std.wasm.ExternalKind.function));
const type_index: FuncTypeIndex = .fromTypeIndex(id.functionType(wasm), f);
try leb.writeUleb128(binary_writer, @intFromEnum(type_index));
try appendLeb128(gpa, binary_bytes, @intFromEnum(type_index));
}
total_imports += f.function_imports.entries.len;

for (wasm.table_imports.values()) |id| {
const table_import = id.value(wasm);
const module_name = table_import.module_name.slice(wasm);
try leb.writeUleb128(binary_writer, @as(u32, @intCast(module_name.len)));
try binary_writer.writeAll(module_name);
try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(module_name.len)));
try binary_bytes.appendSlice(gpa, module_name);

const name = table_import.name.slice(wasm);
try leb.writeUleb128(binary_writer, @as(u32, @intCast(name.len)));
try binary_writer.writeAll(name);
try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(name.len)));
try binary_bytes.appendSlice(gpa, name);

try binary_writer.writeByte(@intFromEnum(std.wasm.ExternalKind.table));
try leb.writeUleb128(binary_writer, @intFromEnum(@as(std.wasm.RefType, table_import.flags.ref_type.to())));
try binary_bytes.append(gpa, @intFromEnum(std.wasm.ExternalKind.table));
try appendLeb128(gpa, binary_bytes, @intFromEnum(@as(std.wasm.RefType, table_import.flags.ref_type.to())));
try emitLimits(gpa, binary_bytes, table_import.limits());
}
total_imports += wasm.table_imports.entries.len;
@ -650,17 +649,17 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void {

for (f.global_imports.values()) |id| {
const module_name = id.moduleName(wasm).slice(wasm).?;
try leb.writeUleb128(binary_writer, @as(u32, @intCast(module_name.len)));
try binary_writer.writeAll(module_name);
try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(module_name.len)));
try binary_bytes.appendSlice(gpa, module_name);

const name = id.importName(wasm).slice(wasm);
try leb.writeUleb128(binary_writer, @as(u32, @intCast(name.len)));
try binary_writer.writeAll(name);
try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(name.len)));
try binary_bytes.appendSlice(gpa, name);

try binary_writer.writeByte(@intFromEnum(std.wasm.ExternalKind.global));
try binary_bytes.append(gpa, @intFromEnum(std.wasm.ExternalKind.global));
const global_type = id.globalType(wasm);
try leb.writeUleb128(binary_writer, @intFromEnum(@as(std.wasm.Valtype, global_type.valtype)));
try binary_writer.writeByte(@intFromBool(global_type.mutable));
try appendLeb128(gpa, binary_bytes, @intFromEnum(@as(std.wasm.Valtype, global_type.valtype)));
try binary_bytes.append(gpa, @intFromBool(global_type.mutable));
}
total_imports += f.global_imports.entries.len;

@ -677,7 +676,7 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void {
const header_offset = try reserveVecSectionHeader(gpa, binary_bytes);
for (wasm.functions.keys()) |function| {
const index: FuncTypeIndex = .fromTypeIndex(function.typeIndex(wasm), f);
try leb.writeUleb128(binary_writer, @intFromEnum(index));
try appendLeb128(gpa, binary_bytes, @intFromEnum(index));
}

replaceVecSectionHeader(binary_bytes, header_offset, .function, @intCast(wasm.functions.count()));
@ -689,7 +688,7 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void {
const header_offset = try reserveVecSectionHeader(gpa, binary_bytes);

for (wasm.tables.keys()) |table| {
try leb.writeUleb128(binary_writer, @intFromEnum(@as(std.wasm.RefType, table.refType(wasm))));
try appendLeb128(gpa, binary_bytes, @intFromEnum(@as(std.wasm.RefType, table.refType(wasm))));
try emitLimits(gpa, binary_bytes, table.limits(wasm));
}

@ -743,39 +742,39 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void {

for (wasm.function_exports.keys(), wasm.function_exports.values()) |exp_name, function_index| {
const name = exp_name.slice(wasm);
try leb.writeUleb128(binary_writer, @as(u32, @intCast(name.len)));
try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(name.len)));
try binary_bytes.appendSlice(gpa, name);
try binary_bytes.append(gpa, @intFromEnum(std.wasm.ExternalKind.function));
const func_index = Wasm.OutputFunctionIndex.fromFunctionIndex(wasm, function_index);
try leb.writeUleb128(binary_writer, @intFromEnum(func_index));
try appendLeb128(gpa, binary_bytes, @intFromEnum(func_index));
}
exports_len += wasm.function_exports.entries.len;

if (wasm.export_table and f.indirect_function_table.entries.len > 0) {
const name = "__indirect_function_table";
const index: u32 = @intCast(wasm.tables.getIndex(.__indirect_function_table).?);
try leb.writeUleb128(binary_writer, @as(u32, @intCast(name.len)));
try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(name.len)));
try binary_bytes.appendSlice(gpa, name);
try binary_bytes.append(gpa, @intFromEnum(std.wasm.ExternalKind.table));
try leb.writeUleb128(binary_writer, index);
try appendLeb128(gpa, binary_bytes, index);
exports_len += 1;
}

if (export_memory) {
const name = "memory";
try leb.writeUleb128(binary_writer, @as(u32, @intCast(name.len)));
try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(name.len)));
try binary_bytes.appendSlice(gpa, name);
try binary_bytes.append(gpa, @intFromEnum(std.wasm.ExternalKind.memory));
try leb.writeUleb128(binary_writer, @as(u32, 0));
try appendLeb128(gpa, binary_bytes, @as(u32, 0));
exports_len += 1;
}

for (wasm.global_exports.items) |exp| {
const name = exp.name.slice(wasm);
try leb.writeUleb128(binary_writer, @as(u32, @intCast(name.len)));
try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(name.len)));
try binary_bytes.appendSlice(gpa, name);
try binary_bytes.append(gpa, @intFromEnum(std.wasm.ExternalKind.global));
try leb.writeUleb128(binary_writer, @intFromEnum(exp.global_index));
try appendLeb128(gpa, binary_bytes, @intFromEnum(exp.global_index));
}
exports_len += wasm.global_exports.items.len;

@ -802,18 +801,22 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void {
const table_index: u32 = @intCast(wasm.tables.getIndex(.__indirect_function_table).?);
// passive with implicit 0-index table or set table index manually
const flags: u32 = if (table_index == 0) 0x0 else 0x02;
try leb.writeUleb128(binary_writer, flags);
try appendLeb128(gpa, binary_bytes, flags);
if (flags == 0x02) {
try leb.writeUleb128(binary_writer, table_index);
try appendLeb128(gpa, binary_bytes, table_index);
}
// We start at index 1, so unresolved function pointers are invalid
try emitInit(binary_writer, .{ .i32_const = 1 });
if (flags == 0x02) {
try leb.writeUleb128(binary_writer, @as(u8, 0)); // represents funcref
{
var aw: std.Io.Writer.Allocating = .fromArrayList(gpa, binary_bytes);
defer binary_bytes.* = aw.toArrayList();
try emitInit(&aw.writer, .{ .i32_const = 1 });
}
try leb.writeUleb128(binary_writer, @as(u32, @intCast(f.indirect_function_table.entries.len)));
if (flags == 0x02) {
try appendLeb128(gpa, binary_bytes, @as(u8, 0)); // represents funcref
}
try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(f.indirect_function_table.entries.len)));
for (f.indirect_function_table.keys()) |func_index| {
try leb.writeUleb128(binary_writer, @intFromEnum(func_index));
try appendLeb128(gpa, binary_bytes, @intFromEnum(func_index));
}

replaceVecSectionHeader(binary_bytes, header_offset, .element, 1);
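The element and data segment hunks above now route emitInit output through std.Io.Writer.Allocating: the ArrayList(u8) is temporarily adopted via .fromArrayList and handed back with toArrayList in a defer, so the list stays the owner of the memory. A minimal sketch of that adoption pattern, using a hypothetical appendInit helper that is not part of this commit:

// Hedged sketch of the adoption pattern used above; `appendInit` is a
// hypothetical helper, not part of this commit.
fn appendInit(gpa: Allocator, binary_bytes: *ArrayList(u8), addr: i32) !void {
    // Adopt the existing buffer so writes grow it through `gpa`.
    var aw: std.Io.Writer.Allocating = .fromArrayList(gpa, binary_bytes);
    // Hand the (possibly reallocated) buffer back on every exit path.
    defer binary_bytes.* = aw.toArrayList();
    try emitInit(&aw.writer, .{ .i32_const = addr });
}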
@ -851,7 +854,7 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void {
.object_function => |i| {
const ptr = i.ptr(wasm);
const code = ptr.code.slice(wasm);
try leb.writeUleb128(binary_writer, code.len);
try appendLeb128(gpa, binary_bytes, code.len);
const code_start = binary_bytes.items.len;
try binary_bytes.appendSlice(gpa, code);
if (!is_obj) applyRelocs(binary_bytes.items[code_start..], ptr.offset, ptr.relocations(wasm), wasm);
@ -946,12 +949,14 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void {
const group_size = group_end_addr - group_start_addr;
log.debug("emit data section group, {d} bytes", .{group_size});
const flags: Object.DataSegmentFlags = if (segment_id.isPassive(wasm)) .passive else .active;
try leb.writeUleb128(binary_writer, @intFromEnum(flags));
try appendLeb128(gpa, binary_bytes, @intFromEnum(flags));
// Passive segments are initialized at runtime.
if (flags != .passive) {
try emitInit(binary_writer, .{ .i32_const = @as(i32, @bitCast(group_start_addr)) });
var aw: std.Io.Writer.Allocating = .fromArrayList(gpa, binary_bytes);
defer binary_bytes.* = aw.toArrayList();
try emitInit(&aw.writer, .{ .i32_const = @as(i32, @bitCast(group_start_addr)) });
}
try leb.writeUleb128(binary_writer, group_size);
try appendLeb128(gpa, binary_bytes, group_size);
}
if (segment_id.isEmpty(wasm)) {
// It counted for virtual memory but it does not go into the binary.
@ -1077,7 +1082,7 @@ const VirtualAddrs = struct {
fn emitNameSection(
wasm: *Wasm,
data_segment_groups: []const DataSegmentGroup,
binary_bytes: *std.ArrayListUnmanaged(u8),
binary_bytes: *ArrayList(u8),
) !void {
const f = &wasm.flush_buffer;
const comp = wasm.base.comp;
@ -1087,7 +1092,7 @@ fn emitNameSection(
defer writeCustomSectionHeader(binary_bytes, header_offset);

const name_name = "name";
try leb.writeUleb128(binary_bytes.writer(gpa), @as(u32, name_name.len));
try appendLeb128(gpa, binary_bytes, @as(u32, name_name.len));
try binary_bytes.appendSlice(gpa, name_name);

{
@ -1095,18 +1100,18 @@ fn emitNameSection(
defer replaceHeader(binary_bytes, sub_offset, @intFromEnum(std.wasm.NameSubsection.function));

const total_functions: u32 = @intCast(f.function_imports.entries.len + wasm.functions.entries.len);
try leb.writeUleb128(binary_bytes.writer(gpa), total_functions);
try appendLeb128(gpa, binary_bytes, total_functions);

for (f.function_imports.keys(), 0..) |name_index, function_index| {
const name = name_index.slice(wasm);
try leb.writeUleb128(binary_bytes.writer(gpa), @as(u32, @intCast(function_index)));
try leb.writeUleb128(binary_bytes.writer(gpa), @as(u32, @intCast(name.len)));
try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(function_index)));
try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(name.len)));
try binary_bytes.appendSlice(gpa, name);
}
for (wasm.functions.keys(), f.function_imports.entries.len..) |resolution, function_index| {
const name = resolution.name(wasm).?;
try leb.writeUleb128(binary_bytes.writer(gpa), @as(u32, @intCast(function_index)));
try leb.writeUleb128(binary_bytes.writer(gpa), @as(u32, @intCast(name.len)));
try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(function_index)));
try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(name.len)));
try binary_bytes.appendSlice(gpa, name);
}
}
@ -1116,18 +1121,18 @@ fn emitNameSection(
defer replaceHeader(binary_bytes, sub_offset, @intFromEnum(std.wasm.NameSubsection.global));

const total_globals: u32 = @intCast(f.global_imports.entries.len + wasm.globals.entries.len);
try leb.writeUleb128(binary_bytes.writer(gpa), total_globals);
try appendLeb128(gpa, binary_bytes, total_globals);

for (f.global_imports.keys(), 0..) |name_index, global_index| {
const name = name_index.slice(wasm);
try leb.writeUleb128(binary_bytes.writer(gpa), @as(u32, @intCast(global_index)));
try leb.writeUleb128(binary_bytes.writer(gpa), @as(u32, @intCast(name.len)));
try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(global_index)));
try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(name.len)));
try binary_bytes.appendSlice(gpa, name);
}
for (wasm.globals.keys(), f.global_imports.entries.len..) |resolution, global_index| {
const name = resolution.name(wasm).?;
try leb.writeUleb128(binary_bytes.writer(gpa), @as(u32, @intCast(global_index)));
try leb.writeUleb128(binary_bytes.writer(gpa), @as(u32, @intCast(name.len)));
try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(global_index)));
try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(name.len)));
try binary_bytes.appendSlice(gpa, name);
}
}
@ -1137,12 +1142,12 @@ fn emitNameSection(
defer replaceHeader(binary_bytes, sub_offset, @intFromEnum(std.wasm.NameSubsection.data_segment));

const total_data_segments: u32 = @intCast(data_segment_groups.len);
try leb.writeUleb128(binary_bytes.writer(gpa), total_data_segments);
try appendLeb128(gpa, binary_bytes, total_data_segments);

for (data_segment_groups, 0..) |group, i| {
const name, _ = splitSegmentName(group.first_segment.name(wasm));
try leb.writeUleb128(binary_bytes.writer(gpa), @as(u32, @intCast(i)));
try leb.writeUleb128(binary_bytes.writer(gpa), @as(u32, @intCast(name.len)));
try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(i)));
try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(name.len)));
try binary_bytes.appendSlice(gpa, name);
}
}
@ -1150,7 +1155,7 @@ fn emitNameSection(

fn emitFeaturesSection(
gpa: Allocator,
binary_bytes: *std.ArrayListUnmanaged(u8),
binary_bytes: *ArrayList(u8),
target: *const std.Target,
) Allocator.Error!void {
const feature_count = target.cpu.features.count();
@ -1159,87 +1164,84 @@ fn emitFeaturesSection(
const header_offset = try reserveCustomSectionHeader(gpa, binary_bytes);
defer writeCustomSectionHeader(binary_bytes, header_offset);

const writer = binary_bytes.writer(gpa);
const target_features = "target_features";
try leb.writeUleb128(writer, @as(u32, @intCast(target_features.len)));
try writer.writeAll(target_features);
try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(target_features.len)));
try binary_bytes.appendSlice(gpa, target_features);

try leb.writeUleb128(writer, @as(u32, @intCast(feature_count)));
try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(feature_count)));

var safety_count = feature_count;
for (target.cpu.arch.allFeaturesList(), 0..) |*feature, i| {
if (!target.cpu.has(.wasm, @as(std.Target.wasm.Feature, @enumFromInt(i)))) continue;
safety_count -= 1;

try leb.writeUleb128(writer, @as(u32, '+'));
try appendLeb128(gpa, binary_bytes, @as(u32, '+'));
// Depends on llvm_name for the hyphenated version that matches wasm tooling conventions.
const name = feature.llvm_name.?;
try leb.writeUleb128(writer, @as(u32, @intCast(name.len)));
try writer.writeAll(name);
try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(name.len)));
try binary_bytes.appendSlice(gpa, name);
}
assert(safety_count == 0);
}

fn emitBuildIdSection(gpa: Allocator, binary_bytes: *std.ArrayListUnmanaged(u8), build_id: []const u8) !void {
fn emitBuildIdSection(gpa: Allocator, binary_bytes: *ArrayList(u8), build_id: []const u8) !void {
const header_offset = try reserveCustomSectionHeader(gpa, binary_bytes);
defer writeCustomSectionHeader(binary_bytes, header_offset);

const writer = binary_bytes.writer(gpa);
const hdr_build_id = "build_id";
try leb.writeUleb128(writer, @as(u32, @intCast(hdr_build_id.len)));
try writer.writeAll(hdr_build_id);
try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(hdr_build_id.len)));
try binary_bytes.appendSlice(gpa, hdr_build_id);

try leb.writeUleb128(writer, @as(u32, 1));
try leb.writeUleb128(writer, @as(u32, @intCast(build_id.len)));
try writer.writeAll(build_id);
try appendLeb128(gpa, binary_bytes, @as(u32, 1));
try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(build_id.len)));
try binary_bytes.appendSlice(gpa, build_id);
}

fn emitProducerSection(gpa: Allocator, binary_bytes: *std.ArrayListUnmanaged(u8)) !void {
fn emitProducerSection(gpa: Allocator, binary_bytes: *ArrayList(u8)) !void {
const header_offset = try reserveCustomSectionHeader(gpa, binary_bytes);
defer writeCustomSectionHeader(binary_bytes, header_offset);

const writer = binary_bytes.writer(gpa);
const producers = "producers";
try leb.writeUleb128(writer, @as(u32, @intCast(producers.len)));
try writer.writeAll(producers);
try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(producers.len)));
try binary_bytes.appendSlice(gpa, producers);

try leb.writeUleb128(writer, @as(u32, 2)); // 2 fields: Language + processed-by
try appendLeb128(gpa, binary_bytes, @as(u32, 2)); // 2 fields: Language + processed-by

// language field
{
const language = "language";
try leb.writeUleb128(writer, @as(u32, @intCast(language.len)));
try writer.writeAll(language);
try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(language.len)));
try binary_bytes.appendSlice(gpa, language);

// field_value_count (TODO: Parse object files for producer sections to detect their language)
try leb.writeUleb128(writer, @as(u32, 1));
try appendLeb128(gpa, binary_bytes, @as(u32, 1));

// versioned name
{
try leb.writeUleb128(writer, @as(u32, 3)); // len of "Zig"
try writer.writeAll("Zig");
try appendLeb128(gpa, binary_bytes, @as(u32, 3)); // len of "Zig"
try binary_bytes.appendSlice(gpa, "Zig");

try leb.writeUleb128(writer, @as(u32, @intCast(build_options.version.len)));
try writer.writeAll(build_options.version);
try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(build_options.version.len)));
try binary_bytes.appendSlice(gpa, build_options.version);
}
}

// processed-by field
{
const processed_by = "processed-by";
try leb.writeUleb128(writer, @as(u32, @intCast(processed_by.len)));
try writer.writeAll(processed_by);
try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(processed_by.len)));
try binary_bytes.appendSlice(gpa, processed_by);

// field_value_count (TODO: Parse object files for producer sections to detect other used tools)
try leb.writeUleb128(writer, @as(u32, 1));
try appendLeb128(gpa, binary_bytes, @as(u32, 1));

// versioned name
{
try leb.writeUleb128(writer, @as(u32, 3)); // len of "Zig"
try writer.writeAll("Zig");
try appendLeb128(gpa, binary_bytes, @as(u32, 3)); // len of "Zig"
try binary_bytes.appendSlice(gpa, "Zig");

try leb.writeUleb128(writer, @as(u32, @intCast(build_options.version.len)));
try writer.writeAll(build_options.version);
try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(build_options.version.len)));
try binary_bytes.appendSlice(gpa, build_options.version);
}
}
}
@ -1280,99 +1282,97 @@ fn wantSegmentMerge(
const section_header_reserve_size = 1 + 5 + 5;
const section_header_size = 5 + 1;

fn reserveVecSectionHeader(gpa: Allocator, bytes: *std.ArrayListUnmanaged(u8)) Allocator.Error!u32 {
fn reserveVecSectionHeader(gpa: Allocator, bytes: *ArrayList(u8)) Allocator.Error!u32 {
try bytes.appendNTimes(gpa, 0, section_header_reserve_size);
return @intCast(bytes.items.len - section_header_reserve_size);
}

fn replaceVecSectionHeader(
bytes: *std.ArrayListUnmanaged(u8),
bytes: *ArrayList(u8),
offset: u32,
section: std.wasm.Section,
n_items: u32,
) void {
const size: u32 = @intCast(bytes.items.len - offset - section_header_reserve_size + uleb128size(n_items));
var buf: [section_header_reserve_size]u8 = undefined;
var fbw = std.io.fixedBufferStream(&buf);
const w = fbw.writer();
var w: std.Io.Writer = .fixed(&buf);
w.writeByte(@intFromEnum(section)) catch unreachable;
leb.writeUleb128(w, size) catch unreachable;
leb.writeUleb128(w, n_items) catch unreachable;
bytes.replaceRangeAssumeCapacity(offset, section_header_reserve_size, fbw.getWritten());
w.writeUleb128(size) catch unreachable;
w.writeUleb128(n_items) catch unreachable;
bytes.replaceRangeAssumeCapacity(offset, section_header_reserve_size, w.buffered());
}

fn reserveCustomSectionHeader(gpa: Allocator, bytes: *std.ArrayListUnmanaged(u8)) Allocator.Error!u32 {
fn reserveCustomSectionHeader(gpa: Allocator, bytes: *ArrayList(u8)) Allocator.Error!u32 {
try bytes.appendNTimes(gpa, 0, section_header_size);
return @intCast(bytes.items.len - section_header_size);
}

fn writeCustomSectionHeader(bytes: *std.ArrayListUnmanaged(u8), offset: u32) void {
fn writeCustomSectionHeader(bytes: *ArrayList(u8), offset: u32) void {
return replaceHeader(bytes, offset, 0); // 0 = 'custom' section
}

fn replaceHeader(bytes: *std.ArrayListUnmanaged(u8), offset: u32, tag: u8) void {
fn replaceHeader(bytes: *ArrayList(u8), offset: u32, tag: u8) void {
const size: u32 = @intCast(bytes.items.len - offset - section_header_size);
var buf: [section_header_size]u8 = undefined;
var fbw = std.io.fixedBufferStream(&buf);
const w = fbw.writer();
var w: std.Io.Writer = .fixed(&buf);
w.writeByte(tag) catch unreachable;
leb.writeUleb128(w, size) catch unreachable;
bytes.replaceRangeAssumeCapacity(offset, section_header_size, fbw.getWritten());
w.writeUleb128(size) catch unreachable;
bytes.replaceRangeAssumeCapacity(offset, section_header_size, w.buffered());
}

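These helpers keep the existing reserve-then-patch scheme: a run of zero bytes is appended where the section header will go, and once the payload length is known the header is encoded into a small fixed buffer (now via std.Io.Writer.fixed and w.buffered()) and spliced back in with replaceRangeAssumeCapacity. A related trick appears in reloc_leb_type further down, which uses leb.writeUnsignedFixed(5, ...) so a patched value never changes the slot's byte length. A hedged sketch of that padded five-byte encoding, not code from this commit:

// Illustrative only: encode a u32 as exactly five LEB128 bytes, the padded
// form that lets a reserved slot be overwritten without shifting anything.
fn writeFixedUleb5(slot: *[5]u8, value: u32) void {
    var v = value;
    for (slot[0..4]) |*b| {
        b.* = @as(u8, @truncate(v & 0x7f)) | 0x80; // keep the continuation bit set
        v >>= 7;
    }
    slot[4] = @truncate(v & 0x7f); // final byte clears the continuation bit
}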
const max_size_encoding = 5;

fn reserveSize(gpa: Allocator, bytes: *std.ArrayListUnmanaged(u8)) Allocator.Error!u32 {
fn reserveSize(gpa: Allocator, bytes: *ArrayList(u8)) Allocator.Error!u32 {
try bytes.appendNTimes(gpa, 0, max_size_encoding);
return @intCast(bytes.items.len - max_size_encoding);
}

fn replaceSize(bytes: *std.ArrayListUnmanaged(u8), offset: u32) void {
fn replaceSize(bytes: *ArrayList(u8), offset: u32) void {
const size: u32 = @intCast(bytes.items.len - offset - max_size_encoding);
var buf: [max_size_encoding]u8 = undefined;
var fbw = std.io.fixedBufferStream(&buf);
leb.writeUleb128(fbw.writer(), size) catch unreachable;
bytes.replaceRangeAssumeCapacity(offset, max_size_encoding, fbw.getWritten());
var w: std.Io.Writer = .fixed(&buf);
w.writeUleb128(size) catch unreachable;
bytes.replaceRangeAssumeCapacity(offset, max_size_encoding, w.buffered());
}

fn emitLimits(
gpa: Allocator,
binary_bytes: *std.ArrayListUnmanaged(u8),
binary_bytes: *ArrayList(u8),
limits: std.wasm.Limits,
) Allocator.Error!void {
try binary_bytes.append(gpa, @bitCast(limits.flags));
try leb.writeUleb128(binary_bytes.writer(gpa), limits.min);
if (limits.flags.has_max) try leb.writeUleb128(binary_bytes.writer(gpa), limits.max);
try appendLeb128(gpa, binary_bytes, limits.min);
if (limits.flags.has_max) try appendLeb128(gpa, binary_bytes, limits.max);
}

fn emitMemoryImport(
wasm: *Wasm,
binary_bytes: *std.ArrayListUnmanaged(u8),
binary_bytes: *ArrayList(u8),
name_index: String,
memory_import: *const Wasm.MemoryImport,
) Allocator.Error!void {
const gpa = wasm.base.comp.gpa;
const module_name = memory_import.module_name.slice(wasm);
try leb.writeUleb128(binary_bytes.writer(gpa), @as(u32, @intCast(module_name.len)));
try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(module_name.len)));
try binary_bytes.appendSlice(gpa, module_name);

const name = name_index.slice(wasm);
try leb.writeUleb128(binary_bytes.writer(gpa), @as(u32, @intCast(name.len)));
try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(name.len)));
try binary_bytes.appendSlice(gpa, name);

try binary_bytes.append(gpa, @intFromEnum(std.wasm.ExternalKind.memory));
try emitLimits(gpa, binary_bytes, memory_import.limits());
}

pub fn emitInit(writer: anytype, init_expr: std.wasm.InitExpression) !void {
fn emitInit(writer: *std.Io.Writer, init_expr: std.wasm.InitExpression) !void {
switch (init_expr) {
.i32_const => |val| {
try writer.writeByte(@intFromEnum(std.wasm.Opcode.i32_const));
try leb.writeIleb128(writer, val);
try writer.writeSleb128(val);
},
.i64_const => |val| {
try writer.writeByte(@intFromEnum(std.wasm.Opcode.i64_const));
try leb.writeIleb128(writer, val);
try writer.writeSleb128(val);
},
.f32_const => |val| {
try writer.writeByte(@intFromEnum(std.wasm.Opcode.f32_const));
@ -1384,13 +1384,13 @@ pub fn emitInit(writer: anytype, init_expr: std.wasm.InitExpression) !void {
},
.global_get => |val| {
try writer.writeByte(@intFromEnum(std.wasm.Opcode.global_get));
try leb.writeUleb128(writer, val);
try writer.writeUleb128(val);
},
}
try writer.writeByte(@intFromEnum(std.wasm.Opcode.end));
}

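emitInit now takes a concrete *std.Io.Writer instead of anytype, but the bytes it produces are unchanged: a constant opcode, the LEB128-encoded operand, then the end opcode. As a hedged illustration of the wasm encoding (not code from this commit), the element segment's offset expression "i32.const 1; end" written earlier comes out as three bytes:

// i32.const opcode, SLEB128(1), end opcode
const init_expr_bytes = [_]u8{ 0x41, 0x01, 0x0B };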
pub fn emitExpr(wasm: *const Wasm, binary_bytes: *std.ArrayListUnmanaged(u8), expr: Wasm.Expr) Allocator.Error!void {
pub fn emitExpr(wasm: *const Wasm, binary_bytes: *ArrayList(u8), expr: Wasm.Expr) Allocator.Error!void {
const gpa = wasm.base.comp.gpa;
const slice = expr.slice(wasm);
try binary_bytes.appendSlice(gpa, slice[0 .. slice.len + 1]); // +1 to include end opcode
@ -1398,21 +1398,20 @@ pub fn emitExpr(wasm: *const Wasm, binary_bytes: *std.ArrayListUnmanaged(u8), ex

fn emitSegmentInfo(wasm: *Wasm, binary_bytes: *std.array_list.Managed(u8)) !void {
const gpa = wasm.base.comp.gpa;
const writer = binary_bytes.writer(gpa);
try leb.writeUleb128(writer, @intFromEnum(Wasm.SubsectionType.segment_info));
try appendLeb128(gpa, binary_bytes, @intFromEnum(Wasm.SubsectionType.segment_info));
const segment_offset = binary_bytes.items.len;

try leb.writeUleb128(writer, @as(u32, @intCast(wasm.segment_info.count())));
try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(wasm.segment_info.count())));
for (wasm.segment_info.values()) |segment_info| {
log.debug("Emit segment: {s} align({d}) flags({b})", .{
segment_info.name,
segment_info.alignment,
segment_info.flags,
});
try leb.writeUleb128(writer, @as(u32, @intCast(segment_info.name.len)));
try writer.writeAll(segment_info.name);
try leb.writeUleb128(writer, segment_info.alignment.toLog2Units());
try leb.writeUleb128(writer, segment_info.flags);
try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(segment_info.name.len)));
try binary_bytes.appendSlice(gpa, segment_info.name);
try appendLeb128(gpa, binary_bytes, segment_info.alignment.toLog2Units());
try appendLeb128(gpa, binary_bytes, segment_info.flags);
}

var buf: [5]u8 = undefined;
@ -1429,7 +1428,7 @@ fn uleb128size(x: u32) u32 {

fn emitTagNameTable(
gpa: Allocator,
code: *std.ArrayListUnmanaged(u8),
code: *ArrayList(u8),
tag_name_offs: []const u32,
tag_name_bytes: []const u8,
base: u32,
@ -1604,7 +1603,7 @@ fn reloc_leb_type(code: []u8, index: FuncTypeIndex) void {
leb.writeUnsignedFixed(5, code[0..5], @intFromEnum(index));
}

fn emitCallCtorsFunction(wasm: *const Wasm, binary_bytes: *std.ArrayListUnmanaged(u8)) Allocator.Error!void {
fn emitCallCtorsFunction(wasm: *const Wasm, binary_bytes: *ArrayList(u8)) Allocator.Error!void {
const gpa = wasm.base.comp.gpa;

try binary_bytes.ensureUnusedCapacity(gpa, 5 + 1);
@ -1631,7 +1630,7 @@ fn emitCallCtorsFunction(wasm: *const Wasm, binary_bytes: *std.ArrayListUnmanage

fn emitInitMemoryFunction(
wasm: *const Wasm,
binary_bytes: *std.ArrayListUnmanaged(u8),
binary_bytes: *ArrayList(u8),
virtual_addrs: *const VirtualAddrs,
) Allocator.Error!void {
const comp = wasm.base.comp;
@ -1734,7 +1733,7 @@ fn emitInitMemoryFunction(
// notify any waiters for segment initialization completion
appendReservedI32Const(binary_bytes, flag_address);
binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_const));
leb.writeIleb128(binary_bytes.fixedWriter(), @as(i32, -1)) catch unreachable; // number of waiters
appendReservedLeb128(binary_bytes, @as(i32, -1)); // number of waiters
binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.atomics_prefix));
appendReservedUleb32(binary_bytes, @intFromEnum(std.wasm.AtomicsOpcode.memory_atomic_notify));
appendReservedUleb32(binary_bytes, @as(u32, 2)); // alignment
@ -1750,7 +1749,7 @@ fn emitInitMemoryFunction(
appendReservedI32Const(binary_bytes, flag_address);
appendReservedI32Const(binary_bytes, 1); // expected flag value
binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i64_const));
leb.writeIleb128(binary_bytes.fixedWriter(), @as(i64, -1)) catch unreachable; // timeout
appendReservedLeb128(binary_bytes, @as(i64, -1)); // timeout
binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.atomics_prefix));
appendReservedUleb32(binary_bytes, @intFromEnum(std.wasm.AtomicsOpcode.memory_atomic_wait32));
appendReservedUleb32(binary_bytes, @as(u32, 2)); // alignment
@ -1779,7 +1778,7 @@ fn emitInitMemoryFunction(
binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.end));
}

fn emitInitTlsFunction(wasm: *const Wasm, bytes: *std.ArrayListUnmanaged(u8)) Allocator.Error!void {
fn emitInitTlsFunction(wasm: *const Wasm, bytes: *ArrayList(u8)) Allocator.Error!void {
const comp = wasm.base.comp;
const gpa = comp.gpa;

@ -1840,14 +1839,14 @@ fn emitInitTlsFunction(wasm: *const Wasm, bytes: *std.ArrayListUnmanaged(u8)) Al
bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.end));
}

fn emitStartSection(gpa: Allocator, bytes: *std.ArrayListUnmanaged(u8), i: Wasm.OutputFunctionIndex) !void {
fn emitStartSection(gpa: Allocator, bytes: *ArrayList(u8), i: Wasm.OutputFunctionIndex) !void {
const header_offset = try reserveVecSectionHeader(gpa, bytes);
replaceVecSectionHeader(bytes, header_offset, .start, @intFromEnum(i));
}

fn emitTagNameFunction(
wasm: *Wasm,
code: *std.ArrayListUnmanaged(u8),
code: *ArrayList(u8),
table_base_addr: u32,
table_index: u32,
enum_type_ip: InternPool.Index,
@ -1959,22 +1958,34 @@ fn emitTagNameFunction(
}

/// Writes an unsigned 32-bit integer as a LEB128-encoded 'i32.const' value.
fn appendReservedI32Const(bytes: *std.ArrayListUnmanaged(u8), val: u32) void {
fn appendReservedI32Const(bytes: *ArrayList(u8), val: u32) void {
bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_const));
leb.writeIleb128(bytes.fixedWriter(), @as(i32, @bitCast(val))) catch unreachable;
var w: std.Io.Writer = .fromArrayList(bytes);
defer bytes.* = w.toArrayList();
return w.writeSleb128(val) catch |err| switch (err) {
error.WriteFailed => unreachable,
};
}

/// Writes an unsigned 64-bit integer as a LEB128-encoded 'i64.const' value.
fn appendReservedI64Const(bytes: *std.ArrayListUnmanaged(u8), val: u64) void {
fn appendReservedI64Const(bytes: *ArrayList(u8), val: u64) void {
bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i64_const));
leb.writeIleb128(bytes.fixedWriter(), @as(i64, @bitCast(val))) catch unreachable;
var w: std.Io.Writer = .fromArrayList(bytes);
defer bytes.* = w.toArrayList();
return w.writeSleb128(val) catch |err| switch (err) {
error.WriteFailed => unreachable,
};
}

fn appendReservedUleb32(bytes: *std.ArrayListUnmanaged(u8), val: u32) void {
leb.writeUleb128(bytes.fixedWriter(), val) catch unreachable;
fn appendReservedUleb32(bytes: *ArrayList(u8), val: u32) void {
var w: std.Io.Writer = .fromArrayList(bytes);
defer bytes.* = w.toArrayList();
return w.writeUleb128(val) catch |err| switch (err) {
error.WriteFailed => unreachable,
};
}

fn appendGlobal(gpa: Allocator, bytes: *std.ArrayListUnmanaged(u8), mutable: u8, val: u32) Allocator.Error!void {
fn appendGlobal(gpa: Allocator, bytes: *ArrayList(u8), mutable: u8, val: u32) Allocator.Error!void {
try bytes.ensureUnusedCapacity(gpa, 9);
bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Valtype.i32));
bytes.appendAssumeCapacity(mutable);
@ -1982,3 +1993,19 @@ fn appendGlobal(gpa: Allocator, bytes: *std.ArrayListUnmanaged(u8), mutable: u8,
appendReservedUleb32(bytes, val);
bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.end));
}

fn appendLeb128(gpa: Allocator, bytes: *ArrayList(u8), value: anytype) Allocator.Error!void {
var aw: std.Io.Writer.Allocating = .fromArrayList(gpa, bytes);
defer bytes.* = aw.toArrayList();
return aw.writer.writeLeb128(value) catch |err| switch (err) {
error.WriteFailed => return error.OutOfMemory,
};
}

fn appendReservedLeb128(bytes: *ArrayList(u8), value: anytype) void {
var w: std.Io.Writer = .fromArrayList(bytes);
defer bytes.* = w.toArrayList();
return w.writeLeb128(value) catch |err| switch (err) {
error.WriteFailed => unreachable,
};
}

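The new appendLeb128 and appendReservedLeb128 helpers centralize what the deleted binary_bytes.writer(gpa) and fixedWriter() call sites did by hand: grow the list (or assume reserved capacity) and append a LEB128 encoding. The encoding itself is the usual base-128 scheme; the standalone function below is a hedged, illustrative sketch of the unsigned case and is not part of this commit.

// Illustrative only: ULEB128 packs 7 bits per byte, least significant
// first, setting the high bit on every byte except the last.
// 624485 encodes to 0xE5 0x8E 0x26.
fn uleb128(buf: []u8, value: u64) usize {
    var v = value;
    var i: usize = 0;
    while (true) : (i += 1) {
        const byte: u8 = @truncate(v & 0x7f);
        v >>= 7;
        if (v == 0) {
            buf[i] = byte;
            return i + 1;
        }
        buf[i] = byte | 0x80;
    }
}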
@ -9,29 +9,28 @@ pub fn writeSetSub6(comptime op: enum { set, sub }, code: *[1]u8, addend: anytyp
mem.writeInt(u8, code, value, .little);
}

pub fn writeSetSubUleb(comptime op: enum { set, sub }, stream: *std.io.FixedBufferStream([]u8), addend: i64) !void {
switch (op) {
.set => try overwriteUleb(stream, @intCast(addend)),
.sub => {
const position = try stream.getPos();
const value: u64 = try std.leb.readUleb128(u64, stream.reader());
try stream.seekTo(position);
try overwriteUleb(stream, value -% @as(u64, @intCast(addend)));
},
}
pub fn writeSubUleb(code: []u8, addend: i64) void {
var reader: std.Io.Reader = .fixed(code);
const value = reader.takeLeb128(u64) catch unreachable;
overwriteUleb(code, value -% @as(u64, @intCast(addend)));
}

fn overwriteUleb(stream: *std.io.FixedBufferStream([]u8), addend: u64) !void {
pub fn writeSetUleb(code: []u8, addend: i64) void {
overwriteUleb(code, @intCast(addend));
}

fn overwriteUleb(code: []u8, addend: u64) void {
var value: u64 = addend;
const writer = stream.writer();
var i: usize = 0;

while (true) {
const byte = stream.buffer[stream.pos];
const byte = code[i];
if (byte & 0x80 == 0) break;
try writer.writeByte(0x80 | @as(u8, @truncate(value & 0x7f)));
code[i] = 0x80 | @as(u8, @truncate(value & 0x7f));
i += 1;
value >>= 7;
}
stream.buffer[stream.pos] = @truncate(value & 0x7f);
code[i] = @truncate(value & 0x7f);
}

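The rewritten relocation helpers drop FixedBufferStream entirely: writeSubUleb decodes the existing value with a fixed std.Io.Reader, and overwriteUleb re-encodes the new value into the bytes that are already there, reusing the continuation bits of the old encoding so the patched field keeps its exact byte width. A hypothetical test, assuming it sits next to overwriteUleb in the same file (it is not part of this commit), showing that width preservation:

test "overwriteUleb keeps the original encoded width" {
    // 624485 encodes as the three ULEB128 bytes 0xE5 0x8E 0x26.
    var code = [_]u8{ 0xE5, 0x8E, 0x26 };
    // Re-encode 1 into the same slot: a padded three-byte encoding of 1.
    overwriteUleb(&code, 1);
    try std.testing.expectEqualSlices(u8, &.{ 0x81, 0x80, 0x00 }, &code);
}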
pub fn writeAddend(

@ -4230,7 +4230,7 @@ fn serveUpdateResults(s: *Server, comp: *Compilation) !void {
const decl_name = zir.nullTerminatedString(zir.getDeclaration(resolved.inst).name);

const gop = try files.getOrPut(gpa, resolved.file);
if (!gop.found_existing) try file_name_bytes.writer(gpa).print("{f}\x00", .{file.path.fmt(comp)});
if (!gop.found_existing) try file_name_bytes.print(gpa, "{f}\x00", .{file.path.fmt(comp)});

const codegen_ns = tr.decl_codegen_ns.get(tracked_inst) orelse 0;
const link_ns = tr.decl_link_ns.get(tracked_inst) orelse 0;
@ -7451,7 +7451,7 @@ const Templates = struct {
i += "_NAME".len;
continue;
} else if (std.mem.startsWith(u8, contents[i + 1 ..], "FINGERPRINT")) {
try templates.buffer.writer().print("0x{x}", .{fingerprint.int()});
try templates.buffer.print("0x{x}", .{fingerprint.int()});
i += "_FINGERPRINT".len;
continue;
} else if (std.mem.startsWith(u8, contents[i + 1 ..], "ZIGVER")) {

@ -1075,17 +1075,6 @@ test "assigning packed struct inside another packed struct" {
try expect(S.mem.padding == 0);
}

test "packed struct used as part of anon decl name" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;

const S = packed struct { a: u0 = 0 };
var a: u8 = 0;
_ = &a;
try std.io.null_writer.print("\n{} {}\n", .{ a, S{} });
}

test "packed struct acts as a namespace" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO


@ -200,35 +200,42 @@ test "variadic functions" {
if (builtin.cpu.arch.isSPARC() and builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/23718

const S = struct {
fn printf(list_ptr: *std.array_list.Managed(u8), format: [*:0]const u8, ...) callconv(.c) void {
fn printf(buffer: [*]u8, format: [*:0]const u8, ...) callconv(.c) void {
var ap = @cVaStart();
defer @cVaEnd(&ap);
vprintf(list_ptr, format, &ap);
vprintf(buffer, format, &ap);
}

fn vprintf(
list: *std.array_list.Managed(u8),
format: [*:0]const u8,
ap: *std.builtin.VaList,
) callconv(.c) void {
for (std.mem.span(format)) |c| switch (c) {
fn vprintf(buffer: [*]u8, format: [*:0]const u8, ap: *std.builtin.VaList) callconv(.c) void {
var i: usize = 0;
for (format[0..3]) |byte| switch (byte) {
's' => {
const arg = @cVaArg(ap, [*:0]const u8);
list.writer().print("{s}", .{arg}) catch return;
buffer[i..][0..5].* = arg[0..5].*;
i += 5;
},
'd' => {
const arg = @cVaArg(ap, c_int);
list.writer().print("{d}", .{arg}) catch return;
switch (arg) {
1 => {
buffer[i] = '1';
i += 1;
},
5 => {
buffer[i] = '5';
i += 1;
},
else => unreachable,
}
},
else => unreachable,
};
}
};

var list = std.array_list.Managed(u8).init(std.testing.allocator);
defer list.deinit();
S.printf(&list, "dsd", @as(c_int, 1), @as([*:0]const u8, "hello"), @as(c_int, 5));
try std.testing.expectEqualStrings("1hello5", list.items);
var buffer: [7]u8 = undefined;
S.printf(&buffer, "dsd", @as(c_int, 1), @as([*:0]const u8, "hello"), @as(c_int, 5));
try expect(std.mem.eql(u8, &buffer, "1hello5"));
}

test "copy VaList" {

@ -10,7 +10,8 @@ pub fn main() !void {
dir_name, .{});
const file_name = args.next().?;
const file = try dir.createFile(file_name, .{});
try file.deprecatedWriter().print(
var file_writer = file.writer(&.{});
try file_writer.interface.print(
\\{s}
\\{s}
\\Hello, world!

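This last hunk swaps the removed deprecatedWriter() for the new File.Writer: file.writer(&.{}) takes a caller-supplied buffer (empty here, so writes pass straight through), and formatted output goes through its interface field, a std.Io.Writer. A hedged, self-contained sketch of the same API with a real buffer; the explicit flush is an assumption about how buffered output is completed and is not shown in this diff:

// Sketch under the assumptions noted above; not code from this commit.
const std = @import("std");

pub fn main() !void {
    const file = try std.fs.cwd().createFile("hello.txt", .{});
    defer file.close();
    var buf: [256]u8 = undefined;
    var file_writer = file.writer(&buf);
    try file_writer.interface.print("Hello, {s}!\n", .{"world"});
    try file_writer.interface.flush(); // assumption: buffered writes need an explicit flush
}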