From a72b9d403d036ac0da64ed826c535fff0c142e6a Mon Sep 17 00:00:00 2001 From: Lachlan Easton Date: Sat, 29 Aug 2020 11:02:22 +1000 Subject: [PATCH 01/56] Refactor zig fmt indentation. Remove indent from rendering code and have a stream handle automatic indentation --- lib/std/io.zig | 13 +- lib/std/io/auto_indenting_stream.zig | 135 ++ lib/std/io/change_detection_stream.zig | 58 + lib/std/io/find_byte_out_stream.zig | 44 + lib/std/io/writer.zig | 4 + lib/std/zig/parser_test.zig | 109 +- lib/std/zig/render.zig | 1656 +++++++++++------------- src-self-hosted/main.zig | 9 +- src-self-hosted/stage2.zig | 2 +- 9 files changed, 1113 insertions(+), 917 deletions(-) create mode 100644 lib/std/io/auto_indenting_stream.zig create mode 100644 lib/std/io/change_detection_stream.zig create mode 100644 lib/std/io/find_byte_out_stream.zig diff --git a/lib/std/io.zig b/lib/std/io.zig index e30ed1fa92..1514d80cb0 100644 --- a/lib/std/io.zig +++ b/lib/std/io.zig @@ -169,6 +169,15 @@ pub const BitOutStream = BitWriter; /// Deprecated: use `bitWriter` pub const bitOutStream = bitWriter; +pub const AutoIndentingStream = @import("io/auto_indenting_stream.zig").AutoIndentingStream; +pub const autoIndentingStream = @import("io/auto_indenting_stream.zig").autoIndentingStream; + +pub const ChangeDetectionStream = @import("io/change_detection_stream.zig").ChangeDetectionStream; +pub const changeDetectionStream = @import("io/change_detection_stream.zig").changeDetectionStream; + +pub const FindByteOutStream = @import("io/find_byte_out_stream.zig").FindByteOutStream; +pub const findByteOutStream = @import("io/find_byte_out_stream.zig").findByteOutStream; + pub const Packing = @import("io/serialization.zig").Packing; pub const Serializer = @import("io/serialization.zig").Serializer; @@ -182,10 +191,10 @@ pub const BufferedAtomicFile = @import("io/buffered_atomic_file.zig").BufferedAt pub const StreamSource = @import("io/stream_source.zig").StreamSource; /// A Writer that doesn't write to anything. 
-pub const null_writer = @as(NullWriter, .{ .context = {} }); +pub var null_writer = @as(NullWriter, .{ .context = {} }); /// Deprecated: use `null_writer` -pub const null_out_stream = null_writer; +pub var null_out_stream = null_writer; const NullWriter = Writer(void, error{}, dummyWrite); /// Deprecated: use NullWriter diff --git a/lib/std/io/auto_indenting_stream.zig b/lib/std/io/auto_indenting_stream.zig new file mode 100644 index 0000000000..e7657c1f91 --- /dev/null +++ b/lib/std/io/auto_indenting_stream.zig @@ -0,0 +1,135 @@ +const std = @import("../std.zig"); +const io = std.io; +const mem = std.mem; +const assert = std.debug.assert; + +pub fn AutoIndentingStream(comptime indent_delta: u8, comptime OutStreamType: type) type { + return struct { + const Self = @This(); + pub const Error = OutStreamType.Error; + pub const OutStream = io.Writer(*Self, Error, write); + + out_stream: *OutStreamType, + current_line_empty: bool = true, + indent_stack: [255]u8 = undefined, + indent_stack_top: u8 = 0, + indent_one_shot_count: u8 = 0, // automatically popped when applied + applied_indent: u8 = 0, // the most recently applied indent + indent_next_line: u8 = 0, // not used until the next line + + pub fn init(out_stream: *OutStreamType) Self { + return Self{ .out_stream = out_stream }; + } + + pub fn writer(self: *Self) OutStream { + return .{ .context = self }; + } + + pub fn write(self: *Self, bytes: []const u8) Error!usize { + if (bytes.len == 0) + return @as(usize, 0); + + try self.applyIndent(); + return self.writeNoIndent(bytes); + } + + fn writeNoIndent(self: *Self, bytes: []const u8) Error!usize { + try self.out_stream.outStream().writeAll(bytes); + if (bytes[bytes.len - 1] == '\n') + self.resetLine(); + return bytes.len; + } + + pub fn insertNewline(self: *Self) Error!void { + _ = try self.writeNoIndent("\n"); + } + + fn resetLine(self: *Self) void { + self.current_line_empty = true; + self.indent_next_line = 0; + } + + /// Insert a newline unless the current line is blank + pub fn maybeInsertNewline(self: *Self) Error!void { + if (!self.current_line_empty) + try self.insertNewline(); + } + + /// Push default indentation + pub fn pushIndent(self: *Self) void { + // Doesn't actually write any indentation. Just primes the stream to be able to write the correct indentation if it needs to. 
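+            // The pushed width is the stream's comptime `indent_delta` parameter.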
+ self.pushIndentN(indent_delta); + } + + /// Push an indent of arbitrary width + pub fn pushIndentN(self: *Self, n: u8) void { + assert(self.indent_stack_top < std.math.maxInt(u8)); + self.indent_stack[self.indent_stack_top] = n; + self.indent_stack_top += 1; + } + + /// Push an indent that is automatically popped after being applied + pub fn pushIndentOneShot(self: *Self) void { + self.indent_one_shot_count += 1; + self.pushIndent(); + } + + /// Turns all one-shot indents into regular indents + /// Returns number of indents that must now be manually popped + pub fn lockOneShotIndent(self: *Self) u8 { + var locked_count = self.indent_one_shot_count; + self.indent_one_shot_count = 0; + return locked_count; + } + + /// Push an indent that should not take effect until the next line + pub fn pushIndentNextLine(self: *Self) void { + self.indent_next_line += 1; + self.pushIndent(); + } + + pub fn popIndent(self: *Self) void { + assert(self.indent_stack_top != 0); + self.indent_stack_top -= 1; + self.indent_next_line = std.math.min(self.indent_stack_top, self.indent_next_line); // Tentative indent may have been popped before there was a newline + } + + /// Writes ' ' bytes if the current line is empty + fn applyIndent(self: *Self) Error!void { + const current_indent = self.currentIndent(); + if (self.current_line_empty and current_indent > 0) { + try self.out_stream.outStream().writeByteNTimes(' ', current_indent); + self.applied_indent = current_indent; + } + + self.indent_stack_top -= self.indent_one_shot_count; + self.indent_one_shot_count = 0; + self.current_line_empty = false; + } + + /// Checks to see if the most recent indentation exceeds the currently pushed indents + pub fn isLineOverIndented(self: *Self) bool { + if (self.current_line_empty) return false; + return self.applied_indent > self.currentIndent(); + } + + fn currentIndent(self: *Self) u8 { + var indent_current: u8 = 0; + if (self.indent_stack_top > 0) { + const stack_top = self.indent_stack_top - self.indent_next_line; + for (self.indent_stack[0..stack_top]) |indent| { + indent_current += indent; + } + } + return indent_current; + } + }; +} + +pub fn autoIndentingStream( + comptime indent_delta: u8, + underlying_stream: anytype, +) AutoIndentingStream(indent_delta, @TypeOf(underlying_stream).Child) { + comptime assert(@typeInfo(@TypeOf(underlying_stream)) == .Pointer); + return AutoIndentingStream(indent_delta, @TypeOf(underlying_stream).Child).init(underlying_stream); +} diff --git a/lib/std/io/change_detection_stream.zig b/lib/std/io/change_detection_stream.zig new file mode 100644 index 0000000000..941569320c --- /dev/null +++ b/lib/std/io/change_detection_stream.zig @@ -0,0 +1,58 @@ +const std = @import("../std.zig"); +const io = std.io; +const mem = std.mem; +const assert = std.debug.assert; + +pub fn ChangeDetectionStream(comptime OutStreamType: type) type { + return struct { + const Self = @This(); + pub const Error = OutStreamType.Error; + pub const OutStream = io.OutStream(*Self, Error, write); + + anything_changed: bool = false, + out_stream: *OutStreamType, + source_index: usize, + source: []const u8, + + pub fn init(source: []const u8, out_stream: *OutStreamType) Self { + return Self{ + .out_stream = out_stream, + .source_index = 0, + .source = source, + }; + } + + pub fn outStream(self: *Self) OutStream { + return .{ .context = self }; + } + + fn write(self: *Self, bytes: []const u8) Error!usize { + if (!self.anything_changed) { + const end = self.source_index + bytes.len; + if (end > self.source.len) { + 
self.anything_changed = true; + } else { + const src_slice = self.source[self.source_index..end]; + self.source_index += bytes.len; + if (!mem.eql(u8, bytes, src_slice)) { + self.anything_changed = true; + } + } + } + + return self.out_stream.write(bytes); + } + + pub fn changeDetected(self: *Self) bool { + return self.anything_changed or (self.source_index != self.source.len); + } + }; +} + +pub fn changeDetectionStream( + source: []const u8, + underlying_stream: anytype, +) ChangeDetectionStream(@TypeOf(underlying_stream).Child) { + comptime assert(@typeInfo(@TypeOf(underlying_stream)) == .Pointer); + return ChangeDetectionStream(@TypeOf(underlying_stream).Child).init(source, underlying_stream); +} diff --git a/lib/std/io/find_byte_out_stream.zig b/lib/std/io/find_byte_out_stream.zig new file mode 100644 index 0000000000..e835cbd584 --- /dev/null +++ b/lib/std/io/find_byte_out_stream.zig @@ -0,0 +1,44 @@ +const std = @import("../std.zig"); +const io = std.io; +const assert = std.debug.assert; + +// An OutStream that returns whether the given character has been written to it. +// The contents are not written to anything. +pub fn FindByteOutStream(comptime OutStreamType: type) type { + return struct { + const Self = @This(); + pub const Error = OutStreamType.Error; + pub const OutStream = io.OutStream(*Self, Error, write); + + out_stream: *OutStreamType, + byte_found: bool, + byte: u8, + + pub fn init(byte: u8, out_stream: *OutStreamType) Self { + return Self{ + .out_stream = out_stream, + .byte = byte, + .byte_found = false, + }; + } + + pub fn outStream(self: *Self) OutStream { + return .{ .context = self }; + } + + fn write(self: *Self, bytes: []const u8) Error!usize { + if (!self.byte_found) { + self.byte_found = blk: { + for (bytes) |b| + if (b == self.byte) break :blk true; + break :blk false; + }; + } + return self.out_stream.writer().write(bytes); + } + }; +} +pub fn findByteOutStream(byte: u8, underlying_stream: anytype) FindByteOutStream(@TypeOf(underlying_stream).Child) { + comptime assert(@typeInfo(@TypeOf(underlying_stream)) == .Pointer); + return FindByteOutStream(@TypeOf(underlying_stream).Child).init(byte, underlying_stream); +} diff --git a/lib/std/io/writer.zig b/lib/std/io/writer.zig index 39729ef0a2..ffdca0d6a6 100644 --- a/lib/std/io/writer.zig +++ b/lib/std/io/writer.zig @@ -18,6 +18,10 @@ pub fn Writer( const Self = @This(); pub const Error = WriteError; + pub fn writer(self: *const Self) Self { + return self.*; + } + pub fn write(self: Self, bytes: []const u8) Error!usize { return writeFn(self.context, bytes); } diff --git a/lib/std/zig/parser_test.zig b/lib/std/zig/parser_test.zig index 8259af32a6..f4da650efb 100644 --- a/lib/std/zig/parser_test.zig +++ b/lib/std/zig/parser_test.zig @@ -615,6 +615,17 @@ test "zig fmt: infix operator and then multiline string literal" { ); } +test "zig fmt: infix operator and then multiline string literal" { + try testCanonical( + \\const x = "" ++ + \\ \\ hi0 + \\ \\ hi1 + \\ \\ hi2 + \\; + \\ + ); +} + test "zig fmt: C pointers" { try testCanonical( \\const Ptr = [*c]i32; @@ -885,6 +896,28 @@ test "zig fmt: 2nd arg multiline string" { ); } +test "zig fmt: 2nd arg multiline string many args" { + try testCanonical( + \\comptime { + \\ cases.addAsm("hello world linux x86_64", + \\ \\.text + \\ , "Hello, world!\n", "Hello, world!\n"); + \\} + \\ + ); +} + +test "zig fmt: final arg multiline string" { + try testCanonical( + \\comptime { + \\ cases.addAsm("hello world linux x86_64", "Hello, world!\n", + \\ \\.text + \\ ); + \\} + \\ + 
); +} + test "zig fmt: if condition wraps" { try testTransform( \\comptime { @@ -915,6 +948,11 @@ test "zig fmt: if condition wraps" { \\ var a = if (a) |*f| x: { \\ break :x &a.b; \\ } else |err| err; + \\ var a = if (cond and + \\ cond) |*f| + \\ x: { + \\ break :x &a.b; + \\ } else |err| err; \\} , \\comptime { @@ -951,6 +989,35 @@ test "zig fmt: if condition wraps" { \\ var a = if (a) |*f| x: { \\ break :x &a.b; \\ } else |err| err; + \\ var a = if (cond and + \\ cond) |*f| + \\ x: { + \\ break :x &a.b; + \\ } else |err| err; + \\} + \\ + ); +} + +test "zig fmt: if condition has line break but must not wrap" { + try testCanonical( + \\comptime { + \\ if (self.user_input_options.put( + \\ name, + \\ UserInputOption{ + \\ .name = name, + \\ .used = false, + \\ }, + \\ ) catch unreachable) |*prev_value| { + \\ foo(); + \\ bar(); + \\ } + \\ if (put( + \\ a, + \\ b, + \\ )) { + \\ foo(); + \\ } \\} \\ ); @@ -977,6 +1044,18 @@ test "zig fmt: if condition has line break but must not wrap" { ); } +test "zig fmt: function call with multiline argument" { + try testCanonical( + \\comptime { + \\ self.user_input_options.put(name, UserInputOption{ + \\ .name = name, + \\ .used = false, + \\ }); + \\} + \\ + ); +} + test "zig fmt: same-line doc comment on variable declaration" { try testTransform( \\pub const MAP_ANONYMOUS = 0x1000; /// allocated from memory, swap space @@ -1228,7 +1307,7 @@ test "zig fmt: array literal with hint" { \\const a = []u8{ \\ 1, 2, \\ 3, // - \\ 4, + \\ 4, \\ 5, 6, \\ 7, \\}; @@ -1293,7 +1372,7 @@ test "zig fmt: multiline string parameter in fn call with trailing comma" { \\ \\ZIG_C_HEADER_FILES {} \\ \\ZIG_DIA_GUIDS_LIB {} \\ \\ - \\ , + \\ , \\ std.cstr.toSliceConst(c.ZIG_CMAKE_BINARY_DIR), \\ std.cstr.toSliceConst(c.ZIG_CXX_COMPILER), \\ std.cstr.toSliceConst(c.ZIG_DIA_GUIDS_LIB), @@ -2885,20 +2964,20 @@ test "zig fmt: multiline string in array" { try testCanonical( \\const Foo = [][]const u8{ \\ \\aaa - \\, + \\ , \\ \\bbb \\}; \\ \\fn bar() void { \\ const Foo = [][]const u8{ \\ \\aaa - \\ , + \\ , \\ \\bbb \\ }; \\ const Bar = [][]const u8{ // comment here \\ \\aaa \\ \\ - \\ , // and another comment can go here + \\ , // and another comment can go here \\ \\bbb \\ }; \\} @@ -3214,6 +3293,23 @@ test "zig fmt: C var args" { ); } +test "zig fmt: Only indent multiline string literals in function calls" { + try testCanonical( + \\test "zig fmt:" { + \\ try testTransform( + \\ \\const X = struct { + \\ \\ foo: i32, bar: i8 }; + \\ , + \\ \\const X = struct { + \\ \\ foo: i32, bar: i8 + \\ \\}; + \\ \\ + \\ ); + \\} + \\ + ); +} + const std = @import("std"); const mem = std.mem; const warn = std.debug.warn; @@ -3256,7 +3352,8 @@ fn testParse(source: []const u8, allocator: *mem.Allocator, anything_changed: *b var buffer = std.ArrayList(u8).init(allocator); errdefer buffer.deinit(); - anything_changed.* = try std.zig.render(allocator, buffer.outStream(), tree); + const outStream = buffer.outStream(); + anything_changed.* = try std.zig.render(allocator, &outStream, tree); return buffer.toOwnedSlice(); } fn testTransform(source: []const u8, expected_source: []const u8) !void { diff --git a/lib/std/zig/render.zig b/lib/std/zig/render.zig index 4d44c41bfa..d7bba2f6bf 100644 --- a/lib/std/zig/render.zig +++ b/lib/std/zig/render.zig @@ -6,6 +6,7 @@ const std = @import("../std.zig"); const assert = std.debug.assert; const mem = std.mem; +const meta = std.meta; const ast = std.zig.ast; const Token = std.zig.Token; @@ -17,74 +18,37 @@ pub const Error = error{ }; /// Returns whether 
anything changed -pub fn render(allocator: *mem.Allocator, stream: anytype, tree: *ast.Tree) (@TypeOf(stream).Error || Error)!bool { +pub fn render(allocator: *mem.Allocator, stream: anytype, tree: *ast.Tree) (meta.Child(@TypeOf(stream)).Error || Error)!bool { // cannot render an invalid tree std.debug.assert(tree.errors.len == 0); - // make a passthrough stream that checks whether something changed - const MyStream = struct { - const MyStream = @This(); - const StreamError = @TypeOf(stream).Error; + var s = stream.*; + var change_detection_stream = std.io.changeDetectionStream(tree.source, &s); + var auto_indenting_stream = std.io.autoIndentingStream(indent_delta, &change_detection_stream); - child_stream: @TypeOf(stream), - anything_changed: bool, - source_index: usize, - source: []const u8, + try renderRoot(allocator, &auto_indenting_stream, tree); - fn write(self: *MyStream, bytes: []const u8) StreamError!usize { - if (!self.anything_changed) { - const end = self.source_index + bytes.len; - if (end > self.source.len) { - self.anything_changed = true; - } else { - const src_slice = self.source[self.source_index..end]; - self.source_index += bytes.len; - if (!mem.eql(u8, bytes, src_slice)) { - self.anything_changed = true; - } - } - } - - return self.child_stream.write(bytes); - } - }; - var my_stream = MyStream{ - .child_stream = stream, - .anything_changed = false, - .source_index = 0, - .source = tree.source, - }; - const my_stream_stream: std.io.Writer(*MyStream, MyStream.StreamError, MyStream.write) = .{ - .context = &my_stream, - }; - - try renderRoot(allocator, my_stream_stream, tree); - - if (my_stream.source_index != my_stream.source.len) { - my_stream.anything_changed = true; - } - - return my_stream.anything_changed; + return change_detection_stream.changeDetected(); } fn renderRoot( allocator: *mem.Allocator, stream: anytype, tree: *ast.Tree, -) (@TypeOf(stream).Error || Error)!void { +) (@TypeOf(stream.*).Error || Error)!void { + // render all the line comments at the beginning of the file for (tree.token_ids) |token_id, i| { if (token_id != .LineComment) break; const token_loc = tree.token_locs[i]; - try stream.print("{}\n", .{mem.trimRight(u8, tree.tokenSliceLoc(token_loc), " ")}); + try stream.writer().print("{}\n", .{mem.trimRight(u8, tree.tokenSliceLoc(token_loc), " ")}); const next_token = tree.token_locs[i + 1]; const loc = tree.tokenLocationLoc(token_loc.end, next_token); if (loc.line >= 2) { - try stream.writeByte('\n'); + try stream.insertNewline(); } } - var start_col: usize = 0; var decl_i: ast.NodeIndex = 0; const root_decls = tree.root_node.decls(); @@ -189,23 +153,22 @@ fn renderRoot( try copyFixingWhitespace(stream, tree.source[start..end]); } - try renderTopLevelDecl(allocator, stream, tree, 0, &start_col, decl); + try renderTopLevelDecl(allocator, stream, tree, decl); decl_i += 1; if (decl_i >= root_decls.len) return; - try renderExtraNewline(tree, stream, &start_col, root_decls[decl_i]); + try renderExtraNewline(tree, stream, root_decls[decl_i]); } } -fn renderExtraNewline(tree: *ast.Tree, stream: anytype, start_col: *usize, node: *ast.Node) @TypeOf(stream).Error!void { - return renderExtraNewlineToken(tree, stream, start_col, node.firstToken()); +fn renderExtraNewline(tree: *ast.Tree, stream: anytype, node: *ast.Node) @TypeOf(stream.*).Error!void { + return renderExtraNewlineToken(tree, stream, node.firstToken()); } fn renderExtraNewlineToken( tree: *ast.Tree, stream: anytype, - start_col: *usize, first_token: ast.TokenIndex, -) @TypeOf(stream).Error!void 
{ +) @TypeOf(stream.*).Error!void { var prev_token = first_token; if (prev_token == 0) return; var newline_threshold: usize = 2; @@ -218,28 +181,27 @@ fn renderExtraNewlineToken( const prev_token_end = tree.token_locs[prev_token - 1].end; const loc = tree.tokenLocation(prev_token_end, first_token); if (loc.line >= newline_threshold) { - try stream.writeByte('\n'); - start_col.* = 0; + try stream.insertNewline(); } } -fn renderTopLevelDecl(allocator: *mem.Allocator, stream: anytype, tree: *ast.Tree, indent: usize, start_col: *usize, decl: *ast.Node) (@TypeOf(stream).Error || Error)!void { - try renderContainerDecl(allocator, stream, tree, indent, start_col, decl, .Newline); +fn renderTopLevelDecl(allocator: *mem.Allocator, stream: anytype, tree: *ast.Tree, decl: *ast.Node) (@TypeOf(stream.*).Error || Error)!void { + try renderContainerDecl(allocator, stream, tree, decl, .Newline); } -fn renderContainerDecl(allocator: *mem.Allocator, stream: anytype, tree: *ast.Tree, indent: usize, start_col: *usize, decl: *ast.Node, space: Space) (@TypeOf(stream).Error || Error)!void { +fn renderContainerDecl(allocator: *mem.Allocator, stream: anytype, tree: *ast.Tree, decl: *ast.Node, space: Space) (@TypeOf(stream.*).Error || Error)!void { switch (decl.tag) { .FnProto => { const fn_proto = @fieldParentPtr(ast.Node.FnProto, "base", decl); - try renderDocComments(tree, stream, fn_proto, fn_proto.getTrailer("doc_comments"), indent, start_col); + try renderDocComments(tree, stream, fn_proto, fn_proto.getTrailer("doc_comments")); if (fn_proto.getTrailer("body_node")) |body_node| { - try renderExpression(allocator, stream, tree, indent, start_col, decl, .Space); - try renderExpression(allocator, stream, tree, indent, start_col, body_node, space); + try renderExpression(allocator, stream, tree, decl, .Space); + try renderExpression(allocator, stream, tree, body_node, space); } else { - try renderExpression(allocator, stream, tree, indent, start_col, decl, .None); - try renderToken(tree, stream, tree.nextToken(decl.lastToken()), indent, start_col, space); + try renderExpression(allocator, stream, tree, decl, .None); + try renderToken(tree, stream, tree.nextToken(decl.lastToken()), space); } }, @@ -247,35 +209,35 @@ fn renderContainerDecl(allocator: *mem.Allocator, stream: anytype, tree: *ast.Tr const use_decl = @fieldParentPtr(ast.Node.Use, "base", decl); if (use_decl.visib_token) |visib_token| { - try renderToken(tree, stream, visib_token, indent, start_col, .Space); // pub + try renderToken(tree, stream, visib_token, .Space); // pub } - try renderToken(tree, stream, use_decl.use_token, indent, start_col, .Space); // usingnamespace - try renderExpression(allocator, stream, tree, indent, start_col, use_decl.expr, .None); - try renderToken(tree, stream, use_decl.semicolon_token, indent, start_col, space); // ; + try renderToken(tree, stream, use_decl.use_token, .Space); // usingnamespace + try renderExpression(allocator, stream, tree, use_decl.expr, .None); + try renderToken(tree, stream, use_decl.semicolon_token, space); // ; }, .VarDecl => { const var_decl = @fieldParentPtr(ast.Node.VarDecl, "base", decl); - try renderDocComments(tree, stream, var_decl, var_decl.getTrailer("doc_comments"), indent, start_col); - try renderVarDecl(allocator, stream, tree, indent, start_col, var_decl); + try renderDocComments(tree, stream, var_decl, var_decl.getTrailer("doc_comments")); + try renderVarDecl(allocator, stream, tree, var_decl); }, .TestDecl => { const test_decl = @fieldParentPtr(ast.Node.TestDecl, "base", decl); - try 
renderDocComments(tree, stream, test_decl, test_decl.doc_comments, indent, start_col); - try renderToken(tree, stream, test_decl.test_token, indent, start_col, .Space); - try renderExpression(allocator, stream, tree, indent, start_col, test_decl.name, .Space); - try renderExpression(allocator, stream, tree, indent, start_col, test_decl.body_node, space); + try renderDocComments(tree, stream, test_decl, test_decl.doc_comments); + try renderToken(tree, stream, test_decl.test_token, .Space); + try renderExpression(allocator, stream, tree, test_decl.name, .Space); + try renderExpression(allocator, stream, tree, test_decl.body_node, space); }, .ContainerField => { const field = @fieldParentPtr(ast.Node.ContainerField, "base", decl); - try renderDocComments(tree, stream, field, field.doc_comments, indent, start_col); + try renderDocComments(tree, stream, field, field.doc_comments); if (field.comptime_token) |t| { - try renderToken(tree, stream, t, indent, start_col, .Space); // comptime + try renderToken(tree, stream, t, .Space); // comptime } const src_has_trailing_comma = blk: { @@ -288,68 +250,67 @@ fn renderContainerDecl(allocator: *mem.Allocator, stream: anytype, tree: *ast.Tr const last_token_space: Space = if (src_has_trailing_comma) .None else space; if (field.type_expr == null and field.value_expr == null) { - try renderToken(tree, stream, field.name_token, indent, start_col, last_token_space); // name + try renderToken(tree, stream, field.name_token, last_token_space); // name } else if (field.type_expr != null and field.value_expr == null) { - try renderToken(tree, stream, field.name_token, indent, start_col, .None); // name - try renderToken(tree, stream, tree.nextToken(field.name_token), indent, start_col, .Space); // : + try renderToken(tree, stream, field.name_token, .None); // name + try renderToken(tree, stream, tree.nextToken(field.name_token), .Space); // : if (field.align_expr) |align_value_expr| { - try renderExpression(allocator, stream, tree, indent, start_col, field.type_expr.?, .Space); // type + try renderExpression(allocator, stream, tree, field.type_expr.?, .Space); // type const lparen_token = tree.prevToken(align_value_expr.firstToken()); const align_kw = tree.prevToken(lparen_token); const rparen_token = tree.nextToken(align_value_expr.lastToken()); - try renderToken(tree, stream, align_kw, indent, start_col, .None); // align - try renderToken(tree, stream, lparen_token, indent, start_col, .None); // ( - try renderExpression(allocator, stream, tree, indent, start_col, align_value_expr, .None); // alignment - try renderToken(tree, stream, rparen_token, indent, start_col, last_token_space); // ) + try renderToken(tree, stream, align_kw, .None); // align + try renderToken(tree, stream, lparen_token, .None); // ( + try renderExpression(allocator, stream, tree, align_value_expr, .None); // alignment + try renderToken(tree, stream, rparen_token, last_token_space); // ) } else { - try renderExpression(allocator, stream, tree, indent, start_col, field.type_expr.?, last_token_space); // type + try renderExpression(allocator, stream, tree, field.type_expr.?, last_token_space); // type } } else if (field.type_expr == null and field.value_expr != null) { - try renderToken(tree, stream, field.name_token, indent, start_col, .Space); // name - try renderToken(tree, stream, tree.nextToken(field.name_token), indent, start_col, .Space); // = - try renderExpression(allocator, stream, tree, indent, start_col, field.value_expr.?, last_token_space); // value + try renderToken(tree, 
stream, field.name_token, .Space); // name + try renderToken(tree, stream, tree.nextToken(field.name_token), .Space); // = + try renderExpression(allocator, stream, tree, field.value_expr.?, last_token_space); // value } else { - try renderToken(tree, stream, field.name_token, indent, start_col, .None); // name - try renderToken(tree, stream, tree.nextToken(field.name_token), indent, start_col, .Space); // : + try renderToken(tree, stream, field.name_token, .None); // name + try renderToken(tree, stream, tree.nextToken(field.name_token), .Space); // : if (field.align_expr) |align_value_expr| { - try renderExpression(allocator, stream, tree, indent, start_col, field.type_expr.?, .Space); // type + try renderExpression(allocator, stream, tree, field.type_expr.?, .Space); // type const lparen_token = tree.prevToken(align_value_expr.firstToken()); const align_kw = tree.prevToken(lparen_token); const rparen_token = tree.nextToken(align_value_expr.lastToken()); - try renderToken(tree, stream, align_kw, indent, start_col, .None); // align - try renderToken(tree, stream, lparen_token, indent, start_col, .None); // ( - try renderExpression(allocator, stream, tree, indent, start_col, align_value_expr, .None); // alignment - try renderToken(tree, stream, rparen_token, indent, start_col, .Space); // ) + try renderToken(tree, stream, align_kw, .None); // align + try renderToken(tree, stream, lparen_token, .None); // ( + try renderExpression(allocator, stream, tree, align_value_expr, .None); // alignment + try renderToken(tree, stream, rparen_token, .Space); // ) } else { - try renderExpression(allocator, stream, tree, indent, start_col, field.type_expr.?, .Space); // type + try renderExpression(allocator, stream, tree, field.type_expr.?, .Space); // type } - try renderToken(tree, stream, tree.prevToken(field.value_expr.?.firstToken()), indent, start_col, .Space); // = - try renderExpression(allocator, stream, tree, indent, start_col, field.value_expr.?, last_token_space); // value + try renderToken(tree, stream, tree.prevToken(field.value_expr.?.firstToken()), .Space); // = + try renderExpression(allocator, stream, tree, field.value_expr.?, last_token_space); // value } if (src_has_trailing_comma) { const comma = tree.nextToken(field.lastToken()); - try renderToken(tree, stream, comma, indent, start_col, space); + try renderToken(tree, stream, comma, space); } }, .Comptime => { assert(!decl.requireSemiColon()); - try renderExpression(allocator, stream, tree, indent, start_col, decl, space); + try renderExpression(allocator, stream, tree, decl, space); }, .DocComment => { const comment = @fieldParentPtr(ast.Node.DocComment, "base", decl); const kind = tree.token_ids[comment.first_line]; - try renderToken(tree, stream, comment.first_line, indent, start_col, .Newline); + try renderToken(tree, stream, comment.first_line, .Newline); var tok_i = comment.first_line + 1; while (true) : (tok_i += 1) { const tok_id = tree.token_ids[tok_i]; if (tok_id == kind) { - try stream.writeByteNTimes(' ', indent); - try renderToken(tree, stream, tok_i, indent, start_col, .Newline); + try renderToken(tree, stream, tok_i, .Newline); } else if (tok_id == .LineComment) { continue; } else { @@ -365,11 +326,9 @@ fn renderExpression( allocator: *mem.Allocator, stream: anytype, tree: *ast.Tree, - indent: usize, - start_col: *usize, base: *ast.Node, space: Space, -) (@TypeOf(stream).Error || Error)!void { +) (@TypeOf(stream.*).Error || Error)!void { switch (base.tag) { .Identifier, .IntegerLiteral, @@ -383,18 +342,18 @@ fn 
renderExpression( .UndefinedLiteral, => { const casted_node = base.cast(ast.Node.OneToken).?; - return renderToken(tree, stream, casted_node.token, indent, start_col, space); + return renderToken(tree, stream, casted_node.token, space); }, .AnyType => { const any_type = base.castTag(.AnyType).?; if (mem.eql(u8, tree.tokenSlice(any_type.token), "var")) { // TODO remove in next release cycle - try stream.writeAll("anytype"); - if (space == .Comma) try stream.writeAll(",\n"); + try stream.writer().writeAll("anytype"); + if (space == .Comma) try stream.writer().writeAll(",\n"); return; } - return renderToken(tree, stream, any_type.token, indent, start_col, space); + return renderToken(tree, stream, any_type.token, space); }, .Block, .LabeledBlock => { @@ -424,65 +383,65 @@ fn renderExpression( }; if (block.label) |label| { - try renderToken(tree, stream, label, indent, start_col, Space.None); - try renderToken(tree, stream, tree.nextToken(label), indent, start_col, Space.Space); + try renderToken(tree, stream, label, Space.None); + try renderToken(tree, stream, tree.nextToken(label), Space.Space); } if (block.statements.len == 0) { - try renderToken(tree, stream, block.lbrace, indent + indent_delta, start_col, Space.None); - return renderToken(tree, stream, block.rbrace, indent, start_col, space); + stream.pushIndentNextLine(); + defer stream.popIndent(); + try renderToken(tree, stream, block.lbrace, Space.None); } else { - const block_indent = indent + indent_delta; - try renderToken(tree, stream, block.lbrace, block_indent, start_col, Space.Newline); + stream.pushIndentNextLine(); + defer stream.popIndent(); + + try renderToken(tree, stream, block.lbrace, Space.Newline); for (block.statements) |statement, i| { - try stream.writeByteNTimes(' ', block_indent); - try renderStatement(allocator, stream, tree, block_indent, start_col, statement); + try renderStatement(allocator, stream, tree, statement); if (i + 1 < block.statements.len) { - try renderExtraNewline(tree, stream, start_col, block.statements[i + 1]); + try renderExtraNewline(tree, stream, block.statements[i + 1]); } } - - try stream.writeByteNTimes(' ', indent); - return renderToken(tree, stream, block.rbrace, indent, start_col, space); } + return renderToken(tree, stream, block.rbrace, space); }, .Defer => { const defer_node = @fieldParentPtr(ast.Node.Defer, "base", base); - try renderToken(tree, stream, defer_node.defer_token, indent, start_col, Space.Space); + try renderToken(tree, stream, defer_node.defer_token, Space.Space); if (defer_node.payload) |payload| { - try renderExpression(allocator, stream, tree, indent, start_col, payload, Space.Space); + try renderExpression(allocator, stream, tree, payload, Space.Space); } - return renderExpression(allocator, stream, tree, indent, start_col, defer_node.expr, space); + return renderExpression(allocator, stream, tree, defer_node.expr, space); }, .Comptime => { const comptime_node = @fieldParentPtr(ast.Node.Comptime, "base", base); - try renderToken(tree, stream, comptime_node.comptime_token, indent, start_col, Space.Space); - return renderExpression(allocator, stream, tree, indent, start_col, comptime_node.expr, space); + try renderToken(tree, stream, comptime_node.comptime_token, Space.Space); + return renderExpression(allocator, stream, tree, comptime_node.expr, space); }, .Nosuspend => { const nosuspend_node = @fieldParentPtr(ast.Node.Nosuspend, "base", base); if (mem.eql(u8, tree.tokenSlice(nosuspend_node.nosuspend_token), "noasync")) { // TODO: remove this - try 
stream.writeAll("nosuspend "); + try stream.writer().writeAll("nosuspend "); } else { - try renderToken(tree, stream, nosuspend_node.nosuspend_token, indent, start_col, Space.Space); + try renderToken(tree, stream, nosuspend_node.nosuspend_token, Space.Space); } - return renderExpression(allocator, stream, tree, indent, start_col, nosuspend_node.expr, space); + return renderExpression(allocator, stream, tree, nosuspend_node.expr, space); }, .Suspend => { const suspend_node = @fieldParentPtr(ast.Node.Suspend, "base", base); if (suspend_node.body) |body| { - try renderToken(tree, stream, suspend_node.suspend_token, indent, start_col, Space.Space); - return renderExpression(allocator, stream, tree, indent, start_col, body, space); + try renderToken(tree, stream, suspend_node.suspend_token, Space.Space); + return renderExpression(allocator, stream, tree, body, space); } else { - return renderToken(tree, stream, suspend_node.suspend_token, indent, start_col, space); + return renderToken(tree, stream, suspend_node.suspend_token, space); } }, @@ -490,26 +449,21 @@ fn renderExpression( const infix_op_node = @fieldParentPtr(ast.Node.Catch, "base", base); const op_space = Space.Space; - try renderExpression(allocator, stream, tree, indent, start_col, infix_op_node.lhs, op_space); + try renderExpression(allocator, stream, tree, infix_op_node.lhs, op_space); const after_op_space = blk: { - const loc = tree.tokenLocation(tree.token_locs[infix_op_node.op_token].end, tree.nextToken(infix_op_node.op_token)); - break :blk if (loc.line == 0) op_space else Space.Newline; + const same_line = tree.tokensOnSameLine(infix_op_node.op_token, tree.nextToken(infix_op_node.op_token)); + break :blk if (same_line) op_space else Space.Newline; }; - try renderToken(tree, stream, infix_op_node.op_token, indent, start_col, after_op_space); - if (after_op_space == Space.Newline and - tree.token_ids[tree.nextToken(infix_op_node.op_token)] != .MultilineStringLiteralLine) - { - try stream.writeByteNTimes(' ', indent + indent_delta); - start_col.* = indent + indent_delta; - } + try renderToken(tree, stream, infix_op_node.op_token, after_op_space); if (infix_op_node.payload) |payload| { - try renderExpression(allocator, stream, tree, indent, start_col, payload, Space.Space); + try renderExpression(allocator, stream, tree, payload, Space.Space); } - return renderExpression(allocator, stream, tree, indent, start_col, infix_op_node.rhs, space); + stream.pushIndentOneShot(); + return renderExpression(allocator, stream, tree, infix_op_node.rhs, space); }, .Add, @@ -561,22 +515,16 @@ fn renderExpression( .Period, .ErrorUnion, .Range => Space.None, else => Space.Space, }; - try renderExpression(allocator, stream, tree, indent, start_col, infix_op_node.lhs, op_space); + try renderExpression(allocator, stream, tree, infix_op_node.lhs, op_space); const after_op_space = blk: { const loc = tree.tokenLocation(tree.token_locs[infix_op_node.op_token].end, tree.nextToken(infix_op_node.op_token)); break :blk if (loc.line == 0) op_space else Space.Newline; }; - try renderToken(tree, stream, infix_op_node.op_token, indent, start_col, after_op_space); - if (after_op_space == Space.Newline and - tree.token_ids[tree.nextToken(infix_op_node.op_token)] != .MultilineStringLiteralLine) - { - try stream.writeByteNTimes(' ', indent + indent_delta); - start_col.* = indent + indent_delta; - } - - return renderExpression(allocator, stream, tree, indent, start_col, infix_op_node.rhs, space); + try renderToken(tree, stream, infix_op_node.op_token, 
after_op_space); + stream.pushIndentOneShot(); + return renderExpression(allocator, stream, tree, infix_op_node.rhs, space); }, .BitNot, @@ -587,8 +535,8 @@ fn renderExpression( .AddressOf, => { const casted_node = @fieldParentPtr(ast.Node.SimplePrefixOp, "base", base); - try renderToken(tree, stream, casted_node.op_token, indent, start_col, Space.None); - return renderExpression(allocator, stream, tree, indent, start_col, casted_node.rhs, space); + try renderToken(tree, stream, casted_node.op_token, Space.None); + return renderExpression(allocator, stream, tree, casted_node.rhs, space); }, .Try, @@ -596,8 +544,8 @@ fn renderExpression( .Await, => { const casted_node = @fieldParentPtr(ast.Node.SimplePrefixOp, "base", base); - try renderToken(tree, stream, casted_node.op_token, indent, start_col, Space.Space); - return renderExpression(allocator, stream, tree, indent, start_col, casted_node.rhs, space); + try renderToken(tree, stream, casted_node.op_token, Space.Space); + return renderExpression(allocator, stream, tree, casted_node.rhs, space); }, .ArrayType => { @@ -606,8 +554,6 @@ fn renderExpression( allocator, stream, tree, - indent, - start_col, array_type.op_token, array_type.rhs, array_type.len_expr, @@ -621,8 +567,6 @@ fn renderExpression( allocator, stream, tree, - indent, - start_col, array_type.op_token, array_type.rhs, array_type.len_expr, @@ -635,111 +579,111 @@ fn renderExpression( const ptr_type = @fieldParentPtr(ast.Node.PtrType, "base", base); const op_tok_id = tree.token_ids[ptr_type.op_token]; switch (op_tok_id) { - .Asterisk, .AsteriskAsterisk => try stream.writeByte('*'), + .Asterisk, .AsteriskAsterisk => try stream.writer().writeByte('*'), .LBracket => if (tree.token_ids[ptr_type.op_token + 2] == .Identifier) - try stream.writeAll("[*c") + try stream.writer().writeAll("[*c") else - try stream.writeAll("[*"), + try stream.writer().writeAll("[*"), else => unreachable, } if (ptr_type.ptr_info.sentinel) |sentinel| { const colon_token = tree.prevToken(sentinel.firstToken()); - try renderToken(tree, stream, colon_token, indent, start_col, Space.None); // : + try renderToken(tree, stream, colon_token, Space.None); // : const sentinel_space = switch (op_tok_id) { .LBracket => Space.None, else => Space.Space, }; - try renderExpression(allocator, stream, tree, indent, start_col, sentinel, sentinel_space); + try renderExpression(allocator, stream, tree, sentinel, sentinel_space); } switch (op_tok_id) { .Asterisk, .AsteriskAsterisk => {}, - .LBracket => try stream.writeByte(']'), + .LBracket => try stream.writer().writeByte(']'), else => unreachable, } if (ptr_type.ptr_info.allowzero_token) |allowzero_token| { - try renderToken(tree, stream, allowzero_token, indent, start_col, Space.Space); // allowzero + try renderToken(tree, stream, allowzero_token, Space.Space); // allowzero } if (ptr_type.ptr_info.align_info) |align_info| { const lparen_token = tree.prevToken(align_info.node.firstToken()); const align_token = tree.prevToken(lparen_token); - try renderToken(tree, stream, align_token, indent, start_col, Space.None); // align - try renderToken(tree, stream, lparen_token, indent, start_col, Space.None); // ( + try renderToken(tree, stream, align_token, Space.None); // align + try renderToken(tree, stream, lparen_token, Space.None); // ( - try renderExpression(allocator, stream, tree, indent, start_col, align_info.node, Space.None); + try renderExpression(allocator, stream, tree, align_info.node, Space.None); if (align_info.bit_range) |bit_range| { const colon1 = 
tree.prevToken(bit_range.start.firstToken()); const colon2 = tree.prevToken(bit_range.end.firstToken()); - try renderToken(tree, stream, colon1, indent, start_col, Space.None); // : - try renderExpression(allocator, stream, tree, indent, start_col, bit_range.start, Space.None); - try renderToken(tree, stream, colon2, indent, start_col, Space.None); // : - try renderExpression(allocator, stream, tree, indent, start_col, bit_range.end, Space.None); + try renderToken(tree, stream, colon1, Space.None); // : + try renderExpression(allocator, stream, tree, bit_range.start, Space.None); + try renderToken(tree, stream, colon2, Space.None); // : + try renderExpression(allocator, stream, tree, bit_range.end, Space.None); const rparen_token = tree.nextToken(bit_range.end.lastToken()); - try renderToken(tree, stream, rparen_token, indent, start_col, Space.Space); // ) + try renderToken(tree, stream, rparen_token, Space.Space); // ) } else { const rparen_token = tree.nextToken(align_info.node.lastToken()); - try renderToken(tree, stream, rparen_token, indent, start_col, Space.Space); // ) + try renderToken(tree, stream, rparen_token, Space.Space); // ) } } if (ptr_type.ptr_info.const_token) |const_token| { - try renderToken(tree, stream, const_token, indent, start_col, Space.Space); // const + try renderToken(tree, stream, const_token, Space.Space); // const } if (ptr_type.ptr_info.volatile_token) |volatile_token| { - try renderToken(tree, stream, volatile_token, indent, start_col, Space.Space); // volatile + try renderToken(tree, stream, volatile_token, Space.Space); // volatile } - return renderExpression(allocator, stream, tree, indent, start_col, ptr_type.rhs, space); + return renderExpression(allocator, stream, tree, ptr_type.rhs, space); }, .SliceType => { const slice_type = @fieldParentPtr(ast.Node.SliceType, "base", base); - try renderToken(tree, stream, slice_type.op_token, indent, start_col, Space.None); // [ + try renderToken(tree, stream, slice_type.op_token, Space.None); // [ if (slice_type.ptr_info.sentinel) |sentinel| { const colon_token = tree.prevToken(sentinel.firstToken()); - try renderToken(tree, stream, colon_token, indent, start_col, Space.None); // : - try renderExpression(allocator, stream, tree, indent, start_col, sentinel, Space.None); - try renderToken(tree, stream, tree.nextToken(sentinel.lastToken()), indent, start_col, Space.None); // ] + try renderToken(tree, stream, colon_token, Space.None); // : + try renderExpression(allocator, stream, tree, sentinel, Space.None); + try renderToken(tree, stream, tree.nextToken(sentinel.lastToken()), Space.None); // ] } else { - try renderToken(tree, stream, tree.nextToken(slice_type.op_token), indent, start_col, Space.None); // ] + try renderToken(tree, stream, tree.nextToken(slice_type.op_token), Space.None); // ] } if (slice_type.ptr_info.allowzero_token) |allowzero_token| { - try renderToken(tree, stream, allowzero_token, indent, start_col, Space.Space); // allowzero + try renderToken(tree, stream, allowzero_token, Space.Space); // allowzero } if (slice_type.ptr_info.align_info) |align_info| { const lparen_token = tree.prevToken(align_info.node.firstToken()); const align_token = tree.prevToken(lparen_token); - try renderToken(tree, stream, align_token, indent, start_col, Space.None); // align - try renderToken(tree, stream, lparen_token, indent, start_col, Space.None); // ( + try renderToken(tree, stream, align_token, Space.None); // align + try renderToken(tree, stream, lparen_token, Space.None); // ( - try 
renderExpression(allocator, stream, tree, indent, start_col, align_info.node, Space.None); + try renderExpression(allocator, stream, tree, align_info.node, Space.None); if (align_info.bit_range) |bit_range| { const colon1 = tree.prevToken(bit_range.start.firstToken()); const colon2 = tree.prevToken(bit_range.end.firstToken()); - try renderToken(tree, stream, colon1, indent, start_col, Space.None); // : - try renderExpression(allocator, stream, tree, indent, start_col, bit_range.start, Space.None); - try renderToken(tree, stream, colon2, indent, start_col, Space.None); // : - try renderExpression(allocator, stream, tree, indent, start_col, bit_range.end, Space.None); + try renderToken(tree, stream, colon1, Space.None); // : + try renderExpression(allocator, stream, tree, bit_range.start, Space.None); + try renderToken(tree, stream, colon2, Space.None); // : + try renderExpression(allocator, stream, tree, bit_range.end, Space.None); const rparen_token = tree.nextToken(bit_range.end.lastToken()); - try renderToken(tree, stream, rparen_token, indent, start_col, Space.Space); // ) + try renderToken(tree, stream, rparen_token, Space.Space); // ) } else { const rparen_token = tree.nextToken(align_info.node.lastToken()); - try renderToken(tree, stream, rparen_token, indent, start_col, Space.Space); // ) + try renderToken(tree, stream, rparen_token, Space.Space); // ) } } if (slice_type.ptr_info.const_token) |const_token| { - try renderToken(tree, stream, const_token, indent, start_col, Space.Space); + try renderToken(tree, stream, const_token, Space.Space); } if (slice_type.ptr_info.volatile_token) |volatile_token| { - try renderToken(tree, stream, volatile_token, indent, start_col, Space.Space); + try renderToken(tree, stream, volatile_token, Space.Space); } - return renderExpression(allocator, stream, tree, indent, start_col, slice_type.rhs, space); + return renderExpression(allocator, stream, tree, slice_type.rhs, space); }, .ArrayInitializer, .ArrayInitializerDot => { @@ -768,27 +712,33 @@ fn renderExpression( if (exprs.len == 0) { switch (lhs) { - .dot => |dot| try renderToken(tree, stream, dot, indent, start_col, Space.None), - .node => |node| try renderExpression(allocator, stream, tree, indent, start_col, node, Space.None), + .dot => |dot| try renderToken(tree, stream, dot, Space.None), + .node => |node| try renderExpression(allocator, stream, tree, node, Space.None), } - try renderToken(tree, stream, lbrace, indent, start_col, Space.None); - return renderToken(tree, stream, rtoken, indent, start_col, space); - } - if (exprs.len == 1 and tree.token_ids[exprs[0].lastToken() + 1] == .RBrace) { - const expr = exprs[0]; - switch (lhs) { - .dot => |dot| try renderToken(tree, stream, dot, indent, start_col, Space.None), - .node => |node| try renderExpression(allocator, stream, tree, indent, start_col, node, Space.None), + { + stream.pushIndent(); + defer stream.popIndent(); + try renderToken(tree, stream, lbrace, Space.None); } - try renderToken(tree, stream, lbrace, indent, start_col, Space.None); - try renderExpression(allocator, stream, tree, indent, start_col, expr, Space.None); - return renderToken(tree, stream, rtoken, indent, start_col, space); + + return renderToken(tree, stream, rtoken, space); + } + if (exprs.len == 1 and tree.token_ids[exprs[0].*.lastToken() + 1] == .RBrace) { + const expr = exprs[0]; + + switch (lhs) { + .dot => |dot| try renderToken(tree, stream, dot, Space.None), + .node => |node| try renderExpression(allocator, stream, tree, node, Space.None), + } + try 
renderToken(tree, stream, lbrace, Space.None); + try renderExpression(allocator, stream, tree, expr, Space.None); + return renderToken(tree, stream, rtoken, space); } switch (lhs) { - .dot => |dot| try renderToken(tree, stream, dot, indent, start_col, Space.None), - .node => |node| try renderExpression(allocator, stream, tree, indent, start_col, node, Space.None), + .dot => |dot| try renderToken(tree, stream, dot, Space.None), + .node => |node| try renderExpression(allocator, stream, tree, node, Space.None), } // scan to find row size @@ -832,77 +782,68 @@ fn renderExpression( // Null stream for counting the printed length of each expression var counting_stream = std.io.countingOutStream(std.io.null_out_stream); + var auto_indenting_stream = std.io.autoIndentingStream(indent_delta, &counting_stream); for (exprs) |expr, i| { counting_stream.bytes_written = 0; - var dummy_col: usize = 0; - try renderExpression(allocator, counting_stream.outStream(), tree, indent, &dummy_col, expr, Space.None); + try renderExpression(allocator, &auto_indenting_stream, tree, expr, Space.None); const width = @intCast(usize, counting_stream.bytes_written); const col = i % row_size; column_widths[col] = std.math.max(column_widths[col], width); expr_widths[i] = width; } - var new_indent = indent + indent_delta; + { + stream.pushIndentNextLine(); + defer stream.popIndent(); + try renderToken(tree, stream, lbrace, Space.Newline); - if (tree.token_ids[tree.nextToken(lbrace)] != .MultilineStringLiteralLine) { - try renderToken(tree, stream, lbrace, new_indent, start_col, Space.Newline); - try stream.writeByteNTimes(' ', new_indent); - } else { - new_indent -= indent_delta; - try renderToken(tree, stream, lbrace, new_indent, start_col, Space.None); + var col: usize = 1; + for (exprs) |expr, i| { + if (i + 1 < exprs.len) { + const next_expr = exprs[i + 1]; + try renderExpression(allocator, stream, tree, expr, Space.None); + + const comma = tree.nextToken(expr.*.lastToken()); + + if (col != row_size) { + try renderToken(tree, stream, comma, Space.Space); // , + + const padding = column_widths[i % row_size] - expr_widths[i]; + try stream.writer().writeByteNTimes(' ', padding); + + col += 1; + continue; + } + col = 1; + + if (tree.token_ids[tree.nextToken(comma)] != .MultilineStringLiteralLine) { + try renderToken(tree, stream, comma, Space.Newline); // , + } else { + try renderToken(tree, stream, comma, Space.None); // , + } + + try renderExtraNewline(tree, stream, next_expr); + } else { + try renderExpression(allocator, stream, tree, expr, Space.Comma); // , + } + } } - - var col: usize = 1; + return renderToken(tree, stream, rtoken, space); + } else { + try renderToken(tree, stream, lbrace, Space.Space); for (exprs) |expr, i| { if (i + 1 < exprs.len) { const next_expr = exprs[i + 1]; - try renderExpression(allocator, stream, tree, new_indent, start_col, expr, Space.None); - - const comma = tree.nextToken(expr.lastToken()); - - if (col != row_size) { - try renderToken(tree, stream, comma, new_indent, start_col, Space.Space); // , - - const padding = column_widths[i % row_size] - expr_widths[i]; - try stream.writeByteNTimes(' ', padding); - - col += 1; - continue; - } - col = 1; - - if (tree.token_ids[tree.nextToken(comma)] != .MultilineStringLiteralLine) { - try renderToken(tree, stream, comma, new_indent, start_col, Space.Newline); // , - } else { - try renderToken(tree, stream, comma, new_indent, start_col, Space.None); // , - } - - try renderExtraNewline(tree, stream, start_col, next_expr); - if (next_expr.tag != 
.MultilineStringLiteral) { - try stream.writeByteNTimes(' ', new_indent); - } + try renderExpression(allocator, stream, tree, expr, Space.None); + const comma = tree.nextToken(expr.*.lastToken()); + try renderToken(tree, stream, comma, Space.Space); // , } else { - try renderExpression(allocator, stream, tree, new_indent, start_col, expr, Space.Comma); // , - } - } - if (exprs[exprs.len - 1].tag != .MultilineStringLiteral) { - try stream.writeByteNTimes(' ', indent); - } - return renderToken(tree, stream, rtoken, indent, start_col, space); - } else { - try renderToken(tree, stream, lbrace, indent, start_col, Space.Space); - for (exprs) |expr, i| { - if (i + 1 < exprs.len) { - try renderExpression(allocator, stream, tree, indent, start_col, expr, Space.None); - const comma = tree.nextToken(expr.lastToken()); - try renderToken(tree, stream, comma, indent, start_col, Space.Space); // , - } else { - try renderExpression(allocator, stream, tree, indent, start_col, expr, Space.Space); + try renderExpression(allocator, stream, tree, expr, Space.Space); } } - return renderToken(tree, stream, rtoken, indent, start_col, space); + return renderToken(tree, stream, rtoken, space); } }, @@ -932,11 +873,17 @@ fn renderExpression( if (field_inits.len == 0) { switch (lhs) { - .dot => |dot| try renderToken(tree, stream, dot, indent, start_col, Space.None), - .node => |node| try renderExpression(allocator, stream, tree, indent, start_col, node, Space.None), + .dot => |dot| try renderToken(tree, stream, dot, Space.None), + .node => |node| try renderExpression(allocator, stream, tree, node, Space.None), } - try renderToken(tree, stream, lbrace, indent + indent_delta, start_col, Space.None); - return renderToken(tree, stream, rtoken, indent, start_col, space); + + { + stream.pushIndentNextLine(); + defer stream.popIndent(); + try renderToken(tree, stream, lbrace, Space.None); + } + + return renderToken(tree, stream, rtoken, space); } const src_has_trailing_comma = blk: { @@ -952,9 +899,10 @@ fn renderExpression( const expr_outputs_one_line = blk: { // render field expressions until a LF is found for (field_inits) |field_init| { - var find_stream = FindByteOutStream.init('\n'); - var dummy_col: usize = 0; - try renderExpression(allocator, find_stream.outStream(), tree, 0, &dummy_col, field_init, Space.None); + var find_stream = std.io.findByteOutStream('\n', &std.io.null_out_stream); + var auto_indenting_stream = std.io.autoIndentingStream(indent_delta, &find_stream); + + try renderExpression(allocator, &auto_indenting_stream, tree, field_init, Space.None); if (find_stream.byte_found) break :blk false; } break :blk true; @@ -967,7 +915,6 @@ fn renderExpression( .StructInitializer, .StructInitializerDot, => break :blk, - else => {}, } @@ -977,76 +924,78 @@ fn renderExpression( } switch (lhs) { - .dot => |dot| try renderToken(tree, stream, dot, indent, start_col, Space.None), - .node => |node| try renderExpression(allocator, stream, tree, indent, start_col, node, Space.None), + .dot => |dot| try renderToken(tree, stream, dot, Space.None), + .node => |node| try renderExpression(allocator, stream, tree, node, Space.None), } - try renderToken(tree, stream, lbrace, indent, start_col, Space.Space); - try renderExpression(allocator, stream, tree, indent, start_col, &field_init.base, Space.Space); - return renderToken(tree, stream, rtoken, indent, start_col, space); + try renderToken(tree, stream, lbrace, Space.Space); + try renderExpression(allocator, stream, tree, &field_init.base, Space.Space); + return 
renderToken(tree, stream, rtoken, space); } if (!src_has_trailing_comma and src_same_line and expr_outputs_one_line) { // render all on one line, no trailing comma switch (lhs) { - .dot => |dot| try renderToken(tree, stream, dot, indent, start_col, Space.None), - .node => |node| try renderExpression(allocator, stream, tree, indent, start_col, node, Space.None), + .dot => |dot| try renderToken(tree, stream, dot, Space.None), + .node => |node| try renderExpression(allocator, stream, tree, node, Space.None), } - try renderToken(tree, stream, lbrace, indent, start_col, Space.Space); + try renderToken(tree, stream, lbrace, Space.Space); for (field_inits) |field_init, i| { if (i + 1 < field_inits.len) { - try renderExpression(allocator, stream, tree, indent, start_col, field_init, Space.None); + try renderExpression(allocator, stream, tree, field_init, Space.None); const comma = tree.nextToken(field_init.lastToken()); - try renderToken(tree, stream, comma, indent, start_col, Space.Space); + try renderToken(tree, stream, comma, Space.Space); } else { - try renderExpression(allocator, stream, tree, indent, start_col, field_init, Space.Space); + try renderExpression(allocator, stream, tree, field_init, Space.Space); } } - return renderToken(tree, stream, rtoken, indent, start_col, space); + return renderToken(tree, stream, rtoken, space); } - const new_indent = indent + indent_delta; + { + switch (lhs) { + .dot => |dot| try renderToken(tree, stream, dot, Space.None), + .node => |node| try renderExpression(allocator, stream, tree, node, Space.None), + } - switch (lhs) { - .dot => |dot| try renderToken(tree, stream, dot, new_indent, start_col, Space.None), - .node => |node| try renderExpression(allocator, stream, tree, new_indent, start_col, node, Space.None), - } - try renderToken(tree, stream, lbrace, new_indent, start_col, Space.Newline); + stream.pushIndentNextLine(); + defer stream.popIndent(); - for (field_inits) |field_init, i| { - try stream.writeByteNTimes(' ', new_indent); + try renderToken(tree, stream, lbrace, Space.Newline); - if (i + 1 < field_inits.len) { - try renderExpression(allocator, stream, tree, new_indent, start_col, field_init, Space.None); + for (field_inits) |field_init, i| { + if (i + 1 < field_inits.len) { + const next_field_init = field_inits[i + 1]; + try renderExpression(allocator, stream, tree, field_init, Space.None); - const comma = tree.nextToken(field_init.lastToken()); - try renderToken(tree, stream, comma, new_indent, start_col, Space.Newline); + const comma = tree.nextToken(field_init.lastToken()); + try renderToken(tree, stream, comma, Space.Newline); - try renderExtraNewline(tree, stream, start_col, field_inits[i + 1]); - } else { - try renderExpression(allocator, stream, tree, new_indent, start_col, field_init, Space.Comma); + try renderExtraNewline(tree, stream, next_field_init); + } else { + try renderExpression(allocator, stream, tree, field_init, Space.Comma); + } } } - try stream.writeByteNTimes(' ', indent); - return renderToken(tree, stream, rtoken, indent, start_col, space); + return renderToken(tree, stream, rtoken, space); }, .Call => { const call = @fieldParentPtr(ast.Node.Call, "base", base); if (call.async_token) |async_token| { - try renderToken(tree, stream, async_token, indent, start_col, Space.Space); + try renderToken(tree, stream, async_token, Space.Space); } - try renderExpression(allocator, stream, tree, indent, start_col, call.lhs, Space.None); + try renderExpression(allocator, stream, tree, call.lhs, Space.None); const lparen = 
tree.nextToken(call.lhs.lastToken()); if (call.params_len == 0) { - try renderToken(tree, stream, lparen, indent, start_col, Space.None); - return renderToken(tree, stream, call.rtoken, indent, start_col, space); + try renderToken(tree, stream, lparen, Space.None); + return renderToken(tree, stream, call.rtoken, space); } const src_has_trailing_comma = blk: { @@ -1055,43 +1004,41 @@ fn renderExpression( }; if (src_has_trailing_comma) { - const new_indent = indent + indent_delta; - try renderToken(tree, stream, lparen, new_indent, start_col, Space.Newline); + try renderToken(tree, stream, lparen, Space.Newline); const params = call.params(); for (params) |param_node, i| { - const param_node_new_indent = if (param_node.tag == .MultilineStringLiteral) blk: { - break :blk indent; - } else blk: { - try stream.writeByteNTimes(' ', new_indent); - break :blk new_indent; - }; + stream.pushIndent(); + defer stream.popIndent(); if (i + 1 < params.len) { - try renderExpression(allocator, stream, tree, param_node_new_indent, start_col, param_node, Space.None); + const next_node = params[i + 1]; + try renderExpression(allocator, stream, tree, param_node, Space.None); const comma = tree.nextToken(param_node.lastToken()); - try renderToken(tree, stream, comma, new_indent, start_col, Space.Newline); // , - try renderExtraNewline(tree, stream, start_col, params[i + 1]); + try renderToken(tree, stream, comma, Space.Newline); // , + try renderExtraNewline(tree, stream, next_node); } else { - try renderExpression(allocator, stream, tree, param_node_new_indent, start_col, param_node, Space.Comma); - try stream.writeByteNTimes(' ', indent); - return renderToken(tree, stream, call.rtoken, indent, start_col, space); + try renderExpression(allocator, stream, tree, param_node, Space.Comma); } } + return renderToken(tree, stream, call.rtoken, space); } - try renderToken(tree, stream, lparen, indent, start_col, Space.None); // ( + try renderToken(tree, stream, lparen, Space.None); // ( const params = call.params(); for (params) |param_node, i| { - try renderExpression(allocator, stream, tree, indent, start_col, param_node, Space.None); + if (param_node.*.tag == .MultilineStringLiteral) stream.pushIndentOneShot(); + + try renderExpression(allocator, stream, tree, param_node, Space.None); if (i + 1 < params.len) { + const next_param = params[i + 1]; const comma = tree.nextToken(param_node.lastToken()); - try renderToken(tree, stream, comma, indent, start_col, Space.Space); + try renderToken(tree, stream, comma, Space.Space); } } - return renderToken(tree, stream, call.rtoken, indent, start_col, space); + return renderToken(tree, stream, call.rtoken, space); }, .ArrayAccess => { @@ -1100,26 +1047,25 @@ fn renderExpression( const lbracket = tree.nextToken(suffix_op.lhs.lastToken()); const rbracket = tree.nextToken(suffix_op.index_expr.lastToken()); - try renderExpression(allocator, stream, tree, indent, start_col, suffix_op.lhs, Space.None); - try renderToken(tree, stream, lbracket, indent, start_col, Space.None); // [ + try renderExpression(allocator, stream, tree, suffix_op.lhs, Space.None); + try renderToken(tree, stream, lbracket, Space.None); // [ const starts_with_comment = tree.token_ids[lbracket + 1] == .LineComment; const ends_with_comment = tree.token_ids[rbracket - 1] == .LineComment; - const new_indent = if (ends_with_comment) indent + indent_delta else indent; - const new_space = if (ends_with_comment) Space.Newline else Space.None; - try renderExpression(allocator, stream, tree, new_indent, start_col, 
suffix_op.index_expr, new_space); - if (starts_with_comment) { - try stream.writeByte('\n'); + { + const new_space = if (ends_with_comment) Space.Newline else Space.None; + + stream.pushIndent(); + defer stream.popIndent(); + try renderExpression(allocator, stream, tree, suffix_op.index_expr, new_space); } - if (ends_with_comment or starts_with_comment) { - try stream.writeByteNTimes(' ', indent); - } - return renderToken(tree, stream, rbracket, indent, start_col, space); // ] + if (starts_with_comment) try stream.maybeInsertNewline(); + return renderToken(tree, stream, rbracket, space); // ] }, + .Slice => { const suffix_op = base.castTag(.Slice).?; - - try renderExpression(allocator, stream, tree, indent, start_col, suffix_op.lhs, Space.None); + try renderExpression(allocator, stream, tree, suffix_op.lhs, Space.None); const lbracket = tree.prevToken(suffix_op.start.firstToken()); const dotdot = tree.nextToken(suffix_op.start.lastToken()); @@ -1129,32 +1075,33 @@ fn renderExpression( const after_start_space = if (after_start_space_bool) Space.Space else Space.None; const after_op_space = if (suffix_op.end != null) after_start_space else Space.None; - try renderToken(tree, stream, lbracket, indent, start_col, Space.None); // [ - try renderExpression(allocator, stream, tree, indent, start_col, suffix_op.start, after_start_space); - try renderToken(tree, stream, dotdot, indent, start_col, after_op_space); // .. + try renderToken(tree, stream, lbracket, Space.None); // [ + try renderExpression(allocator, stream, tree, suffix_op.start, after_start_space); + try renderToken(tree, stream, dotdot, after_op_space); // .. if (suffix_op.end) |end| { const after_end_space = if (suffix_op.sentinel != null) Space.Space else Space.None; - try renderExpression(allocator, stream, tree, indent, start_col, end, after_end_space); + try renderExpression(allocator, stream, tree, end, after_end_space); } if (suffix_op.sentinel) |sentinel| { const colon = tree.prevToken(sentinel.firstToken()); - try renderToken(tree, stream, colon, indent, start_col, Space.None); // : - try renderExpression(allocator, stream, tree, indent, start_col, sentinel, Space.None); + try renderToken(tree, stream, colon, Space.None); // : + try renderExpression(allocator, stream, tree, sentinel, Space.None); } - return renderToken(tree, stream, suffix_op.rtoken, indent, start_col, space); // ] + return renderToken(tree, stream, suffix_op.rtoken, space); // ] }, + .Deref => { const suffix_op = base.castTag(.Deref).?; - try renderExpression(allocator, stream, tree, indent, start_col, suffix_op.lhs, Space.None); - return renderToken(tree, stream, suffix_op.rtoken, indent, start_col, space); // .* + try renderExpression(allocator, stream, tree, suffix_op.lhs, Space.None); + return renderToken(tree, stream, suffix_op.rtoken, space); // .* }, .UnwrapOptional => { const suffix_op = base.castTag(.UnwrapOptional).?; - try renderExpression(allocator, stream, tree, indent, start_col, suffix_op.lhs, Space.None); - try renderToken(tree, stream, tree.prevToken(suffix_op.rtoken), indent, start_col, Space.None); // . - return renderToken(tree, stream, suffix_op.rtoken, indent, start_col, space); // ? + try renderExpression(allocator, stream, tree, suffix_op.lhs, Space.None); + try renderToken(tree, stream, tree.prevToken(suffix_op.rtoken), Space.None); // . + return renderToken(tree, stream, suffix_op.rtoken, space); // ? 
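The `expr_outputs_one_line` check above probes whether a node would render without a line break by writing it into a throwaway stream and testing `byte_found`. A condensed sketch of that pattern, assuming the `findByteOutStream` and `autoIndentingStream` constructors this patch introduces plus the surrounding file's `renderExpression`, `Space`, and `indent_delta`; the helper name `rendersOnOneLine` is hypothetical:

fn rendersOnOneLine(allocator: *mem.Allocator, tree: *ast.Tree, node: *ast.Node) !bool {
    // Render into a stream that only records whether '\n' was ever written;
    // the bytes themselves are discarded into the null writer.
    var find_stream = std.io.findByteOutStream('\n', &std.io.null_out_stream);
    var auto_indenting_stream = std.io.autoIndentingStream(indent_delta, &find_stream);
    try renderExpression(allocator, &auto_indenting_stream, tree, node, Space.None);
    return !find_stream.byte_found;
}

The null writer is enough as a sink here because only the presence of a newline matters, not the rendered text.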
}, .Break => { @@ -1163,145 +1110,152 @@ fn renderExpression( const maybe_label = flow_expr.getLabel(); if (maybe_label == null and maybe_rhs == null) { - return renderToken(tree, stream, flow_expr.ltoken, indent, start_col, space); // break + return renderToken(tree, stream, flow_expr.ltoken, space); // break } - try renderToken(tree, stream, flow_expr.ltoken, indent, start_col, Space.Space); // break + try renderToken(tree, stream, flow_expr.ltoken, Space.Space); // break if (maybe_label) |label| { const colon = tree.nextToken(flow_expr.ltoken); - try renderToken(tree, stream, colon, indent, start_col, Space.None); // : + try renderToken(tree, stream, colon, Space.None); // : if (maybe_rhs == null) { - return renderToken(tree, stream, label, indent, start_col, space); // label + return renderToken(tree, stream, label, space); // label } - try renderToken(tree, stream, label, indent, start_col, Space.Space); // label + try renderToken(tree, stream, label, Space.Space); // label } - return renderExpression(allocator, stream, tree, indent, start_col, maybe_rhs.?, space); + return renderExpression(allocator, stream, tree, maybe_rhs.?, space); }, .Continue => { const flow_expr = base.castTag(.Continue).?; if (flow_expr.getLabel()) |label| { - try renderToken(tree, stream, flow_expr.ltoken, indent, start_col, Space.Space); // continue + try renderToken(tree, stream, flow_expr.ltoken, Space.Space); // continue const colon = tree.nextToken(flow_expr.ltoken); - try renderToken(tree, stream, colon, indent, start_col, Space.None); // : - return renderToken(tree, stream, label, indent, start_col, space); // label + try renderToken(tree, stream, colon, Space.None); // : + return renderToken(tree, stream, label, space); // label } else { - return renderToken(tree, stream, flow_expr.ltoken, indent, start_col, space); // continue + return renderToken(tree, stream, flow_expr.ltoken, space); // continue } }, .Return => { const flow_expr = base.castTag(.Return).?; if (flow_expr.getRHS()) |rhs| { - try renderToken(tree, stream, flow_expr.ltoken, indent, start_col, Space.Space); - return renderExpression(allocator, stream, tree, indent, start_col, rhs, space); + try renderToken(tree, stream, flow_expr.ltoken, Space.Space); + return renderExpression(allocator, stream, tree, rhs, space); } else { - return renderToken(tree, stream, flow_expr.ltoken, indent, start_col, space); + return renderToken(tree, stream, flow_expr.ltoken, space); } }, .Payload => { const payload = @fieldParentPtr(ast.Node.Payload, "base", base); - try renderToken(tree, stream, payload.lpipe, indent, start_col, Space.None); - try renderExpression(allocator, stream, tree, indent, start_col, payload.error_symbol, Space.None); - return renderToken(tree, stream, payload.rpipe, indent, start_col, space); + try renderToken(tree, stream, payload.lpipe, Space.None); + try renderExpression(allocator, stream, tree, payload.error_symbol, Space.None); + return renderToken(tree, stream, payload.rpipe, space); }, .PointerPayload => { const payload = @fieldParentPtr(ast.Node.PointerPayload, "base", base); - try renderToken(tree, stream, payload.lpipe, indent, start_col, Space.None); + try renderToken(tree, stream, payload.lpipe, Space.None); if (payload.ptr_token) |ptr_token| { - try renderToken(tree, stream, ptr_token, indent, start_col, Space.None); + try renderToken(tree, stream, ptr_token, Space.None); } - try renderExpression(allocator, stream, tree, indent, start_col, payload.value_symbol, Space.None); - return renderToken(tree, stream, 
payload.rpipe, indent, start_col, space); + try renderExpression(allocator, stream, tree, payload.value_symbol, Space.None); + return renderToken(tree, stream, payload.rpipe, space); }, .PointerIndexPayload => { const payload = @fieldParentPtr(ast.Node.PointerIndexPayload, "base", base); - try renderToken(tree, stream, payload.lpipe, indent, start_col, Space.None); + try renderToken(tree, stream, payload.lpipe, Space.None); if (payload.ptr_token) |ptr_token| { - try renderToken(tree, stream, ptr_token, indent, start_col, Space.None); + try renderToken(tree, stream, ptr_token, Space.None); } - try renderExpression(allocator, stream, tree, indent, start_col, payload.value_symbol, Space.None); + try renderExpression(allocator, stream, tree, payload.value_symbol, Space.None); if (payload.index_symbol) |index_symbol| { const comma = tree.nextToken(payload.value_symbol.lastToken()); - try renderToken(tree, stream, comma, indent, start_col, Space.Space); - try renderExpression(allocator, stream, tree, indent, start_col, index_symbol, Space.None); + try renderToken(tree, stream, comma, Space.Space); + try renderExpression(allocator, stream, tree, index_symbol, Space.None); } - return renderToken(tree, stream, payload.rpipe, indent, start_col, space); + return renderToken(tree, stream, payload.rpipe, space); }, .GroupedExpression => { const grouped_expr = @fieldParentPtr(ast.Node.GroupedExpression, "base", base); - try renderToken(tree, stream, grouped_expr.lparen, indent, start_col, Space.None); - try renderExpression(allocator, stream, tree, indent, start_col, grouped_expr.expr, Space.None); - return renderToken(tree, stream, grouped_expr.rparen, indent, start_col, space); + try renderToken(tree, stream, grouped_expr.lparen, Space.None); + { + stream.pushIndentOneShot(); + try renderExpression(allocator, stream, tree, grouped_expr.expr, Space.None); + } + return renderToken(tree, stream, grouped_expr.rparen, space); }, .FieldInitializer => { const field_init = @fieldParentPtr(ast.Node.FieldInitializer, "base", base); - try renderToken(tree, stream, field_init.period_token, indent, start_col, Space.None); // . - try renderToken(tree, stream, field_init.name_token, indent, start_col, Space.Space); // name - try renderToken(tree, stream, tree.nextToken(field_init.name_token), indent, start_col, Space.Space); // = - return renderExpression(allocator, stream, tree, indent, start_col, field_init.expr, space); + try renderToken(tree, stream, field_init.period_token, Space.None); // . 
+ try renderToken(tree, stream, field_init.name_token, Space.Space); // name + try renderToken(tree, stream, tree.nextToken(field_init.name_token), Space.Space); // = + return renderExpression(allocator, stream, tree, field_init.expr, space); }, .ContainerDecl => { const container_decl = @fieldParentPtr(ast.Node.ContainerDecl, "base", base); if (container_decl.layout_token) |layout_token| { - try renderToken(tree, stream, layout_token, indent, start_col, Space.Space); + try renderToken(tree, stream, layout_token, Space.Space); } switch (container_decl.init_arg_expr) { .None => { - try renderToken(tree, stream, container_decl.kind_token, indent, start_col, Space.Space); // union + try renderToken(tree, stream, container_decl.kind_token, Space.Space); // union }, .Enum => |enum_tag_type| { - try renderToken(tree, stream, container_decl.kind_token, indent, start_col, Space.None); // union + try renderToken(tree, stream, container_decl.kind_token, Space.None); // union const lparen = tree.nextToken(container_decl.kind_token); const enum_token = tree.nextToken(lparen); - try renderToken(tree, stream, lparen, indent, start_col, Space.None); // ( - try renderToken(tree, stream, enum_token, indent, start_col, Space.None); // enum + try renderToken(tree, stream, lparen, Space.None); // ( + try renderToken(tree, stream, enum_token, Space.None); // enum if (enum_tag_type) |expr| { - try renderToken(tree, stream, tree.nextToken(enum_token), indent, start_col, Space.None); // ( - try renderExpression(allocator, stream, tree, indent, start_col, expr, Space.None); + try renderToken(tree, stream, tree.nextToken(enum_token), Space.None); // ( + try renderExpression(allocator, stream, tree, expr, Space.None); const rparen = tree.nextToken(expr.lastToken()); - try renderToken(tree, stream, rparen, indent, start_col, Space.None); // ) - try renderToken(tree, stream, tree.nextToken(rparen), indent, start_col, Space.Space); // ) + try renderToken(tree, stream, rparen, Space.None); // ) + try renderToken(tree, stream, tree.nextToken(rparen), Space.Space); // ) } else { - try renderToken(tree, stream, tree.nextToken(enum_token), indent, start_col, Space.Space); // ) + try renderToken(tree, stream, tree.nextToken(enum_token), Space.Space); // ) } }, .Type => |type_expr| { - try renderToken(tree, stream, container_decl.kind_token, indent, start_col, Space.None); // union + try renderToken(tree, stream, container_decl.kind_token, Space.None); // union const lparen = tree.nextToken(container_decl.kind_token); const rparen = tree.nextToken(type_expr.lastToken()); - try renderToken(tree, stream, lparen, indent, start_col, Space.None); // ( - try renderExpression(allocator, stream, tree, indent, start_col, type_expr, Space.None); - try renderToken(tree, stream, rparen, indent, start_col, Space.Space); // ) + try renderToken(tree, stream, lparen, Space.None); // ( + try renderExpression(allocator, stream, tree, type_expr, Space.None); + try renderToken(tree, stream, rparen, Space.Space); // ) }, } if (container_decl.fields_and_decls_len == 0) { - try renderToken(tree, stream, container_decl.lbrace_token, indent + indent_delta, start_col, Space.None); // { - return renderToken(tree, stream, container_decl.rbrace_token, indent, start_col, space); // } + { + stream.pushIndentNextLine(); + defer stream.popIndent(); + try renderToken(tree, stream, container_decl.lbrace_token, Space.None); // { + } + return renderToken(tree, stream, container_decl.rbrace_token, space); // } } const src_has_trailing_comma = blk: { @@ -1332,43 
+1286,39 @@ fn renderExpression( if (src_has_trailing_comma or !src_has_only_fields) { // One declaration per line - const new_indent = indent + indent_delta; - try renderToken(tree, stream, container_decl.lbrace_token, new_indent, start_col, .Newline); // { + stream.pushIndentNextLine(); + defer stream.popIndent(); + try renderToken(tree, stream, container_decl.lbrace_token, .Newline); // { for (fields_and_decls) |decl, i| { - try stream.writeByteNTimes(' ', new_indent); - try renderContainerDecl(allocator, stream, tree, new_indent, start_col, decl, .Newline); + try renderContainerDecl(allocator, stream, tree, decl, .Newline); if (i + 1 < fields_and_decls.len) { - try renderExtraNewline(tree, stream, start_col, fields_and_decls[i + 1]); + try renderExtraNewline(tree, stream, fields_and_decls[i + 1]); } } - - try stream.writeByteNTimes(' ', indent); } else if (src_has_newline) { // All the declarations on the same line, but place the items on // their own line - try renderToken(tree, stream, container_decl.lbrace_token, indent, start_col, .Newline); // { + try renderToken(tree, stream, container_decl.lbrace_token, .Newline); // { - const new_indent = indent + indent_delta; - try stream.writeByteNTimes(' ', new_indent); + stream.pushIndent(); + defer stream.popIndent(); for (fields_and_decls) |decl, i| { const space_after_decl: Space = if (i + 1 >= fields_and_decls.len) .Newline else .Space; - try renderContainerDecl(allocator, stream, tree, new_indent, start_col, decl, space_after_decl); + try renderContainerDecl(allocator, stream, tree, decl, space_after_decl); } - - try stream.writeByteNTimes(' ', indent); } else { // All the declarations on the same line - try renderToken(tree, stream, container_decl.lbrace_token, indent, start_col, .Space); // { + try renderToken(tree, stream, container_decl.lbrace_token, .Space); // { for (fields_and_decls) |decl| { - try renderContainerDecl(allocator, stream, tree, indent, start_col, decl, .Space); + try renderContainerDecl(allocator, stream, tree, decl, .Space); } } - return renderToken(tree, stream, container_decl.rbrace_token, indent, start_col, space); // } + return renderToken(tree, stream, container_decl.rbrace_token, space); // } }, .ErrorSetDecl => { @@ -1377,9 +1327,9 @@ fn renderExpression( const lbrace = tree.nextToken(err_set_decl.error_token); if (err_set_decl.decls_len == 0) { - try renderToken(tree, stream, err_set_decl.error_token, indent, start_col, Space.None); - try renderToken(tree, stream, lbrace, indent, start_col, Space.None); - return renderToken(tree, stream, err_set_decl.rbrace_token, indent, start_col, space); + try renderToken(tree, stream, err_set_decl.error_token, Space.None); + try renderToken(tree, stream, lbrace, Space.None); + return renderToken(tree, stream, err_set_decl.rbrace_token, space); } if (err_set_decl.decls_len == 1) blk: { @@ -1393,13 +1343,13 @@ fn renderExpression( break :blk; } - try renderToken(tree, stream, err_set_decl.error_token, indent, start_col, Space.None); // error - try renderToken(tree, stream, lbrace, indent, start_col, Space.None); // { - try renderExpression(allocator, stream, tree, indent, start_col, node, Space.None); - return renderToken(tree, stream, err_set_decl.rbrace_token, indent, start_col, space); // } + try renderToken(tree, stream, err_set_decl.error_token, Space.None); // error + try renderToken(tree, stream, lbrace, Space.None); // { + try renderExpression(allocator, stream, tree, node, Space.None); + return renderToken(tree, stream, err_set_decl.rbrace_token, space); // } 
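The brace handling in these hunks follows one idiom: `pushIndentNextLine` is issued before the `{` so the opening token stays at the enclosing level and only the lines after it pick up the extra level, and the matching `popIndent` (running via `defer`) fires before the closing token so the `}` drops back to the outer level. A minimal sketch of that idiom against this patch's stream interface; `writeBracedBody` and its arguments are hypothetical:

fn writeBracedBody(ais: anytype, lines: []const []const u8) !void {
    {
        ais.pushIndentNextLine();
        defer ais.popIndent();
        try ais.writer().writeAll("{\n"); // the '{' itself is not affected by the push
        for (lines) |line| {
            try ais.writer().writeAll(line); // these lines get the extra level
            try ais.writer().writeAll("\n");
        }
    }
    try ais.writer().writeAll("}\n"); // rendered back at the enclosing level
}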
} - try renderToken(tree, stream, err_set_decl.error_token, indent, start_col, Space.None); // error + try renderToken(tree, stream, err_set_decl.error_token, Space.None); // error const src_has_trailing_comma = blk: { const maybe_comma = tree.prevToken(err_set_decl.rbrace_token); @@ -1407,78 +1357,72 @@ fn renderExpression( }; if (src_has_trailing_comma) { - try renderToken(tree, stream, lbrace, indent, start_col, Space.Newline); // { - const new_indent = indent + indent_delta; + { + stream.pushIndent(); + defer stream.popIndent(); - const decls = err_set_decl.decls(); - for (decls) |node, i| { - try stream.writeByteNTimes(' ', new_indent); + try renderToken(tree, stream, lbrace, Space.Newline); // { + const decls = err_set_decl.decls(); + for (decls) |node, i| { + if (i + 1 < decls.len) { + try renderExpression(allocator, stream, tree, node, Space.None); + try renderToken(tree, stream, tree.nextToken(node.lastToken()), Space.Newline); // , - if (i + 1 < decls.len) { - try renderExpression(allocator, stream, tree, new_indent, start_col, node, Space.None); - try renderToken(tree, stream, tree.nextToken(node.lastToken()), new_indent, start_col, Space.Newline); // , - - try renderExtraNewline(tree, stream, start_col, decls[i + 1]); - } else { - try renderExpression(allocator, stream, tree, new_indent, start_col, node, Space.Comma); + try renderExtraNewline(tree, stream, decls[i + 1]); + } else { + try renderExpression(allocator, stream, tree, node, Space.Comma); + } } } - try stream.writeByteNTimes(' ', indent); - return renderToken(tree, stream, err_set_decl.rbrace_token, indent, start_col, space); // } + return renderToken(tree, stream, err_set_decl.rbrace_token, space); // } } else { - try renderToken(tree, stream, lbrace, indent, start_col, Space.Space); // { + try renderToken(tree, stream, lbrace, Space.Space); // { const decls = err_set_decl.decls(); for (decls) |node, i| { if (i + 1 < decls.len) { - try renderExpression(allocator, stream, tree, indent, start_col, node, Space.None); + try renderExpression(allocator, stream, tree, node, Space.None); const comma_token = tree.nextToken(node.lastToken()); assert(tree.token_ids[comma_token] == .Comma); - try renderToken(tree, stream, comma_token, indent, start_col, Space.Space); // , - try renderExtraNewline(tree, stream, start_col, decls[i + 1]); + try renderToken(tree, stream, comma_token, Space.Space); // , + try renderExtraNewline(tree, stream, decls[i + 1]); } else { - try renderExpression(allocator, stream, tree, indent, start_col, node, Space.Space); + try renderExpression(allocator, stream, tree, node, Space.Space); } } - return renderToken(tree, stream, err_set_decl.rbrace_token, indent, start_col, space); // } + return renderToken(tree, stream, err_set_decl.rbrace_token, space); // } } }, .ErrorTag => { const tag = @fieldParentPtr(ast.Node.ErrorTag, "base", base); - try renderDocComments(tree, stream, tag, tag.doc_comments, indent, start_col); - return renderToken(tree, stream, tag.name_token, indent, start_col, space); // name + try renderDocComments(tree, stream, tag, tag.doc_comments); + return renderToken(tree, stream, tag.name_token, space); // name }, .MultilineStringLiteral => { - // TODO: Don't indent in this function, but let the caller indent. - // If this has been implemented, a lot of hacky solutions in i.e. 
ArrayInit and FunctionCall can be removed const multiline_str_literal = @fieldParentPtr(ast.Node.MultilineStringLiteral, "base", base); - var skip_first_indent = true; - if (tree.token_ids[multiline_str_literal.firstToken() - 1] != .LineComment) { - try stream.print("\n", .{}); - skip_first_indent = false; - } - - for (multiline_str_literal.lines()) |t| { - if (!skip_first_indent) { - try stream.writeByteNTimes(' ', indent + indent_delta); + { + const locked_indents = stream.lockOneShotIndent(); + defer { + var i: u8 = 0; + while (i < locked_indents) : (i += 1) stream.popIndent(); } - try renderToken(tree, stream, t, indent, start_col, Space.None); - skip_first_indent = false; + try stream.maybeInsertNewline(); + + for (multiline_str_literal.lines()) |t| try renderToken(tree, stream, t, Space.None); } - try stream.writeByteNTimes(' ', indent); }, .BuiltinCall => { const builtin_call = @fieldParentPtr(ast.Node.BuiltinCall, "base", base); - try renderToken(tree, stream, builtin_call.builtin_token, indent, start_col, Space.None); // @name + try renderToken(tree, stream, builtin_call.builtin_token, Space.None); // @name const src_params_trailing_comma = blk: { if (builtin_call.params_len < 2) break :blk false; @@ -1490,31 +1434,30 @@ fn renderExpression( const lparen = tree.nextToken(builtin_call.builtin_token); if (!src_params_trailing_comma) { - try renderToken(tree, stream, lparen, indent, start_col, Space.None); // ( + try renderToken(tree, stream, lparen, Space.None); // ( // render all on one line, no trailing comma const params = builtin_call.params(); for (params) |param_node, i| { - try renderExpression(allocator, stream, tree, indent, start_col, param_node, Space.None); + try renderExpression(allocator, stream, tree, param_node, Space.None); if (i + 1 < params.len) { const comma_token = tree.nextToken(param_node.lastToken()); - try renderToken(tree, stream, comma_token, indent, start_col, Space.Space); // , + try renderToken(tree, stream, comma_token, Space.Space); // , } } } else { // one param per line - const new_indent = indent + indent_delta; - try renderToken(tree, stream, lparen, new_indent, start_col, Space.Newline); // ( + stream.pushIndent(); + defer stream.popIndent(); + try renderToken(tree, stream, lparen, Space.Newline); // ( for (builtin_call.params()) |param_node| { - try stream.writeByteNTimes(' ', new_indent); - try renderExpression(allocator, stream, tree, indent, start_col, param_node, Space.Comma); + try renderExpression(allocator, stream, tree, param_node, Space.Comma); } - try stream.writeByteNTimes(' ', indent); } - return renderToken(tree, stream, builtin_call.rparen_token, indent, start_col, space); // ) + return renderToken(tree, stream, builtin_call.rparen_token, space); // ) }, .FnProto => { @@ -1524,24 +1467,24 @@ fn renderExpression( const visib_token = tree.token_ids[visib_token_index]; assert(visib_token == .Keyword_pub or visib_token == .Keyword_export); - try renderToken(tree, stream, visib_token_index, indent, start_col, Space.Space); // pub + try renderToken(tree, stream, visib_token_index, Space.Space); // pub } if (fn_proto.getTrailer("extern_export_inline_token")) |extern_export_inline_token| { if (fn_proto.getTrailer("is_extern_prototype") == null) - try renderToken(tree, stream, extern_export_inline_token, indent, start_col, Space.Space); // extern/export/inline + try renderToken(tree, stream, extern_export_inline_token, Space.Space); // extern/export/inline } if (fn_proto.getTrailer("lib_name")) |lib_name| { - try renderExpression(allocator, 
stream, tree, indent, start_col, lib_name, Space.Space); + try renderExpression(allocator, stream, tree, lib_name, Space.Space); } const lparen = if (fn_proto.getTrailer("name_token")) |name_token| blk: { - try renderToken(tree, stream, fn_proto.fn_token, indent, start_col, Space.Space); // fn - try renderToken(tree, stream, name_token, indent, start_col, Space.None); // name + try renderToken(tree, stream, fn_proto.fn_token, Space.Space); // fn + try renderToken(tree, stream, name_token, Space.None); // name break :blk tree.nextToken(name_token); } else blk: { - try renderToken(tree, stream, fn_proto.fn_token, indent, start_col, Space.Space); // fn + try renderToken(tree, stream, fn_proto.fn_token, Space.Space); // fn break :blk tree.nextToken(fn_proto.fn_token); }; assert(tree.token_ids[lparen] == .LParen); @@ -1568,47 +1511,45 @@ fn renderExpression( }; if (!src_params_trailing_comma) { - try renderToken(tree, stream, lparen, indent, start_col, Space.None); // ( + try renderToken(tree, stream, lparen, Space.None); // ( // render all on one line, no trailing comma for (fn_proto.params()) |param_decl, i| { - try renderParamDecl(allocator, stream, tree, indent, start_col, param_decl, Space.None); + try renderParamDecl(allocator, stream, tree, param_decl, Space.None); if (i + 1 < fn_proto.params_len or fn_proto.getTrailer("var_args_token") != null) { const comma = tree.nextToken(param_decl.lastToken()); - try renderToken(tree, stream, comma, indent, start_col, Space.Space); // , + try renderToken(tree, stream, comma, Space.Space); // , } } if (fn_proto.getTrailer("var_args_token")) |var_args_token| { - try renderToken(tree, stream, var_args_token, indent, start_col, Space.None); + try renderToken(tree, stream, var_args_token, Space.None); } } else { // one param per line - const new_indent = indent + indent_delta; - try renderToken(tree, stream, lparen, new_indent, start_col, Space.Newline); // ( + stream.pushIndent(); + defer stream.popIndent(); + try renderToken(tree, stream, lparen, Space.Newline); // ( for (fn_proto.params()) |param_decl| { - try stream.writeByteNTimes(' ', new_indent); - try renderParamDecl(allocator, stream, tree, new_indent, start_col, param_decl, Space.Comma); + try renderParamDecl(allocator, stream, tree, param_decl, Space.Comma); } if (fn_proto.getTrailer("var_args_token")) |var_args_token| { - try stream.writeByteNTimes(' ', new_indent); - try renderToken(tree, stream, var_args_token, new_indent, start_col, Space.Comma); + try renderToken(tree, stream, var_args_token, Space.Comma); } - try stream.writeByteNTimes(' ', indent); } - try renderToken(tree, stream, rparen, indent, start_col, Space.Space); // ) + try renderToken(tree, stream, rparen, Space.Space); // ) if (fn_proto.getTrailer("align_expr")) |align_expr| { const align_rparen = tree.nextToken(align_expr.lastToken()); const align_lparen = tree.prevToken(align_expr.firstToken()); const align_kw = tree.prevToken(align_lparen); - try renderToken(tree, stream, align_kw, indent, start_col, Space.None); // align - try renderToken(tree, stream, align_lparen, indent, start_col, Space.None); // ( - try renderExpression(allocator, stream, tree, indent, start_col, align_expr, Space.None); - try renderToken(tree, stream, align_rparen, indent, start_col, Space.Space); // ) + try renderToken(tree, stream, align_kw, Space.None); // align + try renderToken(tree, stream, align_lparen, Space.None); // ( + try renderExpression(allocator, stream, tree, align_expr, Space.None); + try renderToken(tree, stream, align_rparen, 
Space.Space); // ) } if (fn_proto.getTrailer("section_expr")) |section_expr| { @@ -1616,10 +1557,10 @@ fn renderExpression( const section_lparen = tree.prevToken(section_expr.firstToken()); const section_kw = tree.prevToken(section_lparen); - try renderToken(tree, stream, section_kw, indent, start_col, Space.None); // section - try renderToken(tree, stream, section_lparen, indent, start_col, Space.None); // ( - try renderExpression(allocator, stream, tree, indent, start_col, section_expr, Space.None); - try renderToken(tree, stream, section_rparen, indent, start_col, Space.Space); // ) + try renderToken(tree, stream, section_kw, Space.None); // section + try renderToken(tree, stream, section_lparen, Space.None); // ( + try renderExpression(allocator, stream, tree, section_expr, Space.None); + try renderToken(tree, stream, section_rparen, Space.Space); // ) } if (fn_proto.getTrailer("callconv_expr")) |callconv_expr| { @@ -1627,23 +1568,23 @@ fn renderExpression( const callconv_lparen = tree.prevToken(callconv_expr.firstToken()); const callconv_kw = tree.prevToken(callconv_lparen); - try renderToken(tree, stream, callconv_kw, indent, start_col, Space.None); // callconv - try renderToken(tree, stream, callconv_lparen, indent, start_col, Space.None); // ( - try renderExpression(allocator, stream, tree, indent, start_col, callconv_expr, Space.None); - try renderToken(tree, stream, callconv_rparen, indent, start_col, Space.Space); // ) + try renderToken(tree, stream, callconv_kw, Space.None); // callconv + try renderToken(tree, stream, callconv_lparen, Space.None); // ( + try renderExpression(allocator, stream, tree, callconv_expr, Space.None); + try renderToken(tree, stream, callconv_rparen, Space.Space); // ) } else if (fn_proto.getTrailer("is_extern_prototype") != null) { - try stream.writeAll("callconv(.C) "); + try stream.writer().writeAll("callconv(.C) "); } else if (fn_proto.getTrailer("is_async") != null) { - try stream.writeAll("callconv(.Async) "); + try stream.writer().writeAll("callconv(.Async) "); } switch (fn_proto.return_type) { .Explicit => |node| { - return renderExpression(allocator, stream, tree, indent, start_col, node, space); + return renderExpression(allocator, stream, tree, node, space); }, .InferErrorSet => |node| { - try renderToken(tree, stream, tree.prevToken(node.firstToken()), indent, start_col, Space.None); // ! - return renderExpression(allocator, stream, tree, indent, start_col, node, space); + try renderToken(tree, stream, tree.prevToken(node.firstToken()), Space.None); // ! 
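The `GroupedExpression` and `MultilineStringLiteral` cases above lean on the one-shot variants: `pushIndentOneShot` arms an extra level that is meant to be consumed by the next write that actually applies indentation, and `lockOneShotIndent` pins any pending one-shot levels for the duration of a multiline literal, returning how many were pinned so the caller can pop them afterwards. A rough sketch contrasting a regular push with a one-shot, assuming this patch's stream interface; `demoOneShot` is hypothetical:

fn demoOneShot(ais: anytype) !void {
    ais.pushIndent(); // in effect until the matching popIndent
    defer ais.popIndent();
    try ais.writer().writeAll("one level\n");

    ais.pushIndentOneShot(); // consumed by the next line that gets indented
    try ais.writer().writeAll("two levels\n");
    try ais.writer().writeAll("one level again\n");
}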
+ return renderExpression(allocator, stream, tree, node, space); }, .Invalid => unreachable, } @@ -1653,11 +1594,11 @@ fn renderExpression( const anyframe_type = @fieldParentPtr(ast.Node.AnyFrameType, "base", base); if (anyframe_type.result) |result| { - try renderToken(tree, stream, anyframe_type.anyframe_token, indent, start_col, Space.None); // anyframe - try renderToken(tree, stream, result.arrow_token, indent, start_col, Space.None); // -> - return renderExpression(allocator, stream, tree, indent, start_col, result.return_type, space); + try renderToken(tree, stream, anyframe_type.anyframe_token, Space.None); // anyframe + try renderToken(tree, stream, result.arrow_token, Space.None); // -> + return renderExpression(allocator, stream, tree, result.return_type, space); } else { - return renderToken(tree, stream, anyframe_type.anyframe_token, indent, start_col, space); // anyframe + return renderToken(tree, stream, anyframe_type.anyframe_token, space); // anyframe } }, @@ -1666,38 +1607,38 @@ fn renderExpression( .Switch => { const switch_node = @fieldParentPtr(ast.Node.Switch, "base", base); - try renderToken(tree, stream, switch_node.switch_token, indent, start_col, Space.Space); // switch - try renderToken(tree, stream, tree.nextToken(switch_node.switch_token), indent, start_col, Space.None); // ( + try renderToken(tree, stream, switch_node.switch_token, Space.Space); // switch + try renderToken(tree, stream, tree.nextToken(switch_node.switch_token), Space.None); // ( const rparen = tree.nextToken(switch_node.expr.lastToken()); const lbrace = tree.nextToken(rparen); if (switch_node.cases_len == 0) { - try renderExpression(allocator, stream, tree, indent, start_col, switch_node.expr, Space.None); - try renderToken(tree, stream, rparen, indent, start_col, Space.Space); // ) - try renderToken(tree, stream, lbrace, indent, start_col, Space.None); // { - return renderToken(tree, stream, switch_node.rbrace, indent, start_col, space); // } + try renderExpression(allocator, stream, tree, switch_node.expr, Space.None); + try renderToken(tree, stream, rparen, Space.Space); // ) + try renderToken(tree, stream, lbrace, Space.None); // { + return renderToken(tree, stream, switch_node.rbrace, space); // } } - try renderExpression(allocator, stream, tree, indent, start_col, switch_node.expr, Space.None); + try renderExpression(allocator, stream, tree, switch_node.expr, Space.None); + try renderToken(tree, stream, rparen, Space.Space); // ) - const new_indent = indent + indent_delta; + { + stream.pushIndentNextLine(); + defer stream.popIndent(); + try renderToken(tree, stream, lbrace, Space.Newline); // { - try renderToken(tree, stream, rparen, indent, start_col, Space.Space); // ) - try renderToken(tree, stream, lbrace, new_indent, start_col, Space.Newline); // { + const cases = switch_node.cases(); + for (cases) |node, i| { + try renderExpression(allocator, stream, tree, node, Space.Comma); - const cases = switch_node.cases(); - for (cases) |node, i| { - try stream.writeByteNTimes(' ', new_indent); - try renderExpression(allocator, stream, tree, new_indent, start_col, node, Space.Comma); - - if (i + 1 < cases.len) { - try renderExtraNewline(tree, stream, start_col, cases[i + 1]); + if (i + 1 < cases.len) { + try renderExtraNewline(tree, stream, cases[i + 1]); + } } } - try stream.writeByteNTimes(' ', indent); - return renderToken(tree, stream, switch_node.rbrace, indent, start_col, space); // } + return renderToken(tree, stream, switch_node.rbrace, space); // } }, .SwitchCase => { @@ -1714,43 
+1655,41 @@ fn renderExpression( const items = switch_case.items(); for (items) |node, i| { if (i + 1 < items.len) { - try renderExpression(allocator, stream, tree, indent, start_col, node, Space.None); + try renderExpression(allocator, stream, tree, node, Space.None); const comma_token = tree.nextToken(node.lastToken()); - try renderToken(tree, stream, comma_token, indent, start_col, Space.Space); // , - try renderExtraNewline(tree, stream, start_col, items[i + 1]); + try renderToken(tree, stream, comma_token, Space.Space); // , + try renderExtraNewline(tree, stream, items[i + 1]); } else { - try renderExpression(allocator, stream, tree, indent, start_col, node, Space.Space); + try renderExpression(allocator, stream, tree, node, Space.Space); } } } else { const items = switch_case.items(); for (items) |node, i| { if (i + 1 < items.len) { - try renderExpression(allocator, stream, tree, indent, start_col, node, Space.None); + try renderExpression(allocator, stream, tree, node, Space.None); const comma_token = tree.nextToken(node.lastToken()); - try renderToken(tree, stream, comma_token, indent, start_col, Space.Newline); // , - try renderExtraNewline(tree, stream, start_col, items[i + 1]); - try stream.writeByteNTimes(' ', indent); + try renderToken(tree, stream, comma_token, Space.Newline); // , + try renderExtraNewline(tree, stream, items[i + 1]); } else { - try renderExpression(allocator, stream, tree, indent, start_col, node, Space.Comma); - try stream.writeByteNTimes(' ', indent); + try renderExpression(allocator, stream, tree, node, Space.Comma); } } } - try renderToken(tree, stream, switch_case.arrow_token, indent, start_col, Space.Space); // => + try renderToken(tree, stream, switch_case.arrow_token, Space.Space); // => if (switch_case.payload) |payload| { - try renderExpression(allocator, stream, tree, indent, start_col, payload, Space.Space); + try renderExpression(allocator, stream, tree, payload, Space.Space); } - return renderExpression(allocator, stream, tree, indent, start_col, switch_case.expr, space); + return renderExpression(allocator, stream, tree, switch_case.expr, space); }, .SwitchElse => { const switch_else = @fieldParentPtr(ast.Node.SwitchElse, "base", base); - return renderToken(tree, stream, switch_else.token, indent, start_col, space); + return renderToken(tree, stream, switch_else.token, space); }, .Else => { const else_node = @fieldParentPtr(ast.Node.Else, "base", base); @@ -1759,37 +1698,37 @@ fn renderExpression( const same_line = body_is_block or tree.tokensOnSameLine(else_node.else_token, else_node.body.lastToken()); const after_else_space = if (same_line or else_node.payload != null) Space.Space else Space.Newline; - try renderToken(tree, stream, else_node.else_token, indent, start_col, after_else_space); + try renderToken(tree, stream, else_node.else_token, after_else_space); if (else_node.payload) |payload| { const payload_space = if (same_line) Space.Space else Space.Newline; - try renderExpression(allocator, stream, tree, indent, start_col, payload, payload_space); + try renderExpression(allocator, stream, tree, payload, payload_space); } if (same_line) { - return renderExpression(allocator, stream, tree, indent, start_col, else_node.body, space); + return renderExpression(allocator, stream, tree, else_node.body, space); + } else { + stream.pushIndent(); + defer stream.popIndent(); + return renderExpression(allocator, stream, tree, else_node.body, space); } - - try stream.writeByteNTimes(' ', indent + indent_delta); - start_col.* = indent + 
indent_delta; - return renderExpression(allocator, stream, tree, indent, start_col, else_node.body, space); }, .While => { const while_node = @fieldParentPtr(ast.Node.While, "base", base); if (while_node.label) |label| { - try renderToken(tree, stream, label, indent, start_col, Space.None); // label - try renderToken(tree, stream, tree.nextToken(label), indent, start_col, Space.Space); // : + try renderToken(tree, stream, label, Space.None); // label + try renderToken(tree, stream, tree.nextToken(label), Space.Space); // : } if (while_node.inline_token) |inline_token| { - try renderToken(tree, stream, inline_token, indent, start_col, Space.Space); // inline + try renderToken(tree, stream, inline_token, Space.Space); // inline } - try renderToken(tree, stream, while_node.while_token, indent, start_col, Space.Space); // while - try renderToken(tree, stream, tree.nextToken(while_node.while_token), indent, start_col, Space.None); // ( - try renderExpression(allocator, stream, tree, indent, start_col, while_node.condition, Space.None); + try renderToken(tree, stream, while_node.while_token, Space.Space); // while + try renderToken(tree, stream, tree.nextToken(while_node.while_token), Space.None); // ( + try renderExpression(allocator, stream, tree, while_node.condition, Space.None); const cond_rparen = tree.nextToken(while_node.condition.lastToken()); @@ -1811,12 +1750,12 @@ fn renderExpression( { const rparen_space = if (while_node.payload != null or while_node.continue_expr != null) Space.Space else block_start_space; - try renderToken(tree, stream, cond_rparen, indent, start_col, rparen_space); // ) + try renderToken(tree, stream, cond_rparen, rparen_space); // ) } if (while_node.payload) |payload| { - const payload_space = if (while_node.continue_expr != null) Space.Space else block_start_space; - try renderExpression(allocator, stream, tree, indent, start_col, payload, payload_space); + const payload_space = Space.Space; //if (while_node.continue_expr != null) Space.Space else block_start_space; + try renderExpression(allocator, stream, tree, payload, payload_space); } if (while_node.continue_expr) |continue_expr| { @@ -1824,29 +1763,22 @@ fn renderExpression( const lparen = tree.prevToken(continue_expr.firstToken()); const colon = tree.prevToken(lparen); - try renderToken(tree, stream, colon, indent, start_col, Space.Space); // : - try renderToken(tree, stream, lparen, indent, start_col, Space.None); // ( + try renderToken(tree, stream, colon, Space.Space); // : + try renderToken(tree, stream, lparen, Space.None); // ( - try renderExpression(allocator, stream, tree, indent, start_col, continue_expr, Space.None); + try renderExpression(allocator, stream, tree, continue_expr, Space.None); - try renderToken(tree, stream, rparen, indent, start_col, block_start_space); // ) + try renderToken(tree, stream, rparen, block_start_space); // ) } - var new_indent = indent; - if (block_start_space == Space.Newline) { - new_indent += indent_delta; - try stream.writeByteNTimes(' ', new_indent); - start_col.* = new_indent; + { + if (!body_is_block) stream.pushIndent(); + defer if (!body_is_block) stream.popIndent(); + try renderExpression(allocator, stream, tree, while_node.body, after_body_space); } - try renderExpression(allocator, stream, tree, indent, start_col, while_node.body, after_body_space); - if (while_node.@"else") |@"else"| { - if (after_body_space == Space.Newline) { - try stream.writeByteNTimes(' ', indent); - start_col.* = indent; - } - return renderExpression(allocator, stream, tree, 
indent, start_col, &@"else".base, space); + return renderExpression(allocator, stream, tree, &@"else".base, space); } }, @@ -1854,17 +1786,17 @@ fn renderExpression( const for_node = @fieldParentPtr(ast.Node.For, "base", base); if (for_node.label) |label| { - try renderToken(tree, stream, label, indent, start_col, Space.None); // label - try renderToken(tree, stream, tree.nextToken(label), indent, start_col, Space.Space); // : + try renderToken(tree, stream, label, Space.None); // label + try renderToken(tree, stream, tree.nextToken(label), Space.Space); // : } if (for_node.inline_token) |inline_token| { - try renderToken(tree, stream, inline_token, indent, start_col, Space.Space); // inline + try renderToken(tree, stream, inline_token, Space.Space); // inline } - try renderToken(tree, stream, for_node.for_token, indent, start_col, Space.Space); // for - try renderToken(tree, stream, tree.nextToken(for_node.for_token), indent, start_col, Space.None); // ( - try renderExpression(allocator, stream, tree, indent, start_col, for_node.array_expr, Space.None); + try renderToken(tree, stream, for_node.for_token, Space.Space); // for + try renderToken(tree, stream, tree.nextToken(for_node.for_token), Space.None); // ( + try renderExpression(allocator, stream, tree, for_node.array_expr, Space.None); const rparen = tree.nextToken(for_node.array_expr.lastToken()); @@ -1872,10 +1804,10 @@ fn renderExpression( const src_one_line_to_body = !body_is_block and tree.tokensOnSameLine(rparen, for_node.body.firstToken()); const body_on_same_line = body_is_block or src_one_line_to_body; - try renderToken(tree, stream, rparen, indent, start_col, Space.Space); // ) + try renderToken(tree, stream, rparen, Space.Space); // ) const space_after_payload = if (body_on_same_line) Space.Space else Space.Newline; - try renderExpression(allocator, stream, tree, indent, start_col, for_node.payload, space_after_payload); // |x| + try renderExpression(allocator, stream, tree, for_node.payload, space_after_payload); // |x| const space_after_body = blk: { if (for_node.@"else") |@"else"| { @@ -1890,13 +1822,14 @@ fn renderExpression( } }; - const body_indent = if (body_on_same_line) indent else indent + indent_delta; - if (!body_on_same_line) try stream.writeByteNTimes(' ', body_indent); - try renderExpression(allocator, stream, tree, body_indent, start_col, for_node.body, space_after_body); // { body } + { + if (!body_on_same_line) stream.pushIndent(); + defer if (!body_on_same_line) stream.popIndent(); + try renderExpression(allocator, stream, tree, for_node.body, space_after_body); // { body } + } if (for_node.@"else") |@"else"| { - if (space_after_body == Space.Newline) try stream.writeByteNTimes(' ', indent); - return renderExpression(allocator, stream, tree, indent, start_col, &@"else".base, space); // else + return renderExpression(allocator, stream, tree, &@"else".base, space); // else } }, @@ -1906,29 +1839,29 @@ fn renderExpression( const lparen = tree.nextToken(if_node.if_token); const rparen = tree.nextToken(if_node.condition.lastToken()); - try renderToken(tree, stream, if_node.if_token, indent, start_col, Space.Space); // if - try renderToken(tree, stream, lparen, indent, start_col, Space.None); // ( + try renderToken(tree, stream, if_node.if_token, Space.Space); // if + try renderToken(tree, stream, lparen, Space.None); // ( - try renderExpression(allocator, stream, tree, indent, start_col, if_node.condition, Space.None); // condition + try renderExpression(allocator, stream, tree, if_node.condition, Space.None); 
// condition const body_is_if_block = if_node.body.tag == .If; const body_is_block = nodeIsBlock(if_node.body); if (body_is_if_block) { - try renderExtraNewline(tree, stream, start_col, if_node.body); + try renderExtraNewline(tree, stream, if_node.body); } else if (body_is_block) { const after_rparen_space = if (if_node.payload == null) Space.BlockStart else Space.Space; - try renderToken(tree, stream, rparen, indent, start_col, after_rparen_space); // ) + try renderToken(tree, stream, rparen, after_rparen_space); // ) if (if_node.payload) |payload| { - try renderExpression(allocator, stream, tree, indent, start_col, payload, Space.BlockStart); // |x| + try renderExpression(allocator, stream, tree, payload, Space.BlockStart); // |x| } if (if_node.@"else") |@"else"| { - try renderExpression(allocator, stream, tree, indent, start_col, if_node.body, Space.SpaceOrOutdent); - return renderExpression(allocator, stream, tree, indent, start_col, &@"else".base, space); + try renderExpression(allocator, stream, tree, if_node.body, Space.SpaceOrOutdent); + return renderExpression(allocator, stream, tree, &@"else".base, space); } else { - return renderExpression(allocator, stream, tree, indent, start_col, if_node.body, space); + return renderExpression(allocator, stream, tree, if_node.body, space); } } @@ -1936,186 +1869,181 @@ fn renderExpression( if (src_has_newline) { const after_rparen_space = if (if_node.payload == null) Space.Newline else Space.Space; - try renderToken(tree, stream, rparen, indent, start_col, after_rparen_space); // ) + try renderToken(tree, stream, rparen, after_rparen_space); // ) if (if_node.payload) |payload| { - try renderExpression(allocator, stream, tree, indent, start_col, payload, Space.Newline); + try renderExpression(allocator, stream, tree, payload, Space.Newline); } - const new_indent = indent + indent_delta; - try stream.writeByteNTimes(' ', new_indent); - if (if_node.@"else") |@"else"| { const else_is_block = nodeIsBlock(@"else".body); - try renderExpression(allocator, stream, tree, new_indent, start_col, if_node.body, Space.Newline); - try stream.writeByteNTimes(' ', indent); + + { + stream.pushIndent(); + defer stream.popIndent(); + try renderExpression(allocator, stream, tree, if_node.body, Space.Newline); + } if (else_is_block) { - try renderToken(tree, stream, @"else".else_token, indent, start_col, Space.Space); // else + try renderToken(tree, stream, @"else".else_token, Space.Space); // else if (@"else".payload) |payload| { - try renderExpression(allocator, stream, tree, indent, start_col, payload, Space.Space); + try renderExpression(allocator, stream, tree, payload, Space.Space); } - return renderExpression(allocator, stream, tree, indent, start_col, @"else".body, space); + return renderExpression(allocator, stream, tree, @"else".body, space); } else { const after_else_space = if (@"else".payload == null) Space.Newline else Space.Space; - try renderToken(tree, stream, @"else".else_token, indent, start_col, after_else_space); // else + try renderToken(tree, stream, @"else".else_token, after_else_space); // else if (@"else".payload) |payload| { - try renderExpression(allocator, stream, tree, indent, start_col, payload, Space.Newline); + try renderExpression(allocator, stream, tree, payload, Space.Newline); } - try stream.writeByteNTimes(' ', new_indent); - return renderExpression(allocator, stream, tree, new_indent, start_col, @"else".body, space); + stream.pushIndent(); + defer stream.popIndent(); + return renderExpression(allocator, stream, tree, 
@"else".body, space); } } else { - return renderExpression(allocator, stream, tree, new_indent, start_col, if_node.body, space); + stream.pushIndent(); + defer stream.popIndent(); + return renderExpression(allocator, stream, tree, if_node.body, space); } } - try renderToken(tree, stream, rparen, indent, start_col, Space.Space); // ) + // Single line if statement + + try renderToken(tree, stream, rparen, Space.Space); // ) if (if_node.payload) |payload| { - try renderExpression(allocator, stream, tree, indent, start_col, payload, Space.Space); + try renderExpression(allocator, stream, tree, payload, Space.Space); } if (if_node.@"else") |@"else"| { - try renderExpression(allocator, stream, tree, indent, start_col, if_node.body, Space.Space); - try renderToken(tree, stream, @"else".else_token, indent, start_col, Space.Space); + try renderExpression(allocator, stream, tree, if_node.body, Space.Space); + try renderToken(tree, stream, @"else".else_token, Space.Space); if (@"else".payload) |payload| { - try renderExpression(allocator, stream, tree, indent, start_col, payload, Space.Space); + try renderExpression(allocator, stream, tree, payload, Space.Space); } - return renderExpression(allocator, stream, tree, indent, start_col, @"else".body, space); + return renderExpression(allocator, stream, tree, @"else".body, space); } else { - return renderExpression(allocator, stream, tree, indent, start_col, if_node.body, space); + return renderExpression(allocator, stream, tree, if_node.body, space); } }, .Asm => { const asm_node = @fieldParentPtr(ast.Node.Asm, "base", base); - try renderToken(tree, stream, asm_node.asm_token, indent, start_col, Space.Space); // asm + try renderToken(tree, stream, asm_node.asm_token, Space.Space); // asm if (asm_node.volatile_token) |volatile_token| { - try renderToken(tree, stream, volatile_token, indent, start_col, Space.Space); // volatile - try renderToken(tree, stream, tree.nextToken(volatile_token), indent, start_col, Space.None); // ( + try renderToken(tree, stream, volatile_token, Space.Space); // volatile + try renderToken(tree, stream, tree.nextToken(volatile_token), Space.None); // ( } else { - try renderToken(tree, stream, tree.nextToken(asm_node.asm_token), indent, start_col, Space.None); // ( + try renderToken(tree, stream, tree.nextToken(asm_node.asm_token), Space.None); // ( } - if (asm_node.outputs.len == 0 and asm_node.inputs.len == 0 and asm_node.clobbers.len == 0) { - try renderExpression(allocator, stream, tree, indent, start_col, asm_node.template, Space.None); - return renderToken(tree, stream, asm_node.rparen, indent, start_col, space); - } + asmblk: { + stream.pushIndent(); + defer stream.popIndent(); - try renderExpression(allocator, stream, tree, indent, start_col, asm_node.template, Space.Newline); + if (asm_node.outputs.len == 0 and asm_node.inputs.len == 0 and asm_node.clobbers.len == 0) { + try renderExpression(allocator, stream, tree, asm_node.template, Space.None); + break :asmblk; + } - const indent_once = indent + indent_delta; + try renderExpression(allocator, stream, tree, asm_node.template, Space.Newline); - if (asm_node.template.tag == .MultilineStringLiteral) { - // After rendering a multiline string literal the cursor is - // already offset by indent - try stream.writeByteNTimes(' ', indent_delta); - } else { - try stream.writeByteNTimes(' ', indent_once); - } + const colon1 = tree.nextToken(asm_node.template.lastToken()); - const colon1 = tree.nextToken(asm_node.template.lastToken()); - const indent_extra = indent_once + 2; + 
const colon2 = if (asm_node.outputs.len == 0) blk: { + try renderToken(tree, stream, colon1, Space.Newline); // : - const colon2 = if (asm_node.outputs.len == 0) blk: { - try renderToken(tree, stream, colon1, indent, start_col, Space.Newline); // : - try stream.writeByteNTimes(' ', indent_once); + break :blk tree.nextToken(colon1); + } else blk: { + try renderToken(tree, stream, colon1, Space.Space); // : - break :blk tree.nextToken(colon1); - } else blk: { - try renderToken(tree, stream, colon1, indent, start_col, Space.Space); // : + stream.pushIndentN(2); + defer stream.popIndent(); - for (asm_node.outputs) |*asm_output, i| { - if (i + 1 < asm_node.outputs.len) { - const next_asm_output = asm_node.outputs[i + 1]; - try renderAsmOutput(allocator, stream, tree, indent_extra, start_col, asm_output, Space.None); + for (asm_node.outputs) |*asm_output, i| { + if (i + 1 < asm_node.outputs.len) { + const next_asm_output = asm_node.outputs[i + 1]; + try renderAsmOutput(allocator, stream, tree, asm_output, Space.None); - const comma = tree.prevToken(next_asm_output.firstToken()); - try renderToken(tree, stream, comma, indent_extra, start_col, Space.Newline); // , - try renderExtraNewlineToken(tree, stream, start_col, next_asm_output.firstToken()); + const comma = tree.prevToken(next_asm_output.firstToken()); + try renderToken(tree, stream, comma, Space.Newline); // , + try renderExtraNewlineToken(tree, stream, next_asm_output.firstToken()); + } else if (asm_node.inputs.len == 0 and asm_node.clobbers.len == 0) { + try renderAsmOutput(allocator, stream, tree, asm_output, Space.Newline); + break :asmblk; + } else { + try renderAsmOutput(allocator, stream, tree, asm_output, Space.Newline); + const comma_or_colon = tree.nextToken(asm_output.lastToken()); + break :blk switch (tree.token_ids[comma_or_colon]) { + .Comma => tree.nextToken(comma_or_colon), + else => comma_or_colon, + }; + } + } + unreachable; + }; - try stream.writeByteNTimes(' ', indent_extra); - } else if (asm_node.inputs.len == 0 and asm_node.clobbers.len == 0) { - try renderAsmOutput(allocator, stream, tree, indent_extra, start_col, asm_output, Space.Newline); - try stream.writeByteNTimes(' ', indent); - return renderToken(tree, stream, asm_node.rparen, indent, start_col, space); + const colon3 = if (asm_node.inputs.len == 0) blk: { + try renderToken(tree, stream, colon2, Space.Newline); // : + break :blk tree.nextToken(colon2); + } else blk: { + try renderToken(tree, stream, colon2, Space.Space); // : + stream.pushIndentN(2); + defer stream.popIndent(); + for (asm_node.inputs) |*asm_input, i| { + if (i + 1 < asm_node.inputs.len) { + const next_asm_input = &asm_node.inputs[i + 1]; + try renderAsmInput(allocator, stream, tree, asm_input, Space.None); + + const comma = tree.prevToken(next_asm_input.firstToken()); + try renderToken(tree, stream, comma, Space.Newline); // , + try renderExtraNewlineToken(tree, stream, next_asm_input.firstToken()); + } else if (asm_node.clobbers.len == 0) { + try renderAsmInput(allocator, stream, tree, asm_input, Space.Newline); + break :asmblk; + } else { + try renderAsmInput(allocator, stream, tree, asm_input, Space.Newline); + const comma_or_colon = tree.nextToken(asm_input.lastToken()); + break :blk switch (tree.token_ids[comma_or_colon]) { + .Comma => tree.nextToken(comma_or_colon), + else => comma_or_colon, + }; + } + } + unreachable; + }; + + try renderToken(tree, stream, colon3, Space.Space); // : + stream.pushIndentN(2); + defer stream.popIndent(); + for (asm_node.clobbers) |clobber_node, i| { + if 
(i + 1 >= asm_node.clobbers.len) { + try renderExpression(allocator, stream, tree, clobber_node, Space.Newline); + break :asmblk; } else { - try renderAsmOutput(allocator, stream, tree, indent_extra, start_col, asm_output, Space.Newline); - try stream.writeByteNTimes(' ', indent_once); - const comma_or_colon = tree.nextToken(asm_output.lastToken()); - break :blk switch (tree.token_ids[comma_or_colon]) { - .Comma => tree.nextToken(comma_or_colon), - else => comma_or_colon, - }; + try renderExpression(allocator, stream, tree, clobber_node, Space.None); + const comma = tree.nextToken(clobber_node.lastToken()); + try renderToken(tree, stream, comma, Space.Space); // , } } - unreachable; - }; - - const colon3 = if (asm_node.inputs.len == 0) blk: { - try renderToken(tree, stream, colon2, indent, start_col, Space.Newline); // : - try stream.writeByteNTimes(' ', indent_once); - - break :blk tree.nextToken(colon2); - } else blk: { - try renderToken(tree, stream, colon2, indent, start_col, Space.Space); // : - - for (asm_node.inputs) |*asm_input, i| { - if (i + 1 < asm_node.inputs.len) { - const next_asm_input = &asm_node.inputs[i + 1]; - try renderAsmInput(allocator, stream, tree, indent_extra, start_col, asm_input, Space.None); - - const comma = tree.prevToken(next_asm_input.firstToken()); - try renderToken(tree, stream, comma, indent_extra, start_col, Space.Newline); // , - try renderExtraNewlineToken(tree, stream, start_col, next_asm_input.firstToken()); - - try stream.writeByteNTimes(' ', indent_extra); - } else if (asm_node.clobbers.len == 0) { - try renderAsmInput(allocator, stream, tree, indent_extra, start_col, asm_input, Space.Newline); - try stream.writeByteNTimes(' ', indent); - return renderToken(tree, stream, asm_node.rparen, indent, start_col, space); // ) - } else { - try renderAsmInput(allocator, stream, tree, indent_extra, start_col, asm_input, Space.Newline); - try stream.writeByteNTimes(' ', indent_once); - const comma_or_colon = tree.nextToken(asm_input.lastToken()); - break :blk switch (tree.token_ids[comma_or_colon]) { - .Comma => tree.nextToken(comma_or_colon), - else => comma_or_colon, - }; - } - } - unreachable; - }; - - try renderToken(tree, stream, colon3, indent, start_col, Space.Space); // : - - for (asm_node.clobbers) |clobber_node, i| { - if (i + 1 >= asm_node.clobbers.len) { - try renderExpression(allocator, stream, tree, indent_extra, start_col, clobber_node, Space.Newline); - try stream.writeByteNTimes(' ', indent); - return renderToken(tree, stream, asm_node.rparen, indent, start_col, space); - } else { - try renderExpression(allocator, stream, tree, indent_extra, start_col, clobber_node, Space.None); - const comma = tree.nextToken(clobber_node.lastToken()); - try renderToken(tree, stream, comma, indent_once, start_col, Space.Space); // , - } } + + return renderToken(tree, stream, asm_node.rparen, space); }, .EnumLiteral => { const enum_literal = @fieldParentPtr(ast.Node.EnumLiteral, "base", base); - try renderToken(tree, stream, enum_literal.dot, indent, start_col, Space.None); // . - return renderToken(tree, stream, enum_literal.name, indent, start_col, space); // name + try renderToken(tree, stream, enum_literal.dot, Space.None); // . 
+ return renderToken(tree, stream, enum_literal.name, space); // name }, .ContainerField, @@ -2131,116 +2059,113 @@ fn renderArrayType( allocator: *mem.Allocator, stream: anytype, tree: *ast.Tree, - indent: usize, - start_col: *usize, lbracket: ast.TokenIndex, rhs: *ast.Node, len_expr: *ast.Node, opt_sentinel: ?*ast.Node, space: Space, -) (@TypeOf(stream).Error || Error)!void { +) (@TypeOf(stream.*).Error || Error)!void { const rbracket = tree.nextToken(if (opt_sentinel) |sentinel| sentinel.lastToken() else len_expr.lastToken()); - try renderToken(tree, stream, lbracket, indent, start_col, Space.None); // [ - const starts_with_comment = tree.token_ids[lbracket + 1] == .LineComment; const ends_with_comment = tree.token_ids[rbracket - 1] == .LineComment; - const new_indent = if (ends_with_comment) indent + indent_delta else indent; const new_space = if (ends_with_comment) Space.Newline else Space.None; - try renderExpression(allocator, stream, tree, new_indent, start_col, len_expr, new_space); - if (starts_with_comment) { - try stream.writeByte('\n'); - } - if (ends_with_comment or starts_with_comment) { - try stream.writeByteNTimes(' ', indent); - } - if (opt_sentinel) |sentinel| { - const colon_token = tree.prevToken(sentinel.firstToken()); - try renderToken(tree, stream, colon_token, indent, start_col, Space.None); // : - try renderExpression(allocator, stream, tree, indent, start_col, sentinel, Space.None); - } - try renderToken(tree, stream, rbracket, indent, start_col, Space.None); // ] + { + const do_indent = (starts_with_comment or ends_with_comment); + if (do_indent) stream.pushIndent(); + defer if (do_indent) stream.popIndent(); - return renderExpression(allocator, stream, tree, indent, start_col, rhs, space); + try renderToken(tree, stream, lbracket, Space.None); // [ + try renderExpression(allocator, stream, tree, len_expr, new_space); + + if (starts_with_comment) { + try stream.maybeInsertNewline(); + } + if (opt_sentinel) |sentinel| { + const colon_token = tree.prevToken(sentinel.firstToken()); + try renderToken(tree, stream, colon_token, Space.None); // : + try renderExpression(allocator, stream, tree, sentinel, Space.None); + } + if (starts_with_comment) { + try stream.maybeInsertNewline(); + } + } + try renderToken(tree, stream, rbracket, Space.None); // ] + + return renderExpression(allocator, stream, tree, rhs, space); } fn renderAsmOutput( allocator: *mem.Allocator, stream: anytype, tree: *ast.Tree, - indent: usize, - start_col: *usize, asm_output: *const ast.Node.Asm.Output, space: Space, -) (@TypeOf(stream).Error || Error)!void { - try stream.writeAll("["); - try renderExpression(allocator, stream, tree, indent, start_col, asm_output.symbolic_name, Space.None); - try stream.writeAll("] "); - try renderExpression(allocator, stream, tree, indent, start_col, asm_output.constraint, Space.None); - try stream.writeAll(" ("); +) (@TypeOf(stream.*).Error || Error)!void { + try stream.writer().writeAll("["); + try renderExpression(allocator, stream, tree, asm_output.symbolic_name, Space.None); + try stream.writer().writeAll("] "); + try renderExpression(allocator, stream, tree, asm_output.constraint, Space.None); + try stream.writer().writeAll(" ("); switch (asm_output.kind) { ast.Node.Asm.Output.Kind.Variable => |variable_name| { - try renderExpression(allocator, stream, tree, indent, start_col, &variable_name.base, Space.None); + try renderExpression(allocator, stream, tree, &variable_name.base, Space.None); }, ast.Node.Asm.Output.Kind.Return => |return_type| { - try 
stream.writeAll("-> "); - try renderExpression(allocator, stream, tree, indent, start_col, return_type, Space.None); + try stream.writer().writeAll("-> "); + try renderExpression(allocator, stream, tree, return_type, Space.None); }, } - return renderToken(tree, stream, asm_output.lastToken(), indent, start_col, space); // ) + return renderToken(tree, stream, asm_output.lastToken(), space); // ) } fn renderAsmInput( allocator: *mem.Allocator, stream: anytype, tree: *ast.Tree, - indent: usize, - start_col: *usize, asm_input: *const ast.Node.Asm.Input, space: Space, -) (@TypeOf(stream).Error || Error)!void { - try stream.writeAll("["); - try renderExpression(allocator, stream, tree, indent, start_col, asm_input.symbolic_name, Space.None); - try stream.writeAll("] "); - try renderExpression(allocator, stream, tree, indent, start_col, asm_input.constraint, Space.None); - try stream.writeAll(" ("); - try renderExpression(allocator, stream, tree, indent, start_col, asm_input.expr, Space.None); - return renderToken(tree, stream, asm_input.lastToken(), indent, start_col, space); // ) +) (@TypeOf(stream.*).Error || Error)!void { + try stream.writer().writeAll("["); + try renderExpression(allocator, stream, tree, asm_input.symbolic_name, Space.None); + try stream.writer().writeAll("] "); + try renderExpression(allocator, stream, tree, asm_input.constraint, Space.None); + try stream.writer().writeAll(" ("); + try renderExpression(allocator, stream, tree, asm_input.expr, Space.None); + return renderToken(tree, stream, asm_input.lastToken(), space); // ) } fn renderVarDecl( allocator: *mem.Allocator, stream: anytype, tree: *ast.Tree, - indent: usize, - start_col: *usize, var_decl: *ast.Node.VarDecl, -) (@TypeOf(stream).Error || Error)!void { +) (@TypeOf(stream.*).Error || Error)!void { if (var_decl.getTrailer("visib_token")) |visib_token| { - try renderToken(tree, stream, visib_token, indent, start_col, Space.Space); // pub + try renderToken(tree, stream, visib_token, Space.Space); // pub } if (var_decl.getTrailer("extern_export_token")) |extern_export_token| { - try renderToken(tree, stream, extern_export_token, indent, start_col, Space.Space); // extern + try renderToken(tree, stream, extern_export_token, Space.Space); // extern if (var_decl.getTrailer("lib_name")) |lib_name| { - try renderExpression(allocator, stream, tree, indent, start_col, lib_name, Space.Space); // "lib" + try renderExpression(allocator, stream, tree, lib_name, Space.Space); // "lib" } } if (var_decl.getTrailer("comptime_token")) |comptime_token| { - try renderToken(tree, stream, comptime_token, indent, start_col, Space.Space); // comptime + try renderToken(tree, stream, comptime_token, Space.Space); // comptime } if (var_decl.getTrailer("thread_local_token")) |thread_local_token| { - try renderToken(tree, stream, thread_local_token, indent, start_col, Space.Space); // threadlocal + try renderToken(tree, stream, thread_local_token, Space.Space); // threadlocal } - try renderToken(tree, stream, var_decl.mut_token, indent, start_col, Space.Space); // var + try renderToken(tree, stream, var_decl.mut_token, Space.Space); // var const name_space = if (var_decl.getTrailer("type_node") == null and (var_decl.getTrailer("align_node") != null or @@ -2249,70 +2174,69 @@ fn renderVarDecl( Space.Space else Space.None; - try renderToken(tree, stream, var_decl.name_token, indent, start_col, name_space); + try renderToken(tree, stream, var_decl.name_token, name_space); if (var_decl.getTrailer("type_node")) |type_node| { - try renderToken(tree, 
stream, tree.nextToken(var_decl.name_token), indent, start_col, Space.Space); + try renderToken(tree, stream, tree.nextToken(var_decl.name_token), Space.Space); const s = if (var_decl.getTrailer("align_node") != null or var_decl.getTrailer("section_node") != null or var_decl.getTrailer("init_node") != null) Space.Space else Space.None; - try renderExpression(allocator, stream, tree, indent, start_col, type_node, s); + try renderExpression(allocator, stream, tree, type_node, s); } if (var_decl.getTrailer("align_node")) |align_node| { const lparen = tree.prevToken(align_node.firstToken()); const align_kw = tree.prevToken(lparen); const rparen = tree.nextToken(align_node.lastToken()); - try renderToken(tree, stream, align_kw, indent, start_col, Space.None); // align - try renderToken(tree, stream, lparen, indent, start_col, Space.None); // ( - try renderExpression(allocator, stream, tree, indent, start_col, align_node, Space.None); + try renderToken(tree, stream, align_kw, Space.None); // align + try renderToken(tree, stream, lparen, Space.None); // ( + try renderExpression(allocator, stream, tree, align_node, Space.None); const s = if (var_decl.getTrailer("section_node") != null or var_decl.getTrailer("init_node") != null) Space.Space else Space.None; - try renderToken(tree, stream, rparen, indent, start_col, s); // ) + try renderToken(tree, stream, rparen, s); // ) } if (var_decl.getTrailer("section_node")) |section_node| { const lparen = tree.prevToken(section_node.firstToken()); const section_kw = tree.prevToken(lparen); const rparen = tree.nextToken(section_node.lastToken()); - try renderToken(tree, stream, section_kw, indent, start_col, Space.None); // linksection - try renderToken(tree, stream, lparen, indent, start_col, Space.None); // ( - try renderExpression(allocator, stream, tree, indent, start_col, section_node, Space.None); + try renderToken(tree, stream, section_kw, Space.None); // linksection + try renderToken(tree, stream, lparen, Space.None); // ( + try renderExpression(allocator, stream, tree, section_node, Space.None); const s = if (var_decl.getTrailer("init_node") != null) Space.Space else Space.None; - try renderToken(tree, stream, rparen, indent, start_col, s); // ) + try renderToken(tree, stream, rparen, s); // ) } if (var_decl.getTrailer("init_node")) |init_node| { const s = if (init_node.tag == .MultilineStringLiteral) Space.None else Space.Space; - try renderToken(tree, stream, var_decl.getTrailer("eq_token").?, indent, start_col, s); // = - try renderExpression(allocator, stream, tree, indent, start_col, init_node, Space.None); + try renderToken(tree, stream, var_decl.getTrailer("eq_token").?, s); // = + stream.pushIndentOneShot(); + try renderExpression(allocator, stream, tree, init_node, Space.None); } - try renderToken(tree, stream, var_decl.semicolon_token, indent, start_col, Space.Newline); + try renderToken(tree, stream, var_decl.semicolon_token, Space.Newline); } fn renderParamDecl( allocator: *mem.Allocator, stream: anytype, tree: *ast.Tree, - indent: usize, - start_col: *usize, param_decl: ast.Node.FnProto.ParamDecl, space: Space, -) (@TypeOf(stream).Error || Error)!void { - try renderDocComments(tree, stream, param_decl, param_decl.doc_comments, indent, start_col); +) (@TypeOf(stream.*).Error || Error)!void { + try renderDocComments(tree, stream, param_decl, param_decl.doc_comments); if (param_decl.comptime_token) |comptime_token| { - try renderToken(tree, stream, comptime_token, indent, start_col, Space.Space); + try renderToken(tree, stream, 
comptime_token, Space.Space); } if (param_decl.noalias_token) |noalias_token| { - try renderToken(tree, stream, noalias_token, indent, start_col, Space.Space); + try renderToken(tree, stream, noalias_token, Space.Space); } if (param_decl.name_token) |name_token| { - try renderToken(tree, stream, name_token, indent, start_col, Space.None); - try renderToken(tree, stream, tree.nextToken(name_token), indent, start_col, Space.Space); // : + try renderToken(tree, stream, name_token, Space.None); + try renderToken(tree, stream, tree.nextToken(name_token), Space.Space); // : } switch (param_decl.param_type) { - .any_type, .type_expr => |node| try renderExpression(allocator, stream, tree, indent, start_col, node, space), + .any_type, .type_expr => |node| try renderExpression(allocator, stream, tree, node, space), } } @@ -2320,24 +2244,22 @@ fn renderStatement( allocator: *mem.Allocator, stream: anytype, tree: *ast.Tree, - indent: usize, - start_col: *usize, base: *ast.Node, -) (@TypeOf(stream).Error || Error)!void { +) (@TypeOf(stream.*).Error || Error)!void { switch (base.tag) { .VarDecl => { const var_decl = @fieldParentPtr(ast.Node.VarDecl, "base", base); - try renderVarDecl(allocator, stream, tree, indent, start_col, var_decl); + try renderVarDecl(allocator, stream, tree, var_decl); }, else => { if (base.requireSemiColon()) { - try renderExpression(allocator, stream, tree, indent, start_col, base, Space.None); + try renderExpression(allocator, stream, tree, base, Space.None); const semicolon_index = tree.nextToken(base.lastToken()); assert(tree.token_ids[semicolon_index] == .Semicolon); - try renderToken(tree, stream, semicolon_index, indent, start_col, Space.Newline); + try renderToken(tree, stream, semicolon_index, Space.Newline); } else { - try renderExpression(allocator, stream, tree, indent, start_col, base, Space.Newline); + try renderExpression(allocator, stream, tree, base, Space.Newline); } }, } @@ -2358,22 +2280,17 @@ fn renderTokenOffset( tree: *ast.Tree, stream: anytype, token_index: ast.TokenIndex, - indent: usize, - start_col: *usize, space: Space, token_skip_bytes: usize, -) (@TypeOf(stream).Error || Error)!void { +) (@TypeOf(stream.*).Error || Error)!void { if (space == Space.BlockStart) { - if (start_col.* < indent + indent_delta) - return renderToken(tree, stream, token_index, indent, start_col, Space.Space); - try renderToken(tree, stream, token_index, indent, start_col, Space.Newline); - try stream.writeByteNTimes(' ', indent); - start_col.* = indent; - return; + // If placing the lbrace on the current line would cause an uggly gap then put the lbrace on the next line + const new_space = if (stream.isLineOverIndented()) Space.Newline else Space.Space; + return renderToken(tree, stream, token_index, new_space); } var token_loc = tree.token_locs[token_index]; - try stream.writeAll(mem.trimRight(u8, tree.tokenSliceLoc(token_loc)[token_skip_bytes..], " ")); + try stream.writer().writeAll(mem.trimRight(u8, tree.tokenSliceLoc(token_loc)[token_skip_bytes..], " ")); if (space == Space.NoComment) return; @@ -2382,20 +2299,20 @@ fn renderTokenOffset( var next_token_loc = tree.token_locs[token_index + 1]; if (space == Space.Comma) switch (next_token_id) { - .Comma => return renderToken(tree, stream, token_index + 1, indent, start_col, Space.Newline), + .Comma => return renderToken(tree, stream, token_index + 1, Space.Newline), .LineComment => { - try stream.writeAll(", "); - return renderToken(tree, stream, token_index + 1, indent, start_col, Space.Newline); + try 
stream.writer().writeAll(", "); + return renderToken(tree, stream, token_index + 1, Space.Newline); }, else => { if (token_index + 2 < tree.token_ids.len and tree.token_ids[token_index + 2] == .MultilineStringLiteralLine) { - try stream.writeAll(","); + try stream.writer().writeAll(","); return; } else { - try stream.writeAll(",\n"); - start_col.* = 0; + try stream.writer().writeAll(","); + try stream.insertNewline(); return; } }, @@ -2419,15 +2336,14 @@ fn renderTokenOffset( if (next_token_id == .MultilineStringLiteralLine) { return; } else { - try stream.writeAll("\n"); - start_col.* = 0; + try stream.insertNewline(); return; } }, Space.Space, Space.SpaceOrOutdent => { if (next_token_id == .MultilineStringLiteralLine) return; - try stream.writeByte(' '); + try stream.writer().writeByte(' '); return; }, Space.NoComment, Space.Comma, Space.BlockStart => unreachable, @@ -2444,8 +2360,7 @@ fn renderTokenOffset( next_token_id = tree.token_ids[token_index + offset]; next_token_loc = tree.token_locs[token_index + offset]; if (next_token_id != .LineComment) { - try stream.writeByte('\n'); - start_col.* = 0; + try stream.insertNewline(); return; } }, @@ -2458,7 +2373,7 @@ fn renderTokenOffset( var loc = tree.tokenLocationLoc(token_loc.end, next_token_loc); if (loc.line == 0) { - try stream.print(" {}", .{mem.trimRight(u8, tree.tokenSliceLoc(next_token_loc), " ")}); + try stream.writer().print(" {}", .{mem.trimRight(u8, tree.tokenSliceLoc(next_token_loc), " ")}); offset = 2; token_loc = next_token_loc; next_token_loc = tree.token_locs[token_index + offset]; @@ -2466,26 +2381,16 @@ fn renderTokenOffset( if (next_token_id != .LineComment) { switch (space) { Space.None, Space.Space => { - try stream.writeByte('\n'); - const after_comment_token = tree.token_ids[token_index + offset]; - const next_line_indent = switch (after_comment_token) { - .RParen, .RBrace, .RBracket => indent, - else => indent + indent_delta, - }; - try stream.writeByteNTimes(' ', next_line_indent); - start_col.* = next_line_indent; + try stream.insertNewline(); }, Space.SpaceOrOutdent => { - try stream.writeByte('\n'); - try stream.writeByteNTimes(' ', indent); - start_col.* = indent; + try stream.insertNewline(); }, Space.Newline => { if (next_token_id == .MultilineStringLiteralLine) { return; } else { - try stream.writeAll("\n"); - start_col.* = 0; + try stream.insertNewline(); return; } }, @@ -2501,10 +2406,9 @@ fn renderTokenOffset( // translate-c doesn't generate correct newlines // in generated code (loc.line == 0) so treat that case // as though there was meant to be a newline between the tokens - const newline_count = if (loc.line <= 1) @as(u8, 1) else @as(u8, 2); - try stream.writeByteNTimes('\n', newline_count); - try stream.writeByteNTimes(' ', indent); - try stream.writeAll(mem.trimRight(u8, tree.tokenSliceLoc(next_token_loc), " ")); + var newline_count = if (loc.line <= 1) @as(u8, 1) else @as(u8, 2); + while (newline_count > 0) : (newline_count -= 1) try stream.insertNewline(); + try stream.writer().writeAll(mem.trimRight(u8, tree.tokenSliceLoc(next_token_loc), " ")); offset += 1; token_loc = next_token_loc; @@ -2516,32 +2420,15 @@ fn renderTokenOffset( if (next_token_id == .MultilineStringLiteralLine) { return; } else { - try stream.writeAll("\n"); - start_col.* = 0; + try stream.insertNewline(); return; } }, Space.None, Space.Space => { - try stream.writeByte('\n'); - - const after_comment_token = tree.token_ids[token_index + offset]; - const next_line_indent = switch (after_comment_token) { - .RParen, .RBrace, 
.RBracket => blk: { - if (indent > indent_delta) { - break :blk indent - indent_delta; - } else { - break :blk 0; - } - }, - else => indent, - }; - try stream.writeByteNTimes(' ', next_line_indent); - start_col.* = next_line_indent; + try stream.insertNewline(); }, Space.SpaceOrOutdent => { - try stream.writeByte('\n'); - try stream.writeByteNTimes(' ', indent); - start_col.* = indent; + try stream.insertNewline(); }, Space.NoNewline => {}, Space.NoComment, Space.Comma, Space.BlockStart => unreachable, @@ -2556,11 +2443,9 @@ fn renderToken( tree: *ast.Tree, stream: anytype, token_index: ast.TokenIndex, - indent: usize, - start_col: *usize, space: Space, -) (@TypeOf(stream).Error || Error)!void { - return renderTokenOffset(tree, stream, token_index, indent, start_col, space, 0); +) (@TypeOf(stream.*).Error || Error)!void { + return renderTokenOffset(tree, stream, token_index, space, 0); } fn renderDocComments( @@ -2568,11 +2453,9 @@ fn renderDocComments( stream: anytype, node: anytype, doc_comments: ?*ast.Node.DocComment, - indent: usize, - start_col: *usize, -) (@TypeOf(stream).Error || Error)!void { +) (@TypeOf(stream.*).Error || Error)!void { const comment = doc_comments orelse return; - return renderDocCommentsToken(tree, stream, comment, node.firstToken(), indent, start_col); + return renderDocCommentsToken(tree, stream, comment, node.firstToken()); } fn renderDocCommentsToken( @@ -2580,20 +2463,16 @@ fn renderDocCommentsToken( stream: anytype, comment: *ast.Node.DocComment, first_token: ast.TokenIndex, - indent: usize, - start_col: *usize, -) (@TypeOf(stream).Error || Error)!void { +) (@TypeOf(stream.*).Error || Error)!void { var tok_i = comment.first_line; while (true) : (tok_i += 1) { switch (tree.token_ids[tok_i]) { .DocComment, .ContainerDocComment => { if (comment.first_line < first_token) { - try renderToken(tree, stream, tok_i, indent, start_col, Space.Newline); - try stream.writeByteNTimes(' ', indent); + try renderToken(tree, stream, tok_i, Space.Newline); } else { - try renderToken(tree, stream, tok_i, indent, start_col, Space.NoComment); - try stream.writeAll("\n"); - try stream.writeByteNTimes(' ', indent); + try renderToken(tree, stream, tok_i, Space.NoComment); + try stream.insertNewline(); } }, .LineComment => continue, @@ -2665,41 +2544,10 @@ fn nodeCausesSliceOpSpace(base: *ast.Node) bool { }; } -/// A `std.io.OutStream` that returns whether the given character has been written to it. -/// The contents are not written to anything. 
-const FindByteOutStream = struct { - byte_found: bool, - byte: u8, - - pub const Error = error{}; - pub const OutStream = std.io.OutStream(*FindByteOutStream, Error, write); - - pub fn init(byte: u8) FindByteOutStream { - return FindByteOutStream{ - .byte = byte, - .byte_found = false, - }; - } - - pub fn write(self: *FindByteOutStream, bytes: []const u8) Error!usize { - if (self.byte_found) return bytes.len; - self.byte_found = blk: { - for (bytes) |b| - if (b == self.byte) break :blk true; - break :blk false; - }; - return bytes.len; - } - - pub fn outStream(self: *FindByteOutStream) OutStream { - return .{ .context = self }; - } -}; - -fn copyFixingWhitespace(stream: anytype, slice: []const u8) @TypeOf(stream).Error!void { +fn copyFixingWhitespace(stream: anytype, slice: []const u8) @TypeOf(stream.*).Error!void { for (slice) |byte| switch (byte) { - '\t' => try stream.writeAll(" "), + '\t' => try stream.writer().writeAll(" "), '\r' => {}, - else => try stream.writeByte(byte), + else => try stream.writer().writeByte(byte), }; } diff --git a/src-self-hosted/main.zig b/src-self-hosted/main.zig index 7a67e197cc..019982e9b7 100644 --- a/src-self-hosted/main.zig +++ b/src-self-hosted/main.zig @@ -682,13 +682,13 @@ pub fn cmdFmt(gpa: *Allocator, args: []const []const u8) !void { process.exit(1); } if (check_flag) { - const anything_changed = try std.zig.render(gpa, io.null_out_stream, tree); + const anything_changed = try std.zig.render(gpa, &io.null_out_stream, tree); const code = if (anything_changed) @as(u8, 1) else @as(u8, 0); process.exit(code); } const stdout = io.getStdOut().outStream(); - _ = try std.zig.render(gpa, stdout, tree); + _ = try std.zig.render(gpa, &stdout, tree); return; } @@ -830,7 +830,7 @@ fn fmtPathFile( } if (check_mode) { - const anything_changed = try std.zig.render(fmt.gpa, io.null_out_stream, tree); + const anything_changed = try std.zig.render(fmt.gpa, &io.null_out_stream, tree); if (anything_changed) { std.debug.print("{}\n", .{file_path}); fmt.any_error = true; @@ -839,7 +839,8 @@ fn fmtPathFile( // As a heuristic, we make enough capacity for the same as the input source. try fmt.out_buffer.ensureCapacity(source_code.len); fmt.out_buffer.items.len = 0; - const anything_changed = try std.zig.render(fmt.gpa, fmt.out_buffer.writer(), tree); + const writer = fmt.out_buffer.writer(); + const anything_changed = try std.zig.render(fmt.gpa, &writer, tree); if (!anything_changed) return; // Good thing we didn't waste any file system access on this. 
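For orientation between the file diffs: the hunks above replace explicit `indent`/`start_col` bookkeeping with `pushIndent`/`popIndent` calls and the stream's `writer()` interface. Below is a rough usage sketch, not part of the patch itself, reusing the same `countingOutStream(null_out_stream)` wiring that render.zig uses to measure output lengths; the function name `demo` is illustrative only, and it assumes the PATCH 01 signatures shown above (comptime `indent_delta`, non-failing `pushIndent`).

    const std = @import("std");

    // Sketch only: AutoIndentingStream applies indentation lazily, when the first
    // bytes of a new line are written after pushIndent() has primed the stream.
    fn demo() !void {
        // Same sink combination render.zig uses when it only needs byte counts.
        var counting_stream = std.io.countingOutStream(std.io.null_out_stream);
        var ais = std.io.autoIndentingStream(4, &counting_stream);

        try ais.writer().writeAll("fn f() void {\n"); // written at column 0
        ais.pushIndent(); // primes one level (4 spaces) for following lines
        try ais.writer().writeAll("return;\n"); // underlying stream sees "    " then "return;\n"
        ais.popIndent();
        try ais.writer().writeAll("}\n"); // back at column 0
    }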
diff --git a/src-self-hosted/stage2.zig b/src-self-hosted/stage2.zig index 30d2ea44db..29b8f3df44 100644 --- a/src-self-hosted/stage2.zig +++ b/src-self-hosted/stage2.zig @@ -151,7 +151,7 @@ export fn stage2_free_clang_errors(errors_ptr: [*]translate_c.ClangErrMsg, error export fn stage2_render_ast(tree: *ast.Tree, output_file: *FILE) Error { const c_out_stream = std.io.cOutStream(output_file); - _ = std.zig.render(std.heap.c_allocator, c_out_stream, tree) catch |e| switch (e) { + _ = std.zig.render(std.heap.c_allocator, &c_out_stream, tree) catch |e| switch (e) { error.WouldBlock => unreachable, // stage1 opens stuff in exclusively blocking mode error.NotOpenForWriting => unreachable, error.SystemResources => return .SystemResources, From 7d950210a64f51cba6c4edaacbd9c67f12e72604 Mon Sep 17 00:00:00 2001 From: Lachlan Easton Date: Sat, 29 Aug 2020 23:07:47 +1000 Subject: [PATCH 02/56] zig fmt review comments --- lib/std/io/auto_indenting_stream.zig | 23 ++++++++++++++--------- lib/std/io/change_detection_stream.zig | 17 +++++++++-------- lib/std/io/find_byte_out_stream.zig | 16 ++++++++-------- 3 files changed, 31 insertions(+), 25 deletions(-) diff --git a/lib/std/io/auto_indenting_stream.zig b/lib/std/io/auto_indenting_stream.zig index e7657c1f91..227dd616a1 100644 --- a/lib/std/io/auto_indenting_stream.zig +++ b/lib/std/io/auto_indenting_stream.zig @@ -3,13 +3,15 @@ const io = std.io; const mem = std.mem; const assert = std.debug.assert; -pub fn AutoIndentingStream(comptime indent_delta: u8, comptime OutStreamType: type) type { +/// Automatically inserts indentation of written data by keeping +/// track of the current indentation level +pub fn AutoIndentingStream(comptime indent_delta: u8, comptime WriterType: type) type { return struct { const Self = @This(); - pub const Error = OutStreamType.Error; - pub const OutStream = io.Writer(*Self, Error, write); + pub const Error = WriterType.Error; + pub const Writer = io.Writer(*Self, Error, write); - out_stream: *OutStreamType, + writer_pointer: *WriterType, current_line_empty: bool = true, indent_stack: [255]u8 = undefined, indent_stack_top: u8 = 0, @@ -17,11 +19,11 @@ pub fn AutoIndentingStream(comptime indent_delta: u8, comptime OutStreamType: ty applied_indent: u8 = 0, // the most recently applied indent indent_next_line: u8 = 0, // not used until the next line - pub fn init(out_stream: *OutStreamType) Self { - return Self{ .out_stream = out_stream }; + pub fn init(writer_pointer: *WriterType) Self { + return Self{ .writer_pointer = writer_pointer }; } - pub fn writer(self: *Self) OutStream { + pub fn writer(self: *Self) Writer { return .{ .context = self }; } @@ -34,7 +36,10 @@ pub fn AutoIndentingStream(comptime indent_delta: u8, comptime OutStreamType: ty } fn writeNoIndent(self: *Self, bytes: []const u8) Error!usize { - try self.out_stream.outStream().writeAll(bytes); + if (bytes.len == 0) + return @as(usize, 0); + + try self.writer_pointer.outStream().writeAll(bytes); if (bytes[bytes.len - 1] == '\n') self.resetLine(); return bytes.len; @@ -98,7 +103,7 @@ pub fn AutoIndentingStream(comptime indent_delta: u8, comptime OutStreamType: ty fn applyIndent(self: *Self) Error!void { const current_indent = self.currentIndent(); if (self.current_line_empty and current_indent > 0) { - try self.out_stream.outStream().writeByteNTimes(' ', current_indent); + try self.writer_pointer.outStream().writeByteNTimes(' ', current_indent); self.applied_indent = current_indent; } diff --git a/lib/std/io/change_detection_stream.zig 
b/lib/std/io/change_detection_stream.zig index 941569320c..98c8130b44 100644 --- a/lib/std/io/change_detection_stream.zig +++ b/lib/std/io/change_detection_stream.zig @@ -3,26 +3,27 @@ const io = std.io; const mem = std.mem; const assert = std.debug.assert; -pub fn ChangeDetectionStream(comptime OutStreamType: type) type { +/// Used to detect if the data written to a stream differs from a source buffer +pub fn ChangeDetectionStream(comptime WriterType: type) type { return struct { const Self = @This(); - pub const Error = OutStreamType.Error; - pub const OutStream = io.OutStream(*Self, Error, write); + pub const Error = WriterType.Error; + pub const Writer = io.Writer(*Self, Error, write); anything_changed: bool = false, - out_stream: *OutStreamType, + writer_pointer: *WriterType, source_index: usize, source: []const u8, - pub fn init(source: []const u8, out_stream: *OutStreamType) Self { + pub fn init(source: []const u8, writer_pointer: *WriterType) Self { return Self{ - .out_stream = out_stream, + .writer_pointer = writer_pointer, .source_index = 0, .source = source, }; } - pub fn outStream(self: *Self) OutStream { + pub fn writer(self: *Self) Writer { return .{ .context = self }; } @@ -40,7 +41,7 @@ pub fn ChangeDetectionStream(comptime OutStreamType: type) type { } } - return self.out_stream.write(bytes); + return self.writer_pointer.write(bytes); } pub fn changeDetected(self: *Self) bool { diff --git a/lib/std/io/find_byte_out_stream.zig b/lib/std/io/find_byte_out_stream.zig index e835cbd584..b316a98549 100644 --- a/lib/std/io/find_byte_out_stream.zig +++ b/lib/std/io/find_byte_out_stream.zig @@ -2,21 +2,21 @@ const std = @import("../std.zig"); const io = std.io; const assert = std.debug.assert; -// An OutStream that returns whether the given character has been written to it. -// The contents are not written to anything. -pub fn FindByteOutStream(comptime OutStreamType: type) type { +/// An OutStream that returns whether the given character has been written to it. +/// The contents are not written to anything. 
+pub fn FindByteOutStream(comptime WriterType: type) type { return struct { const Self = @This(); - pub const Error = OutStreamType.Error; + pub const Error = WriterType.Error; pub const OutStream = io.OutStream(*Self, Error, write); - out_stream: *OutStreamType, + writer_pointer: *WriterType, byte_found: bool, byte: u8, - pub fn init(byte: u8, out_stream: *OutStreamType) Self { + pub fn init(byte: u8, writer_pointer: *WriterType) Self { return Self{ - .out_stream = out_stream, + .writer_pointer = writer_pointer, .byte = byte, .byte_found = false, }; @@ -34,7 +34,7 @@ pub fn FindByteOutStream(comptime OutStreamType: type) type { break :blk false; }; } - return self.out_stream.writer().write(bytes); + return self.writer_pointer.writer().write(bytes); } }; } From 50c8a53188bdd20321990b7a4999f534b9e613dd Mon Sep 17 00:00:00 2001 From: Lachlan Easton Date: Sun, 30 Aug 2020 10:32:21 +1000 Subject: [PATCH 03/56] Use ArrayList instead of fixed array for auto_indenting_stream --- lib/std/io/auto_indenting_stream.zig | 78 ++++++++++++++++------------ lib/std/io/find_byte_out_stream.zig | 4 +- lib/std/zig/render.zig | 71 +++++++++++++------------ 3 files changed, 83 insertions(+), 70 deletions(-) diff --git a/lib/std/io/auto_indenting_stream.zig b/lib/std/io/auto_indenting_stream.zig index 227dd616a1..71547026a4 100644 --- a/lib/std/io/auto_indenting_stream.zig +++ b/lib/std/io/auto_indenting_stream.zig @@ -1,26 +1,36 @@ const std = @import("../std.zig"); const io = std.io; const mem = std.mem; +const Allocator = mem.Allocator; +const ArrayList = std.ArrayList; const assert = std.debug.assert; /// Automatically inserts indentation of written data by keeping /// track of the current indentation level -pub fn AutoIndentingStream(comptime indent_delta: u8, comptime WriterType: type) type { +pub fn AutoIndentingStream(comptime indent_delta: usize, comptime WriterType: type) type { return struct { const Self = @This(); pub const Error = WriterType.Error; + pub const PushError = Allocator.Error; pub const Writer = io.Writer(*Self, Error, write); + const Stack = ArrayList(usize); writer_pointer: *WriterType, - current_line_empty: bool = true, - indent_stack: [255]u8 = undefined, - indent_stack_top: u8 = 0, - indent_one_shot_count: u8 = 0, // automatically popped when applied - applied_indent: u8 = 0, // the most recently applied indent - indent_next_line: u8 = 0, // not used until the next line + indent_stack: Stack, - pub fn init(writer_pointer: *WriterType) Self { - return Self{ .writer_pointer = writer_pointer }; + current_line_empty: bool = true, + indent_one_shot_count: usize = 0, // automatically popped when applied + applied_indent: usize = 0, // the most recently applied indent + indent_next_line: usize = 0, // not used until the next line + + pub fn init(writer_pointer: *WriterType, allocator: *Allocator) Self { + var indent_stack = Stack.init(allocator); + return Self{ .writer_pointer = writer_pointer, .indent_stack = indent_stack }; + } + + /// Release all allocated memory. 
+ pub fn deinit(self: Self) void { + self.indent_stack.deinit(); } pub fn writer(self: *Self) Writer { @@ -39,7 +49,7 @@ pub fn AutoIndentingStream(comptime indent_delta: u8, comptime WriterType: type) if (bytes.len == 0) return @as(usize, 0); - try self.writer_pointer.outStream().writeAll(bytes); + try self.writer_pointer.writer().writeAll(bytes); if (bytes[bytes.len - 1] == '\n') self.resetLine(); return bytes.len; @@ -61,53 +71,52 @@ pub fn AutoIndentingStream(comptime indent_delta: u8, comptime WriterType: type) } /// Push default indentation - pub fn pushIndent(self: *Self) void { - // Doesn't actually write any indentation. Just primes the stream to be able to write the correct indentation if it needs to. - self.pushIndentN(indent_delta); + pub fn pushIndent(self: *Self) PushError!void { + // Doesn't actually write any indentation. + // Just primes the stream to be able to write the correct indentation if it needs to. + try self.pushIndentN(indent_delta); } /// Push an indent of arbitrary width - pub fn pushIndentN(self: *Self, n: u8) void { - assert(self.indent_stack_top < std.math.maxInt(u8)); - self.indent_stack[self.indent_stack_top] = n; - self.indent_stack_top += 1; + pub fn pushIndentN(self: *Self, n: usize) PushError!void { + try self.indent_stack.append(n); } /// Push an indent that is automatically popped after being applied - pub fn pushIndentOneShot(self: *Self) void { + pub fn pushIndentOneShot(self: *Self) PushError!void { self.indent_one_shot_count += 1; - self.pushIndent(); + try self.pushIndent(); } /// Turns all one-shot indents into regular indents /// Returns number of indents that must now be manually popped - pub fn lockOneShotIndent(self: *Self) u8 { + pub fn lockOneShotIndent(self: *Self) usize { var locked_count = self.indent_one_shot_count; self.indent_one_shot_count = 0; return locked_count; } /// Push an indent that should not take effect until the next line - pub fn pushIndentNextLine(self: *Self) void { + pub fn pushIndentNextLine(self: *Self) PushError!void { self.indent_next_line += 1; - self.pushIndent(); + try self.pushIndent(); } pub fn popIndent(self: *Self) void { - assert(self.indent_stack_top != 0); - self.indent_stack_top -= 1; - self.indent_next_line = std.math.min(self.indent_stack_top, self.indent_next_line); // Tentative indent may have been popped before there was a newline + assert(self.indent_stack.items.len != 0); + self.indent_stack.items.len -= 1; + self.indent_next_line = std.math.min(self.indent_stack.items.len, self.indent_next_line); // Tentative indent may have been popped before there was a newline } /// Writes ' ' bytes if the current line is empty fn applyIndent(self: *Self) Error!void { const current_indent = self.currentIndent(); if (self.current_line_empty and current_indent > 0) { - try self.writer_pointer.outStream().writeByteNTimes(' ', current_indent); + try self.writer_pointer.writer().writeByteNTimes(' ', current_indent); self.applied_indent = current_indent; } - self.indent_stack_top -= self.indent_one_shot_count; + self.indent_stack.items.len -= self.indent_one_shot_count; self.indent_one_shot_count = 0; self.current_line_empty = false; } @@ -118,11 +127,11 @@ pub fn AutoIndentingStream(comptime indent_delta: u8, comptime WriterType: type) return self.applied_indent > self.currentIndent(); } - fn currentIndent(self: *Self) u8 { - var indent_current: u8 = 0; - if (self.indent_stack_top > 0) { - const stack_top = self.indent_stack_top - self.indent_next_line; - for (self.indent_stack[0..stack_top]) |indent| { + fn 
currentIndent(self: *Self) usize { + var indent_current: usize = 0; + if (self.indent_stack.items.len > 0) { + const stack_top = self.indent_stack.items.len - self.indent_next_line; + for (self.indent_stack.items[0..stack_top]) |indent| { indent_current += indent; } } @@ -132,9 +141,10 @@ pub fn AutoIndentingStream(comptime indent_delta: u8, comptime WriterType: type) } pub fn autoIndentingStream( - comptime indent_delta: u8, + comptime indent_delta: usize, underlying_stream: anytype, + allocator: *Allocator, ) AutoIndentingStream(indent_delta, @TypeOf(underlying_stream).Child) { comptime assert(@typeInfo(@TypeOf(underlying_stream)) == .Pointer); - return AutoIndentingStream(indent_delta, @TypeOf(underlying_stream).Child).init(underlying_stream); + return AutoIndentingStream(indent_delta, @TypeOf(underlying_stream).Child).init(underlying_stream, allocator); } diff --git a/lib/std/io/find_byte_out_stream.zig b/lib/std/io/find_byte_out_stream.zig index b316a98549..0bf3a15545 100644 --- a/lib/std/io/find_byte_out_stream.zig +++ b/lib/std/io/find_byte_out_stream.zig @@ -8,7 +8,7 @@ pub fn FindByteOutStream(comptime WriterType: type) type { return struct { const Self = @This(); pub const Error = WriterType.Error; - pub const OutStream = io.OutStream(*Self, Error, write); + pub const Writer = io.Writer(*Self, Error, write); writer_pointer: *WriterType, byte_found: bool, @@ -22,7 +22,7 @@ pub fn FindByteOutStream(comptime WriterType: type) type { }; } - pub fn outStream(self: *Self) OutStream { + pub fn writer(self: *Self) Writer { return .{ .context = self }; } diff --git a/lib/std/zig/render.zig b/lib/std/zig/render.zig index d7bba2f6bf..e3133a5501 100644 --- a/lib/std/zig/render.zig +++ b/lib/std/zig/render.zig @@ -24,7 +24,8 @@ pub fn render(allocator: *mem.Allocator, stream: anytype, tree: *ast.Tree) (meta var s = stream.*; var change_detection_stream = std.io.changeDetectionStream(tree.source, &s); - var auto_indenting_stream = std.io.autoIndentingStream(indent_delta, &change_detection_stream); + var auto_indenting_stream = std.io.autoIndentingStream(indent_delta, &change_detection_stream, allocator); + defer auto_indenting_stream.deinit(); try renderRoot(allocator, &auto_indenting_stream, tree); @@ -388,11 +389,11 @@ fn renderExpression( } if (block.statements.len == 0) { - stream.pushIndentNextLine(); + try stream.pushIndentNextLine(); defer stream.popIndent(); try renderToken(tree, stream, block.lbrace, Space.None); } else { - stream.pushIndentNextLine(); + try stream.pushIndentNextLine(); defer stream.popIndent(); try renderToken(tree, stream, block.lbrace, Space.Newline); @@ -462,7 +463,7 @@ fn renderExpression( try renderExpression(allocator, stream, tree, payload, Space.Space); } - stream.pushIndentOneShot(); + try stream.pushIndentOneShot(); return renderExpression(allocator, stream, tree, infix_op_node.rhs, space); }, @@ -523,7 +524,7 @@ fn renderExpression( }; try renderToken(tree, stream, infix_op_node.op_token, after_op_space); - stream.pushIndentOneShot(); + try stream.pushIndentOneShot(); return renderExpression(allocator, stream, tree, infix_op_node.rhs, space); }, @@ -717,7 +718,7 @@ fn renderExpression( } { - stream.pushIndent(); + try stream.pushIndent(); defer stream.popIndent(); try renderToken(tree, stream, lbrace, Space.None); } @@ -782,7 +783,8 @@ fn renderExpression( // Null stream for counting the printed length of each expression var counting_stream = std.io.countingOutStream(std.io.null_out_stream); - var auto_indenting_stream = 
std.io.autoIndentingStream(indent_delta, &counting_stream); + var auto_indenting_stream = std.io.autoIndentingStream(indent_delta, &counting_stream, allocator); + defer auto_indenting_stream.deinit(); for (exprs) |expr, i| { counting_stream.bytes_written = 0; @@ -794,7 +796,7 @@ fn renderExpression( } { - stream.pushIndentNextLine(); + try stream.pushIndentNextLine(); defer stream.popIndent(); try renderToken(tree, stream, lbrace, Space.Newline); @@ -878,7 +880,7 @@ fn renderExpression( } { - stream.pushIndentNextLine(); + try stream.pushIndentNextLine(); defer stream.popIndent(); try renderToken(tree, stream, lbrace, Space.None); } @@ -900,7 +902,8 @@ fn renderExpression( // render field expressions until a LF is found for (field_inits) |field_init| { var find_stream = std.io.findByteOutStream('\n', &std.io.null_out_stream); - var auto_indenting_stream = std.io.autoIndentingStream(indent_delta, &find_stream); + var auto_indenting_stream = std.io.autoIndentingStream(indent_delta, &find_stream, allocator); + defer auto_indenting_stream.deinit(); try renderExpression(allocator, &auto_indenting_stream, tree, field_init, Space.None); if (find_stream.byte_found) break :blk false; @@ -960,7 +963,7 @@ fn renderExpression( .node => |node| try renderExpression(allocator, stream, tree, node, Space.None), } - stream.pushIndentNextLine(); + try stream.pushIndentNextLine(); defer stream.popIndent(); try renderToken(tree, stream, lbrace, Space.Newline); @@ -1008,7 +1011,7 @@ fn renderExpression( const params = call.params(); for (params) |param_node, i| { - stream.pushIndent(); + try stream.pushIndent(); defer stream.popIndent(); if (i + 1 < params.len) { @@ -1028,7 +1031,7 @@ fn renderExpression( const params = call.params(); for (params) |param_node, i| { - if (param_node.*.tag == .MultilineStringLiteral) stream.pushIndentOneShot(); + if (param_node.*.tag == .MultilineStringLiteral) try stream.pushIndentOneShot(); try renderExpression(allocator, stream, tree, param_node, Space.None); @@ -1055,7 +1058,7 @@ fn renderExpression( { const new_space = if (ends_with_comment) Space.Newline else Space.None; - stream.pushIndent(); + try stream.pushIndent(); defer stream.popIndent(); try renderExpression(allocator, stream, tree, suffix_op.index_expr, new_space); } @@ -1191,7 +1194,7 @@ fn renderExpression( try renderToken(tree, stream, grouped_expr.lparen, Space.None); { - stream.pushIndentOneShot(); + try stream.pushIndentOneShot(); try renderExpression(allocator, stream, tree, grouped_expr.expr, Space.None); } return renderToken(tree, stream, grouped_expr.rparen, space); @@ -1251,7 +1254,7 @@ fn renderExpression( if (container_decl.fields_and_decls_len == 0) { { - stream.pushIndentNextLine(); + try stream.pushIndentNextLine(); defer stream.popIndent(); try renderToken(tree, stream, container_decl.lbrace_token, Space.None); // { } @@ -1286,7 +1289,7 @@ fn renderExpression( if (src_has_trailing_comma or !src_has_only_fields) { // One declaration per line - stream.pushIndentNextLine(); + try stream.pushIndentNextLine(); defer stream.popIndent(); try renderToken(tree, stream, container_decl.lbrace_token, .Newline); // { @@ -1302,7 +1305,7 @@ fn renderExpression( // their own line try renderToken(tree, stream, container_decl.lbrace_token, .Newline); // { - stream.pushIndent(); + try stream.pushIndent(); defer stream.popIndent(); for (fields_and_decls) |decl, i| { @@ -1358,7 +1361,7 @@ fn renderExpression( if (src_has_trailing_comma) { { - stream.pushIndent(); + try stream.pushIndent(); defer stream.popIndent(); try 
renderToken(tree, stream, lbrace, Space.Newline); // { @@ -1448,7 +1451,7 @@ fn renderExpression( } } else { // one param per line - stream.pushIndent(); + try stream.pushIndent(); defer stream.popIndent(); try renderToken(tree, stream, lparen, Space.Newline); // ( @@ -1527,7 +1530,7 @@ fn renderExpression( } } else { // one param per line - stream.pushIndent(); + try stream.pushIndent(); defer stream.popIndent(); try renderToken(tree, stream, lparen, Space.Newline); // ( @@ -1624,7 +1627,7 @@ fn renderExpression( try renderToken(tree, stream, rparen, Space.Space); // ) { - stream.pushIndentNextLine(); + try stream.pushIndentNextLine(); defer stream.popIndent(); try renderToken(tree, stream, lbrace, Space.Newline); // { @@ -1708,7 +1711,7 @@ fn renderExpression( if (same_line) { return renderExpression(allocator, stream, tree, else_node.body, space); } else { - stream.pushIndent(); + try stream.pushIndent(); defer stream.popIndent(); return renderExpression(allocator, stream, tree, else_node.body, space); } @@ -1772,7 +1775,7 @@ fn renderExpression( } { - if (!body_is_block) stream.pushIndent(); + if (!body_is_block) try stream.pushIndent(); defer if (!body_is_block) stream.popIndent(); try renderExpression(allocator, stream, tree, while_node.body, after_body_space); } @@ -1823,7 +1826,7 @@ fn renderExpression( }; { - if (!body_on_same_line) stream.pushIndent(); + if (!body_on_same_line) try stream.pushIndent(); defer if (!body_on_same_line) stream.popIndent(); try renderExpression(allocator, stream, tree, for_node.body, space_after_body); // { body } } @@ -1879,7 +1882,7 @@ fn renderExpression( const else_is_block = nodeIsBlock(@"else".body); { - stream.pushIndent(); + try stream.pushIndent(); defer stream.popIndent(); try renderExpression(allocator, stream, tree, if_node.body, Space.Newline); } @@ -1900,12 +1903,12 @@ fn renderExpression( try renderExpression(allocator, stream, tree, payload, Space.Newline); } - stream.pushIndent(); + try stream.pushIndent(); defer stream.popIndent(); return renderExpression(allocator, stream, tree, @"else".body, space); } } else { - stream.pushIndent(); + try stream.pushIndent(); defer stream.popIndent(); return renderExpression(allocator, stream, tree, if_node.body, space); } @@ -1946,7 +1949,7 @@ fn renderExpression( } asmblk: { - stream.pushIndent(); + try stream.pushIndent(); defer stream.popIndent(); if (asm_node.outputs.len == 0 and asm_node.inputs.len == 0 and asm_node.clobbers.len == 0) { @@ -1965,7 +1968,7 @@ fn renderExpression( } else blk: { try renderToken(tree, stream, colon1, Space.Space); // : - stream.pushIndentN(2); + try stream.pushIndentN(2); defer stream.popIndent(); for (asm_node.outputs) |*asm_output, i| { @@ -1996,7 +1999,7 @@ fn renderExpression( break :blk tree.nextToken(colon2); } else blk: { try renderToken(tree, stream, colon2, Space.Space); // : - stream.pushIndentN(2); + try stream.pushIndentN(2); defer stream.popIndent(); for (asm_node.inputs) |*asm_input, i| { if (i + 1 < asm_node.inputs.len) { @@ -2022,7 +2025,7 @@ fn renderExpression( }; try renderToken(tree, stream, colon3, Space.Space); // : - stream.pushIndentN(2); + try stream.pushIndentN(2); defer stream.popIndent(); for (asm_node.clobbers) |clobber_node, i| { if (i + 1 >= asm_node.clobbers.len) { @@ -2075,7 +2078,7 @@ fn renderArrayType( const new_space = if (ends_with_comment) Space.Newline else Space.None; { const do_indent = (starts_with_comment or ends_with_comment); - if (do_indent) stream.pushIndent(); + if (do_indent) try stream.pushIndent(); defer if 
(do_indent) stream.popIndent(); try renderToken(tree, stream, lbracket, Space.None); // [ @@ -2209,7 +2212,7 @@ fn renderVarDecl( if (var_decl.getTrailer("init_node")) |init_node| { const s = if (init_node.tag == .MultilineStringLiteral) Space.None else Space.Space; try renderToken(tree, stream, var_decl.getTrailer("eq_token").?, s); // = - stream.pushIndentOneShot(); + try stream.pushIndentOneShot(); try renderExpression(allocator, stream, tree, init_node, Space.None); } From 5aca3baea62326dee301ec29c567dc224baa4a08 Mon Sep 17 00:00:00 2001 From: Lachlan Easton Date: Mon, 31 Aug 2020 23:39:30 +1000 Subject: [PATCH 04/56] zig fmt: Remove dynamic stack from auto-indenting-stream --- lib/std/io.zig | 4 +- lib/std/io/auto_indenting_stream.zig | 58 +++++++++------------- lib/std/io/change_detection_stream.zig | 4 +- lib/std/io/find_byte_out_stream.zig | 4 +- lib/std/target.zig | 2 +- lib/std/zig/parser_test.zig | 10 ++-- lib/std/zig/render.zig | 68 +++++++++++++------------- 7 files changed, 68 insertions(+), 82 deletions(-) diff --git a/lib/std/io.zig b/lib/std/io.zig index 1514d80cb0..3f02128a6c 100644 --- a/lib/std/io.zig +++ b/lib/std/io.zig @@ -191,10 +191,10 @@ pub const BufferedAtomicFile = @import("io/buffered_atomic_file.zig").BufferedAt pub const StreamSource = @import("io/stream_source.zig").StreamSource; /// A Writer that doesn't write to anything. -pub var null_writer = @as(NullWriter, .{ .context = {} }); +pub const null_writer = @as(NullWriter, .{ .context = {} }); /// Deprecated: use `null_writer` -pub var null_out_stream = null_writer; +pub const null_out_stream = null_writer; const NullWriter = Writer(void, error{}, dummyWrite); /// Deprecated: use NullWriter diff --git a/lib/std/io/auto_indenting_stream.zig b/lib/std/io/auto_indenting_stream.zig index 71547026a4..bebbf9aed6 100644 --- a/lib/std/io/auto_indenting_stream.zig +++ b/lib/std/io/auto_indenting_stream.zig @@ -1,37 +1,31 @@ const std = @import("../std.zig"); const io = std.io; const mem = std.mem; -const Allocator = mem.Allocator; -const ArrayList = std.ArrayList; const assert = std.debug.assert; /// Automatically inserts indentation of written data by keeping /// track of the current indentation level -pub fn AutoIndentingStream(comptime indent_delta: usize, comptime WriterType: type) type { +pub fn AutoIndentingStream(comptime WriterType: type) type { return struct { const Self = @This(); pub const Error = WriterType.Error; - pub const PushError = Allocator.Error; pub const Writer = io.Writer(*Self, Error, write); - const Stack = ArrayList(usize); writer_pointer: *WriterType, - indent_stack: Stack, + indent_stack: usize = 0, + indent_delta: usize, current_line_empty: bool = true, indent_one_shot_count: usize = 0, // automatically popped when applied applied_indent: usize = 0, // the most recently applied indent indent_next_line: usize = 0, // not used until the next line - pub fn init(writer_pointer: *WriterType, allocator: *Allocator) Self { - var indent_stack = Stack.init(allocator); - return Self{ .writer_pointer = writer_pointer, .indent_stack = indent_stack }; + pub fn init(indent_delta: usize, writer_pointer: *WriterType) Self { + return Self{ .writer_pointer = writer_pointer, .indent_delta = indent_delta }; } /// Release all allocated memory. 
- pub fn deinit(self: Self) void { - self.indent_stack.deinit(); - } + pub fn deinit(self: Self) void {} pub fn writer(self: *Self) Writer { return .{ .context = self }; @@ -71,21 +65,16 @@ pub fn AutoIndentingStream(comptime indent_delta: usize, comptime WriterType: ty } /// Push default indentation - pub fn pushIndent(self: *Self) PushError!void { + pub fn pushIndent(self: *Self) void { // Doesn't actually write any indentation. // Just primes the stream to be able to write the correct indentation if it needs to. - try self.pushIndentN(indent_delta); - } - - /// Push an indent of arbitrary width - pub fn pushIndentN(self: *Self, n: usize) PushError!void { - try self.indent_stack.append(n); + self.indent_stack += 1; } /// Push an indent that is automatically popped after being applied - pub fn pushIndentOneShot(self: *Self) PushError!void { + pub fn pushIndentOneShot(self: *Self) void { self.indent_one_shot_count += 1; - try self.pushIndent(); + self.pushIndent(); } /// Turns all one-shot indents into regular indents @@ -97,15 +86,15 @@ pub fn AutoIndentingStream(comptime indent_delta: usize, comptime WriterType: ty } /// Push an indent that should not take effect until the next line - pub fn pushIndentNextLine(self: *Self) PushError!void { + pub fn pushIndentNextLine(self: *Self) void { self.indent_next_line += 1; - try self.pushIndent(); + self.pushIndent(); } pub fn popIndent(self: *Self) void { - assert(self.indent_stack.items.len != 0); - self.indent_stack.items.len -= 1; - self.indent_next_line = std.math.min(self.indent_stack.items.len, self.indent_next_line); // Tentative indent may have been popped before there was a newline + assert(self.indent_stack != 0); + self.indent_stack -= 1; + self.indent_next_line = std.math.min(self.indent_stack, self.indent_next_line); // Tentative indent may have been popped before there was a newline } /// Writes ' ' bytes if the current line is empty @@ -116,7 +105,7 @@ pub fn AutoIndentingStream(comptime indent_delta: usize, comptime WriterType: ty self.applied_indent = current_indent; } - self.indent_stack.items.len -= self.indent_one_shot_count; + self.indent_stack -= self.indent_one_shot_count; self.indent_one_shot_count = 0; self.current_line_empty = false; } @@ -129,11 +118,9 @@ pub fn AutoIndentingStream(comptime indent_delta: usize, comptime WriterType: ty fn currentIndent(self: *Self) usize { var indent_current: usize = 0; - if (self.indent_stack.items.len > 0) { - const stack_top = self.indent_stack.items.len - self.indent_next_line; - for (self.indent_stack.items[0..stack_top]) |indent| { - indent_current += indent; - } + if (self.indent_stack > 0) { + const stack_top = self.indent_stack - self.indent_next_line; + indent_current = stack_top * self.indent_delta; } return indent_current; } @@ -141,10 +128,9 @@ pub fn AutoIndentingStream(comptime indent_delta: usize, comptime WriterType: ty } pub fn autoIndentingStream( - comptime indent_delta: usize, + indent_delta: usize, underlying_stream: anytype, - allocator: *Allocator, -) AutoIndentingStream(indent_delta, @TypeOf(underlying_stream).Child) { +) AutoIndentingStream(@TypeOf(underlying_stream).Child) { comptime assert(@typeInfo(@TypeOf(underlying_stream)) == .Pointer); - return AutoIndentingStream(indent_delta, @TypeOf(underlying_stream).Child).init(underlying_stream, allocator); + return AutoIndentingStream(@TypeOf(underlying_stream).Child).init(indent_delta, underlying_stream); } diff --git a/lib/std/io/change_detection_stream.zig b/lib/std/io/change_detection_stream.zig index 
98c8130b44..b559e66751 100644 --- a/lib/std/io/change_detection_stream.zig +++ b/lib/std/io/change_detection_stream.zig @@ -11,11 +11,11 @@ pub fn ChangeDetectionStream(comptime WriterType: type) type { pub const Writer = io.Writer(*Self, Error, write); anything_changed: bool = false, - writer_pointer: *WriterType, + writer_pointer: *const WriterType, source_index: usize, source: []const u8, - pub fn init(source: []const u8, writer_pointer: *WriterType) Self { + pub fn init(source: []const u8, writer_pointer: *const WriterType) Self { return Self{ .writer_pointer = writer_pointer, .source_index = 0, diff --git a/lib/std/io/find_byte_out_stream.zig b/lib/std/io/find_byte_out_stream.zig index 0bf3a15545..bfd0e815e4 100644 --- a/lib/std/io/find_byte_out_stream.zig +++ b/lib/std/io/find_byte_out_stream.zig @@ -10,11 +10,11 @@ pub fn FindByteOutStream(comptime WriterType: type) type { pub const Error = WriterType.Error; pub const Writer = io.Writer(*Self, Error, write); - writer_pointer: *WriterType, + writer_pointer: *const WriterType, byte_found: bool, byte: u8, - pub fn init(byte: u8, writer_pointer: *WriterType) Self { + pub fn init(byte: u8, writer_pointer: *const WriterType) Self { return Self{ .writer_pointer = writer_pointer, .byte = byte, diff --git a/lib/std/target.zig b/lib/std/target.zig index deb7c85984..034ab780d0 100644 --- a/lib/std/target.zig +++ b/lib/std/target.zig @@ -101,7 +101,7 @@ pub const Target = struct { /// Latest Windows version that the Zig Standard Library is aware of pub const latest = WindowsVersion.win10_20h1; - + pub const Range = struct { min: WindowsVersion, max: WindowsVersion, diff --git a/lib/std/zig/parser_test.zig b/lib/std/zig/parser_test.zig index f4da650efb..9369d44010 100644 --- a/lib/std/zig/parser_test.zig +++ b/lib/std/zig/parser_test.zig @@ -2827,7 +2827,7 @@ test "zig fmt: inline asm" { \\ return asm volatile ("syscall" \\ : [ret] "={rax}" (-> usize) \\ : [number] "{rax}" (number), - \\ [arg1] "{rdi}" (arg1) + \\ [arg1] "{rdi}" (arg1) \\ : "rcx", "r11" \\ ); \\} @@ -2930,14 +2930,14 @@ test "zig fmt: inline asm parameter alignment" { \\ \\ foo \\ \\ bar \\ : [_] "" (-> usize), - \\ [_] "" (-> usize) + \\ [_] "" (-> usize) \\ ); \\ asm volatile ( \\ \\ foo \\ \\ bar \\ : \\ : [_] "" (0), - \\ [_] "" (0) + \\ [_] "" (0) \\ ); \\ asm volatile ( \\ \\ foo @@ -2950,9 +2950,9 @@ test "zig fmt: inline asm parameter alignment" { \\ \\ foo \\ \\ bar \\ : [_] "" (-> usize), - \\ [_] "" (-> usize) + \\ [_] "" (-> usize) \\ : [_] "" (0), - \\ [_] "" (0) + \\ [_] "" (0) \\ : "", "" \\ ); \\} diff --git a/lib/std/zig/render.zig b/lib/std/zig/render.zig index e3133a5501..471b98398b 100644 --- a/lib/std/zig/render.zig +++ b/lib/std/zig/render.zig @@ -24,7 +24,7 @@ pub fn render(allocator: *mem.Allocator, stream: anytype, tree: *ast.Tree) (meta var s = stream.*; var change_detection_stream = std.io.changeDetectionStream(tree.source, &s); - var auto_indenting_stream = std.io.autoIndentingStream(indent_delta, &change_detection_stream, allocator); + var auto_indenting_stream = std.io.autoIndentingStream(indent_delta, &change_detection_stream); defer auto_indenting_stream.deinit(); try renderRoot(allocator, &auto_indenting_stream, tree); @@ -389,11 +389,11 @@ fn renderExpression( } if (block.statements.len == 0) { - try stream.pushIndentNextLine(); + stream.pushIndentNextLine(); defer stream.popIndent(); try renderToken(tree, stream, block.lbrace, Space.None); } else { - try stream.pushIndentNextLine(); + stream.pushIndentNextLine(); defer stream.popIndent(); try 
renderToken(tree, stream, block.lbrace, Space.Newline); @@ -463,7 +463,7 @@ fn renderExpression( try renderExpression(allocator, stream, tree, payload, Space.Space); } - try stream.pushIndentOneShot(); + stream.pushIndentOneShot(); return renderExpression(allocator, stream, tree, infix_op_node.rhs, space); }, @@ -524,7 +524,7 @@ fn renderExpression( }; try renderToken(tree, stream, infix_op_node.op_token, after_op_space); - try stream.pushIndentOneShot(); + stream.pushIndentOneShot(); return renderExpression(allocator, stream, tree, infix_op_node.rhs, space); }, @@ -718,7 +718,7 @@ fn renderExpression( } { - try stream.pushIndent(); + stream.pushIndent(); defer stream.popIndent(); try renderToken(tree, stream, lbrace, Space.None); } @@ -783,7 +783,7 @@ fn renderExpression( // Null stream for counting the printed length of each expression var counting_stream = std.io.countingOutStream(std.io.null_out_stream); - var auto_indenting_stream = std.io.autoIndentingStream(indent_delta, &counting_stream, allocator); + var auto_indenting_stream = std.io.autoIndentingStream(indent_delta, &counting_stream); defer auto_indenting_stream.deinit(); for (exprs) |expr, i| { @@ -796,7 +796,7 @@ fn renderExpression( } { - try stream.pushIndentNextLine(); + stream.pushIndentNextLine(); defer stream.popIndent(); try renderToken(tree, stream, lbrace, Space.Newline); @@ -880,7 +880,7 @@ fn renderExpression( } { - try stream.pushIndentNextLine(); + stream.pushIndentNextLine(); defer stream.popIndent(); try renderToken(tree, stream, lbrace, Space.None); } @@ -902,7 +902,7 @@ fn renderExpression( // render field expressions until a LF is found for (field_inits) |field_init| { var find_stream = std.io.findByteOutStream('\n', &std.io.null_out_stream); - var auto_indenting_stream = std.io.autoIndentingStream(indent_delta, &find_stream, allocator); + var auto_indenting_stream = std.io.autoIndentingStream(indent_delta, &find_stream); defer auto_indenting_stream.deinit(); try renderExpression(allocator, &auto_indenting_stream, tree, field_init, Space.None); @@ -963,7 +963,7 @@ fn renderExpression( .node => |node| try renderExpression(allocator, stream, tree, node, Space.None), } - try stream.pushIndentNextLine(); + stream.pushIndentNextLine(); defer stream.popIndent(); try renderToken(tree, stream, lbrace, Space.Newline); @@ -1011,7 +1011,7 @@ fn renderExpression( const params = call.params(); for (params) |param_node, i| { - try stream.pushIndent(); + stream.pushIndent(); defer stream.popIndent(); if (i + 1 < params.len) { @@ -1031,7 +1031,7 @@ fn renderExpression( const params = call.params(); for (params) |param_node, i| { - if (param_node.*.tag == .MultilineStringLiteral) try stream.pushIndentOneShot(); + if (param_node.*.tag == .MultilineStringLiteral) stream.pushIndentOneShot(); try renderExpression(allocator, stream, tree, param_node, Space.None); @@ -1058,7 +1058,7 @@ fn renderExpression( { const new_space = if (ends_with_comment) Space.Newline else Space.None; - try stream.pushIndent(); + stream.pushIndent(); defer stream.popIndent(); try renderExpression(allocator, stream, tree, suffix_op.index_expr, new_space); } @@ -1194,7 +1194,7 @@ fn renderExpression( try renderToken(tree, stream, grouped_expr.lparen, Space.None); { - try stream.pushIndentOneShot(); + stream.pushIndentOneShot(); try renderExpression(allocator, stream, tree, grouped_expr.expr, Space.None); } return renderToken(tree, stream, grouped_expr.rparen, space); @@ -1254,7 +1254,7 @@ fn renderExpression( if (container_decl.fields_and_decls_len == 0) { 
{ - try stream.pushIndentNextLine(); + stream.pushIndentNextLine(); defer stream.popIndent(); try renderToken(tree, stream, container_decl.lbrace_token, Space.None); // { } @@ -1289,7 +1289,7 @@ fn renderExpression( if (src_has_trailing_comma or !src_has_only_fields) { // One declaration per line - try stream.pushIndentNextLine(); + stream.pushIndentNextLine(); defer stream.popIndent(); try renderToken(tree, stream, container_decl.lbrace_token, .Newline); // { @@ -1305,7 +1305,7 @@ fn renderExpression( // their own line try renderToken(tree, stream, container_decl.lbrace_token, .Newline); // { - try stream.pushIndent(); + stream.pushIndent(); defer stream.popIndent(); for (fields_and_decls) |decl, i| { @@ -1361,7 +1361,7 @@ fn renderExpression( if (src_has_trailing_comma) { { - try stream.pushIndent(); + stream.pushIndent(); defer stream.popIndent(); try renderToken(tree, stream, lbrace, Space.Newline); // { @@ -1451,7 +1451,7 @@ fn renderExpression( } } else { // one param per line - try stream.pushIndent(); + stream.pushIndent(); defer stream.popIndent(); try renderToken(tree, stream, lparen, Space.Newline); // ( @@ -1530,7 +1530,7 @@ fn renderExpression( } } else { // one param per line - try stream.pushIndent(); + stream.pushIndent(); defer stream.popIndent(); try renderToken(tree, stream, lparen, Space.Newline); // ( @@ -1627,7 +1627,7 @@ fn renderExpression( try renderToken(tree, stream, rparen, Space.Space); // ) { - try stream.pushIndentNextLine(); + stream.pushIndentNextLine(); defer stream.popIndent(); try renderToken(tree, stream, lbrace, Space.Newline); // { @@ -1711,7 +1711,7 @@ fn renderExpression( if (same_line) { return renderExpression(allocator, stream, tree, else_node.body, space); } else { - try stream.pushIndent(); + stream.pushIndent(); defer stream.popIndent(); return renderExpression(allocator, stream, tree, else_node.body, space); } @@ -1775,7 +1775,7 @@ fn renderExpression( } { - if (!body_is_block) try stream.pushIndent(); + if (!body_is_block) stream.pushIndent(); defer if (!body_is_block) stream.popIndent(); try renderExpression(allocator, stream, tree, while_node.body, after_body_space); } @@ -1826,7 +1826,7 @@ fn renderExpression( }; { - if (!body_on_same_line) try stream.pushIndent(); + if (!body_on_same_line) stream.pushIndent(); defer if (!body_on_same_line) stream.popIndent(); try renderExpression(allocator, stream, tree, for_node.body, space_after_body); // { body } } @@ -1882,7 +1882,7 @@ fn renderExpression( const else_is_block = nodeIsBlock(@"else".body); { - try stream.pushIndent(); + stream.pushIndent(); defer stream.popIndent(); try renderExpression(allocator, stream, tree, if_node.body, Space.Newline); } @@ -1903,12 +1903,12 @@ fn renderExpression( try renderExpression(allocator, stream, tree, payload, Space.Newline); } - try stream.pushIndent(); + stream.pushIndent(); defer stream.popIndent(); return renderExpression(allocator, stream, tree, @"else".body, space); } } else { - try stream.pushIndent(); + stream.pushIndent(); defer stream.popIndent(); return renderExpression(allocator, stream, tree, if_node.body, space); } @@ -1949,7 +1949,7 @@ fn renderExpression( } asmblk: { - try stream.pushIndent(); + stream.pushIndent(); defer stream.popIndent(); if (asm_node.outputs.len == 0 and asm_node.inputs.len == 0 and asm_node.clobbers.len == 0) { @@ -1968,7 +1968,7 @@ fn renderExpression( } else blk: { try renderToken(tree, stream, colon1, Space.Space); // : - try stream.pushIndentN(2); + stream.pushIndent(); defer stream.popIndent(); for 
(asm_node.outputs) |*asm_output, i| { @@ -1999,7 +1999,7 @@ fn renderExpression( break :blk tree.nextToken(colon2); } else blk: { try renderToken(tree, stream, colon2, Space.Space); // : - try stream.pushIndentN(2); + stream.pushIndent(); defer stream.popIndent(); for (asm_node.inputs) |*asm_input, i| { if (i + 1 < asm_node.inputs.len) { @@ -2025,7 +2025,7 @@ fn renderExpression( }; try renderToken(tree, stream, colon3, Space.Space); // : - try stream.pushIndentN(2); + stream.pushIndent(); defer stream.popIndent(); for (asm_node.clobbers) |clobber_node, i| { if (i + 1 >= asm_node.clobbers.len) { @@ -2078,7 +2078,7 @@ fn renderArrayType( const new_space = if (ends_with_comment) Space.Newline else Space.None; { const do_indent = (starts_with_comment or ends_with_comment); - if (do_indent) try stream.pushIndent(); + if (do_indent) stream.pushIndent(); defer if (do_indent) stream.popIndent(); try renderToken(tree, stream, lbracket, Space.None); // [ @@ -2212,7 +2212,7 @@ fn renderVarDecl( if (var_decl.getTrailer("init_node")) |init_node| { const s = if (init_node.tag == .MultilineStringLiteral) Space.None else Space.Space; try renderToken(tree, stream, var_decl.getTrailer("eq_token").?, s); // = - try stream.pushIndentOneShot(); + stream.pushIndentOneShot(); try renderExpression(allocator, stream, tree, init_node, Space.None); } From 029ec456bce5fc6c57eea496db1cebed55e31ede Mon Sep 17 00:00:00 2001 From: Lachlan Easton Date: Mon, 31 Aug 2020 23:32:42 +1000 Subject: [PATCH 05/56] zig fmt: Set indent_delta to 2 when rendering inline asm --- lib/std/io/auto_indenting_stream.zig | 36 ++++++++++++++++++---------- lib/std/zig/parser_test.zig | 10 ++++---- lib/std/zig/render.zig | 7 +++--- 3 files changed, 33 insertions(+), 20 deletions(-) diff --git a/lib/std/io/auto_indenting_stream.zig b/lib/std/io/auto_indenting_stream.zig index bebbf9aed6..e2233f8cc2 100644 --- a/lib/std/io/auto_indenting_stream.zig +++ b/lib/std/io/auto_indenting_stream.zig @@ -13,7 +13,7 @@ pub fn AutoIndentingStream(comptime WriterType: type) type { writer_pointer: *WriterType, - indent_stack: usize = 0, + indent_count: usize = 0, indent_delta: usize, current_line_empty: bool = true, indent_one_shot_count: usize = 0, // automatically popped when applied @@ -24,9 +24,6 @@ pub fn AutoIndentingStream(comptime WriterType: type) type { return Self{ .writer_pointer = writer_pointer, .indent_delta = indent_delta }; } - /// Release all allocated memory. 
- pub fn deinit(self: Self) void {} - pub fn writer(self: *Self) Writer { return .{ .context = self }; } @@ -39,6 +36,21 @@ pub fn AutoIndentingStream(comptime WriterType: type) type { return self.writeNoIndent(bytes); } + // Change the indent delta without changing the final indentation level + pub fn setIndentDelta(self: *Self, indent_delta: usize) void { + if (self.indent_delta == indent_delta) { + return; + } else if (self.indent_delta > indent_delta) { + assert(self.indent_delta % indent_delta == 0); + self.indent_count = self.indent_count * (self.indent_delta / indent_delta); + } else { + // assert that the current indentation (in spaces) in a multiple of the new delta + assert((self.indent_count * self.indent_delta) % indent_delta == 0); + self.indent_count = self.indent_count / (indent_delta / self.indent_delta); + } + self.indent_delta = indent_delta; + } + fn writeNoIndent(self: *Self, bytes: []const u8) Error!usize { if (bytes.len == 0) return @as(usize, 0); @@ -68,7 +80,7 @@ pub fn AutoIndentingStream(comptime WriterType: type) type { pub fn pushIndent(self: *Self) void { // Doesn't actually write any indentation. // Just primes the stream to be able to write the correct indentation if it needs to. - self.indent_stack += 1; + self.indent_count += 1; } /// Push an indent that is automatically popped after being applied @@ -92,9 +104,9 @@ pub fn AutoIndentingStream(comptime WriterType: type) type { } pub fn popIndent(self: *Self) void { - assert(self.indent_stack != 0); - self.indent_stack -= 1; - self.indent_next_line = std.math.min(self.indent_stack, self.indent_next_line); // Tentative indent may have been popped before there was a newline + assert(self.indent_count != 0); + self.indent_count -= 1; + self.indent_next_line = std.math.min(self.indent_count, self.indent_next_line); // Tentative indent may have been popped before there was a newline } /// Writes ' ' bytes if the current line is empty @@ -105,7 +117,7 @@ pub fn AutoIndentingStream(comptime WriterType: type) type { self.applied_indent = current_indent; } - self.indent_stack -= self.indent_one_shot_count; + self.indent_count -= self.indent_one_shot_count; self.indent_one_shot_count = 0; self.current_line_empty = false; } @@ -118,9 +130,9 @@ pub fn AutoIndentingStream(comptime WriterType: type) type { fn currentIndent(self: *Self) usize { var indent_current: usize = 0; - if (self.indent_stack > 0) { - const stack_top = self.indent_stack - self.indent_next_line; - indent_current = stack_top * self.indent_delta; + if (self.indent_count > 0) { + const indent_count = self.indent_count - self.indent_next_line; + indent_current = indent_count * self.indent_delta; } return indent_current; } diff --git a/lib/std/zig/parser_test.zig b/lib/std/zig/parser_test.zig index 9369d44010..f4da650efb 100644 --- a/lib/std/zig/parser_test.zig +++ b/lib/std/zig/parser_test.zig @@ -2827,7 +2827,7 @@ test "zig fmt: inline asm" { \\ return asm volatile ("syscall" \\ : [ret] "={rax}" (-> usize) \\ : [number] "{rax}" (number), - \\ [arg1] "{rdi}" (arg1) + \\ [arg1] "{rdi}" (arg1) \\ : "rcx", "r11" \\ ); \\} @@ -2930,14 +2930,14 @@ test "zig fmt: inline asm parameter alignment" { \\ \\ foo \\ \\ bar \\ : [_] "" (-> usize), - \\ [_] "" (-> usize) + \\ [_] "" (-> usize) \\ ); \\ asm volatile ( \\ \\ foo \\ \\ bar \\ : \\ : [_] "" (0), - \\ [_] "" (0) + \\ [_] "" (0) \\ ); \\ asm volatile ( \\ \\ foo @@ -2950,9 +2950,9 @@ test "zig fmt: inline asm parameter alignment" { \\ \\ foo \\ \\ bar \\ : [_] "" (-> usize), - \\ [_] "" (-> usize) + \\ [_] "" 
(-> usize) \\ : [_] "" (0), - \\ [_] "" (0) + \\ [_] "" (0) \\ : "", "" \\ ); \\} diff --git a/lib/std/zig/render.zig b/lib/std/zig/render.zig index 471b98398b..e4b03193c7 100644 --- a/lib/std/zig/render.zig +++ b/lib/std/zig/render.zig @@ -11,6 +11,7 @@ const ast = std.zig.ast; const Token = std.zig.Token; const indent_delta = 4; +const asm_indent_delta = 2; pub const Error = error{ /// Ran out of memory allocating call stack frames to complete rendering. @@ -25,7 +26,6 @@ pub fn render(allocator: *mem.Allocator, stream: anytype, tree: *ast.Tree) (meta var s = stream.*; var change_detection_stream = std.io.changeDetectionStream(tree.source, &s); var auto_indenting_stream = std.io.autoIndentingStream(indent_delta, &change_detection_stream); - defer auto_indenting_stream.deinit(); try renderRoot(allocator, &auto_indenting_stream, tree); @@ -784,7 +784,6 @@ fn renderExpression( // Null stream for counting the printed length of each expression var counting_stream = std.io.countingOutStream(std.io.null_out_stream); var auto_indenting_stream = std.io.autoIndentingStream(indent_delta, &counting_stream); - defer auto_indenting_stream.deinit(); for (exprs) |expr, i| { counting_stream.bytes_written = 0; @@ -903,7 +902,6 @@ fn renderExpression( for (field_inits) |field_init| { var find_stream = std.io.findByteOutStream('\n', &std.io.null_out_stream); var auto_indenting_stream = std.io.autoIndentingStream(indent_delta, &find_stream); - defer auto_indenting_stream.deinit(); try renderExpression(allocator, &auto_indenting_stream, tree, field_init, Space.None); if (find_stream.byte_found) break :blk false; @@ -1959,6 +1957,9 @@ fn renderExpression( try renderExpression(allocator, stream, tree, asm_node.template, Space.Newline); + stream.setIndentDelta(asm_indent_delta); + defer stream.setIndentDelta(indent_delta); + const colon1 = tree.nextToken(asm_node.template.lastToken()); const colon2 = if (asm_node.outputs.len == 0) blk: { From bc24b86d82ec3b8d7d6e7e5d2d3dceb82d7b53dc Mon Sep 17 00:00:00 2001 From: Lachlan Easton Date: Tue, 1 Sep 2020 13:19:34 +1000 Subject: [PATCH 06/56] zig fmt: Fix regression not covered by testing --- lib/std/io/auto_indenting_stream.zig | 4 +++- lib/std/zig/parser_test.zig | 11 +++++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/lib/std/io/auto_indenting_stream.zig b/lib/std/io/auto_indenting_stream.zig index e2233f8cc2..d4256324f1 100644 --- a/lib/std/io/auto_indenting_stream.zig +++ b/lib/std/io/auto_indenting_stream.zig @@ -106,7 +106,9 @@ pub fn AutoIndentingStream(comptime WriterType: type) type { pub fn popIndent(self: *Self) void { assert(self.indent_count != 0); self.indent_count -= 1; - self.indent_next_line = std.math.min(self.indent_count, self.indent_next_line); // Tentative indent may have been popped before there was a newline + + if (self.indent_next_line > 0) + self.indent_next_line -= 1; } /// Writes ' ' bytes if the current line is empty diff --git a/lib/std/zig/parser_test.zig b/lib/std/zig/parser_test.zig index f4da650efb..8652a73c50 100644 --- a/lib/std/zig/parser_test.zig +++ b/lib/std/zig/parser_test.zig @@ -3310,6 +3310,17 @@ test "zig fmt: Only indent multiline string literals in function calls" { ); } +test "zig fmt: Don't add extra newline after if" { + try testCanonical( + \\pub fn atomicSymLink(allocator: *Allocator, existing_path: []const u8, new_path: []const u8) !void { + \\ if (cwd().symLink(existing_path, new_path, .{})) { + \\ return; + \\ } + \\} + \\ + ); +} + const std = @import("std"); const mem = std.mem; const 
warn = std.debug.warn; From 7841c9b7d105a62ccb55f4f6589d5ff31194a013 Mon Sep 17 00:00:00 2001 From: Lachlan Easton Date: Tue, 1 Sep 2020 21:59:27 +1000 Subject: [PATCH 07/56] zig fmt: Fix merge errors --- lib/std/zig/render.zig | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/lib/std/zig/render.zig b/lib/std/zig/render.zig index eb6df45c20..b7a2b8675a 100644 --- a/lib/std/zig/render.zig +++ b/lib/std/zig/render.zig @@ -1425,7 +1425,7 @@ fn renderExpression( // TODO remove after 0.7.0 release if (mem.eql(u8, tree.tokenSlice(builtin_call.builtin_token), "@OpaqueType")) - return stream.writeAll("@Type(.Opaque)"); + return stream.writer().writeAll("@Type(.Opaque)"); try renderToken(tree, stream, builtin_call.builtin_token, Space.None); // @name @@ -2184,11 +2184,11 @@ fn renderVarDecl( Space.None; try renderToken(tree, stream, var_decl.name_token, name_space); - if (var_decl.getTrailer("type_node")) |type_node| { + if (var_decl.getTypeNode()) |type_node| { try renderToken(tree, stream, tree.nextToken(var_decl.name_token), Space.Space); - const s = if (var_decl.getTrailer("align_node") != null or - var_decl.getTrailer("section_node") != null or - var_decl.getTrailer("init_node") != null) Space.Space else Space.None; + const s = if (var_decl.getAlignNode() != null or + var_decl.getSectionNode() != null or + var_decl.getInitNode() != null) Space.Space else Space.None; try renderExpression(allocator, stream, tree, type_node, s); } From c51b871c4516003e8d2c84e7e1c36124c3797f5c Mon Sep 17 00:00:00 2001 From: LemonBoy Date: Tue, 1 Sep 2020 17:29:10 +0200 Subject: [PATCH 08/56] ir: Typecheck the sentinel value in *[N:S1]T to [S2]T casts Closes #6054 --- src/ir.cpp | 7 ++++++- test/compile_errors.zig | 8 ++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/src/ir.cpp b/src/ir.cpp index 692dd392e1..36be78ed7c 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -15341,9 +15341,14 @@ static IrInstGen *ir_analyze_cast(IrAnalyze *ira, IrInst *source_instr, ZigType *array_type = actual_type->data.pointer.child_type; bool const_ok = (slice_ptr_type->data.pointer.is_const || array_type->data.array.len == 0 || !actual_type->data.pointer.is_const); + if (const_ok && types_match_const_cast_only(ira, slice_ptr_type->data.pointer.child_type, array_type->data.array.child_type, source_node, - !slice_ptr_type->data.pointer.is_const).id == ConstCastResultIdOk) + !slice_ptr_type->data.pointer.is_const).id == ConstCastResultIdOk && + (slice_ptr_type->data.pointer.sentinel == nullptr || + (array_type->data.array.sentinel != nullptr && + const_values_equal(ira->codegen, array_type->data.array.sentinel, + slice_ptr_type->data.pointer.sentinel)))) { // If the pointers both have ABI align, it works. // Or if the array length is 0, alignment doesn't matter. 
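To illustrate the rule the new check enforces (a sketch only, not taken from the patch itself): coercing a pointer-to-array with a sentinel to a sentinel-terminated slice now requires the two sentinel values to agree, matching the compile error exercised by the test added below.

    const ok: [:0]const u8 = &[_:0]u8{ 1, 2 }; // sentinels match (0 == 0): still accepted
    // const bad: [:1]const u8 = &[_:2]u8{ 1, 2 }; // sentinel mismatch (1 != 2): rejected after this change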
diff --git a/test/compile_errors.zig b/test/compile_errors.zig index 9e81ed27a7..f6e00e1dbb 100644 --- a/test/compile_errors.zig +++ b/test/compile_errors.zig @@ -2,6 +2,14 @@ const tests = @import("tests.zig"); const std = @import("std"); pub fn addCases(cases: *tests.CompileErrorContext) void { + cases.add("slice sentinel mismatch", + \\export fn entry() void { + \\ const y: [:1]const u8 = &[_:2]u8{ 1, 2 }; + \\} + , &[_][]const u8{ + "tmp.zig:2:37: error: expected type '[:1]const u8', found '*const [2:2]u8'", + }); + cases.add("@Type with undefined", \\comptime { \\ _ = @Type(.{ .Array = .{ .len = 0, .child = u8, .sentinel = undefined } }); From dd4994a4e4379454f6b58779276f1b6aa9ed6e1b Mon Sep 17 00:00:00 2001 From: LemonBoy Date: Tue, 1 Sep 2020 18:45:35 +0200 Subject: [PATCH 09/56] std: Fix C-string with missing NUL terminator Spotted thanks to the stricter conversion rules. --- lib/std/net.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/std/net.zig b/lib/std/net.zig index 10e5b371f8..5a1407c35f 100644 --- a/lib/std/net.zig +++ b/lib/std/net.zig @@ -1164,7 +1164,7 @@ fn linuxLookupNameFromDnsSearch( } const search = if (rc.search.isNull() or dots >= rc.ndots or mem.endsWith(u8, name, ".")) - &[_]u8{} + "" else rc.search.span(); From 73a8c9beaa63c48b6ddaeec8d2a67b239f0dec92 Mon Sep 17 00:00:00 2001 From: LemonBoy Date: Tue, 1 Sep 2020 18:48:43 +0200 Subject: [PATCH 10/56] std: Don't trust stat() size in readAllAlloc fns Some files such as the ones in /proc report a st_size of zero, try to read the file anyway if we hit that case. --- lib/std/fs.zig | 4 +--- lib/std/fs/file.zig | 28 +++++++++++++++++++++------- lib/std/fs/test.zig | 9 ++++----- src-self-hosted/main.zig | 3 ++- src-self-hosted/stage2.zig | 1 - 5 files changed, 28 insertions(+), 17 deletions(-) diff --git a/lib/std/fs.zig b/lib/std/fs.zig index 21a00eeb1d..9a44660570 100644 --- a/lib/std/fs.zig +++ b/lib/std/fs.zig @@ -1454,9 +1454,7 @@ pub const Dir = struct { var file = try self.openFile(file_path, .{}); defer file.close(); - const stat_size = try file.getEndPos(); - - return file.readAllAllocOptions(allocator, stat_size, max_bytes, alignment, optional_sentinel); + return file.readAllAllocOptions(allocator, max_bytes, alignment, optional_sentinel); } pub const DeleteTreeError = error{ diff --git a/lib/std/fs/file.zig b/lib/std/fs/file.zig index 6fb2385a85..c34e5f9437 100644 --- a/lib/std/fs/file.zig +++ b/lib/std/fs/file.zig @@ -365,8 +365,8 @@ pub const File = struct { /// On success, caller owns returned buffer. /// If the file is larger than `max_bytes`, returns `error.FileTooBig`. - pub fn readAllAlloc(self: File, allocator: *mem.Allocator, stat_size: u64, max_bytes: usize) ![]u8 { - return self.readAllAllocOptions(allocator, stat_size, max_bytes, @alignOf(u8), null); + pub fn readAllAlloc(self: File, allocator: *mem.Allocator, max_bytes: usize) ![]u8 { + return self.readAllAllocOptions(allocator, max_bytes, @alignOf(u8), null); } /// On success, caller owns returned buffer. 
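Call sites now drop the stat-based size argument; the size hint is obtained inside readAllAllocOptions instead. A minimal sketch of the updated call shape, where `file`, `allocator`, and `max_bytes` are placeholders for the caller's own values (compare the test updates further down):

    // `file` is an open std.fs.File, `allocator` a *std.mem.Allocator
    const contents = try file.readAllAlloc(allocator, max_bytes);
    defer allocator.free(contents);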
@@ -375,19 +375,33 @@ pub const File = struct { pub fn readAllAllocOptions( self: File, allocator: *mem.Allocator, - stat_size: u64, max_bytes: usize, comptime alignment: u29, comptime optional_sentinel: ?u8, ) !(if (optional_sentinel) |s| [:s]align(alignment) u8 else []align(alignment) u8) { + const stat_size = try self.getEndPos(); const size = math.cast(usize, stat_size) catch math.maxInt(usize); if (size > max_bytes) return error.FileTooBig; - const buf = try allocator.allocWithOptions(u8, size, alignment, optional_sentinel); - errdefer allocator.free(buf); + // The file size returned by stat is used as hint to set the buffer + // size. If the reported size is zero, as it happens on Linux for files + // in /proc, a small buffer is allocated instead. + const initial_cap = (if (size > 0) size else 1024) + @boolToInt(optional_sentinel != null); + var array_list = try std.ArrayListAligned(u8, alignment).initCapacity(allocator, initial_cap); + defer array_list.deinit(); - try self.reader().readNoEof(buf); - return buf; + self.reader().readAllArrayList(&array_list, max_bytes) catch |err| switch (err) { + error.StreamTooLong => return error.FileTooBig, + else => |e| return e, + }; + + if (optional_sentinel) |sentinel| { + try array_list.append(sentinel); + const buf = array_list.toOwnedSlice(); + return buf[0 .. buf.len - 1 :sentinel]; + } else { + return array_list.toOwnedSlice(); + } } pub const ReadError = os.ReadError; diff --git a/lib/std/fs/test.zig b/lib/std/fs/test.zig index 409a53b1a7..c567602dd7 100644 --- a/lib/std/fs/test.zig +++ b/lib/std/fs/test.zig @@ -188,30 +188,29 @@ test "readAllAlloc" { var file = try tmp_dir.dir.createFile("test_file", .{ .read = true }); defer file.close(); - const buf1 = try file.readAllAlloc(testing.allocator, 0, 1024); + const buf1 = try file.readAllAlloc(testing.allocator, 1024); defer testing.allocator.free(buf1); testing.expect(buf1.len == 0); const write_buf: []const u8 = "this is a test.\nthis is a test.\nthis is a test.\nthis is a test.\n"; try file.writeAll(write_buf); try file.seekTo(0); - const file_size = try file.getEndPos(); // max_bytes > file_size - const buf2 = try file.readAllAlloc(testing.allocator, file_size, 1024); + const buf2 = try file.readAllAlloc(testing.allocator, 1024); defer testing.allocator.free(buf2); testing.expectEqual(write_buf.len, buf2.len); testing.expect(std.mem.eql(u8, write_buf, buf2)); try file.seekTo(0); // max_bytes == file_size - const buf3 = try file.readAllAlloc(testing.allocator, file_size, write_buf.len); + const buf3 = try file.readAllAlloc(testing.allocator, write_buf.len); defer testing.allocator.free(buf3); testing.expectEqual(write_buf.len, buf3.len); testing.expect(std.mem.eql(u8, write_buf, buf3)); // max_bytes < file_size - testing.expectError(error.FileTooBig, file.readAllAlloc(testing.allocator, file_size, write_buf.len - 1)); + testing.expectError(error.FileTooBig, file.readAllAlloc(testing.allocator, write_buf.len - 1)); } test "directory operations on files" { diff --git a/src-self-hosted/main.zig b/src-self-hosted/main.zig index ac25cd2eb8..b6ccc8a218 100644 --- a/src-self-hosted/main.zig +++ b/src-self-hosted/main.zig @@ -742,6 +742,7 @@ const FmtError = error{ LinkQuotaExceeded, FileBusy, EndOfStream, + Unseekable, NotOpenForWriting, } || fs.File.OpenError; @@ -805,7 +806,7 @@ fn fmtPathFile( if (stat.kind == .Directory) return error.IsDir; - const source_code = source_file.readAllAlloc(fmt.gpa, stat.size, max_src_size) catch |err| switch (err) { + const source_code = 
source_file.readAllAlloc(fmt.gpa, max_src_size) catch |err| switch (err) { error.ConnectionResetByPeer => unreachable, error.ConnectionTimedOut => unreachable, error.NotOpenForReading => unreachable, diff --git a/src-self-hosted/stage2.zig b/src-self-hosted/stage2.zig index 30d2ea44db..45b8ad3073 100644 --- a/src-self-hosted/stage2.zig +++ b/src-self-hosted/stage2.zig @@ -615,7 +615,6 @@ export fn stage2_libc_parse(stage1_libc: *Stage2LibCInstallation, libc_file_z: [ error.NotOpenForWriting => unreachable, error.NotOpenForReading => unreachable, error.Unexpected => return .Unexpected, - error.EndOfStream => return .EndOfFile, error.IsDir => return .IsDir, error.ConnectionResetByPeer => unreachable, error.ConnectionTimedOut => unreachable, From 5f31d54064b204910c18667d0b68b8bf2b57cffb Mon Sep 17 00:00:00 2001 From: LemonBoy Date: Wed, 2 Sep 2020 10:51:15 +0200 Subject: [PATCH 11/56] std: ArrayList.initCapacity now respects the specified cap Don't use the user-supplied cap as starting point for a resize. Doing so overallocates memory and thus negates the whole point of specifying a precise cap value. --- lib/std/array_list.zig | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/lib/std/array_list.zig b/lib/std/array_list.zig index a7432a30ae..f298d14631 100644 --- a/lib/std/array_list.zig +++ b/lib/std/array_list.zig @@ -46,7 +46,11 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type { /// Deinitialize with `deinit` or use `toOwnedSlice`. pub fn initCapacity(allocator: *Allocator, num: usize) !Self { var self = Self.init(allocator); - try self.ensureCapacity(num); + + const new_memory = try self.allocator.allocAdvanced(T, alignment, num, .at_least); + self.items.ptr = new_memory.ptr; + self.capacity = new_memory.len; + return self; } @@ -366,7 +370,11 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ /// Deinitialize with `deinit` or use `toOwnedSlice`. 
pub fn initCapacity(allocator: *Allocator, num: usize) !Self { var self = Self{}; - try self.ensureCapacity(allocator, num); + + const new_memory = try self.allocator.allocAdvanced(T, alignment, num, .at_least); + self.items.ptr = new_memory.ptr; + self.capacity = new_memory.len; + return self; } From bb848dbeeec133b2ca1d6dc42fe9dd04b848b58a Mon Sep 17 00:00:00 2001 From: Lachlan Easton Date: Wed, 2 Sep 2020 20:16:28 +1000 Subject: [PATCH 12/56] zig fmt: Patch rename stream to ais (auto indenting stream) & other small refactors --- lib/std/io/auto_indenting_stream.zig | 24 +- lib/std/io/change_detection_stream.zig | 26 +- lib/std/io/find_byte_out_stream.zig | 26 +- lib/std/io/writer.zig | 4 - lib/std/zig/parser_test.zig | 2 +- lib/std/zig/render.zig | 1229 ++++++++++++------------ src-self-hosted/main.zig | 8 +- src-self-hosted/stage2.zig | 2 +- 8 files changed, 653 insertions(+), 668 deletions(-) diff --git a/lib/std/io/auto_indenting_stream.zig b/lib/std/io/auto_indenting_stream.zig index d4256324f1..d08878e851 100644 --- a/lib/std/io/auto_indenting_stream.zig +++ b/lib/std/io/auto_indenting_stream.zig @@ -5,13 +5,13 @@ const assert = std.debug.assert; /// Automatically inserts indentation of written data by keeping /// track of the current indentation level -pub fn AutoIndentingStream(comptime WriterType: type) type { +pub fn AutoIndentingStream(comptime UnderlyingWriter: type) type { return struct { const Self = @This(); - pub const Error = WriterType.Error; + pub const Error = UnderlyingWriter.Error; pub const Writer = io.Writer(*Self, Error, write); - writer_pointer: *WriterType, + underlying_writer: UnderlyingWriter, indent_count: usize = 0, indent_delta: usize, @@ -20,10 +20,6 @@ pub fn AutoIndentingStream(comptime WriterType: type) type { applied_indent: usize = 0, // the most recently applied indent indent_next_line: usize = 0, // not used until the next line - pub fn init(indent_delta: usize, writer_pointer: *WriterType) Self { - return Self{ .writer_pointer = writer_pointer, .indent_delta = indent_delta }; - } - pub fn writer(self: *Self) Writer { return .{ .context = self }; } @@ -55,7 +51,7 @@ pub fn AutoIndentingStream(comptime WriterType: type) type { if (bytes.len == 0) return @as(usize, 0); - try self.writer_pointer.writer().writeAll(bytes); + try self.underlying_writer.writeAll(bytes); if (bytes[bytes.len - 1] == '\n') self.resetLine(); return bytes.len; @@ -115,7 +111,7 @@ pub fn AutoIndentingStream(comptime WriterType: type) type { fn applyIndent(self: *Self) Error!void { const current_indent = self.currentIndent(); if (self.current_line_empty and current_indent > 0) { - try self.writer_pointer.writer().writeByteNTimes(' ', current_indent); + try self.underlying_writer.writeByteNTimes(' ', current_indent); self.applied_indent = current_indent; } @@ -143,8 +139,10 @@ pub fn AutoIndentingStream(comptime WriterType: type) type { pub fn autoIndentingStream( indent_delta: usize, - underlying_stream: anytype, -) AutoIndentingStream(@TypeOf(underlying_stream).Child) { - comptime assert(@typeInfo(@TypeOf(underlying_stream)) == .Pointer); - return AutoIndentingStream(@TypeOf(underlying_stream).Child).init(indent_delta, underlying_stream); + underlying_writer: anytype, +) AutoIndentingStream(@TypeOf(underlying_writer)) { + return AutoIndentingStream(@TypeOf(underlying_writer)){ + .underlying_writer = underlying_writer, + .indent_delta = indent_delta, + }; } diff --git a/lib/std/io/change_detection_stream.zig b/lib/std/io/change_detection_stream.zig index b559e66751..5ba2bb3c10 
100644 --- a/lib/std/io/change_detection_stream.zig +++ b/lib/std/io/change_detection_stream.zig @@ -10,19 +10,11 @@ pub fn ChangeDetectionStream(comptime WriterType: type) type { pub const Error = WriterType.Error; pub const Writer = io.Writer(*Self, Error, write); - anything_changed: bool = false, - writer_pointer: *const WriterType, + anything_changed: bool, + underlying_writer: WriterType, source_index: usize, source: []const u8, - pub fn init(source: []const u8, writer_pointer: *const WriterType) Self { - return Self{ - .writer_pointer = writer_pointer, - .source_index = 0, - .source = source, - }; - } - pub fn writer(self: *Self) Writer { return .{ .context = self }; } @@ -41,7 +33,7 @@ pub fn ChangeDetectionStream(comptime WriterType: type) type { } } - return self.writer_pointer.write(bytes); + return self.underlying_writer.write(bytes); } pub fn changeDetected(self: *Self) bool { @@ -52,8 +44,12 @@ pub fn ChangeDetectionStream(comptime WriterType: type) type { pub fn changeDetectionStream( source: []const u8, - underlying_stream: anytype, -) ChangeDetectionStream(@TypeOf(underlying_stream).Child) { - comptime assert(@typeInfo(@TypeOf(underlying_stream)) == .Pointer); - return ChangeDetectionStream(@TypeOf(underlying_stream).Child).init(source, underlying_stream); + underlying_writer: anytype, +) ChangeDetectionStream(@TypeOf(underlying_writer)) { + return ChangeDetectionStream(@TypeOf(underlying_writer)){ + .anything_changed = false, + .underlying_writer = underlying_writer, + .source_index = 0, + .source = source, + }; } diff --git a/lib/std/io/find_byte_out_stream.zig b/lib/std/io/find_byte_out_stream.zig index bfd0e815e4..b8689b7992 100644 --- a/lib/std/io/find_byte_out_stream.zig +++ b/lib/std/io/find_byte_out_stream.zig @@ -4,24 +4,16 @@ const assert = std.debug.assert; /// An OutStream that returns whether the given character has been written to it. /// The contents are not written to anything. 
-pub fn FindByteOutStream(comptime WriterType: type) type { +pub fn FindByteOutStream(comptime UnderlyingWriter: type) type { return struct { const Self = @This(); - pub const Error = WriterType.Error; + pub const Error = UnderlyingWriter.Error; pub const Writer = io.Writer(*Self, Error, write); - writer_pointer: *const WriterType, + underlying_writer: UnderlyingWriter, byte_found: bool, byte: u8, - pub fn init(byte: u8, writer_pointer: *const WriterType) Self { - return Self{ - .writer_pointer = writer_pointer, - .byte = byte, - .byte_found = false, - }; - } - pub fn writer(self: *Self) Writer { return .{ .context = self }; } @@ -34,11 +26,15 @@ pub fn FindByteOutStream(comptime WriterType: type) type { break :blk false; }; } - return self.writer_pointer.writer().write(bytes); + return self.underlying_writer.write(bytes); } }; } -pub fn findByteOutStream(byte: u8, underlying_stream: anytype) FindByteOutStream(@TypeOf(underlying_stream).Child) { - comptime assert(@typeInfo(@TypeOf(underlying_stream)) == .Pointer); - return FindByteOutStream(@TypeOf(underlying_stream).Child).init(byte, underlying_stream); + +pub fn findByteOutStream(byte: u8, underlying_writer: anytype) FindByteOutStream(@TypeOf(underlying_writer)) { + return FindByteOutStream(@TypeOf(underlying_writer)){ + .underlying_writer = underlying_writer, + .byte = byte, + .byte_found = false, + }; } diff --git a/lib/std/io/writer.zig b/lib/std/io/writer.zig index ffdca0d6a6..39729ef0a2 100644 --- a/lib/std/io/writer.zig +++ b/lib/std/io/writer.zig @@ -18,10 +18,6 @@ pub fn Writer( const Self = @This(); pub const Error = WriteError; - pub fn writer(self: *const Self) Self { - return self.*; - } - pub fn write(self: Self, bytes: []const u8) Error!usize { return writeFn(self.context, bytes); } diff --git a/lib/std/zig/parser_test.zig b/lib/std/zig/parser_test.zig index 8652a73c50..36ceb400dc 100644 --- a/lib/std/zig/parser_test.zig +++ b/lib/std/zig/parser_test.zig @@ -3364,7 +3364,7 @@ fn testParse(source: []const u8, allocator: *mem.Allocator, anything_changed: *b errdefer buffer.deinit(); const outStream = buffer.outStream(); - anything_changed.* = try std.zig.render(allocator, &outStream, tree); + anything_changed.* = try std.zig.render(allocator, outStream, tree); return buffer.toOwnedSlice(); } fn testTransform(source: []const u8, expected_source: []const u8) !void { diff --git a/lib/std/zig/render.zig b/lib/std/zig/render.zig index b7a2b8675a..237ca07d2b 100644 --- a/lib/std/zig/render.zig +++ b/lib/std/zig/render.zig @@ -19,13 +19,12 @@ pub const Error = error{ }; /// Returns whether anything changed -pub fn render(allocator: *mem.Allocator, stream: anytype, tree: *ast.Tree) (meta.Child(@TypeOf(stream)).Error || Error)!bool { +pub fn render(allocator: *mem.Allocator, stream: anytype, tree: *ast.Tree) (@TypeOf(stream).Error || Error)!bool { // cannot render an invalid tree std.debug.assert(tree.errors.len == 0); - var s = stream.*; - var change_detection_stream = std.io.changeDetectionStream(tree.source, &s); - var auto_indenting_stream = std.io.autoIndentingStream(indent_delta, &change_detection_stream); + var change_detection_stream = std.io.changeDetectionStream(tree.source, stream); + var auto_indenting_stream = std.io.autoIndentingStream(indent_delta, change_detection_stream.writer()); try renderRoot(allocator, &auto_indenting_stream, tree); @@ -34,19 +33,19 @@ pub fn render(allocator: *mem.Allocator, stream: anytype, tree: *ast.Tree) (meta fn renderRoot( allocator: *mem.Allocator, - stream: anytype, + ais: anytype, tree: 
*ast.Tree, -) (@TypeOf(stream.*).Error || Error)!void { +) (@TypeOf(ais.*).Error || Error)!void { // render all the line comments at the beginning of the file for (tree.token_ids) |token_id, i| { if (token_id != .LineComment) break; const token_loc = tree.token_locs[i]; - try stream.writer().print("{}\n", .{mem.trimRight(u8, tree.tokenSliceLoc(token_loc), " ")}); + try ais.writer().print("{}\n", .{mem.trimRight(u8, tree.tokenSliceLoc(token_loc), " ")}); const next_token = tree.token_locs[i + 1]; const loc = tree.tokenLocationLoc(token_loc.end, next_token); if (loc.line >= 2) { - try stream.insertNewline(); + try ais.insertNewline(); } } @@ -110,7 +109,7 @@ fn renderRoot( // If there's no next reformatted `decl`, just copy the // remaining input tokens and bail out. const start = tree.token_locs[copy_start_token_index].start; - try copyFixingWhitespace(stream, tree.source[start..]); + try copyFixingWhitespace(ais, tree.source[start..]); return; } decl = root_decls[decl_i]; @@ -151,25 +150,25 @@ fn renderRoot( const start = tree.token_locs[copy_start_token_index].start; const end = tree.token_locs[copy_end_token_index].start; - try copyFixingWhitespace(stream, tree.source[start..end]); + try copyFixingWhitespace(ais, tree.source[start..end]); } - try renderTopLevelDecl(allocator, stream, tree, decl); + try renderTopLevelDecl(allocator, ais, tree, decl); decl_i += 1; if (decl_i >= root_decls.len) return; - try renderExtraNewline(tree, stream, root_decls[decl_i]); + try renderExtraNewline(tree, ais, root_decls[decl_i]); } } -fn renderExtraNewline(tree: *ast.Tree, stream: anytype, node: *ast.Node) @TypeOf(stream.*).Error!void { - return renderExtraNewlineToken(tree, stream, node.firstToken()); +fn renderExtraNewline(tree: *ast.Tree, ais: anytype, node: *ast.Node) @TypeOf(ais.*).Error!void { + return renderExtraNewlineToken(tree, ais, node.firstToken()); } fn renderExtraNewlineToken( tree: *ast.Tree, - stream: anytype, + ais: anytype, first_token: ast.TokenIndex, -) @TypeOf(stream.*).Error!void { +) @TypeOf(ais.*).Error!void { var prev_token = first_token; if (prev_token == 0) return; var newline_threshold: usize = 2; @@ -182,27 +181,27 @@ fn renderExtraNewlineToken( const prev_token_end = tree.token_locs[prev_token - 1].end; const loc = tree.tokenLocation(prev_token_end, first_token); if (loc.line >= newline_threshold) { - try stream.insertNewline(); + try ais.insertNewline(); } } -fn renderTopLevelDecl(allocator: *mem.Allocator, stream: anytype, tree: *ast.Tree, decl: *ast.Node) (@TypeOf(stream.*).Error || Error)!void { - try renderContainerDecl(allocator, stream, tree, decl, .Newline); +fn renderTopLevelDecl(allocator: *mem.Allocator, ais: anytype, tree: *ast.Tree, decl: *ast.Node) (@TypeOf(ais.*).Error || Error)!void { + try renderContainerDecl(allocator, ais, tree, decl, .Newline); } -fn renderContainerDecl(allocator: *mem.Allocator, stream: anytype, tree: *ast.Tree, decl: *ast.Node, space: Space) (@TypeOf(stream.*).Error || Error)!void { +fn renderContainerDecl(allocator: *mem.Allocator, ais: anytype, tree: *ast.Tree, decl: *ast.Node, space: Space) (@TypeOf(ais.*).Error || Error)!void { switch (decl.tag) { .FnProto => { const fn_proto = @fieldParentPtr(ast.Node.FnProto, "base", decl); - try renderDocComments(tree, stream, fn_proto, fn_proto.getDocComments()); + try renderDocComments(tree, ais, fn_proto, fn_proto.getDocComments()); if (fn_proto.getBodyNode()) |body_node| { - try renderExpression(allocator, stream, tree, decl, .Space); - try renderExpression(allocator, stream, tree, 
body_node, space); + try renderExpression(allocator, ais, tree, decl, .Space); + try renderExpression(allocator, ais, tree, body_node, space); } else { - try renderExpression(allocator, stream, tree, decl, .None); - try renderToken(tree, stream, tree.nextToken(decl.lastToken()), space); + try renderExpression(allocator, ais, tree, decl, .None); + try renderToken(tree, ais, tree.nextToken(decl.lastToken()), space); } }, @@ -210,35 +209,35 @@ fn renderContainerDecl(allocator: *mem.Allocator, stream: anytype, tree: *ast.Tr const use_decl = @fieldParentPtr(ast.Node.Use, "base", decl); if (use_decl.visib_token) |visib_token| { - try renderToken(tree, stream, visib_token, .Space); // pub + try renderToken(tree, ais, visib_token, .Space); // pub } - try renderToken(tree, stream, use_decl.use_token, .Space); // usingnamespace - try renderExpression(allocator, stream, tree, use_decl.expr, .None); - try renderToken(tree, stream, use_decl.semicolon_token, space); // ; + try renderToken(tree, ais, use_decl.use_token, .Space); // usingnamespace + try renderExpression(allocator, ais, tree, use_decl.expr, .None); + try renderToken(tree, ais, use_decl.semicolon_token, space); // ; }, .VarDecl => { const var_decl = @fieldParentPtr(ast.Node.VarDecl, "base", decl); - try renderDocComments(tree, stream, var_decl, var_decl.getDocComments()); - try renderVarDecl(allocator, stream, tree, var_decl); + try renderDocComments(tree, ais, var_decl, var_decl.getDocComments()); + try renderVarDecl(allocator, ais, tree, var_decl); }, .TestDecl => { const test_decl = @fieldParentPtr(ast.Node.TestDecl, "base", decl); - try renderDocComments(tree, stream, test_decl, test_decl.doc_comments); - try renderToken(tree, stream, test_decl.test_token, .Space); - try renderExpression(allocator, stream, tree, test_decl.name, .Space); - try renderExpression(allocator, stream, tree, test_decl.body_node, space); + try renderDocComments(tree, ais, test_decl, test_decl.doc_comments); + try renderToken(tree, ais, test_decl.test_token, .Space); + try renderExpression(allocator, ais, tree, test_decl.name, .Space); + try renderExpression(allocator, ais, tree, test_decl.body_node, space); }, .ContainerField => { const field = @fieldParentPtr(ast.Node.ContainerField, "base", decl); - try renderDocComments(tree, stream, field, field.doc_comments); + try renderDocComments(tree, ais, field, field.doc_comments); if (field.comptime_token) |t| { - try renderToken(tree, stream, t, .Space); // comptime + try renderToken(tree, ais, t, .Space); // comptime } const src_has_trailing_comma = blk: { @@ -251,67 +250,67 @@ fn renderContainerDecl(allocator: *mem.Allocator, stream: anytype, tree: *ast.Tr const last_token_space: Space = if (src_has_trailing_comma) .None else space; if (field.type_expr == null and field.value_expr == null) { - try renderToken(tree, stream, field.name_token, last_token_space); // name + try renderToken(tree, ais, field.name_token, last_token_space); // name } else if (field.type_expr != null and field.value_expr == null) { - try renderToken(tree, stream, field.name_token, .None); // name - try renderToken(tree, stream, tree.nextToken(field.name_token), .Space); // : + try renderToken(tree, ais, field.name_token, .None); // name + try renderToken(tree, ais, tree.nextToken(field.name_token), .Space); // : if (field.align_expr) |align_value_expr| { - try renderExpression(allocator, stream, tree, field.type_expr.?, .Space); // type + try renderExpression(allocator, ais, tree, field.type_expr.?, .Space); // type const lparen_token = 
tree.prevToken(align_value_expr.firstToken()); const align_kw = tree.prevToken(lparen_token); const rparen_token = tree.nextToken(align_value_expr.lastToken()); - try renderToken(tree, stream, align_kw, .None); // align - try renderToken(tree, stream, lparen_token, .None); // ( - try renderExpression(allocator, stream, tree, align_value_expr, .None); // alignment - try renderToken(tree, stream, rparen_token, last_token_space); // ) + try renderToken(tree, ais, align_kw, .None); // align + try renderToken(tree, ais, lparen_token, .None); // ( + try renderExpression(allocator, ais, tree, align_value_expr, .None); // alignment + try renderToken(tree, ais, rparen_token, last_token_space); // ) } else { - try renderExpression(allocator, stream, tree, field.type_expr.?, last_token_space); // type + try renderExpression(allocator, ais, tree, field.type_expr.?, last_token_space); // type } } else if (field.type_expr == null and field.value_expr != null) { - try renderToken(tree, stream, field.name_token, .Space); // name - try renderToken(tree, stream, tree.nextToken(field.name_token), .Space); // = - try renderExpression(allocator, stream, tree, field.value_expr.?, last_token_space); // value + try renderToken(tree, ais, field.name_token, .Space); // name + try renderToken(tree, ais, tree.nextToken(field.name_token), .Space); // = + try renderExpression(allocator, ais, tree, field.value_expr.?, last_token_space); // value } else { - try renderToken(tree, stream, field.name_token, .None); // name - try renderToken(tree, stream, tree.nextToken(field.name_token), .Space); // : + try renderToken(tree, ais, field.name_token, .None); // name + try renderToken(tree, ais, tree.nextToken(field.name_token), .Space); // : if (field.align_expr) |align_value_expr| { - try renderExpression(allocator, stream, tree, field.type_expr.?, .Space); // type + try renderExpression(allocator, ais, tree, field.type_expr.?, .Space); // type const lparen_token = tree.prevToken(align_value_expr.firstToken()); const align_kw = tree.prevToken(lparen_token); const rparen_token = tree.nextToken(align_value_expr.lastToken()); - try renderToken(tree, stream, align_kw, .None); // align - try renderToken(tree, stream, lparen_token, .None); // ( - try renderExpression(allocator, stream, tree, align_value_expr, .None); // alignment - try renderToken(tree, stream, rparen_token, .Space); // ) + try renderToken(tree, ais, align_kw, .None); // align + try renderToken(tree, ais, lparen_token, .None); // ( + try renderExpression(allocator, ais, tree, align_value_expr, .None); // alignment + try renderToken(tree, ais, rparen_token, .Space); // ) } else { - try renderExpression(allocator, stream, tree, field.type_expr.?, .Space); // type + try renderExpression(allocator, ais, tree, field.type_expr.?, .Space); // type } - try renderToken(tree, stream, tree.prevToken(field.value_expr.?.firstToken()), .Space); // = - try renderExpression(allocator, stream, tree, field.value_expr.?, last_token_space); // value + try renderToken(tree, ais, tree.prevToken(field.value_expr.?.firstToken()), .Space); // = + try renderExpression(allocator, ais, tree, field.value_expr.?, last_token_space); // value } if (src_has_trailing_comma) { const comma = tree.nextToken(field.lastToken()); - try renderToken(tree, stream, comma, space); + try renderToken(tree, ais, comma, space); } }, .Comptime => { assert(!decl.requireSemiColon()); - try renderExpression(allocator, stream, tree, decl, space); + try renderExpression(allocator, ais, tree, decl, space); }, 
.DocComment => { const comment = @fieldParentPtr(ast.Node.DocComment, "base", decl); const kind = tree.token_ids[comment.first_line]; - try renderToken(tree, stream, comment.first_line, .Newline); + try renderToken(tree, ais, comment.first_line, .Newline); var tok_i = comment.first_line + 1; while (true) : (tok_i += 1) { const tok_id = tree.token_ids[tok_i]; if (tok_id == kind) { - try renderToken(tree, stream, tok_i, .Newline); + try renderToken(tree, ais, tok_i, .Newline); } else if (tok_id == .LineComment) { continue; } else { @@ -325,11 +324,11 @@ fn renderContainerDecl(allocator: *mem.Allocator, stream: anytype, tree: *ast.Tr fn renderExpression( allocator: *mem.Allocator, - stream: anytype, + ais: anytype, tree: *ast.Tree, base: *ast.Node, space: Space, -) (@TypeOf(stream.*).Error || Error)!void { +) (@TypeOf(ais.*).Error || Error)!void { switch (base.tag) { .Identifier, .IntegerLiteral, @@ -343,18 +342,18 @@ fn renderExpression( .UndefinedLiteral, => { const casted_node = base.cast(ast.Node.OneToken).?; - return renderToken(tree, stream, casted_node.token, space); + return renderToken(tree, ais, casted_node.token, space); }, .AnyType => { const any_type = base.castTag(.AnyType).?; if (mem.eql(u8, tree.tokenSlice(any_type.token), "var")) { // TODO remove in next release cycle - try stream.writer().writeAll("anytype"); - if (space == .Comma) try stream.writer().writeAll(",\n"); + try ais.writer().writeAll("anytype"); + if (space == .Comma) try ais.writer().writeAll(",\n"); return; } - return renderToken(tree, stream, any_type.token, space); + return renderToken(tree, ais, any_type.token, space); }, .Block, .LabeledBlock => { @@ -384,65 +383,65 @@ fn renderExpression( }; if (block.label) |label| { - try renderToken(tree, stream, label, Space.None); - try renderToken(tree, stream, tree.nextToken(label), Space.Space); + try renderToken(tree, ais, label, Space.None); + try renderToken(tree, ais, tree.nextToken(label), Space.Space); } if (block.statements.len == 0) { - stream.pushIndentNextLine(); - defer stream.popIndent(); - try renderToken(tree, stream, block.lbrace, Space.None); + ais.pushIndentNextLine(); + defer ais.popIndent(); + try renderToken(tree, ais, block.lbrace, Space.None); } else { - stream.pushIndentNextLine(); - defer stream.popIndent(); + ais.pushIndentNextLine(); + defer ais.popIndent(); - try renderToken(tree, stream, block.lbrace, Space.Newline); + try renderToken(tree, ais, block.lbrace, Space.Newline); for (block.statements) |statement, i| { - try renderStatement(allocator, stream, tree, statement); + try renderStatement(allocator, ais, tree, statement); if (i + 1 < block.statements.len) { - try renderExtraNewline(tree, stream, block.statements[i + 1]); + try renderExtraNewline(tree, ais, block.statements[i + 1]); } } } - return renderToken(tree, stream, block.rbrace, space); + return renderToken(tree, ais, block.rbrace, space); }, .Defer => { const defer_node = @fieldParentPtr(ast.Node.Defer, "base", base); - try renderToken(tree, stream, defer_node.defer_token, Space.Space); + try renderToken(tree, ais, defer_node.defer_token, Space.Space); if (defer_node.payload) |payload| { - try renderExpression(allocator, stream, tree, payload, Space.Space); + try renderExpression(allocator, ais, tree, payload, Space.Space); } - return renderExpression(allocator, stream, tree, defer_node.expr, space); + return renderExpression(allocator, ais, tree, defer_node.expr, space); }, .Comptime => { const comptime_node = @fieldParentPtr(ast.Node.Comptime, "base", base); - try 
renderToken(tree, stream, comptime_node.comptime_token, Space.Space); - return renderExpression(allocator, stream, tree, comptime_node.expr, space); + try renderToken(tree, ais, comptime_node.comptime_token, Space.Space); + return renderExpression(allocator, ais, tree, comptime_node.expr, space); }, .Nosuspend => { const nosuspend_node = @fieldParentPtr(ast.Node.Nosuspend, "base", base); if (mem.eql(u8, tree.tokenSlice(nosuspend_node.nosuspend_token), "noasync")) { // TODO: remove this - try stream.writer().writeAll("nosuspend "); + try ais.writer().writeAll("nosuspend "); } else { - try renderToken(tree, stream, nosuspend_node.nosuspend_token, Space.Space); + try renderToken(tree, ais, nosuspend_node.nosuspend_token, Space.Space); } - return renderExpression(allocator, stream, tree, nosuspend_node.expr, space); + return renderExpression(allocator, ais, tree, nosuspend_node.expr, space); }, .Suspend => { const suspend_node = @fieldParentPtr(ast.Node.Suspend, "base", base); if (suspend_node.body) |body| { - try renderToken(tree, stream, suspend_node.suspend_token, Space.Space); - return renderExpression(allocator, stream, tree, body, space); + try renderToken(tree, ais, suspend_node.suspend_token, Space.Space); + return renderExpression(allocator, ais, tree, body, space); } else { - return renderToken(tree, stream, suspend_node.suspend_token, space); + return renderToken(tree, ais, suspend_node.suspend_token, space); } }, @@ -450,21 +449,21 @@ fn renderExpression( const infix_op_node = @fieldParentPtr(ast.Node.Catch, "base", base); const op_space = Space.Space; - try renderExpression(allocator, stream, tree, infix_op_node.lhs, op_space); + try renderExpression(allocator, ais, tree, infix_op_node.lhs, op_space); const after_op_space = blk: { const same_line = tree.tokensOnSameLine(infix_op_node.op_token, tree.nextToken(infix_op_node.op_token)); break :blk if (same_line) op_space else Space.Newline; }; - try renderToken(tree, stream, infix_op_node.op_token, after_op_space); + try renderToken(tree, ais, infix_op_node.op_token, after_op_space); if (infix_op_node.payload) |payload| { - try renderExpression(allocator, stream, tree, payload, Space.Space); + try renderExpression(allocator, ais, tree, payload, Space.Space); } - stream.pushIndentOneShot(); - return renderExpression(allocator, stream, tree, infix_op_node.rhs, space); + ais.pushIndentOneShot(); + return renderExpression(allocator, ais, tree, infix_op_node.rhs, space); }, .Add, @@ -516,16 +515,16 @@ fn renderExpression( .Period, .ErrorUnion, .Range => Space.None, else => Space.Space, }; - try renderExpression(allocator, stream, tree, infix_op_node.lhs, op_space); + try renderExpression(allocator, ais, tree, infix_op_node.lhs, op_space); const after_op_space = blk: { const loc = tree.tokenLocation(tree.token_locs[infix_op_node.op_token].end, tree.nextToken(infix_op_node.op_token)); break :blk if (loc.line == 0) op_space else Space.Newline; }; - try renderToken(tree, stream, infix_op_node.op_token, after_op_space); - stream.pushIndentOneShot(); - return renderExpression(allocator, stream, tree, infix_op_node.rhs, space); + try renderToken(tree, ais, infix_op_node.op_token, after_op_space); + ais.pushIndentOneShot(); + return renderExpression(allocator, ais, tree, infix_op_node.rhs, space); }, .BitNot, @@ -536,8 +535,8 @@ fn renderExpression( .AddressOf, => { const casted_node = @fieldParentPtr(ast.Node.SimplePrefixOp, "base", base); - try renderToken(tree, stream, casted_node.op_token, Space.None); - return renderExpression(allocator, 
stream, tree, casted_node.rhs, space); + try renderToken(tree, ais, casted_node.op_token, Space.None); + return renderExpression(allocator, ais, tree, casted_node.rhs, space); }, .Try, @@ -545,15 +544,15 @@ fn renderExpression( .Await, => { const casted_node = @fieldParentPtr(ast.Node.SimplePrefixOp, "base", base); - try renderToken(tree, stream, casted_node.op_token, Space.Space); - return renderExpression(allocator, stream, tree, casted_node.rhs, space); + try renderToken(tree, ais, casted_node.op_token, Space.Space); + return renderExpression(allocator, ais, tree, casted_node.rhs, space); }, .ArrayType => { const array_type = @fieldParentPtr(ast.Node.ArrayType, "base", base); return renderArrayType( allocator, - stream, + ais, tree, array_type.op_token, array_type.rhs, @@ -566,7 +565,7 @@ fn renderExpression( const array_type = @fieldParentPtr(ast.Node.ArrayTypeSentinel, "base", base); return renderArrayType( allocator, - stream, + ais, tree, array_type.op_token, array_type.rhs, @@ -580,111 +579,111 @@ fn renderExpression( const ptr_type = @fieldParentPtr(ast.Node.PtrType, "base", base); const op_tok_id = tree.token_ids[ptr_type.op_token]; switch (op_tok_id) { - .Asterisk, .AsteriskAsterisk => try stream.writer().writeByte('*'), + .Asterisk, .AsteriskAsterisk => try ais.writer().writeByte('*'), .LBracket => if (tree.token_ids[ptr_type.op_token + 2] == .Identifier) - try stream.writer().writeAll("[*c") + try ais.writer().writeAll("[*c") else - try stream.writer().writeAll("[*"), + try ais.writer().writeAll("[*"), else => unreachable, } if (ptr_type.ptr_info.sentinel) |sentinel| { const colon_token = tree.prevToken(sentinel.firstToken()); - try renderToken(tree, stream, colon_token, Space.None); // : + try renderToken(tree, ais, colon_token, Space.None); // : const sentinel_space = switch (op_tok_id) { .LBracket => Space.None, else => Space.Space, }; - try renderExpression(allocator, stream, tree, sentinel, sentinel_space); + try renderExpression(allocator, ais, tree, sentinel, sentinel_space); } switch (op_tok_id) { .Asterisk, .AsteriskAsterisk => {}, - .LBracket => try stream.writer().writeByte(']'), + .LBracket => try ais.writer().writeByte(']'), else => unreachable, } if (ptr_type.ptr_info.allowzero_token) |allowzero_token| { - try renderToken(tree, stream, allowzero_token, Space.Space); // allowzero + try renderToken(tree, ais, allowzero_token, Space.Space); // allowzero } if (ptr_type.ptr_info.align_info) |align_info| { const lparen_token = tree.prevToken(align_info.node.firstToken()); const align_token = tree.prevToken(lparen_token); - try renderToken(tree, stream, align_token, Space.None); // align - try renderToken(tree, stream, lparen_token, Space.None); // ( + try renderToken(tree, ais, align_token, Space.None); // align + try renderToken(tree, ais, lparen_token, Space.None); // ( - try renderExpression(allocator, stream, tree, align_info.node, Space.None); + try renderExpression(allocator, ais, tree, align_info.node, Space.None); if (align_info.bit_range) |bit_range| { const colon1 = tree.prevToken(bit_range.start.firstToken()); const colon2 = tree.prevToken(bit_range.end.firstToken()); - try renderToken(tree, stream, colon1, Space.None); // : - try renderExpression(allocator, stream, tree, bit_range.start, Space.None); - try renderToken(tree, stream, colon2, Space.None); // : - try renderExpression(allocator, stream, tree, bit_range.end, Space.None); + try renderToken(tree, ais, colon1, Space.None); // : + try renderExpression(allocator, ais, tree, bit_range.start, 
Space.None); + try renderToken(tree, ais, colon2, Space.None); // : + try renderExpression(allocator, ais, tree, bit_range.end, Space.None); const rparen_token = tree.nextToken(bit_range.end.lastToken()); - try renderToken(tree, stream, rparen_token, Space.Space); // ) + try renderToken(tree, ais, rparen_token, Space.Space); // ) } else { const rparen_token = tree.nextToken(align_info.node.lastToken()); - try renderToken(tree, stream, rparen_token, Space.Space); // ) + try renderToken(tree, ais, rparen_token, Space.Space); // ) } } if (ptr_type.ptr_info.const_token) |const_token| { - try renderToken(tree, stream, const_token, Space.Space); // const + try renderToken(tree, ais, const_token, Space.Space); // const } if (ptr_type.ptr_info.volatile_token) |volatile_token| { - try renderToken(tree, stream, volatile_token, Space.Space); // volatile + try renderToken(tree, ais, volatile_token, Space.Space); // volatile } - return renderExpression(allocator, stream, tree, ptr_type.rhs, space); + return renderExpression(allocator, ais, tree, ptr_type.rhs, space); }, .SliceType => { const slice_type = @fieldParentPtr(ast.Node.SliceType, "base", base); - try renderToken(tree, stream, slice_type.op_token, Space.None); // [ + try renderToken(tree, ais, slice_type.op_token, Space.None); // [ if (slice_type.ptr_info.sentinel) |sentinel| { const colon_token = tree.prevToken(sentinel.firstToken()); - try renderToken(tree, stream, colon_token, Space.None); // : - try renderExpression(allocator, stream, tree, sentinel, Space.None); - try renderToken(tree, stream, tree.nextToken(sentinel.lastToken()), Space.None); // ] + try renderToken(tree, ais, colon_token, Space.None); // : + try renderExpression(allocator, ais, tree, sentinel, Space.None); + try renderToken(tree, ais, tree.nextToken(sentinel.lastToken()), Space.None); // ] } else { - try renderToken(tree, stream, tree.nextToken(slice_type.op_token), Space.None); // ] + try renderToken(tree, ais, tree.nextToken(slice_type.op_token), Space.None); // ] } if (slice_type.ptr_info.allowzero_token) |allowzero_token| { - try renderToken(tree, stream, allowzero_token, Space.Space); // allowzero + try renderToken(tree, ais, allowzero_token, Space.Space); // allowzero } if (slice_type.ptr_info.align_info) |align_info| { const lparen_token = tree.prevToken(align_info.node.firstToken()); const align_token = tree.prevToken(lparen_token); - try renderToken(tree, stream, align_token, Space.None); // align - try renderToken(tree, stream, lparen_token, Space.None); // ( + try renderToken(tree, ais, align_token, Space.None); // align + try renderToken(tree, ais, lparen_token, Space.None); // ( - try renderExpression(allocator, stream, tree, align_info.node, Space.None); + try renderExpression(allocator, ais, tree, align_info.node, Space.None); if (align_info.bit_range) |bit_range| { const colon1 = tree.prevToken(bit_range.start.firstToken()); const colon2 = tree.prevToken(bit_range.end.firstToken()); - try renderToken(tree, stream, colon1, Space.None); // : - try renderExpression(allocator, stream, tree, bit_range.start, Space.None); - try renderToken(tree, stream, colon2, Space.None); // : - try renderExpression(allocator, stream, tree, bit_range.end, Space.None); + try renderToken(tree, ais, colon1, Space.None); // : + try renderExpression(allocator, ais, tree, bit_range.start, Space.None); + try renderToken(tree, ais, colon2, Space.None); // : + try renderExpression(allocator, ais, tree, bit_range.end, Space.None); const rparen_token = 
tree.nextToken(bit_range.end.lastToken()); - try renderToken(tree, stream, rparen_token, Space.Space); // ) + try renderToken(tree, ais, rparen_token, Space.Space); // ) } else { const rparen_token = tree.nextToken(align_info.node.lastToken()); - try renderToken(tree, stream, rparen_token, Space.Space); // ) + try renderToken(tree, ais, rparen_token, Space.Space); // ) } } if (slice_type.ptr_info.const_token) |const_token| { - try renderToken(tree, stream, const_token, Space.Space); + try renderToken(tree, ais, const_token, Space.Space); } if (slice_type.ptr_info.volatile_token) |volatile_token| { - try renderToken(tree, stream, volatile_token, Space.Space); + try renderToken(tree, ais, volatile_token, Space.Space); } - return renderExpression(allocator, stream, tree, slice_type.rhs, space); + return renderExpression(allocator, ais, tree, slice_type.rhs, space); }, .ArrayInitializer, .ArrayInitializerDot => { @@ -713,33 +712,33 @@ fn renderExpression( if (exprs.len == 0) { switch (lhs) { - .dot => |dot| try renderToken(tree, stream, dot, Space.None), - .node => |node| try renderExpression(allocator, stream, tree, node, Space.None), + .dot => |dot| try renderToken(tree, ais, dot, Space.None), + .node => |node| try renderExpression(allocator, ais, tree, node, Space.None), } { - stream.pushIndent(); - defer stream.popIndent(); - try renderToken(tree, stream, lbrace, Space.None); + ais.pushIndent(); + defer ais.popIndent(); + try renderToken(tree, ais, lbrace, Space.None); } - return renderToken(tree, stream, rtoken, space); + return renderToken(tree, ais, rtoken, space); } if (exprs.len == 1 and tree.token_ids[exprs[0].*.lastToken() + 1] == .RBrace) { const expr = exprs[0]; switch (lhs) { - .dot => |dot| try renderToken(tree, stream, dot, Space.None), - .node => |node| try renderExpression(allocator, stream, tree, node, Space.None), + .dot => |dot| try renderToken(tree, ais, dot, Space.None), + .node => |node| try renderExpression(allocator, ais, tree, node, Space.None), } - try renderToken(tree, stream, lbrace, Space.None); - try renderExpression(allocator, stream, tree, expr, Space.None); - return renderToken(tree, stream, rtoken, space); + try renderToken(tree, ais, lbrace, Space.None); + try renderExpression(allocator, ais, tree, expr, Space.None); + return renderToken(tree, ais, rtoken, space); } switch (lhs) { - .dot => |dot| try renderToken(tree, stream, dot, Space.None), - .node => |node| try renderExpression(allocator, stream, tree, node, Space.None), + .dot => |dot| try renderToken(tree, ais, dot, Space.None), + .node => |node| try renderExpression(allocator, ais, tree, node, Space.None), } // scan to find row size @@ -781,9 +780,9 @@ fn renderExpression( var expr_widths = widths[0 .. 
widths.len - row_size]; var column_widths = widths[widths.len - row_size ..]; - // Null stream for counting the printed length of each expression + // Null ais for counting the printed length of each expression var counting_stream = std.io.countingOutStream(std.io.null_out_stream); - var auto_indenting_stream = std.io.autoIndentingStream(indent_delta, &counting_stream); + var auto_indenting_stream = std.io.autoIndentingStream(indent_delta, counting_stream.writer()); for (exprs) |expr, i| { counting_stream.bytes_written = 0; @@ -795,23 +794,23 @@ fn renderExpression( } { - stream.pushIndentNextLine(); - defer stream.popIndent(); - try renderToken(tree, stream, lbrace, Space.Newline); + ais.pushIndentNextLine(); + defer ais.popIndent(); + try renderToken(tree, ais, lbrace, Space.Newline); var col: usize = 1; for (exprs) |expr, i| { if (i + 1 < exprs.len) { const next_expr = exprs[i + 1]; - try renderExpression(allocator, stream, tree, expr, Space.None); + try renderExpression(allocator, ais, tree, expr, Space.None); const comma = tree.nextToken(expr.*.lastToken()); if (col != row_size) { - try renderToken(tree, stream, comma, Space.Space); // , + try renderToken(tree, ais, comma, Space.Space); // , const padding = column_widths[i % row_size] - expr_widths[i]; - try stream.writer().writeByteNTimes(' ', padding); + try ais.writer().writeByteNTimes(' ', padding); col += 1; continue; @@ -819,32 +818,32 @@ fn renderExpression( col = 1; if (tree.token_ids[tree.nextToken(comma)] != .MultilineStringLiteralLine) { - try renderToken(tree, stream, comma, Space.Newline); // , + try renderToken(tree, ais, comma, Space.Newline); // , } else { - try renderToken(tree, stream, comma, Space.None); // , + try renderToken(tree, ais, comma, Space.None); // , } - try renderExtraNewline(tree, stream, next_expr); + try renderExtraNewline(tree, ais, next_expr); } else { - try renderExpression(allocator, stream, tree, expr, Space.Comma); // , + try renderExpression(allocator, ais, tree, expr, Space.Comma); // , } } } - return renderToken(tree, stream, rtoken, space); + return renderToken(tree, ais, rtoken, space); } else { - try renderToken(tree, stream, lbrace, Space.Space); + try renderToken(tree, ais, lbrace, Space.Space); for (exprs) |expr, i| { if (i + 1 < exprs.len) { const next_expr = exprs[i + 1]; - try renderExpression(allocator, stream, tree, expr, Space.None); + try renderExpression(allocator, ais, tree, expr, Space.None); const comma = tree.nextToken(expr.*.lastToken()); - try renderToken(tree, stream, comma, Space.Space); // , + try renderToken(tree, ais, comma, Space.Space); // , } else { - try renderExpression(allocator, stream, tree, expr, Space.Space); + try renderExpression(allocator, ais, tree, expr, Space.Space); } } - return renderToken(tree, stream, rtoken, space); + return renderToken(tree, ais, rtoken, space); } }, @@ -874,17 +873,17 @@ fn renderExpression( if (field_inits.len == 0) { switch (lhs) { - .dot => |dot| try renderToken(tree, stream, dot, Space.None), - .node => |node| try renderExpression(allocator, stream, tree, node, Space.None), + .dot => |dot| try renderToken(tree, ais, dot, Space.None), + .node => |node| try renderExpression(allocator, ais, tree, node, Space.None), } { - stream.pushIndentNextLine(); - defer stream.popIndent(); - try renderToken(tree, stream, lbrace, Space.None); + ais.pushIndentNextLine(); + defer ais.popIndent(); + try renderToken(tree, ais, lbrace, Space.None); } - return renderToken(tree, stream, rtoken, space); + return renderToken(tree, ais, rtoken, 
space); } const src_has_trailing_comma = blk: { @@ -900,8 +899,8 @@ fn renderExpression( const expr_outputs_one_line = blk: { // render field expressions until a LF is found for (field_inits) |field_init| { - var find_stream = std.io.findByteOutStream('\n', &std.io.null_out_stream); - var auto_indenting_stream = std.io.autoIndentingStream(indent_delta, &find_stream); + var find_stream = std.io.findByteOutStream('\n', std.io.null_out_stream); + var auto_indenting_stream = std.io.autoIndentingStream(indent_delta, find_stream.writer()); try renderExpression(allocator, &auto_indenting_stream, tree, field_init, Space.None); if (find_stream.byte_found) break :blk false; @@ -925,78 +924,78 @@ fn renderExpression( } switch (lhs) { - .dot => |dot| try renderToken(tree, stream, dot, Space.None), - .node => |node| try renderExpression(allocator, stream, tree, node, Space.None), + .dot => |dot| try renderToken(tree, ais, dot, Space.None), + .node => |node| try renderExpression(allocator, ais, tree, node, Space.None), } - try renderToken(tree, stream, lbrace, Space.Space); - try renderExpression(allocator, stream, tree, &field_init.base, Space.Space); - return renderToken(tree, stream, rtoken, space); + try renderToken(tree, ais, lbrace, Space.Space); + try renderExpression(allocator, ais, tree, &field_init.base, Space.Space); + return renderToken(tree, ais, rtoken, space); } if (!src_has_trailing_comma and src_same_line and expr_outputs_one_line) { // render all on one line, no trailing comma switch (lhs) { - .dot => |dot| try renderToken(tree, stream, dot, Space.None), - .node => |node| try renderExpression(allocator, stream, tree, node, Space.None), + .dot => |dot| try renderToken(tree, ais, dot, Space.None), + .node => |node| try renderExpression(allocator, ais, tree, node, Space.None), } - try renderToken(tree, stream, lbrace, Space.Space); + try renderToken(tree, ais, lbrace, Space.Space); for (field_inits) |field_init, i| { if (i + 1 < field_inits.len) { - try renderExpression(allocator, stream, tree, field_init, Space.None); + try renderExpression(allocator, ais, tree, field_init, Space.None); const comma = tree.nextToken(field_init.lastToken()); - try renderToken(tree, stream, comma, Space.Space); + try renderToken(tree, ais, comma, Space.Space); } else { - try renderExpression(allocator, stream, tree, field_init, Space.Space); + try renderExpression(allocator, ais, tree, field_init, Space.Space); } } - return renderToken(tree, stream, rtoken, space); + return renderToken(tree, ais, rtoken, space); } { switch (lhs) { - .dot => |dot| try renderToken(tree, stream, dot, Space.None), - .node => |node| try renderExpression(allocator, stream, tree, node, Space.None), + .dot => |dot| try renderToken(tree, ais, dot, Space.None), + .node => |node| try renderExpression(allocator, ais, tree, node, Space.None), } - stream.pushIndentNextLine(); - defer stream.popIndent(); + ais.pushIndentNextLine(); + defer ais.popIndent(); - try renderToken(tree, stream, lbrace, Space.Newline); + try renderToken(tree, ais, lbrace, Space.Newline); for (field_inits) |field_init, i| { if (i + 1 < field_inits.len) { const next_field_init = field_inits[i + 1]; - try renderExpression(allocator, stream, tree, field_init, Space.None); + try renderExpression(allocator, ais, tree, field_init, Space.None); const comma = tree.nextToken(field_init.lastToken()); - try renderToken(tree, stream, comma, Space.Newline); + try renderToken(tree, ais, comma, Space.Newline); - try renderExtraNewline(tree, stream, next_field_init); + try 
renderExtraNewline(tree, ais, next_field_init); } else { - try renderExpression(allocator, stream, tree, field_init, Space.Comma); + try renderExpression(allocator, ais, tree, field_init, Space.Comma); } } } - return renderToken(tree, stream, rtoken, space); + return renderToken(tree, ais, rtoken, space); }, .Call => { const call = @fieldParentPtr(ast.Node.Call, "base", base); if (call.async_token) |async_token| { - try renderToken(tree, stream, async_token, Space.Space); + try renderToken(tree, ais, async_token, Space.Space); } - try renderExpression(allocator, stream, tree, call.lhs, Space.None); + try renderExpression(allocator, ais, tree, call.lhs, Space.None); const lparen = tree.nextToken(call.lhs.lastToken()); if (call.params_len == 0) { - try renderToken(tree, stream, lparen, Space.None); - return renderToken(tree, stream, call.rtoken, space); + try renderToken(tree, ais, lparen, Space.None); + return renderToken(tree, ais, call.rtoken, space); } const src_has_trailing_comma = blk: { @@ -1005,41 +1004,41 @@ fn renderExpression( }; if (src_has_trailing_comma) { - try renderToken(tree, stream, lparen, Space.Newline); + try renderToken(tree, ais, lparen, Space.Newline); const params = call.params(); for (params) |param_node, i| { - stream.pushIndent(); - defer stream.popIndent(); + ais.pushIndent(); + defer ais.popIndent(); if (i + 1 < params.len) { const next_node = params[i + 1]; - try renderExpression(allocator, stream, tree, param_node, Space.None); + try renderExpression(allocator, ais, tree, param_node, Space.None); const comma = tree.nextToken(param_node.lastToken()); - try renderToken(tree, stream, comma, Space.Newline); // , - try renderExtraNewline(tree, stream, next_node); + try renderToken(tree, ais, comma, Space.Newline); // , + try renderExtraNewline(tree, ais, next_node); } else { - try renderExpression(allocator, stream, tree, param_node, Space.Comma); + try renderExpression(allocator, ais, tree, param_node, Space.Comma); } } - return renderToken(tree, stream, call.rtoken, space); + return renderToken(tree, ais, call.rtoken, space); } - try renderToken(tree, stream, lparen, Space.None); // ( + try renderToken(tree, ais, lparen, Space.None); // ( const params = call.params(); for (params) |param_node, i| { - if (param_node.*.tag == .MultilineStringLiteral) stream.pushIndentOneShot(); + if (param_node.*.tag == .MultilineStringLiteral) ais.pushIndentOneShot(); - try renderExpression(allocator, stream, tree, param_node, Space.None); + try renderExpression(allocator, ais, tree, param_node, Space.None); if (i + 1 < params.len) { const next_param = params[i + 1]; const comma = tree.nextToken(param_node.lastToken()); - try renderToken(tree, stream, comma, Space.Space); + try renderToken(tree, ais, comma, Space.Space); } } - return renderToken(tree, stream, call.rtoken, space); + return renderToken(tree, ais, call.rtoken, space); }, .ArrayAccess => { @@ -1048,25 +1047,25 @@ fn renderExpression( const lbracket = tree.nextToken(suffix_op.lhs.lastToken()); const rbracket = tree.nextToken(suffix_op.index_expr.lastToken()); - try renderExpression(allocator, stream, tree, suffix_op.lhs, Space.None); - try renderToken(tree, stream, lbracket, Space.None); // [ + try renderExpression(allocator, ais, tree, suffix_op.lhs, Space.None); + try renderToken(tree, ais, lbracket, Space.None); // [ const starts_with_comment = tree.token_ids[lbracket + 1] == .LineComment; const ends_with_comment = tree.token_ids[rbracket - 1] == .LineComment; { const new_space = if (ends_with_comment) 
Space.Newline else Space.None; - stream.pushIndent(); - defer stream.popIndent(); - try renderExpression(allocator, stream, tree, suffix_op.index_expr, new_space); + ais.pushIndent(); + defer ais.popIndent(); + try renderExpression(allocator, ais, tree, suffix_op.index_expr, new_space); } - if (starts_with_comment) try stream.maybeInsertNewline(); - return renderToken(tree, stream, rbracket, space); // ] + if (starts_with_comment) try ais.maybeInsertNewline(); + return renderToken(tree, ais, rbracket, space); // ] }, .Slice => { const suffix_op = base.castTag(.Slice).?; - try renderExpression(allocator, stream, tree, suffix_op.lhs, Space.None); + try renderExpression(allocator, ais, tree, suffix_op.lhs, Space.None); const lbracket = tree.prevToken(suffix_op.start.firstToken()); const dotdot = tree.nextToken(suffix_op.start.lastToken()); @@ -1076,33 +1075,33 @@ fn renderExpression( const after_start_space = if (after_start_space_bool) Space.Space else Space.None; const after_op_space = if (suffix_op.end != null) after_start_space else Space.None; - try renderToken(tree, stream, lbracket, Space.None); // [ - try renderExpression(allocator, stream, tree, suffix_op.start, after_start_space); - try renderToken(tree, stream, dotdot, after_op_space); // .. + try renderToken(tree, ais, lbracket, Space.None); // [ + try renderExpression(allocator, ais, tree, suffix_op.start, after_start_space); + try renderToken(tree, ais, dotdot, after_op_space); // .. if (suffix_op.end) |end| { const after_end_space = if (suffix_op.sentinel != null) Space.Space else Space.None; - try renderExpression(allocator, stream, tree, end, after_end_space); + try renderExpression(allocator, ais, tree, end, after_end_space); } if (suffix_op.sentinel) |sentinel| { const colon = tree.prevToken(sentinel.firstToken()); - try renderToken(tree, stream, colon, Space.None); // : - try renderExpression(allocator, stream, tree, sentinel, Space.None); + try renderToken(tree, ais, colon, Space.None); // : + try renderExpression(allocator, ais, tree, sentinel, Space.None); } - return renderToken(tree, stream, suffix_op.rtoken, space); // ] + return renderToken(tree, ais, suffix_op.rtoken, space); // ] }, .Deref => { const suffix_op = base.castTag(.Deref).?; - try renderExpression(allocator, stream, tree, suffix_op.lhs, Space.None); - return renderToken(tree, stream, suffix_op.rtoken, space); // .* + try renderExpression(allocator, ais, tree, suffix_op.lhs, Space.None); + return renderToken(tree, ais, suffix_op.rtoken, space); // .* }, .UnwrapOptional => { const suffix_op = base.castTag(.UnwrapOptional).?; - try renderExpression(allocator, stream, tree, suffix_op.lhs, Space.None); - try renderToken(tree, stream, tree.prevToken(suffix_op.rtoken), Space.None); // . - return renderToken(tree, stream, suffix_op.rtoken, space); // ? + try renderExpression(allocator, ais, tree, suffix_op.lhs, Space.None); + try renderToken(tree, ais, tree.prevToken(suffix_op.rtoken), Space.None); // . + return renderToken(tree, ais, suffix_op.rtoken, space); // ? 
}, .Break => { @@ -1111,152 +1110,152 @@ fn renderExpression( const maybe_label = flow_expr.getLabel(); if (maybe_label == null and maybe_rhs == null) { - return renderToken(tree, stream, flow_expr.ltoken, space); // break + return renderToken(tree, ais, flow_expr.ltoken, space); // break } - try renderToken(tree, stream, flow_expr.ltoken, Space.Space); // break + try renderToken(tree, ais, flow_expr.ltoken, Space.Space); // break if (maybe_label) |label| { const colon = tree.nextToken(flow_expr.ltoken); - try renderToken(tree, stream, colon, Space.None); // : + try renderToken(tree, ais, colon, Space.None); // : if (maybe_rhs == null) { - return renderToken(tree, stream, label, space); // label + return renderToken(tree, ais, label, space); // label } - try renderToken(tree, stream, label, Space.Space); // label + try renderToken(tree, ais, label, Space.Space); // label } - return renderExpression(allocator, stream, tree, maybe_rhs.?, space); + return renderExpression(allocator, ais, tree, maybe_rhs.?, space); }, .Continue => { const flow_expr = base.castTag(.Continue).?; if (flow_expr.getLabel()) |label| { - try renderToken(tree, stream, flow_expr.ltoken, Space.Space); // continue + try renderToken(tree, ais, flow_expr.ltoken, Space.Space); // continue const colon = tree.nextToken(flow_expr.ltoken); - try renderToken(tree, stream, colon, Space.None); // : - return renderToken(tree, stream, label, space); // label + try renderToken(tree, ais, colon, Space.None); // : + return renderToken(tree, ais, label, space); // label } else { - return renderToken(tree, stream, flow_expr.ltoken, space); // continue + return renderToken(tree, ais, flow_expr.ltoken, space); // continue } }, .Return => { const flow_expr = base.castTag(.Return).?; if (flow_expr.getRHS()) |rhs| { - try renderToken(tree, stream, flow_expr.ltoken, Space.Space); - return renderExpression(allocator, stream, tree, rhs, space); + try renderToken(tree, ais, flow_expr.ltoken, Space.Space); + return renderExpression(allocator, ais, tree, rhs, space); } else { - return renderToken(tree, stream, flow_expr.ltoken, space); + return renderToken(tree, ais, flow_expr.ltoken, space); } }, .Payload => { const payload = @fieldParentPtr(ast.Node.Payload, "base", base); - try renderToken(tree, stream, payload.lpipe, Space.None); - try renderExpression(allocator, stream, tree, payload.error_symbol, Space.None); - return renderToken(tree, stream, payload.rpipe, space); + try renderToken(tree, ais, payload.lpipe, Space.None); + try renderExpression(allocator, ais, tree, payload.error_symbol, Space.None); + return renderToken(tree, ais, payload.rpipe, space); }, .PointerPayload => { const payload = @fieldParentPtr(ast.Node.PointerPayload, "base", base); - try renderToken(tree, stream, payload.lpipe, Space.None); + try renderToken(tree, ais, payload.lpipe, Space.None); if (payload.ptr_token) |ptr_token| { - try renderToken(tree, stream, ptr_token, Space.None); + try renderToken(tree, ais, ptr_token, Space.None); } - try renderExpression(allocator, stream, tree, payload.value_symbol, Space.None); - return renderToken(tree, stream, payload.rpipe, space); + try renderExpression(allocator, ais, tree, payload.value_symbol, Space.None); + return renderToken(tree, ais, payload.rpipe, space); }, .PointerIndexPayload => { const payload = @fieldParentPtr(ast.Node.PointerIndexPayload, "base", base); - try renderToken(tree, stream, payload.lpipe, Space.None); + try renderToken(tree, ais, payload.lpipe, Space.None); if (payload.ptr_token) |ptr_token| { - try 
renderToken(tree, stream, ptr_token, Space.None); + try renderToken(tree, ais, ptr_token, Space.None); } - try renderExpression(allocator, stream, tree, payload.value_symbol, Space.None); + try renderExpression(allocator, ais, tree, payload.value_symbol, Space.None); if (payload.index_symbol) |index_symbol| { const comma = tree.nextToken(payload.value_symbol.lastToken()); - try renderToken(tree, stream, comma, Space.Space); - try renderExpression(allocator, stream, tree, index_symbol, Space.None); + try renderToken(tree, ais, comma, Space.Space); + try renderExpression(allocator, ais, tree, index_symbol, Space.None); } - return renderToken(tree, stream, payload.rpipe, space); + return renderToken(tree, ais, payload.rpipe, space); }, .GroupedExpression => { const grouped_expr = @fieldParentPtr(ast.Node.GroupedExpression, "base", base); - try renderToken(tree, stream, grouped_expr.lparen, Space.None); + try renderToken(tree, ais, grouped_expr.lparen, Space.None); { - stream.pushIndentOneShot(); - try renderExpression(allocator, stream, tree, grouped_expr.expr, Space.None); + ais.pushIndentOneShot(); + try renderExpression(allocator, ais, tree, grouped_expr.expr, Space.None); } - return renderToken(tree, stream, grouped_expr.rparen, space); + return renderToken(tree, ais, grouped_expr.rparen, space); }, .FieldInitializer => { const field_init = @fieldParentPtr(ast.Node.FieldInitializer, "base", base); - try renderToken(tree, stream, field_init.period_token, Space.None); // . - try renderToken(tree, stream, field_init.name_token, Space.Space); // name - try renderToken(tree, stream, tree.nextToken(field_init.name_token), Space.Space); // = - return renderExpression(allocator, stream, tree, field_init.expr, space); + try renderToken(tree, ais, field_init.period_token, Space.None); // . 
+ try renderToken(tree, ais, field_init.name_token, Space.Space); // name + try renderToken(tree, ais, tree.nextToken(field_init.name_token), Space.Space); // = + return renderExpression(allocator, ais, tree, field_init.expr, space); }, .ContainerDecl => { const container_decl = @fieldParentPtr(ast.Node.ContainerDecl, "base", base); if (container_decl.layout_token) |layout_token| { - try renderToken(tree, stream, layout_token, Space.Space); + try renderToken(tree, ais, layout_token, Space.Space); } switch (container_decl.init_arg_expr) { .None => { - try renderToken(tree, stream, container_decl.kind_token, Space.Space); // union + try renderToken(tree, ais, container_decl.kind_token, Space.Space); // union }, .Enum => |enum_tag_type| { - try renderToken(tree, stream, container_decl.kind_token, Space.None); // union + try renderToken(tree, ais, container_decl.kind_token, Space.None); // union const lparen = tree.nextToken(container_decl.kind_token); const enum_token = tree.nextToken(lparen); - try renderToken(tree, stream, lparen, Space.None); // ( - try renderToken(tree, stream, enum_token, Space.None); // enum + try renderToken(tree, ais, lparen, Space.None); // ( + try renderToken(tree, ais, enum_token, Space.None); // enum if (enum_tag_type) |expr| { - try renderToken(tree, stream, tree.nextToken(enum_token), Space.None); // ( - try renderExpression(allocator, stream, tree, expr, Space.None); + try renderToken(tree, ais, tree.nextToken(enum_token), Space.None); // ( + try renderExpression(allocator, ais, tree, expr, Space.None); const rparen = tree.nextToken(expr.lastToken()); - try renderToken(tree, stream, rparen, Space.None); // ) - try renderToken(tree, stream, tree.nextToken(rparen), Space.Space); // ) + try renderToken(tree, ais, rparen, Space.None); // ) + try renderToken(tree, ais, tree.nextToken(rparen), Space.Space); // ) } else { - try renderToken(tree, stream, tree.nextToken(enum_token), Space.Space); // ) + try renderToken(tree, ais, tree.nextToken(enum_token), Space.Space); // ) } }, .Type => |type_expr| { - try renderToken(tree, stream, container_decl.kind_token, Space.None); // union + try renderToken(tree, ais, container_decl.kind_token, Space.None); // union const lparen = tree.nextToken(container_decl.kind_token); const rparen = tree.nextToken(type_expr.lastToken()); - try renderToken(tree, stream, lparen, Space.None); // ( - try renderExpression(allocator, stream, tree, type_expr, Space.None); - try renderToken(tree, stream, rparen, Space.Space); // ) + try renderToken(tree, ais, lparen, Space.None); // ( + try renderExpression(allocator, ais, tree, type_expr, Space.None); + try renderToken(tree, ais, rparen, Space.Space); // ) }, } if (container_decl.fields_and_decls_len == 0) { { - stream.pushIndentNextLine(); - defer stream.popIndent(); - try renderToken(tree, stream, container_decl.lbrace_token, Space.None); // { + ais.pushIndentNextLine(); + defer ais.popIndent(); + try renderToken(tree, ais, container_decl.lbrace_token, Space.None); // { } - return renderToken(tree, stream, container_decl.rbrace_token, space); // } + return renderToken(tree, ais, container_decl.rbrace_token, space); // } } const src_has_trailing_comma = blk: { @@ -1287,39 +1286,39 @@ fn renderExpression( if (src_has_trailing_comma or !src_has_only_fields) { // One declaration per line - stream.pushIndentNextLine(); - defer stream.popIndent(); - try renderToken(tree, stream, container_decl.lbrace_token, .Newline); // { + ais.pushIndentNextLine(); + defer ais.popIndent(); + try renderToken(tree, 
ais, container_decl.lbrace_token, .Newline); // { for (fields_and_decls) |decl, i| { - try renderContainerDecl(allocator, stream, tree, decl, .Newline); + try renderContainerDecl(allocator, ais, tree, decl, .Newline); if (i + 1 < fields_and_decls.len) { - try renderExtraNewline(tree, stream, fields_and_decls[i + 1]); + try renderExtraNewline(tree, ais, fields_and_decls[i + 1]); } } } else if (src_has_newline) { // All the declarations on the same line, but place the items on // their own line - try renderToken(tree, stream, container_decl.lbrace_token, .Newline); // { + try renderToken(tree, ais, container_decl.lbrace_token, .Newline); // { - stream.pushIndent(); - defer stream.popIndent(); + ais.pushIndent(); + defer ais.popIndent(); for (fields_and_decls) |decl, i| { const space_after_decl: Space = if (i + 1 >= fields_and_decls.len) .Newline else .Space; - try renderContainerDecl(allocator, stream, tree, decl, space_after_decl); + try renderContainerDecl(allocator, ais, tree, decl, space_after_decl); } } else { // All the declarations on the same line - try renderToken(tree, stream, container_decl.lbrace_token, .Space); // { + try renderToken(tree, ais, container_decl.lbrace_token, .Space); // { for (fields_and_decls) |decl| { - try renderContainerDecl(allocator, stream, tree, decl, .Space); + try renderContainerDecl(allocator, ais, tree, decl, .Space); } } - return renderToken(tree, stream, container_decl.rbrace_token, space); // } + return renderToken(tree, ais, container_decl.rbrace_token, space); // } }, .ErrorSetDecl => { @@ -1328,9 +1327,9 @@ fn renderExpression( const lbrace = tree.nextToken(err_set_decl.error_token); if (err_set_decl.decls_len == 0) { - try renderToken(tree, stream, err_set_decl.error_token, Space.None); - try renderToken(tree, stream, lbrace, Space.None); - return renderToken(tree, stream, err_set_decl.rbrace_token, space); + try renderToken(tree, ais, err_set_decl.error_token, Space.None); + try renderToken(tree, ais, lbrace, Space.None); + return renderToken(tree, ais, err_set_decl.rbrace_token, space); } if (err_set_decl.decls_len == 1) blk: { @@ -1344,13 +1343,13 @@ fn renderExpression( break :blk; } - try renderToken(tree, stream, err_set_decl.error_token, Space.None); // error - try renderToken(tree, stream, lbrace, Space.None); // { - try renderExpression(allocator, stream, tree, node, Space.None); - return renderToken(tree, stream, err_set_decl.rbrace_token, space); // } + try renderToken(tree, ais, err_set_decl.error_token, Space.None); // error + try renderToken(tree, ais, lbrace, Space.None); // { + try renderExpression(allocator, ais, tree, node, Space.None); + return renderToken(tree, ais, err_set_decl.rbrace_token, space); // } } - try renderToken(tree, stream, err_set_decl.error_token, Space.None); // error + try renderToken(tree, ais, err_set_decl.error_token, Space.None); // error const src_has_trailing_comma = blk: { const maybe_comma = tree.prevToken(err_set_decl.rbrace_token); @@ -1359,64 +1358,64 @@ fn renderExpression( if (src_has_trailing_comma) { { - stream.pushIndent(); - defer stream.popIndent(); + ais.pushIndent(); + defer ais.popIndent(); - try renderToken(tree, stream, lbrace, Space.Newline); // { + try renderToken(tree, ais, lbrace, Space.Newline); // { const decls = err_set_decl.decls(); for (decls) |node, i| { if (i + 1 < decls.len) { - try renderExpression(allocator, stream, tree, node, Space.None); - try renderToken(tree, stream, tree.nextToken(node.lastToken()), Space.Newline); // , + try renderExpression(allocator, ais, tree, 
node, Space.None); + try renderToken(tree, ais, tree.nextToken(node.lastToken()), Space.Newline); // , - try renderExtraNewline(tree, stream, decls[i + 1]); + try renderExtraNewline(tree, ais, decls[i + 1]); } else { - try renderExpression(allocator, stream, tree, node, Space.Comma); + try renderExpression(allocator, ais, tree, node, Space.Comma); } } } - return renderToken(tree, stream, err_set_decl.rbrace_token, space); // } + return renderToken(tree, ais, err_set_decl.rbrace_token, space); // } } else { - try renderToken(tree, stream, lbrace, Space.Space); // { + try renderToken(tree, ais, lbrace, Space.Space); // { const decls = err_set_decl.decls(); for (decls) |node, i| { if (i + 1 < decls.len) { - try renderExpression(allocator, stream, tree, node, Space.None); + try renderExpression(allocator, ais, tree, node, Space.None); const comma_token = tree.nextToken(node.lastToken()); assert(tree.token_ids[comma_token] == .Comma); - try renderToken(tree, stream, comma_token, Space.Space); // , - try renderExtraNewline(tree, stream, decls[i + 1]); + try renderToken(tree, ais, comma_token, Space.Space); // , + try renderExtraNewline(tree, ais, decls[i + 1]); } else { - try renderExpression(allocator, stream, tree, node, Space.Space); + try renderExpression(allocator, ais, tree, node, Space.Space); } } - return renderToken(tree, stream, err_set_decl.rbrace_token, space); // } + return renderToken(tree, ais, err_set_decl.rbrace_token, space); // } } }, .ErrorTag => { const tag = @fieldParentPtr(ast.Node.ErrorTag, "base", base); - try renderDocComments(tree, stream, tag, tag.doc_comments); - return renderToken(tree, stream, tag.name_token, space); // name + try renderDocComments(tree, ais, tag, tag.doc_comments); + return renderToken(tree, ais, tag.name_token, space); // name }, .MultilineStringLiteral => { const multiline_str_literal = @fieldParentPtr(ast.Node.MultilineStringLiteral, "base", base); { - const locked_indents = stream.lockOneShotIndent(); + const locked_indents = ais.lockOneShotIndent(); defer { var i: u8 = 0; - while (i < locked_indents) : (i += 1) stream.popIndent(); + while (i < locked_indents) : (i += 1) ais.popIndent(); } - try stream.maybeInsertNewline(); + try ais.maybeInsertNewline(); - for (multiline_str_literal.lines()) |t| try renderToken(tree, stream, t, Space.None); + for (multiline_str_literal.lines()) |t| try renderToken(tree, ais, t, Space.None); } }, @@ -1425,9 +1424,9 @@ fn renderExpression( // TODO remove after 0.7.0 release if (mem.eql(u8, tree.tokenSlice(builtin_call.builtin_token), "@OpaqueType")) - return stream.writer().writeAll("@Type(.Opaque)"); + return ais.writer().writeAll("@Type(.Opaque)"); - try renderToken(tree, stream, builtin_call.builtin_token, Space.None); // @name + try renderToken(tree, ais, builtin_call.builtin_token, Space.None); // @name const src_params_trailing_comma = blk: { if (builtin_call.params_len < 2) break :blk false; @@ -1439,30 +1438,30 @@ fn renderExpression( const lparen = tree.nextToken(builtin_call.builtin_token); if (!src_params_trailing_comma) { - try renderToken(tree, stream, lparen, Space.None); // ( + try renderToken(tree, ais, lparen, Space.None); // ( // render all on one line, no trailing comma const params = builtin_call.params(); for (params) |param_node, i| { - try renderExpression(allocator, stream, tree, param_node, Space.None); + try renderExpression(allocator, ais, tree, param_node, Space.None); if (i + 1 < params.len) { const comma_token = tree.nextToken(param_node.lastToken()); - try renderToken(tree, 
stream, comma_token, Space.Space); // , + try renderToken(tree, ais, comma_token, Space.Space); // , } } } else { // one param per line - stream.pushIndent(); - defer stream.popIndent(); - try renderToken(tree, stream, lparen, Space.Newline); // ( + ais.pushIndent(); + defer ais.popIndent(); + try renderToken(tree, ais, lparen, Space.Newline); // ( for (builtin_call.params()) |param_node| { - try renderExpression(allocator, stream, tree, param_node, Space.Comma); + try renderExpression(allocator, ais, tree, param_node, Space.Comma); } } - return renderToken(tree, stream, builtin_call.rparen_token, space); // ) + return renderToken(tree, ais, builtin_call.rparen_token, space); // ) }, .FnProto => { @@ -1472,24 +1471,24 @@ fn renderExpression( const visib_token = tree.token_ids[visib_token_index]; assert(visib_token == .Keyword_pub or visib_token == .Keyword_export); - try renderToken(tree, stream, visib_token_index, Space.Space); // pub + try renderToken(tree, ais, visib_token_index, Space.Space); // pub } if (fn_proto.getExternExportInlineToken()) |extern_export_inline_token| { if (fn_proto.getIsExternPrototype() == null) - try renderToken(tree, stream, extern_export_inline_token, Space.Space); // extern/export/inline + try renderToken(tree, ais, extern_export_inline_token, Space.Space); // extern/export/inline } if (fn_proto.getLibName()) |lib_name| { - try renderExpression(allocator, stream, tree, lib_name, Space.Space); + try renderExpression(allocator, ais, tree, lib_name, Space.Space); } const lparen = if (fn_proto.getNameToken()) |name_token| blk: { - try renderToken(tree, stream, fn_proto.fn_token, Space.Space); // fn - try renderToken(tree, stream, name_token, Space.None); // name + try renderToken(tree, ais, fn_proto.fn_token, Space.Space); // fn + try renderToken(tree, ais, name_token, Space.None); // name break :blk tree.nextToken(name_token); } else blk: { - try renderToken(tree, stream, fn_proto.fn_token, Space.Space); // fn + try renderToken(tree, ais, fn_proto.fn_token, Space.Space); // fn break :blk tree.nextToken(fn_proto.fn_token); }; assert(tree.token_ids[lparen] == .LParen); @@ -1516,45 +1515,45 @@ fn renderExpression( }; if (!src_params_trailing_comma) { - try renderToken(tree, stream, lparen, Space.None); // ( + try renderToken(tree, ais, lparen, Space.None); // ( // render all on one line, no trailing comma for (fn_proto.params()) |param_decl, i| { - try renderParamDecl(allocator, stream, tree, param_decl, Space.None); + try renderParamDecl(allocator, ais, tree, param_decl, Space.None); if (i + 1 < fn_proto.params_len or fn_proto.getVarArgsToken() != null) { const comma = tree.nextToken(param_decl.lastToken()); - try renderToken(tree, stream, comma, Space.Space); // , + try renderToken(tree, ais, comma, Space.Space); // , } } if (fn_proto.getVarArgsToken()) |var_args_token| { - try renderToken(tree, stream, var_args_token, Space.None); + try renderToken(tree, ais, var_args_token, Space.None); } } else { // one param per line - stream.pushIndent(); - defer stream.popIndent(); - try renderToken(tree, stream, lparen, Space.Newline); // ( + ais.pushIndent(); + defer ais.popIndent(); + try renderToken(tree, ais, lparen, Space.Newline); // ( for (fn_proto.params()) |param_decl| { - try renderParamDecl(allocator, stream, tree, param_decl, Space.Comma); + try renderParamDecl(allocator, ais, tree, param_decl, Space.Comma); } if (fn_proto.getVarArgsToken()) |var_args_token| { - try renderToken(tree, stream, var_args_token, Space.Comma); + try renderToken(tree, ais, 
var_args_token, Space.Comma); } } - try renderToken(tree, stream, rparen, Space.Space); // ) + try renderToken(tree, ais, rparen, Space.Space); // ) if (fn_proto.getAlignExpr()) |align_expr| { const align_rparen = tree.nextToken(align_expr.lastToken()); const align_lparen = tree.prevToken(align_expr.firstToken()); const align_kw = tree.prevToken(align_lparen); - try renderToken(tree, stream, align_kw, Space.None); // align - try renderToken(tree, stream, align_lparen, Space.None); // ( - try renderExpression(allocator, stream, tree, align_expr, Space.None); - try renderToken(tree, stream, align_rparen, Space.Space); // ) + try renderToken(tree, ais, align_kw, Space.None); // align + try renderToken(tree, ais, align_lparen, Space.None); // ( + try renderExpression(allocator, ais, tree, align_expr, Space.None); + try renderToken(tree, ais, align_rparen, Space.Space); // ) } if (fn_proto.getSectionExpr()) |section_expr| { @@ -1562,10 +1561,10 @@ fn renderExpression( const section_lparen = tree.prevToken(section_expr.firstToken()); const section_kw = tree.prevToken(section_lparen); - try renderToken(tree, stream, section_kw, Space.None); // section - try renderToken(tree, stream, section_lparen, Space.None); // ( - try renderExpression(allocator, stream, tree, section_expr, Space.None); - try renderToken(tree, stream, section_rparen, Space.Space); // ) + try renderToken(tree, ais, section_kw, Space.None); // section + try renderToken(tree, ais, section_lparen, Space.None); // ( + try renderExpression(allocator, ais, tree, section_expr, Space.None); + try renderToken(tree, ais, section_rparen, Space.Space); // ) } if (fn_proto.getCallconvExpr()) |callconv_expr| { @@ -1573,23 +1572,23 @@ fn renderExpression( const callconv_lparen = tree.prevToken(callconv_expr.firstToken()); const callconv_kw = tree.prevToken(callconv_lparen); - try renderToken(tree, stream, callconv_kw, Space.None); // callconv - try renderToken(tree, stream, callconv_lparen, Space.None); // ( - try renderExpression(allocator, stream, tree, callconv_expr, Space.None); - try renderToken(tree, stream, callconv_rparen, Space.Space); // ) + try renderToken(tree, ais, callconv_kw, Space.None); // callconv + try renderToken(tree, ais, callconv_lparen, Space.None); // ( + try renderExpression(allocator, ais, tree, callconv_expr, Space.None); + try renderToken(tree, ais, callconv_rparen, Space.Space); // ) } else if (fn_proto.getIsExternPrototype() != null) { - try stream.writer().writeAll("callconv(.C) "); + try ais.writer().writeAll("callconv(.C) "); } else if (fn_proto.getIsAsync() != null) { - try stream.writer().writeAll("callconv(.Async) "); + try ais.writer().writeAll("callconv(.Async) "); } switch (fn_proto.return_type) { .Explicit => |node| { - return renderExpression(allocator, stream, tree, node, space); + return renderExpression(allocator, ais, tree, node, space); }, .InferErrorSet => |node| { - try renderToken(tree, stream, tree.prevToken(node.firstToken()), Space.None); // ! - return renderExpression(allocator, stream, tree, node, space); + try renderToken(tree, ais, tree.prevToken(node.firstToken()), Space.None); // ! 
+ return renderExpression(allocator, ais, tree, node, space); }, .Invalid => unreachable, } @@ -1599,11 +1598,11 @@ fn renderExpression( const anyframe_type = @fieldParentPtr(ast.Node.AnyFrameType, "base", base); if (anyframe_type.result) |result| { - try renderToken(tree, stream, anyframe_type.anyframe_token, Space.None); // anyframe - try renderToken(tree, stream, result.arrow_token, Space.None); // -> - return renderExpression(allocator, stream, tree, result.return_type, space); + try renderToken(tree, ais, anyframe_type.anyframe_token, Space.None); // anyframe + try renderToken(tree, ais, result.arrow_token, Space.None); // -> + return renderExpression(allocator, ais, tree, result.return_type, space); } else { - return renderToken(tree, stream, anyframe_type.anyframe_token, space); // anyframe + return renderToken(tree, ais, anyframe_type.anyframe_token, space); // anyframe } }, @@ -1612,38 +1611,38 @@ fn renderExpression( .Switch => { const switch_node = @fieldParentPtr(ast.Node.Switch, "base", base); - try renderToken(tree, stream, switch_node.switch_token, Space.Space); // switch - try renderToken(tree, stream, tree.nextToken(switch_node.switch_token), Space.None); // ( + try renderToken(tree, ais, switch_node.switch_token, Space.Space); // switch + try renderToken(tree, ais, tree.nextToken(switch_node.switch_token), Space.None); // ( const rparen = tree.nextToken(switch_node.expr.lastToken()); const lbrace = tree.nextToken(rparen); if (switch_node.cases_len == 0) { - try renderExpression(allocator, stream, tree, switch_node.expr, Space.None); - try renderToken(tree, stream, rparen, Space.Space); // ) - try renderToken(tree, stream, lbrace, Space.None); // { - return renderToken(tree, stream, switch_node.rbrace, space); // } + try renderExpression(allocator, ais, tree, switch_node.expr, Space.None); + try renderToken(tree, ais, rparen, Space.Space); // ) + try renderToken(tree, ais, lbrace, Space.None); // { + return renderToken(tree, ais, switch_node.rbrace, space); // } } - try renderExpression(allocator, stream, tree, switch_node.expr, Space.None); - try renderToken(tree, stream, rparen, Space.Space); // ) + try renderExpression(allocator, ais, tree, switch_node.expr, Space.None); + try renderToken(tree, ais, rparen, Space.Space); // ) { - stream.pushIndentNextLine(); - defer stream.popIndent(); - try renderToken(tree, stream, lbrace, Space.Newline); // { + ais.pushIndentNextLine(); + defer ais.popIndent(); + try renderToken(tree, ais, lbrace, Space.Newline); // { const cases = switch_node.cases(); for (cases) |node, i| { - try renderExpression(allocator, stream, tree, node, Space.Comma); + try renderExpression(allocator, ais, tree, node, Space.Comma); if (i + 1 < cases.len) { - try renderExtraNewline(tree, stream, cases[i + 1]); + try renderExtraNewline(tree, ais, cases[i + 1]); } } } - return renderToken(tree, stream, switch_node.rbrace, space); // } + return renderToken(tree, ais, switch_node.rbrace, space); // } }, .SwitchCase => { @@ -1660,41 +1659,41 @@ fn renderExpression( const items = switch_case.items(); for (items) |node, i| { if (i + 1 < items.len) { - try renderExpression(allocator, stream, tree, node, Space.None); + try renderExpression(allocator, ais, tree, node, Space.None); const comma_token = tree.nextToken(node.lastToken()); - try renderToken(tree, stream, comma_token, Space.Space); // , - try renderExtraNewline(tree, stream, items[i + 1]); + try renderToken(tree, ais, comma_token, Space.Space); // , + try renderExtraNewline(tree, ais, items[i + 1]); } else { - 
try renderExpression(allocator, stream, tree, node, Space.Space); + try renderExpression(allocator, ais, tree, node, Space.Space); } } } else { const items = switch_case.items(); for (items) |node, i| { if (i + 1 < items.len) { - try renderExpression(allocator, stream, tree, node, Space.None); + try renderExpression(allocator, ais, tree, node, Space.None); const comma_token = tree.nextToken(node.lastToken()); - try renderToken(tree, stream, comma_token, Space.Newline); // , - try renderExtraNewline(tree, stream, items[i + 1]); + try renderToken(tree, ais, comma_token, Space.Newline); // , + try renderExtraNewline(tree, ais, items[i + 1]); } else { - try renderExpression(allocator, stream, tree, node, Space.Comma); + try renderExpression(allocator, ais, tree, node, Space.Comma); } } } - try renderToken(tree, stream, switch_case.arrow_token, Space.Space); // => + try renderToken(tree, ais, switch_case.arrow_token, Space.Space); // => if (switch_case.payload) |payload| { - try renderExpression(allocator, stream, tree, payload, Space.Space); + try renderExpression(allocator, ais, tree, payload, Space.Space); } - return renderExpression(allocator, stream, tree, switch_case.expr, space); + return renderExpression(allocator, ais, tree, switch_case.expr, space); }, .SwitchElse => { const switch_else = @fieldParentPtr(ast.Node.SwitchElse, "base", base); - return renderToken(tree, stream, switch_else.token, space); + return renderToken(tree, ais, switch_else.token, space); }, .Else => { const else_node = @fieldParentPtr(ast.Node.Else, "base", base); @@ -1703,19 +1702,19 @@ fn renderExpression( const same_line = body_is_block or tree.tokensOnSameLine(else_node.else_token, else_node.body.lastToken()); const after_else_space = if (same_line or else_node.payload != null) Space.Space else Space.Newline; - try renderToken(tree, stream, else_node.else_token, after_else_space); + try renderToken(tree, ais, else_node.else_token, after_else_space); if (else_node.payload) |payload| { const payload_space = if (same_line) Space.Space else Space.Newline; - try renderExpression(allocator, stream, tree, payload, payload_space); + try renderExpression(allocator, ais, tree, payload, payload_space); } if (same_line) { - return renderExpression(allocator, stream, tree, else_node.body, space); + return renderExpression(allocator, ais, tree, else_node.body, space); } else { - stream.pushIndent(); - defer stream.popIndent(); - return renderExpression(allocator, stream, tree, else_node.body, space); + ais.pushIndent(); + defer ais.popIndent(); + return renderExpression(allocator, ais, tree, else_node.body, space); } }, @@ -1723,17 +1722,17 @@ fn renderExpression( const while_node = @fieldParentPtr(ast.Node.While, "base", base); if (while_node.label) |label| { - try renderToken(tree, stream, label, Space.None); // label - try renderToken(tree, stream, tree.nextToken(label), Space.Space); // : + try renderToken(tree, ais, label, Space.None); // label + try renderToken(tree, ais, tree.nextToken(label), Space.Space); // : } if (while_node.inline_token) |inline_token| { - try renderToken(tree, stream, inline_token, Space.Space); // inline + try renderToken(tree, ais, inline_token, Space.Space); // inline } - try renderToken(tree, stream, while_node.while_token, Space.Space); // while - try renderToken(tree, stream, tree.nextToken(while_node.while_token), Space.None); // ( - try renderExpression(allocator, stream, tree, while_node.condition, Space.None); + try renderToken(tree, ais, while_node.while_token, Space.Space); // while 
+ try renderToken(tree, ais, tree.nextToken(while_node.while_token), Space.None); // ( + try renderExpression(allocator, ais, tree, while_node.condition, Space.None); const cond_rparen = tree.nextToken(while_node.condition.lastToken()); @@ -1755,12 +1754,12 @@ fn renderExpression( { const rparen_space = if (while_node.payload != null or while_node.continue_expr != null) Space.Space else block_start_space; - try renderToken(tree, stream, cond_rparen, rparen_space); // ) + try renderToken(tree, ais, cond_rparen, rparen_space); // ) } if (while_node.payload) |payload| { const payload_space = Space.Space; //if (while_node.continue_expr != null) Space.Space else block_start_space; - try renderExpression(allocator, stream, tree, payload, payload_space); + try renderExpression(allocator, ais, tree, payload, payload_space); } if (while_node.continue_expr) |continue_expr| { @@ -1768,22 +1767,22 @@ fn renderExpression( const lparen = tree.prevToken(continue_expr.firstToken()); const colon = tree.prevToken(lparen); - try renderToken(tree, stream, colon, Space.Space); // : - try renderToken(tree, stream, lparen, Space.None); // ( + try renderToken(tree, ais, colon, Space.Space); // : + try renderToken(tree, ais, lparen, Space.None); // ( - try renderExpression(allocator, stream, tree, continue_expr, Space.None); + try renderExpression(allocator, ais, tree, continue_expr, Space.None); - try renderToken(tree, stream, rparen, block_start_space); // ) + try renderToken(tree, ais, rparen, block_start_space); // ) } { - if (!body_is_block) stream.pushIndent(); - defer if (!body_is_block) stream.popIndent(); - try renderExpression(allocator, stream, tree, while_node.body, after_body_space); + if (!body_is_block) ais.pushIndent(); + defer if (!body_is_block) ais.popIndent(); + try renderExpression(allocator, ais, tree, while_node.body, after_body_space); } if (while_node.@"else") |@"else"| { - return renderExpression(allocator, stream, tree, &@"else".base, space); + return renderExpression(allocator, ais, tree, &@"else".base, space); } }, @@ -1791,17 +1790,17 @@ fn renderExpression( const for_node = @fieldParentPtr(ast.Node.For, "base", base); if (for_node.label) |label| { - try renderToken(tree, stream, label, Space.None); // label - try renderToken(tree, stream, tree.nextToken(label), Space.Space); // : + try renderToken(tree, ais, label, Space.None); // label + try renderToken(tree, ais, tree.nextToken(label), Space.Space); // : } if (for_node.inline_token) |inline_token| { - try renderToken(tree, stream, inline_token, Space.Space); // inline + try renderToken(tree, ais, inline_token, Space.Space); // inline } - try renderToken(tree, stream, for_node.for_token, Space.Space); // for - try renderToken(tree, stream, tree.nextToken(for_node.for_token), Space.None); // ( - try renderExpression(allocator, stream, tree, for_node.array_expr, Space.None); + try renderToken(tree, ais, for_node.for_token, Space.Space); // for + try renderToken(tree, ais, tree.nextToken(for_node.for_token), Space.None); // ( + try renderExpression(allocator, ais, tree, for_node.array_expr, Space.None); const rparen = tree.nextToken(for_node.array_expr.lastToken()); @@ -1809,10 +1808,10 @@ fn renderExpression( const src_one_line_to_body = !body_is_block and tree.tokensOnSameLine(rparen, for_node.body.firstToken()); const body_on_same_line = body_is_block or src_one_line_to_body; - try renderToken(tree, stream, rparen, Space.Space); // ) + try renderToken(tree, ais, rparen, Space.Space); // ) const space_after_payload = if 
(body_on_same_line) Space.Space else Space.Newline; - try renderExpression(allocator, stream, tree, for_node.payload, space_after_payload); // |x| + try renderExpression(allocator, ais, tree, for_node.payload, space_after_payload); // |x| const space_after_body = blk: { if (for_node.@"else") |@"else"| { @@ -1828,13 +1827,13 @@ fn renderExpression( }; { - if (!body_on_same_line) stream.pushIndent(); - defer if (!body_on_same_line) stream.popIndent(); - try renderExpression(allocator, stream, tree, for_node.body, space_after_body); // { body } + if (!body_on_same_line) ais.pushIndent(); + defer if (!body_on_same_line) ais.popIndent(); + try renderExpression(allocator, ais, tree, for_node.body, space_after_body); // { body } } if (for_node.@"else") |@"else"| { - return renderExpression(allocator, stream, tree, &@"else".base, space); // else + return renderExpression(allocator, ais, tree, &@"else".base, space); // else } }, @@ -1844,29 +1843,29 @@ fn renderExpression( const lparen = tree.nextToken(if_node.if_token); const rparen = tree.nextToken(if_node.condition.lastToken()); - try renderToken(tree, stream, if_node.if_token, Space.Space); // if - try renderToken(tree, stream, lparen, Space.None); // ( + try renderToken(tree, ais, if_node.if_token, Space.Space); // if + try renderToken(tree, ais, lparen, Space.None); // ( - try renderExpression(allocator, stream, tree, if_node.condition, Space.None); // condition + try renderExpression(allocator, ais, tree, if_node.condition, Space.None); // condition const body_is_if_block = if_node.body.tag == .If; const body_is_block = nodeIsBlock(if_node.body); if (body_is_if_block) { - try renderExtraNewline(tree, stream, if_node.body); + try renderExtraNewline(tree, ais, if_node.body); } else if (body_is_block) { const after_rparen_space = if (if_node.payload == null) Space.BlockStart else Space.Space; - try renderToken(tree, stream, rparen, after_rparen_space); // ) + try renderToken(tree, ais, rparen, after_rparen_space); // ) if (if_node.payload) |payload| { - try renderExpression(allocator, stream, tree, payload, Space.BlockStart); // |x| + try renderExpression(allocator, ais, tree, payload, Space.BlockStart); // |x| } if (if_node.@"else") |@"else"| { - try renderExpression(allocator, stream, tree, if_node.body, Space.SpaceOrOutdent); - return renderExpression(allocator, stream, tree, &@"else".base, space); + try renderExpression(allocator, ais, tree, if_node.body, Space.SpaceOrOutdent); + return renderExpression(allocator, ais, tree, &@"else".base, space); } else { - return renderExpression(allocator, stream, tree, if_node.body, space); + return renderExpression(allocator, ais, tree, if_node.body, space); } } @@ -1874,121 +1873,121 @@ fn renderExpression( if (src_has_newline) { const after_rparen_space = if (if_node.payload == null) Space.Newline else Space.Space; - try renderToken(tree, stream, rparen, after_rparen_space); // ) + try renderToken(tree, ais, rparen, after_rparen_space); // ) if (if_node.payload) |payload| { - try renderExpression(allocator, stream, tree, payload, Space.Newline); + try renderExpression(allocator, ais, tree, payload, Space.Newline); } if (if_node.@"else") |@"else"| { const else_is_block = nodeIsBlock(@"else".body); { - stream.pushIndent(); - defer stream.popIndent(); - try renderExpression(allocator, stream, tree, if_node.body, Space.Newline); + ais.pushIndent(); + defer ais.popIndent(); + try renderExpression(allocator, ais, tree, if_node.body, Space.Newline); } if (else_is_block) { - try renderToken(tree, stream, 
@"else".else_token, Space.Space); // else + try renderToken(tree, ais, @"else".else_token, Space.Space); // else if (@"else".payload) |payload| { - try renderExpression(allocator, stream, tree, payload, Space.Space); + try renderExpression(allocator, ais, tree, payload, Space.Space); } - return renderExpression(allocator, stream, tree, @"else".body, space); + return renderExpression(allocator, ais, tree, @"else".body, space); } else { const after_else_space = if (@"else".payload == null) Space.Newline else Space.Space; - try renderToken(tree, stream, @"else".else_token, after_else_space); // else + try renderToken(tree, ais, @"else".else_token, after_else_space); // else if (@"else".payload) |payload| { - try renderExpression(allocator, stream, tree, payload, Space.Newline); + try renderExpression(allocator, ais, tree, payload, Space.Newline); } - stream.pushIndent(); - defer stream.popIndent(); - return renderExpression(allocator, stream, tree, @"else".body, space); + ais.pushIndent(); + defer ais.popIndent(); + return renderExpression(allocator, ais, tree, @"else".body, space); } } else { - stream.pushIndent(); - defer stream.popIndent(); - return renderExpression(allocator, stream, tree, if_node.body, space); + ais.pushIndent(); + defer ais.popIndent(); + return renderExpression(allocator, ais, tree, if_node.body, space); } } // Single line if statement - try renderToken(tree, stream, rparen, Space.Space); // ) + try renderToken(tree, ais, rparen, Space.Space); // ) if (if_node.payload) |payload| { - try renderExpression(allocator, stream, tree, payload, Space.Space); + try renderExpression(allocator, ais, tree, payload, Space.Space); } if (if_node.@"else") |@"else"| { - try renderExpression(allocator, stream, tree, if_node.body, Space.Space); - try renderToken(tree, stream, @"else".else_token, Space.Space); + try renderExpression(allocator, ais, tree, if_node.body, Space.Space); + try renderToken(tree, ais, @"else".else_token, Space.Space); if (@"else".payload) |payload| { - try renderExpression(allocator, stream, tree, payload, Space.Space); + try renderExpression(allocator, ais, tree, payload, Space.Space); } - return renderExpression(allocator, stream, tree, @"else".body, space); + return renderExpression(allocator, ais, tree, @"else".body, space); } else { - return renderExpression(allocator, stream, tree, if_node.body, space); + return renderExpression(allocator, ais, tree, if_node.body, space); } }, .Asm => { const asm_node = @fieldParentPtr(ast.Node.Asm, "base", base); - try renderToken(tree, stream, asm_node.asm_token, Space.Space); // asm + try renderToken(tree, ais, asm_node.asm_token, Space.Space); // asm if (asm_node.volatile_token) |volatile_token| { - try renderToken(tree, stream, volatile_token, Space.Space); // volatile - try renderToken(tree, stream, tree.nextToken(volatile_token), Space.None); // ( + try renderToken(tree, ais, volatile_token, Space.Space); // volatile + try renderToken(tree, ais, tree.nextToken(volatile_token), Space.None); // ( } else { - try renderToken(tree, stream, tree.nextToken(asm_node.asm_token), Space.None); // ( + try renderToken(tree, ais, tree.nextToken(asm_node.asm_token), Space.None); // ( } asmblk: { - stream.pushIndent(); - defer stream.popIndent(); + ais.pushIndent(); + defer ais.popIndent(); if (asm_node.outputs.len == 0 and asm_node.inputs.len == 0 and asm_node.clobbers.len == 0) { - try renderExpression(allocator, stream, tree, asm_node.template, Space.None); + try renderExpression(allocator, ais, tree, asm_node.template, 
Space.None); break :asmblk; } - try renderExpression(allocator, stream, tree, asm_node.template, Space.Newline); + try renderExpression(allocator, ais, tree, asm_node.template, Space.Newline); - stream.setIndentDelta(asm_indent_delta); - defer stream.setIndentDelta(indent_delta); + ais.setIndentDelta(asm_indent_delta); + defer ais.setIndentDelta(indent_delta); const colon1 = tree.nextToken(asm_node.template.lastToken()); const colon2 = if (asm_node.outputs.len == 0) blk: { - try renderToken(tree, stream, colon1, Space.Newline); // : + try renderToken(tree, ais, colon1, Space.Newline); // : break :blk tree.nextToken(colon1); } else blk: { - try renderToken(tree, stream, colon1, Space.Space); // : + try renderToken(tree, ais, colon1, Space.Space); // : - stream.pushIndent(); - defer stream.popIndent(); + ais.pushIndent(); + defer ais.popIndent(); for (asm_node.outputs) |*asm_output, i| { if (i + 1 < asm_node.outputs.len) { const next_asm_output = asm_node.outputs[i + 1]; - try renderAsmOutput(allocator, stream, tree, asm_output, Space.None); + try renderAsmOutput(allocator, ais, tree, asm_output, Space.None); const comma = tree.prevToken(next_asm_output.firstToken()); - try renderToken(tree, stream, comma, Space.Newline); // , - try renderExtraNewlineToken(tree, stream, next_asm_output.firstToken()); + try renderToken(tree, ais, comma, Space.Newline); // , + try renderExtraNewlineToken(tree, ais, next_asm_output.firstToken()); } else if (asm_node.inputs.len == 0 and asm_node.clobbers.len == 0) { - try renderAsmOutput(allocator, stream, tree, asm_output, Space.Newline); + try renderAsmOutput(allocator, ais, tree, asm_output, Space.Newline); break :asmblk; } else { - try renderAsmOutput(allocator, stream, tree, asm_output, Space.Newline); + try renderAsmOutput(allocator, ais, tree, asm_output, Space.Newline); const comma_or_colon = tree.nextToken(asm_output.lastToken()); break :blk switch (tree.token_ids[comma_or_colon]) { .Comma => tree.nextToken(comma_or_colon), @@ -2000,25 +1999,25 @@ fn renderExpression( }; const colon3 = if (asm_node.inputs.len == 0) blk: { - try renderToken(tree, stream, colon2, Space.Newline); // : + try renderToken(tree, ais, colon2, Space.Newline); // : break :blk tree.nextToken(colon2); } else blk: { - try renderToken(tree, stream, colon2, Space.Space); // : - stream.pushIndent(); - defer stream.popIndent(); + try renderToken(tree, ais, colon2, Space.Space); // : + ais.pushIndent(); + defer ais.popIndent(); for (asm_node.inputs) |*asm_input, i| { if (i + 1 < asm_node.inputs.len) { const next_asm_input = &asm_node.inputs[i + 1]; - try renderAsmInput(allocator, stream, tree, asm_input, Space.None); + try renderAsmInput(allocator, ais, tree, asm_input, Space.None); const comma = tree.prevToken(next_asm_input.firstToken()); - try renderToken(tree, stream, comma, Space.Newline); // , - try renderExtraNewlineToken(tree, stream, next_asm_input.firstToken()); + try renderToken(tree, ais, comma, Space.Newline); // , + try renderExtraNewlineToken(tree, ais, next_asm_input.firstToken()); } else if (asm_node.clobbers.len == 0) { - try renderAsmInput(allocator, stream, tree, asm_input, Space.Newline); + try renderAsmInput(allocator, ais, tree, asm_input, Space.Newline); break :asmblk; } else { - try renderAsmInput(allocator, stream, tree, asm_input, Space.Newline); + try renderAsmInput(allocator, ais, tree, asm_input, Space.Newline); const comma_or_colon = tree.nextToken(asm_input.lastToken()); break :blk switch (tree.token_ids[comma_or_colon]) { .Comma => 
tree.nextToken(comma_or_colon), @@ -2029,29 +2028,29 @@ fn renderExpression( unreachable; }; - try renderToken(tree, stream, colon3, Space.Space); // : - stream.pushIndent(); - defer stream.popIndent(); + try renderToken(tree, ais, colon3, Space.Space); // : + ais.pushIndent(); + defer ais.popIndent(); for (asm_node.clobbers) |clobber_node, i| { if (i + 1 >= asm_node.clobbers.len) { - try renderExpression(allocator, stream, tree, clobber_node, Space.Newline); + try renderExpression(allocator, ais, tree, clobber_node, Space.Newline); break :asmblk; } else { - try renderExpression(allocator, stream, tree, clobber_node, Space.None); + try renderExpression(allocator, ais, tree, clobber_node, Space.None); const comma = tree.nextToken(clobber_node.lastToken()); - try renderToken(tree, stream, comma, Space.Space); // , + try renderToken(tree, ais, comma, Space.Space); // , } } } - return renderToken(tree, stream, asm_node.rparen, space); + return renderToken(tree, ais, asm_node.rparen, space); }, .EnumLiteral => { const enum_literal = @fieldParentPtr(ast.Node.EnumLiteral, "base", base); - try renderToken(tree, stream, enum_literal.dot, Space.None); // . - return renderToken(tree, stream, enum_literal.name, space); // name + try renderToken(tree, ais, enum_literal.dot, Space.None); // . + return renderToken(tree, ais, enum_literal.name, space); // name }, .ContainerField, @@ -2065,14 +2064,14 @@ fn renderExpression( fn renderArrayType( allocator: *mem.Allocator, - stream: anytype, + ais: anytype, tree: *ast.Tree, lbracket: ast.TokenIndex, rhs: *ast.Node, len_expr: *ast.Node, opt_sentinel: ?*ast.Node, space: Space, -) (@TypeOf(stream.*).Error || Error)!void { +) (@TypeOf(ais.*).Error || Error)!void { const rbracket = tree.nextToken(if (opt_sentinel) |sentinel| sentinel.lastToken() else @@ -2083,97 +2082,97 @@ fn renderArrayType( const new_space = if (ends_with_comment) Space.Newline else Space.None; { const do_indent = (starts_with_comment or ends_with_comment); - if (do_indent) stream.pushIndent(); - defer if (do_indent) stream.popIndent(); + if (do_indent) ais.pushIndent(); + defer if (do_indent) ais.popIndent(); - try renderToken(tree, stream, lbracket, Space.None); // [ - try renderExpression(allocator, stream, tree, len_expr, new_space); + try renderToken(tree, ais, lbracket, Space.None); // [ + try renderExpression(allocator, ais, tree, len_expr, new_space); if (starts_with_comment) { - try stream.maybeInsertNewline(); + try ais.maybeInsertNewline(); } if (opt_sentinel) |sentinel| { const colon_token = tree.prevToken(sentinel.firstToken()); - try renderToken(tree, stream, colon_token, Space.None); // : - try renderExpression(allocator, stream, tree, sentinel, Space.None); + try renderToken(tree, ais, colon_token, Space.None); // : + try renderExpression(allocator, ais, tree, sentinel, Space.None); } if (starts_with_comment) { - try stream.maybeInsertNewline(); + try ais.maybeInsertNewline(); } } - try renderToken(tree, stream, rbracket, Space.None); // ] + try renderToken(tree, ais, rbracket, Space.None); // ] - return renderExpression(allocator, stream, tree, rhs, space); + return renderExpression(allocator, ais, tree, rhs, space); } fn renderAsmOutput( allocator: *mem.Allocator, - stream: anytype, + ais: anytype, tree: *ast.Tree, asm_output: *const ast.Node.Asm.Output, space: Space, -) (@TypeOf(stream.*).Error || Error)!void { - try stream.writer().writeAll("["); - try renderExpression(allocator, stream, tree, asm_output.symbolic_name, Space.None); - try stream.writer().writeAll("] "); - 
try renderExpression(allocator, stream, tree, asm_output.constraint, Space.None); - try stream.writer().writeAll(" ("); +) (@TypeOf(ais.*).Error || Error)!void { + try ais.writer().writeAll("["); + try renderExpression(allocator, ais, tree, asm_output.symbolic_name, Space.None); + try ais.writer().writeAll("] "); + try renderExpression(allocator, ais, tree, asm_output.constraint, Space.None); + try ais.writer().writeAll(" ("); switch (asm_output.kind) { ast.Node.Asm.Output.Kind.Variable => |variable_name| { - try renderExpression(allocator, stream, tree, &variable_name.base, Space.None); + try renderExpression(allocator, ais, tree, &variable_name.base, Space.None); }, ast.Node.Asm.Output.Kind.Return => |return_type| { - try stream.writer().writeAll("-> "); - try renderExpression(allocator, stream, tree, return_type, Space.None); + try ais.writer().writeAll("-> "); + try renderExpression(allocator, ais, tree, return_type, Space.None); }, } - return renderToken(tree, stream, asm_output.lastToken(), space); // ) + return renderToken(tree, ais, asm_output.lastToken(), space); // ) } fn renderAsmInput( allocator: *mem.Allocator, - stream: anytype, + ais: anytype, tree: *ast.Tree, asm_input: *const ast.Node.Asm.Input, space: Space, -) (@TypeOf(stream.*).Error || Error)!void { - try stream.writer().writeAll("["); - try renderExpression(allocator, stream, tree, asm_input.symbolic_name, Space.None); - try stream.writer().writeAll("] "); - try renderExpression(allocator, stream, tree, asm_input.constraint, Space.None); - try stream.writer().writeAll(" ("); - try renderExpression(allocator, stream, tree, asm_input.expr, Space.None); - return renderToken(tree, stream, asm_input.lastToken(), space); // ) +) (@TypeOf(ais.*).Error || Error)!void { + try ais.writer().writeAll("["); + try renderExpression(allocator, ais, tree, asm_input.symbolic_name, Space.None); + try ais.writer().writeAll("] "); + try renderExpression(allocator, ais, tree, asm_input.constraint, Space.None); + try ais.writer().writeAll(" ("); + try renderExpression(allocator, ais, tree, asm_input.expr, Space.None); + return renderToken(tree, ais, asm_input.lastToken(), space); // ) } fn renderVarDecl( allocator: *mem.Allocator, - stream: anytype, + ais: anytype, tree: *ast.Tree, var_decl: *ast.Node.VarDecl, -) (@TypeOf(stream.*).Error || Error)!void { +) (@TypeOf(ais.*).Error || Error)!void { if (var_decl.getVisibToken()) |visib_token| { - try renderToken(tree, stream, visib_token, Space.Space); // pub + try renderToken(tree, ais, visib_token, Space.Space); // pub } if (var_decl.getExternExportToken()) |extern_export_token| { - try renderToken(tree, stream, extern_export_token, Space.Space); // extern + try renderToken(tree, ais, extern_export_token, Space.Space); // extern if (var_decl.getLibName()) |lib_name| { - try renderExpression(allocator, stream, tree, lib_name, Space.Space); // "lib" + try renderExpression(allocator, ais, tree, lib_name, Space.Space); // "lib" } } if (var_decl.getComptimeToken()) |comptime_token| { - try renderToken(tree, stream, comptime_token, Space.Space); // comptime + try renderToken(tree, ais, comptime_token, Space.Space); // comptime } if (var_decl.getThreadLocalToken()) |thread_local_token| { - try renderToken(tree, stream, thread_local_token, Space.Space); // threadlocal + try renderToken(tree, ais, thread_local_token, Space.Space); // threadlocal } - try renderToken(tree, stream, var_decl.mut_token, Space.Space); // var + try renderToken(tree, ais, var_decl.mut_token, Space.Space); // var const 
name_space = if (var_decl.getTypeNode() == null and (var_decl.getAlignNode() != null or @@ -2182,92 +2181,92 @@ fn renderVarDecl( Space.Space else Space.None; - try renderToken(tree, stream, var_decl.name_token, name_space); + try renderToken(tree, ais, var_decl.name_token, name_space); if (var_decl.getTypeNode()) |type_node| { - try renderToken(tree, stream, tree.nextToken(var_decl.name_token), Space.Space); + try renderToken(tree, ais, tree.nextToken(var_decl.name_token), Space.Space); const s = if (var_decl.getAlignNode() != null or var_decl.getSectionNode() != null or var_decl.getInitNode() != null) Space.Space else Space.None; - try renderExpression(allocator, stream, tree, type_node, s); + try renderExpression(allocator, ais, tree, type_node, s); } if (var_decl.getAlignNode()) |align_node| { const lparen = tree.prevToken(align_node.firstToken()); const align_kw = tree.prevToken(lparen); const rparen = tree.nextToken(align_node.lastToken()); - try renderToken(tree, stream, align_kw, Space.None); // align - try renderToken(tree, stream, lparen, Space.None); // ( - try renderExpression(allocator, stream, tree, align_node, Space.None); + try renderToken(tree, ais, align_kw, Space.None); // align + try renderToken(tree, ais, lparen, Space.None); // ( + try renderExpression(allocator, ais, tree, align_node, Space.None); const s = if (var_decl.getSectionNode() != null or var_decl.getInitNode() != null) Space.Space else Space.None; - try renderToken(tree, stream, rparen, s); // ) + try renderToken(tree, ais, rparen, s); // ) } if (var_decl.getSectionNode()) |section_node| { const lparen = tree.prevToken(section_node.firstToken()); const section_kw = tree.prevToken(lparen); const rparen = tree.nextToken(section_node.lastToken()); - try renderToken(tree, stream, section_kw, Space.None); // linksection - try renderToken(tree, stream, lparen, Space.None); // ( - try renderExpression(allocator, stream, tree, section_node, Space.None); + try renderToken(tree, ais, section_kw, Space.None); // linksection + try renderToken(tree, ais, lparen, Space.None); // ( + try renderExpression(allocator, ais, tree, section_node, Space.None); const s = if (var_decl.getInitNode() != null) Space.Space else Space.None; - try renderToken(tree, stream, rparen, s); // ) + try renderToken(tree, ais, rparen, s); // ) } if (var_decl.getInitNode()) |init_node| { const s = if (init_node.tag == .MultilineStringLiteral) Space.None else Space.Space; - try renderToken(tree, stream, var_decl.getEqToken().?, s); // = - stream.pushIndentOneShot(); - try renderExpression(allocator, stream, tree, init_node, Space.None); + try renderToken(tree, ais, var_decl.getEqToken().?, s); // = + ais.pushIndentOneShot(); + try renderExpression(allocator, ais, tree, init_node, Space.None); } - try renderToken(tree, stream, var_decl.semicolon_token, Space.Newline); + try renderToken(tree, ais, var_decl.semicolon_token, Space.Newline); } fn renderParamDecl( allocator: *mem.Allocator, - stream: anytype, + ais: anytype, tree: *ast.Tree, param_decl: ast.Node.FnProto.ParamDecl, space: Space, -) (@TypeOf(stream.*).Error || Error)!void { - try renderDocComments(tree, stream, param_decl, param_decl.doc_comments); +) (@TypeOf(ais.*).Error || Error)!void { + try renderDocComments(tree, ais, param_decl, param_decl.doc_comments); if (param_decl.comptime_token) |comptime_token| { - try renderToken(tree, stream, comptime_token, Space.Space); + try renderToken(tree, ais, comptime_token, Space.Space); } if (param_decl.noalias_token) |noalias_token| { - try 
renderToken(tree, stream, noalias_token, Space.Space); + try renderToken(tree, ais, noalias_token, Space.Space); } if (param_decl.name_token) |name_token| { - try renderToken(tree, stream, name_token, Space.None); - try renderToken(tree, stream, tree.nextToken(name_token), Space.Space); // : + try renderToken(tree, ais, name_token, Space.None); + try renderToken(tree, ais, tree.nextToken(name_token), Space.Space); // : } switch (param_decl.param_type) { - .any_type, .type_expr => |node| try renderExpression(allocator, stream, tree, node, space), + .any_type, .type_expr => |node| try renderExpression(allocator, ais, tree, node, space), } } fn renderStatement( allocator: *mem.Allocator, - stream: anytype, + ais: anytype, tree: *ast.Tree, base: *ast.Node, -) (@TypeOf(stream.*).Error || Error)!void { +) (@TypeOf(ais.*).Error || Error)!void { switch (base.tag) { .VarDecl => { const var_decl = @fieldParentPtr(ast.Node.VarDecl, "base", base); - try renderVarDecl(allocator, stream, tree, var_decl); + try renderVarDecl(allocator, ais, tree, var_decl); }, else => { if (base.requireSemiColon()) { - try renderExpression(allocator, stream, tree, base, Space.None); + try renderExpression(allocator, ais, tree, base, Space.None); const semicolon_index = tree.nextToken(base.lastToken()); assert(tree.token_ids[semicolon_index] == .Semicolon); - try renderToken(tree, stream, semicolon_index, Space.Newline); + try renderToken(tree, ais, semicolon_index, Space.Newline); } else { - try renderExpression(allocator, stream, tree, base, Space.Newline); + try renderExpression(allocator, ais, tree, base, Space.Newline); } }, } @@ -2286,19 +2285,19 @@ const Space = enum { fn renderTokenOffset( tree: *ast.Tree, - stream: anytype, + ais: anytype, token_index: ast.TokenIndex, space: Space, token_skip_bytes: usize, -) (@TypeOf(stream.*).Error || Error)!void { +) (@TypeOf(ais.*).Error || Error)!void { if (space == Space.BlockStart) { // If placing the lbrace on the current line would cause an uggly gap then put the lbrace on the next line - const new_space = if (stream.isLineOverIndented()) Space.Newline else Space.Space; - return renderToken(tree, stream, token_index, new_space); + const new_space = if (ais.isLineOverIndented()) Space.Newline else Space.Space; + return renderToken(tree, ais, token_index, new_space); } var token_loc = tree.token_locs[token_index]; - try stream.writer().writeAll(mem.trimRight(u8, tree.tokenSliceLoc(token_loc)[token_skip_bytes..], " ")); + try ais.writer().writeAll(mem.trimRight(u8, tree.tokenSliceLoc(token_loc)[token_skip_bytes..], " ")); if (space == Space.NoComment) return; @@ -2307,20 +2306,20 @@ fn renderTokenOffset( var next_token_loc = tree.token_locs[token_index + 1]; if (space == Space.Comma) switch (next_token_id) { - .Comma => return renderToken(tree, stream, token_index + 1, Space.Newline), + .Comma => return renderToken(tree, ais, token_index + 1, Space.Newline), .LineComment => { - try stream.writer().writeAll(", "); - return renderToken(tree, stream, token_index + 1, Space.Newline); + try ais.writer().writeAll(", "); + return renderToken(tree, ais, token_index + 1, Space.Newline); }, else => { if (token_index + 2 < tree.token_ids.len and tree.token_ids[token_index + 2] == .MultilineStringLiteralLine) { - try stream.writer().writeAll(","); + try ais.writer().writeAll(","); return; } else { - try stream.writer().writeAll(","); - try stream.insertNewline(); + try ais.writer().writeAll(","); + try ais.insertNewline(); return; } }, @@ -2344,14 +2343,14 @@ fn renderTokenOffset( if 
(next_token_id == .MultilineStringLiteralLine) { return; } else { - try stream.insertNewline(); + try ais.insertNewline(); return; } }, Space.Space, Space.SpaceOrOutdent => { if (next_token_id == .MultilineStringLiteralLine) return; - try stream.writer().writeByte(' '); + try ais.writer().writeByte(' '); return; }, Space.NoComment, Space.Comma, Space.BlockStart => unreachable, @@ -2368,7 +2367,7 @@ fn renderTokenOffset( next_token_id = tree.token_ids[token_index + offset]; next_token_loc = tree.token_locs[token_index + offset]; if (next_token_id != .LineComment) { - try stream.insertNewline(); + try ais.insertNewline(); return; } }, @@ -2381,7 +2380,7 @@ fn renderTokenOffset( var loc = tree.tokenLocationLoc(token_loc.end, next_token_loc); if (loc.line == 0) { - try stream.writer().print(" {}", .{mem.trimRight(u8, tree.tokenSliceLoc(next_token_loc), " ")}); + try ais.writer().print(" {}", .{mem.trimRight(u8, tree.tokenSliceLoc(next_token_loc), " ")}); offset = 2; token_loc = next_token_loc; next_token_loc = tree.token_locs[token_index + offset]; @@ -2389,16 +2388,16 @@ fn renderTokenOffset( if (next_token_id != .LineComment) { switch (space) { Space.None, Space.Space => { - try stream.insertNewline(); + try ais.insertNewline(); }, Space.SpaceOrOutdent => { - try stream.insertNewline(); + try ais.insertNewline(); }, Space.Newline => { if (next_token_id == .MultilineStringLiteralLine) { return; } else { - try stream.insertNewline(); + try ais.insertNewline(); return; } }, @@ -2415,8 +2414,8 @@ fn renderTokenOffset( // in generated code (loc.line == 0) so treat that case // as though there was meant to be a newline between the tokens var newline_count = if (loc.line <= 1) @as(u8, 1) else @as(u8, 2); - while (newline_count > 0) : (newline_count -= 1) try stream.insertNewline(); - try stream.writer().writeAll(mem.trimRight(u8, tree.tokenSliceLoc(next_token_loc), " ")); + while (newline_count > 0) : (newline_count -= 1) try ais.insertNewline(); + try ais.writer().writeAll(mem.trimRight(u8, tree.tokenSliceLoc(next_token_loc), " ")); offset += 1; token_loc = next_token_loc; @@ -2428,15 +2427,15 @@ fn renderTokenOffset( if (next_token_id == .MultilineStringLiteralLine) { return; } else { - try stream.insertNewline(); + try ais.insertNewline(); return; } }, Space.None, Space.Space => { - try stream.insertNewline(); + try ais.insertNewline(); }, Space.SpaceOrOutdent => { - try stream.insertNewline(); + try ais.insertNewline(); }, Space.NoNewline => {}, Space.NoComment, Space.Comma, Space.BlockStart => unreachable, @@ -2449,38 +2448,38 @@ fn renderTokenOffset( fn renderToken( tree: *ast.Tree, - stream: anytype, + ais: anytype, token_index: ast.TokenIndex, space: Space, -) (@TypeOf(stream.*).Error || Error)!void { - return renderTokenOffset(tree, stream, token_index, space, 0); +) (@TypeOf(ais.*).Error || Error)!void { + return renderTokenOffset(tree, ais, token_index, space, 0); } fn renderDocComments( tree: *ast.Tree, - stream: anytype, + ais: anytype, node: anytype, doc_comments: ?*ast.Node.DocComment, -) (@TypeOf(stream.*).Error || Error)!void { +) (@TypeOf(ais.*).Error || Error)!void { const comment = doc_comments orelse return; - return renderDocCommentsToken(tree, stream, comment, node.firstToken()); + return renderDocCommentsToken(tree, ais, comment, node.firstToken()); } fn renderDocCommentsToken( tree: *ast.Tree, - stream: anytype, + ais: anytype, comment: *ast.Node.DocComment, first_token: ast.TokenIndex, -) (@TypeOf(stream.*).Error || Error)!void { +) (@TypeOf(ais.*).Error || Error)!void { 
var tok_i = comment.first_line; while (true) : (tok_i += 1) { switch (tree.token_ids[tok_i]) { .DocComment, .ContainerDocComment => { if (comment.first_line < first_token) { - try renderToken(tree, stream, tok_i, Space.Newline); + try renderToken(tree, ais, tok_i, Space.Newline); } else { - try renderToken(tree, stream, tok_i, Space.NoComment); - try stream.insertNewline(); + try renderToken(tree, ais, tok_i, Space.NoComment); + try ais.insertNewline(); } }, .LineComment => continue, @@ -2552,10 +2551,10 @@ fn nodeCausesSliceOpSpace(base: *ast.Node) bool { }; } -fn copyFixingWhitespace(stream: anytype, slice: []const u8) @TypeOf(stream.*).Error!void { +fn copyFixingWhitespace(ais: anytype, slice: []const u8) @TypeOf(ais.*).Error!void { for (slice) |byte| switch (byte) { - '\t' => try stream.writer().writeAll(" "), + '\t' => try ais.writer().writeAll(" "), '\r' => {}, - else => try stream.writer().writeByte(byte), + else => try ais.writer().writeByte(byte), }; } diff --git a/src-self-hosted/main.zig b/src-self-hosted/main.zig index d14a28419d..8c5c034238 100644 --- a/src-self-hosted/main.zig +++ b/src-self-hosted/main.zig @@ -682,13 +682,13 @@ pub fn cmdFmt(gpa: *Allocator, args: []const []const u8) !void { process.exit(1); } if (check_flag) { - const anything_changed = try std.zig.render(gpa, &io.null_out_stream, tree); + const anything_changed = try std.zig.render(gpa, io.null_out_stream, tree); const code = if (anything_changed) @as(u8, 1) else @as(u8, 0); process.exit(code); } const stdout = io.getStdOut().outStream(); - _ = try std.zig.render(gpa, &stdout, tree); + _ = try std.zig.render(gpa, stdout, tree); return; } @@ -830,7 +830,7 @@ fn fmtPathFile( } if (check_mode) { - const anything_changed = try std.zig.render(fmt.gpa, &io.null_out_stream, tree); + const anything_changed = try std.zig.render(fmt.gpa, io.null_out_stream, tree); if (anything_changed) { std.debug.print("{}\n", .{file_path}); fmt.any_error = true; @@ -840,7 +840,7 @@ fn fmtPathFile( try fmt.out_buffer.ensureCapacity(source_code.len); fmt.out_buffer.items.len = 0; const writer = fmt.out_buffer.writer(); - const anything_changed = try std.zig.render(fmt.gpa, &writer, tree); + const anything_changed = try std.zig.render(fmt.gpa, writer, tree); if (!anything_changed) return; // Good thing we didn't waste any file system access on this. 
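The hunks above thread an auto-indenting stream (`ais`) through every render helper in place of a bare writer plus indent bookkeeping, so indentation is owned by the stream itself. As a minimal sketch of the calling pattern only (not part of this patch; `renderBlockSketch` is an invented name, and the wrapper type is assumed from the calls visible above):

    // Sketch: how the refactored helpers drive indentation through `ais`.
    // Only calls that appear in the hunks above are used here.
    fn renderBlockSketch(ais: anytype) !void {
        try ais.writer().writeAll("{");
        try ais.insertNewline();
        {
            ais.pushIndent(); // writes on following lines get one extra indent level
            defer ais.popIndent();
            try ais.writer().writeAll("const x = 1;");
            try ais.insertNewline();
        }
        try ais.writer().writeAll("}");
        try ais.insertNewline();
    }
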
diff --git a/src-self-hosted/stage2.zig b/src-self-hosted/stage2.zig index 29b8f3df44..30d2ea44db 100644 --- a/src-self-hosted/stage2.zig +++ b/src-self-hosted/stage2.zig @@ -151,7 +151,7 @@ export fn stage2_free_clang_errors(errors_ptr: [*]translate_c.ClangErrMsg, error export fn stage2_render_ast(tree: *ast.Tree, output_file: *FILE) Error { const c_out_stream = std.io.cOutStream(output_file); - _ = std.zig.render(std.heap.c_allocator, &c_out_stream, tree) catch |e| switch (e) { + _ = std.zig.render(std.heap.c_allocator, c_out_stream, tree) catch |e| switch (e) { error.WouldBlock => unreachable, // stage1 opens stuff in exclusively blocking mode error.NotOpenForWriting => unreachable, error.SystemResources => return .SystemResources, From 25f666330480a2391d1c06e1beab8d517a096e99 Mon Sep 17 00:00:00 2001 From: Aransentin Date: Wed, 2 Sep 2020 22:16:40 +0000 Subject: [PATCH 13/56] Add the sync functions --- lib/std/os.zig | 68 +++++++++++++++++++++++++++++++++ lib/std/os/linux.zig | 16 ++++++++ lib/std/os/windows/kernel32.zig | 2 + 3 files changed, 86 insertions(+) diff --git a/lib/std/os.zig b/lib/std/os.zig index e8431c386b..d25fe2c56a 100644 --- a/lib/std/os.zig +++ b/lib/std/os.zig @@ -5328,3 +5328,71 @@ pub fn signalfd(fd: fd_t, mask: *const sigset_t, flags: u32) !fd_t { else => |err| return std.os.unexpectedErrno(err), } } + +pub const SyncError = error{ + InputOutput, + NoSpaceLeft, + DiskQuota, + AccessDenied, +} || UnexpectedError; + +/// Write all pending file contents and metadata modifications to all filesystems. +pub fn sync() void { + system.sync(); +} + +/// Write all pending file contents and metadata modifications to the filesystem which contains the specified file. +pub fn syncfs(fd: fd_t) SyncError!void { + const rc = system.syncfs(fd); + switch (errno(rc)) { + 0 => return, + EBADF, EINVAL, EROFS => unreachable, + EIO => return error.InputOutput, + ENOSPC => return error.NoSpaceLeft, + EDQUOT => return error.DiskQuota, + else => |err| return std.os.unexpectedErrno(err), + } +} + +/// Write all pending file contents and metadata modifications for the specified file descriptor to the underlying filesystem. +pub fn fsync(fd: fd_t) SyncError!void { + if (std.Target.current.os.tag == .windows) { + if (windows.kernel32.FlushFileBuffers(fd) != 0) + return; + switch (windows.kernel32.GetLastError()) { + .SUCCESS => return, + .INVALID_HANDLE => unreachable, + .ACCESS_DENIED => return error.AccessDenied, // a sync was performed but the system couldn't update the access time + .UNEXP_NET_ERR => return error.InputOutput, + else => return error.InputOutput, + } + } + const rc = system.fsync(fd); + switch (errno(rc)) { + 0 => return, + EBADF, EINVAL, EROFS => unreachable, + EIO => return error.InputOutput, + ENOSPC => return error.NoSpaceLeft, + EDQUOT => return error.DiskQuota, + else => |err| return std.os.unexpectedErrno(err), + } +} + +/// Write all pending file contents for the specified file descriptor to the underlying filesystem, but not necessarily the metadata. 
+pub fn fdatasync(fd: fd_t) SyncError!void { + if (std.Target.current.os.tag == .windows) { + return fsync(fd) catch |err| switch (err) { + SyncError.AccessDenied => return, // fdatasync doesn't promise that the access time was synced + else => return err, + }; + } + const rc = system.fdatasync(fd); + switch (errno(rc)) { + 0 => return, + EBADF, EINVAL, EROFS => unreachable, + EIO => return error.InputOutput, + ENOSPC => return error.NoSpaceLeft, + EDQUOT => return error.DiskQuota, + else => |err| return std.os.unexpectedErrno(err), + } +} diff --git a/lib/std/os/linux.zig b/lib/std/os/linux.zig index 13094b3a3a..1f916876cf 100644 --- a/lib/std/os/linux.zig +++ b/lib/std/os/linux.zig @@ -1226,6 +1226,22 @@ pub fn bpf(cmd: BPF.Cmd, attr: *BPF.Attr, size: u32) usize { return syscall3(.bpf, @enumToInt(cmd), @ptrToInt(attr), size); } +pub fn sync() void { + _ = syscall0(.sync); +} + +pub fn syncfs(fd: fd_t) usize { + return syscall1(.syncfs, @bitCast(usize, @as(isize, fd))); +} + +pub fn fsync(fd: fd_t) usize { + return syscall1(.fsync, @bitCast(usize, @as(isize, fd))); +} + +pub fn fdatasync(fd: fd_t) usize { + return syscall1(.fdatasync, @bitCast(usize, @as(isize, fd))); +} + test "" { if (builtin.os.tag == .linux) { _ = @import("linux/test.zig"); diff --git a/lib/std/os/windows/kernel32.zig b/lib/std/os/windows/kernel32.zig index fce9eea908..05d160485d 100644 --- a/lib/std/os/windows/kernel32.zig +++ b/lib/std/os/windows/kernel32.zig @@ -287,3 +287,5 @@ pub extern "kernel32" fn K32GetWsChangesEx(hProcess: HANDLE, lpWatchInfoEx: PPSA pub extern "kernel32" fn K32InitializeProcessForWsWatch(hProcess: HANDLE) callconv(.Stdcall) BOOL; pub extern "kernel32" fn K32QueryWorkingSet(hProcess: HANDLE, pv: PVOID, cb: DWORD) callconv(.Stdcall) BOOL; pub extern "kernel32" fn K32QueryWorkingSetEx(hProcess: HANDLE, pv: PVOID, cb: DWORD) callconv(.Stdcall) BOOL; + +pub extern "kernel32" fn FlushFileBuffers(hFile: HANDLE) callconv(.Stdcall) BOOL; From fb3c5b84ede6fa48949c8069bf735ac67ec21091 Mon Sep 17 00:00:00 2001 From: daurnimator Date: Mon, 31 Aug 2020 22:31:29 +1000 Subject: [PATCH 14/56] std: add fmt option to escape non-printable characters --- lib/std/fmt.zig | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/lib/std/fmt.zig b/lib/std/fmt.zig index 16d0eaa07a..3067a55759 100644 --- a/lib/std/fmt.zig +++ b/lib/std/fmt.zig @@ -66,6 +66,7 @@ fn peekIsAlign(comptime fmt: []const u8) bool { /// - output numeric value in hexadecimal notation /// - `s`: print a pointer-to-many as a c-string, use zero-termination /// - `B` and `Bi`: output a memory size in either metric (1000) or power-of-two (1024) based notation. works for both float and integer values. 
+/// - `e` and `E`: if printing a string, escape non-printable characters /// - `e`: output floating point value in scientific notation /// - `d`: output numeric value in decimal notation /// - `b`: output integer value in binary notation @@ -599,6 +600,16 @@ pub fn formatText( try formatInt(c, 16, fmt[0] == 'X', FormatOptions{ .width = 2, .fill = '0' }, writer); } return; + } else if (comptime (std.mem.eql(u8, fmt, "e") or std.mem.eql(u8, fmt, "E"))) { + for (bytes) |c| { + if (std.ascii.isPrint(c)) { + try writer.writeByte(c); + } else { + try writer.writeAll("\\x"); + try formatInt(c, 16, fmt[0] == 'E', FormatOptions{ .width = 2, .fill = '0' }, writer); + } + } + return; } else { @compileError("Unknown format string: '" ++ fmt ++ "'"); } @@ -1319,6 +1330,12 @@ test "slice" { try testFmt("buf: Test\n Other text", "buf: {s}\n Other text", .{"Test"}); } +test "escape non-printable" { + try testFmt("abc", "{e}", .{"abc"}); + try testFmt("ab\\xffc", "{e}", .{"ab\xffc"}); + try testFmt("ab\\xFFc", "{E}", .{"ab\xffc"}); +} + test "pointer" { { const value = @intToPtr(*align(1) i32, 0xdeadbeef); From bf1d83482b581e49499e545cc625155c1043ef0c Mon Sep 17 00:00:00 2001 From: pfg Date: Wed, 2 Sep 2020 22:28:37 -0700 Subject: [PATCH 15/56] "Support" allocating 0 bit types --- lib/std/mem/Allocator.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/std/mem/Allocator.zig b/lib/std/mem/Allocator.zig index bb59de2a7e..f14373970a 100644 --- a/lib/std/mem/Allocator.zig +++ b/lib/std/mem/Allocator.zig @@ -159,7 +159,7 @@ fn moveBytes( /// Returns a pointer to undefined memory. /// Call `destroy` with the result to free the memory. pub fn create(self: *Allocator, comptime T: type) Error!*T { - if (@sizeOf(T) == 0) return &(T{}); + if (@sizeOf(T) == 0) return @as(*T, undefined); const slice = try self.allocAdvancedWithRetAddr(T, null, 1, .exact, @returnAddress()); return &slice[0]; } From e747d2ba172a086e6df831c854ebe7f92bf07cd0 Mon Sep 17 00:00:00 2001 From: Jens Goldberg Date: Thu, 3 Sep 2020 07:49:18 +0000 Subject: [PATCH 16/56] Add C declarations and tests for the sync functions --- lib/std/c.zig | 5 +++++ lib/std/os/test.zig | 20 ++++++++++++++++++++ 2 files changed, 25 insertions(+) diff --git a/lib/std/c.zig b/lib/std/c.zig index b4e5fc7392..1b3f403ab5 100644 --- a/lib/std/c.zig +++ b/lib/std/c.zig @@ -330,3 +330,8 @@ pub const FILE = @Type(.Opaque); pub extern "c" fn dlopen(path: [*:0]const u8, mode: c_int) ?*c_void; pub extern "c" fn dlclose(handle: *c_void) c_int; pub extern "c" fn dlsym(handle: ?*c_void, symbol: [*:0]const u8) ?*c_void; + +pub extern "c" fn sync() void; +pub extern "c" fn syncfs(fd: c_int) c_int; +pub extern "c" fn fsync(fd: c_int) c_int; +pub extern "c" fn fdatasync(fd: c_int) c_int; diff --git a/lib/std/os/test.zig b/lib/std/os/test.zig index 576125e2a3..0d8d2cc0db 100644 --- a/lib/std/os/test.zig +++ b/lib/std/os/test.zig @@ -555,3 +555,23 @@ test "signalfd" { return error.SkipZigTest; _ = std.os.signalfd; } + +test "sync" { + if (builtin.os.tag != .linux and builtin.os.tag != .windows) + return error.SkipZigTest; + + var tmp = tmpDir(.{}); + defer tmp.cleanup(); + + const test_out_file = "os_tmp_test"; + const file = try tmp.dir.createFile(test_out_file, .{}); + defer { + file.close(); + tmp.dir.deleteFile(test_out_file) catch {}; + } + + try os.syncfs(file.handle); + try os.fsync(file.handle); + try os.fdatasync(file.handle); + os.sync(); +} From 68818983aef0d44f43f9575d8207053d5b7250ba Mon Sep 17 00:00:00 2001 From: Jens Goldberg Date: Thu, 3 Sep 2020 
09:52:26 +0000 Subject: [PATCH 17/56] Split the sync tests into sync and fsync --- lib/std/os/test.zig | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/lib/std/os/test.zig b/lib/std/os/test.zig index 0d8d2cc0db..0a453d8b2e 100644 --- a/lib/std/os/test.zig +++ b/lib/std/os/test.zig @@ -557,6 +557,24 @@ test "signalfd" { } test "sync" { + if (builtin.os.tag != .linux) + return error.SkipZigTest; + + var tmp = tmpDir(.{}); + defer tmp.cleanup(); + + const test_out_file = "os_tmp_test"; + const file = try tmp.dir.createFile(test_out_file, .{}); + defer { + file.close(); + tmp.dir.deleteFile(test_out_file) catch {}; + } + + os.sync(); + try os.syncfs(file.handle); +} + +test "fsync" { if (builtin.os.tag != .linux and builtin.os.tag != .windows) return error.SkipZigTest; @@ -570,8 +588,6 @@ test "sync" { tmp.dir.deleteFile(test_out_file) catch {}; } - try os.syncfs(file.handle); try os.fsync(file.handle); try os.fdatasync(file.handle); - os.sync(); } From 9a59cdcd41f5a05d70a02d89178afaf8789791c6 Mon Sep 17 00:00:00 2001 From: Vexu Date: Thu, 27 Aug 2020 23:07:39 +0300 Subject: [PATCH 18/56] stage2: various small type fixes --- src-self-hosted/Module.zig | 6 ++++++ src-self-hosted/type.zig | 10 +++++----- src-self-hosted/value.zig | 6 +++--- src-self-hosted/zir_sema.zig | 6 ++++++ 4 files changed, 20 insertions(+), 8 deletions(-) diff --git a/src-self-hosted/Module.zig b/src-self-hosted/Module.zig index 24dcb541b4..c4b0f70d5c 100644 --- a/src-self-hosted/Module.zig +++ b/src-self-hosted/Module.zig @@ -2801,6 +2801,12 @@ pub fn resolvePeerTypes(self: *Module, scope: *Scope, instructions: []*Inst) !Ty prev_inst = next_inst; continue; } + if (next_inst.ty.zigTypeTag() == .Undefined) + continue; + if (prev_inst.ty.zigTypeTag() == .Undefined) { + prev_inst = next_inst; + continue; + } if (prev_inst.ty.isInt() and next_inst.ty.isInt() and prev_inst.ty.isSignedInt() == next_inst.ty.isSignedInt()) diff --git a/src-self-hosted/type.zig b/src-self-hosted/type.zig index a9a1acf44b..66a7961073 100644 --- a/src-self-hosted/type.zig +++ b/src-self-hosted/type.zig @@ -163,7 +163,7 @@ pub const Type = extern union { // Hot path for common case: if (a.castPointer()) |a_payload| { if (b.castPointer()) |b_payload| { - return eql(a_payload.pointee_type, b_payload.pointee_type); + return a.tag() == b.tag() and eql(a_payload.pointee_type, b_payload.pointee_type); } } const is_slice_a = isSlice(a); @@ -189,7 +189,7 @@ pub const Type = extern union { .Array => { if (a.arrayLen() != b.arrayLen()) return false; - if (a.elemType().eql(b.elemType())) + if (!a.elemType().eql(b.elemType())) return false; const sentinel_a = a.arraySentinel(); const sentinel_b = b.arraySentinel(); @@ -501,9 +501,9 @@ pub const Type = extern union { .noreturn, => return out_stream.writeAll(@tagName(t)), - .enum_literal => return out_stream.writeAll("@TypeOf(.EnumLiteral)"), - .@"null" => return out_stream.writeAll("@TypeOf(null)"), - .@"undefined" => return out_stream.writeAll("@TypeOf(undefined)"), + .enum_literal => return out_stream.writeAll("@Type(.EnumLiteral)"), + .@"null" => return out_stream.writeAll("@Type(.Null)"), + .@"undefined" => return out_stream.writeAll("@Type(.Undefined)"), .@"anyframe" => return out_stream.writeAll("anyframe"), .anyerror_void_error_union => return out_stream.writeAll("anyerror!void"), diff --git a/src-self-hosted/value.zig b/src-self-hosted/value.zig index bfd205f4d9..b65aa06bea 100644 --- a/src-self-hosted/value.zig +++ b/src-self-hosted/value.zig @@ -301,15 +301,15 @@ 
pub const Value = extern union { .comptime_int_type => return out_stream.writeAll("comptime_int"), .comptime_float_type => return out_stream.writeAll("comptime_float"), .noreturn_type => return out_stream.writeAll("noreturn"), - .null_type => return out_stream.writeAll("@TypeOf(null)"), - .undefined_type => return out_stream.writeAll("@TypeOf(undefined)"), + .null_type => return out_stream.writeAll("@Type(.Null)"), + .undefined_type => return out_stream.writeAll("@Type(.Undefined)"), .fn_noreturn_no_args_type => return out_stream.writeAll("fn() noreturn"), .fn_void_no_args_type => return out_stream.writeAll("fn() void"), .fn_naked_noreturn_no_args_type => return out_stream.writeAll("fn() callconv(.Naked) noreturn"), .fn_ccc_void_no_args_type => return out_stream.writeAll("fn() callconv(.C) void"), .single_const_pointer_to_comptime_int_type => return out_stream.writeAll("*const comptime_int"), .const_slice_u8_type => return out_stream.writeAll("[]const u8"), - .enum_literal_type => return out_stream.writeAll("@TypeOf(.EnumLiteral)"), + .enum_literal_type => return out_stream.writeAll("@Type(.EnumLiteral)"), .anyframe_type => return out_stream.writeAll("anyframe"), .null_value => return out_stream.writeAll("null"), diff --git a/src-self-hosted/zir_sema.zig b/src-self-hosted/zir_sema.zig index 676b662077..88a130c1db 100644 --- a/src-self-hosted/zir_sema.zig +++ b/src-self-hosted/zir_sema.zig @@ -1239,6 +1239,12 @@ fn analyzeInstArithmetic(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) Inn if (casted_lhs.value()) |lhs_val| { if (casted_rhs.value()) |rhs_val| { + if (lhs_val.isUndef() or rhs_val.isUndef()) { + return mod.constInst(scope, inst.base.src, .{ + .ty = resolved_type, + .val = Value.initTag(.undef), + }); + } return analyzeInstComptimeOp(mod, scope, scalar_type, inst, lhs_val, rhs_val); } } From ff7c6e1e3cea86e130e15a720c729a05763b5f08 Mon Sep 17 00:00:00 2001 From: Vexu Date: Fri, 28 Aug 2020 14:35:13 +0300 Subject: [PATCH 19/56] stage2: astgen orelse --- src-self-hosted/astgen.zig | 70 ++++++++++++++++++++++++-------------- 1 file changed, 44 insertions(+), 26 deletions(-) diff --git a/src-self-hosted/astgen.zig b/src-self-hosted/astgen.zig index 17db584e56..bb56bf34b1 100644 --- a/src-self-hosted/astgen.zig +++ b/src-self-hosted/astgen.zig @@ -277,10 +277,10 @@ pub fn expr(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node) InnerEr .ArrayAccess => return arrayAccess(mod, scope, rl, node.castTag(.ArrayAccess).?), .Catch => return catchExpr(mod, scope, rl, node.castTag(.Catch).?), .Comptime => return comptimeKeyword(mod, scope, rl, node.castTag(.Comptime).?), + .OrElse => return orelseExpr(mod, scope, rl, node.castTag(.OrElse).?), .Defer => return mod.failNode(scope, node, "TODO implement astgen.expr for .Defer", .{}), .Range => return mod.failNode(scope, node, "TODO implement astgen.expr for .Range", .{}), - .OrElse => return mod.failNode(scope, node, "TODO implement astgen.expr for .OrElse", .{}), .Await => return mod.failNode(scope, node, "TODO implement astgen.expr for .Await", .{}), .Resume => return mod.failNode(scope, node, "TODO implement astgen.expr for .Resume", .{}), .Try => return mod.failNode(scope, node, "TODO implement astgen.expr for .Try", .{}), @@ -790,13 +790,31 @@ fn errorType(mod: *Module, scope: *Scope, node: *ast.Node.OneToken) InnerError!* } fn catchExpr(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node.Catch) InnerError!*zir.Inst { - const tree = scope.tree(); - const src = tree.token_locs[node.op_token].start; + return 
orelseCatchExpr(mod, scope, rl, node.lhs, node.op_token, .iserr, .unwrap_err_unsafe, node.rhs, node.payload); +} - const err_union_ptr = try expr(mod, scope, .ref, node.lhs); - // TODO we could avoid an unnecessary copy if .iserr took a pointer - const err_union = try addZIRUnOp(mod, scope, src, .deref, err_union_ptr); - const cond = try addZIRUnOp(mod, scope, src, .iserr, err_union); +fn orelseExpr(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node.SimpleInfixOp) InnerError!*zir.Inst { + return orelseCatchExpr(mod, scope, rl, node.lhs, node.op_token, .isnull, .unwrap_optional_unsafe, node.rhs, null); +} + +fn orelseCatchExpr( + mod: *Module, + scope: *Scope, + rl: ResultLoc, + lhs: *ast.Node, + op_token: ast.TokenIndex, + cond_op: zir.Inst.Tag, + unwrap_op: zir.Inst.Tag, + rhs: *ast.Node, + payload_node: ?*ast.Node, +) InnerError!*zir.Inst { + const tree = scope.tree(); + const src = tree.token_locs[op_token].start; + + const operand_ptr = try expr(mod, scope, .ref, lhs); + // TODO we could avoid an unnecessary copy if .iserr, .isnull took a pointer + const err_union = try addZIRUnOp(mod, scope, src, .deref, operand_ptr); + const cond = try addZIRUnOp(mod, scope, src, cond_op, err_union); var block_scope: Scope.GenZIR = .{ .parent = scope, @@ -825,55 +843,55 @@ fn catchExpr(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node.Catch) .inferred_ptr, .bitcasted_ptr, .block_ptr => .{ .block_ptr = block }, }; - var err_scope: Scope.GenZIR = .{ + var then_scope: Scope.GenZIR = .{ .parent = scope, .decl = block_scope.decl, .arena = block_scope.arena, .instructions = .{}, }; - defer err_scope.instructions.deinit(mod.gpa); + defer then_scope.instructions.deinit(mod.gpa); var err_val_scope: Scope.LocalVal = undefined; - const err_sub_scope = blk: { - const payload = node.payload orelse - break :blk &err_scope.base; + const then_sub_scope = blk: { + const payload = payload_node orelse + break :blk &then_scope.base; const err_name = tree.tokenSlice(payload.castTag(.Payload).?.error_symbol.firstToken()); if (mem.eql(u8, err_name, "_")) - break :blk &err_scope.base; + break :blk &then_scope.base; - const unwrapped_err_ptr = try addZIRUnOp(mod, &err_scope.base, src, .unwrap_err_code, err_union_ptr); + const unwrapped_err_ptr = try addZIRUnOp(mod, &then_scope.base, src, .unwrap_err_code, operand_ptr); err_val_scope = .{ - .parent = &err_scope.base, - .gen_zir = &err_scope, + .parent = &then_scope.base, + .gen_zir = &then_scope, .name = err_name, - .inst = try addZIRUnOp(mod, &err_scope.base, src, .deref, unwrapped_err_ptr), + .inst = try addZIRUnOp(mod, &then_scope.base, src, .deref, unwrapped_err_ptr), }; break :blk &err_val_scope.base; }; - _ = try addZIRInst(mod, &err_scope.base, src, zir.Inst.Break, .{ + _ = try addZIRInst(mod, &then_scope.base, src, zir.Inst.Break, .{ .block = block, - .operand = try expr(mod, err_sub_scope, branch_rl, node.rhs), + .operand = try expr(mod, then_sub_scope, branch_rl, rhs), }, .{}); - var not_err_scope: Scope.GenZIR = .{ + var else_scope: Scope.GenZIR = .{ .parent = scope, .decl = block_scope.decl, .arena = block_scope.arena, .instructions = .{}, }; - defer not_err_scope.instructions.deinit(mod.gpa); + defer else_scope.instructions.deinit(mod.gpa); - const unwrapped_payload = try addZIRUnOp(mod, ¬_err_scope.base, src, .unwrap_err_unsafe, err_union_ptr); - _ = try addZIRInst(mod, ¬_err_scope.base, src, zir.Inst.Break, .{ + const unwrapped_payload = try addZIRUnOp(mod, &else_scope.base, src, unwrap_op, operand_ptr); + _ = try addZIRInst(mod, 
&else_scope.base, src, zir.Inst.Break, .{ .block = block, .operand = unwrapped_payload, }, .{}); - condbr.positionals.then_body = .{ .instructions = try err_scope.arena.dupe(*zir.Inst, err_scope.instructions.items) }; - condbr.positionals.else_body = .{ .instructions = try not_err_scope.arena.dupe(*zir.Inst, not_err_scope.instructions.items) }; - return rlWrap(mod, scope, rl, &block.base); + condbr.positionals.then_body = .{ .instructions = try then_scope.arena.dupe(*zir.Inst, then_scope.instructions.items) }; + condbr.positionals.else_body = .{ .instructions = try else_scope.arena.dupe(*zir.Inst, else_scope.instructions.items) }; + return rlWrapPtr(mod, scope, rl, &block.base); } /// Return whether the identifier names of two tokens are equal. Resolves @"" tokens without allocating. From 2a628fd401bf057a71175c8b723375fd4f375a84 Mon Sep 17 00:00:00 2001 From: Vexu Date: Fri, 28 Aug 2020 14:55:04 +0300 Subject: [PATCH 20/56] stage2: astgen slice --- src-self-hosted/astgen.zig | 32 +++++++++++++++++++++++++++++++- src-self-hosted/zir.zig | 22 ++++++++++++++++++++++ src-self-hosted/zir_sema.zig | 10 ++++++++++ 3 files changed, 63 insertions(+), 1 deletion(-) diff --git a/src-self-hosted/astgen.zig b/src-self-hosted/astgen.zig index bb56bf34b1..2c091a86ec 100644 --- a/src-self-hosted/astgen.zig +++ b/src-self-hosted/astgen.zig @@ -275,6 +275,7 @@ pub fn expr(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node) InnerEr .ErrorType => return rlWrap(mod, scope, rl, try errorType(mod, scope, node.castTag(.ErrorType).?)), .For => return forExpr(mod, scope, rl, node.castTag(.For).?), .ArrayAccess => return arrayAccess(mod, scope, rl, node.castTag(.ArrayAccess).?), + .Slice => return rlWrap(mod, scope, rl, try sliceExpr(mod, scope, node.castTag(.Slice).?)), .Catch => return catchExpr(mod, scope, rl, node.castTag(.Catch).?), .Comptime => return comptimeKeyword(mod, scope, rl, node.castTag(.Comptime).?), .OrElse => return orelseExpr(mod, scope, rl, node.castTag(.OrElse).?), @@ -284,7 +285,6 @@ pub fn expr(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node) InnerEr .Await => return mod.failNode(scope, node, "TODO implement astgen.expr for .Await", .{}), .Resume => return mod.failNode(scope, node, "TODO implement astgen.expr for .Resume", .{}), .Try => return mod.failNode(scope, node, "TODO implement astgen.expr for .Try", .{}), - .Slice => return mod.failNode(scope, node, "TODO implement astgen.expr for .Slice", .{}), .ArrayInitializer => return mod.failNode(scope, node, "TODO implement astgen.expr for .ArrayInitializer", .{}), .ArrayInitializerDot => return mod.failNode(scope, node, "TODO implement astgen.expr for .ArrayInitializerDot", .{}), .StructInitializer => return mod.failNode(scope, node, "TODO implement astgen.expr for .StructInitializer", .{}), @@ -951,6 +951,36 @@ fn arrayAccess(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node.Array return rlWrapPtr(mod, scope, rl, try addZIRInst(mod, scope, src, zir.Inst.ElemPtr, .{ .array_ptr = array_ptr, .index = index }, .{})); } +fn sliceExpr(mod: *Module, scope: *Scope, node: *ast.Node.Slice) InnerError!*zir.Inst { + const tree = scope.tree(); + const src = tree.token_locs[node.rtoken].start; + + const usize_type = try addZIRInstConst(mod, scope, src, .{ + .ty = Type.initTag(.type), + .val = Value.initTag(.usize_type), + }); + + const array_ptr = try expr(mod, scope, .ref, node.lhs); + const start = try expr(mod, scope, .{ .ty = usize_type }, node.start); + + if (node.end == null and node.sentinel == null) { + return try 
addZIRBinOp(mod, scope, src, .slice_start, array_ptr, start); + } + + const end = if (node.end) |end| try expr(mod, scope, .{ .ty = usize_type }, end) else null; + // we could get the child type here, but it is easier to just do it in semantic analysis. + const sentinel = if (node.sentinel) |sentinel| try expr(mod, scope, .none, sentinel) else null; + + return try addZIRInst( + mod, + scope, + src, + zir.Inst.Slice, + .{ .array_ptr = array_ptr, .start = start }, + .{ .end = end, .sentinel = sentinel }, + ); +} + fn deref(mod: *Module, scope: *Scope, node: *ast.Node.SimpleSuffixOp) InnerError!*zir.Inst { const tree = scope.tree(); const src = tree.token_locs[node.rtoken].start; diff --git a/src-self-hosted/zir.zig b/src-self-hosted/zir.zig index 04d3393626..9d0a5b825e 100644 --- a/src-self-hosted/zir.zig +++ b/src-self-hosted/zir.zig @@ -231,6 +231,10 @@ pub const Inst = struct { const_slice_type, /// Create a pointer type with attributes ptr_type, + /// Slice operation `array_ptr[start..end:sentinel]` + slice, + /// Slice operation with just start `lhs[rhs..]` + slice_start, /// Write a value to a pointer. For loading, see `deref`. store, /// String Literal. Makes an anonymous Decl and then takes a pointer to it. @@ -343,6 +347,7 @@ pub const Inst = struct { .xor, .error_union_type, .merge_error_sets, + .slice_start, => BinOp, .block, @@ -380,6 +385,7 @@ pub const Inst = struct { .ptr_type => PtrType, .enum_literal => EnumLiteral, .error_set => ErrorSet, + .slice => Slice, }; } @@ -481,6 +487,8 @@ pub const Inst = struct { .error_union_type, .bitnot, .error_set, + .slice, + .slice_start, => false, .@"break", @@ -961,6 +969,20 @@ pub const Inst = struct { }, kw_args: struct {}, }; + + pub const Slice = struct { + pub const base_tag = Tag.slice; + base: Inst, + + positionals: struct { + array_ptr: *Inst, + start: *Inst, + }, + kw_args: struct { + end: ?*Inst = null, + sentinel: ?*Inst = null, + }, + }; }; pub const ErrorMsg = struct { diff --git a/src-self-hosted/zir_sema.zig b/src-self-hosted/zir_sema.zig index 88a130c1db..012bc63581 100644 --- a/src-self-hosted/zir_sema.zig +++ b/src-self-hosted/zir_sema.zig @@ -132,6 +132,8 @@ pub fn analyzeInst(mod: *Module, scope: *Scope, old_inst: *zir.Inst) InnerError! 
.error_union_type => return analyzeInstErrorUnionType(mod, scope, old_inst.castTag(.error_union_type).?), .anyframe_type => return analyzeInstAnyframeType(mod, scope, old_inst.castTag(.anyframe_type).?), .error_set => return analyzeInstErrorSet(mod, scope, old_inst.castTag(.error_set).?), + .slice => return analyzeInstSlice(mod, scope, old_inst.castTag(.slice).?), + .slice_start => return analyzeInstSliceStart(mod, scope, old_inst.castTag(.slice_start).?), } } @@ -1172,6 +1174,14 @@ fn analyzeInstElemPtr(mod: *Module, scope: *Scope, inst: *zir.Inst.ElemPtr) Inne return mod.fail(scope, inst.base.src, "TODO implement more analyze elemptr", .{}); } +fn analyzeInstSlice(mod: *Module, scope: *Scope, inst: *zir.Inst.Slice) InnerError!*Inst { + return mod.fail(scope, inst.base.src, "TODO implement analyzeInstSlice", .{}); +} + +fn analyzeInstSliceStart(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) InnerError!*Inst { + return mod.fail(scope, inst.base.src, "TODO implement analyzeInstSliceStart", .{}); +} + fn analyzeInstShl(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) InnerError!*Inst { return mod.fail(scope, inst.base.src, "TODO implement analyzeInstShl", .{}); } From 6ab0ac161e02c2361b72d124423509556b9332fa Mon Sep 17 00:00:00 2001 From: Vexu Date: Fri, 28 Aug 2020 15:51:27 +0300 Subject: [PATCH 21/56] stage2: slice return type analysis --- src-self-hosted/Module.zig | 66 +++++++++++++++++++++ src-self-hosted/codegen.zig | 2 +- src-self-hosted/codegen/c.zig | 2 +- src-self-hosted/type.zig | 108 +++++++++++++++++++++++++++++----- src-self-hosted/zir.zig | 2 +- src-self-hosted/zir_sema.zig | 12 +++- 6 files changed, 172 insertions(+), 20 deletions(-) diff --git a/src-self-hosted/Module.zig b/src-self-hosted/Module.zig index c4b0f70d5c..93509c6674 100644 --- a/src-self-hosted/Module.zig +++ b/src-self-hosted/Module.zig @@ -2591,6 +2591,72 @@ pub fn analyzeIsErr(self: *Module, scope: *Scope, src: usize, operand: *Inst) In return self.fail(scope, src, "TODO implement analysis of iserr", .{}); } +pub fn analyzeSlice(self: *Module, scope: *Scope, src: usize, array_ptr: *Inst, start: *Inst, end_opt: ?*Inst, sentinel_opt: ?*Inst) InnerError!*Inst { + const ptr_child = switch (array_ptr.ty.zigTypeTag()) { + .Pointer => array_ptr.ty.elemType(), + else => return self.fail(scope, src, "expected pointer, found '{}'", .{array_ptr.ty}), + }; + + var array_type = ptr_child; + const elem_type = switch (ptr_child.zigTypeTag()) { + .Array => ptr_child.elemType(), + .Pointer => blk: { + if (ptr_child.isSinglePointer()) { + if (ptr_child.elemType().zigTypeTag() == .Array) { + array_type = ptr_child.elemType(); + break :blk ptr_child.elemType().elemType(); + } + + return self.fail(scope, src, "slice of single-item pointer", .{}); + } + break :blk ptr_child.elemType(); + }, + else => return self.fail(scope, src, "slice of non-array type '{}'", .{ptr_child}), + }; + + const slice_sentinel = if (sentinel_opt) |sentinel| blk: { + const casted = try self.coerce(scope, elem_type, sentinel); + break :blk try self.resolveConstValue(scope, casted); + } else null; + + var return_ptr_size: std.builtin.TypeInfo.Pointer.Size = .Slice; + var return_elem_type = elem_type; + if (end_opt) |end| { + if (end.value()) |end_val| { + if (start.value()) |start_val| { + const start_u64 = start_val.toUnsignedInt(); + const end_u64 = end_val.toUnsignedInt(); + if (start_u64 > end_u64) { + return self.fail(scope, src, "out of bounds slice", .{}); + } + + const len = end_u64 - start_u64; + const array_sentinel = if 
(array_type.zigTypeTag() == .Array and end_u64 == array_type.arrayLen()) + array_type.sentinel() + else + slice_sentinel; + return_elem_type = try self.arrayType(scope, len, array_sentinel, elem_type); + return_ptr_size = .One; + } + } + } + const return_type = try self.ptrType( + scope, + src, + return_elem_type, + if (end_opt == null) slice_sentinel else null, + 0, // TODO alignment + 0, + 0, + !ptr_child.isConstPtr(), + ptr_child.isAllowzeroPtr(), + ptr_child.isVolatilePtr(), + return_ptr_size, + ); + + return self.fail(scope, src, "TODO implement analysis of slice", .{}); +} + /// Asserts that lhs and rhs types are both numeric. pub fn cmpNumeric( self: *Module, diff --git a/src-self-hosted/codegen.zig b/src-self-hosted/codegen.zig index d6e3194c12..6f08c7a689 100644 --- a/src-self-hosted/codegen.zig +++ b/src-self-hosted/codegen.zig @@ -132,7 +132,7 @@ pub fn generateSymbol( .Array => { // TODO populate .debug_info for the array if (typed_value.val.cast(Value.Payload.Bytes)) |payload| { - if (typed_value.ty.arraySentinel()) |sentinel| { + if (typed_value.ty.sentinel()) |sentinel| { try code.ensureCapacity(code.items.len + payload.data.len + 1); code.appendSliceAssumeCapacity(payload.data); const prev_len = code.items.len; diff --git a/src-self-hosted/codegen/c.zig b/src-self-hosted/codegen/c.zig index c037c55289..34ddcfbb3b 100644 --- a/src-self-hosted/codegen/c.zig +++ b/src-self-hosted/codegen/c.zig @@ -85,7 +85,7 @@ fn genArray(file: *C, decl: *Decl) !void { const name = try map(file.base.allocator, mem.span(decl.name)); defer file.base.allocator.free(name); if (tv.val.cast(Value.Payload.Bytes)) |payload| - if (tv.ty.arraySentinel()) |sentinel| + if (tv.ty.sentinel()) |sentinel| if (sentinel.toUnsignedInt() == 0) try file.constants.writer().print("const char *const {} = \"{}\";\n", .{ name, payload.data }) else diff --git a/src-self-hosted/type.zig b/src-self-hosted/type.zig index 66a7961073..4966395512 100644 --- a/src-self-hosted/type.zig +++ b/src-self-hosted/type.zig @@ -191,8 +191,8 @@ pub const Type = extern union { return false; if (!a.elemType().eql(b.elemType())) return false; - const sentinel_a = a.arraySentinel(); - const sentinel_b = b.arraySentinel(); + const sentinel_a = a.sentinel(); + const sentinel_b = b.sentinel(); if (sentinel_a) |sa| { if (sentinel_b) |sb| { return sa.eql(sb); @@ -630,8 +630,8 @@ pub const Type = extern union { const payload = @fieldParentPtr(Payload.Pointer, "base", ty.ptr_otherwise); if (payload.sentinel) |some| switch (payload.size) { .One, .C => unreachable, - .Many => try out_stream.writeAll("[*:{}]"), - .Slice => try out_stream.writeAll("[:{}]"), + .Many => try out_stream.print("[*:{}]", .{some}), + .Slice => try out_stream.print("[:{}]", .{some}), } else switch (payload.size) { .One => try out_stream.writeAll("*"), .Many => try out_stream.writeAll("[*]"), @@ -1341,6 +1341,81 @@ pub const Type = extern union { }; } + pub fn isAllowzeroPtr(self: Type) bool { + return switch (self.tag()) { + .u8, + .i8, + .u16, + .i16, + .u32, + .i32, + .u64, + .i64, + .usize, + .isize, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + .c_longdouble, + .f16, + .f32, + .f64, + .f128, + .c_void, + .bool, + .void, + .type, + .anyerror, + .comptime_int, + .comptime_float, + .noreturn, + .@"null", + .@"undefined", + .array, + .array_sentinel, + .array_u8, + .array_u8_sentinel_0, + .fn_noreturn_no_args, + .fn_void_no_args, + .fn_naked_noreturn_no_args, + .fn_ccc_void_no_args, + .function, + .int_unsigned, + 
.int_signed, + .single_mut_pointer, + .single_const_pointer, + .many_const_pointer, + .many_mut_pointer, + .c_const_pointer, + .c_mut_pointer, + .const_slice, + .mut_slice, + .single_const_pointer_to_comptime_int, + .const_slice_u8, + .optional, + .optional_single_mut_pointer, + .optional_single_const_pointer, + .enum_literal, + .error_union, + .@"anyframe", + .anyframe_T, + .anyerror_void_error_union, + .error_set, + .error_set_single, + => false, + + .pointer => { + const payload = @fieldParentPtr(Payload.Pointer, "base", self.ptr_otherwise); + return payload.@"allowzero"; + }, + }; + } + /// Asserts that the type is an optional pub fn isPtrLikeOptional(self: Type) bool { switch (self.tag()) { @@ -1585,8 +1660,8 @@ pub const Type = extern union { }; } - /// Asserts the type is an array or vector. - pub fn arraySentinel(self: Type) ?Value { + /// Asserts the type is an array, pointer or vector. + pub fn sentinel(self: Type) ?Value { return switch (self.tag()) { .u8, .i8, @@ -1626,16 +1701,8 @@ pub const Type = extern union { .fn_naked_noreturn_no_args, .fn_ccc_void_no_args, .function, - .pointer, - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, .const_slice, .mut_slice, - .single_const_pointer_to_comptime_int, .const_slice_u8, .int_unsigned, .int_signed, @@ -1651,7 +1718,18 @@ pub const Type = extern union { .error_set_single, => unreachable, - .array, .array_u8 => return null, + .single_const_pointer, + .single_mut_pointer, + .many_const_pointer, + .many_mut_pointer, + .c_const_pointer, + .c_mut_pointer, + .single_const_pointer_to_comptime_int, + .array, + .array_u8, + => return null, + + .pointer => return self.cast(Payload.Pointer).?.sentinel, .array_sentinel => return self.cast(Payload.ArraySentinel).?.sentinel, .array_u8_sentinel_0 => return Value.initTag(.zero), }; diff --git a/src-self-hosted/zir.zig b/src-self-hosted/zir.zig index 9d0a5b825e..b6d7fab4c5 100644 --- a/src-self-hosted/zir.zig +++ b/src-self-hosted/zir.zig @@ -2596,7 +2596,7 @@ const EmitZIR = struct { var len_pl = Value.Payload.Int_u64{ .int = ty.arrayLen() }; const len = Value.initPayload(&len_pl.base); - const inst = if (ty.arraySentinel()) |sentinel| blk: { + const inst = if (ty.sentinel()) |sentinel| blk: { const inst = try self.arena.allocator.create(Inst.ArrayTypeSentinel); inst.* = .{ .base = .{ diff --git a/src-self-hosted/zir_sema.zig b/src-self-hosted/zir_sema.zig index 012bc63581..c99da39c04 100644 --- a/src-self-hosted/zir_sema.zig +++ b/src-self-hosted/zir_sema.zig @@ -1175,11 +1175,19 @@ fn analyzeInstElemPtr(mod: *Module, scope: *Scope, inst: *zir.Inst.ElemPtr) Inne } fn analyzeInstSlice(mod: *Module, scope: *Scope, inst: *zir.Inst.Slice) InnerError!*Inst { - return mod.fail(scope, inst.base.src, "TODO implement analyzeInstSlice", .{}); + const array_ptr = try resolveInst(mod, scope, inst.positionals.array_ptr); + const start = try resolveInst(mod, scope, inst.positionals.start); + const end = if (inst.kw_args.end) |end| try resolveInst(mod, scope, end) else null; + const sentinel = if (inst.kw_args.sentinel) |sentinel| try resolveInst(mod, scope, sentinel) else null; + + return mod.analyzeSlice(scope, inst.base.src, array_ptr, start, end, sentinel); } fn analyzeInstSliceStart(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) InnerError!*Inst { - return mod.fail(scope, inst.base.src, "TODO implement analyzeInstSliceStart", .{}); + const array_ptr = try resolveInst(mod, scope, inst.positionals.lhs); + const start = try 
resolveInst(mod, scope, inst.positionals.rhs); + + return mod.analyzeSlice(scope, inst.base.src, array_ptr, start, null, null); } fn analyzeInstShl(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) InnerError!*Inst { From 1174cb15173208ead5f2ce828ade5b7d07ce6abe Mon Sep 17 00:00:00 2001 From: Vexu Date: Fri, 28 Aug 2020 15:56:24 +0300 Subject: [PATCH 22/56] stage2: fix tokenizer float bug --- lib/std/zig/tokenizer.zig | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/std/zig/tokenizer.zig b/lib/std/zig/tokenizer.zig index 47c7d23b35..86968c73b2 100644 --- a/lib/std/zig/tokenizer.zig +++ b/lib/std/zig/tokenizer.zig @@ -1175,6 +1175,7 @@ pub const Tokenizer = struct { }, .num_dot_dec => switch (c) { '.' => { + result.id = .IntegerLiteral; self.index -= 1; state = .start; break; @@ -1183,7 +1184,6 @@ pub const Tokenizer = struct { state = .float_exponent_unsigned; }, '0'...'9' => { - result.id = .FloatLiteral; state = .float_fraction_dec; }, else => { @@ -1769,6 +1769,7 @@ test "tokenizer - number literals decimal" { testTokenize("7", &[_]Token.Id{.IntegerLiteral}); testTokenize("8", &[_]Token.Id{.IntegerLiteral}); testTokenize("9", &[_]Token.Id{.IntegerLiteral}); + testTokenize("1..", &[_]Token.Id{ .IntegerLiteral, .Ellipsis2 }); testTokenize("0a", &[_]Token.Id{ .Invalid, .Identifier }); testTokenize("9b", &[_]Token.Id{ .Invalid, .Identifier }); testTokenize("1z", &[_]Token.Id{ .Invalid, .Identifier }); From 6f0126e9573a6bde9cbe5b113208e0a515b2eee7 Mon Sep 17 00:00:00 2001 From: Vexu Date: Thu, 3 Sep 2020 14:58:47 +0300 Subject: [PATCH 23/56] stage2: split Scope.Container from Scope.File --- src-self-hosted/Module.zig | 142 ++++++++++++++++++++++------------- src-self-hosted/codegen.zig | 4 +- src-self-hosted/link/Elf.zig | 8 +- 3 files changed, 96 insertions(+), 58 deletions(-) diff --git a/src-self-hosted/Module.zig b/src-self-hosted/Module.zig index 93509c6674..8d7a4d7b36 100644 --- a/src-self-hosted/Module.zig +++ b/src-self-hosted/Module.zig @@ -125,7 +125,7 @@ pub const Decl = struct { /// mapping them to an address in the output file. /// Memory owned by this decl, using Module's allocator. name: [*:0]const u8, - /// The direct parent container of the Decl. This is either a `Scope.File` or `Scope.ZIRModule`. + /// The direct parent container of the Decl. This is either a `Scope.Container` or `Scope.ZIRModule`. /// Reference to externally owned memory. scope: *Scope, /// The AST Node decl index or ZIR Inst index that contains this declaration. 
@@ -217,9 +217,10 @@ pub const Decl = struct { pub fn src(self: Decl) usize { switch (self.scope.tag) { - .file => { - const file = @fieldParentPtr(Scope.File, "base", self.scope); - const tree = file.contents.tree; + .container => { + const container = @fieldParentPtr(Scope.Container, "base", self.scope); + const tree = container.file_scope.contents.tree; + // TODO Container should have it's own decls() const decl_node = tree.root_node.decls()[self.src_index]; return tree.token_locs[decl_node.firstToken()].start; }, @@ -229,6 +230,7 @@ pub const Decl = struct { const src_decl = module.decls[self.src_index]; return src_decl.inst.src; }, + .file, .block => unreachable, .gen_zir => unreachable, .local_val => unreachable, @@ -359,6 +361,7 @@ pub const Scope = struct { .local_ptr => return self.cast(LocalPtr).?.gen_zir.arena, .zir_module => return &self.cast(ZIRModule).?.contents.module.arena.allocator, .file => unreachable, + .container => unreachable, } } @@ -368,15 +371,16 @@ pub const Scope = struct { return switch (self.tag) { .block => self.cast(Block).?.decl, .gen_zir => self.cast(GenZIR).?.decl, - .local_val => return self.cast(LocalVal).?.gen_zir.decl, - .local_ptr => return self.cast(LocalPtr).?.gen_zir.decl, + .local_val => self.cast(LocalVal).?.gen_zir.decl, + .local_ptr => self.cast(LocalPtr).?.gen_zir.decl, .decl => self.cast(DeclAnalysis).?.decl, .zir_module => null, .file => null, + .container => null, }; } - /// Asserts the scope has a parent which is a ZIRModule or File and + /// Asserts the scope has a parent which is a ZIRModule or Container and /// returns it. pub fn namespace(self: *Scope) *Scope { switch (self.tag) { @@ -385,7 +389,8 @@ pub const Scope = struct { .local_val => return self.cast(LocalVal).?.gen_zir.decl.scope, .local_ptr => return self.cast(LocalPtr).?.gen_zir.decl.scope, .decl => return self.cast(DeclAnalysis).?.decl.scope, - .zir_module, .file => return self, + .file => return &self.cast(File).?.root_container.base, + .zir_module, .container => return self, } } @@ -399,8 +404,9 @@ pub const Scope = struct { .local_val => unreachable, .local_ptr => unreachable, .decl => unreachable, + .file => unreachable, .zir_module => return self.cast(ZIRModule).?.fullyQualifiedNameHash(name), - .file => return self.cast(File).?.fullyQualifiedNameHash(name), + .container => return self.cast(Container).?.fullyQualifiedNameHash(name), } } @@ -409,11 +415,12 @@ pub const Scope = struct { switch (self.tag) { .file => return self.cast(File).?.contents.tree, .zir_module => unreachable, - .decl => return self.cast(DeclAnalysis).?.decl.scope.cast(File).?.contents.tree, - .block => return self.cast(Block).?.decl.scope.cast(File).?.contents.tree, - .gen_zir => return self.cast(GenZIR).?.decl.scope.cast(File).?.contents.tree, - .local_val => return self.cast(LocalVal).?.gen_zir.decl.scope.cast(File).?.contents.tree, - .local_ptr => return self.cast(LocalPtr).?.gen_zir.decl.scope.cast(File).?.contents.tree, + .decl => return self.cast(DeclAnalysis).?.decl.scope.cast(Container).?.file_scope.contents.tree, + .block => return self.cast(Block).?.decl.scope.cast(Container).?.file_scope.contents.tree, + .gen_zir => return self.cast(GenZIR).?.decl.scope.cast(Container).?.file_scope.contents.tree, + .local_val => return self.cast(LocalVal).?.gen_zir.decl.scope.cast(Container).?.file_scope.contents.tree, + .local_ptr => return self.cast(LocalPtr).?.gen_zir.decl.scope.cast(Container).?.file_scope.contents.tree, + .container => return self.cast(Container).?.file_scope.contents.tree, } } @@ 
-427,13 +434,15 @@ pub const Scope = struct { .decl => unreachable, .zir_module => unreachable, .file => unreachable, + .container => unreachable, }; } - /// Asserts the scope has a parent which is a ZIRModule or File and + /// Asserts the scope has a parent which is a ZIRModule, Contaienr or File and /// returns the sub_file_path field. pub fn subFilePath(base: *Scope) []const u8 { switch (base.tag) { + .container => return @fieldParentPtr(Container, "base", base).file_scope.sub_file_path, .file => return @fieldParentPtr(File, "base", base).sub_file_path, .zir_module => return @fieldParentPtr(ZIRModule, "base", base).sub_file_path, .block => unreachable, @@ -453,11 +462,13 @@ pub const Scope = struct { .local_val => unreachable, .local_ptr => unreachable, .decl => unreachable, + .container => unreachable, } } pub fn getSource(base: *Scope, module: *Module) ![:0]const u8 { switch (base.tag) { + .container => return @fieldParentPtr(Container, "base", base).file_scope.getSource(module), .file => return @fieldParentPtr(File, "base", base).getSource(module), .zir_module => return @fieldParentPtr(ZIRModule, "base", base).getSource(module), .gen_zir => unreachable, @@ -471,8 +482,9 @@ pub const Scope = struct { /// Asserts the scope is a namespace Scope and removes the Decl from the namespace. pub fn removeDecl(base: *Scope, child: *Decl) void { switch (base.tag) { - .file => return @fieldParentPtr(File, "base", base).removeDecl(child), + .container => return @fieldParentPtr(Container, "base", base).removeDecl(child), .zir_module => return @fieldParentPtr(ZIRModule, "base", base).removeDecl(child), + .file => unreachable, .block => unreachable, .gen_zir => unreachable, .local_val => unreachable, @@ -499,6 +511,7 @@ pub const Scope = struct { .local_val => unreachable, .local_ptr => unreachable, .decl => unreachable, + .container => unreachable, } } @@ -515,6 +528,8 @@ pub const Scope = struct { zir_module, /// .zig source code. file, + /// struct, enum or union, every .file contains one of these. + container, block, decl, gen_zir, @@ -522,6 +537,38 @@ pub const Scope = struct { local_ptr, }; + pub const Container = struct { + pub const base_tag: Tag = .container; + base: Scope = Scope{ .tag = base_tag }, + + file_scope: *Scope.File, + + /// Direct children of the file. + decls: ArrayListUnmanaged(*Decl), + + // TODO implement container types and put this in a status union + // ty: Type + + pub fn deinit(self: *Container, gpa: *Allocator) void { + self.decls.deinit(gpa); + self.* = undefined; + } + + pub fn removeDecl(self: *Container, child: *Decl) void { + for (self.decls.items) |item, i| { + if (item == child) { + _ = self.decls.swapRemove(i); + return; + } + } + } + + pub fn fullyQualifiedNameHash(self: *Container, name: []const u8) NameHash { + // TODO container scope qualified names. + return std.zig.hashSrc(name); + } + }; + pub const File = struct { pub const base_tag: Tag = .file; base: Scope = Scope{ .tag = base_tag }, @@ -544,8 +591,7 @@ pub const Scope = struct { loaded_success, }, - /// Direct children of the file. 
- decls: ArrayListUnmanaged(*Decl), + root_container: Container, pub fn unload(self: *File, gpa: *Allocator) void { switch (self.status) { @@ -569,20 +615,11 @@ pub const Scope = struct { } pub fn deinit(self: *File, gpa: *Allocator) void { - self.decls.deinit(gpa); + self.root_container.deinit(gpa); self.unload(gpa); self.* = undefined; } - pub fn removeDecl(self: *File, child: *Decl) void { - for (self.decls.items) |item, i| { - if (item == child) { - _ = self.decls.swapRemove(i); - return; - } - } - } - pub fn dumpSrc(self: *File, src: usize) void { const loc = std.zig.findLineColumn(self.source.bytes, src); std.debug.print("{}:{}:{}\n", .{ self.sub_file_path, loc.line + 1, loc.column + 1 }); @@ -604,11 +641,6 @@ pub const Scope = struct { .bytes => |bytes| return bytes, } } - - pub fn fullyQualifiedNameHash(self: *File, name: []const u8) NameHash { - // We don't have struct scopes yet so this is currently just a simple name hash. - return std.zig.hashSrc(name); - } }; pub const ZIRModule = struct { @@ -861,7 +893,10 @@ pub fn init(gpa: *Allocator, options: InitOptions) !Module { .source = .{ .unloaded = {} }, .contents = .{ .not_available = {} }, .status = .never_loaded, - .decls = .{}, + .root_container = .{ + .file_scope = root_scope, + .decls = .{}, + }, }; break :blk &root_scope.base; } else if (mem.endsWith(u8, options.root_pkg.root_src_path, ".zir")) { @@ -969,7 +1004,7 @@ pub fn update(self: *Module) !void { // to force a refresh we unload now. if (self.root_scope.cast(Scope.File)) |zig_file| { zig_file.unload(self.gpa); - self.analyzeRootSrcFile(zig_file) catch |err| switch (err) { + self.analyzeContainer(&zig_file.root_container) catch |err| switch (err) { error.AnalysisFail => { assert(self.totalErrorCount() != 0); }, @@ -1237,8 +1272,8 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool { const tracy = trace(@src()); defer tracy.end(); - const file_scope = decl.scope.cast(Scope.File).?; - const tree = try self.getAstTree(file_scope); + const container_scope = decl.scope.cast(Scope.Container).?; + const tree = try self.getAstTree(container_scope); const ast_node = tree.root_node.decls()[decl.src_index]; switch (ast_node.tag) { .FnProto => { @@ -1698,10 +1733,12 @@ fn getSrcModule(self: *Module, root_scope: *Scope.ZIRModule) !*zir.Module { } } -fn getAstTree(self: *Module, root_scope: *Scope.File) !*ast.Tree { +fn getAstTree(self: *Module, container_scope: *Scope.Container) !*ast.Tree { const tracy = trace(@src()); defer tracy.end(); + const root_scope = container_scope.file_scope; + switch (root_scope.status) { .never_loaded, .unloaded_success => { try self.failed_files.ensureCapacity(self.gpa, self.failed_files.items().len + 1); @@ -1743,24 +1780,24 @@ fn getAstTree(self: *Module, root_scope: *Scope.File) !*ast.Tree { } } -fn analyzeRootSrcFile(self: *Module, root_scope: *Scope.File) !void { +fn analyzeContainer(self: *Module, container_scope: *Scope.Container) !void { const tracy = trace(@src()); defer tracy.end(); // We may be analyzing it for the first time, or this may be // an incremental update. This code handles both cases. - const tree = try self.getAstTree(root_scope); + const tree = try self.getAstTree(container_scope); const decls = tree.root_node.decls(); try self.work_queue.ensureUnusedCapacity(decls.len); - try root_scope.decls.ensureCapacity(self.gpa, decls.len); + try container_scope.decls.ensureCapacity(self.gpa, decls.len); // Keep track of the decls that we expect to see in this file so that // we know which ones have been deleted. 
var deleted_decls = std.AutoArrayHashMap(*Decl, void).init(self.gpa); defer deleted_decls.deinit(); - try deleted_decls.ensureCapacity(root_scope.decls.items.len); - for (root_scope.decls.items) |file_decl| { + try deleted_decls.ensureCapacity(container_scope.decls.items.len); + for (container_scope.decls.items) |file_decl| { deleted_decls.putAssumeCapacityNoClobber(file_decl, {}); } @@ -1773,7 +1810,7 @@ fn analyzeRootSrcFile(self: *Module, root_scope: *Scope.File) !void { const name_loc = tree.token_locs[name_tok]; const name = tree.tokenSliceLoc(name_loc); - const name_hash = root_scope.fullyQualifiedNameHash(name); + const name_hash = container_scope.fullyQualifiedNameHash(name); const contents_hash = std.zig.hashSrc(tree.getNodeSource(src_decl)); if (self.decl_table.get(name_hash)) |decl| { // Update the AST Node index of the decl, even if its contents are unchanged, it may @@ -1801,8 +1838,8 @@ fn analyzeRootSrcFile(self: *Module, root_scope: *Scope.File) !void { } } } else { - const new_decl = try self.createNewDecl(&root_scope.base, name, decl_i, name_hash, contents_hash); - root_scope.decls.appendAssumeCapacity(new_decl); + const new_decl = try self.createNewDecl(&container_scope.base, name, decl_i, name_hash, contents_hash); + container_scope.decls.appendAssumeCapacity(new_decl); if (fn_proto.getExternExportInlineToken()) |maybe_export_token| { if (tree.token_ids[maybe_export_token] == .Keyword_export) { self.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = new_decl }); @@ -1812,7 +1849,7 @@ fn analyzeRootSrcFile(self: *Module, root_scope: *Scope.File) !void { } else if (src_decl.castTag(.VarDecl)) |var_decl| { const name_loc = tree.token_locs[var_decl.name_token]; const name = tree.tokenSliceLoc(name_loc); - const name_hash = root_scope.fullyQualifiedNameHash(name); + const name_hash = container_scope.fullyQualifiedNameHash(name); const contents_hash = std.zig.hashSrc(tree.getNodeSource(src_decl)); if (self.decl_table.get(name_hash)) |decl| { // Update the AST Node index of the decl, even if its contents are unchanged, it may @@ -1828,8 +1865,8 @@ fn analyzeRootSrcFile(self: *Module, root_scope: *Scope.File) !void { decl.contents_hash = contents_hash; } } else { - const new_decl = try self.createNewDecl(&root_scope.base, name, decl_i, name_hash, contents_hash); - root_scope.decls.appendAssumeCapacity(new_decl); + const new_decl = try self.createNewDecl(&container_scope.base, name, decl_i, name_hash, contents_hash); + container_scope.decls.appendAssumeCapacity(new_decl); if (var_decl.getExternExportToken()) |maybe_export_token| { if (tree.token_ids[maybe_export_token] == .Keyword_export) { self.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = new_decl }); @@ -1841,11 +1878,11 @@ fn analyzeRootSrcFile(self: *Module, root_scope: *Scope.File) !void { const name = try std.fmt.allocPrint(self.gpa, "__comptime_{}", .{name_index}); defer self.gpa.free(name); - const name_hash = root_scope.fullyQualifiedNameHash(name); + const name_hash = container_scope.fullyQualifiedNameHash(name); const contents_hash = std.zig.hashSrc(tree.getNodeSource(src_decl)); - const new_decl = try self.createNewDecl(&root_scope.base, name, decl_i, name_hash, contents_hash); - root_scope.decls.appendAssumeCapacity(new_decl); + const new_decl = try self.createNewDecl(&container_scope.base, name, decl_i, name_hash, contents_hash); + container_scope.decls.appendAssumeCapacity(new_decl); self.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = new_decl }); } else if (src_decl.castTag(.ContainerField)) 
|container_field| { log.err("TODO: analyze container field", .{}); @@ -3124,6 +3161,7 @@ fn failWithOwnedErrorMsg(self: *Module, scope: *Scope, src: usize, err_msg: *Err self.failed_files.putAssumeCapacityNoClobber(scope, err_msg); }, .file => unreachable, + .container => unreachable, } return error.AnalysisFail; } diff --git a/src-self-hosted/codegen.zig b/src-self-hosted/codegen.zig index 6f08c7a689..bad1f59b88 100644 --- a/src-self-hosted/codegen.zig +++ b/src-self-hosted/codegen.zig @@ -436,8 +436,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { try branch_stack.append(.{}); const src_data: struct {lbrace_src: usize, rbrace_src: usize, source: []const u8} = blk: { - if (module_fn.owner_decl.scope.cast(Module.Scope.File)) |scope_file| { - const tree = scope_file.contents.tree; + if (module_fn.owner_decl.scope.cast(Module.Scope.Container)) |container_scope| { + const tree = container_scope.file_scope.contents.tree; const fn_proto = tree.root_node.decls()[module_fn.owner_decl.src_index].castTag(.FnProto).?; const block = fn_proto.getBodyNode().?.castTag(.Block).?; const lbrace_src = tree.token_locs[block.lbrace].start; diff --git a/src-self-hosted/link/Elf.zig b/src-self-hosted/link/Elf.zig index 69f1260d20..451160630a 100644 --- a/src-self-hosted/link/Elf.zig +++ b/src-self-hosted/link/Elf.zig @@ -1656,8 +1656,8 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void { try dbg_line_buffer.ensureCapacity(26); const line_off: u28 = blk: { - if (decl.scope.cast(Module.Scope.File)) |scope_file| { - const tree = scope_file.contents.tree; + if (decl.scope.cast(Module.Scope.Container)) |container_scope| { + const tree = container_scope.file_scope.contents.tree; const file_ast_decls = tree.root_node.decls(); // TODO Look into improving the performance here by adding a token-index-to-line // lookup table. Currently this involves scanning over the source code for newlines. @@ -2157,8 +2157,8 @@ pub fn updateDeclLineNumber(self: *Elf, module: *Module, decl: *const Module.Dec const tracy = trace(@src()); defer tracy.end(); - const scope_file = decl.scope.cast(Module.Scope.File).?; - const tree = scope_file.contents.tree; + const container_scope = decl.scope.cast(Module.Scope.Container).?; + const tree = container_scope.file_scope.contents.tree; const file_ast_decls = tree.root_node.decls(); // TODO Look into improving the performance here by adding a token-index-to-line // lookup table. Currently this involves scanning over the source code for newlines. 
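
The practical effect of this split, shown as a minimal sketch (assuming a `decl: *Module.Decl` parameter and the `Scope.Container` / `Scope.File` definitions above; the helper name and the `@import("Module.zig")` path are illustrative only, the field accesses mirror the hunks in this patch): a decl's parent scope is now a Container, and the parsed AST is reached through the Container's `file_scope` instead of by casting the scope to `Scope.File` directly.

    const std = @import("std");
    const ast = std.zig.ast;
    const Module = @import("Module.zig"); // assumes a file sitting next to src-self-hosted/Module.zig

    /// Sketch of the lookup pattern used by codegen and the ELF linker after this change.
    fn declTree(decl: *Module.Decl) *ast.Tree {
        // The decl's scope is the Container...
        const container_scope = decl.scope.cast(Module.Scope.Container).?;
        // ...which points back at the File that owns the parsed source.
        return container_scope.file_scope.contents.tree;
    }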
From e8a2aecd2f3ed13d7b9fb74248d455752de19840 Mon Sep 17 00:00:00 2001 From: Isaac Freund Date: Thu, 3 Sep 2020 15:08:37 +0200 Subject: [PATCH 24/56] std: fix linux uid_t, use uid_t/gid_t in std.os - correct uid_t from i32 to u32 on linux - define uid_t and gid_t for OSes missing definitions - use uid_t/gid_t instead of plain u32s throughout std.os --- lib/std/child_process.zig | 4 +-- lib/std/os.zig | 8 ++--- lib/std/os/bits/darwin.zig | 8 +++-- lib/std/os/bits/dragonfly.zig | 13 ++++++-- lib/std/os/bits/freebsd.zig | 8 +++-- lib/std/os/bits/linux.zig | 8 ++--- lib/std/os/bits/linux/x86_64.zig | 5 +-- lib/std/os/linux.zig | 52 ++++++++++++++++---------------- lib/std/process.zig | 8 ++--- 9 files changed, 65 insertions(+), 49 deletions(-) diff --git a/lib/std/child_process.zig b/lib/std/child_process.zig index 287fc3e7cd..c64fefbc63 100644 --- a/lib/std/child_process.zig +++ b/lib/std/child_process.zig @@ -44,10 +44,10 @@ pub const ChildProcess = struct { stderr_behavior: StdIo, /// Set to change the user id when spawning the child process. - uid: if (builtin.os.tag == .windows) void else ?u32, + uid: if (builtin.os.tag == .windows or builtin.os.tag == .wasi) void else ?os.uid_t, /// Set to change the group id when spawning the child process. - gid: if (builtin.os.tag == .windows) void else ?u32, + gid: if (builtin.os.tag == .windows or builtin.os.tag == .wasi) void else ?os.gid_t, /// Set to change the current working directory when spawning the child process. cwd: ?[]const u8, diff --git a/lib/std/os.zig b/lib/std/os.zig index e8431c386b..3574468a38 100644 --- a/lib/std/os.zig +++ b/lib/std/os.zig @@ -2518,7 +2518,7 @@ pub const SetIdError = error{ PermissionDenied, } || UnexpectedError; -pub fn setuid(uid: u32) SetIdError!void { +pub fn setuid(uid: uid_t) SetIdError!void { switch (errno(system.setuid(uid))) { 0 => return, EAGAIN => return error.ResourceLimitReached, @@ -2528,7 +2528,7 @@ pub fn setuid(uid: u32) SetIdError!void { } } -pub fn setreuid(ruid: u32, euid: u32) SetIdError!void { +pub fn setreuid(ruid: uid_t, euid: uid_t) SetIdError!void { switch (errno(system.setreuid(ruid, euid))) { 0 => return, EAGAIN => return error.ResourceLimitReached, @@ -2538,7 +2538,7 @@ pub fn setreuid(ruid: u32, euid: u32) SetIdError!void { } } -pub fn setgid(gid: u32) SetIdError!void { +pub fn setgid(gid: gid_t) SetIdError!void { switch (errno(system.setgid(gid))) { 0 => return, EAGAIN => return error.ResourceLimitReached, @@ -2548,7 +2548,7 @@ pub fn setgid(gid: u32) SetIdError!void { } } -pub fn setregid(rgid: u32, egid: u32) SetIdError!void { +pub fn setregid(rgid: gid_t, egid: gid_t) SetIdError!void { switch (errno(system.setregid(rgid, egid))) { 0 => return, EAGAIN => return error.ResourceLimitReached, diff --git a/lib/std/os/bits/darwin.zig b/lib/std/os/bits/darwin.zig index 375127f278..ce73d2a6dc 100644 --- a/lib/std/os/bits/darwin.zig +++ b/lib/std/os/bits/darwin.zig @@ -7,9 +7,13 @@ const std = @import("../../std.zig"); const assert = std.debug.assert; const maxInt = std.math.maxInt; +// See: https://opensource.apple.com/source/xnu/xnu-6153.141.1/bsd/sys/_types.h.auto.html +// TODO: audit mode_t/pid_t, should likely be u16/i32 pub const fd_t = c_int; pub const pid_t = c_int; pub const mode_t = c_uint; +pub const uid_t = u32; +pub const gid_t = u32; pub const in_port_t = u16; pub const sa_family_t = u8; @@ -79,8 +83,8 @@ pub const Stat = extern struct { mode: u16, nlink: u16, ino: ino_t, - uid: u32, - gid: u32, + uid: uid_t, + gid: gid_t, rdev: i32, atimesec: isize, atimensec: isize, diff 
--git a/lib/std/os/bits/dragonfly.zig b/lib/std/os/bits/dragonfly.zig index 8b6d6be212..1412aa5c41 100644 --- a/lib/std/os/bits/dragonfly.zig +++ b/lib/std/os/bits/dragonfly.zig @@ -9,10 +9,17 @@ const maxInt = std.math.maxInt; pub fn S_ISCHR(m: u32) bool { return m & S_IFMT == S_IFCHR; } + +// See: +// - https://gitweb.dragonflybsd.org/dragonfly.git/blob/HEAD:/include/unistd.h +// - https://gitweb.dragonflybsd.org/dragonfly.git/blob/HEAD:/sys/sys/types.h +// TODO: mode_t should probably be changed to a u16, audit pid_t/off_t as well pub const fd_t = c_int; pub const pid_t = c_int; pub const off_t = c_long; pub const mode_t = c_uint; +pub const uid_t = u32; +pub const gid_t = u32; pub const ENOTSUP = EOPNOTSUPP; pub const EWOULDBLOCK = EAGAIN; @@ -151,8 +158,8 @@ pub const Stat = extern struct { dev: c_uint, mode: c_ushort, padding1: u16, - uid: c_uint, - gid: c_uint, + uid: uid_t, + gid: gid_t, rdev: c_uint, atim: timespec, mtim: timespec, @@ -511,7 +518,7 @@ pub const siginfo_t = extern struct { si_errno: c_int, si_code: c_int, si_pid: c_int, - si_uid: c_uint, + si_uid: uid_t, si_status: c_int, si_addr: ?*c_void, si_value: union_sigval, diff --git a/lib/std/os/bits/freebsd.zig b/lib/std/os/bits/freebsd.zig index 22edf4b9d1..32936f7515 100644 --- a/lib/std/os/bits/freebsd.zig +++ b/lib/std/os/bits/freebsd.zig @@ -6,8 +6,12 @@ const std = @import("../../std.zig"); const maxInt = std.math.maxInt; +// See https://svnweb.freebsd.org/base/head/sys/sys/_types.h?view=co +// TODO: audit pid_t/mode_t. They should likely be i32 and u16, respectively pub const fd_t = c_int; pub const pid_t = c_int; +pub const uid_t = u32; +pub const gid_t = u32; pub const mode_t = c_uint; pub const socklen_t = u32; @@ -128,8 +132,8 @@ pub const Stat = extern struct { mode: u16, __pad0: u16, - uid: u32, - gid: u32, + uid: uid_t, + gid: gid_t, __pad1: u32, rdev: u64, diff --git a/lib/std/os/bits/linux.zig b/lib/std/os/bits/linux.zig index 1327eaa330..1e12a278f3 100644 --- a/lib/std/os/bits/linux.zig +++ b/lib/std/os/bits/linux.zig @@ -29,7 +29,7 @@ const is_mips = builtin.arch.isMIPS(); pub const pid_t = i32; pub const fd_t = i32; -pub const uid_t = i32; +pub const uid_t = u32; pub const gid_t = u32; pub const clock_t = isize; @@ -853,7 +853,7 @@ pub const signalfd_siginfo = extern struct { errno: i32, code: i32, pid: u32, - uid: u32, + uid: uid_t, fd: i32, tid: u32, band: u32, @@ -1491,10 +1491,10 @@ pub const Statx = extern struct { nlink: u32, /// User ID of owner - uid: u32, + uid: uid_t, /// Group ID of owner - gid: u32, + gid: gid_t, /// File type and mode mode: u16, diff --git a/lib/std/os/bits/linux/x86_64.zig b/lib/std/os/bits/linux/x86_64.zig index 0800feeddf..0f01c40813 100644 --- a/lib/std/os/bits/linux/x86_64.zig +++ b/lib/std/os/bits/linux/x86_64.zig @@ -7,6 +7,7 @@ const std = @import("../../../std.zig"); const pid_t = linux.pid_t; const uid_t = linux.uid_t; +const gid_t = linux.gid_t; const clock_t = linux.clock_t; const stack_t = linux.stack_t; const sigset_t = linux.sigset_t; @@ -523,8 +524,8 @@ pub const Stat = extern struct { nlink: usize, mode: u32, - uid: u32, - gid: u32, + uid: uid_t, + gid: gid_t, __pad0: u32, rdev: u64, size: off_t, diff --git a/lib/std/os/linux.zig b/lib/std/os/linux.zig index 13094b3a3a..5e2a554018 100644 --- a/lib/std/os/linux.zig +++ b/lib/std/os/linux.zig @@ -655,7 +655,7 @@ pub fn nanosleep(req: *const timespec, rem: ?*timespec) usize { return syscall2(.nanosleep, @ptrToInt(req), @ptrToInt(rem)); } -pub fn setuid(uid: u32) usize { +pub fn setuid(uid: uid_t) usize { if 
(@hasField(SYS, "setuid32")) { return syscall1(.setuid32, uid); } else { @@ -663,7 +663,7 @@ pub fn setuid(uid: u32) usize { } } -pub fn setgid(gid: u32) usize { +pub fn setgid(gid: gid_t) usize { if (@hasField(SYS, "setgid32")) { return syscall1(.setgid32, gid); } else { @@ -671,7 +671,7 @@ pub fn setgid(gid: u32) usize { } } -pub fn setreuid(ruid: u32, euid: u32) usize { +pub fn setreuid(ruid: uid_t, euid: uid_t) usize { if (@hasField(SYS, "setreuid32")) { return syscall2(.setreuid32, ruid, euid); } else { @@ -679,7 +679,7 @@ pub fn setreuid(ruid: u32, euid: u32) usize { } } -pub fn setregid(rgid: u32, egid: u32) usize { +pub fn setregid(rgid: gid_t, egid: gid_t) usize { if (@hasField(SYS, "setregid32")) { return syscall2(.setregid32, rgid, egid); } else { @@ -687,47 +687,47 @@ pub fn setregid(rgid: u32, egid: u32) usize { } } -pub fn getuid() u32 { +pub fn getuid() uid_t { if (@hasField(SYS, "getuid32")) { - return @as(u32, syscall0(.getuid32)); + return @as(uid_t, syscall0(.getuid32)); } else { - return @as(u32, syscall0(.getuid)); + return @as(uid_t, syscall0(.getuid)); } } -pub fn getgid() u32 { +pub fn getgid() gid_t { if (@hasField(SYS, "getgid32")) { - return @as(u32, syscall0(.getgid32)); + return @as(gid_t, syscall0(.getgid32)); } else { - return @as(u32, syscall0(.getgid)); + return @as(gid_t, syscall0(.getgid)); } } -pub fn geteuid() u32 { +pub fn geteuid() uid_t { if (@hasField(SYS, "geteuid32")) { - return @as(u32, syscall0(.geteuid32)); + return @as(uid_t, syscall0(.geteuid32)); } else { - return @as(u32, syscall0(.geteuid)); + return @as(uid_t, syscall0(.geteuid)); } } -pub fn getegid() u32 { +pub fn getegid() gid_t { if (@hasField(SYS, "getegid32")) { - return @as(u32, syscall0(.getegid32)); + return @as(gid_t, syscall0(.getegid32)); } else { - return @as(u32, syscall0(.getegid)); + return @as(gid_t, syscall0(.getegid)); } } -pub fn seteuid(euid: u32) usize { - return setreuid(std.math.maxInt(u32), euid); +pub fn seteuid(euid: uid_t) usize { + return setresuid(std.math.maxInt(uid_t), euid); } -pub fn setegid(egid: u32) usize { - return setregid(std.math.maxInt(u32), egid); +pub fn setegid(egid: gid_t) usize { + return setregid(std.math.maxInt(gid_t), egid); } -pub fn getresuid(ruid: *u32, euid: *u32, suid: *u32) usize { +pub fn getresuid(ruid: *uid_t, euid: *uid_t, suid: *uid_t) usize { if (@hasField(SYS, "getresuid32")) { return syscall3(.getresuid32, @ptrToInt(ruid), @ptrToInt(euid), @ptrToInt(suid)); } else { @@ -735,7 +735,7 @@ pub fn getresuid(ruid: *u32, euid: *u32, suid: *u32) usize { } } -pub fn getresgid(rgid: *u32, egid: *u32, sgid: *u32) usize { +pub fn getresgid(rgid: *gid_t, egid: *gid_t, sgid: *gid_t) usize { if (@hasField(SYS, "getresgid32")) { return syscall3(.getresgid32, @ptrToInt(rgid), @ptrToInt(egid), @ptrToInt(sgid)); } else { @@ -743,7 +743,7 @@ pub fn getresgid(rgid: *u32, egid: *u32, sgid: *u32) usize { } } -pub fn setresuid(ruid: u32, euid: u32, suid: u32) usize { +pub fn setresuid(ruid: uid_t, euid: uid_t, suid: uid_t) usize { if (@hasField(SYS, "setresuid32")) { return syscall3(.setresuid32, ruid, euid, suid); } else { @@ -751,7 +751,7 @@ pub fn setresuid(ruid: u32, euid: u32, suid: u32) usize { } } -pub fn setresgid(rgid: u32, egid: u32, sgid: u32) usize { +pub fn setresgid(rgid: gid_t, egid: gid_t, sgid: gid_t) usize { if (@hasField(SYS, "setresgid32")) { return syscall3(.setresgid32, rgid, egid, sgid); } else { @@ -759,7 +759,7 @@ pub fn setresgid(rgid: u32, egid: u32, sgid: u32) usize { } } -pub fn getgroups(size: usize, list: *u32) usize { 
+pub fn getgroups(size: usize, list: *gid_t) usize { if (@hasField(SYS, "getgroups32")) { return syscall2(.getgroups32, size, @ptrToInt(list)); } else { @@ -767,7 +767,7 @@ pub fn getgroups(size: usize, list: *u32) usize { } } -pub fn setgroups(size: usize, list: *const u32) usize { +pub fn setgroups(size: usize, list: *const gid_t) usize { if (@hasField(SYS, "setgroups32")) { return syscall2(.setgroups32, size, @ptrToInt(list)); } else { diff --git a/lib/std/process.zig b/lib/std/process.zig index 69befa2fc8..9cb571714c 100644 --- a/lib/std/process.zig +++ b/lib/std/process.zig @@ -578,8 +578,8 @@ fn testWindowsCmdLine(input_cmd_line: [*]const u8, expected_args: []const []cons } pub const UserInfo = struct { - uid: u32, - gid: u32, + uid: os.uid_t, + gid: os.gid_t, }; /// POSIX function which gets a uid from username. @@ -607,8 +607,8 @@ pub fn posixGetUserInfo(name: []const u8) !UserInfo { var buf: [std.mem.page_size]u8 = undefined; var name_index: usize = 0; var state = State.Start; - var uid: u32 = 0; - var gid: u32 = 0; + var uid: os.uid_t = 0; + var gid: os.gid_t = 0; while (true) { const amt_read = try reader.read(buf[0..]); From 01a365f1b008fc1546f99c339dbae99521c169cd Mon Sep 17 00:00:00 2001 From: Isaac Freund Date: Thu, 3 Sep 2020 15:16:26 +0200 Subject: [PATCH 25/56] std: ensure seteuid/setegid do not change saved id --- lib/std/os/linux.zig | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/lib/std/os/linux.zig b/lib/std/os/linux.zig index 5e2a554018..3fc8006d06 100644 --- a/lib/std/os/linux.zig +++ b/lib/std/os/linux.zig @@ -720,11 +720,25 @@ pub fn getegid() gid_t { } pub fn seteuid(euid: uid_t) usize { - return setresuid(std.math.maxInt(uid_t), euid); + // We use setresuid here instead of setreuid to ensure that the saved uid + // is not changed. This is what musl and recent glibc versions do as well. + // + // The setresuid(2) man page says that if -1 is passed the corresponding + // id will not be changed. Since uid_t is unsigned, this wraps around to the + // max value in C. + comptime assert(@typeInfo(uid_t) == .Int and !@typeInfo(uid_t).Int.is_signed); + return setresuid(std.math.maxInt(uid_t), euid, std.math.maxInt(uid_t)); } pub fn setegid(egid: gid_t) usize { - return setregid(std.math.maxInt(gid_t), egid); + // We use setresgid here instead of setregid to ensure that the saved uid + // is not changed. This is what musl and recent glibc versions do as well. + // + // The setresgid(2) man page says that if -1 is passed the corresponding + // id will not be changed. Since gid_t is unsigned, this wraps around to the + // max value in C. + comptime assert(@typeInfo(uid_t) == .Int and !@typeInfo(uid_t).Int.is_signed); + return setresgid(std.math.maxInt(gid_t), egid, std.math.maxInt(gid_t)); } pub fn getresuid(ruid: *uid_t, euid: *uid_t, suid: *uid_t) usize { From d0d6647fdbfbe1a5764c2624e46eee35052d0da6 Mon Sep 17 00:00:00 2001 From: Isaac Freund Date: Thu, 3 Sep 2020 15:22:43 +0200 Subject: [PATCH 26/56] std: add seteuid/setegid to std.os Currently these are only implemented for linux. 
--- lib/std/os.zig | 25 ++++++++++++++++++++++--- 1 file changed, 22 insertions(+), 3 deletions(-) diff --git a/lib/std/os.zig b/lib/std/os.zig index 3574468a38..8b923989e6 100644 --- a/lib/std/os.zig +++ b/lib/std/os.zig @@ -2512,11 +2512,12 @@ pub fn readlinkatZ(dirfd: fd_t, file_path: [*:0]const u8, out_buffer: []u8) Read } } -pub const SetIdError = error{ - ResourceLimitReached, +pub const SetEidError = error{ InvalidUserId, PermissionDenied, -} || UnexpectedError; +}; + +pub const SetIdError = error{ResourceLimitReached} || SetEidError || UnexpectedError; pub fn setuid(uid: uid_t) SetIdError!void { switch (errno(system.setuid(uid))) { @@ -2528,6 +2529,15 @@ pub fn setuid(uid: uid_t) SetIdError!void { } } +pub fn seteuid(uid: uid_t) SetEidError!void { + switch (errno(system.seteuid(uid))) { + 0 => return, + EINVAL => return error.InvalidUserId, + EPERM => return error.PermissionDenied, + else => |err| return unexpectedErrno(err), + } +} + pub fn setreuid(ruid: uid_t, euid: uid_t) SetIdError!void { switch (errno(system.setreuid(ruid, euid))) { 0 => return, @@ -2548,6 +2558,15 @@ pub fn setgid(gid: gid_t) SetIdError!void { } } +pub fn setegid(uid: uid_t) SetEidError!void { + switch (errno(system.setegid(uid))) { + 0 => return, + EINVAL => return error.InvalidUserId, + EPERM => return error.PermissionDenied, + else => |err| return unexpectedErrno(err), + } +} + pub fn setregid(rgid: gid_t, egid: gid_t) SetIdError!void { switch (errno(system.setregid(rgid, egid))) { 0 => return, From 4eeeda0f52e6f7a6da53c11930cfd3cb714e4df6 Mon Sep 17 00:00:00 2001 From: Vexu Date: Thu, 3 Sep 2020 16:49:20 +0300 Subject: [PATCH 27/56] remove deprecated fields from types --- src/analyze.cpp | 2 +- src/ir.cpp | 160 +----------------------------------------------- 2 files changed, 2 insertions(+), 160 deletions(-) diff --git a/src/analyze.cpp b/src/analyze.cpp index acdbf3e933..b1d362f6e9 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -1810,7 +1810,7 @@ Error type_allowed_in_extern(CodeGen *g, ZigType *type_entry, bool *result) { ZigType *get_auto_err_set_type(CodeGen *g, ZigFn *fn_entry) { ZigType *err_set_type = new_type_table_entry(ZigTypeIdErrorSet); buf_resize(&err_set_type->name, 0); - buf_appendf(&err_set_type->name, "@TypeOf(%s).ReturnType.ErrorSet", buf_ptr(&fn_entry->symbol_name)); + buf_appendf(&err_set_type->name, "@typeInfo(@typeInfo(@TypeOf(%s)).Fn.return_type.?).ErrorUnion.error_set", buf_ptr(&fn_entry->symbol_name)); err_set_type->data.error_set.err_count = 0; err_set_type->data.error_set.errors = nullptr; err_set_type->data.error_set.infer_fn = fn_entry; diff --git a/src/ir.cpp b/src/ir.cpp index 9d5814ab6f..cdca38379d 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -22835,167 +22835,9 @@ static IrInstGen *ir_analyze_instruction_field_ptr(IrAnalyze *ira, IrInstSrcFiel bool ptr_is_volatile = false; return ir_get_const_ptr(ira, &field_ptr_instruction->base.base, const_val, err_set_type, ConstPtrMutComptimeConst, ptr_is_const, ptr_is_volatile, 0); - } else if (child_type->id == ZigTypeIdInt) { - if (buf_eql_str(field_name, "bit_count")) { - bool ptr_is_const = true; - bool ptr_is_volatile = false; - return ir_get_const_ptr(ira, &field_ptr_instruction->base.base, - create_const_unsigned_negative(ira->codegen, ira->codegen->builtin_types.entry_num_lit_int, - child_type->data.integral.bit_count, false), - ira->codegen->builtin_types.entry_num_lit_int, - ConstPtrMutComptimeConst, ptr_is_const, ptr_is_volatile, 0); - } else if (buf_eql_str(field_name, "is_signed")) { - bool ptr_is_const = true; - 
bool ptr_is_volatile = false; - return ir_get_const_ptr(ira, &field_ptr_instruction->base.base, - create_const_bool(ira->codegen, child_type->data.integral.is_signed), - ira->codegen->builtin_types.entry_bool, - ConstPtrMutComptimeConst, ptr_is_const, ptr_is_volatile, 0); - } else { - ir_add_error(ira, &field_ptr_instruction->base.base, - buf_sprintf("type '%s' has no member called '%s'", - buf_ptr(&child_type->name), buf_ptr(field_name))); - return ira->codegen->invalid_inst_gen; - } - } else if (child_type->id == ZigTypeIdFloat) { - if (buf_eql_str(field_name, "bit_count")) { - bool ptr_is_const = true; - bool ptr_is_volatile = false; - return ir_get_const_ptr(ira, &field_ptr_instruction->base.base, - create_const_unsigned_negative(ira->codegen, ira->codegen->builtin_types.entry_num_lit_int, - child_type->data.floating.bit_count, false), - ira->codegen->builtin_types.entry_num_lit_int, - ConstPtrMutComptimeConst, ptr_is_const, ptr_is_volatile, 0); - } else { - ir_add_error(ira, &field_ptr_instruction->base.base, - buf_sprintf("type '%s' has no member called '%s'", - buf_ptr(&child_type->name), buf_ptr(field_name))); - return ira->codegen->invalid_inst_gen; - } - } else if (child_type->id == ZigTypeIdPointer) { - if (buf_eql_str(field_name, "Child")) { - bool ptr_is_const = true; - bool ptr_is_volatile = false; - return ir_get_const_ptr(ira, &field_ptr_instruction->base.base, - create_const_type(ira->codegen, child_type->data.pointer.child_type), - ira->codegen->builtin_types.entry_type, - ConstPtrMutComptimeConst, ptr_is_const, ptr_is_volatile, 0); - } else if (buf_eql_str(field_name, "alignment")) { - bool ptr_is_const = true; - bool ptr_is_volatile = false; - if ((err = type_resolve(ira->codegen, child_type->data.pointer.child_type, - ResolveStatusAlignmentKnown))) - { - return ira->codegen->invalid_inst_gen; - } - return ir_get_const_ptr(ira, &field_ptr_instruction->base.base, - create_const_unsigned_negative(ira->codegen, ira->codegen->builtin_types.entry_num_lit_int, - get_ptr_align(ira->codegen, child_type), false), - ira->codegen->builtin_types.entry_num_lit_int, - ConstPtrMutComptimeConst, ptr_is_const, ptr_is_volatile, 0); - } else { - ir_add_error(ira, &field_ptr_instruction->base.base, - buf_sprintf("type '%s' has no member called '%s'", - buf_ptr(&child_type->name), buf_ptr(field_name))); - return ira->codegen->invalid_inst_gen; - } - } else if (child_type->id == ZigTypeIdArray) { - if (buf_eql_str(field_name, "Child")) { - bool ptr_is_const = true; - bool ptr_is_volatile = false; - return ir_get_const_ptr(ira, &field_ptr_instruction->base.base, - create_const_type(ira->codegen, child_type->data.array.child_type), - ira->codegen->builtin_types.entry_type, - ConstPtrMutComptimeConst, ptr_is_const, ptr_is_volatile, 0); - } else if (buf_eql_str(field_name, "len")) { - bool ptr_is_const = true; - bool ptr_is_volatile = false; - return ir_get_const_ptr(ira, &field_ptr_instruction->base.base, - create_const_unsigned_negative(ira->codegen, ira->codegen->builtin_types.entry_num_lit_int, - child_type->data.array.len, false), - ira->codegen->builtin_types.entry_num_lit_int, - ConstPtrMutComptimeConst, ptr_is_const, ptr_is_volatile, 0); - } else { - ir_add_error(ira, &field_ptr_instruction->base.base, - buf_sprintf("type '%s' has no member called '%s'", - buf_ptr(&child_type->name), buf_ptr(field_name))); - return ira->codegen->invalid_inst_gen; - } - } else if (child_type->id == ZigTypeIdErrorUnion) { - if (buf_eql_str(field_name, "Payload")) { - bool ptr_is_const = true; - bool 
ptr_is_volatile = false; - return ir_get_const_ptr(ira, &field_ptr_instruction->base.base, - create_const_type(ira->codegen, child_type->data.error_union.payload_type), - ira->codegen->builtin_types.entry_type, - ConstPtrMutComptimeConst, ptr_is_const, ptr_is_volatile, 0); - } else if (buf_eql_str(field_name, "ErrorSet")) { - bool ptr_is_const = true; - bool ptr_is_volatile = false; - return ir_get_const_ptr(ira, &field_ptr_instruction->base.base, - create_const_type(ira->codegen, child_type->data.error_union.err_set_type), - ira->codegen->builtin_types.entry_type, - ConstPtrMutComptimeConst, ptr_is_const, ptr_is_volatile, 0); - } else { - ir_add_error(ira, &field_ptr_instruction->base.base, - buf_sprintf("type '%s' has no member called '%s'", - buf_ptr(&child_type->name), buf_ptr(field_name))); - return ira->codegen->invalid_inst_gen; - } - } else if (child_type->id == ZigTypeIdOptional) { - if (buf_eql_str(field_name, "Child")) { - bool ptr_is_const = true; - bool ptr_is_volatile = false; - return ir_get_const_ptr(ira, &field_ptr_instruction->base.base, - create_const_type(ira->codegen, child_type->data.maybe.child_type), - ira->codegen->builtin_types.entry_type, - ConstPtrMutComptimeConst, ptr_is_const, ptr_is_volatile, 0); - } else { - ir_add_error(ira, &field_ptr_instruction->base.base, - buf_sprintf("type '%s' has no member called '%s'", - buf_ptr(&child_type->name), buf_ptr(field_name))); - return ira->codegen->invalid_inst_gen; - } - } else if (child_type->id == ZigTypeIdFn) { - if (buf_eql_str(field_name, "ReturnType")) { - if (child_type->data.fn.fn_type_id.return_type == nullptr) { - // Return type can only ever be null, if the function is generic - assert(child_type->data.fn.is_generic); - - ir_add_error(ira, &field_ptr_instruction->base.base, - buf_sprintf("ReturnType has not been resolved because '%s' is generic", buf_ptr(&child_type->name))); - return ira->codegen->invalid_inst_gen; - } - - bool ptr_is_const = true; - bool ptr_is_volatile = false; - return ir_get_const_ptr(ira, &field_ptr_instruction->base.base, - create_const_type(ira->codegen, child_type->data.fn.fn_type_id.return_type), - ira->codegen->builtin_types.entry_type, - ConstPtrMutComptimeConst, ptr_is_const, ptr_is_volatile, 0); - } else if (buf_eql_str(field_name, "is_var_args")) { - bool ptr_is_const = true; - bool ptr_is_volatile = false; - return ir_get_const_ptr(ira, &field_ptr_instruction->base.base, - create_const_bool(ira->codegen, child_type->data.fn.fn_type_id.is_var_args), - ira->codegen->builtin_types.entry_bool, - ConstPtrMutComptimeConst, ptr_is_const, ptr_is_volatile, 0); - } else if (buf_eql_str(field_name, "arg_count")) { - bool ptr_is_const = true; - bool ptr_is_volatile = false; - return ir_get_const_ptr(ira, &field_ptr_instruction->base.base, - create_const_usize(ira->codegen, child_type->data.fn.fn_type_id.param_count), - ira->codegen->builtin_types.entry_usize, - ConstPtrMutComptimeConst, ptr_is_const, ptr_is_volatile, 0); - } else { - ir_add_error(ira, &field_ptr_instruction->base.base, - buf_sprintf("type '%s' has no member called '%s'", - buf_ptr(&child_type->name), buf_ptr(field_name))); - return ira->codegen->invalid_inst_gen; - } } else { ir_add_error(ira, &field_ptr_instruction->base.base, - buf_sprintf("type '%s' does not support field access", buf_ptr(&child_type->name))); + buf_sprintf("type '%s' does not support field access", buf_ptr(&container_type->name))); return ira->codegen->invalid_inst_gen; } } else if (field_ptr_instruction->initializing) { From 
1df0f3ac24f090e8c58cd4cd6e752110cc5262b8 Mon Sep 17 00:00:00 2001 From: Vexu Date: Thu, 3 Sep 2020 18:09:55 +0300 Subject: [PATCH 28/56] update uses of deprecated type field access --- lib/std/child_process.zig | 4 +- lib/std/debug/leb128.zig | 47 ++++++------ lib/std/fmt.zig | 21 ++--- lib/std/fmt/parse_float.zig | 2 +- lib/std/hash/auto_hash.zig | 2 +- lib/std/heap.zig | 2 +- lib/std/io/reader.zig | 10 +-- lib/std/io/serialization.zig | 6 +- lib/std/io/writer.zig | 10 +-- lib/std/math.zig | 63 +++++++-------- lib/std/math/big.zig | 11 +-- lib/std/math/big/int.zig | 85 +++++++++++---------- lib/std/math/big/int_test.zig | 6 +- lib/std/math/big/rational.zig | 16 ++-- lib/std/math/cos.zig | 2 +- lib/std/math/pow.zig | 4 +- lib/std/math/sin.zig | 2 +- lib/std/math/sqrt.zig | 6 +- lib/std/math/tan.zig | 2 +- lib/std/mem.zig | 42 +++++----- lib/std/mem/Allocator.zig | 6 +- lib/std/os.zig | 2 +- lib/std/os/bits/linux.zig | 2 +- lib/std/os/linux.zig | 8 +- lib/std/os/windows/ws2_32.zig | 2 +- lib/std/rand.zig | 57 ++++++++------ lib/std/special/build_runner.zig | 2 +- lib/std/special/c.zig | 5 +- lib/std/special/compiler_rt/addXf3.zig | 18 +++-- lib/std/special/compiler_rt/aulldiv.zig | 4 +- lib/std/special/compiler_rt/aullrem.zig | 4 +- lib/std/special/compiler_rt/compareXf2.zig | 7 +- lib/std/special/compiler_rt/divdf3.zig | 9 +-- lib/std/special/compiler_rt/divsf3.zig | 7 +- lib/std/special/compiler_rt/divtf3.zig | 5 +- lib/std/special/compiler_rt/divti3.zig | 4 +- lib/std/special/compiler_rt/fixint.zig | 9 ++- lib/std/special/compiler_rt/fixuint.zig | 6 +- lib/std/special/compiler_rt/floatXisf.zig | 9 ++- lib/std/special/compiler_rt/floatsiXf.zig | 7 +- lib/std/special/compiler_rt/floatundisf.zig | 2 +- lib/std/special/compiler_rt/floatunditf.zig | 2 +- lib/std/special/compiler_rt/floatunsitf.zig | 2 +- lib/std/special/compiler_rt/int.zig | 2 +- lib/std/special/compiler_rt/modti3.zig | 4 +- lib/std/special/compiler_rt/mulXf3.zig | 10 +-- lib/std/special/compiler_rt/mulodi4.zig | 2 +- lib/std/special/compiler_rt/muloti4.zig | 6 +- lib/std/special/compiler_rt/negXf2.zig | 3 +- lib/std/special/compiler_rt/shift.zig | 25 +++--- lib/std/special/compiler_rt/truncXfYf2.zig | 4 +- lib/std/special/compiler_rt/udivmod.zig | 70 ++++++++--------- lib/std/start.zig | 2 +- lib/std/thread.zig | 6 +- lib/std/zig.zig | 2 +- test/stage1/behavior/align.zig | 2 +- test/stage1/behavior/array.zig | 10 --- test/stage1/behavior/async_fn.zig | 6 +- test/stage1/behavior/bit_shifting.zig | 12 +-- test/stage1/behavior/bugs/5487.zig | 4 +- test/stage1/behavior/error.zig | 4 +- test/stage1/behavior/misc.zig | 10 --- test/stage1/behavior/reflection.zig | 22 ++---- 63 files changed, 362 insertions(+), 366 deletions(-) diff --git a/lib/std/child_process.zig b/lib/std/child_process.zig index 287fc3e7cd..b5ed2c72c6 100644 --- a/lib/std/child_process.zig +++ b/lib/std/child_process.zig @@ -275,9 +275,7 @@ pub const ChildProcess = struct { } fn handleWaitResult(self: *ChildProcess, status: u32) void { - // TODO https://github.com/ziglang/zig/issues/3190 - var term = self.cleanupAfterWait(status); - self.term = term; + self.term = self.cleanupAfterWait(status); } fn cleanupStreams(self: *ChildProcess) void { diff --git a/lib/std/debug/leb128.zig b/lib/std/debug/leb128.zig index eca777c1cf..2b96d39131 100644 --- a/lib/std/debug/leb128.zig +++ b/lib/std/debug/leb128.zig @@ -9,10 +9,10 @@ const testing = std.testing; /// Read a single unsigned LEB128 value from the given reader as type T, /// or error.Overflow if the value cannot 
fit. pub fn readULEB128(comptime T: type, reader: anytype) !T { - const U = if (T.bit_count < 8) u8 else T; + const U = if (@typeInfo(T).Int.bits < 8) u8 else T; const ShiftT = std.math.Log2Int(U); - const max_group = (U.bit_count + 6) / 7; + const max_group = (@typeInfo(U).Int.bits + 6) / 7; var value = @as(U, 0); var group = @as(ShiftT, 0); @@ -40,7 +40,7 @@ pub fn readULEB128(comptime T: type, reader: anytype) !T { /// Write a single unsigned integer as unsigned LEB128 to the given writer. pub fn writeULEB128(writer: anytype, uint_value: anytype) !void { const T = @TypeOf(uint_value); - const U = if (T.bit_count < 8) u8 else T; + const U = if (@typeInfo(T).Int.bits < 8) u8 else T; var value = @intCast(U, uint_value); while (true) { @@ -68,7 +68,7 @@ pub fn readULEB128Mem(comptime T: type, ptr: *[]const u8) !T { /// returning the number of bytes written. pub fn writeULEB128Mem(ptr: []u8, uint_value: anytype) !usize { const T = @TypeOf(uint_value); - const max_group = (T.bit_count + 6) / 7; + const max_group = (@typeInfo(T).Int.bits + 6) / 7; var buf = std.io.fixedBufferStream(ptr); try writeULEB128(buf.writer(), uint_value); return buf.pos; @@ -77,11 +77,11 @@ pub fn writeULEB128Mem(ptr: []u8, uint_value: anytype) !usize { /// Read a single signed LEB128 value from the given reader as type T, /// or error.Overflow if the value cannot fit. pub fn readILEB128(comptime T: type, reader: anytype) !T { - const S = if (T.bit_count < 8) i8 else T; - const U = std.meta.Int(false, S.bit_count); + const S = if (@typeInfo(T).Int.bits < 8) i8 else T; + const U = std.meta.Int(false, @typeInfo(S).Int.bits); const ShiftU = std.math.Log2Int(U); - const max_group = (U.bit_count + 6) / 7; + const max_group = (@typeInfo(U).Int.bits + 6) / 7; var value = @as(U, 0); var group = @as(ShiftU, 0); @@ -97,7 +97,7 @@ pub fn readILEB128(comptime T: type, reader: anytype) !T { if (@bitCast(S, temp) >= 0) return error.Overflow; // and all the overflowed bits are 1 - const remaining_shift = @intCast(u3, U.bit_count - @as(u16, shift)); + const remaining_shift = @intCast(u3, @typeInfo(U).Int.bits - @as(u16, shift)); const remaining_bits = @bitCast(i8, byte | 0x80) >> remaining_shift; if (remaining_bits != -1) return error.Overflow; } @@ -127,8 +127,8 @@ pub fn readILEB128(comptime T: type, reader: anytype) !T { /// Write a single signed integer as signed LEB128 to the given writer. pub fn writeILEB128(writer: anytype, int_value: anytype) !void { const T = @TypeOf(int_value); - const S = if (T.bit_count < 8) i8 else T; - const U = std.meta.Int(false, S.bit_count); + const S = if (@typeInfo(T).Int.bits < 8) i8 else T; + const U = std.meta.Int(false, @typeInfo(S).Int.bits); var value = @intCast(S, int_value); @@ -173,7 +173,7 @@ pub fn writeILEB128Mem(ptr: []u8, int_value: anytype) !usize { /// different value without shifting all the following code. 
pub fn writeUnsignedFixed(comptime l: usize, ptr: *[l]u8, int: std.meta.Int(false, l * 7)) void { const T = @TypeOf(int); - const U = if (T.bit_count < 8) u8 else T; + const U = if (@typeInfo(T).Int.bits < 8) u8 else T; var value = @intCast(U, int); comptime var i = 0; @@ -346,28 +346,29 @@ test "deserialize unsigned LEB128" { fn test_write_leb128(value: anytype) !void { const T = @TypeOf(value); + const t_signed = @typeInfo(T).Int.is_signed; - const writeStream = if (T.is_signed) writeILEB128 else writeULEB128; - const writeMem = if (T.is_signed) writeILEB128Mem else writeULEB128Mem; - const readStream = if (T.is_signed) readILEB128 else readULEB128; - const readMem = if (T.is_signed) readILEB128Mem else readULEB128Mem; + const writeStream = if (t_signed) writeILEB128 else writeULEB128; + const writeMem = if (t_signed) writeILEB128Mem else writeULEB128Mem; + const readStream = if (t_signed) readILEB128 else readULEB128; + const readMem = if (t_signed) readILEB128Mem else readULEB128Mem; // decode to a larger bit size too, to ensure sign extension // is working as expected - const larger_type_bits = ((T.bit_count + 8) / 8) * 8; - const B = std.meta.Int(T.is_signed, larger_type_bits); + const larger_type_bits = ((@typeInfo(T).Int.bits + 8) / 8) * 8; + const B = std.meta.Int(t_signed, larger_type_bits); const bytes_needed = bn: { - const S = std.meta.Int(T.is_signed, @sizeOf(T) * 8); - if (T.bit_count <= 7) break :bn @as(u16, 1); + const S = std.meta.Int(t_signed, @sizeOf(T) * 8); + if (@typeInfo(T).Int.bits <= 7) break :bn @as(u16, 1); const unused_bits = if (value < 0) @clz(T, ~value) else @clz(T, value); - const used_bits: u16 = (T.bit_count - unused_bits) + @boolToInt(T.is_signed); + const used_bits: u16 = (@typeInfo(T).Int.bits - unused_bits) + @boolToInt(t_signed); if (used_bits <= 7) break :bn @as(u16, 1); break :bn ((used_bits + 6) / 7); }; - const max_groups = if (T.bit_count == 0) 1 else (T.bit_count + 6) / 7; + const max_groups = if (@typeInfo(T).Int.bits == 0) 1 else (@typeInfo(T).Int.bits + 6) / 7; var buf: [max_groups]u8 = undefined; var fbs = std.io.fixedBufferStream(&buf); @@ -414,7 +415,7 @@ test "serialize unsigned LEB128" { const T = std.meta.Int(false, t); const min = std.math.minInt(T); const max = std.math.maxInt(T); - var i = @as(std.meta.Int(false, T.bit_count + 1), min); + var i = @as(std.meta.Int(false, @typeInfo(T).Int.bits + 1), min); while (i <= max) : (i += 1) try test_write_leb128(@intCast(T, i)); } @@ -432,7 +433,7 @@ test "serialize signed LEB128" { const T = std.meta.Int(true, t); const min = std.math.minInt(T); const max = std.math.maxInt(T); - var i = @as(std.meta.Int(true, T.bit_count + 1), min); + var i = @as(std.meta.Int(true, @typeInfo(T).Int.bits + 1), min); while (i <= max) : (i += 1) try test_write_leb128(@intCast(T, i)); } diff --git a/lib/std/fmt.zig b/lib/std/fmt.zig index 3067a55759..5b18c8731b 100644 --- a/lib/std/fmt.zig +++ b/lib/std/fmt.zig @@ -91,7 +91,7 @@ pub fn format( if (@typeInfo(@TypeOf(args)) != .Struct) { @compileError("Expected tuple or struct argument, found " ++ @typeName(@TypeOf(args))); } - if (args.len > ArgSetType.bit_count) { + if (args.len > @typeInfo(ArgSetType).Int.bits) { @compileError("32 arguments max are supported per format call"); } @@ -325,7 +325,7 @@ pub fn formatType( max_depth: usize, ) @TypeOf(writer).Error!void { if (comptime std.mem.eql(u8, fmt, "*")) { - try writer.writeAll(@typeName(@TypeOf(value).Child)); + try writer.writeAll(@typeName(@typeInfo(@TypeOf(value)).Pointer.child)); try writer.writeAll("@"); 
try formatInt(@ptrToInt(value), 16, false, FormatOptions{}, writer); return; @@ -430,12 +430,12 @@ pub fn formatType( if (info.child == u8) { return formatText(value, fmt, options, writer); } - return format(writer, "{}@{x}", .{ @typeName(T.Child), @ptrToInt(value) }); + return format(writer, "{}@{x}", .{ @typeName(@typeInfo(T).Pointer.child), @ptrToInt(value) }); }, .Enum, .Union, .Struct => { return formatType(value.*, fmt, options, writer, max_depth); }, - else => return format(writer, "{}@{x}", .{ @typeName(T.Child), @ptrToInt(value) }), + else => return format(writer, "{}@{x}", .{ @typeName(@typeInfo(T).Pointer.child), @ptrToInt(value) }), }, .Many, .C => { if (ptr_info.sentinel) |sentinel| { @@ -446,7 +446,7 @@ pub fn formatType( return formatText(mem.span(value), fmt, options, writer); } } - return format(writer, "{}@{x}", .{ @typeName(T.Child), @ptrToInt(value) }); + return format(writer, "{}@{x}", .{ @typeName(@typeInfo(T).Pointer.child), @ptrToInt(value) }); }, .Slice => { if (fmt.len > 0 and ((fmt[0] == 'x') or (fmt[0] == 'X'))) { @@ -536,7 +536,7 @@ pub fn formatIntValue( radix = 10; uppercase = false; } else if (comptime std.mem.eql(u8, fmt, "c")) { - if (@TypeOf(int_value).bit_count <= 8) { + if (@typeInfo(@TypeOf(int_value)).Int.bits <= 8) { return formatAsciiChar(@as(u8, int_value), options, writer); } else { @compileError("Cannot print integer that is larger than 8 bits as a ascii"); @@ -945,7 +945,7 @@ pub fn formatInt( } else value; - if (@TypeOf(int_value).is_signed) { + if (@typeInfo(@TypeOf(int_value)).Int.is_signed) { return formatIntSigned(int_value, base, uppercase, options, writer); } else { return formatIntUnsigned(int_value, base, uppercase, options, writer); @@ -987,9 +987,10 @@ fn formatIntUnsigned( writer: anytype, ) !void { assert(base >= 2); - var buf: [math.max(@TypeOf(value).bit_count, 1)]u8 = undefined; - const min_int_bits = comptime math.max(@TypeOf(value).bit_count, @TypeOf(base).bit_count); - const MinInt = std.meta.Int(@TypeOf(value).is_signed, min_int_bits); + const value_info = @typeInfo(@TypeOf(value)).Int; + var buf: [math.max(value_info.bits, 1)]u8 = undefined; + const min_int_bits = comptime math.max(value_info.bits, @typeInfo(@TypeOf(base)).Int.bits); + const MinInt = std.meta.Int(value_info.is_signed, min_int_bits); var a: MinInt = value; var index: usize = buf.len; diff --git a/lib/std/fmt/parse_float.zig b/lib/std/fmt/parse_float.zig index 69557714f6..e4d3c10d92 100644 --- a/lib/std/fmt/parse_float.zig +++ b/lib/std/fmt/parse_float.zig @@ -372,7 +372,7 @@ test "fmt.parseFloat" { const epsilon = 1e-7; inline for ([_]type{ f16, f32, f64, f128 }) |T| { - const Z = std.meta.Int(false, T.bit_count); + const Z = std.meta.Int(false, @typeInfo(T).Float.bits); testing.expectError(error.InvalidCharacter, parseFloat(T, "")); testing.expectError(error.InvalidCharacter, parseFloat(T, " 1")); diff --git a/lib/std/hash/auto_hash.zig b/lib/std/hash/auto_hash.zig index 2905a6af13..5877c77b5d 100644 --- a/lib/std/hash/auto_hash.zig +++ b/lib/std/hash/auto_hash.zig @@ -113,7 +113,7 @@ pub fn hash(hasher: anytype, key: anytype, comptime strat: HashStrategy) void { .Array => hashArray(hasher, key, strat), .Vector => |info| { - if (info.child.bit_count % 8 == 0) { + if (std.meta.bitCount(info.child) % 8 == 0) { // If there's no unused bits in the child type, we can just hash // this as an array of bytes. 
hasher.update(mem.asBytes(&key)); diff --git a/lib/std/heap.zig b/lib/std/heap.zig index d6977f2f9c..7aa9b8bb5f 100644 --- a/lib/std/heap.zig +++ b/lib/std/heap.zig @@ -952,7 +952,7 @@ pub fn testAllocatorLargeAlignment(base_allocator: *mem.Allocator) mem.Allocator // very near usize? if (mem.page_size << 2 > maxInt(usize)) return; - const USizeShift = std.meta.Int(false, std.math.log2(usize.bit_count)); + const USizeShift = std.meta.Int(false, std.math.log2(std.meta.bitCount(usize))); const large_align = @as(u29, mem.page_size << 2); var align_mask: usize = undefined; diff --git a/lib/std/io/reader.zig b/lib/std/io/reader.zig index 2ab799046a..4090f5a476 100644 --- a/lib/std/io/reader.zig +++ b/lib/std/io/reader.zig @@ -198,28 +198,28 @@ pub fn Reader( /// Reads a native-endian integer pub fn readIntNative(self: Self, comptime T: type) !T { - const bytes = try self.readBytesNoEof((T.bit_count + 7) / 8); + const bytes = try self.readBytesNoEof((@typeInfo(T).Int.bits + 7) / 8); return mem.readIntNative(T, &bytes); } /// Reads a foreign-endian integer pub fn readIntForeign(self: Self, comptime T: type) !T { - const bytes = try self.readBytesNoEof((T.bit_count + 7) / 8); + const bytes = try self.readBytesNoEof((@typeInfo(T).Int.bits + 7) / 8); return mem.readIntForeign(T, &bytes); } pub fn readIntLittle(self: Self, comptime T: type) !T { - const bytes = try self.readBytesNoEof((T.bit_count + 7) / 8); + const bytes = try self.readBytesNoEof((@typeInfo(T).Int.bits + 7) / 8); return mem.readIntLittle(T, &bytes); } pub fn readIntBig(self: Self, comptime T: type) !T { - const bytes = try self.readBytesNoEof((T.bit_count + 7) / 8); + const bytes = try self.readBytesNoEof((@typeInfo(T).Int.bits + 7) / 8); return mem.readIntBig(T, &bytes); } pub fn readInt(self: Self, comptime T: type, endian: builtin.Endian) !T { - const bytes = try self.readBytesNoEof((T.bit_count + 7) / 8); + const bytes = try self.readBytesNoEof((@typeInfo(T).Int.bits + 7) / 8); return mem.readInt(T, &bytes, endian); } diff --git a/lib/std/io/serialization.zig b/lib/std/io/serialization.zig index 4f8c149b47..925c929cee 100644 --- a/lib/std/io/serialization.zig +++ b/lib/std/io/serialization.zig @@ -60,7 +60,7 @@ pub fn Deserializer(comptime endian: builtin.Endian, comptime packing: Packing, const U = std.meta.Int(false, t_bit_count); const Log2U = math.Log2Int(U); - const int_size = (U.bit_count + 7) / 8; + const int_size = (t_bit_count + 7) / 8; if (packing == .Bit) { const result = try self.in_stream.readBitsNoEof(U, t_bit_count); @@ -73,7 +73,7 @@ pub fn Deserializer(comptime endian: builtin.Endian, comptime packing: Packing, if (int_size == 1) { if (t_bit_count == 8) return @bitCast(T, buffer[0]); - const PossiblySignedByte = std.meta.Int(T.is_signed, 8); + const PossiblySignedByte = std.meta.Int(@typeInfo(T).Int.is_signed, 8); return @truncate(T, @bitCast(PossiblySignedByte, buffer[0])); } @@ -247,7 +247,7 @@ pub fn Serializer(comptime endian: builtin.Endian, comptime packing: Packing, co const U = std.meta.Int(false, t_bit_count); const Log2U = math.Log2Int(U); - const int_size = (U.bit_count + 7) / 8; + const int_size = (t_bit_count + 7) / 8; const u_value = @bitCast(U, value); diff --git a/lib/std/io/writer.zig b/lib/std/io/writer.zig index 39729ef0a2..770cd5f0fa 100644 --- a/lib/std/io/writer.zig +++ b/lib/std/io/writer.zig @@ -53,7 +53,7 @@ pub fn Writer( /// Write a native-endian integer. 
/// TODO audit non-power-of-two int sizes pub fn writeIntNative(self: Self, comptime T: type, value: T) Error!void { - var bytes: [(T.bit_count + 7) / 8]u8 = undefined; + var bytes: [(@typeInfo(T).Int.bits + 7) / 8]u8 = undefined; mem.writeIntNative(T, &bytes, value); return self.writeAll(&bytes); } @@ -61,28 +61,28 @@ pub fn Writer( /// Write a foreign-endian integer. /// TODO audit non-power-of-two int sizes pub fn writeIntForeign(self: Self, comptime T: type, value: T) Error!void { - var bytes: [(T.bit_count + 7) / 8]u8 = undefined; + var bytes: [(@typeInfo(T).Int.bits + 7) / 8]u8 = undefined; mem.writeIntForeign(T, &bytes, value); return self.writeAll(&bytes); } /// TODO audit non-power-of-two int sizes pub fn writeIntLittle(self: Self, comptime T: type, value: T) Error!void { - var bytes: [(T.bit_count + 7) / 8]u8 = undefined; + var bytes: [(@typeInfo(T).Int.bits + 7) / 8]u8 = undefined; mem.writeIntLittle(T, &bytes, value); return self.writeAll(&bytes); } /// TODO audit non-power-of-two int sizes pub fn writeIntBig(self: Self, comptime T: type, value: T) Error!void { - var bytes: [(T.bit_count + 7) / 8]u8 = undefined; + var bytes: [(@typeInfo(T).Int.bits + 7) / 8]u8 = undefined; mem.writeIntBig(T, &bytes, value); return self.writeAll(&bytes); } /// TODO audit non-power-of-two int sizes pub fn writeInt(self: Self, comptime T: type, value: T, endian: builtin.Endian) Error!void { - var bytes: [(T.bit_count + 7) / 8]u8 = undefined; + var bytes: [(@typeInfo(T).Int.bits + 7) / 8]u8 = undefined; mem.writeInt(T, &bytes, value, endian); return self.writeAll(&bytes); } diff --git a/lib/std/math.zig b/lib/std/math.zig index de9f5e349d..f05c967b2d 100644 --- a/lib/std/math.zig +++ b/lib/std/math.zig @@ -195,7 +195,7 @@ test "" { pub fn floatMantissaBits(comptime T: type) comptime_int { assert(@typeInfo(T) == .Float); - return switch (T.bit_count) { + return switch (@typeInfo(T).Float.bits) { 16 => 10, 32 => 23, 64 => 52, @@ -208,7 +208,7 @@ pub fn floatMantissaBits(comptime T: type) comptime_int { pub fn floatExponentBits(comptime T: type) comptime_int { assert(@typeInfo(T) == .Float); - return switch (T.bit_count) { + return switch (@typeInfo(T).Float.bits) { 16 => 5, 32 => 8, 64 => 11, @@ -347,9 +347,9 @@ pub fn shlExact(comptime T: type, a: T, shift_amt: Log2Int(T)) !T { /// A negative shift amount results in a right shift. pub fn shl(comptime T: type, a: T, shift_amt: anytype) T { const abs_shift_amt = absCast(shift_amt); - const casted_shift_amt = if (abs_shift_amt >= T.bit_count) return 0 else @intCast(Log2Int(T), abs_shift_amt); + const casted_shift_amt = if (abs_shift_amt >= @typeInfo(T).Int.bits) return 0 else @intCast(Log2Int(T), abs_shift_amt); - if (@TypeOf(shift_amt) == comptime_int or @TypeOf(shift_amt).is_signed) { + if (@TypeOf(shift_amt) == comptime_int or @typeInfo(@TypeOf(shift_amt)).Int.is_signed) { if (shift_amt < 0) { return a >> casted_shift_amt; } @@ -373,9 +373,9 @@ test "math.shl" { /// A negative shift amount results in a left shift. 
pub fn shr(comptime T: type, a: T, shift_amt: anytype) T { const abs_shift_amt = absCast(shift_amt); - const casted_shift_amt = if (abs_shift_amt >= T.bit_count) return 0 else @intCast(Log2Int(T), abs_shift_amt); + const casted_shift_amt = if (abs_shift_amt >= @typeInfo(T).Int.bits) return 0 else @intCast(Log2Int(T), abs_shift_amt); - if (@TypeOf(shift_amt) == comptime_int or @TypeOf(shift_amt).is_signed) { + if (@TypeOf(shift_amt) == comptime_int or @typeInfo(@TypeOf(shift_amt)).Int.is_signed) { if (shift_amt >= 0) { return a >> casted_shift_amt; } else { @@ -400,11 +400,11 @@ test "math.shr" { /// Rotates right. Only unsigned values can be rotated. /// Negative shift values results in shift modulo the bit count. pub fn rotr(comptime T: type, x: T, r: anytype) T { - if (T.is_signed) { + if (@typeInfo(T).Int.is_signed) { @compileError("cannot rotate signed integer"); } else { - const ar = @mod(r, T.bit_count); - return shr(T, x, ar) | shl(T, x, T.bit_count - ar); + const ar = @mod(r, @typeInfo(T).Int.bits); + return shr(T, x, ar) | shl(T, x, @typeInfo(T).Int.bits - ar); } } @@ -419,11 +419,11 @@ test "math.rotr" { /// Rotates left. Only unsigned values can be rotated. /// Negative shift values results in shift modulo the bit count. pub fn rotl(comptime T: type, x: T, r: anytype) T { - if (T.is_signed) { + if (@typeInfo(T).Int.is_signed) { @compileError("cannot rotate signed integer"); } else { - const ar = @mod(r, T.bit_count); - return shl(T, x, ar) | shr(T, x, T.bit_count - ar); + const ar = @mod(r, @typeInfo(T).Int.bits); + return shl(T, x, ar) | shr(T, x, @typeInfo(T).Int.bits - ar); } } @@ -438,7 +438,7 @@ test "math.rotl" { pub fn Log2Int(comptime T: type) type { // comptime ceil log2 comptime var count = 0; - comptime var s = T.bit_count - 1; + comptime var s = @typeInfo(T).Int.bits - 1; inline while (s != 0) : (s >>= 1) { count += 1; } @@ -524,7 +524,7 @@ fn testOverflow() void { pub fn absInt(x: anytype) !@TypeOf(x) { const T = @TypeOf(x); comptime assert(@typeInfo(T) == .Int); // must pass an integer to absInt - comptime assert(T.is_signed); // must pass a signed integer to absInt + comptime assert(@typeInfo(T).Int.is_signed); // must pass a signed integer to absInt if (x == minInt(@TypeOf(x))) { return error.Overflow; @@ -557,7 +557,7 @@ fn testAbsFloat() void { pub fn divTrunc(comptime T: type, numerator: T, denominator: T) !T { @setRuntimeSafety(false); if (denominator == 0) return error.DivisionByZero; - if (@typeInfo(T) == .Int and T.is_signed and numerator == minInt(T) and denominator == -1) return error.Overflow; + if (@typeInfo(T) == .Int and @typeInfo(T).Int.is_signed and numerator == minInt(T) and denominator == -1) return error.Overflow; return @divTrunc(numerator, denominator); } @@ -578,7 +578,7 @@ fn testDivTrunc() void { pub fn divFloor(comptime T: type, numerator: T, denominator: T) !T { @setRuntimeSafety(false); if (denominator == 0) return error.DivisionByZero; - if (@typeInfo(T) == .Int and T.is_signed and numerator == minInt(T) and denominator == -1) return error.Overflow; + if (@typeInfo(T) == .Int and @typeInfo(T).Int.is_signed and numerator == minInt(T) and denominator == -1) return error.Overflow; return @divFloor(numerator, denominator); } @@ -652,7 +652,7 @@ fn testDivCeil() void { pub fn divExact(comptime T: type, numerator: T, denominator: T) !T { @setRuntimeSafety(false); if (denominator == 0) return error.DivisionByZero; - if (@typeInfo(T) == .Int and T.is_signed and numerator == minInt(T) and denominator == -1) return error.Overflow; + if 
(@typeInfo(T) == .Int and @typeInfo(T).Int.is_signed and numerator == minInt(T) and denominator == -1) return error.Overflow; const result = @divTrunc(numerator, denominator); if (result * denominator != numerator) return error.UnexpectedRemainder; return result; @@ -757,10 +757,10 @@ test "math.absCast" { /// Returns the negation of the integer parameter. /// Result is a signed integer. -pub fn negateCast(x: anytype) !std.meta.Int(true, @TypeOf(x).bit_count) { - if (@TypeOf(x).is_signed) return negate(x); +pub fn negateCast(x: anytype) !std.meta.Int(true, std.meta.bitCount(@TypeOf(x))) { + if (@typeInfo(@TypeOf(x)).Int.is_signed) return negate(x); - const int = std.meta.Int(true, @TypeOf(x).bit_count); + const int = std.meta.Int(true, std.meta.bitCount(@TypeOf(x))); if (x > -minInt(int)) return error.Overflow; if (x == -minInt(int)) return minInt(int); @@ -823,7 +823,7 @@ pub fn floorPowerOfTwo(comptime T: type, value: T) T { var x = value; comptime var i = 1; - inline while (T.bit_count > i) : (i *= 2) { + inline while (@typeInfo(T).Int.bits > i) : (i *= 2) { x |= (x >> i); } @@ -847,13 +847,13 @@ fn testFloorPowerOfTwo() void { /// Returns the next power of two (if the value is not already a power of two). /// Only unsigned integers can be used. Zero is not an allowed input. /// Result is a type with 1 more bit than the input type. -pub fn ceilPowerOfTwoPromote(comptime T: type, value: T) std.meta.Int(T.is_signed, T.bit_count + 1) { +pub fn ceilPowerOfTwoPromote(comptime T: type, value: T) std.meta.Int(@typeInfo(T).Int.is_signed, @typeInfo(T).Int.bits + 1) { comptime assert(@typeInfo(T) == .Int); - comptime assert(!T.is_signed); + comptime assert(!@typeInfo(T).Int.is_signed); assert(value != 0); - comptime const PromotedType = std.meta.Int(T.is_signed, T.bit_count + 1); + comptime const PromotedType = std.meta.Int(@typeInfo(T).Int.is_signed, @typeInfo(T).Int.bits + 1); comptime const shiftType = std.math.Log2Int(PromotedType); - return @as(PromotedType, 1) << @intCast(shiftType, T.bit_count - @clz(T, value - 1)); + return @as(PromotedType, 1) << @intCast(shiftType, @typeInfo(T).Int.bits - @clz(T, value - 1)); } /// Returns the next power of two (if the value is not already a power of two). @@ -861,9 +861,10 @@ pub fn ceilPowerOfTwoPromote(comptime T: type, value: T) std.meta.Int(T.is_signe /// If the value doesn't fit, returns an error. 
pub fn ceilPowerOfTwo(comptime T: type, value: T) (error{Overflow}!T) { comptime assert(@typeInfo(T) == .Int); - comptime assert(!T.is_signed); - comptime const PromotedType = std.meta.Int(T.is_signed, T.bit_count + 1); - comptime const overflowBit = @as(PromotedType, 1) << T.bit_count; + const info = @typeInfo(T).Int; + comptime assert(!info.is_signed); + comptime const PromotedType = std.meta.Int(info.is_signed, info.bits + 1); + comptime const overflowBit = @as(PromotedType, 1) << info.bits; var x = ceilPowerOfTwoPromote(T, value); if (overflowBit & x != 0) { return error.Overflow; @@ -911,7 +912,7 @@ fn testCeilPowerOfTwo() !void { pub fn log2_int(comptime T: type, x: T) Log2Int(T) { assert(x != 0); - return @intCast(Log2Int(T), T.bit_count - 1 - @clz(T, x)); + return @intCast(Log2Int(T), @typeInfo(T).Int.bits - 1 - @clz(T, x)); } pub fn log2_int_ceil(comptime T: type, x: T) Log2Int(T) { @@ -1008,8 +1009,8 @@ test "max value type" { testing.expect(x == 2147483647); } -pub fn mulWide(comptime T: type, a: T, b: T) std.meta.Int(T.is_signed, T.bit_count * 2) { - const ResultInt = std.meta.Int(T.is_signed, T.bit_count * 2); +pub fn mulWide(comptime T: type, a: T, b: T) std.meta.Int(@typeInfo(T).Int.is_signed, @typeInfo(T).Int.bits * 2) { + const ResultInt = std.meta.Int(@typeInfo(T).Int.is_signed, @typeInfo(T).Int.bits * 2); return @as(ResultInt, a) * @as(ResultInt, b); } diff --git a/lib/std/math/big.zig b/lib/std/math/big.zig index 6246a4fb8b..03257e35ea 100644 --- a/lib/std/math/big.zig +++ b/lib/std/math/big.zig @@ -9,14 +9,15 @@ const assert = std.debug.assert; pub const Rational = @import("big/rational.zig").Rational; pub const int = @import("big/int.zig"); pub const Limb = usize; -pub const DoubleLimb = std.meta.IntType(false, 2 * Limb.bit_count); -pub const SignedDoubleLimb = std.meta.IntType(true, DoubleLimb.bit_count); +const limb_info = @typeInfo(Limb).Int; +pub const DoubleLimb = std.meta.IntType(false, 2 * limb_info.bits); +pub const SignedDoubleLimb = std.meta.IntType(true, 2 * limb_info.bits); pub const Log2Limb = std.math.Log2Int(Limb); comptime { - assert(std.math.floorPowerOfTwo(usize, Limb.bit_count) == Limb.bit_count); - assert(Limb.bit_count <= 64); // u128 set is unsupported - assert(Limb.is_signed == false); + assert(std.math.floorPowerOfTwo(usize, limb_info.bits) == limb_info.bits); + assert(limb_info.bits <= 64); // u128 set is unsupported + assert(limb_info.is_signed == false); } test "" { diff --git a/lib/std/math/big/int.zig b/lib/std/math/big/int.zig index 28da1064c9..963fc21f3b 100644 --- a/lib/std/math/big/int.zig +++ b/lib/std/math/big/int.zig @@ -6,6 +6,7 @@ const std = @import("../../std.zig"); const math = std.math; const Limb = std.math.big.Limb; +const limb_bits = @typeInfo(Limb).Int.bits; const DoubleLimb = std.math.big.DoubleLimb; const SignedDoubleLimb = std.math.big.SignedDoubleLimb; const Log2Limb = std.math.big.Log2Limb; @@ -28,7 +29,7 @@ pub fn calcLimbLen(scalar: anytype) usize { }, .ComptimeInt => { const w_value = if (scalar < 0) -scalar else scalar; - return @divFloor(math.log2(w_value), Limb.bit_count) + 1; + return @divFloor(math.log2(w_value), limb_bits) + 1; }, else => @compileError("parameter must be a primitive integer type"), } @@ -54,7 +55,7 @@ pub fn calcSetStringLimbsBufferLen(base: u8, string_len: usize) usize { } pub fn calcSetStringLimbCount(base: u8, string_len: usize) usize { - return (string_len + (Limb.bit_count / base - 1)) / (Limb.bit_count / base); + return (string_len + (limb_bits / base - 1)) / (limb_bits / base); } /// a 
+ b * c + *carry, sets carry to the overflow bits @@ -68,7 +69,7 @@ pub fn addMulLimbWithCarry(a: Limb, b: Limb, c: Limb, carry: *Limb) Limb { // r2 = b * c const bc = @as(DoubleLimb, math.mulWide(Limb, b, c)); const r2 = @truncate(Limb, bc); - const c2 = @truncate(Limb, bc >> Limb.bit_count); + const c2 = @truncate(Limb, bc >> limb_bits); // r1 = r1 + r2 const c3: Limb = @boolToInt(@addWithOverflow(Limb, r1, r2, &r1)); @@ -181,7 +182,7 @@ pub const Mutable = struct { switch (@typeInfo(T)) { .Int => |info| { - const UT = if (T.is_signed) std.meta.Int(false, T.bit_count - 1) else T; + const UT = if (info.is_signed) std.meta.Int(false, info.bits - 1) else T; const needed_limbs = @sizeOf(UT) / @sizeOf(Limb); assert(needed_limbs <= self.limbs.len); // value too big @@ -190,7 +191,7 @@ pub const Mutable = struct { var w_value: UT = if (value < 0) @intCast(UT, -value) else @intCast(UT, value); - if (info.bits <= Limb.bit_count) { + if (info.bits <= limb_bits) { self.limbs[0] = @as(Limb, w_value); self.len += 1; } else { @@ -200,15 +201,15 @@ pub const Mutable = struct { self.len += 1; // TODO: shift == 64 at compile-time fails. Fails on u128 limbs. - w_value >>= Limb.bit_count / 2; - w_value >>= Limb.bit_count / 2; + w_value >>= limb_bits / 2; + w_value >>= limb_bits / 2; } } }, .ComptimeInt => { comptime var w_value = if (value < 0) -value else value; - const req_limbs = @divFloor(math.log2(w_value), Limb.bit_count) + 1; + const req_limbs = @divFloor(math.log2(w_value), limb_bits) + 1; assert(req_limbs <= self.limbs.len); // value too big self.len = req_limbs; @@ -217,14 +218,14 @@ pub const Mutable = struct { if (w_value <= maxInt(Limb)) { self.limbs[0] = w_value; } else { - const mask = (1 << Limb.bit_count) - 1; + const mask = (1 << limb_bits) - 1; comptime var i = 0; inline while (w_value != 0) : (i += 1) { self.limbs[i] = w_value & mask; - w_value >>= Limb.bit_count / 2; - w_value >>= Limb.bit_count / 2; + w_value >>= limb_bits / 2; + w_value >>= limb_bits / 2; } } }, @@ -506,7 +507,7 @@ pub const Mutable = struct { /// `a.limbs.len + (shift / (@sizeOf(Limb) * 8))`. pub fn shiftLeft(r: *Mutable, a: Const, shift: usize) void { llshl(r.limbs[0..], a.limbs[0..a.limbs.len], shift); - r.normalize(a.limbs.len + (shift / Limb.bit_count) + 1); + r.normalize(a.limbs.len + (shift / limb_bits) + 1); r.positive = a.positive; } @@ -516,7 +517,7 @@ pub const Mutable = struct { /// Asserts there is enough memory to fit the result. The upper bound Limb count is /// `a.limbs.len - (shift / (@sizeOf(Limb) * 8))`. pub fn shiftRight(r: *Mutable, a: Const, shift: usize) void { - if (a.limbs.len <= shift / Limb.bit_count) { + if (a.limbs.len <= shift / limb_bits) { r.len = 1; r.positive = true; r.limbs[0] = 0; @@ -524,7 +525,7 @@ pub const Mutable = struct { } const r_len = llshr(r.limbs[0..], a.limbs[0..a.limbs.len], shift); - r.len = a.limbs.len - (shift / Limb.bit_count); + r.len = a.limbs.len - (shift / limb_bits); r.positive = a.positive; } @@ -772,7 +773,7 @@ pub const Mutable = struct { } if (ab_zero_limb_count != 0) { - rem.shiftLeft(rem.toConst(), ab_zero_limb_count * Limb.bit_count); + rem.shiftLeft(rem.toConst(), ab_zero_limb_count * limb_bits); } } @@ -803,10 +804,10 @@ pub const Mutable = struct { }; tmp.limbs[0] = 0; - // Normalize so y > Limb.bit_count / 2 (i.e. leading bit is set) and even + // Normalize so y > limb_bits / 2 (i.e. 
leading bit is set) and even var norm_shift = @clz(Limb, y.limbs[y.len - 1]); if (norm_shift == 0 and y.toConst().isOdd()) { - norm_shift = Limb.bit_count; + norm_shift = limb_bits; } x.shiftLeft(x.toConst(), norm_shift); y.shiftLeft(y.toConst(), norm_shift); @@ -820,7 +821,7 @@ pub const Mutable = struct { mem.set(Limb, q.limbs[0..q.len], 0); // 2. - tmp.shiftLeft(y.toConst(), Limb.bit_count * (n - t)); + tmp.shiftLeft(y.toConst(), limb_bits * (n - t)); while (x.toConst().order(tmp.toConst()) != .lt) { q.limbs[n - t] += 1; x.sub(x.toConst(), tmp.toConst()); @@ -833,7 +834,7 @@ pub const Mutable = struct { if (x.limbs[i] == y.limbs[t]) { q.limbs[i - t - 1] = maxInt(Limb); } else { - const num = (@as(DoubleLimb, x.limbs[i]) << Limb.bit_count) | @as(DoubleLimb, x.limbs[i - 1]); + const num = (@as(DoubleLimb, x.limbs[i]) << limb_bits) | @as(DoubleLimb, x.limbs[i - 1]); const z = @intCast(Limb, num / @as(DoubleLimb, y.limbs[t])); q.limbs[i - t - 1] = if (z > maxInt(Limb)) maxInt(Limb) else @as(Limb, z); } @@ -862,11 +863,11 @@ pub const Mutable = struct { // 3.3 tmp.set(q.limbs[i - t - 1]); tmp.mul(tmp.toConst(), y.toConst(), mul_limb_buf, allocator); - tmp.shiftLeft(tmp.toConst(), Limb.bit_count * (i - t - 1)); + tmp.shiftLeft(tmp.toConst(), limb_bits * (i - t - 1)); x.sub(x.toConst(), tmp.toConst()); if (!x.positive) { - tmp.shiftLeft(y.toConst(), Limb.bit_count * (i - t - 1)); + tmp.shiftLeft(y.toConst(), limb_bits * (i - t - 1)); x.add(x.toConst(), tmp.toConst()); q.limbs[i - t - 1] -= 1; } @@ -949,7 +950,7 @@ pub const Const = struct { /// Returns the number of bits required to represent the absolute value of an integer. pub fn bitCountAbs(self: Const) usize { - return (self.limbs.len - 1) * Limb.bit_count + (Limb.bit_count - @clz(Limb, self.limbs[self.limbs.len - 1])); + return (self.limbs.len - 1) * limb_bits + (limb_bits - @clz(Limb, self.limbs[self.limbs.len - 1])); } /// Returns the number of bits required to represent the integer in twos-complement form. @@ -1019,10 +1020,10 @@ pub const Const = struct { /// Returns an error if self cannot be narrowed into the requested type without truncation. pub fn to(self: Const, comptime T: type) ConvertError!T { switch (@typeInfo(T)) { - .Int => { - const UT = std.meta.Int(false, T.bit_count); + .Int => |info| { + const UT = std.meta.Int(false, info.bits); - if (self.bitCountTwosComp() > T.bit_count) { + if (self.bitCountTwosComp() > info.bits) { return error.TargetTooSmall; } @@ -1033,12 +1034,12 @@ pub const Const = struct { } else { for (self.limbs[0..self.limbs.len]) |_, ri| { const limb = self.limbs[self.limbs.len - ri - 1]; - r <<= Limb.bit_count; + r <<= limb_bits; r |= limb; } } - if (!T.is_signed) { + if (!info.is_signed) { return if (self.positive) @intCast(T, r) else error.NegativeIntoUnsigned; } else { if (self.positive) { @@ -1149,7 +1150,7 @@ pub const Const = struct { outer: for (self.limbs[0..self.limbs.len]) |limb| { var shift: usize = 0; - while (shift < Limb.bit_count) : (shift += base_shift) { + while (shift < limb_bits) : (shift += base_shift) { const r = @intCast(u8, (limb >> @intCast(Log2Limb, shift)) & @as(Limb, base - 1)); const ch = std.fmt.digitToChar(r, uppercase); string[digits_len] = ch; @@ -1295,7 +1296,7 @@ pub const Const = struct { /// Memory is allocated as needed to ensure operations never overflow. The range /// is bounded only by available memory. 
pub const Managed = struct { - pub const sign_bit: usize = 1 << (usize.bit_count - 1); + pub const sign_bit: usize = 1 << (@typeInfo(usize).Int.bits - 1); /// Default number of limbs to allocate on creation of a `Managed`. pub const default_capacity = 4; @@ -1716,7 +1717,7 @@ pub const Managed = struct { /// r = a << shift, in other words, r = a * 2^shift pub fn shiftLeft(r: *Managed, a: Managed, shift: usize) !void { - try r.ensureCapacity(a.len() + (shift / Limb.bit_count) + 1); + try r.ensureCapacity(a.len() + (shift / limb_bits) + 1); var m = r.toMutable(); m.shiftLeft(a.toConst(), shift); r.setMetadata(m.positive, m.len); @@ -1724,13 +1725,13 @@ pub const Managed = struct { /// r = a >> shift pub fn shiftRight(r: *Managed, a: Managed, shift: usize) !void { - if (a.len() <= shift / Limb.bit_count) { + if (a.len() <= shift / limb_bits) { r.metadata = 1; r.limbs[0] = 0; return; } - try r.ensureCapacity(a.len() - (shift / Limb.bit_count)); + try r.ensureCapacity(a.len() - (shift / limb_bits)); var m = r.toMutable(); m.shiftRight(a.toConst(), shift); r.setMetadata(m.positive, m.len); @@ -2021,7 +2022,7 @@ fn lldiv1(quo: []Limb, rem: *Limb, a: []const Limb, b: Limb) void { rem.* = 0; for (a) |_, ri| { const i = a.len - ri - 1; - const pdiv = ((@as(DoubleLimb, rem.*) << Limb.bit_count) | a[i]); + const pdiv = ((@as(DoubleLimb, rem.*) << limb_bits) | a[i]); if (pdiv == 0) { quo[i] = 0; @@ -2042,10 +2043,10 @@ fn lldiv1(quo: []Limb, rem: *Limb, a: []const Limb, b: Limb) void { fn llshl(r: []Limb, a: []const Limb, shift: usize) void { @setRuntimeSafety(debug_safety); assert(a.len >= 1); - assert(r.len >= a.len + (shift / Limb.bit_count) + 1); + assert(r.len >= a.len + (shift / limb_bits) + 1); - const limb_shift = shift / Limb.bit_count + 1; - const interior_limb_shift = @intCast(Log2Limb, shift % Limb.bit_count); + const limb_shift = shift / limb_bits + 1; + const interior_limb_shift = @intCast(Log2Limb, shift % limb_bits); var carry: Limb = 0; var i: usize = 0; @@ -2057,7 +2058,7 @@ fn llshl(r: []Limb, a: []const Limb, shift: usize) void { r[dst_i] = carry | @call(.{ .modifier = .always_inline }, math.shr, .{ Limb, src_digit, - Limb.bit_count - @intCast(Limb, interior_limb_shift), + limb_bits - @intCast(Limb, interior_limb_shift), }); carry = (src_digit << interior_limb_shift); } @@ -2069,10 +2070,10 @@ fn llshl(r: []Limb, a: []const Limb, shift: usize) void { fn llshr(r: []Limb, a: []const Limb, shift: usize) void { @setRuntimeSafety(debug_safety); assert(a.len >= 1); - assert(r.len >= a.len - (shift / Limb.bit_count)); + assert(r.len >= a.len - (shift / limb_bits)); - const limb_shift = shift / Limb.bit_count; - const interior_limb_shift = @intCast(Log2Limb, shift % Limb.bit_count); + const limb_shift = shift / limb_bits; + const interior_limb_shift = @intCast(Log2Limb, shift % limb_bits); var carry: Limb = 0; var i: usize = 0; @@ -2085,7 +2086,7 @@ fn llshr(r: []Limb, a: []const Limb, shift: usize) void { carry = @call(.{ .modifier = .always_inline }, math.shl, .{ Limb, src_digit, - Limb.bit_count - @intCast(Limb, interior_limb_shift), + limb_bits - @intCast(Limb, interior_limb_shift), }); } } @@ -2135,7 +2136,7 @@ fn fixedIntFromSignedDoubleLimb(A: SignedDoubleLimb, storage: []Limb) Mutable { const A_is_positive = A >= 0; const Au = @intCast(DoubleLimb, if (A < 0) -A else A); storage[0] = @truncate(Limb, Au); - storage[1] = @truncate(Limb, Au >> Limb.bit_count); + storage[1] = @truncate(Limb, Au >> limb_bits); return .{ .limbs = storage[0..2], .positive = A_is_positive, diff --git 
a/lib/std/math/big/int_test.zig b/lib/std/math/big/int_test.zig index 5931767a82..9de93e94ac 100644 --- a/lib/std/math/big/int_test.zig +++ b/lib/std/math/big/int_test.zig @@ -23,13 +23,13 @@ test "big.int comptime_int set" { var a = try Managed.initSet(testing.allocator, s); defer a.deinit(); - const s_limb_count = 128 / Limb.bit_count; + const s_limb_count = 128 / @typeInfo(Limb).Int.bits; comptime var i: usize = 0; inline while (i < s_limb_count) : (i += 1) { const result = @as(Limb, s & maxInt(Limb)); - s >>= Limb.bit_count / 2; - s >>= Limb.bit_count / 2; + s >>= @typeInfo(Limb).Int.bits / 2; + s >>= @typeInfo(Limb).Int.bits / 2; testing.expect(a.limbs[i] == result); } } diff --git a/lib/std/math/big/rational.zig b/lib/std/math/big/rational.zig index 5b3c105718..d75a7b599c 100644 --- a/lib/std/math/big/rational.zig +++ b/lib/std/math/big/rational.zig @@ -136,7 +136,7 @@ pub const Rational = struct { // Translated from golang.go/src/math/big/rat.go. debug.assert(@typeInfo(T) == .Float); - const UnsignedInt = std.meta.Int(false, T.bit_count); + const UnsignedInt = std.meta.Int(false, @typeInfo(T).Float.bits); const f_bits = @bitCast(UnsignedInt, f); const exponent_bits = math.floatExponentBits(T); @@ -194,8 +194,8 @@ pub const Rational = struct { // TODO: Indicate whether the result is not exact. debug.assert(@typeInfo(T) == .Float); - const fsize = T.bit_count; - const BitReprType = std.meta.Int(false, T.bit_count); + const fsize = @typeInfo(T).Float.bits; + const BitReprType = std.meta.Int(false, fsize); const msize = math.floatMantissaBits(T); const msize1 = msize + 1; @@ -475,16 +475,18 @@ pub const Rational = struct { fn extractLowBits(a: Int, comptime T: type) T { testing.expect(@typeInfo(T) == .Int); - if (T.bit_count <= Limb.bit_count) { + const t_bits = @typeInfo(T).Int.bits; + const limb_bits = @typeInfo(Limb).Int.bits; + if (t_bits <= limb_bits) { return @truncate(T, a.limbs[0]); } else { var r: T = 0; comptime var i: usize = 0; - // Remainder is always 0 since if T.bit_count >= Limb.bit_count -> Limb | T and both + // Remainder is always 0 since if t_bits >= limb_bits -> Limb | T and both // are powers of two. 
- inline while (i < T.bit_count / Limb.bit_count) : (i += 1) { - r |= math.shl(T, a.limbs[i], i * Limb.bit_count); + inline while (i < t_bits / limb_bits) : (i += 1) { + r |= math.shl(T, a.limbs[i], i * limb_bits); } return r; diff --git a/lib/std/math/cos.zig b/lib/std/math/cos.zig index 3d282c82e1..54d08d12ca 100644 --- a/lib/std/math/cos.zig +++ b/lib/std/math/cos.zig @@ -49,7 +49,7 @@ const pi4c = 2.69515142907905952645E-15; const m4pi = 1.273239544735162542821171882678754627704620361328125; fn cos_(comptime T: type, x_: T) T { - const I = std.meta.Int(true, T.bit_count); + const I = std.meta.Int(true, @typeInfo(T).Float.bits); var x = x_; if (math.isNan(x) or math.isInf(x)) { diff --git a/lib/std/math/pow.zig b/lib/std/math/pow.zig index 30b52acbda..66a371fc3e 100644 --- a/lib/std/math/pow.zig +++ b/lib/std/math/pow.zig @@ -128,7 +128,7 @@ pub fn pow(comptime T: type, x: T, y: T) T { if (yf != 0 and x < 0) { return math.nan(T); } - if (yi >= 1 << (T.bit_count - 1)) { + if (yi >= 1 << (@typeInfo(T).Float.bits - 1)) { return math.exp(y * math.ln(x)); } @@ -150,7 +150,7 @@ pub fn pow(comptime T: type, x: T, y: T) T { var xe = r2.exponent; var x1 = r2.significand; - var i = @floatToInt(std.meta.Int(true, T.bit_count), yi); + var i = @floatToInt(std.meta.Int(true, @typeInfo(T).Float.bits), yi); while (i != 0) : (i >>= 1) { const overflow_shift = math.floatExponentBits(T) + 1; if (xe < -(1 << overflow_shift) or (1 << overflow_shift) < xe) { diff --git a/lib/std/math/sin.zig b/lib/std/math/sin.zig index c7db4f8623..c4a330df5d 100644 --- a/lib/std/math/sin.zig +++ b/lib/std/math/sin.zig @@ -50,7 +50,7 @@ const pi4c = 2.69515142907905952645E-15; const m4pi = 1.273239544735162542821171882678754627704620361328125; fn sin_(comptime T: type, x_: T) T { - const I = std.meta.Int(true, T.bit_count); + const I = std.meta.Int(true, @typeInfo(T).Float.bits); var x = x_; if (x == 0 or math.isNan(x)) { diff --git a/lib/std/math/sqrt.zig b/lib/std/math/sqrt.zig index 34851ca647..1c0b15c3de 100644 --- a/lib/std/math/sqrt.zig +++ b/lib/std/math/sqrt.zig @@ -36,10 +36,10 @@ pub fn sqrt(x: anytype) Sqrt(@TypeOf(x)) { } } -fn sqrt_int(comptime T: type, value: T) std.meta.Int(false, T.bit_count / 2) { +fn sqrt_int(comptime T: type, value: T) std.meta.Int(false, @typeInfo(T).Int.bits / 2) { var op = value; var res: T = 0; - var one: T = 1 << (T.bit_count - 2); + var one: T = 1 << (@typeInfo(T).Int.bits - 2); // "one" starts at the highest power of four <= than the argument. while (one > op) { @@ -55,7 +55,7 @@ fn sqrt_int(comptime T: type, value: T) std.meta.Int(false, T.bit_count / 2) { one >>= 2; } - const ResultType = std.meta.Int(false, T.bit_count / 2); + const ResultType = std.meta.Int(false, @typeInfo(T).Int.bits / 2); return @intCast(ResultType, res); } diff --git a/lib/std/math/tan.zig b/lib/std/math/tan.zig index 5e5a80e15d..358eb8a380 100644 --- a/lib/std/math/tan.zig +++ b/lib/std/math/tan.zig @@ -43,7 +43,7 @@ const pi4c = 2.69515142907905952645E-15; const m4pi = 1.273239544735162542821171882678754627704620361328125; fn tan_(comptime T: type, x_: T) T { - const I = std.meta.Int(true, T.bit_count); + const I = std.meta.Int(true, @typeInfo(T).Float.bits); var x = x_; if (x == 0 or math.isNan(x)) { diff --git a/lib/std/mem.zig b/lib/std/mem.zig index 71190069a8..b10c318635 100644 --- a/lib/std/mem.zig +++ b/lib/std/mem.zig @@ -949,7 +949,7 @@ pub fn readVarInt(comptime ReturnType: type, bytes: []const u8, endian: builtin. /// This function cannot fail and cannot cause undefined behavior. 
/// Assumes the endianness of memory is native. This means the function can /// simply pointer cast memory. -pub fn readIntNative(comptime T: type, bytes: *const [@divExact(T.bit_count, 8)]u8) T { +pub fn readIntNative(comptime T: type, bytes: *const [@divExact(@typeInfo(T).Int.bits, 8)]u8) T { return @ptrCast(*align(1) const T, bytes).*; } @@ -957,7 +957,7 @@ pub fn readIntNative(comptime T: type, bytes: *const [@divExact(T.bit_count, 8)] /// The bit count of T must be evenly divisible by 8. /// This function cannot fail and cannot cause undefined behavior. /// Assumes the endianness of memory is foreign, so it must byte-swap. -pub fn readIntForeign(comptime T: type, bytes: *const [@divExact(T.bit_count, 8)]u8) T { +pub fn readIntForeign(comptime T: type, bytes: *const [@divExact(@typeInfo(T).Int.bits, 8)]u8) T { return @byteSwap(T, readIntNative(T, bytes)); } @@ -971,18 +971,18 @@ pub const readIntBig = switch (builtin.endian) { .Big => readIntNative, }; -/// Asserts that bytes.len >= T.bit_count / 8. Reads the integer starting from index 0 +/// Asserts that bytes.len >= @typeInfo(T).Int.bits / 8. Reads the integer starting from index 0 /// and ignores extra bytes. /// The bit count of T must be evenly divisible by 8. /// Assumes the endianness of memory is native. This means the function can /// simply pointer cast memory. pub fn readIntSliceNative(comptime T: type, bytes: []const u8) T { - const n = @divExact(T.bit_count, 8); + const n = @divExact(@typeInfo(T).Int.bits, 8); assert(bytes.len >= n); return readIntNative(T, bytes[0..n]); } -/// Asserts that bytes.len >= T.bit_count / 8. Reads the integer starting from index 0 +/// Asserts that bytes.len >= @typeInfo(T).Int.bits / 8. Reads the integer starting from index 0 /// and ignores extra bytes. /// The bit count of T must be evenly divisible by 8. /// Assumes the endianness of memory is foreign, so it must byte-swap. @@ -1003,7 +1003,7 @@ pub const readIntSliceBig = switch (builtin.endian) { /// Reads an integer from memory with bit count specified by T. /// The bit count of T must be evenly divisible by 8. /// This function cannot fail and cannot cause undefined behavior. -pub fn readInt(comptime T: type, bytes: *const [@divExact(T.bit_count, 8)]u8, endian: builtin.Endian) T { +pub fn readInt(comptime T: type, bytes: *const [@divExact(@typeInfo(T).Int.bits, 8)]u8, endian: builtin.Endian) T { if (endian == builtin.endian) { return readIntNative(T, bytes); } else { @@ -1011,11 +1011,11 @@ pub fn readInt(comptime T: type, bytes: *const [@divExact(T.bit_count, 8)]u8, en } } -/// Asserts that bytes.len >= T.bit_count / 8. Reads the integer starting from index 0 +/// Asserts that bytes.len >= @typeInfo(T).Int.bits / 8. Reads the integer starting from index 0 /// and ignores extra bytes. /// The bit count of T must be evenly divisible by 8. pub fn readIntSlice(comptime T: type, bytes: []const u8, endian: builtin.Endian) T { - const n = @divExact(T.bit_count, 8); + const n = @divExact(@typeInfo(T).Int.bits, 8); assert(bytes.len >= n); return readInt(T, bytes[0..n], endian); } @@ -1060,7 +1060,7 @@ test "readIntBig and readIntLittle" { /// accepts any integer bit width. /// This function stores in native endian, which means it is implemented as a simple /// memory store. 
-pub fn writeIntNative(comptime T: type, buf: *[(T.bit_count + 7) / 8]u8, value: T) void { +pub fn writeIntNative(comptime T: type, buf: *[(@typeInfo(T).Int.bits + 7) / 8]u8, value: T) void { @ptrCast(*align(1) T, buf).* = value; } @@ -1068,7 +1068,7 @@ pub fn writeIntNative(comptime T: type, buf: *[(T.bit_count + 7) / 8]u8, value: /// This function always succeeds, has defined behavior for all inputs, but /// the integer bit width must be divisible by 8. /// This function stores in foreign endian, which means it does a @byteSwap first. -pub fn writeIntForeign(comptime T: type, buf: *[@divExact(T.bit_count, 8)]u8, value: T) void { +pub fn writeIntForeign(comptime T: type, buf: *[@divExact(@typeInfo(T).Int.bits, 8)]u8, value: T) void { writeIntNative(T, buf, @byteSwap(T, value)); } @@ -1085,7 +1085,7 @@ pub const writeIntBig = switch (builtin.endian) { /// Writes an integer to memory, storing it in twos-complement. /// This function always succeeds, has defined behavior for all inputs, but /// the integer bit width must be divisible by 8. -pub fn writeInt(comptime T: type, buffer: *[@divExact(T.bit_count, 8)]u8, value: T, endian: builtin.Endian) void { +pub fn writeInt(comptime T: type, buffer: *[@divExact(@typeInfo(T).Int.bits, 8)]u8, value: T, endian: builtin.Endian) void { if (endian == builtin.endian) { return writeIntNative(T, buffer, value); } else { @@ -1094,19 +1094,19 @@ pub fn writeInt(comptime T: type, buffer: *[@divExact(T.bit_count, 8)]u8, value: } /// Writes a twos-complement little-endian integer to memory. -/// Asserts that buf.len >= T.bit_count / 8. +/// Asserts that buf.len >= @typeInfo(T).Int.bits / 8. /// The bit count of T must be divisible by 8. /// Any extra bytes in buffer after writing the integer are set to zero. To /// avoid the branch to check for extra buffer bytes, use writeIntLittle /// instead. pub fn writeIntSliceLittle(comptime T: type, buffer: []u8, value: T) void { - assert(buffer.len >= @divExact(T.bit_count, 8)); + assert(buffer.len >= @divExact(@typeInfo(T).Int.bits, 8)); - if (T.bit_count == 0) + if (@typeInfo(T).Int.bits == 0) return set(u8, buffer, 0); // TODO I want to call writeIntLittle here but comptime eval facilities aren't good enough - const uint = std.meta.Int(false, T.bit_count); + const uint = std.meta.Int(false, @typeInfo(T).Int.bits); var bits = @truncate(uint, value); for (buffer) |*b| { b.* = @truncate(u8, bits); @@ -1115,18 +1115,18 @@ pub fn writeIntSliceLittle(comptime T: type, buffer: []u8, value: T) void { } /// Writes a twos-complement big-endian integer to memory. -/// Asserts that buffer.len >= T.bit_count / 8. +/// Asserts that buffer.len >= @typeInfo(T).Int.bits / 8. /// The bit count of T must be divisible by 8. /// Any extra bytes in buffer before writing the integer are set to zero. To /// avoid the branch to check for extra buffer bytes, use writeIntBig instead. 
pub fn writeIntSliceBig(comptime T: type, buffer: []u8, value: T) void { - assert(buffer.len >= @divExact(T.bit_count, 8)); + assert(buffer.len >= @divExact(@typeInfo(T).Int.bits, 8)); - if (T.bit_count == 0) + if (@typeInfo(T).Int.bits == 0) return set(u8, buffer, 0); // TODO I want to call writeIntBig here but comptime eval facilities aren't good enough - const uint = std.meta.Int(false, T.bit_count); + const uint = std.meta.Int(false, @typeInfo(T).Int.bits); var bits = @truncate(uint, value); var index: usize = buffer.len; while (index != 0) { @@ -1147,13 +1147,13 @@ pub const writeIntSliceForeign = switch (builtin.endian) { }; /// Writes a twos-complement integer to memory, with the specified endianness. -/// Asserts that buf.len >= T.bit_count / 8. +/// Asserts that buf.len >= @typeInfo(T).Int.bits / 8. /// The bit count of T must be evenly divisible by 8. /// Any extra bytes in buffer not part of the integer are set to zero, with /// respect to endianness. To avoid the branch to check for extra buffer bytes, /// use writeInt instead. pub fn writeIntSlice(comptime T: type, buffer: []u8, value: T, endian: builtin.Endian) void { - comptime assert(T.bit_count % 8 == 0); + comptime assert(@typeInfo(T).Int.bits % 8 == 0); return switch (endian) { .Little => writeIntSliceLittle(T, buffer, value), .Big => writeIntSliceBig(T, buffer, value), diff --git a/lib/std/mem/Allocator.zig b/lib/std/mem/Allocator.zig index bb59de2a7e..34f1ae9098 100644 --- a/lib/std/mem/Allocator.zig +++ b/lib/std/mem/Allocator.zig @@ -167,11 +167,11 @@ pub fn create(self: *Allocator, comptime T: type) Error!*T { /// `ptr` should be the return value of `create`, or otherwise /// have the same address and alignment property. pub fn destroy(self: *Allocator, ptr: anytype) void { - const T = @TypeOf(ptr).Child; + const info = @typeInfo(@TypeOf(ptr)).Pointer; + const T = info.child; if (@sizeOf(T) == 0) return; const non_const_ptr = @intToPtr([*]u8, @ptrToInt(ptr)); - const ptr_align = @typeInfo(@TypeOf(ptr)).Pointer.alignment; - _ = self.shrinkBytes(non_const_ptr[0..@sizeOf(T)], ptr_align, 0, 0, @returnAddress()); + _ = self.shrinkBytes(non_const_ptr[0..@sizeOf(T)], info.alignment, 0, 0, @returnAddress()); } /// Allocates an array of `n` items of type `T` and sets all the diff --git a/lib/std/os.zig b/lib/std/os.zig index e8431c386b..81c059c220 100644 --- a/lib/std/os.zig +++ b/lib/std/os.zig @@ -4504,7 +4504,7 @@ pub fn res_mkquery( // Make a reasonably unpredictable id var ts: timespec = undefined; clock_gettime(CLOCK_REALTIME, &ts) catch {}; - const UInt = std.meta.Int(false, @TypeOf(ts.tv_nsec).bit_count); + const UInt = std.meta.Int(false, std.meta.bitCount(@TypeOf(ts.tv_nsec))); const unsec = @bitCast(UInt, ts.tv_nsec); const id = @truncate(u32, unsec + unsec / 65536); q[0] = @truncate(u8, id / 256); diff --git a/lib/std/os/bits/linux.zig b/lib/std/os/bits/linux.zig index 1327eaa330..eff1cc7c02 100644 --- a/lib/std/os/bits/linux.zig +++ b/lib/std/os/bits/linux.zig @@ -846,7 +846,7 @@ pub const SIG_ERR = @intToPtr(?Sigaction.sigaction_fn, maxInt(usize)); pub const SIG_DFL = @intToPtr(?Sigaction.sigaction_fn, 0); pub const SIG_IGN = @intToPtr(?Sigaction.sigaction_fn, 1); -pub const empty_sigset = [_]u32{0} ** sigset_t.len; +pub const empty_sigset = [_]u32{0} ** @typeInfo(sigset_t).Array.len; pub const signalfd_siginfo = extern struct { signo: u32, diff --git a/lib/std/os/linux.zig b/lib/std/os/linux.zig index 13094b3a3a..134b117e85 100644 --- a/lib/std/os/linux.zig +++ b/lib/std/os/linux.zig @@ -815,17 +815,19 @@ pub 
fn sigaction(sig: u6, noalias act: *const Sigaction, noalias oact: ?*Sigacti return 0; } +const usize_bits = @typeInfo(usize).Int.bits; + pub fn sigaddset(set: *sigset_t, sig: u6) void { const s = sig - 1; // shift in musl: s&8*sizeof *set->__bits-1 - const shift = @intCast(u5, s & (usize.bit_count - 1)); + const shift = @intCast(u5, s & (usize_bits - 1)); const val = @intCast(u32, 1) << shift; - (set.*)[@intCast(usize, s) / usize.bit_count] |= val; + (set.*)[@intCast(usize, s) / usize_bits] |= val; } pub fn sigismember(set: *const sigset_t, sig: u6) bool { const s = sig - 1; - return ((set.*)[@intCast(usize, s) / usize.bit_count] & (@intCast(usize, 1) << (s & (usize.bit_count - 1)))) != 0; + return ((set.*)[@intCast(usize, s) / usize_bits] & (@intCast(usize, 1) << (s & (usize_bits - 1)))) != 0; } pub fn getsockname(fd: i32, noalias addr: *sockaddr, noalias len: *socklen_t) usize { diff --git a/lib/std/os/windows/ws2_32.zig b/lib/std/os/windows/ws2_32.zig index cfc212d15a..ac21b6ffc9 100644 --- a/lib/std/os/windows/ws2_32.zig +++ b/lib/std/os/windows/ws2_32.zig @@ -12,7 +12,7 @@ pub const SOCKET_ERROR = -1; pub const WSADESCRIPTION_LEN = 256; pub const WSASYS_STATUS_LEN = 128; -pub const WSADATA = if (usize.bit_count == u64.bit_count) +pub const WSADATA = if (@sizeOf(usize) == @sizeOf(u64)) extern struct { wVersion: WORD, wHighVersion: WORD, diff --git a/lib/std/rand.zig b/lib/std/rand.zig index 7988efffc9..7e05592869 100644 --- a/lib/std/rand.zig +++ b/lib/std/rand.zig @@ -51,8 +51,9 @@ pub const Random = struct { /// Returns a random int `i` such that `0 <= i <= maxInt(T)`. /// `i` is evenly distributed. pub fn int(r: *Random, comptime T: type) T { - const UnsignedT = std.meta.Int(false, T.bit_count); - const ByteAlignedT = std.meta.Int(false, @divTrunc(T.bit_count + 7, 8) * 8); + const bits = @typeInfo(T).Int.bits; + const UnsignedT = std.meta.Int(false, bits); + const ByteAlignedT = std.meta.Int(false, @divTrunc(bits + 7, 8) * 8); var rand_bytes: [@sizeOf(ByteAlignedT)]u8 = undefined; r.bytes(rand_bytes[0..]); @@ -68,10 +69,11 @@ pub const Random = struct { /// Constant-time implementation off `uintLessThan`. /// The results of this function may be biased. pub fn uintLessThanBiased(r: *Random, comptime T: type, less_than: T) T { - comptime assert(T.is_signed == false); - comptime assert(T.bit_count <= 64); // TODO: workaround: LLVM ERROR: Unsupported library call operation! + comptime assert(@typeInfo(T).Int.is_signed == false); + const bits = @typeInfo(T).Int.bits; + comptime assert(bits <= 64); // TODO: workaround: LLVM ERROR: Unsupported library call operation! assert(0 < less_than); - if (T.bit_count <= 32) { + if (bits <= 32) { return @intCast(T, limitRangeBiased(u32, r.int(u32), less_than)); } else { return @intCast(T, limitRangeBiased(u64, r.int(u64), less_than)); @@ -87,13 +89,15 @@ pub const Random = struct { /// this function is guaranteed to return. /// If you need deterministic runtime bounds, use `uintLessThanBiased`. pub fn uintLessThan(r: *Random, comptime T: type, less_than: T) T { - comptime assert(T.is_signed == false); - comptime assert(T.bit_count <= 64); // TODO: workaround: LLVM ERROR: Unsupported library call operation! + comptime assert(@typeInfo(T).Int.is_signed == false); + const bits = @typeInfo(T).Int.bits; + comptime assert(bits <= 64); // TODO: workaround: LLVM ERROR: Unsupported library call operation! 
assert(0 < less_than); // Small is typically u32 - const Small = std.meta.Int(false, @divTrunc(T.bit_count + 31, 32) * 32); + const small_bits = @divTrunc(bits + 31, 32) * 32; + const Small = std.meta.Int(false, small_bits); // Large is typically u64 - const Large = std.meta.Int(false, Small.bit_count * 2); + const Large = std.meta.Int(false, small_bits * 2); // adapted from: // http://www.pcg-random.org/posts/bounded-rands.html @@ -105,7 +109,7 @@ pub const Random = struct { // TODO: workaround for https://github.com/ziglang/zig/issues/1770 // should be: // var t: Small = -%less_than; - var t: Small = @bitCast(Small, -%@bitCast(std.meta.Int(true, Small.bit_count), @as(Small, less_than))); + var t: Small = @bitCast(Small, -%@bitCast(std.meta.Int(true, small_bits), @as(Small, less_than))); if (t >= less_than) { t -= less_than; @@ -119,13 +123,13 @@ pub const Random = struct { l = @truncate(Small, m); } } - return @intCast(T, m >> Small.bit_count); + return @intCast(T, m >> small_bits); } /// Constant-time implementation off `uintAtMost`. /// The results of this function may be biased. pub fn uintAtMostBiased(r: *Random, comptime T: type, at_most: T) T { - assert(T.is_signed == false); + assert(@typeInfo(T).Int.is_signed == false); if (at_most == maxInt(T)) { // have the full range return r.int(T); @@ -137,7 +141,7 @@ pub const Random = struct { /// See `uintLessThan`, which this function uses in most cases, /// for commentary on the runtime of this function. pub fn uintAtMost(r: *Random, comptime T: type, at_most: T) T { - assert(T.is_signed == false); + assert(@typeInfo(T).Int.is_signed == false); if (at_most == maxInt(T)) { // have the full range return r.int(T); @@ -149,9 +153,10 @@ pub const Random = struct { /// The results of this function may be biased. pub fn intRangeLessThanBiased(r: *Random, comptime T: type, at_least: T, less_than: T) T { assert(at_least < less_than); - if (T.is_signed) { + const info = @typeInfo(T).Int; + if (info.is_signed) { // Two's complement makes this math pretty easy. - const UnsignedT = std.meta.Int(false, T.bit_count); + const UnsignedT = std.meta.Int(false, info.bits); const lo = @bitCast(UnsignedT, at_least); const hi = @bitCast(UnsignedT, less_than); const result = lo +% r.uintLessThanBiased(UnsignedT, hi -% lo); @@ -167,9 +172,10 @@ pub const Random = struct { /// for commentary on the runtime of this function. pub fn intRangeLessThan(r: *Random, comptime T: type, at_least: T, less_than: T) T { assert(at_least < less_than); - if (T.is_signed) { + const info = @typeInfo(T).Int; + if (info.is_signed) { // Two's complement makes this math pretty easy. - const UnsignedT = std.meta.Int(false, T.bit_count); + const UnsignedT = std.meta.Int(false, info.bits); const lo = @bitCast(UnsignedT, at_least); const hi = @bitCast(UnsignedT, less_than); const result = lo +% r.uintLessThan(UnsignedT, hi -% lo); @@ -184,9 +190,10 @@ pub const Random = struct { /// The results of this function may be biased. pub fn intRangeAtMostBiased(r: *Random, comptime T: type, at_least: T, at_most: T) T { assert(at_least <= at_most); - if (T.is_signed) { + const info = @typeInfo(T).Int; + if (info.is_signed) { // Two's complement makes this math pretty easy. 
- const UnsignedT = std.meta.Int(false, T.bit_count); + const UnsignedT = std.meta.Int(false, info.bits); const lo = @bitCast(UnsignedT, at_least); const hi = @bitCast(UnsignedT, at_most); const result = lo +% r.uintAtMostBiased(UnsignedT, hi -% lo); @@ -202,9 +209,10 @@ pub const Random = struct { /// for commentary on the runtime of this function. pub fn intRangeAtMost(r: *Random, comptime T: type, at_least: T, at_most: T) T { assert(at_least <= at_most); - if (T.is_signed) { + const info = @typeInfo(T).Int; + if (info.is_signed) { // Two's complement makes this math pretty easy. - const UnsignedT = std.meta.Int(false, T.bit_count); + const UnsignedT = std.meta.Int(false, info.bits); const lo = @bitCast(UnsignedT, at_least); const hi = @bitCast(UnsignedT, at_most); const result = lo +% r.uintAtMost(UnsignedT, hi -% lo); @@ -280,14 +288,15 @@ pub const Random = struct { /// into an integer 0 <= result < less_than. /// This function introduces a minor bias. pub fn limitRangeBiased(comptime T: type, random_int: T, less_than: T) T { - comptime assert(T.is_signed == false); - const T2 = std.meta.Int(false, T.bit_count * 2); + comptime assert(@typeInfo(T).Int.is_signed == false); + const bits = @typeInfo(T).Int.bits; + const T2 = std.meta.Int(false, bits * 2); // adapted from: // http://www.pcg-random.org/posts/bounded-rands.html // "Integer Multiplication (Biased)" var m: T2 = @as(T2, random_int) * @as(T2, less_than); - return @intCast(T, m >> T.bit_count); + return @intCast(T, m >> bits); } const SequentialPrng = struct { diff --git a/lib/std/special/build_runner.zig b/lib/std/special/build_runner.zig index 46d3b0b615..3ab74a11a2 100644 --- a/lib/std/special/build_runner.zig +++ b/lib/std/special/build_runner.zig @@ -133,7 +133,7 @@ pub fn main() !void { } fn runBuild(builder: *Builder) anyerror!void { - switch (@typeInfo(@TypeOf(root.build).ReturnType)) { + switch (@typeInfo(@typeInfo(@TypeOf(root.build)).Fn.return_type.?)) { .Void => root.build(builder), .ErrorUnion => try root.build(builder), else => @compileError("expected return type of build to be 'void' or '!void'"), diff --git a/lib/std/special/c.zig b/lib/std/special/c.zig index d5903ece02..ce8d1c29cc 100644 --- a/lib/std/special/c.zig +++ b/lib/std/special/c.zig @@ -516,11 +516,12 @@ export fn roundf(a: f32) f32 { fn generic_fmod(comptime T: type, x: T, y: T) T { @setRuntimeSafety(false); - const uint = std.meta.Int(false, T.bit_count); + const bits = @typeInfo(T).Float.bits; + const uint = std.meta.Int(false, bits); const log2uint = math.Log2Int(uint); const digits = if (T == f32) 23 else 52; const exp_bits = if (T == f32) 9 else 12; - const bits_minus_1 = T.bit_count - 1; + const bits_minus_1 = bits - 1; const mask = if (T == f32) 0xff else 0x7ff; var ux = @bitCast(uint, x); var uy = @bitCast(uint, y); diff --git a/lib/std/special/compiler_rt/addXf3.zig b/lib/std/special/compiler_rt/addXf3.zig index 6dd0faaebb..da1238010e 100644 --- a/lib/std/special/compiler_rt/addXf3.zig +++ b/lib/std/special/compiler_rt/addXf3.zig @@ -59,23 +59,25 @@ pub fn __aeabi_dsub(a: f64, b: f64) callconv(.AAPCS) f64 { } // TODO: restore inline keyword, see: https://github.com/ziglang/zig/issues/2154 -fn normalize(comptime T: type, significand: *std.meta.Int(false, T.bit_count)) i32 { - const Z = std.meta.Int(false, T.bit_count); - const S = std.meta.Int(false, T.bit_count - @clz(Z, @as(Z, T.bit_count) - 1)); +fn normalize(comptime T: type, significand: *std.meta.Int(false, @typeInfo(T).Float.bits)) i32 { + const bits = @typeInfo(T).Float.bits; + const Z 
= std.meta.Int(false, bits); + const S = std.meta.Int(false, bits - @clz(Z, @as(Z, bits) - 1)); const significandBits = std.math.floatMantissaBits(T); const implicitBit = @as(Z, 1) << significandBits; - const shift = @clz(std.meta.Int(false, T.bit_count), significand.*) - @clz(Z, implicitBit); + const shift = @clz(std.meta.Int(false, bits), significand.*) - @clz(Z, implicitBit); significand.* <<= @intCast(S, shift); return 1 - shift; } // TODO: restore inline keyword, see: https://github.com/ziglang/zig/issues/2154 fn addXf3(comptime T: type, a: T, b: T) T { - const Z = std.meta.Int(false, T.bit_count); - const S = std.meta.Int(false, T.bit_count - @clz(Z, @as(Z, T.bit_count) - 1)); + const bits = @typeInfo(T).Float.bits; + const Z = std.meta.Int(false, bits); + const S = std.meta.Int(false, bits - @clz(Z, @as(Z, bits) - 1)); - const typeWidth = T.bit_count; + const typeWidth = bits; const significandBits = std.math.floatMantissaBits(T); const exponentBits = std.math.floatExponentBits(T); @@ -187,7 +189,7 @@ fn addXf3(comptime T: type, a: T, b: T) T { // If partial cancellation occured, we need to left-shift the result // and adjust the exponent: if (aSignificand < implicitBit << 3) { - const shift = @intCast(i32, @clz(Z, aSignificand)) - @intCast(i32, @clz(std.meta.Int(false, T.bit_count), implicitBit << 3)); + const shift = @intCast(i32, @clz(Z, aSignificand)) - @intCast(i32, @clz(std.meta.Int(false, bits), implicitBit << 3)); aSignificand <<= @intCast(S, shift); aExponent -= shift; } diff --git a/lib/std/special/compiler_rt/aulldiv.zig b/lib/std/special/compiler_rt/aulldiv.zig index cf9b26c5a6..321ff288bb 100644 --- a/lib/std/special/compiler_rt/aulldiv.zig +++ b/lib/std/special/compiler_rt/aulldiv.zig @@ -7,8 +7,8 @@ const builtin = @import("builtin"); pub fn _alldiv(a: i64, b: i64) callconv(.Stdcall) i64 { @setRuntimeSafety(builtin.is_test); - const s_a = a >> (i64.bit_count - 1); - const s_b = b >> (i64.bit_count - 1); + const s_a = a >> (64 - 1); + const s_b = b >> (64 - 1); const an = (a ^ s_a) -% s_a; const bn = (b ^ s_b) -% s_b; diff --git a/lib/std/special/compiler_rt/aullrem.zig b/lib/std/special/compiler_rt/aullrem.zig index 7c981cc088..a14eb99be3 100644 --- a/lib/std/special/compiler_rt/aullrem.zig +++ b/lib/std/special/compiler_rt/aullrem.zig @@ -7,8 +7,8 @@ const builtin = @import("builtin"); pub fn _allrem(a: i64, b: i64) callconv(.Stdcall) i64 { @setRuntimeSafety(builtin.is_test); - const s_a = a >> (i64.bit_count - 1); - const s_b = b >> (i64.bit_count - 1); + const s_a = a >> (64 - 1); + const s_b = b >> (64 - 1); const an = (a ^ s_a) -% s_a; const bn = (b ^ s_b) -% s_b; diff --git a/lib/std/special/compiler_rt/compareXf2.zig b/lib/std/special/compiler_rt/compareXf2.zig index f50dc67474..05af1e533c 100644 --- a/lib/std/special/compiler_rt/compareXf2.zig +++ b/lib/std/special/compiler_rt/compareXf2.zig @@ -27,8 +27,9 @@ const GE = extern enum(i32) { pub fn cmp(comptime T: type, comptime RT: type, a: T, b: T) RT { @setRuntimeSafety(builtin.is_test); - const srep_t = std.meta.Int(true, T.bit_count); - const rep_t = std.meta.Int(false, T.bit_count); + const bits = @typeInfo(T).Float.bits; + const srep_t = std.meta.Int(true, bits); + const rep_t = std.meta.Int(false, bits); const significandBits = std.math.floatMantissaBits(T); const exponentBits = std.math.floatExponentBits(T); @@ -73,7 +74,7 @@ pub fn cmp(comptime T: type, comptime RT: type, a: T, b: T) RT { pub fn unordcmp(comptime T: type, a: T, b: T) i32 { @setRuntimeSafety(builtin.is_test); - const rep_t = 
std.meta.Int(false, T.bit_count); + const rep_t = std.meta.Int(false, @typeInfo(T).Float.bits); const significandBits = std.math.floatMantissaBits(T); const exponentBits = std.math.floatExponentBits(T); diff --git a/lib/std/special/compiler_rt/divdf3.zig b/lib/std/special/compiler_rt/divdf3.zig index ad72f96057..11ede3af66 100644 --- a/lib/std/special/compiler_rt/divdf3.zig +++ b/lib/std/special/compiler_rt/divdf3.zig @@ -12,10 +12,9 @@ const builtin = @import("builtin"); pub fn __divdf3(a: f64, b: f64) callconv(.C) f64 { @setRuntimeSafety(builtin.is_test); - const Z = std.meta.Int(false, f64.bit_count); - const SignedZ = std.meta.Int(true, f64.bit_count); + const Z = std.meta.Int(false, 64); + const SignedZ = std.meta.Int(true, 64); - const typeWidth = f64.bit_count; const significandBits = std.math.floatMantissaBits(f64); const exponentBits = std.math.floatExponentBits(f64); @@ -317,9 +316,9 @@ pub fn wideMultiply(comptime Z: type, a: Z, b: Z, hi: *Z, lo: *Z) void { } } -pub fn normalize(comptime T: type, significand: *std.meta.Int(false, T.bit_count)) i32 { +pub fn normalize(comptime T: type, significand: *std.meta.Int(false, @typeInfo(T).Float.bits)) i32 { @setRuntimeSafety(builtin.is_test); - const Z = std.meta.Int(false, T.bit_count); + const Z = std.meta.Int(false, @typeInfo(T).Float.bits); const significandBits = std.math.floatMantissaBits(T); const implicitBit = @as(Z, 1) << significandBits; diff --git a/lib/std/special/compiler_rt/divsf3.zig b/lib/std/special/compiler_rt/divsf3.zig index 80af806eb1..13f4d8e68d 100644 --- a/lib/std/special/compiler_rt/divsf3.zig +++ b/lib/std/special/compiler_rt/divsf3.zig @@ -12,9 +12,8 @@ const builtin = @import("builtin"); pub fn __divsf3(a: f32, b: f32) callconv(.C) f32 { @setRuntimeSafety(builtin.is_test); - const Z = std.meta.Int(false, f32.bit_count); + const Z = std.meta.Int(false, 32); - const typeWidth = f32.bit_count; const significandBits = std.math.floatMantissaBits(f32); const exponentBits = std.math.floatExponentBits(f32); @@ -190,9 +189,9 @@ pub fn __divsf3(a: f32, b: f32) callconv(.C) f32 { } } -fn normalize(comptime T: type, significand: *std.meta.Int(false, T.bit_count)) i32 { +fn normalize(comptime T: type, significand: *std.meta.Int(false, @typeInfo(T).Float.bits)) i32 { @setRuntimeSafety(builtin.is_test); - const Z = std.meta.Int(false, T.bit_count); + const Z = std.meta.Int(false, @typeInfo(T).Float.bits); const significandBits = std.math.floatMantissaBits(T); const implicitBit = @as(Z, 1) << significandBits; diff --git a/lib/std/special/compiler_rt/divtf3.zig b/lib/std/special/compiler_rt/divtf3.zig index f6f7c1bf7d..0582400ce3 100644 --- a/lib/std/special/compiler_rt/divtf3.zig +++ b/lib/std/special/compiler_rt/divtf3.zig @@ -11,10 +11,9 @@ const wideMultiply = @import("divdf3.zig").wideMultiply; pub fn __divtf3(a: f128, b: f128) callconv(.C) f128 { @setRuntimeSafety(builtin.is_test); - const Z = std.meta.Int(false, f128.bit_count); - const SignedZ = std.meta.Int(true, f128.bit_count); + const Z = std.meta.Int(false, 128); + const SignedZ = std.meta.Int(true, 128); - const typeWidth = f128.bit_count; const significandBits = std.math.floatMantissaBits(f128); const exponentBits = std.math.floatExponentBits(f128); diff --git a/lib/std/special/compiler_rt/divti3.zig b/lib/std/special/compiler_rt/divti3.zig index 4b7d459991..a065111510 100644 --- a/lib/std/special/compiler_rt/divti3.zig +++ b/lib/std/special/compiler_rt/divti3.zig @@ -9,8 +9,8 @@ const builtin = @import("builtin"); pub fn __divti3(a: i128, b: i128) callconv(.C) 
i128 { @setRuntimeSafety(builtin.is_test); - const s_a = a >> (i128.bit_count - 1); - const s_b = b >> (i128.bit_count - 1); + const s_a = a >> (128 - 1); + const s_b = b >> (128 - 1); const an = (a ^ s_a) -% s_a; const bn = (b ^ s_b) -% s_b; diff --git a/lib/std/special/compiler_rt/fixint.zig b/lib/std/special/compiler_rt/fixint.zig index 0bf0c8be1e..1512641be4 100644 --- a/lib/std/special/compiler_rt/fixint.zig +++ b/lib/std/special/compiler_rt/fixint.zig @@ -28,7 +28,7 @@ pub fn fixint(comptime fp_t: type, comptime fixint_t: type, a: fp_t) fixint_t { else => unreachable, }; - const typeWidth = rep_t.bit_count; + const typeWidth = @typeInfo(rep_t).Int.bits; const exponentBits = (typeWidth - significandBits - 1); const signBit = (@as(rep_t, 1) << (significandBits + exponentBits)); const maxExponent = ((1 << exponentBits) - 1); @@ -50,12 +50,13 @@ pub fn fixint(comptime fp_t: type, comptime fixint_t: type, a: fp_t) fixint_t { if (exponent < 0) return 0; // The unsigned result needs to be large enough to handle an fixint_t or rep_t - const fixuint_t = std.meta.Int(false, fixint_t.bit_count); - const UintResultType = if (fixint_t.bit_count > rep_t.bit_count) fixuint_t else rep_t; + const fixint_bits = @typeInfo(fixint_t).Int.bits; + const fixuint_t = std.meta.Int(false, fixint_bits); + const UintResultType = if (fixint_bits > typeWidth) fixuint_t else rep_t; var uint_result: UintResultType = undefined; // If the value is too large for the integer type, saturate. - if (@intCast(usize, exponent) >= fixint_t.bit_count) { + if (@intCast(usize, exponent) >= fixint_bits) { return if (negative) @as(fixint_t, minInt(fixint_t)) else @as(fixint_t, maxInt(fixint_t)); } diff --git a/lib/std/special/compiler_rt/fixuint.zig b/lib/std/special/compiler_rt/fixuint.zig index 01eb03baa5..3f2d661244 100644 --- a/lib/std/special/compiler_rt/fixuint.zig +++ b/lib/std/special/compiler_rt/fixuint.zig @@ -15,14 +15,14 @@ pub fn fixuint(comptime fp_t: type, comptime fixuint_t: type, a: fp_t) fixuint_t f128 => u128, else => unreachable, }; - const srep_t = @import("std").meta.Int(true, rep_t.bit_count); + const typeWidth = @typeInfo(rep_t).Int.bits; + const srep_t = @import("std").meta.Int(true, typeWidth); const significandBits = switch (fp_t) { f32 => 23, f64 => 52, f128 => 112, else => unreachable, }; - const typeWidth = rep_t.bit_count; const exponentBits = (typeWidth - significandBits - 1); const signBit = (@as(rep_t, 1) << (significandBits + exponentBits)); const maxExponent = ((1 << exponentBits) - 1); @@ -44,7 +44,7 @@ pub fn fixuint(comptime fp_t: type, comptime fixuint_t: type, a: fp_t) fixuint_t if (sign == -1 or exponent < 0) return 0; // If the value is too large for the integer type, saturate. - if (@intCast(c_uint, exponent) >= fixuint_t.bit_count) return ~@as(fixuint_t, 0); + if (@intCast(c_uint, exponent) >= @typeInfo(fixuint_t).Int.bits) return ~@as(fixuint_t, 0); // If 0 <= exponent < significandBits, right shift to get the result. // Otherwise, shift left. 
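
The hunks above all follow the same pattern: integer widths that were previously read from the now-removed `T.bit_count` declaration are instead queried through `@typeInfo(T).Int.bits` (or `std.meta.bitCount`). The following is a minimal standalone sketch of that idiom, not part of this patch, with `bitWidth` and `byteLen` as illustrative helper names:

    const std = @import("std");

    // Width of an integer type, queried via @typeInfo instead of the
    // removed `T.bit_count` declaration.
    fn bitWidth(comptime T: type) comptime_int {
        return @typeInfo(T).Int.bits;
    }

    // Smallest byte buffer that can hold a T; the same expression the
    // io.Reader/io.Writer hunks above now use.
    fn byteLen(comptime T: type) comptime_int {
        return (bitWidth(T) + 7) / 8;
    }

    test "integer width via @typeInfo" {
        std.testing.expect(bitWidth(u21) == 21);
        std.testing.expect(byteLen(u21) == 3);
        var buf: [byteLen(u32)]u8 = undefined;
        std.mem.writeIntLittle(u32, &buf, 0xdeadbeef);
        std.testing.expect(std.mem.readIntLittle(u32, &buf) == 0xdeadbeef);
    }
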
diff --git a/lib/std/special/compiler_rt/floatXisf.zig b/lib/std/special/compiler_rt/floatXisf.zig index 650b948396..134a1eba61 100644 --- a/lib/std/special/compiler_rt/floatXisf.zig +++ b/lib/std/special/compiler_rt/floatXisf.zig @@ -12,15 +12,16 @@ const FLT_MANT_DIG = 24; fn __floatXisf(comptime T: type, arg: T) f32 { @setRuntimeSafety(builtin.is_test); - const Z = std.meta.Int(false, T.bit_count); - const S = std.meta.Int(false, T.bit_count - @clz(Z, @as(Z, T.bit_count) - 1)); + const bits = @typeInfo(T).Int.bits; + const Z = std.meta.Int(false, bits); + const S = std.meta.Int(false, bits - @clz(Z, @as(Z, bits) - 1)); if (arg == 0) { return @as(f32, 0.0); } var ai = arg; - const N: u32 = T.bit_count; + const N: u32 = bits; const si = ai >> @intCast(S, (N - 1)); ai = ((ai ^ si) -% si); var a = @bitCast(Z, ai); @@ -66,7 +67,7 @@ fn __floatXisf(comptime T: type, arg: T) f32 { // a is now rounded to FLT_MANT_DIG bits } - const s = @bitCast(Z, arg) >> (T.bit_count - 32); + const s = @bitCast(Z, arg) >> (@typeInfo(T).Int.bits - 32); const r = (@intCast(u32, s) & 0x80000000) | // sign (@intCast(u32, (e + 127)) << 23) | // exponent (@truncate(u32, a) & 0x007fffff); // mantissa-high diff --git a/lib/std/special/compiler_rt/floatsiXf.zig b/lib/std/special/compiler_rt/floatsiXf.zig index 75db3d7040..b6ce36b6f7 100644 --- a/lib/std/special/compiler_rt/floatsiXf.zig +++ b/lib/std/special/compiler_rt/floatsiXf.zig @@ -10,8 +10,9 @@ const maxInt = std.math.maxInt; fn floatsiXf(comptime T: type, a: i32) T { @setRuntimeSafety(builtin.is_test); - const Z = std.meta.Int(false, T.bit_count); - const S = std.meta.Int(false, T.bit_count - @clz(Z, @as(Z, T.bit_count) - 1)); + const bits = @typeInfo(T).Float.bits; + const Z = std.meta.Int(false, bits); + const S = std.meta.Int(false, bits - @clz(Z, @as(Z, bits) - 1)); if (a == 0) { return @as(T, 0.0); @@ -22,7 +23,7 @@ fn floatsiXf(comptime T: type, a: i32) T { const exponentBias = ((1 << exponentBits - 1) - 1); const implicitBit = @as(Z, 1) << significandBits; - const signBit = @as(Z, 1 << Z.bit_count - 1); + const signBit = @as(Z, 1 << bits - 1); const sign = a >> 31; // Take absolute value of a via abs(x) = (x^(x >> 31)) - (x >> 31). 
diff --git a/lib/std/special/compiler_rt/floatundisf.zig b/lib/std/special/compiler_rt/floatundisf.zig index b580ec91fd..67cd53b21c 100644 --- a/lib/std/special/compiler_rt/floatundisf.zig +++ b/lib/std/special/compiler_rt/floatundisf.zig @@ -15,7 +15,7 @@ pub fn __floatundisf(arg: u64) callconv(.C) f32 { if (arg == 0) return 0; var a = arg; - const N: usize = @TypeOf(a).bit_count; + const N: usize = @typeInfo(@TypeOf(a)).Int.bits; // Number of significant digits const sd = N - @clz(u64, a); // 8 exponent diff --git a/lib/std/special/compiler_rt/floatunditf.zig b/lib/std/special/compiler_rt/floatunditf.zig index 90191c6388..014a479c5f 100644 --- a/lib/std/special/compiler_rt/floatunditf.zig +++ b/lib/std/special/compiler_rt/floatunditf.zig @@ -19,7 +19,7 @@ pub fn __floatunditf(a: u64) callconv(.C) f128 { const exponent_bias = (1 << (exponent_bits - 1)) - 1; const implicit_bit = 1 << mantissa_bits; - const exp: u128 = (u64.bit_count - 1) - @clz(u64, a); + const exp: u128 = (64 - 1) - @clz(u64, a); const shift: u7 = mantissa_bits - @intCast(u7, exp); var result: u128 = (@intCast(u128, a) << shift) ^ implicit_bit; diff --git a/lib/std/special/compiler_rt/floatunsitf.zig b/lib/std/special/compiler_rt/floatunsitf.zig index ceb55f12c8..f59446abac 100644 --- a/lib/std/special/compiler_rt/floatunsitf.zig +++ b/lib/std/special/compiler_rt/floatunsitf.zig @@ -19,7 +19,7 @@ pub fn __floatunsitf(a: u64) callconv(.C) f128 { const exponent_bias = (1 << (exponent_bits - 1)) - 1; const implicit_bit = 1 << mantissa_bits; - const exp = (u64.bit_count - 1) - @clz(u64, a); + const exp = (64 - 1) - @clz(u64, a); const shift = mantissa_bits - @intCast(u7, exp); // TODO(#1148): @bitCast alignment error diff --git a/lib/std/special/compiler_rt/int.zig b/lib/std/special/compiler_rt/int.zig index 141c4e52c1..1fb2c263e1 100644 --- a/lib/std/special/compiler_rt/int.zig +++ b/lib/std/special/compiler_rt/int.zig @@ -219,7 +219,7 @@ fn test_one_divsi3(a: i32, b: i32, expected_q: i32) void { pub fn __udivsi3(n: u32, d: u32) callconv(.C) u32 { @setRuntimeSafety(builtin.is_test); - const n_uword_bits: c_uint = u32.bit_count; + const n_uword_bits: c_uint = 32; // special cases if (d == 0) return 0; // ?! if (n == 0) return 0; diff --git a/lib/std/special/compiler_rt/modti3.zig b/lib/std/special/compiler_rt/modti3.zig index 1f859c2329..9c3de44395 100644 --- a/lib/std/special/compiler_rt/modti3.zig +++ b/lib/std/special/compiler_rt/modti3.zig @@ -14,8 +14,8 @@ const compiler_rt = @import("../compiler_rt.zig"); pub fn __modti3(a: i128, b: i128) callconv(.C) i128 { @setRuntimeSafety(builtin.is_test); - const s_a = a >> (i128.bit_count - 1); // s = a < 0 ? -1 : 0 - const s_b = b >> (i128.bit_count - 1); // s = b < 0 ? -1 : 0 + const s_a = a >> (128 - 1); // s = a < 0 ? -1 : 0 + const s_b = b >> (128 - 1); // s = b < 0 ? 
-1 : 0 const an = (a ^ s_a) -% s_a; // negate if s == -1 const bn = (b ^ s_b) -% s_b; // negate if s == -1 diff --git a/lib/std/special/compiler_rt/mulXf3.zig b/lib/std/special/compiler_rt/mulXf3.zig index b6984ebbb6..40b5b4f658 100644 --- a/lib/std/special/compiler_rt/mulXf3.zig +++ b/lib/std/special/compiler_rt/mulXf3.zig @@ -33,9 +33,9 @@ pub fn __aeabi_dmul(a: f64, b: f64) callconv(.C) f64 { fn mulXf3(comptime T: type, a: T, b: T) T { @setRuntimeSafety(builtin.is_test); - const Z = std.meta.Int(false, T.bit_count); + const typeWidth = @typeInfo(T).Float.bits; + const Z = std.meta.Int(false, typeWidth); - const typeWidth = T.bit_count; const significandBits = std.math.floatMantissaBits(T); const exponentBits = std.math.floatExponentBits(T); @@ -269,9 +269,9 @@ fn wideMultiply(comptime Z: type, a: Z, b: Z, hi: *Z, lo: *Z) void { } } -fn normalize(comptime T: type, significand: *std.meta.Int(false, T.bit_count)) i32 { +fn normalize(comptime T: type, significand: *std.meta.Int(false, @typeInfo(T).Float.bits)) i32 { @setRuntimeSafety(builtin.is_test); - const Z = std.meta.Int(false, T.bit_count); + const Z = std.meta.Int(false, @typeInfo(T).Float.bits); const significandBits = std.math.floatMantissaBits(T); const implicitBit = @as(Z, 1) << significandBits; @@ -282,7 +282,7 @@ fn normalize(comptime T: type, significand: *std.meta.Int(false, T.bit_count)) i fn wideRightShiftWithSticky(comptime Z: type, hi: *Z, lo: *Z, count: u32) void { @setRuntimeSafety(builtin.is_test); - const typeWidth = Z.bit_count; + const typeWidth = @typeInfo(Z).Int.bits; const S = std.math.Log2Int(Z); if (count < typeWidth) { const sticky = @truncate(u8, lo.* << @intCast(S, typeWidth -% count)); diff --git a/lib/std/special/compiler_rt/mulodi4.zig b/lib/std/special/compiler_rt/mulodi4.zig index b05931e937..fab345fa47 100644 --- a/lib/std/special/compiler_rt/mulodi4.zig +++ b/lib/std/special/compiler_rt/mulodi4.zig @@ -11,7 +11,7 @@ const minInt = std.math.minInt; pub fn __mulodi4(a: i64, b: i64, overflow: *c_int) callconv(.C) i64 { @setRuntimeSafety(builtin.is_test); - const min = @bitCast(i64, @as(u64, 1 << (i64.bit_count - 1))); + const min = @bitCast(i64, @as(u64, 1 << (64 - 1))); const max = ~min; overflow.* = 0; diff --git a/lib/std/special/compiler_rt/muloti4.zig b/lib/std/special/compiler_rt/muloti4.zig index 4beafa3e15..b1ad82da29 100644 --- a/lib/std/special/compiler_rt/muloti4.zig +++ b/lib/std/special/compiler_rt/muloti4.zig @@ -9,7 +9,7 @@ const compiler_rt = @import("../compiler_rt.zig"); pub fn __muloti4(a: i128, b: i128, overflow: *c_int) callconv(.C) i128 { @setRuntimeSafety(builtin.is_test); - const min = @bitCast(i128, @as(u128, 1 << (i128.bit_count - 1))); + const min = @bitCast(i128, @as(u128, 1 << (128 - 1))); const max = ~min; overflow.* = 0; @@ -27,9 +27,9 @@ pub fn __muloti4(a: i128, b: i128, overflow: *c_int) callconv(.C) i128 { return r; } - const sa = a >> (i128.bit_count - 1); + const sa = a >> (128 - 1); const abs_a = (a ^ sa) -% sa; - const sb = b >> (i128.bit_count - 1); + const sb = b >> (128 - 1); const abs_b = (b ^ sb) -% sb; if (abs_a < 2 or abs_b < 2) { diff --git a/lib/std/special/compiler_rt/negXf2.zig b/lib/std/special/compiler_rt/negXf2.zig index 11f9e401e9..ae01e10776 100644 --- a/lib/std/special/compiler_rt/negXf2.zig +++ b/lib/std/special/compiler_rt/negXf2.zig @@ -24,9 +24,8 @@ pub fn __aeabi_dneg(arg: f64) callconv(.AAPCS) f64 { } fn negXf2(comptime T: type, a: T) T { - const Z = std.meta.Int(false, T.bit_count); + const Z = std.meta.Int(false, @typeInfo(T).Float.bits); - 
const typeWidth = T.bit_count; const significandBits = std.math.floatMantissaBits(T); const exponentBits = std.math.floatExponentBits(T); diff --git a/lib/std/special/compiler_rt/shift.zig b/lib/std/special/compiler_rt/shift.zig index 1609cb115c..acb14c969a 100644 --- a/lib/std/special/compiler_rt/shift.zig +++ b/lib/std/special/compiler_rt/shift.zig @@ -9,8 +9,9 @@ const Log2Int = std.math.Log2Int; fn Dwords(comptime T: type, comptime signed_half: bool) type { return extern union { - pub const HalfTU = std.meta.Int(false, @divExact(T.bit_count, 2)); - pub const HalfTS = std.meta.Int(true, @divExact(T.bit_count, 2)); + pub const bits = @divExact(@typeInfo(T).Int.bits, 2); + pub const HalfTU = std.meta.Int(false, bits); + pub const HalfTS = std.meta.Int(true, bits); pub const HalfT = if (signed_half) HalfTS else HalfTU; all: T, @@ -30,15 +31,15 @@ pub fn ashlXi3(comptime T: type, a: T, b: i32) T { const input = dwords{ .all = a }; var output: dwords = undefined; - if (b >= dwords.HalfT.bit_count) { + if (b >= dwords.bits) { output.s.low = 0; - output.s.high = input.s.low << @intCast(S, b - dwords.HalfT.bit_count); + output.s.high = input.s.low << @intCast(S, b - dwords.bits); } else if (b == 0) { return a; } else { output.s.low = input.s.low << @intCast(S, b); output.s.high = input.s.high << @intCast(S, b); - output.s.high |= input.s.low >> @intCast(S, dwords.HalfT.bit_count - b); + output.s.high |= input.s.low >> @intCast(S, dwords.bits - b); } return output.all; @@ -53,14 +54,14 @@ pub fn ashrXi3(comptime T: type, a: T, b: i32) T { const input = dwords{ .all = a }; var output: dwords = undefined; - if (b >= dwords.HalfT.bit_count) { - output.s.high = input.s.high >> (dwords.HalfT.bit_count - 1); - output.s.low = input.s.high >> @intCast(S, b - dwords.HalfT.bit_count); + if (b >= dwords.bits) { + output.s.high = input.s.high >> (dwords.bits - 1); + output.s.low = input.s.high >> @intCast(S, b - dwords.bits); } else if (b == 0) { return a; } else { output.s.high = input.s.high >> @intCast(S, b); - output.s.low = input.s.high << @intCast(S, dwords.HalfT.bit_count - b); + output.s.low = input.s.high << @intCast(S, dwords.bits - b); // Avoid sign-extension here output.s.low |= @bitCast( dwords.HalfT, @@ -80,14 +81,14 @@ pub fn lshrXi3(comptime T: type, a: T, b: i32) T { const input = dwords{ .all = a }; var output: dwords = undefined; - if (b >= dwords.HalfT.bit_count) { + if (b >= dwords.bits) { output.s.high = 0; - output.s.low = input.s.high >> @intCast(S, b - dwords.HalfT.bit_count); + output.s.low = input.s.high >> @intCast(S, b - dwords.bits); } else if (b == 0) { return a; } else { output.s.high = input.s.high >> @intCast(S, b); - output.s.low = input.s.high << @intCast(S, dwords.HalfT.bit_count - b); + output.s.low = input.s.high << @intCast(S, dwords.bits - b); output.s.low |= input.s.low >> @intCast(S, b); } diff --git a/lib/std/special/compiler_rt/truncXfYf2.zig b/lib/std/special/compiler_rt/truncXfYf2.zig index e096e7e4f0..b5823607ea 100644 --- a/lib/std/special/compiler_rt/truncXfYf2.zig +++ b/lib/std/special/compiler_rt/truncXfYf2.zig @@ -50,7 +50,7 @@ fn truncXfYf2(comptime dst_t: type, comptime src_t: type, a: src_t) dst_t { // Various constants whose values follow from the type parameters. // Any reasonable optimizer will fold and propagate all of these. 
- const srcBits = src_t.bit_count; + const srcBits = @typeInfo(src_t).Float.bits; const srcExpBits = srcBits - srcSigBits - 1; const srcInfExp = (1 << srcExpBits) - 1; const srcExpBias = srcInfExp >> 1; @@ -65,7 +65,7 @@ fn truncXfYf2(comptime dst_t: type, comptime src_t: type, a: src_t) dst_t { const srcQNaN = 1 << (srcSigBits - 1); const srcNaNCode = srcQNaN - 1; - const dstBits = dst_t.bit_count; + const dstBits = @typeInfo(dst_t).Float.bits; const dstExpBits = dstBits - dstSigBits - 1; const dstInfExp = (1 << dstExpBits) - 1; const dstExpBias = dstInfExp >> 1; diff --git a/lib/std/special/compiler_rt/udivmod.zig b/lib/std/special/compiler_rt/udivmod.zig index 2836f34c85..f8c7e1298b 100644 --- a/lib/std/special/compiler_rt/udivmod.zig +++ b/lib/std/special/compiler_rt/udivmod.zig @@ -15,8 +15,10 @@ const high = 1 - low; pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem: ?*DoubleInt) DoubleInt { @setRuntimeSafety(is_test); - const SingleInt = @import("std").meta.Int(false, @divExact(DoubleInt.bit_count, 2)); - const SignedDoubleInt = @import("std").meta.Int(true, DoubleInt.bit_count); + const double_int_bits = @typeInfo(DoubleInt).Int.bits; + const single_int_bits = @divExact(double_int_bits, 2); + const SingleInt = @import("std").meta.Int(false, single_int_bits); + const SignedDoubleInt = @import("std").meta.Int(true, double_int_bits); const Log2SingleInt = @import("std").math.Log2Int(SingleInt); const n = @ptrCast(*const [2]SingleInt, &a).*; // TODO issue #421 @@ -82,21 +84,21 @@ pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem: // --- // K 0 sr = @bitCast(c_uint, @as(c_int, @clz(SingleInt, d[high])) - @as(c_int, @clz(SingleInt, n[high]))); - // 0 <= sr <= SingleInt.bit_count - 2 or sr large - if (sr > SingleInt.bit_count - 2) { + // 0 <= sr <= single_int_bits - 2 or sr large + if (sr > single_int_bits - 2) { if (maybe_rem) |rem| { rem.* = a; } return 0; } sr += 1; - // 1 <= sr <= SingleInt.bit_count - 1 - // q.all = a << (DoubleInt.bit_count - sr); + // 1 <= sr <= single_int_bits - 1 + // q.all = a << (double_int_bits - sr); q[low] = 0; - q[high] = n[low] << @intCast(Log2SingleInt, SingleInt.bit_count - sr); + q[high] = n[low] << @intCast(Log2SingleInt, single_int_bits - sr); // r.all = a >> sr; r[high] = n[high] >> @intCast(Log2SingleInt, sr); - r[low] = (n[high] << @intCast(Log2SingleInt, SingleInt.bit_count - sr)) | (n[low] >> @intCast(Log2SingleInt, sr)); + r[low] = (n[high] << @intCast(Log2SingleInt, single_int_bits - sr)) | (n[low] >> @intCast(Log2SingleInt, sr)); } else { // d[low] != 0 if (d[high] == 0) { @@ -113,74 +115,74 @@ pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem: } sr = @ctz(SingleInt, d[low]); q[high] = n[high] >> @intCast(Log2SingleInt, sr); - q[low] = (n[high] << @intCast(Log2SingleInt, SingleInt.bit_count - sr)) | (n[low] >> @intCast(Log2SingleInt, sr)); + q[low] = (n[high] << @intCast(Log2SingleInt, single_int_bits - sr)) | (n[low] >> @intCast(Log2SingleInt, sr)); return @ptrCast(*align(@alignOf(SingleInt)) DoubleInt, &q[0]).*; // TODO issue #421 } // K X // --- // 0 K - sr = 1 + SingleInt.bit_count + @as(c_uint, @clz(SingleInt, d[low])) - @as(c_uint, @clz(SingleInt, n[high])); - // 2 <= sr <= DoubleInt.bit_count - 1 - // q.all = a << (DoubleInt.bit_count - sr); + sr = 1 + single_int_bits + @as(c_uint, @clz(SingleInt, d[low])) - @as(c_uint, @clz(SingleInt, n[high])); + // 2 <= sr <= double_int_bits - 1 + // q.all = a << (double_int_bits - sr); // r.all = a >> sr; - if (sr 
== SingleInt.bit_count) { + if (sr == single_int_bits) { q[low] = 0; q[high] = n[low]; r[high] = 0; r[low] = n[high]; - } else if (sr < SingleInt.bit_count) { - // 2 <= sr <= SingleInt.bit_count - 1 + } else if (sr < single_int_bits) { + // 2 <= sr <= single_int_bits - 1 q[low] = 0; - q[high] = n[low] << @intCast(Log2SingleInt, SingleInt.bit_count - sr); + q[high] = n[low] << @intCast(Log2SingleInt, single_int_bits - sr); r[high] = n[high] >> @intCast(Log2SingleInt, sr); - r[low] = (n[high] << @intCast(Log2SingleInt, SingleInt.bit_count - sr)) | (n[low] >> @intCast(Log2SingleInt, sr)); + r[low] = (n[high] << @intCast(Log2SingleInt, single_int_bits - sr)) | (n[low] >> @intCast(Log2SingleInt, sr)); } else { - // SingleInt.bit_count + 1 <= sr <= DoubleInt.bit_count - 1 - q[low] = n[low] << @intCast(Log2SingleInt, DoubleInt.bit_count - sr); - q[high] = (n[high] << @intCast(Log2SingleInt, DoubleInt.bit_count - sr)) | (n[low] >> @intCast(Log2SingleInt, sr - SingleInt.bit_count)); + // single_int_bits + 1 <= sr <= double_int_bits - 1 + q[low] = n[low] << @intCast(Log2SingleInt, double_int_bits - sr); + q[high] = (n[high] << @intCast(Log2SingleInt, double_int_bits - sr)) | (n[low] >> @intCast(Log2SingleInt, sr - single_int_bits)); r[high] = 0; - r[low] = n[high] >> @intCast(Log2SingleInt, sr - SingleInt.bit_count); + r[low] = n[high] >> @intCast(Log2SingleInt, sr - single_int_bits); } } else { // K X // --- // K K sr = @bitCast(c_uint, @as(c_int, @clz(SingleInt, d[high])) - @as(c_int, @clz(SingleInt, n[high]))); - // 0 <= sr <= SingleInt.bit_count - 1 or sr large - if (sr > SingleInt.bit_count - 1) { + // 0 <= sr <= single_int_bits - 1 or sr large + if (sr > single_int_bits - 1) { if (maybe_rem) |rem| { rem.* = a; } return 0; } sr += 1; - // 1 <= sr <= SingleInt.bit_count - // q.all = a << (DoubleInt.bit_count - sr); + // 1 <= sr <= single_int_bits + // q.all = a << (double_int_bits - sr); // r.all = a >> sr; q[low] = 0; - if (sr == SingleInt.bit_count) { + if (sr == single_int_bits) { q[high] = n[low]; r[high] = 0; r[low] = n[high]; } else { r[high] = n[high] >> @intCast(Log2SingleInt, sr); - r[low] = (n[high] << @intCast(Log2SingleInt, SingleInt.bit_count - sr)) | (n[low] >> @intCast(Log2SingleInt, sr)); - q[high] = n[low] << @intCast(Log2SingleInt, SingleInt.bit_count - sr); + r[low] = (n[high] << @intCast(Log2SingleInt, single_int_bits - sr)) | (n[low] >> @intCast(Log2SingleInt, sr)); + q[high] = n[low] << @intCast(Log2SingleInt, single_int_bits - sr); } } } // Not a special case // q and r are initialized with: - // q.all = a << (DoubleInt.bit_count - sr); + // q.all = a << (double_int_bits - sr); // r.all = a >> sr; - // 1 <= sr <= DoubleInt.bit_count - 1 + // 1 <= sr <= double_int_bits - 1 var carry: u32 = 0; var r_all: DoubleInt = undefined; while (sr > 0) : (sr -= 1) { // r:q = ((r:q) << 1) | carry - r[high] = (r[high] << 1) | (r[low] >> (SingleInt.bit_count - 1)); - r[low] = (r[low] << 1) | (q[high] >> (SingleInt.bit_count - 1)); - q[high] = (q[high] << 1) | (q[low] >> (SingleInt.bit_count - 1)); + r[high] = (r[high] << 1) | (r[low] >> (single_int_bits - 1)); + r[low] = (r[low] << 1) | (q[high] >> (single_int_bits - 1)); + q[high] = (q[high] << 1) | (q[low] >> (single_int_bits - 1)); q[low] = (q[low] << 1) | carry; // carry = 0; // if (r.all >= b) @@ -189,7 +191,7 @@ pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem: // carry = 1; // } r_all = @ptrCast(*align(@alignOf(SingleInt)) DoubleInt, &r[0]).*; // TODO issue #421 - const s: SignedDoubleInt = 
@bitCast(SignedDoubleInt, b -% r_all -% 1) >> (DoubleInt.bit_count - 1); + const s: SignedDoubleInt = @bitCast(SignedDoubleInt, b -% r_all -% 1) >> (double_int_bits - 1); carry = @intCast(u32, s & 1); r_all -= b & @bitCast(DoubleInt, s); r = @ptrCast(*[2]SingleInt, &r_all).*; // TODO issue #421 diff --git a/lib/std/start.zig b/lib/std/start.zig index 8e443a7c77..e04b2a3320 100644 --- a/lib/std/start.zig +++ b/lib/std/start.zig @@ -239,7 +239,7 @@ fn callMainAsync(loop: *std.event.Loop) callconv(.Async) u8 { // This is not marked inline because it is called with @asyncCall when // there is an event loop. pub fn callMain() u8 { - switch (@typeInfo(@TypeOf(root.main).ReturnType)) { + switch (@typeInfo(@typeInfo(@TypeOf(root.main)).Fn.return_type.?)) { .NoReturn => { root.main(); }, diff --git a/lib/std/thread.zig b/lib/std/thread.zig index d73907690e..330c425dd6 100644 --- a/lib/std/thread.zig +++ b/lib/std/thread.zig @@ -166,7 +166,7 @@ pub const Thread = struct { fn threadMain(raw_arg: windows.LPVOID) callconv(.C) windows.DWORD { const arg = if (@sizeOf(Context) == 0) {} else @ptrCast(*Context, @alignCast(@alignOf(Context), raw_arg)).*; - switch (@typeInfo(@TypeOf(startFn).ReturnType)) { + switch (@typeInfo(@typeInfo(@TypeOf(startFn)).Fn.return_type.?)) { .NoReturn => { startFn(arg); }, @@ -227,7 +227,7 @@ pub const Thread = struct { fn linuxThreadMain(ctx_addr: usize) callconv(.C) u8 { const arg = if (@sizeOf(Context) == 0) {} else @intToPtr(*const Context, ctx_addr).*; - switch (@typeInfo(@TypeOf(startFn).ReturnType)) { + switch (@typeInfo(@typeInfo(@TypeOf(startFn)).Fn.return_type.?)) { .NoReturn => { startFn(arg); }, @@ -259,7 +259,7 @@ pub const Thread = struct { fn posixThreadMain(ctx: ?*c_void) callconv(.C) ?*c_void { const arg = if (@sizeOf(Context) == 0) {} else @ptrCast(*Context, @alignCast(@alignOf(Context), ctx)).*; - switch (@typeInfo(@TypeOf(startFn).ReturnType)) { + switch (@typeInfo(@typeInfo(@TypeOf(startFn)).Fn.return_type.?)) { .NoReturn => { startFn(arg); }, diff --git a/lib/std/zig.zig b/lib/std/zig.zig index e86a12884f..1dedce4067 100644 --- a/lib/std/zig.zig +++ b/lib/std/zig.zig @@ -22,7 +22,7 @@ pub const SrcHash = [16]u8; /// If it is long, blake3 hash is computed. 
pub fn hashSrc(src: []const u8) SrcHash { var out: SrcHash = undefined; - if (src.len <= SrcHash.len) { + if (src.len <= @typeInfo(SrcHash).Array.len) { std.mem.copy(u8, &out, src); std.mem.set(u8, out[src.len..], 0); } else { diff --git a/test/stage1/behavior/align.zig b/test/stage1/behavior/align.zig index 62f439d6df..0a0cc3bcc0 100644 --- a/test/stage1/behavior/align.zig +++ b/test/stage1/behavior/align.zig @@ -5,7 +5,7 @@ const builtin = @import("builtin"); var foo: u8 align(4) = 100; test "global variable alignment" { - comptime expect(@TypeOf(&foo).alignment == 4); + comptime expect(@typeInfo(@TypeOf(&foo)).Pointer.alignment == 4); comptime expect(@TypeOf(&foo) == *align(4) u8); { const slice = @as(*[1]u8, &foo)[0..]; diff --git a/test/stage1/behavior/array.zig b/test/stage1/behavior/array.zig index d5ca44f0a2..9e1d2cbac4 100644 --- a/test/stage1/behavior/array.zig +++ b/test/stage1/behavior/array.zig @@ -136,16 +136,6 @@ test "array literal with specified size" { expect(array[1] == 2); } -test "array child property" { - var x: [5]i32 = undefined; - expect(@TypeOf(x).Child == i32); -} - -test "array len property" { - var x: [5]i32 = undefined; - expect(@TypeOf(x).len == 5); -} - test "array len field" { var arr = [4]u8{ 0, 0, 0, 0 }; var ptr = &arr; diff --git a/test/stage1/behavior/async_fn.zig b/test/stage1/behavior/async_fn.zig index 807e4c6275..e2cececf69 100644 --- a/test/stage1/behavior/async_fn.zig +++ b/test/stage1/behavior/async_fn.zig @@ -331,7 +331,7 @@ test "async fn with inferred error set" { fn doTheTest() void { var frame: [1]@Frame(middle) = undefined; var fn_ptr = middle; - var result: @TypeOf(fn_ptr).ReturnType.ErrorSet!void = undefined; + var result: @typeInfo(@typeInfo(@TypeOf(fn_ptr)).Fn.return_type.?).ErrorUnion.error_set!void = undefined; _ = @asyncCall(std.mem.sliceAsBytes(frame[0..]), &result, fn_ptr, .{}); resume global_frame; std.testing.expectError(error.Fail, result); @@ -950,7 +950,7 @@ test "@asyncCall with comptime-known function, but not awaited directly" { fn doTheTest() void { var frame: [1]@Frame(middle) = undefined; - var result: @TypeOf(middle).ReturnType.ErrorSet!void = undefined; + var result: @typeInfo(@typeInfo(@TypeOf(middle)).Fn.return_type.?).ErrorUnion.error_set!void = undefined; _ = @asyncCall(std.mem.sliceAsBytes(frame[0..]), &result, middle, .{}); resume global_frame; std.testing.expectError(error.Fail, result); @@ -1018,7 +1018,7 @@ test "@TypeOf an async function call of generic fn with error union type" { const S = struct { fn func(comptime x: anytype) anyerror!i32 { const T = @TypeOf(async func(x)); - comptime expect(T == @TypeOf(@frame()).Child); + comptime expect(T == @typeInfo(@TypeOf(@frame())).Pointer.child); return undefined; } }; diff --git a/test/stage1/behavior/bit_shifting.zig b/test/stage1/behavior/bit_shifting.zig index 7306acba4a..786cef0802 100644 --- a/test/stage1/behavior/bit_shifting.zig +++ b/test/stage1/behavior/bit_shifting.zig @@ -2,16 +2,18 @@ const std = @import("std"); const expect = std.testing.expect; fn ShardedTable(comptime Key: type, comptime mask_bit_count: comptime_int, comptime V: type) type { - expect(Key == std.meta.Int(false, Key.bit_count)); - expect(Key.bit_count >= mask_bit_count); + const key_bits = @typeInfo(Key).Int.bits; + expect(Key == std.meta.Int(false, key_bits)); + expect(key_bits >= mask_bit_count); + const shard_key_bits = mask_bit_count; const ShardKey = std.meta.Int(false, mask_bit_count); - const shift_amount = Key.bit_count - ShardKey.bit_count; + const shift_amount = key_bits - 
shard_key_bits; return struct { const Self = @This(); - shards: [1 << ShardKey.bit_count]?*Node, + shards: [1 << shard_key_bits]?*Node, pub fn create() Self { - return Self{ .shards = [_]?*Node{null} ** (1 << ShardKey.bit_count) }; + return Self{ .shards = [_]?*Node{null} ** (1 << shard_key_bits) }; } fn getShardKey(key: Key) ShardKey { diff --git a/test/stage1/behavior/bugs/5487.zig b/test/stage1/behavior/bugs/5487.zig index 05967b6de4..02fa677a44 100644 --- a/test/stage1/behavior/bugs/5487.zig +++ b/test/stage1/behavior/bugs/5487.zig @@ -3,8 +3,8 @@ const io = @import("std").io; pub fn write(_: void, bytes: []const u8) !usize { return 0; } -pub fn outStream() io.OutStream(void, @TypeOf(write).ReturnType.ErrorSet, write) { - return io.OutStream(void, @TypeOf(write).ReturnType.ErrorSet, write){ .context = {} }; +pub fn outStream() io.OutStream(void, @typeInfo(@typeInfo(@TypeOf(write)).Fn.return_type.?).ErrorUnion.error_set, write) { + return io.OutStream(void, @typeInfo(@typeInfo(@TypeOf(write)).Fn.return_type.?).ErrorUnion.error_set, write){ .context = {} }; } test "crash" { diff --git a/test/stage1/behavior/error.zig b/test/stage1/behavior/error.zig index 975e08b04f..9635f2870c 100644 --- a/test/stage1/behavior/error.zig +++ b/test/stage1/behavior/error.zig @@ -84,8 +84,8 @@ fn testErrorUnionType() void { const x: anyerror!i32 = 1234; if (x) |value| expect(value == 1234) else |_| unreachable; expect(@typeInfo(@TypeOf(x)) == .ErrorUnion); - expect(@typeInfo(@TypeOf(x).ErrorSet) == .ErrorSet); - expect(@TypeOf(x).ErrorSet == anyerror); + expect(@typeInfo(@typeInfo(@TypeOf(x)).ErrorUnion.error_set) == .ErrorSet); + expect(@typeInfo(@TypeOf(x)).ErrorUnion.error_set == anyerror); } test "error set type" { diff --git a/test/stage1/behavior/misc.zig b/test/stage1/behavior/misc.zig index 57a9ba2576..a71d6f86f3 100644 --- a/test/stage1/behavior/misc.zig +++ b/test/stage1/behavior/misc.zig @@ -24,12 +24,6 @@ test "call disabled extern fn" { disabledExternFn(); } -test "floating point primitive bit counts" { - expect(f16.bit_count == 16); - expect(f32.bit_count == 32); - expect(f64.bit_count == 64); -} - test "short circuit" { testShortCircuit(false, true); comptime testShortCircuit(false, true); @@ -577,10 +571,6 @@ test "slice string literal has correct type" { comptime expect(@TypeOf(array[runtime_zero..]) == []const i32); } -test "pointer child field" { - expect((*u32).Child == u32); -} - test "struct inside function" { testStructInFn(); comptime testStructInFn(); diff --git a/test/stage1/behavior/reflection.zig b/test/stage1/behavior/reflection.zig index ab0a55092c..6d1c341713 100644 --- a/test/stage1/behavior/reflection.zig +++ b/test/stage1/behavior/reflection.zig @@ -2,23 +2,15 @@ const expect = @import("std").testing.expect; const mem = @import("std").mem; const reflection = @This(); -test "reflection: array, pointer, optional, error union type child" { - comptime { - expect(([10]u8).Child == u8); - expect((*u8).Child == u8); - expect((anyerror!u8).Payload == u8); - expect((?u8).Child == u8); - } -} - test "reflection: function return type, var args, and param types" { comptime { - expect(@TypeOf(dummy).ReturnType == i32); - expect(!@TypeOf(dummy).is_var_args); - expect(@TypeOf(dummy).arg_count == 3); - expect(@typeInfo(@TypeOf(dummy)).Fn.args[0].arg_type.? == bool); - expect(@typeInfo(@TypeOf(dummy)).Fn.args[1].arg_type.? == i32); - expect(@typeInfo(@TypeOf(dummy)).Fn.args[2].arg_type.? == f32); + const info = @typeInfo(@TypeOf(dummy)).Fn; + expect(info.return_type.? 
== i32); + expect(!info.is_var_args); + expect(info.args.len == 3); + expect(info.args[0].arg_type.? == bool); + expect(info.args[1].arg_type.? == i32); + expect(info.args[2].arg_type.? == f32); } } From 969547902b49d6b21af762fb24ed591789b9d2a4 Mon Sep 17 00:00:00 2001 From: LemonBoy Date: Thu, 3 Sep 2020 10:22:35 +0200 Subject: [PATCH 29/56] std: Fix silent overflow in float parsing code A u64 can only hold 19 decimal digits, adjust the limit. --- lib/std/fmt/parse_float.zig | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/std/fmt/parse_float.zig b/lib/std/fmt/parse_float.zig index 69557714f6..7e72e7ba51 100644 --- a/lib/std/fmt/parse_float.zig +++ b/lib/std/fmt/parse_float.zig @@ -37,7 +37,9 @@ const std = @import("../std.zig"); const ascii = std.ascii; -const max_digits = 25; +// The mantissa field in FloatRepr is 64bit wide and holds only 19 digits +// without overflowing +const max_digits = 19; const f64_plus_zero: u64 = 0x0000000000000000; const f64_minus_zero: u64 = 0x8000000000000000; @@ -409,6 +411,7 @@ test "fmt.parseFloat" { expect(approxEq(T, try parseFloat(T, "123142.1"), 123142.1, epsilon)); expect(approxEq(T, try parseFloat(T, "-123142.1124"), @as(T, -123142.1124), epsilon)); expect(approxEq(T, try parseFloat(T, "0.7062146892655368"), @as(T, 0.7062146892655368), epsilon)); + expect(approxEq(T, try parseFloat(T, "2.71828182845904523536"), @as(T, 2.718281828459045), epsilon)); } } } From 50e39069518a0c2643cd5e3189ad087b5fbed0c6 Mon Sep 17 00:00:00 2001 From: Kenta Iwasaki <63115601+lithdew@users.noreply.github.com> Date: Fri, 4 Sep 2020 02:57:08 +0900 Subject: [PATCH 30/56] os: return error.SocketNotListening for EINVAL on accept (#6226) --- lib/std/net.zig | 3 +++ lib/std/os.zig | 5 ++++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/lib/std/net.zig b/lib/std/net.zig index 5a1407c35f..45d8f07f04 100644 --- a/lib/std/net.zig +++ b/lib/std/net.zig @@ -1641,6 +1641,9 @@ pub const StreamServer = struct { /// by the socket buffer limits, not by the system memory. SystemResources, + /// Socket is not listening for new connections. + SocketNotListening, + ProtocolFailure, /// Firewall rules forbid connection. diff --git a/lib/std/os.zig b/lib/std/os.zig index e8431c386b..2e4cc3aed0 100644 --- a/lib/std/os.zig +++ b/lib/std/os.zig @@ -2802,6 +2802,9 @@ pub const AcceptError = error{ /// by the socket buffer limits, not by the system memory. SystemResources, + /// Socket is not listening for new connections. + SocketNotListening, + ProtocolFailure, /// Firewall rules forbid connection. 
@@ -2870,7 +2873,7 @@ pub fn accept( EBADF => unreachable, // always a race condition ECONNABORTED => return error.ConnectionAborted, EFAULT => unreachable, - EINVAL => unreachable, + EINVAL => return error.SocketNotListening, ENOTSOCK => unreachable, EMFILE => return error.ProcessFdQuotaExceeded, ENFILE => return error.SystemFdQuotaExceeded, From 2a58e30bd5f522bf3077f556f47a1e28c537627e Mon Sep 17 00:00:00 2001 From: Lachlan Easton Date: Thu, 3 Sep 2020 20:16:12 +1000 Subject: [PATCH 31/56] std meta: fix use of alignOf in meta.cast --- lib/std/meta.zig | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/lib/std/meta.zig b/lib/std/meta.zig index aaa8e7ca78..73e0661498 100644 --- a/lib/std/meta.zig +++ b/lib/std/meta.zig @@ -705,34 +705,34 @@ pub fn Vector(comptime len: u32, comptime child: type) type { pub fn cast(comptime DestType: type, target: anytype) DestType { const TargetType = @TypeOf(target); switch (@typeInfo(DestType)) { - .Pointer => { + .Pointer => |dest_ptr| { switch (@typeInfo(TargetType)) { .Int, .ComptimeInt => { return @intToPtr(DestType, target); }, .Pointer => |ptr| { - return @ptrCast(DestType, @alignCast(ptr.alignment, target)); + return @ptrCast(DestType, @alignCast(dest_ptr.alignment, target)); }, .Optional => |opt| { if (@typeInfo(opt.child) == .Pointer) { - return @ptrCast(DestType, @alignCast(@alignOf(opt.child.Child), target)); + return @ptrCast(DestType, @alignCast(dest_ptr, target)); } }, else => {}, } }, - .Optional => |opt| { - if (@typeInfo(opt.child) == .Pointer) { + .Optional => |dest_opt| { + if (@typeInfo(dest_opt.child) == .Pointer) { switch (@typeInfo(TargetType)) { .Int, .ComptimeInt => { return @intToPtr(DestType, target); }, - .Pointer => |ptr| { - return @ptrCast(DestType, @alignCast(ptr.alignment, target)); + .Pointer => { + return @ptrCast(DestType, @alignCast(@alignOf(dest_opt.child.Child), target)); }, .Optional => |target_opt| { if (@typeInfo(target_opt.child) == .Pointer) { - return @ptrCast(DestType, @alignCast(@alignOf(target_opt.child.Child), target)); + return @ptrCast(DestType, @alignCast(@alignOf(dest_opt.child.Child), target)); } }, else => {}, From dac1cd77505ef9fa493e069549c139d74e31081f Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Thu, 27 Aug 2020 08:44:13 +0200 Subject: [PATCH 32/56] Write out simple Mach-O object file This commit adds enough Mach-O linker implementation to write out a simple Mach-O object file. Be warned, however: the object file is largely incomplete; it is missing relocation info, debug symbols, etc. Still, it seemed like a good starting point for getting the basic understanding right.
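For orientation, the object file written here follows the usual Mach-O layout: a mach_header_64, then the load commands (LC_SEGMENT_64 with its section_64 headers, LC_SYMTAB), then the raw section contents, the symbol table (nlist_64 entries), and the string table. The following is only a rough sketch of the kind of header such a file starts with for x86_64, not the exact code in this commit; the exampleHeader helper is hypothetical, and the CPU constants are written as literals with their loader.h names in comments.

    const std = @import("std");
    const macho = std.macho;

    // Sketch of an MH_OBJECT (relocatable object) header for x86_64.
    // ncmds and sizeofcmds must describe exactly the load commands that
    // are written immediately after this header.
    fn exampleHeader(ncmds: u32, sizeofcmds: u32) macho.mach_header_64 {
        return .{
            .magic = macho.MH_MAGIC_64, // 0xfeedfacf
            .cputype = 0x01000007, // CPU_TYPE_X86_64
            .cpusubtype = 0x3, // CPU_SUBTYPE_X86_64_ALL
            .filetype = macho.MH_OBJECT,
            .ncmds = ncmds,
            .sizeofcmds = sizeofcmds,
            .flags = 0,
            .reserved = 0,
        };
    }

The writeMachOHeader function in the diff below does the equivalent bookkeeping once the load commands and section data have been laid out.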
Signed-off-by: Jakub Konka --- src-self-hosted/codegen.zig | 57 ++- src-self-hosted/link/MachO.zig | 816 ++++++++++++++++++++++++--------- 2 files changed, 647 insertions(+), 226 deletions(-) diff --git a/src-self-hosted/codegen.zig b/src-self-hosted/codegen.zig index d6e3194c12..d72ce5ee16 100644 --- a/src-self-hosted/codegen.zig +++ b/src-self-hosted/codegen.zig @@ -1443,7 +1443,57 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } else if (self.bin_file.cast(link.File.MachO)) |macho_file| { switch (arch) { - .x86_64 => return self.fail(inst.base.src, "TODO implement codegen for call when linking with MachO for x86_64 arch", .{}), + .x86_64 => { + for (info.args) |mc_arg, arg_i| { + const arg = inst.args[arg_i]; + const arg_mcv = try self.resolveInst(inst.args[arg_i]); + // Here we do not use setRegOrMem even though the logic is similar, because + // the function call will move the stack pointer, so the offsets are different. + switch (mc_arg) { + .none => continue, + .register => |reg| { + try self.genSetReg(arg.src, reg, arg_mcv); + // TODO interact with the register allocator to mark the instruction as moved. + }, + .stack_offset => { + // Here we need to emit instructions like this: + // mov qword ptr [rsp + stack_offset], x + return self.fail(inst.base.src, "TODO implement calling with parameters in memory", .{}); + }, + .ptr_stack_offset => { + return self.fail(inst.base.src, "TODO implement calling with MCValue.ptr_stack_offset arg", .{}); + }, + .ptr_embedded_in_code => { + return self.fail(inst.base.src, "TODO implement calling with MCValue.ptr_embedded_in_code arg", .{}); + }, + .undef => unreachable, + .immediate => unreachable, + .unreach => unreachable, + .dead => unreachable, + .embedded_in_code => unreachable, + .memory => unreachable, + .compare_flags_signed => unreachable, + .compare_flags_unsigned => unreachable, + } + } + + if (inst.func.cast(ir.Inst.Constant)) |func_inst| { + if (func_inst.val.cast(Value.Payload.Function)) |func_val| { + const func = func_val.func; + const got = &macho_file.sections.items[macho_file.got_section_index.?]; + const ptr_bytes = 8; + const got_addr = @intCast(u32, got.addr + func.owner_decl.link.macho.offset_table_index.? * ptr_bytes); + // ff 14 25 xx xx xx xx call [addr] + try self.code.ensureCapacity(self.code.items.len + 7); + self.code.appendSliceAssumeCapacity(&[3]u8{ 0xff, 0x14, 0x25 }); + mem.writeIntLittle(u32, self.code.addManyAsArrayAssumeCapacity(4), got_addr); + } else { + return self.fail(inst.base.src, "TODO implement calling bitcasted functions", .{}); + } + } else { + return self.fail(inst.base.src, "TODO implement calling runtime known function pointer", .{}); + } + }, .aarch64 => return self.fail(inst.base.src, "TODO implement codegen for call when linking with MachO for aarch64 arch", .{}), else => unreachable, } @@ -2486,6 +2536,11 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?]; const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes; return MCValue{ .memory = got_addr }; + } else if (self.bin_file.cast(link.File.MachO)) |macho_file| { + const decl = payload.decl; + const got = &macho_file.sections.items[macho_file.got_section_index.?]; + const got_addr = got.addr + decl.link.macho.offset_table_index.? 
* ptr_bytes; + return MCValue{ .memory = got_addr }; } else { return self.fail(src, "TODO codegen non-ELF const Decl pointer", .{}); } diff --git a/src-self-hosted/link/MachO.zig b/src-self-hosted/link/MachO.zig index 047e62f950..93d7b2381a 100644 --- a/src-self-hosted/link/MachO.zig +++ b/src-self-hosted/link/MachO.zig @@ -18,36 +18,66 @@ const File = link.File; pub const base_tag: File.Tag = File.Tag.macho; +const LoadCommand = union(enum) { + Segment: macho.segment_command_64, + LinkeditData: macho.linkedit_data_command, + Symtab: macho.symtab_command, + Dysymtab: macho.dysymtab_command, + + pub fn cmdsize(self: LoadCommand) u32 { + return switch (self) { + .Segment => |x| x.cmdsize, + .LinkeditData => |x| x.cmdsize, + .Symtab => |x| x.cmdsize, + .Dysymtab => |x| x.cmdsize, + }; + } +}; + base: File, -/// List of all load command headers that are in the file. -/// We use it to track number and size of all commands needed by the header. -commands: std.ArrayListUnmanaged(macho.load_command) = std.ArrayListUnmanaged(macho.load_command){}, -command_file_offset: ?u64 = null, +/// Table of all load commands +load_commands: std.ArrayListUnmanaged(LoadCommand) = .{}, +segment_cmd_index: ?u16 = null, +symtab_cmd_index: ?u16 = null, +dysymtab_cmd_index: ?u16 = null, +data_in_code_cmd_index: ?u16 = null, -/// Stored in native-endian format, depending on target endianness needs to be bswapped on read/write. -/// Same order as in the file. -segments: std.ArrayListUnmanaged(macho.segment_command_64) = std.ArrayListUnmanaged(macho.segment_command_64){}, -/// Section (headers) *always* follow segment (load commands) directly! -sections: std.ArrayListUnmanaged(macho.section_64) = std.ArrayListUnmanaged(macho.section_64){}, +/// Table of all sections +sections: std.ArrayListUnmanaged(macho.section_64) = .{}, -/// Offset (index) into __TEXT segment load command. -text_segment_offset: ?u64 = null, -/// Offset (index) into __LINKEDIT segment load command. -linkedit_segment_offset: ?u664 = null, +/// __TEXT segment sections +text_section_index: ?u16 = null, +cstring_section_index: ?u16 = null, +const_text_section_index: ?u16 = null, +stubs_section_index: ?u16 = null, +stub_helper_section_index: ?u16 = null, + +/// __DATA segment sections +got_section_index: ?u16 = null, +const_data_section_index: ?u16 = null, -/// Entry point load command -entry_point_cmd: ?macho.entry_point_command = null, entry_addr: ?u64 = null, -/// The first 4GB of process' memory is reserved for the null (__PAGEZERO) segment. -/// This is also the start address for our binary. -vm_start_address: u64 = 0x100000000, +/// Table of all symbols used. +/// Internally references string table for names (which are optional). +symbol_table: std.ArrayListUnmanaged(macho.nlist_64) = .{}, -seg_table_dirty: bool = false, +/// Table of symbol names aka the string table. +string_table: std.ArrayListUnmanaged(u8) = .{}, + +/// Table of symbol vaddr values. The values is the absolute vaddr value. +/// If the vaddr of the executable __TEXT segment vaddr changes, the entire offset +/// table needs to be rewritten. +offset_table: std.ArrayListUnmanaged(u64) = .{}, error_flags: File.ErrorFlags = File.ErrorFlags{}, +cmd_table_dirty: bool = false, + +/// Pointer to the last allocated text block +last_text_block: ?*TextBlock = null, + /// `alloc_num / alloc_den` is the factor of padding when allocating. 
const alloc_num = 4; const alloc_den = 3; @@ -67,7 +97,23 @@ const LIB_SYSTEM_NAME: [*:0]const u8 = "System"; const LIB_SYSTEM_PATH: [*:0]const u8 = DEFAULT_LIB_SEARCH_PATH ++ "/libSystem.B.dylib"; pub const TextBlock = struct { - pub const empty = TextBlock{}; + /// Index into the symbol table + symbol_table_index: ?u32, + /// Index into offset table + offset_table_index: ?u32, + /// Size of this text block + size: u64, + /// Points to the previous and next neighbours + prev: ?*TextBlock, + next: ?*TextBlock, + + pub const empty = TextBlock{ + .symbol_table_index = null, + .offset_table_index = null, + .size = 0, + .prev = null, + .next = null, + }; }; pub const SrcFn = struct { @@ -117,6 +163,12 @@ fn openFile(allocator: *Allocator, file: fs.File, options: link.Options) !MachO /// Truncates the existing file contents and overwrites the contents. /// Returns an error if `file` is not already open with +read +write +seek abilities. fn createFile(allocator: *Allocator, file: fs.File, options: link.Options) !MachO { + switch (options.output_mode) { + .Exe => {}, + .Obj => {}, + .Lib => return error.TODOImplementWritingLibFiles, + } + var self: MachO = .{ .base = .{ .file = file, @@ -127,37 +179,518 @@ fn createFile(allocator: *Allocator, file: fs.File, options: link.Options) !Mach }; errdefer self.deinit(); - switch (options.output_mode) { - .Exe => { - // The first segment command for executables is always a __PAGEZERO segment. - const pagezero = .{ - .cmd = macho.LC_SEGMENT_64, - .cmdsize = commandSize(@sizeOf(macho.segment_command_64)), - .segname = makeString("__PAGEZERO"), - .vmaddr = 0, - .vmsize = self.vm_start_address, - .fileoff = 0, - .filesize = 0, - .maxprot = macho.VM_PROT_NONE, - .initprot = macho.VM_PROT_NONE, - .nsects = 0, - .flags = 0, - }; - try self.commands.append(allocator, .{ - .cmd = pagezero.cmd, - .cmdsize = pagezero.cmdsize, - }); - try self.segments.append(allocator, pagezero); - }, - .Obj => return error.TODOImplementWritingObjFiles, - .Lib => return error.TODOImplementWritingLibFiles, - } - try self.populateMissingMetadata(); return self; } +pub fn flush(self: *MachO, module: *Module) !void { + switch (self.base.options.output_mode) { + .Exe => { + var last_cmd_offset: usize = @sizeOf(macho.mach_header_64); + { + // Specify path to dynamic linker dyld + const cmdsize = commandSize(@sizeOf(macho.dylinker_command) + mem.lenZ(DEFAULT_DYLD_PATH)); + const load_dylinker = [1]macho.dylinker_command{ + .{ + .cmd = macho.LC_LOAD_DYLINKER, + .cmdsize = cmdsize, + .name = @sizeOf(macho.dylinker_command), + }, + }; + + try self.base.file.?.pwriteAll(mem.sliceAsBytes(load_dylinker[0..1]), last_cmd_offset); + + const file_offset = last_cmd_offset + @sizeOf(macho.dylinker_command); + try self.addPadding(cmdsize - @sizeOf(macho.dylinker_command), file_offset); + + try self.base.file.?.pwriteAll(mem.spanZ(DEFAULT_DYLD_PATH), file_offset); + last_cmd_offset += cmdsize; + } + + { + // Link against libSystem + const cmdsize = commandSize(@sizeOf(macho.dylib_command) + mem.lenZ(LIB_SYSTEM_PATH)); + // TODO Find a way to work out runtime version from the OS version triple stored in std.Target. + // In the meantime, we're gonna hardcode to the minimum compatibility version of 1.0.0. 
+ const min_version = 0x10000; + const dylib = .{ + .name = @sizeOf(macho.dylib_command), + .timestamp = 2, // not sure why not simply 0; this is reverse engineered from Mach-O files + .current_version = min_version, + .compatibility_version = min_version, + }; + const load_dylib = [1]macho.dylib_command{ + .{ + .cmd = macho.LC_LOAD_DYLIB, + .cmdsize = cmdsize, + .dylib = dylib, + }, + }; + + try self.base.file.?.pwriteAll(mem.sliceAsBytes(load_dylib[0..1]), last_cmd_offset); + + const file_offset = last_cmd_offset + @sizeOf(macho.dylib_command); + try self.addPadding(cmdsize - @sizeOf(macho.dylib_command), file_offset); + + try self.base.file.?.pwriteAll(mem.spanZ(LIB_SYSTEM_PATH), file_offset); + last_cmd_offset += cmdsize; + } + }, + .Obj => { + { + const symtab = &self.load_commands.items[self.symtab_cmd_index.?].Symtab; + symtab.nsyms = @intCast(u32, self.symbol_table.items.len); + const allocated_size = self.allocatedSize(symtab.stroff); + const needed_size = self.string_table.items.len; + log.debug("allocated_size = 0x{x}, needed_size = 0x{x}\n", .{ allocated_size, needed_size }); + + if (needed_size > allocated_size) { + symtab.strsize = 0; + symtab.stroff = @intCast(u32, self.findFreeSpace(needed_size, 1)); + } + symtab.strsize = @intCast(u32, needed_size); + + log.debug("writing string table from 0x{x} to 0x{x}\n", .{ symtab.stroff, symtab.stroff + symtab.strsize }); + + try self.base.file.?.pwriteAll(self.string_table.items, symtab.stroff); + } + + var last_cmd_offset: usize = @sizeOf(macho.mach_header_64); + for (self.load_commands.items) |cmd| { + const cmd_to_write = [1]@TypeOf(cmd){cmd}; + try self.base.file.?.pwriteAll(mem.sliceAsBytes(cmd_to_write[0..1]), last_cmd_offset); + last_cmd_offset += cmd.cmdsize(); + } + const off = @sizeOf(macho.mach_header_64) + @sizeOf(macho.segment_command_64); + try self.base.file.?.pwriteAll(mem.sliceAsBytes(self.sections.items), off); + }, + .Lib => return error.TODOImplementWritingLibFiles, + } + + if (self.entry_addr == null and self.base.options.output_mode == .Exe) { + log.debug("flushing. no_entry_point_found = true\n", .{}); + self.error_flags.no_entry_point_found = true; + } else { + log.debug("flushing. no_entry_point_found = false\n", .{}); + self.error_flags.no_entry_point_found = false; + try self.writeMachOHeader(); + } +} + +pub fn deinit(self: *MachO) void { + self.offset_table.deinit(self.base.allocator); + self.string_table.deinit(self.base.allocator); + self.symbol_table.deinit(self.base.allocator); + self.sections.deinit(self.base.allocator); + self.load_commands.deinit(self.base.allocator); +} + +pub fn allocateDeclIndexes(self: *MachO, decl: *Module.Decl) !void { + if (decl.link.macho.symbol_table_index) |_| return; + + try self.symbol_table.ensureCapacity(self.base.allocator, self.symbol_table.items.len + 1); + try self.offset_table.ensureCapacity(self.base.allocator, self.offset_table.items.len + 1); + + log.debug("allocating symbol index {} for {}\n", .{ self.symbol_table.items.len, decl.name }); + decl.link.macho.symbol_table_index = @intCast(u32, self.symbol_table.items.len); + _ = self.symbol_table.addOneAssumeCapacity(); + + decl.link.macho.offset_table_index = @intCast(u32, self.offset_table.items.len); + _ = self.offset_table.addOneAssumeCapacity(); + + self.symbol_table.items[decl.link.macho.symbol_table_index.?] = .{ + .n_strx = 0, + .n_type = 0, + .n_sect = 0, + .n_desc = 0, + .n_value = 0, + }; + self.offset_table.items[decl.link.macho.offset_table_index.?] 
= 0; +} + +pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void { + const tracy = trace(@src()); + defer tracy.end(); + + var code_buffer = std.ArrayList(u8).init(self.base.allocator); + defer code_buffer.deinit(); + + var dbg_line_buffer = std.ArrayList(u8).init(self.base.allocator); + defer dbg_line_buffer.deinit(); + + var dbg_info_buffer = std.ArrayList(u8).init(self.base.allocator); + defer dbg_info_buffer.deinit(); + + var dbg_info_type_relocs: File.DbgInfoTypeRelocsTable = .{}; + defer { + var it = dbg_info_type_relocs.iterator(); + while (it.next()) |entry| { + entry.value.relocs.deinit(self.base.allocator); + } + dbg_info_type_relocs.deinit(self.base.allocator); + } + + const typed_value = decl.typed_value.most_recent.typed_value; + const res = try codegen.generateSymbol( + &self.base, + decl.src(), + typed_value, + &code_buffer, + &dbg_line_buffer, + &dbg_info_buffer, + &dbg_info_type_relocs, + ); + + const code = switch (res) { + .externally_managed => |x| x, + .appended => code_buffer.items, + .fail => |em| { + decl.analysis = .codegen_failure; + try module.failed_decls.put(module.gpa, decl, em); + return; + }, + }; + log.debug("generated code {}\n", .{code}); + + const required_alignment = typed_value.ty.abiAlignment(self.base.options.target); + const symbol = &self.symbol_table.items[decl.link.macho.symbol_table_index.?]; + + const decl_name = mem.spanZ(decl.name); + const name_str_index = try self.makeString(decl_name); + const addr = try self.allocateTextBlock(&decl.link.macho, code.len, required_alignment); + log.debug("allocated text block for {} at 0x{x}\n", .{ decl_name, addr }); + log.debug("updated text section {}\n", .{self.sections.items[self.text_section_index.?]}); + + symbol.* = .{ + .n_strx = name_str_index, + .n_type = macho.N_SECT, + .n_sect = @intCast(u8, self.text_section_index.?) + 1, + .n_desc = 0, + .n_value = addr, + }; + self.offset_table.items[decl.link.macho.offset_table_index.?] = addr; + + try self.writeSymbol(decl.link.macho.symbol_table_index.?); + + const text_section = self.sections.items[self.text_section_index.?]; + const section_offset = symbol.n_value - text_section.addr; + const file_offset = text_section.offset + section_offset; + log.debug("file_offset 0x{x}\n", .{file_offset}); + try self.base.file.?.pwriteAll(code, file_offset); + + // Since we updated the vaddr and the size, each corresponding export symbol also needs to be updated. 
+ const decl_exports = module.decl_exports.get(decl) orelse &[0]*Module.Export{}; + return self.updateDeclExports(module, decl, decl_exports); +} + +pub fn updateDeclLineNumber(self: *MachO, module: *Module, decl: *const Module.Decl) !void {} + +pub fn updateDeclExports( + self: *MachO, + module: *Module, + decl: *const Module.Decl, + exports: []const *Module.Export, +) !void { + const tracy = trace(@src()); + defer tracy.end(); + + if (decl.link.macho.symbol_table_index == null) return; + + var decl_sym = self.symbol_table.items[decl.link.macho.symbol_table_index.?]; + // TODO implement + if (exports.len == 0) return; + + const exp = exports[0]; + self.entry_addr = decl_sym.n_value; + decl_sym.n_type |= macho.N_EXT; + exp.link.sym_index = 0; +} + +pub fn freeDecl(self: *MachO, decl: *Module.Decl) void {} + +pub fn getDeclVAddr(self: *MachO, decl: *const Module.Decl) u64 { + return self.symbol_table.items[decl.link.macho.symbol_table_index.?].n_value; +} + +pub fn populateMissingMetadata(self: *MachO) !void { + if (self.segment_cmd_index == null) { + self.segment_cmd_index = @intCast(u16, self.load_commands.items.len); + try self.load_commands.append(self.base.allocator, .{ + .Segment = .{ + .cmd = macho.LC_SEGMENT_64, + .cmdsize = @sizeOf(macho.segment_command_64), + .segname = makeStaticString(""), + .vmaddr = 0, + .vmsize = 0, + .fileoff = 0, + .filesize = 0, + .maxprot = 0, + .initprot = 0, + .nsects = 0, + .flags = 0, + }, + }); + self.cmd_table_dirty = true; + } + if (self.symtab_cmd_index == null) { + self.symtab_cmd_index = @intCast(u16, self.load_commands.items.len); + try self.load_commands.append(self.base.allocator, .{ + .Symtab = .{ + .cmd = macho.LC_SYMTAB, + .cmdsize = @sizeOf(macho.symtab_command), + .symoff = 0, + .nsyms = 0, + .stroff = 0, + .strsize = 0, + }, + }); + self.cmd_table_dirty = true; + } + if (self.text_section_index == null) { + self.text_section_index = @intCast(u16, self.sections.items.len); + const segment = &self.load_commands.items[self.segment_cmd_index.?].Segment; + segment.cmdsize += @sizeOf(macho.section_64); + segment.nsects += 1; + + const file_size = self.base.options.program_code_size_hint; + const off = @intCast(u32, self.findFreeSpace(file_size, 1)); + const flags = macho.S_REGULAR | macho.S_ATTR_PURE_INSTRUCTIONS | macho.S_ATTR_SOME_INSTRUCTIONS; + + log.debug("found __text section free space 0x{x} to 0x{x}\n", .{ off, off + file_size }); + + try self.sections.append(self.base.allocator, .{ + .sectname = makeStaticString("__text"), + .segname = makeStaticString("__TEXT"), + .addr = 0, + .size = file_size, + .offset = off, + .@"align" = 0x1000, + .reloff = 0, + .nreloc = 0, + .flags = flags, + .reserved1 = 0, + .reserved2 = 0, + .reserved3 = 0, + }); + + segment.vmsize += file_size; + segment.filesize += file_size; + segment.fileoff = off; + + log.debug("initial text section {}\n", .{self.sections.items[self.text_section_index.?]}); + } + { + const symtab = &self.load_commands.items[self.symtab_cmd_index.?].Symtab; + if (symtab.symoff == 0) { + const p_align = @sizeOf(macho.nlist_64); + const nsyms = self.base.options.symbol_count_hint; + const file_size = p_align * nsyms; + const off = @intCast(u32, self.findFreeSpace(file_size, p_align)); + log.debug("found symbol table free space 0x{x} to 0x{x}\n", .{ off, off + file_size }); + symtab.symoff = off; + symtab.nsyms = @intCast(u32, nsyms); + } + if (symtab.stroff == 0) { + try self.string_table.append(self.base.allocator, 0); + const file_size = @intCast(u32, self.string_table.items.len); + 
const off = @intCast(u32, self.findFreeSpace(file_size, 1)); + log.debug("found string table free space 0x{x} to 0x{x}\n", .{ off, off + file_size }); + symtab.stroff = off; + symtab.strsize = file_size; + } + } +} + +fn allocateTextBlock(self: *MachO, text_block: *TextBlock, new_block_size: u64, alignment: u64) !u64 { + const segment = &self.load_commands.items[self.segment_cmd_index.?].Segment; + const text_section = &self.sections.items[self.text_section_index.?]; + const new_block_ideal_capacity = new_block_size * alloc_num / alloc_den; + + var block_placement: ?*TextBlock = null; + const addr = blk: { + if (self.last_text_block) |last| { + const last_symbol = self.symbol_table.items[last.symbol_table_index.?]; + const ideal_capacity = last.size * alloc_num / alloc_den; + const ideal_capacity_end_addr = last_symbol.n_value + ideal_capacity; + const new_start_addr = mem.alignForwardGeneric(u64, ideal_capacity_end_addr, alignment); + block_placement = last; + break :blk new_start_addr; + } else { + break :blk text_section.addr; + } + }; + log.debug("computed symbol address 0x{x}\n", .{addr}); + + const expand_text_section = block_placement == null or block_placement.?.next == null; + if (expand_text_section) { + const text_capacity = self.allocatedSize(text_section.offset); + const needed_size = (addr + new_block_size) - text_section.addr; + log.debug("text capacity 0x{x}, needed size 0x{x}\n", .{ text_capacity, needed_size }); + + if (needed_size > text_capacity) { + // TODO handle growth + } + + self.last_text_block = text_block; + text_section.size = needed_size; + segment.vmsize = needed_size; + segment.filesize = needed_size; + if (alignment < text_section.@"align") { + text_section.@"align" = @intCast(u32, alignment); + } + } + text_block.size = new_block_size; + + if (text_block.prev) |prev| { + prev.next = text_block.next; + } + if (text_block.next) |next| { + next.prev = text_block.prev; + } + + if (block_placement) |big_block| { + text_block.prev = big_block; + text_block.next = big_block.next; + big_block.next = text_block; + } else { + text_block.prev = null; + text_block.next = null; + } + + return addr; +} + +fn makeStaticString(comptime bytes: []const u8) [16]u8 { + var buf = [_]u8{0} ** 16; + if (bytes.len > buf.len) @compileError("string too long; max 16 bytes"); + mem.copy(u8, buf[0..], bytes); + return buf; +} + +fn makeString(self: *MachO, bytes: []const u8) !u32 { + try self.string_table.ensureCapacity(self.base.allocator, self.string_table.items.len + bytes.len + 1); + const result = self.string_table.items.len; + self.string_table.appendSliceAssumeCapacity(bytes); + self.string_table.appendAssumeCapacity(0); + return @intCast(u32, result); +} + +fn alignSize(comptime Int: type, min_size: anytype, alignment: Int) Int { + const size = @intCast(Int, min_size); + if (size % alignment == 0) return size; + + const div = size / alignment; + return (div + 1) * alignment; +} + +fn commandSize(min_size: anytype) u32 { + return alignSize(u32, min_size, @sizeOf(u64)); +} + +fn addPadding(self: *MachO, size: u64, file_offset: u64) !void { + if (size == 0) return; + + const buf = try self.base.allocator.alloc(u8, size); + defer self.base.allocator.free(buf); + + mem.set(u8, buf[0..], 0); + + try self.base.file.?.pwriteAll(buf, file_offset); +} + +fn detectAllocCollision(self: *MachO, start: u64, size: u64) ?u64 { + const hdr_size: u64 = @sizeOf(macho.mach_header_64); + if (start < hdr_size) + return hdr_size; + + const end = start + satMul(size, alloc_num) / alloc_den; + + { + 
const off = @sizeOf(macho.mach_header_64); + var tight_size: u64 = 0; + for (self.load_commands.items) |cmd| { + tight_size += cmd.cmdsize(); + } + const increased_size = satMul(tight_size, alloc_num) / alloc_den; + const test_end = off + increased_size; + if (end > off and start < test_end) { + return test_end; + } + } + + for (self.sections.items) |section| { + const increased_size = satMul(section.size, alloc_num) / alloc_den; + const test_end = section.offset + increased_size; + if (end > section.offset and start < test_end) { + return test_end; + } + } + + if (self.symtab_cmd_index) |symtab_index| { + const symtab = self.load_commands.items[symtab_index].Symtab; + { + const tight_size = @sizeOf(macho.nlist_64) * symtab.nsyms; + const increased_size = satMul(tight_size, alloc_num) / alloc_den; + const test_end = symtab.symoff + increased_size; + if (end > symtab.symoff and start < test_end) { + return test_end; + } + } + { + const increased_size = satMul(symtab.strsize, alloc_num) / alloc_den; + const test_end = symtab.stroff + increased_size; + if (end > symtab.stroff and start < test_end) { + return test_end; + } + } + } + + return null; +} + +fn allocatedSize(self: *MachO, start: u64) u64 { + if (start == 0) + return 0; + var min_pos: u64 = std.math.maxInt(u64); + { + const off = @sizeOf(macho.mach_header_64); + if (off > start and off < min_pos) min_pos = off; + } + for (self.sections.items) |section| { + if (section.offset <= start) continue; + if (section.offset < min_pos) min_pos = section.offset; + } + if (self.symtab_cmd_index) |symtab_index| { + const symtab = self.load_commands.items[symtab_index].Symtab; + if (symtab.symoff > start and symtab.symoff < min_pos) min_pos = symtab.symoff; + if (symtab.stroff > start and symtab.stroff < min_pos) min_pos = symtab.stroff; + } + return min_pos - start; +} + +fn findFreeSpace(self: *MachO, object_size: u64, min_alignment: u16) u64 { + var start: u64 = 0; + while (self.detectAllocCollision(start, object_size)) |item_end| { + start = mem.alignForwardGeneric(u64, item_end, min_alignment); + } + return start; +} + +fn writeSymbol(self: *MachO, index: usize) !void { + const tracy = trace(@src()); + defer tracy.end(); + + const symtab = &self.load_commands.items[self.symtab_cmd_index.?].Symtab; + var sym = [1]macho.nlist_64{self.symbol_table.items[index]}; + const off = symtab.symoff + @sizeOf(macho.nlist_64) * index; + log.debug("writing symbol {} at 0x{x}\n", .{ sym[0], off }); + try self.base.file.?.pwriteAll(mem.sliceAsBytes(sym[0..1]), off); +} + +/// Writes Mach-O file header. +/// Should be invoked last as it needs up-to-date values of ncmds and sizeof_cmds bookkeeping +/// variables. fn writeMachOHeader(self: *MachO) !void { var hdr: macho.mach_header_64 = undefined; hdr.magic = macho.MH_MAGIC_64; @@ -190,193 +723,26 @@ fn writeMachOHeader(self: *MachO) !void { }, }; hdr.filetype = filetype; + hdr.ncmds = @intCast(u32, self.load_commands.items.len); - const ncmds = try math.cast(u32, self.commands.items.len); - hdr.ncmds = ncmds; - - var sizeof_cmds: u32 = 0; - for (self.commands.items) |cmd| { - sizeof_cmds += cmd.cmdsize; + var sizeofcmds: u32 = 0; + for (self.load_commands.items) |cmd| { + sizeofcmds += cmd.cmdsize(); } - hdr.sizeofcmds = sizeof_cmds; + + hdr.sizeofcmds = sizeofcmds; // TODO should these be set to something else? 
hdr.flags = 0; hdr.reserved = 0; + log.debug("writing Mach-O header {}\n", .{hdr}); + try self.base.file.?.pwriteAll(@ptrCast([*]const u8, &hdr)[0..@sizeOf(macho.mach_header_64)], 0); } -pub fn flush(self: *MachO, module: *Module) !void { - // Save segments first - { - const buf = try self.base.allocator.alloc(macho.segment_command_64, self.segments.items.len); - defer self.base.allocator.free(buf); - - self.command_file_offset = @sizeOf(macho.mach_header_64); - - for (buf) |*seg, i| { - seg.* = self.segments.items[i]; - self.command_file_offset.? += self.segments.items[i].cmdsize; - } - - try self.base.file.?.pwriteAll(mem.sliceAsBytes(buf), @sizeOf(macho.mach_header_64)); - } - - switch (self.base.options.output_mode) { - .Exe => { - { - // Specify path to dynamic linker dyld - const cmdsize = commandSize(@sizeOf(macho.dylinker_command) + mem.lenZ(DEFAULT_DYLD_PATH)); - const load_dylinker = [1]macho.dylinker_command{ - .{ - .cmd = macho.LC_LOAD_DYLINKER, - .cmdsize = cmdsize, - .name = @sizeOf(macho.dylinker_command), - }, - }; - try self.commands.append(self.base.allocator, .{ - .cmd = macho.LC_LOAD_DYLINKER, - .cmdsize = cmdsize, - }); - - try self.base.file.?.pwriteAll(mem.sliceAsBytes(load_dylinker[0..1]), self.command_file_offset.?); - - const file_offset = self.command_file_offset.? + @sizeOf(macho.dylinker_command); - try self.addPadding(cmdsize - @sizeOf(macho.dylinker_command), file_offset); - - try self.base.file.?.pwriteAll(mem.spanZ(DEFAULT_DYLD_PATH), file_offset); - self.command_file_offset.? += cmdsize; - } - - { - // Link against libSystem - const cmdsize = commandSize(@sizeOf(macho.dylib_command) + mem.lenZ(LIB_SYSTEM_PATH)); - // TODO Find a way to work out runtime version from the OS version triple stored in std.Target. - // In the meantime, we're gonna hardcode to the minimum compatibility version of 1.0.0. - const min_version = 0x10000; - const dylib = .{ - .name = @sizeOf(macho.dylib_command), - .timestamp = 2, // not sure why not simply 0; this is reverse engineered from Mach-O files - .current_version = min_version, - .compatibility_version = min_version, - }; - const load_dylib = [1]macho.dylib_command{ - .{ - .cmd = macho.LC_LOAD_DYLIB, - .cmdsize = cmdsize, - .dylib = dylib, - }, - }; - try self.commands.append(self.base.allocator, .{ - .cmd = macho.LC_LOAD_DYLIB, - .cmdsize = cmdsize, - }); - - try self.base.file.?.pwriteAll(mem.sliceAsBytes(load_dylib[0..1]), self.command_file_offset.?); - - const file_offset = self.command_file_offset.? + @sizeOf(macho.dylib_command); - try self.addPadding(cmdsize - @sizeOf(macho.dylib_command), file_offset); - - try self.base.file.?.pwriteAll(mem.spanZ(LIB_SYSTEM_PATH), file_offset); - self.command_file_offset.? += cmdsize; - } - }, - .Obj => return error.TODOImplementWritingObjFiles, - .Lib => return error.TODOImplementWritingLibFiles, - } - - if (self.entry_addr == null and self.base.options.output_mode == .Exe) { - log.debug("flushing. no_entry_point_found = true\n", .{}); - self.error_flags.no_entry_point_found = true; - } else { - log.debug("flushing. 
no_entry_point_found = false\n", .{}); - self.error_flags.no_entry_point_found = false; - try self.writeMachOHeader(); - } -} - -pub fn deinit(self: *MachO) void { - self.commands.deinit(self.base.allocator); - self.segments.deinit(self.base.allocator); - self.sections.deinit(self.base.allocator); -} - -pub fn allocateDeclIndexes(self: *MachO, decl: *Module.Decl) !void {} - -pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void {} - -pub fn updateDeclLineNumber(self: *MachO, module: *Module, decl: *const Module.Decl) !void {} - -pub fn updateDeclExports( - self: *MachO, - module: *Module, - decl: *const Module.Decl, - exports: []const *Module.Export, -) !void {} - -pub fn freeDecl(self: *MachO, decl: *Module.Decl) void {} - -pub fn getDeclVAddr(self: *MachO, decl: *const Module.Decl) u64 { - @panic("TODO implement getDeclVAddr for MachO"); -} - -pub fn populateMissingMetadata(self: *MachO) !void { - if (self.text_segment_offset == null) { - self.text_segment_offset = @intCast(u64, self.segments.items.len); - const file_size = alignSize(u64, self.base.options.program_code_size_hint, 0x1000); - log.debug("vmsize/filesize = {}", .{file_size}); - const file_offset = 0; - const vm_address = self.vm_start_address; // the end of __PAGEZERO segment in VM - const protection = macho.VM_PROT_READ | macho.VM_PROT_EXECUTE; - const cmdsize = commandSize(@sizeOf(macho.segment_command_64)); - const text_segment = .{ - .cmd = macho.LC_SEGMENT_64, - .cmdsize = cmdsize, - .segname = makeString("__TEXT"), - .vmaddr = vm_address, - .vmsize = file_size, - .fileoff = 0, // __TEXT segment *always* starts at 0 file offset - .filesize = 0, //file_size, - .maxprot = protection, - .initprot = protection, - .nsects = 0, - .flags = 0, - }; - try self.commands.append(self.base.allocator, .{ - .cmd = macho.LC_SEGMENT_64, - .cmdsize = cmdsize, - }); - try self.segments.append(self.base.allocator, text_segment); - } -} - -fn makeString(comptime bytes: []const u8) [16]u8 { - var buf = [_]u8{0} ** 16; - if (bytes.len > buf.len) @compileError("MachO segment/section name too long"); - mem.copy(u8, buf[0..], bytes); - return buf; -} - -fn alignSize(comptime Int: type, min_size: anytype, alignment: Int) Int { - const size = @intCast(Int, min_size); - if (size % alignment == 0) return size; - - const div = size / alignment; - return (div + 1) * alignment; -} - -fn commandSize(min_size: anytype) u32 { - return alignSize(u32, min_size, @sizeOf(u64)); -} - -fn addPadding(self: *MachO, size: u32, file_offset: u64) !void { - if (size == 0) return; - - const buf = try self.base.allocator.alloc(u8, size); - defer self.base.allocator.free(buf); - - mem.set(u8, buf[0..], 0); - - try self.base.file.?.pwriteAll(buf, file_offset); +/// Saturating multiplication +fn satMul(a: anytype, b: anytype) @TypeOf(a, b) { + const T = @TypeOf(a, b); + return std.math.mul(T, a, b) catch std.math.maxInt(T); } From 17f36566de1cf549907d20dfd963596784691c73 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 3 Sep 2020 15:02:38 -0700 Subject: [PATCH 33/56] stage2: upgrade Scope.Container decls from ArrayList to HashMap --- src-self-hosted/Module.zig | 24 +++++++++--------------- 1 file changed, 9 insertions(+), 15 deletions(-) diff --git a/src-self-hosted/Module.zig b/src-self-hosted/Module.zig index 8d7a4d7b36..d273712cd1 100644 --- a/src-self-hosted/Module.zig +++ b/src-self-hosted/Module.zig @@ -230,8 +230,7 @@ pub const Decl = struct { const src_decl = module.decls[self.src_index]; return src_decl.inst.src; }, - .file, - .block 
=> unreachable, + .file, .block => unreachable, .gen_zir => unreachable, .local_val => unreachable, .local_ptr => unreachable, @@ -544,7 +543,7 @@ pub const Scope = struct { file_scope: *Scope.File, /// Direct children of the file. - decls: ArrayListUnmanaged(*Decl), + decls: std.AutoArrayHashMapUnmanaged(*Decl, void), // TODO implement container types and put this in a status union // ty: Type @@ -555,12 +554,7 @@ pub const Scope = struct { } pub fn removeDecl(self: *Container, child: *Decl) void { - for (self.decls.items) |item, i| { - if (item == child) { - _ = self.decls.swapRemove(i); - return; - } - } + _ = self.decls.remove(child); } pub fn fullyQualifiedNameHash(self: *Container, name: []const u8) NameHash { @@ -1796,9 +1790,9 @@ fn analyzeContainer(self: *Module, container_scope: *Scope.Container) !void { // we know which ones have been deleted. var deleted_decls = std.AutoArrayHashMap(*Decl, void).init(self.gpa); defer deleted_decls.deinit(); - try deleted_decls.ensureCapacity(container_scope.decls.items.len); - for (container_scope.decls.items) |file_decl| { - deleted_decls.putAssumeCapacityNoClobber(file_decl, {}); + try deleted_decls.ensureCapacity(container_scope.decls.items().len); + for (container_scope.decls.items()) |entry| { + deleted_decls.putAssumeCapacityNoClobber(entry.key, {}); } for (decls) |src_decl, decl_i| { @@ -1839,7 +1833,7 @@ fn analyzeContainer(self: *Module, container_scope: *Scope.Container) !void { } } else { const new_decl = try self.createNewDecl(&container_scope.base, name, decl_i, name_hash, contents_hash); - container_scope.decls.appendAssumeCapacity(new_decl); + container_scope.decls.putAssumeCapacity(new_decl, {}); if (fn_proto.getExternExportInlineToken()) |maybe_export_token| { if (tree.token_ids[maybe_export_token] == .Keyword_export) { self.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = new_decl }); @@ -1866,7 +1860,7 @@ fn analyzeContainer(self: *Module, container_scope: *Scope.Container) !void { } } else { const new_decl = try self.createNewDecl(&container_scope.base, name, decl_i, name_hash, contents_hash); - container_scope.decls.appendAssumeCapacity(new_decl); + container_scope.decls.putAssumeCapacity(new_decl, {}); if (var_decl.getExternExportToken()) |maybe_export_token| { if (tree.token_ids[maybe_export_token] == .Keyword_export) { self.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = new_decl }); @@ -1882,7 +1876,7 @@ fn analyzeContainer(self: *Module, container_scope: *Scope.Container) !void { const contents_hash = std.zig.hashSrc(tree.getNodeSource(src_decl)); const new_decl = try self.createNewDecl(&container_scope.base, name, decl_i, name_hash, contents_hash); - container_scope.decls.appendAssumeCapacity(new_decl); + container_scope.decls.putAssumeCapacity(new_decl, {}); self.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = new_decl }); } else if (src_decl.castTag(.ContainerField)) |container_field| { log.err("TODO: analyze container field", .{}); From 88724b2a89157ecc3a8eea03aa0f8a6b66829915 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Felix=20=28xq=29=20Quei=C3=9Fner?= Date: Thu, 3 Sep 2020 17:48:17 +0200 Subject: [PATCH 34/56] Introduces a space after the ellipsis for test and progress. 
--- lib/std/progress.zig | 6 +++--- lib/std/special/test_runner.zig | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/std/progress.zig b/lib/std/progress.zig index 654d8cc228..82f2801fa1 100644 --- a/lib/std/progress.zig +++ b/lib/std/progress.zig @@ -197,7 +197,7 @@ pub const Progress = struct { var maybe_node: ?*Node = &self.root; while (maybe_node) |node| { if (need_ellipse) { - self.bufWrite(&end, "...", .{}); + self.bufWrite(&end, "... ", .{}); } need_ellipse = false; if (node.name.len != 0 or node.estimated_total_items != null) { @@ -218,7 +218,7 @@ pub const Progress = struct { maybe_node = node.recently_updated_child; } if (need_ellipse) { - self.bufWrite(&end, "...", .{}); + self.bufWrite(&end, "... ", .{}); } } @@ -253,7 +253,7 @@ pub const Progress = struct { const bytes_needed_for_esc_codes_at_end = if (std.builtin.os.tag == .windows) 0 else 11; const max_end = self.output_buffer.len - bytes_needed_for_esc_codes_at_end; if (end.* > max_end) { - const suffix = "..."; + const suffix = "... "; self.columns_written = self.columns_written - (end.* - max_end) + suffix.len; std.mem.copy(u8, self.output_buffer[max_end..], suffix); end.* = max_end + suffix.len; diff --git a/lib/std/special/test_runner.zig b/lib/std/special/test_runner.zig index 87b011ede8..b9452b79cc 100644 --- a/lib/std/special/test_runner.zig +++ b/lib/std/special/test_runner.zig @@ -40,7 +40,7 @@ pub fn main() anyerror!void { test_node.activate(); progress.refresh(); if (progress.terminal == null) { - std.debug.print("{}/{} {}...", .{ i + 1, test_fn_list.len, test_fn.name }); + std.debug.print("{}/{} {}... ", .{ i + 1, test_fn_list.len, test_fn.name }); } const result = if (test_fn.async_frame_size) |size| switch (io_mode) { .evented => blk: { From abe672956ed037077b23ac6aa29df3dd99795539 Mon Sep 17 00:00:00 2001 From: pfg Date: Thu, 3 Sep 2020 16:33:47 -0700 Subject: [PATCH 35/56] Test 0 bit allocation --- lib/std/heap.zig | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/std/heap.zig b/lib/std/heap.zig index d6977f2f9c..492eb40656 100644 --- a/lib/std/heap.zig +++ b/lib/std/heap.zig @@ -915,6 +915,10 @@ pub fn testAllocator(base_allocator: *mem.Allocator) !void { testing.expect(slice.len == 10); allocator.free(slice); + + const zero_bit_ptr = try allocator.create(u0); + zero_bit_ptr.* = 0; + allocator.destroy(zero_bit_ptr); } pub fn testAllocatorAligned(base_allocator: *mem.Allocator, comptime alignment: u29) !void { From fac9a4e286e90c5d5574422a17d28e85c55ae67d Mon Sep 17 00:00:00 2001 From: Alexandros Naskos Date: Thu, 27 Aug 2020 13:28:54 +0300 Subject: [PATCH 36/56] Start working on PE/COFF linking. 
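
The initial Coff.zig writes a minimal PE image in a fixed order: the bundled
128-byte MS-DOS stub, the 4-byte "PE\x00\x00" signature, the 20-byte COFF file
header, and then the optional header and section table. A minimal sketch of how
those fixed sizes (taken from the constants added in link/Coff.zig and the
128-byte msdos-stub.bin) determine the header offsets:

    const std = @import("std");

    // Sizes assumed from this patch: msdos-stub.bin is 128 bytes, the PE
    // signature is 4 bytes, and the COFF file header is 20 bytes.
    const msdos_stub_len: u32 = 128;
    const pe_signature_len: u32 = 4;
    const coff_file_header_len: u32 = 20;

    pub fn main() void {
        // Offset of the COFF file header: right after the stub and signature.
        const coff_file_header_offset = msdos_stub_len + pe_signature_len;
        // Offset of the optional header: right after the COFF file header.
        const optional_header_offset = coff_file_header_offset + coff_file_header_len;
        std.debug.print("COFF file header at 0x{x}, optional header at 0x{x}\n", .{
            coff_file_header_offset,
            optional_header_offset,
        });
    }
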
--- src-self-hosted/Module.zig | 5 + src-self-hosted/link.zig | 22 ++- src-self-hosted/link/Coff.zig | 231 ++++++++++++++++++++++++++++ src-self-hosted/link/msdos-stub.bin | Bin 0 -> 128 bytes 4 files changed, 256 insertions(+), 2 deletions(-) create mode 100644 src-self-hosted/link/Coff.zig create mode 100644 src-self-hosted/link/msdos-stub.bin diff --git a/src-self-hosted/Module.zig b/src-self-hosted/Module.zig index d273712cd1..01c3cdd449 100644 --- a/src-self-hosted/Module.zig +++ b/src-self-hosted/Module.zig @@ -1820,6 +1820,9 @@ fn analyzeContainer(self: *Module, container_scope: *Scope.Container) !void { try self.markOutdatedDecl(decl); decl.contents_hash = contents_hash; } else switch (self.bin_file.tag) { + .coff => { + // TODO Implement for COFF + }, .elf => if (decl.fn_link.elf.len != 0) { // TODO Look into detecting when this would be unnecessary by storing enough state // in `Decl` to notice that the line number did not change. @@ -2078,12 +2081,14 @@ fn allocateNewDecl( .deletion_flag = false, .contents_hash = contents_hash, .link = switch (self.bin_file.tag) { + .coff => .{ .coff = {} }, // @TODO .elf => .{ .elf = link.File.Elf.TextBlock.empty }, .macho => .{ .macho = link.File.MachO.TextBlock.empty }, .c => .{ .c = {} }, .wasm => .{ .wasm = {} }, }, .fn_link = switch (self.bin_file.tag) { + .coff => .{ .coff = {} }, // @TODO .elf => .{ .elf = link.File.Elf.SrcFn.empty }, .macho => .{ .macho = link.File.MachO.SrcFn.empty }, .c => .{ .c = {} }, diff --git a/src-self-hosted/link.zig b/src-self-hosted/link.zig index ecf3876582..4189d636b1 100644 --- a/src-self-hosted/link.zig +++ b/src-self-hosted/link.zig @@ -34,6 +34,7 @@ pub const File = struct { pub const LinkBlock = union { elf: Elf.TextBlock, + coff: void, // @TODO macho: MachO.TextBlock, c: void, wasm: void, @@ -41,6 +42,7 @@ pub const File = struct { pub const LinkFn = union { elf: Elf.SrcFn, + coff: void, // @TODO macho: MachO.SrcFn, c: void, wasm: ?Wasm.FnData, @@ -66,7 +68,7 @@ pub const File = struct { pub fn openPath(allocator: *Allocator, dir: fs.Dir, sub_path: []const u8, options: Options) !*File { switch (options.object_format) { .unknown => unreachable, - .coff => return error.TODOImplementCoff, + .coff => return Coff.openPath(allocator, dir, sub_path, options), .elf => return Elf.openPath(allocator, dir, sub_path, options), .macho => return MachO.openPath(allocator, dir, sub_path, options), .wasm => return Wasm.openPath(allocator, dir, sub_path, options), @@ -85,7 +87,7 @@ pub const File = struct { pub fn makeWritable(base: *File, dir: fs.Dir, sub_path: []const u8) !void { switch (base.tag) { - .elf, .macho => { + .coff, .elf, .macho => { if (base.file != null) return; base.file = try dir.createFile(sub_path, .{ .truncate = false, @@ -112,6 +114,7 @@ pub const File = struct { /// after allocateDeclIndexes for any given Decl. 
pub fn updateDecl(base: *File, module: *Module, decl: *Module.Decl) !void { switch (base.tag) { + .coff => return @fieldParentPtr(Coff, "base", base).updateDecl(module, decl), .elf => return @fieldParentPtr(Elf, "base", base).updateDecl(module, decl), .macho => return @fieldParentPtr(MachO, "base", base).updateDecl(module, decl), .c => return @fieldParentPtr(C, "base", base).updateDecl(module, decl), @@ -121,6 +124,7 @@ pub const File = struct { pub fn updateDeclLineNumber(base: *File, module: *Module, decl: *Module.Decl) !void { switch (base.tag) { + .coff => return @fieldParentPtr(Coff, "base", base).updateDeclLineNumber(module, decl), .elf => return @fieldParentPtr(Elf, "base", base).updateDeclLineNumber(module, decl), .macho => return @fieldParentPtr(MachO, "base", base).updateDeclLineNumber(module, decl), .c, .wasm => {}, @@ -131,6 +135,7 @@ pub const File = struct { /// any given Decl. pub fn allocateDeclIndexes(base: *File, decl: *Module.Decl) !void { switch (base.tag) { + .coff => return @fieldParentPtr(Coff, "base", base).allocateDeclIndexes(decl), .elf => return @fieldParentPtr(Elf, "base", base).allocateDeclIndexes(decl), .macho => return @fieldParentPtr(MachO, "base", base).allocateDeclIndexes(decl), .c, .wasm => {}, @@ -140,6 +145,7 @@ pub const File = struct { pub fn deinit(base: *File) void { if (base.file) |f| f.close(); switch (base.tag) { + .coff => @fieldParentPtr(Coff, "base", base).deinit(), .elf => @fieldParentPtr(Elf, "base", base).deinit(), .macho => @fieldParentPtr(MachO, "base", base).deinit(), .c => @fieldParentPtr(C, "base", base).deinit(), @@ -149,6 +155,11 @@ pub const File = struct { pub fn destroy(base: *File) void { switch (base.tag) { + .coff => { + const parent = @fieldParentPtr(Coff, "base", base); + parent.deinit(); + base.allocator.destroy(parent); + }, .elf => { const parent = @fieldParentPtr(Elf, "base", base); parent.deinit(); @@ -177,6 +188,7 @@ pub const File = struct { defer tracy.end(); try switch (base.tag) { + .coff => @fieldParentPtr(Coff, "base", base).flush(module), .elf => @fieldParentPtr(Elf, "base", base).flush(module), .macho => @fieldParentPtr(MachO, "base", base).flush(module), .c => @fieldParentPtr(C, "base", base).flush(module), @@ -186,6 +198,7 @@ pub const File = struct { pub fn freeDecl(base: *File, decl: *Module.Decl) void { switch (base.tag) { + .coff => @fieldParentPtr(Coff, "base", base).freeDecl(decl), .elf => @fieldParentPtr(Elf, "base", base).freeDecl(decl), .macho => @fieldParentPtr(MachO, "base", base).freeDecl(decl), .c => unreachable, @@ -195,6 +208,7 @@ pub const File = struct { pub fn errorFlags(base: *File) ErrorFlags { return switch (base.tag) { + .coff => @fieldParentPtr(Coff, "base", base).error_flags, .elf => @fieldParentPtr(Elf, "base", base).error_flags, .macho => @fieldParentPtr(MachO, "base", base).error_flags, .c => return .{ .no_entry_point_found = false }, @@ -211,6 +225,7 @@ pub const File = struct { exports: []const *Module.Export, ) !void { switch (base.tag) { + .coff => return @fieldParentPtr(Coff, "base", base).updateDeclExports(module, decl, exports), .elf => return @fieldParentPtr(Elf, "base", base).updateDeclExports(module, decl, exports), .macho => return @fieldParentPtr(MachO, "base", base).updateDeclExports(module, decl, exports), .c => return {}, @@ -220,6 +235,7 @@ pub const File = struct { pub fn getDeclVAddr(base: *File, decl: *const Module.Decl) u64 { switch (base.tag) { + .coff => return @fieldParentPtr(Coff, "base", base).getDeclVAddr(decl), .elf => return @fieldParentPtr(Elf, "base", 
base).getDeclVAddr(decl), .macho => return @fieldParentPtr(MachO, "base", base).getDeclVAddr(decl), .c => unreachable, @@ -228,6 +244,7 @@ pub const File = struct { } pub const Tag = enum { + coff, elf, macho, c, @@ -239,6 +256,7 @@ pub const File = struct { }; pub const C = @import("link/C.zig"); + pub const Coff = @import("link/Coff.zig"); pub const Elf = @import("link/Elf.zig"); pub const MachO = @import("link/MachO.zig"); pub const Wasm = @import("link/Wasm.zig"); diff --git a/src-self-hosted/link/Coff.zig b/src-self-hosted/link/Coff.zig new file mode 100644 index 0000000000..eaceddb9a3 --- /dev/null +++ b/src-self-hosted/link/Coff.zig @@ -0,0 +1,231 @@ +const Coff = @This(); + +const std = @import("std"); +const Allocator = std.mem.Allocator; +const assert = std.debug.assert; +const fs = std.fs; + +const Module = @import("../Module.zig"); +const codegen = @import("../codegen/wasm.zig"); +const link = @import("../link.zig"); + + +pub const base_tag: link.File.Tag = .coff; + +const msdos_stub = @embedFile("msdos-stub.bin"); +const coff_file_header_offset = msdos_stub.len + 4; +const optional_header_offset = coff_file_header_offset + 20; + +base: link.File, +ptr_width: enum { p32, p64 }, +error_flags: link.File.ErrorFlags = .{}, + +coff_file_header_dirty: bool = false, +optional_header_dirty: bool = false, + +pub fn openPath(allocator: *Allocator, dir: fs.Dir, sub_path: []const u8, options: link.Options) !*link.File { + assert(options.object_format == .coff); + + const file = try dir.createFile(sub_path, .{ .truncate = false, .read = true, .mode = link.determineMode(options) }); + errdefer file.close(); + + var coff_file = try allocator.create(Coff); + errdefer allocator.destroy(coff_file); + + coff_file.* = openFile(allocator, file, options) catch |err| switch (err) { + error.IncrFailed => try createFile(allocator, file, options), + else => |e| return e, + }; + + return &coff_file.base; +} + +/// Returns error.IncrFailed if incremental update could not be performed. +fn openFile(allocator: *Allocator, file: fs.File, options: link.Options) !Coff { + switch (options.output_mode) { + .Exe => {}, + .Obj => return error.IncrFailed, // @TODO DO OBJ FILES + .Lib => return error.IncrFailed, + } + var self: Coff = .{ + .base = .{ + .file = file, + .tag = .coff, + .options = options, + .allocator = allocator, + }, + .ptr_width = switch (options.target.cpu.arch.ptrBitWidth()) { + 32 => .p32, + 64 => .p64, + else => return error.UnsupportedELFArchitecture, + }, + }; + errdefer self.deinit(); + + // TODO implement reading the PE/COFF file + return error.IncrFailed; +} + +/// Truncates the existing file contents and overwrites the contents. +/// Returns an error if `file` is not already open with +read +write +seek abilities. 
+fn createFile(allocator: *Allocator, file: fs.File, options: link.Options) !Coff { + switch (options.output_mode) { + .Exe => {}, + .Obj => return error.TODOImplementWritingObjFiles, // @TODO DO OBJ FILES + .Lib => return error.TODOImplementWritingLibFiles, + } + var self: Coff = .{ + .base = .{ + .tag = .coff, + .options = options, + .allocator = allocator, + .file = file, + }, + .ptr_width = switch (options.target.cpu.arch.ptrBitWidth()) { + 32 => .p32, + 64 => .p64, + else => return error.UnsupportedCOFFArchitecture, + }, + .coff_file_header_dirty = true, + .optional_header_dirty = true, + }; + errdefer self.deinit(); + + var output = self.base.file.?.writer(); + + // MS-DOS stub + PE magic + try output.writeAll(msdos_stub ++ "PE\x00\x00"); + const machine_type: u16 = switch (self.base.options.target.cpu.arch) { + .x86_64 => 0x8664, + .i386 => 0x014c, + .riscv32 => 0x5032, + .riscv64 => 0x5064, + else => return error.UnsupportedCOFFArchitecture, + }; + + // Start of COFF file header + try output.writeIntLittle(u16, machine_type); + try output.writeIntLittle(u16, switch (self.ptr_width) { + .p32 => @as(u16, 98), + .p64 => 114, + }); + try output.writeAll("\x00" ** 14); + // Characteristics - IMAGE_FILE_RELOCS_STRIPPED | IMAGE_FILE_EXECUTABLE_IMAGE | IMAGE_FILE_DEBUG_STRIPPED + var characteristics: u16 = 0x0001 | 0x000 | 0x02002; // @TODO Remove debug info stripped flag when necessary + switch (self.ptr_width) { + // IMAGE_FILE_32BIT_MACHINE + .p32 => characteristics |= 0x0100, + // IMAGE_FILE_LARGE_ADDRESS_AWARE + .p64 => characteristics |= 0x0020, + } + try output.writeIntLittle(u16, characteristics); + try output.writeIntLittle(u16, switch (self.ptr_width) { + .p32 => @as(u16, 0x10b), + .p64 => 0x20b, + }); + + // Start of optional header + // TODO Linker version, use 0.0 for now. + try output.writeAll("\x00" ** 2); + // Zero out every field until "BaseOfCode" + // @TODO Actually write entry point address, base of code address + try output.writeAll("\x00" ** 20); + switch (self.ptr_width) { + .p32 => { + // Zero out base of data + try output.writeAll("\x00" ** 4); + // Write image base + try output.writeIntLittle(u32, 0x40000000); + }, + .p64 => { + // Write image base + try output.writeIntLittle(u64, 0x40000000); + }, + } + + // Section alignment - default to 256 + try output.writeIntLittle(u32, 256); + // File alignment - default to 512 + try output.writeIntLittle(u32, 512); + // TODO - Minimum required windows version - use 6.0 (aka vista for now) + try output.writeIntLittle(u16, 0x6); + try output.writeIntLittle(u16, 0x0); + // TODO - Image version - use 0.0 for now + try output.writeIntLittle(u32, 0x0); + // Subsystem version + try output.writeIntLittle(u16, 0x6); + try output.writeIntLittle(u16, 0x0); + // Reserved zeroes + try output.writeIntLittle(u32, 0x0); + // Size of image - initialize to zero + try output.writeIntLittle(u32, 0x0); + // @TODO Size of headers - calculate this. + try output.writeIntLittle(u32, 0x0); + // Checksum + try output.writeIntLittle(u32, 0x0); + // Subsystem + try output.writeIntLittle(u16, 0x3); + // @TODO Dll characteristics, just using a value from a LLVM produced executable for now. 
+ try output.writeIntLittle(u16, 0x8160); + switch (self.ptr_width) { + .p32 => { + // Stack reserve + try output.writeIntLittle(u32, 0x1000000); + // Stack commit + try output.writeIntLittle(u32, 0x1000); + // Heap reserve + try output.writeIntLittle(u32, 0x100000); + // Heap commit + try output.writeIntLittle(u32, 0x100); + }, + .p64 => { + // Stack reserve + try output.writeIntLittle(u64, 0x1000000); + // Stack commit + try output.writeIntLittle(u64, 0x1000); + // Heap reserve + try output.writeIntLittle(u64, 0x100000); + // Heap commit + try output.writeIntLittle(u64, 0x100); + }, + } + // Reserved loader flags + try output.writeIntLittle(u32, 0x0); + // Number of RVA + sizes + try output.writeIntLittle(u32, 0x0); + + return self; +} + +pub fn flush(self: *Coff, module: *Module) !void { + // @TODO Implement this +} + +pub fn freeDecl(self: *Coff, decl: *Module.Decl) void { + // @TODO Implement this +} + +pub fn updateDecl(self: *Coff, module: *Module, decl: *Module.Decl) !void { + // @TODO Implement this +} + +pub fn updateDeclLineNumber(self: *Coff, module: *Module, decl: *Module.Decl) !void { + // @TODO Implement this +} + +pub fn allocateDeclIndexes(self: *Coff, decl: *Module.Decl) !void { + // @TODO Implement this +} + +pub fn updateDeclExports(self: *Coff, module: *Module, decl: *const Module.Decl, exports: []const *Module.Export) !void { + // @TODO Implement this +} + +pub fn getDeclVAddr(self: *Coff, decl: *const Module.Decl) u64 { + // @TODO Implement this + return 0; +} + +pub fn deinit(self: *Coff) void { + // @TODO +} diff --git a/src-self-hosted/link/msdos-stub.bin b/src-self-hosted/link/msdos-stub.bin new file mode 100644 index 0000000000000000000000000000000000000000..96ad91198f0de1eb25b9d9846c44706823dffa58 GIT binary patch literal 128 zcmeZ`n!v!!z`(!)#Q*;@Fzf)*Am9Kd@e>U|X+HT~d< Date: Sun, 30 Aug 2020 14:51:49 +0300 Subject: [PATCH 37/56] Write PE section table --- src-self-hosted/link/Coff.zig | 280 +++++++++++++++++++++++----------- 1 file changed, 187 insertions(+), 93 deletions(-) diff --git a/src-self-hosted/link/Coff.zig b/src-self-hosted/link/Coff.zig index eaceddb9a3..b77944b2cf 100644 --- a/src-self-hosted/link/Coff.zig +++ b/src-self-hosted/link/Coff.zig @@ -1,6 +1,7 @@ const Coff = @This(); const std = @import("std"); +const log = std.log.scoped(.link); const Allocator = std.mem.Allocator; const assert = std.debug.assert; const fs = std.fs; @@ -9,12 +10,9 @@ const Module = @import("../Module.zig"); const codegen = @import("../codegen/wasm.zig"); const link = @import("../link.zig"); - pub const base_tag: link.File.Tag = .coff; const msdos_stub = @embedFile("msdos-stub.bin"); -const coff_file_header_offset = msdos_stub.len + 4; -const optional_header_offset = coff_file_header_offset + 20; base: link.File, ptr_width: enum { p32, p64 }, @@ -70,8 +68,7 @@ fn openFile(allocator: *Allocator, file: fs.File, options: link.Options) !Coff { /// Returns an error if `file` is not already open with +read +write +seek abilities. 
fn createFile(allocator: *Allocator, file: fs.File, options: link.Options) !Coff { switch (options.output_mode) { - .Exe => {}, - .Obj => return error.TODOImplementWritingObjFiles, // @TODO DO OBJ FILES + .Exe, .Obj => {}, .Lib => return error.TODOImplementWritingLibFiles, } var self: Coff = .{ @@ -91,108 +88,205 @@ fn createFile(allocator: *Allocator, file: fs.File, options: link.Options) !Coff }; errdefer self.deinit(); - var output = self.base.file.?.writer(); + var coff_file_header_offset: u32 = 0; + if (options.output_mode == .Exe) { + // Write the MS-DOS stub and the PE signature + try self.base.file.?.pwriteAll(msdos_stub ++ "PE\x00\x00", 0); + coff_file_header_offset = msdos_stub.len + 4; + } - // MS-DOS stub + PE magic - try output.writeAll(msdos_stub ++ "PE\x00\x00"); + // COFF file header + const data_directory_count = 0; + var hdr_data: [112 + data_directory_count * 8 + 2 * 40]u8 = undefined; + var index: usize = 0; + + // @TODO Add an enum(u16) in std.coff, add .toCoffMachine to Arch const machine_type: u16 = switch (self.base.options.target.cpu.arch) { - .x86_64 => 0x8664, - .i386 => 0x014c, + .x86_64 => 0x8664, + .i386 => 0x014c, .riscv32 => 0x5032, .riscv64 => 0x5064, else => return error.UnsupportedCOFFArchitecture, }; + std.mem.writeIntLittle(u16, hdr_data[0..2], machine_type); + index += 2; - // Start of COFF file header - try output.writeIntLittle(u16, machine_type); - try output.writeIntLittle(u16, switch (self.ptr_width) { - .p32 => @as(u16, 98), - .p64 => 114, - }); - try output.writeAll("\x00" ** 14); - // Characteristics - IMAGE_FILE_RELOCS_STRIPPED | IMAGE_FILE_EXECUTABLE_IMAGE | IMAGE_FILE_DEBUG_STRIPPED - var characteristics: u16 = 0x0001 | 0x000 | 0x02002; // @TODO Remove debug info stripped flag when necessary + // Number of sections (we only use .got, .text) + std.mem.writeIntLittle(u16, hdr_data[index..][0..2], 2); + index += 2; + // TimeDateStamp (u32), PointerToSymbolTable (u32), NumberOfSymbols (u32) + std.mem.set(u8, hdr_data[index..][0..12], 0); + index += 12; + + const optional_header_size = switch (options.output_mode) { + .Exe => data_directory_count * 8 + switch (self.ptr_width) { + .p32 => @as(u16, 96), + .p64 => 112, + }, + else => 0, + }; + std.mem.writeIntLittle(u16, hdr_data[index..][0..2], optional_header_size); + index += 2; + + // Characteristics - IMAGE_FILE_DEBUG_STRIPPED + var characteristics: u16 = 0x200; // TODO Remove debug info stripped flag when necessary + if (options.output_mode == .Exe) { + // IMAGE_FILE_EXECUTABLE_IMAGE + characteristics |= 0x2; + } switch (self.ptr_width) { // IMAGE_FILE_32BIT_MACHINE - .p32 => characteristics |= 0x0100, + .p32 => characteristics |= 0x100, // IMAGE_FILE_LARGE_ADDRESS_AWARE - .p64 => characteristics |= 0x0020, + .p64 => characteristics |= 0x20, } - try output.writeIntLittle(u16, characteristics); - try output.writeIntLittle(u16, switch (self.ptr_width) { - .p32 => @as(u16, 0x10b), - .p64 => 0x20b, - }); + std.mem.writeIntLittle(u16, hdr_data[index..][0..2], characteristics); + index += 2; - // Start of optional header - // TODO Linker version, use 0.0 for now. 
- try output.writeAll("\x00" ** 2); - // Zero out every field until "BaseOfCode" - // @TODO Actually write entry point address, base of code address - try output.writeAll("\x00" ** 20); - switch (self.ptr_width) { - .p32 => { - // Zero out base of data - try output.writeAll("\x00" ** 4); - // Write image base - try output.writeIntLittle(u32, 0x40000000); - }, - .p64 => { - // Write image base - try output.writeIntLittle(u64, 0x40000000); - }, + assert(index == 20); + try self.base.file.?.pwriteAll(hdr_data[0..index], coff_file_header_offset); + + if (options.output_mode == .Exe) { + // Optional header + index = 0; + std.mem.writeIntLittle(u16, hdr_data[0..2], switch (self.ptr_width) { + .p32 => @as(u16, 0x10b), + .p64 => 0x20b, + }); + index += 2; + + // Linker version (u8 + u8), SizeOfCode (u32), SizeOfInitializedData (u32), SizeOfUninitializedData (u32), AddressOfEntryPoint (u32) + std.mem.set(u8, hdr_data[index..][0..18], 0); + index += 18; + + // Base of code relative to the image base + // @TODO Check where to put this + std.mem.writeIntLittle(u32, hdr_data[index..][0..4], 0x1000); + index += 4; + + if (self.ptr_width == .p32) { + // Base of data relative to the image base + std.mem.set(u8, hdr_data[index..][0..4], 0); + index += 4; + + // Image base address + std.mem.writeIntLittle(u32, hdr_data[index..][0..4], 0x400_000); + index += 4; + } else { + // Image base address + std.mem.writeIntLittle(u64, hdr_data[index..][0..8], 0x140_000_000); + index += 8; + } + + // Section alignment + std.mem.writeIntLittle(u32, hdr_data[index..][0..4], 4096); + index += 4; + // File alignment + std.mem.writeIntLittle(u32, hdr_data[index..][0..4], 512); + index += 4; + // Required OS version, 6.0 is vista + std.mem.writeIntLittle(u16, hdr_data[index..][0..2], 6); + index += 2; + std.mem.writeIntLittle(u16, hdr_data[index..][0..2], 0); + index += 2; + // Image version + std.mem.set(u8, hdr_data[index..][0..4], 0); + index += 4; + // Required subsystem version, same as OS version + std.mem.writeIntLittle(u16, hdr_data[index..][0..2], 6); + index += 2; + std.mem.writeIntLittle(u16, hdr_data[index..][0..2], 0); + index += 2; + // Reserved zeroes (u32), SizeOfImage (u32), SizeOfHeaders (u32), CheckSum (u32) + std.mem.set(u8, hdr_data[index..][0..16], 0); + index += 16; + // Subsystem, TODO: Let users specify the subsystem, always CUI for now + std.mem.writeIntLittle(u16, hdr_data[index..][0..2], 3); + index += 2; + // DLL characteristics, TODO: For now we are just using IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE + std.mem.writeIntLittle(u16, hdr_data[index..][0..2], 0x40); + index += 2; + + switch (self.ptr_width) { + .p32 => { + // @TODO See llvm output for 32 bit executables + // Size of stack reserve + commit + std.mem.writeIntLittle(u32, hdr_data[index..][0..4], 0x1_000_000); + index += 4; + std.mem.writeIntLittle(u32, hdr_data[index..][0..4], 0x1_000); + index += 4; + // Size of heap reserve + commit + std.mem.writeIntLittle(u32, hdr_data[index..][0..4], 0x100_000); + index += 4; + std.mem.writeIntLittle(u32, hdr_data[index..][0..4], 0x1_000); + index += 4; + }, + .p64 => { + // Size of stack reserve + commit + std.mem.writeIntLittle(u64, hdr_data[index..][0..8], 0x1_000_000); + index += 8; + std.mem.writeIntLittle(u64, hdr_data[index..][0..8], 0x1_000); + index += 8; + // Size of heap reserve + commit + std.mem.writeIntLittle(u64, hdr_data[index..][0..8], 0x100_000); + index += 8; + std.mem.writeIntLittle(u64, hdr_data[index..][0..8], 0x1_000); + index += 8; + }, + } + + // Reserved zeroes + 
std.mem.set(u8, hdr_data[index..][0..4], 0); + index += 4; + + // Number of data directories + std.mem.writeIntLittle(u32, hdr_data[index..][0..4], data_directory_count); + index += 4; + // @TODO Write meaningful stuff here + // Initialize data directories to zero + std.mem.set(u8, hdr_data[index..][0..data_directory_count * 8], 0); + index += data_directory_count * 8; + + assert(index == optional_header_size); } - // Section alignment - default to 256 - try output.writeIntLittle(u32, 256); - // File alignment - default to 512 - try output.writeIntLittle(u32, 512); - // TODO - Minimum required windows version - use 6.0 (aka vista for now) - try output.writeIntLittle(u16, 0x6); - try output.writeIntLittle(u16, 0x0); - // TODO - Image version - use 0.0 for now - try output.writeIntLittle(u32, 0x0); - // Subsystem version - try output.writeIntLittle(u16, 0x6); - try output.writeIntLittle(u16, 0x0); - // Reserved zeroes - try output.writeIntLittle(u32, 0x0); - // Size of image - initialize to zero - try output.writeIntLittle(u32, 0x0); - // @TODO Size of headers - calculate this. - try output.writeIntLittle(u32, 0x0); - // Checksum - try output.writeIntLittle(u32, 0x0); - // Subsystem - try output.writeIntLittle(u16, 0x3); - // @TODO Dll characteristics, just using a value from a LLVM produced executable for now. - try output.writeIntLittle(u16, 0x8160); - switch (self.ptr_width) { - .p32 => { - // Stack reserve - try output.writeIntLittle(u32, 0x1000000); - // Stack commit - try output.writeIntLittle(u32, 0x1000); - // Heap reserve - try output.writeIntLittle(u32, 0x100000); - // Heap commit - try output.writeIntLittle(u32, 0x100); - }, - .p64 => { - // Stack reserve - try output.writeIntLittle(u64, 0x1000000); - // Stack commit - try output.writeIntLittle(u64, 0x1000); - // Heap reserve - try output.writeIntLittle(u64, 0x100000); - // Heap commit - try output.writeIntLittle(u64, 0x100); - }, - } - // Reserved loader flags - try output.writeIntLittle(u32, 0x0); - // Number of RVA + sizes - try output.writeIntLittle(u32, 0x0); + // @TODO Merge this write with the one above + const section_table_offset = coff_file_header_offset + 20 + optional_header_size; + + // Write section table. 
+ // First, the .got section + hdr_data[index..][0..8].* = ".got\x00\x00\x00\x00".*; + index += 8; + // Virtual size (u32) (@TODO Set to initial value in image files, zero otherwise), Virtual address (u32) (@TODO Set to value in image files, zero otherwise), Size of raw data (u32) + std.mem.set(u8, hdr_data[index..][0..12], 0); + index += 12; + // File pointer to the start of the section + std.mem.writeIntLittle(u32, hdr_data[index..][0..4], section_table_offset + 2 * 40); + index += 4; + // Pointer to relocations (u32) (@TODO Initialize this for object files), PointerToLinenumbers (u32), NumberOfRelocations (u16), (@TODO Initialize this for object files), NumberOfLinenumbers (u16) + std.mem.set(u8, hdr_data[index..][0..12], 0); + index += 12; + // Characteristics `IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_MEM_READ = 0x40000040` + std.mem.writeIntLittle(u32, hdr_data[index..][0..4], 0x40000040); + index += 4; + // Then, the .text section + hdr_data[index..][0..8].* = ".text\x00\x00\x00".*; + index += 8; + // Virtual size (u32) (@TODO Set to initial value in image files, zero otherwise), Virtual address (u32) (@TODO Set to value in image files, zero otherwise), Size of raw data (u32) + std.mem.set(u8, hdr_data[index..][0..12], 0); + index += 12; + // File pointer to the start of the section (@TODO Add the initial size of .got) + std.mem.writeIntLittle(u32, hdr_data[index..][0..4], section_table_offset + 2 * 40); + index += 4; + // Pointer to relocations (u32) (@TODO Initialize this for object files), PointerToLinenumbers (u32), NumberOfRelocations (u16), (@TODO Initialize this for object files), NumberOfLinenumbers (u16) + std.mem.set(u8, hdr_data[index..][0..12], 0); + index += 12; + // Characteristics `IMAGE_SCN_CNT_CODE | IMAGE_SCN_MEM_EXECUTE | IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_WRITE = 0xE0000020` + std.mem.writeIntLittle(u32, hdr_data[index..][0..4], 0xE0000020); + index += 4; + + assert(index == optional_header_size + 2 * 40); + try self.base.file.?.pwriteAll(hdr_data[0..index], coff_file_header_offset + 20); return self; } From e9b137f23a9b38c6b808452ef216e4e18d3070ed Mon Sep 17 00:00:00 2001 From: Alexandros Naskos Date: Thu, 3 Sep 2020 18:24:42 +0300 Subject: [PATCH 38/56] Completed basic PE linker for stage2 Added std.coff.MachineType Added image characteristic and section flag valued to std.coff Added std.Target.Cpu.Arch.toCoffMachine Fixed stage2 --watch flag on windows --- lib/std/coff.zig | 66 ++++ lib/std/target.zig | 57 +++ src-self-hosted/Module.zig | 4 +- src-self-hosted/codegen.zig | 276 +++++++++------ src-self-hosted/link.zig | 4 +- src-self-hosted/link/Coff.zig | 629 +++++++++++++++++++++++++++++----- src-self-hosted/link/Elf.zig | 8 +- src-self-hosted/main.zig | 13 +- 8 files changed, 856 insertions(+), 201 deletions(-) diff --git a/lib/std/coff.zig b/lib/std/coff.zig index cd567b3a6e..ea3a232187 100644 --- a/lib/std/coff.zig +++ b/lib/std/coff.zig @@ -18,11 +18,77 @@ const IMAGE_FILE_MACHINE_I386 = 0x014c; const IMAGE_FILE_MACHINE_IA64 = 0x0200; const IMAGE_FILE_MACHINE_AMD64 = 0x8664; +pub const MachineType = enum(u16) { + Unknown = 0x0, + /// Matsushita AM33 + AM33 = 0x1d3, + /// x64 + X64 = 0x8664, + /// ARM little endian + ARM = 0x1c0, + /// ARM64 little endian + ARM64 = 0xaa64, + /// ARM Thumb-2 little endian + ARMNT = 0x1c4, + /// EFI byte code + EBC = 0xebc, + /// Intel 386 or later processors and compatible processors + I386 = 0x14c, + /// Intel Itanium processor family + IA64 = 0x200, + /// Mitsubishi M32R little endian + M32R = 0x9041, + /// MIPS16 + 
MIPS16 = 0x266, + /// MIPS with FPU + MIPSFPU = 0x366, + /// MIPS16 with FPU + MIPSFPU16 = 0x466, + /// Power PC little endian + POWERPC = 0x1f0, + /// Power PC with floating point support + POWERPCFP = 0x1f1, + /// MIPS little endian + R4000 = 0x166, + /// RISC-V 32-bit address space + RISCV32 = 0x5032, + /// RISC-V 64-bit address space + RISCV64 = 0x5064, + /// RISC-V 128-bit address space + RISCV128 = 0x5128, + /// Hitachi SH3 + SH3 = 0x1a2, + /// Hitachi SH3 DSP + SH3DSP = 0x1a3, + /// Hitachi SH4 + SH4 = 0x1a6, + /// Hitachi SH5 + SH5 = 0x1a8, + /// Thumb + Thumb = 0x1c2, + /// MIPS little-endian WCE v2 + WCEMIPSV2 = 0x169, +}; + // OptionalHeader.magic values // see https://msdn.microsoft.com/en-us/library/windows/desktop/ms680339(v=vs.85).aspx const IMAGE_NT_OPTIONAL_HDR32_MAGIC = 0x10b; const IMAGE_NT_OPTIONAL_HDR64_MAGIC = 0x20b; +// Image Characteristics +pub const IMAGE_FILE_RELOCS_STRIPPED = 0x1; +pub const IMAGE_FILE_DEBUG_STRIPPED = 0x200; +pub const IMAGE_FILE_EXECUTABLE_IMAGE = 0x2; +pub const IMAGE_FILE_32BIT_MACHINE = 0x100; +pub const IMAGE_FILE_LARGE_ADDRESS_AWARE = 0x20; + +// Section flags +pub const IMAGE_SCN_CNT_INITIALIZED_DATA = 0x40; +pub const IMAGE_SCN_MEM_READ = 0x40000000; +pub const IMAGE_SCN_CNT_CODE = 0x20; +pub const IMAGE_SCN_MEM_EXECUTE = 0x20000000; +pub const IMAGE_SCN_MEM_WRITE = 0x80000000; + const IMAGE_NUMBEROF_DIRECTORY_ENTRIES = 16; const IMAGE_DEBUG_TYPE_CODEVIEW = 2; const DEBUG_DIRECTORY = 6; diff --git a/lib/std/target.zig b/lib/std/target.zig index 034ab780d0..4fd59b690f 100644 --- a/lib/std/target.zig +++ b/lib/std/target.zig @@ -771,6 +771,63 @@ pub const Target = struct { }; } + pub fn toCoffMachine(arch: Arch) std.coff.MachineType { + return switch (arch) { + .avr => .Unknown, + .msp430 => .Unknown, + .arc => .Unknown, + .arm => .ARM, + .armeb => .Unknown, + .hexagon => .Unknown, + .le32 => .Unknown, + .mips => .Unknown, + .mipsel => .Unknown, + .powerpc => .POWERPC, + .r600 => .Unknown, + .riscv32 => .RISCV32, + .sparc => .Unknown, + .sparcel => .Unknown, + .tce => .Unknown, + .tcele => .Unknown, + .thumb => .Thumb, + .thumbeb => .Thumb, + .i386 => .I386, + .xcore => .Unknown, + .nvptx => .Unknown, + .amdil => .Unknown, + .hsail => .Unknown, + .spir => .Unknown, + .kalimba => .Unknown, + .shave => .Unknown, + .lanai => .Unknown, + .wasm32 => .Unknown, + .renderscript32 => .Unknown, + .aarch64_32 => .ARM64, + .aarch64 => .ARM64, + .aarch64_be => .Unknown, + .mips64 => .Unknown, + .mips64el => .Unknown, + .powerpc64 => .Unknown, + .powerpc64le => .Unknown, + .riscv64 => .RISCV64, + .x86_64 => .X64, + .nvptx64 => .Unknown, + .le64 => .Unknown, + .amdil64 => .Unknown, + .hsail64 => .Unknown, + .spir64 => .Unknown, + .wasm64 => .Unknown, + .renderscript64 => .Unknown, + .amdgcn => .Unknown, + .bpfel => .Unknown, + .bpfeb => .Unknown, + .sparcv9 => .Unknown, + .s390x => .Unknown, + .ve => .Unknown, + .spu_2 => .Unknown, + }; + } + pub fn endian(arch: Arch) builtin.Endian { return switch (arch) { .avr, diff --git a/src-self-hosted/Module.zig b/src-self-hosted/Module.zig index 01c3cdd449..16f465c9d0 100644 --- a/src-self-hosted/Module.zig +++ b/src-self-hosted/Module.zig @@ -2081,14 +2081,14 @@ fn allocateNewDecl( .deletion_flag = false, .contents_hash = contents_hash, .link = switch (self.bin_file.tag) { - .coff => .{ .coff = {} }, // @TODO + .coff => .{ .coff = link.File.Coff.TextBlock.empty }, .elf => .{ .elf = link.File.Elf.TextBlock.empty }, .macho => .{ .macho = link.File.MachO.TextBlock.empty }, .c => .{ .c = {} }, .wasm => .{ .wasm = 
{} }, }, .fn_link = switch (self.bin_file.tag) { - .coff => .{ .coff = {} }, // @TODO + .coff => .{ .coff = {} }, .elf => .{ .elf = link.File.Elf.SrcFn.empty }, .macho => .{ .macho = link.File.MachO.SrcFn.empty }, .c => .{ .c = {} }, diff --git a/src-self-hosted/codegen.zig b/src-self-hosted/codegen.zig index be86111b58..9405a5f72c 100644 --- a/src-self-hosted/codegen.zig +++ b/src-self-hosted/codegen.zig @@ -59,14 +59,21 @@ pub const GenerateSymbolError = error{ AnalysisFail, }; +pub const DebugInfoOutput = union(enum) { + dwarf: struct { + dbg_line: *std.ArrayList(u8), + dbg_info: *std.ArrayList(u8), + dbg_info_type_relocs: *link.File.DbgInfoTypeRelocsTable, + }, + none, +}; + pub fn generateSymbol( bin_file: *link.File, src: usize, typed_value: TypedValue, code: *std.ArrayList(u8), - dbg_line: *std.ArrayList(u8), - dbg_info: *std.ArrayList(u8), - dbg_info_type_relocs: *link.File.DbgInfoTypeRelocsTable, + debug_output: DebugInfoOutput, ) GenerateSymbolError!Result { const tracy = trace(@src()); defer tracy.end(); @@ -76,56 +83,56 @@ pub fn generateSymbol( switch (bin_file.options.target.cpu.arch) { .wasm32 => unreachable, // has its own code path .wasm64 => unreachable, // has its own code path - .arm => return Function(.arm).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs), - .armeb => return Function(.armeb).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs), - //.aarch64 => return Function(.aarch64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs), - //.aarch64_be => return Function(.aarch64_be).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs), - //.aarch64_32 => return Function(.aarch64_32).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs), - //.arc => return Function(.arc).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs), - //.avr => return Function(.avr).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs), - //.bpfel => return Function(.bpfel).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs), - //.bpfeb => return Function(.bpfeb).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs), - //.hexagon => return Function(.hexagon).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs), - //.mips => return Function(.mips).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs), - //.mipsel => return Function(.mipsel).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs), - //.mips64 => return Function(.mips64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs), - //.mips64el => return Function(.mips64el).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs), - //.msp430 => return Function(.msp430).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs), - //.powerpc => return Function(.powerpc).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs), - //.powerpc64 => return Function(.powerpc64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs), - //.powerpc64le => return Function(.powerpc64le).generateSymbol(bin_file, src, typed_value, 
code, dbg_line, dbg_info, dbg_info_type_relocs), - //.r600 => return Function(.r600).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs), - //.amdgcn => return Function(.amdgcn).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs), - //.riscv32 => return Function(.riscv32).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs), - .riscv64 => return Function(.riscv64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs), - //.sparc => return Function(.sparc).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs), - //.sparcv9 => return Function(.sparcv9).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs), - //.sparcel => return Function(.sparcel).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs), - //.s390x => return Function(.s390x).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs), - .spu_2 => return Function(.spu_2).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs), - //.tce => return Function(.tce).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs), - //.tcele => return Function(.tcele).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs), - //.thumb => return Function(.thumb).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs), - //.thumbeb => return Function(.thumbeb).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs), - //.i386 => return Function(.i386).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs), - .x86_64 => return Function(.x86_64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs), - //.xcore => return Function(.xcore).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs), - //.nvptx => return Function(.nvptx).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs), - //.nvptx64 => return Function(.nvptx64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs), - //.le32 => return Function(.le32).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs), - //.le64 => return Function(.le64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs), - //.amdil => return Function(.amdil).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs), - //.amdil64 => return Function(.amdil64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs), - //.hsail => return Function(.hsail).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs), - //.hsail64 => return Function(.hsail64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs), - //.spir => return Function(.spir).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs), - //.spir64 => return Function(.spir64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs), - //.kalimba => return Function(.kalimba).generateSymbol(bin_file, src, typed_value, code, dbg_line, 
dbg_info, dbg_info_type_relocs), - //.shave => return Function(.shave).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs), - //.lanai => return Function(.lanai).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs), - //.renderscript32 => return Function(.renderscript32).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs), - //.renderscript64 => return Function(.renderscript64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs), - //.ve => return Function(.ve).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs), + .arm => return Function(.arm).generateSymbol(bin_file, src, typed_value, code, debug_output), + .armeb => return Function(.armeb).generateSymbol(bin_file, src, typed_value, code, debug_output), + //.aarch64 => return Function(.aarch64).generateSymbol(bin_file, src, typed_value, code, debug_output), + //.aarch64_be => return Function(.aarch64_be).generateSymbol(bin_file, src, typed_value, code, debug_output), + //.aarch64_32 => return Function(.aarch64_32).generateSymbol(bin_file, src, typed_value, code, debug_output), + //.arc => return Function(.arc).generateSymbol(bin_file, src, typed_value, code, debug_output), + //.avr => return Function(.avr).generateSymbol(bin_file, src, typed_value, code, debug_output), + //.bpfel => return Function(.bpfel).generateSymbol(bin_file, src, typed_value, code, debug_output), + //.bpfeb => return Function(.bpfeb).generateSymbol(bin_file, src, typed_value, code, debug_output), + //.hexagon => return Function(.hexagon).generateSymbol(bin_file, src, typed_value, code, debug_output), + //.mips => return Function(.mips).generateSymbol(bin_file, src, typed_value, code, debug_output), + //.mipsel => return Function(.mipsel).generateSymbol(bin_file, src, typed_value, code, debug_output), + //.mips64 => return Function(.mips64).generateSymbol(bin_file, src, typed_value, code, debug_output), + //.mips64el => return Function(.mips64el).generateSymbol(bin_file, src, typed_value, code, debug_output), + //.msp430 => return Function(.msp430).generateSymbol(bin_file, src, typed_value, code, debug_output), + //.powerpc => return Function(.powerpc).generateSymbol(bin_file, src, typed_value, code, debug_output), + //.powerpc64 => return Function(.powerpc64).generateSymbol(bin_file, src, typed_value, code, debug_output), + //.powerpc64le => return Function(.powerpc64le).generateSymbol(bin_file, src, typed_value, code, debug_output), + //.r600 => return Function(.r600).generateSymbol(bin_file, src, typed_value, code, debug_output), + //.amdgcn => return Function(.amdgcn).generateSymbol(bin_file, src, typed_value, code, debug_output), + //.riscv32 => return Function(.riscv32).generateSymbol(bin_file, src, typed_value, code, debug_output), + .riscv64 => return Function(.riscv64).generateSymbol(bin_file, src, typed_value, code, debug_output), + //.sparc => return Function(.sparc).generateSymbol(bin_file, src, typed_value, code, debug_output), + //.sparcv9 => return Function(.sparcv9).generateSymbol(bin_file, src, typed_value, code, debug_output), + //.sparcel => return Function(.sparcel).generateSymbol(bin_file, src, typed_value, code, debug_output), + //.s390x => return Function(.s390x).generateSymbol(bin_file, src, typed_value, code, debug_output), + .spu_2 => return Function(.spu_2).generateSymbol(bin_file, src, typed_value, code, debug_output), + //.tce => return 
Function(.tce).generateSymbol(bin_file, src, typed_value, code, debug_output), + //.tcele => return Function(.tcele).generateSymbol(bin_file, src, typed_value, code, debug_output), + //.thumb => return Function(.thumb).generateSymbol(bin_file, src, typed_value, code, debug_output), + //.thumbeb => return Function(.thumbeb).generateSymbol(bin_file, src, typed_value, code, debug_output), + //.i386 => return Function(.i386).generateSymbol(bin_file, src, typed_value, code, debug_output), + .x86_64 => return Function(.x86_64).generateSymbol(bin_file, src, typed_value, code, debug_output), + //.xcore => return Function(.xcore).generateSymbol(bin_file, src, typed_value, code, debug_output), + //.nvptx => return Function(.nvptx).generateSymbol(bin_file, src, typed_value, code, debug_output), + //.nvptx64 => return Function(.nvptx64).generateSymbol(bin_file, src, typed_value, code, debug_output), + //.le32 => return Function(.le32).generateSymbol(bin_file, src, typed_value, code, debug_output), + //.le64 => return Function(.le64).generateSymbol(bin_file, src, typed_value, code, debug_output), + //.amdil => return Function(.amdil).generateSymbol(bin_file, src, typed_value, code, debug_output), + //.amdil64 => return Function(.amdil64).generateSymbol(bin_file, src, typed_value, code, debug_output), + //.hsail => return Function(.hsail).generateSymbol(bin_file, src, typed_value, code, debug_output), + //.hsail64 => return Function(.hsail64).generateSymbol(bin_file, src, typed_value, code, debug_output), + //.spir => return Function(.spir).generateSymbol(bin_file, src, typed_value, code, debug_output), + //.spir64 => return Function(.spir64).generateSymbol(bin_file, src, typed_value, code, debug_output), + //.kalimba => return Function(.kalimba).generateSymbol(bin_file, src, typed_value, code, debug_output), + //.shave => return Function(.shave).generateSymbol(bin_file, src, typed_value, code, debug_output), + //.lanai => return Function(.lanai).generateSymbol(bin_file, src, typed_value, code, debug_output), + //.renderscript32 => return Function(.renderscript32).generateSymbol(bin_file, src, typed_value, code, debug_output), + //.renderscript64 => return Function(.renderscript64).generateSymbol(bin_file, src, typed_value, code, debug_output), + //.ve => return Function(.ve).generateSymbol(bin_file, src, typed_value, code, debug_output), else => @panic("Backend architectures that don't have good support yet are commented out, to improve compilation performance. If you are interested in one of these other backends feel free to uncomment them. 
Eventually these will be completed, but stage1 is slow and a memory hog."), } }, @@ -139,7 +146,7 @@ pub fn generateSymbol( switch (try generateSymbol(bin_file, src, .{ .ty = typed_value.ty.elemType(), .val = sentinel, - }, code, dbg_line, dbg_info, dbg_info_type_relocs)) { + }, code, debug_output)) { .appended => return Result{ .appended = {} }, .externally_managed => |slice| { code.appendSliceAssumeCapacity(slice); @@ -239,9 +246,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { target: *const std.Target, mod_fn: *const Module.Fn, code: *std.ArrayList(u8), - dbg_line: *std.ArrayList(u8), - dbg_info: *std.ArrayList(u8), - dbg_info_type_relocs: *link.File.DbgInfoTypeRelocsTable, + debug_output: DebugInfoOutput, err_msg: ?*ErrorMsg, args: []MCValue, ret_mcv: MCValue, @@ -419,9 +424,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { src: usize, typed_value: TypedValue, code: *std.ArrayList(u8), - dbg_line: *std.ArrayList(u8), - dbg_info: *std.ArrayList(u8), - dbg_info_type_relocs: *link.File.DbgInfoTypeRelocsTable, + debug_output: DebugInfoOutput, ) GenerateSymbolError!Result { const module_fn = typed_value.val.cast(Value.Payload.Function).?.func; @@ -457,9 +460,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .bin_file = bin_file, .mod_fn = module_fn, .code = code, - .dbg_line = dbg_line, - .dbg_info = dbg_info, - .dbg_info_type_relocs = dbg_info_type_relocs, + .debug_output = debug_output, .err_msg = null, .args = undefined, // populated after `resolveCallingConventionValues` .ret_mcv = undefined, // populated after `resolveCallingConventionValues` @@ -598,35 +599,50 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } fn dbgSetPrologueEnd(self: *Self) InnerError!void { - try self.dbg_line.append(DW.LNS_set_prologue_end); - try self.dbgAdvancePCAndLine(self.prev_di_src); + switch (self.debug_output) { + .dwarf => |dbg_out| { + try dbg_out.dbg_line.append(DW.LNS_set_prologue_end); + try self.dbgAdvancePCAndLine(self.prev_di_src); + }, + .none => {}, + } } fn dbgSetEpilogueBegin(self: *Self) InnerError!void { - try self.dbg_line.append(DW.LNS_set_epilogue_begin); - try self.dbgAdvancePCAndLine(self.prev_di_src); + switch (self.debug_output) { + .dwarf => |dbg_out| { + try dbg_out.dbg_line.append(DW.LNS_set_epilogue_begin); + try self.dbgAdvancePCAndLine(self.prev_di_src); + }, + .none => {}, + } } fn dbgAdvancePCAndLine(self: *Self, src: usize) InnerError!void { - // TODO Look into improving the performance here by adding a token-index-to-line - // lookup table, and changing ir.Inst from storing byte offset to token. Currently - // this involves scanning over the source code for newlines - // (but only from the previous byte offset to the new one). - const delta_line = std.zig.lineDelta(self.source, self.prev_di_src, src); - const delta_pc = self.code.items.len - self.prev_di_pc; self.prev_di_src = src; self.prev_di_pc = self.code.items.len; - // TODO Look into using the DWARF special opcodes to compress this data. It lets you emit - // single-byte opcodes that add different numbers to both the PC and the line number - // at the same time. 
- try self.dbg_line.ensureCapacity(self.dbg_line.items.len + 11); - self.dbg_line.appendAssumeCapacity(DW.LNS_advance_pc); - leb128.writeULEB128(self.dbg_line.writer(), delta_pc) catch unreachable; - if (delta_line != 0) { - self.dbg_line.appendAssumeCapacity(DW.LNS_advance_line); - leb128.writeILEB128(self.dbg_line.writer(), delta_line) catch unreachable; + switch (self.debug_output) { + .dwarf => |dbg_out| { + // TODO Look into improving the performance here by adding a token-index-to-line + // lookup table, and changing ir.Inst from storing byte offset to token. Currently + // this involves scanning over the source code for newlines + // (but only from the previous byte offset to the new one). + const delta_line = std.zig.lineDelta(self.source, self.prev_di_src, src); + const delta_pc = self.code.items.len - self.prev_di_pc; + // TODO Look into using the DWARF special opcodes to compress this data. It lets you emit + // single-byte opcodes that add different numbers to both the PC and the line number + // at the same time. + try dbg_out.dbg_line.ensureCapacity(dbg_out.dbg_line.items.len + 11); + dbg_out.dbg_line.appendAssumeCapacity(DW.LNS_advance_pc); + leb128.writeULEB128(dbg_out.dbg_line.writer(), delta_pc) catch unreachable; + if (delta_line != 0) { + dbg_out.dbg_line.appendAssumeCapacity(DW.LNS_advance_line); + leb128.writeILEB128(dbg_out.dbg_line.writer(), delta_line) catch unreachable; + } + dbg_out.dbg_line.appendAssumeCapacity(DW.LNS_copy); + }, + .none => {}, } - self.dbg_line.appendAssumeCapacity(DW.LNS_copy); } /// Asserts there is already capacity to insert into top branch inst_table. @@ -654,18 +670,23 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { /// Adds a Type to the .debug_info at the current position. The bytes will be populated later, /// after codegen for this symbol is done. 
fn addDbgInfoTypeReloc(self: *Self, ty: Type) !void { - assert(ty.hasCodeGenBits()); - const index = self.dbg_info.items.len; - try self.dbg_info.resize(index + 4); // DW.AT_type, DW.FORM_ref4 + switch (self.debug_output) { + .dwarf => |dbg_out| { + assert(ty.hasCodeGenBits()); + const index = dbg_out.dbg_info.items.len; + try dbg_out.dbg_info.resize(index + 4); // DW.AT_type, DW.FORM_ref4 - const gop = try self.dbg_info_type_relocs.getOrPut(self.gpa, ty); - if (!gop.found_existing) { - gop.entry.value = .{ - .off = undefined, - .relocs = .{}, - }; + const gop = try dbg_out.dbg_info_type_relocs.getOrPut(self.gpa, ty); + if (!gop.found_existing) { + gop.entry.value = .{ + .off = undefined, + .relocs = .{}, + }; + } + try gop.entry.value.relocs.append(self.gpa, @intCast(u32, index)); + }, + .none => {}, } - try gop.entry.value.relocs.append(self.gpa, @intCast(u32, index)); } fn genFuncInst(self: *Self, inst: *ir.Inst) !MCValue { @@ -1258,14 +1279,19 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { self.registers.putAssumeCapacityNoClobber(toCanonicalReg(reg), &inst.base); self.markRegUsed(reg); - try self.dbg_info.ensureCapacity(self.dbg_info.items.len + 8 + name_with_null.len); - self.dbg_info.appendAssumeCapacity(link.File.Elf.abbrev_parameter); - self.dbg_info.appendSliceAssumeCapacity(&[2]u8{ // DW.AT_location, DW.FORM_exprloc - 1, // ULEB128 dwarf expression length - reg.dwarfLocOp(), - }); - try self.addDbgInfoTypeReloc(inst.base.ty); // DW.AT_type, DW.FORM_ref4 - self.dbg_info.appendSliceAssumeCapacity(name_with_null); // DW.AT_name, DW.FORM_string + switch (self.debug_output) { + .dwarf => |dbg_out| { + try dbg_out.dbg_info.ensureCapacity(dbg_out.dbg_info.items.len + 8 + name_with_null.len); + dbg_out.dbg_info.appendAssumeCapacity(link.File.Elf.abbrev_parameter); + dbg_out.dbg_info.appendSliceAssumeCapacity(&[2]u8{ // DW.AT_location, DW.FORM_exprloc + 1, // ULEB128 dwarf expression length + reg.dwarfLocOp(), + }); + try self.addDbgInfoTypeReloc(inst.base.ty); // DW.AT_type, DW.FORM_ref4 + dbg_out.dbg_info.appendSliceAssumeCapacity(name_with_null); // DW.AT_name, DW.FORM_string + }, + .none => {}, + } }, else => {}, } @@ -1302,7 +1328,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { // Due to incremental compilation, how function calls are generated depends // on linking. 
- if (self.bin_file.cast(link.File.Elf)) |elf_file| { + if (self.bin_file.tag == link.File.Elf.base_tag or self.bin_file.tag == link.File.Coff.base_tag) { switch (arch) { .x86_64 => { for (info.args) |mc_arg, arg_i| { @@ -1341,10 +1367,17 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { if (inst.func.cast(ir.Inst.Constant)) |func_inst| { if (func_inst.val.cast(Value.Payload.Function)) |func_val| { const func = func_val.func; - const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?]; + const ptr_bits = self.target.cpu.arch.ptrBitWidth(); const ptr_bytes: u64 = @divExact(ptr_bits, 8); - const got_addr = @intCast(u32, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * ptr_bytes); + const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: { + const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?]; + break :blk @intCast(u32, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * ptr_bytes); + } else if (self.bin_file.cast(link.File.Coff)) |coff_file| + @intCast(u32, coff_file.offset_table_virtual_address + func.owner_decl.link.coff.offset_table_index * ptr_bytes) + else + unreachable; + // ff 14 25 xx xx xx xx call [addr] try self.code.ensureCapacity(self.code.items.len + 7); self.code.appendSliceAssumeCapacity(&[3]u8{ 0xff, 0x14, 0x25 }); @@ -1362,10 +1395,16 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { if (inst.func.cast(ir.Inst.Constant)) |func_inst| { if (func_inst.val.cast(Value.Payload.Function)) |func_val| { const func = func_val.func; - const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?]; + const ptr_bits = self.target.cpu.arch.ptrBitWidth(); const ptr_bytes: u64 = @divExact(ptr_bits, 8); - const got_addr = @intCast(u32, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * ptr_bytes); + const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: { + const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?]; + break :blk @intCast(u32, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * ptr_bytes); + } else if (self.bin_file.cast(link.File.Coff)) |coff_file| + coff_file.offset_table_virtual_address + func.owner_decl.link.coff.offset_table_index * ptr_bytes + else + unreachable; try self.genSetReg(inst.base.src, .ra, .{ .memory = got_addr }); mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.jalr(.ra, 0, .ra).toU32()); @@ -1383,8 +1422,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } if (func_inst.val.cast(Value.Payload.Function)) |func_val| { const func = func_val.func; - const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?]; - const got_addr = @intCast(u16, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * 2); + const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: { + const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?]; + break :blk @intCast(u16, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * 2); + } else if (self.bin_file.cast(link.File.Coff)) |coff_file| + @intCast(u16, coff_file.offset_table_virtual_address + func.owner_decl.link.coff.offset_table_index * 2) + else + unreachable; + const return_type = func.owner_decl.typed_value.most_recent.typed_value.ty.fnReturnType(); // First, push the return address, then jump; if noreturn, don't bother with the first step // TODO: implement packed struct -> u16 at comptime and move the bitcast here @@ -1420,10 +1465,15 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { if 
(inst.func.cast(ir.Inst.Constant)) |func_inst| { if (func_inst.val.cast(Value.Payload.Function)) |func_val| { const func = func_val.func; - const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?]; const ptr_bits = self.target.cpu.arch.ptrBitWidth(); const ptr_bytes: u64 = @divExact(ptr_bits, 8); - const got_addr = @intCast(u32, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * ptr_bytes); + const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: { + const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?]; + break :blk @intCast(u32, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * ptr_bytes); + } else if (self.bin_file.cast(link.File.Coff)) |coff_file| + coff_file.offset_table_virtual_address + func.owner_decl.link.coff.offset_table_index * ptr_bytes + else + unreachable; // TODO only works with leaf functions // at the moment, which works fine for @@ -1983,7 +2033,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { if (mem.eql(u8, inst.asm_source, "syscall")) { try self.code.appendSlice(&[_]u8{ 0x0f, 0x05 }); - } else { + } else if (inst.asm_source.len != 0) { return self.fail(inst.base.src, "TODO implement support for more x86 assembly instructions", .{}); } @@ -2541,6 +2591,10 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const got = &macho_file.sections.items[macho_file.got_section_index.?]; const got_addr = got.addr + decl.link.macho.offset_table_index.? * ptr_bytes; return MCValue{ .memory = got_addr }; + } else if (self.bin_file.cast(link.File.Coff)) |coff_file| { + const decl = payload.decl; + const got_addr = coff_file.offset_table_virtual_address + decl.link.coff.offset_table_index * ptr_bytes; + return MCValue{ .memory = got_addr }; } else { return self.fail(src, "TODO codegen non-ELF const Decl pointer", .{}); } diff --git a/src-self-hosted/link.zig b/src-self-hosted/link.zig index 4189d636b1..655f95ecac 100644 --- a/src-self-hosted/link.zig +++ b/src-self-hosted/link.zig @@ -34,7 +34,7 @@ pub const File = struct { pub const LinkBlock = union { elf: Elf.TextBlock, - coff: void, // @TODO + coff: Coff.TextBlock, macho: MachO.TextBlock, c: void, wasm: void, @@ -42,7 +42,7 @@ pub const File = struct { pub const LinkFn = union { elf: Elf.SrcFn, - coff: void, // @TODO + coff: Coff.SrcFn, macho: MachO.SrcFn, c: void, wasm: ?Wasm.FnData, diff --git a/src-self-hosted/link/Coff.zig b/src-self-hosted/link/Coff.zig index b77944b2cf..4d1f95e567 100644 --- a/src-self-hosted/link/Coff.zig +++ b/src-self-hosted/link/Coff.zig @@ -6,10 +6,22 @@ const Allocator = std.mem.Allocator; const assert = std.debug.assert; const fs = std.fs; +const trace = @import("../tracy.zig").trace; const Module = @import("../Module.zig"); -const codegen = @import("../codegen/wasm.zig"); +const codegen = @import("../codegen.zig"); const link = @import("../link.zig"); +const allocation_padding = 4 / 3; +const minimum_text_block_size = 64 * allocation_padding; + +const section_alignment = 4096; +const file_alignment = 512; +const image_base = 0x400_000; +const section_table_size = 2 * 40; +comptime { + std.debug.assert(std.mem.isAligned(image_base, section_alignment)); +} + pub const base_tag: link.File.Tag = .coff; const msdos_stub = @embedFile("msdos-stub.bin"); @@ -18,8 +30,85 @@ base: link.File, ptr_width: enum { p32, p64 }, error_flags: link.File.ErrorFlags = .{}, -coff_file_header_dirty: bool = false, -optional_header_dirty: bool = false, +text_block_free_list: std.ArrayListUnmanaged(*TextBlock) = .{}, +last_text_block: 
?*TextBlock = null, + +/// Section table file pointer. +section_table_offset: u32 = 0, +/// Section data file pointer. +section_data_offset: u32 = 0, +/// Optional header file pointer. +optional_header_offset: u32 = 0, + +/// Absolute virtual address of the offset table when the executable is loaded in memory. +offset_table_virtual_address: u32 = 0, +/// Current size of the offset table on disk, must be a multiple of `file_alignment` +offset_table_size: u32 = 0, +/// Contains absolute virtual addresses +offset_table: std.ArrayListUnmanaged(u64) = .{}, +/// Free list of offset table indices +offset_table_free_list: std.ArrayListUnmanaged(u32) = .{}, + +/// Virtual address of the entry point procedure relative to `image_base` +entry_addr: ?u32 = null, + +/// Absolute virtual address of the text section when the executable is loaded in memory. +text_section_virtual_address: u32 = 0, +/// Current size of the `.text` section on disk, must be a multiple of `file_alignment` +text_section_size: u32 = 0, + +offset_table_size_dirty: bool = false, +text_section_size_dirty: bool = false, +/// This flag is set when the virtual size of the whole image file when loaded in memory has changed +/// and needs to be updated in the optional header. +size_of_image_dirty: bool = false, + +pub const TextBlock = struct { + /// Offset of the code relative to the start of the text section + text_offset: u32, + /// Used size of the text block + size: u32, + /// This field is undefined for symbols with size = 0. + offset_table_index: u32, + /// Points to the previous and next neighbors, based on the `text_offset`. + /// This can be used to find, for example, the capacity of this `TextBlock`. + prev: ?*TextBlock, + next: ?*TextBlock, + + pub const empty = TextBlock{ + .text_offset = 0, + .size = 0, + .offset_table_index = undefined, + .prev = null, + .next = null, + }; + + /// Returns how much room there is to grow in virtual address space. + fn capacity(self: TextBlock) u64 { + if (self.next) |next| { + return next.text_offset - self.text_offset; + } + // This is the last block, the capacity is only limited by the address space. + return std.math.maxInt(u32) - self.text_offset; + } + + fn freeListEligible(self: TextBlock) bool { + // No need to keep a free list node for the last block. + const next = self.next orelse return false; + const cap = next.text_offset - self.text_offset; + const ideal_cap = self.size * allocation_padding; + if (cap <= ideal_cap) return false; + const surplus = cap - ideal_cap; + return surplus >= minimum_text_block_size; + } + + /// Absolute virtual address of the text block when the file is loaded in memory. + fn getVAddr(self: TextBlock, coff: Coff) u32 { + return coff.text_section_virtual_address + self.text_offset; + } +}; + +pub const SrcFn = void; pub fn openPath(allocator: *Allocator, dir: fs.Dir, sub_path: []const u8, options: link.Options) !*link.File { assert(options.object_format == .coff); @@ -42,7 +131,7 @@ pub fn openPath(allocator: *Allocator, dir: fs.Dir, sub_path: []const u8, option fn openFile(allocator: *Allocator, file: fs.File, options: link.Options) !Coff { switch (options.output_mode) { .Exe => {}, - .Obj => return error.IncrFailed, // @TODO DO OBJ FILES + .Obj => return error.IncrFailed, .Lib => return error.IncrFailed, } var self: Coff = .{ @@ -67,8 +156,10 @@ fn openFile(allocator: *Allocator, file: fs.File, options: link.Options) !Coff { /// Truncates the existing file contents and overwrites the contents.
/// Returns an error if `file` is not already open with +read +write +seek abilities. fn createFile(allocator: *Allocator, file: fs.File, options: link.Options) !Coff { + // TODO Write object specific relocations, COFF symbol table, then enable object file output. switch (options.output_mode) { - .Exe, .Obj => {}, + .Exe => {}, + .Obj => return error.TODOImplementWritingObjFiles, .Lib => return error.TODOImplementWritingLibFiles, } var self: Coff = .{ @@ -83,8 +174,6 @@ fn createFile(allocator: *Allocator, file: fs.File, options: link.Options) !Coff 64 => .p64, else => return error.UnsupportedCOFFArchitecture, }, - .coff_file_header_dirty = true, - .optional_header_dirty = true, }; errdefer self.deinit(); @@ -97,18 +186,14 @@ fn createFile(allocator: *Allocator, file: fs.File, options: link.Options) !Coff // COFF file header const data_directory_count = 0; - var hdr_data: [112 + data_directory_count * 8 + 2 * 40]u8 = undefined; + var hdr_data: [112 + data_directory_count * 8 + section_table_size]u8 = undefined; var index: usize = 0; - // @TODO Add an enum(u16) in std.coff, add .toCoffMachine to Arch - const machine_type: u16 = switch (self.base.options.target.cpu.arch) { - .x86_64 => 0x8664, - .i386 => 0x014c, - .riscv32 => 0x5032, - .riscv64 => 0x5064, - else => return error.UnsupportedCOFFArchitecture, - }; - std.mem.writeIntLittle(u16, hdr_data[0..2], machine_type); + const machine = self.base.options.target.cpu.arch.toCoffMachine(); + if (machine == .Unknown) { + return error.UnsupportedCOFFArchitecture; + } + std.mem.writeIntLittle(u16, hdr_data[0..2], @enumToInt(machine)); index += 2; // Number of sections (we only use .got, .text) @@ -125,20 +210,33 @@ fn createFile(allocator: *Allocator, file: fs.File, options: link.Options) !Coff }, else => 0, }; + + const section_table_offset = coff_file_header_offset + 20 + optional_header_size; + const default_offset_table_size = file_alignment; + const default_size_of_code = 0; + + self.section_data_offset = std.mem.alignForwardGeneric(u32, self.section_table_offset + section_table_size, file_alignment); + const section_data_relative_virtual_address = std.mem.alignForwardGeneric(u32, self.section_table_offset + section_table_size, section_alignment); + self.offset_table_virtual_address = image_base + section_data_relative_virtual_address; + self.offset_table_size = default_offset_table_size; + self.section_table_offset = section_table_offset; + self.text_section_virtual_address = image_base + section_data_relative_virtual_address + section_alignment; + self.text_section_size = default_size_of_code; + + // Size of file when loaded in memory + const size_of_image = std.mem.alignForwardGeneric(u32, self.text_section_virtual_address - image_base + default_size_of_code, section_alignment); + std.mem.writeIntLittle(u16, hdr_data[index..][0..2], optional_header_size); index += 2; - // Characteristics - IMAGE_FILE_DEBUG_STRIPPED - var characteristics: u16 = 0x200; // TODO Remove debug info stripped flag when necessary + // Characteristics + var characteristics: u16 = std.coff.IMAGE_FILE_DEBUG_STRIPPED | std.coff.IMAGE_FILE_RELOCS_STRIPPED; // TODO Remove debug info stripped flag when necessary if (options.output_mode == .Exe) { - // IMAGE_FILE_EXECUTABLE_IMAGE - characteristics |= 0x2; + characteristics |= std.coff.IMAGE_FILE_EXECUTABLE_IMAGE; } switch (self.ptr_width) { - // IMAGE_FILE_32BIT_MACHINE - .p32 => characteristics |= 0x100, - // IMAGE_FILE_LARGE_ADDRESS_AWARE - .p64 => characteristics |= 0x20, + .p32 => characteristics |= 
std.coff.IMAGE_FILE_32BIT_MACHINE, + .p64 => characteristics |= std.coff.IMAGE_FILE_LARGE_ADDRESS_AWARE, } std.mem.writeIntLittle(u16, hdr_data[index..][0..2], characteristics); index += 2; @@ -147,6 +245,7 @@ fn createFile(allocator: *Allocator, file: fs.File, options: link.Options) !Coff try self.base.file.?.pwriteAll(hdr_data[0..index], coff_file_header_offset); if (options.output_mode == .Exe) { + self.optional_header_offset = coff_file_header_offset + 20; // Optional header index = 0; std.mem.writeIntLittle(u16, hdr_data[0..2], switch (self.ptr_width) { @@ -155,34 +254,33 @@ fn createFile(allocator: *Allocator, file: fs.File, options: link.Options) !Coff }); index += 2; - // Linker version (u8 + u8), SizeOfCode (u32), SizeOfInitializedData (u32), SizeOfUninitializedData (u32), AddressOfEntryPoint (u32) - std.mem.set(u8, hdr_data[index..][0..18], 0); - index += 18; + // Linker version (u8 + u8) + std.mem.set(u8, hdr_data[index..][0..2], 0); + index += 2; - // Base of code relative to the image base - // @TODO Check where to put this - std.mem.writeIntLittle(u32, hdr_data[index..][0..4], 0x1000); - index += 4; + // SizeOfCode (UNUSED, u32), SizeOfInitializedData (u32), SizeOfUninitializedData (u32), AddressOfEntryPoint (u32), BaseOfCode (UNUSED, u32) + std.mem.set(u8, hdr_data[index..][0..20], 0); + index += 20; if (self.ptr_width == .p32) { - // Base of data relative to the image base + // Base of data relative to the image base (UNUSED) std.mem.set(u8, hdr_data[index..][0..4], 0); index += 4; // Image base address - std.mem.writeIntLittle(u32, hdr_data[index..][0..4], 0x400_000); + std.mem.writeIntLittle(u32, hdr_data[index..][0..4], image_base); index += 4; } else { // Image base address - std.mem.writeIntLittle(u64, hdr_data[index..][0..8], 0x140_000_000); + std.mem.writeIntLittle(u64, hdr_data[index..][0..8], image_base); index += 8; } // Section alignment - std.mem.writeIntLittle(u32, hdr_data[index..][0..4], 4096); + std.mem.writeIntLittle(u32, hdr_data[index..][0..4], section_alignment); index += 4; // File alignment - std.mem.writeIntLittle(u32, hdr_data[index..][0..4], 512); + std.mem.writeIntLittle(u32, hdr_data[index..][0..4], file_alignment); index += 4; // Required OS version, 6.0 is vista std.mem.writeIntLittle(u16, hdr_data[index..][0..2], 6); @@ -197,19 +295,25 @@ fn createFile(allocator: *Allocator, file: fs.File, options: link.Options) !Coff index += 2; std.mem.writeIntLittle(u16, hdr_data[index..][0..2], 0); index += 2; - // Reserved zeroes (u32), SizeOfImage (u32), SizeOfHeaders (u32), CheckSum (u32) - std.mem.set(u8, hdr_data[index..][0..16], 0); - index += 16; + // Reserved zeroes (u32) + std.mem.set(u8, hdr_data[index..][0..4], 0); + index += 4; + std.mem.writeIntLittle(u32, hdr_data[index..][0..4], size_of_image); + index += 4; + std.mem.writeIntLittle(u32, hdr_data[index..][0..4], self.section_data_offset); + index += 4; + // CheckSum (u32) + std.mem.set(u8, hdr_data[index..][0..4], 0); + index += 4; // Subsystem, TODO: Let users specify the subsystem, always CUI for now std.mem.writeIntLittle(u16, hdr_data[index..][0..2], 3); index += 2; - // DLL characteristics, TODO: For now we are just using IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE - std.mem.writeIntLittle(u16, hdr_data[index..][0..2], 0x40); + // DLL characteristics + std.mem.writeIntLittle(u16, hdr_data[index..][0..2], 0x0); index += 2; switch (self.ptr_width) { .p32 => { - // @TODO See llvm output for 32 bit executables // Size of stack reserve + commit std.mem.writeIntLittle(u32, hdr_data[index..][0..4], 
0x1_000_000); index += 4; @@ -242,84 +346,447 @@ fn createFile(allocator: *Allocator, file: fs.File, options: link.Options) !Coff // Number of data directories std.mem.writeIntLittle(u32, hdr_data[index..][0..4], data_directory_count); index += 4; - // @TODO Write meaningful stuff here // Initialize data directories to zero - std.mem.set(u8, hdr_data[index..][0..data_directory_count * 8], 0); + std.mem.set(u8, hdr_data[index..][0 .. data_directory_count * 8], 0); index += data_directory_count * 8; assert(index == optional_header_size); } - // @TODO Merge this write with the one above - const section_table_offset = coff_file_header_offset + 20 + optional_header_size; - // Write section table. // First, the .got section hdr_data[index..][0..8].* = ".got\x00\x00\x00\x00".*; index += 8; - // Virtual size (u32) (@TODO Set to initial value in image files, zero otherwise), Virtual address (u32) (@TODO Set to value in image files, zero otherwise), Size of raw data (u32) - std.mem.set(u8, hdr_data[index..][0..12], 0); - index += 12; - // File pointer to the start of the section - std.mem.writeIntLittle(u32, hdr_data[index..][0..4], section_table_offset + 2 * 40); + if (options.output_mode == .Exe) { + // Virtual size (u32) + std.mem.writeIntLittle(u32, hdr_data[index..][0..4], default_offset_table_size); + index += 4; + // Virtual address (u32) + std.mem.writeIntLittle(u32, hdr_data[index..][0..4], self.offset_table_virtual_address - image_base); + index += 4; + } else { + std.mem.set(u8, hdr_data[index..][0..8], 0); + index += 8; + } + // Size of raw data (u32) + std.mem.writeIntLittle(u32, hdr_data[index..][0..4], default_offset_table_size); index += 4; - // Pointer to relocations (u32) (@TODO Initialize this for object files), PointerToLinenumbers (u32), NumberOfRelocations (u16), (@TODO Initialize this for object files), NumberOfLinenumbers (u16) + // File pointer to the start of the section + std.mem.writeIntLittle(u32, hdr_data[index..][0..4], self.section_data_offset); + index += 4; + // Pointer to relocations (u32), PointerToLinenumbers (u32), NumberOfRelocations (u16), NumberOfLinenumbers (u16) std.mem.set(u8, hdr_data[index..][0..12], 0); index += 12; - // Characteristics `IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_MEM_READ = 0x40000040` - std.mem.writeIntLittle(u32, hdr_data[index..][0..4], 0x40000040); + // Section flags + std.mem.writeIntLittle(u32, hdr_data[index..][0..4], std.coff.IMAGE_SCN_CNT_INITIALIZED_DATA | std.coff.IMAGE_SCN_MEM_READ); index += 4; // Then, the .text section hdr_data[index..][0..8].* = ".text\x00\x00\x00".*; index += 8; - // Virtual size (u32) (@TODO Set to initial value in image files, zero otherwise), Virtual address (u32) (@TODO Set to value in image files, zero otherwise), Size of raw data (u32) - std.mem.set(u8, hdr_data[index..][0..12], 0); - index += 12; - // File pointer to the start of the section (@TODO Add the initial size of .got) - std.mem.writeIntLittle(u32, hdr_data[index..][0..4], section_table_offset + 2 * 40); + if (options.output_mode == .Exe) { + // Virtual size (u32) + std.mem.writeIntLittle(u32, hdr_data[index..][0..4], default_size_of_code); + index += 4; + // Virtual address (u32) + std.mem.writeIntLittle(u32, hdr_data[index..][0..4], self.text_section_virtual_address - image_base); + index += 4; + } else { + std.mem.set(u8, hdr_data[index..][0..8], 0); + index += 8; + } + // Size of raw data (u32) + std.mem.writeIntLittle(u32, hdr_data[index..][0..4], default_size_of_code); index += 4; - // Pointer to relocations (u32) (@TODO Initialize 
this for object files), PointerToLinenumbers (u32), NumberOfRelocations (u16), (@TODO Initialize this for object files), NumberOfLinenumbers (u16) + // File pointer to the start of the section + std.mem.writeIntLittle(u32, hdr_data[index..][0..4], self.section_data_offset + default_offset_table_size); + index += 4; + // Pointer to relocations (u32), PointerToLinenumbers (u32), NumberOfRelocations (u16), NumberOfLinenumbers (u16) std.mem.set(u8, hdr_data[index..][0..12], 0); index += 12; - // Characteristics `IMAGE_SCN_CNT_CODE | IMAGE_SCN_MEM_EXECUTE | IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_WRITE = 0xE0000020` - std.mem.writeIntLittle(u32, hdr_data[index..][0..4], 0xE0000020); + // Section flags + std.mem.writeIntLittle( + u32, + hdr_data[index..][0..4], + std.coff.IMAGE_SCN_CNT_CODE | std.coff.IMAGE_SCN_MEM_EXECUTE | std.coff.IMAGE_SCN_MEM_READ | std.coff.IMAGE_SCN_MEM_WRITE, + ); index += 4; - assert(index == optional_header_size + 2 * 40); - try self.base.file.?.pwriteAll(hdr_data[0..index], coff_file_header_offset + 20); + assert(index == optional_header_size + section_table_size); + try self.base.file.?.pwriteAll(hdr_data[0..index], self.optional_header_offset); + try self.base.file.?.setEndPos(self.section_data_offset + default_offset_table_size + default_size_of_code); return self; } -pub fn flush(self: *Coff, module: *Module) !void { - // @TODO Implement this +pub fn allocateDeclIndexes(self: *Coff, decl: *Module.Decl) !void { + try self.offset_table.ensureCapacity(self.base.allocator, self.offset_table.items.len + 1); + + if (self.offset_table_free_list.popOrNull()) |i| { + decl.link.coff.offset_table_index = i; + } else { + decl.link.coff.offset_table_index = @intCast(u32, self.offset_table.items.len); + _ = self.offset_table.addOneAssumeCapacity(); + + const entry_size = self.base.options.target.cpu.arch.ptrBitWidth() / 8; + if (self.offset_table.items.len > self.offset_table_size / entry_size) { + self.offset_table_size_dirty = true; + } + } + + self.offset_table.items[decl.link.coff.offset_table_index] = 0; } -pub fn freeDecl(self: *Coff, decl: *Module.Decl) void { - // @TODO Implement this +fn allocateTextBlock(self: *Coff, text_block: *TextBlock, new_block_size: u64, alignment: u64) !u64 { + const new_block_min_capacity = new_block_size * allocation_padding; + + // We use these to indicate our intention to update metadata, placing the new block, + // and possibly removing a free list node. + // It would be simpler to do it inside the for loop below, but that would cause a + // problem if an error was returned later in the function. So this action + // is actually carried out at the end of the function, when errors are no longer possible. 
+ var block_placement: ?*TextBlock = null; + var free_list_removal: ?usize = null; + + const vaddr = blk: { + var i: usize = 0; + while (i < self.text_block_free_list.items.len) { + const free_block = self.text_block_free_list.items[i]; + + const next_block_text_offset = free_block.text_offset + free_block.capacity(); + const new_block_text_offset = std.mem.alignForwardGeneric(u64, free_block.getVAddr(self.*) + free_block.size, alignment) - self.text_section_virtual_address; + if (new_block_text_offset < next_block_text_offset and next_block_text_offset - new_block_text_offset >= new_block_min_capacity) { + block_placement = free_block; + + const remaining_capacity = next_block_text_offset - new_block_text_offset - new_block_min_capacity; + if (remaining_capacity < minimum_text_block_size) { + free_list_removal = i; + } + + break :blk new_block_text_offset + self.text_section_virtual_address; + } else { + if (!free_block.freeListEligible()) { + _ = self.text_block_free_list.swapRemove(i); + } else { + i += 1; + } + continue; + } + } else if (self.last_text_block) |last| { + const new_block_vaddr = std.mem.alignForwardGeneric(u64, last.getVAddr(self.*) + last.size, alignment); + block_placement = last; + break :blk new_block_vaddr; + } else { + break :blk self.text_section_virtual_address; + } + }; + + const expand_text_section = block_placement == null or block_placement.?.next == null; + if (expand_text_section) { + const needed_size = @intCast(u32, std.mem.alignForwardGeneric(u64, vaddr + new_block_size - self.text_section_virtual_address, file_alignment)); + if (needed_size > self.text_section_size) { + const current_text_section_virtual_size = std.mem.alignForwardGeneric(u32, self.text_section_size, section_alignment); + const new_text_section_virtual_size = std.mem.alignForwardGeneric(u32, needed_size, section_alignment); + if (current_text_section_virtual_size != new_text_section_virtual_size) { + self.size_of_image_dirty = true; + // Write new virtual size + var buf: [4]u8 = undefined; + std.mem.writeIntLittle(u32, &buf, new_text_section_virtual_size); + try self.base.file.?.pwriteAll(&buf, self.section_table_offset + 40 + 8); + } + + self.text_section_size = needed_size; + self.text_section_size_dirty = true; + } + self.last_text_block = text_block; + } + text_block.text_offset = @intCast(u32, vaddr - self.text_section_virtual_address); + text_block.size = @intCast(u32, new_block_size); + + // This function can also reallocate a text block. + // In this case we need to "unplug" it from its previous location before + // plugging it in to its new location. 
+ if (text_block.prev) |prev| { + prev.next = text_block.next; + } + if (text_block.next) |next| { + next.prev = text_block.prev; + } + + if (block_placement) |big_block| { + text_block.prev = big_block; + text_block.next = big_block.next; + big_block.next = text_block; + } else { + text_block.prev = null; + text_block.next = null; + } + if (free_list_removal) |i| { + _ = self.text_block_free_list.swapRemove(i); + } + return vaddr; +} + +fn growTextBlock(self: *Coff, text_block: *TextBlock, new_block_size: u64, alignment: u64) !u64 { + const block_vaddr = text_block.getVAddr(self.*); + const align_ok = std.mem.alignBackwardGeneric(u64, block_vaddr, alignment) == block_vaddr; + const need_realloc = !align_ok or new_block_size > text_block.capacity(); + if (!need_realloc) return @as(u64, block_vaddr); + return self.allocateTextBlock(text_block, new_block_size, alignment); +} + +fn shrinkTextBlock(self: *Coff, text_block: *TextBlock, new_block_size: u64) void { + text_block.size = @intCast(u32, new_block_size); + if (text_block.capacity() - text_block.size >= minimum_text_block_size) { + self.text_block_free_list.append(self.base.allocator, text_block) catch {}; + } +} + +fn freeTextBlock(self: *Coff, text_block: *TextBlock) void { + var already_have_free_list_node = false; + { + var i: usize = 0; + // TODO turn text_block_free_list into a hash map + while (i < self.text_block_free_list.items.len) { + if (self.text_block_free_list.items[i] == text_block) { + _ = self.text_block_free_list.swapRemove(i); + continue; + } + if (self.text_block_free_list.items[i] == text_block.prev) { + already_have_free_list_node = true; + } + i += 1; + } + } + if (self.last_text_block == text_block) { + self.last_text_block = text_block.prev; + } + if (text_block.prev) |prev| { + prev.next = text_block.next; + + if (!already_have_free_list_node and prev.freeListEligible()) { + // The free list is heuristics, it doesn't have to be perfect, so we can + // ignore the OOM here. 
+ self.text_block_free_list.append(self.base.allocator, prev) catch {}; + } + } + + if (text_block.next) |next| { + next.prev = text_block.prev; + } +} + +fn writeOffsetTableEntry(self: *Coff, index: usize) !void { + const entry_size = self.base.options.target.cpu.arch.ptrBitWidth() / 8; + const endian = self.base.options.target.cpu.arch.endian(); + + const offset_table_start = self.section_data_offset; + if (self.offset_table_size_dirty) { + const current_raw_size = self.offset_table_size; + const new_raw_size = self.offset_table_size * 2; + log.debug("growing offset table from raw size {} to {}\n", .{ current_raw_size, new_raw_size }); + + // Move the text section to a new place in the executable + const current_text_section_start = self.section_data_offset + current_raw_size; + const new_text_section_start = self.section_data_offset + new_raw_size; + + const amt = try self.base.file.?.copyRangeAll(current_text_section_start, self.base.file.?, new_text_section_start, self.text_section_size); + if (amt != self.text_section_size) return error.InputOutput; + + // Write the new raw size in the .got header + var buf: [8]u8 = undefined; + std.mem.writeIntLittle(u32, buf[0..4], new_raw_size); + try self.base.file.?.pwriteAll(buf[0..4], self.section_table_offset + 16); + // Write the new .text section file offset in the .text section header + std.mem.writeIntLittle(u32, buf[0..4], new_text_section_start); + try self.base.file.?.pwriteAll(buf[0..4], self.section_table_offset + 40 + 20); + + const current_virtual_size = std.mem.alignForwardGeneric(u32, self.offset_table_size, section_alignment); + const new_virtual_size = std.mem.alignForwardGeneric(u32, new_raw_size, section_alignment); + // If we had to move in the virtual address space, we need to fix the VAs in the offset table, as well as the virtual address of the `.text` section + // and the virtual size of the `.got` section + + if (new_virtual_size != current_virtual_size) { + log.debug("growing offset table from virtual size {} to {}\n", .{ current_virtual_size, new_virtual_size }); + self.size_of_image_dirty = true; + const va_offset = new_virtual_size - current_virtual_size; + + // Write .got virtual size + std.mem.writeIntLittle(u32, buf[0..4], new_virtual_size); + try self.base.file.?.pwriteAll(buf[0..4], self.section_table_offset + 8); + + // Write .text new virtual address + self.text_section_virtual_address = self.text_section_virtual_address + va_offset; + std.mem.writeIntLittle(u32, buf[0..4], self.text_section_virtual_address - image_base); + try self.base.file.?.pwriteAll(buf[0..4], self.section_table_offset + 40 + 12); + + // Fix the VAs in the offset table + for (self.offset_table.items) |*va, idx| { + if (va.* != 0) { + va.* += va_offset; + + switch (entry_size) { + 4 => { + std.mem.writeInt(u32, buf[0..4], @intCast(u32, va.*), endian); + try self.base.file.?.pwriteAll(buf[0..4], offset_table_start + idx * entry_size); + }, + 8 => { + std.mem.writeInt(u64, &buf, va.*, endian); + try self.base.file.?.pwriteAll(&buf, offset_table_start + idx * entry_size); + }, + else => unreachable, + } + } + } + } + self.offset_table_size = new_raw_size; + self.offset_table_size_dirty = false; + } + // Write the new entry + switch (entry_size) { + 4 => { + var buf: [4]u8 = undefined; + std.mem.writeInt(u32, &buf, @intCast(u32, self.offset_table.items[index]), endian); + try self.base.file.?.pwriteAll(&buf, offset_table_start + index * entry_size); + }, + 8 => { + var buf: [8]u8 = undefined; + std.mem.writeInt(u64, &buf,
self.offset_table.items[index], endian); + try self.base.file.?.pwriteAll(&buf, offset_table_start + index * entry_size); + }, + else => unreachable, + } } pub fn updateDecl(self: *Coff, module: *Module, decl: *Module.Decl) !void { - // @TODO Implement this + // TODO COFF/PE debug information + // TODO Implement exports + const tracy = trace(@src()); + defer tracy.end(); + + var code_buffer = std.ArrayList(u8).init(self.base.allocator); + defer code_buffer.deinit(); + + const typed_value = decl.typed_value.most_recent.typed_value; + const res = try codegen.generateSymbol(&self.base, decl.src(), typed_value, &code_buffer, .none); + const code = switch (res) { + .externally_managed => |x| x, + .appended => code_buffer.items, + .fail => |em| { + decl.analysis = .codegen_failure; + try module.failed_decls.put(module.gpa, decl, em); + return; + }, + }; + + const required_alignment = typed_value.ty.abiAlignment(self.base.options.target); + const curr_size = decl.link.coff.size; + if (curr_size != 0) { + const capacity = decl.link.coff.capacity(); + const need_realloc = code.len > capacity or + !std.mem.isAlignedGeneric(u32, decl.link.coff.text_offset, required_alignment); + if (need_realloc) { + const curr_vaddr = self.getDeclVAddr(decl); + const vaddr = try self.growTextBlock(&decl.link.coff, code.len, required_alignment); + log.debug("growing {} from 0x{x} to 0x{x}\n", .{ decl.name, curr_vaddr, vaddr }); + if (vaddr != curr_vaddr) { + log.debug(" (writing new offset table entry)\n", .{}); + self.offset_table.items[decl.link.coff.offset_table_index] = vaddr; + try self.writeOffsetTableEntry(decl.link.coff.offset_table_index); + } + } else if (code.len < curr_size) { + self.shrinkTextBlock(&decl.link.coff, code.len); + } + } else { + const vaddr = try self.allocateTextBlock(&decl.link.coff, code.len, required_alignment); + log.debug("allocated text block for {} at 0x{x} (size: {Bi})\n", .{ std.mem.spanZ(decl.name), vaddr, code.len }); + errdefer self.freeTextBlock(&decl.link.coff); + self.offset_table.items[decl.link.coff.offset_table_index] = vaddr; + try self.writeOffsetTableEntry(decl.link.coff.offset_table_index); + } + + // Write the code into the file + try self.base.file.?.pwriteAll(code, self.section_data_offset + self.offset_table_size + decl.link.coff.text_offset); + + // Since we updated the vaddr and the size, each corresponding export symbol also needs to be updated. + const decl_exports = module.decl_exports.get(decl) orelse &[0]*Module.Export{}; + return self.updateDeclExports(module, decl, decl_exports); } -pub fn updateDeclLineNumber(self: *Coff, module: *Module, decl: *Module.Decl) !void { - // @TODO Implement this -} - -pub fn allocateDeclIndexes(self: *Coff, decl: *Module.Decl) !void { - // @TODO Implement this +pub fn freeDecl(self: *Coff, decl: *Module.Decl) void { + // Appending to free lists is allowed to fail because the free lists are heuristics based anyway. 
+ self.freeTextBlock(&decl.link.coff); + self.offset_table_free_list.append(self.base.allocator, decl.link.coff.offset_table_index) catch {}; } pub fn updateDeclExports(self: *Coff, module: *Module, decl: *const Module.Decl, exports: []const *Module.Export) !void { - // @TODO Implement this + for (exports) |exp| { + if (exp.options.section) |section_name| { + if (!std.mem.eql(u8, section_name, ".text")) { + try module.failed_exports.ensureCapacity(module.gpa, module.failed_exports.items().len + 1); + module.failed_exports.putAssumeCapacityNoClobber( + exp, + try Module.ErrorMsg.create(self.base.allocator, 0, "Unimplemented: ExportOptions.section", .{}), + ); + continue; + } + } + if (std.mem.eql(u8, exp.options.name, "_start")) { + self.entry_addr = decl.link.coff.getVAddr(self.*) - image_base; + } else { + try module.failed_exports.ensureCapacity(module.gpa, module.failed_exports.items().len + 1); + module.failed_exports.putAssumeCapacityNoClobber( + exp, + try Module.ErrorMsg.create(self.base.allocator, 0, "Unimplemented: Exports other than '_start'", .{}), + ); + continue; + } + } +} + +pub fn flush(self: *Coff, module: *Module) !void { + if (self.text_section_size_dirty) { + // Write the new raw size in the .text header + var buf: [4]u8 = undefined; + std.mem.writeIntLittle(u32, &buf, self.text_section_size); + try self.base.file.?.pwriteAll(&buf, self.section_table_offset + 40 + 16); + try self.base.file.?.setEndPos(self.section_data_offset + self.offset_table_size + self.text_section_size); + self.text_section_size_dirty = false; + } + + if (self.base.options.output_mode == .Exe and self.size_of_image_dirty) { + const new_size_of_image = std.mem.alignForwardGeneric(u32, self.text_section_virtual_address - image_base + self.text_section_size, section_alignment); + var buf: [4]u8 = undefined; + std.mem.writeIntLittle(u32, &buf, new_size_of_image); + try self.base.file.?.pwriteAll(&buf, self.optional_header_offset + 56); + self.size_of_image_dirty = false; + } + + if (self.entry_addr == null and self.base.options.output_mode == .Exe) { + log.debug("flushing. no_entry_point_found = true\n", .{}); + self.error_flags.no_entry_point_found = true; + } else { + log.debug("flushing. 
no_entry_point_found = false\n", .{}); + self.error_flags.no_entry_point_found = false; + + if (self.base.options.output_mode == .Exe) { + // Write AddressOfEntryPoint + var buf: [4]u8 = undefined; + std.mem.writeIntLittle(u32, &buf, self.entry_addr.?); + try self.base.file.?.pwriteAll(&buf, self.optional_header_offset + 16); + } + } } pub fn getDeclVAddr(self: *Coff, decl: *const Module.Decl) u64 { - // @TODO Implement this - return 0; + return self.text_section_virtual_address + decl.link.coff.text_offset; +} + +pub fn updateDeclLineNumber(self: *Coff, module: *Module, decl: *Module.Decl) !void { + // TODO Implement this } pub fn deinit(self: *Coff) void { - // @TODO + self.text_block_free_list.deinit(self.base.allocator); + self.offset_table.deinit(self.base.allocator); + self.offset_table_free_list.deinit(self.base.allocator); } diff --git a/src-self-hosted/link/Elf.zig b/src-self-hosted/link/Elf.zig index 451160630a..e5acde947c 100644 --- a/src-self-hosted/link/Elf.zig +++ b/src-self-hosted/link/Elf.zig @@ -1735,7 +1735,13 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void { } else { // TODO implement .debug_info for global variables } - const res = try codegen.generateSymbol(&self.base, decl.src(), typed_value, &code_buffer, &dbg_line_buffer, &dbg_info_buffer, &dbg_info_type_relocs); + const res = try codegen.generateSymbol(&self.base, decl.src(), typed_value, &code_buffer, .{ + .dwarf = .{ + .dbg_line = &dbg_line_buffer, + .dbg_info = &dbg_info_buffer, + .dbg_info_type_relocs = &dbg_info_type_relocs, + }, + }); const code = switch (res) { .externally_managed => |x| x, .appended => code_buffer.items, diff --git a/src-self-hosted/main.zig b/src-self-hosted/main.zig index 8c5c034238..40195342f2 100644 --- a/src-self-hosted/main.zig +++ b/src-self-hosted/main.zig @@ -524,17 +524,22 @@ fn buildOutputType( try stderr.print("\nUnable to parse command: {}\n", .{@errorName(err)}); continue; }) |line| { - if (mem.eql(u8, line, "update")) { + const actual_line = if (line[line.len - 1] == '\r') + line[0 .. line.len - 1] + else + line; + + if (mem.eql(u8, actual_line, "update")) { if (output_mode == .Exe) { try module.makeBinFileWritable(); } try updateModule(gpa, &module, zir_out_path); - } else if (mem.eql(u8, line, "exit")) { + } else if (mem.eql(u8, actual_line, "exit")) { break; - } else if (mem.eql(u8, line, "help")) { + } else if (mem.eql(u8, actual_line, "help")) { try stderr.writeAll(repl_help); } else { - try stderr.print("unknown command: {}\n", .{line}); + try stderr.print("unknown command: {}\n", .{actual_line}); } } else { break; From e9807418e7e58f3cb85d5d3a6d114d5084e305bd Mon Sep 17 00:00:00 2001 From: Alexandros Naskos Date: Fri, 4 Sep 2020 05:22:26 +0300 Subject: [PATCH 39/56] Added .pe ObjectFormat MachO linker no longer collects unused dwarf debug information --- lib/std/target.zig | 1 + src-self-hosted/link.zig | 2 +- src-self-hosted/link/MachO.zig | 25 +------------------------ src-self-hosted/main.zig | 9 +++------ 4 files changed, 6 insertions(+), 31 deletions(-) diff --git a/lib/std/target.zig b/lib/std/target.zig index 4fd59b690f..37425a9a29 100644 --- a/lib/std/target.zig +++ b/lib/std/target.zig @@ -468,6 +468,7 @@ pub const Target = struct { /// TODO Get rid of this one. 
unknown, coff, + pe, elf, macho, wasm, diff --git a/src-self-hosted/link.zig b/src-self-hosted/link.zig index 655f95ecac..fff69a6bbd 100644 --- a/src-self-hosted/link.zig +++ b/src-self-hosted/link.zig @@ -68,7 +68,7 @@ pub const File = struct { pub fn openPath(allocator: *Allocator, dir: fs.Dir, sub_path: []const u8, options: Options) !*File { switch (options.object_format) { .unknown => unreachable, - .coff => return Coff.openPath(allocator, dir, sub_path, options), + .coff, .pe => return Coff.openPath(allocator, dir, sub_path, options), .elf => return Elf.openPath(allocator, dir, sub_path, options), .macho => return MachO.openPath(allocator, dir, sub_path, options), .wasm => return Wasm.openPath(allocator, dir, sub_path, options), diff --git a/src-self-hosted/link/MachO.zig b/src-self-hosted/link/MachO.zig index 93d7b2381a..27d0488f25 100644 --- a/src-self-hosted/link/MachO.zig +++ b/src-self-hosted/link/MachO.zig @@ -316,31 +316,8 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void { var code_buffer = std.ArrayList(u8).init(self.base.allocator); defer code_buffer.deinit(); - var dbg_line_buffer = std.ArrayList(u8).init(self.base.allocator); - defer dbg_line_buffer.deinit(); - - var dbg_info_buffer = std.ArrayList(u8).init(self.base.allocator); - defer dbg_info_buffer.deinit(); - - var dbg_info_type_relocs: File.DbgInfoTypeRelocsTable = .{}; - defer { - var it = dbg_info_type_relocs.iterator(); - while (it.next()) |entry| { - entry.value.relocs.deinit(self.base.allocator); - } - dbg_info_type_relocs.deinit(self.base.allocator); - } - const typed_value = decl.typed_value.most_recent.typed_value; - const res = try codegen.generateSymbol( - &self.base, - decl.src(), - typed_value, - &code_buffer, - &dbg_line_buffer, - &dbg_info_buffer, - &dbg_info_type_relocs, - ); + const res = try codegen.generateSymbol(&self.base, decl.src(), typed_value, &code_buffer, .none); const code = switch (res) { .externally_managed => |x| x, diff --git a/src-self-hosted/main.zig b/src-self-hosted/main.zig index 40195342f2..5af4460ade 100644 --- a/src-self-hosted/main.zig +++ b/src-self-hosted/main.zig @@ -153,8 +153,8 @@ const usage_build_generic = \\ elf Executable and Linking Format \\ c Compile to C source code \\ wasm WebAssembly + \\ pe Portable Executable (Windows) \\ coff (planned) Common Object File Format (Windows) - \\ pe (planned) Portable Executable (Windows) \\ macho (planned) macOS relocatables \\ hex (planned) Intel IHEX \\ raw (planned) Dump machine code directly @@ -451,7 +451,7 @@ fn buildOutputType( } else if (mem.eql(u8, ofmt, "coff")) { break :blk .coff; } else if (mem.eql(u8, ofmt, "pe")) { - break :blk .coff; + break :blk .pe; } else if (mem.eql(u8, ofmt, "macho")) { break :blk .macho; } else if (mem.eql(u8, ofmt, "wasm")) { @@ -524,10 +524,7 @@ fn buildOutputType( try stderr.print("\nUnable to parse command: {}\n", .{@errorName(err)}); continue; }) |line| { - const actual_line = if (line[line.len - 1] == '\r') - line[0 .. 
line.len - 1] - else - line; + const actual_line = mem.trimRight(u8, line, "\r\n "); if (mem.eql(u8, actual_line, "update")) { if (output_mode == .Exe) { From 90743881cf10c4f90da4a8c187997e9eab4d17d5 Mon Sep 17 00:00:00 2001 From: LemonBoy Date: Fri, 4 Sep 2020 09:28:43 +0200 Subject: [PATCH 40/56] std: Minor changes to the fs module * Add a size_hint parameter to the read{toEnd,File}AllocOptions fns * Rename readAllAlloc{,Options} to readToEndAlloc{,Options} as they don't rewind the file before reading * Fix missing rewind in test case --- lib/std/fs.zig | 9 +++++++-- lib/std/fs/file.zig | 16 ++++++++++------ lib/std/fs/test.zig | 9 +++++---- src-self-hosted/Module.zig | 2 ++ src-self-hosted/main.zig | 8 +++++++- 5 files changed, 31 insertions(+), 13 deletions(-) diff --git a/lib/std/fs.zig b/lib/std/fs.zig index 9a44660570..4005f90fcb 100644 --- a/lib/std/fs.zig +++ b/lib/std/fs.zig @@ -1437,24 +1437,29 @@ pub const Dir = struct { /// On success, caller owns returned buffer. /// If the file is larger than `max_bytes`, returns `error.FileTooBig`. pub fn readFileAlloc(self: Dir, allocator: *mem.Allocator, file_path: []const u8, max_bytes: usize) ![]u8 { - return self.readFileAllocOptions(allocator, file_path, max_bytes, @alignOf(u8), null); + return self.readFileAllocOptions(allocator, file_path, max_bytes, null, @alignOf(u8), null); } /// On success, caller owns returned buffer. /// If the file is larger than `max_bytes`, returns `error.FileTooBig`. + /// If `size_hint` is specified the initial buffer size is calculated using + /// that value, otherwise the effective file size is used instead. /// Allows specifying alignment and a sentinel value. pub fn readFileAllocOptions( self: Dir, allocator: *mem.Allocator, file_path: []const u8, max_bytes: usize, + size_hint: ?usize, comptime alignment: u29, comptime optional_sentinel: ?u8, ) !(if (optional_sentinel) |s| [:s]align(alignment) u8 else []align(alignment) u8) { var file = try self.openFile(file_path, .{}); defer file.close(); - return file.readAllAllocOptions(allocator, max_bytes, alignment, optional_sentinel); + const stat_size = size_hint orelse try file.getEndPos(); + + return file.readToEndAllocOptions(allocator, max_bytes, stat_size, alignment, optional_sentinel); } pub const DeleteTreeError = error{ diff --git a/lib/std/fs/file.zig b/lib/std/fs/file.zig index c34e5f9437..ef1b501ec3 100644 --- a/lib/std/fs/file.zig +++ b/lib/std/fs/file.zig @@ -363,25 +363,29 @@ pub const File = struct { try os.futimens(self.handle, ×); } + /// Reads all the bytes from the current position to the end of the file. /// On success, caller owns returned buffer. /// If the file is larger than `max_bytes`, returns `error.FileTooBig`. - pub fn readAllAlloc(self: File, allocator: *mem.Allocator, max_bytes: usize) ![]u8 { - return self.readAllAllocOptions(allocator, max_bytes, @alignOf(u8), null); + pub fn readToEndAlloc(self: File, allocator: *mem.Allocator, max_bytes: usize) ![]u8 { + return self.readToEndAllocOptions(allocator, max_bytes, null, @alignOf(u8), null); } + /// Reads all the bytes from the current position to the end of the file. /// On success, caller owns returned buffer. /// If the file is larger than `max_bytes`, returns `error.FileTooBig`. + /// If `size_hint` is specified the initial buffer size is calculated using + /// that value, otherwise an arbitrary value is used instead. /// Allows specifying alignment and a sentinel value. 
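// Illustrative usage only (an editorial sketch, not part of this change; `dir`, `file`,
// `allocator`, `path`, and `max_bytes` are assumed to exist in the caller):
//
//     // Reads from the current file position to the end; the rename makes that explicit.
//     const bytes = try file.readToEndAlloc(allocator, max_bytes);
//     // A known size can be passed as `size_hint` to pre-size the buffer; `null` falls
//     // back to asking the file for its size.
//     const src = try dir.readFileAllocOptions(allocator, path, max_bytes, null, @alignOf(u8), null);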
- pub fn readAllAllocOptions( + pub fn readToEndAllocOptions( self: File, allocator: *mem.Allocator, max_bytes: usize, + size_hint: ?usize, comptime alignment: u29, comptime optional_sentinel: ?u8, ) !(if (optional_sentinel) |s| [:s]align(alignment) u8 else []align(alignment) u8) { - const stat_size = try self.getEndPos(); - const size = math.cast(usize, stat_size) catch math.maxInt(usize); - if (size > max_bytes) return error.FileTooBig; + // If no size hint is provided fall back to the size=0 code path + const size = size_hint orelse 0; // The file size returned by stat is used as hint to set the buffer // size. If the reported size is zero, as it happens on Linux for files diff --git a/lib/std/fs/test.zig b/lib/std/fs/test.zig index c567602dd7..a59bc46245 100644 --- a/lib/std/fs/test.zig +++ b/lib/std/fs/test.zig @@ -188,7 +188,7 @@ test "readAllAlloc" { var file = try tmp_dir.dir.createFile("test_file", .{ .read = true }); defer file.close(); - const buf1 = try file.readAllAlloc(testing.allocator, 1024); + const buf1 = try file.readToEndAlloc(testing.allocator, 1024); defer testing.allocator.free(buf1); testing.expect(buf1.len == 0); @@ -197,20 +197,21 @@ test "readAllAlloc" { try file.seekTo(0); // max_bytes > file_size - const buf2 = try file.readAllAlloc(testing.allocator, 1024); + const buf2 = try file.readToEndAlloc(testing.allocator, 1024); defer testing.allocator.free(buf2); testing.expectEqual(write_buf.len, buf2.len); testing.expect(std.mem.eql(u8, write_buf, buf2)); try file.seekTo(0); // max_bytes == file_size - const buf3 = try file.readAllAlloc(testing.allocator, write_buf.len); + const buf3 = try file.readToEndAlloc(testing.allocator, write_buf.len); defer testing.allocator.free(buf3); testing.expectEqual(write_buf.len, buf3.len); testing.expect(std.mem.eql(u8, write_buf, buf3)); + try file.seekTo(0); // max_bytes < file_size - testing.expectError(error.FileTooBig, file.readAllAlloc(testing.allocator, write_buf.len - 1)); + testing.expectError(error.FileTooBig, file.readToEndAlloc(testing.allocator, write_buf.len - 1)); } test "directory operations on files" { diff --git a/src-self-hosted/Module.zig b/src-self-hosted/Module.zig index c476c307d2..5cc0b3f892 100644 --- a/src-self-hosted/Module.zig +++ b/src-self-hosted/Module.zig @@ -595,6 +595,7 @@ pub const Scope = struct { module.gpa, self.sub_file_path, std.math.maxInt(u32), + null, 1, 0, ); @@ -697,6 +698,7 @@ pub const Scope = struct { module.gpa, self.sub_file_path, std.math.maxInt(u32), + null, 1, 0, ); diff --git a/src-self-hosted/main.zig b/src-self-hosted/main.zig index b6ccc8a218..6c56ef885b 100644 --- a/src-self-hosted/main.zig +++ b/src-self-hosted/main.zig @@ -806,7 +806,13 @@ fn fmtPathFile( if (stat.kind == .Directory) return error.IsDir; - const source_code = source_file.readAllAlloc(fmt.gpa, max_src_size) catch |err| switch (err) { + const source_code = source_file.readToEndAllocOptions( + fmt.gpa, + max_src_size, + stat.size, + @alignOf(u8), + null, + ) catch |err| switch (err) { error.ConnectionResetByPeer => unreachable, error.ConnectionTimedOut => unreachable, error.NotOpenForReading => unreachable, From 3c8e1bc25b6d122ce1295e2e33bb9f54ae801ec0 Mon Sep 17 00:00:00 2001 From: LemonBoy Date: Fri, 4 Sep 2020 12:48:36 +0200 Subject: [PATCH 41/56] std: Fix for 32bit systems --- lib/std/fs.zig | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/std/fs.zig b/lib/std/fs.zig index 4005f90fcb..a217fb3e9b 100644 --- a/lib/std/fs.zig +++ b/lib/std/fs.zig @@ -1457,7 +1457,10 @@ pub const Dir 
= struct { var file = try self.openFile(file_path, .{}); defer file.close(); - const stat_size = size_hint orelse try file.getEndPos(); + // If the file size doesn't fit a usize it'll be certainly greater than + // `max_bytes` + const stat_size = size_hint orelse math.cast(usize, try file.getEndPos()) catch + return error.FileTooBig; return file.readToEndAllocOptions(allocator, max_bytes, stat_size, alignment, optional_sentinel); } From 6b2f4fd20d3c85e5db592f76dea8e56da54e9211 Mon Sep 17 00:00:00 2001 From: Vexu Date: Fri, 4 Sep 2020 21:41:34 +0300 Subject: [PATCH 42/56] langref: atomic ops are allowed on pointers Closes #6217 --- doc/langref.html.in | 10 +++++----- src/ir.cpp | 4 ++-- test/compile_errors.zig | 2 +- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/doc/langref.html.in b/doc/langref.html.in index b9b8f71c7a..468a77e478 100644 --- a/doc/langref.html.in +++ b/doc/langref.html.in @@ -6889,7 +6889,7 @@ fn func(y: *i32) void { This builtin function atomically dereferences a pointer and returns the value.

- {#syntax#}T{#endsyntax#} must be a {#syntax#}bool{#endsyntax#}, a float, + {#syntax#}T{#endsyntax#} must be a pointer, a {#syntax#}bool{#endsyntax#}, a float, an integer or an enum.
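As a rough illustration of the relaxed rule (a minimal sketch, not taken from this patch), atomically loading and storing a pointer-typed value now type-checks:

    const std = @import("std");
    const assert = std.debug.assert;

    var a: i32 = 1;
    var b: i32 = 2;
    var shared: *i32 = &a;

    test "atomic load/store of a pointer value" {
        // Publish a new pointer atomically, then read it back.
        @atomicStore(*i32, &shared, &b, .SeqCst);
        assert(@atomicLoad(*i32, &shared, .SeqCst) == &b);
    }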

{#header_close#} @@ -6899,7 +6899,7 @@ fn func(y: *i32) void { This builtin function atomically modifies memory and then returns the previous value.

- {#syntax#}T{#endsyntax#} must be a {#syntax#}bool{#endsyntax#}, a float, + {#syntax#}T{#endsyntax#} must be a pointer, a {#syntax#}bool{#endsyntax#}, a float, an integer or an enum.
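Likewise for @atomicRmw; a small sketch (illustrative only, not from the patch) exchanging a pointer operand:

    const std = @import("std");
    const assert = std.debug.assert;

    var a: i32 = 1;
    var b: i32 = 2;
    var current: *i32 = &a;

    test "atomicRmw .Xchg on a pointer value" {
        // Swap in &b and receive the previous pointer atomically.
        const old = @atomicRmw(*i32, &current, .Xchg, &b, .SeqCst);
        assert(old == &a);
        assert(current == &b);
    }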

@@ -6925,7 +6925,7 @@ fn func(y: *i32) void { This builtin function atomically stores a value.

- {#syntax#}T{#endsyntax#} must be a {#syntax#}bool{#endsyntax#}, a float, + {#syntax#}T{#endsyntax#} must be a pointer, a {#syntax#}bool{#endsyntax#}, a float, an integer or an enum.

{#header_close#} @@ -7208,7 +7208,7 @@ fn cmpxchgStrongButNotAtomic(comptime T: type, ptr: *T, expected_value: T, new_v more efficiently in machine instructions.

- {#syntax#}T{#endsyntax#} must be a {#syntax#}bool{#endsyntax#}, a float, + {#syntax#}T{#endsyntax#} must be a pointer, a {#syntax#}bool{#endsyntax#}, a float, an integer or an enum.

{#syntax#}@TypeOf(ptr).alignment{#endsyntax#} must be {#syntax#}>= @sizeOf(T).{#endsyntax#}
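A hedged sketch of the same relaxation for @cmpxchgStrong (illustrative, not part of this change):

    const std = @import("std");
    const assert = std.debug.assert;

    var a: i32 = 1;
    var b: i32 = 2;
    var slot: *i32 = &a;

    test "cmpxchgStrong on a pointer value" {
        // Returns null on success, or the value actually observed on failure.
        assert(@cmpxchgStrong(*i32, &slot, &a, &b, .SeqCst, .SeqCst) == null);
        assert(slot == &b);
    }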

@@ -7237,7 +7237,7 @@ fn cmpxchgWeakButNotAtomic(comptime T: type, ptr: *T, expected_value: T, new_val However if you need a stronger guarantee, use {#link|@cmpxchgStrong#}.

- {#syntax#}T{#endsyntax#} must be a {#syntax#}bool{#endsyntax#}, a float, + {#syntax#}T{#endsyntax#} must be a pointer, a {#syntax#}bool{#endsyntax#}, a float, an integer or an enum.

{#syntax#}@TypeOf(ptr).alignment{#endsyntax#} must be {#syntax#}>= @sizeOf(T).{#endsyntax#}
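The weak variant is typically wrapped in a retry loop; a minimal sketch using a plain integer counter (hypothetical, not from the patch):

    const std = @import("std");

    var counter: u32 = 0;

    /// Atomically increment `counter`, retrying on spurious cmpxchgWeak failures.
    fn increment() void {
        var old = @atomicLoad(u32, &counter, .SeqCst);
        while (@cmpxchgWeak(u32, &counter, old, old + 1, .SeqCst, .SeqCst)) |observed| {
            // The swap did not happen; retry with the freshly observed value.
            old = observed;
        }
    }

    test "cmpxchgWeak retry loop" {
        increment();
        std.debug.assert(counter == 1);
    }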

diff --git a/src/ir.cpp b/src/ir.cpp index 9d5814ab6f..efc9cb6707 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -26752,7 +26752,7 @@ static IrInstGen *ir_analyze_instruction_cmpxchg(IrAnalyze *ira, IrInstSrcCmpxch if (operand_type->id == ZigTypeIdFloat) { ir_add_error(ira, &instruction->type_value->child->base, - buf_sprintf("expected integer, enum or pointer type, found '%s'", buf_ptr(&operand_type->name))); + buf_sprintf("expected bool, integer, enum or pointer type, found '%s'", buf_ptr(&operand_type->name))); return ira->codegen->invalid_inst_gen; } @@ -30407,7 +30407,7 @@ static ZigType *ir_resolve_atomic_operand_type(IrAnalyze *ira, IrInstGen *op) { return ira->codegen->builtin_types.entry_invalid; if (operand_ptr_type == nullptr) { ir_add_error(ira, &op->base, - buf_sprintf("expected integer, float, enum or pointer type, found '%s'", + buf_sprintf("expected bool, integer, float, enum or pointer type, found '%s'", buf_ptr(&operand_type->name))); return ira->codegen->builtin_types.entry_invalid; } diff --git a/test/compile_errors.zig b/test/compile_errors.zig index f6e00e1dbb..e4a00e6421 100644 --- a/test/compile_errors.zig +++ b/test/compile_errors.zig @@ -899,7 +899,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { \\ _ = @cmpxchgWeak(f32, &x, 1, 2, .SeqCst, .SeqCst); \\} , &[_][]const u8{ - "tmp.zig:3:22: error: expected integer, enum or pointer type, found 'f32'", + "tmp.zig:3:22: error: expected bool, integer, enum or pointer type, found 'f32'", }); cases.add("atomicrmw with float op not .Xchg, .Add or .Sub", From 0c43b6ef5c26f927b62eeaf3b365c88b07bf8d1a Mon Sep 17 00:00:00 2001 From: yettinmoor <51028130+yettinmoor@users.noreply.github.com> Date: Fri, 4 Sep 2020 21:11:09 +0200 Subject: [PATCH 43/56] document how to escape curly braces in fmt.format --- lib/std/fmt.zig | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/std/fmt.zig b/lib/std/fmt.zig index 3067a55759..112377b887 100644 --- a/lib/std/fmt.zig +++ b/lib/std/fmt.zig @@ -82,6 +82,8 @@ fn peekIsAlign(comptime fmt: []const u8) bool { /// This allows user types to be formatted in a logical manner instead of dumping all fields of the type. /// /// A user type may be a `struct`, `vector`, `union` or `enum` type. +/// +/// To print literal curly braces, escape them by writing them twice, e.g. `{{` or `}}`. pub fn format( writer: anytype, comptime fmt: []const u8, From 09c861b829480be525a787e54117c108705256e6 Mon Sep 17 00:00:00 2001 From: Vexu Date: Fri, 4 Sep 2020 22:49:14 +0300 Subject: [PATCH 44/56] update rest of tests --- doc/langref.html.in | 20 ++++++++++---------- lib/std/pdb.zig | 2 +- lib/std/start.zig | 2 +- test/compile_errors.zig | 23 +++++++---------------- 4 files changed, 19 insertions(+), 28 deletions(-) diff --git a/doc/langref.html.in b/doc/langref.html.in index b9b8f71c7a..b01b543a40 100644 --- a/doc/langref.html.in +++ b/doc/langref.html.in @@ -2156,7 +2156,7 @@ test "pointer casting" { test "pointer child type" { // pointer types have a `child` field which tells you the type they point to. 
- assert((*u32).Child == u32); + assert(@typeInfo(*u32).Pointer.child == u32); } {#code_end#} {#header_open|Alignment#} @@ -2184,7 +2184,7 @@ test "variable alignment" { assert(@TypeOf(&x) == *i32); assert(*i32 == *align(align_of_i32) i32); if (std.Target.current.cpu.arch == .x86_64) { - assert((*i32).alignment == 4); + assert(@typeInfo(*i32).Pointer.alignment == 4); } } {#code_end#} @@ -2202,7 +2202,7 @@ const assert = @import("std").debug.assert; var foo: u8 align(4) = 100; test "global variable alignment" { - assert(@TypeOf(&foo).alignment == 4); + assert(@typeInfo(@TypeOf(&foo)).Pointer.alignment == 4); assert(@TypeOf(&foo) == *align(4) u8); const as_pointer_to_array: *[1]u8 = &foo; const as_slice: []u8 = as_pointer_to_array; @@ -4310,8 +4310,8 @@ test "fn type inference" { const assert = @import("std").debug.assert; test "fn reflection" { - assert(@TypeOf(assert).ReturnType == void); - assert(@TypeOf(assert).is_var_args == false); + assert(@typeInfo(@TypeOf(assert)).Fn.return_type.? == void); + assert(@typeInfo(@TypeOf(assert)).Fn.is_var_args == false); } {#code_end#} {#header_close#} @@ -4611,10 +4611,10 @@ test "error union" { foo = error.SomeError; // Use compile-time reflection to access the payload type of an error union: - comptime assert(@TypeOf(foo).Payload == i32); + comptime assert(@typeInfo(@TypeOf(foo)).ErrorUnion.payload == i32); // Use compile-time reflection to access the error set type of an error union: - comptime assert(@TypeOf(foo).ErrorSet == anyerror); + comptime assert(@typeInfo(@TypeOf(foo)).ErrorUnion.error_set == anyerror); } {#code_end#} {#header_open|Merging Error Sets#} @@ -4991,7 +4991,7 @@ test "optional type" { foo = 1234; // Use compile-time reflection to access the child type of the optional: - comptime assert(@TypeOf(foo).Child == i32); + comptime assert(@typeInfo(@TypeOf(foo)).Optional.child == i32); } {#code_end#} {#header_close#} @@ -7211,7 +7211,7 @@ fn cmpxchgStrongButNotAtomic(comptime T: type, ptr: *T, expected_value: T, new_v {#syntax#}T{#endsyntax#} must be a {#syntax#}bool{#endsyntax#}, a float, an integer or an enum.
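The nested @typeInfo expressions this commit introduces (for example in lib/std/pdb.zig above) are dense; the sketch below, built around a hypothetical `read` function, unpacks the same reflection one step at a time:

    const std = @import("std");

    fn read() anyerror!usize {
        return 0;
    }

    test "error set of a function's return type via @typeInfo" {
        // @TypeOf(read) is the function type; its return type is an error union.
        const Ret = @typeInfo(@TypeOf(read)).Fn.return_type.?;
        // The error union can then be split into its error set and payload.
        comptime std.debug.assert(@typeInfo(Ret).ErrorUnion.error_set == anyerror);
        comptime std.debug.assert(@typeInfo(Ret).ErrorUnion.payload == usize);
    }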

-

{#syntax#}@TypeOf(ptr).alignment{#endsyntax#} must be {#syntax#}>= @sizeOf(T).{#endsyntax#}

+

{#syntax#}@typeInfo(@TypeOf(ptr)).Pointer.alignment{#endsyntax#} must be {#syntax#}>= @sizeOf(T).{#endsyntax#}

{#see_also|Compile Variables|cmpxchgWeak#} {#header_close#} {#header_open|@cmpxchgWeak#} @@ -7240,7 +7240,7 @@ fn cmpxchgWeakButNotAtomic(comptime T: type, ptr: *T, expected_value: T, new_val {#syntax#}T{#endsyntax#} must be a {#syntax#}bool{#endsyntax#}, a float, an integer or an enum.

-

{#syntax#}@TypeOf(ptr).alignment{#endsyntax#} must be {#syntax#}>= @sizeOf(T).{#endsyntax#}

+

{#syntax#}@typeInfo(@TypeOf(ptr)).Pointer.alignment{#endsyntax#} must be {#syntax#}>= @sizeOf(T).{#endsyntax#}

{#see_also|Compile Variables|cmpxchgStrong#} {#header_close#} diff --git a/lib/std/pdb.zig b/lib/std/pdb.zig index e8c61f859d..91e22307d8 100644 --- a/lib/std/pdb.zig +++ b/lib/std/pdb.zig @@ -636,7 +636,7 @@ const MsfStream = struct { blocks: []u32 = undefined, block_size: u32 = undefined, - pub const Error = @TypeOf(read).ReturnType.ErrorSet; + pub const Error = @typeInfo(@typeInfo(@TypeOf(read)).Fn.return_type.?).ErrorUnion.error_set; fn init(block_size: u32, file: File, blocks: []u32) MsfStream { const stream = MsfStream{ diff --git a/lib/std/start.zig b/lib/std/start.zig index e04b2a3320..3eb02ba65e 100644 --- a/lib/std/start.zig +++ b/lib/std/start.zig @@ -67,7 +67,7 @@ fn EfiMain(handle: uefi.Handle, system_table: *uefi.tables.SystemTable) callconv uefi.handle = handle; uefi.system_table = system_table; - switch (@TypeOf(root.main).ReturnType) { + switch (@typeInfo(@TypeOf(read)).Fn.return_type.?) { noreturn => { root.main(); }, diff --git a/test/compile_errors.zig b/test/compile_errors.zig index f6e00e1dbb..d6a0b34911 100644 --- a/test/compile_errors.zig +++ b/test/compile_errors.zig @@ -176,11 +176,11 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { , &[_][]const u8{ "tmp.zig:2:17: error: expected type 'u32', found 'error{Ohno}'", "tmp.zig:1:17: note: function cannot return an error", - "tmp.zig:8:5: error: expected type 'void', found '@TypeOf(bar).ReturnType.ErrorSet'", + "tmp.zig:8:5: error: expected type 'void', found '@typeInfo(@typeInfo(@TypeOf(bar)).Fn.return_type.?).ErrorUnion.error_set'", "tmp.zig:7:17: note: function cannot return an error", - "tmp.zig:11:15: error: expected type 'u32', found '@TypeOf(bar).ReturnType.ErrorSet!u32'", + "tmp.zig:11:15: error: expected type 'u32', found '@typeInfo(@typeInfo(@TypeOf(bar)).Fn.return_type.?).ErrorUnion.error_set!u32'", "tmp.zig:10:17: note: function cannot return an error", - "tmp.zig:15:14: error: expected type 'u32', found '@TypeOf(bar).ReturnType.ErrorSet!u32'", + "tmp.zig:15:14: error: expected type 'u32', found '@typeInfo(@typeInfo(@TypeOf(bar)).Fn.return_type.?).ErrorUnion.error_set!u32'", "tmp.zig:14:5: note: cannot store an error in type 'u32'", }); @@ -1224,7 +1224,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { \\ }; \\} , &[_][]const u8{ - "tmp.zig:11:25: error: expected type 'u32', found '@TypeOf(get_uval).ReturnType.ErrorSet!u32'", + "tmp.zig:11:25: error: expected type 'u32', found '@typeInfo(@typeInfo(@TypeOf(get_uval)).Fn.return_type.?).ErrorUnion.error_set!u32'", }); cases.add("assigning to struct or union fields that are not optionals with a function that returns an optional", @@ -1929,7 +1929,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { \\ const info = @TypeOf(slice).unknown; \\} , &[_][]const u8{ - "tmp.zig:3:32: error: type '[]i32' does not support field access", + "tmp.zig:3:32: error: type 'type' does not support field access", }); cases.add("peer cast then implicit cast const pointer to mutable C pointer", @@ -3542,7 +3542,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { \\ } \\} , &[_][]const u8{ - "tmp.zig:5:14: error: duplicate switch value: '@TypeOf(foo).ReturnType.ErrorSet.Foo'", + "tmp.zig:5:14: error: duplicate switch value: '@typeInfo(@typeInfo(@TypeOf(foo)).Fn.return_type.?).ErrorUnion.error_set.Foo'", "tmp.zig:3:14: note: other value is here", }); @@ -3674,7 +3674,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { \\ try foo(); \\} , &[_][]const u8{ - "tmp.zig:5:5: error: cannot resolve inferred error set 
'@TypeOf(foo).ReturnType.ErrorSet': function 'foo' not fully analyzed yet", + "tmp.zig:5:5: error: cannot resolve inferred error set '@typeInfo(@typeInfo(@TypeOf(foo)).Fn.return_type.?).ErrorUnion.error_set': function 'foo' not fully analyzed yet", }); cases.add("implicit cast of error set not a subset", @@ -7206,15 +7206,6 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { "tmp.zig:7:24: error: accessing union field 'Bar' while field 'Baz' is set", }); - cases.add("getting return type of generic function", - \\fn generic(a: anytype) void {} - \\comptime { - \\ _ = @TypeOf(generic).ReturnType; - \\} - , &[_][]const u8{ - "tmp.zig:3:25: error: ReturnType has not been resolved because 'fn(anytype) anytype' is generic", - }); - cases.add("unsupported modifier at start of asm output constraint", \\export fn foo() void { \\ var bar: u32 = 3; From 209a3da4f73ab6dd4182649af8fb7439e9145441 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 4 Sep 2020 13:00:10 -0700 Subject: [PATCH 45/56] provide default implementation of std.log on freestanding closes #6252 --- lib/std/log.zig | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/std/log.zig b/lib/std/log.zig index 50bdfdc068..7b677f698a 100644 --- a/lib/std/log.zig +++ b/lib/std/log.zig @@ -127,6 +127,10 @@ fn log( if (@enumToInt(message_level) <= @enumToInt(level)) { if (@hasDecl(root, "log")) { root.log(message_level, scope, format, args); + } else if (std.Target.current.os.tag == .freestanding) { + // On freestanding one must provide a log function; we do not have + // any I/O configured. + return; } else if (builtin.mode != .ReleaseSmall) { const held = std.debug.getStderrMutex().acquire(); defer held.release(); From a8a806e925ddb2362e0021bd83dd73f7389a2dbb Mon Sep 17 00:00:00 2001 From: Daniel Ludwig Date: Thu, 3 Sep 2020 08:51:10 +0200 Subject: [PATCH 46/56] std.ChildProcess: use "\Device\Null" on Windows --- lib/std/child_process.zig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/std/child_process.zig b/lib/std/child_process.zig index ed6a3a739e..9219b05088 100644 --- a/lib/std/child_process.zig +++ b/lib/std/child_process.zig @@ -485,8 +485,8 @@ pub const ChildProcess = struct { const any_ignore = (self.stdin_behavior == StdIo.Ignore or self.stdout_behavior == StdIo.Ignore or self.stderr_behavior == StdIo.Ignore); const nul_handle = if (any_ignore) - windows.OpenFile(&[_]u16{ 'N', 'U', 'L' }, .{ - .dir = std.fs.cwd().fd, + // "\Device\Null" or "\??\NUL" + windows.OpenFile(&[_]u16{ '\\', 'D', 'e', 'v', 'i', 'c', 'e', '\\', 'N', 'u', 'l', 'l' }, .{ .access_mask = windows.GENERIC_READ | windows.SYNCHRONIZE, .share_access = windows.FILE_SHARE_READ, .creation = windows.OPEN_EXISTING, From 295f09eadcb39b79e46676ff0ea503103439aae2 Mon Sep 17 00:00:00 2001 From: Matt Knight Date: Sun, 6 Sep 2020 16:12:27 -0700 Subject: [PATCH 47/56] implemented and testing op codes for instructions documented in the unofficial bpf insn reference --- lib/std/os/linux/bpf.zig | 336 +++++++++++++++++++++++++++++++++------ 1 file changed, 290 insertions(+), 46 deletions(-) diff --git a/lib/std/os/linux/bpf.zig b/lib/std/os/linux/bpf.zig index 928c157c42..e5227b3847 100644 --- a/lib/std/os/linux/bpf.zig +++ b/lib/std/os/linux/bpf.zig @@ -5,6 +5,7 @@ // and substantial portions of the software. 
usingnamespace std.os; const std = @import("../../std.zig"); +const builtin = @import("builtin"); const expectEqual = std.testing.expectEqual; // instruction classes @@ -328,6 +329,8 @@ pub const Helper = enum(i32) { _, }; +// TODO: determine that this is the expected bit layout for both little and big +// endian systems /// a single BPF instruction pub const Insn = packed struct { code: u8, @@ -340,19 +343,30 @@ pub const Insn = packed struct { /// frame pub const Reg = packed enum(u4) { r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10 }; const Source = packed enum(u1) { reg, imm }; + + const Mode = packed enum(u8) { + imm = IMM, + abs = ABS, + ind = IND, + mem = MEM, + len = LEN, + msh = MSH, + }; + const AluOp = packed enum(u8) { add = ADD, sub = SUB, mul = MUL, div = DIV, - op_or = OR, - op_and = AND, + alu_or = OR, + alu_and = AND, lsh = LSH, rsh = RSH, neg = NEG, mod = MOD, xor = XOR, mov = MOV, + arsh = ARSH, }; pub const Size = packed enum(u8) { @@ -368,6 +382,13 @@ pub const Insn = packed struct { jgt = JGT, jge = JGE, jset = JSET, + jlt = JLT, + jle = JLE, + jne = JNE, + jsgt = JSGT, + jsge = JSGE, + jslt = JSLT, + jsle = JSLE, }; const ImmOrReg = union(Source) { @@ -419,14 +440,102 @@ pub const Insn = packed struct { return alu(64, .add, dst, src); } + pub fn sub(dst: Reg, src: anytype) Insn { + return alu(64, .sub, dst, src); + } + + pub fn mul(dst: Reg, src: anytype) Insn { + return alu(64, .mul, dst, src); + } + + pub fn div(dst: Reg, src: anytype) Insn { + return alu(64, .div, dst, src); + } + + pub fn alu_or(dst: Reg, src: anytype) Insn { + return alu(64, .alu_or, dst, src); + } + + pub fn alu_and(dst: Reg, src: anytype) Insn { + return alu(64, .alu_and, dst, src); + } + + pub fn lsh(dst: Reg, src: anytype) Insn { + return alu(64, .lsh, dst, src); + } + + pub fn rsh(dst: Reg, src: anytype) Insn { + return alu(64, .rsh, dst, src); + } + + pub fn neg(dst: Reg) Insn { + return alu(64, .neg, dst, 0); + } + + pub fn mod(dst: Reg, src: anytype) Insn { + return alu(64, .mod, dst, src); + } + + pub fn xor(dst: Reg, src: anytype) Insn { + return alu(64, .xor, dst, src); + } + + pub fn arsh(dst: Reg, src: anytype) Insn { + return alu(64, .arsh, dst, src); + } + fn jmp(op: JmpOp, dst: Reg, src: anytype, off: i16) Insn { return imm_reg(JMP | @enumToInt(op), dst, src, off); } + pub fn ja(off: i16) Insn { + return jmp(.ja, .r0, 0, off); + } + pub fn jeq(dst: Reg, src: anytype, off: i16) Insn { return jmp(.jeq, dst, src, off); } + pub fn jgt(dst: Reg, src: anytype, off: i16) Insn { + return jmp(.jgt, dst, src, off); + } + + pub fn jge(dst: Reg, src: anytype, off: i16) Insn { + return jmp(.jge, dst, src, off); + } + + pub fn jlt(dst: Reg, src: anytype, off: i16) Insn { + return jmp(.jlt, dst, src, off); + } + + pub fn jle(dst: Reg, src: anytype, off: i16) Insn { + return jmp(.jle, dst, src, off); + } + + pub fn jset(dst: Reg, src: anytype, off: i16) Insn { + return jmp(.jset, dst, src, off); + } + + pub fn jne(dst: Reg, src: anytype, off: i16) Insn { + return jmp(.jne, dst, src, off); + } + + pub fn jsgt(dst: Reg, src: anytype, off: i16) Insn { + return jmp(.jsgt, dst, src, off); + } + + pub fn jsge(dst: Reg, src: anytype, off: i16) Insn { + return jmp(.jsge, dst, src, off); + } + + pub fn jslt(dst: Reg, src: anytype, off: i16) Insn { + return jmp(.jslt, dst, src, off); + } + + pub fn jsle(dst: Reg, src: anytype, off: i16) Insn { + return jmp(.jsle, dst, src, off); + } + pub fn stx_mem(size: Size, dst: Reg, src: Reg, off: i16) Insn { return Insn{ .code = STX | @enumToInt(size) | MEM, @@ -447,17 
+556,34 @@ pub const Insn = packed struct { }; } - /// direct packet access, R0 = *(uint *)(skb->data + imm32) - pub fn ld_abs(size: Size, imm: i32) Insn { + fn ld(mode: Mode, size: Size, dst: Reg, src: Reg, imm: i32) Insn { return Insn{ - .code = LD | @enumToInt(size) | ABS, - .dst = 0, - .src = 0, + .code = @enumToInt(mode) | @enumToInt(size) | LD, + .dst = @enumToInt(dst), + .src = @enumToInt(src), .off = 0, .imm = imm, }; } + pub fn ld_abs(size: Size, dst: Reg, src: Reg, imm: i32) Insn { + return ld(.abs, size, dst, src, imm); + } + + pub fn ld_ind(size: Size, dst: Reg, src: Reg, imm: i32) Insn { + return ld(.ind, size, dst, src, imm); + } + + pub fn ldx(size: Size, dst: Reg, src: Reg, off: i16) Insn { + return Insn{ + .code = MEM | @enumToInt(size) | LDX, + .dst = @enumToInt(dst), + .src = @enumToInt(src), + .off = off, + .imm = 0, + }; + } + fn ld_imm_impl1(dst: Reg, src: Reg, imm: u64) Insn { return Insn{ .code = LD | DW | IMM, @@ -478,6 +604,14 @@ pub const Insn = packed struct { }; } + pub fn ld_dw1(dst: Reg, imm: u64) Insn { + return ld_imm_impl1(dst, .r0, imm); + } + + pub fn ld_dw2(imm: u64) Insn { + return ld_imm_impl2(imm); + } + pub fn ld_map_fd1(dst: Reg, map_fd: fd_t) Insn { return ld_imm_impl1(dst, @intToEnum(Reg, PSEUDO_MAP_FD), @intCast(u64, map_fd)); } @@ -486,6 +620,53 @@ pub const Insn = packed struct { return ld_imm_impl2(@intCast(u64, map_fd)); } + pub fn st(comptime size: Size, dst: Reg, off: i16, imm: i32) Insn { + if (size == .double_word) @compileError("TODO: implement st_dw"); + return Insn{ + .code = MEM | @enumToInt(size) | ST, + .dst = @enumToInt(dst), + .src = 0, + .off = off, + .imm = imm, + }; + } + + pub fn stx(size: Size, dst: Reg, off: i16, src: Reg) Insn { + return Insn{ + .code = MEM | @enumToInt(size) | STX, + .dst = @enumToInt(dst), + .src = @enumToInt(src), + .off = off, + .imm = 0, + }; + } + + fn endian_swap(endian: builtin.Endian, comptime size: Size, dst: Reg) Insn { + return Insn{ + .code = switch (endian) { + .Big => 0xdc, + .Little => 0xd4, + }, + .dst = @enumToInt(dst), + .src = 0, + .off = 0, + .imm = switch (size) { + .byte => @compileError("can't swap a single byte"), + .half_word => 16, + .word => 32, + .double_word => 64, + }, + }; + } + + pub fn le(comptime size: Size, dst: Reg) Insn { + return endian_swap(.Little, size, dst); + } + + pub fn be(comptime size: Size, dst: Reg) Insn { + return endian_swap(.Big, size, dst); + } + pub fn call(helper: Helper) Insn { return Insn{ .code = JMP | CALL, @@ -508,59 +689,122 @@ pub const Insn = packed struct { } }; -fn expect_insn(insn: Insn, val: u64) void { - expectEqual(@bitCast(u64, insn), val); -} - test "insn bitsize" { expectEqual(@bitSizeOf(Insn), 64); } -// mov instructions -test "mov imm" { - expect_insn(Insn.mov(.r1, 1), 0x00000001000001b7); +fn expect_opcode(code: u8, insn: Insn) void { + expectEqual(code, insn.code); } -test "mov reg" { - expect_insn(Insn.mov(.r6, .r1), 0x00000000000016bf); -} +// The opcodes were grabbed from https://github.com/iovisor/bpf-docs/blob/master/eBPF.md +test "opcodes" { + // instructions that have a name that end with 1 or 2 are consecutive for + // loading 64-bit immediates (imm is only 32 bits wide) -// alu instructions -test "add imm" { - expect_insn(Insn.add(.r2, -4), 0xfffffffc00000207); -} + // alu instructions + expect_opcode(0x07, Insn.add(.r1, 0)); + expect_opcode(0x0f, Insn.add(.r1, .r2)); + expect_opcode(0x17, Insn.sub(.r1, 0)); + expect_opcode(0x1f, Insn.sub(.r1, .r2)); + expect_opcode(0x27, Insn.mul(.r1, 0)); + expect_opcode(0x2f, 
Insn.mul(.r1, .r2)); + expect_opcode(0x37, Insn.div(.r1, 0)); + expect_opcode(0x3f, Insn.div(.r1, .r2)); + expect_opcode(0x47, Insn.alu_or(.r1, 0)); + expect_opcode(0x4f, Insn.alu_or(.r1, .r2)); + expect_opcode(0x57, Insn.alu_and(.r1, 0)); + expect_opcode(0x5f, Insn.alu_and(.r1, .r2)); + expect_opcode(0x67, Insn.lsh(.r1, 0)); + expect_opcode(0x6f, Insn.lsh(.r1, .r2)); + expect_opcode(0x77, Insn.rsh(.r1, 0)); + expect_opcode(0x7f, Insn.rsh(.r1, .r2)); + expect_opcode(0x87, Insn.neg(.r1)); + expect_opcode(0x97, Insn.mod(.r1, 0)); + expect_opcode(0x9f, Insn.mod(.r1, .r2)); + expect_opcode(0xa7, Insn.xor(.r1, 0)); + expect_opcode(0xaf, Insn.xor(.r1, .r2)); + expect_opcode(0xb7, Insn.mov(.r1, 0)); + expect_opcode(0xbf, Insn.mov(.r1, .r2)); + expect_opcode(0xc7, Insn.arsh(.r1, 0)); + expect_opcode(0xcf, Insn.arsh(.r1, .r2)); -// ld instructions -test "ld_abs" { - expect_insn(Insn.ld_abs(.byte, 42), 0x0000002a00000030); -} + // atomic instructions: might be more of these not documented in the wild + expect_opcode(0xdb, Insn.xadd(.r1, .r2)); -test "ld_map_fd" { - expect_insn(Insn.ld_map_fd1(.r1, 42), 0x0000002a00001118); - expect_insn(Insn.ld_map_fd2(42), 0x0000000000000000); -} + // TODO: byteswap instructions + expect_opcode(0xd4, Insn.le(.half_word, .r1)); + expectEqual(@intCast(i32, 16), Insn.le(.half_word, .r1).imm); + expect_opcode(0xd4, Insn.le(.word, .r1)); + expectEqual(@intCast(i32, 32), Insn.le(.word, .r1).imm); + expect_opcode(0xd4, Insn.le(.double_word, .r1)); + expectEqual(@intCast(i32, 64), Insn.le(.double_word, .r1).imm); + expect_opcode(0xdc, Insn.be(.half_word, .r1)); + expectEqual(@intCast(i32, 16), Insn.be(.half_word, .r1).imm); + expect_opcode(0xdc, Insn.be(.word, .r1)); + expectEqual(@intCast(i32, 32), Insn.be(.word, .r1).imm); + expect_opcode(0xdc, Insn.be(.double_word, .r1)); + expectEqual(@intCast(i32, 64), Insn.be(.double_word, .r1).imm); -// st instructions -test "stx_mem" { - expect_insn(Insn.stx_mem(.word, .r10, .r0, -4), 0x00000000fffc0a63); -} + // memory instructions + expect_opcode(0x18, Insn.ld_dw1(.r1, 0)); + expect_opcode(0x00, Insn.ld_dw2(0)); -test "xadd" { - expect_insn(Insn.xadd(.r0, .r1), 0x00000000000010db); -} + // loading a map fd + expect_opcode(0x18, Insn.ld_map_fd1(.r1, 0)); + expectEqual(@intCast(u4, PSEUDO_MAP_FD), Insn.ld_map_fd1(.r1, 0).src); + expect_opcode(0x00, Insn.ld_map_fd2(0)); -// jmp instructions -test "jeq imm" { - expect_insn(Insn.jeq(.r0, 0, 2), 0x0000000000020015); -} + expect_opcode(0x38, Insn.ld_abs(.double_word, .r1, .r2, 0)); + expect_opcode(0x20, Insn.ld_abs(.word, .r1, .r2, 0)); + expect_opcode(0x28, Insn.ld_abs(.half_word, .r1, .r2, 0)); + expect_opcode(0x30, Insn.ld_abs(.byte, .r1, .r2, 0)); -// other instructions -test "call" { - expect_insn(Insn.call(.map_lookup_elem), 0x0000000100000085); -} + expect_opcode(0x58, Insn.ld_ind(.double_word, .r1, .r2, 0)); + expect_opcode(0x40, Insn.ld_ind(.word, .r1, .r2, 0)); + expect_opcode(0x48, Insn.ld_ind(.half_word, .r1, .r2, 0)); + expect_opcode(0x50, Insn.ld_ind(.byte, .r1, .r2, 0)); -test "exit" { - expect_insn(Insn.exit(), 0x0000000000000095); + expect_opcode(0x79, Insn.ldx(.double_word, .r1, .r2, 0)); + expect_opcode(0x61, Insn.ldx(.word, .r1, .r2, 0)); + expect_opcode(0x69, Insn.ldx(.half_word, .r1, .r2, 0)); + expect_opcode(0x71, Insn.ldx(.byte, .r1, .r2, 0)); + + expect_opcode(0x62, Insn.st(.word, .r1, 0, 0)); + expect_opcode(0x6a, Insn.st(.half_word, .r1, 0, 0)); + expect_opcode(0x72, Insn.st(.byte, .r1, 0, 0)); + + expect_opcode(0x63, Insn.stx(.word, .r1, 0, .r2)); + 
expect_opcode(0x6b, Insn.stx(.half_word, .r1, 0, .r2)); + expect_opcode(0x73, Insn.stx(.byte, .r1, 0, .r2)); + expect_opcode(0x7b, Insn.stx(.double_word, .r1, 0, .r2)); + + // branch instructions + expect_opcode(0x05, Insn.ja(0)); + expect_opcode(0x15, Insn.jeq(.r1, 0, 0)); + expect_opcode(0x1d, Insn.jeq(.r1, .r2, 0)); + expect_opcode(0x25, Insn.jgt(.r1, 0, 0)); + expect_opcode(0x2d, Insn.jgt(.r1, .r2, 0)); + expect_opcode(0x35, Insn.jge(.r1, 0, 0)); + expect_opcode(0x3d, Insn.jge(.r1, .r2, 0)); + expect_opcode(0xa5, Insn.jlt(.r1, 0, 0)); + expect_opcode(0xad, Insn.jlt(.r1, .r2, 0)); + expect_opcode(0xb5, Insn.jle(.r1, 0, 0)); + expect_opcode(0xbd, Insn.jle(.r1, .r2, 0)); + expect_opcode(0x45, Insn.jset(.r1, 0, 0)); + expect_opcode(0x4d, Insn.jset(.r1, .r2, 0)); + expect_opcode(0x55, Insn.jne(.r1, 0, 0)); + expect_opcode(0x5d, Insn.jne(.r1, .r2, 0)); + expect_opcode(0x65, Insn.jsgt(.r1, 0, 0)); + expect_opcode(0x6d, Insn.jsgt(.r1, .r2, 0)); + expect_opcode(0x75, Insn.jsge(.r1, 0, 0)); + expect_opcode(0x7d, Insn.jsge(.r1, .r2, 0)); + expect_opcode(0xc5, Insn.jslt(.r1, 0, 0)); + expect_opcode(0xcd, Insn.jslt(.r1, .r2, 0)); + expect_opcode(0xd5, Insn.jsle(.r1, 0, 0)); + expect_opcode(0xdd, Insn.jsle(.r1, .r2, 0)); + expect_opcode(0x85, Insn.call(.unspec)); + expect_opcode(0x95, Insn.exit()); } pub const Cmd = extern enum(usize) { From a993c7dd1bf7d6ffa8e4a235ef2133e5d5bef254 Mon Sep 17 00:00:00 2001 From: Matt Knight Date: Sun, 6 Sep 2020 16:19:49 -0700 Subject: [PATCH 48/56] removed redundant pseudo insn --- lib/std/os/linux/bpf.zig | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/lib/std/os/linux/bpf.zig b/lib/std/os/linux/bpf.zig index e5227b3847..0cef923f7c 100644 --- a/lib/std/os/linux/bpf.zig +++ b/lib/std/os/linux/bpf.zig @@ -536,16 +536,6 @@ pub const Insn = packed struct { return jmp(.jsle, dst, src, off); } - pub fn stx_mem(size: Size, dst: Reg, src: Reg, off: i16) Insn { - return Insn{ - .code = STX | @enumToInt(size) | MEM, - .dst = @enumToInt(dst), - .src = @enumToInt(src), - .off = off, - .imm = 0, - }; - } - pub fn xadd(dst: Reg, src: Reg) Insn { return Insn{ .code = STX | XADD | DW, From cf06817768a347353735ddc908c74ca17c1d7d15 Mon Sep 17 00:00:00 2001 From: Matt Knight Date: Sun, 6 Sep 2020 16:21:05 -0700 Subject: [PATCH 49/56] improved compile error message --- lib/std/os/linux/bpf.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/std/os/linux/bpf.zig b/lib/std/os/linux/bpf.zig index 0cef923f7c..146bd04ced 100644 --- a/lib/std/os/linux/bpf.zig +++ b/lib/std/os/linux/bpf.zig @@ -611,7 +611,7 @@ pub const Insn = packed struct { } pub fn st(comptime size: Size, dst: Reg, off: i16, imm: i32) Insn { - if (size == .double_word) @compileError("TODO: implement st_dw"); + if (size == .double_word) @compileError("TODO: need to determine how to correctly handle double words"); return Insn{ .code = MEM | @enumToInt(size) | ST, .dst = @enumToInt(dst), From 67817b230f9da645eee22bdb98e715b1505c8f16 Mon Sep 17 00:00:00 2001 From: Matt Knight Date: Sun, 6 Sep 2020 17:09:25 -0700 Subject: [PATCH 50/56] fixed improper builtin import --- lib/std/os/linux/bpf.zig | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/std/os/linux/bpf.zig b/lib/std/os/linux/bpf.zig index 146bd04ced..226394980e 100644 --- a/lib/std/os/linux/bpf.zig +++ b/lib/std/os/linux/bpf.zig @@ -5,7 +5,6 @@ // and substantial portions of the software. 
usingnamespace std.os; const std = @import("../../std.zig"); -const builtin = @import("builtin"); const expectEqual = std.testing.expectEqual; // instruction classes @@ -631,7 +630,7 @@ pub const Insn = packed struct { }; } - fn endian_swap(endian: builtin.Endian, comptime size: Size, dst: Reg) Insn { + fn endian_swap(endian: std.builtin.Endian, comptime size: Size, dst: Reg) Insn { return Insn{ .code = switch (endian) { .Big => 0xdc, From eca20b5e03a21694d52ac9d3c5e9c4a4ce4ac07d Mon Sep 17 00:00:00 2001 From: Peter Spiess-Knafl Date: Mon, 7 Sep 2020 17:57:45 +0200 Subject: [PATCH 51/56] Fix compile when using EFI target (Fixes #6275) --- lib/std/start.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/std/start.zig b/lib/std/start.zig index 3eb02ba65e..c65cd08981 100644 --- a/lib/std/start.zig +++ b/lib/std/start.zig @@ -67,7 +67,7 @@ fn EfiMain(handle: uefi.Handle, system_table: *uefi.tables.SystemTable) callconv uefi.handle = handle; uefi.system_table = system_table; - switch (@typeInfo(@TypeOf(read)).Fn.return_type.?) { + switch (@typeInfo(@TypeOf(root.main)).Fn.return_type.?) { noreturn => { root.main(); }, From 533bfc68bf8b4ad7ffbe5814a622f200dc345b69 Mon Sep 17 00:00:00 2001 From: Vincent Rischmann Date: Sun, 6 Sep 2020 17:55:23 +0200 Subject: [PATCH 52/56] big int: fix Managed.dump() --- lib/std/math/big/int.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/std/math/big/int.zig b/lib/std/math/big/int.zig index 963fc21f3b..19f6d0809e 100644 --- a/lib/std/math/big/int.zig +++ b/lib/std/math/big/int.zig @@ -1449,7 +1449,7 @@ pub const Managed = struct { for (self.limbs[0..self.len()]) |limb| { std.debug.warn("{x} ", .{limb}); } - std.debug.warn("capacity={} positive={}\n", .{ self.limbs.len, self.positive }); + std.debug.warn("capacity={} positive={}\n", .{ self.limbs.len, self.isPositive() }); } /// Negate the sign. 
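Circling back to the std.log change earlier in this series: on freestanding targets the application is expected to supply its own handler, since there is no stderr to fall back on. A minimal root-level override might look like the sketch below (the signature is assumed from lib/std/log.zig at this point in the series; the UART comment is purely hypothetical):

    const std = @import("std");

    /// Hypothetical override in the root source file of a freestanding build.
    pub fn log(
        comptime level: std.log.Level,
        comptime scope: @TypeOf(.EnumLiteral),
        comptime format: []const u8,
        args: anytype,
    ) void {
        // No stderr here; forward the message to whatever the target provides,
        // e.g. a UART or an in-memory ring buffer.
    }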
From db7a2382977a12e3ad95e3f2249c538d9c31cd87 Mon Sep 17 00:00:00 2001 From: Matthew Knight Date: Mon, 7 Sep 2020 12:41:29 -0700 Subject: [PATCH 53/56] BPF: add some more documentation (#6268) * added documentation for ringbuffers, which context type maps to which program type, and added some formatting --- lib/std/os/linux/bpf.zig | 288 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 284 insertions(+), 4 deletions(-) diff --git a/lib/std/os/linux/bpf.zig b/lib/std/os/linux/bpf.zig index 226394980e..df5274a82d 100644 --- a/lib/std/os/linux/bpf.zig +++ b/lib/std/os/linux/bpf.zig @@ -62,6 +62,7 @@ pub const MAXINSNS = 4096; // instruction classes /// jmp mode in word width pub const JMP32 = 0x06; + /// alu mode in double word width pub const ALU64 = 0x07; @@ -72,14 +73,17 @@ pub const XADD = 0xc0; // alu/jmp fields /// mov reg to reg pub const MOV = 0xb0; + /// sign extending arithmetic shift right */ pub const ARSH = 0xc0; // change endianness of a register /// flags for endianness conversion: pub const END = 0xd0; + /// convert to little-endian */ pub const TO_LE = 0x00; + /// convert to big-endian pub const TO_BE = 0x08; pub const FROM_LE = TO_LE; @@ -88,29 +92,39 @@ pub const FROM_BE = TO_BE; // jmp encodings /// jump != * pub const JNE = 0x50; + /// LT is unsigned, '<' pub const JLT = 0xa0; + /// LE is unsigned, '<=' * pub const JLE = 0xb0; + /// SGT is signed '>', GT in x86 pub const JSGT = 0x60; + /// SGE is signed '>=', GE in x86 pub const JSGE = 0x70; + /// SLT is signed, '<' pub const JSLT = 0xc0; + /// SLE is signed, '<=' pub const JSLE = 0xd0; + /// function call pub const CALL = 0x80; + /// function return pub const EXIT = 0x90; /// Flag for prog_attach command. If a sub-cgroup installs some bpf program, the /// program in this cgroup yields to sub-cgroup program. pub const F_ALLOW_OVERRIDE = 0x1; + /// Flag for prog_attach command. If a sub-cgroup installs some bpf program, /// that cgroup program gets run in addition to the program in this cgroup. pub const F_ALLOW_MULTI = 0x2; + /// Flag for prog_attach command. pub const F_REPLACE = 0x4; @@ -164,47 +178,61 @@ pub const PSEUDO_CALL = 1; /// flag for BPF_MAP_UPDATE_ELEM command. create new element or update existing pub const ANY = 0; + /// flag for BPF_MAP_UPDATE_ELEM command. create new element if it didn't exist pub const NOEXIST = 1; + /// flag for BPF_MAP_UPDATE_ELEM command. update existing element pub const EXIST = 2; + /// flag for BPF_MAP_UPDATE_ELEM command. spin_lock-ed map_lookup/map_update pub const F_LOCK = 4; /// flag for BPF_MAP_CREATE command */ pub const BPF_F_NO_PREALLOC = 0x1; + /// flag for BPF_MAP_CREATE command. Instead of having one common LRU list in /// the BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list which can /// scale and perform better. Note, the LRU nodes (including free nodes) cannot /// be moved across different LRU lists. pub const BPF_F_NO_COMMON_LRU = 0x2; + /// flag for BPF_MAP_CREATE command. Specify numa node during map creation pub const BPF_F_NUMA_NODE = 0x4; + /// flag for BPF_MAP_CREATE command. Flags for BPF object read access from /// syscall side pub const BPF_F_RDONLY = 0x8; + /// flag for BPF_MAP_CREATE command. Flags for BPF object write access from /// syscall side pub const BPF_F_WRONLY = 0x10; + /// flag for BPF_MAP_CREATE command. Flag for stack_map, store build_id+offset /// instead of pointer pub const BPF_F_STACK_BUILD_ID = 0x20; + /// flag for BPF_MAP_CREATE command. Zero-initialize hash function seed. This /// should only be used for testing. 
pub const BPF_F_ZERO_SEED = 0x40; + /// flag for BPF_MAP_CREATE command Flags for accessing BPF object from program /// side. pub const BPF_F_RDONLY_PROG = 0x80; + /// flag for BPF_MAP_CREATE command. Flags for accessing BPF object from program /// side. pub const BPF_F_WRONLY_PROG = 0x100; + /// flag for BPF_MAP_CREATE command. Clone map from listener for newly accepted /// socket pub const BPF_F_CLONE = 0x200; + /// flag for BPF_MAP_CREATE command. Enable memory-mapping BPF map pub const BPF_F_MMAPABLE = 0x400; -/// These values correspond to "syscalls" within the BPF program's environment +/// These values correspond to "syscalls" within the BPF program's environment, +/// each one is documented in std.os.linux.BPF.kern pub const Helper = enum(i32) { unspec, map_lookup_elem, @@ -325,6 +353,29 @@ pub const Helper = enum(i32) { tcp_send_ack, send_signal_thread, jiffies64, + read_branch_records, + get_ns_current_pid_tgid, + xdp_output, + get_netns_cookie, + get_current_ancestor_cgroup_id, + sk_assign, + ktime_get_boot_ns, + seq_printf, + seq_write, + sk_cgroup_id, + sk_ancestor_cgroup_id, + ringbuf_output, + ringbuf_reserve, + ringbuf_submit, + ringbuf_discard, + ringbuf_query, + csum_level, + skc_to_tcp6_sock, + skc_to_tcp_sock, + skc_to_tcp_timewait_sock, + skc_to_tcp_request_sock, + skc_to_udp6_sock, + get_task_stack, _, }; @@ -797,39 +848,123 @@ test "opcodes" { } pub const Cmd = extern enum(usize) { + /// Create a map and return a file descriptor that refers to the map. The + /// close-on-exec file descriptor flag is automatically enabled for the new + /// file descriptor. + /// + /// uses MapCreateAttr map_create, + + /// Look up an element by key in a specified map and return its value. + /// + /// uses MapElemAttr map_lookup_elem, + + /// Create or update an element (key/value pair) in a specified map. + /// + /// uses MapElemAttr map_update_elem, + + /// Look up and delete an element by key in a specified map. + /// + /// uses MapElemAttr map_delete_elem, + + /// Look up an element by key in a specified map and return the key of the + /// next element. map_get_next_key, + + /// Verify and load an eBPF program, returning a new file descriptor + /// associated with the program. The close-on-exec file descriptor flag + /// is automatically enabled for the new file descriptor. 
+ /// + /// uses ProgLoadAttr prog_load, + + /// Pin a map or eBPF program to a path within the minimal BPF filesystem + /// + /// uses ObjAttr obj_pin, + + /// Get the file descriptor of a BPF object pinned to a certain path + /// + /// uses ObjAttr obj_get, + + /// uses ProgAttachAttr prog_attach, + + /// uses ProgAttachAttr prog_detach, + + /// uses TestRunAttr prog_test_run, + + /// uses GetIdAttr prog_get_next_id, + + /// uses GetIdAttr map_get_next_id, + + /// uses GetIdAttr prog_get_fd_by_id, + + /// uses GetIdAttr map_get_fd_by_id, + + /// uses InfoAttr obj_get_info_by_fd, + + /// uses QueryAttr prog_query, + + /// uses RawTracepointAttr raw_tracepoint_open, + + /// uses BtfLoadAttr btf_load, + + /// uses GetIdAttr btf_get_fd_by_id, + + /// uses TaskFdQueryAttr task_fd_query, + + /// uses MapElemAttr map_lookup_and_delete_elem, map_freeze, + + /// uses GetIdAttr btf_get_next_id, + + /// uses MapBatchAttr map_lookup_batch, + + /// uses MapBatchAttr map_lookup_and_delete_batch, + + /// uses MapBatchAttr map_update_batch, + + /// uses MapBatchAttr map_delete_batch, + + /// uses LinkCreateAttr link_create, + + /// uses LinkUpdateAttr link_update, + + /// uses GetIdAttr link_get_fd_by_id, + + /// uses GetIdAttr link_get_next_id, + + /// uses EnableStatsAttr enable_stats, + + /// uses IterCreateAttr iter_create, link_detach, _, @@ -863,42 +998,138 @@ pub const MapType = extern enum(u32) { sk_storage, devmap_hash, struct_ops, + + /// An ordered and shared CPU version of perf_event_array. They have + /// similar semantics: + /// - variable length records + /// - no blocking: when full, reservation fails + /// - memory mappable for ease and speed + /// - epoll notifications for new data, but can busy poll + /// + /// Ringbufs give BPF programs two sets of APIs: + /// - ringbuf_output() allows copy data from one place to a ring + /// buffer, similar to bpf_perf_event_output() + /// - ringbuf_reserve()/ringbuf_commit()/ringbuf_discard() split the + /// process into two steps. First a fixed amount of space is reserved, + /// if that is successful then the program gets a pointer to a chunk of + /// memory and can be submitted with commit() or discarded with + /// discard() + /// + /// ringbuf_output() will incurr an extra memory copy, but allows to submit + /// records of the length that's not known beforehand, and is an easy + /// replacement for perf_event_outptu(). + /// + /// ringbuf_reserve() avoids the extra memory copy but requires a known size + /// of memory beforehand. 
+ /// + /// ringbuf_query() allows to query properties of the map, 4 are currently + /// supported: + /// - BPF_RB_AVAIL_DATA: amount of unconsumed data in ringbuf + /// - BPF_RB_RING_SIZE: returns size of ringbuf + /// - BPF_RB_CONS_POS/BPF_RB_PROD_POS returns current logical position + /// of consumer and producer respectively + /// + /// key size: 0 + /// value size: 0 + /// max entries: size of ringbuf, must be power of 2 ringbuf, + _, }; pub const ProgType = extern enum(u32) { unspec, + + /// context type: __sk_buff socket_filter, + + /// context type: bpf_user_pt_regs_t kprobe, + + /// context type: __sk_buff sched_cls, + + /// context type: __sk_buff sched_act, + + /// context type: u64 tracepoint, + + /// context type: xdp_md xdp, + + /// context type: bpf_perf_event_data perf_event, + + /// context type: __sk_buff cgroup_skb, + + /// context type: bpf_sock cgroup_sock, + + /// context type: __sk_buff lwt_in, + + /// context type: __sk_buff lwt_out, + + /// context type: __sk_buff lwt_xmit, + + /// context type: bpf_sock_ops sock_ops, + + /// context type: __sk_buff sk_skb, + + /// context type: bpf_cgroup_dev_ctx cgroup_device, + + /// context type: sk_msg_md sk_msg, + + /// context type: bpf_raw_tracepoint_args raw_tracepoint, + + /// context type: bpf_sock_addr cgroup_sock_addr, + + /// context type: __sk_buff lwt_seg6local, + + /// context type: u32 lirc_mode2, + + /// context type: sk_reuseport_md sk_reuseport, + + /// context type: __sk_buff flow_dissector, + + /// context type: bpf_sysctl cgroup_sysctl, + + /// context type: bpf_raw_tracepoint_args raw_tracepoint_writable, + + /// context type: bpf_sockopt cgroup_sockopt, + + /// context type: void * tracing, + + /// context type: void * struct_ops, + + /// context type: void * ext, + + /// context type: void * lsm, + + /// context type: bpf_sk_lookup sk_lookup, + _, }; pub const AttachType = extern enum(u32) { @@ -948,27 +1179,38 @@ const obj_name_len = 16; pub const MapCreateAttr = extern struct { /// one of MapType map_type: u32, + /// size of key in bytes key_size: u32, + /// size of value in bytes value_size: u32, + /// max number of entries in a map max_entries: u32, + /// .map_create related flags map_flags: u32, + /// fd pointing to the inner map inner_map_fd: fd_t, + /// numa node (effective only if MapCreateFlags.numa_node is set) numa_node: u32, map_name: [obj_name_len]u8, + /// ifindex of netdev to create on map_ifindex: u32, + /// fd pointing to a BTF type data btf_fd: fd_t, + /// BTF type_id of the key btf_key_type_id: u32, + /// BTF type_id of the value bpf_value_type_id: u32, + /// BTF type_id of a kernel struct stored as the map value btf_vmlinux_value_type_id: u32, }; @@ -988,10 +1230,12 @@ pub const MapElemAttr = extern struct { pub const MapBatchAttr = extern struct { /// start batch, NULL to start from beginning in_batch: u64, + /// output: next start batch out_batch: u64, keys: u64, values: u64, + /// input/output: /// input: # of key/value elements /// output: # of filled elements @@ -1008,35 +1252,49 @@ pub const ProgLoadAttr = extern struct { insn_cnt: u32, insns: u64, license: u64, + /// verbosity level of verifier log_level: u32, + /// size of user buffer log_size: u32, + /// user supplied buffer log_buf: u64, + /// not used kern_version: u32, prog_flags: u32, prog_name: [obj_name_len]u8, - /// ifindex of netdev to prep for. For some prog types expected attach - /// type must be known at load time to verify attach type specific parts - /// of prog (context accesses, allowed helpers, etc). 
+ + /// ifindex of netdev to prep for. prog_ifindex: u32, + + /// For some prog types expected attach type must be known at load time to + /// verify attach type specific parts of prog (context accesses, allowed + /// helpers, etc). expected_attach_type: u32, + /// fd pointing to BTF type data prog_btf_fd: fd_t, + /// userspace bpf_func_info size func_info_rec_size: u32, func_info: u64, + /// number of bpf_func_info records func_info_cnt: u32, + /// userspace bpf_line_info size line_info_rec_size: u32, line_info: u64, + /// number of bpf_line_info records line_info_cnt: u32, + /// in-kernel BTF type id to attach to attact_btf_id: u32, + /// 0 to attach to vmlinux attach_prog_id: u32, }; @@ -1052,10 +1310,13 @@ pub const ObjAttr = extern struct { pub const ProgAttachAttr = extern struct { /// container object to attach to target_fd: fd_t, + /// eBPF program to attach attach_bpf_fd: fd_t, + attach_type: u32, attach_flags: u32, + // TODO: BPF_F_REPLACE flags /// previously attached eBPF program to replace if .replace is used replace_bpf_fd: fd_t, @@ -1065,16 +1326,20 @@ pub const ProgAttachAttr = extern struct { pub const TestAttr = extern struct { prog_fd: fd_t, retval: u32, + /// input: len of data_in data_size_in: u32, + /// input/output: len of data_out. returns ENOSPC if data_out is too small. data_size_out: u32, data_in: u64, data_out: u64, repeat: u32, duration: u32, + /// input: len of ctx_in ctx_size_in: u32, + /// input/output: len of ctx_out. returns ENOSPC if ctx_out is too small. ctx_size_out: u32, ctx_in: u64, @@ -1127,26 +1392,35 @@ pub const BtfLoadAttr = extern struct { btf_log_level: u32, }; +/// struct used by Cmd.task_fd_query pub const TaskFdQueryAttr = extern struct { /// input: pid pid: pid_t, + /// input: fd fd: fd_t, + /// input: flags flags: u32, + /// input/output: buf len buf_len: u32, + /// input/output: /// tp_name for tracepoint /// symbol for kprobe /// filename for uprobe buf: u64, + /// output: prod_id prog_id: u32, + /// output: BPF_FD_TYPE fd_type: u32, + /// output: probe_offset probe_offset: u64, + /// output: probe_addr probe_addr: u64, }; @@ -1155,9 +1429,11 @@ pub const TaskFdQueryAttr = extern struct { pub const LinkCreateAttr = extern struct { /// eBPF program to attach prog_fd: fd_t, + /// object to attach to target_fd: fd_t, attach_type: u32, + /// extra flags flags: u32, }; @@ -1165,10 +1441,13 @@ pub const LinkCreateAttr = extern struct { /// struct used by Cmd.link_update command pub const LinkUpdateAttr = extern struct { link_fd: fd_t, + /// new program to update link with new_prog_fd: fd_t, + /// extra flags flags: u32, + /// expected link's program fd, it is specified only if BPF_F_REPLACE is /// set in flags old_prog_fd: fd_t, @@ -1185,6 +1464,7 @@ pub const IterCreateAttr = extern struct { flags: u32, }; +/// Mega struct that is passed to the bpf() syscall pub const Attr = extern union { map_create: MapCreateAttr, map_elem: MapElemAttr, From a496f94be92581cdf97a1bd60c4dea0369e26395 Mon Sep 17 00:00:00 2001 From: Matt Knight Date: Sun, 6 Sep 2020 17:45:54 -0700 Subject: [PATCH 54/56] added map create, update, delete, and prog load --- lib/std/os/linux/bpf.zig | 181 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 179 insertions(+), 2 deletions(-) diff --git a/lib/std/os/linux/bpf.zig b/lib/std/os/linux/bpf.zig index df5274a82d..7c0dd0bb19 100644 --- a/lib/std/os/linux/bpf.zig +++ b/lib/std/os/linux/bpf.zig @@ -3,9 +3,13 @@ // This file is part of [zig](https://ziglang.org/), which is MIT licensed. 
// The MIT license requires this copyright notice to be included in all copies // and substantial portions of the software. -usingnamespace std.os; +usingnamespace std.os.linux; const std = @import("../../std.zig"); +const errno = getErrno; +const unexpectedErrno = std.os.unexpectedErrno; const expectEqual = std.testing.expectEqual; +const expectError = std.testing.expectError; +const expect = std.testing.expect; // instruction classes pub const LD = 0x00; @@ -1323,7 +1327,7 @@ pub const ProgAttachAttr = extern struct { }; /// struct used by Cmd.prog_test_run command -pub const TestAttr = extern struct { +pub const TestRunAttr = extern struct { prog_fd: fd_t, retval: u32, @@ -1484,3 +1488,176 @@ pub const Attr = extern union { enable_stats: EnableStatsAttr, iter_create: IterCreateAttr, }; + +pub const Log = struct { + level: u32, + buf: []u8, +}; + +pub fn map_create(map_type: MapType, key_size: u32, value_size: u32, max_entries: u32) !fd_t { + var attr = Attr{ + .map_create = std.mem.zeroes(MapCreateAttr), + }; + + attr.map_create.map_type = @enumToInt(map_type); + attr.map_create.key_size = key_size; + attr.map_create.value_size = value_size; + attr.map_create.max_entries = max_entries; + + const rc = bpf(.map_create, &attr, @sizeOf(MapCreateAttr)); + return switch (errno(rc)) { + 0 => @intCast(fd_t, rc), + EINVAL => error.MapTypeOrAttrInvalid, + ENOMEM => error.SystemResources, + EPERM => error.AccessDenied, + else => |err| unexpectedErrno(rc), + }; +} + +test "map_create" { + const map = try map_create(.hash, 4, 4, 32); + defer std.os.close(map); +} + +pub fn map_lookup_elem(fd: fd_t, key: []const u8, value: []u8) !void { + var attr = Attr{ + .map_elem = std.mem.zeroes(MapElemAttr), + }; + + attr.map_elem.map_fd = fd; + attr.map_elem.key = @ptrToInt(key.ptr); + attr.map_elem.result.value = @ptrToInt(value.ptr); + + const rc = bpf(.map_lookup_elem, &attr, @sizeOf(MapElemAttr)); + switch (errno(rc)) { + 0 => return, + EBADF => return error.BadFd, + EFAULT => unreachable, + EINVAL => return error.FieldInAttrNeedsZeroing, + ENOENT => return error.NotFound, + EPERM => return error.AccessDenied, + else => |err| return unexpectedErrno(rc), + } +} + +pub fn map_update_elem(fd: fd_t, key: []const u8, value: []const u8, flags: u64) !void { + var attr = Attr{ + .map_elem = std.mem.zeroes(MapElemAttr), + }; + + attr.map_elem.map_fd = fd; + attr.map_elem.key = @ptrToInt(key.ptr); + attr.map_elem.result = .{ .value = @ptrToInt(value.ptr) }; + attr.map_elem.flags = flags; + + const rc = bpf(.map_update_elem, &attr, @sizeOf(MapElemAttr)); + switch (errno(rc)) { + 0 => return, + E2BIG => return error.ReachedMaxEntries, + EBADF => return error.BadFd, + EFAULT => unreachable, + EINVAL => return error.FieldInAttrNeedsZeroing, + ENOMEM => return error.SystemResources, + EPERM => return error.AccessDenied, + else => |err| return unexpectedErrno(err), + } +} + +pub fn map_delete_elem(fd: fd_t, key: []const u8) !void { + var attr = Attr{ + .map_elem = std.mem.zeroes(MapElemAttr), + }; + + attr.map_elem.map_fd = fd; + attr.map_elem.key = @ptrToInt(key.ptr); + + const rc = bpf(.map_delete_elem, &attr, @sizeOf(MapElemAttr)); + switch (errno(rc)) { + 0 => return, + EBADF => return error.BadFd, + EFAULT => unreachable, + EINVAL => return error.FieldInAttrNeedsZeroing, + ENOENT => return error.NotFound, + EPERM => return error.AccessDenied, + else => |err| return unexpectedErrno(err), + } +} + +test "map lookup, update, and delete" { + const key_size = 4; + const value_size = 4; + const map = try map_create(.hash, 
key_size, value_size, 1); + defer std.os.close(map); + + const key = std.mem.zeroes([key_size]u8); + var value = std.mem.zeroes([value_size]u8); + + // fails looking up value that doesn't exist + expectError(error.NotFound, map_lookup_elem(map, &key, &value)); + + // succeed at updating and looking up element + try map_update_elem(map, &key, &value, 0); + try map_lookup_elem(map, &key, &value); + + // fails inserting more than max entries + const second_key = [key_size]u8{ 0, 0, 0, 1 }; + expectError(error.ReachedMaxEntries, map_update_elem(map, &second_key, &value, 0)); + + // succeed at deleting an existing elem + try map_delete_elem(map, &key); + expectError(error.NotFound, map_lookup_elem(map, &key, &value)); + + // fail at deleting a non-existing elem + expectError(error.NotFound, map_delete_elem(map, &key)); +} + +pub fn prog_load( + prog_type: ProgType, + insns: []const Insn, + log: ?*Log, + license: []const u8, + kern_version: u32, +) !fd_t { + var attr = Attr{ + .prog_load = std.mem.zeroes(ProgLoadAttr), + }; + + attr.prog_load.prog_type = @enumToInt(prog_type); + attr.prog_load.insns = @ptrToInt(insns.ptr); + attr.prog_load.insn_cnt = @intCast(u32, insns.len); + attr.prog_load.license = @ptrToInt(license.ptr); + attr.prog_load.kern_version = kern_version; + + if (log) |l| { + attr.prog_load.log_buf = @ptrToInt(l.buf.ptr); + attr.prog_load.log_size = @intCast(u32, l.buf.len); + attr.prog_load.log_level = l.level; + } + + const rc = bpf(.prog_load, &attr, @sizeOf(ProgLoadAttr)); + return switch (errno(rc)) { + 0 => @intCast(fd_t, rc), + EACCES => error.UnsafeProgram, + EFAULT => unreachable, + EINVAL => error.InvalidProgram, + EPERM => error.AccessDenied, + else => |err| unexpectedErrno(err), + }; +} + +test "prog_load" { + // this should fail because it does not set r0 before exiting + const bad_prog = [_]Insn{ + Insn.exit(), + }; + + const good_prog = [_]Insn{ + Insn.mov(.r0, 0), + Insn.exit(), + }; + + const prog = try prog_load(.socket_filter, &good_prog, null, "MIT", 0); + defer std.os.close(prog); + + expectError(error.UnsafeProgram, prog_load(.socket_filter, &bad_prog, null, "MIT", 0)); +} From 2328f40b7a0b301ad176e2657694457667be862e Mon Sep 17 00:00:00 2001 From: LemonBoy Date: Mon, 7 Sep 2020 19:07:27 +0200 Subject: [PATCH 55/56] std: Add DEFLATE and zlib decompressors --- lib/std/compress.zig | 13 + lib/std/compress/deflate.zig | 521 ++++++++++++++ lib/std/compress/rfc1951.txt | 955 +++++++++++++++++++++++++ lib/std/compress/rfc1951.txt.fixed.z.9 | Bin 0 -> 12836 bytes lib/std/compress/rfc1951.txt.z.0 | Bin 0 -> 36960 bytes lib/std/compress/rfc1951.txt.z.9 | Bin 0 -> 11111 bytes lib/std/compress/zlib.zig | 178 +++++ lib/std/std.zig | 1 + 8 files changed, 1668 insertions(+) create mode 100644 lib/std/compress.zig create mode 100644 lib/std/compress/deflate.zig create mode 100644 lib/std/compress/rfc1951.txt create mode 100644 lib/std/compress/rfc1951.txt.fixed.z.9 create mode 100644 lib/std/compress/rfc1951.txt.z.0 create mode 100644 lib/std/compress/rfc1951.txt.z.9 create mode 100644 lib/std/compress/zlib.zig diff --git a/lib/std/compress.zig b/lib/std/compress.zig new file mode 100644 index 0000000000..5518f807df --- /dev/null +++ b/lib/std/compress.zig @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2015-2020 Zig Contributors +// This file is part of [zig](https://ziglang.org/), which is MIT licensed. +// The MIT license requires this copyright notice to be included in all copies +// and substantial portions of the software. 
+const std = @import("std.zig"); + +pub const deflate = @import("compress/deflate.zig"); +pub const zlib = @import("compress/zlib.zig"); + +test "" { + _ = zlib; +} diff --git a/lib/std/compress/deflate.zig b/lib/std/compress/deflate.zig new file mode 100644 index 0000000000..bad23349e8 --- /dev/null +++ b/lib/std/compress/deflate.zig @@ -0,0 +1,521 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2015-2020 Zig Contributors +// This file is part of [zig](https://ziglang.org/), which is MIT licensed. +// The MIT license requires this copyright notice to be included in all copies +// and substantial portions of the software. +// +// Decompressor for DEFLATE data streams (RFC1951) +// +// Heavily inspired by the simple decompressor puff.c by Mark Adler + +const std = @import("std"); +const io = std.io; +const math = std.math; +const mem = std.mem; + +const assert = std.debug.assert; + +const MAXBITS = 15; +const MAXLCODES = 286; +const MAXDCODES = 30; +const MAXCODES = MAXLCODES + MAXDCODES; +const FIXLCODES = 288; + +const Huffman = struct { + count: [MAXBITS + 1]u16, + symbol: [MAXCODES]u16, + + fn construct(self: *Huffman, length: []const u16) !void { + for (self.count) |*val| { + val.* = 0; + } + + for (length) |val| { + self.count[val] += 1; + } + + if (self.count[0] == length.len) + return; + + var left: isize = 1; + for (self.count[1..]) |val| { + left *= 2; + left -= @as(isize, @bitCast(i16, val)); + if (left < 0) + return error.InvalidTree; + } + + var offs: [MAXBITS + 1]u16 = undefined; + { + var len: usize = 1; + offs[1] = 0; + while (len < MAXBITS) : (len += 1) { + offs[len + 1] = offs[len] + self.count[len]; + } + } + + for (length) |val, symbol| { + if (val != 0) { + self.symbol[offs[val]] = @truncate(u16, symbol); + offs[val] += 1; + } + } + } +}; + +pub fn InflateStream(comptime ReaderType: type) type { + return struct { + const Self = @This(); + + pub const Error = ReaderType.Error || error{ + EndOfStream, + BadCounts, + InvalidBlockType, + InvalidDistance, + InvalidFixedCode, + InvalidLength, + InvalidStoredSize, + InvalidSymbol, + InvalidTree, + MissingEOBCode, + NoLastLength, + OutOfCodes, + }; + pub const Reader = io.Reader(*Self, Error, read); + + bit_reader: io.BitReader(.Little, ReaderType), + + // True if the decoder met the end of the compressed stream, no further + // data can be decompressed + seen_eos: bool, + + state: union(enum) { + // Parse a compressed block header and set up the internal state for + // decompressing its contents. + DecodeBlockHeader: void, + // Decode all the symbols in a compressed block. + DecodeBlockData: void, + // Copy N bytes of uncompressed data from the underlying stream into + // the window. + Copy: usize, + // Copy 1 byte into the window. + CopyLit: u8, + // Copy L bytes from the window itself, starting from D bytes + // behind. + CopyFrom: struct { distance: u16, length: u16 }, + }, + + // Sliding window for the LZ77 algorithm + window: struct { + const WSelf = @This(); + + // invariant: buffer length is always a power of 2 + buf: []u8, + // invariant: ri <= wi + wi: usize = 0, // Write index + ri: usize = 0, // Read index + el: usize = 0, // Number of readable elements + + fn readable(self: *WSelf) usize { + return self.el; + } + + fn writable(self: *WSelf) usize { + return self.buf.len - self.el; + } + + // Insert a single byte into the window. + // Returns 1 if there's enough space for the new byte and 0 + // otherwise. 
+ fn append(self: *WSelf, value: u8) usize { + if (self.writable() < 1) return 0; + self.appendUnsafe(value); + return 1; + } + + // Insert a single byte into the window. + // Assumes there's enough space. + fn appendUnsafe(self: *WSelf, value: u8) void { + self.buf[self.wi] = value; + self.wi = (self.wi + 1) & (self.buf.len - 1); + self.el += 1; + } + + // Fill dest[] with data from the window, starting from the read + // position. This updates the read pointer. + // Returns the number of read bytes or 0 if there's nothing to read + // yet. + fn read(self: *WSelf, dest: []u8) usize { + const N = math.min(dest.len, self.readable()); + + if (N == 0) return 0; + + if (self.ri + N < self.buf.len) { + // The data doesn't wrap around + mem.copy(u8, dest, self.buf[self.ri .. self.ri + N]); + } else { + // The data wraps around the buffer, split the copy + std.mem.copy(u8, dest, self.buf[self.ri..]); + // How much data we've copied from `ri` to the end + const r = self.buf.len - self.ri; + std.mem.copy(u8, dest[r..], self.buf[0 .. N - r]); + } + + self.ri = (self.ri + N) & (self.buf.len - 1); + self.el -= N; + + return N; + } + + // Copy `length` bytes starting from `distance` bytes behind the + // write pointer. + // Be careful as the length may be greater than the distance, that's + // how the compressor encodes run-length encoded sequences. + fn copyFrom(self: *WSelf, distance: usize, length: usize) usize { + const N = math.min(length, self.writable()); + + if (N == 0) return 0; + + // TODO: Profile and, if needed, replace with smarter juggling + // of the window memory for the non-overlapping case. + var i: usize = 0; + while (i < N) : (i += 1) { + const index = (self.wi -% distance) % self.buf.len; + self.appendUnsafe(self.buf[index]); + } + + return N; + } + }, + + // Compressor-local Huffman tables used to decompress blocks with + // dynamic codes. + huffman_tables: [2]Huffman = undefined, + + // Huffman tables used for decoding length/distance pairs. 
+ hdist: *Huffman,
+ hlen: *Huffman,
+
+ fn stored(self: *Self) !void {
+ // Discard the remaining bits, the length field is always
+ // byte-aligned (and so is the data)
+ self.bit_reader.alignToByte();
+
+ const length = (try self.bit_reader.readBitsNoEof(u16, 16));
+ const length_cpl = (try self.bit_reader.readBitsNoEof(u16, 16));
+
+ if (length != ~length_cpl)
+ return error.InvalidStoredSize;
+
+ self.state = .{ .Copy = length };
+ }
+
+ fn fixed(self: *Self) !void {
+ comptime var lencode: Huffman = undefined;
+ comptime var distcode: Huffman = undefined;
+
+ // The Huffman codes are specified in RFC 1951, section 3.2.6
+ comptime {
+ @setEvalBranchQuota(100000);
+
+ const len_lengths = //
+ [_]u16{8} ** 144 ++
+ [_]u16{9} ** 112 ++
+ [_]u16{7} ** 24 ++
+ [_]u16{8} ** 8;
+ assert(len_lengths.len == FIXLCODES);
+ try lencode.construct(len_lengths[0..]);
+
+ const dist_lengths = [_]u16{5} ** MAXDCODES;
+ try distcode.construct(dist_lengths[0..]);
+ }
+
+ self.hlen = &lencode;
+ self.hdist = &distcode;
+ self.state = .DecodeBlockData;
+ }
+
+ fn dynamic(self: *Self) !void {
+ // Number of length codes
+ const nlen = (try self.bit_reader.readBitsNoEof(usize, 5)) + 257;
+ // Number of distance codes
+ const ndist = (try self.bit_reader.readBitsNoEof(usize, 5)) + 1;
+ // Number of code length codes
+ const ncode = (try self.bit_reader.readBitsNoEof(usize, 4)) + 4;
+
+ if (nlen > MAXLCODES or ndist > MAXDCODES)
+ return error.BadCounts;
+
+ // Permutation of code length codes
+ const ORDER = [19]u16{
+ 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4,
+ 12, 3, 13, 2, 14, 1, 15,
+ };
+
+ // Build the Huffman table to decode the code length codes
+ var lencode: Huffman = undefined;
+ {
+ var lengths = std.mem.zeroes([19]u16);
+
+ // Read the code lengths, missing ones are left as zero
+ for (ORDER[0..ncode]) |val| {
+ lengths[val] = try self.bit_reader.readBitsNoEof(u16, 3);
+ }
+
+ try lencode.construct(lengths[0..]);
+ }
+
+ // Read the length/literal and distance code length tables.
+ // Zero the table by default so we can avoid explicitly writing out
+ // zeros for codes 17 and 18
+ var lengths = std.mem.zeroes([MAXCODES]u16);
+
+ var i: usize = 0;
+ while (i < nlen + ndist) {
+ const symbol = try self.decode(&lencode);
+
+ switch (symbol) {
+ 0...15 => {
+ lengths[i] = symbol;
+ i += 1;
+ },
+ 16 => {
+ // repeat last length 3..6 times
+ if (i == 0) return error.NoLastLength;
+
+ const last_length = lengths[i - 1];
+ const repeat = 3 + (try self.bit_reader.readBitsNoEof(usize, 2));
+ const last_index = i + repeat;
+ while (i < last_index) : (i += 1) {
+ lengths[i] = last_length;
+ }
+ },
+ 17 => {
+ // repeat zero 3..10 times
+ i += 3 + (try self.bit_reader.readBitsNoEof(usize, 3));
+ },
+ 18 => {
+ // repeat zero 11..138 times
+ i += 11 + (try self.bit_reader.readBitsNoEof(usize, 7));
+ },
+ else => return error.InvalidSymbol,
+ }
+ }
+
+ if (i > nlen + ndist)
+ return error.InvalidLength;
+
+ // Check if the end of block code is present
+ if (lengths[256] == 0)
+ return error.MissingEOBCode;
+
+ try self.huffman_tables[0].construct(lengths[0..nlen]);
+ try self.huffman_tables[1].construct(lengths[nlen ..
nlen + ndist]); + + self.hlen = &self.huffman_tables[0]; + self.hdist = &self.huffman_tables[1]; + self.state = .DecodeBlockData; + } + + fn codes(self: *Self, lencode: *Huffman, distcode: *Huffman) !bool { + // Size base for length codes 257..285 + const LENS = [29]u16{ + 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, + 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, + }; + // Extra bits for length codes 257..285 + const LEXT = [29]u16{ + 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, + 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, + }; + // Offset base for distance codes 0..29 + const DISTS = [30]u16{ + 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, + 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577, + }; + // Extra bits for distance codes 0..29 + const DEXT = [30]u16{ + 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, + 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, + }; + + while (true) { + const symbol = try self.decode(lencode); + + switch (symbol) { + 0...255 => { + // Literal value + const c = @truncate(u8, symbol); + if (self.window.append(c) == 0) { + self.state = .{ .CopyLit = c }; + return false; + } + }, + 256 => { + // End of block symbol + return true; + }, + 257...285 => { + // Length/distance pair + const length_symbol = symbol - 257; + const length = LENS[length_symbol] + + try self.bit_reader.readBitsNoEof(u16, LEXT[length_symbol]); + + const distance_symbol = try self.decode(distcode); + const distance = DISTS[distance_symbol] + + try self.bit_reader.readBitsNoEof(u16, DEXT[distance_symbol]); + + if (distance > self.window.buf.len) + return error.InvalidDistance; + + const written = self.window.copyFrom(distance, length); + if (written != length) { + self.state = .{ + .CopyFrom = .{ + .distance = distance, + .length = length - @truncate(u16, written), + }, + }; + return false; + } + }, + else => return error.InvalidFixedCode, + } + } + } + + fn decode(self: *Self, h: *Huffman) !u16 { + var len: usize = 1; + var code: usize = 0; + var first: usize = 0; + var index: usize = 0; + + while (len <= MAXBITS) : (len += 1) { + code |= try self.bit_reader.readBitsNoEof(usize, 1); + const count = h.count[len]; + if (code < first + count) + return h.symbol[index + (code - first)]; + index += count; + first += count; + first <<= 1; + code <<= 1; + } + + return error.OutOfCodes; + } + + fn step(self: *Self) !void { + while (true) { + switch (self.state) { + .DecodeBlockHeader => { + // The compressed stream is done + if (self.seen_eos) return; + + const last = try self.bit_reader.readBitsNoEof(u1, 1); + const kind = try self.bit_reader.readBitsNoEof(u2, 2); + + self.seen_eos = last != 0; + + // The next state depends on the block type + switch (kind) { + 0 => try self.stored(), + 1 => try self.fixed(), + 2 => try self.dynamic(), + 3 => return error.InvalidBlockType, + } + }, + .DecodeBlockData => { + if (!try self.codes(self.hlen, self.hdist)) { + return; + } + + self.state = .DecodeBlockHeader; + }, + .Copy => |*length| { + const N = math.min(self.window.writable(), length.*); + + // TODO: This loop can be more efficient. On the other + // hand uncompressed blocks are not that common so... 
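+ //
+ // A possible shape for that optimization (a sketch only, not
+ // implemented here): pull the stored bytes through a small stack
+ // buffer instead of one byte per call, e.g.
+ //
+ //   var buf: [512]u8 = undefined;
+ //   const want = math.min(N - i, buf.len);
+ //   const got = try self.bit_reader.read(buf[0..want]);
+ //
+ // and then push the `got` bytes into the window, cutting the
+ // per-byte call overhead of the loop below.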
+ var i: usize = 0; + while (i < N) : (i += 1) { + var tmp: [1]u8 = undefined; + if ((try self.bit_reader.read(&tmp)) != 1) { + // Unexpected end of stream, keep this error + // consistent with the use of readBitsNoEof + return error.EndOfStream; + } + self.window.appendUnsafe(tmp[0]); + } + + if (N != length.*) { + length.* -= N; + return; + } + + self.state = .DecodeBlockHeader; + }, + .CopyLit => |c| { + if (self.window.append(c) == 0) { + return; + } + + self.state = .DecodeBlockData; + }, + .CopyFrom => |*info| { + const written = self.window.copyFrom(info.distance, info.length); + if (written != info.length) { + info.length -= @truncate(u16, written); + return; + } + + self.state = .DecodeBlockData; + }, + } + } + } + + fn init(source: ReaderType, window_slice: []u8) Self { + assert(math.isPowerOfTwo(window_slice.len)); + + return Self{ + .bit_reader = io.bitReader(.Little, source), + .window = .{ .buf = window_slice }, + .seen_eos = false, + .state = .DecodeBlockHeader, + .hdist = undefined, + .hlen = undefined, + }; + } + + // Implements the io.Reader interface + pub fn read(self: *Self, buffer: []u8) Error!usize { + if (buffer.len == 0) + return 0; + + // Try reading as much as possible from the window + var read_amt: usize = self.window.read(buffer); + while (read_amt < buffer.len) { + // Run the state machine, we can detect the "effective" end of + // stream condition by checking if any progress was made. + // Why "effective"? Because even though `seen_eos` is true we + // may still have to finish processing other decoding steps. + try self.step(); + // No progress was made + if (self.window.readable() == 0) + break; + + read_amt += self.window.read(buffer[read_amt..]); + } + + return read_amt; + } + + pub fn reader(self: *Self) Reader { + return .{ .context = self }; + } + }; +} + +pub fn inflateStream(reader: anytype, window_slice: []u8) InflateStream(@TypeOf(reader)) { + return InflateStream(@TypeOf(reader)).init(reader, window_slice); +} diff --git a/lib/std/compress/rfc1951.txt b/lib/std/compress/rfc1951.txt new file mode 100644 index 0000000000..403c8c722f --- /dev/null +++ b/lib/std/compress/rfc1951.txt @@ -0,0 +1,955 @@ + + + + + + +Network Working Group P. Deutsch +Request for Comments: 1951 Aladdin Enterprises +Category: Informational May 1996 + + + DEFLATE Compressed Data Format Specification version 1.3 + +Status of This Memo + + This memo provides information for the Internet community. This memo + does not specify an Internet standard of any kind. Distribution of + this memo is unlimited. + +IESG Note: + + The IESG takes no position on the validity of any Intellectual + Property Rights statements contained in this document. + +Notices + + Copyright (c) 1996 L. Peter Deutsch + + Permission is granted to copy and distribute this document for any + purpose and without charge, including translations into other + languages and incorporation into compilations, provided that the + copyright notice and this notice are preserved, and that any + substantive changes or deletions from the original are clearly + marked. + + A pointer to the latest version of this and related documentation in + HTML format can be found at the URL + . + +Abstract + + This specification defines a lossless compressed data format that + compresses data using a combination of the LZ77 algorithm and Huffman + coding, with efficiency comparable to the best currently available + general-purpose compression methods. 
The data can be produced or + consumed, even for an arbitrarily long sequentially presented input + data stream, using only an a priori bounded amount of intermediate + storage. The format can be implemented readily in a manner not + covered by patents. + + + + + + + + +Deutsch Informational [Page 1] + +RFC 1951 DEFLATE Compressed Data Format Specification May 1996 + + +Table of Contents + + 1. Introduction ................................................... 2 + 1.1. Purpose ................................................... 2 + 1.2. Intended audience ......................................... 3 + 1.3. Scope ..................................................... 3 + 1.4. Compliance ................................................ 3 + 1.5. Definitions of terms and conventions used ................ 3 + 1.6. Changes from previous versions ............................ 4 + 2. Compressed representation overview ............................. 4 + 3. Detailed specification ......................................... 5 + 3.1. Overall conventions ....................................... 5 + 3.1.1. Packing into bytes .................................. 5 + 3.2. Compressed block format ................................... 6 + 3.2.1. Synopsis of prefix and Huffman coding ............... 6 + 3.2.2. Use of Huffman coding in the "deflate" format ....... 7 + 3.2.3. Details of block format ............................. 9 + 3.2.4. Non-compressed blocks (BTYPE=00) ................... 11 + 3.2.5. Compressed blocks (length and distance codes) ...... 11 + 3.2.6. Compression with fixed Huffman codes (BTYPE=01) .... 12 + 3.2.7. Compression with dynamic Huffman codes (BTYPE=10) .. 13 + 3.3. Compliance ............................................... 14 + 4. Compression algorithm details ................................. 14 + 5. References .................................................... 16 + 6. Security Considerations ....................................... 16 + 7. Source code ................................................... 16 + 8. Acknowledgements .............................................. 16 + 9. Author's Address .............................................. 17 + +1. Introduction + + 1.1. Purpose + + The purpose of this specification is to define a lossless + compressed data format that: + * Is independent of CPU type, operating system, file system, + and character set, and hence can be used for interchange; + * Can be produced or consumed, even for an arbitrarily long + sequentially presented input data stream, using only an a + priori bounded amount of intermediate storage, and hence + can be used in data communications or similar structures + such as Unix filters; + * Compresses data with efficiency comparable to the best + currently available general-purpose compression methods, + and in particular considerably better than the "compress" + program; + * Can be implemented readily in a manner not covered by + patents, and hence can be practiced freely; + + + +Deutsch Informational [Page 2] + +RFC 1951 DEFLATE Compressed Data Format Specification May 1996 + + + * Is compatible with the file format produced by the current + widely used gzip utility, in that conforming decompressors + will be able to read data produced by the existing gzip + compressor. + + The data format defined by this specification does not attempt to: + + * Allow random access to compressed data; + * Compress specialized data (e.g., raster graphics) as well + as the best currently available specialized algorithms. 
+ + A simple counting argument shows that no lossless compression + algorithm can compress every possible input data set. For the + format defined here, the worst case expansion is 5 bytes per 32K- + byte block, i.e., a size increase of 0.015% for large data sets. + English text usually compresses by a factor of 2.5 to 3; + executable files usually compress somewhat less; graphical data + such as raster images may compress much more. + + 1.2. Intended audience + + This specification is intended for use by implementors of software + to compress data into "deflate" format and/or decompress data from + "deflate" format. + + The text of the specification assumes a basic background in + programming at the level of bits and other primitive data + representations. Familiarity with the technique of Huffman coding + is helpful but not required. + + 1.3. Scope + + The specification specifies a method for representing a sequence + of bytes as a (usually shorter) sequence of bits, and a method for + packing the latter bit sequence into bytes. + + 1.4. Compliance + + Unless otherwise indicated below, a compliant decompressor must be + able to accept and decompress any data set that conforms to all + the specifications presented here; a compliant compressor must + produce data sets that conform to all the specifications presented + here. + + 1.5. Definitions of terms and conventions used + + Byte: 8 bits stored or transmitted as a unit (same as an octet). + For this specification, a byte is exactly 8 bits, even on machines + + + +Deutsch Informational [Page 3] + +RFC 1951 DEFLATE Compressed Data Format Specification May 1996 + + + which store a character on a number of bits different from eight. + See below, for the numbering of bits within a byte. + + String: a sequence of arbitrary bytes. + + 1.6. Changes from previous versions + + There have been no technical changes to the deflate format since + version 1.1 of this specification. In version 1.2, some + terminology was changed. Version 1.3 is a conversion of the + specification to RFC style. + +2. Compressed representation overview + + A compressed data set consists of a series of blocks, corresponding + to successive blocks of input data. The block sizes are arbitrary, + except that non-compressible blocks are limited to 65,535 bytes. + + Each block is compressed using a combination of the LZ77 algorithm + and Huffman coding. The Huffman trees for each block are independent + of those for previous or subsequent blocks; the LZ77 algorithm may + use a reference to a duplicated string occurring in a previous block, + up to 32K input bytes before. + + Each block consists of two parts: a pair of Huffman code trees that + describe the representation of the compressed data part, and a + compressed data part. (The Huffman trees themselves are compressed + using Huffman encoding.) The compressed data consists of a series of + elements of two types: literal bytes (of strings that have not been + detected as duplicated within the previous 32K input bytes), and + pointers to duplicated strings, where a pointer is represented as a + pair . The representation used in the + "deflate" format limits distances to 32K bytes and lengths to 258 + bytes, but does not limit the size of a block, except for + uncompressible blocks, which are limited as noted above. + + Each type of value (literals, distances, and lengths) in the + compressed data is represented using a Huffman code, using one code + tree for literals and lengths and a separate code tree for distances. 
+ The code trees for each block appear in a compact form just before + the compressed data for that block. + + + + + + + + + + +Deutsch Informational [Page 4] + +RFC 1951 DEFLATE Compressed Data Format Specification May 1996 + + +3. Detailed specification + + 3.1. Overall conventions In the diagrams below, a box like this: + + +---+ + | | <-- the vertical bars might be missing + +---+ + + represents one byte; a box like this: + + +==============+ + | | + +==============+ + + represents a variable number of bytes. + + Bytes stored within a computer do not have a "bit order", since + they are always treated as a unit. However, a byte considered as + an integer between 0 and 255 does have a most- and least- + significant bit, and since we write numbers with the most- + significant digit on the left, we also write bytes with the most- + significant bit on the left. In the diagrams below, we number the + bits of a byte so that bit 0 is the least-significant bit, i.e., + the bits are numbered: + + +--------+ + |76543210| + +--------+ + + Within a computer, a number may occupy multiple bytes. All + multi-byte numbers in the format described here are stored with + the least-significant byte first (at the lower memory address). + For example, the decimal number 520 is stored as: + + 0 1 + +--------+--------+ + |00001000|00000010| + +--------+--------+ + ^ ^ + | | + | + more significant byte = 2 x 256 + + less significant byte = 8 + + 3.1.1. Packing into bytes + + This document does not address the issue of the order in which + bits of a byte are transmitted on a bit-sequential medium, + since the final data format described here is byte- rather than + + + +Deutsch Informational [Page 5] + +RFC 1951 DEFLATE Compressed Data Format Specification May 1996 + + + bit-oriented. However, we describe the compressed block format + in below, as a sequence of data elements of various bit + lengths, not a sequence of bytes. We must therefore specify + how to pack these data elements into bytes to form the final + compressed byte sequence: + + * Data elements are packed into bytes in order of + increasing bit number within the byte, i.e., starting + with the least-significant bit of the byte. + * Data elements other than Huffman codes are packed + starting with the least-significant bit of the data + element. + * Huffman codes are packed starting with the most- + significant bit of the code. + + In other words, if one were to print out the compressed data as + a sequence of bytes, starting with the first byte at the + *right* margin and proceeding to the *left*, with the most- + significant bit of each byte on the left as usual, one would be + able to parse the result from right to left, with fixed-width + elements in the correct MSB-to-LSB order and Huffman codes in + bit-reversed order (i.e., with the first bit of the code in the + relative LSB position). + + 3.2. Compressed block format + + 3.2.1. Synopsis of prefix and Huffman coding + + Prefix coding represents symbols from an a priori known + alphabet by bit sequences (codes), one code for each symbol, in + a manner such that different symbols may be represented by bit + sequences of different lengths, but a parser can always parse + an encoded string unambiguously symbol-by-symbol. 
+ + We define a prefix code in terms of a binary tree in which the + two edges descending from each non-leaf node are labeled 0 and + 1 and in which the leaf nodes correspond one-for-one with (are + labeled with) the symbols of the alphabet; then the code for a + symbol is the sequence of 0's and 1's on the edges leading from + the root to the leaf labeled with that symbol. For example: + + + + + + + + + + + +Deutsch Informational [Page 6] + +RFC 1951 DEFLATE Compressed Data Format Specification May 1996 + + + /\ Symbol Code + 0 1 ------ ---- + / \ A 00 + /\ B B 1 + 0 1 C 011 + / \ D 010 + A /\ + 0 1 + / \ + D C + + A parser can decode the next symbol from an encoded input + stream by walking down the tree from the root, at each step + choosing the edge corresponding to the next input bit. + + Given an alphabet with known symbol frequencies, the Huffman + algorithm allows the construction of an optimal prefix code + (one which represents strings with those symbol frequencies + using the fewest bits of any possible prefix codes for that + alphabet). Such a code is called a Huffman code. (See + reference [1] in Chapter 5, references for additional + information on Huffman codes.) + + Note that in the "deflate" format, the Huffman codes for the + various alphabets must not exceed certain maximum code lengths. + This constraint complicates the algorithm for computing code + lengths from symbol frequencies. Again, see Chapter 5, + references for details. + + 3.2.2. Use of Huffman coding in the "deflate" format + + The Huffman codes used for each alphabet in the "deflate" + format have two additional rules: + + * All codes of a given bit length have lexicographically + consecutive values, in the same order as the symbols + they represent; + + * Shorter codes lexicographically precede longer codes. + + + + + + + + + + + + +Deutsch Informational [Page 7] + +RFC 1951 DEFLATE Compressed Data Format Specification May 1996 + + + We could recode the example above to follow this rule as + follows, assuming that the order of the alphabet is ABCD: + + Symbol Code + ------ ---- + A 10 + B 0 + C 110 + D 111 + + I.e., 0 precedes 10 which precedes 11x, and 110 and 111 are + lexicographically consecutive. + + Given this rule, we can define the Huffman code for an alphabet + just by giving the bit lengths of the codes for each symbol of + the alphabet in order; this is sufficient to determine the + actual codes. In our example, the code is completely defined + by the sequence of bit lengths (2, 1, 3, 3). The following + algorithm generates the codes as integers, intended to be read + from most- to least-significant bit. The code lengths are + initially in tree[I].Len; the codes are produced in + tree[I].Code. + + 1) Count the number of codes for each code length. Let + bl_count[N] be the number of codes of length N, N >= 1. + + 2) Find the numerical value of the smallest code for each + code length: + + code = 0; + bl_count[0] = 0; + for (bits = 1; bits <= MAX_BITS; bits++) { + code = (code + bl_count[bits-1]) << 1; + next_code[bits] = code; + } + + 3) Assign numerical values to all codes, using consecutive + values for all codes of the same length with the base + values determined at step 2. Codes that are never used + (which have a bit length of zero) must not be assigned a + value. 
+ + for (n = 0; n <= max_code; n++) { + len = tree[n].Len; + if (len != 0) { + tree[n].Code = next_code[len]; + next_code[len]++; + } + + + +Deutsch Informational [Page 8] + +RFC 1951 DEFLATE Compressed Data Format Specification May 1996 + + + } + + Example: + + Consider the alphabet ABCDEFGH, with bit lengths (3, 3, 3, 3, + 3, 2, 4, 4). After step 1, we have: + + N bl_count[N] + - ----------- + 2 1 + 3 5 + 4 2 + + Step 2 computes the following next_code values: + + N next_code[N] + - ------------ + 1 0 + 2 0 + 3 2 + 4 14 + + Step 3 produces the following code values: + + Symbol Length Code + ------ ------ ---- + A 3 010 + B 3 011 + C 3 100 + D 3 101 + E 3 110 + F 2 00 + G 4 1110 + H 4 1111 + + 3.2.3. Details of block format + + Each block of compressed data begins with 3 header bits + containing the following data: + + first bit BFINAL + next 2 bits BTYPE + + Note that the header bits do not necessarily begin on a byte + boundary, since a block does not necessarily occupy an integral + number of bytes. + + + + + +Deutsch Informational [Page 9] + +RFC 1951 DEFLATE Compressed Data Format Specification May 1996 + + + BFINAL is set if and only if this is the last block of the data + set. + + BTYPE specifies how the data are compressed, as follows: + + 00 - no compression + 01 - compressed with fixed Huffman codes + 10 - compressed with dynamic Huffman codes + 11 - reserved (error) + + The only difference between the two compressed cases is how the + Huffman codes for the literal/length and distance alphabets are + defined. + + In all cases, the decoding algorithm for the actual data is as + follows: + + do + read block header from input stream. + if stored with no compression + skip any remaining bits in current partially + processed byte + read LEN and NLEN (see next section) + copy LEN bytes of data to output + otherwise + if compressed with dynamic Huffman codes + read representation of code trees (see + subsection below) + loop (until end of block code recognized) + decode literal/length value from input stream + if value < 256 + copy value (literal byte) to output stream + otherwise + if value = end of block (256) + break from loop + otherwise (value = 257..285) + decode distance from input stream + + move backwards distance bytes in the output + stream, and copy length bytes from this + position to the output stream. + end loop + while not last block + + Note that a duplicated string reference may refer to a string + in a previous block; i.e., the backward distance may cross one + or more block boundaries. However a distance cannot refer past + the beginning of the output stream. (An application using a + + + +Deutsch Informational [Page 10] + +RFC 1951 DEFLATE Compressed Data Format Specification May 1996 + + + preset dictionary might discard part of the output stream; a + distance can refer to that part of the output stream anyway) + Note also that the referenced string may overlap the current + position; for example, if the last 2 bytes decoded have values + X and Y, a string reference with + adds X,Y,X,Y,X to the output stream. + + We now specify each compression method in turn. + + 3.2.4. Non-compressed blocks (BTYPE=00) + + Any bits of input up to the next byte boundary are ignored. + The rest of the block consists of the following information: + + 0 1 2 3 4... + +---+---+---+---+================================+ + | LEN | NLEN |... LEN bytes of literal data...| + +---+---+---+---+================================+ + + LEN is the number of data bytes in the block. 
NLEN is the + one's complement of LEN. + + 3.2.5. Compressed blocks (length and distance codes) + + As noted above, encoded data blocks in the "deflate" format + consist of sequences of symbols drawn from three conceptually + distinct alphabets: either literal bytes, from the alphabet of + byte values (0..255), or pairs, + where the length is drawn from (3..258) and the distance is + drawn from (1..32,768). In fact, the literal and length + alphabets are merged into a single alphabet (0..285), where + values 0..255 represent literal bytes, the value 256 indicates + end-of-block, and values 257..285 represent length codes + (possibly in conjunction with extra bits following the symbol + code) as follows: + + + + + + + + + + + + + + + + +Deutsch Informational [Page 11] + +RFC 1951 DEFLATE Compressed Data Format Specification May 1996 + + + Extra Extra Extra + Code Bits Length(s) Code Bits Lengths Code Bits Length(s) + ---- ---- ------ ---- ---- ------- ---- ---- ------- + 257 0 3 267 1 15,16 277 4 67-82 + 258 0 4 268 1 17,18 278 4 83-98 + 259 0 5 269 2 19-22 279 4 99-114 + 260 0 6 270 2 23-26 280 4 115-130 + 261 0 7 271 2 27-30 281 5 131-162 + 262 0 8 272 2 31-34 282 5 163-194 + 263 0 9 273 3 35-42 283 5 195-226 + 264 0 10 274 3 43-50 284 5 227-257 + 265 1 11,12 275 3 51-58 285 0 258 + 266 1 13,14 276 3 59-66 + + The extra bits should be interpreted as a machine integer + stored with the most-significant bit first, e.g., bits 1110 + represent the value 14. + + Extra Extra Extra + Code Bits Dist Code Bits Dist Code Bits Distance + ---- ---- ---- ---- ---- ------ ---- ---- -------- + 0 0 1 10 4 33-48 20 9 1025-1536 + 1 0 2 11 4 49-64 21 9 1537-2048 + 2 0 3 12 5 65-96 22 10 2049-3072 + 3 0 4 13 5 97-128 23 10 3073-4096 + 4 1 5,6 14 6 129-192 24 11 4097-6144 + 5 1 7,8 15 6 193-256 25 11 6145-8192 + 6 2 9-12 16 7 257-384 26 12 8193-12288 + 7 2 13-16 17 7 385-512 27 12 12289-16384 + 8 3 17-24 18 8 513-768 28 13 16385-24576 + 9 3 25-32 19 8 769-1024 29 13 24577-32768 + + 3.2.6. Compression with fixed Huffman codes (BTYPE=01) + + The Huffman codes for the two alphabets are fixed, and are not + represented explicitly in the data. The Huffman code lengths + for the literal/length alphabet are: + + Lit Value Bits Codes + --------- ---- ----- + 0 - 143 8 00110000 through + 10111111 + 144 - 255 9 110010000 through + 111111111 + 256 - 279 7 0000000 through + 0010111 + 280 - 287 8 11000000 through + 11000111 + + + +Deutsch Informational [Page 12] + +RFC 1951 DEFLATE Compressed Data Format Specification May 1996 + + + The code lengths are sufficient to generate the actual codes, + as described above; we show the codes in the table for added + clarity. Literal/length values 286-287 will never actually + occur in the compressed data, but participate in the code + construction. + + Distance codes 0-31 are represented by (fixed-length) 5-bit + codes, with possible additional bits as shown in the table + shown in Paragraph 3.2.5, above. Note that distance codes 30- + 31 will never actually occur in the compressed data. + + 3.2.7. Compression with dynamic Huffman codes (BTYPE=10) + + The Huffman codes for the two alphabets appear in the block + immediately after the header bits and before the actual + compressed data, first the literal/length code and then the + distance code. Each code is defined by a sequence of code + lengths, as discussed in Paragraph 3.2.2, above. For even + greater compactness, the code length sequences themselves are + compressed using a Huffman code. 
The alphabet for code lengths + is as follows: + + 0 - 15: Represent code lengths of 0 - 15 + 16: Copy the previous code length 3 - 6 times. + The next 2 bits indicate repeat length + (0 = 3, ... , 3 = 6) + Example: Codes 8, 16 (+2 bits 11), + 16 (+2 bits 10) will expand to + 12 code lengths of 8 (1 + 6 + 5) + 17: Repeat a code length of 0 for 3 - 10 times. + (3 bits of length) + 18: Repeat a code length of 0 for 11 - 138 times + (7 bits of length) + + A code length of 0 indicates that the corresponding symbol in + the literal/length or distance alphabet will not occur in the + block, and should not participate in the Huffman code + construction algorithm given earlier. If only one distance + code is used, it is encoded using one bit, not zero bits; in + this case there is a single code length of one, with one unused + code. One distance code of zero bits means that there are no + distance codes used at all (the data is all literals). + + We can now define the format of the block: + + 5 Bits: HLIT, # of Literal/Length codes - 257 (257 - 286) + 5 Bits: HDIST, # of Distance codes - 1 (1 - 32) + 4 Bits: HCLEN, # of Code Length codes - 4 (4 - 19) + + + +Deutsch Informational [Page 13] + +RFC 1951 DEFLATE Compressed Data Format Specification May 1996 + + + (HCLEN + 4) x 3 bits: code lengths for the code length + alphabet given just above, in the order: 16, 17, 18, + 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 + + These code lengths are interpreted as 3-bit integers + (0-7); as above, a code length of 0 means the + corresponding symbol (literal/length or distance code + length) is not used. + + HLIT + 257 code lengths for the literal/length alphabet, + encoded using the code length Huffman code + + HDIST + 1 code lengths for the distance alphabet, + encoded using the code length Huffman code + + The actual compressed data of the block, + encoded using the literal/length and distance Huffman + codes + + The literal/length symbol 256 (end of data), + encoded using the literal/length Huffman code + + The code length repeat codes can cross from HLIT + 257 to the + HDIST + 1 code lengths. In other words, all code lengths form + a single sequence of HLIT + HDIST + 258 values. + + 3.3. Compliance + + A compressor may limit further the ranges of values specified in + the previous section and still be compliant; for example, it may + limit the range of backward pointers to some value smaller than + 32K. Similarly, a compressor may limit the size of blocks so that + a compressible block fits in memory. + + A compliant decompressor must accept the full range of possible + values defined in the previous section, and must accept blocks of + arbitrary size. + +4. Compression algorithm details + + While it is the intent of this document to define the "deflate" + compressed data format without reference to any particular + compression algorithm, the format is related to the compressed + formats produced by LZ77 (Lempel-Ziv 1977, see reference [2] below); + since many variations of LZ77 are patented, it is strongly + recommended that the implementor of a compressor follow the general + algorithm presented here, which is known not to be patented per se. + The material in this section is not part of the definition of the + + + +Deutsch Informational [Page 14] + +RFC 1951 DEFLATE Compressed Data Format Specification May 1996 + + + specification per se, and a compressor need not follow it in order to + be compliant. 
+ + The compressor terminates a block when it determines that starting a + new block with fresh trees would be useful, or when the block size + fills up the compressor's block buffer. + + The compressor uses a chained hash table to find duplicated strings, + using a hash function that operates on 3-byte sequences. At any + given point during compression, let XYZ be the next 3 input bytes to + be examined (not necessarily all different, of course). First, the + compressor examines the hash chain for XYZ. If the chain is empty, + the compressor simply writes out X as a literal byte and advances one + byte in the input. If the hash chain is not empty, indicating that + the sequence XYZ (or, if we are unlucky, some other 3 bytes with the + same hash function value) has occurred recently, the compressor + compares all strings on the XYZ hash chain with the actual input data + sequence starting at the current point, and selects the longest + match. + + The compressor searches the hash chains starting with the most recent + strings, to favor small distances and thus take advantage of the + Huffman encoding. The hash chains are singly linked. There are no + deletions from the hash chains; the algorithm simply discards matches + that are too old. To avoid a worst-case situation, very long hash + chains are arbitrarily truncated at a certain length, determined by a + run-time parameter. + + To improve overall compression, the compressor optionally defers the + selection of matches ("lazy matching"): after a match of length N has + been found, the compressor searches for a longer match starting at + the next input byte. If it finds a longer match, it truncates the + previous match to a length of one (thus producing a single literal + byte) and then emits the longer match. Otherwise, it emits the + original match, and, as described above, advances N bytes before + continuing. + + Run-time parameters also control this "lazy match" procedure. If + compression ratio is most important, the compressor attempts a + complete second search regardless of the length of the first match. + In the normal case, if the current match is "long enough", the + compressor reduces the search for a longer match, thus speeding up + the process. If speed is most important, the compressor inserts new + strings in the hash table only when no match was found, or when the + match is not "too long". This degrades the compression ratio but + saves time since there are both fewer insertions and fewer searches. + + + + + +Deutsch Informational [Page 15] + +RFC 1951 DEFLATE Compressed Data Format Specification May 1996 + + +5. References + + [1] Huffman, D. A., "A Method for the Construction of Minimum + Redundancy Codes", Proceedings of the Institute of Radio + Engineers, September 1952, Volume 40, Number 9, pp. 1098-1101. + + [2] Ziv J., Lempel A., "A Universal Algorithm for Sequential Data + Compression", IEEE Transactions on Information Theory, Vol. 23, + No. 3, pp. 337-343. + + [3] Gailly, J.-L., and Adler, M., ZLIB documentation and sources, + available in ftp://ftp.uu.net/pub/archiving/zip/doc/ + + [4] Gailly, J.-L., and Adler, M., GZIP documentation and sources, + available as gzip-*.tar in ftp://prep.ai.mit.edu/pub/gnu/ + + [5] Schwartz, E. S., and Kallick, B. "Generating a canonical prefix + encoding." Comm. ACM, 7,3 (Mar. 1964), pp. 166-169. + + [6] Hirschberg and Lelewer, "Efficient decoding of prefix codes," + Comm. ACM, 33,4, April 1990, pp. 449-459. + +6. 
Security Considerations + + Any data compression method involves the reduction of redundancy in + the data. Consequently, any corruption of the data is likely to have + severe effects and be difficult to correct. Uncompressed text, on + the other hand, will probably still be readable despite the presence + of some corrupted bytes. + + It is recommended that systems using this data format provide some + means of validating the integrity of the compressed data. See + reference [3], for example. + +7. Source code + + Source code for a C language implementation of a "deflate" compliant + compressor and decompressor is available within the zlib package at + ftp://ftp.uu.net/pub/archiving/zip/zlib/. + +8. Acknowledgements + + Trademarks cited in this document are the property of their + respective owners. + + Phil Katz designed the deflate format. Jean-Loup Gailly and Mark + Adler wrote the related software described in this specification. + Glenn Randers-Pehrson converted this document to RFC and HTML format. + + + +Deutsch Informational [Page 16] + +RFC 1951 DEFLATE Compressed Data Format Specification May 1996 + + +9. Author's Address + + L. Peter Deutsch + Aladdin Enterprises + 203 Santa Margarita Ave. + Menlo Park, CA 94025 + + Phone: (415) 322-0103 (AM only) + FAX: (415) 322-1734 + EMail: + + Questions about the technical content of this specification can be + sent by email to: + + Jean-Loup Gailly and + Mark Adler + + Editorial comments on this specification can be sent by email to: + + L. Peter Deutsch and + Glenn Randers-Pehrson + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Deutsch Informational [Page 17] + diff --git a/lib/std/compress/rfc1951.txt.fixed.z.9 b/lib/std/compress/rfc1951.txt.fixed.z.9 new file mode 100644 index 0000000000000000000000000000000000000000..8ea5904770052de696de797b4ef5f3b1a5535e5b GIT binary patch literal 12836 zcmYLvS5y<|6K{GTjTRt;8fxgh3YY+5sD|DILqrS+NEcakQ|Qf5rHKIn0|G|c0=k+= zF$icVBDUS=0-~ae*xUZE_rBccZyvrgbI#1KsoDL}ky4nhQ{8lKKpIzf zD4ItylaYj^`2i}z;e9g@Q%q@Z_bC$vBcke9(ESMUP23rl=*-u^k+uF$5z`*u^ z$o`7{2%kqBavEuzbH1u??{<^Fa(J_l@y8(TW=wnH+4;udB>H7<} zt*4aN2iP4e&!4Ygq35P*yr7PPcL))*XSOM+X_nT4y%StsMjmc&vsR5r_o4eFUG+HA z>_PEPyvjbFoaVk3X=SM@4j~fnSkC$Tn1_qK##A(94Ago^+JK++=b}5fC{*>*_4yiK zH~l|#IKjKD==ZyR*fLQSKa?`>xn*MEGwQ1zEyV!|apQ#B=PXW6%MbtDl!BJ1= zT}#S?cWeE~nWSRCQ*INgB95ze_bar1pUxWuezsXE4&GS|k)c5I)!W*Hxm_w7i7K4K zU`Awqkgmks7-8_0)Pf)p>N~g~WLan^Qe+Ltb{E&NmvnhXA);4vwthCdeyn%Z)sb~= zz2Iy{W?&M6`0N}Y!169|zc$f=)vylcT$D*51=8d4ljts1*Gb-hk##WIj6G!+X<(I$ zv}w=Jp&5g=Bq-W*5FTWsr}lQkHJ!w)1np6qvGffC7~Eay7+k?>^6jX5SCpo^e0VF= zny3au<|!?)J;}1WbF}oN2i>A`!s4hwGLScyjN{bpmHiVK9BGkDU!&o8%#Ym74w$5cmpF ziE%X0T~a&EtHC$cC}uHrrTXx@;Y}x@T~Z$KF#z@N%G5zk#@PCc4W?hW7w?-t&55Yb zqO0+ZtyRTDU?JAY8EAeFVyKeNHU}nFe+cDY4qB!prjapY^AxDTl_@ND*@oN=p2XrX zJTu@sIV(3%y*}`~tetWr>BATe5baO90xIDwAyyR|&z~VOv%GsRQF=`!rmgbn zr@)?*YLQsB1}accl+O#sYDCat^E<&%J;sqlKHSb6(CkaJ70Yb*g;HEWNHT5EKtzEa z9wX1>BSga8O0&;ATtWvN^Qm>-tEQEm#5LwOK!93uQj~ekON8gwH zuKc!+Ew7x{Wj@A)8kP>upQ4omqJvfb$?0Otigond)~7=25DS?S8yJHcm6+^&=5G&_ z4QynTX`h0T;8u3eezCGC%wNFORsEPF6$WB_OSsAM^H$VBe}C8?1W^t>*g)VNq%*sZ z53H}Nz0is_bt=iIRx2y7AXy*==I1PlcOgaq-gr`zvUu8YcspDtL0%HuJ}phD?6y`~ zN(csp@8!7!%Px!{HW@IHp8U4Xb2mP%G*RB_;+KsVFAXmihF@hDg-{Zhm-|^PLkA^g z6nYV5GmXmR5uAHYz7#$}tqMUeym{wB%HzA~_=F-WnK~WO2*GMBBnY%e`5DQxBnVOD z&44u>wkCGjSXw*Us$B~zqTMvX;U|}?tqrD2*aW95O*|KH(9cUGJ#Hk2NzkZkhV|WM zrWqSvE(o=P7*S|4@JpZ;Io_zVAm13~U|p{0GdCpve=A!LMMEk>pu4dN?U$sDt>@1j z!kBTF+D`)pQG*bCC?aJY+oNjg)ZNJholoG*kB>=SMKPY1^DW~wJ*XVWs)Rj~9W)oh 
z0|;a|!zR`Edg{--=7w-ePn4fXX{DCpV-C1~l^b4@MdEw-903Ym0AK9`nuAAfo0PNM zh?=xq;HV}~!(x3DF}4KCjbx%}w14oHhQ?rDr!DFJ<$vUiVKvo%x81eRNvrC3h73oD}RO5YLg73;bT*rDJI@ z>C4jR&HD5hNdMEY+9cE~&XWv=x%c3o{>;2;h}Lc1prmGo9wxlWGLszw)TP*xWi9%N z_V$;Sn)CZl=$ZWS*W#ds6Wz~-Tn(+tqVn1AiFP+9vlIotgK9W4HojwZ;D6w_GO1m2 zTzwRe8eP)_HWTIOq{6A^h}zq+)HolX!s)0pLDtRI!f?EmRs65Dx`(?`^0s6JK`TVt zpc2%RbqiYhix0}{o}+O=a>*)*M(Pfs^)miR<4_^U@^6H_L?bJ6cTTyCnFLr14fGF$ z)&C=TrqH>o_}eFTWJ6-Y#iZa2?=vT@^?P@Bt&3GjA5kl^JBA)MW2K;vDuM*_ zUP$GjGABealiU;y+U_1B7a`hBCyR##p!Plsdt6?y+V2*bG)IuzSHY4zN_lRgk_jZ@??#fJ$|SvrP2_(~6b9-xGMP?u3=T|p(@ zy%%5(FEl4o`{S!o^PhAsrMIgrN(#75NUgHD052Ou)lPlKo5V1==$D6IKh_O)8~x_4 z)UAQl5dJ{ys$VBTVD94Ir@tl{Y?SU3h&_BCae3eqr>ye9!fl?MReT@cw8sN> z4OtW~r&;EalS8-We}Yx!8yKj7TW>EdYnORsq9ZLtV{I7rhv%~mx=(VZcbuUhiTTivjfbF~SFE(M zhl6S4aD)~V+8i6%90gIY^{UX*`L1>U_Af|tkL#V`fJu6`OYqxE_+OunDmFFXfWO=biY66l z+glAJZ&5H=Nlm~-1O^m##jFjhdO41Lm9qzgQkg9^JPAkjxX3_fUpOU7#q~Ah&F`Td zn+9yp49YZ2;-^-6MCSzXA4p`XXPFW8Pf*Jqmc#vasAV4?mK!CVhS9Yo^1oRI$zcmA zcPg@l_tnf|Sepa8yIH%-tZ99#Lb4ew;O|0$A&&f#s8CZ^Z5T)M<~5uqsdAP`89qla zL`W$xyjCf3iKN5s$chgrBHTobw2d= z${N<$ehl;;x6ODPxT1X0kHjE4K}7#0m6}z9~Yl^JN0Lb%)MCH`Puya1}c( z4K1KF@NOJUQXIaC`q*SYDr-RVgC!vN3)M0el&wgGvH4JnF;wLmccOg^Qgg9}{3d2= z>#*)E7s_F|6e@5`A#ojDiKlFw`*pjHYZIW_nKne@d+-yMPewzb@MKUQE zbAu75aiXoU^0x5@tc+~-hdQ{8R`kuxcr_oO4>+V6mS_e*&zT1<7<0Of3%n^3)-7;1 z5SPytuM~9d&-P32JeB@@o`pF~_5|C7f>CJNS@cRoaZx9XZy-~JVwl4%l1XShH3KY* zqp`sKNnE;ov<&+eR4bLS*eQ$A*y*!RV5`{37q%goUxzdf-Z8jkl*#%E(1J=_FexY^ zK2--i;UZO`J2S|@l*k8|=A(;rIfrhLdAU{duyj5O<=BO5kn-M`(LQ*jK>Tn`(dp!C z`lJ&a{O>z$`fD1LA;5|=aKFs(1CY59um8%DtJy{YW*FmXVWB93ad19D4|(HpPKROw zIqDZ=t?va^2J_U?4{Yh|IjA)IeM?rxoF9J#lNm@x{Z96d5a`#cVrkPX z5L#(a->EyO9tnR^nJ?IVVWfnLgmmJjNdg&vmPcnIAHAKlsgdR(BoZKRGHaI(BB%8`F-oHNo_>P|3ZfAgL$NjwpJ25OZ%NGSK=!X1Zma&kr# zp5U!U7w;FEHTgi%v#dMCUbHg{&?~!uhB9fFsd(~|yr*PO__WjubVqYFFP2sgl4x)s z$va!3;XMkCgfbhEn{20pc1L&NDF0bNQl2NKF58z3J>cu^WXeH`yr2fuv&5T3gAs}k zyvrGi;`>svbS}AG9mLh;Dh1*H5!Q(HZfe*a1RXyz33C$Np`F*-X>M7`roJ#8>C{*e z+5(Ph>4k()M4%j>v@;o{7Ix(ZpeBKLSYp1GQxnrW!up_Hp;TsJ@vyock}msq*FHMUu-huW|7LiExFp8W^B zW~X&%fWtUC(l&*a0)P~eP>IY&d#_iaB^+BW#Oxd(?4Y=7tJ5o|>q2ebdbQ^(4m=J#LTgJtBNs=dTaurD|(ZQZ8bq$mPtP zP%%%H$P@*ru6aZi7^{8V;3XO|vN4@~PRa|pof||FEWs6_^5;R^vYzy%9Mu|;gUWw< zpAB(-lD#t#z`HMn**?*wNl1#uorhQ`p56{`9$?bZUI+{R8;}4>S*+cFo!BXoWgra8_Bc)5U*tkf{P)x%@xb4r4>(u4K7N8L=mCGrWP=b+A! zVAGzUf4JIqL3%X2@8nB# z%uGPJCCaBb=d=BGb566*^Txrc5Ljyl71^sxeHe<;nuI1vm z(J9L8lp^!I2P>Rh#dOO(|2)?{ZLbz=gJkkq7Al^}*{Ttnh8r(Px2>zs-Dj&T>p)5L zK(e>4pdQ&=?$MS+43{v?ZZEM;SSkETDtYEYB* zq8d+O)z^lEkzL9f{(->b`wz&TmYl<~hJnMobv(e`r-^47nh1Gl>_$My(jFmn~^f){? 
zIOCWdpbMG+NUEHSbh3ZNDZy2o52AXa$9DEC%Lc5;4jpIxT~C#ws_m!R<*w4~W+8-8Nwthe_j&LIqSCxO3gEHH!Ss$_p~ z9DLe4$k*Rtulhv(dvfxrQ5^M%{3N&HhLK z5~-hDGWTA}&!D-XSs0+@-4?2w>GK*4N1V=|K3^rp2s5`dqyxlb08 zQ($lW3hI2tKQ*T?H%|?J8f-HR-e!GzO!nQcVLBD`965cHGEkV)MElF4=tBYNw1GtG zB)K-*vyX>9A#A>;Yx{?tsv=%;CsMBeH+1QzDgQ03dxCpAE_z5< z_X7xxwdWNE^Hfao@;f5oo4z~WAdHxC+;|u>4VIUu+_tyh`XC+^^#+trh>Tm+^m?dpkh4uzmvC7ozyX)QW6(8o@%eVEnZcbJm z8zW>MP<;|>!`ye=T7qFjRmofTF;Q%AK*3TGf*%l4vBIvSTOTve+hP~j>DXJ3iK^l@ zMXIfyU~Ig6LT|5PT|4a~OgBPV@AJp9#XassJq!LKRI~PK(U`oQR}3O{CKeR+RxWC& zx-1%H#QwwGGYgWi&BLYwaB)Z$BAjO|lxGh}yp-0gS+r|j#UI0t)>1&Q}sjeIz`N2)wY@zD$|@-(CfpPn?Ebj+u`uh?<+ zT@MAuH9!N=OD~G+V*B+%hYSCa|7{x-Rw%VB(g7>T)CRZbr;aN z?@1c|BO{dm>|wA=zZ9p!cDP}RwXS|NZF!Tjf$SZpx=GpK@wbH!>LrvyIr(2~_OK(U z?XjrkK6&AU*n0uH1yU8;;TRue2d~RdqtFyJ#Q7wz-W^t8kFWWi2VQpfNtZu~+z$kh zA49+yrCO?=#umW0_gIaQ7ZE8^7UxMcLGDB==saKrd^hSOx3wfJDiNQ@xxm(yoI2<& z8e(YamxC7edF`QQ_1Ooe@?Q;)T)o`a6<2Za=b-tq(pQ^r zfttNu#xE-g)2=dv4S7u{4NU`i@t*%}jlRB&tRtdDBSd0XyRyz1+!4*>W8m?{_VUF| zt6L8KG*r8P5F~(y=cdTPUZ||4q*-TcsYJ&Rf&q4y<4-v1o|n&d>YYPh9Vx32$l@mt zR!#3IGTi#ly6ArB)_IKz3SBTz_hu9+UXF&>ESS;{=M5+=RZ1 z0G-F&gOC`T+H;8L0OUi4Y_5BD(ZnFcSef;i!dAKGakRS~xh_AnF776&1`AikSi;#j zn#I5EacU_(mu+y0+tN`NKJ)_Y;Q;7SNOKeH_>AYL!`O2=itx?rdW<3E!ExT0$D=?{ zn`hCRBF|VFqU~wXC-kCXDGMkVzB6}wPz}`(!P=$BDnA&=TiIOzlpFT#kp_*hw%O;k zJ9*X7p>>BkOC8)n7^k)2fJ~E9K;^vjA&ny!wHTMwoYLC~94ndEeC3pYi|(Tr!Sdip zqNWT(RW_Bb@}v(Og*g-hRs(uyc7h^CxmCZuY}hd18lBy>elg;$^mH9J6ufXj+NO{| zQL$0%yIuJ3j7M}-f#tV$W6M250N44bx@M&LX<;G2>YC>X6OT_H=;~_h;{!h4eg47@ z*fv@_VsE&=SA?Ekk`UZ^4P#a4?Y&*B6*MKMH z!RFcbSoS)+V>>;P4we=wk#t!FJyroWzX>=O!P|%6LjMu+d|-vPd<)^LW5?E?f!CQ{ zQx;D4QaeDXY`sx`g5S>;MbZN}33j$t|DAkJAV;-!u!@vf^?vhk>-rzJQslX1}_)#xgH%Exu z5A$lW7lR$rX~oAq;HrxUo&!|1y7rwUW`%jRiV9q9&qv5!cwbP*;HF0JB+` zp=}#oy_4P_vf3|w2HbDg2L>Ow=MgAJ{9czWy{w=!P|#R#Y)R?Jb1S794QhCzC>+y; zHE4Jt>NS%*hw@;Of!^Si(L}@-F?U_j%k!x(!CvV)N%6>w77YCz_!JL$aunKH&^~z? z4->xnYXv*(f&TYD+Z~wwdTpA{_eVOv+`(s!rO)Nihq>b_%@S~MH6AGrvl)bWy15lMCCrdQF(CC;-Ctq5-pegqy6iGtCy+M>WLb)Y|xdlY}8cd(rCJDs$i};J_*% zgdCov)%hpNd4b>2NmK}SB_QOc5^PoP46_WAf5)Mzmp<8deoVHE5S%T=dTJgAk5JE( z$WXOU%f!_wdDEEz%wYx1BV$v`zcWGty_@~mOxMC{l*C0>7HioITh_)1Jw#E@_u|m zL&HQ7H)NOVZ*9wLp~IAH9huGaguY{tk7o;kQ&>;WkXX;d;Or38v+l;p0M`8z-kTFM zFlq>jFko-zuwrlLDW_tqH)gDo9#TOY>24pmD!YQ!;~OW=OcXWX+--_l=(3K}IQ*fR z?g>Ll`cOmtgmH5cM~BDEFNVNK7qZRWVzJjyTD)^<3luCNNwJK>l#J~(1UE-(s3ttYh8(7`ZLO}79HDK+I@7i73fE6r+1?+4 zyhu!+1?1{W0DiPhJ4cWI_l&W!`_-qUfn$sWpoIpl8-NDnFAEhX_Zw0VHN9#iC$eZ= z73~TFvq8wwRHd;9G}F69t+|oc0$T7yY`RFfc6Noo`fPT=e?d47OGHQjA+bo_!>#T{ z@+6F5z4kp(FoGg8Tz!xVqZO zW#ej+Xs}?`yFosL^o?}`(x9#)HcwtUTRnXZBJU82id!gN>Hw=F+)6u$%DK;-qJx0L z4kI8tF*gbny9qk4E14WdIovECY?Xg4aq4^cOL#=QW;VzPoRDBuDz6E#_CaxYN9eKx zxnNv)4kK1P=oB9J8nk@V+IIuiwrmASwYLX+#O?Ym+wN3m#RELSo>szc*~YU>2R8ne zSecM8vB(6O#sgX`sBtTW4o>oDpuiF>9#vS6KKLn$>2Y6tOT#Uc5bryxN^Utrw(VX? zKD4~O?w{f}BWZXj3LhZcdqz_EpU%e{W!0Y^v40b<^1|e&1xYSzan_X0A`L5R~}^krXpu$d&g6sN-9t13_f16mQKX!0Iq%8Y%f1c}wxQd`HD4Zq_G9 z=Z(ET3b0>@@=JMn79YOEOTM-z8AuA<#6zEdS+|)Lh+xH;iE3Ppw~n8zU&rJOGHBbA)FDwN? 
z_D~Gvwk$ghu26s1nQ5aQNVg1nf(lbyXIolGQ` zmz3JxlF<{IW9?AN!>niB{mj$5_aqt`hkB%?X6sVeZ55(1^OojX-)-$3{4czm!|*A+ zZ-ew`U_A)J;bTaBlSwVkMXBOz5-a~AxmSe5XzGaq(P-*sdtgC(J&v-UO!yZCRv!Oq zK-@4};+xS|s5EZW$gt2F4+if`o-f}_wB^56eWpQu@!CRed{LB zE0<;=$Imk>^Yk!xK-xawObv#txznlsMG)i{RT#9@%=`~_%1Xt2iZw%i?Zp*!o#gxF zwI8Y4TN6Wok>{@wO+8Pp(mvh7O_aryt(FFPDtSbR`I24B2H>+IxELAn2ZYJFn=T;u^ZA($0(`?fmV+kx`SGCE9-VW?R^Z z)3X@AF+o`5#AfrVG|08&kf5$#@T9IRhoIQdK?NUpc&_5Kkwaf8YCKG?m0%73LUM-T zYIzzgdr(I9gB*dyW0J)!Ny|RshH;T8=^=Ux<+)uYByRMioR-P_8<|5e&m8w`?ff2%50Pn{(6)>sD`UgD zw@)Yhqt$^{R%#r%>dV2ptRp)2@tLo+k}c;kl2&x59&(R(3imkapDLnJF4)gW+|-iP zwNFy}doWqmq4Sc;q}=TS?W~IHN0D7YsF)OZT1Xw~-)^S1Le&jSCnWclIkEVb+z&_$ zBJMX>i;}ndJ8U-BCY!4@xPZu?P#M0$SVunsVa6XCTvE)XxfB^y%c&IJ?eTmtA;c(! z3SGmoYlk>}cMCNB*IkU~j$w`zw}~(VH~m;Z&}Xf_19uBYzZ20JK_-b}kLF`y*yO#K zqH&Eg6bCVfv|n*TfY`1~->I>od`F(@#eZq=`0{RbM{iC3oOXOl{Y=JJg>8D+%KLJ1 z%lBuW@9Vtw&gj5o4#7{2nXJzQZSCS#hRzV~lm%J11GV{m&uW`vxXl;U>!V(dgn~wir(jVqsO4sLXEFnxO1_A>@$;{O6}>M9dLXW z`DeDIG*@u#M0^uAT20py>BrLUmD`ivnvAwqdgVApJxeamQ_tNG4wiYr3iJYe#o||* zN;Gdu-kN3-x^5Rb`KYEr687tiwZI-H24V2cvu5NJ)q+nl(&AGrBjEesx%gKiTNToK z8y@mpd~pgZT2EL5ysp))k;{~O&d*L?f;DtbOLO9hcY`g9%zMqC{ZDSc+FhaxT54@| zK3g8uXKxpu->io{5|?eDlBO0Pmda};3Bm|8OOG_7A5>c#D4Xs0Ppc?yi&0U9%7CxqdRde)QpCFhlfEb~r%AenM1VV@Q$DzwY|Qo0YI*;OoKw z0+I2&dTyKfwmxpFI#}l%;Uv;MNiH>2Hw_862Hn*5$~7U!B_Mo4C8F|BiTS$ZsXH&k zM5nxnwU3esTXhX;ZH}yKWhs1}cETtXe8Xj)u>Qc!p>Fz^ft%`)KEDgz-z8ie6(ZEL z&${T(_$e-jRKGngbgI;P?w)Xh`NEs@#+CY;fn2t;Dvc(B?O`*@PY?7ax_J)q_fHBcXKZr7GPP5hE;9J?d&%`miT(@B-zSqPJwUL@zT z^|YR)ug8G=%9zHona#F&2hsUv=#=}~OX0^PT>G*A%odiRXaTF&fR=B@Qa$#S7dx)@P>8PGvHAe|K%;vy2?4sa6dYC~05|wKZV7evqyRH}N zD;2;!M>wMRg+8@(9gCWK_7ds(5wgb_N>>cdFWQJ*pKK$&$YdjdJGbhu*U{~MN)6#= zq0idbe<%`inGbY3P$yYM=Y8V?^tNIt+%u42E?aqjb4gNh=#I~SJO0od03#bUS_Y~dN=A5@a1DI7*MCLU!Tc9+rHhxKge=3rHRc`|r= ziZvhdP!-yjt`lGS*LBFDDuhY6s7bxCcbC;;a|NP@Z~x<=vR+yzK=T3PrP6?J8^%e>_FcMI?#%xw7sms$Yd=>T%>G_ zIn1o;+2fk_w<7;c7w)diS|nRTGdfxb{dbi0$62zXMr2zF6_}Vsv#@}Tq;P$`BJGP- zZUD}HW2@Vp{7T(h&$+}9M!|Znf-U>v!_F)6iJ+5YAGutJ9b+yT@xx!~!hSMCBhJw97OOlRAzIp8z z?K>a}Ii9xLE0f>JxaHlr$+3X`F z2221W0wa!)OO2a$-laWorL`S^UNBceZC7`*AKVB3nbLVtzTpCR+tH*Ew?+IyUVR+% ziipK-8RPJg(OY;to^&r9m?I)RwYB7ZC;VyO7~%2WhWz3>2>eLl$c3X;&pXppJAYoM zxW)1uU1MWol|4xC(?*7#*D_9u7va5O@5y`klO%W;o=_FLGxq>oS;sz@oRTtdEQOBB zxs=hz-XmB>CI8HO`SRNqIa;Sm2m-9F??N*#o^m}eeY)W_MU(jwcVx;JHU+KYc_01% z3EVk9^F>Qyz!JP`{|;gQx3DJYHpL(Uob;Hy*nxBO?o z3N{z+QPa;x1Emqi1`-*ceQKQ`R{!<@pGJ#fWmdk&Yv*|4wZbT)> z>UVMW5AB+WJfC+##SzA$#9GoH9Ok2gU!}33+!@SPV4URqloV!@a2hl=1i|MSvz~;= zy0fC>vcKyr$zZn9d?aeOtX`1v(Q49f8m|2^eS2zz50I0^Ur8}JqL;F`F=&(Ih%0vy zDX!4|(l-Ajv{8$mYg!Kj|y%lck(RjM_@L#0{ zVZ3#q^9Pyt&2BWiJm6vK1EZV6fap6cPjDoLsrsDh=S(}W=O!c<=A7UvpZn7!c#o3% zT4AQOwpjiI4QwtK$pI=w1zk-eC3^>DoYDHw>$W4+{l+xn!{FL4G@rlbj1|{MS3;66 z97>U`SPiC?g7KP}1y5>$RRC?bLICxjDB!D<^t?^xDrW*p~Kz|HIZLetGS6~u4dn;YvK6$G$6@92K0pAuLvgaHA6R|`kr4U4*)5i{Q26R;xE zVLOe}A~bpDx}fE7fpT7rYWI}n3GTXd?Y=7RK{&$UrJ8W`lBe!eSiVq$26u1~VAr~e z?bd2&#sBKXK50Db5?<+IA~T+%5sYJ75`qAlqg?RK4!qu<51Fq#;KO(BVRDBZi3T3E z?Ez`JB`}vi>>MpapTzOYQ!#W2ZM>rBKo2?HiR>~NYb9*F!D$ZxDg^Z%N e9=%z~ZroFjuxU@5Lh@_JM8)|aKw;J0+y4i|0)9LI literal 0 HcmV?d00001 diff --git a/lib/std/compress/rfc1951.txt.z.0 b/lib/std/compress/rfc1951.txt.z.0 new file mode 100644 index 0000000000000000000000000000000000000000..3f50fb68f8da776fc6d6833b2cbe11cf822fd075 GIT binary patch literal 36960 zcmcJY>v9{%lIPESRqR9bYGPwai+~7#;6;`lLs68ib0{6HNzb`>G!r1uBr(E;gNr13 z&wkvm8~Y~v82fEsY4-Qetm>);L0R(92%EyCJ1Z;inUz)j@$Y{3=70a6=>vPcE*BrB zvv8v=Kj>qL>F@I9D4!W&R z06QG@27}?GIG!xZ+4XEVFXs=AdW-UMI=gvNyqo}R+*=H%liuhPQ0hOu>fJEx!7kH_ 
[... base85-encoded GIT binary patch data omitted ...]

literal 0
HcmV?d00001

diff --git a/lib/std/compress/rfc1951.txt.z.9 b/lib/std/compress/rfc1951.txt.z.9
new file mode 100644
index 0000000000000000000000000000000000000000..84e7cbe5b715527cb3498c7261f5c525ff5d9426
GIT binary patch
literal 11111
[... base85-encoded binary patch data and the opening of the zlib decompressor diff omitted ...]

+            const CINFO = @truncate(u4, header[0] >> 4);
+            const FCHECK = @truncate(u5, header[1]);
+            const FDICT = @truncate(u1, header[1] >> 5);
+
+            if ((@as(u16, header[0]) << 8 | header[1]) % 31 != 0)
+                return error.BadHeader;
+
+            // The CM field must be 8 to indicate the use of DEFLATE
+            if (CM != 8) return error.InvalidCompression;
+            // CINFO is the base-2 logarithm of the window size, minus 8.
+            // Values above 7 are unspecified and therefore rejected.
+            if (CINFO > 7) return error.InvalidWindowSize;
+            const window_size: u16 = @as(u16, 1) << (CINFO + 8);
+
+            // TODO: Support this case
+            if (FDICT != 0)
+                return error.Unsupported;
+
+            var window_slice = try allocator.alloc(u8, window_size);
+
+            return Self{
+                .allocator = allocator,
+                .inflater = deflate.inflateStream(source, window_slice),
+                .in_reader = source,
+                .hasher = std.hash.Adler32.init(),
+                .window_slice = window_slice,
+            };
+        }
+
+        fn deinit(self: *Self) void {
+            self.allocator.free(self.window_slice);
+        }
+
+        // Implements the io.Reader interface
+        pub fn read(self: *Self, buffer: []u8) Error!usize {
+            if (buffer.len == 0)
+                return 0;
+
+            // Read from the compressed stream and update the computed checksum
+            const r = try self.inflater.read(buffer);
+            if (r != 0) {
+                self.hasher.update(buffer[0..r]);
+                return r;
+            }
+
+            // We've reached the end of stream, check if the checksum matches
+            const hash = try self.in_reader.readIntBig(u32);
+            if (hash != self.hasher.final())
+                return error.WrongChecksum;
+
+            return 0;
+        }
+
+        pub fn reader(self: *Self) Reader {
+            return .{ .context = self };
+        }
+    };
+}
+
+pub fn zlibStream(allocator: *mem.Allocator, reader: anytype) !ZlibStream(@TypeOf(reader)) {
+    return ZlibStream(@TypeOf(reader)).init(allocator, reader);
+}
+
+fn testReader(data: []const u8, comptime expected: []const u8) !void {
+    var in_stream = io.fixedBufferStream(data);
+
+    var zlib_stream = try zlibStream(testing.allocator, in_stream.reader());
+    defer zlib_stream.deinit();
+
+    // Read and decompress the whole file
+    const buf = try zlib_stream.reader().readAllAlloc(testing.allocator, std.math.maxInt(usize));
+    defer testing.allocator.free(buf);
+    // Calculate its SHA256 hash and check it against the reference
+    var hash: [32]u8 = undefined;
+    std.crypto.hash.sha2.Sha256.hash(buf, hash[0..], .{});
+
+    assertEqual(expected, &hash);
+}
+
+// Assert `expected` == `input` where `input` is a bytestring.
+pub fn assertEqual(comptime expected: []const u8, input: []const u8) void {
+    var expected_bytes: [expected.len / 2]u8 = undefined;
+    for (expected_bytes) |*r, i| {
+        r.* = std.fmt.parseInt(u8, expected[2 * i .. 2 * i + 2], 16) catch unreachable;
+    }
+
+    testing.expectEqualSlices(u8, &expected_bytes, input);
+}
+
+// All the test cases are obtained by compressing the RFC1951 text
+//
+// https://tools.ietf.org/rfc/rfc1951.txt length=36944 bytes
+// SHA256=5ebf4b5b7fe1c3a0c0ab9aa3ac8c0f3853a7dc484905e76e03b0b0f301350009
+test "compressed data" {
+    // Compressed with compression level = 0
+    try testReader(
+        @embedFile("rfc1951.txt.z.0"),
+        "5ebf4b5b7fe1c3a0c0ab9aa3ac8c0f3853a7dc484905e76e03b0b0f301350009",
+    );
+    // Compressed with compression level = 9
+    try testReader(
+        @embedFile("rfc1951.txt.z.9"),
+        "5ebf4b5b7fe1c3a0c0ab9aa3ac8c0f3853a7dc484905e76e03b0b0f301350009",
+    );
+    // Compressed with compression level = 9 and fixed Huffman codes
+    try testReader(
+        @embedFile("rfc1951.txt.fixed.z.9"),
+        "5ebf4b5b7fe1c3a0c0ab9aa3ac8c0f3853a7dc484905e76e03b0b0f301350009",
+    );
+}
+
+test "sanity checks" {
+    // Truncated header
+    testing.expectError(
+        error.EndOfStream,
+        testReader(&[_]u8{0x78}, ""),
+    );
+    // Failed FCHECK check
+    testing.expectError(
+        error.BadHeader,
+        testReader(&[_]u8{ 0x78, 0x9D }, ""),
+    );
+    // Wrong CM
+    testing.expectError(
+        error.InvalidCompression,
+        testReader(&[_]u8{ 0x79, 0x94 }, ""),
+    );
+    // Wrong CINFO
+    testing.expectError(
+        error.InvalidWindowSize,
+        testReader(&[_]u8{ 0x88, 0x98 }, ""),
+    );
+    // Wrong checksum
+    testing.expectError(
+        error.WrongChecksum,
+        testReader(&[_]u8{ 0x78, 0xda, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00 }, ""),
+    );
+    // Truncated checksum
+    testing.expectError(
+        error.EndOfStream,
+        testReader(&[_]u8{ 0x78, 0xda, 0x03, 0x00, 0x00 }, ""),
+    );
+}
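The header bytes exercised by the sanity checks above can be verified by hand. The snippet below is not part of the patch; checkHeader is a hypothetical helper that repeats the FCHECK/CM/CINFO validation from ZlibStream.init so each rejected header can be seen in isolation.

const std = @import("std");

// Hypothetical helper (not in the patch) mirroring the header checks in
// ZlibStream.init, with cmf = header[0] and flg = header[1].
fn checkHeader(cmf: u8, flg: u8) !void {
    const CM = @truncate(u4, cmf);
    const CINFO = @truncate(u4, cmf >> 4);
    // FCHECK is valid when the 16-bit header value is a multiple of 31
    if ((@as(u16, cmf) << 8 | flg) % 31 != 0) return error.BadHeader;
    if (CM != 8) return error.InvalidCompression;
    if (CINFO > 7) return error.InvalidWindowSize;
}

test "zlib header worked examples" {
    // 0x78 0xda (used by the checksum tests): CM = 8, CINFO = 7 (32 KiB window),
    // and 0x78da % 31 == 0, so the header is accepted.
    try checkHeader(0x78, 0xda);
    // 0x78 0x9d: 0x789d % 31 != 0, so the FCHECK validation rejects it.
    std.testing.expectError(error.BadHeader, checkHeader(0x78, 0x9D));
    // 0x79 0x94: CM = 9, i.e. not DEFLATE.
    std.testing.expectError(error.InvalidCompression, checkHeader(0x79, 0x94));
    // 0x88 0x98: CINFO = 8, i.e. a window larger than the 32 KiB maximum.
    std.testing.expectError(error.InvalidWindowSize, checkHeader(0x88, 0x98));
}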
diff --git a/lib/std/std.zig b/lib/std/std.zig
index 330f3c253b..4236b29298 100644
--- a/lib/std/std.zig
+++ b/lib/std/std.zig
@@ -50,6 +50,7 @@ pub const builtin = @import("builtin.zig");
 pub const c = @import("c.zig");
 pub const cache_hash = @import("cache_hash.zig");
 pub const coff = @import("coff.zig");
+pub const compress = @import("compress.zig");
 pub const crypto = @import("crypto.zig");
 pub const cstr = @import("cstr.zig");
 pub const debug = @import("debug.zig");

From c15f39212e32c612cf51f647868be2bdd024d0de Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Mon, 7 Sep 2020 15:15:48 -0700
Subject: [PATCH 56/56] build.zig: ignore the compression test files

---
 build.zig | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/build.zig b/build.zig
index 8ac2d4f8ba..3f7f1a9038 100644
--- a/build.zig
+++ b/build.zig
@@ -123,7 +123,13 @@ pub fn build(b: *Builder) !void {
         .source_dir = "lib",
         .install_dir = .Lib,
         .install_subdir = "zig",
-        .exclude_extensions = &[_][]const u8{ "test.zig", "README.md" },
+        .exclude_extensions = &[_][]const u8{
+            "test.zig",
+            "README.md",
+            ".z.0",
+            ".z.9",
+            "rfc1951.txt",
+        },
     });

     const test_filter = b.option([]const u8, "test-filter", "Skip tests that do not match filter");
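To close out the series, here is a minimal usage sketch of the decompressor added above. It is not part of any patch: it assumes the new code ends up reachable as std.compress.zlib through the compress.zig import added to std.zig, and that deinit can be called from user code (it is not marked pub in the diff above). The input is the empty DEFLATE stream used by the sanity checks followed by the Adler-32 checksum of an empty buffer (0x00000001), so decompression succeeds and yields zero bytes.

const std = @import("std");

pub fn main() !void {
    // Valid zlib framing: 0x78 0xda header, an empty fixed-Huffman DEFLATE
    // block (0x03 0x00), then the big-endian Adler-32 of the empty payload.
    const compressed = [_]u8{ 0x78, 0xda, 0x03, 0x00, 0x00, 0x00, 0x00, 0x01 };

    const allocator = std.heap.page_allocator;
    var in_stream = std.io.fixedBufferStream(&compressed);

    // Assumed path: std.compress.zlib.zlibStream, per the new compress.zig import.
    var zlib_stream = try std.compress.zlib.zlibStream(allocator, in_stream.reader());
    defer zlib_stream.deinit();

    const data = try zlib_stream.reader().readAllAlloc(allocator, std.math.maxInt(usize));
    defer allocator.free(data);

    std.debug.print("decompressed {} bytes\n", .{data.len});
}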