std.zig: finish updating to new I/O API
This commit is contained in:
parent: 69cf40da60
commit: ec5c1fac63
@@ -10,6 +10,9 @@ const assert = std.debug.assert;
const fatal = std.process.fatal;
const Server = std.zig.Server;

var stdin_buffer: [1024]u8 = undefined;
var stdout_buffer: [1024]u8 = undefined;

pub fn main() !void {
    var arena_instance = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer arena_instance.deinit();
@@ -22,11 +25,8 @@ pub fn main() !void {
    return cmdObjCopy(gpa, arena, args[1..]);
}

fn cmdObjCopy(
    gpa: Allocator,
    arena: Allocator,
    args: []const []const u8,
) !void {
fn cmdObjCopy(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
    _ = gpa;
    var i: usize = 0;
    var opt_out_fmt: ?std.Target.ObjectFormat = null;
    var opt_input: ?[]const u8 = null;
@@ -225,13 +225,13 @@ fn cmdObjCopy(
    }

    if (listen) {
        var stdin_reader = fs.File.stdin().reader(&stdin_buffer);
        var stdout_writer = fs.File.stdout().writer(&stdout_buffer);
        var server = try Server.init(.{
            .gpa = gpa,
            .in = .stdin(),
            .out = .stdout(),
            .in = &stdin_reader.interface,
            .out = &stdout_writer.interface,
            .zig_version = builtin.zig_version_string,
        });
        defer server.deinit();

        var seen_update = false;
        while (true) {
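The pattern in this hunk recurs throughout the commit: wrap a File in a buffered reader or writer, then pass the generic *std.Io.Reader / *std.Io.Writer interface along. A minimal sketch of that pattern, assuming only the calls visible in this diff (buffer sizes and the echo behavior are illustrative):

const std = @import("std");

pub fn main() !void {
    var stdin_buffer: [1024]u8 = undefined;
    var stdout_buffer: [1024]u8 = undefined;

    // File.reader/File.writer return buffered adapters; .interface is the
    // generic stream type that APIs such as std.zig.Server now accept.
    var stdin_reader = std.fs.File.stdin().reader(&stdin_buffer);
    var stdout_writer = std.fs.File.stdout().writer(&stdout_buffer);
    const in: *std.Io.Reader = &stdin_reader.interface;
    const out: *std.Io.Writer = &stdout_writer.interface;

    // Echo one line; buffered output only reaches stdout after flush().
    const line = try in.takeDelimiterExclusive('\n');
    try out.writeAll(line);
    try out.writeAll("\n");
    try out.flush();
}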
@@ -13,6 +13,8 @@ const hasDisjointCodePage = @import("disjoint_code_page.zig").hasDisjointCodePag
const fmtResourceType = @import("res.zig").NameOrOrdinal.fmtResourceType;
const aro = @import("aro");

var stdout_buffer: [1024]u8 = undefined;

pub fn main() !void {
    var gpa: std.heap.GeneralPurposeAllocator(.{}) = .init;
    defer std.debug.assert(gpa.deinit() == .ok);
@@ -41,12 +43,12 @@ pub fn main() !void {
    cli_args = args[3..];
}

var stdout_writer2 = std.fs.File.stdout().writer(&stdout_buffer);
var error_handler: ErrorHandler = switch (zig_integration) {
    true => .{
        .server = .{
            .out = std.fs.File.stdout(),
            .out = &stdout_writer2.interface,
            .in = undefined, // won't be receiving messages
            .receive_fifo = undefined, // won't be receiving messages
        },
    },
    false => .{
@@ -2,7 +2,6 @@
const builtin = @import("builtin");

const std = @import("std");
const io = std.io;
const testing = std.testing;
const assert = std.debug.assert;

@@ -13,6 +12,8 @@ pub const std_options: std.Options = .{
var log_err_count: usize = 0;
var fba_buffer: [8192]u8 = undefined;
var fba = std.heap.FixedBufferAllocator.init(&fba_buffer);
var stdin_buffer: [std.heap.page_size_min]u8 align(std.heap.page_size_min) = undefined;
var stdout_buffer: [std.heap.page_size_min]u8 align(std.heap.page_size_min) = undefined;

const crippled = switch (builtin.zig_backend) {
    .stage2_powerpc,
@@ -67,13 +68,13 @@ pub fn main() void {

fn mainServer() !void {
    @disableInstrumentation();
    var stdin_reader = std.fs.File.stdin().reader(&stdin_buffer);
    var stdout_writer = std.fs.File.stdout().writer(&stdout_buffer);
    var server = try std.zig.Server.init(.{
        .gpa = fba.allocator(),
        .in = .stdin(),
        .out = .stdout(),
        .in = &stdin_reader.interface,
        .out = &stdout_writer.interface,
        .zig_version = builtin.zig_version_string,
    });
    defer server.deinit();

    if (builtin.fuzz) {
        const coverage_id = fuzzer_coverage_id();
@@ -905,4 +905,5 @@ test {
    _ = system;
    _ = target;
    _ = c_translation;
    _ = llvm;
}
@@ -370,7 +370,7 @@ fn findNativeIncludeDirWindows(

    for (installs) |install| {
        result_buf.shrinkAndFree(0);
        try result_buf.writer().print("{s}\\Include\\{s}\\ucrt", .{ install.path, install.version });
        try result_buf.print("{s}\\Include\\{s}\\ucrt", .{ install.path, install.version });

        var dir = fs.cwd().openDir(result_buf.items, .{}) catch |err| switch (err) {
            error.FileNotFound,
@@ -417,7 +417,7 @@ fn findNativeCrtDirWindows(

    for (installs) |install| {
        result_buf.shrinkAndFree(0);
        try result_buf.writer().print("{s}\\Lib\\{s}\\ucrt\\{s}", .{ install.path, install.version, arch_sub_dir });
        try result_buf.print("{s}\\Lib\\{s}\\ucrt\\{s}", .{ install.path, install.version, arch_sub_dir });

        var dir = fs.cwd().openDir(result_buf.items, .{}) catch |err| switch (err) {
            error.FileNotFound,
@@ -484,8 +484,7 @@ fn findNativeKernel32LibDir(

    for (installs) |install| {
        result_buf.shrinkAndFree(0);
        const stream = result_buf.writer();
        try stream.print("{s}\\Lib\\{s}\\um\\{s}", .{ install.path, install.version, arch_sub_dir });
        try result_buf.print("{s}\\Lib\\{s}\\um\\{s}", .{ install.path, install.version, arch_sub_dir });

        var dir = fs.cwd().openDir(result_buf.items, .{}) catch |err| switch (err) {
            error.FileNotFound,
@@ -1,6 +1,20 @@
in: std.fs.File,
out: std.fs.File,
receive_fifo: std.fifo.LinearFifo(u8, .Dynamic),
const Server = @This();

const builtin = @import("builtin");

const std = @import("std");
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const native_endian = builtin.target.cpu.arch.endian();
const need_bswap = native_endian != .little;
const Cache = std.Build.Cache;
const OutMessage = std.zig.Server.Message;
const InMessage = std.zig.Client.Message;
const Reader = std.Io.Reader;
const Writer = std.Io.Writer;

in: *Reader,
out: *Writer,

pub const Message = struct {
    pub const Header = extern struct {
@@ -94,9 +108,8 @@ pub const Message = struct {
};

pub const Options = struct {
    gpa: Allocator,
    in: std.fs.File,
    out: std.fs.File,
    in: *Reader,
    out: *Writer,
    zig_version: []const u8,
};
@@ -104,96 +117,40 @@ pub fn init(options: Options) !Server {
    var s: Server = .{
        .in = options.in,
        .out = options.out,
        .receive_fifo = std.fifo.LinearFifo(u8, .Dynamic).init(options.gpa),
    };
    try s.serveStringMessage(.zig_version, options.zig_version);
    return s;
}

pub fn deinit(s: *Server) void {
    s.receive_fifo.deinit();
    s.* = undefined;
}

pub fn receiveMessage(s: *Server) !InMessage.Header {
    const Header = InMessage.Header;
    const fifo = &s.receive_fifo;
    var last_amt_zero = false;

    while (true) {
        const buf = fifo.readableSlice(0);
        assert(fifo.readableLength() == buf.len);
        if (buf.len >= @sizeOf(Header)) {
            const header: *align(1) const Header = @ptrCast(buf[0..@sizeOf(Header)]);
            const bytes_len = bswap(header.bytes_len);
            const tag = bswap(header.tag);

            if (buf.len - @sizeOf(Header) >= bytes_len) {
                fifo.discard(@sizeOf(Header));
                return .{
                    .tag = tag,
                    .bytes_len = bytes_len,
                };
            } else {
                const needed = bytes_len - (buf.len - @sizeOf(Header));
                const write_buffer = try fifo.writableWithSize(needed);
                const amt = try s.in.read(write_buffer);
                fifo.update(amt);
                continue;
            }
        }

        const write_buffer = try fifo.writableWithSize(256);
        const amt = try s.in.read(write_buffer);
        fifo.update(amt);
        if (amt == 0) {
            if (last_amt_zero) return error.BrokenPipe;
            last_amt_zero = true;
        }
    }
    return s.in.takeStruct(InMessage.Header, .little);
}

pub fn receiveBody_u32(s: *Server) !u32 {
    const fifo = &s.receive_fifo;
    const buf = fifo.readableSlice(0);
    const result = @as(*align(1) const u32, @ptrCast(buf[0..4])).*;
    fifo.discard(4);
    return bswap(result);
    return s.in.takeInt(u32, .little);
}
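The whole fifo-based receive path above collapses into direct reads on the *Reader. A hedged sketch of the new receive side, using only takeStruct and takeInt as they appear in this hunk; the Header layout here is an illustrative stand-in for std.zig.Client.Message.Header:

const std = @import("std");

const Header = extern struct { tag: u32, bytes_len: u32 };

// Reads the fixed-size header in the protocol's little-endian layout,
// replacing the old manual bswap + LinearFifo plumbing.
fn receiveHeader(in: *std.Io.Reader) !Header {
    return in.takeStruct(Header, .little);
}

fn receiveBodyU32(in: *std.Io.Reader) !u32 {
    return in.takeInt(u32, .little);
}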
pub fn serveStringMessage(s: *Server, tag: OutMessage.Tag, msg: []const u8) !void {
    return s.serveMessage(.{
    try s.serveMessageHeader(.{
        .tag = tag,
        .bytes_len = @as(u32, @intCast(msg.len)),
    }, &.{msg});
        .bytes_len = @intCast(msg.len),
    });
    try s.out.writeAll(msg);
    try s.out.flush();
}

pub fn serveMessage(
    s: *const Server,
    header: OutMessage.Header,
    bufs: []const []const u8,
) !void {
    var iovecs: [10]std.posix.iovec_const = undefined;
    const header_le = bswap(header);
    iovecs[0] = .{
        .base = @as([*]const u8, @ptrCast(&header_le)),
        .len = @sizeOf(OutMessage.Header),
    };
    for (bufs, iovecs[1 .. bufs.len + 1]) |buf, *iovec| {
        iovec.* = .{
            .base = buf.ptr,
            .len = buf.len,
        };
    }
    try s.out.writevAll(iovecs[0 .. bufs.len + 1]);
/// Don't forget to flush!
pub fn serveMessageHeader(s: *const Server, header: OutMessage.Header) !void {
    try s.out.writeStruct(header, .little);
}

pub fn serveU64Message(s: *Server, tag: OutMessage.Tag, int: u64) !void {
    const msg_le = bswap(int);
    return s.serveMessage(.{
pub fn serveU64Message(s: *const Server, tag: OutMessage.Tag, int: u64) !void {
    try serveMessageHeader(s, .{
        .tag = tag,
        .bytes_len = @sizeOf(u64),
    }, &.{std.mem.asBytes(&msg_le)});
    });
    try s.out.writeInt(u64, int, .little);
    try s.out.flush();
}

pub fn serveEmitDigest(
@@ -201,26 +158,22 @@ pub fn serveEmitDigest(
    digest: *const [Cache.bin_digest_len]u8,
    header: OutMessage.EmitDigest,
) !void {
    try s.serveMessage(.{
    try s.serveMessageHeader(.{
        .tag = .emit_digest,
        .bytes_len = @intCast(digest.len + @sizeOf(OutMessage.EmitDigest)),
    }, &.{
        std.mem.asBytes(&header),
        digest,
    });
    try s.out.writeStruct(header, .little);
    try s.out.writeAll(digest);
    try s.out.flush();
}

pub fn serveTestResults(
    s: *Server,
    msg: OutMessage.TestResults,
) !void {
    const msg_le = bswap(msg);
    try s.serveMessage(.{
pub fn serveTestResults(s: *Server, msg: OutMessage.TestResults) !void {
    try s.serveMessageHeader(.{
        .tag = .test_results,
        .bytes_len = @intCast(@sizeOf(OutMessage.TestResults)),
    }, &.{
        std.mem.asBytes(&msg_le),
    });
    try s.out.writeStruct(msg, .little);
    try s.out.flush();
}
pub fn serveErrorBundle(s: *Server, error_bundle: std.zig.ErrorBundle) !void {
@@ -230,91 +183,38 @@ pub fn serveErrorBundle(s: *Server, error_bundle: std.zig.ErrorBundle) !void {
    };
    const bytes_len = @sizeOf(OutMessage.ErrorBundle) +
        4 * error_bundle.extra.len + error_bundle.string_bytes.len;
    try s.serveMessage(.{
    try s.serveMessageHeader(.{
        .tag = .error_bundle,
        .bytes_len = @intCast(bytes_len),
    }, &.{
        std.mem.asBytes(&eb_hdr),
        // TODO: implement @ptrCast between slices changing the length
        std.mem.sliceAsBytes(error_bundle.extra),
        error_bundle.string_bytes,
    });
    try s.out.writeStruct(eb_hdr, .little);
    try s.out.writeSliceEndian(u32, error_bundle.extra, .little);
    try s.out.writeAll(error_bundle.string_bytes);
    try s.out.flush();
}

pub const TestMetadata = struct {
    names: []u32,
    expected_panic_msgs: []u32,
    names: []const u32,
    expected_panic_msgs: []const u32,
    string_bytes: []const u8,
};

pub fn serveTestMetadata(s: *Server, test_metadata: TestMetadata) !void {
    const header: OutMessage.TestMetadata = .{
        .tests_len = bswap(@as(u32, @intCast(test_metadata.names.len))),
        .string_bytes_len = bswap(@as(u32, @intCast(test_metadata.string_bytes.len))),
        .tests_len = @as(u32, @intCast(test_metadata.names.len)),
        .string_bytes_len = @as(u32, @intCast(test_metadata.string_bytes.len)),
    };
    const trailing = 2;
    const bytes_len = @sizeOf(OutMessage.TestMetadata) +
        trailing * @sizeOf(u32) * test_metadata.names.len + test_metadata.string_bytes.len;

    if (need_bswap) {
        bswap_u32_array(test_metadata.names);
        bswap_u32_array(test_metadata.expected_panic_msgs);
    }
    defer if (need_bswap) {
        bswap_u32_array(test_metadata.names);
        bswap_u32_array(test_metadata.expected_panic_msgs);
    };

    return s.serveMessage(.{
    try s.serveMessageHeader(.{
        .tag = .test_metadata,
        .bytes_len = @intCast(bytes_len),
    }, &.{
        std.mem.asBytes(&header),
        // TODO: implement @ptrCast between slices changing the length
        std.mem.sliceAsBytes(test_metadata.names),
        std.mem.sliceAsBytes(test_metadata.expected_panic_msgs),
        test_metadata.string_bytes,
    });
    try s.out.writeStruct(header, .little);
    try s.out.writeSliceEndian(u32, test_metadata.names, .little);
    try s.out.writeSliceEndian(u32, test_metadata.expected_panic_msgs, .little);
    try s.out.writeAll(test_metadata.string_bytes);
    try s.out.flush();
}

fn bswap(x: anytype) @TypeOf(x) {
    if (!need_bswap) return x;

    const T = @TypeOf(x);
    switch (@typeInfo(T)) {
        .@"enum" => return @as(T, @enumFromInt(@byteSwap(@intFromEnum(x)))),
        .int => return @byteSwap(x),
        .@"struct" => |info| switch (info.layout) {
            .@"extern" => {
                var result: T = undefined;
                inline for (info.fields) |field| {
                    @field(result, field.name) = bswap(@field(x, field.name));
                }
                return result;
            },
            .@"packed" => {
                const I = info.backing_integer.?;
                return @as(T, @bitCast(@byteSwap(@as(I, @bitCast(x)))));
            },
            .auto => @compileError("auto layout struct"),
        },
        else => @compileError("bswap on type " ++ @typeName(T)),
    }
}

fn bswap_u32_array(slice: []u32) void {
    comptime assert(need_bswap);
    for (slice) |*elem| elem.* = @byteSwap(elem.*);
}

const OutMessage = std.zig.Server.Message;
const InMessage = std.zig.Client.Message;

const Server = @This();
const builtin = @import("builtin");
const std = @import("std");
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const native_endian = builtin.target.cpu.arch.endian();
const need_bswap = native_endian != .little;
const Cache = std.Build.Cache;
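On the send side, every serve* function now has the same shape: write a header struct, write the payload, flush. A sketch under the same assumptions (the Header and tag types are illustrative stand-ins for std.zig.Server.Message):

const std = @import("std");

const Header = extern struct { tag: u32, bytes_len: u32 };

// Mirrors serveStringMessage above, without the Server wrapper.
fn serveString(out: *std.Io.Writer, tag: u32, msg: []const u8) !void {
    try out.writeStruct(Header{ .tag = tag, .bytes_len = @intCast(msg.len) }, .little);
    try out.writeAll(msg);
    try out.flush(); // nothing hits the pipe until the buffered writer is flushed
}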
@@ -1,11 +1,12 @@
const WindowsSdk = @This();
const builtin = @import("builtin");
const std = @import("std");
const Writer = std.Io.Writer;

windows10sdk: ?Installation,
windows81sdk: ?Installation,
msvc_lib_dir: ?[]const u8,

const WindowsSdk = @This();
const std = @import("std");
const builtin = @import("builtin");

const windows = std.os.windows;
const RRF = windows.advapi32.RRF;

@@ -759,14 +760,13 @@ const MsvcLibDir = struct {
    while (instances_dir_it.next() catch return error.PathNotFound) |entry| {
        if (entry.kind != .directory) continue;

        var fbs = std.io.fixedBufferStream(&state_subpath_buf);
        const writer = fbs.writer();
        var writer: Writer = .fixed(&state_subpath_buf);

        writer.writeAll(entry.name) catch unreachable;
        writer.writeByte(std.fs.path.sep) catch unreachable;
        writer.writeAll("state.json") catch unreachable;

        const json_contents = instances_dir.readFileAlloc(allocator, fbs.getWritten(), std.math.maxInt(usize)) catch continue;
        const json_contents = instances_dir.readFileAlloc(allocator, writer.buffered(), std.math.maxInt(usize)) catch continue;
        defer allocator.free(json_contents);

        var parsed = std.json.parseFromSlice(std.json.Value, allocator, json_contents, .{}) catch continue;
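std.Io.Writer.fixed replaces std.io.fixedBufferStream for building a path in a stack buffer, and writer.buffered() stands in for fbs.getWritten(). A small sketch assuming only the calls used in this hunk:

const std = @import("std");

test "build a sub-path in a fixed buffer" {
    var buf: [64]u8 = undefined;
    var w: std.Io.Writer = .fixed(&buf);
    try w.writeAll("instance-id"); // illustrative directory name
    try w.writeByte(std.fs.path.sep);
    try w.writeAll("state.json");
    // buffered() is the slice written so far.
    try std.testing.expect(std.mem.endsWith(u8, w.buffered(), "state.json"));
}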
@@ -1,3 +1,9 @@
pub const BitcodeReader = @import("llvm/BitcodeReader.zig");
pub const bitcode_writer = @import("llvm/bitcode_writer.zig");
pub const Builder = @import("llvm/Builder.zig");

test {
    _ = BitcodeReader;
    _ = bitcode_writer;
    _ = Builder;
}
@@ -1,6 +1,11 @@
const BitcodeReader = @This();

const std = @import("../../std.zig");
const assert = std.debug.assert;

allocator: std.mem.Allocator,
record_arena: std.heap.ArenaAllocator.State,
reader: std.io.AnyReader,
reader: *std.Io.Reader,
keep_names: bool,
bit_buffer: u32,
bit_offset: u5,
@@ -93,7 +98,7 @@ pub const Record = struct {
};

pub const InitOptions = struct {
    reader: std.io.AnyReader,
    reader: *std.Io.Reader,
    keep_names: bool = false,
};
pub fn init(allocator: std.mem.Allocator, options: InitOptions) BitcodeReader {
@@ -172,7 +177,7 @@ pub fn next(bc: *BitcodeReader) !?Item {

pub fn skipBlock(bc: *BitcodeReader, block: Block) !void {
    assert(bc.bit_offset == 0);
    try bc.reader.skipBytes(@as(u34, block.len) * 4, .{});
    try bc.reader.discardAll(4 * @as(u34, block.len));
    try bc.endBlock();
}

@@ -371,19 +376,19 @@ fn align32Bits(bc: *BitcodeReader) void {

fn read32Bits(bc: *BitcodeReader) !u32 {
    assert(bc.bit_offset == 0);
    return bc.reader.readInt(u32, .little);
    return bc.reader.takeInt(u32, .little);
}

fn readBytes(bc: *BitcodeReader, bytes: []u8) !void {
    assert(bc.bit_offset == 0);
    try bc.reader.readNoEof(bytes);
    try bc.reader.readSliceAll(bytes);

    const trailing_bytes = bytes.len % 4;
    if (trailing_bytes > 0) {
        var bit_buffer = [1]u8{0} ** 4;
        try bc.reader.readNoEof(bit_buffer[trailing_bytes..]);
        var bit_buffer: [4]u8 = @splat(0);
        try bc.reader.readSliceAll(bit_buffer[trailing_bytes..]);
        bc.bit_buffer = std.mem.readInt(u32, &bit_buffer, .little);
        bc.bit_offset = @intCast(trailing_bytes * 8);
        bc.bit_offset = @intCast(8 * trailing_bytes);
    }
}

@@ -509,7 +514,6 @@ const Abbrev = struct {
    };
};

const assert = std.debug.assert;
const std = @import("../../std.zig");

const BitcodeReader = @This();
test {
    _ = &skipBlock;
}
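The reader calls map over nearly one-to-one: readInt becomes takeInt, readNoEof becomes readSliceAll, and skipBytes becomes discardAll. A sketch over an in-memory fixed reader, limited to calls that appear in this hunk:

const std = @import("std");

test "new std.Io.Reader primitives" {
    const bytes = [_]u8{ 1, 0, 0, 0, 0xaa, 0xbb, 0xcc, 0xdd };
    var r: std.Io.Reader = .fixed(&bytes);

    try std.testing.expectEqual(@as(u32, 1), try r.takeInt(u32, .little));

    var word: [2]u8 = undefined;
    try r.readSliceAll(&word); // like the old readNoEof
    try std.testing.expectEqualSlices(u8, &[_]u8{ 0xaa, 0xbb }, &word);

    try r.discardAll(2); // like the old skipBytes
}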
@@ -1,7 +1,6 @@
const std = @import("std");
const mem = std.mem;
const Tokenizer = std.zig.Tokenizer;
const io = std.io;
const fmtIntSizeBin = std.fmt.fmtIntSizeBin;

const source = @embedFile("../os.zig");
@@ -22,16 +21,15 @@ pub fn main() !void {
    const bytes_per_sec_float = @as(f64, @floatFromInt(source.len * iterations)) / elapsed_s;
    const bytes_per_sec = @as(u64, @intFromFloat(@floor(bytes_per_sec_float)));

    var stdout_file: std.fs.File = .stdout();
    const stdout = stdout_file.deprecatedWriter();
    try stdout.print("parsing speed: {:.2}/s, {:.2} used \n", .{
        fmtIntSizeBin(bytes_per_sec),
        fmtIntSizeBin(memory_used),
    });
    var stdout_buffer: [1024]u8 = undefined;
    var stdout_writer = std.fs.File.stdout().writer(&stdout_buffer);
    const stdout = &stdout_writer.interface;
    try stdout.print("parsing speed: {Bi:.2}/s, {Bi:.2} used \n", .{ bytes_per_sec, memory_used });
    try stdout.flush();
}

fn testOnce() usize {
    var fixed_buf_alloc = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
    var fixed_buf_alloc = std.heap.FixedBufferAllocator.init(&fixed_buffer_mem);
    const allocator = fixed_buf_alloc.allocator();
    _ = std.zig.Ast.parse(allocator, source, .zig) catch @panic("parse failure");
    return fixed_buf_alloc.end_index;
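fmtIntSizeBin disappears here; the byte size is formatted directly with the {Bi} specifier on a buffered stdout writer, which must be flushed explicitly. A sketch assuming only what this hunk shows (the sample number is made up):

const std = @import("std");

pub fn main() !void {
    var stdout_buffer: [256]u8 = undefined;
    var stdout_writer = std.fs.File.stdout().writer(&stdout_buffer);
    const stdout = &stdout_writer.interface;

    const bytes_per_sec: u64 = 123 * 1024 * 1024;
    // {Bi:.2} renders a binary (KiB/MiB/GiB) size with two decimals.
    try stdout.print("parsing speed: {Bi:.2}/s\n", .{bytes_per_sec});
    try stdout.flush();
}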
@@ -1,7 +1,6 @@
const std = @import("std");
const builtin = @import("builtin");
const mem = std.mem;
const io = std.io;
const fs = std.fs;
const fmt = std.fmt;
const testing = std.testing;
@@ -344,8 +343,8 @@ fn testParser(
    expected_model: *const Target.Cpu.Model,
    input: []const u8,
) !void {
    var fbs = io.fixedBufferStream(input);
    const result = try parser.parse(arch, fbs.reader());
    var r: std.Io.Reader = .fixed(input);
    const result = try parser.parse(arch, &r);
    try testing.expectEqual(expected_model, result.?.model);
    try testing.expect(expected_model.features.eql(result.?.features));
}
@@ -357,20 +356,17 @@ fn testParser(
// When all the lines have been analyzed the finalize method is called.
fn CpuinfoParser(comptime impl: anytype) type {
    return struct {
        fn parse(arch: Target.Cpu.Arch, reader: anytype) anyerror!?Target.Cpu {
            var line_buf: [1024]u8 = undefined;
        fn parse(arch: Target.Cpu.Arch, reader: *std.Io.Reader) !?Target.Cpu {
            var obj: impl = .{};

            while (true) {
                const line = (try reader.readUntilDelimiterOrEof(&line_buf, '\n')) orelse break;
            while (reader.takeDelimiterExclusive('\n')) |line| {
                const colon_pos = mem.indexOfScalar(u8, line, ':') orelse continue;
                const key = mem.trimEnd(u8, line[0..colon_pos], " \t");
                const value = mem.trimStart(u8, line[colon_pos + 1 ..], " \t");

                if (!try obj.line_hook(key, value))
                    break;
                if (!try obj.line_hook(key, value)) break;
            } else |err| switch (err) {
                error.EndOfStream => {},
                else => |e| return e,
            }

            return obj.finalize(arch);
        }
    };
@@ -383,15 +379,18 @@ inline fn getAArch64CpuFeature(comptime feat_reg: []const u8) u64 {
}

pub fn detectNativeCpuAndFeatures() ?Target.Cpu {
    var f = fs.openFileAbsolute("/proc/cpuinfo", .{}) catch |err| switch (err) {
    var file = fs.openFileAbsolute("/proc/cpuinfo", .{}) catch |err| switch (err) {
        else => return null,
    };
    defer f.close();
    defer file.close();

    var buffer: [4096]u8 = undefined; // "flags" lines can get pretty long.
    var file_reader = file.reader(&buffer);

    const current_arch = builtin.cpu.arch;
    switch (current_arch) {
        .arm, .armeb, .thumb, .thumbeb => {
            return ArmCpuinfoParser.parse(current_arch, f.deprecatedReader()) catch null;
            return ArmCpuinfoParser.parse(current_arch, &file_reader.interface) catch null;
        },
        .aarch64, .aarch64_be => {
            const registers = [12]u64{
@@ -413,13 +412,13 @@ pub fn detectNativeCpuAndFeatures() ?Target.Cpu {
            return core;
        },
        .sparc64 => {
            return SparcCpuinfoParser.parse(current_arch, f.deprecatedReader()) catch null;
            return SparcCpuinfoParser.parse(current_arch, &file_reader.interface) catch null;
        },
        .powerpc, .powerpcle, .powerpc64, .powerpc64le => {
            return PowerpcCpuinfoParser.parse(current_arch, f.deprecatedReader()) catch null;
            return PowerpcCpuinfoParser.parse(current_arch, &file_reader.interface) catch null;
        },
        .riscv64, .riscv32 => {
            return RiscvCpuinfoParser.parse(current_arch, f.deprecatedReader()) catch null;
            return RiscvCpuinfoParser.parse(current_arch, &file_reader.interface) catch null;
        },
        else => {},
    }
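The line loop changes from readUntilDelimiterOrEof with a caller-owned buffer to takeDelimiterExclusive, which hands out slices of the reader's own buffer and ends the loop with error.EndOfStream. A sketch of that loop shape over a fixed reader (the sample cpuinfo text is made up):

const std = @import("std");

test "iterate lines with takeDelimiterExclusive" {
    var r: std.Io.Reader = .fixed("cpu: example\nflags: a b c\n");
    var count: usize = 0;
    while (r.takeDelimiterExclusive('\n')) |line| {
        // `line` excludes the '\n' and points into the reader's buffer.
        if (std.mem.indexOfScalar(u8, line, ':') != null) count += 1;
    } else |err| switch (err) {
        error.EndOfStream => {}, // normal termination
        else => |e| return e,
    }
    try std.testing.expectEqual(@as(usize, 2), count);
}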
@@ -12,6 +12,7 @@ const ThreadPool = std.Thread.Pool;
const WaitGroup = std.Thread.WaitGroup;
const ErrorBundle = std.zig.ErrorBundle;
const fatal = std.process.fatal;
const Writer = std.io.Writer;

const Value = @import("Value.zig");
const Type = @import("Type.zig");
@@ -44,6 +45,8 @@ const Builtin = @import("Builtin.zig");
const LlvmObject = @import("codegen/llvm.zig").Object;
const dev = @import("dev.zig");

const DeprecatedLinearFifo = @import("deprecated.zig").LinearFifo;

pub const Config = @import("Compilation/Config.zig");

/// General-purpose allocator. Used for both temporary and long-term storage.
@@ -121,15 +124,15 @@ work_queues: [
    }
    break :len len;
}
]std.fifo.LinearFifo(Job, .Dynamic),
]DeprecatedLinearFifo(Job),

/// These jobs are to invoke the Clang compiler to create an object file, which
/// gets linked with the Compilation.
c_object_work_queue: std.fifo.LinearFifo(*CObject, .Dynamic),
c_object_work_queue: DeprecatedLinearFifo(*CObject),

/// These jobs are to invoke the RC compiler to create a compiled resource file (.res), which
/// gets linked with the Compilation.
win32_resource_work_queue: if (dev.env.supports(.win32_resource)) std.fifo.LinearFifo(*Win32Resource, .Dynamic) else struct {
win32_resource_work_queue: if (dev.env.supports(.win32_resource)) DeprecatedLinearFifo(*Win32Resource) else struct {
    pub fn ensureUnusedCapacity(_: @This(), _: u0) error{}!void {}
    pub fn readItem(_: @This()) ?noreturn {
        return null;
@@ -995,13 +998,13 @@ pub const CObject = struct {

    const file = fs.cwd().openFile(file_name, .{}) catch break :source_line 0;
    defer file.close();
    file.seekTo(diag.src_loc.offset + 1 - diag.src_loc.column) catch break :source_line 0;

    var line = std.ArrayList(u8).init(eb.gpa);
    defer line.deinit();
    file.deprecatedReader().readUntilDelimiterArrayList(&line, '\n', 1 << 10) catch break :source_line 0;

    break :source_line try eb.addString(line.items);
    var buffer: [1024]u8 = undefined;
    var file_reader = file.reader(&buffer);
    file_reader.seekTo(diag.src_loc.offset + 1 - diag.src_loc.column) catch break :source_line 0;
    var aw: Writer.Allocating = .init(eb.gpa);
    defer aw.deinit();
    _ = file_reader.interface.streamDelimiterEnding(&aw.writer, '\n') catch break :source_line 0;
    break :source_line try eb.addString(aw.getWritten());
};

return .{
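Reading one source line now pairs std.Io.Writer.Allocating with streamDelimiterEnding on the file reader, instead of readUntilDelimiterArrayList. A sketch of that pairing with a fixed reader standing in for the file; the allocator choice and sample text are illustrative:

const std = @import("std");

test "collect up to a delimiter into an allocating writer" {
    var r: std.Io.Reader = .fixed("first line\nsecond line\n");

    var aw: std.Io.Writer.Allocating = .init(std.testing.allocator);
    defer aw.deinit();

    // Streams bytes from `r` into `aw` until the '\n' (or end of stream).
    _ = try r.streamDelimiterEnding(&aw.writer, '\n');
    try std.testing.expect(std.mem.startsWith(u8, aw.getWritten(), "first line"));
}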
@@ -1067,11 +1070,11 @@ pub const CObject = struct {
    }
};

var buffer: [1024]u8 = undefined;
const file = try fs.cwd().openFile(path, .{});
defer file.close();
var br = std.io.bufferedReader(file.deprecatedReader());
const reader = br.reader();
var bc = std.zig.llvm.BitcodeReader.init(gpa, .{ .reader = reader.any() });
var file_reader = file.reader(&buffer);
var bc = std.zig.llvm.BitcodeReader.init(gpa, .{ .reader = &file_reader.interface });
defer bc.deinit();

var file_names: std.AutoArrayHashMapUnmanaged(u32, []const u8) = .empty;
@@ -1873,15 +1876,12 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil

if (options.verbose_llvm_cpu_features) {
    if (options.root_mod.resolved_target.llvm_cpu_features) |cf| print: {
        std.debug.lockStdErr();
        defer std.debug.unlockStdErr();
        const stderr = fs.File.stderr().deprecatedWriter();
        nosuspend {
            stderr.print("compilation: {s}\n", .{options.root_name}) catch break :print;
            stderr.print(" target: {s}\n", .{try target.zigTriple(arena)}) catch break :print;
            stderr.print(" cpu: {s}\n", .{target.cpu.model.name}) catch break :print;
            stderr.print(" features: {s}\n", .{cf}) catch {};
        }
        const stderr_w = std.debug.lockStderrWriter(&.{});
        defer std.debug.unlockStderrWriter();
        stderr_w.print("compilation: {s}\n", .{options.root_name}) catch break :print;
        stderr_w.print(" target: {s}\n", .{try target.zigTriple(arena)}) catch break :print;
        stderr_w.print(" cpu: {s}\n", .{target.cpu.model.name}) catch break :print;
        stderr_w.print(" features: {s}\n", .{cf}) catch {};
    }
}
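The lockStdErr + deprecatedWriter pairs become std.debug.lockStderrWriter, which returns an already-locked *std.Io.Writer backed by whatever buffer is passed in (the hunk above passes &.{}; dump_argv below passes a real buffer). A sketch of the new shape; the messages are illustrative:

const std = @import("std");

fn reportFeatures(root_name: []const u8, features: []const u8) void {
    var buffer: [256]u8 = undefined;
    const stderr = std.debug.lockStderrWriter(&buffer);
    defer std.debug.unlockStderrWriter();

    stderr.print("compilation: {s}\n", .{root_name}) catch return;
    stderr.print(" features: {s}\n", .{features}) catch return;
}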
@@ -2483,7 +2483,7 @@ pub fn destroy(comp: *Compilation) void {
    if (comp.zcu) |zcu| zcu.deinit();
    comp.cache_use.deinit();

    for (comp.work_queues) |work_queue| work_queue.deinit();
    for (&comp.work_queues) |*work_queue| work_queue.deinit();
    comp.c_object_work_queue.deinit();
    comp.win32_resource_work_queue.deinit();

@@ -3931,11 +3931,12 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle {
    // This AU is referenced and has a transitive compile error, meaning it referenced something with a compile error.
    // However, we haven't reported any such error.
    // This is a compiler bug.
    const stderr = fs.File.stderr().deprecatedWriter();
    try stderr.writeAll("referenced transitive analysis errors, but none actually emitted\n");
    try stderr.print("{f} [transitive failure]\n", .{zcu.fmtAnalUnit(failed_unit)});
    var stderr_w = std.debug.lockStderrWriter(&.{});
    defer std.debug.unlockStderrWriter();
    try stderr_w.writeAll("referenced transitive analysis errors, but none actually emitted\n");
    try stderr_w.print("{f} [transitive failure]\n", .{zcu.fmtAnalUnit(failed_unit)});
    while (ref) |r| {
        try stderr.print("referenced by: {f}{s}\n", .{
        try stderr_w.print("referenced by: {f}{s}\n", .{
            zcu.fmtAnalUnit(r.referencer),
            if (zcu.transitive_failed_analysis.contains(r.referencer)) " [transitive failure]" else "",
        });
@@ -5849,7 +5850,9 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: std.Pr

    try child.spawn();

    const stderr = try child.stderr.?.deprecatedReader().readAllAlloc(arena, std.math.maxInt(usize));
    var small_buffer: [1]u8 = undefined;
    var stderr_reader = child.stderr.?.reader(&small_buffer);
    const stderr = try stderr_reader.interface.allocRemaining(arena, .unlimited);

    const term = child.wait() catch |err| {
        return comp.failCObj(c_object, "failed to spawn zig clang {s}: {s}", .{ argv.items[0], @errorName(err) });
@@ -6213,13 +6216,10 @@ fn spawnZigRc(
    const stdout = poller.fifo(.stdout);

    poll: while (true) {
        while (stdout.readableLength() < @sizeOf(std.zig.Server.Message.Header)) {
            if (!(try poller.poll())) break :poll;
        }
        const header = stdout.reader().readStruct(std.zig.Server.Message.Header) catch unreachable;
        while (stdout.readableLength() < header.bytes_len) {
            if (!(try poller.poll())) break :poll;
        }
        while (stdout.readableLength() < @sizeOf(std.zig.Server.Message.Header)) if (!try poller.poll()) break :poll;
        var header: std.zig.Server.Message.Header = undefined;
        assert(stdout.read(std.mem.asBytes(&header)) == @sizeOf(std.zig.Server.Message.Header));
        while (stdout.readableLength() < header.bytes_len) if (!try poller.poll()) break :poll;
        const body = stdout.readableSliceOfLen(header.bytes_len);

        switch (header.tag) {
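With readStruct gone from the poller fifo's reader, the header is read by copying raw bytes into an undefined struct through std.mem.asBytes. A sketch of just that step; the Header layout is an illustrative stand-in for std.zig.Server.Message.Header:

const std = @import("std");

const Header = extern struct { tag: u32, bytes_len: u32 };

test "read a header by copying raw bytes" {
    // Stand-in for the bytes sitting in the poller fifo.
    const raw = std.mem.toBytes(Header{ .tag = 1, .bytes_len = 8 });

    var header: Header = undefined;
    // Same idea as `stdout.read(std.mem.asBytes(&header))` in the hunk above.
    @memcpy(std.mem.asBytes(&header), &raw);
    try std.testing.expectEqual(@as(u32, 8), header.bytes_len);
}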
@@ -7209,13 +7209,16 @@ pub fn lockAndSetMiscFailure(
}

pub fn dump_argv(argv: []const []const u8) void {
    std.debug.lockStdErr();
    defer std.debug.unlockStdErr();
    const stderr = fs.File.stderr().deprecatedWriter();
    for (argv[0 .. argv.len - 1]) |arg| {
        nosuspend stderr.print("{s} ", .{arg}) catch return;
    var buffer: [64]u8 = undefined;
    const stderr = std.debug.lockStderrWriter(&buffer);
    defer std.debug.unlockStderrWriter();
    nosuspend {
        for (argv) |arg| {
            stderr.writeAll(arg) catch return;
            (stderr.writableArray(1) catch return)[0] = ' ';
        }
        stderr.buffer[stderr.end - 1] = '\n';
    }
    nosuspend stderr.print("{s}\n", .{argv[argv.len - 1]}) catch {};
}

pub fn getZigBackend(comp: Compilation) std.builtin.CompilerBackend {
@@ -52,15 +52,6 @@ pub fn LinearFifo(comptime T: type) type {
        }
    }

    /// Reduce allocated capacity to `size`.
    pub fn shrink(self: *Self, size: usize) void {
        assert(size >= self.count);
        self.realign();
        self.buf = self.allocator.realloc(self.buf, size) catch |e| switch (e) {
            error.OutOfMemory => return, // no problem, capacity is still correct then.
        };
    }

    /// Ensure that the buffer can fit at least `size` items
    pub fn ensureTotalCapacity(self: *Self, size: usize) !void {
        if (self.buf.len >= size) return;
@@ -76,11 +67,6 @@ pub fn LinearFifo(comptime T: type) type {
        return try self.ensureTotalCapacity(math.add(usize, self.count, size) catch return error.OutOfMemory);
    }

    /// Returns number of items currently in fifo
    pub fn readableLength(self: Self) usize {
        return self.count;
    }

    /// Returns a writable slice from the 'read' end of the fifo
    fn readableSliceMut(self: Self, offset: usize) []T {
        if (offset > self.count) return &[_]T{};
@@ -95,22 +81,6 @@ pub fn LinearFifo(comptime T: type) type {
        }
    }

    /// Returns a readable slice from `offset`
    pub fn readableSlice(self: Self, offset: usize) []const T {
        return self.readableSliceMut(offset);
    }

    pub fn readableSliceOfLen(self: *Self, len: usize) []const T {
        assert(len <= self.count);
        const buf = self.readableSlice(0);
        if (buf.len >= len) {
            return buf[0..len];
        } else {
            self.realign();
            return self.readableSlice(0)[0..len];
        }
    }

    /// Discard first `count` items in the fifo
    pub fn discard(self: *Self, count: usize) void {
        assert(count <= self.count);
@@ -143,28 +113,6 @@ pub fn LinearFifo(comptime T: type) type {
        return c;
    }

    /// Read data from the fifo into `dst`, returns number of items copied.
    pub fn read(self: *Self, dst: []T) usize {
        var dst_left = dst;

        while (dst_left.len > 0) {
            const slice = self.readableSlice(0);
            if (slice.len == 0) break;
            const n = @min(slice.len, dst_left.len);
            @memcpy(dst_left[0..n], slice[0..n]);
            self.discard(n);
            dst_left = dst_left[n..];
        }

        return dst.len - dst_left.len;
    }

    /// Same as `read` except it returns an error union
    /// The purpose of this function existing is to match `std.io.Reader` API.
    fn readFn(self: *Self, dest: []u8) error{}!usize {
        return self.read(dest);
    }

    /// Returns number of items available in fifo
    pub fn writableLength(self: Self) usize {
        return self.buf.len - self.count;
@@ -183,20 +131,6 @@ pub fn LinearFifo(comptime T: type) type {
        }
    }

    /// Returns a writable buffer of at least `size` items, allocating memory as needed.
    /// Use `fifo.update` once you've written data to it.
    pub fn writableWithSize(self: *Self, size: usize) ![]T {
        try self.ensureUnusedCapacity(size);

        // try to avoid realigning buffer
        var slice = self.writableSlice(0);
        if (slice.len < size) {
            self.realign();
            slice = self.writableSlice(0);
        }
        return slice;
    }

    /// Update the tail location of the buffer (usually follows use of writable/writableWithSize)
    pub fn update(self: *Self, count: usize) void {
        assert(self.count + count <= self.buf.len);
@@ -231,201 +165,5 @@ pub fn LinearFifo(comptime T: type) type {
        self.buf[tail] = item;
        self.update(1);
    }

    /// Appends the data in `src` to the fifo.
    /// Allocates more memory as necessary
    pub fn write(self: *Self, src: []const T) !void {
        try self.ensureUnusedCapacity(src.len);

        return self.writeAssumeCapacity(src);
    }

    /// Same as `write` except it returns the number of bytes written, which is always the same
    /// as `bytes.len`. The purpose of this function existing is to match `std.io.Writer` API.
    fn appendWrite(self: *Self, bytes: []const u8) error{OutOfMemory}!usize {
        try self.write(bytes);
        return bytes.len;
    }

    /// Make `count` items available before the current read location
    fn rewind(self: *Self, count: usize) void {
        assert(self.writableLength() >= count);

        var head = self.head + (self.buf.len - count);
        head &= self.buf.len - 1;
        self.head = head;
        self.count += count;
    }

    /// Place data back into the read stream
    pub fn unget(self: *Self, src: []const T) !void {
        try self.ensureUnusedCapacity(src.len);

        self.rewind(src.len);

        const slice = self.readableSliceMut(0);
        if (src.len < slice.len) {
            @memcpy(slice[0..src.len], src);
        } else {
            @memcpy(slice, src[0..slice.len]);
            const slice2 = self.readableSliceMut(slice.len);
            @memcpy(slice2[0 .. src.len - slice.len], src[slice.len..]);
        }
    }

    /// Returns the item at `offset`.
    /// Asserts offset is within bounds.
    pub fn peekItem(self: Self, offset: usize) T {
        assert(offset < self.count);

        var index = self.head + offset;
        index &= self.buf.len - 1;
        return self.buf[index];
    }

    pub fn toOwnedSlice(self: *Self) Allocator.Error![]T {
        if (self.head != 0) self.realign();
        assert(self.head == 0);
        assert(self.count <= self.buf.len);
        const allocator = self.allocator;
        if (allocator.resize(self.buf, self.count)) {
            const result = self.buf[0..self.count];
            self.* = Self.init(allocator);
            return result;
        }
        const new_memory = try allocator.dupe(T, self.buf[0..self.count]);
        allocator.free(self.buf);
        self.* = Self.init(allocator);
        return new_memory;
    }
    };
}
test "LinearFifo(u8, .Dynamic) discard(0) from empty buffer should not error on overflow" {
|
||||
var fifo = LinearFifo(u8, .Dynamic).init(testing.allocator);
|
||||
defer fifo.deinit();
|
||||
|
||||
// If overflow is not explicitly allowed this will crash in debug / safe mode
|
||||
fifo.discard(0);
|
||||
}
|
||||
|
||||
test "LinearFifo(u8, .Dynamic)" {
|
||||
var fifo = LinearFifo(u8, .Dynamic).init(testing.allocator);
|
||||
defer fifo.deinit();
|
||||
|
||||
try fifo.write("HELLO");
|
||||
try testing.expectEqual(@as(usize, 5), fifo.readableLength());
|
||||
try testing.expectEqualSlices(u8, "HELLO", fifo.readableSlice(0));
|
||||
|
||||
{
|
||||
var i: usize = 0;
|
||||
while (i < 5) : (i += 1) {
|
||||
try fifo.write(&[_]u8{fifo.peekItem(i)});
|
||||
}
|
||||
try testing.expectEqual(@as(usize, 10), fifo.readableLength());
|
||||
try testing.expectEqualSlices(u8, "HELLOHELLO", fifo.readableSlice(0));
|
||||
}
|
||||
|
||||
{
|
||||
try testing.expectEqual(@as(u8, 'H'), fifo.readItem().?);
|
||||
try testing.expectEqual(@as(u8, 'E'), fifo.readItem().?);
|
||||
try testing.expectEqual(@as(u8, 'L'), fifo.readItem().?);
|
||||
try testing.expectEqual(@as(u8, 'L'), fifo.readItem().?);
|
||||
try testing.expectEqual(@as(u8, 'O'), fifo.readItem().?);
|
||||
}
|
||||
try testing.expectEqual(@as(usize, 5), fifo.readableLength());
|
||||
|
||||
{ // Writes that wrap around
|
||||
try testing.expectEqual(@as(usize, 11), fifo.writableLength());
|
||||
try testing.expectEqual(@as(usize, 6), fifo.writableSlice(0).len);
|
||||
fifo.writeAssumeCapacity("6<chars<11");
|
||||
try testing.expectEqualSlices(u8, "HELLO6<char", fifo.readableSlice(0));
|
||||
try testing.expectEqualSlices(u8, "s<11", fifo.readableSlice(11));
|
||||
try testing.expectEqualSlices(u8, "11", fifo.readableSlice(13));
|
||||
try testing.expectEqualSlices(u8, "", fifo.readableSlice(15));
|
||||
fifo.discard(11);
|
||||
try testing.expectEqualSlices(u8, "s<11", fifo.readableSlice(0));
|
||||
fifo.discard(4);
|
||||
try testing.expectEqual(@as(usize, 0), fifo.readableLength());
|
||||
}
|
||||
|
||||
{
|
||||
const buf = try fifo.writableWithSize(12);
|
||||
try testing.expectEqual(@as(usize, 12), buf.len);
|
||||
var i: u8 = 0;
|
||||
while (i < 10) : (i += 1) {
|
||||
buf[i] = i + 'a';
|
||||
}
|
||||
fifo.update(10);
|
||||
try testing.expectEqualSlices(u8, "abcdefghij", fifo.readableSlice(0));
|
||||
}
|
||||
|
||||
{
|
||||
try fifo.unget("prependedstring");
|
||||
var result: [30]u8 = undefined;
|
||||
try testing.expectEqualSlices(u8, "prependedstringabcdefghij", result[0..fifo.read(&result)]);
|
||||
try fifo.unget("b");
|
||||
try fifo.unget("a");
|
||||
try testing.expectEqualSlices(u8, "ab", result[0..fifo.read(&result)]);
|
||||
}
|
||||
|
||||
fifo.shrink(0);
|
||||
|
||||
{
|
||||
try fifo.writer().print("{s}, {s}!", .{ "Hello", "World" });
|
||||
var result: [30]u8 = undefined;
|
||||
try testing.expectEqualSlices(u8, "Hello, World!", result[0..fifo.read(&result)]);
|
||||
try testing.expectEqual(@as(usize, 0), fifo.readableLength());
|
||||
}
|
||||
|
||||
{
|
||||
try fifo.writer().writeAll("This is a test");
|
||||
var result: [30]u8 = undefined;
|
||||
try testing.expectEqualSlices(u8, "This", (try fifo.reader().readUntilDelimiterOrEof(&result, ' ')).?);
|
||||
try testing.expectEqualSlices(u8, "is", (try fifo.reader().readUntilDelimiterOrEof(&result, ' ')).?);
|
||||
try testing.expectEqualSlices(u8, "a", (try fifo.reader().readUntilDelimiterOrEof(&result, ' ')).?);
|
||||
try testing.expectEqualSlices(u8, "test", (try fifo.reader().readUntilDelimiterOrEof(&result, ' ')).?);
|
||||
}
|
||||
|
||||
{
|
||||
try fifo.ensureTotalCapacity(1);
|
||||
var in_fbs = std.io.fixedBufferStream("pump test");
|
||||
var out_buf: [50]u8 = undefined;
|
||||
var out_fbs = std.io.fixedBufferStream(&out_buf);
|
||||
try fifo.pump(in_fbs.reader(), out_fbs.writer());
|
||||
try testing.expectEqualSlices(u8, in_fbs.buffer, out_fbs.getWritten());
|
||||
}
|
||||
}
|
||||
|
||||
test LinearFifo {
|
||||
inline for ([_]type{ u1, u8, u16, u64 }) |T| {
|
||||
const FifoType = LinearFifo(T);
|
||||
var fifo: FifoType = .init(testing.allocator);
|
||||
defer fifo.deinit();
|
||||
|
||||
try fifo.write(&[_]T{ 0, 1, 1, 0, 1 });
|
||||
try testing.expectEqual(@as(usize, 5), fifo.readableLength());
|
||||
|
||||
{
|
||||
try testing.expectEqual(@as(T, 0), fifo.readItem().?);
|
||||
try testing.expectEqual(@as(T, 1), fifo.readItem().?);
|
||||
try testing.expectEqual(@as(T, 1), fifo.readItem().?);
|
||||
try testing.expectEqual(@as(T, 0), fifo.readItem().?);
|
||||
try testing.expectEqual(@as(T, 1), fifo.readItem().?);
|
||||
try testing.expectEqual(@as(usize, 0), fifo.readableLength());
|
||||
}
|
||||
|
||||
{
|
||||
try fifo.writeItem(1);
|
||||
try fifo.writeItem(1);
|
||||
try fifo.writeItem(1);
|
||||
try testing.expectEqual(@as(usize, 3), fifo.readableLength());
|
||||
}
|
||||
|
||||
{
|
||||
var readBuf: [3]T = undefined;
|
||||
const n = fifo.read(&readBuf);
|
||||
try testing.expectEqual(@as(usize, 3), n); // NOTE: It should be the number of items.
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
src/main.zig (46 changed lines)
@@ -65,8 +65,10 @@ pub fn wasi_cwd() std.os.wasi.fd_t {

const fatal = std.process.fatal;

/// This can be global since stdin is a singleton.
var stdin_buffer: [4096]u8 = undefined;
/// This can be global since stdout is a singleton.
var stdio_buffer: [4096]u8 = undefined;
var stdout_buffer: [4096]u8 = undefined;

/// Shaming all the locations that inappropriately use an O(N) search algorithm.
/// Please delete this and fix the compilation errors!
@@ -3561,10 +3563,12 @@ fn buildOutputType(
    switch (listen) {
        .none => {},
        .stdio => {
            var stdin_reader = fs.File.stdin().reader(&stdin_buffer);
            var stdout_writer = fs.File.stdout().writer(&stdout_buffer);
            try serve(
                comp,
                .stdin(),
                .stdout(),
                &stdin_reader.interface,
                &stdout_writer.interface,
                test_exec_args.items,
                self_exe_path,
                arg_mode,
@@ -3584,10 +3588,13 @@ fn buildOutputType(
            const conn = try server.accept();
            defer conn.stream.close();

            var input = conn.stream.reader(&stdin_buffer);
            var output = conn.stream.writer(&stdout_buffer);

            try serve(
                comp,
                .{ .handle = conn.stream.handle },
                .{ .handle = conn.stream.handle },
                input.interface(),
                &output.interface,
                test_exec_args.items,
                self_exe_path,
                arg_mode,
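The same interface swap covers the network listen path: std.net.Stream gets buffered reader/writer adapters, so serve only ever sees *std.Io.Reader and *std.Io.Writer whether the peer is stdio or a TCP connection. A hedged sketch of that call shape; connection setup is elided and the echo is illustrative:

const std = @import("std");

fn serveConnection(stream: std.net.Stream) !void {
    var in_buffer: [4096]u8 = undefined;
    var out_buffer: [4096]u8 = undefined;

    var input = stream.reader(&in_buffer);
    var output = stream.writer(&out_buffer);

    // Note the asymmetry visible above: the stream reader exposes
    // interface() as a method, the writer exposes .interface as a field.
    const in: *std.Io.Reader = input.interface();
    const out: *std.Io.Writer = &output.interface;

    const line = try in.takeDelimiterExclusive('\n');
    try out.writeAll(line);
    try out.flush();
}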
@@ -4053,8 +4060,8 @@ fn saveState(comp: *Compilation, incremental: bool) void {

fn serve(
    comp: *Compilation,
    in: fs.File,
    out: fs.File,
    in: *std.Io.Reader,
    out: *std.Io.Writer,
    test_exec_args: []const ?[]const u8,
    self_exe_path: ?[]const u8,
    arg_mode: ArgMode,
@@ -4064,12 +4071,10 @@ fn serve(
    const gpa = comp.gpa;

    var server = try Server.init(.{
        .gpa = gpa,
        .in = in,
        .out = out,
        .zig_version = build_options.version,
    });
    defer server.deinit();

    var child_pid: ?std.process.Child.Id = null;

@@ -5491,10 +5496,10 @@ fn jitCmd(
    defer comp.destroy();

    if (options.server) {
        var stdout_writer = fs.File.stdout().writer(&stdout_buffer);
        var server: std.zig.Server = .{
            .out = fs.File.stdout(),
            .out = &stdout_writer.interface,
            .in = undefined, // won't be receiving messages
            .receive_fifo = undefined, // won't be receiving messages
        };

        try comp.update(root_prog_node);
@@ -6058,7 +6063,7 @@ fn cmdAstCheck(
        };
    } else fs.File.stdin();
    defer if (zig_source_path != null) f.close();
    var file_reader: fs.File.Reader = f.reader(&stdio_buffer);
    var file_reader: fs.File.Reader = f.reader(&stdin_buffer);
    break :s std.zig.readSourceFileToEndAlloc(arena, &file_reader) catch |err| {
        fatal("unable to load file '{s}' for ast-check: {s}", .{ display_path, @errorName(err) });
    };
@@ -6076,7 +6081,7 @@ fn cmdAstCheck(

    const tree = try Ast.parse(arena, source, mode);

    var stdout_writer = fs.File.stdout().writerStreaming(&stdio_buffer);
    var stdout_writer = fs.File.stdout().writerStreaming(&stdout_buffer);
    const stdout_bw = &stdout_writer.interface;
    switch (mode) {
        .zig => {
@@ -6291,7 +6296,7 @@ fn detectNativeCpuWithLLVM(
}

fn printCpu(cpu: std.Target.Cpu) !void {
    var stdout_writer = fs.File.stdout().writerStreaming(&stdio_buffer);
    var stdout_writer = fs.File.stdout().writerStreaming(&stdout_buffer);
    const stdout_bw = &stdout_writer.interface;

    if (cpu.model.llvm_name) |llvm_name| {
@@ -6340,7 +6345,7 @@ fn cmdDumpLlvmInts(
    const dl = tm.createTargetDataLayout();
    const context = llvm.Context.create();

    var stdout_writer = fs.File.stdout().writerStreaming(&stdio_buffer);
    var stdout_writer = fs.File.stdout().writerStreaming(&stdout_buffer);
    const stdout_bw = &stdout_writer.interface;
    for ([_]u16{ 1, 8, 16, 32, 64, 128, 256 }) |bits| {
        const int_type = context.intType(bits);
@@ -6369,9 +6374,8 @@ fn cmdDumpZir(
    defer f.close();

    const zir = try Zcu.loadZirCache(arena, f);
    var stdout_writer = fs.File.stdout().writerStreaming(&stdio_buffer);
    var stdout_writer = fs.File.stdout().writerStreaming(&stdout_buffer);
    const stdout_bw = &stdout_writer.interface;

    {
        const instruction_bytes = zir.instructions.len *
            // Here we don't use @sizeOf(Zir.Inst.Data) because it would include
@@ -6417,7 +6421,7 @@ fn cmdChangelist(
    var f = fs.cwd().openFile(old_source_path, .{}) catch |err|
        fatal("unable to open old source file '{s}': {s}", .{ old_source_path, @errorName(err) });
    defer f.close();
    var file_reader: fs.File.Reader = f.reader(&stdio_buffer);
    var file_reader: fs.File.Reader = f.reader(&stdin_buffer);
    break :source std.zig.readSourceFileToEndAlloc(arena, &file_reader) catch |err|
        fatal("unable to read old source file '{s}': {s}", .{ old_source_path, @errorName(err) });
};
@@ -6425,7 +6429,7 @@ fn cmdChangelist(
    var f = fs.cwd().openFile(new_source_path, .{}) catch |err|
        fatal("unable to open new source file '{s}': {s}", .{ new_source_path, @errorName(err) });
    defer f.close();
    var file_reader: fs.File.Reader = f.reader(&stdio_buffer);
    var file_reader: fs.File.Reader = f.reader(&stdin_buffer);
    break :source std.zig.readSourceFileToEndAlloc(arena, &file_reader) catch |err|
        fatal("unable to read new source file '{s}': {s}", .{ new_source_path, @errorName(err) });
};
@@ -6457,7 +6461,7 @@ fn cmdChangelist(
    var inst_map: std.AutoHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index) = .empty;
    try Zcu.mapOldZirToNew(arena, old_zir, new_zir, &inst_map);

    var stdout_writer = fs.File.stdout().writerStreaming(&stdio_buffer);
    var stdout_writer = fs.File.stdout().writerStreaming(&stdout_buffer);
    const stdout_bw = &stdout_writer.interface;
    {
        try stdout_bw.print("Instruction mappings:\n", .{});
@@ -6917,7 +6921,7 @@ fn cmdFetch(

    const name = switch (save) {
        .no => {
            var stdout = fs.File.stdout().writerStreaming(&stdio_buffer);
            var stdout = fs.File.stdout().writerStreaming(&stdout_buffer);
            try stdout.interface.print("{s}\n", .{package_hash_slice});
            try stdout.interface.flush();
            return cleanExit();