delete std.io.MultiWriter

Instead, this use case is better served by passthrough streams. For
instance, a hashing writer should support being passed an underlying
writer, and the buffer can go in front of the hasher for optimal code.
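
For illustration, here is a minimal sketch of such a passthrough writer in
Zig. The names (HashingWriter, update, writeAll) are hypothetical, not the
actual std API:

    /// Hypothetical passthrough writer: updates a hasher with every byte,
    /// then forwards the same bytes to an underlying child writer.
    pub fn HashingWriter(comptime Hasher: type, comptime Child: type) type {
        return struct {
            hasher: *Hasher,
            child: Child,

            const Self = @This();

            pub fn writeAll(self: *Self, bytes: []const u8) !void {
                self.hasher.update(bytes); // hash the bytes as they pass through
                try self.child.writeAll(bytes); // then hand them downstream
            }
        };
    }

Wrapping such a writer in a buffered writer gives the arrangement described
above: the buffer sits in front of the hasher, so both the hash update and
the downstream write operate on whole chunks at a time.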
Author: Andrew Kelley
Date:   2025-05-06 21:14:09 -07:00
Parent: 110af768bb
Commit: 803215bb18
3 changed files with 11 additions and 61 deletions

lib/std/io.zig

@@ -16,9 +16,6 @@ pub const BufferedReader = @import("io/BufferedReader.zig");
 pub const BufferedWriter = @import("io/BufferedWriter.zig");
 pub const AllocatingWriter = @import("io/AllocatingWriter.zig");
-pub const MultiWriter = @import("io/multi_writer.zig").MultiWriter;
-pub const multiWriter = @import("io/multi_writer.zig").multiWriter;
 pub const ChangeDetectionStream = @import("io/change_detection_stream.zig").ChangeDetectionStream;
 pub const changeDetectionStream = @import("io/change_detection_stream.zig").changeDetectionStream;

lib/std/io/multi_writer.zig

@@ -1,53 +0,0 @@
-const std = @import("../std.zig");
-const io = std.io;
-
-/// Takes a tuple of streams, and constructs a new stream that writes to all of them
-pub fn MultiWriter(comptime Writers: type) type {
-    comptime var ErrSet = error{};
-    inline for (@typeInfo(Writers).@"struct".fields) |field| {
-        const StreamType = field.type;
-        ErrSet = ErrSet || if (@hasDecl(StreamType, "Error")) StreamType.Error else anyerror;
-    }
-
-    return struct {
-        const Self = @This();
-
-        streams: Writers,
-
-        pub const Error = ErrSet;
-        pub const Writer = io.Writer(*Self, Error, write);
-
-        pub fn writer(self: *Self) Writer {
-            return .{ .context = self };
-        }
-
-        pub fn write(self: *Self, bytes: []const u8) Error!usize {
-            inline for (self.streams) |stream|
-                try stream.writeAll(bytes);
-            return bytes.len;
-        }
-    };
-}
-
-pub fn multiWriter(streams: anytype) MultiWriter(@TypeOf(streams)) {
-    return .{ .streams = streams };
-}
-
-const testing = std.testing;
-
-test "MultiWriter" {
-    var tmp = testing.tmpDir(.{});
-    defer tmp.cleanup();
-    var f = try tmp.dir.createFile("t.txt", .{});
-
-    var buf1: [255]u8 = undefined;
-    var fbs1 = io.fixedBufferStream(&buf1);
-    var buf2: [255]u8 = undefined;
-    var stream = multiWriter(.{ fbs1.writer(), f.writer() });
-
-    try stream.writer().print("HI", .{});
-    f.close();
-
-    try testing.expectEqualSlices(u8, "HI", fbs1.getWritten());
-    try testing.expectEqualSlices(u8, "HI", try tmp.dir.readFile("t.txt", &buf2));
-}

src/main.zig

@@ -3331,12 +3331,18 @@ fn buildOutputType(
             // We are providing our own cache key, because this file has nothing
             // to do with the cache manifest.
             var hasher = Cache.Hasher.init("0123456789abcdef");
-            var w = io.multiWriter(.{ f.writer(), hasher.writer() });
-            var fifo = std.fifo.LinearFifo(u8, .{ .Static = 4096 }).init();
-            try fifo.pump(fs.File.stdin().reader().unbuffered(), w.writer().unbuffered());
+            var file_writer = f.writer();
+            var file_writer_bw = file_writer.interface().unbuffered();
+            var hasher_writer = hasher.writer(&file_writer_bw);
+            var buffer: [1000]u8 = undefined;
+            var bw = hasher_writer.interface().buffered(&buffer);
+            bw.writeFileAll(.stdin(), .{}) catch |err| switch (err) {
+                error.WriteFailed => fatal("failed to write {s}: {s}", .{ dump_path, file_writer.err.? }),
+                else => fatal("failed to pipe stdin to {s}: {s}", .{ dump_path, err }),
+            };
+            try bw.flush();
-            var bin_digest: Cache.BinDigest = undefined;
-            hasher.final(&bin_digest);
+            const bin_digest: Cache.BinDigest = hasher_writer.final();
             const sub_path = try std.fmt.allocPrint(arena, "tmp" ++ sep ++ "{x}-stdin{s}", .{
                 &bin_digest, ext.canonicalName(target),
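
Reading the new hunk as a chain: stdin feeds the 1000-byte buffer, the
buffer flushes into the hashing writer, and the hashing writer forwards to
the unbuffered file writer. This is the arrangement the commit message
calls for: with the buffer in front of the hasher, hashing and file writes
happen once per flushed chunk rather than once per small write.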