std.zig.llvm.Builder: update format API
parent cce32bd1d5
commit 3afc6fbac6
File diff suppressed because it is too large
@@ -124,7 +124,7 @@ pub fn findEndRecord(seekable_stream: anytype, stream_len: u64) !EndRecord {
         try seekable_stream.seekTo(stream_len - @as(u64, new_loaded_len));
         const read_buf: []u8 = buf[buf.len - new_loaded_len ..][0..read_len];
-        const len = try seekable_stream.context.reader().readAll(read_buf);
+        const len = try seekable_stream.context.deprecatedReader().readAll(read_buf);
         if (len != read_len)
             return error.ZipTruncated;
         loaded_len = new_loaded_len;
@@ -295,7 +295,7 @@ pub fn Iterator(comptime SeekableStream: type) type {
             if (locator_end_offset > stream_len)
                 return error.ZipTruncated;
             try stream.seekTo(stream_len - locator_end_offset);
-            const locator = try stream.context.reader().readStructEndian(EndLocator64, .little);
+            const locator = try stream.context.deprecatedReader().readStructEndian(EndLocator64, .little);
             if (!std.mem.eql(u8, &locator.signature, &end_locator64_sig))
                 return error.ZipBadLocatorSig;
             if (locator.zip64_disk_count != 0)
@@ -305,7 +305,7 @@ pub fn Iterator(comptime SeekableStream: type) type {

             try stream.seekTo(locator.record_file_offset);

-            const record64 = try stream.context.reader().readStructEndian(EndRecord64, .little);
+            const record64 = try stream.context.deprecatedReader().readStructEndian(EndRecord64, .little);

             if (!std.mem.eql(u8, &record64.signature, &end_record64_sig))
                 return error.ZipBadEndRecord64Sig;
@@ -357,7 +357,7 @@ pub fn Iterator(comptime SeekableStream: type) type {

             const header_zip_offset = self.cd_zip_offset + self.cd_record_offset;
             try self.stream.seekTo(header_zip_offset);
-            const header = try self.stream.context.reader().readStructEndian(CentralDirectoryFileHeader, .little);
+            const header = try self.stream.context.deprecatedReader().readStructEndian(CentralDirectoryFileHeader, .little);
             if (!std.mem.eql(u8, &header.signature, &central_file_header_sig))
                 return error.ZipBadCdOffset;

@@ -386,7 +386,7 @@ pub fn Iterator(comptime SeekableStream: type) type {

             {
                 try self.stream.seekTo(header_zip_offset + @sizeOf(CentralDirectoryFileHeader) + header.filename_len);
-                const len = try self.stream.context.reader().readAll(extra);
+                const len = try self.stream.context.deprecatedReader().readAll(extra);
                 if (len != extra.len)
                     return error.ZipTruncated;
             }
@@ -449,7 +449,7 @@ pub fn Iterator(comptime SeekableStream: type) type {
             try stream.seekTo(self.header_zip_offset + @sizeOf(CentralDirectoryFileHeader));

             {
-                const len = try stream.context.reader().readAll(filename);
+                const len = try stream.context.deprecatedReader().readAll(filename);
                 if (len != filename.len)
                     return error.ZipBadFileOffset;
             }
@@ -457,7 +457,7 @@ pub fn Iterator(comptime SeekableStream: type) type {
             const local_data_header_offset: u64 = local_data_header_offset: {
                 const local_header = blk: {
                     try stream.seekTo(self.file_offset);
-                    break :blk try stream.context.reader().readStructEndian(LocalFileHeader, .little);
+                    break :blk try stream.context.deprecatedReader().readStructEndian(LocalFileHeader, .little);
                 };
                 if (!std.mem.eql(u8, &local_header.signature, &local_file_header_sig))
                     return error.ZipBadFileOffset;
@@ -483,7 +483,7 @@ pub fn Iterator(comptime SeekableStream: type) type {

             {
                 try stream.seekTo(self.file_offset + @sizeOf(LocalFileHeader) + local_header.filename_len);
-                const len = try stream.context.reader().readAll(extra);
+                const len = try stream.context.deprecatedReader().readAll(extra);
                 if (len != extra.len)
                     return error.ZipTruncated;
             }
@@ -552,7 +552,7 @@ pub fn Iterator(comptime SeekableStream: type) type {
                 @as(u64, @sizeOf(LocalFileHeader)) +
                 local_data_header_offset;
             try stream.seekTo(local_data_file_offset);
-            var limited_reader = std.io.limitedReader(stream.context.reader(), self.compressed_size);
+            var limited_reader = std.io.limitedReader(stream.context.deprecatedReader(), self.compressed_size);
            const crc = try decompress(
                self.compression_method,
                self.uncompressed_size,
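The zip iterator hunks above are a mechanical rename: the old GenericReader accessor reader() on the underlying file becomes deprecatedReader(), and the call sites are otherwise unchanged. A minimal sketch of the same read-and-check idiom, assuming a std.fs.File as the stream's context; the helper name and its error value are invented for illustration and are not part of the commit.

const std = @import("std");

// Hypothetical helper mirroring the read-and-check idiom in the hunks above.
fn readExact(file: std.fs.File, buf: []u8) !void {
    // Previously spelled file.reader(); the same generic adapter is now
    // exposed under the deprecatedReader() name.
    const len = try file.deprecatedReader().readAll(buf);
    if (len != buf.len) return error.Truncated;
}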
@@ -710,7 +710,7 @@ const Writer = struct {
             }
         }
         const asm_source = std.mem.sliceAsBytes(w.air.extra.items[extra_i..])[0..extra.data.source_len];
-        try s.print(", \"{f}\"", .{std.zig.fmtEscapes(asm_source)});
+        try s.print(", \"{f}\"", .{std.zig.fmtString(asm_source)});
     }

     fn writeDbgStmt(w: *Writer, s: *std.io.Writer, inst: Air.Inst.Index) Error!void {
@@ -722,7 +722,7 @@ const Writer = struct {
         const pl_op = w.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
         try w.writeOperand(s, inst, 0, pl_op.operand);
         const name: Air.NullTerminatedString = @enumFromInt(pl_op.payload);
-        try s.print(", \"{f}\"", .{std.zig.fmtEscapes(name.toSlice(w.air))});
+        try s.print(", \"{f}\"", .{std.zig.fmtString(name.toSlice(w.air))});
     }

     fn writeCall(w: *Writer, s: *std.io.Writer, inst: Air.Inst.Index) Error!void {
src/Zcu.zig (277 changed lines)
@@ -15,6 +15,7 @@ const BigIntConst = std.math.big.int.Const;
 const BigIntMutable = std.math.big.int.Mutable;
 const Target = std.Target;
 const Ast = std.zig.Ast;
+const Writer = std.io.Writer;

 const Zcu = @This();
 const Compilation = @import("Compilation.zig");
@@ -858,7 +859,7 @@ pub const Namespace = struct {
             try ns.fileScope(zcu).renderFullyQualifiedDebugName(writer);
             break :sep ':';
         };
-        if (name != .empty) try writer.print("{c}{}", .{ sep, name.fmt(&zcu.intern_pool) });
+        if (name != .empty) try writer.print("{c}{f}", .{ sep, name.fmt(&zcu.intern_pool) });
     }

     pub fn internFullyQualifiedName(
@@ -870,7 +871,7 @@ pub const Namespace = struct {
     ) !InternPool.NullTerminatedString {
         const ns_name = Type.fromInterned(ns.owner_type).containerTypeName(ip);
         if (name == .empty) return ns_name;
-        return ip.getOrPutStringFmt(gpa, tid, "{}.{}", .{ ns_name.fmt(ip), name.fmt(ip) }, .no_embedded_nulls);
+        return ip.getOrPutStringFmt(gpa, tid, "{f}.{f}", .{ ns_name.fmt(ip), name.fmt(ip) }, .no_embedded_nulls);
     }
 };

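The switches from {} to {f} in the two hunks above follow the updated std.fmt rules: a value's custom format method is only invoked through the explicit {f} specifier, and that method now receives a concrete *std.io.Writer rather than an anytype writer plus format options. A small illustration under that assumption; the Point type below is invented for the example and is not part of the commit.

const std = @import("std");

const Point = struct {
    x: i32,
    y: i32,

    // New-style format method: no format string or options, just the writer.
    pub fn format(p: Point, w: *std.io.Writer) std.io.Writer.Error!void {
        try w.print("({d},{d})", .{ p.x, p.y });
    }
};

test "explicit {f} invokes the custom format method" {
    var buf: [32]u8 = undefined;
    var w: std.io.Writer = .fixed(&buf);
    try w.print("{f}", .{Point{ .x = 1, .y = 2 }});
    try std.testing.expectEqualStrings("(1,2)", w.buffered());
}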
@@ -1039,12 +1040,12 @@ pub const File = struct {
         if (stat.size > std.math.maxInt(u32))
             return error.FileTooBig;

-        const source = try gpa.allocSentinel(u8, @as(usize, @intCast(stat.size)), 0);
+        const source = try gpa.allocSentinel(u8, @intCast(stat.size), 0);
         errdefer gpa.free(source);

-        const amt = try f.readAll(source);
-        if (amt != stat.size)
-            return error.UnexpectedEndOfFile;
+        var file_reader = f.reader(&.{});
+        file_reader.size = stat.size;
+        try file_reader.interface.readSliceAll(source);

         // Here we do not modify stat fields because this function is the one
         // used for error reporting. We need to keep the stat fields stale so that
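The hunk above swaps the readAll-plus-length-check idiom for the new std.fs.File.Reader, where a short read surfaces as an error from readSliceAll instead of a manual comparison against stat.size. A condensed sketch of the same pattern, assuming the post-migration std; the helper function itself is illustrative, not from the commit.

const std = @import("std");

// Hypothetical helper following the read pattern in the hunk above.
fn readWholeFile(gpa: std.mem.Allocator, file: std.fs.File) ![]u8 {
    const stat = try file.stat();
    if (stat.size > std.math.maxInt(u32)) return error.FileTooBig;

    const source = try gpa.alloc(u8, @intCast(stat.size));
    errdefer gpa.free(source);

    // Unbuffered File.Reader; data lands directly in `source`.
    var file_reader = file.reader(&.{});
    file_reader.size = stat.size; // size is already known from stat
    try file_reader.interface.readSliceAll(source);
    return source;
}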
@@ -1097,11 +1098,10 @@ pub const File = struct {
         const gpa = pt.zcu.gpa;
         const ip = &pt.zcu.intern_pool;
         const strings = ip.getLocal(pt.tid).getMutableStrings(gpa);
-        const slice = try strings.addManyAsSlice(file.fullyQualifiedNameLen());
-        var fbs = std.io.fixedBufferStream(slice[0]);
-        file.renderFullyQualifiedName(fbs.writer()) catch unreachable;
-        assert(fbs.pos == slice[0].len);
-        return ip.getOrPutTrailingString(gpa, pt.tid, @intCast(slice[0].len), .no_embedded_nulls);
+        var w: Writer = .fixed((try strings.addManyAsSlice(file.fullyQualifiedNameLen()))[0]);
+        file.renderFullyQualifiedName(&w) catch unreachable;
+        assert(w.end == w.buffer.len);
+        return ip.getOrPutTrailingString(gpa, pt.tid, @intCast(w.end), .no_embedded_nulls);
     }

     pub const Index = InternPool.FileIndex;
@@ -1190,13 +1190,8 @@ pub const ErrorMsg = struct {
         gpa.destroy(err_msg);
     }

-    pub fn init(
-        gpa: Allocator,
-        src_loc: LazySrcLoc,
-        comptime format: []const u8,
-        args: anytype,
-    ) !ErrorMsg {
-        return ErrorMsg{
+    pub fn init(gpa: Allocator, src_loc: LazySrcLoc, comptime format: []const u8, args: anytype) !ErrorMsg {
+        return .{
             .src_loc = src_loc,
             .msg = try std.fmt.allocPrint(gpa, format, args),
         };
@@ -2811,10 +2806,18 @@ comptime {
 }

 pub fn loadZirCache(gpa: Allocator, cache_file: std.fs.File) !Zir {
-    return loadZirCacheBody(gpa, try cache_file.deprecatedReader().readStruct(Zir.Header), cache_file);
+    var buffer: [2000]u8 = undefined;
+    var file_reader = cache_file.reader(&buffer);
+    return result: {
+        const header = file_reader.interface.takeStruct(Zir.Header) catch |err| break :result err;
+        break :result loadZirCacheBody(gpa, header.*, &file_reader.interface);
+    } catch |err| switch (err) {
+        error.ReadFailed => return file_reader.err.?,
+        else => |e| return e,
+    };
 }

-pub fn loadZirCacheBody(gpa: Allocator, header: Zir.Header, cache_file: std.fs.File) !Zir {
+pub fn loadZirCacheBody(gpa: Allocator, header: Zir.Header, cache_br: *std.io.Reader) !Zir {
     var instructions: std.MultiArrayList(Zir.Inst) = .{};
     errdefer instructions.deinit(gpa);

@@ -2837,34 +2840,16 @@ pub fn loadZirCacheBody(gpa: Allocator, header: Zir.Header, cache_file: std.fs.F
         undefined;
     defer if (data_has_safety_tag) gpa.free(safety_buffer);

-    const data_ptr = if (data_has_safety_tag)
-        @as([*]u8, @ptrCast(safety_buffer.ptr))
-    else
-        @as([*]u8, @ptrCast(zir.instructions.items(.data).ptr));
-
-    var iovecs = [_]std.posix.iovec{
-        .{
-            .base = @as([*]u8, @ptrCast(zir.instructions.items(.tag).ptr)),
-            .len = header.instructions_len,
-        },
-        .{
-            .base = data_ptr,
-            .len = header.instructions_len * 8,
-        },
-        .{
-            .base = zir.string_bytes.ptr,
-            .len = header.string_bytes_len,
-        },
-        .{
-            .base = @as([*]u8, @ptrCast(zir.extra.ptr)),
-            .len = header.extra_len * 4,
-        },
+    var vecs = [_][]u8{
+        @ptrCast(zir.instructions.items(.tag)),
+        if (data_has_safety_tag)
+            @ptrCast(safety_buffer)
+        else
+            @ptrCast(zir.instructions.items(.data)),
+        zir.string_bytes,
+        @ptrCast(zir.extra),
     };
-    const amt_read = try cache_file.readvAll(&iovecs);
-    const amt_expected = zir.instructions.len * 9 +
-        zir.string_bytes.len +
-        zir.extra.len * 4;
-    if (amt_read != amt_expected) return error.UnexpectedFileSize;
+    try cache_br.readVecAll(&vecs);
     if (data_has_safety_tag) {
         const tags = zir.instructions.items(.tag);
         for (zir.instructions.items(.data), 0..) |*data, i| {
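The rewritten cache loader above drops the posix iovec plumbing: the destination buffers are plain []u8 slices handed to readVecAll on a *std.io.Reader, and read failures are mapped back to the error stored on the File.Reader, as loadZirCache now does with error.ReadFailed. A reduced sketch of that pattern under the same assumptions; the two-part record layout is invented for the example.

const std = @import("std");

// Hypothetical two-part record read, following the readVecAll pattern above.
fn readRecord(file: std.fs.File, head: []u8, body: []u8) !void {
    var buffer: [2000]u8 = undefined;
    var file_reader = file.reader(&buffer);

    var vecs = [_][]u8{ head, body };
    file_reader.interface.readVecAll(&vecs) catch |err| switch (err) {
        // ReadFailed carries no detail; the underlying error lives on the File.Reader.
        error.ReadFailed => return file_reader.err.?,
        else => |e| return e,
    };
}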
@@ -2876,7 +2861,6 @@ pub fn loadZirCacheBody(gpa: Allocator, header: Zir.Header, cache_file: std.fs.F
             };
         }
     }
-
     return zir;
 }

@@ -2887,14 +2871,6 @@ pub fn saveZirCache(gpa: Allocator, cache_file: std.fs.File, stat: std.fs.File.S
         undefined;
     defer if (data_has_safety_tag) gpa.free(safety_buffer);

-    const data_ptr: [*]const u8 = if (data_has_safety_tag)
-        if (zir.instructions.len == 0)
-            undefined
-        else
-            @ptrCast(safety_buffer.ptr)
-    else
-        @ptrCast(zir.instructions.items(.data).ptr);
-
     if (data_has_safety_tag) {
         // The `Data` union has a safety tag but in the file format we store it without.
         for (zir.instructions.items(.data), 0..) |*data, i| {
@@ -2912,29 +2888,20 @@ pub fn saveZirCache(gpa: Allocator, cache_file: std.fs.File, stat: std.fs.File.S
         .stat_inode = stat.inode,
         .stat_mtime = stat.mtime,
     };
-    var iovecs: [5]std.posix.iovec_const = .{
-        .{
-            .base = @ptrCast(&header),
-            .len = @sizeOf(Zir.Header),
-        },
-        .{
-            .base = @ptrCast(zir.instructions.items(.tag).ptr),
-            .len = zir.instructions.len,
-        },
-        .{
-            .base = data_ptr,
-            .len = zir.instructions.len * 8,
-        },
-        .{
-            .base = zir.string_bytes.ptr,
-            .len = zir.string_bytes.len,
-        },
-        .{
-            .base = @ptrCast(zir.extra.ptr),
-            .len = zir.extra.len * 4,
-        },
+    var vecs = [_][]const u8{
+        @ptrCast((&header)[0..1]),
+        @ptrCast(zir.instructions.items(.tag)),
+        if (data_has_safety_tag)
+            @ptrCast(safety_buffer)
+        else
+            @ptrCast(zir.instructions.items(.data)),
+        zir.string_bytes,
+        @ptrCast(zir.extra),
     };
-    try cache_file.writevAll(&iovecs);
+    var cache_fw = cache_file.writer(&.{});
+    cache_fw.interface.writeVecAll(&vecs) catch |err| switch (err) {
+        error.WriteFailed => return cache_fw.err.?,
+    };
 }

 pub fn saveZoirCache(cache_file: std.fs.File, stat: std.fs.File.Stat, zoir: Zoir) std.fs.File.WriteError!void {
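On the write side the hand-built posix iovec_const arrays likewise become plain []const u8 slices passed to writeVecAll, with error.WriteFailed translated back into the error stored on the File.Writer. A reduced sketch of that pattern under the same assumptions; the two payload slices are invented for the example.

const std = @import("std");

// Hypothetical two-part record write, following the writeVecAll pattern above.
fn writeRecord(file: std.fs.File, head: []const u8, body: []const u8) !void {
    var vecs = [_][]const u8{ head, body };
    var file_writer = file.writer(&.{}); // unbuffered File.Writer
    file_writer.interface.writeVecAll(&vecs) catch |err| switch (err) {
        // WriteFailed carries no detail; the underlying error lives on the File.Writer.
        error.WriteFailed => return file_writer.err.?,
    };
}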
@@ -2950,48 +2917,24 @@ pub fn saveZoirCache(cache_file: std.fs.File, stat: std.fs.File.Stat, zoir: Zoir
         .stat_inode = stat.inode,
         .stat_mtime = stat.mtime,
     };
-    var iovecs: [9]std.posix.iovec_const = .{
-        .{
-            .base = @ptrCast(&header),
-            .len = @sizeOf(Zoir.Header),
-        },
-        .{
-            .base = @ptrCast(zoir.nodes.items(.tag)),
-            .len = zoir.nodes.len * @sizeOf(Zoir.Node.Repr.Tag),
-        },
-        .{
-            .base = @ptrCast(zoir.nodes.items(.data)),
-            .len = zoir.nodes.len * 4,
-        },
-        .{
-            .base = @ptrCast(zoir.nodes.items(.ast_node)),
-            .len = zoir.nodes.len * 4,
-        },
-        .{
-            .base = @ptrCast(zoir.extra),
-            .len = zoir.extra.len * 4,
-        },
-        .{
-            .base = @ptrCast(zoir.limbs),
-            .len = zoir.limbs.len * @sizeOf(std.math.big.Limb),
-        },
-        .{
-            .base = zoir.string_bytes.ptr,
-            .len = zoir.string_bytes.len,
-        },
-        .{
-            .base = @ptrCast(zoir.compile_errors),
-            .len = zoir.compile_errors.len * @sizeOf(Zoir.CompileError),
-        },
-        .{
-            .base = @ptrCast(zoir.error_notes),
-            .len = zoir.error_notes.len * @sizeOf(Zoir.CompileError.Note),
-        },
+    var vecs = [_][]const u8{
+        @ptrCast((&header)[0..1]),
+        @ptrCast(zoir.nodes.items(.tag)),
+        @ptrCast(zoir.nodes.items(.data)),
+        @ptrCast(zoir.nodes.items(.ast_node)),
+        @ptrCast(zoir.extra),
+        @ptrCast(zoir.limbs),
+        zoir.string_bytes,
+        @ptrCast(zoir.compile_errors),
+        @ptrCast(zoir.error_notes),
     };
-    try cache_file.writevAll(&iovecs);
+    var cache_fw = cache_file.writer(&.{});
+    cache_fw.interface.writeVecAll(&vecs) catch |err| switch (err) {
+        error.WriteFailed => return cache_fw.err.?,
+    };
 }

-pub fn loadZoirCacheBody(gpa: Allocator, header: Zoir.Header, cache_file: std.fs.File) !Zoir {
+pub fn loadZoirCacheBody(gpa: Allocator, header: Zoir.Header, cache_br: *std.io.Reader) !Zoir {
     var zoir: Zoir = .{
         .nodes = .empty,
         .extra = &.{},
@@ -3017,49 +2960,17 @@ pub fn loadZoirCacheBody(gpa: Allocator, header: Zoir.Header, cache_file: std.fs
     zoir.compile_errors = try gpa.alloc(Zoir.CompileError, header.compile_errors_len);
     zoir.error_notes = try gpa.alloc(Zoir.CompileError.Note, header.error_notes_len);

-    var iovecs: [8]std.posix.iovec = .{
-        .{
-            .base = @ptrCast(zoir.nodes.items(.tag)),
-            .len = header.nodes_len * @sizeOf(Zoir.Node.Repr.Tag),
-        },
-        .{
-            .base = @ptrCast(zoir.nodes.items(.data)),
-            .len = header.nodes_len * 4,
-        },
-        .{
-            .base = @ptrCast(zoir.nodes.items(.ast_node)),
-            .len = header.nodes_len * 4,
-        },
-        .{
-            .base = @ptrCast(zoir.extra),
-            .len = header.extra_len * 4,
-        },
-        .{
-            .base = @ptrCast(zoir.limbs),
-            .len = header.limbs_len * @sizeOf(std.math.big.Limb),
-        },
-        .{
-            .base = zoir.string_bytes.ptr,
-            .len = header.string_bytes_len,
-        },
-        .{
-            .base = @ptrCast(zoir.compile_errors),
-            .len = header.compile_errors_len * @sizeOf(Zoir.CompileError),
-        },
-        .{
-            .base = @ptrCast(zoir.error_notes),
-            .len = header.error_notes_len * @sizeOf(Zoir.CompileError.Note),
-        },
+    var vecs = [_][]u8{
+        @ptrCast(zoir.nodes.items(.tag)),
+        @ptrCast(zoir.nodes.items(.data)),
+        @ptrCast(zoir.nodes.items(.ast_node)),
+        @ptrCast(zoir.extra),
+        @ptrCast(zoir.limbs),
+        zoir.string_bytes,
+        @ptrCast(zoir.compile_errors),
+        @ptrCast(zoir.error_notes),
     };
-
-    const bytes_expected = expected: {
-        var n: usize = 0;
-        for (iovecs) |v| n += v.len;
-        break :expected n;
-    };
-
-    const bytes_read = try cache_file.readvAll(&iovecs);
-    if (bytes_read != bytes_expected) return error.UnexpectedFileSize;
+    try cache_br.readVecAll(&vecs);
     return zoir;
 }

@@ -3071,7 +2982,7 @@ pub fn markDependeeOutdated(
     marked_po: enum { not_marked_po, marked_po },
     dependee: InternPool.Dependee,
 ) !void {
-    log.debug("outdated dependee: {}", .{zcu.fmtDependee(dependee)});
+    log.debug("outdated dependee: {f}", .{zcu.fmtDependee(dependee)});
     var it = zcu.intern_pool.dependencyIterator(dependee);
     while (it.next()) |depender| {
         if (zcu.outdated.getPtr(depender)) |po_dep_count| {
@@ -3079,9 +2990,9 @@ pub fn markDependeeOutdated(
                 .not_marked_po => {},
                 .marked_po => {
                     po_dep_count.* -= 1;
-                    log.debug("outdated {} => already outdated {} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender), po_dep_count.* });
+                    log.debug("outdated {f} => already outdated {f} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender), po_dep_count.* });
                     if (po_dep_count.* == 0) {
-                        log.debug("outdated ready: {}", .{zcu.fmtAnalUnit(depender)});
+                        log.debug("outdated ready: {f}", .{zcu.fmtAnalUnit(depender)});
                         try zcu.outdated_ready.put(zcu.gpa, depender, {});
                     }
                 },
@@ -3102,9 +3013,9 @@ pub fn markDependeeOutdated(
             depender,
             new_po_dep_count,
         );
-        log.debug("outdated {} => new outdated {} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender), new_po_dep_count });
+        log.debug("outdated {f} => new outdated {f} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender), new_po_dep_count });
         if (new_po_dep_count == 0) {
-            log.debug("outdated ready: {}", .{zcu.fmtAnalUnit(depender)});
+            log.debug("outdated ready: {f}", .{zcu.fmtAnalUnit(depender)});
             try zcu.outdated_ready.put(zcu.gpa, depender, {});
         }
         // If this is a Decl and was not previously PO, we must recursively
@@ -3117,16 +3028,16 @@ pub fn markDependeeOutdated(
 }

 pub fn markPoDependeeUpToDate(zcu: *Zcu, dependee: InternPool.Dependee) !void {
-    log.debug("up-to-date dependee: {}", .{zcu.fmtDependee(dependee)});
+    log.debug("up-to-date dependee: {f}", .{zcu.fmtDependee(dependee)});
     var it = zcu.intern_pool.dependencyIterator(dependee);
     while (it.next()) |depender| {
         if (zcu.outdated.getPtr(depender)) |po_dep_count| {
             // This depender is already outdated, but it now has one
             // less PO dependency!
             po_dep_count.* -= 1;
-            log.debug("up-to-date {} => {} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender), po_dep_count.* });
+            log.debug("up-to-date {f} => {f} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender), po_dep_count.* });
             if (po_dep_count.* == 0) {
-                log.debug("outdated ready: {}", .{zcu.fmtAnalUnit(depender)});
+                log.debug("outdated ready: {f}", .{zcu.fmtAnalUnit(depender)});
                 try zcu.outdated_ready.put(zcu.gpa, depender, {});
             }
             continue;
@@ -3140,11 +3051,11 @@ pub fn markPoDependeeUpToDate(zcu: *Zcu, dependee: InternPool.Dependee) !void {
         };
         if (ptr.* > 1) {
             ptr.* -= 1;
-            log.debug("up-to-date {} => {} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender), ptr.* });
+            log.debug("up-to-date {f} => {f} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender), ptr.* });
             continue;
         }

-        log.debug("up-to-date {} => {} po_deps=0 (up-to-date)", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender) });
+        log.debug("up-to-date {f} => {f} po_deps=0 (up-to-date)", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender) });

         // This dependency is no longer PO, i.e. is known to be up-to-date.
         assert(zcu.potentially_outdated.swapRemove(depender));
@@ -3173,7 +3084,7 @@ fn markTransitiveDependersPotentiallyOutdated(zcu: *Zcu, maybe_outdated: AnalUni
         .func => |func_index| .{ .interned = func_index }, // IES
         .memoized_state => |stage| .{ .memoized_state = stage },
     };
-    log.debug("potentially outdated dependee: {}", .{zcu.fmtDependee(dependee)});
+    log.debug("potentially outdated dependee: {f}", .{zcu.fmtDependee(dependee)});
     var it = ip.dependencyIterator(dependee);
     while (it.next()) |po| {
         if (zcu.outdated.getPtr(po)) |po_dep_count| {
@@ -3183,17 +3094,17 @@ fn markTransitiveDependersPotentiallyOutdated(zcu: *Zcu, maybe_outdated: AnalUni
                 _ = zcu.outdated_ready.swapRemove(po);
             }
             po_dep_count.* += 1;
-            log.debug("po {} => {} [outdated] po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(po), po_dep_count.* });
+            log.debug("po {f} => {f} [outdated] po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(po), po_dep_count.* });
             continue;
         }
         if (zcu.potentially_outdated.getPtr(po)) |n| {
             // There is now one more PO dependency.
             n.* += 1;
-            log.debug("po {} => {} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(po), n.* });
+            log.debug("po {f} => {f} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(po), n.* });
             continue;
         }
         try zcu.potentially_outdated.putNoClobber(zcu.gpa, po, 1);
-        log.debug("po {} => {} po_deps=1", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(po) });
+        log.debug("po {f} => {f} po_deps=1", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(po) });
         // This AnalUnit was not already PO, so we must recursively mark its dependers as also PO.
         try zcu.markTransitiveDependersPotentiallyOutdated(po);
     }
@@ -3222,7 +3133,7 @@ pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?AnalUnit {

     if (zcu.outdated_ready.count() > 0) {
         const unit = zcu.outdated_ready.keys()[0];
-        log.debug("findOutdatedToAnalyze: trivial {}", .{zcu.fmtAnalUnit(unit)});
+        log.debug("findOutdatedToAnalyze: trivial {f}", .{zcu.fmtAnalUnit(unit)});
         return unit;
     }

@@ -3273,7 +3184,7 @@ pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?AnalUnit {
         }
     }

-    log.debug("findOutdatedToAnalyze: heuristic returned '{}' ({d} dependers)", .{
+    log.debug("findOutdatedToAnalyze: heuristic returned '{f}' ({d} dependers)", .{
         zcu.fmtAnalUnit(chosen_unit.?),
         chosen_unit_dependers,
     });
@@ -4072,7 +3983,7 @@ fn resolveReferencesInner(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolv
             const referencer = kv.value;
             try checked_types.putNoClobber(gpa, ty, {});

-            log.debug("handle type '{}'", .{Type.fromInterned(ty).containerTypeName(ip).fmt(ip)});
+            log.debug("handle type '{f}'", .{Type.fromInterned(ty).containerTypeName(ip).fmt(ip)});

             // If this type undergoes type resolution, the corresponding `AnalUnit` is automatically referenced.
             const has_resolution: bool = switch (ip.indexToKey(ty)) {
@@ -4108,7 +4019,7 @@ fn resolveReferencesInner(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolv
                     // `comptime` decls are always analyzed.
                     const unit: AnalUnit = .wrap(.{ .@"comptime" = cu });
                     if (!result.contains(unit)) {
-                        log.debug("type '{}': ref comptime %{}", .{
+                        log.debug("type '{f}': ref comptime %{}", .{
                             Type.fromInterned(ty).containerTypeName(ip).fmt(ip),
                             @intFromEnum(ip.getComptimeUnit(cu).zir_index.resolve(ip) orelse continue),
                         });
@@ -4139,7 +4050,7 @@ fn resolveReferencesInner(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolv
                         },
                     };
                     if (want_analysis) {
-                        log.debug("type '{}': ref test %{}", .{
+                        log.debug("type '{f}': ref test %{}", .{
                             Type.fromInterned(ty).containerTypeName(ip).fmt(ip),
                             @intFromEnum(inst_info.inst),
                         });
@@ -4158,7 +4069,7 @@ fn resolveReferencesInner(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolv
                     if (decl.linkage == .@"export") {
                         const unit: AnalUnit = .wrap(.{ .nav_val = nav });
                         if (!result.contains(unit)) {
-                            log.debug("type '{}': ref named %{}", .{
+                            log.debug("type '{f}': ref named %{}", .{
                                 Type.fromInterned(ty).containerTypeName(ip).fmt(ip),
                                 @intFromEnum(inst_info.inst),
                             });
@@ -4174,7 +4085,7 @@ fn resolveReferencesInner(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolv
                     if (decl.linkage == .@"export") {
                         const unit: AnalUnit = .wrap(.{ .nav_val = nav });
                         if (!result.contains(unit)) {
-                            log.debug("type '{}': ref named %{}", .{
+                            log.debug("type '{f}': ref named %{}", .{
                                 Type.fromInterned(ty).containerTypeName(ip).fmt(ip),
                                 @intFromEnum(inst_info.inst),
                             });
@@ -4199,7 +4110,7 @@ fn resolveReferencesInner(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolv
             try unit_queue.put(gpa, other, kv.value); // same reference location
         }

-        log.debug("handle unit '{}'", .{zcu.fmtAnalUnit(unit)});
+        log.debug("handle unit '{f}'", .{zcu.fmtAnalUnit(unit)});

         if (zcu.reference_table.get(unit)) |first_ref_idx| {
             assert(first_ref_idx != std.math.maxInt(u32));
@@ -4207,7 +4118,7 @@ fn resolveReferencesInner(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolv
             while (ref_idx != std.math.maxInt(u32)) {
                 const ref = zcu.all_references.items[ref_idx];
                 if (!result.contains(ref.referenced)) {
-                    log.debug("unit '{}': ref unit '{}'", .{
+                    log.debug("unit '{f}': ref unit '{f}'", .{
                         zcu.fmtAnalUnit(unit),
                         zcu.fmtAnalUnit(ref.referenced),
                     });
@@ -4226,7 +4137,7 @@ fn resolveReferencesInner(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolv
             while (ref_idx != std.math.maxInt(u32)) {
                 const ref = zcu.all_type_references.items[ref_idx];
                 if (!checked_types.contains(ref.referenced)) {
-                    log.debug("unit '{}': ref type '{}'", .{
+                    log.debug("unit '{f}': ref type '{f}'", .{
                         zcu.fmtAnalUnit(unit),
                         Type.fromInterned(ref.referenced).containerTypeName(ip).fmt(ip),
                     });

@@ -343,7 +343,7 @@ fn loadZirZoirCache(
         .zon => Zoir.Header,
     };

-    var buffer: [@sizeOf(Header)]u8 = undefined;
+    var buffer: [2000]u8 = undefined;
     var cache_fr = cache_file.reader(&buffer);
     cache_fr.size = stat.size;
     const cache_br = &cache_fr.interface;