Mirror of https://github.com/ziglang/zig.git (synced 2026-02-13 21:08:36 +00:00)

Merge pull request #9047 from g-w1/spider-astgen

stage2 astgen: catch unused vars

commit c6844072ce
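
This merge makes stage2's AstGen report compile errors for local variables, function parameters, and captures that are never used, and updates the standard library to pass the new check. The fixes in the diff below all take one of three shapes: delete the declaration, discard the value explicitly with `_ = x;`, or replace an unused capture with `_`. A minimal sketch of the new rule (the names here are illustrative, not taken from the diff):

    pub fn init(options: Options) Self {
        // stage2 AstGen now errors with "unused function parameter"
        // unless the value is explicitly discarded:
        _ = options;
        return Self{};
    }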
@@ -1017,7 +1017,6 @@ fn tokenizeAndPrint(docgen_tokenizer: *Tokenizer, out: anytype, source_token: To
}

fn genHtml(allocator: *mem.Allocator, tokenizer: *Tokenizer, toc: *Toc, out: anytype, zig_exe: []const u8, do_code_tests: bool) !void {
    var code_progress_index: usize = 0;
    var progress = Progress{};
    const root_node = try progress.start("Generating docgen examples", toc.nodes.len);
    defer root_node.end();

@@ -1090,7 +1089,6 @@ fn genHtml(allocator: *mem.Allocator, tokenizer: *Tokenizer, toc: *Toc, out: any

    switch (code.id) {
        Code.Id.Exe => |expected_outcome| code_block: {
            const name_plus_bin_ext = try std.fmt.allocPrint(allocator, "{s}{s}", .{ code.name, exe_ext });
            var build_args = std.ArrayList([]const u8).init(allocator);
            defer build_args.deinit();
            try build_args.appendSlice(&[_][]const u8{

@@ -1361,19 +1359,9 @@ fn genHtml(allocator: *mem.Allocator, tokenizer: *Tokenizer, toc: *Toc, out: any
        },
        Code.Id.Obj => |maybe_error_match| {
            const name_plus_obj_ext = try std.fmt.allocPrint(allocator, "{s}{s}", .{ code.name, obj_ext });
            const tmp_obj_file_name = try fs.path.join(
                allocator,
                &[_][]const u8{ tmp_dir_name, name_plus_obj_ext },
            );
            var build_args = std.ArrayList([]const u8).init(allocator);
            defer build_args.deinit();

            const name_plus_h_ext = try std.fmt.allocPrint(allocator, "{s}.h", .{code.name});
            const output_h_file_name = try fs.path.join(
                allocator,
                &[_][]const u8{ tmp_dir_name, name_plus_h_ext },
            );

            try build_args.appendSlice(&[_][]const u8{
                zig_exe,
                "build-obj",

@@ -295,7 +295,7 @@ fn refreshWithHeldLock(self: *Progress) void {
        end += 1;
    }

-    _ = file.write(self.output_buffer[0..end]) catch |e| {
+    _ = file.write(self.output_buffer[0..end]) catch {
        // Stop trying to write to this file once it errors.
        self.terminal = null;
    };
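
The Progress hunk above shows the treatment of unused error captures: a `catch |e|` whose capture is never read is now an error, so the capture is dropped. The same applies to `else |err|` branches over error unions, which become `else |_|`. A small sketch (the `readFile` and `fallback` names are hypothetical):

    // before: const data = readFile(path) catch |err| fallback(); // error: unused capture 'err'
    // after: drop the capture entirely
    const data = readFile(path) catch fallback();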
@@ -162,6 +162,7 @@ pub fn format(
    options: std.fmt.FormatOptions,
    out_stream: anytype,
) !void {
+    _ = options;
    if (fmt.len != 0) @compileError("Unknown format string: '" ++ fmt ++ "'");
    try std.fmt.format(out_stream, "{d}.{d}.{d}", .{ self.major, self.minor, self.patch });
    if (self.pre) |pre| try std.fmt.format(out_stream, "-{s}", .{pre});

@@ -259,7 +260,7 @@ test "SemanticVersion format" {

    // Invalid version string that may overflow.
    const big_invalid = "99999999999999999999999.999999999999999999.99999999999999999----RC-SNAPSHOT.12.09.1--------------------------------..12";
-    if (parse(big_invalid)) |ver| std.debug.panic("expected error, found {}", .{ver}) else |err| {}
+    if (parse(big_invalid)) |ver| std.debug.panic("expected error, found {}", .{ver}) else |_| {}
}

test "SemanticVersion precedence" {

@@ -518,8 +518,8 @@ pub fn cpuCount() CpuCountError!usize {
    },
    .haiku => {
        var count: u32 = undefined;
-        var system_info: os.system_info = undefined;
-        const rc = os.system.get_system_info(&system_info);
+        // var system_info: os.system_info = undefined;
+        // const rc = os.system.get_system_info(&system_info);
        count = system_info.cpu_count;
        return @intCast(usize, count);
    },

@@ -40,12 +40,18 @@ else

pub const SingleThreadedCondition = struct {
    pub fn wait(cond: *SingleThreadedCondition, mutex: *Mutex) void {
+        _ = cond;
+        _ = mutex;
        unreachable; // deadlock detected
    }

-    pub fn signal(cond: *SingleThreadedCondition) void {}
+    pub fn signal(cond: *SingleThreadedCondition) void {
+        _ = cond;
+    }

-    pub fn broadcast(cond: *SingleThreadedCondition) void {}
+    pub fn broadcast(cond: *SingleThreadedCondition) void {
+        _ = cond;
+    }
};

pub const WindowsCondition = struct {

@@ -105,6 +105,7 @@ pub const DebugEvent = struct {
    }

    pub fn timedWait(ev: *DebugEvent, timeout: u64) TimedWaitResult {
+        _ = timeout;
        switch (ev.state) {
            .unset => return .timed_out,
            .set => return .event_set,

@@ -174,7 +175,10 @@ pub const AtomicEvent = struct {
    };

    pub const SpinFutex = struct {
-        fn wake(waiters: *u32, wake_count: u32) void {}
+        fn wake(waiters: *u32, wake_count: u32) void {
+            _ = waiters;
+            _ = wake_count;
+        }

        fn wait(waiters: *u32, timeout: ?u64) !void {
            var timer: time.Timer = undefined;

@@ -193,6 +197,7 @@ pub const AtomicEvent = struct {

    pub const LinuxFutex = struct {
        fn wake(waiters: *u32, wake_count: u32) void {
+            _ = wake_count;
            const waiting = std.math.maxInt(i32); // wake_count
            const ptr = @ptrCast(*const i32, waiters);
            const rc = linux.futex_wake(ptr, linux.FUTEX_WAKE | linux.FUTEX_PRIVATE_FLAG, waiting);

@@ -40,9 +40,11 @@ pub fn StringArrayHashMapUnmanaged(comptime V: type) type {

pub const StringContext = struct {
    pub fn hash(self: @This(), s: []const u8) u32 {
+        _ = self;
        return hashString(s);
    }
    pub fn eql(self: @This(), a: []const u8, b: []const u8) bool {
+        _ = self;
        return eqlString(a, b);
    }
};

@@ -1324,17 +1326,17 @@ pub fn ArrayHashMapUnmanaged(
        }
        fn removeFromIndexByIndexGeneric(self: *Self, entry_index: usize, ctx: ByIndexContext, header: *IndexHeader, comptime I: type, indexes: []Index(I)) void {
            const slot = self.getSlotByIndex(entry_index, ctx, header, I, indexes);
-            self.removeSlot(slot, header, I, indexes);
+            removeSlot(slot, header, I, indexes);
        }

        fn removeFromIndexByKey(self: *Self, key: anytype, ctx: anytype, header: *IndexHeader, comptime I: type, indexes: []Index(I)) ?usize {
            const slot = self.getSlotByKey(key, ctx, header, I, indexes) orelse return null;
            const removed_entry_index = indexes[slot].entry_index;
-            self.removeSlot(slot, header, I, indexes);
+            removeSlot(slot, header, I, indexes);
            return removed_entry_index;
        }

-        fn removeSlot(self: *Self, removed_slot: usize, header: *IndexHeader, comptime I: type, indexes: []Index(I)) void {
+        fn removeSlot(removed_slot: usize, header: *IndexHeader, comptime I: type, indexes: []Index(I)) void {
            const start_index = removed_slot +% 1;
            const end_index = start_index +% indexes.len;

@@ -1619,13 +1621,13 @@ pub fn ArrayHashMapUnmanaged(
            if (self.index_header) |header| {
                p("\n", .{});
                switch (header.capacityIndexType()) {
-                    .u8 => self.dumpIndex(header, u8),
-                    .u16 => self.dumpIndex(header, u16),
-                    .u32 => self.dumpIndex(header, u32),
+                    .u8 => dumpIndex(header, u8),
+                    .u16 => dumpIndex(header, u16),
+                    .u32 => dumpIndex(header, u32),
                }
            }
        }
-        fn dumpIndex(self: Self, header: *IndexHeader, comptime I: type) void {
+        fn dumpIndex(header: *IndexHeader, comptime I: type) void {
            const p = std.debug.print;
            p(" index len=0x{x} type={}\n", .{ header.length(), header.capacityIndexType() });
            const indexes = header.indexes(I);

@@ -1918,7 +1920,7 @@ test "iterator hash map" {
    try testing.expect(count == 3);
    try testing.expect(it.next() == null);

-    for (buffer) |v, i| {
+    for (buffer) |_, i| {
        try testing.expect(buffer[@intCast(usize, keys[i])] == values[i]);
    }

@@ -1930,7 +1932,7 @@ test "iterator hash map" {
        if (count >= 2) break;
    }

-    for (buffer[0..2]) |v, i| {
+    for (buffer[0..2]) |_, i| {
        try testing.expect(buffer[@intCast(usize, keys[i])] == values[i]);
    }
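
The two loops above show the pattern for unused loop captures: in this era of Zig, `for (xs) |v, i|` captures both the element and the index, and an unread element capture must become `_`. A sketch (hypothetical `buffer` and `values`):

    for (buffer) |_, i| {
        // only the index is needed; the element capture is discarded
        try testing.expect(buffer[i] == values[i]);
    }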
@@ -2154,6 +2156,7 @@ test "compile everything" {
pub fn getHashPtrAddrFn(comptime K: type, comptime Context: type) (fn (Context, K) u32) {
    return struct {
        fn hash(ctx: Context, key: K) u32 {
+            _ = ctx;
            return getAutoHashFn(usize, void)({}, @ptrToInt(key));
        }
    }.hash;

@@ -2162,6 +2165,7 @@ pub fn getHashPtrAddrFn(comptime K: type, comptime Context: type) (fn (Context,
pub fn getTrivialEqlFn(comptime K: type, comptime Context: type) (fn (Context, K, K) bool) {
    return struct {
        fn eql(ctx: Context, a: K, b: K) bool {
+            _ = ctx;
            return a == b;
        }
    }.eql;

@@ -2177,6 +2181,7 @@ pub fn AutoContext(comptime K: type) type {
pub fn getAutoHashFn(comptime K: type, comptime Context: type) (fn (Context, K) u32) {
    return struct {
        fn hash(ctx: Context, key: K) u32 {
+            _ = ctx;
            if (comptime trait.hasUniqueRepresentation(K)) {
                return @truncate(u32, Wyhash.hash(0, std.mem.asBytes(&key)));
            } else {

@@ -2191,6 +2196,7 @@ pub fn getAutoHashFn(comptime K: type, comptime Context: type) (fn (Context, K)
pub fn getAutoEqlFn(comptime K: type, comptime Context: type) (fn (Context, K, K) bool) {
    return struct {
        fn eql(ctx: Context, a: K, b: K) bool {
+            _ = ctx;
            return meta.eql(a, b);
        }
    }.eql;

@@ -2217,6 +2223,7 @@ pub fn autoEqlIsCheap(comptime K: type) bool {
pub fn getAutoHashStratFn(comptime K: type, comptime Context: type, comptime strategy: std.hash.Strategy) (fn (Context, K) u32) {
    return struct {
        fn hash(ctx: Context, key: K) u32 {
+            _ = ctx;
            var hasher = Wyhash.init(0);
            std.hash.autoHashStrat(&hasher, key, strategy);
            return @truncate(u32, hasher.final());

@@ -232,6 +232,7 @@ test "Atomic.loadUnchecked" {

test "Atomic.storeUnchecked" {
    inline for (atomicIntTypes()) |Int| {
+        _ = Int;
        var x = Atomic(usize).init(5);
        x.storeUnchecked(10);
        try testing.expectEqual(x.loadUnchecked(), 10);

@@ -250,6 +251,7 @@ test "Atomic.load" {
test "Atomic.store" {
    inline for (atomicIntTypes()) |Int| {
        inline for (.{ .Unordered, .Monotonic, .Release, .SeqCst }) |ordering| {
+            _ = Int;
            var x = Atomic(usize).init(5);
            x.store(10, ordering);
            try testing.expectEqual(x.load(.SeqCst), 10);

@@ -112,9 +112,6 @@ pub const Base64Encoder = struct {
        const out_len = encoder.calcSize(source.len);
        assert(dest.len >= out_len);

        const nibbles = source.len / 3;
        const leftover = source.len - 3 * nibbles;

        var acc: u12 = 0;
        var acc_len: u4 = 0;
        var out_idx: usize = 0;

@@ -223,7 +220,6 @@ pub const Base64Decoder = struct {
        if (decoder.pad_char) |pad_char| {
            const padding_len = acc_len / 2;
            var padding_chars: usize = 0;
            var i: usize = 0;
            for (leftover) |c| {
                if (c != pad_char) {
                    return if (c == Base64Decoder.invalid_char) error.InvalidCharacter else error.InvalidPadding;

@@ -302,7 +298,6 @@ pub const Base64DecoderWithIgnore = struct {
        var leftover = source[leftover_idx.?..];
        if (decoder.pad_char) |pad_char| {
            var padding_chars: usize = 0;
            var i: usize = 0;
            for (leftover) |c| {
                if (decoder_with_ignore.char_is_ignored[c]) continue;
                if (c != pad_char) {

@@ -84,6 +84,7 @@ pub fn IntegerBitSet(comptime size: u16) type {

        /// Returns the number of bits in this bit set
        pub inline fn capacity(self: Self) usize {
+            _ = self;
            return bit_length;
        }

@@ -311,6 +312,7 @@ pub fn ArrayBitSet(comptime MaskIntType: type, comptime size: usize) type {

        /// Returns the number of bits in this bit set
        pub inline fn capacity(self: Self) usize {
+            _ = self;
            return bit_length;
        }

@@ -373,7 +375,7 @@ pub fn ArrayBitSet(comptime MaskIntType: type, comptime size: usize) type {

        /// Flips every bit in the bit set.
        pub fn toggleAll(self: *Self) void {
-            for (self.masks) |*mask, i| {
+            for (self.masks) |*mask| {
                mask.* = ~mask.*;
            }

@@ -642,7 +644,7 @@ pub const DynamicBitSetUnmanaged = struct {
        if (bit_length == 0) return;

        const num_masks = numMasks(self.bit_length);
-        for (self.masks[0..num_masks]) |*mask, i| {
+        for (self.masks[0..num_masks]) |*mask| {
            mask.* = ~mask.*;
        }

@@ -390,6 +390,7 @@ pub const Builder = struct {
    }

    pub fn version(self: *const Builder, major: u32, minor: u32, patch: u32) LibExeObjStep.SharedLibKind {
+        _ = self;
        return .{
            .versioned = .{
                .major = major,

@@ -543,7 +544,7 @@ pub const Builder = struct {
            return null;
        },
        .scalar => |s| {
-            const n = std.fmt.parseFloat(T, s) catch |err| {
+            const n = std.fmt.parseFloat(T, s) catch {
                warn("Expected -D{s} to be a float of type {s}.\n\n", .{ name, @typeName(T) });
                self.markInvalidUserInput();
                return null;

@@ -3129,7 +3130,9 @@ pub const Step = struct {
        self.dependencies.append(other) catch unreachable;
    }

-    fn makeNoOp(self: *Step) anyerror!void {}
+    fn makeNoOp(self: *Step) anyerror!void {
+        _ = self;
+    }

    pub fn cast(step: *Step, comptime T: type) ?*T {
        if (step.id == T.base_id) {

@@ -139,6 +139,7 @@ const BinaryElfOutput = struct {
    }

    fn segmentSortCompare(context: void, left: *BinaryElfSegment, right: *BinaryElfSegment) bool {
+        _ = context;
        if (left.physicalAddress < right.physicalAddress) {
            return true;
        }

@@ -149,6 +150,7 @@ const BinaryElfOutput = struct {
    }

    fn sectionSortCompare(context: void, left: *BinaryElfSection, right: *BinaryElfSection) bool {
+        _ = context;
        return left.binaryOffset < right.binaryOffset;
    }
};

@@ -65,6 +65,8 @@ pub const StackTrace = struct {
        options: std.fmt.FormatOptions,
        writer: anytype,
    ) !void {
+        _ = fmt;
+        _ = options;
        var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
        defer arena.deinit();
        const debug_info = std.debug.getSelfDebugInfo() catch |err| {

@@ -521,6 +523,7 @@ pub const Version = struct {
        options: std.fmt.FormatOptions,
        out_stream: anytype,
    ) !void {
+        _ = options;
        if (fmt.len == 0) {
            if (self.patch == 0) {
                if (self.minor == 0) {

@@ -351,7 +351,6 @@ pub const Tokenizer = struct {
    pp_directive: bool = false,

    pub fn next(self: *Tokenizer) Token {
        const start_index = self.index;
        var result = Token{
            .id = .Eof,
            .start = self.index,

@@ -1380,12 +1379,12 @@ test "operators" {

test "keywords" {
    try expectTokens(
-        \\auto break case char const continue default do
-        \\double else enum extern float for goto if int
-        \\long register return short signed sizeof static
-        \\struct switch typedef union unsigned void volatile
-        \\while _Bool _Complex _Imaginary inline restrict _Alignas
-        \\_Alignof _Atomic _Generic _Noreturn _Static_assert _Thread_local
+        \\auto break case char const continue default do
+        \\double else enum extern float for goto if int
+        \\long register return short signed sizeof static
+        \\struct switch typedef union unsigned void volatile
+        \\while _Bool _Complex _Imaginary inline restrict _Alignas
+        \\_Alignof _Atomic _Generic _Noreturn _Static_assert _Thread_local
        \\
    , &[_]Token.Id{
        .Keyword_auto,

@@ -62,6 +62,8 @@ pub fn GzipStream(comptime ReaderType: type) type {
        const XFL = header[8];
        // Operating system where the compression took place
        const OS = header[9];
+        _ = XFL;
+        _ = OS;

        if (FLG & FEXTRA != 0) {
            // Skip the extra data, we could read and expose it to the user

@@ -35,6 +35,7 @@ pub fn ZlibStream(comptime ReaderType: type) type {
        const CM = @truncate(u4, header[0]);
        const CINFO = @truncate(u4, header[0] >> 4);
        const FCHECK = @truncate(u5, header[1]);
+        _ = FCHECK;
        const FDICT = @truncate(u1, header[1] >> 5);

        if ((@as(u16, header[0]) << 8 | header[1]) % 31 != 0)

@@ -23,6 +23,7 @@ pub fn ComptimeStringMap(comptime V: type, comptime kvs: anytype) type {
    var sorted_kvs: [kvs.len]KV = undefined;
    const lenAsc = (struct {
        fn lenAsc(context: void, a: KV, b: KV) bool {
+            _ = context;
            return a.key.len < b.key.len;
        }
    }).lenAsc;

@@ -346,7 +346,7 @@ test "ed25519 test vectors" {
            .expected = error.IdentityElement, // 11 - small-order A
        },
    };
-    for (entries) |entry, i| {
+    for (entries) |entry| {
        var msg: [entry.msg_hex.len / 2]u8 = undefined;
        _ = try fmt.hexToBytes(&msg, entry.msg_hex);
        var public_key: [32]u8 = undefined;

@@ -330,13 +330,10 @@ pub const Scalar = struct {
    const carry9 = z02 >> 56;
    const c01 = carry9;
    const carry10 = (z12 + c01) >> 56;
    const t21 = @truncate(u64, z12 + c01) & 0xffffffffffffff;
    const c11 = carry10;
    const carry11 = (z22 + c11) >> 56;
    const t22 = @truncate(u64, z22 + c11) & 0xffffffffffffff;
    const c21 = carry11;
    const carry12 = (z32 + c21) >> 56;
    const t23 = @truncate(u64, z32 + c21) & 0xffffffffffffff;
    const c31 = carry12;
    const carry13 = (z42 + c31) >> 56;
    const t24 = @truncate(u64, z42 + c31) & 0xffffffffffffff;

@@ -605,13 +602,10 @@ const ScalarDouble = struct {
    const carry0 = z01 >> 56;
    const c00 = carry0;
    const carry1 = (z11 + c00) >> 56;
    const t100 = @as(u64, @truncate(u64, z11 + c00)) & 0xffffffffffffff;
    const c10 = carry1;
    const carry2 = (z21 + c10) >> 56;
    const t101 = @as(u64, @truncate(u64, z21 + c10)) & 0xffffffffffffff;
    const c20 = carry2;
    const carry3 = (z31 + c20) >> 56;
    const t102 = @as(u64, @truncate(u64, z31 + c20)) & 0xffffffffffffff;
    const c30 = carry3;
    const carry4 = (z41 + c30) >> 56;
    const t103 = @as(u64, @truncate(u64, z41 + c30)) & 0xffffffffffffff;
@@ -49,8 +49,6 @@ pub const Block = struct {

        /// Encrypt a block with a round key.
        pub inline fn encrypt(block: Block, round_key: Block) Block {
            const src = &block.repr;

            const s0 = block.repr[0];
            const s1 = block.repr[1];
            const s2 = block.repr[2];

@@ -66,8 +64,6 @@ pub const Block = struct {

        /// Encrypt a block with the last round key.
        pub inline fn encryptLast(block: Block, round_key: Block) Block {
            const src = &block.repr;

            const t0 = block.repr[0];
            const t1 = block.repr[1];
            const t2 = block.repr[2];

@@ -88,8 +84,6 @@ pub const Block = struct {

        /// Decrypt a block with a round key.
        pub inline fn decrypt(block: Block, round_key: Block) Block {
            const src = &block.repr;

            const s0 = block.repr[0];
            const s1 = block.repr[1];
            const s2 = block.repr[2];

@@ -105,8 +99,6 @@ pub const Block = struct {

        /// Decrypt a block with the last round key.
        pub inline fn decryptLast(block: Block, round_key: Block) Block {
            const src = &block.repr;

            const t0 = block.repr[0];
            const t1 = block.repr[1];
            const t2 = block.repr[2];

@@ -114,7 +114,6 @@ test "Aes256Gcm - Empty message and no associated data" {
    const ad = "";
    const m = "";
    var c: [m.len]u8 = undefined;
    var m2: [m.len]u8 = undefined;
    var tag: [Aes256Gcm.tag_length]u8 = undefined;

    Aes256Gcm.encrypt(&c, &tag, m, ad, nonce, key);

@@ -271,7 +271,6 @@ test "AesOcb test vector 1" {
    var c: [0]u8 = undefined;
    Aes128Ocb.encrypt(&c, &tag, "", "", nonce, k);

    var expected_c: [c.len]u8 = undefined;
    var expected_tag: [tag.len]u8 = undefined;
    _ = try hexToBytes(&expected_tag, "785407BFFFC8AD9EDCC5520AC9111EE6");

@@ -48,7 +48,6 @@ const State = struct {
    fn expand0(state: *State, key: []const u8) void {
        var i: usize = 0;
        var j: usize = 0;
        var t: u32 = undefined;
        while (i < state.subkeys.len) : (i += 1) {
            state.subkeys[i] ^= toWord(key, &j);
        }

@@ -75,7 +74,6 @@ const State = struct {
    fn expand(state: *State, data: []const u8, key: []const u8) void {
        var i: usize = 0;
        var j: usize = 0;
        var t: u32 = undefined;
        while (i < state.subkeys.len) : (i += 1) {
            state.subkeys[i] ^= toWord(key, &j);
        }

@@ -394,6 +394,7 @@ pub const Blake3 = struct {
    /// Construct a new `Blake3` for the key derivation function. The context
    /// string should be hardcoded, globally unique, and application-specific.
    pub fn initKdf(context: []const u8, options: KdfOptions) Blake3 {
+        _ = options;
        var context_hasher = Blake3.init_internal(IV, DERIVE_KEY_CONTEXT);
        context_hasher.update(context);
        var context_key: [KEY_LEN]u8 = undefined;

@@ -444,7 +444,6 @@ fn ChaChaWith64BitNonce(comptime rounds_nb: usize) type {
    if (comptime @sizeOf(usize) > 4) {
        // A big block is giant: 256 GiB, but we can avoid this limitation
        var remaining_blocks: u32 = @intCast(u32, (in.len / big_block));
        var i: u32 = 0;
        while (remaining_blocks > 0) : (remaining_blocks -= 1) {
            ChaChaImpl(rounds_nb).chacha20Xor(out[cursor .. cursor + big_block], in[cursor .. cursor + big_block], k, c);
            c[1] += 1; // upper 32-bit of counter, generic chacha20Xor() doesn't know about this.

@@ -219,6 +219,7 @@ pub const Hash = struct {
    const Self = @This();

    pub fn init(options: Options) Self {
+        _ = options;
        return Self{
            .state = State{ .data = [_]u32{0} ** (State.BLOCKBYTES / 4) },
            .buf_off = 0,

@@ -45,6 +45,7 @@ pub const Md5 = struct {
    total_len: u64,

    pub fn init(options: Options) Self {
+        _ = options;
        return Self{
            .s = [_]u32{
                0x67452301,

@@ -63,7 +63,7 @@ pub fn add(a: CompressedScalar, b: CompressedScalar, endian: builtin.Endian) Non

/// Return -s (mod L)
pub fn neg(s: CompressedScalar, endian: builtin.Endian) NonCanonicalError!CompressedScalar {
-    return (try Scalar.fromBytes(a, endian)).neg().toBytes(endian);
+    return (try Scalar.fromBytes(s, endian)).neg().toBytes(endian);
}

/// Return (a-b) (mod L)

@@ -43,6 +43,7 @@ pub const Sha1 = struct {
    total_len: u64 = 0,

    pub fn init(options: Options) Self {
+        _ = options;
        return Self{
            .s = [_]u32{
                0x67452301,

@@ -95,6 +95,7 @@ fn Sha2x32(comptime params: Sha2Params32) type {
    total_len: u64 = 0,

    pub fn init(options: Options) Self {
+        _ = options;
        return Self{
            .s = [_]u32{
                params.iv0,

@@ -462,6 +463,7 @@ fn Sha2x64(comptime params: Sha2Params64) type {
    total_len: u128 = 0,

    pub fn init(options: Options) Self {
+        _ = options;
        return Self{
            .s = [_]u64{
                params.iv0,

@@ -28,6 +28,7 @@ fn Keccak(comptime bits: usize, comptime delim: u8) type {
    rate: usize,

    pub fn init(options: Options) Self {
+        _ = options;
        return Self{ .s = [_]u8{0} ** 200, .offset = 0, .rate = 200 - (bits / 4) };
    }
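
A likely reason the crypto `init` hunks above discard `options` rather than delete the parameter: every hash in `std.crypto` exposes the same `init(options: Options)` signature so implementations stay interchangeable, even where a given hash has no options to read. A sketch under that assumption (`initial_state` is illustrative):

    pub fn init(options: Options) Self {
        _ = options; // kept for a uniform init() API across hashes
        return Self{ .s = initial_state };
    }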
@@ -84,7 +84,7 @@ fn tlsCsprngFill(_: *const std.rand.Random, buffer: []u8) void {
        os.MAP_PRIVATE | os.MAP_ANONYMOUS,
        -1,
        0,
-    ) catch |err| {
+    ) catch {
        // Could not allocate memory for the local state, fall back to
        // the OS syscall.
        return fillWithOsEntropy(buffer);

@@ -325,6 +325,7 @@ pub fn writeStackTrace(
    debug_info: *DebugInfo,
    tty_config: TTY.Config,
) !void {
+    _ = allocator;
    if (builtin.strip_debug_info) return error.MissingDebugInfo;
    var frame_index: usize = 0;
    var frames_left: usize = std.math.min(stack_trace.index, stack_trace.instruction_addresses.len);

@@ -680,6 +681,7 @@ fn readCoffDebugInfo(allocator: *mem.Allocator, coff_file: File) !ModuleDebugInf
    try di.coff.loadSections();
    if (di.coff.getSection(".debug_info")) |sec| {
        // This coff file has embedded DWARF debug info
+        _ = sec;
        // TODO: free the section data slices
        const debug_info_data = di.coff.getSectionData(".debug_info", allocator) catch null;
        const debug_abbrev_data = di.coff.getSectionData(".debug_abbrev", allocator) catch null;

@@ -896,7 +898,6 @@ fn printLineFromFileAnyOs(out_stream: anytype, line_info: LineInfo) !void {
    var buf: [mem.page_size]u8 = undefined;
    var line: usize = 1;
    var column: usize = 1;
    var abs_index: usize = 0;
    while (true) {
        const amt_read = try f.read(buf[0..]);
        const slice = buf[0..amt_read];

@@ -931,6 +932,7 @@ const MachoSymbol = struct {
    }

    fn addressLessThan(context: void, lhs: MachoSymbol, rhs: MachoSymbol) bool {
+        _ = context;
        return lhs.address() < rhs.address();
    }
};

@@ -1135,6 +1137,7 @@ pub const DebugInfo = struct {

    if (os.dl_iterate_phdr(&ctx, anyerror, struct {
        fn callback(info: *os.dl_phdr_info, size: usize, context: *CtxTy) !void {
+            _ = size;
            // The base address is too high
            if (context.address < info.dlpi_addr)
                return;

@@ -1190,6 +1193,8 @@ pub const DebugInfo = struct {
    }

    fn lookupModuleHaiku(self: *DebugInfo, address: usize) !*ModuleDebugInfo {
+        _ = self;
+        _ = address;
        @panic("TODO implement lookup module for Haiku");
    }
};

@@ -283,6 +283,7 @@ fn parseFormValueBlock(allocator: *mem.Allocator, in_stream: anytype, endian: bu
}

fn parseFormValueConstant(allocator: *mem.Allocator, in_stream: anytype, signed: bool, endian: builtin.Endian, comptime size: i32) !FormValue {
+    _ = allocator;
    // TODO: Please forgive me, I've worked around zig not properly spilling some intermediate values here.
    // `nosuspend` should be removed from all the function calls once it is fixed.
    return FormValue{

@@ -310,6 +311,7 @@ fn parseFormValueConstant(allocator: *mem.Allocator, in_stream: anytype, signed:

// TODO the nosuspends here are workarounds
fn parseFormValueRef(allocator: *mem.Allocator, in_stream: anytype, endian: builtin.Endian, size: i32) !FormValue {
+    _ = allocator;
    return FormValue{
        .Ref = switch (size) {
            1 => try nosuspend in_stream.readInt(u8, endian),

@@ -453,13 +455,13 @@ pub const DwarfInfo = struct {
            if (this_die_obj.getAttr(AT_name)) |_| {
                const name = try this_die_obj.getAttrString(di, AT_name);
                break :x name;
-            } else if (this_die_obj.getAttr(AT_abstract_origin)) |ref| {
+            } else if (this_die_obj.getAttr(AT_abstract_origin)) |_| {
                // Follow the DIE it points to and repeat
                const ref_offset = try this_die_obj.getAttrRef(AT_abstract_origin);
                if (ref_offset > next_offset) return error.InvalidDebugInfo;
                try seekable.seekTo(this_unit_offset + ref_offset);
                this_die_obj = (try di.parseDie(in, abbrev_table, is_64)) orelse return error.InvalidDebugInfo;
-            } else if (this_die_obj.getAttr(AT_specification)) |ref| {
+            } else if (this_die_obj.getAttr(AT_specification)) |_| {
                // Follow the DIE it points to and repeat
                const ref_offset = try this_die_obj.getAttrRef(AT_specification);
                if (ref_offset > next_offset) return error.InvalidDebugInfo;

@@ -66,6 +66,7 @@ pub fn get_DYNAMIC() ?[*]elf.Dyn {
}

pub fn linkmap_iterator(phdrs: []elf.Phdr) !LinkMap.Iterator {
+    _ = phdrs;
    const _DYNAMIC = get_DYNAMIC() orelse {
        // No PT_DYNAMIC means this is either a statically-linked program or a
        // badly corrupted dynamically-linked one.

@@ -407,7 +408,7 @@ test "dynamic_library" {
        else => return error.SkipZigTest,
    };

-    const dynlib = DynLib.open(libname) catch |err| {
+    _ = DynLib.open(libname) catch |err| {
        try testing.expect(err == error.FileNotFound);
        return;
    };

@@ -18,7 +18,7 @@ const EnumField = std.builtin.TypeInfo.EnumField;
pub fn EnumFieldStruct(comptime E: type, comptime Data: type, comptime field_default: ?Data) type {
    const StructField = std.builtin.TypeInfo.StructField;
    var fields: []const StructField = &[_]StructField{};
-    for (std.meta.fields(E)) |field, i| {
+    for (std.meta.fields(E)) |field| {
        fields = fields ++ &[_]StructField{.{
            .name = field.name,
            .field_type = Data,

@@ -144,7 +144,7 @@ pub fn directEnumArrayDefault(
) [directEnumArrayLen(E, max_unused_slots)]Data {
    const len = comptime directEnumArrayLen(E, max_unused_slots);
    var result: [len]Data = if (default) |d| [_]Data{d} ** len else undefined;
-    inline for (@typeInfo(@TypeOf(init_values)).Struct.fields) |f, i| {
+    inline for (@typeInfo(@TypeOf(init_values)).Struct.fields) |f| {
        const enum_value = @field(E, f.name);
        const index = @intCast(usize, @enumToInt(enum_value));
        result[index] = @field(init_values, f.name);

@@ -334,6 +334,7 @@ pub fn EnumArray(comptime E: type, comptime V: type) type {
/// TODO: Once #8169 is fixed, consider switching this param
/// back to an optional.
pub fn NoExtension(comptime Self: type) type {
+    _ = Self;
    return NoExt;
}
const NoExt = struct {};

@@ -729,6 +730,7 @@ test "std.enums.ensureIndexer" {
}

fn ascByValue(ctx: void, comptime a: EnumField, comptime b: EnumField) bool {
+    _ = ctx;
    return a.value < b.value;
}
pub fn EnumIndexer(comptime E: type) type {

@@ -743,9 +745,11 @@ pub fn EnumIndexer(comptime E: type) type {
            pub const Key = E;
            pub const count: usize = 0;
            pub fn indexOf(e: E) usize {
+                _ = e;
                unreachable;
            }
            pub fn keyForIndex(i: usize) E {
+                _ = i;
                unreachable;
            }
        };

@@ -753,10 +757,11 @@ pub fn EnumIndexer(comptime E: type) type {
    std.sort.sort(EnumField, &fields, {}, ascByValue);
    const min = fields[0].value;
    const max = fields[fields.len - 1].value;
+    const fields_len = fields.len;
    if (max - min == fields.len - 1) {
        return struct {
            pub const Key = E;
-            pub const count = fields.len;
+            pub const count = fields_len;
            pub fn indexOf(e: E) usize {
                return @intCast(usize, @enumToInt(e) - min);
            }

@@ -774,7 +779,7 @@ pub fn EnumIndexer(comptime E: type) type {

    return struct {
        pub const Key = E;
-        pub const count = fields.len;
+        pub const count = fields_len;
        pub fn indexOf(e: E) usize {
            for (keys) |k, i| {
                if (k == e) return i;
@@ -308,7 +308,6 @@ test "std.event.Channel wraparound" {

    // add items to channel and pull them out until
    // the buffer wraps around, make sure it doesn't crash.
    var result: i32 = undefined;
    channel.put(5);
    try testing.expectEqual(@as(i32, 5), channel.get());
    channel.put(6);

@@ -130,7 +130,7 @@ test "std.event.Group" {
    // TODO this file has bit-rotted. repair it
    if (true) return error.SkipZigTest;

-    const handle = async testGroup(std.heap.page_allocator);
+    _ = async testGroup(std.heap.page_allocator);
}
fn testGroup(allocator: *Allocator) callconv(.Async) void {
    var count: usize = 0;

@@ -345,7 +345,7 @@ pub const Loop = struct {
    );
    errdefer windows.CloseHandle(self.os_data.io_port);

-    for (self.eventfd_resume_nodes) |*eventfd_node, i| {
+    for (self.eventfd_resume_nodes) |*eventfd_node| {
        eventfd_node.* = std.atomic.Stack(ResumeNode.EventFd).Node{
            .data = ResumeNode.EventFd{
                .base = ResumeNode{

@@ -680,7 +680,7 @@ pub const Loop = struct {
    fn run(func_args: Args, loop: *Loop, allocator: *mem.Allocator) void {
        loop.beginOneEvent();
        loop.yield();
-        const result = @call(.{}, func, func_args);
+        @call(.{}, func, func_args); // compile error when called with non-void ret type
        suspend {
            loop.finishOneEvent();
            allocator.destroy(@frame());

@@ -225,7 +225,7 @@ test "std.event.RwLock" {
    var lock = RwLock.init();
    defer lock.deinit();

-    const handle = testLock(std.heap.page_allocator, &lock);
+    _ = testLock(std.heap.page_allocator, &lock);

    const expected_result = [1]i32{shared_it_count * @intCast(i32, shared_test_data.len)} ** shared_test_data.len;
    try testing.expectEqualSlices(i32, expected_result, shared_test_data);

@@ -369,6 +369,7 @@ pub fn format(
}

pub fn formatAddress(value: anytype, options: FormatOptions, writer: anytype) @TypeOf(writer).Error!void {
+    _ = options;
    const T = @TypeOf(value);

    switch (@typeInfo(T)) {

@@ -553,7 +554,7 @@ pub fn formatType(
        .Many, .C => {
            if (actual_fmt.len == 0)
                @compileError("cannot format pointer without a specifier (i.e. {s} or {*})");
-            if (ptr_info.sentinel) |sentinel| {
+            if (ptr_info.sentinel) |_| {
                return formatType(mem.span(value), actual_fmt, options, writer, max_depth);
            }
            if (ptr_info.child == u8) {

@@ -741,6 +742,8 @@ fn formatSliceHexImpl(comptime case: Case) type {
        options: std.fmt.FormatOptions,
        writer: anytype,
    ) !void {
+        _ = fmt;
+        _ = options;
        var buf: [2]u8 = undefined;

        for (bytes) |c| {

@@ -777,6 +780,8 @@ fn formatSliceEscapeImpl(comptime case: Case) type {
        options: std.fmt.FormatOptions,
        writer: anytype,
    ) !void {
+        _ = fmt;
+        _ = options;
        var buf: [4]u8 = undefined;

        buf[0] = '\\';

@@ -820,6 +825,7 @@ fn formatSizeImpl(comptime radix: comptime_int) type {
        options: FormatOptions,
        writer: anytype,
    ) !void {
+        _ = fmt;
        if (value == 0) {
            return writer.writeAll("0B");
        }

@@ -903,6 +909,7 @@ pub fn formatAsciiChar(
    options: FormatOptions,
    writer: anytype,
) !void {
+    _ = options;
    return writer.writeAll(@as(*const [1]u8, &c));
}

@@ -1140,7 +1147,7 @@ pub fn formatFloatHexadecimal(

    // +1 for the decimal part.
    var buf: [1 + mantissa_digits]u8 = undefined;
-    const N = formatIntBuf(&buf, mantissa, 16, .lower, .{ .fill = '0', .width = 1 + mantissa_digits });
+    _ = formatIntBuf(&buf, mantissa, 16, .lower, .{ .fill = '0', .width = 1 + mantissa_digits });

    try writer.writeAll("0x");
    try writer.writeByte(buf[0]);

@@ -1362,6 +1369,8 @@ pub fn formatIntBuf(out_buf: []u8, value: anytype, base: u8, case: Case, options
}

fn formatDuration(ns: u64, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
+    _ = fmt;
+    _ = options;
    var ns_remaining = ns;
    inline for (.{
        .{ .ns = 365 * std.time.ns_per_day, .sep = 'y' },

@@ -2152,6 +2161,7 @@ test "custom" {
            options: FormatOptions,
            writer: anytype,
        ) !void {
+            _ = options;
            if (fmt.len == 0 or comptime std.mem.eql(u8, fmt, "p")) {
                return std.fmt.format(writer, "({d:.3},{d:.3})", .{ self.x, self.y });
            } else if (comptime std.mem.eql(u8, fmt, "d")) {

@@ -2162,7 +2172,6 @@ test "custom" {
        }
    };

    var buf1: [32]u8 = undefined;
    var value = Vec2{
        .x = 10.2,
        .y = 2.22,

@@ -2220,7 +2229,7 @@ test "union" {
    try std.testing.expect(mem.eql(u8, uu_result[0..3], "UU@"));

    const eu_result = try bufPrint(buf[0..], "{}", .{eu_inst});
-    try std.testing.expect(mem.eql(u8, uu_result[0..3], "EU@"));
+    try std.testing.expect(mem.eql(u8, eu_result[0..3], "EU@"));
}

test "enum" {

@@ -2341,6 +2350,7 @@ test "formatType max_depth" {
            options: FormatOptions,
            writer: anytype,
        ) !void {
+            _ = options;
            if (fmt.len == 0) {
                return std.fmt.format(writer, "({d:.3},{d:.3})", .{ self.x, self.y });
            } else {

@@ -200,7 +200,6 @@ const ParseResult = enum {

fn parseRepr(s: []const u8, n: *FloatRepr) !ParseResult {
    var digit_index: usize = 0;
    var negative = false;
    var negative_exp = false;
    var exponent: i32 = 0;
@@ -477,7 +477,7 @@ pub const Dir = struct {
    }

    var stat_info: os.libc_stat = undefined;
-    const rc2 = os.system._kern_read_stat(
+    _ = os.system._kern_read_stat(
        self.dir.fd,
        &haiku_entry.d_name,
        false,

@@ -1541,7 +1541,7 @@ pub const Dir = struct {
    self: Dir,
    target_path: []const u8,
    sym_link_path: []const u8,
-    flags: SymLinkFlags,
+    _: SymLinkFlags,
) !void {
    return os.symlinkatWasi(target_path, self.fd, sym_link_path);
}

@@ -1879,6 +1879,7 @@ pub const Dir = struct {
    /// * NtDll prefixed
    /// TODO currently this ignores `flags`.
    pub fn accessW(self: Dir, sub_path_w: [*:0]const u16, flags: File.OpenFlags) AccessError!void {
+        _ = flags;
        return os.faccessatW(self.fd, sub_path_w, 0, 0);
    }

@@ -2438,7 +2439,7 @@ pub fn selfExePath(out_buffer: []u8) SelfExePathError![]u8 {
    }) catch continue;

    var real_path_buf: [MAX_PATH_BYTES]u8 = undefined;
-    if (os.realpathZ(&resolved_path_buf, &real_path_buf)) |real_path| {
+    if (os.realpathZ(resolved_path, &real_path_buf)) |real_path| {
        // found a file, and hope it is the right file
        if (real_path.len > out_buffer.len)
            return error.NameTooLong;

@@ -579,7 +579,7 @@ pub fn resolveWindows(allocator: *Allocator, paths: []const []const u8) ![]u8 {
    // Now we know the disk designator to use, if any, and what kind it is. And our result
    // is big enough to append all the paths to.
    var correct_disk_designator = true;
-    for (paths[first_index..]) |p, i| {
+    for (paths[first_index..]) |p| {
        const parsed = windowsParsePath(p);

        if (parsed.kind != WindowsPath.Kind.None) {

@@ -660,7 +660,7 @@ pub fn resolvePosix(allocator: *Allocator, paths: []const []const u8) ![]u8 {
    }
    errdefer allocator.free(result);

-    for (paths[first_index..]) |p, i| {
+    for (paths[first_index..]) |p| {
        var it = mem.tokenize(p, "/");
        while (it.next()) |component| {
            if (mem.eql(u8, component, ".")) {

@@ -541,6 +541,7 @@ test "makePath, put some files in it, deleteTree" {
    try tmp.dir.writeFile("os_test_tmp" ++ fs.path.sep_str ++ "b" ++ fs.path.sep_str ++ "file2.txt", "blah");
    try tmp.dir.deleteTree("os_test_tmp");
    if (tmp.dir.openDir("os_test_tmp", .{})) |dir| {
+        _ = dir;
        @panic("expected error");
    } else |err| {
        try testing.expect(err == error.FileNotFound);

@@ -638,6 +639,7 @@ test "access file" {

    try tmp.dir.makePath("os_test_tmp");
    if (tmp.dir.access("os_test_tmp" ++ fs.path.sep_str ++ "file.txt", .{})) |ok| {
+        _ = ok;
        @panic("expected error");
    } else |err| {
        try testing.expect(err == error.FileNotFound);

@@ -36,6 +36,8 @@ pub const PreopenType = union(PreopenTypeTag) {
    }

    pub fn format(self: Self, comptime fmt: []const u8, options: std.fmt.FormatOptions, out_stream: anytype) !void {
+        _ = fmt;
+        _ = options;
        try out_stream.print("PreopenType{{ ", .{});
        switch (self) {
            PreopenType.Dir => |path| try out_stream.print(".Dir = '{}'", .{std.zig.fmtId(path)}),

@@ -353,7 +353,6 @@ fn SMHasherTest(comptime hash_fn: anytype) u32 {

    var key: [256]u8 = undefined;
    var hashes_bytes: [256 * @sizeOf(HashResult)]u8 = undefined;
    var final: HashResult = 0;

    std.mem.set(u8, &key, 0);
    std.mem.set(u8, &hashes_bytes, 0);

@@ -376,6 +375,7 @@ fn SMHasherTest(comptime hash_fn: anytype) u32 {
}

fn CityHash32hashIgnoreSeed(str: []const u8, seed: u32) u32 {
+    _ = seed;
    return CityHash32.hash(str);
}
@@ -166,8 +166,6 @@ pub const Wyhash = struct {
    }

    pub fn final(self: *Wyhash) u64 {
        const seed = self.state.seed;
        const rem_len = @intCast(u5, self.buf_len);
        const rem_key = self.buf[0..self.buf_len];

        return self.state.final(rem_key);

@@ -29,6 +29,7 @@ pub fn getAutoHashFn(comptime K: type, comptime Context: type) (fn (Context, K)

    return struct {
        fn hash(ctx: Context, key: K) u64 {
+            _ = ctx;
            if (comptime trait.hasUniqueRepresentation(K)) {
                return Wyhash.hash(0, std.mem.asBytes(&key));
            } else {

@@ -43,6 +44,7 @@ pub fn getAutoHashFn(comptime K: type, comptime Context: type) (fn (Context, K)
pub fn getAutoEqlFn(comptime K: type, comptime Context: type) (fn (Context, K, K) bool) {
    return struct {
        fn eql(ctx: Context, a: K, b: K) bool {
+            _ = ctx;
            return meta.eql(a, b);
        }
    }.eql;

@@ -78,9 +80,11 @@ pub fn StringHashMapUnmanaged(comptime V: type) type {

pub const StringContext = struct {
    pub fn hash(self: @This(), s: []const u8) u64 {
+        _ = self;
        return hashString(s);
    }
    pub fn eql(self: @This(), a: []const u8, b: []const u8) bool {
+        _ = self;
        return eqlString(a, b);
    }
};

@@ -1809,7 +1813,7 @@ test "std.hash_map getOrPut" {

    i = 0;
    while (i < 20) : (i += 1) {
-        var n = try map.getOrPutValue(i, 1);
+        _ = try map.getOrPutValue(i, 1);
    }

    i = 0;

@@ -1887,9 +1891,11 @@ test "std.hash_map clone" {
test "std.hash_map getOrPutAdapted" {
    const AdaptedContext = struct {
        fn eql(self: @This(), adapted_key: []const u8, test_key: u64) bool {
+            _ = self;
            return std.fmt.parseInt(u64, adapted_key, 10) catch unreachable == test_key;
        }
        fn hash(self: @This(), adapted_key: []const u8) u64 {
+            _ = self;
            const key = std.fmt.parseInt(u64, adapted_key, 10) catch unreachable;
            return (AutoContext(u64){}).hash(key);
        }

@@ -108,6 +108,8 @@ const CAllocator = struct {
        len_align: u29,
        return_address: usize,
    ) error{OutOfMemory}![]u8 {
+        _ = allocator;
+        _ = return_address;
        assert(len > 0);
        assert(std.math.isPowerOfTwo(alignment));

@@ -134,6 +136,9 @@ const CAllocator = struct {
        len_align: u29,
        return_address: usize,
    ) Allocator.Error!usize {
+        _ = allocator;
+        _ = buf_align;
+        _ = return_address;
        if (new_len == 0) {
            alignedFree(buf.ptr);
            return 0;

@@ -178,6 +183,9 @@ fn rawCAlloc(
    len_align: u29,
    ret_addr: usize,
) Allocator.Error![]u8 {
+    _ = self;
+    _ = len_align;
+    _ = ret_addr;
    assert(ptr_align <= @alignOf(std.c.max_align_t));
    const ptr = @ptrCast([*]u8, c.malloc(len) orelse return error.OutOfMemory);
    return ptr[0..len];

@@ -191,6 +199,9 @@ fn rawCResize(
    len_align: u29,
    ret_addr: usize,
) Allocator.Error!usize {
+    _ = self;
+    _ = old_align;
+    _ = ret_addr;
    if (new_len == 0) {
        c.free(buf.ptr);
        return 0;

@@ -231,6 +242,8 @@ pub var next_mmap_addr_hint: ?[*]align(mem.page_size) u8 = null;

const PageAllocator = struct {
    fn alloc(allocator: *Allocator, n: usize, alignment: u29, len_align: u29, ra: usize) error{OutOfMemory}![]u8 {
+        _ = allocator;
+        _ = ra;
        assert(n > 0);
        const aligned_len = mem.alignForward(n, mem.page_size);

@@ -334,6 +347,9 @@ const PageAllocator = struct {
        len_align: u29,
        return_address: usize,
    ) Allocator.Error!usize {
+        _ = allocator;
+        _ = buf_align;
+        _ = return_address;
        const new_size_aligned = mem.alignForward(new_size, mem.page_size);

        if (builtin.os.tag == .windows) {

@@ -482,6 +498,8 @@ const WasmPageAllocator = struct {
    }

    fn alloc(allocator: *Allocator, len: usize, alignment: u29, len_align: u29, ra: usize) error{OutOfMemory}![]u8 {
+        _ = allocator;
+        _ = ra;
        const page_count = nPages(len);
        const page_idx = try allocPages(page_count, alignment);
        return @intToPtr([*]u8, page_idx * mem.page_size)[0..alignPageAllocLen(page_count * mem.page_size, len, len_align)];

@@ -542,6 +560,9 @@ const WasmPageAllocator = struct {
        len_align: u29,
        return_address: usize,
    ) error{OutOfMemory}!usize {
+        _ = allocator;
+        _ = buf_align;
+        _ = return_address;
        const aligned_len = mem.alignForward(buf.len, mem.page_size);
        if (new_len > aligned_len) return error.OutOfMemory;
        const current_n = nPages(aligned_len);

@@ -588,6 +609,7 @@ pub const HeapAllocator = switch (builtin.os.tag) {
        len_align: u29,
        return_address: usize,
    ) error{OutOfMemory}![]u8 {
+        _ = return_address;
        const self = @fieldParentPtr(HeapAllocator, "allocator", allocator);

        const amt = n + ptr_align - 1 + @sizeOf(usize);

@@ -622,6 +644,8 @@ pub const HeapAllocator = switch (builtin.os.tag) {
        len_align: u29,
        return_address: usize,
    ) error{OutOfMemory}!usize {
+        _ = buf_align;
+        _ = return_address;
        const self = @fieldParentPtr(HeapAllocator, "allocator", allocator);
        if (new_size == 0) {
            os.windows.HeapFree(self.heap_handle.?, 0, @intToPtr(*c_void, getRecordPtr(buf).*));

@@ -694,6 +718,8 @@ pub const FixedBufferAllocator = struct {
    }

    fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 {
+        _ = len_align;
+        _ = ra;
        const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator);
        const adjust_off = mem.alignPointerOffset(self.buffer.ptr + self.end_index, ptr_align) orelse
            return error.OutOfMemory;

@@ -716,6 +742,8 @@ pub const FixedBufferAllocator = struct {
        len_align: u29,
        return_address: usize,
    ) Allocator.Error!usize {
+        _ = buf_align;
+        _ = return_address;
        const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator);
        assert(self.ownsSlice(buf)); // sanity check

@@ -766,6 +794,8 @@ pub const ThreadSafeFixedBufferAllocator = blk: {
    }

    fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 {
+        _ = len_align;
+        _ = ra;
        const self = @fieldParentPtr(ThreadSafeFixedBufferAllocator, "allocator", allocator);
        var end_index = @atomicLoad(usize, &self.end_index, builtin.AtomicOrder.SeqCst);
        while (true) {

@@ -66,6 +66,8 @@ pub const ArenaAllocator = struct {
    }

    fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 {
+        _ = len_align;
+        _ = ra;
        const self = @fieldParentPtr(ArenaAllocator, "allocator", allocator);

        var cur_node = if (self.state.buffer_list.first) |first_node| first_node else try self.createNode(0, n + ptr_align);

@@ -95,6 +97,9 @@ pub const ArenaAllocator = struct {
    }

    fn resize(allocator: *Allocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Allocator.Error!usize {
+        _ = buf_align;
+        _ = len_align;
+        _ = ret_addr;
        const self = @fieldParentPtr(ArenaAllocator, "allocator", allocator);

        const cur_node = self.state.buffer_list.first orelse return error.OutOfMemory;

@@ -37,9 +37,9 @@ pub fn LogToWriterAllocator(comptime Writer: type) type {
        const self = @fieldParentPtr(Self, "allocator", allocator);
        self.writer.print("alloc : {}", .{len}) catch {};
        const result = self.parent_allocator.allocFn(self.parent_allocator, len, ptr_align, len_align, ra);
-        if (result) |buff| {
+        if (result) |_| {
            self.writer.print(" success!\n", .{}) catch {};
-        } else |err| {
+        } else |_| {
            self.writer.print(" failure!\n", .{}) catch {};
        }
        return result;

@@ -65,7 +65,7 @@ pub fn ScopedLoggingAllocator(
) error{OutOfMemory}![]u8 {
    const self = @fieldParentPtr(Self, "allocator", allocator);
    const result = self.parent_allocator.allocFn(self.parent_allocator, len, ptr_align, len_align, ra);
-    if (result) |buff| {
+    if (result) |_| {
        logHelper(
            success_log_level,
            "alloc - success - len: {}, ptr_align: {}, len_align: {}",

@@ -161,6 +161,7 @@ pub const null_writer = @as(NullWriter, .{ .context = {} });

const NullWriter = Writer(void, error{}, dummyWrite);
fn dummyWrite(context: void, data: []const u8) error{}!usize {
+    _ = context;
    return data.len;
}
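
The allocator hunks above are the most mechanical of the set: functions installed in the `Allocator` interface must keep its full parameter list even when an implementation ignores most of it, so each unused parameter is discarded individually rather than removed. A sketch of the shape, modeled on the `noResize` hunk later in this diff:

    fn resize(allocator: *Allocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Allocator.Error!usize {
        // The signature is fixed by the Allocator interface; discard
        // what this implementation does not need.
        _ = allocator;
        _ = buf_align;
        _ = len_align;
        _ = ret_addr;
        if (new_len > buf.len) return error.OutOfMemory;
        return new_len;
    }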
|
||||
|
||||
|
||||
@ -149,7 +149,7 @@ pub fn BitReader(endian: builtin.Endian, comptime ReaderType: type) type {
|
||||
var out_bits_total = @as(usize, 0);
|
||||
//@NOTE: I'm not sure this is a good idea, maybe alignToByte should be forced
|
||||
if (self.bit_count > 0) {
|
||||
for (buffer) |*b, i| {
|
||||
for (buffer) |*b| {
|
||||
b.* = try self.readBits(u8, u8_bit_count, &out_bits);
|
||||
out_bits_total += out_bits;
|
||||
}
|
||||
|
||||
@ -128,7 +128,7 @@ pub fn BitWriter(endian: builtin.Endian, comptime WriterType: type) type {
|
||||
pub fn write(self: *Self, buffer: []const u8) Error!usize {
|
||||
// TODO: I'm not sure this is a good idea, maybe flushBits should be forced
|
||||
if (self.bit_count > 0) {
|
||||
for (buffer) |b, i|
|
||||
for (buffer) |b|
|
||||
try self.writeBits(b, u8_bit_count);
|
||||
return buffer.len;
|
||||
}
|
||||
|
||||
@ -1221,11 +1221,11 @@ test "json.token premature object close" {
|
||||
pub fn validate(s: []const u8) bool {
|
||||
var p = StreamingParser.init();
|
||||
|
||||
for (s) |c, i| {
|
||||
for (s) |c| {
|
||||
var token1: ?Token = undefined;
|
||||
var token2: ?Token = undefined;
|
||||
|
||||
p.feed(c, &token1, &token2) catch |err| {
|
||||
p.feed(c, &token1, &token2) catch {
|
||||
return false;
|
||||
};
|
||||
}
|
||||
@ -1410,7 +1410,7 @@ fn parsedEqual(a: anytype, b: @TypeOf(a)) bool {
|
||||
if (a == null or b == null) return false;
|
||||
return parsedEqual(a.?, b.?);
|
||||
},
|
||||
.Union => |unionInfo| {
|
||||
.Union => {
|
||||
if (info.tag_type) |UnionTag| {
|
||||
const tag_a = std.meta.activeTag(a);
|
||||
const tag_b = std.meta.activeTag(b);
|
||||
@ -1771,7 +1771,7 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options:
|
||||
const source_slice = stringToken.slice(tokens.slice, tokens.i - 1);
|
||||
switch (stringToken.escapes) {
|
||||
.None => return allocator.dupe(u8, source_slice),
|
||||
.Some => |some_escapes| {
|
||||
.Some => {
|
||||
const output = try allocator.alloc(u8, stringToken.decodedLength());
|
||||
errdefer allocator.free(output);
|
||||
try unescapeValidString(output, source_slice);
|
||||
@ -2391,7 +2391,7 @@ pub const Parser = struct {
|
||||
const slice = s.slice(input, i);
|
||||
switch (s.escapes) {
|
||||
.None => return Value{ .String = if (p.copy_strings) try allocator.dupe(u8, slice) else slice },
|
||||
.Some => |some_escapes| {
|
||||
.Some => {
|
||||
const output = try allocator.alloc(u8, s.decodedLength());
|
||||
errdefer allocator.free(output);
|
||||
try unescapeValidString(output, slice);
|
||||
@ -2401,6 +2401,7 @@ pub const Parser = struct {
|
||||
}
|
||||
|
||||
fn parseNumber(p: *Parser, n: std.meta.TagPayload(Token, Token.Number), input: []const u8, i: usize) !Value {
|
||||
_ = p;
|
||||
return if (n.is_integer)
|
||||
Value{
|
||||
.Integer = std.fmt.parseInt(i64, n.slice(input, i), 10) catch |e| switch (e) {
|
||||
@ -2815,7 +2816,7 @@ pub fn stringify(
|
||||
if (child_options.whitespace) |*child_whitespace| {
|
||||
child_whitespace.indent_level += 1;
|
||||
}
|
||||
inline for (S.fields) |Field, field_i| {
|
||||
inline for (S.fields) |Field| {
|
||||
// don't include void fields
|
||||
if (Field.field_type == void) continue;
|
||||
|
||||
@ -3114,6 +3115,7 @@ test "stringify struct with custom stringifier" {
|
||||
options: StringifyOptions,
|
||||
out_stream: anytype,
|
||||
) !void {
|
||||
_ = value;
|
||||
try out_stream.writeAll("[\"something special\",");
|
||||
try stringify(42, options, out_stream);
|
||||
try out_stream.writeByte(']');
|
||||
|
||||
@ -198,7 +198,7 @@ fn test_read_ileb128_seq(comptime T: type, comptime N: usize, encoded: []const u
|
||||
var reader = std.io.fixedBufferStream(encoded);
|
||||
var i: usize = 0;
|
||||
while (i < N) : (i += 1) {
|
||||
const v1 = try readILEB128(T, reader.reader());
|
||||
_ = try readILEB128(T, reader.reader());
|
||||
}
|
||||
}
|
||||
|
||||
@ -206,7 +206,7 @@ fn test_read_uleb128_seq(comptime T: type, comptime N: usize, encoded: []const u
|
||||
var reader = std.io.fixedBufferStream(encoded);
|
||||
var i: usize = 0;
|
||||
while (i < N) : (i += 1) {
|
||||
const v1 = try readULEB128(T, reader.reader());
|
||||
_ = try readULEB128(T, reader.reader());
|
||||
}
|
||||
}
|
||||
|
||||
@ -309,7 +309,6 @@ fn test_write_leb128(value: anytype) !void {
const B = std.meta.Int(signedness, larger_type_bits);

const bytes_needed = bn: {
const S = std.meta.Int(signedness, @sizeOf(T) * 8);
if (@typeInfo(T).Int.bits <= 7) break :bn @as(u16, 1);

const unused_bits = if (value < 0) @clz(T, ~value) else @clz(T, value);

@ -359,8 +359,8 @@ test "basic TailQueue test" {
}
}

var first = list.popFirst(); // {2, 3, 4, 5}
var last = list.pop(); // {2, 3, 4}
_ = list.popFirst(); // {2, 3, 4, 5}
_ = list.pop(); // {2, 3, 4}
list.remove(&three); // {2, 4}

try testing.expect(list.first.?.data == 2);

@ -458,6 +458,7 @@ pub const Mutable = struct {
/// If `allocator` is provided, it will be used for temporary storage to improve
/// multiplication performance. `error.OutOfMemory` is handled with a fallback algorithm.
pub fn sqrNoAlias(rma: *Mutable, a: Const, opt_allocator: ?*Allocator) void {
_ = opt_allocator;
assert(rma.limbs.ptr != a.limbs.ptr); // illegal aliasing

mem.set(Limb, rma.limbs, 0);
@ -676,6 +677,7 @@ pub const Mutable = struct {
///
/// `limbs_buffer` is used for temporary storage during the operation.
pub fn gcdNoAlias(rma: *Mutable, x: Const, y: Const, limbs_buffer: *std.ArrayList(Limb)) !void {
_ = limbs_buffer;
assert(rma.limbs.ptr != x.limbs.ptr); // illegal aliasing
assert(rma.limbs.ptr != y.limbs.ptr); // illegal aliasing
return gcdLehmer(rma, x, y, allocator);
@ -1141,6 +1143,7 @@ pub const Const = struct {
options: std.fmt.FormatOptions,
out_stream: anytype,
) !void {
_ = options;
comptime var radix = 10;
comptime var case: std.fmt.Case = .lower;

@ -1618,6 +1621,7 @@ pub const Managed = struct {
/// Converts self to a string in the requested base. Memory is allocated from the provided
/// allocator and not the one present in self.
pub fn toString(self: Managed, allocator: *Allocator, base: u8, case: std.fmt.Case) ![]u8 {
_ = allocator;
if (base < 2 or base > 16) return error.InvalidBase;
return self.toConst().toStringAlloc(self.allocator, base, case);
}
@ -2000,8 +2004,6 @@ fn llmulacc_karatsuba(allocator: *Allocator, r: []Limb, x: []const Limb, y: []co
} else {
llsub(j1, y0[0..y0_len], y1[0..y1_len]);
}
const j0_len = llnormalize(j0);
const j1_len = llnormalize(j1);
if (x_cmp == y_cmp) {
mem.set(Limb, tmp[0..length], 0);
llmulacc(allocator, tmp, j0, j1);

@ -204,7 +204,6 @@ pub const Rational = struct {
const esize = math.floatExponentBits(T);
const ebias = (1 << (esize - 1)) - 1;
const emin = 1 - ebias;
const emax = ebias;

if (self.p.eqZero()) {
return 0;

@ -12,8 +12,8 @@
const std = @import("../../std.zig");
const debug = std.debug;
const math = std.math;
const cmath = math.complex;
const testing = std.testing;
const cmath = math.complex;
const Complex = cmath.Complex;

/// Returns exp(z) scaled to avoid overflow.

@ -316,16 +316,12 @@ test "math.expm1_64" {
}

test "math.expm1_32.special" {
const epsilon = 0.000001;

try expect(math.isPositiveInf(expm1_32(math.inf(f32))));
try expect(expm1_32(-math.inf(f32)) == -1.0);
try expect(math.isNan(expm1_32(math.nan(f32))));
}

test "math.expm1_64.special" {
const epsilon = 0.000001;

try expect(math.isPositiveInf(expm1_64(math.inf(f64))));
try expect(expm1_64(-math.inf(f64)) == -1.0);
try expect(math.isNan(expm1_64(math.nan(f64))));

@ -12,6 +12,7 @@
const std = @import("../std.zig");
const math = std.math;
const expect = std.testing.expect;
const expectEqual = std.testing.expectEqual;
const maxInt = std.math.maxInt;

fn modf_result(comptime T: type) type {
@ -131,11 +132,7 @@ test "math.modf" {
const a = modf(@as(f32, 1.0));
const b = modf32(1.0);
// NOTE: No struct comparison on generic return type function? non-named, makes sense, but still.
try expect(a.ipart == b.ipart and a.fpart == b.fpart);

const c = modf(@as(f64, 1.0));
const d = modf64(1.0);
try expect(a.ipart == b.ipart and a.fpart == b.fpart);
try expectEqual(a, b);
}

test "math.modf32" {

@ -139,6 +139,11 @@ var failAllocator = Allocator{
.resizeFn = Allocator.noResize,
};
fn failAllocatorAlloc(self: *Allocator, n: usize, alignment: u29, len_align: u29, ra: usize) Allocator.Error![]u8 {
_ = self;
_ = n;
_ = alignment;
_ = len_align;
_ = ra;
return error.OutOfMemory;
}

@ -55,6 +55,10 @@ pub fn noResize(
len_align: u29,
ret_addr: usize,
) Error!usize {
_ = self;
_ = buf_align;
_ = len_align;
_ = ret_addr;
if (new_len > buf.len)
return error.OutOfMemory;
return new_len;
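
`failAllocatorAlloc` and `noResize` above are interface stubs: their signatures are dictated by the `Allocator` vtable, yet an always-failing implementation uses none of its arguments, so each one is discarded. A minimal sketch of the same shape (hypothetical callback, not the real `Allocator` interface):

// A stub that must match a fixed callback signature but ignores
// everything it receives, failing unconditionally.
fn alwaysOutOfMemory(context: *u8, len: usize, alignment: u29) error{OutOfMemory}![]u8 {
    _ = context;
    _ = len;
    _ = alignment;
    return error.OutOfMemory;
}
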
@ -654,7 +654,6 @@ pub fn TagPayload(comptime U: type, tag: Tag(U)) type {
try testing.expect(trait.is(.Union)(U));

const info = @typeInfo(U).Union;
const tag_info = @typeInfo(Tag(U)).Enum;

inline for (info.fields) |field_info| {
if (comptime mem.eql(u8, field_info.name, @tagName(tag)))
@ -757,12 +756,6 @@ test "std.meta.eql" {
.c = "12345".*,
};

const s_2 = S{
.a = 1,
.b = 123.3,
.c = "54321".*,
};

var s_3 = S{
.a = 134,
.b = 123.3,
@ -850,6 +843,7 @@ pub const refAllDecls = @compileError("refAllDecls has been moved from std.meta
pub fn declList(comptime Namespace: type, comptime Decl: type) []const *const Decl {
const S = struct {
fn declNameLessThan(context: void, lhs: *const Decl, rhs: *const Decl) bool {
_ = context;
return mem.lessThan(u8, lhs.name, rhs.name);
}
};

@ -96,18 +96,18 @@ pub fn TrailerFlags(comptime Fields: type) type {
pub fn ptr(self: Self, p: [*]align(@alignOf(Fields)) u8, comptime field: FieldEnum) *Field(field) {
if (@sizeOf(Field(field)) == 0)
return undefined;
const off = self.offset(p, field);
const off = self.offset(field);
return @ptrCast(*Field(field), @alignCast(@alignOf(Field(field)), p + off));
}

pub fn ptrConst(self: Self, p: [*]align(@alignOf(Fields)) const u8, comptime field: FieldEnum) *const Field(field) {
if (@sizeOf(Field(field)) == 0)
return undefined;
const off = self.offset(p, field);
const off = self.offset(field);
return @ptrCast(*const Field(field), @alignCast(@alignOf(Field(field)), p + off));
}

pub fn offset(self: Self, p: [*]align(@alignOf(Fields)) const u8, comptime field: FieldEnum) usize {
pub fn offset(self: Self, comptime field: FieldEnum) usize {
var off: usize = 0;
inline for (@typeInfo(Fields).Struct.fields) |field_info, i| {
const active = (self.bits & (1 << i)) != 0;
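
The `TrailerFlags` hunk takes the other available route: when a parameter like `p` is genuinely dead, it can be deleted instead of discarded, at the cost of updating every call site (`self.offset(p, field)` becomes `self.offset(field)` in both `ptr` and `ptrConst`). A hedged sketch of the same refactor on a hypothetical type:

const Layout = struct {
    widths: [4]usize,

    // Previously: fn offsetOf(self: Layout, p: [*]const u8, index: usize) usize
    // The buffer pointer was never read, so the parameter is removed outright
    // and the callers pass one argument fewer.
    fn offsetOf(self: Layout, index: usize) usize {
        var off: usize = 0;
        var i: usize = 0;
        while (i < index) : (i += 1) off += self.widths[i];
        return off;
    }
};
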
@ -92,6 +92,7 @@ pub fn MultiArrayList(comptime S: type) type {
}
const Sort = struct {
fn lessThan(trash: *i32, lhs: Data, rhs: Data) bool {
_ = trash;
return lhs.alignment > rhs.alignment;
}
};
@ -221,7 +222,7 @@ pub fn MultiArrayList(comptime S: type) type {
/// retain list ordering.
pub fn swapRemove(self: *Self, index: usize) void {
const slices = self.slice();
inline for (fields) |field_info, i| {
inline for (fields) |_, i| {
const field_slice = slices.items(@intToEnum(Field, i));
field_slice[index] = field_slice[self.len - 1];
field_slice[self.len - 1] = undefined;
@ -233,7 +234,7 @@ pub fn MultiArrayList(comptime S: type) type {
/// after it to preserve order.
pub fn orderedRemove(self: *Self, index: usize) void {
const slices = self.slice();
inline for (fields) |field_info, field_index| {
inline for (fields) |_, field_index| {
const field_slice = slices.items(@intToEnum(Field, field_index));
var i = index;
while (i < self.len - 1) : (i += 1) {
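
In the `MultiArrayList` hunks only the loop index is needed, so the unused value capture is written as `_` directly in the capture list: `inline for (fields) |_, i|`. The same works for ordinary `for` loops; a small sketch:

const std = @import("std");

test "index-only capture" {
    const items = [_]u8{ 10, 20, 30 };
    var index_sum: usize = 0;
    // `_` in the value position discards each element; only `i` is used.
    for (items) |_, i| {
        index_sum += i;
    }
    try std.testing.expectEqual(@as(usize, 3), index_sum);
}
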
@ -270,6 +270,8 @@ pub const Ip4Address = extern struct {
options: std.fmt.FormatOptions,
out_stream: anytype,
) !void {
_ = fmt;
_ = options;
const bytes = @ptrCast(*const [4]u8, &self.sa.addr);
try std.fmt.format(out_stream, "{}.{}.{}.{}:{}", .{
bytes[0],
@ -281,6 +283,7 @@ pub const Ip4Address = extern struct {
}

pub fn getOsSockLen(self: Ip4Address) os.socklen_t {
_ = self;
return @sizeOf(os.sockaddr_in);
}
};
@ -556,6 +559,8 @@ pub const Ip6Address = extern struct {
options: std.fmt.FormatOptions,
out_stream: anytype,
) !void {
_ = fmt;
_ = options;
const port = mem.bigToNative(u16, self.sa.port);
if (mem.eql(u8, self.sa.addr[0..12], &[_]u8{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff })) {
try std.fmt.format(out_stream, "[::ffff:{}.{}.{}.{}]:{}", .{
@ -598,6 +603,7 @@ pub const Ip6Address = extern struct {
}

pub fn getOsSockLen(self: Ip6Address) os.socklen_t {
_ = self;
return @sizeOf(os.sockaddr_in6);
}
};
@ -1062,6 +1068,7 @@ fn IN6_IS_ADDR_SITELOCAL(a: [16]u8) bool {

// Parameters `b` and `a` swapped to make this descending.
fn addrCmpLessThan(context: void, b: LookupAddr, a: LookupAddr) bool {
_ = context;
return a.sortkey < b.sortkey;
}
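
The `format` methods above keep the `fmt` and `options` parameters because `std.fmt` requires that exact signature, then discard whichever ones a given formatter ignores. A hedged, self-contained example of the same contract (hypothetical `Point` type, not from this commit):

const std = @import("std");

const Point = struct {
    x: i32,
    y: i32,

    pub fn format(
        self: Point,
        comptime fmt: []const u8,
        options: std.fmt.FormatOptions,
        writer: anytype,
    ) !void {
        _ = fmt; // the signature is dictated by std.fmt,
        _ = options; // but this formatter uses neither parameter
        try std.fmt.format(writer, "({d},{d})", .{ self.x, self.y });
    }
};

test "custom format with discarded parameters" {
    var buf: [16]u8 = undefined;
    const s = try std.fmt.bufPrint(&buf, "{}", .{Point{ .x = 1, .y = 2 }});
    try std.testing.expectEqualStrings("(1,2)", s);
}
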
@ -61,6 +61,7 @@ test "Once executes its function just once" {
for (threads) |*handle| {
handle.* = try std.Thread.spawn(struct {
fn thread_fn(x: u8) void {
_ = x;
global_once.call();
}
}.thread_fn, 0);

@ -1164,6 +1164,7 @@ fn openOptionsFromFlags(flags: u32) windows.OpenFileOptions {
/// TODO currently, this function does not handle all flag combinations
/// or makes use of perm argument.
pub fn openW(file_path_w: []const u16, flags: u32, perm: mode_t) OpenError!fd_t {
_ = perm;
var options = openOptionsFromFlags(flags);
options.dir = std.fs.cwd().fd;
return windows.OpenFile(file_path_w, options) catch |err| switch (err) {
@ -1273,6 +1274,7 @@ pub fn openatZ(dir_fd: fd_t, file_path: [*:0]const u8, flags: u32, mode: mode_t)
/// TODO currently, this function does not handle all flag combinations
/// or makes use of perm argument.
pub fn openatW(dir_fd: fd_t, file_path_w: []const u16, flags: u32, mode: mode_t) OpenError!fd_t {
_ = mode;
var options = openOptionsFromFlags(flags);
options.dir = dir_fd;
return windows.OpenFile(file_path_w, options) catch |err| switch (err) {
@ -2169,6 +2171,7 @@ pub fn mkdirat(dir_fd: fd_t, sub_dir_path: []const u8, mode: u32) MakeDirError!v
pub const mkdiratC = @compileError("deprecated: renamed to mkdiratZ");

pub fn mkdiratWasi(dir_fd: fd_t, sub_dir_path: []const u8, mode: u32) MakeDirError!void {
_ = mode;
switch (wasi.path_create_directory(dir_fd, sub_dir_path.ptr, sub_dir_path.len)) {
wasi.ESUCCESS => return,
wasi.EACCES => return error.AccessDenied,
@ -2216,6 +2219,7 @@ pub fn mkdiratZ(dir_fd: fd_t, sub_dir_path: [*:0]const u8, mode: u32) MakeDirErr
}

pub fn mkdiratW(dir_fd: fd_t, sub_path_w: []const u16, mode: u32) MakeDirError!void {
_ = mode;
const sub_dir_handle = windows.OpenFile(sub_path_w, .{
.dir = dir_fd,
.access_mask = windows.GENERIC_READ | windows.SYNCHRONIZE,
@ -2291,6 +2295,7 @@ pub fn mkdirZ(dir_path: [*:0]const u8, mode: u32) MakeDirError!void {

/// Windows-only. Same as `mkdir` but the parameters is WTF16 encoded.
pub fn mkdirW(dir_path_w: []const u16, mode: u32) MakeDirError!void {
_ = mode;
const sub_dir_handle = windows.OpenFile(dir_path_w, .{
.dir = std.fs.cwd().fd,
.access_mask = windows.GENERIC_READ | windows.SYNCHRONIZE,
@ -3868,6 +3873,7 @@ pub fn accessZ(path: [*:0]const u8, mode: u32) AccessError!void {
/// Otherwise use `access` or `accessC`.
/// TODO currently this ignores `mode`.
pub fn accessW(path: [*:0]const u16, mode: u32) windows.GetFileAttributesError!void {
_ = mode;
const ret = try windows.GetFileAttributesW(path);
if (ret != windows.INVALID_FILE_ATTRIBUTES) {
return;
@ -3918,6 +3924,8 @@ pub fn faccessatZ(dirfd: fd_t, path: [*:0]const u8, mode: u32, flags: u32) Acces
/// is NtDll-prefixed, null-terminated, WTF-16 encoded.
/// TODO currently this ignores `mode` and `flags`
pub fn faccessatW(dirfd: fd_t, sub_path_w: [*:0]const u16, mode: u32, flags: u32) AccessError!void {
_ = mode;
_ = flags;
if (sub_path_w[0] == '.' and sub_path_w[1] == 0) {
return;
}
@ -4895,6 +4903,8 @@ pub fn res_mkquery(
newrr: ?[*]const u8,
buf: []u8,
) usize {
_ = data;
_ = newrr;
// This implementation is ported from musl libc.
// A more idiomatic "ziggy" implementation would be welcome.
var name = dname;
@ -5341,7 +5351,7 @@ pub fn sendfile(
ENXIO => return error.Unseekable,
ESPIPE => return error.Unseekable,
else => |err| {
const discard = unexpectedErrno(err);
unexpectedErrno(err) catch {};
break :sf;
},
}
@ -5422,7 +5432,7 @@ pub fn sendfile(
EPIPE => return error.BrokenPipe,

else => {
const discard = unexpectedErrno(err);
unexpectedErrno(err) catch {};
if (amt != 0) {
return amt;
} else {
@ -5484,7 +5494,7 @@ pub fn sendfile(
EPIPE => return error.BrokenPipe,

else => {
const discard = unexpectedErrno(err);
unexpectedErrno(err) catch {};
if (amt != 0) {
return amt;
} else {
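
The `sendfile` hunks need one more wrinkle: `const discard = unexpectedErrno(err);` merely renames the unused result, so the new check still fires. Since the call returns an error union, `unexpectedErrno(err) catch {};` evaluates it and swallows the error with no binding left over. A minimal sketch of the distinction (hypothetical function):

fn logFailure(fail: bool) !void {
    if (fail) return error.Oops;
}

test "discarding an error union" {
    // `const discard = logFailure(true);` would leave `discard` unused;
    // `catch {}` runs the call and ignores any error instead.
    logFailure(true) catch {};
}
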
@ -1286,7 +1286,7 @@ pub const CAP_BLOCK_SUSPEND = 36;
pub const CAP_AUDIT_READ = 37;
pub const CAP_LAST_CAP = CAP_AUDIT_READ;

pub fn cap_valid(u8: x) bool {
pub fn cap_valid(x: u8) bool {
return x >= 0 and x <= CAP_LAST_CAP;
}

@ -70,6 +70,7 @@ fn splitValueLE64(val: i64) [2]u32 {
};
}
fn splitValueBE64(val: i64) [2]u32 {
const u = @bitCast(u64, val);
return [2]u32{
@truncate(u32, u >> 32),
@truncate(u32, u),
@ -1022,7 +1023,7 @@ pub fn sendmmsg(fd: i32, msgvec: [*]mmsghdr_const, vlen: u32, flags: u32) usize
for (msgvec[0..kvlen]) |*msg, i| {
var size: i32 = 0;
const msg_iovlen = @intCast(usize, msg.msg_hdr.msg_iovlen); // kernel side this is treated as unsigned
for (msg.msg_hdr.msg_iov[0..msg_iovlen]) |iov, j| {
for (msg.msg_hdr.msg_iov[0..msg_iovlen]) |iov| {
if (iov.iov_len > std.math.maxInt(i32) or @addWithOverflow(i32, size, @intCast(i32, iov.iov_len), &size)) {
// batch-send all messages up to the current message
if (next_unsent < i) {

@ -1513,7 +1513,7 @@ pub fn map_create(map_type: MapType, key_size: u32, value_size: u32, max_entries
EINVAL => error.MapTypeOrAttrInvalid,
ENOMEM => error.SystemResources,
EPERM => error.AccessDenied,
else => |err| unexpectedErrno(rc),
else => |err| unexpectedErrno(err),
};
}

@ -1539,7 +1539,7 @@ pub fn map_lookup_elem(fd: fd_t, key: []const u8, value: []u8) !void {
EINVAL => return error.FieldInAttrNeedsZeroing,
ENOENT => return error.NotFound,
EPERM => return error.AccessDenied,
else => |err| return unexpectedErrno(rc),
else => |err| return unexpectedErrno(err),
}
}

@ -284,6 +284,7 @@ pub const IO_Uring = struct {
}

fn copy_cqes_ready(self: *IO_Uring, cqes: []io_uring_cqe, wait_nr: u32) u32 {
_ = wait_nr;
const ready = self.cq_ready();
const count = std.math.min(cqes.len, ready);
var head = self.cq.head.*;
@ -320,6 +321,7 @@ pub const IO_Uring = struct {
/// Not idempotent, calling more than once will result in other CQEs being lost.
/// Matches the implementation of cqe_seen() in liburing.
pub fn cqe_seen(self: *IO_Uring, cqe: *io_uring_cqe) void {
_ = cqe;
self.cq_advance(1);
}

@ -728,6 +730,7 @@ pub const CompletionQueue = struct {
}

pub fn deinit(self: *CompletionQueue) void {
_ = self;
// A no-op since we now share the mmap with the submission queue.
// Here for symmetry with the submission queue, and for any future feature support.
}
@ -1272,12 +1275,12 @@ test "accept/connect/send/recv" {

var accept_addr: os.sockaddr = undefined;
var accept_addr_len: os.socklen_t = @sizeOf(@TypeOf(accept_addr));
const accept = try ring.accept(0xaaaaaaaa, server, &accept_addr, &accept_addr_len, 0);
_ = try ring.accept(0xaaaaaaaa, server, &accept_addr, &accept_addr_len, 0);
try testing.expectEqual(@as(u32, 1), try ring.submit());

const client = try os.socket(address.any.family, os.SOCK_STREAM | os.SOCK_CLOEXEC, 0);
defer os.close(client);
const connect = try ring.connect(0xcccccccc, client, &address.any, address.getOsSockLen());
_ = try ring.connect(0xcccccccc, client, &address.any, address.getOsSockLen());
try testing.expectEqual(@as(u32, 1), try ring.submit());

var cqe_accept = try ring.copy_cqe();
@ -1305,7 +1308,7 @@ test "accept/connect/send/recv" {

const send = try ring.send(0xeeeeeeee, client, buffer_send[0..], 0);
send.flags |= linux.IOSQE_IO_LINK;
const recv = try ring.recv(0xffffffff, cqe_accept.res, buffer_recv[0..], 0);
_ = try ring.recv(0xffffffff, cqe_accept.res, buffer_recv[0..], 0);
try testing.expectEqual(@as(u32, 2), try ring.submit());

const cqe_send = try ring.copy_cqe();

@ -31,7 +31,8 @@ pub fn syscall_pipe(fd: *[2]i32) usize {
\\ sw $3, 4($4)
\\ 2:
: [ret] "={$2}" (-> usize)
: [number] "{$2}" (@enumToInt(SYS.pipe))
: [number] "{$2}" (@enumToInt(SYS.pipe)),
[fd] "{$4}" (fd)
: "memory", "cc", "$7"
);
}
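
The MIPS `syscall_pipe` hunk is a plain syntax fix: in Zig inline assembly, multiple input operands are separated by commas, and the old code was missing the comma after the `[number]` input. For comparison, the shape of a correct two-input wrapper, modeled on the x86_64 syscall helpers in the standard library (a sketch, not code from this commit):

fn syscall1(number: usize, arg1: usize) usize {
    return asm volatile ("syscall"
        : [ret] "={rax}" (-> usize)
        : [number] "{rax}" (number),
          [arg1] "{rdi}" (arg1) // comma-separated input operands
        : "rcx", "r11", "memory"
    );
}
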
@ -15,7 +15,6 @@ pub fn lookup(vername: []const u8, name: []const u8) usize {

const eh = @intToPtr(*elf.Ehdr, vdso_addr);
var ph_addr: usize = vdso_addr + eh.e_phoff;
const ph = @intToPtr(*elf.Phdr, ph_addr);

var maybe_dynv: ?[*]usize = null;
var base: usize = maxInt(usize);

@ -353,6 +353,7 @@ test "spawn threads" {
}

fn start1(ctx: void) u8 {
_ = ctx;
return 0;
}

@ -379,6 +380,7 @@ test "thread local storage" {

threadlocal var x: i32 = 1234;
fn testTls(context: void) !void {
_ = context;
if (x != 1234) return error.TlsBadStartValue;
x += 1;
if (x != 1235) return error.TlsBadEndValue;
@ -425,6 +427,7 @@ const IterFnError = error{
};

fn iter_fn(info: *dl_phdr_info, size: usize, counter: *usize) IterFnError!void {
_ = size;
// Count how many libraries are loaded
counter.* += @as(usize, 1);

@ -731,6 +734,7 @@ test "sigaction" {

const S = struct {
fn handler(sig: i32, info: *const os.siginfo_t, ctx_ptr: ?*const c_void) callconv(.C) void {
_ = ctx_ptr;
// Check that we received the correct signal.
switch (native_os) {
.netbsd => {

@ -37,6 +37,7 @@ pub const Guid = extern struct {
options: std.fmt.FormatOptions,
writer: anytype,
) !void {
_ = options;
if (f.len == 0) {
return std.fmt.format(writer, "{x:0>8}-{x:0>4}-{x:0>4}-{x:0>2}{x:0>2}-{x:0>12}", .{
self.time_low,

@ -35,6 +35,7 @@ pub const ManagedNetworkProtocol = extern struct {
/// Translates an IP multicast address to a hardware (MAC) multicast address.
/// This function may be unsupported in some MNP implementations.
pub fn mcastIpToMac(self: *const ManagedNetworkProtocol, ipv6flag: bool, ipaddress: *const c_void, mac_address: *MacAddress) Status {
_ = mac_address;
return self._mcast_ip_to_mac(self, ipv6flag, ipaddress);
}

@ -1156,7 +1156,6 @@ pub fn GetFinalPathNameByHandle(
&mount_points_struct.MountPoints[0],
)[0..mount_points_struct.NumberOfMountPoints];

var found: bool = false;
for (mount_points) |mount_point| {
const symlink = @ptrCast(
[*]const u16,

@ -194,6 +194,7 @@ pub fn PackedIntArrayEndian(comptime Int: type, comptime endian: Endian, comptim

///Returns the number of elements in the packed array
pub fn len(self: Self) usize {
_ = self;
return int_count;
}

@ -594,6 +594,7 @@ pub const Pdb = struct {
error.InvalidValue => return error.InvalidDebugInfo,
else => |e| return e,
};
_ = version;
sect_cont_offset += @sizeOf(u32);
}
while (sect_cont_offset != section_contrib_size) {
@ -617,6 +618,7 @@ pub const Pdb = struct {
// Parse the InfoStreamHeader.
const version = try reader.readIntLittle(u32);
const signature = try reader.readIntLittle(u32);
_ = signature;
const age = try reader.readIntLittle(u32);
const guid = try reader.readBytesNoEof(16);

@ -673,6 +675,7 @@ pub const Pdb = struct {
}

pub fn getSymbolName(self: *Pdb, module: *Module, address: u64) ?[]const u8 {
_ = self;
std.debug.assert(module.populated);

var symbol_i: usize = 0;
@ -904,7 +907,7 @@ const Msf = struct {
// These streams are not used, but still participate in the file
// and must be taken into account when resolving stream indices.
const Nil = 0xFFFFFFFF;
for (stream_sizes) |*s, i| {
for (stream_sizes) |*s| {
const size = try directory.reader().readIntLittle(u32);
s.* = if (size == Nil) 0 else blockCountFromSize(size, superblock.BlockSize);
}

@ -428,7 +428,7 @@ pub fn PriorityDequeue(comptime T: type) type {
warn("{}, ", .{e});
}
warn("array: ", .{});
for (self.items) |e, i| {
for (self.items) |e| {
warn("{}, ", .{e});
}
warn("len: {} ", .{self.len});

@ -249,7 +249,7 @@ pub fn PriorityQueue(comptime T: type) type {
warn("{}, ", .{e});
}
warn("array: ", .{});
for (self.items) |e, i| {
for (self.items) |e| {
warn("{}, ", .{e});
}
warn("len: {} ", .{self.len});

@ -419,6 +419,7 @@ pub const ArgIteratorWindows = struct {
};
}
fn emitBackslashes(self: *ArgIteratorWindows, buf: *std.ArrayList(u16), emit_count: usize) !void {
_ = self;
var i: usize = 0;
while (i < emit_count) : (i += 1) {
try buf.append(std.mem.nativeToLittle(u16, '\\'));
@ -748,6 +749,7 @@ pub fn getSelfExeSharedLibPaths(allocator: *Allocator) error{OutOfMemory}![][:0]
}
try os.dl_iterate_phdr(&paths, error{OutOfMemory}, struct {
fn callback(info: *os.dl_phdr_info, size: usize, list: *List) !void {
_ = size;
const name = info.dlpi_name orelse return;
if (name[0] == '/') {
const item = try list.allocator.dupeZ(u8, mem.spanZ(name));

@ -175,5 +175,5 @@ test "exp dist sanity" {
test "table gen" {
if (please_windows_dont_oom) return error.SkipZigTest;

const table = NormDist;
_ = NormDist;
}

@ -37,9 +37,11 @@ pub fn binarySearch(
test "binarySearch" {
const S = struct {
fn order_u32(context: void, lhs: u32, rhs: u32) math.Order {
_ = context;
return math.order(lhs, rhs);
}
fn order_i32(context: void, lhs: i32, rhs: i32) math.Order {
_ = context;
return math.order(lhs, rhs);
}
};
@ -1133,6 +1135,7 @@ fn swap(
pub fn asc(comptime T: type) fn (void, T, T) bool {
const impl = struct {
fn inner(context: void, a: T, b: T) bool {
_ = context;
return a < b;
}
};
@ -1144,6 +1147,7 @@ pub fn asc(comptime T: type) fn (void, T, T) bool {
pub fn desc(comptime T: type) fn (void, T, T) bool {
const impl = struct {
fn inner(context: void, a: T, b: T) bool {
_ = context;
return a > b;
}
};
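
The `sort` hunks show the same discard applied to comparator contexts: `asc`/`desc` and the `binarySearch` helpers take a `context: void` parameter purely to satisfy the generic comparator shape. A hedged usage sketch against the context-taking sort API of this era:

const std = @import("std");

fn lessThanU32(context: void, a: u32, b: u32) bool {
    _ = context; // the sort API threads a context through; void here
    return a < b;
}

test "sort with a void context" {
    var items = [_]u32{ 3, 1, 2 };
    std.sort.sort(u32, &items, {}, lessThanU32);
    try std.testing.expectEqual(@as(u32, 1), items[0]);
}
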
@ -160,6 +160,7 @@ fn strncmp(_l: [*:0]const u8, _r: [*:0]const u8, _n: usize) callconv(.C) c_int {
}

fn strerror(errnum: c_int) callconv(.C) [*:0]const u8 {
_ = errnum;
return "TODO strerror implementation";
}

@ -173,6 +174,7 @@ test "strncmp" {
// Avoid dragging in the runtime safety mechanisms into this .o file,
// unless we're trying to test this file.
pub fn panic(msg: []const u8, error_return_trace: ?*builtin.StackTrace) noreturn {
_ = error_return_trace;
if (builtin.is_test) {
@setCold(true);
std.debug.panic("{s}", .{msg});

@ -602,6 +602,7 @@ pub usingnamespace @import("compiler_rt/atomics.zig");
// Avoid dragging in the runtime safety mechanisms into this .o file,
// unless we're trying to test this file.
pub fn panic(msg: []const u8, error_return_trace: ?*builtin.StackTrace) noreturn {
_ = error_return_trace;
@setCold(true);
if (is_test) {
std.debug.panic("{s}", .{msg});

@ -83,7 +83,6 @@ fn addXf3(comptime T: type, a: T, b: T) T {

const signBit = (@as(Z, 1) << (significandBits + exponentBits));
const maxExponent = ((1 << exponentBits) - 1);
const exponentBias = (maxExponent >> 1);

const implicitBit = (@as(Z, 1) << significandBits);
const quietBit = implicitBit >> 1;
@ -98,10 +97,6 @@ fn addXf3(comptime T: type, a: T, b: T) T {
const aAbs = aRep & absMask;
const bAbs = bRep & absMask;

const negative = (aRep & signBit) != 0;
const exponent = @intCast(i32, aAbs >> significandBits) - exponentBias;
const significand = (aAbs & significandMask) | implicitBit;

const infRep = @bitCast(Z, std.math.inf(T));

// Detect if a or b is zero, infinity, or NaN.

@ -80,18 +80,21 @@ var spinlocks: SpinlockTable = SpinlockTable{};
// Those work on any object no matter the pointer alignment nor its size.

fn __atomic_load(size: u32, src: [*]u8, dest: [*]u8, model: i32) callconv(.C) void {
_ = model;
var sl = spinlocks.get(@ptrToInt(src));
defer sl.release();
@memcpy(dest, src, size);
}

fn __atomic_store(size: u32, dest: [*]u8, src: [*]u8, model: i32) callconv(.C) void {
_ = model;
var sl = spinlocks.get(@ptrToInt(dest));
defer sl.release();
@memcpy(dest, src, size);
}

fn __atomic_exchange(size: u32, ptr: [*]u8, val: [*]u8, old: [*]u8, model: i32) callconv(.C) void {
_ = model;
var sl = spinlocks.get(@ptrToInt(ptr));
defer sl.release();
@memcpy(old, ptr, size);
@ -106,6 +109,8 @@ fn __atomic_compare_exchange(
success: i32,
failure: i32,
) callconv(.C) i32 {
_ = success;
_ = failure;
var sl = spinlocks.get(@ptrToInt(ptr));
defer sl.release();
for (ptr[0..size]) |b, i| {
@ -135,6 +140,7 @@ comptime {
fn atomicLoadFn(comptime T: type) fn (*T, i32) callconv(.C) T {
return struct {
fn atomic_load_N(src: *T, model: i32) callconv(.C) T {
_ = model;
if (@sizeOf(T) > largest_atomic_size) {
var sl = spinlocks.get(@ptrToInt(src));
defer sl.release();
@ -162,6 +168,7 @@ comptime {
fn atomicStoreFn(comptime T: type) fn (*T, T, i32) callconv(.C) void {
return struct {
fn atomic_store_N(dst: *T, value: T, model: i32) callconv(.C) void {
_ = model;
if (@sizeOf(T) > largest_atomic_size) {
var sl = spinlocks.get(@ptrToInt(dst));
defer sl.release();
@ -189,6 +196,7 @@ comptime {
fn atomicExchangeFn(comptime T: type) fn (*T, T, i32) callconv(.C) T {
return struct {
fn atomic_exchange_N(ptr: *T, val: T, model: i32) callconv(.C) T {
_ = model;
if (@sizeOf(T) > largest_atomic_size) {
var sl = spinlocks.get(@ptrToInt(ptr));
defer sl.release();
@ -218,6 +226,8 @@ comptime {
fn atomicCompareExchangeFn(comptime T: type) fn (*T, *T, T, i32, i32) callconv(.C) i32 {
return struct {
fn atomic_compare_exchange_N(ptr: *T, expected: *T, desired: T, success: i32, failure: i32) callconv(.C) i32 {
_ = success;
_ = failure;
if (@sizeOf(T) > largest_atomic_size) {
var sl = spinlocks.get(@ptrToInt(ptr));
defer sl.release();
@ -255,6 +265,7 @@ comptime {
fn fetchFn(comptime T: type, comptime op: builtin.AtomicRmwOp) fn (*T, T, i32) callconv(.C) T {
return struct {
pub fn fetch_op_N(ptr: *T, val: T, model: i32) callconv(.C) T {
_ = model;
if (@sizeOf(T) > largest_atomic_size) {
var sl = spinlocks.get(@ptrToInt(ptr));
defer sl.release();

@ -100,7 +100,7 @@ const test_vectors = init: {
};

test "compare f64" {
for (test_vectors) |vector, i| {
for (test_vectors) |vector| {
try std.testing.expect(test__cmpdf2(vector));
}
}

@ -100,7 +100,7 @@ const test_vectors = init: {
};

test "compare f32" {
for (test_vectors) |vector, i| {
for (test_vectors) |vector| {
try std.testing.expect(test__cmpsf2(vector));
}
}

@ -12,7 +12,6 @@ const wideMultiply = @import("divdf3.zig").wideMultiply;
pub fn __divtf3(a: f128, b: f128) callconv(.C) f128 {
@setRuntimeSafety(builtin.is_test);
const Z = std.meta.Int(.unsigned, 128);
const SignedZ = std.meta.Int(.signed, 128);

const significandBits = std.math.floatMantissaBits(f128);
const exponentBits = std.math.floatExponentBits(f128);

@ -46,7 +46,6 @@ fn extendXfYf2(comptime dst_t: type, comptime src_t: type, a: std.meta.Int(.unsi
const dst_rep_t = std.meta.Int(.unsigned, @typeInfo(dst_t).Float.bits);
const srcSigBits = std.math.floatMantissaBits(src_t);
const dstSigBits = std.math.floatMantissaBits(dst_t);
const SrcShift = std.math.Log2Int(src_rep_t);
const DstShift = std.math.Log2Int(dst_rep_t);

// Various constants whose values follow from the type parameters.

@ -16,7 +16,6 @@ pub fn fixuint(comptime fp_t: type, comptime fixuint_t: type, a: fp_t) fixuint_t
else => unreachable,
};
const typeWidth = @typeInfo(rep_t).Int.bits;
const srep_t = @import("std").meta.Int(.signed, typeWidth);
const significandBits = switch (fp_t) {
f32 => 23,
f64 => 52,

@ -50,7 +50,6 @@ fn truncXfYf2(comptime dst_t: type, comptime src_t: type, a: src_t) dst_t {
const srcSigBits = std.math.floatMantissaBits(src_t);
const dstSigBits = std.math.floatMantissaBits(dst_t);
const SrcShift = std.math.Log2Int(src_rep_t);
const DstShift = std.math.Log2Int(dst_rep_t);

// Various constants whose values follow from the type parameters.
// Any reasonable optimizer will fold and propagate all of these.

@ -27,6 +27,8 @@ extern fn memmove(dest: ?[*]u8, src: ?[*]const u8, n: usize) callconv(.C) ?[*]u8

// Avoid dragging in the runtime safety mechanisms into this .o file.
pub fn panic(msg: []const u8, error_return_trace: ?*builtin.StackTrace) noreturn {
_ = msg;
_ = error_return_trace;
@setCold(true);
if (@hasDecl(std.os, "abort"))
std.os.abort();