mirror of https://github.com/ziglang/zig.git (synced 2025-12-06 06:13:07 +00:00)

std: fix typos (#20560)

This commit is contained in:
parent 49f2cca872
commit 13070448f5
@@ -3498,7 +3498,7 @@ void do_a_thing(struct Foo *foo) {
 <p>
 As a motivating example, consider the statement {#syntax#}const x: u32 = 42;{#endsyntax#}. The type
 annotation here provides a result type of {#syntax#}u32{#endsyntax#} to the initialization expression
-{#syntax#}42{#endsyntax#}, instructing the compiler to coerce this integer (initally of type
+{#syntax#}42{#endsyntax#}, instructing the compiler to coerce this integer (initially of type
 {#syntax#}comptime_int{#endsyntax#}) to this type. We will see more examples shortly.
 </p>
 <p>
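
The passage above describes Zig's result-type mechanism; a minimal standalone sketch of the behavior it documents (illustrative, not part of the commit):

const std = @import("std");

test "result type coerces comptime_int" {
    // 42 starts out as comptime_int; the u32 annotation supplies a result
    // type, so the initializer is coerced to u32 at compile time.
    const x: u32 = 42;
    try std.testing.expect(@TypeOf(x) == u32);
}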
@@ -6833,7 +6833,7 @@ coding style.
 <li>utils, misc, or somebody's initials</li>
 </ul>
 <p>Everything is a value, all types are data, everything is context, all logic manages state.
-Nothing is communicated by using a word that applies to all types.</p>
+Nothing is communicated by using a word that applies to all types.</p>
 <p>Temptation to use "utilities", "miscellaneous", or somebody's initials
 is a failure to categorize, or more commonly, overcategorization. Such
 declarations can live at the root of a module that needs them with no
@@ -2,7 +2,7 @@ const std = @import("std");
 const expect = std.testing.expect;

 const Tuple = struct { u8, u8 };
-test "coercion from homogenous tuple to array" {
+test "coercion from homogeneous tuple to array" {
     const tuple: Tuple = .{ 5, 6 };
     const array: [2]u8 = tuple;
     _ = array;
@@ -20,8 +20,8 @@ pub fn deinit(self: *@This()) void {
     self.* = undefined;
 }

-pub fn ensureTotalCapacity(self: *@This(), bit_capcity: usize) Allocator.Error!void {
-    const byte_capacity = (bit_capcity + 7) >> 3;
+pub fn ensureTotalCapacity(self: *@This(), bit_capacity: usize) Allocator.Error!void {
+    const byte_capacity = (bit_capacity + 7) >> 3;
     try self.bytes.ensureTotalCapacity(byte_capacity);
 }
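
As context for the renamed parameter, `(bit_capacity + 7) >> 3` is ceil(bit_capacity / 8): adding 7 bumps any partial byte up to a whole one before the shift divides by 8. A small standalone check (the helper name is illustrative, not from the commit):

const std = @import("std");

fn byteCapacity(bit_capacity: usize) usize {
    return (bit_capacity + 7) >> 3; // round up to whole bytes
}

test "bit capacity rounds up to whole bytes" {
    try std.testing.expectEqual(@as(usize, 0), byteCapacity(0));
    try std.testing.expectEqual(@as(usize, 1), byteCapacity(1));
    try std.testing.expectEqual(@as(usize, 1), byteCapacity(8));
    try std.testing.expectEqual(@as(usize, 2), byteCapacity(9));
}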
@@ -2055,7 +2055,7 @@ pub fn dependencyFromBuildZig(
     }

     const full_path = b.pathFromRoot("build.zig.zon");
-    debug.panic("'{}' is not a build.zig struct of a dependecy in '{s}'", .{ build_zig, full_path });
+    debug.panic("'{}' is not a build.zig struct of a dependency in '{s}'", .{ build_zig, full_path });
 }

 fn userValuesAreSame(lhs: UserValue, rhs: UserValue) bool {
@@ -887,7 +887,7 @@ pub const all_features = blk: {
     };
     result[@intFromEnum(Feature.nv)] = .{
         .llvm_name = "nv",
-        .description = "Enable v8.4-A Nested Virtualization Enchancement (FEAT_NV, FEAT_NV2)",
+        .description = "Enable v8.4-A Nested Virtualization Enhancement (FEAT_NV, FEAT_NV2)",
         .dependencies = featureSet(&[_]Feature{}),
     };
     result[@intFromEnum(Feature.outline_atomics)] = .{
@@ -446,7 +446,7 @@ pub const all_features = blk: {
     };
     result[@intFromEnum(Feature.fix_cmse_cve_2021_35465)] = .{
         .llvm_name = "fix-cmse-cve-2021-35465",
-        .description = "Mitigate against the cve-2021-35465 security vulnurability",
+        .description = "Mitigate against the cve-2021-35465 security vulnerability",
         .dependencies = featureSet(&[_]Feature{}),
     };
     result[@intFromEnum(Feature.fix_cortex_a57_aes_1742098)] = .{
@@ -214,7 +214,7 @@ pub const all_features = blk: {
     };
     result[@intFromEnum(Feature.dsp_silan)] = .{
         .llvm_name = "dsp_silan",
-        .description = "Enable DSP Silan instrutions",
+        .description = "Enable DSP Silan instructions",
         .dependencies = featureSet(&[_]Feature{}),
     };
     result[@intFromEnum(Feature.dspe60)] = .{
@@ -224,7 +224,7 @@ pub const all_features = blk: {
     };
     result[@intFromEnum(Feature.dspv2)] = .{
         .llvm_name = "dspv2",
-        .description = "Enable DSP V2.0 instrutions",
+        .description = "Enable DSP V2.0 instructions",
         .dependencies = featureSet(&[_]Feature{}),
     };
     result[@intFromEnum(Feature.e1)] = .{
@@ -243,7 +243,7 @@ pub const all_features = blk: {
     };
     result[@intFromEnum(Feature.edsp)] = .{
         .llvm_name = "edsp",
-        .description = "Enable DSP instrutions",
+        .description = "Enable DSP instructions",
         .dependencies = featureSet(&[_]Feature{}),
     };
     result[@intFromEnum(Feature.elrw)] = .{
@@ -333,12 +333,12 @@ pub const all_features = blk: {
     };
     result[@intFromEnum(Feature.hwdiv)] = .{
         .llvm_name = "hwdiv",
-        .description = "Enable divide instrutions",
+        .description = "Enable divide instructions",
         .dependencies = featureSet(&[_]Feature{}),
     };
     result[@intFromEnum(Feature.istack)] = .{
         .llvm_name = "istack",
-        .description = "Enable interrput attribute",
+        .description = "Enable interrupt attribute",
         .dependencies = featureSet(&[_]Feature{}),
     };
     result[@intFromEnum(Feature.java)] = .{
@@ -362,7 +362,7 @@ pub const all_features = blk: {
     };
     result[@intFromEnum(Feature.multiple_stld)] = .{
         .llvm_name = "multiple_stld",
-        .description = "Enable multiple load/store instrutions",
+        .description = "Enable multiple load/store instructions",
         .dependencies = featureSet(&[_]Feature{}),
     };
     result[@intFromEnum(Feature.nvic)] = .{
@@ -372,7 +372,7 @@ pub const all_features = blk: {
     };
     result[@intFromEnum(Feature.pushpop)] = .{
         .llvm_name = "pushpop",
-        .description = "Enable push/pop instrutions",
+        .description = "Enable push/pop instructions",
         .dependencies = featureSet(&[_]Feature{}),
     };
     result[@intFromEnum(Feature.smart)] = .{
@@ -823,14 +823,14 @@ pub const all_features = blk: {
     };
     result[@intFromEnum(Feature.zcmp)] = .{
         .llvm_name = "zcmp",
-        .description = "'Zcmp' (sequenced instuctions for code-size reduction)",
+        .description = "'Zcmp' (sequenced instructions for code-size reduction)",
         .dependencies = featureSet(&[_]Feature{
             .zca,
         }),
     };
     result[@intFromEnum(Feature.zcmt)] = .{
         .llvm_name = "zcmt",
-        .description = "'Zcmt' (table jump instuctions for code-size reduction)",
+        .description = "'Zcmt' (table jump instructions for code-size reduction)",
         .dependencies = featureSet(&[_]Feature{
             .zca,
             .zicsr,
@@ -170,7 +170,7 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
         /// operations.
         /// Invalidates pre-existing pointers to elements at and after `index`.
         /// Invalidates all pre-existing element pointers if capacity must be
-        /// increased to accomodate the new elements.
+        /// increased to accommodate the new elements.
         /// Asserts that the index is in bounds or equal to the length.
         pub fn addManyAt(self: *Self, index: usize, count: usize) Allocator.Error![]T {
             const new_len = try addOrOom(self.items.len, count);
@@ -227,7 +227,7 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
         /// This operation is O(N).
         /// Invalidates pre-existing pointers to elements at and after `index`.
         /// Invalidates all pre-existing element pointers if capacity must be
-        /// increased to accomodate the new elements.
+        /// increased to accommodate the new elements.
         /// Asserts that the index is in bounds or equal to the length.
         pub fn insertSlice(
             self: *Self,
@@ -740,7 +740,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
         /// operations.
         /// Invalidates pre-existing pointers to elements at and after `index`.
         /// Invalidates all pre-existing element pointers if capacity must be
-        /// increased to accomodate the new elements.
+        /// increased to accommodate the new elements.
         /// Asserts that the index is in bounds or equal to the length.
         pub fn addManyAt(
             self: *Self,
@@ -776,7 +776,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
         /// This operation is O(N).
         /// Invalidates pre-existing pointers to elements at and after `index`.
         /// Invalidates all pre-existing element pointers if capacity must be
-        /// increased to accomodate the new elements.
+        /// increased to accommodate the new elements.
         /// Asserts that the index is in bounds or equal to the length.
         pub fn insertSlice(
             self: *Self,
@@ -2523,7 +2523,7 @@ pub const F = struct {
     /// add signature from same file (used by dyld for shared libs)
     pub const ADDFILESIGS = 61;
     /// used in conjunction with F.NOCACHE to indicate that DIRECT, synchronous writes
-    /// should not be used (i.e. its ok to temporaily create cached pages)
+    /// should not be used (i.e. its ok to temporarily create cached pages)
     pub const NODIRECT = 62;
     ///Get the protection class of a file from the EA, returns int
     pub const GETPROTECTIONCLASS = 63;
@@ -581,7 +581,7 @@ pub const SectionHeaderFlags = packed struct {
     /// This is valid for object files only.
     LNK_INFO: u1 = 0,

-    _reserverd_2: u1 = 0,
+    _reserved_2: u1 = 0,

     /// The section will not become part of the image.
     /// This is valid only for object files.
@@ -70,8 +70,8 @@ pub const store = struct {
     }
 };

-/// Container defines header/footer arround deflate bit stream. Gzip and zlib
-/// compression algorithms are containers arround deflate bit stream body.
+/// Container defines header/footer around deflate bit stream. Gzip and zlib
+/// compression algorithms are containers around deflate bit stream body.
 const Container = @import("flate/container.zig").Container;
 const std = @import("std");
 const testing = std.testing;
@@ -109,7 +109,7 @@ const ReadBlock = struct {
     len: usize,
 };

-/// Returns position of continous read block data.
+/// Returns position of continuous read block data.
 fn readBlock(self: *Self, max: usize) ReadBlock {
     const r = self.rp & mask;
     const w = self.wp & mask;
@@ -26,7 +26,7 @@ pub fn add(self: *Self, data: []const u8, pos: u16) u16 {
     return self.set(h, pos);
 }

-// Retruns previous location with the same hash value given the current
+// Returns previous location with the same hash value given the current
 // position.
 pub fn prev(self: *Self, pos: u16) u16 {
     return self.chain[pos];
@@ -84,7 +84,7 @@ pub fn match(self: *Self, prev_pos: u16, curr_pos: u16, min_len: u16) u16 {
     const prev_lh = self.buffer[prev_pos..][0..max_len];
     const curr_lh = self.buffer[curr_pos..][0..max_len];

-    // If we alread have match (min_len > 0),
+    // If we already have match (min_len > 0),
     // test the first byte above previous len a[min_len] != b[min_len]
     // and then all the bytes from that position to zero.
     // That is likely positions to find difference than looping from first bytes.
@@ -110,7 +110,7 @@ pub fn show(t: Token) void {
     }
 }

-// Retruns index in match_lengths table for each length in range 0-255.
+// Returns index in match_lengths table for each length in range 0-255.
 const match_lengths_index = [_]u8{
     0, 1, 2, 3, 4, 5, 6, 7, 8, 8,
     9, 9, 10, 10, 11, 11, 12, 12, 12, 12,
@@ -57,7 +57,7 @@ pub fn BitReader(comptime T: type, comptime ReaderType: type) type {
         /// it may be some extra zero bits in buffer.
         pub inline fn fill(self: *Self, nice: u6) !void {
             if (self.nbits >= nice and nice != 0) {
-                return; // We have enought bits
+                return; // We have enough bits
             }
             // Read more bits from forward reader
@@ -96,7 +96,7 @@ pub fn BitReader(comptime T: type, comptime ReaderType: type) type {
         pub const flag = struct {
             pub const peek: u3 = 0b001; // dont advance internal buffer, just get bits, leave them in buffer
             pub const buffered: u3 = 0b010; // assume that there is no need to fill, fill should be called before
-            pub const reverse: u3 = 0b100; // bit reverse readed bits
+            pub const reverse: u3 = 0b100; // bit reverse read bits
         };

         /// Alias for readF(U, 0).
@@ -133,7 +133,7 @@ pub fn BitReader(comptime T: type, comptime ReaderType: type) type {
                 try self.fill(n);
                 return @truncate(self.bits);
             },
-            flag.buffered => { // no fill, assume that buffer has enought bits
+            flag.buffered => { // no fill, assume that buffer has enough bits
                 const u: U = @truncate(self.bits);
                 try self.shift(n);
                 return u;
@@ -212,7 +212,7 @@ pub fn BitReader(comptime T: type, comptime ReaderType: type) type {
         }

         /// Read deflate fixed fixed code.
-        /// Reads first 7 bits, and then mybe 1 or 2 more to get full 7,8 or 9 bit code.
+        /// Reads first 7 bits, and then maybe 1 or 2 more to get full 7,8 or 9 bit code.
         /// ref: https://datatracker.ietf.org/doc/html/rfc1951#page-12
         /// Lit Value Bits Codes
         /// --------- ---- -----
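
For reference, the 7/8/9-bit split the corrected comment mentions comes from the fixed Huffman code of RFC 1951, section 3.2.6:

// Fixed literal/length code lengths per RFC 1951 §3.2.6:
//   lit values   0..143 -> 8 bits
//   lit values 144..255 -> 9 bits
//   lit values 256..279 -> 7 bits
//   lit values 280..287 -> 8 bits
// so the reader fetches 7 bits first and extends by 1 or 2 as needed.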
@@ -48,7 +48,7 @@ pub fn BlockWriter(comptime WriterType: type) type {
         /// Should be called only when bit stream is at byte boundary.
         ///
         /// That is after final block; when last byte could be incomplete or
-        /// after stored block; which is aligned to the byte bounday (it has x
+        /// after stored block; which is aligned to the byte boundary (it has x
         /// padding bits after first 3 bits).
         pub fn flush(self: *Self) Error!void {
             try self.bit_writer.flush();
@@ -1,5 +1,5 @@
 pub const deflate = struct {
-    // Number of tokens to accumlate in deflate before starting block encoding.
+    // Number of tokens to accumulate in deflate before starting block encoding.
     //
     // In zlib this depends on memlevel: 6 + memlevel, where default memlevel is
     // 8 and max 9 that gives 14 or 15 bits.
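
Spelled out, the sizing described in the comment above (memlevel values as stated there):

// 6 + memlevel bits of token buffer:
//   default memlevel 8 -> 14 bits -> 1 << 14 = 16384 tokens per block
//   maximum memlevel 9 -> 15 bits -> 1 << 15 = 32768 tokens per block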
@@ -183,7 +183,7 @@ fn Deflate(comptime container: Container, comptime WriterType: type, comptime Bl
                     // Write match from previous position.
                     step = try self.addMatch(m) - 1; // we already advanced 1 from previous position
                 } else {
-                    // No match at previous postition.
+                    // No match at previous position.
                     // Write previous literal if any, and remember this literal.
                     try self.addPrevLiteral();
                     self.prev_literal = literal;
@@ -268,9 +268,9 @@ fn Deflate(comptime container: Container, comptime WriterType: type, comptime Bl
         fn flushTokens(self: *Self, flush_opt: FlushOption) !void {
             // Pass tokens to the token writer
             try self.block_writer.write(self.tokens.tokens(), flush_opt == .final, self.win.tokensBuffer());
-            // Stored block ensures byte aligment.
+            // Stored block ensures byte alignment.
             // It has 3 bits (final, block_type) and then padding until byte boundary.
-            // After that everyting is aligned to the boundary in the stored block.
+            // After that everything is aligned to the boundary in the stored block.
             // Empty stored block is Ob000 + (0-7) bits of padding + 0x00 0x00 0xFF 0xFF.
             // Last 4 bytes are byte aligned.
             if (flush_opt == .flush) {
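
The four trailing bytes in the comment above are the stored-block length fields of RFC 1951, section 3.2.4:

// An empty stored block, bit by bit: BFINAL (0 when flushing mid-stream),
// BTYPE = 00 (stored), 0-7 padding bits to the byte boundary, then
// LEN = 0x0000 and NLEN = ~LEN = 0xFFFF, least-significant byte first:
// 0x00 0x00 0xFF 0xFF.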
@@ -572,7 +572,7 @@ test "tokenization" {
     }
 }

-// Tests that tokens writen are equal to expected token list.
+// Tests that tokens written are equal to expected token list.
 const TestTokenWriter = struct {
     const Self = @This();

@@ -655,7 +655,7 @@ test "file tokenization" {
         const TokenWriter = TokenDecoder(@TypeOf(writer));
         var cmp = try Deflate(.raw, WriterType, TokenWriter).init(writer, .{ .level = level });

-        // Stream uncompressed `orignal` data to the compressor. It will
+        // Stream uncompressed `original` data to the compressor. It will
         // produce tokens list and pass that list to the TokenDecoder. This
         // TokenDecoder uses CircularBuffer from inflate to convert list of
         // tokens back to the uncompressed stream.
@@ -132,7 +132,7 @@ fn HuffmanDecoder(
             if (n > max) max = n;
             count[n] += 1;
         }
-        if (max == 0) // emtpy tree
+        if (max == 0) // empty tree
             return;

         // check for an over-subscribed or incomplete set of lengths
@@ -255,7 +255,7 @@ test "encode/decode literals" {

     for (1..286) |j| { // for all different number of codes
         var enc: LiteralEncoder = .{};
-        // create freqencies
+        // create frequencies
         var freq = [_]u16{0} ** 286;
         freq[256] = 1; // ensure we have end of block code
         for (&freq, 1..) |*f, i| {
@@ -263,7 +263,7 @@ test "encode/decode literals" {
            f.* = @intCast(i);
         }

-        // encoder from freqencies
+        // encoder from frequencies
         enc.generate(&freq, 15);

         // get code_lens from encoder
@@ -168,7 +168,7 @@ pub fn HuffmanEncoder(comptime size: usize) type {
         while (true) {
             var l = &levels[level];
             if (l.next_pair_freq == math.maxInt(i32) and l.next_char_freq == math.maxInt(i32)) {
-                // We've run out of both leafs and pairs.
+                // We've run out of both leaves and pairs.
                 // End all calculations for this level.
                 // To make sure we never come back to this level or any lower level,
                 // set next_pair_freq impossibly large.
@@ -99,7 +99,7 @@ pub fn Inflate(comptime container: Container, comptime LookaheadType: type, comp

         fn storedBlock(self: *Self) !bool {
             self.bits.alignToByte(); // skip padding until byte boundary
-            // everyting after this is byte aligned in stored block
+            // everything after this is byte aligned in stored block
             var len = try self.bits.read(u16);
             const nlen = try self.bits.read(u16);
             if (len != ~nlen) return error.WrongStoredBlockNlen;
@@ -155,7 +155,7 @@ pub fn Inflate(comptime container: Container, comptime LookaheadType: type, comp
         fn dynamicBlockHeader(self: *Self) !void {
             const hlit: u16 = @as(u16, try self.bits.read(u5)) + 257; // number of ll code entries present - 257
             const hdist: u16 = @as(u16, try self.bits.read(u5)) + 1; // number of distance code entries - 1
-            const hclen: u8 = @as(u8, try self.bits.read(u4)) + 4; // hclen + 4 code lenths are encoded
+            const hclen: u8 = @as(u8, try self.bits.read(u4)) + 4; // hclen + 4 code lengths are encoded

             if (hlit > 286 or hdist > 30)
                 return error.InvalidDynamicBlockHeader;
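
The offsets in these three reads mirror the dynamic-header encoding of RFC 1951, section 3.2.7; the resulting ranges:

// HLIT:  5 bits + 257 -> 257..288 literal/length codes (above 286 rejected
//        by the check that follows)
// HDIST: 5 bits + 1   -> 1..32 distance codes (above 30 rejected)
// HCLEN: 4 bits + 4   -> 4..19 code-length codes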
@@ -180,7 +180,7 @@ pub fn Inflate(comptime container: Container, comptime LookaheadType: type, comp
                 return error.InvalidDynamicBlockHeader;
             }

-            // literal code lengts to literal decoder
+            // literal code lengths to literal decoder
             try self.lit_dec.generate(dec_lens[0..hlit]);

             // distance code lengths to distance decoder
@@ -977,7 +977,7 @@ pub const rsa = struct {
         // the hash function (2^61 - 1 octets for SHA-1), output
         // "inconsistent" and stop.
         // All the cryptographic hash functions in the standard library have a limit of >= 2^61 - 1.
-        // Even then, this check is only there for paranoia. In the context of TLS certifcates, emBit cannot exceed 4096.
+        // Even then, this check is only there for paranoia. In the context of TLS certificates, emBit cannot exceed 4096.
         if (emBit >= 1 << 61) return error.InvalidSignature;

         // emLen = \ceil(emBits/8)
@@ -41,7 +41,7 @@ pub const OverflowError = error{Overflow};
 /// Invalid modulus. Modulus must be odd.
 pub const InvalidModulusError = error{ EvenModulus, ModulusTooSmall };

-/// Exponentation with a null exponent.
+/// Exponentiation with a null exponent.
 /// Exponentiation in cryptographic protocols is almost always a sign of a bug which can lead to trivial attacks.
 /// Therefore, this module returns an error when a null exponent is encountered, encouraging applications to handle this case explicitly.
 pub const NullExponentError = error{NullExponent};
@@ -379,7 +379,7 @@ fn Kyber(comptime p: Params) type {

         /// Create a new key pair.
         /// If seed is null, a random seed will be generated.
-        /// If a seed is provided, the key pair will be determinsitic.
+        /// If a seed is provided, the key pair will be deterministic.
         pub fn create(seed_: ?[seed_length]u8) !KeyPair {
             const seed = seed_ orelse sk: {
                 var random_seed: [seed_length]u8 = undefined;
@@ -1253,7 +1253,7 @@ const Poly = struct {
                 t |= @as(T, buf[batch_bytes * i + j]) << (8 * j);
             }

-            // Accumelate `a's and `b's together by masking them out, shifting
+            // Accumulate `a's and `b's together by masking them out, shifting
             // and adding. For η=3, we have d = a₁ + a₂ + a₃ + 8(b₁ + b₂ + b₃) + …
             var d: T = 0;
             inline for (0..eta) |j| {
@@ -487,7 +487,7 @@ pub const Box = struct {
     /// A key pair.
     pub const KeyPair = X25519.KeyPair;

-    /// Compute a secret suitable for `secretbox` given a recipent's public key and a sender's secret key.
+    /// Compute a secret suitable for `secretbox` given a recipient's public key and a sender's secret key.
     pub fn createSharedSecret(public_key: [public_length]u8, secret_key: [secret_length]u8) (IdentityElementError || WeakPublicKeyError)![shared_length]u8 {
         const p = try X25519.scalarmult(secret_key, public_key);
         const zero = [_]u8{0} ** 16;
@@ -1,6 +1,6 @@
 //! Secure Hashing Algorithm 2 (SHA2)
 //!
-//! Published by the National Institue of Standards and Technology (NIST) [1] [2].
+//! Published by the National Institute of Standards and Technology (NIST) [1] [2].
 //!
 //! Truncation mitigates length-extension attacks but increases vulnerability to collision
 //! attacks. Collision attacks remain impractical for all types defined here.
@@ -40,8 +40,8 @@ const assert = std.debug.assert;
 pub const Client = @import("tls/Client.zig");

 pub const record_header_len = 5;
-pub const max_cipertext_inner_record_len = 1 << 14;
-pub const max_ciphertext_len = max_cipertext_inner_record_len + 256;
+pub const max_ciphertext_inner_record_len = 1 << 14;
+pub const max_ciphertext_len = max_ciphertext_inner_record_len + 256;
 pub const max_ciphertext_record_len = max_ciphertext_len + record_header_len;
 pub const hello_retry_request_sequence = [32]u8{
     0xCF, 0x21, 0xAD, 0x74, 0xE5, 0x9A, 0x61, 0x11, 0xBE, 0x1D, 0x8C, 0x02, 0x1E, 0x65, 0xB8, 0x91,
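
Evaluated, the renamed constants give the TLS record bounds (the 2^14 + 256 ciphertext expansion limit is from RFC 8446, section 5.2):

// max_ciphertext_inner_record_len = 1 << 14            = 16384
// max_ciphertext_len              = 16384 + 256        = 16640
// max_ciphertext_record_len       = 16640 + 5 (header) = 16645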
@@ -819,7 +819,7 @@ fn prepareCiphertextRecord(
     const close_notify_alert_reserved = tls.close_notify_alert.len + overhead_len;
     while (true) {
         const encrypted_content_len: u16 = @intCast(@min(
-            @min(bytes.len - bytes_i, tls.max_cipertext_inner_record_len),
+            @min(bytes.len - bytes_i, tls.max_ciphertext_inner_record_len),
             ciphertext_buf.len -|
                 (close_notify_alert_reserved + overhead_len + ciphertext_end),
         ));
@@ -38,7 +38,7 @@ pub fn ipRegNum() u8 {

 pub fn fpRegNum(reg_context: RegisterContext) u8 {
     return switch (builtin.cpu.arch) {
-        // GCC on OS X historicaly did the opposite of ELF for these registers (only in .eh_frame), and that is now the convention for MachO
+        // GCC on OS X historically did the opposite of ELF for these registers (only in .eh_frame), and that is now the convention for MachO
         .x86 => if (reg_context.eh_frame and reg_context.is_macho) 4 else 5,
         .x86_64 => 6,
         .arm => 11,
@@ -15,7 +15,7 @@ pub const ExpressionContext = struct {
     /// The dwarf format of the section this expression is in
     format: dwarf.Format = .@"32",

-    /// If specified, any addresses will pass through this function before being acccessed
+    /// If specified, any addresses will pass through this function before being accessed
     isValidMemory: ?*const fn (address: usize) bool = null,

     /// The compilation unit this expression relates to, if any
@@ -42,14 +42,14 @@ pub const ExpressionOptions = struct {
     /// The address size of the target architecture
     addr_size: u8 = @sizeOf(usize),

-    /// Endianess of the target architecture
+    /// Endianness of the target architecture
     endian: std.builtin.Endian = builtin.target.cpu.arch.endian(),

     /// Restrict the stack machine to a subset of opcodes used in call frame instructions
     call_frame_context: bool = false,
 };

-// Explcitly defined to support executing sub-expressions
+// Explicitly defined to support executing sub-expressions
 pub const ExpressionError = error{
     UnimplementedExpressionCall,
     UnimplementedOpcode,
@@ -1,4 +1,4 @@
-// There is a generic CRC implementation "Crc()" which can be paramterized via
+// There is a generic CRC implementation "Crc()" which can be parameterized via
 // the Algorithm struct for a plethora of uses.
 //
 // The primary interface for all of the standard CRC algorithms is the
@@ -79,7 +79,7 @@ pub const Wyhash = struct {
             @memcpy(scratch[0..rem], self.buf[self.buf.len - rem ..][0..rem]);
             @memcpy(scratch[rem..][0..self.buf_len], self.buf[0..self.buf_len]);

-            // Same as input but with additional bytes preceeding start in case of a short buffer
+            // Same as input but with additional bytes preceding start in case of a short buffer
             input = &scratch;
             offset = rem;
         }
@@ -902,7 +902,7 @@ pub fn intToEnum(comptime EnumTag: type, tag_int: anytype) IntToEnumError!EnumTa
         return error.InvalidEnumTag;
     }

-    // We don't direcly iterate over the fields of EnumTag, as that
+    // We don't directly iterate over the fields of EnumTag, as that
     // would require an inline loop. Instead, we create an array of
     // values that is comptime-know, but can be iterated at runtime
     // without requiring an inline loop. This generates better
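
A minimal sketch of the pattern that comment describes, written against the public API rather than intToEnum's internals (std.enums.values provides the comptime-built value array):

const std = @import("std");

fn intToEnumSketch(comptime E: type, tag_int: anytype) ?E {
    // The value table is built once at comptime; scanning it is an ordinary
    // runtime loop instead of an `inline for` over @typeInfo fields.
    for (std.enums.values(E)) |v| {
        if (@intFromEnum(v) == tag_int) return v;
    }
    return null;
}

test "comptime value table, runtime scan" {
    const Color = enum(u8) { red = 1, green = 2 };
    try std.testing.expectEqual(@as(?Color, .green), intToEnumSketch(Color, 2));
    try std.testing.expectEqual(@as(?Color, null), intToEnumSketch(Color, 7));
}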
@@ -570,7 +570,7 @@ pub fn futex_wake(uaddr: *const i32, futex_op: u32, val: i32) usize {
 /// Returns the array index of one of the woken futexes.
 /// No further information is provided: any number of other futexes may also
 /// have been woken by the same event, and if more than one futex was woken,
-/// the retrned index may refer to any one of them.
+/// the returned index may refer to any one of them.
 /// (It is not necessaryily the futex with the smallest index, nor the one
 /// most recently woken, nor...)
 pub fn futex2_waitv(
@@ -648,7 +648,7 @@ pub fn futex2_wake(
 pub fn futex2_requeue(
     /// Array describing the source and destination futex.
     waiters: [*]futex_waitv,
-    /// Unsed.
+    /// Unused.
     flags: u32,
     /// Number of futexes to wake.
     nr_wake: i32,
@@ -6009,7 +6009,7 @@ else
     /// values of this resource limit.
     NICE,

-    /// Maximum realtime priority allowed for non-priviledged
+    /// Maximum realtime priority allowed for non-privileged
     /// processes.
     RTPRIO,

@@ -7228,7 +7228,7 @@ pub const futex_waitv = extern struct {
     uaddr: u64,
     /// Flags for this waiter.
     flags: u32,
-    /// Reserved memeber to preserve alignment.
+    /// Reserved member to preserve alignment.
     /// Should be 0.
     __reserved: u32,
 };
@@ -3938,7 +3938,7 @@ test BufferGroup {

     // Server uses buffer group receive
     {
-        // Submit recv operation, buffer will be choosen from buffer group
+        // Submit recv operation, buffer will be chosen from buffer group
         _ = try buf_grp.recv(2, fds.server, 0);
         const submitted = try ring.submit();
         try testing.expectEqual(1, submitted);
@@ -3956,7 +3956,7 @@ test BufferGroup {
         // Get buffer from pool
         const buf = buf_grp.get(buffer_id)[0..len];
         try testing.expectEqualSlices(u8, &data, buf);
-        // Releaase buffer to the kernel when application is done with it
+        // Release buffer to the kernel when application is done with it
         buf_grp.put(buffer_id);
     }
 }
@@ -140,7 +140,7 @@ pub const F_STRICT_ALIGNMENT = 0x1;

 /// If BPF_F_ANY_ALIGNMENT is used in BPF_PROF_LOAD command, the verifier will
 /// allow any alignment whatsoever. On platforms with strict alignment
-/// requirements for loads ands stores (such as sparc and mips) the verifier
+/// requirements for loads and stores (such as sparc and mips) the verifier
 /// validates that all loads and stores provably follow this requirement. This
 /// flag turns that checking and enforcement off.
 ///
@@ -385,7 +385,7 @@ pub const rlimit_resource = enum(c_int) {
     /// values of this resource limit.
     NICE,

-    /// Maximum realtime priority allowed for non-priviledged
+    /// Maximum realtime priority allowed for non-privileged
     /// processes.
     RTPRIO,

@@ -370,7 +370,7 @@ pub const rlimit_resource = enum(c_int) {
     /// values of this resource limit.
     NICE,

-    /// Maximum realtime priority allowed for non-priviledged
+    /// Maximum realtime priority allowed for non-privileged
     /// processes.
     RTPRIO,

@@ -460,7 +460,7 @@ pub const rlimit_resource = enum(c_int) {
     /// values of this resource limit.
     NICE,

-    /// Maximum realtime priority allowed for non-priviledged
+    /// Maximum realtime priority allowed for non-privileged
     /// processes.
     RTPRIO,

@@ -184,7 +184,7 @@ pub const SIG = struct {
 pub const sigset_t = c_long;
 pub const empty_sigset = 0;
 pub const siginfo_t = c_long;
-// TODO plan9 doesn't have sigaction_fn. Sigaction is not a union, but we incude it here to be compatible.
+// TODO plan9 doesn't have sigaction_fn. Sigaction is not a union, but we include it here to be compatible.
 pub const Sigaction = extern struct {
     pub const handler_fn = *const fn (i32) callconv(.C) void;
     pub const sigaction_fn = *const fn (i32, *const siginfo_t, ?*anyopaque) callconv(.C) void;
@@ -3474,7 +3474,7 @@ pub const SocketError = error{
 pub fn socket(domain: u32, socket_type: u32, protocol: u32) SocketError!socket_t {
     if (native_os == .windows) {
         // NOTE: windows translates the SOCK.NONBLOCK/SOCK.CLOEXEC flags into
-        // windows-analagous operations
+        // windows-analogous operations
         const filtered_sock_type = socket_type & ~@as(u32, SOCK.NONBLOCK | SOCK.CLOEXEC);
         const flags: u32 = if ((socket_type & SOCK.CLOEXEC) != 0)
             windows.ws2_32.WSA_FLAG_NO_HANDLE_INHERIT
@@ -617,7 +617,7 @@ fn contextLessThan(context: []const u32, a: usize, b: usize) Order {

 const CPQlt = PriorityQueue(usize, []const u32, contextLessThan);

-test "add and remove min heap with contextful comparator" {
+test "add and remove min heap with context comparator" {
     const context = [_]u32{ 5, 3, 4, 2, 2, 8, 0 };

     var queue = CPQlt.init(testing.allocator, context[0..]);
@@ -1818,7 +1818,7 @@ pub const CreateEnvironOptions = struct {
     zig_progress_fd: ?i32 = null,
 };

-/// Creates a null-deliminated environment variable block in the format
+/// Creates a null-delimited environment variable block in the format
 /// expected by POSIX, from a hash map plus options.
 pub fn createEnvironFromMap(
     arena: Allocator,
@@ -1880,7 +1880,7 @@ pub fn createEnvironFromMap(
     return envp_buf;
 }

-/// Creates a null-deliminated environment variable block in the format
+/// Creates a null-delimited environment variable block in the format
 /// expected by POSIX, from a hash map plus options.
 pub fn createEnvironFromExisting(
     arena: Allocator,
@@ -268,7 +268,7 @@ fn breakPatterns(a: usize, b: usize, context: anytype) void {
     }
 }

-/// choses a pivot in `items[a..b]`.
+/// chooses a pivot in `items[a..b]`.
 /// swaps likely_sorted when `items[a..b]` seems to be already sorted.
 fn chosePivot(a: usize, b: usize, pivot: *usize, context: anytype) Hint {
     // minimum length for using the Tukey's ninther method
@@ -315,7 +315,7 @@ pub const FileKind = enum {
     file,
 };

-/// Iteartor over entries in the tar file represented by reader.
+/// Iterator over entries in the tar file represented by reader.
 pub fn Iterator(comptime ReaderType: type) type {
     return struct {
         reader: ReaderType,
@@ -423,7 +423,7 @@ pub fn Iterator(comptime ReaderType: type) type {
             self.padding = blockPadding(size);

             switch (kind) {
-                // File types to retrun upstream
+                // File types to return upstream
                 .directory, .normal, .symbolic_link => {
                     file.kind = switch (kind) {
                         .directory => .directory,
@@ -9,7 +9,7 @@ const Case = struct {
     mode: u32 = 0,
     link_name: []const u8 = &[0]u8{},
     kind: tar.FileKind = .file,
-    truncated: bool = false, // when there is no file body, just header, usefull for huge files
+    truncated: bool = false, // when there is no file body, just header, useful for huge files
 };

 data: []const u8, // testdata file content
@@ -54,7 +54,7 @@ pub fn startInstrumentation() void {

 /// Stop full callgrind instrumentation if not already switched off.
 /// This flushes Valgrinds translation cache, and does no additional
-/// instrumentation afterwards, which effectivly will run at the same
+/// instrumentation afterwards, which effectively will run at the same
 /// speed as the "none" tool (ie. at minimal slowdown).
 /// Use this to bypass Callgrind aggregation for uninteresting code parts.
 /// To start Callgrind in this mode to ignore the setup phase, use
@@ -5085,7 +5085,7 @@ fn structDeclInner(
         any_default_inits = true;

         // The decl_inst is used as here so that we can easily reconstruct a mapping
-        // between it and the field type when the fields inits are analzyed.
+        // between it and the field type when the fields inits are analyzed.
         const ri: ResultInfo = .{ .rl = if (field_type == .none) .none else .{ .coerced_ty = decl_inst.toRef() } };

         const default_inst = try expr(&block_scope, &namespace.base, ri, member.ast.value_expr);
@@ -11559,7 +11559,7 @@ fn identAsString(astgen: *AstGen, ident_token: Ast.TokenIndex) !Zir.NullTerminat
 }

 /// Adds a doc comment block to `string_bytes` by walking backwards from `end_token`.
-/// `end_token` must point at the first token after the last doc coment line.
+/// `end_token` must point at the first token after the last doc comment line.
 /// Returns 0 if no doc comment is present.
 fn docCommentAsString(astgen: *AstGen, end_token: Ast.TokenIndex) !Zir.NullTerminatedString {
     if (end_token == 0) return .empty;
@@ -11780,7 +11780,7 @@ const Scope = struct {
         inst: Zir.Inst.Ref,
         /// Source location of the corresponding variable declaration.
         token_src: Ast.TokenIndex,
-        /// Track the first identifer where it is referenced.
+        /// Track the first identifier where it is referenced.
         /// 0 means never referenced.
         used: Ast.TokenIndex = 0,
         /// Track the identifier where it is discarded, like this `_ = foo;`.
@@ -11803,13 +11803,13 @@ const Scope = struct {
         ptr: Zir.Inst.Ref,
         /// Source location of the corresponding variable declaration.
         token_src: Ast.TokenIndex,
-        /// Track the first identifer where it is referenced.
+        /// Track the first identifier where it is referenced.
         /// 0 means never referenced.
         used: Ast.TokenIndex = 0,
         /// Track the identifier where it is discarded, like this `_ = foo;`.
         /// 0 means never discarded.
         discarded: Ast.TokenIndex = 0,
-        /// Whether this value is used as an lvalue after inititialization.
+        /// Whether this value is used as an lvalue after initialization.
         /// If not, we know it can be `const`, so will emit a compile error if it is `var`.
         used_as_lvalue: bool = false,
         /// String table index.
@@ -7,8 +7,8 @@
 //! occur. Thus, we want to provide a real result pointer (from an alloc) only
 //! when necessary.
 //!
-//! To achive this, we need to determine which expressions require a result
-//! pointer. This pass is reponsible for analyzing all syntax forms which may
+//! To achieve this, we need to determine which expressions require a result
+//! pointer. This pass is responsible for analyzing all syntax forms which may
 //! provide a result location and, if sub-expressions consume this result
 //! pointer non-trivially (e.g. writing through field pointers), marking the
 //! node as requiring a result location.
@@ -878,7 +878,7 @@ const MsvcLibDir = struct {
             error.OutOfMemory => return error.OutOfMemory,
             else => continue,
         };
-        if (source_directories_value.len > (std.fs.max_path_bytes * 30)) { // note(bratishkaerik): guessing from the fact that on my computer it has 15 pathes and at least some of them are not of max length
+        if (source_directories_value.len > (std.fs.max_path_bytes * 30)) { // note(bratishkaerik): guessing from the fact that on my computer it has 15 paths and at least some of them are not of max length
            allocator.free(source_directories_value);
            continue;
        }
@@ -887,10 +887,10 @@ const MsvcLibDir = struct {
         } else return error.PathNotFound;
         defer allocator.free(source_directories);

-        var source_directories_splitted = std.mem.splitScalar(u8, source_directories, ';');
+        var source_directories_split = std.mem.splitScalar(u8, source_directories, ';');

         const msvc_dir: []const u8 = msvc_dir: {
-            const msvc_include_dir_maybe_with_trailing_slash = try allocator.dupe(u8, source_directories_splitted.first());
+            const msvc_include_dir_maybe_with_trailing_slash = try allocator.dupe(u8, source_directories_split.first());

             if (msvc_include_dir_maybe_with_trailing_slash.len > std.fs.max_path_bytes or !std.fs.path.isAbsolute(msvc_include_dir_maybe_with_trailing_slash)) {
                 allocator.free(msvc_include_dir_maybe_with_trailing_slash);
@@ -4540,7 +4540,7 @@ test "zig fmt: decimal float literals with underscore separators" {
     );
 }

-test "zig fmt: hexadeciaml float literals with underscore separators" {
+test "zig fmt: hexadecimal float literals with underscore separators" {
     try testTransform(
         \\pub fn main() void {
         \\    const a: f64 = (0x10.0p-0+(0x10.0p+0))+0x10_00.00_00p-8+0x00_00.00_10p+16;
@@ -1044,7 +1044,7 @@ fn detectAbiAndDynamicLinker(
     defer if (is_elf_file == false) file.close();

     // Shortest working interpreter path is "#!/i" (4)
-    // (interpreter is "/i", assuming all pathes are absolute, like in above comment).
+    // (interpreter is "/i", assuming all paths are absolute, like in above comment).
     // ELF magic number length is also 4.
     //
     // If file is shorter than that, it is definitely not ELF file