From 0282c2a924054472ccc8d82325e8393442381760 Mon Sep 17 00:00:00 2001 From: -k Date: Fri, 21 Apr 2023 21:21:17 -0700 Subject: [PATCH] doc: fix minor grammar issues --- lib/std/array_hash_map.zig | 4 ++-- lib/std/array_list.zig | 4 ++-- lib/std/buf_map.zig | 2 +- lib/std/hash_map.zig | 2 +- lib/std/os/linux/seccomp.zig | 4 ++-- lib/std/packed_int_array.zig | 6 +++--- 6 files changed, 11 insertions(+), 11 deletions(-) diff --git a/lib/std/array_hash_map.zig b/lib/std/array_hash_map.zig index f62616cd85..04b4e96cd5 100644 --- a/lib/std/array_hash_map.zig +++ b/lib/std/array_hash_map.zig @@ -54,7 +54,7 @@ pub fn hashString(s: []const u8) u32 { /// Insertion order is preserved. /// Deletions perform a "swap removal" on the entries list. -/// Modifying the hash map while iterating is allowed, however one must understand +/// Modifying the hash map while iterating is allowed; however, one must understand /// the (well defined) behavior when mixing insertions and deletions with iteration. /// For a hash map that can be initialized directly that does not store an Allocator /// field, see `ArrayHashMapUnmanaged`. @@ -448,7 +448,7 @@ pub fn ArrayHashMap( /// General purpose hash table. /// Insertion order is preserved. /// Deletions perform a "swap removal" on the entries list. -/// Modifying the hash map while iterating is allowed, however one must understand +/// Modifying the hash map while iterating is allowed; however, one must understand /// the (well defined) behavior when mixing insertions and deletions with iteration. /// This type does not store an Allocator field - the Allocator must be passed in /// with each function call that requires it. 
See `ArrayHashMap` for a type that stores diff --git a/lib/std/array_list.zig b/lib/std/array_list.zig index fb11e2e755..205649ae4d 100644 --- a/lib/std/array_list.zig +++ b/lib/std/array_list.zig @@ -31,7 +31,7 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type { return struct { const Self = @This(); /// Contents of the list. Pointers to elements in this slice are - /// **invalid after resizing operations** on the ArrayList, unless the + /// **invalid after resizing operations** on the ArrayList unless the /// operation explicitly either: (1) states otherwise or (2) lists the /// invalidated pointers. /// @@ -527,7 +527,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ return struct { const Self = @This(); /// Contents of the list. Pointers to elements in this slice are - /// **invalid after resizing operations** on the ArrayList, unless the + /// **invalid after resizing operations** on the ArrayList unless the /// operation explicitly either: (1) states otherwise or (2) lists the /// invalidated pointers. /// diff --git a/lib/std/buf_map.zig b/lib/std/buf_map.zig index 2a6239c490..f20a581972 100644 --- a/lib/std/buf_map.zig +++ b/lib/std/buf_map.zig @@ -4,7 +4,7 @@ const mem = std.mem; const Allocator = mem.Allocator; const testing = std.testing; -/// BufMap copies keys and values before they go into the map, and +/// BufMap copies keys and values before they go into the map and /// frees them when they get removed. pub const BufMap = struct { hash_map: BufMapHashMap, diff --git a/lib/std/hash_map.zig b/lib/std/hash_map.zig index af0ecc5993..cef0eba379 100644 --- a/lib/std/hash_map.zig +++ b/lib/std/hash_map.zig @@ -350,7 +350,7 @@ pub fn verifyContext( /// General purpose hash table. /// No order is guaranteed and any modification invalidates live iterators. /// It provides fast operations (lookup, insertion, deletion) with quite high -/// load factors (up to 80% by default) for a low memory usage. 
+/// load factors (up to 80% by default) for low memory usage. /// For a hash map that can be initialized directly that does not store an Allocator /// field, see `HashMapUnmanaged`. /// If iterating over the table entries is a strong usecase and needs to be fast, diff --git a/lib/std/os/linux/seccomp.zig b/lib/std/os/linux/seccomp.zig index 03a96633f8..b659c3d0e8 100644 --- a/lib/std/os/linux/seccomp.zig +++ b/lib/std/os/linux/seccomp.zig @@ -29,8 +29,8 @@ //! which is dependant on the ABI. Since BPF programs execute in a 32-bit //! machine, validation of 64-bit arguments necessitates two load-and-compare //! instructions for the upper and lower words. -//! 3. A further wrinkle to the above is endianess. Unlike network packets, -//! syscall data shares the endianess of the target machine. A filter +//! 3. A further wrinkle to the above is endianness. Unlike network packets, +//! syscall data shares the endianness of the target machine. A filter //! compiled on a little-endian machine will not work on a big-endian one, //! and vice-versa. For example: Checking the upper 32-bits of `data.arg1` //! requires a load at `@offsetOf(data, "arg1") + 4` on big-endian systems diff --git a/lib/std/packed_int_array.zig b/lib/std/packed_int_array.zig index 449c2f487f..8004d223f7 100644 --- a/lib/std/packed_int_array.zig +++ b/lib/std/packed_int_array.zig @@ -182,7 +182,7 @@ pub fn PackedIntIo(comptime Int: type, comptime endian: Endian) type { /// Creates a bit-packed array of `Int`. Non-byte-multiple integers /// will take up less memory in PackedIntArray than in a normal array. -/// Elements are packed using native endianess and without storing any +/// Elements are packed using native endianness and without storing any /// meta data. PackedArray(i3, 8) will occupy exactly 3 bytes /// of memory. 
pub fn PackedIntArray(comptime Int: type, comptime int_count: usize) type { @@ -261,7 +261,7 @@ pub fn PackedIntArrayEndian(comptime Int: type, comptime endian: Endian, comptim } /// Create a PackedIntSliceEndian of the array using `NewInt` as the integer type - /// and `new_endian` as the new endianess. `NewInt`'s bit width must fit evenly + /// and `new_endian` as the new endianness. `NewInt`'s bit width must fit evenly /// within the array's `Int`'s total bits. pub fn sliceCastEndian(self: *Self, comptime NewInt: type, comptime new_endian: Endian) PackedIntSliceEndian(NewInt, new_endian) { return Io.sliceCast(&self.bytes, NewInt, new_endian, 0, int_count); @@ -336,7 +336,7 @@ pub fn PackedIntSliceEndian(comptime Int: type, comptime endian: Endian) type { } /// Create a PackedIntSliceEndian of the slice using `NewInt` as the integer type - /// and `new_endian` as the new endianess. `NewInt`'s bit width must fit evenly + /// and `new_endian` as the new endianness. `NewInt`'s bit width must fit evenly /// within the slice's `Int`'s total bits. pub fn sliceCastEndian(self: Self, comptime NewInt: type, comptime new_endian: Endian) PackedIntSliceEndian(NewInt, new_endian) { return Io.sliceCast(self.bytes, NewInt, new_endian, self.bit_offset, self.len);