diff --git a/ci/azure/linux_script b/ci/azure/linux_script index 912a2518bb..64407b79b7 100755 --- a/ci/azure/linux_script +++ b/ci/azure/linux_script @@ -9,7 +9,7 @@ sudo apt-get install -y cmake s3cmd tidy ZIGDIR="$(pwd)" ARCH="$(uname -m)" TARGET="$ARCH-linux-musl" -CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.8.0-dev.1939+5a3ea9bec" +CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.8.0-dev.2168+2d1196773" PREFIX="$HOME/$CACHE_BASENAME" MCPU="baseline" JOBS="-j$(nproc)" @@ -25,8 +25,10 @@ wget -nv "https://ziglang.org/deps/$QEMUBASE.tar.xz" tar xf "$QEMUBASE.tar.xz" export PATH="$(pwd)/$QEMUBASE/bin:$PATH" -WASMTIME="wasmtime-v0.20.0-x86_64-linux" -wget -nv "https://github.com/bytecodealliance/wasmtime/releases/download/v0.20.0/$WASMTIME.tar.xz" +# Bump to v0.23 once this issue is resolved: +# https://github.com/ziglang/zig/issues/8742 +WASMTIME="wasmtime-v0.22.1-x86_64-linux" +wget -nv "https://github.com/bytecodealliance/wasmtime/releases/download/v0.22.1/$WASMTIME.tar.xz" tar xf "$WASMTIME.tar.xz" export PATH="$(pwd)/$WASMTIME:$PATH" diff --git a/ci/azure/macos_arm64_script b/ci/azure/macos_arm64_script index 54d0380fa4..612edf677f 100755 --- a/ci/azure/macos_arm64_script +++ b/ci/azure/macos_arm64_script @@ -3,24 +3,31 @@ set -x set -e -brew update && brew install s3cmd ninja gnu-tar +brew update && brew install s3cmd ZIGDIR="$(pwd)" + +HOST_ARCH="x86_64" +HOST_TARGET="$HOST_ARCH-macos-gnu" +HOST_MCPU="baseline" +HOST_CACHE_BASENAME="zig+llvm+lld+clang-$HOST_TARGET-0.8.0-dev.2168+2d1196773" +HOST_PREFIX="$HOME/$HOST_CACHE_BASENAME" + ARCH="aarch64" -# {product}-{os}{sdk_version}-{arch}-{llvm_version}-{cmake_build_type} -CACHE_HOST_BASENAME="ci-llvm-macos10.15-x86_64-12.0.0.1-release" -CACHE_ARM64_BASENAME="ci-llvm-macos11.0-arm64-12.0.0.1-release" -PREFIX_HOST="$HOME/$CACHE_HOST_BASENAME" -PREFIX_ARM64="$HOME/$CACHE_ARM64_BASENAME" +TARGET="$ARCH-macos-gnu" +MCPU="cyclone" +CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.8.0-dev.2168+2d1196773" +PREFIX="$HOME/$CACHE_BASENAME" + JOBS="-j2" -rm -rf $PREFIX +rm -rf $HOST_PREFIX $PREFIX cd $HOME -wget -nv "https://ziglang.org/deps/$CACHE_HOST_BASENAME.tar.xz" -wget -nv "https://ziglang.org/deps/$CACHE_ARM64_BASENAME.tar.xz" -gtar xf "$CACHE_HOST_BASENAME.tar.xz" -gtar xf "$CACHE_ARM64_BASENAME.tar.xz" +wget -nv "https://ziglang.org/deps/$HOST_CACHE_BASENAME.tar.xz" +wget -nv "https://ziglang.org/deps/$CACHE_BASENAME.tar.xz" +tar xf "$HOST_CACHE_BASENAME.tar.xz" +tar xf "$CACHE_BASENAME.tar.xz" cd $ZIGDIR @@ -30,83 +37,75 @@ git config core.abbrev 9 git fetch --unshallow || true git fetch --tags -# Select xcode: latest version found on vmImage macOS-10.15 . -DEVELOPER_DIR=/Applications/Xcode_12.4.app +# Build host zig compiler in debug so that we can get the +# current version when packaging -export ZIG_LOCAL_CACHE_DIR="$ZIGDIR/zig-cache" -export ZIG_GLOBAL_CACHE_DIR="$ZIGDIR/zig-cache" +ZIG="$HOST_PREFIX/bin/zig" -# Build zig for host and use `Debug` type to make builds a little faster. +export CC="$ZIG cc -target $HOST_TARGET -mcpu=$HOST_MCPU" +export CXX="$ZIG c++ -target $HOST_TARGET -mcpu=$HOST_MCPU" -cd $ZIGDIR mkdir build.host cd build.host -cmake -G "Ninja" .. \ +cmake .. \ -DCMAKE_INSTALL_PREFIX="$(pwd)/release" \ - -DCMAKE_PREFIX_PATH="$PREFIX_HOST" \ - -DCMAKE_BUILD_TYPE="Debug" \ - -DZIG_STATIC="OFF" + -DCMAKE_PREFIX_PATH="$HOST_PREFIX" \ + -DCMAKE_BUILD_TYPE=Debug \ + -DZIG_TARGET_TRIPLE="$HOST_TARGET" \ + -DZIG_TARGET_MCPU="$HOST_MCPU" \ + -DZIG_STATIC=ON -# Build but do not install. 
-ninja $JOBS +unset CC +unset CXX -ZIG_EXE="$ZIGDIR/build.host/zig" - -# Build zig for arm64 target. -# - use `Release` type for published tarballs -# - ad-hoc codesign with linker -# - note: apple quarantine of downloads (eg. via safari) still apply +make $JOBS install +# Build zig compiler cross-compiled for arm64 cd $ZIGDIR -mkdir build.arm64 -cd build.arm64 -cmake -G "Ninja" .. \ + +ZIG="$ZIGDIR/build.host/release/bin/zig" + +export CC="$ZIG cc -target $TARGET -mcpu=$MCPU" +export CXX="$ZIG c++ -target $TARGET -mcpu=$MCPU" + +mkdir build +cd build +cmake .. \ -DCMAKE_INSTALL_PREFIX="$(pwd)/release" \ - -DCMAKE_PREFIX_PATH="$PREFIX_ARM64" \ - -DCMAKE_BUILD_TYPE="Release" \ - -DCMAKE_CROSSCOMPILING="True" \ - -DCMAKE_SYSTEM_NAME="Darwin" \ - -DCMAKE_C_FLAGS="-arch arm64" \ - -DCMAKE_CXX_FLAGS="-arch arm64" \ - -DCMAKE_EXE_LINKER_FLAGS="-lz -Xlinker -adhoc_codesign" \ - -DZIG_USE_LLVM_CONFIG="OFF" \ - -DZIG_EXECUTABLE="$ZIG_EXE" \ - -DZIG_TARGET_TRIPLE="${ARCH}-macos" \ - -DZIG_STATIC="OFF" + -DCMAKE_PREFIX_PATH="$PREFIX" \ + -DCMAKE_BUILD_TYPE=Release \ + -DZIG_TARGET_TRIPLE="$TARGET" \ + -DZIG_TARGET_MCPU="$MCPU" \ + -DZIG_EXECUTABLE="$ZIG" \ + -DZIG_STATIC=ON -ninja $JOBS install +unset CC +unset CXX -# Disable test because binary is foreign arch. -#release/bin/zig build test +make $JOBS install if [ "${BUILD_REASON}" != "PullRequest" ]; then mv ../LICENSE release/ # We do not run test suite but still need langref. mkdir -p release/docs - $ZIG_EXE run ../doc/docgen.zig -- $ZIG_EXE ../doc/langref.html.in release/docs/langref.html + $ZIG run ../doc/docgen.zig -- $ZIG ../doc/langref.html.in release/docs/langref.html # Produce the experimental std lib documentation. mkdir -p release/docs/std - $ZIG_EXE test ../lib/std/std.zig \ + $ZIG test ../lib/std/std.zig \ --override-lib-dir ../lib \ -femit-docs=release/docs/std \ -fno-emit-bin - # Remove the unnecessary bin dir in $prefix/bin/zig mv release/bin/zig release/ rmdir release/bin - # Remove the unnecessary zig dir in $prefix/lib/zig/std/std.zig - mv release/lib/zig release/lib2 - rmdir release/lib - mv release/lib2 release/lib - - VERSION=$($ZIG_EXE version) + VERSION=$(../build.host/release/bin/zig version) DIRNAME="zig-macos-$ARCH-$VERSION" TARBALL="$DIRNAME.tar.xz" - gtar cJf "$TARBALL" release/ --owner=root --sort=name --transform="s,^release,${DIRNAME}," - ln "$TARBALL" "$BUILD_ARTIFACTSTAGINGDIRECTORY/." + mv release "$DIRNAME" + tar cfJ "$TARBALL" "$DIRNAME" mv "$DOWNLOADSECUREFILE_SECUREFILEPATH" "$HOME/.s3cfg" s3cmd put -P --add-header="cache-control: public, max-age=31536000, immutable" "$TARBALL" s3://ziglang.org/builds/ @@ -114,12 +113,13 @@ if [ "${BUILD_REASON}" != "PullRequest" ]; then SHASUM=$(shasum -a 256 $TARBALL | cut '-d ' -f1) BYTESIZE=$(wc -c < $TARBALL) - JSONFILE="tarball.json" + JSONFILE="macos-$GITBRANCH.json" touch $JSONFILE echo "{\"tarball\": \"$TARBALL\"," >>$JSONFILE echo "\"shasum\": \"$SHASUM\"," >>$JSONFILE echo "\"size\": \"$BYTESIZE\"}" >>$JSONFILE + s3cmd put -P --add-header="Cache-Control: max-age=0, must-revalidate" "$JSONFILE" "s3://ziglang.org/builds/$JSONFILE" s3cmd put -P "$JSONFILE" "s3://ziglang.org/builds/$ARCH-macos-$VERSION.json" # `set -x` causes these variables to be mangled. 
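The arm64 script above bootstraps in two stages: it first builds a host (x86_64) zig against the prebuilt host LLVM bundle, then reuses that binary as `zig cc`/`zig c++` so CMake can cross-compile the arm64 compiler. A minimal sketch of that second stage, assuming the host compiler was installed to $ZIGDIR/build.host/release and the arm64 llvm+lld+clang bundle was unpacked to $PREFIX (paths and the -j2 job count are placeholders restated from the script, not authoritative values):

    # Drive CMake's C/C++ compiles through the freshly built host zig,
    # targeting Apple Silicon macOS.
    ZIG="$ZIGDIR/build.host/release/bin/zig"
    export CC="$ZIG cc -target aarch64-macos-gnu -mcpu=cyclone"
    export CXX="$ZIG c++ -target aarch64-macos-gnu -mcpu=cyclone"

    mkdir -p "$ZIGDIR/build" && cd "$ZIGDIR/build"
    cmake .. \
        -DCMAKE_INSTALL_PREFIX="$(pwd)/release" \
        -DCMAKE_PREFIX_PATH="$PREFIX" \
        -DCMAKE_BUILD_TYPE=Release \
        -DZIG_TARGET_TRIPLE="aarch64-macos-gnu" \
        -DZIG_TARGET_MCPU="cyclone" \
        -DZIG_EXECUTABLE="$ZIG" \
        -DZIG_STATIC=ON

    # CMake has already captured CC/CXX; clear them so the install step
    # (and anything it spawns) is not affected by the cross compiler.
    unset CC CXX
    make -j2 install

macos_script below applies the same zig-cc-as-compiler pattern with the native x86_64-macos-gnu triple.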
diff --git a/ci/azure/macos_script b/ci/azure/macos_script index d6d32612cc..238029e37e 100755 --- a/ci/azure/macos_script +++ b/ci/azure/macos_script @@ -7,21 +7,21 @@ brew update && brew install s3cmd ZIGDIR="$(pwd)" ARCH="x86_64" -CACHE_BASENAME="zig+llvm+lld+clang-$ARCH-macos-gnu-0.8.0-dev.1939+5a3ea9bec" +TARGET="$ARCH-macos-gnu" +MCPU="baseline" +CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.8.0-dev.2168+2d1196773" PREFIX="$HOME/$CACHE_BASENAME" JOBS="-j2" rm -rf $PREFIX cd $HOME + wget -nv "https://ziglang.org/deps/$CACHE_BASENAME.tar.xz" tar xf "$CACHE_BASENAME.tar.xz" ZIG="$PREFIX/bin/zig" -NATIVE_LIBC_TXT="$HOME/native_libc.txt" -$ZIG libc >"$NATIVE_LIBC_TXT" -export ZIG_LIBC="$NATIVE_LIBC_TXT" -export CC="$ZIG cc" -export CXX="$ZIG c++" +export CC="$ZIG cc -target $TARGET -mcpu=$MCPU" +export CXX="$ZIG c++ -target $TARGET -mcpu=$MCPU" cd $ZIGDIR @@ -37,22 +37,21 @@ cmake .. \ -DCMAKE_INSTALL_PREFIX="$(pwd)/release" \ -DCMAKE_PREFIX_PATH="$PREFIX" \ -DCMAKE_BUILD_TYPE=Release \ - -DZIG_TARGET_TRIPLE="$ARCH-native-gnu" \ - -DZIG_TARGET_MCPU="baseline" \ + -DZIG_TARGET_TRIPLE="$TARGET" \ + -DZIG_TARGET_MCPU="$MCPU" \ -DZIG_STATIC=ON # Now cmake will use zig as the C/C++ compiler. We reset the environment variables # so that installation and testing do not get affected by them. unset CC unset CXX -unset ZIG_LIBC make $JOBS install # Here we rebuild zig but this time using the Zig binary we just now produced to # build zig1.o rather than relying on the one built with stage0. See # https://github.com/ziglang/zig/issues/6830 for more details. -cmake .. -DZIG_EXECUTABLE="$(pwd)/release/bin/zig" -DZIG_TARGET_MCPU="x86_64_v2" +cmake .. -DZIG_EXECUTABLE="$(pwd)/release/bin/zig" make $JOBS install for step in test-toolchain test-std docs; do diff --git a/ci/azure/pipelines.yml b/ci/azure/pipelines.yml index 1e281bb8e7..ced8ed45a0 100644 --- a/ci/azure/pipelines.yml +++ b/ci/azure/pipelines.yml @@ -2,9 +2,7 @@ jobs: - job: BuildMacOS pool: vmImage: 'macOS-10.15' - timeoutInMinutes: 360 - steps: - task: DownloadSecureFile@1 inputs: @@ -15,9 +13,7 @@ jobs: - job: BuildMacOS_arm64 pool: vmImage: 'macOS-10.15' - - timeoutInMinutes: 60 - + timeoutInMinutes: 180 steps: - task: DownloadSecureFile@1 inputs: @@ -28,9 +24,7 @@ jobs: - job: BuildLinux pool: vmImage: 'ubuntu-18.04' - timeoutInMinutes: 360 - steps: - task: DownloadSecureFile@1 inputs: diff --git a/lib/libc/glibc/sysdeps/i386/sysdep.h b/lib/libc/glibc/sysdeps/i386/sysdep.h index b4bcd8fb6c..b338b0dc36 100644 --- a/lib/libc/glibc/sysdeps/i386/sysdep.h +++ b/lib/libc/glibc/sysdeps/i386/sysdep.h @@ -61,7 +61,7 @@ lose: SYSCALL_PIC_SETUP \ # define SETUP_PIC_REG(reg) \ .ifndef GET_PC_THUNK(reg); \ - .section .gnu.linkonce.t.GET_PC_THUNK(reg),"ax",@progbits; \ + .section .text.GET_PC_THUNK(reg),"axG",@progbits,GET_PC_THUNK(reg),comdat; \ .globl GET_PC_THUNK(reg); \ .hidden GET_PC_THUNK(reg); \ .p2align 4; \ @@ -97,8 +97,9 @@ GET_PC_THUNK(reg): \ # define SETUP_PIC_REG_STR(reg) \ ".ifndef " GET_PC_THUNK_STR (reg) "\n" \ - ".section .gnu.linkonce.t." GET_PC_THUNK_STR (reg) ",\"ax\",@progbits\n" \ + "section .text." 
GET_PC_THUNK_STR (reg) ",\"axG\",@progbits," \ ".globl " GET_PC_THUNK_STR (reg) "\n" \ + GET_PC_THUNK_STR (reg) ",comdat\n" \ ".hidden " GET_PC_THUNK_STR (reg) "\n" \ ".p2align 4\n" \ ".type " GET_PC_THUNK_STR (reg) ",@function\n" \ diff --git a/lib/std/array_hash_map.zig b/lib/std/array_hash_map.zig index d296cdcc01..831b21fe19 100644 --- a/lib/std/array_hash_map.zig +++ b/lib/std/array_hash_map.zig @@ -18,11 +18,11 @@ const builtin = std.builtin; const hash_map = @This(); pub fn AutoArrayHashMap(comptime K: type, comptime V: type) type { - return ArrayHashMap(K, V, getAutoHashFn(K), getAutoEqlFn(K), autoEqlIsCheap(K)); + return ArrayHashMap(K, V, getAutoHashFn(K), getAutoEqlFn(K), !autoEqlIsCheap(K)); } pub fn AutoArrayHashMapUnmanaged(comptime K: type, comptime V: type) type { - return ArrayHashMapUnmanaged(K, V, getAutoHashFn(K), getAutoEqlFn(K), autoEqlIsCheap(K)); + return ArrayHashMapUnmanaged(K, V, getAutoHashFn(K), getAutoEqlFn(K), !autoEqlIsCheap(K)); } /// Builtin hashmap for strings as keys. @@ -1318,7 +1318,7 @@ test "reIndex" { try al.append(std.testing.allocator, .{ .key = i, .value = i * 10, - .hash = hash(i), + .hash = {}, }); } @@ -1345,7 +1345,7 @@ test "fromOwnedArrayList" { try al.append(std.testing.allocator, .{ .key = i, .value = i * 10, - .hash = hash(i), + .hash = {}, }); } @@ -1362,6 +1362,18 @@ test "fromOwnedArrayList" { } } +test "auto store_hash" { + const HasCheapEql = AutoArrayHashMap(i32, i32); + const HasExpensiveEql = AutoArrayHashMap([32]i32, i32); + try testing.expect(meta.fieldInfo(HasCheapEql.Entry, .hash).field_type == void); + try testing.expect(meta.fieldInfo(HasExpensiveEql.Entry, .hash).field_type != void); + + const HasCheapEqlUn = AutoArrayHashMapUnmanaged(i32, i32); + const HasExpensiveEqlUn = AutoArrayHashMapUnmanaged([32]i32, i32); + try testing.expect(meta.fieldInfo(HasCheapEqlUn.Entry, .hash).field_type == void); + try testing.expect(meta.fieldInfo(HasExpensiveEqlUn.Entry, .hash).field_type != void); +} + pub fn getHashPtrAddrFn(comptime K: type) (fn (K) u32) { return struct { fn hash(key: K) u32 { diff --git a/lib/std/c.zig b/lib/std/c.zig index f66376f812..acfc6b34f7 100644 --- a/lib/std/c.zig +++ b/lib/std/c.zig @@ -89,13 +89,13 @@ pub extern "c" fn ftruncate(fd: c_int, length: off_t) c_int; pub extern "c" fn raise(sig: c_int) c_int; pub extern "c" fn read(fd: fd_t, buf: [*]u8, nbyte: usize) isize; pub extern "c" fn readv(fd: c_int, iov: [*]const iovec, iovcnt: c_uint) isize; -pub extern "c" fn pread(fd: fd_t, buf: [*]u8, nbyte: usize, offset: u64) isize; -pub extern "c" fn preadv(fd: c_int, iov: [*]const iovec, iovcnt: c_uint, offset: u64) isize; +pub extern "c" fn pread(fd: fd_t, buf: [*]u8, nbyte: usize, offset: off_t) isize; +pub extern "c" fn preadv(fd: c_int, iov: [*]const iovec, iovcnt: c_uint, offset: off_t) isize; pub extern "c" fn writev(fd: c_int, iov: [*]const iovec_const, iovcnt: c_uint) isize; -pub extern "c" fn pwritev(fd: c_int, iov: [*]const iovec_const, iovcnt: c_uint, offset: u64) isize; +pub extern "c" fn pwritev(fd: c_int, iov: [*]const iovec_const, iovcnt: c_uint, offset: off_t) isize; pub extern "c" fn write(fd: fd_t, buf: [*]const u8, nbyte: usize) isize; -pub extern "c" fn pwrite(fd: fd_t, buf: [*]const u8, nbyte: usize, offset: u64) isize; -pub extern "c" fn mmap(addr: ?*align(page_size) c_void, len: usize, prot: c_uint, flags: c_uint, fd: fd_t, offset: u64) *c_void; +pub extern "c" fn pwrite(fd: fd_t, buf: [*]const u8, nbyte: usize, offset: off_t) isize; +pub extern "c" fn mmap(addr: ?*align(page_size) c_void, 
len: usize, prot: c_uint, flags: c_uint, fd: fd_t, offset: off_t) *c_void; pub extern "c" fn munmap(addr: *align(page_size) c_void, len: usize) c_int; pub extern "c" fn mprotect(addr: *align(page_size) c_void, len: usize, prot: c_uint) c_int; pub extern "c" fn link(oldpath: [*:0]const u8, newpath: [*:0]const u8, flags: c_int) c_int; @@ -151,6 +151,7 @@ pub extern "c" fn bind(socket: fd_t, address: ?*const sockaddr, address_len: soc pub extern "c" fn socketpair(domain: c_uint, sock_type: c_uint, protocol: c_uint, sv: *[2]fd_t) c_int; pub extern "c" fn listen(sockfd: fd_t, backlog: c_uint) c_int; pub extern "c" fn getsockname(sockfd: fd_t, noalias addr: *sockaddr, noalias addrlen: *socklen_t) c_int; +pub extern "c" fn getpeername(sockfd: fd_t, noalias addr: *sockaddr, noalias addrlen: *socklen_t) c_int; pub extern "c" fn connect(sockfd: fd_t, sock_addr: *const sockaddr, addrlen: socklen_t) c_int; pub extern "c" fn accept(sockfd: fd_t, noalias addr: ?*sockaddr, noalias addrlen: ?*socklen_t) c_int; pub extern "c" fn accept4(sockfd: fd_t, noalias addr: ?*sockaddr, noalias addrlen: ?*socklen_t, flags: c_uint) c_int; diff --git a/lib/std/c/linux.zig b/lib/std/c/linux.zig index 2d2fe04b48..4503b12503 100644 --- a/lib/std/c/linux.zig +++ b/lib/std/c/linux.zig @@ -63,6 +63,23 @@ pub const EAI = enum(c_int) { _, }; +pub extern "c" fn fallocate64(fd: fd_t, mode: c_int, offset: off_t, len: off_t) c_int; +pub extern "c" fn fopen64(noalias filename: [*:0]const u8, noalias modes: [*:0]const u8) ?*FILE; +pub extern "c" fn fstat64(fd: fd_t, buf: *libc_stat) c_int; +pub extern "c" fn fstatat64(dirfd: fd_t, path: [*:0]const u8, stat_buf: *libc_stat, flags: u32) c_int; +pub extern "c" fn ftruncate64(fd: c_int, length: off_t) c_int; +pub extern "c" fn getrlimit64(resource: rlimit_resource, rlim: *rlimit) c_int; +pub extern "c" fn lseek64(fd: fd_t, offset: i64, whence: c_int) i64; +pub extern "c" fn mmap64(addr: ?*align(std.mem.page_size) c_void, len: usize, prot: c_uint, flags: c_uint, fd: fd_t, offset: i64) *c_void; +pub extern "c" fn open64(path: [*:0]const u8, oflag: c_uint, ...) c_int; +pub extern "c" fn openat64(fd: c_int, path: [*:0]const u8, oflag: c_uint, ...) 
c_int; +pub extern "c" fn pread64(fd: fd_t, buf: [*]u8, nbyte: usize, offset: i64) isize; +pub extern "c" fn preadv64(fd: c_int, iov: [*]const iovec, iovcnt: c_uint, offset: i64) isize; +pub extern "c" fn pwrite64(fd: fd_t, buf: [*]const u8, nbyte: usize, offset: i64) isize; +pub extern "c" fn pwritev64(fd: c_int, iov: [*]const iovec_const, iovcnt: c_uint, offset: i64) isize; +pub extern "c" fn sendfile64(out_fd: fd_t, in_fd: fd_t, offset: ?*i64, count: usize) isize; +pub extern "c" fn setrlimit64(resource: rlimit_resource, rlim: *const rlimit) c_int; + pub extern "c" fn getrandom(buf_ptr: [*]u8, buf_len: usize, flags: c_uint) isize; pub extern "c" fn sched_getaffinity(pid: c_int, size: usize, set: *cpu_set_t) c_int; pub extern "c" fn eventfd(initval: c_uint, flags: c_uint) c_int; @@ -92,8 +109,6 @@ pub extern "c" fn pipe2(fds: *[2]fd_t, flags: u32) c_int; pub extern "c" fn fallocate(fd: fd_t, mode: c_int, offset: off_t, len: off_t) c_int; -pub extern "c" fn ftruncate64(fd: c_int, length: off_t) c_int; - pub extern "c" fn sendfile( out_fd: fd_t, in_fd: fd_t, diff --git a/lib/std/crypto/pcurves/p256.zig b/lib/std/crypto/pcurves/p256.zig index c2948e38d8..9675175e7f 100644 --- a/lib/std/crypto/pcurves/p256.zig +++ b/lib/std/crypto/pcurves/p256.zig @@ -49,21 +49,26 @@ pub const P256 = struct { } /// Create a point from affine coordinates after checking that they match the curve equation. - pub fn fromAffineCoordinates(x: Fe, y: Fe) EncodingError!P256 { + pub fn fromAffineCoordinates(p: AffineCoordinates) EncodingError!P256 { + const x = p.x; + const y = p.y; const x3AxB = x.sq().mul(x).sub(x).sub(x).sub(x).add(B); const yy = y.sq(); - if (!x3AxB.equivalent(yy)) { + const on_curve = @boolToInt(x3AxB.equivalent(yy)); + const is_identity = @boolToInt(x.equivalent(AffineCoordinates.identityElement.x)) & @boolToInt(y.equivalent(AffineCoordinates.identityElement.y)); + if ((on_curve | is_identity) == 0) { return error.InvalidEncoding; } - const p: P256 = .{ .x = x, .y = y, .z = Fe.one }; - return p; + var ret = P256{ .x = x, .y = y, .z = Fe.one }; + ret.z.cMov(P256.identityElement.z, is_identity); + return ret; } /// Create a point from serialized affine coordinates. pub fn fromSerializedAffineCoordinates(xs: [32]u8, ys: [32]u8, endian: builtin.Endian) (NonCanonicalError || EncodingError)!P256 { const x = try Fe.fromBytes(xs, endian); const y = try Fe.fromBytes(ys, endian); - return fromAffineCoordinates(x, y); + return fromAffineCoordinates(.{ .x = x, .y = y }); } /// Recover the Y coordinate from the X coordinate. @@ -96,7 +101,7 @@ pub const P256 = struct { if (encoded.len != 64) return error.InvalidEncoding; const x = try Fe.fromBytes(encoded[0..32].*, .Big); const y = try Fe.fromBytes(encoded[32..64].*, .Big); - return P256.fromAffineCoordinates(x, y); + return P256.fromAffineCoordinates(.{ .x = x, .y = y }); }, else => return error.InvalidEncoding, } @@ -177,7 +182,7 @@ pub const P256 = struct { /// Add P256 points, the second being specified using affine coordinates. 
// Algorithm 5 from https://eprint.iacr.org/2015/1060.pdf - pub fn addMixed(p: P256, q: struct { x: Fe, y: Fe }) P256 { + pub fn addMixed(p: P256, q: AffineCoordinates) P256 { var t0 = p.x.mul(q.x); var t1 = p.y.mul(q.y); var t3 = q.x.add(q.y); @@ -194,9 +199,9 @@ pub const P256 = struct { Z3 = X3.dbl(); X3 = X3.add(Z3); Z3 = t1.sub(X3); - X3 = t1.dbl(); + X3 = t1.add(X3); Y3 = B.mul(Y3); - t1 = p.z.add(p.z); + t1 = p.z.dbl(); var t2 = t1.add(p.z); Y3 = Y3.sub(t2); Y3 = Y3.sub(t0); @@ -214,14 +219,16 @@ pub const P256 = struct { Z3 = t4.mul(Z3); t1 = t3.mul(t0); Z3 = Z3.add(t1); - return .{ + var ret = P256{ .x = X3, .y = Y3, .z = Z3, }; + ret.cMov(p, @boolToInt(q.x.isZero())); + return ret; } - // Add P256 points. + /// Add P256 points. // Algorithm 4 from https://eprint.iacr.org/2015/1060.pdf pub fn add(p: P256, q: P256) P256 { var t0 = p.x.mul(q.x); @@ -274,18 +281,19 @@ pub const P256 = struct { }; } - // Subtract P256 points. + /// Subtract P256 points. pub fn sub(p: P256, q: P256) P256 { return p.add(q.neg()); } /// Return affine coordinates. - pub fn affineCoordinates(p: P256) struct { x: Fe, y: Fe } { + pub fn affineCoordinates(p: P256) AffineCoordinates { const zinv = p.z.invert(); - const ret = .{ + var ret = AffineCoordinates{ .x = p.x.mul(zinv), .y = p.y.mul(zinv), }; + ret.cMov(AffineCoordinates.identityElement, @boolToInt(p.x.isZero())); return ret; } @@ -382,11 +390,21 @@ pub const P256 = struct { return pc; } + const basePointPc = comptime pc: { + @setEvalBranchQuota(50000); + break :pc precompute(P256.basePoint, 15); + }; + + const basePointPc8 = comptime pc: { + @setEvalBranchQuota(50000); + break :pc precompute(P256.basePoint, 8); + }; + /// Multiply an elliptic curve point by a scalar. /// Return error.IdentityElement if the result is the identity element. pub fn mul(p: P256, s_: [32]u8, endian: builtin.Endian) IdentityElementError!P256 { const s = if (endian == .Little) s_ else Fe.orderSwap(s_); - const pc = if (p.is_base) precompute(P256.basePoint, 15) else pc: { + const pc = if (p.is_base) basePointPc else pc: { try p.rejectIdentity(); const xpc = precompute(p, 15); break :pc xpc; @@ -398,7 +416,7 @@ pub const P256 = struct { /// This can be used for signature verification. pub fn mulPublic(p: P256, s_: [32]u8, endian: builtin.Endian) IdentityElementError!P256 { const s = if (endian == .Little) s_ else Fe.orderSwap(s_); - const pc = if (p.is_base) precompute(P256.basePoint, 8) else pc: { + const pc = if (p.is_base) basePointPc8 else pc: { try p.rejectIdentity(); const xpc = precompute(p, 8); break :pc xpc; @@ -407,6 +425,20 @@ pub const P256 = struct { } }; +/// A point in affine coordinates. +pub const AffineCoordinates = struct { + x: P256.Fe, + y: P256.Fe, + + /// Identity element in affine coordinates. 
+ pub const identityElement = AffineCoordinates{ .x = P256.identityElement.x, .y = P256.identityElement.y }; + + fn cMov(p: *AffineCoordinates, a: AffineCoordinates, c: u1) void { + p.x.cMov(a.x, c); + p.y.cMov(a.y, c); + } +}; + test "p256" { _ = @import("tests.zig"); } diff --git a/lib/std/crypto/pcurves/tests.zig b/lib/std/crypto/pcurves/tests.zig index 6d9682abf1..c4a09b8f0d 100644 --- a/lib/std/crypto/pcurves/tests.zig +++ b/lib/std/crypto/pcurves/tests.zig @@ -101,3 +101,9 @@ test "p256 field element non-canonical encoding" { const s = [_]u8{0xff} ** 32; try testing.expectError(error.NonCanonical, P256.Fe.fromBytes(s, .Little)); } + +test "p256 neutral element decoding" { + try testing.expectError(error.InvalidEncoding, P256.fromAffineCoordinates(.{ .x = P256.Fe.zero, .y = P256.Fe.zero })); + const p = try P256.fromAffineCoordinates(.{ .x = P256.Fe.zero, .y = P256.Fe.one }); + try testing.expectError(error.IdentityElement, p.rejectIdentity()); +} diff --git a/lib/std/fs/file.zig b/lib/std/fs/file.zig index a76652ffc9..fecf2310dd 100644 --- a/lib/std/fs/file.zig +++ b/lib/std/fs/file.zig @@ -524,8 +524,8 @@ pub const File = struct { /// The `iovecs` parameter is mutable because this function needs to mutate the fields in /// order to handle partial reads from the underlying OS layer. /// See https://github.com/ziglang/zig/issues/7699 - pub fn preadvAll(self: File, iovecs: []const os.iovec, offset: u64) PReadError!void { - if (iovecs.len == 0) return; + pub fn preadvAll(self: File, iovecs: []os.iovec, offset: u64) PReadError!usize { + if (iovecs.len == 0) return 0; var i: usize = 0; var off: usize = 0; diff --git a/lib/std/fs/test.zig b/lib/std/fs/test.zig index 3417e782c0..f586c50b6a 100644 --- a/lib/std/fs/test.zig +++ b/lib/std/fs/test.zig @@ -520,6 +520,89 @@ test "makePath, put some files in it, deleteTree" { } } +test "writev, readv" { + var tmp = tmpDir(.{}); + defer tmp.cleanup(); + + const line1 = "line1\n"; + const line2 = "line2\n"; + + var buf1: [line1.len]u8 = undefined; + var buf2: [line2.len]u8 = undefined; + var write_vecs = [_]std.os.iovec_const{ + .{ + .iov_base = line1, + .iov_len = line1.len, + }, + .{ + .iov_base = line2, + .iov_len = line2.len, + }, + }; + var read_vecs = [_]std.os.iovec{ + .{ + .iov_base = &buf2, + .iov_len = buf2.len, + }, + .{ + .iov_base = &buf1, + .iov_len = buf1.len, + }, + }; + + var src_file = try tmp.dir.createFile("test.txt", .{ .read = true }); + defer src_file.close(); + + try src_file.writevAll(&write_vecs); + try testing.expectEqual(@as(u64, line1.len + line2.len), try src_file.getEndPos()); + try src_file.seekTo(0); + const read = try src_file.readvAll(&read_vecs); + try testing.expectEqual(@as(usize, line1.len + line2.len), read); + try testing.expectEqualStrings(&buf1, "line2\n"); + try testing.expectEqualStrings(&buf2, "line1\n"); +} + +test "pwritev, preadv" { + var tmp = tmpDir(.{}); + defer tmp.cleanup(); + + const line1 = "line1\n"; + const line2 = "line2\n"; + + var buf1: [line1.len]u8 = undefined; + var buf2: [line2.len]u8 = undefined; + var write_vecs = [_]std.os.iovec_const{ + .{ + .iov_base = line1, + .iov_len = line1.len, + }, + .{ + .iov_base = line2, + .iov_len = line2.len, + }, + }; + var read_vecs = [_]std.os.iovec{ + .{ + .iov_base = &buf2, + .iov_len = buf2.len, + }, + .{ + .iov_base = &buf1, + .iov_len = buf1.len, + }, + }; + + var src_file = try tmp.dir.createFile("test.txt", .{ .read = true }); + defer src_file.close(); + + try src_file.pwritevAll(&write_vecs, 16); + try testing.expectEqual(@as(u64, 16 + 
line1.len + line2.len), try src_file.getEndPos()); + const read = try src_file.preadvAll(&read_vecs, 16); + try testing.expectEqual(@as(usize, line1.len + line2.len), read); + try testing.expectEqualStrings(&buf1, "line2\n"); + try testing.expectEqualStrings(&buf2, "line1\n"); +} + test "access file" { if (builtin.os.tag == .wasi) return error.SkipZigTest; diff --git a/lib/std/json.zig b/lib/std/json.zig index 232ab50f21..109167bbc1 100644 --- a/lib/std/json.zig +++ b/lib/std/json.zig @@ -1571,6 +1571,7 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options: return error.DuplicateJSONField; } else if (options.duplicate_field_behavior == .UseLast) { parseFree(field.field_type, @field(r, field.name), options); + fields_seen[i] = false; } } if (field.is_comptime) { @@ -1642,6 +1643,7 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options: switch (ptrInfo.size) { .One => { const r: T = try allocator.create(ptrInfo.child); + errdefer allocator.destroy(r); r.* = try parseInternal(ptrInfo.child, token, tokens, options); return r; }, @@ -1988,6 +1990,24 @@ test "parse into struct with misc fields" { try testing.expectEqual(T.Union{ .float = 100000 }, r.a_union); } +test "parse into struct with duplicate field" { + // allow allocator to detect double frees by keeping bucket in use + const ballast = try testing.allocator.alloc(u64, 1); + defer testing.allocator.free(ballast); + + const options = ParseOptions{ + .allocator = testing.allocator, + .duplicate_field_behavior = .UseLast, + }; + const str = "{ \"a\": 1, \"a\": 0.25 }"; + + const T1 = struct { a: *u64 }; + try testing.expectError(error.UnexpectedToken, parse(T1, &TokenStream.init(str), options)); + + const T2 = struct { a: f64 }; + try testing.expectEqual(T2{ .a = 0.25 }, try parse(T2, &TokenStream.init(str), options)); +} + /// A non-stream JSON parser which constructs a tree of Value's. pub const Parser = struct { allocator: *Allocator, diff --git a/lib/std/mem.zig b/lib/std/mem.zig index bc7a9af943..044d73413a 100644 --- a/lib/std/mem.zig +++ b/lib/std/mem.zig @@ -603,6 +603,7 @@ test "span" { try testing.expectEqual(@as(?[:0]u16, null), span(@as(?[*:0]u16, null))); } +/// Deprecated: use std.mem.span() or std.mem.sliceTo() /// Same as `span`, except when there is both a sentinel and an array /// length or slice length, scans the memory for the sentinel value /// rather than using the length. @@ -631,6 +632,192 @@ test "spanZ" { try testing.expectEqual(@as(?[:0]u16, null), spanZ(@as(?[*:0]u16, null))); } +/// Helper for the return type of sliceTo() +fn SliceTo(comptime T: type, comptime end: meta.Elem(T)) type { + switch (@typeInfo(T)) { + .Optional => |optional_info| { + return ?SliceTo(optional_info.child, end); + }, + .Pointer => |ptr_info| { + var new_ptr_info = ptr_info; + new_ptr_info.size = .Slice; + switch (ptr_info.size) { + .One => switch (@typeInfo(ptr_info.child)) { + .Array => |array_info| { + new_ptr_info.child = array_info.child; + // The return type must only be sentinel terminated if we are guaranteed + // to find the value searched for, which is only the case if it matches + // the sentinel of the type passed. 
+ if (array_info.sentinel) |sentinel| { + if (end == sentinel) { + new_ptr_info.sentinel = end; + } else { + new_ptr_info.sentinel = null; + } + } + }, + else => {}, + }, + .Many, .Slice => { + // The return type must only be sentinel terminated if we are guaranteed + // to find the value searched for, which is only the case if it matches + // the sentinel of the type passed. + if (ptr_info.sentinel) |sentinel| { + if (end == sentinel) { + new_ptr_info.sentinel = end; + } else { + new_ptr_info.sentinel = null; + } + } + }, + .C => { + new_ptr_info.sentinel = end; + // C pointers are always allowzero, but we don't want the return type to be. + assert(new_ptr_info.is_allowzero); + new_ptr_info.is_allowzero = false; + }, + } + return @Type(std.builtin.TypeInfo{ .Pointer = new_ptr_info }); + }, + else => {}, + } + @compileError("invalid type given to std.mem.sliceTo: " ++ @typeName(T)); +} + +/// Takes a pointer to an array, an array, a sentinel-terminated pointer, or a slice and +/// iterates searching for the first occurrence of `end`, returning the scanned slice. +/// If `end` is not found, the full length of the array/slice/sentinel terminated pointer is returned. +/// If the pointer type is sentinel terminated and `end` matches that terminator, the +/// resulting slice is also sentinel terminated. +/// Pointer properties such as mutability and alignment are preserved. +/// C pointers are assumed to be non-null. +pub fn sliceTo(ptr: anytype, comptime end: meta.Elem(@TypeOf(ptr))) SliceTo(@TypeOf(ptr), end) { + if (@typeInfo(@TypeOf(ptr)) == .Optional) { + const non_null = ptr orelse return null; + return sliceTo(non_null, end); + } + const Result = SliceTo(@TypeOf(ptr), end); + const length = lenSliceTo(ptr, end); + if (@typeInfo(Result).Pointer.sentinel) |s| { + return ptr[0..length :s]; + } else { + return ptr[0..length]; + } +} + +test "sliceTo" { + try testing.expectEqualSlices(u8, "aoeu", sliceTo("aoeu", 0)); + + { + var array: [5]u16 = [_]u16{ 1, 2, 3, 4, 5 }; + try testing.expectEqualSlices(u16, &array, sliceTo(&array, 0)); + try testing.expectEqualSlices(u16, array[0..3], sliceTo(array[0..3], 0)); + try testing.expectEqualSlices(u16, array[0..2], sliceTo(&array, 3)); + try testing.expectEqualSlices(u16, array[0..2], sliceTo(array[0..3], 3)); + + const sentinel_ptr = @ptrCast([*:5]u16, &array); + try testing.expectEqualSlices(u16, array[0..2], sliceTo(sentinel_ptr, 3)); + try testing.expectEqualSlices(u16, array[0..4], sliceTo(sentinel_ptr, 99)); + + const optional_sentinel_ptr = @ptrCast(?[*:5]u16, &array); + try testing.expectEqualSlices(u16, array[0..2], sliceTo(optional_sentinel_ptr, 3).?); + try testing.expectEqualSlices(u16, array[0..4], sliceTo(optional_sentinel_ptr, 99).?); + + const c_ptr = @as([*c]u16, &array); + try testing.expectEqualSlices(u16, array[0..2], sliceTo(c_ptr, 3)); + + const slice: []u16 = &array; + try testing.expectEqualSlices(u16, array[0..2], sliceTo(slice, 3)); + try testing.expectEqualSlices(u16, &array, sliceTo(slice, 99)); + + const sentinel_slice: [:5]u16 = array[0..4 :5]; + try testing.expectEqualSlices(u16, array[0..2], sliceTo(sentinel_slice, 3)); + try testing.expectEqualSlices(u16, array[0..4], sliceTo(sentinel_slice, 99)); + } + { + var sentinel_array: [5:0]u16 = [_:0]u16{ 1, 2, 3, 4, 5 }; + try testing.expectEqualSlices(u16, sentinel_array[0..2], sliceTo(&sentinel_array, 3)); + try testing.expectEqualSlices(u16, &sentinel_array, sliceTo(&sentinel_array, 0)); + try testing.expectEqualSlices(u16, &sentinel_array, sliceTo(&sentinel_array, 99)); 
+ } + + try testing.expectEqual(@as(?[]u8, null), sliceTo(@as(?[]u8, null), 0)); +} + +/// Private helper for sliceTo(). If you want the length, use sliceTo(foo, x).len +fn lenSliceTo(ptr: anytype, comptime end: meta.Elem(@TypeOf(ptr))) usize { + switch (@typeInfo(@TypeOf(ptr))) { + .Pointer => |ptr_info| switch (ptr_info.size) { + .One => switch (@typeInfo(ptr_info.child)) { + .Array => |array_info| { + if (array_info.sentinel) |sentinel| { + if (sentinel == end) { + return indexOfSentinel(array_info.child, end, ptr); + } + } + return indexOfScalar(array_info.child, ptr, end) orelse array_info.len; + }, + else => {}, + }, + .Many => if (ptr_info.sentinel) |sentinel| { + // We may be looking for something other than the sentinel, + // but iterating past the sentinel would be a bug so we need + // to check for both. + var i: usize = 0; + while (ptr[i] != end and ptr[i] != sentinel) i += 1; + return i; + }, + .C => { + assert(ptr != null); + return indexOfSentinel(ptr_info.child, end, ptr); + }, + .Slice => { + if (ptr_info.sentinel) |sentinel| { + if (sentinel == end) { + return indexOfSentinel(ptr_info.child, sentinel, ptr); + } + } + return indexOfScalar(ptr_info.child, ptr, end) orelse ptr.len; + }, + }, + else => {}, + } + @compileError("invalid type given to std.mem.sliceTo: " ++ @typeName(@TypeOf(ptr))); +} + +test "lenSliceTo" { + try testing.expect(lenSliceTo("aoeu", 0) == 4); + + { + var array: [5]u16 = [_]u16{ 1, 2, 3, 4, 5 }; + try testing.expectEqual(@as(usize, 5), lenSliceTo(&array, 0)); + try testing.expectEqual(@as(usize, 3), lenSliceTo(array[0..3], 0)); + try testing.expectEqual(@as(usize, 2), lenSliceTo(&array, 3)); + try testing.expectEqual(@as(usize, 2), lenSliceTo(array[0..3], 3)); + + const sentinel_ptr = @ptrCast([*:5]u16, &array); + try testing.expectEqual(@as(usize, 2), lenSliceTo(sentinel_ptr, 3)); + try testing.expectEqual(@as(usize, 4), lenSliceTo(sentinel_ptr, 99)); + + const c_ptr = @as([*c]u16, &array); + try testing.expectEqual(@as(usize, 2), lenSliceTo(c_ptr, 3)); + + const slice: []u16 = &array; + try testing.expectEqual(@as(usize, 2), lenSliceTo(slice, 3)); + try testing.expectEqual(@as(usize, 5), lenSliceTo(slice, 99)); + + const sentinel_slice: [:5]u16 = array[0..4 :5]; + try testing.expectEqual(@as(usize, 2), lenSliceTo(sentinel_slice, 3)); + try testing.expectEqual(@as(usize, 4), lenSliceTo(sentinel_slice, 99)); + } + { + var sentinel_array: [5:0]u16 = [_:0]u16{ 1, 2, 3, 4, 5 }; + try testing.expectEqual(@as(usize, 2), lenSliceTo(&sentinel_array, 3)); + try testing.expectEqual(@as(usize, 5), lenSliceTo(&sentinel_array, 0)); + try testing.expectEqual(@as(usize, 5), lenSliceTo(&sentinel_array, 99)); + } +} + /// Takes a pointer to an array, an array, a vector, a sentinel-terminated pointer, /// a slice or a tuple, and returns the length. /// In the case of a sentinel-terminated array, it uses the array length. @@ -689,6 +876,7 @@ test "len" { } } +/// Deprecated: use std.mem.len() or std.mem.sliceTo().len /// Takes a pointer to an array, an array, a sentinel-terminated pointer, /// or a slice, and returns the length. 
/// In the case of a sentinel-terminated array, it scans the array diff --git a/lib/std/meta.zig b/lib/std/meta.zig index e35a248a91..18f761d86e 100644 --- a/lib/std/meta.zig +++ b/lib/std/meta.zig @@ -175,13 +175,7 @@ pub fn Elem(comptime T: type) type { }, .Many, .C, .Slice => return info.child, }, - .Optional => |info| switch (@typeInfo(info.child)) { - .Pointer => |ptr_info| switch (ptr_info.size) { - .Many => return ptr_info.child, - else => {}, - }, - else => {}, - }, + .Optional => |info| return Elem(info.child), else => {}, } @compileError("Expected pointer, slice, array or vector type, found '" ++ @typeName(T) ++ "'"); diff --git a/lib/std/os.zig b/lib/std/os.zig index 6c6baf49f3..b6dc070d60 100644 --- a/lib/std/os.zig +++ b/lib/std/os.zig @@ -497,8 +497,14 @@ pub fn pread(fd: fd_t, buf: []u8, offset: u64) PReadError!usize { }; const adjusted_len = math.min(max_count, buf.len); + const pread_sym = if (builtin.os.tag == .linux and builtin.link_libc) + system.pread64 + else + system.pread; + + const ioffset = @bitCast(i64, offset); // the OS treats this as unsigned while (true) { - const rc = system.pread(fd, buf.ptr, adjusted_len, offset); + const rc = pread_sym(fd, buf.ptr, adjusted_len, ioffset); switch (errno(rc)) { 0 => return @intCast(usize, rc), EINTR => continue, @@ -567,15 +573,13 @@ pub fn ftruncate(fd: fd_t, length: u64) TruncateError!void { } while (true) { - const rc = if (builtin.link_libc) - if (std.Target.current.os.tag == .linux) - system.ftruncate64(fd, @bitCast(off_t, length)) - else - system.ftruncate(fd, @bitCast(off_t, length)) + const ftruncate_sym = if (builtin.os.tag == .linux and builtin.link_libc) + system.ftruncate64 else - system.ftruncate(fd, length); + system.ftruncate; - switch (errno(rc)) { + const ilen = @bitCast(i64, length); // the OS treats this as unsigned + switch (errno(ftruncate_sym(fd, ilen))) { 0 => return, EINTR => continue, EFBIG => return error.FileTooBig, @@ -637,8 +641,14 @@ pub fn preadv(fd: fd_t, iov: []const iovec, offset: u64) PReadError!usize { const iov_count = math.cast(u31, iov.len) catch math.maxInt(u31); + const preadv_sym = if (builtin.os.tag == .linux and builtin.link_libc) + system.preadv64 + else + system.preadv; + + const ioffset = @bitCast(i64, offset); // the OS treats this as unsigned while (true) { - const rc = system.preadv(fd, iov.ptr, iov_count, offset); + const rc = preadv_sym(fd, iov.ptr, iov_count, ioffset); switch (errno(rc)) { 0 => return @bitCast(usize, rc), EINTR => continue, @@ -895,8 +905,14 @@ pub fn pwrite(fd: fd_t, bytes: []const u8, offset: u64) PWriteError!usize { }; const adjusted_len = math.min(max_count, bytes.len); + const pwrite_sym = if (builtin.os.tag == .linux and builtin.link_libc) + system.pwrite64 + else + system.pwrite; + + const ioffset = @bitCast(i64, offset); // the OS treats this as unsigned while (true) { - const rc = system.pwrite(fd, bytes.ptr, adjusted_len, offset); + const rc = pwrite_sym(fd, bytes.ptr, adjusted_len, ioffset); switch (errno(rc)) { 0 => return @intCast(usize, rc), EINTR => continue, @@ -977,9 +993,15 @@ pub fn pwritev(fd: fd_t, iov: []const iovec_const, offset: u64) PWriteError!usiz } } + const pwritev_sym = if (builtin.os.tag == .linux and builtin.link_libc) + system.pwritev64 + else + system.pwritev; + const iov_count = math.cast(u31, iov.len) catch math.maxInt(u31); + const ioffset = @bitCast(i64, offset); // the OS treats this as unsigned while (true) { - const rc = system.pwritev(fd, iov.ptr, iov_count, offset); + const rc = pwritev_sym(fd, iov.ptr, 
iov_count, ioffset); switch (errno(rc)) { 0 => return @intCast(usize, rc), EINTR => continue, @@ -1068,8 +1090,14 @@ pub fn openZ(file_path: [*:0]const u8, flags: u32, perm: mode_t) OpenError!fd_t const file_path_w = try windows.cStrToPrefixedFileW(file_path); return openW(file_path_w.span(), flags, perm); } + + const open_sym = if (builtin.os.tag == .linux and builtin.link_libc) + system.open64 + else + system.open; + while (true) { - const rc = system.open(file_path, flags, perm); + const rc = open_sym(file_path, flags, perm); switch (errno(rc)) { 0 => return @intCast(fd_t, rc), EINTR => continue, @@ -1202,8 +1230,14 @@ pub fn openatZ(dir_fd: fd_t, file_path: [*:0]const u8, flags: u32, mode: mode_t) const file_path_w = try windows.cStrToPrefixedFileW(file_path); return openatW(dir_fd, file_path_w.span(), flags, mode); } + + const openat_sym = if (builtin.os.tag == .linux and builtin.link_libc) + system.openat64 + else + system.openat; + while (true) { - const rc = system.openat(dir_fd, file_path, flags, mode); + const rc = openat_sym(dir_fd, file_path, flags, mode); switch (errno(rc)) { 0 => return @intCast(fd_t, rc), EINTR => continue, @@ -2758,9 +2792,9 @@ pub const ShutdownHow = enum { recv, send, both }; pub fn shutdown(sock: socket_t, how: ShutdownHow) ShutdownError!void { if (builtin.os.tag == .windows) { const result = windows.ws2_32.shutdown(sock, switch (how) { - .recv => windows.SD_RECEIVE, - .send => windows.SD_SEND, - .both => windows.SD_BOTH, + .recv => windows.ws2_32.SD_RECEIVE, + .send => windows.ws2_32.SD_SEND, + .both => windows.ws2_32.SD_BOTH, }); if (0 != result) switch (windows.ws2_32.WSAGetLastError()) { .WSAECONNABORTED => return error.ConnectionAborted, @@ -3217,6 +3251,35 @@ pub fn getsockname(sock: socket_t, addr: *sockaddr, addrlen: *socklen_t) GetSock } } +pub fn getpeername(sock: socket_t, addr: *sockaddr, addrlen: *socklen_t) GetSockNameError!void { + if (builtin.os.tag == .windows) { + const rc = windows.getpeername(sock, addr, addrlen); + if (rc == windows.ws2_32.SOCKET_ERROR) { + switch (windows.ws2_32.WSAGetLastError()) { + .WSANOTINITIALISED => unreachable, + .WSAENETDOWN => return error.NetworkSubsystemFailed, + .WSAEFAULT => unreachable, // addr or addrlen have invalid pointers or addrlen points to an incorrect value + .WSAENOTSOCK => return error.FileDescriptorNotASocket, + .WSAEINVAL => return error.SocketNotBound, + else => |err| return windows.unexpectedWSAError(err), + } + } + return; + } else { + const rc = system.getpeername(sock, addr, addrlen); + switch (errno(rc)) { + 0 => return, + else => |err| return unexpectedErrno(err), + + EBADF => unreachable, // always a race condition + EFAULT => unreachable, + EINVAL => unreachable, // invalid parameters + ENOTSOCK => return error.FileDescriptorNotASocket, + ENOBUFS => return error.SystemResources, + } + } +} + pub const ConnectError = error{ /// For UNIX domain sockets, which are identified by pathname: Write permission is denied on the socket /// file, or search permission is denied for one of the directories in the path prefix. @@ -3408,8 +3471,13 @@ pub fn fstat(fd: fd_t) FStatError!Stat { @compileError("fstat is not yet implemented on Windows"); } - var stat: Stat = undefined; - switch (errno(system.fstat(fd, &stat))) { + const fstat_sym = if (builtin.os.tag == .linux and builtin.link_libc) + system.fstat64 + else + system.fstat; + + var stat = mem.zeroes(Stat); + switch (errno(fstat_sym(fd, &stat))) { 0 => return stat, EINVAL => unreachable, EBADF => unreachable, // Always a race condition. 
@@ -3459,8 +3527,13 @@ pub fn fstatatWasi(dirfd: fd_t, pathname: []const u8, flags: u32) FStatAtError!S /// Same as `fstatat` but `pathname` is null-terminated. /// See also `fstatat`. pub fn fstatatZ(dirfd: fd_t, pathname: [*:0]const u8, flags: u32) FStatAtError!Stat { - var stat: Stat = undefined; - switch (errno(system.fstatat(dirfd, pathname, &stat, flags))) { + const fstatat_sym = if (builtin.os.tag == .linux and builtin.link_libc) + system.fstatat64 + else + system.fstatat; + + var stat = mem.zeroes(Stat); + switch (errno(fstatat_sym(dirfd, pathname, &stat, flags))) { 0 => return stat, EINVAL => unreachable, EBADF => unreachable, // Always a race condition. @@ -3672,12 +3745,17 @@ pub fn mmap( fd: fd_t, offset: u64, ) MMapError![]align(mem.page_size) u8 { + const mmap_sym = if (builtin.os.tag == .linux and builtin.link_libc) + system.mmap64 + else + system.mmap; + + const ioffset = @bitCast(i64, offset); // the OS treats this as unsigned + const rc = mmap_sym(ptr, length, prot, flags, fd, ioffset); const err = if (builtin.link_libc) blk: { - const rc = std.c.mmap(ptr, length, prot, flags, fd, offset); if (rc != std.c.MAP_FAILED) return @ptrCast([*]align(mem.page_size) u8, @alignCast(mem.page_size, rc))[0..length]; break :blk system._errno().*; } else blk: { - const rc = system.mmap(ptr, length, prot, flags, fd, offset); const err = errno(rc); if (err == 0) return @intToPtr([*]align(mem.page_size) u8, rc)[0..length]; break :blk err; @@ -4027,8 +4105,14 @@ pub fn lseek_SET(fd: fd_t, offset: u64) SeekError!void { else => |err| return unexpectedErrno(err), } } - const ipos = @bitCast(i64, offset); // the OS treats this as unsigned - switch (errno(system.lseek(fd, ipos, SEEK_SET))) { + + const lseek_sym = if (builtin.os.tag == .linux and builtin.link_libc) + system.lseek64 + else + system.lseek; + + const ioffset = @bitCast(i64, offset); // the OS treats this as unsigned + switch (errno(lseek_sym(fd, ioffset, SEEK_SET))) { 0 => return, EBADF => unreachable, // always a race condition EINVAL => return error.Unseekable, @@ -4069,7 +4153,13 @@ pub fn lseek_CUR(fd: fd_t, offset: i64) SeekError!void { else => |err| return unexpectedErrno(err), } } - switch (errno(system.lseek(fd, offset, SEEK_CUR))) { + const lseek_sym = if (builtin.os.tag == .linux and builtin.link_libc) + system.lseek64 + else + system.lseek; + + const ioffset = @bitCast(i64, offset); // the OS treats this as unsigned + switch (errno(lseek_sym(fd, ioffset, SEEK_CUR))) { 0 => return, EBADF => unreachable, // always a race condition EINVAL => return error.Unseekable, @@ -4110,7 +4200,13 @@ pub fn lseek_END(fd: fd_t, offset: i64) SeekError!void { else => |err| return unexpectedErrno(err), } } - switch (errno(system.lseek(fd, offset, SEEK_END))) { + const lseek_sym = if (builtin.os.tag == .linux and builtin.link_libc) + system.lseek64 + else + system.lseek; + + const ioffset = @bitCast(i64, offset); // the OS treats this as unsigned + switch (errno(lseek_sym(fd, ioffset, SEEK_END))) { 0 => return, EBADF => unreachable, // always a race condition EINVAL => return error.Unseekable, @@ -4151,7 +4247,12 @@ pub fn lseek_CUR_get(fd: fd_t) SeekError!u64 { else => |err| return unexpectedErrno(err), } } - const rc = system.lseek(fd, 0, SEEK_CUR); + const lseek_sym = if (builtin.os.tag == .linux and builtin.link_libc) + system.lseek64 + else + system.lseek; + + const rc = lseek_sym(fd, 0, SEEK_CUR); switch (errno(rc)) { 0 => return @bitCast(u64, rc), EBADF => unreachable, // always a race condition @@ -5169,9 +5270,14 @@ pub fn sendfile( 
// Here we match BSD behavior, making a zero count value send as many bytes as possible. const adjusted_count = if (in_len == 0) max_count else math.min(in_len, @as(size_t, max_count)); + const sendfile_sym = if (builtin.link_libc) + system.sendfile64 + else + system.sendfile; + while (true) { var offset: off_t = @bitCast(off_t, in_offset); - const rc = system.sendfile(out_fd, in_fd, &offset, adjusted_count); + const rc = sendfile_sym(out_fd, in_fd, &offset, adjusted_count); switch (errno(rc)) { 0 => { const amt = @bitCast(usize, rc); @@ -5722,7 +5828,7 @@ pub const SetSockOptError = error{ /// Set a socket's options. pub fn setsockopt(fd: socket_t, level: u32, optname: u32, opt: []const u8) SetSockOptError!void { if (builtin.os.tag == .windows) { - const rc = windows.ws2_32.setsockopt(fd, level, optname, opt.ptr, @intCast(socklen_t, opt.len)); + const rc = windows.ws2_32.setsockopt(fd, @intCast(i32, level), @intCast(i32, optname), opt.ptr, @intCast(i32, opt.len)); if (rc == windows.ws2_32.SOCKET_ERROR) { switch (windows.ws2_32.WSAGetLastError()) { .WSANOTINITIALISED => unreachable, @@ -5989,9 +6095,13 @@ pub fn prctl(option: PR, args: anytype) PrctlError!u31 { pub const GetrlimitError = UnexpectedError; pub fn getrlimit(resource: rlimit_resource) GetrlimitError!rlimit { + const getrlimit_sym = if (builtin.os.tag == .linux and builtin.link_libc) + system.getrlimit64 + else + system.getrlimit; + var limits: rlimit = undefined; - const rc = system.getrlimit(resource, &limits); - switch (errno(rc)) { + switch (errno(getrlimit_sym(resource, &limits))) { 0 => return limits, EFAULT => unreachable, // bogus pointer EINVAL => unreachable, @@ -6002,8 +6112,12 @@ pub fn getrlimit(resource: rlimit_resource) GetrlimitError!rlimit { pub const SetrlimitError = error{PermissionDenied} || UnexpectedError; pub fn setrlimit(resource: rlimit_resource, limits: rlimit) SetrlimitError!void { - const rc = system.setrlimit(resource, &limits); - switch (errno(rc)) { + const setrlimit_sym = if (builtin.os.tag == .linux and builtin.link_libc) + system.setrlimit64 + else + system.setrlimit; + + switch (errno(setrlimit_sym(resource, &limits))) { 0 => return, EFAULT => unreachable, // bogus pointer EINVAL => unreachable, diff --git a/lib/std/os/bits/darwin.zig b/lib/std/os/bits/darwin.zig index 6b214759e1..0c2d8dc01e 100644 --- a/lib/std/os/bits/darwin.zig +++ b/lib/std/os/bits/darwin.zig @@ -23,6 +23,13 @@ pub const sockaddr = extern struct { family: sa_family_t, data: [14]u8, }; +pub const sockaddr_storage = extern struct { + len: u8, + family: sa_family_t, + __pad1: [5]u8, + __align: i64, + __pad2: [112]u8, +}; pub const sockaddr_in = extern struct { len: u8 = @sizeOf(sockaddr_in), family: sa_family_t = AF_INET, @@ -1744,3 +1751,38 @@ pub const IOCPARM_MASK = 0x1fff; fn ior(inout: u32, group: usize, num: usize, len: usize) usize { return (inout | ((len & IOCPARM_MASK) << 16) | ((group) << 8) | (num)); } + +// CPU families mapping +pub const CPUFAMILY = enum(u32) { + UNKNOWN = 0, + POWERPC_G3 = 0xcee41549, + POWERPC_G4 = 0x77c184ae, + POWERPC_G5 = 0xed76d8aa, + INTEL_6_13 = 0xaa33392b, + INTEL_PENRYN = 0x78ea4fbc, + INTEL_NEHALEM = 0x6b5a4cd2, + INTEL_WESTMERE = 0x573b5eec, + INTEL_SANDYBRIDGE = 0x5490b78c, + INTEL_IVYBRIDGE = 0x1f65e835, + INTEL_HASWELL = 0x10b282dc, + INTEL_BROADWELL = 0x582ed09c, + INTEL_SKYLAKE = 0x37fc219f, + INTEL_KABYLAKE = 0x0f817246, + ARM_9 = 0xe73283ae, + ARM_11 = 0x8ff620d8, + ARM_XSCALE = 0x53b005f5, + ARM_12 = 0xbd1b0ae9, + ARM_13 = 0x0cc90e64, + ARM_14 = 0x96077ef1, + ARM_15 = 
0xa8511bca, + ARM_SWIFT = 0x1e2d6381, + ARM_CYCLONE = 0x37a09642, + ARM_TYPHOON = 0x2c91a47e, + ARM_TWISTER = 0x92fb37c8, + ARM_HURRICANE = 0x67ceee93, + ARM_MONSOON_MISTRAL = 0xe81e7ef6, + ARM_VORTEX_TEMPEST = 0x07d34b9f, + ARM_LIGHTNING_THUNDER = 0x462504d2, + ARM_FIRESTORM_ICESTORM = 0x1b588bb3, + _, +}; diff --git a/lib/std/os/bits/dragonfly.zig b/lib/std/os/bits/dragonfly.zig index e42b6fbd1d..8ef1de3209 100644 --- a/lib/std/os/bits/dragonfly.zig +++ b/lib/std/os/bits/dragonfly.zig @@ -380,6 +380,14 @@ pub const sockaddr = extern struct { sa_data: [14]u8, }; +pub const sockaddr_storage = extern struct { + len: u8, + family: sa_family_t, + __pad1: [5]u8, + __align: i64, + __pad2: [112]u8, +}; + pub const Kevent = extern struct { ident: usize, filter: c_short, @@ -640,7 +648,7 @@ pub const socklen_t = c_uint; pub const sockaddr_storage = extern struct { ss_len: u8, ss_family: sa_family_t, - __ss_pad1: [6]u8, + __ss_pad1: [5]u8, __ss_align: i64, __ss_pad2: [112]u8, }; diff --git a/lib/std/os/bits/freebsd.zig b/lib/std/os/bits/freebsd.zig index 9703ccca04..2f61d137e2 100644 --- a/lib/std/os/bits/freebsd.zig +++ b/lib/std/os/bits/freebsd.zig @@ -206,6 +206,14 @@ pub const sockaddr = extern struct { data: [14]u8, }; +pub const sockaddr_storage = extern struct { + len: u8, + family: sa_family_t, + __pad1: [5]u8, + __align: i64, + __pad2: [112]u8, +}; + pub const sockaddr_in = extern struct { len: u8 = @sizeOf(sockaddr_in), family: sa_family_t = AF_INET, diff --git a/lib/std/os/bits/haiku.zig b/lib/std/os/bits/haiku.zig index 0be900d839..287a62ebb7 100644 --- a/lib/std/os/bits/haiku.zig +++ b/lib/std/os/bits/haiku.zig @@ -239,6 +239,14 @@ pub const sockaddr = extern struct { data: [14]u8, }; +pub const sockaddr_storage = extern struct { + len: u8, + family: sa_family_t, + __pad1: [5]u8, + __align: i64, + __pad2: [112]u8, +}; + pub const sockaddr_in = extern struct { len: u8 = @sizeOf(sockaddr_in), family: sa_family_t = AF_INET, diff --git a/lib/std/os/bits/linux.zig b/lib/std/os/bits/linux.zig index 556eb20ab2..72ea62c32c 100644 --- a/lib/std/os/bits/linux.zig +++ b/lib/std/os/bits/linux.zig @@ -1149,6 +1149,13 @@ pub const sockaddr = extern struct { data: [14]u8, }; +pub const sockaddr_storage = extern struct { + family: sa_family_t, + __pad1: [6]u8, + __align: i64, + __pad2: [112]u8, +}; + /// IPv4 socket address pub const sockaddr_in = extern struct { family: sa_family_t = AF_INET, diff --git a/lib/std/os/bits/netbsd.zig b/lib/std/os/bits/netbsd.zig index 1fd0eabc20..06fd3e6ba2 100644 --- a/lib/std/os/bits/netbsd.zig +++ b/lib/std/os/bits/netbsd.zig @@ -226,6 +226,14 @@ pub const sockaddr = extern struct { data: [14]u8, }; +pub const sockaddr_storage = extern struct { + len: u8, + family: sa_family_t, + __pad1: [5]u8, + __align: i64, + __pad2: [112]u8, +}; + pub const sockaddr_in = extern struct { len: u8 = @sizeOf(sockaddr_in), family: sa_family_t = AF_INET, diff --git a/lib/std/os/bits/openbsd.zig b/lib/std/os/bits/openbsd.zig index 6810ab9a33..84e874f1cb 100644 --- a/lib/std/os/bits/openbsd.zig +++ b/lib/std/os/bits/openbsd.zig @@ -246,6 +246,14 @@ pub const sockaddr = extern struct { data: [14]u8, }; +pub const sockaddr_storage = extern struct { + len: u8, + family: sa_family_t, + __pad1: [5]u8, + __align: i64, + __pad2: [112]u8, +}; + pub const sockaddr_in = extern struct { len: u8 = @sizeOf(sockaddr_in), family: sa_family_t = AF_INET, @@ -290,15 +298,6 @@ pub const AI_NUMERICSERV = 16; /// only if any address is assigned pub const AI_ADDRCONFIG = 64; -pub const CTL_KERN = 1; -pub 
const CTL_DEBUG = 5; -pub const CTL_HW = 6; - -pub const KERN_PROC_ARGS = 55; -pub const KERN_PROC_ARGV = 1; - -pub const HW_NCPUONLINE = 25; - pub const PATH_MAX = 1024; pub const STDIN_FILENO = 0; @@ -1229,3 +1228,144 @@ pub const POLLNORM = POLLRDNORM; pub const POLLWRNORM = POLLOUT; pub const POLLRDBAND = 0x0080; pub const POLLWRBAND = 0x0100; + +// sysctl mib +pub const CTL_UNSPEC = 0; +pub const CTL_KERN = 1; +pub const CTL_VM = 2; +pub const CTL_FS = 3; +pub const CTL_NET = 4; +pub const CTL_DEBUG = 5; +pub const CTL_HW = 6; +pub const CTL_MACHDEP = 7; + +pub const CTL_DDB = 9; +pub const CTL_VFS = 10; + +pub const KERN_OSTYPE = 1; +pub const KERN_OSRELEASE = 2; +pub const KERN_OSREV = 3; +pub const KERN_VERSION = 4; +pub const KERN_MAXVNODES = 5; +pub const KERN_MAXPROC = 6; +pub const KERN_MAXFILES = 7; +pub const KERN_ARGMAX = 8; +pub const KERN_SECURELVL = 9; +pub const KERN_HOSTNAME = 10; +pub const KERN_HOSTID = 11; +pub const KERN_CLOCKRATE = 12; + +pub const KERN_PROF = 16; +pub const KERN_POSIX1 = 17; +pub const KERN_NGROUPS = 18; +pub const KERN_JOB_CONTROL = 19; +pub const KERN_SAVED_IDS = 20; +pub const KERN_BOOTTIME = 21; +pub const KERN_DOMAINNAME = 22; +pub const KERN_MAXPARTITIONS = 23; +pub const KERN_RAWPARTITION = 24; +pub const KERN_MAXTHREAD = 25; +pub const KERN_NTHREADS = 26; +pub const KERN_OSVERSION = 27; +pub const KERN_SOMAXCONN = 28; +pub const KERN_SOMINCONN = 29; + +pub const KERN_NOSUIDCOREDUMP = 32; +pub const KERN_FSYNC = 33; +pub const KERN_SYSVMSG = 34; +pub const KERN_SYSVSEM = 35; +pub const KERN_SYSVSHM = 36; + +pub const KERN_MSGBUFSIZE = 38; +pub const KERN_MALLOCSTATS = 39; +pub const KERN_CPTIME = 40; +pub const KERN_NCHSTATS = 41; +pub const KERN_FORKSTAT = 42; +pub const KERN_NSELCOLL = 43; +pub const KERN_TTY = 44; +pub const KERN_CCPU = 45; +pub const KERN_FSCALE = 46; +pub const KERN_NPROCS = 47; +pub const KERN_MSGBUF = 48; +pub const KERN_POOL = 49; +pub const KERN_STACKGAPRANDOM = 50; +pub const KERN_SYSVIPC_INFO = 51; +pub const KERN_ALLOWKMEM = 52; +pub const KERN_WITNESSWATCH = 53; +pub const KERN_SPLASSERT = 54; +pub const KERN_PROC_ARGS = 55; +pub const KERN_NFILES = 56; +pub const KERN_TTYCOUNT = 57; +pub const KERN_NUMVNODES = 58; +pub const KERN_MBSTAT = 59; +pub const KERN_WITNESS = 60; +pub const KERN_SEMINFO = 61; +pub const KERN_SHMINFO = 62; +pub const KERN_INTRCNT = 63; +pub const KERN_WATCHDOG = 64; +pub const KERN_ALLOWDT = 65; +pub const KERN_PROC = 66; +pub const KERN_MAXCLUSTERS = 67; +pub const KERN_EVCOUNT = 68; +pub const KERN_TIMECOUNTER = 69; +pub const KERN_MAXLOCKSPERUID = 70; +pub const KERN_CPTIME2 = 71; +pub const KERN_CACHEPCT = 72; +pub const KERN_FILE = 73; +pub const KERN_WXABORT = 74; +pub const KERN_CONSDEV = 75; +pub const KERN_NETLIVELOCKS = 76; +pub const KERN_POOL_DEBUG = 77; +pub const KERN_PROC_CWD = 78; +pub const KERN_PROC_NOBROADCASTKILL = 79; +pub const KERN_PROC_VMMAP = 80; +pub const KERN_GLOBAL_PTRACE = 81; +pub const KERN_CONSBUFSIZE = 82; +pub const KERN_CONSBUF = 83; +pub const KERN_AUDIO = 84; +pub const KERN_CPUSTATS = 85; +pub const KERN_PFSTATUS = 86; +pub const KERN_TIMEOUT_STATS = 87; +pub const KERN_UTC_OFFSET = 88; +pub const KERN_VIDEO = 89; + +pub const HW_MACHINE = 1; +pub const HW_MODEL = 2; +pub const HW_NCPU = 3; +pub const HW_BYTEORDER = 4; +pub const HW_PHYSMEM = 5; +pub const HW_USERMEM = 6; +pub const HW_PAGESIZE = 7; +pub const HW_DISKNAMES = 8; +pub const HW_DISKSTATS = 9; +pub const HW_DISKCOUNT = 10; +pub const HW_SENSORS = 11; +pub const HW_CPUSPEED = 12; +pub 
const HW_SETPERF = 13; +pub const HW_VENDOR = 14; +pub const HW_PRODUCT = 15; +pub const HW_VERSION = 16; +pub const HW_SERIALNO = 17; +pub const HW_UUID = 18; +pub const HW_PHYSMEM64 = 19; +pub const HW_USERMEM64 = 20; +pub const HW_NCPUFOUND = 21; +pub const HW_ALLOWPOWERDOWN = 22; +pub const HW_PERFPOLICY = 23; +pub const HW_SMT = 24; +pub const HW_NCPUONLINE = 25; + +pub const KERN_PROC_ALL = 0; +pub const KERN_PROC_PID = 1; +pub const KERN_PROC_PGRP = 2; +pub const KERN_PROC_SESSION = 3; +pub const KERN_PROC_TTY = 4; +pub const KERN_PROC_UID = 5; +pub const KERN_PROC_RUID = 6; +pub const KERN_PROC_KTHREAD = 7; +pub const KERN_PROC_SHOW_THREADS = 0x40000000; + +pub const KERN_PROC_ARGV = 1; +pub const KERN_PROC_NARGV = 2; +pub const KERN_PROC_ENV = 3; +pub const KERN_PROC_NENV = 4; diff --git a/lib/std/os/bits/windows.zig b/lib/std/os/bits/windows.zig index 00ca2a1532..c5b101296b 100644 --- a/lib/std/os/bits/windows.zig +++ b/lib/std/os/bits/windows.zig @@ -321,3 +321,5 @@ pub const O_NOATIME = 0o1000000; pub const O_PATH = 0o10000000; pub const O_TMPFILE = 0o20200000; pub const O_NDELAY = O_NONBLOCK; + +pub const IFNAMESIZE = 30; diff --git a/lib/std/os/linux.zig b/lib/std/os/linux.zig index 65cbc87e36..d057b2f7b3 100644 --- a/lib/std/os/linux.zig +++ b/lib/std/os/linux.zig @@ -60,15 +60,30 @@ const require_aligned_register_pair = std.Target.current.cpu.arch.isThumb(); // Split a 64bit value into a {LSB,MSB} pair. -fn splitValue64(val: u64) [2]u32 { +// The LE/BE variants specify the endianness to assume. +fn splitValueLE64(val: i64) [2]u32 { + const u = @bitCast(u64, val); + return [2]u32{ + @truncate(u32, u), + @truncate(u32, u >> 32), + }; +} +fn splitValueBE64(val: i64) [2]u32 { + return [2]u32{ + @truncate(u32, u >> 32), + @truncate(u32, u), + }; +} +fn splitValue64(val: i64) [2]u32 { + const u = @bitCast(u64, val); switch (native_endian) { .Little => return [2]u32{ - @truncate(u32, val), - @truncate(u32, val >> 32), + @truncate(u32, u), + @truncate(u32, u >> 32), }, .Big => return [2]u32{ - @truncate(u32, val >> 32), - @truncate(u32, val), + @truncate(u32, u >> 32), + @truncate(u32, u), }, } } @@ -142,8 +157,8 @@ pub fn utimensat(dirfd: i32, path: ?[*:0]const u8, times: *const [2]timespec, fl return syscall4(.utimensat, @bitCast(usize, @as(isize, dirfd)), @ptrToInt(path), @ptrToInt(times), flags); } -pub fn fallocate(fd: i32, mode: i32, offset: u64, length: u64) usize { - if (@sizeOf(usize) == 4) { +pub fn fallocate(fd: i32, mode: i32, offset: i64, length: i64) usize { + if (usize_bits < 64) { const offset_halves = splitValue64(offset); const length_halves = splitValue64(length); return syscall6( @@ -160,8 +175,8 @@ pub fn fallocate(fd: i32, mode: i32, offset: u64, length: u64) usize { .fallocate, @bitCast(usize, @as(isize, fd)), @bitCast(usize, @as(isize, mode)), - offset, - length, + @bitCast(u64, offset), + @bitCast(u64, length), ); } } @@ -244,7 +259,7 @@ pub fn umount2(special: [*:0]const u8, flags: u32) usize { return syscall2(.umount2, @ptrToInt(special), flags); } -pub fn mmap(address: ?[*]u8, length: usize, prot: usize, flags: u32, fd: i32, offset: u64) usize { +pub fn mmap(address: ?[*]u8, length: usize, prot: usize, flags: u32, fd: i32, offset: i64) usize { if (@hasField(SYS, "mmap2")) { // Make sure the offset is also specified in multiples of page size if ((offset & (MMAP2_UNIT - 1)) != 0) @@ -257,7 +272,7 @@ pub fn mmap(address: ?[*]u8, length: usize, prot: usize, flags: u32, fd: i32, of prot, flags, @bitCast(usize, @as(isize, fd)), - @truncate(usize, offset / 
MMAP2_UNIT),
+            @truncate(usize, @bitCast(u64, offset) / MMAP2_UNIT),
         );
     } else {
         return syscall6(
@@ -267,7 +282,7 @@ pub fn mmap(address: ?[*]u8, length: usize, prot: usize, flags: u32, fd: i32, of
             prot,
             flags,
             @bitCast(usize, @as(isize, fd)),
-            offset,
+            @bitCast(u64, offset),
         );
     }
 }
@@ -309,8 +324,8 @@ pub fn read(fd: i32, buf: [*]u8, count: usize) usize {
     return syscall3(.read, @bitCast(usize, @as(isize, fd)), @ptrToInt(buf), count);
 }
-pub fn preadv(fd: i32, iov: [*]const iovec, count: usize, offset: u64) usize {
-    const offset_halves = splitValue64(offset);
+pub fn preadv(fd: i32, iov: [*]const iovec, count: usize, offset: i64) usize {
+    const offset_halves = splitValueLE64(offset);
     return syscall5(
         .preadv,
         @bitCast(usize, @as(isize, fd)),
@@ -321,7 +336,7 @@ pub fn preadv(fd: i32, iov: [*]const iovec, count: usize, offset: u64) usize {
     );
 }
-pub fn preadv2(fd: i32, iov: [*]const iovec, count: usize, offset: u64, flags: kernel_rwf) usize {
+pub fn preadv2(fd: i32, iov: [*]const iovec, count: usize, offset: i64, flags: kernel_rwf) usize {
     const offset_halves = splitValue64(offset);
     return syscall6(
         .preadv2,
@@ -342,8 +357,8 @@ pub fn writev(fd: i32, iov: [*]const iovec_const, count: usize) usize {
     return syscall3(.writev, @bitCast(usize, @as(isize, fd)), @ptrToInt(iov), count);
 }
-pub fn pwritev(fd: i32, iov: [*]const iovec_const, count: usize, offset: u64) usize {
-    const offset_halves = splitValue64(offset);
+pub fn pwritev(fd: i32, iov: [*]const iovec_const, count: usize, offset: i64) usize {
+    const offset_halves = splitValueLE64(offset);
     return syscall5(
         .pwritev,
         @bitCast(usize, @as(isize, fd)),
@@ -354,7 +369,7 @@ pub fn pwritev(fd: i32, iov: [*]const iovec_const, count: usize, offset: u64) us
     );
 }
-pub fn pwritev2(fd: i32, iov: [*]const iovec_const, count: usize, offset: u64, flags: kernel_rwf) usize {
+pub fn pwritev2(fd: i32, iov: [*]const iovec_const, count: usize, offset: i64, flags: kernel_rwf) usize {
     const offset_halves = splitValue64(offset);
     return syscall6(
         .pwritev2,
@@ -387,7 +402,7 @@ pub fn symlinkat(existing: [*:0]const u8, newfd: i32, newpath: [*:0]const u8) us
     return syscall3(.symlinkat, @ptrToInt(existing), @bitCast(usize, @as(isize, newfd)), @ptrToInt(newpath));
 }
-pub fn pread(fd: i32, buf: [*]u8, count: usize, offset: u64) usize {
+pub fn pread(fd: i32, buf: [*]u8, count: usize, offset: i64) usize {
     if (@hasField(SYS, "pread64") and usize_bits < 64) {
         const offset_halves = splitValue64(offset);
         if (require_aligned_register_pair) {
@@ -418,7 +433,7 @@ pub fn pread(fd: i32, buf: [*]u8, count: usize, offset: u64) usize {
             @bitCast(usize, @as(isize, fd)),
             @ptrToInt(buf),
             count,
-            offset,
+            @bitCast(u64, offset),
         );
     }
 }
@@ -453,7 +468,7 @@ pub fn write(fd: i32, buf: [*]const u8, count: usize) usize {
     return syscall3(.write, @bitCast(usize, @as(isize, fd)), @ptrToInt(buf), count);
 }
-pub fn ftruncate(fd: i32, length: u64) usize {
+pub fn ftruncate(fd: i32, length: i64) usize {
     if (@hasField(SYS, "ftruncate64") and usize_bits < 64) {
         const length_halves = splitValue64(length);
         if (require_aligned_register_pair) {
@@ -476,12 +491,12 @@ pub fn ftruncate(fd: i32, length: u64) usize {
         return syscall2(
             .ftruncate,
             @bitCast(usize, @as(isize, fd)),
-            @truncate(usize, length),
+            @bitCast(usize, length),
         );
     }
 }
-pub fn pwrite(fd: i32, buf: [*]const u8, count: usize, offset: u64) usize {
+pub fn pwrite(fd: i32, buf: [*]const u8, count: usize, offset: i64) usize {
     if (@hasField(SYS, "pwrite64") and usize_bits < 64) {
         const offset_halves = splitValue64(offset);
@@ -513,7 
+528,7 @@ pub fn pwrite(fd: i32, buf: [*]const u8, count: usize, offset: u64) usize {
             @bitCast(usize, @as(isize, fd)),
             @ptrToInt(buf),
             count,
-            offset,
+            @bitCast(u64, offset),
         );
     }
 }
diff --git a/lib/std/os/linux/test.zig b/lib/std/os/linux/test.zig
index 6923513385..c4a00c48ec 100644
--- a/lib/std/os/linux/test.zig
+++ b/lib/std/os/linux/test.zig
@@ -20,7 +20,7 @@ test "fallocate" {
     try expect((try file.stat()).size == 0);
-    const len: u64 = 65536;
+    const len: i64 = 65536;
     switch (linux.getErrno(linux.fallocate(file.handle, 0, 0, len))) {
         0 => {},
         linux.ENOSYS => return error.SkipZigTest,
diff --git a/lib/std/os/test.zig b/lib/std/os/test.zig
index d3ac3158af..7ab08c47a7 100644
--- a/lib/std/os/test.zig
+++ b/lib/std/os/test.zig
@@ -263,10 +263,6 @@ test "linkat with different directories" {
 test "fstatat" {
     // enable when `fstat` and `fstatat` are implemented on Windows
     if (builtin.os.tag == .windows) return error.SkipZigTest;
-    if (builtin.os.tag == .freebsd and builtin.mode == .ReleaseFast) {
-        // https://github.com/ziglang/zig/issues/8538
-        return error.SkipZigTest;
-    }
     var tmp = tmpDir(.{});
     defer tmp.cleanup();
diff --git a/lib/std/os/windows.zig b/lib/std/os/windows.zig
index b1db75aea3..db261c0ecc 100644
--- a/lib/std/os/windows.zig
+++ b/lib/std/os/windows.zig
@@ -389,6 +389,43 @@ pub fn GetQueuedCompletionStatus(
     return GetQueuedCompletionStatusResult.Normal;
 }
+pub const GetQueuedCompletionStatusError = error{
+    Aborted,
+    Cancelled,
+    EOF,
+    Timeout,
+} || std.os.UnexpectedError;
+
+pub fn GetQueuedCompletionStatusEx(
+    completion_port: HANDLE,
+    completion_port_entries: []OVERLAPPED_ENTRY,
+    timeout_ms: ?DWORD,
+    alertable: bool,
+) GetQueuedCompletionStatusError!u32 {
+    var num_entries_removed: u32 = 0;
+
+    const success = kernel32.GetQueuedCompletionStatusEx(
+        completion_port,
+        completion_port_entries.ptr,
+        @intCast(ULONG, completion_port_entries.len),
+        &num_entries_removed,
+        timeout_ms orelse INFINITE,
+        @boolToInt(alertable),
+    );
+
+    if (success == FALSE) {
+        return switch (kernel32.GetLastError()) {
+            .ABANDONED_WAIT_0 => error.Aborted,
+            .OPERATION_ABORTED => error.Cancelled,
+            .HANDLE_EOF => error.EOF,
+            .TIMEOUT => error.Timeout,
+            else => |err| unexpectedError(err),
+        };
+    }
+
+    return num_entries_removed;
+}
+
 pub fn CloseHandle(hObject: HANDLE) void {
     assert(ntdll.NtClose(hObject) == .SUCCESS);
 }
@@ -1291,6 +1328,10 @@ pub fn getsockname(s: ws2_32.SOCKET, name: *ws2_32.sockaddr, namelen: *ws2_32.so
     return ws2_32.getsockname(s, name, @ptrCast(*i32, namelen));
 }
+pub fn getpeername(s: ws2_32.SOCKET, name: *ws2_32.sockaddr, namelen: *ws2_32.socklen_t) i32 {
+    return ws2_32.getpeername(s, name, @ptrCast(*i32, namelen));
+}
+
 pub fn sendmsg(
     s: ws2_32.SOCKET,
     msg: *const ws2_32.WSAMSG,
@@ -1404,6 +1445,28 @@ pub fn SetConsoleTextAttribute(hConsoleOutput: HANDLE, wAttributes: WORD) SetCon
     }
 }
+pub fn SetConsoleCtrlHandler(handler_routine: ?HANDLER_ROUTINE, add: bool) !void {
+    const success = kernel32.SetConsoleCtrlHandler(
+        handler_routine,
+        if (add) TRUE else FALSE,
+    );
+
+    if (success == FALSE) {
+        return switch (kernel32.GetLastError()) {
+            else => |err| unexpectedError(err),
+        };
+    }
+}
+
+pub fn SetFileCompletionNotificationModes(handle: HANDLE, flags: UCHAR) !void {
+    const success = kernel32.SetFileCompletionNotificationModes(handle, flags);
+    if (success == FALSE) {
+        return switch (kernel32.GetLastError()) {
+            else => |err| unexpectedError(err),
+        };
+    }
+}
+
 pub const GetEnvironmentStringsError = error{OutOfMemory};
 pub fn GetEnvironmentStringsW() 
GetEnvironmentStringsError![*:0]u16 { @@ -1686,6 +1749,38 @@ fn MAKELANGID(p: c_ushort, s: c_ushort) callconv(.Inline) LANGID { return (s << 10) | p; } +/// Loads a Winsock extension function in runtime specified by a GUID. +pub fn loadWinsockExtensionFunction(comptime T: type, sock: ws2_32.SOCKET, guid: GUID) !T { + var function: T = undefined; + var num_bytes: DWORD = undefined; + + const rc = ws2_32.WSAIoctl( + sock, + ws2_32.SIO_GET_EXTENSION_FUNCTION_POINTER, + @ptrCast(*const c_void, &guid), + @sizeOf(GUID), + &function, + @sizeOf(T), + &num_bytes, + null, + null, + ); + + if (rc == ws2_32.SOCKET_ERROR) { + return switch (ws2_32.WSAGetLastError()) { + .WSAEOPNOTSUPP => error.OperationNotSupported, + .WSAENOTSOCK => error.FileDescriptorNotASocket, + else => |err| unexpectedWSAError(err), + }; + } + + if (num_bytes != @sizeOf(T)) { + return error.ShortRead; + } + + return function; +} + /// Call this when you made a windows DLL call or something that does SetLastError /// and you get an unexpected error. pub fn unexpectedError(err: Win32Error) std.os.UnexpectedError { diff --git a/lib/std/os/windows/bits.zig b/lib/std/os/windows/bits.zig index 5b9294f921..6fff56e71e 100644 --- a/lib/std/os/windows/bits.zig +++ b/lib/std/os/windows/bits.zig @@ -1619,10 +1619,6 @@ pub const MOUNTMGR_MOUNT_POINTS = extern struct { }; pub const IOCTL_MOUNTMGR_QUERY_POINTS: ULONG = 0x6d0008; -pub const SD_RECEIVE = 0; -pub const SD_SEND = 1; -pub const SD_BOTH = 2; - pub const OBJECT_INFORMATION_CLASS = enum(c_int) { ObjectBasicInformation = 0, ObjectNameInformation = 1, @@ -1642,3 +1638,14 @@ pub const SRWLOCK = usize; pub const SRWLOCK_INIT: SRWLOCK = 0; pub const CONDITION_VARIABLE = usize; pub const CONDITION_VARIABLE_INIT: CONDITION_VARIABLE = 0; + +pub const FILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 0x1; +pub const FILE_SKIP_SET_EVENT_ON_HANDLE = 0x2; + +pub const CTRL_C_EVENT: DWORD = 0; +pub const CTRL_BREAK_EVENT: DWORD = 1; +pub const CTRL_CLOSE_EVENT: DWORD = 2; +pub const CTRL_LOGOFF_EVENT: DWORD = 5; +pub const CTRL_SHUTDOWN_EVENT: DWORD = 6; + +pub const HANDLER_ROUTINE = fn (dwCtrlType: DWORD) callconv(.C) BOOL; diff --git a/lib/std/os/windows/kernel32.zig b/lib/std/os/windows/kernel32.zig index 734059a08a..e117f362eb 100644 --- a/lib/std/os/windows/kernel32.zig +++ b/lib/std/os/windows/kernel32.zig @@ -140,6 +140,14 @@ pub extern "kernel32" fn GetOverlappedResult(hFile: HANDLE, lpOverlapped: *OVERL pub extern "kernel32" fn GetProcessHeap() callconv(WINAPI) ?HANDLE; pub extern "kernel32" fn GetQueuedCompletionStatus(CompletionPort: HANDLE, lpNumberOfBytesTransferred: LPDWORD, lpCompletionKey: *ULONG_PTR, lpOverlapped: *?*OVERLAPPED, dwMilliseconds: DWORD) callconv(WINAPI) BOOL; +pub extern "kernel32" fn GetQueuedCompletionStatusEx( + CompletionPort: HANDLE, + lpCompletionPortEntries: [*]OVERLAPPED_ENTRY, + ulCount: ULONG, + ulNumEntriesRemoved: *ULONG, + dwMilliseconds: DWORD, + fAlertable: BOOL, +) callconv(WINAPI) BOOL; pub extern "kernel32" fn GetSystemInfo(lpSystemInfo: *SYSTEM_INFO) callconv(WINAPI) void; pub extern "kernel32" fn GetSystemTimeAsFileTime(*FILETIME) callconv(WINAPI) void; @@ -197,6 +205,16 @@ pub extern "kernel32" fn RemoveDirectoryW(lpPathName: [*:0]const u16) callconv(W pub extern "kernel32" fn SetConsoleTextAttribute(hConsoleOutput: HANDLE, wAttributes: WORD) callconv(WINAPI) BOOL; +pub extern "kernel32" fn SetConsoleCtrlHandler( + HandlerRoutine: ?HANDLER_ROUTINE, + Add: BOOL, +) callconv(WINAPI) BOOL; + +pub extern "kernel32" fn SetFileCompletionNotificationModes( + 
FileHandle: HANDLE, + Flags: UCHAR, +) callconv(WINAPI) BOOL; + pub extern "kernel32" fn SetFilePointerEx( in_fFile: HANDLE, in_liDistanceToMove: LARGE_INTEGER, diff --git a/lib/std/os/windows/ws2_32.zig b/lib/std/os/windows/ws2_32.zig index 90c75abd9e..15aa0d487f 100644 --- a/lib/std/os/windows/ws2_32.zig +++ b/lib/std/os/windows/ws2_32.zig @@ -7,10 +7,929 @@ usingnamespace @import("bits.zig"); pub const SOCKET = *opaque {}; pub const INVALID_SOCKET = @intToPtr(SOCKET, ~@as(usize, 0)); -pub const SOCKET_ERROR = -1; +pub const GROUP = u32; +pub const ADDRESS_FAMILY = u16; +pub const WSAEVENT = HANDLE; + +// Microsoft use the signed c_int for this, but it should never be negative +pub const socklen_t = u32; + +pub const LM_HB_Extension = 128; +pub const LM_HB1_PnP = 1; +pub const LM_HB1_PDA_Palmtop = 2; +pub const LM_HB1_Computer = 4; +pub const LM_HB1_Printer = 8; +pub const LM_HB1_Modem = 16; +pub const LM_HB1_Fax = 32; +pub const LM_HB1_LANAccess = 64; +pub const LM_HB2_Telephony = 1; +pub const LM_HB2_FileServer = 2; +pub const ATMPROTO_AALUSER = 0; +pub const ATMPROTO_AAL1 = 1; +pub const ATMPROTO_AAL2 = 2; +pub const ATMPROTO_AAL34 = 3; +pub const ATMPROTO_AAL5 = 5; +pub const SAP_FIELD_ABSENT = 4294967294; +pub const SAP_FIELD_ANY = 4294967295; +pub const SAP_FIELD_ANY_AESA_SEL = 4294967290; +pub const SAP_FIELD_ANY_AESA_REST = 4294967291; +pub const ATM_E164 = 1; +pub const ATM_NSAP = 2; +pub const ATM_AESA = 2; +pub const ATM_ADDR_SIZE = 20; +pub const BLLI_L2_ISO_1745 = 1; +pub const BLLI_L2_Q921 = 2; +pub const BLLI_L2_X25L = 6; +pub const BLLI_L2_X25M = 7; +pub const BLLI_L2_ELAPB = 8; +pub const BLLI_L2_HDLC_ARM = 9; +pub const BLLI_L2_HDLC_NRM = 10; +pub const BLLI_L2_HDLC_ABM = 11; +pub const BLLI_L2_LLC = 12; +pub const BLLI_L2_X75 = 13; +pub const BLLI_L2_Q922 = 14; +pub const BLLI_L2_USER_SPECIFIED = 16; +pub const BLLI_L2_ISO_7776 = 17; +pub const BLLI_L3_X25 = 6; +pub const BLLI_L3_ISO_8208 = 7; +pub const BLLI_L3_X223 = 8; +pub const BLLI_L3_SIO_8473 = 9; +pub const BLLI_L3_T70 = 10; +pub const BLLI_L3_ISO_TR9577 = 11; +pub const BLLI_L3_USER_SPECIFIED = 16; +pub const BLLI_L3_IPI_SNAP = 128; +pub const BLLI_L3_IPI_IP = 204; +pub const BHLI_ISO = 0; +pub const BHLI_UserSpecific = 1; +pub const BHLI_HighLayerProfile = 2; +pub const BHLI_VendorSpecificAppId = 3; +pub const AAL5_MODE_MESSAGE = 1; +pub const AAL5_MODE_STREAMING = 2; +pub const AAL5_SSCS_NULL = 0; +pub const AAL5_SSCS_SSCOP_ASSURED = 1; +pub const AAL5_SSCS_SSCOP_NON_ASSURED = 2; +pub const AAL5_SSCS_FRAME_RELAY = 4; +pub const BCOB_A = 1; +pub const BCOB_C = 3; +pub const BCOB_X = 16; +pub const TT_NOIND = 0; +pub const TT_CBR = 4; +pub const TT_VBR = 8; +pub const TR_NOIND = 0; +pub const TR_END_TO_END = 1; +pub const TR_NO_END_TO_END = 2; +pub const CLIP_NOT = 0; +pub const CLIP_SUS = 32; +pub const UP_P2P = 0; +pub const UP_P2MP = 1; +pub const BLLI_L2_MODE_NORMAL = 64; +pub const BLLI_L2_MODE_EXT = 128; +pub const BLLI_L3_MODE_NORMAL = 64; +pub const BLLI_L3_MODE_EXT = 128; +pub const BLLI_L3_PACKET_16 = 4; +pub const BLLI_L3_PACKET_32 = 5; +pub const BLLI_L3_PACKET_64 = 6; +pub const BLLI_L3_PACKET_128 = 7; +pub const BLLI_L3_PACKET_256 = 8; +pub const BLLI_L3_PACKET_512 = 9; +pub const BLLI_L3_PACKET_1024 = 10; +pub const BLLI_L3_PACKET_2048 = 11; +pub const BLLI_L3_PACKET_4096 = 12; +pub const PI_ALLOWED = 0; +pub const PI_RESTRICTED = 64; +pub const PI_NUMBER_NOT_AVAILABLE = 128; +pub const SI_USER_NOT_SCREENED = 0; +pub const SI_USER_PASSED = 1; +pub const SI_USER_FAILED = 2; +pub const SI_NETWORK 
= 3; +pub const CAUSE_LOC_USER = 0; +pub const CAUSE_LOC_PRIVATE_LOCAL = 1; +pub const CAUSE_LOC_PUBLIC_LOCAL = 2; +pub const CAUSE_LOC_TRANSIT_NETWORK = 3; +pub const CAUSE_LOC_PUBLIC_REMOTE = 4; +pub const CAUSE_LOC_PRIVATE_REMOTE = 5; +pub const CAUSE_LOC_INTERNATIONAL_NETWORK = 7; +pub const CAUSE_LOC_BEYOND_INTERWORKING = 10; +pub const CAUSE_UNALLOCATED_NUMBER = 1; +pub const CAUSE_NO_ROUTE_TO_TRANSIT_NETWORK = 2; +pub const CAUSE_NO_ROUTE_TO_DESTINATION = 3; +pub const CAUSE_VPI_VCI_UNACCEPTABLE = 10; +pub const CAUSE_NORMAL_CALL_CLEARING = 16; +pub const CAUSE_USER_BUSY = 17; +pub const CAUSE_NO_USER_RESPONDING = 18; +pub const CAUSE_CALL_REJECTED = 21; +pub const CAUSE_NUMBER_CHANGED = 22; +pub const CAUSE_USER_REJECTS_CLIR = 23; +pub const CAUSE_DESTINATION_OUT_OF_ORDER = 27; +pub const CAUSE_INVALID_NUMBER_FORMAT = 28; +pub const CAUSE_STATUS_ENQUIRY_RESPONSE = 30; +pub const CAUSE_NORMAL_UNSPECIFIED = 31; +pub const CAUSE_VPI_VCI_UNAVAILABLE = 35; +pub const CAUSE_NETWORK_OUT_OF_ORDER = 38; +pub const CAUSE_TEMPORARY_FAILURE = 41; +pub const CAUSE_ACCESS_INFORMAION_DISCARDED = 43; +pub const CAUSE_NO_VPI_VCI_AVAILABLE = 45; +pub const CAUSE_RESOURCE_UNAVAILABLE = 47; +pub const CAUSE_QOS_UNAVAILABLE = 49; +pub const CAUSE_USER_CELL_RATE_UNAVAILABLE = 51; +pub const CAUSE_BEARER_CAPABILITY_UNAUTHORIZED = 57; +pub const CAUSE_BEARER_CAPABILITY_UNAVAILABLE = 58; +pub const CAUSE_OPTION_UNAVAILABLE = 63; +pub const CAUSE_BEARER_CAPABILITY_UNIMPLEMENTED = 65; +pub const CAUSE_UNSUPPORTED_TRAFFIC_PARAMETERS = 73; +pub const CAUSE_INVALID_CALL_REFERENCE = 81; +pub const CAUSE_CHANNEL_NONEXISTENT = 82; +pub const CAUSE_INCOMPATIBLE_DESTINATION = 88; +pub const CAUSE_INVALID_ENDPOINT_REFERENCE = 89; +pub const CAUSE_INVALID_TRANSIT_NETWORK_SELECTION = 91; +pub const CAUSE_TOO_MANY_PENDING_ADD_PARTY = 92; +pub const CAUSE_AAL_PARAMETERS_UNSUPPORTED = 93; +pub const CAUSE_MANDATORY_IE_MISSING = 96; +pub const CAUSE_UNIMPLEMENTED_MESSAGE_TYPE = 97; +pub const CAUSE_UNIMPLEMENTED_IE = 99; +pub const CAUSE_INVALID_IE_CONTENTS = 100; +pub const CAUSE_INVALID_STATE_FOR_MESSAGE = 101; +pub const CAUSE_RECOVERY_ON_TIMEOUT = 102; +pub const CAUSE_INCORRECT_MESSAGE_LENGTH = 104; +pub const CAUSE_PROTOCOL_ERROR = 111; +pub const CAUSE_COND_UNKNOWN = 0; +pub const CAUSE_COND_PERMANENT = 1; +pub const CAUSE_COND_TRANSIENT = 2; +pub const CAUSE_REASON_USER = 0; +pub const CAUSE_REASON_IE_MISSING = 4; +pub const CAUSE_REASON_IE_INSUFFICIENT = 8; +pub const CAUSE_PU_PROVIDER = 0; +pub const CAUSE_PU_USER = 8; +pub const CAUSE_NA_NORMAL = 0; +pub const CAUSE_NA_ABNORMAL = 4; +pub const QOS_CLASS0 = 0; +pub const QOS_CLASS1 = 1; +pub const QOS_CLASS2 = 2; +pub const QOS_CLASS3 = 3; +pub const QOS_CLASS4 = 4; +pub const TNS_TYPE_NATIONAL = 64; +pub const TNS_PLAN_CARRIER_ID_CODE = 1; +pub const SIO_GET_NUMBER_OF_ATM_DEVICES = 1343619073; +pub const SIO_GET_ATM_ADDRESS = 3491102722; +pub const SIO_ASSOCIATE_PVC = 2417360899; +pub const SIO_GET_ATM_CONNECTION_ID = 1343619076; +pub const RIO_MSG_DONT_NOTIFY = 1; +pub const RIO_MSG_DEFER = 2; +pub const RIO_MSG_WAITALL = 4; +pub const RIO_MSG_COMMIT_ONLY = 8; +pub const RIO_MAX_CQ_SIZE = 134217728; +pub const RIO_CORRUPT_CQ = 4294967295; +pub const WINDOWS_AF_IRDA = 26; +pub const WCE_AF_IRDA = 22; +pub const IRDA_PROTO_SOCK_STREAM = 1; +pub const SOL_IRLMP = 255; +pub const IRLMP_ENUMDEVICES = 16; +pub const IRLMP_IAS_SET = 17; +pub const IRLMP_IAS_QUERY = 18; +pub const IRLMP_SEND_PDU_LEN = 19; +pub const IRLMP_EXCLUSIVE_MODE = 20; +pub const 
IRLMP_IRLPT_MODE = 21; +pub const IRLMP_9WIRE_MODE = 22; +pub const IRLMP_TINYTP_MODE = 23; +pub const IRLMP_PARAMETERS = 24; +pub const IRLMP_DISCOVERY_MODE = 25; +pub const IRLMP_SHARP_MODE = 32; +pub const IAS_ATTRIB_NO_CLASS = 16; +pub const IAS_ATTRIB_NO_ATTRIB = 0; +pub const IAS_ATTRIB_INT = 1; +pub const IAS_ATTRIB_OCTETSEQ = 2; +pub const IAS_ATTRIB_STR = 3; +pub const IAS_MAX_USER_STRING = 256; +pub const IAS_MAX_OCTET_STRING = 1024; +pub const IAS_MAX_CLASSNAME = 64; +pub const IAS_MAX_ATTRIBNAME = 256; +pub const LmCharSetASCII = 0; +pub const LmCharSetISO_8859_1 = 1; +pub const LmCharSetISO_8859_2 = 2; +pub const LmCharSetISO_8859_3 = 3; +pub const LmCharSetISO_8859_4 = 4; +pub const LmCharSetISO_8859_5 = 5; +pub const LmCharSetISO_8859_6 = 6; +pub const LmCharSetISO_8859_7 = 7; +pub const LmCharSetISO_8859_8 = 8; +pub const LmCharSetISO_8859_9 = 9; +pub const LmCharSetUNICODE = 255; +pub const LM_BAUD_1200 = 1200; +pub const LM_BAUD_2400 = 2400; +pub const LM_BAUD_9600 = 9600; +pub const LM_BAUD_19200 = 19200; +pub const LM_BAUD_38400 = 38400; +pub const LM_BAUD_57600 = 57600; +pub const LM_BAUD_115200 = 115200; +pub const LM_BAUD_576K = 576000; +pub const LM_BAUD_1152K = 1152000; +pub const LM_BAUD_4M = 4000000; +pub const LM_BAUD_16M = 16000000; +pub const IPX_PTYPE = 16384; +pub const IPX_FILTERPTYPE = 16385; +pub const IPX_STOPFILTERPTYPE = 16387; +pub const IPX_DSTYPE = 16386; +pub const IPX_EXTENDED_ADDRESS = 16388; +pub const IPX_RECVHDR = 16389; +pub const IPX_MAXSIZE = 16390; +pub const IPX_ADDRESS = 16391; +pub const IPX_GETNETINFO = 16392; +pub const IPX_GETNETINFO_NORIP = 16393; +pub const IPX_SPXGETCONNECTIONSTATUS = 16395; +pub const IPX_ADDRESS_NOTIFY = 16396; +pub const IPX_MAX_ADAPTER_NUM = 16397; +pub const IPX_RERIPNETNUMBER = 16398; +pub const IPX_RECEIVE_BROADCAST = 16399; +pub const IPX_IMMEDIATESPXACK = 16400; +pub const IPPROTO_RM = 113; +pub const MAX_MCAST_TTL = 255; +pub const RM_OPTIONSBASE = 1000; +pub const RM_RATE_WINDOW_SIZE = 1001; +pub const RM_SET_MESSAGE_BOUNDARY = 1002; +pub const RM_FLUSHCACHE = 1003; +pub const RM_SENDER_WINDOW_ADVANCE_METHOD = 1004; +pub const RM_SENDER_STATISTICS = 1005; +pub const RM_LATEJOIN = 1006; +pub const RM_SET_SEND_IF = 1007; +pub const RM_ADD_RECEIVE_IF = 1008; +pub const RM_DEL_RECEIVE_IF = 1009; +pub const RM_SEND_WINDOW_ADV_RATE = 1010; +pub const RM_USE_FEC = 1011; +pub const RM_SET_MCAST_TTL = 1012; +pub const RM_RECEIVER_STATISTICS = 1013; +pub const RM_HIGH_SPEED_INTRANET_OPT = 1014; +pub const SENDER_DEFAULT_RATE_KBITS_PER_SEC = 56; +pub const SENDER_DEFAULT_WINDOW_ADV_PERCENTAGE = 15; +pub const MAX_WINDOW_INCREMENT_PERCENTAGE = 25; +pub const SENDER_DEFAULT_LATE_JOINER_PERCENTAGE = 0; +pub const SENDER_MAX_LATE_JOINER_PERCENTAGE = 75; +pub const BITS_PER_BYTE = 8; +pub const LOG2_BITS_PER_BYTE = 3; + +pub const SOCKET_DEFAULT2_QM_POLICY = GUID.parse("{aec2ef9c-3a4d-4d3e-8842-239942e39a47}"); +pub const REAL_TIME_NOTIFICATION_CAPABILITY = GUID.parse("{6b59819a-5cae-492d-a901-2a3c2c50164f}"); +pub const REAL_TIME_NOTIFICATION_CAPABILITY_EX = GUID.parse("{6843da03-154a-4616-a508-44371295f96b}"); +pub const ASSOCIATE_NAMERES_CONTEXT = GUID.parse("{59a38b67-d4fe-46e1-ba3c-87ea74ca3049}"); + +pub const WSAID_CONNECTEX = GUID{ + .Data1 = 0x25a207b9, + .Data2 = 0xddf3, + .Data3 = 0x4660, + .Data4 = [8]u8{ 0x8e, 0xe9, 0x76, 0xe5, 0x8c, 0x74, 0x06, 0x3e }, +}; + +pub const WSAID_ACCEPTEX = GUID{ + .Data1 = 0xb5367df1, + .Data2 = 0xcbac, + .Data3 = 0x11cf, + .Data4 = [8]u8{ 0x95, 0xca, 0x00, 0x80, 0x5f, 0x48, 
0xa1, 0x92 }, +}; + +pub const WSAID_GETACCEPTEXSOCKADDRS = GUID{ + .Data1 = 0xb5367df2, + .Data2 = 0xcbac, + .Data3 = 0x11cf, + .Data4 = [8]u8{ 0x95, 0xca, 0x00, 0x80, 0x5f, 0x48, 0xa1, 0x92 }, +}; + +pub const WSAID_WSARECVMSG = GUID{ + .Data1 = 0xf689d7c8, + .Data2 = 0x6f1f, + .Data3 = 0x436b, + .Data4 = [8]u8{ 0x8a, 0x53, 0xe5, 0x4f, 0xe3, 0x51, 0xc3, 0x22 }, +}; + +pub const WSAID_WSAPOLL = GUID{ + .Data1 = 0x18C76F85, + .Data2 = 0xDC66, + .Data3 = 0x4964, + .Data4 = [8]u8{ 0x97, 0x2E, 0x23, 0xC2, 0x72, 0x38, 0x31, 0x2B }, +}; + +pub const WSAID_WSASENDMSG = GUID{ + .Data1 = 0xa441e712, + .Data2 = 0x754f, + .Data3 = 0x43ca, + .Data4 = [8]u8{ 0x84, 0xa7, 0x0d, 0xee, 0x44, 0xcf, 0x60, 0x6d }, +}; + +pub const TCP_INITIAL_RTO_DEFAULT_RTT = 0; +pub const TCP_INITIAL_RTO_DEFAULT_MAX_SYN_RETRANSMISSIONS = 0; +pub const SOCKET_SETTINGS_GUARANTEE_ENCRYPTION = 1; +pub const SOCKET_SETTINGS_ALLOW_INSECURE = 2; +pub const SOCKET_SETTINGS_IPSEC_SKIP_FILTER_INSTANTIATION = 1; +pub const SOCKET_SETTINGS_IPSEC_OPTIONAL_PEER_NAME_VERIFICATION = 2; +pub const SOCKET_SETTINGS_IPSEC_ALLOW_FIRST_INBOUND_PKT_UNENCRYPTED = 4; +pub const SOCKET_SETTINGS_IPSEC_PEER_NAME_IS_RAW_FORMAT = 8; +pub const SOCKET_QUERY_IPSEC2_ABORT_CONNECTION_ON_FIELD_CHANGE = 1; +pub const SOCKET_QUERY_IPSEC2_FIELD_MASK_MM_SA_ID = 1; +pub const SOCKET_QUERY_IPSEC2_FIELD_MASK_QM_SA_ID = 2; +pub const SOCKET_INFO_CONNECTION_SECURED = 1; +pub const SOCKET_INFO_CONNECTION_ENCRYPTED = 2; +pub const SOCKET_INFO_CONNECTION_IMPERSONATED = 4; +pub const IN4ADDR_LOOPBACK = 16777343; +pub const IN4ADDR_LOOPBACKPREFIX_LENGTH = 8; +pub const IN4ADDR_LINKLOCALPREFIX_LENGTH = 16; +pub const IN4ADDR_MULTICASTPREFIX_LENGTH = 4; +pub const IFF_UP = 1; +pub const IFF_BROADCAST = 2; +pub const IFF_LOOPBACK = 4; +pub const IFF_POINTTOPOINT = 8; +pub const IFF_MULTICAST = 16; +pub const IP_OPTIONS = 1; +pub const IP_HDRINCL = 2; +pub const IP_TOS = 3; +pub const IP_TTL = 4; +pub const IP_MULTICAST_IF = 9; +pub const IP_MULTICAST_TTL = 10; +pub const IP_MULTICAST_LOOP = 11; +pub const IP_ADD_MEMBERSHIP = 12; +pub const IP_DROP_MEMBERSHIP = 13; +pub const IP_DONTFRAGMENT = 14; +pub const IP_ADD_SOURCE_MEMBERSHIP = 15; +pub const IP_DROP_SOURCE_MEMBERSHIP = 16; +pub const IP_BLOCK_SOURCE = 17; +pub const IP_UNBLOCK_SOURCE = 18; +pub const IP_PKTINFO = 19; +pub const IP_HOPLIMIT = 21; +pub const IP_RECVTTL = 21; +pub const IP_RECEIVE_BROADCAST = 22; +pub const IP_RECVIF = 24; +pub const IP_RECVDSTADDR = 25; +pub const IP_IFLIST = 28; +pub const IP_ADD_IFLIST = 29; +pub const IP_DEL_IFLIST = 30; +pub const IP_UNICAST_IF = 31; +pub const IP_RTHDR = 32; +pub const IP_GET_IFLIST = 33; +pub const IP_RECVRTHDR = 38; +pub const IP_TCLASS = 39; +pub const IP_RECVTCLASS = 40; +pub const IP_RECVTOS = 40; +pub const IP_ORIGINAL_ARRIVAL_IF = 47; +pub const IP_ECN = 50; +pub const IP_PKTINFO_EX = 51; +pub const IP_WFP_REDIRECT_RECORDS = 60; +pub const IP_WFP_REDIRECT_CONTEXT = 70; +pub const IP_MTU_DISCOVER = 71; +pub const IP_MTU = 73; +pub const IP_NRT_INTERFACE = 74; +pub const IP_RECVERR = 75; +pub const IP_USER_MTU = 76; +pub const IP_UNSPECIFIED_TYPE_OF_SERVICE = -1; +pub const IN6ADDR_LINKLOCALPREFIX_LENGTH = 64; +pub const IN6ADDR_MULTICASTPREFIX_LENGTH = 8; +pub const IN6ADDR_SOLICITEDNODEMULTICASTPREFIX_LENGTH = 104; +pub const IN6ADDR_V4MAPPEDPREFIX_LENGTH = 96; +pub const IN6ADDR_6TO4PREFIX_LENGTH = 16; +pub const IN6ADDR_TEREDOPREFIX_LENGTH = 32; +pub const MCAST_JOIN_GROUP = 41; +pub const MCAST_LEAVE_GROUP = 42; +pub const MCAST_BLOCK_SOURCE = 43; +pub 
const MCAST_UNBLOCK_SOURCE = 44; +pub const MCAST_JOIN_SOURCE_GROUP = 45; +pub const MCAST_LEAVE_SOURCE_GROUP = 46; +pub const IPV6_HOPOPTS = 1; +pub const IPV6_HDRINCL = 2; +pub const IPV6_UNICAST_HOPS = 4; +pub const IPV6_MULTICAST_IF = 9; +pub const IPV6_MULTICAST_HOPS = 10; +pub const IPV6_MULTICAST_LOOP = 11; +pub const IPV6_ADD_MEMBERSHIP = 12; +pub const IPV6_DROP_MEMBERSHIP = 13; +pub const IPV6_DONTFRAG = 14; +pub const IPV6_PKTINFO = 19; +pub const IPV6_HOPLIMIT = 21; +pub const IPV6_PROTECTION_LEVEL = 23; +pub const IPV6_RECVIF = 24; +pub const IPV6_RECVDSTADDR = 25; +pub const IPV6_CHECKSUM = 26; +pub const IPV6_V6ONLY = 27; +pub const IPV6_IFLIST = 28; +pub const IPV6_ADD_IFLIST = 29; +pub const IPV6_DEL_IFLIST = 30; +pub const IPV6_UNICAST_IF = 31; +pub const IPV6_RTHDR = 32; +pub const IPV6_GET_IFLIST = 33; +pub const IPV6_RECVRTHDR = 38; +pub const IPV6_TCLASS = 39; +pub const IPV6_RECVTCLASS = 40; +pub const IPV6_ECN = 50; +pub const IPV6_PKTINFO_EX = 51; +pub const IPV6_WFP_REDIRECT_RECORDS = 60; +pub const IPV6_WFP_REDIRECT_CONTEXT = 70; +pub const IPV6_MTU_DISCOVER = 71; +pub const IPV6_MTU = 72; +pub const IPV6_NRT_INTERFACE = 74; +pub const IPV6_RECVERR = 75; +pub const IPV6_USER_MTU = 76; +pub const IP_UNSPECIFIED_HOP_LIMIT = -1; +pub const PROTECTION_LEVEL_UNRESTRICTED = 10; +pub const PROTECTION_LEVEL_EDGERESTRICTED = 20; +pub const PROTECTION_LEVEL_RESTRICTED = 30; +pub const INET_ADDRSTRLEN = 22; +pub const INET6_ADDRSTRLEN = 65; +pub const TCP_OFFLOAD_NO_PREFERENCE = 0; +pub const TCP_OFFLOAD_NOT_PREFERRED = 1; +pub const TCP_OFFLOAD_PREFERRED = 2; +pub const TCP_EXPEDITED_1122 = 2; +pub const TCP_KEEPALIVE = 3; +pub const TCP_MAXSEG = 4; +pub const TCP_MAXRT = 5; +pub const TCP_STDURG = 6; +pub const TCP_NOURG = 7; +pub const TCP_ATMARK = 8; +pub const TCP_NOSYNRETRIES = 9; +pub const TCP_TIMESTAMPS = 10; +pub const TCP_OFFLOAD_PREFERENCE = 11; +pub const TCP_CONGESTION_ALGORITHM = 12; +pub const TCP_DELAY_FIN_ACK = 13; +pub const TCP_MAXRTMS = 14; +pub const TCP_FASTOPEN = 15; +pub const TCP_KEEPCNT = 16; +pub const TCP_KEEPINTVL = 17; +pub const TCP_FAIL_CONNECT_ON_ICMP_ERROR = 18; +pub const TCP_ICMP_ERROR_INFO = 19; +pub const UDP_SEND_MSG_SIZE = 2; +pub const UDP_RECV_MAX_COALESCED_SIZE = 3; +pub const UDP_COALESCED_INFO = 3; +pub const AF_UNSPEC = 0; +pub const AF_UNIX = 1; +pub const AF_INET = 2; +pub const AF_IMPLINK = 3; +pub const AF_PUP = 4; +pub const AF_CHAOS = 5; +pub const AF_NS = 6; +pub const AF_ISO = 7; +pub const AF_ECMA = 8; +pub const AF_DATAKIT = 9; +pub const AF_CCITT = 10; +pub const AF_SNA = 11; +pub const AF_DECnet = 12; +pub const AF_DLI = 13; +pub const AF_LAT = 14; +pub const AF_HYLINK = 15; +pub const AF_APPLETALK = 16; +pub const AF_NETBIOS = 17; +pub const AF_VOICEVIEW = 18; +pub const AF_FIREFOX = 19; +pub const AF_UNKNOWN1 = 20; +pub const AF_BAN = 21; +pub const AF_ATM = 22; +pub const AF_INET6 = 23; +pub const AF_CLUSTER = 24; +pub const AF_12844 = 25; +pub const AF_IRDA = 26; +pub const AF_NETDES = 28; +pub const AF_MAX = 29; +pub const AF_TCNPROCESS = 29; +pub const AF_TCNMESSAGE = 30; +pub const AF_ICLFXBM = 31; +pub const AF_LINK = 33; +pub const AF_HYPERV = 34; +pub const SOCK_STREAM = 1; +pub const SOCK_DGRAM = 2; +pub const SOCK_RAW = 3; +pub const SOCK_RDM = 4; +pub const SOCK_SEQPACKET = 5; +pub const SOL_SOCKET = 65535; +pub const SO_DEBUG = 1; +pub const SO_ACCEPTCONN = 2; +pub const SO_REUSEADDR = 4; +pub const SO_KEEPALIVE = 8; +pub const SO_DONTROUTE = 16; +pub const SO_BROADCAST = 32; +pub const SO_USELOOPBACK 
= 64; +pub const SO_LINGER = 128; +pub const SO_OOBINLINE = 256; +pub const SO_SNDBUF = 4097; +pub const SO_RCVBUF = 4098; +pub const SO_SNDLOWAT = 4099; +pub const SO_RCVLOWAT = 4100; +pub const SO_SNDTIMEO = 4101; +pub const SO_RCVTIMEO = 4102; +pub const SO_ERROR = 4103; +pub const SO_TYPE = 4104; +pub const SO_BSP_STATE = 4105; +pub const SO_GROUP_ID = 8193; +pub const SO_GROUP_PRIORITY = 8194; +pub const SO_MAX_MSG_SIZE = 8195; +pub const SO_CONDITIONAL_ACCEPT = 12290; +pub const SO_PAUSE_ACCEPT = 12291; +pub const SO_COMPARTMENT_ID = 12292; +pub const SO_RANDOMIZE_PORT = 12293; +pub const SO_PORT_SCALABILITY = 12294; +pub const SO_REUSE_UNICASTPORT = 12295; +pub const SO_REUSE_MULTICASTPORT = 12296; +pub const SO_ORIGINAL_DST = 12303; +pub const WSK_SO_BASE = 16384; +pub const TCP_NODELAY = 1; +pub const IOC_UNIX = 0; +pub const IOC_WS2 = 134217728; +pub const IOC_PROTOCOL = 268435456; +pub const IOC_VENDOR = 402653184; +pub const SIO_GET_EXTENSION_FUNCTION_POINTER = IOC_OUT | IOC_IN | IOC_WS2 | 6; +pub const SIO_BSP_HANDLE = IOC_OUT | IOC_WS2 | 27; +pub const SIO_BSP_HANDLE_SELECT = IOC_OUT | IOC_WS2 | 28; +pub const SIO_BSP_HANDLE_POLL = IOC_OUT | IOC_WS2 | 29; +pub const SIO_BASE_HANDLE = IOC_OUT | IOC_WS2 | 34; +pub const IPPROTO_IP = 0; +pub const IPPORT_TCPMUX = 1; +pub const IPPORT_ECHO = 7; +pub const IPPORT_DISCARD = 9; +pub const IPPORT_SYSTAT = 11; +pub const IPPORT_DAYTIME = 13; +pub const IPPORT_NETSTAT = 15; +pub const IPPORT_QOTD = 17; +pub const IPPORT_MSP = 18; +pub const IPPORT_CHARGEN = 19; +pub const IPPORT_FTP_DATA = 20; +pub const IPPORT_FTP = 21; +pub const IPPORT_TELNET = 23; +pub const IPPORT_SMTP = 25; +pub const IPPORT_TIMESERVER = 37; +pub const IPPORT_NAMESERVER = 42; +pub const IPPORT_WHOIS = 43; +pub const IPPORT_MTP = 57; +pub const IPPORT_TFTP = 69; +pub const IPPORT_RJE = 77; +pub const IPPORT_FINGER = 79; +pub const IPPORT_TTYLINK = 87; +pub const IPPORT_SUPDUP = 95; +pub const IPPORT_POP3 = 110; +pub const IPPORT_NTP = 123; +pub const IPPORT_EPMAP = 135; +pub const IPPORT_NETBIOS_NS = 137; +pub const IPPORT_NETBIOS_DGM = 138; +pub const IPPORT_NETBIOS_SSN = 139; +pub const IPPORT_IMAP = 143; +pub const IPPORT_SNMP = 161; +pub const IPPORT_SNMP_TRAP = 162; +pub const IPPORT_IMAP3 = 220; +pub const IPPORT_LDAP = 389; +pub const IPPORT_HTTPS = 443; +pub const IPPORT_MICROSOFT_DS = 445; +pub const IPPORT_EXECSERVER = 512; +pub const IPPORT_LOGINSERVER = 513; +pub const IPPORT_CMDSERVER = 514; +pub const IPPORT_EFSSERVER = 520; +pub const IPPORT_BIFFUDP = 512; +pub const IPPORT_WHOSERVER = 513; +pub const IPPORT_ROUTESERVER = 520; +pub const IPPORT_RESERVED = 1024; +pub const IPPORT_REGISTERED_MAX = 49151; +pub const IPPORT_DYNAMIC_MIN = 49152; +pub const IPPORT_DYNAMIC_MAX = 65535; +pub const IN_CLASSA_NET = 4278190080; +pub const IN_CLASSA_NSHIFT = 24; +pub const IN_CLASSA_HOST = 16777215; +pub const IN_CLASSA_MAX = 128; +pub const IN_CLASSB_NET = 4294901760; +pub const IN_CLASSB_NSHIFT = 16; +pub const IN_CLASSB_HOST = 65535; +pub const IN_CLASSB_MAX = 65536; +pub const IN_CLASSC_NET = 4294967040; +pub const IN_CLASSC_NSHIFT = 8; +pub const IN_CLASSC_HOST = 255; +pub const IN_CLASSD_NET = 4026531840; +pub const IN_CLASSD_NSHIFT = 28; +pub const IN_CLASSD_HOST = 268435455; +pub const INADDR_LOOPBACK = 2130706433; +pub const INADDR_NONE = 4294967295; +pub const IOCPARM_MASK = 127; +pub const IOC_VOID = 536870912; +pub const IOC_OUT = 1073741824; +pub const IOC_IN = 2147483648; +pub const MSG_TRUNC = 256; +pub const MSG_CTRUNC = 512; +pub const 
MSG_BCAST = 1024; +pub const MSG_MCAST = 2048; +pub const MSG_ERRQUEUE = 4096; +pub const AI_PASSIVE = 1; +pub const AI_CANONNAME = 2; +pub const AI_NUMERICHOST = 4; +pub const AI_NUMERICSERV = 8; +pub const AI_DNS_ONLY = 16; +pub const AI_ALL = 256; +pub const AI_ADDRCONFIG = 1024; +pub const AI_V4MAPPED = 2048; +pub const AI_NON_AUTHORITATIVE = 16384; +pub const AI_SECURE = 32768; +pub const AI_RETURN_PREFERRED_NAMES = 65536; +pub const AI_FQDN = 131072; +pub const AI_FILESERVER = 262144; +pub const AI_DISABLE_IDN_ENCODING = 524288; +pub const AI_EXTENDED = 2147483648; +pub const AI_RESOLUTION_HANDLE = 1073741824; +pub const FIONBIO = -2147195266; +pub const ADDRINFOEX_VERSION_2 = 2; +pub const ADDRINFOEX_VERSION_3 = 3; +pub const ADDRINFOEX_VERSION_4 = 4; +pub const NS_ALL = 0; +pub const NS_SAP = 1; +pub const NS_NDS = 2; +pub const NS_PEER_BROWSE = 3; +pub const NS_SLP = 5; +pub const NS_DHCP = 6; +pub const NS_TCPIP_LOCAL = 10; +pub const NS_TCPIP_HOSTS = 11; +pub const NS_DNS = 12; +pub const NS_NETBT = 13; +pub const NS_WINS = 14; +pub const NS_NLA = 15; +pub const NS_NBP = 20; +pub const NS_MS = 30; +pub const NS_STDA = 31; +pub const NS_NTDS = 32; +pub const NS_EMAIL = 37; +pub const NS_X500 = 40; +pub const NS_NIS = 41; +pub const NS_NISPLUS = 42; +pub const NS_WRQ = 50; +pub const NS_NETDES = 60; +pub const NI_NOFQDN = 1; +pub const NI_NUMERICHOST = 2; +pub const NI_NAMEREQD = 4; +pub const NI_NUMERICSERV = 8; +pub const NI_DGRAM = 16; +pub const NI_MAXHOST = 1025; +pub const NI_MAXSERV = 32; +pub const INCL_WINSOCK_API_PROTOTYPES = 1; +pub const INCL_WINSOCK_API_TYPEDEFS = 0; +pub const FD_SETSIZE = 64; +pub const IMPLINK_IP = 155; +pub const IMPLINK_LOWEXPER = 156; +pub const IMPLINK_HIGHEXPER = 158; pub const WSADESCRIPTION_LEN = 256; pub const WSASYS_STATUS_LEN = 128; +pub const SOCKET_ERROR = -1; +pub const FROM_PROTOCOL_INFO = -1; +pub const SO_PROTOCOL_INFOA = 8196; +pub const SO_PROTOCOL_INFOW = 8197; +pub const PVD_CONFIG = 12289; +pub const SOMAXCONN = 2147483647; +pub const MSG_PEEK = 2; +pub const MSG_WAITALL = 8; +pub const MSG_PUSH_IMMEDIATE = 32; +pub const MSG_PARTIAL = 32768; +pub const MSG_INTERRUPT = 16; +pub const MSG_MAXIOVLEN = 16; +pub const MAXGETHOSTSTRUCT = 1024; +pub const FD_READ_BIT = 0; +pub const FD_WRITE_BIT = 1; +pub const FD_OOB_BIT = 2; +pub const FD_ACCEPT_BIT = 3; +pub const FD_CONNECT_BIT = 4; +pub const FD_CLOSE_BIT = 5; +pub const FD_QOS_BIT = 6; +pub const FD_GROUP_QOS_BIT = 7; +pub const FD_ROUTING_INTERFACE_CHANGE_BIT = 8; +pub const FD_ADDRESS_LIST_CHANGE_BIT = 9; +pub const FD_MAX_EVENTS = 10; +pub const CF_ACCEPT = 0; +pub const CF_REJECT = 1; +pub const CF_DEFER = 2; +pub const SD_RECEIVE = 0; +pub const SD_SEND = 1; +pub const SD_BOTH = 2; +pub const SG_UNCONSTRAINED_GROUP = 1; +pub const SG_CONSTRAINED_GROUP = 2; +pub const MAX_PROTOCOL_CHAIN = 7; +pub const BASE_PROTOCOL = 1; +pub const LAYERED_PROTOCOL = 0; +pub const WSAPROTOCOL_LEN = 255; +pub const PFL_MULTIPLE_PROTO_ENTRIES = 1; +pub const PFL_RECOMMENDED_PROTO_ENTRY = 2; +pub const PFL_HIDDEN = 4; +pub const PFL_MATCHES_PROTOCOL_ZERO = 8; +pub const PFL_NETWORKDIRECT_PROVIDER = 16; +pub const XP1_CONNECTIONLESS = 1; +pub const XP1_GUARANTEED_DELIVERY = 2; +pub const XP1_GUARANTEED_ORDER = 4; +pub const XP1_MESSAGE_ORIENTED = 8; +pub const XP1_PSEUDO_STREAM = 16; +pub const XP1_GRACEFUL_CLOSE = 32; +pub const XP1_EXPEDITED_DATA = 64; +pub const XP1_CONNECT_DATA = 128; +pub const XP1_DISCONNECT_DATA = 256; +pub const XP1_SUPPORT_BROADCAST = 512; +pub const 
XP1_SUPPORT_MULTIPOINT = 1024; +pub const XP1_MULTIPOINT_CONTROL_PLANE = 2048; +pub const XP1_MULTIPOINT_DATA_PLANE = 4096; +pub const XP1_QOS_SUPPORTED = 8192; +pub const XP1_INTERRUPT = 16384; +pub const XP1_UNI_SEND = 32768; +pub const XP1_UNI_RECV = 65536; +pub const XP1_IFS_HANDLES = 131072; +pub const XP1_PARTIAL_MESSAGE = 262144; +pub const XP1_SAN_SUPPORT_SDP = 524288; +pub const BIGENDIAN = 0; +pub const LITTLEENDIAN = 1; +pub const SECURITY_PROTOCOL_NONE = 0; +pub const JL_SENDER_ONLY = 1; +pub const JL_RECEIVER_ONLY = 2; +pub const JL_BOTH = 4; +pub const WSA_FLAG_OVERLAPPED = 1; +pub const WSA_FLAG_MULTIPOINT_C_ROOT = 2; +pub const WSA_FLAG_MULTIPOINT_C_LEAF = 4; +pub const WSA_FLAG_MULTIPOINT_D_ROOT = 8; +pub const WSA_FLAG_MULTIPOINT_D_LEAF = 16; +pub const WSA_FLAG_ACCESS_SYSTEM_SECURITY = 64; +pub const WSA_FLAG_NO_HANDLE_INHERIT = 128; +pub const WSA_FLAG_REGISTERED_IO = 256; +pub const TH_NETDEV = 1; +pub const TH_TAPI = 2; +pub const SERVICE_MULTIPLE = 1; +pub const NS_LOCALNAME = 19; +pub const RES_UNUSED_1 = 1; +pub const RES_FLUSH_CACHE = 2; +pub const RES_SERVICE = 4; +pub const LUP_DEEP = 1; +pub const LUP_CONTAINERS = 2; +pub const LUP_NOCONTAINERS = 4; +pub const LUP_NEAREST = 8; +pub const LUP_RETURN_NAME = 16; +pub const LUP_RETURN_TYPE = 32; +pub const LUP_RETURN_VERSION = 64; +pub const LUP_RETURN_COMMENT = 128; +pub const LUP_RETURN_ADDR = 256; +pub const LUP_RETURN_BLOB = 512; +pub const LUP_RETURN_ALIASES = 1024; +pub const LUP_RETURN_QUERY_STRING = 2048; +pub const LUP_RETURN_ALL = 4080; +pub const LUP_RES_SERVICE = 32768; +pub const LUP_FLUSHCACHE = 4096; +pub const LUP_FLUSHPREVIOUS = 8192; +pub const LUP_NON_AUTHORITATIVE = 16384; +pub const LUP_SECURE = 32768; +pub const LUP_RETURN_PREFERRED_NAMES = 65536; +pub const LUP_DNS_ONLY = 131072; +pub const LUP_ADDRCONFIG = 1048576; +pub const LUP_DUAL_ADDR = 2097152; +pub const LUP_FILESERVER = 4194304; +pub const LUP_DISABLE_IDN_ENCODING = 8388608; +pub const LUP_API_ANSI = 16777216; +pub const LUP_RESOLUTION_HANDLE = 2147483648; +pub const RESULT_IS_ALIAS = 1; +pub const RESULT_IS_ADDED = 16; +pub const RESULT_IS_CHANGED = 32; +pub const RESULT_IS_DELETED = 64; +pub const POLLRDNORM = 256; +pub const POLLRDBAND = 512; +pub const POLLPRI = 1024; +pub const POLLWRNORM = 16; +pub const POLLWRBAND = 32; +pub const POLLERR = 1; +pub const POLLHUP = 2; +pub const POLLNVAL = 4; +pub const SO_CONNDATA = 28672; +pub const SO_CONNOPT = 28673; +pub const SO_DISCDATA = 28674; +pub const SO_DISCOPT = 28675; +pub const SO_CONNDATALEN = 28676; +pub const SO_CONNOPTLEN = 28677; +pub const SO_DISCDATALEN = 28678; +pub const SO_DISCOPTLEN = 28679; +pub const SO_OPENTYPE = 28680; +pub const SO_SYNCHRONOUS_ALERT = 16; +pub const SO_SYNCHRONOUS_NONALERT = 32; +pub const SO_MAXDG = 28681; +pub const SO_MAXPATHDG = 28682; +pub const SO_UPDATE_ACCEPT_CONTEXT = 28683; +pub const SO_CONNECT_TIME = 28684; +pub const SO_UPDATE_CONNECT_CONTEXT = 28688; +pub const TCP_BSDURGENT = 28672; +pub const TF_DISCONNECT = 1; +pub const TF_REUSE_SOCKET = 2; +pub const TF_WRITE_BEHIND = 4; +pub const TF_USE_DEFAULT_WORKER = 0; +pub const TF_USE_SYSTEM_THREAD = 16; +pub const TF_USE_KERNEL_APC = 32; +pub const TP_ELEMENT_MEMORY = 1; +pub const TP_ELEMENT_FILE = 2; +pub const TP_ELEMENT_EOP = 4; +pub const NLA_ALLUSERS_NETWORK = 1; +pub const NLA_FRIENDLY_NAME = 2; +pub const WSPDESCRIPTION_LEN = 255; +pub const WSS_OPERATION_IN_PROGRESS = 259; +pub const LSP_SYSTEM = 2147483648; +pub const LSP_INSPECTOR = 1; +pub const LSP_REDIRECTOR = 2; +pub 
const LSP_PROXY = 4; +pub const LSP_FIREWALL = 8; +pub const LSP_INBOUND_MODIFY = 16; +pub const LSP_OUTBOUND_MODIFY = 32; +pub const LSP_CRYPTO_COMPRESS = 64; +pub const LSP_LOCAL_CACHE = 128; +pub const IPPROTO_ICMP = 1; +pub const IPPROTO_IGMP = 2; +pub const IPPROTO_GGP = 3; +pub const IPPROTO_TCP = 6; +pub const IPPROTO_PUP = 12; +pub const IPPROTO_UDP = 17; +pub const IPPROTO_IDP = 22; +pub const IPPROTO_ND = 77; +pub const IPPROTO_RAW = 255; +pub const IPPROTO_MAX = 256; +pub const IP_DEFAULT_MULTICAST_TTL = 1; +pub const IP_DEFAULT_MULTICAST_LOOP = 1; +pub const IP_MAX_MEMBERSHIPS = 20; +pub const AF_IPX = 6; +pub const FD_READ = 1; +pub const FD_WRITE = 2; +pub const FD_OOB = 4; +pub const FD_ACCEPT = 8; +pub const FD_CONNECT = 16; +pub const FD_CLOSE = 32; +pub const SERVICE_RESOURCE = 1; +pub const SERVICE_SERVICE = 2; +pub const SERVICE_LOCAL = 4; +pub const SERVICE_FLAG_DEFER = 1; +pub const SERVICE_FLAG_HARD = 2; +pub const PROP_COMMENT = 1; +pub const PROP_LOCALE = 2; +pub const PROP_DISPLAY_HINT = 4; +pub const PROP_VERSION = 8; +pub const PROP_START_TIME = 16; +pub const PROP_MACHINE = 32; +pub const PROP_ADDRESSES = 256; +pub const PROP_SD = 512; +pub const PROP_ALL = 2147483648; +pub const SERVICE_ADDRESS_FLAG_RPC_CN = 1; +pub const SERVICE_ADDRESS_FLAG_RPC_DG = 2; +pub const SERVICE_ADDRESS_FLAG_RPC_NB = 4; +pub const NS_DEFAULT = 0; +pub const NS_VNS = 50; +pub const NSTYPE_HIERARCHICAL = 1; +pub const NSTYPE_DYNAMIC = 2; +pub const NSTYPE_ENUMERABLE = 4; +pub const NSTYPE_WORKGROUP = 8; +pub const XP_CONNECTIONLESS = 1; +pub const XP_GUARANTEED_DELIVERY = 2; +pub const XP_GUARANTEED_ORDER = 4; +pub const XP_MESSAGE_ORIENTED = 8; +pub const XP_PSEUDO_STREAM = 16; +pub const XP_GRACEFUL_CLOSE = 32; +pub const XP_EXPEDITED_DATA = 64; +pub const XP_CONNECT_DATA = 128; +pub const XP_DISCONNECT_DATA = 256; +pub const XP_SUPPORTS_BROADCAST = 512; +pub const XP_SUPPORTS_MULTICAST = 1024; +pub const XP_BANDWIDTH_ALLOCATION = 2048; +pub const XP_FRAGMENTATION = 4096; +pub const XP_ENCRYPTS = 8192; +pub const RES_SOFT_SEARCH = 1; +pub const RES_FIND_MULTIPLE = 2; +pub const SET_SERVICE_PARTIAL_SUCCESS = 1; +pub const UDP_NOCHECKSUM = 1; +pub const UDP_CHECKSUM_COVERAGE = 20; +pub const GAI_STRERROR_BUFFER_SIZE = 1024; + +pub const LPCONDITIONPROC = fn ( + lpCallerId: *WSABUF, + lpCallerData: *WSABUF, + lpSQOS: *QOS, + lpGQOS: *QOS, + lpCalleeId: *WSABUF, + lpCalleeData: *WSABUF, + g: *u32, + dwCallbackData: usize, +) callconv(WINAPI) i32; + +pub const LPWSAOVERLAPPED_COMPLETION_ROUTINE = fn ( + dwError: u32, + cbTransferred: u32, + lpOverlapped: *OVERLAPPED, + dwFlags: u32, +) callconv(WINAPI) void; + +pub const FLOWSPEC = extern struct { + TokenRate: u32, + TokenBucketSize: u32, + PeakBandwidth: u32, + Latency: u32, + DelayVariation: u32, + ServiceType: u32, + MaxSduSize: u32, + MinimumPolicedSize: u32, +}; + +pub const QOS = extern struct { + SendingFlowspec: FLOWSPEC, + ReceivingFlowspec: FLOWSPEC, + ProviderSpecific: WSABUF, +}; + +pub const SOCKET_ADDRESS = extern struct { + lpSockaddr: *sockaddr, + iSockaddrLength: i32, +}; + +pub const SOCKET_ADDRESS_LIST = extern struct { + iAddressCount: i32, + Address: [1]SOCKET_ADDRESS, +}; pub const WSADATA = if (@sizeOf(usize) == @sizeOf(u64)) extern struct { @@ -33,15 +952,11 @@ else lpVendorInfo: *u8, }; -pub const MAX_PROTOCOL_CHAIN = 7; - pub const WSAPROTOCOLCHAIN = extern struct { ChainLen: c_int, ChainEntries: [MAX_PROTOCOL_CHAIN]DWORD, }; -pub const WSAPROTOCOL_LEN = 255; - pub const WSAPROTOCOL_INFOA = extern struct { 
dwServiceFlags1: DWORD, dwServiceFlags2: DWORD, @@ -88,20 +1003,20 @@ pub const WSAPROTOCOL_INFOW = extern struct { szProtocol: [WSAPROTOCOL_LEN + 1]WCHAR, }; -pub const GROUP = u32; +pub const sockproto = extern struct { + sp_family: u16, + sp_protocol: u16, +}; -pub const SG_UNCONSTRAINED_GROUP = 0x1; -pub const SG_CONSTRAINED_GROUP = 0x2; +pub const linger = extern struct { + l_onoff: u16, + l_linger: u16, +}; -pub const WSA_FLAG_OVERLAPPED = 0x01; -pub const WSA_FLAG_MULTIPOINT_C_ROOT = 0x02; -pub const WSA_FLAG_MULTIPOINT_C_LEAF = 0x04; -pub const WSA_FLAG_MULTIPOINT_D_ROOT = 0x08; -pub const WSA_FLAG_MULTIPOINT_D_LEAF = 0x10; -pub const WSA_FLAG_ACCESS_SYSTEM_SECURITY = 0x40; -pub const WSA_FLAG_NO_HANDLE_INHERIT = 0x80; - -pub const WSAEVENT = HANDLE; +pub const WSANETWORKEVENTS = extern struct { + lNetworkEvents: i32, + iErrorCode: [10]i32, +}; pub const WSAOVERLAPPED = extern struct { Internal: DWORD, @@ -111,82 +1026,9 @@ pub const WSAOVERLAPPED = extern struct { hEvent: ?WSAEVENT, }; -pub const WSAOVERLAPPED_COMPLETION_ROUTINE = fn (dwError: DWORD, cbTransferred: DWORD, lpOverlapped: *WSAOVERLAPPED, dwFlags: DWORD) callconv(.C) void; +pub const addrinfo = addrinfoa; -pub const ADDRESS_FAMILY = u16; - -// Microsoft use the signed c_int for this, but it should never be negative -pub const socklen_t = u32; - -pub const AF_UNSPEC = 0; -pub const AF_UNIX = 1; -pub const AF_INET = 2; -pub const AF_IMPLINK = 3; -pub const AF_PUP = 4; -pub const AF_CHAOS = 5; -pub const AF_NS = 6; -pub const AF_IPX = AF_NS; -pub const AF_ISO = 7; -pub const AF_OSI = AF_ISO; -pub const AF_ECMA = 8; -pub const AF_DATAKIT = 9; -pub const AF_CCITT = 10; -pub const AF_SNA = 11; -pub const AF_DECnet = 12; -pub const AF_DLI = 13; -pub const AF_LAT = 14; -pub const AF_HYLINK = 15; -pub const AF_APPLETALK = 16; -pub const AF_NETBIOS = 17; -pub const AF_VOICEVIEW = 18; -pub const AF_FIREFOX = 19; -pub const AF_UNKNOWN1 = 20; -pub const AF_BAN = 21; -pub const AF_ATM = 22; -pub const AF_INET6 = 23; -pub const AF_CLUSTER = 24; -pub const AF_12844 = 25; -pub const AF_IRDA = 26; -pub const AF_NETDES = 28; -pub const AF_TCNPROCESS = 29; -pub const AF_TCNMESSAGE = 30; -pub const AF_ICLFXBM = 31; -pub const AF_BTH = 32; -pub const AF_MAX = 33; - -pub const SOCK_STREAM = 1; -pub const SOCK_DGRAM = 2; -pub const SOCK_RAW = 3; -pub const SOCK_RDM = 4; -pub const SOCK_SEQPACKET = 5; - -pub const IPPROTO_ICMP = 1; -pub const IPPROTO_IGMP = 2; -pub const BTHPROTO_RFCOMM = 3; -pub const IPPROTO_TCP = 6; -pub const IPPROTO_UDP = 17; -pub const IPPROTO_ICMPV6 = 58; -pub const IPPROTO_RM = 113; - -pub const AI_PASSIVE = 0x00001; -pub const AI_CANONNAME = 0x00002; -pub const AI_NUMERICHOST = 0x00004; -pub const AI_NUMERICSERV = 0x00008; -pub const AI_ADDRCONFIG = 0x00400; -pub const AI_V4MAPPED = 0x00800; -pub const AI_NON_AUTHORITATIVE = 0x04000; -pub const AI_SECURE = 0x08000; -pub const AI_RETURN_PREFERRED_NAMES = 0x10000; -pub const AI_DISABLE_IDN_ENCODING = 0x80000; - -pub const FIONBIO = -2147195266; - -pub const sockaddr = extern struct { - family: ADDRESS_FAMILY, - data: [14]u8, -}; - -pub const addrinfo = extern struct { +pub const addrinfoa = extern struct { flags: i32, family: i32, socktype: i32, @@ -197,6 +1039,32 @@ pub const addrinfo = extern struct { next: ?*addrinfo, }; +pub const addrinfoexA = extern struct { + ai_flags: i32, + ai_family: i32, + ai_socktype: i32, + ai_protocol: i32, + ai_addrlen: usize, + ai_canonname: [*:0]u8, + ai_addr: *sockaddr, + ai_blob: *c_void, + ai_bloblen: usize, + ai_provider: *GUID, + 
ai_next: *addrinfoexA, +}; + +pub const sockaddr = extern struct { + family: ADDRESS_FAMILY, + data: [14]u8, +}; + +pub const sockaddr_storage = extern struct { + family: ADDRESS_FAMILY, + __pad1: [6]u8, + __align: i64, + __pad2: [112]u8, +}; + /// IPv4 socket address pub const sockaddr_in = extern struct { family: ADDRESS_FAMILY = AF_INET, @@ -225,7 +1093,10 @@ pub const WSABUF = extern struct { buf: [*]u8, }; -pub const WSAMSG = extern struct { +pub const msghdr = WSAMSG; +pub const msghdr_const = WSAMSG_const; + +pub const WSAMSG_const = extern struct { name: *const sockaddr, namelen: INT, lpBuffers: [*]WSABUF, @@ -234,26 +1105,108 @@ pub const WSAMSG = extern struct { dwFlags: DWORD, }; +pub const WSAMSG = extern struct { + name: *sockaddr, + namelen: INT, + lpBuffers: [*]WSABUF, + dwBufferCount: DWORD, + Control: WSABUF, + dwFlags: DWORD, +}; + +pub const WSAPOLLFD = pollfd; + pub const pollfd = extern struct { fd: SOCKET, events: SHORT, revents: SHORT, }; -// Event flag definitions for WSAPoll(). +pub const TRANSMIT_FILE_BUFFERS = extern struct { + Head: *c_void, + HeadLength: u32, + Tail: *c_void, + TailLength: u32, +}; -pub const POLLRDNORM = 0x0100; -pub const POLLRDBAND = 0x0200; -pub const POLLIN = (POLLRDNORM | POLLRDBAND); -pub const POLLPRI = 0x0400; +pub const LPFN_TRANSMITFILE = fn ( + hSocket: SOCKET, + hFile: HANDLE, + nNumberOfBytesToWrite: u32, + nNumberOfBytesPerSend: u32, + lpOverlapped: ?*OVERLAPPED, + lpTransmitBuffers: ?*TRANSMIT_FILE_BUFFERS, + dwReserved: u32, +) callconv(WINAPI) BOOL; -pub const POLLWRNORM = 0x0010; -pub const POLLOUT = (POLLWRNORM); -pub const POLLWRBAND = 0x0020; +pub const LPFN_ACCEPTEX = fn ( + sListenSocket: SOCKET, + sAcceptSocket: SOCKET, + lpOutputBuffer: *c_void, + dwReceiveDataLength: u32, + dwLocalAddressLength: u32, + dwRemoteAddressLength: u32, + lpdwBytesReceived: *u32, + lpOverlapped: *OVERLAPPED, +) callconv(WINAPI) BOOL; -pub const POLLERR = 0x0001; -pub const POLLHUP = 0x0002; -pub const POLLNVAL = 0x0004; +pub const LPFN_GETACCEPTEXSOCKADDRS = fn ( + lpOutputBuffer: *c_void, + dwReceiveDataLength: u32, + dwLocalAddressLength: u32, + dwRemoteAddressLength: u32, + LocalSockaddr: **sockaddr, + LocalSockaddrLength: *i32, + RemoteSockaddr: **sockaddr, + RemoteSockaddrLength: *i32, +) callconv(WINAPI) void; + +pub const LPFN_WSASENDMSG = fn ( + s: SOCKET, + lpMsg: *const WSAMSG_const, + dwFlags: u32, + lpNumberOfBytesSent: ?*u32, + lpOverlapped: ?*OVERLAPPED, + lpCompletionRoutine: ?LPWSAOVERLAPPED_COMPLETION_ROUTINE, +) callconv(WINAPI) i32; + +pub const LPFN_WSARECVMSG = fn ( + s: SOCKET, + lpMsg: *WSAMSG, + lpdwNumberOfBytesRecv: ?*u32, + lpOverlapped: ?*OVERLAPPED, + lpCompletionRoutine: ?LPWSAOVERLAPPED_COMPLETION_ROUTINE, +) callconv(WINAPI) i32; + +pub const LPSERVICE_CALLBACK_PROC = fn ( + lParam: LPARAM, + hAsyncTaskHandle: HANDLE, +) callconv(WINAPI) void; + +pub const SERVICE_ASYNC_INFO = extern struct { + lpServiceCallbackProc: LPSERVICE_CALLBACK_PROC, + lParam: LPARAM, + hAsyncTaskHandle: HANDLE, +}; + +pub const LPLOOKUPSERVICE_COMPLETION_ROUTINE = fn ( + dwError: u32, + dwBytes: u32, + lpOverlapped: *OVERLAPPED, +) callconv(WINAPI) void; + +pub const fd_set = extern struct { + fd_count: u32, + fd_array: [64]SOCKET, +}; + +pub const hostent = extern struct { + h_name: [*]u8, + h_aliases: **i8, + h_addrtype: i16, + h_length: i16, + h_addr_list: **i8, +}; // https://docs.microsoft.com/en-au/windows/win32/winsock/windows-sockets-error-codes-2 pub const WinsockError = enum(u16) { @@ -704,180 +1657,649 @@ pub const 
WinsockError = enum(u16) { _, }; -/// no parameters -const IOC_VOID = 0x80000000; +pub extern "ws2_32" fn accept( + s: SOCKET, + addr: ?*sockaddr, + addrlen: ?*i32, +) callconv(WINAPI) SOCKET; -/// copy out parameters -const IOC_OUT = 0x40000000; +pub extern "ws2_32" fn bind( + s: SOCKET, + name: *const sockaddr, + namelen: i32, +) callconv(WINAPI) i32; -/// copy in parameters -const IOC_IN = 0x80000000; +pub extern "ws2_32" fn closesocket( + s: SOCKET, +) callconv(WINAPI) i32; -/// The IOCTL is a generic Windows Sockets 2 IOCTL code. New IOCTL codes defined for Windows Sockets 2 will have T == 1. -const IOC_WS2 = 0x08000000; +pub extern "ws2_32" fn connect( + s: SOCKET, + name: *const sockaddr, + namelen: i32, +) callconv(WINAPI) i32; -pub const SIO_BASE_HANDLE = IOC_OUT | IOC_WS2 | 34; +pub extern "ws2_32" fn ioctlsocket( + s: SOCKET, + cmd: i32, + argp: *u32, +) callconv(WINAPI) i32; -pub const SOL_SOCKET = 0xffff; +pub extern "ws2_32" fn getpeername( + s: SOCKET, + name: *sockaddr, + namelen: *i32, +) callconv(WINAPI) i32; -pub const SO_DEBUG = 0x0001; -pub const SO_ACCEPTCONN = 0x0002; -pub const SO_REUSEADDR = 0x0004; -pub const SO_KEEPALIVE = 0x0008; -pub const SO_DONTROUTE = 0x0010; -pub const SO_BROADCAST = 0x0020; -pub const SO_USELOOPBACK = 0x0040; -pub const SO_LINGER = 0x0080; -pub const SO_OOBINLINE = 0x0100; +pub extern "ws2_32" fn getsockname( + s: SOCKET, + name: *sockaddr, + namelen: *i32, +) callconv(WINAPI) i32; -pub const SO_DONTLINGER = ~@as(u32, SO_LINGER); -pub const SO_EXCLUSIVEADDRUSE = ~@as(u32, SO_REUSEADDR); +pub extern "ws2_32" fn getsockopt( + s: SOCKET, + level: i32, + optname: i32, + optval: [*]u8, + optlen: *i32, +) callconv(WINAPI) i32; -pub const SO_SNDBUF = 0x1001; -pub const SO_RCVBUF = 0x1002; -pub const SO_SNDLOWAT = 0x1003; -pub const SO_RCVLOWAT = 0x1004; -pub const SO_SNDTIMEO = 0x1005; -pub const SO_RCVTIMEO = 0x1006; -pub const SO_ERROR = 0x1007; -pub const SO_TYPE = 0x1008; +pub extern "ws2_32" fn htonl( + hostlong: u32, +) callconv(WINAPI) u32; -pub const SO_GROUP_ID = 0x2001; -pub const SO_GROUP_PRIORITY = 0x2002; -pub const SO_MAX_MSG_SIZE = 0x2003; -pub const SO_PROTOCOL_INFOA = 0x2004; -pub const SO_PROTOCOL_INFOW = 0x2005; +pub extern "ws2_32" fn htons( + hostshort: u16, +) callconv(WINAPI) u16; -pub const PVD_CONFIG = 0x3001; -pub const SO_CONDITIONAL_ACCEPT = 0x3002; +pub extern "ws2_32" fn inet_addr( + cp: ?[*]const u8, +) callconv(WINAPI) u32; -pub const TCP_NODELAY = 0x0001; +pub extern "ws2_32" fn listen( + s: SOCKET, + backlog: i32, +) callconv(WINAPI) i32; + +pub extern "ws2_32" fn ntohl( + netlong: u32, +) callconv(WINAPI) u32; + +pub extern "ws2_32" fn ntohs( + netshort: u16, +) callconv(WINAPI) u16; + +pub extern "ws2_32" fn recv( + s: SOCKET, + buf: [*]u8, + len: i32, + flags: i32, +) callconv(WINAPI) i32; + +pub extern "ws2_32" fn recvfrom( + s: SOCKET, + buf: [*]u8, + len: i32, + flags: i32, + from: ?*sockaddr, + fromlen: ?*i32, +) callconv(WINAPI) i32; + +pub extern "ws2_32" fn select( + nfds: i32, + readfds: ?*fd_set, + writefds: ?*fd_set, + exceptfds: ?*fd_set, + timeout: ?*const timeval, +) callconv(WINAPI) i32; + +pub extern "ws2_32" fn send( + s: SOCKET, + buf: [*]const u8, + len: i32, + flags: u32, +) callconv(WINAPI) i32; + +pub extern "ws2_32" fn sendto( + s: SOCKET, + buf: [*]const u8, + len: i32, + flags: i32, + to: *const sockaddr, + tolen: i32, +) callconv(WINAPI) i32; + +pub extern "ws2_32" fn setsockopt( + s: SOCKET, + level: i32, + optname: i32, + optval: ?[*]const u8, + optlen: i32, +) callconv(WINAPI) i32; 
+ +pub extern "ws2_32" fn shutdown( + s: SOCKET, + how: i32, +) callconv(WINAPI) i32; + +pub extern "ws2_32" fn socket( + af: i32, + @"type": i32, + protocol: i32, +) callconv(WINAPI) SOCKET; pub extern "ws2_32" fn WSAStartup( wVersionRequired: WORD, lpWSAData: *WSADATA, -) callconv(WINAPI) c_int; -pub extern "ws2_32" fn WSACleanup() callconv(WINAPI) c_int; +) callconv(WINAPI) i32; + +pub extern "ws2_32" fn WSACleanup() callconv(WINAPI) i32; + +pub extern "ws2_32" fn WSASetLastError(iError: i32) callconv(WINAPI) void; + pub extern "ws2_32" fn WSAGetLastError() callconv(WINAPI) WinsockError; -pub extern "ws2_32" fn WSASocketA( - af: c_int, - type: c_int, - protocol: c_int, - lpProtocolInfo: ?*WSAPROTOCOL_INFOA, - g: GROUP, - dwFlags: DWORD, -) callconv(WINAPI) SOCKET; -pub extern "ws2_32" fn WSASocketW( - af: c_int, - type: c_int, - protocol: c_int, - lpProtocolInfo: ?*WSAPROTOCOL_INFOW, - g: GROUP, - dwFlags: DWORD, -) callconv(WINAPI) SOCKET; -pub extern "ws2_32" fn closesocket(s: SOCKET) callconv(WINAPI) c_int; -pub extern "ws2_32" fn WSAIoctl( + +pub extern "ws2_32" fn WSAIsBlocking() callconv(WINAPI) BOOL; + +pub extern "ws2_32" fn WSAUnhookBlockingHook() callconv(WINAPI) i32; + +pub extern "ws2_32" fn WSASetBlockingHook(lpBlockFunc: FARPROC) callconv(WINAPI) FARPROC; + +pub extern "ws2_32" fn WSACancelBlockingCall() callconv(WINAPI) i32; + +pub extern "ws2_32" fn WSAAsyncGetServByName( + hWnd: HWND, + wMsg: u32, + name: [*:0]const u8, + proto: ?[*:0]const u8, + buf: [*]u8, + buflen: i32, +) callconv(WINAPI) HANDLE; + +pub extern "ws2_32" fn WSAAsyncGetServByPort( + hWnd: HWND, + wMsg: u32, + port: i32, + proto: ?[*:0]const u8, + buf: [*]u8, + buflen: i32, +) callconv(WINAPI) HANDLE; + +pub extern "ws2_32" fn WSAAsyncGetProtoByName( + hWnd: HWND, + wMsg: u32, + name: [*:0]const u8, + buf: [*]u8, + buflen: i32, +) callconv(WINAPI) HANDLE; + +pub extern "ws2_32" fn WSAAsyncGetProtoByNumber( + hWnd: HWND, + wMsg: u32, + number: i32, + buf: [*]u8, + buflen: i32, +) callconv(WINAPI) HANDLE; + +pub extern "ws2_32" fn WSACancelAsyncRequest(hAsyncTaskHandle: HANDLE) callconv(WINAPI) i32; + +pub extern "ws2_32" fn WSAAsyncSelect( s: SOCKET, - dwIoControlCode: DWORD, - lpvInBuffer: ?*const c_void, - cbInBuffer: DWORD, - lpvOutBuffer: ?LPVOID, - cbOutBuffer: DWORD, - lpcbBytesReturned: LPDWORD, - lpOverlapped: ?*WSAOVERLAPPED, - lpCompletionRoutine: ?WSAOVERLAPPED_COMPLETION_ROUTINE, -) callconv(WINAPI) c_int; -pub extern "ws2_32" fn accept( + hWnd: HWND, + wMsg: u32, + lEvent: i32, +) callconv(WINAPI) i32; + +pub extern "ws2_32" fn WSAAccept( s: SOCKET, addr: ?*sockaddr, - addrlen: ?*c_int, + addrlen: ?*i32, + lpfnCondition: ?LPCONDITIONPROC, + dwCallbackData: usize, ) callconv(WINAPI) SOCKET; -pub extern "ws2_32" fn bind( - s: SOCKET, - addr: ?*const sockaddr, - addrlen: c_int, -) callconv(WINAPI) c_int; -pub extern "ws2_32" fn connect( + +pub extern "ws2_32" fn WSACloseEvent(hEvent: HANDLE) callconv(WINAPI) BOOL; + +pub extern "ws2_32" fn WSAConnect( s: SOCKET, name: *const sockaddr, - namelen: c_int, -) callconv(WINAPI) c_int; -pub extern "ws2_32" fn listen( + namelen: i32, + lpCallerData: ?*WSABUF, + lpCalleeData: ?*WSABUF, + lpSQOS: ?*QOS, + lpGQOS: ?*QOS, +) callconv(WINAPI) i32; + +pub extern "ws2_32" fn WSAConnectByNameW( s: SOCKET, - backlog: c_int, -) callconv(WINAPI) c_int; + nodename: [*:0]const u16, + servicename: [*:0]const u16, + LocalAddressLength: ?*u32, + LocalAddress: ?*sockaddr, + RemoteAddressLength: ?*u32, + RemoteAddress: ?*sockaddr, + timeout: ?*const timeval, + Reserved: 
*OVERLAPPED, +) callconv(WINAPI) BOOL; + +pub extern "ws2_32" fn WSAConnectByNameA( + s: SOCKET, + nodename: [*:0]const u8, + servicename: [*:0]const u8, + LocalAddressLength: ?*u32, + LocalAddress: ?*sockaddr, + RemoteAddressLength: ?*u32, + RemoteAddress: ?*sockaddr, + timeout: ?*const timeval, + Reserved: *OVERLAPPED, +) callconv(WINAPI) BOOL; + +pub extern "ws2_32" fn WSAConnectByList( + s: SOCKET, + SocketAddress: *SOCKET_ADDRESS_LIST, + LocalAddressLength: ?*u32, + LocalAddress: ?*sockaddr, + RemoteAddressLength: ?*u32, + RemoteAddress: ?*sockaddr, + timeout: ?*const timeval, + Reserved: *OVERLAPPED, +) callconv(WINAPI) BOOL; + +pub extern "ws2_32" fn WSACreateEvent() callconv(WINAPI) HANDLE; + +pub extern "ws2_32" fn WSADuplicateSocketA( + s: SOCKET, + dwProcessId: u32, + lpProtocolInfo: *WSAPROTOCOL_INFOA, +) callconv(WINAPI) i32; + +pub extern "ws2_32" fn WSADuplicateSocketW( + s: SOCKET, + dwProcessId: u32, + lpProtocolInfo: *WSAPROTOCOL_INFOW, +) callconv(WINAPI) i32; + +pub extern "ws2_32" fn WSAEnumNetworkEvents( + s: SOCKET, + hEventObject: HANDLE, + lpNetworkEvents: *WSANETWORKEVENTS, +) callconv(WINAPI) i32; + +pub extern "ws2_32" fn WSAEnumProtocolsA( + lpiProtocols: ?*i32, + lpProtocolBuffer: ?*WSAPROTOCOL_INFOA, + lpdwBufferLength: *u32, +) callconv(WINAPI) i32; + +pub extern "ws2_32" fn WSAEnumProtocolsW( + lpiProtocols: ?*i32, + lpProtocolBuffer: ?*WSAPROTOCOL_INFOW, + lpdwBufferLength: *u32, +) callconv(WINAPI) i32; + +pub extern "ws2_32" fn WSAEventSelect( + s: SOCKET, + hEventObject: HANDLE, + lNetworkEvents: i32, +) callconv(WINAPI) i32; + +pub extern "ws2_32" fn WSAGetOverlappedResult( + s: SOCKET, + lpOverlapped: *OVERLAPPED, + lpcbTransfer: *u32, + fWait: BOOL, + lpdwFlags: *u32, +) callconv(WINAPI) BOOL; + +pub extern "ws2_32" fn WSAGetQOSByName( + s: SOCKET, + lpQOSName: *WSABUF, + lpQOS: *QOS, +) callconv(WINAPI) BOOL; + +pub extern "ws2_32" fn WSAHtonl( + s: SOCKET, + hostlong: u32, + lpnetlong: *u32, +) callconv(WINAPI) i32; + +pub extern "ws2_32" fn WSAHtons( + s: SOCKET, + hostshort: u16, + lpnetshort: *u16, +) callconv(WINAPI) i32; + +pub extern "ws2_32" fn WSAIoctl( + s: SOCKET, + dwIoControlCode: u32, + lpvInBuffer: ?*const c_void, + cbInBuffer: u32, + lpvOutbuffer: ?*c_void, + cbOutbuffer: u32, + lpcbBytesReturned: *u32, + lpOverlapped: ?*OVERLAPPED, + lpCompletionRoutine: ?LPWSAOVERLAPPED_COMPLETION_ROUTINE, +) callconv(WINAPI) i32; + +pub extern "ws2_32" fn WSAJoinLeaf( + s: SOCKET, + name: *const sockaddr, + namelen: i32, + lpCallerdata: ?*WSABUF, + lpCalleeData: ?*WSABUF, + lpSQOS: ?*QOS, + lpGQOS: ?*QOS, + dwFlags: u32, +) callconv(WINAPI) SOCKET; + +pub extern "ws2_32" fn WSANtohl( + s: SOCKET, + netlong: u32, + lphostlong: *u32, +) callconv(WINAPI) u32; + +pub extern "ws2_32" fn WSANtohs( + s: SOCKET, + netshort: u16, + lphostshort: *u16, +) callconv(WINAPI) i32; + pub extern "ws2_32" fn WSARecv( s: SOCKET, - lpBuffers: [*]const WSABUF, - dwBufferCount: DWORD, - lpNumberOfBytesRecvd: ?*DWORD, - lpFlags: *DWORD, - lpOverlapped: ?*WSAOVERLAPPED, - lpCompletionRoutine: ?WSAOVERLAPPED_COMPLETION_ROUTINE, -) callconv(WINAPI) c_int; + lpBuffers: [*]WSABUF, + dwBufferCouynt: u32, + lpNumberOfBytesRecv: ?*u32, + lpFlags: *u32, + lpOverlapped: ?*OVERLAPPED, + lpCompletionRoutine: ?LPWSAOVERLAPPED_COMPLETION_ROUTINE, +) callconv(WINAPI) i32; + +pub extern "ws2_32" fn WSARecvDisconnect( + s: SOCKET, + lpInboundDisconnectData: ?*WSABUF, +) callconv(WINAPI) i32; + pub extern "ws2_32" fn WSARecvFrom( s: SOCKET, - lpBuffers: [*]const WSABUF, - dwBufferCount: 
DWORD, - lpNumberOfBytesRecvd: ?*DWORD, - lpFlags: *DWORD, + lpBuffers: [*]WSABUF, + dwBuffercount: u32, + lpNumberOfBytesRecvd: ?*u32, + lpFlags: *u32, lpFrom: ?*sockaddr, - lpFromlen: ?*socklen_t, - lpOverlapped: ?*WSAOVERLAPPED, - lpCompletionRoutine: ?WSAOVERLAPPED_COMPLETION_ROUTINE, -) callconv(WINAPI) c_int; + lpFromlen: ?*i32, + lpOverlapped: ?*OVERLAPPED, + lpCompletionRoutine: ?LPWSAOVERLAPPED_COMPLETION_ROUTINE, +) callconv(WINAPI) i32; + +pub extern "ws2_32" fn WSAResetEvent(hEvent: HANDLE) callconv(WINAPI) i32; + pub extern "ws2_32" fn WSASend( s: SOCKET, lpBuffers: [*]WSABUF, - dwBufferCount: DWORD, - lpNumberOfBytesSent: ?*DWORD, - dwFlags: DWORD, - lpOverlapped: ?*WSAOVERLAPPED, - lpCompletionRoutine: ?WSAOVERLAPPED_COMPLETION_ROUTINE, -) callconv(WINAPI) c_int; + dwBufferCount: u32, + lpNumberOfBytesSent: ?*u32, + dwFlags: u32, + lpOverlapped: ?*OVERLAPPED, + lpCompletionRoutine: ?LPWSAOVERLAPPED_COMPLETION_ROUTINE, +) callconv(WINAPI) i32; + +pub extern "ws2_32" fn WSASendMsg( + s: SOCKET, + lpMsg: *const WSAMSG_const, + dwFlags: u32, + lpNumberOfBytesSent: ?*u32, + lpOverlapped: ?*OVERLAPPED, + lpCompletionRoutine: ?LPWSAOVERLAPPED_COMPLETION_ROUTINE, +) callconv(WINAPI) i32; + +pub extern "ws2_32" fn WSARecvMsg( + s: SOCKET, + lpMsg: *WSAMSG, + lpdwNumberOfBytesRecv: ?*u32, + lpOverlapped: ?*OVERLAPPED, + lpCompletionRoutine: ?LPWSAOVERLAPPED_COMPLETION_ROUTINE, +) callconv(WINAPI) i32; + +pub extern "ws2_32" fn WSASendDisconnect( + s: SOCKET, + lpOutboundDisconnectData: ?*WSABUF, +) callconv(WINAPI) i32; + pub extern "ws2_32" fn WSASendTo( s: SOCKET, lpBuffers: [*]WSABUF, - dwBufferCount: DWORD, - lpNumberOfBytesSent: ?*DWORD, - dwFlags: DWORD, + dwBufferCount: u32, + lpNumberOfBytesSent: ?*u32, + dwFlags: u32, lpTo: ?*const sockaddr, - iTolen: c_int, - lpOverlapped: ?*WSAOVERLAPPED, - lpCompletionRoutine: ?WSAOVERLAPPED_COMPLETION_ROUTINE, -) callconv(WINAPI) c_int; -pub extern "ws2_32" fn WSAPoll( - fdArray: [*]pollfd, - fds: c_ulong, - timeout: c_int, -) callconv(WINAPI) c_int; -pub extern "ws2_32" fn getaddrinfo( - pNodeName: [*:0]const u8, - pServiceName: [*:0]const u8, - pHints: *const addrinfo, - ppResult: **addrinfo, + iToLen: i32, + lpOverlapped: ?*OVERLAPPED, + lpCompletionRounte: ?LPWSAOVERLAPPED_COMPLETION_ROUTINE, ) callconv(WINAPI) i32; -pub extern "ws2_32" fn freeaddrinfo( - pAddrInfo: *addrinfo, + +pub extern "ws2_32" fn WSASetEvent( + hEvent: HANDLE, +) callconv(WINAPI) BOOL; + +pub extern "ws2_32" fn WSASocketA( + af: i32, + @"type": i32, + protocol: i32, + lpProtocolInfo: ?*WSAPROTOCOL_INFOA, + g: u32, + dwFlags: u32, +) callconv(WINAPI) SOCKET; + +pub extern "ws2_32" fn WSASocketW( + af: i32, + @"type": i32, + protocol: i32, + lpProtocolInfo: ?*WSAPROTOCOL_INFOW, + g: u32, + dwFlags: u32, +) callconv(WINAPI) SOCKET; + +pub extern "ws2_32" fn WSAWaitForMultipleEvents( + cEvents: u32, + lphEvents: [*]const HANDLE, + fWaitAll: BOOL, + dwTimeout: u32, + fAlertable: BOOL, +) callconv(WINAPI) u32; + +pub extern "ws2_32" fn WSAAddressToStringA( + lpsaAddress: *sockaddr, + dwAddressLength: u32, + lpProtocolInfo: ?*WSAPROTOCOL_INFOA, + lpszAddressString: [*]u8, + lpdwAddressStringLength: *u32, +) callconv(WINAPI) i32; + +pub extern "ws2_32" fn WSAAddressToStringW( + lpsaAddress: *sockaddr, + dwAddressLength: u32, + lpProtocolInfo: ?*WSAPROTOCOL_INFOW, + lpszAddressString: [*]u16, + lpdwAddressStringLength: *u32, +) callconv(WINAPI) i32; + +pub extern "ws2_32" fn WSAStringToAddressA( + AddressString: [*:0]const u8, + AddressFamily: i32, + lpProtocolInfo: 
?*WSAPROTOCOL_INFOA, + lpAddress: *sockaddr, + lpAddressLength: *i32, +) callconv(WINAPI) i32; + +pub extern "ws2_32" fn WSAStringToAddressW( + AddressString: [*:0]const u16, + AddressFamily: i32, + lpProtocolInfo: ?*WSAPROTOCOL_INFOW, + lpAddrses: *sockaddr, + lpAddressLength: *i32, +) callconv(WINAPI) i32; + +pub extern "ws2_32" fn WSAProviderConfigChange( + lpNotificationHandle: *HANDLE, + lpOverlapped: ?*OVERLAPPED, + lpCompletionRoutine: ?LPWSAOVERLAPPED_COMPLETION_ROUTINE, +) callconv(WINAPI) i32; + +pub extern "ws2_32" fn WSAPoll( + fdArray: [*]WSAPOLLFD, + fds: u32, + timeout: i32, +) callconv(WINAPI) i32; + +pub extern "mswsock" fn WSARecvEx( + s: SOCKET, + buf: [*]u8, + len: i32, + flags: *i32, +) callconv(WINAPI) i32; + +pub extern "mswsock" fn TransmitFile( + hSocket: SOCKET, + hFile: HANDLE, + nNumberOfBytesToWrite: u32, + nNumberOfBytesPerSend: u32, + lpOverlapped: ?*OVERLAPPED, + lpTransmitBuffers: ?*TRANSMIT_FILE_BUFFERS, + dwReserved: u32, +) callconv(WINAPI) BOOL; + +pub extern "mswsock" fn AcceptEx( + sListenSocket: SOCKET, + sAcceptSocket: SOCKET, + lpOutputBuffer: *c_void, + dwReceiveDataLength: u32, + dwLocalAddressLength: u32, + dwRemoteAddressLength: u32, + lpdwBytesReceived: *u32, + lpOverlapped: *OVERLAPPED, +) callconv(WINAPI) BOOL; + +pub extern "mswsock" fn GetAcceptExSockaddrs( + lpOutputBuffer: *c_void, + dwReceiveDataLength: u32, + dwLocalAddressLength: u32, + dwRemoteAddressLength: u32, + LocalSockaddr: **sockaddr, + LocalSockaddrLength: *i32, + RemoteSockaddr: **sockaddr, + RemoteSockaddrLength: *i32, ) callconv(WINAPI) void; -pub extern "ws2_32" fn ioctlsocket( - s: SOCKET, - cmd: c_long, - argp: *c_ulong, -) callconv(WINAPI) c_int; -pub extern "ws2_32" fn getsockname( - s: SOCKET, - name: *sockaddr, - namelen: *c_int, -) callconv(WINAPI) c_int; -pub extern "ws2_32" fn setsockopt( - s: SOCKET, - level: u32, - optname: u32, - optval: ?*const c_void, - optlen: socklen_t, -) callconv(WINAPI) c_int; -pub extern "ws2_32" fn shutdown( - s: SOCKET, - how: c_int, -) callconv(WINAPI) c_int; + +pub extern "ws2_32" fn WSAProviderCompleteAsyncCall( + hAsyncCall: HANDLE, + iRetCode: i32, +) callconv(WINAPI) i32; + +pub extern "mswsock" fn EnumProtocolsA( + lpiProtocols: ?*i32, + lpProtocolBuffer: *c_void, + lpdwBufferLength: *u32, +) callconv(WINAPI) i32; + +pub extern "mswsock" fn EnumProtocolsW( + lpiProtocols: ?*i32, + lpProtocolBuffer: *c_void, + lpdwBufferLength: *u32, +) callconv(WINAPI) i32; + +pub extern "mswsock" fn GetAddressByNameA( + dwNameSpace: u32, + lpServiceType: *GUID, + lpServiceName: ?[*:0]u8, + lpiProtocols: ?*i32, + dwResolution: u32, + lpServiceAsyncInfo: ?*SERVICE_ASYNC_INFO, + lpCsaddrBuffer: *c_void, + lpAliasBuffer: ?[*:0]const u8, + lpdwAliasBufferLength: *u32, +) callconv(WINAPI) i32; + +pub extern "mswsock" fn GetAddressByNameW( + dwNameSpace: u32, + lpServiceType: *GUID, + lpServiceName: ?[*:0]u16, + lpiProtocols: ?*i32, + dwResolution: u32, + lpServiceAsyncInfo: ?*SERVICE_ASYNC_INFO, + lpCsaddrBuffer: *c_void, + ldwBufferLEngth: *u32, + lpAliasBuffer: ?[*:0]u16, + lpdwAliasBufferLength: *u32, +) callconv(WINAPI) i32; + +pub extern "mswsock" fn GetTypeByNameA( + lpServiceName: [*:0]u8, + lpServiceType: *GUID, +) callconv(WINAPI) i32; + +pub extern "mswsock" fn GetTypeByNameW( + lpServiceName: [*:0]u16, + lpServiceType: *GUID, +) callconv(WINAPI) i32; + +pub extern "mswsock" fn GetNameByTypeA( + lpServiceType: *GUID, + lpServiceName: [*:0]u8, + dwNameLength: u32, +) callconv(WINAPI) i32; + +pub extern "mswsock" fn GetNameByTypeW( + 
lpServiceType: *GUID, + lpServiceName: [*:0]u16, + dwNameLength: u32, +) callconv(WINAPI) i32; + +pub extern "ws2_32" fn getaddrinfo( + pNodeName: ?[*:0]const u8, + pServiceName: ?[*:0]const u8, + pHints: ?*const addrinfoa, + ppResult: **addrinfoa, +) callconv(WINAPI) i32; + +pub extern "ws2_32" fn GetAddrInfoExA( + pName: ?[*:0]const u8, + pServiceName: ?[*:0]const u8, + dwNameSapce: u32, + lpNspId: ?*GUID, + hints: ?*const addrinfoexA, + ppResult: **addrinfoexA, + timeout: ?*timeval, + lpOverlapped: ?*OVERLAPPED, + lpCompletionRoutine: ?LPLOOKUPSERVICE_COMPLETION_ROUTINE, +) callconv(WINAPI) i32; + +pub extern "ws2_32" fn GetAddrInfoExCancel( + lpHandle: *HANDLE, +) callconv(WINAPI) i32; + +pub extern "ws2_32" fn GetAddrInfoExOverlappedResult( + lpOverlapped: *OVERLAPPED, +) callconv(WINAPI) i32; + +pub extern "ws2_32" fn freeaddrinfo( + pAddrInfo: ?*addrinfoa, +) callconv(WINAPI) void; + +pub extern "ws2_32" fn FreeAddrInfoEx( + pAddrInfoEx: ?*addrinfoexA, +) callconv(WINAPI) void; + +pub extern "ws2_32" fn getnameinfo( + pSockaddr: *const sockaddr, + SockaddrLength: i32, + pNodeBuffer: ?[*]u8, + NodeBufferSize: u32, + pServiceBuffer: ?[*]u8, + ServiceBufferName: u32, + Flags: i32, +) callconv(WINAPI) i32; + +pub extern "IPHLPAPI" fn if_nametoindex( + InterfaceName: [*:0]const u8, +) callconv(WINAPI) u32; diff --git a/lib/std/priority_dequeue.zig b/lib/std/priority_dequeue.zig index f0f8d7f6f7..0bb1f13503 100644 --- a/lib/std/priority_dequeue.zig +++ b/lib/std/priority_dequeue.zig @@ -387,17 +387,6 @@ pub fn PriorityDequeue(comptime T: type) type { return; }, }; - self.len = new_len; - } - - /// Reduce length to `new_len`. - pub fn shrinkRetainingCapacity(self: *Self, new_len: usize) void { - assert(new_len <= self.items.len); - - // Cannot shrink to smaller than the current queue size without invalidating the heap property - assert(new_len >= self.len); - - self.len = new_len; } pub fn update(self: *Self, elem: T, new_elem: T) !void { @@ -836,7 +825,7 @@ test "std.PriorityDequeue: iterator while empty" { try expectEqual(it.next(), null); } -test "std.PriorityDequeue: shrinkRetainingCapacity and shrinkAndFree" { +test "std.PriorityDequeue: shrinkAndFree" { var queue = PDQ.init(testing.allocator, lessThanComparison); defer queue.deinit(); @@ -849,10 +838,6 @@ test "std.PriorityDequeue: shrinkRetainingCapacity and shrinkAndFree" { try expect(queue.capacity() >= 4); try expectEqual(@as(usize, 3), queue.len); - queue.shrinkRetainingCapacity(3); - try expect(queue.capacity() >= 4); - try expectEqual(@as(usize, 3), queue.len); - queue.shrinkAndFree(3); try expectEqual(@as(usize, 3), queue.capacity()); try expectEqual(@as(usize, 3), queue.len); diff --git a/lib/std/priority_queue.zig b/lib/std/priority_queue.zig index 621af4e97f..4e5320a92b 100644 --- a/lib/std/priority_queue.zig +++ b/lib/std/priority_queue.zig @@ -203,17 +203,6 @@ pub fn PriorityQueue(comptime T: type) type { return; }, }; - self.len = new_len; - } - - /// Reduce length to `new_len`. 
- pub fn shrinkRetainingCapacity(self: *Self, new_len: usize) void { - assert(new_len <= self.items.len); - - // Cannot shrink to smaller than the current queue size without invalidating the heap property - assert(new_len >= self.len); - - self.len = new_len; } pub fn update(self: *Self, elem: T, new_elem: T) !void { @@ -495,7 +484,7 @@ test "std.PriorityQueue: iterator while empty" { try expectEqual(it.next(), null); } -test "std.PriorityQueue: shrinkRetainingCapacity and shrinkAndFree" { +test "std.PriorityQueue: shrinkAndFree" { var queue = PQ.init(testing.allocator, lessThan); defer queue.deinit(); @@ -508,10 +497,6 @@ test "std.PriorityQueue: shrinkRetainingCapacity and shrinkAndFree" { try expect(queue.capacity() >= 4); try expectEqual(@as(usize, 3), queue.len); - queue.shrinkRetainingCapacity(3); - try expect(queue.capacity() >= 4); - try expectEqual(@as(usize, 3), queue.len); - queue.shrinkAndFree(3); try expectEqual(@as(usize, 3), queue.capacity()); try expectEqual(@as(usize, 3), queue.len); diff --git a/lib/std/special/c.zig b/lib/std/special/c.zig index 7e5a35565b..1377212870 100644 --- a/lib/std/special/c.zig +++ b/lib/std/special/c.zig @@ -228,7 +228,7 @@ export fn memmove(dest: ?[*]u8, src: ?[*]const u8, n: usize) callconv(.C) ?[*]u8 return dest; } -export fn memcmp(vl: ?[*]const u8, vr: ?[*]const u8, n: usize) callconv(.C) isize { +export fn memcmp(vl: ?[*]const u8, vr: ?[*]const u8, n: usize) callconv(.C) c_int { @setRuntimeSafety(false); var index: usize = 0; @@ -253,7 +253,7 @@ test "memcmp" { try std.testing.expect(memcmp(base_arr[0..], arr3[0..], base_arr.len) < 0); } -export fn bcmp(vl: [*]allowzero const u8, vr: [*]allowzero const u8, n: usize) callconv(.C) isize { +export fn bcmp(vl: [*]allowzero const u8, vr: [*]allowzero const u8, n: usize) callconv(.C) c_int { @setRuntimeSafety(false); var index: usize = 0; diff --git a/lib/std/special/test_runner.zig b/lib/std/special/test_runner.zig index 3c66c64b4b..7d03e4b059 100644 --- a/lib/std/special/test_runner.zig +++ b/lib/std/special/test_runner.zig @@ -14,12 +14,15 @@ var log_err_count: usize = 0; var args_buffer: [std.fs.MAX_PATH_BYTES + std.mem.page_size]u8 = undefined; var args_allocator = std.heap.FixedBufferAllocator.init(&args_buffer); -pub fn main() anyerror!void { +fn processArgs() void { const args = std.process.argsAlloc(&args_allocator.allocator) catch { @panic("Too many bytes passed over the CLI to the test runner"); }; std.testing.zig_exe_path = args[1]; +} +pub fn main() anyerror!void { + processArgs(); const test_fn_list = builtin.test_functions; var ok_count: usize = 0; var skip_count: usize = 0; @@ -84,6 +87,9 @@ pub fn main() anyerror!void { test_node.end(); progress.log("{s}... 
FAIL ({s})\n", .{ test_fn.name, @errorName(err) }); if (progress.terminal == null) std.debug.print("FAIL ({s})\n", .{@errorName(err)}); + if (@errorReturnTrace()) |trace| { + std.debug.dumpStackTrace(trace.*); + } }, } } diff --git a/lib/std/target.zig b/lib/std/target.zig index acca7fd13c..31cec9cf82 100644 --- a/lib/std/target.zig +++ b/lib/std/target.zig @@ -267,7 +267,7 @@ pub const Target = struct { .macos => return .{ .semver = .{ .min = .{ .major = 10, .minor = 13 }, - .max = .{ .major = 11, .minor = 1 }, + .max = .{ .major = 11, .minor = 2 }, }, }, .ios => return .{ @@ -291,19 +291,19 @@ pub const Target = struct { .netbsd => return .{ .semver = .{ .min = .{ .major = 8, .minor = 0 }, - .max = .{ .major = 9, .minor = 0 }, + .max = .{ .major = 9, .minor = 1 }, }, }, .openbsd => return .{ .semver = .{ .min = .{ .major = 6, .minor = 8 }, - .max = .{ .major = 6, .minor = 8 }, + .max = .{ .major = 6, .minor = 9 }, }, }, .dragonfly => return .{ .semver = .{ .min = .{ .major = 5, .minor = 8 }, - .max = .{ .major = 5, .minor = 8 }, + .max = .{ .major = 6, .minor = 0 }, }, }, diff --git a/lib/std/x.zig b/lib/std/x.zig index a123591470..022261bf3b 100644 --- a/lib/std/x.zig +++ b/lib/std/x.zig @@ -7,7 +7,7 @@ const std = @import("std.zig"); pub const os = struct { - pub const Socket = @import("x/os/Socket.zig"); + pub const Socket = @import("x/os/socket.zig").Socket; pub usingnamespace @import("x/os/net.zig"); }; diff --git a/lib/std/x/net/tcp.zig b/lib/std/x/net/tcp.zig index 700038fd6f..f0e341234a 100644 --- a/lib/std/x/net/tcp.zig +++ b/lib/std/x/net/tcp.zig @@ -6,6 +6,7 @@ const std = @import("../../std.zig"); +const io = std.io; const os = std.os; const ip = std.x.net.ip; @@ -58,6 +59,28 @@ pub const Domain = enum(u16) { pub const Client = struct { socket: Socket, + /// Implements `std.io.Reader`. + pub const Reader = struct { + client: Client, + flags: u32, + + /// Implements `readFn` for `std.io.Reader`. + pub fn read(self: Client.Reader, buffer: []u8) !usize { + return self.client.read(buffer, self.flags); + } + }; + + /// Implements `std.io.Writer`. + pub const Writer = struct { + client: Client, + flags: u32, + + /// Implements `writeFn` for `std.io.Writer`. + pub fn write(self: Client.Writer, buffer: []const u8) !usize { + return self.client.write(buffer, self.flags); + } + }; + /// Opens a new client. pub fn init(domain: tcp.Domain, flags: u32) !Client { return Client{ @@ -89,41 +112,46 @@ pub const Client = struct { return self.socket.connect(address.into()); } - /// Read data from the socket into the buffer provided. It returns the - /// number of bytes read into the buffer provided. - pub fn read(self: Client, buf: []u8) !usize { - return self.socket.read(buf); + /// Extracts the error set of a function. + /// TODO: remove after Socket.{read, write} error unions are well-defined across different platforms + fn ErrorSetOf(comptime Function: anytype) type { + return @typeInfo(@typeInfo(@TypeOf(Function)).Fn.return_type.?).ErrorUnion.error_set; + } + + /// Wrap `tcp.Client` into `std.io.Reader`. + pub fn reader(self: Client, flags: u32) io.Reader(Client.Reader, ErrorSetOf(Client.Reader.read), Client.Reader.read) { + return .{ .context = .{ .client = self, .flags = flags } }; + } + + /// Wrap `tcp.Client` into `std.io.Writer`. 
+ pub fn writer(self: Client, flags: u32) io.Writer(Client.Writer, ErrorSetOf(Client.Writer.write), Client.Writer.write) { + return .{ .context = .{ .client = self, .flags = flags } }; } /// Read data from the socket into the buffer provided with a set of flags /// specified. It returns the number of bytes read into the buffer provided. - pub fn recv(self: Client, buf: []u8, flags: u32) !usize { - return self.socket.recv(buf, flags); - } - - /// Write a buffer of data provided to the socket. It returns the number - /// of bytes that are written to the socket. - pub fn write(self: Client, buf: []const u8) !usize { - return self.socket.write(buf); - } - - /// Writes multiple I/O vectors to the socket. It returns the number - /// of bytes that are written to the socket. - pub fn writev(self: Client, buffers: []const os.iovec_const) !usize { - return self.socket.writev(buffers); + pub fn read(self: Client, buf: []u8, flags: u32) !usize { + return self.socket.read(buf, flags); } /// Write a buffer of data provided to the socket with a set of flags specified. /// It returns the number of bytes that are written to the socket. - pub fn send(self: Client, buf: []const u8, flags: u32) !usize { - return self.socket.send(buf, flags); + pub fn write(self: Client, buf: []const u8, flags: u32) !usize { + return self.socket.write(buf, flags); } /// Writes multiple I/O vectors with a prepended message header to the socket /// with a set of flags specified. It returns the number of bytes that are /// written to the socket. - pub fn sendmsg(self: Client, msg: os.msghdr_const, flags: u32) !usize { - return self.socket.sendmsg(msg, flags); + pub fn writeVectorized(self: Client, msg: os.msghdr_const, flags: u32) !usize { + return self.socket.writeVectorized(msg, flags); + } + + /// Read multiple I/O vectors with a prepended message header from the socket + /// with a set of flags specified. It returns the number of bytes that were + /// read into the buffer provided. + pub fn readVectorized(self: Client, msg: *os.msghdr, flags: u32) !usize { + return self.socket.readVectorized(msg, flags); } /// Query and return the latest cached error on the client's underlying socket. @@ -146,12 +174,41 @@ pub const Client = struct { return ip.Address.from(try self.socket.getLocalAddress()); } + /// Query the address that the socket is connected to. + pub fn getRemoteAddress(self: Client) !ip.Address { + return ip.Address.from(try self.socket.getRemoteAddress()); + } + + /// Have close() or shutdown() syscalls block until all queued messages in the client have been successfully + /// sent, or if the timeout specified in seconds has been reached. It returns `error.UnsupportedSocketOption` + /// if the host does not support the option for a socket to linger around up until a timeout specified in + /// seconds. + pub fn setLinger(self: Client, timeout_seconds: ?u16) !void { + return self.socket.setLinger(timeout_seconds); + } + + /// Have keep-alive messages be sent periodically. The timing in which keep-alive messages are sent are + /// dependant on operating system settings. It returns `error.UnsupportedSocketOption` if the host does + /// not support periodically sending keep-alive messages on connection-oriented sockets. + pub fn setKeepAlive(self: Client, enabled: bool) !void { + return self.socket.setKeepAlive(enabled); + } + /// Disable Nagle's algorithm on a TCP socket. It returns `error.UnsupportedSocketOption` if /// the host does not support sockets disabling Nagle's algorithm. 
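A usage sketch for the `reader()`/`writer()` adapters defined above, assuming the module is reachable as `std.x.net.tcp` and that the client is already connected; `echoOnce` is a hypothetical helper, not part of the patch:

const std = @import("std");
const tcp = std.x.net.tcp;

fn echoOnce(client: tcp.Client) !void {
    var buf: [256]u8 = undefined;

    // Both adapters capture the client plus the flags that get forwarded to
    // the underlying Socket.read/Socket.write calls.
    const reader = client.reader(0);
    const writer = client.writer(0);

    const n = try reader.read(&buf);
    try writer.writeAll(buf[0..n]);
}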
pub fn setNoDelay(self: Client, enabled: bool) !void { if (comptime @hasDecl(os, "TCP_NODELAY")) { const bytes = mem.asBytes(&@as(usize, @boolToInt(enabled))); - return os.setsockopt(self.socket.fd, os.IPPROTO_TCP, os.TCP_NODELAY, bytes); + return self.socket.setOption(os.IPPROTO_TCP, os.TCP_NODELAY, bytes); + } + return error.UnsupportedSocketOption; + } + + /// Enables TCP Quick ACK on a TCP socket to immediately send rather than delay ACKs when necessary. It returns + /// `error.UnsupportedSocketOption` if the host does not support TCP Quick ACK. + pub fn setQuickACK(self: Client, enabled: bool) !void { + if (comptime @hasDecl(os, "TCP_QUICKACK")) { + return self.socket.setOption(os.IPPROTO_TCP, os.TCP_QUICKACK, mem.asBytes(&@as(u32, @boolToInt(enabled)))); } return error.UnsupportedSocketOption; } @@ -169,7 +226,7 @@ pub const Client = struct { /// Set a timeout on the socket that is to occur if no messages are successfully written /// to its bound destination after a specified number of milliseconds. A subsequent write /// to the socket will thereafter return `error.WouldBlock` should the timeout be exceeded. - pub fn setWriteTimeout(self: Client, milliseconds: usize) !void { + pub fn setWriteTimeout(self: Client, milliseconds: u32) !void { return self.socket.setWriteTimeout(milliseconds); } @@ -177,7 +234,7 @@ pub const Client = struct { /// from its bound destination after a specified number of milliseconds. A subsequent /// read from the socket will thereafter return `error.WouldBlock` should the timeout be /// exceeded. - pub fn setReadTimeout(self: Client, milliseconds: usize) !void { + pub fn setReadTimeout(self: Client, milliseconds: u32) !void { return self.socket.setReadTimeout(milliseconds); } }; @@ -251,16 +308,7 @@ pub const Listener = struct { /// support TCP Fast Open. pub fn setFastOpen(self: Listener, enabled: bool) !void { if (comptime @hasDecl(os, "TCP_FASTOPEN")) { - return os.setsockopt(self.socket.fd, os.IPPROTO_TCP, os.TCP_FASTOPEN, mem.asBytes(&@as(usize, @boolToInt(enabled)))); - } - return error.UnsupportedSocketOption; - } - - /// Enables TCP Quick ACK on a TCP socket to immediately send rather than delay ACKs when necessary. It returns - /// `error.UnsupportedSocketOption` if the host does not support TCP Quick ACK. - pub fn setQuickACK(self: Listener, enabled: bool) !void { - if (comptime @hasDecl(os, "TCP_QUICKACK")) { - return os.setsockopt(self.socket.fd, os.IPPROTO_TCP, os.TCP_QUICKACK, mem.asBytes(&@as(usize, @boolToInt(enabled)))); + return self.socket.setOption(os.IPPROTO_TCP, os.TCP_FASTOPEN, mem.asBytes(&@as(u32, @boolToInt(enabled)))); } return error.UnsupportedSocketOption; } @@ -322,7 +370,7 @@ test "tcp/client: set read timeout of 1 millisecond on blocking client" { defer conn.deinit(); var buf: [1]u8 = undefined; - try testing.expectError(error.WouldBlock, client.read(&buf)); + try testing.expectError(error.WouldBlock, client.reader(0).read(&buf)); } test "tcp/listener: bind to unspecified ipv4 address" { diff --git a/lib/std/x/os/Socket.zig b/lib/std/x/os/Socket.zig deleted file mode 100644 index 3656899aea..0000000000 --- a/lib/std/x/os/Socket.zig +++ /dev/null @@ -1,295 +0,0 @@ -// SPDX-License-Identifier: MIT -// Copyright (c) 2015-2021 Zig Contributors -// This file is part of [zig](https://ziglang.org/), which is MIT licensed. -// The MIT license requires this copyright notice to be included in all copies -// and substantial portions of the software. 
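Stepping back to lib/std/x/net/tcp.zig for a moment: a sketch of how the option setters above compose. `tuneClient` is a hypothetical helper and a connected client is assumed; the point is that `error.UnsupportedSocketOption` marks options that are best-effort on some hosts:

const std = @import("std");
const tcp = std.x.net.tcp;

fn tuneClient(client: tcp.Client) !void {
    // TCP_NODELAY and TCP_QUICKACK are not available everywhere, so treat
    // error.UnsupportedSocketOption as a hint rather than a hard failure.
    client.setNoDelay(true) catch |err| switch (err) {
        error.UnsupportedSocketOption => {},
        else => return err,
    };
    client.setQuickACK(true) catch |err| switch (err) {
        error.UnsupportedSocketOption => {},
        else => return err,
    };

    try client.setKeepAlive(true);
    try client.setLinger(5); // block close()/shutdown() for at most 5 seconds
    try client.setReadTimeout(1000); // milliseconds, matching the new u32 signatures
}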
- -const std = @import("../../std.zig"); -const net = @import("net.zig"); - -const os = std.os; -const fmt = std.fmt; -const mem = std.mem; -const time = std.time; - -/// A generic socket abstraction. -const Socket = @This(); - -/// A socket-address pair. -pub const Connection = struct { - socket: Socket, - address: Socket.Address, - - /// Enclose a socket and address into a socket-address pair. - pub fn from(socket: Socket, address: Socket.Address) Socket.Connection { - return .{ .socket = socket, .address = address }; - } -}; - -/// A generic socket address abstraction. It is safe to directly access and modify -/// the fields of a `Socket.Address`. -pub const Address = union(enum) { - ipv4: net.IPv4.Address, - ipv6: net.IPv6.Address, - - /// Instantiate a new address with a IPv4 host and port. - pub fn initIPv4(host: net.IPv4, port: u16) Socket.Address { - return .{ .ipv4 = .{ .host = host, .port = port } }; - } - - /// Instantiate a new address with a IPv6 host and port. - pub fn initIPv6(host: net.IPv6, port: u16) Socket.Address { - return .{ .ipv6 = .{ .host = host, .port = port } }; - } - - /// Parses a `sockaddr` into a generic socket address. - pub fn fromNative(address: *align(4) const os.sockaddr) Socket.Address { - switch (address.family) { - os.AF_INET => { - const info = @ptrCast(*const os.sockaddr_in, address); - const host = net.IPv4{ .octets = @bitCast([4]u8, info.addr) }; - const port = mem.bigToNative(u16, info.port); - return Socket.Address.initIPv4(host, port); - }, - os.AF_INET6 => { - const info = @ptrCast(*const os.sockaddr_in6, address); - const host = net.IPv6{ .octets = info.addr, .scope_id = info.scope_id }; - const port = mem.bigToNative(u16, info.port); - return Socket.Address.initIPv6(host, port); - }, - else => unreachable, - } - } - - /// Encodes a generic socket address into an extern union that may be reliably - /// casted into a `sockaddr` which may be passed into socket syscalls. - pub fn toNative(self: Socket.Address) extern union { - ipv4: os.sockaddr_in, - ipv6: os.sockaddr_in6, - } { - return switch (self) { - .ipv4 => |address| .{ - .ipv4 = .{ - .addr = @bitCast(u32, address.host.octets), - .port = mem.nativeToBig(u16, address.port), - }, - }, - .ipv6 => |address| .{ - .ipv6 = .{ - .addr = address.host.octets, - .port = mem.nativeToBig(u16, address.port), - .scope_id = address.host.scope_id, - .flowinfo = 0, - }, - }, - }; - } - - /// Returns the number of bytes that make up the `sockaddr` equivalent to the address. - pub fn getNativeSize(self: Socket.Address) u32 { - return switch (self) { - .ipv4 => @sizeOf(os.sockaddr_in), - .ipv6 => @sizeOf(os.sockaddr_in6), - }; - } - - /// Implements the `std.fmt.format` API. - pub fn format( - self: Socket.Address, - comptime layout: []const u8, - opts: fmt.FormatOptions, - writer: anytype, - ) !void { - switch (self) { - .ipv4 => |address| try fmt.format(writer, "{}:{}", .{ address.host, address.port }), - .ipv6 => |address| try fmt.format(writer, "{}:{}", .{ address.host, address.port }), - } - } -}; - -/// The underlying handle of a socket. -fd: os.socket_t, - -/// Open a new socket. -pub fn init(domain: u32, socket_type: u32, protocol: u32) !Socket { - return Socket{ .fd = try os.socket(domain, socket_type, protocol) }; -} - -/// Enclose a socket abstraction over an existing socket file descriptor. -pub fn from(fd: os.socket_t) Socket { - return Socket{ .fd = fd }; -} - -/// Closes the socket. 
-pub fn deinit(self: Socket) void { - os.closeSocket(self.fd); -} - -/// Shutdown either the read side, write side, or all side of the socket. -pub fn shutdown(self: Socket, how: os.ShutdownHow) !void { - return os.shutdown(self.fd, how); -} - -/// Binds the socket to an address. -pub fn bind(self: Socket, address: Socket.Address) !void { - return os.bind(self.fd, @ptrCast(*const os.sockaddr, &address.toNative()), address.getNativeSize()); -} - -/// Start listening for incoming connections on the socket. -pub fn listen(self: Socket, max_backlog_size: u31) !void { - return os.listen(self.fd, max_backlog_size); -} - -/// Have the socket attempt to the connect to an address. -pub fn connect(self: Socket, address: Socket.Address) !void { - return os.connect(self.fd, @ptrCast(*const os.sockaddr, &address.toNative()), address.getNativeSize()); -} - -/// Accept a pending incoming connection queued to the kernel backlog -/// of the socket. -pub fn accept(self: Socket, flags: u32) !Socket.Connection { - var address: os.sockaddr = undefined; - var address_len: u32 = @sizeOf(os.sockaddr); - - const socket = Socket{ .fd = try os.accept(self.fd, &address, &address_len, flags) }; - const socket_address = Socket.Address.fromNative(@alignCast(4, &address)); - - return Socket.Connection.from(socket, socket_address); -} - -/// Read data from the socket into the buffer provided. It returns the -/// number of bytes read into the buffer provided. -pub fn read(self: Socket, buf: []u8) !usize { - return os.read(self.fd, buf); -} - -/// Read data from the socket into the buffer provided with a set of flags -/// specified. It returns the number of bytes read into the buffer provided. -pub fn recv(self: Socket, buf: []u8, flags: u32) !usize { - return os.recv(self.fd, buf, flags); -} - -/// Write a buffer of data provided to the socket. It returns the number -/// of bytes that are written to the socket. -pub fn write(self: Socket, buf: []const u8) !usize { - return os.write(self.fd, buf); -} - -/// Writes multiple I/O vectors to the socket. It returns the number -/// of bytes that are written to the socket. -pub fn writev(self: Socket, buffers: []const os.iovec_const) !usize { - return os.writev(self.fd, buffers); -} - -/// Write a buffer of data provided to the socket with a set of flags specified. -/// It returns the number of bytes that are written to the socket. -pub fn send(self: Socket, buf: []const u8, flags: u32) !usize { - return os.send(self.fd, buf, flags); -} - -/// Writes multiple I/O vectors with a prepended message header to the socket -/// with a set of flags specified. It returns the number of bytes that are -/// written to the socket. -pub fn sendmsg(self: Socket, msg: os.msghdr_const, flags: u32) !usize { - return os.sendmsg(self.fd, msg, flags); -} - -/// Query the address that the socket is locally bounded to. -pub fn getLocalAddress(self: Socket) !Socket.Address { - var address: os.sockaddr = undefined; - var address_len: u32 = @sizeOf(os.sockaddr); - try os.getsockname(self.fd, &address, &address_len); - return Socket.Address.fromNative(@alignCast(4, &address)); -} - -/// Query and return the latest cached error on the socket. -pub fn getError(self: Socket) !void { - return os.getsockoptError(self.fd); -} - -/// Query the read buffer size of the socket. 
-pub fn getReadBufferSize(self: Socket) !u32 { - var value: u32 = undefined; - var value_len: u32 = @sizeOf(u32); - - const rc = os.system.getsockopt(self.fd, os.SOL_SOCKET, os.SO_RCVBUF, mem.asBytes(&value), &value_len); - return switch (os.errno(rc)) { - 0 => value, - os.EBADF => error.BadFileDescriptor, - os.EFAULT => error.InvalidAddressSpace, - os.EINVAL => error.InvalidSocketOption, - os.ENOPROTOOPT => error.UnknownSocketOption, - os.ENOTSOCK => error.NotASocket, - else => |err| os.unexpectedErrno(err), - }; -} - -/// Query the write buffer size of the socket. -pub fn getWriteBufferSize(self: Socket) !u32 { - var value: u32 = undefined; - var value_len: u32 = @sizeOf(u32); - - const rc = os.system.getsockopt(self.fd, os.SOL_SOCKET, os.SO_SNDBUF, mem.asBytes(&value), &value_len); - return switch (os.errno(rc)) { - 0 => value, - os.EBADF => error.BadFileDescriptor, - os.EFAULT => error.InvalidAddressSpace, - os.EINVAL => error.InvalidSocketOption, - os.ENOPROTOOPT => error.UnknownSocketOption, - os.ENOTSOCK => error.NotASocket, - else => |err| os.unexpectedErrno(err), - }; -} - -/// Allow multiple sockets on the same host to listen on the same address. It returns `error.UnsupportedSocketOption` if -/// the host does not support sockets listening the same address. -pub fn setReuseAddress(self: Socket, enabled: bool) !void { - if (comptime @hasDecl(os, "SO_REUSEADDR")) { - return os.setsockopt(self.fd, os.SOL_SOCKET, os.SO_REUSEADDR, mem.asBytes(&@as(usize, @boolToInt(enabled)))); - } - return error.UnsupportedSocketOption; -} - -/// Allow multiple sockets on the same host to listen on the same port. It returns `error.UnsupportedSocketOption` if -/// the host does not supports sockets listening on the same port. -pub fn setReusePort(self: Socket, enabled: bool) !void { - if (comptime @hasDecl(os, "SO_REUSEPORT")) { - return os.setsockopt(self.fd, os.SOL_SOCKET, os.SO_REUSEPORT, mem.asBytes(&@as(usize, @boolToInt(enabled)))); - } - return error.UnsupportedSocketOption; -} - -/// Set the write buffer size of the socket. -pub fn setWriteBufferSize(self: Socket, size: u32) !void { - return os.setsockopt(self.fd, os.SOL_SOCKET, os.SO_SNDBUF, mem.asBytes(&size)); -} - -/// Set the read buffer size of the socket. -pub fn setReadBufferSize(self: Socket, size: u32) !void { - return os.setsockopt(self.fd, os.SOL_SOCKET, os.SO_RCVBUF, mem.asBytes(&size)); -} - -/// Set a timeout on the socket that is to occur if no messages are successfully written -/// to its bound destination after a specified number of milliseconds. A subsequent write -/// to the socket will thereafter return `error.WouldBlock` should the timeout be exceeded. -pub fn setWriteTimeout(self: Socket, milliseconds: usize) !void { - const timeout = os.timeval{ - .tv_sec = @intCast(i32, milliseconds / time.ms_per_s), - .tv_usec = @intCast(i32, (milliseconds % time.ms_per_s) * time.us_per_ms), - }; - - return os.setsockopt(self.fd, os.SOL_SOCKET, os.SO_SNDTIMEO, mem.asBytes(&timeout)); -} - -/// Set a timeout on the socket that is to occur if no messages are successfully read -/// from its bound destination after a specified number of milliseconds. A subsequent -/// read from the socket will thereafter return `error.WouldBlock` should the timeout be -/// exceeded. 
-pub fn setReadTimeout(self: Socket, milliseconds: usize) !void { - const timeout = os.timeval{ - .tv_sec = @intCast(i32, milliseconds / time.ms_per_s), - .tv_usec = @intCast(i32, (milliseconds % time.ms_per_s) * time.us_per_ms), - }; - - return os.setsockopt(self.fd, os.SOL_SOCKET, os.SO_RCVTIMEO, mem.asBytes(&timeout)); -} diff --git a/lib/std/x/os/net.zig b/lib/std/x/os/net.zig index 7cea83828a..f2f49ea62f 100644 --- a/lib/std/x/os/net.zig +++ b/lib/std/x/os/net.zig @@ -20,6 +20,14 @@ pub fn resolveScopeID(name: []const u8) !u32 { if (comptime @hasDecl(os, "IFNAMESIZE")) { if (name.len >= os.IFNAMESIZE - 1) return error.NameTooLong; + if (comptime builtin.os.tag == .windows) { + var interface_name: [os.IFNAMESIZE]u8 = undefined; + mem.copy(u8, &interface_name, name); + interface_name[name.len] = 0; + + return os.windows.ws2_32.if_nametoindex(@ptrCast([*:0]const u8, &interface_name)); + } + const fd = try os.socket(os.AF_UNIX, os.SOCK_DGRAM, 0); defer os.closeSocket(fd); @@ -31,6 +39,7 @@ pub fn resolveScopeID(name: []const u8) !u32 { return @bitCast(u32, f.ifru.ivalue); } + return error.Unsupported; } diff --git a/lib/std/x/os/socket.zig b/lib/std/x/os/socket.zig new file mode 100644 index 0000000000..963a1adca6 --- /dev/null +++ b/lib/std/x/os/socket.zig @@ -0,0 +1,123 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2015-2021 Zig Contributors +// This file is part of [zig](https://ziglang.org/), which is MIT licensed. +// The MIT license requires this copyright notice to be included in all copies +// and substantial portions of the software. + +const std = @import("../../std.zig"); +const net = @import("net.zig"); + +const os = std.os; +const fmt = std.fmt; +const mem = std.mem; +const time = std.time; +const builtin = std.builtin; + +/// A generic, cross-platform socket abstraction. +pub const Socket = struct { + /// A socket-address pair. + pub const Connection = struct { + socket: Socket, + address: Socket.Address, + + /// Enclose a socket and address into a socket-address pair. + pub fn from(socket: Socket, address: Socket.Address) Socket.Connection { + return .{ .socket = socket, .address = address }; + } + }; + + /// A generic socket address abstraction. It is safe to directly access and modify + /// the fields of a `Socket.Address`. + pub const Address = union(enum) { + ipv4: net.IPv4.Address, + ipv6: net.IPv6.Address, + + /// Instantiate a new address with a IPv4 host and port. + pub fn initIPv4(host: net.IPv4, port: u16) Socket.Address { + return .{ .ipv4 = .{ .host = host, .port = port } }; + } + + /// Instantiate a new address with a IPv6 host and port. + pub fn initIPv6(host: net.IPv6, port: u16) Socket.Address { + return .{ .ipv6 = .{ .host = host, .port = port } }; + } + + /// Parses a `sockaddr` into a generic socket address. 
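A small sketch of the net.zig change above (the `zoneIndex` wrapper and the interface name "eth0" are illustrative and host-specific): on Windows, `resolveScopeID` now resolves an IPv6 zone identifier through `ws2_32.if_nametoindex` instead of the socket-ioctl path used on other hosts, but callers see the same API either way.

const std = @import("std");

fn zoneIndex() !u32 {
    // resolveScopeID is re-exported at std.x.os via the usingnamespace in x.zig.
    return std.x.os.resolveScopeID("eth0");
}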
+ pub fn fromNative(address: *align(4) const os.sockaddr) Socket.Address { + switch (address.family) { + os.AF_INET => { + const info = @ptrCast(*const os.sockaddr_in, address); + const host = net.IPv4{ .octets = @bitCast([4]u8, info.addr) }; + const port = mem.bigToNative(u16, info.port); + return Socket.Address.initIPv4(host, port); + }, + os.AF_INET6 => { + const info = @ptrCast(*const os.sockaddr_in6, address); + const host = net.IPv6{ .octets = info.addr, .scope_id = info.scope_id }; + const port = mem.bigToNative(u16, info.port); + return Socket.Address.initIPv6(host, port); + }, + else => unreachable, + } + } + + /// Encodes a generic socket address into an extern union that may be reliably + /// casted into a `sockaddr` which may be passed into socket syscalls. + pub fn toNative(self: Socket.Address) extern union { + ipv4: os.sockaddr_in, + ipv6: os.sockaddr_in6, + } { + return switch (self) { + .ipv4 => |address| .{ + .ipv4 = .{ + .addr = @bitCast(u32, address.host.octets), + .port = mem.nativeToBig(u16, address.port), + }, + }, + .ipv6 => |address| .{ + .ipv6 = .{ + .addr = address.host.octets, + .port = mem.nativeToBig(u16, address.port), + .scope_id = address.host.scope_id, + .flowinfo = 0, + }, + }, + }; + } + + /// Returns the number of bytes that make up the `sockaddr` equivalent to the address. + pub fn getNativeSize(self: Socket.Address) u32 { + return switch (self) { + .ipv4 => @sizeOf(os.sockaddr_in), + .ipv6 => @sizeOf(os.sockaddr_in6), + }; + } + + /// Implements the `std.fmt.format` API. + pub fn format( + self: Socket.Address, + comptime layout: []const u8, + opts: fmt.FormatOptions, + writer: anytype, + ) !void { + switch (self) { + .ipv4 => |address| try fmt.format(writer, "{}:{}", .{ address.host, address.port }), + .ipv6 => |address| try fmt.format(writer, "{}:{}", .{ address.host, address.port }), + } + } + }; + + /// The underlying handle of a socket. + fd: os.socket_t, + + /// Enclose a socket abstraction over an existing socket file descriptor. + pub fn from(fd: os.socket_t) Socket { + return Socket{ .fd = fd }; + } + + /// Mix in socket syscalls depending on the platform we are compiling against. + pub usingnamespace switch (builtin.os.tag) { + .windows => @import("socket_windows.zig"), + else => @import("socket_posix.zig"), + }.Mixin(Socket); +}; diff --git a/lib/std/x/os/socket_posix.zig b/lib/std/x/os/socket_posix.zig new file mode 100644 index 0000000000..1e54c5c7a2 --- /dev/null +++ b/lib/std/x/os/socket_posix.zig @@ -0,0 +1,251 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2015-2021 Zig Contributors +// This file is part of [zig](https://ziglang.org/), which is MIT licensed. +// The MIT license requires this copyright notice to be included in all copies +// and substantial portions of the software. + +const std = @import("../../std.zig"); + +const os = std.os; +const mem = std.mem; +const time = std.time; + +pub fn Mixin(comptime Socket: type) type { + return struct { + /// Open a new socket. + pub fn init(domain: u32, socket_type: u32, protocol: u32) !Socket { + return Socket{ .fd = try os.socket(domain, socket_type, protocol) }; + } + + /// Closes the socket. + pub fn deinit(self: Socket) void { + os.closeSocket(self.fd); + } + + /// Shutdown either the read side, write side, or all side of the socket. + pub fn shutdown(self: Socket, how: os.ShutdownHow) !void { + return os.shutdown(self.fd, how); + } + + /// Binds the socket to an address. 
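A sketch exercising the `Socket.Address` plumbing shown above, assuming `IPv4` is re-exported at `std.x.os` by the `usingnamespace` in x.zig; the test name and values are illustrative:

const std = @import("std");
const Socket = std.x.os.Socket;
const IPv4 = std.x.os.IPv4; // assumed re-export from x/os/net.zig

test "Socket.Address encodes the port in network byte order" {
    const address = Socket.Address.initIPv4(IPv4{ .octets = .{ 127, 0, 0, 1 } }, 8080);
    const native = address.toNative();

    // toNative() produces an extern union ready to be cast to *const sockaddr,
    // and getNativeSize() reports how many of its bytes are meaningful.
    try std.testing.expectEqual(std.mem.nativeToBig(u16, 8080), native.ipv4.port);
    try std.testing.expectEqual(@as(u32, @sizeOf(std.os.sockaddr_in)), address.getNativeSize());
}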
+ pub fn bind(self: Socket, address: Socket.Address) !void { + return os.bind(self.fd, @ptrCast(*const os.sockaddr, &address.toNative()), address.getNativeSize()); + } + + /// Start listening for incoming connections on the socket. + pub fn listen(self: Socket, max_backlog_size: u31) !void { + return os.listen(self.fd, max_backlog_size); + } + + /// Have the socket attempt to the connect to an address. + pub fn connect(self: Socket, address: Socket.Address) !void { + return os.connect(self.fd, @ptrCast(*const os.sockaddr, &address.toNative()), address.getNativeSize()); + } + + /// Accept a pending incoming connection queued to the kernel backlog + /// of the socket. + pub fn accept(self: Socket, flags: u32) !Socket.Connection { + var address: os.sockaddr_storage = undefined; + var address_len: u32 = @sizeOf(os.sockaddr_storage); + + const socket = Socket{ .fd = try os.accept(self.fd, @ptrCast(*os.sockaddr, &address), &address_len, flags) }; + const socket_address = Socket.Address.fromNative(@ptrCast(*os.sockaddr, &address)); + + return Socket.Connection.from(socket, socket_address); + } + + /// Read data from the socket into the buffer provided with a set of flags + /// specified. It returns the number of bytes read into the buffer provided. + pub fn read(self: Socket, buf: []u8, flags: u32) !usize { + return os.recv(self.fd, buf, flags); + } + + /// Write a buffer of data provided to the socket with a set of flags specified. + /// It returns the number of bytes that are written to the socket. + pub fn write(self: Socket, buf: []const u8, flags: u32) !usize { + return os.send(self.fd, buf, flags); + } + + /// Writes multiple I/O vectors with a prepended message header to the socket + /// with a set of flags specified. It returns the number of bytes that are + /// written to the socket. + pub fn writeVectorized(self: Socket, msg: os.msghdr_const, flags: u32) !usize { + return os.sendmsg(self.fd, msg, flags); + } + + /// Read multiple I/O vectors with a prepended message header from the socket + /// with a set of flags specified. It returns the number of bytes that were + /// read into the buffer provided. + pub fn readVectorized(self: Socket, msg: *os.msghdr, flags: u32) !usize { + if (comptime @hasDecl(os.system, "recvmsg")) { + while (true) { + const rc = os.system.recvmsg(self.fd, msg, flags); + return switch (os.errno(rc)) { + 0 => @intCast(usize, rc), + os.EBADF => unreachable, // always a race condition + os.EFAULT => unreachable, + os.EINVAL => unreachable, + os.ENOTCONN => unreachable, + os.ENOTSOCK => unreachable, + os.EINTR => continue, + os.EAGAIN => error.WouldBlock, + os.ENOMEM => error.SystemResources, + os.ECONNREFUSED => error.ConnectionRefused, + os.ECONNRESET => error.ConnectionResetByPeer, + else => |err| os.unexpectedErrno(err), + }; + } + } + return error.NotSupported; + } + + /// Query the address that the socket is locally bounded to. + pub fn getLocalAddress(self: Socket) !Socket.Address { + var address: os.sockaddr_storage = undefined; + var address_len: u32 = @sizeOf(os.sockaddr_storage); + try os.getsockname(self.fd, @ptrCast(*os.sockaddr, &address), &address_len); + return Socket.Address.fromNative(@ptrCast(*os.sockaddr, &address)); + } + + /// Query the address that the socket is connected to. 
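A minimal sketch of the POSIX mixin in use (`sendGreeting` is a hypothetical helper; the address is assumed to come from `Socket.Address.initIPv4`):

const std = @import("std");
const os = std.os;
const Socket = std.x.os.Socket;

fn sendGreeting(address: Socket.Address) !void {
    const socket = try Socket.init(os.AF_INET, os.SOCK_STREAM, os.IPPROTO_TCP);
    defer socket.deinit();

    try socket.connect(address);

    // The second argument is the flags value forwarded to send()/recv().
    _ = try socket.write("hello", 0);

    var buf: [64]u8 = undefined;
    _ = try socket.read(&buf, 0);
}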
+ pub fn getRemoteAddress(self: Socket) !Socket.Address { + var address: os.sockaddr_storage = undefined; + var address_len: u32 = @sizeOf(os.sockaddr_storage); + try os.getpeername(self.fd, @ptrCast(*os.sockaddr, &address), &address_len); + return Socket.Address.fromNative(@ptrCast(*os.sockaddr, &address)); + } + + /// Query and return the latest cached error on the socket. + pub fn getError(self: Socket) !void { + return os.getsockoptError(self.fd); + } + + /// Query the read buffer size of the socket. + pub fn getReadBufferSize(self: Socket) !u32 { + var value: u32 = undefined; + var value_len: u32 = @sizeOf(u32); + + const rc = os.system.getsockopt(self.fd, os.SOL_SOCKET, os.SO_RCVBUF, mem.asBytes(&value), &value_len); + return switch (os.errno(rc)) { + 0 => value, + os.EBADF => error.BadFileDescriptor, + os.EFAULT => error.InvalidAddressSpace, + os.EINVAL => error.InvalidSocketOption, + os.ENOPROTOOPT => error.UnknownSocketOption, + os.ENOTSOCK => error.NotASocket, + else => |err| os.unexpectedErrno(err), + }; + } + + /// Query the write buffer size of the socket. + pub fn getWriteBufferSize(self: Socket) !u32 { + var value: u32 = undefined; + var value_len: u32 = @sizeOf(u32); + + const rc = os.system.getsockopt(self.fd, os.SOL_SOCKET, os.SO_SNDBUF, mem.asBytes(&value), &value_len); + return switch (os.errno(rc)) { + 0 => value, + os.EBADF => error.BadFileDescriptor, + os.EFAULT => error.InvalidAddressSpace, + os.EINVAL => error.InvalidSocketOption, + os.ENOPROTOOPT => error.UnknownSocketOption, + os.ENOTSOCK => error.NotASocket, + else => |err| os.unexpectedErrno(err), + }; + } + + /// Set a socket option. + pub fn setOption(self: Socket, level: u32, code: u32, value: []const u8) !void { + return os.setsockopt(self.fd, level, code, value); + } + + /// Have close() or shutdown() syscalls block until all queued messages in the socket have been successfully + /// sent, or if the timeout specified in seconds has been reached. It returns `error.UnsupportedSocketOption` + /// if the host does not support the option for a socket to linger around up until a timeout specified in + /// seconds. + pub fn setLinger(self: Socket, timeout_seconds: ?u16) !void { + if (comptime @hasDecl(os, "SO_LINGER")) { + const settings = extern struct { + l_onoff: c_int, + l_linger: c_int, + }{ + .l_onoff = @intCast(c_int, @boolToInt(timeout_seconds != null)), + .l_linger = if (timeout_seconds) |seconds| @intCast(c_int, seconds) else 0, + }; + + return self.setOption(os.SOL_SOCKET, os.SO_LINGER, mem.asBytes(&settings)); + } + + return error.UnsupportedSocketOption; + } + + /// On connection-oriented sockets, have keep-alive messages be sent periodically. The timing in which keep-alive + /// messages are sent are dependant on operating system settings. It returns `error.UnsupportedSocketOption` if + /// the host does not support periodically sending keep-alive messages on connection-oriented sockets. + pub fn setKeepAlive(self: Socket, enabled: bool) !void { + if (comptime @hasDecl(os, "SO_KEEPALIVE")) { + return self.setOption(os.SOL_SOCKET, os.SO_KEEPALIVE, mem.asBytes(&@as(u32, @boolToInt(enabled)))); + } + return error.UnsupportedSocketOption; + } + + /// Allow multiple sockets on the same host to listen on the same address. It returns `error.UnsupportedSocketOption` if + /// the host does not support sockets listening the same address. 
+ pub fn setReuseAddress(self: Socket, enabled: bool) !void { + if (comptime @hasDecl(os, "SO_REUSEADDR")) { + return self.setOption(os.SOL_SOCKET, os.SO_REUSEADDR, mem.asBytes(&@as(u32, @boolToInt(enabled)))); + } + return error.UnsupportedSocketOption; + } + + /// Allow multiple sockets on the same host to listen on the same port. It returns `error.UnsupportedSocketOption` if + /// the host does not supports sockets listening on the same port. + pub fn setReusePort(self: Socket, enabled: bool) !void { + if (comptime @hasDecl(os, "SO_REUSEPORT")) { + return self.setOption(os.SOL_SOCKET, os.SO_REUSEPORT, mem.asBytes(&@as(u32, @boolToInt(enabled)))); + } + return error.UnsupportedSocketOption; + } + + /// Set the write buffer size of the socket. + pub fn setWriteBufferSize(self: Socket, size: u32) !void { + return self.setOption(os.SOL_SOCKET, os.SO_SNDBUF, mem.asBytes(&size)); + } + + /// Set the read buffer size of the socket. + pub fn setReadBufferSize(self: Socket, size: u32) !void { + return self.setOption(os.SOL_SOCKET, os.SO_RCVBUF, mem.asBytes(&size)); + } + + /// WARNING: Timeouts only affect blocking sockets. It is undefined behavior if a timeout is + /// set on a non-blocking socket. + /// + /// Set a timeout on the socket that is to occur if no messages are successfully written + /// to its bound destination after a specified number of milliseconds. A subsequent write + /// to the socket will thereafter return `error.WouldBlock` should the timeout be exceeded. + pub fn setWriteTimeout(self: Socket, milliseconds: usize) !void { + const timeout = os.timeval{ + .tv_sec = @intCast(i32, milliseconds / time.ms_per_s), + .tv_usec = @intCast(i32, (milliseconds % time.ms_per_s) * time.us_per_ms), + }; + + return self.setOption(os.SOL_SOCKET, os.SO_SNDTIMEO, mem.asBytes(&timeout)); + } + + /// WARNING: Timeouts only affect blocking sockets. It is undefined behavior if a timeout is + /// set on a non-blocking socket. + /// + /// Set a timeout on the socket that is to occur if no messages are successfully read + /// from its bound destination after a specified number of milliseconds. A subsequent + /// read from the socket will thereafter return `error.WouldBlock` should the timeout be + /// exceeded. + pub fn setReadTimeout(self: Socket, milliseconds: usize) !void { + const timeout = os.timeval{ + .tv_sec = @intCast(i32, milliseconds / time.ms_per_s), + .tv_usec = @intCast(i32, (milliseconds % time.ms_per_s) * time.us_per_ms), + }; + + return self.setOption(os.SOL_SOCKET, os.SO_RCVTIMEO, mem.asBytes(&timeout)); + } + }; +} diff --git a/lib/std/x/os/socket_windows.zig b/lib/std/x/os/socket_windows.zig new file mode 100644 index 0000000000..6dd1f9a6a9 --- /dev/null +++ b/lib/std/x/os/socket_windows.zig @@ -0,0 +1,448 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2015-2021 Zig Contributors +// This file is part of [zig](https://ziglang.org/), which is MIT licensed. +// The MIT license requires this copyright notice to be included in all copies +// and substantial portions of the software. + +const std = @import("../../std.zig"); +const net = @import("net.zig"); + +const os = std.os; +const mem = std.mem; + +const windows = std.os.windows; +const ws2_32 = windows.ws2_32; + +pub fn Mixin(comptime Socket: type) type { + return struct { + /// Open a new socket. 
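Rounding out the POSIX mixin, a sketch of the accept path (`serveOnce` is a hypothetical helper):

const std = @import("std");
const os = std.os;
const Socket = std.x.os.Socket;

fn serveOnce(address: Socket.Address) !void {
    const listener = try Socket.init(os.AF_INET, os.SOCK_STREAM, os.IPPROTO_TCP);
    defer listener.deinit();

    try listener.setReuseAddress(true);
    try listener.bind(address);
    try listener.listen(128);

    // accept() yields a Socket.Connection: the accepted socket plus the peer address.
    const connection = try listener.accept(0);
    defer connection.socket.deinit();

    var buf: [512]u8 = undefined;
    const n = try connection.socket.read(&buf, 0);
    _ = try connection.socket.write(buf[0..n], 0);
}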
+ pub fn init(domain: u32, socket_type: u32, protocol: u32) !Socket { + var filtered_socket_type = socket_type & ~@as(u32, os.SOCK_CLOEXEC); + + var filtered_flags: u32 = ws2_32.WSA_FLAG_OVERLAPPED; + if (socket_type & os.SOCK_CLOEXEC != 0) { + filtered_flags |= ws2_32.WSA_FLAG_NO_HANDLE_INHERIT; + } + + const fd = ws2_32.WSASocketW( + @intCast(i32, domain), + @intCast(i32, filtered_socket_type), + @intCast(i32, protocol), + null, + 0, + filtered_flags, + ); + if (fd == ws2_32.INVALID_SOCKET) { + return switch (ws2_32.WSAGetLastError()) { + .WSANOTINITIALISED => { + _ = try windows.WSAStartup(2, 2); + return Socket.init(domain, socket_type, protocol); + }, + .WSAEAFNOSUPPORT => error.AddressFamilyNotSupported, + .WSAEMFILE => error.ProcessFdQuotaExceeded, + .WSAENOBUFS => error.SystemResources, + .WSAEPROTONOSUPPORT => error.ProtocolNotSupported, + else => |err| windows.unexpectedWSAError(err), + }; + } + + return Socket{ .fd = fd }; + } + + /// Closes the socket. + pub fn deinit(self: Socket) void { + _ = ws2_32.closesocket(self.fd); + } + + /// Shutdown either the read side, write side, or all side of the socket. + pub fn shutdown(self: Socket, how: os.ShutdownHow) !void { + const rc = ws2_32.shutdown(self.fd, switch (how) { + .recv => ws2_32.SD_RECEIVE, + .send => ws2_32.SD_SEND, + .both => ws2_32.SD_BOTH, + }); + if (rc == ws2_32.SOCKET_ERROR) { + return switch (ws2_32.WSAGetLastError()) { + .WSAECONNABORTED => return error.ConnectionAborted, + .WSAECONNRESET => return error.ConnectionResetByPeer, + .WSAEINPROGRESS => return error.BlockingOperationInProgress, + .WSAEINVAL => unreachable, + .WSAENETDOWN => return error.NetworkSubsystemFailed, + .WSAENOTCONN => return error.SocketNotConnected, + .WSAENOTSOCK => unreachable, + .WSANOTINITIALISED => unreachable, + else => |err| return windows.unexpectedWSAError(err), + }; + } + } + + /// Binds the socket to an address. + pub fn bind(self: Socket, address: Socket.Address) !void { + const rc = ws2_32.bind(self.fd, @ptrCast(*const ws2_32.sockaddr, &address.toNative()), @intCast(c_int, address.getNativeSize())); + if (rc == ws2_32.SOCKET_ERROR) { + return switch (ws2_32.WSAGetLastError()) { + .WSAENETDOWN => error.NetworkSubsystemFailed, + .WSAEACCES => error.AccessDenied, + .WSAEADDRINUSE => error.AddressInUse, + .WSAEADDRNOTAVAIL => error.AddressNotAvailable, + .WSAEFAULT => error.BadAddress, + .WSAEINPROGRESS => error.WouldBlock, + .WSAEINVAL => error.AlreadyBound, + .WSAENOBUFS => error.NoEphemeralPortsAvailable, + .WSAENOTSOCK => error.NotASocket, + else => |err| windows.unexpectedWSAError(err), + }; + } + } + + /// Start listening for incoming connections on the socket. + pub fn listen(self: Socket, max_backlog_size: u31) !void { + const rc = ws2_32.listen(self.fd, max_backlog_size); + if (rc == ws2_32.SOCKET_ERROR) { + return switch (ws2_32.WSAGetLastError()) { + .WSAENETDOWN => error.NetworkSubsystemFailed, + .WSAEADDRINUSE => error.AddressInUse, + .WSAEISCONN => error.AlreadyConnected, + .WSAEINVAL => error.SocketNotBound, + .WSAEMFILE, .WSAENOBUFS => error.SystemResources, + .WSAENOTSOCK => error.FileDescriptorNotASocket, + .WSAEOPNOTSUPP => error.OperationNotSupported, + .WSAEINPROGRESS => error.WouldBlock, + else => |err| windows.unexpectedWSAError(err), + }; + } + } + + /// Have the socket attempt to the connect to an address. 
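A sketch of the portable flag handling in `init` above (`openNonInheritable` is a hypothetical helper): callers keep passing POSIX-style flags and the Windows mixin translates them.

const std = @import("std");
const os = std.os;
const Socket = std.x.os.Socket;

fn openNonInheritable() !Socket {
    // On Windows the mixin strips SOCK_CLOEXEC and asks WSASocketW for
    // WSA_FLAG_NO_HANDLE_INHERIT instead; on POSIX the flag reaches socket() as-is.
    return Socket.init(os.AF_INET, os.SOCK_STREAM | os.SOCK_CLOEXEC, os.IPPROTO_TCP);
}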
+ pub fn connect(self: Socket, address: Socket.Address) !void { + const rc = ws2_32.connect(self.fd, @ptrCast(*const ws2_32.sockaddr, &address.toNative()), @intCast(c_int, address.getNativeSize())); + if (rc == ws2_32.SOCKET_ERROR) { + return switch (ws2_32.WSAGetLastError()) { + .WSAEADDRINUSE => error.AddressInUse, + .WSAEADDRNOTAVAIL => error.AddressNotAvailable, + .WSAECONNREFUSED => error.ConnectionRefused, + .WSAETIMEDOUT => error.ConnectionTimedOut, + .WSAEFAULT => error.BadAddress, + .WSAEINVAL => error.ListeningSocket, + .WSAEISCONN => error.AlreadyConnected, + .WSAENOTSOCK => error.NotASocket, + .WSAEACCES => error.BroadcastNotEnabled, + .WSAENOBUFS => error.SystemResources, + .WSAEAFNOSUPPORT => error.AddressFamilyNotSupported, + .WSAEINPROGRESS, .WSAEWOULDBLOCK => error.WouldBlock, + .WSAEHOSTUNREACH, .WSAENETUNREACH => error.NetworkUnreachable, + else => |err| windows.unexpectedWSAError(err), + }; + } + } + + /// Accept a pending incoming connection queued to the kernel backlog + /// of the socket. + pub fn accept(self: Socket, flags: u32) !Socket.Connection { + var address: ws2_32.sockaddr_storage = undefined; + var address_len: c_int = @sizeOf(ws2_32.sockaddr_storage); + + const rc = ws2_32.accept(self.fd, @ptrCast(*ws2_32.sockaddr, &address), &address_len); + if (rc == ws2_32.INVALID_SOCKET) { + return switch (ws2_32.WSAGetLastError()) { + .WSANOTINITIALISED => unreachable, + .WSAECONNRESET => error.ConnectionResetByPeer, + .WSAEFAULT => unreachable, + .WSAEINVAL => error.SocketNotListening, + .WSAEMFILE => error.ProcessFdQuotaExceeded, + .WSAENETDOWN => error.NetworkSubsystemFailed, + .WSAENOBUFS => error.FileDescriptorNotASocket, + .WSAEOPNOTSUPP => error.OperationNotSupported, + .WSAEWOULDBLOCK => error.WouldBlock, + else => |err| windows.unexpectedWSAError(err), + }; + } + + const socket = Socket.from(rc); + const socket_address = Socket.Address.fromNative(@ptrCast(*ws2_32.sockaddr, &address)); + + return Socket.Connection.from(socket, socket_address); + } + + /// Read data from the socket into the buffer provided with a set of flags + /// specified. It returns the number of bytes read into the buffer provided. + pub fn read(self: Socket, buf: []u8, flags: u32) !usize { + var bufs = &[_]ws2_32.WSABUF{.{ .len = @intCast(u32, buf.len), .buf = buf.ptr }}; + var num_bytes: u32 = undefined; + var flags_ = flags; + + const rc = ws2_32.WSARecv(self.fd, bufs, 1, &num_bytes, &flags_, null, null); + if (rc == ws2_32.SOCKET_ERROR) { + return switch (ws2_32.WSAGetLastError()) { + .WSAECONNABORTED => error.ConnectionAborted, + .WSAECONNRESET => error.ConnectionResetByPeer, + .WSAEDISCON => error.ConnectionClosedByPeer, + .WSAEFAULT => error.BadBuffer, + .WSAEINPROGRESS, + .WSAEWOULDBLOCK, + .WSA_IO_PENDING, + .WSAETIMEDOUT, + => error.WouldBlock, + .WSAEINTR => error.Cancelled, + .WSAEINVAL => error.SocketNotBound, + .WSAEMSGSIZE => error.MessageTooLarge, + .WSAENETDOWN => error.NetworkSubsystemFailed, + .WSAENETRESET => error.NetworkReset, + .WSAENOTCONN => error.SocketNotConnected, + .WSAENOTSOCK => error.FileDescriptorNotASocket, + .WSAEOPNOTSUPP => error.OperationNotSupported, + .WSAESHUTDOWN => error.AlreadyShutdown, + .WSA_OPERATION_ABORTED => error.OperationAborted, + else => |err| windows.unexpectedWSAError(err), + }; + } + + return @intCast(usize, num_bytes); + } + + /// Write a buffer of data provided to the socket with a set of flags specified. + /// It returns the number of bytes that are written to the socket. 
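Because the error mapping above folds WSAETIMEDOUT and WSAEWOULDBLOCK into `error.WouldBlock`, the same error the POSIX mixin produces, read deadlines can be written once for every platform. A sketch, assuming a blocking socket (per the timeout warnings below) and a hypothetical `readWithDeadline` helper:

const std = @import("std");
const Socket = std.x.os.Socket;

fn readWithDeadline(socket: Socket, buf: []u8) !?usize {
    try socket.setReadTimeout(250); // milliseconds

    const n = socket.read(buf, 0) catch |err| switch (err) {
        error.WouldBlock => return null, // deadline hit, no data yet
        else => return err,
    };
    return n;
}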
+ pub fn write(self: Socket, buf: []const u8, flags: u32) !usize { + var bufs = &[_]ws2_32.WSABUF{.{ .len = @intCast(u32, buf.len), .buf = @intToPtr([*]u8, @ptrToInt(buf.ptr)) }}; + var num_bytes: u32 = undefined; + + const rc = ws2_32.WSASend(self.fd, bufs, 1, &num_bytes, flags, null, null); + if (rc == ws2_32.SOCKET_ERROR) { + return switch (ws2_32.WSAGetLastError()) { + .WSAECONNABORTED => error.ConnectionAborted, + .WSAECONNRESET => error.ConnectionResetByPeer, + .WSAEFAULT => error.BadBuffer, + .WSAEINPROGRESS, + .WSAEWOULDBLOCK, + .WSA_IO_PENDING, + .WSAETIMEDOUT, + => error.WouldBlock, + .WSAEINTR => error.Cancelled, + .WSAEINVAL => error.SocketNotBound, + .WSAEMSGSIZE => error.MessageTooLarge, + .WSAENETDOWN => error.NetworkSubsystemFailed, + .WSAENETRESET => error.NetworkReset, + .WSAENOBUFS => error.BufferDeadlock, + .WSAENOTCONN => error.SocketNotConnected, + .WSAENOTSOCK => error.FileDescriptorNotASocket, + .WSAEOPNOTSUPP => error.OperationNotSupported, + .WSAESHUTDOWN => error.AlreadyShutdown, + .WSA_OPERATION_ABORTED => error.OperationAborted, + else => |err| windows.unexpectedWSAError(err), + }; + } + + return @intCast(usize, num_bytes); + } + + /// Writes multiple I/O vectors with a prepended message header to the socket + /// with a set of flags specified. It returns the number of bytes that are + /// written to the socket. + pub fn writeVectorized(self: Socket, msg: ws2_32.msghdr_const, flags: u32) !usize { + const call = try windows.loadWinsockExtensionFunction(ws2_32.LPFN_WSASENDMSG, self.fd, ws2_32.WSAID_WSASENDMSG); + + var num_bytes: u32 = undefined; + + const rc = call(self.fd, &msg, flags, &num_bytes, null, null); + if (rc == ws2_32.SOCKET_ERROR) { + return switch (ws2_32.WSAGetLastError()) { + .WSAECONNABORTED => error.ConnectionAborted, + .WSAECONNRESET => error.ConnectionResetByPeer, + .WSAEFAULT => error.BadBuffer, + .WSAEINPROGRESS, + .WSAEWOULDBLOCK, + .WSA_IO_PENDING, + .WSAETIMEDOUT, + => error.WouldBlock, + .WSAEINTR => error.Cancelled, + .WSAEINVAL => error.SocketNotBound, + .WSAEMSGSIZE => error.MessageTooLarge, + .WSAENETDOWN => error.NetworkSubsystemFailed, + .WSAENETRESET => error.NetworkReset, + .WSAENOBUFS => error.BufferDeadlock, + .WSAENOTCONN => error.SocketNotConnected, + .WSAENOTSOCK => error.FileDescriptorNotASocket, + .WSAEOPNOTSUPP => error.OperationNotSupported, + .WSAESHUTDOWN => error.AlreadyShutdown, + .WSA_OPERATION_ABORTED => error.OperationAborted, + else => |err| windows.unexpectedWSAError(err), + }; + } + + return @intCast(usize, num_bytes); + } + + /// Read multiple I/O vectors with a prepended message header from the socket + /// with a set of flags specified. It returns the number of bytes that were + /// read into the buffer provided. 
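`write` (like the send()/WSASend calls beneath it) may perform a short write, so callers that need the whole buffer delivered loop on the returned count. A sketch with a hypothetical `writeAll` helper:

const std = @import("std");
const Socket = std.x.os.Socket;

fn writeAll(socket: Socket, bytes: []const u8, flags: u32) !void {
    // Socket.write returns how many bytes were accepted; keep going until
    // the whole slice has been handed to the kernel.
    var index: usize = 0;
    while (index < bytes.len) {
        index += try socket.write(bytes[index..], flags);
    }
}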
+ pub fn readVectorized(self: Socket, msg: *ws2_32.msghdr, flags: u32) !usize { + const call = try windows.loadWinsockExtensionFunction(ws2_32.LPFN_WSARECVMSG, self.fd, ws2_32.WSAID_WSARECVMSG); + + var num_bytes: u32 = undefined; + + const rc = call(self.fd, msg, &num_bytes, null, null); + if (rc == ws2_32.SOCKET_ERROR) { + return switch (ws2_32.WSAGetLastError()) { + .WSAECONNABORTED => error.ConnectionAborted, + .WSAECONNRESET => error.ConnectionResetByPeer, + .WSAEDISCON => error.ConnectionClosedByPeer, + .WSAEFAULT => error.BadBuffer, + .WSAEINPROGRESS, + .WSAEWOULDBLOCK, + .WSA_IO_PENDING, + .WSAETIMEDOUT, + => error.WouldBlock, + .WSAEINTR => error.Cancelled, + .WSAEINVAL => error.SocketNotBound, + .WSAEMSGSIZE => error.MessageTooLarge, + .WSAENETDOWN => error.NetworkSubsystemFailed, + .WSAENETRESET => error.NetworkReset, + .WSAENOTCONN => error.SocketNotConnected, + .WSAENOTSOCK => error.FileDescriptorNotASocket, + .WSAEOPNOTSUPP => error.OperationNotSupported, + .WSAESHUTDOWN => error.AlreadyShutdown, + .WSA_OPERATION_ABORTED => error.OperationAborted, + else => |err| windows.unexpectedWSAError(err), + }; + } + + return @intCast(usize, num_bytes); + } + + /// Query the address that the socket is locally bounded to. + pub fn getLocalAddress(self: Socket) !Socket.Address { + var address: ws2_32.sockaddr_storage = undefined; + var address_len: c_int = @sizeOf(ws2_32.sockaddr_storage); + + const rc = ws2_32.getsockname(self.fd, @ptrCast(*ws2_32.sockaddr, &address), &address_len); + if (rc == ws2_32.SOCKET_ERROR) { + return switch (ws2_32.WSAGetLastError()) { + .WSANOTINITIALISED => unreachable, + .WSAEFAULT => unreachable, + .WSAENETDOWN => error.NetworkSubsystemFailed, + .WSAENOTSOCK => error.FileDescriptorNotASocket, + .WSAEINVAL => error.SocketNotBound, + else => |err| windows.unexpectedWSAError(err), + }; + } + + return Socket.Address.fromNative(@ptrCast(*ws2_32.sockaddr, &address)); + } + + /// Query the address that the socket is connected to. + pub fn getRemoteAddress(self: Socket) !Socket.Address { + var address: ws2_32.sockaddr_storage = undefined; + var address_len: c_int = @sizeOf(ws2_32.sockaddr_storage); + + const rc = ws2_32.getpeername(self.fd, @ptrCast(*ws2_32.sockaddr, &address), &address_len); + if (rc == ws2_32.SOCKET_ERROR) { + return switch (ws2_32.WSAGetLastError()) { + .WSANOTINITIALISED => unreachable, + .WSAEFAULT => unreachable, + .WSAENETDOWN => error.NetworkSubsystemFailed, + .WSAENOTSOCK => error.FileDescriptorNotASocket, + .WSAEINVAL => error.SocketNotBound, + else => |err| windows.unexpectedWSAError(err), + }; + } + + return Socket.Address.fromNative(@ptrCast(*ws2_32.sockaddr, &address)); + } + + /// Query and return the latest cached error on the socket. + pub fn getError(self: Socket) !void { + return {}; + } + + /// Query the read buffer size of the socket. + pub fn getReadBufferSize(self: Socket) !u32 { + return 0; + } + + /// Query the write buffer size of the socket. + pub fn getWriteBufferSize(self: Socket) !u32 { + return 0; + } + + /// Set a socket option. 
+        pub fn setOption(self: Socket, level: u32, code: u32, value: []const u8) !void {
+            const rc = ws2_32.setsockopt(self.fd, @intCast(i32, level), @intCast(i32, code), value.ptr, @intCast(i32, value.len));
+            if (rc == ws2_32.SOCKET_ERROR) {
+                return switch (ws2_32.WSAGetLastError()) {
+                    .WSANOTINITIALISED => unreachable,
+                    .WSAENETDOWN => return error.NetworkSubsystemFailed,
+                    .WSAEFAULT => unreachable,
+                    .WSAENOTSOCK => return error.FileDescriptorNotASocket,
+                    .WSAEINVAL => return error.SocketNotBound,
+                    else => |err| windows.unexpectedWSAError(err),
+                };
+            }
+        }
+
+        /// Have close() or shutdown() syscalls block until all queued messages in the socket have been successfully
+        /// sent, or until the timeout specified in seconds has been reached. It returns `error.UnsupportedSocketOption`
+        /// if the host does not support having a socket linger until a specified timeout in seconds.
+        pub fn setLinger(self: Socket, timeout_seconds: ?u16) !void {
+            const settings = ws2_32.linger{
+                .l_onoff = @as(u16, @boolToInt(timeout_seconds != null)),
+                .l_linger = if (timeout_seconds) |seconds| seconds else 0,
+            };
+
+            return self.setOption(ws2_32.SOL_SOCKET, ws2_32.SO_LINGER, mem.asBytes(&settings));
+        }
+
+        /// On connection-oriented sockets, have keep-alive messages be sent periodically. The interval at which keep-alive
+        /// messages are sent is dependent on operating system settings. It returns `error.UnsupportedSocketOption` if
+        /// the host does not support periodically sending keep-alive messages on connection-oriented sockets.
+        pub fn setKeepAlive(self: Socket, enabled: bool) !void {
+            return self.setOption(ws2_32.SOL_SOCKET, ws2_32.SO_KEEPALIVE, mem.asBytes(&@as(u32, @boolToInt(enabled))));
+        }
+
+        /// Allow multiple sockets on the same host to listen on the same address. It returns `error.UnsupportedSocketOption` if
+        /// the host does not support sockets listening on the same address.
+        pub fn setReuseAddress(self: Socket, enabled: bool) !void {
+            return self.setOption(ws2_32.SOL_SOCKET, ws2_32.SO_REUSEADDR, mem.asBytes(&@as(u32, @boolToInt(enabled))));
+        }
+
+        /// Allow multiple sockets on the same host to listen on the same port. It returns `error.UnsupportedSocketOption` if
+        /// the host does not support sockets listening on the same port.
+        ///
+        /// TODO: verify if this truly mimics SO_REUSEPORT behavior, or if SO_REUSE_UNICASTPORT provides the correct behavior
+        pub fn setReusePort(self: Socket, enabled: bool) !void {
+            try self.setOption(ws2_32.SOL_SOCKET, ws2_32.SO_BROADCAST, mem.asBytes(&@as(u32, @boolToInt(enabled))));
+            try self.setReuseAddress(enabled);
+        }
+
+        /// Set the write buffer size of the socket.
+        pub fn setWriteBufferSize(self: Socket, size: u32) !void {
+            return self.setOption(ws2_32.SOL_SOCKET, ws2_32.SO_SNDBUF, mem.asBytes(&size));
+        }
+
+        /// Set the read buffer size of the socket.
+        pub fn setReadBufferSize(self: Socket, size: u32) !void {
+            return self.setOption(ws2_32.SOL_SOCKET, ws2_32.SO_RCVBUF, mem.asBytes(&size));
+        }
+
+        /// WARNING: Timeouts only affect blocking sockets. It is undefined behavior if a timeout is
+        /// set on a non-blocking socket.
+        ///
+        /// Set a timeout on the socket that is to occur if no messages are successfully written
+        /// to its bound destination after a specified number of milliseconds. A subsequent write
+        /// to the socket will thereafter return `error.WouldBlock` should the timeout be exceeded.
+ pub fn setWriteTimeout(self: Socket, milliseconds: u32) !void { + return self.setOption(ws2_32.SOL_SOCKET, ws2_32.SO_SNDTIMEO, mem.asBytes(&milliseconds)); + } + + /// WARNING: Timeouts only affect blocking sockets. It is undefined behavior if a timeout is + /// set on a non-blocking socket. + /// + /// Set a timeout on the socket that is to occur if no messages are successfully read + /// from its bound destination after a specified number of milliseconds. A subsequent + /// read from the socket will thereafter return `error.WouldBlock` should the timeout be + /// exceeded. + pub fn setReadTimeout(self: Socket, milliseconds: u32) !void { + return self.setOption(ws2_32.SOL_SOCKET, ws2_32.SO_RCVTIMEO, mem.asBytes(&milliseconds)); + } + }; +} diff --git a/lib/std/zig/parse.zig b/lib/std/zig/parse.zig index d6880ad273..b0da57f0e2 100644 --- a/lib/std/zig/parse.zig +++ b/lib/std/zig/parse.zig @@ -639,7 +639,7 @@ const Parser = struct { }; } - /// FnProto <- KEYWORD_fn IDENTIFIER? LPAREN ParamDeclList RPAREN ByteAlign? LinkSection? CallConv? EXCLAMATIONMARK? (Keyword_anytype / TypeExpr) + /// FnProto <- KEYWORD_fn IDENTIFIER? LPAREN ParamDeclList RPAREN ByteAlign? LinkSection? CallConv? EXCLAMATIONMARK? TypeExpr fn parseFnProto(p: *Parser) !Node.Index { const fn_token = p.eatToken(.keyword_fn) orelse return null_node; diff --git a/lib/std/zig/system.zig b/lib/std/zig/system.zig index 5792e7f115..4630fd0db4 100644 --- a/lib/std/zig/system.zig +++ b/lib/std/zig/system.zig @@ -209,11 +209,6 @@ pub const NativeTargetInfo = struct { dynamic_linker: DynamicLinker = DynamicLinker{}, - /// Only some architectures have CPU detection implemented. This field reveals whether - /// CPU detection actually occurred. When this is `true` it means that the reported - /// CPU is baseline only because of a missing implementation for that architecture. - cpu_detection_unimplemented: bool = false, - pub const DynamicLinker = Target.DynamicLinker; pub const DetectError = error{ @@ -258,28 +253,86 @@ pub const NativeTargetInfo = struct { os.version_range.windows.max = detected_version; }, .macos => try macos.detect(&os), - .freebsd => { - var osreldate: u32 = undefined; - var len: usize = undefined; + .freebsd, .netbsd, .dragonfly => { + const key = switch (Target.current.os.tag) { + .freebsd => "kern.osreldate", + .netbsd, .dragonfly => "kern.osrevision", + else => unreachable, + }; + var value: u32 = undefined; + var len: usize = @sizeOf(@TypeOf(value)); - std.os.sysctlbynameZ("kern.osreldate", &osreldate, &len, null, 0) catch |err| switch (err) { + std.os.sysctlbynameZ(key, &value, &len, null, 0) catch |err| switch (err) { error.NameTooLong => unreachable, // constant, known good value error.PermissionDenied => unreachable, // only when setting values, error.SystemResources => unreachable, // memory already on the stack error.UnknownName => unreachable, // constant, known good value - error.Unexpected => unreachable, // EFAULT: stack should be safe, EISDIR/ENOTDIR: constant, known good value + error.Unexpected => return error.OSVersionDetectionFail, }; - // https://www.freebsd.org/doc/en_US.ISO8859-1/books/porters-handbook/versions.html - // Major * 100,000 has been convention since FreeBSD 2.2 (1997) - // Minor * 1(0),000 summed has been convention since FreeBSD 2.2 (1997) - // e.g. 
492101 = 4.11-STABLE = 4.(9+2) - const major = osreldate / 100_000; - const minor1 = osreldate % 100_000 / 10_000; // usually 0 since 5.1 - const minor2 = osreldate % 10_000 / 1_000; // 0 before 5.1, minor version since - const patch = osreldate % 1_000; - os.version_range.semver.min = .{ .major = major, .minor = minor1 + minor2, .patch = patch }; - os.version_range.semver.max = .{ .major = major, .minor = minor1 + minor2, .patch = patch }; + switch (Target.current.os.tag) { + .freebsd => { + // https://www.freebsd.org/doc/en_US.ISO8859-1/books/porters-handbook/versions.html + // Major * 100,000 has been convention since FreeBSD 2.2 (1997) + // Minor * 1(0),000 summed has been convention since FreeBSD 2.2 (1997) + // e.g. 492101 = 4.11-STABLE = 4.(9+2) + const major = value / 100_000; + const minor1 = value % 100_000 / 10_000; // usually 0 since 5.1 + const minor2 = value % 10_000 / 1_000; // 0 before 5.1, minor version since + const patch = value % 1_000; + os.version_range.semver.min = .{ .major = major, .minor = minor1 + minor2, .patch = patch }; + os.version_range.semver.max = os.version_range.semver.min; + }, + .netbsd => { + // #define __NetBSD_Version__ MMmmrrpp00 + // + // M = major version + // m = minor version; a minor number of 99 indicates current. + // r = 0 (*) + // p = patchlevel + const major = value / 100_000_000; + const minor = value % 100_000_000 / 1_000_000; + const patch = value % 10_000 / 100; + os.version_range.semver.min = .{ .major = major, .minor = minor, .patch = patch }; + os.version_range.semver.max = os.version_range.semver.min; + }, + .dragonfly => { + // https://github.com/DragonFlyBSD/DragonFlyBSD/blob/cb2cde83771754aeef9bb3251ee48959138dec87/Makefile.inc1#L15-L17 + // flat base10 format: Mmmmpp + // M = major + // m = minor; odd-numbers indicate current dev branch + // p = patch + const major = value / 100_000; + const minor = value % 100_000 / 100; + const patch = value % 100; + os.version_range.semver.min = .{ .major = major, .minor = minor, .patch = patch }; + os.version_range.semver.max = os.version_range.semver.min; + }, + else => unreachable, + } + }, + .openbsd => { + const mib: [2]c_int = [_]c_int{ + std.os.CTL_KERN, + std.os.KERN_OSRELEASE, + }; + var buf: [64]u8 = undefined; + var len: usize = buf.len; + + std.os.sysctl(&mib, &buf, &len, null, 0) catch |err| switch (err) { + error.NameTooLong => unreachable, // constant, known good value + error.PermissionDenied => unreachable, // only when setting values, + error.SystemResources => unreachable, // memory already on the stack + error.UnknownName => unreachable, // constant, known good value + error.Unexpected => return error.OSVersionDetectionFail, + }; + + if (std.builtin.Version.parse(buf[0 .. len - 1])) |ver| { + os.version_range.semver.min = ver; + os.version_range.semver.max = ver; + } else |err| { + return error.OSVersionDetectionFail; + } }, else => { // Unimplemented, fall back to default version range. 
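
A minimal sketch of the decoding arithmetic used in the hunk above, illustrative only and not part of the patch; it assumes the documented FreeBSD convention and uses 1202000 (the kern.osreldate value associated with FreeBSD 12.2-RELEASE) as the example input:

const std = @import("std");

test "decode a FreeBSD kern.osreldate value (illustrative)" {
    // Assumed example input: a FreeBSD 12.2-RELEASE kernel reporting kern.osreldate = 1202000.
    const osreldate: u32 = 1_202_000;
    // Same arithmetic as the detection code above.
    const major = osreldate / 100_000; // 12
    const minor1 = osreldate % 100_000 / 10_000; // 0, usually 0 since 5.1
    const minor2 = osreldate % 10_000 / 1_000; // 2
    const patch = osreldate % 1_000; // 0
    std.debug.assert(major == 12);
    std.debug.assert(minor1 + minor2 == 2);
    std.debug.assert(patch == 0);
}
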
@@ -310,8 +363,6 @@ pub const NativeTargetInfo = struct { os.version_range.linux.glibc = glibc; } - var cpu_detection_unimplemented = false; - // Until https://github.com/ziglang/zig/issues/4592 is implemented (support detecting the // native CPU architecture as being different than the current target), we use this: const cpu_arch = cross_target.getCpuArch(); @@ -325,7 +376,6 @@ pub const NativeTargetInfo = struct { Target.Cpu.baseline(cpu_arch), .explicit => |model| model.toCpu(cpu_arch), } orelse backup_cpu_detection: { - cpu_detection_unimplemented = true; break :backup_cpu_detection Target.Cpu.baseline(cpu_arch); }; var result = try detectAbiAndDynamicLinker(allocator, cpu, os, cross_target); @@ -362,7 +412,6 @@ pub const NativeTargetInfo = struct { else => {}, } cross_target.updateCpuFeatures(&result.target.cpu.features); - result.cpu_detection_unimplemented = cpu_detection_unimplemented; return result; } @@ -925,12 +974,15 @@ pub const NativeTargetInfo = struct { else => {}, } + switch (std.Target.current.os.tag) { + .linux => return linux.detectNativeCpuAndFeatures(), + .macos => return macos.detectNativeCpuAndFeatures(), + else => {}, + } + // This architecture does not have CPU model & feature detection yet. // See https://github.com/ziglang/zig/issues/4591 - if (std.Target.current.os.tag != .linux) - return null; - - return linux.detectNativeCpuAndFeatures(); + return null; } }; diff --git a/lib/std/zig/system/macos.zig b/lib/std/zig/system/macos.zig index be892b4834..0b3e639582 100644 --- a/lib/std/zig/system/macos.zig +++ b/lib/std/zig/system/macos.zig @@ -7,10 +7,13 @@ const std = @import("std"); const assert = std.debug.assert; const mem = std.mem; const testing = std.testing; +const os = std.os; + +const Target = std.Target; /// Detect macOS version. -/// `os` is not modified in case of error. -pub fn detect(os: *std.Target.Os) !void { +/// `target_os` is not modified in case of error. +pub fn detect(target_os: *Target.Os) !void { // Drop use of osproductversion sysctl because: // 1. only available 10.13.4 High Sierra and later // 2. when used from a binary built against < SDK 11.0 it returns 10.16 and masks Big Sur 11.x version @@ -60,8 +63,8 @@ pub fn detect(os: *std.Target.Os) !void { if (parseSystemVersion(bytes)) |ver| { // never return non-canonical `10.(16+)` if (!(ver.major == 10 and ver.minor >= 16)) { - os.version_range.semver.min = ver; - os.version_range.semver.max = ver; + target_os.version_range.semver.min = ver; + target_os.version_range.semver.max = ver; return; } continue; @@ -410,7 +413,7 @@ fn testVersionEquality(expected: std.builtin.Version, got: std.builtin.Version) /// `-syslibroot` param of the linker. /// The caller needs to free the resulting path slice. 
pub fn getSDKPath(allocator: *mem.Allocator) ![]u8 { - assert(std.Target.current.isDarwin()); + assert(Target.current.isDarwin()); const argv = &[_][]const u8{ "xcrun", "--show-sdk-path" }; const result = try std.ChildProcess.exec(.{ .allocator = allocator, .argv = argv }); defer { @@ -426,3 +429,41 @@ pub fn getSDKPath(allocator: *mem.Allocator) ![]u8 { const syslibroot = mem.trimRight(u8, result.stdout, "\r\n"); return mem.dupe(allocator, u8, syslibroot); } + +pub fn detectNativeCpuAndFeatures() ?Target.Cpu { + var cpu_family: os.CPUFAMILY = undefined; + var len: usize = @sizeOf(os.CPUFAMILY); + os.sysctlbynameZ("hw.cpufamily", &cpu_family, &len, null, 0) catch |err| switch (err) { + error.NameTooLong => unreachable, // constant, known good value + error.PermissionDenied => unreachable, // only when setting values, + error.SystemResources => unreachable, // memory already on the stack + error.UnknownName => unreachable, // constant, known good value + error.Unexpected => unreachable, // EFAULT: stack should be safe, EISDIR/ENOTDIR: constant, known good value + }; + + const current_arch = Target.current.cpu.arch; + switch (current_arch) { + .aarch64, .aarch64_be, .aarch64_32 => { + const model = switch (cpu_family) { + .ARM_FIRESTORM_ICESTORM => &Target.aarch64.cpu.apple_a14, + .ARM_LIGHTNING_THUNDER => &Target.aarch64.cpu.apple_a13, + .ARM_VORTEX_TEMPEST => &Target.aarch64.cpu.apple_a12, + .ARM_MONSOON_MISTRAL => &Target.aarch64.cpu.apple_a11, + .ARM_HURRICANE => &Target.aarch64.cpu.apple_a10, + .ARM_TWISTER => &Target.aarch64.cpu.apple_a9, + .ARM_TYPHOON => &Target.aarch64.cpu.apple_a8, + .ARM_CYCLONE => &Target.aarch64.cpu.cyclone, + else => return null, + }; + + return Target.Cpu{ + .arch = current_arch, + .model = model, + .features = model.features, + }; + }, + else => {}, + } + + return null; +} diff --git a/src/Cache.zig b/src/Cache.zig index 5bc32b4b68..6c17f52d69 100644 --- a/src/Cache.zig +++ b/src/Cache.zig @@ -11,6 +11,7 @@ const testing = std.testing; const mem = std.mem; const fmt = std.fmt; const Allocator = std.mem.Allocator; +const Compilation = @import("Compilation.zig"); /// Be sure to call `Manifest.deinit` after successful initialization. pub fn obtain(cache: *const Cache) Manifest { @@ -61,7 +62,7 @@ pub const File = struct { pub const HashHelper = struct { hasher: Hasher = hasher_init, - const EmitLoc = @import("Compilation.zig").EmitLoc; + const EmitLoc = Compilation.EmitLoc; /// Record a slice of bytes as an dependency of the process being cached pub fn addBytes(hh: *HashHelper, bytes: []const u8) void { @@ -220,6 +221,24 @@ pub const Manifest = struct { return idx; } + pub fn hashCSource(self: *Manifest, c_source: Compilation.CSourceFile) !void { + _ = try self.addFile(c_source.src_path, null); + // Hash the extra flags, with special care to call addFile for file parameters. + // TODO this logic can likely be improved by utilizing clang_options_data.zig. 
+ const file_args = [_][]const u8{"-include"}; + var arg_i: usize = 0; + while (arg_i < c_source.extra_flags.len) : (arg_i += 1) { + const arg = c_source.extra_flags[arg_i]; + self.hash.addBytes(arg); + for (file_args) |file_arg| { + if (mem.eql(u8, file_arg, arg) and arg_i + 1 < c_source.extra_flags.len) { + arg_i += 1; + _ = try self.addFile(c_source.extra_flags[arg_i], null); + } + } + } + } + pub fn addOptionalFile(self: *Manifest, optional_file_path: ?[]const u8) !void { self.hash.add(optional_file_path != null); const file_path = optional_file_path orelse return; diff --git a/src/Compilation.zig b/src/Compilation.zig index e805b86ee0..992b1685df 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -942,7 +942,7 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation { arena, options.zig_lib_directory.path.?, options.target, - options.is_native_os, + options.is_native_abi, link_libc, options.libc_installation, ); @@ -2514,23 +2514,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: *std.P man.hash.add(comp.clang_preprocessor_mode); - _ = try man.addFile(c_object.src.src_path, null); - { - // Hash the extra flags, with special care to call addFile for file parameters. - // TODO this logic can likely be improved by utilizing clang_options_data.zig. - const file_args = [_][]const u8{"-include"}; - var arg_i: usize = 0; - while (arg_i < c_object.src.extra_flags.len) : (arg_i += 1) { - const arg = c_object.src.extra_flags[arg_i]; - man.hash.addBytes(arg); - for (file_args) |file_arg| { - if (mem.eql(u8, file_arg, arg) and arg_i + 1 < c_object.src.extra_flags.len) { - arg_i += 1; - _ = try man.addFile(c_object.src.extra_flags[arg_i], null); - } - } - } - } + try man.hashCSource(c_object.src); { const is_collision = blk: { @@ -2862,6 +2846,10 @@ pub fn addCCArgs( } } + if (target.cpu.arch.isThumb()) { + try argv.append("-mthumb"); + } + if (comp.haveFramePointer()) { try argv.append("-fno-omit-frame-pointer"); } else { @@ -3135,7 +3123,7 @@ fn detectLibCIncludeDirs( arena: *Allocator, zig_lib_dir: []const u8, target: Target, - is_native_os: bool, + is_native_abi: bool, link_libc: bool, libc_installation: ?*const LibCInstallation, ) !LibCDirs { @@ -3150,10 +3138,26 @@ fn detectLibCIncludeDirs( return detectLibCFromLibCInstallation(arena, target, lci); } + if (is_native_abi) { + const libc = try arena.create(LibCInstallation); + libc.* = try LibCInstallation.findNative(.{ .allocator = arena }); + return detectLibCFromLibCInstallation(arena, target, libc); + } + if (target_util.canBuildLibC(target)) { const generic_name = target_util.libCGenericName(target); // Some architectures are handled by the same set of headers. - const arch_name = if (target.abi.isMusl()) target_util.archMuslName(target.cpu.arch) else @tagName(target.cpu.arch); + const arch_name = if (target.abi.isMusl()) + target_util.archMuslName(target.cpu.arch) + else if (target.cpu.arch.isThumb()) + // ARM headers are valid for Thumb too. + switch (target.cpu.arch) { + .thumb => "arm", + .thumbeb => "armeb", + else => unreachable, + } + else + @tagName(target.cpu.arch); const os_name = @tagName(target.os.tag); // Musl's headers are ABI-agnostic and so they all have the "musl" ABI name. 
const abi_name = if (target.abi.isMusl()) "musl" else @tagName(target.abi); @@ -3190,12 +3194,6 @@ fn detectLibCIncludeDirs( }; } - if (is_native_os) { - const libc = try arena.create(LibCInstallation); - libc.* = try LibCInstallation.findNative(.{ .allocator = arena }); - return detectLibCFromLibCInstallation(arena, target, libc); - } - return LibCDirs{ .libc_include_dir_list = &[0][]u8{}, .libc_installation = null, diff --git a/src/Module.zig b/src/Module.zig index 6dc8de90aa..18dbc9222a 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -4151,6 +4151,33 @@ pub fn intDiv(allocator: *Allocator, lhs: Value, rhs: Value) !Value { } } +pub fn intMul(allocator: *Allocator, lhs: Value, rhs: Value) !Value { + // TODO is this a performance issue? maybe we should try the operation without + // resorting to BigInt first. + var lhs_space: Value.BigIntSpace = undefined; + var rhs_space: Value.BigIntSpace = undefined; + const lhs_bigint = lhs.toBigInt(&lhs_space); + const rhs_bigint = rhs.toBigInt(&rhs_space); + const limbs = try allocator.alloc( + std.math.big.Limb, + lhs_bigint.limbs.len + rhs_bigint.limbs.len + 1, + ); + var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; + var limbs_buffer = try allocator.alloc( + std.math.big.Limb, + std.math.big.int.calcMulLimbsBufferLen(lhs_bigint.limbs.len, rhs_bigint.limbs.len, 1), + ); + defer allocator.free(limbs_buffer); + result_bigint.mul(lhs_bigint, rhs_bigint, limbs_buffer, allocator); + const result_limbs = result_bigint.limbs[0..result_bigint.len]; + + if (result_bigint.positive) { + return Value.Tag.int_big_positive.create(allocator, result_limbs); + } else { + return Value.Tag.int_big_negative.create(allocator, result_limbs); + } +} + pub fn floatAdd( arena: *Allocator, float_type: Type, @@ -4250,6 +4277,39 @@ pub fn floatDiv( } } +pub fn floatMul( + arena: *Allocator, + float_type: Type, + src: LazySrcLoc, + lhs: Value, + rhs: Value, +) !Value { + switch (float_type.tag()) { + .f16 => { + @panic("TODO add __trunctfhf2 to compiler-rt"); + //const lhs_val = lhs.toFloat(f16); + //const rhs_val = rhs.toFloat(f16); + //return Value.Tag.float_16.create(arena, lhs_val * rhs_val); + }, + .f32 => { + const lhs_val = lhs.toFloat(f32); + const rhs_val = rhs.toFloat(f32); + return Value.Tag.float_32.create(arena, lhs_val * rhs_val); + }, + .f64 => { + const lhs_val = lhs.toFloat(f64); + const rhs_val = rhs.toFloat(f64); + return Value.Tag.float_64.create(arena, lhs_val * rhs_val); + }, + .f128, .comptime_float, .c_longdouble => { + const lhs_val = lhs.toFloat(f128); + const rhs_val = rhs.toFloat(f128); + return Value.Tag.float_128.create(arena, lhs_val * rhs_val); + }, + else => unreachable, + } +} + pub fn simplePtrType( mod: *Module, arena: *Allocator, diff --git a/src/Sema.zig b/src/Sema.zig index cc77b00789..626624110e 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -4606,10 +4606,15 @@ fn analyzeArithmetic( // incase rhs is 0, simply return lhs without doing any calculations // TODO Once division is implemented we should throw an error when dividing by 0. 
if (rhs_val.compareWithZero(.eq)) { - return sema.mod.constInst(sema.arena, src, .{ - .ty = scalar_type, - .val = lhs_val, - }); + switch (zir_tag) { + .add, .addwrap, .sub, .subwrap => { + return sema.mod.constInst(sema.arena, src, .{ + .ty = scalar_type, + .val = lhs_val, + }); + }, + else => {}, + } } const value = switch (zir_tag) { @@ -4634,6 +4639,13 @@ fn analyzeArithmetic( try Module.floatDiv(sema.arena, scalar_type, src, lhs_val, rhs_val); break :blk val; }, + .mul => blk: { + const val = if (is_int) + try Module.intMul(sema.arena, lhs_val, rhs_val) + else + try Module.floatMul(sema.arena, scalar_type, src, lhs_val, rhs_val); + break :blk val; + }, else => return sema.mod.fail(&block.base, src, "TODO Implement arithmetic operand '{s}'", .{@tagName(zir_tag)}), }; diff --git a/src/codegen.zig b/src/codegen.zig index f588f7c3b6..9d533db39a 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -20,6 +20,8 @@ const build_options = @import("build_options"); const LazySrcLoc = Module.LazySrcLoc; const RegisterManager = @import("register_manager.zig").RegisterManager; +const X8664Encoder = @import("codegen/x86_64.zig").Encoder; + /// The codegen-related data that is stored in `ir.Inst.Block` instructions. pub const BlockData = struct { relocs: std.ArrayListUnmanaged(Reloc) = undefined, @@ -905,7 +907,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { // TODO separate architectures with registers from // stack-based architectures (spu_2) if (callee_preserved_regs.len > 0) { - if (self.register_manager.tryAllocReg(inst)) |reg| { + if (self.register_manager.tryAllocReg(inst, &.{})) |reg| { return MCValue{ .register = registerAlias(reg, abi_size) }; } } @@ -917,6 +919,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { pub fn spillInstruction(self: *Self, src: LazySrcLoc, reg: Register, inst: *ir.Inst) !void { const stack_mcv = try self.allocRegOrMem(inst, false); + log.debug("spilling {*} to stack mcv {any}", .{ inst, stack_mcv }); const reg_mcv = self.getResolvedInstValue(inst); assert(reg == toCanonicalReg(reg_mcv.register)); const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; @@ -928,7 +931,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { /// allocated. A second call to `copyToTmpRegister` may return the same register. /// This can have a side effect of spilling instructions to the stack to free up a register. fn copyToTmpRegister(self: *Self, src: LazySrcLoc, ty: Type, mcv: MCValue) !Register { - const reg = try self.register_manager.allocRegWithoutTracking(); + const reg = try self.register_manager.allocRegWithoutTracking(&.{}); try self.genSetReg(src, ty, reg, mcv); return reg; } @@ -937,7 +940,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { /// `reg_owner` is the instruction that gets associated with the register in the register table. /// This can have a side effect of spilling instructions to the stack to free up a register. 
fn copyToNewRegister(self: *Self, reg_owner: *ir.Inst, mcv: MCValue) !MCValue { - const reg = try self.register_manager.allocReg(reg_owner); + const reg = try self.register_manager.allocReg(reg_owner, &.{}); try self.genSetReg(reg_owner.src, reg_owner.ty, reg, mcv); return MCValue{ .register = reg }; } @@ -1017,7 +1020,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { }, .val = Value.initTag(.bool_true), }; - return try self.genX8664BinMath(&inst.base, inst.operand, &imm.base, 6, 0x30); + return try self.genX8664BinMath(&inst.base, inst.operand, &imm.base); }, .arm, .armeb => { var imm = ir.Inst.Constant{ @@ -1041,7 +1044,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return MCValue.dead; switch (arch) { .x86_64 => { - return try self.genX8664BinMath(&inst.base, inst.lhs, inst.rhs, 0, 0x00); + return try self.genX8664BinMath(&inst.base, inst.lhs, inst.rhs); }, .arm, .armeb => return try self.genArmBinOp(&inst.base, inst.lhs, inst.rhs, .add), else => return self.fail(inst.base.src, "TODO implement add for {}", .{self.target.cpu.arch}), @@ -1062,6 +1065,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { if (inst.base.isUnused()) return MCValue.dead; switch (arch) { + .x86_64 => return try self.genX8664BinMath(&inst.base, inst.lhs, inst.rhs), .arm, .armeb => return try self.genArmMul(&inst.base, inst.lhs, inst.rhs), else => return self.fail(inst.base.src, "TODO implement mul for {}", .{self.target.cpu.arch}), } @@ -1340,7 +1344,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return MCValue.dead; switch (arch) { .x86_64 => { - return try self.genX8664BinMath(&inst.base, inst.lhs, inst.rhs, 5, 0x28); + return try self.genX8664BinMath(&inst.base, inst.lhs, inst.rhs); }, .arm, .armeb => return try self.genArmBinOp(&inst.base, inst.lhs, inst.rhs, .sub), else => return self.fail(inst.base.src, "TODO implement sub for {}", .{self.target.cpu.arch}), @@ -1356,54 +1360,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } - fn genArmBinOp(self: *Self, inst: *ir.Inst, op_lhs: *ir.Inst, op_rhs: *ir.Inst, op: ir.Inst.Tag) !MCValue { - const lhs = try self.resolveInst(op_lhs); - const rhs = try self.resolveInst(op_rhs); - - // Destination must be a register - var dst_mcv: MCValue = undefined; - var lhs_mcv: MCValue = undefined; - var rhs_mcv: MCValue = undefined; - if (self.reuseOperand(inst, 0, lhs)) { - // LHS is the destination - // RHS is the source - lhs_mcv = if (lhs != .register) try self.copyToNewRegister(inst, lhs) else lhs; - rhs_mcv = rhs; - dst_mcv = lhs_mcv; - } else if (self.reuseOperand(inst, 1, rhs)) { - // RHS is the destination - // LHS is the source - lhs_mcv = lhs; - rhs_mcv = if (rhs != .register) try self.copyToNewRegister(inst, rhs) else rhs; - dst_mcv = rhs_mcv; - } else { - // TODO save 1 copy instruction by directly allocating the destination register - // LHS is the destination - // RHS is the source - lhs_mcv = try self.copyToNewRegister(inst, lhs); - rhs_mcv = rhs; - dst_mcv = lhs_mcv; - } - - try self.genArmBinOpCode(inst.src, dst_mcv.register, lhs_mcv, rhs_mcv, op); - return dst_mcv; - } - - fn genArmBinOpCode( - self: *Self, - src: LazySrcLoc, - dst_reg: Register, - lhs_mcv: MCValue, - rhs_mcv: MCValue, - op: ir.Inst.Tag, - ) !void { - assert(lhs_mcv == .register or lhs_mcv == .register); - - const swap_lhs_and_rhs = rhs_mcv == .register and lhs_mcv != .register; - const op1 = if (swap_lhs_and_rhs) rhs_mcv.register else lhs_mcv.register; - const op2 = if (swap_lhs_and_rhs) lhs_mcv else rhs_mcv; - - const operand = switch 
(op2) { + fn armOperandShouldBeRegister(self: *Self, src: LazySrcLoc, mcv: MCValue) !bool { + return switch (mcv) { .none => unreachable, .undef => unreachable, .dead, .unreach => unreachable, @@ -1415,15 +1373,142 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { if (imm > std.math.maxInt(u32)) return self.fail(src, "TODO ARM binary arithmetic immediate larger than u32", .{}); // Load immediate into register if it doesn't fit - // as an operand - break :blk Instruction.Operand.fromU32(@intCast(u32, imm)) orelse - Instruction.Operand.reg(try self.copyToTmpRegister(src, Type.initTag(.u32), op2), Instruction.Operand.Shift.none); + // in an operand + break :blk Instruction.Operand.fromU32(@intCast(u32, imm)) == null; }, + .register => true, + .stack_offset, + .embedded_in_code, + .memory, + => true, + }; + } + + fn genArmBinOp(self: *Self, inst: *ir.Inst, op_lhs: *ir.Inst, op_rhs: *ir.Inst, op: ir.Inst.Tag) !MCValue { + const lhs = try self.resolveInst(op_lhs); + const rhs = try self.resolveInst(op_rhs); + + const lhs_is_register = lhs == .register; + const rhs_is_register = rhs == .register; + const lhs_should_be_register = try self.armOperandShouldBeRegister(op_lhs.src, lhs); + const rhs_should_be_register = try self.armOperandShouldBeRegister(op_rhs.src, rhs); + const reuse_lhs = lhs_is_register and self.reuseOperand(inst, 0, lhs); + const reuse_rhs = !reuse_lhs and rhs_is_register and self.reuseOperand(inst, 1, rhs); + + // Destination must be a register + var dst_mcv: MCValue = undefined; + var lhs_mcv = lhs; + var rhs_mcv = rhs; + var swap_lhs_and_rhs = false; + + // Allocate registers for operands and/or destination + const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; + if (reuse_lhs) { + // Allocate 0 or 1 registers + if (!rhs_is_register and rhs_should_be_register) { + rhs_mcv = MCValue{ .register = try self.register_manager.allocReg(op_rhs, &.{lhs.register}) }; + branch.inst_table.putAssumeCapacity(op_rhs, rhs_mcv); + } + dst_mcv = lhs; + } else if (reuse_rhs) { + // Allocate 0 or 1 registers + if (!lhs_is_register and lhs_should_be_register) { + lhs_mcv = MCValue{ .register = try self.register_manager.allocReg(op_lhs, &.{rhs.register}) }; + branch.inst_table.putAssumeCapacity(op_lhs, lhs_mcv); + } + dst_mcv = rhs; + + swap_lhs_and_rhs = true; + } else { + // Allocate 1 or 2 registers + if (lhs_should_be_register and rhs_should_be_register) { + if (lhs_is_register and rhs_is_register) { + dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst, &.{ lhs.register, rhs.register }) }; + } else if (lhs_is_register) { + // Move RHS to register + dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst, &.{lhs.register}) }; + rhs_mcv = dst_mcv; + } else if (rhs_is_register) { + // Move LHS to register + dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst, &.{rhs.register}) }; + lhs_mcv = dst_mcv; + } else { + // Move LHS and RHS to register + const regs = try self.register_manager.allocRegs(2, .{ inst, op_rhs }, &.{}); + lhs_mcv = MCValue{ .register = regs[0] }; + rhs_mcv = MCValue{ .register = regs[1] }; + dst_mcv = lhs_mcv; + + branch.inst_table.putAssumeCapacity(op_rhs, rhs_mcv); + } + } else if (lhs_should_be_register) { + // RHS is immediate + if (lhs_is_register) { + dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst, &.{lhs.register}) }; + } else { + dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst, &.{}) }; + lhs_mcv = dst_mcv; + } + } else if 
(rhs_should_be_register) { + // LHS is immediate + if (rhs_is_register) { + dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst, &.{rhs.register}) }; + } else { + dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst, &.{}) }; + rhs_mcv = dst_mcv; + } + + swap_lhs_and_rhs = true; + } else unreachable; // binary operation on two immediates + } + + // Move the operands to the newly allocated registers + if (lhs_mcv == .register and !lhs_is_register) { + try self.genSetReg(op_lhs.src, op_lhs.ty, lhs_mcv.register, lhs); + } + if (rhs_mcv == .register and !rhs_is_register) { + try self.genSetReg(op_rhs.src, op_rhs.ty, rhs_mcv.register, rhs); + } + + try self.genArmBinOpCode( + inst.src, + dst_mcv.register, + lhs_mcv, + rhs_mcv, + swap_lhs_and_rhs, + op, + ); + return dst_mcv; + } + + fn genArmBinOpCode( + self: *Self, + src: LazySrcLoc, + dst_reg: Register, + lhs_mcv: MCValue, + rhs_mcv: MCValue, + swap_lhs_and_rhs: bool, + op: ir.Inst.Tag, + ) !void { + assert(lhs_mcv == .register or rhs_mcv == .register); + + const op1 = if (swap_lhs_and_rhs) rhs_mcv.register else lhs_mcv.register; + const op2 = if (swap_lhs_and_rhs) lhs_mcv else rhs_mcv; + + const operand = switch (op2) { + .none => unreachable, + .undef => unreachable, + .dead, .unreach => unreachable, + .compare_flags_unsigned => unreachable, + .compare_flags_signed => unreachable, + .ptr_stack_offset => unreachable, + .ptr_embedded_in_code => unreachable, + .immediate => |imm| Instruction.Operand.fromU32(@intCast(u32, imm)).?, .register => |reg| Instruction.Operand.reg(reg, Instruction.Operand.Shift.none), .stack_offset, .embedded_in_code, .memory, - => Instruction.Operand.reg(try self.copyToTmpRegister(src, Type.initTag(.u32), op2), Instruction.Operand.Shift.none), + => unreachable, }; switch (op) { @@ -1485,8 +1570,20 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return dst_mcv; } + /// Perform "binary" operators, excluding comparisons. + /// Currently, the following ops are supported: /// ADD, SUB, XOR, OR, AND - fn genX8664BinMath(self: *Self, inst: *ir.Inst, op_lhs: *ir.Inst, op_rhs: *ir.Inst, opx: u8, mr: u8) !MCValue { + fn genX8664BinMath(self: *Self, inst: *ir.Inst, op_lhs: *ir.Inst, op_rhs: *ir.Inst) !MCValue { + // We'll handle these ops in two steps. + // 1) Prepare an output location (register or memory) + // This location will be the location of the operand that dies (if one exists) + // or just a temporary register (if one doesn't exist) + // 2) Perform the op with the other argument + // 3) Sometimes, the output location is memory but the op doesn't support it. + // In this case, copy that location to a register, then perform the op to that register instead. 
+ // + // TODO: make this algorithm less bad + try self.code.ensureCapacity(self.code.items.len + 8); const lhs = try self.resolveInst(op_lhs); @@ -1547,18 +1644,109 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { else => {}, } - try self.genX8664BinMathCode(inst.src, inst.ty, dst_mcv, src_mcv, opx, mr); + // Now for step 2, we perform the actual op + switch (inst.tag) { + // TODO: Generate wrapping and non-wrapping versions separately + .add, .addwrap => try self.genX8664BinMathCode(inst.src, inst.ty, dst_mcv, src_mcv, 0, 0x00), + .bool_or, .bit_or => try self.genX8664BinMathCode(inst.src, inst.ty, dst_mcv, src_mcv, 1, 0x08), + .bool_and, .bit_and => try self.genX8664BinMathCode(inst.src, inst.ty, dst_mcv, src_mcv, 4, 0x20), + .sub, .subwrap => try self.genX8664BinMathCode(inst.src, inst.ty, dst_mcv, src_mcv, 5, 0x28), + .xor, .not => try self.genX8664BinMathCode(inst.src, inst.ty, dst_mcv, src_mcv, 6, 0x30), + + .mul, .mulwrap => try self.genX8664Imul(inst.src, inst.ty, dst_mcv, src_mcv), + else => unreachable, + } return dst_mcv; } + /// Wrap over Instruction.encodeInto to translate errors + fn encodeX8664Instruction( + self: *Self, + src: LazySrcLoc, + inst: Instruction, + ) !void { + inst.encodeInto(self.code) catch |err| { + if (err == error.OutOfMemory) + return error.OutOfMemory + else + return self.fail(src, "Instruction.encodeInto failed because {s}", .{@errorName(err)}); + }; + } + + /// This function encodes a binary operation for x86_64 + /// intended for use with the following opcode ranges + /// because they share the same structure. + /// + /// Thus not all binary operations can be used here + /// -- multiplication needs to be done with imul, + /// which doesn't have as convenient an interface. + /// + /// "opx"-style instructions use the opcode extension field to indicate which instruction to execute: + /// + /// opx = /0: add + /// opx = /1: or + /// opx = /2: adc + /// opx = /3: sbb + /// opx = /4: and + /// opx = /5: sub + /// opx = /6: xor + /// opx = /7: cmp + /// + /// opcode | operand shape + /// --------+---------------------- + /// 80 /opx | *r/m8*, imm8 + /// 81 /opx | *r/m16/32/64*, imm16/32 + /// 83 /opx | *r/m16/32/64*, imm8 + /// + /// "mr"-style instructions use the low bits of opcode to indicate shape of instruction: + /// + /// mr = 00: add + /// mr = 08: or + /// mr = 10: adc + /// mr = 18: sbb + /// mr = 20: and + /// mr = 28: sub + /// mr = 30: xor + /// mr = 38: cmp + /// + /// opcode | operand shape + /// -------+------------------------- + /// mr + 0 | *r/m8*, r8 + /// mr + 1 | *r/m16/32/64*, r16/32/64 + /// mr + 2 | *r8*, r/m8 + /// mr + 3 | *r16/32/64*, r/m16/32/64 + /// mr + 4 | *AL*, imm8 + /// mr + 5 | *rAX*, imm16/32 + /// + /// TODO: rotates and shifts share the same structure, so we can potentially implement them + /// at a later date with very similar code. + /// They have "opx"-style instructions, but no "mr"-style instructions. 
+ /// + /// opx = /0: rol, + /// opx = /1: ror, + /// opx = /2: rcl, + /// opx = /3: rcr, + /// opx = /4: shl sal, + /// opx = /5: shr, + /// opx = /6: sal shl, + /// opx = /7: sar, + /// + /// opcode | operand shape + /// --------+------------------ + /// c0 /opx | *r/m8*, imm8 + /// c1 /opx | *r/m16/32/64*, imm8 + /// d0 /opx | *r/m8*, 1 + /// d1 /opx | *r/m16/32/64*, 1 + /// d2 /opx | *r/m8*, CL (for context, CL is register 1) + /// d3 /opx | *r/m16/32/64*, CL (for context, CL is register 1) fn genX8664BinMathCode( self: *Self, src: LazySrcLoc, dst_ty: Type, dst_mcv: MCValue, src_mcv: MCValue, - opx: u8, + opx: u3, mr: u8, ) !void { switch (dst_mcv) { @@ -1577,31 +1765,85 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .ptr_stack_offset => unreachable, .ptr_embedded_in_code => unreachable, .register => |src_reg| { - self.rex(.{ .b = dst_reg.isExtended(), .r = src_reg.isExtended(), .w = dst_reg.size() == 64 }); - self.code.appendSliceAssumeCapacity(&[_]u8{ mr + 0x1, 0xC0 | (@as(u8, src_reg.id() & 0b111) << 3) | @as(u8, dst_reg.id() & 0b111) }); + // for register, register use mr + 1 + // addressing mode: *r/m16/32/64*, r16/32/64 + const abi_size = dst_ty.abiSize(self.target.*); + const encoder = try X8664Encoder.init(self.code, 3); + encoder.rex(.{ + .w = abi_size == 8, + .r = src_reg.isExtended(), + .b = dst_reg.isExtended(), + }); + encoder.opcode_1byte(mr + 1); + encoder.modRm_direct( + src_reg.low_id(), + dst_reg.low_id(), + ); }, .immediate => |imm| { - const imm32 = @intCast(u31, imm); // This case must be handled before calling genX8664BinMathCode. - // 81 /opx id - if (imm32 <= math.maxInt(u7)) { - self.rex(.{ .b = dst_reg.isExtended(), .w = dst_reg.size() == 64 }); - self.code.appendSliceAssumeCapacity(&[_]u8{ - 0x83, - 0xC0 | (opx << 3) | @truncate(u3, dst_reg.id()), - @intCast(u8, imm32), + // register, immediate use opx = 81 or 83 addressing modes: + // opx = 81: r/m16/32/64, imm16/32 + // opx = 83: r/m16/32/64, imm8 + const imm32 = @intCast(i32, imm); // This case must be handled before calling genX8664BinMathCode. 
+ if (imm32 <= math.maxInt(i8)) { + const abi_size = dst_ty.abiSize(self.target.*); + const encoder = try X8664Encoder.init(self.code, 4); + encoder.rex(.{ + .w = abi_size == 8, + .b = dst_reg.isExtended(), }); + encoder.opcode_1byte(0x83); + encoder.modRm_direct( + opx, + dst_reg.low_id(), + ); + encoder.imm8(@intCast(i8, imm32)); } else { - self.rex(.{ .r = dst_reg.isExtended(), .w = dst_reg.size() == 64 }); - self.code.appendSliceAssumeCapacity(&[_]u8{ - 0x81, - 0xC0 | (opx << 3) | @truncate(u3, dst_reg.id()), + const abi_size = dst_ty.abiSize(self.target.*); + const encoder = try X8664Encoder.init(self.code, 7); + encoder.rex(.{ + .w = abi_size == 8, + .b = dst_reg.isExtended(), }); - std.mem.writeIntLittle(u32, self.code.addManyAsArrayAssumeCapacity(4), imm32); + encoder.opcode_1byte(0x81); + encoder.modRm_direct( + opx, + dst_reg.low_id(), + ); + encoder.imm32(@intCast(i32, imm32)); } }, - .embedded_in_code, .memory, .stack_offset => { + .embedded_in_code, .memory => { return self.fail(src, "TODO implement x86 ADD/SUB/CMP source memory", .{}); }, + .stack_offset => |off| { + // register, indirect use mr + 3 + // addressing mode: *r16/32/64*, r/m16/32/64 + const abi_size = dst_ty.abiSize(self.target.*); + const adj_off = off + abi_size; + if (off > math.maxInt(i32)) { + return self.fail(src, "stack offset too large", .{}); + } + const encoder = try X8664Encoder.init(self.code, 7); + encoder.rex(.{ + .w = abi_size == 8, + .r = dst_reg.isExtended(), + }); + encoder.opcode_1byte(mr + 3); + if (adj_off <= std.math.maxInt(i8)) { + encoder.modRm_indirectDisp8( + dst_reg.low_id(), + Register.ebp.low_id(), + ); + encoder.disp8(-@intCast(i8, adj_off)); + } else { + encoder.modRm_indirectDisp32( + dst_reg.low_id(), + Register.ebp.low_id(), + ); + encoder.disp32(-@intCast(i32, adj_off)); + } + }, .compare_flags_unsigned => { return self.fail(src, "TODO implement x86 ADD/SUB/CMP source compare flag (unsigned)", .{}); }, @@ -1640,28 +1882,184 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } + /// Performs integer multiplication between dst_mcv and src_mcv, storing the result in dst_mcv. + fn genX8664Imul( + self: *Self, + src: LazySrcLoc, + dst_ty: Type, + dst_mcv: MCValue, + src_mcv: MCValue, + ) !void { + switch (dst_mcv) { + .none => unreachable, + .undef => unreachable, + .dead, .unreach, .immediate => unreachable, + .compare_flags_unsigned => unreachable, + .compare_flags_signed => unreachable, + .ptr_stack_offset => unreachable, + .ptr_embedded_in_code => unreachable, + .register => |dst_reg| { + switch (src_mcv) { + .none => unreachable, + .undef => try self.genSetReg(src, dst_ty, dst_reg, .undef), + .dead, .unreach => unreachable, + .ptr_stack_offset => unreachable, + .ptr_embedded_in_code => unreachable, + .register => |src_reg| { + // register, register + // + // Use the following imul opcode + // 0F AF /r: IMUL r32/64, r/m32/64 + const abi_size = dst_ty.abiSize(self.target.*); + const encoder = try X8664Encoder.init(self.code, 4); + encoder.rex(.{ + .w = abi_size == 8, + .r = dst_reg.isExtended(), + .b = src_reg.isExtended(), + }); + encoder.opcode_2byte(0x0f, 0xaf); + encoder.modRm_direct( + dst_reg.low_id(), + src_reg.low_id(), + ); + }, + .immediate => |imm| { + // register, immediate: + // depends on size of immediate. 
+ // + // immediate fits in i8: + // 6B /r ib: IMUL r32/64, r/m32/64, imm8 + // + // immediate fits in i32: + // 69 /r id: IMUL r32/64, r/m32/64, imm32 + // + // immediate is huge: + // split into 2 instructions + // 1) copy the 64 bit immediate into a tmp register + // 2) perform register,register mul + // 0F AF /r: IMUL r32/64, r/m32/64 + if (math.minInt(i8) <= imm and imm <= math.maxInt(i8)) { + const abi_size = dst_ty.abiSize(self.target.*); + const encoder = try X8664Encoder.init(self.code, 4); + encoder.rex(.{ + .w = abi_size == 8, + .r = dst_reg.isExtended(), + .b = dst_reg.isExtended(), + }); + encoder.opcode_1byte(0x6B); + encoder.modRm_direct( + dst_reg.low_id(), + dst_reg.low_id(), + ); + encoder.imm8(@intCast(i8, imm)); + } else if (math.minInt(i32) <= imm and imm <= math.maxInt(i32)) { + const abi_size = dst_ty.abiSize(self.target.*); + const encoder = try X8664Encoder.init(self.code, 7); + encoder.rex(.{ + .w = abi_size == 8, + .r = dst_reg.isExtended(), + .b = dst_reg.isExtended(), + }); + encoder.opcode_1byte(0x69); + encoder.modRm_direct( + dst_reg.low_id(), + dst_reg.low_id(), + ); + encoder.imm32(@intCast(i32, imm)); + } else { + const src_reg = try self.copyToTmpRegister(src, dst_ty, src_mcv); + return self.genX8664Imul(src, dst_ty, dst_mcv, MCValue{ .register = src_reg }); + } + }, + .embedded_in_code, .memory, .stack_offset => { + return self.fail(src, "TODO implement x86 multiply source memory", .{}); + }, + .compare_flags_unsigned => { + return self.fail(src, "TODO implement x86 multiply source compare flag (unsigned)", .{}); + }, + .compare_flags_signed => { + return self.fail(src, "TODO implement x86 multiply source compare flag (signed)", .{}); + }, + } + }, + .stack_offset => |off| { + switch (src_mcv) { + .none => unreachable, + .undef => return self.genSetStack(src, dst_ty, off, .undef), + .dead, .unreach => unreachable, + .ptr_stack_offset => unreachable, + .ptr_embedded_in_code => unreachable, + .register => |src_reg| { + // copy dst to a register + const dst_reg = try self.copyToTmpRegister(src, dst_ty, dst_mcv); + // multiply into dst_reg + // register, register + // Use the following imul opcode + // 0F AF /r: IMUL r32/64, r/m32/64 + const abi_size = dst_ty.abiSize(self.target.*); + const encoder = try X8664Encoder.init(self.code, 4); + encoder.rex(.{ + .w = abi_size == 8, + .r = dst_reg.isExtended(), + .b = src_reg.isExtended(), + }); + encoder.opcode_2byte(0x0f, 0xaf); + encoder.modRm_direct( + dst_reg.low_id(), + src_reg.low_id(), + ); + // copy dst_reg back out + return self.genSetStack(src, dst_ty, off, MCValue{ .register = dst_reg }); + }, + .immediate => |imm| { + return self.fail(src, "TODO implement x86 multiply source immediate", .{}); + }, + .embedded_in_code, .memory, .stack_offset => { + return self.fail(src, "TODO implement x86 multiply source memory", .{}); + }, + .compare_flags_unsigned => { + return self.fail(src, "TODO implement x86 multiply source compare flag (unsigned)", .{}); + }, + .compare_flags_signed => { + return self.fail(src, "TODO implement x86 multiply source compare flag (signed)", .{}); + }, + } + }, + .embedded_in_code, .memory => { + return self.fail(src, "TODO implement x86 multiply destination memory", .{}); + }, + } + } + fn genX8664ModRMRegToStack(self: *Self, src: LazySrcLoc, ty: Type, off: u32, reg: Register, opcode: u8) !void { const abi_size = ty.abiSize(self.target.*); const adj_off = off + abi_size; - try self.code.ensureCapacity(self.code.items.len + 7); - self.rex(.{ .w = reg.size() == 64, .r = 
reg.isExtended() });
-            const reg_id: u8 = @truncate(u3, reg.id());
-            if (adj_off <= 128) {
-                // example: 48 89 55 7f mov QWORD PTR [rbp+0x7f],rdx
-                const RM = @as(u8, 0b01_000_101) | (reg_id << 3);
-                const negative_offset = @intCast(i8, -@intCast(i32, adj_off));
-                const twos_comp = @bitCast(u8, negative_offset);
-                self.code.appendSliceAssumeCapacity(&[_]u8{ opcode, RM, twos_comp });
-            } else if (adj_off <= 2147483648) {
-                // example: 48 89 95 80 00 00 00 mov QWORD PTR [rbp+0x80],rdx
-                const RM = @as(u8, 0b10_000_101) | (reg_id << 3);
-                const negative_offset = @intCast(i32, -@intCast(i33, adj_off));
-                const twos_comp = @bitCast(u32, negative_offset);
-                self.code.appendSliceAssumeCapacity(&[_]u8{ opcode, RM });
-                mem.writeIntLittle(u32, self.code.addManyAsArrayAssumeCapacity(4), twos_comp);
-            } else {
+            if (off > math.maxInt(i32)) {
                 return self.fail(src, "stack offset too large", .{});
             }
+
+            const i_adj_off = -@intCast(i32, adj_off);
+            const encoder = try X8664Encoder.init(self.code, 7);
+            encoder.rex(.{
+                .w = abi_size == 8,
+                .r = reg.isExtended(),
+            });
+            encoder.opcode_1byte(opcode);
+            if (i_adj_off >= std.math.minInt(i8)) {
+                // example: 48 89 55 7f mov QWORD PTR [rbp+0x7f],rdx
+                encoder.modRm_indirectDisp8(
+                    reg.low_id(),
+                    Register.ebp.low_id(),
+                );
+                encoder.disp8(@intCast(i8, i_adj_off));
+            } else {
+                // example: 48 89 95 80 00 00 00 mov QWORD PTR [rbp+0x80],rdx
+                encoder.modRm_indirectDisp32(
+                    reg.low_id(),
+                    Register.ebp.low_id(),
+                );
+                encoder.disp32(i_adj_off);
+            }
         }
 
         fn genArgDbgInfo(self: *Self, inst: *ir.Inst.Arg, mcv: MCValue) !void {
@@ -2106,12 +2504,13 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
             log.debug("got_addr = 0x{x}", .{got_addr});
             switch (arch) {
                 .x86_64 => {
-                    try self.genSetReg(inst.base.src, Type.initTag(.u32), .rax, .{ .memory = got_addr });
+                    try self.genSetReg(inst.base.src, Type.initTag(.u64), .rax, .{ .memory = got_addr });
                     // callq *%rax
+                    try self.code.ensureCapacity(self.code.items.len + 2);
                     self.code.appendSliceAssumeCapacity(&[2]u8{ 0xff, 0xd0 });
                 },
                 .aarch64 => {
-                    try self.genSetReg(inst.base.src, Type.initTag(.u32), .x30, .{ .memory = got_addr });
+                    try self.genSetReg(inst.base.src, Type.initTag(.u64), .x30, .{ .memory = got_addr });
                     // blr x30
                     writeInt(u32, try self.code.addManyAsArray(4), Instruction.blr(.x30).toU32());
                 },
@@ -2276,10 +2675,42 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
         const lhs = try self.resolveInst(inst.lhs);
         const rhs = try self.resolveInst(inst.rhs);
 
-        const src_mcv = rhs;
-        const dst_mcv = if (lhs != .register) try self.copyToNewRegister(inst.lhs, lhs) else lhs;
+        const lhs_is_register = lhs == .register;
+        const rhs_is_register = rhs == .register;
+        // lhs should always be a register
+        const rhs_should_be_register = try self.armOperandShouldBeRegister(inst.rhs.src, rhs);
+
+        var lhs_mcv = lhs;
+        var rhs_mcv = rhs;
+
+        // Allocate registers
+        if (rhs_should_be_register) {
+            if (!lhs_is_register and !rhs_is_register) {
+                const regs = try self.register_manager.allocRegs(2, .{ inst.rhs, inst.lhs }, &.{});
+                lhs_mcv = MCValue{ .register = regs[0] };
+                rhs_mcv = MCValue{ .register = regs[1] };
+            } else if (!rhs_is_register) {
+                rhs_mcv = MCValue{ .register = try self.register_manager.allocReg(inst.rhs, &.{}) };
+            }
+        }
+        if (!lhs_is_register) {
+            lhs_mcv = MCValue{ .register = try self.register_manager.allocReg(inst.lhs, &.{}) };
+        }
+
+        // Move the operands to the newly allocated registers
+        const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
+        if (lhs_mcv == .register and
!lhs_is_register) { + try self.genSetReg(inst.lhs.src, inst.lhs.ty, lhs_mcv.register, lhs); + branch.inst_table.putAssumeCapacity(inst.lhs, lhs); + } + if (rhs_mcv == .register and !rhs_is_register) { + try self.genSetReg(inst.rhs.src, inst.rhs.ty, rhs_mcv.register, rhs); + branch.inst_table.putAssumeCapacity(inst.rhs, rhs); + } + + // The destination register is not present in the cmp instruction + try self.genArmBinOpCode(inst.base.src, undefined, lhs_mcv, rhs_mcv, false, .cmp_eq); - try self.genArmBinOpCode(inst.base.src, dst_mcv.register, dst_mcv, src_mcv, .cmp_eq); const info = inst.lhs.ty.intInfo(self.target.*); return switch (info.signedness) { .signed => MCValue{ .compare_flags_signed = op }, @@ -2335,15 +2766,19 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .register => |reg| blk: { // test reg, 1 // TODO detect al, ax, eax - try self.code.ensureCapacity(self.code.items.len + 4); - // TODO audit this codegen: we force w = true here to make - // the value affect the big register - self.rex(.{ .b = reg.isExtended(), .w = true }); - self.code.appendSliceAssumeCapacity(&[_]u8{ - 0xf6, - @as(u8, 0xC0) | (0 << 3) | @truncate(u3, reg.id()), - 0x01, + const encoder = try X8664Encoder.init(self.code, 4); + encoder.rex(.{ + // TODO audit this codegen: we force w = true here to make + // the value affect the big register + .w = true, + .b = reg.isExtended(), }); + encoder.opcode_1byte(0xf6); + encoder.modRm_direct( + 0, + reg.low_id(), + ); + encoder.disp8(1); break :blk 0x84; }, else => return self.fail(inst.base.src, "TODO implement condbr {s} when condition is {s}", .{ self.target.cpu.arch, @tagName(cond) }), @@ -2653,9 +3088,9 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { switch (arch) { .x86_64 => switch (inst.base.tag) { // lhs AND rhs - .bool_and => return try self.genX8664BinMath(&inst.base, inst.lhs, inst.rhs, 4, 0x20), + .bool_and => return try self.genX8664BinMath(&inst.base, inst.lhs, inst.rhs), // lhs OR rhs - .bool_or => return try self.genX8664BinMath(&inst.base, inst.lhs, inst.rhs, 1, 0x08), + .bool_or => return try self.genX8664BinMath(&inst.base, inst.lhs, inst.rhs), else => unreachable, // Not a boolean operation }, .arm, .armeb => switch (inst.base.tag) { @@ -2862,39 +3297,6 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } - /// Encodes a REX prefix as specified, and appends it to the instruction - /// stream. This only modifies the instruction stream if at least one bit - /// is set true, which has a few implications: - /// - /// * The length of the instruction buffer will be modified *if* the - /// resulting REX is meaningful, but will remain the same if it is not. - /// * Deliberately inserting a "meaningless REX" requires explicit usage of - /// 0x40, and cannot be done via this function. - /// W => 64 bit mode - /// R => extension to the MODRM.reg field - /// X => extension to the SIB.index field - /// B => extension to the MODRM.rm field or the SIB.base field - fn rex(self: *Self, arg: struct { b: bool = false, w: bool = false, x: bool = false, r: bool = false }) void { - comptime assert(arch == .x86_64); - // From section 2.2.1.2 of the manual, REX is encoded as b0100WRXB. - var value: u8 = 0x40; - if (arg.b) { - value |= 0x1; - } - if (arg.x) { - value |= 0x2; - } - if (arg.r) { - value |= 0x4; - } - if (arg.w) { - value |= 0x8; - } - if (value != 0x40) { - self.code.appendAssumeCapacity(value); - } - } - /// Sets the value without any modifications to register allocation metadata or stack allocation metadata. 
fn setRegOrMem(self: *Self, src: LazySrcLoc, ty: Type, loc: MCValue, val: MCValue) !void { switch (loc) { @@ -3442,20 +3844,25 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } }, .compare_flags_unsigned => |op| { - try self.code.ensureCapacity(self.code.items.len + 3); + const encoder = try X8664Encoder.init(self.code, 7); // TODO audit this codegen: we force w = true here to make // the value affect the big register - self.rex(.{ .b = reg.isExtended(), .w = true }); - const opcode: u8 = switch (op) { + encoder.rex(.{ + .w = true, + .b = reg.isExtended(), + }); + encoder.opcode_2byte(0x0f, switch (op) { .gte => 0x93, .gt => 0x97, .neq => 0x95, .lt => 0x92, .lte => 0x96, .eq => 0x94, - }; - const id = @as(u8, reg.id() & 0b111); - self.code.appendSliceAssumeCapacity(&[_]u8{ 0x0f, opcode, 0xC0 | id }); + }); + encoder.modRm_direct( + 0, + reg.low_id(), + ); }, .compare_flags_signed => |op| { return self.fail(src, "TODO set register with compare flags value (signed)", .{}); @@ -3465,40 +3872,43 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { // register is the fastest way to zero a register. if (x == 0) { // The encoding for `xor r32, r32` is `0x31 /r`. - // Section 3.1.1.1 of the Intel x64 Manual states that "/r indicates that the - // ModR/M byte of the instruction contains a register operand and an r/m operand." - // - // R/M bytes are composed of two bits for the mode, then three bits for the register, - // then three bits for the operand. Since we're zeroing a register, the two three-bit - // values will be identical, and the mode is three (the raw register value). - // + const encoder = try X8664Encoder.init(self.code, 3); + // If we're accessing e.g. r8d, we need to use a REX prefix before the actual operation. Since // this is a 32-bit operation, the W flag is set to zero. X is also zero, as we're not using a SIB. // Both R and B are set, as we're extending, in effect, the register bits *and* the operand. - try self.code.ensureCapacity(self.code.items.len + 3); - self.rex(.{ .r = reg.isExtended(), .b = reg.isExtended() }); - const id = @as(u8, reg.id() & 0b111); - self.code.appendSliceAssumeCapacity(&[_]u8{ 0x31, 0xC0 | id << 3 | id }); + encoder.rex(.{ + .r = reg.isExtended(), + .b = reg.isExtended(), + }); + encoder.opcode_1byte(0x31); + // Section 3.1.1.1 of the Intel x64 Manual states that "/r indicates that the + // ModR/M byte of the instruction contains a register operand and an r/m operand." + encoder.modRm_direct( + reg.low_id(), + reg.low_id(), + ); + return; } - if (x <= math.maxInt(u32)) { + if (x <= math.maxInt(i32)) { // Next best case: if we set the lower four bytes, the upper four will be zeroed. // // The encoding for `mov IMM32 -> REG` is (0xB8 + R) IMM. - if (reg.isExtended()) { - // Just as with XORing, we need a REX prefix. This time though, we only - // need the B bit set, as we're extending the opcode's register field, - // and there is no Mod R/M byte. - // - // Thus, we need b01000001, or 0x41. - try self.code.resize(self.code.items.len + 6); - self.code.items[self.code.items.len - 6] = 0x41; - } else { - try self.code.resize(self.code.items.len + 5); - } - self.code.items[self.code.items.len - 5] = 0xB8 | @as(u8, reg.id() & 0b111); - const imm_ptr = self.code.items[self.code.items.len - 4 ..][0..4]; - mem.writeIntLittle(u32, imm_ptr, @intCast(u32, x)); + + const encoder = try X8664Encoder.init(self.code, 6); + // Just as with XORing, we need a REX prefix. 
This time though, we only + // need the B bit set, as we're extending the opcode's register field, + // and there is no Mod R/M byte. + encoder.rex(.{ + .b = reg.isExtended(), + }); + encoder.opcode_withReg(0xB8, reg.low_id()); + + // no ModR/M byte + + // IMM + encoder.imm32(@intCast(i32, x)); return; } // Worst case: we need to load the 64-bit register with the IMM. GNU's assemblers calls @@ -3508,79 +3918,98 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { // This encoding is, in fact, the *same* as the one used for 32-bit loads. The only // difference is that we set REX.W before the instruction, which extends the load to // 64-bit and uses the full bit-width of the register. - // - // Since we always need a REX here, let's just check if we also need to set REX.B. - // - // In this case, the encoding of the REX byte is 0b0100100B - try self.code.ensureCapacity(self.code.items.len + 10); - self.rex(.{ .w = reg.size() == 64, .b = reg.isExtended() }); - self.code.items.len += 9; - self.code.items[self.code.items.len - 9] = 0xB8 | @as(u8, reg.id() & 0b111); - const imm_ptr = self.code.items[self.code.items.len - 8 ..][0..8]; - mem.writeIntLittle(u64, imm_ptr, x); + { + const encoder = try X8664Encoder.init(self.code, 10); + encoder.rex(.{ + .w = true, + .b = reg.isExtended(), + }); + encoder.opcode_withReg(0xB8, reg.low_id()); + encoder.imm64(x); + } }, .embedded_in_code => |code_offset| { // We need the offset from RIP in a signed i32 twos complement. // The instruction is 7 bytes long and RIP points to the next instruction. - try self.code.ensureCapacity(self.code.items.len + 7); - // 64-bit LEA is encoded as REX.W 8D /r. If the register is extended, the REX byte is modified, - // but the operation size is unchanged. Since we're using a disp32, we want mode 0 and lower three - // bits as five. - // REX 0x8D 0b00RRR101, where RRR is the lower three bits of the id. - self.rex(.{ .w = reg.size() == 64, .b = reg.isExtended() }); - self.code.items.len += 6; - const rip = self.code.items.len; + + // 64-bit LEA is encoded as REX.W 8D /r. + const rip = self.code.items.len + 7; const big_offset = @intCast(i64, code_offset) - @intCast(i64, rip); const offset = @intCast(i32, big_offset); - self.code.items[self.code.items.len - 6] = 0x8D; - self.code.items[self.code.items.len - 5] = 0b101 | (@as(u8, reg.id() & 0b111) << 3); - const imm_ptr = self.code.items[self.code.items.len - 4 ..][0..4]; - mem.writeIntLittle(i32, imm_ptr, offset); + const encoder = try X8664Encoder.init(self.code, 7); + + // byte 1, always exists because w = true + encoder.rex(.{ + .w = true, + .r = reg.isExtended(), + }); + // byte 2 + encoder.opcode_1byte(0x8D); + // byte 3 + encoder.modRm_RIPDisp32(reg.low_id()); + // byte 4-7 + encoder.disp32(offset); + + // Double check that we haven't done any math errors + assert(rip == self.code.items.len); }, .register => |src_reg| { // If the registers are the same, nothing to do. if (src_reg.id() == reg.id()) return; - // This is a variant of 8B /r. Since we're using 64-bit moves, we require a REX. - // This is thus three bytes: REX 0x8B R/M. - // If the destination is extended, the R field must be 1. - // If the *source* is extended, the B field must be 1. - // Since the register is being accessed directly, the R/M mode is three. The reg field (the middle - // three bits) contain the destination, and the R/M field (the lower three bits) contain the source. 
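As a concrete illustration of that 8B /r register-to-register form (a sketch only, reusing the X8664Encoder helpers introduced by this change; the register choice is arbitrary): `mov rax, rbx` is three bytes, REX.W (0x48), the opcode 0x8B, and ModR/M 0xC3 (mod = 0b11, reg = rax = 000, r/m = rbx = 011).

    // illustrative sketch, not emitted by the compiler itself
    const encoder = try X8664Encoder.init(self.code, 3);
    encoder.rex(.{ .w = true }); // 0x48
    encoder.opcode_1byte(0x8B);
    encoder.modRm_direct(Register.rax.low_id(), Register.rbx.low_id()); // 0xC3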
- try self.code.ensureCapacity(self.code.items.len + 3); - self.rex(.{ .w = reg.size() == 64, .r = reg.isExtended(), .b = src_reg.isExtended() }); - const R = 0xC0 | (@as(u8, reg.id() & 0b111) << 3) | @as(u8, src_reg.id() & 0b111); - self.code.appendSliceAssumeCapacity(&[_]u8{ 0x8B, R }); + // This is a variant of 8B /r. + const abi_size = ty.abiSize(self.target.*); + const encoder = try X8664Encoder.init(self.code, 3); + encoder.rex(.{ + .w = abi_size == 8, + .r = reg.isExtended(), + .b = src_reg.isExtended(), + }); + encoder.opcode_1byte(0x8B); + encoder.modRm_direct(reg.low_id(), src_reg.low_id()); }, .memory => |x| { if (self.bin_file.options.pie) { // RIP-relative displacement to the entry in the GOT table. + const abi_size = ty.abiSize(self.target.*); + const encoder = try X8664Encoder.init(self.code, 10); + + // LEA reg, [] + + // We encode the instruction FIRST because prefixes may or may not appear. + // After we encode the instruction, we will know that the displacement bytes + // for [] will be at self.code.items.len - 4. + encoder.rex(.{ + .w = true, // force 64 bit because loading an address (to the GOT) + .r = reg.isExtended(), + }); + encoder.opcode_1byte(0x8D); + encoder.modRm_RIPDisp32(reg.low_id()); + encoder.disp32(0); + // TODO we should come up with our own, backend independent relocation types // which each backend (Elf, MachO, etc.) would then translate into an actual // fixup when linking. if (self.bin_file.cast(link.File.MachO)) |macho_file| { try macho_file.pie_fixups.append(self.bin_file.allocator, .{ .target_addr = x, - .offset = self.code.items.len + 3, + .offset = self.code.items.len - 4, .size = 4, }); } else { return self.fail(src, "TODO implement genSetReg for PIE GOT indirection on this platform", .{}); } - try self.code.ensureCapacity(self.code.items.len + 7); - self.rex(.{ .w = reg.size() == 64, .r = reg.isExtended() }); - self.code.appendSliceAssumeCapacity(&[_]u8{ - 0x8D, - 0x05 | (@as(u8, reg.id() & 0b111) << 3), - }); - mem.writeIntLittle(u32, self.code.addManyAsArrayAssumeCapacity(4), 0); - try self.code.ensureCapacity(self.code.items.len + 3); - self.rex(.{ .w = reg.size() == 64, .b = reg.isExtended(), .r = reg.isExtended() }); - const RM = (@as(u8, reg.id() & 0b111) << 3) | @truncate(u3, reg.id()); - self.code.appendSliceAssumeCapacity(&[_]u8{ 0x8B, RM }); - } else if (x <= math.maxInt(u32)) { + // MOV reg, [reg] + encoder.rex(.{ + .w = abi_size == 8, + .r = reg.isExtended(), + .b = reg.isExtended(), + }); + encoder.opcode_1byte(0x8B); + encoder.modRm_indirectDisp0(reg.low_id(), reg.low_id()); + } else if (x <= math.maxInt(i32)) { // Moving from memory to a register is a variant of `8B /r`. // Since we're using 64-bit moves, we require a REX. // This variant also requires a SIB, as it would otherwise be RIP-relative. @@ -3588,14 +4017,18 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { // The SIB must be 0x25, to indicate a disp32 with no scaled index. // 0b00RRR100, where RRR is the lower three bits of the register ID. // The instruction is thus eight bytes; REX 0x8B 0b00RRR100 0x25 followed by a four-byte disp32. 
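For instance (an illustrative sketch using the new encoder helpers; the register and address are chosen arbitrarily), loading `rcx` from absolute address 0x1000 is exactly those eight bytes, 48 8B 0C 25 00 10 00 00:

    // illustrative sketch, not emitted by the compiler itself
    const encoder = try X8664Encoder.init(self.code, 8);
    encoder.rex(.{ .w = true }); // 0x48
    encoder.opcode_1byte(0x8B);
    encoder.modRm_SIBDisp0(Register.rcx.low_id()); // 0x0C: mod = 00, reg = rcx, r/m = 100 selects a SIB byte
    encoder.sib_disp32(); // 0x25: no index, no base, a disp32 follows
    encoder.disp32(0x1000); // 00 10 00 00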
- try self.code.ensureCapacity(self.code.items.len + 8); - self.rex(.{ .w = reg.size() == 64, .b = reg.isExtended() }); - self.code.appendSliceAssumeCapacity(&[_]u8{ - 0x8B, - 0x04 | (@as(u8, reg.id() & 0b111) << 3), // R - 0x25, + const abi_size = ty.abiSize(self.target.*); + const encoder = try X8664Encoder.init(self.code, 8); + encoder.rex(.{ + .w = abi_size == 8, + .r = reg.isExtended(), }); - mem.writeIntLittle(u32, self.code.addManyAsArrayAssumeCapacity(4), @intCast(u32, x)); + encoder.opcode_1byte(0x8B); + // effective address = [SIB] + encoder.modRm_SIBDisp0(reg.low_id()); + // SIB = disp32 + encoder.sib_disp32(); + encoder.disp32(@intCast(i32, x)); } else { // If this is RAX, we can use a direct load; otherwise, we need to load the address, then indirectly load // the value. @@ -3603,12 +4036,13 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { // REX.W 0xA1 moffs64* // moffs64* is a 64-bit offset "relative to segment base", which really just means the // absolute address for all practical purposes. - try self.code.resize(self.code.items.len + 10); - // REX.W == 0x48 - self.code.items[self.code.items.len - 10] = 0x48; - self.code.items[self.code.items.len - 9] = 0xA1; - const imm_ptr = self.code.items[self.code.items.len - 8 ..][0..8]; - mem.writeIntLittle(u64, imm_ptr, x); + + const encoder = try X8664Encoder.init(self.code, 10); + encoder.rex(.{ + .w = true, + }); + encoder.opcode_1byte(0xA1); + encoder.writeIntLittle(u64, x); } else { // This requires two instructions; a move imm as used above, followed by an indirect load using the register // as the address and the register as the destination. @@ -3625,41 +4059,42 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { // Now, the register contains the address of the value to load into it // Currently, we're only allowing 64-bit registers, so we need the `REX.W 8B /r` variant. // TODO: determine whether to allow other sized registers, and if so, handle them properly. - // This operation requires three bytes: REX 0x8B R/M - try self.code.ensureCapacity(self.code.items.len + 3); - // For this operation, we want R/M mode *zero* (use register indirectly), and the two register - // values must match. Thus, it's 00ABCABC where ABC is the lower three bits of the register ID. - // - // Furthermore, if this is an extended register, both B and R must be set in the REX byte, as *both* - // register operands need to be marked as extended. 
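A worked instance of that three-byte form (sketch only; the encoder-based code below expresses the same encoding): `mov rcx, [rcx]` is 48 8B 09, where 0x09 is mod = 00, reg = rcx = 001, r/m = rcx = 001.

    // illustrative sketch, not emitted by the compiler itself
    const encoder = try X8664Encoder.init(self.code, 3);
    encoder.rex(.{ .w = true }); // 0x48
    encoder.opcode_1byte(0x8B);
    encoder.modRm_indirectDisp0(Register.rcx.low_id(), Register.rcx.low_id()); // 0x09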
- self.rex(.{ .w = reg.size() == 64, .b = reg.isExtended(), .r = reg.isExtended() }); - const RM = (@as(u8, reg.id() & 0b111) << 3) | @truncate(u3, reg.id()); - self.code.appendSliceAssumeCapacity(&[_]u8{ 0x8B, RM }); + + // mov reg, [reg] + const abi_size = ty.abiSize(self.target.*); + const encoder = try X8664Encoder.init(self.code, 3); + encoder.rex(.{ + .w = abi_size == 8, + .r = reg.isExtended(), + .b = reg.isExtended(), + }); + encoder.opcode_1byte(0x8B); + encoder.modRm_indirectDisp0(reg.low_id(), reg.low_id()); } } }, .stack_offset => |unadjusted_off| { - try self.code.ensureCapacity(self.code.items.len + 7); - const size_bytes = @divExact(reg.size(), 8); - const off = unadjusted_off + size_bytes; - self.rex(.{ .w = reg.size() == 64, .r = reg.isExtended() }); - const reg_id: u8 = @truncate(u3, reg.id()); - if (off <= 128) { - // Example: 48 8b 4d 7f mov rcx,QWORD PTR [rbp+0x7f] - const RM = @as(u8, 0b01_000_101) | (reg_id << 3); - const negative_offset = @intCast(i8, -@intCast(i32, off)); - const twos_comp = @bitCast(u8, negative_offset); - self.code.appendSliceAssumeCapacity(&[_]u8{ 0x8b, RM, twos_comp }); - } else if (off <= 2147483648) { - // Example: 48 8b 8d 80 00 00 00 mov rcx,QWORD PTR [rbp+0x80] - const RM = @as(u8, 0b10_000_101) | (reg_id << 3); - const negative_offset = @intCast(i32, -@intCast(i33, off)); - const twos_comp = @bitCast(u32, negative_offset); - self.code.appendSliceAssumeCapacity(&[_]u8{ 0x8b, RM }); - mem.writeIntLittle(u32, self.code.addManyAsArrayAssumeCapacity(4), twos_comp); - } else { + const abi_size = ty.abiSize(self.target.*); + const off = unadjusted_off + abi_size; + if (off < std.math.minInt(i32) or off > std.math.maxInt(i32)) { return self.fail(src, "stack offset too large", .{}); } + const ioff = -@intCast(i32, off); + const encoder = try X8664Encoder.init(self.code, 3); + encoder.rex(.{ + .w = abi_size == 8, + .r = reg.isExtended(), + }); + encoder.opcode_1byte(0x8B); + if (std.math.minInt(i8) <= ioff and ioff <= std.math.maxInt(i8)) { + // Example: 48 8b 4d 7f mov rcx,QWORD PTR [rbp+0x7f] + encoder.modRm_indirectDisp8(reg.low_id(), Register.ebp.low_id()); + encoder.disp8(@intCast(i8, ioff)); + } else { + // Example: 48 8b 8d 80 00 00 00 mov rcx,QWORD PTR [rbp+0x80] + encoder.modRm_indirectDisp32(reg.low_id(), Register.ebp.low_id()); + encoder.disp32(ioff); + } }, }, else => return self.fail(src, "TODO implement getSetReg for {}", .{self.target.cpu.arch}), diff --git a/src/codegen/x86_64.zig b/src/codegen/x86_64.zig index dea39f82cd..5a09f48e17 100644 --- a/src/codegen/x86_64.zig +++ b/src/codegen/x86_64.zig @@ -1,4 +1,9 @@ const std = @import("std"); +const testing = std.testing; +const mem = std.mem; +const assert = std.debug.assert; +const ArrayList = std.ArrayList; +const Allocator = std.mem.Allocator; const Type = @import("../Type.zig"); const DW = std.dwarf; @@ -68,6 +73,11 @@ pub const Register = enum(u8) { return @truncate(u4, @enumToInt(self)); } + /// Like id, but only returns the lower 3 bits. + pub fn low_id(self: Register) u3 { + return @truncate(u3, @enumToInt(self)); + } + /// Returns the index into `callee_preserved_regs`. 
pub fn allocIndex(self: Register) ?u4 { return switch (self) { @@ -136,6 +146,493 @@ pub const callee_preserved_regs = [_]Register{ .rax, .rcx, .rdx, .rsi, .rdi, .r8 pub const c_abi_int_param_regs = [_]Register{ .rdi, .rsi, .rdx, .rcx, .r8, .r9 }; pub const c_abi_int_return_regs = [_]Register{ .rax, .rdx }; +/// Encoding helper functions for x86_64 instructions +/// +/// Many of these helpers do very little, but they can help make things +/// slightly more readable with more descriptive field names / function names. +/// +/// Some of them also have asserts to ensure that we aren't doing dumb things. +/// For example, trying to use register 4 (esp) in an indirect modr/m byte is illegal, +/// you need to encode it with an SIB byte. +/// +/// Note that ALL of these helper functions will assume capacity, +/// so ensure that the `code` has sufficient capacity before using them. +/// The `init` method is the recommended way to ensure capacity. +pub const Encoder = struct { + /// Non-owning reference to the code array + code: *ArrayList(u8), + + const Self = @This(); + + /// Wrap `code` in Encoder to make it easier to call these helper functions + /// + /// maximum_inst_size should contain the maximum number of bytes + /// that the encoded instruction will take. + /// This is because the helper functions will assume capacity + /// in order to avoid bounds checking. + pub fn init(code: *ArrayList(u8), maximum_inst_size: u8) !Self { + try code.ensureCapacity(code.items.len + maximum_inst_size); + return Self{ .code = code }; + } + + /// Directly write a number to the code array with big endianness + pub fn writeIntBig(self: Self, comptime T: type, value: T) void { + mem.writeIntBig( + T, + self.code.addManyAsArrayAssumeCapacity(@divExact(@typeInfo(T).Int.bits, 8)), + value, + ); + } + + /// Directly write a number to the code array with little endianness + pub fn writeIntLittle(self: Self, comptime T: type, value: T) void { + mem.writeIntLittle( + T, + self.code.addManyAsArrayAssumeCapacity(@divExact(@typeInfo(T).Int.bits, 8)), + value, + ); + } + + // -------- + // Prefixes + // -------- + + pub const LegacyPrefixes = packed struct { + /// LOCK + prefix_f0: bool = false, + /// REPNZ, REPNE, REP, Scalar Double-precision + prefix_f2: bool = false, + /// REPZ, REPE, REP, Scalar Single-precision + prefix_f3: bool = false, + + /// CS segment override or Branch not taken + prefix_2e: bool = false, + /// DS segment override + prefix_36: bool = false, + /// ES segment override + prefix_26: bool = false, + /// FS segment override + prefix_64: bool = false, + /// GS segment override + prefix_65: bool = false, + + /// Branch taken + prefix_3e: bool = false, + + /// Operand size override (enables 16 bit operation) + prefix_66: bool = false, + + /// Address size override (enables 16 bit address size) + prefix_67: bool = false, + + padding: u5 = 0, + }; + + /// Encodes legacy prefixes + pub fn legacyPrefixes(self: Self, prefixes: LegacyPrefixes) void { + if (@bitCast(u16, prefixes) != 0) { + // Hopefully this path isn't taken very often, so we'll do it the slow way for now + + // LOCK + if (prefixes.prefix_f0) self.code.appendAssumeCapacity(0xf0); + // REPNZ, REPNE, REP, Scalar Double-precision + if (prefixes.prefix_f2) self.code.appendAssumeCapacity(0xf2); + // REPZ, REPE, REP, Scalar Single-precision + if (prefixes.prefix_f3) self.code.appendAssumeCapacity(0xf3); + + // CS segment override or Branch not taken + if (prefixes.prefix_2e) self.code.appendAssumeCapacity(0x2e); + // DS segment override + if 
(prefixes.prefix_36) self.code.appendAssumeCapacity(0x36); + // ES segment override + if (prefixes.prefix_26) self.code.appendAssumeCapacity(0x26); + // FS segment override + if (prefixes.prefix_64) self.code.appendAssumeCapacity(0x64); + // GS segment override + if (prefixes.prefix_65) self.code.appendAssumeCapacity(0x65); + + // Branch taken + if (prefixes.prefix_3e) self.code.appendAssumeCapacity(0x3e); + + // Operand size override + if (prefixes.prefix_66) self.code.appendAssumeCapacity(0x66); + + // Address size override + if (prefixes.prefix_67) self.code.appendAssumeCapacity(0x67); + } + } + + /// Use 16 bit operand size + /// + /// Note that this flag is overridden by REX.W, if both are present. + pub fn prefix16BitMode(self: Self) void { + self.code.appendAssumeCapacity(0x66); + } + + /// From section 2.2.1.2 of the manual, REX is encoded as b0100WRXB + pub const Rex = struct { + /// Wide, enables 64-bit operation + w: bool = false, + /// Extends the reg field in the ModR/M byte + r: bool = false, + /// Extends the index field in the SIB byte + x: bool = false, + /// Extends the r/m field in the ModR/M byte, + /// or the base field in the SIB byte, + /// or the reg field in the Opcode byte + b: bool = false, + }; + + /// Encodes a REX prefix byte given all the fields + /// + /// Use this byte whenever you need 64 bit operation, + /// or one of reg, index, r/m, base, or opcode-reg might be extended. + /// + /// See struct `Rex` for a description of each field. + /// + /// Does not add a prefix byte if none of the fields are set! + pub fn rex(self: Self, byte: Rex) void { + var value: u8 = 0b0100_0000; + + if (byte.w) value |= 0b1000; + if (byte.r) value |= 0b0100; + if (byte.x) value |= 0b0010; + if (byte.b) value |= 0b0001; + + if (value != 0b0100_0000) { + self.code.appendAssumeCapacity(value); + } + } + + // ------ + // Opcode + // ------ + + /// Encodes a 1 byte opcode + pub fn opcode_1byte(self: Self, opcode: u8) void { + self.code.appendAssumeCapacity(opcode); + } + + /// Encodes a 2 byte opcode + /// + /// e.g. IMUL has the opcode 0x0f 0xaf, so you use + /// + /// encoder.opcode_2byte(0x0f, 0xaf); + pub fn opcode_2byte(self: Self, prefix: u8, opcode: u8) void { + self.code.appendAssumeCapacity(prefix); + self.code.appendAssumeCapacity(opcode); + } + + /// Encodes a 1 byte opcode with a reg field + /// + /// Remember to add a REX prefix byte if reg is extended! + pub fn opcode_withReg(self: Self, opcode: u8, reg: u3) void { + assert(opcode & 0b111 == 0); + self.code.appendAssumeCapacity(opcode | reg); + } + + // ------ + // ModR/M + // ------ + + /// Construct a ModR/M byte given all the fields + /// + /// Remember to add a REX prefix byte if reg or rm are extended! + pub fn modRm(self: Self, mod: u2, reg_or_opx: u3, rm: u3) void { + self.code.appendAssumeCapacity( + @as(u8, mod) << 6 | @as(u8, reg_or_opx) << 3 | rm, + ); + } + + /// Construct a ModR/M byte using direct r/m addressing + /// r/m effective address: r/m + /// + /// Note reg's effective address is always just reg for the ModR/M byte. + /// Remember to add a REX prefix byte if reg or rm are extended! + pub fn modRm_direct(self: Self, reg_or_opx: u3, rm: u3) void { + self.modRm(0b11, reg_or_opx, rm); + } + + /// Construct a ModR/M byte using indirect r/m addressing + /// r/m effective address: [r/m] + /// + /// Note reg's effective address is always just reg for the ModR/M byte. + /// Remember to add a REX prefix byte if reg or rm are extended! 
+ pub fn modRm_indirectDisp0(self: Self, reg_or_opx: u3, rm: u3) void { + assert(rm != 4 and rm != 5); + self.modRm(0b00, reg_or_opx, rm); + } + + /// Construct a ModR/M byte using indirect SIB addressing + /// r/m effective address: [SIB] + /// + /// Note reg's effective address is always just reg for the ModR/M byte. + /// Remember to add a REX prefix byte if reg or rm are extended! + pub fn modRm_SIBDisp0(self: Self, reg_or_opx: u3) void { + self.modRm(0b00, reg_or_opx, 0b100); + } + + /// Construct a ModR/M byte using RIP-relative addressing + /// r/m effective address: [RIP + disp32] + /// + /// Note reg's effective address is always just reg for the ModR/M byte. + /// Remember to add a REX prefix byte if reg or rm are extended! + pub fn modRm_RIPDisp32(self: Self, reg_or_opx: u3) void { + self.modRm(0b00, reg_or_opx, 0b101); + } + + /// Construct a ModR/M byte using indirect r/m with a 8bit displacement + /// r/m effective address: [r/m + disp8] + /// + /// Note reg's effective address is always just reg for the ModR/M byte. + /// Remember to add a REX prefix byte if reg or rm are extended! + pub fn modRm_indirectDisp8(self: Self, reg_or_opx: u3, rm: u3) void { + assert(rm != 4); + self.modRm(0b01, reg_or_opx, rm); + } + + /// Construct a ModR/M byte using indirect SIB with a 8bit displacement + /// r/m effective address: [SIB + disp8] + /// + /// Note reg's effective address is always just reg for the ModR/M byte. + /// Remember to add a REX prefix byte if reg or rm are extended! + pub fn modRm_SIBDisp8(self: Self, reg_or_opx: u3) void { + self.modRm(0b01, reg_or_opx, 0b100); + } + + /// Construct a ModR/M byte using indirect r/m with a 32bit displacement + /// r/m effective address: [r/m + disp32] + /// + /// Note reg's effective address is always just reg for the ModR/M byte. + /// Remember to add a REX prefix byte if reg or rm are extended! + pub fn modRm_indirectDisp32(self: Self, reg_or_opx: u3, rm: u3) void { + assert(rm != 4); + self.modRm(0b10, reg_or_opx, rm); + } + + /// Construct a ModR/M byte using indirect SIB with a 32bit displacement + /// r/m effective address: [SIB + disp32] + /// + /// Note reg's effective address is always just reg for the ModR/M byte. + /// Remember to add a REX prefix byte if reg or rm are extended! + pub fn modRm_SIBDisp32(self: Self, reg_or_opx: u3) void { + self.modRm(0b10, reg_or_opx, 0b100); + } + + // --- + // SIB + // --- + + /// Construct a SIB byte given all the fields + /// + /// Remember to add a REX prefix byte if index or base are extended! + pub fn sib(self: Self, scale: u2, index: u3, base: u3) void { + self.code.appendAssumeCapacity( + @as(u8, scale) << 6 | @as(u8, index) << 3 | base, + ); + } + + /// Construct a SIB byte with scale * index + base, no frills. + /// r/m effective address: [base + scale * index] + /// + /// Remember to add a REX prefix byte if index or base are extended! + pub fn sib_scaleIndexBase(self: Self, scale: u2, index: u3, base: u3) void { + assert(base != 5); + + self.sib(scale, index, base); + } + + /// Construct a SIB byte with scale * index + disp32 + /// r/m effective address: [scale * index + disp32] + /// + /// Remember to add a REX prefix byte if index or base are extended! + pub fn sib_scaleIndexDisp32(self: Self, scale: u2, index: u3) void { + assert(index != 4); + + // scale is actually ignored + // index = 4 means no index + // base = 5 means no base, if mod == 0. 
+ self.sib(scale, index, 5); + } + + /// Construct a SIB byte with just base + /// r/m effective address: [base] + /// + /// Remember to add a REX prefix byte if index or base are extended! + pub fn sib_base(self: Self, base: u3) void { + assert(base != 5); + + // scale is actually ignored + // index = 4 means no index + self.sib(0, 4, base); + } + + /// Construct a SIB byte with just disp32 + /// r/m effective address: [disp32] + /// + /// Remember to add a REX prefix byte if index or base are extended! + pub fn sib_disp32(self: Self) void { + // scale is actually ignored + // index = 4 means no index + // base = 5 means no base, if mod == 0. + self.sib(0, 4, 5); + } + + /// Construct a SIB byte with scale * index + base + disp8 + /// r/m effective address: [base + scale * index + disp8] + /// + /// Remember to add a REX prefix byte if index or base are extended! + pub fn sib_scaleIndexBaseDisp8(self: Self, scale: u2, index: u3, base: u3) void { + self.sib(scale, index, base); + } + + /// Construct a SIB byte with base + disp8, no index + /// r/m effective address: [base + disp8] + /// + /// Remember to add a REX prefix byte if index or base are extended! + pub fn sib_baseDisp8(self: Self, base: u3) void { + // scale is ignored + // index = 4 means no index + self.sib(0, 4, base); + } + + /// Construct a SIB byte with scale * index + base + disp32 + /// r/m effective address: [base + scale * index + disp32] + /// + /// Remember to add a REX prefix byte if index or base are extended! + pub fn sib_scaleIndexBaseDisp32(self: Self, scale: u2, index: u3, base: u3) void { + self.sib(scale, index, base); + } + + /// Construct a SIB byte with base + disp32, no index + /// r/m effective address: [base + disp32] + /// + /// Remember to add a REX prefix byte if index or base are extended! + pub fn sib_baseDisp32(self: Self, base: u3) void { + // scale is ignored + // index = 4 means no index + self.sib(0, 4, base); + } + + // ------------------------- + // Trivial (no bit fiddling) + // ------------------------- + + /// Encode an 8 bit immediate + /// + /// It is sign-extended to 64 bits by the cpu. + pub fn imm8(self: Self, imm: i8) void { + self.code.appendAssumeCapacity(@bitCast(u8, imm)); + } + + /// Encode an 8 bit displacement + /// + /// It is sign-extended to 64 bits by the cpu. + pub fn disp8(self: Self, disp: i8) void { + self.code.appendAssumeCapacity(@bitCast(u8, disp)); + } + + /// Encode an 16 bit immediate + /// + /// It is sign-extended to 64 bits by the cpu. + pub fn imm16(self: Self, imm: i16) void { + self.writeIntLittle(i16, imm); + } + + /// Encode an 32 bit immediate + /// + /// It is sign-extended to 64 bits by the cpu. + pub fn imm32(self: Self, imm: i32) void { + self.writeIntLittle(i32, imm); + } + + /// Encode an 32 bit displacement + /// + /// It is sign-extended to 64 bits by the cpu. + pub fn disp32(self: Self, disp: i32) void { + self.writeIntLittle(i32, disp); + } + + /// Encode an 64 bit immediate + /// + /// It is sign-extended to 64 bits by the cpu. 
+ pub fn imm64(self: Self, imm: u64) void { + self.writeIntLittle(u64, imm); + } +}; + +test "x86_64 Encoder helpers" { + var code = ArrayList(u8).init(testing.allocator); + defer code.deinit(); + + // simple integer multiplication + + // imul eax,edi + // 0faf c7 + { + try code.resize(0); + const encoder = try Encoder.init(&code, 4); + encoder.rex(.{ + .r = Register.eax.isExtended(), + .b = Register.edi.isExtended(), + }); + encoder.opcode_2byte(0x0f, 0xaf); + encoder.modRm_direct( + Register.eax.low_id(), + Register.edi.low_id(), + ); + + try testing.expectEqualSlices(u8, &[_]u8{ 0x0f, 0xaf, 0xc7 }, code.items); + } + + // simple mov + + // mov eax,edi + // 89 f8 + { + try code.resize(0); + const encoder = try Encoder.init(&code, 3); + encoder.rex(.{ + .r = Register.edi.isExtended(), + .b = Register.eax.isExtended(), + }); + encoder.opcode_1byte(0x89); + encoder.modRm_direct( + Register.edi.low_id(), + Register.eax.low_id(), + ); + + try testing.expectEqualSlices(u8, &[_]u8{ 0x89, 0xf8 }, code.items); + } + + // signed integer addition of 32-bit sign extended immediate to 64 bit register + + // add rcx, 2147483647 + // + // Using the following opcode: REX.W + 81 /0 id, we expect the following encoding + // + // 48 : REX.W set for 64 bit operand (*r*cx) + // 81 : opcode for " with immediate" + // c1 : id = rcx, + // : c1 = 11 <-- mod = 11 indicates r/m is register (rcx) + // : 000 <-- opcode_extension = 0 because opcode extension is /0. /0 specifies ADD + // : 001 <-- 001 is rcx + // ffffff7f : 2147483647 + { + try code.resize(0); + const encoder = try Encoder.init(&code, 7); + encoder.rex(.{ .w = true }); // use 64 bit operation + encoder.opcode_1byte(0x81); + encoder.modRm_direct( + 0, + Register.rcx.low_id(), + ); + encoder.imm32(2147483647); + + try testing.expectEqualSlices(u8, &[_]u8{ 0x48, 0x81, 0xc1, 0xff, 0xff, 0xff, 0x7f }, code.items); + } +} + // TODO add these registers to the enum and populate dwarfLocOp // // Return Address register. This is stored in `0(%rsp, "")` and is not a physical register. // RA = (16, "RA"), diff --git a/src/glibc.zig b/src/glibc.zig index a1f02e2f11..6b288ac46d 100644 --- a/src/glibc.zig +++ b/src/glibc.zig @@ -40,10 +40,11 @@ pub const ABI = struct { } }; +// The order of the elements in this array defines the linking order. pub const libs = [_]Lib{ - .{ .name = "c", .sover = 6 }, .{ .name = "m", .sover = 6 }, .{ .name = "pthread", .sover = 0 }, + .{ .name = "c", .sover = 6 }, .{ .name = "dl", .sover = 2 }, .{ .name = "rt", .sover = 1 }, .{ .name = "ld", .sover = 2 }, @@ -763,16 +764,17 @@ pub fn buildSharedObjects(comp: *Compilation) !void { .lt => continue, .gt => { // TODO Expose via compile error mechanism instead of log. - std.log.warn("invalid target glibc version: {}", .{target_version}); + std.log.err("invalid target glibc version: {}", .{target_version}); return error.InvalidTargetGLibCVersion; }, } - } else blk: { + } else { const latest_index = metadata.all_versions.len - 1; - std.log.warn("zig cannot build new glibc version {}; providing instead {}", .{ + // TODO Expose via compile error mechanism instead of log. 
+ std.log.err("zig does not yet provide glibc version {}, the max provided version is {}", .{ target_version, metadata.all_versions[latest_index], }); - break :blk latest_index; + return error.InvalidTargetGLibCVersion; }; { var map_contents = std.ArrayList(u8).init(arena); diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 3f87fd390b..7b1c5474a7 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -1648,19 +1648,18 @@ fn linkWithLLD(self: *Elf, comp: *Compilation) !void { // libc dep if (self.base.options.link_libc) { if (self.base.options.libc_installation != null) { - if (self.base.options.link_mode == .Static) { - try argv.append("--start-group"); - try argv.append("-lc"); - try argv.append("-lm"); - try argv.append("--end-group"); - } else { - try argv.append("-lc"); - try argv.append("-lm"); - } - - if (target.os.tag == .freebsd or target.os.tag == .netbsd or target.os.tag == .openbsd) { - try argv.append("-lpthread"); - } + const needs_grouping = self.base.options.link_mode == .Static; + if (needs_grouping) try argv.append("--start-group"); + // This matches the order of glibc.libs + try argv.appendSlice(&[_][]const u8{ + "-lm", + "-lpthread", + "-lc", + "-ldl", + "-lrt", + "-lutil", + }); + if (needs_grouping) try argv.append("--end-group"); } else if (target.isGnuLibC()) { try argv.append(comp.libunwind_static_lib.?.full_object_path); for (glibc.libs) |lib| { diff --git a/src/link/MachO.zig b/src/link/MachO.zig index dbb84a909c..03793060d3 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -442,6 +442,7 @@ pub fn flushModule(self: *MachO, comp: *Compilation) !void { const text_segment = self.load_commands.items[self.text_segment_cmd_index.?].Segment; const main_cmd = &self.load_commands.items[self.main_cmd_index.?].Main; main_cmd.entryoff = addr - text_segment.inner.vmaddr; + main_cmd.stacksize = self.base.options.stack_size_override orelse 0; self.load_commands_dirty = true; } try self.writeRebaseInfoTable(); @@ -695,7 +696,9 @@ fn linkWithLLD(self: *MachO, comp: *Compilation) !void { Compilation.dump_argv(argv.items); } - try zld.link(input_files.items, full_out_path); + try zld.link(input_files.items, full_out_path, .{ + .stack_size = self.base.options.stack_size_override, + }); break :outer; } diff --git a/src/link/MachO/Zld.zig b/src/link/MachO/Zld.zig index 4d19da1e97..c619d0634b 100644 --- a/src/link/MachO/Zld.zig +++ b/src/link/MachO/Zld.zig @@ -29,6 +29,10 @@ page_size: ?u16 = null, file: ?fs.File = null, out_path: ?[]const u8 = null, +// TODO these args will become obsolete once Zld is coalesced with the incremental +// linker. 
+stack_size: u64 = 0, + objects: std.ArrayListUnmanaged(*Object) = .{}, archives: std.ArrayListUnmanaged(*Archive) = .{}, @@ -172,7 +176,11 @@ pub fn closeFiles(self: Zld) void { if (self.file) |f| f.close(); } -pub fn link(self: *Zld, files: []const []const u8, out_path: []const u8) !void { +const LinkArgs = struct { + stack_size: ?u64 = null, +}; + +pub fn link(self: *Zld, files: []const []const u8, out_path: []const u8, args: LinkArgs) !void { if (files.len == 0) return error.NoInputFiles; if (out_path.len == 0) return error.EmptyOutputPath; @@ -206,6 +214,7 @@ pub fn link(self: *Zld, files: []const []const u8, out_path: []const u8) !void { .read = true, .mode = if (std.Target.current.os.tag == .windows) 0 else 0o777, }); + self.stack_size = args.stack_size orelse 0; try self.populateMetadata(); try self.parseInputFiles(files); @@ -1533,7 +1542,8 @@ fn resolveRelocsAndWriteSections(self: *Zld) !void { } if (rel.target == .section) { const source_sect = object.sections.items[rel.target.section]; - args.source_sect_addr = source_sect.inner.addr; + args.source_source_sect_addr = sect.inner.addr; + args.source_target_sect_addr = source_sect.inner.addr; } rebases: { @@ -1588,7 +1598,8 @@ fn resolveRelocsAndWriteSections(self: *Zld) !void { else => |tt| { if (tt == .signed and rel.target == .section) { const source_sect = object.sections.items[rel.target.section]; - args.source_sect_addr = source_sect.inner.addr; + args.source_source_sect_addr = sect.inner.addr; + args.source_target_sect_addr = source_sect.inner.addr; } args.target_addr = try self.relocTargetAddr(@intCast(u16, object_id), rel.target); }, @@ -2202,6 +2213,7 @@ fn setEntryPoint(self: *Zld) !void { const entry_sym = sym.cast(Symbol.Regular) orelse unreachable; const ec = &self.load_commands.items[self.main_cmd_index.?].Main; ec.entryoff = @intCast(u32, entry_sym.address - seg.inner.vmaddr); + ec.stacksize = self.stack_size; } fn writeRebaseInfoTable(self: *Zld) !void { diff --git a/src/link/MachO/reloc.zig b/src/link/MachO/reloc.zig index 1ce9fa2c2d..89b7aa4228 100644 --- a/src/link/MachO/reloc.zig +++ b/src/link/MachO/reloc.zig @@ -29,7 +29,8 @@ pub const Relocation = struct { source_addr: u64, target_addr: u64, subtractor: ?u64 = null, - source_sect_addr: ?u64 = null, + source_source_sect_addr: ?u64 = null, + source_target_sect_addr: ?u64 = null, }; pub fn resolve(base: *Relocation, args: ResolveArgs) !void { @@ -39,8 +40,10 @@ pub const Relocation = struct { log.debug(" | target address 0x{x}", .{args.target_addr}); if (args.subtractor) |sub| log.debug(" | subtractor address 0x{x}", .{sub}); - if (args.source_sect_addr) |addr| - log.debug(" | source section address 0x{x}", .{addr}); + if (args.source_source_sect_addr) |addr| + log.debug(" | source source section address 0x{x}", .{addr}); + if (args.source_target_sect_addr) |addr| + log.debug(" | source target section address 0x{x}", .{addr}); return switch (base.@"type") { .unsigned => @fieldParentPtr(Unsigned, "base", base).resolve(args), @@ -104,7 +107,7 @@ pub const Unsigned = struct { pub fn resolve(unsigned: Unsigned, args: Relocation.ResolveArgs) !void { const addend = if (unsigned.base.target == .section) - unsigned.addend - @intCast(i64, args.source_sect_addr.?) + unsigned.addend - @intCast(i64, args.source_target_sect_addr.?) 
else unsigned.addend; diff --git a/src/link/MachO/reloc/x86_64.zig b/src/link/MachO/reloc/x86_64.zig index 32f83924e8..a5e3ff2825 100644 --- a/src/link/MachO/reloc/x86_64.zig +++ b/src/link/MachO/reloc/x86_64.zig @@ -33,16 +33,19 @@ pub const Signed = struct { pub fn resolve(signed: Signed, args: Relocation.ResolveArgs) !void { const target_addr = target_addr: { if (signed.base.target == .section) { - const source_target = @intCast(i64, signed.base.offset) + signed.addend + 4 + signed.correction; - const source_disp = source_target - @intCast(i64, args.source_sect_addr.?); + const source_target = @intCast(i64, args.source_source_sect_addr.?) + @intCast(i64, signed.base.offset) + signed.addend + 4; + const source_disp = source_target - @intCast(i64, args.source_target_sect_addr.?); break :target_addr @intCast(i64, args.target_addr) + source_disp; } break :target_addr @intCast(i64, args.target_addr) + signed.addend; }; - const displacement = try math.cast(i32, target_addr - @intCast(i64, args.source_addr) - signed.correction - 4); + const displacement = try math.cast( + i32, + target_addr - @intCast(i64, args.source_addr) - signed.correction - 4, + ); - log.debug(" | calculated addend 0x{x}", .{signed.addend}); - log.debug(" | calculated correction 0x{x}", .{signed.correction}); + log.debug(" | addend 0x{x}", .{signed.addend}); + log.debug(" | correction 0x{x}", .{signed.correction}); log.debug(" | displacement 0x{x}", .{displacement}); mem.writeIntLittle(u32, signed.base.code[0..4], @bitCast(u32, displacement)); @@ -172,20 +175,14 @@ pub const Parser = struct { const offset = @intCast(u32, rel.r_address); const inst = parser.code[offset..][0..4]; - const addend = mem.readIntLittle(i32, inst); - - const correction: i4 = correction: { - if (is_extern) break :correction 0; - - const corr: i4 = switch (rel_type) { - .X86_64_RELOC_SIGNED => 0, - .X86_64_RELOC_SIGNED_1 => 1, - .X86_64_RELOC_SIGNED_2 => 2, - .X86_64_RELOC_SIGNED_4 => 4, - else => unreachable, - }; - break :correction corr; + const correction: i4 = switch (rel_type) { + .X86_64_RELOC_SIGNED => 0, + .X86_64_RELOC_SIGNED_1 => 1, + .X86_64_RELOC_SIGNED_2 => 2, + .X86_64_RELOC_SIGNED_4 => 4, + else => unreachable, }; + const addend = mem.readIntLittle(i32, inst) + correction; var signed = try parser.allocator.create(Signed); errdefer parser.allocator.destroy(signed); diff --git a/src/main.zig b/src/main.zig index 149c3957e4..b55e9c9111 100644 --- a/src/main.zig +++ b/src/main.zig @@ -2302,7 +2302,7 @@ fn cmdTranslateC(comp: *Compilation, arena: *Allocator, enable_cache: bool) !voi defer if (enable_cache) man.deinit(); man.hash.add(@as(u16, 0xb945)); // Random number to distinguish translate-c from compiling C objects - _ = man.addFile(c_source_file.src_path, null) catch |err| { + man.hashCSource(c_source_file) catch |err| { fatal("unable to process '{s}': {s}", .{ c_source_file.src_path, @errorName(err) }); }; @@ -2332,12 +2332,16 @@ fn cmdTranslateC(comp: *Compilation, arena: *Allocator, enable_cache: bool) !voi } // Convert to null terminated args. 
- const new_argv_with_sentinel = try arena.alloc(?[*:0]const u8, argv.items.len + 1); - new_argv_with_sentinel[argv.items.len] = null; - const new_argv = new_argv_with_sentinel[0..argv.items.len :null]; + const clang_args_len = argv.items.len + c_source_file.extra_flags.len; + const new_argv_with_sentinel = try arena.alloc(?[*:0]const u8, clang_args_len + 1); + new_argv_with_sentinel[clang_args_len] = null; + const new_argv = new_argv_with_sentinel[0..clang_args_len :null]; for (argv.items) |arg, i| { new_argv[i] = try arena.dupeZ(u8, arg); } + for (c_source_file.extra_flags) |arg, i| { + new_argv[argv.items.len + i] = try arena.dupeZ(u8, arg); + } const c_headers_dir_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{"include"}); const c_headers_dir_path_z = try arena.dupeZ(u8, c_headers_dir_path); @@ -3526,88 +3530,8 @@ test "fds" { gimmeMoreOfThoseSweetSweetFileDescriptors(); } -fn detectNativeCpuWithLLVM( - arch: std.Target.Cpu.Arch, - llvm_cpu_name_z: ?[*:0]const u8, - llvm_cpu_features_opt: ?[*:0]const u8, -) !std.Target.Cpu { - var result = std.Target.Cpu.baseline(arch); - - if (llvm_cpu_name_z) |cpu_name_z| { - const llvm_cpu_name = mem.spanZ(cpu_name_z); - - for (arch.allCpuModels()) |model| { - const this_llvm_name = model.llvm_name orelse continue; - if (mem.eql(u8, this_llvm_name, llvm_cpu_name)) { - // Here we use the non-dependencies-populated set, - // so that subtracting features later in this function - // affect the prepopulated set. - result = std.Target.Cpu{ - .arch = arch, - .model = model, - .features = model.features, - }; - break; - } - } - } - - const all_features = arch.allFeaturesList(); - - if (llvm_cpu_features_opt) |llvm_cpu_features| { - var it = mem.tokenize(mem.spanZ(llvm_cpu_features), ","); - while (it.next()) |decorated_llvm_feat| { - var op: enum { - add, - sub, - } = undefined; - var llvm_feat: []const u8 = undefined; - if (mem.startsWith(u8, decorated_llvm_feat, "+")) { - op = .add; - llvm_feat = decorated_llvm_feat[1..]; - } else if (mem.startsWith(u8, decorated_llvm_feat, "-")) { - op = .sub; - llvm_feat = decorated_llvm_feat[1..]; - } else { - return error.InvalidLlvmCpuFeaturesFormat; - } - for (all_features) |feature, index_usize| { - const this_llvm_name = feature.llvm_name orelse continue; - if (mem.eql(u8, llvm_feat, this_llvm_name)) { - const index = @intCast(std.Target.Cpu.Feature.Set.Index, index_usize); - switch (op) { - .add => result.features.addFeature(index), - .sub => result.features.removeFeature(index), - } - break; - } - } - } - } - - result.features.populateDependencies(all_features); - return result; -} - fn detectNativeTargetInfo(gpa: *Allocator, cross_target: std.zig.CrossTarget) !std.zig.system.NativeTargetInfo { - var info = try std.zig.system.NativeTargetInfo.detect(gpa, cross_target); - if (info.cpu_detection_unimplemented) { - const arch = std.Target.current.cpu.arch; - - // We want to just use detected_info.target but implementing - // CPU model & feature detection is todo so here we rely on LLVM. 
- // https://github.com/ziglang/zig/issues/4591 - if (!build_options.have_llvm) - fatal("CPU features detection is not yet available for {s} without LLVM extensions", .{@tagName(arch)}); - - const llvm = @import("codegen/llvm/bindings.zig"); - const llvm_cpu_name = llvm.GetHostCPUName(); - const llvm_cpu_features = llvm.GetNativeFeatures(); - info.target.cpu = try detectNativeCpuWithLLVM(arch, llvm_cpu_name, llvm_cpu_features); - cross_target.updateCpuFeatures(&info.target.cpu.features); - info.target.cpu.arch = cross_target.getCpuArch(); - } - return info; + return std.zig.system.NativeTargetInfo.detect(gpa, cross_target); } /// Indicate that we are now terminating with a successful exit code. diff --git a/src/register_manager.zig b/src/register_manager.zig index 2c812cef89..c9235b88c1 100644 --- a/src/register_manager.zig +++ b/src/register_manager.zig @@ -1,5 +1,6 @@ const std = @import("std"); const math = std.math; +const mem = std.mem; const assert = std.debug.assert; const Allocator = std.mem.Allocator; const ir = @import("ir.zig"); @@ -66,8 +67,13 @@ pub fn RegisterManager( } /// Returns `null` if all registers are allocated. - pub fn tryAllocRegs(self: *Self, comptime count: comptime_int, insts: [count]*ir.Inst) ?[count]Register { - if (self.tryAllocRegsWithoutTracking(count)) |regs| { + pub fn tryAllocRegs( + self: *Self, + comptime count: comptime_int, + insts: [count]*ir.Inst, + exceptions: []Register, + ) ?[count]Register { + if (self.tryAllocRegsWithoutTracking(count, exceptions)) |regs| { for (regs) |reg, i| { const index = reg.allocIndex().?; // allocIndex() on a callee-preserved reg should never return null self.registers[index] = insts[i]; @@ -81,21 +87,30 @@ pub fn RegisterManager( } /// Returns `null` if all registers are allocated. - pub fn tryAllocReg(self: *Self, inst: *ir.Inst) ?Register { - return if (tryAllocRegs(self, 1, .{inst})) |regs| regs[0] else null; + pub fn tryAllocReg(self: *Self, inst: *ir.Inst, exceptions: []Register) ?Register { + return if (tryAllocRegs(self, 1, .{inst}, exceptions)) |regs| regs[0] else null; } - pub fn allocRegs(self: *Self, comptime count: comptime_int, insts: [count]*ir.Inst) ![count]Register { + pub fn allocRegs( + self: *Self, + comptime count: comptime_int, + insts: [count]*ir.Inst, + exceptions: []Register, + ) ![count]Register { comptime assert(count > 0 and count <= callee_preserved_regs.len); + assert(count + exceptions.len <= callee_preserved_regs.len); - return self.tryAllocRegs(count, insts) orelse blk: { + return self.tryAllocRegs(count, insts, exceptions) orelse blk: { // We'll take over the first count registers. Spill // the instructions that were previously there to a // stack allocations. 
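// Note: registers listed in `exceptions` are skipped by the loop below, so only
// callee-preserved registers outside that set are claimed (and spilled if occupied).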
var regs: [count]Register = undefined; - std.mem.copy(Register, ®s, callee_preserved_regs[0..count]); + var i: usize = 0; + for (callee_preserved_regs) |reg| { + if (i >= count) break; + if (mem.indexOfScalar(Register, exceptions, reg) != null) continue; + regs[i] = reg; - for (regs) |reg, i| { const index = reg.allocIndex().?; // allocIndex() on a callee-preserved reg should never return null if (self.isRegFree(reg)) { self.markRegUsed(reg); @@ -104,21 +119,28 @@ pub fn RegisterManager( try self.getFunction().spillInstruction(spilled_inst.src, reg, spilled_inst); } self.registers[index] = insts[i]; + + i += 1; } break :blk regs; }; } - pub fn allocReg(self: *Self, inst: *ir.Inst) !Register { - return (try self.allocRegs(1, .{inst}))[0]; + pub fn allocReg(self: *Self, inst: *ir.Inst, exceptions: []Register) !Register { + return (try self.allocRegs(1, .{inst}, exceptions))[0]; } /// Does not track the registers. /// Returns `null` if not enough registers are free. - pub fn tryAllocRegsWithoutTracking(self: *Self, comptime count: comptime_int) ?[count]Register { + pub fn tryAllocRegsWithoutTracking( + self: *Self, + comptime count: comptime_int, + exceptions: []Register, + ) ?[count]Register { comptime if (callee_preserved_regs.len == 0) return null; comptime assert(count > 0 and count <= callee_preserved_regs.len); + assert(count + exceptions.len <= callee_preserved_regs.len); const free_registers = @popCount(FreeRegInt, self.free_registers); if (free_registers < count) return null; @@ -127,30 +149,35 @@ pub fn RegisterManager( var i: usize = 0; for (callee_preserved_regs) |reg| { if (i >= count) break; + if (mem.indexOfScalar(Register, exceptions, reg) != null) continue; if (self.isRegFree(reg)) { regs[i] = reg; i += 1; } } - return regs; + + return if (i < count) null else regs; } /// Does not track the register. /// Returns `null` if all registers are allocated. - pub fn tryAllocRegWithoutTracking(self: *Self) ?Register { - return if (self.tryAllocRegsWithoutTracking(1)) |regs| regs[0] else null; + pub fn tryAllocRegWithoutTracking(self: *Self, exceptions: []Register) ?Register { + return if (self.tryAllocRegsWithoutTracking(1, exceptions)) |regs| regs[0] else null; } /// Does not track the registers - pub fn allocRegsWithoutTracking(self: *Self, comptime count: comptime_int) ![count]Register { - return self.tryAllocRegsWithoutTracking(count) orelse blk: { + pub fn allocRegsWithoutTracking(self: *Self, comptime count: comptime_int, exceptions: []Register) ![count]Register { + return self.tryAllocRegsWithoutTracking(count, exceptions) orelse blk: { // We'll take over the first count registers. Spill // the instructions that were previously there to a // stack allocations. var regs: [count]Register = undefined; - std.mem.copy(Register, ®s, callee_preserved_regs[0..count]); + var i: usize = 0; + for (callee_preserved_regs) |reg| { + if (i >= count) break; + if (mem.indexOfScalar(Register, exceptions, reg) != null) continue; + regs[i] = reg; - for (regs) |reg, i| { const index = reg.allocIndex().?; // allocIndex() on a callee-preserved reg should never return null if (!self.isRegFree(reg)) { const spilled_inst = self.registers[index].?; @@ -158,6 +185,8 @@ pub fn RegisterManager( self.registers[index] = null; self.markRegFree(reg); } + + i += 1; } break :blk regs; @@ -165,8 +194,8 @@ pub fn RegisterManager( } /// Does not track the register. 
- pub fn allocRegWithoutTracking(self: *Self) !Register { - return (try self.allocRegsWithoutTracking(1))[0]; + pub fn allocRegWithoutTracking(self: *Self, exceptions: []Register) !Register { + return (try self.allocRegsWithoutTracking(1, exceptions))[0]; } /// Allocates the specified register with the specified @@ -270,9 +299,9 @@ test "tryAllocReg: no spilling" { try std.testing.expect(!function.register_manager.isRegAllocated(.r2)); try std.testing.expect(!function.register_manager.isRegAllocated(.r3)); - try std.testing.expectEqual(@as(?MockRegister, .r2), function.register_manager.tryAllocReg(&mock_instruction)); - try std.testing.expectEqual(@as(?MockRegister, .r3), function.register_manager.tryAllocReg(&mock_instruction)); - try std.testing.expectEqual(@as(?MockRegister, null), function.register_manager.tryAllocReg(&mock_instruction)); + try std.testing.expectEqual(@as(?MockRegister, .r2), function.register_manager.tryAllocReg(&mock_instruction, &.{})); + try std.testing.expectEqual(@as(?MockRegister, .r3), function.register_manager.tryAllocReg(&mock_instruction, &.{})); + try std.testing.expectEqual(@as(?MockRegister, null), function.register_manager.tryAllocReg(&mock_instruction, &.{})); try std.testing.expect(function.register_manager.isRegAllocated(.r2)); try std.testing.expect(function.register_manager.isRegAllocated(.r3)); @@ -301,16 +330,16 @@ test "allocReg: spilling" { try std.testing.expect(!function.register_manager.isRegAllocated(.r2)); try std.testing.expect(!function.register_manager.isRegAllocated(.r3)); - try std.testing.expectEqual(@as(?MockRegister, .r2), try function.register_manager.allocReg(&mock_instruction)); - try std.testing.expectEqual(@as(?MockRegister, .r3), try function.register_manager.allocReg(&mock_instruction)); + try std.testing.expectEqual(@as(?MockRegister, .r2), try function.register_manager.allocReg(&mock_instruction, &.{})); + try std.testing.expectEqual(@as(?MockRegister, .r3), try function.register_manager.allocReg(&mock_instruction, &.{})); // Spill a register - try std.testing.expectEqual(@as(?MockRegister, .r2), try function.register_manager.allocReg(&mock_instruction)); + try std.testing.expectEqual(@as(?MockRegister, .r2), try function.register_manager.allocReg(&mock_instruction, &.{})); try std.testing.expectEqualSlices(MockRegister, &[_]MockRegister{.r2}, function.spilled.items); // No spilling necessary function.register_manager.freeReg(.r3); - try std.testing.expectEqual(@as(?MockRegister, .r3), try function.register_manager.allocReg(&mock_instruction)); + try std.testing.expectEqual(@as(?MockRegister, .r3), try function.register_manager.allocReg(&mock_instruction, &.{})); try std.testing.expectEqualSlices(MockRegister, &[_]MockRegister{.r2}, function.spilled.items); } diff --git a/src/stage1/all_types.hpp b/src/stage1/all_types.hpp index 0991faf815..9eed42f4cd 100644 --- a/src/stage1/all_types.hpp +++ b/src/stage1/all_types.hpp @@ -718,7 +718,6 @@ struct AstNodeFnProto { Buf *name; ZigList params; AstNode *return_type; - Token *return_anytype_token; AstNode *fn_def_node; // populated if this is an extern declaration Buf *lib_name; diff --git a/src/stage1/analyze.cpp b/src/stage1/analyze.cpp index 8ca845d2cf..a87fd80613 100644 --- a/src/stage1/analyze.cpp +++ b/src/stage1/analyze.cpp @@ -2125,18 +2125,6 @@ static ZigType *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *child_sc return g->builtin_types.entry_invalid; } - if (fn_proto->return_anytype_token != nullptr) { - if (!calling_convention_allows_zig_types(fn_type_id.cc)) 
{ - add_node_error(g, fn_proto->return_type, - buf_sprintf("return type 'anytype' not allowed in function with calling convention '%s'", - calling_convention_name(fn_type_id.cc))); - return g->builtin_types.entry_invalid; - } - add_node_error(g, proto_node, - buf_sprintf("TODO implement inferred return types https://github.com/ziglang/zig/issues/447")); - return g->builtin_types.entry_invalid; - } - ZigType *specified_return_type = analyze_type_expr(g, child_scope, fn_proto->return_type); if (type_is_invalid(specified_return_type)) { fn_type_id.return_type = g->builtin_types.entry_invalid; @@ -10220,4 +10208,3 @@ const char *float_op_to_name(BuiltinFnId op) { zig_unreachable(); } } - diff --git a/src/stage1/ast_render.cpp b/src/stage1/ast_render.cpp index 9aebac1d28..ed53cf7ccb 100644 --- a/src/stage1/ast_render.cpp +++ b/src/stage1/ast_render.cpp @@ -490,17 +490,13 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) { fprintf(ar->f, ")"); } - if (node->data.fn_proto.return_anytype_token != nullptr) { - fprintf(ar->f, "anytype"); - } else { - AstNode *return_type_node = node->data.fn_proto.return_type; - assert(return_type_node != nullptr); - fprintf(ar->f, " "); - if (node->data.fn_proto.auto_err_set) { - fprintf(ar->f, "!"); - } - render_node_grouped(ar, return_type_node); + AstNode *return_type_node = node->data.fn_proto.return_type; + assert(return_type_node != nullptr); + fprintf(ar->f, " "); + if (node->data.fn_proto.auto_err_set) { + fprintf(ar->f, "!"); } + render_node_grouped(ar, return_type_node); break; } case NodeTypeFnDef: diff --git a/src/stage1/ir.cpp b/src/stage1/ir.cpp index 323874ad2b..4bd608a687 100644 --- a/src/stage1/ir.cpp +++ b/src/stage1/ir.cpp @@ -10104,19 +10104,12 @@ static IrInstSrc *ir_gen_fn_proto(IrBuilderSrc *irb, Scope *parent_scope, AstNod } IrInstSrc *return_type; - if (node->data.fn_proto.return_anytype_token == nullptr) { - if (node->data.fn_proto.return_type == nullptr) { - return_type = ir_build_const_type(irb, parent_scope, node, irb->codegen->builtin_types.entry_void); - } else { - return_type = ir_gen_node(irb, node->data.fn_proto.return_type, parent_scope); - if (return_type == irb->codegen->invalid_inst_src) - return irb->codegen->invalid_inst_src; - } + if (node->data.fn_proto.return_type == nullptr) { + return_type = ir_build_const_type(irb, parent_scope, node, irb->codegen->builtin_types.entry_void); } else { - add_node_error(irb->codegen, node, - buf_sprintf("TODO implement inferred return types https://github.com/ziglang/zig/issues/447")); - return irb->codegen->invalid_inst_src; - //return_type = nullptr; + return_type = ir_gen_node(irb, node->data.fn_proto.return_type, parent_scope); + if (return_type == irb->codegen->invalid_inst_src) + return irb->codegen->invalid_inst_src; } return ir_build_fn_proto(irb, parent_scope, node, param_types, align_value, callconv_value, return_type, is_var_args); @@ -14978,7 +14971,7 @@ static IrInstGen *ir_analyze_struct_literal_to_array(IrAnalyze *ira, IrInst* sou if ((err = type_resolve(ira->codegen, wanted_type, ResolveStatusSizeKnown))) return ira->codegen->invalid_inst_gen; - + size_t array_len = wanted_type->data.array.len; size_t instr_field_count = actual_type->data.structure.src_field_count; assert(array_len == instr_field_count); @@ -20953,44 +20946,42 @@ static IrInstGen *ir_analyze_fn_call(IrAnalyze *ira, IrInst* source_instr, inst_fn_type_id.alignment = align_bytes; } - if (fn_proto_node->data.fn_proto.return_anytype_token == nullptr) { - AstNode *return_type_node = 
fn_proto_node->data.fn_proto.return_type; - ZigType *specified_return_type = ir_analyze_type_expr(ira, impl_fn->child_scope, return_type_node); - if (type_is_invalid(specified_return_type)) - return ira->codegen->invalid_inst_gen; + AstNode *return_type_node = fn_proto_node->data.fn_proto.return_type; + ZigType *specified_return_type = ir_analyze_type_expr(ira, impl_fn->child_scope, return_type_node); + if (type_is_invalid(specified_return_type)) + return ira->codegen->invalid_inst_gen; - if(!is_valid_return_type(specified_return_type)){ - ErrorMsg *msg = ir_add_error(ira, source_instr, - buf_sprintf("call to generic function with %s return type '%s' not allowed", type_id_name(specified_return_type->id), buf_ptr(&specified_return_type->name))); - add_error_note(ira->codegen, msg, fn_proto_node, buf_sprintf("function declared here")); + if(!is_valid_return_type(specified_return_type)){ + ErrorMsg *msg = ir_add_error(ira, source_instr, + buf_sprintf("call to generic function with %s return type '%s' not allowed", type_id_name(specified_return_type->id), buf_ptr(&specified_return_type->name))); + add_error_note(ira->codegen, msg, fn_proto_node, buf_sprintf("function declared here")); - Tld *tld = find_decl(ira->codegen, &fn_entry->fndef_scope->base, &specified_return_type->name); - if (tld != nullptr) { - add_error_note(ira->codegen, msg, tld->source_node, buf_sprintf("type declared here")); - } - return ira->codegen->invalid_inst_gen; + Tld *tld = find_decl(ira->codegen, &fn_entry->fndef_scope->base, &specified_return_type->name); + if (tld != nullptr) { + add_error_note(ira->codegen, msg, tld->source_node, buf_sprintf("type declared here")); } + return ira->codegen->invalid_inst_gen; + } - if (fn_proto_node->data.fn_proto.auto_err_set) { - ZigType *inferred_err_set_type = get_auto_err_set_type(ira->codegen, impl_fn); - if ((err = type_resolve(ira->codegen, specified_return_type, ResolveStatusSizeKnown))) - return ira->codegen->invalid_inst_gen; - inst_fn_type_id.return_type = get_error_union_type(ira->codegen, inferred_err_set_type, specified_return_type); - } else { - inst_fn_type_id.return_type = specified_return_type; - } - - switch (type_requires_comptime(ira->codegen, specified_return_type)) { - case ReqCompTimeYes: - // Throw out our work and call the function as if it were comptime. - return ir_analyze_fn_call(ira, source_instr, fn_entry, fn_type, fn_ref, first_arg_ptr, - first_arg_ptr_src, CallModifierCompileTime, new_stack, new_stack_src, is_async_call_builtin, - args_ptr, args_len, ret_ptr, call_result_loc); - case ReqCompTimeInvalid: + if (fn_proto_node->data.fn_proto.auto_err_set) { + ZigType *inferred_err_set_type = get_auto_err_set_type(ira->codegen, impl_fn); + if ((err = type_resolve(ira->codegen, specified_return_type, ResolveStatusSizeKnown))) return ira->codegen->invalid_inst_gen; - case ReqCompTimeNo: - break; - } + inst_fn_type_id.return_type = get_error_union_type(ira->codegen, inferred_err_set_type, specified_return_type); + } else { + inst_fn_type_id.return_type = specified_return_type; + } + + switch (type_requires_comptime(ira->codegen, specified_return_type)) { + case ReqCompTimeYes: + // Throw out our work and call the function as if it were comptime. 
+                return ir_analyze_fn_call(ira, source_instr, fn_entry, fn_type, fn_ref, first_arg_ptr,
+                    first_arg_ptr_src, CallModifierCompileTime, new_stack, new_stack_src, is_async_call_builtin,
+                    args_ptr, args_len, ret_ptr, call_result_loc);
+            case ReqCompTimeInvalid:
+                return ira->codegen->invalid_inst_gen;
+            case ReqCompTimeNo:
+                break;
         }
 
         auto existing_entry = ira->codegen->generic_table.put_unique(generic_id, impl_fn);
diff --git a/src/stage1/parser.cpp b/src/stage1/parser.cpp
index f152f245b7..08323d3086 100644
--- a/src/stage1/parser.cpp
+++ b/src/stage1/parser.cpp
@@ -820,21 +820,19 @@ static AstNode *ast_parse_fn_proto(ParseContext *pc) {
     AstNode *align_expr = ast_parse_byte_align(pc);
     AstNode *section_expr = ast_parse_link_section(pc);
     AstNode *callconv_expr = ast_parse_callconv(pc);
-    Token *anytype = eat_token_if(pc, TokenIdKeywordAnyType);
     Token *exmark = nullptr;
     AstNode *return_type = nullptr;
-    if (anytype == nullptr) {
-        exmark = eat_token_if(pc, TokenIdBang);
-        return_type = ast_parse_type_expr(pc);
-        if (return_type == nullptr) {
-            Token *next = peek_token(pc);
-            ast_error(
-                pc,
-                next,
-                "expected return type (use 'void' to return nothing), found: '%s'",
-                token_name(next->id)
-            );
-        }
+
+    exmark = eat_token_if(pc, TokenIdBang);
+    return_type = ast_parse_type_expr(pc);
+    if (return_type == nullptr) {
+        Token *next = peek_token(pc);
+        ast_error(
+            pc,
+            next,
+            "expected return type (use 'void' to return nothing), found: '%s'",
+            token_name(next->id)
+        );
     }
 
@@ -844,7 +842,6 @@ static AstNode *ast_parse_fn_proto(ParseContext *pc) {
     res->data.fn_proto.align_expr = align_expr;
     res->data.fn_proto.section_expr = section_expr;
     res->data.fn_proto.callconv_expr = callconv_expr;
-    res->data.fn_proto.return_anytype_token = anytype;
     res->data.fn_proto.auto_err_set = exmark != nullptr;
     res->data.fn_proto.return_type = return_type;
 
diff --git a/src/target.zig b/src/target.zig
index 25ed726fe6..1e31f99dc1 100644
--- a/src/target.zig
+++ b/src/target.zig
@@ -24,6 +24,10 @@ pub const available_libcs = [_]ArchOsAbi{
     .{ .arch = .arm, .os = .linux, .abi = .gnueabihf },
     .{ .arch = .arm, .os = .linux, .abi = .musleabi },
     .{ .arch = .arm, .os = .linux, .abi = .musleabihf },
+    .{ .arch = .thumb, .os = .linux, .abi = .gnueabi },
+    .{ .arch = .thumb, .os = .linux, .abi = .gnueabihf },
+    .{ .arch = .thumb, .os = .linux, .abi = .musleabi },
+    .{ .arch = .thumb, .os = .linux, .abi = .musleabihf },
     .{ .arch = .arm, .os = .windows, .abi = .gnu },
     .{ .arch = .csky, .os = .linux, .abi = .gnueabi },
     .{ .arch = .csky, .os = .linux, .abi = .gnueabihf },
@@ -97,7 +101,7 @@ pub fn libCGenericName(target: std.Target) [:0]const u8 {
 pub fn archMuslName(arch: std.Target.Cpu.Arch) [:0]const u8 {
     switch (arch) {
         .aarch64, .aarch64_be => return "aarch64",
-        .arm, .armeb => return "arm",
+        .arm, .armeb, .thumb, .thumbeb => return "arm",
         .mips, .mipsel => return "mips",
         .mips64el, .mips64 => return "mips64",
         .powerpc => return "powerpc",
diff --git a/src/translate_c.zig b/src/translate_c.zig
index 8244d66e94..1f8d818f0b 100644
--- a/src/translate_c.zig
+++ b/src/translate_c.zig
@@ -447,7 +447,13 @@ fn declVisitorNamesOnly(c: *Context, decl: *const clang.Decl) Error!void {
             // TODO https://github.com/ziglang/zig/issues/3756
             // TODO https://github.com/ziglang/zig/issues/1802
             const name = if (isZigPrimitiveType(decl_name)) try std.fmt.allocPrint(c.arena, "{s}_{d}", .{ decl_name, c.getMangle() }) else decl_name;
-            try c.unnamed_typedefs.putNoClobber(c.gpa, addr, name);
+            const result = try c.unnamed_typedefs.getOrPut(c.gpa, addr);
+            if (result.found_existing) {
+                // One typedef can declare multiple names.
+                // Don't put this one in `decl_table` so it's processed later.
+                return;
+            }
+            result.entry.value = name;
             // Put this typedef in the decl_table to avoid redefinitions.
             try c.decl_table.putNoClobber(c.gpa, @ptrToInt(typedef_decl.getCanonicalDecl()), name);
         }
diff --git a/test/run_translated_c.zig b/test/run_translated_c.zig
index 314ef2889d..250f7e67bc 100644
--- a/test/run_translated_c.zig
+++ b/test/run_translated_c.zig
@@ -1476,4 +1476,18 @@ pub fn addCases(cases: *tests.RunTranslatedCContext) void {
         \\    return 0;
         \\}
     , "");
+
+    cases.add("typedef with multiple names",
+        \\#include <stdlib.h>
+        \\typedef struct {
+        \\    char field;
+        \\} a_t, b_t;
+        \\
+        \\int main(void) {
+        \\    a_t a = { .field = 42 };
+        \\    b_t b = a;
+        \\    if (b.field != 42) abort();
+        \\    return 0;
+        \\}
+    , "");
 }
diff --git a/test/stage2/arm.zig b/test/stage2/arm.zig
index f4efbd9b2a..31b3c06dcd 100644
--- a/test/stage2/arm.zig
+++ b/test/stage2/arm.zig
@@ -458,4 +458,57 @@ pub fn addCases(ctx: *TestContext) !void {
             "",
         );
     }
+
+    {
+        var case = ctx.exe("spilling registers", linux_arm);
+        case.addCompareOutput(
+            \\export fn _start() noreturn {
+            \\    assert(add(3, 4) == 791);
+            \\    exit();
+            \\}
+            \\
+            \\fn add(a: u32, b: u32) u32 {
+            \\    const x: u32 = blk: {
+            \\        const c = a + b; // 7
+            \\        const d = a + c; // 10
+            \\        const e = d + b; // 14
+            \\        const f = d + e; // 24
+            \\        const g = e + f; // 38
+            \\        const h = f + g; // 62
+            \\        const i = g + h; // 100
+            \\        const j = i + d; // 110
+            \\        const k = i + j; // 210
+            \\        const l = k + c; // 217
+            \\        const m = l + d; // 227
+            \\        const n = m + e; // 241
+            \\        const o = n + f; // 265
+            \\        const p = o + g; // 303
+            \\        const q = p + h; // 365
+            \\        const r = q + i; // 465
+            \\        const s = r + j; // 575
+            \\        const t = s + k; // 785
+            \\        break :blk t;
+            \\    };
+            \\    const y = x + a; // 788
+            \\    const z = y + a; // 791
+            \\    return z;
+            \\}
+            \\
+            \\fn assert(ok: bool) void {
+            \\    if (!ok) unreachable;
+            \\}
+            \\
+            \\fn exit() noreturn {
+            \\    asm volatile ("svc #0"
+            \\        :
+            \\        : [number] "{r7}" (1),
+            \\          [arg1] "{r0}" (0)
+            \\        : "memory"
+            \\    );
+            \\    unreachable;
+            \\}
+        ,
+            "",
+        );
+    }
 }
diff --git a/test/stage2/test.zig b/test/stage2/test.zig
index 440042798f..32bf36d9bb 100644
--- a/test/stage2/test.zig
+++ b/test/stage2/test.zig
@@ -318,6 +318,81 @@ pub fn addCases(ctx: *TestContext) !void {
         , &[_][]const u8{":2:15: error: incompatible types: 'bool' and 'comptime_int'"});
     }
 
+    {
+        var case = ctx.exe("multiplying numbers at runtime and comptime", linux_x64);
+        case.addCompareOutput(
+            \\export fn _start() noreturn {
+            \\    mul(3, 4);
+            \\
+            \\    exit();
+            \\}
+            \\
+            \\fn mul(a: u32, b: u32) void {
+            \\    if (a * b != 12) unreachable;
+            \\}
+            \\
+            \\fn exit() noreturn {
+            \\    asm volatile ("syscall"
+            \\        :
+            \\        : [number] "{rax}" (231),
+            \\          [arg1] "{rdi}" (0)
+            \\        : "rcx", "r11", "memory"
+            \\    );
+            \\    unreachable;
+            \\}
+        ,
+            "",
+        );
+        // comptime function call
+        case.addCompareOutput(
+            \\export fn _start() noreturn {
+            \\    exit();
+            \\}
+            \\
+            \\fn mul(a: u32, b: u32) u32 {
+            \\    return a * b;
+            \\}
+            \\
+            \\const x = mul(3, 4);
+            \\
+            \\fn exit() noreturn {
+            \\    asm volatile ("syscall"
+            \\        :
+            \\        : [number] "{rax}" (231),
+            \\          [arg1] "{rdi}" (x - 12)
+            \\        : "rcx", "r11", "memory"
+            \\    );
+            \\    unreachable;
+            \\}
+        ,
+            "",
+        );
+        // Inline function call
+        case.addCompareOutput(
+            \\export fn _start() noreturn {
+            \\    var x: usize = 5;
+            \\    const y = mul(2, 3, x);
+            \\    exit(y - 30);
+            \\}
+            \\
+            \\fn mul(a: usize, b: usize, c: usize) callconv(.Inline) usize {
+            \\    return a * b * c;
+            \\}
+            \\
+            \\fn exit(code: usize) noreturn {
+            \\    asm volatile ("syscall"
+            \\        :
+            \\        : [number] "{rax}" (231),
+            \\          [arg1] "{rdi}" (code)
+            \\        : "rcx", "r11", "memory"
+            \\    );
+            \\    unreachable;
+            \\}
+        ,
+            "",
+        );
+    }
+
     {
         var case = ctx.exe("assert function", linux_x64);
         case.addCompareOutput(
@@ -700,7 +775,8 @@ pub fn addCases(ctx: *TestContext) !void {
         // Spilling registers to the stack.
         case.addCompareOutput(
             \\pub export fn _start() noreturn {
-            \\    assert(add(3, 4) == 791);
+            \\    assert(add(3, 4) == 1221);
+            \\    assert(mul(3, 4) == 21609);
             \\
             \\    exit();
             \\}
@@ -716,19 +792,47 @@ pub fn addCases(ctx: *TestContext) !void {
             \\        const i = g + h; // 100
             \\        const j = i + d; // 110
             \\        const k = i + j; // 210
-            \\        const l = k + c; // 217
-            \\        const m = l + d; // 227
-            \\        const n = m + e; // 241
-            \\        const o = n + f; // 265
-            \\        const p = o + g; // 303
-            \\        const q = p + h; // 365
-            \\        const r = q + i; // 465
-            \\        const s = r + j; // 575
-            \\        const t = s + k; // 785
-            \\        break :blk t;
+            \\        const l = j + k; // 320
+            \\        const m = l + c; // 327
+            \\        const n = m + d; // 337
+            \\        const o = n + e; // 351
+            \\        const p = o + f; // 375
+            \\        const q = p + g; // 413
+            \\        const r = q + h; // 475
+            \\        const s = r + i; // 575
+            \\        const t = s + j; // 685
+            \\        const u = t + k; // 895
+            \\        const v = u + l; // 1215
+            \\        break :blk v;
             \\    };
-            \\    const y = x + a; // 788
-            \\    const z = y + a; // 791
+            \\    const y = x + a; // 1218
+            \\    const z = y + a; // 1221
+            \\    return z;
+            \\}
+            \\
+            \\fn mul(a: u32, b: u32) u32 {
+            \\    const x: u32 = blk: {
+            \\        const c = a * a * a * a; // 81
+            \\        const d = a * a * a * b; // 108
+            \\        const e = a * a * b * a; // 108
+            \\        const f = a * a * b * b; // 144
+            \\        const g = a * b * a * a; // 108
+            \\        const h = a * b * a * b; // 144
+            \\        const i = a * b * b * a; // 144
+            \\        const j = a * b * b * b; // 192
+            \\        const k = b * a * a * a; // 108
+            \\        const l = b * a * a * b; // 144
+            \\        const m = b * a * b * a; // 144
+            \\        const n = b * a * b * b; // 192
+            \\        const o = b * b * a * a; // 144
+            \\        const p = b * b * a * b; // 192
+            \\        const q = b * b * b * a; // 192
+            \\        const r = b * b * b * b; // 256
+            \\        const s = c + d + e + f + g + h + i + j + k + l + m + n + o + p + q + r; // 2401
+            \\        break :blk s;
+            \\    };
+            \\    const y = x * a; // 7203
+            \\    const z = y * a; // 21609
             \\    return z;
             \\}
             \\
diff --git a/test/stage2/wasm.zig b/test/stage2/wasm.zig
index ccf2661b44..880587d93f 100644
--- a/test/stage2/wasm.zig
+++ b/test/stage2/wasm.zig
@@ -58,7 +58,10 @@ pub fn addCases(ctx: *TestContext) !void {
         ,
             // This is what you get when you take the bits of the IEE-754
            // representation of 42.0 and reinterpret them as an unsigned
-           // integer. Guess that's a bug in wasmtime.
+           // integer.
+           // Bug is fixed in wasmtime v0.26 but updating to v0.26 is blocked
+           // on this issue:
+           // https://github.com/ziglang/zig/issues/8742
            "1109917696\n",
        );
diff --git a/test/tests.zig b/test/tests.zig
index b6168f04e2..2ea27af2ca 100644
--- a/test/tests.zig
+++ b/test/tests.zig
@@ -98,15 +98,14 @@ const test_targets = blk: {
         },
         .link_libc = true,
     },
-    // https://github.com/ziglang/zig/issues/4926
-    //TestTarget{
-    //    .target = .{
-    //        .cpu_arch = .i386,
-    //        .os_tag = .linux,
-    //        .abi = .gnu,
-    //    },
-    //    .link_libc = true,
-    //},
+    TestTarget{
+        .target = .{
+            .cpu_arch = .i386,
+            .os_tag = .linux,
+            .abi = .gnu,
+        },
+        .link_libc = true,
+    },
     TestTarget{
         .target = .{