Merge pull request #18505 from ziglang/tsan

TSAN: update to LLVM 17.0.6
This commit is contained in:
Andrew Kelley 2024-01-10 15:08:49 -08:00 committed by GitHub
commit df6aed0fc3
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
216 changed files with 15845 additions and 9985 deletions

View File

@ -14,9 +14,10 @@
#ifndef INTERCEPTION_H
#define INTERCEPTION_H
#include "sanitizer_common/sanitizer_asm.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#if !SANITIZER_LINUX && !SANITIZER_FREEBSD && !SANITIZER_MAC && \
#if !SANITIZER_LINUX && !SANITIZER_FREEBSD && !SANITIZER_APPLE && \
!SANITIZER_NETBSD && !SANITIZER_WINDOWS && !SANITIZER_FUCHSIA && \
!SANITIZER_SOLARIS
# error "Interception doesn't work on this operating system."
@ -67,28 +68,54 @@ typedef __sanitizer::OFF64_T OFF64_T;
// for more details). To intercept such functions you need to use the
// INTERCEPTOR_WITH_SUFFIX(...) macro.
// How it works:
// To replace system functions on Linux we just need to declare functions
// with same names in our library and then obtain the real function pointers
// using dlsym().
// There is one complication. A user may also intercept some of the functions
// we intercept. To resolve this we declare our interceptors with __interceptor_
// prefix, and then make actual interceptors weak aliases to __interceptor_
// functions.
// How it works on Linux
// ---------------------
//
// To replace system functions on Linux we just need to declare functions with
// the same names in our library and then obtain the real function pointers
// using dlsym().
//
// There is one complication: a user may also intercept some of the functions we
// intercept. To allow for up to 3 interceptors (including ours) of a given
// function "func", the interceptor implementation is in ___interceptor_func,
// which is aliased by a weak function __interceptor_func, which in turn is
// aliased (via a trampoline) by weak wrapper function "func".
//
// Most user interceptors should define a foreign interceptor as follows:
//
// - provide a non-weak function "func" that performs interception;
// - if __interceptor_func exists, call it to perform the real functionality;
// - if it does not exist, figure out the real function and call it instead.
//
// In rare cases, a foreign interceptor (of another dynamic analysis runtime)
// may be defined as follows (on supported architectures):
//
// - provide a non-weak function __interceptor_func that performs interception;
// - if ___interceptor_func exists, call it to perform the real functionality;
// - if it does not exist, figure out the real function and call it instead;
// - provide a weak function "func" that is an alias to __interceptor_func.
//
// With this protocol, sanitizer interceptors, foreign user interceptors, and
// foreign interceptors of other dynamic analysis runtimes, or any combination
// thereof, may co-exist simultaneously.
//
// How it works on Mac OS
// ----------------------
//
// This is not so on Mac OS, where the two-level namespace makes our replacement
// functions invisible to other libraries. This may be overcome using
// DYLD_FORCE_FLAT_NAMESPACE, but some errors loading the shared libraries in
// Chromium were noticed when doing so.
//
// This is not so on Mac OS, where the two-level namespace makes
// our replacement functions invisible to other libraries. This may be overcome
// using DYLD_FORCE_FLAT_NAMESPACE, but some errors loading the shared
// libraries in Chromium were noticed when doing so.
// Instead we create a dylib containing a __DATA,__interpose section that
// associates library functions with their wrappers. When this dylib is
// preloaded before an executable using DYLD_INSERT_LIBRARIES, it routes all
// the calls to interposed functions done through stubs to the wrapper
// functions.
// preloaded before an executable using DYLD_INSERT_LIBRARIES, it routes all the
// calls to interposed functions done through stubs to the wrapper functions.
//
// As it's decided at compile time which functions are to be intercepted on Mac,
// INTERCEPT_FUNCTION() is effectively a no-op on this system.
#if SANITIZER_MAC
#if SANITIZER_APPLE
#include <sys/cdefs.h> // For __DARWIN_ALIAS_C().
// Just a pair of pointers.
@ -100,53 +127,102 @@ struct interpose_substitution {
// For a function foo() create a global pair of pointers { wrap_foo, foo } in
// the __DATA,__interpose section.
// As a result all the calls to foo() will be routed to wrap_foo() at runtime.
#define INTERPOSER(func_name) __attribute__((used)) \
#define INTERPOSER(func_name) __attribute__((used)) \
const interpose_substitution substitution_##func_name[] \
__attribute__((section("__DATA, __interpose"))) = { \
{ reinterpret_cast<const uptr>(WRAP(func_name)), \
reinterpret_cast<const uptr>(func_name) } \
{ reinterpret_cast<const uptr>(WRAP(func_name)), \
reinterpret_cast<const uptr>(func_name) } \
}
// For a function foo() and a wrapper function bar() create a global pair
// of pointers { bar, foo } in the __DATA,__interpose section.
// As a result all the calls to foo() will be routed to bar() at runtime.
#define INTERPOSER_2(func_name, wrapper_name) __attribute__((used)) \
const interpose_substitution substitution_##func_name[] \
__attribute__((section("__DATA, __interpose"))) = { \
{ reinterpret_cast<const uptr>(wrapper_name), \
reinterpret_cast<const uptr>(func_name) } \
const interpose_substitution substitution_##func_name[] \
__attribute__((section("__DATA, __interpose"))) = { \
{ reinterpret_cast<const uptr>(wrapper_name), \
reinterpret_cast<const uptr>(func_name) } \
}
# define WRAP(x) wrap_##x
# define WRAPPER_NAME(x) "wrap_"#x
# define TRAMPOLINE(x) WRAP(x)
# define INTERCEPTOR_ATTRIBUTE
# define DECLARE_WRAPPER(ret_type, func, ...)
#elif SANITIZER_WINDOWS
# define WRAP(x) __asan_wrap_##x
# define WRAPPER_NAME(x) "__asan_wrap_"#x
# define TRAMPOLINE(x) WRAP(x)
# define INTERCEPTOR_ATTRIBUTE __declspec(dllexport)
# define DECLARE_WRAPPER(ret_type, func, ...) \
# define DECLARE_WRAPPER(ret_type, func, ...) \
extern "C" ret_type func(__VA_ARGS__);
# define DECLARE_WRAPPER_WINAPI(ret_type, func, ...) \
# define DECLARE_WRAPPER_WINAPI(ret_type, func, ...) \
extern "C" __declspec(dllimport) ret_type __stdcall func(__VA_ARGS__);
#elif SANITIZER_FREEBSD || SANITIZER_NETBSD
# define WRAP(x) __interceptor_ ## x
# define WRAPPER_NAME(x) "__interceptor_" #x
#elif !SANITIZER_FUCHSIA // LINUX, FREEBSD, NETBSD, SOLARIS
# define INTERCEPTOR_ATTRIBUTE __attribute__((visibility("default")))
# if ASM_INTERCEPTOR_TRAMPOLINE_SUPPORT
// Weak aliases of weak aliases do not work, therefore we need to set up a
// trampoline function. The function "func" is a weak alias to the trampoline
// (so that we may check if "func" was overridden), which calls the weak
// function __interceptor_func, which in turn aliases the actual interceptor
// implementation ___interceptor_func:
//
// [wrapper "func": weak] --(alias)--> [TRAMPOLINE(func)]
// |
// +--------(tail call)-------+
// |
// v
// [__interceptor_func: weak] --(alias)--> [WRAP(func)]
//
// We use inline assembly to define most of this, because not all compilers
// support functions with the "naked" attribute with every architecture.
# define WRAP(x) ___interceptor_ ## x
# define TRAMPOLINE(x) __interceptor_trampoline_ ## x
# if SANITIZER_FREEBSD || SANITIZER_NETBSD
// FreeBSD's dynamic linker (incompliantly) gives non-weak symbols higher
// priority than weak ones so weak aliases won't work for indirect calls
// in position-independent (-fPIC / -fPIE) mode.
# define DECLARE_WRAPPER(ret_type, func, ...) \
extern "C" ret_type func(__VA_ARGS__) \
__attribute__((alias("__interceptor_" #func), visibility("default")));
#elif !SANITIZER_FUCHSIA
# define WRAP(x) __interceptor_ ## x
# define WRAPPER_NAME(x) "__interceptor_" #x
# define INTERCEPTOR_ATTRIBUTE __attribute__((visibility("default")))
# define DECLARE_WRAPPER(ret_type, func, ...) \
extern "C" ret_type func(__VA_ARGS__) \
__attribute__((weak, alias("__interceptor_" #func), visibility("default")));
# define __ASM_WEAK_WRAPPER(func) ".globl " #func "\n"
# else
# define __ASM_WEAK_WRAPPER(func) ".weak " #func "\n"
# endif // SANITIZER_FREEBSD || SANITIZER_NETBSD
// Keep trampoline implementation in sync with sanitizer_common/sanitizer_asm.h
# define DECLARE_WRAPPER(ret_type, func, ...) \
extern "C" ret_type func(__VA_ARGS__); \
extern "C" ret_type TRAMPOLINE(func)(__VA_ARGS__); \
extern "C" ret_type __interceptor_##func(__VA_ARGS__) \
INTERCEPTOR_ATTRIBUTE __attribute__((weak)) ALIAS(WRAP(func)); \
asm( \
".text\n" \
__ASM_WEAK_WRAPPER(func) \
".set " #func ", " SANITIZER_STRINGIFY(TRAMPOLINE(func)) "\n" \
".globl " SANITIZER_STRINGIFY(TRAMPOLINE(func)) "\n" \
".type " SANITIZER_STRINGIFY(TRAMPOLINE(func)) ", %function\n" \
SANITIZER_STRINGIFY(TRAMPOLINE(func)) ":\n" \
SANITIZER_STRINGIFY(CFI_STARTPROC) "\n" \
SANITIZER_STRINGIFY(ASM_TAIL_CALL) " __interceptor_" \
SANITIZER_STRINGIFY(ASM_PREEMPTIBLE_SYM(func)) "\n" \
SANITIZER_STRINGIFY(CFI_ENDPROC) "\n" \
".size " SANITIZER_STRINGIFY(TRAMPOLINE(func)) ", " \
".-" SANITIZER_STRINGIFY(TRAMPOLINE(func)) "\n" \
);
# else // ASM_INTERCEPTOR_TRAMPOLINE_SUPPORT
// Some architectures cannot implement efficient interceptor trampolines with
// just a plain jump due to complexities of resolving a preemptible symbol. In
// those cases, revert to just this scheme:
//
// [wrapper "func": weak] --(alias)--> [WRAP(func)]
//
# define WRAP(x) __interceptor_ ## x
# define TRAMPOLINE(x) WRAP(x)
# if SANITIZER_FREEBSD || SANITIZER_NETBSD
# define __ATTRIBUTE_WEAK_WRAPPER
# else
# define __ATTRIBUTE_WEAK_WRAPPER __attribute__((weak))
# endif // SANITIZER_FREEBSD || SANITIZER_NETBSD
# define DECLARE_WRAPPER(ret_type, func, ...) \
extern "C" ret_type func(__VA_ARGS__) \
INTERCEPTOR_ATTRIBUTE __ATTRIBUTE_WEAK_WRAPPER ALIAS(WRAP(func));
# endif // ASM_INTERCEPTOR_TRAMPOLINE_SUPPORT
#endif
#if SANITIZER_FUCHSIA
@ -157,33 +233,35 @@ const interpose_substitution substitution_##func_name[] \
# define INTERCEPTOR_ATTRIBUTE __attribute__((visibility("default")))
# define REAL(x) __unsanitized_##x
# define DECLARE_REAL(ret_type, func, ...)
#elif !SANITIZER_MAC
#elif !SANITIZER_APPLE
# define PTR_TO_REAL(x) real_##x
# define REAL(x) __interception::PTR_TO_REAL(x)
# define FUNC_TYPE(x) x##_type
# define DECLARE_REAL(ret_type, func, ...) \
# define DECLARE_REAL(ret_type, func, ...) \
typedef ret_type (*FUNC_TYPE(func))(__VA_ARGS__); \
namespace __interception { \
extern FUNC_TYPE(func) PTR_TO_REAL(func); \
namespace __interception { \
extern FUNC_TYPE(func) PTR_TO_REAL(func); \
}
# define ASSIGN_REAL(dst, src) REAL(dst) = REAL(src)
#else // SANITIZER_MAC
#else // SANITIZER_APPLE
# define REAL(x) x
# define DECLARE_REAL(ret_type, func, ...) \
extern "C" ret_type func(__VA_ARGS__);
# define ASSIGN_REAL(x, y)
#endif // SANITIZER_MAC
#endif // SANITIZER_APPLE
#if !SANITIZER_FUCHSIA
# define DECLARE_REAL_AND_INTERCEPTOR(ret_type, func, ...) \
# define DECLARE_REAL_AND_INTERCEPTOR(ret_type, func, ...) \
DECLARE_REAL(ret_type, func, __VA_ARGS__) \
extern "C" ret_type TRAMPOLINE(func)(__VA_ARGS__); \
extern "C" ret_type WRAP(func)(__VA_ARGS__);
// Declare an interceptor and its wrapper defined in a different translation
// unit (ex. asm).
# define DECLARE_EXTERN_INTERCEPTOR_AND_WRAPPER(ret_type, func, ...) \
extern "C" ret_type WRAP(func)(__VA_ARGS__); \
extern "C" ret_type func(__VA_ARGS__);
# define DECLARE_EXTERN_INTERCEPTOR_AND_WRAPPER(ret_type, func, ...) \
extern "C" ret_type TRAMPOLINE(func)(__VA_ARGS__); \
extern "C" ret_type WRAP(func)(__VA_ARGS__); \
extern "C" ret_type func(__VA_ARGS__);
#else
# define DECLARE_REAL_AND_INTERCEPTOR(ret_type, func, ...)
# define DECLARE_EXTERN_INTERCEPTOR_AND_WRAPPER(ret_type, func, ...)
@ -193,7 +271,7 @@ const interpose_substitution substitution_##func_name[] \
// macros does its job. In exceptional cases you may need to call REAL(foo)
// without defining INTERCEPTOR(..., foo, ...). For example, if you override
// foo with an interceptor for other function.
#if !SANITIZER_MAC && !SANITIZER_FUCHSIA
#if !SANITIZER_APPLE && !SANITIZER_FUCHSIA
# define DEFINE_REAL(ret_type, func, ...) \
typedef ret_type (*FUNC_TYPE(func))(__VA_ARGS__); \
namespace __interception { \
@ -213,25 +291,23 @@ const interpose_substitution substitution_##func_name[] \
__interceptor_##func(__VA_ARGS__); \
extern "C" INTERCEPTOR_ATTRIBUTE ret_type func(__VA_ARGS__)
#elif !SANITIZER_MAC
#elif !SANITIZER_APPLE
#define INTERCEPTOR(ret_type, func, ...) \
DEFINE_REAL(ret_type, func, __VA_ARGS__) \
DECLARE_WRAPPER(ret_type, func, __VA_ARGS__) \
extern "C" \
INTERCEPTOR_ATTRIBUTE \
ret_type WRAP(func)(__VA_ARGS__)
#define INTERCEPTOR(ret_type, func, ...) \
DEFINE_REAL(ret_type, func, __VA_ARGS__) \
DECLARE_WRAPPER(ret_type, func, __VA_ARGS__) \
extern "C" INTERCEPTOR_ATTRIBUTE ret_type WRAP(func)(__VA_ARGS__)
// We don't need INTERCEPTOR_WITH_SUFFIX on non-Darwin for now.
#define INTERCEPTOR_WITH_SUFFIX(ret_type, func, ...) \
INTERCEPTOR(ret_type, func, __VA_ARGS__)
#else // SANITIZER_MAC
#else // SANITIZER_APPLE
#define INTERCEPTOR_ZZZ(suffix, ret_type, func, ...) \
extern "C" ret_type func(__VA_ARGS__) suffix; \
extern "C" ret_type WRAP(func)(__VA_ARGS__); \
INTERPOSER(func); \
#define INTERCEPTOR_ZZZ(suffix, ret_type, func, ...) \
extern "C" ret_type func(__VA_ARGS__) suffix; \
extern "C" ret_type WRAP(func)(__VA_ARGS__); \
INTERPOSER(func); \
extern "C" INTERCEPTOR_ATTRIBUTE ret_type WRAP(func)(__VA_ARGS__)
#define INTERCEPTOR(ret_type, func, ...) \
@ -246,14 +322,12 @@ const interpose_substitution substitution_##func_name[] \
#endif
#if SANITIZER_WINDOWS
# define INTERCEPTOR_WINAPI(ret_type, func, ...) \
# define INTERCEPTOR_WINAPI(ret_type, func, ...) \
typedef ret_type (__stdcall *FUNC_TYPE(func))(__VA_ARGS__); \
namespace __interception { \
FUNC_TYPE(func) PTR_TO_REAL(func); \
} \
extern "C" \
INTERCEPTOR_ATTRIBUTE \
ret_type __stdcall WRAP(func)(__VA_ARGS__)
namespace __interception { \
FUNC_TYPE(func) PTR_TO_REAL(func); \
} \
extern "C" INTERCEPTOR_ATTRIBUTE ret_type __stdcall WRAP(func)(__VA_ARGS__)
#endif
// ISO C++ forbids casting between pointer-to-function and pointer-to-object,
@ -278,7 +352,7 @@ typedef unsigned long uptr;
# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func)
# define INTERCEPT_FUNCTION_VER(func, symver) \
INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver)
#elif SANITIZER_MAC
#elif SANITIZER_APPLE
# include "interception_mac.h"
# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_MAC(func)
# define INTERCEPT_FUNCTION_VER(func, symver) \

View File

@ -33,7 +33,7 @@ static int StrCmp(const char *s1, const char *s2) {
}
#endif
static void *GetFuncAddr(const char *name, uptr wrapper_addr) {
static void *GetFuncAddr(const char *name, uptr trampoline) {
#if SANITIZER_NETBSD
// FIXME: Find a better way to handle renames
if (StrCmp(name, "sigaction"))
@ -50,17 +50,17 @@ static void *GetFuncAddr(const char *name, uptr wrapper_addr) {
// In case `name' is not loaded, dlsym ends up finding the actual wrapper.
// We don't want to intercept the wrapper and have it point to itself.
if ((uptr)addr == wrapper_addr)
if ((uptr)addr == trampoline)
addr = nullptr;
}
return addr;
}
bool InterceptFunction(const char *name, uptr *ptr_to_real, uptr func,
uptr wrapper) {
void *addr = GetFuncAddr(name, wrapper);
uptr trampoline) {
void *addr = GetFuncAddr(name, trampoline);
*ptr_to_real = (uptr)addr;
return addr && (func == wrapper);
return addr && (func == trampoline);
}
// dlvsym is a GNU extension supported by some other platforms.
@ -70,12 +70,12 @@ static void *GetFuncAddr(const char *name, const char *ver) {
}
bool InterceptFunction(const char *name, const char *ver, uptr *ptr_to_real,
uptr func, uptr wrapper) {
uptr func, uptr trampoline) {
void *addr = GetFuncAddr(name, ver);
*ptr_to_real = (uptr)addr;
return addr && (func == wrapper);
return addr && (func == trampoline);
}
#endif // SANITIZER_GLIBC || SANITIZER_FREEBSD || SANITIZER_NETBSD
# endif // SANITIZER_GLIBC || SANITIZER_FREEBSD || SANITIZER_NETBSD
} // namespace __interception

View File

@ -15,7 +15,7 @@
SANITIZER_SOLARIS
#if !defined(INCLUDED_FROM_INTERCEPTION_LIB)
# error "interception_linux.h should be included from interception library only"
# error interception_linux.h should be included from interception library only
#endif
#ifndef INTERCEPTION_LINUX_H
@ -23,26 +23,26 @@
namespace __interception {
bool InterceptFunction(const char *name, uptr *ptr_to_real, uptr func,
uptr wrapper);
uptr trampoline);
bool InterceptFunction(const char *name, const char *ver, uptr *ptr_to_real,
uptr func, uptr wrapper);
uptr func, uptr trampoline);
} // namespace __interception
#define INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func) \
::__interception::InterceptFunction( \
#func, \
(::__interception::uptr *) & REAL(func), \
(::__interception::uptr) & (func), \
(::__interception::uptr) & WRAP(func))
(::__interception::uptr *)&REAL(func), \
(::__interception::uptr)&(func), \
(::__interception::uptr)&TRAMPOLINE(func))
// dlvsym is a GNU extension supported by some other platforms.
#if SANITIZER_GLIBC || SANITIZER_FREEBSD || SANITIZER_NETBSD
#define INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver) \
::__interception::InterceptFunction( \
#func, symver, \
(::__interception::uptr *) & REAL(func), \
(::__interception::uptr) & (func), \
(::__interception::uptr) & WRAP(func))
(::__interception::uptr *)&REAL(func), \
(::__interception::uptr)&(func), \
(::__interception::uptr)&TRAMPOLINE(func))
#else
#define INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver) \
INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func)

View File

@ -13,6 +13,6 @@
#include "interception.h"
#if SANITIZER_MAC
#if SANITIZER_APPLE
#endif // SANITIZER_MAC
#endif // SANITIZER_APPLE

View File

@ -11,7 +11,7 @@
// Mac-specific interception methods.
//===----------------------------------------------------------------------===//
#if SANITIZER_MAC
#if SANITIZER_APPLE
#if !defined(INCLUDED_FROM_INTERCEPTION_LIB)
# error "interception_mac.h should be included from interception.h only"
@ -24,4 +24,4 @@
#define INTERCEPT_FUNCTION_VER_MAC(func, symver)
#endif // INTERCEPTION_MAC_H
#endif // SANITIZER_MAC
#endif // SANITIZER_APPLE

View File

@ -13,7 +13,7 @@
#include "interception.h"
#if SANITIZER_LINUX || SANITIZER_MAC
#if SANITIZER_LINUX || SANITIZER_APPLE
#include <sys/types.h>
#include <stddef.h>
@ -24,9 +24,9 @@ COMPILER_CHECK(sizeof(::SSIZE_T) == sizeof(ssize_t));
COMPILER_CHECK(sizeof(::PTRDIFF_T) == sizeof(ptrdiff_t));
COMPILER_CHECK(sizeof(::INTMAX_T) == sizeof(intmax_t));
#if !SANITIZER_MAC
# if SANITIZER_GLIBC || SANITIZER_ANDROID
COMPILER_CHECK(sizeof(::OFF64_T) == sizeof(off64_t));
#endif
# endif
// The following are the cases when pread (and friends) is used instead of
// pread64. In those cases we need OFF_T to match off_t. We don't care about the

View File

@ -56,7 +56,7 @@
// tramp: jmp QWORD [addr]
// addr: .bytes <hook>
//
// Note: <real> is equilavent to <label>.
// Note: <real> is equivalent to <label>.
//
// 3) HotPatch
//
@ -141,8 +141,29 @@ static const int kBranchLength =
FIRST_32_SECOND_64(kJumpInstructionLength, kIndirectJumpInstructionLength);
static const int kDirectBranchLength = kBranchLength + kAddressLength;
# if defined(_MSC_VER)
# define INTERCEPTION_FORMAT(f, a)
# else
# define INTERCEPTION_FORMAT(f, a) __attribute__((format(printf, f, a)))
# endif
static void (*ErrorReportCallback)(const char *format, ...)
INTERCEPTION_FORMAT(1, 2);
void SetErrorReportCallback(void (*callback)(const char *format, ...)) {
ErrorReportCallback = callback;
}
# define ReportError(...) \
do { \
if (ErrorReportCallback) \
ErrorReportCallback(__VA_ARGS__); \
} while (0)
static void InterceptionFailed() {
// Do we have a good way to abort with an error message here?
ReportError("interception_win: failed due to an unrecoverable error.\n");
// This acts like an abort when no debugger is attached. According to an old
// comment, calling abort() leads to an infinite recursion in CheckFailed.
__debugbreak();
}
@ -249,8 +270,13 @@ static void WritePadding(uptr from, uptr size) {
}
static void WriteJumpInstruction(uptr from, uptr target) {
if (!DistanceIsWithin2Gig(from + kJumpInstructionLength, target))
if (!DistanceIsWithin2Gig(from + kJumpInstructionLength, target)) {
ReportError(
"interception_win: cannot write jmp further than 2GB away, from %p to "
"%p.\n",
(void *)from, (void *)target);
InterceptionFailed();
}
ptrdiff_t offset = target - from - kJumpInstructionLength;
*(u8*)from = 0xE9;
*(u32*)(from + 1) = offset;
@ -274,6 +300,10 @@ static void WriteIndirectJumpInstruction(uptr from, uptr indirect_target) {
int offset = indirect_target - from - kIndirectJumpInstructionLength;
if (!DistanceIsWithin2Gig(from + kIndirectJumpInstructionLength,
indirect_target)) {
ReportError(
"interception_win: cannot write indirect jmp with target further than "
"2GB away, from %p to %p.\n",
(void *)from, (void *)indirect_target);
InterceptionFailed();
}
*(u16*)from = 0x25FF;
@ -398,8 +428,44 @@ static uptr AllocateMemoryForTrampoline(uptr image_address, size_t size) {
return allocated_space;
}
// The following prologues cannot be patched because of the short jump
// jumping to the patching region.
#if SANITIZER_WINDOWS64
// ntdll!wcslen in Win11
// 488bc1 mov rax,rcx
// 0fb710 movzx edx,word ptr [rax]
// 4883c002 add rax,2
// 6685d2 test dx,dx
// 75f4 jne -12
static const u8 kPrologueWithShortJump1[] = {
0x48, 0x8b, 0xc1, 0x0f, 0xb7, 0x10, 0x48, 0x83,
0xc0, 0x02, 0x66, 0x85, 0xd2, 0x75, 0xf4,
};
// ntdll!strrchr in Win11
// 4c8bc1 mov r8,rcx
// 8a01 mov al,byte ptr [rcx]
// 48ffc1 inc rcx
// 84c0 test al,al
// 75f7 jne -9
static const u8 kPrologueWithShortJump2[] = {
0x4c, 0x8b, 0xc1, 0x8a, 0x01, 0x48, 0xff, 0xc1,
0x84, 0xc0, 0x75, 0xf7,
};
#endif
// Returns 0 on error.
static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
#if SANITIZER_WINDOWS64
if (memcmp((u8*)address, kPrologueWithShortJump1,
sizeof(kPrologueWithShortJump1)) == 0 ||
memcmp((u8*)address, kPrologueWithShortJump2,
sizeof(kPrologueWithShortJump2)) == 0) {
return 0;
}
#endif
switch (*(u64*)address) {
case 0x90909090909006EB: // stub: jmp over 6 x nop.
return 8;
@ -456,6 +522,7 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
case 0xFF8B: // 8B FF : mov edi, edi
case 0xEC8B: // 8B EC : mov ebp, esp
case 0xc889: // 89 C8 : mov eax, ecx
case 0xE589: // 89 E5 : mov ebp, esp
case 0xC18B: // 8B C1 : mov eax, ecx
case 0xC033: // 33 C0 : xor eax, eax
case 0xC933: // 33 C9 : xor ecx, ecx
@ -477,6 +544,14 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
case 0xA1: // A1 XX XX XX XX XX XX XX XX :
// movabs eax, dword ptr ds:[XXXXXXXX]
return 9;
case 0x83:
const u8 next_byte = *(u8*)(address + 1);
const u8 mod = next_byte >> 6;
const u8 rm = next_byte & 7;
if (mod == 1 && rm == 4)
return 5; // 83 ModR/M SIB Disp8 Imm8
// add|or|adc|sbb|and|sub|xor|cmp [r+disp8], imm8
}
switch (*(u16*)address) {
@ -493,6 +568,8 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
case 0x5641: // push r14
case 0x5741: // push r15
case 0x9066: // Two-byte NOP
case 0xc084: // test al, al
case 0x018a: // mov al, byte ptr [rcx]
return 2;
case 0x058B: // 8B 05 XX XX XX XX : mov eax, dword ptr [XX XX XX XX]
@ -509,6 +586,7 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
case 0xd12b48: // 48 2b d1 : sub rdx, rcx
case 0x07c1f6: // f6 c1 07 : test cl, 0x7
case 0xc98548: // 48 85 C9 : test rcx, rcx
case 0xd28548: // 48 85 d2 : test rdx, rdx
case 0xc0854d: // 4d 85 c0 : test r8, r8
case 0xc2b60f: // 0f b6 c2 : movzx eax, dl
case 0xc03345: // 45 33 c0 : xor r8d, r8d
@ -522,6 +600,7 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
case 0xca2b48: // 48 2b ca : sub rcx, rdx
case 0x10b70f: // 0f b7 10 : movzx edx, WORD PTR [rax]
case 0xc00b4d: // 4d 0b c0 : or r8, r8
case 0xc08b41: // 41 8b c0 : mov eax, r8d
case 0xd18b48: // 48 8b d1 : mov rdx, rcx
case 0xdc8b4c: // 4c 8b dc : mov r11, rsp
case 0xd18b4c: // 4c 8b d1 : mov r10, rcx
@ -556,6 +635,7 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
case 0x246c8948: // 48 89 6C 24 XX : mov QWORD ptr [rsp + XX], rbp
case 0x245c8948: // 48 89 5c 24 XX : mov QWORD PTR [rsp + XX], rbx
case 0x24748948: // 48 89 74 24 XX : mov QWORD PTR [rsp + XX], rsi
case 0x247c8948: // 48 89 7c 24 XX : mov QWORD PTR [rsp + XX], rdi
case 0x244C8948: // 48 89 4C 24 XX : mov QWORD PTR [rsp + XX], rcx
case 0x24548948: // 48 89 54 24 XX : mov QWORD PTR [rsp + XX], rdx
case 0x244c894c: // 4c 89 4c 24 XX : mov QWORD PTR [rsp + XX], r9
@ -592,6 +672,8 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
case 0x24448B: // 8B 44 24 XX : mov eax, dword ptr [esp + XX]
case 0x244C8B: // 8B 4C 24 XX : mov ecx, dword ptr [esp + XX]
case 0x24548B: // 8B 54 24 XX : mov edx, dword ptr [esp + XX]
case 0x245C8B: // 8B 5C 24 XX : mov ebx, dword ptr [esp + XX]
case 0x246C8B: // 8B 6C 24 XX : mov ebp, dword ptr [esp + XX]
case 0x24748B: // 8B 74 24 XX : mov esi, dword ptr [esp + XX]
case 0x247C8B: // 8B 7C 24 XX : mov edi, dword ptr [esp + XX]
return 4;
@ -603,12 +685,20 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
}
#endif
// Unknown instruction!
// FIXME: Unknown instruction failures might happen when we add a new
// interceptor or a new compiler version. In either case, they should result
// in visible and readable error messages. However, merely calling abort()
// leads to an infinite recursion in CheckFailed.
InterceptionFailed();
// Unknown instruction! This might happen when we add a new interceptor, use
// a new compiler version, or if Windows changed how some functions are
// compiled. In either case, we print the address and 8 bytes of instructions
// to notify the user about the error and to help identify the unknown
// instruction. Don't treat this as a fatal error, though we can break the
// debugger if one has been attached.
u8 *bytes = (u8 *)address;
ReportError(
"interception_win: unhandled instruction at %p: %02x %02x %02x %02x %02x "
"%02x %02x %02x\n",
(void *)address, bytes[0], bytes[1], bytes[2], bytes[3], bytes[4],
bytes[5], bytes[6], bytes[7]);
if (::IsDebuggerPresent())
__debugbreak();
return 0;
}
@ -629,6 +719,8 @@ static bool CopyInstructions(uptr to, uptr from, size_t size) {
while (cursor != size) {
size_t rel_offset = 0;
size_t instruction_size = GetInstructionSize(from + cursor, &rel_offset);
if (!instruction_size)
return false;
_memcpy((void*)(to + cursor), (void*)(from + cursor),
(size_t)instruction_size);
if (rel_offset) {
@ -689,7 +781,7 @@ bool OverrideFunctionWithRedirectJump(
return false;
if (orig_old_func) {
uptr relative_offset = *(u32*)(old_func + 1);
sptr relative_offset = *(s32 *)(old_func + 1);
uptr absolute_target = old_func + relative_offset + kJumpInstructionLength;
*orig_old_func = absolute_target;
}
@ -846,6 +938,10 @@ static void **InterestingDLLsAvailable() {
"msvcr120.dll", // VS2013
"vcruntime140.dll", // VS2015
"ucrtbase.dll", // Universal CRT
#if (defined(__MINGW32__) && defined(__i386__))
"libc++.dll", // libc++
"libunwind.dll", // libunwind
#endif
// NTDLL should go last as it exports some functions that we should
// override in the CRT [presumably only used internally].
"ntdll.dll", NULL};
@ -1019,4 +1115,4 @@ bool OverrideImportedFunction(const char *module_to_patch,
} // namespace __interception
#endif // SANITIZER_MAC
#endif // SANITIZER_APPLE

View File

@ -41,6 +41,11 @@ bool OverrideImportedFunction(const char *module_to_patch,
const char *function_name, uptr new_function,
uptr *orig_old_func);
// Sets a callback to be used for reporting errors by interception_win. The
// callback will be called with printf-like arguments. Intended to be used with
// __sanitizer::Report. Pass nullptr to disable error reporting (default).
void SetErrorReportCallback(void (*callback)(const char *format, ...));
#if !SANITIZER_WINDOWS64
// Exposed for unittests
bool OverrideFunctionWithDetour(

View File

@ -14,7 +14,7 @@
#endif
SANCOV_FLAG(bool, symbolize, true,
"If set, converage information will be symbolized by sancov tool "
"If set, coverage information will be symbolized by sancov tool "
"after dumping.")
SANCOV_FLAG(bool, help, false, "Print flags help.")

View File

@ -39,6 +39,11 @@ namespace __sanitizer {
// the current thread has exclusive access to the data
// if !h.exists() then the element never existed
// }
// {
// Map::Handle h(&m, addr, false, true);
// this will create a new element or return a handle to an existing element
// if !h.created() this thread does *not* have exclusive access to the data
// }
template<typename T, uptr kSize>
class AddrHashMap {
private:
@ -56,7 +61,7 @@ class AddrHashMap {
static const uptr kBucketSize = 3;
struct Bucket {
RWMutex mtx;
Mutex mtx;
atomic_uintptr_t add;
Cell cells[kBucketSize];
};
@ -89,6 +94,12 @@ class AddrHashMap {
bool create_;
};
typedef void (*ForEachCallback)(const uptr key, const T &val, void *arg);
// ForEach acquires a lock on each bucket while iterating over
// elements. Note that this only ensures that the structure of the hashmap is
// unchanged, there may be a data race to the element itself.
void ForEach(ForEachCallback cb, void *arg);
private:
friend class Handle;
Bucket *table_;
@ -98,6 +109,33 @@ class AddrHashMap {
uptr calcHash(uptr addr);
};
template <typename T, uptr kSize>
// Invokes `cb(key, value, arg)` for every live element in the map.
// Holds each bucket's mutex (as a ReadLock) while scanning that bucket, so
// the hashmap's *structure* cannot change under the iteration; per the note
// on the declaration, the element payloads themselves are not protected and
// may race with concurrent writers.
void AddrHashMap<T, kSize>::ForEach(ForEachCallback cb, void *arg) {
for (uptr n = 0; n < kSize; n++) {
Bucket *bucket = &table_[n];
ReadLock lock(&bucket->mtx);
// Scan the fixed-size inline cells of this bucket.
for (uptr i = 0; i < kBucketSize; i++) {
Cell *c = &bucket->cells[i];
// addr == 0 marks an empty cell; acquire-load pairs with the writer's
// release-store so the value is visible once the key is.
uptr addr1 = atomic_load(&c->addr, memory_order_acquire);
if (addr1 != 0)
cb(addr1, c->val, arg);
}
// Iterate over any additional cells.
if (AddBucket *add =
(AddBucket *)atomic_load(&bucket->add, memory_order_acquire)) {
for (uptr i = 0; i < add->size; i++) {
Cell *c = &add->cells[i];
uptr addr1 = atomic_load(&c->addr, memory_order_acquire);
if (addr1 != 0)
cb(addr1, c->val, arg);
}
}
}
}
template<typename T, uptr kSize>
AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr) {
map_ = map;
@ -163,7 +201,8 @@ AddrHashMap<T, kSize>::AddrHashMap() {
}
template <typename T, uptr kSize>
void AddrHashMap<T, kSize>::acquire(Handle *h) NO_THREAD_SAFETY_ANALYSIS {
void AddrHashMap<T, kSize>::acquire(Handle *h)
SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
uptr addr = h->addr_;
uptr hash = calcHash(addr);
Bucket *b = &table_[hash];
@ -292,7 +331,8 @@ void AddrHashMap<T, kSize>::acquire(Handle *h) NO_THREAD_SAFETY_ANALYSIS {
}
template <typename T, uptr kSize>
void AddrHashMap<T, kSize>::release(Handle *h) NO_THREAD_SAFETY_ANALYSIS {
void AddrHashMap<T, kSize>::release(Handle *h)
SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
if (!h->cell_)
return;
Bucket *b = h->bucket_;

View File

@ -17,6 +17,7 @@
#include "sanitizer_allocator_internal.h"
#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
#include "sanitizer_platform.h"
namespace __sanitizer {
@ -24,66 +25,6 @@ namespace __sanitizer {
const char *PrimaryAllocatorName = "SizeClassAllocator";
const char *SecondaryAllocatorName = "LargeMmapAllocator";
// ThreadSanitizer for Go uses libc malloc/free.
#if defined(SANITIZER_USE_MALLOC)
# if SANITIZER_LINUX && !SANITIZER_ANDROID
extern "C" void *__libc_malloc(uptr size);
# if !SANITIZER_GO
extern "C" void *__libc_memalign(uptr alignment, uptr size);
# endif
extern "C" void *__libc_realloc(void *ptr, uptr size);
extern "C" void __libc_free(void *ptr);
# else
# include <stdlib.h>
# define __libc_malloc malloc
# if !SANITIZER_GO
static void *__libc_memalign(uptr alignment, uptr size) {
void *p;
uptr error = posix_memalign(&p, alignment, size);
if (error) return nullptr;
return p;
}
# endif
# define __libc_realloc realloc
# define __libc_free free
# endif
static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,
uptr alignment) {
(void)cache;
#if !SANITIZER_GO
if (alignment == 0)
return __libc_malloc(size);
else
return __libc_memalign(alignment, size);
#else
// Windows does not provide __libc_memalign/posix_memalign. It provides
// __aligned_malloc, but the allocated blocks can't be passed to free,
// they need to be passed to __aligned_free. InternalAlloc interface does
// not account for such requirement. Alignemnt does not seem to be used
// anywhere in runtime, so just call __libc_malloc for now.
DCHECK_EQ(alignment, 0);
return __libc_malloc(size);
#endif
}
static void *RawInternalRealloc(void *ptr, uptr size,
InternalAllocatorCache *cache) {
(void)cache;
return __libc_realloc(ptr, size);
}
static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
(void)cache;
__libc_free(ptr);
}
InternalAllocator *internal_allocator() {
return 0;
}
#else // SANITIZER_GO || defined(SANITIZER_USE_MALLOC)
static ALIGNED(64) char internal_alloc_placeholder[sizeof(InternalAllocator)];
static atomic_uint8_t internal_allocator_initialized;
static StaticSpinMutex internal_alloc_init_mu;
@ -135,8 +76,6 @@ static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
internal_allocator()->Deallocate(cache, ptr);
}
#endif // SANITIZER_GO || defined(SANITIZER_USE_MALLOC)
static void NORETURN ReportInternalAllocatorOutOfMemory(uptr requested_size) {
SetAllocatorOutOfMemory();
Report("FATAL: %s: internal allocator is out of memory trying to allocate "
@ -187,6 +126,16 @@ void InternalFree(void *addr, InternalAllocatorCache *cache) {
RawInternalFree(addr, cache);
}
void InternalAllocatorLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
internal_allocator_cache_mu.Lock();
internal_allocator()->ForceLock();
}
void InternalAllocatorUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
internal_allocator()->ForceUnlock();
internal_allocator_cache_mu.Unlock();
}
// LowLevelAllocator
constexpr uptr kLowLevelAllocatorDefaultAlignment = 8;
static uptr low_level_alloc_min_alignment = kLowLevelAllocatorDefaultAlignment;
@ -197,12 +146,10 @@ void *LowLevelAllocator::Allocate(uptr size) {
size = RoundUpTo(size, low_level_alloc_min_alignment);
if (allocated_end_ - allocated_current_ < (sptr)size) {
uptr size_to_allocate = RoundUpTo(size, GetPageSizeCached());
allocated_current_ =
(char*)MmapOrDie(size_to_allocate, __func__);
allocated_current_ = (char *)MmapOrDie(size_to_allocate, __func__);
allocated_end_ = allocated_current_ + size_to_allocate;
if (low_level_alloc_callback) {
low_level_alloc_callback((uptr)allocated_current_,
size_to_allocate);
low_level_alloc_callback((uptr)allocated_current_, size_to_allocate);
}
}
CHECK(allocated_end_ - allocated_current_ >= (sptr)size);
@ -247,4 +194,14 @@ void PrintHintAllocatorCannotReturnNull() {
"allocator_may_return_null=1\n");
}
static atomic_uint8_t rss_limit_exceeded;
bool IsRssLimitExceeded() {
return atomic_load(&rss_limit_exceeded, memory_order_relaxed);
}
void SetRssLimitExceeded(bool limit_exceeded) {
atomic_store(&rss_limit_exceeded, limit_exceeded, memory_order_relaxed);
}
} // namespace __sanitizer

View File

@ -14,6 +14,7 @@
#define SANITIZER_ALLOCATOR_H
#include "sanitizer_common.h"
#include "sanitizer_flat_map.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_lfstack.h"
#include "sanitizer_libc.h"
@ -43,12 +44,6 @@ void SetAllocatorOutOfMemory();
void PrintHintAllocatorCannotReturnNull();
// Allocators call these callbacks on mmap/munmap.
struct NoOpMapUnmapCallback {
void OnMap(uptr p, uptr size) const { }
void OnUnmap(uptr p, uptr size) const { }
};
// Callback type for iterating over chunks.
typedef void (*ForEachChunkCallback)(uptr chunk, void *arg);
@ -67,15 +62,24 @@ inline void RandomShuffle(T *a, u32 n, u32 *rand_state) {
*rand_state = state;
}
struct NoOpMapUnmapCallback {
void OnMap(uptr p, uptr size) const {}
void OnMapSecondary(uptr p, uptr size, uptr user_begin,
uptr user_size) const {}
void OnUnmap(uptr p, uptr size) const {}
};
#include "sanitizer_allocator_size_class_map.h"
#include "sanitizer_allocator_stats.h"
#include "sanitizer_allocator_primary64.h"
#include "sanitizer_allocator_bytemap.h"
#include "sanitizer_allocator_primary32.h"
#include "sanitizer_allocator_local_cache.h"
#include "sanitizer_allocator_secondary.h"
#include "sanitizer_allocator_combined.h"
bool IsRssLimitExceeded();
void SetRssLimitExceeded(bool limit_exceeded);
} // namespace __sanitizer
#endif // SANITIZER_ALLOCATOR_H

View File

@ -1,107 +0,0 @@
//===-- sanitizer_allocator_bytemap.h ---------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif
// Maps integers in rage [0, kSize) to u8 values.
template <u64 kSize, typename AddressSpaceViewTy = LocalAddressSpaceView>
class FlatByteMap {
public:
using AddressSpaceView = AddressSpaceViewTy;
void Init() {
internal_memset(map_, 0, sizeof(map_));
}
void set(uptr idx, u8 val) {
CHECK_LT(idx, kSize);
CHECK_EQ(0U, map_[idx]);
map_[idx] = val;
}
u8 operator[] (uptr idx) {
CHECK_LT(idx, kSize);
// FIXME: CHECK may be too expensive here.
return map_[idx];
}
private:
u8 map_[kSize];
};
// TwoLevelByteMap maps integers in range [0, kSize1*kSize2) to u8 values.
// It is implemented as a two-dimensional array: array of kSize1 pointers
// to kSize2-byte arrays. The secondary arrays are mmaped on demand.
// Each value is initially zero and can be set to something else only once.
// Setting and getting values from multiple threads is safe w/o extra locking.
template <u64 kSize1, u64 kSize2,
typename AddressSpaceViewTy = LocalAddressSpaceView,
class MapUnmapCallback = NoOpMapUnmapCallback>
class TwoLevelByteMap {
public:
using AddressSpaceView = AddressSpaceViewTy;
void Init() {
internal_memset(map1_, 0, sizeof(map1_));
mu_.Init();
}
void TestOnlyUnmap() {
for (uptr i = 0; i < kSize1; i++) {
u8 *p = Get(i);
if (!p) continue;
MapUnmapCallback().OnUnmap(reinterpret_cast<uptr>(p), kSize2);
UnmapOrDie(p, kSize2);
}
}
uptr size() const { return kSize1 * kSize2; }
uptr size1() const { return kSize1; }
uptr size2() const { return kSize2; }
void set(uptr idx, u8 val) {
CHECK_LT(idx, kSize1 * kSize2);
u8 *map2 = GetOrCreate(idx / kSize2);
CHECK_EQ(0U, map2[idx % kSize2]);
map2[idx % kSize2] = val;
}
u8 operator[] (uptr idx) const {
CHECK_LT(idx, kSize1 * kSize2);
u8 *map2 = Get(idx / kSize2);
if (!map2) return 0;
auto value_ptr = AddressSpaceView::Load(&map2[idx % kSize2]);
return *value_ptr;
}
private:
u8 *Get(uptr idx) const {
CHECK_LT(idx, kSize1);
return reinterpret_cast<u8 *>(
atomic_load(&map1_[idx], memory_order_acquire));
}
u8 *GetOrCreate(uptr idx) {
u8 *res = Get(idx);
if (!res) {
SpinMutexLock l(&mu_);
if (!(res = Get(idx))) {
res = (u8*)MmapOrDie(kSize2, "TwoLevelByteMap");
MapUnmapCallback().OnMap(reinterpret_cast<uptr>(res), kSize2);
atomic_store(&map1_[idx], reinterpret_cast<uptr>(res),
memory_order_release);
}
}
return res;
}
atomic_uintptr_t map1_[kSize1];
StaticSpinMutex mu_;
};

View File

@ -29,9 +29,9 @@ class CombinedAllocator {
LargeMmapAllocatorPtrArray,
typename PrimaryAllocator::AddressSpaceView>;
void InitLinkerInitialized(s32 release_to_os_interval_ms) {
stats_.InitLinkerInitialized();
primary_.Init(release_to_os_interval_ms);
void InitLinkerInitialized(s32 release_to_os_interval_ms,
uptr heap_start = 0) {
primary_.Init(release_to_os_interval_ms, heap_start);
secondary_.InitLinkerInitialized();
}
@ -112,15 +112,13 @@ class CombinedAllocator {
return new_p;
}
bool PointerIsMine(void *p) {
bool PointerIsMine(const void *p) const {
if (primary_.PointerIsMine(p))
return true;
return secondary_.PointerIsMine(p);
}
bool FromPrimary(void *p) {
return primary_.PointerIsMine(p);
}
bool FromPrimary(const void *p) const { return primary_.PointerIsMine(p); }
void *GetMetaData(const void *p) {
if (primary_.PointerIsMine(p))
@ -136,7 +134,7 @@ class CombinedAllocator {
// This function does the same as GetBlockBegin, but is much faster.
// Must be called with the allocator locked.
void *GetBlockBeginFastLocked(void *p) {
void *GetBlockBeginFastLocked(const void *p) {
if (primary_.PointerIsMine(p))
return primary_.GetBlockBegin(p);
return secondary_.GetBlockBeginFastLocked(p);
@ -177,12 +175,12 @@ class CombinedAllocator {
// ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
// introspection API.
void ForceLock() NO_THREAD_SAFETY_ANALYSIS {
void ForceLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
primary_.ForceLock();
secondary_.ForceLock();
}
void ForceUnlock() NO_THREAD_SAFETY_ANALYSIS {
void ForceUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
secondary_.ForceUnlock();
primary_.ForceUnlock();
}

View File

@ -0,0 +1,79 @@
//===-- sanitizer_allocator_dlsym.h -----------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Hack: Sanitizer initializer calls dlsym which may need to allocate and call
// back into uninitialized sanitizer.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_DLSYM_H
#define SANITIZER_ALLOCATOR_DLSYM_H
#include "sanitizer_allocator_internal.h"
namespace __sanitizer {
template <typename Details>
struct DlSymAllocator {
static bool Use() {
// Fuchsia doesn't use dlsym-based interceptors.
return !SANITIZER_FUCHSIA && UNLIKELY(Details::UseImpl());
}
static bool PointerIsMine(const void *ptr) {
// Fuchsia doesn't use dlsym-based interceptors.
return !SANITIZER_FUCHSIA &&
UNLIKELY(internal_allocator()->FromPrimary(ptr));
}
static void *Allocate(uptr size_in_bytes) {
void *ptr = InternalAlloc(size_in_bytes, nullptr, kWordSize);
CHECK(internal_allocator()->FromPrimary(ptr));
Details::OnAllocate(ptr,
internal_allocator()->GetActuallyAllocatedSize(ptr));
return ptr;
}
static void *Callocate(SIZE_T nmemb, SIZE_T size) {
void *ptr = InternalCalloc(nmemb, size);
CHECK(internal_allocator()->FromPrimary(ptr));
Details::OnAllocate(ptr,
internal_allocator()->GetActuallyAllocatedSize(ptr));
return ptr;
}
static void Free(void *ptr) {
uptr size = internal_allocator()->GetActuallyAllocatedSize(ptr);
Details::OnFree(ptr, size);
InternalFree(ptr);
}
static void *Realloc(void *ptr, uptr new_size) {
if (!ptr)
return Allocate(new_size);
CHECK(internal_allocator()->FromPrimary(ptr));
if (!new_size) {
Free(ptr);
return nullptr;
}
uptr size = internal_allocator()->GetActuallyAllocatedSize(ptr);
uptr memcpy_size = Min(new_size, size);
void *new_ptr = Allocate(new_size);
if (new_ptr)
internal_memcpy(new_ptr, ptr, memcpy_size);
Free(ptr);
return new_ptr;
}
static void OnAllocate(const void *ptr, uptr size) {}
static void OnFree(const void *ptr, uptr size) {}
};
} // namespace __sanitizer
#endif // SANITIZER_ALLOCATOR_DLSYM_H

View File

@ -21,8 +21,12 @@ extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_estimated_allocated_size(uptr size);
SANITIZER_INTERFACE_ATTRIBUTE int __sanitizer_get_ownership(const void *p);
SANITIZER_INTERFACE_ATTRIBUTE const void *__sanitizer_get_allocated_begin(
const void *p);
SANITIZER_INTERFACE_ATTRIBUTE uptr
__sanitizer_get_allocated_size(const void *p);
SANITIZER_INTERFACE_ATTRIBUTE uptr
__sanitizer_get_allocated_size_fast(const void *p);
SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_current_allocated_bytes();
SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_heap_size();
SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_free_bytes();

View File

@ -48,8 +48,9 @@ void *InternalReallocArray(void *p, uptr count, uptr size,
void *InternalCalloc(uptr count, uptr size,
InternalAllocatorCache *cache = nullptr);
void InternalFree(void *p, InternalAllocatorCache *cache = nullptr);
void InternalAllocatorLock();
void InternalAllocatorUnlock();
InternalAllocator *internal_allocator();
} // namespace __sanitizer
#endif // SANITIZER_ALLOCATOR_INTERNAL_H

View File

@ -189,7 +189,7 @@ class SizeClassAllocator32 {
sci->free_list.push_front(b);
}
bool PointerIsMine(const void *p) {
bool PointerIsMine(const void *p) const {
uptr mem = reinterpret_cast<uptr>(p);
if (SANITIZER_SIGN_EXTENDED_ADDRESSES)
mem &= (kSpaceSize - 1);
@ -198,8 +198,9 @@ class SizeClassAllocator32 {
return GetSizeClass(p) != 0;
}
uptr GetSizeClass(const void *p) {
return possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))];
uptr GetSizeClass(const void *p) const {
uptr id = ComputeRegionId(reinterpret_cast<uptr>(p));
return possible_regions.contains(id) ? possible_regions[id] : 0;
}
void *GetBlockBegin(const void *p) {
@ -237,13 +238,13 @@ class SizeClassAllocator32 {
// ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
// introspection API.
void ForceLock() NO_THREAD_SAFETY_ANALYSIS {
void ForceLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
for (uptr i = 0; i < kNumClasses; i++) {
GetSizeClassInfo(i)->mutex.Lock();
}
}
void ForceUnlock() NO_THREAD_SAFETY_ANALYSIS {
void ForceUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
for (int i = kNumClasses - 1; i >= 0; i--) {
GetSizeClassInfo(i)->mutex.Unlock();
}
@ -251,9 +252,9 @@ class SizeClassAllocator32 {
// Iterate over all existing chunks.
// The allocator must be locked when calling this function.
void ForEachChunk(ForEachChunkCallback callback, void *arg) {
void ForEachChunk(ForEachChunkCallback callback, void *arg) const {
for (uptr region = 0; region < kNumPossibleRegions; region++)
if (possible_regions[region]) {
if (possible_regions.contains(region) && possible_regions[region]) {
uptr chunk_size = ClassIdToSize(possible_regions[region]);
uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize);
uptr region_beg = region * kRegionSize;
@ -292,9 +293,7 @@ class SizeClassAllocator32 {
return res;
}
uptr ComputeRegionBeg(uptr mem) {
return mem & ~(kRegionSize - 1);
}
uptr ComputeRegionBeg(uptr mem) const { return mem & ~(kRegionSize - 1); }
uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
DCHECK_LT(class_id, kNumClasses);
@ -305,7 +304,7 @@ class SizeClassAllocator32 {
MapUnmapCallback().OnMap(res, kRegionSize);
stat->Add(AllocatorStatMapped, kRegionSize);
CHECK(IsAligned(res, kRegionSize));
possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id));
possible_regions[ComputeRegionId(res)] = class_id;
return res;
}
@ -354,7 +353,7 @@ class SizeClassAllocator32 {
DCHECK_GT(max_count, 0);
TransferBatch *b = nullptr;
constexpr uptr kShuffleArraySize = 48;
uptr shuffle_array[kShuffleArraySize];
UNINITIALIZED uptr shuffle_array[kShuffleArraySize];
uptr count = 0;
for (uptr i = region; i < region + n_chunks * size; i += size) {
shuffle_array[count++] = i;

View File

@ -161,7 +161,7 @@ class SizeClassAllocator64 {
void ForceReleaseToOS() {
MemoryMapperT memory_mapper(*this);
for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
BlockingMutexLock l(&GetRegionInfo(class_id)->mutex);
Lock l(&GetRegionInfo(class_id)->mutex);
MaybeReleaseToOS(&memory_mapper, class_id, true /*force*/);
}
}
@ -178,7 +178,7 @@ class SizeClassAllocator64 {
uptr region_beg = GetRegionBeginBySizeClass(class_id);
CompactPtrT *free_array = GetFreeArray(region_beg);
BlockingMutexLock l(&region->mutex);
Lock l(&region->mutex);
uptr old_num_chunks = region->num_freed_chunks;
uptr new_num_freed_chunks = old_num_chunks + n_chunks;
// Failure to allocate free array space while releasing memory is non
@ -204,7 +204,7 @@ class SizeClassAllocator64 {
uptr region_beg = GetRegionBeginBySizeClass(class_id);
CompactPtrT *free_array = GetFreeArray(region_beg);
BlockingMutexLock l(&region->mutex);
Lock l(&region->mutex);
#if SANITIZER_WINDOWS
/* On Windows unmapping of memory during __sanitizer_purge_allocator is
explicit and immediate, so unmapped regions must be explicitly mapped back
@ -282,6 +282,8 @@ class SizeClassAllocator64 {
CHECK(kMetadataSize);
uptr class_id = GetSizeClass(p);
uptr size = ClassIdToSize(class_id);
if (!size)
return nullptr;
uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size);
uptr region_beg = GetRegionBeginBySizeClass(class_id);
return reinterpret_cast<void *>(GetMetadataEnd(region_beg) -
@ -300,9 +302,8 @@ class SizeClassAllocator64 {
UnmapWithCallbackOrDie((uptr)address_range.base(), address_range.size());
}
static void FillMemoryProfile(uptr start, uptr rss, bool file, uptr *stats,
uptr stats_size) {
for (uptr class_id = 0; class_id < stats_size; class_id++)
static void FillMemoryProfile(uptr start, uptr rss, bool file, uptr *stats) {
for (uptr class_id = 0; class_id < kNumClasses; class_id++)
if (stats[class_id] == start)
stats[class_id] = rss;
}
@ -315,7 +316,7 @@ class SizeClassAllocator64 {
Printf(
"%s %02zd (%6zd): mapped: %6zdK allocs: %7zd frees: %7zd inuse: %6zd "
"num_freed_chunks %7zd avail: %6zd rss: %6zdK releases: %6zd "
"last released: %6zdK region: 0x%zx\n",
"last released: %6lldK region: 0x%zx\n",
region->exhausted ? "F" : " ", class_id, ClassIdToSize(class_id),
region->mapped_user >> 10, region->stats.n_allocated,
region->stats.n_freed, in_use, region->num_freed_chunks, avail_chunks,
@ -328,7 +329,7 @@ class SizeClassAllocator64 {
uptr rss_stats[kNumClasses];
for (uptr class_id = 0; class_id < kNumClasses; class_id++)
rss_stats[class_id] = SpaceBeg() + kRegionSize * class_id;
GetMemoryProfile(FillMemoryProfile, rss_stats, kNumClasses);
GetMemoryProfile(FillMemoryProfile, rss_stats);
uptr total_mapped = 0;
uptr total_rss = 0;
@ -353,13 +354,13 @@ class SizeClassAllocator64 {
// ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
// introspection API.
void ForceLock() NO_THREAD_SAFETY_ANALYSIS {
void ForceLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
for (uptr i = 0; i < kNumClasses; i++) {
GetRegionInfo(i)->mutex.Lock();
}
}
void ForceUnlock() NO_THREAD_SAFETY_ANALYSIS {
void ForceUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
for (int i = (int)kNumClasses - 1; i >= 0; i--) {
GetRegionInfo(i)->mutex.Unlock();
}
@ -623,7 +624,7 @@ class SizeClassAllocator64 {
static const uptr kRegionSize = kSpaceSize / kNumClassesRounded;
// FreeArray is the array of free-d chunks (stored as 4-byte offsets).
// In the worst case it may reguire kRegionSize/SizeClassMap::kMinSize
// In the worst case it may require kRegionSize/SizeClassMap::kMinSize
// elements, but in reality this will not happen. For simplicity we
// dedicate 1/8 of the region's virtual space to FreeArray.
static const uptr kFreeArraySize = kRegionSize / 8;
@ -634,8 +635,8 @@ class SizeClassAllocator64 {
return kUsingConstantSpaceBeg ? kSpaceBeg : NonConstSpaceBeg;
}
uptr SpaceEnd() const { return SpaceBeg() + kSpaceSize; }
// kRegionSize must be >= 2^32.
COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2)));
// kRegionSize should be able to satisfy the largest size class.
static_assert(kRegionSize >= SizeClassMap::kMaxSize);
// kRegionSize must be <= 2^36, see CompactPtrT.
COMPILER_CHECK((kRegionSize) <= (1ULL << (SANITIZER_WORDSIZE / 2 + 4)));
// Call mmap for user memory with at least this size.
@ -665,7 +666,7 @@ class SizeClassAllocator64 {
};
struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) RegionInfo {
BlockingMutex mutex;
Mutex mutex;
uptr num_freed_chunks; // Number of elements in the freearray.
uptr mapped_free_array; // Bytes mapped for freearray.
uptr allocated_user; // Bytes allocated for user memory.

View File

@ -128,8 +128,7 @@ void NORETURN ReportAllocationSizeTooBig(uptr user_size, uptr max_size,
void NORETURN ReportOutOfMemory(uptr requested_size, const StackTrace *stack) {
{
ScopedAllocatorErrorReport report("out-of-memory", stack);
Report("ERROR: %s: allocator is out of memory trying to allocate 0x%zx "
"bytes\n", SanitizerToolName, requested_size);
ERROR_OOM("allocator is trying to allocate 0x%zx bytes\n", requested_size);
}
Die();
}

View File

@ -82,7 +82,7 @@ class LargeMmapAllocator {
InitLinkerInitialized();
}
void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {
void *Allocate(AllocatorStats *stat, const uptr size, uptr alignment) {
CHECK(IsPowerOfTwo(alignment));
uptr map_size = RoundUpMapSize(size);
if (alignment > page_size_)
@ -99,11 +99,11 @@ class LargeMmapAllocator {
if (!map_beg)
return nullptr;
CHECK(IsAligned(map_beg, page_size_));
MapUnmapCallback().OnMap(map_beg, map_size);
uptr map_end = map_beg + map_size;
uptr res = map_beg + page_size_;
if (res & (alignment - 1)) // Align.
res += alignment - (res & (alignment - 1));
MapUnmapCallback().OnMapSecondary(map_beg, map_size, res, size);
CHECK(IsAligned(res, alignment));
CHECK(IsAligned(res, page_size_));
CHECK_GE(res + size, map_beg);
@ -161,7 +161,7 @@ class LargeMmapAllocator {
return res;
}
bool PointerIsMine(const void *p) {
bool PointerIsMine(const void *p) const {
return GetBlockBegin(p) != nullptr;
}
@ -179,7 +179,7 @@ class LargeMmapAllocator {
return GetHeader(p) + 1;
}
void *GetBlockBegin(const void *ptr) {
void *GetBlockBegin(const void *ptr) const {
uptr p = reinterpret_cast<uptr>(ptr);
SpinMutexLock l(&mutex_);
uptr nearest_chunk = 0;
@ -215,7 +215,7 @@ class LargeMmapAllocator {
// This function does the same as GetBlockBegin, but is much faster.
// Must be called with the allocator locked.
void *GetBlockBeginFastLocked(void *ptr) {
void *GetBlockBeginFastLocked(const void *ptr) {
mutex_.CheckLocked();
uptr p = reinterpret_cast<uptr>(ptr);
uptr n = n_chunks_;
@ -267,9 +267,9 @@ class LargeMmapAllocator {
// ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
// introspection API.
void ForceLock() ACQUIRE(mutex_) { mutex_.Lock(); }
void ForceLock() SANITIZER_ACQUIRE(mutex_) { mutex_.Lock(); }
void ForceUnlock() RELEASE(mutex_) { mutex_.Unlock(); }
void ForceUnlock() SANITIZER_RELEASE(mutex_) { mutex_.Unlock(); }
// Iterate over all existing chunks.
// The allocator must be locked when calling this function.
@ -301,7 +301,7 @@ class LargeMmapAllocator {
return GetHeader(reinterpret_cast<uptr>(p));
}
void *GetUser(const Header *h) {
void *GetUser(const Header *h) const {
CHECK(IsAligned((uptr)h, page_size_));
return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + page_size_);
}
@ -318,5 +318,5 @@ class LargeMmapAllocator {
struct Stats {
uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
} stats;
StaticSpinMutex mutex_;
mutable StaticSpinMutex mutex_;
};

View File

@ -193,13 +193,13 @@ class SizeClassMap {
uptr cached = MaxCachedHint(s) * s;
if (i == kBatchClassID)
d = p = l = 0;
Printf("c%02zd => s: %zd diff: +%zd %02zd%% l %zd "
"cached: %zd %zd; id %zd\n",
i, Size(i), d, p, l, MaxCachedHint(s), cached, ClassID(s));
Printf(
"c%02zu => s: %zu diff: +%zu %02zu%% l %zu cached: %zu %zu; id %zu\n",
i, Size(i), d, p, l, MaxCachedHint(s), cached, ClassID(s));
total_cached += cached;
prev_s = s;
}
Printf("Total cached: %zd\n", total_cached);
Printf("Total cached: %zu\n", total_cached);
}
static void Validate() {

View File

@ -25,19 +25,13 @@ typedef uptr AllocatorStatCounters[AllocatorStatCount];
// Per-thread stats, live in per-thread cache.
class AllocatorStats {
public:
void Init() {
internal_memset(this, 0, sizeof(*this));
}
void InitLinkerInitialized() {}
void Init() { internal_memset(this, 0, sizeof(*this)); }
void Add(AllocatorStat i, uptr v) {
v += atomic_load(&stats_[i], memory_order_relaxed);
atomic_store(&stats_[i], v, memory_order_relaxed);
atomic_fetch_add(&stats_[i], v, memory_order_relaxed);
}
void Sub(AllocatorStat i, uptr v) {
v = atomic_load(&stats_[i], memory_order_relaxed) - v;
atomic_store(&stats_[i], v, memory_order_relaxed);
atomic_fetch_sub(&stats_[i], v, memory_order_relaxed);
}
void Set(AllocatorStat i, uptr v) {
@ -58,17 +52,13 @@ class AllocatorStats {
// Global stats, used for aggregation and querying.
class AllocatorGlobalStats : public AllocatorStats {
public:
void InitLinkerInitialized() {
next_ = this;
prev_ = this;
}
void Init() {
internal_memset(this, 0, sizeof(*this));
InitLinkerInitialized();
}
void Register(AllocatorStats *s) {
SpinMutexLock l(&mu_);
LazyInit();
s->next_ = next_;
s->prev_ = this;
next_->prev_ = s;
@ -87,7 +77,7 @@ class AllocatorGlobalStats : public AllocatorStats {
internal_memset(s, 0, AllocatorStatCount * sizeof(uptr));
SpinMutexLock l(&mu_);
const AllocatorStats *stats = this;
for (;;) {
for (; stats;) {
for (int i = 0; i < AllocatorStatCount; i++)
s[i] += stats->Get(AllocatorStat(i));
stats = stats->next_;
@ -100,6 +90,13 @@ class AllocatorGlobalStats : public AllocatorStats {
}
private:
void LazyInit() {
if (!next_) {
next_ = this;
prev_ = this;
}
}
mutable StaticSpinMutex mu_;
};

View File

@ -0,0 +1,123 @@
//===-- sanitizer_array_ref.h -----------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ARRAY_REF_H
#define SANITIZER_ARRAY_REF_H
#include "sanitizer_internal_defs.h"
namespace __sanitizer {
/// ArrayRef - Represent a constant reference to an array (0 or more elements
/// consecutively in memory), i.e. a start pointer and a length. It allows
/// various APIs to take consecutive elements easily and conveniently.
///
/// This class does not own the underlying data, it is expected to be used in
/// situations where the data resides in some other buffer, whose lifetime
/// extends past that of the ArrayRef. For this reason, it is not in general
/// safe to store an ArrayRef.
///
/// This is intended to be trivially copyable, so it should be passed by
/// value.
template <typename T>
class ArrayRef {
public:
constexpr ArrayRef() {}
constexpr ArrayRef(const T *begin, const T *end) : begin_(begin), end_(end) {
DCHECK(empty() || begin);
}
constexpr ArrayRef(const T *data, uptr length)
: ArrayRef(data, data + length) {}
template <uptr N>
constexpr ArrayRef(const T (&src)[N]) : ArrayRef(src, src + N) {}
template <typename C>
constexpr ArrayRef(const C &src)
: ArrayRef(src.data(), src.data() + src.size()) {}
ArrayRef(const T &one_elt) : ArrayRef(&one_elt, &one_elt + 1) {}
const T *data() const { return empty() ? nullptr : begin_; }
const T *begin() const { return begin_; }
const T *end() const { return end_; }
bool empty() const { return begin_ == end_; }
uptr size() const { return end_ - begin_; }
/// equals - Check for element-wise equality.
bool equals(ArrayRef rhs) const {
if (size() != rhs.size())
return false;
auto r = rhs.begin();
for (auto &l : *this) {
if (!(l == *r))
return false;
++r;
}
return true;
}
/// slice(n, m) - Chop off the first N elements of the array, and keep M
/// elements in the array.
ArrayRef<T> slice(uptr N, uptr M) const {
DCHECK_LE(N + M, size());
return ArrayRef<T>(data() + N, M);
}
/// slice(n) - Chop off the first N elements of the array.
ArrayRef<T> slice(uptr N) const { return slice(N, size() - N); }
/// Drop the first \p N elements of the array.
ArrayRef<T> drop_front(uptr N = 1) const {
DCHECK_GE(size(), N);
return slice(N, size() - N);
}
/// Drop the last \p N elements of the array.
ArrayRef<T> drop_back(uptr N = 1) const {
DCHECK_GE(size(), N);
return slice(0, size() - N);
}
/// Return a copy of *this with only the first \p N elements.
ArrayRef<T> take_front(uptr N = 1) const {
if (N >= size())
return *this;
return drop_back(size() - N);
}
/// Return a copy of *this with only the last \p N elements.
ArrayRef<T> take_back(uptr N = 1) const {
if (N >= size())
return *this;
return drop_front(size() - N);
}
const T &operator[](uptr index) const {
DCHECK_LT(index, size());
return begin_[index];
}
private:
const T *begin_ = nullptr;
const T *end_ = nullptr;
};
template <typename T>
inline bool operator==(ArrayRef<T> lhs, ArrayRef<T> rhs) {
return lhs.equals(rhs);
}
template <typename T>
inline bool operator!=(ArrayRef<T> lhs, ArrayRef<T> rhs) {
return !(lhs == rhs);
}
} // namespace __sanitizer
#endif // SANITIZER_ARRAY_REF_H

View File

@ -6,7 +6,7 @@
//
//===----------------------------------------------------------------------===//
//
// Various support for assemebler.
// Various support for assembler.
//
//===----------------------------------------------------------------------===//
@ -42,13 +42,57 @@
# define CFI_RESTORE(reg)
#endif
#if defined(__x86_64__) || defined(__i386__) || defined(__sparc__)
# define ASM_TAIL_CALL jmp
#elif defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
defined(__powerpc__) || defined(__loongarch_lp64)
# define ASM_TAIL_CALL b
#elif defined(__s390__)
# define ASM_TAIL_CALL jg
#elif defined(__riscv)
# define ASM_TAIL_CALL tail
#endif
#if defined(__ELF__) && defined(__x86_64__) || defined(__i386__) || \
defined(__riscv)
# define ASM_PREEMPTIBLE_SYM(sym) sym@plt
#else
# define ASM_PREEMPTIBLE_SYM(sym) sym
#endif
#if !defined(__APPLE__)
# define ASM_HIDDEN(symbol) .hidden symbol
# define ASM_TYPE_FUNCTION(symbol) .type symbol, %function
# define ASM_SIZE(symbol) .size symbol, .-symbol
# define ASM_SYMBOL(symbol) symbol
# define ASM_SYMBOL_INTERCEPTOR(symbol) symbol
# define ASM_WRAPPER_NAME(symbol) __interceptor_##symbol
# if defined(__i386__) || defined(__powerpc__) || defined(__s390__) || \
defined(__sparc__)
// For details, see interception.h
# define ASM_WRAPPER_NAME(symbol) __interceptor_##symbol
# define ASM_TRAMPOLINE_ALIAS(symbol, name) \
.weak symbol; \
.set symbol, ASM_WRAPPER_NAME(name)
# define ASM_INTERCEPTOR_TRAMPOLINE(name)
# define ASM_INTERCEPTOR_TRAMPOLINE_SUPPORT 0
# else // Architecture supports interceptor trampoline
// Keep trampoline implementation in sync with interception/interception.h
# define ASM_WRAPPER_NAME(symbol) ___interceptor_##symbol
# define ASM_TRAMPOLINE_ALIAS(symbol, name) \
.weak symbol; \
.set symbol, __interceptor_trampoline_##name
# define ASM_INTERCEPTOR_TRAMPOLINE(name) \
.weak __interceptor_##name; \
.set __interceptor_##name, ASM_WRAPPER_NAME(name); \
.globl __interceptor_trampoline_##name; \
ASM_TYPE_FUNCTION(__interceptor_trampoline_##name); \
__interceptor_trampoline_##name: \
CFI_STARTPROC; \
ASM_TAIL_CALL ASM_PREEMPTIBLE_SYM(__interceptor_##name); \
CFI_ENDPROC; \
ASM_SIZE(__interceptor_trampoline_##name)
# define ASM_INTERCEPTOR_TRAMPOLINE_SUPPORT 1
# endif // Architecture supports interceptor trampoline
#else
# define ASM_HIDDEN(symbol)
# define ASM_TYPE_FUNCTION(symbol)
@ -61,8 +105,15 @@
#if defined(__ELF__) && (defined(__GNU__) || defined(__FreeBSD__) || \
defined(__Fuchsia__) || defined(__linux__))
// clang-format off
#define NO_EXEC_STACK_DIRECTIVE .section .note.GNU-stack,"",%progbits // NOLINT
#define NO_EXEC_STACK_DIRECTIVE .section .note.GNU-stack,"",%progbits
// clang-format on
#else
#define NO_EXEC_STACK_DIRECTIVE
#endif
#if (defined(__x86_64__) || defined(__i386__)) && defined(__has_include) && __has_include(<cet.h>)
#include <cet.h>
#endif
#ifndef _CET_ENDBR
#define _CET_ENDBR
#endif

View File

@ -74,13 +74,12 @@ template <typename T>
inline bool atomic_compare_exchange_strong(volatile T *a, typename T::Type *cmp,
typename T::Type xchg,
memory_order mo) {
typedef typename T::Type Type;
Type cmpv = *cmp;
Type prev;
prev = __sync_val_compare_and_swap(&a->val_dont_use, cmpv, xchg);
if (prev == cmpv) return true;
*cmp = prev;
return false;
// Transitioned from __sync_val_compare_and_swap to support targets like
// SPARC V8 that cannot inline atomic cmpxchg. __atomic_compare_exchange
// can then be resolved from libatomic. __ATOMIC_SEQ_CST is used to best
// match the __sync builtin memory order.
return __atomic_compare_exchange(&a->val_dont_use, cmp, &xchg, false,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}
template<typename T>
@ -96,8 +95,8 @@ inline bool atomic_compare_exchange_weak(volatile T *a,
// This include provides explicit template instantiations for atomic_uint64_t
// on MIPS32, which does not directly support 8 byte atomics. It has to
// proceed the template definitions above.
#if defined(_MIPS_SIM) && defined(_ABIO32)
#include "sanitizer_atomic_clang_mips.h"
#if defined(_MIPS_SIM) && defined(_ABIO32) && _MIPS_SIM == _ABIO32
# include "sanitizer_atomic_clang_mips.h"
#endif
#undef ATOMIC_ORDER

View File

@ -18,7 +18,7 @@ namespace __sanitizer {
// MIPS32 does not support atomics > 4 bytes. To address this lack of
// functionality, the sanitizer library provides helper methods which use an
// internal spin lock mechanism to emulate atomic oprations when the size is
// internal spin lock mechanism to emulate atomic operations when the size is
// 8 bytes.
static void __spin_lock(volatile int *lock) {
while (__sync_lock_test_and_set(lock, 1))

View File

@ -0,0 +1,148 @@
//===-- sanitizer_chained_origin_depot.cpp --------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// A storage for chained origins.
//===----------------------------------------------------------------------===//
#include "sanitizer_chained_origin_depot.h"
#include "sanitizer_stackdepotbase.h"
namespace __sanitizer {
namespace {
// Key for one link of an origin chain: the StackDepot id of the current
// element (here_id) paired with the id of the previous chain link (prev_id).
struct ChainedOriginDepotDesc {
  u32 here_id;
  u32 prev_id;
};
// Node stored inside StackDepotBase; provides the interface StackDepotBase
// requires (eq/hash/is_valid/store/load/get_handle and the nested Handle).
struct ChainedOriginDepotNode {
  using hash_type = u32;
  // NOTE(review): `link` appears to be the intrusive chaining field used by
  // StackDepotBase's hash buckets - confirm against sanitizer_stackdepotbase.h.
  u32 link;
  u32 here_id;
  u32 prev_id;
  typedef ChainedOriginDepotDesc args_type;
  bool eq(hash_type hash, const args_type &args) const;
  // Nodes carry no out-of-line payload, so no extra memory is reported.
  static uptr allocated() { return 0; }
  static hash_type hash(const args_type &args);
  static bool is_valid(const args_type &args);
  void store(u32 id, const args_type &args, hash_type other_hash);
  args_type load(u32 id) const;
  // Lightweight non-owning view of a stored node plus its depot id.
  struct Handle {
    const ChainedOriginDepotNode *node_ = nullptr;
    u32 id_ = 0;
    Handle(const ChainedOriginDepotNode *node, u32 id) : node_(node), id_(id) {}
    bool valid() const { return node_; }
    u32 id() const { return id_; }
    int here_id() const { return node_->here_id; }
    int prev_id() const { return node_->prev_id; }
  };
  static Handle get_handle(u32 id);
  typedef Handle handle_type;
};
} // namespace
// Process-wide storage instance backing the ChainedOriginDepot facade below.
static StackDepotBase<ChainedOriginDepotNode, 4, 20> depot;
// Two chain links are equal iff both ids match; the precomputed hash argument
// is not needed for the comparison.
bool ChainedOriginDepotNode::eq(hash_type hash, const args_type &args) const {
  return here_id == args.here_id && prev_id == args.prev_id;
}
/* This is murmur2 hash for the 64->32 bit case.
   It does not behave all that well because the keys have a very biased
   distribution (I've seen 7-element buckets with the table only 14% full).

   here_id is built of
   * (1 bits) Reserved, zero.
   * (8 bits) Part id = bits 13..20 of the hash value of here_id's key.
   * (23 bits) Sequential number (each part has its own sequence).

   prev_id has either the same distribution as here_id (but with 3:8:21)
   split, or one of two reserved values (-1) or (-2). Either case can
   dominate depending on the workload.
*/
ChainedOriginDepotNode::hash_type ChainedOriginDepotNode::hash(
    const args_type &args) {
  // MurmurHash2 constants (multiplier, seed, rotation).
  const u32 m = 0x5bd1e995;
  const u32 seed = 0x9747b28c;
  const u32 r = 24;
  u32 h = seed;
  // Mix in the first 32-bit word of the key (here_id).
  u32 k = args.here_id;
  k *= m;
  k ^= k >> r;
  k *= m;
  h *= m;
  h ^= k;
  // Mix in the second word (prev_id).
  k = args.prev_id;
  k *= m;
  k ^= k >> r;
  k *= m;
  h *= m;
  h ^= k;
  // Final avalanche of the accumulated hash.
  h ^= h >> 13;
  h *= m;
  h ^= h >> 15;
  return h;
}
// Every (here_id, prev_id) pair is storable; nothing to validate.
bool ChainedOriginDepotNode::is_valid(const args_type &args) { return true; }
// Copies the chain ids into this node. `id` and `other_hash` are unused:
// equality is decided purely by the two ids (see eq()).
void ChainedOriginDepotNode::store(u32 id, const args_type &args,
                                   hash_type other_hash) {
  here_id = args.here_id;
  prev_id = args.prev_id;
}
// Reconstructs the key that was stored in this node; `id` is unused.
ChainedOriginDepotNode::args_type ChainedOriginDepotNode::load(u32 id) const {
  args_type ret = {here_id, prev_id};
  return ret;
}
// Wraps the node with depot id `id` in a Handle.
// NOTE(review): indexes depot.nodes directly - assumes `id` is a valid index
// previously returned by the depot.
ChainedOriginDepotNode::Handle ChainedOriginDepotNode::get_handle(u32 id) {
  return Handle(&depot.nodes[id], id);
}
// All ChainedOriginDepot methods forward to the file-static `depot` above.
ChainedOriginDepot::ChainedOriginDepot() {}
// Returns usage statistics of the underlying stack depot.
StackDepotStats ChainedOriginDepot::GetStats() const {
  return depot.GetStats();
}
// Stores the (here_id, prev_id) link. *new_id receives the chain id; the
// return value is true if a new entry was created, false if an identical
// link already existed (in which case *new_id is the existing id).
bool ChainedOriginDepot::Put(u32 here_id, u32 prev_id, u32 *new_id) {
  ChainedOriginDepotDesc desc = {here_id, prev_id};
  bool inserted;
  *new_id = depot.Put(desc, &inserted);
  return inserted;
}
// Looks up chain `id`: returns its here_id and writes prev_id to *other.
u32 ChainedOriginDepot::Get(u32 id, u32 *other) {
  ChainedOriginDepotDesc desc = depot.Get(id);
  *other = desc.prev_id;
  return desc.here_id;
}
// Lock/unlock the depot's internal locks (forwarded verbatim).
void ChainedOriginDepot::LockAll() { depot.LockAll(); }
void ChainedOriginDepot::UnlockAll() { depot.UnlockAll(); }
// Test-only: releases the depot's backing storage.
void ChainedOriginDepot::TestOnlyUnmap() { depot.TestOnlyUnmap(); }
} // namespace __sanitizer

View File

@ -0,0 +1,46 @@
//===-- sanitizer_chained_origin_depot.h ------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// A storage for chained origins.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_CHAINED_ORIGIN_DEPOT_H
#define SANITIZER_CHAINED_ORIGIN_DEPOT_H
#include "sanitizer_common.h"
namespace __sanitizer {
// Facade over the process-wide chained-origin storage (see the .cpp file).
class ChainedOriginDepot {
 public:
  ChainedOriginDepot();
  // Gets the statistic of the origin chain storage.
  StackDepotStats GetStats() const;
  // Stores a chain with StackDepot ID here_id and previous chain ID prev_id.
  // If successful, returns true and the new chain id new_id.
  // If the same element already exists, returns false and sets new_id to the
  // existing ID.
  bool Put(u32 here_id, u32 prev_id, u32 *new_id);
  // Retrieves the stored StackDepot ID for the given origin ID; the previous
  // chain ID is written to *other.
  u32 Get(u32 id, u32 *other);
  // Acquire/release all internal depot locks.
  void LockAll();
  void UnlockAll();
  // Test-only: releases the backing storage.
  void TestOnlyUnmap();
 private:
  // Not copyable.
  ChainedOriginDepot(const ChainedOriginDepot &) = delete;
  void operator=(const ChainedOriginDepot &) = delete;
};
} // namespace __sanitizer
#endif // SANITIZER_CHAINED_ORIGIN_DEPOT_H

View File

@ -11,10 +11,12 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common.h"
#include "sanitizer_allocator_interface.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_atomic.h"
#include "sanitizer_flags.h"
#include "sanitizer_interface_internal.h"
#include "sanitizer_libc.h"
#include "sanitizer_placement_new.h"
@ -44,15 +46,41 @@ void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
Die();
}
recursion_count++;
Report("ERROR: %s failed to "
"%s 0x%zx (%zd) bytes of %s (error code: %d)\n",
SanitizerToolName, mmap_type, size, size, mem_type, err);
if (ErrorIsOOM(err)) {
ERROR_OOM("failed to %s 0x%zx (%zd) bytes of %s (error code: %d)\n",
mmap_type, size, size, mem_type, err);
} else {
Report(
"ERROR: %s failed to "
"%s 0x%zx (%zd) bytes of %s (error code: %d)\n",
SanitizerToolName, mmap_type, size, size, mem_type, err);
}
#if !SANITIZER_GO
DumpProcessMap();
#endif
UNREACHABLE("unable to mmap");
}
// Reports a failed munmap of `size` bytes at `addr` (errno value `err`) and
// terminates the process. If raw_report is set, or if we re-enter this
// function, only a bare RawWrite is emitted, because Report()/CHECK below may
// themselves trigger munmap and fail recursively.
void NORETURN ReportMunmapFailureAndDie(void *addr, uptr size, error_t err,
                                        bool raw_report) {
  static int recursion_count;
  if (raw_report || recursion_count) {
    // If raw report is requested or we went into recursion just die. The
    // Report() and CHECK calls below may call munmap recursively and fail.
    RawWrite("ERROR: Failed to munmap\n");
    Die();
  }
  recursion_count++;
  Report(
      "ERROR: %s failed to deallocate 0x%zx (%zd) bytes at address %p (error "
      "code: %d)\n",
      SanitizerToolName, size, size, addr, err);
#if !SANITIZER_GO
  // Dump the process memory map to help diagnose address-space issues.
  DumpProcessMap();
#endif
  UNREACHABLE("unable to unmmap");
}
typedef bool UptrComparisonFunction(const uptr &a, const uptr &b);
typedef bool U32ComparisonFunction(const u32 &a, const u32 &b);
@ -138,13 +166,21 @@ void LoadedModule::set(const char *module_name, uptr base_address,
set(module_name, base_address);
arch_ = arch;
internal_memcpy(uuid_, uuid, sizeof(uuid_));
uuid_size_ = kModuleUUIDSize;
instrumented_ = instrumented;
}
// Overwrites the module UUID with `uuid`, clamping the copy to the
// kModuleUUIDSize-byte buffer; uuid_size_ records the copied length.
void LoadedModule::setUuid(const char *uuid, uptr size) {
  if (size > kModuleUUIDSize)
    size = kModuleUUIDSize;
  internal_memcpy(uuid_, uuid, size);
  uuid_size_ = size;
}
void LoadedModule::clear() {
InternalFree(full_name_);
base_address_ = 0;
max_executable_address_ = 0;
max_address_ = 0;
full_name_ = nullptr;
arch_ = kModuleArchUnknown;
internal_memset(uuid_, 0, kModuleUUIDSize);
@ -162,8 +198,7 @@ void LoadedModule::addAddressRange(uptr beg, uptr end, bool executable,
AddressRange *r =
new(mem) AddressRange(beg, end, executable, writable, name);
ranges_.push_back(r);
if (executable && end > max_executable_address_)
max_executable_address_ = end;
max_address_ = Max(max_address_, end);
}
bool LoadedModule::containsAddress(uptr address) const {
@ -301,18 +336,22 @@ struct MallocFreeHook {
static MallocFreeHook MFHooks[kMaxMallocFreeHooks];
void RunMallocHooks(const void *ptr, uptr size) {
void RunMallocHooks(void *ptr, uptr size) {
__sanitizer_malloc_hook(ptr, size);
for (int i = 0; i < kMaxMallocFreeHooks; i++) {
auto hook = MFHooks[i].malloc_hook;
if (!hook) return;
if (!hook)
break;
hook(ptr, size);
}
}
void RunFreeHooks(const void *ptr) {
void RunFreeHooks(void *ptr) {
__sanitizer_free_hook(ptr);
for (int i = 0; i < kMaxMallocFreeHooks; i++) {
auto hook = MFHooks[i].free_hook;
if (!hook) return;
if (!hook)
break;
hook(ptr);
}
}
@ -338,6 +377,13 @@ void SleepForSeconds(unsigned seconds) {
}
void SleepForMillis(unsigned millis) { internal_usleep((u64)millis * 1000); }
// If `seconds` is non-zero, announces the pause (tagged with `label`) and
// sleeps for that long so a debugger can attach; zero is a no-op.
void WaitForDebugger(unsigned seconds, const char *label) {
  if (!seconds)
    return;
  Report("Sleeping for %u second(s) %s\n", seconds, label);
  SleepForSeconds(seconds);
}
} // namespace __sanitizer
using namespace __sanitizer;
@ -360,4 +406,16 @@ int __sanitizer_install_malloc_and_free_hooks(void (*malloc_hook)(const void *,
void (*free_hook)(const void *)) {
return InstallMallocFreeHooks(malloc_hook, free_hook);
}
// Provide default (no-op) implementation of malloc hooks. These are weak
// interface symbols that tools or users may override to observe allocations;
// the casts only silence unused-parameter warnings.
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook, void *ptr,
                             uptr size) {
  (void)ptr;
  (void)size;
}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_free_hook, void *ptr) {
  (void)ptr;
}
} // extern "C"

View File

@ -16,7 +16,6 @@
#define SANITIZER_COMMON_H
#include "sanitizer_flags.h"
#include "sanitizer_interface_internal.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_list.h"
@ -118,9 +117,15 @@ void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
// unaccessible memory.
bool MprotectNoAccess(uptr addr, uptr size);
bool MprotectReadOnly(uptr addr, uptr size);
bool MprotectReadWrite(uptr addr, uptr size);
void MprotectMallocZones(void *addr, int prot);
#if SANITIZER_WINDOWS
// Zero previously mmap'd memory. Currently used only on Windows.
bool ZeroMmapFixedRegion(uptr fixed_addr, uptr size) WARN_UNUSED_RESULT;
#endif
#if SANITIZER_LINUX
// Unmap memory. Currently only used on Linux.
void UnmapFromTo(uptr from, uptr to);
@ -171,8 +176,8 @@ void SetShadowRegionHugePageMode(uptr addr, uptr length);
bool DontDumpShadowMemory(uptr addr, uptr length);
// Check if the built VMA size matches the runtime one.
void CheckVMASize();
void RunMallocHooks(const void *ptr, uptr size);
void RunFreeHooks(const void *ptr);
void RunMallocHooks(void *ptr, uptr size);
void RunFreeHooks(void *ptr);
class ReservedAddressRange {
public:
@ -192,12 +197,13 @@ class ReservedAddressRange {
};
typedef void (*fill_profile_f)(uptr start, uptr rss, bool file,
/*out*/uptr *stats, uptr stats_size);
/*out*/ uptr *stats);
// Parse the contents of /proc/self/smaps and generate a memory profile.
// |cb| is a tool-specific callback that fills the |stats| array containing
// |stats_size| elements.
void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size);
// |cb| is a tool-specific callback that fills the |stats| array.
void GetMemoryProfile(fill_profile_f cb, uptr *stats);
void ParseUnixMemoryProfile(fill_profile_f cb, uptr *stats, char *smaps,
uptr smaps_len);
// Simple low-level (mmap-based) allocator for internal use. Doesn't have
// constructor, so all instances of LowLevelAllocator should be
@ -206,6 +212,7 @@ class LowLevelAllocator {
public:
// Requires an external lock.
void *Allocate(uptr size);
private:
char *allocated_end_;
char *allocated_current_;
@ -222,8 +229,8 @@ void CatastrophicErrorWrite(const char *buffer, uptr length);
void RawWrite(const char *buffer);
bool ColorizeReports();
void RemoveANSIEscapeSequencesFromString(char *buffer);
void Printf(const char *format, ...);
void Report(const char *format, ...);
void Printf(const char *format, ...) FORMAT(1, 2);
void Report(const char *format, ...) FORMAT(1, 2);
void SetPrintfAndReportCallback(void (*callback)(const char *));
#define VReport(level, ...) \
do { \
@ -237,12 +244,12 @@ void SetPrintfAndReportCallback(void (*callback)(const char *));
// Lock sanitizer error reporting and protects against nested errors.
class ScopedErrorReportLock {
public:
ScopedErrorReportLock() ACQUIRE(mutex_) { Lock(); }
~ScopedErrorReportLock() RELEASE(mutex_) { Unlock(); }
ScopedErrorReportLock() SANITIZER_ACQUIRE(mutex_) { Lock(); }
~ScopedErrorReportLock() SANITIZER_RELEASE(mutex_) { Unlock(); }
static void Lock() ACQUIRE(mutex_);
static void Unlock() RELEASE(mutex_);
static void CheckLocked() CHECK_LOCKED(mutex_);
static void Lock() SANITIZER_ACQUIRE(mutex_);
static void Unlock() SANITIZER_RELEASE(mutex_);
static void CheckLocked() SANITIZER_CHECK_LOCKED(mutex_);
private:
static atomic_uintptr_t reporting_thread_;
@ -285,7 +292,7 @@ void SetStackSizeLimitInBytes(uptr limit);
bool AddressSpaceIsUnlimited();
void SetAddressSpaceUnlimited();
void AdjustStackSize(void *attr);
void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args);
void PlatformPrepareForSandboxing(void *args);
void SetSandboxingCallback(void (*f)());
void InitializeCoverage(bool enabled, const char *coverage_dir);
@ -294,6 +301,7 @@ void InitTlsSize();
uptr GetTlsSize();
// Other
void WaitForDebugger(unsigned seconds, const char *label);
void SleepForSeconds(unsigned seconds);
void SleepForMillis(unsigned millis);
u64 NanoTime();
@ -309,6 +317,20 @@ CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2);
void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
const char *mmap_type, error_t err,
bool raw_report = false);
void NORETURN ReportMunmapFailureAndDie(void *ptr, uptr size, error_t err,
bool raw_report = false);
// Returns true if the platform-specific error reported is an OOM error.
bool ErrorIsOOM(error_t err);
// This reports an error in the form:
//
// `ERROR: {{SanitizerToolName}}: out of memory: {{err_msg}}`
//
// Downstream tools that read sanitizer output will know that errors starting
// in this format are specifically OOM errors.
#define ERROR_OOM(err_msg, ...) \
Report("ERROR: %s: out of memory: " err_msg, SanitizerToolName, __VA_ARGS__)
// Specific tools may override behavior of "Die" function to do tool-specific
// job.
@ -325,12 +347,6 @@ void SetUserDieCallback(DieCallbackType callback);
void SetCheckUnwindCallback(void (*callback)());
// Callback will be called if soft_rss_limit_mb is given and the limit is
// exceeded (exceeded==true) or if rss went down below the limit
// (exceeded==false).
// The callback should be registered once at the tool init time.
void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded));
// Functions related to signal handling.
typedef void (*SignalHandlerType)(int, void *, void *);
HandleSignalMode GetHandleSignalMode(int signum);
@ -371,7 +387,7 @@ void ReportErrorSummary(const char *error_type, const AddressInfo &info,
void ReportErrorSummary(const char *error_type, const StackTrace *trace,
const char *alt_tool_name = nullptr);
void ReportMmapWriteExec(int prot);
void ReportMmapWriteExec(int prot, int mflags);
// Math
#if SANITIZER_WINDOWS && !defined(__clang__) && !defined(__GNUC__)
@ -419,9 +435,7 @@ inline uptr LeastSignificantSetBitIndex(uptr x) {
return up;
}
inline bool IsPowerOfTwo(uptr x) {
return (x & (x - 1)) == 0;
}
inline constexpr bool IsPowerOfTwo(uptr x) { return (x & (x - 1)) == 0; }
inline uptr RoundUpToPowerOfTwo(uptr size) {
CHECK(size);
@ -433,16 +447,16 @@ inline uptr RoundUpToPowerOfTwo(uptr size) {
return 1ULL << (up + 1);
}
inline uptr RoundUpTo(uptr size, uptr boundary) {
inline constexpr uptr RoundUpTo(uptr size, uptr boundary) {
RAW_CHECK(IsPowerOfTwo(boundary));
return (size + boundary - 1) & ~(boundary - 1);
}
inline uptr RoundDownTo(uptr x, uptr boundary) {
inline constexpr uptr RoundDownTo(uptr x, uptr boundary) {
return x & ~(boundary - 1);
}
inline bool IsAligned(uptr a, uptr alignment) {
inline constexpr bool IsAligned(uptr a, uptr alignment) {
return (a & (alignment - 1)) == 0;
}
@ -461,6 +475,10 @@ template <class T>
constexpr T Max(T a, T b) {
return a > b ? a : b;
}
// Absolute value of `x`; requires T to support comparison with 0 and unary
// minus. Evaluable at compile time.
template <class T>
constexpr T Abs(T x) {
  if (x < 0)
    return -x;
  return x;
}
template<class T> void Swap(T& a, T& b) {
T tmp = a;
a = b;
@ -502,8 +520,8 @@ class InternalMmapVectorNoCtor {
return data_[i];
}
void push_back(const T &element) {
CHECK_LE(size_, capacity());
if (size_ == capacity()) {
if (UNLIKELY(size_ >= capacity())) {
CHECK_EQ(size_, capacity());
uptr new_capacity = RoundUpToPowerOfTwo(size_ + 1);
Realloc(new_capacity);
}
@ -563,7 +581,7 @@ class InternalMmapVectorNoCtor {
}
private:
void Realloc(uptr new_capacity) {
NOINLINE void Realloc(uptr new_capacity) {
CHECK_GT(new_capacity, 0);
CHECK_LE(size_, new_capacity);
uptr new_capacity_bytes =
@ -618,7 +636,7 @@ class InternalScopedString {
buffer_.resize(1);
buffer_[0] = '\0';
}
void append(const char *format, ...);
void append(const char *format, ...) FORMAT(2, 3);
const char *data() const { return buffer_.data(); }
char *data() { return buffer_.data(); }
@ -670,11 +688,9 @@ void Sort(T *v, uptr size, Compare comp = {}) {
// Works like std::lower_bound: finds the first element that is not less
// than the val.
template <class Container,
template <class Container, class T,
class Compare = CompareLess<typename Container::value_type>>
uptr InternalLowerBound(const Container &v,
const typename Container::value_type &val,
Compare comp = {}) {
uptr InternalLowerBound(const Container &v, const T &val, Compare comp = {}) {
uptr first = 0;
uptr last = v.size();
while (last > first) {
@ -697,7 +713,9 @@ enum ModuleArch {
kModuleArchARMV7S,
kModuleArchARMV7K,
kModuleArchARM64,
kModuleArchRISCV64
kModuleArchLoongArch64,
kModuleArchRISCV64,
kModuleArchHexagon
};
// Sorts and removes duplicates from the container.
@ -721,12 +739,15 @@ void SortAndDedup(Container &v, Compare comp = {}) {
v.resize(last + 1);
}
constexpr uptr kDefaultFileMaxSize = FIRST_32_SECOND_64(1 << 26, 1 << 28);
// Opens the file 'file_name" and reads up to 'max_len' bytes.
// The resulting buffer is mmaped and stored in '*buff'.
// Returns true if file was successfully opened and read.
bool ReadFileToVector(const char *file_name,
InternalMmapVectorNoCtor<char> *buff,
uptr max_len = 1 << 26, error_t *errno_p = nullptr);
uptr max_len = kDefaultFileMaxSize,
error_t *errno_p = nullptr);
// Opens the file 'file_name" and reads up to 'max_len' bytes.
// This function is less I/O efficient than ReadFileToVector as it may reread
@ -737,9 +758,12 @@ bool ReadFileToVector(const char *file_name,
// The total number of read bytes is stored in '*read_len'.
// Returns true if file was successfully opened and read.
bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
uptr *read_len, uptr max_len = 1 << 26,
uptr *read_len, uptr max_len = kDefaultFileMaxSize,
error_t *errno_p = nullptr);
int GetModuleAndOffsetForPc(uptr pc, char *module_name, uptr module_name_len,
uptr *pc_offset);
// When adding a new architecture, don't forget to also update
// script/asan_symbolize.py and sanitizer_symbolizer_libcdep.cpp.
inline const char *ModuleArchToString(ModuleArch arch) {
@ -762,14 +786,22 @@ inline const char *ModuleArchToString(ModuleArch arch) {
return "armv7k";
case kModuleArchARM64:
return "arm64";
case kModuleArchLoongArch64:
return "loongarch64";
case kModuleArchRISCV64:
return "riscv64";
case kModuleArchHexagon:
return "hexagon";
}
CHECK(0 && "Invalid module arch");
return "";
}
#if SANITIZER_APPLE
const uptr kModuleUUIDSize = 16;
#else
const uptr kModuleUUIDSize = 32;
#endif
const uptr kMaxSegName = 16;
// Represents a binary loaded into virtual memory (e.g. this can be an
@ -779,8 +811,9 @@ class LoadedModule {
LoadedModule()
: full_name_(nullptr),
base_address_(0),
max_executable_address_(0),
max_address_(0),
arch_(kModuleArchUnknown),
uuid_size_(0),
instrumented_(false) {
internal_memset(uuid_, 0, kModuleUUIDSize);
ranges_.clear();
@ -788,6 +821,7 @@ class LoadedModule {
void set(const char *module_name, uptr base_address);
void set(const char *module_name, uptr base_address, ModuleArch arch,
u8 uuid[kModuleUUIDSize], bool instrumented);
void setUuid(const char *uuid, uptr size);
void clear();
void addAddressRange(uptr beg, uptr end, bool executable, bool writable,
const char *name = nullptr);
@ -795,9 +829,10 @@ class LoadedModule {
const char *full_name() const { return full_name_; }
uptr base_address() const { return base_address_; }
uptr max_executable_address() const { return max_executable_address_; }
uptr max_address() const { return max_address_; }
ModuleArch arch() const { return arch_; }
const u8 *uuid() const { return uuid_; }
uptr uuid_size() const { return uuid_size_; }
bool instrumented() const { return instrumented_; }
struct AddressRange {
@ -824,8 +859,9 @@ class LoadedModule {
private:
char *full_name_; // Owned.
uptr base_address_;
uptr max_executable_address_;
uptr max_address_;
ModuleArch arch_;
uptr uuid_size_;
u8 uuid_[kModuleUUIDSize];
bool instrumented_;
IntrusiveList<AddressRange> ranges_;
@ -883,13 +919,13 @@ void WriteToSyslog(const char *buffer);
#define SANITIZER_WIN_TRACE 0
#endif
#if SANITIZER_MAC || SANITIZER_WIN_TRACE
#if SANITIZER_APPLE || SANITIZER_WIN_TRACE
void LogFullErrorReport(const char *buffer);
#else
inline void LogFullErrorReport(const char *buffer) {}
#endif
#if SANITIZER_LINUX || SANITIZER_MAC
#if SANITIZER_LINUX || SANITIZER_APPLE
void WriteOneLineToSyslog(const char *s);
void LogMessageOnPrintf(const char *str);
#else
@ -951,7 +987,7 @@ struct SignalContext {
uptr sp;
uptr bp;
bool is_memory_access;
enum WriteFlag { UNKNOWN, READ, WRITE } write_flag;
enum WriteFlag { Unknown, Read, Write } write_flag;
// In some cases the kernel cannot provide the true faulting address; `addr`
// will be zero then. This field allows to distinguish between these cases
@ -996,7 +1032,6 @@ struct SignalContext {
};
void InitializePlatformEarly();
void MaybeReexec();
template <typename Fn>
class RunOnDestruction {
@ -1049,31 +1084,10 @@ inline u32 GetNumberOfCPUsCached() {
return NumberOfCPUsCached;
}
template <typename T>
class ArrayRef {
public:
ArrayRef() {}
ArrayRef(T *begin, T *end) : begin_(begin), end_(end) {}
T *begin() { return begin_; }
T *end() { return end_; }
private:
T *begin_ = nullptr;
T *end_ = nullptr;
};
#define PRINTF_128(v) \
(*((u8 *)&v + 0)), (*((u8 *)&v + 1)), (*((u8 *)&v + 2)), (*((u8 *)&v + 3)), \
(*((u8 *)&v + 4)), (*((u8 *)&v + 5)), (*((u8 *)&v + 6)), \
(*((u8 *)&v + 7)), (*((u8 *)&v + 8)), (*((u8 *)&v + 9)), \
(*((u8 *)&v + 10)), (*((u8 *)&v + 11)), (*((u8 *)&v + 12)), \
(*((u8 *)&v + 13)), (*((u8 *)&v + 14)), (*((u8 *)&v + 15))
} // namespace __sanitizer
inline void *operator new(__sanitizer::operator_new_size_type size,
__sanitizer::LowLevelAllocator &alloc) { // NOLINT
__sanitizer::LowLevelAllocator &alloc) {
return alloc.Allocate(size);
}

File diff suppressed because it is too large Load Diff

View File

@ -324,8 +324,8 @@ static void scanf_common(void *ctx, int n_inputs, bool allowGnuMalloc,
continue;
int size = scanf_get_value_size(&dir);
if (size == FSS_INVALID) {
Report("%s: WARNING: unexpected format specifier in scanf interceptor: ",
SanitizerToolName, "%.*s\n", dir.end - dir.begin, dir.begin);
Report("%s: WARNING: unexpected format specifier in scanf interceptor: %.*s\n",
SanitizerToolName, static_cast<int>(dir.end - dir.begin), dir.begin);
break;
}
void *argp = va_arg(aq, void *);
@ -340,11 +340,19 @@ static void scanf_common(void *ctx, int n_inputs, bool allowGnuMalloc,
size = 0;
}
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, argp, size);
// For %ms/%mc, write the allocated output buffer as well.
// For %mc/%mC/%ms/%m[/%mS, write the allocated output buffer as well.
if (dir.allocate) {
char *buf = *(char **)argp;
if (buf)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, internal_strlen(buf) + 1);
if (char *buf = *(char **)argp) {
if (dir.convSpecifier == 'c')
size = 1;
else if (dir.convSpecifier == 'C')
size = sizeof(wchar_t);
else if (dir.convSpecifier == 'S')
size = (internal_wcslen((wchar_t *)buf) + 1) * sizeof(wchar_t);
else // 's' or '['
size = internal_strlen(buf) + 1;
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, size);
}
}
}
}
@ -469,7 +477,7 @@ static int printf_get_value_size(PrintfDirective *dir) {
break; \
default: \
Report("WARNING: unexpected floating-point arg size" \
" in printf interceptor: %d\n", size); \
" in printf interceptor: %zu\n", static_cast<uptr>(size)); \
return; \
} \
} else { \
@ -484,7 +492,7 @@ static int printf_get_value_size(PrintfDirective *dir) {
break; \
default: \
Report("WARNING: unexpected arg size" \
" in printf interceptor: %d\n", size); \
" in printf interceptor: %zu\n", static_cast<uptr>(size)); \
return; \
} \
} \
@ -530,7 +538,7 @@ static void printf_common(void *ctx, const char *format, va_list aq) {
Report(
"%s: WARNING: unexpected format specifier in printf "
"interceptor: %.*s (reported once per process)\n",
SanitizerToolName, dir.end - dir.begin, dir.begin);
SanitizerToolName, static_cast<int>(dir.end - dir.begin), dir.begin);
break;
}
if (dir.convSpecifier == 'n') {

View File

@ -115,11 +115,19 @@ static void ioctl_table_fill() {
// _(SOUND_MIXER_WRITE_MUTE, WRITE, sizeof(int)); // same as ...WRITE_ENHANCE
_(BLKFLSBUF, NONE, 0);
_(BLKGETSIZE, WRITE, sizeof(uptr));
_(BLKRAGET, WRITE, sizeof(int));
_(BLKRAGET, WRITE, sizeof(uptr));
_(BLKRASET, NONE, 0);
_(BLKROGET, WRITE, sizeof(int));
_(BLKROSET, READ, sizeof(int));
_(BLKRRPART, NONE, 0);
_(BLKFRASET, NONE, 0);
_(BLKFRAGET, WRITE, sizeof(uptr));
_(BLKSECTSET, READ, sizeof(short));
_(BLKSECTGET, WRITE, sizeof(short));
_(BLKSSZGET, WRITE, sizeof(int));
_(BLKBSZGET, WRITE, sizeof(int));
_(BLKBSZSET, READ, sizeof(uptr));
_(BLKGETSIZE64, WRITE, sizeof(u64));
_(CDROMEJECT, NONE, 0);
_(CDROMEJECT_SW, NONE, 0);
_(CDROMMULTISESSION, WRITE, struct_cdrom_multisession_sz);

View File

@ -0,0 +1,244 @@
//===-- sanitizer_common_interceptors_memintrinsics.inc ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Memintrinsic function interceptors for tools like AddressSanitizer,
// ThreadSanitizer, MemorySanitizer, etc.
//
// These interceptors are part of the common interceptors, but separated out so
// that implementations may add them, if necessary, to a separate source file
// that should define SANITIZER_COMMON_NO_REDEFINE_BUILTINS at the top.
//
// This file should be included into the tool's memintrinsic interceptor file,
// which has to define its own macros:
// COMMON_INTERCEPTOR_ENTER
// COMMON_INTERCEPTOR_READ_RANGE
// COMMON_INTERCEPTOR_WRITE_RANGE
// COMMON_INTERCEPTOR_MEMSET_IMPL
// COMMON_INTERCEPTOR_MEMMOVE_IMPL
// COMMON_INTERCEPTOR_MEMCPY_IMPL
// COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED
//===----------------------------------------------------------------------===//
#ifdef SANITIZER_REDEFINE_BUILTINS_H
#error "Define SANITIZER_COMMON_NO_REDEFINE_BUILTINS in .cpp file"
#endif
#include "interception/interception.h"
#include "sanitizer_platform_interceptors.h"
// Platform-specific options.
#if SANITIZER_APPLE
#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE 0
#elif SANITIZER_WINDOWS64
#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE 0
#else
#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE 1
#endif // SANITIZER_APPLE
#ifndef COMMON_INTERCEPTOR_MEMSET_IMPL
#define COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, dst, v, size) \
{ \
if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) \
return internal_memset(dst, v, size); \
COMMON_INTERCEPTOR_ENTER(ctx, memset, dst, v, size); \
if (common_flags()->intercept_intrin) \
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size); \
return REAL(memset)(dst, v, size); \
}
#endif
#ifndef COMMON_INTERCEPTOR_MEMMOVE_IMPL
#define COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size) \
{ \
if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) \
return internal_memmove(dst, src, size); \
COMMON_INTERCEPTOR_ENTER(ctx, memmove, dst, src, size); \
if (common_flags()->intercept_intrin) { \
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size); \
COMMON_INTERCEPTOR_READ_RANGE(ctx, src, size); \
} \
return REAL(memmove)(dst, src, size); \
}
#endif
#ifndef COMMON_INTERCEPTOR_MEMCPY_IMPL
#define COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, dst, src, size) \
{ \
if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) { \
return internal_memmove(dst, src, size); \
} \
COMMON_INTERCEPTOR_ENTER(ctx, memcpy, dst, src, size); \
if (common_flags()->intercept_intrin) { \
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size); \
COMMON_INTERCEPTOR_READ_RANGE(ctx, src, size); \
} \
return REAL(memcpy)(dst, src, size); \
}
#endif
#if SANITIZER_INTERCEPT_MEMSET
// Interceptor for memset(3); body is supplied by COMMON_INTERCEPTOR_MEMSET_IMPL,
// which the including tool may have predefined.
INTERCEPTOR(void *, memset, void *dst, int v, uptr size) {
  void *ctx;
  COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, dst, v, size);
}
#define INIT_MEMSET COMMON_INTERCEPT_FUNCTION(memset)
#else
#define INIT_MEMSET
#endif
#if SANITIZER_INTERCEPT_MEMMOVE
// Interceptor for memmove(3); body is supplied by
// COMMON_INTERCEPTOR_MEMMOVE_IMPL, which the including tool may predefine.
INTERCEPTOR(void *, memmove, void *dst, const void *src, uptr size) {
  void *ctx;
  COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size);
}
#define INIT_MEMMOVE COMMON_INTERCEPT_FUNCTION(memmove)
#else
#define INIT_MEMMOVE
#endif
#if SANITIZER_INTERCEPT_MEMCPY
// Interceptor for memcpy(3). On platforms where memcpy and memmove are the
// same implementation (see PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE above),
// the memmove path is used instead so overlapping copies remain legal.
INTERCEPTOR(void *, memcpy, void *dst, const void *src, uptr size) {
  // N.B.: If we switch this to internal_ we'll have to use internal_memmove
  // due to memcpy being an alias of memmove on OS X.
  void *ctx;
#if PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE
  COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, dst, src, size);
#else
  COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size);
#endif
}
// Bind memcpy at init: intercept it directly, or alias REAL(memcpy) to the
// already-intercepted memmove when the platform shares one implementation.
#define INIT_MEMCPY                                  \
  do {                                               \
    if (PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE) { \
      COMMON_INTERCEPT_FUNCTION(memcpy);             \
    } else {                                         \
      ASSIGN_REAL(memcpy, memmove);                  \
    }                                                \
    CHECK(REAL(memcpy));                             \
  } while (false)
#else
#define INIT_MEMCPY
#endif
#if SANITIZER_INTERCEPT_AEABI_MEM
// ARM EABI (RTABI) memory helpers. The 4/8-suffixed variants are handled
// identically here - all forward to the same common implementations.
INTERCEPTOR(void *, __aeabi_memmove, void *to, const void *from, uptr size) {
  void *ctx;
  COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size);
}
INTERCEPTOR(void *, __aeabi_memmove4, void *to, const void *from, uptr size) {
  void *ctx;
  COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size);
}
INTERCEPTOR(void *, __aeabi_memmove8, void *to, const void *from, uptr size) {
  void *ctx;
  COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size);
}
INTERCEPTOR(void *, __aeabi_memcpy, void *to, const void *from, uptr size) {
  void *ctx;
  COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size);
}
INTERCEPTOR(void *, __aeabi_memcpy4, void *to, const void *from, uptr size) {
  void *ctx;
  COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size);
}
INTERCEPTOR(void *, __aeabi_memcpy8, void *to, const void *from, uptr size) {
  void *ctx;
  COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size);
}
// Note the argument order: __aeabi_memset* take the size before the fill byte,
// unlike memset(3).
INTERCEPTOR(void *, __aeabi_memset, void *block, uptr size, int c) {
  void *ctx;
  COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size);
}
INTERCEPTOR(void *, __aeabi_memset4, void *block, uptr size, int c) {
  void *ctx;
  COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size);
}
INTERCEPTOR(void *, __aeabi_memset8, void *block, uptr size, int c) {
  void *ctx;
  COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size);
}
// __aeabi_memclr* zero the block; implemented as memset with value 0.
INTERCEPTOR(void *, __aeabi_memclr, void *block, uptr size) {
  void *ctx;
  COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
}
INTERCEPTOR(void *, __aeabi_memclr4, void *block, uptr size) {
  void *ctx;
  COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
}
INTERCEPTOR(void *, __aeabi_memclr8, void *block, uptr size) {
  void *ctx;
  COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
}
#define INIT_AEABI_MEM                         \
  COMMON_INTERCEPT_FUNCTION(__aeabi_memmove);  \
  COMMON_INTERCEPT_FUNCTION(__aeabi_memmove4); \
  COMMON_INTERCEPT_FUNCTION(__aeabi_memmove8); \
  COMMON_INTERCEPT_FUNCTION(__aeabi_memcpy);   \
  COMMON_INTERCEPT_FUNCTION(__aeabi_memcpy4);  \
  COMMON_INTERCEPT_FUNCTION(__aeabi_memcpy8);  \
  COMMON_INTERCEPT_FUNCTION(__aeabi_memset);   \
  COMMON_INTERCEPT_FUNCTION(__aeabi_memset4);  \
  COMMON_INTERCEPT_FUNCTION(__aeabi_memset8);  \
  COMMON_INTERCEPT_FUNCTION(__aeabi_memclr);   \
  COMMON_INTERCEPT_FUNCTION(__aeabi_memclr4);  \
  COMMON_INTERCEPT_FUNCTION(__aeabi_memclr8);
#else
#define INIT_AEABI_MEM
#endif  // SANITIZER_INTERCEPT_AEABI_MEM
#if SANITIZER_INTERCEPT___BZERO
// Interceptor for the legacy __bzero() entry point: zeroes `size` bytes at
// `block`, forwarded as memset(block, 0, size).
INTERCEPTOR(void *, __bzero, void *block, uptr size) {
  void *ctx;
  COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
}
#define INIT___BZERO COMMON_INTERCEPT_FUNCTION(__bzero);
#else
// __bzero not intercepted for this configuration.
#define INIT___BZERO
#endif  // SANITIZER_INTERCEPT___BZERO
#if SANITIZER_INTERCEPT_BZERO
// Interceptor for bzero(): zeroes `size` bytes at `block`, forwarded as
// memset(block, 0, size).
INTERCEPTOR(void *, bzero, void *block, uptr size) {
  void *ctx;
  COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
}
#define INIT_BZERO COMMON_INTERCEPT_FUNCTION(bzero);
#else
// bzero not intercepted for this configuration.
#define INIT_BZERO
#endif  // SANITIZER_INTERCEPT_BZERO
namespace __sanitizer {
// Registers all memory-intrinsic interceptors defined above: memset,
// memmove, memcpy, the ARM EABI __aeabi_mem* variants, and bzero/__bzero.
// Each INIT_* macro expands to nothing when the corresponding
// SANITIZER_INTERCEPT_* feature is disabled.
// This does not need to be called if InitializeCommonInterceptors() is
// called.
void InitializeMemintrinsicInterceptors() {
  INIT_MEMSET;
  INIT_MEMMOVE;
  INIT_MEMCPY;
  INIT_AEABI_MEM;
  INIT___BZERO;
  INIT_BZERO;
}
}  // namespace __sanitizer

View File

@ -33,7 +33,7 @@
INTERCEPTOR(int, statvfs, char *path, void *buf) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, statvfs, path, buf);
if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
@ -99,7 +99,7 @@ INTERCEPTOR(int, getvfsstat, void *buf, SIZE_T bufsize, int flags) {
INTERCEPTOR(int, statvfs1, const char *path, void *buf, int flags) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, statvfs1, path, buf, flags);
if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
int res = REAL(statvfs1)(path, buf, flags);
if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs90_sz);
return res;

View File

@ -9,12 +9,16 @@
//===----------------------------------------------------------------------===//
INTERFACE_FUNCTION(__sanitizer_acquire_crash_state)
INTERFACE_FUNCTION(__sanitizer_annotate_contiguous_container)
INTERFACE_FUNCTION(__sanitizer_annotate_double_ended_contiguous_container)
INTERFACE_FUNCTION(__sanitizer_contiguous_container_find_bad_address)
INTERFACE_FUNCTION(
__sanitizer_double_ended_contiguous_container_find_bad_address)
INTERFACE_FUNCTION(__sanitizer_set_death_callback)
INTERFACE_FUNCTION(__sanitizer_set_report_path)
INTERFACE_FUNCTION(__sanitizer_set_report_fd)
INTERFACE_FUNCTION(__sanitizer_get_report_path)
INTERFACE_FUNCTION(__sanitizer_verify_contiguous_container)
INTERFACE_FUNCTION(__sanitizer_verify_double_ended_contiguous_container)
INTERFACE_WEAK_FUNCTION(__sanitizer_on_print)
INTERFACE_WEAK_FUNCTION(__sanitizer_report_error_summary)
INTERFACE_WEAK_FUNCTION(__sanitizer_sandbox_on_notify)
@ -28,7 +32,9 @@ INTERFACE_FUNCTION(__sanitizer_get_module_and_offset_for_pc)
INTERFACE_FUNCTION(__sanitizer_symbolize_global)
INTERFACE_FUNCTION(__sanitizer_symbolize_pc)
// Allocator interface.
INTERFACE_FUNCTION(__sanitizer_get_allocated_begin)
INTERFACE_FUNCTION(__sanitizer_get_allocated_size)
INTERFACE_FUNCTION(__sanitizer_get_allocated_size_fast)
INTERFACE_FUNCTION(__sanitizer_get_current_allocated_bytes)
INTERFACE_FUNCTION(__sanitizer_get_estimated_allocated_size)
INTERFACE_FUNCTION(__sanitizer_get_free_bytes)
@ -40,3 +46,7 @@ INTERFACE_FUNCTION(__sanitizer_purge_allocator)
INTERFACE_FUNCTION(__sanitizer_print_memory_profile)
INTERFACE_WEAK_FUNCTION(__sanitizer_free_hook)
INTERFACE_WEAK_FUNCTION(__sanitizer_malloc_hook)
// Memintrinsic functions.
INTERFACE_FUNCTION(__sanitizer_internal_memcpy)
INTERFACE_FUNCTION(__sanitizer_internal_memmove)
INTERFACE_FUNCTION(__sanitizer_internal_memset)

View File

@ -11,3 +11,5 @@ INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_code)
INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_data)
INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_demangle)
INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_flush)
INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_set_demangle)
INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_set_inline_frames)

View File

@ -10,27 +10,22 @@
// run-time libraries.
//===----------------------------------------------------------------------===//
#include "sanitizer_allocator.h"
#include "sanitizer_allocator_interface.h"
#include "sanitizer_common.h"
#include "sanitizer_flags.h"
#include "sanitizer_interface_internal.h"
#include "sanitizer_procmaps.h"
#include "sanitizer_stackdepot.h"
namespace __sanitizer {
static void (*SoftRssLimitExceededCallback)(bool exceeded);
void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded)) {
CHECK_EQ(SoftRssLimitExceededCallback, nullptr);
SoftRssLimitExceededCallback = Callback;
}
#if (SANITIZER_LINUX || SANITIZER_NETBSD) && !SANITIZER_GO
// Weak default implementation for when sanitizer_stackdepot is not linked in.
SANITIZER_WEAK_ATTRIBUTE StackDepotStats *StackDepotGetStats() {
return nullptr;
}
SANITIZER_WEAK_ATTRIBUTE StackDepotStats StackDepotGetStats() { return {}; }
void *BackgroundThread(void *arg) {
VPrintf(1, "%s: Started BackgroundThread\n", SanitizerToolName);
const uptr hard_rss_limit_mb = common_flags()->hard_rss_limit_mb;
const uptr soft_rss_limit_mb = common_flags()->soft_rss_limit_mb;
const bool heap_profile = common_flags()->heap_profile;
@ -48,16 +43,12 @@ void *BackgroundThread(void *arg) {
prev_reported_rss = current_rss_mb;
}
// If stack depot has grown 10% since last time, print it too.
StackDepotStats *stack_depot_stats = StackDepotGetStats();
if (stack_depot_stats) {
if (prev_reported_stack_depot_size * 11 / 10 <
stack_depot_stats->allocated) {
Printf("%s: StackDepot: %zd ids; %zdM allocated\n",
SanitizerToolName,
stack_depot_stats->n_uniq_ids,
stack_depot_stats->allocated >> 20);
prev_reported_stack_depot_size = stack_depot_stats->allocated;
}
StackDepotStats stack_depot_stats = StackDepotGetStats();
if (prev_reported_stack_depot_size * 11 / 10 <
stack_depot_stats.allocated) {
Printf("%s: StackDepot: %zd ids; %zdM allocated\n", SanitizerToolName,
stack_depot_stats.n_uniq_ids, stack_depot_stats.allocated >> 20);
prev_reported_stack_depot_size = stack_depot_stats.allocated;
}
}
// Check RSS against the limit.
@ -72,13 +63,13 @@ void *BackgroundThread(void *arg) {
reached_soft_rss_limit = true;
Report("%s: soft rss limit exhausted (%zdMb vs %zdMb)\n",
SanitizerToolName, soft_rss_limit_mb, current_rss_mb);
if (SoftRssLimitExceededCallback)
SoftRssLimitExceededCallback(true);
SetRssLimitExceeded(true);
} else if (soft_rss_limit_mb >= current_rss_mb &&
reached_soft_rss_limit) {
reached_soft_rss_limit = false;
if (SoftRssLimitExceededCallback)
SoftRssLimitExceededCallback(false);
Report("%s: soft rss limit unexhausted (%zdMb vs %zdMb)\n",
SanitizerToolName, soft_rss_limit_mb, current_rss_mb);
SetRssLimitExceeded(false);
}
}
if (heap_profile &&
@ -89,6 +80,42 @@ void *BackgroundThread(void *arg) {
}
}
}
void MaybeStartBackgroudThread() {
// Need to implement/test on other platforms.
// Start the background thread if one of the rss limits is given.
if (!common_flags()->hard_rss_limit_mb &&
!common_flags()->soft_rss_limit_mb &&
!common_flags()->heap_profile) return;
if (!&real_pthread_create) {
VPrintf(1, "%s: real_pthread_create undefined\n", SanitizerToolName);
return; // Can't spawn the thread anyway.
}
static bool started = false;
if (!started) {
started = true;
internal_start_thread(BackgroundThread, nullptr);
}
}
# if !SANITIZER_START_BACKGROUND_THREAD_IN_ASAN_INTERNAL
# ifdef __clang__
# pragma clang diagnostic push
// We avoid global-constructors to be sure that globals are ready when
// sanitizers need them. This can happen before global constructors have
// executed.
// Here we don't mind if thread is started on later stages.
# pragma clang diagnostic ignored "-Wglobal-constructors"
# endif
static struct BackgroudThreadStarted {
BackgroudThreadStarted() { MaybeStartBackgroudThread(); }
} background_thread_strarter UNUSED;
# ifdef __clang__
# pragma clang diagnostic pop
# endif
# endif
#else
void MaybeStartBackgroudThread() {}
#endif
void WriteToSyslog(const char *msg) {
@ -111,18 +138,6 @@ void WriteToSyslog(const char *msg) {
WriteOneLineToSyslog(p);
}
void MaybeStartBackgroudThread() {
#if (SANITIZER_LINUX || SANITIZER_NETBSD) && \
!SANITIZER_GO // Need to implement/test on other platforms.
// Start the background thread if one of the rss limits is given.
if (!common_flags()->hard_rss_limit_mb &&
!common_flags()->soft_rss_limit_mb &&
!common_flags()->heap_profile) return;
if (!&real_pthread_create) return; // Can't spawn the thread anyway.
internal_start_thread(BackgroundThread, nullptr);
#endif
}
static void (*sandboxing_callback)();
void SetSandboxingCallback(void (*f)()) {
sandboxing_callback = f;
@ -191,10 +206,22 @@ void ProtectGap(uptr addr, uptr size, uptr zero_base_shadow_start,
#endif // !SANITIZER_FUCHSIA
#if !SANITIZER_WINDOWS && !SANITIZER_GO
// Weak default implementation for when sanitizer_stackdepot is not linked in.
SANITIZER_WEAK_ATTRIBUTE void StackDepotStopBackgroundThread() {}
static void StopStackDepotBackgroundThread() {
StackDepotStopBackgroundThread();
}
#else
// SANITIZER_WEAK_ATTRIBUTE is unsupported.
static void StopStackDepotBackgroundThread() {}
#endif
} // namespace __sanitizer
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_sandbox_on_notify,
__sanitizer_sandbox_arguments *args) {
__sanitizer::StopStackDepotBackgroundThread();
__sanitizer::PlatformPrepareForSandboxing(args);
if (__sanitizer::sandboxing_callback)
__sanitizer::sandboxing_callback();

View File

@ -25,9 +25,10 @@ void LogMessageOnPrintf(const char *str) {}
#endif
void WriteToSyslog(const char *buffer) {}
void Abort() { internal__exit(1); }
bool CreateDir(const char *pathname) { return false; }
#endif // !SANITIZER_WINDOWS
#if !SANITIZER_WINDOWS && !SANITIZER_MAC
#if !SANITIZER_WINDOWS && !SANITIZER_APPLE
void ListOfModules::init() {}
void InitializePlatformCommonFlags(CommonFlags *cf) {}
#endif

File diff suppressed because it is too large Load Diff

View File

@ -27,6 +27,16 @@ INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_gep)
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_pc_guard)
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_pc_guard_init)
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_pc_indir)
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_load1)
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_load2)
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_load4)
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_load8)
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_load16)
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_store1)
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_store2)
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_store4)
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_store8)
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_store16)
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_switch)
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_8bit_counters_init)
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_bool_flag_init)

View File

@ -0,0 +1,20 @@
//===-- sanitizer_coverage_win_dll_thunk.cpp ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines a family of thunks that should be statically linked into
// the DLLs that have instrumentation in order to delegate the calls to the
// shared runtime that lives in the main binary.
// See https://github.com/google/sanitizers/issues/209 for the details.
//===----------------------------------------------------------------------===//
#ifdef SANITIZER_DLL_THUNK
#include "sanitizer_win_dll_thunk.h"
// Sanitizer Coverage interface functions.
#define INTERFACE_FUNCTION(Name) INTERCEPT_SANITIZER_FUNCTION(Name)
#define INTERFACE_WEAK_FUNCTION(Name) INTERCEPT_SANITIZER_WEAK_FUNCTION(Name)
#include "sanitizer_coverage_interface.inc"
#endif // SANITIZER_DLL_THUNK

View File

@ -0,0 +1,26 @@
//===-- sanitizer_coverage_win_dynamic_runtime_thunk.cpp ------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines things that need to be present in the application modules
// to interact with Sanitizer Coverage, when it is included in a dll.
//
//===----------------------------------------------------------------------===//
#ifdef SANITIZER_DYNAMIC_RUNTIME_THUNK
#define SANITIZER_IMPORT_INTERFACE 1
#include "sanitizer_win_defs.h"
// Define weak alias for all weak functions imported from sanitizer coverage.
#define INTERFACE_FUNCTION(Name)
#define INTERFACE_WEAK_FUNCTION(Name) WIN_WEAK_IMPORT_DEF(Name)
#include "sanitizer_coverage_interface.inc"
#endif // SANITIZER_DYNAMIC_RUNTIME_THUNK
namespace __sanitizer {
// Add one, otherwise unused, external symbol to this object file so that the
// Visual C++ linker includes it and reads the .drective section.
void ForceWholeArchiveIncludeForSanCov() {}
}

View File

@ -0,0 +1,23 @@
//===-- sanitizer_coverage_win_weak_interception.cpp ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// This module should be included in Sanitizer Coverage when it implemented as a
// shared library on Windows (dll), in order to delegate the calls of weak
// functions to the implementation in the main executable when a strong
// definition is provided.
//===----------------------------------------------------------------------===//
#ifdef SANITIZER_DYNAMIC
#include "sanitizer_win_weak_interception.h"
#include "sanitizer_interface_internal.h"
#include "sancov_flags.h"
// Check if strong definitions for weak functions are present in the main
// executable. If that is the case, override dll functions to point to strong
// implementations.
#define INTERFACE_FUNCTION(Name)
#define INTERFACE_WEAK_FUNCTION(Name) INTERCEPT_SANITIZER_WEAK_FUNCTION(Name)
#include "sanitizer_coverage_interface.inc"
#endif // SANITIZER_DYNAMIC

View File

@ -293,7 +293,7 @@ class DeadlockDetector {
}
// Returns true iff dtls is empty (no locks are currently held) and we can
// add the node to the currently held locks w/o chanding the global state.
// add the node to the currently held locks w/o changing the global state.
// This operation is thread-safe as it only touches the dtls.
bool onFirstLock(DeadlockDetectorTLS<BV> *dtls, uptr node, u32 stk = 0) {
if (!dtls->empty()) return false;

View File

@ -0,0 +1,705 @@
//===- sanitizer_dense_map.h - Dense probed hash table ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is fork of llvm/ADT/DenseMap.h class with the following changes:
// * Use mmap to allocate.
// * No iterators.
// * Does not shrink.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_DENSE_MAP_H
#define SANITIZER_DENSE_MAP_H
#include "sanitizer_common.h"
#include "sanitizer_dense_map_info.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_type_traits.h"
namespace __sanitizer {
template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
typename BucketT>
class DenseMapBase {
public:
using size_type = unsigned;
using key_type = KeyT;
using mapped_type = ValueT;
using value_type = BucketT;
WARN_UNUSED_RESULT bool empty() const { return getNumEntries() == 0; }
unsigned size() const { return getNumEntries(); }
/// Grow the densemap so that it can contain at least \p NumEntries items
/// before resizing again.
void reserve(size_type NumEntries) {
auto NumBuckets = getMinBucketToReserveForEntries(NumEntries);
if (NumBuckets > getNumBuckets())
grow(NumBuckets);
}
void clear() {
if (getNumEntries() == 0 && getNumTombstones() == 0)
return;
const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
if (__sanitizer::is_trivially_destructible<ValueT>::value) {
// Use a simpler loop when values don't need destruction.
for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P)
P->getFirst() = EmptyKey;
} else {
unsigned NumEntries = getNumEntries();
for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey)) {
if (!KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
P->getSecond().~ValueT();
--NumEntries;
}
P->getFirst() = EmptyKey;
}
}
CHECK_EQ(NumEntries, 0);
}
setNumEntries(0);
setNumTombstones(0);
}
/// Return 1 if the specified key is in the map, 0 otherwise.
size_type count(const KeyT &Key) const {
const BucketT *TheBucket;
return LookupBucketFor(Key, TheBucket) ? 1 : 0;
}
value_type *find(const KeyT &Key) {
BucketT *TheBucket;
if (LookupBucketFor(Key, TheBucket))
return TheBucket;
return nullptr;
}
const value_type *find(const KeyT &Key) const {
const BucketT *TheBucket;
if (LookupBucketFor(Key, TheBucket))
return TheBucket;
return nullptr;
}
/// Alternate version of find() which allows a different, and possibly
/// less expensive, key type.
/// The DenseMapInfo is responsible for supplying methods
/// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
/// type used.
template <class LookupKeyT>
value_type *find_as(const LookupKeyT &Key) {
BucketT *TheBucket;
if (LookupBucketFor(Key, TheBucket))
return TheBucket;
return nullptr;
}
template <class LookupKeyT>
const value_type *find_as(const LookupKeyT &Key) const {
const BucketT *TheBucket;
if (LookupBucketFor(Key, TheBucket))
return TheBucket;
return nullptr;
}
/// lookup - Return the entry for the specified key, or a default
/// constructed value if no such entry exists.
ValueT lookup(const KeyT &Key) const {
const BucketT *TheBucket;
if (LookupBucketFor(Key, TheBucket))
return TheBucket->getSecond();
return ValueT();
}
// Inserts key,value pair into the map if the key isn't already in the map.
// If the key is already in the map, it returns false and doesn't update the
// value.
detail::DenseMapPair<value_type *, bool> insert(const value_type &KV) {
return try_emplace(KV.first, KV.second);
}
// Inserts key,value pair into the map if the key isn't already in the map.
// If the key is already in the map, it returns false and doesn't update the
// value.
detail::DenseMapPair<value_type *, bool> insert(value_type &&KV) {
return try_emplace(__sanitizer::move(KV.first),
__sanitizer::move(KV.second));
}
// Inserts key,value pair into the map if the key isn't already in the map.
// The value is constructed in-place if the key is not in the map, otherwise
// it is not moved.
template <typename... Ts>
detail::DenseMapPair<value_type *, bool> try_emplace(KeyT &&Key,
Ts &&...Args) {
BucketT *TheBucket;
if (LookupBucketFor(Key, TheBucket))
return {TheBucket, false}; // Already in map.
// Otherwise, insert the new element.
TheBucket = InsertIntoBucket(TheBucket, __sanitizer::move(Key),
__sanitizer::forward<Ts>(Args)...);
return {TheBucket, true};
}
// Inserts key,value pair into the map if the key isn't already in the map.
// The value is constructed in-place if the key is not in the map, otherwise
// it is not moved.
template <typename... Ts>
detail::DenseMapPair<value_type *, bool> try_emplace(const KeyT &Key,
Ts &&...Args) {
BucketT *TheBucket;
if (LookupBucketFor(Key, TheBucket))
return {TheBucket, false}; // Already in map.
// Otherwise, insert the new element.
TheBucket =
InsertIntoBucket(TheBucket, Key, __sanitizer::forward<Ts>(Args)...);
return {TheBucket, true};
}
/// Alternate version of insert() which allows a different, and possibly
/// less expensive, key type.
/// The DenseMapInfo is responsible for supplying methods
/// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
/// type used.
template <typename LookupKeyT>
detail::DenseMapPair<value_type *, bool> insert_as(value_type &&KV,
const LookupKeyT &Val) {
BucketT *TheBucket;
if (LookupBucketFor(Val, TheBucket))
return {TheBucket, false}; // Already in map.
// Otherwise, insert the new element.
TheBucket =
InsertIntoBucketWithLookup(TheBucket, __sanitizer::move(KV.first),
__sanitizer::move(KV.second), Val);
return {TheBucket, true};
}
bool erase(const KeyT &Val) {
BucketT *TheBucket;
if (!LookupBucketFor(Val, TheBucket))
return false; // not in map.
TheBucket->getSecond().~ValueT();
TheBucket->getFirst() = getTombstoneKey();
decrementNumEntries();
incrementNumTombstones();
return true;
}
void erase(value_type *I) {
CHECK_NE(I, nullptr);
BucketT *TheBucket = &*I;
TheBucket->getSecond().~ValueT();
TheBucket->getFirst() = getTombstoneKey();
decrementNumEntries();
incrementNumTombstones();
}
value_type &FindAndConstruct(const KeyT &Key) {
BucketT *TheBucket;
if (LookupBucketFor(Key, TheBucket))
return *TheBucket;
return *InsertIntoBucket(TheBucket, Key);
}
ValueT &operator[](const KeyT &Key) { return FindAndConstruct(Key).second; }
value_type &FindAndConstruct(KeyT &&Key) {
BucketT *TheBucket;
if (LookupBucketFor(Key, TheBucket))
return *TheBucket;
return *InsertIntoBucket(TheBucket, __sanitizer::move(Key));
}
ValueT &operator[](KeyT &&Key) {
return FindAndConstruct(__sanitizer::move(Key)).second;
}
/// Iterate over active entries of the container.
///
/// Function can return fast to stop the process.
template <class Fn>
void forEach(Fn fn) {
const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
for (auto *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
const KeyT K = P->getFirst();
if (!KeyInfoT::isEqual(K, EmptyKey) &&
!KeyInfoT::isEqual(K, TombstoneKey)) {
if (!fn(*P))
return;
}
}
}
template <class Fn>
void forEach(Fn fn) const {
const_cast<DenseMapBase *>(this)->forEach(
[&](const value_type &KV) { return fn(KV); });
}
protected:
DenseMapBase() = default;
void destroyAll() {
if (getNumBuckets() == 0) // Nothing to do.
return;
const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
!KeyInfoT::isEqual(P->getFirst(), TombstoneKey))
P->getSecond().~ValueT();
P->getFirst().~KeyT();
}
}
void initEmpty() {
setNumEntries(0);
setNumTombstones(0);
CHECK_EQ((getNumBuckets() & (getNumBuckets() - 1)), 0);
const KeyT EmptyKey = getEmptyKey();
for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B)
::new (&B->getFirst()) KeyT(EmptyKey);
}
/// Returns the number of buckets to allocate to ensure that the DenseMap can
/// accommodate \p NumEntries without need to grow().
unsigned getMinBucketToReserveForEntries(unsigned NumEntries) {
// Ensure that "NumEntries * 4 < NumBuckets * 3"
if (NumEntries == 0)
return 0;
// +1 is required because of the strict equality.
// For example if NumEntries is 48, we need to return 401.
return RoundUpToPowerOfTwo((NumEntries * 4 / 3 + 1) + /* NextPowerOf2 */ 1);
}
void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) {
initEmpty();
// Insert all the old elements.
const KeyT EmptyKey = getEmptyKey();
const KeyT TombstoneKey = getTombstoneKey();
for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) {
if (!KeyInfoT::isEqual(B->getFirst(), EmptyKey) &&
!KeyInfoT::isEqual(B->getFirst(), TombstoneKey)) {
// Insert the key/value into the new table.
BucketT *DestBucket;
bool FoundVal = LookupBucketFor(B->getFirst(), DestBucket);
(void)FoundVal; // silence warning.
CHECK(!FoundVal);
DestBucket->getFirst() = __sanitizer::move(B->getFirst());
::new (&DestBucket->getSecond())
ValueT(__sanitizer::move(B->getSecond()));
incrementNumEntries();
// Free the value.
B->getSecond().~ValueT();
}
B->getFirst().~KeyT();
}
}
template <typename OtherBaseT>
void copyFrom(
const DenseMapBase<OtherBaseT, KeyT, ValueT, KeyInfoT, BucketT> &other) {
CHECK_NE(&other, this);
CHECK_EQ(getNumBuckets(), other.getNumBuckets());
setNumEntries(other.getNumEntries());
setNumTombstones(other.getNumTombstones());
if (__sanitizer::is_trivially_copyable<KeyT>::value &&
__sanitizer::is_trivially_copyable<ValueT>::value)
internal_memcpy(reinterpret_cast<void *>(getBuckets()),
other.getBuckets(), getNumBuckets() * sizeof(BucketT));
else
for (uptr i = 0; i < getNumBuckets(); ++i) {
::new (&getBuckets()[i].getFirst())
KeyT(other.getBuckets()[i].getFirst());
if (!KeyInfoT::isEqual(getBuckets()[i].getFirst(), getEmptyKey()) &&
!KeyInfoT::isEqual(getBuckets()[i].getFirst(), getTombstoneKey()))
::new (&getBuckets()[i].getSecond())
ValueT(other.getBuckets()[i].getSecond());
}
}
static unsigned getHashValue(const KeyT &Val) {
return KeyInfoT::getHashValue(Val);
}
template <typename LookupKeyT>
static unsigned getHashValue(const LookupKeyT &Val) {
return KeyInfoT::getHashValue(Val);
}
static const KeyT getEmptyKey() { return KeyInfoT::getEmptyKey(); }
static const KeyT getTombstoneKey() { return KeyInfoT::getTombstoneKey(); }
private:
unsigned getNumEntries() const {
return static_cast<const DerivedT *>(this)->getNumEntries();
}
void setNumEntries(unsigned Num) {
static_cast<DerivedT *>(this)->setNumEntries(Num);
}
void incrementNumEntries() { setNumEntries(getNumEntries() + 1); }
void decrementNumEntries() { setNumEntries(getNumEntries() - 1); }
unsigned getNumTombstones() const {
return static_cast<const DerivedT *>(this)->getNumTombstones();
}
void setNumTombstones(unsigned Num) {
static_cast<DerivedT *>(this)->setNumTombstones(Num);
}
void incrementNumTombstones() { setNumTombstones(getNumTombstones() + 1); }
void decrementNumTombstones() { setNumTombstones(getNumTombstones() - 1); }
const BucketT *getBuckets() const {
return static_cast<const DerivedT *>(this)->getBuckets();
}
BucketT *getBuckets() { return static_cast<DerivedT *>(this)->getBuckets(); }
unsigned getNumBuckets() const {
return static_cast<const DerivedT *>(this)->getNumBuckets();
}
BucketT *getBucketsEnd() { return getBuckets() + getNumBuckets(); }
const BucketT *getBucketsEnd() const {
return getBuckets() + getNumBuckets();
}
void grow(unsigned AtLeast) { static_cast<DerivedT *>(this)->grow(AtLeast); }
template <typename KeyArg, typename... ValueArgs>
BucketT *InsertIntoBucket(BucketT *TheBucket, KeyArg &&Key,
ValueArgs &&...Values) {
TheBucket = InsertIntoBucketImpl(Key, Key, TheBucket);
TheBucket->getFirst() = __sanitizer::forward<KeyArg>(Key);
::new (&TheBucket->getSecond())
ValueT(__sanitizer::forward<ValueArgs>(Values)...);
return TheBucket;
}
template <typename LookupKeyT>
BucketT *InsertIntoBucketWithLookup(BucketT *TheBucket, KeyT &&Key,
ValueT &&Value, LookupKeyT &Lookup) {
TheBucket = InsertIntoBucketImpl(Key, Lookup, TheBucket);
TheBucket->getFirst() = __sanitizer::move(Key);
::new (&TheBucket->getSecond()) ValueT(__sanitizer::move(Value));
return TheBucket;
}
template <typename LookupKeyT>
BucketT *InsertIntoBucketImpl(const KeyT &Key, const LookupKeyT &Lookup,
BucketT *TheBucket) {
// If the load of the hash table is more than 3/4, or if fewer than 1/8 of
// the buckets are empty (meaning that many are filled with tombstones),
// grow the table.
//
// The later case is tricky. For example, if we had one empty bucket with
// tons of tombstones, failing lookups (e.g. for insertion) would have to
// probe almost the entire table until it found the empty bucket. If the
// table completely filled with tombstones, no lookup would ever succeed,
// causing infinite loops in lookup.
unsigned NewNumEntries = getNumEntries() + 1;
unsigned NumBuckets = getNumBuckets();
if (UNLIKELY(NewNumEntries * 4 >= NumBuckets * 3)) {
this->grow(NumBuckets * 2);
LookupBucketFor(Lookup, TheBucket);
NumBuckets = getNumBuckets();
} else if (UNLIKELY(NumBuckets - (NewNumEntries + getNumTombstones()) <=
NumBuckets / 8)) {
this->grow(NumBuckets);
LookupBucketFor(Lookup, TheBucket);
}
CHECK(TheBucket);
// Only update the state after we've grown our bucket space appropriately
// so that when growing buckets we have self-consistent entry count.
incrementNumEntries();
// If we are writing over a tombstone, remember this.
const KeyT EmptyKey = getEmptyKey();
if (!KeyInfoT::isEqual(TheBucket->getFirst(), EmptyKey))
decrementNumTombstones();
return TheBucket;
}
/// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in
/// FoundBucket. If the bucket contains the key and a value, this returns
/// true, otherwise it returns a bucket with an empty marker or tombstone and
/// returns false.
template <typename LookupKeyT>
bool LookupBucketFor(const LookupKeyT &Val,
const BucketT *&FoundBucket) const {
const BucketT *BucketsPtr = getBuckets();
const unsigned NumBuckets = getNumBuckets();
if (NumBuckets == 0) {
FoundBucket = nullptr;
return false;
}
// FoundTombstone - Keep track of whether we find a tombstone while probing.
const BucketT *FoundTombstone = nullptr;
const KeyT EmptyKey = getEmptyKey();
const KeyT TombstoneKey = getTombstoneKey();
CHECK(!KeyInfoT::isEqual(Val, EmptyKey));
CHECK(!KeyInfoT::isEqual(Val, TombstoneKey));
unsigned BucketNo = getHashValue(Val) & (NumBuckets - 1);
unsigned ProbeAmt = 1;
while (true) {
const BucketT *ThisBucket = BucketsPtr + BucketNo;
// Found Val's bucket? If so, return it.
if (LIKELY(KeyInfoT::isEqual(Val, ThisBucket->getFirst()))) {
FoundBucket = ThisBucket;
return true;
}
// If we found an empty bucket, the key doesn't exist in the set.
// Insert it and return the default value.
if (LIKELY(KeyInfoT::isEqual(ThisBucket->getFirst(), EmptyKey))) {
// If we've already seen a tombstone while probing, fill it in instead
// of the empty bucket we eventually probed to.
FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket;
return false;
}
// If this is a tombstone, remember it. If Val ends up not in the map, we
// prefer to return it than something that would require more probing.
if (KeyInfoT::isEqual(ThisBucket->getFirst(), TombstoneKey) &&
!FoundTombstone)
FoundTombstone = ThisBucket; // Remember the first tombstone found.
// Otherwise, it's a hash collision or a tombstone, continue quadratic
// probing.
BucketNo += ProbeAmt++;
BucketNo &= (NumBuckets - 1);
}
}
template <typename LookupKeyT>
bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) {
const BucketT *ConstFoundBucket;
bool Result = const_cast<const DenseMapBase *>(this)->LookupBucketFor(
Val, ConstFoundBucket);
FoundBucket = const_cast<BucketT *>(ConstFoundBucket);
return Result;
}
public:
/// Return the approximate size (in bytes) of the actual map, i.e. the raw
/// memory used by DenseMap. If entries are pointers to objects, the size of
/// the referenced objects is not included.
uptr getMemorySize() const {
  const uptr RawBytes = getNumBuckets() * sizeof(BucketT);
  return RoundUpTo(RawBytes, GetPageSizeCached());
}
};
/// Equality comparison for DenseMap.
///
/// Checks that every (key, value) pair of LHS is present in RHS and that the
/// two maps have the same number of entries, which together imply equality.
/// Equivalent to N calls to RHS.find and N value comparisons. Amortized
/// complexity is linear, worst case is O(N^2) (if every hash collides).
template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
          typename BucketT>
bool operator==(
    const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,
    const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {
  // Different sizes can never compare equal.
  if (LHS.size() != RHS.size())
    return false;

  using MapT = DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT>;
  bool Equal = true;
  // forEach stops iterating as soon as the callback returns false.
  LHS.forEach([&](const typename MapT::value_type &KV) -> bool {
    const auto *Entry = RHS.find(KV.first);
    if (Entry && Entry->second == KV.second)
      return true;  // Pair matches; keep scanning.
    Equal = false;  // Missing key or differing value: bail out early.
    return false;
  });
  return Equal;
}
/// Inequality comparison for DenseMap.
///
/// Defined as the negation of operator==; see that operator for performance
/// notes.
template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
          typename BucketT>
bool operator!=(
    const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,
    const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {
  const bool Equal = (LHS == RHS);
  return !Equal;
}
/// Concrete DenseMap: owns the bucket storage (allocated with MmapOrDie in
/// whole pages) and provides the storage hooks DenseMapBase requires.
template <typename KeyT, typename ValueT,
          typename KeyInfoT = DenseMapInfo<KeyT>,
          typename BucketT = detail::DenseMapPair<KeyT, ValueT>>
class DenseMap : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT, BucketT>,
                                     KeyT, ValueT, KeyInfoT, BucketT> {
  friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;

  // Lift some types from the dependent base class into this class for
  // simplicity of referring to them.
  using BaseT = DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;

  BucketT *Buckets = nullptr;
  unsigned NumEntries = 0;
  unsigned NumTombstones = 0;
  unsigned NumBuckets = 0;

 public:
  /// Create a DenseMap with an optional \p InitialReserve that guarantee that
  /// this number of elements can be inserted in the map without grow()
  explicit DenseMap(unsigned InitialReserve) { init(InitialReserve); }
  constexpr DenseMap() = default;

  DenseMap(const DenseMap &other) : BaseT() {
    init(0);
    copyFrom(other);
  }

  DenseMap(DenseMap &&other) : BaseT() {
    init(0);
    swap(other);
  }

  ~DenseMap() {
    this->destroyAll();
    deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets);
  }

  void swap(DenseMap &RHS) {
    Swap(Buckets, RHS.Buckets);
    Swap(NumEntries, RHS.NumEntries);
    Swap(NumTombstones, RHS.NumTombstones);
    Swap(NumBuckets, RHS.NumBuckets);
  }

  DenseMap &operator=(const DenseMap &other) {
    if (&other != this)
      copyFrom(other);
    return *this;
  }

  DenseMap &operator=(DenseMap &&other) {
    this->destroyAll();
    // Fix: deallocate_buffer() is declared below as (Ptr, Size); the extra
    // alignof(BucketT) argument previously passed here did not match that
    // declaration (the destructor and copyFrom already use the two-arg form).
    deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets);
    init(0);
    swap(other);
    return *this;
  }

  /// Replace the contents with a copy of \p other, releasing current storage
  /// first. On allocation of zero buckets the map is left empty.
  void copyFrom(const DenseMap &other) {
    this->destroyAll();
    deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets);
    if (allocateBuckets(other.NumBuckets)) {
      this->BaseT::copyFrom(other);
    } else {
      NumEntries = 0;
      NumTombstones = 0;
    }
  }

  /// Allocate enough buckets for \p InitNumEntries entries and mark them all
  /// empty.
  void init(unsigned InitNumEntries) {
    auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries);
    if (allocateBuckets(InitBuckets)) {
      this->BaseT::initEmpty();
    } else {
      NumEntries = 0;
      NumTombstones = 0;
    }
  }

  /// Grow the bucket array to at least \p AtLeast buckets (rounded up to a
  /// power of two, minimum 64) and rehash the existing entries into it.
  void grow(unsigned AtLeast) {
    unsigned OldNumBuckets = NumBuckets;
    BucketT *OldBuckets = Buckets;
    allocateBuckets(RoundUpToPowerOfTwo(Max<unsigned>(64, AtLeast)));
    CHECK(Buckets);
    if (!OldBuckets) {
      this->BaseT::initEmpty();
      return;
    }
    this->moveFromOldBuckets(OldBuckets, OldBuckets + OldNumBuckets);
    // Free the old table.
    deallocate_buffer(OldBuckets, sizeof(BucketT) * OldNumBuckets);
  }

 private:
  // Storage hooks used by DenseMapBase.
  unsigned getNumEntries() const { return NumEntries; }
  void setNumEntries(unsigned Num) { NumEntries = Num; }
  unsigned getNumTombstones() const { return NumTombstones; }
  void setNumTombstones(unsigned Num) { NumTombstones = Num; }
  BucketT *getBuckets() const { return Buckets; }
  unsigned getNumBuckets() const { return NumBuckets; }

  /// Allocate storage for \p Num buckets; returns false (and sets Buckets to
  /// null) when Num is zero. Since allocation is page-granular, small
  /// requests are scaled up to use the entire page.
  bool allocateBuckets(unsigned Num) {
    NumBuckets = Num;
    if (NumBuckets == 0) {
      Buckets = nullptr;
      return false;
    }
    uptr Size = sizeof(BucketT) * NumBuckets;
    if (Size * 2 <= GetPageSizeCached()) {
      // We always allocate at least a page, so use entire space.
      unsigned Log2 = MostSignificantSetBitIndex(GetPageSizeCached() / Size);
      Size <<= Log2;
      NumBuckets <<= Log2;
      CHECK_EQ(Size, sizeof(BucketT) * NumBuckets);
      CHECK_GT(Size * 2, GetPageSizeCached());
    }
    Buckets = static_cast<BucketT *>(allocate_buffer(Size));
    return true;
  }

  // Page-granular raw allocation helpers; Size is rounded up to whole pages
  // on both allocation and deallocation so the two stay in sync.
  static void *allocate_buffer(uptr Size) {
    return MmapOrDie(RoundUpTo(Size, GetPageSizeCached()), "DenseMap");
  }

  static void deallocate_buffer(void *Ptr, uptr Size) {
    UnmapOrDie(Ptr, RoundUpTo(Size, GetPageSizeCached()));
  }
};
} // namespace __sanitizer
#endif // SANITIZER_DENSE_MAP_H

View File

@ -0,0 +1,282 @@
//===- sanitizer_dense_map_info.h - Type traits for DenseMap ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_DENSE_MAP_INFO_H
#define SANITIZER_DENSE_MAP_INFO_H
#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_type_traits.h"
namespace __sanitizer {
namespace detail {
/// Simplistic combination of 32-bit hash values into 32-bit hash values.
static constexpr unsigned combineHashValue(unsigned a, unsigned b) {
u64 key = (u64)a << 32 | (u64)b;
key += ~(key << 32);
key ^= (key >> 22);
key += ~(key << 13);
key ^= (key >> 8);
key += (key << 3);
key ^= (key >> 15);
key += ~(key << 27);
key ^= (key >> 31);
return (unsigned)key;
}
// We extend a pair to allow users to override the bucket type with their own
// implementation without requiring two members.
template <typename KeyT, typename ValueT>
struct DenseMapPair {
  // first holds the key; LookupBucketFor compares it against the empty and
  // tombstone sentinels, so those two values mark unused buckets.
  KeyT first = {};
  ValueT second = {};

  constexpr DenseMapPair() = default;
  constexpr DenseMapPair(const KeyT &f, const ValueT &s)
      : first(f), second(s) {}

  // Perfect-forwarding constructor so both members can be move-constructed
  // in place.
  template <typename KeyT2, typename ValueT2>
  constexpr DenseMapPair(KeyT2 &&f, ValueT2 &&s)
      : first(__sanitizer::forward<KeyT2>(f)),
        second(__sanitizer::forward<ValueT2>(s)) {}

  constexpr DenseMapPair(const DenseMapPair &other) = default;
  constexpr DenseMapPair &operator=(const DenseMapPair &other) = default;
  constexpr DenseMapPair(DenseMapPair &&other) = default;
  constexpr DenseMapPair &operator=(DenseMapPair &&other) = default;

  // Accessor interface required of any custom bucket type.
  KeyT &getFirst() { return first; }
  const KeyT &getFirst() const { return first; }
  ValueT &getSecond() { return second; }
  const ValueT &getSecond() const { return second; }
};
} // end namespace detail
// Primary template is intentionally empty: each key type must provide a
// specialization implementing the four static members listed below.
template <typename T>
struct DenseMapInfo {
  // static T getEmptyKey();
  // static T getTombstoneKey();
  // static unsigned getHashValue(const T &Val);
  // static bool isEqual(const T &LHS, const T &RHS);
};
// Provide DenseMapInfo for all pointers. Come up with sentinel pointer values
// that are aligned to alignof(T) bytes, but try to avoid requiring T to be
// complete. This allows clients to instantiate DenseMap<T*, ...> with forward
// declared key types. Assume that no pointer key type requires more than 4096
// bytes of alignment.
template <typename T>
struct DenseMapInfo<T *> {
  // The following should hold, but it would require T to be complete:
  // static_assert(alignof(T) <= (1 << Log2MaxAlign),
  //               "DenseMap does not support pointer keys requiring more than "
  //               "Log2MaxAlign bits of alignment");
  static constexpr uptr Log2MaxAlign = 12;

  // Sentinels are -1 and -2 shifted past the maximal supported alignment,
  // so they cannot collide with any suitably-aligned real pointer.
  static constexpr T *getEmptyKey() {
    return reinterpret_cast<T *>(static_cast<uptr>(-1) << Log2MaxAlign);
  }

  static constexpr T *getTombstoneKey() {
    return reinterpret_cast<T *>(static_cast<uptr>(-2) << Log2MaxAlign);
  }

  static constexpr unsigned getHashValue(const T *PtrVal) {
    const unsigned Bits = unsigned((uptr)PtrVal);
    return (Bits >> 4) ^ (Bits >> 9);
  }

  static constexpr bool isEqual(const T *LHS, const T *RHS) {
    return LHS == RHS;
  }
};
// Provide DenseMapInfo for chars.
// For each integral specialization below, the two largest representable
// values (or, for signed types, the extreme values) are reserved as the
// empty/tombstone sentinels; such keys must never be inserted into a map.
template <>
struct DenseMapInfo<char> {
  static constexpr char getEmptyKey() { return ~0; }
  static constexpr char getTombstoneKey() { return ~0 - 1; }
  static constexpr unsigned getHashValue(const char &Val) { return Val * 37U; }
  static constexpr bool isEqual(const char &LHS, const char &RHS) {
    return LHS == RHS;
  }
};

// Provide DenseMapInfo for unsigned chars.
template <>
struct DenseMapInfo<unsigned char> {
  static constexpr unsigned char getEmptyKey() { return ~0; }
  static constexpr unsigned char getTombstoneKey() { return ~0 - 1; }
  static constexpr unsigned getHashValue(const unsigned char &Val) {
    return Val * 37U;
  }
  static constexpr bool isEqual(const unsigned char &LHS,
                                const unsigned char &RHS) {
    return LHS == RHS;
  }
};

// Provide DenseMapInfo for unsigned shorts.
template <>
struct DenseMapInfo<unsigned short> {
  static constexpr unsigned short getEmptyKey() { return 0xFFFF; }
  static constexpr unsigned short getTombstoneKey() { return 0xFFFF - 1; }
  static constexpr unsigned getHashValue(const unsigned short &Val) {
    return Val * 37U;
  }
  static constexpr bool isEqual(const unsigned short &LHS,
                                const unsigned short &RHS) {
    return LHS == RHS;
  }
};

// Provide DenseMapInfo for unsigned ints.
template <>
struct DenseMapInfo<unsigned> {
  static constexpr unsigned getEmptyKey() { return ~0U; }
  static constexpr unsigned getTombstoneKey() { return ~0U - 1; }
  static constexpr unsigned getHashValue(const unsigned &Val) {
    return Val * 37U;
  }
  static constexpr bool isEqual(const unsigned &LHS, const unsigned &RHS) {
    return LHS == RHS;
  }
};

// Provide DenseMapInfo for unsigned longs.
template <>
struct DenseMapInfo<unsigned long> {
  static constexpr unsigned long getEmptyKey() { return ~0UL; }
  static constexpr unsigned long getTombstoneKey() { return ~0UL - 1L; }
  // Hash is truncated to 32 bits after multiplication.
  static constexpr unsigned getHashValue(const unsigned long &Val) {
    return (unsigned)(Val * 37UL);
  }
  static constexpr bool isEqual(const unsigned long &LHS,
                                const unsigned long &RHS) {
    return LHS == RHS;
  }
};

// Provide DenseMapInfo for unsigned long longs.
template <>
struct DenseMapInfo<unsigned long long> {
  static constexpr unsigned long long getEmptyKey() { return ~0ULL; }
  static constexpr unsigned long long getTombstoneKey() { return ~0ULL - 1ULL; }
  static constexpr unsigned getHashValue(const unsigned long long &Val) {
    return (unsigned)(Val * 37ULL);
  }
  static constexpr bool isEqual(const unsigned long long &LHS,
                                const unsigned long long &RHS) {
    return LHS == RHS;
  }
};

// Provide DenseMapInfo for shorts.
template <>
struct DenseMapInfo<short> {
  static constexpr short getEmptyKey() { return 0x7FFF; }
  static constexpr short getTombstoneKey() { return -0x7FFF - 1; }
  static constexpr unsigned getHashValue(const short &Val) { return Val * 37U; }
  static constexpr bool isEqual(const short &LHS, const short &RHS) {
    return LHS == RHS;
  }
};

// Provide DenseMapInfo for ints.
template <>
struct DenseMapInfo<int> {
  static constexpr int getEmptyKey() { return 0x7fffffff; }
  static constexpr int getTombstoneKey() { return -0x7fffffff - 1; }
  static constexpr unsigned getHashValue(const int &Val) {
    return (unsigned)(Val * 37U);
  }
  static constexpr bool isEqual(const int &LHS, const int &RHS) {
    return LHS == RHS;
  }
};

// Provide DenseMapInfo for longs.
template <>
struct DenseMapInfo<long> {
  // Computed (instead of spelled as a literal) because sizeof(long)
  // differs across platforms; this yields the maximum long value.
  static constexpr long getEmptyKey() {
    return (1UL << (sizeof(long) * 8 - 1)) - 1UL;
  }
  static constexpr long getTombstoneKey() { return getEmptyKey() - 1L; }
  static constexpr unsigned getHashValue(const long &Val) {
    return (unsigned)(Val * 37UL);
  }
  static constexpr bool isEqual(const long &LHS, const long &RHS) {
    return LHS == RHS;
  }
};

// Provide DenseMapInfo for long longs.
template <>
struct DenseMapInfo<long long> {
  static constexpr long long getEmptyKey() { return 0x7fffffffffffffffLL; }
  static constexpr long long getTombstoneKey() {
    return -0x7fffffffffffffffLL - 1;
  }
  static constexpr unsigned getHashValue(const long long &Val) {
    return (unsigned)(Val * 37ULL);
  }
  static constexpr bool isEqual(const long long &LHS, const long long &RHS) {
    return LHS == RHS;
  }
};
// Provide DenseMapInfo for all pairs whose members have info.
template <typename T, typename U>
struct DenseMapInfo<detail::DenseMapPair<T, U>> {
  using Pair = detail::DenseMapPair<T, U>;
  using FirstInfo = DenseMapInfo<T>;
  using SecondInfo = DenseMapInfo<U>;

  // A pair is a sentinel exactly when both members are sentinels.
  static constexpr Pair getEmptyKey() {
    return Pair(FirstInfo::getEmptyKey(), SecondInfo::getEmptyKey());
  }

  static constexpr Pair getTombstoneKey() {
    return Pair(FirstInfo::getTombstoneKey(), SecondInfo::getTombstoneKey());
  }

  // Hash each member with its own trait, then mix the two 32-bit results.
  static constexpr unsigned getHashValue(const Pair &PairVal) {
    const unsigned First = FirstInfo::getHashValue(PairVal.first);
    const unsigned Second = SecondInfo::getHashValue(PairVal.second);
    return detail::combineHashValue(First, Second);
  }

  static constexpr bool isEqual(const Pair &LHS, const Pair &RHS) {
    return FirstInfo::isEqual(LHS.first, RHS.first) &&
           SecondInfo::isEqual(LHS.second, RHS.second);
  }
};
} // namespace __sanitizer
#endif // SANITIZER_DENSE_MAP_INFO_H

View File

@ -21,7 +21,7 @@
#include "sanitizer_errno_codes.h"
#include "sanitizer_platform.h"
#if SANITIZER_FREEBSD || SANITIZER_MAC
#if SANITIZER_FREEBSD || SANITIZER_APPLE
# define __errno_location __error
#elif SANITIZER_ANDROID || SANITIZER_NETBSD
# define __errno_location __errno

View File

@ -25,6 +25,7 @@ namespace __sanitizer {
#define errno_EBUSY 16
#define errno_EINVAL 22
#define errno_ENAMETOOLONG 36
#define errno_ENOSYS 38
// These might not be present, or their values may differ, on different platforms.
extern const int errno_EOWNERDEAD;

View File

@ -19,6 +19,7 @@
#include "sanitizer_common.h"
#include "sanitizer_file.h"
# include "sanitizer_interface_internal.h"
namespace __sanitizer {
@ -75,6 +76,24 @@ void ReportFile::ReopenIfNecessary() {
fd_pid = pid;
}
// Create every missing parent directory of `path`, mkdir -p style.
// Walks the string in place: at each path separator the string is
// temporarily NUL-terminated so that `path` names the parent prefix, the
// directory is created if absent, and the separator is then restored.
// Dies (after writing a message to stderr) if a directory cannot be created.
static void RecursiveCreateParentDirs(char *path) {
  if (path[0] == '\0')
    return;
  // Start at 1: a separator at index 0 (absolute path) has no non-empty
  // parent prefix to create.
  for (int i = 1; path[i] != '\0'; ++i) {
    char save = path[i];
    if (!IsPathSeparator(path[i]))
      continue;
    path[i] = '\0';  // Truncate so `path` is the prefix up to this separator.
    if (!DirExists(path) && !CreateDir(path)) {
      const char *ErrorMsgPrefix = "ERROR: Can't create directory: ";
      WriteToFile(kStderrFd, ErrorMsgPrefix, internal_strlen(ErrorMsgPrefix));
      WriteToFile(kStderrFd, path, internal_strlen(path));
      Die();
    }
    path[i] = save;  // Restore the separator and continue scanning.
  }
}
void ReportFile::SetReportPath(const char *path) {
if (path) {
uptr len = internal_strlen(path);
@ -95,6 +114,7 @@ void ReportFile::SetReportPath(const char *path) {
fd = kStdoutFd;
} else {
internal_snprintf(path_prefix, kMaxPathLength, "%s", path);
RecursiveCreateParentDirs(path_prefix);
}
}

View File

@ -15,7 +15,7 @@
#ifndef SANITIZER_FILE_H
#define SANITIZER_FILE_H
#include "sanitizer_interface_internal.h"
#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_mutex.h"
@ -78,9 +78,12 @@ bool SupportsColoredOutput(fd_t fd);
// OS
const char *GetPwd();
bool FileExists(const char *filename);
bool DirExists(const char *path);
char *FindPathToBinary(const char *name);
bool IsPathSeparator(const char c);
bool IsAbsolutePath(const char *path);
// Returns true on success, false on failure.
bool CreateDir(const char *pathname);
// Starts a subprocess and returs its pid.
// If *_fd parameters are not kInvalidFd their corresponding input/output
// streams will be redirect to the file. The files will always be closed

View File

@ -13,9 +13,9 @@
#include "sanitizer_flag_parser.h"
#include "sanitizer_common.h"
#include "sanitizer_libc.h"
#include "sanitizer_flags.h"
#include "sanitizer_flag_parser.h"
#include "sanitizer_flags.h"
#include "sanitizer_libc.h"
namespace __sanitizer {

View File

@ -13,9 +13,9 @@
#ifndef SANITIZER_FLAG_REGISTRY_H
#define SANITIZER_FLAG_REGISTRY_H
#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_common.h"
namespace __sanitizer {
@ -138,7 +138,7 @@ inline bool FlagHandler<uptr>::Parse(const char *value) {
template <>
inline bool FlagHandler<uptr>::Format(char *buffer, uptr size) {
uptr num_symbols_should_write = internal_snprintf(buffer, size, "%p", *t_);
uptr num_symbols_should_write = internal_snprintf(buffer, size, "0x%zx", *t_);
return num_symbols_should_write < size;
}

View File

@ -62,16 +62,19 @@ COMMON_FLAG(
COMMON_FLAG(const char *, log_suffix, nullptr,
"String to append to log file name, e.g. \".txt\".")
COMMON_FLAG(
bool, log_to_syslog, (bool)SANITIZER_ANDROID || (bool)SANITIZER_MAC,
bool, log_to_syslog, (bool)SANITIZER_ANDROID || (bool)SANITIZER_APPLE,
"Write all sanitizer output to syslog in addition to other means of "
"logging.")
COMMON_FLAG(
int, verbosity, 0,
"Verbosity level (0 - silent, 1 - a bit of output, 2+ - more output).")
COMMON_FLAG(bool, strip_env, 1,
COMMON_FLAG(bool, strip_env, true,
"Whether to remove the sanitizer from DYLD_INSERT_LIBRARIES to "
"avoid passing it to children. Default is true.")
COMMON_FLAG(bool, detect_leaks, !SANITIZER_MAC, "Enable memory leak detection.")
"avoid passing it to children on Apple platforms. Default is true.")
COMMON_FLAG(bool, verify_interceptors, true,
"Verify that interceptors are working on Apple platforms. Default "
"is true.")
COMMON_FLAG(bool, detect_leaks, !SANITIZER_APPLE, "Enable memory leak detection.")
COMMON_FLAG(
bool, leak_check_at_exit, true,
"Invoke leak checking in an atexit handler. Has no effect if "
@ -160,6 +163,10 @@ COMMON_FLAG(
COMMON_FLAG(const char *, coverage_dir, ".",
"Target directory for coverage dumps. Defaults to the current "
"directory.")
COMMON_FLAG(const char *, cov_8bit_counters_out, "",
"If non-empty, write 8bit counters to this file. ")
COMMON_FLAG(const char *, cov_pcs_out, "",
"If non-empty, write the coverage pc table to this file. ")
COMMON_FLAG(bool, full_address_space, false,
"Sanitize complete address space; "
"by default kernel area on 32-bit platforms will not be sanitized")
@ -175,6 +182,7 @@ COMMON_FLAG(bool, use_madv_dontdump, true,
"in core file.")
COMMON_FLAG(bool, symbolize_inline_frames, true,
"Print inlined frames in stacktraces. Defaults to true.")
COMMON_FLAG(bool, demangle, true, "Print demangled symbols.")
COMMON_FLAG(bool, symbolize_vs_style, false,
"Print file locations in Visual Studio style (e.g: "
" file(10,42): ...")
@ -187,6 +195,8 @@ COMMON_FLAG(const char *, stack_trace_format, "DEFAULT",
"Format string used to render stack frames. "
"See sanitizer_stacktrace_printer.h for the format description. "
"Use DEFAULT to get default format.")
COMMON_FLAG(int, compress_stack_depot, 0,
"Compress stack depot to save memory.")
COMMON_FLAG(bool, no_huge_pages_for_shadow, true,
"If true, the shadow is not allowed to use huge pages. ")
COMMON_FLAG(bool, strict_string_checks, false,
@ -238,7 +248,7 @@ COMMON_FLAG(bool, decorate_proc_maps, (bool)SANITIZER_ANDROID,
COMMON_FLAG(int, exitcode, 1, "Override the program exit status if the tool "
"found an error")
COMMON_FLAG(
bool, abort_on_error, (bool)SANITIZER_ANDROID || (bool)SANITIZER_MAC,
bool, abort_on_error, (bool)SANITIZER_ANDROID || (bool)SANITIZER_APPLE,
"If set, the tool calls abort() instead of _exit() after printing the "
"error report.")
COMMON_FLAG(bool, suppress_equal_pcs, true,

View File

@ -0,0 +1,162 @@
//===-- sanitizer_flat_map.h ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_FLAT_MAP_H
#define SANITIZER_FLAT_MAP_H
#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_local_address_space_view.h"
#include "sanitizer_mutex.h"
namespace __sanitizer {
// Maps integers in range [0, kSize) to values.
// Fixed-size map backed by a single inline array of kSize elements.
// Storage is always present, so contains() is true for every in-range index.
template <typename T, u64 kSize,
          typename AddressSpaceViewTy = LocalAddressSpaceView>
class FlatMap {
 public:
  using AddressSpaceView = AddressSpaceViewTy;

  // Zero-fill the backing array (uses internal_memset, safe pre-init).
  void Init() { internal_memset(map_, 0, sizeof(map_)); }

  constexpr uptr size() const { return kSize; }

  // All in-range indices are backed by storage; out-of-range is a CHECK
  // failure.
  bool contains(uptr idx) const {
    CHECK_LT(idx, kSize);
    return true;
  }

  T &operator[](uptr idx) {
    DCHECK_LT(idx, kSize);
    return map_[idx];
  }

  const T &operator[](uptr idx) const {
    DCHECK_LT(idx, kSize);
    return map_[idx];
  }

 private:
  T map_[kSize];
};
// TwoLevelMap maps integers in range [0, kSize1*kSize2) to values.
// It is implemented as a two-dimensional array: array of kSize1 pointers
// to kSize2-byte arrays. The secondary arrays are mmaped on demand.
// Each value is initially zero and can be set to something else only once.
// Setting and getting values from multiple threads is safe w/o extra locking.
template <typename T, u64 kSize1, u64 kSize2,
          typename AddressSpaceViewTy = LocalAddressSpaceView>
class TwoLevelMap {
  // kSize2 must be a power of two so the / and % below reduce to shifts and
  // masks.
  static_assert(IsPowerOfTwo(kSize2), "Use a power of two for performance.");

 public:
  using AddressSpaceView = AddressSpaceViewTy;

  // Reset the map; no second-level arrays are mapped after this.
  void Init() {
    mu_.Init();
    internal_memset(map1_, 0, sizeof(map1_));
  }

  // Unmap every second-level array and reinitialize. Test-only: not safe
  // against concurrent accessors.
  void TestOnlyUnmap() {
    for (uptr i = 0; i < kSize1; i++) {
      T *p = Get(i);
      if (!p)
        continue;
      // Fix: Create() maps MmapSize() bytes (kSize2 * sizeof(T), rounded up
      // to page size), so that same size must be unmapped here; the previous
      // `kSize2` under-unmapped whenever sizeof(T) > 1 or kSize2 was not
      // page-aligned.
      UnmapOrDie(p, MmapSize());
    }
    Init();
  }

  // Total bytes currently mmaped for second-level arrays.
  uptr MemoryUsage() const {
    uptr res = 0;
    for (uptr i = 0; i < kSize1; i++) {
      T *p = Get(i);
      if (!p)
        continue;
      res += MmapSize();
    }
    return res;
  }

  constexpr uptr size() const { return kSize1 * kSize2; }
  constexpr uptr size1() const { return kSize1; }
  constexpr uptr size2() const { return kSize2; }

  // True iff the second-level array covering idx has been created.
  bool contains(uptr idx) const {
    CHECK_LT(idx, kSize1 * kSize2);
    return Get(idx / kSize2);
  }

  // Note: even the const accessor creates the second-level array on demand.
  const T &operator[](uptr idx) const {
    DCHECK_LT(idx, kSize1 * kSize2);
    T *map2 = GetOrCreate(idx / kSize2);
    return *AddressSpaceView::Load(&map2[idx % kSize2]);
  }

  T &operator[](uptr idx) {
    DCHECK_LT(idx, kSize1 * kSize2);
    T *map2 = GetOrCreate(idx / kSize2);
    return *AddressSpaceView::LoadWritable(&map2[idx % kSize2]);
  }

 private:
  // Size of one second-level array, page-aligned.
  constexpr uptr MmapSize() const {
    return RoundUpTo(kSize2 * sizeof(T), GetPageSizeCached());
  }

  // Load the second-level pointer for slot idx (null if not yet created).
  T *Get(uptr idx) const {
    DCHECK_LT(idx, kSize1);
    return reinterpret_cast<T *>(
        atomic_load(&map1_[idx], memory_order_acquire));
  }

  T *GetOrCreate(uptr idx) const {
    DCHECK_LT(idx, kSize1);
    // This code needs to use memory_order_acquire/consume, but we use
    // memory_order_relaxed for performance reasons (matters for arm64). We
    // expect memory_order_relaxed to be effectively equivalent to
    // memory_order_consume in this case for all relevant architectures: all
    // dependent data is reachable only by dereferencing the resulting pointer.
    // If relaxed load fails to see stored ptr, the code will fall back to
    // Create() and reload the value again with locked mutex as a memory
    // barrier.
    T *res = reinterpret_cast<T *>(atomic_load_relaxed(&map1_[idx]));
    if (LIKELY(res))
      return res;
    return Create(idx);
  }

  // Slow path: map the second-level array under the lock, re-checking first
  // in case another thread created it concurrently.
  NOINLINE T *Create(uptr idx) const {
    SpinMutexLock l(&mu_);
    T *res = Get(idx);
    if (!res) {
      res = reinterpret_cast<T *>(MmapOrDie(MmapSize(), "TwoLevelMap"));
      atomic_store(&map1_[idx], reinterpret_cast<uptr>(res),
                   memory_order_release);
    }
    return res;
  }

  mutable StaticSpinMutex mu_;
  mutable atomic_uintptr_t map1_[kSize1];
};
template <u64 kSize, typename AddressSpaceViewTy = LocalAddressSpaceView>
using FlatByteMap = FlatMap<u8, kSize, AddressSpaceViewTy>;
template <u64 kSize1, u64 kSize2,
typename AddressSpaceViewTy = LocalAddressSpaceView>
using TwoLevelByteMap = TwoLevelMap<u8, kSize1, kSize2, AddressSpaceViewTy>;
} // namespace __sanitizer
#endif

View File

@ -14,24 +14,25 @@
#include "sanitizer_fuchsia.h"
#if SANITIZER_FUCHSIA
#include <pthread.h>
#include <stdlib.h>
#include <unistd.h>
#include <zircon/errors.h>
#include <zircon/process.h>
#include <zircon/syscalls.h>
#include <zircon/utc.h>
# include <pthread.h>
# include <stdlib.h>
# include <unistd.h>
# include <zircon/errors.h>
# include <zircon/process.h>
# include <zircon/syscalls.h>
# include <zircon/utc.h>
#include "sanitizer_common.h"
#include "sanitizer_libc.h"
#include "sanitizer_mutex.h"
# include "sanitizer_common.h"
# include "sanitizer_interface_internal.h"
# include "sanitizer_libc.h"
# include "sanitizer_mutex.h"
namespace __sanitizer {
void NORETURN internal__exit(int exitcode) { _zx_process_exit(exitcode); }
uptr internal_sched_yield() {
zx_status_t status = _zx_nanosleep(0);
zx_status_t status = _zx_thread_legacy_yield(0u);
CHECK_EQ(status, ZX_OK);
return 0; // Why doesn't this return void?
}
@ -86,10 +87,9 @@ void GetThreadStackTopAndBottom(bool, uptr *stack_top, uptr *stack_bottom) {
}
void InitializePlatformEarly() {}
void MaybeReexec() {}
void CheckASLR() {}
void CheckMPROTECT() {}
void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {}
void PlatformPrepareForSandboxing(void *args) {}
void DisableCoreDumperIfNecessary() {}
void InstallDeadlySignalHandlers(SignalHandlerType handler) {}
void SetAlternateSignalStack() {}
@ -112,47 +112,6 @@ void FutexWake(atomic_uint32_t *p, u32 count) {
CHECK_EQ(status, ZX_OK);
}
// Futex word states for BlockingMutex: unlocked, locked with no waiters,
// locked with (possibly) sleeping waiters.
enum MutexState : int { MtxUnlocked = 0, MtxLocked = 1, MtxSleeping = 2 };

BlockingMutex::BlockingMutex() {
  // NOTE! It's important that this use internal_memset, because plain
  // memset might be intercepted (e.g., actually be __asan_memset).
  // Defining this so the compiler initializes each field, e.g.:
  // BlockingMutex::BlockingMutex() : BlockingMutex(LINKER_INITIALIZED) {}
  // might result in the compiler generating a call to memset, which would
  // have the same problem.
  internal_memset(this, 0, sizeof(*this));
}

void BlockingMutex::Lock() {
  CHECK_EQ(owner_, 0);
  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
  // Fast path: uncontended acquire.
  if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked)
    return;
  // Slow path: advertise that we will sleep (MtxSleeping) and wait on the
  // futex until an exchange observes the unlocked state.
  while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked) {
    zx_status_t status =
        _zx_futex_wait(reinterpret_cast<zx_futex_t *>(m), MtxSleeping,
                       ZX_HANDLE_INVALID, ZX_TIME_INFINITE);
    if (status != ZX_ERR_BAD_STATE)  // Normal race.
      CHECK_EQ(status, ZX_OK);
  }
}

void BlockingMutex::Unlock() {
  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
  u32 v = atomic_exchange(m, MtxUnlocked, memory_order_release);
  CHECK_NE(v, MtxUnlocked);  // Unlocking an unlocked mutex is a bug.
  // Only pay for a futex wake if someone may be sleeping.
  if (v == MtxSleeping) {
    zx_status_t status = _zx_futex_wake(reinterpret_cast<zx_futex_t *>(m), 1);
    CHECK_EQ(status, ZX_OK);
  }
}

void BlockingMutex::CheckLocked() const {
  // Debug assertion that the mutex is currently held (by some thread).
  auto m = reinterpret_cast<atomic_uint32_t const *>(&opaque_storage_);
  CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed));
}
uptr GetPageSize() { return _zx_system_get_page_size(); }
uptr GetMmapGranularity() { return _zx_system_get_page_size(); }
@ -168,6 +127,8 @@ uptr GetMaxUserVirtualAddress() {
uptr GetMaxVirtualAddress() { return GetMaxUserVirtualAddress(); }
bool ErrorIsOOM(error_t err) { return err == ZX_ERR_NO_MEMORY; }
static void *DoAnonymousMmapOrDie(uptr size, const char *mem_type,
bool raw_report, bool die_for_nomem) {
size = RoundUpTo(size, GetPageSize());
@ -315,6 +276,21 @@ void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
UNIMPLEMENTED();
}
bool MprotectNoAccess(uptr addr, uptr size) {
return _zx_vmar_protect(_zx_vmar_root_self(), 0, addr, size) == ZX_OK;
}
bool MprotectReadOnly(uptr addr, uptr size) {
return _zx_vmar_protect(_zx_vmar_root_self(), ZX_VM_PERM_READ, addr, size) ==
ZX_OK;
}
bool MprotectReadWrite(uptr addr, uptr size) {
return _zx_vmar_protect(_zx_vmar_root_self(),
ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, addr,
size) == ZX_OK;
}
void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
const char *mem_type) {
CHECK_GE(size, GetPageSize());
@ -413,33 +389,12 @@ bool IsAccessibleMemoryRange(uptr beg, uptr size) {
}
// FIXME implement on this platform.
void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) {}
void GetMemoryProfile(fill_profile_f cb, uptr *stats) {}
bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
uptr *read_len, uptr max_len, error_t *errno_p) {
zx_handle_t vmo;
zx_status_t status = __sanitizer_get_configuration(file_name, &vmo);
if (status == ZX_OK) {
uint64_t vmo_size;
status = _zx_vmo_get_size(vmo, &vmo_size);
if (status == ZX_OK) {
if (vmo_size < max_len)
max_len = vmo_size;
size_t map_size = RoundUpTo(max_len, GetPageSize());
uintptr_t addr;
status = _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ, 0, vmo, 0,
map_size, &addr);
if (status == ZX_OK) {
*buff = reinterpret_cast<char *>(addr);
*buff_size = map_size;
*read_len = max_len;
}
}
_zx_handle_close(vmo);
}
if (status != ZX_OK && errno_p)
*errno_p = status;
return status == ZX_OK;
*errno_p = ZX_ERR_NOT_SUPPORTED;
return false;
}
void RawWrite(const char *buffer) {
@ -516,6 +471,9 @@ u32 GetNumberOfCPUs() { return zx_system_get_num_cpus(); }
uptr GetRSS() { UNIMPLEMENTED(); }
void *internal_start_thread(void *(*func)(void *arg), void *arg) { return 0; }
void internal_join_thread(void *th) {}
void InitializePlatformCommonFlags(CommonFlags *cf) {}
} // namespace __sanitizer

View File

@ -38,6 +38,30 @@ class MurMur2HashBuilder {
return x;
}
};
// Incremental 64-bit MurmurHash2 builder: feed 64-bit words with add(), read
// the current hash with get().
class MurMur2Hash64Builder {
  static const u64 m = 0xc6a4a7935bd1e995ull;
  static const u64 seed = 0x9747b28c9747b28cull;
  static const u64 r = 47;
  u64 h;

 public:
  explicit MurMur2Hash64Builder(u64 init = 0) : h(seed ^ (init * m)) {}

  // Mix one word into the running state.
  void add(u64 k) {
    k *= m;
    k ^= k >> r;
    k *= m;
    h = (h ^ k) * m;
  }

  // Finalization (extra avalanche rounds) on a copy of the state, so more
  // words may still be added afterwards.
  u64 get() {
    u64 x = h ^ (h >> r);
    x *= m;
    return x ^ (x >> r);
  }
};
} //namespace __sanitizer
#endif // SANITIZER_HASH_H

View File

@ -1267,8 +1267,6 @@ static void ioctl_table_fill() {
_(TIOCGFLAGS, WRITE, sizeof(int));
_(TIOCSFLAGS, READ, sizeof(int));
_(TIOCDCDTIMESTAMP, WRITE, struct_timeval_sz);
_(TIOCRCVFRAME, READ, sizeof(uptr));
_(TIOCXMTFRAME, READ, sizeof(uptr));
_(TIOCPTMGET, WRITE, struct_ptmget_sz);
_(TIOCGRANTPT, NONE, 0);
_(TIOCPTSNAME, WRITE, struct_ptmget_sz);
@ -1406,7 +1404,7 @@ static void ioctl_table_fill() {
_(URIO_SEND_COMMAND, READWRITE, struct_urio_command_sz);
_(URIO_RECV_COMMAND, READWRITE, struct_urio_command_sz);
#undef _
} // NOLINT
}
static bool ioctl_initialized = false;

View File

@ -20,103 +20,134 @@
#include "sanitizer_internal_defs.h"
extern "C" {
// Tell the tools to write their reports to "path.<pid>" instead of stderr.
// The special values are "stdout" and "stderr".
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_set_report_path(const char *path);
// Tell the tools to write their reports to the provided file descriptor
// (casted to void *).
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_set_report_fd(void *fd);
// Get the current full report file path, if a path was specified by
// an earlier call to __sanitizer_set_report_path. Returns null otherwise.
SANITIZER_INTERFACE_ATTRIBUTE
const char *__sanitizer_get_report_path();
// Tell the tools to write their reports to "path.<pid>" instead of stderr.
// The special values are "stdout" and "stderr".
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_set_report_path(const char *path);
// Tell the tools to write their reports to the provided file descriptor
// (casted to void *).
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_set_report_fd(void *fd);
// Get the current full report file path, if a path was specified by
// an earlier call to __sanitizer_set_report_path. Returns null otherwise.
SANITIZER_INTERFACE_ATTRIBUTE
const char *__sanitizer_get_report_path();
typedef struct {
int coverage_sandboxed;
__sanitizer::sptr coverage_fd;
unsigned int coverage_max_block_size;
} __sanitizer_sandbox_arguments;
typedef struct {
int coverage_sandboxed;
__sanitizer::sptr coverage_fd;
unsigned int coverage_max_block_size;
} __sanitizer_sandbox_arguments;
// Notify the tools that the sandbox is going to be turned on.
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_sandbox_on_notify(__sanitizer_sandbox_arguments *args);
// Notify the tools that the sandbox is going to be turned on.
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_sandbox_on_notify(__sanitizer_sandbox_arguments *args);
// This function is called by the tool when it has just finished reporting
// an error. 'error_summary' is a one-line string that summarizes
// the error message. This function can be overridden by the client.
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_report_error_summary(const char *error_summary);
// This function is called by the tool when it has just finished reporting
// an error. 'error_summary' is a one-line string that summarizes
// the error message. This function can be overridden by the client.
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_report_error_summary(const char *error_summary);
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump();
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_coverage(
const __sanitizer::uptr *pcs, const __sanitizer::uptr len);
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_trace_pc_guard_coverage();
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump();
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_coverage(
const __sanitizer::uptr *pcs, const __sanitizer::uptr len);
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_trace_pc_guard_coverage();
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov(__sanitizer::u32 *guard);
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov(__sanitizer::u32 *guard);
// Returns 1 on the first call, then returns 0 thereafter. Called by the tool
// to ensure only one report is printed when multiple errors occur
// simultaneously.
SANITIZER_INTERFACE_ATTRIBUTE int __sanitizer_acquire_crash_state();
// Returns 1 on the first call, then returns 0 thereafter. Called by the tool
// to ensure only one report is printed when multiple errors occur
// simultaneously.
SANITIZER_INTERFACE_ATTRIBUTE int __sanitizer_acquire_crash_state();
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_annotate_contiguous_container(const void *beg,
const void *end,
const void *old_mid,
const void *new_mid);
SANITIZER_INTERFACE_ATTRIBUTE
int __sanitizer_verify_contiguous_container(const void *beg, const void *mid,
const void *end);
SANITIZER_INTERFACE_ATTRIBUTE
const void *__sanitizer_contiguous_container_find_bad_address(
const void *beg, const void *mid, const void *end);
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_annotate_contiguous_container(const void *beg, const void *end,
const void *old_mid,
const void *new_mid);
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_annotate_double_ended_contiguous_container(
const void *storage_beg, const void *storage_end,
const void *old_container_beg, const void *old_container_end,
const void *new_container_beg, const void *new_container_end);
SANITIZER_INTERFACE_ATTRIBUTE
int __sanitizer_verify_contiguous_container(const void *beg, const void *mid,
const void *end);
SANITIZER_INTERFACE_ATTRIBUTE
int __sanitizer_verify_double_ended_contiguous_container(
const void *storage_beg, const void *container_beg,
const void *container_end, const void *storage_end);
SANITIZER_INTERFACE_ATTRIBUTE
const void *__sanitizer_contiguous_container_find_bad_address(const void *beg,
const void *mid,
const void *end);
SANITIZER_INTERFACE_ATTRIBUTE
const void *__sanitizer_double_ended_contiguous_container_find_bad_address(
const void *storage_beg, const void *container_beg,
const void *container_end, const void *storage_end);
SANITIZER_INTERFACE_ATTRIBUTE
int __sanitizer_get_module_and_offset_for_pc(
__sanitizer::uptr pc, char *module_path,
__sanitizer::uptr module_path_len, __sanitizer::uptr *pc_offset);
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_cmp();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_cmp1();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_cmp2();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_cmp4();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_cmp8();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_const_cmp1();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_const_cmp2();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_const_cmp4();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_const_cmp8();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_switch();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_div4();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_div8();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_gep();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_pc_indir();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_pc_guard(__sanitizer::u32*);
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_pc_guard_init(__sanitizer::u32*,
__sanitizer::u32*);
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_8bit_counters_init();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_cov_bool_flag_init();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_cov_pcs_init();
} // extern "C"
SANITIZER_INTERFACE_ATTRIBUTE
int __sanitizer_get_module_and_offset_for_pc(void *pc, char *module_path,
__sanitizer::uptr module_path_len,
void **pc_offset);
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_cov_trace_cmp();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_cov_trace_cmp1();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_cov_trace_cmp2();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_cov_trace_cmp4();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_cov_trace_cmp8();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_cov_trace_const_cmp1();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_cov_trace_const_cmp2();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_cov_trace_const_cmp4();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_cov_trace_const_cmp8();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_cov_trace_switch();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_cov_trace_div4();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_cov_trace_div8();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_cov_trace_gep();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_cov_trace_pc_indir();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_cov_load1();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_cov_load2();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_cov_load4();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_cov_load8();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_cov_load16();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_cov_store1();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_cov_store2();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_cov_store4();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_cov_store8();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_cov_store16();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_cov_trace_pc_guard(__sanitizer::u32 *);
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_cov_trace_pc_guard_init(__sanitizer::u32 *, __sanitizer::u32 *);
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_cov_8bit_counters_init(char *, char *);
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_cov_bool_flag_init();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_cov_pcs_init(const __sanitizer::uptr *, const __sanitizer::uptr *);
} // extern "C"
#endif // SANITIZER_INTERFACE_INTERNAL_H

View File

@ -13,6 +13,7 @@
#define SANITIZER_DEFS_H
#include "sanitizer_platform.h"
#include "sanitizer_redefine_builtins.h"
#ifndef SANITIZER_DEBUG
# define SANITIZER_DEBUG 0
@ -37,15 +38,6 @@
# define SANITIZER_WEAK_ATTRIBUTE __attribute__((weak))
#endif
// TLS is handled differently on different platforms
#if SANITIZER_LINUX || SANITIZER_NETBSD || \
SANITIZER_FREEBSD
# define SANITIZER_TLS_INITIAL_EXEC_ATTRIBUTE \
__attribute__((tls_model("initial-exec"))) thread_local
#else
# define SANITIZER_TLS_INITIAL_EXEC_ATTRIBUTE
#endif
//--------------------------- WEAK FUNCTIONS ---------------------------------//
// When working with weak functions, to simplify the code and make it more
// portable, when possible define a default implementation using this macro:
@ -73,7 +65,7 @@
// Before Xcode 4.5, the Darwin linker doesn't reliably support undefined
// weak symbols. Mac OS X 10.9/Darwin 13 is the first release only supported
// by Xcode >= 4.5.
#elif SANITIZER_MAC && \
#elif SANITIZER_APPLE && \
__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ >= 1090 && !SANITIZER_GO
# define SANITIZER_SUPPORTS_WEAK_HOOKS 1
#else
@ -125,6 +117,10 @@
# define __has_attribute(x) 0
#endif
#if !defined(__has_cpp_attribute)
# define __has_cpp_attribute(x) 0
#endif
// For portability reasons we do not include stddef.h, stdint.h or any other
// system header, but we do need some basic types that are not defined
// in a portable way by the language itself.
@ -135,8 +131,13 @@ namespace __sanitizer {
typedef unsigned long long uptr;
typedef signed long long sptr;
#else
# if (SANITIZER_WORDSIZE == 64) || SANITIZER_APPLE || SANITIZER_WINDOWS
typedef unsigned long uptr;
typedef signed long sptr;
# else
typedef unsigned int uptr;
typedef signed int sptr;
# endif
#endif // defined(_WIN64)
#if defined(__x86_64__)
// Since x32 uses ILP32 data model in 64-bit hardware mode, we must use
@ -168,17 +169,17 @@ typedef long pid_t;
typedef int pid_t;
#endif
#if SANITIZER_FREEBSD || SANITIZER_NETBSD || \
SANITIZER_MAC || \
#if SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_APPLE || \
(SANITIZER_SOLARIS && (defined(_LP64) || _FILE_OFFSET_BITS == 64)) || \
(SANITIZER_LINUX && defined(__x86_64__))
(SANITIZER_LINUX && !SANITIZER_GLIBC && !SANITIZER_ANDROID) || \
(SANITIZER_LINUX && (defined(__x86_64__) || defined(__hexagon__)))
typedef u64 OFF_T;
#else
typedef uptr OFF_T;
#endif
typedef u64 OFF64_T;
#if (SANITIZER_WORDSIZE == 64) || SANITIZER_MAC
#if (SANITIZER_WORDSIZE == 64) || SANITIZER_APPLE
typedef uptr operator_new_size_type;
#else
# if defined(__s390__) && !defined(__s390x__)
@ -217,7 +218,7 @@ typedef u64 tid_t;
# define WARN_UNUSED_RESULT
#else // _MSC_VER
# define ALWAYS_INLINE inline __attribute__((always_inline))
# define ALIAS(x) __attribute__((alias(x)))
# define ALIAS(x) __attribute__((alias(SANITIZER_STRINGIFY(x))))
// Please only use the ALIGNED macro before the type.
// Using ALIGNED after the variable declaration is not portable!
# define ALIGNED(x) __attribute__((aligned(x)))
@ -250,6 +251,20 @@ typedef u64 tid_t;
# define NOEXCEPT throw()
#endif
#if __has_cpp_attribute(clang::fallthrough)
# define FALLTHROUGH [[clang::fallthrough]]
#elif __has_cpp_attribute(fallthrough)
# define FALLTHROUGH [[fallthrough]]
#else
# define FALLTHROUGH
#endif
#if __has_attribute(uninitialized)
# define UNINITIALIZED __attribute__((uninitialized))
#else
# define UNINITIALIZED
#endif
// Unaligned versions of basic types.
typedef ALIGNED(1) u16 uu16;
typedef ALIGNED(1) u32 uu32;
@ -277,14 +292,17 @@ void NORETURN CheckFailed(const char *file, int line, const char *cond,
u64 v1, u64 v2);
// Check macro
#define RAW_CHECK_MSG(expr, msg) do { \
if (UNLIKELY(!(expr))) { \
RawWrite(msg); \
Die(); \
} \
} while (0)
#define RAW_CHECK_MSG(expr, msg, ...) \
do { \
if (UNLIKELY(!(expr))) { \
const char* msgs[] = {msg, __VA_ARGS__}; \
for (const char* m : msgs) RawWrite(m); \
Die(); \
} \
} while (0)
#define RAW_CHECK(expr) RAW_CHECK_MSG(expr, #expr)
#define RAW_CHECK(expr) RAW_CHECK_MSG(expr, #expr "\n", )
#define RAW_CHECK_VA(expr, ...) RAW_CHECK_MSG(expr, #expr "\n", __VA_ARGS__)
#define CHECK_IMPL(c1, op, c2) \
do { \
@ -366,13 +384,10 @@ void NORETURN CheckFailed(const char *file, int line, const char *cond,
enum LinkerInitialized { LINKER_INITIALIZED = 0 };
#if !defined(_MSC_VER) || defined(__clang__)
#if SANITIZER_S390_31
#define GET_CALLER_PC() \
(__sanitizer::uptr) __builtin_extract_return_addr(__builtin_return_address(0))
#else
#define GET_CALLER_PC() (__sanitizer::uptr) __builtin_return_address(0)
#endif
#define GET_CURRENT_FRAME() (__sanitizer::uptr) __builtin_frame_address(0)
# define GET_CALLER_PC() \
((__sanitizer::uptr)__builtin_extract_return_addr( \
__builtin_return_address(0)))
# define GET_CURRENT_FRAME() ((__sanitizer::uptr)__builtin_frame_address(0))
inline void Trap() {
__builtin_trap();
}
@ -381,13 +396,13 @@ extern "C" void* _ReturnAddress(void);
extern "C" void* _AddressOfReturnAddress(void);
# pragma intrinsic(_ReturnAddress)
# pragma intrinsic(_AddressOfReturnAddress)
#define GET_CALLER_PC() (__sanitizer::uptr) _ReturnAddress()
# define GET_CALLER_PC() ((__sanitizer::uptr)_ReturnAddress())
// CaptureStackBackTrace doesn't need to know BP on Windows.
#define GET_CURRENT_FRAME() \
(((__sanitizer::uptr)_AddressOfReturnAddress()) + sizeof(__sanitizer::uptr))
# define GET_CURRENT_FRAME() \
(((__sanitizer::uptr)_AddressOfReturnAddress()) + sizeof(__sanitizer::uptr))
extern "C" void __ud2(void);
# pragma intrinsic(__ud2)
# pragma intrinsic(__ud2)
inline void Trap() {
__ud2();
}
@ -409,8 +424,14 @@ inline void Trap() {
(void)enable_fp; \
} while (0)
constexpr u32 kInvalidTid = -1;
constexpr u32 kMainTid = 0;
// Internal thread identifier allocated by ThreadRegistry.
typedef u32 Tid;
constexpr Tid kInvalidTid = -1;
constexpr Tid kMainTid = 0;
// Stack depot stack identifier.
typedef u32 StackID;
const StackID kInvalidStackID = 0;
} // namespace __sanitizer

View File

@ -0,0 +1,87 @@
//===-- sanitizer_leb128.h --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_LEB128_H
#define SANITIZER_LEB128_H
#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
namespace __sanitizer {
template <typename T, typename It>
It EncodeSLEB128(T value, It begin, It end) {
bool more;
do {
u8 byte = value & 0x7f;
// NOTE: this assumes that this signed shift is an arithmetic right shift.
value >>= 7;
more = !((((value == 0) && ((byte & 0x40) == 0)) ||
((value == -1) && ((byte & 0x40) != 0))));
if (more)
byte |= 0x80;
if (UNLIKELY(begin == end))
break;
*(begin++) = byte;
} while (more);
return begin;
}
// Decodes an SLEB128-encoded signed value from [begin, end) into *v.
// Returns an iterator one past the last byte consumed.  If the input runs
// out before the value terminates, returns `end` and leaves *v unmodified.
template <typename T, typename It>
It DecodeSLEB128(It begin, It end, T* v) {
  T value = 0;         // accumulated result
  unsigned shift = 0;  // bit position of the next 7-bit slice
  u8 byte;
  do {
    if (UNLIKELY(begin == end))
      return begin;  // truncated input: bail out without touching *v
    byte = *(begin++);
    T slice = byte & 0x7f;
    value |= slice << shift;
    shift += 7;
  } while (byte >= 128);  // high bit set => more bytes follow
  // Sign-extend when the final group's sign bit (0x40) is set and fewer than
  // 64 result bits have been filled in so far.
  if (shift < 64 && (byte & 0x40))
    value |= (-1ULL) << shift;
  *v = value;
  return begin;
}
template <typename T, typename It>
It EncodeULEB128(T value, It begin, It end) {
do {
u8 byte = value & 0x7f;
value >>= 7;
if (value)
byte |= 0x80;
if (UNLIKELY(begin == end))
break;
*(begin++) = byte;
} while (value);
return begin;
}
template <typename T, typename It>
It DecodeULEB128(It begin, It end, T* v) {
T value = 0;
unsigned shift = 0;
u8 byte;
do {
if (UNLIKELY(begin == end))
return begin;
byte = *(begin++);
T slice = byte & 0x7f;
value += slice << shift;
shift += 7;
} while (byte >= 128);
*v = value;
return begin;
}
} // namespace __sanitizer
#endif // SANITIZER_LEB128_H

View File

@ -10,6 +10,9 @@
// run-time libraries. See sanitizer_libc.h for details.
//===----------------------------------------------------------------------===//
// Do not redefine builtins; this file is defining the builtin replacements.
#define SANITIZER_COMMON_NO_REDEFINE_BUILTINS
#include "sanitizer_allocator_internal.h"
#include "sanitizer_common.h"
#include "sanitizer_libc.h"
@ -46,7 +49,10 @@ int internal_memcmp(const void* s1, const void* s2, uptr n) {
return 0;
}
void *internal_memcpy(void *dest, const void *src, uptr n) {
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE void *__sanitizer_internal_memcpy(void *dest,
const void *src,
uptr n) {
char *d = (char*)dest;
const char *s = (const char *)src;
for (uptr i = 0; i < n; ++i)
@ -54,7 +60,8 @@ void *internal_memcpy(void *dest, const void *src, uptr n) {
return dest;
}
void *internal_memmove(void *dest, const void *src, uptr n) {
SANITIZER_INTERFACE_ATTRIBUTE void *__sanitizer_internal_memmove(
void *dest, const void *src, uptr n) {
char *d = (char*)dest;
const char *s = (const char *)src;
sptr i, signed_n = (sptr)n;
@ -72,7 +79,8 @@ void *internal_memmove(void *dest, const void *src, uptr n) {
return dest;
}
void *internal_memset(void* s, int c, uptr n) {
SANITIZER_INTERFACE_ATTRIBUTE void *__sanitizer_internal_memset(void *s, int c,
uptr n) {
// Optimize for the most performance-critical case:
if ((reinterpret_cast<uptr>(s) % 16) == 0 && (n % 16) == 0) {
u64 *p = reinterpret_cast<u64*>(s);
@ -95,6 +103,7 @@ void *internal_memset(void* s, int c, uptr n) {
}
return s;
}
} // extern "C"
uptr internal_strcspn(const char *s, const char *reject) {
uptr i;
@ -258,6 +267,18 @@ s64 internal_simple_strtoll(const char *nptr, const char **endptr, int base) {
}
}
// Returns the number of wide characters in `s` before the terminating L'\0'
// (the wide-string analogue of internal_strlen).
uptr internal_wcslen(const wchar_t *s) {
  const wchar_t *p = s;
  while (*p)
    ++p;
  return p - s;
}
// Like internal_wcslen, but never reads past the first `maxlen` characters;
// returns `maxlen` when no terminator is found within that window.
uptr internal_wcsnlen(const wchar_t *s, uptr maxlen) {
  uptr n = 0;
  for (; n < maxlen; ++n) {
    if (!s[n])
      break;
  }
  return n;
}
bool mem_is_zero(const char *beg, uptr size) {
CHECK_LE(size, 1ULL << FIRST_32_SECOND_64(30, 40)); // Sanity check.
const char *end = beg + size;

View File

@ -24,15 +24,33 @@ namespace __sanitizer {
// internal_X() is a custom implementation of X() for use in RTL.
extern "C" {
// These are used as builtin replacements; see sanitizer_redefine_builtins.h.
// In normal runtime code, use the __sanitizer::internal_X() aliases instead.
SANITIZER_INTERFACE_ATTRIBUTE void *__sanitizer_internal_memcpy(void *dest,
const void *src,
uptr n);
SANITIZER_INTERFACE_ATTRIBUTE void *__sanitizer_internal_memmove(
void *dest, const void *src, uptr n);
SANITIZER_INTERFACE_ATTRIBUTE void *__sanitizer_internal_memset(void *s, int c,
uptr n);
} // extern "C"
// String functions
s64 internal_atoll(const char *nptr);
void *internal_memchr(const void *s, int c, uptr n);
void *internal_memrchr(const void *s, int c, uptr n);
int internal_memcmp(const void* s1, const void* s2, uptr n);
void *internal_memcpy(void *dest, const void *src, uptr n);
void *internal_memmove(void *dest, const void *src, uptr n);
ALWAYS_INLINE void *internal_memcpy(void *dest, const void *src, uptr n) {
return __sanitizer_internal_memcpy(dest, src, n);
}
ALWAYS_INLINE void *internal_memmove(void *dest, const void *src, uptr n) {
return __sanitizer_internal_memmove(dest, src, n);
}
// Should not be used in performance-critical places.
void *internal_memset(void *s, int c, uptr n);
ALWAYS_INLINE void *internal_memset(void *s, int c, uptr n) {
return __sanitizer_internal_memset(s, c, n);
}
char* internal_strchr(const char *s, int c);
char *internal_strchrnul(const char *s, int c);
int internal_strcmp(const char *s1, const char *s2);
@ -49,7 +67,10 @@ char *internal_strrchr(const char *s, int c);
char *internal_strstr(const char *haystack, const char *needle);
// Works only for base=10 and doesn't set errno.
s64 internal_simple_strtoll(const char *nptr, const char **endptr, int base);
int internal_snprintf(char *buffer, uptr length, const char *format, ...);
int internal_snprintf(char *buffer, uptr length, const char *format, ...)
FORMAT(3, 4);
uptr internal_wcslen(const wchar_t *s);
uptr internal_wcsnlen(const wchar_t *s, uptr maxlen);
// Return true if all bytes in [mem, mem+size) are zero.
// Optimized for the case when the result is true.

View File

@ -8,7 +8,7 @@
#include "sanitizer_platform.h"
#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_MAC || \
#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_APPLE || \
SANITIZER_NETBSD
#include "sanitizer_libignore.h"
@ -22,9 +22,9 @@ LibIgnore::LibIgnore(LinkerInitialized) {
}
void LibIgnore::AddIgnoredLibrary(const char *name_templ) {
BlockingMutexLock lock(&mutex_);
Lock lock(&mutex_);
if (count_ >= kMaxLibs) {
Report("%s: too many ignored libraries (max: %d)\n", SanitizerToolName,
Report("%s: too many ignored libraries (max: %zu)\n", SanitizerToolName,
kMaxLibs);
Die();
}
@ -36,7 +36,7 @@ void LibIgnore::AddIgnoredLibrary(const char *name_templ) {
}
void LibIgnore::OnLibraryLoaded(const char *name) {
BlockingMutexLock lock(&mutex_);
Lock lock(&mutex_);
// Try to match suppressions with symlink target.
InternalMmapVector<char> buf(kMaxPathLength);
if (name && internal_readlink(name, buf.data(), buf.size() - 1) > 0 &&
@ -105,7 +105,7 @@ void LibIgnore::OnLibraryLoaded(const char *name) {
continue;
if (IsPcInstrumented(range.beg) && IsPcInstrumented(range.end - 1))
continue;
VReport(1, "Adding instrumented range %p-%p from library '%s'\n",
VReport(1, "Adding instrumented range 0x%zx-0x%zx from library '%s'\n",
range.beg, range.end, mod.full_name());
const uptr idx =
atomic_load(&instrumented_ranges_count_, memory_order_relaxed);
@ -125,5 +125,5 @@ void LibIgnore::OnLibraryUnloaded() {
} // namespace __sanitizer
#endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_MAC ||
#endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_APPLE ||
// SANITIZER_NETBSD

View File

@ -77,7 +77,7 @@ class LibIgnore {
LibCodeRange instrumented_code_ranges_[kMaxInstrumentedRanges];
// Cold part:
BlockingMutex mutex_;
Mutex mutex_;
uptr count_;
Lib libs_[kMaxLibs];
bool track_instrumented_libs_;

View File

@ -34,7 +34,7 @@
// format. Struct kernel_stat is defined as 'struct stat' in asm/stat.h. To
// access stat from asm/stat.h, without conflicting with definition in
// sys/stat.h, we use this trick.
#if defined(__mips64)
#if SANITIZER_MIPS64
#include <asm/unistd.h>
#include <sys/types.h>
#define stat kernel_stat
@ -78,8 +78,13 @@
#include <sys/personality.h>
#endif
#if SANITIZER_LINUX && defined(__loongarch__)
# include <sys/sysmacros.h>
#endif
#if SANITIZER_FREEBSD
#include <sys/exec.h>
#include <sys/procctl.h>
#include <sys/sysctl.h>
#include <machine/atomic.h>
extern "C" {
@ -123,8 +128,9 @@ const int FUTEX_WAKE_PRIVATE = FUTEX_WAKE | FUTEX_PRIVATE_FLAG;
// Are we using 32-bit or 64-bit Linux syscalls?
// x32 (which defines __x86_64__) has SANITIZER_WORDSIZE == 32
// but it still needs to use 64-bit syscalls.
#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__powerpc64__) || \
SANITIZER_WORDSIZE == 64)
#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__powerpc64__) || \
SANITIZER_WORDSIZE == 64 || \
(defined(__mips__) && _MIPS_SIM == _ABIN32))
# define SANITIZER_LINUX_USES_64BIT_SYSCALLS 1
#else
# define SANITIZER_LINUX_USES_64BIT_SYSCALLS 0
@ -150,17 +156,51 @@ const int FUTEX_WAKE_PRIVATE = FUTEX_WAKE | FUTEX_PRIVATE_FLAG;
namespace __sanitizer {
#if SANITIZER_LINUX && defined(__x86_64__)
#include "sanitizer_syscall_linux_x86_64.inc"
#elif SANITIZER_LINUX && SANITIZER_RISCV64
#include "sanitizer_syscall_linux_riscv64.inc"
#elif SANITIZER_LINUX && defined(__aarch64__)
#include "sanitizer_syscall_linux_aarch64.inc"
#elif SANITIZER_LINUX && defined(__arm__)
#include "sanitizer_syscall_linux_arm.inc"
#else
#include "sanitizer_syscall_generic.inc"
#endif
// Replaces the calling thread's signal mask with *set (SIG_SETMASK);
// if `oldset` is non-null, the previously-active mask is stored there.
// Dies via CHECK if the underlying sigprocmask syscall fails.
void SetSigProcMask(__sanitizer_sigset_t *set, __sanitizer_sigset_t *oldset) {
  CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, set, oldset));
}
// Blocks (almost) all signals on the calling thread, saving the previous
// mask into *oldset so it can later be restored with SetSigProcMask.
// A few signals are deliberately left unblocked; see the comments below.
void BlockSignals(__sanitizer_sigset_t *oldset) {
  __sanitizer_sigset_t set;
  internal_sigfillset(&set);
#  if SANITIZER_LINUX && !SANITIZER_ANDROID
  // Glibc uses SIGSETXID signal during setuid call. If this signal is blocked
  // on any thread, setuid call hangs.
  // See test/sanitizer_common/TestCases/Linux/setuid.c.
  // (33 is glibc's internal SIGSETXID real-time signal number.)
  internal_sigdelset(&set, 33);
#  endif
#  if SANITIZER_LINUX
  // Seccomp-BPF-sandboxed processes rely on SIGSYS to handle trapped syscalls.
  // If this signal is blocked, such calls cannot be handled and the process may
  // hang.  (31 is SIGSYS on Linux.)
  internal_sigdelset(&set, 31);
#  endif
  SetSigProcMask(&set, oldset);
}
// RAII guard: blocks signals for the object's lifetime (via BlockSignals),
// stashing the prior mask in saved_.  If `copy` is non-null, the prior mask
// is also written to *copy so the caller can inspect or reuse it.
ScopedBlockSignals::ScopedBlockSignals(__sanitizer_sigset_t *copy) {
  BlockSignals(&saved_);
  if (copy)
    internal_memcpy(copy, &saved_, sizeof(saved_));
}

// Restores the signal mask that was in effect when the guard was created.
ScopedBlockSignals::~ScopedBlockSignals() { SetSigProcMask(&saved_, nullptr); }
# if SANITIZER_LINUX && defined(__x86_64__)
# include "sanitizer_syscall_linux_x86_64.inc"
# elif SANITIZER_LINUX && SANITIZER_RISCV64
# include "sanitizer_syscall_linux_riscv64.inc"
# elif SANITIZER_LINUX && defined(__aarch64__)
# include "sanitizer_syscall_linux_aarch64.inc"
# elif SANITIZER_LINUX && defined(__arm__)
# include "sanitizer_syscall_linux_arm.inc"
# elif SANITIZER_LINUX && defined(__hexagon__)
# include "sanitizer_syscall_linux_hexagon.inc"
# elif SANITIZER_LINUX && SANITIZER_LOONGARCH64
# include "sanitizer_syscall_linux_loongarch64.inc"
# else
# include "sanitizer_syscall_generic.inc"
# endif
// --------------- sanitizer_libc.h
#if !SANITIZER_SOLARIS && !SANITIZER_NETBSD
@ -204,7 +244,7 @@ uptr internal_close(fd_t fd) {
}
uptr internal_open(const char *filename, int flags) {
#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
# if SANITIZER_LINUX
return internal_syscall(SYSCALL(openat), AT_FDCWD, (uptr)filename, flags);
#else
return internal_syscall(SYSCALL(open), (uptr)filename, flags);
@ -212,7 +252,7 @@ uptr internal_open(const char *filename, int flags) {
}
uptr internal_open(const char *filename, int flags, u32 mode) {
#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
# if SANITIZER_LINUX
return internal_syscall(SYSCALL(openat), AT_FDCWD, (uptr)filename, flags,
mode);
#else
@ -241,7 +281,7 @@ uptr internal_ftruncate(fd_t fd, uptr size) {
return res;
}
#if !SANITIZER_LINUX_USES_64BIT_SYSCALLS && SANITIZER_LINUX
#if (!SANITIZER_LINUX_USES_64BIT_SYSCALLS || SANITIZER_SPARC) && SANITIZER_LINUX
static void stat64_to_stat(struct stat64 *in, struct stat *out) {
internal_memset(out, 0, sizeof(*out));
out->st_dev = in->st_dev;
@ -260,7 +300,29 @@ static void stat64_to_stat(struct stat64 *in, struct stat *out) {
}
#endif
#if defined(__mips64)
#if SANITIZER_LINUX && defined(__loongarch__)
// Converts a `struct statx` (as filled by the statx(2) syscall) into the
// legacy `struct stat` layout expected by callers of the internal_*stat
// wrappers.  Used where stat results are obtained via statx (per the
// surrounding guard, Linux/LoongArch).
static void statx_to_stat(struct statx *in, struct stat *out) {
  internal_memset(out, 0, sizeof(*out));  // zero fields statx doesn't provide
  // Device numbers arrive split into major/minor; reassemble with makedev().
  out->st_dev = makedev(in->stx_dev_major, in->stx_dev_minor);
  out->st_ino = in->stx_ino;
  out->st_mode = in->stx_mode;
  out->st_nlink = in->stx_nlink;
  out->st_uid = in->stx_uid;
  out->st_gid = in->stx_gid;
  out->st_rdev = makedev(in->stx_rdev_major, in->stx_rdev_minor);
  out->st_size = in->stx_size;
  out->st_blksize = in->stx_blksize;
  out->st_blocks = in->stx_blocks;
  // Timestamps: seconds go to the legacy st_*time fields, nanoseconds to the
  // st_*tim.tv_nsec members.
  out->st_atime = in->stx_atime.tv_sec;
  out->st_atim.tv_nsec = in->stx_atime.tv_nsec;
  out->st_mtime = in->stx_mtime.tv_sec;
  out->st_mtim.tv_nsec = in->stx_mtime.tv_nsec;
  out->st_ctime = in->stx_ctime.tv_sec;
  out->st_ctim.tv_nsec = in->stx_ctime.tv_nsec;
}
#endif
#if SANITIZER_MIPS64
// Undefine compatibility macros from <sys/stat.h>
// so that they would not clash with the kernel_stat
// st_[a|m|c]time fields
@ -311,52 +373,65 @@ static void kernel_stat_to_stat(struct kernel_stat *in, struct stat *out) {
#endif
uptr internal_stat(const char *path, void *buf) {
#if SANITIZER_FREEBSD
# if SANITIZER_FREEBSD
return internal_syscall(SYSCALL(fstatat), AT_FDCWD, (uptr)path, (uptr)buf, 0);
#elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
# elif SANITIZER_LINUX
# if defined(__loongarch__)
struct statx bufx;
int res = internal_syscall(SYSCALL(statx), AT_FDCWD, (uptr)path,
AT_NO_AUTOMOUNT, STATX_BASIC_STATS, (uptr)&bufx);
statx_to_stat(&bufx, (struct stat *)buf);
return res;
# elif (SANITIZER_WORDSIZE == 64 || SANITIZER_X32 || \
(defined(__mips__) && _MIPS_SIM == _ABIN32)) && \
!SANITIZER_SPARC
return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path, (uptr)buf,
0);
#elif SANITIZER_LINUX_USES_64BIT_SYSCALLS
# if defined(__mips64)
// For mips64, stat syscall fills buffer in the format of kernel_stat
struct kernel_stat kbuf;
int res = internal_syscall(SYSCALL(stat), path, &kbuf);
kernel_stat_to_stat(&kbuf, (struct stat *)buf);
# else
struct stat64 buf64;
int res = internal_syscall(SYSCALL(fstatat64), AT_FDCWD, (uptr)path,
(uptr)&buf64, 0);
stat64_to_stat(&buf64, (struct stat *)buf);
return res;
# else
return internal_syscall(SYSCALL(stat), (uptr)path, (uptr)buf);
# endif
#else
# endif
# else
struct stat64 buf64;
int res = internal_syscall(SYSCALL(stat64), path, &buf64);
stat64_to_stat(&buf64, (struct stat *)buf);
return res;
#endif
# endif
}
uptr internal_lstat(const char *path, void *buf) {
#if SANITIZER_FREEBSD
# if SANITIZER_FREEBSD
return internal_syscall(SYSCALL(fstatat), AT_FDCWD, (uptr)path, (uptr)buf,
AT_SYMLINK_NOFOLLOW);
#elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
# elif SANITIZER_LINUX
# if defined(__loongarch__)
struct statx bufx;
int res = internal_syscall(SYSCALL(statx), AT_FDCWD, (uptr)path,
AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT,
STATX_BASIC_STATS, (uptr)&bufx);
statx_to_stat(&bufx, (struct stat *)buf);
return res;
# elif (defined(_LP64) || SANITIZER_X32 || \
(defined(__mips__) && _MIPS_SIM == _ABIN32)) && \
!SANITIZER_SPARC
return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path, (uptr)buf,
AT_SYMLINK_NOFOLLOW);
#elif SANITIZER_LINUX_USES_64BIT_SYSCALLS
# if SANITIZER_MIPS64
// For mips64, lstat syscall fills buffer in the format of kernel_stat
struct kernel_stat kbuf;
int res = internal_syscall(SYSCALL(lstat), path, &kbuf);
kernel_stat_to_stat(&kbuf, (struct stat *)buf);
# else
struct stat64 buf64;
int res = internal_syscall(SYSCALL(fstatat64), AT_FDCWD, (uptr)path,
(uptr)&buf64, AT_SYMLINK_NOFOLLOW);
stat64_to_stat(&buf64, (struct stat *)buf);
return res;
# else
return internal_syscall(SYSCALL(lstat), (uptr)path, (uptr)buf);
# endif
#else
# endif
# else
struct stat64 buf64;
int res = internal_syscall(SYSCALL(lstat64), path, &buf64);
stat64_to_stat(&buf64, (struct stat *)buf);
return res;
#endif
# endif
}
uptr internal_fstat(fd_t fd, void *buf) {
@ -367,9 +442,15 @@ uptr internal_fstat(fd_t fd, void *buf) {
int res = internal_syscall(SYSCALL(fstat), fd, &kbuf);
kernel_stat_to_stat(&kbuf, (struct stat *)buf);
return res;
# else
# elif SANITIZER_LINUX && defined(__loongarch__)
struct statx bufx;
int res = internal_syscall(SYSCALL(statx), fd, "", AT_EMPTY_PATH,
STATX_BASIC_STATS, (uptr)&bufx);
statx_to_stat(&bufx, (struct stat *)buf);
return res;
# else
return internal_syscall(SYSCALL(fstat), fd, (uptr)buf);
# endif
# endif
#else
struct stat64 buf64;
int res = internal_syscall(SYSCALL(fstat64), fd, &buf64);
@ -390,7 +471,7 @@ uptr internal_dup(int oldfd) {
}
uptr internal_dup2(int oldfd, int newfd) {
#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
# if SANITIZER_LINUX
return internal_syscall(SYSCALL(dup3), oldfd, newfd, 0);
#else
return internal_syscall(SYSCALL(dup2), oldfd, newfd);
@ -398,7 +479,7 @@ uptr internal_dup2(int oldfd, int newfd) {
}
uptr internal_readlink(const char *path, char *buf, uptr bufsize) {
#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
# if SANITIZER_LINUX
return internal_syscall(SYSCALL(readlinkat), AT_FDCWD, (uptr)path, (uptr)buf,
bufsize);
#else
@ -407,7 +488,7 @@ uptr internal_readlink(const char *path, char *buf, uptr bufsize) {
}
uptr internal_unlink(const char *path) {
#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
# if SANITIZER_LINUX
return internal_syscall(SYSCALL(unlinkat), AT_FDCWD, (uptr)path, 0);
#else
return internal_syscall(SYSCALL(unlink), (uptr)path);
@ -415,15 +496,15 @@ uptr internal_unlink(const char *path) {
}
uptr internal_rename(const char *oldpath, const char *newpath) {
#if defined(__riscv)
# if (defined(__riscv) || defined(__loongarch__)) && defined(__linux__)
return internal_syscall(SYSCALL(renameat2), AT_FDCWD, (uptr)oldpath, AT_FDCWD,
(uptr)newpath, 0);
#elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
# elif SANITIZER_LINUX
return internal_syscall(SYSCALL(renameat), AT_FDCWD, (uptr)oldpath, AT_FDCWD,
(uptr)newpath);
#else
# else
return internal_syscall(SYSCALL(rename), (uptr)oldpath, (uptr)newpath);
#endif
# endif
}
uptr internal_sched_yield() {
@ -460,17 +541,20 @@ bool FileExists(const char *filename) {
if (ShouldMockFailureToOpen(filename))
return false;
struct stat st;
#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
if (internal_syscall(SYSCALL(newfstatat), AT_FDCWD, filename, &st, 0))
#else
if (internal_stat(filename, &st))
#endif
return false;
// Sanity check: filename is a regular file.
return S_ISREG(st.st_mode);
}
#if !SANITIZER_NETBSD
bool DirExists(const char *path) {
struct stat st;
if (internal_stat(path, &st))
return false;
return S_ISDIR(st.st_mode);
}
# if !SANITIZER_NETBSD
tid_t GetTid() {
#if SANITIZER_FREEBSD
long Tid;
@ -659,48 +743,6 @@ void FutexWake(atomic_uint32_t *p, u32 count) {
# endif
}
enum { MtxUnlocked = 0, MtxLocked = 1, MtxSleeping = 2 };
BlockingMutex::BlockingMutex() {
internal_memset(this, 0, sizeof(*this));
}
void BlockingMutex::Lock() {
CHECK_EQ(owner_, 0);
atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked)
return;
while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked) {
#if SANITIZER_FREEBSD
_umtx_op(m, UMTX_OP_WAIT_UINT, MtxSleeping, 0, 0);
#elif SANITIZER_NETBSD
sched_yield(); /* No userspace futex-like synchronization */
#else
internal_syscall(SYSCALL(futex), (uptr)m, FUTEX_WAIT_PRIVATE, MtxSleeping,
0, 0, 0);
#endif
}
}
void BlockingMutex::Unlock() {
atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
u32 v = atomic_exchange(m, MtxUnlocked, memory_order_release);
CHECK_NE(v, MtxUnlocked);
if (v == MtxSleeping) {
#if SANITIZER_FREEBSD
_umtx_op(m, UMTX_OP_WAKE, 1, 0, 0);
#elif SANITIZER_NETBSD
/* No userspace futex-like synchronization */
#else
internal_syscall(SYSCALL(futex), (uptr)m, FUTEX_WAKE_PRIVATE, 1, 0, 0, 0);
#endif
}
}
void BlockingMutex::CheckLocked() const {
auto m = reinterpret_cast<atomic_uint32_t const *>(&opaque_storage_);
CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed));
}
# endif // !SANITIZER_SOLARIS
// ----------------- sanitizer_linux.h
@ -711,17 +753,17 @@ void BlockingMutex::CheckLocked() const {
// Not used
#else
struct linux_dirent {
#if SANITIZER_X32 || defined(__aarch64__) || SANITIZER_RISCV64
# if SANITIZER_X32 || SANITIZER_LINUX
u64 d_ino;
u64 d_off;
#else
# else
unsigned long d_ino;
unsigned long d_off;
#endif
# endif
unsigned short d_reclen;
#if defined(__aarch64__) || SANITIZER_RISCV64
# if SANITIZER_LINUX
unsigned char d_type;
#endif
# endif
char d_name[256];
};
#endif
@ -757,11 +799,11 @@ int internal_dlinfo(void *handle, int request, void *p) {
uptr internal_getdents(fd_t fd, struct linux_dirent *dirp, unsigned int count) {
#if SANITIZER_FREEBSD
return internal_syscall(SYSCALL(getdirentries), fd, (uptr)dirp, count, NULL);
#elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
# elif SANITIZER_LINUX
return internal_syscall(SYSCALL(getdents64), fd, (uptr)dirp, count);
#else
# else
return internal_syscall(SYSCALL(getdents), fd, (uptr)dirp, count);
#endif
# endif
}
uptr internal_lseek(fd_t fd, OFF_T offset, int whence) {
@ -772,18 +814,29 @@ uptr internal_lseek(fd_t fd, OFF_T offset, int whence) {
uptr internal_prctl(int option, uptr arg2, uptr arg3, uptr arg4, uptr arg5) {
return internal_syscall(SYSCALL(prctl), option, arg2, arg3, arg4, arg5);
}
#endif
# if defined(__x86_64__)
# include <asm/unistd_64.h>
// Currently internal_arch_prctl() is only needed on x86_64.
uptr internal_arch_prctl(int option, uptr arg2) {
return internal_syscall(__NR_arch_prctl, option, arg2);
}
# endif
# endif
uptr internal_sigaltstack(const void *ss, void *oss) {
return internal_syscall(SYSCALL(sigaltstack), (uptr)ss, (uptr)oss);
}
int internal_fork() {
#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
# if SANITIZER_LINUX
# if SANITIZER_S390
return internal_syscall(SYSCALL(clone), 0, SIGCHLD);
# else
return internal_syscall(SYSCALL(clone), SIGCHLD, 0);
#else
# endif
# else
return internal_syscall(SYSCALL(fork));
#endif
# endif
}
#if SANITIZER_FREEBSD
@ -911,6 +964,10 @@ bool internal_sigismember(__sanitizer_sigset_t *set, int signum) {
return k_set->sig[idx] & ((uptr)1 << bit);
}
#elif SANITIZER_FREEBSD
uptr internal_procctl(int type, int id, int cmd, void *data) {
return internal_syscall(SYSCALL(procctl), type, id, cmd, data);
}
void internal_sigdelset(__sanitizer_sigset_t *set, int signum) {
sigset_t *rset = reinterpret_cast<sigset_t *>(set);
sigdelset(rset, signum);
@ -1052,7 +1109,7 @@ uptr GetMaxVirtualAddress() {
#if SANITIZER_NETBSD && defined(__x86_64__)
return 0x7f7ffffff000ULL; // (0x00007f8000000000 - PAGE_SIZE)
#elif SANITIZER_WORDSIZE == 64
# if defined(__powerpc64__) || defined(__aarch64__)
# if defined(__powerpc64__) || defined(__aarch64__) || defined(__loongarch__)
// On PowerPC64 we have two different address space layouts: 44- and 46-bit.
// We somehow need to figure out which one we are using now and choose
// one of 0x00000fffffffffffUL and 0x00003fffffffffffUL.
@ -1060,10 +1117,11 @@ uptr GetMaxVirtualAddress() {
// of the address space, so simply checking the stack address is not enough.
// This should (does) work for both PowerPC64 Endian modes.
// Similarly, aarch64 has multiple address space layouts: 39, 42 and 47-bit.
// loongarch64 also has multiple address space layouts: default is 47-bit.
return (1ULL << (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1)) - 1;
#elif SANITIZER_RISCV64
return (1ULL << 38) - 1;
# elif defined(__mips64)
# elif SANITIZER_MIPS64
return (1ULL << 40) - 1; // 0x000000ffffffffffUL;
# elif defined(__s390x__)
return (1ULL << 53) - 1; // 0x001fffffffffffffUL;
@ -1217,7 +1275,8 @@ void ForEachMappedRegion(link_map *map, void (*cb)(const void *, uptr)) {
}
#endif
#if defined(__x86_64__) && SANITIZER_LINUX
#if SANITIZER_LINUX
#if defined(__x86_64__)
// We cannot use glibc's clone wrapper, because it messes with the child
// task's TLS. It writes the PID and TID of the child task to its thread
// descriptor, but in our case the child task shares the thread descriptor with
@ -1399,7 +1458,7 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
#elif defined(__aarch64__)
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
int *parent_tidptr, void *newtls, int *child_tidptr) {
long long res;
register long long res __asm__("x0");
if (!fn || !child_stack)
return -EINVAL;
CHECK_EQ(0, (uptr)child_stack % 16);
@ -1447,6 +1506,47 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
: "x30", "memory");
return res;
}
#elif SANITIZER_LOONGARCH64
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
int *parent_tidptr, void *newtls, int *child_tidptr) {
if (!fn || !child_stack)
return -EINVAL;
CHECK_EQ(0, (uptr)child_stack % 16);
register int res __asm__("$a0");
register int __flags __asm__("$a0") = flags;
register void *__stack __asm__("$a1") = child_stack;
register int *__ptid __asm__("$a2") = parent_tidptr;
register int *__ctid __asm__("$a3") = child_tidptr;
register void *__tls __asm__("$a4") = newtls;
register int (*__fn)(void *) __asm__("$a5") = fn;
register void *__arg __asm__("$a6") = arg;
register int nr_clone __asm__("$a7") = __NR_clone;
__asm__ __volatile__(
"syscall 0\n"
// if ($a0 != 0)
// return $a0;
"bnez $a0, 1f\n"
// In the child, now. Call "fn(arg)".
"move $a0, $a6\n"
"jirl $ra, $a5, 0\n"
// Call _exit($a0).
"addi.d $a7, $zero, %9\n"
"syscall 0\n"
"1:\n"
: "=r"(res)
: "0"(__flags), "r"(__stack), "r"(__ptid), "r"(__ctid), "r"(__tls),
"r"(__fn), "r"(__arg), "r"(nr_clone), "i"(__NR_exit)
: "memory", "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", "$t8");
return res;
}
#elif defined(__powerpc64__)
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
int *parent_tidptr, void *newtls, int *child_tidptr) {
@ -1556,7 +1656,7 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
: "cr0", "cr1", "memory", "ctr", "r0", "r27", "r28", "r29");
return res;
}
#elif defined(__i386__) && SANITIZER_LINUX
#elif defined(__i386__)
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
int *parent_tidptr, void *newtls, int *child_tidptr) {
int res;
@ -1621,7 +1721,7 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
: "memory");
return res;
}
#elif defined(__arm__) && SANITIZER_LINUX
#elif defined(__arm__)
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
int *parent_tidptr, void *newtls, int *child_tidptr) {
unsigned int res;
@ -1687,7 +1787,8 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
: "memory");
return res;
}
#endif // defined(__x86_64__) && SANITIZER_LINUX
#endif
#endif // SANITIZER_LINUX
#if SANITIZER_LINUX
int internal_uname(struct utsname *buf) {
@ -1778,23 +1879,18 @@ HandleSignalMode GetHandleSignalMode(int signum) {
#if !SANITIZER_GO
void *internal_start_thread(void *(*func)(void *arg), void *arg) {
if (&real_pthread_create == 0)
return nullptr;
// Start the thread with signals blocked, otherwise it can steal user signals.
__sanitizer_sigset_t set, old;
internal_sigfillset(&set);
#if SANITIZER_LINUX && !SANITIZER_ANDROID
// Glibc uses SIGSETXID signal during setuid call. If this signal is blocked
// on any thread, setuid call hangs (see test/tsan/setuid.c).
internal_sigdelset(&set, 33);
#endif
internal_sigprocmask(SIG_SETMASK, &set, &old);
ScopedBlockSignals block(nullptr);
void *th;
real_pthread_create(&th, nullptr, func, arg);
internal_sigprocmask(SIG_SETMASK, &old, nullptr);
return th;
}
void internal_join_thread(void *th) {
real_pthread_join(th, nullptr);
if (&real_pthread_join)
real_pthread_join(th, nullptr);
}
#else
void *internal_start_thread(void *(*func)(void *), void *arg) { return 0; }
@ -1802,7 +1898,7 @@ void *internal_start_thread(void *(*func)(void *), void *arg) { return 0; }
void internal_join_thread(void *th) {}
#endif
#if defined(__aarch64__)
#if SANITIZER_LINUX && defined(__aarch64__)
// Android headers in the older NDK releases miss this definition.
struct __sanitizer_esr_context {
struct _aarch64_ctx head;
@ -1811,7 +1907,7 @@ struct __sanitizer_esr_context {
static bool Aarch64GetESR(ucontext_t *ucontext, u64 *esr) {
static const u32 kEsrMagic = 0x45535201;
u8 *aux = ucontext->uc_mcontext.__reserved;
u8 *aux = reinterpret_cast<u8 *>(ucontext->uc_mcontext.__reserved);
while (true) {
_aarch64_ctx *ctx = (_aarch64_ctx *)aux;
if (ctx->size == 0) break;
@ -1823,6 +1919,11 @@ static bool Aarch64GetESR(ucontext_t *ucontext, u64 *esr) {
}
return false;
}
#elif SANITIZER_FREEBSD && defined(__aarch64__)
// FreeBSD doesn't provide ESR in the ucontext.
static bool Aarch64GetESR(ucontext_t *ucontext, u64 *esr) {
return false;
}
#endif
using Context = ucontext_t;
@ -1841,7 +1942,7 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
#else
uptr err = ucontext->uc_mcontext.gregs[REG_ERR];
#endif // SANITIZER_FREEBSD
return err & PF_WRITE ? WRITE : READ;
return err & PF_WRITE ? Write : Read;
#elif defined(__mips__)
uint32_t *exception_source;
uint32_t faulty_instruction;
@ -1864,7 +1965,7 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
case 0x2a: // swl
case 0x2e: // swr
#endif
return SignalContext::WRITE;
return SignalContext::Write;
case 0x20: // lb
case 0x24: // lbu
@ -1879,27 +1980,34 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
case 0x22: // lwl
case 0x26: // lwr
#endif
return SignalContext::READ;
return SignalContext::Read;
#if __mips_isa_rev == 6
case 0x3b: // pcrel
op_code = (faulty_instruction >> 19) & 0x3;
switch (op_code) {
case 0x1: // lwpc
case 0x2: // lwupc
return SignalContext::READ;
return SignalContext::Read;
}
#endif
}
return SignalContext::UNKNOWN;
return SignalContext::Unknown;
#elif defined(__arm__)
static const uptr FSR_WRITE = 1U << 11;
uptr fsr = ucontext->uc_mcontext.error_code;
return fsr & FSR_WRITE ? WRITE : READ;
return fsr & FSR_WRITE ? Write : Read;
#elif defined(__aarch64__)
static const u64 ESR_ELx_WNR = 1U << 6;
u64 esr;
if (!Aarch64GetESR(ucontext, &esr)) return UNKNOWN;
return esr & ESR_ELx_WNR ? WRITE : READ;
if (!Aarch64GetESR(ucontext, &esr)) return Unknown;
return esr & ESR_ELx_WNR ? Write : Read;
#elif defined(__loongarch__)
u32 flags = ucontext->uc_mcontext.__flags;
if (flags & SC_ADDRERR_RD)
return SignalContext::Read;
if (flags & SC_ADDRERR_WR)
return SignalContext::Write;
return SignalContext::Unknown;
#elif defined(__sparc__)
// Decode the instruction to determine the access type.
// From OpenSolaris $SRC/uts/sun4/os/trap.c (get_accesstype).
@ -1915,9 +2023,13 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
#endif
#endif
u32 instr = *(u32 *)pc;
return (instr >> 21) & 1 ? WRITE: READ;
return (instr >> 21) & 1 ? Write: Read;
#elif defined(__riscv)
#if SANITIZER_FREEBSD
unsigned long pc = ucontext->uc_mcontext.mc_gpregs.gp_sepc;
#else
unsigned long pc = ucontext->uc_mcontext.__gregs[REG_PC];
#endif
unsigned faulty_instruction = *(uint16_t *)pc;
#if defined(__riscv_compressed)
@ -1931,7 +2043,7 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
#if __riscv_xlen == 64
case 0b10'011: // c.ldsp (rd != x0)
#endif
return rd ? SignalContext::READ : SignalContext::UNKNOWN;
return rd ? SignalContext::Read : SignalContext::Unknown;
case 0b00'010: // c.lw
#if __riscv_flen >= 32 && __riscv_xlen == 32
case 0b10'011: // c.flwsp
@ -1943,7 +2055,7 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
case 0b00'001: // c.fld
case 0b10'001: // c.fldsp
#endif
return SignalContext::READ;
return SignalContext::Read;
case 0b00'110: // c.sw
case 0b10'110: // c.swsp
#if __riscv_flen >= 32 || __riscv_xlen == 64
@ -1954,9 +2066,9 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
case 0b00'101: // c.fsd
case 0b10'101: // c.fsdsp
#endif
return SignalContext::WRITE;
return SignalContext::Write;
default:
return SignalContext::UNKNOWN;
return SignalContext::Unknown;
}
}
#endif
@ -1974,9 +2086,9 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
#endif
case 0b100: // lbu
case 0b101: // lhu
return SignalContext::READ;
return SignalContext::Read;
default:
return SignalContext::UNKNOWN;
return SignalContext::Unknown;
}
case 0b0100011: // stores
switch (funct3) {
@ -1986,9 +2098,9 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
#if __riscv_xlen == 64
case 0b011: // sd
#endif
return SignalContext::WRITE;
return SignalContext::Write;
default:
return SignalContext::UNKNOWN;
return SignalContext::Unknown;
}
#if __riscv_flen >= 32
case 0b0000111: // floating-point loads
@ -1997,9 +2109,9 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
#if __riscv_flen == 64
case 0b011: // fld
#endif
return SignalContext::READ;
return SignalContext::Read;
default:
return SignalContext::UNKNOWN;
return SignalContext::Unknown;
}
case 0b0100111: // floating-point stores
switch (funct3) {
@ -2007,17 +2119,17 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
#if __riscv_flen == 64
case 0b011: // fsd
#endif
return SignalContext::WRITE;
return SignalContext::Write;
default:
return SignalContext::UNKNOWN;
return SignalContext::Unknown;
}
#endif
default:
return SignalContext::UNKNOWN;
return SignalContext::Unknown;
}
#else
(void)ucontext;
return UNKNOWN; // FIXME: Implement.
return Unknown; // FIXME: Implement.
#endif
}
@ -2044,10 +2156,17 @@ static void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
*bp = ucontext->uc_mcontext.arm_fp;
*sp = ucontext->uc_mcontext.arm_sp;
#elif defined(__aarch64__)
# if SANITIZER_FREEBSD
ucontext_t *ucontext = (ucontext_t*)context;
*pc = ucontext->uc_mcontext.mc_gpregs.gp_elr;
*bp = ucontext->uc_mcontext.mc_gpregs.gp_x[29];
*sp = ucontext->uc_mcontext.mc_gpregs.gp_sp;
# else
ucontext_t *ucontext = (ucontext_t*)context;
*pc = ucontext->uc_mcontext.pc;
*bp = ucontext->uc_mcontext.regs[29];
*sp = ucontext->uc_mcontext.sp;
# endif
#elif defined(__hppa__)
ucontext_t *ucontext = (ucontext_t*)context;
*pc = ucontext->uc_mcontext.sc_iaoq[0];
@ -2092,12 +2211,19 @@ static void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
*sp = ucontext->uc_mcontext.gregs[REG_UESP];
# endif
#elif defined(__powerpc__) || defined(__powerpc64__)
# if SANITIZER_FREEBSD
ucontext_t *ucontext = (ucontext_t *)context;
*pc = ucontext->uc_mcontext.mc_srr0;
*sp = ucontext->uc_mcontext.mc_frame[1];
*bp = ucontext->uc_mcontext.mc_frame[31];
# else
ucontext_t *ucontext = (ucontext_t*)context;
*pc = ucontext->uc_mcontext.regs->nip;
*sp = ucontext->uc_mcontext.regs->gpr[PT_R1];
// The powerpc{,64}-linux ABIs do not specify r31 as the frame
// pointer, but GCC always uses r31 when we need a frame pointer.
*bp = ucontext->uc_mcontext.regs->gpr[PT_R31];
# endif
#elif defined(__sparc__)
#if defined(__arch64__) || defined(__sparcv9)
#define STACK_BIAS 2047
@ -2136,12 +2262,28 @@ static void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
*sp = ucontext->uc_mcontext.gregs[15];
#elif defined(__riscv)
ucontext_t *ucontext = (ucontext_t*)context;
# if SANITIZER_FREEBSD
*pc = ucontext->uc_mcontext.mc_gpregs.gp_sepc;
*bp = ucontext->uc_mcontext.mc_gpregs.gp_s[0];
*sp = ucontext->uc_mcontext.mc_gpregs.gp_sp;
# else
*pc = ucontext->uc_mcontext.__gregs[REG_PC];
*bp = ucontext->uc_mcontext.__gregs[REG_S0];
*sp = ucontext->uc_mcontext.__gregs[REG_SP];
#else
# error "Unsupported arch"
#endif
# endif
# elif defined(__hexagon__)
ucontext_t *ucontext = (ucontext_t *)context;
*pc = ucontext->uc_mcontext.pc;
*bp = ucontext->uc_mcontext.r30;
*sp = ucontext->uc_mcontext.r29;
# elif defined(__loongarch__)
ucontext_t *ucontext = (ucontext_t *)context;
*pc = ucontext->uc_mcontext.__pc;
*bp = ucontext->uc_mcontext.__gregs[22];
*sp = ucontext->uc_mcontext.__gregs[3];
# else
# error "Unsupported arch"
# endif
}
void SignalContext::InitPcSpBp() { GetPcSpBp(context, &pc, &sp, &bp); }
@ -2150,10 +2292,6 @@ void InitializePlatformEarly() {
// Do nothing.
}
void MaybeReexec() {
// No need to re-exec on Linux.
}
void CheckASLR() {
#if SANITIZER_NETBSD
int mib[3];
@ -2175,49 +2313,35 @@ void CheckASLR() {
GetArgv()[0]);
Die();
}
#elif SANITIZER_PPC64V2
// Disable ASLR for Linux PPC64LE.
int old_personality = personality(0xffffffff);
if (old_personality != -1 && (old_personality & ADDR_NO_RANDOMIZE) == 0) {
VReport(1, "WARNING: Program is being run with address space layout "
"randomization (ASLR) enabled which prevents the thread and "
"memory sanitizers from working on powerpc64le.\n"
"ASLR will be disabled and the program re-executed.\n");
CHECK_NE(personality(old_personality | ADDR_NO_RANDOMIZE), -1);
ReExec();
}
#elif SANITIZER_FREEBSD
int aslr_pie;
uptr len = sizeof(aslr_pie);
#if SANITIZER_WORDSIZE == 64
if (UNLIKELY(internal_sysctlbyname("kern.elf64.aslr.pie_enable",
&aslr_pie, &len, NULL, 0) == -1)) {
int aslr_status;
int r = internal_procctl(P_PID, 0, PROC_ASLR_STATUS, &aslr_status);
if (UNLIKELY(r == -1)) {
// We're making things less 'dramatic' here since
// the OID is not necessarily guaranteed to be here
// the cmd is not necessarily guaranteed to be here
// just yet regarding FreeBSD release
return;
}
if (aslr_pie > 0) {
if ((aslr_status & PROC_ASLR_ACTIVE) != 0) {
Printf("This sanitizer is not compatible with enabled ASLR "
"and binaries compiled with PIE\n");
Die();
}
#endif
// there might be 32 bits compat for 64 bits
if (UNLIKELY(internal_sysctlbyname("kern.elf32.aslr.pie_enable",
&aslr_pie, &len, NULL, 0) == -1)) {
return;
# elif SANITIZER_PPC64V2
// Disable ASLR for Linux PPC64LE.
int old_personality = personality(0xffffffff);
if (old_personality != -1 && (old_personality & ADDR_NO_RANDOMIZE) == 0) {
VReport(1,
"WARNING: Program is being run with address space layout "
"randomization (ASLR) enabled which prevents the thread and "
"memory sanitizers from working on powerpc64le.\n"
"ASLR will be disabled and the program re-executed.\n");
CHECK_NE(personality(old_personality | ADDR_NO_RANDOMIZE), -1);
ReExec();
}
if (aslr_pie > 0) {
Printf("This sanitizer is not compatible with enabled ASLR "
"and binaries compiled with PIE\n");
Die();
}
#else
# else
// Do nothing
#endif
# endif
}
void CheckMPROTECT() {

View File

@ -49,26 +49,44 @@ uptr internal_getdents(fd_t fd, struct linux_dirent *dirp, unsigned int count);
uptr internal_sigaltstack(const void* ss, void* oss);
uptr internal_sigprocmask(int how, __sanitizer_sigset_t *set,
__sanitizer_sigset_t *oldset);
#if SANITIZER_GLIBC
void SetSigProcMask(__sanitizer_sigset_t *set, __sanitizer_sigset_t *oldset);
void BlockSignals(__sanitizer_sigset_t *oldset = nullptr);
struct ScopedBlockSignals {
explicit ScopedBlockSignals(__sanitizer_sigset_t *copy);
~ScopedBlockSignals();
ScopedBlockSignals &operator=(const ScopedBlockSignals &) = delete;
ScopedBlockSignals(const ScopedBlockSignals &) = delete;
private:
__sanitizer_sigset_t saved_;
};
# if SANITIZER_GLIBC
uptr internal_clock_gettime(__sanitizer_clockid_t clk_id, void *tp);
#endif
// Linux-only syscalls.
#if SANITIZER_LINUX
uptr internal_prctl(int option, uptr arg2, uptr arg3, uptr arg4, uptr arg5);
# if defined(__x86_64__)
uptr internal_arch_prctl(int option, uptr arg2);
# endif
// Used only by sanitizer_stoptheworld. Signal handlers that are actually used
// (like the process-wide error reporting SEGV handler) must use
// internal_sigaction instead.
int internal_sigaction_norestorer(int signum, const void *act, void *oldact);
void internal_sigdelset(__sanitizer_sigset_t *set, int signum);
#if defined(__x86_64__) || defined(__mips__) || defined(__aarch64__) || \
defined(__powerpc64__) || defined(__s390__) || defined(__i386__) || \
defined(__arm__) || SANITIZER_RISCV64
# if defined(__x86_64__) || defined(__mips__) || defined(__aarch64__) || \
defined(__powerpc64__) || defined(__s390__) || defined(__i386__) || \
defined(__arm__) || SANITIZER_RISCV64 || SANITIZER_LOONGARCH64
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
int *parent_tidptr, void *newtls, int *child_tidptr);
#endif
int internal_uname(struct utsname *buf);
#elif SANITIZER_FREEBSD
uptr internal_procctl(int type, int id, int cmd, void *data);
void internal_sigdelset(__sanitizer_sigset_t *set, int signum);
#elif SANITIZER_NETBSD
void internal_sigdelset(__sanitizer_sigset_t *set, int signum);
@ -135,6 +153,9 @@ inline void ReleaseMemoryPagesToOSAndZeroFill(uptr beg, uptr end) {
"rdhwr %0,$29\n" \
".set pop\n" : "=r"(__v)); \
__v; })
#elif defined (__riscv)
# define __get_tls() \
({ void** __v; __asm__("mv %0, tp" : "=r"(__v)); __v; })
#elif defined(__i386__)
# define __get_tls() \
({ void** __v; __asm__("movl %%gs:0, %0" : "=r"(__v)); __v; })

View File

@ -27,6 +27,7 @@
#include "sanitizer_linux.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_procmaps.h"
#include "sanitizer_solaris.h"
#if SANITIZER_NETBSD
#define _RTLD_SOURCE // for __lwp_gettcb_fast() / __lwp_getprivate_fast()
@ -62,6 +63,7 @@
#endif
#if SANITIZER_SOLARIS
#include <stddef.h>
#include <stdlib.h>
#include <thread.h>
#endif
@ -146,7 +148,7 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
pthread_attr_t attr;
pthread_attr_init(&attr);
CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
my_pthread_attr_getstack(&attr, &stackaddr, &stacksize);
internal_pthread_attr_getstack(&attr, &stackaddr, &stacksize);
pthread_attr_destroy(&attr);
#endif // SANITIZER_SOLARIS
@ -203,7 +205,8 @@ void InitTlsSize() {
g_use_dlpi_tls_data =
GetLibcVersion(&major, &minor, &patch) && major == 2 && minor >= 25;
#if defined(__aarch64__) || defined(__x86_64__) || defined(__powerpc64__)
#if defined(__aarch64__) || defined(__x86_64__) || defined(__powerpc64__) || \
defined(__loongarch__)
void *get_tls_static_info = dlsym(RTLD_NEXT, "_dl_get_tls_static_info");
size_t tls_align;
((void (*)(size_t *, size_t *))get_tls_static_info)(&g_tls_size, &tls_align);
@ -216,14 +219,13 @@ void InitTlsSize() { }
// On glibc x86_64, ThreadDescriptorSize() needs to be precise due to the usage
// of g_tls_size. On other targets, ThreadDescriptorSize() is only used by lsan
// to get the pointer to thread-specific data keys in the thread control block.
#if (SANITIZER_FREEBSD || SANITIZER_LINUX) && !SANITIZER_ANDROID
#if (SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_SOLARIS) && \
!SANITIZER_ANDROID && !SANITIZER_GO
// sizeof(struct pthread) from glibc.
static atomic_uintptr_t thread_descriptor_size;
uptr ThreadDescriptorSize() {
uptr val = atomic_load_relaxed(&thread_descriptor_size);
if (val)
return val;
static uptr ThreadDescriptorSizeFallback() {
uptr val = 0;
#if defined(__x86_64__) || defined(__i386__) || defined(__arm__)
int major;
int minor;
@ -264,6 +266,8 @@ uptr ThreadDescriptorSize() {
#elif defined(__mips__)
// TODO(sagarthakur): add more values as per different glibc versions.
val = FIRST_32_SECOND_64(1152, 1776);
#elif SANITIZER_LOONGARCH64
val = 1856; // from glibc 2.36
#elif SANITIZER_RISCV64
int major;
int minor;
@ -285,12 +289,26 @@ uptr ThreadDescriptorSize() {
#elif defined(__powerpc64__)
val = 1776; // from glibc.ppc64le 2.20-8.fc21
#endif
if (val)
atomic_store_relaxed(&thread_descriptor_size, val);
return val;
}
#if defined(__mips__) || defined(__powerpc64__) || SANITIZER_RISCV64
uptr ThreadDescriptorSize() {
uptr val = atomic_load_relaxed(&thread_descriptor_size);
if (val)
return val;
// _thread_db_sizeof_pthread is a GLIBC_PRIVATE symbol that is exported in
// glibc 2.34 and later.
if (unsigned *psizeof = static_cast<unsigned *>(
dlsym(RTLD_DEFAULT, "_thread_db_sizeof_pthread")))
val = *psizeof;
if (!val)
val = ThreadDescriptorSizeFallback();
atomic_store_relaxed(&thread_descriptor_size, val);
return val;
}
#if defined(__mips__) || defined(__powerpc64__) || SANITIZER_RISCV64 || \
SANITIZER_LOONGARCH64
// TlsPreTcbSize includes size of struct pthread_descr and size of tcb
// head structure. It lies before the static tls blocks.
static uptr TlsPreTcbSize() {
@ -300,6 +318,8 @@ static uptr TlsPreTcbSize() {
const uptr kTcbHead = 88; // sizeof (tcbhead_t)
#elif SANITIZER_RISCV64
const uptr kTcbHead = 16; // sizeof (tcbhead_t)
#elif SANITIZER_LOONGARCH64
const uptr kTcbHead = 16; // sizeof (tcbhead_t)
#endif
const uptr kTlsAlign = 16;
const uptr kTlsPreTcbSize =
@ -308,7 +328,6 @@ static uptr TlsPreTcbSize() {
}
#endif
#if !SANITIZER_GO
namespace {
struct TlsBlock {
uptr begin, end, align;
@ -339,19 +358,43 @@ static uptr TlsGetOffset(uptr ti_module, uptr ti_offset) {
extern "C" void *__tls_get_addr(size_t *);
#endif
static size_t main_tls_modid;
static int CollectStaticTlsBlocks(struct dl_phdr_info *info, size_t size,
void *data) {
if (!info->dlpi_tls_modid)
size_t tls_modid;
#if SANITIZER_SOLARIS
// dlpi_tls_modid is only available since Solaris 11.4 SRU 10. Use
// dlinfo(RTLD_DI_LINKMAP) instead which works on all of Solaris 11.3,
// 11.4, and Illumos. The tlsmodid of the executable was changed to 1 in
// 11.4 to match other implementations.
if (size >= offsetof(dl_phdr_info_test, dlpi_tls_modid))
main_tls_modid = 1;
else
main_tls_modid = 0;
g_use_dlpi_tls_data = 0;
Rt_map *map;
dlinfo(RTLD_SELF, RTLD_DI_LINKMAP, &map);
tls_modid = map->rt_tlsmodid;
#else
main_tls_modid = 1;
tls_modid = info->dlpi_tls_modid;
#endif
if (tls_modid < main_tls_modid)
return 0;
uptr begin = (uptr)info->dlpi_tls_data;
uptr begin;
#if !SANITIZER_SOLARIS
begin = (uptr)info->dlpi_tls_data;
#endif
if (!g_use_dlpi_tls_data) {
// Call __tls_get_addr as a fallback. This forces TLS allocation on glibc
// and FreeBSD.
#ifdef __s390__
begin = (uptr)__builtin_thread_pointer() +
TlsGetOffset(info->dlpi_tls_modid, 0);
TlsGetOffset(tls_modid, 0);
#else
size_t mod_and_off[2] = {info->dlpi_tls_modid, 0};
size_t mod_and_off[2] = {tls_modid, 0};
begin = (uptr)__tls_get_addr(mod_and_off);
#endif
}
@ -359,7 +402,7 @@ static int CollectStaticTlsBlocks(struct dl_phdr_info *info, size_t size,
if (info->dlpi_phdr[i].p_type == PT_TLS) {
static_cast<InternalMmapVector<TlsBlock> *>(data)->push_back(
TlsBlock{begin, begin + info->dlpi_phdr[i].p_memsz,
info->dlpi_phdr[i].p_align, info->dlpi_tls_modid});
info->dlpi_phdr[i].p_align, tls_modid});
break;
}
return 0;
@ -371,11 +414,11 @@ __attribute__((unused)) static void GetStaticTlsBoundary(uptr *addr, uptr *size,
dl_iterate_phdr(CollectStaticTlsBlocks, &ranges);
uptr len = ranges.size();
Sort(ranges.begin(), len);
// Find the range with tls_modid=1. For glibc, because libc.so uses PT_TLS,
// this module is guaranteed to exist and is one of the initially loaded
// modules.
// Find the range with tls_modid == main_tls_modid. For glibc, because
// libc.so uses PT_TLS, this module is guaranteed to exist and is one of
// the initially loaded modules.
uptr one = 0;
while (one != len && ranges[one].tls_modid != 1) ++one;
while (one != len && ranges[one].tls_modid != main_tls_modid) ++one;
if (one == len) {
// This may happen with musl if no module uses PT_TLS.
*addr = 0;
@ -384,21 +427,20 @@ __attribute__((unused)) static void GetStaticTlsBoundary(uptr *addr, uptr *size,
return;
}
// Find the maximum consecutive ranges. We consider two modules consecutive if
// the gap is smaller than the alignment. The dynamic loader places static TLS
// blocks this way not to waste space.
// the gap is smaller than the alignment of the latter range. The dynamic
// loader places static TLS blocks this way not to waste space.
uptr l = one;
*align = ranges[l].align;
while (l != 0 && ranges[l].begin < ranges[l - 1].end + ranges[l - 1].align)
while (l != 0 && ranges[l].begin < ranges[l - 1].end + ranges[l].align)
*align = Max(*align, ranges[--l].align);
uptr r = one + 1;
while (r != len && ranges[r].begin < ranges[r - 1].end + ranges[r - 1].align)
while (r != len && ranges[r].begin < ranges[r - 1].end + ranges[r].align)
*align = Max(*align, ranges[r++].align);
*addr = ranges[l].begin;
*size = ranges[r - 1].end - ranges[l].begin;
}
#endif // !SANITIZER_GO
#endif // (x86_64 || i386 || mips || ...) && (SANITIZER_FREEBSD ||
// SANITIZER_LINUX) && !SANITIZER_ANDROID
// SANITIZER_LINUX) && !SANITIZER_ANDROID && !SANITIZER_GO
#if SANITIZER_NETBSD
static struct tls_tcb * ThreadSelfTlsTcb() {
@ -452,7 +494,11 @@ static void GetTls(uptr *addr, uptr *size) {
#elif SANITIZER_GLIBC && defined(__x86_64__)
// For aarch64 and x86-64, use an O(1) approach which requires relatively
// precise ThreadDescriptorSize. g_tls_size was initialized in InitTlsSize.
# if SANITIZER_X32
asm("mov %%fs:8,%0" : "=r"(*addr));
# else
asm("mov %%fs:16,%0" : "=r"(*addr));
# endif
*size = g_tls_size;
*addr -= *size;
*addr += ThreadDescriptorSize();
@ -460,6 +506,15 @@ static void GetTls(uptr *addr, uptr *size) {
*addr = reinterpret_cast<uptr>(__builtin_thread_pointer()) -
ThreadDescriptorSize();
*size = g_tls_size + ThreadDescriptorSize();
#elif SANITIZER_GLIBC && defined(__loongarch__)
# ifdef __clang__
*addr = reinterpret_cast<uptr>(__builtin_thread_pointer()) -
ThreadDescriptorSize();
# else
asm("or %0,$tp,$zero" : "=r"(*addr));
*addr -= ThreadDescriptorSize();
# endif
*size = g_tls_size + ThreadDescriptorSize();
#elif SANITIZER_GLIBC && defined(__powerpc64__)
// Workaround for glibc<2.25(?). 2.27 is known to not need this.
uptr tp;
@ -467,7 +522,7 @@ static void GetTls(uptr *addr, uptr *size) {
const uptr pre_tcb_size = TlsPreTcbSize();
*addr = tp - pre_tcb_size;
*size = g_tls_size + pre_tcb_size;
#elif SANITIZER_FREEBSD || SANITIZER_LINUX
#elif SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_SOLARIS
uptr align;
GetStaticTlsBoundary(addr, size, &align);
#if defined(__x86_64__) || defined(__i386__) || defined(__s390__) || \
@ -528,10 +583,6 @@ static void GetTls(uptr *addr, uptr *size) {
*addr = (uptr)tcb->tcb_dtv[1];
}
}
#elif SANITIZER_SOLARIS
// FIXME
*addr = 0;
*size = 0;
#else
#error "Unknown OS"
#endif
@ -603,6 +654,34 @@ static int AddModuleSegments(const char *module_name, dl_phdr_info *info,
bool writable = phdr->p_flags & PF_W;
cur_module.addAddressRange(cur_beg, cur_end, executable,
writable);
} else if (phdr->p_type == PT_NOTE) {
# ifdef NT_GNU_BUILD_ID
uptr off = 0;
while (off + sizeof(ElfW(Nhdr)) < phdr->p_memsz) {
auto *nhdr = reinterpret_cast<const ElfW(Nhdr) *>(info->dlpi_addr +
phdr->p_vaddr + off);
constexpr auto kGnuNamesz = 4; // "GNU" with NUL-byte.
static_assert(kGnuNamesz % 4 == 0, "kGnuNameSize is aligned to 4.");
if (nhdr->n_type == NT_GNU_BUILD_ID && nhdr->n_namesz == kGnuNamesz) {
if (off + sizeof(ElfW(Nhdr)) + nhdr->n_namesz + nhdr->n_descsz >
phdr->p_memsz) {
// Something is very wrong, bail out instead of reading potentially
// arbitrary memory.
break;
}
const char *name =
reinterpret_cast<const char *>(nhdr) + sizeof(*nhdr);
if (internal_memcmp(name, "GNU", 3) == 0) {
const char *value = reinterpret_cast<const char *>(nhdr) +
sizeof(*nhdr) + kGnuNamesz;
cur_module.setUuid(value, nhdr->n_descsz);
break;
}
}
off += sizeof(*nhdr) + RoundUpTo(nhdr->n_namesz, 4) +
RoundUpTo(nhdr->n_descsz, 4);
}
# endif
}
}
modules->push_back(cur_module);

View File

@ -57,8 +57,10 @@ uptr internal_mmap(void *addr, uptr length, int prot, int flags, int fd,
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
int *parent_tidptr, void *newtls, int *child_tidptr) {
if (!fn || !child_stack)
return -EINVAL;
if (!fn || !child_stack) {
errno = EINVAL;
return -1;
}
CHECK_EQ(0, (uptr)child_stack % 16);
// Minimum frame size.
#ifdef __s390x__
@ -71,9 +73,9 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
// And pass parameters.
((unsigned long *)child_stack)[1] = (uptr)fn;
((unsigned long *)child_stack)[2] = (uptr)arg;
register long res __asm__("r2");
register uptr res __asm__("r2");
register void *__cstack __asm__("r2") = child_stack;
register int __flags __asm__("r3") = flags;
register long __flags __asm__("r3") = flags;
register int * __ptidptr __asm__("r4") = parent_tidptr;
register int * __ctidptr __asm__("r5") = child_tidptr;
register void * __newtls __asm__("r6") = newtls;
@ -113,6 +115,10 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
"r"(__ctidptr),
"r"(__newtls)
: "memory", "cc");
if (res >= (uptr)-4095) {
errno = -res;
return -1;
}
return res;
}

View File

@ -17,7 +17,7 @@
// instantiated with the `LocalAddressSpaceView` type. This type is used to
// load any pointers in instance methods. This implementation is effectively
// a no-op. When an object is to be used in an out-of-process manner it is
// instansiated with the `RemoteAddressSpaceView` type.
// instantiated with the `RemoteAddressSpaceView` type.
//
// By making `AddressSpaceView` a template parameter of an object, it can
// change its implementation at compile time which has no run time overhead.

View File

@ -0,0 +1,159 @@
//===-- sanitizer_lzw.h -----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// LempelZivWelch encoding/decoding
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_LZW_H
#define SANITIZER_LZW_H
#include "sanitizer_dense_map.h"
namespace __sanitizer {
using LzwCodeType = u32;
// Encodes [begin, end) with LZW and writes the resulting code stream to
// `out`. Output layout: <number of distinct items> <the distinct items,
// sorted> <LZW codes...>. Returns the output iterator past the last value
// written.
template <class T, class ItIn, class ItOut>
ItOut LzwEncode(ItIn begin, ItIn end, ItOut out) {
  using Substring =
      detail::DenseMapPair<LzwCodeType /* Prefix */, T /* Next input */>;

  // Sentinel value for substrings of len 1 (i.e. entries with no prefix).
  // Chosen below both DenseMap reserved keys so it never collides with a
  // real code.
  static constexpr LzwCodeType kNoPrefix =
      Min(DenseMapInfo<Substring>::getEmptyKey().first,
          DenseMapInfo<Substring>::getTombstoneKey().first) -
      1;
  DenseMap<Substring, LzwCodeType> prefix_to_code;
  {
    // Add all substrings of len 1 as the initial dictionary.
    InternalMmapVector<T> dict_len1;
    for (auto it = begin; it != end; ++it)
      if (prefix_to_code.try_emplace({kNoPrefix, *it}, 0).second)
        dict_len1.push_back(*it);

    // Slightly helps with later delta encoding.
    Sort(dict_len1.data(), dict_len1.size());

    // For large sizeof(T) we have to store dict_len1. Smaller types like u8
    // can just generate them.
    *out = dict_len1.size();
    ++out;
    for (uptr i = 0; i != dict_len1.size(); ++i) {
      // Remap after the Sort.
      prefix_to_code[{kNoPrefix, dict_len1[i]}] = i;
      *out = dict_len1[i];
      ++out;
    }
    CHECK_EQ(prefix_to_code.size(), dict_len1.size());
  }

  if (begin == end)
    return out;

  // Main LZW encoding loop.
  LzwCodeType match = prefix_to_code.find({kNoPrefix, *begin})->second;
  ++begin;
  for (auto it = begin; it != end; ++it) {
    // Extend match with the new item.
    auto ins = prefix_to_code.try_emplace({match, *it}, prefix_to_code.size());
    if (ins.second) {
      // This is a new substring, but emit the code for the current match
      // (before extend). This allows the LZW decoder to recover the
      // dictionary.
      *out = match;
      ++out;
      // Reset the match to a single item, which must be already in the map.
      match = prefix_to_code.find({kNoPrefix, *it})->second;
    } else {
      // Already known, use as the current match.
      match = ins.first->second;
    }
  }

  // Emit the code for the final (possibly multi-item) match.
  *out = match;
  ++out;

  return out;
}
// Decodes an LZW code stream produced by LzwEncode and writes the decoded
// items to `out`. Returns the output iterator past the last item written.
// NOTE: `ItOut` must allow iterator arithmetic and re-reading of already
// written positions (the decoder references earlier output as its
// dictionary).
template <class T, class ItIn, class ItOut>
ItOut LzwDecode(ItIn begin, ItIn end, ItOut out) {
  if (begin == end)
    return out;

  // Load the dictionary of len 1 substrings. These correspond to the lowest
  // codes.
  InternalMmapVector<T> dict_len1(*begin);
  ++begin;
  if (begin == end)
    return out;
  for (auto& v : dict_len1) {
    v = *begin;
    ++begin;
  }

  // Substrings of len 2 and up. Indexes are shifted because
  // [0, dict_len1.size()) is stored in dict_len1. Substrings get here after
  // being emitted to the output, so we can represent them as ranges over the
  // output itself.
  InternalMmapVector<detail::DenseMapPair<ItOut /* begin. */, ItOut /* end */>>
      code_to_substr;

  // Copies an already emitted substring into the output again.
  auto copy = [&code_to_substr, &dict_len1](LzwCodeType code, ItOut out) {
    if (code < dict_len1.size()) {
      *out = dict_len1[code];
      ++out;
      return out;
    }
    const auto& s = code_to_substr[code - dict_len1.size()];
    for (ItOut it = s.first; it != s.second; ++it, ++out) *out = *it;
    return out;
  };

  // Returns the len of the substring with the given code.
  auto code_to_len = [&code_to_substr, &dict_len1](LzwCodeType code) -> uptr {
    if (code < dict_len1.size())
      return 1;
    const auto& s = code_to_substr[code - dict_len1.size()];
    return s.second - s.first;
  };

  // Main LZW decoding loop.
  LzwCodeType prev_code = *begin;
  ++begin;
  out = copy(prev_code, out);
  for (auto it = begin; it != end; ++it) {
    LzwCodeType code = *it;
    auto start = out;
    if (code == dict_len1.size() + code_to_substr.size()) {
      // Special LZW case. The code is not in the dictionary yet. This is
      // possible only when the new substring is the same as the previous one
      // plus the first item of the previous substring. We can emit that in
      // two steps.
      out = copy(prev_code, out);
      *out = *start;
      ++out;
    } else {
      out = copy(code, out);
    }
    // Every time the encoder emits a code, it also creates a substring of
    // len + 1 including the first item of the just emitted substring. Do the
    // same here.
    uptr len = code_to_len(prev_code);
    code_to_substr.push_back({start - len, start + 1});
    prev_code = code;
  }
  return out;
}
} // namespace __sanitizer
#endif

View File

@ -11,80 +11,82 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
#if SANITIZER_MAC
#include "sanitizer_mac.h"
#include "interception/interception.h"
#if SANITIZER_APPLE
# include "interception/interception.h"
# include "sanitizer_mac.h"
// Use 64-bit inodes in file operations. ASan does not support OS X 10.5, so
// the clients will most certainly use 64-bit ones as well.
#ifndef _DARWIN_USE_64_BIT_INODE
#define _DARWIN_USE_64_BIT_INODE 1
#endif
#include <stdio.h>
# ifndef _DARWIN_USE_64_BIT_INODE
# define _DARWIN_USE_64_BIT_INODE 1
# endif
# include <stdio.h>
#include "sanitizer_common.h"
#include "sanitizer_file.h"
#include "sanitizer_flags.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_platform_limits_posix.h"
#include "sanitizer_procmaps.h"
#include "sanitizer_ptrauth.h"
# include "sanitizer_common.h"
# include "sanitizer_file.h"
# include "sanitizer_flags.h"
# include "sanitizer_interface_internal.h"
# include "sanitizer_internal_defs.h"
# include "sanitizer_libc.h"
# include "sanitizer_platform_limits_posix.h"
# include "sanitizer_procmaps.h"
# include "sanitizer_ptrauth.h"
#if !SANITIZER_IOS
#include <crt_externs.h> // for _NSGetEnviron
#else
# if !SANITIZER_IOS
# include <crt_externs.h> // for _NSGetEnviron
# else
extern char **environ;
#endif
# endif
#if defined(__has_include) && __has_include(<os/trace.h>)
#define SANITIZER_OS_TRACE 1
#include <os/trace.h>
#else
#define SANITIZER_OS_TRACE 0
#endif
# if defined(__has_include) && __has_include(<os/trace.h>)
# define SANITIZER_OS_TRACE 1
# include <os/trace.h>
# else
# define SANITIZER_OS_TRACE 0
# endif
// import new crash reporting api
#if defined(__has_include) && __has_include(<CrashReporterClient.h>)
#define HAVE_CRASHREPORTERCLIENT_H 1
#include <CrashReporterClient.h>
#else
#define HAVE_CRASHREPORTERCLIENT_H 0
#endif
# if defined(__has_include) && __has_include(<CrashReporterClient.h>)
# define HAVE_CRASHREPORTERCLIENT_H 1
# include <CrashReporterClient.h>
# else
# define HAVE_CRASHREPORTERCLIENT_H 0
# endif
#if !SANITIZER_IOS
#include <crt_externs.h> // for _NSGetArgv and _NSGetEnviron
#else
# if !SANITIZER_IOS
# include <crt_externs.h> // for _NSGetArgv and _NSGetEnviron
# else
extern "C" {
extern char ***_NSGetArgv(void);
extern char ***_NSGetArgv(void);
}
#endif
# endif
#include <asl.h>
#include <dlfcn.h> // for dladdr()
#include <errno.h>
#include <fcntl.h>
#include <libkern/OSAtomic.h>
#include <mach-o/dyld.h>
#include <mach/mach.h>
#include <mach/mach_time.h>
#include <mach/vm_statistics.h>
#include <malloc/malloc.h>
#include <os/log.h>
#include <pthread.h>
#include <sched.h>
#include <signal.h>
#include <spawn.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include <util.h>
# include <asl.h>
# include <dlfcn.h> // for dladdr()
# include <errno.h>
# include <fcntl.h>
# include <libkern/OSAtomic.h>
# include <mach-o/dyld.h>
# include <mach/mach.h>
# include <mach/mach_time.h>
# include <mach/vm_statistics.h>
# include <malloc/malloc.h>
# include <os/log.h>
# include <pthread.h>
# include <pthread/introspection.h>
# include <sched.h>
# include <signal.h>
# include <spawn.h>
# include <stdlib.h>
# include <sys/ioctl.h>
# include <sys/mman.h>
# include <sys/resource.h>
# include <sys/stat.h>
# include <sys/sysctl.h>
# include <sys/types.h>
# include <sys/wait.h>
# include <unistd.h>
# include <util.h>
// From <crt_externs.h>, but we don't have that file on iOS.
extern "C" {
@ -265,30 +267,32 @@ int internal_sysctlbyname(const char *sname, void *oldp, uptr *oldlenp,
static fd_t internal_spawn_impl(const char *argv[], const char *envp[],
pid_t *pid) {
fd_t master_fd = kInvalidFd;
fd_t slave_fd = kInvalidFd;
fd_t primary_fd = kInvalidFd;
fd_t secondary_fd = kInvalidFd;
auto fd_closer = at_scope_exit([&] {
internal_close(master_fd);
internal_close(slave_fd);
internal_close(primary_fd);
internal_close(secondary_fd);
});
// We need a new pseudoterminal to avoid buffering problems. The 'atos' tool
// in particular detects when it's talking to a pipe and forgets to flush the
// output stream after sending a response.
master_fd = posix_openpt(O_RDWR);
if (master_fd == kInvalidFd) return kInvalidFd;
primary_fd = posix_openpt(O_RDWR);
if (primary_fd == kInvalidFd)
return kInvalidFd;
int res = grantpt(master_fd) || unlockpt(master_fd);
int res = grantpt(primary_fd) || unlockpt(primary_fd);
if (res != 0) return kInvalidFd;
// Use TIOCPTYGNAME instead of ptsname() to avoid threading problems.
char slave_pty_name[128];
res = ioctl(master_fd, TIOCPTYGNAME, slave_pty_name);
char secondary_pty_name[128];
res = ioctl(primary_fd, TIOCPTYGNAME, secondary_pty_name);
if (res == -1) return kInvalidFd;
slave_fd = internal_open(slave_pty_name, O_RDWR);
if (slave_fd == kInvalidFd) return kInvalidFd;
secondary_fd = internal_open(secondary_pty_name, O_RDWR);
if (secondary_fd == kInvalidFd)
return kInvalidFd;
// File descriptor actions
posix_spawn_file_actions_t acts;
@ -299,9 +303,9 @@ static fd_t internal_spawn_impl(const char *argv[], const char *envp[],
posix_spawn_file_actions_destroy(&acts);
});
res = posix_spawn_file_actions_adddup2(&acts, slave_fd, STDIN_FILENO) ||
posix_spawn_file_actions_adddup2(&acts, slave_fd, STDOUT_FILENO) ||
posix_spawn_file_actions_addclose(&acts, slave_fd);
res = posix_spawn_file_actions_adddup2(&acts, secondary_fd, STDIN_FILENO) ||
posix_spawn_file_actions_adddup2(&acts, secondary_fd, STDOUT_FILENO) ||
posix_spawn_file_actions_addclose(&acts, secondary_fd);
if (res != 0) return kInvalidFd;
// Spawn attributes
@ -326,14 +330,14 @@ static fd_t internal_spawn_impl(const char *argv[], const char *envp[],
// Disable echo in the new terminal, disable CR.
struct termios termflags;
tcgetattr(master_fd, &termflags);
tcgetattr(primary_fd, &termflags);
termflags.c_oflag &= ~ONLCR;
termflags.c_lflag &= ~ECHO;
tcsetattr(master_fd, TCSANOW, &termflags);
tcsetattr(primary_fd, TCSANOW, &termflags);
// On success, do not close master_fd on scope exit.
fd_t fd = master_fd;
master_fd = kInvalidFd;
// On success, do not close primary_fd on scope exit.
fd_t fd = primary_fd;
primary_fd = kInvalidFd;
return fd;
}
@ -390,6 +394,13 @@ bool FileExists(const char *filename) {
return S_ISREG(st.st_mode);
}
// Returns true iff `path` names an existing directory.
bool DirExists(const char *path) {
  struct stat st;
  // A failing stat() (missing path, permission error, ...) means "no".
  return stat(path, &st) == 0 && S_ISDIR(st.st_mode);
}
tid_t GetTid() {
tid_t tid;
pthread_threadid_np(nullptr, &tid);
@ -516,25 +527,6 @@ void FutexWait(atomic_uint32_t *p, u32 cmp) {
void FutexWake(atomic_uint32_t *p, u32 count) {}
BlockingMutex::BlockingMutex() {
internal_memset(this, 0, sizeof(*this));
}
void BlockingMutex::Lock() {
CHECK(sizeof(OSSpinLock) <= sizeof(opaque_storage_));
CHECK_EQ(OS_SPINLOCK_INIT, 0);
CHECK_EQ(owner_, 0);
OSSpinLockLock((OSSpinLock*)&opaque_storage_);
}
void BlockingMutex::Unlock() {
OSSpinLockUnlock((OSSpinLock*)&opaque_storage_);
}
void BlockingMutex::CheckLocked() const {
CHECK_NE(*(const OSSpinLock*)&opaque_storage_, 0);
}
u64 NanoTime() {
timeval tv;
internal_memset(&tv, 0, sizeof(tv));
@ -562,6 +554,9 @@ uptr TlsBaseAddr() {
asm("movq %%gs:0,%0" : "=r"(segbase));
#elif defined(__i386__)
asm("movl %%gs:0,%0" : "=r"(segbase));
#elif defined(__aarch64__)
asm("mrs %x0, tpidrro_el0" : "=r"(segbase));
segbase &= 0x07ul; // clearing lower bits, cpu id stored there
#endif
return segbase;
}
@ -784,8 +779,8 @@ void *internal_start_thread(void *(*func)(void *arg), void *arg) {
void internal_join_thread(void *th) { pthread_join((pthread_t)th, 0); }
#if !SANITIZER_GO
static BlockingMutex syslog_lock(LINKER_INITIALIZED);
#endif
static Mutex syslog_lock;
# endif
void WriteOneLineToSyslog(const char *s) {
#if !SANITIZER_GO
@ -800,7 +795,7 @@ void WriteOneLineToSyslog(const char *s) {
// buffer to store crash report application information
static char crashreporter_info_buff[__sanitizer::kErrorMessageBufferSize] = {};
static BlockingMutex crashreporter_info_mutex(LINKER_INITIALIZED);
static Mutex crashreporter_info_mutex;
extern "C" {
// Integrate with crash reporter libraries.
@ -830,7 +825,7 @@ asm(".desc ___crashreporter_info__, 0x10");
} // extern "C"
static void CRAppendCrashLogMessage(const char *msg) {
BlockingMutexLock l(&crashreporter_info_mutex);
Lock l(&crashreporter_info_mutex);
internal_strlcat(crashreporter_info_buff, msg,
sizeof(crashreporter_info_buff));
#if HAVE_CRASHREPORTERCLIENT_H
@ -874,7 +869,7 @@ void LogFullErrorReport(const char *buffer) {
// the reporting thread holds the thread registry mutex, and asl_log waits
// for GCD to dispatch a new thread, the process will deadlock, because the
// pthread_create wrapper needs to acquire the lock as well.
BlockingMutexLock l(&syslog_lock);
Lock l(&syslog_lock);
if (common_flags()->log_to_syslog)
WriteToSyslog(buffer);
@ -885,9 +880,12 @@ void LogFullErrorReport(const char *buffer) {
SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
#if defined(__x86_64__) || defined(__i386__)
ucontext_t *ucontext = static_cast<ucontext_t*>(context);
return ucontext->uc_mcontext->__es.__err & 2 /*T_PF_WRITE*/ ? WRITE : READ;
return ucontext->uc_mcontext->__es.__err & 2 /*T_PF_WRITE*/ ? Write : Read;
#elif defined(__arm64__)
ucontext_t *ucontext = static_cast<ucontext_t*>(context);
return ucontext->uc_mcontext->__es.__esr & 0x40 /*ISS_DA_WNR*/ ? Write : Read;
#else
return UNKNOWN;
return Unknown;
#endif
}
@ -902,18 +900,14 @@ bool SignalContext::IsTrueFaultingAddress() const {
(uptr)ptrauth_strip( \
(void *)arm_thread_state64_get_##r(ucontext->uc_mcontext->__ss), 0)
#else
#define AARCH64_GET_REG(r) ucontext->uc_mcontext->__ss.__##r
#define AARCH64_GET_REG(r) (uptr)ucontext->uc_mcontext->__ss.__##r
#endif
static void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
ucontext_t *ucontext = (ucontext_t*)context;
# if defined(__aarch64__)
*pc = AARCH64_GET_REG(pc);
# if defined(__IPHONE_8_0) && __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_8_0
*bp = AARCH64_GET_REG(fp);
# else
*bp = AARCH64_GET_REG(lr);
# endif
*sp = AARCH64_GET_REG(sp);
# elif defined(__x86_64__)
*pc = ucontext->uc_mcontext->__ss.__rip;
@ -950,6 +944,9 @@ static void DisableMmapExcGuardExceptions() {
set_behavior(mach_task_self(), task_exc_guard_none);
}
static void VerifyInterceptorsWorking();
static void StripEnv();
void InitializePlatformEarly() {
// Only use xnu_fast_mmap when on x86_64 and the kernel supports it.
use_xnu_fast_mmap =
@ -960,17 +957,54 @@ void InitializePlatformEarly() {
#endif
if (GetDarwinKernelVersion() >= DarwinKernelVersion(19, 0))
DisableMmapExcGuardExceptions();
# if !SANITIZER_GO
MonotonicNanoTime(); // Call to initialize mach_timebase_info
VerifyInterceptorsWorking();
StripEnv();
# endif
}
#if !SANITIZER_GO
static const char kDyldInsertLibraries[] = "DYLD_INSERT_LIBRARIES";
LowLevelAllocator allocator_for_env;
static bool ShouldCheckInterceptors() {
// Restrict "interceptors working?" check to ASan and TSan.
const char *sanitizer_names[] = {"AddressSanitizer", "ThreadSanitizer"};
size_t count = sizeof(sanitizer_names) / sizeof(sanitizer_names[0]);
for (size_t i = 0; i < count; i++) {
if (internal_strcmp(sanitizer_names[i], SanitizerToolName) == 0)
return true;
}
return false;
}
// Aborts the process (via RAW_CHECK) if the sanitizer runtime was loaded too
// late for its interceptors to take effect, e.g. the dylib was dlopen()ed
// instead of being preloaded through DYLD_INSERT_LIBRARIES. Gated by the
// verify_interceptors flag and restricted to ASan/TSan via
// ShouldCheckInterceptors().
static void VerifyInterceptorsWorking() {
  if (!common_flags()->verify_interceptors || !ShouldCheckInterceptors())
    return;

  // Verify that interceptors really work. We'll use dlsym to locate
  // "puts", if interceptors are working, it should really point to
  // "wrap_puts" within our own dylib.
  Dl_info info_puts, info_runtime;
  RAW_CHECK(dladdr(dlsym(RTLD_DEFAULT, "puts"), &info_puts));
  RAW_CHECK(dladdr((void *)&VerifyInterceptorsWorking, &info_runtime));
  // If interception is active, the default-resolved "puts" and this runtime
  // function live in the same image, so their dli_fname paths match.
  if (internal_strcmp(info_puts.dli_fname, info_runtime.dli_fname) != 0) {
    Report(
        "ERROR: Interceptors are not working. This may be because %s is "
        "loaded too late (e.g. via dlopen). Please launch the executable "
        "with:\n%s=%s\n",
        SanitizerToolName, kDyldInsertLibraries, info_runtime.dli_fname);
    RAW_CHECK("interceptors not installed" && 0);
  }
}
// Change the value of the env var |name|, leaking the original value.
// If |name_value| is NULL, the variable is deleted from the environment,
// otherwise the corresponding "NAME=value" string is replaced with
// |name_value|.
void LeakyResetEnv(const char *name, const char *name_value) {
static void LeakyResetEnv(const char *name, const char *name_value) {
char **env = GetEnviron();
uptr name_len = internal_strlen(name);
while (*env != 0) {
@ -995,100 +1029,28 @@ void LeakyResetEnv(const char *name, const char *name_value) {
}
}
SANITIZER_WEAK_CXX_DEFAULT_IMPL
bool ReexecDisabled() {
return false;
}
static bool DyldNeedsEnvVariable() {
// If running on OS X 10.11+ or iOS 9.0+, dyld will interpose even if
// DYLD_INSERT_LIBRARIES is not set.
return GetMacosAlignedVersion() < MacosVersion(10, 11);
}
void MaybeReexec() {
// FIXME: This should really live in some "InitializePlatform" method.
MonotonicNanoTime();
if (ReexecDisabled()) return;
// Make sure the dynamic runtime library is preloaded so that the
// wrappers work. If it is not, set DYLD_INSERT_LIBRARIES and re-exec
// ourselves.
Dl_info info;
RAW_CHECK(dladdr((void*)((uptr)&__sanitizer_report_error_summary), &info));
char *dyld_insert_libraries =
const_cast<char*>(GetEnv(kDyldInsertLibraries));
uptr old_env_len = dyld_insert_libraries ?
internal_strlen(dyld_insert_libraries) : 0;
uptr fname_len = internal_strlen(info.dli_fname);
const char *dylib_name = StripModuleName(info.dli_fname);
uptr dylib_name_len = internal_strlen(dylib_name);
bool lib_is_in_env = dyld_insert_libraries &&
internal_strstr(dyld_insert_libraries, dylib_name);
if (DyldNeedsEnvVariable() && !lib_is_in_env) {
// DYLD_INSERT_LIBRARIES is not set or does not contain the runtime
// library.
InternalMmapVector<char> program_name(1024);
uint32_t buf_size = program_name.size();
_NSGetExecutablePath(program_name.data(), &buf_size);
char *new_env = const_cast<char*>(info.dli_fname);
if (dyld_insert_libraries) {
// Append the runtime dylib name to the existing value of
// DYLD_INSERT_LIBRARIES.
new_env = (char*)allocator_for_env.Allocate(old_env_len + fname_len + 2);
internal_strncpy(new_env, dyld_insert_libraries, old_env_len);
new_env[old_env_len] = ':';
// Copy fname_len and add a trailing zero.
internal_strncpy(new_env + old_env_len + 1, info.dli_fname,
fname_len + 1);
// Ok to use setenv() since the wrappers don't depend on the value of
// asan_inited.
setenv(kDyldInsertLibraries, new_env, /*overwrite*/1);
} else {
// Set DYLD_INSERT_LIBRARIES equal to the runtime dylib name.
setenv(kDyldInsertLibraries, info.dli_fname, /*overwrite*/0);
}
VReport(1, "exec()-ing the program with\n");
VReport(1, "%s=%s\n", kDyldInsertLibraries, new_env);
VReport(1, "to enable wrappers.\n");
execv(program_name.data(), *_NSGetArgv());
// We get here only if execv() failed.
Report("ERROR: The process is launched without DYLD_INSERT_LIBRARIES, "
"which is required for the sanitizer to work. We tried to set the "
"environment variable and re-execute itself, but execv() failed, "
"possibly because of sandbox restrictions. Make sure to launch the "
"executable with:\n%s=%s\n", kDyldInsertLibraries, new_env);
RAW_CHECK("execv failed" && 0);
}
// Verify that interceptors really work. We'll use dlsym to locate
// "pthread_create", if interceptors are working, it should really point to
// "wrap_pthread_create" within our own dylib.
Dl_info info_pthread_create;
void *dlopen_addr = dlsym(RTLD_DEFAULT, "pthread_create");
RAW_CHECK(dladdr(dlopen_addr, &info_pthread_create));
if (internal_strcmp(info.dli_fname, info_pthread_create.dli_fname) != 0) {
Report(
"ERROR: Interceptors are not working. This may be because %s is "
"loaded too late (e.g. via dlopen). Please launch the executable "
"with:\n%s=%s\n",
SanitizerToolName, kDyldInsertLibraries, info.dli_fname);
RAW_CHECK("interceptors not installed" && 0);
}
if (!lib_is_in_env)
static void StripEnv() {
if (!common_flags()->strip_env)
return;
if (!common_flags()->strip_env)
char *dyld_insert_libraries =
const_cast<char *>(GetEnv(kDyldInsertLibraries));
if (!dyld_insert_libraries)
return;
Dl_info info;
RAW_CHECK(dladdr((void *)&StripEnv, &info));
const char *dylib_name = StripModuleName(info.dli_fname);
bool lib_is_in_env = internal_strstr(dyld_insert_libraries, dylib_name);
if (!lib_is_in_env)
return;
// DYLD_INSERT_LIBRARIES is set and contains the runtime library. Let's remove
// the dylib from the environment variable, because interceptors are installed
// and we don't want our children to inherit the variable.
uptr old_env_len = internal_strlen(dyld_insert_libraries);
uptr dylib_name_len = internal_strlen(dylib_name);
uptr env_name_len = internal_strlen(kDyldInsertLibraries);
// Allocate memory to hold the previous env var name, its value, the '='
// sign and the '\0' char.
@ -1237,7 +1199,7 @@ uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
uptr largest_gap_found = 0;
uptr max_occupied_addr = 0;
VReport(2, "FindDynamicShadowStart, space_size = %p\n", space_size);
VReport(2, "FindDynamicShadowStart, space_size = %p\n", (void *)space_size);
uptr shadow_start =
FindAvailableMemoryRange(space_size, alignment, granularity,
&largest_gap_found, &max_occupied_addr);
@ -1246,20 +1208,21 @@ uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
VReport(
2,
"Shadow doesn't fit, largest_gap_found = %p, max_occupied_addr = %p\n",
largest_gap_found, max_occupied_addr);
(void *)largest_gap_found, (void *)max_occupied_addr);
uptr new_max_vm = RoundDownTo(largest_gap_found << shadow_scale, alignment);
if (new_max_vm < max_occupied_addr) {
Report("Unable to find a memory range for dynamic shadow.\n");
Report(
"space_size = %p, largest_gap_found = %p, max_occupied_addr = %p, "
"new_max_vm = %p\n",
space_size, largest_gap_found, max_occupied_addr, new_max_vm);
(void *)space_size, (void *)largest_gap_found,
(void *)max_occupied_addr, (void *)new_max_vm);
CHECK(0 && "cannot place shadow");
}
RestrictMemoryToMaxAddress(new_max_vm);
high_mem_end = new_max_vm - 1;
space_size = (high_mem_end >> shadow_scale) + left_padding;
VReport(2, "FindDynamicShadowStart, space_size = %p\n", space_size);
VReport(2, "FindDynamicShadowStart, space_size = %p\n", (void *)space_size);
shadow_start = FindAvailableMemoryRange(space_size, alignment, granularity,
nullptr, nullptr);
if (shadow_start == 0) {
@ -1288,6 +1251,7 @@ uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
mach_vm_address_t start_address =
(SANITIZER_WORDSIZE == 32) ? 0x000000001000 : 0x000100000000;
const mach_vm_address_t max_vm_address = GetMaxVirtualAddress() + 1;
mach_vm_address_t address = start_address;
mach_vm_address_t free_begin = start_address;
kern_return_t kr = KERN_SUCCESS;
@ -1302,7 +1266,7 @@ uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
(vm_region_info_t)&vminfo, &count);
if (kr == KERN_INVALID_ADDRESS) {
// No more regions beyond "address", consider the gap at the end of VM.
address = GetMaxVirtualAddress() + 1;
address = max_vm_address;
vmsize = 0;
} else {
if (max_occupied_addr) *max_occupied_addr = address + vmsize;
@ -1310,7 +1274,7 @@ uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
if (free_begin != address) {
// We found a free region [free_begin..address-1].
uptr gap_start = RoundUpTo((uptr)free_begin + left_padding, alignment);
uptr gap_end = RoundDownTo((uptr)address, alignment);
uptr gap_end = RoundDownTo((uptr)Min(address, max_vm_address), alignment);
uptr gap_size = gap_end > gap_start ? gap_end - gap_start : 0;
if (size < gap_size) {
return gap_start;
@ -1330,7 +1294,7 @@ uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
}
// FIXME implement on this platform.
void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) { }
void GetMemoryProfile(fill_profile_f cb, uptr *stats) {}
void SignalContext::DumpAllRegisters(void *context) {
Report("Register values:\n");
@ -1339,7 +1303,7 @@ void SignalContext::DumpAllRegisters(void *context) {
# define DUMPREG64(r) \
Printf("%s = 0x%016llx ", #r, ucontext->uc_mcontext->__ss.__ ## r);
# define DUMPREGA64(r) \
Printf(" %s = 0x%016llx ", #r, AARCH64_GET_REG(r));
Printf(" %s = 0x%016lx ", #r, AARCH64_GET_REG(r));
# define DUMPREG32(r) \
Printf("%s = 0x%08x ", #r, ucontext->uc_mcontext->__ss.__ ## r);
# define DUMPREG_(r) Printf(" "); DUMPREG(r);
@ -1409,7 +1373,7 @@ void DumpProcessMap() {
char uuid_str[128];
FormatUUID(uuid_str, sizeof(uuid_str), modules[i].uuid());
Printf("0x%zx-0x%zx %s (%s) %s\n", modules[i].base_address(),
modules[i].max_executable_address(), modules[i].full_name(),
modules[i].max_address(), modules[i].full_name(),
ModuleArchToString(modules[i].arch()), uuid_str);
}
Printf("End of module map.\n");
@ -1433,6 +1397,61 @@ u32 GetNumberOfCPUs() {
void InitializePlatformCommonFlags(CommonFlags *cf) {}
// Pthread introspection hook
//
// * GCD worker threads are created without a call to pthread_create(), but we
// still need to register these threads (with ThreadCreate/Start()).
// * We use the "pthread introspection hook" below to observe the creation of
// such threads.
// * GCD worker threads don't have parent threads and the CREATE event is
// delivered in the context of the thread itself. CREATE events for regular
// threads are delivered on the parent thread. We use this to tell apart which
// threads are GCD workers with `thread == pthread_self()`.
//
// Previously installed introspection hook (if any); we chain to it so other
// consumers of the hook keep working.
static pthread_introspection_hook_t prev_pthread_introspection_hook;
// Per-event callbacks registered via InstallPthreadIntrospectionHook().
static ThreadEventCallbacks thread_event_callbacks;

// Dispatches pthread lifecycle events to the registered callbacks and chains
// to any previously installed hook.
static void sanitizer_pthread_introspection_hook(unsigned int event,
                                                 pthread_t thread, void *addr,
                                                 size_t size) {
  // create -> start -> terminate -> destroy
  // * create/destroy are usually (not guaranteed) delivered on the parent and
  //   track resource allocation/reclamation
  // * start/terminate are guaranteed to be delivered in the context of the
  //   thread and give hooks into "just after (before) thread starts (stops)
  //   executing"
  DCHECK(event >= PTHREAD_INTROSPECTION_THREAD_CREATE &&
         event <= PTHREAD_INTROSPECTION_THREAD_DESTROY);

  if (event == PTHREAD_INTROSPECTION_THREAD_CREATE) {
    // CREATE for a GCD worker is delivered on the thread itself; for regular
    // threads it arrives on the parent (see the comment block above).
    bool gcd_worker = (thread == pthread_self());
    if (thread_event_callbacks.create)
      thread_event_callbacks.create((uptr)thread, gcd_worker);
  } else if (event == PTHREAD_INTROSPECTION_THREAD_START) {
    CHECK_EQ(thread, pthread_self());
    if (thread_event_callbacks.start)
      thread_event_callbacks.start((uptr)thread);
  }

  // CREATE/START are handled before chaining while TERMINATE/DESTROY are
  // handled after -- NOTE(review): presumably so our callbacks bracket the
  // previous hook's view of the thread lifetime; confirm before reordering.
  if (prev_pthread_introspection_hook)
    prev_pthread_introspection_hook(event, thread, addr, size);

  if (event == PTHREAD_INTROSPECTION_THREAD_TERMINATE) {
    CHECK_EQ(thread, pthread_self());
    if (thread_event_callbacks.terminate)
      thread_event_callbacks.terminate((uptr)thread);
  } else if (event == PTHREAD_INTROSPECTION_THREAD_DESTROY) {
    if (thread_event_callbacks.destroy)
      thread_event_callbacks.destroy((uptr)thread);
  }
}
// Registers `callbacks` and installs our pthread introspection hook.
// The callbacks are stored before the hook is installed so the hook never
// observes a partially initialized callback table.
void InstallPthreadIntrospectionHook(const ThreadEventCallbacks &callbacks) {
  thread_event_callbacks = callbacks;
  // pthread_introspection_hook_install() returns the previously installed
  // hook; keep it so our hook can chain to it.
  prev_pthread_introspection_hook =
      pthread_introspection_hook_install(&sanitizer_pthread_introspection_hook);
}
} // namespace __sanitizer
#endif // SANITIZER_MAC
#endif // SANITIZER_APPLE

View File

@ -9,12 +9,12 @@
// This file is shared between various sanitizers' runtime libraries and
// provides definitions for OSX-specific functions.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_MAC_H
#define SANITIZER_MAC_H
#ifndef SANITIZER_APPLE_H
#define SANITIZER_APPLE_H
#include "sanitizer_common.h"
#include "sanitizer_platform.h"
#if SANITIZER_MAC
#if SANITIZER_APPLE
#include "sanitizer_posix.h"
namespace __sanitizer {
@ -62,7 +62,18 @@ char **GetEnviron();
void RestrictMemoryToMaxAddress(uptr max_address);
using ThreadEventCallback = void (*)(uptr thread);
using ThreadCreateEventCallback = void (*)(uptr thread, bool gcd_worker);
// Bundle of optional callbacks invoked by the pthread introspection hook at
// each stage of a thread's lifecycle (create -> start -> terminate ->
// destroy).  Any member left null is simply skipped by the hook.
struct ThreadEventCallbacks {
  // `gcd_worker` is presumably true for GCD worker threads — confirm.
  ThreadCreateEventCallback create;
  ThreadEventCallback start;      // delivered on the starting thread itself
  ThreadEventCallback terminate;  // delivered on the terminating thread itself
  ThreadEventCallback destroy;
};

// Installs the hook that routes pthread introspection events to `callbacks`.
void InstallPthreadIntrospectionHook(const ThreadEventCallbacks &callbacks);
} // namespace __sanitizer
#endif // SANITIZER_MAC
#endif // SANITIZER_MAC_H
#endif // SANITIZER_APPLE
#endif // SANITIZER_APPLE_H

View File

@ -11,7 +11,7 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
#if SANITIZER_MAC
#if SANITIZER_APPLE
#include "sanitizer_mac.h"
#include <sys/mman.h>
@ -26,4 +26,4 @@ void RestrictMemoryToMaxAddress(uptr max_address) {
} // namespace __sanitizer
#endif // SANITIZER_MAC
#endif // SANITIZER_APPLE

View File

@ -0,0 +1,38 @@
//===-- sanitizer_mallinfo.h ----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of Sanitizer common code.
//
// Definition for mallinfo on different platforms.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_MALLINFO_H
#define SANITIZER_MALLINFO_H
#include "sanitizer_internal_defs.h"
#include "sanitizer_platform.h"
namespace __sanitizer {
#if SANITIZER_ANDROID
struct __sanitizer_struct_mallinfo {
uptr v[10];
};
#elif SANITIZER_LINUX || SANITIZER_APPLE || SANITIZER_FUCHSIA
struct __sanitizer_struct_mallinfo {
int v[10];
};
#endif
} // namespace __sanitizer
#endif // SANITIZER_MALLINFO_H

View File

@ -12,7 +12,7 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
#if !SANITIZER_MAC
#if !SANITIZER_APPLE
#error "This file should only be compiled on Darwin."
#endif
@ -23,6 +23,7 @@
#include <sys/mman.h>
#include "interception/interception.h"
#include "sanitizer_common/sanitizer_allocator_dlsym.h"
#include "sanitizer_common/sanitizer_mac.h"
// Similar code is used in Google Perftools,
@ -192,20 +193,15 @@ void *__sanitizer_mz_malloc(malloc_zone_t *zone, uptr size) {
return p;
}
// Fallback allocator (CRTP over DlSymAllocator) used for allocations made
// before the sanitizer malloc zone is initialized — e.g. dlsym itself calls
// calloc before interception is fully set up.
struct DlsymAlloc : public DlSymAllocator<DlsymAlloc> {
  // Route allocations through DlsymAlloc only until the real zone is ready.
  static bool UseImpl() { return !COMMON_MALLOC_SANITIZER_INITIALIZED; }
};
extern "C"
SANITIZER_INTERFACE_ATTRIBUTE
void *__sanitizer_mz_calloc(malloc_zone_t *zone, size_t nmemb, size_t size) {
if (UNLIKELY(!COMMON_MALLOC_SANITIZER_INITIALIZED)) {
// Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
const size_t kCallocPoolSize = 1024;
static uptr calloc_memory_for_dlsym[kCallocPoolSize];
static size_t allocated;
size_t size_in_words = ((nmemb * size) + kWordSize - 1) / kWordSize;
void *mem = (void*)&calloc_memory_for_dlsym[allocated];
allocated += size_in_words;
CHECK(allocated < kCallocPoolSize);
return mem;
}
if (DlsymAlloc::Use())
return DlsymAlloc::Callocate(nmemb, size);
COMMON_MALLOC_CALLOC(nmemb, size);
return p;
}
@ -223,6 +219,8 @@ extern "C"
SANITIZER_INTERFACE_ATTRIBUTE
// malloc_zone free() entry point.  Frees `ptr`, routing pointers that were
// handed out by DlsymAlloc (before zone initialization) back to DlsymAlloc.
void __sanitizer_mz_free(malloc_zone_t *zone, void *ptr) {
  if (!ptr) return;  // free(NULL) is a no-op
  if (DlsymAlloc::PointerIsMine(ptr))
    return DlsymAlloc::Free(ptr);
  COMMON_MALLOC_FREE(ptr);
}

View File

@ -73,7 +73,7 @@ void DebugMutexInit() {
// Build adjacency matrix.
bool leaf[kMutexTypeMax];
internal_memset(&leaf, 0, sizeof(leaf));
int cnt[kMutexTypeMax] = {};
int cnt[kMutexTypeMax];
internal_memset(&cnt, 0, sizeof(cnt));
for (int t = 0; t < kMutexTypeMax; t++) {
mutex_type_count = t;
@ -174,7 +174,7 @@ struct InternalDeadlockDetector {
if (max_idx != MutexInvalid && !mutex_can_lock[max_idx][type]) {
Printf("%s: internal deadlock: can't lock %s under %s mutex\n", SanitizerToolName,
mutex_meta[type].name, mutex_meta[max_idx].name);
PrintMutexPC(pc);
PrintMutexPC(locked[max_idx].pc);
CHECK(0);
}
locked[type].seq = ++sequence;

View File

@ -20,25 +20,27 @@
namespace __sanitizer {
class MUTEX StaticSpinMutex {
class SANITIZER_MUTEX StaticSpinMutex {
public:
void Init() {
atomic_store(&state_, 0, memory_order_relaxed);
}
void Lock() ACQUIRE() {
void Lock() SANITIZER_ACQUIRE() {
if (LIKELY(TryLock()))
return;
LockSlow();
}
bool TryLock() TRY_ACQUIRE(true) {
bool TryLock() SANITIZER_TRY_ACQUIRE(true) {
return atomic_exchange(&state_, 1, memory_order_acquire) == 0;
}
void Unlock() RELEASE() { atomic_store(&state_, 0, memory_order_release); }
void Unlock() SANITIZER_RELEASE() {
atomic_store(&state_, 0, memory_order_release);
}
void CheckLocked() const CHECK_LOCKED() {
void CheckLocked() const SANITIZER_CHECK_LOCKED() {
CHECK_EQ(atomic_load(&state_, memory_order_relaxed), 1);
}
@ -48,7 +50,7 @@ class MUTEX StaticSpinMutex {
void LockSlow();
};
class MUTEX SpinMutex : public StaticSpinMutex {
class SANITIZER_MUTEX SpinMutex : public StaticSpinMutex {
public:
SpinMutex() {
Init();
@ -95,7 +97,11 @@ enum {
// Go linker does not support THREADLOCAL variables,
// so we can't use per-thread state.
#define SANITIZER_CHECK_DEADLOCKS (SANITIZER_DEBUG && !SANITIZER_GO)
// Disable checked locks on Darwin. Although Darwin platforms support
// THREADLOCAL variables they are not usable early on during process init when
// `__sanitizer::Mutex` is used.
#define SANITIZER_CHECK_DEADLOCKS \
(SANITIZER_DEBUG && !SANITIZER_GO && SANITIZER_SUPPORTS_THREADLOCAL && !SANITIZER_APPLE)
#if SANITIZER_CHECK_DEADLOCKS
struct MutexMeta {
@ -111,7 +117,7 @@ struct MutexMeta {
class CheckedMutex {
public:
constexpr CheckedMutex(MutexType type)
explicit constexpr CheckedMutex(MutexType type)
#if SANITIZER_CHECK_DEADLOCKS
: type_(type)
#endif
@ -152,15 +158,15 @@ class CheckedMutex {
// Derive from CheckedMutex for the purposes of EBO.
// We could make it a field marked with [[no_unique_address]],
// but this attribute is not supported by some older compilers.
class MUTEX Mutex : CheckedMutex {
class SANITIZER_MUTEX Mutex : CheckedMutex {
public:
constexpr Mutex(MutexType type = MutexUnchecked) : CheckedMutex(type) {}
explicit constexpr Mutex(MutexType type = MutexUnchecked)
: CheckedMutex(type) {}
void Lock() ACQUIRE() {
void Lock() SANITIZER_ACQUIRE() {
CheckedMutex::Lock();
u64 reset_mask = ~0ull;
u64 state = atomic_load_relaxed(&state_);
const uptr kMaxSpinIters = 1500;
for (uptr spin_iters = 0;; spin_iters++) {
u64 new_state;
bool locked = (state & (kWriterLock | kReaderLockMask)) != 0;
@ -189,8 +195,6 @@ class MUTEX Mutex : CheckedMutex {
// We've incremented waiting writers, so now block.
writers_.Wait();
spin_iters = 0;
state = atomic_load(&state_, memory_order_relaxed);
DCHECK_NE(state & kWriterSpinWait, 0);
} else {
// We've set kWriterSpinWait, but we are still in active spinning.
}
@ -199,10 +203,26 @@ class MUTEX Mutex : CheckedMutex {
// Either way we need to reset kWriterSpinWait
// next time we take the lock or block again.
reset_mask = ~kWriterSpinWait;
state = atomic_load(&state_, memory_order_relaxed);
DCHECK_NE(state & kWriterSpinWait, 0);
}
}
void Unlock() RELEASE() {
bool TryLock() SANITIZER_TRY_ACQUIRE(true) {
u64 state = atomic_load_relaxed(&state_);
for (;;) {
if (UNLIKELY(state & (kWriterLock | kReaderLockMask)))
return false;
// The mutex is not read-/write-locked, try to lock.
if (LIKELY(atomic_compare_exchange_weak(
&state_, &state, state | kWriterLock, memory_order_acquire))) {
CheckedMutex::Lock();
return true;
}
}
}
void Unlock() SANITIZER_RELEASE() {
CheckedMutex::Unlock();
bool wake_writer;
u64 wake_readers;
@ -212,17 +232,16 @@ class MUTEX Mutex : CheckedMutex {
DCHECK_NE(state & kWriterLock, 0);
DCHECK_EQ(state & kReaderLockMask, 0);
new_state = state & ~kWriterLock;
wake_writer =
(state & kWriterSpinWait) == 0 && (state & kWaitingWriterMask) != 0;
wake_writer = (state & (kWriterSpinWait | kReaderSpinWait)) == 0 &&
(state & kWaitingWriterMask) != 0;
if (wake_writer)
new_state = (new_state - kWaitingWriterInc) | kWriterSpinWait;
wake_readers =
(state & (kWriterSpinWait | kWaitingWriterMask)) != 0
wake_writer || (state & kWriterSpinWait) != 0
? 0
: ((state & kWaitingReaderMask) >> kWaitingReaderShift);
if (wake_readers)
new_state = (new_state & ~kWaitingReaderMask) +
(wake_readers << kReaderLockShift);
new_state = (new_state & ~kWaitingReaderMask) | kReaderSpinWait;
} while (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
memory_order_release)));
if (UNLIKELY(wake_writer))
@ -231,37 +250,54 @@ class MUTEX Mutex : CheckedMutex {
readers_.Post(wake_readers);
}
void ReadLock() ACQUIRE_SHARED() {
void ReadLock() SANITIZER_ACQUIRE_SHARED() {
CheckedMutex::Lock();
bool locked;
u64 new_state;
u64 reset_mask = ~0ull;
u64 state = atomic_load_relaxed(&state_);
do {
locked =
(state & kReaderLockMask) == 0 &&
(state & (kWriterLock | kWriterSpinWait | kWaitingWriterMask)) != 0;
for (uptr spin_iters = 0;; spin_iters++) {
bool locked = (state & kWriterLock) != 0;
u64 new_state;
if (LIKELY(!locked)) {
new_state = (state + kReaderLockInc) & reset_mask;
} else if (spin_iters > kMaxSpinIters) {
new_state = (state + kWaitingReaderInc) & reset_mask;
} else if ((state & kReaderSpinWait) == 0) {
// Active spinning, but denote our presence so that unlocking
// thread does not wake up other threads.
new_state = state | kReaderSpinWait;
} else {
// Active spinning.
state = atomic_load(&state_, memory_order_relaxed);
continue;
}
if (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
memory_order_acquire)))
continue;
if (LIKELY(!locked))
new_state = state + kReaderLockInc;
else
new_state = state + kWaitingReaderInc;
} while (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
memory_order_acquire)));
if (UNLIKELY(locked))
readers_.Wait();
DCHECK_EQ(atomic_load_relaxed(&state_) & kWriterLock, 0);
DCHECK_NE(atomic_load_relaxed(&state_) & kReaderLockMask, 0);
return; // We've locked the mutex.
if (spin_iters > kMaxSpinIters) {
// We've incremented waiting readers, so now block.
readers_.Wait();
spin_iters = 0;
} else {
// We've set kReaderSpinWait, but we are still in active spinning.
}
reset_mask = ~kReaderSpinWait;
state = atomic_load(&state_, memory_order_relaxed);
}
}
void ReadUnlock() RELEASE_SHARED() {
void ReadUnlock() SANITIZER_RELEASE_SHARED() {
CheckedMutex::Unlock();
bool wake;
u64 new_state;
u64 state = atomic_load_relaxed(&state_);
do {
DCHECK_NE(state & kReaderLockMask, 0);
DCHECK_EQ(state & (kWaitingReaderMask | kWriterLock), 0);
DCHECK_EQ(state & kWriterLock, 0);
new_state = state - kReaderLockInc;
wake = (new_state & (kReaderLockMask | kWriterSpinWait)) == 0 &&
wake = (new_state &
(kReaderLockMask | kWriterSpinWait | kReaderSpinWait)) == 0 &&
(new_state & kWaitingWriterMask) != 0;
if (wake)
new_state = (new_state - kWaitingWriterInc) | kWriterSpinWait;
@ -277,13 +313,13 @@ class MUTEX Mutex : CheckedMutex {
// owns the mutex but a child checks that it is locked. Rather than
// maintaining complex state to work around those situations, the check only
// checks that the mutex is owned.
void CheckWriteLocked() const CHECK_LOCKED() {
void CheckWriteLocked() const SANITIZER_CHECK_LOCKED() {
CHECK(atomic_load(&state_, memory_order_relaxed) & kWriterLock);
}
void CheckLocked() const CHECK_LOCKED() { CheckWriteLocked(); }
void CheckLocked() const SANITIZER_CHECK_LOCKED() { CheckWriteLocked(); }
void CheckReadLocked() const CHECK_LOCKED() {
void CheckReadLocked() const SANITIZER_CHECK_LOCKED() {
CHECK(atomic_load(&state_, memory_order_relaxed) & kReaderLockMask);
}
@ -305,16 +341,14 @@ class MUTEX Mutex : CheckedMutex {
// - a writer is awake and spin-waiting
// the flag is used to prevent thundering herd problem
// (new writers are not woken if this flag is set)
// - a reader is awake and spin-waiting
//
// Writer support active spinning, readers does not.
// Both writers and readers use active spinning before blocking.
// But readers are more aggressive and always take the mutex
// if there are any other readers.
// Writers hand off the mutex to readers: after wake up readers
// already assume ownership of the mutex (don't need to do any
// state updates). But the mutex is not handed off to writers,
// after wake up writers compete to lock the mutex again.
// This is needed to allow repeated write locks even in presence
// of other blocked writers.
// After wake up both writers and readers compete to lock the
// mutex again. This is needed to allow repeated locks even in presence
// of other blocked threads.
static constexpr u64 kCounterWidth = 20;
static constexpr u64 kReaderLockShift = 0;
static constexpr u64 kReaderLockInc = 1ull << kReaderLockShift;
@ -330,7 +364,11 @@ class MUTEX Mutex : CheckedMutex {
<< kWaitingWriterShift;
static constexpr u64 kWriterLock = 1ull << (3 * kCounterWidth);
static constexpr u64 kWriterSpinWait = 1ull << (3 * kCounterWidth + 1);
static constexpr u64 kReaderSpinWait = 1ull << (3 * kCounterWidth + 2);
static constexpr uptr kMaxSpinIters = 1500;
Mutex(LinkerInitialized) = delete;
Mutex(const Mutex &) = delete;
void operator=(const Mutex &) = delete;
};
@ -338,119 +376,14 @@ class MUTEX Mutex : CheckedMutex {
void FutexWait(atomic_uint32_t *p, u32 cmp);
void FutexWake(atomic_uint32_t *p, u32 count);
// Mutex whose platform-specific state lives in opaque_storage_ (implemented
// per-OS elsewhere).  Supports constant (linker) initialization for globals
// that are locked before dynamic initializers run.
class MUTEX BlockingMutex {
 public:
  explicit constexpr BlockingMutex(LinkerInitialized)
      : opaque_storage_ {0, }, owner_ {0} {}
  BlockingMutex();
  void Lock() ACQUIRE();
  void Unlock() RELEASE();

  // This function does not guarantee an explicit check that the calling thread
  // is the thread which owns the mutex. This behavior, while more strictly
  // correct, causes problems in cases like StopTheWorld, where a parent thread
  // owns the mutex but a child checks that it is locked. Rather than
  // maintaining complex state to work around those situations, the check only
  // checks that the mutex is owned, and assumes callers to be generally
  // well-behaved.
  void CheckLocked() const CHECK_LOCKED();

 private:
  // Solaris mutex_t has a member that requires 64-bit alignment.
  ALIGNED(8) uptr opaque_storage_[10];
  uptr owner_;  // for debugging
};
// Reader-writer spin mutex.
// Writers are mutually exclusive; multiple readers may hold the lock
// concurrently (each reader adds kReadLock to state_).  Both slow paths spin
// (proc_yield / internal_sched_yield) rather than blocking in the kernel, so
// critical sections must be short.
class MUTEX RWMutex {
 public:
  RWMutex() {
    atomic_store(&state_, kUnlocked, memory_order_relaxed);
  }

  ~RWMutex() {
    // Destroying a held mutex is a bug.
    CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
  }

  void Lock() ACQUIRE() {
    // Fast path: uncontended acquisition via a single CAS.
    u32 cmp = kUnlocked;
    if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
                                       memory_order_acquire))
      return;
    LockSlow();
  }

  void Unlock() RELEASE() {
    u32 prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
    DCHECK_NE(prev & kWriteLock, 0);  // must have been write-locked
    (void)prev;
  }

  void ReadLock() ACQUIRE_SHARED() {
    // Optimistically register as a reader; back off to the slow path only if
    // a writer currently holds the lock.
    u32 prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
    if ((prev & kWriteLock) == 0)
      return;
    ReadLockSlow();
  }

  void ReadUnlock() RELEASE_SHARED() {
    u32 prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
    DCHECK_EQ(prev & kWriteLock, 0);   // no writer may hold the lock
    DCHECK_GT(prev & ~kWriteLock, 0);  // at least one reader must have held it
    (void)prev;
  }

  // Only checks that the mutex is held by somebody, not by the caller.
  void CheckLocked() const CHECK_LOCKED() {
    CHECK_NE(atomic_load(&state_, memory_order_relaxed), kUnlocked);
  }

 private:
  atomic_uint32_t state_;

  enum {
    kUnlocked = 0,
    kWriteLock = 1,
    kReadLock = 2
  };

  void NOINLINE LockSlow() {
    for (int i = 0;; i++) {
      // Spin: cheap CPU pauses for the first iterations, then yield the
      // scheduler slot.
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      u32 cmp = atomic_load(&state_, memory_order_relaxed);
      if (cmp == kUnlocked &&
          atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
                                       memory_order_acquire))
        return;
    }
  }

  void NOINLINE ReadLockSlow() {
    // ReadLock() already added our kReadLock increment; just wait for the
    // writer to release.
    for (int i = 0;; i++) {
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      u32 prev = atomic_load(&state_, memory_order_acquire);
      if ((prev & kWriteLock) == 0)
        return;
    }
  }

  RWMutex(const RWMutex &) = delete;
  void operator=(const RWMutex &) = delete;
};
template <typename MutexType>
class SCOPED_LOCK GenericScopedLock {
class SANITIZER_SCOPED_LOCK GenericScopedLock {
public:
explicit GenericScopedLock(MutexType *mu) ACQUIRE(mu) : mu_(mu) {
explicit GenericScopedLock(MutexType *mu) SANITIZER_ACQUIRE(mu) : mu_(mu) {
mu_->Lock();
}
~GenericScopedLock() RELEASE() { mu_->Unlock(); }
~GenericScopedLock() SANITIZER_RELEASE() { mu_->Unlock(); }
private:
MutexType *mu_;
@ -460,13 +393,14 @@ class SCOPED_LOCK GenericScopedLock {
};
template <typename MutexType>
class SCOPED_LOCK GenericScopedReadLock {
class SANITIZER_SCOPED_LOCK GenericScopedReadLock {
public:
explicit GenericScopedReadLock(MutexType *mu) ACQUIRE(mu) : mu_(mu) {
explicit GenericScopedReadLock(MutexType *mu) SANITIZER_ACQUIRE(mu)
: mu_(mu) {
mu_->ReadLock();
}
~GenericScopedReadLock() RELEASE() { mu_->ReadUnlock(); }
~GenericScopedReadLock() SANITIZER_RELEASE() { mu_->ReadUnlock(); }
private:
MutexType *mu_;
@ -475,12 +409,37 @@ class SCOPED_LOCK GenericScopedReadLock {
void operator=(const GenericScopedReadLock &) = delete;
};
template <typename MutexType>
// RAII guard over a reader-writer mutex: takes `mu` exclusively when `write`
// is true and shared otherwise; the matching unlock runs in the destructor.
class SANITIZER_SCOPED_LOCK GenericScopedRWLock {
 public:
  ALWAYS_INLINE explicit GenericScopedRWLock(MutexType *mu, bool write)
      SANITIZER_ACQUIRE(mu)
      : mu_(mu), write_(write) {
    // Acquire in the mode recorded for the destructor.
    write_ ? mu_->Lock() : mu_->ReadLock();
  }

  ALWAYS_INLINE ~GenericScopedRWLock() SANITIZER_RELEASE() {
    // Release in the same mode the constructor acquired.
    write_ ? mu_->Unlock() : mu_->ReadUnlock();
  }

 private:
  MutexType *mu_;
  bool write_;  // true: exclusive (writer) mode; false: shared (reader) mode

  GenericScopedRWLock(const GenericScopedRWLock &) = delete;
  void operator=(const GenericScopedRWLock &) = delete;
};
typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
typedef GenericScopedLock<BlockingMutex> BlockingMutexLock;
typedef GenericScopedLock<RWMutex> RWMutexLock;
typedef GenericScopedReadLock<RWMutex> RWMutexReadLock;
typedef GenericScopedLock<Mutex> Lock;
typedef GenericScopedReadLock<Mutex> ReadLock;
typedef GenericScopedRWLock<Mutex> RWLock;
} // namespace __sanitizer

View File

@ -1,71 +0,0 @@
//===-- sanitizer_persistent_allocator.h ------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// A fast memory allocator that does not support free() nor realloc().
// All allocations are forever.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_PERSISTENT_ALLOCATOR_H
#define SANITIZER_PERSISTENT_ALLOCATOR_H
#include "sanitizer_internal_defs.h"
#include "sanitizer_mutex.h"
#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
namespace __sanitizer {
// Thread-safe bump-pointer allocator for memory that is never freed.
// Carves allocations out of a current mmap'd region; maps a fresh region
// under a spin lock when the current one is exhausted.
class PersistentAllocator {
 public:
  // Returns `size` bytes of never-freed memory (loops until a region with
  // enough space is available; the underlying MmapOrDie aborts on failure).
  void *alloc(uptr size);

 private:
  // Lock-free fast path; returns nullptr when the current region is
  // exhausted or being replaced.
  void *tryAlloc(uptr size);
  StaticSpinMutex mtx; // Protects alloc of new blocks for region allocator.
  atomic_uintptr_t region_pos; // Region allocator for Node's.
  atomic_uintptr_t region_end; // One past the end of the current region.
};
inline void *PersistentAllocator::tryAlloc(uptr size) {
  // Optimistic lock-free allocation, essentially try to bump the region ptr.
  for (;;) {
    uptr cmp = atomic_load(&region_pos, memory_order_acquire);
    uptr end = atomic_load(&region_end, memory_order_acquire);
    // cmp == 0 means another thread is installing a new region (alloc()
    // zeroes region_pos first); caller falls back to the locked slow path.
    if (cmp == 0 || cmp + size > end) return nullptr;
    if (atomic_compare_exchange_weak(&region_pos, &cmp, cmp + size,
                                     memory_order_acquire))
      return (void *)cmp;
  }
}
inline void *PersistentAllocator::alloc(uptr size) {
  // First, try to allocate optimistically (lock-free).
  void *s = tryAlloc(size);
  if (s) return s;
  // If failed, lock, retry and alloc new superblock.
  SpinMutexLock l(&mtx);
  for (;;) {
    // Another thread may have installed a region while we waited for the lock.
    s = tryAlloc(size);
    if (s) return s;
    // Zero region_pos so concurrent tryAlloc callers fail fast and retry via
    // this locked path instead of racing with the region swap.
    atomic_store(&region_pos, 0, memory_order_relaxed);
    uptr allocsz = 64 * 1024;
    if (allocsz < size) allocsz = size;
    uptr mem = (uptr)MmapOrDie(allocsz, "stack depot");
    // Publish end before pos so tryAlloc never sees a valid pos with a
    // stale end.
    atomic_store(&region_end, mem + allocsz, memory_order_release);
    atomic_store(&region_pos, mem, memory_order_release);
  }
}
// Single global instance shared by all PersistentAlloc() callers.
extern PersistentAllocator thePersistentAllocator;

// Allocates `sz` bytes that are never freed.
inline void *PersistentAlloc(uptr sz) {
  return thePersistentAllocator.alloc(sz);
}
} // namespace __sanitizer
#endif // SANITIZER_PERSISTENT_ALLOCATOR_H

View File

@ -22,103 +22,123 @@
// function declarations into a .S file which doesn't compile.
// https://crbug.com/1162741
#if __has_include(<features.h>) && !defined(__ANDROID__)
#include <features.h>
# include <features.h>
#endif
#if defined(__linux__)
# define SANITIZER_LINUX 1
# define SANITIZER_LINUX 1
#else
# define SANITIZER_LINUX 0
# define SANITIZER_LINUX 0
#endif
#if defined(__GLIBC__)
# define SANITIZER_GLIBC 1
# define SANITIZER_GLIBC 1
#else
# define SANITIZER_GLIBC 0
# define SANITIZER_GLIBC 0
#endif
#if defined(__FreeBSD__)
# define SANITIZER_FREEBSD 1
# define SANITIZER_FREEBSD 1
#else
# define SANITIZER_FREEBSD 0
# define SANITIZER_FREEBSD 0
#endif
#if defined(__NetBSD__)
# define SANITIZER_NETBSD 1
# define SANITIZER_NETBSD 1
#else
# define SANITIZER_NETBSD 0
# define SANITIZER_NETBSD 0
#endif
#if defined(__sun__) && defined(__svr4__)
# define SANITIZER_SOLARIS 1
# define SANITIZER_SOLARIS 1
#else
# define SANITIZER_SOLARIS 0
# define SANITIZER_SOLARIS 0
#endif
// - SANITIZER_APPLE: all Apple code
// - TARGET_OS_OSX: macOS
// - SANITIZER_IOS: devices (iOS and iOS-like)
// - SANITIZER_WATCHOS
// - SANITIZER_TVOS
// - SANITIZER_IOSSIM: simulators (iOS and iOS-like)
// - SANITIZER_DRIVERKIT
#if defined(__APPLE__)
# define SANITIZER_MAC 1
# include <TargetConditionals.h>
# if TARGET_OS_OSX
# define SANITIZER_OSX 1
# else
# define SANITIZER_OSX 0
# endif
# if TARGET_OS_IPHONE
# define SANITIZER_IOS 1
# else
# define SANITIZER_IOS 0
# endif
# if TARGET_OS_SIMULATOR
# define SANITIZER_IOSSIM 1
# else
# define SANITIZER_APPLE 1
# include <TargetConditionals.h>
# if TARGET_OS_OSX
# define SANITIZER_OSX 1
# else
# define SANITIZER_OSX 0
# endif
# if TARGET_OS_IPHONE
# define SANITIZER_IOS 1
# else
# define SANITIZER_IOS 0
# endif
# if TARGET_OS_WATCH
# define SANITIZER_WATCHOS 1
# else
# define SANITIZER_WATCHOS 0
# endif
# if TARGET_OS_TV
# define SANITIZER_TVOS 1
# else
# define SANITIZER_TVOS 0
# endif
# if TARGET_OS_SIMULATOR
# define SANITIZER_IOSSIM 1
# else
# define SANITIZER_IOSSIM 0
# endif
# if defined(TARGET_OS_DRIVERKIT) && TARGET_OS_DRIVERKIT
# define SANITIZER_DRIVERKIT 1
# else
# define SANITIZER_DRIVERKIT 0
# endif
#else
# define SANITIZER_APPLE 0
# define SANITIZER_OSX 0
# define SANITIZER_IOS 0
# define SANITIZER_WATCHOS 0
# define SANITIZER_TVOS 0
# define SANITIZER_IOSSIM 0
# endif
#else
# define SANITIZER_MAC 0
# define SANITIZER_IOS 0
# define SANITIZER_IOSSIM 0
# define SANITIZER_OSX 0
#endif
#if defined(__APPLE__) && TARGET_OS_IPHONE && TARGET_OS_WATCH
# define SANITIZER_WATCHOS 1
#else
# define SANITIZER_WATCHOS 0
#endif
#if defined(__APPLE__) && TARGET_OS_IPHONE && TARGET_OS_TV
# define SANITIZER_TVOS 1
#else
# define SANITIZER_TVOS 0
# define SANITIZER_DRIVERKIT 0
#endif
#if defined(_WIN32)
# define SANITIZER_WINDOWS 1
# define SANITIZER_WINDOWS 1
#else
# define SANITIZER_WINDOWS 0
# define SANITIZER_WINDOWS 0
#endif
#if defined(_WIN64)
# define SANITIZER_WINDOWS64 1
# define SANITIZER_WINDOWS64 1
#else
# define SANITIZER_WINDOWS64 0
# define SANITIZER_WINDOWS64 0
#endif
#if defined(__ANDROID__)
# define SANITIZER_ANDROID 1
# define SANITIZER_ANDROID 1
#else
# define SANITIZER_ANDROID 0
# define SANITIZER_ANDROID 0
#endif
#if defined(__Fuchsia__)
# define SANITIZER_FUCHSIA 1
# define SANITIZER_FUCHSIA 1
#else
# define SANITIZER_FUCHSIA 0
# define SANITIZER_FUCHSIA 0
#endif
#define SANITIZER_POSIX \
(SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_MAC || \
SANITIZER_NETBSD || SANITIZER_SOLARIS)
// Assume linux that is not glibc or android is musl libc.
#if SANITIZER_LINUX && !SANITIZER_GLIBC && !SANITIZER_ANDROID
# define SANITIZER_MUSL 1
#else
# define SANITIZER_MUSL 0
#endif
#define SANITIZER_POSIX \
(SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_APPLE || \
SANITIZER_NETBSD || SANITIZER_SOLARIS)
#if __LP64__ || defined(_WIN64)
# define SANITIZER_WORDSIZE 64
@ -127,58 +147,79 @@
#endif
#if SANITIZER_WORDSIZE == 64
# define FIRST_32_SECOND_64(a, b) (b)
# define FIRST_32_SECOND_64(a, b) (b)
#else
# define FIRST_32_SECOND_64(a, b) (a)
# define FIRST_32_SECOND_64(a, b) (a)
#endif
#if defined(__x86_64__) && !defined(_LP64)
# define SANITIZER_X32 1
# define SANITIZER_X32 1
#else
# define SANITIZER_X32 0
# define SANITIZER_X32 0
#endif
#if defined(__x86_64__) || defined(_M_X64)
# define SANITIZER_X64 1
#else
# define SANITIZER_X64 0
#endif
#if defined(__i386__) || defined(_M_IX86)
# define SANITIZER_I386 1
# define SANITIZER_I386 1
#else
# define SANITIZER_I386 0
# define SANITIZER_I386 0
#endif
#if defined(__mips__)
# define SANITIZER_MIPS 1
# if defined(__mips64)
# define SANITIZER_MIPS32 0
# define SANITIZER_MIPS64 1
# else
# define SANITIZER_MIPS32 1
# define SANITIZER_MIPS64 0
# endif
# define SANITIZER_MIPS 1
# if defined(__mips64) && _MIPS_SIM == _ABI64
# define SANITIZER_MIPS32 0
# define SANITIZER_MIPS64 1
# else
# define SANITIZER_MIPS32 1
# define SANITIZER_MIPS64 0
# endif
#else
# define SANITIZER_MIPS 0
# define SANITIZER_MIPS32 0
# define SANITIZER_MIPS64 0
# define SANITIZER_MIPS 0
# define SANITIZER_MIPS32 0
# define SANITIZER_MIPS64 0
#endif
#if defined(__s390__)
# define SANITIZER_S390 1
# if defined(__s390x__)
# define SANITIZER_S390_31 0
# define SANITIZER_S390_64 1
# else
# define SANITIZER_S390_31 1
# define SANITIZER_S390_64 0
# endif
# define SANITIZER_S390 1
# if defined(__s390x__)
# define SANITIZER_S390_31 0
# define SANITIZER_S390_64 1
# else
# define SANITIZER_S390_31 1
# define SANITIZER_S390_64 0
# endif
#else
# define SANITIZER_S390 0
# define SANITIZER_S390_31 0
# define SANITIZER_S390_64 0
# define SANITIZER_S390 0
# define SANITIZER_S390_31 0
# define SANITIZER_S390_64 0
#endif
#if defined(__sparc__)
# define SANITIZER_SPARC 1
# if defined(__arch64__)
# define SANITIZER_SPARC32 0
# define SANITIZER_SPARC64 1
# else
# define SANITIZER_SPARC32 1
# define SANITIZER_SPARC64 0
# endif
#else
# define SANITIZER_SPARC 0
# define SANITIZER_SPARC32 0
# define SANITIZER_SPARC64 0
#endif
#if defined(__powerpc__)
# define SANITIZER_PPC 1
# if defined(__powerpc64__)
# define SANITIZER_PPC32 0
# define SANITIZER_PPC64 1
# define SANITIZER_PPC 1
# if defined(__powerpc64__)
# define SANITIZER_PPC32 0
# define SANITIZER_PPC64 1
// 64-bit PPC has two ABIs (v1 and v2). The old powerpc64 target is
// big-endian, and uses v1 ABI (known for its function descriptors),
// while the new powerpc64le target is little-endian and uses v2.
@ -186,106 +227,109 @@
// (eg. big-endian v2), but you won't find such combinations in the wild
// (it'd require bootstrapping a whole system, which would be quite painful
// - there's no target triple for that). LLVM doesn't support them either.
# if _CALL_ELF == 2
# define SANITIZER_PPC64V1 0
# define SANITIZER_PPC64V2 1
# if _CALL_ELF == 2
# define SANITIZER_PPC64V1 0
# define SANITIZER_PPC64V2 1
# else
# define SANITIZER_PPC64V1 1
# define SANITIZER_PPC64V2 0
# endif
# else
# define SANITIZER_PPC64V1 1
# define SANITIZER_PPC64V2 0
# define SANITIZER_PPC32 1
# define SANITIZER_PPC64 0
# define SANITIZER_PPC64V1 0
# define SANITIZER_PPC64V2 0
# endif
# else
# define SANITIZER_PPC32 1
#else
# define SANITIZER_PPC 0
# define SANITIZER_PPC32 0
# define SANITIZER_PPC64 0
# define SANITIZER_PPC64V1 0
# define SANITIZER_PPC64V2 0
# endif
#else
# define SANITIZER_PPC 0
# define SANITIZER_PPC32 0
# define SANITIZER_PPC64 0
# define SANITIZER_PPC64V1 0
# define SANITIZER_PPC64V2 0
#endif
#if defined(__arm__)
# define SANITIZER_ARM 1
#if defined(__arm__) || defined(_M_ARM)
# define SANITIZER_ARM 1
#else
# define SANITIZER_ARM 0
# define SANITIZER_ARM 0
#endif
#if defined(__aarch64__) || defined(_M_ARM64)
# define SANITIZER_ARM64 1
#else
# define SANITIZER_ARM64 0
#endif
#if SANITIZER_SOLARIS && SANITIZER_WORDSIZE == 32
# define SANITIZER_SOLARIS32 1
# define SANITIZER_SOLARIS32 1
#else
# define SANITIZER_SOLARIS32 0
# define SANITIZER_SOLARIS32 0
#endif
#if defined(__riscv) && (__riscv_xlen == 64)
#define SANITIZER_RISCV64 1
# define SANITIZER_RISCV64 1
#else
#define SANITIZER_RISCV64 0
# define SANITIZER_RISCV64 0
#endif
#if defined(__loongarch_lp64)
# define SANITIZER_LOONGARCH64 1
#else
# define SANITIZER_LOONGARCH64 0
#endif
// By default we allow to use SizeClassAllocator64 on 64-bit platform.
// But in some cases (e.g. AArch64's 39-bit address space) SizeClassAllocator64
// does not work well and we need to fallback to SizeClassAllocator32.
// But in some cases SizeClassAllocator64 does not work well and we need to
// fallback to SizeClassAllocator32.
// For such platforms build this code with -DSANITIZER_CAN_USE_ALLOCATOR64=0 or
// change the definition of SANITIZER_CAN_USE_ALLOCATOR64 here.
#ifndef SANITIZER_CAN_USE_ALLOCATOR64
# if (SANITIZER_ANDROID && defined(__aarch64__)) || SANITIZER_FUCHSIA
# define SANITIZER_CAN_USE_ALLOCATOR64 1
# elif defined(__mips64) || defined(__aarch64__)
# define SANITIZER_CAN_USE_ALLOCATOR64 0
# else
# define SANITIZER_CAN_USE_ALLOCATOR64 (SANITIZER_WORDSIZE == 64)
# endif
# if SANITIZER_RISCV64 || SANITIZER_IOS
# define SANITIZER_CAN_USE_ALLOCATOR64 0
# elif defined(__mips64) || defined(__hexagon__)
# define SANITIZER_CAN_USE_ALLOCATOR64 0
# else
# define SANITIZER_CAN_USE_ALLOCATOR64 (SANITIZER_WORDSIZE == 64)
# endif
#endif
// The range of addresses which can be returned my mmap.
// FIXME: this value should be different on different platforms. Larger values
// will still work but will consume more memory for TwoLevelByteMap.
#if defined(__mips__)
#if SANITIZER_GO && defined(__mips64)
#define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47)
#else
# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 40)
#endif
#elif SANITIZER_RISCV64
#define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 38)
#elif defined(__aarch64__)
# if SANITIZER_MAC
# if SANITIZER_OSX || SANITIZER_IOSSIM
# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47)
# if SANITIZER_GO && defined(__mips64)
# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47)
# else
// Darwin iOS/ARM64 has a 36-bit VMA, 64GiB VM
# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 36)
# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 40)
# endif
#elif SANITIZER_RISCV64
# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 38)
#elif defined(__aarch64__)
# if SANITIZER_APPLE
# if SANITIZER_OSX || SANITIZER_IOSSIM
# define SANITIZER_MMAP_RANGE_SIZE \
FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47)
# else
// Darwin iOS/ARM64 has a 36-bit VMA, 64GiB VM
# define SANITIZER_MMAP_RANGE_SIZE \
FIRST_32_SECOND_64(1ULL << 32, 1ULL << 36)
# endif
# else
# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 48)
# endif
# else
# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 48)
# endif
#elif defined(__sparc__)
#define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 52)
# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 52)
#else
# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47)
# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47)
#endif
// Whether the addresses are sign-extended from the VMA range to the word.
// The SPARC64 Linux port implements this to split the VMA space into two
// non-contiguous halves with a huge hole in the middle.
#if defined(__sparc__) && SANITIZER_WORDSIZE == 64
#define SANITIZER_SIGN_EXTENDED_ADDRESSES 1
# define SANITIZER_SIGN_EXTENDED_ADDRESSES 1
#else
#define SANITIZER_SIGN_EXTENDED_ADDRESSES 0
#endif
// The AArch64 and RISC-V linux ports use the canonical syscall set as
// mandated by the upstream linux community for all new ports. Other ports
// may still use legacy syscalls.
#ifndef SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
# if (defined(__aarch64__) || defined(__riscv)) && SANITIZER_LINUX
# define SANITIZER_USES_CANONICAL_LINUX_SYSCALLS 1
# else
# define SANITIZER_USES_CANONICAL_LINUX_SYSCALLS 0
# endif
# define SANITIZER_SIGN_EXTENDED_ADDRESSES 0
#endif
// udi16 syscalls can only be used when the following conditions are
@ -296,15 +340,15 @@
// Since we don't want to include libc headers here, we check the
// target only.
#if defined(__arm__) || SANITIZER_X32 || defined(__sparc__)
#define SANITIZER_USES_UID16_SYSCALLS 1
# define SANITIZER_USES_UID16_SYSCALLS 1
#else
#define SANITIZER_USES_UID16_SYSCALLS 0
# define SANITIZER_USES_UID16_SYSCALLS 0
#endif
#if defined(__mips__)
# define SANITIZER_POINTER_FORMAT_LENGTH FIRST_32_SECOND_64(8, 10)
# define SANITIZER_POINTER_FORMAT_LENGTH FIRST_32_SECOND_64(8, 10)
#else
# define SANITIZER_POINTER_FORMAT_LENGTH FIRST_32_SECOND_64(8, 12)
# define SANITIZER_POINTER_FORMAT_LENGTH FIRST_32_SECOND_64(8, 12)
#endif
/// \macro MSC_PREREQ
@ -313,15 +357,15 @@
/// * 1800: Microsoft Visual Studio 2013 / 12.0
/// * 1900: Microsoft Visual Studio 2015 / 14.0
#ifdef _MSC_VER
# define MSC_PREREQ(version) (_MSC_VER >= (version))
# define MSC_PREREQ(version) (_MSC_VER >= (version))
#else
# define MSC_PREREQ(version) 0
# define MSC_PREREQ(version) 0
#endif
#if SANITIZER_MAC && !(defined(__arm64__) && SANITIZER_IOS)
# define SANITIZER_NON_UNIQUE_TYPEINFO 0
#if SANITIZER_APPLE && defined(__x86_64__)
# define SANITIZER_NON_UNIQUE_TYPEINFO 0
#else
# define SANITIZER_NON_UNIQUE_TYPEINFO 1
# define SANITIZER_NON_UNIQUE_TYPEINFO 1
#endif
// On linux, some architectures had an ABI transition from 64-bit long double
@ -329,11 +373,11 @@
// involving long doubles come in two versions, and we need to pass the
// correct one to dlvsym when intercepting them.
#if SANITIZER_LINUX && (SANITIZER_S390 || SANITIZER_PPC32 || SANITIZER_PPC64V1)
#define SANITIZER_NLDBL_VERSION "GLIBC_2.4"
# define SANITIZER_NLDBL_VERSION "GLIBC_2.4"
#endif
#if SANITIZER_GO == 0
# define SANITIZER_GO 0
# define SANITIZER_GO 0
#endif
// On PowerPC and ARM Thumb, calling pthread_exit() causes LSan to detect leaks.
@ -341,40 +385,64 @@
// dlopen mallocs "libgcc_s.so" string which confuses LSan, it fails to realize
// that this allocation happens in dynamic linker and should be ignored.
#if SANITIZER_PPC || defined(__thumb__)
# define SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT 1
# define SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT 1
#else
# define SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT 0
# define SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT 0
#endif
#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_NETBSD || \
SANITIZER_SOLARIS
# define SANITIZER_MADVISE_DONTNEED MADV_FREE
#if SANITIZER_FREEBSD || SANITIZER_APPLE || SANITIZER_NETBSD || SANITIZER_SOLARIS
# define SANITIZER_MADVISE_DONTNEED MADV_FREE
#else
# define SANITIZER_MADVISE_DONTNEED MADV_DONTNEED
# define SANITIZER_MADVISE_DONTNEED MADV_DONTNEED
#endif
// Older gcc have issues aligning to a constexpr, and require an integer.
// See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=56859 among others.
#if defined(__powerpc__) || defined(__powerpc64__)
# define SANITIZER_CACHE_LINE_SIZE 128
# define SANITIZER_CACHE_LINE_SIZE 128
#else
# define SANITIZER_CACHE_LINE_SIZE 64
# define SANITIZER_CACHE_LINE_SIZE 64
#endif
// Enable offline markup symbolizer for Fuchsia.
#if SANITIZER_FUCHSIA
# define SANITIZER_SYMBOLIZER_MARKUP 1
#else
#define SANITIZER_SYMBOLIZER_MARKUP 0
# define SANITIZER_SYMBOLIZER_MARKUP 0
#endif
// Enable ability to support sanitizer initialization that is
// compatible with the sanitizer library being loaded via
// `dlopen()`.
#if SANITIZER_MAC
#define SANITIZER_SUPPORTS_INIT_FOR_DLOPEN 1
#if SANITIZER_APPLE
# define SANITIZER_SUPPORTS_INIT_FOR_DLOPEN 1
#else
#define SANITIZER_SUPPORTS_INIT_FOR_DLOPEN 0
# define SANITIZER_SUPPORTS_INIT_FOR_DLOPEN 0
#endif
#endif // SANITIZER_PLATFORM_H
// SANITIZER_SUPPORTS_THREADLOCAL
// 1 - THREADLOCAL macro is supported by target
// 0 - THREADLOCAL macro is not supported by target
#ifndef __has_feature
// TODO: Support other compilers here
# define SANITIZER_SUPPORTS_THREADLOCAL 1
#else
# if __has_feature(tls)
# define SANITIZER_SUPPORTS_THREADLOCAL 1
# else
# define SANITIZER_SUPPORTS_THREADLOCAL 0
# endif
#endif
#if defined(__thumb__) && defined(__linux__)
// Workaround for
// https://lab.llvm.org/buildbot/#/builders/clang-thumbv7-full-2stage
// or
// https://lab.llvm.org/staging/#/builders/clang-thumbv7-full-2stage
// It fails *rss_limit_mb_test* without meaningful errors.
# define SANITIZER_START_BACKGROUND_THREAD_IN_ASAN_INTERNAL 1
#else
# define SANITIZER_START_BACKGROUND_THREAD_IN_ASAN_INTERNAL 0
#endif
#endif // SANITIZER_PLATFORM_H

View File

@ -76,7 +76,7 @@
#define SI_LINUX 0
#endif
#if SANITIZER_MAC
#if SANITIZER_APPLE
#define SI_MAC 1
#define SI_NOT_MAC 0
#else
@ -126,7 +126,7 @@
#define SI_SOLARIS32 0
#endif
#if SANITIZER_POSIX && !SANITIZER_MAC
#if SANITIZER_POSIX && !SANITIZER_APPLE
#define SI_POSIX_NOT_MAC 1
#else
#define SI_POSIX_NOT_MAC 0
@ -229,11 +229,15 @@
(SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
#define SANITIZER_INTERCEPT_CLOCK_GETTIME \
(SI_FREEBSD || SI_NETBSD || SI_LINUX || SI_SOLARIS)
#define SANITIZER_INTERCEPT_CLOCK_GETCPUCLOCKID SI_LINUX
#define SANITIZER_INTERCEPT_CLOCK_GETCPUCLOCKID \
(SI_LINUX || SI_FREEBSD || SI_NETBSD)
#define SANITIZER_INTERCEPT_GETITIMER SI_POSIX
#define SANITIZER_INTERCEPT_TIME SI_POSIX
#define SANITIZER_INTERCEPT_GLOB (SI_GLIBC || SI_SOLARIS)
#define SANITIZER_INTERCEPT_GLOB64 SI_GLIBC
#define SANITIZER_INTERCEPT___B64_TO SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_DN_COMP_EXPAND SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_POSIX_SPAWN SI_POSIX
#define SANITIZER_INTERCEPT_WAIT SI_POSIX
#define SANITIZER_INTERCEPT_INET SI_POSIX
#define SANITIZER_INTERCEPT_PTHREAD_GETSCHEDPARAM SI_POSIX
@ -251,7 +255,8 @@
#define SANITIZER_INTERCEPT_GETHOSTENT_R (SI_FREEBSD || SI_GLIBC || SI_SOLARIS)
#define SANITIZER_INTERCEPT_GETSOCKOPT SI_POSIX
#define SANITIZER_INTERCEPT_ACCEPT SI_POSIX
#define SANITIZER_INTERCEPT_ACCEPT4 (SI_LINUX_NOT_ANDROID || SI_NETBSD)
#define SANITIZER_INTERCEPT_ACCEPT4 \
(SI_LINUX_NOT_ANDROID || SI_NETBSD || SI_FREEBSD)
#define SANITIZER_INTERCEPT_PACCEPT SI_NETBSD
#define SANITIZER_INTERCEPT_MODF SI_POSIX
#define SANITIZER_INTERCEPT_RECVMSG SI_POSIX
@ -264,11 +269,11 @@
#define SANITIZER_INTERCEPT_INET_ATON SI_POSIX
#define SANITIZER_INTERCEPT_SYSINFO SI_LINUX
#define SANITIZER_INTERCEPT_READDIR SI_POSIX
#define SANITIZER_INTERCEPT_READDIR64 SI_LINUX_NOT_ANDROID || SI_SOLARIS32
#define SANITIZER_INTERCEPT_READDIR64 SI_GLIBC || SI_SOLARIS32
#if SI_LINUX_NOT_ANDROID && \
(defined(__i386) || defined(__x86_64) || defined(__mips64) || \
defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \
defined(__s390__) || SANITIZER_RISCV64)
defined(__s390__) || defined(__loongarch__) || SANITIZER_RISCV64)
#define SANITIZER_INTERCEPT_PTRACE 1
#else
#define SANITIZER_INTERCEPT_PTRACE 0
@ -303,13 +308,13 @@
#define SANITIZER_INTERCEPT_XPG_STRERROR_R SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_SCANDIR \
(SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
#define SANITIZER_INTERCEPT_SCANDIR64 SI_LINUX_NOT_ANDROID || SI_SOLARIS32
#define SANITIZER_INTERCEPT_SCANDIR64 SI_GLIBC || SI_SOLARIS32
#define SANITIZER_INTERCEPT_GETGROUPS SI_POSIX
#define SANITIZER_INTERCEPT_POLL SI_POSIX
#define SANITIZER_INTERCEPT_PPOLL SI_LINUX_NOT_ANDROID || SI_SOLARIS
#define SANITIZER_INTERCEPT_WORDEXP \
(SI_FREEBSD || SI_NETBSD || (SI_MAC && !SI_IOS) || SI_LINUX_NOT_ANDROID || \
SI_SOLARIS) // NOLINT
SI_SOLARIS)
#define SANITIZER_INTERCEPT_SIGWAIT SI_POSIX
#define SANITIZER_INTERCEPT_SIGWAITINFO SI_LINUX_NOT_ANDROID || SI_SOLARIS
#define SANITIZER_INTERCEPT_SIGTIMEDWAIT SI_LINUX_NOT_ANDROID || SI_SOLARIS
@ -325,11 +330,10 @@
#define SANITIZER_INTERCEPT_GETMNTENT_R SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_STATFS \
(SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
#define SANITIZER_INTERCEPT_STATFS64 \
(((SI_MAC && !TARGET_CPU_ARM64) && !SI_IOS) || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_STATFS64 SI_GLIBC && SANITIZER_HAS_STATFS64
#define SANITIZER_INTERCEPT_STATVFS \
(SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_STATVFS64 SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_STATVFS64 SI_GLIBC
#define SANITIZER_INTERCEPT_INITGROUPS SI_POSIX
#define SANITIZER_INTERCEPT_ETHER_NTOA_ATON SI_POSIX
#define SANITIZER_INTERCEPT_ETHER_HOST \
@ -337,12 +341,14 @@
#define SANITIZER_INTERCEPT_ETHER_R (SI_FREEBSD || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_SHMCTL \
(((SI_FREEBSD || SI_LINUX_NOT_ANDROID) && SANITIZER_WORDSIZE == 64) || \
SI_NETBSD || SI_SOLARIS) // NOLINT
SI_NETBSD || SI_SOLARIS)
#define SANITIZER_INTERCEPT_RANDOM_R SI_GLIBC
#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GET SI_POSIX
#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETINHERITSCHED \
(SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETAFFINITY_NP SI_GLIBC
#define SANITIZER_INTERCEPT_PTHREAD_GETAFFINITY_NP \
(SI_LINUX_NOT_ANDROID || SI_FREEBSD)
#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GET_SCHED SI_POSIX
#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPSHARED \
(SI_POSIX && !SI_NETBSD)
@ -362,6 +368,8 @@
(SI_LINUX_NOT_ANDROID || SI_SOLARIS)
#define SANITIZER_INTERCEPT_PTHREAD_BARRIERATTR_GETPSHARED \
(SI_LINUX_NOT_ANDROID && !SI_NETBSD)
#define SANITIZER_INTERCEPT_TRYJOIN SI_GLIBC
#define SANITIZER_INTERCEPT_TIMEDJOIN SI_GLIBC
#define SANITIZER_INTERCEPT_THR_EXIT SI_FREEBSD
#define SANITIZER_INTERCEPT_TMPNAM SI_POSIX
#define SANITIZER_INTERCEPT_TMPNAM_R (SI_GLIBC || SI_SOLARIS)
@ -391,8 +399,6 @@
#define SANITIZER_INTERCEPT__EXIT \
(SI_LINUX || SI_FREEBSD || SI_NETBSD || SI_MAC || SI_SOLARIS)
#define SANITIZER_INTERCEPT_PTHREAD_MUTEX SI_POSIX
#define SANITIZER_INTERCEPT___PTHREAD_MUTEX SI_GLIBC
#define SANITIZER_INTERCEPT___LIBC_MUTEX SI_NETBSD
#define SANITIZER_INTERCEPT_PTHREAD_SETNAME_NP \
(SI_FREEBSD || SI_NETBSD || SI_GLIBC || SI_SOLARIS)
@ -400,7 +406,7 @@
(SI_FREEBSD || SI_NETBSD || SI_GLIBC || SI_SOLARIS)
#define SANITIZER_INTERCEPT_TLS_GET_ADDR \
(SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
(SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_LISTXATTR SI_LINUX
#define SANITIZER_INTERCEPT_GETXATTR SI_LINUX
@ -445,7 +451,8 @@
#define SANITIZER_INTERCEPT_SEM \
(SI_LINUX || SI_FREEBSD || SI_NETBSD || SI_SOLARIS)
#define SANITIZER_INTERCEPT_PTHREAD_SETCANCEL SI_POSIX
#define SANITIZER_INTERCEPT_MINCORE (SI_LINUX || SI_NETBSD || SI_SOLARIS)
#define SANITIZER_INTERCEPT_MINCORE \
(SI_LINUX || SI_NETBSD || SI_FREEBSD || SI_SOLARIS)
#define SANITIZER_INTERCEPT_PROCESS_VM_READV SI_LINUX
#define SANITIZER_INTERCEPT_CTERMID \
(SI_LINUX || SI_MAC || SI_FREEBSD || SI_NETBSD || SI_SOLARIS)
@ -457,13 +464,17 @@
#define SANITIZER_INTERCEPT_SEND_SENDTO SI_POSIX
#define SANITIZER_INTERCEPT_EVENTFD_READ_WRITE SI_LINUX
#define SANITIZER_INTERCEPT_STAT \
(SI_FREEBSD || SI_MAC || SI_ANDROID || SI_NETBSD || SI_SOLARIS)
#define SANITIZER_INTERCEPT_LSTAT (SI_NETBSD || SI_FREEBSD)
#define SANITIZER_INTERCEPT___XSTAT (!SANITIZER_INTERCEPT_STAT && SI_POSIX)
#define SANITIZER_INTERCEPT___XSTAT64 SI_LINUX_NOT_ANDROID
#define SI_STAT_LINUX (SI_LINUX && __GLIBC_PREREQ(2, 33))
#define SANITIZER_INTERCEPT_STAT \
(SI_FREEBSD || SI_MAC || SI_ANDROID || SI_NETBSD || SI_SOLARIS || \
SI_STAT_LINUX)
#define SANITIZER_INTERCEPT_STAT64 SI_STAT_LINUX && SANITIZER_HAS_STAT64
#define SANITIZER_INTERCEPT_LSTAT (SI_NETBSD || SI_FREEBSD || SI_STAT_LINUX)
#define SANITIZER_INTERCEPT___XSTAT \
((!SANITIZER_INTERCEPT_STAT && SI_POSIX) || SI_STAT_LINUX)
#define SANITIZER_INTERCEPT___XSTAT64 SI_GLIBC
#define SANITIZER_INTERCEPT___LXSTAT SANITIZER_INTERCEPT___XSTAT
#define SANITIZER_INTERCEPT___LXSTAT64 SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT___LXSTAT64 SI_GLIBC
#define SANITIZER_INTERCEPT_UTMP \
(SI_POSIX && !SI_MAC && !SI_FREEBSD && !SI_NETBSD)
@ -474,7 +485,7 @@
(SI_LINUX_NOT_ANDROID || SI_MAC || SI_FREEBSD || SI_NETBSD)
#define SANITIZER_INTERCEPT_MMAP SI_POSIX
#define SANITIZER_INTERCEPT_MMAP64 SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_MMAP64 SI_GLIBC || SI_SOLARIS
#define SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO (SI_GLIBC || SI_ANDROID)
#define SANITIZER_INTERCEPT_MEMALIGN (!SI_FREEBSD && !SI_MAC && !SI_NETBSD)
#define SANITIZER_INTERCEPT___LIBC_MEMALIGN SI_GLIBC
@ -484,6 +495,7 @@
#define SANITIZER_INTERCEPT_ALIGNED_ALLOC (!SI_MAC)
#define SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE (!SI_MAC && !SI_NETBSD)
#define SANITIZER_INTERCEPT_MCHECK_MPROBE SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_WCSLEN 1
#define SANITIZER_INTERCEPT_WCSCAT SI_POSIX
#define SANITIZER_INTERCEPT_WCSDUP SI_POSIX
#define SANITIZER_INTERCEPT_SIGNAL_AND_SIGACTION (!SI_WINDOWS && SI_NOT_FUCHSIA)
@ -496,7 +508,8 @@
#define SANITIZER_INTERCEPT_GID_FROM_GROUP SI_NETBSD
#define SANITIZER_INTERCEPT_ACCESS (SI_NETBSD || SI_FREEBSD)
#define SANITIZER_INTERCEPT_FACCESSAT (SI_NETBSD || SI_FREEBSD)
#define SANITIZER_INTERCEPT_GETGROUPLIST SI_NETBSD
#define SANITIZER_INTERCEPT_GETGROUPLIST \
(SI_NETBSD || SI_FREEBSD || SI_LINUX)
#define SANITIZER_INTERCEPT_STRLCPY \
(SI_NETBSD || SI_FREEBSD || SI_MAC || SI_ANDROID)
@ -517,10 +530,11 @@
#define SANITIZER_INTERCEPT_DEVNAME_R (SI_NETBSD || SI_FREEBSD)
#define SANITIZER_INTERCEPT_FGETLN (SI_NETBSD || SI_FREEBSD)
#define SANITIZER_INTERCEPT_STRMODE (SI_NETBSD || SI_FREEBSD)
#define SANITIZER_INTERCEPT_TTYENT SI_NETBSD
#define SANITIZER_INTERCEPT_PROTOENT (SI_NETBSD || SI_LINUX)
#define SANITIZER_INTERCEPT_TTYENT (SI_NETBSD || SI_FREEBSD)
#define SANITIZER_INTERCEPT_TTYENTPATH SI_NETBSD
#define SANITIZER_INTERCEPT_PROTOENT (SI_LINUX || SI_NETBSD || SI_FREEBSD)
#define SANITIZER_INTERCEPT_PROTOENT_R SI_GLIBC
#define SANITIZER_INTERCEPT_NETENT SI_NETBSD
#define SANITIZER_INTERCEPT_NETENT (SI_LINUX || SI_NETBSD || SI_FREEBSD)
#define SANITIZER_INTERCEPT_SETVBUF \
(SI_NETBSD || SI_FREEBSD || SI_LINUX || SI_MAC)
#define SANITIZER_INTERCEPT_GETMNTINFO (SI_NETBSD || SI_FREEBSD || SI_MAC)
@ -536,17 +550,17 @@
#define SANITIZER_INTERCEPT_MODCTL SI_NETBSD
#define SANITIZER_INTERCEPT_CAPSICUM SI_FREEBSD
#define SANITIZER_INTERCEPT_STRTONUM (SI_NETBSD || SI_FREEBSD)
#define SANITIZER_INTERCEPT_FPARSELN SI_NETBSD
#define SANITIZER_INTERCEPT_FPARSELN (SI_NETBSD || SI_FREEBSD)
#define SANITIZER_INTERCEPT_STATVFS1 SI_NETBSD
#define SANITIZER_INTERCEPT_STRTOI SI_NETBSD
#define SANITIZER_INTERCEPT_CAPSICUM SI_FREEBSD
#define SANITIZER_INTERCEPT_SHA1 SI_NETBSD
#define SANITIZER_INTERCEPT_MD4 SI_NETBSD
#define SANITIZER_INTERCEPT_RMD160 SI_NETBSD
#define SANITIZER_INTERCEPT_MD5 SI_NETBSD
#define SANITIZER_INTERCEPT_MD5 (SI_NETBSD || SI_FREEBSD)
#define SANITIZER_INTERCEPT_FSEEK (SI_NETBSD || SI_FREEBSD)
#define SANITIZER_INTERCEPT_MD2 SI_NETBSD
#define SANITIZER_INTERCEPT_SHA2 SI_NETBSD
#define SANITIZER_INTERCEPT_SHA2 (SI_NETBSD || SI_FREEBSD)
#define SANITIZER_INTERCEPT_CDB SI_NETBSD
#define SANITIZER_INTERCEPT_VIS (SI_NETBSD || SI_FREEBSD)
#define SANITIZER_INTERCEPT_POPEN SI_POSIX
@ -559,25 +573,30 @@
#define SANITIZER_INTERCEPT_FDEVNAME SI_FREEBSD
#define SANITIZER_INTERCEPT_GETUSERSHELL (SI_POSIX && !SI_ANDROID)
#define SANITIZER_INTERCEPT_SL_INIT (SI_FREEBSD || SI_NETBSD)
#define SANITIZER_INTERCEPT_CRYPT (SI_POSIX && !SI_ANDROID)
#define SANITIZER_INTERCEPT_CRYPT_R (SI_LINUX && !SI_ANDROID)
#define SANITIZER_INTERCEPT_GETRANDOM \
((SI_LINUX && __GLIBC_PREREQ(2, 25)) || SI_FREEBSD)
#define SANITIZER_INTERCEPT___CXA_ATEXIT SI_NETBSD
#define SANITIZER_INTERCEPT_ATEXIT SI_NETBSD
#define SANITIZER_INTERCEPT_PTHREAD_ATFORK SI_NETBSD
#define SANITIZER_INTERCEPT_GETENTROPY SI_FREEBSD
#define SANITIZER_INTERCEPT_GETENTROPY \
((SI_LINUX && __GLIBC_PREREQ(2, 25)) || SI_FREEBSD)
#define SANITIZER_INTERCEPT_QSORT \
(SI_POSIX && !SI_IOSSIM && !SI_WATCHOS && !SI_TVOS && !SI_ANDROID)
#define SANITIZER_INTERCEPT_QSORT_R SI_GLIBC
#define SANITIZER_INTERCEPT_BSEARCH \
(SI_POSIX && !SI_IOSSIM && !SI_WATCHOS && !SI_TVOS && !SI_ANDROID)
// sigaltstack on i386 macOS cannot be intercepted due to setjmp()
// calling it and assuming that it does not clobber registers.
#define SANITIZER_INTERCEPT_SIGALTSTACK \
(SI_POSIX && !(SANITIZER_MAC && SANITIZER_I386))
(SI_POSIX && !(SANITIZER_APPLE && SANITIZER_I386))
#define SANITIZER_INTERCEPT_UNAME (SI_POSIX && !SI_FREEBSD)
#define SANITIZER_INTERCEPT___XUNAME SI_FREEBSD
#define SANITIZER_INTERCEPT_FLOPEN SI_FREEBSD
#define SANITIZER_INTERCEPT_PROCCTL SI_FREEBSD
#define SANITIZER_INTERCEPT_HEXDUMP SI_FREEBSD
#define SANITIZER_INTERCEPT_ARGP_PARSE SI_GLIBC
#define SANITIZER_INTERCEPT_CPUSET_GETAFFINITY SI_FREEBSD
// This macro gives a way for downstream users to override the above
// interceptor macros irrespective of the platform they are on. They have

View File

@ -17,6 +17,7 @@
#include <sys/capsicum.h>
#include <sys/consio.h>
#include <sys/cpuset.h>
#include <sys/filio.h>
#include <sys/ipc.h>
#include <sys/kbio.h>
@ -69,11 +70,17 @@
#include <semaphore.h>
#include <signal.h>
#include <stddef.h>
#include <md5.h>
#include <sha224.h>
#include <sha256.h>
#include <sha384.h>
#include <sha512.h>
#include <stdio.h>
#include <stringlist.h>
#include <term.h>
#include <termios.h>
#include <time.h>
#include <ttyent.h>
#include <utime.h>
#include <utmpx.h>
#include <vis.h>
@ -97,6 +104,7 @@ void *__sanitizer_get_link_map_by_dlopen_handle(void *handle) {
return internal_dlinfo(handle, RTLD_DI_LINKMAP, &p) == 0 ? p : nullptr;
}
unsigned struct_cpuset_sz = sizeof(cpuset_t);
unsigned struct_cap_rights_sz = sizeof(cap_rights_t);
unsigned struct_utsname_sz = sizeof(struct utsname);
unsigned struct_stat_sz = sizeof(struct stat);
@ -124,7 +132,7 @@ unsigned struct_sigevent_sz = sizeof(struct sigevent);
unsigned struct_sched_param_sz = sizeof(struct sched_param);
unsigned struct_statfs_sz = sizeof(struct statfs);
unsigned struct_sockaddr_sz = sizeof(struct sockaddr);
unsigned ucontext_t_sz = sizeof(ucontext_t);
unsigned ucontext_t_sz(void *ctx) { return sizeof(ucontext_t); }
unsigned struct_rlimit_sz = sizeof(struct rlimit);
unsigned struct_timespec_sz = sizeof(struct timespec);
unsigned struct_utimbuf_sz = sizeof(struct utimbuf);
@ -167,12 +175,21 @@ uptr __sanitizer_in_addr_sz(int af) {
return 0;
}
// For FreeBSD the actual size of a directory entry is not always in d_reclen.
// Use the appropriate macro to get the correct size for all cases (e.g. NFS).
u16 __sanitizer_dirsiz(const __sanitizer_dirent *dp) {
return _GENERIC_DIRSIZ(dp);
}
unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
int glob_nomatch = GLOB_NOMATCH;
int glob_altdirfunc = GLOB_ALTDIRFUNC;
const int wordexp_wrde_dooffs = WRDE_DOOFFS;
unsigned path_max = PATH_MAX;
int struct_ttyent_sz = sizeof(struct ttyent);
// ioctl arguments
unsigned struct_ifreq_sz = sizeof(struct ifreq);
unsigned struct_termios_sz = sizeof(struct termios);
@ -196,6 +213,10 @@ unsigned struct_audio_buf_info_sz = sizeof(struct audio_buf_info);
unsigned struct_ppp_stats_sz = sizeof(struct ppp_stats);
unsigned struct_sioc_sg_req_sz = sizeof(struct sioc_sg_req);
unsigned struct_sioc_vif_req_sz = sizeof(struct sioc_vif_req);
unsigned struct_procctl_reaper_status_sz = sizeof(struct __sanitizer_procctl_reaper_status);
unsigned struct_procctl_reaper_pidinfo_sz = sizeof(struct __sanitizer_procctl_reaper_pidinfo);
unsigned struct_procctl_reaper_pids_sz = sizeof(struct __sanitizer_procctl_reaper_pids);
unsigned struct_procctl_reaper_kill_sz = sizeof(struct __sanitizer_procctl_reaper_kill);
const unsigned long __sanitizer_bufsiz = BUFSIZ;
const unsigned IOCTL_NOT_PRESENT = 0;
@ -357,6 +378,22 @@ const int si_SEGV_MAPERR = SEGV_MAPERR;
const int si_SEGV_ACCERR = SEGV_ACCERR;
const int unvis_valid = UNVIS_VALID;
const int unvis_validpush = UNVIS_VALIDPUSH;
const unsigned MD5_CTX_sz = sizeof(MD5_CTX);
const unsigned MD5_return_length = MD5_DIGEST_STRING_LENGTH;
#define SHA2_CONST(LEN) \
const unsigned SHA##LEN##_CTX_sz = sizeof(SHA##LEN##_CTX); \
const unsigned SHA##LEN##_return_length = SHA##LEN##_DIGEST_STRING_LENGTH; \
const unsigned SHA##LEN##_block_length = SHA##LEN##_BLOCK_LENGTH; \
const unsigned SHA##LEN##_digest_length = SHA##LEN##_DIGEST_LENGTH
SHA2_CONST(224);
SHA2_CONST(256);
SHA2_CONST(384);
SHA2_CONST(512);
#undef SHA2_CONST
} // namespace __sanitizer
using namespace __sanitizer;
@ -529,4 +566,5 @@ COMPILER_CHECK(__sanitizer_XDR_FREE == XDR_FREE);
CHECK_TYPE_SIZE(sem_t);
COMPILER_CHECK(sizeof(__sanitizer_cap_rights_t) >= sizeof(cap_rights_t));
COMPILER_CHECK(sizeof(__sanitizer_cpuset_t) >= sizeof(cpuset_t));
#endif // SANITIZER_FREEBSD

View File

@ -16,26 +16,26 @@
#if SANITIZER_FREEBSD
#include "sanitizer_internal_defs.h"
#include "sanitizer_platform.h"
#include "sanitizer_platform_limits_posix.h"
# include "sanitizer_internal_defs.h"
# include "sanitizer_platform.h"
# include "sanitizer_platform_limits_posix.h"
// Get sys/_types.h, because that tells us whether 64-bit inodes are
// used in struct dirent below.
#include <sys/_types.h>
# include <sys/_types.h>
namespace __sanitizer {
void *__sanitizer_get_link_map_by_dlopen_handle(void *handle);
#define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) \
(link_map *)__sanitizer_get_link_map_by_dlopen_handle(handle)
# define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) \
(link_map *)__sanitizer_get_link_map_by_dlopen_handle(handle)
extern unsigned struct_utsname_sz;
extern unsigned struct_stat_sz;
#if defined(__powerpc64__)
# if defined(__powerpc64__)
const unsigned struct___old_kernel_stat_sz = 0;
#else
# else
const unsigned struct___old_kernel_stat_sz = 32;
#endif
# endif
extern unsigned struct_rusage_sz;
extern unsigned siginfo_t_sz;
extern unsigned struct_itimerval_sz;
@ -57,7 +57,7 @@ extern unsigned struct_sched_param_sz;
extern unsigned struct_statfs64_sz;
extern unsigned struct_statfs_sz;
extern unsigned struct_sockaddr_sz;
extern unsigned ucontext_t_sz;
unsigned ucontext_t_sz(void *ctx);
extern unsigned struct_rlimit_sz;
extern unsigned struct_utimbuf_sz;
extern unsigned struct_timespec_sz;
@ -114,11 +114,24 @@ struct __sanitizer_ipc_perm {
long key;
};
#if !defined(__i386__)
struct __sanitizer_protoent {
char *p_name;
char **p_aliases;
int p_proto;
};
struct __sanitizer_netent {
char *n_name;
char **n_aliases;
int n_addrtype;
u32 n_net;
};
# if !defined(__i386__)
typedef long long __sanitizer_time_t;
#else
# else
typedef long __sanitizer_time_t;
#endif
# endif
struct __sanitizer_shmid_ds {
__sanitizer_ipc_perm shm_perm;
@ -147,7 +160,7 @@ struct __sanitizer_ifaddrs {
unsigned int ifa_flags;
void *ifa_addr; // (struct sockaddr *)
void *ifa_netmask; // (struct sockaddr *)
#undef ifa_dstaddr
# undef ifa_dstaddr
void *ifa_dstaddr; // (struct sockaddr *)
void *ifa_data;
};
@ -229,37 +242,43 @@ struct __sanitizer_cmsghdr {
};
struct __sanitizer_dirent {
#if defined(__INO64)
# if defined(__INO64)
unsigned long long d_fileno;
unsigned long long d_off;
#else
# else
unsigned int d_fileno;
#endif
# endif
unsigned short d_reclen;
// more fields that we don't care about
u8 d_type;
u8 d_pad0;
u16 d_namlen;
u16 d_pad1;
char d_name[256];
};
u16 __sanitizer_dirsiz(const __sanitizer_dirent *dp);
// 'clock_t' is 32 bits wide on x64 FreeBSD
typedef int __sanitizer_clock_t;
typedef int __sanitizer_clockid_t;
#if defined(_LP64) || defined(__x86_64__) || defined(__powerpc__) || \
defined(__mips__)
# if defined(_LP64) || defined(__x86_64__) || defined(__powerpc__) || \
defined(__mips__)
typedef unsigned __sanitizer___kernel_uid_t;
typedef unsigned __sanitizer___kernel_gid_t;
#else
# else
typedef unsigned short __sanitizer___kernel_uid_t;
typedef unsigned short __sanitizer___kernel_gid_t;
#endif
# endif
typedef long long __sanitizer___kernel_off_t;
#if defined(__powerpc__) || defined(__mips__)
# if defined(__powerpc__) || defined(__mips__)
typedef unsigned int __sanitizer___kernel_old_uid_t;
typedef unsigned int __sanitizer___kernel_old_gid_t;
#else
# else
typedef unsigned short __sanitizer___kernel_old_uid_t;
typedef unsigned short __sanitizer___kernel_old_gid_t;
#endif
# endif
typedef long long __sanitizer___kernel_loff_t;
typedef struct {
@ -366,9 +385,12 @@ struct __sanitizer_glob_t {
extern int glob_nomatch;
extern int glob_altdirfunc;
extern const int wordexp_wrde_dooffs;
extern unsigned path_max;
extern int struct_ttyent_sz;
struct __sanitizer_wordexp_t {
uptr we_wordc;
char **we_wordv;
@ -398,39 +420,81 @@ struct __sanitizer_ifconf {
} ifc_ifcu;
};
#define IOC_NRBITS 8
#define IOC_TYPEBITS 8
#if defined(__powerpc__) || defined(__powerpc64__) || defined(__mips__)
#define IOC_SIZEBITS 13
#define IOC_DIRBITS 3
#define IOC_NONE 1U
#define IOC_WRITE 4U
#define IOC_READ 2U
#else
#define IOC_SIZEBITS 14
#define IOC_DIRBITS 2
#define IOC_NONE 0U
#define IOC_WRITE 1U
#define IOC_READ 2U
#endif
#define IOC_NRMASK ((1 << IOC_NRBITS) - 1)
#define IOC_TYPEMASK ((1 << IOC_TYPEBITS) - 1)
#define IOC_SIZEMASK ((1 << IOC_SIZEBITS) - 1)
#if defined(IOC_DIRMASK)
#undef IOC_DIRMASK
#endif
#define IOC_DIRMASK ((1 << IOC_DIRBITS) - 1)
#define IOC_NRSHIFT 0
#define IOC_TYPESHIFT (IOC_NRSHIFT + IOC_NRBITS)
#define IOC_SIZESHIFT (IOC_TYPESHIFT + IOC_TYPEBITS)
#define IOC_DIRSHIFT (IOC_SIZESHIFT + IOC_SIZEBITS)
#define EVIOC_EV_MAX 0x1f
#define EVIOC_ABS_MAX 0x3f
struct __sanitizer__ttyent {
char *ty_name;
char *ty_getty;
char *ty_type;
int ty_status;
char *ty_window;
char *ty_comment;
char *ty_group;
};
#define IOC_DIR(nr) (((nr) >> IOC_DIRSHIFT) & IOC_DIRMASK)
#define IOC_TYPE(nr) (((nr) >> IOC_TYPESHIFT) & IOC_TYPEMASK)
#define IOC_NR(nr) (((nr) >> IOC_NRSHIFT) & IOC_NRMASK)
#define IOC_SIZE(nr) (((nr) >> IOC_SIZESHIFT) & IOC_SIZEMASK)
// procctl reaper data for PROCCTL_REAPER flags
struct __sanitizer_procctl_reaper_status {
unsigned int rs_flags;
unsigned int rs_children;
unsigned int rs_descendants;
pid_t rs_reaper;
pid_t rs_pid;
unsigned int rs_pad0[15];
};
struct __sanitizer_procctl_reaper_pidinfo {
pid_t pi_pid;
pid_t pi_subtree;
unsigned int pi_flags;
unsigned int pi_pad0[15];
};
struct __sanitizer_procctl_reaper_pids {
unsigned int rp_count;
unsigned int rp_pad0[15];
struct __sanitize_procctl_reapper_pidinfo *rp_pids;
};
struct __sanitizer_procctl_reaper_kill {
int rk_sig;
unsigned int rk_flags;
pid_t rk_subtree;
unsigned int rk_killed;
pid_t rk_fpid;
unsigned int rk_pad[15];
};
# define IOC_NRBITS 8
# define IOC_TYPEBITS 8
# if defined(__powerpc__) || defined(__powerpc64__) || defined(__mips__)
# define IOC_SIZEBITS 13
# define IOC_DIRBITS 3
# define IOC_NONE 1U
# define IOC_WRITE 4U
# define IOC_READ 2U
# else
# define IOC_SIZEBITS 14
# define IOC_DIRBITS 2
# define IOC_NONE 0U
# define IOC_WRITE 1U
# define IOC_READ 2U
# endif
# define IOC_NRMASK ((1 << IOC_NRBITS) - 1)
# define IOC_TYPEMASK ((1 << IOC_TYPEBITS) - 1)
# define IOC_SIZEMASK ((1 << IOC_SIZEBITS) - 1)
# if defined(IOC_DIRMASK)
# undef IOC_DIRMASK
# endif
# define IOC_DIRMASK ((1 << IOC_DIRBITS) - 1)
# define IOC_NRSHIFT 0
# define IOC_TYPESHIFT (IOC_NRSHIFT + IOC_NRBITS)
# define IOC_SIZESHIFT (IOC_TYPESHIFT + IOC_TYPEBITS)
# define IOC_DIRSHIFT (IOC_SIZESHIFT + IOC_SIZEBITS)
# define EVIOC_EV_MAX 0x1f
# define EVIOC_ABS_MAX 0x3f
# define IOC_DIR(nr) (((nr) >> IOC_DIRSHIFT) & IOC_DIRMASK)
# define IOC_TYPE(nr) (((nr) >> IOC_TYPESHIFT) & IOC_TYPEMASK)
# define IOC_NR(nr) (((nr) >> IOC_NRSHIFT) & IOC_NRMASK)
# define IOC_SIZE(nr) (((nr) >> IOC_SIZESHIFT) & IOC_SIZEMASK)
extern unsigned struct_ifreq_sz;
extern unsigned struct_termios_sz;
@ -454,6 +518,11 @@ extern unsigned struct_ppp_stats_sz;
extern unsigned struct_sioc_sg_req_sz;
extern unsigned struct_sioc_vif_req_sz;
extern unsigned struct_procctl_reaper_status_sz;
extern unsigned struct_procctl_reaper_pidinfo_sz;
extern unsigned struct_procctl_reaper_pids_sz;
extern unsigned struct_procctl_reaper_kill_sz;
// ioctl request identifiers
// A special value to mark ioctls that are not present on the target platform,
@ -621,6 +690,22 @@ extern unsigned IOCTL_KDSKBMODE;
extern const int si_SEGV_MAPERR;
extern const int si_SEGV_ACCERR;
extern const unsigned MD5_CTX_sz;
extern const unsigned MD5_return_length;
#define SHA2_EXTERN(LEN) \
extern const unsigned SHA##LEN##_CTX_sz; \
extern const unsigned SHA##LEN##_return_length; \
extern const unsigned SHA##LEN##_block_length; \
extern const unsigned SHA##LEN##_digest_length
SHA2_EXTERN(224);
SHA2_EXTERN(256);
SHA2_EXTERN(384);
SHA2_EXTERN(512);
#undef SHA2_EXTERN
struct __sanitizer_cap_rights {
u64 cr_rights[2];
};
@ -630,26 +715,37 @@ extern unsigned struct_cap_rights_sz;
extern unsigned struct_fstab_sz;
extern unsigned struct_StringList_sz;
struct __sanitizer_cpuset {
#if __FreeBSD_version >= 1400090
long __bits[(1024 + (sizeof(long) * 8) - 1) / (sizeof(long) * 8)];
#else
long __bits[(256 + (sizeof(long) * 8) - 1) / (sizeof(long) * 8)];
#endif
};
typedef struct __sanitizer_cpuset __sanitizer_cpuset_t;
extern unsigned struct_cpuset_sz;
} // namespace __sanitizer
#define CHECK_TYPE_SIZE(TYPE) \
COMPILER_CHECK(sizeof(__sanitizer_##TYPE) == sizeof(TYPE))
# define CHECK_TYPE_SIZE(TYPE) \
COMPILER_CHECK(sizeof(__sanitizer_##TYPE) == sizeof(TYPE))
#define CHECK_SIZE_AND_OFFSET(CLASS, MEMBER) \
COMPILER_CHECK(sizeof(((__sanitizer_##CLASS *)NULL)->MEMBER) == \
sizeof(((CLASS *)NULL)->MEMBER)); \
COMPILER_CHECK(offsetof(__sanitizer_##CLASS, MEMBER) == \
offsetof(CLASS, MEMBER))
# define CHECK_SIZE_AND_OFFSET(CLASS, MEMBER) \
COMPILER_CHECK(sizeof(((__sanitizer_##CLASS *)NULL)->MEMBER) == \
sizeof(((CLASS *)NULL)->MEMBER)); \
COMPILER_CHECK(offsetof(__sanitizer_##CLASS, MEMBER) == \
offsetof(CLASS, MEMBER))
// For sigaction, which is a function and struct at the same time,
// and thus requires explicit "struct" in sizeof() expression.
#define CHECK_STRUCT_SIZE_AND_OFFSET(CLASS, MEMBER) \
COMPILER_CHECK(sizeof(((struct __sanitizer_##CLASS *)NULL)->MEMBER) == \
sizeof(((struct CLASS *)NULL)->MEMBER)); \
COMPILER_CHECK(offsetof(struct __sanitizer_##CLASS, MEMBER) == \
offsetof(struct CLASS, MEMBER))
# define CHECK_STRUCT_SIZE_AND_OFFSET(CLASS, MEMBER) \
COMPILER_CHECK(sizeof(((struct __sanitizer_##CLASS *)NULL)->MEMBER) == \
sizeof(((struct CLASS *)NULL)->MEMBER)); \
COMPILER_CHECK(offsetof(struct __sanitizer_##CLASS, MEMBER) == \
offsetof(struct CLASS, MEMBER))
#define SIGACTION_SYMNAME sigaction
# define SIGACTION_SYMNAME sigaction
#endif

View File

@ -28,44 +28,39 @@
// are not defined anywhere in userspace headers. Fake them. This seems to work
// fine with newer headers, too.
#include <linux/posix_types.h>
#if defined(__x86_64__) || defined(__mips__)
#include <sys/stat.h>
#else
#define ino_t __kernel_ino_t
#define mode_t __kernel_mode_t
#define nlink_t __kernel_nlink_t
#define uid_t __kernel_uid_t
#define gid_t __kernel_gid_t
#define off_t __kernel_off_t
#define time_t __kernel_time_t
# if defined(__x86_64__) || defined(__mips__) || defined(__hexagon__)
# include <sys/stat.h>
# else
# define ino_t __kernel_ino_t
# define mode_t __kernel_mode_t
# define nlink_t __kernel_nlink_t
# define uid_t __kernel_uid_t
# define gid_t __kernel_gid_t
# define off_t __kernel_off_t
# define time_t __kernel_time_t
// This header seems to contain the definitions of _kernel_ stat* structs.
#include <asm/stat.h>
#undef ino_t
#undef mode_t
#undef nlink_t
#undef uid_t
#undef gid_t
#undef off_t
#endif
# include <asm/stat.h>
# undef ino_t
# undef mode_t
# undef nlink_t
# undef uid_t
# undef gid_t
# undef off_t
# endif
#include <linux/aio_abi.h>
# include <linux/aio_abi.h>
#if !SANITIZER_ANDROID
#include <sys/statfs.h>
#include <linux/perf_event.h>
#endif
# if !SANITIZER_ANDROID
# include <sys/statfs.h>
# include <linux/perf_event.h>
# endif
using namespace __sanitizer;
namespace __sanitizer {
#if !SANITIZER_ANDROID
unsigned struct_statfs64_sz = sizeof(struct statfs64);
#endif
} // namespace __sanitizer
#if !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__aarch64__)\
&& !defined(__mips__) && !defined(__s390__)\
&& !defined(__sparc__) && !defined(__riscv)
# if !defined(__powerpc64__) && !defined(__x86_64__) && \
!defined(__aarch64__) && !defined(__mips__) && !defined(__s390__) && \
!defined(__sparc__) && !defined(__riscv) && !defined(__hexagon__) && \
!defined(__loongarch__)
COMPILER_CHECK(struct___old_kernel_stat_sz == sizeof(struct __old_kernel_stat));
#endif

View File

@ -554,7 +554,7 @@ unsigned struct_tms_sz = sizeof(struct tms);
unsigned struct_sigevent_sz = sizeof(struct sigevent);
unsigned struct_sched_param_sz = sizeof(struct sched_param);
unsigned struct_sockaddr_sz = sizeof(struct sockaddr);
unsigned ucontext_t_sz = sizeof(ucontext_t);
unsigned ucontext_t_sz(void *ctx) { return sizeof(ucontext_t); }
unsigned struct_rlimit_sz = sizeof(struct rlimit);
unsigned struct_timespec_sz = sizeof(struct timespec);
unsigned struct_sembuf_sz = sizeof(struct sembuf);
@ -666,6 +666,7 @@ unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
int glob_nomatch = GLOB_NOMATCH;
int glob_altdirfunc = GLOB_ALTDIRFUNC;
const int wordexp_wrde_dooffs = WRDE_DOOFFS;
unsigned path_max = PATH_MAX;
@ -2341,8 +2342,6 @@ unsigned IOCTL_TIOCDRAIN = TIOCDRAIN;
unsigned IOCTL_TIOCGFLAGS = TIOCGFLAGS;
unsigned IOCTL_TIOCSFLAGS = TIOCSFLAGS;
unsigned IOCTL_TIOCDCDTIMESTAMP = TIOCDCDTIMESTAMP;
unsigned IOCTL_TIOCRCVFRAME = TIOCRCVFRAME;
unsigned IOCTL_TIOCXMTFRAME = TIOCXMTFRAME;
unsigned IOCTL_TIOCPTMGET = TIOCPTMGET;
unsigned IOCTL_TIOCGRANTPT = TIOCGRANTPT;
unsigned IOCTL_TIOCPTSNAME = TIOCPTSNAME;

View File

@ -45,7 +45,7 @@ extern unsigned struct_stack_t_sz;
extern unsigned struct_sched_param_sz;
extern unsigned struct_statfs_sz;
extern unsigned struct_sockaddr_sz;
extern unsigned ucontext_t_sz;
unsigned ucontext_t_sz(void *ctx);
extern unsigned struct_rlimit_sz;
extern unsigned struct_utimbuf_sz;
@ -394,6 +394,7 @@ struct __sanitizer_glob_t {
extern int glob_nomatch;
extern int glob_altdirfunc;
extern const int wordexp_wrde_dooffs;
extern unsigned path_max;
@ -2194,8 +2195,6 @@ extern unsigned IOCTL_TIOCDRAIN;
extern unsigned IOCTL_TIOCGFLAGS;
extern unsigned IOCTL_TIOCSFLAGS;
extern unsigned IOCTL_TIOCDCDTIMESTAMP;
extern unsigned IOCTL_TIOCRCVFRAME;
extern unsigned IOCTL_TIOCXMTFRAME;
extern unsigned IOCTL_TIOCPTMGET;
extern unsigned IOCTL_TIOCGRANTPT;
extern unsigned IOCTL_TIOCPTSNAME;

View File

@ -18,12 +18,13 @@
// depends on _FILE_OFFSET_BITS setting.
// To get this "true" dirent definition, we undefine _FILE_OFFSET_BITS below.
#undef _FILE_OFFSET_BITS
#undef _TIME_BITS
#endif
// Must go after undef _FILE_OFFSET_BITS.
#include "sanitizer_platform.h"
#if SANITIZER_LINUX || SANITIZER_MAC
#if SANITIZER_LINUX || SANITIZER_APPLE
// Must go after undef _FILE_OFFSET_BITS.
#include "sanitizer_glibc_version.h"
@ -51,7 +52,7 @@
#include <time.h>
#include <wchar.h>
#include <regex.h>
#if !SANITIZER_MAC
#if !SANITIZER_APPLE
#include <utmp.h>
#endif
@ -73,7 +74,9 @@
#include <sys/vt.h>
#include <linux/cdrom.h>
#include <linux/fd.h>
#if SANITIZER_ANDROID
#include <linux/fs.h>
#endif
#include <linux/hdreg.h>
#include <linux/input.h>
#include <linux/ioctl.h>
@ -91,10 +94,10 @@
#if SANITIZER_LINUX
# include <utime.h>
# include <sys/ptrace.h>
#if defined(__mips64) || defined(__aarch64__) || defined(__arm__) || \
SANITIZER_RISCV64
# include <asm/ptrace.h>
# ifdef __arm__
# if defined(__mips64) || defined(__aarch64__) || defined(__arm__) || \
defined(__hexagon__) || defined(__loongarch__) ||SANITIZER_RISCV64
# include <asm/ptrace.h>
# ifdef __arm__
typedef struct user_fpregs elf_fpregset_t;
# define ARM_VFPREGS_SIZE_ASAN (32 * 8 /*fpregs*/ + 4 /*fpscr*/)
# if !defined(ARM_VFPREGS_SIZE)
@ -152,7 +155,6 @@ typedef struct user_fpregs elf_fpregset_t;
#include <linux/serial.h>
#include <sys/msg.h>
#include <sys/ipc.h>
#include <crypt.h>
#endif // SANITIZER_ANDROID
#include <link.h>
@ -163,22 +165,24 @@ typedef struct user_fpregs elf_fpregset_t;
#include <fstab.h>
#endif // SANITIZER_LINUX
#if SANITIZER_MAC
#if SANITIZER_APPLE
#include <net/ethernet.h>
#include <sys/filio.h>
#include <sys/sockio.h>
#endif
// Include these after system headers to avoid name clashes and ambiguities.
#include "sanitizer_internal_defs.h"
#include "sanitizer_platform_limits_posix.h"
# include "sanitizer_common.h"
# include "sanitizer_internal_defs.h"
# include "sanitizer_platform_interceptors.h"
# include "sanitizer_platform_limits_posix.h"
namespace __sanitizer {
unsigned struct_utsname_sz = sizeof(struct utsname);
unsigned struct_stat_sz = sizeof(struct stat);
#if !SANITIZER_IOS && !(SANITIZER_MAC && TARGET_CPU_ARM64)
#if SANITIZER_HAS_STAT64
unsigned struct_stat64_sz = sizeof(struct stat64);
#endif // !SANITIZER_IOS && !(SANITIZER_MAC && TARGET_CPU_ARM64)
#endif // SANITIZER_HAS_STAT64
unsigned struct_rusage_sz = sizeof(struct rusage);
unsigned struct_tm_sz = sizeof(struct tm);
unsigned struct_passwd_sz = sizeof(struct passwd);
@ -203,26 +207,60 @@ namespace __sanitizer {
unsigned struct_regex_sz = sizeof(regex_t);
unsigned struct_regmatch_sz = sizeof(regmatch_t);
#if (SANITIZER_MAC && !TARGET_CPU_ARM64) && !SANITIZER_IOS
#if SANITIZER_HAS_STATFS64
unsigned struct_statfs64_sz = sizeof(struct statfs64);
#endif // (SANITIZER_MAC && !TARGET_CPU_ARM64) && !SANITIZER_IOS
#endif // SANITIZER_HAS_STATFS64
#if SANITIZER_GLIBC || SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_MAC
#if SANITIZER_GLIBC || SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_APPLE
unsigned struct_fstab_sz = sizeof(struct fstab);
#endif // SANITIZER_GLIBC || SANITIZER_FREEBSD || SANITIZER_NETBSD ||
// SANITIZER_MAC
// SANITIZER_APPLE
#if !SANITIZER_ANDROID
unsigned struct_statfs_sz = sizeof(struct statfs);
unsigned struct_sockaddr_sz = sizeof(struct sockaddr);
unsigned ucontext_t_sz = sizeof(ucontext_t);
#endif // !SANITIZER_ANDROID
#if SANITIZER_LINUX
unsigned ucontext_t_sz(void *ctx) {
# if SANITIZER_GLIBC && SANITIZER_X64
// Added in Linux kernel 3.4.0, merged to glibc in 2.16
# ifndef FP_XSTATE_MAGIC1
# define FP_XSTATE_MAGIC1 0x46505853U
# endif
// See kernel arch/x86/kernel/fpu/signal.c for details.
const auto *fpregs = static_cast<ucontext_t *>(ctx)->uc_mcontext.fpregs;
// The member names differ across header versions, but the actual layout
// is always the same. So avoid using members, just use arithmetic.
const uint32_t *after_xmm =
reinterpret_cast<const uint32_t *>(fpregs + 1) - 24;
if (after_xmm[12] == FP_XSTATE_MAGIC1)
return reinterpret_cast<const char *>(fpregs) + after_xmm[13] -
static_cast<const char *>(ctx);
# endif
return sizeof(ucontext_t);
}
# endif // !SANITIZER_ANDROID
# if SANITIZER_LINUX
unsigned struct_epoll_event_sz = sizeof(struct epoll_event);
unsigned struct_sysinfo_sz = sizeof(struct sysinfo);
unsigned __user_cap_header_struct_sz =
sizeof(struct __user_cap_header_struct);
unsigned __user_cap_data_struct_sz = sizeof(struct __user_cap_data_struct);
unsigned __user_cap_data_struct_sz(void *hdrp) {
int u32s = 0;
if (hdrp) {
switch (((struct __user_cap_header_struct *)hdrp)->version) {
case _LINUX_CAPABILITY_VERSION_1:
u32s = _LINUX_CAPABILITY_U32S_1;
break;
case _LINUX_CAPABILITY_VERSION_2:
u32s = _LINUX_CAPABILITY_U32S_2;
break;
case _LINUX_CAPABILITY_VERSION_3:
u32s = _LINUX_CAPABILITY_U32S_3;
break;
}
}
return sizeof(struct __user_cap_data_struct) * u32s;
}
unsigned struct_new_utsname_sz = sizeof(struct new_utsname);
unsigned struct_old_utsname_sz = sizeof(struct old_utsname);
unsigned struct_oldold_utsname_sz = sizeof(struct oldold_utsname);
@ -235,24 +273,28 @@ namespace __sanitizer {
unsigned struct_itimerspec_sz = sizeof(struct itimerspec);
#endif // SANITIZER_LINUX
#if SANITIZER_LINUX && !SANITIZER_ANDROID
#if SANITIZER_GLIBC
// Use pre-computed size of struct ustat to avoid <sys/ustat.h> which
// has been removed from glibc 2.28.
#if defined(__aarch64__) || defined(__s390x__) || defined(__mips64) || \
defined(__powerpc64__) || defined(__arch64__) || defined(__sparcv9) || \
defined(__x86_64__) || SANITIZER_RISCV64
#define SIZEOF_STRUCT_USTAT 32
#elif defined(__arm__) || defined(__i386__) || defined(__mips__) \
|| defined(__powerpc__) || defined(__s390__) || defined(__sparc__)
#define SIZEOF_STRUCT_USTAT 20
#else
#error Unknown size of struct ustat
#endif
# elif defined(__arm__) || defined(__i386__) || defined(__mips__) || \
defined(__powerpc__) || defined(__s390__) || defined(__sparc__) || \
defined(__hexagon__)
# define SIZEOF_STRUCT_USTAT 20
# elif defined(__loongarch__)
// Not used. The minimum Glibc version available for LoongArch is 2.36
// so ustat() wrapper is already gone.
# define SIZEOF_STRUCT_USTAT 0
# else
# error Unknown size of struct ustat
# endif
unsigned struct_ustat_sz = SIZEOF_STRUCT_USTAT;
unsigned struct_rlimit64_sz = sizeof(struct rlimit64);
unsigned struct_statvfs64_sz = sizeof(struct statvfs64);
unsigned struct_crypt_data_sz = sizeof(struct crypt_data);
#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
#endif // SANITIZER_GLIBC
#if SANITIZER_LINUX && !SANITIZER_ANDROID
unsigned struct_timex_sz = sizeof(struct timex);
@ -280,7 +322,7 @@ namespace __sanitizer {
int shmctl_shm_stat = (int)SHM_STAT;
#endif
#if !SANITIZER_MAC && !SANITIZER_FREEBSD
#if !SANITIZER_APPLE && !SANITIZER_FREEBSD
unsigned struct_utmp_sz = sizeof(struct utmp);
#endif
#if !SANITIZER_ANDROID
@ -312,10 +354,14 @@ unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
int glob_altdirfunc = GLOB_ALTDIRFUNC;
#endif
# if !SANITIZER_ANDROID
const int wordexp_wrde_dooffs = WRDE_DOOFFS;
# endif // !SANITIZER_ANDROID
#if SANITIZER_LINUX && !SANITIZER_ANDROID && \
(defined(__i386) || defined(__x86_64) || defined(__mips64) || \
defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \
defined(__s390__) || SANITIZER_RISCV64)
defined(__s390__) || defined(__loongarch__)|| SANITIZER_RISCV64)
#if defined(__mips64) || defined(__powerpc64__) || defined(__arm__)
unsigned struct_user_regs_struct_sz = sizeof(struct pt_regs);
unsigned struct_user_fpregs_struct_sz = sizeof(elf_fpregset_t);
@ -325,21 +371,24 @@ unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
#elif defined(__aarch64__)
unsigned struct_user_regs_struct_sz = sizeof(struct user_pt_regs);
unsigned struct_user_fpregs_struct_sz = sizeof(struct user_fpsimd_state);
#elif defined(__loongarch__)
unsigned struct_user_regs_struct_sz = sizeof(struct user_pt_regs);
unsigned struct_user_fpregs_struct_sz = sizeof(struct user_fp_state);
#elif defined(__s390__)
unsigned struct_user_regs_struct_sz = sizeof(struct _user_regs_struct);
unsigned struct_user_fpregs_struct_sz = sizeof(struct _user_fpregs_struct);
#else
unsigned struct_user_regs_struct_sz = sizeof(struct user_regs_struct);
unsigned struct_user_fpregs_struct_sz = sizeof(struct user_fpregs_struct);
#endif // __mips64 || __powerpc64__ || __aarch64__
#endif // __mips64 || __powerpc64__ || __aarch64__ || __loongarch__
#if defined(__x86_64) || defined(__mips64) || defined(__powerpc64__) || \
defined(__aarch64__) || defined(__arm__) || defined(__s390__) || \
SANITIZER_RISCV64
defined(__loongarch__) || SANITIZER_RISCV64
unsigned struct_user_fpxregs_struct_sz = 0;
#else
unsigned struct_user_fpxregs_struct_sz = sizeof(struct user_fpxregs_struct);
#endif // __x86_64 || __mips64 || __powerpc64__ || __aarch64__ || __arm__
// || __s390__
// || __s390__ || __loongarch__
#ifdef __arm__
unsigned struct_user_vfpregs_struct_sz = ARM_VFPREGS_SIZE;
#else
@ -484,7 +533,7 @@ unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
unsigned struct_ppp_stats_sz = sizeof(struct ppp_stats);
#endif // SANITIZER_GLIBC
#if !SANITIZER_ANDROID && !SANITIZER_MAC
#if !SANITIZER_ANDROID && !SANITIZER_APPLE
unsigned struct_sioc_sg_req_sz = sizeof(struct sioc_sg_req);
unsigned struct_sioc_vif_req_sz = sizeof(struct sioc_vif_req);
#endif
@ -570,6 +619,14 @@ unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
unsigned IOCTL_BLKROGET = BLKROGET;
unsigned IOCTL_BLKROSET = BLKROSET;
unsigned IOCTL_BLKRRPART = BLKRRPART;
unsigned IOCTL_BLKFRASET = BLKFRASET;
unsigned IOCTL_BLKFRAGET = BLKFRAGET;
unsigned IOCTL_BLKSECTSET = BLKSECTSET;
unsigned IOCTL_BLKSECTGET = BLKSECTGET;
unsigned IOCTL_BLKSSZGET = BLKSSZGET;
unsigned IOCTL_BLKBSZGET = BLKBSZGET;
unsigned IOCTL_BLKBSZSET = BLKBSZSET;
unsigned IOCTL_BLKGETSIZE64 = BLKGETSIZE64;
unsigned IOCTL_CDROMAUDIOBUFSIZ = CDROMAUDIOBUFSIZ;
unsigned IOCTL_CDROMEJECT = CDROMEJECT;
unsigned IOCTL_CDROMEJECT_SW = CDROMEJECT_SW;
@ -837,10 +894,10 @@ unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
unsigned IOCTL_EVIOCGPROP = IOCTL_NOT_PRESENT;
unsigned IOCTL_EVIOCSKEYCODE_V2 = IOCTL_NOT_PRESENT;
#endif
unsigned IOCTL_FS_IOC_GETFLAGS = FS_IOC_GETFLAGS;
unsigned IOCTL_FS_IOC_GETVERSION = FS_IOC_GETVERSION;
unsigned IOCTL_FS_IOC_SETFLAGS = FS_IOC_SETFLAGS;
unsigned IOCTL_FS_IOC_SETVERSION = FS_IOC_SETVERSION;
unsigned IOCTL_FS_IOC_GETFLAGS = _IOR('f', 1, long);
unsigned IOCTL_FS_IOC_GETVERSION = _IOR('v', 1, long);
unsigned IOCTL_FS_IOC_SETFLAGS = _IOW('f', 2, long);
unsigned IOCTL_FS_IOC_SETVERSION = _IOW('v', 2, long);
unsigned IOCTL_GIO_CMAP = GIO_CMAP;
unsigned IOCTL_GIO_FONT = GIO_FONT;
unsigned IOCTL_GIO_UNIMAP = GIO_UNIMAP;
@ -1035,7 +1092,7 @@ CHECK_SIZE_AND_OFFSET(mmsghdr, msg_len);
COMPILER_CHECK(sizeof(__sanitizer_dirent) <= sizeof(dirent));
CHECK_SIZE_AND_OFFSET(dirent, d_ino);
#if SANITIZER_MAC
#if SANITIZER_APPLE
CHECK_SIZE_AND_OFFSET(dirent, d_seekoff);
#elif SANITIZER_FREEBSD
// There is no 'd_off' field on FreeBSD.
@ -1044,7 +1101,7 @@ CHECK_SIZE_AND_OFFSET(dirent, d_off);
#endif
CHECK_SIZE_AND_OFFSET(dirent, d_reclen);
#if SANITIZER_LINUX && !SANITIZER_ANDROID
#if SANITIZER_GLIBC
COMPILER_CHECK(sizeof(__sanitizer_dirent64) <= sizeof(dirent64));
CHECK_SIZE_AND_OFFSET(dirent64, d_ino);
CHECK_SIZE_AND_OFFSET(dirent64, d_off);
@ -1077,6 +1134,15 @@ CHECK_STRUCT_SIZE_AND_OFFSET(sigaction, sa_flags);
CHECK_STRUCT_SIZE_AND_OFFSET(sigaction, sa_restorer);
#endif
#if SANITIZER_HAS_SIGINFO
COMPILER_CHECK(alignof(siginfo_t) == alignof(__sanitizer_siginfo));
using __sanitizer_siginfo_t = __sanitizer_siginfo;
CHECK_TYPE_SIZE(siginfo_t);
CHECK_SIZE_AND_OFFSET(siginfo_t, si_signo);
CHECK_SIZE_AND_OFFSET(siginfo_t, si_errno);
CHECK_SIZE_AND_OFFSET(siginfo_t, si_code);
#endif
#if SANITIZER_LINUX
CHECK_TYPE_SIZE(__sysctl_args);
CHECK_SIZE_AND_OFFSET(__sysctl_args, name);
@ -1217,7 +1283,7 @@ CHECK_SIZE_AND_OFFSET(passwd, pw_shell);
CHECK_SIZE_AND_OFFSET(passwd, pw_gecos);
#endif
#if SANITIZER_MAC
#if SANITIZER_APPLE
CHECK_SIZE_AND_OFFSET(passwd, pw_change);
CHECK_SIZE_AND_OFFSET(passwd, pw_expire);
CHECK_SIZE_AND_OFFSET(passwd, pw_class);
@ -1230,7 +1296,7 @@ CHECK_SIZE_AND_OFFSET(group, gr_passwd);
CHECK_SIZE_AND_OFFSET(group, gr_gid);
CHECK_SIZE_AND_OFFSET(group, gr_mem);
#if HAVE_RPC_XDR_H
#if HAVE_RPC_XDR_H && !SANITIZER_APPLE
CHECK_TYPE_SIZE(XDR);
CHECK_SIZE_AND_OFFSET(XDR, x_op);
CHECK_SIZE_AND_OFFSET(XDR, x_ops);
@ -1285,4 +1351,4 @@ CHECK_TYPE_SIZE(sem_t);
COMPILER_CHECK(ARM_VFPREGS_SIZE == ARM_VFPREGS_SIZE_ASAN);
#endif
#endif // SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_MAC
#endif // SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_APPLE

View File

@ -14,10 +14,25 @@
#ifndef SANITIZER_PLATFORM_LIMITS_POSIX_H
#define SANITIZER_PLATFORM_LIMITS_POSIX_H
#if SANITIZER_LINUX || SANITIZER_MAC
#if SANITIZER_LINUX || SANITIZER_APPLE
#include "sanitizer_internal_defs.h"
#include "sanitizer_platform.h"
#include "sanitizer_mallinfo.h"
#if SANITIZER_APPLE
#include <sys/cdefs.h>
#if !__DARWIN_ONLY_64_BIT_INO_T
#define SANITIZER_HAS_STAT64 1
#define SANITIZER_HAS_STATFS64 1
#else
#define SANITIZER_HAS_STAT64 0
#define SANITIZER_HAS_STATFS64 0
#endif
#elif SANITIZER_GLIBC || SANITIZER_ANDROID
#define SANITIZER_HAS_STAT64 1
#define SANITIZER_HAS_STATFS64 1
#endif
#if defined(__sparc__)
// FIXME: This can't be included from tsan which does not support sparc yet.
@ -29,7 +44,7 @@
namespace __sanitizer {
extern unsigned struct_utsname_sz;
extern unsigned struct_stat_sz;
#if !SANITIZER_IOS
#if SANITIZER_HAS_STAT64
extern unsigned struct_stat64_sz;
#endif
extern unsigned struct_rusage_sz;
@ -49,7 +64,9 @@ extern unsigned struct_itimerspec_sz;
extern unsigned struct_sigevent_sz;
extern unsigned struct_stack_t_sz;
extern unsigned struct_sched_param_sz;
#if SANITIZER_HAS_STATFS64
extern unsigned struct_statfs64_sz;
#endif
extern unsigned struct_regex_sz;
extern unsigned struct_regmatch_sz;
@ -57,12 +74,12 @@ extern unsigned struct_regmatch_sz;
extern unsigned struct_fstab_sz;
extern unsigned struct_statfs_sz;
extern unsigned struct_sockaddr_sz;
extern unsigned ucontext_t_sz;
#endif // !SANITIZER_ANDROID
unsigned ucontext_t_sz(void *uctx);
# endif // !SANITIZER_ANDROID
#if SANITIZER_LINUX
# if SANITIZER_LINUX
#if defined(__x86_64__)
# if defined(__x86_64__)
const unsigned struct_kernel_stat_sz = 144;
const unsigned struct_kernel_stat64_sz = 0;
#elif defined(__i386__)
@ -81,9 +98,10 @@ const unsigned struct_kernel_stat64_sz = 104;
const unsigned struct_kernel_stat_sz = 144;
const unsigned struct_kernel_stat64_sz = 104;
#elif defined(__mips__)
const unsigned struct_kernel_stat_sz = SANITIZER_ANDROID
? FIRST_32_SECOND_64(104, 128)
: FIRST_32_SECOND_64(160, 216);
const unsigned struct_kernel_stat_sz =
SANITIZER_ANDROID
? FIRST_32_SECOND_64(104, 128)
: FIRST_32_SECOND_64((_MIPS_SIM == _ABIN32) ? 176 : 160, 216);
const unsigned struct_kernel_stat64_sz = 104;
#elif defined(__s390__) && !defined(__s390x__)
const unsigned struct_kernel_stat_sz = 64;
@ -102,7 +120,13 @@ const unsigned struct_kernel_stat64_sz = 104;
#elif SANITIZER_RISCV64
const unsigned struct_kernel_stat_sz = 128;
const unsigned struct_kernel_stat64_sz = 0; // RISCV64 does not use stat64
#endif
# elif defined(__hexagon__)
const unsigned struct_kernel_stat_sz = 128;
const unsigned struct_kernel_stat64_sz = 0;
# elif defined(__loongarch__)
const unsigned struct_kernel_stat_sz = 128;
const unsigned struct_kernel_stat64_sz = 0;
# endif
struct __sanitizer_perf_event_attr {
unsigned type;
unsigned size;
@ -112,7 +136,7 @@ struct __sanitizer_perf_event_attr {
extern unsigned struct_epoll_event_sz;
extern unsigned struct_sysinfo_sz;
extern unsigned __user_cap_header_struct_sz;
extern unsigned __user_cap_data_struct_sz;
extern unsigned __user_cap_data_struct_sz(void *hdrp);
extern unsigned struct_new_utsname_sz;
extern unsigned struct_old_utsname_sz;
extern unsigned struct_oldold_utsname_sz;
@ -122,7 +146,7 @@ const unsigned struct_kexec_segment_sz = 4 * sizeof(unsigned long);
#if SANITIZER_LINUX
#if defined(__powerpc64__) || defined(__s390__)
#if defined(__powerpc64__) || defined(__s390__) || defined(__loongarch__)
const unsigned struct___old_kernel_stat_sz = 0;
#elif !defined(__sparc__)
const unsigned struct___old_kernel_stat_sz = 32;
@ -181,17 +205,7 @@ struct __sanitizer_sem_t {
};
#endif // SANITIZER_LINUX
#if SANITIZER_ANDROID
struct __sanitizer_struct_mallinfo {
uptr v[10];
};
#endif
#if SANITIZER_LINUX && !SANITIZER_ANDROID
struct __sanitizer_struct_mallinfo {
int v[10];
};
extern unsigned struct_ustat_sz;
extern unsigned struct_rlimit64_sz;
extern unsigned struct_statvfs64_sz;
@ -295,7 +309,6 @@ extern unsigned struct_msqid_ds_sz;
extern unsigned struct_mq_attr_sz;
extern unsigned struct_timex_sz;
extern unsigned struct_statvfs_sz;
extern unsigned struct_crypt_data_sz;
#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
struct __sanitizer_iovec {
@ -319,7 +332,7 @@ struct __sanitizer_ifaddrs {
};
#endif // !SANITIZER_ANDROID
#if SANITIZER_MAC
#if SANITIZER_APPLE
typedef unsigned long __sanitizer_pthread_key_t;
#else
typedef unsigned __sanitizer_pthread_key_t;
@ -346,7 +359,7 @@ struct __sanitizer_passwd {
char *pw_passwd;
int pw_uid;
int pw_gid;
#if SANITIZER_MAC
#if SANITIZER_APPLE
long pw_change;
char *pw_class;
#endif
@ -355,7 +368,7 @@ struct __sanitizer_passwd {
#endif
char *pw_dir;
char *pw_shell;
#if SANITIZER_MAC
#if SANITIZER_APPLE
long pw_expire;
#endif
};
@ -367,7 +380,8 @@ struct __sanitizer_group {
char **gr_mem;
};
#if defined(__x86_64__) && !defined(_LP64)
# if (SANITIZER_LINUX && !SANITIZER_GLIBC && !SANITIZER_ANDROID) || \
(defined(__x86_64__) && !defined(_LP64)) || defined(__hexagon__)
typedef long long __sanitizer_time_t;
#else
typedef long __sanitizer_time_t;
@ -427,7 +441,7 @@ struct __sanitizer_file_handle {
};
#endif
#if SANITIZER_MAC
#if SANITIZER_APPLE
struct __sanitizer_msghdr {
void *msg_name;
unsigned msg_namelen;
@ -468,30 +482,31 @@ struct __sanitizer_mmsghdr {
};
#endif
#if SANITIZER_MAC
#if SANITIZER_APPLE
struct __sanitizer_dirent {
unsigned long long d_ino;
unsigned long long d_seekoff;
unsigned short d_reclen;
// more fields that we don't care about
};
#elif SANITIZER_ANDROID || defined(__x86_64__)
# elif (SANITIZER_LINUX && !SANITIZER_GLIBC) || defined(__x86_64__) || \
defined(__hexagon__)
struct __sanitizer_dirent {
unsigned long long d_ino;
unsigned long long d_off;
unsigned short d_reclen;
// more fields that we don't care about
};
#else
# else
struct __sanitizer_dirent {
uptr d_ino;
uptr d_off;
unsigned short d_reclen;
// more fields that we don't care about
};
#endif
# endif
#if SANITIZER_LINUX && !SANITIZER_ANDROID
# if SANITIZER_GLIBC
struct __sanitizer_dirent64 {
unsigned long long d_ino;
unsigned long long d_off;
@ -511,8 +526,8 @@ typedef int __sanitizer_clockid_t;
#endif
#if SANITIZER_LINUX
#if defined(_LP64) || defined(__x86_64__) || defined(__powerpc__) || \
defined(__mips__)
# if defined(_LP64) || defined(__x86_64__) || defined(__powerpc__) || \
defined(__mips__) || defined(__hexagon__)
typedef unsigned __sanitizer___kernel_uid_t;
typedef unsigned __sanitizer___kernel_gid_t;
#else
@ -552,7 +567,7 @@ typedef unsigned long __sanitizer_sigset_t[16 / sizeof(unsigned long)];
# else
typedef unsigned long __sanitizer_sigset_t;
# endif
#elif SANITIZER_MAC
#elif SANITIZER_APPLE
typedef unsigned __sanitizer_sigset_t;
#elif SANITIZER_LINUX
struct __sanitizer_sigset_t {
@ -561,11 +576,36 @@ struct __sanitizer_sigset_t {
};
#endif
struct __sanitizer_siginfo {
// The size is determined by looking at sizeof of real siginfo_t on linux.
u64 opaque[128 / sizeof(u64)];
struct __sanitizer_siginfo_pad {
#if SANITIZER_X32
// x32 siginfo_t is aligned to 8 bytes.
u64 pad[128 / sizeof(u64)];
#else
// Require uptr, because siginfo_t is always pointer-size aligned on Linux.
uptr pad[128 / sizeof(uptr)];
#endif
};
#if SANITIZER_LINUX
# define SANITIZER_HAS_SIGINFO 1
union __sanitizer_siginfo {
struct {
int si_signo;
# if SANITIZER_MIPS
int si_code;
int si_errno;
# else
int si_errno;
int si_code;
# endif
};
__sanitizer_siginfo_pad pad;
};
#else
# define SANITIZER_HAS_SIGINFO 0
typedef __sanitizer_siginfo_pad __sanitizer_siginfo;
#endif
using __sanitizer_sighandler_ptr = void (*)(int sig);
using __sanitizer_sigactionhandler_ptr = void (*)(int sig,
__sanitizer_siginfo *siginfo,
@ -712,12 +752,19 @@ struct __sanitizer_protoent {
int p_proto;
};
struct __sanitizer_netent {
char *n_name;
char **n_aliases;
int n_addrtype;
u32 n_net;
};
struct __sanitizer_addrinfo {
int ai_flags;
int ai_family;
int ai_socktype;
int ai_protocol;
#if SANITIZER_ANDROID || SANITIZER_MAC
#if SANITIZER_ANDROID || SANITIZER_APPLE
unsigned ai_addrlen;
char *ai_canonname;
void *ai_addr;
@ -743,7 +790,7 @@ struct __sanitizer_pollfd {
short revents;
};
#if SANITIZER_ANDROID || SANITIZER_MAC
#if SANITIZER_ANDROID || SANITIZER_APPLE
typedef unsigned __sanitizer_nfds_t;
#else
typedef unsigned long __sanitizer_nfds_t;
@ -773,6 +820,10 @@ extern int glob_altdirfunc;
extern unsigned path_max;
# if !SANITIZER_ANDROID
extern const int wordexp_wrde_dooffs;
# endif // !SANITIZER_ANDROID
struct __sanitizer_wordexp_t {
uptr we_wordc;
char **we_wordv;
@ -806,7 +857,7 @@ typedef void __sanitizer_FILE;
#if SANITIZER_LINUX && !SANITIZER_ANDROID && \
(defined(__i386) || defined(__x86_64) || defined(__mips64) || \
defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \
defined(__s390__) || SANITIZER_RISCV64)
defined(__s390__) || defined(__loongarch__) || SANITIZER_RISCV64)
extern unsigned struct_user_regs_struct_sz;
extern unsigned struct_user_fpregs_struct_sz;
extern unsigned struct_user_fpxregs_struct_sz;
@ -839,7 +890,7 @@ extern int shmctl_shm_info;
extern int shmctl_shm_stat;
#endif
#if !SANITIZER_MAC && !SANITIZER_FREEBSD
#if !SANITIZER_APPLE && !SANITIZER_FREEBSD
extern unsigned struct_utmp_sz;
#endif
#if !SANITIZER_ANDROID
@ -854,7 +905,7 @@ struct __sanitizer_ifconf {
union {
void *ifcu_req;
} ifc_ifcu;
#if SANITIZER_MAC
#if SANITIZER_APPLE
} __attribute__((packed));
#else
};
@ -1007,7 +1058,7 @@ extern unsigned struct_audio_buf_info_sz;
extern unsigned struct_ppp_stats_sz;
#endif // (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
#if !SANITIZER_ANDROID && !SANITIZER_MAC
#if !SANITIZER_ANDROID && !SANITIZER_APPLE
extern unsigned struct_sioc_sg_req_sz;
extern unsigned struct_sioc_vif_req_sz;
#endif
@ -1094,6 +1145,14 @@ extern unsigned IOCTL_BLKRASET;
extern unsigned IOCTL_BLKROGET;
extern unsigned IOCTL_BLKROSET;
extern unsigned IOCTL_BLKRRPART;
extern unsigned IOCTL_BLKFRASET;
extern unsigned IOCTL_BLKFRAGET;
extern unsigned IOCTL_BLKSECTSET;
extern unsigned IOCTL_BLKSECTGET;
extern unsigned IOCTL_BLKSSZGET;
extern unsigned IOCTL_BLKBSZGET;
extern unsigned IOCTL_BLKBSZSET;
extern unsigned IOCTL_BLKGETSIZE64;
extern unsigned IOCTL_CDROMAUDIOBUFSIZ;
extern unsigned IOCTL_CDROMEJECT;
extern unsigned IOCTL_CDROMEJECT_SW;
@ -1440,6 +1499,6 @@ extern const int si_SEGV_ACCERR;
#define SIGACTION_SYMNAME sigaction
#endif // SANITIZER_LINUX || SANITIZER_MAC
#endif // SANITIZER_LINUX || SANITIZER_APPLE
#endif

View File

@ -89,7 +89,7 @@ namespace __sanitizer {
unsigned struct_sched_param_sz = sizeof(struct sched_param);
unsigned struct_statfs_sz = sizeof(struct statfs);
unsigned struct_sockaddr_sz = sizeof(struct sockaddr);
unsigned ucontext_t_sz = sizeof(ucontext_t);
unsigned ucontext_t_sz(void *ctx) { return sizeof(ucontext_t); }
unsigned struct_timespec_sz = sizeof(struct timespec);
#if SANITIZER_SOLARIS32
unsigned struct_statvfs64_sz = sizeof(struct statvfs64);
@ -123,6 +123,7 @@ namespace __sanitizer {
unsigned struct_ElfW_Phdr_sz = sizeof(ElfW(Phdr));
int glob_nomatch = GLOB_NOMATCH;
const int wordexp_wrde_dooffs = WRDE_DOOFFS;
unsigned path_max = PATH_MAX;

View File

@ -43,7 +43,7 @@ extern unsigned struct_sched_param_sz;
extern unsigned struct_statfs64_sz;
extern unsigned struct_statfs_sz;
extern unsigned struct_sockaddr_sz;
extern unsigned ucontext_t_sz;
unsigned ucontext_t_sz(void *ctx);
extern unsigned struct_timespec_sz;
extern unsigned struct_rlimit_sz;
@ -341,6 +341,7 @@ struct __sanitizer_glob_t {
extern int glob_nomatch;
extern int glob_altdirfunc;
extern const int wordexp_wrde_dooffs;
extern unsigned path_max;

View File

@ -41,6 +41,8 @@ uptr GetMmapGranularity() {
return GetPageSize();
}
bool ErrorIsOOM(error_t err) { return err == ENOMEM; }
void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
size = RoundUpTo(size, GetPageSizeCached());
uptr res = MmapNamed(nullptr, size, PROT_READ | PROT_WRITE,
@ -55,11 +57,9 @@ void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
void UnmapOrDie(void *addr, uptr size) {
if (!addr || !size) return;
uptr res = internal_munmap(addr, size);
if (UNLIKELY(internal_iserror(res))) {
Report("ERROR: %s failed to deallocate 0x%zx (%zd) bytes at address %p\n",
SanitizerToolName, size, size, addr);
CHECK("unable to unmap" && 0);
}
int reserrno;
if (UNLIKELY(internal_iserror(res, &reserrno)))
ReportMunmapFailureAndDie(addr, size, reserrno);
DecreaseTotalMmap(size);
}
@ -85,18 +85,26 @@ void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
CHECK(IsPowerOfTwo(size));
CHECK(IsPowerOfTwo(alignment));
uptr map_size = size + alignment;
// mmap maps entire pages and rounds up map_size needs to be a an integral
// number of pages.
// We need to be aware of this size for calculating end and for unmapping
// fragments before and after the alignment region.
map_size = RoundUpTo(map_size, GetPageSizeCached());
uptr map_res = (uptr)MmapOrDieOnFatalError(map_size, mem_type);
if (UNLIKELY(!map_res))
return nullptr;
uptr map_end = map_res + map_size;
uptr res = map_res;
if (!IsAligned(res, alignment)) {
res = (map_res + alignment - 1) & ~(alignment - 1);
UnmapOrDie((void*)map_res, res - map_res);
}
uptr map_end = map_res + map_size;
uptr end = res + size;
if (end != map_end)
end = RoundUpTo(end, GetPageSizeCached());
if (end != map_end) {
CHECK_LT(end, map_end);
UnmapOrDie((void*)end, map_end - end);
}
return (void*)res;
}
@ -146,7 +154,11 @@ bool MprotectReadOnly(uptr addr, uptr size) {
return 0 == internal_mprotect((void *)addr, size, PROT_READ);
}
#if !SANITIZER_MAC
bool MprotectReadWrite(uptr addr, uptr size) {
return 0 == internal_mprotect((void *)addr, size, PROT_READ | PROT_WRITE);
}
#if !SANITIZER_APPLE
void MprotectMallocZones(void *addr, int prot) {}
#endif
@ -239,7 +251,7 @@ bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
return true;
}
#if !SANITIZER_MAC
#if !SANITIZER_APPLE
void DumpProcessMap() {
MemoryMappingLayout proc_maps(/*cache_enabled*/true);
const sptr kBufSize = 4095;

View File

@ -20,10 +20,7 @@
#include "sanitizer_platform_limits_posix.h"
#include "sanitizer_platform_limits_solaris.h"
#if !SANITIZER_POSIX
// Make it hard to accidentally use any of functions declared in this file:
#error This file should only be included on POSIX
#endif
#if SANITIZER_POSIX
namespace __sanitizer {
@ -93,7 +90,7 @@ int real_pthread_join(void *th, void **ret);
} \
} // namespace __sanitizer
int my_pthread_attr_getstack(void *attr, void **addr, uptr *size);
int internal_pthread_attr_getstack(void *attr, void **addr, uptr *size);
// A routine named real_sigaction() must be implemented by each sanitizer in
// order for internal_sigaction() to bypass interceptors.
@ -123,7 +120,12 @@ int GetNamedMappingFd(const char *name, uptr size, int *flags);
// alive at least as long as the mapping exists.
void DecorateMapping(uptr addr, uptr size, const char *name);
# if !SANITIZER_FREEBSD
# define __sanitizer_dirsiz(dp) ((dp)->d_reclen)
# endif
} // namespace __sanitizer
#endif // SANITIZER_POSIX
#endif // SANITIZER_POSIX_H

View File

@ -151,6 +151,8 @@ int Atexit(void (*function)(void)) {
#endif
}
bool CreateDir(const char *pathname) { return mkdir(pathname, 0755) == 0; }
bool SupportsColoredOutput(fd_t fd) {
return isatty(fd) != 0;
}
@ -288,7 +290,7 @@ bool IsAccessibleMemoryRange(uptr beg, uptr size) {
return result;
}
void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {
void PlatformPrepareForSandboxing(void *args) {
// Some kinds of sandboxes may forbid filesystem access, so we won't be able
// to read the file mappings from /proc/self/maps. Luckily, neither the
// process will be able to load additional libraries, so it's fine to use the
@ -381,8 +383,8 @@ SANITIZER_WEAK_ATTRIBUTE int
real_pthread_attr_getstack(void *attr, void **addr, size_t *size);
} // extern "C"
int my_pthread_attr_getstack(void *attr, void **addr, uptr *size) {
#if !SANITIZER_GO && !SANITIZER_MAC
int internal_pthread_attr_getstack(void *attr, void **addr, uptr *size) {
#if !SANITIZER_GO && !SANITIZER_APPLE
if (&real_pthread_attr_getstack)
return real_pthread_attr_getstack((pthread_attr_t *)attr, addr,
(size_t *)size);
@ -395,7 +397,7 @@ void AdjustStackSize(void *attr_) {
pthread_attr_t *attr = (pthread_attr_t *)attr_;
uptr stackaddr = 0;
uptr stacksize = 0;
my_pthread_attr_getstack(attr, (void**)&stackaddr, &stacksize);
internal_pthread_attr_getstack(attr, (void **)&stackaddr, &stacksize);
// GLibC will return (0 - stacksize) as the stack address in the case when
// stacksize is set, but stackaddr is not.
bool stack_set = (stackaddr != 0) && (stackaddr + stacksize != 0);

View File

@ -20,10 +20,6 @@
#include <stdio.h>
#include <stdarg.h>
#if defined(__x86_64__)
# include <emmintrin.h>
#endif
#if SANITIZER_WINDOWS && defined(_MSC_VER) && _MSC_VER < 1800 && \
!defined(va_copy)
# define va_copy(dst, src) ((dst) = (src))
@ -132,8 +128,8 @@ static int AppendPointer(char **buff, const char *buff_end, u64 ptr_value) {
int VSNPrintf(char *buff, int buff_length,
const char *format, va_list args) {
static const char *kPrintfFormatsHelp =
"Supported Printf formats: %([0-9]*)?(z|ll)?{d,u,x,X,V}; %p; "
"%[-]([0-9]*)?(\\.\\*)?s; %c\n";
"Supported Printf formats: %([0-9]*)?(z|l|ll)?{d,u,x,X}; %p; "
"%[-]([0-9]*)?(\\.\\*)?s; %c\nProvided format: ";
RAW_CHECK(format);
RAW_CHECK(buff_length > 0);
const char *buff_end = &buff[buff_length - 1];
@ -164,9 +160,11 @@ int VSNPrintf(char *buff, int buff_length,
}
bool have_z = (*cur == 'z');
cur += have_z;
bool have_ll = !have_z && (cur[0] == 'l' && cur[1] == 'l');
bool have_l = cur[0] == 'l' && cur[1] != 'l';
cur += have_l;
bool have_ll = cur[0] == 'l' && cur[1] == 'l';
cur += have_ll * 2;
const bool have_length = have_z || have_ll;
const bool have_length = have_z || have_l || have_ll;
const bool have_flags = have_width || have_length;
// At the moment only %s supports precision and left-justification.
CHECK(!((precision >= 0 || left_justified) && *cur != 's'));
@ -174,6 +172,7 @@ int VSNPrintf(char *buff, int buff_length,
case 'd': {
s64 dval = have_ll ? va_arg(args, s64)
: have_z ? va_arg(args, sptr)
: have_l ? va_arg(args, long)
: va_arg(args, int);
result += AppendSignedDecimal(&buff, buff_end, dval, width,
pad_with_zero);
@ -184,26 +183,20 @@ int VSNPrintf(char *buff, int buff_length,
case 'X': {
u64 uval = have_ll ? va_arg(args, u64)
: have_z ? va_arg(args, uptr)
: have_l ? va_arg(args, unsigned long)
: va_arg(args, unsigned);
bool uppercase = (*cur == 'X');
result += AppendUnsigned(&buff, buff_end, uval, (*cur == 'u') ? 10 : 16,
width, pad_with_zero, uppercase);
break;
}
case 'V': {
for (uptr i = 0; i < 16; i++) {
unsigned x = va_arg(args, unsigned);
result += AppendUnsigned(&buff, buff_end, x, 16, 2, true, false);
}
break;
}
case 'p': {
RAW_CHECK_MSG(!have_flags, kPrintfFormatsHelp);
RAW_CHECK_VA(!have_flags, kPrintfFormatsHelp, format);
result += AppendPointer(&buff, buff_end, va_arg(args, uptr));
break;
}
case 's': {
RAW_CHECK_MSG(!have_length, kPrintfFormatsHelp);
RAW_CHECK_VA(!have_length, kPrintfFormatsHelp, format);
// Only left-justified width is supported.
CHECK(!have_width || left_justified);
result += AppendString(&buff, buff_end, left_justified ? -width : width,
@ -211,17 +204,17 @@ int VSNPrintf(char *buff, int buff_length,
break;
}
case 'c': {
RAW_CHECK_MSG(!have_flags, kPrintfFormatsHelp);
RAW_CHECK_VA(!have_flags, kPrintfFormatsHelp, format);
result += AppendChar(&buff, buff_end, va_arg(args, int));
break;
}
case '%' : {
RAW_CHECK_MSG(!have_flags, kPrintfFormatsHelp);
RAW_CHECK_VA(!have_flags, kPrintfFormatsHelp, format);
result += AppendChar(&buff, buff_end, '%');
break;
}
default: {
RAW_CHECK_MSG(false, kPrintfFormatsHelp);
RAW_CHECK_VA(false, kPrintfFormatsHelp, format);
}
}
}
@ -317,7 +310,6 @@ static void NOINLINE SharedPrintfCode(bool append_pid, const char *format,
format, args);
}
FORMAT(1, 2)
void Printf(const char *format, ...) {
va_list args;
va_start(args, format);
@ -326,7 +318,6 @@ void Printf(const char *format, ...) {
}
// Like Printf, but prints the current PID before the output string.
FORMAT(1, 2)
void Report(const char *format, ...) {
va_list args;
va_start(args, format);
@ -338,7 +329,6 @@ void Report(const char *format, ...) {
// Returns the number of symbols that should have been written to buffer
// (not including trailing '\0'). Thus, the string is truncated
// iff return value is not less than "length".
FORMAT(3, 4)
int internal_snprintf(char *buffer, uptr length, const char *format, ...) {
va_list args;
va_start(args, format);
@ -347,7 +337,6 @@ int internal_snprintf(char *buffer, uptr length, const char *format, ...) {
return needed_length;
}
FORMAT(2, 3)
void InternalScopedString::append(const char *format, ...) {
uptr prev_len = length();

View File

@ -16,7 +16,7 @@
#include "sanitizer_platform.h"
#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || \
SANITIZER_MAC || SANITIZER_SOLARIS || \
SANITIZER_APPLE || SANITIZER_SOLARIS || \
SANITIZER_FUCHSIA
#include "sanitizer_common.h"
@ -65,13 +65,37 @@ class MemoryMappedSegment {
MemoryMappedSegmentData *data_;
};
class MemoryMappingLayout {
struct ImageHeader;
class MemoryMappingLayoutBase {
public:
virtual bool Next(MemoryMappedSegment *segment) { UNIMPLEMENTED(); }
virtual bool Error() const { UNIMPLEMENTED(); };
virtual void Reset() { UNIMPLEMENTED(); }
protected:
~MemoryMappingLayoutBase() {}
};
class MemoryMappingLayout : public MemoryMappingLayoutBase {
public:
explicit MemoryMappingLayout(bool cache_enabled);
// This destructor cannot be virtual, as it would cause an operator new() linking
// failures in hwasan test cases. However non-virtual destructors emit warnings
// in macOS build, hence disabling those
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wnon-virtual-dtor"
#endif
~MemoryMappingLayout();
bool Next(MemoryMappedSegment *segment);
bool Error() const;
void Reset();
#ifdef __clang__
#pragma clang diagnostic pop
#endif
virtual bool Next(MemoryMappedSegment *segment) override;
virtual bool Error() const override;
virtual void Reset() override;
// In some cases, e.g. when running under a sandbox on Linux, ASan is unable
// to obtain the memory mappings. It should fall back to pre-cached data
// instead of aborting.
@ -80,10 +104,14 @@ class MemoryMappingLayout {
// Adds all mapped objects into a vector.
void DumpListOfModules(InternalMmapVectorNoCtor<LoadedModule> *modules);
protected:
#if SANITIZER_APPLE
virtual const ImageHeader *CurrentImageHeader();
#endif
MemoryMappingLayoutData data_;
private:
void LoadFromCache();
MemoryMappingLayoutData data_;
};
// Returns code range for the specified module.

View File

@ -39,6 +39,22 @@
namespace __sanitizer {
#if SANITIZER_FREEBSD
void GetMemoryProfile(fill_profile_f cb, uptr *stats) {
const int Mib[] = {
CTL_KERN,
KERN_PROC,
KERN_PROC_PID,
getpid()
};
struct kinfo_proc InfoProc;
uptr Len = sizeof(InfoProc);
CHECK_EQ(internal_sysctl(Mib, ARRAY_SIZE(Mib), nullptr, (uptr *)&InfoProc, &Len, 0), 0);
cb(0, InfoProc.ki_rssize * GetPageSizeCached(), false, stats);
}
#endif
void ReadProcMaps(ProcSelfMapsBuff *proc_maps) {
const int Mib[] = {
#if SANITIZER_FREEBSD

View File

@ -145,29 +145,47 @@ void MemoryMappingLayout::DumpListOfModules(
}
}
void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) {
#if SANITIZER_LINUX || SANITIZER_ANDROID || SANITIZER_SOLARIS || SANITIZER_NETBSD
void GetMemoryProfile(fill_profile_f cb, uptr *stats) {
char *smaps = nullptr;
uptr smaps_cap = 0;
uptr smaps_len = 0;
if (!ReadFileToBuffer("/proc/self/smaps", &smaps, &smaps_cap, &smaps_len))
return;
ParseUnixMemoryProfile(cb, stats, smaps, smaps_len);
UnmapOrDie(smaps, smaps_cap);
}
void ParseUnixMemoryProfile(fill_profile_f cb, uptr *stats, char *smaps,
uptr smaps_len) {
uptr start = 0;
bool file = false;
const char *pos = smaps;
while (pos < smaps + smaps_len) {
char *end = smaps + smaps_len;
if (smaps_len < 2)
return;
// The following parsing can crash on almost every line
// in the case of malformed/truncated input.
// Fixing that is hard b/c e.g. ParseDecimal does not
// even accept end of the buffer and assumes well-formed input.
// So instead we patch end of the input a bit,
// it does not affect well-formed complete inputs.
*--end = 0;
*--end = '\n';
while (pos < end) {
if (IsHex(pos[0])) {
start = ParseHex(&pos);
for (; *pos != '/' && *pos > '\n'; pos++) {}
file = *pos == '/';
} else if (internal_strncmp(pos, "Rss:", 4) == 0) {
while (!IsDecimal(*pos)) pos++;
while (pos < end && !IsDecimal(*pos)) pos++;
uptr rss = ParseDecimal(&pos) * 1024;
cb(start, rss, file, stats, stats_size);
cb(start, rss, file, stats);
}
while (*pos++ != '\n') {}
}
UnmapOrDie(smaps, smaps_cap);
}
#endif
} // namespace __sanitizer

View File

@ -10,7 +10,7 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
#if SANITIZER_MAC
#if SANITIZER_APPLE
#include "sanitizer_common.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_procmaps.h"
@ -136,29 +136,34 @@ void MemoryMappingLayout::LoadFromCache() {
// No-op on Mac for now.
}
static bool IsDyldHdr(const mach_header *hdr) {
return (hdr->magic == MH_MAGIC || hdr->magic == MH_MAGIC_64) &&
hdr->filetype == MH_DYLINKER;
}
// _dyld_get_image_header() and related APIs don't report dyld itself.
// We work around this by manually recursing through the memory map
// until we hit a Mach header matching dyld instead. These recurse
// calls are expensive, but the first memory map generation occurs
// early in the process, when dyld is one of the only images loaded,
// so it will be hit after only a few iterations.
static mach_header *get_dyld_image_header() {
unsigned depth = 1;
vm_size_t size = 0;
// so it will be hit after only a few iterations. These assumptions don't hold
// on macOS 13+ anymore (dyld itself has moved into the shared cache).
static mach_header *GetDyldImageHeaderViaVMRegion() {
vm_address_t address = 0;
kern_return_t err = KERN_SUCCESS;
mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
while (true) {
vm_size_t size = 0;
unsigned depth = 1;
struct vm_region_submap_info_64 info;
err = vm_region_recurse_64(mach_task_self(), &address, &size, &depth,
(vm_region_info_t)&info, &count);
mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
kern_return_t err =
vm_region_recurse_64(mach_task_self(), &address, &size, &depth,
(vm_region_info_t)&info, &count);
if (err != KERN_SUCCESS) return nullptr;
if (size >= sizeof(mach_header) && info.protection & kProtectionRead) {
mach_header *hdr = (mach_header *)address;
if ((hdr->magic == MH_MAGIC || hdr->magic == MH_MAGIC_64) &&
hdr->filetype == MH_DYLINKER) {
if (IsDyldHdr(hdr)) {
return hdr;
}
}
@ -166,8 +171,69 @@ static mach_header *get_dyld_image_header() {
}
}
extern "C" {
struct dyld_shared_cache_dylib_text_info {
uint64_t version; // current version 2
// following fields all exist in version 1
uint64_t loadAddressUnslid;
uint64_t textSegmentSize;
uuid_t dylibUuid;
const char *path; // pointer invalid at end of iterations
// following fields all exist in version 2
uint64_t textSegmentOffset; // offset from start of cache
};
typedef struct dyld_shared_cache_dylib_text_info
dyld_shared_cache_dylib_text_info;
extern bool _dyld_get_shared_cache_uuid(uuid_t uuid);
extern const void *_dyld_get_shared_cache_range(size_t *length);
extern int dyld_shared_cache_iterate_text(
const uuid_t cacheUuid,
void (^callback)(const dyld_shared_cache_dylib_text_info *info));
} // extern "C"
static mach_header *GetDyldImageHeaderViaSharedCache() {
uuid_t uuid;
bool hasCache = _dyld_get_shared_cache_uuid(uuid);
if (!hasCache)
return nullptr;
size_t cacheLength;
__block uptr cacheStart = (uptr)_dyld_get_shared_cache_range(&cacheLength);
CHECK(cacheStart && cacheLength);
__block mach_header *dyldHdr = nullptr;
int res = dyld_shared_cache_iterate_text(
uuid, ^(const dyld_shared_cache_dylib_text_info *info) {
CHECK_GE(info->version, 2);
mach_header *hdr =
(mach_header *)(cacheStart + info->textSegmentOffset);
if (IsDyldHdr(hdr))
dyldHdr = hdr;
});
CHECK_EQ(res, 0);
return dyldHdr;
}
const mach_header *get_dyld_hdr() {
if (!dyld_hdr) dyld_hdr = get_dyld_image_header();
if (!dyld_hdr) {
// On macOS 13+, dyld itself has moved into the shared cache. Looking it up
// via vm_region_recurse_64() causes spins/hangs/crashes.
if (GetMacosAlignedVersion() >= MacosVersion(13, 0)) {
dyld_hdr = GetDyldImageHeaderViaSharedCache();
if (!dyld_hdr) {
VReport(1,
"Failed to lookup the dyld image header in the shared cache on "
"macOS 13+ (or no shared cache in use). Falling back to "
"lookup via vm_region_recurse_64().\n");
dyld_hdr = GetDyldImageHeaderViaVMRegion();
}
} else {
dyld_hdr = GetDyldImageHeaderViaVMRegion();
}
CHECK(dyld_hdr);
}
return dyld_hdr;
}
@ -184,7 +250,9 @@ static bool NextSegmentLoad(MemoryMappedSegment *segment,
MemoryMappedSegmentData *seg_data,
MemoryMappingLayoutData *layout_data) {
const char *lc = layout_data->current_load_cmd_addr;
layout_data->current_load_cmd_addr += ((const load_command *)lc)->cmdsize;
layout_data->current_load_cmd_count--;
if (((const load_command *)lc)->cmd == kLCSegment) {
const SegmentCommand* sc = (const SegmentCommand *)lc;
uptr base_virt_addr, addr_mask;
@ -292,11 +360,16 @@ static bool IsModuleInstrumented(const load_command *first_lc) {
return false;
}
const ImageHeader *MemoryMappingLayout::CurrentImageHeader() {
const mach_header *hdr = (data_.current_image == kDyldImageIdx)
? get_dyld_hdr()
: _dyld_get_image_header(data_.current_image);
return (const ImageHeader *)hdr;
}
bool MemoryMappingLayout::Next(MemoryMappedSegment *segment) {
for (; data_.current_image >= kDyldImageIdx; data_.current_image--) {
const mach_header *hdr = (data_.current_image == kDyldImageIdx)
? get_dyld_hdr()
: _dyld_get_image_header(data_.current_image);
const mach_header *hdr = (const mach_header *)CurrentImageHeader();
if (!hdr) continue;
if (data_.current_load_cmd_count < 0) {
// Set up for this image;
@ -326,7 +399,7 @@ bool MemoryMappingLayout::Next(MemoryMappedSegment *segment) {
(const load_command *)data_.current_load_cmd_addr);
}
for (; data_.current_load_cmd_count >= 0; data_.current_load_cmd_count--) {
while (data_.current_load_cmd_count > 0) {
switch (data_.current_magic) {
// data_.current_magic may be only one of MH_MAGIC, MH_MAGIC_64.
#ifdef MH_MAGIC_64
@ -347,6 +420,7 @@ bool MemoryMappingLayout::Next(MemoryMappedSegment *segment) {
}
// If we get here, no more load_cmd's in this image talk about
// segments. Go on to the next image.
data_.current_load_cmd_count = -1; // This will trigger loading next image
}
return false;
}
@ -376,4 +450,4 @@ void MemoryMappingLayout::DumpListOfModules(
} // namespace __sanitizer
#endif // SANITIZER_MAC
#endif // SANITIZER_APPLE

Some files were not shown because too many files have changed in this diff Show More