Path: blob/main/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_linux.cpp
//===-- hwasan_linux.cpp ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file is a part of HWAddressSanitizer and contains Linux-, NetBSD- and
/// FreeBSD-specific code.
///
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD

#  include <dlfcn.h>
#  include <elf.h>
#  include <errno.h>
#  include <link.h>
#  include <pthread.h>
#  include <signal.h>
#  include <stdio.h>
#  include <stdlib.h>
#  include <sys/prctl.h>
#  include <sys/resource.h>
#  include <sys/time.h>
#  include <unistd.h>
#  include <unwind.h>

#  include "hwasan.h"
#  include "hwasan_dynamic_shadow.h"
#  include "hwasan_interface_internal.h"
#  include "hwasan_mapping.h"
#  include "hwasan_report.h"
#  include "hwasan_thread.h"
#  include "hwasan_thread_list.h"
#  include "sanitizer_common/sanitizer_common.h"
#  include "sanitizer_common/sanitizer_procmaps.h"
#  include "sanitizer_common/sanitizer_stackdepot.h"

// Configurations of HWASAN_WITH_INTERCEPTORS and SANITIZER_ANDROID.
//
// HWASAN_WITH_INTERCEPTORS=OFF, SANITIZER_ANDROID=OFF
//   Not currently tested.
// HWASAN_WITH_INTERCEPTORS=OFF, SANITIZER_ANDROID=ON
//   Integration tests downstream exist.
// HWASAN_WITH_INTERCEPTORS=ON, SANITIZER_ANDROID=OFF
//   Tested with check-hwasan on x86_64-linux.
// HWASAN_WITH_INTERCEPTORS=ON, SANITIZER_ANDROID=ON
//   Tested with check-hwasan on aarch64-linux-android.
#  if !SANITIZER_ANDROID
SANITIZER_INTERFACE_ATTRIBUTE
THREADLOCAL uptr __hwasan_tls;
#  endif

namespace __hwasan {

// With the zero shadow base we can not actually map pages starting from 0.
// This constant is somewhat arbitrary.
constexpr uptr kZeroBaseShadowStart = 0;
constexpr uptr kZeroBaseMaxShadowStart = 1 << 18;

static void ProtectGap(uptr addr, uptr size) {
  __sanitizer::ProtectGap(addr, size, kZeroBaseShadowStart,
                          kZeroBaseMaxShadowStart);
}

uptr kLowMemStart;
uptr kLowMemEnd;
uptr kHighMemStart;
uptr kHighMemEnd;

static void PrintRange(uptr start, uptr end, const char *name) {
  Printf("|| [%p, %p] || %.*s ||\n", (void *)start, (void *)end, 10, name);
}

static void PrintAddressSpaceLayout() {
  PrintRange(kHighMemStart, kHighMemEnd, "HighMem");
  if (kHighShadowEnd + 1 < kHighMemStart)
    PrintRange(kHighShadowEnd + 1, kHighMemStart - 1, "ShadowGap");
  else
    CHECK_EQ(kHighShadowEnd + 1, kHighMemStart);
  PrintRange(kHighShadowStart, kHighShadowEnd, "HighShadow");
  if (kLowShadowEnd + 1 < kHighShadowStart)
    PrintRange(kLowShadowEnd + 1, kHighShadowStart - 1, "ShadowGap");
  else
    CHECK_EQ(kLowMemEnd + 1, kHighShadowStart);
  PrintRange(kLowShadowStart, kLowShadowEnd, "LowShadow");
  if (kLowMemEnd + 1 < kLowShadowStart)
    PrintRange(kLowMemEnd + 1, kLowShadowStart - 1, "ShadowGap");
  else
    CHECK_EQ(kLowMemEnd + 1, kLowShadowStart);
  PrintRange(kLowMemStart, kLowMemEnd, "LowMem");
  CHECK_EQ(0, kLowMemStart);
}

static uptr GetHighMemEnd() {
  // HighMem covers the upper part of the address space.
  uptr max_address = GetMaxUserVirtualAddress();
  // Adjust max address to make sure that kHighMemEnd and kHighMemStart are
  // properly aligned:
  max_address |= (GetMmapGranularity() << kShadowScale) - 1;
  return max_address;
}
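
// Illustrative note (assuming a 4 KiB mmap granularity and kShadowScale == 4,
// i.e. one shadow byte per 16-byte granule): the OR above sets the low
// (4096 << 4) - 1 == 0xffff address bits, so kHighMemEnd + 1 becomes a
// multiple of GetMmapGranularity() << kShadowScale. That keeps kHighMemStart
// and the shadow boundaries derived from kHighMemEnd aligned to the mmap
// granularity, which InitShadow() below verifies with
// CHECK_EQ(kHighMemStart % GetMmapGranularity(), 0).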

static void InitializeShadowBaseAddress(uptr shadow_size_bytes) {
  // FIXME: Android should init flags before shadow.
  if (!SANITIZER_ANDROID && flags()->fixed_shadow_base != (uptr)-1) {
    __hwasan_shadow_memory_dynamic_address = flags()->fixed_shadow_base;
    uptr beg = __hwasan_shadow_memory_dynamic_address;
    uptr end = beg + shadow_size_bytes;
    if (!MemoryRangeIsAvailable(beg, end)) {
      Report(
          "FATAL: HWAddressSanitizer: Shadow range %p-%p is not available.\n",
          (void *)beg, (void *)end);
      DumpProcessMap();
      CHECK(MemoryRangeIsAvailable(beg, end));
    }
  } else {
    __hwasan_shadow_memory_dynamic_address =
        FindDynamicShadowStart(shadow_size_bytes);
  }
}

static void MaybeDieIfNoTaggingAbi(const char *message) {
  if (!flags()->fail_without_syscall_abi)
    return;
  Printf("FATAL: %s\n", message);
  Die();
}

#  define PR_SET_TAGGED_ADDR_CTRL 55
#  define PR_GET_TAGGED_ADDR_CTRL 56
#  define PR_TAGGED_ADDR_ENABLE (1UL << 0)
#  define ARCH_GET_UNTAG_MASK 0x4001
#  define ARCH_ENABLE_TAGGED_ADDR 0x4002
#  define ARCH_GET_MAX_TAG_BITS 0x4003

static bool CanUseTaggingAbi() {
#  if defined(__x86_64__)
  unsigned long num_bits = 0;
  // Check for x86 LAM support. This API is based on a currently unsubmitted
  // patch to the Linux kernel (as of August 2022) and is thus subject to
  // change. The patch is here:
  // https://lore.kernel.org/all/[email protected]/
  //
  // arch_prctl(ARCH_GET_MAX_TAG_BITS, &bits) returns the maximum number of tag
  // bits the user can request, or zero if LAM is not supported by the hardware.
  if (internal_iserror(internal_arch_prctl(ARCH_GET_MAX_TAG_BITS,
                                           reinterpret_cast<uptr>(&num_bits))))
    return false;
  // The platform must provide enough bits for HWASan tags.
  if (num_bits < kTagBits)
    return false;
  return true;
#  else
  // Check for ARM TBI support.
  return !internal_iserror(internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0));
#  endif  // __x86_64__
}

static bool EnableTaggingAbi() {
#  if defined(__x86_64__)
  // Enable x86 LAM tagging for the process.
  //
  // arch_prctl(ARCH_ENABLE_TAGGED_ADDR, bits) enables tagging if the number of
  // tag bits requested by the user does not exceed that provided by the system.
  // arch_prctl(ARCH_GET_UNTAG_MASK, &mask) returns the mask of significant
  // address bits. It is ~0ULL if either LAM is disabled for the process or LAM
  // is not supported by the hardware.
  if (internal_iserror(internal_arch_prctl(ARCH_ENABLE_TAGGED_ADDR, kTagBits)))
    return false;
  unsigned long mask = 0;
  // Make sure the tag bits are where we expect them to be.
  if (internal_iserror(internal_arch_prctl(ARCH_GET_UNTAG_MASK,
                                           reinterpret_cast<uptr>(&mask))))
    return false;
  // @mask has ones for non-tag bits, whereas @kAddressTagMask has ones for tag
  // bits. Therefore these masks must not overlap.
  if (mask & kAddressTagMask)
    return false;
  return true;
#  else
  // Enable ARM TBI tagging for the process. If for some reason tagging is not
  // supported, prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE) returns
  // -EINVAL.
  if (internal_iserror(internal_prctl(PR_SET_TAGGED_ADDR_CTRL,
                                      PR_TAGGED_ADDR_ENABLE, 0, 0, 0)))
    return false;
  // Ensure that TBI is enabled.
  if (internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0) !=
      PR_TAGGED_ADDR_ENABLE)
    return false;
  return true;
#  endif  // __x86_64__
}
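
// Illustration of what the ABI above buys us, for the AArch64 case where
// HWASan keeps the tag in the top byte (kAddressTagMask covers bits 56-63):
// once PR_TAGGED_ADDR_ENABLE is set, a syscall argument such as
// 0x2a00000012345000 (tag 0x2a, untagged address 0x12345000) is accepted by
// the kernel as if the tag byte were zero. On x86_64 the check above is the
// mirror image of that: ARCH_GET_UNTAG_MASK must report zeros in every bit
// position where kAddressTagMask has ones, otherwise LAM placed the tag bits
// somewhere HWASan does not expect them.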

void InitializeOsSupport() {
  // Check we're running on a kernel that can use the tagged address ABI.
  bool has_abi = CanUseTaggingAbi();

  if (!has_abi) {
#  if SANITIZER_ANDROID || defined(HWASAN_ALIASING_MODE)
    // Some older Android kernels have the tagged pointer ABI on
    // unconditionally, and hence don't have the tagged-addr prctl while still
    // allowing the ABI.
    // If targeting Android and the prctl is not around, we assume this is the
    // case.
    return;
#  else
    MaybeDieIfNoTaggingAbi(
        "HWAddressSanitizer requires a kernel with tagged address ABI.");
#  endif
  }

  if (EnableTaggingAbi())
    return;

#  if SANITIZER_ANDROID
  MaybeDieIfNoTaggingAbi(
      "HWAddressSanitizer failed to enable tagged address syscall ABI.\n"
      "Check the `sysctl abi.tagged_addr_disabled` configuration.");
#  else
  MaybeDieIfNoTaggingAbi(
      "HWAddressSanitizer failed to enable tagged address syscall ABI.\n");
#  endif
}

bool InitShadow() {
  // Define the entire memory range.
  kHighMemEnd = GetHighMemEnd();

  // Determine shadow memory base offset.
  InitializeShadowBaseAddress(MemToShadowSize(kHighMemEnd));

  // Place the low memory first.
  kLowMemEnd = __hwasan_shadow_memory_dynamic_address - 1;
  kLowMemStart = 0;

  // Define the low shadow based on the already placed low memory.
  kLowShadowEnd = MemToShadow(kLowMemEnd);
  kLowShadowStart = __hwasan_shadow_memory_dynamic_address;

  // High shadow takes whatever memory is left up there (making sure it is not
  // interfering with low memory in the fixed case).
  kHighShadowEnd = MemToShadow(kHighMemEnd);
  kHighShadowStart = Max(kLowMemEnd, MemToShadow(kHighShadowEnd)) + 1;

  // High memory starts where allocated shadow allows.
  kHighMemStart = ShadowToMem(kHighShadowStart);

  // Check the sanity of the defined memory ranges (there might be gaps).
  CHECK_EQ(kHighMemStart % GetMmapGranularity(), 0);
  CHECK_GT(kHighMemStart, kHighShadowEnd);
  CHECK_GT(kHighShadowEnd, kHighShadowStart);
  CHECK_GT(kHighShadowStart, kLowMemEnd);
  CHECK_GT(kLowMemEnd, kLowMemStart);
  CHECK_GT(kLowShadowEnd, kLowShadowStart);
  CHECK_GT(kLowShadowStart, kLowMemEnd);

  // Reserve shadow memory.
  ReserveShadowMemoryRange(kLowShadowStart, kLowShadowEnd, "low shadow");
  ReserveShadowMemoryRange(kHighShadowStart, kHighShadowEnd, "high shadow");

  // Protect all the gaps.
  ProtectGap(0, Min(kLowMemStart, kLowShadowStart));
  if (kLowMemEnd + 1 < kLowShadowStart)
    ProtectGap(kLowMemEnd + 1, kLowShadowStart - kLowMemEnd - 1);
  if (kLowShadowEnd + 1 < kHighShadowStart)
    ProtectGap(kLowShadowEnd + 1, kHighShadowStart - kLowShadowEnd - 1);
  if (kHighShadowEnd + 1 < kHighMemStart)
    ProtectGap(kHighShadowEnd + 1, kHighMemStart - kHighShadowEnd - 1);

  if (Verbosity())
    PrintAddressSpaceLayout();

  return true;
}

void InitThreads() {
  CHECK(__hwasan_shadow_memory_dynamic_address);
  uptr guard_page_size = GetMmapGranularity();
  uptr thread_space_start =
      __hwasan_shadow_memory_dynamic_address - (1ULL << kShadowBaseAlignment);
  uptr thread_space_end =
      __hwasan_shadow_memory_dynamic_address - guard_page_size;
  ReserveShadowMemoryRange(thread_space_start, thread_space_end - 1,
                           "hwasan threads", /*madvise_shadow*/ false);
  ProtectGap(thread_space_end,
             __hwasan_shadow_memory_dynamic_address - thread_space_end);
  InitThreadList(thread_space_start, thread_space_end - thread_space_start);
  hwasanThreadList().CreateCurrentThread();
}
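
// Layout note (illustrative, assuming kShadowBaseAlignment == 32 and 4 KiB
// guard pages): the block reserved above is just under 4 GiB and ends one
// guard page below __hwasan_shadow_memory_dynamic_address; InitThreadList()
// carves it into per-thread slots (Thread objects plus their stack-allocations
// ring buffers), and the protected guard page means a runaway write off the
// end of the thread region faults instead of silently corrupting shadow.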

bool MemIsApp(uptr p) {
  // Memory outside the alias range has non-zero tags.
#  if !defined(HWASAN_ALIASING_MODE)
  CHECK_EQ(GetTagFromPointer(p), 0);
#  endif

  return (p >= kHighMemStart && p <= kHighMemEnd) ||
         (p >= kLowMemStart && p <= kLowMemEnd);
}

void InstallAtExitHandler() { atexit(HwasanAtExit); }

// ---------------------- TSD ---------------- {{{1

#  if HWASAN_WITH_INTERCEPTORS
static pthread_key_t tsd_key;
static bool tsd_key_inited = false;

void HwasanTSDThreadInit() {
  if (tsd_key_inited)
    CHECK_EQ(0, pthread_setspecific(tsd_key,
                                    (void *)GetPthreadDestructorIterations()));
}

void HwasanTSDDtor(void *tsd) {
  uptr iterations = (uptr)tsd;
  if (iterations > 1) {
    CHECK_EQ(0, pthread_setspecific(tsd_key, (void *)(iterations - 1)));
    return;
  }
  __hwasan_thread_exit();
}

void HwasanTSDInit() {
  CHECK(!tsd_key_inited);
  tsd_key_inited = true;
  CHECK_EQ(0, pthread_key_create(&tsd_key, HwasanTSDDtor));
}
#  else
void HwasanTSDInit() {}
void HwasanTSDThreadInit() {}
#  endif
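
// Note on the destructor dance above (illustrative): pthread re-runs a TSD
// destructor on the next round of TLS cleanup only if the slot was re-set to a
// non-null value, so HwasanTSDDtor re-arms tsd_key with a decremented count.
// With the usual GetPthreadDestructorIterations() value of 4, the thread's
// HWASan state is torn down by __hwasan_thread_exit only on the final round,
// after other libraries' TSD destructors have had their chance to run
// instrumented code that still needs it.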

#  if SANITIZER_ANDROID
uptr *GetCurrentThreadLongPtr() { return (uptr *)get_android_tls_ptr(); }
#  else
uptr *GetCurrentThreadLongPtr() { return &__hwasan_tls; }
#  endif

#  if SANITIZER_ANDROID
void AndroidTestTlsSlot() {
  uptr kMagicValue = 0x010203040A0B0C0D;
  uptr *tls_ptr = GetCurrentThreadLongPtr();
  uptr old_value = *tls_ptr;
  *tls_ptr = kMagicValue;
  dlerror();
  if (*(uptr *)get_android_tls_ptr() != kMagicValue) {
    Printf(
        "ERROR: Incompatible version of Android: TLS_SLOT_SANITIZER(6) is used "
        "for dlerror().\n");
    Die();
  }
  *tls_ptr = old_value;
}
#  else
void AndroidTestTlsSlot() {}
#  endif

static AccessInfo GetAccessInfo(siginfo_t *info, ucontext_t *uc) {
  // Access type is passed in a platform dependent way (see below) and encoded
  // as 0xXY, where X&1 is 1 for store, 0 for load, and X&2 is 1 if the error is
  // recoverable. Valid values of Y are 0 to 4, which are interpreted as
  // log2(access_size), and 0xF, which means that access size is passed via
  // platform dependent register (see below).
#  if defined(__aarch64__)
  // Access type is encoded in BRK immediate as 0x900 + 0xXY. For Y == 0xF,
  // access size is stored in X1 register. Access address is always in X0
  // register.
  uptr pc = (uptr)info->si_addr;
  const unsigned code = ((*(u32 *)pc) >> 5) & 0xffff;
  if ((code & 0xff00) != 0x900)
    return AccessInfo{};  // Not ours.

  const bool is_store = code & 0x10;
  const bool recover = code & 0x20;
  const uptr addr = uc->uc_mcontext.regs[0];
  const unsigned size_log = code & 0xf;
  if (size_log > 4 && size_log != 0xf)
    return AccessInfo{};  // Not ours.
  const uptr size = size_log == 0xf ? uc->uc_mcontext.regs[1] : 1U << size_log;

#  elif defined(__x86_64__)
  // Access type is encoded in the instruction following INT3 as
  // NOP DWORD ptr [EAX + 0x40 + 0xXY]. For Y == 0xF, access size is stored in
  // RSI register. Access address is always in RDI register.
  uptr pc = (uptr)uc->uc_mcontext.gregs[REG_RIP];
  uint8_t *nop = (uint8_t *)pc;
  if (*nop != 0x0f || *(nop + 1) != 0x1f || *(nop + 2) != 0x40 ||
      *(nop + 3) < 0x40)
    return AccessInfo{};  // Not ours.
  const unsigned code = *(nop + 3);

  const bool is_store = code & 0x10;
  const bool recover = code & 0x20;
  const uptr addr = uc->uc_mcontext.gregs[REG_RDI];
  const unsigned size_log = code & 0xf;
  if (size_log > 4 && size_log != 0xf)
    return AccessInfo{};  // Not ours.
  const uptr size =
      size_log == 0xf ? uc->uc_mcontext.gregs[REG_RSI] : 1U << size_log;

#  elif SANITIZER_RISCV64
  // Access type is encoded in the instruction following EBREAK as
  // ADDI x0, x0, [0x40 + 0xXY]. For Y == 0xF, access size is stored in
  // X11 register. Access address is always in X10 register.
  uptr pc = (uptr)uc->uc_mcontext.__gregs[REG_PC];
  uint8_t byte1 = *((u8 *)(pc + 0));
  uint8_t byte2 = *((u8 *)(pc + 1));
  uint8_t byte3 = *((u8 *)(pc + 2));
  uint8_t byte4 = *((u8 *)(pc + 3));
  uint32_t ebreak = (byte1 | (byte2 << 8) | (byte3 << 16) | (byte4 << 24));
  bool isFaultShort = false;
  bool isEbreak = (ebreak == 0x100073);
  bool isShortEbreak = false;
#    if defined(__riscv_compressed)
  isFaultShort = ((ebreak & 0x3) != 0x3);
  isShortEbreak = ((ebreak & 0xffff) == 0x9002);
#    endif
  // faulted insn is not ebreak, not our case
  if (!(isEbreak || isShortEbreak))
    return AccessInfo{};
  // advance pc to point after ebreak and reconstruct addi instruction
  pc += isFaultShort ? 2 : 4;
  byte1 = *((u8 *)(pc + 0));
  byte2 = *((u8 *)(pc + 1));
  byte3 = *((u8 *)(pc + 2));
  byte4 = *((u8 *)(pc + 3));
  // reconstruct instruction
  uint32_t instr = (byte1 | (byte2 << 8) | (byte3 << 16) | (byte4 << 24));
  // check if this is really 32 bit instruction
  // code is encoded in top 12 bits, since instruction is supposed to be with
  // imm
  const unsigned code = (instr >> 20) & 0xffff;
  const uptr addr = uc->uc_mcontext.__gregs[10];
  const bool is_store = code & 0x10;
  const bool recover = code & 0x20;
  const unsigned size_log = code & 0xf;
  if (size_log > 4 && size_log != 0xf)
    return AccessInfo{};  // Not our case
  const uptr size =
      size_log == 0xf ? uc->uc_mcontext.__gregs[11] : 1U << size_log;

#  else
#    error Unsupported architecture
#  endif

  return AccessInfo{addr, size, is_store, !is_store, recover};
}
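
// Worked example of the encoding decoded above (illustrative): a code of 0x13
// (BRK immediate 0x900 + 0x13 on aarch64, nop displacement byte 0x40 + 0x13 ==
// 0x53 on x86_64) has bit 4 set and bit 5 clear with a low nibble of 3, i.e. a
// non-recoverable store of 1 << 3 == 8 bytes. A code such as 0x2f decodes as a
// recoverable load whose size must be read from X1 / RSI / X11 because the low
// nibble is 0xf.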

static bool HwasanOnSIGTRAP(int signo, siginfo_t *info, ucontext_t *uc) {
  AccessInfo ai = GetAccessInfo(info, uc);
  if (!ai.is_store && !ai.is_load)
    return false;

  SignalContext sig{info, uc};
  HandleTagMismatch(ai, StackTrace::GetNextInstructionPc(sig.pc), sig.bp, uc);

#  if defined(__aarch64__)
  uc->uc_mcontext.pc += 4;
#  elif defined(__x86_64__)
#  elif SANITIZER_RISCV64
  // pc points to EBREAK which is 2 bytes long
  uint8_t *exception_source = (uint8_t *)(uc->uc_mcontext.__gregs[REG_PC]);
  uint8_t byte1 = (uint8_t)(*(exception_source + 0));
  uint8_t byte2 = (uint8_t)(*(exception_source + 1));
  uint8_t byte3 = (uint8_t)(*(exception_source + 2));
  uint8_t byte4 = (uint8_t)(*(exception_source + 3));
  uint32_t faulted = (byte1 | (byte2 << 8) | (byte3 << 16) | (byte4 << 24));
  bool isFaultShort = false;
#    if defined(__riscv_compressed)
  isFaultShort = ((faulted & 0x3) != 0x3);
#    endif
  uc->uc_mcontext.__gregs[REG_PC] += isFaultShort ? 2 : 4;
#  else
#    error Unsupported architecture
#  endif
  return true;
}

static void OnStackUnwind(const SignalContext &sig, const void *,
                          BufferedStackTrace *stack) {
  stack->Unwind(StackTrace::GetNextInstructionPc(sig.pc), sig.bp, sig.context,
                common_flags()->fast_unwind_on_fatal);
}

void HwasanOnDeadlySignal(int signo, void *info, void *context) {
  // Probably a tag mismatch.
  if (signo == SIGTRAP)
    if (HwasanOnSIGTRAP(signo, (siginfo_t *)info, (ucontext_t *)context))
      return;

  HandleDeadlySignal(info, context, GetTid(), &OnStackUnwind, nullptr);
}

void Thread::InitStackAndTls(const InitState *) {
  uptr tls_size;
  uptr stack_size;
  GetThreadStackAndTls(IsMainThread(), &stack_bottom_, &stack_size, &tls_begin_,
                       &tls_size);
  stack_top_ = stack_bottom_ + stack_size;
  tls_end_ = tls_begin_ + tls_size;
}

uptr TagMemoryAligned(uptr p, uptr size, tag_t tag) {
  CHECK(IsAligned(p, kShadowAlignment));
  CHECK(IsAligned(size, kShadowAlignment));
  uptr shadow_start = MemToShadow(p);
  uptr shadow_size = MemToShadowSize(size);

  uptr page_size = GetPageSizeCached();
  uptr page_start = RoundUpTo(shadow_start, page_size);
  uptr page_end = RoundDownTo(shadow_start + shadow_size, page_size);
  uptr threshold = common_flags()->clear_shadow_mmap_threshold;
  if (SANITIZER_LINUX &&
      UNLIKELY(page_end >= page_start + threshold && tag == 0)) {
    internal_memset((void *)shadow_start, tag, page_start - shadow_start);
    internal_memset((void *)page_end, tag,
                    shadow_start + shadow_size - page_end);
    // For an anonymous private mapping MADV_DONTNEED will return a zero page on
    // Linux.
    ReleaseMemoryPagesToOSAndZeroFill(page_start, page_end);
  } else {
    internal_memset((void *)shadow_start, tag, shadow_size);
  }
  return AddTagToPointer(p, tag);
}
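
// Note on the fast path above (illustrative, assuming 4 KiB pages): when a
// large region is retagged to zero and the page-aligned middle of its shadow
// is at least clear_shadow_mmap_threshold bytes, only the unaligned head and
// tail of the shadow range are memset; the middle is handed to
// ReleaseMemoryPagesToOSAndZeroFill(), which relies on anonymous private pages
// reading back as zeroes after MADV_DONTNEED and is much cheaper than dirtying
// every shadow page with an explicit memset.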

static void BeforeFork() {
  if (CAN_SANITIZE_LEAKS) {
    __lsan::LockGlobal();
  }
  // The `_lsan` functions are defined regardless of `CAN_SANITIZE_LEAKS` and
  // lock the stuff we need.
  __lsan::LockThreads();
  __lsan::LockAllocator();
  StackDepotLockBeforeFork();
}

static void AfterFork(bool fork_child) {
  StackDepotUnlockAfterFork(fork_child);
  // The `_lsan` functions are defined regardless of `CAN_SANITIZE_LEAKS` and
  // unlock the stuff we need.
  __lsan::UnlockAllocator();
  __lsan::UnlockThreads();
  if (CAN_SANITIZE_LEAKS) {
    __lsan::UnlockGlobal();
  }
}

void HwasanInstallAtForkHandler() {
  pthread_atfork(
      &BeforeFork, []() { AfterFork(/* fork_child= */ false); },
      []() { AfterFork(/* fork_child= */ true); });
}

void InstallAtExitCheckLeaks() {
  if (CAN_SANITIZE_LEAKS) {
    if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) {
      if (flags()->halt_on_error)
        Atexit(__lsan::DoLeakCheck);
      else
        Atexit(__lsan::DoRecoverableLeakCheckVoid);
    }
  }
}

}  // namespace __hwasan

using namespace __hwasan;

extern "C" void __hwasan_thread_enter() {
  hwasanThreadList().CreateCurrentThread()->EnsureRandomStateInited();
}

extern "C" void __hwasan_thread_exit() {
  Thread *t = GetCurrentThread();
  // Make sure that the signal handler cannot see a stale current thread
  // pointer.
  atomic_signal_fence(memory_order_seq_cst);
  if (t) {
    // Block async signals on the thread as the handler can be instrumented.
    // After this point instrumented code can't access essential data from TLS
    // and will crash.
    // Bionic already calls __hwasan_thread_exit with blocked signals.
    if (SANITIZER_GLIBC)
      BlockSignals();
    hwasanThreadList().ReleaseThread(t);
  }
}

#endif  // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD