Path: blob/main/system/lib/libcxxabi/src/fallback_malloc.cpp
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "fallback_malloc.h"
#include "abort_message.h"

#include <__thread/support.h>
#ifndef _LIBCXXABI_HAS_NO_THREADS
#if defined(__ELF__) && defined(_LIBCXXABI_LINK_PTHREAD_LIB)
#pragma comment(lib, "pthread")
#endif
#endif

#include <__memory/aligned_alloc.h>
#include <__assert>
#include <stdlib.h> // for malloc, calloc, free
#include <string.h> // for memset

// A small, simple heap manager based (loosely) on
// the startup heap manager from FreeBSD, optimized for space.
//
// Manages a fixed-size memory pool, supports malloc and free only.
// No support for realloc.
//
// Allocates chunks in multiples of four bytes, with a four byte header
// for each chunk. The overhead of each chunk is kept low by keeping pointers
// as two byte offsets within the heap, rather than (4 or 8 byte) pointers.

namespace {

// When POSIX threads are not available, make the mutex operations a no-op
#ifndef _LIBCXXABI_HAS_NO_THREADS
static _LIBCPP_CONSTINIT std::__libcpp_mutex_t heap_mutex = _LIBCPP_MUTEX_INITIALIZER;
#else
static _LIBCPP_CONSTINIT void* heap_mutex = 0;
#endif

class mutexor {
public:
#ifndef _LIBCXXABI_HAS_NO_THREADS
  mutexor(std::__libcpp_mutex_t* m) : mtx_(m) {
    std::__libcpp_mutex_lock(mtx_);
  }
  ~mutexor() { std::__libcpp_mutex_unlock(mtx_); }
#else
  mutexor(void*) {}
  ~mutexor() {}
#endif
private:
  mutexor(const mutexor& rhs);
  mutexor& operator=(const mutexor& rhs);
#ifndef _LIBCXXABI_HAS_NO_THREADS
  std::__libcpp_mutex_t* mtx_;
#endif
};

static const size_t HEAP_SIZE = 512;
char heap[HEAP_SIZE] __attribute__((aligned));

typedef unsigned short heap_offset;
typedef unsigned short heap_size;

// On both 64 and 32 bit targets heap_node should have the following properties
// Size: 4
// Alignment: 2
struct heap_node {
  heap_offset next_node; // offset into heap
  heap_size len;         // size in units of "sizeof(heap_node)"
};

// All pointers returned by fallback_malloc must be at least as aligned
// as RequiredAlignment. Note that RequiredAlignment can be greater than
// alignof(std::max_align_t) on 64 bit systems compiling 32 bit code.
struct FallbackMaxAlignType {
} __attribute__((aligned));
const size_t RequiredAlignment = alignof(FallbackMaxAlignType);

static_assert(alignof(FallbackMaxAlignType) % sizeof(heap_node) == 0,
              "The required alignment must be evenly divisible by the sizeof(heap_node)");

// The number of heap_node's that can fit in a chunk of memory with the size
// of the RequiredAlignment. On 64 bit targets NodesPerAlignment should be 4.
const size_t NodesPerAlignment = alignof(FallbackMaxAlignType) / sizeof(heap_node);
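// Worked example (illustrative, assuming a typical 64 bit target where
// __attribute__((aligned)) with no argument yields 16 byte alignment):
// RequiredAlignment == 16 and sizeof(heap_node) == 4, so
// NodesPerAlignment == 16 / 4 == 4, i.e. every fourth heap_node boundary
// inside 'heap' is also a RequiredAlignment boundary.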

static const heap_node* list_end =
    (heap_node*)(&heap[HEAP_SIZE]); // one past the end of the heap
static heap_node* freelist = NULL;

heap_node* node_from_offset(const heap_offset offset) {
  return (heap_node*)(heap + (offset * sizeof(heap_node)));
}

heap_offset offset_from_node(const heap_node* ptr) {
  return static_cast<heap_offset>(
      static_cast<size_t>(reinterpret_cast<const char*>(ptr) - heap) /
      sizeof(heap_node));
}

// Return a pointer to the first address, 'A', in `heap` that can actually be
// used to represent a heap_node. 'A' must be aligned so that
// '(A + sizeof(heap_node)) % RequiredAlignment == 0'. On 64 bit systems this
// address should be 12 bytes after the first 16 byte boundary.
heap_node* getFirstAlignedNodeInHeap() {
  heap_node* node = (heap_node*)heap;
  const size_t alignNBytesAfterBoundary = RequiredAlignment - sizeof(heap_node);
  size_t boundaryOffset = reinterpret_cast<size_t>(node) % RequiredAlignment;
  size_t requiredOffset = alignNBytesAfterBoundary - boundaryOffset;
  size_t NElemOffset = requiredOffset / sizeof(heap_node);
  return node + NElemOffset;
}

void init_heap() {
  freelist = getFirstAlignedNodeInHeap();
  freelist->next_node = offset_from_node(list_end);
  freelist->len = static_cast<heap_size>(list_end - freelist);
}

// How big a chunk we allocate
size_t alloc_size(size_t len) {
  return (len + sizeof(heap_node) - 1) / sizeof(heap_node) + 1;
}

bool is_fallback_ptr(void* ptr) {
  return ptr >= heap && ptr < (heap + HEAP_SIZE);
}

void* fallback_malloc(size_t len) {
  heap_node *p, *prev;
  const size_t nelems = alloc_size(len);
  mutexor mtx(&heap_mutex);

  if (NULL == freelist)
    init_heap();

  // Walk the free list, looking for a "big enough" chunk
  for (p = freelist, prev = 0; p && p != list_end;
       prev = p, p = node_from_offset(p->next_node)) {

    // Check the invariant that all heap_node pointers 'p' are aligned
    // so that 'p + 1' has an alignment of at least RequiredAlignment
    _LIBCXXABI_ASSERT(reinterpret_cast<size_t>(p + 1) % RequiredAlignment == 0, "");

    // Calculate the number of extra padding elements needed in order
    // to split 'p' and create a properly aligned heap_node from the tail
    // of 'p'. We calculate aligned_nelems such that 'p->len - aligned_nelems'
    // will be a multiple of NodesPerAlignment.
    size_t aligned_nelems = nelems;
    if (p->len > nelems) {
      heap_size remaining_len = static_cast<heap_size>(p->len - nelems);
      aligned_nelems += remaining_len % NodesPerAlignment;
    }

    // The chunk is larger and we can create a properly aligned heap_node
    // from the tail. In this case we shorten 'p' and return the tail.
    if (p->len > aligned_nelems) {
      heap_node* q;
      p->len = static_cast<heap_size>(p->len - aligned_nelems);
      q = p + p->len;
      q->next_node = 0;
      q->len = static_cast<heap_size>(aligned_nelems);
      void* ptr = q + 1;
      _LIBCXXABI_ASSERT(reinterpret_cast<size_t>(ptr) % RequiredAlignment == 0, "");
      return ptr;
    }

    // The chunk is the exact size or the chunk is larger but not large
    // enough to split due to alignment constraints.
    if (p->len >= nelems) {
      if (prev == 0)
        freelist = node_from_offset(p->next_node);
      else
        prev->next_node = p->next_node;
      p->next_node = 0;
      void* ptr = p + 1;
      _LIBCXXABI_ASSERT(reinterpret_cast<size_t>(ptr) % RequiredAlignment == 0, "");
      return ptr;
    }
  }
  return NULL; // couldn't find a spot big enough
}
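
// Worked example (illustrative, assuming sizeof(heap_node) == 4 and
// NodesPerAlignment == 4): a request of len == 10 bytes needs
// nelems == alloc_size(10) == (10 + 3) / 4 + 1 == 4 nodes, i.e. one node of
// header plus 12 usable bytes. If the free chunk found has p->len == 10,
// then remaining_len == 6 and aligned_nelems == 4 + (6 % 4) == 6, so 'p' is
// shortened to 4 nodes and the 6-node tail 'q' is returned; because the new
// p->len is a multiple of NodesPerAlignment, 'q + 1' keeps the
// RequiredAlignment guarantee.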

// Return the start of the next block
heap_node* after(struct heap_node* p) { return p + p->len; }

void fallback_free(void* ptr) {
  struct heap_node* cp = ((struct heap_node*)ptr) - 1; // retrieve the chunk
  struct heap_node *p, *prev;

  mutexor mtx(&heap_mutex);

#ifdef DEBUG_FALLBACK_MALLOC
  std::printf("Freeing item at %d of size %d\n", offset_from_node(cp), cp->len);
#endif

  for (p = freelist, prev = 0; p && p != list_end;
       prev = p, p = node_from_offset(p->next_node)) {
#ifdef DEBUG_FALLBACK_MALLOC
    std::printf(" p=%d, cp=%d, after(p)=%d, after(cp)=%d\n",
                offset_from_node(p), offset_from_node(cp),
                offset_from_node(after(p)), offset_from_node(after(cp)));
#endif
    if (after(p) == cp) {
#ifdef DEBUG_FALLBACK_MALLOC
      std::printf(" Appending onto chunk at %d\n", offset_from_node(p));
#endif
      p->len = static_cast<heap_size>(
          p->len + cp->len); // make the free heap_node larger
      return;
    } else if (after(cp) == p) { // there's a free heap_node right after
#ifdef DEBUG_FALLBACK_MALLOC
      std::printf(" Appending free chunk at %d\n", offset_from_node(p));
#endif
      cp->len = static_cast<heap_size>(cp->len + p->len);
      if (prev == 0) {
        freelist = cp;
        cp->next_node = p->next_node;
      } else
        prev->next_node = offset_from_node(cp);
      return;
    }
  }
  // Nothing to merge with, add it to the start of the free list
#ifdef DEBUG_FALLBACK_MALLOC
  std::printf(" Making new free list entry %d\n", offset_from_node(cp));
#endif
  cp->next_node = offset_from_node(freelist);
  freelist = cp;
}

#ifdef INSTRUMENT_FALLBACK_MALLOC
size_t print_free_list() {
  struct heap_node *p, *prev;
  heap_size total_free = 0;
  if (NULL == freelist)
    init_heap();

  for (p = freelist, prev = 0; p && p != list_end;
       prev = p, p = node_from_offset(p->next_node)) {
    std::printf("%sOffset: %d\tsize: %d Next: %d\n",
                (prev == 0 ? "" : " "), offset_from_node(p), p->len, p->next_node);
    total_free += p->len;
  }
  std::printf("Total Free space: %d\n", total_free);
  return total_free;
}
#endif
} // end unnamed namespace

namespace __cxxabiv1 {

struct __attribute__((aligned)) __aligned_type {};

void* __aligned_malloc_with_fallback(size_t size) {
#if defined(_WIN32)
  if (void* dest = std::__libcpp_aligned_alloc(alignof(__aligned_type), size))
    return dest;
#elif !_LIBCPP_HAS_LIBRARY_ALIGNED_ALLOCATION
  if (void* dest = ::malloc(size))
    return dest;
#else
  if (size == 0)
    size = 1;
  if (void* dest = std::__libcpp_aligned_alloc(__alignof(__aligned_type), size))
    return dest;
#endif
  return fallback_malloc(size);
}

void* __calloc_with_fallback(size_t count, size_t size) {
  void* ptr = ::calloc(count, size);
  if (NULL != ptr)
    return ptr;
  // if calloc fails, fall back to emergency stash
  ptr = fallback_malloc(size * count);
  if (NULL != ptr)
    ::memset(ptr, 0, size * count);
  return ptr;
}

void __aligned_free_with_fallback(void* ptr) {
  if (is_fallback_ptr(ptr))
    fallback_free(ptr);
  else {
#if !_LIBCPP_HAS_LIBRARY_ALIGNED_ALLOCATION
    ::free(ptr);
#else
    std::__libcpp_aligned_free(ptr);
#endif
  }
}

void __free_with_fallback(void* ptr) {
  if (is_fallback_ptr(ptr))
    fallback_free(ptr);
  else
    ::free(ptr);
}

} // namespace __cxxabiv1