#include <errno.h>
#include <stdalign.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <unistd.h>
#include <memory.h>
#include <assert.h>
#include <malloc.h>
#include <stdio.h>
#include <emscripten/heap.h>
#include <emscripten/threading.h>
void *_sbrk64(int64_t numBytes);
#ifdef __EMSCRIPTEN_TRACING__
#include <emscripten/trace.h>
#endif
// emmalloc requires that right-shifting a negative signed integer performs an
// arithmetic (sign-extending) shift; the bucket computation relies on it.
static_assert((((int32_t)0x80000000U) >> 31) == -1, "This malloc implementation requires that right-shifting a signed integer produces a sign-extending (arithmetic) shift!");
// Minimum alignment of every returned allocation (8 bytes on wasm32).
#define MALLOC_ALIGNMENT alignof(max_align_t)
static_assert(alignof(max_align_t) == 8, "max_align_t must be correct");
#ifdef EMMALLOC_NO_STD_EXPORTS
#define EMMALLOC_ALIAS(ALIAS, ORIGINAL)
#else
// Export the emmalloc_* entry points weakly under the standard C names
// (malloc, free, realloc, ...) so user code can still override them.
#define EMMALLOC_EXPORT __attribute__((weak, __visibility__("default")))
#define EMMALLOC_ALIAS(ALIAS, ORIGINAL) extern __typeof(ORIGINAL) ALIAS __attribute__((weak, alias(#ORIGINAL)));
#endif
#define MIN(x, y) ((x) < (y) ? (x) : (y))
#define MAX(x, y) ((x) > (y) ? (x) : (y))
// Free regions are binned into 64 size classes; a 64-bit mask tracks which
// buckets may be non-empty.
#define NUM_FREE_BUCKETS 64
#define BUCKET_BITMASK_T uint64_t
// Or'ed into the low bit of a region's ceiling size word to mark the region
// free. Region sizes are always multiples of 4, so the bit is otherwise unused.
#define FREE_REGION_FLAG 0x1u
// Largest servicable allocation size; larger requests (often the result of
// negative-integer wraparound in callers) fail immediately.
#define MAX_ALLOC_SIZE 0xFFFFFFC7u
// A memory region. In-memory layout: [size][payload...][size-at-ceiling],
// with the full region byte size stored at both ends. The prev/next links are
// only meaningful while the region is on a free list; for a used region those
// bytes belong to the user payload.
typedef struct Region {
size_t size;
struct Region *prev, *next;
// Placeholder: in a real region the second size copy sits at the very end of
// the region, not at this struct offset.
size_t _at_the_end_of_this_struct_size;
} Region;
// Header of one contiguous sbrk()ed block managed by emmalloc. Blocks are
// chained via 'next'; endPtr points one past the end of the block.
typedef struct RootRegion {
uint32_t size;
struct RootRegion *next;
uint8_t* endPtr;
} RootRegion;
#ifdef __EMSCRIPTEN_SHARED_MEMORY__
// Global allocator lock: a simple test-and-set spinlock guarding all allocator
// state when building with shared memory (pthreads).
static volatile uint8_t multithreadingLock = 0;
#define MALLOC_ACQUIRE() while (__sync_lock_test_and_set(&multithreadingLock, 1)) { while (multithreadingLock) { } }
#define MALLOC_RELEASE() __sync_lock_release(&multithreadingLock)
#define ASSERT_MALLOC_IS_ACQUIRED() assert(multithreadingLock == 1)
#else
// Single-threaded build: locking compiles away.
#define MALLOC_ACQUIRE() ((void)0)
#define MALLOC_RELEASE() ((void)0)
#define ASSERT_MALLOC_IS_ACQUIRED() ((void)0)
#endif
#define IS_POWER_OF_2(val) (((val) & ((val)-1)) == 0)
#define ALIGN_UP(ptr, alignment) ((uint8_t*)((((uintptr_t)(ptr)) + ((alignment)-1)) & ~((alignment)-1)))
#define ALIGN_DOWN(ptr, alignment) ((uint8_t*)(((uintptr_t)(ptr)) & ~((alignment)-1)))
#define HAS_ALIGNMENT(ptr, alignment) ((((uintptr_t)(ptr)) & ((alignment)-1)) == 0)
static_assert(IS_POWER_OF_2(MALLOC_ALIGNMENT), "MALLOC_ALIGNMENT must be a power of two value!");
static_assert(MALLOC_ALIGNMENT >= 4, "Smallest possible MALLOC_ALIGNMENT if 4!");
// Head of the chain of all sbrk()ed blocks this allocator manages.
static RootRegion *listOfAllRegions = NULL;
// Sentinel heads of the doubly-linked free list for each size bucket, and a
// bitmask with one bit per bucket marking which buckets may be non-empty.
static Region freeRegionBuckets[NUM_FREE_BUCKETS];
static BUCKET_BITMASK_T freeRegionBucketsUsed = 0;
// Per-region bookkeeping overhead: the size words at bottom and ceiling.
#define REGION_HEADER_SIZE (2*sizeof(size_t))
// Smallest payload: a freed region must be able to hold its prev/next links.
#define SMALLEST_ALLOCATION_SIZE (2*sizeof(void*))
static_assert(NUM_FREE_BUCKETS == 64, "Following function is tailored specifically for NUM_FREE_BUCKETS == 64 case");
// Maps an allocation payload size to its free-list bucket index in
// [0, NUM_FREE_BUCKETS-1]. Sizes below 128 map linearly in 8-byte steps
// (buckets 0..14); larger sizes map roughly logarithmically via the
// count-of-leading-zeros, with two sub-buckets per power of two below 1MB
// and one per power of two above. Assumes 32-bit operands for __builtin_clz.
static int compute_free_list_bucket(size_t allocSize) {
if (allocSize < 128) return (allocSize >> 3) - 1;
int clz = __builtin_clz(allocSize);
int bucketIndex =
(clz > 19)
? 110 - (clz<<2) + ((allocSize >> (29-clz)) ^ 4)
: MIN( 71 - (clz<<1) + ((allocSize >> (30-clz)) ^ 2), NUM_FREE_BUCKETS-1);
assert(bucketIndex >= 0);
assert(bucketIndex < NUM_FREE_BUCKETS);
return bucketIndex;
}
// Strips FREE_REGION_FLAG from a ceiling size word, yielding the raw size.
#define DECODE_CEILING_SIZE(size) ((size_t)((size) & ~FREE_REGION_FLAG))
// Returns the region immediately below 'region' in memory, located by reading
// the neighbor's ceiling size word stored just under this region's start.
static Region *prev_region(Region *region) {
size_t prevRegionSize = ((size_t*)region)[-1];
prevRegionSize = DECODE_CEILING_SIZE(prevRegionSize);
return (Region*)((uint8_t*)region - prevRegionSize);
}
// Returns the region immediately above 'region' in memory.
static Region *next_region(Region *region) {
return (Region*)((uint8_t*)region + region->size);
}
// Reads the size word stored in the region's last size_t slot
// (may carry FREE_REGION_FLAG in its low bit).
static size_t region_ceiling_size(Region *region) {
return ((size_t*)((uint8_t*)region + region->size))[-1];
}
// A region is free iff its ceiling size word carries FREE_REGION_FLAG.
static bool region_is_free(Region *r) {
return region_ceiling_size(r) & FREE_REGION_FLAG;
}
// A used region has identical, unflagged size words at both ends.
static bool region_is_in_use(Region *r) {
return r->size == region_ceiling_size(r);
}
static size_t size_of_region_from_ceiling(Region *r) {
size_t size = region_ceiling_size(r);
return DECODE_CEILING_SIZE(size);
}
// Debug check: bottom and ceiling size words must agree (ignoring the flag).
static bool debug_region_is_consistent(Region *r) {
assert(r);
size_t sizeAtBottom = r->size;
size_t sizeAtCeiling = size_of_region_from_ceiling(r);
return sizeAtBottom == sizeAtCeiling;
}
// First byte of the user payload (right after the bottom size word).
static uint8_t *region_payload_start_ptr(Region *region) {
return (uint8_t*)region + sizeof(size_t);
}
// One past the last payload byte (just before the ceiling size word).
static uint8_t *region_payload_end_ptr(Region *region) {
return (uint8_t*)region + region->size - sizeof(size_t);
}
// Stamps a used region of 'size' bytes at 'ptr': writes the size word at both
// the bottom and the ceiling with FREE_REGION_FLAG cleared.
static void create_used_region(void *ptr, size_t size) {
assert(ptr);
assert(HAS_ALIGNMENT(ptr, sizeof(size_t)));
assert(HAS_ALIGNMENT(size, sizeof(size_t)));
assert(size >= sizeof(Region));
*(size_t*)ptr = size;
((size_t*)ptr)[(size/sizeof(size_t))-1] = size;
}
// Stamps a free region: like create_used_region(), but the ceiling size word
// carries FREE_REGION_FLAG. Does NOT link the region onto a free list; the
// caller does that separately via link_to_free_list().
static void create_free_region(void *ptr, size_t size) {
assert(ptr);
assert(HAS_ALIGNMENT(ptr, sizeof(size_t)));
assert(HAS_ALIGNMENT(size, sizeof(size_t)));
assert(size >= sizeof(Region));
Region *freeRegion = (Region*)ptr;
freeRegion->size = size;
((size_t*)ptr)[(size/sizeof(size_t))-1] = size | FREE_REGION_FLAG;
}
// Inserts 'region' into a free list immediately before 'prependTo' (i.e. at
// the tail of the bucket when 'prependTo' is the bucket's sentinel head).
static void prepend_to_free_list(Region *region, Region *prependTo) {
assert(region);
assert(prependTo);
assert(region_is_free((Region*)region));
region->next = prependTo;
region->prev = prependTo->prev;
assert(region->prev);
prependTo->prev = region;
region->prev->next = region;
}
// Detaches 'region' from whichever free list it is on. Note: this does not
// clear the bucket's bit in freeRegionBucketsUsed even if the bucket becomes
// empty; allocate_memory() clears stale bits lazily.
static void unlink_from_free_list(Region *region) {
assert(region);
assert(region_is_free((Region*)region));
assert(region->prev);
assert(region->next);
region->prev->next = region->next;
region->next->prev = region->prev;
}
// Pushes a free region onto the head of its size bucket's list and marks the
// bucket as (possibly) non-empty in the bitmask.
static void link_to_free_list(Region *freeRegion) {
assert(freeRegion);
assert(freeRegion->size >= sizeof(Region));
int bucketIndex = compute_free_list_bucket(freeRegion->size-REGION_HEADER_SIZE);
Region *freeListHead = freeRegionBuckets + bucketIndex;
freeRegion->prev = freeListHead;
freeRegion->next = freeListHead->next;
assert(freeRegion->next);
freeListHead->next = freeRegion;
freeRegion->next->prev = freeRegion;
freeRegionBucketsUsed |= ((BUCKET_BITMASK_T)1) << bucketIndex;
}
// Debug aid: prints every root block, every region inside it, and the contents
// of all free-list buckets to the console (asynchronously on the main thread).
// Caller must hold the allocator lock.
static void dump_memory_regions() {
ASSERT_MALLOC_IS_ACQUIRED();
RootRegion *root = listOfAllRegions;
MAIN_THREAD_ASYNC_EM_ASM(out('All memory regions:'));
while (root) {
Region *r = (Region*)root;
assert(debug_region_is_consistent(r));
uint8_t *lastRegionEnd = root->endPtr;
MAIN_THREAD_ASYNC_EM_ASM(out('Region block '+ptrToString($0)+' - '+ptrToString($1)+ ' ('+Number($2)+' bytes):'),
r, lastRegionEnd, lastRegionEnd-(uint8_t*)r);
// Walk the regions of this block bottom-to-top.
while ((uint8_t*)r < lastRegionEnd) {
MAIN_THREAD_ASYNC_EM_ASM(out('Region '+ptrToString($0)+', size: '+Number($1)+' ('+($2?"used":"--FREE--")+')'),
r, r->size, region_ceiling_size(r) == r->size);
assert(debug_region_is_consistent(r));
size_t sizeFromCeiling = size_of_region_from_ceiling(r);
if (sizeFromCeiling != r->size) {
MAIN_THREAD_ASYNC_EM_ASM(out('Corrupt region! Size marker at the end of the region does not match: '+Number($0)), sizeFromCeiling);
}
// A zero-sized region would make next_region() loop forever; bail out.
if (r->size == 0) {
break;
}
r = next_region(r);
}
root = root->next;
MAIN_THREAD_ASYNC_EM_ASM(out(""));
}
MAIN_THREAD_ASYNC_EM_ASM(out('Free regions:'));
// Dump each free-list bucket, validating list invariants as we go.
for (int i = 0; i < NUM_FREE_BUCKETS; ++i) {
Region *prev = &freeRegionBuckets[i];
Region *fr = freeRegionBuckets[i].next;
while (fr != &freeRegionBuckets[i]) {
MAIN_THREAD_ASYNC_EM_ASM(out('In bucket '+$0+', free region '+ptrToString($1)+', size: ' + Number($2) + ' (size at ceiling: '+Number($3)+'), prev: ' + ptrToString($4) + ', next: ' + ptrToString($5)),
i, fr, fr->size, size_of_region_from_ceiling(fr), fr->prev, fr->next);
assert(debug_region_is_consistent(fr));
assert(region_is_free(fr));
assert(fr->prev == prev);
prev = fr;
assert(fr->next != fr);
assert(fr->prev != fr);
fr = fr->next;
}
}
MAIN_THREAD_ASYNC_EM_ASM(out('Free bucket index map: ' + Number($0).toString(2) + ' ' + Number($1).toString(2)), (uint32_t)(freeRegionBucketsUsed >> 32), (uint32_t)freeRegionBucketsUsed);
MAIN_THREAD_ASYNC_EM_ASM(out(""));
}
// Public wrapper: takes the allocator lock around dump_memory_regions().
void emmalloc_dump_memory_regions() {
MALLOC_ACQUIRE();
dump_memory_regions();
MALLOC_RELEASE();
}
// Walks all regions in all root blocks, then all free-list buckets, checking
// size-word consistency and free-list link invariants. Returns nonzero if any
// corruption is detected. Caller must hold the allocator lock.
static int validate_memory_regions() {
ASSERT_MALLOC_IS_ACQUIRED();
RootRegion *root = listOfAllRegions;
while (root) {
Region *r = (Region*)root;
if (!debug_region_is_consistent(r)) {
MAIN_THREAD_ASYNC_EM_ASM(err('Used region '+ptrToString($0)+', size: '+Number($1)+' ('+($2?"used":"--FREE--")+') is corrupt (size markers in the beginning and at the end of the region do not match!)'),
r, r->size, region_ceiling_size(r) == r->size);
return 1;
}
uint8_t *lastRegionEnd = root->endPtr;
while ((uint8_t*)r < lastRegionEnd) {
if (!debug_region_is_consistent(r)) {
MAIN_THREAD_ASYNC_EM_ASM(err('Used region '+ptrToString($0)+', size: '+Number($1)+' ('+($2?"used":"--FREE--")+') is corrupt (size markers in the beginning and at the end of the region do not match!)'),
r, r->size, region_ceiling_size(r) == r->size);
return 1;
}
// Stop rather than loop forever on a zero-sized (corrupt) region.
if (r->size == 0) {
break;
}
r = next_region(r);
}
root = root->next;
}
// Verify every free list: each node must be consistent, marked free, and
// properly doubly linked (no self-links, prev matches the walk).
for (int i = 0; i < NUM_FREE_BUCKETS; ++i) {
Region *prev = &freeRegionBuckets[i];
Region *fr = freeRegionBuckets[i].next;
while (fr != &freeRegionBuckets[i]) {
if (!debug_region_is_consistent(fr) || !region_is_free(fr) || fr->prev != prev || fr->next == fr || fr->prev == fr) {
MAIN_THREAD_ASYNC_EM_ASM(out('In bucket '+$0+', free region '+ptrToString($1)+', size: ' + Number($2) + ' (size at ceiling: '+Number($3)+'), prev: ' + ptrToString($4) + ', next: 0x' + ptrToString($5) + ' is corrupt!'),
i, fr, fr->size, size_of_region_from_ceiling(fr), fr->prev, fr->next);
return 1;
}
prev = fr;
fr = fr->next;
}
}
return 0;
}
// Public wrapper: takes the lock and returns nonzero on detected corruption.
int emmalloc_validate_memory_regions() {
MALLOC_ACQUIRE();
int memoryError = validate_memory_regions();
MALLOC_RELEASE();
return memoryError;
}
// Grows the heap by (at least) 'numBytes' via sbrk(). If the new memory is
// contiguous with the end of the last root block, that block is extended and
// the new space is merged with a trailing free region when one exists;
// otherwise a brand-new root block is created. A sentinel used region is
// placed at the very end of the claimed space so that region walks terminate.
// Returns false if sbrk() fails.
static bool claim_more_memory(size_t numBytes) {
#ifdef EMMALLOC_VERBOSE
MAIN_THREAD_ASYNC_EM_ASM(out('claim_more_memory(numBytes='+Number($0)+ ')'), numBytes);
#endif
#ifdef EMMALLOC_MEMVALIDATE
validate_memory_regions();
#endif
numBytes = (size_t)ALIGN_UP(numBytes, MALLOC_ALIGNMENT);
assert((int64_t)numBytes >= 0);
uint8_t *startPtr = (uint8_t*)_sbrk64((int64_t)numBytes);
if ((intptr_t)startPtr == -1) {
#ifdef EMMALLOC_VERBOSE
MAIN_THREAD_ASYNC_EM_ASM(err('claim_more_memory: sbrk failed!'));
#endif
return false;
}
#ifdef EMMALLOC_VERBOSE
MAIN_THREAD_ASYNC_EM_ASM(out('claim_more_memory: claimed ' + ptrToString($0) + ' - ' + ptrToString($1) + ' (' + Number($2) + ' bytes) via sbrk()'), startPtr, startPtr + numBytes, numBytes);
#endif
assert(HAS_ALIGNMENT(startPtr, alignof(size_t)));
uint8_t *endPtr = startPtr + numBytes;
// Place a sentinel used region at the end of the claimed block so that
// forward region walks stop there.
Region *endSentinelRegion = (Region*)(endPtr - sizeof(Region));
create_used_region(endSentinelRegion, sizeof(Region));
#ifdef EMMALLOC_VERBOSE
MAIN_THREAD_ASYNC_EM_ASM(out('claim_more_memory: created a sentinel memory region at address ' + ptrToString($0)), endSentinelRegion);
#endif
uint8_t *previousSbrkEndAddress = listOfAllRegions ? listOfAllRegions->endPtr : 0;
if (startPtr == previousSbrkEndAddress) {
#ifdef EMMALLOC_VERBOSE
MAIN_THREAD_ASYNC_EM_ASM(err('claim_more_memory: sbrk() returned a region contiguous to last root region, expanding the existing root region'));
#endif
// The new memory directly follows the previous block's end sentinel.
Region *prevEndSentinel = prev_region((Region*)startPtr);
assert(debug_region_is_consistent(prevEndSentinel));
assert(region_is_in_use(prevEndSentinel));
Region *prevRegion = prev_region(prevEndSentinel);
assert(debug_region_is_consistent(prevRegion));
listOfAllRegions->endPtr = endPtr;
// If the region before the old sentinel was free, absorb the old sentinel
// and the new space into one larger free region.
if (region_is_free(prevRegion)) {
size_t newFreeRegionSize = (uint8_t*)endSentinelRegion - (uint8_t*)prevRegion;
unlink_from_free_list(prevRegion);
create_free_region(prevRegion, newFreeRegionSize);
link_to_free_list(prevRegion);
return true;
}
// Otherwise recycle the old end sentinel's bytes into the new free region.
startPtr -= sizeof(Region);
} else {
#ifdef EMMALLOC_VERBOSE
MAIN_THREAD_ASYNC_EM_ASM(err('claim_more_memory: sbrk() returned a disjoint region to last root region, some external code must have sbrk()ed outside emmalloc(). Creating a new root region'));
#endif
// Disjoint block: its first sizeof(Region) bytes double as the RootRegion
// header (stamped as a used region so walks skip it).
create_used_region(startPtr, sizeof(Region));
RootRegion *newRegionBlock = (RootRegion*)startPtr;
newRegionBlock->next = listOfAllRegions;
newRegionBlock->endPtr = endPtr;
listOfAllRegions = newRegionBlock;
startPtr += sizeof(Region);
}
// Turn the remaining claimed space into one free region.
create_free_region(startPtr, (uint8_t*)endSentinelRegion - startPtr);
link_to_free_list((Region*)startPtr);
return true;
}
// Runs at static-constructor time (priority 47, before user constructors):
// initializes every free bucket to an empty circular list, then claims an
// initial sliver of memory from sbrk().
__attribute__((constructor(47)))
static void initialize_emmalloc_heap() {
// Each bucket's sentinel starts out pointing at itself (empty list).
#pragma clang loop unroll(disable)
for (int i = 0; i < NUM_FREE_BUCKETS; ++i) {
freeRegionBuckets[i].prev = freeRegionBuckets[i].next = &freeRegionBuckets[i];
}
#ifdef EMMALLOC_VERBOSE
MAIN_THREAD_ASYNC_EM_ASM(out('initialize_emmalloc_heap()'));
#endif
claim_more_memory(3*sizeof(Region));
}
// Forgets all allocator state and reinitializes from scratch. Previously
// handed-out memory is abandoned, not returned to the system.
void emmalloc_blank_slate_from_orbit() {
MALLOC_ACQUIRE();
listOfAllRegions = NULL;
freeRegionBucketsUsed = 0;
initialize_emmalloc_heap();
MALLOC_RELEASE();
}
// Tries to carve an allocation of 'size' payload bytes aligned to 'alignment'
// out of 'freeRegion'. Returns the payload pointer on success, or NULL if the
// region is too small once alignment padding is accounted for (in which case
// the region is left untouched on its free list). Caller must hold the lock.
static void *attempt_allocate(Region *freeRegion, size_t alignment, size_t size) {
ASSERT_MALLOC_IS_ACQUIRED();
#ifdef EMMALLOC_VERBOSE
MAIN_THREAD_ASYNC_EM_ASM(out('attempt_allocate(freeRegion=' + ptrToString($0) + ',alignment=' + Number($1) + ',size=' + Number($2) + ')'), freeRegion, alignment, size);
#endif
assert(freeRegion);
uint8_t *payloadStartPtr = region_payload_start_ptr(freeRegion);
uint8_t *payloadStartPtrAligned = ALIGN_UP(payloadStartPtr, alignment);
uint8_t *payloadEndPtr = region_payload_end_ptr(freeRegion);
// Reject before mutating anything if the aligned payload cannot fit.
if (payloadStartPtrAligned + size > payloadEndPtr) {
return NULL;
}
unlink_from_free_list(freeRegion);
// If alignment forces the payload forward, donate the padding bytes to the
// preceding used region (growing it), and shift this region's start up.
if (payloadStartPtr != payloadStartPtrAligned) {
Region *prevRegion = prev_region((Region*)freeRegion);
assert(region_is_in_use(prevRegion));
size_t regionBoundaryBumpAmount = payloadStartPtrAligned - payloadStartPtr;
size_t newThisRegionSize = freeRegion->size - regionBoundaryBumpAmount;
create_used_region(prevRegion, prevRegion->size + regionBoundaryBumpAmount);
freeRegion = (Region *)((uint8_t*)freeRegion + regionBoundaryBumpAmount);
freeRegion->size = newThisRegionSize;
}
// If there is room left over for a viable free region, split it off;
// otherwise hand out the whole region.
if (sizeof(Region) + REGION_HEADER_SIZE + size <= freeRegion->size) {
Region *newFreeRegion = (Region *)((uint8_t*)freeRegion + REGION_HEADER_SIZE + size);
create_free_region(newFreeRegion, freeRegion->size - size - REGION_HEADER_SIZE);
link_to_free_list(newFreeRegion);
create_used_region(freeRegion, size + REGION_HEADER_SIZE);
} else {
// Using the whole region: just rewrite the ceiling word to clear the
// FREE_REGION_FLAG bit.
((size_t*)((uint8_t*)freeRegion + freeRegion->size))[-1] = freeRegion->size;
}
#ifdef __EMSCRIPTEN_TRACING__
emscripten_trace_record_allocation(freeRegion, freeRegion->size);
#endif
#ifdef EMMALLOC_VERBOSE
MAIN_THREAD_ASYNC_EM_ASM(out('attempt_allocate - succeeded allocating memory, region ptr=' + ptrToString($0) + ', align=' + $1 + ', payload size=' + Number($2) + ' bytes)'), freeRegion, alignment, size);
#endif
return (uint8_t*)freeRegion + sizeof(size_t);
}
// Clamps a requested alignment up to the allocator's minimum alignment.
static size_t validate_alloc_alignment(size_t alignment) {
return MAX(alignment, MALLOC_ALIGNMENT);
}
// Rounds a requested size up to pointer-size granularity, with a floor of
// SMALLEST_ALLOCATION_SIZE so a later free can store the list links in it.
static size_t validate_alloc_size(size_t size) {
assert(size + REGION_HEADER_SIZE > size);
size_t validatedSize = size > SMALLEST_ALLOCATION_SIZE ? (size_t)ALIGN_UP(size, sizeof(Region*)) : SMALLEST_ALLOCATION_SIZE;
assert(validatedSize >= size);
return validatedSize;
}
EM_JS_DEPS(_em_malloc_deps, "$ptrToString");
// Core allocation routine; caller must hold the allocator lock. Strategy:
//  1) scan free buckets from the request's size class upward, lazily clearing
//     stale bits in freeRegionBucketsUsed for buckets found empty;
//  2) probe a bounded number of regions in the largest used bucket;
//  3) claim more memory via sbrk() and retry recursively;
//  4) if sbrk() failed, exhaustively re-scan the largest bucket.
// Returns the payload pointer, or 0 on failure.
static void *allocate_memory(size_t alignment, size_t size) {
ASSERT_MALLOC_IS_ACQUIRED();
#ifdef EMMALLOC_VERBOSE
MAIN_THREAD_ASYNC_EM_ASM(out('allocate_memory(align=' + $0 + ', size=' + Number($1) + ' bytes)'), alignment, size);
#endif
#ifdef EMMALLOC_MEMVALIDATE
validate_memory_regions();
#endif
if (!IS_POWER_OF_2(alignment)) {
#ifdef EMMALLOC_VERBOSE
MAIN_THREAD_ASYNC_EM_ASM(out('Allocation failed: alignment not power of 2!'));
#endif
return 0;
}
if (size > MAX_ALLOC_SIZE) {
#ifdef EMMALLOC_VERBOSE
MAIN_THREAD_ASYNC_EM_ASM(out('Allocation failed: attempted allocation size is too large: ' + Number($0) + 'bytes! (negative integer wraparound?)'), size);
#endif
return 0;
}
alignment = validate_alloc_alignment(alignment);
size = validate_alloc_size(size);
// Phase 1: walk the used-buckets bitmask upward from the request's class.
int bucketIndex = compute_free_list_bucket(size);
BUCKET_BITMASK_T bucketMask = freeRegionBucketsUsed >> bucketIndex;
while (bucketMask) {
// Jump directly to the next bucket whose bit is set.
BUCKET_BITMASK_T indexAdd = __builtin_ctzll(bucketMask);
bucketIndex += indexAdd;
bucketMask >>= indexAdd;
assert(bucketIndex >= 0);
assert(bucketIndex <= NUM_FREE_BUCKETS-1);
assert(freeRegionBucketsUsed & (((BUCKET_BITMASK_T)1) << bucketIndex));
Region *freeRegion = freeRegionBuckets[bucketIndex].next;
assert(freeRegion);
if (freeRegion != &freeRegionBuckets[bucketIndex]) {
void *ptr = attempt_allocate(freeRegion, alignment, size);
if (ptr) {
return ptr;
}
// This region didn't fit (alignment waste); rotate it to the bucket's
// tail so the next attempt tries a different region first.
unlink_from_free_list(freeRegion);
prepend_to_free_list(freeRegion, &freeRegionBuckets[bucketIndex]);
++bucketIndex;
bucketMask >>= 1;
} else {
// Bucket was actually empty: clear its stale bit lazily.
freeRegionBucketsUsed &= ~(((BUCKET_BITMASK_T)1) << bucketIndex);
bucketMask ^= 1;
}
assert((bucketIndex == NUM_FREE_BUCKETS && bucketMask == 0) || (bucketMask == freeRegionBucketsUsed >> bucketIndex));
}
// Phase 2: probe the bucket holding the largest free regions, but only a
// bounded number of times to keep allocation O(1)-ish.
int largestBucketIndex = NUM_FREE_BUCKETS - 1 - __builtin_clzll(freeRegionBucketsUsed);
Region *freeRegion = freeRegionBucketsUsed ? freeRegionBuckets[largestBucketIndex].next : 0;
if (freeRegionBucketsUsed >> 30) {
const int maxRegionsToTryBeforeGivingUp = 99;
int numTriesLeft = maxRegionsToTryBeforeGivingUp;
while (freeRegion != &freeRegionBuckets[largestBucketIndex] && numTriesLeft-- > 0) {
void *ptr = attempt_allocate(freeRegion, alignment, size);
if (ptr) {
return ptr;
}
freeRegion = freeRegion->next;
}
}
// Phase 3: grow the heap (with headroom for sentinels and alignment) and
// retry the whole allocation.
size_t numBytesToClaim = size+sizeof(Region)*3;
if (alignment > MALLOC_ALIGNMENT) {
numBytesToClaim += alignment;
}
assert(numBytesToClaim > size);
bool success = claim_more_memory(numBytesToClaim);
if (success) {
return allocate_memory(alignment, size);
}
// Phase 4: sbrk() failed; scan the largest bucket without a probe limit.
if (freeRegion) {
while (freeRegion != &freeRegionBuckets[largestBucketIndex]) {
void *ptr = attempt_allocate(freeRegion, alignment, size);
if (ptr) {
return ptr;
}
freeRegion = freeRegion->next;
}
}
#ifdef EMMALLOC_VERBOSE
MAIN_THREAD_ASYNC_EM_ASM(out('Could not find a free memory block!'));
#endif
return 0;
}
// Public memalign entry point: takes the allocator lock around the core
// allocator. Returns NULL on failure.
void *emmalloc_memalign(size_t alignment, size_t size) {
MALLOC_ACQUIRE();
void *ptr = allocate_memory(alignment, size);
MALLOC_RELEASE();
return ptr;
}
EMMALLOC_ALIAS(emscripten_builtin_memalign, emmalloc_memalign);
EMMALLOC_ALIAS(memalign, emmalloc_memalign);
#ifndef EMMALLOC_NO_STD_EXPORTS
// C11 aligned_alloc. Enforces the strict C11 preconditions: alignment must be
// a multiple of sizeof(void*) and size must be a multiple of alignment.
// (C17 relaxed the size requirement; this implementation keeps the check.)
void * EMMALLOC_EXPORT aligned_alloc(size_t alignment, size_t size) {
if ((alignment % sizeof(void *) != 0) || (size % alignment) != 0) {
return 0;
}
return emmalloc_memalign(alignment, size);
}
#endif
// malloc: memalign with the default minimum alignment.
void *emmalloc_malloc(size_t size) {
return emmalloc_memalign(MALLOC_ALIGNMENT, size);
}
EMMALLOC_ALIAS(emscripten_builtin_malloc, emmalloc_malloc);
EMMALLOC_ALIAS(__libc_malloc, emmalloc_malloc);
EMMALLOC_ALIAS(malloc, emmalloc_malloc);
// Returns the usable payload size of an allocation (which may exceed the size
// originally requested due to rounding/splitting), or 0 for NULL.
size_t emmalloc_usable_size(void *ptr) {
if (!ptr) {
return 0;
}
// The region header (bottom size word) sits immediately before the payload.
uint8_t *regionStartPtr = (uint8_t*)ptr - sizeof(size_t);
Region *region = (Region*)(regionStartPtr);
assert(HAS_ALIGNMENT(region, sizeof(size_t)));
MALLOC_ACQUIRE();
size_t size = region->size;
assert(size >= sizeof(Region));
assert(region_is_in_use(region));
MALLOC_RELEASE();
return size - REGION_HEADER_SIZE;
}
EMMALLOC_ALIAS(malloc_usable_size, emmalloc_usable_size);
// Frees a pointer returned by any allocation entry point. Coalesces the
// region with its lower and upper neighbors when they are free, then links
// the (possibly merged) region back into its size bucket. free(NULL) is a
// no-op.
void emmalloc_free(void *ptr) {
#ifdef EMMALLOC_MEMVALIDATE
emmalloc_validate_memory_regions();
#endif
if (!ptr) {
return;
}
#ifdef EMMALLOC_VERBOSE
MAIN_THREAD_ASYNC_EM_ASM(out('free(ptr='+ptrToString($0)+')'), ptr);
#endif
uint8_t *regionStartPtr = (uint8_t*)ptr - sizeof(size_t);
Region *region = (Region*)(regionStartPtr);
assert(HAS_ALIGNMENT(region, sizeof(size_t)));
MALLOC_ACQUIRE();
size_t size = region->size;
#ifdef EMMALLOC_VERBOSE
// Diagnose double frees / corruption before the asserts below fire.
if (size < sizeof(Region) || !region_is_in_use(region)) {
if (debug_region_is_consistent(region)) {
EM_ASM(err('Double free at region ptr ' + ptrToString($0) + ', region->size: ' + ptrToString($1) + ', region->sizeAtCeiling: ' + ptrToString($2) + ')'), region, size, region_ceiling_size(region));
} else {
MAIN_THREAD_ASYNC_EM_ASM(err('Corrupt region at region ptr ' + ptrToString($0) + ' region->size: ' + ptrToString($1) + ', region->sizeAtCeiling: ' + ptrToString($2) + ')'), region, size, region_ceiling_size(region));
}
}
#endif
assert(size >= sizeof(Region));
assert(region_is_in_use(region));
#ifdef __EMSCRIPTEN_TRACING__
emscripten_trace_record_free(region);
#endif
// The word just below this region is the lower neighbor's ceiling size;
// if it carries FREE_REGION_FLAG, merge the lower neighbor into us.
size_t prevRegionSizeField = ((size_t*)region)[-1];
size_t prevRegionSize = prevRegionSizeField & ~FREE_REGION_FLAG;
if (prevRegionSizeField != prevRegionSize) {
Region *prevRegion = (Region*)((uint8_t*)region - prevRegionSize);
assert(debug_region_is_consistent(prevRegion));
unlink_from_free_list(prevRegion);
regionStartPtr = (uint8_t*)prevRegion;
size += prevRegionSize;
}
// The upper neighbor is free iff its ceiling word (with the flag) differs
// from its bottom size word; merge it too in that case.
Region *nextRegion = next_region(region);
assert(debug_region_is_consistent(nextRegion));
size_t sizeAtEnd = *(size_t*)region_payload_end_ptr(nextRegion);
if (nextRegion->size != sizeAtEnd) {
unlink_from_free_list(nextRegion);
size += nextRegion->size;
}
create_free_region(regionStartPtr, size);
link_to_free_list((Region*)regionStartPtr);
MALLOC_RELEASE();
#ifdef EMMALLOC_MEMVALIDATE
emmalloc_validate_memory_regions();
#endif
}
EMMALLOC_ALIAS(emscripten_builtin_free, emmalloc_free);
EMMALLOC_ALIAS(__libc_free, emmalloc_free);
EMMALLOC_ALIAS(free, emmalloc_free);
// Attempts to grow or shrink a used 'region' in place so that its total size
// (payload + header words) becomes 'size'. Returns 1 on success, 0 if the
// resize is impossible without moving the allocation. Caller must hold the
// allocator lock.
static int attempt_region_resize(Region *region, size_t size) {
ASSERT_MALLOC_IS_ACQUIRED();
assert(size > 0);
assert(HAS_ALIGNMENT(size, sizeof(size_t)));
#ifdef EMMALLOC_VERBOSE
MAIN_THREAD_ASYNC_EM_ASM(out('attempt_region_resize(region=' + ptrToString($0) + ', size=' + Number($1) + ' bytes)'), region, size);
#endif
// First check if the next region is free: then we may expand into it
// (or shrink, releasing the difference into it).
Region *nextRegion = next_region(region);
uint8_t *nextRegionEndPtr = (uint8_t*)nextRegion + nextRegion->size;
size_t sizeAtCeiling = ((size_t*)nextRegionEndPtr)[-1];
if (nextRegion->size != sizeAtCeiling) {
assert(region_is_free(nextRegion));
uint8_t *newNextRegionStartPtr = (uint8_t*)region + size;
assert(HAS_ALIGNMENT(newNextRegionStartPtr, sizeof(size_t)));
// Case A: enough room remains after resizing to keep a viable free region.
if (newNextRegionStartPtr + sizeof(Region) <= nextRegionEndPtr) {
unlink_from_free_list(nextRegion);
create_free_region(newNextRegionStartPtr, nextRegionEndPtr - newNextRegionStartPtr);
link_to_free_list((Region*)newNextRegionStartPtr);
create_used_region(region, newNextRegionStartPtr - (uint8_t*)region);
return 1;
}
// Case B: the leftover would be too small for a free region; absorb the
// whole neighbor into this region instead.
if (newNextRegionStartPtr <= nextRegionEndPtr) {
unlink_from_free_list(nextRegion);
create_used_region(region, region->size + nextRegion->size);
return 1;
}
} else {
// Next region is in use: we can only shrink in place.
// If shrinking leaves room for a viable free region, split it off.
if (size + sizeof(Region) <= region->size) {
size_t freeRegionSize = region->size - size;
create_used_region(region, size);
Region *freeRegion = (Region *)((uint8_t*)region + size);
create_free_region(freeRegion, freeRegionSize);
link_to_free_list(freeRegion);
return 1;
} else if (size <= region->size) {
// Already big enough; keep the slack inside the region.
return 1;
}
}
#ifdef EMMALLOC_VERBOSE
MAIN_THREAD_ASYNC_EM_ASM(out('attempt_region_resize failed.'));
#endif
return 0;
}
// Takes the allocator lock around attempt_region_resize().
static int acquire_and_attempt_region_resize(Region *region, size_t size) {
MALLOC_ACQUIRE();
int success = attempt_region_resize(region, size);
MALLOC_RELEASE();
return success;
}
// realloc with an explicit alignment requirement. Tries an in-place resize
// first; otherwise allocates a new block, copies the old payload, and frees
// the old block. ptr==NULL behaves like memalign; size==0 frees and returns
// NULL.
void *emmalloc_aligned_realloc(void *ptr, size_t alignment, size_t size) {
#ifdef EMMALLOC_VERBOSE
MAIN_THREAD_ASYNC_EM_ASM(out('aligned_realloc(ptr=' + ptrToString($0) + ', alignment=' + $1 + ', size=' + Number($2)), ptr, alignment, size);
#endif
if (!ptr) {
return emmalloc_memalign(alignment, size);
}
if (size == 0) {
free(ptr);
return 0;
}
if (size > MAX_ALLOC_SIZE) {
#ifdef EMMALLOC_VERBOSE
MAIN_THREAD_ASYNC_EM_ASM(out('Allocation failed: attempted allocation size is too large: ' + Number($0) + 'bytes! (negative integer wraparound?)'), size);
#endif
return 0;
}
assert(IS_POWER_OF_2(alignment));
// An existing allocation is always at least MALLOC_ALIGNMENT-aligned, so it
// must already satisfy any valid requested alignment here.
assert(HAS_ALIGNMENT(ptr, alignment));
size = validate_alloc_size(size);
Region *region = (Region*)((uint8_t*)ptr - sizeof(size_t));
// Fast path: grow/shrink in place without copying.
if (acquire_and_attempt_region_resize(region, size + REGION_HEADER_SIZE)) {
#ifdef __EMSCRIPTEN_TRACING__
emscripten_trace_record_reallocation(ptr, ptr, size);
#endif
return ptr;
}
// Slow path: allocate elsewhere, copy the smaller of old/new payload sizes,
// then release the old block. On allocation failure the old block survives.
void *newptr = emmalloc_memalign(alignment, size);
if (newptr) {
memcpy(newptr, ptr, MIN(size, region->size - REGION_HEADER_SIZE));
free(ptr);
}
return newptr;
}
EMMALLOC_ALIAS(aligned_realloc, emmalloc_aligned_realloc);
// Like realloc, but only attempts an in-place resize: returns ptr on success,
// or NULL if the region could not be resized (the original allocation then
// remains valid and untouched). size==0 frees the pointer.
void *emmalloc_realloc_try(void *ptr, size_t size) {
if (!ptr) {
return 0;
}
if (size == 0) {
free(ptr);
return 0;
}
if (size > MAX_ALLOC_SIZE) {
#ifdef EMMALLOC_VERBOSE
MAIN_THREAD_ASYNC_EM_ASM(out('Allocation failed: attempted allocation size is too large: ' + Number($0) + 'bytes! (negative integer wraparound?)'), size);
#endif
return 0;
}
size = validate_alloc_size(size);
Region *region = (Region*)((uint8_t*)ptr - sizeof(size_t));
int success = acquire_and_attempt_region_resize(region, size + REGION_HEADER_SIZE);
#ifdef __EMSCRIPTEN_TRACING__
if (success) {
emscripten_trace_record_reallocation(ptr, ptr, size);
}
#endif
return success ? ptr : 0;
}
// Like emmalloc_aligned_realloc(), but does NOT preserve the old contents
// when an in-place resize is impossible: the old block is freed first and a
// fresh (uninitialized) block is allocated.
void *emmalloc_aligned_realloc_uninitialized(void *ptr, size_t alignment, size_t size) {
if (!ptr) {
return emmalloc_memalign(alignment, size);
}
if (size == 0) {
free(ptr);
return 0;
}
if (size > MAX_ALLOC_SIZE) {
#ifdef EMMALLOC_VERBOSE
MAIN_THREAD_ASYNC_EM_ASM(out('Allocation failed: attempted allocation size is too large: ' + Number($0) + 'bytes! (negative integer wraparound?)'), size);
#endif
return 0;
}
size = validate_alloc_size(size);
Region *region = (Region*)((uint8_t*)ptr - sizeof(size_t));
if (acquire_and_attempt_region_resize(region, size + REGION_HEADER_SIZE)) {
#ifdef __EMSCRIPTEN_TRACING__
emscripten_trace_record_reallocation(ptr, ptr, size);
#endif
return ptr;
}
// Free before allocating: contents are not preserved by contract, and this
// gives the allocator the best chance of satisfying the request.
free(ptr);
return emmalloc_memalign(alignment, size);
}
// Standard realloc: aligned realloc at the default minimum alignment.
void *emmalloc_realloc(void *ptr, size_t size) {
return emmalloc_aligned_realloc(ptr, MALLOC_ALIGNMENT, size);
}
EMMALLOC_ALIAS(emscripten_builtin_realloc, emmalloc_realloc);
EMMALLOC_ALIAS(__libc_realloc, emmalloc_realloc);
EMMALLOC_ALIAS(realloc, emmalloc_realloc);
// Non-preserving realloc at the default minimum alignment.
void *emmalloc_realloc_uninitialized(void *ptr, size_t size) {
return emmalloc_aligned_realloc_uninitialized(ptr, MALLOC_ALIGNMENT, size);
}
// POSIX posix_memalign: stores an allocation of 'size' bytes aligned to
// 'alignment' in *memptr. Returns 0 on success, EINVAL for an invalid
// alignment, or ENOMEM when the allocation fails.
int emmalloc_posix_memalign(void **memptr, size_t alignment, size_t size) {
  assert(memptr);
  // POSIX requires the alignment to be a power of two multiple of
  // sizeof(void*); any other value must fail with EINVAL. Previously a
  // non-power-of-two alignment fell through to a failed allocation and was
  // misreported as ENOMEM.
  if (alignment % sizeof(void *) != 0 || !IS_POWER_OF_2(alignment)) {
    return EINVAL;
  }
  *memptr = emmalloc_memalign(alignment, size);
  return *memptr ? 0 : ENOMEM;
}
EMMALLOC_ALIAS(posix_memalign, emmalloc_posix_memalign);
// calloc: allocates num*size bytes at the default alignment and zero-fills
// them. Returns NULL on failure or if num*size overflows size_t.
void *emmalloc_calloc(size_t num, size_t size) {
  // Guard against num*size wrapping around size_t: a wrapped product would
  // silently allocate (and zero) a buffer smaller than the caller expects.
  if (size != 0 && num > SIZE_MAX / size) {
    return NULL;
  }
  size_t bytes = num*size;
  void *ptr = emmalloc_memalign(MALLOC_ALIGNMENT, bytes);
  if (ptr) {
    memset(ptr, 0, bytes);
  }
  return ptr;
}
EMMALLOC_ALIAS(emscripten_builtin_calloc, emmalloc_calloc);
EMMALLOC_ALIAS(__libc_calloc, emmalloc_calloc);
EMMALLOC_ALIAS(calloc, emmalloc_calloc);
// Counts the nodes of a circular doubly-linked free list, including the
// sentinel head itself (so an empty bucket reports 1; callers subtract 1).
static int count_linked_list_size(Region *list) {
  int size = 1;
  // BUGFIX: advance the iterator 'i', not the 'list' head. The previous code
  // stepped 'list' while leaving 'i' fixed at the first node, so every list
  // with two or more nodes was reported as having exactly two.
  for (Region *i = list->next; i != list; i = i->next) {
    ++size;
  }
  return size;
}
// Sums the payload bytes of every region on a circular doubly-linked free
// list (the sentinel head contributes nothing).
static size_t count_linked_list_space(Region *list) {
  size_t space = 0;
  // BUGFIX: advance the iterator 'i', not the 'list' head. The previous code
  // stepped 'list' while 'i' stayed on the first node, so only the first
  // region's payload was ever counted.
  for (Region *i = list->next; i != list; i = i->next) {
    space += region_payload_end_ptr(i) - region_payload_start_ptr(i);
  }
  return space;
}
// Implements mallinfo(): walks every region under the allocator lock to
// produce an internally consistent snapshot of heap statistics.
// NOTE(review): dereferences listOfAllRegions without a NULL check — assumes
// the heap was initialized by the constructor; confirm for early callers.
struct mallinfo emmalloc_mallinfo() {
MALLOC_ACQUIRE();
struct mallinfo info;
// Bytes of heap the process has that emmalloc has not claimed via sbrk().
info.arena = emscripten_get_heap_size() - (size_t)_sbrk64(0);
// Number of free chunks in the largest size bucket (minus the sentinel).
info.ordblks = count_linked_list_size(&freeRegionBuckets[NUM_FREE_BUCKETS-1])-1;
info.smblks = 0;
info.fsmblks = 0;
for (int i = 0; i < NUM_FREE_BUCKETS-1; ++i) {
info.smblks += count_linked_list_size(&freeRegionBuckets[i])-1;
info.fsmblks += count_linked_list_space(&freeRegionBuckets[i]);
}
info.hblks = 0;
info.hblkhd = 0;
info.usmblks = 0;
info.uordblks = 0;
info.fordblks = 0;
// keepcost: releasable bytes at the top of the heap (the final free region
// just below the end sentinel, if any).
Region *lastActualRegion = prev_region((Region*)(listOfAllRegions->endPtr - sizeof(Region)));
info.keepcost = region_is_free(lastActualRegion) ? lastActualRegion->size : 0;
RootRegion *root = listOfAllRegions;
while (root) {
Region *r = (Region*)root;
assert(debug_region_is_consistent(r));
uint8_t *lastRegionEnd = root->endPtr;
while ((uint8_t*)r < lastRegionEnd) {
assert(debug_region_is_consistent(r));
if (region_is_free(r)) {
// Payload bytes are free; the two header words still count as used.
info.fordblks += region_payload_end_ptr(r) - region_payload_start_ptr(r);
info.uordblks += REGION_HEADER_SIZE;
} else {
info.uordblks += r->size;
}
// Track the highest address any region reaches.
info.usmblks = MAX(info.usmblks, (intptr_t)r + r->size);
if (r->size == 0) {
break;
}
r = next_region(r);
}
root = root->next;
}
MALLOC_RELEASE();
return info;
}
EMMALLOC_ALIAS(mallinfo, emmalloc_mallinfo);
// Shrinks the sbrk() reservation by giving back the tail of the final free
// region, keeping at least 'pad' free bytes in it. Returns 1 if any memory
// was returned to the system. Caller must hold the allocator lock.
static int trim_dynamic_heap_reservation(size_t pad) {
ASSERT_MALLOC_IS_ACQUIRED();
if (!listOfAllRegions) {
#ifdef EMMALLOC_VERBOSE
MAIN_THREAD_ASYNC_EM_ASM(out('emmalloc_trim(): cannot trim memory, emmalloc is currently not initialized to manage any dynamic memory at all.'));
#endif
return 0;
}
uint8_t *previousSbrkEndAddress = listOfAllRegions->endPtr;
void *sbrkAddr = _sbrk64(0);
#ifdef EMMALLOC_VERBOSE
MAIN_THREAD_ASYNC_EM_ASM(out('emmalloc_trim(): _sbrk64(0) = ' + ptrToString($0) + ', previousSbrkEndAddress = ' + ptrToString($1)), sbrkAddr, previousSbrkEndAddress);
#endif
// Trimming only works while emmalloc owns the top of the sbrk() heap.
assert(sbrkAddr == previousSbrkEndAddress);
size_t lastMemoryRegionSize = ((size_t*)previousSbrkEndAddress)[-1];
Region *endSentinelRegion = (Region*)(previousSbrkEndAddress - lastMemoryRegionSize);
Region *lastActualRegion = prev_region(endSentinelRegion);
if (!region_is_free(lastActualRegion)) {
#ifdef EMMALLOC_VERBOSE
MAIN_THREAD_ASYNC_EM_ASM(out('emmalloc_trim(): Last actual region ' + ptrToString($0) + ' is in use, there is nothing to trim from.'), lastActualRegion);
#endif
return 0;
}
pad = (size_t)ALIGN_UP(pad, 4);
if (pad >= lastActualRegion->size) {
#ifdef EMMALLOC_VERBOSE
MAIN_THREAD_ASYNC_EM_ASM(out('emmalloc_trim(): Last actual region does not have enough space to leave ' + Number($0) + ' bytes of free memory in it.'), pad);
#endif
return 0;
}
// Leave 'pad' bytes plus room for the region's two header words.
size_t shrinkAmount = lastActualRegion->size - pad - 2*sizeof(size_t);
#ifdef EMMALLOC_VERBOSE
MAIN_THREAD_ASYNC_EM_ASM(out('emmalloc_trim(): shrinkAmount ' + Number($0) + '.'), shrinkAmount);
#endif
// Keep the sbrk() break aligned to the allocator's alignment.
shrinkAmount = (size_t)ALIGN_DOWN(shrinkAmount, __alignof__(max_align_t));
#ifdef EMMALLOC_VERBOSE
MAIN_THREAD_ASYNC_EM_ASM(out('emmalloc_trim(): shrinkAmount2 ' + Number($0) + '.'), shrinkAmount);
#endif
if (!shrinkAmount) {
#ifdef EMMALLOC_VERBOSE
MAIN_THREAD_ASYNC_EM_ASM(out('emmalloc_trim(): Aligning for sbrk() requirements removed opportunity to trim.'));
#endif
return 0;
}
unlink_from_free_list(lastActualRegion);
size_t newRegionSize = lastActualRegion->size - shrinkAmount;
#ifdef EMMALLOC_VERBOSE
MAIN_THREAD_ASYNC_EM_ASM(out('emmalloc_trim(): Shrinking ' + Number($0) + ' bytes off the free heap end region. New free heap end region size: ' + Number($1) + ' bytes.'), shrinkAmount, newRegionSize);
#endif
// Keep a smaller free region at the heap end if a viable one remains;
// otherwise fold its bytes into the new end sentinel.
if (newRegionSize >= sizeof(Region)) {
create_free_region(lastActualRegion, newRegionSize);
link_to_free_list(lastActualRegion);
#ifdef EMMALLOC_VERBOSE
MAIN_THREAD_ASYNC_EM_ASM(out('emmalloc_trim(): Created new free heap end region at ' + ptrToString($0) + '. Size: ' + Number($1) + ' bytes.'), lastActualRegion, newRegionSize);
#endif
} else {
#ifdef EMMALLOC_VERBOSE
MAIN_THREAD_ASYNC_EM_ASM(out('emmalloc_trim(): Not enough room to fit a free heap end region. Discarding it altogether.'));
#endif
newRegionSize = 0;
}
// Actually release the memory, then rebuild the end sentinel at the new top.
void *oldSbrk = _sbrk64(-(int64_t)shrinkAmount);
assert((intptr_t)oldSbrk != -1);
void *sbrkNow = _sbrk64(0);
Region *newEndSentinelRegion = (Region*)((uint8_t*)lastActualRegion + newRegionSize);
size_t newEndSentinelRegionSize = (uintptr_t)sbrkNow - (uintptr_t)newEndSentinelRegion;
#ifdef EMMALLOC_VERBOSE
MAIN_THREAD_ASYNC_EM_ASM(out('emmalloc_trim(): Created new sentinel end region at ' + ptrToString($0) + '. Size: ' + Number($1) + ' bytes.'), newEndSentinelRegion, newEndSentinelRegionSize);
#endif
create_used_region(newEndSentinelRegion, newEndSentinelRegionSize);
listOfAllRegions->endPtr = (uint8_t*)newEndSentinelRegion + newEndSentinelRegionSize;
return 1;
}
// Public wrapper: takes the allocator lock around the trim operation.
int emmalloc_trim(size_t pad) {
MALLOC_ACQUIRE();
int success = trim_dynamic_heap_reservation(pad);
MALLOC_RELEASE();
return success;
}
EMMALLOC_ALIAS(malloc_trim, emmalloc_trim)
// Returns the total number of bytes in all sbrk()ed blocks that emmalloc
// currently manages (headers, sentinels, used and free regions alike).
size_t emmalloc_dynamic_heap_size() {
  size_t total = 0;
  MALLOC_ACQUIRE();
  for (RootRegion *block = listOfAllRegions; block; block = block->next) {
    total += (size_t)(block->endPtr - (uint8_t*)block);
  }
  MALLOC_RELEASE();
  return total;
}
// Sums the payload bytes of every region currently sitting on a free list.
size_t emmalloc_free_dynamic_memory() {
size_t freeDynamicMemory = 0;
int bucketIndex = 0;
MALLOC_ACQUIRE();
BUCKET_BITMASK_T bucketMask = freeRegionBucketsUsed;
// Visit only buckets whose bit is set, skipping runs of empty buckets.
while (bucketMask) {
BUCKET_BITMASK_T indexAdd = __builtin_ctzll(bucketMask);
bucketIndex += indexAdd;
bucketMask >>= indexAdd;
for (Region *freeRegion = freeRegionBuckets[bucketIndex].next;
freeRegion != &freeRegionBuckets[bucketIndex];
freeRegion = freeRegion->next) {
freeDynamicMemory += freeRegion->size - REGION_HEADER_SIZE;
}
++bucketIndex;
bucketMask >>= 1;
}
MALLOC_RELEASE();
return freeDynamicMemory;
}
// Fills freeMemorySizeMap[i] with the count of free regions whose payload
// size falls in [2^i, 2^(i+1)); returns the total number of free regions.
size_t emmalloc_compute_free_dynamic_memory_fragmentation_map(size_t freeMemorySizeMap[32]) {
memset((void*)freeMemorySizeMap, 0, sizeof(freeMemorySizeMap[0])*32);
size_t numFreeMemoryRegions = 0;
int bucketIndex = 0;
MALLOC_ACQUIRE();
BUCKET_BITMASK_T bucketMask = freeRegionBucketsUsed;
// Visit only buckets whose bit is set, skipping runs of empty buckets.
while (bucketMask) {
BUCKET_BITMASK_T indexAdd = __builtin_ctzll(bucketMask);
bucketIndex += indexAdd;
bucketMask >>= indexAdd;
for (Region *freeRegion = freeRegionBuckets[bucketIndex].next;
freeRegion != &freeRegionBuckets[bucketIndex];
freeRegion = freeRegion->next) {
++numFreeMemoryRegions;
size_t freeDynamicMemory = freeRegion->size - REGION_HEADER_SIZE;
// Bin by the position of the highest set bit (floor(log2(size))).
if (freeDynamicMemory > 0) {
++freeMemorySizeMap[31-__builtin_clz(freeDynamicMemory)];
} else {
++freeMemorySizeMap[0];
}
}
++bucketIndex;
bucketMask >>= 1;
}
MALLOC_RELEASE();
return numFreeMemoryRegions;
}
// Prints a human-readable histogram of free-region payload sizes (one line
// per power-of-two bin) to stdout.
void emmalloc_dump_free_dynamic_memory_fragmentation_map() {
  size_t histogram[32];
  size_t regionCount = emmalloc_compute_free_dynamic_memory_fragmentation_map(histogram);
  printf("numFreeMemoryRegions: %zu\n", regionCount);
  for (int bin = 0; bin < 32; ++bin) {
    printf("Free memory regions of size [%llu,%llu[ bytes: %zu regions\n",
           1ull << bin, 1ull << (bin + 1), histogram[bin]);
  }
}
// Returns how many bytes the heap could still grow via sbrk() before hitting
// the configured maximum heap size.
size_t emmalloc_unclaimed_heap_memory(void) {
return emscripten_get_heap_max() - (size_t)_sbrk64(0);
}