Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
emscripten-core
GitHub Repository: emscripten-core/emscripten
Path: blob/main/system/lib/mimalloc/src/segment-map.c
6175 views
1
/* ----------------------------------------------------------------------------
2
Copyright (c) 2019-2023, Microsoft Research, Daan Leijen
3
This is free software; you can redistribute it and/or modify it under the
4
terms of the MIT license. A copy of the license can be found in the file
5
"LICENSE" at the root of this distribution.
6
-----------------------------------------------------------------------------*/
7
8
/* -----------------------------------------------------------
9
The following functions are to reliably find the segment or
10
block that encompasses any pointer p (or NULL if it is not
11
in any of our segments).
12
We maintain a bitmap of all memory with 1 bit per MI_SEGMENT_SIZE (64MiB)
13
set to 1 if it contains the segment meta data.
14
----------------------------------------------------------- */
15
#include "mimalloc.h"
16
#include "mimalloc/internal.h"
17
#include "mimalloc/atomic.h"
18
19
#if (MI_INTPTR_SIZE>=8) && MI_TRACK_ASAN
20
#define MI_MAX_ADDRESS ((size_t)140 << 40) // 140TB (see issue #881)
21
#elif (MI_INTPTR_SIZE >= 8)
22
#define MI_MAX_ADDRESS ((size_t)40 << 40) // 40TB (to include huge page areas)
23
#else
24
#define MI_MAX_ADDRESS ((size_t)2 << 30) // 2Gb
25
#endif
26
27
#define MI_SEGMENT_MAP_BITS (MI_MAX_ADDRESS / MI_SEGMENT_SIZE)
28
#define MI_SEGMENT_MAP_SIZE (MI_SEGMENT_MAP_BITS / 8)
29
#define MI_SEGMENT_MAP_WSIZE (MI_SEGMENT_MAP_SIZE / MI_INTPTR_SIZE)
30
31
static _Atomic(uintptr_t) mi_segment_map[MI_SEGMENT_MAP_WSIZE + 1]; // 2KiB per TB with 64MiB segments
32
33
// Map a segment pointer to its word index in `mi_segment_map`, storing the
// bit position within that word through `bitidx`. Addresses at or beyond
// MI_MAX_ADDRESS yield the sentinel index MI_SEGMENT_MAP_WSIZE (with *bitidx==0).
// note: segment can be invalid or NULL.
static size_t mi_segment_map_index_of(const mi_segment_t* segment, size_t* bitidx) {
  mi_assert_internal(_mi_ptr_segment(segment + 1) == segment); // is it aligned on MI_SEGMENT_SIZE?
  const uintptr_t addr = (uintptr_t)segment;
  if (addr >= MI_MAX_ADDRESS) {
    *bitidx = 0;
    return MI_SEGMENT_MAP_WSIZE;  // sentinel: outside the tracked range
  }
  const uintptr_t chunk = addr / MI_SEGMENT_SIZE;      // which 64MiB chunk
  const size_t word  = (size_t)(chunk / MI_INTPTR_BITS);
  *bitidx = (size_t)(chunk % MI_INTPTR_BITS);
  mi_assert_internal(word < MI_SEGMENT_MAP_WSIZE);
  return word;
}
48
49
void _mi_segment_map_allocated_at(const mi_segment_t* segment) {
50
size_t bitidx;
51
size_t index = mi_segment_map_index_of(segment, &bitidx);
52
mi_assert_internal(index <= MI_SEGMENT_MAP_WSIZE);
53
if (index==MI_SEGMENT_MAP_WSIZE) return;
54
uintptr_t mask = mi_atomic_load_relaxed(&mi_segment_map[index]);
55
uintptr_t newmask;
56
do {
57
newmask = (mask | ((uintptr_t)1 << bitidx));
58
} while (!mi_atomic_cas_weak_release(&mi_segment_map[index], &mask, newmask));
59
}
60
61
void _mi_segment_map_freed_at(const mi_segment_t* segment) {
62
size_t bitidx;
63
size_t index = mi_segment_map_index_of(segment, &bitidx);
64
mi_assert_internal(index <= MI_SEGMENT_MAP_WSIZE);
65
if (index == MI_SEGMENT_MAP_WSIZE) return;
66
uintptr_t mask = mi_atomic_load_relaxed(&mi_segment_map[index]);
67
uintptr_t newmask;
68
do {
69
newmask = (mask & ~((uintptr_t)1 << bitidx));
70
} while (!mi_atomic_cas_weak_release(&mi_segment_map[index], &mask, newmask));
71
}
72
73
// Determine the segment belonging to a pointer or NULL if it is not in a valid segment.
74
static mi_segment_t* _mi_segment_of(const void* p) {
75
if (p == NULL) return NULL;
76
mi_segment_t* segment = _mi_ptr_segment(p); // segment can be NULL
77
size_t bitidx;
78
size_t index = mi_segment_map_index_of(segment, &bitidx);
79
// fast path: for any pointer to valid small/medium/large object or first MI_SEGMENT_SIZE in huge
80
const uintptr_t mask = mi_atomic_load_relaxed(&mi_segment_map[index]);
81
if mi_likely((mask & ((uintptr_t)1 << bitidx)) != 0) {
82
return segment; // yes, allocated by us
83
}
84
if (index==MI_SEGMENT_MAP_WSIZE) return NULL;
85
86
// TODO: maintain max/min allocated range for efficiency for more efficient rejection of invalid pointers?
87
88
// search downwards for the first segment in case it is an interior pointer
89
// could be slow but searches in MI_INTPTR_SIZE * MI_SEGMENT_SIZE (512MiB) steps trough
90
// valid huge objects
91
// note: we could maintain a lowest index to speed up the path for invalid pointers?
92
size_t lobitidx;
93
size_t loindex;
94
uintptr_t lobits = mask & (((uintptr_t)1 << bitidx) - 1);
95
if (lobits != 0) {
96
loindex = index;
97
lobitidx = mi_bsr(lobits); // lobits != 0
98
}
99
else if (index == 0) {
100
return NULL;
101
}
102
else {
103
mi_assert_internal(index > 0);
104
uintptr_t lomask = mask;
105
loindex = index;
106
do {
107
loindex--;
108
lomask = mi_atomic_load_relaxed(&mi_segment_map[loindex]);
109
} while (lomask != 0 && loindex > 0);
110
if (lomask == 0) return NULL;
111
lobitidx = mi_bsr(lomask); // lomask != 0
112
}
113
mi_assert_internal(loindex < MI_SEGMENT_MAP_WSIZE);
114
// take difference as the addresses could be larger than the MAX_ADDRESS space.
115
size_t diff = (((index - loindex) * (8*MI_INTPTR_SIZE)) + bitidx - lobitidx) * MI_SEGMENT_SIZE;
116
segment = (mi_segment_t*)((uint8_t*)segment - diff);
117
118
if (segment == NULL) return NULL;
119
mi_assert_internal((void*)segment < p);
120
bool cookie_ok = (_mi_ptr_cookie(segment) == segment->cookie);
121
mi_assert_internal(cookie_ok);
122
if mi_unlikely(!cookie_ok) return NULL;
123
if (((uint8_t*)segment + mi_segment_size(segment)) <= (uint8_t*)p) return NULL; // outside the range
124
mi_assert_internal(p >= (void*)segment && (uint8_t*)p < (uint8_t*)segment + mi_segment_size(segment));
125
return segment;
126
}
127
128
// Is this a valid pointer in our heap?
129
static bool mi_is_valid_pointer(const void* p) {
130
return ((_mi_segment_of(p) != NULL) || (_mi_arena_contains(p)));
131
}
132
133
// Exported API: does `p` lie inside memory managed by this allocator?
mi_decl_nodiscard mi_decl_export bool mi_is_in_heap_region(const void* p) mi_attr_noexcept {
  const bool in_heap = mi_is_valid_pointer(p);
  return in_heap;
}
136
137
/*
138
// Return the full segment range belonging to a pointer
139
static void* mi_segment_range_of(const void* p, size_t* size) {
140
mi_segment_t* segment = _mi_segment_of(p);
141
if (segment == NULL) {
142
if (size != NULL) *size = 0;
143
return NULL;
144
}
145
else {
146
if (size != NULL) *size = segment->segment_size;
147
return segment;
148
}
149
mi_assert_expensive(page == NULL || mi_segment_is_valid(_mi_page_segment(page),tld));
150
mi_assert_internal(page == NULL || (mi_segment_page_size(_mi_page_segment(page)) - (MI_SECURE == 0 ? 0 : _mi_os_page_size())) >= block_size);
151
mi_reset_delayed(tld);
152
mi_assert_internal(page == NULL || mi_page_not_in_queue(page, tld));
153
return page;
154
}
155
*/
156
157