GitHub Repository: stenzek/duckstation
Path: blob/master/src/common/memmap.cpp
// SPDX-FileCopyrightText: 2019-2024 Connor McLaughlin <[email protected]>
// SPDX-License-Identifier: CC-BY-NC-ND-4.0

#include "memmap.h"
#include "align.h"
#include "assert.h"
#include "error.h"
#include "log.h"
#include "small_string.h"
#include "string_util.h"

#include "fmt/format.h"

#include <memory>

#if defined(_WIN32)
#include "windows_headers.h"
#include <Psapi.h>
#elif defined(__APPLE__)
#ifdef __aarch64__
#include <pthread.h> // pthread_jit_write_protect_np()
#endif
#include <mach-o/dyld.h>
#include <mach-o/getsect.h>
#include <mach/mach_init.h>
#include <mach/mach_port.h>
#include <mach/mach_vm.h>
#include <mach/vm_map.h>
#include <sys/mman.h>
#include <sys/sysctl.h>
#else
#include <cerrno>
#include <dlfcn.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>
#endif
#if defined(__linux__) && defined(CPU_ARCH_RISCV64)
#include <sys/cachectl.h>
#endif

#if defined(__linux__) && !defined(MAP_FIXED_NOREPLACE)
// Compatibility with old libc.
#define MAP_FIXED_NOREPLACE 0x100000
#endif
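
// Note: the kernel only honours MAP_FIXED_NOREPLACE from Linux 4.17 onwards; older kernels
// ignore unknown mmap() flags and fall back to treating the address as a plain hint, which
// is why AllocateJITMemoryAt() below also verifies the address mmap() actually returned.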

LOG_CHANNEL(MemMap);

namespace MemMap {
/// Allocates RWX memory at the specified address.
static void* AllocateJITMemoryAt(const void* addr, size_t size);
} // namespace MemMap

#ifdef DYNAMIC_HOST_PAGE_SIZE
const u32 HOST_PAGE_SIZE = MemMap::GetRuntimePageSize();
const u32 HOST_PAGE_MASK = MemMap::GetRuntimePageSize() - 1;
const u32 HOST_PAGE_SHIFT = std::bit_width(MemMap::GetRuntimePageSize() - 1);
#endif
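
// With e.g. a 16 KiB page size, HOST_PAGE_MASK == 0x3FFF and HOST_PAGE_SHIFT == 14
// (std::bit_width(0x3FFF) == 14), so (addr & HOST_PAGE_MASK) yields the offset within a
// page and (addr >> HOST_PAGE_SHIFT) the page index.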

#ifdef _WIN32

u32 MemMap::GetRuntimePageSize()
{
  static u32 cached_page_size = 0;
  if (cached_page_size != 0) [[likely]]
    return cached_page_size;

  SYSTEM_INFO si = {};
  GetSystemInfo(&si);
  cached_page_size = si.dwPageSize;
  return cached_page_size;
}

bool MemMap::MemProtect(void* baseaddr, size_t size, PageProtect mode)
{
  DebugAssert((size & (HOST_PAGE_SIZE - 1)) == 0);

  DWORD old_protect;
  if (!VirtualProtect(baseaddr, size, static_cast<DWORD>(mode), &old_protect))
  {
    ERROR_LOG("VirtualProtect() failed with error {}", GetLastError());
    return false;
  }

  return true;
}

std::string MemMap::GetFileMappingName(const char* prefix)
{
  const unsigned pid = GetCurrentProcessId();
  return fmt::format("{}_{}", prefix, pid);
}

void* MemMap::CreateSharedMemory(const char* name, size_t size, Error* error)
{
  const std::wstring mapping_name = name ? StringUtil::UTF8StringToWideString(name) : std::wstring();
  const HANDLE mapping =
    CreateFileMappingW(INVALID_HANDLE_VALUE, nullptr, PAGE_READWRITE, static_cast<DWORD>(size >> 32),
                       static_cast<DWORD>(size), mapping_name.empty() ? nullptr : mapping_name.c_str());
  if (!mapping)
    Error::SetWin32(error, "CreateFileMappingW() failed: ", GetLastError());

  return static_cast<void*>(mapping);
}
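
// (CreateFileMappingW() above takes the 64-bit mapping size split across two 32-bit
// halves: the high DWORD is size >> 32 and the low DWORD is the truncated size.)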

void MemMap::DestroySharedMemory(void* ptr)
{
  CloseHandle(static_cast<HANDLE>(ptr));
}

void MemMap::DeleteSharedMemory(const char* name)
{
  // Automatically freed on close.
}

void* MemMap::MapSharedMemory(void* handle, size_t offset, void* baseaddr, size_t size, PageProtect mode)
{
  void* ret = MapViewOfFileEx(static_cast<HANDLE>(handle), FILE_MAP_READ | FILE_MAP_WRITE,
                              static_cast<DWORD>(offset >> 32), static_cast<DWORD>(offset), size, baseaddr);
  if (!ret)
    return nullptr;

  if (mode != PageProtect::ReadWrite)
  {
    DWORD old_prot;
    if (!VirtualProtect(ret, size, static_cast<DWORD>(mode), &old_prot))
      Panic("Failed to protect memory mapping");
  }
  return ret;
}

void MemMap::UnmapSharedMemory(void* baseaddr, size_t size)
{
  if (!UnmapViewOfFile(baseaddr))
    Panic("Failed to unmap shared memory");
}

const void* MemMap::GetBaseAddress()
{
  const HMODULE mod = GetModuleHandleW(nullptr);
  if (!mod)
    return nullptr;

  MODULEINFO mi;
  if (!GetModuleInformation(GetCurrentProcess(), mod, &mi, sizeof(mi)))
    return mod;

  return mi.lpBaseOfDll;
}

void* MemMap::AllocateJITMemoryAt(const void* addr, size_t size)
{
  void* ptr = static_cast<u8*>(VirtualAlloc(const_cast<void*>(addr), size,
                                            addr ? (MEM_RESERVE | MEM_COMMIT) : MEM_COMMIT, PAGE_EXECUTE_READWRITE));
  if (!ptr && !addr) [[unlikely]]
    ERROR_LOG("VirtualAlloc(RWX, {}) for internal buffer failed: {}", size, GetLastError());

  return ptr;
}
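
// A failed allocation at a hinted address is deliberately not logged above:
// AllocateJITMemory() probes many candidate addresses and expects most attempts to fail,
// so only the final, unhinted fallback reports an error.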

void MemMap::ReleaseJITMemory(void* ptr, size_t size)
{
  if (!VirtualFree(ptr, 0, MEM_RELEASE))
    ERROR_LOG("Failed to free code pointer {}", static_cast<void*>(ptr));
}

#if defined(CPU_ARCH_ARM32) || defined(CPU_ARCH_ARM64) || defined(CPU_ARCH_RISCV64)

void MemMap::FlushInstructionCache(void* address, size_t size)
{
  ::FlushInstructionCache(GetCurrentProcess(), address, size);
}

#endif

SharedMemoryMappingArea::SharedMemoryMappingArea() = default;

SharedMemoryMappingArea::~SharedMemoryMappingArea()
{
  Destroy();
}

SharedMemoryMappingArea::PlaceholderMap::iterator SharedMemoryMappingArea::FindPlaceholder(size_t offset)
{
  if (m_placeholder_ranges.empty())
    return m_placeholder_ranges.end();

  // this will give us an iterator at or after the page
  auto it = m_placeholder_ranges.lower_bound(offset);
  if (it == m_placeholder_ranges.end())
  {
    // check the last page
    it = (++m_placeholder_ranges.rbegin()).base();
  }

  // is it the one we found?
  if (offset >= it->first && offset < it->second)
    return it;

  // otherwise try the one before
  if (it == m_placeholder_ranges.begin())
    return m_placeholder_ranges.end();

  --it;
  if (offset >= it->first && offset < it->second)
    return it;
  else
    return m_placeholder_ranges.end();
}
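
// Worked example: with placeholder ranges {[0, 0x1000), [0x3000, 0x5000)},
// FindPlaceholder(0x3FFF) returns the second range (lower_bound() overshoots to end(), so
// the last range is checked), FindPlaceholder(0x800) returns the first (found via the
// "one before" step), and FindPlaceholder(0x2000) returns end().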

bool SharedMemoryMappingArea::Create(size_t size)
{
  Destroy();

  AssertMsg(Common::IsAlignedPow2(size, HOST_PAGE_SIZE), "Size is page aligned");

  m_base_ptr = static_cast<u8*>(VirtualAlloc2(GetCurrentProcess(), nullptr, size,
                                              MEM_RESERVE | MEM_RESERVE_PLACEHOLDER, PAGE_NOACCESS, nullptr, 0));
  if (!m_base_ptr)
    return false;

  m_size = size;
  m_num_pages = size >> HOST_PAGE_SHIFT;
  m_placeholder_ranges.emplace(0, size);
  return true;
}
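
// Note: VirtualAlloc2() with MEM_RESERVE_PLACEHOLDER, and the MapViewOfFile3()/
// UnmapViewOfFile2() calls used below, require Windows 10 version 1803 or later.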

void SharedMemoryMappingArea::Destroy()
{
  AssertMsg(m_num_mappings == 0, "No mappings left");

  // hopefully this will be okay, and we don't need to coalesce all the placeholders...
  if (m_base_ptr && !VirtualFreeEx(GetCurrentProcess(), m_base_ptr, 0, MEM_RELEASE))
    Panic("Failed to release shared memory area");

  m_placeholder_ranges.clear();
  m_base_ptr = nullptr;
  m_size = 0;
  m_num_pages = 0;
  m_num_mappings = 0;
}

u8* SharedMemoryMappingArea::Map(void* file_handle, size_t file_offset, void* map_base, size_t map_size,
                                 PageProtect mode)
{
  DebugAssert(static_cast<u8*>(map_base) >= m_base_ptr && static_cast<u8*>(map_base) < (m_base_ptr + m_size));

  const size_t map_offset = static_cast<u8*>(map_base) - m_base_ptr;
  DebugAssert(Common::IsAlignedPow2(map_offset, HOST_PAGE_SIZE));
  DebugAssert(Common::IsAlignedPow2(map_size, HOST_PAGE_SIZE));

  // should be a placeholder, unless there's some other mapping we didn't free.
  PlaceholderMap::iterator phit = FindPlaceholder(map_offset);
  DebugAssertMsg(phit != m_placeholder_ranges.end(), "Page we're mapping is a placeholder");
  DebugAssertMsg(map_offset >= phit->first && map_offset < phit->second, "Page is in returned placeholder range");
  DebugAssertMsg((map_offset + map_size) <= phit->second, "Page range is in returned placeholder range");

  // do we need to split to the left? (i.e. is there a placeholder before this range)
  const size_t old_ph_end = phit->second;
  if (map_offset != phit->first)
  {
    phit->second = map_offset;

    // split it (i.e. left..start and start..end are now separated)
    if (!VirtualFreeEx(GetCurrentProcess(), OffsetPointer(phit->first), (map_offset - phit->first),
                       MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER))
    {
      Panic("Failed to left split placeholder for map");
    }
  }
  else
  {
    // start of the placeholder is getting used, we'll split it right below if there's anything left over
    m_placeholder_ranges.erase(phit);
  }

  // do we need to split to the right? (i.e. is there a placeholder after this range)
  if ((map_offset + map_size) != old_ph_end)
  {
    // split out end..ph_end
    m_placeholder_ranges.emplace(map_offset + map_size, old_ph_end);

    if (!VirtualFreeEx(GetCurrentProcess(), OffsetPointer(map_offset), map_size,
                       MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER))
    {
      Panic("Failed to right split placeholder for map");
    }
  }

  // actually do the mapping, replacing the placeholder on the range
  if (!MapViewOfFile3(static_cast<HANDLE>(file_handle), GetCurrentProcess(), map_base, file_offset, map_size,
                      MEM_REPLACE_PLACEHOLDER, PAGE_READWRITE, nullptr, 0))
  {
    ERROR_LOG("MapViewOfFile3() failed: {}", GetLastError());
    return nullptr;
  }

  if (mode != PageProtect::ReadWrite)
  {
    DWORD old_prot;
    if (!VirtualProtect(map_base, map_size, static_cast<DWORD>(mode), &old_prot))
      Panic("Failed to protect memory mapping");
  }

  m_num_mappings++;
  return static_cast<u8*>(map_base);
}

bool SharedMemoryMappingArea::Unmap(void* map_base, size_t map_size)
{
  DebugAssert(static_cast<u8*>(map_base) >= m_base_ptr && static_cast<u8*>(map_base) < (m_base_ptr + m_size));

  const size_t map_offset = static_cast<u8*>(map_base) - m_base_ptr;
  DebugAssert(Common::IsAlignedPow2(map_offset, HOST_PAGE_SIZE));
  DebugAssert(Common::IsAlignedPow2(map_size, HOST_PAGE_SIZE));

  // unmap the specified range
  if (!UnmapViewOfFile2(GetCurrentProcess(), map_base, MEM_PRESERVE_PLACEHOLDER))
  {
    ERROR_LOG("UnmapViewOfFile2() failed: {}", GetLastError());
    return false;
  }

  // can we coalesce to the left?
  PlaceholderMap::iterator left_it = (map_offset > 0) ? FindPlaceholder(map_offset - 1) : m_placeholder_ranges.end();
  if (left_it != m_placeholder_ranges.end())
  {
    // the left placeholder should end at our start
    DebugAssert(map_offset == left_it->second);
    left_it->second = map_offset + map_size;

    // combine the placeholder before us and the range we're unmapping, i.e. coalesce to the left
    if (!VirtualFreeEx(GetCurrentProcess(), OffsetPointer(left_it->first), left_it->second - left_it->first,
                       MEM_RELEASE | MEM_COALESCE_PLACEHOLDERS))
    {
      Panic("Failed to coalesce placeholders left for unmap");
    }
  }
  else
  {
    // this is a new placeholder
    left_it = m_placeholder_ranges.emplace(map_offset, map_offset + map_size).first;
  }

  // can we coalesce to the right?
  PlaceholderMap::iterator right_it =
    ((map_offset + map_size) < m_size) ? FindPlaceholder(map_offset + map_size) : m_placeholder_ranges.end();
  if (right_it != m_placeholder_ranges.end())
  {
    // should start at our end
    DebugAssert(right_it->first == (map_offset + map_size));
    left_it->second = right_it->second;
    m_placeholder_ranges.erase(right_it);

    // combine our placeholder and the next, i.e. coalesce to the right
    if (!VirtualFreeEx(GetCurrentProcess(), OffsetPointer(left_it->first), left_it->second - left_it->first,
                       MEM_RELEASE | MEM_COALESCE_PLACEHOLDERS))
    {
      Panic("Failed to coalesce placeholders right for unmap");
    }
  }

  m_num_mappings--;
  return true;
}

#elif defined(__APPLE__)

u32 MemMap::GetRuntimePageSize()
{
  static u32 cached_page_size = 0;
  if (cached_page_size != 0) [[likely]]
    return cached_page_size;

  size_t page_size_size = sizeof(cached_page_size);
  if (sysctlbyname("hw.pagesize", &cached_page_size, &page_size_size, nullptr, 0) != 0) [[unlikely]]
    cached_page_size = 0;
  return cached_page_size;
}

bool MemMap::MemProtect(void* baseaddr, size_t size, PageProtect mode)
{
  DebugAssertMsg((size & (HOST_PAGE_SIZE - 1)) == 0, "Size is page aligned");

  kern_return_t res = mach_vm_protect(mach_task_self(), reinterpret_cast<mach_vm_address_t>(baseaddr), size, false,
                                      static_cast<vm_prot_t>(mode));
  if (res != KERN_SUCCESS) [[unlikely]]
  {
    ERROR_LOG("mach_vm_protect() failed: {}", res);
    return false;
  }

  return true;
}

std::string MemMap::GetFileMappingName(const char* prefix)
{
  // the name is not actually used.
  return {};
}

void* MemMap::CreateSharedMemory(const char* name, size_t size, Error* error)
{
  mach_vm_size_t vm_size = size;
  mach_port_t port;
  const kern_return_t res = mach_make_memory_entry_64(
    mach_task_self(), &vm_size, 0, MAP_MEM_NAMED_CREATE | VM_PROT_READ | VM_PROT_WRITE, &port, MACH_PORT_NULL);
  if (res != KERN_SUCCESS)
  {
    Error::SetStringFmt(error, "mach_make_memory_entry_64() failed: {}", res);
    return nullptr;
  }

  return reinterpret_cast<void*>(static_cast<uintptr_t>(port));
}
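
// MAP_MEM_NAMED_CREATE makes a brand-new anonymous VM object rather than referencing
// existing pages, so the returned Mach port plays the same role as the file descriptor /
// HANDLE on the other platforms; it is smuggled through the void* handle type.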

void MemMap::DestroySharedMemory(void* ptr)
{
  mach_port_deallocate(mach_task_self(), static_cast<mach_port_t>(reinterpret_cast<uintptr_t>(ptr)));
}

void MemMap::DeleteSharedMemory(const char* name)
{
}

void* MemMap::MapSharedMemory(void* handle, size_t offset, void* baseaddr, size_t size, PageProtect mode)
{
  mach_vm_address_t ptr = reinterpret_cast<mach_vm_address_t>(baseaddr);
  const kern_return_t res = mach_vm_map(mach_task_self(), &ptr, size, 0, baseaddr ? VM_FLAGS_FIXED : VM_FLAGS_ANYWHERE,
                                        static_cast<mach_port_t>(reinterpret_cast<uintptr_t>(handle)), offset, FALSE,
                                        static_cast<vm_prot_t>(mode), VM_PROT_READ | VM_PROT_WRITE, VM_INHERIT_NONE);
  if (res != KERN_SUCCESS)
  {
    ERROR_LOG("mach_vm_map() failed: {}", res);
    return nullptr;
  }

  return reinterpret_cast<void*>(ptr);
}

void MemMap::UnmapSharedMemory(void* baseaddr, size_t size)
{
  const kern_return_t res = mach_vm_deallocate(mach_task_self(), reinterpret_cast<mach_vm_address_t>(baseaddr), size);
  if (res != KERN_SUCCESS)
    Panic("Failed to unmap shared memory");
}

const void* MemMap::GetBaseAddress()
{
  u32 name_buffer_size = 0;
  _NSGetExecutablePath(nullptr, &name_buffer_size);
  if (name_buffer_size > 0) [[likely]]
  {
    std::unique_ptr<char[]> name_buffer = std::make_unique_for_overwrite<char[]>(name_buffer_size + 1);
    if (_NSGetExecutablePath(name_buffer.get(), &name_buffer_size) == 0) [[likely]]
    {
      name_buffer[name_buffer_size] = 0;

      const struct segment_command_64* command = getsegbyname("__TEXT");
      if (command) [[likely]]
      {
        const u8* base = reinterpret_cast<const u8*>(command->vmaddr);
        const u32 image_count = _dyld_image_count();
        for (u32 i = 0; i < image_count; i++)
        {
          if (std::strcmp(_dyld_get_image_name(i), name_buffer.get()) == 0)
            return base + _dyld_get_image_vmaddr_slide(i);
        }
      }
    }
  }

  return reinterpret_cast<const void*>(&GetBaseAddress);
}
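
// getsegbyname("__TEXT") returns the segment's link-time vmaddr; adding the matching
// image's _dyld_get_image_vmaddr_slide() (the ASLR slide) converts it to the address the
// executable was actually loaded at. If anything fails, a code address within this module
// is still a usable approximation of the image base for displacement purposes.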

void* MemMap::AllocateJITMemoryAt(const void* addr, size_t size)
{
#if !defined(__aarch64__)
  kern_return_t ret = mach_vm_allocate(mach_task_self(), reinterpret_cast<mach_vm_address_t*>(&addr), size,
                                       addr ? VM_FLAGS_FIXED : VM_FLAGS_ANYWHERE);
  if (ret != KERN_SUCCESS)
  {
    ERROR_LOG("mach_vm_allocate() returned {}", ret);
    return nullptr;
  }

  ret = mach_vm_protect(mach_task_self(), reinterpret_cast<mach_vm_address_t>(addr), size, false,
                        VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE);
  if (ret != KERN_SUCCESS)
  {
    ERROR_LOG("mach_vm_protect() returned {}", ret);
    mach_vm_deallocate(mach_task_self(), reinterpret_cast<mach_vm_address_t>(addr), size);
    return nullptr;
  }

  return const_cast<void*>(addr);
#else
  // On ARM64, we need to use MAP_JIT, which means we can't use MAP_FIXED.
  if (addr)
    return nullptr;

  constexpr int flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_JIT;
  void* ptr = mmap(const_cast<void*>(addr), size, PROT_READ | PROT_WRITE | PROT_EXEC, flags, -1, 0);
  if (ptr == MAP_FAILED)
  {
    ERROR_LOG("mmap(RWX, {}) for internal buffer failed: {}", size, errno);
    return nullptr;
  }

  return ptr;
#endif
}
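
// MAP_JIT regions on Apple Silicon are never writable and executable at the same time for
// a given thread; BeginCodeWrite()/EndCodeWrite() below toggle the per-thread W^X state
// via pthread_jit_write_protect_np() around any writes to this buffer.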

void MemMap::ReleaseJITMemory(void* ptr, size_t size)
{
#if !defined(__aarch64__)
  const kern_return_t res = mach_vm_deallocate(mach_task_self(), reinterpret_cast<mach_vm_address_t>(ptr), size);
  if (res != KERN_SUCCESS)
    ERROR_LOG("mach_vm_deallocate() failed: {}", res);
#else
  if (munmap(ptr, size) != 0)
    ERROR_LOG("Failed to free code pointer {}", static_cast<void*>(ptr));
#endif
}

#if defined(CPU_ARCH_ARM32) || defined(CPU_ARCH_ARM64) || defined(CPU_ARCH_RISCV64)

void MemMap::FlushInstructionCache(void* address, size_t size)
{
  __builtin___clear_cache(reinterpret_cast<char*>(address), reinterpret_cast<char*>(address) + size);
}

#endif

SharedMemoryMappingArea::SharedMemoryMappingArea() = default;

SharedMemoryMappingArea::~SharedMemoryMappingArea()
{
  Destroy();
}

bool SharedMemoryMappingArea::Create(size_t size)
{
  AssertMsg(Common::IsAlignedPow2(size, HOST_PAGE_SIZE), "Size is page aligned");
  Destroy();

  const kern_return_t res =
    mach_vm_map(mach_task_self(), reinterpret_cast<mach_vm_address_t*>(&m_base_ptr), size, 0, VM_FLAGS_ANYWHERE,
                MEMORY_OBJECT_NULL, 0, false, VM_PROT_NONE, VM_PROT_NONE, VM_INHERIT_NONE);
  if (res != KERN_SUCCESS)
  {
    ERROR_LOG("mach_vm_map() failed: {}", res);
    return false;
  }

  m_size = size;
  m_num_pages = size >> HOST_PAGE_SHIFT;
  return true;
}

void SharedMemoryMappingArea::Destroy()
{
  AssertMsg(m_num_mappings == 0, "No mappings left");

  if (m_base_ptr &&
      mach_vm_deallocate(mach_task_self(), reinterpret_cast<mach_vm_address_t>(m_base_ptr), m_size) != KERN_SUCCESS)
  {
    Panic("Failed to release shared memory area");
  }

  m_base_ptr = nullptr;
  m_size = 0;
  m_num_pages = 0;
}

u8* SharedMemoryMappingArea::Map(void* file_handle, size_t file_offset, void* map_base, size_t map_size,
                                 PageProtect mode)
{
  DebugAssert(static_cast<u8*>(map_base) >= m_base_ptr && static_cast<u8*>(map_base) < (m_base_ptr + m_size));

  const kern_return_t res =
    mach_vm_map(mach_task_self(), reinterpret_cast<mach_vm_address_t*>(&map_base), map_size, 0, VM_FLAGS_OVERWRITE,
                static_cast<mach_port_t>(reinterpret_cast<uintptr_t>(file_handle)), file_offset, false,
                static_cast<vm_prot_t>(mode), VM_PROT_READ | VM_PROT_WRITE, VM_INHERIT_NONE);
  if (res != KERN_SUCCESS) [[unlikely]]
  {
    ERROR_LOG("mach_vm_map() failed: {}", res);
    return nullptr;
  }

  m_num_mappings++;
  return static_cast<u8*>(map_base);
}

bool SharedMemoryMappingArea::Unmap(void* map_base, size_t map_size)
{
  DebugAssert(static_cast<u8*>(map_base) >= m_base_ptr && static_cast<u8*>(map_base) < (m_base_ptr + m_size));

  const kern_return_t res =
    mach_vm_map(mach_task_self(), reinterpret_cast<mach_vm_address_t*>(&map_base), map_size, 0, VM_FLAGS_OVERWRITE,
                MEMORY_OBJECT_NULL, 0, false, VM_PROT_NONE, VM_PROT_NONE, VM_INHERIT_NONE);
  if (res != KERN_SUCCESS) [[unlikely]]
  {
    ERROR_LOG("mach_vm_map() failed: {}", res);
    return false;
  }

  m_num_mappings--;
  return true;
}

#ifdef __aarch64__

static thread_local int s_code_write_depth = 0;
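
// The depth counter makes Begin/EndCodeWrite() nestable per thread: only the outermost
// BeginCodeWrite() flips the MAP_JIT region writable, and only the matching outermost
// EndCodeWrite() flips it back to executable.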

void MemMap::BeginCodeWrite()
{
  // DEBUG_LOG("BeginCodeWrite(): {}", s_code_write_depth);
  if ((s_code_write_depth++) == 0)
  {
    // DEBUG_LOG(" pthread_jit_write_protect_np(0)");
    pthread_jit_write_protect_np(0);
  }
}

void MemMap::EndCodeWrite()
{
  // DEBUG_LOG("EndCodeWrite(): {}", s_code_write_depth);

  DebugAssert(s_code_write_depth > 0);
  if ((--s_code_write_depth) == 0)
  {
    // DEBUG_LOG(" pthread_jit_write_protect_np(1)");
    pthread_jit_write_protect_np(1);
  }
}

#endif

#else

u32 MemMap::GetRuntimePageSize()
{
  static u32 cached_page_size = 0;
  if (cached_page_size != 0) [[likely]]
    return cached_page_size;

  const int res = sysconf(_SC_PAGESIZE);
  cached_page_size = (res > 0) ? static_cast<u32>(res) : 0;
  return cached_page_size;
}

bool MemMap::MemProtect(void* baseaddr, size_t size, PageProtect mode)
{
  DebugAssertMsg((size & (HOST_PAGE_SIZE - 1)) == 0, "Size is page aligned");

  const int result = mprotect(baseaddr, size, static_cast<int>(mode));
  if (result != 0) [[unlikely]]
  {
    ERROR_LOG("mprotect() for {} bytes at {} failed", size, baseaddr);
    return false;
  }

  return true;
}

std::string MemMap::GetFileMappingName(const char* prefix)
{
  const unsigned pid = static_cast<unsigned>(getpid());
#if defined(__FreeBSD__)
  // FreeBSD's shm_open(3) requires the name to be absolute
  return fmt::format("/tmp/{}_{}", prefix, pid);
#else
  return fmt::format("{}_{}", prefix, pid);
#endif
}

#ifndef __ANDROID__

void* MemMap::CreateSharedMemory(const char* name, size_t size, Error* error)
{
  const bool is_anonymous = (!name || *name == 0);
#if defined(__linux__) || defined(__FreeBSD__)
  const int fd = is_anonymous ? memfd_create("", 0) : shm_open(name, O_CREAT | O_EXCL | O_RDWR, 0600);
  if (fd < 0)
  {
    Error::SetErrno(error, is_anonymous ? "memfd_create() failed: " : "shm_open() failed: ", errno);
    return nullptr;
  }
#else
  const int fd = shm_open(name, O_CREAT | O_EXCL | O_RDWR, 0600);
  if (fd < 0)
  {
    Error::SetErrno(error, "shm_open() failed: ", errno);
    return nullptr;
  }

  // we're not going to be opening this mapping in other processes, so remove the file
  if (is_anonymous)
    shm_unlink(name);
#endif

  // use fallocate() to ensure we don't SIGBUS later on.
#ifdef __linux__
  if (fallocate(fd, 0, 0, static_cast<off_t>(size)) < 0)
  {
    Error::SetErrno(error, TinyString::from_format("fallocate({}) failed: ", size), errno);
    close(fd);
    if (!is_anonymous)
      shm_unlink(name);
    return nullptr;
  }
#else
  // ensure it's the correct size
  if (ftruncate(fd, static_cast<off_t>(size)) < 0)
  {
    Error::SetErrno(error, TinyString::from_format("ftruncate({}) failed: ", size), errno);
    close(fd);
    if (!is_anonymous)
      shm_unlink(name);
    return nullptr;
  }
#endif

  return reinterpret_cast<void*>(static_cast<intptr_t>(fd));
}
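
// On Linux/FreeBSD, memfd_create() is preferred for anonymous mappings because the fd has
// no filesystem/shm name to clean up; named mappings still go through shm_open(), whose
// objects persist until shm_unlink() (hence the unlink calls on the error paths above).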

void MemMap::DestroySharedMemory(void* ptr)
{
  close(static_cast<int>(reinterpret_cast<intptr_t>(ptr)));
}

void MemMap::DeleteSharedMemory(const char* name)
{
  shm_unlink(name);
}

#endif

void* MemMap::MapSharedMemory(void* handle, size_t offset, void* baseaddr, size_t size, PageProtect mode)
{
  const int flags = (baseaddr != nullptr) ? (MAP_SHARED | MAP_FIXED) : MAP_SHARED;
  void* ptr = mmap(baseaddr, size, static_cast<int>(mode), flags, static_cast<int>(reinterpret_cast<intptr_t>(handle)),
                   static_cast<off_t>(offset));
  if (ptr == MAP_FAILED)
    return nullptr;

  return ptr;
}

void MemMap::UnmapSharedMemory(void* baseaddr, size_t size)
{
  if (munmap(baseaddr, size) != 0)
    Panic("Failed to unmap shared memory");
}

const void* MemMap::GetBaseAddress()
{
#ifndef __APPLE__
  Dl_info info;
  if (dladdr(reinterpret_cast<const void*>(&GetBaseAddress), &info) == 0)
  {
    ERROR_LOG("dladdr() failed");
    return nullptr;
  }

  return info.dli_fbase;
#else
#error Fixme
#endif
}

void* MemMap::AllocateJITMemoryAt(const void* addr, size_t size)
{
  int flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__linux__)
  // Linux does the right thing and lets us avoid disturbing an existing mapping.
  if (addr)
    flags |= MAP_FIXED_NOREPLACE;
#elif defined(__FreeBSD__)
  // FreeBSD achieves the same with MAP_FIXED and MAP_EXCL.
  if (addr)
    flags |= MAP_FIXED | MAP_EXCL;
#else
  // Targeted mapping not available?
  if (addr)
    return nullptr;
#endif

  void* ptr = mmap(const_cast<void*>(addr), size, PROT_READ | PROT_WRITE | PROT_EXEC, flags, -1, 0);
  if (ptr == MAP_FAILED)
  {
    if (!addr)
      ERROR_LOG("mmap(RWX, {}) for internal buffer failed: {}", size, errno);

    return nullptr;
  }
  else if (addr && ptr != addr) [[unlikely]]
  {
    if (munmap(ptr, size) != 0)
      ERROR_LOG("Failed to munmap() incorrectly hinted allocation: {}", errno);
    return nullptr;
  }

  return ptr;
}

void MemMap::ReleaseJITMemory(void* ptr, size_t size)
{
  if (munmap(ptr, size) != 0)
    ERROR_LOG("Failed to free code pointer {}", static_cast<void*>(ptr));
}

#if defined(CPU_ARCH_ARM32) || defined(CPU_ARCH_ARM64) || defined(CPU_ARCH_RISCV64)

void MemMap::FlushInstructionCache(void* address, size_t size)
{
#if defined(CPU_ARCH_RISCV64) && defined(__linux__) && defined(__clang__) && (__clang_major__ <= 18)
  __riscv_flush_icache(reinterpret_cast<char*>(address), reinterpret_cast<char*>(address) + size, 0);
#else
  __builtin___clear_cache(reinterpret_cast<char*>(address), reinterpret_cast<char*>(address) + size);
#endif
}
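
// The clang check above is a workaround: with older clang on riscv64,
// __builtin___clear_cache() has reportedly not flushed correctly, so the syscall wrapper
// __riscv_flush_icache() from <sys/cachectl.h> (included at the top of this file) is
// called directly instead.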

#endif

SharedMemoryMappingArea::SharedMemoryMappingArea() = default;

SharedMemoryMappingArea::~SharedMemoryMappingArea()
{
  Destroy();
}

bool SharedMemoryMappingArea::Create(size_t size)
{
  AssertMsg(Common::IsAlignedPow2(size, HOST_PAGE_SIZE), "Size is page aligned");
  Destroy();

  void* alloc = mmap(nullptr, size, PROT_NONE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  if (alloc == MAP_FAILED)
    return false;

  m_base_ptr = static_cast<u8*>(alloc);
  m_size = size;
  m_num_pages = size >> HOST_PAGE_SHIFT;
  return true;
}

void SharedMemoryMappingArea::Destroy()
{
  AssertMsg(m_num_mappings == 0, "No mappings left");

  if (m_base_ptr && munmap(m_base_ptr, m_size) != 0)
    Panic("Failed to release shared memory area");

  m_base_ptr = nullptr;
  m_size = 0;
  m_num_pages = 0;
}

u8* SharedMemoryMappingArea::Map(void* file_handle, size_t file_offset, void* map_base, size_t map_size,
                                 PageProtect mode)
{
  DebugAssert(static_cast<u8*>(map_base) >= m_base_ptr && static_cast<u8*>(map_base) < (m_base_ptr + m_size));

  void* const ptr = mmap(map_base, map_size, static_cast<int>(mode), MAP_SHARED | MAP_FIXED,
                         static_cast<int>(reinterpret_cast<intptr_t>(file_handle)), static_cast<off_t>(file_offset));
  if (ptr == MAP_FAILED)
    return nullptr;

  m_num_mappings++;
  return static_cast<u8*>(ptr);
}

bool SharedMemoryMappingArea::Unmap(void* map_base, size_t map_size)
{
  DebugAssert(static_cast<u8*>(map_base) >= m_base_ptr && static_cast<u8*>(map_base) < (m_base_ptr + m_size));

  if (mmap(map_base, map_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) == MAP_FAILED)
    return false;

  m_num_mappings--;
  return true;
}
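
// Unmap() deliberately re-maps the range as PROT_NONE anonymous memory with MAP_FIXED
// rather than calling munmap(): this keeps the overall reservation contiguous, so a later
// Map() over the same range can simply overwrite it with another MAP_FIXED mapping.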

#endif

void* MemMap::AllocateJITMemory(size_t size)
{
  const u8* base =
    reinterpret_cast<const u8*>(Common::AlignDownPow2(reinterpret_cast<uintptr_t>(GetBaseAddress()), HOST_PAGE_SIZE));
  u8* ptr = nullptr;
#if !defined(CPU_ARCH_ARM64) || !defined(__APPLE__)

#if defined(CPU_ARCH_X64)
  static constexpr size_t assume_binary_size = 64 * 1024 * 1024;
  static constexpr size_t step = 64 * 1024 * 1024;
  static constexpr size_t max_displacement = 0x80000000u;
#elif defined(CPU_ARCH_ARM64) || defined(CPU_ARCH_RISCV64)
  static constexpr size_t assume_binary_size = 16 * 1024 * 1024;
  static constexpr size_t step = 8 * 1024 * 1024;
  static constexpr size_t max_displacement =
    1024 * 1024 * 1024; // technically 4GB, but we don't want to spend that much time trying
#elif defined(CPU_ARCH_ARM32)
  static constexpr size_t assume_binary_size = 8 * 1024 * 1024; // Wishful thinking...
  static constexpr size_t step = 2 * 1024 * 1024;
  static constexpr size_t max_displacement = 32 * 1024 * 1024;
#else
#error Unhandled architecture.
#endif
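
  // (On x86-64 the 0x80000000 limit is the reach of a rel32 branch: near call/jmp
  // displacements are signed 32-bit, so code must land within +/-2 GiB of the caller to
  // be directly callable without a trampoline.)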

  const size_t max_displacement_from_start = max_displacement - size;
  Assert(size <= max_displacement);

  // Try to find a region within the max displacement range of the process base address.
  // Assume that the DuckStation binary will be at most some size; a release build is currently around 12MB on
  // Windows, so the max offset is +/- 12MB + code_size. Try allocating in steps by incrementing the pointer, then
  // if no address range is found, go backwards from the base address (which will probably fail).
  const u8* min_address =
    base - std::min(reinterpret_cast<ptrdiff_t>(base), static_cast<ptrdiff_t>(max_displacement_from_start));
  const u8* max_address = base + max_displacement_from_start;
  VERBOSE_LOG("Base address: {}", static_cast<const void*>(base));
  VERBOSE_LOG("Acceptable address range: {} - {}", static_cast<const void*>(min_address),
              static_cast<const void*>(max_address));

  // Start offset by the expected binary size.
  for (const u8* current_address = base + assume_binary_size;; current_address += step)
  {
    VERBOSE_LOG("Trying {} (displacement 0x{:X})", static_cast<const void*>(current_address),
                static_cast<ptrdiff_t>(current_address - base));
    if ((ptr = static_cast<u8*>(AllocateJITMemoryAt(current_address, size))))
      break;

    if ((reinterpret_cast<uintptr_t>(current_address) + step) > reinterpret_cast<uintptr_t>(max_address) ||
        (reinterpret_cast<uintptr_t>(current_address) + step) < reinterpret_cast<uintptr_t>(current_address))
    {
      break;
    }
  }

  // Try before (will likely fail).
  if (!ptr && reinterpret_cast<uintptr_t>(base) >= step)
  {
    for (const u8* current_address = base - step;; current_address -= step)
    {
      VERBOSE_LOG("Trying {} (displacement 0x{:X})", static_cast<const void*>(current_address),
                  static_cast<ptrdiff_t>(base - current_address));
      if ((ptr = static_cast<u8*>(AllocateJITMemoryAt(current_address, size))))
        break;

      if ((reinterpret_cast<uintptr_t>(current_address) - step) < reinterpret_cast<uintptr_t>(min_address) ||
          (reinterpret_cast<uintptr_t>(current_address) - step) > reinterpret_cast<uintptr_t>(current_address))
      {
        break;
      }
    }
  }

  if (!ptr)
  {
#ifdef CPU_ARCH_X64
    ERROR_LOG("Failed to allocate JIT buffer in range, expect crashes.");
#endif
    if (!(ptr = static_cast<u8*>(AllocateJITMemoryAt(nullptr, size))))
      return ptr;
  }
#else
  // We cannot control where the buffer gets allocated on Apple Silicon. Hope for the best.
  if (!(ptr = static_cast<u8*>(AllocateJITMemoryAt(nullptr, size))))
    return ptr;
#endif

  INFO_LOG("Allocated JIT buffer of size {} at {} (0x{:X} bytes / {} MB away)", size, static_cast<void*>(ptr),
           std::abs(static_cast<ptrdiff_t>(ptr - base)),
           (std::abs(static_cast<ptrdiff_t>(ptr - base)) + (1024 * 1024 - 1)) / (1024 * 1024));

  return ptr;
}