GitHub Repository: stenzek/duckstation
Path: blob/master/src/common/memmap.cpp
// SPDX-FileCopyrightText: 2019-2024 Connor McLaughlin <[email protected]>
// SPDX-License-Identifier: CC-BY-NC-ND-4.0

#include "memmap.h"
#include "align.h"
#include "assert.h"
#include "error.h"
#include "log.h"
#include "small_string.h"
#include "string_util.h"

#include "fmt/format.h"

#include <memory>

#if defined(_WIN32)
#include "windows_headers.h"
#include <Psapi.h>
#elif defined(__APPLE__)
#ifdef __aarch64__
#include <pthread.h> // pthread_jit_write_protect_np()
#endif
#include <mach-o/dyld.h>
#include <mach-o/getsect.h>
#include <mach/mach_init.h>
#include <mach/mach_port.h>
#include <mach/mach_vm.h>
#include <mach/vm_map.h>
#include <sys/mman.h>
#include <sys/sysctl.h>
#else
#include <cerrno>
#include <dlfcn.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>
#endif
#if defined(__linux__) && defined(CPU_ARCH_RISCV64)
#include <sys/cachectl.h>
#endif

#if defined(__linux__) && !defined(MAP_FIXED_NOREPLACE)
// Compatibility with old libc.
#define MAP_FIXED_NOREPLACE 0x100000
#endif

LOG_CHANNEL(MemMap);

namespace MemMap {
/// Allocates RWX memory at the specified address.
static void* AllocateJITMemoryAt(const void* addr, size_t size);
} // namespace MemMap

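// Descriptive note: when DYNAMIC_HOST_PAGE_SIZE is defined, the host page size is not a
// compile-time constant (e.g. ARM64 systems that may use 4K or 16K pages), so these globals are
// initialized once during static initialization from the value the OS reports.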
#ifdef DYNAMIC_HOST_PAGE_SIZE
const u32 HOST_PAGE_SIZE = MemMap::GetRuntimePageSize();
const u32 HOST_PAGE_MASK = MemMap::GetRuntimePageSize() - 1;
const u32 HOST_PAGE_SHIFT = std::bit_width(MemMap::GetRuntimePageSize() - 1);
#endif

#ifdef _WIN32

u32 MemMap::GetRuntimePageSize()
{
  static u32 cached_page_size = 0;
  if (cached_page_size != 0) [[likely]]
    return cached_page_size;

  SYSTEM_INFO si = {};
  GetSystemInfo(&si);
  cached_page_size = si.dwPageSize;
  return cached_page_size;
}

bool MemMap::MemProtect(void* baseaddr, size_t size, PageProtect mode)
{
  DebugAssert((size & (HOST_PAGE_SIZE - 1)) == 0);

  DWORD old_protect;
  if (!VirtualProtect(baseaddr, size, static_cast<DWORD>(mode), &old_protect))
  {
    ERROR_LOG("VirtualProtect() failed with error {}", GetLastError());
    return false;
  }

  return true;
}

std::string MemMap::GetFileMappingName(const char* prefix)
{
  const unsigned pid = GetCurrentProcessId();
  return fmt::format("{}_{}", prefix, pid);
}

void* MemMap::CreateSharedMemory(const char* name, size_t size, Error* error)
{
  const std::wstring mapping_name = name ? StringUtil::UTF8StringToWideString(name) : std::wstring();
  const HANDLE mapping =
    CreateFileMappingW(INVALID_HANDLE_VALUE, nullptr, PAGE_READWRITE, static_cast<DWORD>(size >> 32),
                       static_cast<DWORD>(size), mapping_name.empty() ? nullptr : mapping_name.c_str());
  if (!mapping)
    Error::SetWin32(error, "CreateFileMappingW() failed: ", GetLastError());

  return static_cast<void*>(mapping);
}

void MemMap::DestroySharedMemory(void* ptr)
{
  CloseHandle(static_cast<HANDLE>(ptr));
}

void MemMap::DeleteSharedMemory(const char* name)
{
  // Automatically freed on close.
}

void* MemMap::MapSharedMemory(void* handle, size_t offset, void* baseaddr, size_t size, PageProtect mode)
{
  void* ret = MapViewOfFileEx(static_cast<HANDLE>(handle), FILE_MAP_READ | FILE_MAP_WRITE,
                              static_cast<DWORD>(offset >> 32), static_cast<DWORD>(offset), size, baseaddr);
  if (!ret)
    return nullptr;

  if (mode != PageProtect::ReadWrite)
  {
    DWORD old_prot;
    if (!VirtualProtect(ret, size, static_cast<DWORD>(mode), &old_prot))
      Panic("Failed to protect memory mapping");
  }
  return ret;
}

void MemMap::UnmapSharedMemory(void* baseaddr, size_t size)
{
  if (!UnmapViewOfFile(baseaddr))
    Panic("Failed to unmap shared memory");
}

const void* MemMap::GetBaseAddress()
{
  const HMODULE mod = GetModuleHandleW(nullptr);
  if (!mod)
    return nullptr;

  MODULEINFO mi;
  if (!GetModuleInformation(GetCurrentProcess(), mod, &mi, sizeof(mi)))
    return mod;

  return mi.lpBaseOfDll;
}

void* MemMap::AllocateJITMemoryAt(const void* addr, size_t size)
{
  void* ptr = static_cast<u8*>(VirtualAlloc(const_cast<void*>(addr), size,
                                            addr ? (MEM_RESERVE | MEM_COMMIT) : MEM_COMMIT, PAGE_EXECUTE_READWRITE));
  if (!ptr && !addr) [[unlikely]]
    ERROR_LOG("VirtualAlloc(RWX, {}) for internal buffer failed: {}", size, GetLastError());

  return ptr;
}

void MemMap::ReleaseJITMemory(void* ptr, size_t size)
{
  if (!VirtualFree(ptr, 0, MEM_RELEASE))
    ERROR_LOG("Failed to free code pointer {}", static_cast<void*>(ptr));
}

#if defined(CPU_ARCH_ARM32) || defined(CPU_ARCH_ARM64) || defined(CPU_ARCH_RISCV64)

void MemMap::FlushInstructionCache(void* address, size_t size)
{
  ::FlushInstructionCache(GetCurrentProcess(), address, size);
}

#endif

SharedMemoryMappingArea::SharedMemoryMappingArea() = default;

SharedMemoryMappingArea::~SharedMemoryMappingArea()
{
  Destroy();
}

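// Descriptive note: on Windows the reserved region is carved into "placeholders"
// (MEM_RESERVE_PLACEHOLDER). Mapping a view first splits the covering placeholder with
// VirtualFreeEx(MEM_PRESERVE_PLACEHOLDER), then replaces it with the file view via
// MapViewOfFile3(MEM_REPLACE_PLACEHOLDER). m_placeholder_ranges tracks the offset ranges of the
// remaining placeholders so they can be split on Map() and coalesced again on Unmap().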
SharedMemoryMappingArea::PlaceholderMap::iterator SharedMemoryMappingArea::FindPlaceholder(size_t offset)
{
  if (m_placeholder_ranges.empty())
    return m_placeholder_ranges.end();

  // this will give us an iterator equal or after page
  auto it = m_placeholder_ranges.lower_bound(offset);
  if (it == m_placeholder_ranges.end())
  {
    // check the last page
    it = (++m_placeholder_ranges.rbegin()).base();
  }

  // it's the one we found?
  if (offset >= it->first && offset < it->second)
    return it;

  // otherwise try the one before
  if (it == m_placeholder_ranges.begin())
    return m_placeholder_ranges.end();

  --it;
  if (offset >= it->first && offset < it->second)
    return it;
  else
    return m_placeholder_ranges.end();
}

bool SharedMemoryMappingArea::Create(size_t size)
{
  Destroy();

  AssertMsg(Common::IsAlignedPow2(size, HOST_PAGE_SIZE), "Size is page aligned");

  m_base_ptr = static_cast<u8*>(VirtualAlloc2(GetCurrentProcess(), nullptr, size, MEM_RESERVE | MEM_RESERVE_PLACEHOLDER,
                                              PAGE_NOACCESS, nullptr, 0));
  if (!m_base_ptr)
    return false;

  m_size = size;
  m_num_pages = size >> HOST_PAGE_SHIFT;
  m_placeholder_ranges.emplace(0, size);
  return true;
}

void SharedMemoryMappingArea::Destroy()
{
  AssertMsg(m_num_mappings == 0, "No mappings left");

  // hopefully this will be okay, and we don't need to coalesce all the placeholders...
  if (m_base_ptr && !VirtualFreeEx(GetCurrentProcess(), m_base_ptr, 0, MEM_RELEASE))
    Panic("Failed to release shared memory area");

  m_placeholder_ranges.clear();
  m_base_ptr = nullptr;
  m_size = 0;
  m_num_pages = 0;
  m_num_mappings = 0;
}

u8* SharedMemoryMappingArea::Map(void* file_handle, size_t file_offset, void* map_base, size_t map_size,
                                 PageProtect mode)
{
  DebugAssert(static_cast<u8*>(map_base) >= m_base_ptr && static_cast<u8*>(map_base) < (m_base_ptr + m_size));

  const size_t map_offset = static_cast<u8*>(map_base) - m_base_ptr;
  DebugAssert(Common::IsAlignedPow2(map_offset, HOST_PAGE_SIZE));
  DebugAssert(Common::IsAlignedPow2(map_size, HOST_PAGE_SIZE));

  // should be a placeholder. unless there's some other mapping we didn't free.
  PlaceholderMap::iterator phit = FindPlaceholder(map_offset);
  DebugAssertMsg(phit != m_placeholder_ranges.end(), "Page we're mapping is a placeholder");
  DebugAssertMsg(map_offset >= phit->first && map_offset < phit->second, "Page is in returned placeholder range");
  DebugAssertMsg((map_offset + map_size) <= phit->second, "Page range is in returned placeholder range");

  // do we need to split to the left? (i.e. is there a placeholder before this range)
  const size_t old_ph_end = phit->second;
  if (map_offset != phit->first)
  {
    phit->second = map_offset;

    // split it (i.e. left..start and start..end are now separated)
    if (!VirtualFreeEx(GetCurrentProcess(), OffsetPointer(phit->first), (map_offset - phit->first),
                       MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER))
    {
      Panic("Failed to left split placeholder for map");
    }
  }
  else
  {
    // start of the placeholder is getting used, we'll split it right below if there's anything left over
    m_placeholder_ranges.erase(phit);
  }

  // do we need to split to the right? (i.e. is there a placeholder after this range)
  if ((map_offset + map_size) != old_ph_end)
  {
    // split out end..ph_end
    m_placeholder_ranges.emplace(map_offset + map_size, old_ph_end);

    if (!VirtualFreeEx(GetCurrentProcess(), OffsetPointer(map_offset), map_size,
                       MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER))
    {
      Panic("Failed to right split placeholder for map");
    }
  }

  // actually do the mapping, replacing the placeholder on the range
  if (!MapViewOfFile3(static_cast<HANDLE>(file_handle), GetCurrentProcess(), map_base, file_offset, map_size,
                      MEM_REPLACE_PLACEHOLDER, PAGE_READWRITE, nullptr, 0))
  {
    ERROR_LOG("MapViewOfFile3() failed: {}", GetLastError());
    return nullptr;
  }

  if (mode != PageProtect::ReadWrite)
  {
    DWORD old_prot;
    if (!VirtualProtect(map_base, map_size, static_cast<DWORD>(mode), &old_prot))
      Panic("Failed to protect memory mapping");
  }

  m_num_mappings++;
  return static_cast<u8*>(map_base);
}

bool SharedMemoryMappingArea::Unmap(void* map_base, size_t map_size)
{
  DebugAssert(static_cast<u8*>(map_base) >= m_base_ptr && static_cast<u8*>(map_base) < (m_base_ptr + m_size));

  const size_t map_offset = static_cast<u8*>(map_base) - m_base_ptr;
  DebugAssert(Common::IsAlignedPow2(map_offset, HOST_PAGE_SIZE));
  DebugAssert(Common::IsAlignedPow2(map_size, HOST_PAGE_SIZE));

  // unmap the specified range
  if (!UnmapViewOfFile2(GetCurrentProcess(), map_base, MEM_PRESERVE_PLACEHOLDER))
  {
    ERROR_LOG("UnmapViewOfFile2() failed: {}", GetLastError());
    return false;
  }

  // can we coalesce to the left?
  PlaceholderMap::iterator left_it = (map_offset > 0) ? FindPlaceholder(map_offset - 1) : m_placeholder_ranges.end();
  if (left_it != m_placeholder_ranges.end())
  {
    // the left placeholder should end at our start
    DebugAssert(map_offset == left_it->second);
    left_it->second = map_offset + map_size;

    // combine placeholders before and the range we're unmapping, i.e. to the left
    if (!VirtualFreeEx(GetCurrentProcess(), OffsetPointer(left_it->first), left_it->second - left_it->first,
                       MEM_RELEASE | MEM_COALESCE_PLACEHOLDERS))
    {
      Panic("Failed to coalesce placeholders left for unmap");
    }
  }
  else
  {
    // this is a new placeholder
    left_it = m_placeholder_ranges.emplace(map_offset, map_offset + map_size).first;
  }

  // can we coalesce to the right?
  PlaceholderMap::iterator right_it =
    ((map_offset + map_size) < m_size) ? FindPlaceholder(map_offset + map_size) : m_placeholder_ranges.end();
  if (right_it != m_placeholder_ranges.end())
  {
    // should start at our end
    DebugAssert(right_it->first == (map_offset + map_size));
    left_it->second = right_it->second;
    m_placeholder_ranges.erase(right_it);

    // combine our placeholder and the next, i.e. to the right
    if (!VirtualFreeEx(GetCurrentProcess(), OffsetPointer(left_it->first), left_it->second - left_it->first,
                       MEM_RELEASE | MEM_COALESCE_PLACEHOLDERS))
    {
      Panic("Failed to coalesce placeholders right for unmap");
    }
  }

  m_num_mappings--;
  return true;
}

#elif defined(__APPLE__)

u32 MemMap::GetRuntimePageSize()
{
  static u32 cached_page_size = 0;
  if (cached_page_size != 0) [[likely]]
    return cached_page_size;

  size_t page_size_size = sizeof(cached_page_size);
  if (sysctlbyname("hw.pagesize", &cached_page_size, &page_size_size, nullptr, 0) != 0) [[unlikely]]
    cached_page_size = 0;
  return cached_page_size;
}

bool MemMap::MemProtect(void* baseaddr, size_t size, PageProtect mode)
{
  DebugAssertMsg((size & (HOST_PAGE_SIZE - 1)) == 0, "Size is page aligned");

  kern_return_t res = mach_vm_protect(mach_task_self(), reinterpret_cast<mach_vm_address_t>(baseaddr), size, false,
                                      static_cast<vm_prot_t>(mode));
  if (res != KERN_SUCCESS) [[unlikely]]
  {
    ERROR_LOG("mach_vm_protect() failed: {}", res);
    return false;
  }

  return true;
}

std::string MemMap::GetFileMappingName(const char* prefix)
{
  // name actually is not used.
  return {};
}

void* MemMap::CreateSharedMemory(const char* name, size_t size, Error* error)
{
  mach_vm_size_t vm_size = size;
  mach_port_t port;
  const kern_return_t res = mach_make_memory_entry_64(
    mach_task_self(), &vm_size, 0, MAP_MEM_NAMED_CREATE | VM_PROT_READ | VM_PROT_WRITE, &port, MACH_PORT_NULL);
  if (res != KERN_SUCCESS)
  {
    Error::SetStringFmt(error, "mach_make_memory_entry_64() failed: {}", res);
    return nullptr;
  }

  return reinterpret_cast<void*>(static_cast<uintptr_t>(port));
}

void MemMap::DestroySharedMemory(void* ptr)
{
  mach_port_deallocate(mach_task_self(), static_cast<mach_port_t>(reinterpret_cast<uintptr_t>(ptr)));
}

void MemMap::DeleteSharedMemory(const char* name)
{
}

void* MemMap::MapSharedMemory(void* handle, size_t offset, void* baseaddr, size_t size, PageProtect mode)
{
  mach_vm_address_t ptr = reinterpret_cast<mach_vm_address_t>(baseaddr);
  const kern_return_t res = mach_vm_map(mach_task_self(), &ptr, size, 0, baseaddr ? VM_FLAGS_FIXED : VM_FLAGS_ANYWHERE,
                                        static_cast<mach_port_t>(reinterpret_cast<uintptr_t>(handle)), offset, FALSE,
                                        static_cast<vm_prot_t>(mode), VM_PROT_READ | VM_PROT_WRITE, VM_INHERIT_NONE);
  if (res != KERN_SUCCESS)
  {
    ERROR_LOG("mach_vm_map() failed: {}", res);
    return nullptr;
  }

  return reinterpret_cast<void*>(ptr);
}

void MemMap::UnmapSharedMemory(void* baseaddr, size_t size)
{
  const kern_return_t res = mach_vm_deallocate(mach_task_self(), reinterpret_cast<mach_vm_address_t>(baseaddr), size);
  if (res != KERN_SUCCESS)
    Panic("Failed to unmap shared memory");
}

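// Descriptive note: resolves the load address of the main executable by taking the __TEXT
// segment's linked vmaddr and adding the ASLR slide of the dyld image whose path matches the
// running binary; falls back to the address of this function if the lookup fails.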
const void* MemMap::GetBaseAddress()
{
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
#endif

  u32 name_buffer_size = 0;
  _NSGetExecutablePath(nullptr, &name_buffer_size);
  if (name_buffer_size > 0) [[likely]]
  {
    std::unique_ptr<char[]> name_buffer = std::make_unique_for_overwrite<char[]>(name_buffer_size + 1);
    if (_NSGetExecutablePath(name_buffer.get(), &name_buffer_size) == 0) [[likely]]
    {
      name_buffer[name_buffer_size] = 0;

      const struct segment_command_64* command = getsegbyname("__TEXT");
      if (command) [[likely]]
      {
        const u8* base = reinterpret_cast<const u8*>(command->vmaddr);
        const u32 image_count = _dyld_image_count();
        for (u32 i = 0; i < image_count; i++)
        {
          if (std::strcmp(_dyld_get_image_name(i), name_buffer.get()) == 0)
            return base + _dyld_get_image_vmaddr_slide(i);
        }
      }
    }
  }

  return reinterpret_cast<const void*>(&GetBaseAddress);

#ifdef __clang__
#pragma clang diagnostic pop
#endif
}

void* MemMap::AllocateJITMemoryAt(const void* addr, size_t size)
{
#if !defined(__aarch64__)
  kern_return_t ret = mach_vm_allocate(mach_task_self(), reinterpret_cast<mach_vm_address_t*>(&addr), size,
                                       addr ? VM_FLAGS_FIXED : VM_FLAGS_ANYWHERE);
  if (ret != KERN_SUCCESS)
  {
    ERROR_LOG("mach_vm_allocate() returned {}", ret);
    return nullptr;
  }

  ret = mach_vm_protect(mach_task_self(), reinterpret_cast<mach_vm_address_t>(addr), size, false,
                        VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE);
  if (ret != KERN_SUCCESS)
  {
    ERROR_LOG("mach_vm_protect() returned {}", ret);
    mach_vm_deallocate(mach_task_self(), reinterpret_cast<mach_vm_address_t>(addr), size);
    return nullptr;
  }

  return const_cast<void*>(addr);
#else
  // On ARM64, we need to use MAP_JIT, which means we can't use MAP_FIXED.
  if (addr)
    return nullptr;

  constexpr int flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_JIT;
  void* ptr = mmap(const_cast<void*>(addr), size, PROT_READ | PROT_WRITE | PROT_EXEC, flags, -1, 0);
  if (ptr == MAP_FAILED)
  {
    ERROR_LOG("mmap(RWX, {}) for internal buffer failed: {}", size, errno);
    return nullptr;
  }

  return ptr;
#endif
}

void MemMap::ReleaseJITMemory(void* ptr, size_t size)
{
#if !defined(__aarch64__)
  const kern_return_t res = mach_vm_deallocate(mach_task_self(), reinterpret_cast<mach_vm_address_t>(ptr), size);
  if (res != KERN_SUCCESS)
    ERROR_LOG("mach_vm_deallocate() failed: {}", res);
#else
  if (munmap(ptr, size) != 0)
    ERROR_LOG("Failed to free code pointer {}", static_cast<void*>(ptr));
#endif
}

#if defined(CPU_ARCH_ARM32) || defined(CPU_ARCH_ARM64) || defined(CPU_ARCH_RISCV64)

void MemMap::FlushInstructionCache(void* address, size_t size)
{
  __builtin___clear_cache(reinterpret_cast<char*>(address), reinterpret_cast<char*>(address) + size);
}

#endif

SharedMemoryMappingArea::SharedMemoryMappingArea() = default;

SharedMemoryMappingArea::~SharedMemoryMappingArea()
{
  Destroy();
}

bool SharedMemoryMappingArea::Create(size_t size)
{
  AssertMsg(Common::IsAlignedPow2(size, HOST_PAGE_SIZE), "Size is page aligned");
  Destroy();

  const kern_return_t res =
    mach_vm_map(mach_task_self(), reinterpret_cast<mach_vm_address_t*>(&m_base_ptr), size, 0, VM_FLAGS_ANYWHERE,
                MEMORY_OBJECT_NULL, 0, false, VM_PROT_NONE, VM_PROT_NONE, VM_INHERIT_NONE);
  if (res != KERN_SUCCESS)
  {
    ERROR_LOG("mach_vm_map() failed: {}", res);
    return false;
  }

  m_size = size;
  m_num_pages = size >> HOST_PAGE_SHIFT;
  return true;
}

void SharedMemoryMappingArea::Destroy()
{
  AssertMsg(m_num_mappings == 0, "No mappings left");

  if (m_base_ptr &&
      mach_vm_deallocate(mach_task_self(), reinterpret_cast<mach_vm_address_t>(m_base_ptr), m_size) != KERN_SUCCESS)
  {
    Panic("Failed to release shared memory area");
  }

  m_base_ptr = nullptr;
  m_size = 0;
  m_num_pages = 0;
}

u8* SharedMemoryMappingArea::Map(void* file_handle, size_t file_offset, void* map_base, size_t map_size,
                                 PageProtect mode)
{
  DebugAssert(static_cast<u8*>(map_base) >= m_base_ptr && static_cast<u8*>(map_base) < (m_base_ptr + m_size));

  const kern_return_t res =
    mach_vm_map(mach_task_self(), reinterpret_cast<mach_vm_address_t*>(&map_base), map_size, 0, VM_FLAGS_OVERWRITE,
                static_cast<mach_port_t>(reinterpret_cast<uintptr_t>(file_handle)), file_offset, false,
                static_cast<vm_prot_t>(mode), VM_PROT_READ | VM_PROT_WRITE, VM_INHERIT_NONE);
  if (res != KERN_SUCCESS) [[unlikely]]
  {
    ERROR_LOG("mach_vm_map() failed: {}", res);
    return nullptr;
  }

  m_num_mappings++;
  return static_cast<u8*>(map_base);
}

bool SharedMemoryMappingArea::Unmap(void* map_base, size_t map_size)
{
  DebugAssert(static_cast<u8*>(map_base) >= m_base_ptr && static_cast<u8*>(map_base) < (m_base_ptr + m_size));

  const kern_return_t res =
    mach_vm_map(mach_task_self(), reinterpret_cast<mach_vm_address_t*>(&map_base), map_size, 0, VM_FLAGS_OVERWRITE,
                MEMORY_OBJECT_NULL, 0, false, VM_PROT_NONE, VM_PROT_NONE, VM_INHERIT_NONE);
  if (res != KERN_SUCCESS) [[unlikely]]
  {
    ERROR_LOG("mach_vm_map() failed: {}", res);
    return false;
  }

  m_num_mappings--;
  return true;
}

#ifdef __aarch64__

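// Descriptive note: Apple Silicon enforces W^X for MAP_JIT pages, so a thread must toggle between
// the writable and executable views with pthread_jit_write_protect_np(). The per-thread depth
// counter lets BeginCodeWrite()/EndCodeWrite() pairs nest safely.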
static thread_local int s_code_write_depth = 0;

void MemMap::BeginCodeWrite()
{
  // DEBUG_LOG("BeginCodeWrite(): {}", s_code_write_depth);
  if ((s_code_write_depth++) == 0)
  {
    // DEBUG_LOG(" pthread_jit_write_protect_np(0)");
    pthread_jit_write_protect_np(0);
  }
}

void MemMap::EndCodeWrite()
{
  // DEBUG_LOG("EndCodeWrite(): {}", s_code_write_depth);

  DebugAssert(s_code_write_depth > 0);
  if ((--s_code_write_depth) == 0)
  {
    // DEBUG_LOG(" pthread_jit_write_protect_np(1)");
    pthread_jit_write_protect_np(1);
  }
}

#endif

#else

u32 MemMap::GetRuntimePageSize()
{
  static u32 cached_page_size = 0;
  if (cached_page_size != 0) [[likely]]
    return cached_page_size;

  const int res = sysconf(_SC_PAGESIZE);
  cached_page_size = (res > 0) ? static_cast<u32>(res) : 0;
  return cached_page_size;
}

bool MemMap::MemProtect(void* baseaddr, size_t size, PageProtect mode)
{
  DebugAssertMsg((size & (HOST_PAGE_SIZE - 1)) == 0, "Size is page aligned");

  const int result = mprotect(baseaddr, size, static_cast<int>(mode));
  if (result != 0) [[unlikely]]
  {
    ERROR_LOG("mprotect() for {} at {} failed", size, baseaddr);
    return false;
  }

  return true;
}

std::string MemMap::GetFileMappingName(const char* prefix)
{
  const unsigned pid = static_cast<unsigned>(getpid());
#if defined(__FreeBSD__)
  // FreeBSD's shm_open(3) requires name to be absolute
  return fmt::format("/tmp/{}_{}", prefix, pid);
#else
  return fmt::format("{}_{}", prefix, pid);
#endif
}

#ifndef __ANDROID__

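// Descriptive note: anonymous buffers use memfd_create() on Linux/FreeBSD, otherwise a named POSIX
// shared memory object is created with shm_open(); the resulting file descriptor is returned cast
// into the opaque handle pointer.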
void* MemMap::CreateSharedMemory(const char* name, size_t size, Error* error)
{
  const bool is_anonymous = (!name || *name == 0);
#if defined(__linux__) || defined(__FreeBSD__)
  const int fd = is_anonymous ? memfd_create("", 0) : shm_open(name, O_CREAT | O_EXCL | O_RDWR, 0600);
  if (fd < 0)
  {
    Error::SetErrno(error, is_anonymous ? "memfd_create() failed: " : "shm_open() failed: ", errno);
    return nullptr;
  }
#else
  const int fd = shm_open(name, O_CREAT | O_EXCL | O_RDWR, 0600);
  if (fd < 0)
  {
    Error::SetErrno(error, "shm_open() failed: ", errno);
    return nullptr;
  }

  // we're not going to be opening this mapping in other processes, so remove the file
  if (is_anonymous)
    shm_unlink(name);
#endif

  // use fallocate() to ensure we don't SIGBUS later on.
#ifdef __linux__
  if (fallocate(fd, 0, 0, static_cast<off_t>(size)) < 0)
  {
    Error::SetErrno(error, TinyString::from_format("fallocate({}) failed: ", size), errno);
    close(fd);
    if (!is_anonymous)
      shm_unlink(name);
    return nullptr;
  }
#else
  // ensure it's the correct size
  if (ftruncate(fd, static_cast<off_t>(size)) < 0)
  {
    Error::SetErrno(error, TinyString::from_format("ftruncate({}) failed: ", size), errno);
    close(fd);
    if (!is_anonymous)
      shm_unlink(name);
    return nullptr;
  }
#endif

  return reinterpret_cast<void*>(static_cast<intptr_t>(fd));
}

void MemMap::DestroySharedMemory(void* ptr)
{
  close(static_cast<int>(reinterpret_cast<intptr_t>(ptr)));
}

void MemMap::DeleteSharedMemory(const char* name)
{
  shm_unlink(name);
}

#endif

void* MemMap::MapSharedMemory(void* handle, size_t offset, void* baseaddr, size_t size, PageProtect mode)
{
  const int flags = (baseaddr != nullptr) ? (MAP_SHARED | MAP_FIXED) : MAP_SHARED;
  void* ptr = mmap(baseaddr, size, static_cast<int>(mode), flags, static_cast<int>(reinterpret_cast<intptr_t>(handle)),
                   static_cast<off_t>(offset));
  if (ptr == MAP_FAILED)
    return nullptr;

  return ptr;
}

void MemMap::UnmapSharedMemory(void* baseaddr, size_t size)
{
  if (munmap(baseaddr, size) != 0)
    Panic("Failed to unmap shared memory");
}

const void* MemMap::GetBaseAddress()
{
#ifndef __APPLE__
  Dl_info info;
  if (dladdr(reinterpret_cast<const void*>(&GetBaseAddress), &info) == 0)
  {
    ERROR_LOG("dladdr() failed");
    return nullptr;
  }

  return info.dli_fbase;
#else
#error Fixme
#endif
}

void* MemMap::AllocateJITMemoryAt(const void* addr, size_t size)
{
  int flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__linux__)
  // Linux does the right thing, allows us to not disturb an existing mapping.
  if (addr)
    flags |= MAP_FIXED_NOREPLACE;
#elif defined(__FreeBSD__)
  // FreeBSD achieves the same with MAP_FIXED and MAP_EXCL.
  if (addr)
    flags |= MAP_FIXED | MAP_EXCL;
#else
  // Targeted mapping not available?
  if (addr)
    return nullptr;
#endif

  void* ptr = mmap(const_cast<void*>(addr), size, PROT_READ | PROT_WRITE | PROT_EXEC, flags, -1, 0);
  if (ptr == MAP_FAILED)
  {
    if (!addr)
      ERROR_LOG("mmap(RWX, {}) for internal buffer failed: {}", size, errno);

    return nullptr;
  }
  else if (addr && ptr != addr) [[unlikely]]
  {
    if (munmap(ptr, size) != 0)
      ERROR_LOG("Failed to munmap() incorrectly hinted allocation: {}", errno);
    return nullptr;
  }

  return ptr;
}

void MemMap::ReleaseJITMemory(void* ptr, size_t size)
{
  if (munmap(ptr, size) != 0)
    ERROR_LOG("Failed to free code pointer {}", static_cast<void*>(ptr));
}

#if defined(CPU_ARCH_ARM32) || defined(CPU_ARCH_ARM64) || defined(CPU_ARCH_RISCV64)

void MemMap::FlushInstructionCache(void* address, size_t size)
{
#if defined(CPU_ARCH_RISCV64) && defined(__linux__) && defined(__clang__) && (__clang_major__ <= 18)
  __riscv_flush_icache(reinterpret_cast<char*>(address), reinterpret_cast<char*>(address) + size, 0);
#else
  __builtin___clear_cache(reinterpret_cast<char*>(address), reinterpret_cast<char*>(address) + size);
#endif
}

#endif

SharedMemoryMappingArea::SharedMemoryMappingArea() = default;

SharedMemoryMappingArea::~SharedMemoryMappingArea()
{
  Destroy();
}

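// Descriptive note: the POSIX mapping area reserves the whole range as an inaccessible PROT_NONE
// anonymous mapping; Map() and Unmap() then mmap() over the reserved pages with MAP_FIXED, so the
// address range stays owned by the process between mappings.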
bool SharedMemoryMappingArea::Create(size_t size)
{
  AssertMsg(Common::IsAlignedPow2(size, HOST_PAGE_SIZE), "Size is page aligned");
  Destroy();

  void* alloc = mmap(nullptr, size, PROT_NONE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  if (alloc == MAP_FAILED)
    return false;

  m_base_ptr = static_cast<u8*>(alloc);
  m_size = size;
  m_num_pages = size >> HOST_PAGE_SHIFT;
  return true;
}

void SharedMemoryMappingArea::Destroy()
{
  AssertMsg(m_num_mappings == 0, "No mappings left");

  if (m_base_ptr && munmap(m_base_ptr, m_size) != 0)
    Panic("Failed to release shared memory area");

  m_base_ptr = nullptr;
  m_size = 0;
  m_num_pages = 0;
}

u8* SharedMemoryMappingArea::Map(void* file_handle, size_t file_offset, void* map_base, size_t map_size,
                                 PageProtect mode)
{
  DebugAssert(static_cast<u8*>(map_base) >= m_base_ptr && static_cast<u8*>(map_base) < (m_base_ptr + m_size));

  void* const ptr = mmap(map_base, map_size, static_cast<int>(mode), MAP_SHARED | MAP_FIXED,
                         static_cast<int>(reinterpret_cast<intptr_t>(file_handle)), static_cast<off_t>(file_offset));
  if (ptr == MAP_FAILED)
    return nullptr;

  m_num_mappings++;
  return static_cast<u8*>(ptr);
}

bool SharedMemoryMappingArea::Unmap(void* map_base, size_t map_size)
{
  DebugAssert(static_cast<u8*>(map_base) >= m_base_ptr && static_cast<u8*>(map_base) < (m_base_ptr + m_size));

  if (mmap(map_base, map_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) == MAP_FAILED)
    return false;

  m_num_mappings--;
  return true;
}

#endif

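// Descriptive note: the JIT buffer is placed as close to the executable image as possible,
// presumably so generated code can reach functions in the host binary with near branches
// (e.g. +/-2GB rel32 calls on x86-64); the per-architecture limits below bound the search.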
void* MemMap::AllocateJITMemory(size_t size)
{
  const u8* base =
    reinterpret_cast<const u8*>(Common::AlignDownPow2(reinterpret_cast<uintptr_t>(GetBaseAddress()), HOST_PAGE_SIZE));
  u8* ptr = nullptr;
#if !defined(CPU_ARCH_ARM64) || !defined(__APPLE__)

#if defined(CPU_ARCH_X64)
  static constexpr size_t assume_binary_size = 64 * 1024 * 1024;
  static constexpr size_t step = 64 * 1024 * 1024;
  static constexpr size_t max_displacement = 0x80000000u;
#elif defined(CPU_ARCH_ARM64) || defined(CPU_ARCH_RISCV64)
  static constexpr size_t assume_binary_size = 16 * 1024 * 1024;
  static constexpr size_t step = 8 * 1024 * 1024;
  static constexpr size_t max_displacement =
    1024 * 1024 * 1024; // technically 4GB, but we don't want to spend that much time trying
#elif defined(CPU_ARCH_ARM32)
  static constexpr size_t assume_binary_size = 8 * 1024 * 1024; // Wishful thinking...
  static constexpr size_t step = 2 * 1024 * 1024;
  static constexpr size_t max_displacement = 32 * 1024 * 1024;
#else
#error Unhandled architecture.
#endif

  const size_t max_displacement_from_start = max_displacement - size;
  Assert(size <= max_displacement);

  // Try to find a region in the max displacement range of the process base address.
  // Assume that the DuckStation binary will at max be some size, release is currently around 12MB on Windows.
  // Therefore the max offset is +/- 12MB + code_size. Try allocating in steps by incrementing the pointer, then if no
  // address range is found, go backwards from the base address (which will probably fail).
  const u8* min_address =
    base - std::min(reinterpret_cast<ptrdiff_t>(base), static_cast<ptrdiff_t>(max_displacement_from_start));
  const u8* max_address = base + max_displacement_from_start;
  VERBOSE_LOG("Base address: {}", static_cast<const void*>(base));
  VERBOSE_LOG("Acceptable address range: {} - {}", static_cast<const void*>(min_address),
              static_cast<const void*>(max_address));

  // Start offset by the expected binary size.
  for (const u8* current_address = base + assume_binary_size;; current_address += step)
  {
    VERBOSE_LOG("Trying {} (displacement 0x{:X})", static_cast<const void*>(current_address),
                static_cast<ptrdiff_t>(current_address - base));
    if ((ptr = static_cast<u8*>(AllocateJITMemoryAt(current_address, size))))
      break;

    if ((reinterpret_cast<uintptr_t>(current_address) + step) > reinterpret_cast<uintptr_t>(max_address) ||
        (reinterpret_cast<uintptr_t>(current_address) + step) < reinterpret_cast<uintptr_t>(current_address))
    {
      break;
    }
  }

  // Try before (will likely fail).
  if (!ptr && reinterpret_cast<uintptr_t>(base) >= step)
  {
    for (const u8* current_address = base - step;; current_address -= step)
    {
      VERBOSE_LOG("Trying {} (displacement 0x{:X})", static_cast<const void*>(current_address),
                  static_cast<ptrdiff_t>(base - current_address));
      if ((ptr = static_cast<u8*>(AllocateJITMemoryAt(current_address, size))))
        break;

      if ((reinterpret_cast<uintptr_t>(current_address) - step) < reinterpret_cast<uintptr_t>(min_address) ||
          (reinterpret_cast<uintptr_t>(current_address) - step) > reinterpret_cast<uintptr_t>(current_address))
      {
        break;
      }
    }
  }

  if (!ptr)
  {
#ifdef CPU_ARCH_X64
    ERROR_LOG("Failed to allocate JIT buffer in range, expect crashes.");
#endif
    if (!(ptr = static_cast<u8*>(AllocateJITMemoryAt(nullptr, size))))
      return ptr;
  }
#else
  // We cannot control where the buffer gets allocated on Apple Silicon. Hope for the best.
  if (!(ptr = static_cast<u8*>(AllocateJITMemoryAt(nullptr, size))))
    return ptr;
#endif

  INFO_LOG("Allocated JIT buffer of size {} at {} (0x{:X} bytes / {} MB away)", size, static_cast<void*>(ptr),
           std::abs(static_cast<ptrdiff_t>(ptr - base)),
           (std::abs(static_cast<ptrdiff_t>(ptr - base)) + (1024 * 1024 - 1)) / (1024 * 1024));

  return ptr;
}