GitHub Repository: official-stockfish/Stockfish
Path: blob/master/src/memory.h
/*
  Stockfish, a UCI chess playing engine derived from Glaurung 2.1
  Copyright (C) 2004-2026 The Stockfish developers (see AUTHORS file)

  Stockfish is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  Stockfish is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

#ifndef MEMORY_H_INCLUDED
#define MEMORY_H_INCLUDED

#include <algorithm>
#include <cstdint>
#include <memory>
#include <new>
#include <type_traits>
#include <utility>
#include <cstring>

#include "types.h"

#if defined(_WIN64)

    #if _WIN32_WINNT < 0x0601
        #undef _WIN32_WINNT
        #define _WIN32_WINNT 0x0601  // Force to include needed API prototypes
    #endif

    #if !defined(NOMINMAX)
        #define NOMINMAX
    #endif
    #include <windows.h>

// Some Windows headers (RPC/old headers) define short macros such
// as 'small' expanding to 'char', which breaks identifiers in the code.
// Undefine those macros immediately after including <windows.h>.
    #ifdef small
        #undef small
    #endif

    #include <psapi.h>

extern "C" {
using OpenProcessToken_t      = bool (*)(HANDLE, DWORD, PHANDLE);
using LookupPrivilegeValueA_t = bool (*)(LPCSTR, LPCSTR, PLUID);
using AdjustTokenPrivileges_t =
  bool (*)(HANDLE, BOOL, PTOKEN_PRIVILEGES, DWORD, PTOKEN_PRIVILEGES, PDWORD);
}
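
// Note: the aliases above are function-pointer types. They are used further down
// in windows_try_with_large_page_priviliges() to bind these advapi32 entry points
// at runtime via GetProcAddress() instead of linking them statically.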
#endif


namespace Stockfish {

void* std_aligned_alloc(size_t alignment, size_t size);
void  std_aligned_free(void* ptr);

// Memory aligned by page size, min alignment: 4096 bytes
void* aligned_large_pages_alloc(size_t size);
void  aligned_large_pages_free(void* mem);

bool has_large_pages();
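
// Illustrative usage sketch (not part of this header): each allocation is
// released through the matching free function, exactly as the deleters defined
// further below pair them. has_large_pages() reports whether large pages can
// actually be used on the current system.
//
//     void* a = std_aligned_alloc(64, 4096);                 // 4 KiB, 64-byte aligned
//     std_aligned_free(a);
//
//     void* b = aligned_large_pages_alloc(8 * 1024 * 1024);  // page-aligned, large pages if possible
//     aligned_large_pages_free(b);
//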
// Frees memory which was placed there with placement new.
// Used for single objects; arrays of unknown bound are handled by
// memory_deleter_array() below.
template<typename T, typename FREE_FUNC>
void memory_deleter(T* ptr, FREE_FUNC free_func) {
    if (!ptr)
        return;

    // Explicitly needed to call the destructor
    if constexpr (!std::is_trivially_destructible_v<T>)
        ptr->~T();

    free_func(ptr);
}
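
// Illustrative pairing sketch (Foo and its constructor argument are hypothetical):
// memory_deleter() undoes what the single-object memory_allocator() below does,
// i.e. it runs the destructor and then returns the raw block to the supplied
// free function, mirroring how make_unique_aligned() and AlignedDeleter pair up.
//
//     const auto alloc = [](size_t size) { return std_aligned_alloc(alignof(Foo), size); };
//     Foo* obj = memory_allocator<Foo>(alloc, 42);   // placement-new construction
//     memory_deleter<Foo>(obj, std_aligned_free);    // ~Foo() + std_aligned_free()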

// Frees memory which was placed there with placement new.
// Used for arrays of unknown bound created by the array overload of
// memory_allocator() below.
template<typename T, typename FREE_FUNC>
void memory_deleter_array(T* ptr, FREE_FUNC free_func) {
    if (!ptr)
        return;

    // Move back on the pointer to where the size is allocated
    const size_t array_offset = std::max(sizeof(size_t), alignof(T));
    char*        raw_memory   = reinterpret_cast<char*>(ptr) - array_offset;

    if constexpr (!std::is_trivially_destructible_v<T>)
    {
        const size_t size = *reinterpret_cast<size_t*>(raw_memory);

        // Explicitly call the destructor for each element in reverse order
        for (size_t i = size; i-- > 0;)
            ptr[i].~T();
    }

    free_func(raw_memory);
}

// Allocates memory for a single object and places it there with placement new
template<typename T, typename ALLOC_FUNC, typename... Args>
inline std::enable_if_t<!std::is_array_v<T>, T*> memory_allocator(ALLOC_FUNC alloc_func,
                                                                  Args&&... args) {
    void* raw_memory = alloc_func(sizeof(T));
    ASSERT_ALIGNED(raw_memory, alignof(T));
    return new (raw_memory) T(std::forward<Args>(args)...);
}

// Allocates memory for an array of unknown bound and places it there with placement new
template<typename T, typename ALLOC_FUNC>
inline std::enable_if_t<std::is_array_v<T>, std::remove_extent_t<T>*>
memory_allocator(ALLOC_FUNC alloc_func, size_t num) {
    using ElementType = std::remove_extent_t<T>;

    const size_t array_offset = std::max(sizeof(size_t), alignof(ElementType));

    // Save the array size in the memory location
    char* raw_memory =
      reinterpret_cast<char*>(alloc_func(array_offset + num * sizeof(ElementType)));
    ASSERT_ALIGNED(raw_memory, alignof(T));

    new (raw_memory) size_t(num);

    for (size_t i = 0; i < num; ++i)
        new (raw_memory + array_offset + i * sizeof(ElementType)) ElementType();

    // Need to return the pointer at the start of the array so that
    // the indexing in unique_ptr<T[]> works.
    return reinterpret_cast<ElementType*>(raw_memory + array_offset);
}
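
// Memory layout produced by the array overload above and consumed by
// memory_deleter_array(), shown for illustration:
//
//     raw_memory
//     |
//     [ size_t num | padding up to array_offset | elem 0 | elem 1 | ... | elem num-1 ]
//                                                 ^
//                                                 returned ElementType* (stored in
//                                                 the unique_ptr<T[]> wrappers below)
//
// array_offset = std::max(sizeof(size_t), alignof(ElementType)) is a multiple of
// alignof(ElementType), so the element block stays aligned as long as the
// allocator satisfies the element alignment (checked by ASSERT_ALIGNED above).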

//
//
// aligned large page unique ptr
//
//

template<typename T>
struct LargePageDeleter {
    void operator()(T* ptr) const { return memory_deleter<T>(ptr, aligned_large_pages_free); }
};

template<typename T>
struct LargePageArrayDeleter {
    void operator()(T* ptr) const { return memory_deleter_array<T>(ptr, aligned_large_pages_free); }
};

template<typename T>
using LargePagePtr =
  std::conditional_t<std::is_array_v<T>,
                     std::unique_ptr<T, LargePageArrayDeleter<std::remove_extent_t<T>>>,
                     std::unique_ptr<T, LargePageDeleter<T>>>;

// make_unique_large_page for single objects
template<typename T, typename... Args>
std::enable_if_t<!std::is_array_v<T>, LargePagePtr<T>> make_unique_large_page(Args&&... args) {
    static_assert(alignof(T) <= 4096,
                  "aligned_large_pages_alloc() may fail for such a big alignment requirement of T");

    T* obj = memory_allocator<T>(aligned_large_pages_alloc, std::forward<Args>(args)...);

    return LargePagePtr<T>(obj);
}

// make_unique_large_page for arrays of unknown bound
template<typename T>
std::enable_if_t<std::is_array_v<T>, LargePagePtr<T>> make_unique_large_page(size_t num) {
    using ElementType = std::remove_extent_t<T>;

    static_assert(alignof(ElementType) <= 4096,
                  "aligned_large_pages_alloc() may fail for such a big alignment requirement of T");

    ElementType* memory = memory_allocator<T>(aligned_large_pages_alloc, num);

    return LargePagePtr<T>(memory);
}
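
// Illustrative usage sketch (Bucket is a hypothetical type, not part of this
// header): LargePagePtr<T[]> indexes like a plain unique_ptr<T[]>, and its
// deleter hands the block back through aligned_large_pages_free().
//
//     struct alignas(64) Bucket {
//         uint64_t key;
//         uint64_t data;
//     };
//
//     LargePagePtr<Bucket[]> table = make_unique_large_page<Bucket[]>(1 << 20);
//     table[0].key = 0;   // normal array indexing
//     // released via LargePageArrayDeleter when `table` goes out of scope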

//
//
// aligned unique ptr
//
//

template<typename T>
struct AlignedDeleter {
    void operator()(T* ptr) const { return memory_deleter<T>(ptr, std_aligned_free); }
};

template<typename T>
struct AlignedArrayDeleter {
    void operator()(T* ptr) const { return memory_deleter_array<T>(ptr, std_aligned_free); }
};

template<typename T>
using AlignedPtr =
  std::conditional_t<std::is_array_v<T>,
                     std::unique_ptr<T, AlignedArrayDeleter<std::remove_extent_t<T>>>,
                     std::unique_ptr<T, AlignedDeleter<T>>>;

// make_unique_aligned for single objects
template<typename T, typename... Args>
std::enable_if_t<!std::is_array_v<T>, AlignedPtr<T>> make_unique_aligned(Args&&... args) {
    const auto func = [](size_t size) { return std_aligned_alloc(alignof(T), size); };
    T*         obj  = memory_allocator<T>(func, std::forward<Args>(args)...);

    return AlignedPtr<T>(obj);
}

// make_unique_aligned for arrays of unknown bound
template<typename T>
std::enable_if_t<std::is_array_v<T>, AlignedPtr<T>> make_unique_aligned(size_t num) {
    using ElementType = std::remove_extent_t<T>;

    const auto   func   = [](size_t size) { return std_aligned_alloc(alignof(ElementType), size); };
    ElementType* memory = memory_allocator<T>(func, num);

    return AlignedPtr<T>(memory);
}
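
// Illustrative usage sketch (Accumulator is a hypothetical type): the aligned
// variants mirror the large-page ones, but allocate with std_aligned_alloc()
// at alignof(T) and release with std_aligned_free().
//
//     struct alignas(32) Accumulator { int32_t v[64]; };
//
//     AlignedPtr<Accumulator>   one  = make_unique_aligned<Accumulator>();
//     AlignedPtr<Accumulator[]> many = make_unique_aligned<Accumulator[]>(16);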


// Get the first aligned element of an array.
// ptr must point to an array of size at least `sizeof(T) * N + alignment` bytes,
// where N is the number of elements in the array.
template<uintptr_t Alignment, typename T>
T* align_ptr_up(T* ptr) {
    static_assert(alignof(T) < Alignment);

    const uintptr_t ptrint = reinterpret_cast<uintptr_t>(reinterpret_cast<char*>(ptr));
    return reinterpret_cast<T*>(
      reinterpret_cast<char*>((ptrint + (Alignment - 1)) / Alignment * Alignment));
}
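
// Illustrative sketch (sizes are hypothetical): the buffer is over-allocated by
// the alignment amount, so the rounded-up pointer still leaves room for the
// elements that follow it.
//
//     alignas(int) char storage[sizeof(int) * 8 + 64];
//     int* first = align_ptr_up<64>(reinterpret_cast<int*>(storage));
//     // `first` is 64-byte aligned and points into `storage`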

#if defined(_WIN32)

template<typename FuncYesT, typename FuncNoT>
auto windows_try_with_large_page_priviliges([[maybe_unused]] FuncYesT&& fyes, FuncNoT&& fno) {

    #if !defined(_WIN64)
    return fno();
    #else

    HANDLE hProcessToken{};
    LUID   luid{};

    const size_t largePageSize = GetLargePageMinimum();
    if (!largePageSize)
        return fno();

    // Dynamically link OpenProcessToken, LookupPrivilegeValue and AdjustTokenPrivileges

    HMODULE hAdvapi32 = GetModuleHandle(TEXT("advapi32.dll"));

    if (!hAdvapi32)
        hAdvapi32 = LoadLibrary(TEXT("advapi32.dll"));

    auto OpenProcessToken_f =
      OpenProcessToken_t((void (*)()) GetProcAddress(hAdvapi32, "OpenProcessToken"));
    if (!OpenProcessToken_f)
        return fno();
    auto LookupPrivilegeValueA_f =
      LookupPrivilegeValueA_t((void (*)()) GetProcAddress(hAdvapi32, "LookupPrivilegeValueA"));
    if (!LookupPrivilegeValueA_f)
        return fno();
    auto AdjustTokenPrivileges_f =
      AdjustTokenPrivileges_t((void (*)()) GetProcAddress(hAdvapi32, "AdjustTokenPrivileges"));
    if (!AdjustTokenPrivileges_f)
        return fno();

    // We need SeLockMemoryPrivilege, so try to enable it for the process

    if (!OpenProcessToken_f(  // OpenProcessToken()
          GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY, &hProcessToken))
        return fno();

    if (!LookupPrivilegeValueA_f(nullptr, "SeLockMemoryPrivilege", &luid))
        return fno();

    TOKEN_PRIVILEGES tp{};
    TOKEN_PRIVILEGES prevTp{};
    DWORD            prevTpLen = 0;

    tp.PrivilegeCount           = 1;
    tp.Privileges[0].Luid       = luid;
    tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;

    // Try to enable SeLockMemoryPrivilege. Note that even if AdjustTokenPrivileges()
    // succeeds, we still need to query GetLastError() to ensure that the privileges
    // were actually obtained.

    if (!AdjustTokenPrivileges_f(hProcessToken, FALSE, &tp, sizeof(TOKEN_PRIVILEGES), &prevTp,
                                 &prevTpLen)
        || GetLastError() != ERROR_SUCCESS)
        return fno();

    auto&& ret = fyes(largePageSize);

    // Privilege no longer needed, restore previous state
    AdjustTokenPrivileges_f(hProcessToken, FALSE, &prevTp, 0, nullptr, nullptr);

    CloseHandle(hProcessToken);

    return std::forward<decltype(ret)>(ret);

    #endif
}
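
// Illustrative call sketch (the lambdas are hypothetical): the first callback
// receives the minimum large-page size and runs with SeLockMemoryPrivilege
// temporarily enabled; the second is the fallback used when large pages or the
// privilege are unavailable.
//
//     void* mem = windows_try_with_large_page_priviliges(
//       [&](size_t largePageSize) -> void* { /* allocate with large pages */ return nullptr; },
//       [&]() -> void* { /* plain allocation fallback */ return nullptr; });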

#endif

template<typename T, typename ByteT>
T load_as(const ByteT* buffer) {
    static_assert(std::is_trivially_copyable<T>::value, "Type must be trivially copyable");
    static_assert(sizeof(ByteT) == 1);

    T value;
    std::memcpy(&value, buffer, sizeof(T));

    return value;
}
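
// Illustrative usage sketch: load_as() reads a T out of a raw byte buffer via
// std::memcpy, avoiding unaligned access and strict-aliasing issues; the bytes
// are interpreted in host byte order (buffer contents here are hypothetical).
//
//     const unsigned char buffer[4] = {0x01, 0x00, 0x00, 0x00};
//     auto v = load_as<std::uint32_t>(buffer);   // 1 on a little-endian host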

}  // namespace Stockfish

#endif  // #ifndef MEMORY_H_INCLUDED