Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
godotengine
GitHub Repository: godotengine/godot
Path: blob/master/thirdparty/d3d12ma/D3D12MemAlloc.cpp
9896 views
1
//
2
// Copyright (c) 2019-2022 Advanced Micro Devices, Inc. All rights reserved.
3
//
4
// Permission is hereby granted, free of charge, to any person obtaining a copy
5
// of this software and associated documentation files (the "Software"), to deal
6
// in the Software without restriction, including without limitation the rights
7
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8
// copies of the Software, and to permit persons to whom the Software is
9
// furnished to do so, subject to the following conditions:
10
//
11
// The above copyright notice and this permission notice shall be included in
12
// all copies or substantial portions of the Software.
13
//
14
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20
// THE SOFTWARE.
21
//
22
23
#include "D3D12MemAlloc.h"
24
25
#include <combaseapi.h>
26
#include <mutex>
27
#include <algorithm>
28
#include <utility>
29
#include <cstdlib>
30
#include <cstdint>
31
#include <malloc.h> // for _aligned_malloc, _aligned_free
32
#ifndef _WIN32
33
#include <shared_mutex>
34
#endif
35
36
#if !defined(_MSC_VER)
37
#include <guiddef.h>
38
39
#include <dxguids.h>
40
#endif
41
42
////////////////////////////////////////////////////////////////////////////////
43
////////////////////////////////////////////////////////////////////////////////
44
//
45
// Configuration Begin
46
//
47
////////////////////////////////////////////////////////////////////////////////
48
////////////////////////////////////////////////////////////////////////////////
49
#ifndef _D3D12MA_CONFIGURATION
50
51
#ifdef _WIN32
52
#if !defined(WINVER) || WINVER < 0x0600
53
#error Required at least WinAPI version supporting: client = Windows Vista, server = Windows Server 2008.
54
#endif
55
#endif
56
57
#ifndef D3D12MA_SORT
58
#define D3D12MA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
59
#endif
60
61
#ifndef D3D12MA_D3D12_HEADERS_ALREADY_INCLUDED
62
#include <dxgi.h>
63
#if D3D12MA_DXGI_1_4
64
#include <dxgi1_4.h>
65
#endif
66
#endif
67
68
#ifndef D3D12MA_ASSERT
69
#include <cassert>
70
#define D3D12MA_ASSERT(cond) assert(cond)
71
#endif
72
73
// Assert that will be called very often, like inside data structures e.g. operator[].
74
// Making it non-empty can make program slow.
75
#ifndef D3D12MA_HEAVY_ASSERT
76
#ifdef _DEBUG
77
#define D3D12MA_HEAVY_ASSERT(expr) //D3D12MA_ASSERT(expr)
78
#else
79
#define D3D12MA_HEAVY_ASSERT(expr)
80
#endif
81
#endif
82
83
#ifndef D3D12MA_DEBUG_ALIGNMENT
84
/*
85
Minimum alignment of all allocations, in bytes.
86
Set to more than 1 for debugging purposes only. Must be power of two.
87
*/
88
#define D3D12MA_DEBUG_ALIGNMENT (1)
89
#endif
90
91
#ifndef D3D12MA_DEBUG_MARGIN
92
// Minimum margin before and after every allocation, in bytes.
93
// Set nonzero for debugging purposes only.
94
#define D3D12MA_DEBUG_MARGIN (0)
95
#endif
96
97
#ifndef D3D12MA_DEBUG_GLOBAL_MUTEX
98
/*
99
Set this to 1 for debugging purposes only, to enable single mutex protecting all
100
entry calls to the library. Can be useful for debugging multithreading issues.
101
*/
102
#define D3D12MA_DEBUG_GLOBAL_MUTEX (0)
103
#endif
104
105
/*
106
Define this macro for debugging purposes only to force specific D3D12_RESOURCE_HEAP_TIER,
107
especially to test compatibility with D3D12_RESOURCE_HEAP_TIER_1 on modern GPUs.
108
*/
109
//#define D3D12MA_FORCE_RESOURCE_HEAP_TIER D3D12_RESOURCE_HEAP_TIER_1
110
111
#ifndef D3D12MA_DEFAULT_BLOCK_SIZE
112
/// Default size of a block allocated as single ID3D12Heap.
113
#define D3D12MA_DEFAULT_BLOCK_SIZE (64ull * 1024 * 1024)
114
#endif
115
116
#ifndef D3D12MA_DEBUG_LOG
117
#define D3D12MA_DEBUG_LOG(format, ...)
118
/*
119
#define D3D12MA_DEBUG_LOG(format, ...) do { \
120
wprintf(format, __VA_ARGS__); \
121
wprintf(L"\n"); \
122
} while(false)
123
*/
124
#endif
125
126
#endif // _D3D12MA_CONFIGURATION
127
////////////////////////////////////////////////////////////////////////////////
128
////////////////////////////////////////////////////////////////////////////////
129
//
130
// Configuration End
131
//
132
////////////////////////////////////////////////////////////////////////////////
133
////////////////////////////////////////////////////////////////////////////////
134
135
#define D3D12MA_IID_PPV_ARGS(ppType) __uuidof(**(ppType)), reinterpret_cast<void**>(ppType)
136
137
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
138
#define D3D12MA_CREATE_NOT_ZEROED_AVAILABLE 1
139
#endif
140
141
namespace D3D12MA
142
{
143
static constexpr UINT HEAP_TYPE_COUNT = 4;
144
static constexpr UINT STANDARD_HEAP_TYPE_COUNT = 3; // Only DEFAULT, UPLOAD, READBACK.
145
static constexpr UINT DEFAULT_POOL_MAX_COUNT = 9;
146
static const UINT NEW_BLOCK_SIZE_SHIFT_MAX = 3;
147
// Minimum size of a free suballocation to register it in the free suballocation collection.
148
static const UINT64 MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
149
150
static const WCHAR* const HeapTypeNames[] =
151
{
152
L"DEFAULT",
153
L"UPLOAD",
154
L"READBACK",
155
L"CUSTOM",
156
};
157
158
static const D3D12_HEAP_FLAGS RESOURCE_CLASS_HEAP_FLAGS =
159
D3D12_HEAP_FLAG_DENY_BUFFERS | D3D12_HEAP_FLAG_DENY_RT_DS_TEXTURES | D3D12_HEAP_FLAG_DENY_NON_RT_DS_TEXTURES;
160
161
static const D3D12_RESIDENCY_PRIORITY D3D12_RESIDENCY_PRIORITY_NONE = D3D12_RESIDENCY_PRIORITY(0);
162
163
#ifndef _D3D12MA_ENUM_DECLARATIONS
164
165
// Local copy of this enum, as it is provided only by <dxgi1_4.h>, so it may not be available.
166
enum DXGI_MEMORY_SEGMENT_GROUP_COPY
167
{
168
DXGI_MEMORY_SEGMENT_GROUP_LOCAL_COPY = 0,
169
DXGI_MEMORY_SEGMENT_GROUP_NON_LOCAL_COPY = 1,
170
DXGI_MEMORY_SEGMENT_GROUP_COUNT
171
};
172
173
enum class ResourceClass
174
{
175
Unknown, Buffer, Non_RT_DS_Texture, RT_DS_Texture
176
};
177
178
enum SuballocationType
179
{
180
SUBALLOCATION_TYPE_FREE = 0,
181
SUBALLOCATION_TYPE_ALLOCATION = 1,
182
};
183
184
#endif // _D3D12MA_ENUM_DECLARATIONS
185
186
187
#ifndef _D3D12MA_FUNCTIONS
188
189
static void* DefaultAllocate(size_t Size, size_t Alignment, void* /*pPrivateData*/)
190
{
191
#ifdef _WIN32
192
return _aligned_malloc(Size, Alignment);
193
#else
194
return aligned_alloc(Alignment, Size);
195
#endif
196
}
197
static void DefaultFree(void* pMemory, void* /*pPrivateData*/)
198
{
199
#ifdef _WIN32
200
return _aligned_free(pMemory);
201
#else
202
return free(pMemory);
203
#endif
204
}
205
206
static void* Malloc(const ALLOCATION_CALLBACKS& allocs, size_t size, size_t alignment)
207
{
208
void* const result = (*allocs.pAllocate)(size, alignment, allocs.pPrivateData);
209
D3D12MA_ASSERT(result);
210
return result;
211
}
212
static void Free(const ALLOCATION_CALLBACKS& allocs, void* memory)
213
{
214
(*allocs.pFree)(memory, allocs.pPrivateData);
215
}
216
217
template<typename T>
218
static T* Allocate(const ALLOCATION_CALLBACKS& allocs)
219
{
220
return (T*)Malloc(allocs, sizeof(T), __alignof(T));
221
}
222
template<typename T>
223
static T* AllocateArray(const ALLOCATION_CALLBACKS& allocs, size_t count)
224
{
225
return (T*)Malloc(allocs, sizeof(T) * count, __alignof(T));
226
}
227
228
#define D3D12MA_NEW(allocs, type) new(D3D12MA::Allocate<type>(allocs))(type)
229
#define D3D12MA_NEW_ARRAY(allocs, type, count) new(D3D12MA::AllocateArray<type>((allocs), (count)))(type)
230
231
template<typename T>
232
void D3D12MA_DELETE(const ALLOCATION_CALLBACKS& allocs, T* memory)
233
{
234
if (memory)
235
{
236
memory->~T();
237
Free(allocs, memory);
238
}
239
}
240
template<typename T>
241
void D3D12MA_DELETE_ARRAY(const ALLOCATION_CALLBACKS& allocs, T* memory, size_t count)
242
{
243
if (memory)
244
{
245
for (size_t i = count; i--; )
246
{
247
memory[i].~T();
248
}
249
Free(allocs, memory);
250
}
251
}
252
253
static void SetupAllocationCallbacks(ALLOCATION_CALLBACKS& outAllocs, const ALLOCATION_CALLBACKS* allocationCallbacks)
254
{
255
if (allocationCallbacks)
256
{
257
outAllocs = *allocationCallbacks;
258
D3D12MA_ASSERT(outAllocs.pAllocate != NULL && outAllocs.pFree != NULL);
259
}
260
else
261
{
262
outAllocs.pAllocate = &DefaultAllocate;
263
outAllocs.pFree = &DefaultFree;
264
outAllocs.pPrivateData = NULL;
265
}
266
}
267
268
#define SAFE_RELEASE(ptr) do { if(ptr) { (ptr)->Release(); (ptr) = NULL; } } while(false)
269
270
#define D3D12MA_VALIDATE(cond) do { if(!(cond)) { \
271
D3D12MA_ASSERT(0 && "Validation failed: " #cond); \
272
return false; \
273
} } while(false)
274
275
template<typename T>
276
static T D3D12MA_MIN(const T& a, const T& b) { return a <= b ? a : b; }
277
template<typename T>
278
static T D3D12MA_MAX(const T& a, const T& b) { return a <= b ? b : a; }
279
280
template<typename T>
281
static void D3D12MA_SWAP(T& a, T& b) { T tmp = a; a = b; b = tmp; }
282
283
// Scans integer for index of first nonzero bit from the Least Significant Bit (LSB). If mask is 0 then returns UINT8_MAX
284
static UINT8 BitScanLSB(UINT64 mask)
285
{
286
#if defined(_MSC_VER) && defined(_WIN64)
287
unsigned long pos;
288
if (_BitScanForward64(&pos, mask))
289
return static_cast<UINT8>(pos);
290
return UINT8_MAX;
291
#elif defined __GNUC__ || defined __clang__
292
return static_cast<UINT8>(__builtin_ffsll(mask)) - 1U;
293
#else
294
UINT8 pos = 0;
295
UINT64 bit = 1;
296
do
297
{
298
if (mask & bit)
299
return pos;
300
bit <<= 1;
301
} while (pos++ < 63);
302
return UINT8_MAX;
303
#endif
304
}
305
// Scans integer for index of first nonzero bit from the Least Significant Bit (LSB). If mask is 0 then returns UINT8_MAX
306
static UINT8 BitScanLSB(UINT32 mask)
307
{
308
#ifdef _MSC_VER
309
unsigned long pos;
310
if (_BitScanForward(&pos, mask))
311
return static_cast<UINT8>(pos);
312
return UINT8_MAX;
313
#elif defined __GNUC__ || defined __clang__
314
return static_cast<UINT8>(__builtin_ffs(mask)) - 1U;
315
#else
316
UINT8 pos = 0;
317
UINT32 bit = 1;
318
do
319
{
320
if (mask & bit)
321
return pos;
322
bit <<= 1;
323
} while (pos++ < 31);
324
return UINT8_MAX;
325
#endif
326
}
327
328
// Scans integer for index of first nonzero bit from the Most Significant Bit (MSB). If mask is 0 then returns UINT8_MAX
329
static UINT8 BitScanMSB(UINT64 mask)
330
{
331
#if defined(_MSC_VER) && defined(_WIN64)
332
unsigned long pos;
333
if (_BitScanReverse64(&pos, mask))
334
return static_cast<UINT8>(pos);
335
#elif defined __GNUC__ || defined __clang__
336
if (mask)
337
return 63 - static_cast<UINT8>(__builtin_clzll(mask));
338
#else
339
UINT8 pos = 63;
340
UINT64 bit = 1ULL << 63;
341
do
342
{
343
if (mask & bit)
344
return pos;
345
bit >>= 1;
346
} while (pos-- > 0);
347
#endif
348
return UINT8_MAX;
349
}
350
// Scans integer for index of first nonzero bit from the Most Significant Bit (MSB). If mask is 0 then returns UINT8_MAX
351
static UINT8 BitScanMSB(UINT32 mask)
352
{
353
#ifdef _MSC_VER
354
unsigned long pos;
355
if (_BitScanReverse(&pos, mask))
356
return static_cast<UINT8>(pos);
357
#elif defined __GNUC__ || defined __clang__
358
if (mask)
359
return 31 - static_cast<UINT8>(__builtin_clz(mask));
360
#else
361
UINT8 pos = 31;
362
UINT32 bit = 1UL << 31;
363
do
364
{
365
if (mask & bit)
366
return pos;
367
bit >>= 1;
368
} while (pos-- > 0);
369
#endif
370
return UINT8_MAX;
371
}
372
373
/*
374
Returns true if given number is a power of two.
375
T must be unsigned integer number or signed integer but always nonnegative.
376
For 0 returns true.
377
*/
378
template <typename T>
379
static bool IsPow2(T x) { return (x & (x - 1)) == 0; }
380
381
// Aligns given value up to nearest multiply of align value. For example: AlignUp(11, 8) = 16.
382
// Use types like UINT, uint64_t as T.
383
template <typename T>
384
static T AlignUp(T val, T alignment)
385
{
386
D3D12MA_HEAVY_ASSERT(IsPow2(alignment));
387
return (val + alignment - 1) & ~(alignment - 1);
388
}
389
// Aligns given value down to nearest multiply of align value. For example: AlignUp(11, 8) = 8.
390
// Use types like UINT, uint64_t as T.
391
template <typename T>
392
static T AlignDown(T val, T alignment)
393
{
394
D3D12MA_HEAVY_ASSERT(IsPow2(alignment));
395
return val & ~(alignment - 1);
396
}
397
398
// Division with mathematical rounding to nearest number.
399
template <typename T>
400
static T RoundDiv(T x, T y) { return (x + (y / (T)2)) / y; }
401
template <typename T>
402
static T DivideRoundingUp(T x, T y) { return (x + y - 1) / y; }
403
404
static WCHAR HexDigitToChar(UINT8 digit)
405
{
406
if(digit < 10)
407
return L'0' + digit;
408
else
409
return L'A' + (digit - 10);
410
}
411
412
/*
413
Performs binary search and returns iterator to first element that is greater or
414
equal to `key`, according to comparison `cmp`.
415
416
Cmp should return true if first argument is less than second argument.
417
418
Returned value is the found element, if present in the collection or place where
419
new element with value (key) should be inserted.
420
*/
421
template <typename CmpLess, typename IterT, typename KeyT>
422
static IterT BinaryFindFirstNotLess(IterT beg, IterT end, const KeyT& key, const CmpLess& cmp)
423
{
424
size_t down = 0, up = (end - beg);
425
while (down < up)
426
{
427
const size_t mid = (down + up) / 2;
428
if (cmp(*(beg + mid), key))
429
{
430
down = mid + 1;
431
}
432
else
433
{
434
up = mid;
435
}
436
}
437
return beg + down;
438
}
439
440
/*
441
Performs binary search and returns iterator to an element that is equal to `key`,
442
according to comparison `cmp`.
443
444
Cmp should return true if first argument is less than second argument.
445
446
Returned value is the found element, if present in the collection or end if not
447
found.
448
*/
449
template<typename CmpLess, typename IterT, typename KeyT>
450
static IterT BinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp)
451
{
452
IterT it = BinaryFindFirstNotLess<CmpLess, IterT, KeyT>(beg, end, value, cmp);
453
if (it == end ||
454
(!cmp(*it, value) && !cmp(value, *it)))
455
{
456
return it;
457
}
458
return end;
459
}
460
461
static UINT HeapTypeToIndex(D3D12_HEAP_TYPE type)
462
{
463
switch (type)
464
{
465
case D3D12_HEAP_TYPE_DEFAULT: return 0;
466
case D3D12_HEAP_TYPE_UPLOAD: return 1;
467
case D3D12_HEAP_TYPE_READBACK: return 2;
468
case D3D12_HEAP_TYPE_CUSTOM: return 3;
469
default: D3D12MA_ASSERT(0); return UINT_MAX;
470
}
471
}
472
473
static D3D12_HEAP_TYPE IndexToHeapType(UINT heapTypeIndex)
474
{
475
D3D12MA_ASSERT(heapTypeIndex < 4);
476
// D3D12_HEAP_TYPE_DEFAULT starts at 1.
477
return (D3D12_HEAP_TYPE)(heapTypeIndex + 1);
478
}
479
480
static UINT64 HeapFlagsToAlignment(D3D12_HEAP_FLAGS flags, bool denyMsaaTextures)
481
{
482
/*
483
Documentation of D3D12_HEAP_DESC structure says:
484
485
- D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT defined as 64KB.
486
- D3D12_DEFAULT_MSAA_RESOURCE_PLACEMENT_ALIGNMENT defined as 4MB. An
487
application must decide whether the heap will contain multi-sample
488
anti-aliasing (MSAA), in which case, the application must choose [this flag].
489
490
https://docs.microsoft.com/en-us/windows/desktop/api/d3d12/ns-d3d12-d3d12_heap_desc
491
*/
492
493
if (denyMsaaTextures)
494
return D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT;
495
496
const D3D12_HEAP_FLAGS denyAllTexturesFlags =
497
D3D12_HEAP_FLAG_DENY_NON_RT_DS_TEXTURES | D3D12_HEAP_FLAG_DENY_RT_DS_TEXTURES;
498
const bool canContainAnyTextures =
499
(flags & denyAllTexturesFlags) != denyAllTexturesFlags;
500
return canContainAnyTextures ?
501
D3D12_DEFAULT_MSAA_RESOURCE_PLACEMENT_ALIGNMENT : D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT;
502
}
503
504
static ResourceClass HeapFlagsToResourceClass(D3D12_HEAP_FLAGS heapFlags)
505
{
506
const bool allowBuffers = (heapFlags & D3D12_HEAP_FLAG_DENY_BUFFERS) == 0;
507
const bool allowRtDsTextures = (heapFlags & D3D12_HEAP_FLAG_DENY_RT_DS_TEXTURES) == 0;
508
const bool allowNonRtDsTextures = (heapFlags & D3D12_HEAP_FLAG_DENY_NON_RT_DS_TEXTURES) == 0;
509
510
const uint8_t allowedGroupCount = (allowBuffers ? 1 : 0) + (allowRtDsTextures ? 1 : 0) + (allowNonRtDsTextures ? 1 : 0);
511
if (allowedGroupCount != 1)
512
return ResourceClass::Unknown;
513
514
if (allowRtDsTextures)
515
return ResourceClass::RT_DS_Texture;
516
if (allowNonRtDsTextures)
517
return ResourceClass::Non_RT_DS_Texture;
518
return ResourceClass::Buffer;
519
}
520
521
static bool IsHeapTypeStandard(D3D12_HEAP_TYPE type)
522
{
523
return type == D3D12_HEAP_TYPE_DEFAULT ||
524
type == D3D12_HEAP_TYPE_UPLOAD ||
525
type == D3D12_HEAP_TYPE_READBACK;
526
}
527
528
static D3D12_HEAP_PROPERTIES StandardHeapTypeToHeapProperties(D3D12_HEAP_TYPE type)
529
{
530
D3D12MA_ASSERT(IsHeapTypeStandard(type));
531
D3D12_HEAP_PROPERTIES result = {};
532
result.Type = type;
533
return result;
534
}
535
536
static bool IsFormatCompressed(DXGI_FORMAT format)
537
{
538
switch (format)
539
{
540
case DXGI_FORMAT_BC1_TYPELESS:
541
case DXGI_FORMAT_BC1_UNORM:
542
case DXGI_FORMAT_BC1_UNORM_SRGB:
543
case DXGI_FORMAT_BC2_TYPELESS:
544
case DXGI_FORMAT_BC2_UNORM:
545
case DXGI_FORMAT_BC2_UNORM_SRGB:
546
case DXGI_FORMAT_BC3_TYPELESS:
547
case DXGI_FORMAT_BC3_UNORM:
548
case DXGI_FORMAT_BC3_UNORM_SRGB:
549
case DXGI_FORMAT_BC4_TYPELESS:
550
case DXGI_FORMAT_BC4_UNORM:
551
case DXGI_FORMAT_BC4_SNORM:
552
case DXGI_FORMAT_BC5_TYPELESS:
553
case DXGI_FORMAT_BC5_UNORM:
554
case DXGI_FORMAT_BC5_SNORM:
555
case DXGI_FORMAT_BC6H_TYPELESS:
556
case DXGI_FORMAT_BC6H_UF16:
557
case DXGI_FORMAT_BC6H_SF16:
558
case DXGI_FORMAT_BC7_TYPELESS:
559
case DXGI_FORMAT_BC7_UNORM:
560
case DXGI_FORMAT_BC7_UNORM_SRGB:
561
return true;
562
default:
563
return false;
564
}
565
}
566
567
// Only some formats are supported. For others it returns 0.
568
static UINT GetBitsPerPixel(DXGI_FORMAT format)
569
{
570
switch (format)
571
{
572
case DXGI_FORMAT_R32G32B32A32_TYPELESS:
573
case DXGI_FORMAT_R32G32B32A32_FLOAT:
574
case DXGI_FORMAT_R32G32B32A32_UINT:
575
case DXGI_FORMAT_R32G32B32A32_SINT:
576
return 128;
577
case DXGI_FORMAT_R32G32B32_TYPELESS:
578
case DXGI_FORMAT_R32G32B32_FLOAT:
579
case DXGI_FORMAT_R32G32B32_UINT:
580
case DXGI_FORMAT_R32G32B32_SINT:
581
return 96;
582
case DXGI_FORMAT_R16G16B16A16_TYPELESS:
583
case DXGI_FORMAT_R16G16B16A16_FLOAT:
584
case DXGI_FORMAT_R16G16B16A16_UNORM:
585
case DXGI_FORMAT_R16G16B16A16_UINT:
586
case DXGI_FORMAT_R16G16B16A16_SNORM:
587
case DXGI_FORMAT_R16G16B16A16_SINT:
588
return 64;
589
case DXGI_FORMAT_R32G32_TYPELESS:
590
case DXGI_FORMAT_R32G32_FLOAT:
591
case DXGI_FORMAT_R32G32_UINT:
592
case DXGI_FORMAT_R32G32_SINT:
593
return 64;
594
case DXGI_FORMAT_R32G8X24_TYPELESS:
595
case DXGI_FORMAT_D32_FLOAT_S8X24_UINT:
596
case DXGI_FORMAT_R32_FLOAT_X8X24_TYPELESS:
597
case DXGI_FORMAT_X32_TYPELESS_G8X24_UINT:
598
return 64;
599
case DXGI_FORMAT_R10G10B10A2_TYPELESS:
600
case DXGI_FORMAT_R10G10B10A2_UNORM:
601
case DXGI_FORMAT_R10G10B10A2_UINT:
602
case DXGI_FORMAT_R11G11B10_FLOAT:
603
return 32;
604
case DXGI_FORMAT_R8G8B8A8_TYPELESS:
605
case DXGI_FORMAT_R8G8B8A8_UNORM:
606
case DXGI_FORMAT_R8G8B8A8_UNORM_SRGB:
607
case DXGI_FORMAT_R8G8B8A8_UINT:
608
case DXGI_FORMAT_R8G8B8A8_SNORM:
609
case DXGI_FORMAT_R8G8B8A8_SINT:
610
return 32;
611
case DXGI_FORMAT_R16G16_TYPELESS:
612
case DXGI_FORMAT_R16G16_FLOAT:
613
case DXGI_FORMAT_R16G16_UNORM:
614
case DXGI_FORMAT_R16G16_UINT:
615
case DXGI_FORMAT_R16G16_SNORM:
616
case DXGI_FORMAT_R16G16_SINT:
617
return 32;
618
case DXGI_FORMAT_R32_TYPELESS:
619
case DXGI_FORMAT_D32_FLOAT:
620
case DXGI_FORMAT_R32_FLOAT:
621
case DXGI_FORMAT_R32_UINT:
622
case DXGI_FORMAT_R32_SINT:
623
return 32;
624
case DXGI_FORMAT_R24G8_TYPELESS:
625
case DXGI_FORMAT_D24_UNORM_S8_UINT:
626
case DXGI_FORMAT_R24_UNORM_X8_TYPELESS:
627
case DXGI_FORMAT_X24_TYPELESS_G8_UINT:
628
return 32;
629
case DXGI_FORMAT_R8G8_TYPELESS:
630
case DXGI_FORMAT_R8G8_UNORM:
631
case DXGI_FORMAT_R8G8_UINT:
632
case DXGI_FORMAT_R8G8_SNORM:
633
case DXGI_FORMAT_R8G8_SINT:
634
return 16;
635
case DXGI_FORMAT_R16_TYPELESS:
636
case DXGI_FORMAT_R16_FLOAT:
637
case DXGI_FORMAT_D16_UNORM:
638
case DXGI_FORMAT_R16_UNORM:
639
case DXGI_FORMAT_R16_UINT:
640
case DXGI_FORMAT_R16_SNORM:
641
case DXGI_FORMAT_R16_SINT:
642
return 16;
643
case DXGI_FORMAT_R8_TYPELESS:
644
case DXGI_FORMAT_R8_UNORM:
645
case DXGI_FORMAT_R8_UINT:
646
case DXGI_FORMAT_R8_SNORM:
647
case DXGI_FORMAT_R8_SINT:
648
case DXGI_FORMAT_A8_UNORM:
649
return 8;
650
case DXGI_FORMAT_BC1_TYPELESS:
651
case DXGI_FORMAT_BC1_UNORM:
652
case DXGI_FORMAT_BC1_UNORM_SRGB:
653
return 4;
654
case DXGI_FORMAT_BC2_TYPELESS:
655
case DXGI_FORMAT_BC2_UNORM:
656
case DXGI_FORMAT_BC2_UNORM_SRGB:
657
return 8;
658
case DXGI_FORMAT_BC3_TYPELESS:
659
case DXGI_FORMAT_BC3_UNORM:
660
case DXGI_FORMAT_BC3_UNORM_SRGB:
661
return 8;
662
case DXGI_FORMAT_BC4_TYPELESS:
663
case DXGI_FORMAT_BC4_UNORM:
664
case DXGI_FORMAT_BC4_SNORM:
665
return 4;
666
case DXGI_FORMAT_BC5_TYPELESS:
667
case DXGI_FORMAT_BC5_UNORM:
668
case DXGI_FORMAT_BC5_SNORM:
669
return 8;
670
case DXGI_FORMAT_BC6H_TYPELESS:
671
case DXGI_FORMAT_BC6H_UF16:
672
case DXGI_FORMAT_BC6H_SF16:
673
return 8;
674
case DXGI_FORMAT_BC7_TYPELESS:
675
case DXGI_FORMAT_BC7_UNORM:
676
case DXGI_FORMAT_BC7_UNORM_SRGB:
677
return 8;
678
default:
679
return 0;
680
}
681
}
682
683
template<typename D3D12_RESOURCE_DESC_T>
684
static ResourceClass ResourceDescToResourceClass(const D3D12_RESOURCE_DESC_T& resDesc)
685
{
686
if (resDesc.Dimension == D3D12_RESOURCE_DIMENSION_BUFFER)
687
return ResourceClass::Buffer;
688
// Else: it's surely a texture.
689
const bool isRenderTargetOrDepthStencil =
690
(resDesc.Flags & (D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET | D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL)) != 0;
691
return isRenderTargetOrDepthStencil ? ResourceClass::RT_DS_Texture : ResourceClass::Non_RT_DS_Texture;
692
}
693
694
// This algorithm is overly conservative.
695
template<typename D3D12_RESOURCE_DESC_T>
696
static bool CanUseSmallAlignment(const D3D12_RESOURCE_DESC_T& resourceDesc)
697
{
698
if (resourceDesc.Dimension != D3D12_RESOURCE_DIMENSION_TEXTURE2D)
699
return false;
700
if ((resourceDesc.Flags & (D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET | D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL)) != 0)
701
return false;
702
if (resourceDesc.SampleDesc.Count > 1)
703
return false;
704
if (resourceDesc.DepthOrArraySize != 1)
705
return false;
706
707
UINT sizeX = (UINT)resourceDesc.Width;
708
UINT sizeY = resourceDesc.Height;
709
UINT bitsPerPixel = GetBitsPerPixel(resourceDesc.Format);
710
if (bitsPerPixel == 0)
711
return false;
712
713
if (IsFormatCompressed(resourceDesc.Format))
714
{
715
sizeX = DivideRoundingUp(sizeX, 4u);
716
sizeY = DivideRoundingUp(sizeY, 4u);
717
bitsPerPixel *= 16;
718
}
719
720
UINT tileSizeX = 0, tileSizeY = 0;
721
switch (bitsPerPixel)
722
{
723
case 8: tileSizeX = 64; tileSizeY = 64; break;
724
case 16: tileSizeX = 64; tileSizeY = 32; break;
725
case 32: tileSizeX = 32; tileSizeY = 32; break;
726
case 64: tileSizeX = 32; tileSizeY = 16; break;
727
case 128: tileSizeX = 16; tileSizeY = 16; break;
728
default: return false;
729
}
730
731
const UINT tileCount = DivideRoundingUp(sizeX, tileSizeX) * DivideRoundingUp(sizeY, tileSizeY);
732
return tileCount <= 16;
733
}
734
735
static bool ValidateAllocateMemoryParameters(
736
const ALLOCATION_DESC* pAllocDesc,
737
const D3D12_RESOURCE_ALLOCATION_INFO* pAllocInfo,
738
Allocation** ppAllocation)
739
{
740
return pAllocDesc &&
741
pAllocInfo &&
742
ppAllocation &&
743
(pAllocInfo->Alignment == 0 ||
744
pAllocInfo->Alignment == D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT ||
745
pAllocInfo->Alignment == D3D12_DEFAULT_MSAA_RESOURCE_PLACEMENT_ALIGNMENT) &&
746
pAllocInfo->SizeInBytes != 0 &&
747
pAllocInfo->SizeInBytes % (64ull * 1024) == 0;
748
}
749
750
#endif // _D3D12MA_FUNCTIONS
751
752
#ifndef _D3D12MA_STATISTICS_FUNCTIONS
753
754
static void ClearStatistics(Statistics& outStats)
755
{
756
outStats.BlockCount = 0;
757
outStats.AllocationCount = 0;
758
outStats.BlockBytes = 0;
759
outStats.AllocationBytes = 0;
760
}
761
762
static void ClearDetailedStatistics(DetailedStatistics& outStats)
763
{
764
ClearStatistics(outStats.Stats);
765
outStats.UnusedRangeCount = 0;
766
outStats.AllocationSizeMin = UINT64_MAX;
767
outStats.AllocationSizeMax = 0;
768
outStats.UnusedRangeSizeMin = UINT64_MAX;
769
outStats.UnusedRangeSizeMax = 0;
770
}
771
772
static void AddStatistics(Statistics& inoutStats, const Statistics& src)
773
{
774
inoutStats.BlockCount += src.BlockCount;
775
inoutStats.AllocationCount += src.AllocationCount;
776
inoutStats.BlockBytes += src.BlockBytes;
777
inoutStats.AllocationBytes += src.AllocationBytes;
778
}
779
780
static void AddDetailedStatistics(DetailedStatistics& inoutStats, const DetailedStatistics& src)
781
{
782
AddStatistics(inoutStats.Stats, src.Stats);
783
inoutStats.UnusedRangeCount += src.UnusedRangeCount;
784
inoutStats.AllocationSizeMin = D3D12MA_MIN(inoutStats.AllocationSizeMin, src.AllocationSizeMin);
785
inoutStats.AllocationSizeMax = D3D12MA_MAX(inoutStats.AllocationSizeMax, src.AllocationSizeMax);
786
inoutStats.UnusedRangeSizeMin = D3D12MA_MIN(inoutStats.UnusedRangeSizeMin, src.UnusedRangeSizeMin);
787
inoutStats.UnusedRangeSizeMax = D3D12MA_MAX(inoutStats.UnusedRangeSizeMax, src.UnusedRangeSizeMax);
788
}
789
790
static void AddDetailedStatisticsAllocation(DetailedStatistics& inoutStats, UINT64 size)
791
{
792
inoutStats.Stats.AllocationCount++;
793
inoutStats.Stats.AllocationBytes += size;
794
inoutStats.AllocationSizeMin = D3D12MA_MIN(inoutStats.AllocationSizeMin, size);
795
inoutStats.AllocationSizeMax = D3D12MA_MAX(inoutStats.AllocationSizeMax, size);
796
}
797
798
static void AddDetailedStatisticsUnusedRange(DetailedStatistics& inoutStats, UINT64 size)
799
{
800
inoutStats.UnusedRangeCount++;
801
inoutStats.UnusedRangeSizeMin = D3D12MA_MIN(inoutStats.UnusedRangeSizeMin, size);
802
inoutStats.UnusedRangeSizeMax = D3D12MA_MAX(inoutStats.UnusedRangeSizeMax, size);
803
}
804
805
#endif // _D3D12MA_STATISTICS_FUNCTIONS
806
807
808
#ifndef _D3D12MA_MUTEX
809
810
#ifndef D3D12MA_MUTEX
811
class Mutex
812
{
813
public:
814
void Lock() { m_Mutex.lock(); }
815
void Unlock() { m_Mutex.unlock(); }
816
817
private:
818
std::mutex m_Mutex;
819
};
820
#define D3D12MA_MUTEX Mutex
821
#endif
822
823
#ifndef D3D12MA_RW_MUTEX
824
#ifdef _WIN32
825
class RWMutex
826
{
827
public:
828
RWMutex() { InitializeSRWLock(&m_Lock); }
829
void LockRead() { AcquireSRWLockShared(&m_Lock); }
830
void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
831
void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
832
void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
833
834
private:
835
SRWLOCK m_Lock;
836
};
837
#else // #ifdef _WIN32
838
class RWMutex
839
{
840
public:
841
RWMutex() {}
842
void LockRead() { m_Mutex.lock_shared(); }
843
void UnlockRead() { m_Mutex.unlock_shared(); }
844
void LockWrite() { m_Mutex.lock(); }
845
void UnlockWrite() { m_Mutex.unlock(); }
846
847
private:
848
std::shared_timed_mutex m_Mutex;
849
};
850
#endif // #ifdef _WIN32
851
#define D3D12MA_RW_MUTEX RWMutex
852
#endif // #ifndef D3D12MA_RW_MUTEX
853
854
// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
855
struct MutexLock
856
{
857
D3D12MA_CLASS_NO_COPY(MutexLock);
858
public:
859
MutexLock(D3D12MA_MUTEX& mutex, bool useMutex = true) :
860
m_pMutex(useMutex ? &mutex : NULL)
861
{
862
if (m_pMutex) m_pMutex->Lock();
863
}
864
~MutexLock() { if (m_pMutex) m_pMutex->Unlock(); }
865
866
private:
867
D3D12MA_MUTEX* m_pMutex;
868
};
869
870
// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
871
struct MutexLockRead
872
{
873
D3D12MA_CLASS_NO_COPY(MutexLockRead);
874
public:
875
MutexLockRead(D3D12MA_RW_MUTEX& mutex, bool useMutex)
876
: m_pMutex(useMutex ? &mutex : NULL)
877
{
878
if(m_pMutex)
879
{
880
m_pMutex->LockRead();
881
}
882
}
883
~MutexLockRead() { if (m_pMutex) m_pMutex->UnlockRead(); }
884
885
private:
886
D3D12MA_RW_MUTEX* m_pMutex;
887
};
888
889
// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
890
struct MutexLockWrite
891
{
892
D3D12MA_CLASS_NO_COPY(MutexLockWrite);
893
public:
894
MutexLockWrite(D3D12MA_RW_MUTEX& mutex, bool useMutex)
895
: m_pMutex(useMutex ? &mutex : NULL)
896
{
897
if (m_pMutex) m_pMutex->LockWrite();
898
}
899
~MutexLockWrite() { if (m_pMutex) m_pMutex->UnlockWrite(); }
900
901
private:
902
D3D12MA_RW_MUTEX* m_pMutex;
903
};
904
905
#if D3D12MA_DEBUG_GLOBAL_MUTEX
906
static D3D12MA_MUTEX g_DebugGlobalMutex;
907
#define D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK MutexLock debugGlobalMutexLock(g_DebugGlobalMutex, true);
908
#else
909
#define D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
910
#endif
911
#endif // _D3D12MA_MUTEX
912
913
#ifndef _D3D12MA_VECTOR
914
/*
915
Dynamically resizing continuous array. Class with interface similar to std::vector.
916
T must be POD because constructors and destructors are not called and memcpy is
917
used for these objects.
918
*/
919
template<typename T>
920
class Vector
921
{
922
public:
923
using value_type = T;
924
using iterator = T*;
925
using const_iterator = const T*;
926
927
// allocationCallbacks externally owned, must outlive this object.
928
Vector(const ALLOCATION_CALLBACKS& allocationCallbacks);
929
Vector(size_t count, const ALLOCATION_CALLBACKS& allocationCallbacks);
930
Vector(const Vector<T>& src);
931
~Vector();
932
933
const ALLOCATION_CALLBACKS& GetAllocs() const { return m_AllocationCallbacks; }
934
bool empty() const { return m_Count == 0; }
935
size_t size() const { return m_Count; }
936
T* data() { return m_pArray; }
937
const T* data() const { return m_pArray; }
938
void clear(bool freeMemory = false) { resize(0, freeMemory); }
939
940
iterator begin() { return m_pArray; }
941
iterator end() { return m_pArray + m_Count; }
942
const_iterator cbegin() const { return m_pArray; }
943
const_iterator cend() const { return m_pArray + m_Count; }
944
const_iterator begin() const { return cbegin(); }
945
const_iterator end() const { return cend(); }
946
947
void push_front(const T& src) { insert(0, src); }
948
void push_back(const T& src);
949
void pop_front();
950
void pop_back();
951
952
T& front();
953
T& back();
954
const T& front() const;
955
const T& back() const;
956
957
void reserve(size_t newCapacity, bool freeMemory = false);
958
void resize(size_t newCount, bool freeMemory = false);
959
void insert(size_t index, const T& src);
960
void remove(size_t index);
961
962
template<typename CmpLess>
963
size_t InsertSorted(const T& value, const CmpLess& cmp);
964
template<typename CmpLess>
965
bool RemoveSorted(const T& value, const CmpLess& cmp);
966
967
Vector& operator=(const Vector<T>& rhs);
968
T& operator[](size_t index);
969
const T& operator[](size_t index) const;
970
971
private:
972
const ALLOCATION_CALLBACKS& m_AllocationCallbacks;
973
T* m_pArray;
974
size_t m_Count;
975
size_t m_Capacity;
976
};
977
978
#ifndef _D3D12MA_VECTOR_FUNCTIONS
979
template<typename T>
980
Vector<T>::Vector(const ALLOCATION_CALLBACKS& allocationCallbacks)
981
: m_AllocationCallbacks(allocationCallbacks),
982
m_pArray(NULL),
983
m_Count(0),
984
m_Capacity(0) {}
985
986
template<typename T>
987
Vector<T>::Vector(size_t count, const ALLOCATION_CALLBACKS& allocationCallbacks)
988
: m_AllocationCallbacks(allocationCallbacks),
989
m_pArray(count ? AllocateArray<T>(allocationCallbacks, count) : NULL),
990
m_Count(count),
991
m_Capacity(count) {}
992
993
template<typename T>
994
Vector<T>::Vector(const Vector<T>& src)
995
: m_AllocationCallbacks(src.m_AllocationCallbacks),
996
m_pArray(src.m_Count ? AllocateArray<T>(src.m_AllocationCallbacks, src.m_Count) : NULL),
997
m_Count(src.m_Count),
998
m_Capacity(src.m_Count)
999
{
1000
if (m_Count > 0)
1001
{
1002
memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
1003
}
1004
}
1005
1006
template<typename T>
1007
Vector<T>::~Vector()
1008
{
1009
Free(m_AllocationCallbacks, m_pArray);
1010
}
1011
1012
template<typename T>
1013
void Vector<T>::push_back(const T& src)
1014
{
1015
const size_t newIndex = size();
1016
resize(newIndex + 1);
1017
m_pArray[newIndex] = src;
1018
}
1019
1020
template<typename T>
1021
void Vector<T>::pop_front()
1022
{
1023
D3D12MA_HEAVY_ASSERT(m_Count > 0);
1024
remove(0);
1025
}
1026
1027
template<typename T>
1028
void Vector<T>::pop_back()
1029
{
1030
D3D12MA_HEAVY_ASSERT(m_Count > 0);
1031
resize(size() - 1);
1032
}
1033
1034
template<typename T>
1035
T& Vector<T>::front()
1036
{
1037
D3D12MA_HEAVY_ASSERT(m_Count > 0);
1038
return m_pArray[0];
1039
}
1040
1041
template<typename T>
1042
T& Vector<T>::back()
1043
{
1044
D3D12MA_HEAVY_ASSERT(m_Count > 0);
1045
return m_pArray[m_Count - 1];
1046
}
1047
1048
template<typename T>
1049
const T& Vector<T>::front() const
1050
{
1051
D3D12MA_HEAVY_ASSERT(m_Count > 0);
1052
return m_pArray[0];
1053
}
1054
1055
template<typename T>
1056
const T& Vector<T>::back() const
1057
{
1058
D3D12MA_HEAVY_ASSERT(m_Count > 0);
1059
return m_pArray[m_Count - 1];
1060
}
1061
1062
template<typename T>
1063
void Vector<T>::reserve(size_t newCapacity, bool freeMemory)
1064
{
1065
newCapacity = D3D12MA_MAX(newCapacity, m_Count);
1066
1067
if ((newCapacity < m_Capacity) && !freeMemory)
1068
{
1069
newCapacity = m_Capacity;
1070
}
1071
1072
if (newCapacity != m_Capacity)
1073
{
1074
T* const newArray = newCapacity ? AllocateArray<T>(m_AllocationCallbacks, newCapacity) : NULL;
1075
if (m_Count != 0)
1076
{
1077
memcpy(newArray, m_pArray, m_Count * sizeof(T));
1078
}
1079
Free(m_AllocationCallbacks, m_pArray);
1080
m_Capacity = newCapacity;
1081
m_pArray = newArray;
1082
}
1083
}
1084
1085
template<typename T>
1086
void Vector<T>::resize(size_t newCount, bool freeMemory)
1087
{
1088
size_t newCapacity = m_Capacity;
1089
if (newCount > m_Capacity)
1090
{
1091
newCapacity = D3D12MA_MAX(newCount, D3D12MA_MAX(m_Capacity * 3 / 2, (size_t)8));
1092
}
1093
else if (freeMemory)
1094
{
1095
newCapacity = newCount;
1096
}
1097
1098
if (newCapacity != m_Capacity)
1099
{
1100
T* const newArray = newCapacity ? AllocateArray<T>(m_AllocationCallbacks, newCapacity) : NULL;
1101
const size_t elementsToCopy = D3D12MA_MIN(m_Count, newCount);
1102
if (elementsToCopy != 0)
1103
{
1104
memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
1105
}
1106
Free(m_AllocationCallbacks, m_pArray);
1107
m_Capacity = newCapacity;
1108
m_pArray = newArray;
1109
}
1110
1111
m_Count = newCount;
1112
}
1113
1114
template<typename T>
1115
void Vector<T>::insert(size_t index, const T& src)
1116
{
1117
D3D12MA_HEAVY_ASSERT(index <= m_Count);
1118
const size_t oldCount = size();
1119
resize(oldCount + 1);
1120
if (index < oldCount)
1121
{
1122
memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
1123
}
1124
m_pArray[index] = src;
1125
}
1126
1127
template<typename T>
1128
void Vector<T>::remove(size_t index)
1129
{
1130
D3D12MA_HEAVY_ASSERT(index < m_Count);
1131
const size_t oldCount = size();
1132
if (index < oldCount - 1)
1133
{
1134
memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
1135
}
1136
resize(oldCount - 1);
1137
}
1138
1139
template<typename T> template<typename CmpLess>
1140
size_t Vector<T>::InsertSorted(const T& value, const CmpLess& cmp)
1141
{
1142
const size_t indexToInsert = BinaryFindFirstNotLess<CmpLess, iterator, T>(
1143
m_pArray,
1144
m_pArray + m_Count,
1145
value,
1146
cmp) - m_pArray;
1147
insert(indexToInsert, value);
1148
return indexToInsert;
1149
}
1150
1151
template<typename T> template<typename CmpLess>
1152
bool Vector<T>::RemoveSorted(const T& value, const CmpLess& cmp)
1153
{
1154
const iterator it = BinaryFindFirstNotLess(
1155
m_pArray,
1156
m_pArray + m_Count,
1157
value,
1158
cmp);
1159
if ((it != end()) && !cmp(*it, value) && !cmp(value, *it))
1160
{
1161
size_t indexToRemove = it - begin();
1162
remove(indexToRemove);
1163
return true;
1164
}
1165
return false;
1166
}
1167
1168
template<typename T>
1169
Vector<T>& Vector<T>::operator=(const Vector<T>& rhs)
1170
{
1171
if (&rhs != this)
1172
{
1173
resize(rhs.m_Count);
1174
if (m_Count != 0)
1175
{
1176
memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
1177
}
1178
}
1179
return *this;
1180
}
1181
1182
template<typename T>
1183
T& Vector<T>::operator[](size_t index)
1184
{
1185
D3D12MA_HEAVY_ASSERT(index < m_Count);
1186
return m_pArray[index];
1187
}
1188
1189
template<typename T>
1190
const T& Vector<T>::operator[](size_t index) const
1191
{
1192
D3D12MA_HEAVY_ASSERT(index < m_Count);
1193
return m_pArray[index];
1194
}
1195
#endif // _D3D12MA_VECTOR_FUNCTIONS
1196
#endif // _D3D12MA_VECTOR
1197
1198
#ifndef _D3D12MA_STRING_BUILDER
1199
class StringBuilder
1200
{
1201
public:
1202
StringBuilder(const ALLOCATION_CALLBACKS& allocationCallbacks) : m_Data(allocationCallbacks) {}
1203
1204
size_t GetLength() const { return m_Data.size(); }
1205
LPCWSTR GetData() const { return m_Data.data(); }
1206
1207
void Add(WCHAR ch) { m_Data.push_back(ch); }
1208
void Add(LPCWSTR str);
1209
void AddNewLine() { Add(L'\n'); }
1210
void AddNumber(UINT num);
1211
void AddNumber(UINT64 num);
1212
void AddPointer(const void* ptr);
1213
1214
private:
1215
Vector<WCHAR> m_Data;
1216
};
1217
1218
#ifndef _D3D12MA_STRING_BUILDER_FUNCTIONS
1219
void StringBuilder::Add(LPCWSTR str)
1220
{
1221
const size_t len = wcslen(str);
1222
if (len > 0)
1223
{
1224
const size_t oldCount = m_Data.size();
1225
m_Data.resize(oldCount + len);
1226
memcpy(m_Data.data() + oldCount, str, len * sizeof(WCHAR));
1227
}
1228
}
1229
1230
void StringBuilder::AddNumber(UINT num)
1231
{
1232
WCHAR buf[11];
1233
buf[10] = L'\0';
1234
WCHAR *p = &buf[10];
1235
do
1236
{
1237
*--p = L'0' + (num % 10);
1238
num /= 10;
1239
}
1240
while (num);
1241
Add(p);
1242
}
1243
1244
void StringBuilder::AddNumber(UINT64 num)
1245
{
1246
WCHAR buf[21];
1247
buf[20] = L'\0';
1248
WCHAR *p = &buf[20];
1249
do
1250
{
1251
*--p = L'0' + (num % 10);
1252
num /= 10;
1253
}
1254
while (num);
1255
Add(p);
1256
}
1257
1258
void StringBuilder::AddPointer(const void* ptr)
1259
{
1260
WCHAR buf[21];
1261
uintptr_t num = (uintptr_t)ptr;
1262
buf[20] = L'\0';
1263
WCHAR *p = &buf[20];
1264
do
1265
{
1266
*--p = HexDigitToChar((UINT8)(num & 0xF));
1267
num >>= 4;
1268
}
1269
while (num);
1270
Add(p);
1271
}
1272
1273
#endif // _D3D12MA_STRING_BUILDER_FUNCTIONS
1274
#endif // _D3D12MA_STRING_BUILDER
1275
1276
#ifndef _D3D12MA_JSON_WRITER
1277
/*
1278
Allows to conveniently build a correct JSON document to be written to the
1279
StringBuilder passed to the constructor.
1280
*/
1281
class JsonWriter
1282
{
1283
public:
1284
// stringBuilder - string builder to write the document to. Must remain alive for the whole lifetime of this object.
1285
JsonWriter(const ALLOCATION_CALLBACKS& allocationCallbacks, StringBuilder& stringBuilder);
1286
~JsonWriter();
1287
1288
// Begins object by writing "{".
1289
// Inside an object, you must call pairs of WriteString and a value, e.g.:
1290
// j.BeginObject(true); j.WriteString("A"); j.WriteNumber(1); j.WriteString("B"); j.WriteNumber(2); j.EndObject();
1291
// Will write: { "A": 1, "B": 2 }
1292
void BeginObject(bool singleLine = false);
1293
// Ends object by writing "}".
1294
void EndObject();
1295
1296
// Begins array by writing "[".
1297
// Inside an array, you can write a sequence of any values.
1298
void BeginArray(bool singleLine = false);
1299
// Ends array by writing "[".
1300
void EndArray();
1301
1302
// Writes a string value inside "".
1303
// pStr can contain any UTF-16 characters, including '"', new line etc. - they will be properly escaped.
1304
void WriteString(LPCWSTR pStr);
1305
1306
// Begins writing a string value.
1307
// Call BeginString, ContinueString, ContinueString, ..., EndString instead of
1308
// WriteString to conveniently build the string content incrementally, made of
1309
// parts including numbers.
1310
void BeginString(LPCWSTR pStr = NULL);
1311
// Posts next part of an open string.
1312
void ContinueString(LPCWSTR pStr);
1313
// Posts next part of an open string. The number is converted to decimal characters.
1314
void ContinueString(UINT num);
1315
void ContinueString(UINT64 num);
1316
void ContinueString_Pointer(const void* ptr);
1317
// Posts next part of an open string. Pointer value is converted to characters
1318
// using "%p" formatting - shown as hexadecimal number, e.g.: 000000081276Ad00
1319
// void ContinueString_Pointer(const void* ptr);
1320
// Ends writing a string value by writing '"'.
1321
void EndString(LPCWSTR pStr = NULL);
1322
1323
// Writes a number value.
1324
void WriteNumber(UINT num);
1325
void WriteNumber(UINT64 num);
1326
// Writes a boolean value - false or true.
1327
void WriteBool(bool b);
1328
// Writes a null value.
1329
void WriteNull();
1330
1331
void AddAllocationToObject(const Allocation& alloc);
1332
void AddDetailedStatisticsInfoObject(const DetailedStatistics& stats);
1333
1334
private:
1335
static const WCHAR* const INDENT;
1336
1337
enum CollectionType
1338
{
1339
COLLECTION_TYPE_OBJECT,
1340
COLLECTION_TYPE_ARRAY,
1341
};
1342
struct StackItem
1343
{
1344
CollectionType type;
1345
UINT valueCount;
1346
bool singleLineMode;
1347
};
1348
1349
StringBuilder& m_SB;
1350
Vector<StackItem> m_Stack;
1351
bool m_InsideString;
1352
1353
void BeginValue(bool isString);
1354
void WriteIndent(bool oneLess = false);
1355
};
1356
1357
#ifndef _D3D12MA_JSON_WRITER_FUNCTIONS
1358
const WCHAR* const JsonWriter::INDENT = L" ";
1359
1360
JsonWriter::JsonWriter(const ALLOCATION_CALLBACKS& allocationCallbacks, StringBuilder& stringBuilder)
1361
: m_SB(stringBuilder),
1362
m_Stack(allocationCallbacks),
1363
m_InsideString(false) {}
1364
1365
JsonWriter::~JsonWriter()
1366
{
1367
D3D12MA_ASSERT(!m_InsideString);
1368
D3D12MA_ASSERT(m_Stack.empty());
1369
}
1370
1371
void JsonWriter::BeginObject(bool singleLine)
1372
{
1373
D3D12MA_ASSERT(!m_InsideString);
1374
1375
BeginValue(false);
1376
m_SB.Add(L'{');
1377
1378
StackItem stackItem;
1379
stackItem.type = COLLECTION_TYPE_OBJECT;
1380
stackItem.valueCount = 0;
1381
stackItem.singleLineMode = singleLine;
1382
m_Stack.push_back(stackItem);
1383
}
1384
1385
void JsonWriter::EndObject()
1386
{
1387
D3D12MA_ASSERT(!m_InsideString);
1388
D3D12MA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
1389
D3D12MA_ASSERT(m_Stack.back().valueCount % 2 == 0);
1390
1391
WriteIndent(true);
1392
m_SB.Add(L'}');
1393
1394
m_Stack.pop_back();
1395
}
1396
1397
void JsonWriter::BeginArray(bool singleLine)
1398
{
1399
D3D12MA_ASSERT(!m_InsideString);
1400
1401
BeginValue(false);
1402
m_SB.Add(L'[');
1403
1404
StackItem stackItem;
1405
stackItem.type = COLLECTION_TYPE_ARRAY;
1406
stackItem.valueCount = 0;
1407
stackItem.singleLineMode = singleLine;
1408
m_Stack.push_back(stackItem);
1409
}
1410
1411
void JsonWriter::EndArray()
1412
{
1413
D3D12MA_ASSERT(!m_InsideString);
1414
D3D12MA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
1415
1416
WriteIndent(true);
1417
m_SB.Add(L']');
1418
1419
m_Stack.pop_back();
1420
}
1421
1422
void JsonWriter::WriteString(LPCWSTR pStr)
1423
{
1424
BeginString(pStr);
1425
EndString();
1426
}
1427
1428
void JsonWriter::BeginString(LPCWSTR pStr)
1429
{
1430
D3D12MA_ASSERT(!m_InsideString);
1431
1432
BeginValue(true);
1433
m_InsideString = true;
1434
m_SB.Add(L'"');
1435
if (pStr != NULL)
1436
{
1437
ContinueString(pStr);
1438
}
1439
}
1440
1441
void JsonWriter::ContinueString(LPCWSTR pStr)
1442
{
1443
D3D12MA_ASSERT(m_InsideString);
1444
D3D12MA_ASSERT(pStr);
1445
1446
for (const WCHAR *p = pStr; *p; ++p)
1447
{
1448
// the strings we encode are assumed to be in UTF-16LE format, the native
1449
// windows wide character Unicode format. In this encoding Unicode code
1450
// points U+0000 to U+D7FF and U+E000 to U+FFFF are encoded in two bytes,
1451
// and everything else takes more than two bytes. We will reject any
1452
// multi wchar character encodings for simplicity.
1453
UINT val = (UINT)*p;
1454
D3D12MA_ASSERT(((val <= 0xD7FF) || (0xE000 <= val && val <= 0xFFFF)) &&
1455
"Character not currently supported.");
1456
switch (*p)
1457
{
1458
case L'"': m_SB.Add(L'\\'); m_SB.Add(L'"'); break;
1459
case L'\\': m_SB.Add(L'\\'); m_SB.Add(L'\\'); break;
1460
case L'/': m_SB.Add(L'\\'); m_SB.Add(L'/'); break;
1461
case L'\b': m_SB.Add(L'\\'); m_SB.Add(L'b'); break;
1462
case L'\f': m_SB.Add(L'\\'); m_SB.Add(L'f'); break;
1463
case L'\n': m_SB.Add(L'\\'); m_SB.Add(L'n'); break;
1464
case L'\r': m_SB.Add(L'\\'); m_SB.Add(L'r'); break;
1465
case L'\t': m_SB.Add(L'\\'); m_SB.Add(L't'); break;
1466
default:
1467
// conservatively use encoding \uXXXX for any Unicode character
1468
// requiring more than one byte.
1469
if (32 <= val && val < 256)
1470
m_SB.Add(*p);
1471
else
1472
{
1473
m_SB.Add(L'\\');
1474
m_SB.Add(L'u');
1475
for (UINT i = 0; i < 4; ++i)
1476
{
1477
UINT hexDigit = (val & 0xF000) >> 12;
1478
val <<= 4;
1479
if (hexDigit < 10)
1480
m_SB.Add(L'0' + (WCHAR)hexDigit);
1481
else
1482
m_SB.Add(L'A' + (WCHAR)hexDigit);
1483
}
1484
}
1485
break;
1486
}
1487
}
1488
}
1489
1490
void JsonWriter::ContinueString(UINT num)
1491
{
1492
D3D12MA_ASSERT(m_InsideString);
1493
m_SB.AddNumber(num);
1494
}
1495
1496
void JsonWriter::ContinueString(UINT64 num)
1497
{
1498
D3D12MA_ASSERT(m_InsideString);
1499
m_SB.AddNumber(num);
1500
}
1501
1502
void JsonWriter::ContinueString_Pointer(const void* ptr)
1503
{
1504
D3D12MA_ASSERT(m_InsideString);
1505
m_SB.AddPointer(ptr);
1506
}
1507
1508
void JsonWriter::EndString(LPCWSTR pStr)
1509
{
1510
D3D12MA_ASSERT(m_InsideString);
1511
1512
if (pStr)
1513
ContinueString(pStr);
1514
m_SB.Add(L'"');
1515
m_InsideString = false;
1516
}
1517
1518
void JsonWriter::WriteNumber(UINT num)
1519
{
1520
D3D12MA_ASSERT(!m_InsideString);
1521
BeginValue(false);
1522
m_SB.AddNumber(num);
1523
}
1524
1525
void JsonWriter::WriteNumber(UINT64 num)
1526
{
1527
D3D12MA_ASSERT(!m_InsideString);
1528
BeginValue(false);
1529
m_SB.AddNumber(num);
1530
}
1531
1532
void JsonWriter::WriteBool(bool b)
1533
{
1534
D3D12MA_ASSERT(!m_InsideString);
1535
BeginValue(false);
1536
if (b)
1537
m_SB.Add(L"true");
1538
else
1539
m_SB.Add(L"false");
1540
}
1541
1542
void JsonWriter::WriteNull()
1543
{
1544
D3D12MA_ASSERT(!m_InsideString);
1545
BeginValue(false);
1546
m_SB.Add(L"null");
1547
}
1548
1549
void JsonWriter::AddAllocationToObject(const Allocation& alloc)
1550
{
1551
WriteString(L"Type");
1552
switch (alloc.m_PackedData.GetResourceDimension()) {
1553
case D3D12_RESOURCE_DIMENSION_UNKNOWN:
1554
WriteString(L"UNKNOWN");
1555
break;
1556
case D3D12_RESOURCE_DIMENSION_BUFFER:
1557
WriteString(L"BUFFER");
1558
break;
1559
case D3D12_RESOURCE_DIMENSION_TEXTURE1D:
1560
WriteString(L"TEXTURE1D");
1561
break;
1562
case D3D12_RESOURCE_DIMENSION_TEXTURE2D:
1563
WriteString(L"TEXTURE2D");
1564
break;
1565
case D3D12_RESOURCE_DIMENSION_TEXTURE3D:
1566
WriteString(L"TEXTURE3D");
1567
break;
1568
default: D3D12MA_ASSERT(0); break;
1569
}
1570
1571
WriteString(L"Size");
1572
WriteNumber(alloc.GetSize());
1573
WriteString(L"Usage");
1574
WriteNumber((UINT)alloc.m_PackedData.GetResourceFlags());
1575
1576
void* privateData = alloc.GetPrivateData();
1577
if (privateData)
1578
{
1579
WriteString(L"CustomData");
1580
BeginString();
1581
ContinueString_Pointer(privateData);
1582
EndString();
1583
}
1584
1585
LPCWSTR name = alloc.GetName();
1586
if (name != NULL)
1587
{
1588
WriteString(L"Name");
1589
WriteString(name);
1590
}
1591
if (alloc.m_PackedData.GetTextureLayout())
1592
{
1593
WriteString(L"Layout");
1594
WriteNumber((UINT)alloc.m_PackedData.GetTextureLayout());
1595
}
1596
}
1597
1598
void JsonWriter::AddDetailedStatisticsInfoObject(const DetailedStatistics& stats)
1599
{
1600
BeginObject();
1601
1602
WriteString(L"BlockCount");
1603
WriteNumber(stats.Stats.BlockCount);
1604
WriteString(L"BlockBytes");
1605
WriteNumber(stats.Stats.BlockBytes);
1606
WriteString(L"AllocationCount");
1607
WriteNumber(stats.Stats.AllocationCount);
1608
WriteString(L"AllocationBytes");
1609
WriteNumber(stats.Stats.AllocationBytes);
1610
WriteString(L"UnusedRangeCount");
1611
WriteNumber(stats.UnusedRangeCount);
1612
1613
if (stats.Stats.AllocationCount > 1)
1614
{
1615
WriteString(L"AllocationSizeMin");
1616
WriteNumber(stats.AllocationSizeMin);
1617
WriteString(L"AllocationSizeMax");
1618
WriteNumber(stats.AllocationSizeMax);
1619
}
1620
if (stats.UnusedRangeCount > 1)
1621
{
1622
WriteString(L"UnusedRangeSizeMin");
1623
WriteNumber(stats.UnusedRangeSizeMin);
1624
WriteString(L"UnusedRangeSizeMax");
1625
WriteNumber(stats.UnusedRangeSizeMax);
1626
}
1627
EndObject();
1628
}
1629
1630
void JsonWriter::BeginValue(bool isString)
1631
{
1632
if (!m_Stack.empty())
1633
{
1634
StackItem& currItem = m_Stack.back();
1635
if (currItem.type == COLLECTION_TYPE_OBJECT && currItem.valueCount % 2 == 0)
1636
{
1637
D3D12MA_ASSERT(isString);
1638
}
1639
1640
if (currItem.type == COLLECTION_TYPE_OBJECT && currItem.valueCount % 2 == 1)
1641
{
1642
m_SB.Add(L':'); m_SB.Add(L' ');
1643
}
1644
else if (currItem.valueCount > 0)
1645
{
1646
m_SB.Add(L','); m_SB.Add(L' ');
1647
WriteIndent();
1648
}
1649
else
1650
{
1651
WriteIndent();
1652
}
1653
++currItem.valueCount;
1654
}
1655
}
1656
1657
void JsonWriter::WriteIndent(bool oneLess)
1658
{
1659
if (!m_Stack.empty() && !m_Stack.back().singleLineMode)
1660
{
1661
m_SB.AddNewLine();
1662
1663
size_t count = m_Stack.size();
1664
if (count > 0 && oneLess)
1665
{
1666
--count;
1667
}
1668
for (size_t i = 0; i < count; ++i)
1669
{
1670
m_SB.Add(INDENT);
1671
}
1672
}
1673
}
1674
#endif // _D3D12MA_JSON_WRITER_FUNCTIONS
1675
#endif // _D3D12MA_JSON_WRITER
1676
1677
#ifndef _D3D12MA_POOL_ALLOCATOR
1678
/*
1679
Allocator for objects of type T using a list of arrays (pools) to speed up
1680
allocation. Number of elements that can be allocated is not bounded because
1681
allocator can create multiple blocks.
1682
T should be POD because constructor and destructor is not called in Alloc or
1683
Free.
1684
*/
1685
template<typename T>
1686
class PoolAllocator
1687
{
1688
D3D12MA_CLASS_NO_COPY(PoolAllocator)
1689
public:
1690
// allocationCallbacks externally owned, must outlive this object.
1691
PoolAllocator(const ALLOCATION_CALLBACKS& allocationCallbacks, UINT firstBlockCapacity);
1692
~PoolAllocator() { Clear(); }
1693
1694
void Clear();
1695
template<typename... Types>
1696
T* Alloc(Types... args);
1697
void Free(T* ptr);
1698
1699
private:
1700
union Item
1701
{
1702
UINT NextFreeIndex; // UINT32_MAX means end of list.
1703
alignas(T) char Value[sizeof(T)];
1704
};
1705
1706
struct ItemBlock
1707
{
1708
Item* pItems;
1709
UINT Capacity;
1710
UINT FirstFreeIndex;
1711
};
1712
1713
const ALLOCATION_CALLBACKS& m_AllocationCallbacks;
1714
const UINT m_FirstBlockCapacity;
1715
Vector<ItemBlock> m_ItemBlocks;
1716
1717
ItemBlock& CreateNewBlock();
1718
};
1719
1720
#ifndef _D3D12MA_POOL_ALLOCATOR_FUNCTIONS
1721
template<typename T>
1722
PoolAllocator<T>::PoolAllocator(const ALLOCATION_CALLBACKS& allocationCallbacks, UINT firstBlockCapacity)
1723
: m_AllocationCallbacks(allocationCallbacks),
1724
m_FirstBlockCapacity(firstBlockCapacity),
1725
m_ItemBlocks(allocationCallbacks)
1726
{
1727
D3D12MA_ASSERT(m_FirstBlockCapacity > 1);
1728
}
1729
1730
template<typename T>
1731
void PoolAllocator<T>::Clear()
1732
{
1733
for(size_t i = m_ItemBlocks.size(); i--; )
1734
{
1735
D3D12MA_DELETE_ARRAY(m_AllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
1736
}
1737
m_ItemBlocks.clear(true);
1738
}
1739
1740
template<typename T> template<typename... Types>
1741
T* PoolAllocator<T>::Alloc(Types... args)
1742
{
1743
for(size_t i = m_ItemBlocks.size(); i--; )
1744
{
1745
ItemBlock& block = m_ItemBlocks[i];
1746
// This block has some free items: Use first one.
1747
if(block.FirstFreeIndex != UINT32_MAX)
1748
{
1749
Item* const pItem = &block.pItems[block.FirstFreeIndex];
1750
block.FirstFreeIndex = pItem->NextFreeIndex;
1751
T* result = (T*)&pItem->Value;
1752
new(result)T(std::forward<Types>(args)...); // Explicit constructor call.
1753
return result;
1754
}
1755
}
1756
1757
// No block has free item: Create new one and use it.
1758
ItemBlock& newBlock = CreateNewBlock();
1759
Item* const pItem = &newBlock.pItems[0];
1760
newBlock.FirstFreeIndex = pItem->NextFreeIndex;
1761
T* result = (T*)pItem->Value;
1762
new(result)T(std::forward<Types>(args)...); // Explicit constructor call.
1763
return result;
1764
}
1765
1766
template<typename T>
1767
void PoolAllocator<T>::Free(T* ptr)
1768
{
1769
// Search all memory blocks to find ptr.
1770
for(size_t i = m_ItemBlocks.size(); i--; )
1771
{
1772
ItemBlock& block = m_ItemBlocks[i];
1773
1774
Item* pItemPtr;
1775
memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
1776
1777
// Check if pItemPtr is in address range of this block.
1778
if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
1779
{
1780
ptr->~T(); // Explicit destructor call.
1781
const UINT index = static_cast<UINT>(pItemPtr - block.pItems);
1782
pItemPtr->NextFreeIndex = block.FirstFreeIndex;
1783
block.FirstFreeIndex = index;
1784
return;
1785
}
1786
}
1787
D3D12MA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
1788
}
1789
1790
template<typename T>
1791
typename PoolAllocator<T>::ItemBlock& PoolAllocator<T>::CreateNewBlock()
1792
{
1793
const UINT newBlockCapacity = m_ItemBlocks.empty() ?
1794
m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;
1795
1796
const ItemBlock newBlock = {
1797
D3D12MA_NEW_ARRAY(m_AllocationCallbacks, Item, newBlockCapacity),
1798
newBlockCapacity,
1799
0 };
1800
1801
m_ItemBlocks.push_back(newBlock);
1802
1803
// Setup singly-linked list of all free items in this block.
1804
for(UINT i = 0; i < newBlockCapacity - 1; ++i)
1805
{
1806
newBlock.pItems[i].NextFreeIndex = i + 1;
1807
}
1808
newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX;
1809
return m_ItemBlocks.back();
1810
}
1811
#endif // _D3D12MA_POOL_ALLOCATOR_FUNCTIONS
1812
#endif // _D3D12MA_POOL_ALLOCATOR
1813
1814
#ifndef _D3D12MA_LIST
1815
/*
1816
Doubly linked list, with elements allocated out of PoolAllocator.
1817
Has custom interface, as well as STL-style interface, including iterator and
1818
const_iterator.
1819
*/
1820
template<typename T>
1821
class List
1822
{
1823
D3D12MA_CLASS_NO_COPY(List)
1824
public:
1825
struct Item
1826
{
1827
Item* pPrev;
1828
Item* pNext;
1829
T Value;
1830
};
1831
1832
class reverse_iterator;
1833
class const_reverse_iterator;
1834
class iterator
1835
{
1836
friend class List<T>;
1837
friend class const_iterator;
1838
1839
public:
1840
iterator() = default;
1841
iterator(const reverse_iterator& src)
1842
: m_pList(src.m_pList), m_pItem(src.m_pItem) {}
1843
1844
T& operator*() const;
1845
T* operator->() const;
1846
1847
iterator& operator++();
1848
iterator& operator--();
1849
iterator operator++(int);
1850
iterator operator--(int);
1851
1852
bool operator==(const iterator& rhs) const;
1853
bool operator!=(const iterator& rhs) const;
1854
1855
private:
1856
List<T>* m_pList = NULL;
1857
Item* m_pItem = NULL;
1858
1859
iterator(List<T>* pList, Item* pItem) : m_pList(pList), m_pItem(pItem) {}
1860
};
1861
1862
class reverse_iterator
1863
{
1864
friend class List<T>;
1865
friend class const_reverse_iterator;
1866
1867
public:
1868
reverse_iterator() = default;
1869
reverse_iterator(const iterator& src)
1870
: m_pList(src.m_pList), m_pItem(src.m_pItem) {}
1871
1872
T& operator*() const;
1873
T* operator->() const;
1874
1875
reverse_iterator& operator++();
1876
reverse_iterator& operator--();
1877
reverse_iterator operator++(int);
1878
reverse_iterator operator--(int);
1879
1880
bool operator==(const reverse_iterator& rhs) const;
1881
bool operator!=(const reverse_iterator& rhs) const;
1882
1883
private:
1884
List<T>* m_pList = NULL;
1885
Item* m_pItem = NULL;
1886
1887
reverse_iterator(List<T>* pList, Item* pItem)
1888
: m_pList(pList), m_pItem(pItem) {}
1889
};
1890
1891
class const_iterator
1892
{
1893
friend class List<T>;
1894
1895
public:
1896
const_iterator() = default;
1897
const_iterator(const iterator& src)
1898
: m_pList(src.m_pList), m_pItem(src.m_pItem) {}
1899
const_iterator(const reverse_iterator& src)
1900
: m_pList(src.m_pList), m_pItem(src.m_pItem) {}
1901
const_iterator(const const_reverse_iterator& src)
1902
: m_pList(src.m_pList), m_pItem(src.m_pItem) {}
1903
1904
iterator dropConst() const;
1905
const T& operator*() const;
1906
const T* operator->() const;
1907
1908
const_iterator& operator++();
1909
const_iterator& operator--();
1910
const_iterator operator++(int);
1911
const_iterator operator--(int);
1912
1913
bool operator==(const const_iterator& rhs) const;
1914
bool operator!=(const const_iterator& rhs) const;
1915
1916
private:
1917
const List<T>* m_pList = NULL;
1918
const Item* m_pItem = NULL;
1919
1920
const_iterator(const List<T>* pList, const Item* pItem)
1921
: m_pList(pList), m_pItem(pItem) {}
1922
};
1923
1924
class const_reverse_iterator
1925
{
1926
friend class List<T>;
1927
1928
public:
1929
const_reverse_iterator() = default;
1930
const_reverse_iterator(const iterator& src)
1931
: m_pList(src.m_pList), m_pItem(src.m_pItem) {}
1932
const_reverse_iterator(const reverse_iterator& src)
1933
: m_pList(src.m_pList), m_pItem(src.m_pItem) {}
1934
const_reverse_iterator(const const_iterator& src)
1935
: m_pList(src.m_pList), m_pItem(src.m_pItem) {}
1936
1937
reverse_iterator dropConst() const;
1938
const T& operator*() const;
1939
const T* operator->() const;
1940
1941
const_reverse_iterator& operator++();
1942
const_reverse_iterator& operator--();
1943
const_reverse_iterator operator++(int);
1944
const_reverse_iterator operator--(int);
1945
1946
bool operator==(const const_reverse_iterator& rhs) const;
1947
bool operator!=(const const_reverse_iterator& rhs) const;
1948
1949
private:
1950
const List<T>* m_pList = NULL;
1951
const Item* m_pItem = NULL;
1952
1953
const_reverse_iterator(const List<T>* pList, const Item* pItem)
1954
: m_pList(pList), m_pItem(pItem) {}
1955
};
1956
1957
// allocationCallbacks externally owned, must outlive this object.
1958
List(const ALLOCATION_CALLBACKS& allocationCallbacks);
1959
// Intentionally not calling Clear, because that would do unnecessary
1960
// computations to return all items to m_ItemAllocator as free.
1961
~List() = default;
1962
1963
size_t GetCount() const { return m_Count; }
1964
bool IsEmpty() const { return m_Count == 0; }
1965
1966
Item* Front() { return m_pFront; }
1967
const Item* Front() const { return m_pFront; }
1968
Item* Back() { return m_pBack; }
1969
const Item* Back() const { return m_pBack; }
1970
1971
bool empty() const { return IsEmpty(); }
1972
size_t size() const { return GetCount(); }
1973
void push_back(const T& value) { PushBack(value); }
1974
iterator insert(iterator it, const T& value) { return iterator(this, InsertBefore(it.m_pItem, value)); }
1975
void clear() { Clear(); }
1976
void erase(iterator it) { Remove(it.m_pItem); }
1977
1978
iterator begin() { return iterator(this, Front()); }
1979
iterator end() { return iterator(this, NULL); }
1980
reverse_iterator rbegin() { return reverse_iterator(this, Back()); }
1981
reverse_iterator rend() { return reverse_iterator(this, NULL); }
1982
1983
const_iterator cbegin() const { return const_iterator(this, Front()); }
1984
const_iterator cend() const { return const_iterator(this, NULL); }
1985
const_iterator begin() const { return cbegin(); }
1986
const_iterator end() const { return cend(); }
1987
1988
const_reverse_iterator crbegin() const { return const_reverse_iterator(this, Back()); }
1989
const_reverse_iterator crend() const { return const_reverse_iterator(this, NULL); }
1990
const_reverse_iterator rbegin() const { return crbegin(); }
1991
const_reverse_iterator rend() const { return crend(); }
1992
1993
Item* PushBack();
1994
Item* PushFront();
1995
Item* PushBack(const T& value);
1996
Item* PushFront(const T& value);
1997
void PopBack();
1998
void PopFront();
1999
2000
// Item can be null - it means PushBack.
2001
Item* InsertBefore(Item* pItem);
2002
// Item can be null - it means PushFront.
2003
Item* InsertAfter(Item* pItem);
2004
Item* InsertBefore(Item* pItem, const T& value);
2005
Item* InsertAfter(Item* pItem, const T& value);
2006
2007
void Clear();
2008
void Remove(Item* pItem);
2009
2010
private:
2011
const ALLOCATION_CALLBACKS& m_AllocationCallbacks;
2012
PoolAllocator<Item> m_ItemAllocator;
2013
Item* m_pFront;
2014
Item* m_pBack;
2015
size_t m_Count;
2016
};
2017
2018
#ifndef _D3D12MA_LIST_ITERATOR_FUNCTIONS
2019
template<typename T>
2020
T& List<T>::iterator::operator*() const
2021
{
2022
D3D12MA_HEAVY_ASSERT(m_pItem != NULL);
2023
return m_pItem->Value;
2024
}
2025
2026
template<typename T>
2027
T* List<T>::iterator::operator->() const
2028
{
2029
D3D12MA_HEAVY_ASSERT(m_pItem != NULL);
2030
return &m_pItem->Value;
2031
}
2032
2033
template<typename T>
2034
typename List<T>::iterator& List<T>::iterator::operator++()
2035
{
2036
D3D12MA_HEAVY_ASSERT(m_pItem != NULL);
2037
m_pItem = m_pItem->pNext;
2038
return *this;
2039
}
2040
2041
template<typename T>
2042
typename List<T>::iterator& List<T>::iterator::operator--()
2043
{
2044
if (m_pItem != NULL)
2045
{
2046
m_pItem = m_pItem->pPrev;
2047
}
2048
else
2049
{
2050
D3D12MA_HEAVY_ASSERT(!m_pList->IsEmpty());
2051
m_pItem = m_pList->Back();
2052
}
2053
return *this;
2054
}
2055
2056
template<typename T>
2057
typename List<T>::iterator List<T>::iterator::operator++(int)
2058
{
2059
iterator result = *this;
2060
++*this;
2061
return result;
2062
}
2063
2064
template<typename T>
2065
typename List<T>::iterator List<T>::iterator::operator--(int)
2066
{
2067
iterator result = *this;
2068
--*this;
2069
return result;
2070
}
2071
2072
template<typename T>
2073
bool List<T>::iterator::operator==(const iterator& rhs) const
2074
{
2075
D3D12MA_HEAVY_ASSERT(m_pList == rhs.m_pList);
2076
return m_pItem == rhs.m_pItem;
2077
}
2078
2079
template<typename T>
2080
bool List<T>::iterator::operator!=(const iterator& rhs) const
2081
{
2082
D3D12MA_HEAVY_ASSERT(m_pList == rhs.m_pList);
2083
return m_pItem != rhs.m_pItem;
2084
}
2085
#endif // _D3D12MA_LIST_ITERATOR_FUNCTIONS
2086
2087
#ifndef _D3D12MA_LIST_REVERSE_ITERATOR_FUNCTIONS
2088
template<typename T>
2089
T& List<T>::reverse_iterator::operator*() const
2090
{
2091
D3D12MA_HEAVY_ASSERT(m_pItem != NULL);
2092
return m_pItem->Value;
2093
}
2094
2095
template<typename T>
2096
T* List<T>::reverse_iterator::operator->() const
2097
{
2098
D3D12MA_HEAVY_ASSERT(m_pItem != NULL);
2099
return &m_pItem->Value;
2100
}
2101
2102
template<typename T>
2103
typename List<T>::reverse_iterator& List<T>::reverse_iterator::operator++()
2104
{
2105
D3D12MA_HEAVY_ASSERT(m_pItem != NULL);
2106
m_pItem = m_pItem->pPrev;
2107
return *this;
2108
}
2109
2110
template<typename T>
2111
typename List<T>::reverse_iterator& List<T>::reverse_iterator::operator--()
2112
{
2113
if (m_pItem != NULL)
2114
{
2115
m_pItem = m_pItem->pNext;
2116
}
2117
else
2118
{
2119
D3D12MA_HEAVY_ASSERT(!m_pList->IsEmpty());
2120
m_pItem = m_pList->Front();
2121
}
2122
return *this;
2123
}
2124
2125
template<typename T>
2126
typename List<T>::reverse_iterator List<T>::reverse_iterator::operator++(int)
2127
{
2128
reverse_iterator result = *this;
2129
++*this;
2130
return result;
2131
}
2132
2133
template<typename T>
2134
typename List<T>::reverse_iterator List<T>::reverse_iterator::operator--(int)
2135
{
2136
reverse_iterator result = *this;
2137
--*this;
2138
return result;
2139
}
2140
2141
template<typename T>
2142
bool List<T>::reverse_iterator::operator==(const reverse_iterator& rhs) const
2143
{
2144
D3D12MA_HEAVY_ASSERT(m_pList == rhs.m_pList);
2145
return m_pItem == rhs.m_pItem;
2146
}
2147
2148
template<typename T>
2149
bool List<T>::reverse_iterator::operator!=(const reverse_iterator& rhs) const
2150
{
2151
D3D12MA_HEAVY_ASSERT(m_pList == rhs.m_pList);
2152
return m_pItem != rhs.m_pItem;
2153
}
2154
#endif // _D3D12MA_LIST_REVERSE_ITERATOR_FUNCTIONS
2155
2156
#ifndef _D3D12MA_LIST_CONST_ITERATOR_FUNCTIONS
2157
template<typename T>
2158
typename List<T>::iterator List<T>::const_iterator::dropConst() const
2159
{
2160
return iterator(const_cast<List<T>*>(m_pList), const_cast<Item*>(m_pItem));
2161
}
2162
2163
template<typename T>
2164
const T& List<T>::const_iterator::operator*() const
2165
{
2166
D3D12MA_HEAVY_ASSERT(m_pItem != NULL);
2167
return m_pItem->Value;
2168
}
2169
2170
template<typename T>
2171
const T* List<T>::const_iterator::operator->() const
2172
{
2173
D3D12MA_HEAVY_ASSERT(m_pItem != NULL);
2174
return &m_pItem->Value;
2175
}
2176
2177
template<typename T>
2178
typename List<T>::const_iterator& List<T>::const_iterator::operator++()
2179
{
2180
D3D12MA_HEAVY_ASSERT(m_pItem != NULL);
2181
m_pItem = m_pItem->pNext;
2182
return *this;
2183
}
2184
2185
template<typename T>
2186
typename List<T>::const_iterator& List<T>::const_iterator::operator--()
2187
{
2188
if (m_pItem != NULL)
2189
{
2190
m_pItem = m_pItem->pPrev;
2191
}
2192
else
2193
{
2194
D3D12MA_HEAVY_ASSERT(!m_pList->IsEmpty());
2195
m_pItem = m_pList->Back();
2196
}
2197
return *this;
2198
}
2199
2200
template<typename T>
2201
typename List<T>::const_iterator List<T>::const_iterator::operator++(int)
2202
{
2203
const_iterator result = *this;
2204
++*this;
2205
return result;
2206
}
2207
2208
template<typename T>
2209
typename List<T>::const_iterator List<T>::const_iterator::operator--(int)
2210
{
2211
const_iterator result = *this;
2212
--*this;
2213
return result;
2214
}
2215
2216
template<typename T>
2217
bool List<T>::const_iterator::operator==(const const_iterator& rhs) const
2218
{
2219
D3D12MA_HEAVY_ASSERT(m_pList == rhs.m_pList);
2220
return m_pItem == rhs.m_pItem;
2221
}
2222
2223
template<typename T>
2224
bool List<T>::const_iterator::operator!=(const const_iterator& rhs) const
2225
{
2226
D3D12MA_HEAVY_ASSERT(m_pList == rhs.m_pList);
2227
return m_pItem != rhs.m_pItem;
2228
}
2229
#endif // _D3D12MA_LIST_CONST_ITERATOR_FUNCTIONS
2230
2231
#ifndef _D3D12MA_LIST_CONST_REVERSE_ITERATOR_FUNCTIONS
2232
template<typename T>
2233
typename List<T>::reverse_iterator List<T>::const_reverse_iterator::dropConst() const
2234
{
2235
return reverse_iterator(const_cast<List<T>*>(m_pList), const_cast<Item*>(m_pItem));
2236
}
2237
2238
template<typename T>
2239
const T& List<T>::const_reverse_iterator::operator*() const
2240
{
2241
D3D12MA_HEAVY_ASSERT(m_pItem != NULL);
2242
return m_pItem->Value;
2243
}
2244
2245
template<typename T>
2246
const T* List<T>::const_reverse_iterator::operator->() const
2247
{
2248
D3D12MA_HEAVY_ASSERT(m_pItem != NULL);
2249
return &m_pItem->Value;
2250
}
2251
2252
template<typename T>
2253
typename List<T>::const_reverse_iterator& List<T>::const_reverse_iterator::operator++()
2254
{
2255
D3D12MA_HEAVY_ASSERT(m_pItem != NULL);
2256
m_pItem = m_pItem->pPrev;
2257
return *this;
2258
}
2259
2260
template<typename T>
2261
typename List<T>::const_reverse_iterator& List<T>::const_reverse_iterator::operator--()
2262
{
2263
if (m_pItem != NULL)
2264
{
2265
m_pItem = m_pItem->pNext;
2266
}
2267
else
2268
{
2269
D3D12MA_HEAVY_ASSERT(!m_pList->IsEmpty());
2270
m_pItem = m_pList->Front();
2271
}
2272
return *this;
2273
}
2274
2275
template<typename T>
2276
typename List<T>::const_reverse_iterator List<T>::const_reverse_iterator::operator++(int)
2277
{
2278
const_reverse_iterator result = *this;
2279
++*this;
2280
return result;
2281
}
2282
2283
template<typename T>
2284
typename List<T>::const_reverse_iterator List<T>::const_reverse_iterator::operator--(int)
2285
{
2286
const_reverse_iterator result = *this;
2287
--*this;
2288
return result;
2289
}
2290
2291
template<typename T>
2292
bool List<T>::const_reverse_iterator::operator==(const const_reverse_iterator& rhs) const
2293
{
2294
D3D12MA_HEAVY_ASSERT(m_pList == rhs.m_pList);
2295
return m_pItem == rhs.m_pItem;
2296
}
2297
2298
template<typename T>
2299
bool List<T>::const_reverse_iterator::operator!=(const const_reverse_iterator& rhs) const
2300
{
2301
D3D12MA_HEAVY_ASSERT(m_pList == rhs.m_pList);
2302
return m_pItem != rhs.m_pItem;
2303
}
2304
#endif // _D3D12MA_LIST_CONST_REVERSE_ITERATOR_FUNCTIONS
2305
2306
#ifndef _D3D12MA_LIST_FUNCTIONS
2307
template<typename T>
2308
List<T>::List(const ALLOCATION_CALLBACKS& allocationCallbacks)
2309
: m_AllocationCallbacks(allocationCallbacks),
2310
m_ItemAllocator(allocationCallbacks, 128),
2311
m_pFront(NULL),
2312
m_pBack(NULL),
2313
m_Count(0) {}
2314
2315
template<typename T>
2316
void List<T>::Clear()
2317
{
2318
if(!IsEmpty())
2319
{
2320
Item* pItem = m_pBack;
2321
while(pItem != NULL)
2322
{
2323
Item* const pPrevItem = pItem->pPrev;
2324
m_ItemAllocator.Free(pItem);
2325
pItem = pPrevItem;
2326
}
2327
m_pFront = NULL;
2328
m_pBack = NULL;
2329
m_Count = 0;
2330
}
2331
}
2332
2333
template<typename T>
2334
typename List<T>::Item* List<T>::PushBack()
2335
{
2336
Item* const pNewItem = m_ItemAllocator.Alloc();
2337
pNewItem->pNext = NULL;
2338
if(IsEmpty())
2339
{
2340
pNewItem->pPrev = NULL;
2341
m_pFront = pNewItem;
2342
m_pBack = pNewItem;
2343
m_Count = 1;
2344
}
2345
else
2346
{
2347
pNewItem->pPrev = m_pBack;
2348
m_pBack->pNext = pNewItem;
2349
m_pBack = pNewItem;
2350
++m_Count;
2351
}
2352
return pNewItem;
2353
}
2354
2355
template<typename T>
2356
typename List<T>::Item* List<T>::PushFront()
2357
{
2358
Item* const pNewItem = m_ItemAllocator.Alloc();
2359
pNewItem->pPrev = NULL;
2360
if(IsEmpty())
2361
{
2362
pNewItem->pNext = NULL;
2363
m_pFront = pNewItem;
2364
m_pBack = pNewItem;
2365
m_Count = 1;
2366
}
2367
else
2368
{
2369
pNewItem->pNext = m_pFront;
2370
m_pFront->pPrev = pNewItem;
2371
m_pFront = pNewItem;
2372
++m_Count;
2373
}
2374
return pNewItem;
2375
}
2376
2377
template<typename T>
2378
typename List<T>::Item* List<T>::PushBack(const T& value)
2379
{
2380
Item* const pNewItem = PushBack();
2381
pNewItem->Value = value;
2382
return pNewItem;
2383
}
2384
2385
template<typename T>
2386
typename List<T>::Item* List<T>::PushFront(const T& value)
2387
{
2388
Item* const pNewItem = PushFront();
2389
pNewItem->Value = value;
2390
return pNewItem;
2391
}
2392
2393
template<typename T>
2394
void List<T>::PopBack()
2395
{
2396
D3D12MA_HEAVY_ASSERT(m_Count > 0);
2397
Item* const pBackItem = m_pBack;
2398
Item* const pPrevItem = pBackItem->pPrev;
2399
if(pPrevItem != NULL)
2400
{
2401
pPrevItem->pNext = NULL;
2402
}
2403
m_pBack = pPrevItem;
2404
m_ItemAllocator.Free(pBackItem);
2405
--m_Count;
2406
}
2407
2408
template<typename T>
2409
void List<T>::PopFront()
2410
{
2411
D3D12MA_HEAVY_ASSERT(m_Count > 0);
2412
Item* const pFrontItem = m_pFront;
2413
Item* const pNextItem = pFrontItem->pNext;
2414
if(pNextItem != NULL)
2415
{
2416
pNextItem->pPrev = NULL;
2417
}
2418
m_pFront = pNextItem;
2419
m_ItemAllocator.Free(pFrontItem);
2420
--m_Count;
2421
}
2422
2423
template<typename T>
2424
void List<T>::Remove(Item* pItem)
2425
{
2426
D3D12MA_HEAVY_ASSERT(pItem != NULL);
2427
D3D12MA_HEAVY_ASSERT(m_Count > 0);
2428
2429
if(pItem->pPrev != NULL)
2430
{
2431
pItem->pPrev->pNext = pItem->pNext;
2432
}
2433
else
2434
{
2435
D3D12MA_HEAVY_ASSERT(m_pFront == pItem);
2436
m_pFront = pItem->pNext;
2437
}
2438
2439
if(pItem->pNext != NULL)
2440
{
2441
pItem->pNext->pPrev = pItem->pPrev;
2442
}
2443
else
2444
{
2445
D3D12MA_HEAVY_ASSERT(m_pBack == pItem);
2446
m_pBack = pItem->pPrev;
2447
}
2448
2449
m_ItemAllocator.Free(pItem);
2450
--m_Count;
2451
}
2452
2453
template<typename T>
2454
typename List<T>::Item* List<T>::InsertBefore(Item* pItem)
2455
{
2456
if(pItem != NULL)
2457
{
2458
Item* const prevItem = pItem->pPrev;
2459
Item* const newItem = m_ItemAllocator.Alloc();
2460
newItem->pPrev = prevItem;
2461
newItem->pNext = pItem;
2462
pItem->pPrev = newItem;
2463
if(prevItem != NULL)
2464
{
2465
prevItem->pNext = newItem;
2466
}
2467
else
2468
{
2469
D3D12MA_HEAVY_ASSERT(m_pFront == pItem);
2470
m_pFront = newItem;
2471
}
2472
++m_Count;
2473
return newItem;
2474
}
2475
else
2476
{
2477
return PushBack();
2478
}
2479
}
2480
2481
template<typename T>
2482
typename List<T>::Item* List<T>::InsertAfter(Item* pItem)
2483
{
2484
if(pItem != NULL)
2485
{
2486
Item* const nextItem = pItem->pNext;
2487
Item* const newItem = m_ItemAllocator.Alloc();
2488
newItem->pNext = nextItem;
2489
newItem->pPrev = pItem;
2490
pItem->pNext = newItem;
2491
if(nextItem != NULL)
2492
{
2493
nextItem->pPrev = newItem;
2494
}
2495
else
2496
{
2497
D3D12MA_HEAVY_ASSERT(m_pBack == pItem);
2498
m_pBack = newItem;
2499
}
2500
++m_Count;
2501
return newItem;
2502
}
2503
else
2504
return PushFront();
2505
}
2506
2507
template<typename T>
2508
typename List<T>::Item* List<T>::InsertBefore(Item* pItem, const T& value)
2509
{
2510
Item* const newItem = InsertBefore(pItem);
2511
newItem->Value = value;
2512
return newItem;
2513
}
2514
2515
template<typename T>
2516
typename List<T>::Item* List<T>::InsertAfter(Item* pItem, const T& value)
2517
{
2518
Item* const newItem = InsertAfter(pItem);
2519
newItem->Value = value;
2520
return newItem;
2521
}
2522
#endif // _D3D12MA_LIST_FUNCTIONS
2523
#endif // _D3D12MA_LIST
2524
2525
#ifndef _D3D12MA_INTRUSIVE_LINKED_LIST
2526
/*
2527
Expected interface of ItemTypeTraits:
2528
struct MyItemTypeTraits
2529
{
2530
using ItemType = MyItem;
2531
static ItemType* GetPrev(const ItemType* item) { return item->myPrevPtr; }
2532
static ItemType* GetNext(const ItemType* item) { return item->myNextPtr; }
2533
static ItemType*& AccessPrev(ItemType* item) { return item->myPrevPtr; }
2534
static ItemType*& AccessNext(ItemType* item) { return item->myNextPtr; }
2535
};
2536
*/
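/*
Illustrative example (not part of the library) of a concrete item type that
satisfies the contract above:

    struct MyItem
    {
        MyItem* myPrevPtr = NULL;
        MyItem* myNextPtr = NULL;
        int payload = 0;
    };
    // MyItemTypeTraits as declared above.

    IntrusiveLinkedList<MyItemTypeTraits> list;
    MyItem a, b;
    list.PushBack(&a);
    list.InsertAfter(&a, &b);   // a <-> b
    list.Remove(&a);
    list.RemoveAll();           // must leave the list empty before destruction

The list never allocates or frees items; it only rewires the prev/next pointers
stored inside them, which is why the destructor asserts IsEmpty().
*/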
2537
template<typename ItemTypeTraits>
2538
class IntrusiveLinkedList
2539
{
2540
public:
2541
using ItemType = typename ItemTypeTraits::ItemType;
2542
static ItemType* GetPrev(const ItemType* item) { return ItemTypeTraits::GetPrev(item); }
2543
static ItemType* GetNext(const ItemType* item) { return ItemTypeTraits::GetNext(item); }
2544
2545
// Movable, not copyable.
2546
IntrusiveLinkedList() = default;
2547
IntrusiveLinkedList(const IntrusiveLinkedList&) = delete;
2548
IntrusiveLinkedList(IntrusiveLinkedList&& src);
2549
IntrusiveLinkedList& operator=(const IntrusiveLinkedList&) = delete;
2550
IntrusiveLinkedList& operator=(IntrusiveLinkedList&& src);
2551
~IntrusiveLinkedList() { D3D12MA_HEAVY_ASSERT(IsEmpty()); }
2552
2553
size_t GetCount() const { return m_Count; }
2554
bool IsEmpty() const { return m_Count == 0; }
2555
2556
ItemType* Front() { return m_Front; }
2557
ItemType* Back() { return m_Back; }
2558
const ItemType* Front() const { return m_Front; }
2559
const ItemType* Back() const { return m_Back; }
2560
2561
void PushBack(ItemType* item);
2562
void PushFront(ItemType* item);
2563
ItemType* PopBack();
2564
ItemType* PopFront();
2565
2566
// existingItem can be null - it means PushBack.
2567
void InsertBefore(ItemType* existingItem, ItemType* newItem);
2568
// existingItem can be null - it means PushFront.
2569
void InsertAfter(ItemType* existingItem, ItemType* newItem);
2570
2571
void Remove(ItemType* item);
2572
void RemoveAll();
2573
2574
private:
2575
ItemType* m_Front = NULL;
2576
ItemType* m_Back = NULL;
2577
size_t m_Count = 0;
2578
};
2579
2580
#ifndef _D3D12MA_INTRUSIVE_LINKED_LIST_FUNCTIONS
2581
template<typename ItemTypeTraits>
2582
IntrusiveLinkedList<ItemTypeTraits>::IntrusiveLinkedList(IntrusiveLinkedList&& src)
2583
: m_Front(src.m_Front), m_Back(src.m_Back), m_Count(src.m_Count)
2584
{
2585
src.m_Front = src.m_Back = NULL;
2586
src.m_Count = 0;
2587
}
2588
2589
template<typename ItemTypeTraits>
2590
IntrusiveLinkedList<ItemTypeTraits>& IntrusiveLinkedList<ItemTypeTraits>::operator=(IntrusiveLinkedList&& src)
2591
{
2592
if (&src != this)
2593
{
2594
D3D12MA_HEAVY_ASSERT(IsEmpty());
2595
m_Front = src.m_Front;
2596
m_Back = src.m_Back;
2597
m_Count = src.m_Count;
2598
src.m_Front = src.m_Back = NULL;
2599
src.m_Count = 0;
2600
}
2601
return *this;
2602
}
2603
2604
template<typename ItemTypeTraits>
2605
void IntrusiveLinkedList<ItemTypeTraits>::PushBack(ItemType* item)
2606
{
2607
D3D12MA_HEAVY_ASSERT(ItemTypeTraits::GetPrev(item) == NULL && ItemTypeTraits::GetNext(item) == NULL);
2608
if (IsEmpty())
2609
{
2610
m_Front = item;
2611
m_Back = item;
2612
m_Count = 1;
2613
}
2614
else
2615
{
2616
ItemTypeTraits::AccessPrev(item) = m_Back;
2617
ItemTypeTraits::AccessNext(m_Back) = item;
2618
m_Back = item;
2619
++m_Count;
2620
}
2621
}
2622
2623
template<typename ItemTypeTraits>
2624
void IntrusiveLinkedList<ItemTypeTraits>::PushFront(ItemType* item)
2625
{
2626
D3D12MA_HEAVY_ASSERT(ItemTypeTraits::GetPrev(item) == NULL && ItemTypeTraits::GetNext(item) == NULL);
2627
if (IsEmpty())
2628
{
2629
m_Front = item;
2630
m_Back = item;
2631
m_Count = 1;
2632
}
2633
else
2634
{
2635
ItemTypeTraits::AccessNext(item) = m_Front;
2636
ItemTypeTraits::AccessPrev(m_Front) = item;
2637
m_Front = item;
2638
++m_Count;
2639
}
2640
}
2641
2642
template<typename ItemTypeTraits>
2643
typename IntrusiveLinkedList<ItemTypeTraits>::ItemType* IntrusiveLinkedList<ItemTypeTraits>::PopBack()
2644
{
2645
D3D12MA_HEAVY_ASSERT(m_Count > 0);
2646
ItemType* const backItem = m_Back;
2647
ItemType* const prevItem = ItemTypeTraits::GetPrev(backItem);
2648
if (prevItem != NULL)
2649
{
2650
ItemTypeTraits::AccessNext(prevItem) = NULL;
2651
}
2652
m_Back = prevItem;
2653
--m_Count;
2654
ItemTypeTraits::AccessPrev(backItem) = NULL;
2655
ItemTypeTraits::AccessNext(backItem) = NULL;
2656
return backItem;
2657
}
2658
2659
template<typename ItemTypeTraits>
2660
typename IntrusiveLinkedList<ItemTypeTraits>::ItemType* IntrusiveLinkedList<ItemTypeTraits>::PopFront()
2661
{
2662
D3D12MA_HEAVY_ASSERT(m_Count > 0);
2663
ItemType* const frontItem = m_Front;
2664
ItemType* const nextItem = ItemTypeTraits::GetNext(frontItem);
2665
if (nextItem != NULL)
2666
{
2667
ItemTypeTraits::AccessPrev(nextItem) = NULL;
2668
}
2669
m_Front = nextItem;
2670
--m_Count;
2671
ItemTypeTraits::AccessPrev(frontItem) = NULL;
2672
ItemTypeTraits::AccessNext(frontItem) = NULL;
2673
return frontItem;
2674
}
2675
2676
template<typename ItemTypeTraits>
2677
void IntrusiveLinkedList<ItemTypeTraits>::InsertBefore(ItemType* existingItem, ItemType* newItem)
2678
{
2679
D3D12MA_HEAVY_ASSERT(newItem != NULL && ItemTypeTraits::GetPrev(newItem) == NULL && ItemTypeTraits::GetNext(newItem) == NULL);
2680
if (existingItem != NULL)
2681
{
2682
ItemType* const prevItem = ItemTypeTraits::GetPrev(existingItem);
2683
ItemTypeTraits::AccessPrev(newItem) = prevItem;
2684
ItemTypeTraits::AccessNext(newItem) = existingItem;
2685
ItemTypeTraits::AccessPrev(existingItem) = newItem;
2686
if (prevItem != NULL)
2687
{
2688
ItemTypeTraits::AccessNext(prevItem) = newItem;
2689
}
2690
else
2691
{
2692
D3D12MA_HEAVY_ASSERT(m_Front == existingItem);
2693
m_Front = newItem;
2694
}
2695
++m_Count;
2696
}
2697
else
2698
PushBack(newItem);
2699
}
2700
2701
template<typename ItemTypeTraits>
2702
void IntrusiveLinkedList<ItemTypeTraits>::InsertAfter(ItemType* existingItem, ItemType* newItem)
2703
{
2704
D3D12MA_HEAVY_ASSERT(newItem != NULL && ItemTypeTraits::GetPrev(newItem) == NULL && ItemTypeTraits::GetNext(newItem) == NULL);
2705
if (existingItem != NULL)
2706
{
2707
ItemType* const nextItem = ItemTypeTraits::GetNext(existingItem);
2708
ItemTypeTraits::AccessNext(newItem) = nextItem;
2709
ItemTypeTraits::AccessPrev(newItem) = existingItem;
2710
ItemTypeTraits::AccessNext(existingItem) = newItem;
2711
if (nextItem != NULL)
2712
{
2713
ItemTypeTraits::AccessPrev(nextItem) = newItem;
2714
}
2715
else
2716
{
2717
D3D12MA_HEAVY_ASSERT(m_Back == existingItem);
2718
m_Back = newItem;
2719
}
2720
++m_Count;
2721
}
2722
else
2723
PushFront(newItem);
2724
}
2725
2726
template<typename ItemTypeTraits>
2727
void IntrusiveLinkedList<ItemTypeTraits>::Remove(ItemType* item)
2728
{
2729
D3D12MA_HEAVY_ASSERT(item != NULL && m_Count > 0);
2730
if (ItemTypeTraits::GetPrev(item) != NULL)
2731
{
2732
ItemTypeTraits::AccessNext(ItemTypeTraits::AccessPrev(item)) = ItemTypeTraits::GetNext(item);
2733
}
2734
else
2735
{
2736
D3D12MA_HEAVY_ASSERT(m_Front == item);
2737
m_Front = ItemTypeTraits::GetNext(item);
2738
}
2739
2740
if (ItemTypeTraits::GetNext(item) != NULL)
2741
{
2742
ItemTypeTraits::AccessPrev(ItemTypeTraits::AccessNext(item)) = ItemTypeTraits::GetPrev(item);
2743
}
2744
else
2745
{
2746
D3D12MA_HEAVY_ASSERT(m_Back == item);
2747
m_Back = ItemTypeTraits::GetPrev(item);
2748
}
2749
ItemTypeTraits::AccessPrev(item) = NULL;
2750
ItemTypeTraits::AccessNext(item) = NULL;
2751
--m_Count;
2752
}
2753
2754
template<typename ItemTypeTraits>
2755
void IntrusiveLinkedList<ItemTypeTraits>::RemoveAll()
2756
{
2757
if (!IsEmpty())
2758
{
2759
ItemType* item = m_Back;
2760
while (item != NULL)
2761
{
2762
ItemType* const prevItem = ItemTypeTraits::AccessPrev(item);
2763
ItemTypeTraits::AccessPrev(item) = NULL;
2764
ItemTypeTraits::AccessNext(item) = NULL;
2765
item = prevItem;
2766
}
2767
m_Front = NULL;
2768
m_Back = NULL;
2769
m_Count = 0;
2770
}
2771
}
2772
#endif // _D3D12MA_INTRUSIVE_LINKED_LIST_FUNCTIONS
2773
#endif // _D3D12MA_INTRUSIVE_LINKED_LIST
2774
2775
#ifndef _D3D12MA_ALLOCATION_OBJECT_ALLOCATOR
2776
/*
2777
Thread-safe wrapper over PoolAllocator free list, for allocation of Allocation objects.
2778
*/
2779
class AllocationObjectAllocator
2780
{
2781
D3D12MA_CLASS_NO_COPY(AllocationObjectAllocator);
2782
public:
2783
AllocationObjectAllocator(const ALLOCATION_CALLBACKS& allocationCallbacks)
2784
: m_Allocator(allocationCallbacks, 1024) {}
2785
2786
template<typename... Types>
2787
Allocation* Allocate(Types... args);
2788
void Free(Allocation* alloc);
2789
2790
private:
2791
D3D12MA_MUTEX m_Mutex;
2792
PoolAllocator<Allocation> m_Allocator;
2793
};
2794
2795
#ifndef _D3D12MA_ALLOCATION_OBJECT_ALLOCATOR_FUNCTIONS
2796
template<typename... Types>
2797
Allocation* AllocationObjectAllocator::Allocate(Types... args)
2798
{
2799
MutexLock mutexLock(m_Mutex);
2800
return m_Allocator.Alloc(std::forward<Types>(args)...);
2801
}
2802
2803
void AllocationObjectAllocator::Free(Allocation* alloc)
2804
{
2805
MutexLock mutexLock(m_Mutex);
2806
m_Allocator.Free(alloc);
2807
}
2808
#endif // _D3D12MA_ALLOCATION_OBJECT_ALLOCATOR_FUNCTIONS
2809
#endif // _D3D12MA_ALLOCATION_OBJECT_ALLOCATOR
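/*
Usage sketch (illustrative only): Allocate() forwards its arguments through
PoolAllocator::Alloc, where the Allocation object is constructed in place inside a
pool block, and both Allocate() and Free() are serialized by the internal
D3D12MA_MUTEX. The constructor arguments shown here are hypothetical placeholders,
not the real Allocation signature:

    Allocation* alloc = m_AllocationObjectAllocator.Allocate(someCtorArgs...);
    ...
    m_AllocationObjectAllocator.Free(alloc);   // runs ~Allocation and recycles the pool slot
*/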
2810
2811
#ifndef _D3D12MA_SUBALLOCATION
2812
/*
2813
Represents a region of NormalBlock that is either assigned and returned as
2814
allocated memory block or free.
2815
*/
2816
struct Suballocation
2817
{
2818
UINT64 offset;
2819
UINT64 size;
2820
void* privateData;
2821
SuballocationType type;
2822
};
2823
using SuballocationList = List<Suballocation>;
2824
2825
// Comparator for offsets.
2826
struct SuballocationOffsetLess
2827
{
2828
bool operator()(const Suballocation& lhs, const Suballocation& rhs) const
2829
{
2830
return lhs.offset < rhs.offset;
2831
}
2832
};
2833
2834
struct SuballocationOffsetGreater
2835
{
2836
bool operator()(const Suballocation& lhs, const Suballocation& rhs) const
2837
{
2838
return lhs.offset > rhs.offset;
2839
}
2840
};
2841
2842
struct SuballocationItemSizeLess
2843
{
2844
bool operator()(const SuballocationList::iterator lhs, const SuballocationList::iterator rhs) const
2845
{
2846
return lhs->size < rhs->size;
2847
}
2848
bool operator()(const SuballocationList::iterator lhs, UINT64 rhsSize) const
2849
{
2850
return lhs->size < rhsSize;
2851
}
2852
};
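// Note: the two operator() overloads of SuballocationItemSizeLess above allow the
// same comparator to order iterators by suballocation size and to binary-search the
// size-sorted vector with a plain UINT64 key (see the BinaryFindFirstNotLess calls
// elsewhere in this file) without building a temporary Suballocation to hold the key.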
2853
#endif // _D3D12MA_SUBALLOCATION
2854
2855
#ifndef _D3D12MA_ALLOCATION_REQUEST
2856
/*
2857
Parameters of planned allocation inside a NormalBlock.
2858
*/
2859
struct AllocationRequest
2860
{
2861
AllocHandle allocHandle;
2862
UINT64 size;
2863
UINT64 algorithmData;
2864
UINT64 sumFreeSize; // Sum size of free items that overlap with proposed allocation.
2865
UINT64 sumItemSize; // Sum size of items to be made lost that overlap with the proposed allocation.
2866
SuballocationList::iterator item;
2867
BOOL zeroInitialized = FALSE; // TODO Implement proper handling in TLSF and Linear, using ZeroInitializedRange class.
2868
};
2869
#endif // _D3D12MA_ALLOCATION_REQUEST
2870
2871
#ifndef _D3D12MA_ZERO_INITIALIZED_RANGE
2872
/*
2873
Keeps track of the range of bytes that are surely initialized with zeros.
2874
Everything outside of it is considered uninitialized memory that may contain
2875
garbage data.
2876
2877
The range is left-inclusive.
2878
*/
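/*
Worked example (illustrative only): after Reset(100) the zero range is [0, 100).
MarkRangeAsUsed(10, 20) cannot represent the two disjoint leftovers [0, 10) and
[20, 100), so it keeps the larger one and shrinks the range to [20, 100). Afterwards:

    IsRangeZeroInitialized(30, 40)  ->  TRUE   // entirely inside [20, 100)
    IsRangeZeroInitialized( 5, 25)  ->  FALSE  // overlaps the used/unknown part
*/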
2879
class ZeroInitializedRange
2880
{
2881
public:
2882
void Reset(UINT64 size);
2883
BOOL IsRangeZeroInitialized(UINT64 beg, UINT64 end) const;
2884
void MarkRangeAsUsed(UINT64 usedBeg, UINT64 usedEnd);
2885
2886
private:
2887
UINT64 m_ZeroBeg = 0, m_ZeroEnd = 0;
2888
};
2889
2890
#ifndef _D3D12MA_ZERO_INITIALIZED_RANGE_FUNCTIONS
2891
void ZeroInitializedRange::Reset(UINT64 size)
2892
{
2893
D3D12MA_ASSERT(size > 0);
2894
m_ZeroBeg = 0;
2895
m_ZeroEnd = size;
2896
}
2897
2898
BOOL ZeroInitializedRange::IsRangeZeroInitialized(UINT64 beg, UINT64 end) const
2899
{
2900
D3D12MA_ASSERT(beg < end);
2901
return m_ZeroBeg <= beg && end <= m_ZeroEnd;
2902
}
2903
2904
void ZeroInitializedRange::MarkRangeAsUsed(UINT64 usedBeg, UINT64 usedEnd)
2905
{
2906
D3D12MA_ASSERT(usedBeg < usedEnd);
2907
// No new bytes marked.
2908
if (usedEnd <= m_ZeroBeg || m_ZeroEnd <= usedBeg)
2909
{
2910
return;
2911
}
2912
// All bytes marked.
2913
if (usedBeg <= m_ZeroBeg && m_ZeroEnd <= usedEnd)
2914
{
2915
m_ZeroBeg = m_ZeroEnd = 0;
2916
}
2917
// Some bytes marked.
2918
else
2919
{
2920
const UINT64 remainingZeroBefore = usedBeg > m_ZeroBeg ? usedBeg - m_ZeroBeg : 0;
2921
const UINT64 remainingZeroAfter = usedEnd < m_ZeroEnd ? m_ZeroEnd - usedEnd : 0;
2922
D3D12MA_ASSERT(remainingZeroBefore > 0 || remainingZeroAfter > 0);
2923
if (remainingZeroBefore > remainingZeroAfter)
2924
{
2925
m_ZeroEnd = usedBeg;
2926
}
2927
else
2928
{
2929
m_ZeroBeg = usedEnd;
2930
}
2931
}
2932
}
2933
#endif // _D3D12MA_ZERO_INITIALIZED_RANGE_FUNCTIONS
2934
#endif // _D3D12MA_ZERO_INITIALIZED_RANGE
2935
2936
#ifndef _D3D12MA_BLOCK_METADATA
2937
/*
2938
Data structure used for bookkeeping of allocations and unused ranges of memory
2939
in a single ID3D12Heap memory block.
2940
*/
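/*
Typical call sequence (illustrative only) as driven by the owning block or virtual
block: Init(size) once, then for each allocation

    AllocationRequest request = {};
    if(metadata->CreateAllocationRequest(size, alignment, upperAddress, strategy, &request))
        metadata->Alloc(request, size, privateData);   // commit the reserved spot
    ...
    metadata->Free(request.allocHandle);               // later, to release it

CreateAllocationRequest() only searches and validates; nothing in the metadata
changes until Alloc() commits the request, so a failed search leaves it untouched.
*/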
2941
class BlockMetadata
2942
{
2943
public:
2944
BlockMetadata(const ALLOCATION_CALLBACKS* allocationCallbacks, bool isVirtual);
2945
virtual ~BlockMetadata() = default;
2946
2947
virtual void Init(UINT64 size) { m_Size = size; }
2948
// Validates all data structures inside this object. If not valid, returns false.
2949
virtual bool Validate() const = 0;
2950
UINT64 GetSize() const { return m_Size; }
2951
bool IsVirtual() const { return m_IsVirtual; }
2952
virtual size_t GetAllocationCount() const = 0;
2953
virtual size_t GetFreeRegionsCount() const = 0;
2954
virtual UINT64 GetSumFreeSize() const = 0;
2955
virtual UINT64 GetAllocationOffset(AllocHandle allocHandle) const = 0;
2956
// Returns true if this block is empty - contains only a single free suballocation.
2957
virtual bool IsEmpty() const = 0;
2958
2959
virtual void GetAllocationInfo(AllocHandle allocHandle, VIRTUAL_ALLOCATION_INFO& outInfo) const = 0;
2960
2961
// Tries to find a place for suballocation with given parameters inside this block.
2962
// If succeeded, fills pAllocationRequest and returns true.
2963
// If failed, returns false.
2964
virtual bool CreateAllocationRequest(
2965
UINT64 allocSize,
2966
UINT64 allocAlignment,
2967
bool upperAddress,
2968
UINT32 strategy,
2969
AllocationRequest* pAllocationRequest) = 0;
2970
2971
// Makes actual allocation based on request. Request must already be checked and valid.
2972
virtual void Alloc(
2973
const AllocationRequest& request,
2974
UINT64 allocSize,
2975
void* PrivateData) = 0;
2976
2977
virtual void Free(AllocHandle allocHandle) = 0;
2978
// Frees all allocations.
2979
// Careful! Don't call it if there are Allocation objects owned by pPrivateData of cleared allocations!
2980
virtual void Clear() = 0;
2981
2982
virtual AllocHandle GetAllocationListBegin() const = 0;
2983
virtual AllocHandle GetNextAllocation(AllocHandle prevAlloc) const = 0;
2984
virtual UINT64 GetNextFreeRegionSize(AllocHandle alloc) const = 0;
2985
virtual void* GetAllocationPrivateData(AllocHandle allocHandle) const = 0;
2986
virtual void SetAllocationPrivateData(AllocHandle allocHandle, void* privateData) = 0;
2987
2988
virtual void AddStatistics(Statistics& inoutStats) const = 0;
2989
virtual void AddDetailedStatistics(DetailedStatistics& inoutStats) const = 0;
2990
virtual void WriteAllocationInfoToJson(JsonWriter& json) const = 0;
2991
virtual void DebugLogAllAllocations() const = 0;
2992
2993
protected:
2994
const ALLOCATION_CALLBACKS* GetAllocs() const { return m_pAllocationCallbacks; }
2995
UINT64 GetDebugMargin() const { return IsVirtual() ? 0 : D3D12MA_DEBUG_MARGIN; }
2996
2997
void DebugLogAllocation(UINT64 offset, UINT64 size, void* privateData) const;
2998
void PrintDetailedMap_Begin(JsonWriter& json,
2999
UINT64 unusedBytes,
3000
size_t allocationCount,
3001
size_t unusedRangeCount) const;
3002
void PrintDetailedMap_Allocation(JsonWriter& json,
3003
UINT64 offset, UINT64 size, void* privateData) const;
3004
void PrintDetailedMap_UnusedRange(JsonWriter& json,
3005
UINT64 offset, UINT64 size) const;
3006
void PrintDetailedMap_End(JsonWriter& json) const;
3007
3008
private:
3009
UINT64 m_Size;
3010
bool m_IsVirtual;
3011
const ALLOCATION_CALLBACKS* m_pAllocationCallbacks;
3012
3013
D3D12MA_CLASS_NO_COPY(BlockMetadata);
3014
};
3015
3016
#ifndef _D3D12MA_BLOCK_METADATA_FUNCTIONS
3017
BlockMetadata::BlockMetadata(const ALLOCATION_CALLBACKS* allocationCallbacks, bool isVirtual)
3018
: m_Size(0),
3019
m_IsVirtual(isVirtual),
3020
m_pAllocationCallbacks(allocationCallbacks)
3021
{
3022
D3D12MA_ASSERT(allocationCallbacks);
3023
}
3024
3025
void BlockMetadata::DebugLogAllocation(UINT64 offset, UINT64 size, void* privateData) const
3026
{
3027
if (IsVirtual())
3028
{
3029
D3D12MA_DEBUG_LOG(L"UNFREED VIRTUAL ALLOCATION; Offset: %llu; Size: %llu; PrivateData: %p", offset, size, privateData);
3030
}
3031
else
3032
{
3033
D3D12MA_ASSERT(privateData != NULL);
3034
Allocation* allocation = reinterpret_cast<Allocation*>(privateData);
3035
3036
privateData = allocation->GetPrivateData();
3037
LPCWSTR name = allocation->GetName();
3038
3039
D3D12MA_DEBUG_LOG(L"UNFREED ALLOCATION; Offset: %llu; Size: %llu; PrivateData: %p; Name: %s",
3040
offset, size, privateData, name ? name : L"D3D12MA_Empty");
3041
}
3042
}
3043
3044
void BlockMetadata::PrintDetailedMap_Begin(JsonWriter& json,
3045
UINT64 unusedBytes, size_t allocationCount, size_t unusedRangeCount) const
3046
{
3047
json.WriteString(L"TotalBytes");
3048
json.WriteNumber(GetSize());
3049
3050
json.WriteString(L"UnusedBytes");
3051
json.WriteNumber(unusedBytes);
3052
3053
json.WriteString(L"Allocations");
3054
json.WriteNumber((UINT64)allocationCount);
3055
3056
json.WriteString(L"UnusedRanges");
3057
json.WriteNumber((UINT64)unusedRangeCount);
3058
3059
json.WriteString(L"Suballocations");
3060
json.BeginArray();
3061
}
3062
3063
void BlockMetadata::PrintDetailedMap_Allocation(JsonWriter& json,
3064
UINT64 offset, UINT64 size, void* privateData) const
3065
{
3066
json.BeginObject(true);
3067
3068
json.WriteString(L"Offset");
3069
json.WriteNumber(offset);
3070
3071
if (IsVirtual())
3072
{
3073
json.WriteString(L"Size");
3074
json.WriteNumber(size);
3075
if (privateData)
3076
{
3077
json.WriteString(L"CustomData");
3078
json.WriteNumber((uintptr_t)privateData);
3079
}
3080
}
3081
else
3082
{
3083
const Allocation* const alloc = (const Allocation*)privateData;
3084
D3D12MA_ASSERT(alloc);
3085
json.AddAllocationToObject(*alloc);
3086
}
3087
json.EndObject();
3088
}
3089
3090
void BlockMetadata::PrintDetailedMap_UnusedRange(JsonWriter& json,
3091
UINT64 offset, UINT64 size) const
3092
{
3093
json.BeginObject(true);
3094
3095
json.WriteString(L"Offset");
3096
json.WriteNumber(offset);
3097
3098
json.WriteString(L"Type");
3099
json.WriteString(L"FREE");
3100
3101
json.WriteString(L"Size");
3102
json.WriteNumber(size);
3103
3104
json.EndObject();
3105
}
3106
3107
void BlockMetadata::PrintDetailedMap_End(JsonWriter& json) const
3108
{
3109
json.EndArray();
3110
}
3111
#endif // _D3D12MA_BLOCK_METADATA_FUNCTIONS
3112
#endif // _D3D12MA_BLOCK_METADATA
3113
3114
#if 0
3115
#ifndef _D3D12MA_BLOCK_METADATA_GENERIC
3116
class BlockMetadata_Generic : public BlockMetadata
3117
{
3118
public:
3119
BlockMetadata_Generic(const ALLOCATION_CALLBACKS* allocationCallbacks, bool isVirtual);
3120
virtual ~BlockMetadata_Generic() = default;
3121
3122
size_t GetAllocationCount() const override { return m_Suballocations.size() - m_FreeCount; }
3123
UINT64 GetSumFreeSize() const override { return m_SumFreeSize; }
3124
UINT64 GetAllocationOffset(AllocHandle allocHandle) const override { return (UINT64)allocHandle - 1; }
3125
3126
void Init(UINT64 size) override;
3127
bool Validate() const override;
3128
bool IsEmpty() const override;
3129
void GetAllocationInfo(AllocHandle allocHandle, VIRTUAL_ALLOCATION_INFO& outInfo) const override;
3130
3131
bool CreateAllocationRequest(
3132
UINT64 allocSize,
3133
UINT64 allocAlignment,
3134
bool upperAddress,
3135
AllocationRequest* pAllocationRequest) override;
3136
3137
void Alloc(
3138
const AllocationRequest& request,
3139
UINT64 allocSize,
3140
void* privateData) override;
3141
3142
void Free(AllocHandle allocHandle) override;
3143
void Clear() override;
3144
3145
void SetAllocationPrivateData(AllocHandle allocHandle, void* privateData) override;
3146
3147
void AddStatistics(Statistics& inoutStats) const override;
3148
void AddDetailedStatistics(DetailedStatistics& inoutStats) const override;
3149
void WriteAllocationInfoToJson(JsonWriter& json) const override;
3150
3151
private:
3152
UINT m_FreeCount;
3153
UINT64 m_SumFreeSize;
3154
SuballocationList m_Suballocations;
3155
// Suballocations that are free and have size greater than certain threshold.
3156
// Sorted by size, ascending.
3157
Vector<SuballocationList::iterator> m_FreeSuballocationsBySize;
3158
ZeroInitializedRange m_ZeroInitializedRange;
3159
3160
SuballocationList::const_iterator FindAtOffset(UINT64 offset) const;
3161
bool ValidateFreeSuballocationList() const;
3162
3163
// Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
3164
// If yes, fills pOffset and returns true. If no, returns false.
3165
bool CheckAllocation(
3166
UINT64 allocSize,
3167
UINT64 allocAlignment,
3168
SuballocationList::const_iterator suballocItem,
3169
AllocHandle* pAllocHandle,
3170
UINT64* pSumFreeSize,
3171
UINT64* pSumItemSize,
3172
BOOL *pZeroInitialized) const;
3173
// Given free suballocation, it merges it with following one, which must also be free.
3174
void MergeFreeWithNext(SuballocationList::iterator item);
3175
// Releases given suballocation, making it free.
3176
// Merges it with adjacent free suballocations if applicable.
3177
// Returns iterator to new free suballocation at this place.
3178
SuballocationList::iterator FreeSuballocation(SuballocationList::iterator suballocItem);
3179
// Given free suballocation, it inserts it into sorted list of
3180
// m_FreeSuballocationsBySize if it's suitable.
3181
void RegisterFreeSuballocation(SuballocationList::iterator item);
3182
// Given free suballocation, it removes it from sorted list of
3183
// m_FreeSuballocationsBySize if it's suitable.
3184
void UnregisterFreeSuballocation(SuballocationList::iterator item);
3185
3186
D3D12MA_CLASS_NO_COPY(BlockMetadata_Generic)
3187
};
3188
3189
#ifndef _D3D12MA_BLOCK_METADATA_GENERIC_FUNCTIONS
3190
BlockMetadata_Generic::BlockMetadata_Generic(const ALLOCATION_CALLBACKS* allocationCallbacks, bool isVirtual)
3191
: BlockMetadata(allocationCallbacks, isVirtual),
3192
m_FreeCount(0),
3193
m_SumFreeSize(0),
3194
m_Suballocations(*allocationCallbacks),
3195
m_FreeSuballocationsBySize(*allocationCallbacks)
3196
{
3197
D3D12MA_ASSERT(allocationCallbacks);
3198
}
3199
3200
void BlockMetadata_Generic::Init(UINT64 size)
3201
{
3202
BlockMetadata::Init(size);
3203
m_ZeroInitializedRange.Reset(size);
3204
3205
m_FreeCount = 1;
3206
m_SumFreeSize = size;
3207
3208
Suballocation suballoc = {};
3209
suballoc.offset = 0;
3210
suballoc.size = size;
3211
suballoc.type = SUBALLOCATION_TYPE_FREE;
3212
suballoc.privateData = NULL;
3213
3214
D3D12MA_ASSERT(size > MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
3215
m_Suballocations.push_back(suballoc);
3216
SuballocationList::iterator suballocItem = m_Suballocations.end();
3217
--suballocItem;
3218
m_FreeSuballocationsBySize.push_back(suballocItem);
3219
}
3220
3221
bool BlockMetadata_Generic::Validate() const
3222
{
3223
D3D12MA_VALIDATE(!m_Suballocations.empty());
3224
3225
// Expected offset of new suballocation as calculated from previous ones.
3226
UINT64 calculatedOffset = 0;
3227
// Expected number of free suballocations as calculated from traversing their list.
3228
UINT calculatedFreeCount = 0;
3229
// Expected sum size of free suballocations as calculated from traversing their list.
3230
UINT64 calculatedSumFreeSize = 0;
3231
// Expected number of free suballocations that should be registered in
3232
// m_FreeSuballocationsBySize calculated from traversing their list.
3233
size_t freeSuballocationsToRegister = 0;
3234
// True if the previously visited suballocation was free.
3235
bool prevFree = false;
3236
3237
for (const auto& subAlloc : m_Suballocations)
3238
{
3239
// Actual offset of this suballocation doesn't match expected one.
3240
D3D12MA_VALIDATE(subAlloc.offset == calculatedOffset);
3241
3242
const bool currFree = (subAlloc.type == SUBALLOCATION_TYPE_FREE);
3243
// Two adjacent free suballocations are invalid. They should be merged.
3244
D3D12MA_VALIDATE(!prevFree || !currFree);
3245
3246
const Allocation* const alloc = (Allocation*)subAlloc.privateData;
3247
if (!IsVirtual())
3248
{
3249
D3D12MA_VALIDATE(currFree == (alloc == NULL));
3250
}
3251
3252
if (currFree)
3253
{
3254
calculatedSumFreeSize += subAlloc.size;
3255
++calculatedFreeCount;
3256
if (subAlloc.size >= MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
3257
{
3258
++freeSuballocationsToRegister;
3259
}
3260
3261
// Margin required between allocations - every free space must be at least that large.
3262
D3D12MA_VALIDATE(subAlloc.size >= GetDebugMargin());
3263
}
3264
else
3265
{
3266
if (!IsVirtual())
3267
{
3268
D3D12MA_VALIDATE(alloc->GetOffset() == subAlloc.offset);
3269
D3D12MA_VALIDATE(alloc->GetSize() == subAlloc.size);
3270
}
3271
3272
// Margin required between allocations - previous allocation must be free.
3273
D3D12MA_VALIDATE(GetDebugMargin() == 0 || prevFree);
3274
}
3275
3276
calculatedOffset += subAlloc.size;
3277
prevFree = currFree;
3278
}
3279
3280
// Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
3281
// match expected one.
3282
D3D12MA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
3283
3284
UINT64 lastSize = 0;
3285
for (size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
3286
{
3287
SuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
3288
3289
// Only free suballocations can be registered in m_FreeSuballocationsBySize.
3290
D3D12MA_VALIDATE(suballocItem->type == SUBALLOCATION_TYPE_FREE);
3291
// They must be sorted by size ascending.
3292
D3D12MA_VALIDATE(suballocItem->size >= lastSize);
3293
3294
lastSize = suballocItem->size;
3295
}
3296
3297
// Check if totals match calculated values.
3298
D3D12MA_VALIDATE(ValidateFreeSuballocationList());
3299
D3D12MA_VALIDATE(calculatedOffset == GetSize());
3300
D3D12MA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
3301
D3D12MA_VALIDATE(calculatedFreeCount == m_FreeCount);
3302
3303
return true;
3304
}
3305
3306
bool BlockMetadata_Generic::IsEmpty() const
3307
{
3308
return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
3309
}
3310
3311
void BlockMetadata_Generic::GetAllocationInfo(AllocHandle allocHandle, VIRTUAL_ALLOCATION_INFO& outInfo) const
3312
{
3313
Suballocation& suballoc = *FindAtOffset((UINT64)allocHandle - 1).dropConst();
3314
outInfo.Offset = suballoc.offset;
3315
outInfo.Size = suballoc.size;
3316
outInfo.pPrivateData = suballoc.privateData;
3317
}
3318
3319
bool BlockMetadata_Generic::CreateAllocationRequest(
3320
UINT64 allocSize,
3321
UINT64 allocAlignment,
3322
bool upperAddress,
3323
AllocationRequest* pAllocationRequest)
3324
{
3325
D3D12MA_ASSERT(allocSize > 0);
3326
D3D12MA_ASSERT(!upperAddress && "ALLOCATION_FLAG_UPPER_ADDRESS can be used only with linear algorithm.");
3327
D3D12MA_ASSERT(pAllocationRequest != NULL);
3328
D3D12MA_HEAVY_ASSERT(Validate());
3329
3330
// There is not enough total free space in this block to fulfill the request: Early return.
3331
if (m_SumFreeSize < allocSize + GetDebugMargin())
3332
{
3333
return false;
3334
}
3335
3336
// New algorithm, efficiently searching freeSuballocationsBySize.
3337
const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
3338
if (freeSuballocCount > 0)
3339
{
3340
// Find first free suballocation with size not less than allocSize + GetDebugMargin().
3341
SuballocationList::iterator* const it = BinaryFindFirstNotLess(
3342
m_FreeSuballocationsBySize.data(),
3343
m_FreeSuballocationsBySize.data() + freeSuballocCount,
3344
allocSize + GetDebugMargin(),
3345
SuballocationItemSizeLess());
3346
size_t index = it - m_FreeSuballocationsBySize.data();
3347
for (; index < freeSuballocCount; ++index)
3348
{
3349
if (CheckAllocation(
3350
allocSize,
3351
allocAlignment,
3352
m_FreeSuballocationsBySize[index],
3353
&pAllocationRequest->allocHandle,
3354
&pAllocationRequest->sumFreeSize,
3355
&pAllocationRequest->sumItemSize,
3356
&pAllocationRequest->zeroInitialized))
3357
{
3358
pAllocationRequest->item = m_FreeSuballocationsBySize[index];
3359
return true;
3360
}
3361
}
3362
}
3363
3364
return false;
3365
}
3366
3367
void BlockMetadata_Generic::Alloc(
3368
const AllocationRequest& request,
3369
UINT64 allocSize,
3370
void* privateData)
3371
{
3372
D3D12MA_ASSERT(request.item != m_Suballocations.end());
3373
Suballocation& suballoc = *request.item;
3374
// Given suballocation is a free block.
3375
D3D12MA_ASSERT(suballoc.type == SUBALLOCATION_TYPE_FREE);
3376
// Given offset is inside this suballocation.
3377
UINT64 offset = (UINT64)request.allocHandle - 1;
3378
D3D12MA_ASSERT(offset >= suballoc.offset);
3379
const UINT64 paddingBegin = offset - suballoc.offset;
3380
D3D12MA_ASSERT(suballoc.size >= paddingBegin + allocSize);
3381
const UINT64 paddingEnd = suballoc.size - paddingBegin - allocSize;
3382
3383
// Unregister this free suballocation from m_FreeSuballocationsBySize and update
3384
// it to become used.
3385
UnregisterFreeSuballocation(request.item);
3386
3387
suballoc.offset = offset;
3388
suballoc.size = allocSize;
3389
suballoc.type = SUBALLOCATION_TYPE_ALLOCATION;
3390
suballoc.privateData = privateData;
3391
3392
// If there are any free bytes remaining at the end, insert new free suballocation after current one.
3393
if (paddingEnd)
3394
{
3395
Suballocation paddingSuballoc = {};
3396
paddingSuballoc.offset = offset + allocSize;
3397
paddingSuballoc.size = paddingEnd;
3398
paddingSuballoc.type = SUBALLOCATION_TYPE_FREE;
3399
SuballocationList::iterator next = request.item;
3400
++next;
3401
const SuballocationList::iterator paddingEndItem =
3402
m_Suballocations.insert(next, paddingSuballoc);
3403
RegisterFreeSuballocation(paddingEndItem);
3404
}
3405
3406
// If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
3407
if (paddingBegin)
3408
{
3409
Suballocation paddingSuballoc = {};
3410
paddingSuballoc.offset = offset - paddingBegin;
3411
paddingSuballoc.size = paddingBegin;
3412
paddingSuballoc.type = SUBALLOCATION_TYPE_FREE;
3413
const SuballocationList::iterator paddingBeginItem =
3414
m_Suballocations.insert(request.item, paddingSuballoc);
3415
RegisterFreeSuballocation(paddingBeginItem);
3416
}
3417
3418
// Update totals.
3419
m_FreeCount = m_FreeCount - 1;
3420
if (paddingBegin > 0)
3421
{
3422
++m_FreeCount;
3423
}
3424
if (paddingEnd > 0)
3425
{
3426
++m_FreeCount;
3427
}
3428
m_SumFreeSize -= allocSize;
3429
3430
m_ZeroInitializedRange.MarkRangeAsUsed(offset, offset + allocSize);
3431
}
3432
3433
void BlockMetadata_Generic::Free(AllocHandle allocHandle)
3434
{
3435
FreeSuballocation(FindAtOffset((UINT64)allocHandle - 1).dropConst());
3436
}
3437
3438
void BlockMetadata_Generic::Clear()
3439
{
3440
m_FreeCount = 1;
3441
m_SumFreeSize = GetSize();
3442
3443
m_Suballocations.clear();
3444
Suballocation suballoc = {};
3445
suballoc.offset = 0;
3446
suballoc.size = GetSize();
3447
suballoc.type = SUBALLOCATION_TYPE_FREE;
3448
m_Suballocations.push_back(suballoc);
3449
3450
m_FreeSuballocationsBySize.clear();
3451
m_FreeSuballocationsBySize.push_back(m_Suballocations.begin());
3452
}
3453
3454
SuballocationList::const_iterator BlockMetadata_Generic::FindAtOffset(UINT64 offset) const
3455
{
3456
const UINT64 last = m_Suballocations.crbegin()->offset;
3457
if (last == offset)
3458
return m_Suballocations.crbegin();
3459
const UINT64 first = m_Suballocations.cbegin()->offset;
3460
if (first == offset)
3461
return m_Suballocations.cbegin();
3462
3463
const size_t suballocCount = m_Suballocations.size();
3464
const UINT64 step = (last - first + m_Suballocations.cbegin()->size) / suballocCount;
3465
auto findSuballocation = [&](auto begin, auto end) -> SuballocationList::const_iterator
3466
{
3467
for (auto suballocItem = begin;
3468
suballocItem != end;
3469
++suballocItem)
3470
{
3471
const Suballocation& suballoc = *suballocItem;
3472
if (suballoc.offset == offset)
3473
return suballocItem;
3474
}
3475
D3D12MA_ASSERT(false && "Not found!");
3476
return m_Suballocations.end();
3477
};
3478
// If the requested offset is closer to the end of the range, search from the end.
3479
if ((offset - first) > suballocCount * step / 2)
3480
{
3481
return findSuballocation(m_Suballocations.crbegin(), m_Suballocations.crend());
3482
}
3483
return findSuballocation(m_Suballocations.cbegin(), m_Suballocations.cend());
3484
}
3485
3486
bool BlockMetadata_Generic::ValidateFreeSuballocationList() const
3487
{
3488
UINT64 lastSize = 0;
3489
for (size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
3490
{
3491
const SuballocationList::iterator it = m_FreeSuballocationsBySize[i];
3492
3493
D3D12MA_VALIDATE(it->type == SUBALLOCATION_TYPE_FREE);
3494
D3D12MA_VALIDATE(it->size >= MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
3495
D3D12MA_VALIDATE(it->size >= lastSize);
3496
lastSize = it->size;
3497
}
3498
return true;
3499
}
3500
3501
bool BlockMetadata_Generic::CheckAllocation(
3502
UINT64 allocSize,
3503
UINT64 allocAlignment,
3504
SuballocationList::const_iterator suballocItem,
3505
AllocHandle* pAllocHandle,
3506
UINT64* pSumFreeSize,
3507
UINT64* pSumItemSize,
3508
BOOL* pZeroInitialized) const
3509
{
3510
D3D12MA_ASSERT(allocSize > 0);
3511
D3D12MA_ASSERT(suballocItem != m_Suballocations.cend());
3512
D3D12MA_ASSERT(pAllocHandle != NULL && pZeroInitialized != NULL);
3513
3514
*pSumFreeSize = 0;
3515
*pSumItemSize = 0;
3516
*pZeroInitialized = FALSE;
3517
3518
const Suballocation& suballoc = *suballocItem;
3519
D3D12MA_ASSERT(suballoc.type == SUBALLOCATION_TYPE_FREE);
3520
3521
*pSumFreeSize = suballoc.size;
3522
3523
// Size of this suballocation is too small for this request: Early return.
3524
if (suballoc.size < allocSize)
3525
{
3526
return false;
3527
}
3528
3529
// Start from an offset equal to the beginning of this suballocation, plus the debug margin of the previous allocation if present.
3530
UINT64 offset = suballoc.offset + (suballocItem == m_Suballocations.cbegin() ? 0 : GetDebugMargin());
3531
3532
// Apply alignment.
3533
offset = AlignUp(offset, allocAlignment);
3534
3535
// Calculate padding at the beginning based on current offset.
3536
const UINT64 paddingBegin = offset - suballoc.offset;
3537
3538
// Fail if requested size plus margin after is bigger than size of this suballocation.
3539
if (paddingBegin + allocSize + GetDebugMargin() > suballoc.size)
3540
{
3541
return false;
3542
}
3543
3544
// All tests passed: Success. Offset is already filled.
3545
*pZeroInitialized = m_ZeroInitializedRange.IsRangeZeroInitialized(offset, offset + allocSize);
3546
*pAllocHandle = (AllocHandle)(offset + 1);
3547
return true;
3548
}
3549
3550
void BlockMetadata_Generic::MergeFreeWithNext(SuballocationList::iterator item)
3551
{
3552
D3D12MA_ASSERT(item != m_Suballocations.end());
3553
D3D12MA_ASSERT(item->type == SUBALLOCATION_TYPE_FREE);
3554
3555
SuballocationList::iterator nextItem = item;
3556
++nextItem;
3557
D3D12MA_ASSERT(nextItem != m_Suballocations.end());
3558
D3D12MA_ASSERT(nextItem->type == SUBALLOCATION_TYPE_FREE);
3559
3560
item->size += nextItem->size;
3561
--m_FreeCount;
3562
m_Suballocations.erase(nextItem);
3563
}
3564
3565
SuballocationList::iterator BlockMetadata_Generic::FreeSuballocation(SuballocationList::iterator suballocItem)
3566
{
3567
// Change this suballocation to be marked as free.
3568
Suballocation& suballoc = *suballocItem;
3569
suballoc.type = SUBALLOCATION_TYPE_FREE;
3570
suballoc.privateData = NULL;
3571
3572
// Update totals.
3573
++m_FreeCount;
3574
m_SumFreeSize += suballoc.size;
3575
3576
// Merge with previous and/or next suballocation if it's also free.
3577
bool mergeWithNext = false;
3578
bool mergeWithPrev = false;
3579
3580
SuballocationList::iterator nextItem = suballocItem;
3581
++nextItem;
3582
if ((nextItem != m_Suballocations.end()) && (nextItem->type == SUBALLOCATION_TYPE_FREE))
3583
{
3584
mergeWithNext = true;
3585
}
3586
3587
SuballocationList::iterator prevItem = suballocItem;
3588
if (suballocItem != m_Suballocations.begin())
3589
{
3590
--prevItem;
3591
if (prevItem->type == SUBALLOCATION_TYPE_FREE)
3592
{
3593
mergeWithPrev = true;
3594
}
3595
}
3596
3597
if (mergeWithNext)
3598
{
3599
UnregisterFreeSuballocation(nextItem);
3600
MergeFreeWithNext(suballocItem);
3601
}
3602
3603
if (mergeWithPrev)
3604
{
3605
UnregisterFreeSuballocation(prevItem);
3606
MergeFreeWithNext(prevItem);
3607
RegisterFreeSuballocation(prevItem);
3608
return prevItem;
3609
}
3610
else
3611
{
3612
RegisterFreeSuballocation(suballocItem);
3613
return suballocItem;
3614
}
3615
}
3616
3617
void BlockMetadata_Generic::RegisterFreeSuballocation(SuballocationList::iterator item)
3618
{
3619
D3D12MA_ASSERT(item->type == SUBALLOCATION_TYPE_FREE);
3620
D3D12MA_ASSERT(item->size > 0);
3621
3622
// You may want to enable this validation at the beginning or at the end of
3623
// this function, depending on what you want to check.
3624
D3D12MA_HEAVY_ASSERT(ValidateFreeSuballocationList());
3625
3626
if (item->size >= MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
3627
{
3628
if (m_FreeSuballocationsBySize.empty())
3629
{
3630
m_FreeSuballocationsBySize.push_back(item);
3631
}
3632
else
3633
{
3634
m_FreeSuballocationsBySize.InsertSorted(item, SuballocationItemSizeLess());
3635
}
3636
}
3637
3638
//D3D12MA_HEAVY_ASSERT(ValidateFreeSuballocationList());
3639
}
3640
3641
void BlockMetadata_Generic::UnregisterFreeSuballocation(SuballocationList::iterator item)
3642
{
3643
D3D12MA_ASSERT(item->type == SUBALLOCATION_TYPE_FREE);
3644
D3D12MA_ASSERT(item->size > 0);
3645
3646
// You may want to enable this validation at the beginning or at the end of
3647
// this function, depending on what you want to check.
3648
D3D12MA_HEAVY_ASSERT(ValidateFreeSuballocationList());
3649
3650
if (item->size >= MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
3651
{
3652
SuballocationList::iterator* const it = BinaryFindFirstNotLess(
3653
m_FreeSuballocationsBySize.data(),
3654
m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
3655
item,
3656
SuballocationItemSizeLess());
3657
for (size_t index = it - m_FreeSuballocationsBySize.data();
3658
index < m_FreeSuballocationsBySize.size();
3659
++index)
3660
{
3661
if (m_FreeSuballocationsBySize[index] == item)
3662
{
3663
m_FreeSuballocationsBySize.remove(index);
3664
return;
3665
}
3666
D3D12MA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
3667
}
3668
D3D12MA_ASSERT(0 && "Not found.");
3669
}
3670
3671
//D3D12MA_HEAVY_ASSERT(ValidateFreeSuballocationList());
3672
}
3673
3674
void BlockMetadata_Generic::SetAllocationPrivateData(AllocHandle allocHandle, void* privateData)
3675
{
3676
Suballocation& suballoc = *FindAtOffset((UINT64)allocHandle - 1).dropConst();
3677
suballoc.privateData = privateData;
3678
}
3679
3680
void BlockMetadata_Generic::AddStatistics(Statistics& inoutStats) const
3681
{
3682
inoutStats.BlockCount++;
3683
inoutStats.AllocationCount += (UINT)m_Suballocations.size() - m_FreeCount;
3684
inoutStats.BlockBytes += GetSize();
3685
inoutStats.AllocationBytes += GetSize() - m_SumFreeSize;
3686
}
3687
3688
void BlockMetadata_Generic::AddDetailedStatistics(DetailedStatistics& inoutStats) const
3689
{
3690
inoutStats.Stats.BlockCount++;
3691
inoutStats.Stats.BlockBytes += GetSize();
3692
3693
for (const auto& suballoc : m_Suballocations)
3694
{
3695
if (suballoc.type == SUBALLOCATION_TYPE_FREE)
3696
AddDetailedStatisticsUnusedRange(inoutStats, suballoc.size);
3697
else
3698
AddDetailedStatisticsAllocation(inoutStats, suballoc.size);
3699
}
3700
}
3701
3702
void BlockMetadata_Generic::WriteAllocationInfoToJson(JsonWriter& json) const
3703
{
3704
PrintDetailedMap_Begin(json, GetSumFreeSize(), GetAllocationCount(), m_FreeCount);
3705
for (const auto& suballoc : m_Suballocations)
3706
{
3707
if (suballoc.type == SUBALLOCATION_TYPE_FREE)
3708
PrintDetailedMap_UnusedRange(json, suballoc.offset, suballoc.size);
3709
else
3710
PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.privateData);
3711
}
3712
PrintDetailedMap_End(json);
3713
}
3714
#endif // _D3D12MA_BLOCK_METADATA_GENERIC_FUNCTIONS
3715
#endif // _D3D12MA_BLOCK_METADATA_GENERIC
3716
#endif // #if 0
3717
3718
#ifndef _D3D12MA_BLOCK_METADATA_LINEAR
3719
class BlockMetadata_Linear : public BlockMetadata
3720
{
3721
public:
3722
BlockMetadata_Linear(const ALLOCATION_CALLBACKS* allocationCallbacks, bool isVirtual);
3723
virtual ~BlockMetadata_Linear() = default;
3724
3725
UINT64 GetSumFreeSize() const override { return m_SumFreeSize; }
3726
bool IsEmpty() const override { return GetAllocationCount() == 0; }
3727
UINT64 GetAllocationOffset(AllocHandle allocHandle) const override { return (UINT64)allocHandle - 1; };
3728
3729
void Init(UINT64 size) override;
3730
bool Validate() const override;
3731
size_t GetAllocationCount() const override;
3732
size_t GetFreeRegionsCount() const override;
3733
void GetAllocationInfo(AllocHandle allocHandle, VIRTUAL_ALLOCATION_INFO& outInfo) const override;
3734
3735
bool CreateAllocationRequest(
3736
UINT64 allocSize,
3737
UINT64 allocAlignment,
3738
bool upperAddress,
3739
UINT32 strategy,
3740
AllocationRequest* pAllocationRequest) override;
3741
3742
void Alloc(
3743
const AllocationRequest& request,
3744
UINT64 allocSize,
3745
void* privateData) override;
3746
3747
void Free(AllocHandle allocHandle) override;
3748
void Clear() override;
3749
3750
AllocHandle GetAllocationListBegin() const override;
3751
AllocHandle GetNextAllocation(AllocHandle prevAlloc) const override;
3752
UINT64 GetNextFreeRegionSize(AllocHandle alloc) const override;
3753
void* GetAllocationPrivateData(AllocHandle allocHandle) const override;
3754
void SetAllocationPrivateData(AllocHandle allocHandle, void* privateData) override;
3755
3756
void AddStatistics(Statistics& inoutStats) const override;
3757
void AddDetailedStatistics(DetailedStatistics& inoutStats) const override;
3758
void WriteAllocationInfoToJson(JsonWriter& json) const override;
3759
void DebugLogAllAllocations() const override;
3760
3761
private:
3762
/*
3763
There are two suballocation vectors, used in ping-pong way.
3764
The one with index m_1stVectorIndex is called 1st.
3765
The one with index (m_1stVectorIndex ^ 1) is called 2nd.
3766
2nd can be non-empty only when 1st is not empty.
3767
When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
3768
*/
3769
typedef Vector<Suballocation> SuballocationVectorType;
3770
3771
enum ALLOC_REQUEST_TYPE
3772
{
3773
ALLOC_REQUEST_UPPER_ADDRESS,
3774
ALLOC_REQUEST_END_OF_1ST,
3775
ALLOC_REQUEST_END_OF_2ND,
3776
};
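// Note (inferred from the code in this class): AllocationRequest::algorithmData
// carries one of the ALLOC_REQUEST_* values above, chosen by
// CreateAllocationRequest_LowerAddress()/_UpperAddress() and consumed by Alloc(),
// while AllocationRequest::allocHandle encodes the byte offset plus 1, presumably
// so that a valid handle is never equal to the null value (AllocHandle)0.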
3777
3778
enum SECOND_VECTOR_MODE
3779
{
3780
SECOND_VECTOR_EMPTY,
3781
/*
3782
Suballocations in 2nd vector are created later than the ones in 1st, but they
3783
all have smaller offset.
3784
*/
3785
SECOND_VECTOR_RING_BUFFER,
3786
/*
3787
Suballocations in 2nd vector are upper side of double stack.
3788
They all have offsets higher than those in 1st vector.
3789
Top of this stack means smaller offsets, but higher indices in this vector.
3790
*/
3791
SECOND_VECTOR_DOUBLE_STACK,
3792
};
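// Informal layout sketch (illustration only, not taken from the original code):
//
//   SECOND_VECTOR_EMPTY:        |1st 1st 1st ...........free...........|
//   SECOND_VECTOR_RING_BUFFER:  |2nd 2nd ..free.. 1st 1st 1st ..free..|
//   SECOND_VECTOR_DOUBLE_STACK: |1st 1st 1st ......free...... 2nd 2nd|
//
// In RING_BUFFER mode the 2nd vector holds allocations that wrapped around to the
// low offsets in front of the 1st vector; in DOUBLE_STACK mode it holds the upper
// stack growing down from the end of the block.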
3793
3794
UINT64 m_SumFreeSize;
3795
SuballocationVectorType m_Suballocations0, m_Suballocations1;
3796
UINT32 m_1stVectorIndex;
3797
SECOND_VECTOR_MODE m_2ndVectorMode;
3798
// Number of items in 1st vector with null privateData (free items) at the beginning.
size_t m_1stNullItemsBeginCount;
// Number of other free (null) items in 1st vector somewhere in the middle.
size_t m_1stNullItemsMiddleCount;
// Number of free (null) items in 2nd vector.
size_t m_2ndNullItemsCount;
3804
3805
SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
3809
3810
Suballocation& FindSuballocation(UINT64 offset) const;
3811
bool ShouldCompact1st() const;
3812
void CleanupAfterFree();
3813
3814
bool CreateAllocationRequest_LowerAddress(
3815
UINT64 allocSize,
3816
UINT64 allocAlignment,
3817
AllocationRequest* pAllocationRequest);
3818
bool CreateAllocationRequest_UpperAddress(
3819
UINT64 allocSize,
3820
UINT64 allocAlignment,
3821
AllocationRequest* pAllocationRequest);
3822
3823
D3D12MA_CLASS_NO_COPY(BlockMetadata_Linear)
3824
};
3825
3826
#ifndef _D3D12MA_BLOCK_METADATA_LINEAR_FUNCTIONS
3827
BlockMetadata_Linear::BlockMetadata_Linear(const ALLOCATION_CALLBACKS* allocationCallbacks, bool isVirtual)
3828
: BlockMetadata(allocationCallbacks, isVirtual),
3829
m_SumFreeSize(0),
3830
m_Suballocations0(*allocationCallbacks),
3831
m_Suballocations1(*allocationCallbacks),
3832
m_1stVectorIndex(0),
3833
m_2ndVectorMode(SECOND_VECTOR_EMPTY),
3834
m_1stNullItemsBeginCount(0),
3835
m_1stNullItemsMiddleCount(0),
3836
m_2ndNullItemsCount(0)
3837
{
3838
D3D12MA_ASSERT(allocationCallbacks);
3839
}
3840
3841
void BlockMetadata_Linear::Init(UINT64 size)
3842
{
3843
BlockMetadata::Init(size);
3844
m_SumFreeSize = size;
3845
}
3846
3847
bool BlockMetadata_Linear::Validate() const
3848
{
3849
D3D12MA_VALIDATE(GetSumFreeSize() <= GetSize());
3850
const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
3851
const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
3852
3853
D3D12MA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
3854
D3D12MA_VALIDATE(!suballocations1st.empty() ||
3855
suballocations2nd.empty() ||
3856
m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
3857
3858
if (!suballocations1st.empty())
3859
{
3860
// Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
3861
D3D12MA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].type != SUBALLOCATION_TYPE_FREE);
3862
// Null item at the end should be just pop_back().
3863
D3D12MA_VALIDATE(suballocations1st.back().type != SUBALLOCATION_TYPE_FREE);
3864
}
3865
if (!suballocations2nd.empty())
3866
{
3867
// Null item at the end should be just pop_back().
3868
D3D12MA_VALIDATE(suballocations2nd.back().type != SUBALLOCATION_TYPE_FREE);
3869
}
3870
3871
D3D12MA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
3872
D3D12MA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
3873
3874
UINT64 sumUsedSize = 0;
3875
const size_t suballoc1stCount = suballocations1st.size();
3876
UINT64 offset = 0;
3877
3878
if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
3879
{
3880
const size_t suballoc2ndCount = suballocations2nd.size();
3881
size_t nullItem2ndCount = 0;
3882
for (size_t i = 0; i < suballoc2ndCount; ++i)
3883
{
3884
const Suballocation& suballoc = suballocations2nd[i];
3885
const bool currFree = (suballoc.type == SUBALLOCATION_TYPE_FREE);
3886
3887
const Allocation* alloc = (Allocation*)suballoc.privateData;
3888
if (!IsVirtual())
3889
{
3890
D3D12MA_VALIDATE(currFree == (alloc == NULL));
3891
}
3892
D3D12MA_VALIDATE(suballoc.offset >= offset);
3893
3894
if (!currFree)
3895
{
3896
if (!IsVirtual())
3897
{
3898
D3D12MA_VALIDATE(GetAllocationOffset(alloc->GetAllocHandle()) == suballoc.offset);
3899
D3D12MA_VALIDATE(alloc->GetSize() == suballoc.size);
3900
}
3901
sumUsedSize += suballoc.size;
3902
}
3903
else
3904
{
3905
++nullItem2ndCount;
3906
}
3907
3908
offset = suballoc.offset + suballoc.size + GetDebugMargin();
3909
}
3910
3911
D3D12MA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
3912
}
3913
3914
for (size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
3915
{
3916
const Suballocation& suballoc = suballocations1st[i];
3917
D3D12MA_VALIDATE(suballoc.type == SUBALLOCATION_TYPE_FREE &&
3918
suballoc.privateData == NULL);
3919
}
3920
3921
size_t nullItem1stCount = m_1stNullItemsBeginCount;
3922
3923
for (size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
3924
{
3925
const Suballocation& suballoc = suballocations1st[i];
3926
const bool currFree = (suballoc.type == SUBALLOCATION_TYPE_FREE);
3927
3928
const Allocation* alloc = (Allocation*)suballoc.privateData;
3929
if (!IsVirtual())
3930
{
3931
D3D12MA_VALIDATE(currFree == (alloc == NULL));
3932
}
3933
D3D12MA_VALIDATE(suballoc.offset >= offset);
3934
D3D12MA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
3935
3936
if (!currFree)
3937
{
3938
if (!IsVirtual())
3939
{
3940
D3D12MA_VALIDATE(GetAllocationOffset(alloc->GetAllocHandle()) == suballoc.offset);
3941
D3D12MA_VALIDATE(alloc->GetSize() == suballoc.size);
3942
}
3943
sumUsedSize += suballoc.size;
3944
}
3945
else
3946
{
3947
++nullItem1stCount;
3948
}
3949
3950
offset = suballoc.offset + suballoc.size + GetDebugMargin();
3951
}
3952
D3D12MA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
3953
3954
if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
3955
{
3956
const size_t suballoc2ndCount = suballocations2nd.size();
3957
size_t nullItem2ndCount = 0;
3958
for (size_t i = suballoc2ndCount; i--; )
3959
{
3960
const Suballocation& suballoc = suballocations2nd[i];
3961
const bool currFree = (suballoc.type == SUBALLOCATION_TYPE_FREE);
3962
3963
const Allocation* alloc = (Allocation*)suballoc.privateData;
3964
if (!IsVirtual())
3965
{
3966
D3D12MA_VALIDATE(currFree == (alloc == NULL));
3967
}
3968
D3D12MA_VALIDATE(suballoc.offset >= offset);
3969
3970
if (!currFree)
3971
{
3972
if (!IsVirtual())
3973
{
3974
D3D12MA_VALIDATE(GetAllocationOffset(alloc->GetAllocHandle()) == suballoc.offset);
3975
D3D12MA_VALIDATE(alloc->GetSize() == suballoc.size);
3976
}
3977
sumUsedSize += suballoc.size;
3978
}
3979
else
3980
{
3981
++nullItem2ndCount;
3982
}
3983
3984
offset = suballoc.offset + suballoc.size + GetDebugMargin();
3985
}
3986
3987
D3D12MA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
3988
}
3989
3990
D3D12MA_VALIDATE(offset <= GetSize());
3991
D3D12MA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
3992
3993
return true;
3994
}
3995
3996
size_t BlockMetadata_Linear::GetAllocationCount() const
3997
{
3998
return AccessSuballocations1st().size() - m_1stNullItemsBeginCount - m_1stNullItemsMiddleCount +
3999
AccessSuballocations2nd().size() - m_2ndNullItemsCount;
4000
}
4001
4002
size_t BlockMetadata_Linear::GetFreeRegionsCount() const
4003
{
4004
// Function only used for defragmentation, which is disabled for this algorithm
4005
D3D12MA_ASSERT(0);
4006
return SIZE_MAX;
4007
}
4008
4009
void BlockMetadata_Linear::GetAllocationInfo(AllocHandle allocHandle, VIRTUAL_ALLOCATION_INFO& outInfo) const
4010
{
4011
const Suballocation& suballoc = FindSuballocation((UINT64)allocHandle - 1);
4012
outInfo.Offset = suballoc.offset;
4013
outInfo.Size = suballoc.size;
4014
outInfo.pPrivateData = suballoc.privateData;
4015
}
4016
4017
bool BlockMetadata_Linear::CreateAllocationRequest(
4018
UINT64 allocSize,
4019
UINT64 allocAlignment,
4020
bool upperAddress,
4021
UINT32 strategy,
4022
AllocationRequest* pAllocationRequest)
4023
{
4024
D3D12MA_ASSERT(allocSize > 0 && "Cannot allocate empty block!");
4025
D3D12MA_ASSERT(pAllocationRequest != NULL);
4026
D3D12MA_HEAVY_ASSERT(Validate());
4027
pAllocationRequest->size = allocSize;
4028
return upperAddress ?
4029
CreateAllocationRequest_UpperAddress(
4030
allocSize, allocAlignment, pAllocationRequest) :
4031
CreateAllocationRequest_LowerAddress(
4032
allocSize, allocAlignment, pAllocationRequest);
4033
}
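// Rough caller-side sketch of the two-phase protocol used by this interface
// (illustration only; variable names are made up, error handling omitted):
//
//   AllocationRequest req = {};
//   if (metadata.CreateAllocationRequest(size, alignment,
//       false /*upperAddress*/, 0 /*strategy*/, &req))
//   {
//       metadata.Alloc(req, size, pMyPrivateData); // commit the spot found above
//   }
//   // otherwise this block has no suitable space and the caller tries another one.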
4034
4035
void BlockMetadata_Linear::Alloc(
4036
const AllocationRequest& request,
4037
UINT64 allocSize,
4038
void* privateData)
4039
{
4040
UINT64 offset = (UINT64)request.allocHandle - 1;
4041
const Suballocation newSuballoc = { offset, request.size, privateData, SUBALLOCATION_TYPE_ALLOCATION };
4042
4043
switch (request.algorithmData)
4044
{
4045
case ALLOC_REQUEST_UPPER_ADDRESS:
4046
{
4047
D3D12MA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
4048
"CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
4049
SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
4050
suballocations2nd.push_back(newSuballoc);
4051
m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
4052
break;
4053
}
4054
case ALLOC_REQUEST_END_OF_1ST:
4055
{
4056
SuballocationVectorType& suballocations1st = AccessSuballocations1st();
4057
4058
D3D12MA_ASSERT(suballocations1st.empty() ||
4059
offset >= suballocations1st.back().offset + suballocations1st.back().size);
4060
// Check if it fits before the end of the block.
4061
D3D12MA_ASSERT(offset + request.size <= GetSize());
4062
4063
suballocations1st.push_back(newSuballoc);
4064
break;
4065
}
4066
case ALLOC_REQUEST_END_OF_2ND:
4067
{
4068
SuballocationVectorType& suballocations1st = AccessSuballocations1st();
4069
// New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
4070
D3D12MA_ASSERT(!suballocations1st.empty() &&
4071
offset + request.size <= suballocations1st[m_1stNullItemsBeginCount].offset);
4072
SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
4073
4074
switch (m_2ndVectorMode)
4075
{
4076
case SECOND_VECTOR_EMPTY:
4077
// First allocation from second part ring buffer.
4078
D3D12MA_ASSERT(suballocations2nd.empty());
4079
m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
4080
break;
4081
case SECOND_VECTOR_RING_BUFFER:
4082
// 2-part ring buffer is already started.
4083
D3D12MA_ASSERT(!suballocations2nd.empty());
4084
break;
4085
case SECOND_VECTOR_DOUBLE_STACK:
4086
D3D12MA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
4087
break;
4088
default:
4089
D3D12MA_ASSERT(0);
4090
}
4091
4092
suballocations2nd.push_back(newSuballoc);
4093
break;
4094
}
4095
default:
4096
D3D12MA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
4097
}
4098
m_SumFreeSize -= newSuballoc.size;
4099
}
4100
4101
void BlockMetadata_Linear::Free(AllocHandle allocHandle)
4102
{
4103
SuballocationVectorType& suballocations1st = AccessSuballocations1st();
4104
SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
4105
UINT64 offset = (UINT64)allocHandle - 1;
4106
4107
if (!suballocations1st.empty())
4108
{
4109
// If it is the first live allocation in the 1st vector, just mark it free and extend the null-item run at the beginning.
4110
Suballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
4111
if (firstSuballoc.offset == offset)
4112
{
4113
firstSuballoc.type = SUBALLOCATION_TYPE_FREE;
4114
firstSuballoc.privateData = NULL;
4115
m_SumFreeSize += firstSuballoc.size;
4116
++m_1stNullItemsBeginCount;
4117
CleanupAfterFree();
4118
return;
4119
}
4120
}
4121
4122
// Last allocation in 2-part ring buffer or top of upper stack (same logic).
4123
if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
4124
m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
4125
{
4126
Suballocation& lastSuballoc = suballocations2nd.back();
4127
if (lastSuballoc.offset == offset)
4128
{
4129
m_SumFreeSize += lastSuballoc.size;
4130
suballocations2nd.pop_back();
4131
CleanupAfterFree();
4132
return;
4133
}
4134
}
4135
// Last allocation in 1st vector.
4136
else if (m_2ndVectorMode == SECOND_VECTOR_EMPTY)
4137
{
4138
Suballocation& lastSuballoc = suballocations1st.back();
4139
if (lastSuballoc.offset == offset)
4140
{
4141
m_SumFreeSize += lastSuballoc.size;
4142
suballocations1st.pop_back();
4143
CleanupAfterFree();
4144
return;
4145
}
4146
}
4147
4148
Suballocation refSuballoc;
4149
refSuballoc.offset = offset;
4150
// Rest of members stays uninitialized intentionally for better performance.
4151
4152
// Item from the middle of 1st vector.
4153
{
4154
const SuballocationVectorType::iterator it = BinaryFindSorted(
4155
suballocations1st.begin() + m_1stNullItemsBeginCount,
4156
suballocations1st.end(),
4157
refSuballoc,
4158
SuballocationOffsetLess());
4159
if (it != suballocations1st.end())
4160
{
4161
it->type = SUBALLOCATION_TYPE_FREE;
4162
it->privateData = NULL;
4163
++m_1stNullItemsMiddleCount;
4164
m_SumFreeSize += it->size;
4165
CleanupAfterFree();
4166
return;
4167
}
4168
}
4169
4170
if (m_2ndVectorMode != SECOND_VECTOR_EMPTY)
4171
{
4172
// Item from the middle of 2nd vector.
4173
const SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
4174
BinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, SuballocationOffsetLess()) :
4175
BinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, SuballocationOffsetGreater());
4176
if (it != suballocations2nd.end())
4177
{
4178
it->type = SUBALLOCATION_TYPE_FREE;
4179
it->privateData = NULL;
4180
++m_2ndNullItemsCount;
4181
m_SumFreeSize += it->size;
4182
CleanupAfterFree();
4183
return;
4184
}
4185
}
4186
4187
D3D12MA_ASSERT(0 && "Allocation to free not found in linear allocator!");
4188
}
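// Free() above handles the common cases in O(1): the oldest live allocation of the
// 1st vector just extends the null-item run at the beginning, and the most recently
// added item of the active vector is a simple pop_back(). Anything else falls back
// to a binary search by offset and is only marked as a null item; CleanupAfterFree()
// then trims or compacts the vectors as needed.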
4189
4190
void BlockMetadata_Linear::Clear()
4191
{
4192
m_SumFreeSize = GetSize();
4193
m_Suballocations0.clear();
4194
m_Suballocations1.clear();
4195
// Leaving m_1stVectorIndex unchanged - it doesn't matter.
4196
m_2ndVectorMode = SECOND_VECTOR_EMPTY;
4197
m_1stNullItemsBeginCount = 0;
4198
m_1stNullItemsMiddleCount = 0;
4199
m_2ndNullItemsCount = 0;
4200
}
4201
4202
AllocHandle BlockMetadata_Linear::GetAllocationListBegin() const
4203
{
4204
// Function only used for defragmentation, which is disabled for this algorithm
4205
D3D12MA_ASSERT(0);
4206
return (AllocHandle)0;
4207
}
4208
4209
AllocHandle BlockMetadata_Linear::GetNextAllocation(AllocHandle prevAlloc) const
4210
{
4211
// Function only used for defragmentation, which is disabled for this algorithm
4212
D3D12MA_ASSERT(0);
4213
return (AllocHandle)0;
4214
}
4215
4216
UINT64 BlockMetadata_Linear::GetNextFreeRegionSize(AllocHandle alloc) const
4217
{
4218
// Function only used for defragmentation, which is disabled for this algorithm
4219
D3D12MA_ASSERT(0);
4220
return 0;
4221
}
4222
4223
void* BlockMetadata_Linear::GetAllocationPrivateData(AllocHandle allocHandle) const
4224
{
4225
return FindSuballocation((UINT64)allocHandle - 1).privateData;
4226
}
4227
4228
void BlockMetadata_Linear::SetAllocationPrivateData(AllocHandle allocHandle, void* privateData)
4229
{
4230
Suballocation& suballoc = FindSuballocation((UINT64)allocHandle - 1);
4231
suballoc.privateData = privateData;
4232
}
4233
4234
void BlockMetadata_Linear::AddStatistics(Statistics& inoutStats) const
4235
{
4236
inoutStats.BlockCount++;
4237
inoutStats.AllocationCount += (UINT)GetAllocationCount();
4238
inoutStats.BlockBytes += GetSize();
4239
inoutStats.AllocationBytes += GetSize() - m_SumFreeSize;
4240
}
4241
4242
void BlockMetadata_Linear::AddDetailedStatistics(DetailedStatistics& inoutStats) const
4243
{
4244
inoutStats.Stats.BlockCount++;
4245
inoutStats.Stats.BlockBytes += GetSize();
4246
4247
const UINT64 size = GetSize();
4248
const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
4249
const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
4250
const size_t suballoc1stCount = suballocations1st.size();
4251
const size_t suballoc2ndCount = suballocations2nd.size();
4252
4253
UINT64 lastOffset = 0;
4254
if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
4255
{
4256
const UINT64 freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
4257
size_t nextAlloc2ndIndex = 0;
4258
while (lastOffset < freeSpace2ndTo1stEnd)
4259
{
4260
// Find next non-null allocation or move nextAllocIndex to the end.
4261
while (nextAlloc2ndIndex < suballoc2ndCount &&
4262
suballocations2nd[nextAlloc2ndIndex].privateData == NULL)
4263
{
4264
++nextAlloc2ndIndex;
4265
}
4266
4267
// Found non-null allocation.
4268
if (nextAlloc2ndIndex < suballoc2ndCount)
4269
{
4270
const Suballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
4271
4272
// 1. Process free space before this allocation.
4273
if (lastOffset < suballoc.offset)
4274
{
4275
// There is free space from lastOffset to suballoc.offset.
4276
const UINT64 unusedRangeSize = suballoc.offset - lastOffset;
4277
AddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
4278
}
4279
4280
// 2. Process this allocation.
4281
// There is allocation with suballoc.offset, suballoc.size.
4282
AddDetailedStatisticsAllocation(inoutStats, suballoc.size);
4283
4284
// 3. Prepare for next iteration.
4285
lastOffset = suballoc.offset + suballoc.size;
4286
++nextAlloc2ndIndex;
4287
}
4288
// We are at the end.
4289
else
4290
{
4291
// There is free space from lastOffset to freeSpace2ndTo1stEnd.
4292
if (lastOffset < freeSpace2ndTo1stEnd)
4293
{
4294
const UINT64 unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
4295
AddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
4296
}
4297
4298
// End of loop.
4299
lastOffset = freeSpace2ndTo1stEnd;
4300
}
4301
}
4302
}
4303
4304
size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
4305
const UINT64 freeSpace1stTo2ndEnd =
4306
m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
4307
while (lastOffset < freeSpace1stTo2ndEnd)
4308
{
4309
// Find next non-null allocation or move nextAllocIndex to the end.
4310
while (nextAlloc1stIndex < suballoc1stCount &&
4311
suballocations1st[nextAlloc1stIndex].privateData == NULL)
4312
{
4313
++nextAlloc1stIndex;
4314
}
4315
4316
// Found non-null allocation.
4317
if (nextAlloc1stIndex < suballoc1stCount)
4318
{
4319
const Suballocation& suballoc = suballocations1st[nextAlloc1stIndex];
4320
4321
// 1. Process free space before this allocation.
4322
if (lastOffset < suballoc.offset)
4323
{
4324
// There is free space from lastOffset to suballoc.offset.
4325
const UINT64 unusedRangeSize = suballoc.offset - lastOffset;
4326
AddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
4327
}
4328
4329
// 2. Process this allocation.
4330
// There is allocation with suballoc.offset, suballoc.size.
4331
AddDetailedStatisticsAllocation(inoutStats, suballoc.size);
4332
4333
// 3. Prepare for next iteration.
4334
lastOffset = suballoc.offset + suballoc.size;
4335
++nextAlloc1stIndex;
4336
}
4337
// We are at the end.
4338
else
4339
{
4340
// There is free space from lastOffset to freeSpace1stTo2ndEnd.
4341
if (lastOffset < freeSpace1stTo2ndEnd)
4342
{
4343
const UINT64 unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
4344
AddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
4345
}
4346
4347
// End of loop.
4348
lastOffset = freeSpace1stTo2ndEnd;
4349
}
4350
}
4351
4352
if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
4353
{
4354
size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
4355
while (lastOffset < size)
4356
{
4357
// Find next non-null allocation or move nextAllocIndex to the end.
4358
while (nextAlloc2ndIndex != SIZE_MAX &&
4359
suballocations2nd[nextAlloc2ndIndex].privateData == NULL)
4360
{
4361
--nextAlloc2ndIndex;
4362
}
4363
4364
// Found non-null allocation.
4365
if (nextAlloc2ndIndex != SIZE_MAX)
4366
{
4367
const Suballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
4368
4369
// 1. Process free space before this allocation.
4370
if (lastOffset < suballoc.offset)
4371
{
4372
// There is free space from lastOffset to suballoc.offset.
4373
const UINT64 unusedRangeSize = suballoc.offset - lastOffset;
4374
AddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
4375
}
4376
4377
// 2. Process this allocation.
4378
// There is allocation with suballoc.offset, suballoc.size.
4379
AddDetailedStatisticsAllocation(inoutStats, suballoc.size);
4380
4381
// 3. Prepare for next iteration.
4382
lastOffset = suballoc.offset + suballoc.size;
4383
--nextAlloc2ndIndex;
4384
}
4385
// We are at the end.
4386
else
4387
{
4388
// There is free space from lastOffset to size.
4389
if (lastOffset < size)
4390
{
4391
const UINT64 unusedRangeSize = size - lastOffset;
4392
AddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
4393
}
4394
4395
// End of loop.
4396
lastOffset = size;
4397
}
4398
}
4399
}
4400
}
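// The walk above visits the whole block in increasing offset order: first the
// wrapped-around part of the ring buffer (2nd vector), then the 1st vector, then,
// in DOUBLE_STACK mode, the upper stack (2nd vector traversed from its last
// element, which has the lowest offset of the upper stack).
// WriteAllocationInfoToJson() below repeats the same three-phase pattern.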
4401
4402
void BlockMetadata_Linear::WriteAllocationInfoToJson(JsonWriter& json) const
4403
{
4404
const UINT64 size = GetSize();
4405
const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
4406
const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
4407
const size_t suballoc1stCount = suballocations1st.size();
4408
const size_t suballoc2ndCount = suballocations2nd.size();
4409
4410
// FIRST PASS
4411
4412
size_t unusedRangeCount = 0;
4413
UINT64 usedBytes = 0;
4414
4415
UINT64 lastOffset = 0;
4416
4417
size_t alloc2ndCount = 0;
4418
if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
4419
{
4420
const UINT64 freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
4421
size_t nextAlloc2ndIndex = 0;
4422
while (lastOffset < freeSpace2ndTo1stEnd)
4423
{
4424
// Find next non-null allocation or move nextAlloc2ndIndex to the end.
4425
while (nextAlloc2ndIndex < suballoc2ndCount &&
4426
suballocations2nd[nextAlloc2ndIndex].privateData == NULL)
4427
{
4428
++nextAlloc2ndIndex;
4429
}
4430
4431
// Found non-null allocation.
4432
if (nextAlloc2ndIndex < suballoc2ndCount)
4433
{
4434
const Suballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
4435
4436
// 1. Process free space before this allocation.
4437
if (lastOffset < suballoc.offset)
4438
{
4439
// There is free space from lastOffset to suballoc.offset.
4440
++unusedRangeCount;
4441
}
4442
4443
// 2. Process this allocation.
4444
// There is allocation with suballoc.offset, suballoc.size.
4445
++alloc2ndCount;
4446
usedBytes += suballoc.size;
4447
4448
// 3. Prepare for next iteration.
4449
lastOffset = suballoc.offset + suballoc.size;
4450
++nextAlloc2ndIndex;
4451
}
4452
// We are at the end.
4453
else
4454
{
4455
if (lastOffset < freeSpace2ndTo1stEnd)
4456
{
4457
// There is free space from lastOffset to freeSpace2ndTo1stEnd.
4458
++unusedRangeCount;
4459
}
4460
4461
// End of loop.
4462
lastOffset = freeSpace2ndTo1stEnd;
4463
}
4464
}
4465
}
4466
4467
size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
4468
size_t alloc1stCount = 0;
4469
const UINT64 freeSpace1stTo2ndEnd =
4470
m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
4471
while (lastOffset < freeSpace1stTo2ndEnd)
4472
{
4473
// Find next non-null allocation or move nextAllocIndex to the end.
4474
while (nextAlloc1stIndex < suballoc1stCount &&
4475
suballocations1st[nextAlloc1stIndex].privateData == NULL)
4476
{
4477
++nextAlloc1stIndex;
4478
}
4479
4480
// Found non-null allocation.
4481
if (nextAlloc1stIndex < suballoc1stCount)
4482
{
4483
const Suballocation& suballoc = suballocations1st[nextAlloc1stIndex];
4484
4485
// 1. Process free space before this allocation.
4486
if (lastOffset < suballoc.offset)
4487
{
4488
// There is free space from lastOffset to suballoc.offset.
4489
++unusedRangeCount;
4490
}
4491
4492
// 2. Process this allocation.
4493
// There is allocation with suballoc.offset, suballoc.size.
4494
++alloc1stCount;
4495
usedBytes += suballoc.size;
4496
4497
// 3. Prepare for next iteration.
4498
lastOffset = suballoc.offset + suballoc.size;
4499
++nextAlloc1stIndex;
4500
}
4501
// We are at the end.
4502
else
4503
{
4504
if (lastOffset < size)
4505
{
4506
// There is free space from lastOffset to freeSpace1stTo2ndEnd.
4507
++unusedRangeCount;
4508
}
4509
4510
// End of loop.
4511
lastOffset = freeSpace1stTo2ndEnd;
4512
}
4513
}
4514
4515
if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
4516
{
4517
size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
4518
while (lastOffset < size)
4519
{
4520
// Find next non-null allocation or move nextAlloc2ndIndex to the end.
4521
while (nextAlloc2ndIndex != SIZE_MAX &&
4522
suballocations2nd[nextAlloc2ndIndex].privateData == NULL)
4523
{
4524
--nextAlloc2ndIndex;
4525
}
4526
4527
// Found non-null allocation.
4528
if (nextAlloc2ndIndex != SIZE_MAX)
4529
{
4530
const Suballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
4531
4532
// 1. Process free space before this allocation.
4533
if (lastOffset < suballoc.offset)
4534
{
4535
// There is free space from lastOffset to suballoc.offset.
4536
++unusedRangeCount;
4537
}
4538
4539
// 2. Process this allocation.
4540
// There is allocation with suballoc.offset, suballoc.size.
4541
++alloc2ndCount;
4542
usedBytes += suballoc.size;
4543
4544
// 3. Prepare for next iteration.
4545
lastOffset = suballoc.offset + suballoc.size;
4546
--nextAlloc2ndIndex;
4547
}
4548
// We are at the end.
4549
else
4550
{
4551
if (lastOffset < size)
4552
{
4553
// There is free space from lastOffset to size.
4554
++unusedRangeCount;
4555
}
4556
4557
// End of loop.
4558
lastOffset = size;
4559
}
4560
}
4561
}
4562
4563
const UINT64 unusedBytes = size - usedBytes;
4564
PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
4565
4566
// SECOND PASS
4567
lastOffset = 0;
4568
if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
4569
{
4570
const UINT64 freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
4571
size_t nextAlloc2ndIndex = 0;
4572
while (lastOffset < freeSpace2ndTo1stEnd)
4573
{
4574
// Find next non-null allocation or move nextAlloc2ndIndex to the end.
4575
while (nextAlloc2ndIndex < suballoc2ndCount &&
4576
suballocations2nd[nextAlloc2ndIndex].privateData == NULL)
4577
{
4578
++nextAlloc2ndIndex;
4579
}
4580
4581
// Found non-null allocation.
4582
if (nextAlloc2ndIndex < suballoc2ndCount)
4583
{
4584
const Suballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
4585
4586
// 1. Process free space before this allocation.
4587
if (lastOffset < suballoc.offset)
4588
{
4589
// There is free space from lastOffset to suballoc.offset.
4590
const UINT64 unusedRangeSize = suballoc.offset - lastOffset;
4591
PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
4592
}
4593
4594
// 2. Process this allocation.
4595
// There is allocation with suballoc.offset, suballoc.size.
4596
PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.privateData);
4597
4598
// 3. Prepare for next iteration.
4599
lastOffset = suballoc.offset + suballoc.size;
4600
++nextAlloc2ndIndex;
4601
}
4602
// We are at the end.
4603
else
4604
{
4605
if (lastOffset < freeSpace2ndTo1stEnd)
4606
{
4607
// There is free space from lastOffset to freeSpace2ndTo1stEnd.
4608
const UINT64 unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
4609
PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
4610
}
4611
4612
// End of loop.
4613
lastOffset = freeSpace2ndTo1stEnd;
4614
}
4615
}
4616
}
4617
4618
nextAlloc1stIndex = m_1stNullItemsBeginCount;
4619
while (lastOffset < freeSpace1stTo2ndEnd)
4620
{
4621
// Find next non-null allocation or move nextAllocIndex to the end.
4622
while (nextAlloc1stIndex < suballoc1stCount &&
4623
suballocations1st[nextAlloc1stIndex].privateData == NULL)
4624
{
4625
++nextAlloc1stIndex;
4626
}
4627
4628
// Found non-null allocation.
4629
if (nextAlloc1stIndex < suballoc1stCount)
4630
{
4631
const Suballocation& suballoc = suballocations1st[nextAlloc1stIndex];
4632
4633
// 1. Process free space before this allocation.
4634
if (lastOffset < suballoc.offset)
4635
{
4636
// There is free space from lastOffset to suballoc.offset.
4637
const UINT64 unusedRangeSize = suballoc.offset - lastOffset;
4638
PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
4639
}
4640
4641
// 2. Process this allocation.
4642
// There is allocation with suballoc.offset, suballoc.size.
4643
PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.privateData);
4644
4645
// 3. Prepare for next iteration.
4646
lastOffset = suballoc.offset + suballoc.size;
4647
++nextAlloc1stIndex;
4648
}
4649
// We are at the end.
4650
else
4651
{
4652
if (lastOffset < freeSpace1stTo2ndEnd)
4653
{
4654
// There is free space from lastOffset to freeSpace1stTo2ndEnd.
4655
const UINT64 unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
4656
PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
4657
}
4658
4659
// End of loop.
4660
lastOffset = freeSpace1stTo2ndEnd;
4661
}
4662
}
4663
4664
if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
4665
{
4666
size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
4667
while (lastOffset < size)
4668
{
4669
// Find next non-null allocation or move nextAlloc2ndIndex to the end.
4670
while (nextAlloc2ndIndex != SIZE_MAX &&
4671
suballocations2nd[nextAlloc2ndIndex].privateData == NULL)
4672
{
4673
--nextAlloc2ndIndex;
4674
}
4675
4676
// Found non-null allocation.
4677
if (nextAlloc2ndIndex != SIZE_MAX)
4678
{
4679
const Suballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
4680
4681
// 1. Process free space before this allocation.
4682
if (lastOffset < suballoc.offset)
4683
{
4684
// There is free space from lastOffset to suballoc.offset.
4685
const UINT64 unusedRangeSize = suballoc.offset - lastOffset;
4686
PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
4687
}
4688
4689
// 2. Process this allocation.
4690
// There is allocation with suballoc.offset, suballoc.size.
4691
PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.privateData);
4692
4693
// 3. Prepare for next iteration.
4694
lastOffset = suballoc.offset + suballoc.size;
4695
--nextAlloc2ndIndex;
4696
}
4697
// We are at the end.
4698
else
4699
{
4700
if (lastOffset < size)
4701
{
4702
// There is free space from lastOffset to size.
4703
const UINT64 unusedRangeSize = size - lastOffset;
4704
PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
4705
}
4706
4707
// End of loop.
4708
lastOffset = size;
4709
}
4710
}
4711
}
4712
4713
PrintDetailedMap_End(json);
4714
}
4715
4716
void BlockMetadata_Linear::DebugLogAllAllocations() const
4717
{
4718
const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
4719
for (auto it = suballocations1st.begin() + m_1stNullItemsBeginCount; it != suballocations1st.end(); ++it)
4720
if (it->type != SUBALLOCATION_TYPE_FREE)
4721
DebugLogAllocation(it->offset, it->size, it->privateData);
4722
4723
const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
4724
for (auto it = suballocations2nd.begin(); it != suballocations2nd.end(); ++it)
4725
if (it->type != SUBALLOCATION_TYPE_FREE)
4726
DebugLogAllocation(it->offset, it->size, it->privateData);
4727
}
4728
4729
Suballocation& BlockMetadata_Linear::FindSuballocation(UINT64 offset) const
4730
{
4731
const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
4732
const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
4733
4734
Suballocation refSuballoc;
4735
refSuballoc.offset = offset;
4736
// Rest of members stays uninitialized intentionally for better performance.
4737
4738
// Item from the 1st vector.
4739
{
4740
const SuballocationVectorType::const_iterator it = BinaryFindSorted(
4741
suballocations1st.begin() + m_1stNullItemsBeginCount,
4742
suballocations1st.end(),
4743
refSuballoc,
4744
SuballocationOffsetLess());
4745
if (it != suballocations1st.end())
4746
{
4747
return const_cast<Suballocation&>(*it);
4748
}
4749
}
4750
4751
if (m_2ndVectorMode != SECOND_VECTOR_EMPTY)
4752
{
4753
const SuballocationVectorType::const_iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
4755
BinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, SuballocationOffsetLess()) :
4756
BinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, SuballocationOffsetGreater());
4757
if (it != suballocations2nd.end())
4758
{
4759
return const_cast<Suballocation&>(*it);
4760
}
4761
}
4762
4763
D3D12MA_ASSERT(0 && "Allocation not found in linear allocator!");
4764
return const_cast<Suballocation&>(suballocations1st.back()); // Should never occur.
4765
}
4766
4767
bool BlockMetadata_Linear::ShouldCompact1st() const
4768
{
4769
const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
4770
const size_t suballocCount = AccessSuballocations1st().size();
4771
return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
4772
}
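// Worked example of the heuristic above: the condition rearranges to
// 5 * nullItemCount >= 3 * suballocCount, i.e. compact once more than 32 items
// exist and at least 60% of them are null. With 40 items of which 30 are null:
// 2*30 = 60 >= 3*(40 - 30) = 30, so the next CleanupAfterFree() compacts the
// 1st vector.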
4773
4774
void BlockMetadata_Linear::CleanupAfterFree()
4775
{
4776
SuballocationVectorType& suballocations1st = AccessSuballocations1st();
4777
SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
4778
4779
if (IsEmpty())
4780
{
4781
suballocations1st.clear();
4782
suballocations2nd.clear();
4783
m_1stNullItemsBeginCount = 0;
4784
m_1stNullItemsMiddleCount = 0;
4785
m_2ndNullItemsCount = 0;
4786
m_2ndVectorMode = SECOND_VECTOR_EMPTY;
4787
}
4788
else
4789
{
4790
const size_t suballoc1stCount = suballocations1st.size();
4791
const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
4792
D3D12MA_ASSERT(nullItem1stCount <= suballoc1stCount);
4793
4794
// Find more null items at the beginning of 1st vector.
4795
while (m_1stNullItemsBeginCount < suballoc1stCount &&
4796
suballocations1st[m_1stNullItemsBeginCount].type == SUBALLOCATION_TYPE_FREE)
4797
{
4798
++m_1stNullItemsBeginCount;
4799
--m_1stNullItemsMiddleCount;
4800
}
4801
4802
// Find more null items at the end of 1st vector.
4803
while (m_1stNullItemsMiddleCount > 0 &&
4804
suballocations1st.back().type == SUBALLOCATION_TYPE_FREE)
4805
{
4806
--m_1stNullItemsMiddleCount;
4807
suballocations1st.pop_back();
4808
}
4809
4810
// Find more null items at the end of 2nd vector.
4811
while (m_2ndNullItemsCount > 0 &&
4812
suballocations2nd.back().type == SUBALLOCATION_TYPE_FREE)
4813
{
4814
--m_2ndNullItemsCount;
4815
suballocations2nd.pop_back();
4816
}
4817
4818
// Find more null items at the beginning of 2nd vector.
4819
while (m_2ndNullItemsCount > 0 &&
4820
suballocations2nd[0].type == SUBALLOCATION_TYPE_FREE)
4821
{
4822
--m_2ndNullItemsCount;
4823
suballocations2nd.remove(0);
4824
}
4825
4826
if (ShouldCompact1st())
4827
{
4828
const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
4829
size_t srcIndex = m_1stNullItemsBeginCount;
4830
for (size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
4831
{
4832
while (suballocations1st[srcIndex].type == SUBALLOCATION_TYPE_FREE)
4833
{
4834
++srcIndex;
4835
}
4836
if (dstIndex != srcIndex)
4837
{
4838
suballocations1st[dstIndex] = suballocations1st[srcIndex];
4839
}
4840
++srcIndex;
4841
}
4842
suballocations1st.resize(nonNullItemCount);
4843
m_1stNullItemsBeginCount = 0;
4844
m_1stNullItemsMiddleCount = 0;
4845
}
4846
4847
// 2nd vector became empty.
4848
if (suballocations2nd.empty())
4849
{
4850
m_2ndVectorMode = SECOND_VECTOR_EMPTY;
4851
}
4852
4853
// 1st vector became empty.
4854
if (suballocations1st.size() - m_1stNullItemsBeginCount == 0)
4855
{
4856
suballocations1st.clear();
4857
m_1stNullItemsBeginCount = 0;
4858
4859
if (!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
4860
{
4861
// Swap 1st with 2nd. Now 2nd is empty.
4862
m_2ndVectorMode = SECOND_VECTOR_EMPTY;
4863
m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
4864
while (m_1stNullItemsBeginCount < suballocations2nd.size() &&
4865
suballocations2nd[m_1stNullItemsBeginCount].type == SUBALLOCATION_TYPE_FREE)
4866
{
4867
++m_1stNullItemsBeginCount;
4868
--m_1stNullItemsMiddleCount;
4869
}
4870
m_2ndNullItemsCount = 0;
4871
m_1stVectorIndex ^= 1;
4872
}
4873
}
4874
}
4875
4876
D3D12MA_HEAVY_ASSERT(Validate());
4877
}
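// CleanupAfterFree() above keeps the bookkeeping bounded: it trims null items from
// both ends of both vectors, compacts the 1st vector once ShouldCompact1st()
// triggers, and, when the 1st vector drains while a ring buffer is active, promotes
// the 2nd vector to become the new 1st by flipping m_1stVectorIndex.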
4878
4879
bool BlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
4880
UINT64 allocSize,
4881
UINT64 allocAlignment,
4882
AllocationRequest* pAllocationRequest)
4883
{
4884
const UINT64 blockSize = GetSize();
4885
SuballocationVectorType& suballocations1st = AccessSuballocations1st();
4886
SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
4887
4888
if (m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
4889
{
4890
// Try to allocate at the end of 1st vector.
4891
4892
UINT64 resultBaseOffset = 0;
4893
if (!suballocations1st.empty())
4894
{
4895
const Suballocation& lastSuballoc = suballocations1st.back();
4896
resultBaseOffset = lastSuballoc.offset + lastSuballoc.size + GetDebugMargin();
4897
}
4898
4899
// Start from offset equal to beginning of free space.
4900
UINT64 resultOffset = resultBaseOffset;
4901
// Apply alignment.
4902
resultOffset = AlignUp(resultOffset, allocAlignment);
4903
4904
const UINT64 freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
4905
suballocations2nd.back().offset : blockSize;
4906
4907
// There is enough free space at the end after alignment.
4908
if (resultOffset + allocSize + GetDebugMargin() <= freeSpaceEnd)
4909
{
4910
// All tests passed: Success.
4911
pAllocationRequest->allocHandle = (AllocHandle)(resultOffset + 1);
4912
// pAllocationRequest->item, customData unused.
4913
pAllocationRequest->algorithmData = ALLOC_REQUEST_END_OF_1ST;
4914
return true;
4915
}
4916
}
4917
4918
// Wrap around to the end of the 2nd vector. Try to allocate there, treating the
// beginning of the 1st vector as the end of the free space.
4920
if (m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
4921
{
4922
D3D12MA_ASSERT(!suballocations1st.empty());
4923
4924
UINT64 resultBaseOffset = 0;
4925
if (!suballocations2nd.empty())
4926
{
4927
const Suballocation& lastSuballoc = suballocations2nd.back();
4928
resultBaseOffset = lastSuballoc.offset + lastSuballoc.size + GetDebugMargin();
4929
}
4930
4931
// Start from offset equal to beginning of free space.
4932
UINT64 resultOffset = resultBaseOffset;
4933
4934
// Apply alignment.
4935
resultOffset = AlignUp(resultOffset, allocAlignment);
4936
4937
size_t index1st = m_1stNullItemsBeginCount;
4938
// There is enough free space at the end after alignment.
4939
if ((index1st == suballocations1st.size() && resultOffset + allocSize + GetDebugMargin() <= blockSize) ||
4940
(index1st < suballocations1st.size() && resultOffset + allocSize + GetDebugMargin() <= suballocations1st[index1st].offset))
4941
{
4942
// All tests passed: Success.
4943
pAllocationRequest->allocHandle = (AllocHandle)(resultOffset + 1);
4944
pAllocationRequest->algorithmData = ALLOC_REQUEST_END_OF_2ND;
4945
// pAllocationRequest->item, customData unused.
4946
return true;
4947
}
4948
}
4949
return false;
4950
}
4951
4952
bool BlockMetadata_Linear::CreateAllocationRequest_UpperAddress(
4953
UINT64 allocSize,
4954
UINT64 allocAlignment,
4955
AllocationRequest* pAllocationRequest)
4956
{
4957
const UINT64 blockSize = GetSize();
4958
SuballocationVectorType& suballocations1st = AccessSuballocations1st();
4959
SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
4960
4961
if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
4962
{
4963
D3D12MA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
4964
return false;
4965
}
4966
4967
// Try to allocate before 2nd.back(), or end of block if 2nd.empty().
4968
if (allocSize > blockSize)
4969
{
4970
return false;
4971
}
4972
UINT64 resultBaseOffset = blockSize - allocSize;
4973
if (!suballocations2nd.empty())
4974
{
4975
const Suballocation& lastSuballoc = suballocations2nd.back();
4976
resultBaseOffset = lastSuballoc.offset - allocSize;
4977
if (allocSize > lastSuballoc.offset)
4978
{
4979
return false;
4980
}
4981
}
4982
4983
// Start from offset equal to end of free space.
4984
UINT64 resultOffset = resultBaseOffset;
4985
// Apply debugMargin at the end.
4986
if (GetDebugMargin() > 0)
4987
{
4988
if (resultOffset < GetDebugMargin())
4989
{
4990
return false;
4991
}
4992
resultOffset -= GetDebugMargin();
4993
}
4994
4995
// Apply alignment.
4996
resultOffset = AlignDown(resultOffset, allocAlignment);
4997
// There is enough free space.
4998
const UINT64 endOf1st = !suballocations1st.empty() ?
4999
suballocations1st.back().offset + suballocations1st.back().size : 0;
5000
5001
if (endOf1st + GetDebugMargin() <= resultOffset)
5002
{
5003
// All tests passed: Success.
5004
pAllocationRequest->allocHandle = (AllocHandle)(resultOffset + 1);
5005
// pAllocationRequest->item unused.
5006
pAllocationRequest->algorithmData = ALLOC_REQUEST_UPPER_ADDRESS;
5007
return true;
5008
}
5009
return false;
5010
}
5011
#endif // _D3D12MA_BLOCK_METADATA_LINEAR_FUNCTIONS
5012
#endif // _D3D12MA_BLOCK_METADATA_LINEAR
5013
5014
#ifndef _D3D12MA_BLOCK_METADATA_TLSF
5015
class BlockMetadata_TLSF : public BlockMetadata
5016
{
5017
public:
5018
BlockMetadata_TLSF(const ALLOCATION_CALLBACKS* allocationCallbacks, bool isVirtual);
5019
virtual ~BlockMetadata_TLSF();
5020
5021
size_t GetAllocationCount() const override { return m_AllocCount; }
5022
size_t GetFreeRegionsCount() const override { return m_BlocksFreeCount + 1; }
5023
UINT64 GetSumFreeSize() const override { return m_BlocksFreeSize + m_NullBlock->size; }
5024
bool IsEmpty() const override { return m_NullBlock->offset == 0; }
5025
UINT64 GetAllocationOffset(AllocHandle allocHandle) const override { return ((Block*)allocHandle)->offset; };
5026
5027
void Init(UINT64 size) override;
5028
bool Validate() const override;
5029
void GetAllocationInfo(AllocHandle allocHandle, VIRTUAL_ALLOCATION_INFO& outInfo) const override;
5030
5031
bool CreateAllocationRequest(
5032
UINT64 allocSize,
5033
UINT64 allocAlignment,
5034
bool upperAddress,
5035
UINT32 strategy,
5036
AllocationRequest* pAllocationRequest) override;
5037
5038
void Alloc(
5039
const AllocationRequest& request,
5040
UINT64 allocSize,
5041
void* privateData) override;
5042
5043
void Free(AllocHandle allocHandle) override;
5044
void Clear() override;
5045
5046
AllocHandle GetAllocationListBegin() const override;
5047
AllocHandle GetNextAllocation(AllocHandle prevAlloc) const override;
5048
UINT64 GetNextFreeRegionSize(AllocHandle alloc) const override;
5049
void* GetAllocationPrivateData(AllocHandle allocHandle) const override;
5050
void SetAllocationPrivateData(AllocHandle allocHandle, void* privateData) override;
5051
5052
void AddStatistics(Statistics& inoutStats) const override;
5053
void AddDetailedStatistics(DetailedStatistics& inoutStats) const override;
5054
void WriteAllocationInfoToJson(JsonWriter& json) const override;
5055
void DebugLogAllAllocations() const override;
5056
5057
private:
5058
// According to the original paper, a value of 4 or 5 is preferable:
// M. Masmano, I. Ripoll, A. Crespo, and J. Real, "TLSF: a New Dynamic Memory Allocator for Real-Time Systems"
// http://www.gii.upv.es/tlsf/files/ecrts04_tlsf.pdf
5061
static const UINT8 SECOND_LEVEL_INDEX = 5;
5062
static const UINT16 SMALL_BUFFER_SIZE = 256;
5063
static const UINT INITIAL_BLOCK_ALLOC_COUNT = 16;
5064
static const UINT8 MEMORY_CLASS_SHIFT = 7;
5065
static const UINT8 MAX_MEMORY_CLASSES = 65 - MEMORY_CLASS_SHIFT;
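// Informal summary of the bucketing, based on the constants above and on Init() /
// CreateAllocationRequest() below (the exact index math lives in
// SizeToMemoryClass() / SizeToSecondIndex()): sizes up to SMALL_BUFFER_SIZE fall
// into memory class 0, which is split linearly into a few small lists
// (2^SECOND_LEVEL_INDEX of them for virtual blocks, 4 otherwise); each larger
// power-of-two range forms its own memory class split into 2^SECOND_LEVEL_INDEX
// sub-lists, so a free block of roughly the right size can be located via the
// free bitmaps in bounded time regardless of how many free blocks exist.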
5066
5067
class Block
5068
{
5069
public:
5070
UINT64 offset;
5071
UINT64 size;
5072
Block* prevPhysical;
5073
Block* nextPhysical;
5074
5075
void MarkFree() { prevFree = NULL; }
5076
void MarkTaken() { prevFree = this; }
5077
bool IsFree() const { return prevFree != this; }
5078
void*& PrivateData() { D3D12MA_HEAVY_ASSERT(!IsFree()); return privateData; }
5079
Block*& PrevFree() { return prevFree; }
5080
Block*& NextFree() { D3D12MA_HEAVY_ASSERT(IsFree()); return nextFree; }
5081
5082
private:
5083
Block* prevFree; // Address of the same block here indicates that block is taken
5084
union
5085
{
5086
Block* nextFree;
5087
void* privateData;
5088
};
5089
};
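// A taken block is encoded by pointing prevFree at the block itself (MarkTaken()
// above), so IsFree() is a single pointer comparison and no separate flag is
// needed; once a block is taken, the union can reuse the nextFree slot to store
// the allocation's private data.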
5090
5091
size_t m_AllocCount = 0;
5092
// Total number of free blocks besides null block
5093
size_t m_BlocksFreeCount = 0;
5094
// Total size of free blocks excluding null block
5095
UINT64 m_BlocksFreeSize = 0;
5096
UINT32 m_IsFreeBitmap = 0;
5097
UINT8 m_MemoryClasses = 0;
5098
UINT32 m_InnerIsFreeBitmap[MAX_MEMORY_CLASSES];
5099
UINT32 m_ListsCount = 0;
5100
/*
5101
* 0: 0-3 lists for small buffers
5102
* 1+: 0-(2^SLI-1) lists for normal buffers
5103
*/
5104
Block** m_FreeList = NULL;
5105
PoolAllocator<Block> m_BlockAllocator;
5106
Block* m_NullBlock = NULL;
5107
5108
UINT8 SizeToMemoryClass(UINT64 size) const;
5109
UINT16 SizeToSecondIndex(UINT64 size, UINT8 memoryClass) const;
5110
UINT32 GetListIndex(UINT8 memoryClass, UINT16 secondIndex) const;
5111
UINT32 GetListIndex(UINT64 size) const;
5112
5113
void RemoveFreeBlock(Block* block);
5114
void InsertFreeBlock(Block* block);
5115
void MergeBlock(Block* block, Block* prev);
5116
5117
Block* FindFreeBlock(UINT64 size, UINT32& listIndex) const;
5118
bool CheckBlock(
5119
Block& block,
5120
UINT32 listIndex,
5121
UINT64 allocSize,
5122
UINT64 allocAlignment,
5123
AllocationRequest* pAllocationRequest);
5124
5125
D3D12MA_CLASS_NO_COPY(BlockMetadata_TLSF)
5126
};
5127
5128
#ifndef _D3D12MA_BLOCK_METADATA_TLSF_FUNCTIONS
5129
BlockMetadata_TLSF::BlockMetadata_TLSF(const ALLOCATION_CALLBACKS* allocationCallbacks, bool isVirtual)
5130
: BlockMetadata(allocationCallbacks, isVirtual),
5131
m_BlockAllocator(*allocationCallbacks, INITIAL_BLOCK_ALLOC_COUNT)
5132
{
5133
D3D12MA_ASSERT(allocationCallbacks);
5134
}
5135
5136
BlockMetadata_TLSF::~BlockMetadata_TLSF()
5137
{
5138
D3D12MA_DELETE_ARRAY(*GetAllocs(), m_FreeList, m_ListsCount);
5139
}
5140
5141
void BlockMetadata_TLSF::Init(UINT64 size)
5142
{
5143
BlockMetadata::Init(size);
5144
5145
m_NullBlock = m_BlockAllocator.Alloc();
5146
m_NullBlock->size = size;
5147
m_NullBlock->offset = 0;
5148
m_NullBlock->prevPhysical = NULL;
5149
m_NullBlock->nextPhysical = NULL;
5150
m_NullBlock->MarkFree();
5151
m_NullBlock->NextFree() = NULL;
5152
m_NullBlock->PrevFree() = NULL;
5153
UINT8 memoryClass = SizeToMemoryClass(size);
5154
UINT16 sli = SizeToSecondIndex(size, memoryClass);
5155
m_ListsCount = (memoryClass == 0 ? 0 : (memoryClass - 1) * (1UL << SECOND_LEVEL_INDEX) + sli) + 1;
5156
if (IsVirtual())
5157
m_ListsCount += 1UL << SECOND_LEVEL_INDEX;
5158
else
5159
m_ListsCount += 4;
5160
5161
m_MemoryClasses = memoryClass + 2;
5162
memset(m_InnerIsFreeBitmap, 0, MAX_MEMORY_CLASSES * sizeof(UINT32));
5163
5164
m_FreeList = D3D12MA_NEW_ARRAY(*GetAllocs(), Block*, m_ListsCount);
5165
memset(m_FreeList, 0, m_ListsCount * sizeof(Block*));
5166
}
5167
5168
bool BlockMetadata_TLSF::Validate() const
5169
{
5170
D3D12MA_VALIDATE(GetSumFreeSize() <= GetSize());
5171
5172
UINT64 calculatedSize = m_NullBlock->size;
5173
UINT64 calculatedFreeSize = m_NullBlock->size;
5174
size_t allocCount = 0;
5175
size_t freeCount = 0;
5176
5177
// Check integrity of free lists
5178
for (UINT32 list = 0; list < m_ListsCount; ++list)
5179
{
5180
Block* block = m_FreeList[list];
5181
if (block != NULL)
5182
{
5183
D3D12MA_VALIDATE(block->IsFree());
5184
D3D12MA_VALIDATE(block->PrevFree() == NULL);
5185
while (block->NextFree())
5186
{
5187
D3D12MA_VALIDATE(block->NextFree()->IsFree());
5188
D3D12MA_VALIDATE(block->NextFree()->PrevFree() == block);
5189
block = block->NextFree();
5190
}
5191
}
5192
}
5193
5194
D3D12MA_VALIDATE(m_NullBlock->nextPhysical == NULL);
5195
if (m_NullBlock->prevPhysical)
5196
{
5197
D3D12MA_VALIDATE(m_NullBlock->prevPhysical->nextPhysical == m_NullBlock);
5198
}
5199
5200
// Check all blocks
5201
UINT64 nextOffset = m_NullBlock->offset;
5202
for (Block* prev = m_NullBlock->prevPhysical; prev != NULL; prev = prev->prevPhysical)
5203
{
5204
D3D12MA_VALIDATE(prev->offset + prev->size == nextOffset);
5205
nextOffset = prev->offset;
5206
calculatedSize += prev->size;
5207
5208
UINT32 listIndex = GetListIndex(prev->size);
5209
if (prev->IsFree())
5210
{
5211
++freeCount;
5212
// Check if free block belongs to free list
5213
Block* freeBlock = m_FreeList[listIndex];
5214
D3D12MA_VALIDATE(freeBlock != NULL);
5215
5216
bool found = false;
5217
do
5218
{
5219
if (freeBlock == prev)
5220
found = true;
5221
5222
freeBlock = freeBlock->NextFree();
5223
} while (!found && freeBlock != NULL);
5224
5225
D3D12MA_VALIDATE(found);
5226
calculatedFreeSize += prev->size;
5227
}
5228
else
5229
{
5230
++allocCount;
5231
// Check if taken block is not on a free list
5232
Block* freeBlock = m_FreeList[listIndex];
5233
while (freeBlock)
5234
{
5235
D3D12MA_VALIDATE(freeBlock != prev);
5236
freeBlock = freeBlock->NextFree();
5237
}
5238
}
5239
5240
if (prev->prevPhysical)
5241
{
5242
D3D12MA_VALIDATE(prev->prevPhysical->nextPhysical == prev);
5243
}
5244
}
5245
5246
D3D12MA_VALIDATE(nextOffset == 0);
5247
D3D12MA_VALIDATE(calculatedSize == GetSize());
5248
D3D12MA_VALIDATE(calculatedFreeSize == GetSumFreeSize());
5249
D3D12MA_VALIDATE(allocCount == m_AllocCount);
5250
D3D12MA_VALIDATE(freeCount == m_BlocksFreeCount);
5251
5252
return true;
5253
}
5254
5255
void BlockMetadata_TLSF::GetAllocationInfo(AllocHandle allocHandle, VIRTUAL_ALLOCATION_INFO& outInfo) const
5256
{
5257
Block* block = (Block*)allocHandle;
5258
D3D12MA_ASSERT(!block->IsFree() && "Cannot get allocation info for free block!");
5259
outInfo.Offset = block->offset;
5260
outInfo.Size = block->size;
5261
outInfo.pPrivateData = block->PrivateData();
5262
}
5263
5264
bool BlockMetadata_TLSF::CreateAllocationRequest(
5265
UINT64 allocSize,
5266
UINT64 allocAlignment,
5267
bool upperAddress,
5268
UINT32 strategy,
5269
AllocationRequest* pAllocationRequest)
5270
{
5271
D3D12MA_ASSERT(allocSize > 0 && "Cannot allocate empty block!");
5272
D3D12MA_ASSERT(!upperAddress && "ALLOCATION_FLAG_UPPER_ADDRESS can be used only with linear algorithm.");
5273
D3D12MA_ASSERT(pAllocationRequest != NULL);
5274
D3D12MA_HEAVY_ASSERT(Validate());
5275
5276
allocSize += GetDebugMargin();
5277
// Quick check for too small pool
5278
if (allocSize > GetSumFreeSize())
5279
return false;
5280
5281
// If no free blocks in pool then check only null block
5282
if (m_BlocksFreeCount == 0)
5283
return CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, pAllocationRequest);
5284
5285
// Round the size up into the next list bucket (the usual TLSF trick: a free block found in that bucket is large enough, alignment permitting)
5286
UINT64 sizeForNextList = allocSize;
5287
UINT16 smallSizeStep = SMALL_BUFFER_SIZE / (IsVirtual() ? 1 << SECOND_LEVEL_INDEX : 4);
5288
if (allocSize > SMALL_BUFFER_SIZE)
5289
{
5290
sizeForNextList += (1ULL << (BitScanMSB(allocSize) - SECOND_LEVEL_INDEX));
5291
}
5292
else if (allocSize > SMALL_BUFFER_SIZE - smallSizeStep)
5293
sizeForNextList = SMALL_BUFFER_SIZE + 1;
5294
else
5295
sizeForNextList += smallSizeStep;
5296
5297
UINT32 nextListIndex = 0;
5298
UINT32 prevListIndex = 0;
5299
Block* nextListBlock = NULL;
5300
Block* prevListBlock = NULL;
5301
5302
// Check blocks according to strategies
5303
if (strategy & ALLOCATION_FLAG_STRATEGY_MIN_TIME)
5304
{
5305
// Quick check for larger block first
5306
nextListBlock = FindFreeBlock(sizeForNextList, nextListIndex);
5307
if (nextListBlock != NULL && CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, pAllocationRequest))
5308
return true;
5309
5310
// If not fitted then null block
5311
if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, pAllocationRequest))
5312
return true;
5313
5314
// Null block failed, search larger bucket
5315
while (nextListBlock)
5316
{
5317
if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, pAllocationRequest))
5318
return true;
5319
nextListBlock = nextListBlock->NextFree();
5320
}
5321
5322
// Failed again, check best fit bucket
5323
prevListBlock = FindFreeBlock(allocSize, prevListIndex);
5324
while (prevListBlock)
5325
{
5326
if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, pAllocationRequest))
5327
return true;
5328
prevListBlock = prevListBlock->NextFree();
5329
}
5330
}
5331
else if (strategy & ALLOCATION_FLAG_STRATEGY_MIN_MEMORY)
5332
{
5333
// Check best fit bucket
5334
prevListBlock = FindFreeBlock(allocSize, prevListIndex);
5335
while (prevListBlock)
5336
{
5337
if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, pAllocationRequest))
5338
return true;
5339
prevListBlock = prevListBlock->NextFree();
5340
}
5341
5342
// If failed check null block
5343
if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, pAllocationRequest))
5344
return true;
5345
5346
// Check larger bucket
5347
nextListBlock = FindFreeBlock(sizeForNextList, nextListIndex);
5348
while (nextListBlock)
5349
{
5350
if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, pAllocationRequest))
5351
return true;
5352
nextListBlock = nextListBlock->NextFree();
5353
}
5354
}
5355
else if (strategy & ALLOCATION_FLAG_STRATEGY_MIN_OFFSET)
5356
{
5357
// Perform search from the start
5358
Vector<Block*> blockList(m_BlocksFreeCount, *GetAllocs());
5359
5360
size_t i = m_BlocksFreeCount;
5361
for (Block* block = m_NullBlock->prevPhysical; block != NULL; block = block->prevPhysical)
5362
{
5363
if (block->IsFree() && block->size >= allocSize)
5364
blockList[--i] = block;
5365
}
5366
5367
for (; i < m_BlocksFreeCount; ++i)
5368
{
5369
Block& block = *blockList[i];
5370
if (CheckBlock(block, GetListIndex(block.size), allocSize, allocAlignment, pAllocationRequest))
5371
return true;
5372
}
5373
5374
// If failed check null block
5375
if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, pAllocationRequest))
5376
return true;
5377
5378
// Whole range searched, no more memory
5379
return false;
5380
}
5381
else
5382
{
5383
// Check larger bucket
5384
nextListBlock = FindFreeBlock(sizeForNextList, nextListIndex);
5385
while (nextListBlock)
5386
{
5387
if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, pAllocationRequest))
5388
return true;
5389
nextListBlock = nextListBlock->NextFree();
5390
}
5391
5392
// If failed check null block
5393
if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, pAllocationRequest))
5394
return true;
5395
5396
// Check best fit bucket
5397
prevListBlock = FindFreeBlock(allocSize, prevListIndex);
5398
while (prevListBlock)
5399
{
5400
if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, pAllocationRequest))
5401
return true;
5402
prevListBlock = prevListBlock->NextFree();
5403
}
5404
}
5405
5406
// Worst case, full search has to be done
5407
while (++nextListIndex < m_ListsCount)
5408
{
5409
nextListBlock = m_FreeList[nextListIndex];
5410
while (nextListBlock)
5411
{
5412
if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, pAllocationRequest))
5413
return true;
5414
nextListBlock = nextListBlock->NextFree();
5415
}
5416
}
5417
5418
// No more memory sadly
5419
return false;
5420
}
5421
5422
void BlockMetadata_TLSF::Alloc(
5423
const AllocationRequest& request,
5424
UINT64 allocSize,
5425
void* privateData)
5426
{
5427
// Get block and pop it from the free list
5428
Block* currentBlock = (Block*)request.allocHandle;
5429
UINT64 offset = request.algorithmData;
5430
D3D12MA_ASSERT(currentBlock != NULL);
5431
D3D12MA_ASSERT(currentBlock->offset <= offset);
5432
5433
if (currentBlock != m_NullBlock)
5434
RemoveFreeBlock(currentBlock);
5435
5436
// Append missing alignment to prev block or create new one
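// Example (illustrative values): if the chosen block starts at offset 96 and the request
// needs 256-byte alignment, CheckBlock() stored algorithmData = 256, so the 160-byte gap in
// front is either appended to a free physical predecessor or registered as its own free block.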
5437
UINT64 missingAlignment = offset - currentBlock->offset;
5438
if (missingAlignment)
5439
{
5440
Block* prevBlock = currentBlock->prevPhysical;
5441
D3D12MA_ASSERT(prevBlock != NULL && "There should be no missing alignment at offset 0!");
5442
5443
if (prevBlock->IsFree() && prevBlock->size != GetDebugMargin())
5444
{
5445
UINT32 oldList = GetListIndex(prevBlock->size);
5446
prevBlock->size += missingAlignment;
5447
// Check if new size crosses list bucket
5448
if (oldList != GetListIndex(prevBlock->size))
5449
{
5450
prevBlock->size -= missingAlignment;
5451
RemoveFreeBlock(prevBlock);
5452
prevBlock->size += missingAlignment;
5453
InsertFreeBlock(prevBlock);
5454
}
5455
else
5456
m_BlocksFreeSize += missingAlignment;
5457
}
5458
else
5459
{
5460
Block* newBlock = m_BlockAllocator.Alloc();
5461
currentBlock->prevPhysical = newBlock;
5462
prevBlock->nextPhysical = newBlock;
5463
newBlock->prevPhysical = prevBlock;
5464
newBlock->nextPhysical = currentBlock;
5465
newBlock->size = missingAlignment;
5466
newBlock->offset = currentBlock->offset;
5467
newBlock->MarkTaken();
5468
5469
InsertFreeBlock(newBlock);
5470
}
5471
5472
currentBlock->size -= missingAlignment;
5473
currentBlock->offset += missingAlignment;
5474
}
5475
5476
UINT64 size = request.size + GetDebugMargin();
5477
if (currentBlock->size == size)
5478
{
5479
if (currentBlock == m_NullBlock)
5480
{
5481
// Setup new null block
5482
m_NullBlock = m_BlockAllocator.Alloc();
5483
m_NullBlock->size = 0;
5484
m_NullBlock->offset = currentBlock->offset + size;
5485
m_NullBlock->prevPhysical = currentBlock;
5486
m_NullBlock->nextPhysical = NULL;
5487
m_NullBlock->MarkFree();
5488
m_NullBlock->PrevFree() = NULL;
5489
m_NullBlock->NextFree() = NULL;
5490
currentBlock->nextPhysical = m_NullBlock;
5491
currentBlock->MarkTaken();
5492
}
5493
}
5494
else
5495
{
5496
D3D12MA_ASSERT(currentBlock->size > size && "Proper block already found, shouldn't find smaller one!");
5497
5498
// Create new free block
5499
Block* newBlock = m_BlockAllocator.Alloc();
5500
newBlock->size = currentBlock->size - size;
5501
newBlock->offset = currentBlock->offset + size;
5502
newBlock->prevPhysical = currentBlock;
5503
newBlock->nextPhysical = currentBlock->nextPhysical;
5504
currentBlock->nextPhysical = newBlock;
5505
currentBlock->size = size;
5506
5507
if (currentBlock == m_NullBlock)
5508
{
5509
m_NullBlock = newBlock;
5510
m_NullBlock->MarkFree();
5511
m_NullBlock->NextFree() = NULL;
5512
m_NullBlock->PrevFree() = NULL;
5513
currentBlock->MarkTaken();
5514
}
5515
else
5516
{
5517
newBlock->nextPhysical->prevPhysical = newBlock;
5518
newBlock->MarkTaken();
5519
InsertFreeBlock(newBlock);
5520
}
5521
}
5522
currentBlock->PrivateData() = privateData;
5523
5524
if (GetDebugMargin() > 0)
5525
{
5526
currentBlock->size -= GetDebugMargin();
5527
Block* newBlock = m_BlockAllocator.Alloc();
5528
newBlock->size = GetDebugMargin();
5529
newBlock->offset = currentBlock->offset + currentBlock->size;
5530
newBlock->prevPhysical = currentBlock;
5531
newBlock->nextPhysical = currentBlock->nextPhysical;
5532
newBlock->MarkTaken();
5533
currentBlock->nextPhysical->prevPhysical = newBlock;
5534
currentBlock->nextPhysical = newBlock;
5535
InsertFreeBlock(newBlock);
5536
}
5537
++m_AllocCount;
5538
}
5539
5540
void BlockMetadata_TLSF::Free(AllocHandle allocHandle)
5541
{
5542
Block* block = (Block*)allocHandle;
5543
Block* next = block->nextPhysical;
5544
D3D12MA_ASSERT(!block->IsFree() && "Block is already free!");
5545
5546
--m_AllocCount;
5547
if (GetDebugMargin() > 0)
5548
{
5549
RemoveFreeBlock(next);
5550
MergeBlock(next, block);
5551
block = next;
5552
next = next->nextPhysical;
5553
}
5554
5555
// Try merging
5556
Block* prev = block->prevPhysical;
5557
if (prev != NULL && prev->IsFree() && prev->size != GetDebugMargin())
5558
{
5559
RemoveFreeBlock(prev);
5560
MergeBlock(block, prev);
5561
}
5562
5563
if (!next->IsFree())
5564
InsertFreeBlock(block);
5565
else if (next == m_NullBlock)
5566
MergeBlock(m_NullBlock, block);
5567
else
5568
{
5569
RemoveFreeBlock(next);
5570
MergeBlock(next, block);
5571
InsertFreeBlock(next);
5572
}
5573
}
5574
5575
void BlockMetadata_TLSF::Clear()
5576
{
5577
m_AllocCount = 0;
5578
m_BlocksFreeCount = 0;
5579
m_BlocksFreeSize = 0;
5580
m_IsFreeBitmap = 0;
5581
m_NullBlock->offset = 0;
5582
m_NullBlock->size = GetSize();
5583
Block* block = m_NullBlock->prevPhysical;
5584
m_NullBlock->prevPhysical = NULL;
5585
while (block)
5586
{
5587
Block* prev = block->prevPhysical;
5588
m_BlockAllocator.Free(block);
5589
block = prev;
5590
}
5591
memset(m_FreeList, 0, m_ListsCount * sizeof(Block*));
5592
memset(m_InnerIsFreeBitmap, 0, m_MemoryClasses * sizeof(UINT32));
5593
}
5594
5595
AllocHandle BlockMetadata_TLSF::GetAllocationListBegin() const
5596
{
5597
if (m_AllocCount == 0)
5598
return (AllocHandle)0;
5599
5600
for (Block* block = m_NullBlock->prevPhysical; block; block = block->prevPhysical)
5601
{
5602
if (!block->IsFree())
5603
return (AllocHandle)block;
5604
}
5605
D3D12MA_ASSERT(false && "If m_AllocCount > 0 then there should be at least one allocation!");
5606
return (AllocHandle)0;
5607
}
5608
5609
AllocHandle BlockMetadata_TLSF::GetNextAllocation(AllocHandle prevAlloc) const
5610
{
5611
Block* startBlock = (Block*)prevAlloc;
5612
D3D12MA_ASSERT(!startBlock->IsFree() && "Incorrect block!");
5613
5614
for (Block* block = startBlock->prevPhysical; block; block = block->prevPhysical)
5615
{
5616
if (!block->IsFree())
5617
return (AllocHandle)block;
5618
}
5619
return (AllocHandle)0;
5620
}
5621
5622
UINT64 BlockMetadata_TLSF::GetNextFreeRegionSize(AllocHandle alloc) const
5623
{
5624
Block* block = (Block*)alloc;
5625
D3D12MA_ASSERT(!block->IsFree() && "Incorrect block!");
5626
5627
if (block->prevPhysical)
5628
return block->prevPhysical->IsFree() ? block->prevPhysical->size : 0;
5629
return 0;
5630
}
5631
5632
void* BlockMetadata_TLSF::GetAllocationPrivateData(AllocHandle allocHandle) const
5633
{
5634
Block* block = (Block*)allocHandle;
5635
D3D12MA_ASSERT(!block->IsFree() && "Cannot get user data for free block!");
5636
return block->PrivateData();
5637
}
5638
5639
void BlockMetadata_TLSF::SetAllocationPrivateData(AllocHandle allocHandle, void* privateData)
5640
{
5641
Block* block = (Block*)allocHandle;
5642
D3D12MA_ASSERT(!block->IsFree() && "Trying to set user data for not allocated block!");
5643
block->PrivateData() = privateData;
5644
}
5645
5646
void BlockMetadata_TLSF::AddStatistics(Statistics& inoutStats) const
5647
{
5648
inoutStats.BlockCount++;
5649
inoutStats.AllocationCount += static_cast<UINT>(m_AllocCount);
5650
inoutStats.BlockBytes += GetSize();
5651
inoutStats.AllocationBytes += GetSize() - GetSumFreeSize();
5652
}
5653
5654
void BlockMetadata_TLSF::AddDetailedStatistics(DetailedStatistics& inoutStats) const
5655
{
5656
inoutStats.Stats.BlockCount++;
5657
inoutStats.Stats.BlockBytes += GetSize();
5658
5659
for (Block* block = m_NullBlock->prevPhysical; block != NULL; block = block->prevPhysical)
5660
{
5661
if (block->IsFree())
5662
AddDetailedStatisticsUnusedRange(inoutStats, block->size);
5663
else
5664
AddDetailedStatisticsAllocation(inoutStats, block->size);
5665
}
5666
5667
if (m_NullBlock->size > 0)
5668
AddDetailedStatisticsUnusedRange(inoutStats, m_NullBlock->size);
5669
}
5670
5671
void BlockMetadata_TLSF::WriteAllocationInfoToJson(JsonWriter& json) const
5672
{
5673
size_t blockCount = m_AllocCount + m_BlocksFreeCount;
5674
Vector<Block*> blockList(blockCount, *GetAllocs());
5675
5676
size_t i = blockCount;
5677
if (m_NullBlock->size > 0)
5678
{
5679
++blockCount;
5680
blockList.push_back(m_NullBlock);
5681
}
5682
for (Block* block = m_NullBlock->prevPhysical; block != NULL; block = block->prevPhysical)
5683
{
5684
blockList[--i] = block;
5685
}
5686
D3D12MA_ASSERT(i == 0);
5687
5688
PrintDetailedMap_Begin(json, GetSumFreeSize(), GetAllocationCount(), m_BlocksFreeCount + static_cast<bool>(m_NullBlock->size));
5689
for (; i < blockCount; ++i)
5690
{
5691
Block* block = blockList[i];
5692
if (block->IsFree())
5693
PrintDetailedMap_UnusedRange(json, block->offset, block->size);
5694
else
5695
PrintDetailedMap_Allocation(json, block->offset, block->size, block->PrivateData());
5696
}
5697
PrintDetailedMap_End(json);
5698
}
5699
5700
void BlockMetadata_TLSF::DebugLogAllAllocations() const
5701
{
5702
for (Block* block = m_NullBlock->prevPhysical; block != NULL; block = block->prevPhysical)
5703
{
5704
if (!block->IsFree())
5705
{
5706
DebugLogAllocation(block->offset, block->size, block->PrivateData());
5707
}
5708
}
5709
}
5710
5711
UINT8 BlockMetadata_TLSF::SizeToMemoryClass(UINT64 size) const
5712
{
5713
if (size > SMALL_BUFFER_SIZE)
5714
return BitScanMSB(size) - MEMORY_CLASS_SHIFT;
5715
return 0;
5716
}
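// The memory class (first level) and the second index together select one of m_ListsCount
// free lists: sizes up to SMALL_BUFFER_SIZE all map to class 0 and are split linearly, while
// each larger class covers one power-of-two range split into 2^SECOND_LEVEL_INDEX sub-ranges.
// See SizeToSecondIndex() and GetListIndex() below.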
5717
5718
UINT16 BlockMetadata_TLSF::SizeToSecondIndex(UINT64 size, UINT8 memoryClass) const
5719
{
5720
if (memoryClass == 0)
5721
{
5722
if (IsVirtual())
5723
return static_cast<UINT16>((size - 1) / 8);
5724
else
5725
return static_cast<UINT16>((size - 1) / 64);
5726
}
5727
return static_cast<UINT16>((size >> (memoryClass + MEMORY_CLASS_SHIFT - SECOND_LEVEL_INDEX)) ^ (1U << SECOND_LEVEL_INDEX));
5728
}
5729
5730
UINT32 BlockMetadata_TLSF::GetListIndex(UINT8 memoryClass, UINT16 secondIndex) const
5731
{
5732
if (memoryClass == 0)
5733
return secondIndex;
5734
5735
const UINT32 index = static_cast<UINT32>(memoryClass - 1) * (1 << SECOND_LEVEL_INDEX) + secondIndex;
5736
if (IsVirtual())
5737
return index + (1 << SECOND_LEVEL_INDEX);
5738
else
5739
return index + 4;
5740
}
5741
5742
UINT32 BlockMetadata_TLSF::GetListIndex(UINT64 size) const
5743
{
5744
UINT8 memoryClass = SizeToMemoryClass(size);
5745
return GetListIndex(memoryClass, SizeToSecondIndex(size, memoryClass));
5746
}
5747
5748
void BlockMetadata_TLSF::RemoveFreeBlock(Block* block)
5749
{
5750
D3D12MA_ASSERT(block != m_NullBlock);
5751
D3D12MA_ASSERT(block->IsFree());
5752
5753
if (block->NextFree() != NULL)
5754
block->NextFree()->PrevFree() = block->PrevFree();
5755
if (block->PrevFree() != NULL)
5756
block->PrevFree()->NextFree() = block->NextFree();
5757
else
5758
{
5759
UINT8 memClass = SizeToMemoryClass(block->size);
5760
UINT16 secondIndex = SizeToSecondIndex(block->size, memClass);
5761
UINT32 index = GetListIndex(memClass, secondIndex);
5762
m_FreeList[index] = block->NextFree();
5763
if (block->NextFree() == NULL)
5764
{
5765
m_InnerIsFreeBitmap[memClass] &= ~(1U << secondIndex);
5766
if (m_InnerIsFreeBitmap[memClass] == 0)
5767
m_IsFreeBitmap &= ~(1UL << memClass);
5768
}
5769
}
5770
block->MarkTaken();
5771
block->PrivateData() = NULL;
5772
--m_BlocksFreeCount;
5773
m_BlocksFreeSize -= block->size;
5774
}
5775
5776
void BlockMetadata_TLSF::InsertFreeBlock(Block* block)
5777
{
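// Each free list has a bit in the two-level bitmap: m_IsFreeBitmap marks memory classes that
// contain any free block and m_InnerIsFreeBitmap[memClass] marks the non-empty second-level
// lists within that class. Bits are set here and cleared in RemoveFreeBlock() when a list
// drains, which is what lets FindFreeBlock() locate a suitable list with two bit scans.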
5778
D3D12MA_ASSERT(block != m_NullBlock);
5779
D3D12MA_ASSERT(!block->IsFree() && "Cannot insert block twice!");
5780
5781
UINT8 memClass = SizeToMemoryClass(block->size);
5782
UINT16 secondIndex = SizeToSecondIndex(block->size, memClass);
5783
UINT32 index = GetListIndex(memClass, secondIndex);
5784
block->PrevFree() = NULL;
5785
block->NextFree() = m_FreeList[index];
5786
m_FreeList[index] = block;
5787
if (block->NextFree() != NULL)
5788
block->NextFree()->PrevFree() = block;
5789
else
5790
{
5791
m_InnerIsFreeBitmap[memClass] |= 1U << secondIndex;
5792
m_IsFreeBitmap |= 1UL << memClass;
5793
}
5794
++m_BlocksFreeCount;
5795
m_BlocksFreeSize += block->size;
5796
}
5797
5798
void BlockMetadata_TLSF::MergeBlock(Block* block, Block* prev)
5799
{
5800
D3D12MA_ASSERT(block->prevPhysical == prev && "Cannot merge separate physical regions!");
5801
D3D12MA_ASSERT(!prev->IsFree() && "Cannot merge block that belongs to free list!");
5802
5803
block->offset = prev->offset;
5804
block->size += prev->size;
5805
block->prevPhysical = prev->prevPhysical;
5806
if (block->prevPhysical)
5807
block->prevPhysical->nextPhysical = block;
5808
m_BlockAllocator.Free(prev);
5809
}
5810
5811
BlockMetadata_TLSF::Block* BlockMetadata_TLSF::FindFreeBlock(UINT64 size, UINT32& listIndex) const
5812
{
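// Two bit scans replace a linear search: first mask off the second-level lists that are too
// small within the requested memory class; if none remain, take the lowest non-empty higher
// class from m_IsFreeBitmap. The returned list head, if any, is the candidate handed to
// CheckBlock() by the caller.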
5813
UINT8 memoryClass = SizeToMemoryClass(size);
5814
UINT32 innerFreeMap = m_InnerIsFreeBitmap[memoryClass] & (~0U << SizeToSecondIndex(size, memoryClass));
5815
if (!innerFreeMap)
5816
{
5817
// Check higher levels for available blocks
5818
UINT32 freeMap = m_IsFreeBitmap & (~0UL << (memoryClass + 1));
5819
if (!freeMap)
5820
return NULL; // No more memory available
5821
5822
// Find lowest free region
5823
memoryClass = BitScanLSB(freeMap);
5824
innerFreeMap = m_InnerIsFreeBitmap[memoryClass];
5825
D3D12MA_ASSERT(innerFreeMap != 0);
5826
}
5827
// Find lowest free subregion
5828
listIndex = GetListIndex(memoryClass, BitScanLSB(innerFreeMap));
5829
return m_FreeList[listIndex];
5830
}
5831
5832
bool BlockMetadata_TLSF::CheckBlock(
5833
Block& block,
5834
UINT32 listIndex,
5835
UINT64 allocSize,
5836
UINT64 allocAlignment,
5837
AllocationRequest* pAllocationRequest)
5838
{
5839
D3D12MA_ASSERT(block.IsFree() && "Block is already taken!");
5840
5841
UINT64 alignedOffset = AlignUp(block.offset, allocAlignment);
5842
if (block.size < allocSize + alignedOffset - block.offset)
5843
return false;
5844
5845
// Alloc successful
5846
pAllocationRequest->allocHandle = (AllocHandle)&block;
5847
pAllocationRequest->size = allocSize - GetDebugMargin();
5848
pAllocationRequest->algorithmData = alignedOffset;
5849
5850
// Place block at the start of the list if it's a normal block
5851
if (listIndex != m_ListsCount && block.PrevFree())
5852
{
5853
block.PrevFree()->NextFree() = block.NextFree();
5854
if (block.NextFree())
5855
block.NextFree()->PrevFree() = block.PrevFree();
5856
block.PrevFree() = NULL;
5857
block.NextFree() = m_FreeList[listIndex];
5858
m_FreeList[listIndex] = &block;
5859
if (block.NextFree())
5860
block.NextFree()->PrevFree() = &block;
5861
}
5862
5863
return true;
5864
}
5865
#endif // _D3D12MA_BLOCK_METADATA_TLSF_FUNCTIONS
5866
#endif // _D3D12MA_BLOCK_METADATA_TLSF
5867
5868
#ifndef _D3D12MA_MEMORY_BLOCK
5869
/*
5870
Represents a single block of device memory (heap).
5871
Base class for inheritance.
5872
Thread-safety: This class must be externally synchronized.
5873
*/
5874
class MemoryBlock
5875
{
5876
public:
5877
// Creates the ID3D12Heap.
5878
MemoryBlock(
5879
AllocatorPimpl* allocator,
5880
const D3D12_HEAP_PROPERTIES& heapProps,
5881
D3D12_HEAP_FLAGS heapFlags,
5882
UINT64 size,
5883
UINT id);
5884
virtual ~MemoryBlock();
5885
5886
const D3D12_HEAP_PROPERTIES& GetHeapProperties() const { return m_HeapProps; }
5887
D3D12_HEAP_FLAGS GetHeapFlags() const { return m_HeapFlags; }
5888
UINT64 GetSize() const { return m_Size; }
5889
UINT GetId() const { return m_Id; }
5890
ID3D12Heap* GetHeap() const { return m_Heap; }
5891
5892
protected:
5893
AllocatorPimpl* const m_Allocator;
5894
const D3D12_HEAP_PROPERTIES m_HeapProps;
5895
const D3D12_HEAP_FLAGS m_HeapFlags;
5896
const UINT64 m_Size;
5897
const UINT m_Id;
5898
5899
HRESULT Init(ID3D12ProtectedResourceSession* pProtectedSession, bool denyMsaaTextures);
5900
5901
private:
5902
ID3D12Heap* m_Heap = NULL;
5903
5904
D3D12MA_CLASS_NO_COPY(MemoryBlock)
5905
};
5906
#endif // _D3D12MA_MEMORY_BLOCK
5907
5908
#ifndef _D3D12MA_NORMAL_BLOCK
5909
/*
5910
Represents a single block of device memory (heap) with all the data about its
5911
regions (aka suballocations, Allocation), assigned and free.
5912
Thread-safety: This class must be externally synchronized.
5913
*/
5914
class NormalBlock : public MemoryBlock
5915
{
5916
public:
5917
BlockMetadata* m_pMetadata;
5918
5919
NormalBlock(
5920
AllocatorPimpl* allocator,
5921
BlockVector* blockVector,
5922
const D3D12_HEAP_PROPERTIES& heapProps,
5923
D3D12_HEAP_FLAGS heapFlags,
5924
UINT64 size,
5925
UINT id);
5926
virtual ~NormalBlock();
5927
5928
BlockVector* GetBlockVector() const { return m_BlockVector; }
5929
5930
// 'algorithm' should be one of the *_ALGORITHM_* flags in enums POOL_FLAGS or VIRTUAL_BLOCK_FLAGS
5931
HRESULT Init(UINT32 algorithm, ID3D12ProtectedResourceSession* pProtectedSession, bool denyMsaaTextures);
5932
5933
// Validates all data structures inside this object. If not valid, returns false.
5934
bool Validate() const;
5935
5936
private:
5937
BlockVector* m_BlockVector;
5938
5939
D3D12MA_CLASS_NO_COPY(NormalBlock)
5940
};
5941
#endif // _D3D12MA_NORMAL_BLOCK
5942
5943
#ifndef _D3D12MA_COMMITTED_ALLOCATION_LIST_ITEM_TRAITS
5944
struct CommittedAllocationListItemTraits
5945
{
5946
using ItemType = Allocation;
5947
5948
static ItemType* GetPrev(const ItemType* item)
5949
{
5950
D3D12MA_ASSERT(item->m_PackedData.GetType() == Allocation::TYPE_COMMITTED || item->m_PackedData.GetType() == Allocation::TYPE_HEAP);
5951
return item->m_Committed.prev;
5952
}
5953
static ItemType* GetNext(const ItemType* item)
5954
{
5955
D3D12MA_ASSERT(item->m_PackedData.GetType() == Allocation::TYPE_COMMITTED || item->m_PackedData.GetType() == Allocation::TYPE_HEAP);
5956
return item->m_Committed.next;
5957
}
5958
static ItemType*& AccessPrev(ItemType* item)
5959
{
5960
D3D12MA_ASSERT(item->m_PackedData.GetType() == Allocation::TYPE_COMMITTED || item->m_PackedData.GetType() == Allocation::TYPE_HEAP);
5961
return item->m_Committed.prev;
5962
}
5963
static ItemType*& AccessNext(ItemType* item)
5964
{
5965
D3D12MA_ASSERT(item->m_PackedData.GetType() == Allocation::TYPE_COMMITTED || item->m_PackedData.GetType() == Allocation::TYPE_HEAP);
5966
return item->m_Committed.next;
5967
}
5968
};
5969
#endif // _D3D12MA_COMMITTED_ALLOCATION_LIST_ITEM_TRAITS
5970
5971
#ifndef _D3D12MA_COMMITTED_ALLOCATION_LIST
5972
/*
5973
Stores linked list of Allocation objects that are of TYPE_COMMITTED or TYPE_HEAP.
5974
Thread-safe, synchronized internally.
5975
*/
5976
class CommittedAllocationList
5977
{
5978
public:
5979
CommittedAllocationList() = default;
5980
void Init(bool useMutex, D3D12_HEAP_TYPE heapType, PoolPimpl* pool);
5981
~CommittedAllocationList();
5982
5983
D3D12_HEAP_TYPE GetHeapType() const { return m_HeapType; }
5984
PoolPimpl* GetPool() const { return m_Pool; }
5985
UINT GetMemorySegmentGroup(AllocatorPimpl* allocator) const;
5986
5987
void AddStatistics(Statistics& inoutStats);
5988
void AddDetailedStatistics(DetailedStatistics& inoutStats);
5989
// Writes JSON array with the list of allocations.
5990
void BuildStatsString(JsonWriter& json);
5991
5992
void Register(Allocation* alloc);
5993
void Unregister(Allocation* alloc);
5994
5995
private:
5996
using CommittedAllocationLinkedList = IntrusiveLinkedList<CommittedAllocationListItemTraits>;
5997
5998
bool m_UseMutex = true;
5999
D3D12_HEAP_TYPE m_HeapType = D3D12_HEAP_TYPE_CUSTOM;
6000
PoolPimpl* m_Pool = NULL;
6001
6002
D3D12MA_RW_MUTEX m_Mutex;
6003
CommittedAllocationLinkedList m_AllocationList;
6004
};
6005
#endif // _D3D12MA_COMMITTED_ALLOCATION_LIST
6006
6007
#ifndef _D3D12M_COMMITTED_ALLOCATION_PARAMETERS
6008
struct CommittedAllocationParameters
6009
{
6010
CommittedAllocationList* m_List = NULL;
6011
D3D12_HEAP_PROPERTIES m_HeapProperties = {};
6012
D3D12_HEAP_FLAGS m_HeapFlags = D3D12_HEAP_FLAG_NONE;
6013
ID3D12ProtectedResourceSession* m_ProtectedSession = NULL;
6014
bool m_CanAlias = false;
6015
D3D12_RESIDENCY_PRIORITY m_ResidencyPriority = D3D12_RESIDENCY_PRIORITY_NONE;
6016
6017
bool IsValid() const { return m_List != NULL; }
6018
};
6019
#endif // _D3D12M_COMMITTED_ALLOCATION_PARAMETERS
6020
6021
// Simple variant data structure to hold all possible variations of ID3D12Device*::CreateCommittedResource* and ID3D12Device*::CreatePlacedResource* arguments
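// Illustrative use (hypothetical caller): the oldest variant wraps the classic argument
// triple, e.g.
//   CREATE_RESOURCE_PARAMS params(&resourceDesc, D3D12_RESOURCE_STATE_COMMON, NULL);
// which sets Variant == VARIANT_WITH_STATE; the Get*/Access* helpers below assert that only
// the members belonging to the active variant are read.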
6022
struct CREATE_RESOURCE_PARAMS
6023
{
6024
CREATE_RESOURCE_PARAMS() = delete;
6025
CREATE_RESOURCE_PARAMS(
6026
const D3D12_RESOURCE_DESC* pResourceDesc,
6027
D3D12_RESOURCE_STATES InitialResourceState,
6028
const D3D12_CLEAR_VALUE* pOptimizedClearValue)
6029
: Variant(VARIANT_WITH_STATE)
6030
, pResourceDesc(pResourceDesc)
6031
, InitialResourceState(InitialResourceState)
6032
, pOptimizedClearValue(pOptimizedClearValue)
6033
{
6034
}
6035
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
6036
CREATE_RESOURCE_PARAMS(
6037
const D3D12_RESOURCE_DESC1* pResourceDesc,
6038
D3D12_RESOURCE_STATES InitialResourceState,
6039
const D3D12_CLEAR_VALUE* pOptimizedClearValue)
6040
: Variant(VARIANT_WITH_STATE_AND_DESC1)
6041
, pResourceDesc1(pResourceDesc)
6042
, InitialResourceState(InitialResourceState)
6043
, pOptimizedClearValue(pOptimizedClearValue)
6044
{
6045
}
6046
#endif
6047
#ifdef __ID3D12Device10_INTERFACE_DEFINED__
6048
CREATE_RESOURCE_PARAMS(
6049
const D3D12_RESOURCE_DESC1* pResourceDesc,
6050
D3D12_BARRIER_LAYOUT InitialLayout,
6051
const D3D12_CLEAR_VALUE* pOptimizedClearValue,
6052
UINT32 NumCastableFormats,
6053
DXGI_FORMAT* pCastableFormats)
6054
: Variant(VARIANT_WITH_LAYOUT)
6055
, pResourceDesc1(pResourceDesc)
6056
, InitialLayout(InitialLayout)
6057
, pOptimizedClearValue(pOptimizedClearValue)
6058
, NumCastableFormats(NumCastableFormats)
6059
, pCastableFormats(pCastableFormats)
6060
{
6061
}
6062
#endif
6063
6064
enum VARIANT
6065
{
6066
VARIANT_INVALID = 0,
6067
VARIANT_WITH_STATE,
6068
VARIANT_WITH_STATE_AND_DESC1,
6069
VARIANT_WITH_LAYOUT
6070
};
6071
6072
VARIANT Variant = VARIANT_INVALID;
6073
6074
const D3D12_RESOURCE_DESC* GetResourceDesc() const
6075
{
6076
D3D12MA_ASSERT(Variant == VARIANT_WITH_STATE);
6077
return pResourceDesc;
6078
}
6079
const D3D12_RESOURCE_DESC*& AccessResourceDesc()
6080
{
6081
D3D12MA_ASSERT(Variant == VARIANT_WITH_STATE);
6082
return pResourceDesc;
6083
}
6084
const D3D12_RESOURCE_DESC* GetBaseResourceDesc() const
6085
{
6086
// D3D12_RESOURCE_DESC1 can be cast to D3D12_RESOURCE_DESC by discarding the new members at the end.
6087
return pResourceDesc;
6088
}
6089
D3D12_RESOURCE_STATES GetInitialResourceState() const
6090
{
6091
D3D12MA_ASSERT(Variant < VARIANT_WITH_LAYOUT);
6092
return InitialResourceState;
6093
}
6094
const D3D12_CLEAR_VALUE* GetOptimizedClearValue() const
6095
{
6096
return pOptimizedClearValue;
6097
}
6098
6099
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
6100
const D3D12_RESOURCE_DESC1* GetResourceDesc1() const
6101
{
6102
D3D12MA_ASSERT(Variant >= VARIANT_WITH_STATE_AND_DESC1);
6103
return pResourceDesc1;
6104
}
6105
const D3D12_RESOURCE_DESC1*& AccessResourceDesc1()
6106
{
6107
D3D12MA_ASSERT(Variant >= VARIANT_WITH_STATE_AND_DESC1);
6108
return pResourceDesc1;
6109
}
6110
#endif
6111
6112
#ifdef __ID3D12Device10_INTERFACE_DEFINED__
6113
D3D12_BARRIER_LAYOUT GetInitialLayout() const
6114
{
6115
D3D12MA_ASSERT(Variant >= VARIANT_WITH_LAYOUT);
6116
return InitialLayout;
6117
}
6118
UINT32 GetNumCastableFormats() const
6119
{
6120
D3D12MA_ASSERT(Variant >= VARIANT_WITH_LAYOUT);
6121
return NumCastableFormats;
6122
}
6123
DXGI_FORMAT* GetCastableFormats() const
6124
{
6125
D3D12MA_ASSERT(Variant >= VARIANT_WITH_LAYOUT);
6126
return pCastableFormats;
6127
}
6128
#endif
6129
6130
private:
6131
union
6132
{
6133
const D3D12_RESOURCE_DESC* pResourceDesc;
6134
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
6135
const D3D12_RESOURCE_DESC1* pResourceDesc1;
6136
#endif
6137
};
6138
union
6139
{
6140
D3D12_RESOURCE_STATES InitialResourceState;
6141
#ifdef __ID3D12Device10_INTERFACE_DEFINED__
6142
D3D12_BARRIER_LAYOUT InitialLayout;
6143
#endif
6144
};
6145
const D3D12_CLEAR_VALUE* pOptimizedClearValue;
6146
#ifdef __ID3D12Device10_INTERFACE_DEFINED__
6147
UINT32 NumCastableFormats;
6148
DXGI_FORMAT* pCastableFormats;
6149
#endif
6150
};
6151
6152
#ifndef _D3D12MA_BLOCK_VECTOR
6153
/*
6154
Sequence of NormalBlock. Represents memory blocks allocated for a specific
6155
heap type and possibly resource type (if only Tier 1 is supported).
6156
6157
Synchronized internally with a mutex.
6158
*/
6159
class BlockVector
6160
{
6161
friend class DefragmentationContextPimpl;
6162
D3D12MA_CLASS_NO_COPY(BlockVector)
6163
public:
6164
BlockVector(
6165
AllocatorPimpl* hAllocator,
6166
const D3D12_HEAP_PROPERTIES& heapProps,
6167
D3D12_HEAP_FLAGS heapFlags,
6168
UINT64 preferredBlockSize,
6169
size_t minBlockCount,
6170
size_t maxBlockCount,
6171
bool explicitBlockSize,
6172
UINT64 minAllocationAlignment,
6173
UINT32 algorithm,
6174
bool denyMsaaTextures,
6175
ID3D12ProtectedResourceSession* pProtectedSession,
6176
D3D12_RESIDENCY_PRIORITY residencyPriority);
6177
~BlockVector();
6178
D3D12_RESIDENCY_PRIORITY GetResidencyPriority() const { return m_ResidencyPriority; }
6179
6180
const D3D12_HEAP_PROPERTIES& GetHeapProperties() const { return m_HeapProps; }
6181
D3D12_HEAP_FLAGS GetHeapFlags() const { return m_HeapFlags; }
6182
UINT64 GetPreferredBlockSize() const { return m_PreferredBlockSize; }
6183
UINT32 GetAlgorithm() const { return m_Algorithm; }
6184
bool DeniesMsaaTextures() const { return m_DenyMsaaTextures; }
6185
// To be used only while the m_Mutex is locked. Used during defragmentation.
6186
size_t GetBlockCount() const { return m_Blocks.size(); }
6187
// To be used only while the m_Mutex is locked. Used during defragmentation.
6188
NormalBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
6189
D3D12MA_RW_MUTEX& GetMutex() { return m_Mutex; }
6190
6191
HRESULT CreateMinBlocks();
6192
bool IsEmpty();
6193
6194
HRESULT Allocate(
6195
UINT64 size,
6196
UINT64 alignment,
6197
const ALLOCATION_DESC& allocDesc,
6198
size_t allocationCount,
6199
Allocation** pAllocations);
6200
6201
void Free(Allocation* hAllocation);
6202
6203
HRESULT CreateResource(
6204
UINT64 size,
6205
UINT64 alignment,
6206
const ALLOCATION_DESC& allocDesc,
6207
const CREATE_RESOURCE_PARAMS& createParams,
6208
Allocation** ppAllocation,
6209
REFIID riidResource,
6210
void** ppvResource);
6211
6212
void AddStatistics(Statistics& inoutStats);
6213
void AddDetailedStatistics(DetailedStatistics& inoutStats);
6214
6215
void WriteBlockInfoToJson(JsonWriter& json);
6216
6217
private:
6218
AllocatorPimpl* const m_hAllocator;
6219
const D3D12_HEAP_PROPERTIES m_HeapProps;
6220
const D3D12_HEAP_FLAGS m_HeapFlags;
6221
const UINT64 m_PreferredBlockSize;
6222
const size_t m_MinBlockCount;
6223
const size_t m_MaxBlockCount;
6224
const bool m_ExplicitBlockSize;
6225
const UINT64 m_MinAllocationAlignment;
6226
const UINT32 m_Algorithm;
6227
const bool m_DenyMsaaTextures;
6228
ID3D12ProtectedResourceSession* const m_ProtectedSession;
6229
const D3D12_RESIDENCY_PRIORITY m_ResidencyPriority;
6230
/* There can be at most one allocation that is completely empty - a
6231
hysteresis to avoid the pessimistic case of alternating creation and destruction
6232
of an ID3D12Heap. */
6233
bool m_HasEmptyBlock;
6234
D3D12MA_RW_MUTEX m_Mutex;
6235
// Incrementally sorted by sumFreeSize, ascending.
6236
Vector<NormalBlock*> m_Blocks;
6237
UINT m_NextBlockId;
6238
bool m_IncrementalSort = true;
6239
6240
// Disable incremental sorting when freeing allocations
6241
void SetIncrementalSort(bool val) { m_IncrementalSort = val; }
6242
6243
UINT64 CalcSumBlockSize() const;
6244
UINT64 CalcMaxBlockSize() const;
6245
6246
// Finds and removes given block from vector.
6247
void Remove(NormalBlock* pBlock);
6248
6249
// Performs single step in sorting m_Blocks. They may not be fully sorted
6250
// after this call.
6251
void IncrementallySortBlocks();
6252
void SortByFreeSize();
6253
6254
HRESULT AllocatePage(
6255
UINT64 size,
6256
UINT64 alignment,
6257
const ALLOCATION_DESC& allocDesc,
6258
Allocation** pAllocation);
6259
6260
HRESULT AllocateFromBlock(
6261
NormalBlock* pBlock,
6262
UINT64 size,
6263
UINT64 alignment,
6264
ALLOCATION_FLAGS allocFlags,
6265
void* pPrivateData,
6266
UINT32 strategy,
6267
Allocation** pAllocation);
6268
6269
HRESULT CommitAllocationRequest(
6270
AllocationRequest& allocRequest,
6271
NormalBlock* pBlock,
6272
UINT64 size,
6273
UINT64 alignment,
6274
void* pPrivateData,
6275
Allocation** pAllocation);
6276
6277
HRESULT CreateBlock(
6278
UINT64 blockSize,
6279
size_t* pNewBlockIndex);
6280
};
6281
#endif // _D3D12MA_BLOCK_VECTOR
6282
6283
#ifndef _D3D12MA_CURRENT_BUDGET_DATA
6284
class CurrentBudgetData
6285
{
6286
public:
6287
bool ShouldUpdateBudget() const { return m_OperationsSinceBudgetFetch >= 30; }
6288
6289
void GetStatistics(Statistics& outStats, UINT group) const;
6290
void GetBudget(bool useMutex,
6291
UINT64* outLocalUsage, UINT64* outLocalBudget,
6292
UINT64* outNonLocalUsage, UINT64* outNonLocalBudget);
6293
6294
#if D3D12MA_DXGI_1_4
6295
HRESULT UpdateBudget(IDXGIAdapter3* adapter3, bool useMutex);
6296
#endif
6297
6298
void AddAllocation(UINT group, UINT64 allocationBytes);
6299
void RemoveAllocation(UINT group, UINT64 allocationBytes);
6300
6301
void AddBlock(UINT group, UINT64 blockBytes);
6302
void RemoveBlock(UINT group, UINT64 blockBytes);
6303
6304
private:
6305
D3D12MA_ATOMIC_UINT32 m_BlockCount[DXGI_MEMORY_SEGMENT_GROUP_COUNT] = {};
6306
D3D12MA_ATOMIC_UINT32 m_AllocationCount[DXGI_MEMORY_SEGMENT_GROUP_COUNT] = {};
6307
D3D12MA_ATOMIC_UINT64 m_BlockBytes[DXGI_MEMORY_SEGMENT_GROUP_COUNT] = {};
6308
D3D12MA_ATOMIC_UINT64 m_AllocationBytes[DXGI_MEMORY_SEGMENT_GROUP_COUNT] = {};
6309
6310
D3D12MA_ATOMIC_UINT32 m_OperationsSinceBudgetFetch = {0};
6311
D3D12MA_RW_MUTEX m_BudgetMutex;
6312
UINT64 m_D3D12Usage[DXGI_MEMORY_SEGMENT_GROUP_COUNT] = {};
6313
UINT64 m_D3D12Budget[DXGI_MEMORY_SEGMENT_GROUP_COUNT] = {};
6314
UINT64 m_BlockBytesAtD3D12Fetch[DXGI_MEMORY_SEGMENT_GROUP_COUNT] = {};
6315
};
6316
6317
#ifndef _D3D12MA_CURRENT_BUDGET_DATA_FUNCTIONS
6318
void CurrentBudgetData::GetStatistics(Statistics& outStats, UINT group) const
6319
{
6320
outStats.BlockCount = m_BlockCount[group];
6321
outStats.AllocationCount = m_AllocationCount[group];
6322
outStats.BlockBytes = m_BlockBytes[group];
6323
outStats.AllocationBytes = m_AllocationBytes[group];
6324
}
6325
6326
void CurrentBudgetData::GetBudget(bool useMutex,
6327
UINT64* outLocalUsage, UINT64* outLocalBudget,
6328
UINT64* outNonLocalUsage, UINT64* outNonLocalBudget)
6329
{
6330
MutexLockRead lockRead(m_BudgetMutex, useMutex);
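// Usage is extrapolated between OS budget fetches: start from the value DXGI reported at the
// last fetch (m_D3D12Usage) and add the block bytes allocated since that snapshot
// (m_BlockBytes - m_BlockBytesAtD3D12Fetch), clamping at zero if more block bytes were freed
// since the fetch than the usage reported then.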
6331
6332
if (outLocalUsage)
6333
{
6334
const UINT64 D3D12Usage = m_D3D12Usage[DXGI_MEMORY_SEGMENT_GROUP_LOCAL_COPY];
6335
const UINT64 blockBytes = m_BlockBytes[DXGI_MEMORY_SEGMENT_GROUP_LOCAL_COPY];
6336
const UINT64 blockBytesAtD3D12Fetch = m_BlockBytesAtD3D12Fetch[DXGI_MEMORY_SEGMENT_GROUP_LOCAL_COPY];
6337
*outLocalUsage = D3D12Usage + blockBytes > blockBytesAtD3D12Fetch ?
6338
D3D12Usage + blockBytes - blockBytesAtD3D12Fetch : 0;
6339
}
6340
if (outLocalBudget)
6341
*outLocalBudget = m_D3D12Budget[DXGI_MEMORY_SEGMENT_GROUP_LOCAL_COPY];
6342
6343
if (outNonLocalUsage)
6344
{
6345
const UINT64 D3D12Usage = m_D3D12Usage[DXGI_MEMORY_SEGMENT_GROUP_NON_LOCAL_COPY];
6346
const UINT64 blockBytes = m_BlockBytes[DXGI_MEMORY_SEGMENT_GROUP_NON_LOCAL_COPY];
6347
const UINT64 blockBytesAtD3D12Fetch = m_BlockBytesAtD3D12Fetch[DXGI_MEMORY_SEGMENT_GROUP_NON_LOCAL_COPY];
6348
*outNonLocalUsage = D3D12Usage + blockBytes > blockBytesAtD3D12Fetch ?
6349
D3D12Usage + blockBytes - blockBytesAtD3D12Fetch : 0;
6350
}
6351
if (outNonLocalBudget)
6352
*outNonLocalBudget = m_D3D12Budget[DXGI_MEMORY_SEGMENT_GROUP_NON_LOCAL_COPY];
6353
}
6354
6355
#if D3D12MA_DXGI_1_4
6356
HRESULT CurrentBudgetData::UpdateBudget(IDXGIAdapter3* adapter3, bool useMutex)
6357
{
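// Refreshes the cached budget from DXGI: queries both memory segment groups, stores the new
// usage/budget values, and snapshots m_BlockBytes so GetBudget() can extrapolate usage until
// the next refresh (triggered once ShouldUpdateBudget() sees enough operations).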
6358
D3D12MA_ASSERT(adapter3);
6359
6360
DXGI_QUERY_VIDEO_MEMORY_INFO infoLocal = {};
6361
DXGI_QUERY_VIDEO_MEMORY_INFO infoNonLocal = {};
6362
const HRESULT hrLocal = adapter3->QueryVideoMemoryInfo(0, DXGI_MEMORY_SEGMENT_GROUP_LOCAL, &infoLocal);
6363
const HRESULT hrNonLocal = adapter3->QueryVideoMemoryInfo(0, DXGI_MEMORY_SEGMENT_GROUP_NON_LOCAL, &infoNonLocal);
6364
6365
if (SUCCEEDED(hrLocal) || SUCCEEDED(hrNonLocal))
6366
{
6367
MutexLockWrite lockWrite(m_BudgetMutex, useMutex);
6368
6369
if (SUCCEEDED(hrLocal))
6370
{
6371
m_D3D12Usage[0] = infoLocal.CurrentUsage;
6372
m_D3D12Budget[0] = infoLocal.Budget;
6373
}
6374
if (SUCCEEDED(hrNonLocal))
6375
{
6376
m_D3D12Usage[1] = infoNonLocal.CurrentUsage;
6377
m_D3D12Budget[1] = infoNonLocal.Budget;
6378
}
6379
6380
m_BlockBytesAtD3D12Fetch[0] = m_BlockBytes[0];
6381
m_BlockBytesAtD3D12Fetch[1] = m_BlockBytes[1];
6382
m_OperationsSinceBudgetFetch = 0;
6383
}
6384
6385
return FAILED(hrLocal) ? hrLocal : hrNonLocal;
6386
}
6387
#endif // #if D3D12MA_DXGI_1_4
6388
6389
void CurrentBudgetData::AddAllocation(UINT group, UINT64 allocationBytes)
6390
{
6391
++m_AllocationCount[group];
6392
m_AllocationBytes[group] += allocationBytes;
6393
++m_OperationsSinceBudgetFetch;
6394
}
6395
6396
void CurrentBudgetData::RemoveAllocation(UINT group, UINT64 allocationBytes)
6397
{
6398
D3D12MA_ASSERT(m_AllocationBytes[group] >= allocationBytes);
6399
D3D12MA_ASSERT(m_AllocationCount[group] > 0);
6400
m_AllocationBytes[group] -= allocationBytes;
6401
--m_AllocationCount[group];
6402
++m_OperationsSinceBudgetFetch;
6403
}
6404
6405
void CurrentBudgetData::AddBlock(UINT group, UINT64 blockBytes)
6406
{
6407
++m_BlockCount[group];
6408
m_BlockBytes[group] += blockBytes;
6409
++m_OperationsSinceBudgetFetch;
6410
}
6411
6412
void CurrentBudgetData::RemoveBlock(UINT group, UINT64 blockBytes)
6413
{
6414
D3D12MA_ASSERT(m_BlockBytes[group] >= blockBytes);
6415
D3D12MA_ASSERT(m_BlockCount[group] > 0);
6416
m_BlockBytes[group] -= blockBytes;
6417
--m_BlockCount[group];
6418
++m_OperationsSinceBudgetFetch;
6419
}
6420
#endif // _D3D12MA_CURRENT_BUDGET_DATA_FUNCTIONS
6421
#endif // _D3D12MA_CURRENT_BUDGET_DATA
6422
6423
#ifndef _D3D12MA_DEFRAGMENTATION_CONTEXT_PIMPL
6424
class DefragmentationContextPimpl
6425
{
6426
D3D12MA_CLASS_NO_COPY(DefragmentationContextPimpl)
6427
public:
6428
DefragmentationContextPimpl(
6429
AllocatorPimpl* hAllocator,
6430
const DEFRAGMENTATION_DESC& desc,
6431
BlockVector* poolVector);
6432
~DefragmentationContextPimpl();
6433
6434
void GetStats(DEFRAGMENTATION_STATS& outStats) { outStats = m_GlobalStats; }
6435
const ALLOCATION_CALLBACKS& GetAllocs() const { return m_Moves.GetAllocs(); }
6436
6437
HRESULT DefragmentPassBegin(DEFRAGMENTATION_PASS_MOVE_INFO& moveInfo);
6438
HRESULT DefragmentPassEnd(DEFRAGMENTATION_PASS_MOVE_INFO& moveInfo);
6439
6440
private:
6441
// Max number of allocations to ignore due to size constraints before ending single pass
6442
static const UINT8 MAX_ALLOCS_TO_IGNORE = 16;
6443
enum class CounterStatus { Pass, Ignore, End };
6444
6445
struct FragmentedBlock
6446
{
6447
UINT32 data;
6448
NormalBlock* block;
6449
};
6450
struct StateBalanced
6451
{
6452
UINT64 avgFreeSize = 0;
6453
UINT64 avgAllocSize = UINT64_MAX;
6454
};
6455
struct MoveAllocationData
6456
{
6457
UINT64 size;
6458
UINT64 alignment;
6459
ALLOCATION_FLAGS flags;
6460
DEFRAGMENTATION_MOVE move = {};
6461
};
6462
6463
const UINT64 m_MaxPassBytes;
6464
const UINT32 m_MaxPassAllocations;
6465
6466
Vector<DEFRAGMENTATION_MOVE> m_Moves;
6467
6468
UINT8 m_IgnoredAllocs = 0;
6469
UINT32 m_Algorithm;
6470
UINT32 m_BlockVectorCount;
6471
BlockVector* m_PoolBlockVector;
6472
BlockVector** m_pBlockVectors;
6473
size_t m_ImmovableBlockCount = 0;
6474
DEFRAGMENTATION_STATS m_GlobalStats = { 0 };
6475
DEFRAGMENTATION_STATS m_PassStats = { 0 };
6476
void* m_AlgorithmState = NULL;
6477
6478
static MoveAllocationData GetMoveData(AllocHandle handle, BlockMetadata* metadata);
6479
CounterStatus CheckCounters(UINT64 bytes);
6480
bool IncrementCounters(UINT64 bytes);
6481
bool ReallocWithinBlock(BlockVector& vector, NormalBlock* block);
6482
bool AllocInOtherBlock(size_t start, size_t end, MoveAllocationData& data, BlockVector& vector);
6483
6484
bool ComputeDefragmentation(BlockVector& vector, size_t index);
6485
bool ComputeDefragmentation_Fast(BlockVector& vector);
6486
bool ComputeDefragmentation_Balanced(BlockVector& vector, size_t index, bool update);
6487
bool ComputeDefragmentation_Full(BlockVector& vector);
6488
6489
void UpdateVectorStatistics(BlockVector& vector, StateBalanced& state);
6490
};
6491
#endif // _D3D12MA_DEFRAGMENTATION_CONTEXT_PIMPL
6492
6493
#ifndef _D3D12MA_POOL_PIMPL
6494
class PoolPimpl
6495
{
6496
friend class Allocator;
6497
friend struct PoolListItemTraits;
6498
public:
6499
PoolPimpl(AllocatorPimpl* allocator, const POOL_DESC& desc);
6500
~PoolPimpl();
6501
6502
AllocatorPimpl* GetAllocator() const { return m_Allocator; }
6503
const POOL_DESC& GetDesc() const { return m_Desc; }
6504
bool SupportsCommittedAllocations() const { return m_Desc.BlockSize == 0; }
6505
LPCWSTR GetName() const { return m_Name; }
6506
6507
BlockVector* GetBlockVector() { return m_BlockVector; }
6508
CommittedAllocationList* GetCommittedAllocationList() { return SupportsCommittedAllocations() ? &m_CommittedAllocations : NULL; }
6509
6510
HRESULT Init();
6511
void GetStatistics(Statistics& outStats);
6512
void CalculateStatistics(DetailedStatistics& outStats);
6513
void AddDetailedStatistics(DetailedStatistics& inoutStats);
6514
void SetName(LPCWSTR Name);
6515
6516
private:
6517
AllocatorPimpl* m_Allocator; // Externally owned object.
6518
POOL_DESC m_Desc;
6519
BlockVector* m_BlockVector; // Owned object.
6520
CommittedAllocationList m_CommittedAllocations;
6521
wchar_t* m_Name;
6522
PoolPimpl* m_PrevPool = NULL;
6523
PoolPimpl* m_NextPool = NULL;
6524
6525
void FreeName();
6526
};
6527
6528
struct PoolListItemTraits
6529
{
6530
using ItemType = PoolPimpl;
6531
static ItemType* GetPrev(const ItemType* item) { return item->m_PrevPool; }
6532
static ItemType* GetNext(const ItemType* item) { return item->m_NextPool; }
6533
static ItemType*& AccessPrev(ItemType* item) { return item->m_PrevPool; }
6534
static ItemType*& AccessNext(ItemType* item) { return item->m_NextPool; }
6535
};
6536
#endif // _D3D12MA_POOL_PIMPL
6537
6538
6539
#ifndef _D3D12MA_ALLOCATOR_PIMPL
6540
class AllocatorPimpl
6541
{
6542
friend class Allocator;
6543
friend class Pool;
6544
public:
6545
std::atomic_uint32_t m_RefCount = {1};
6546
CurrentBudgetData m_Budget;
6547
6548
AllocatorPimpl(const ALLOCATION_CALLBACKS& allocationCallbacks, const ALLOCATOR_DESC& desc);
6549
~AllocatorPimpl();
6550
6551
ID3D12Device* GetDevice() const { return m_Device; }
6552
#ifdef __ID3D12Device1_INTERFACE_DEFINED__
6553
ID3D12Device1* GetDevice1() const { return m_Device1; }
6554
#endif
6555
#ifdef __ID3D12Device4_INTERFACE_DEFINED__
6556
ID3D12Device4* GetDevice4() const { return m_Device4; }
6557
#endif
6558
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
6559
ID3D12Device8* GetDevice8() const { return m_Device8; }
6560
#endif
6561
// Shortcut for "Allocation Callbacks", because this function is called so often.
6562
const ALLOCATION_CALLBACKS& GetAllocs() const { return m_AllocationCallbacks; }
6563
const D3D12_FEATURE_DATA_D3D12_OPTIONS& GetD3D12Options() const { return m_D3D12Options; }
6564
BOOL IsUMA() const { return m_D3D12Architecture.UMA; }
6565
BOOL IsCacheCoherentUMA() const { return m_D3D12Architecture.CacheCoherentUMA; }
6566
bool SupportsResourceHeapTier2() const { return m_D3D12Options.ResourceHeapTier >= D3D12_RESOURCE_HEAP_TIER_2; }
6567
bool UseMutex() const { return m_UseMutex; }
6568
AllocationObjectAllocator& GetAllocationObjectAllocator() { return m_AllocationObjectAllocator; }
6569
UINT GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
6570
/*
6571
If SupportsResourceHeapTier2():
6572
0: D3D12_HEAP_TYPE_DEFAULT
6573
1: D3D12_HEAP_TYPE_UPLOAD
6574
2: D3D12_HEAP_TYPE_READBACK
6575
else:
6576
0: D3D12_HEAP_TYPE_DEFAULT + buffer
6577
1: D3D12_HEAP_TYPE_DEFAULT + texture
6578
2: D3D12_HEAP_TYPE_DEFAULT + texture RT or DS
6579
3: D3D12_HEAP_TYPE_UPLOAD + buffer
6580
4: D3D12_HEAP_TYPE_UPLOAD + texture
6581
5: D3D12_HEAP_TYPE_UPLOAD + texture RT or DS
6582
6: D3D12_HEAP_TYPE_READBACK + buffer
6583
7: D3D12_HEAP_TYPE_READBACK + texture
6584
8: D3D12_HEAP_TYPE_READBACK + texture RT or DS
6585
*/
6586
UINT GetDefaultPoolCount() const { return SupportsResourceHeapTier2() ? 3 : 9; }
6587
BlockVector** GetDefaultPools() { return m_BlockVectors; }
6588
6589
HRESULT Init(const ALLOCATOR_DESC& desc);
6590
bool HeapFlagsFulfillResourceHeapTier(D3D12_HEAP_FLAGS flags) const;
6591
UINT StandardHeapTypeToMemorySegmentGroup(D3D12_HEAP_TYPE heapType) const;
6592
UINT HeapPropertiesToMemorySegmentGroup(const D3D12_HEAP_PROPERTIES& heapProps) const;
6593
UINT64 GetMemoryCapacity(UINT memorySegmentGroup) const;
6594
6595
HRESULT CreatePlacedResourceWrap(
6596
ID3D12Heap *pHeap,
6597
UINT64 HeapOffset,
6598
const CREATE_RESOURCE_PARAMS& createParams,
6599
REFIID riidResource,
6600
void** ppvResource);
6601
6602
HRESULT CreateResource(
6603
const ALLOCATION_DESC* pAllocDesc,
6604
const CREATE_RESOURCE_PARAMS& createParams,
6605
Allocation** ppAllocation,
6606
REFIID riidResource,
6607
void** ppvResource);
6608
6609
HRESULT CreateAliasingResource(
6610
Allocation* pAllocation,
6611
UINT64 AllocationLocalOffset,
6612
const CREATE_RESOURCE_PARAMS& createParams,
6613
REFIID riidResource,
6614
void** ppvResource);
6615
6616
HRESULT AllocateMemory(
6617
const ALLOCATION_DESC* pAllocDesc,
6618
const D3D12_RESOURCE_ALLOCATION_INFO* pAllocInfo,
6619
Allocation** ppAllocation);
6620
6621
// Unregisters allocation from the collection of dedicated allocations.
6622
// Allocation object must be deleted externally afterwards.
6623
void FreeCommittedMemory(Allocation* allocation);
6624
// Unregisters allocation from the collection of placed allocations.
6625
// Allocation object must be deleted externally afterwards.
6626
void FreePlacedMemory(Allocation* allocation);
6627
// Unregisters allocation from the collection of dedicated allocations and destroys associated heap.
6628
// Allocation object must be deleted externally afterwards.
6629
void FreeHeapMemory(Allocation* allocation);
6630
6631
void SetResidencyPriority(ID3D12Pageable* obj, D3D12_RESIDENCY_PRIORITY priority) const;
6632
6633
void SetCurrentFrameIndex(UINT frameIndex);
6634
// For more detailed stats use outCutomHeaps to access statistics divided into the L0 and L1 groups
6635
void CalculateStatistics(TotalStatistics& outStats, DetailedStatistics outCutomHeaps[2] = NULL);
6636
6637
void GetBudget(Budget* outLocalBudget, Budget* outNonLocalBudget);
6638
void GetBudgetForHeapType(Budget& outBudget, D3D12_HEAP_TYPE heapType);
6639
6640
void BuildStatsString(WCHAR** ppStatsString, BOOL detailedMap);
6641
void FreeStatsString(WCHAR* pStatsString);
6642
6643
private:
6644
using PoolList = IntrusiveLinkedList<PoolListItemTraits>;
6645
6646
const bool m_UseMutex;
6647
const bool m_AlwaysCommitted;
6648
const bool m_MsaaAlwaysCommitted;
6649
bool m_DefaultPoolsNotZeroed = false;
6650
ID3D12Device* m_Device; // AddRef
6651
#ifdef __ID3D12Device1_INTERFACE_DEFINED__
6652
ID3D12Device1* m_Device1 = NULL; // AddRef, optional
6653
#endif
6654
#ifdef __ID3D12Device4_INTERFACE_DEFINED__
6655
ID3D12Device4* m_Device4 = NULL; // AddRef, optional
6656
#endif
6657
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
6658
ID3D12Device8* m_Device8 = NULL; // AddRef, optional
6659
#endif
6660
#ifdef __ID3D12Device10_INTERFACE_DEFINED__
6661
ID3D12Device10* m_Device10 = NULL; // AddRef, optional
6662
#endif
6663
IDXGIAdapter* m_Adapter; // AddRef
6664
#if D3D12MA_DXGI_1_4
6665
IDXGIAdapter3* m_Adapter3 = NULL; // AddRef, optional
6666
#endif
6667
UINT64 m_PreferredBlockSize;
6668
ALLOCATION_CALLBACKS m_AllocationCallbacks;
6669
D3D12MA_ATOMIC_UINT32 m_CurrentFrameIndex;
6670
DXGI_ADAPTER_DESC m_AdapterDesc;
6671
D3D12_FEATURE_DATA_D3D12_OPTIONS m_D3D12Options;
6672
D3D12_FEATURE_DATA_ARCHITECTURE m_D3D12Architecture;
6673
AllocationObjectAllocator m_AllocationObjectAllocator;
6674
6675
D3D12MA_RW_MUTEX m_PoolsMutex[HEAP_TYPE_COUNT];
6676
PoolList m_Pools[HEAP_TYPE_COUNT];
6677
// Default pools.
6678
BlockVector* m_BlockVectors[DEFAULT_POOL_MAX_COUNT];
6679
CommittedAllocationList m_CommittedAllocations[STANDARD_HEAP_TYPE_COUNT];
6680
6681
/*
6682
Heuristic that decides whether a resource is better placed in its own,
6683
dedicated allocation (committed resource rather than placed resource).
6684
*/
6685
template<typename D3D12_RESOURCE_DESC_T>
6686
static bool PrefersCommittedAllocation(const D3D12_RESOURCE_DESC_T& resourceDesc);
6687
6688
// Allocates and registers new committed resource with implicit heap, as dedicated allocation.
6689
// Creates and returns Allocation object and optionally D3D12 resource.
6690
HRESULT AllocateCommittedResource(
6691
const CommittedAllocationParameters& committedAllocParams,
6692
UINT64 resourceSize, bool withinBudget, void* pPrivateData,
6693
const CREATE_RESOURCE_PARAMS& createParams,
6694
Allocation** ppAllocation, REFIID riidResource, void** ppvResource);
6695
6696
// Allocates and registers new heap without any resources placed in it, as dedicated allocation.
6697
// Creates and returns Allocation object.
6698
HRESULT AllocateHeap(
6699
const CommittedAllocationParameters& committedAllocParams,
6700
const D3D12_RESOURCE_ALLOCATION_INFO& allocInfo, bool withinBudget,
6701
void* pPrivateData, Allocation** ppAllocation);
6702
6703
template<typename D3D12_RESOURCE_DESC_T>
6704
HRESULT CalcAllocationParams(const ALLOCATION_DESC& allocDesc, UINT64 allocSize,
6705
const D3D12_RESOURCE_DESC_T* resDesc, // Optional
6706
BlockVector*& outBlockVector, CommittedAllocationParameters& outCommittedAllocationParams, bool& outPreferCommitted);
6707
6708
// Returns UINT32_MAX if the index cannot be calculated.
6709
UINT CalcDefaultPoolIndex(const ALLOCATION_DESC& allocDesc, ResourceClass resourceClass) const;
6710
void CalcDefaultPoolParams(D3D12_HEAP_TYPE& outHeapType, D3D12_HEAP_FLAGS& outHeapFlags, UINT index) const;
6711
6712
// Registers Pool object in m_Pools.
6713
void RegisterPool(Pool* pool, D3D12_HEAP_TYPE heapType);
6714
// Unregisters Pool object from m_Pools.
6715
void UnregisterPool(Pool* pool, D3D12_HEAP_TYPE heapType);
6716
6717
HRESULT UpdateD3D12Budget();
6718
6719
D3D12_RESOURCE_ALLOCATION_INFO GetResourceAllocationInfoNative(const D3D12_RESOURCE_DESC& resourceDesc) const;
6720
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
6721
D3D12_RESOURCE_ALLOCATION_INFO GetResourceAllocationInfoNative(const D3D12_RESOURCE_DESC1& resourceDesc) const;
6722
#endif
6723
6724
template<typename D3D12_RESOURCE_DESC_T>
6725
D3D12_RESOURCE_ALLOCATION_INFO GetResourceAllocationInfo(D3D12_RESOURCE_DESC_T& inOutResourceDesc) const;
6726
6727
bool NewAllocationWithinBudget(D3D12_HEAP_TYPE heapType, UINT64 size);
6728
6729
// Writes object { } with data of given budget.
6730
static void WriteBudgetToJson(JsonWriter& json, const Budget& budget);
6731
};
6732
6733
#ifndef _D3D12MA_ALLOCATOR_PIMPL_FUNCTINOS
6734
AllocatorPimpl::AllocatorPimpl(const ALLOCATION_CALLBACKS& allocationCallbacks, const ALLOCATOR_DESC& desc)
6735
: m_UseMutex((desc.Flags & ALLOCATOR_FLAG_SINGLETHREADED) == 0),
6736
m_AlwaysCommitted((desc.Flags & ALLOCATOR_FLAG_ALWAYS_COMMITTED) != 0),
6737
m_MsaaAlwaysCommitted((desc.Flags & ALLOCATOR_FLAG_MSAA_TEXTURES_ALWAYS_COMMITTED) != 0),
6738
m_Device(desc.pDevice),
6739
m_Adapter(desc.pAdapter),
6740
m_PreferredBlockSize(desc.PreferredBlockSize != 0 ? desc.PreferredBlockSize : D3D12MA_DEFAULT_BLOCK_SIZE),
6741
m_AllocationCallbacks(allocationCallbacks),
6742
m_CurrentFrameIndex(0),
6743
// Below this line don't use allocationCallbacks but m_AllocationCallbacks!!!
6744
m_AllocationObjectAllocator(m_AllocationCallbacks)
6745
{
6746
// desc.pAllocationCallbacks intentionally ignored here, preprocessed by CreateAllocator.
6747
ZeroMemory(&m_D3D12Options, sizeof(m_D3D12Options));
6748
ZeroMemory(&m_D3D12Architecture, sizeof(m_D3D12Architecture));
6749
6750
ZeroMemory(m_BlockVectors, sizeof(m_BlockVectors));
6751
6752
for (UINT i = 0; i < STANDARD_HEAP_TYPE_COUNT; ++i)
6753
{
6754
m_CommittedAllocations[i].Init(
6755
m_UseMutex,
6756
(D3D12_HEAP_TYPE)(D3D12_HEAP_TYPE_DEFAULT + i),
6757
NULL); // pool
6758
}
6759
6760
m_Device->AddRef();
6761
m_Adapter->AddRef();
6762
}
6763
6764
HRESULT AllocatorPimpl::Init(const ALLOCATOR_DESC& desc)
6765
{
6766
#if D3D12MA_DXGI_1_4
6767
desc.pAdapter->QueryInterface(D3D12MA_IID_PPV_ARGS(&m_Adapter3));
6768
#endif
6769
6770
#ifdef __ID3D12Device1_INTERFACE_DEFINED__
6771
m_Device->QueryInterface(D3D12MA_IID_PPV_ARGS(&m_Device1));
6772
#endif
6773
6774
#ifdef __ID3D12Device4_INTERFACE_DEFINED__
6775
m_Device->QueryInterface(D3D12MA_IID_PPV_ARGS(&m_Device4));
6776
#endif
6777
6778
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
6779
m_Device->QueryInterface(D3D12MA_IID_PPV_ARGS(&m_Device8));
6780
6781
if((desc.Flags & ALLOCATOR_FLAG_DEFAULT_POOLS_NOT_ZEROED) != 0)
6782
{
6783
D3D12_FEATURE_DATA_D3D12_OPTIONS7 options7 = {};
6784
if(SUCCEEDED(m_Device->CheckFeatureSupport(D3D12_FEATURE_D3D12_OPTIONS7, &options7, sizeof(options7))))
6785
{
6786
// DEFAULT_POOLS_NOT_ZEROED both supported and enabled by the user.
6787
m_DefaultPoolsNotZeroed = true;
6788
}
6789
}
6790
#endif
6791
6792
#ifdef __ID3D12Device10_INTERFACE_DEFINED__
6793
m_Device->QueryInterface(D3D12MA_IID_PPV_ARGS(&m_Device10));
6794
#endif
6795
6796
HRESULT hr = m_Adapter->GetDesc(&m_AdapterDesc);
6797
if (FAILED(hr))
6798
{
6799
return hr;
6800
}
6801
6802
hr = m_Device->CheckFeatureSupport(D3D12_FEATURE_D3D12_OPTIONS, &m_D3D12Options, sizeof(m_D3D12Options));
6803
if (FAILED(hr))
6804
{
6805
return hr;
6806
}
6807
#ifdef D3D12MA_FORCE_RESOURCE_HEAP_TIER
6808
m_D3D12Options.ResourceHeapTier = (D3D12MA_FORCE_RESOURCE_HEAP_TIER);
6809
#endif
6810
6811
hr = m_Device->CheckFeatureSupport(D3D12_FEATURE_ARCHITECTURE, &m_D3D12Architecture, sizeof(m_D3D12Architecture));
6812
if (FAILED(hr))
6813
{
6814
m_D3D12Architecture.UMA = FALSE;
6815
m_D3D12Architecture.CacheCoherentUMA = FALSE;
6816
}
6817
6818
D3D12_HEAP_PROPERTIES heapProps = {};
6819
const UINT defaultPoolCount = GetDefaultPoolCount();
6820
for (UINT i = 0; i < defaultPoolCount; ++i)
6821
{
6822
D3D12_HEAP_FLAGS heapFlags;
6823
CalcDefaultPoolParams(heapProps.Type, heapFlags, i);
6824
6825
#if D3D12MA_CREATE_NOT_ZEROED_AVAILABLE
6826
if(m_DefaultPoolsNotZeroed)
6827
{
6828
heapFlags |= D3D12_HEAP_FLAG_CREATE_NOT_ZEROED;
6829
}
6830
#endif
6831
6832
m_BlockVectors[i] = D3D12MA_NEW(GetAllocs(), BlockVector)(
6833
this, // hAllocator
6834
heapProps, // heapType
6835
heapFlags, // heapFlags
6836
m_PreferredBlockSize,
6837
0, // minBlockCount
6838
SIZE_MAX, // maxBlockCount
6839
false, // explicitBlockSize
6840
D3D12MA_DEBUG_ALIGNMENT, // minAllocationAlignment
6841
0, // Default algorithm,
6842
m_MsaaAlwaysCommitted,
6843
NULL, // pProtectedSession
6844
D3D12_RESIDENCY_PRIORITY_NONE); // residencyPriority
6845
// No need to call m_pBlockVectors[i]->CreateMinBlocks here, because minBlockCount is 0.
6846
}
6847
6848
#if D3D12MA_DXGI_1_4
6849
UpdateD3D12Budget();
6850
#endif
6851
6852
return S_OK;
6853
}
6854
6855
AllocatorPimpl::~AllocatorPimpl()
6856
{
6857
#ifdef __ID3D12Device10_INTERFACE_DEFINED__
6858
SAFE_RELEASE(m_Device10);
6859
#endif
6860
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
6861
SAFE_RELEASE(m_Device8);
6862
#endif
6863
#ifdef __ID3D12Device4_INTERFACE_DEFINED__
6864
SAFE_RELEASE(m_Device4);
6865
#endif
6866
#ifdef __ID3D12Device1_INTERFACE_DEFINED__
6867
SAFE_RELEASE(m_Device1);
6868
#endif
6869
#if D3D12MA_DXGI_1_4
6870
SAFE_RELEASE(m_Adapter3);
6871
#endif
6872
SAFE_RELEASE(m_Adapter);
6873
SAFE_RELEASE(m_Device);
6874
6875
for (UINT i = DEFAULT_POOL_MAX_COUNT; i--; )
6876
{
6877
D3D12MA_DELETE(GetAllocs(), m_BlockVectors[i]);
6878
}
6879
6880
for (UINT i = HEAP_TYPE_COUNT; i--; )
6881
{
6882
if (!m_Pools[i].IsEmpty())
6883
{
6884
D3D12MA_ASSERT(0 && "Unfreed pools found!");
6885
}
6886
}
6887
}
6888
6889
bool AllocatorPimpl::HeapFlagsFulfillResourceHeapTier(D3D12_HEAP_FLAGS flags) const
6890
{
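// On D3D12_RESOURCE_HEAP_TIER_1 a heap may hold only one of the three resource categories
// (buffers, RT/DS textures, other textures), so the flags must deny exactly two of them.
// Tier 2 heaps can mix all categories, hence any flag combination is acceptable.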
6891
if (SupportsResourceHeapTier2())
6892
{
6893
return true;
6894
}
6895
else
6896
{
6897
const bool allowBuffers = (flags & D3D12_HEAP_FLAG_DENY_BUFFERS) == 0;
6898
const bool allowRtDsTextures = (flags & D3D12_HEAP_FLAG_DENY_RT_DS_TEXTURES) == 0;
6899
const bool allowNonRtDsTextures = (flags & D3D12_HEAP_FLAG_DENY_NON_RT_DS_TEXTURES) == 0;
6900
const uint8_t allowedGroupCount = (allowBuffers ? 1 : 0) + (allowRtDsTextures ? 1 : 0) + (allowNonRtDsTextures ? 1 : 0);
6901
return allowedGroupCount == 1;
6902
}
6903
}
6904
6905
UINT AllocatorPimpl::StandardHeapTypeToMemorySegmentGroup(D3D12_HEAP_TYPE heapType) const
6906
{
6907
D3D12MA_ASSERT(IsHeapTypeStandard(heapType));
6908
if (IsUMA())
6909
return DXGI_MEMORY_SEGMENT_GROUP_LOCAL_COPY;
6910
return heapType == D3D12_HEAP_TYPE_DEFAULT ?
6911
DXGI_MEMORY_SEGMENT_GROUP_LOCAL_COPY : DXGI_MEMORY_SEGMENT_GROUP_NON_LOCAL_COPY;
6912
}
6913
6914
UINT AllocatorPimpl::HeapPropertiesToMemorySegmentGroup(const D3D12_HEAP_PROPERTIES& heapProps) const
6915
{
6916
if (IsUMA())
6917
return DXGI_MEMORY_SEGMENT_GROUP_LOCAL_COPY;
6918
if (heapProps.MemoryPoolPreference == D3D12_MEMORY_POOL_UNKNOWN)
6919
return StandardHeapTypeToMemorySegmentGroup(heapProps.Type);
6920
return heapProps.MemoryPoolPreference == D3D12_MEMORY_POOL_L1 ?
6921
DXGI_MEMORY_SEGMENT_GROUP_LOCAL_COPY : DXGI_MEMORY_SEGMENT_GROUP_NON_LOCAL_COPY;
6922
}
6923
6924
UINT64 AllocatorPimpl::GetMemoryCapacity(UINT memorySegmentGroup) const
6925
{
6926
switch (memorySegmentGroup)
6927
{
6928
case DXGI_MEMORY_SEGMENT_GROUP_LOCAL_COPY:
6929
return IsUMA() ?
6930
m_AdapterDesc.DedicatedVideoMemory + m_AdapterDesc.SharedSystemMemory : m_AdapterDesc.DedicatedVideoMemory;
6931
case DXGI_MEMORY_SEGMENT_GROUP_NON_LOCAL_COPY:
6932
return IsUMA() ? 0 : m_AdapterDesc.SharedSystemMemory;
6933
default:
6934
D3D12MA_ASSERT(0);
6935
return UINT64_MAX;
6936
}
6937
}

HRESULT AllocatorPimpl::CreatePlacedResourceWrap(
    ID3D12Heap *pHeap,
    UINT64 HeapOffset,
    const CREATE_RESOURCE_PARAMS& createParams,
    REFIID riidResource,
    void** ppvResource)
{
#ifdef __ID3D12Device10_INTERFACE_DEFINED__
    if (createParams.Variant == CREATE_RESOURCE_PARAMS::VARIANT_WITH_LAYOUT)
    {
        if (!m_Device10)
        {
            return E_NOINTERFACE;
        }
        return m_Device10->CreatePlacedResource2(pHeap, HeapOffset,
            createParams.GetResourceDesc1(), createParams.GetInitialLayout(),
            createParams.GetOptimizedClearValue(), createParams.GetNumCastableFormats(),
            createParams.GetCastableFormats(), riidResource, ppvResource);
    } else
#endif
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
    if (createParams.Variant == CREATE_RESOURCE_PARAMS::VARIANT_WITH_STATE_AND_DESC1)
    {
        if (!m_Device8)
        {
            return E_NOINTERFACE;
        }
        return m_Device8->CreatePlacedResource1(pHeap, HeapOffset,
            createParams.GetResourceDesc1(), createParams.GetInitialResourceState(),
            createParams.GetOptimizedClearValue(), riidResource, ppvResource);
    } else
#endif
    if (createParams.Variant == CREATE_RESOURCE_PARAMS::VARIANT_WITH_STATE)
    {
        return m_Device->CreatePlacedResource(pHeap, HeapOffset,
            createParams.GetResourceDesc(), createParams.GetInitialResourceState(),
            createParams.GetOptimizedClearValue(), riidResource, ppvResource);
    }
    else
    {
        D3D12MA_ASSERT(0);
        return E_INVALIDARG;
    }
}


HRESULT AllocatorPimpl::CreateResource(
    const ALLOCATION_DESC* pAllocDesc,
    const CREATE_RESOURCE_PARAMS& createParams,
    Allocation** ppAllocation,
    REFIID riidResource,
    void** ppvResource)
{
    D3D12MA_ASSERT(pAllocDesc && createParams.GetBaseResourceDesc() && ppAllocation);

    *ppAllocation = NULL;
    if (ppvResource)
    {
        *ppvResource = NULL;
    }

    CREATE_RESOURCE_PARAMS finalCreateParams = createParams;
    D3D12_RESOURCE_DESC finalResourceDesc;
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
    D3D12_RESOURCE_DESC1 finalResourceDesc1;
#endif
    D3D12_RESOURCE_ALLOCATION_INFO resAllocInfo;
    if (createParams.Variant == CREATE_RESOURCE_PARAMS::VARIANT_WITH_STATE)
    {
        finalResourceDesc = *createParams.GetResourceDesc();
        finalCreateParams.AccessResourceDesc() = &finalResourceDesc;
        resAllocInfo = GetResourceAllocationInfo(finalResourceDesc);
    }
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
    else if (createParams.Variant == CREATE_RESOURCE_PARAMS::VARIANT_WITH_STATE_AND_DESC1)
    {
        if (!m_Device8)
        {
            return E_NOINTERFACE;
        }
        finalResourceDesc1 = *createParams.GetResourceDesc1();
        finalCreateParams.AccessResourceDesc1() = &finalResourceDesc1;
        resAllocInfo = GetResourceAllocationInfo(finalResourceDesc1);
    }
#endif
#ifdef __ID3D12Device10_INTERFACE_DEFINED__
    else if (createParams.Variant == CREATE_RESOURCE_PARAMS::VARIANT_WITH_LAYOUT)
    {
        if (!m_Device10)
        {
            return E_NOINTERFACE;
        }
        finalResourceDesc1 = *createParams.GetResourceDesc1();
        finalCreateParams.AccessResourceDesc1() = &finalResourceDesc1;
        resAllocInfo = GetResourceAllocationInfo(finalResourceDesc1);
    }
#endif
    else
    {
        D3D12MA_ASSERT(0);
        return E_INVALIDARG;
    }
    D3D12MA_ASSERT(IsPow2(resAllocInfo.Alignment));
    D3D12MA_ASSERT(resAllocInfo.SizeInBytes > 0);

    BlockVector* blockVector = NULL;
    CommittedAllocationParameters committedAllocationParams = {};
    bool preferCommitted = false;

    HRESULT hr;
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
    if (createParams.Variant >= CREATE_RESOURCE_PARAMS::VARIANT_WITH_STATE_AND_DESC1)
    {
        hr = CalcAllocationParams<D3D12_RESOURCE_DESC1>(*pAllocDesc, resAllocInfo.SizeInBytes,
            createParams.GetResourceDesc1(),
            blockVector, committedAllocationParams, preferCommitted);
    }
    else
#endif
    {
        hr = CalcAllocationParams<D3D12_RESOURCE_DESC>(*pAllocDesc, resAllocInfo.SizeInBytes,
            createParams.GetResourceDesc(),
            blockVector, committedAllocationParams, preferCommitted);
    }
    if (FAILED(hr))
        return hr;

    const bool withinBudget = (pAllocDesc->Flags & ALLOCATION_FLAG_WITHIN_BUDGET) != 0;
    hr = E_INVALIDARG;
    if (committedAllocationParams.IsValid() && preferCommitted)
    {
        hr = AllocateCommittedResource(committedAllocationParams,
            resAllocInfo.SizeInBytes, withinBudget, pAllocDesc->pPrivateData,
            finalCreateParams, ppAllocation, riidResource, ppvResource);
        if (SUCCEEDED(hr))
            return hr;
    }
    if (blockVector != NULL)
    {
        hr = blockVector->CreateResource(resAllocInfo.SizeInBytes, resAllocInfo.Alignment,
            *pAllocDesc, finalCreateParams,
            ppAllocation, riidResource, ppvResource);
        if (SUCCEEDED(hr))
            return hr;
    }
    if (committedAllocationParams.IsValid() && !preferCommitted)
    {
        hr = AllocateCommittedResource(committedAllocationParams,
            resAllocInfo.SizeInBytes, withinBudget, pAllocDesc->pPrivateData,
            finalCreateParams, ppAllocation, riidResource, ppvResource);
        if (SUCCEEDED(hr))
            return hr;
    }
    return hr;
}
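
// Usage sketch (hedged example, not part of the library): this pimpl function backs the
// public D3D12MA::Allocator::CreateResource overload. Typical application-side code for
// creating a buffer looks roughly like the following; "allocator", the 64 KiB size and
// the error handling are placeholders.
//
//   D3D12MA::ALLOCATION_DESC allocDesc = {};
//   allocDesc.HeapType = D3D12_HEAP_TYPE_DEFAULT;
//
//   D3D12_RESOURCE_DESC resDesc = {};
//   resDesc.Dimension = D3D12_RESOURCE_DIMENSION_BUFFER;
//   resDesc.Width = 65536; // placeholder size in bytes
//   resDesc.Height = 1;
//   resDesc.DepthOrArraySize = 1;
//   resDesc.MipLevels = 1;
//   resDesc.SampleDesc.Count = 1;
//   resDesc.Layout = D3D12_TEXTURE_LAYOUT_ROW_MAJOR;
//
//   D3D12MA::Allocation* alloc = nullptr;
//   ID3D12Resource* resource = nullptr;
//   HRESULT hr = allocator->CreateResource(
//       &allocDesc, &resDesc, D3D12_RESOURCE_STATE_COMMON,
//       nullptr, // pOptimizedClearValue
//       &alloc, IID_PPV_ARGS(&resource));
//   // On success the Allocation owns the backing memory; releasing both `resource`
//   // and `alloc` returns it to the allocator.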

HRESULT AllocatorPimpl::AllocateMemory(
    const ALLOCATION_DESC* pAllocDesc,
    const D3D12_RESOURCE_ALLOCATION_INFO* pAllocInfo,
    Allocation** ppAllocation)
{
    *ppAllocation = NULL;

    BlockVector* blockVector = NULL;
    CommittedAllocationParameters committedAllocationParams = {};
    bool preferCommitted = false;
    HRESULT hr = CalcAllocationParams<D3D12_RESOURCE_DESC>(*pAllocDesc, pAllocInfo->SizeInBytes,
        NULL, // pResDesc
        blockVector, committedAllocationParams, preferCommitted);
    if (FAILED(hr))
        return hr;

    const bool withinBudget = (pAllocDesc->Flags & ALLOCATION_FLAG_WITHIN_BUDGET) != 0;
    hr = E_INVALIDARG;
    if (committedAllocationParams.IsValid() && preferCommitted)
    {
        hr = AllocateHeap(committedAllocationParams, *pAllocInfo, withinBudget, pAllocDesc->pPrivateData, ppAllocation);
        if (SUCCEEDED(hr))
            return hr;
    }
    if (blockVector != NULL)
    {
        hr = blockVector->Allocate(pAllocInfo->SizeInBytes, pAllocInfo->Alignment,
            *pAllocDesc, 1, (Allocation**)ppAllocation);
        if (SUCCEEDED(hr))
            return hr;
    }
    if (committedAllocationParams.IsValid() && !preferCommitted)
    {
        hr = AllocateHeap(committedAllocationParams, *pAllocInfo, withinBudget, pAllocDesc->pPrivateData, ppAllocation);
        if (SUCCEEDED(hr))
            return hr;
    }
    return hr;
}
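
// Usage sketch (hedged example, not part of the library): the public
// D3D12MA::Allocator::AllocateMemory + CreateAliasingResource pair lets an application
// reserve one block of heap memory and alias resources on top of it. "allocator",
// "device" and "renderTargetDesc" are placeholders supplied by the caller.
//
//   D3D12MA::ALLOCATION_DESC allocDesc = {};
//   allocDesc.HeapType = D3D12_HEAP_TYPE_DEFAULT;
//   allocDesc.ExtraHeapFlags = D3D12_HEAP_FLAG_ALLOW_ONLY_RT_DS_TEXTURES;
//
//   D3D12_RESOURCE_ALLOCATION_INFO allocInfo =
//       device->GetResourceAllocationInfo(0, 1, &renderTargetDesc);
//
//   D3D12MA::Allocation* alloc = nullptr;
//   HRESULT hr = allocator->AllocateMemory(&allocDesc, &allocInfo, &alloc);
//
//   ID3D12Resource* aliasedRT = nullptr;
//   if (SUCCEEDED(hr))
//       hr = allocator->CreateAliasingResource(alloc, 0, &renderTargetDesc,
//           D3D12_RESOURCE_STATE_RENDER_TARGET, nullptr, IID_PPV_ARGS(&aliasedRT));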

HRESULT AllocatorPimpl::CreateAliasingResource(
    Allocation* pAllocation,
    UINT64 AllocationLocalOffset,
    const CREATE_RESOURCE_PARAMS& createParams,
    REFIID riidResource,
    void** ppvResource)
{
    *ppvResource = NULL;

    CREATE_RESOURCE_PARAMS finalCreateParams = createParams;
    D3D12_RESOURCE_DESC finalResourceDesc;
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
    D3D12_RESOURCE_DESC1 finalResourceDesc1;
#endif
    D3D12_RESOURCE_ALLOCATION_INFO resAllocInfo;
    if (createParams.Variant == CREATE_RESOURCE_PARAMS::VARIANT_WITH_STATE)
    {
        finalResourceDesc = *createParams.GetResourceDesc();
        finalCreateParams.AccessResourceDesc() = &finalResourceDesc;
        resAllocInfo = GetResourceAllocationInfo(finalResourceDesc);
    }
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
    else if (createParams.Variant == CREATE_RESOURCE_PARAMS::VARIANT_WITH_STATE_AND_DESC1)
    {
        if (!m_Device8)
        {
            return E_NOINTERFACE;
        }
        finalResourceDesc1 = *createParams.GetResourceDesc1();
        finalCreateParams.AccessResourceDesc1() = &finalResourceDesc1;
        resAllocInfo = GetResourceAllocationInfo(finalResourceDesc1);
    }
#endif
#ifdef __ID3D12Device10_INTERFACE_DEFINED__
    else if (createParams.Variant == CREATE_RESOURCE_PARAMS::VARIANT_WITH_LAYOUT)
    {
        if (!m_Device10)
        {
            return E_NOINTERFACE;
        }
        finalResourceDesc1 = *createParams.GetResourceDesc1();
        finalCreateParams.AccessResourceDesc1() = &finalResourceDesc1;
        resAllocInfo = GetResourceAllocationInfo(finalResourceDesc1);
    }
#endif
    else
    {
        D3D12MA_ASSERT(0);
        return E_INVALIDARG;
    }
    D3D12MA_ASSERT(IsPow2(resAllocInfo.Alignment));
    D3D12MA_ASSERT(resAllocInfo.SizeInBytes > 0);

    ID3D12Heap* const existingHeap = pAllocation->GetHeap();
    const UINT64 existingOffset = pAllocation->GetOffset();
    const UINT64 existingSize = pAllocation->GetSize();
    const UINT64 newOffset = existingOffset + AllocationLocalOffset;

    if (existingHeap == NULL ||
        AllocationLocalOffset + resAllocInfo.SizeInBytes > existingSize ||
        newOffset % resAllocInfo.Alignment != 0)
    {
        return E_INVALIDARG;
    }

    return CreatePlacedResourceWrap(existingHeap, newOffset, finalCreateParams, riidResource, ppvResource);
}

void AllocatorPimpl::FreeCommittedMemory(Allocation* allocation)
{
    D3D12MA_ASSERT(allocation && allocation->m_PackedData.GetType() == Allocation::TYPE_COMMITTED);

    CommittedAllocationList* const allocList = allocation->m_Committed.list;
    allocList->Unregister(allocation);

    const UINT memSegmentGroup = allocList->GetMemorySegmentGroup(this);
    const UINT64 allocSize = allocation->GetSize();
    m_Budget.RemoveAllocation(memSegmentGroup, allocSize);
    m_Budget.RemoveBlock(memSegmentGroup, allocSize);
}

void AllocatorPimpl::FreePlacedMemory(Allocation* allocation)
{
    D3D12MA_ASSERT(allocation && allocation->m_PackedData.GetType() == Allocation::TYPE_PLACED);

    NormalBlock* const block = allocation->m_Placed.block;
    D3D12MA_ASSERT(block);
    BlockVector* const blockVector = block->GetBlockVector();
    D3D12MA_ASSERT(blockVector);
    m_Budget.RemoveAllocation(HeapPropertiesToMemorySegmentGroup(block->GetHeapProperties()), allocation->GetSize());
    blockVector->Free(allocation);
}

void AllocatorPimpl::FreeHeapMemory(Allocation* allocation)
{
    D3D12MA_ASSERT(allocation && allocation->m_PackedData.GetType() == Allocation::TYPE_HEAP);

    CommittedAllocationList* const allocList = allocation->m_Committed.list;
    allocList->Unregister(allocation);
    SAFE_RELEASE(allocation->m_Heap.heap);

    const UINT memSegmentGroup = allocList->GetMemorySegmentGroup(this);
    const UINT64 allocSize = allocation->GetSize();
    m_Budget.RemoveAllocation(memSegmentGroup, allocSize);
    m_Budget.RemoveBlock(memSegmentGroup, allocSize);
}

void AllocatorPimpl::SetResidencyPriority(ID3D12Pageable* obj, D3D12_RESIDENCY_PRIORITY priority) const
{
#ifdef __ID3D12Device1_INTERFACE_DEFINED__
    if (priority != D3D12_RESIDENCY_PRIORITY_NONE && m_Device1)
    {
        // Intentionally ignoring the result.
        m_Device1->SetResidencyPriority(1, &obj, &priority);
    }
#endif
}

void AllocatorPimpl::SetCurrentFrameIndex(UINT frameIndex)
{
    m_CurrentFrameIndex.store(frameIndex);

#if D3D12MA_DXGI_1_4
    UpdateD3D12Budget();
#endif
}
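
// Usage note (hedged example, not part of the library): applications that rely on the
// DXGI budget integration typically advance the frame index once per frame so the
// cached budget is refreshed at a bounded rate; "allocator" and "frameNumber" are
// placeholders.
//
//   // Once per rendered frame, before issuing new allocations:
//   allocator->SetCurrentFrameIndex(static_cast<UINT>(frameNumber));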
7261
7262
void AllocatorPimpl::CalculateStatistics(TotalStatistics& outStats, DetailedStatistics outCutomHeaps[2])
7263
{
7264
// Init stats
7265
for (size_t i = 0; i < HEAP_TYPE_COUNT; i++)
7266
ClearDetailedStatistics(outStats.HeapType[i]);
7267
for (size_t i = 0; i < DXGI_MEMORY_SEGMENT_GROUP_COUNT; i++)
7268
ClearDetailedStatistics(outStats.MemorySegmentGroup[i]);
7269
ClearDetailedStatistics(outStats.Total);
7270
if (outCutomHeaps)
7271
{
7272
ClearDetailedStatistics(outCutomHeaps[0]);
7273
ClearDetailedStatistics(outCutomHeaps[1]);
7274
}
7275
7276
// Process default pools. 3 standard heap types only. Add them to outStats.HeapType[i].
7277
if (SupportsResourceHeapTier2())
7278
{
7279
// DEFAULT, UPLOAD, READBACK.
7280
for (size_t heapTypeIndex = 0; heapTypeIndex < STANDARD_HEAP_TYPE_COUNT; ++heapTypeIndex)
7281
{
7282
BlockVector* const pBlockVector = m_BlockVectors[heapTypeIndex];
7283
D3D12MA_ASSERT(pBlockVector);
7284
pBlockVector->AddDetailedStatistics(outStats.HeapType[heapTypeIndex]);
7285
}
7286
}
7287
else
7288
{
7289
// DEFAULT, UPLOAD, READBACK.
7290
for (size_t heapTypeIndex = 0; heapTypeIndex < STANDARD_HEAP_TYPE_COUNT; ++heapTypeIndex)
7291
{
7292
for (size_t heapSubType = 0; heapSubType < 3; ++heapSubType)
7293
{
7294
BlockVector* const pBlockVector = m_BlockVectors[heapTypeIndex * 3 + heapSubType];
7295
D3D12MA_ASSERT(pBlockVector);
7296
pBlockVector->AddDetailedStatistics(outStats.HeapType[heapTypeIndex]);
7297
}
7298
}
7299
}
7300
7301
// Sum them up to memory segment groups.
7302
AddDetailedStatistics(
7303
outStats.MemorySegmentGroup[StandardHeapTypeToMemorySegmentGroup(D3D12_HEAP_TYPE_DEFAULT)],
7304
outStats.HeapType[0]);
7305
AddDetailedStatistics(
7306
outStats.MemorySegmentGroup[StandardHeapTypeToMemorySegmentGroup(D3D12_HEAP_TYPE_UPLOAD)],
7307
outStats.HeapType[1]);
7308
AddDetailedStatistics(
7309
outStats.MemorySegmentGroup[StandardHeapTypeToMemorySegmentGroup(D3D12_HEAP_TYPE_READBACK)],
7310
outStats.HeapType[2]);
7311
7312
// Process custom pools.
7313
DetailedStatistics tmpStats;
7314
for (size_t heapTypeIndex = 0; heapTypeIndex < HEAP_TYPE_COUNT; ++heapTypeIndex)
7315
{
7316
MutexLockRead lock(m_PoolsMutex[heapTypeIndex], m_UseMutex);
7317
PoolList& poolList = m_Pools[heapTypeIndex];
7318
for (PoolPimpl* pool = poolList.Front(); pool != NULL; pool = poolList.GetNext(pool))
7319
{
7320
const D3D12_HEAP_PROPERTIES& poolHeapProps = pool->GetDesc().HeapProperties;
7321
ClearDetailedStatistics(tmpStats);
7322
pool->AddDetailedStatistics(tmpStats);
7323
AddDetailedStatistics(
7324
outStats.HeapType[heapTypeIndex], tmpStats);
7325
7326
UINT memorySegment = HeapPropertiesToMemorySegmentGroup(poolHeapProps);
7327
AddDetailedStatistics(
7328
outStats.MemorySegmentGroup[memorySegment], tmpStats);
7329
7330
if (outCutomHeaps)
7331
AddDetailedStatistics(outCutomHeaps[memorySegment], tmpStats);
7332
}
7333
}
7334
7335
// Process committed allocations. 3 standard heap types only.
7336
for (UINT heapTypeIndex = 0; heapTypeIndex < STANDARD_HEAP_TYPE_COUNT; ++heapTypeIndex)
7337
{
7338
ClearDetailedStatistics(tmpStats);
7339
m_CommittedAllocations[heapTypeIndex].AddDetailedStatistics(tmpStats);
7340
AddDetailedStatistics(
7341
outStats.HeapType[heapTypeIndex], tmpStats);
7342
AddDetailedStatistics(
7343
outStats.MemorySegmentGroup[StandardHeapTypeToMemorySegmentGroup(IndexToHeapType(heapTypeIndex))], tmpStats);
7344
}
7345
7346
// Sum up memory segment groups to totals.
7347
AddDetailedStatistics(outStats.Total, outStats.MemorySegmentGroup[0]);
7348
AddDetailedStatistics(outStats.Total, outStats.MemorySegmentGroup[1]);
7349
7350
D3D12MA_ASSERT(outStats.Total.Stats.BlockCount ==
7351
outStats.MemorySegmentGroup[0].Stats.BlockCount + outStats.MemorySegmentGroup[1].Stats.BlockCount);
7352
D3D12MA_ASSERT(outStats.Total.Stats.AllocationCount ==
7353
outStats.MemorySegmentGroup[0].Stats.AllocationCount + outStats.MemorySegmentGroup[1].Stats.AllocationCount);
7354
D3D12MA_ASSERT(outStats.Total.Stats.BlockBytes ==
7355
outStats.MemorySegmentGroup[0].Stats.BlockBytes + outStats.MemorySegmentGroup[1].Stats.BlockBytes);
7356
D3D12MA_ASSERT(outStats.Total.Stats.AllocationBytes ==
7357
outStats.MemorySegmentGroup[0].Stats.AllocationBytes + outStats.MemorySegmentGroup[1].Stats.AllocationBytes);
7358
D3D12MA_ASSERT(outStats.Total.UnusedRangeCount ==
7359
outStats.MemorySegmentGroup[0].UnusedRangeCount + outStats.MemorySegmentGroup[1].UnusedRangeCount);
7360
7361
D3D12MA_ASSERT(outStats.Total.Stats.BlockCount ==
7362
outStats.HeapType[0].Stats.BlockCount + outStats.HeapType[1].Stats.BlockCount +
7363
outStats.HeapType[2].Stats.BlockCount + outStats.HeapType[3].Stats.BlockCount);
7364
D3D12MA_ASSERT(outStats.Total.Stats.AllocationCount ==
7365
outStats.HeapType[0].Stats.AllocationCount + outStats.HeapType[1].Stats.AllocationCount +
7366
outStats.HeapType[2].Stats.AllocationCount + outStats.HeapType[3].Stats.AllocationCount);
7367
D3D12MA_ASSERT(outStats.Total.Stats.BlockBytes ==
7368
outStats.HeapType[0].Stats.BlockBytes + outStats.HeapType[1].Stats.BlockBytes +
7369
outStats.HeapType[2].Stats.BlockBytes + outStats.HeapType[3].Stats.BlockBytes);
7370
D3D12MA_ASSERT(outStats.Total.Stats.AllocationBytes ==
7371
outStats.HeapType[0].Stats.AllocationBytes + outStats.HeapType[1].Stats.AllocationBytes +
7372
outStats.HeapType[2].Stats.AllocationBytes + outStats.HeapType[3].Stats.AllocationBytes);
7373
D3D12MA_ASSERT(outStats.Total.UnusedRangeCount ==
7374
outStats.HeapType[0].UnusedRangeCount + outStats.HeapType[1].UnusedRangeCount +
7375
outStats.HeapType[2].UnusedRangeCount + outStats.HeapType[3].UnusedRangeCount);
7376
}

void AllocatorPimpl::GetBudget(Budget* outLocalBudget, Budget* outNonLocalBudget)
{
    if (outLocalBudget)
        m_Budget.GetStatistics(outLocalBudget->Stats, DXGI_MEMORY_SEGMENT_GROUP_LOCAL_COPY);
    if (outNonLocalBudget)
        m_Budget.GetStatistics(outNonLocalBudget->Stats, DXGI_MEMORY_SEGMENT_GROUP_NON_LOCAL_COPY);

#if D3D12MA_DXGI_1_4
    if (m_Adapter3)
    {
        if (!m_Budget.ShouldUpdateBudget())
        {
            m_Budget.GetBudget(m_UseMutex,
                outLocalBudget ? &outLocalBudget->UsageBytes : NULL,
                outLocalBudget ? &outLocalBudget->BudgetBytes : NULL,
                outNonLocalBudget ? &outNonLocalBudget->UsageBytes : NULL,
                outNonLocalBudget ? &outNonLocalBudget->BudgetBytes : NULL);
        }
        else
        {
            UpdateD3D12Budget();
            GetBudget(outLocalBudget, outNonLocalBudget); // Recursion
        }
    }
    else
#endif
    {
        if (outLocalBudget)
        {
            outLocalBudget->UsageBytes = outLocalBudget->Stats.BlockBytes;
            outLocalBudget->BudgetBytes = GetMemoryCapacity(DXGI_MEMORY_SEGMENT_GROUP_LOCAL_COPY) * 8 / 10; // 80% heuristics.
        }
        if (outNonLocalBudget)
        {
            outNonLocalBudget->UsageBytes = outNonLocalBudget->Stats.BlockBytes;
            outNonLocalBudget->BudgetBytes = GetMemoryCapacity(DXGI_MEMORY_SEGMENT_GROUP_NON_LOCAL_COPY) * 8 / 10; // 80% heuristics.
        }
    }
}

void AllocatorPimpl::GetBudgetForHeapType(Budget& outBudget, D3D12_HEAP_TYPE heapType)
{
    switch (heapType)
    {
    case D3D12_HEAP_TYPE_DEFAULT:
        GetBudget(&outBudget, NULL);
        break;
    case D3D12_HEAP_TYPE_UPLOAD:
    case D3D12_HEAP_TYPE_READBACK:
        GetBudget(NULL, &outBudget);
        break;
    default: D3D12MA_ASSERT(0);
    }
}
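
// Usage sketch (hedged example, not part of the library): the public
// D3D12MA::Allocator::GetBudget wrapper around this function can be used to throttle
// streaming before the process overshoots the OS-provided budget. "allocator" and the
// 64 MiB threshold below are placeholders.
//
//   D3D12MA::Budget localBudget = {};
//   allocator->GetBudget(&localBudget, nullptr); // first argument = local (video) segment
//
//   const UINT64 headroom = localBudget.BudgetBytes > localBudget.UsageBytes ?
//       localBudget.BudgetBytes - localBudget.UsageBytes : 0;
//   if (headroom < 64ull * 1024 * 1024)
//   {
//       // Postpone optional uploads or evict streamed data.
//   }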
7432
7433
void AllocatorPimpl::BuildStatsString(WCHAR** ppStatsString, BOOL detailedMap)
7434
{
7435
StringBuilder sb(GetAllocs());
7436
{
7437
Budget localBudget = {}, nonLocalBudget = {};
7438
GetBudget(&localBudget, &nonLocalBudget);
7439
7440
TotalStatistics stats;
7441
DetailedStatistics customHeaps[2];
7442
CalculateStatistics(stats, customHeaps);
7443
7444
JsonWriter json(GetAllocs(), sb);
7445
json.BeginObject();
7446
{
7447
json.WriteString(L"General");
7448
json.BeginObject();
7449
{
7450
json.WriteString(L"API");
7451
json.WriteString(L"Direct3D 12");
7452
7453
json.WriteString(L"GPU");
7454
json.WriteString(m_AdapterDesc.Description);
7455
7456
json.WriteString(L"DedicatedVideoMemory");
7457
json.WriteNumber((UINT64)m_AdapterDesc.DedicatedVideoMemory);
7458
json.WriteString(L"DedicatedSystemMemory");
7459
json.WriteNumber((UINT64)m_AdapterDesc.DedicatedSystemMemory);
7460
json.WriteString(L"SharedSystemMemory");
7461
json.WriteNumber((UINT64)m_AdapterDesc.SharedSystemMemory);
7462
7463
json.WriteString(L"ResourceHeapTier");
7464
json.WriteNumber(static_cast<UINT>(m_D3D12Options.ResourceHeapTier));
7465
7466
json.WriteString(L"ResourceBindingTier");
7467
json.WriteNumber(static_cast<UINT>(m_D3D12Options.ResourceBindingTier));
7468
7469
json.WriteString(L"TiledResourcesTier");
7470
json.WriteNumber(static_cast<UINT>(m_D3D12Options.TiledResourcesTier));
7471
7472
json.WriteString(L"TileBasedRenderer");
7473
json.WriteBool(m_D3D12Architecture.TileBasedRenderer);
7474
7475
json.WriteString(L"UMA");
7476
json.WriteBool(m_D3D12Architecture.UMA);
7477
json.WriteString(L"CacheCoherentUMA");
7478
json.WriteBool(m_D3D12Architecture.CacheCoherentUMA);
7479
}
7480
json.EndObject();
7481
}
7482
{
7483
json.WriteString(L"Total");
7484
json.AddDetailedStatisticsInfoObject(stats.Total);
7485
}
7486
{
7487
json.WriteString(L"MemoryInfo");
7488
json.BeginObject();
7489
{
7490
json.WriteString(L"L0");
7491
json.BeginObject();
7492
{
7493
json.WriteString(L"Budget");
7494
WriteBudgetToJson(json, IsUMA() ? localBudget : nonLocalBudget); // When UMA device only L0 present as local
7495
7496
json.WriteString(L"Stats");
7497
json.AddDetailedStatisticsInfoObject(stats.MemorySegmentGroup[!IsUMA()]);
7498
7499
json.WriteString(L"MemoryPools");
7500
json.BeginObject();
7501
{
7502
if (IsUMA())
7503
{
7504
json.WriteString(L"DEFAULT");
7505
json.BeginObject();
7506
{
7507
json.WriteString(L"Stats");
7508
json.AddDetailedStatisticsInfoObject(stats.HeapType[0]);
7509
}
7510
json.EndObject();
7511
}
7512
json.WriteString(L"UPLOAD");
7513
json.BeginObject();
7514
{
7515
json.WriteString(L"Stats");
7516
json.AddDetailedStatisticsInfoObject(stats.HeapType[1]);
7517
}
7518
json.EndObject();
7519
7520
json.WriteString(L"READBACK");
7521
json.BeginObject();
7522
{
7523
json.WriteString(L"Stats");
7524
json.AddDetailedStatisticsInfoObject(stats.HeapType[2]);
7525
}
7526
json.EndObject();
7527
7528
json.WriteString(L"CUSTOM");
7529
json.BeginObject();
7530
{
7531
json.WriteString(L"Stats");
7532
json.AddDetailedStatisticsInfoObject(customHeaps[!IsUMA()]);
7533
}
7534
json.EndObject();
7535
}
7536
json.EndObject();
7537
}
7538
json.EndObject();
7539
if (!IsUMA())
7540
{
7541
json.WriteString(L"L1");
7542
json.BeginObject();
7543
{
7544
json.WriteString(L"Budget");
7545
WriteBudgetToJson(json, localBudget);
7546
7547
json.WriteString(L"Stats");
7548
json.AddDetailedStatisticsInfoObject(stats.MemorySegmentGroup[0]);
7549
7550
json.WriteString(L"MemoryPools");
7551
json.BeginObject();
7552
{
7553
json.WriteString(L"DEFAULT");
7554
json.BeginObject();
7555
{
7556
json.WriteString(L"Stats");
7557
json.AddDetailedStatisticsInfoObject(stats.HeapType[0]);
7558
}
7559
json.EndObject();
7560
7561
json.WriteString(L"CUSTOM");
7562
json.BeginObject();
7563
{
7564
json.WriteString(L"Stats");
7565
json.AddDetailedStatisticsInfoObject(customHeaps[0]);
7566
}
7567
json.EndObject();
7568
}
7569
json.EndObject();
7570
}
7571
json.EndObject();
7572
}
7573
}
7574
json.EndObject();
7575
}
7576
7577
if (detailedMap)
7578
{
7579
const auto writeHeapInfo = [&](BlockVector* blockVector, CommittedAllocationList* committedAllocs, bool customHeap)
7580
{
7581
D3D12MA_ASSERT(blockVector);
7582
7583
D3D12_HEAP_FLAGS flags = blockVector->GetHeapFlags();
7584
json.WriteString(L"Flags");
7585
json.BeginArray(true);
7586
{
7587
if (flags & D3D12_HEAP_FLAG_SHARED)
7588
json.WriteString(L"HEAP_FLAG_SHARED");
7589
if (flags & D3D12_HEAP_FLAG_ALLOW_DISPLAY)
7590
json.WriteString(L"HEAP_FLAG_ALLOW_DISPLAY");
7591
if (flags & D3D12_HEAP_FLAG_SHARED_CROSS_ADAPTER)
7592
json.WriteString(L"HEAP_FLAG_CROSS_ADAPTER");
7593
if (flags & D3D12_HEAP_FLAG_HARDWARE_PROTECTED)
7594
json.WriteString(L"HEAP_FLAG_HARDWARE_PROTECTED");
7595
if (flags & D3D12_HEAP_FLAG_ALLOW_WRITE_WATCH)
7596
json.WriteString(L"HEAP_FLAG_ALLOW_WRITE_WATCH");
7597
if (flags & D3D12_HEAP_FLAG_ALLOW_SHADER_ATOMICS)
7598
json.WriteString(L"HEAP_FLAG_ALLOW_SHADER_ATOMICS");
7599
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
7600
if (flags & D3D12_HEAP_FLAG_CREATE_NOT_RESIDENT)
7601
json.WriteString(L"HEAP_FLAG_CREATE_NOT_RESIDENT");
7602
if (flags & D3D12_HEAP_FLAG_CREATE_NOT_ZEROED)
7603
json.WriteString(L"HEAP_FLAG_CREATE_NOT_ZEROED");
7604
#endif
7605
7606
if (flags & D3D12_HEAP_FLAG_DENY_BUFFERS)
7607
json.WriteString(L"HEAP_FLAG_DENY_BUFFERS");
7608
if (flags & D3D12_HEAP_FLAG_DENY_RT_DS_TEXTURES)
7609
json.WriteString(L"HEAP_FLAG_DENY_RT_DS_TEXTURES");
7610
if (flags & D3D12_HEAP_FLAG_DENY_NON_RT_DS_TEXTURES)
7611
json.WriteString(L"HEAP_FLAG_DENY_NON_RT_DS_TEXTURES");
7612
7613
flags &= ~(D3D12_HEAP_FLAG_SHARED
7614
| D3D12_HEAP_FLAG_DENY_BUFFERS
7615
| D3D12_HEAP_FLAG_ALLOW_DISPLAY
7616
| D3D12_HEAP_FLAG_SHARED_CROSS_ADAPTER
7617
| D3D12_HEAP_FLAG_DENY_RT_DS_TEXTURES
7618
| D3D12_HEAP_FLAG_DENY_NON_RT_DS_TEXTURES
7619
| D3D12_HEAP_FLAG_HARDWARE_PROTECTED
7620
| D3D12_HEAP_FLAG_ALLOW_WRITE_WATCH
7621
| D3D12_HEAP_FLAG_ALLOW_SHADER_ATOMICS);
7622
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
7623
flags &= ~(D3D12_HEAP_FLAG_CREATE_NOT_RESIDENT
7624
| D3D12_HEAP_FLAG_CREATE_NOT_ZEROED);
7625
#endif
7626
if (flags != 0)
7627
json.WriteNumber((UINT)flags);
7628
7629
if (customHeap)
7630
{
7631
const D3D12_HEAP_PROPERTIES& properties = blockVector->GetHeapProperties();
7632
switch (properties.MemoryPoolPreference)
7633
{
7634
default:
7635
D3D12MA_ASSERT(0);
7636
case D3D12_MEMORY_POOL_UNKNOWN:
7637
json.WriteString(L"MEMORY_POOL_UNKNOWN");
7638
break;
7639
case D3D12_MEMORY_POOL_L0:
7640
json.WriteString(L"MEMORY_POOL_L0");
7641
break;
7642
case D3D12_MEMORY_POOL_L1:
7643
json.WriteString(L"MEMORY_POOL_L1");
7644
break;
7645
}
7646
switch (properties.CPUPageProperty)
7647
{
7648
default:
7649
D3D12MA_ASSERT(0);
7650
case D3D12_CPU_PAGE_PROPERTY_UNKNOWN:
7651
json.WriteString(L"CPU_PAGE_PROPERTY_UNKNOWN");
7652
break;
7653
case D3D12_CPU_PAGE_PROPERTY_NOT_AVAILABLE:
7654
json.WriteString(L"CPU_PAGE_PROPERTY_NOT_AVAILABLE");
7655
break;
7656
case D3D12_CPU_PAGE_PROPERTY_WRITE_COMBINE:
7657
json.WriteString(L"CPU_PAGE_PROPERTY_WRITE_COMBINE");
7658
break;
7659
case D3D12_CPU_PAGE_PROPERTY_WRITE_BACK:
7660
json.WriteString(L"CPU_PAGE_PROPERTY_WRITE_BACK");
7661
break;
7662
}
7663
}
7664
}
7665
json.EndArray();
7666
7667
json.WriteString(L"PreferredBlockSize");
7668
json.WriteNumber(blockVector->GetPreferredBlockSize());
7669
7670
json.WriteString(L"Blocks");
7671
blockVector->WriteBlockInfoToJson(json);
7672
7673
json.WriteString(L"DedicatedAllocations");
7674
json.BeginArray();
7675
if (committedAllocs)
7676
committedAllocs->BuildStatsString(json);
7677
json.EndArray();
7678
};
7679
7680
json.WriteString(L"DefaultPools");
7681
json.BeginObject();
7682
{
7683
if (SupportsResourceHeapTier2())
7684
{
7685
for (uint8_t heapType = 0; heapType < STANDARD_HEAP_TYPE_COUNT; ++heapType)
7686
{
7687
json.WriteString(HeapTypeNames[heapType]);
7688
json.BeginObject();
7689
writeHeapInfo(m_BlockVectors[heapType], m_CommittedAllocations + heapType, false);
7690
json.EndObject();
7691
}
7692
}
7693
else
7694
{
7695
for (uint8_t heapType = 0; heapType < STANDARD_HEAP_TYPE_COUNT; ++heapType)
7696
{
7697
for (uint8_t heapSubType = 0; heapSubType < 3; ++heapSubType)
7698
{
7699
static const WCHAR* const heapSubTypeName[] = {
7700
L" - Buffers",
7701
L" - Textures",
7702
L" - Textures RT/DS",
7703
};
7704
json.BeginString(HeapTypeNames[heapType]);
7705
json.EndString(heapSubTypeName[heapSubType]);
7706
7707
json.BeginObject();
7708
writeHeapInfo(m_BlockVectors[heapType + heapSubType], m_CommittedAllocations + heapType, false);
7709
json.EndObject();
7710
}
7711
}
7712
}
7713
}
7714
json.EndObject();
7715
7716
json.WriteString(L"CustomPools");
7717
json.BeginObject();
7718
for (uint8_t heapTypeIndex = 0; heapTypeIndex < HEAP_TYPE_COUNT; ++heapTypeIndex)
7719
{
7720
MutexLockRead mutex(m_PoolsMutex[heapTypeIndex], m_UseMutex);
7721
auto* item = m_Pools[heapTypeIndex].Front();
7722
if (item != NULL)
7723
{
7724
size_t index = 0;
7725
json.WriteString(HeapTypeNames[heapTypeIndex]);
7726
json.BeginArray();
7727
do
7728
{
7729
json.BeginObject();
7730
json.WriteString(L"Name");
7731
json.BeginString();
7732
json.ContinueString(index++);
7733
if (item->GetName())
7734
{
7735
json.ContinueString(L" - ");
7736
json.ContinueString(item->GetName());
7737
}
7738
json.EndString();
7739
7740
writeHeapInfo(item->GetBlockVector(), item->GetCommittedAllocationList(), heapTypeIndex == 3);
7741
json.EndObject();
7742
} while ((item = PoolList::GetNext(item)) != NULL);
7743
json.EndArray();
7744
}
7745
}
7746
json.EndObject();
7747
}
7748
json.EndObject();
7749
}
7750
7751
const size_t length = sb.GetLength();
7752
WCHAR* result = AllocateArray<WCHAR>(GetAllocs(), length + 2);
7753
result[0] = 0xFEFF;
7754
memcpy(result + 1, sb.GetData(), length * sizeof(WCHAR));
7755
result[length + 1] = L'\0';
7756
*ppStatsString = result;
7757
}
7758
7759
void AllocatorPimpl::FreeStatsString(WCHAR* pStatsString)
7760
{
7761
D3D12MA_ASSERT(pStatsString);
7762
Free(GetAllocs(), pStatsString);
7763
}
7764
7765
template<typename D3D12_RESOURCE_DESC_T>
7766
bool AllocatorPimpl::PrefersCommittedAllocation(const D3D12_RESOURCE_DESC_T& resourceDesc)
7767
{
7768
// Intentional. It may change in the future.
7769
return false;
7770
}
7771
7772
HRESULT AllocatorPimpl::AllocateCommittedResource(
7773
const CommittedAllocationParameters& committedAllocParams,
7774
UINT64 resourceSize, bool withinBudget, void* pPrivateData,
7775
const CREATE_RESOURCE_PARAMS& createParams,
7776
Allocation** ppAllocation, REFIID riidResource, void** ppvResource)
7777
{
7778
D3D12MA_ASSERT(committedAllocParams.IsValid());
7779
7780
HRESULT hr;
7781
ID3D12Resource* res = NULL;
7782
// Allocate aliasing memory with explicit heap
7783
if (committedAllocParams.m_CanAlias)
7784
{
7785
D3D12_RESOURCE_ALLOCATION_INFO heapAllocInfo = {};
7786
heapAllocInfo.SizeInBytes = resourceSize;
7787
heapAllocInfo.Alignment = HeapFlagsToAlignment(committedAllocParams.m_HeapFlags, m_MsaaAlwaysCommitted);
7788
hr = AllocateHeap(committedAllocParams, heapAllocInfo, withinBudget, pPrivateData, ppAllocation);
7789
if (SUCCEEDED(hr))
7790
{
7791
hr = CreatePlacedResourceWrap((*ppAllocation)->GetHeap(), 0,
7792
createParams, D3D12MA_IID_PPV_ARGS(&res));
7793
if (SUCCEEDED(hr))
7794
{
7795
if (ppvResource != NULL)
7796
hr = res->QueryInterface(riidResource, ppvResource);
7797
if (SUCCEEDED(hr))
7798
{
7799
(*ppAllocation)->SetResourcePointer(res, createParams.GetBaseResourceDesc());
7800
return hr;
7801
}
7802
res->Release();
7803
}
7804
FreeHeapMemory(*ppAllocation);
7805
}
7806
return hr;
7807
}
7808
7809
if (withinBudget &&
7810
!NewAllocationWithinBudget(committedAllocParams.m_HeapProperties.Type, resourceSize))
7811
{
7812
return E_OUTOFMEMORY;
7813
}
7814
7815
/* D3D12 ERROR:
7816
* ID3D12Device::CreateCommittedResource:
7817
* When creating a committed resource, D3D12_HEAP_FLAGS must not have either
7818
* D3D12_HEAP_FLAG_DENY_NON_RT_DS_TEXTURES,
7819
* D3D12_HEAP_FLAG_DENY_RT_DS_TEXTURES,
7820
* nor D3D12_HEAP_FLAG_DENY_BUFFERS set.
7821
* These flags will be set automatically to correspond with the committed resource type.
7822
*
7823
* [ STATE_CREATION ERROR #640: CREATERESOURCEANDHEAP_INVALIDHEAPMISCFLAGS]
7824
*/
7825
7826
#ifdef __ID3D12Device10_INTERFACE_DEFINED__
7827
if (createParams.Variant == CREATE_RESOURCE_PARAMS::VARIANT_WITH_LAYOUT)
7828
{
7829
if (!m_Device10)
7830
{
7831
return E_NOINTERFACE;
7832
}
7833
hr = m_Device10->CreateCommittedResource3(
7834
&committedAllocParams.m_HeapProperties,
7835
committedAllocParams.m_HeapFlags & ~RESOURCE_CLASS_HEAP_FLAGS,
7836
createParams.GetResourceDesc1(), createParams.GetInitialLayout(),
7837
createParams.GetOptimizedClearValue(), committedAllocParams.m_ProtectedSession,
7838
createParams.GetNumCastableFormats(), createParams.GetCastableFormats(),
7839
D3D12MA_IID_PPV_ARGS(&res));
7840
} else
7841
#endif
7842
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
7843
if (createParams.Variant == CREATE_RESOURCE_PARAMS::VARIANT_WITH_STATE_AND_DESC1)
7844
{
7845
if (!m_Device8)
7846
{
7847
return E_NOINTERFACE;
7848
}
7849
hr = m_Device8->CreateCommittedResource2(
7850
&committedAllocParams.m_HeapProperties,
7851
committedAllocParams.m_HeapFlags & ~RESOURCE_CLASS_HEAP_FLAGS,
7852
createParams.GetResourceDesc1(), createParams.GetInitialResourceState(),
7853
createParams.GetOptimizedClearValue(), committedAllocParams.m_ProtectedSession,
7854
D3D12MA_IID_PPV_ARGS(&res));
7855
} else
7856
#endif
7857
if (createParams.Variant == CREATE_RESOURCE_PARAMS::VARIANT_WITH_STATE)
7858
{
7859
#ifdef __ID3D12Device4_INTERFACE_DEFINED__
7860
if (m_Device4)
7861
{
7862
hr = m_Device4->CreateCommittedResource1(
7863
&committedAllocParams.m_HeapProperties,
7864
committedAllocParams.m_HeapFlags & ~RESOURCE_CLASS_HEAP_FLAGS,
7865
createParams.GetResourceDesc(), createParams.GetInitialResourceState(),
7866
createParams.GetOptimizedClearValue(), committedAllocParams.m_ProtectedSession,
7867
D3D12MA_IID_PPV_ARGS(&res));
7868
}
7869
else
7870
#endif
7871
{
7872
if (committedAllocParams.m_ProtectedSession == NULL)
7873
{
7874
hr = m_Device->CreateCommittedResource(
7875
&committedAllocParams.m_HeapProperties,
7876
committedAllocParams.m_HeapFlags & ~RESOURCE_CLASS_HEAP_FLAGS,
7877
createParams.GetResourceDesc(), createParams.GetInitialResourceState(),
7878
createParams.GetOptimizedClearValue(), D3D12MA_IID_PPV_ARGS(&res));
7879
}
7880
else
7881
hr = E_NOINTERFACE;
7882
}
7883
}
7884
else
7885
{
7886
D3D12MA_ASSERT(0);
7887
return E_INVALIDARG;
7888
}
7889
7890
if (SUCCEEDED(hr))
7891
{
7892
SetResidencyPriority(res, committedAllocParams.m_ResidencyPriority);
7893
7894
if (ppvResource != NULL)
7895
{
7896
hr = res->QueryInterface(riidResource, ppvResource);
7897
}
7898
if (SUCCEEDED(hr))
7899
{
7900
BOOL wasZeroInitialized = TRUE;
7901
#if D3D12MA_CREATE_NOT_ZEROED_AVAILABLE
7902
if((committedAllocParams.m_HeapFlags & D3D12_HEAP_FLAG_CREATE_NOT_ZEROED) != 0)
7903
{
7904
wasZeroInitialized = FALSE;
7905
}
7906
#endif
7907
7908
Allocation* alloc = m_AllocationObjectAllocator.Allocate(
7909
this, resourceSize, createParams.GetBaseResourceDesc()->Alignment, wasZeroInitialized);
7910
alloc->InitCommitted(committedAllocParams.m_List);
7911
alloc->SetResourcePointer(res, createParams.GetBaseResourceDesc());
7912
alloc->SetPrivateData(pPrivateData);
7913
7914
*ppAllocation = alloc;
7915
7916
committedAllocParams.m_List->Register(alloc);
7917
7918
const UINT memSegmentGroup = HeapPropertiesToMemorySegmentGroup(committedAllocParams.m_HeapProperties);
7919
m_Budget.AddBlock(memSegmentGroup, resourceSize);
7920
m_Budget.AddAllocation(memSegmentGroup, resourceSize);
7921
}
7922
else
7923
{
7924
res->Release();
7925
}
7926
}
7927
return hr;
7928
}
7929
7930
HRESULT AllocatorPimpl::AllocateHeap(
7931
const CommittedAllocationParameters& committedAllocParams,
7932
const D3D12_RESOURCE_ALLOCATION_INFO& allocInfo, bool withinBudget,
7933
void* pPrivateData, Allocation** ppAllocation)
7934
{
7935
D3D12MA_ASSERT(committedAllocParams.IsValid());
7936
7937
*ppAllocation = nullptr;
7938
7939
if (withinBudget &&
7940
!NewAllocationWithinBudget(committedAllocParams.m_HeapProperties.Type, allocInfo.SizeInBytes))
7941
{
7942
return E_OUTOFMEMORY;
7943
}
7944
7945
D3D12_HEAP_DESC heapDesc = {};
7946
heapDesc.SizeInBytes = allocInfo.SizeInBytes;
7947
heapDesc.Properties = committedAllocParams.m_HeapProperties;
7948
heapDesc.Alignment = allocInfo.Alignment;
7949
heapDesc.Flags = committedAllocParams.m_HeapFlags;
7950
7951
HRESULT hr;
7952
ID3D12Heap* heap = nullptr;
7953
#ifdef __ID3D12Device4_INTERFACE_DEFINED__
7954
if (m_Device4)
7955
hr = m_Device4->CreateHeap1(&heapDesc, committedAllocParams.m_ProtectedSession, D3D12MA_IID_PPV_ARGS(&heap));
7956
else
7957
#endif
7958
{
7959
if (committedAllocParams.m_ProtectedSession == NULL)
7960
hr = m_Device->CreateHeap(&heapDesc, D3D12MA_IID_PPV_ARGS(&heap));
7961
else
7962
hr = E_NOINTERFACE;
7963
}
7964
7965
if (SUCCEEDED(hr))
7966
{
7967
SetResidencyPriority(heap, committedAllocParams.m_ResidencyPriority);
7968
7969
BOOL wasZeroInitialized = TRUE;
7970
#if D3D12MA_CREATE_NOT_ZEROED_AVAILABLE
7971
if((heapDesc.Flags & D3D12_HEAP_FLAG_CREATE_NOT_ZEROED) != 0)
7972
{
7973
wasZeroInitialized = FALSE;
7974
}
7975
#endif
7976
7977
(*ppAllocation) = m_AllocationObjectAllocator.Allocate(this, allocInfo.SizeInBytes, allocInfo.Alignment, wasZeroInitialized);
7978
(*ppAllocation)->InitHeap(committedAllocParams.m_List, heap);
7979
(*ppAllocation)->SetPrivateData(pPrivateData);
7980
committedAllocParams.m_List->Register(*ppAllocation);
7981
7982
const UINT memSegmentGroup = HeapPropertiesToMemorySegmentGroup(committedAllocParams.m_HeapProperties);
7983
m_Budget.AddBlock(memSegmentGroup, allocInfo.SizeInBytes);
7984
m_Budget.AddAllocation(memSegmentGroup, allocInfo.SizeInBytes);
7985
}
7986
return hr;
7987
}
7988
7989
template<typename D3D12_RESOURCE_DESC_T>
7990
HRESULT AllocatorPimpl::CalcAllocationParams(const ALLOCATION_DESC& allocDesc, UINT64 allocSize,
7991
const D3D12_RESOURCE_DESC_T* resDesc,
7992
BlockVector*& outBlockVector, CommittedAllocationParameters& outCommittedAllocationParams, bool& outPreferCommitted)
7993
{
7994
outBlockVector = NULL;
7995
outCommittedAllocationParams = CommittedAllocationParameters();
7996
outPreferCommitted = false;
7997
7998
bool msaaAlwaysCommitted;
7999
if (allocDesc.CustomPool != NULL)
8000
{
8001
PoolPimpl* const pool = allocDesc.CustomPool->m_Pimpl;
8002
8003
msaaAlwaysCommitted = pool->GetBlockVector()->DeniesMsaaTextures();
8004
outBlockVector = pool->GetBlockVector();
8005
8006
const auto& desc = pool->GetDesc();
8007
outCommittedAllocationParams.m_ProtectedSession = desc.pProtectedSession;
8008
outCommittedAllocationParams.m_HeapProperties = desc.HeapProperties;
8009
outCommittedAllocationParams.m_HeapFlags = desc.HeapFlags;
8010
outCommittedAllocationParams.m_List = pool->GetCommittedAllocationList();
8011
outCommittedAllocationParams.m_ResidencyPriority = pool->GetDesc().ResidencyPriority;
8012
}
8013
else
8014
{
8015
if (!IsHeapTypeStandard(allocDesc.HeapType))
8016
{
8017
return E_INVALIDARG;
8018
}
8019
msaaAlwaysCommitted = m_MsaaAlwaysCommitted;
8020
8021
outCommittedAllocationParams.m_HeapProperties = StandardHeapTypeToHeapProperties(allocDesc.HeapType);
8022
outCommittedAllocationParams.m_HeapFlags = allocDesc.ExtraHeapFlags;
8023
outCommittedAllocationParams.m_List = &m_CommittedAllocations[HeapTypeToIndex(allocDesc.HeapType)];
8024
// outCommittedAllocationParams.m_ResidencyPriority intentionally left with default value.
8025
8026
const ResourceClass resourceClass = (resDesc != NULL) ?
8027
ResourceDescToResourceClass(*resDesc) : HeapFlagsToResourceClass(allocDesc.ExtraHeapFlags);
8028
const UINT defaultPoolIndex = CalcDefaultPoolIndex(allocDesc, resourceClass);
8029
if (defaultPoolIndex != UINT32_MAX)
8030
{
8031
outBlockVector = m_BlockVectors[defaultPoolIndex];
8032
const UINT64 preferredBlockSize = outBlockVector->GetPreferredBlockSize();
8033
if (allocSize > preferredBlockSize)
8034
{
8035
outBlockVector = NULL;
8036
}
8037
else if (allocSize > preferredBlockSize / 2)
8038
{
8039
// Heuristics: Allocate committed memory if requested size if greater than half of preferred block size.
8040
outPreferCommitted = true;
8041
}
8042
}
8043
8044
const D3D12_HEAP_FLAGS extraHeapFlags = allocDesc.ExtraHeapFlags & ~RESOURCE_CLASS_HEAP_FLAGS;
8045
if (outBlockVector != NULL && extraHeapFlags != 0)
8046
{
8047
outBlockVector = NULL;
8048
}
8049
}
8050
8051
if ((allocDesc.Flags & ALLOCATION_FLAG_COMMITTED) != 0 ||
8052
m_AlwaysCommitted)
8053
{
8054
outBlockVector = NULL;
8055
}
8056
if ((allocDesc.Flags & ALLOCATION_FLAG_NEVER_ALLOCATE) != 0)
8057
{
8058
outCommittedAllocationParams.m_List = NULL;
8059
}
8060
outCommittedAllocationParams.m_CanAlias = allocDesc.Flags & ALLOCATION_FLAG_CAN_ALIAS;
8061
8062
if (resDesc != NULL)
8063
{
8064
if (resDesc->SampleDesc.Count > 1 && msaaAlwaysCommitted)
8065
outBlockVector = NULL;
8066
if (!outPreferCommitted && PrefersCommittedAllocation(*resDesc))
8067
outPreferCommitted = true;
8068
}
8069
8070
return (outBlockVector != NULL || outCommittedAllocationParams.m_List != NULL) ? S_OK : E_INVALIDARG;
8071
}

UINT AllocatorPimpl::CalcDefaultPoolIndex(const ALLOCATION_DESC& allocDesc, ResourceClass resourceClass) const
{
    D3D12_HEAP_FLAGS extraHeapFlags = allocDesc.ExtraHeapFlags & ~RESOURCE_CLASS_HEAP_FLAGS;

#if D3D12MA_CREATE_NOT_ZEROED_AVAILABLE
    // If allocator was created with ALLOCATOR_FLAG_DEFAULT_POOLS_NOT_ZEROED, also ignore
    // D3D12_HEAP_FLAG_CREATE_NOT_ZEROED.
    if(m_DefaultPoolsNotZeroed)
    {
        extraHeapFlags &= ~D3D12_HEAP_FLAG_CREATE_NOT_ZEROED;
    }
#endif

    if (extraHeapFlags != 0)
    {
        return UINT32_MAX;
    }

    UINT poolIndex = UINT_MAX;
    switch (allocDesc.HeapType)
    {
    case D3D12_HEAP_TYPE_DEFAULT: poolIndex = 0; break;
    case D3D12_HEAP_TYPE_UPLOAD: poolIndex = 1; break;
    case D3D12_HEAP_TYPE_READBACK: poolIndex = 2; break;
    default: D3D12MA_ASSERT(0);
    }

    if (SupportsResourceHeapTier2())
        return poolIndex;
    else
    {
        switch (resourceClass)
        {
        case ResourceClass::Buffer:
            return poolIndex * 3;
        case ResourceClass::Non_RT_DS_Texture:
            return poolIndex * 3 + 1;
        case ResourceClass::RT_DS_Texture:
            return poolIndex * 3 + 2;
        default:
            return UINT32_MAX;
        }
    }
}
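
// Layout of the default block vectors this index selects (explanatory note, derived from
// the mapping above): on ResourceHeapTier 2 hardware there is one default pool per
// standard heap type, while on Tier 1 each heap type is split by resource class:
//
//   Tier 2: [0] DEFAULT              [1] UPLOAD               [2] READBACK
//   Tier 1: [0] DEFAULT buffers      [1] DEFAULT non-RT/DS    [2] DEFAULT RT/DS
//           [3] UPLOAD buffers       [4] UPLOAD non-RT/DS     [5] UPLOAD RT/DS
//           [6] READBACK buffers     [7] READBACK non-RT/DS   [8] READBACK RT/DS
//
// UINT32_MAX means "no default pool applies" and the caller falls back to a committed
// allocation.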
8117
8118
void AllocatorPimpl::CalcDefaultPoolParams(D3D12_HEAP_TYPE& outHeapType, D3D12_HEAP_FLAGS& outHeapFlags, UINT index) const
8119
{
8120
outHeapType = D3D12_HEAP_TYPE_DEFAULT;
8121
outHeapFlags = D3D12_HEAP_FLAG_NONE;
8122
8123
if (!SupportsResourceHeapTier2())
8124
{
8125
switch (index % 3)
8126
{
8127
case 0:
8128
outHeapFlags = D3D12_HEAP_FLAG_DENY_RT_DS_TEXTURES | D3D12_HEAP_FLAG_DENY_NON_RT_DS_TEXTURES;
8129
break;
8130
case 1:
8131
outHeapFlags = D3D12_HEAP_FLAG_DENY_BUFFERS | D3D12_HEAP_FLAG_DENY_RT_DS_TEXTURES;
8132
break;
8133
case 2:
8134
outHeapFlags = D3D12_HEAP_FLAG_DENY_BUFFERS | D3D12_HEAP_FLAG_DENY_NON_RT_DS_TEXTURES;
8135
break;
8136
}
8137
8138
index /= 3;
8139
}
8140
8141
switch (index)
8142
{
8143
case 0:
8144
outHeapType = D3D12_HEAP_TYPE_DEFAULT;
8145
break;
8146
case 1:
8147
outHeapType = D3D12_HEAP_TYPE_UPLOAD;
8148
break;
8149
case 2:
8150
outHeapType = D3D12_HEAP_TYPE_READBACK;
8151
break;
8152
default:
8153
D3D12MA_ASSERT(0);
8154
}
8155
}
8156
8157
void AllocatorPimpl::RegisterPool(Pool* pool, D3D12_HEAP_TYPE heapType)
8158
{
8159
const UINT heapTypeIndex = HeapTypeToIndex(heapType);
8160
8161
MutexLockWrite lock(m_PoolsMutex[heapTypeIndex], m_UseMutex);
8162
m_Pools[heapTypeIndex].PushBack(pool->m_Pimpl);
8163
}
8164
8165
void AllocatorPimpl::UnregisterPool(Pool* pool, D3D12_HEAP_TYPE heapType)
8166
{
8167
const UINT heapTypeIndex = HeapTypeToIndex(heapType);
8168
8169
MutexLockWrite lock(m_PoolsMutex[heapTypeIndex], m_UseMutex);
8170
m_Pools[heapTypeIndex].Remove(pool->m_Pimpl);
8171
}
8172
8173
HRESULT AllocatorPimpl::UpdateD3D12Budget()
8174
{
8175
#if D3D12MA_DXGI_1_4
8176
if (m_Adapter3)
8177
return m_Budget.UpdateBudget(m_Adapter3, m_UseMutex);
8178
else
8179
return E_NOINTERFACE;
8180
#else
8181
return S_OK;
8182
#endif
8183
}
8184
8185
D3D12_RESOURCE_ALLOCATION_INFO AllocatorPimpl::GetResourceAllocationInfoNative(const D3D12_RESOURCE_DESC& resourceDesc) const
8186
{
8187
#if defined(_MSC_VER) || !defined(_WIN32)
8188
return m_Device->GetResourceAllocationInfo(0, 1, &resourceDesc);
8189
#else
8190
D3D12_RESOURCE_ALLOCATION_INFO ret;
8191
m_Device->GetResourceAllocationInfo(&ret, 0, 1, &resourceDesc);
8192
return ret;
8193
#endif
8194
}
8195
8196
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
8197
D3D12_RESOURCE_ALLOCATION_INFO AllocatorPimpl::GetResourceAllocationInfoNative(const D3D12_RESOURCE_DESC1& resourceDesc) const
8198
{
8199
D3D12MA_ASSERT(m_Device8 != NULL);
8200
D3D12_RESOURCE_ALLOCATION_INFO1 info1Unused;
8201
#if defined(_MSC_VER) || !defined(_WIN32)
8202
return m_Device8->GetResourceAllocationInfo2(0, 1, &resourceDesc, &info1Unused);
8203
#else
8204
D3D12_RESOURCE_ALLOCATION_INFO ret;
8205
m_Device8->GetResourceAllocationInfo2(&ret, 0, 1, &resourceDesc, &info1Unused);
8206
return ret;
8207
#endif
8208
}
8209
#endif // #ifdef __ID3D12Device8_INTERFACE_DEFINED__
8210
8211
template<typename D3D12_RESOURCE_DESC_T>
8212
D3D12_RESOURCE_ALLOCATION_INFO AllocatorPimpl::GetResourceAllocationInfo(D3D12_RESOURCE_DESC_T& inOutResourceDesc) const
8213
{
8214
/* Optional optimization: Microsoft documentation says:
8215
https://docs.microsoft.com/en-us/windows/win32/api/d3d12/nf-d3d12-id3d12device-getresourceallocationinfo
8216
8217
Your application can forgo using GetResourceAllocationInfo for buffer resources
8218
(D3D12_RESOURCE_DIMENSION_BUFFER). Buffers have the same size on all adapters,
8219
which is merely the smallest multiple of 64KB that's greater or equal to
8220
D3D12_RESOURCE_DESC::Width.
8221
*/
8222
if (inOutResourceDesc.Alignment == 0 &&
8223
inOutResourceDesc.Dimension == D3D12_RESOURCE_DIMENSION_BUFFER)
8224
{
8225
return {
8226
AlignUp<UINT64>(inOutResourceDesc.Width, D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT), // SizeInBytes
8227
D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT }; // Alignment
8228
}
8229
8230
#if D3D12MA_USE_SMALL_RESOURCE_PLACEMENT_ALIGNMENT
8231
if (inOutResourceDesc.Alignment == 0 &&
8232
inOutResourceDesc.Dimension == D3D12_RESOURCE_DIMENSION_TEXTURE2D &&
8233
(inOutResourceDesc.Flags & (D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET | D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL)) == 0
8234
#if D3D12MA_USE_SMALL_RESOURCE_PLACEMENT_ALIGNMENT == 1
8235
&& CanUseSmallAlignment(inOutResourceDesc)
8236
#endif
8237
)
8238
{
8239
/*
8240
The algorithm here is based on Microsoft sample: "Small Resources Sample"
8241
https://github.com/microsoft/DirectX-Graphics-Samples/tree/master/Samples/Desktop/D3D12SmallResources
8242
*/
8243
const UINT64 smallAlignmentToTry = inOutResourceDesc.SampleDesc.Count > 1 ?
8244
D3D12_SMALL_MSAA_RESOURCE_PLACEMENT_ALIGNMENT :
8245
D3D12_SMALL_RESOURCE_PLACEMENT_ALIGNMENT;
8246
inOutResourceDesc.Alignment = smallAlignmentToTry;
8247
const D3D12_RESOURCE_ALLOCATION_INFO smallAllocInfo = GetResourceAllocationInfoNative(inOutResourceDesc);
8248
// Check if alignment requested has been granted.
8249
if (smallAllocInfo.Alignment == smallAlignmentToTry)
8250
{
8251
return smallAllocInfo;
8252
}
8253
inOutResourceDesc.Alignment = 0; // Restore original
8254
}
8255
#endif // #if D3D12MA_USE_SMALL_RESOURCE_PLACEMENT_ALIGNMENT
8256
8257
return GetResourceAllocationInfoNative(inOutResourceDesc);
8258
}

bool AllocatorPimpl::NewAllocationWithinBudget(D3D12_HEAP_TYPE heapType, UINT64 size)
{
    Budget budget = {};
    GetBudgetForHeapType(budget, heapType);
    return budget.UsageBytes + size <= budget.BudgetBytes;
}

void AllocatorPimpl::WriteBudgetToJson(JsonWriter& json, const Budget& budget)
{
    json.BeginObject();
    {
        json.WriteString(L"BudgetBytes");
        json.WriteNumber(budget.BudgetBytes);
        json.WriteString(L"UsageBytes");
        json.WriteNumber(budget.UsageBytes);
    }
    json.EndObject();
}

#endif // _D3D12MA_ALLOCATOR_PIMPL_FUNCTIONS
#endif // _D3D12MA_ALLOCATOR_PIMPL

#ifndef _D3D12MA_VIRTUAL_BLOCK_PIMPL
class VirtualBlockPimpl
{
public:
    const ALLOCATION_CALLBACKS m_AllocationCallbacks;
    const UINT64 m_Size;
    BlockMetadata* m_Metadata;

    VirtualBlockPimpl(const ALLOCATION_CALLBACKS& allocationCallbacks, const VIRTUAL_BLOCK_DESC& desc);
    ~VirtualBlockPimpl();
};

#ifndef _D3D12MA_VIRTUAL_BLOCK_PIMPL_FUNCTIONS
VirtualBlockPimpl::VirtualBlockPimpl(const ALLOCATION_CALLBACKS& allocationCallbacks, const VIRTUAL_BLOCK_DESC& desc)
    : m_AllocationCallbacks(allocationCallbacks), m_Size(desc.Size)
{
    switch (desc.Flags & VIRTUAL_BLOCK_FLAG_ALGORITHM_MASK)
    {
    case VIRTUAL_BLOCK_FLAG_ALGORITHM_LINEAR:
        m_Metadata = D3D12MA_NEW(allocationCallbacks, BlockMetadata_Linear)(&m_AllocationCallbacks, true);
        break;
    default:
        D3D12MA_ASSERT(0);
    case 0:
        m_Metadata = D3D12MA_NEW(allocationCallbacks, BlockMetadata_TLSF)(&m_AllocationCallbacks, true);
        break;
    }
    m_Metadata->Init(m_Size);
}

VirtualBlockPimpl::~VirtualBlockPimpl()
{
    D3D12MA_DELETE(m_AllocationCallbacks, m_Metadata);
}
#endif // _D3D12MA_VIRTUAL_BLOCK_PIMPL_FUNCTIONS
#endif // _D3D12MA_VIRTUAL_BLOCK_PIMPL
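
// Usage sketch (hedged example, not part of the library): VirtualBlockPimpl backs the
// public D3D12MA::VirtualBlock API, which sub-allocates an address range without owning
// any GPU memory, e.g. to manage regions inside one big buffer. The sizes below are
// placeholders.
//
//   D3D12MA::VIRTUAL_BLOCK_DESC blockDesc = {};
//   blockDesc.Size = 1048576; // 1 MiB of "virtual" space
//
//   D3D12MA::VirtualBlock* block = nullptr;
//   HRESULT hr = D3D12MA::CreateVirtualBlock(&blockDesc, &block);
//
//   D3D12MA::VIRTUAL_ALLOCATION_DESC allocDesc = {};
//   allocDesc.Size = 4096;
//   allocDesc.Alignment = 256;
//
//   D3D12MA::VirtualAllocation alloc;
//   UINT64 offset = 0;
//   if (SUCCEEDED(hr))
//       hr = block->Allocate(&allocDesc, &alloc, &offset); // offset is yours to use
//   // ...
//   block->FreeAllocation(alloc);
//   block->Release();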
8318
8319
8320
#ifndef _D3D12MA_MEMORY_BLOCK_FUNCTIONS
8321
MemoryBlock::MemoryBlock(
8322
AllocatorPimpl* allocator,
8323
const D3D12_HEAP_PROPERTIES& heapProps,
8324
D3D12_HEAP_FLAGS heapFlags,
8325
UINT64 size,
8326
UINT id)
8327
: m_Allocator(allocator),
8328
m_HeapProps(heapProps),
8329
m_HeapFlags(heapFlags),
8330
m_Size(size),
8331
m_Id(id) {}
8332
8333
MemoryBlock::~MemoryBlock()
8334
{
8335
if (m_Heap)
8336
{
8337
m_Heap->Release();
8338
m_Allocator->m_Budget.RemoveBlock(
8339
m_Allocator->HeapPropertiesToMemorySegmentGroup(m_HeapProps), m_Size);
8340
}
8341
}
8342
8343
HRESULT MemoryBlock::Init(ID3D12ProtectedResourceSession* pProtectedSession, bool denyMsaaTextures)
8344
{
8345
D3D12MA_ASSERT(m_Heap == NULL && m_Size > 0);
8346
8347
D3D12_HEAP_DESC heapDesc = {};
8348
heapDesc.SizeInBytes = m_Size;
8349
heapDesc.Properties = m_HeapProps;
8350
heapDesc.Alignment = HeapFlagsToAlignment(m_HeapFlags, denyMsaaTextures);
8351
heapDesc.Flags = m_HeapFlags;
8352
8353
HRESULT hr;
8354
#ifdef __ID3D12Device4_INTERFACE_DEFINED__
8355
ID3D12Device4* const device4 = m_Allocator->GetDevice4();
8356
if (device4)
8357
hr = m_Allocator->GetDevice4()->CreateHeap1(&heapDesc, pProtectedSession, D3D12MA_IID_PPV_ARGS(&m_Heap));
8358
else
8359
#endif
8360
{
8361
if (pProtectedSession == NULL)
8362
hr = m_Allocator->GetDevice()->CreateHeap(&heapDesc, D3D12MA_IID_PPV_ARGS(&m_Heap));
8363
else
8364
hr = E_NOINTERFACE;
8365
}
8366
8367
if (SUCCEEDED(hr))
8368
{
8369
m_Allocator->m_Budget.AddBlock(
8370
m_Allocator->HeapPropertiesToMemorySegmentGroup(m_HeapProps), m_Size);
8371
}
8372
return hr;
8373
}
8374
#endif // _D3D12MA_MEMORY_BLOCK_FUNCTIONS
8375
8376
#ifndef _D3D12MA_NORMAL_BLOCK_FUNCTIONS
8377
NormalBlock::NormalBlock(
8378
AllocatorPimpl* allocator,
8379
BlockVector* blockVector,
8380
const D3D12_HEAP_PROPERTIES& heapProps,
8381
D3D12_HEAP_FLAGS heapFlags,
8382
UINT64 size,
8383
UINT id)
8384
: MemoryBlock(allocator, heapProps, heapFlags, size, id),
8385
m_pMetadata(NULL),
8386
m_BlockVector(blockVector) {}
8387
8388
NormalBlock::~NormalBlock()
8389
{
8390
if (m_pMetadata != NULL)
8391
{
8392
// Define macro D3D12MA_DEBUG_LOG to receive the list of the unfreed allocations.
8393
if (!m_pMetadata->IsEmpty())
8394
m_pMetadata->DebugLogAllAllocations();
8395
8396
// THIS IS THE MOST IMPORTANT ASSERT IN THE ENTIRE LIBRARY!
8397
// Hitting it means you have some memory leak - unreleased Allocation objects.
8398
D3D12MA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
8399
8400
D3D12MA_DELETE(m_Allocator->GetAllocs(), m_pMetadata);
8401
}
8402
}
8403
8404
HRESULT NormalBlock::Init(UINT32 algorithm, ID3D12ProtectedResourceSession* pProtectedSession, bool denyMsaaTextures)
8405
{
8406
HRESULT hr = MemoryBlock::Init(pProtectedSession, denyMsaaTextures);
8407
if (FAILED(hr))
8408
{
8409
return hr;
8410
}
8411
8412
switch (algorithm)
8413
{
8414
case POOL_FLAG_ALGORITHM_LINEAR:
8415
m_pMetadata = D3D12MA_NEW(m_Allocator->GetAllocs(), BlockMetadata_Linear)(&m_Allocator->GetAllocs(), false);
8416
break;
8417
default:
8418
D3D12MA_ASSERT(0);
8419
case 0:
8420
m_pMetadata = D3D12MA_NEW(m_Allocator->GetAllocs(), BlockMetadata_TLSF)(&m_Allocator->GetAllocs(), false);
8421
break;
8422
}
8423
m_pMetadata->Init(m_Size);
8424
8425
return hr;
8426
}
8427
8428
bool NormalBlock::Validate() const
8429
{
8430
D3D12MA_VALIDATE(GetHeap() &&
8431
m_pMetadata &&
8432
m_pMetadata->GetSize() != 0 &&
8433
m_pMetadata->GetSize() == GetSize());
8434
return m_pMetadata->Validate();
8435
}
8436
#endif // _D3D12MA_NORMAL_BLOCK_FUNCTIONS
8437
8438
#ifndef _D3D12MA_COMMITTED_ALLOCATION_LIST_FUNCTIONS
8439
void CommittedAllocationList::Init(bool useMutex, D3D12_HEAP_TYPE heapType, PoolPimpl* pool)
8440
{
8441
m_UseMutex = useMutex;
8442
m_HeapType = heapType;
8443
m_Pool = pool;
8444
}
8445
8446
CommittedAllocationList::~CommittedAllocationList()
8447
{
8448
if (!m_AllocationList.IsEmpty())
8449
{
8450
D3D12MA_ASSERT(0 && "Unfreed committed allocations found!");
8451
}
8452
}
8453
8454
UINT CommittedAllocationList::GetMemorySegmentGroup(AllocatorPimpl* allocator) const
8455
{
8456
if (m_Pool)
8457
return allocator->HeapPropertiesToMemorySegmentGroup(m_Pool->GetDesc().HeapProperties);
8458
else
8459
return allocator->StandardHeapTypeToMemorySegmentGroup(m_HeapType);
8460
}
8461
8462
void CommittedAllocationList::AddStatistics(Statistics& inoutStats)
8463
{
8464
MutexLockRead lock(m_Mutex, m_UseMutex);
8465
8466
for (Allocation* alloc = m_AllocationList.Front();
8467
alloc != NULL; alloc = m_AllocationList.GetNext(alloc))
8468
{
8469
const UINT64 size = alloc->GetSize();
8470
inoutStats.BlockCount++;
8471
inoutStats.AllocationCount++;
8472
inoutStats.BlockBytes += size;
8473
inoutStats.AllocationBytes += size;
8474
}
8475
}
8476
8477
void CommittedAllocationList::AddDetailedStatistics(DetailedStatistics& inoutStats)
8478
{
8479
MutexLockRead lock(m_Mutex, m_UseMutex);
8480
8481
for (Allocation* alloc = m_AllocationList.Front();
8482
alloc != NULL; alloc = m_AllocationList.GetNext(alloc))
8483
{
8484
const UINT64 size = alloc->GetSize();
8485
inoutStats.Stats.BlockCount++;
8486
inoutStats.Stats.BlockBytes += size;
8487
AddDetailedStatisticsAllocation(inoutStats, size);
8488
}
8489
}
8490
8491
void CommittedAllocationList::BuildStatsString(JsonWriter& json)
8492
{
8493
MutexLockRead lock(m_Mutex, m_UseMutex);
8494
8495
for (Allocation* alloc = m_AllocationList.Front();
8496
alloc != NULL; alloc = m_AllocationList.GetNext(alloc))
8497
{
8498
json.BeginObject(true);
8499
json.AddAllocationToObject(*alloc);
8500
json.EndObject();
8501
}
8502
}
8503
8504
void CommittedAllocationList::Register(Allocation* alloc)
8505
{
8506
MutexLockWrite lock(m_Mutex, m_UseMutex);
8507
m_AllocationList.PushBack(alloc);
8508
}
8509
8510
void CommittedAllocationList::Unregister(Allocation* alloc)
8511
{
8512
MutexLockWrite lock(m_Mutex, m_UseMutex);
8513
m_AllocationList.Remove(alloc);
8514
}
8515
#endif // _D3D12MA_COMMITTED_ALLOCATION_LIST_FUNCTIONS
8516
8517
#ifndef _D3D12MA_BLOCK_VECTOR_FUNCTIONS
8518
BlockVector::BlockVector(
8519
AllocatorPimpl* hAllocator,
8520
const D3D12_HEAP_PROPERTIES& heapProps,
8521
D3D12_HEAP_FLAGS heapFlags,
8522
UINT64 preferredBlockSize,
8523
size_t minBlockCount,
8524
size_t maxBlockCount,
8525
bool explicitBlockSize,
8526
UINT64 minAllocationAlignment,
8527
UINT32 algorithm,
8528
bool denyMsaaTextures,
8529
ID3D12ProtectedResourceSession* pProtectedSession,
8530
D3D12_RESIDENCY_PRIORITY residencyPriority)
8531
: m_hAllocator(hAllocator),
8532
m_HeapProps(heapProps),
8533
m_HeapFlags(heapFlags),
8534
m_PreferredBlockSize(preferredBlockSize),
8535
m_MinBlockCount(minBlockCount),
8536
m_MaxBlockCount(maxBlockCount),
8537
m_ExplicitBlockSize(explicitBlockSize),
8538
m_MinAllocationAlignment(minAllocationAlignment),
8539
m_Algorithm(algorithm),
8540
m_DenyMsaaTextures(denyMsaaTextures),
8541
m_ProtectedSession(pProtectedSession),
8542
m_ResidencyPriority(residencyPriority),
8543
m_HasEmptyBlock(false),
8544
m_Blocks(hAllocator->GetAllocs()),
8545
m_NextBlockId(0) {}
8546
8547
BlockVector::~BlockVector()
8548
{
8549
for (size_t i = m_Blocks.size(); i--; )
8550
{
8551
D3D12MA_DELETE(m_hAllocator->GetAllocs(), m_Blocks[i]);
8552
}
8553
}
8554
8555
HRESULT BlockVector::CreateMinBlocks()
8556
{
8557
for (size_t i = 0; i < m_MinBlockCount; ++i)
8558
{
8559
HRESULT hr = CreateBlock(m_PreferredBlockSize, NULL);
8560
if (FAILED(hr))
8561
{
8562
return hr;
8563
}
8564
}
8565
return S_OK;
8566
}
8567
8568
bool BlockVector::IsEmpty()
8569
{
8570
MutexLockRead lock(m_Mutex, m_hAllocator->UseMutex());
8571
return m_Blocks.empty();
8572
}
8573
8574
HRESULT BlockVector::Allocate(
8575
UINT64 size,
8576
UINT64 alignment,
8577
const ALLOCATION_DESC& allocDesc,
8578
size_t allocationCount,
8579
Allocation** pAllocations)
8580
{
8581
size_t allocIndex;
8582
HRESULT hr = S_OK;
8583
8584
{
8585
MutexLockWrite lock(m_Mutex, m_hAllocator->UseMutex());
8586
for (allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
8587
{
8588
hr = AllocatePage(
8589
size,
8590
alignment,
8591
allocDesc,
8592
pAllocations + allocIndex);
8593
if (FAILED(hr))
8594
{
8595
break;
8596
}
8597
}
8598
}
8599
8600
if (FAILED(hr))
8601
{
8602
// Free all already created allocations.
8603
while (allocIndex--)
8604
{
8605
Free(pAllocations[allocIndex]);
8606
}
8607
ZeroMemory(pAllocations, sizeof(Allocation*) * allocationCount);
8608
}
8609
8610
return hr;
8611
}
8612
8613
void BlockVector::Free(Allocation* hAllocation)
8614
{
8615
NormalBlock* pBlockToDelete = NULL;
8616
8617
bool budgetExceeded = false;
8618
if (IsHeapTypeStandard(m_HeapProps.Type))
8619
{
8620
Budget budget = {};
8621
m_hAllocator->GetBudgetForHeapType(budget, m_HeapProps.Type);
8622
budgetExceeded = budget.UsageBytes >= budget.BudgetBytes;
8623
}
8624
8625
// Scope for lock.
8626
{
8627
MutexLockWrite lock(m_Mutex, m_hAllocator->UseMutex());
8628
8629
NormalBlock* pBlock = hAllocation->m_Placed.block;
8630
8631
pBlock->m_pMetadata->Free(hAllocation->GetAllocHandle());
8632
D3D12MA_HEAVY_ASSERT(pBlock->Validate());
8633
8634
const size_t blockCount = m_Blocks.size();
8635
// pBlock became empty after this deallocation.
8636
if (pBlock->m_pMetadata->IsEmpty())
8637
{
8638
// Already has empty Allocation. We don't want to have two, so delete this one.
8639
if ((m_HasEmptyBlock || budgetExceeded) &&
8640
blockCount > m_MinBlockCount)
8641
{
8642
pBlockToDelete = pBlock;
8643
Remove(pBlock);
8644
}
8645
// We now have first empty block.
8646
else
8647
{
8648
m_HasEmptyBlock = true;
8649
}
8650
}
8651
// pBlock didn't become empty, but we have another empty block - find and free that one.
8652
// (This is optional, heuristics.)
8653
else if (m_HasEmptyBlock && blockCount > m_MinBlockCount)
8654
{
8655
NormalBlock* pLastBlock = m_Blocks.back();
8656
if (pLastBlock->m_pMetadata->IsEmpty())
8657
{
8658
pBlockToDelete = pLastBlock;
8659
m_Blocks.pop_back();
8660
m_HasEmptyBlock = false;
8661
}
8662
}
8663
8664
IncrementallySortBlocks();
8665
}
8666
8667
// Destruction of the freed block. Deferred until this point, outside of the mutex
8668
// lock, for performance reasons.
8669
if (pBlockToDelete != NULL)
8670
{
8671
D3D12MA_DELETE(m_hAllocator->GetAllocs(), pBlockToDelete);
8672
}
8673
}
8674
8675
HRESULT BlockVector::CreateResource(
8676
UINT64 size,
8677
UINT64 alignment,
8678
const ALLOCATION_DESC& allocDesc,
8679
const CREATE_RESOURCE_PARAMS& createParams,
8680
Allocation** ppAllocation,
8681
REFIID riidResource,
8682
void** ppvResource)
8683
{
8684
HRESULT hr = Allocate(size, alignment, allocDesc, 1, ppAllocation);
8685
if (SUCCEEDED(hr))
8686
{
8687
ID3D12Resource* res = NULL;
8688
hr = m_hAllocator->CreatePlacedResourceWrap(
8689
(*ppAllocation)->m_Placed.block->GetHeap(),
8690
(*ppAllocation)->GetOffset(),
8691
createParams,
8692
D3D12MA_IID_PPV_ARGS(&res));
8693
if (SUCCEEDED(hr))
8694
{
8695
if (ppvResource != NULL)
8696
{
8697
hr = res->QueryInterface(riidResource, ppvResource);
8698
}
8699
if (SUCCEEDED(hr))
8700
{
8701
(*ppAllocation)->SetResourcePointer(res, createParams.GetBaseResourceDesc());
8702
}
8703
else
8704
{
8705
res->Release();
8706
SAFE_RELEASE(*ppAllocation);
8707
}
8708
}
8709
else
8710
{
8711
SAFE_RELEASE(*ppAllocation);
8712
}
8713
}
8714
return hr;
8715
}
8716
8717
void BlockVector::AddStatistics(Statistics& inoutStats)
8718
{
8719
MutexLockRead lock(m_Mutex, m_hAllocator->UseMutex());
8720
8721
for (size_t i = 0; i < m_Blocks.size(); ++i)
8722
{
8723
const NormalBlock* const pBlock = m_Blocks[i];
8724
D3D12MA_ASSERT(pBlock);
8725
D3D12MA_HEAVY_ASSERT(pBlock->Validate());
8726
pBlock->m_pMetadata->AddStatistics(inoutStats);
8727
}
8728
}
8729
8730
void BlockVector::AddDetailedStatistics(DetailedStatistics& inoutStats)
8731
{
8732
MutexLockRead lock(m_Mutex, m_hAllocator->UseMutex());
8733
8734
for (size_t i = 0; i < m_Blocks.size(); ++i)
8735
{
8736
const NormalBlock* const pBlock = m_Blocks[i];
8737
D3D12MA_ASSERT(pBlock);
8738
D3D12MA_HEAVY_ASSERT(pBlock->Validate());
8739
pBlock->m_pMetadata->AddDetailedStatistics(inoutStats);
8740
}
8741
}
8742
8743
void BlockVector::WriteBlockInfoToJson(JsonWriter& json)
8744
{
8745
MutexLockRead lock(m_Mutex, m_hAllocator->UseMutex());
8746
8747
json.BeginObject();
8748
8749
for (size_t i = 0, count = m_Blocks.size(); i < count; ++i)
8750
{
8751
const NormalBlock* const pBlock = m_Blocks[i];
8752
D3D12MA_ASSERT(pBlock);
8753
D3D12MA_HEAVY_ASSERT(pBlock->Validate());
8754
json.BeginString();
8755
json.ContinueString(pBlock->GetId());
8756
json.EndString();
8757
8758
json.BeginObject();
8759
pBlock->m_pMetadata->WriteAllocationInfoToJson(json);
8760
json.EndObject();
8761
}
8762
8763
json.EndObject();
8764
}
8765
8766
UINT64 BlockVector::CalcSumBlockSize() const
8767
{
8768
UINT64 result = 0;
8769
for (size_t i = m_Blocks.size(); i--; )
8770
{
8771
result += m_Blocks[i]->m_pMetadata->GetSize();
8772
}
8773
return result;
8774
}
8775
8776
UINT64 BlockVector::CalcMaxBlockSize() const
8777
{
8778
UINT64 result = 0;
8779
for (size_t i = m_Blocks.size(); i--; )
8780
{
8781
result = D3D12MA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
8782
if (result >= m_PreferredBlockSize)
8783
{
8784
break;
8785
}
8786
}
8787
return result;
8788
}
8789
8790
void BlockVector::Remove(NormalBlock* pBlock)
8791
{
8792
for (size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
8793
{
8794
if (m_Blocks[blockIndex] == pBlock)
8795
{
8796
m_Blocks.remove(blockIndex);
8797
return;
8798
}
8799
}
8800
D3D12MA_ASSERT(0);
8801
}
8802
8803
void BlockVector::IncrementallySortBlocks()
8804
{
8805
if (!m_IncrementalSort)
8806
return;
8807
// Bubble sort only until first swap.
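// Performing at most one swap per call keeps m_Blocks cheaply and
// approximately sorted by free size (ascending), so AllocatePage searches
// the fullest blocks first.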
8808
for (size_t i = 1; i < m_Blocks.size(); ++i)
8809
{
8810
if (m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
8811
{
8812
D3D12MA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
8813
return;
8814
}
8815
}
8816
}
8817
8818
void BlockVector::SortByFreeSize()
8819
{
8820
D3D12MA_SORT(m_Blocks.begin(), m_Blocks.end(),
8821
[](auto* b1, auto* b2)
8822
{
8823
return b1->m_pMetadata->GetSumFreeSize() < b2->m_pMetadata->GetSumFreeSize();
8824
});
8825
}
8826
8827
HRESULT BlockVector::AllocatePage(
8828
UINT64 size,
8829
UINT64 alignment,
8830
const ALLOCATION_DESC& allocDesc,
8831
Allocation** pAllocation)
8832
{
8833
// Early reject: the requested allocation size is larger than the maximum block size for this block vector.
8834
if (size + D3D12MA_DEBUG_MARGIN > m_PreferredBlockSize)
8835
{
8836
return E_OUTOFMEMORY;
8837
}
8838
8839
UINT64 freeMemory = UINT64_MAX;
8840
if (IsHeapTypeStandard(m_HeapProps.Type))
8841
{
8842
Budget budget = {};
8843
m_hAllocator->GetBudgetForHeapType(budget, m_HeapProps.Type);
8844
freeMemory = (budget.UsageBytes < budget.BudgetBytes) ? (budget.BudgetBytes - budget.UsageBytes) : 0;
8845
}
8846
8847
const bool canCreateNewBlock =
8848
((allocDesc.Flags & ALLOCATION_FLAG_NEVER_ALLOCATE) == 0) &&
8849
(m_Blocks.size() < m_MaxBlockCount) &&
8850
// Even if we don't have to stay within budget with this allocation, when the
8851
// budget would be exceeded, we don't want to allocate new blocks, but always
8852
// create resources as committed.
8853
freeMemory >= size;
8854
8855
// 1. Search existing allocations
8856
{
8857
// Forward order in m_Blocks - prefer blocks with smallest amount of free space.
8858
for (size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
8859
{
8860
NormalBlock* const pCurrBlock = m_Blocks[blockIndex];
8861
D3D12MA_ASSERT(pCurrBlock);
8862
HRESULT hr = AllocateFromBlock(
8863
pCurrBlock,
8864
size,
8865
alignment,
8866
allocDesc.Flags,
8867
allocDesc.pPrivateData,
8868
allocDesc.Flags & ALLOCATION_FLAG_STRATEGY_MASK,
8869
pAllocation);
8870
if (SUCCEEDED(hr))
8871
{
8872
return hr;
8873
}
8874
}
8875
}
8876
8877
// 2. Try to create new block.
8878
if (canCreateNewBlock)
8879
{
8880
// Calculate optimal size for new block.
8881
UINT64 newBlockSize = m_PreferredBlockSize;
8882
UINT newBlockSizeShift = 0;
8883
8884
if (!m_ExplicitBlockSize)
8885
{
8886
// Allocate 1/8, 1/4, 1/2 as first blocks.
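// Illustrative example: with a 64 MiB preferred block size and no existing
// blocks, the first block created for a small allocation is 8 MiB; subsequent
// blocks grow toward the preferred size as existing blocks fill up.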
8887
const UINT64 maxExistingBlockSize = CalcMaxBlockSize();
8888
for (UINT i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
8889
{
8890
const UINT64 smallerNewBlockSize = newBlockSize / 2;
8891
if (smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
8892
{
8893
newBlockSize = smallerNewBlockSize;
8894
++newBlockSizeShift;
8895
}
8896
else
8897
{
8898
break;
8899
}
8900
}
8901
}
8902
8903
size_t newBlockIndex = 0;
8904
HRESULT hr = newBlockSize <= freeMemory ?
8905
CreateBlock(newBlockSize, &newBlockIndex) : E_OUTOFMEMORY;
8906
// Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
8907
if (!m_ExplicitBlockSize)
8908
{
8909
while (FAILED(hr) && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
8910
{
8911
const UINT64 smallerNewBlockSize = newBlockSize / 2;
8912
if (smallerNewBlockSize >= size)
8913
{
8914
newBlockSize = smallerNewBlockSize;
8915
++newBlockSizeShift;
8916
hr = newBlockSize <= freeMemory ?
8917
CreateBlock(newBlockSize, &newBlockIndex) : E_OUTOFMEMORY;
8918
}
8919
else
8920
{
8921
break;
8922
}
8923
}
8924
}
8925
8926
if (SUCCEEDED(hr))
8927
{
8928
NormalBlock* const pBlock = m_Blocks[newBlockIndex];
8929
D3D12MA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
8930
8931
hr = AllocateFromBlock(
8932
pBlock,
8933
size,
8934
alignment,
8935
allocDesc.Flags,
8936
allocDesc.pPrivateData,
8937
allocDesc.Flags & ALLOCATION_FLAG_STRATEGY_MASK,
8938
pAllocation);
8939
if (SUCCEEDED(hr))
8940
{
8941
return hr;
8942
}
8943
else
8944
{
8945
// Allocation from new block failed, possibly due to D3D12MA_DEBUG_MARGIN or alignment.
8946
return E_OUTOFMEMORY;
8947
}
8948
}
8949
}
8950
8951
return E_OUTOFMEMORY;
8952
}
8953
8954
HRESULT BlockVector::AllocateFromBlock(
8955
NormalBlock* pBlock,
8956
UINT64 size,
8957
UINT64 alignment,
8958
ALLOCATION_FLAGS allocFlags,
8959
void* pPrivateData,
8960
UINT32 strategy,
8961
Allocation** pAllocation)
8962
{
8963
alignment = D3D12MA_MAX(alignment, m_MinAllocationAlignment);
8964
8965
AllocationRequest currRequest = {};
8966
if (pBlock->m_pMetadata->CreateAllocationRequest(
8967
size,
8968
alignment,
8969
allocFlags & ALLOCATION_FLAG_UPPER_ADDRESS,
8970
strategy,
8971
&currRequest))
8972
{
8973
return CommitAllocationRequest(currRequest, pBlock, size, alignment, pPrivateData, pAllocation);
8974
}
8975
return E_OUTOFMEMORY;
8976
}
8977
8978
HRESULT BlockVector::CommitAllocationRequest(
8979
AllocationRequest& allocRequest,
8980
NormalBlock* pBlock,
8981
UINT64 size,
8982
UINT64 alignment,
8983
void* pPrivateData,
8984
Allocation** pAllocation)
8985
{
8986
// We no longer have an empty block.
8987
if (pBlock->m_pMetadata->IsEmpty())
8988
m_HasEmptyBlock = false;
8989
8990
*pAllocation = m_hAllocator->GetAllocationObjectAllocator().Allocate(m_hAllocator, size, alignment, allocRequest.zeroInitialized);
8991
pBlock->m_pMetadata->Alloc(allocRequest, size, *pAllocation);
8992
8993
(*pAllocation)->InitPlaced(allocRequest.allocHandle, pBlock);
8994
(*pAllocation)->SetPrivateData(pPrivateData);
8995
8996
D3D12MA_HEAVY_ASSERT(pBlock->Validate());
8997
m_hAllocator->m_Budget.AddAllocation(m_hAllocator->HeapPropertiesToMemorySegmentGroup(m_HeapProps), size);
8998
8999
return S_OK;
9000
}
9001
9002
HRESULT BlockVector::CreateBlock(
9003
UINT64 blockSize,
9004
size_t* pNewBlockIndex)
9005
{
9006
NormalBlock* const pBlock = D3D12MA_NEW(m_hAllocator->GetAllocs(), NormalBlock)(
9007
m_hAllocator,
9008
this,
9009
m_HeapProps,
9010
m_HeapFlags,
9011
blockSize,
9012
m_NextBlockId++);
9013
HRESULT hr = pBlock->Init(m_Algorithm, m_ProtectedSession, m_DenyMsaaTextures);
9014
if (FAILED(hr))
9015
{
9016
D3D12MA_DELETE(m_hAllocator->GetAllocs(), pBlock);
9017
return hr;
9018
}
9019
9020
m_hAllocator->SetResidencyPriority(pBlock->GetHeap(), m_ResidencyPriority);
9021
9022
m_Blocks.push_back(pBlock);
9023
if (pNewBlockIndex != NULL)
9024
{
9025
*pNewBlockIndex = m_Blocks.size() - 1;
9026
}
9027
9028
return hr;
9029
}
9030
#endif // _D3D12MA_BLOCK_VECTOR_FUNCTIONS
9031
9032
#ifndef _D3D12MA_DEFRAGMENTATION_CONTEXT_PIMPL_FUNCTIONS
9033
DefragmentationContextPimpl::DefragmentationContextPimpl(
9034
AllocatorPimpl* hAllocator,
9035
const DEFRAGMENTATION_DESC& desc,
9036
BlockVector* poolVector)
9037
: m_MaxPassBytes(desc.MaxBytesPerPass == 0 ? UINT64_MAX : desc.MaxBytesPerPass),
9038
m_MaxPassAllocations(desc.MaxAllocationsPerPass == 0 ? UINT32_MAX : desc.MaxAllocationsPerPass),
9039
m_Moves(hAllocator->GetAllocs())
9040
{
9041
m_Algorithm = desc.Flags & DEFRAGMENTATION_FLAG_ALGORITHM_MASK;
9042
9043
if (poolVector != NULL)
9044
{
9045
m_BlockVectorCount = 1;
9046
m_PoolBlockVector = poolVector;
9047
m_pBlockVectors = &m_PoolBlockVector;
9048
m_PoolBlockVector->SetIncrementalSort(false);
9049
m_PoolBlockVector->SortByFreeSize();
9050
}
9051
else
9052
{
9053
m_BlockVectorCount = hAllocator->GetDefaultPoolCount();
9054
m_PoolBlockVector = NULL;
9055
m_pBlockVectors = hAllocator->GetDefaultPools();
9056
for (UINT32 i = 0; i < m_BlockVectorCount; ++i)
9057
{
9058
BlockVector* vector = m_pBlockVectors[i];
9059
if (vector != NULL)
9060
{
9061
vector->SetIncrementalSort(false);
9062
vector->SortByFreeSize();
9063
}
9064
}
9065
}
9066
9067
switch (m_Algorithm)
9068
{
9069
case 0: // Default algorithm
9070
m_Algorithm = DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED;
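// Intentional fall-through: the default algorithm is handled as BALANCED,
// so it also needs the per-vector StateBalanced array allocated below.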
9071
case DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED:
9072
{
9073
m_AlgorithmState = D3D12MA_NEW_ARRAY(hAllocator->GetAllocs(), StateBalanced, m_BlockVectorCount);
9074
break;
9075
}
9076
}
9077
}
9078
9079
DefragmentationContextPimpl::~DefragmentationContextPimpl()
9080
{
9081
if (m_PoolBlockVector != NULL)
9082
m_PoolBlockVector->SetIncrementalSort(true);
9083
else
9084
{
9085
for (UINT32 i = 0; i < m_BlockVectorCount; ++i)
9086
{
9087
BlockVector* vector = m_pBlockVectors[i];
9088
if (vector != NULL)
9089
vector->SetIncrementalSort(true);
9090
}
9091
}
9092
9093
if (m_AlgorithmState)
9094
{
9095
switch (m_Algorithm)
9096
{
9097
case DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED:
9098
D3D12MA_DELETE_ARRAY(m_Moves.GetAllocs(), reinterpret_cast<StateBalanced*>(m_AlgorithmState), m_BlockVectorCount);
9099
break;
9100
default:
9101
D3D12MA_ASSERT(0);
9102
}
9103
}
9104
}
9105
9106
HRESULT DefragmentationContextPimpl::DefragmentPassBegin(DEFRAGMENTATION_PASS_MOVE_INFO& moveInfo)
9107
{
9108
if (m_PoolBlockVector != NULL)
9109
{
9110
MutexLockWrite lock(m_PoolBlockVector->GetMutex(), m_PoolBlockVector->m_hAllocator->UseMutex());
9111
9112
if (m_PoolBlockVector->GetBlockCount() > 1)
9113
ComputeDefragmentation(*m_PoolBlockVector, 0);
9114
else if (m_PoolBlockVector->GetBlockCount() == 1)
9115
ReallocWithinBlock(*m_PoolBlockVector, m_PoolBlockVector->GetBlock(0));
9116
9117
// Set up the index into the block vector.
9118
for (size_t i = 0; i < m_Moves.size(); ++i)
9119
m_Moves[i].pDstTmpAllocation->SetPrivateData(0);
9120
}
9121
else
9122
{
9123
for (UINT32 i = 0; i < m_BlockVectorCount; ++i)
9124
{
9125
if (m_pBlockVectors[i] != NULL)
9126
{
9127
MutexLockWrite lock(m_pBlockVectors[i]->GetMutex(), m_pBlockVectors[i]->m_hAllocator->UseMutex());
9128
9129
bool end = false;
9130
size_t movesOffset = m_Moves.size();
9131
if (m_pBlockVectors[i]->GetBlockCount() > 1)
9132
{
9133
end = ComputeDefragmentation(*m_pBlockVectors[i], i);
9134
}
9135
else if (m_pBlockVectors[i]->GetBlockCount() == 1)
9136
{
9137
end = ReallocWithinBlock(*m_pBlockVectors[i], m_pBlockVectors[i]->GetBlock(0));
9138
}
9139
9140
// Set up the index into the block vector.
9141
for (; movesOffset < m_Moves.size(); ++movesOffset)
9142
m_Moves[movesOffset].pDstTmpAllocation->SetPrivateData(reinterpret_cast<void*>(static_cast<uintptr_t>(i)));
9143
9144
if (end)
9145
break;
9146
}
9147
}
9148
}
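// Report the collected moves: S_FALSE tells the caller to process pMoves and
// then call DefragmentPassEnd(); S_OK means no further moves are needed and
// defragmentation is complete.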
9149
9150
moveInfo.MoveCount = static_cast<UINT32>(m_Moves.size());
9151
if (moveInfo.MoveCount > 0)
9152
{
9153
moveInfo.pMoves = m_Moves.data();
9154
return S_FALSE;
9155
}
9156
9157
moveInfo.pMoves = NULL;
9158
return S_OK;
9159
}
9160
9161
HRESULT DefragmentationContextPimpl::DefragmentPassEnd(DEFRAGMENTATION_PASS_MOVE_INFO& moveInfo)
9162
{
9163
D3D12MA_ASSERT(moveInfo.MoveCount > 0 ? moveInfo.pMoves != NULL : true);
9164
9165
HRESULT result = S_OK;
9166
Vector<FragmentedBlock> immovableBlocks(m_Moves.GetAllocs());
9167
9168
for (uint32_t i = 0; i < moveInfo.MoveCount; ++i)
9169
{
9170
DEFRAGMENTATION_MOVE& move = moveInfo.pMoves[i];
9171
size_t prevCount = 0, currentCount = 0;
9172
UINT64 freedBlockSize = 0;
9173
9174
UINT32 vectorIndex;
9175
BlockVector* vector;
9176
if (m_PoolBlockVector != NULL)
9177
{
9178
vectorIndex = 0;
9179
vector = m_PoolBlockVector;
9180
}
9181
else
9182
{
9183
vectorIndex = static_cast<UINT32>(reinterpret_cast<uintptr_t>(move.pDstTmpAllocation->GetPrivateData()));
9184
vector = m_pBlockVectors[vectorIndex];
9185
D3D12MA_ASSERT(vector != NULL);
9186
}
9187
9188
switch (move.Operation)
9189
{
9190
case DEFRAGMENTATION_MOVE_OPERATION_COPY:
9191
{
9192
move.pSrcAllocation->SwapBlockAllocation(move.pDstTmpAllocation);
9193
9194
// Scope for locks; Free() has its own lock.
9195
{
9196
MutexLockRead lock(vector->GetMutex(), vector->m_hAllocator->UseMutex());
9197
prevCount = vector->GetBlockCount();
9198
freedBlockSize = move.pDstTmpAllocation->GetBlock()->m_pMetadata->GetSize();
9199
}
9200
move.pDstTmpAllocation->Release();
9201
{
9202
MutexLockRead lock(vector->GetMutex(), vector->m_hAllocator->UseMutex());
9203
currentCount = vector->GetBlockCount();
9204
}
9205
9206
result = S_FALSE;
9207
break;
9208
}
9209
case DEFRAGMENTATION_MOVE_OPERATION_IGNORE:
9210
{
9211
m_PassStats.BytesMoved -= move.pSrcAllocation->GetSize();
9212
--m_PassStats.AllocationsMoved;
9213
move.pDstTmpAllocation->Release();
9214
9215
NormalBlock* newBlock = move.pSrcAllocation->GetBlock();
9216
bool notPresent = true;
9217
for (const FragmentedBlock& block : immovableBlocks)
9218
{
9219
if (block.block == newBlock)
9220
{
9221
notPresent = false;
9222
break;
9223
}
9224
}
9225
if (notPresent)
9226
immovableBlocks.push_back({ vectorIndex, newBlock });
9227
break;
9228
}
9229
case DEFRAGMENTATION_MOVE_OPERATION_DESTROY:
9230
{
9231
m_PassStats.BytesMoved -= move.pSrcAllocation->GetSize();
9232
--m_PassStats.AllocationsMoved;
9233
// Scope for locks; Free() has its own lock.
9234
{
9235
MutexLockRead lock(vector->GetMutex(), vector->m_hAllocator->UseMutex());
9236
prevCount = vector->GetBlockCount();
9237
freedBlockSize = move.pSrcAllocation->GetBlock()->m_pMetadata->GetSize();
9238
}
9239
move.pSrcAllocation->Release();
9240
{
9241
MutexLockRead lock(vector->GetMutex(), vector->m_hAllocator->UseMutex());
9242
currentCount = vector->GetBlockCount();
9243
}
9244
freedBlockSize *= prevCount - currentCount;
9245
9246
UINT64 dstBlockSize;
9247
{
9248
MutexLockRead lock(vector->GetMutex(), vector->m_hAllocator->UseMutex());
9249
dstBlockSize = move.pDstTmpAllocation->GetBlock()->m_pMetadata->GetSize();
9250
}
9251
move.pDstTmpAllocation->Release();
9252
{
9253
MutexLockRead lock(vector->GetMutex(), vector->m_hAllocator->UseMutex());
9254
freedBlockSize += dstBlockSize * (currentCount - vector->GetBlockCount());
9255
currentCount = vector->GetBlockCount();
9256
}
9257
9258
result = S_FALSE;
9259
break;
9260
}
9261
default:
9262
D3D12MA_ASSERT(0);
9263
}
9264
9265
if (prevCount > currentCount)
9266
{
9267
size_t freedBlocks = prevCount - currentCount;
9268
m_PassStats.HeapsFreed += static_cast<UINT32>(freedBlocks);
9269
m_PassStats.BytesFreed += freedBlockSize;
9270
}
9271
}
9272
moveInfo.MoveCount = 0;
9273
moveInfo.pMoves = NULL;
9274
m_Moves.clear();
9275
9276
// Update stats
9277
m_GlobalStats.AllocationsMoved += m_PassStats.AllocationsMoved;
9278
m_GlobalStats.BytesFreed += m_PassStats.BytesFreed;
9279
m_GlobalStats.BytesMoved += m_PassStats.BytesMoved;
9280
m_GlobalStats.HeapsFreed += m_PassStats.HeapsFreed;
9281
m_PassStats = { 0 };
9282
9283
// Move blocks containing immovable allocations according to the algorithm.
9284
if (immovableBlocks.size() > 0)
9285
{
9286
// Move them to the beginning.
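// Swapping these blocks below m_ImmovableBlockCount makes later passes skip
// them entirely (the per-vector loops only iterate above that index).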
9287
for (const FragmentedBlock& block : immovableBlocks)
9288
{
9289
BlockVector* vector = m_pBlockVectors[block.data];
9290
MutexLockWrite lock(vector->GetMutex(), vector->m_hAllocator->UseMutex());
9291
9292
for (size_t i = m_ImmovableBlockCount; i < vector->GetBlockCount(); ++i)
9293
{
9294
if (vector->GetBlock(i) == block.block)
9295
{
9296
D3D12MA_SWAP(vector->m_Blocks[i], vector->m_Blocks[m_ImmovableBlockCount++]);
9297
break;
9298
}
9299
}
9300
}
9301
}
9302
return result;
9303
}
9304
9305
bool DefragmentationContextPimpl::ComputeDefragmentation(BlockVector& vector, size_t index)
9306
{
9307
switch (m_Algorithm)
9308
{
9309
case DEFRAGMENTATION_FLAG_ALGORITHM_FAST:
9310
return ComputeDefragmentation_Fast(vector);
9311
default:
9312
D3D12MA_ASSERT(0);
9313
case DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED:
9314
return ComputeDefragmentation_Balanced(vector, index, true);
9315
case DEFRAGMENTATION_FLAG_ALGORITHM_FULL:
9316
return ComputeDefragmentation_Full(vector);
9317
}
9318
}
9319
9320
DefragmentationContextPimpl::MoveAllocationData DefragmentationContextPimpl::GetMoveData(
9321
AllocHandle handle, BlockMetadata* metadata)
9322
{
9323
MoveAllocationData moveData;
9324
moveData.move.pSrcAllocation = (Allocation*)metadata->GetAllocationPrivateData(handle);
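// The Allocation* is recoverable from the handle alone because the block
// metadata stores it as the per-allocation private data when the allocation
// is committed (see BlockVector::CommitAllocationRequest).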
9325
moveData.size = moveData.move.pSrcAllocation->GetSize();
9326
moveData.alignment = moveData.move.pSrcAllocation->GetAlignment();
9327
moveData.flags = ALLOCATION_FLAG_NONE;
9328
9329
return moveData;
9330
}
9331
9332
DefragmentationContextPimpl::CounterStatus DefragmentationContextPimpl::CheckCounters(UINT64 bytes)
9333
{
9334
// Ignore this allocation if moving it would exceed the maximum bytes allowed per pass.
9335
if (m_PassStats.BytesMoved + bytes > m_MaxPassBytes)
9336
{
9337
if (++m_IgnoredAllocs < MAX_ALLOCS_TO_IGNORE)
9338
return CounterStatus::Ignore;
9339
else
9340
return CounterStatus::End;
9341
}
9342
return CounterStatus::Pass;
9343
}
9344
9345
bool DefragmentationContextPimpl::IncrementCounters(UINT64 bytes)
9346
{
9347
m_PassStats.BytesMoved += bytes;
9348
// Early return when a pass limit (allocations or bytes) has been reached.
9349
if (++m_PassStats.AllocationsMoved >= m_MaxPassAllocations || m_PassStats.BytesMoved >= m_MaxPassBytes)
9350
{
9351
D3D12MA_ASSERT((m_PassStats.AllocationsMoved == m_MaxPassAllocations ||
9352
m_PassStats.BytesMoved == m_MaxPassBytes) && "Exceeded maximal pass threshold!");
9353
return true;
9354
}
9355
return false;
9356
}
9357
9358
bool DefragmentationContextPimpl::ReallocWithinBlock(BlockVector& vector, NormalBlock* block)
9359
{
9360
BlockMetadata* metadata = block->m_pMetadata;
9361
9362
for (AllocHandle handle = metadata->GetAllocationListBegin();
9363
handle != (AllocHandle)0;
9364
handle = metadata->GetNextAllocation(handle))
9365
{
9366
MoveAllocationData moveData = GetMoveData(handle, metadata);
9367
// Ignore allocations newly created by the defragmentation algorithm itself.
9368
if (moveData.move.pSrcAllocation->GetPrivateData() == this)
9369
continue;
9370
switch (CheckCounters(moveData.move.pSrcAllocation->GetSize()))
9371
{
9372
case CounterStatus::Ignore:
9373
continue;
9374
case CounterStatus::End:
9375
return true;
9376
default:
9377
D3D12MA_ASSERT(0);
9378
case CounterStatus::Pass:
9379
break;
9380
}
9381
9382
UINT64 offset = moveData.move.pSrcAllocation->GetOffset();
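// Reallocating is only worthwhile if the allocation is not already at offset 0
// and the block has enough total free space to hold the temporary copy while
// the original still occupies its old place.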
9383
if (offset != 0 && metadata->GetSumFreeSize() >= moveData.size)
9384
{
9385
AllocationRequest request = {};
9386
if (metadata->CreateAllocationRequest(
9387
moveData.size,
9388
moveData.alignment,
9389
false,
9390
ALLOCATION_FLAG_STRATEGY_MIN_OFFSET,
9391
&request))
9392
{
9393
if (metadata->GetAllocationOffset(request.allocHandle) < offset)
9394
{
9395
if (SUCCEEDED(vector.CommitAllocationRequest(
9396
request,
9397
block,
9398
moveData.size,
9399
moveData.alignment,
9400
this,
9401
&moveData.move.pDstTmpAllocation)))
9402
{
9403
m_Moves.push_back(moveData.move);
9404
if (IncrementCounters(moveData.size))
9405
return true;
9406
}
9407
}
9408
}
9409
}
9410
}
9411
return false;
9412
}
9413
9414
bool DefragmentationContextPimpl::AllocInOtherBlock(size_t start, size_t end, MoveAllocationData& data, BlockVector& vector)
9415
{
9416
for (; start < end; ++start)
9417
{
9418
NormalBlock* dstBlock = vector.GetBlock(start);
9419
if (dstBlock->m_pMetadata->GetSumFreeSize() >= data.size)
9420
{
9421
if (SUCCEEDED(vector.AllocateFromBlock(dstBlock,
9422
data.size,
9423
data.alignment,
9424
data.flags,
9425
this,
9426
0,
9427
&data.move.pDstTmpAllocation)))
9428
{
9429
m_Moves.push_back(data.move);
9430
if (IncrementCounters(data.size))
9431
return true;
9432
break;
9433
}
9434
}
9435
}
9436
return false;
9437
}
9438
9439
bool DefragmentationContextPimpl::ComputeDefragmentation_Fast(BlockVector& vector)
9440
{
9441
// Move only between blocks
9442
9443
// Go through allocations in last blocks and try to fit them inside first ones
9444
for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i)
9445
{
9446
BlockMetadata* metadata = vector.GetBlock(i)->m_pMetadata;
9447
9448
for (AllocHandle handle = metadata->GetAllocationListBegin();
9449
handle != (AllocHandle)0;
9450
handle = metadata->GetNextAllocation(handle))
9451
{
9452
MoveAllocationData moveData = GetMoveData(handle, metadata);
9453
// Ignore allocations newly created by the defragmentation algorithm itself.
9454
if (moveData.move.pSrcAllocation->GetPrivateData() == this)
9455
continue;
9456
switch (CheckCounters(moveData.move.pSrcAllocation->GetSize()))
9457
{
9458
case CounterStatus::Ignore:
9459
continue;
9460
case CounterStatus::End:
9461
return true;
9462
default:
9463
D3D12MA_ASSERT(0);
9464
case CounterStatus::Pass:
9465
break;
9466
}
9467
9468
// Check all previous blocks for free space
9469
if (AllocInOtherBlock(0, i, moveData, vector))
9470
return true;
9471
}
9472
}
9473
return false;
9474
}
9475
9476
bool DefragmentationContextPimpl::ComputeDefragmentation_Balanced(BlockVector& vector, size_t index, bool update)
9477
{
9478
// Go over every allocation and try to fit it in previous blocks at lowest offsets,
9479
// if not possible: realloc within single block to minimize offset (exclude offset == 0),
9480
// but only if there are noticeable gaps between them (a heuristic, e.g. based on the average allocation size in the block).
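// The heuristic below treats a gap as "noticeable" when a neighboring free
// region is at least half of the average free-region size, or when the
// allocation itself is no larger than the average free/allocation size.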
9481
D3D12MA_ASSERT(m_AlgorithmState != NULL);
9482
9483
StateBalanced& vectorState = reinterpret_cast<StateBalanced*>(m_AlgorithmState)[index];
9484
if (update && vectorState.avgAllocSize == UINT64_MAX)
9485
UpdateVectorStatistics(vector, vectorState);
9486
9487
const size_t startMoveCount = m_Moves.size();
9488
UINT64 minimalFreeRegion = vectorState.avgFreeSize / 2;
9489
for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i)
9490
{
9491
NormalBlock* block = vector.GetBlock(i);
9492
BlockMetadata* metadata = block->m_pMetadata;
9493
UINT64 prevFreeRegionSize = 0;
9494
9495
for (AllocHandle handle = metadata->GetAllocationListBegin();
9496
handle != (AllocHandle)0;
9497
handle = metadata->GetNextAllocation(handle))
9498
{
9499
MoveAllocationData moveData = GetMoveData(handle, metadata);
9500
// Ignore allocations newly created by the defragmentation algorithm itself.
9501
if (moveData.move.pSrcAllocation->GetPrivateData() == this)
9502
continue;
9503
switch (CheckCounters(moveData.move.pSrcAllocation->GetSize()))
9504
{
9505
case CounterStatus::Ignore:
9506
continue;
9507
case CounterStatus::End:
9508
return true;
9509
default:
9510
D3D12MA_ASSERT(0);
9511
case CounterStatus::Pass:
9512
break;
9513
}
9514
9515
// Check all previous blocks for free space
9516
const size_t prevMoveCount = m_Moves.size();
9517
if (AllocInOtherBlock(0, i, moveData, vector))
9518
return true;
9519
9520
UINT64 nextFreeRegionSize = metadata->GetNextFreeRegionSize(handle);
9521
// If no room was found, realloc within the same block to a lower offset.
9522
UINT64 offset = moveData.move.pSrcAllocation->GetOffset();
9523
if (prevMoveCount == m_Moves.size() && offset != 0 && metadata->GetSumFreeSize() >= moveData.size)
9524
{
9525
// Check if realloc will make sense
9526
if (prevFreeRegionSize >= minimalFreeRegion ||
9527
nextFreeRegionSize >= minimalFreeRegion ||
9528
moveData.size <= vectorState.avgFreeSize ||
9529
moveData.size <= vectorState.avgAllocSize)
9530
{
9531
AllocationRequest request = {};
9532
if (metadata->CreateAllocationRequest(
9533
moveData.size,
9534
moveData.alignment,
9535
false,
9536
ALLOCATION_FLAG_STRATEGY_MIN_OFFSET,
9537
&request))
9538
{
9539
if (metadata->GetAllocationOffset(request.allocHandle) < offset)
9540
{
9541
if (SUCCEEDED(vector.CommitAllocationRequest(
9542
request,
9543
block,
9544
moveData.size,
9545
moveData.alignment,
9546
this,
9547
&moveData.move.pDstTmpAllocation)))
9548
{
9549
m_Moves.push_back(moveData.move);
9550
if (IncrementCounters(moveData.size))
9551
return true;
9552
}
9553
}
9554
}
9555
}
9556
}
9557
prevFreeRegionSize = nextFreeRegionSize;
9558
}
9559
}
9560
9561
// No moves performed; update statistics to the current vector state.
9562
if (startMoveCount == m_Moves.size() && !update)
9563
{
9564
vectorState.avgAllocSize = UINT64_MAX;
9565
return ComputeDefragmentation_Balanced(vector, index, false);
9566
}
9567
return false;
9568
}
9569
9570
bool DefragmentationContextPimpl::ComputeDefragmentation_Full(BlockVector& vector)
9571
{
9572
// Go over every allocation and try to fit it in previous blocks at lowest offsets,
9573
// if not possible: realloc within single block to minimize offset (exclude offset == 0)
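// Unlike the BALANCED algorithm, FULL always attempts the in-block
// reallocation to a lower offset, without the "noticeable gap" heuristic.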
9574
9575
for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i)
9576
{
9577
NormalBlock* block = vector.GetBlock(i);
9578
BlockMetadata* metadata = block->m_pMetadata;
9579
9580
for (AllocHandle handle = metadata->GetAllocationListBegin();
9581
handle != (AllocHandle)0;
9582
handle = metadata->GetNextAllocation(handle))
9583
{
9584
MoveAllocationData moveData = GetMoveData(handle, metadata);
9585
// Ignore allocations newly created by the defragmentation algorithm itself.
9586
if (moveData.move.pSrcAllocation->GetPrivateData() == this)
9587
continue;
9588
switch (CheckCounters(moveData.move.pSrcAllocation->GetSize()))
9589
{
9590
case CounterStatus::Ignore:
9591
continue;
9592
case CounterStatus::End:
9593
return true;
9594
default:
9595
D3D12MA_ASSERT(0);
9596
case CounterStatus::Pass:
9597
break;
9598
}
9599
9600
// Check all previous blocks for free space
9601
const size_t prevMoveCount = m_Moves.size();
9602
if (AllocInOtherBlock(0, i, moveData, vector))
9603
return true;
9604
9605
// If no room was found, realloc within the same block to a lower offset.
9606
UINT64 offset = moveData.move.pSrcAllocation->GetOffset();
9607
if (prevMoveCount == m_Moves.size() && offset != 0 && metadata->GetSumFreeSize() >= moveData.size)
9608
{
9609
AllocationRequest request = {};
9610
if (metadata->CreateAllocationRequest(
9611
moveData.size,
9612
moveData.alignment,
9613
false,
9614
ALLOCATION_FLAG_STRATEGY_MIN_OFFSET,
9615
&request))
9616
{
9617
if (metadata->GetAllocationOffset(request.allocHandle) < offset)
9618
{
9619
if (SUCCEEDED(vector.CommitAllocationRequest(
9620
request,
9621
block,
9622
moveData.size,
9623
moveData.alignment,
9624
this,
9625
&moveData.move.pDstTmpAllocation)))
9626
{
9627
m_Moves.push_back(moveData.move);
9628
if (IncrementCounters(moveData.size))
9629
return true;
9630
}
9631
}
9632
}
9633
}
9634
}
9635
}
9636
return false;
9637
}
9638
9639
void DefragmentationContextPimpl::UpdateVectorStatistics(BlockVector& vector, StateBalanced& state)
9640
{
9641
size_t allocCount = 0;
9642
size_t freeCount = 0;
9643
state.avgFreeSize = 0;
9644
state.avgAllocSize = 0;
9645
9646
for (size_t i = 0; i < vector.GetBlockCount(); ++i)
9647
{
9648
BlockMetadata* metadata = vector.GetBlock(i)->m_pMetadata;
9649
9650
allocCount += metadata->GetAllocationCount();
9651
freeCount += metadata->GetFreeRegionsCount();
9652
state.avgFreeSize += metadata->GetSumFreeSize();
9653
state.avgAllocSize += metadata->GetSize();
9654
}
9655
9656
state.avgAllocSize = (state.avgAllocSize - state.avgFreeSize) / allocCount;
9657
state.avgFreeSize /= freeCount;
9658
}
9659
#endif // _D3D12MA_DEFRAGMENTATION_CONTEXT_PIMPL_FUNCTIONS
9660
9661
#ifndef _D3D12MA_POOL_PIMPL_FUNCTIONS
9662
PoolPimpl::PoolPimpl(AllocatorPimpl* allocator, const POOL_DESC& desc)
9663
: m_Allocator(allocator),
9664
m_Desc(desc),
9665
m_BlockVector(NULL),
9666
m_Name(NULL)
9667
{
9668
const bool explicitBlockSize = desc.BlockSize != 0;
9669
const UINT64 preferredBlockSize = explicitBlockSize ? desc.BlockSize : D3D12MA_DEFAULT_BLOCK_SIZE;
9670
UINT maxBlockCount = desc.MaxBlockCount != 0 ? desc.MaxBlockCount : UINT_MAX;
9671
9672
#ifndef __ID3D12Device4_INTERFACE_DEFINED__
9673
D3D12MA_ASSERT(m_Desc.pProtectedSession == NULL);
9674
#endif
9675
9676
m_BlockVector = D3D12MA_NEW(allocator->GetAllocs(), BlockVector)(
9677
allocator, desc.HeapProperties, desc.HeapFlags,
9678
preferredBlockSize,
9679
desc.MinBlockCount, maxBlockCount,
9680
explicitBlockSize,
9681
D3D12MA_MAX(desc.MinAllocationAlignment, (UINT64)D3D12MA_DEBUG_ALIGNMENT),
9682
(desc.Flags & POOL_FLAG_ALGORITHM_MASK) != 0,
9683
(desc.Flags & POOL_FLAG_MSAA_TEXTURES_ALWAYS_COMMITTED) != 0,
9684
desc.pProtectedSession,
9685
desc.ResidencyPriority);
9686
}
9687
9688
PoolPimpl::~PoolPimpl()
9689
{
9690
D3D12MA_ASSERT(m_PrevPool == NULL && m_NextPool == NULL);
9691
FreeName();
9692
D3D12MA_DELETE(m_Allocator->GetAllocs(), m_BlockVector);
9693
}
9694
9695
HRESULT PoolPimpl::Init()
9696
{
9697
m_CommittedAllocations.Init(m_Allocator->UseMutex(), m_Desc.HeapProperties.Type, this);
9698
return m_BlockVector->CreateMinBlocks();
9699
}
9700
9701
void PoolPimpl::GetStatistics(Statistics& outStats)
9702
{
9703
ClearStatistics(outStats);
9704
m_BlockVector->AddStatistics(outStats);
9705
m_CommittedAllocations.AddStatistics(outStats);
9706
}
9707
9708
void PoolPimpl::CalculateStatistics(DetailedStatistics& outStats)
9709
{
9710
ClearDetailedStatistics(outStats);
9711
AddDetailedStatistics(outStats);
9712
}
9713
9714
void PoolPimpl::AddDetailedStatistics(DetailedStatistics& inoutStats)
9715
{
9716
m_BlockVector->AddDetailedStatistics(inoutStats);
9717
m_CommittedAllocations.AddDetailedStatistics(inoutStats);
9718
}
9719
9720
void PoolPimpl::SetName(LPCWSTR Name)
9721
{
9722
FreeName();
9723
9724
if (Name)
9725
{
9726
const size_t nameCharCount = wcslen(Name) + 1;
9727
m_Name = D3D12MA_NEW_ARRAY(m_Allocator->GetAllocs(), WCHAR, nameCharCount);
9728
memcpy(m_Name, Name, nameCharCount * sizeof(WCHAR));
9729
}
9730
}
9731
9732
void PoolPimpl::FreeName()
9733
{
9734
if (m_Name)
9735
{
9736
const size_t nameCharCount = wcslen(m_Name) + 1;
9737
D3D12MA_DELETE_ARRAY(m_Allocator->GetAllocs(), m_Name, nameCharCount);
9738
m_Name = NULL;
9739
}
9740
}
9741
#endif // _D3D12MA_POOL_PIMPL_FUNCTIONS
9742
9743
9744
#ifndef _D3D12MA_PUBLIC_INTERFACE
9745
HRESULT CreateAllocator(const ALLOCATOR_DESC* pDesc, Allocator** ppAllocator)
9746
{
9747
if (!pDesc || !ppAllocator || !pDesc->pDevice || !pDesc->pAdapter ||
9748
!(pDesc->PreferredBlockSize == 0 || (pDesc->PreferredBlockSize >= 16 && pDesc->PreferredBlockSize < 0x10000000000ull)))
9749
{
9750
D3D12MA_ASSERT(0 && "Invalid arguments passed to CreateAllocator.");
9751
return E_INVALIDARG;
9752
}
9753
9754
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
9755
9756
ALLOCATION_CALLBACKS allocationCallbacks;
9757
SetupAllocationCallbacks(allocationCallbacks, pDesc->pAllocationCallbacks);
9758
9759
*ppAllocator = D3D12MA_NEW(allocationCallbacks, Allocator)(allocationCallbacks, *pDesc);
9760
HRESULT hr = (*ppAllocator)->m_Pimpl->Init(*pDesc);
9761
if (FAILED(hr))
9762
{
9763
D3D12MA_DELETE(allocationCallbacks, *ppAllocator);
9764
*ppAllocator = NULL;
9765
}
9766
return hr;
9767
}
9768
9769
HRESULT CreateVirtualBlock(const VIRTUAL_BLOCK_DESC* pDesc, VirtualBlock** ppVirtualBlock)
9770
{
9771
if (!pDesc || !ppVirtualBlock)
9772
{
9773
D3D12MA_ASSERT(0 && "Invalid arguments passed to CreateVirtualBlock.");
9774
return E_INVALIDARG;
9775
}
9776
9777
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
9778
9779
ALLOCATION_CALLBACKS allocationCallbacks;
9780
SetupAllocationCallbacks(allocationCallbacks, pDesc->pAllocationCallbacks);
9781
9782
*ppVirtualBlock = D3D12MA_NEW(allocationCallbacks, VirtualBlock)(allocationCallbacks, *pDesc);
9783
return S_OK;
9784
}
9785
9786
#ifndef _D3D12MA_IUNKNOWN_IMPL_FUNCTIONS
9787
HRESULT STDMETHODCALLTYPE IUnknownImpl::QueryInterface(REFIID riid, void** ppvObject)
9788
{
9789
if (ppvObject == NULL)
9790
return E_POINTER;
9791
if (riid == IID_IUnknown)
9792
{
9793
++m_RefCount;
9794
*ppvObject = this;
9795
return S_OK;
9796
}
9797
*ppvObject = NULL;
9798
return E_NOINTERFACE;
9799
}
9800
9801
ULONG STDMETHODCALLTYPE IUnknownImpl::AddRef()
9802
{
9803
return ++m_RefCount;
9804
}
9805
9806
ULONG STDMETHODCALLTYPE IUnknownImpl::Release()
9807
{
9808
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
9809
9810
const uint32_t newRefCount = --m_RefCount;
9811
if (newRefCount == 0)
9812
ReleaseThis();
9813
return newRefCount;
9814
}
9815
#endif // _D3D12MA_IUNKNOWN_IMPL_FUNCTIONS
9816
9817
#ifndef _D3D12MA_ALLOCATION_FUNCTIONS
9818
void Allocation::PackedData::SetType(Type type)
9819
{
9820
const UINT u = (UINT)type;
9821
D3D12MA_ASSERT(u < (1u << 2));
9822
m_Type = u;
9823
}
9824
9825
void Allocation::PackedData::SetResourceDimension(D3D12_RESOURCE_DIMENSION resourceDimension)
9826
{
9827
const UINT u = (UINT)resourceDimension;
9828
D3D12MA_ASSERT(u < (1u << 3));
9829
m_ResourceDimension = u;
9830
}
9831
9832
void Allocation::PackedData::SetResourceFlags(D3D12_RESOURCE_FLAGS resourceFlags)
9833
{
9834
const UINT u = (UINT)resourceFlags;
9835
D3D12MA_ASSERT(u < (1u << 24));
9836
m_ResourceFlags = u;
9837
}
9838
9839
void Allocation::PackedData::SetTextureLayout(D3D12_TEXTURE_LAYOUT textureLayout)
9840
{
9841
const UINT u = (UINT)textureLayout;
9842
D3D12MA_ASSERT(u < (1u << 9));
9843
m_TextureLayout = u;
9844
}
9845
9846
UINT64 Allocation::GetOffset() const
9847
{
9848
switch (m_PackedData.GetType())
9849
{
9850
case TYPE_COMMITTED:
9851
case TYPE_HEAP:
9852
return 0;
9853
case TYPE_PLACED:
9854
return m_Placed.block->m_pMetadata->GetAllocationOffset(m_Placed.allocHandle);
9855
default:
9856
D3D12MA_ASSERT(0);
9857
return 0;
9858
}
9859
}
9860
9861
void Allocation::SetResource(ID3D12Resource* pResource)
9862
{
9863
if (pResource != m_Resource)
9864
{
9865
if (m_Resource)
9866
m_Resource->Release();
9867
m_Resource = pResource;
9868
if (m_Resource)
9869
m_Resource->AddRef();
9870
}
9871
}
9872
9873
ID3D12Heap* Allocation::GetHeap() const
9874
{
9875
switch (m_PackedData.GetType())
9876
{
9877
case TYPE_COMMITTED:
9878
return NULL;
9879
case TYPE_PLACED:
9880
return m_Placed.block->GetHeap();
9881
case TYPE_HEAP:
9882
return m_Heap.heap;
9883
default:
9884
D3D12MA_ASSERT(0);
9885
return 0;
9886
}
9887
}
9888
9889
void Allocation::SetName(LPCWSTR Name)
9890
{
9891
FreeName();
9892
9893
if (Name)
9894
{
9895
const size_t nameCharCount = wcslen(Name) + 1;
9896
m_Name = D3D12MA_NEW_ARRAY(m_Allocator->GetAllocs(), WCHAR, nameCharCount);
9897
memcpy(m_Name, Name, nameCharCount * sizeof(WCHAR));
9898
}
9899
}
9900
9901
void Allocation::ReleaseThis()
9902
{
9903
if (this == NULL)
9904
{
9905
return;
9906
}
9907
9908
SAFE_RELEASE(m_Resource);
9909
9910
switch (m_PackedData.GetType())
9911
{
9912
case TYPE_COMMITTED:
9913
m_Allocator->FreeCommittedMemory(this);
9914
break;
9915
case TYPE_PLACED:
9916
m_Allocator->FreePlacedMemory(this);
9917
break;
9918
case TYPE_HEAP:
9919
m_Allocator->FreeHeapMemory(this);
9920
break;
9921
}
9922
9923
FreeName();
9924
9925
m_Allocator->GetAllocationObjectAllocator().Free(this);
9926
}
9927
9928
Allocation::Allocation(AllocatorPimpl* allocator, UINT64 size, UINT64 alignment, BOOL wasZeroInitialized)
9929
: m_Allocator{ allocator },
9930
m_Size{ size },
9931
m_Alignment{ alignment },
9932
m_Resource{ NULL },
9933
m_pPrivateData{ NULL },
9934
m_Name{ NULL }
9935
{
9936
D3D12MA_ASSERT(allocator);
9937
9938
m_PackedData.SetType(TYPE_COUNT);
9939
m_PackedData.SetResourceDimension(D3D12_RESOURCE_DIMENSION_UNKNOWN);
9940
m_PackedData.SetResourceFlags(D3D12_RESOURCE_FLAG_NONE);
9941
m_PackedData.SetTextureLayout(D3D12_TEXTURE_LAYOUT_UNKNOWN);
9942
m_PackedData.SetWasZeroInitialized(wasZeroInitialized);
9943
}
9944
9945
void Allocation::InitCommitted(CommittedAllocationList* list)
9946
{
9947
m_PackedData.SetType(TYPE_COMMITTED);
9948
m_Committed.list = list;
9949
m_Committed.prev = NULL;
9950
m_Committed.next = NULL;
9951
}
9952
9953
void Allocation::InitPlaced(AllocHandle allocHandle, NormalBlock* block)
9954
{
9955
m_PackedData.SetType(TYPE_PLACED);
9956
m_Placed.allocHandle = allocHandle;
9957
m_Placed.block = block;
9958
}
9959
9960
void Allocation::InitHeap(CommittedAllocationList* list, ID3D12Heap* heap)
9961
{
9962
m_PackedData.SetType(TYPE_HEAP);
9963
m_Heap.list = list;
9964
m_Committed.prev = NULL;
9965
m_Committed.next = NULL;
9966
m_Heap.heap = heap;
9967
}
9968
9969
void Allocation::SwapBlockAllocation(Allocation* allocation)
9970
{
9971
D3D12MA_ASSERT(allocation != NULL);
9972
D3D12MA_ASSERT(m_PackedData.GetType() == TYPE_PLACED);
9973
D3D12MA_ASSERT(allocation->m_PackedData.GetType() == TYPE_PLACED);
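// Used by defragmentation for DEFRAGMENTATION_MOVE_OPERATION_COPY: this
// allocation adopts the destination's resource and placement, while the
// temporary destination allocation takes over the old placement so that
// releasing it frees the old location.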
9974
9975
D3D12MA_SWAP(m_Resource, allocation->m_Resource);
9976
m_PackedData.SetWasZeroInitialized(allocation->m_PackedData.WasZeroInitialized());
9977
m_Placed.block->m_pMetadata->SetAllocationPrivateData(m_Placed.allocHandle, allocation);
9978
D3D12MA_SWAP(m_Placed, allocation->m_Placed);
9979
m_Placed.block->m_pMetadata->SetAllocationPrivateData(m_Placed.allocHandle, this);
9980
}
9981
9982
AllocHandle Allocation::GetAllocHandle() const
9983
{
9984
switch (m_PackedData.GetType())
9985
{
9986
case TYPE_COMMITTED:
9987
case TYPE_HEAP:
9988
return (AllocHandle)0;
9989
case TYPE_PLACED:
9990
return m_Placed.allocHandle;
9991
default:
9992
D3D12MA_ASSERT(0);
9993
return (AllocHandle)0;
9994
}
9995
}
9996
9997
NormalBlock* Allocation::GetBlock()
9998
{
9999
switch (m_PackedData.GetType())
10000
{
10001
case TYPE_COMMITTED:
10002
case TYPE_HEAP:
10003
return NULL;
10004
case TYPE_PLACED:
10005
return m_Placed.block;
10006
default:
10007
D3D12MA_ASSERT(0);
10008
return NULL;
10009
}
10010
}
10011
10012
template<typename D3D12_RESOURCE_DESC_T>
10013
void Allocation::SetResourcePointer(ID3D12Resource* resource, const D3D12_RESOURCE_DESC_T* pResourceDesc)
10014
{
10015
D3D12MA_ASSERT(m_Resource == NULL && pResourceDesc);
10016
m_Resource = resource;
10017
m_PackedData.SetResourceDimension(pResourceDesc->Dimension);
10018
m_PackedData.SetResourceFlags(pResourceDesc->Flags);
10019
m_PackedData.SetTextureLayout(pResourceDesc->Layout);
10020
}
10021
10022
void Allocation::FreeName()
10023
{
10024
if (m_Name)
10025
{
10026
const size_t nameCharCount = wcslen(m_Name) + 1;
10027
D3D12MA_DELETE_ARRAY(m_Allocator->GetAllocs(), m_Name, nameCharCount);
10028
m_Name = NULL;
10029
}
10030
}
10031
#endif // _D3D12MA_ALLOCATION_FUNCTIONS
10032
10033
#ifndef _D3D12MA_DEFRAGMENTATION_CONTEXT_FUNCTIONS
10034
HRESULT DefragmentationContext::BeginPass(DEFRAGMENTATION_PASS_MOVE_INFO* pPassInfo)
10035
{
10036
D3D12MA_ASSERT(pPassInfo);
10037
return m_Pimpl->DefragmentPassBegin(*pPassInfo);
10038
}
10039
10040
HRESULT DefragmentationContext::EndPass(DEFRAGMENTATION_PASS_MOVE_INFO* pPassInfo)
10041
{
10042
D3D12MA_ASSERT(pPassInfo);
10043
return m_Pimpl->DefragmentPassEnd(*pPassInfo);
10044
}
10045
10046
void DefragmentationContext::GetStats(DEFRAGMENTATION_STATS* pStats)
10047
{
10048
D3D12MA_ASSERT(pStats);
10049
m_Pimpl->GetStats(*pStats);
10050
}
10051
10052
void DefragmentationContext::ReleaseThis()
10053
{
10054
if (this == NULL)
10055
{
10056
return;
10057
}
10058
10059
D3D12MA_DELETE(m_Pimpl->GetAllocs(), this);
10060
}
10061
10062
DefragmentationContext::DefragmentationContext(AllocatorPimpl* allocator,
10063
const DEFRAGMENTATION_DESC& desc,
10064
BlockVector* poolVector)
10065
: m_Pimpl(D3D12MA_NEW(allocator->GetAllocs(), DefragmentationContextPimpl)(allocator, desc, poolVector)) {}
10066
10067
DefragmentationContext::~DefragmentationContext()
10068
{
10069
D3D12MA_DELETE(m_Pimpl->GetAllocs(), m_Pimpl);
10070
}
10071
#endif // _D3D12MA_DEFRAGMENTATION_CONTEXT_FUNCTIONS
10072
10073
#ifndef _D3D12MA_POOL_FUNCTIONS
10074
POOL_DESC Pool::GetDesc() const
10075
{
10076
return m_Pimpl->GetDesc();
10077
}
10078
10079
void Pool::GetStatistics(Statistics* pStats)
10080
{
10081
D3D12MA_ASSERT(pStats);
10082
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
10083
m_Pimpl->GetStatistics(*pStats);
10084
}
10085
10086
void Pool::CalculateStatistics(DetailedStatistics* pStats)
10087
{
10088
D3D12MA_ASSERT(pStats);
10089
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
10090
m_Pimpl->CalculateStatistics(*pStats);
10091
}
10092
10093
void Pool::SetName(LPCWSTR Name)
10094
{
10095
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
10096
m_Pimpl->SetName(Name);
10097
}
10098
10099
LPCWSTR Pool::GetName() const
10100
{
10101
return m_Pimpl->GetName();
10102
}
10103
10104
HRESULT Pool::BeginDefragmentation(const DEFRAGMENTATION_DESC* pDesc, DefragmentationContext** ppContext)
10105
{
10106
D3D12MA_ASSERT(pDesc && ppContext);
10107
10108
// Check for support
10109
if (m_Pimpl->GetBlockVector()->GetAlgorithm() & POOL_FLAG_ALGORITHM_LINEAR)
10110
return E_NOINTERFACE;
10111
10112
AllocatorPimpl* allocator = m_Pimpl->GetAllocator();
10113
*ppContext = D3D12MA_NEW(allocator->GetAllocs(), DefragmentationContext)(allocator, *pDesc, m_Pimpl->GetBlockVector());
10114
return S_OK;
10115
}
10116
10117
void Pool::ReleaseThis()
10118
{
10119
if (this == NULL)
10120
{
10121
return;
10122
}
10123
10124
D3D12MA_DELETE(m_Pimpl->GetAllocator()->GetAllocs(), this);
10125
}
10126
10127
Pool::Pool(Allocator* allocator, const POOL_DESC& desc)
10128
: m_Pimpl(D3D12MA_NEW(allocator->m_Pimpl->GetAllocs(), PoolPimpl)(allocator->m_Pimpl, desc)) {}
10129
10130
Pool::~Pool()
10131
{
10132
m_Pimpl->GetAllocator()->UnregisterPool(this, m_Pimpl->GetDesc().HeapProperties.Type);
10133
10134
D3D12MA_DELETE(m_Pimpl->GetAllocator()->GetAllocs(), m_Pimpl);
10135
}
10136
#endif // _D3D12MA_POOL_FUNCTIONS
10137
10138
#ifndef _D3D12MA_ALLOCATOR_FUNCTIONS
10139
const D3D12_FEATURE_DATA_D3D12_OPTIONS& Allocator::GetD3D12Options() const
10140
{
10141
return m_Pimpl->GetD3D12Options();
10142
}
10143
10144
BOOL Allocator::IsUMA() const
10145
{
10146
return m_Pimpl->IsUMA();
10147
}
10148
10149
BOOL Allocator::IsCacheCoherentUMA() const
10150
{
10151
return m_Pimpl->IsCacheCoherentUMA();
10152
}
10153
10154
UINT64 Allocator::GetMemoryCapacity(UINT memorySegmentGroup) const
10155
{
10156
return m_Pimpl->GetMemoryCapacity(memorySegmentGroup);
10157
}
10158
10159
HRESULT Allocator::CreateResource(
10160
const ALLOCATION_DESC* pAllocDesc,
10161
const D3D12_RESOURCE_DESC* pResourceDesc,
10162
D3D12_RESOURCE_STATES InitialResourceState,
10163
const D3D12_CLEAR_VALUE* pOptimizedClearValue,
10164
Allocation** ppAllocation,
10165
REFIID riidResource,
10166
void** ppvResource)
10167
{
10168
if (!pAllocDesc || !pResourceDesc || !ppAllocation)
10169
{
10170
D3D12MA_ASSERT(0 && "Invalid arguments passed to Allocator::CreateResource.");
10171
return E_INVALIDARG;
10172
}
10173
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
10174
return m_Pimpl->CreateResource(
10175
pAllocDesc,
10176
CREATE_RESOURCE_PARAMS(pResourceDesc, InitialResourceState, pOptimizedClearValue),
10177
ppAllocation,
10178
riidResource,
10179
ppvResource);
10180
}
10181
10182
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
10183
HRESULT Allocator::CreateResource2(
10184
const ALLOCATION_DESC* pAllocDesc,
10185
const D3D12_RESOURCE_DESC1* pResourceDesc,
10186
D3D12_RESOURCE_STATES InitialResourceState,
10187
const D3D12_CLEAR_VALUE* pOptimizedClearValue,
10188
Allocation** ppAllocation,
10189
REFIID riidResource,
10190
void** ppvResource)
10191
{
10192
if (!pAllocDesc || !pResourceDesc || !ppAllocation)
10193
{
10194
D3D12MA_ASSERT(0 && "Invalid arguments passed to Allocator::CreateResource2.");
10195
return E_INVALIDARG;
10196
}
10197
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
10198
return m_Pimpl->CreateResource(
10199
pAllocDesc,
10200
CREATE_RESOURCE_PARAMS(pResourceDesc, InitialResourceState, pOptimizedClearValue),
10201
ppAllocation,
10202
riidResource,
10203
ppvResource);
10204
}
10205
#endif // #ifdef __ID3D12Device8_INTERFACE_DEFINED__
10206
10207
#ifdef __ID3D12Device10_INTERFACE_DEFINED__
10208
HRESULT Allocator::CreateResource3(
10209
const ALLOCATION_DESC* pAllocDesc,
10210
const D3D12_RESOURCE_DESC1* pResourceDesc,
10211
D3D12_BARRIER_LAYOUT InitialLayout,
10212
const D3D12_CLEAR_VALUE* pOptimizedClearValue,
10213
UINT32 NumCastableFormats,
10214
DXGI_FORMAT* pCastableFormats,
10215
Allocation** ppAllocation,
10216
REFIID riidResource,
10217
void** ppvResource)
10218
{
10219
if (!pAllocDesc || !pResourceDesc || !ppAllocation)
10220
{
10221
D3D12MA_ASSERT(0 && "Invalid arguments passed to Allocator::CreateResource3.");
10222
return E_INVALIDARG;
10223
}
10224
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
10225
return m_Pimpl->CreateResource(
10226
pAllocDesc,
10227
CREATE_RESOURCE_PARAMS(pResourceDesc, InitialLayout, pOptimizedClearValue, NumCastableFormats, pCastableFormats),
10228
ppAllocation,
10229
riidResource,
10230
ppvResource);
10231
}
10232
#endif // #ifdef __ID3D12Device10_INTERFACE_DEFINED__
10233
10234
HRESULT Allocator::AllocateMemory(
10235
const ALLOCATION_DESC* pAllocDesc,
10236
const D3D12_RESOURCE_ALLOCATION_INFO* pAllocInfo,
10237
Allocation** ppAllocation)
10238
{
10239
if (!ValidateAllocateMemoryParameters(pAllocDesc, pAllocInfo, ppAllocation))
10240
{
10241
D3D12MA_ASSERT(0 && "Invalid arguments passed to Allocator::AllocateMemory.");
10242
return E_INVALIDARG;
10243
}
10244
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
10245
return m_Pimpl->AllocateMemory(pAllocDesc, pAllocInfo, ppAllocation);
10246
}
10247
10248
HRESULT Allocator::CreateAliasingResource(
10249
Allocation* pAllocation,
10250
UINT64 AllocationLocalOffset,
10251
const D3D12_RESOURCE_DESC* pResourceDesc,
10252
D3D12_RESOURCE_STATES InitialResourceState,
10253
const D3D12_CLEAR_VALUE* pOptimizedClearValue,
10254
REFIID riidResource,
10255
void** ppvResource)
10256
{
10257
if (!pAllocation || !pResourceDesc || !ppvResource)
10258
{
10259
D3D12MA_ASSERT(0 && "Invalid arguments passed to Allocator::CreateAliasingResource.");
10260
return E_INVALIDARG;
10261
}
10262
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
10263
return m_Pimpl->CreateAliasingResource(
10264
pAllocation,
10265
AllocationLocalOffset,
10266
CREATE_RESOURCE_PARAMS(pResourceDesc, InitialResourceState, pOptimizedClearValue),
10267
riidResource,
10268
ppvResource);
10269
}
10270
10271
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
10272
HRESULT Allocator::CreateAliasingResource1(
10273
Allocation* pAllocation,
10274
UINT64 AllocationLocalOffset,
10275
const D3D12_RESOURCE_DESC1* pResourceDesc,
10276
D3D12_RESOURCE_STATES InitialResourceState,
10277
const D3D12_CLEAR_VALUE* pOptimizedClearValue,
10278
REFIID riidResource,
10279
void** ppvResource)
10280
{
10281
if (!pAllocation || !pResourceDesc || !ppvResource)
10282
{
10283
D3D12MA_ASSERT(0 && "Invalid arguments passed to Allocator::CreateAliasingResource.");
10284
return E_INVALIDARG;
10285
}
10286
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
10287
return m_Pimpl->CreateAliasingResource(
10288
pAllocation,
10289
AllocationLocalOffset,
10290
CREATE_RESOURCE_PARAMS(pResourceDesc, InitialResourceState, pOptimizedClearValue),
10291
riidResource,
10292
ppvResource);
10293
}
10294
#endif // #ifdef __ID3D12Device8_INTERFACE_DEFINED__
10295
10296
#ifdef __ID3D12Device10_INTERFACE_DEFINED__
10297
HRESULT Allocator::CreateAliasingResource2(
10298
Allocation* pAllocation,
10299
UINT64 AllocationLocalOffset,
10300
const D3D12_RESOURCE_DESC1* pResourceDesc,
10301
D3D12_BARRIER_LAYOUT InitialLayout,
10302
const D3D12_CLEAR_VALUE* pOptimizedClearValue,
10303
UINT32 NumCastableFormats,
10304
DXGI_FORMAT* pCastableFormats,
10305
REFIID riidResource,
10306
void** ppvResource)
10307
{
10308
if (!pAllocation || !pResourceDesc || !ppvResource)
10309
{
10310
D3D12MA_ASSERT(0 && "Invalid arguments passed to Allocator::CreateAliasingResource.");
10311
return E_INVALIDARG;
10312
}
10313
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
10314
return m_Pimpl->CreateAliasingResource(
10315
pAllocation,
10316
AllocationLocalOffset,
10317
CREATE_RESOURCE_PARAMS(pResourceDesc, InitialLayout, pOptimizedClearValue, NumCastableFormats, pCastableFormats),
10318
riidResource,
10319
ppvResource);
10320
}
10321
#endif // #ifdef __ID3D12Device10_INTERFACE_DEFINED__
10322
10323
HRESULT Allocator::CreatePool(
10324
const POOL_DESC* pPoolDesc,
10325
Pool** ppPool)
10326
{
10327
if (!pPoolDesc || !ppPool ||
10328
(pPoolDesc->MaxBlockCount > 0 && pPoolDesc->MaxBlockCount < pPoolDesc->MinBlockCount) ||
10329
(pPoolDesc->MinAllocationAlignment > 0 && !IsPow2(pPoolDesc->MinAllocationAlignment)))
10330
{
10331
D3D12MA_ASSERT(0 && "Invalid arguments passed to Allocator::CreatePool.");
10332
return E_INVALIDARG;
10333
}
10334
if (!m_Pimpl->HeapFlagsFulfillResourceHeapTier(pPoolDesc->HeapFlags))
10335
{
10336
D3D12MA_ASSERT(0 && "Invalid pPoolDesc->HeapFlags passed to Allocator::CreatePool. Did you forget to handle ResourceHeapTier=1?");
10337
return E_INVALIDARG;
10338
}
10339
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
10340
*ppPool = D3D12MA_NEW(m_Pimpl->GetAllocs(), Pool)(this, *pPoolDesc);
10341
HRESULT hr = (*ppPool)->m_Pimpl->Init();
10342
if (SUCCEEDED(hr))
10343
{
10344
m_Pimpl->RegisterPool(*ppPool, pPoolDesc->HeapProperties.Type);
10345
}
10346
else
10347
{
10348
D3D12MA_DELETE(m_Pimpl->GetAllocs(), *ppPool);
10349
*ppPool = NULL;
10350
}
10351
return hr;
10352
}
10353
10354
void Allocator::SetCurrentFrameIndex(UINT frameIndex)
10355
{
10356
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
10357
m_Pimpl->SetCurrentFrameIndex(frameIndex);
10358
}
10359
10360
void Allocator::GetBudget(Budget* pLocalBudget, Budget* pNonLocalBudget)
10361
{
10362
if (pLocalBudget == NULL && pNonLocalBudget == NULL)
10363
{
10364
return;
10365
}
10366
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
10367
m_Pimpl->GetBudget(pLocalBudget, pNonLocalBudget);
10368
}
10369
10370
void Allocator::CalculateStatistics(TotalStatistics* pStats)
10371
{
10372
D3D12MA_ASSERT(pStats);
10373
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
10374
m_Pimpl->CalculateStatistics(*pStats);
10375
}
10376
10377
void Allocator::BuildStatsString(WCHAR** ppStatsString, BOOL DetailedMap) const
10378
{
10379
D3D12MA_ASSERT(ppStatsString);
10380
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
10381
m_Pimpl->BuildStatsString(ppStatsString, DetailedMap);
10382
}
10383
10384
void Allocator::FreeStatsString(WCHAR* pStatsString) const
10385
{
10386
if (pStatsString != NULL)
10387
{
10388
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
10389
m_Pimpl->FreeStatsString(pStatsString);
10390
}
10391
}
10392
10393
void Allocator::BeginDefragmentation(const DEFRAGMENTATION_DESC* pDesc, DefragmentationContext** ppContext)
10394
{
10395
D3D12MA_ASSERT(pDesc && ppContext);
10396
10397
*ppContext = D3D12MA_NEW(m_Pimpl->GetAllocs(), DefragmentationContext)(m_Pimpl, *pDesc, NULL);
10398
}
10399
10400
void Allocator::ReleaseThis()
10401
{
10402
// A copy is needed because otherwise the destructor would invalidate the structure with the callbacks before we could use it to free the memory.
10403
const ALLOCATION_CALLBACKS allocationCallbacksCopy = m_Pimpl->GetAllocs();
10404
D3D12MA_DELETE(allocationCallbacksCopy, this);
10405
}
10406
10407
Allocator::Allocator(const ALLOCATION_CALLBACKS& allocationCallbacks, const ALLOCATOR_DESC& desc)
10408
: m_Pimpl(D3D12MA_NEW(allocationCallbacks, AllocatorPimpl)(allocationCallbacks, desc)) {}
10409
10410
Allocator::~Allocator()
10411
{
10412
D3D12MA_DELETE(m_Pimpl->GetAllocs(), m_Pimpl);
10413
}
10414
#endif // _D3D12MA_ALLOCATOR_FUNCTIONS
10415
10416
#ifndef _D3D12MA_VIRTUAL_BLOCK_FUNCTIONS
10417
BOOL VirtualBlock::IsEmpty() const
10418
{
10419
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
10420
return m_Pimpl->m_Metadata->IsEmpty() ? TRUE : FALSE;
10421
}
10422
10423
void VirtualBlock::GetAllocationInfo(VirtualAllocation allocation, VIRTUAL_ALLOCATION_INFO* pInfo) const
10424
{
10425
D3D12MA_ASSERT(allocation.AllocHandle != (AllocHandle)0 && pInfo);
10426
10427
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
10428
m_Pimpl->m_Metadata->GetAllocationInfo(allocation.AllocHandle, *pInfo);
10429
}
10430
10431
HRESULT VirtualBlock::Allocate(const VIRTUAL_ALLOCATION_DESC* pDesc, VirtualAllocation* pAllocation, UINT64* pOffset)
10432
{
10433
if (!pDesc || !pAllocation || pDesc->Size == 0 || !IsPow2(pDesc->Alignment))
10434
{
10435
D3D12MA_ASSERT(0 && "Invalid arguments passed to VirtualBlock::Allocate.");
10436
return E_INVALIDARG;
10437
}
10438
10439
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
10440
10441
const UINT64 alignment = pDesc->Alignment != 0 ? pDesc->Alignment : 1;
10442
AllocationRequest allocRequest = {};
10443
if (m_Pimpl->m_Metadata->CreateAllocationRequest(
10444
pDesc->Size,
10445
alignment,
10446
pDesc->Flags & VIRTUAL_ALLOCATION_FLAG_UPPER_ADDRESS,
10447
pDesc->Flags & VIRTUAL_ALLOCATION_FLAG_STRATEGY_MASK,
10448
&allocRequest))
10449
{
10450
m_Pimpl->m_Metadata->Alloc(allocRequest, pDesc->Size, pDesc->pPrivateData);
10451
D3D12MA_HEAVY_ASSERT(m_Pimpl->m_Metadata->Validate());
10452
pAllocation->AllocHandle = allocRequest.allocHandle;
10453
10454
if (pOffset)
10455
*pOffset = m_Pimpl->m_Metadata->GetAllocationOffset(allocRequest.allocHandle);
10456
return S_OK;
10457
}
10458
10459
pAllocation->AllocHandle = (AllocHandle)0;
10460
if (pOffset)
10461
*pOffset = UINT64_MAX;
10462
10463
return E_OUTOFMEMORY;
10464
}
10465
10466
void VirtualBlock::FreeAllocation(VirtualAllocation allocation)
10467
{
10468
if (allocation.AllocHandle == (AllocHandle)0)
10469
return;
10470
10471
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
10472
10473
m_Pimpl->m_Metadata->Free(allocation.AllocHandle);
10474
D3D12MA_HEAVY_ASSERT(m_Pimpl->m_Metadata->Validate());
10475
}
10476
10477
void VirtualBlock::Clear()
10478
{
10479
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
10480
10481
m_Pimpl->m_Metadata->Clear();
10482
D3D12MA_HEAVY_ASSERT(m_Pimpl->m_Metadata->Validate());
10483
}
10484
10485
void VirtualBlock::SetAllocationPrivateData(VirtualAllocation allocation, void* pPrivateData)
10486
{
10487
D3D12MA_ASSERT(allocation.AllocHandle != (AllocHandle)0);
10488
10489
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
10490
m_Pimpl->m_Metadata->SetAllocationPrivateData(allocation.AllocHandle, pPrivateData);
10491
}
10492
10493
void VirtualBlock::GetStatistics(Statistics* pStats) const
10494
{
10495
D3D12MA_ASSERT(pStats);
10496
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
10497
D3D12MA_HEAVY_ASSERT(m_Pimpl->m_Metadata->Validate());
10498
ClearStatistics(*pStats);
10499
m_Pimpl->m_Metadata->AddStatistics(*pStats);
10500
}
10501
10502
void VirtualBlock::CalculateStatistics(DetailedStatistics* pStats) const
10503
{
10504
D3D12MA_ASSERT(pStats);
10505
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
10506
D3D12MA_HEAVY_ASSERT(m_Pimpl->m_Metadata->Validate());
10507
ClearDetailedStatistics(*pStats);
10508
m_Pimpl->m_Metadata->AddDetailedStatistics(*pStats);
10509
}
10510
10511
void VirtualBlock::BuildStatsString(WCHAR** ppStatsString) const
10512
{
10513
D3D12MA_ASSERT(ppStatsString);
10514
10515
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
10516
10517
StringBuilder sb(m_Pimpl->m_AllocationCallbacks);
10518
{
10519
JsonWriter json(m_Pimpl->m_AllocationCallbacks, sb);
10520
D3D12MA_HEAVY_ASSERT(m_Pimpl->m_Metadata->Validate());
10521
json.BeginObject();
10522
m_Pimpl->m_Metadata->WriteAllocationInfoToJson(json);
10523
json.EndObject();
10524
} // Scope for JsonWriter
10525
10526
const size_t length = sb.GetLength();
10527
WCHAR* result = AllocateArray<WCHAR>(m_Pimpl->m_AllocationCallbacks, length + 1);
10528
memcpy(result, sb.GetData(), length * sizeof(WCHAR));
10529
result[length] = L'\0';
10530
*ppStatsString = result;
10531
}
10532
10533
void VirtualBlock::FreeStatsString(WCHAR* pStatsString) const
10534
{
10535
if (pStatsString != NULL)
10536
{
10537
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
10538
D3D12MA::Free(m_Pimpl->m_AllocationCallbacks, pStatsString);
10539
}
10540
}
10541
10542
void VirtualBlock::ReleaseThis()
10543
{
10544
// A copy is needed because otherwise the destructor would invalidate the structure with the callbacks before we could use it to free the memory.
10545
const ALLOCATION_CALLBACKS allocationCallbacksCopy = m_Pimpl->m_AllocationCallbacks;
10546
D3D12MA_DELETE(allocationCallbacksCopy, this);
10547
}
10548
10549
VirtualBlock::VirtualBlock(const ALLOCATION_CALLBACKS& allocationCallbacks, const VIRTUAL_BLOCK_DESC& desc)
10550
: m_Pimpl(D3D12MA_NEW(allocationCallbacks, VirtualBlockPimpl)(allocationCallbacks, desc)) {}
10551
10552
VirtualBlock::~VirtualBlock()
10553
{
10554
// THIS IS AN IMPORTANT ASSERT!
10555
// Hitting it means you have some memory leak - unreleased allocations in this virtual block.
10556
D3D12MA_ASSERT(m_Pimpl->m_Metadata->IsEmpty() && "Some allocations were not freed before destruction of this virtual block!");
10557
10558
D3D12MA_DELETE(m_Pimpl->m_AllocationCallbacks, m_Pimpl);
10559
}
10560
#endif // _D3D12MA_VIRTUAL_BLOCK_FUNCTIONS
10561
#endif // _D3D12MA_PUBLIC_INTERFACE
10562
} // namespace D3D12MA