GitHub Repository: godotengine/godot
Path: blob/master/thirdparty/d3d12ma/D3D12MemAlloc.cpp
1
//
2
// Copyright (c) 2019-2025 Advanced Micro Devices, Inc. All rights reserved.
3
//
4
// Permission is hereby granted, free of charge, to any person obtaining a copy
5
// of this software and associated documentation files (the "Software"), to deal
6
// in the Software without restriction, including without limitation the rights
7
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8
// copies of the Software, and to permit persons to whom the Software is
9
// furnished to do so, subject to the following conditions:
10
//
11
// The above copyright notice and this permission notice shall be included in
12
// all copies or substantial portions of the Software.
13
//
14
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20
// THE SOFTWARE.
21
//
22
23
#include "D3D12MemAlloc.h"
24
25
#include <combaseapi.h>
26
#include <mutex>
27
#include <algorithm>
28
#include <utility>
29
#include <cstdlib>
30
#include <cstdint>
31
#include <malloc.h> // for _aligned_malloc, _aligned_free
32
#ifndef _WIN32
33
#include <shared_mutex>
34
#endif
35
36
// Includes needed for MinGW - see #71.
37
#ifndef _MSC_VER
38
#include <guiddef.h>
39
// guiddef.h must be included first.
40
#include <dxguids.h>
41
#endif
42
43
////////////////////////////////////////////////////////////////////////////////
44
////////////////////////////////////////////////////////////////////////////////
45
//
46
// Configuration Begin
47
//
48
////////////////////////////////////////////////////////////////////////////////
49
////////////////////////////////////////////////////////////////////////////////
50
#ifndef _D3D12MA_CONFIGURATION
51
52
#ifdef _WIN32
53
#if !defined(WINVER) || WINVER < 0x0600
54
#error Required at least WinAPI version supporting: client = Windows Vista, server = Windows Server 2008.
55
#endif
56
#endif
57
58
#ifndef D3D12MA_SORT
59
#define D3D12MA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
60
#endif
61
62
#ifndef D3D12MA_D3D12_HEADERS_ALREADY_INCLUDED
63
#include <dxgi.h>
64
#if D3D12MA_DXGI_1_4
65
#include <dxgi1_4.h>
66
#endif
67
#endif
68
69
#ifndef D3D12MA_ASSERT
70
#include <cassert>
71
#define D3D12MA_ASSERT(cond) assert(cond)
72
#endif
73
74
// Assert that will be called very often, like inside data structures e.g. operator[].
75
// Making it non-empty can make program slow.
76
#ifndef D3D12MA_HEAVY_ASSERT
77
#ifdef _DEBUG
78
#define D3D12MA_HEAVY_ASSERT(expr) //D3D12MA_ASSERT(expr)
79
#else
80
#define D3D12MA_HEAVY_ASSERT(expr)
81
#endif
82
#endif
83
84
#ifndef D3D12MA_DEBUG_ALIGNMENT
85
/*
86
Minimum alignment of all allocations, in bytes.
87
Set to more than 1 for debugging purposes only. Must be power of two.
88
*/
89
#define D3D12MA_DEBUG_ALIGNMENT (1)
90
#endif
91
92
#ifndef D3D12MA_DEBUG_MARGIN
93
// Minimum margin before and after every allocation, in bytes.
94
// Set nonzero for debugging purposes only.
95
#define D3D12MA_DEBUG_MARGIN (0)
96
#endif
97
98
#ifndef D3D12MA_DEBUG_GLOBAL_MUTEX
99
/*
100
Set this to 1 for debugging purposes only, to enable single mutex protecting all
101
entry calls to the library. Can be useful for debugging multithreading issues.
102
*/
103
#define D3D12MA_DEBUG_GLOBAL_MUTEX (0)
104
#endif
105
106
/*
107
Define this macro for debugging purposes only to force specific D3D12_RESOURCE_HEAP_TIER,
108
especially to test compatibility with D3D12_RESOURCE_HEAP_TIER_1 on modern GPUs.
109
*/
110
//#define D3D12MA_FORCE_RESOURCE_HEAP_TIER D3D12_RESOURCE_HEAP_TIER_1
111
112
#ifndef D3D12MA_DEFAULT_BLOCK_SIZE
113
/// Default size of a block allocated as single ID3D12Heap.
114
#define D3D12MA_DEFAULT_BLOCK_SIZE (64ull * 1024 * 1024)
115
#endif
116
117
#ifndef D3D12MA_OPTIONS16_SUPPORTED
118
#if D3D12_SDK_VERSION >= 610
119
#define D3D12MA_OPTIONS16_SUPPORTED 1
120
#else
121
#define D3D12MA_OPTIONS16_SUPPORTED 0
122
#endif
123
#endif
124
125
#ifndef D3D12MA_DEBUG_LOG
126
#define D3D12MA_DEBUG_LOG(format, ...)
127
/*
128
#define D3D12MA_DEBUG_LOG(format, ...) do { \
129
wprintf(format, __VA_ARGS__); \
130
wprintf(L"\n"); \
131
} while(false)
132
*/
133
#endif
134
135
#endif // _D3D12MA_CONFIGURATION
136
////////////////////////////////////////////////////////////////////////////////
137
////////////////////////////////////////////////////////////////////////////////
138
//
139
// Configuration End
140
//
141
////////////////////////////////////////////////////////////////////////////////
142
////////////////////////////////////////////////////////////////////////////////
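//
// Example (illustrative only, not part of the library): a project can tune the
// configuration above by defining these macros before this file is compiled,
// e.g. in a force-included header or via compiler -D flags (the exact mechanism
// depends on the build system):
//
//   #define D3D12MA_DEBUG_MARGIN (16)          // pad every allocation while debugging
//   #define D3D12MA_DEBUG_GLOBAL_MUTEX (1)     // serialize all entry points
//   #define D3D12MA_ASSERT(cond) MyEngineAssert(cond)   // MyEngineAssert is a hypothetical name
//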
143
144
#define D3D12MA_IID_PPV_ARGS(ppType) __uuidof(**(ppType)), reinterpret_cast<void**>(ppType)
145
146
namespace D3D12MA
147
{
148
static constexpr UINT HEAP_TYPE_COUNT = 5;
149
static constexpr UINT STANDARD_HEAP_TYPE_COUNT = 4; // Only DEFAULT, UPLOAD, READBACK, GPU_UPLOAD.
150
static constexpr UINT DEFAULT_POOL_MAX_COUNT = STANDARD_HEAP_TYPE_COUNT * 3;
151
static const UINT NEW_BLOCK_SIZE_SHIFT_MAX = 3;
152
// Minimum size of a free suballocation to register it in the free suballocation collection.
153
static const UINT64 MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
154
155
static const WCHAR* const HeapTypeNames[] =
156
{
157
L"DEFAULT",
158
L"UPLOAD",
159
L"READBACK",
160
L"CUSTOM",
161
L"GPU_UPLOAD",
162
};
163
static const WCHAR* const StandardHeapTypeNames[] =
164
{
165
L"DEFAULT",
166
L"UPLOAD",
167
L"READBACK",
168
L"GPU_UPLOAD",
169
};
170
171
static const D3D12_HEAP_FLAGS RESOURCE_CLASS_HEAP_FLAGS =
172
D3D12_HEAP_FLAG_DENY_BUFFERS | D3D12_HEAP_FLAG_DENY_RT_DS_TEXTURES | D3D12_HEAP_FLAG_DENY_NON_RT_DS_TEXTURES;
173
174
static const D3D12_RESIDENCY_PRIORITY D3D12_RESIDENCY_PRIORITY_NONE = D3D12_RESIDENCY_PRIORITY(0);
175
176
static const D3D12_HEAP_TYPE D3D12_HEAP_TYPE_GPU_UPLOAD_COPY = (D3D12_HEAP_TYPE)5;
177
178
#ifndef _D3D12MA_ENUM_DECLARATIONS
179
180
// Local copy of this enum, as it is provided only by <dxgi1_4.h>, so it may not be available.
181
enum DXGI_MEMORY_SEGMENT_GROUP_COPY
182
{
183
DXGI_MEMORY_SEGMENT_GROUP_LOCAL_COPY = 0,
184
DXGI_MEMORY_SEGMENT_GROUP_NON_LOCAL_COPY = 1,
185
DXGI_MEMORY_SEGMENT_GROUP_COUNT
186
};
187
188
enum class ResourceClass
189
{
190
Unknown, Buffer, Non_RT_DS_Texture, RT_DS_Texture
191
};
192
193
enum SuballocationType
194
{
195
SUBALLOCATION_TYPE_FREE = 0,
196
SUBALLOCATION_TYPE_ALLOCATION = 1,
197
};
198
199
#endif // _D3D12MA_ENUM_DECLARATIONS
200
201
202
#ifndef _D3D12MA_FUNCTIONS
203
204
static void* DefaultAllocate(size_t Size, size_t Alignment, void* /*pPrivateData*/)
205
{
206
#ifdef _WIN32
207
return _aligned_malloc(Size, Alignment);
208
#else
209
return aligned_alloc(Alignment, Size);
210
#endif
211
}
212
static void DefaultFree(void* pMemory, void* /*pPrivateData*/)
213
{
214
#ifdef _WIN32
215
return _aligned_free(pMemory);
216
#else
217
return free(pMemory);
218
#endif
219
}
220
221
static void* Malloc(const ALLOCATION_CALLBACKS& allocs, size_t size, size_t alignment)
222
{
223
void* const result = (*allocs.pAllocate)(size, alignment, allocs.pPrivateData);
224
D3D12MA_ASSERT(result);
225
return result;
226
}
227
static void Free(const ALLOCATION_CALLBACKS& allocs, void* memory)
228
{
229
(*allocs.pFree)(memory, allocs.pPrivateData);
230
}
231
232
template<typename T>
233
static T* Allocate(const ALLOCATION_CALLBACKS& allocs)
234
{
235
return (T*)Malloc(allocs, sizeof(T), __alignof(T));
236
}
237
template<typename T>
238
static T* AllocateArray(const ALLOCATION_CALLBACKS& allocs, size_t count)
239
{
240
return (T*)Malloc(allocs, sizeof(T) * count, __alignof(T));
241
}
242
243
#define D3D12MA_NEW(allocs, type) new(D3D12MA::Allocate<type>(allocs))(type)
244
#define D3D12MA_NEW_ARRAY(allocs, type, count) new(D3D12MA::AllocateArray<type>((allocs), (count)))(type)
245
246
template<typename T>
247
void D3D12MA_DELETE(const ALLOCATION_CALLBACKS& allocs, T* memory)
248
{
249
if (memory)
250
{
251
memory->~T();
252
Free(allocs, memory);
253
}
254
}
255
template<typename T>
256
void D3D12MA_DELETE_ARRAY(const ALLOCATION_CALLBACKS& allocs, T* memory, size_t count)
257
{
258
if (memory)
259
{
260
for (size_t i = count; i--; )
261
{
262
memory[i].~T();
263
}
264
Free(allocs, memory);
265
}
266
}
267
268
static void SetupAllocationCallbacks(ALLOCATION_CALLBACKS& outAllocs, const ALLOCATION_CALLBACKS* allocationCallbacks)
269
{
270
if (allocationCallbacks)
271
{
272
outAllocs = *allocationCallbacks;
273
D3D12MA_ASSERT(outAllocs.pAllocate != NULL && outAllocs.pFree != NULL);
274
}
275
else
276
{
277
outAllocs.pAllocate = &DefaultAllocate;
278
outAllocs.pFree = &DefaultFree;
279
outAllocs.pPrivateData = NULL;
280
}
281
}
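//
// Illustrative sketch (not part of the library): plugging in custom CPU allocation
// callbacks. The signatures follow DefaultAllocate/DefaultFree above; MyAlloc,
// MyFree, and g_Counters are hypothetical names.
//
//   void* MyAlloc(size_t Size, size_t Alignment, void* pPrivateData) { /* ... */ }
//   void MyFree(void* pMemory, void* pPrivateData) { /* ... */ }
//
//   ALLOCATION_CALLBACKS allocs = {};
//   allocs.pAllocate = &MyAlloc;
//   allocs.pFree = &MyFree;
//   allocs.pPrivateData = &g_Counters;  // passed back to both callbacks
//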
282
283
#define SAFE_RELEASE(ptr) do { if(ptr) { (ptr)->Release(); (ptr) = NULL; } } while(false)
284
285
#define D3D12MA_VALIDATE(cond) do { if(!(cond)) { \
286
D3D12MA_ASSERT(0 && "Validation failed: " #cond); \
287
return false; \
288
} } while(false)
289
290
template<typename T>
291
static T D3D12MA_MIN(const T& a, const T& b) { return a <= b ? a : b; }
292
template<typename T>
293
static T D3D12MA_MAX(const T& a, const T& b) { return a <= b ? b : a; }
294
295
template<typename T>
296
static void D3D12MA_SWAP(T& a, T& b) { T tmp = a; a = b; b = tmp; }
297
298
// Scans integer for index of first nonzero bit from the Least Significant Bit (LSB). If mask is 0 then returns UINT8_MAX
299
static UINT8 BitScanLSB(UINT64 mask)
300
{
301
#if defined(_MSC_VER) && defined(_WIN64)
302
unsigned long pos;
303
if (_BitScanForward64(&pos, mask))
304
return static_cast<UINT8>(pos);
305
return UINT8_MAX;
306
#elif defined __GNUC__ || defined __clang__
307
return static_cast<UINT8>(__builtin_ffsll(mask)) - 1U;
308
#else
309
UINT8 pos = 0;
310
UINT64 bit = 1;
311
do
312
{
313
if (mask & bit)
314
return pos;
315
bit <<= 1;
316
} while (pos++ < 63);
317
return UINT8_MAX;
318
#endif
319
}
320
// Scans integer for index of first nonzero bit from the Least Significant Bit (LSB). If mask is 0 then returns UINT8_MAX
321
static UINT8 BitScanLSB(UINT32 mask)
322
{
323
#ifdef _MSC_VER
324
unsigned long pos;
325
if (_BitScanForward(&pos, mask))
326
return static_cast<UINT8>(pos);
327
return UINT8_MAX;
328
#elif defined __GNUC__ || defined __clang__
329
return static_cast<UINT8>(__builtin_ffs(mask)) - 1U;
330
#else
331
UINT8 pos = 0;
332
UINT32 bit = 1;
333
do
334
{
335
if (mask & bit)
336
return pos;
337
bit <<= 1;
338
} while (pos++ < 31);
339
return UINT8_MAX;
340
#endif
341
}
342
343
// Scans integer for index of first nonzero bit from the Most Significant Bit (MSB). If mask is 0 then returns UINT8_MAX
344
static UINT8 BitScanMSB(UINT64 mask)
345
{
346
#if defined(_MSC_VER) && defined(_WIN64)
347
unsigned long pos;
348
if (_BitScanReverse64(&pos, mask))
349
return static_cast<UINT8>(pos);
350
#elif defined __GNUC__ || defined __clang__
351
if (mask)
352
return 63 - static_cast<UINT8>(__builtin_clzll(mask));
353
#else
354
UINT8 pos = 63;
355
UINT64 bit = 1ULL << 63;
356
do
357
{
358
if (mask & bit)
359
return pos;
360
bit >>= 1;
361
} while (pos-- > 0);
362
#endif
363
return UINT8_MAX;
364
}
365
// Scans integer for index of first nonzero bit from the Most Significant Bit (MSB). If mask is 0 then returns UINT8_MAX
366
static UINT8 BitScanMSB(UINT32 mask)
367
{
368
#ifdef _MSC_VER
369
unsigned long pos;
370
if (_BitScanReverse(&pos, mask))
371
return static_cast<UINT8>(pos);
372
#elif defined __GNUC__ || defined __clang__
373
if (mask)
374
return 31 - static_cast<UINT8>(__builtin_clz(mask));
375
#else
376
UINT8 pos = 31;
377
UINT32 bit = 1UL << 31;
378
do
379
{
380
if (mask & bit)
381
return pos;
382
bit >>= 1;
383
} while (pos-- > 0);
384
#endif
385
return UINT8_MAX;
386
}
387
388
/*
389
Returns true if given number is a power of two.
390
T must be an unsigned integer, or a signed integer with a nonnegative value.
391
For 0 returns true.
392
*/
393
template <typename T>
394
static bool IsPow2(T x) { return (x & (x - 1)) == 0; }
395
396
// Aligns given value up to nearest multiple of the alignment value. For example: AlignUp(11, 8) = 16.
397
// Use types like UINT, uint64_t as T.
398
template <typename T>
399
static T AlignUp(T val, T alignment)
400
{
401
D3D12MA_HEAVY_ASSERT(IsPow2(alignment));
402
return (val + alignment - 1) & ~(alignment - 1);
403
}
404
// Aligns given value down to nearest multiple of the alignment value. For example: AlignDown(11, 8) = 8.
405
// Use types like UINT, uint64_t as T.
406
template <typename T>
407
static T AlignDown(T val, T alignment)
408
{
409
D3D12MA_HEAVY_ASSERT(IsPow2(alignment));
410
return val & ~(alignment - 1);
411
}
412
413
// Division with mathematical rounding to nearest number.
414
template <typename T>
415
static T RoundDiv(T x, T y) { return (x + (y / (T)2)) / y; }
416
template <typename T>
417
static T DivideRoundingUp(T x, T y) { return (x + y - 1) / y; }
418
419
static WCHAR HexDigitToChar(UINT8 digit)
420
{
421
if(digit < 10)
422
return L'0' + digit;
423
else
424
return L'A' + (digit - 10);
425
}
426
427
/*
428
Performs binary search and returns iterator to first element that is greater or
429
equal to `key`, according to comparison `cmp`.
430
431
Cmp should return true if first argument is less than second argument.
432
433
Returned value is the found element, if present in the collection or place where
434
new element with value (key) should be inserted.
435
*/
436
template <typename CmpLess, typename IterT, typename KeyT>
437
static IterT BinaryFindFirstNotLess(IterT beg, IterT end, const KeyT& key, const CmpLess& cmp)
438
{
439
size_t down = 0, up = (end - beg);
440
while (down < up)
441
{
442
const size_t mid = (down + up) / 2;
443
if (cmp(*(beg + mid), key))
444
{
445
down = mid + 1;
446
}
447
else
448
{
449
up = mid;
450
}
451
}
452
return beg + down;
453
}
454
455
/*
456
Performs binary search and returns iterator to an element that is equal to `key`,
457
according to comparison `cmp`.
458
459
Cmp should return true if first argument is less than second argument.
460
461
Returned value is the found element, if present in the collection or end if not
462
found.
463
*/
464
template<typename CmpLess, typename IterT, typename KeyT>
465
static IterT BinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp)
466
{
467
IterT it = BinaryFindFirstNotLess<CmpLess, IterT, KeyT>(beg, end, value, cmp);
468
if (it == end ||
469
(!cmp(*it, value) && !cmp(value, *it)))
470
{
471
return it;
472
}
473
return end;
474
}
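//
// Worked example of the two helpers above (illustrative): for a sorted array
// {10, 20, 40} with cmp behaving like operator< :
//   BinaryFindFirstNotLess(key=20) -> iterator to 20 (first element not less than key)
//   BinaryFindFirstNotLess(key=30) -> iterator to 40 (the position where 30 would be inserted)
//   BinaryFindSorted(key=20)       -> iterator to 20
//   BinaryFindSorted(key=30)       -> end (no exact match)
//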
475
476
static UINT StandardHeapTypeToIndex(D3D12_HEAP_TYPE type)
477
{
478
switch (type)
479
{
480
case D3D12_HEAP_TYPE_DEFAULT: return 0;
481
case D3D12_HEAP_TYPE_UPLOAD: return 1;
482
case D3D12_HEAP_TYPE_READBACK: return 2;
483
case D3D12_HEAP_TYPE_GPU_UPLOAD_COPY: return 3;
484
default: D3D12MA_ASSERT(0); return UINT_MAX;
485
}
486
}
487
488
static D3D12_HEAP_TYPE IndexToStandardHeapType(UINT heapTypeIndex)
489
{
490
switch(heapTypeIndex)
491
{
492
case 0: return D3D12_HEAP_TYPE_DEFAULT;
493
case 1: return D3D12_HEAP_TYPE_UPLOAD;
494
case 2: return D3D12_HEAP_TYPE_READBACK;
495
case 3: return D3D12_HEAP_TYPE_GPU_UPLOAD_COPY;
496
default: D3D12MA_ASSERT(0); return D3D12_HEAP_TYPE_CUSTOM;
497
}
498
}
499
500
static UINT64 HeapFlagsToAlignment(D3D12_HEAP_FLAGS flags, bool denyMsaaTextures)
501
{
502
/*
503
Documentation of D3D12_HEAP_DESC structure says:
504
505
- D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT defined as 64KB.
506
- D3D12_DEFAULT_MSAA_RESOURCE_PLACEMENT_ALIGNMENT defined as 4MB. An
507
application must decide whether the heap will contain multi-sample
508
anti-aliasing (MSAA), in which case, the application must choose [this flag].
509
510
https://docs.microsoft.com/en-us/windows/desktop/api/d3d12/ns-d3d12-d3d12_heap_desc
511
*/
512
513
if (denyMsaaTextures)
514
return D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT;
515
516
const D3D12_HEAP_FLAGS denyAllTexturesFlags =
517
D3D12_HEAP_FLAG_DENY_NON_RT_DS_TEXTURES | D3D12_HEAP_FLAG_DENY_RT_DS_TEXTURES;
518
const bool canContainAnyTextures =
519
(flags & denyAllTexturesFlags) != denyAllTexturesFlags;
520
return canContainAnyTextures ?
521
D3D12_DEFAULT_MSAA_RESOURCE_PLACEMENT_ALIGNMENT : D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT;
522
}
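//
// Illustrative examples of the rule above:
//   denyMsaaTextures == true                                      -> 64 KB (default alignment)
//   flags deny both texture kinds (heap can hold only buffers)    -> 64 KB
//   flags = D3D12_HEAP_FLAG_DENY_BUFFERS | D3D12_HEAP_FLAG_DENY_NON_RT_DS_TEXTURES
//     (heap may still hold RT/DS textures, possibly MSAA)         -> 4 MB (MSAA alignment)
//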
523
524
static ResourceClass HeapFlagsToResourceClass(D3D12_HEAP_FLAGS heapFlags)
525
{
526
const bool allowBuffers = (heapFlags & D3D12_HEAP_FLAG_DENY_BUFFERS) == 0;
527
const bool allowRtDsTextures = (heapFlags & D3D12_HEAP_FLAG_DENY_RT_DS_TEXTURES) == 0;
528
const bool allowNonRtDsTextures = (heapFlags & D3D12_HEAP_FLAG_DENY_NON_RT_DS_TEXTURES) == 0;
529
530
const uint8_t allowedGroupCount = (allowBuffers ? 1 : 0) + (allowRtDsTextures ? 1 : 0) + (allowNonRtDsTextures ? 1 : 0);
531
if (allowedGroupCount != 1)
532
return ResourceClass::Unknown;
533
534
if (allowRtDsTextures)
535
return ResourceClass::RT_DS_Texture;
536
if (allowNonRtDsTextures)
537
return ResourceClass::Non_RT_DS_Texture;
538
return ResourceClass::Buffer;
539
}
540
541
static bool IsHeapTypeStandard(D3D12_HEAP_TYPE type)
542
{
543
return type == D3D12_HEAP_TYPE_DEFAULT ||
544
type == D3D12_HEAP_TYPE_UPLOAD ||
545
type == D3D12_HEAP_TYPE_READBACK ||
546
type == D3D12_HEAP_TYPE_GPU_UPLOAD_COPY;
547
}
548
549
static D3D12_HEAP_PROPERTIES StandardHeapTypeToHeapProperties(D3D12_HEAP_TYPE type)
550
{
551
D3D12MA_ASSERT(IsHeapTypeStandard(type));
552
D3D12_HEAP_PROPERTIES result = {};
553
result.Type = type;
554
return result;
555
}
556
557
static bool IsFormatCompressed(DXGI_FORMAT format)
558
{
559
switch (format)
560
{
561
case DXGI_FORMAT_BC1_TYPELESS:
562
case DXGI_FORMAT_BC1_UNORM:
563
case DXGI_FORMAT_BC1_UNORM_SRGB:
564
case DXGI_FORMAT_BC2_TYPELESS:
565
case DXGI_FORMAT_BC2_UNORM:
566
case DXGI_FORMAT_BC2_UNORM_SRGB:
567
case DXGI_FORMAT_BC3_TYPELESS:
568
case DXGI_FORMAT_BC3_UNORM:
569
case DXGI_FORMAT_BC3_UNORM_SRGB:
570
case DXGI_FORMAT_BC4_TYPELESS:
571
case DXGI_FORMAT_BC4_UNORM:
572
case DXGI_FORMAT_BC4_SNORM:
573
case DXGI_FORMAT_BC5_TYPELESS:
574
case DXGI_FORMAT_BC5_UNORM:
575
case DXGI_FORMAT_BC5_SNORM:
576
case DXGI_FORMAT_BC6H_TYPELESS:
577
case DXGI_FORMAT_BC6H_UF16:
578
case DXGI_FORMAT_BC6H_SF16:
579
case DXGI_FORMAT_BC7_TYPELESS:
580
case DXGI_FORMAT_BC7_UNORM:
581
case DXGI_FORMAT_BC7_UNORM_SRGB:
582
return true;
583
default:
584
return false;
585
}
586
}
587
588
// Only some formats are supported. For others it returns 0.
589
static UINT GetBitsPerPixel(DXGI_FORMAT format)
590
{
591
switch (format)
592
{
593
case DXGI_FORMAT_R32G32B32A32_TYPELESS:
594
case DXGI_FORMAT_R32G32B32A32_FLOAT:
595
case DXGI_FORMAT_R32G32B32A32_UINT:
596
case DXGI_FORMAT_R32G32B32A32_SINT:
597
return 128;
598
case DXGI_FORMAT_R32G32B32_TYPELESS:
599
case DXGI_FORMAT_R32G32B32_FLOAT:
600
case DXGI_FORMAT_R32G32B32_UINT:
601
case DXGI_FORMAT_R32G32B32_SINT:
602
return 96;
603
case DXGI_FORMAT_R16G16B16A16_TYPELESS:
604
case DXGI_FORMAT_R16G16B16A16_FLOAT:
605
case DXGI_FORMAT_R16G16B16A16_UNORM:
606
case DXGI_FORMAT_R16G16B16A16_UINT:
607
case DXGI_FORMAT_R16G16B16A16_SNORM:
608
case DXGI_FORMAT_R16G16B16A16_SINT:
609
return 64;
610
case DXGI_FORMAT_R32G32_TYPELESS:
611
case DXGI_FORMAT_R32G32_FLOAT:
612
case DXGI_FORMAT_R32G32_UINT:
613
case DXGI_FORMAT_R32G32_SINT:
614
return 64;
615
case DXGI_FORMAT_R32G8X24_TYPELESS:
616
case DXGI_FORMAT_D32_FLOAT_S8X24_UINT:
617
case DXGI_FORMAT_R32_FLOAT_X8X24_TYPELESS:
618
case DXGI_FORMAT_X32_TYPELESS_G8X24_UINT:
619
return 64;
620
case DXGI_FORMAT_R10G10B10A2_TYPELESS:
621
case DXGI_FORMAT_R10G10B10A2_UNORM:
622
case DXGI_FORMAT_R10G10B10A2_UINT:
623
case DXGI_FORMAT_R11G11B10_FLOAT:
624
return 32;
625
case DXGI_FORMAT_R8G8B8A8_TYPELESS:
626
case DXGI_FORMAT_R8G8B8A8_UNORM:
627
case DXGI_FORMAT_R8G8B8A8_UNORM_SRGB:
628
case DXGI_FORMAT_R8G8B8A8_UINT:
629
case DXGI_FORMAT_R8G8B8A8_SNORM:
630
case DXGI_FORMAT_R8G8B8A8_SINT:
631
return 32;
632
case DXGI_FORMAT_R16G16_TYPELESS:
633
case DXGI_FORMAT_R16G16_FLOAT:
634
case DXGI_FORMAT_R16G16_UNORM:
635
case DXGI_FORMAT_R16G16_UINT:
636
case DXGI_FORMAT_R16G16_SNORM:
637
case DXGI_FORMAT_R16G16_SINT:
638
return 32;
639
case DXGI_FORMAT_R32_TYPELESS:
640
case DXGI_FORMAT_D32_FLOAT:
641
case DXGI_FORMAT_R32_FLOAT:
642
case DXGI_FORMAT_R32_UINT:
643
case DXGI_FORMAT_R32_SINT:
644
return 32;
645
case DXGI_FORMAT_R24G8_TYPELESS:
646
case DXGI_FORMAT_D24_UNORM_S8_UINT:
647
case DXGI_FORMAT_R24_UNORM_X8_TYPELESS:
648
case DXGI_FORMAT_X24_TYPELESS_G8_UINT:
649
return 32;
650
case DXGI_FORMAT_R8G8_TYPELESS:
651
case DXGI_FORMAT_R8G8_UNORM:
652
case DXGI_FORMAT_R8G8_UINT:
653
case DXGI_FORMAT_R8G8_SNORM:
654
case DXGI_FORMAT_R8G8_SINT:
655
return 16;
656
case DXGI_FORMAT_R16_TYPELESS:
657
case DXGI_FORMAT_R16_FLOAT:
658
case DXGI_FORMAT_D16_UNORM:
659
case DXGI_FORMAT_R16_UNORM:
660
case DXGI_FORMAT_R16_UINT:
661
case DXGI_FORMAT_R16_SNORM:
662
case DXGI_FORMAT_R16_SINT:
663
return 16;
664
case DXGI_FORMAT_R8_TYPELESS:
665
case DXGI_FORMAT_R8_UNORM:
666
case DXGI_FORMAT_R8_UINT:
667
case DXGI_FORMAT_R8_SNORM:
668
case DXGI_FORMAT_R8_SINT:
669
case DXGI_FORMAT_A8_UNORM:
670
return 8;
671
case DXGI_FORMAT_BC1_TYPELESS:
672
case DXGI_FORMAT_BC1_UNORM:
673
case DXGI_FORMAT_BC1_UNORM_SRGB:
674
return 4;
675
case DXGI_FORMAT_BC2_TYPELESS:
676
case DXGI_FORMAT_BC2_UNORM:
677
case DXGI_FORMAT_BC2_UNORM_SRGB:
678
return 8;
679
case DXGI_FORMAT_BC3_TYPELESS:
680
case DXGI_FORMAT_BC3_UNORM:
681
case DXGI_FORMAT_BC3_UNORM_SRGB:
682
return 8;
683
case DXGI_FORMAT_BC4_TYPELESS:
684
case DXGI_FORMAT_BC4_UNORM:
685
case DXGI_FORMAT_BC4_SNORM:
686
return 4;
687
case DXGI_FORMAT_BC5_TYPELESS:
688
case DXGI_FORMAT_BC5_UNORM:
689
case DXGI_FORMAT_BC5_SNORM:
690
return 8;
691
case DXGI_FORMAT_BC6H_TYPELESS:
692
case DXGI_FORMAT_BC6H_UF16:
693
case DXGI_FORMAT_BC6H_SF16:
694
return 8;
695
case DXGI_FORMAT_BC7_TYPELESS:
696
case DXGI_FORMAT_BC7_UNORM:
697
case DXGI_FORMAT_BC7_UNORM_SRGB:
698
return 8;
699
default:
700
return 0;
701
}
702
}
703
704
template<typename D3D12_RESOURCE_DESC_T>
705
static ResourceClass ResourceDescToResourceClass(const D3D12_RESOURCE_DESC_T& resDesc)
706
{
707
if (resDesc.Dimension == D3D12_RESOURCE_DIMENSION_BUFFER)
708
return ResourceClass::Buffer;
709
// Else: it's surely a texture.
710
const bool isRenderTargetOrDepthStencil =
711
(resDesc.Flags & (D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET | D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL)) != 0;
712
return isRenderTargetOrDepthStencil ? ResourceClass::RT_DS_Texture : ResourceClass::Non_RT_DS_Texture;
713
}
714
715
// This algorithm is overly conservative.
716
template<typename D3D12_RESOURCE_DESC_T>
717
static bool CanUseSmallAlignment(const D3D12_RESOURCE_DESC_T& resourceDesc)
718
{
719
if (resourceDesc.Dimension != D3D12_RESOURCE_DIMENSION_TEXTURE2D)
720
return false;
721
if ((resourceDesc.Flags & (D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET | D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL)) != 0)
722
return false;
723
if (resourceDesc.SampleDesc.Count > 1)
724
return false;
725
if (resourceDesc.DepthOrArraySize != 1)
726
return false;
727
728
UINT sizeX = (UINT)resourceDesc.Width;
729
UINT sizeY = resourceDesc.Height;
730
UINT bitsPerPixel = GetBitsPerPixel(resourceDesc.Format);
731
if (bitsPerPixel == 0)
732
return false;
733
734
if (IsFormatCompressed(resourceDesc.Format))
735
{
736
sizeX = DivideRoundingUp(sizeX, 4u);
737
sizeY = DivideRoundingUp(sizeY, 4u);
738
bitsPerPixel *= 16;
739
}
740
741
UINT tileSizeX = 0, tileSizeY = 0;
742
switch (bitsPerPixel)
743
{
744
case 8: tileSizeX = 64; tileSizeY = 64; break;
745
case 16: tileSizeX = 64; tileSizeY = 32; break;
746
case 32: tileSizeX = 32; tileSizeY = 32; break;
747
case 64: tileSizeX = 32; tileSizeY = 16; break;
748
case 128: tileSizeX = 16; tileSizeY = 16; break;
749
default: return false;
750
}
751
752
const UINT tileCount = DivideRoundingUp(sizeX, tileSizeX) * DivideRoundingUp(sizeY, tileSizeY);
753
return tileCount <= 16;
754
}
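//
// Worked examples for the heuristic above (values follow the code exactly):
//   256x256 BC1 texture: 4 bpp, compressed -> 64x64 blocks at 64 bits per block,
//     tile = 32x16 -> DivideRoundingUp(64,32) * DivideRoundingUp(64,16) = 2 * 4 = 8 tiles
//     -> 8 <= 16, so small alignment can be used.
//   1024x1024 R8G8B8A8_UNORM texture: 32 bpp, tile = 32x32
//     -> 32 * 32 = 1024 tiles -> too large, keep the default alignment.
//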
755
756
static bool ValidateAllocateMemoryParameters(
757
const ALLOCATION_DESC* pAllocDesc,
758
const D3D12_RESOURCE_ALLOCATION_INFO* pAllocInfo,
759
Allocation** ppAllocation)
760
{
761
return pAllocDesc &&
762
pAllocInfo &&
763
ppAllocation &&
764
(pAllocInfo->Alignment == 0 ||
765
pAllocInfo->Alignment == D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT ||
766
pAllocInfo->Alignment == D3D12_DEFAULT_MSAA_RESOURCE_PLACEMENT_ALIGNMENT) &&
767
pAllocInfo->SizeInBytes != 0 &&
768
pAllocInfo->SizeInBytes % (64ull * 1024) == 0;
769
}
770
771
#endif // _D3D12MA_FUNCTIONS
772
773
#ifndef _D3D12MA_STATISTICS_FUNCTIONS
774
775
static void ClearStatistics(Statistics& outStats)
776
{
777
outStats.BlockCount = 0;
778
outStats.AllocationCount = 0;
779
outStats.BlockBytes = 0;
780
outStats.AllocationBytes = 0;
781
}
782
783
static void ClearDetailedStatistics(DetailedStatistics& outStats)
784
{
785
ClearStatistics(outStats.Stats);
786
outStats.UnusedRangeCount = 0;
787
outStats.AllocationSizeMin = UINT64_MAX;
788
outStats.AllocationSizeMax = 0;
789
outStats.UnusedRangeSizeMin = UINT64_MAX;
790
outStats.UnusedRangeSizeMax = 0;
791
}
792
793
static void AddStatistics(Statistics& inoutStats, const Statistics& src)
794
{
795
inoutStats.BlockCount += src.BlockCount;
796
inoutStats.AllocationCount += src.AllocationCount;
797
inoutStats.BlockBytes += src.BlockBytes;
798
inoutStats.AllocationBytes += src.AllocationBytes;
799
}
800
801
static void AddDetailedStatistics(DetailedStatistics& inoutStats, const DetailedStatistics& src)
802
{
803
AddStatistics(inoutStats.Stats, src.Stats);
804
inoutStats.UnusedRangeCount += src.UnusedRangeCount;
805
inoutStats.AllocationSizeMin = D3D12MA_MIN(inoutStats.AllocationSizeMin, src.AllocationSizeMin);
806
inoutStats.AllocationSizeMax = D3D12MA_MAX(inoutStats.AllocationSizeMax, src.AllocationSizeMax);
807
inoutStats.UnusedRangeSizeMin = D3D12MA_MIN(inoutStats.UnusedRangeSizeMin, src.UnusedRangeSizeMin);
808
inoutStats.UnusedRangeSizeMax = D3D12MA_MAX(inoutStats.UnusedRangeSizeMax, src.UnusedRangeSizeMax);
809
}
810
811
static void AddDetailedStatisticsAllocation(DetailedStatistics& inoutStats, UINT64 size)
812
{
813
inoutStats.Stats.AllocationCount++;
814
inoutStats.Stats.AllocationBytes += size;
815
inoutStats.AllocationSizeMin = D3D12MA_MIN(inoutStats.AllocationSizeMin, size);
816
inoutStats.AllocationSizeMax = D3D12MA_MAX(inoutStats.AllocationSizeMax, size);
817
}
818
819
static void AddDetailedStatisticsUnusedRange(DetailedStatistics& inoutStats, UINT64 size)
820
{
821
inoutStats.UnusedRangeCount++;
822
inoutStats.UnusedRangeSizeMin = D3D12MA_MIN(inoutStats.UnusedRangeSizeMin, size);
823
inoutStats.UnusedRangeSizeMax = D3D12MA_MAX(inoutStats.UnusedRangeSizeMax, size);
824
}
825
826
#endif // _D3D12MA_STATISTICS_FUNCTIONS
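//
// Typical aggregation pattern for the helpers above (illustrative):
//   DetailedStatistics stats;
//   ClearDetailedStatistics(stats);                 // mins start at UINT64_MAX so D3D12MA_MIN works
//   AddDetailedStatisticsAllocation(stats, size);   // once per allocation
//   AddDetailedStatisticsUnusedRange(stats, size);  // once per free range
//   AddDetailedStatistics(total, stats);            // merge per-block stats into a running total
//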
827
828
829
#ifndef _D3D12MA_MUTEX
830
831
#ifndef D3D12MA_MUTEX
832
class Mutex
833
{
834
public:
835
void Lock() { m_Mutex.lock(); }
836
void Unlock() { m_Mutex.unlock(); }
837
838
private:
839
std::mutex m_Mutex;
840
};
841
#define D3D12MA_MUTEX Mutex
842
#endif
843
844
#ifndef D3D12MA_RW_MUTEX
845
#ifdef _WIN32
846
class RWMutex
847
{
848
public:
849
RWMutex() { InitializeSRWLock(&m_Lock); }
850
void LockRead() { AcquireSRWLockShared(&m_Lock); }
851
void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
852
void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
853
void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
854
855
private:
856
SRWLOCK m_Lock;
857
};
858
#else // #ifdef _WIN32
859
class RWMutex
860
{
861
public:
862
RWMutex() {}
863
void LockRead() { m_Mutex.lock_shared(); }
864
void UnlockRead() { m_Mutex.unlock_shared(); }
865
void LockWrite() { m_Mutex.lock(); }
866
void UnlockWrite() { m_Mutex.unlock(); }
867
868
private:
869
std::shared_timed_mutex m_Mutex;
870
};
871
#endif // #ifdef _WIN32
872
#define D3D12MA_RW_MUTEX RWMutex
873
#endif // #ifndef D3D12MA_RW_MUTEX
874
875
// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
876
struct MutexLock
877
{
878
D3D12MA_CLASS_NO_COPY(MutexLock);
879
public:
880
MutexLock(D3D12MA_MUTEX& mutex, bool useMutex = true) :
881
m_pMutex(useMutex ? &mutex : NULL)
882
{
883
if (m_pMutex) m_pMutex->Lock();
884
}
885
~MutexLock() { if (m_pMutex) m_pMutex->Unlock(); }
886
887
private:
888
D3D12MA_MUTEX* m_pMutex;
889
};
890
891
// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
892
struct MutexLockRead
893
{
894
D3D12MA_CLASS_NO_COPY(MutexLockRead);
895
public:
896
MutexLockRead(D3D12MA_RW_MUTEX& mutex, bool useMutex)
897
: m_pMutex(useMutex ? &mutex : NULL)
898
{
899
if(m_pMutex)
900
{
901
m_pMutex->LockRead();
902
}
903
}
904
~MutexLockRead() { if (m_pMutex) m_pMutex->UnlockRead(); }
905
906
private:
907
D3D12MA_RW_MUTEX* m_pMutex;
908
};
909
910
// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
911
struct MutexLockWrite
912
{
913
D3D12MA_CLASS_NO_COPY(MutexLockWrite);
914
public:
915
MutexLockWrite(D3D12MA_RW_MUTEX& mutex, bool useMutex)
916
: m_pMutex(useMutex ? &mutex : NULL)
917
{
918
if (m_pMutex) m_pMutex->LockWrite();
919
}
920
~MutexLockWrite() { if (m_pMutex) m_pMutex->UnlockWrite(); }
921
922
private:
923
D3D12MA_RW_MUTEX* m_pMutex;
924
};
925
926
#if D3D12MA_DEBUG_GLOBAL_MUTEX
927
static D3D12MA_MUTEX g_DebugGlobalMutex;
928
#define D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK MutexLock debugGlobalMutexLock(g_DebugGlobalMutex, true);
929
#else
930
#define D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
931
#endif
932
#endif // _D3D12MA_MUTEX
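//
// Illustrative usage of the RAII helpers above (the Pool class, m_Mutex, and
// m_UseMutex are hypothetical names):
//
//   void Pool::GetStatsExample() const
//   {
//       MutexLockRead lock(m_Mutex, m_UseMutex);  // shared lock, released at end of scope
//       // ... read shared state ...
//   }                                             // use MutexLockWrite when mutating
//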
933
934
#ifndef _D3D12MA_VECTOR
935
/*
936
Dynamically resizing contiguous array. Class with an interface similar to std::vector.
937
T must be POD because constructors and destructors are not called and memcpy is
938
used for these objects.
939
*/
940
template<typename T>
941
class Vector
942
{
943
public:
944
using value_type = T;
945
using iterator = T*;
946
using const_iterator = const T*;
947
948
// allocationCallbacks externally owned, must outlive this object.
949
Vector(const ALLOCATION_CALLBACKS& allocationCallbacks);
950
Vector(size_t count, const ALLOCATION_CALLBACKS& allocationCallbacks);
951
Vector(const Vector<T>& src);
952
~Vector();
953
954
const ALLOCATION_CALLBACKS& GetAllocs() const { return m_AllocationCallbacks; }
955
bool empty() const { return m_Count == 0; }
956
size_t size() const { return m_Count; }
957
T* data() { return m_pArray; }
958
const T* data() const { return m_pArray; }
959
void clear(bool freeMemory = false) { resize(0, freeMemory); }
960
961
iterator begin() { return m_pArray; }
962
iterator end() { return m_pArray + m_Count; }
963
const_iterator cbegin() const { return m_pArray; }
964
const_iterator cend() const { return m_pArray + m_Count; }
965
const_iterator begin() const { return cbegin(); }
966
const_iterator end() const { return cend(); }
967
968
void push_front(const T& src) { insert(0, src); }
969
void push_back(const T& src);
970
void pop_front();
971
void pop_back();
972
973
T& front();
974
T& back();
975
const T& front() const;
976
const T& back() const;
977
978
void reserve(size_t newCapacity, bool freeMemory = false);
979
void resize(size_t newCount, bool freeMemory = false);
980
void insert(size_t index, const T& src);
981
void remove(size_t index);
982
983
template<typename CmpLess>
984
size_t InsertSorted(const T& value, const CmpLess& cmp);
985
template<typename CmpLess>
986
bool RemoveSorted(const T& value, const CmpLess& cmp);
987
988
Vector& operator=(const Vector<T>& rhs);
989
T& operator[](size_t index);
990
const T& operator[](size_t index) const;
991
992
private:
993
const ALLOCATION_CALLBACKS& m_AllocationCallbacks;
994
T* m_pArray;
995
size_t m_Count;
996
size_t m_Capacity;
997
};
998
999
#ifndef _D3D12MA_VECTOR_FUNCTIONS
1000
template<typename T>
1001
Vector<T>::Vector(const ALLOCATION_CALLBACKS& allocationCallbacks)
1002
: m_AllocationCallbacks(allocationCallbacks),
1003
m_pArray(NULL),
1004
m_Count(0),
1005
m_Capacity(0) {}
1006
1007
template<typename T>
1008
Vector<T>::Vector(size_t count, const ALLOCATION_CALLBACKS& allocationCallbacks)
1009
: m_AllocationCallbacks(allocationCallbacks),
1010
m_pArray(count ? AllocateArray<T>(allocationCallbacks, count) : NULL),
1011
m_Count(count),
1012
m_Capacity(count) {}
1013
1014
template<typename T>
1015
Vector<T>::Vector(const Vector<T>& src)
1016
: m_AllocationCallbacks(src.m_AllocationCallbacks),
1017
m_pArray(src.m_Count ? AllocateArray<T>(src.m_AllocationCallbacks, src.m_Count) : NULL),
1018
m_Count(src.m_Count),
1019
m_Capacity(src.m_Count)
1020
{
1021
if (m_Count > 0)
1022
{
1023
memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
1024
}
1025
}
1026
1027
template<typename T>
1028
Vector<T>::~Vector()
1029
{
1030
Free(m_AllocationCallbacks, m_pArray);
1031
}
1032
1033
template<typename T>
1034
void Vector<T>::push_back(const T& src)
1035
{
1036
const size_t newIndex = size();
1037
resize(newIndex + 1);
1038
m_pArray[newIndex] = src;
1039
}
1040
1041
template<typename T>
1042
void Vector<T>::pop_front()
1043
{
1044
D3D12MA_HEAVY_ASSERT(m_Count > 0);
1045
remove(0);
1046
}
1047
1048
template<typename T>
1049
void Vector<T>::pop_back()
1050
{
1051
D3D12MA_HEAVY_ASSERT(m_Count > 0);
1052
resize(size() - 1);
1053
}
1054
1055
template<typename T>
1056
T& Vector<T>::front()
1057
{
1058
D3D12MA_HEAVY_ASSERT(m_Count > 0);
1059
return m_pArray[0];
1060
}
1061
1062
template<typename T>
1063
T& Vector<T>::back()
1064
{
1065
D3D12MA_HEAVY_ASSERT(m_Count > 0);
1066
return m_pArray[m_Count - 1];
1067
}
1068
1069
template<typename T>
1070
const T& Vector<T>::front() const
1071
{
1072
D3D12MA_HEAVY_ASSERT(m_Count > 0);
1073
return m_pArray[0];
1074
}
1075
1076
template<typename T>
1077
const T& Vector<T>::back() const
1078
{
1079
D3D12MA_HEAVY_ASSERT(m_Count > 0);
1080
return m_pArray[m_Count - 1];
1081
}
1082
1083
template<typename T>
1084
void Vector<T>::reserve(size_t newCapacity, bool freeMemory)
1085
{
1086
newCapacity = D3D12MA_MAX(newCapacity, m_Count);
1087
1088
if ((newCapacity < m_Capacity) && !freeMemory)
1089
{
1090
newCapacity = m_Capacity;
1091
}
1092
1093
if (newCapacity != m_Capacity)
1094
{
1095
T* const newArray = newCapacity ? AllocateArray<T>(m_AllocationCallbacks, newCapacity) : NULL;
1096
if (m_Count != 0)
1097
{
1098
memcpy(newArray, m_pArray, m_Count * sizeof(T));
1099
}
1100
Free(m_AllocationCallbacks, m_pArray);
1101
m_Capacity = newCapacity;
1102
m_pArray = newArray;
1103
}
1104
}
1105
1106
template<typename T>
1107
void Vector<T>::resize(size_t newCount, bool freeMemory)
1108
{
1109
size_t newCapacity = m_Capacity;
1110
if (newCount > m_Capacity)
1111
{
1112
newCapacity = D3D12MA_MAX(newCount, D3D12MA_MAX(m_Capacity * 3 / 2, (size_t)8));
1113
}
1114
else if (freeMemory)
1115
{
1116
newCapacity = newCount;
1117
}
1118
1119
if (newCapacity != m_Capacity)
1120
{
1121
T* const newArray = newCapacity ? AllocateArray<T>(m_AllocationCallbacks, newCapacity) : NULL;
1122
const size_t elementsToCopy = D3D12MA_MIN(m_Count, newCount);
1123
if (elementsToCopy != 0)
1124
{
1125
memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
1126
}
1127
Free(m_AllocationCallbacks, m_pArray);
1128
m_Capacity = newCapacity;
1129
m_pArray = newArray;
1130
}
1131
1132
m_Count = newCount;
1133
}
1134
1135
template<typename T>
1136
void Vector<T>::insert(size_t index, const T& src)
1137
{
1138
D3D12MA_HEAVY_ASSERT(index <= m_Count);
1139
const size_t oldCount = size();
1140
resize(oldCount + 1);
1141
if (index < oldCount)
1142
{
1143
memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
1144
}
1145
m_pArray[index] = src;
1146
}
1147
1148
template<typename T>
1149
void Vector<T>::remove(size_t index)
1150
{
1151
D3D12MA_HEAVY_ASSERT(index < m_Count);
1152
const size_t oldCount = size();
1153
if (index < oldCount - 1)
1154
{
1155
memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
1156
}
1157
resize(oldCount - 1);
1158
}
1159
1160
template<typename T> template<typename CmpLess>
1161
size_t Vector<T>::InsertSorted(const T& value, const CmpLess& cmp)
1162
{
1163
const size_t indexToInsert = BinaryFindFirstNotLess<CmpLess, iterator, T>(
1164
m_pArray,
1165
m_pArray + m_Count,
1166
value,
1167
cmp) - m_pArray;
1168
insert(indexToInsert, value);
1169
return indexToInsert;
1170
}
1171
1172
template<typename T> template<typename CmpLess>
1173
bool Vector<T>::RemoveSorted(const T& value, const CmpLess& cmp)
1174
{
1175
const iterator it = BinaryFindFirstNotLess(
1176
m_pArray,
1177
m_pArray + m_Count,
1178
value,
1179
cmp);
1180
if ((it != end()) && !cmp(*it, value) && !cmp(value, *it))
1181
{
1182
size_t indexToRemove = it - begin();
1183
remove(indexToRemove);
1184
return true;
1185
}
1186
return false;
1187
}
1188
1189
template<typename T>
1190
Vector<T>& Vector<T>::operator=(const Vector<T>& rhs)
1191
{
1192
if (&rhs != this)
1193
{
1194
resize(rhs.m_Count);
1195
if (m_Count != 0)
1196
{
1197
memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
1198
}
1199
}
1200
return *this;
1201
}
1202
1203
template<typename T>
1204
T& Vector<T>::operator[](size_t index)
1205
{
1206
D3D12MA_HEAVY_ASSERT(index < m_Count);
1207
return m_pArray[index];
1208
}
1209
1210
template<typename T>
1211
const T& Vector<T>::operator[](size_t index) const
1212
{
1213
D3D12MA_HEAVY_ASSERT(index < m_Count);
1214
return m_pArray[index];
1215
}
1216
#endif // _D3D12MA_VECTOR_FUNCTIONS
1217
#endif // _D3D12MA_VECTOR
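//
// Illustrative usage of the sorted helpers above (cmp returns true when its first
// argument is less than the second; CmpLessU64 is a hypothetical name):
//
//   struct CmpLessU64 { bool operator()(UINT64 a, UINT64 b) const { return a < b; } };
//   Vector<UINT64> sizes(allocs);
//   sizes.InsertSorted(64ull, CmpLessU64{});   // keeps the vector sorted
//   sizes.InsertSorted(16ull, CmpLessU64{});
//   sizes.RemoveSorted(64ull, CmpLessU64{});   // binary search + remove, returns true if found
//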
1218
1219
#ifndef _D3D12MA_STRING_BUILDER
1220
class StringBuilder
1221
{
1222
public:
1223
StringBuilder(const ALLOCATION_CALLBACKS& allocationCallbacks) : m_Data(allocationCallbacks) {}
1224
1225
size_t GetLength() const { return m_Data.size(); }
1226
LPCWSTR GetData() const { return m_Data.data(); }
1227
1228
void Add(WCHAR ch) { m_Data.push_back(ch); }
1229
void Add(LPCWSTR str);
1230
void AddNewLine() { Add(L'\n'); }
1231
void AddNumber(UINT num);
1232
void AddNumber(UINT64 num);
1233
void AddPointer(const void* ptr);
1234
1235
private:
1236
Vector<WCHAR> m_Data;
1237
};
1238
1239
#ifndef _D3D12MA_STRING_BUILDER_FUNCTIONS
1240
void StringBuilder::Add(LPCWSTR str)
1241
{
1242
const size_t len = wcslen(str);
1243
if (len > 0)
1244
{
1245
const size_t oldCount = m_Data.size();
1246
m_Data.resize(oldCount + len);
1247
memcpy(m_Data.data() + oldCount, str, len * sizeof(WCHAR));
1248
}
1249
}
1250
1251
void StringBuilder::AddNumber(UINT num)
1252
{
1253
WCHAR buf[11];
1254
buf[10] = L'\0';
1255
WCHAR *p = &buf[10];
1256
do
1257
{
1258
*--p = L'0' + (num % 10);
1259
num /= 10;
1260
}
1261
while (num);
1262
Add(p);
1263
}
1264
1265
void StringBuilder::AddNumber(UINT64 num)
1266
{
1267
WCHAR buf[21];
1268
buf[20] = L'\0';
1269
WCHAR *p = &buf[20];
1270
do
1271
{
1272
*--p = L'0' + (num % 10);
1273
num /= 10;
1274
}
1275
while (num);
1276
Add(p);
1277
}
1278
1279
void StringBuilder::AddPointer(const void* ptr)
1280
{
1281
WCHAR buf[21];
1282
uintptr_t num = (uintptr_t)ptr;
1283
buf[20] = L'\0';
1284
WCHAR *p = &buf[20];
1285
do
1286
{
1287
*--p = HexDigitToChar((UINT8)(num & 0xF));
1288
num >>= 4;
1289
}
1290
while (num);
1291
Add(p);
1292
}
1293
1294
#endif // _D3D12MA_STRING_BUILDER_FUNCTIONS
1295
#endif // _D3D12MA_STRING_BUILDER
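//
// Illustrative usage of StringBuilder (allocs is an ALLOCATION_CALLBACKS set up as above):
//
//   StringBuilder sb(allocs);
//   sb.Add(L"Blocks: ");
//   sb.AddNumber(42u);
//   sb.AddNewLine();
//   // sb.GetData() / sb.GetLength() give the accumulated WCHAR buffer and its length.
//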
1296
1297
#ifndef _D3D12MA_JSON_WRITER
1298
/*
1299
Allows you to conveniently build a correct JSON document to be written to the
1300
StringBuilder passed to the constructor.
1301
*/
1302
class JsonWriter
1303
{
1304
public:
1305
// stringBuilder - string builder to write the document to. Must remain alive for the whole lifetime of this object.
1306
JsonWriter(const ALLOCATION_CALLBACKS& allocationCallbacks, StringBuilder& stringBuilder);
1307
~JsonWriter();
1308
1309
// Begins object by writing "{".
1310
// Inside an object, you must call pairs of WriteString and a value, e.g.:
1311
// j.BeginObject(true); j.WriteString("A"); j.WriteNumber(1); j.WriteString("B"); j.WriteNumber(2); j.EndObject();
1312
// Will write: { "A": 1, "B": 2 }
1313
void BeginObject(bool singleLine = false);
1314
// Ends object by writing "}".
1315
void EndObject();
1316
1317
// Begins array by writing "[".
1318
// Inside an array, you can write a sequence of any values.
1319
void BeginArray(bool singleLine = false);
1320
// Ends array by writing "]".
1321
void EndArray();
1322
1323
// Writes a string value inside "".
1324
// pStr can contain any UTF-16 characters, including '"', new line etc. - they will be properly escaped.
1325
void WriteString(LPCWSTR pStr);
1326
1327
// Begins writing a string value.
1328
// Call BeginString, ContinueString, ContinueString, ..., EndString instead of
1329
// WriteString to conveniently build the string content incrementally, made of
1330
// parts including numbers.
1331
void BeginString(LPCWSTR pStr = NULL);
1332
// Posts next part of an open string.
1333
void ContinueString(LPCWSTR pStr);
1334
// Posts next part of an open string. The number is converted to decimal characters.
1335
void ContinueString(UINT num);
1336
void ContinueString(UINT64 num);
1337
void ContinueString_Pointer(const void* ptr);
1338
// Posts next part of an open string. Pointer value is converted to characters
1339
// using "%p" formatting - shown as hexadecimal number, e.g.: 000000081276Ad00
1340
// void ContinueString_Pointer(const void* ptr);
1341
// Ends writing a string value by writing '"'.
1342
void EndString(LPCWSTR pStr = NULL);
1343
1344
// Writes a number value.
1345
void WriteNumber(UINT num);
1346
void WriteNumber(UINT64 num);
1347
// Writes a boolean value - false or true.
1348
void WriteBool(bool b);
1349
// Writes a null value.
1350
void WriteNull();
1351
1352
void AddAllocationToObject(const Allocation& alloc);
1353
void AddDetailedStatisticsInfoObject(const DetailedStatistics& stats);
1354
1355
private:
1356
static const WCHAR* const INDENT;
1357
1358
enum CollectionType
1359
{
1360
COLLECTION_TYPE_OBJECT,
1361
COLLECTION_TYPE_ARRAY,
1362
};
1363
struct StackItem
1364
{
1365
CollectionType type;
1366
UINT valueCount;
1367
bool singleLineMode;
1368
};
1369
1370
StringBuilder& m_SB;
1371
Vector<StackItem> m_Stack;
1372
bool m_InsideString;
1373
1374
void BeginValue(bool isString);
1375
void WriteIndent(bool oneLess = false);
1376
};
1377
1378
#ifndef _D3D12MA_JSON_WRITER_FUNCTIONS
1379
const WCHAR* const JsonWriter::INDENT = L" ";
1380
1381
JsonWriter::JsonWriter(const ALLOCATION_CALLBACKS& allocationCallbacks, StringBuilder& stringBuilder)
1382
: m_SB(stringBuilder),
1383
m_Stack(allocationCallbacks),
1384
m_InsideString(false) {}
1385
1386
JsonWriter::~JsonWriter()
1387
{
1388
D3D12MA_ASSERT(!m_InsideString);
1389
D3D12MA_ASSERT(m_Stack.empty());
1390
}
1391
1392
void JsonWriter::BeginObject(bool singleLine)
1393
{
1394
D3D12MA_ASSERT(!m_InsideString);
1395
1396
BeginValue(false);
1397
m_SB.Add(L'{');
1398
1399
StackItem stackItem;
1400
stackItem.type = COLLECTION_TYPE_OBJECT;
1401
stackItem.valueCount = 0;
1402
stackItem.singleLineMode = singleLine;
1403
m_Stack.push_back(stackItem);
1404
}
1405
1406
void JsonWriter::EndObject()
1407
{
1408
D3D12MA_ASSERT(!m_InsideString);
1409
D3D12MA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
1410
D3D12MA_ASSERT(m_Stack.back().valueCount % 2 == 0);
1411
1412
WriteIndent(true);
1413
m_SB.Add(L'}');
1414
1415
m_Stack.pop_back();
1416
}
1417
1418
void JsonWriter::BeginArray(bool singleLine)
1419
{
1420
D3D12MA_ASSERT(!m_InsideString);
1421
1422
BeginValue(false);
1423
m_SB.Add(L'[');
1424
1425
StackItem stackItem;
1426
stackItem.type = COLLECTION_TYPE_ARRAY;
1427
stackItem.valueCount = 0;
1428
stackItem.singleLineMode = singleLine;
1429
m_Stack.push_back(stackItem);
1430
}
1431
1432
void JsonWriter::EndArray()
1433
{
1434
D3D12MA_ASSERT(!m_InsideString);
1435
D3D12MA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
1436
1437
WriteIndent(true);
1438
m_SB.Add(L']');
1439
1440
m_Stack.pop_back();
1441
}
1442
1443
void JsonWriter::WriteString(LPCWSTR pStr)
1444
{
1445
BeginString(pStr);
1446
EndString();
1447
}
1448
1449
void JsonWriter::BeginString(LPCWSTR pStr)
1450
{
1451
D3D12MA_ASSERT(!m_InsideString);
1452
1453
BeginValue(true);
1454
m_InsideString = true;
1455
m_SB.Add(L'"');
1456
if (pStr != NULL)
1457
{
1458
ContinueString(pStr);
1459
}
1460
}
1461
1462
void JsonWriter::ContinueString(LPCWSTR pStr)
1463
{
1464
D3D12MA_ASSERT(m_InsideString);
1465
D3D12MA_ASSERT(pStr);
1466
1467
for (const WCHAR *p = pStr; *p; ++p)
1468
{
1469
// the strings we encode are assumed to be in UTF-16LE format, the native
1470
// windows wide character Unicode format. In this encoding Unicode code
1471
// points U+0000 to U+D7FF and U+E000 to U+FFFF are encoded in two bytes,
1472
// and everything else takes more than two bytes. We will reject any
1473
// multi wchar character encodings for simplicity.
1474
UINT val = (UINT)*p;
1475
D3D12MA_ASSERT(((val <= 0xD7FF) || (0xE000 <= val && val <= 0xFFFF)) &&
1476
"Character not currently supported.");
1477
switch (*p)
1478
{
1479
case L'"': m_SB.Add(L'\\'); m_SB.Add(L'"'); break;
1480
case L'\\': m_SB.Add(L'\\'); m_SB.Add(L'\\'); break;
1481
case L'/': m_SB.Add(L'\\'); m_SB.Add(L'/'); break;
1482
case L'\b': m_SB.Add(L'\\'); m_SB.Add(L'b'); break;
1483
case L'\f': m_SB.Add(L'\\'); m_SB.Add(L'f'); break;
1484
case L'\n': m_SB.Add(L'\\'); m_SB.Add(L'n'); break;
1485
case L'\r': m_SB.Add(L'\\'); m_SB.Add(L'r'); break;
1486
case L'\t': m_SB.Add(L'\\'); m_SB.Add(L't'); break;
1487
default:
1488
// conservatively use encoding \uXXXX for any Unicode character
1489
// requiring more than one byte.
1490
if (32 <= val && val < 256)
1491
m_SB.Add(*p);
1492
else
1493
{
1494
m_SB.Add(L'\\');
1495
m_SB.Add(L'u');
1496
for (UINT i = 0; i < 4; ++i)
1497
{
1498
UINT hexDigit = (val & 0xF000) >> 12;
1499
val <<= 4;
1500
if (hexDigit < 10)
1501
m_SB.Add(L'0' + (WCHAR)hexDigit);
1502
else
1503
m_SB.Add(L'A' + (WCHAR)(hexDigit - 10)); // map 10..15 to 'A'..'F'
1504
}
1505
}
1506
break;
1507
}
1508
}
1509
}
1510
1511
void JsonWriter::ContinueString(UINT num)
1512
{
1513
D3D12MA_ASSERT(m_InsideString);
1514
m_SB.AddNumber(num);
1515
}
1516
1517
void JsonWriter::ContinueString(UINT64 num)
1518
{
1519
D3D12MA_ASSERT(m_InsideString);
1520
m_SB.AddNumber(num);
1521
}
1522
1523
void JsonWriter::ContinueString_Pointer(const void* ptr)
1524
{
1525
D3D12MA_ASSERT(m_InsideString);
1526
m_SB.AddPointer(ptr);
1527
}
1528
1529
void JsonWriter::EndString(LPCWSTR pStr)
1530
{
1531
D3D12MA_ASSERT(m_InsideString);
1532
1533
if (pStr)
1534
ContinueString(pStr);
1535
m_SB.Add(L'"');
1536
m_InsideString = false;
1537
}
1538
1539
void JsonWriter::WriteNumber(UINT num)
1540
{
1541
D3D12MA_ASSERT(!m_InsideString);
1542
BeginValue(false);
1543
m_SB.AddNumber(num);
1544
}
1545
1546
void JsonWriter::WriteNumber(UINT64 num)
1547
{
1548
D3D12MA_ASSERT(!m_InsideString);
1549
BeginValue(false);
1550
m_SB.AddNumber(num);
1551
}
1552
1553
void JsonWriter::WriteBool(bool b)
1554
{
1555
D3D12MA_ASSERT(!m_InsideString);
1556
BeginValue(false);
1557
if (b)
1558
m_SB.Add(L"true");
1559
else
1560
m_SB.Add(L"false");
1561
}
1562
1563
void JsonWriter::WriteNull()
1564
{
1565
D3D12MA_ASSERT(!m_InsideString);
1566
BeginValue(false);
1567
m_SB.Add(L"null");
1568
}
1569
1570
void JsonWriter::AddAllocationToObject(const Allocation& alloc)
1571
{
1572
WriteString(L"Type");
1573
switch (alloc.m_PackedData.GetResourceDimension()) {
1574
case D3D12_RESOURCE_DIMENSION_UNKNOWN:
1575
WriteString(L"UNKNOWN");
1576
break;
1577
case D3D12_RESOURCE_DIMENSION_BUFFER:
1578
WriteString(L"BUFFER");
1579
break;
1580
case D3D12_RESOURCE_DIMENSION_TEXTURE1D:
1581
WriteString(L"TEXTURE1D");
1582
break;
1583
case D3D12_RESOURCE_DIMENSION_TEXTURE2D:
1584
WriteString(L"TEXTURE2D");
1585
break;
1586
case D3D12_RESOURCE_DIMENSION_TEXTURE3D:
1587
WriteString(L"TEXTURE3D");
1588
break;
1589
default: D3D12MA_ASSERT(0); break;
1590
}
1591
1592
WriteString(L"Size");
1593
WriteNumber(alloc.GetSize());
1594
WriteString(L"Usage");
1595
WriteNumber((UINT)alloc.m_PackedData.GetResourceFlags());
1596
1597
void* privateData = alloc.GetPrivateData();
1598
if (privateData)
1599
{
1600
WriteString(L"CustomData");
1601
BeginString();
1602
ContinueString_Pointer(privateData);
1603
EndString();
1604
}
1605
1606
LPCWSTR name = alloc.GetName();
1607
if (name != NULL)
1608
{
1609
WriteString(L"Name");
1610
WriteString(name);
1611
}
1612
if (alloc.m_PackedData.GetTextureLayout())
1613
{
1614
WriteString(L"Layout");
1615
WriteNumber((UINT)alloc.m_PackedData.GetTextureLayout());
1616
}
1617
}
1618
1619
void JsonWriter::AddDetailedStatisticsInfoObject(const DetailedStatistics& stats)
1620
{
1621
BeginObject();
1622
1623
WriteString(L"BlockCount");
1624
WriteNumber(stats.Stats.BlockCount);
1625
WriteString(L"BlockBytes");
1626
WriteNumber(stats.Stats.BlockBytes);
1627
WriteString(L"AllocationCount");
1628
WriteNumber(stats.Stats.AllocationCount);
1629
WriteString(L"AllocationBytes");
1630
WriteNumber(stats.Stats.AllocationBytes);
1631
WriteString(L"UnusedRangeCount");
1632
WriteNumber(stats.UnusedRangeCount);
1633
1634
if (stats.Stats.AllocationCount > 1)
1635
{
1636
WriteString(L"AllocationSizeMin");
1637
WriteNumber(stats.AllocationSizeMin);
1638
WriteString(L"AllocationSizeMax");
1639
WriteNumber(stats.AllocationSizeMax);
1640
}
1641
if (stats.UnusedRangeCount > 1)
1642
{
1643
WriteString(L"UnusedRangeSizeMin");
1644
WriteNumber(stats.UnusedRangeSizeMin);
1645
WriteString(L"UnusedRangeSizeMax");
1646
WriteNumber(stats.UnusedRangeSizeMax);
1647
}
1648
EndObject();
1649
}
1650
1651
void JsonWriter::BeginValue(bool isString)
1652
{
1653
if (!m_Stack.empty())
1654
{
1655
StackItem& currItem = m_Stack.back();
1656
if (currItem.type == COLLECTION_TYPE_OBJECT && currItem.valueCount % 2 == 0)
1657
{
1658
D3D12MA_ASSERT(isString);
1659
}
1660
1661
if (currItem.type == COLLECTION_TYPE_OBJECT && currItem.valueCount % 2 == 1)
1662
{
1663
m_SB.Add(L':'); m_SB.Add(L' ');
1664
}
1665
else if (currItem.valueCount > 0)
1666
{
1667
m_SB.Add(L','); m_SB.Add(L' ');
1668
WriteIndent();
1669
}
1670
else
1671
{
1672
WriteIndent();
1673
}
1674
++currItem.valueCount;
1675
}
1676
}
1677
1678
void JsonWriter::WriteIndent(bool oneLess)
1679
{
1680
if (!m_Stack.empty() && !m_Stack.back().singleLineMode)
1681
{
1682
m_SB.AddNewLine();
1683
1684
size_t count = m_Stack.size();
1685
if (count > 0 && oneLess)
1686
{
1687
--count;
1688
}
1689
for (size_t i = 0; i < count; ++i)
1690
{
1691
m_SB.Add(INDENT);
1692
}
1693
}
1694
}
1695
#endif // _D3D12MA_JSON_WRITER_FUNCTIONS
1696
#endif // _D3D12MA_JSON_WRITER
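//
// Illustrative usage, following the call pattern documented on the class:
//
//   StringBuilder sb(allocs);
//   JsonWriter json(allocs, sb);
//   json.BeginObject();
//   json.WriteString(L"TotalBytes"); json.WriteNumber(65536ull);
//   json.WriteString(L"Blocks");
//   json.BeginArray(true); json.WriteNumber(1u); json.WriteNumber(2u); json.EndArray();
//   json.EndObject();
//   // sb now holds: { "TotalBytes": 65536, "Blocks": [1, 2] } (with indentation and newlines)
//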
1697
1698
#ifndef _D3D12MA_POOL_ALLOCATOR
1699
/*
1700
Allocator for objects of type T using a list of arrays (pools) to speed up
1701
allocation. Number of elements that can be allocated is not bounded because
1702
allocator can create multiple blocks.
1703
T's constructor and destructor are called explicitly in Alloc and Free, but not when
1704
the pool is cleared or destroyed, so T should be trivially destructible.
1705
*/
1706
template<typename T>
1707
class PoolAllocator
1708
{
1709
D3D12MA_CLASS_NO_COPY(PoolAllocator)
1710
public:
1711
// allocationCallbacks externally owned, must outlive this object.
1712
PoolAllocator(const ALLOCATION_CALLBACKS& allocationCallbacks, UINT firstBlockCapacity);
1713
~PoolAllocator() { Clear(); }
1714
1715
void Clear();
1716
template<typename... Types>
1717
T* Alloc(Types... args);
1718
void Free(T* ptr);
1719
1720
private:
1721
union Item
1722
{
1723
UINT NextFreeIndex; // UINT32_MAX means end of list.
1724
alignas(T) char Value[sizeof(T)];
1725
};
1726
1727
struct ItemBlock
1728
{
1729
Item* pItems;
1730
UINT Capacity;
1731
UINT FirstFreeIndex;
1732
};
1733
1734
const ALLOCATION_CALLBACKS& m_AllocationCallbacks;
1735
const UINT m_FirstBlockCapacity;
1736
Vector<ItemBlock> m_ItemBlocks;
1737
1738
ItemBlock& CreateNewBlock();
1739
};
1740
1741
#ifndef _D3D12MA_POOL_ALLOCATOR_FUNCTIONS
1742
template<typename T>
1743
PoolAllocator<T>::PoolAllocator(const ALLOCATION_CALLBACKS& allocationCallbacks, UINT firstBlockCapacity)
1744
: m_AllocationCallbacks(allocationCallbacks),
1745
m_FirstBlockCapacity(firstBlockCapacity),
1746
m_ItemBlocks(allocationCallbacks)
1747
{
1748
D3D12MA_ASSERT(m_FirstBlockCapacity > 1);
1749
}
1750
1751
template<typename T>
1752
void PoolAllocator<T>::Clear()
1753
{
1754
for(size_t i = m_ItemBlocks.size(); i--; )
1755
{
1756
D3D12MA_DELETE_ARRAY(m_AllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
1757
}
1758
m_ItemBlocks.clear(true);
1759
}
1760
1761
template<typename T> template<typename... Types>
1762
T* PoolAllocator<T>::Alloc(Types... args)
1763
{
1764
for(size_t i = m_ItemBlocks.size(); i--; )
1765
{
1766
ItemBlock& block = m_ItemBlocks[i];
1767
// This block has some free items: Use first one.
1768
if(block.FirstFreeIndex != UINT32_MAX)
1769
{
1770
Item* const pItem = &block.pItems[block.FirstFreeIndex];
1771
block.FirstFreeIndex = pItem->NextFreeIndex;
1772
T* result = (T*)&pItem->Value;
1773
new(result)T(std::forward<Types>(args)...); // Explicit constructor call.
1774
return result;
1775
}
1776
}
1777
1778
// No block has free item: Create new one and use it.
1779
ItemBlock& newBlock = CreateNewBlock();
1780
Item* const pItem = &newBlock.pItems[0];
1781
newBlock.FirstFreeIndex = pItem->NextFreeIndex;
1782
T* result = (T*)pItem->Value;
1783
new(result)T(std::forward<Types>(args)...); // Explicit constructor call.
1784
return result;
1785
}
1786
1787
template<typename T>
1788
void PoolAllocator<T>::Free(T* ptr)
1789
{
1790
// Search all memory blocks to find ptr.
1791
for(size_t i = m_ItemBlocks.size(); i--; )
1792
{
1793
ItemBlock& block = m_ItemBlocks[i];
1794
1795
Item* pItemPtr;
1796
memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
1797
1798
// Check if pItemPtr is in address range of this block.
1799
if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
1800
{
1801
ptr->~T(); // Explicit destructor call.
1802
const UINT index = static_cast<UINT>(pItemPtr - block.pItems);
1803
pItemPtr->NextFreeIndex = block.FirstFreeIndex;
1804
block.FirstFreeIndex = index;
1805
return;
1806
}
1807
}
1808
D3D12MA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
1809
}
1810
1811
template<typename T>
1812
typename PoolAllocator<T>::ItemBlock& PoolAllocator<T>::CreateNewBlock()
1813
{
1814
const UINT newBlockCapacity = m_ItemBlocks.empty() ?
1815
m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;
1816
1817
const ItemBlock newBlock = {
1818
D3D12MA_NEW_ARRAY(m_AllocationCallbacks, Item, newBlockCapacity),
1819
newBlockCapacity,
1820
0 };
1821
1822
m_ItemBlocks.push_back(newBlock);
1823
1824
// Setup singly-linked list of all free items in this block.
1825
for(UINT i = 0; i < newBlockCapacity - 1; ++i)
1826
{
1827
newBlock.pItems[i].NextFreeIndex = i + 1;
1828
}
1829
newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX;
1830
return m_ItemBlocks.back();
1831
}
1832
#endif // _D3D12MA_POOL_ALLOCATOR_FUNCTIONS
1833
#endif // _D3D12MA_POOL_ALLOCATOR
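//
// Illustrative usage (MyItem is a hypothetical type; its constructor arguments are
// forwarded by Alloc):
//
//   PoolAllocator<MyItem> pool(allocs, 32);   // first block holds 32 items, must be > 1
//   MyItem* item = pool.Alloc(/* MyItem constructor args */);
//   pool.Free(item);                          // returns the slot to the block's free list
//   pool.Clear();                             // releases all blocks (live items are not destructed)
//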
1834
1835
#ifndef _D3D12MA_LIST
1836
/*
1837
Doubly linked list, with elements allocated out of PoolAllocator.
1838
Has custom interface, as well as STL-style interface, including iterator and
1839
const_iterator.
1840
*/
1841
template<typename T>
1842
class List
1843
{
1844
D3D12MA_CLASS_NO_COPY(List)
1845
public:
1846
struct Item
1847
{
1848
Item* pPrev;
1849
Item* pNext;
1850
T Value;
1851
};
1852
1853
class reverse_iterator;
1854
class const_reverse_iterator;
1855
class iterator
1856
{
1857
friend class List<T>;
1858
friend class const_iterator;
1859
1860
public:
1861
iterator() = default;
1862
iterator(const reverse_iterator& src)
1863
: m_pList(src.m_pList), m_pItem(src.m_pItem) {}
1864
1865
T& operator*() const;
1866
T* operator->() const;
1867
1868
iterator& operator++();
1869
iterator& operator--();
1870
iterator operator++(int);
1871
iterator operator--(int);
1872
1873
bool operator==(const iterator& rhs) const;
1874
bool operator!=(const iterator& rhs) const;
1875
1876
private:
1877
List<T>* m_pList = NULL;
1878
Item* m_pItem = NULL;
1879
1880
iterator(List<T>* pList, Item* pItem) : m_pList(pList), m_pItem(pItem) {}
1881
};
1882
1883
class reverse_iterator
1884
{
1885
friend class List<T>;
1886
friend class const_reverse_iterator;
1887
1888
public:
1889
reverse_iterator() = default;
1890
reverse_iterator(const iterator& src)
1891
: m_pList(src.m_pList), m_pItem(src.m_pItem) {}
1892
1893
T& operator*() const;
1894
T* operator->() const;
1895
1896
reverse_iterator& operator++();
1897
reverse_iterator& operator--();
1898
reverse_iterator operator++(int);
1899
reverse_iterator operator--(int);
1900
1901
bool operator==(const reverse_iterator& rhs) const;
1902
bool operator!=(const reverse_iterator& rhs) const;
1903
1904
private:
1905
List<T>* m_pList = NULL;
1906
Item* m_pItem = NULL;
1907
1908
reverse_iterator(List<T>* pList, Item* pItem)
1909
: m_pList(pList), m_pItem(pItem) {}
1910
};
1911
1912
class const_iterator
1913
{
1914
friend class List<T>;
1915
1916
public:
1917
const_iterator() = default;
1918
const_iterator(const iterator& src)
1919
: m_pList(src.m_pList), m_pItem(src.m_pItem) {}
1920
const_iterator(const reverse_iterator& src)
1921
: m_pList(src.m_pList), m_pItem(src.m_pItem) {}
1922
const_iterator(const const_reverse_iterator& src)
1923
: m_pList(src.m_pList), m_pItem(src.m_pItem) {}
1924
1925
iterator dropConst() const;
1926
const T& operator*() const;
1927
const T* operator->() const;
1928
1929
const_iterator& operator++();
1930
const_iterator& operator--();
1931
const_iterator operator++(int);
1932
const_iterator operator--(int);
1933
1934
bool operator==(const const_iterator& rhs) const;
1935
bool operator!=(const const_iterator& rhs) const;
1936
1937
private:
1938
const List<T>* m_pList = NULL;
1939
const Item* m_pItem = NULL;
1940
1941
const_iterator(const List<T>* pList, const Item* pItem)
1942
: m_pList(pList), m_pItem(pItem) {}
1943
};
1944
1945
class const_reverse_iterator
1946
{
1947
friend class List<T>;
1948
1949
public:
1950
const_reverse_iterator() = default;
1951
const_reverse_iterator(const iterator& src)
1952
: m_pList(src.m_pList), m_pItem(src.m_pItem) {}
1953
const_reverse_iterator(const reverse_iterator& src)
1954
: m_pList(src.m_pList), m_pItem(src.m_pItem) {}
1955
const_reverse_iterator(const const_iterator& src)
1956
: m_pList(src.m_pList), m_pItem(src.m_pItem) {}
1957
1958
reverse_iterator dropConst() const;
1959
const T& operator*() const;
1960
const T* operator->() const;
1961
1962
const_reverse_iterator& operator++();
1963
const_reverse_iterator& operator--();
1964
const_reverse_iterator operator++(int);
1965
const_reverse_iterator operator--(int);
1966
1967
bool operator==(const const_reverse_iterator& rhs) const;
1968
bool operator!=(const const_reverse_iterator& rhs) const;
1969
1970
private:
1971
const List<T>* m_pList = NULL;
1972
const Item* m_pItem = NULL;
1973
1974
const_reverse_iterator(const List<T>* pList, const Item* pItem)
1975
: m_pList(pList), m_pItem(pItem) {}
1976
};
1977
1978
// allocationCallbacks externally owned, must outlive this object.
1979
List(const ALLOCATION_CALLBACKS& allocationCallbacks);
1980
// Intentionally not calling Clear, because that would cause unnecessary
1981
// computations just to return all items to m_ItemAllocator as free.
1982
~List() = default;
1983
1984
size_t GetCount() const { return m_Count; }
1985
bool IsEmpty() const { return m_Count == 0; }
1986
1987
Item* Front() { return m_pFront; }
1988
const Item* Front() const { return m_pFront; }
1989
Item* Back() { return m_pBack; }
1990
const Item* Back() const { return m_pBack; }
1991
1992
bool empty() const { return IsEmpty(); }
1993
size_t size() const { return GetCount(); }
1994
void push_back(const T& value) { PushBack(value); }
1995
iterator insert(iterator it, const T& value) { return iterator(this, InsertBefore(it.m_pItem, value)); }
1996
void clear() { Clear(); }
1997
void erase(iterator it) { Remove(it.m_pItem); }
1998
1999
iterator begin() { return iterator(this, Front()); }
2000
iterator end() { return iterator(this, NULL); }
2001
reverse_iterator rbegin() { return reverse_iterator(this, Back()); }
2002
reverse_iterator rend() { return reverse_iterator(this, NULL); }
2003
2004
const_iterator cbegin() const { return const_iterator(this, Front()); }
2005
const_iterator cend() const { return const_iterator(this, NULL); }
2006
const_iterator begin() const { return cbegin(); }
2007
const_iterator end() const { return cend(); }
2008
2009
const_reverse_iterator crbegin() const { return const_reverse_iterator(this, Back()); }
2010
const_reverse_iterator crend() const { return const_reverse_iterator(this, NULL); }
2011
const_reverse_iterator rbegin() const { return crbegin(); }
2012
const_reverse_iterator rend() const { return crend(); }
2013
2014
Item* PushBack();
2015
Item* PushFront();
2016
Item* PushBack(const T& value);
2017
Item* PushFront(const T& value);
2018
void PopBack();
2019
void PopFront();
2020
2021
// Item can be null - it means PushBack.
2022
Item* InsertBefore(Item* pItem);
2023
// Item can be null - it means PushFront.
2024
Item* InsertAfter(Item* pItem);
2025
Item* InsertBefore(Item* pItem, const T& value);
2026
Item* InsertAfter(Item* pItem, const T& value);
2027
2028
void Clear();
2029
void Remove(Item* pItem);
2030
2031
private:
2032
const ALLOCATION_CALLBACKS& m_AllocationCallbacks;
2033
PoolAllocator<Item> m_ItemAllocator;
2034
Item* m_pFront;
2035
Item* m_pBack;
2036
size_t m_Count;
2037
};
2038
2039
#ifndef _D3D12MA_LIST_ITERATOR_FUNCTIONS
2040
template<typename T>
2041
T& List<T>::iterator::operator*() const
2042
{
2043
D3D12MA_HEAVY_ASSERT(m_pItem != NULL);
2044
return m_pItem->Value;
2045
}
2046
2047
template<typename T>
2048
T* List<T>::iterator::operator->() const
2049
{
2050
D3D12MA_HEAVY_ASSERT(m_pItem != NULL);
2051
return &m_pItem->Value;
2052
}
2053
2054
template<typename T>
2055
typename List<T>::iterator& List<T>::iterator::operator++()
2056
{
2057
D3D12MA_HEAVY_ASSERT(m_pItem != NULL);
2058
m_pItem = m_pItem->pNext;
2059
return *this;
2060
}
2061
2062
template<typename T>
2063
typename List<T>::iterator& List<T>::iterator::operator--()
2064
{
2065
if (m_pItem != NULL)
2066
{
2067
m_pItem = m_pItem->pPrev;
2068
}
2069
else
2070
{
2071
D3D12MA_HEAVY_ASSERT(!m_pList->IsEmpty());
2072
m_pItem = m_pList->Back();
2073
}
2074
return *this;
2075
}
2076
2077
template<typename T>
2078
typename List<T>::iterator List<T>::iterator::operator++(int)
2079
{
2080
iterator result = *this;
2081
++* this;
2082
return result;
2083
}
2084
2085
template<typename T>
2086
typename List<T>::iterator List<T>::iterator::operator--(int)
2087
{
2088
iterator result = *this;
2089
--* this;
2090
return result;
2091
}
2092
2093
template<typename T>
2094
bool List<T>::iterator::operator==(const iterator& rhs) const
2095
{
2096
D3D12MA_HEAVY_ASSERT(m_pList == rhs.m_pList);
2097
return m_pItem == rhs.m_pItem;
2098
}
2099
2100
template<typename T>
2101
bool List<T>::iterator::operator!=(const iterator& rhs) const
2102
{
2103
D3D12MA_HEAVY_ASSERT(m_pList == rhs.m_pList);
2104
return m_pItem != rhs.m_pItem;
2105
}
2106
#endif // _D3D12MA_LIST_ITERATOR_FUNCTIONS
2107
2108
#ifndef _D3D12MA_LIST_REVERSE_ITERATOR_FUNCTIONS
2109
template<typename T>
2110
T& List<T>::reverse_iterator::operator*() const
2111
{
2112
D3D12MA_HEAVY_ASSERT(m_pItem != NULL);
2113
return m_pItem->Value;
2114
}
2115
2116
template<typename T>
2117
T* List<T>::reverse_iterator::operator->() const
2118
{
2119
D3D12MA_HEAVY_ASSERT(m_pItem != NULL);
2120
return &m_pItem->Value;
2121
}
2122
2123
template<typename T>
2124
typename List<T>::reverse_iterator& List<T>::reverse_iterator::operator++()
2125
{
2126
D3D12MA_HEAVY_ASSERT(m_pItem != NULL);
2127
m_pItem = m_pItem->pPrev;
2128
return *this;
2129
}
2130
2131
template<typename T>
2132
typename List<T>::reverse_iterator& List<T>::reverse_iterator::operator--()
2133
{
2134
if (m_pItem != NULL)
2135
{
2136
m_pItem = m_pItem->pNext;
2137
}
2138
else
2139
{
2140
D3D12MA_HEAVY_ASSERT(!m_pList->IsEmpty());
2141
m_pItem = m_pList->Front();
2142
}
2143
return *this;
2144
}
2145
2146
template<typename T>
2147
typename List<T>::reverse_iterator List<T>::reverse_iterator::operator++(int)
2148
{
2149
reverse_iterator result = *this;
2150
++* this;
2151
return result;
2152
}
2153
2154
template<typename T>
2155
typename List<T>::reverse_iterator List<T>::reverse_iterator::operator--(int)
2156
{
2157
reverse_iterator result = *this;
2158
--* this;
2159
return result;
2160
}
2161
2162
template<typename T>
2163
bool List<T>::reverse_iterator::operator==(const reverse_iterator& rhs) const
2164
{
2165
D3D12MA_HEAVY_ASSERT(m_pList == rhs.m_pList);
2166
return m_pItem == rhs.m_pItem;
2167
}
2168
2169
template<typename T>
2170
bool List<T>::reverse_iterator::operator!=(const reverse_iterator& rhs) const
2171
{
2172
D3D12MA_HEAVY_ASSERT(m_pList == rhs.m_pList);
2173
return m_pItem != rhs.m_pItem;
2174
}
2175
#endif // _D3D12MA_LIST_REVERSE_ITERATOR_FUNCTIONS
2176
2177
#ifndef _D3D12MA_LIST_CONST_ITERATOR_FUNCTIONS
2178
template<typename T>
2179
typename List<T>::iterator List<T>::const_iterator::dropConst() const
2180
{
2181
return iterator(const_cast<List<T>*>(m_pList), const_cast<Item*>(m_pItem));
2182
}
2183
2184
template<typename T>
2185
const T& List<T>::const_iterator::operator*() const
2186
{
2187
D3D12MA_HEAVY_ASSERT(m_pItem != NULL);
2188
return m_pItem->Value;
2189
}
2190
2191
template<typename T>
2192
const T* List<T>::const_iterator::operator->() const
2193
{
2194
D3D12MA_HEAVY_ASSERT(m_pItem != NULL);
2195
return &m_pItem->Value;
2196
}
2197
2198
template<typename T>
2199
typename List<T>::const_iterator& List<T>::const_iterator::operator++()
2200
{
2201
D3D12MA_HEAVY_ASSERT(m_pItem != NULL);
2202
m_pItem = m_pItem->pNext;
2203
return *this;
2204
}
2205
2206
template<typename T>
2207
typename List<T>::const_iterator& List<T>::const_iterator::operator--()
2208
{
2209
if (m_pItem != NULL)
2210
{
2211
m_pItem = m_pItem->pPrev;
2212
}
2213
else
2214
{
2215
D3D12MA_HEAVY_ASSERT(!m_pList->IsEmpty());
2216
m_pItem = m_pList->Back();
2217
}
2218
return *this;
2219
}
2220
2221
template<typename T>
2222
typename List<T>::const_iterator List<T>::const_iterator::operator++(int)
2223
{
2224
const_iterator result = *this;
2225
++* this;
2226
return result;
2227
}
2228
2229
template<typename T>
2230
typename List<T>::const_iterator List<T>::const_iterator::operator--(int)
2231
{
2232
const_iterator result = *this;
2233
--* this;
2234
return result;
2235
}
2236
2237
template<typename T>
2238
bool List<T>::const_iterator::operator==(const const_iterator& rhs) const
2239
{
2240
D3D12MA_HEAVY_ASSERT(m_pList == rhs.m_pList);
2241
return m_pItem == rhs.m_pItem;
2242
}
2243
2244
template<typename T>
2245
bool List<T>::const_iterator::operator!=(const const_iterator& rhs) const
2246
{
2247
D3D12MA_HEAVY_ASSERT(m_pList == rhs.m_pList);
2248
return m_pItem != rhs.m_pItem;
2249
}
2250
#endif // _D3D12MA_LIST_CONST_ITERATOR_FUNCTIONS
2251
2252
#ifndef _D3D12MA_LIST_CONST_REVERSE_ITERATOR_FUNCTIONS
2253
template<typename T>
2254
typename List<T>::reverse_iterator List<T>::const_reverse_iterator::dropConst() const
2255
{
2256
return reverse_iterator(const_cast<List<T>*>(m_pList), const_cast<Item*>(m_pItem));
2257
}
2258
2259
template<typename T>
2260
const T& List<T>::const_reverse_iterator::operator*() const
2261
{
2262
D3D12MA_HEAVY_ASSERT(m_pItem != NULL);
2263
return m_pItem->Value;
2264
}
2265
2266
template<typename T>
2267
const T* List<T>::const_reverse_iterator::operator->() const
2268
{
2269
D3D12MA_HEAVY_ASSERT(m_pItem != NULL);
2270
return &m_pItem->Value;
2271
}
2272
2273
template<typename T>
2274
typename List<T>::const_reverse_iterator& List<T>::const_reverse_iterator::operator++()
2275
{
2276
D3D12MA_HEAVY_ASSERT(m_pItem != NULL);
2277
m_pItem = m_pItem->pPrev;
2278
return *this;
2279
}
2280
2281
template<typename T>
2282
typename List<T>::const_reverse_iterator& List<T>::const_reverse_iterator::operator--()
2283
{
2284
if (m_pItem != NULL)
2285
{
2286
m_pItem = m_pItem->pNext;
2287
}
2288
else
2289
{
2290
D3D12MA_HEAVY_ASSERT(!m_pList->IsEmpty());
2291
m_pItem = m_pList->Front();
2292
}
2293
return *this;
2294
}
2295
2296
template<typename T>
2297
typename List<T>::const_reverse_iterator List<T>::const_reverse_iterator::operator++(int)
2298
{
2299
const_reverse_iterator result = *this;
2300
++* this;
2301
return result;
2302
}
2303
2304
template<typename T>
2305
typename List<T>::const_reverse_iterator List<T>::const_reverse_iterator::operator--(int)
2306
{
2307
const_reverse_iterator result = *this;
2308
--* this;
2309
return result;
2310
}
2311
2312
template<typename T>
2313
bool List<T>::const_reverse_iterator::operator==(const const_reverse_iterator& rhs) const
2314
{
2315
D3D12MA_HEAVY_ASSERT(m_pList == rhs.m_pList);
2316
return m_pItem == rhs.m_pItem;
2317
}
2318
2319
template<typename T>
2320
bool List<T>::const_reverse_iterator::operator!=(const const_reverse_iterator& rhs) const
2321
{
2322
D3D12MA_HEAVY_ASSERT(m_pList == rhs.m_pList);
2323
return m_pItem != rhs.m_pItem;
2324
}
2325
#endif // _D3D12MA_LIST_CONST_REVERSE_ITERATOR_FUNCTIONS
2326
2327
#ifndef _D3D12MA_LIST_FUNCTIONS
2328
template<typename T>
2329
List<T>::List(const ALLOCATION_CALLBACKS& allocationCallbacks)
2330
: m_AllocationCallbacks(allocationCallbacks),
2331
m_ItemAllocator(allocationCallbacks, 128),
2332
m_pFront(NULL),
2333
m_pBack(NULL),
2334
m_Count(0) {}
2335
2336
template<typename T>
2337
void List<T>::Clear()
2338
{
2339
if(!IsEmpty())
2340
{
2341
Item* pItem = m_pBack;
2342
while(pItem != NULL)
2343
{
2344
Item* const pPrevItem = pItem->pPrev;
2345
m_ItemAllocator.Free(pItem);
2346
pItem = pPrevItem;
2347
}
2348
m_pFront = NULL;
2349
m_pBack = NULL;
2350
m_Count = 0;
2351
}
2352
}
2353
2354
template<typename T>
2355
typename List<T>::Item* List<T>::PushBack()
2356
{
2357
Item* const pNewItem = m_ItemAllocator.Alloc();
2358
pNewItem->pNext = NULL;
2359
if(IsEmpty())
2360
{
2361
pNewItem->pPrev = NULL;
2362
m_pFront = pNewItem;
2363
m_pBack = pNewItem;
2364
m_Count = 1;
2365
}
2366
else
2367
{
2368
pNewItem->pPrev = m_pBack;
2369
m_pBack->pNext = pNewItem;
2370
m_pBack = pNewItem;
2371
++m_Count;
2372
}
2373
return pNewItem;
2374
}
2375
2376
template<typename T>
2377
typename List<T>::Item* List<T>::PushFront()
2378
{
2379
Item* const pNewItem = m_ItemAllocator.Alloc();
2380
pNewItem->pPrev = NULL;
2381
if(IsEmpty())
2382
{
2383
pNewItem->pNext = NULL;
2384
m_pFront = pNewItem;
2385
m_pBack = pNewItem;
2386
m_Count = 1;
2387
}
2388
else
2389
{
2390
pNewItem->pNext = m_pFront;
2391
m_pFront->pPrev = pNewItem;
2392
m_pFront = pNewItem;
2393
++m_Count;
2394
}
2395
return pNewItem;
2396
}
2397
2398
template<typename T>
2399
typename List<T>::Item* List<T>::PushBack(const T& value)
2400
{
2401
Item* const pNewItem = PushBack();
2402
pNewItem->Value = value;
2403
return pNewItem;
2404
}
2405
2406
template<typename T>
2407
typename List<T>::Item* List<T>::PushFront(const T& value)
2408
{
2409
Item* const pNewItem = PushFront();
2410
pNewItem->Value = value;
2411
return pNewItem;
2412
}
2413
2414
template<typename T>
2415
void List<T>::PopBack()
2416
{
2417
D3D12MA_HEAVY_ASSERT(m_Count > 0);
2418
Item* const pBackItem = m_pBack;
2419
Item* const pPrevItem = pBackItem->pPrev;
2420
if(pPrevItem != NULL)
2421
{
2422
pPrevItem->pNext = NULL;
2423
}
2424
m_pBack = pPrevItem;
2425
m_ItemAllocator.Free(pBackItem);
2426
--m_Count;
2427
}
2428
2429
template<typename T>
2430
void List<T>::PopFront()
2431
{
2432
D3D12MA_HEAVY_ASSERT(m_Count > 0);
2433
Item* const pFrontItem = m_pFront;
2434
Item* const pNextItem = pFrontItem->pNext;
2435
if(pNextItem != NULL)
2436
{
2437
pNextItem->pPrev = NULL;
2438
}
2439
m_pFront = pNextItem;
2440
m_ItemAllocator.Free(pFrontItem);
2441
--m_Count;
2442
}
2443
2444
template<typename T>
2445
void List<T>::Remove(Item* pItem)
2446
{
2447
D3D12MA_HEAVY_ASSERT(pItem != NULL);
2448
D3D12MA_HEAVY_ASSERT(m_Count > 0);
2449
2450
if(pItem->pPrev != NULL)
2451
{
2452
pItem->pPrev->pNext = pItem->pNext;
2453
}
2454
else
2455
{
2456
D3D12MA_HEAVY_ASSERT(m_pFront == pItem);
2457
m_pFront = pItem->pNext;
2458
}
2459
2460
if(pItem->pNext != NULL)
2461
{
2462
pItem->pNext->pPrev = pItem->pPrev;
2463
}
2464
else
2465
{
2466
D3D12MA_HEAVY_ASSERT(m_pBack == pItem);
2467
m_pBack = pItem->pPrev;
2468
}
2469
2470
m_ItemAllocator.Free(pItem);
2471
--m_Count;
2472
}
2473
2474
template<typename T>
2475
typename List<T>::Item* List<T>::InsertBefore(Item* pItem)
2476
{
2477
if(pItem != NULL)
2478
{
2479
Item* const prevItem = pItem->pPrev;
2480
Item* const newItem = m_ItemAllocator.Alloc();
2481
newItem->pPrev = prevItem;
2482
newItem->pNext = pItem;
2483
pItem->pPrev = newItem;
2484
if(prevItem != NULL)
2485
{
2486
prevItem->pNext = newItem;
2487
}
2488
else
2489
{
2490
D3D12MA_HEAVY_ASSERT(m_pFront == pItem);
2491
m_pFront = newItem;
2492
}
2493
++m_Count;
2494
return newItem;
2495
}
2496
else
2497
{
2498
return PushBack();
2499
}
2500
}
2501
2502
template<typename T>
2503
typename List<T>::Item* List<T>::InsertAfter(Item* pItem)
2504
{
2505
if(pItem != NULL)
2506
{
2507
Item* const nextItem = pItem->pNext;
2508
Item* const newItem = m_ItemAllocator.Alloc();
2509
newItem->pNext = nextItem;
2510
newItem->pPrev = pItem;
2511
pItem->pNext = newItem;
2512
if(nextItem != NULL)
2513
{
2514
nextItem->pPrev = newItem;
2515
}
2516
else
2517
{
2518
D3D12MA_HEAVY_ASSERT(m_pBack == pItem);
2519
m_pBack = newItem;
2520
}
2521
++m_Count;
2522
return newItem;
2523
}
2524
else
2525
return PushFront();
2526
}
2527
2528
template<typename T>
2529
typename List<T>::Item* List<T>::InsertBefore(Item* pItem, const T& value)
2530
{
2531
Item* const newItem = InsertBefore(pItem);
2532
newItem->Value = value;
2533
return newItem;
2534
}
2535
2536
template<typename T>
2537
typename List<T>::Item* List<T>::InsertAfter(Item* pItem, const T& value)
2538
{
2539
Item* const newItem = InsertAfter(pItem);
2540
newItem->Value = value;
2541
return newItem;
2542
}
2543
#endif // _D3D12MA_LIST_FUNCTIONS
2544
#endif // _D3D12MA_LIST
2545
2546
#ifndef _D3D12MA_INTRUSIVE_LINKED_LIST
2547
/*
2548
Expected interface of ItemTypeTraits:
2549
struct MyItemTypeTraits
2550
{
2551
using ItemType = MyItem;
2552
static ItemType* GetPrev(const ItemType* item) { return item->myPrevPtr; }
2553
static ItemType* GetNext(const ItemType* item) { return item->myNextPtr; }
2554
static ItemType*& AccessPrev(ItemType* item) { return item->myPrevPtr; }
2555
static ItemType*& AccessNext(ItemType* item) { return item->myNextPtr; }
2556
};
2557
*/
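//
// Illustrative sketch, not part of the library; MyItem and MyItemTypeTraits are
// the hypothetical names from the comment above. The item carries its own
// links, the list never allocates, and it must be emptied (e.g. via RemoveAll)
// before destruction because the destructor asserts IsEmpty():
//
//     struct MyItem
//     {
//         MyItem* myPrevPtr = NULL;
//         MyItem* myNextPtr = NULL;
//         UINT64 payload = 0;
//     };
//
//     IntrusiveLinkedList<MyItemTypeTraits> list;
//     MyItem a, b;
//     list.PushBack(&a);
//     list.InsertAfter(&a, &b);  // a <-> b
//     list.Remove(&a);           // Links of `a` are reset to NULL.
//     list.RemoveAll();          // Leaves the list empty for destruction.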
2558
template<typename ItemTypeTraits>
2559
class IntrusiveLinkedList
2560
{
2561
public:
2562
using ItemType = typename ItemTypeTraits::ItemType;
2563
static ItemType* GetPrev(const ItemType* item) { return ItemTypeTraits::GetPrev(item); }
2564
static ItemType* GetNext(const ItemType* item) { return ItemTypeTraits::GetNext(item); }
2565
2566
// Movable, not copyable.
2567
IntrusiveLinkedList() = default;
2568
IntrusiveLinkedList(const IntrusiveLinkedList&) = delete;
2569
IntrusiveLinkedList(IntrusiveLinkedList&& src);
2570
IntrusiveLinkedList& operator=(const IntrusiveLinkedList&) = delete;
2571
IntrusiveLinkedList& operator=(IntrusiveLinkedList&& src);
2572
~IntrusiveLinkedList() { D3D12MA_HEAVY_ASSERT(IsEmpty()); }
2573
2574
size_t GetCount() const { return m_Count; }
2575
bool IsEmpty() const { return m_Count == 0; }
2576
2577
ItemType* Front() { return m_Front; }
2578
ItemType* Back() { return m_Back; }
2579
const ItemType* Front() const { return m_Front; }
2580
const ItemType* Back() const { return m_Back; }
2581
2582
void PushBack(ItemType* item);
2583
void PushFront(ItemType* item);
2584
ItemType* PopBack();
2585
ItemType* PopFront();
2586
2587
// existingItem can be null - it means PushBack.
2588
void InsertBefore(ItemType* existingItem, ItemType* newItem);
2589
// existingItem can be null - it means PushFront.
2590
void InsertAfter(ItemType* existingItem, ItemType* newItem);
2591
2592
void Remove(ItemType* item);
2593
void RemoveAll();
2594
2595
private:
2596
ItemType* m_Front = NULL;
2597
ItemType* m_Back = NULL;
2598
size_t m_Count = 0;
2599
};
2600
2601
#ifndef _D3D12MA_INTRUSIVE_LINKED_LIST_FUNCTIONS
2602
template<typename ItemTypeTraits>
2603
IntrusiveLinkedList<ItemTypeTraits>::IntrusiveLinkedList(IntrusiveLinkedList&& src)
2604
: m_Front(src.m_Front), m_Back(src.m_Back), m_Count(src.m_Count)
2605
{
2606
src.m_Front = src.m_Back = NULL;
2607
src.m_Count = 0;
2608
}
2609
2610
template<typename ItemTypeTraits>
2611
IntrusiveLinkedList<ItemTypeTraits>& IntrusiveLinkedList<ItemTypeTraits>::operator=(IntrusiveLinkedList&& src)
2612
{
2613
if (&src != this)
2614
{
2615
D3D12MA_HEAVY_ASSERT(IsEmpty());
2616
m_Front = src.m_Front;
2617
m_Back = src.m_Back;
2618
m_Count = src.m_Count;
2619
src.m_Front = src.m_Back = NULL;
2620
src.m_Count = 0;
2621
}
2622
return *this;
2623
}
2624
2625
template<typename ItemTypeTraits>
2626
void IntrusiveLinkedList<ItemTypeTraits>::PushBack(ItemType* item)
2627
{
2628
D3D12MA_HEAVY_ASSERT(ItemTypeTraits::GetPrev(item) == NULL && ItemTypeTraits::GetNext(item) == NULL);
2629
if (IsEmpty())
2630
{
2631
m_Front = item;
2632
m_Back = item;
2633
m_Count = 1;
2634
}
2635
else
2636
{
2637
ItemTypeTraits::AccessPrev(item) = m_Back;
2638
ItemTypeTraits::AccessNext(m_Back) = item;
2639
m_Back = item;
2640
++m_Count;
2641
}
2642
}
2643
2644
template<typename ItemTypeTraits>
2645
void IntrusiveLinkedList<ItemTypeTraits>::PushFront(ItemType* item)
2646
{
2647
D3D12MA_HEAVY_ASSERT(ItemTypeTraits::GetPrev(item) == NULL && ItemTypeTraits::GetNext(item) == NULL);
2648
if (IsEmpty())
2649
{
2650
m_Front = item;
2651
m_Back = item;
2652
m_Count = 1;
2653
}
2654
else
2655
{
2656
ItemTypeTraits::AccessNext(item) = m_Front;
2657
ItemTypeTraits::AccessPrev(m_Front) = item;
2658
m_Front = item;
2659
++m_Count;
2660
}
2661
}
2662
2663
template<typename ItemTypeTraits>
2664
typename IntrusiveLinkedList<ItemTypeTraits>::ItemType* IntrusiveLinkedList<ItemTypeTraits>::PopBack()
2665
{
2666
D3D12MA_HEAVY_ASSERT(m_Count > 0);
2667
ItemType* const backItem = m_Back;
2668
ItemType* const prevItem = ItemTypeTraits::GetPrev(backItem);
2669
if (prevItem != NULL)
2670
{
2671
ItemTypeTraits::AccessNext(prevItem) = NULL;
2672
}
2673
m_Back = prevItem;
2674
--m_Count;
2675
ItemTypeTraits::AccessPrev(backItem) = NULL;
2676
ItemTypeTraits::AccessNext(backItem) = NULL;
2677
return backItem;
2678
}
2679
2680
template<typename ItemTypeTraits>
2681
typename IntrusiveLinkedList<ItemTypeTraits>::ItemType* IntrusiveLinkedList<ItemTypeTraits>::PopFront()
2682
{
2683
D3D12MA_HEAVY_ASSERT(m_Count > 0);
2684
ItemType* const frontItem = m_Front;
2685
ItemType* const nextItem = ItemTypeTraits::GetNext(frontItem);
2686
if (nextItem != NULL)
2687
{
2688
ItemTypeTraits::AccessPrev(nextItem) = NULL;
2689
}
2690
m_Front = nextItem;
2691
--m_Count;
2692
ItemTypeTraits::AccessPrev(frontItem) = NULL;
2693
ItemTypeTraits::AccessNext(frontItem) = NULL;
2694
return frontItem;
2695
}
2696
2697
template<typename ItemTypeTraits>
2698
void IntrusiveLinkedList<ItemTypeTraits>::InsertBefore(ItemType* existingItem, ItemType* newItem)
2699
{
2700
D3D12MA_HEAVY_ASSERT(newItem != NULL && ItemTypeTraits::GetPrev(newItem) == NULL && ItemTypeTraits::GetNext(newItem) == NULL);
2701
if (existingItem != NULL)
2702
{
2703
ItemType* const prevItem = ItemTypeTraits::GetPrev(existingItem);
2704
ItemTypeTraits::AccessPrev(newItem) = prevItem;
2705
ItemTypeTraits::AccessNext(newItem) = existingItem;
2706
ItemTypeTraits::AccessPrev(existingItem) = newItem;
2707
if (prevItem != NULL)
2708
{
2709
ItemTypeTraits::AccessNext(prevItem) = newItem;
2710
}
2711
else
2712
{
2713
D3D12MA_HEAVY_ASSERT(m_Front == existingItem);
2714
m_Front = newItem;
2715
}
2716
++m_Count;
2717
}
2718
else
2719
PushBack(newItem);
2720
}
2721
2722
template<typename ItemTypeTraits>
2723
void IntrusiveLinkedList<ItemTypeTraits>::InsertAfter(ItemType* existingItem, ItemType* newItem)
2724
{
2725
D3D12MA_HEAVY_ASSERT(newItem != NULL && ItemTypeTraits::GetPrev(newItem) == NULL && ItemTypeTraits::GetNext(newItem) == NULL);
2726
if (existingItem != NULL)
2727
{
2728
ItemType* const nextItem = ItemTypeTraits::GetNext(existingItem);
2729
ItemTypeTraits::AccessNext(newItem) = nextItem;
2730
ItemTypeTraits::AccessPrev(newItem) = existingItem;
2731
ItemTypeTraits::AccessNext(existingItem) = newItem;
2732
if (nextItem != NULL)
2733
{
2734
ItemTypeTraits::AccessPrev(nextItem) = newItem;
2735
}
2736
else
2737
{
2738
D3D12MA_HEAVY_ASSERT(m_Back == existingItem);
2739
m_Back = newItem;
2740
}
2741
++m_Count;
2742
}
2743
else
2744
PushFront(newItem);
2745
}
2746
2747
template<typename ItemTypeTraits>
2748
void IntrusiveLinkedList<ItemTypeTraits>::Remove(ItemType* item)
2749
{
2750
D3D12MA_HEAVY_ASSERT(item != NULL && m_Count > 0);
2751
if (ItemTypeTraits::GetPrev(item) != NULL)
2752
{
2753
ItemTypeTraits::AccessNext(ItemTypeTraits::AccessPrev(item)) = ItemTypeTraits::GetNext(item);
2754
}
2755
else
2756
{
2757
D3D12MA_HEAVY_ASSERT(m_Front == item);
2758
m_Front = ItemTypeTraits::GetNext(item);
2759
}
2760
2761
if (ItemTypeTraits::GetNext(item) != NULL)
2762
{
2763
ItemTypeTraits::AccessPrev(ItemTypeTraits::AccessNext(item)) = ItemTypeTraits::GetPrev(item);
2764
}
2765
else
2766
{
2767
D3D12MA_HEAVY_ASSERT(m_Back == item);
2768
m_Back = ItemTypeTraits::GetPrev(item);
2769
}
2770
ItemTypeTraits::AccessPrev(item) = NULL;
2771
ItemTypeTraits::AccessNext(item) = NULL;
2772
--m_Count;
2773
}
2774
2775
template<typename ItemTypeTraits>
2776
void IntrusiveLinkedList<ItemTypeTraits>::RemoveAll()
2777
{
2778
if (!IsEmpty())
2779
{
2780
ItemType* item = m_Back;
2781
while (item != NULL)
2782
{
2783
ItemType* const prevItem = ItemTypeTraits::AccessPrev(item);
2784
ItemTypeTraits::AccessPrev(item) = NULL;
2785
ItemTypeTraits::AccessNext(item) = NULL;
2786
item = prevItem;
2787
}
2788
m_Front = NULL;
2789
m_Back = NULL;
2790
m_Count = 0;
2791
}
2792
}
2793
#endif // _D3D12MA_INTRUSIVE_LINKED_LIST_FUNCTIONS
2794
#endif // _D3D12MA_INTRUSIVE_LINKED_LIST
2795
2796
#ifndef _D3D12MA_ALLOCATION_OBJECT_ALLOCATOR
2797
/*
2798
Thread-safe wrapper over PoolAllocator free list, for allocation of Allocation objects.
2799
*/
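//
// Internal usage sketch, not a public API; it assumes an ALLOCATION_CALLBACKS
// object `allocs` and elides the (private) Allocation constructor arguments.
// With useMutex == true, Allocate/Free serialize on m_Mutex, so one pool of
// Allocation objects can be shared by multiple threads:
//
//     AllocationObjectAllocator objAlloc(allocs, /*useMutex=*/ true);
//     Allocation* alloc = objAlloc.Allocate(/* Allocation ctor args */);
//     // ... use the allocation ...
//     objAlloc.Free(alloc);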
2800
class AllocationObjectAllocator
2801
{
2802
D3D12MA_CLASS_NO_COPY(AllocationObjectAllocator);
2803
public:
2804
AllocationObjectAllocator(const ALLOCATION_CALLBACKS& allocationCallbacks, bool useMutex)
2805
: m_UseMutex(useMutex), m_Allocator(allocationCallbacks, 1024) {}
2806
2807
template<typename... Types>
2808
Allocation* Allocate(Types... args);
2809
void Free(Allocation* alloc);
2810
2811
private:
2812
D3D12MA_MUTEX m_Mutex;
2813
bool m_UseMutex;
2814
PoolAllocator<Allocation> m_Allocator;
2815
};
2816
2817
#ifndef _D3D12MA_ALLOCATION_OBJECT_ALLOCATOR_FUNCTIONS
2818
template<typename... Types>
2819
Allocation* AllocationObjectAllocator::Allocate(Types... args)
2820
{
2821
MutexLock mutexLock(m_Mutex, m_UseMutex);
2822
return m_Allocator.Alloc(std::forward<Types>(args)...);
2823
}
2824
2825
void AllocationObjectAllocator::Free(Allocation* alloc)
2826
{
2827
MutexLock mutexLock(m_Mutex, m_UseMutex);
2828
m_Allocator.Free(alloc);
2829
}
2830
#endif // _D3D12MA_ALLOCATION_OBJECT_ALLOCATOR_FUNCTIONS
2831
#endif // _D3D12MA_ALLOCATION_OBJECT_ALLOCATOR
2832
2833
#ifndef _D3D12MA_SUBALLOCATION
2834
/*
2835
Represents a region of NormalBlock that is either assigned and returned as
2836
allocated memory block or free.
2837
*/
2838
struct Suballocation
2839
{
2840
UINT64 offset;
2841
UINT64 size;
2842
void* privateData;
2843
SuballocationType type;
2844
};
2845
using SuballocationList = List<Suballocation>;
2846
2847
// Comparator for offsets.
2848
struct SuballocationOffsetLess
2849
{
2850
bool operator()(const Suballocation& lhs, const Suballocation& rhs) const
2851
{
2852
return lhs.offset < rhs.offset;
2853
}
2854
};
2855
2856
struct SuballocationOffsetGreater
2857
{
2858
bool operator()(const Suballocation& lhs, const Suballocation& rhs) const
2859
{
2860
return lhs.offset > rhs.offset;
2861
}
2862
};
2863
2864
struct SuballocationItemSizeLess
2865
{
2866
bool operator()(const SuballocationList::iterator lhs, const SuballocationList::iterator rhs) const
2867
{
2868
return lhs->size < rhs->size;
2869
}
2870
bool operator()(const SuballocationList::iterator lhs, UINT64 rhsSize) const
2871
{
2872
return lhs->size < rhsSize;
2873
}
2874
};
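//
// Illustrative sketch, not part of the library: the two overloads above let a
// vector of suballocation iterators, kept sorted ascending by ->size, be both
// sorted (iterator vs. iterator) and binary-searched directly against a
// requested size (iterator vs. UINT64). `freeBySize` is a hypothetical name:
//
//     Vector<SuballocationList::iterator>& freeBySize = /* sorted by ->size */;
//     // First free region whose size is >= allocSize (lower_bound semantics):
//     auto it = std::lower_bound(freeBySize.begin(), freeBySize.end(),
//         allocSize, SuballocationItemSizeLess());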
2875
#endif // _D3D12MA_SUBALLOCATION
2876
2877
#ifndef _D3D12MA_ALLOCATION_REQUEST
2878
/*
2879
Parameters of planned allocation inside a NormalBlock.
2880
*/
2881
struct AllocationRequest
2882
{
2883
AllocHandle allocHandle;
2884
UINT64 size;
2885
UINT64 algorithmData;
2886
UINT64 sumFreeSize; // Sum size of free items that overlap with proposed allocation.
2887
UINT64 sumItemSize; // Sum size of occupied items (to be made lost) that overlap with proposed allocation.
2888
SuballocationList::iterator item;
2889
};
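// Note: algorithmData is interpreted by the concrete BlockMetadata
// implementation; BlockMetadata_Linear, for example, stores one of its
// ALLOC_REQUEST_* values there and switches on it in Alloc() (see below).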
2890
#endif // _D3D12MA_ALLOCATION_REQUEST
2891
2892
#ifndef _D3D12MA_BLOCK_METADATA
2893
/*
2894
Data structure used for bookkeeping of allocations and unused ranges of memory
2895
in a single ID3D12Heap memory block.
2896
*/
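//
// Typical call sequence, shown as a sketch rather than a definitive recipe;
// `metadata` points to some concrete BlockMetadata implementation and
// `myPrivateData` is a hypothetical placeholder:
//
//     AllocationRequest req = {};
//     if(metadata->CreateAllocationRequest(allocSize, allocAlignment,
//         /*upperAddress=*/ false, /*strategy=*/ 0, &req))
//     {
//         metadata->Alloc(req, allocSize, myPrivateData);
//         // ... later ...
//         metadata->Free(req.allocHandle);
//     }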
2897
class BlockMetadata
2898
{
2899
public:
2900
BlockMetadata(const ALLOCATION_CALLBACKS* allocationCallbacks, bool isVirtual);
2901
virtual ~BlockMetadata() = default;
2902
2903
virtual void Init(UINT64 size) { m_Size = size; }
2904
// Validates all data structures inside this object. If not valid, returns false.
2905
virtual bool Validate() const = 0;
2906
UINT64 GetSize() const { return m_Size; }
2907
bool IsVirtual() const { return m_IsVirtual; }
2908
virtual size_t GetAllocationCount() const = 0;
2909
virtual size_t GetFreeRegionsCount() const = 0;
2910
virtual UINT64 GetSumFreeSize() const = 0;
2911
virtual UINT64 GetAllocationOffset(AllocHandle allocHandle) const = 0;
2912
// Returns true if this block is empty - contains only a single free suballocation.
2913
virtual bool IsEmpty() const = 0;
2914
2915
virtual void GetAllocationInfo(AllocHandle allocHandle, VIRTUAL_ALLOCATION_INFO& outInfo) const = 0;
2916
2917
// Tries to find a place for suballocation with given parameters inside this block.
2918
// If succeeded, fills pAllocationRequest and returns true.
2919
// If failed, returns false.
2920
virtual bool CreateAllocationRequest(
2921
UINT64 allocSize,
2922
UINT64 allocAlignment,
2923
bool upperAddress,
2924
UINT32 strategy,
2925
AllocationRequest* pAllocationRequest) = 0;
2926
2927
// Makes actual allocation based on request. Request must already be checked and valid.
2928
virtual void Alloc(
2929
const AllocationRequest& request,
2930
UINT64 allocSize,
2931
void* PrivateData) = 0;
2932
2933
virtual void Free(AllocHandle allocHandle) = 0;
2934
// Frees all allocations.
2935
// Careful! Don't call it if there are Allocation objects owned by pPrivateData of cleared allocations!
2936
virtual void Clear() = 0;
2937
2938
virtual AllocHandle GetAllocationListBegin() const = 0;
2939
virtual AllocHandle GetNextAllocation(AllocHandle prevAlloc) const = 0;
2940
virtual UINT64 GetNextFreeRegionSize(AllocHandle alloc) const = 0;
2941
virtual void* GetAllocationPrivateData(AllocHandle allocHandle) const = 0;
2942
virtual void SetAllocationPrivateData(AllocHandle allocHandle, void* privateData) = 0;
2943
2944
virtual void AddStatistics(Statistics& inoutStats) const = 0;
2945
virtual void AddDetailedStatistics(DetailedStatistics& inoutStats) const = 0;
2946
virtual void WriteAllocationInfoToJson(JsonWriter& json) const = 0;
2947
virtual void DebugLogAllAllocations() const = 0;
2948
2949
protected:
2950
const ALLOCATION_CALLBACKS* GetAllocs() const { return m_pAllocationCallbacks; }
2951
UINT64 GetDebugMargin() const { return IsVirtual() ? 0 : D3D12MA_DEBUG_MARGIN; }
2952
2953
void DebugLogAllocation(UINT64 offset, UINT64 size, void* privateData) const;
2954
void PrintDetailedMap_Begin(JsonWriter& json,
2955
UINT64 unusedBytes,
2956
size_t allocationCount,
2957
size_t unusedRangeCount) const;
2958
void PrintDetailedMap_Allocation(JsonWriter& json,
2959
UINT64 offset, UINT64 size, void* privateData) const;
2960
void PrintDetailedMap_UnusedRange(JsonWriter& json,
2961
UINT64 offset, UINT64 size) const;
2962
void PrintDetailedMap_End(JsonWriter& json) const;
2963
2964
private:
2965
UINT64 m_Size;
2966
bool m_IsVirtual;
2967
const ALLOCATION_CALLBACKS* m_pAllocationCallbacks;
2968
2969
D3D12MA_CLASS_NO_COPY(BlockMetadata);
2970
};
2971
2972
#ifndef _D3D12MA_BLOCK_METADATA_FUNCTIONS
2973
BlockMetadata::BlockMetadata(const ALLOCATION_CALLBACKS* allocationCallbacks, bool isVirtual)
2974
: m_Size(0),
2975
m_IsVirtual(isVirtual),
2976
m_pAllocationCallbacks(allocationCallbacks)
2977
{
2978
D3D12MA_ASSERT(allocationCallbacks);
2979
}
2980
2981
void BlockMetadata::DebugLogAllocation(UINT64 offset, UINT64 size, void* privateData) const
2982
{
2983
if (IsVirtual())
2984
{
2985
D3D12MA_DEBUG_LOG(L"UNFREED VIRTUAL ALLOCATION; Offset: %llu; Size: %llu; PrivateData: %p", offset, size, privateData);
2986
}
2987
else
2988
{
2989
D3D12MA_ASSERT(privateData != NULL);
2990
Allocation* allocation = reinterpret_cast<Allocation*>(privateData);
2991
2992
privateData = allocation->GetPrivateData();
2993
LPCWSTR name = allocation->GetName();
2994
2995
D3D12MA_DEBUG_LOG(L"UNFREED ALLOCATION; Offset: %llu; Size: %llu; PrivateData: %p; Name: %s",
2996
offset, size, privateData, name ? name : L"D3D12MA_Empty");
2997
}
2998
}
2999
3000
void BlockMetadata::PrintDetailedMap_Begin(JsonWriter& json,
3001
UINT64 unusedBytes, size_t allocationCount, size_t unusedRangeCount) const
3002
{
3003
json.WriteString(L"TotalBytes");
3004
json.WriteNumber(GetSize());
3005
3006
json.WriteString(L"UnusedBytes");
3007
json.WriteNumber(unusedBytes);
3008
3009
json.WriteString(L"Allocations");
3010
json.WriteNumber((UINT64)allocationCount);
3011
3012
json.WriteString(L"UnusedRanges");
3013
json.WriteNumber((UINT64)unusedRangeCount);
3014
3015
json.WriteString(L"Suballocations");
3016
json.BeginArray();
3017
}
3018
3019
void BlockMetadata::PrintDetailedMap_Allocation(JsonWriter& json,
3020
UINT64 offset, UINT64 size, void* privateData) const
3021
{
3022
json.BeginObject(true);
3023
3024
json.WriteString(L"Offset");
3025
json.WriteNumber(offset);
3026
3027
if (IsVirtual())
3028
{
3029
json.WriteString(L"Size");
3030
json.WriteNumber(size);
3031
if (privateData)
3032
{
3033
json.WriteString(L"CustomData");
3034
json.WriteNumber((uintptr_t)privateData);
3035
}
3036
}
3037
else
3038
{
3039
const Allocation* const alloc = (const Allocation*)privateData;
3040
D3D12MA_ASSERT(alloc);
3041
json.AddAllocationToObject(*alloc);
3042
}
3043
json.EndObject();
3044
}
3045
3046
void BlockMetadata::PrintDetailedMap_UnusedRange(JsonWriter& json,
3047
UINT64 offset, UINT64 size) const
3048
{
3049
json.BeginObject(true);
3050
3051
json.WriteString(L"Offset");
3052
json.WriteNumber(offset);
3053
3054
json.WriteString(L"Type");
3055
json.WriteString(L"FREE");
3056
3057
json.WriteString(L"Size");
3058
json.WriteNumber(size);
3059
3060
json.EndObject();
3061
}
3062
3063
void BlockMetadata::PrintDetailedMap_End(JsonWriter& json) const
3064
{
3065
json.EndArray();
3066
}
3067
#endif // _D3D12MA_BLOCK_METADATA_FUNCTIONS
3068
#endif // _D3D12MA_BLOCK_METADATA
3069
3070
#ifndef _D3D12MA_BLOCK_METADATA_LINEAR
3071
class BlockMetadata_Linear : public BlockMetadata
3072
{
3073
public:
3074
BlockMetadata_Linear(const ALLOCATION_CALLBACKS* allocationCallbacks, bool isVirtual);
3075
virtual ~BlockMetadata_Linear() = default;
3076
3077
UINT64 GetSumFreeSize() const override { return m_SumFreeSize; }
3078
bool IsEmpty() const override { return GetAllocationCount() == 0; }
3079
UINT64 GetAllocationOffset(AllocHandle allocHandle) const override { return (UINT64)allocHandle - 1; }
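// The linear metadata encodes an allocation as AllocHandle = offset + 1
// (hence the - 1 above and in the other handle-based accessors below),
// presumably so that handle value 0 can keep meaning "no allocation".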
3080
3081
void Init(UINT64 size) override;
3082
bool Validate() const override;
3083
size_t GetAllocationCount() const override;
3084
size_t GetFreeRegionsCount() const override;
3085
void GetAllocationInfo(AllocHandle allocHandle, VIRTUAL_ALLOCATION_INFO& outInfo) const override;
3086
3087
bool CreateAllocationRequest(
3088
UINT64 allocSize,
3089
UINT64 allocAlignment,
3090
bool upperAddress,
3091
UINT32 strategy,
3092
AllocationRequest* pAllocationRequest) override;
3093
3094
void Alloc(
3095
const AllocationRequest& request,
3096
UINT64 allocSize,
3097
void* privateData) override;
3098
3099
void Free(AllocHandle allocHandle) override;
3100
void Clear() override;
3101
3102
AllocHandle GetAllocationListBegin() const override;
3103
AllocHandle GetNextAllocation(AllocHandle prevAlloc) const override;
3104
UINT64 GetNextFreeRegionSize(AllocHandle alloc) const override;
3105
void* GetAllocationPrivateData(AllocHandle allocHandle) const override;
3106
void SetAllocationPrivateData(AllocHandle allocHandle, void* privateData) override;
3107
3108
void AddStatistics(Statistics& inoutStats) const override;
3109
void AddDetailedStatistics(DetailedStatistics& inoutStats) const override;
3110
void WriteAllocationInfoToJson(JsonWriter& json) const override;
3111
void DebugLogAllAllocations() const override;
3112
3113
private:
3114
/*
3115
There are two suballocation vectors, used in ping-pong way.
3116
The one with index m_1stVectorIndex is called 1st.
3117
The one with index (m_1stVectorIndex ^ 1) is called 2nd.
3118
2nd can be non-empty only when 1st is not empty.
3119
When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
3120
*/
3121
typedef Vector<Suballocation> SuballocationVectorType;
3122
3123
enum ALLOC_REQUEST_TYPE
3124
{
3125
ALLOC_REQUEST_UPPER_ADDRESS,
3126
ALLOC_REQUEST_END_OF_1ST,
3127
ALLOC_REQUEST_END_OF_2ND,
3128
};
3129
3130
enum SECOND_VECTOR_MODE
3131
{
3132
SECOND_VECTOR_EMPTY,
3133
/*
3134
Suballocations in 2nd vector are created later than the ones in 1st, but they
3135
all have smaller offset.
3136
*/
3137
SECOND_VECTOR_RING_BUFFER,
3138
/*
3139
Suballocations in 2nd vector are upper side of double stack.
3140
They all have offsets higher than those in 1st vector.
3141
Top of this stack means smaller offsets, but higher indices in this vector.
3142
*/
3143
SECOND_VECTOR_DOUBLE_STACK,
3144
};
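// Rough layout sketch of the two modes, offsets growing to the right within
// one block (letters are allocations, in allocation order A, B, C, D):
//
//     SECOND_VECTOR_RING_BUFFER:    | 2nd: C D | free | 1st: A B | free |
//         2nd-vector items were allocated later but wrapped around to the
//         lower offsets in front of the remaining 1st-vector items.
//
//     SECOND_VECTOR_DOUBLE_STACK:   | 1st: A B | free        | 2nd: D C |
//         1st grows up from offset 0, 2nd grows down from the end of the
//         block; the top of the upper stack (the smallest offset in 2nd) is
//         the highest index in the 2nd vector.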
3145
3146
UINT64 m_SumFreeSize;
3147
SuballocationVectorType m_Suballocations0, m_Suballocations1;
3148
UINT32 m_1stVectorIndex;
3149
SECOND_VECTOR_MODE m_2ndVectorMode;
3150
// Number of items in 1st vector with privateData = null at the beginning.
3151
size_t m_1stNullItemsBeginCount;
3152
// Number of other items in 1st vector with privateData = null somewhere in the middle.
3153
size_t m_1stNullItemsMiddleCount;
3154
// Number of items in 2nd vector with privateData = null.
3155
size_t m_2ndNullItemsCount;
3156
3157
SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
3158
SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
3159
const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
3160
const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
3161
3162
Suballocation& FindSuballocation(UINT64 offset) const;
3163
bool ShouldCompact1st() const;
3164
void CleanupAfterFree();
3165
3166
bool CreateAllocationRequest_LowerAddress(
3167
UINT64 allocSize,
3168
UINT64 allocAlignment,
3169
AllocationRequest* pAllocationRequest);
3170
bool CreateAllocationRequest_UpperAddress(
3171
UINT64 allocSize,
3172
UINT64 allocAlignment,
3173
AllocationRequest* pAllocationRequest);
3174
3175
D3D12MA_CLASS_NO_COPY(BlockMetadata_Linear)
3176
};
3177
3178
#ifndef _D3D12MA_BLOCK_METADATA_LINEAR_FUNCTIONS
3179
BlockMetadata_Linear::BlockMetadata_Linear(const ALLOCATION_CALLBACKS* allocationCallbacks, bool isVirtual)
3180
: BlockMetadata(allocationCallbacks, isVirtual),
3181
m_SumFreeSize(0),
3182
m_Suballocations0(*allocationCallbacks),
3183
m_Suballocations1(*allocationCallbacks),
3184
m_1stVectorIndex(0),
3185
m_2ndVectorMode(SECOND_VECTOR_EMPTY),
3186
m_1stNullItemsBeginCount(0),
3187
m_1stNullItemsMiddleCount(0),
3188
m_2ndNullItemsCount(0)
3189
{
3190
D3D12MA_ASSERT(allocationCallbacks);
3191
}
3192
3193
void BlockMetadata_Linear::Init(UINT64 size)
3194
{
3195
BlockMetadata::Init(size);
3196
m_SumFreeSize = size;
3197
}
3198
3199
bool BlockMetadata_Linear::Validate() const
3200
{
3201
D3D12MA_VALIDATE(GetSumFreeSize() <= GetSize());
3202
const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
3203
const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
3204
3205
D3D12MA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
3206
D3D12MA_VALIDATE(!suballocations1st.empty() ||
3207
suballocations2nd.empty() ||
3208
m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
3209
3210
if (!suballocations1st.empty())
3211
{
3212
// Null items at the beginning should already be accounted for in m_1stNullItemsBeginCount.
3213
D3D12MA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].type != SUBALLOCATION_TYPE_FREE);
3214
// A null item at the end should have been removed with pop_back().
3215
D3D12MA_VALIDATE(suballocations1st.back().type != SUBALLOCATION_TYPE_FREE);
3216
}
3217
if (!suballocations2nd.empty())
3218
{
3219
// A null item at the end should have been removed with pop_back().
3220
D3D12MA_VALIDATE(suballocations2nd.back().type != SUBALLOCATION_TYPE_FREE);
3221
}
3222
3223
D3D12MA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
3224
D3D12MA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
3225
3226
UINT64 sumUsedSize = 0;
3227
const size_t suballoc1stCount = suballocations1st.size();
3228
UINT64 offset = 0;
3229
3230
if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
3231
{
3232
const size_t suballoc2ndCount = suballocations2nd.size();
3233
size_t nullItem2ndCount = 0;
3234
for (size_t i = 0; i < suballoc2ndCount; ++i)
3235
{
3236
const Suballocation& suballoc = suballocations2nd[i];
3237
const bool currFree = (suballoc.type == SUBALLOCATION_TYPE_FREE);
3238
3239
const Allocation* alloc = (Allocation*)suballoc.privateData;
3240
if (!IsVirtual())
3241
{
3242
D3D12MA_VALIDATE(currFree == (alloc == NULL));
3243
}
3244
D3D12MA_VALIDATE(suballoc.offset >= offset);
3245
3246
if (!currFree)
3247
{
3248
if (!IsVirtual())
3249
{
3250
D3D12MA_VALIDATE(GetAllocationOffset(alloc->GetAllocHandle()) == suballoc.offset);
3251
D3D12MA_VALIDATE(alloc->GetSize() == suballoc.size);
3252
}
3253
sumUsedSize += suballoc.size;
3254
}
3255
else
3256
{
3257
++nullItem2ndCount;
3258
}
3259
3260
offset = suballoc.offset + suballoc.size + GetDebugMargin();
3261
}
3262
3263
D3D12MA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
3264
}
3265
3266
for (size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
3267
{
3268
const Suballocation& suballoc = suballocations1st[i];
3269
D3D12MA_VALIDATE(suballoc.type == SUBALLOCATION_TYPE_FREE &&
3270
suballoc.privateData == NULL);
3271
}
3272
3273
size_t nullItem1stCount = m_1stNullItemsBeginCount;
3274
3275
for (size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
3276
{
3277
const Suballocation& suballoc = suballocations1st[i];
3278
const bool currFree = (suballoc.type == SUBALLOCATION_TYPE_FREE);
3279
3280
const Allocation* alloc = (Allocation*)suballoc.privateData;
3281
if (!IsVirtual())
3282
{
3283
D3D12MA_VALIDATE(currFree == (alloc == NULL));
3284
}
3285
D3D12MA_VALIDATE(suballoc.offset >= offset);
3286
D3D12MA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
3287
3288
if (!currFree)
3289
{
3290
if (!IsVirtual())
3291
{
3292
D3D12MA_VALIDATE(GetAllocationOffset(alloc->GetAllocHandle()) == suballoc.offset);
3293
D3D12MA_VALIDATE(alloc->GetSize() == suballoc.size);
3294
}
3295
sumUsedSize += suballoc.size;
3296
}
3297
else
3298
{
3299
++nullItem1stCount;
3300
}
3301
3302
offset = suballoc.offset + suballoc.size + GetDebugMargin();
3303
}
3304
D3D12MA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
3305
3306
if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
3307
{
3308
const size_t suballoc2ndCount = suballocations2nd.size();
3309
size_t nullItem2ndCount = 0;
3310
for (size_t i = suballoc2ndCount; i--; )
3311
{
3312
const Suballocation& suballoc = suballocations2nd[i];
3313
const bool currFree = (suballoc.type == SUBALLOCATION_TYPE_FREE);
3314
3315
const Allocation* alloc = (Allocation*)suballoc.privateData;
3316
if (!IsVirtual())
3317
{
3318
D3D12MA_VALIDATE(currFree == (alloc == NULL));
3319
}
3320
D3D12MA_VALIDATE(suballoc.offset >= offset);
3321
3322
if (!currFree)
3323
{
3324
if (!IsVirtual())
3325
{
3326
D3D12MA_VALIDATE(GetAllocationOffset(alloc->GetAllocHandle()) == suballoc.offset);
3327
D3D12MA_VALIDATE(alloc->GetSize() == suballoc.size);
3328
}
3329
sumUsedSize += suballoc.size;
3330
}
3331
else
3332
{
3333
++nullItem2ndCount;
3334
}
3335
3336
offset = suballoc.offset + suballoc.size + GetDebugMargin();
3337
}
3338
3339
D3D12MA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
3340
}
3341
3342
D3D12MA_VALIDATE(offset <= GetSize());
3343
D3D12MA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
3344
3345
return true;
3346
}
3347
3348
size_t BlockMetadata_Linear::GetAllocationCount() const
3349
{
3350
return AccessSuballocations1st().size() - m_1stNullItemsBeginCount - m_1stNullItemsMiddleCount +
3351
AccessSuballocations2nd().size() - m_2ndNullItemsCount;
3352
}
3353
3354
size_t BlockMetadata_Linear::GetFreeRegionsCount() const
3355
{
3356
// Function only used for defragmentation, which is disabled for this algorithm
3357
D3D12MA_ASSERT(0);
3358
return SIZE_MAX;
3359
}
3360
3361
void BlockMetadata_Linear::GetAllocationInfo(AllocHandle allocHandle, VIRTUAL_ALLOCATION_INFO& outInfo) const
3362
{
3363
const Suballocation& suballoc = FindSuballocation((UINT64)allocHandle - 1);
3364
outInfo.Offset = suballoc.offset;
3365
outInfo.Size = suballoc.size;
3366
outInfo.pPrivateData = suballoc.privateData;
3367
}
3368
3369
bool BlockMetadata_Linear::CreateAllocationRequest(
3370
UINT64 allocSize,
3371
UINT64 allocAlignment,
3372
bool upperAddress,
3373
UINT32 strategy,
3374
AllocationRequest* pAllocationRequest)
3375
{
3376
D3D12MA_ASSERT(allocSize > 0 && "Cannot allocate empty block!");
3377
D3D12MA_ASSERT(pAllocationRequest != NULL);
3378
D3D12MA_HEAVY_ASSERT(Validate());
3379
3380
if(allocSize > GetSize())
3381
return false;
3382
3383
pAllocationRequest->size = allocSize;
3384
return upperAddress ?
3385
CreateAllocationRequest_UpperAddress(
3386
allocSize, allocAlignment, pAllocationRequest) :
3387
CreateAllocationRequest_LowerAddress(
3388
allocSize, allocAlignment, pAllocationRequest);
3389
}
3390
3391
void BlockMetadata_Linear::Alloc(
3392
const AllocationRequest& request,
3393
UINT64 allocSize,
3394
void* privateData)
3395
{
3396
UINT64 offset = (UINT64)request.allocHandle - 1;
3397
const Suballocation newSuballoc = { offset, request.size, privateData, SUBALLOCATION_TYPE_ALLOCATION };
3398
3399
switch (request.algorithmData)
3400
{
3401
case ALLOC_REQUEST_UPPER_ADDRESS:
3402
{
3403
D3D12MA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
3404
"CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
3405
SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
3406
suballocations2nd.push_back(newSuballoc);
3407
m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
3408
break;
3409
}
3410
case ALLOC_REQUEST_END_OF_1ST:
3411
{
3412
SuballocationVectorType& suballocations1st = AccessSuballocations1st();
3413
3414
D3D12MA_ASSERT(suballocations1st.empty() ||
3415
offset >= suballocations1st.back().offset + suballocations1st.back().size);
3416
// Check if it fits before the end of the block.
3417
D3D12MA_ASSERT(offset + request.size <= GetSize());
3418
3419
suballocations1st.push_back(newSuballoc);
3420
break;
3421
}
3422
case ALLOC_REQUEST_END_OF_2ND:
3423
{
3424
SuballocationVectorType& suballocations1st = AccessSuballocations1st();
3425
// New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
3426
D3D12MA_ASSERT(!suballocations1st.empty() &&
3427
offset + request.size <= suballocations1st[m_1stNullItemsBeginCount].offset);
3428
SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
3429
3430
switch (m_2ndVectorMode)
3431
{
3432
case SECOND_VECTOR_EMPTY:
3433
// First allocation from second part ring buffer.
3434
D3D12MA_ASSERT(suballocations2nd.empty());
3435
m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
3436
break;
3437
case SECOND_VECTOR_RING_BUFFER:
3438
// 2-part ring buffer is already started.
3439
D3D12MA_ASSERT(!suballocations2nd.empty());
3440
break;
3441
case SECOND_VECTOR_DOUBLE_STACK:
3442
D3D12MA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
3443
break;
3444
default:
3445
D3D12MA_ASSERT(0);
3446
}
3447
3448
suballocations2nd.push_back(newSuballoc);
3449
break;
3450
}
3451
default:
3452
D3D12MA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
3453
}
3454
m_SumFreeSize -= newSuballoc.size;
3455
}
3456
3457
void BlockMetadata_Linear::Free(AllocHandle allocHandle)
3458
{
3459
SuballocationVectorType& suballocations1st = AccessSuballocations1st();
3460
SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
3461
UINT64 offset = (UINT64)allocHandle - 1;
3462
3463
if (!suballocations1st.empty())
3464
{
3465
// If it is the first live allocation in the 1st vector: mark it free and extend the run of null items at the beginning.
3466
Suballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
3467
if (firstSuballoc.offset == offset)
3468
{
3469
firstSuballoc.type = SUBALLOCATION_TYPE_FREE;
3470
firstSuballoc.privateData = NULL;
3471
m_SumFreeSize += firstSuballoc.size;
3472
++m_1stNullItemsBeginCount;
3473
CleanupAfterFree();
3474
return;
3475
}
3476
}
3477
3478
// Last allocation in 2-part ring buffer or top of upper stack (same logic).
3479
if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
3480
m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
3481
{
3482
Suballocation& lastSuballoc = suballocations2nd.back();
3483
if (lastSuballoc.offset == offset)
3484
{
3485
m_SumFreeSize += lastSuballoc.size;
3486
suballocations2nd.pop_back();
3487
CleanupAfterFree();
3488
return;
3489
}
3490
}
3491
// Last allocation in 1st vector.
3492
else if (m_2ndVectorMode == SECOND_VECTOR_EMPTY)
3493
{
3494
Suballocation& lastSuballoc = suballocations1st.back();
3495
if (lastSuballoc.offset == offset)
3496
{
3497
m_SumFreeSize += lastSuballoc.size;
3498
suballocations1st.pop_back();
3499
CleanupAfterFree();
3500
return;
3501
}
3502
}
3503
3504
Suballocation refSuballoc;
3505
refSuballoc.offset = offset;
3506
// The rest of the members is intentionally left uninitialized for better performance.
3507
3508
// Item from the middle of 1st vector.
3509
{
3510
const SuballocationVectorType::iterator it = BinaryFindSorted(
3511
suballocations1st.begin() + m_1stNullItemsBeginCount,
3512
suballocations1st.end(),
3513
refSuballoc,
3514
SuballocationOffsetLess());
3515
if (it != suballocations1st.end())
3516
{
3517
it->type = SUBALLOCATION_TYPE_FREE;
3518
it->privateData = NULL;
3519
++m_1stNullItemsMiddleCount;
3520
m_SumFreeSize += it->size;
3521
CleanupAfterFree();
3522
return;
3523
}
3524
}
3525
3526
if (m_2ndVectorMode != SECOND_VECTOR_EMPTY)
3527
{
3528
// Item from the middle of 2nd vector.
3529
const SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
3530
BinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, SuballocationOffsetLess()) :
3531
BinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, SuballocationOffsetGreater());
3532
if (it != suballocations2nd.end())
3533
{
3534
it->type = SUBALLOCATION_TYPE_FREE;
3535
it->privateData = NULL;
3536
++m_2ndNullItemsCount;
3537
m_SumFreeSize += it->size;
3538
CleanupAfterFree();
3539
return;
3540
}
3541
}
3542
3543
D3D12MA_ASSERT(0 && "Allocation to free not found in linear allocator!");
3544
}
3545
3546
void BlockMetadata_Linear::Clear()
3547
{
3548
m_SumFreeSize = GetSize();
3549
m_Suballocations0.clear();
3550
m_Suballocations1.clear();
3551
// Leaving m_1stVectorIndex unchanged - it doesn't matter.
3552
m_2ndVectorMode = SECOND_VECTOR_EMPTY;
3553
m_1stNullItemsBeginCount = 0;
3554
m_1stNullItemsMiddleCount = 0;
3555
m_2ndNullItemsCount = 0;
3556
}
3557
3558
AllocHandle BlockMetadata_Linear::GetAllocationListBegin() const
3559
{
3560
// Function only used for defragmentation, which is disabled for this algorithm
3561
D3D12MA_ASSERT(0);
3562
return (AllocHandle)0;
3563
}
3564
3565
AllocHandle BlockMetadata_Linear::GetNextAllocation(AllocHandle prevAlloc) const
3566
{
3567
// Function only used for defragmentation, which is disabled for this algorithm
3568
D3D12MA_ASSERT(0);
3569
return (AllocHandle)0;
3570
}
3571
3572
UINT64 BlockMetadata_Linear::GetNextFreeRegionSize(AllocHandle alloc) const
3573
{
3574
// Function only used for defragmentation, which is disabled for this algorithm
3575
D3D12MA_ASSERT(0);
3576
return 0;
3577
}
3578
3579
void* BlockMetadata_Linear::GetAllocationPrivateData(AllocHandle allocHandle) const
3580
{
3581
return FindSuballocation((UINT64)allocHandle - 1).privateData;
3582
}
3583
3584
void BlockMetadata_Linear::SetAllocationPrivateData(AllocHandle allocHandle, void* privateData)
3585
{
3586
Suballocation& suballoc = FindSuballocation((UINT64)allocHandle - 1);
3587
suballoc.privateData = privateData;
3588
}
3589
3590
void BlockMetadata_Linear::AddStatistics(Statistics& inoutStats) const
3591
{
3592
inoutStats.BlockCount++;
3593
inoutStats.AllocationCount += (UINT)GetAllocationCount();
3594
inoutStats.BlockBytes += GetSize();
3595
inoutStats.AllocationBytes += GetSize() - m_SumFreeSize;
3596
}
3597
3598
void BlockMetadata_Linear::AddDetailedStatistics(DetailedStatistics& inoutStats) const
3599
{
3600
inoutStats.Stats.BlockCount++;
3601
inoutStats.Stats.BlockBytes += GetSize();
3602
3603
const UINT64 size = GetSize();
3604
const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
3605
const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
3606
const size_t suballoc1stCount = suballocations1st.size();
3607
const size_t suballoc2ndCount = suballocations2nd.size();
3608
3609
UINT64 lastOffset = 0;
3610
if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
3611
{
3612
const UINT64 freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
3613
size_t nextAlloc2ndIndex = 0;
3614
while (lastOffset < freeSpace2ndTo1stEnd)
3615
{
3616
// Find next non-null allocation or move nextAlloc2ndIndex to the end.
3617
while (nextAlloc2ndIndex < suballoc2ndCount &&
3618
suballocations2nd[nextAlloc2ndIndex].privateData == NULL)
3619
{
3620
++nextAlloc2ndIndex;
3621
}
3622
3623
// Found non-null allocation.
3624
if (nextAlloc2ndIndex < suballoc2ndCount)
3625
{
3626
const Suballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
3627
3628
// 1. Process free space before this allocation.
3629
if (lastOffset < suballoc.offset)
3630
{
3631
// There is free space from lastOffset to suballoc.offset.
3632
const UINT64 unusedRangeSize = suballoc.offset - lastOffset;
3633
AddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
3634
}
3635
3636
// 2. Process this allocation.
3637
// There is allocation with suballoc.offset, suballoc.size.
3638
AddDetailedStatisticsAllocation(inoutStats, suballoc.size);
3639
3640
// 3. Prepare for next iteration.
3641
lastOffset = suballoc.offset + suballoc.size;
3642
++nextAlloc2ndIndex;
3643
}
3644
// We are at the end.
3645
else
3646
{
3647
// There is free space from lastOffset to freeSpace2ndTo1stEnd.
3648
if (lastOffset < freeSpace2ndTo1stEnd)
3649
{
3650
const UINT64 unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
3651
AddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
3652
}
3653
3654
// End of loop.
3655
lastOffset = freeSpace2ndTo1stEnd;
3656
}
3657
}
3658
}
3659
3660
size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
3661
const UINT64 freeSpace1stTo2ndEnd =
3662
m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
3663
while (lastOffset < freeSpace1stTo2ndEnd)
3664
{
3665
// Find next non-null allocation or move nextAlloc1stIndex to the end.
3666
while (nextAlloc1stIndex < suballoc1stCount &&
3667
suballocations1st[nextAlloc1stIndex].privateData == NULL)
3668
{
3669
++nextAlloc1stIndex;
3670
}
3671
3672
// Found non-null allocation.
3673
if (nextAlloc1stIndex < suballoc1stCount)
3674
{
3675
const Suballocation& suballoc = suballocations1st[nextAlloc1stIndex];
3676
3677
// 1. Process free space before this allocation.
3678
if (lastOffset < suballoc.offset)
3679
{
3680
// There is free space from lastOffset to suballoc.offset.
3681
const UINT64 unusedRangeSize = suballoc.offset - lastOffset;
3682
AddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
3683
}
3684
3685
// 2. Process this allocation.
3686
// There is allocation with suballoc.offset, suballoc.size.
3687
AddDetailedStatisticsAllocation(inoutStats, suballoc.size);
3688
3689
// 3. Prepare for next iteration.
3690
lastOffset = suballoc.offset + suballoc.size;
3691
++nextAlloc1stIndex;
3692
}
3693
// We are at the end.
3694
else
3695
{
3696
// There is free space from lastOffset to freeSpace1stTo2ndEnd.
3697
if (lastOffset < freeSpace1stTo2ndEnd)
3698
{
3699
const UINT64 unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
3700
AddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
3701
}
3702
3703
// End of loop.
3704
lastOffset = freeSpace1stTo2ndEnd;
3705
}
3706
}
3707
3708
if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
3709
{
3710
size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
3711
while (lastOffset < size)
3712
{
3713
// Find next non-null allocation or move nextAllocIndex to the end.
3714
while (nextAlloc2ndIndex != SIZE_MAX &&
3715
suballocations2nd[nextAlloc2ndIndex].privateData == NULL)
3716
{
3717
--nextAlloc2ndIndex;
3718
}
3719
3720
// Found non-null allocation.
3721
if (nextAlloc2ndIndex != SIZE_MAX)
3722
{
3723
const Suballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
3724
3725
// 1. Process free space before this allocation.
3726
if (lastOffset < suballoc.offset)
3727
{
3728
// There is free space from lastOffset to suballoc.offset.
3729
const UINT64 unusedRangeSize = suballoc.offset - lastOffset;
3730
AddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
3731
}
3732
3733
// 2. Process this allocation.
3734
// There is allocation with suballoc.offset, suballoc.size.
3735
AddDetailedStatisticsAllocation(inoutStats, suballoc.size);
3736
3737
// 3. Prepare for next iteration.
3738
lastOffset = suballoc.offset + suballoc.size;
3739
--nextAlloc2ndIndex;
3740
}
3741
// We are at the end.
3742
else
3743
{
3744
// There is free space from lastOffset to size.
3745
if (lastOffset < size)
3746
{
3747
const UINT64 unusedRangeSize = size - lastOffset;
3748
AddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
3749
}
3750
3751
// End of loop.
3752
lastOffset = size;
3753
}
3754
}
3755
}
3756
}
3757
3758
void BlockMetadata_Linear::WriteAllocationInfoToJson(JsonWriter& json) const
3759
{
3760
const UINT64 size = GetSize();
3761
const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
3762
const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
3763
const size_t suballoc1stCount = suballocations1st.size();
3764
const size_t suballoc2ndCount = suballocations2nd.size();
3765
3766
// FIRST PASS
3767
3768
size_t unusedRangeCount = 0;
3769
UINT64 usedBytes = 0;
3770
3771
UINT64 lastOffset = 0;
3772
3773
size_t alloc2ndCount = 0;
3774
if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
3775
{
3776
const UINT64 freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
3777
size_t nextAlloc2ndIndex = 0;
3778
while (lastOffset < freeSpace2ndTo1stEnd)
3779
{
3780
// Find next non-null allocation or move nextAlloc2ndIndex to the end.
3781
while (nextAlloc2ndIndex < suballoc2ndCount &&
3782
suballocations2nd[nextAlloc2ndIndex].privateData == NULL)
3783
{
3784
++nextAlloc2ndIndex;
3785
}
3786
3787
// Found non-null allocation.
3788
if (nextAlloc2ndIndex < suballoc2ndCount)
3789
{
3790
const Suballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
3791
3792
// 1. Process free space before this allocation.
3793
if (lastOffset < suballoc.offset)
3794
{
3795
// There is free space from lastOffset to suballoc.offset.
3796
++unusedRangeCount;
3797
}
3798
3799
// 2. Process this allocation.
3800
// There is allocation with suballoc.offset, suballoc.size.
3801
++alloc2ndCount;
3802
usedBytes += suballoc.size;
3803
3804
// 3. Prepare for next iteration.
3805
lastOffset = suballoc.offset + suballoc.size;
3806
++nextAlloc2ndIndex;
3807
}
3808
// We are at the end.
3809
else
3810
{
3811
if (lastOffset < freeSpace2ndTo1stEnd)
3812
{
3813
// There is free space from lastOffset to freeSpace2ndTo1stEnd.
3814
++unusedRangeCount;
3815
}
3816
3817
// End of loop.
3818
lastOffset = freeSpace2ndTo1stEnd;
3819
}
3820
}
3821
}
3822
3823
size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
3824
size_t alloc1stCount = 0;
3825
const UINT64 freeSpace1stTo2ndEnd =
3826
m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
3827
while (lastOffset < freeSpace1stTo2ndEnd)
3828
{
3829
// Find next non-null allocation or move nextAllocIndex to the end.
3830
while (nextAlloc1stIndex < suballoc1stCount &&
3831
suballocations1st[nextAlloc1stIndex].privateData == NULL)
3832
{
3833
++nextAlloc1stIndex;
3834
}
3835
3836
// Found non-null allocation.
3837
if (nextAlloc1stIndex < suballoc1stCount)
3838
{
3839
const Suballocation& suballoc = suballocations1st[nextAlloc1stIndex];
3840
3841
// 1. Process free space before this allocation.
3842
if (lastOffset < suballoc.offset)
3843
{
3844
// There is free space from lastOffset to suballoc.offset.
3845
++unusedRangeCount;
3846
}
3847
3848
// 2. Process this allocation.
3849
// There is allocation with suballoc.offset, suballoc.size.
3850
++alloc1stCount;
3851
usedBytes += suballoc.size;
3852
3853
// 3. Prepare for next iteration.
3854
lastOffset = suballoc.offset + suballoc.size;
3855
++nextAlloc1stIndex;
3856
}
3857
// We are at the end.
3858
else
3859
{
3860
if (lastOffset < size)
3861
{
3862
// There is free space from lastOffset to freeSpace1stTo2ndEnd.
3863
++unusedRangeCount;
3864
}
3865
3866
// End of loop.
3867
lastOffset = freeSpace1stTo2ndEnd;
3868
}
3869
}
3870
3871
if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
3872
{
3873
size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
3874
while (lastOffset < size)
3875
{
3876
// Find next non-null allocation or move nextAlloc2ndIndex to the end.
3877
while (nextAlloc2ndIndex != SIZE_MAX &&
3878
suballocations2nd[nextAlloc2ndIndex].privateData == NULL)
3879
{
3880
--nextAlloc2ndIndex;
3881
}
3882
3883
// Found non-null allocation.
3884
if (nextAlloc2ndIndex != SIZE_MAX)
3885
{
3886
const Suballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
3887
3888
// 1. Process free space before this allocation.
3889
if (lastOffset < suballoc.offset)
3890
{
3891
// There is free space from lastOffset to suballoc.offset.
3892
++unusedRangeCount;
3893
}
3894
3895
// 2. Process this allocation.
3896
// There is allocation with suballoc.offset, suballoc.size.
3897
++alloc2ndCount;
3898
usedBytes += suballoc.size;
3899
3900
// 3. Prepare for next iteration.
3901
lastOffset = suballoc.offset + suballoc.size;
3902
--nextAlloc2ndIndex;
3903
}
3904
// We are at the end.
3905
else
3906
{
3907
if (lastOffset < size)
3908
{
3909
// There is free space from lastOffset to size.
3910
++unusedRangeCount;
3911
}
3912
3913
// End of loop.
3914
lastOffset = size;
3915
}
3916
}
3917
}
3918
3919
const UINT64 unusedBytes = size - usedBytes;
3920
PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
3921
3922
// SECOND PASS
3923
lastOffset = 0;
3924
if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
3925
{
3926
const UINT64 freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
3927
size_t nextAlloc2ndIndex = 0;
3928
while (lastOffset < freeSpace2ndTo1stEnd)
3929
{
3930
// Find next non-null allocation or move nextAlloc2ndIndex to the end.
3931
while (nextAlloc2ndIndex < suballoc2ndCount &&
3932
suballocations2nd[nextAlloc2ndIndex].privateData == NULL)
3933
{
3934
++nextAlloc2ndIndex;
3935
}
3936
3937
// Found non-null allocation.
3938
if (nextAlloc2ndIndex < suballoc2ndCount)
3939
{
3940
const Suballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
3941
3942
// 1. Process free space before this allocation.
3943
if (lastOffset < suballoc.offset)
3944
{
3945
// There is free space from lastOffset to suballoc.offset.
3946
const UINT64 unusedRangeSize = suballoc.offset - lastOffset;
3947
PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
3948
}
3949
3950
// 2. Process this allocation.
3951
// There is allocation with suballoc.offset, suballoc.size.
3952
PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.privateData);
3953
3954
// 3. Prepare for next iteration.
3955
lastOffset = suballoc.offset + suballoc.size;
3956
++nextAlloc2ndIndex;
3957
}
3958
// We are at the end.
3959
else
3960
{
3961
if (lastOffset < freeSpace2ndTo1stEnd)
3962
{
3963
// There is free space from lastOffset to freeSpace2ndTo1stEnd.
3964
const UINT64 unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
3965
PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
3966
}
3967
3968
// End of loop.
3969
lastOffset = freeSpace2ndTo1stEnd;
3970
}
3971
}
3972
}
3973
3974
nextAlloc1stIndex = m_1stNullItemsBeginCount;
3975
while (lastOffset < freeSpace1stTo2ndEnd)
3976
{
3977
// Find next non-null allocation or move nextAllocIndex to the end.
3978
while (nextAlloc1stIndex < suballoc1stCount &&
3979
suballocations1st[nextAlloc1stIndex].privateData == NULL)
3980
{
3981
++nextAlloc1stIndex;
3982
}
3983
3984
// Found non-null allocation.
3985
if (nextAlloc1stIndex < suballoc1stCount)
3986
{
3987
const Suballocation& suballoc = suballocations1st[nextAlloc1stIndex];
3988
3989
// 1. Process free space before this allocation.
3990
if (lastOffset < suballoc.offset)
3991
{
3992
// There is free space from lastOffset to suballoc.offset.
3993
const UINT64 unusedRangeSize = suballoc.offset - lastOffset;
3994
PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
3995
}
3996
3997
// 2. Process this allocation.
3998
// There is allocation with suballoc.offset, suballoc.size.
3999
PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.privateData);
4000
4001
// 3. Prepare for next iteration.
4002
lastOffset = suballoc.offset + suballoc.size;
4003
++nextAlloc1stIndex;
4004
}
4005
// We are at the end.
4006
else
4007
{
4008
if (lastOffset < freeSpace1stTo2ndEnd)
4009
{
4010
// There is free space from lastOffset to freeSpace1stTo2ndEnd.
4011
const UINT64 unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
4012
PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
4013
}
4014
4015
// End of loop.
4016
lastOffset = freeSpace1stTo2ndEnd;
4017
}
4018
}
4019
4020
if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
4021
{
4022
size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
4023
while (lastOffset < size)
4024
{
4025
// Find next non-null allocation or move nextAlloc2ndIndex to the end.
4026
while (nextAlloc2ndIndex != SIZE_MAX &&
4027
suballocations2nd[nextAlloc2ndIndex].privateData == NULL)
4028
{
4029
--nextAlloc2ndIndex;
4030
}
4031
4032
// Found non-null allocation.
4033
if (nextAlloc2ndIndex != SIZE_MAX)
4034
{
4035
const Suballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
4036
4037
// 1. Process free space before this allocation.
4038
if (lastOffset < suballoc.offset)
4039
{
4040
// There is free space from lastOffset to suballoc.offset.
4041
const UINT64 unusedRangeSize = suballoc.offset - lastOffset;
4042
PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
4043
}
4044
4045
// 2. Process this allocation.
4046
// There is allocation with suballoc.offset, suballoc.size.
4047
PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.privateData);
4048
4049
// 3. Prepare for next iteration.
4050
lastOffset = suballoc.offset + suballoc.size;
4051
--nextAlloc2ndIndex;
4052
}
4053
// We are at the end.
4054
else
4055
{
4056
if (lastOffset < size)
4057
{
4058
// There is free space from lastOffset to size.
4059
const UINT64 unusedRangeSize = size - lastOffset;
4060
PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
4061
}
4062
4063
// End of loop.
4064
lastOffset = size;
4065
}
4066
}
4067
}
4068
4069
PrintDetailedMap_End(json);
4070
}
4071
4072
void BlockMetadata_Linear::DebugLogAllAllocations() const
{
const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
for (auto it = suballocations1st.begin() + m_1stNullItemsBeginCount; it != suballocations1st.end(); ++it)
if (it->type != SUBALLOCATION_TYPE_FREE)
DebugLogAllocation(it->offset, it->size, it->privateData);

const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
for (auto it = suballocations2nd.begin(); it != suballocations2nd.end(); ++it)
if (it->type != SUBALLOCATION_TYPE_FREE)
DebugLogAllocation(it->offset, it->size, it->privateData);
}
4084
4085
Suballocation& BlockMetadata_Linear::FindSuballocation(UINT64 offset) const
4086
{
4087
const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
4088
const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
4089
4090
Suballocation refSuballoc;
4091
refSuballoc.offset = offset;
4092
// Rest of members stays uninitialized intentionally for better performance.
4093
4094
// Item from the 1st vector.
4095
{
4096
const SuballocationVectorType::const_iterator it = BinaryFindSorted(
4097
suballocations1st.begin() + m_1stNullItemsBeginCount,
4098
suballocations1st.end(),
4099
refSuballoc,
4100
SuballocationOffsetLess());
4101
if (it != suballocations1st.end())
4102
{
4103
return const_cast<Suballocation&>(*it);
4104
}
4105
}
4106
4107
if (m_2ndVectorMode != SECOND_VECTOR_EMPTY)
4108
{
4109
// Rest of members stays uninitialized intentionally for better performance.
4110
const SuballocationVectorType::const_iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
4111
BinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, SuballocationOffsetLess()) :
4112
BinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, SuballocationOffsetGreater());
4113
if (it != suballocations2nd.end())
4114
{
4115
return const_cast<Suballocation&>(*it);
4116
}
4117
}
4118
4119
D3D12MA_ASSERT(0 && "Allocation not found in linear allocator!");
4120
return const_cast<Suballocation&>(suballocations1st.back()); // Should never occur.
4121
}
4122
4123
bool BlockMetadata_Linear::ShouldCompact1st() const
{
const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
const size_t suballocCount = AccessSuballocations1st().size();
return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
}
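
// Editor's note (illustrative, not part of the original library): the heuristic above
// compacts the 1st vector only when it is reasonably large (> 32 items) and freed
// ("null") items outnumber live ones by at least 3:2, i.e. at least ~60% of the
// vector is garbage. For example, with suballocCount = 100 and nullItemCount = 60
// there are 40 live items and 60 * 2 = 120 >= 40 * 3 = 120, so compaction runs;
// with nullItemCount = 59 it does not (118 < 41 * 3 = 123). This bounds the O(n)
// compaction pass in CleanupAfterFree() to cases where it actually pays off.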
4129
4130
void BlockMetadata_Linear::CleanupAfterFree()
4131
{
4132
SuballocationVectorType& suballocations1st = AccessSuballocations1st();
4133
SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
4134
4135
if (IsEmpty())
4136
{
4137
suballocations1st.clear();
4138
suballocations2nd.clear();
4139
m_1stNullItemsBeginCount = 0;
4140
m_1stNullItemsMiddleCount = 0;
4141
m_2ndNullItemsCount = 0;
4142
m_2ndVectorMode = SECOND_VECTOR_EMPTY;
4143
}
4144
else
4145
{
4146
const size_t suballoc1stCount = suballocations1st.size();
4147
const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
4148
D3D12MA_ASSERT(nullItem1stCount <= suballoc1stCount);
4149
4150
// Find more null items at the beginning of 1st vector.
4151
while (m_1stNullItemsBeginCount < suballoc1stCount &&
4152
suballocations1st[m_1stNullItemsBeginCount].type == SUBALLOCATION_TYPE_FREE)
4153
{
4154
++m_1stNullItemsBeginCount;
4155
--m_1stNullItemsMiddleCount;
4156
}
4157
4158
// Find more null items at the end of 1st vector.
4159
while (m_1stNullItemsMiddleCount > 0 &&
4160
suballocations1st.back().type == SUBALLOCATION_TYPE_FREE)
4161
{
4162
--m_1stNullItemsMiddleCount;
4163
suballocations1st.pop_back();
4164
}
4165
4166
// Find more null items at the end of 2nd vector.
4167
while (m_2ndNullItemsCount > 0 &&
4168
suballocations2nd.back().type == SUBALLOCATION_TYPE_FREE)
4169
{
4170
--m_2ndNullItemsCount;
4171
suballocations2nd.pop_back();
4172
}
4173
4174
// Find more null items at the beginning of 2nd vector.
4175
while (m_2ndNullItemsCount > 0 &&
4176
suballocations2nd[0].type == SUBALLOCATION_TYPE_FREE)
4177
{
4178
--m_2ndNullItemsCount;
4179
suballocations2nd.remove(0);
4180
}
4181
4182
if (ShouldCompact1st())
4183
{
4184
const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
4185
size_t srcIndex = m_1stNullItemsBeginCount;
4186
for (size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
4187
{
4188
while (suballocations1st[srcIndex].type == SUBALLOCATION_TYPE_FREE)
4189
{
4190
++srcIndex;
4191
}
4192
if (dstIndex != srcIndex)
4193
{
4194
suballocations1st[dstIndex] = suballocations1st[srcIndex];
4195
}
4196
++srcIndex;
4197
}
4198
suballocations1st.resize(nonNullItemCount);
4199
m_1stNullItemsBeginCount = 0;
4200
m_1stNullItemsMiddleCount = 0;
4201
}
4202
4203
// 2nd vector became empty.
4204
if (suballocations2nd.empty())
4205
{
4206
m_2ndVectorMode = SECOND_VECTOR_EMPTY;
4207
}
4208
4209
// 1st vector became empty.
4210
if (suballocations1st.size() - m_1stNullItemsBeginCount == 0)
4211
{
4212
suballocations1st.clear();
4213
m_1stNullItemsBeginCount = 0;
4214
4215
if (!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
4216
{
4217
// Swap 1st with 2nd. Now 2nd is empty.
4218
m_2ndVectorMode = SECOND_VECTOR_EMPTY;
4219
m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
4220
while (m_1stNullItemsBeginCount < suballocations2nd.size() &&
4221
suballocations2nd[m_1stNullItemsBeginCount].type == SUBALLOCATION_TYPE_FREE)
4222
{
4223
++m_1stNullItemsBeginCount;
4224
--m_1stNullItemsMiddleCount;
4225
}
4226
m_2ndNullItemsCount = 0;
4227
m_1stVectorIndex ^= 1;
4228
}
4229
}
4230
}
4231
4232
D3D12MA_HEAVY_ASSERT(Validate());
4233
}
4234
4235
bool BlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
4236
UINT64 allocSize,
4237
UINT64 allocAlignment,
4238
AllocationRequest* pAllocationRequest)
4239
{
4240
const UINT64 blockSize = GetSize();
4241
SuballocationVectorType& suballocations1st = AccessSuballocations1st();
4242
SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
4243
4244
if (m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
4245
{
4246
// Try to allocate at the end of 1st vector.
4247
4248
UINT64 resultBaseOffset = 0;
4249
if (!suballocations1st.empty())
4250
{
4251
const Suballocation& lastSuballoc = suballocations1st.back();
4252
resultBaseOffset = lastSuballoc.offset + lastSuballoc.size + GetDebugMargin();
4253
}
4254
4255
// Start from offset equal to beginning of free space.
4256
UINT64 resultOffset = resultBaseOffset;
4257
// Apply alignment.
4258
resultOffset = AlignUp(resultOffset, allocAlignment);
4259
4260
const UINT64 freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
4261
suballocations2nd.back().offset : blockSize;
4262
4263
// There is enough free space at the end after alignment.
4264
if (resultOffset + allocSize + GetDebugMargin() <= freeSpaceEnd)
4265
{
4266
// All tests passed: Success.
4267
pAllocationRequest->allocHandle = (AllocHandle)(resultOffset + 1);
4268
// pAllocationRequest->item, customData unused.
4269
pAllocationRequest->algorithmData = ALLOC_REQUEST_END_OF_1ST;
4270
return true;
4271
}
4272
}
4273
4274
// Wrap-around to end of 2nd vector. Try to allocate there, watching for the
4275
// beginning of 1st vector as the end of free space.
4276
if (m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
4277
{
4278
D3D12MA_ASSERT(!suballocations1st.empty());
4279
4280
UINT64 resultBaseOffset = 0;
4281
if (!suballocations2nd.empty())
4282
{
4283
const Suballocation& lastSuballoc = suballocations2nd.back();
4284
resultBaseOffset = lastSuballoc.offset + lastSuballoc.size + GetDebugMargin();
4285
}
4286
4287
// Start from offset equal to beginning of free space.
4288
UINT64 resultOffset = resultBaseOffset;
4289
4290
// Apply alignment.
4291
resultOffset = AlignUp(resultOffset, allocAlignment);
4292
4293
size_t index1st = m_1stNullItemsBeginCount;
4294
// There is enough free space at the end after alignment.
4295
if ((index1st == suballocations1st.size() && resultOffset + allocSize + GetDebugMargin() <= blockSize) ||
4296
(index1st < suballocations1st.size() && resultOffset + allocSize + GetDebugMargin() <= suballocations1st[index1st].offset))
4297
{
4298
// All tests passed: Success.
4299
pAllocationRequest->allocHandle = (AllocHandle)(resultOffset + 1);
4300
pAllocationRequest->algorithmData = ALLOC_REQUEST_END_OF_2ND;
4301
// pAllocationRequest->item, customData unused.
4302
return true;
4303
}
4304
}
4305
return false;
4306
}
4307
4308
bool BlockMetadata_Linear::CreateAllocationRequest_UpperAddress(
4309
UINT64 allocSize,
4310
UINT64 allocAlignment,
4311
AllocationRequest* pAllocationRequest)
4312
{
4313
const UINT64 blockSize = GetSize();
4314
SuballocationVectorType& suballocations1st = AccessSuballocations1st();
4315
SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
4316
4317
if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
4318
{
4319
D3D12MA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
4320
return false;
4321
}
4322
4323
// Try to allocate before 2nd.back(), or end of block if 2nd.empty().
4324
if (allocSize > blockSize)
4325
{
4326
return false;
4327
}
4328
UINT64 resultBaseOffset = blockSize - allocSize;
4329
if (!suballocations2nd.empty())
4330
{
4331
const Suballocation& lastSuballoc = suballocations2nd.back();
4332
resultBaseOffset = lastSuballoc.offset - allocSize;
4333
if (allocSize > lastSuballoc.offset)
4334
{
4335
return false;
4336
}
4337
}
4338
4339
// Start from offset equal to end of free space.
4340
UINT64 resultOffset = resultBaseOffset;
4341
// Apply debugMargin at the end.
4342
if (GetDebugMargin() > 0)
4343
{
4344
if (resultOffset < GetDebugMargin())
4345
{
4346
return false;
4347
}
4348
resultOffset -= GetDebugMargin();
4349
}
4350
4351
// Apply alignment.
4352
resultOffset = AlignDown(resultOffset, allocAlignment);
4353
// There is enough free space.
4354
const UINT64 endOf1st = !suballocations1st.empty() ?
4355
suballocations1st.back().offset + suballocations1st.back().size : 0;
4356
4357
if (endOf1st + GetDebugMargin() <= resultOffset)
4358
{
4359
// All tests passed: Success.
4360
pAllocationRequest->allocHandle = (AllocHandle)(resultOffset + 1);
4361
// pAllocationRequest->item unused.
4362
pAllocationRequest->algorithmData = ALLOC_REQUEST_UPPER_ADDRESS;
4363
return true;
4364
}
4365
return false;
4366
}
4367
#endif // _D3D12MA_BLOCK_METADATA_LINEAR_FUNCTIONS
#endif // _D3D12MA_BLOCK_METADATA_LINEAR
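
// Editor's note: an illustrative sketch (not part of the library, kept disabled) of how
// the linear/double-stack code above is typically reached through the public D3D12MA
// API. The names POOL_FLAG_ALGORITHM_LINEAR and ALLOCATION_FLAG_UPPER_ADDRESS are the
// assumed public counterparts; see D3D12MemAlloc.h for the authoritative declarations.
#if 0
static void ExampleLinearPoolUsage(D3D12MA::Allocator* allocator, const D3D12_RESOURCE_DESC& resDesc)
{
// Create a custom pool that uses the linear block metadata implemented above.
D3D12MA::POOL_DESC poolDesc = {};
poolDesc.HeapProperties.Type = D3D12_HEAP_TYPE_DEFAULT;
poolDesc.HeapFlags = D3D12_HEAP_FLAG_ALLOW_ONLY_BUFFERS;
poolDesc.Flags = D3D12MA::POOL_FLAG_ALGORITHM_LINEAR;
D3D12MA::Pool* pool = NULL;
if (FAILED(allocator->CreatePool(&poolDesc, &pool)))
    return;

// Allocations made with ALLOCATION_FLAG_UPPER_ADDRESS go through
// CreateAllocationRequest_UpperAddress and grow the second (upper) stack.
D3D12MA::ALLOCATION_DESC allocDesc = {};
allocDesc.CustomPool = pool;
allocDesc.Flags = D3D12MA::ALLOCATION_FLAG_UPPER_ADDRESS;
D3D12MA::Allocation* alloc = NULL;
allocator->CreateResource(&allocDesc, &resDesc, D3D12_RESOURCE_STATE_COMMON,
    NULL, &alloc, IID_NULL, NULL);
if (alloc)
    alloc->Release();
pool->Release();
}
#endif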
4369
4370
#ifndef _D3D12MA_BLOCK_METADATA_TLSF
4371
class BlockMetadata_TLSF : public BlockMetadata
4372
{
4373
public:
4374
BlockMetadata_TLSF(const ALLOCATION_CALLBACKS* allocationCallbacks, bool isVirtual);
4375
virtual ~BlockMetadata_TLSF();
4376
4377
size_t GetAllocationCount() const override { return m_AllocCount; }
4378
size_t GetFreeRegionsCount() const override { return m_BlocksFreeCount + 1; }
4379
UINT64 GetSumFreeSize() const override { return m_BlocksFreeSize + m_NullBlock->size; }
4380
bool IsEmpty() const override { return m_NullBlock->offset == 0; }
4381
UINT64 GetAllocationOffset(AllocHandle allocHandle) const override { return ((Block*)allocHandle)->offset; };
4382
4383
void Init(UINT64 size) override;
4384
bool Validate() const override;
4385
void GetAllocationInfo(AllocHandle allocHandle, VIRTUAL_ALLOCATION_INFO& outInfo) const override;
4386
4387
bool CreateAllocationRequest(
4388
UINT64 allocSize,
4389
UINT64 allocAlignment,
4390
bool upperAddress,
4391
UINT32 strategy,
4392
AllocationRequest* pAllocationRequest) override;
4393
4394
void Alloc(
4395
const AllocationRequest& request,
4396
UINT64 allocSize,
4397
void* privateData) override;
4398
4399
void Free(AllocHandle allocHandle) override;
4400
void Clear() override;
4401
4402
AllocHandle GetAllocationListBegin() const override;
4403
AllocHandle GetNextAllocation(AllocHandle prevAlloc) const override;
4404
UINT64 GetNextFreeRegionSize(AllocHandle alloc) const override;
4405
void* GetAllocationPrivateData(AllocHandle allocHandle) const override;
4406
void SetAllocationPrivateData(AllocHandle allocHandle, void* privateData) override;
4407
4408
void AddStatistics(Statistics& inoutStats) const override;
4409
void AddDetailedStatistics(DetailedStatistics& inoutStats) const override;
4410
void WriteAllocationInfoToJson(JsonWriter& json) const override;
4411
void DebugLogAllAllocations() const override;
4412
4413
private:
4414
// According to the original paper, a value of 4 or 5 is preferable:
// M. Masmano, I. Ripoll, A. Crespo, and J. Real, "TLSF: a New Dynamic Memory Allocator for Real-Time Systems"
// http://www.gii.upv.es/tlsf/files/ecrts04_tlsf.pdf
4417
static const UINT8 SECOND_LEVEL_INDEX = 5;
4418
static const UINT16 SMALL_BUFFER_SIZE = 256;
4419
static const UINT INITIAL_BLOCK_ALLOC_COUNT = 16;
4420
static const UINT8 MEMORY_CLASS_SHIFT = 7;
4421
static const UINT8 MAX_MEMORY_CLASSES = 65 - MEMORY_CLASS_SHIFT;
4422
4423
class Block
{
public:
UINT64 offset;
UINT64 size;
Block* prevPhysical;
Block* nextPhysical;

void MarkFree() { prevFree = NULL; }
void MarkTaken() { prevFree = this; }
bool IsFree() const { return prevFree != this; }
void*& PrivateData() { D3D12MA_HEAVY_ASSERT(!IsFree()); return privateData; }
Block*& PrevFree() { return prevFree; }
Block*& NextFree() { D3D12MA_HEAVY_ASSERT(IsFree()); return nextFree; }

private:
Block* prevFree; // Address of the same block here indicates that block is taken
union
{
Block* nextFree;
void* privateData;
};
};
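
// Editor's note (illustrative): Block overloads prevFree as a tag. A free block stores
// its actual predecessor in the free list there (possibly NULL), while a taken block
// stores its own address, so no separate "is free" flag is needed:
//
//   Block b;
//   b.MarkTaken(); // prevFree == &b  -> IsFree() == false, union holds privateData
//   b.MarkFree();  // prevFree == NULL -> IsFree() == true,  union holds nextFree
//
// This is why PrivateData() asserts !IsFree() and NextFree() asserts IsFree():
// which union member is valid depends entirely on this tag.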
4446
4447
size_t m_AllocCount = 0;
4448
// Total number of free blocks besides null block
4449
size_t m_BlocksFreeCount = 0;
4450
// Total size of free blocks excluding null block
4451
UINT64 m_BlocksFreeSize = 0;
4452
UINT32 m_IsFreeBitmap = 0;
4453
UINT8 m_MemoryClasses = 0;
4454
UINT32 m_InnerIsFreeBitmap[MAX_MEMORY_CLASSES];
4455
UINT32 m_ListsCount = 0;
4456
/*
4457
* 0: 0-3 lists for small buffers
4458
* 1+: 0-(2^SLI-1) lists for normal buffers
4459
*/
4460
Block** m_FreeList = NULL;
4461
PoolAllocator<Block> m_BlockAllocator;
4462
Block* m_NullBlock = NULL;
4463
4464
UINT8 SizeToMemoryClass(UINT64 size) const;
4465
UINT16 SizeToSecondIndex(UINT64 size, UINT8 memoryClass) const;
4466
UINT32 GetListIndex(UINT8 memoryClass, UINT16 secondIndex) const;
4467
UINT32 GetListIndex(UINT64 size) const;
4468
4469
void RemoveFreeBlock(Block* block);
4470
void InsertFreeBlock(Block* block);
4471
void MergeBlock(Block* block, Block* prev);
4472
4473
Block* FindFreeBlock(UINT64 size, UINT32& listIndex) const;
4474
bool CheckBlock(
4475
Block& block,
4476
UINT32 listIndex,
4477
UINT64 allocSize,
4478
UINT64 allocAlignment,
4479
AllocationRequest* pAllocationRequest);
4480
4481
D3D12MA_CLASS_NO_COPY(BlockMetadata_TLSF)
4482
};
4483
4484
#ifndef _D3D12MA_BLOCK_METADATA_TLSF_FUNCTIONS
4485
BlockMetadata_TLSF::BlockMetadata_TLSF(const ALLOCATION_CALLBACKS* allocationCallbacks, bool isVirtual)
4486
: BlockMetadata(allocationCallbacks, isVirtual),
4487
m_BlockAllocator(*allocationCallbacks, INITIAL_BLOCK_ALLOC_COUNT)
4488
{
4489
D3D12MA_ASSERT(allocationCallbacks);
4490
}
4491
4492
BlockMetadata_TLSF::~BlockMetadata_TLSF()
4493
{
4494
D3D12MA_DELETE_ARRAY(*GetAllocs(), m_FreeList, m_ListsCount);
4495
}
4496
4497
void BlockMetadata_TLSF::Init(UINT64 size)
{
BlockMetadata::Init(size);

m_NullBlock = m_BlockAllocator.Alloc();
m_NullBlock->size = size;
m_NullBlock->offset = 0;
m_NullBlock->prevPhysical = NULL;
m_NullBlock->nextPhysical = NULL;
m_NullBlock->MarkFree();
m_NullBlock->NextFree() = NULL;
m_NullBlock->PrevFree() = NULL;
UINT8 memoryClass = SizeToMemoryClass(size);
UINT16 sli = SizeToSecondIndex(size, memoryClass);
m_ListsCount = (memoryClass == 0 ? 0 : (memoryClass - 1) * (1UL << SECOND_LEVEL_INDEX) + sli) + 1;
if (IsVirtual())
m_ListsCount += 1UL << SECOND_LEVEL_INDEX;
else
m_ListsCount += 4;

m_MemoryClasses = memoryClass + 2;
memset(m_InnerIsFreeBitmap, 0, MAX_MEMORY_CLASSES * sizeof(UINT32));

m_FreeList = D3D12MA_NEW_ARRAY(*GetAllocs(), Block*, m_ListsCount);
memset(m_FreeList, 0, m_ListsCount * sizeof(Block*));
}
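
// Editor's note (illustrative, arithmetic only): for a typical non-virtual 64 MiB block
// (size = 2^26) with SECOND_LEVEL_INDEX = 5 and MEMORY_CLASS_SHIFT = 7:
//   memoryClass  = BitScanMSB(2^26) - 7 = 19
//   sli          = (2^26 >> (19 + 7 - 5)) ^ (1 << 5) = 32 ^ 32 = 0
//   m_ListsCount = (19 - 1) * 32 + 0 + 1 + 4 = 581
//   m_MemoryClasses = 21
// so Init() allocates 581 free-list heads, one per (memory class, second index) bucket,
// plus the 4 coarse "small buffer" lists used in the non-virtual case.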
4523
4524
bool BlockMetadata_TLSF::Validate() const
4525
{
4526
D3D12MA_VALIDATE(GetSumFreeSize() <= GetSize());
4527
4528
UINT64 calculatedSize = m_NullBlock->size;
4529
UINT64 calculatedFreeSize = m_NullBlock->size;
4530
size_t allocCount = 0;
4531
size_t freeCount = 0;
4532
4533
// Check integrity of free lists
4534
for (UINT32 list = 0; list < m_ListsCount; ++list)
4535
{
4536
Block* block = m_FreeList[list];
4537
if (block != NULL)
4538
{
4539
D3D12MA_VALIDATE(block->IsFree());
4540
D3D12MA_VALIDATE(block->PrevFree() == NULL);
4541
while (block->NextFree())
4542
{
4543
D3D12MA_VALIDATE(block->NextFree()->IsFree());
4544
D3D12MA_VALIDATE(block->NextFree()->PrevFree() == block);
4545
block = block->NextFree();
4546
}
4547
}
4548
}
4549
4550
D3D12MA_VALIDATE(m_NullBlock->nextPhysical == NULL);
4551
if (m_NullBlock->prevPhysical)
4552
{
4553
D3D12MA_VALIDATE(m_NullBlock->prevPhysical->nextPhysical == m_NullBlock);
4554
}
4555
4556
// Check all blocks
4557
UINT64 nextOffset = m_NullBlock->offset;
4558
for (Block* prev = m_NullBlock->prevPhysical; prev != NULL; prev = prev->prevPhysical)
4559
{
4560
D3D12MA_VALIDATE(prev->offset + prev->size == nextOffset);
4561
nextOffset = prev->offset;
4562
calculatedSize += prev->size;
4563
4564
UINT32 listIndex = GetListIndex(prev->size);
4565
if (prev->IsFree())
4566
{
4567
++freeCount;
4568
// Check if free block belongs to free list
4569
Block* freeBlock = m_FreeList[listIndex];
4570
D3D12MA_VALIDATE(freeBlock != NULL);
4571
4572
bool found = false;
4573
do
4574
{
4575
if (freeBlock == prev)
4576
found = true;
4577
4578
freeBlock = freeBlock->NextFree();
4579
} while (!found && freeBlock != NULL);
4580
4581
D3D12MA_VALIDATE(found);
4582
calculatedFreeSize += prev->size;
4583
}
4584
else
4585
{
4586
++allocCount;
4587
// Check if taken block is not on a free list
4588
Block* freeBlock = m_FreeList[listIndex];
4589
while (freeBlock)
4590
{
4591
D3D12MA_VALIDATE(freeBlock != prev);
4592
freeBlock = freeBlock->NextFree();
4593
}
4594
}
4595
4596
if (prev->prevPhysical)
4597
{
4598
D3D12MA_VALIDATE(prev->prevPhysical->nextPhysical == prev);
4599
}
4600
}
4601
4602
D3D12MA_VALIDATE(nextOffset == 0);
4603
D3D12MA_VALIDATE(calculatedSize == GetSize());
4604
D3D12MA_VALIDATE(calculatedFreeSize == GetSumFreeSize());
4605
D3D12MA_VALIDATE(allocCount == m_AllocCount);
4606
D3D12MA_VALIDATE(freeCount == m_BlocksFreeCount);
4607
4608
return true;
4609
}
4610
4611
void BlockMetadata_TLSF::GetAllocationInfo(AllocHandle allocHandle, VIRTUAL_ALLOCATION_INFO& outInfo) const
4612
{
4613
Block* block = (Block*)allocHandle;
4614
D3D12MA_ASSERT(!block->IsFree() && "Cannot get allocation info for free block!");
4615
outInfo.Offset = block->offset;
4616
outInfo.Size = block->size;
4617
outInfo.pPrivateData = block->PrivateData();
4618
}
4619
4620
bool BlockMetadata_TLSF::CreateAllocationRequest(
4621
UINT64 allocSize,
4622
UINT64 allocAlignment,
4623
bool upperAddress,
4624
UINT32 strategy,
4625
AllocationRequest* pAllocationRequest)
4626
{
4627
D3D12MA_ASSERT(allocSize > 0 && "Cannot allocate empty block!");
4628
D3D12MA_ASSERT(!upperAddress && "ALLOCATION_FLAG_UPPER_ADDRESS can be used only with linear algorithm.");
4629
D3D12MA_ASSERT(pAllocationRequest != NULL);
4630
D3D12MA_HEAVY_ASSERT(Validate());
4631
4632
allocSize += GetDebugMargin();
4633
// Quick check for too small pool
4634
if (allocSize > GetSumFreeSize())
4635
return false;
4636
4637
// If no free blocks in pool then check only null block
4638
if (m_BlocksFreeCount == 0)
4639
return CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, pAllocationRequest);
4640
4641
// Round up to the next block
4642
UINT64 sizeForNextList = allocSize;
4643
UINT16 smallSizeStep = SMALL_BUFFER_SIZE / (IsVirtual() ? 1 << SECOND_LEVEL_INDEX : 4);
4644
if (allocSize > SMALL_BUFFER_SIZE)
4645
{
4646
sizeForNextList += (1ULL << (BitScanMSB(allocSize) - SECOND_LEVEL_INDEX));
4647
}
4648
else if (allocSize > SMALL_BUFFER_SIZE - smallSizeStep)
4649
sizeForNextList = SMALL_BUFFER_SIZE + 1;
4650
else
4651
sizeForNextList += smallSizeStep;
4652
4653
UINT32 nextListIndex = 0;
4654
UINT32 prevListIndex = 0;
4655
Block* nextListBlock = NULL;
4656
Block* prevListBlock = NULL;
4657
4658
// Check blocks according to strategies
4659
if (strategy & ALLOCATION_FLAG_STRATEGY_MIN_TIME)
4660
{
4661
// Quick check for larger block first
4662
nextListBlock = FindFreeBlock(sizeForNextList, nextListIndex);
4663
if (nextListBlock != NULL && CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, pAllocationRequest))
4664
return true;
4665
4666
// If not fitted then null block
4667
if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, pAllocationRequest))
4668
return true;
4669
4670
// Null block failed, search larger bucket
4671
while (nextListBlock)
4672
{
4673
if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, pAllocationRequest))
4674
return true;
4675
nextListBlock = nextListBlock->NextFree();
4676
}
4677
4678
// Failed again, check best fit bucket
4679
prevListBlock = FindFreeBlock(allocSize, prevListIndex);
4680
while (prevListBlock)
4681
{
4682
if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, pAllocationRequest))
4683
return true;
4684
prevListBlock = prevListBlock->NextFree();
4685
}
4686
}
4687
else if (strategy & ALLOCATION_FLAG_STRATEGY_MIN_MEMORY)
4688
{
4689
// Check best fit bucket
4690
prevListBlock = FindFreeBlock(allocSize, prevListIndex);
4691
while (prevListBlock)
4692
{
4693
if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, pAllocationRequest))
4694
return true;
4695
prevListBlock = prevListBlock->NextFree();
4696
}
4697
4698
// If failed check null block
4699
if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, pAllocationRequest))
4700
return true;
4701
4702
// Check larger bucket
4703
nextListBlock = FindFreeBlock(sizeForNextList, nextListIndex);
4704
while (nextListBlock)
4705
{
4706
if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, pAllocationRequest))
4707
return true;
4708
nextListBlock = nextListBlock->NextFree();
4709
}
4710
}
4711
else if (strategy & ALLOCATION_FLAG_STRATEGY_MIN_OFFSET)
4712
{
4713
// Perform search from the start
4714
Vector<Block*> blockList(m_BlocksFreeCount, *GetAllocs());
4715
4716
size_t i = m_BlocksFreeCount;
4717
for (Block* block = m_NullBlock->prevPhysical; block != NULL; block = block->prevPhysical)
4718
{
4719
if (block->IsFree() && block->size >= allocSize)
4720
blockList[--i] = block;
4721
}
4722
4723
for (; i < m_BlocksFreeCount; ++i)
4724
{
4725
Block& block = *blockList[i];
4726
if (CheckBlock(block, GetListIndex(block.size), allocSize, allocAlignment, pAllocationRequest))
4727
return true;
4728
}
4729
4730
// If failed check null block
4731
if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, pAllocationRequest))
4732
return true;
4733
4734
// Whole range searched, no more memory
4735
return false;
4736
}
4737
else
4738
{
4739
// Check larger bucket
4740
nextListBlock = FindFreeBlock(sizeForNextList, nextListIndex);
4741
while (nextListBlock)
4742
{
4743
if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, pAllocationRequest))
4744
return true;
4745
nextListBlock = nextListBlock->NextFree();
4746
}
4747
4748
// If failed check null block
4749
if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, pAllocationRequest))
4750
return true;
4751
4752
// Check best fit bucket
4753
prevListBlock = FindFreeBlock(allocSize, prevListIndex);
4754
while (prevListBlock)
4755
{
4756
if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, pAllocationRequest))
4757
return true;
4758
prevListBlock = prevListBlock->NextFree();
4759
}
4760
}
4761
4762
// Worst case, full search has to be done
4763
while (++nextListIndex < m_ListsCount)
4764
{
4765
nextListBlock = m_FreeList[nextListIndex];
4766
while (nextListBlock)
4767
{
4768
if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, pAllocationRequest))
4769
return true;
4770
nextListBlock = nextListBlock->NextFree();
4771
}
4772
}
4773
4774
// Whole pool searched, no suitable free block found.
4775
return false;
4776
}
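
// Editor's note (illustrative, arithmetic only): the "round up to the next block" step
// in CreateAllocationRequest() above nudges the search size into the next second-level
// bucket so that any block found there is guaranteed to be large enough. For example,
// with SECOND_LEVEL_INDEX = 5 and a non-virtual allocSize of 1000 bytes:
//   BitScanMSB(1000) = 9, so sizeForNextList = 1000 + (1 << (9 - 5)) = 1016,
// which maps to list index 67 while 1000 itself maps to 66 - one bucket higher.
// Small sizes use smallSizeStep = 256 / 4 = 64 instead, e.g. allocSize = 100 gives
// sizeForNextList = 164.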
4777
4778
void BlockMetadata_TLSF::Alloc(
4779
const AllocationRequest& request,
4780
UINT64 allocSize,
4781
void* privateData)
4782
{
4783
// Get block and pop it from the free list
4784
Block* currentBlock = (Block*)request.allocHandle;
4785
UINT64 offset = request.algorithmData;
4786
D3D12MA_ASSERT(currentBlock != NULL);
4787
D3D12MA_ASSERT(currentBlock->offset <= offset);
4788
4789
if (currentBlock != m_NullBlock)
4790
RemoveFreeBlock(currentBlock);
4791
4792
// Append the missing alignment to the previous block, or create a new block for it.
UINT64 missingAlignment = offset - currentBlock->offset;
if (missingAlignment)
{
Block* prevBlock = currentBlock->prevPhysical;
D3D12MA_ASSERT(prevBlock != NULL && "There should be no missing alignment at offset 0!");

if (prevBlock->IsFree() && prevBlock->size != GetDebugMargin())
{
UINT32 oldList = GetListIndex(prevBlock->size);
prevBlock->size += missingAlignment;
// Check if the new size crosses a list bucket.
if (oldList != GetListIndex(prevBlock->size))
{
prevBlock->size -= missingAlignment;
RemoveFreeBlock(prevBlock);
prevBlock->size += missingAlignment;
InsertFreeBlock(prevBlock);
}
else
m_BlocksFreeSize += missingAlignment;
}
else
{
Block* newBlock = m_BlockAllocator.Alloc();
currentBlock->prevPhysical = newBlock;
prevBlock->nextPhysical = newBlock;
newBlock->prevPhysical = prevBlock;
newBlock->nextPhysical = currentBlock;
newBlock->size = missingAlignment;
newBlock->offset = currentBlock->offset;
newBlock->MarkTaken();

InsertFreeBlock(newBlock);
}

currentBlock->size -= missingAlignment;
currentBlock->offset += missingAlignment;
}
4831
4832
UINT64 size = request.size + GetDebugMargin();
4833
if (currentBlock->size == size)
4834
{
4835
if (currentBlock == m_NullBlock)
4836
{
4837
// Setup new null block
4838
m_NullBlock = m_BlockAllocator.Alloc();
4839
m_NullBlock->size = 0;
4840
m_NullBlock->offset = currentBlock->offset + size;
4841
m_NullBlock->prevPhysical = currentBlock;
4842
m_NullBlock->nextPhysical = NULL;
4843
m_NullBlock->MarkFree();
4844
m_NullBlock->PrevFree() = NULL;
4845
m_NullBlock->NextFree() = NULL;
4846
currentBlock->nextPhysical = m_NullBlock;
4847
currentBlock->MarkTaken();
4848
}
4849
}
4850
else
4851
{
4852
D3D12MA_ASSERT(currentBlock->size > size && "Proper block already found, shouldn't find smaller one!");
4853
4854
// Create new free block
4855
Block* newBlock = m_BlockAllocator.Alloc();
4856
newBlock->size = currentBlock->size - size;
4857
newBlock->offset = currentBlock->offset + size;
4858
newBlock->prevPhysical = currentBlock;
4859
newBlock->nextPhysical = currentBlock->nextPhysical;
4860
currentBlock->nextPhysical = newBlock;
4861
currentBlock->size = size;
4862
4863
if (currentBlock == m_NullBlock)
4864
{
4865
m_NullBlock = newBlock;
4866
m_NullBlock->MarkFree();
4867
m_NullBlock->NextFree() = NULL;
4868
m_NullBlock->PrevFree() = NULL;
4869
currentBlock->MarkTaken();
4870
}
4871
else
4872
{
4873
newBlock->nextPhysical->prevPhysical = newBlock;
4874
newBlock->MarkTaken();
4875
InsertFreeBlock(newBlock);
4876
}
4877
}
4878
currentBlock->PrivateData() = privateData;
4879
4880
if (GetDebugMargin() > 0)
4881
{
4882
currentBlock->size -= GetDebugMargin();
4883
Block* newBlock = m_BlockAllocator.Alloc();
4884
newBlock->size = GetDebugMargin();
4885
newBlock->offset = currentBlock->offset + currentBlock->size;
4886
newBlock->prevPhysical = currentBlock;
4887
newBlock->nextPhysical = currentBlock->nextPhysical;
4888
newBlock->MarkTaken();
4889
currentBlock->nextPhysical->prevPhysical = newBlock;
4890
currentBlock->nextPhysical = newBlock;
4891
InsertFreeBlock(newBlock);
4892
}
4893
++m_AllocCount;
4894
}
4895
4896
void BlockMetadata_TLSF::Free(AllocHandle allocHandle)
4897
{
4898
Block* block = (Block*)allocHandle;
4899
Block* next = block->nextPhysical;
4900
D3D12MA_ASSERT(!block->IsFree() && "Block is already free!");
4901
4902
--m_AllocCount;
4903
if (GetDebugMargin() > 0)
4904
{
4905
RemoveFreeBlock(next);
4906
MergeBlock(next, block);
4907
block = next;
4908
next = next->nextPhysical;
4909
}
4910
4911
// Try merging
4912
Block* prev = block->prevPhysical;
4913
if (prev != NULL && prev->IsFree() && prev->size != GetDebugMargin())
4914
{
4915
RemoveFreeBlock(prev);
4916
MergeBlock(block, prev);
4917
}
4918
4919
if (!next->IsFree())
4920
InsertFreeBlock(block);
4921
else if (next == m_NullBlock)
4922
MergeBlock(m_NullBlock, block);
4923
else
4924
{
4925
RemoveFreeBlock(next);
4926
MergeBlock(next, block);
4927
InsertFreeBlock(next);
4928
}
4929
}
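
// Editor's note (descriptive summary, added): Free() above handles three cases for the
// physical neighbor `next` that follows the freed block:
//   - next is taken: simply insert the freed block into the free lists;
//   - next is the null block: fold the freed block into the null block (the tail free
//     region is never kept on the free lists);
//   - next is an ordinary free block: remove it from the free lists, merge, re-insert.
// The preceding free block, if any, has already been merged in before this decision.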
4930
4931
void BlockMetadata_TLSF::Clear()
4932
{
4933
m_AllocCount = 0;
4934
m_BlocksFreeCount = 0;
4935
m_BlocksFreeSize = 0;
4936
m_IsFreeBitmap = 0;
4937
m_NullBlock->offset = 0;
4938
m_NullBlock->size = GetSize();
4939
Block* block = m_NullBlock->prevPhysical;
4940
m_NullBlock->prevPhysical = NULL;
4941
while (block)
4942
{
4943
Block* prev = block->prevPhysical;
4944
m_BlockAllocator.Free(block);
4945
block = prev;
4946
}
4947
memset(m_FreeList, 0, m_ListsCount * sizeof(Block*));
4948
memset(m_InnerIsFreeBitmap, 0, m_MemoryClasses * sizeof(UINT32));
4949
}
4950
4951
AllocHandle BlockMetadata_TLSF::GetAllocationListBegin() const
{
if (m_AllocCount == 0)
return (AllocHandle)0;

for (Block* block = m_NullBlock->prevPhysical; block; block = block->prevPhysical)
{
if (!block->IsFree())
return (AllocHandle)block;
}
D3D12MA_ASSERT(false && "If m_AllocCount > 0 then there should be at least one allocation!");
return (AllocHandle)0;
}
4964
4965
AllocHandle BlockMetadata_TLSF::GetNextAllocation(AllocHandle prevAlloc) const
4966
{
4967
Block* startBlock = (Block*)prevAlloc;
4968
D3D12MA_ASSERT(!startBlock->IsFree() && "Incorrect block!");
4969
4970
for (Block* block = startBlock->prevPhysical; block; block = block->prevPhysical)
4971
{
4972
if (!block->IsFree())
4973
return (AllocHandle)block;
4974
}
4975
return (AllocHandle)0;
4976
}
4977
4978
UINT64 BlockMetadata_TLSF::GetNextFreeRegionSize(AllocHandle alloc) const
4979
{
4980
Block* block = (Block*)alloc;
4981
D3D12MA_ASSERT(!block->IsFree() && "Incorrect block!");
4982
4983
if (block->prevPhysical)
4984
return block->prevPhysical->IsFree() ? block->prevPhysical->size : 0;
4985
return 0;
4986
}
4987
4988
void* BlockMetadata_TLSF::GetAllocationPrivateData(AllocHandle allocHandle) const
4989
{
4990
Block* block = (Block*)allocHandle;
4991
D3D12MA_ASSERT(!block->IsFree() && "Cannot get user data for free block!");
4992
return block->PrivateData();
4993
}
4994
4995
void BlockMetadata_TLSF::SetAllocationPrivateData(AllocHandle allocHandle, void* privateData)
4996
{
4997
Block* block = (Block*)allocHandle;
4998
D3D12MA_ASSERT(!block->IsFree() && "Trying to set user data for not allocated block!");
4999
block->PrivateData() = privateData;
5000
}
5001
5002
void BlockMetadata_TLSF::AddStatistics(Statistics& inoutStats) const
5003
{
5004
inoutStats.BlockCount++;
5005
inoutStats.AllocationCount += static_cast<UINT>(m_AllocCount);
5006
inoutStats.BlockBytes += GetSize();
5007
inoutStats.AllocationBytes += GetSize() - GetSumFreeSize();
5008
}
5009
5010
void BlockMetadata_TLSF::AddDetailedStatistics(DetailedStatistics& inoutStats) const
5011
{
5012
inoutStats.Stats.BlockCount++;
5013
inoutStats.Stats.BlockBytes += GetSize();
5014
5015
for (Block* block = m_NullBlock->prevPhysical; block != NULL; block = block->prevPhysical)
5016
{
5017
if (block->IsFree())
5018
AddDetailedStatisticsUnusedRange(inoutStats, block->size);
5019
else
5020
AddDetailedStatisticsAllocation(inoutStats, block->size);
5021
}
5022
5023
if (m_NullBlock->size > 0)
5024
AddDetailedStatisticsUnusedRange(inoutStats, m_NullBlock->size);
5025
}
5026
5027
void BlockMetadata_TLSF::WriteAllocationInfoToJson(JsonWriter& json) const
5028
{
5029
size_t blockCount = m_AllocCount + m_BlocksFreeCount;
5030
Vector<Block*> blockList(blockCount, *GetAllocs());
5031
5032
size_t i = blockCount;
5033
if (m_NullBlock->size > 0)
5034
{
5035
++blockCount;
5036
blockList.push_back(m_NullBlock);
5037
}
5038
for (Block* block = m_NullBlock->prevPhysical; block != NULL; block = block->prevPhysical)
5039
{
5040
blockList[--i] = block;
5041
}
5042
D3D12MA_ASSERT(i == 0);
5043
5044
PrintDetailedMap_Begin(json, GetSumFreeSize(), GetAllocationCount(), m_BlocksFreeCount +
5045
(m_NullBlock->size > 0 ? 1 : 0));
5046
for (; i < blockCount; ++i)
5047
{
5048
Block* block = blockList[i];
5049
if (block->IsFree())
5050
PrintDetailedMap_UnusedRange(json, block->offset, block->size);
5051
else
5052
PrintDetailedMap_Allocation(json, block->offset, block->size, block->PrivateData());
5053
}
5054
PrintDetailedMap_End(json);
5055
}
5056
5057
void BlockMetadata_TLSF::DebugLogAllAllocations() const
5058
{
5059
for (Block* block = m_NullBlock->prevPhysical; block != NULL; block = block->prevPhysical)
5060
{
5061
if (!block->IsFree())
5062
{
5063
DebugLogAllocation(block->offset, block->size, block->PrivateData());
5064
}
5065
}
5066
}
5067
5068
UINT8 BlockMetadata_TLSF::SizeToMemoryClass(UINT64 size) const
{
if (size > SMALL_BUFFER_SIZE)
return BitScanMSB(size) - MEMORY_CLASS_SHIFT;
return 0;
}

UINT16 BlockMetadata_TLSF::SizeToSecondIndex(UINT64 size, UINT8 memoryClass) const
{
if (memoryClass == 0)
{
if (IsVirtual())
return static_cast<UINT16>((size - 1) / 8);
else
return static_cast<UINT16>((size - 1) / 64);
}
return static_cast<UINT16>((size >> (memoryClass + MEMORY_CLASS_SHIFT - SECOND_LEVEL_INDEX)) ^ (1U << SECOND_LEVEL_INDEX));
}

UINT32 BlockMetadata_TLSF::GetListIndex(UINT8 memoryClass, UINT16 secondIndex) const
{
if (memoryClass == 0)
return secondIndex;

const UINT32 index = static_cast<UINT32>(memoryClass - 1) * (1 << SECOND_LEVEL_INDEX) + secondIndex;
if (IsVirtual())
return index + (1 << SECOND_LEVEL_INDEX);
else
return index + 4;
}

UINT32 BlockMetadata_TLSF::GetListIndex(UINT64 size) const
{
UINT8 memoryClass = SizeToMemoryClass(size);
return GetListIndex(memoryClass, SizeToSecondIndex(size, memoryClass));
}
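
// Editor's note (illustrative, arithmetic only): with the constants above
// (SMALL_BUFFER_SIZE = 256, MEMORY_CLASS_SHIFT = 7, SECOND_LEVEL_INDEX = 5) and a
// non-virtual block, a size of 1000 bytes maps as follows:
//   memoryClass = BitScanMSB(1000) - 7 = 9 - 7 = 2
//   secondIndex = (1000 >> (2 + 7 - 5)) ^ (1 << 5) = 62 ^ 32 = 30
//   listIndex   = (2 - 1) * 32 + 30 + 4 = 66
// Sizes up to 256 bytes all land in memory class 0 and use the 4 coarse "small buffer"
// lists (64-byte granularity) instead.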
5104
5105
void BlockMetadata_TLSF::RemoveFreeBlock(Block* block)
5106
{
5107
D3D12MA_ASSERT(block != m_NullBlock);
5108
D3D12MA_ASSERT(block->IsFree());
5109
5110
if (block->NextFree() != NULL)
5111
block->NextFree()->PrevFree() = block->PrevFree();
5112
if (block->PrevFree() != NULL)
5113
block->PrevFree()->NextFree() = block->NextFree();
5114
else
5115
{
5116
UINT8 memClass = SizeToMemoryClass(block->size);
5117
UINT16 secondIndex = SizeToSecondIndex(block->size, memClass);
5118
UINT32 index = GetListIndex(memClass, secondIndex);
5119
m_FreeList[index] = block->NextFree();
5120
if (block->NextFree() == NULL)
5121
{
5122
m_InnerIsFreeBitmap[memClass] &= ~(1U << secondIndex);
5123
if (m_InnerIsFreeBitmap[memClass] == 0)
5124
m_IsFreeBitmap &= ~(1UL << memClass);
5125
}
5126
}
5127
block->MarkTaken();
5128
block->PrivateData() = NULL;
5129
--m_BlocksFreeCount;
5130
m_BlocksFreeSize -= block->size;
5131
}
5132
5133
void BlockMetadata_TLSF::InsertFreeBlock(Block* block)
5134
{
5135
D3D12MA_ASSERT(block != m_NullBlock);
5136
D3D12MA_ASSERT(!block->IsFree() && "Cannot insert block twice!");
5137
5138
UINT8 memClass = SizeToMemoryClass(block->size);
5139
UINT16 secondIndex = SizeToSecondIndex(block->size, memClass);
5140
UINT32 index = GetListIndex(memClass, secondIndex);
5141
block->PrevFree() = NULL;
5142
block->NextFree() = m_FreeList[index];
5143
m_FreeList[index] = block;
5144
if (block->NextFree() != NULL)
5145
block->NextFree()->PrevFree() = block;
5146
else
5147
{
5148
m_InnerIsFreeBitmap[memClass] |= 1U << secondIndex;
5149
m_IsFreeBitmap |= 1UL << memClass;
5150
}
5151
++m_BlocksFreeCount;
5152
m_BlocksFreeSize += block->size;
5153
}
5154
5155
void BlockMetadata_TLSF::MergeBlock(Block* block, Block* prev)
{
D3D12MA_ASSERT(block->prevPhysical == prev && "Cannot merge separate physical regions!");
D3D12MA_ASSERT(!prev->IsFree() && "Cannot merge block that belongs to free list!");

block->offset = prev->offset;
block->size += prev->size;
block->prevPhysical = prev->prevPhysical;
if (block->prevPhysical)
block->prevPhysical->nextPhysical = block;
m_BlockAllocator.Free(prev);
}
5167
5168
BlockMetadata_TLSF::Block* BlockMetadata_TLSF::FindFreeBlock(UINT64 size, UINT32& listIndex) const
{
UINT8 memoryClass = SizeToMemoryClass(size);
UINT32 innerFreeMap = m_InnerIsFreeBitmap[memoryClass] & (~0U << SizeToSecondIndex(size, memoryClass));
if (!innerFreeMap)
{
// Check higher levels for available blocks.
UINT32 freeMap = m_IsFreeBitmap & (~0UL << (memoryClass + 1));
if (!freeMap)
return NULL; // No more memory available.

// Find lowest free region
memoryClass = BitScanLSB(freeMap);
innerFreeMap = m_InnerIsFreeBitmap[memoryClass];
D3D12MA_ASSERT(innerFreeMap != 0);
}
// Find lowest free subregion
listIndex = GetListIndex(memoryClass, BitScanLSB(innerFreeMap));
return m_FreeList[listIndex];
}
5188
5189
bool BlockMetadata_TLSF::CheckBlock(
5190
Block& block,
5191
UINT32 listIndex,
5192
UINT64 allocSize,
5193
UINT64 allocAlignment,
5194
AllocationRequest* pAllocationRequest)
5195
{
5196
D3D12MA_ASSERT(block.IsFree() && "Block is already taken!");
5197
5198
UINT64 alignedOffset = AlignUp(block.offset, allocAlignment);
5199
if (block.size < allocSize + alignedOffset - block.offset)
5200
return false;
5201
5202
// Alloc successful
5203
pAllocationRequest->allocHandle = (AllocHandle)&block;
5204
pAllocationRequest->size = allocSize - GetDebugMargin();
5205
pAllocationRequest->algorithmData = alignedOffset;
5206
5207
// Place block at the start of list if it's normal block
5208
if (listIndex != m_ListsCount && block.PrevFree())
5209
{
5210
block.PrevFree()->NextFree() = block.NextFree();
5211
if (block.NextFree())
5212
block.NextFree()->PrevFree() = block.PrevFree();
5213
block.PrevFree() = NULL;
5214
block.NextFree() = m_FreeList[listIndex];
5215
m_FreeList[listIndex] = &block;
5216
if (block.NextFree())
5217
block.NextFree()->PrevFree() = &block;
5218
}
5219
5220
return true;
5221
}
5222
#endif // _D3D12MA_BLOCK_METADATA_TLSF_FUNCTIONS
5223
#endif // _D3D12MA_BLOCK_METADATA_TLSF
5224
5225
#ifndef _D3D12MA_MEMORY_BLOCK
5226
/*
5227
Represents a single block of device memory (heap).
5228
Base class for inheritance.
5229
Thread-safety: This class must be externally synchronized.
5230
*/
5231
class MemoryBlock
5232
{
5233
public:
5234
// Creates the ID3D12Heap.
5235
MemoryBlock(
5236
AllocatorPimpl* allocator,
5237
const D3D12_HEAP_PROPERTIES& heapProps,
5238
D3D12_HEAP_FLAGS heapFlags,
5239
UINT64 size,
5240
UINT id);
5241
virtual ~MemoryBlock();
5242
5243
const D3D12_HEAP_PROPERTIES& GetHeapProperties() const { return m_HeapProps; }
5244
D3D12_HEAP_FLAGS GetHeapFlags() const { return m_HeapFlags; }
5245
UINT64 GetSize() const { return m_Size; }
5246
UINT GetId() const { return m_Id; }
5247
ID3D12Heap* GetHeap() const { return m_Heap; }
5248
5249
protected:
5250
AllocatorPimpl* const m_Allocator;
5251
const D3D12_HEAP_PROPERTIES m_HeapProps;
5252
const D3D12_HEAP_FLAGS m_HeapFlags;
5253
const UINT64 m_Size;
5254
const UINT m_Id;
5255
5256
HRESULT Init(ID3D12ProtectedResourceSession* pProtectedSession, bool denyMsaaTextures);
5257
5258
private:
5259
ID3D12Heap* m_Heap = NULL;
5260
5261
D3D12MA_CLASS_NO_COPY(MemoryBlock)
5262
};
5263
#endif // _D3D12MA_MEMORY_BLOCK
5264
5265
#ifndef _D3D12MA_NORMAL_BLOCK
5266
/*
5267
Represents a single block of device memory (heap) with all the data about its
5268
regions (aka suballocations, Allocation), assigned and free.
5269
Thread-safety: This class must be externally synchronized.
5270
*/
5271
class NormalBlock : public MemoryBlock
5272
{
5273
public:
5274
BlockMetadata* m_pMetadata;
5275
5276
NormalBlock(
5277
AllocatorPimpl* allocator,
5278
BlockVector* blockVector,
5279
const D3D12_HEAP_PROPERTIES& heapProps,
5280
D3D12_HEAP_FLAGS heapFlags,
5281
UINT64 size,
5282
UINT id);
5283
virtual ~NormalBlock();
5284
5285
BlockVector* GetBlockVector() const { return m_BlockVector; }
5286
5287
// 'algorithm' should be one of the *_ALGORITHM_* flags in enums POOL_FLAGS or VIRTUAL_BLOCK_FLAGS
5288
HRESULT Init(UINT32 algorithm, ID3D12ProtectedResourceSession* pProtectedSession, bool denyMsaaTextures);
5289
5290
// Validates all data structures inside this object. If not valid, returns false.
5291
bool Validate() const;
5292
5293
private:
5294
BlockVector* m_BlockVector;
5295
5296
D3D12MA_CLASS_NO_COPY(NormalBlock)
5297
};
5298
#endif // _D3D12MA_NORMAL_BLOCK
5299
5300
#ifndef _D3D12MA_COMMITTED_ALLOCATION_LIST_ITEM_TRAITS
5301
struct CommittedAllocationListItemTraits
5302
{
5303
using ItemType = Allocation;
5304
5305
static ItemType* GetPrev(const ItemType* item)
5306
{
5307
D3D12MA_ASSERT(item->m_PackedData.GetType() == Allocation::TYPE_COMMITTED || item->m_PackedData.GetType() == Allocation::TYPE_HEAP);
5308
return item->m_Committed.prev;
5309
}
5310
static ItemType* GetNext(const ItemType* item)
5311
{
5312
D3D12MA_ASSERT(item->m_PackedData.GetType() == Allocation::TYPE_COMMITTED || item->m_PackedData.GetType() == Allocation::TYPE_HEAP);
5313
return item->m_Committed.next;
5314
}
5315
static ItemType*& AccessPrev(ItemType* item)
5316
{
5317
D3D12MA_ASSERT(item->m_PackedData.GetType() == Allocation::TYPE_COMMITTED || item->m_PackedData.GetType() == Allocation::TYPE_HEAP);
5318
return item->m_Committed.prev;
5319
}
5320
static ItemType*& AccessNext(ItemType* item)
5321
{
5322
D3D12MA_ASSERT(item->m_PackedData.GetType() == Allocation::TYPE_COMMITTED || item->m_PackedData.GetType() == Allocation::TYPE_HEAP);
5323
return item->m_Committed.next;
5324
}
5325
};
5326
#endif // _D3D12MA_COMMITTED_ALLOCATION_LIST_ITEM_TRAITS
5327
5328
#ifndef _D3D12MA_COMMITTED_ALLOCATION_LIST
5329
/*
5330
Stores linked list of Allocation objects that are of TYPE_COMMITTED or TYPE_HEAP.
5331
Thread-safe, synchronized internally.
5332
*/
5333
class CommittedAllocationList
5334
{
5335
public:
5336
CommittedAllocationList() = default;
5337
void Init(bool useMutex, D3D12_HEAP_TYPE heapType, PoolPimpl* pool);
5338
~CommittedAllocationList();
5339
5340
D3D12_HEAP_TYPE GetHeapType() const { return m_HeapType; }
5341
PoolPimpl* GetPool() const { return m_Pool; }
5342
UINT GetMemorySegmentGroup(AllocatorPimpl* allocator) const;
5343
5344
void AddStatistics(Statistics& inoutStats);
5345
void AddDetailedStatistics(DetailedStatistics& inoutStats);
5346
// Writes JSON array with the list of allocations.
5347
void BuildStatsString(JsonWriter& json);
5348
5349
void Register(Allocation* alloc);
5350
void Unregister(Allocation* alloc);
5351
5352
private:
5353
using CommittedAllocationLinkedList = IntrusiveLinkedList<CommittedAllocationListItemTraits>;
5354
5355
bool m_UseMutex = true;
5356
D3D12_HEAP_TYPE m_HeapType = D3D12_HEAP_TYPE_CUSTOM;
5357
PoolPimpl* m_Pool = NULL;
5358
5359
D3D12MA_RW_MUTEX m_Mutex;
5360
CommittedAllocationLinkedList m_AllocationList;
5361
};
5362
#endif // _D3D12MA_COMMITTED_ALLOCATION_LIST
5363
5364
#ifndef _D3D12M_COMMITTED_ALLOCATION_PARAMETERS
5365
struct CommittedAllocationParameters
5366
{
5367
CommittedAllocationList* m_List = NULL;
5368
D3D12_HEAP_PROPERTIES m_HeapProperties = {};
5369
D3D12_HEAP_FLAGS m_HeapFlags = D3D12_HEAP_FLAG_NONE;
5370
ID3D12ProtectedResourceSession* m_ProtectedSession = NULL;
5371
bool m_CanAlias = false;
5372
D3D12_RESIDENCY_PRIORITY m_ResidencyPriority = D3D12_RESIDENCY_PRIORITY_NONE;
5373
5374
bool IsValid() const { return m_List != NULL; }
5375
};
5376
#endif // _D3D12M_COMMITTED_ALLOCATION_PARAMETERS
5377
5378
// Simple variant data structure to hold all possible variations of ID3D12Device*::CreateCommittedResource* and ID3D12Device*::CreatePlacedResource* arguments
5379
struct CREATE_RESOURCE_PARAMS
5380
{
5381
CREATE_RESOURCE_PARAMS() = delete;
5382
CREATE_RESOURCE_PARAMS(
5383
const D3D12_RESOURCE_DESC* pResourceDesc,
5384
D3D12_RESOURCE_STATES InitialResourceState,
5385
const D3D12_CLEAR_VALUE* pOptimizedClearValue)
5386
: Variant(VARIANT_WITH_STATE)
5387
, pResourceDesc(pResourceDesc)
5388
, InitialResourceState(InitialResourceState)
5389
, pOptimizedClearValue(pOptimizedClearValue)
5390
{
5391
}
5392
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
5393
CREATE_RESOURCE_PARAMS(
5394
const D3D12_RESOURCE_DESC1* pResourceDesc,
5395
D3D12_RESOURCE_STATES InitialResourceState,
5396
const D3D12_CLEAR_VALUE* pOptimizedClearValue)
5397
: Variant(VARIANT_WITH_STATE_AND_DESC1)
5398
, pResourceDesc1(pResourceDesc)
5399
, InitialResourceState(InitialResourceState)
5400
, pOptimizedClearValue(pOptimizedClearValue)
5401
{
5402
}
5403
#endif
5404
#ifdef __ID3D12Device10_INTERFACE_DEFINED__
5405
CREATE_RESOURCE_PARAMS(
5406
const D3D12_RESOURCE_DESC1* pResourceDesc,
5407
D3D12_BARRIER_LAYOUT InitialLayout,
5408
const D3D12_CLEAR_VALUE* pOptimizedClearValue,
5409
UINT32 NumCastableFormats,
5410
DXGI_FORMAT* pCastableFormats)
5411
: Variant(VARIANT_WITH_LAYOUT)
5412
, pResourceDesc1(pResourceDesc)
5413
, InitialLayout(InitialLayout)
5414
, pOptimizedClearValue(pOptimizedClearValue)
5415
, NumCastableFormats(NumCastableFormats)
5416
, pCastableFormats(pCastableFormats)
5417
{
5418
}
5419
#endif
5420
5421
enum VARIANT
5422
{
5423
VARIANT_INVALID = 0,
5424
VARIANT_WITH_STATE,
5425
VARIANT_WITH_STATE_AND_DESC1,
5426
VARIANT_WITH_LAYOUT
5427
};
5428
5429
VARIANT Variant = VARIANT_INVALID;
5430
5431
const D3D12_RESOURCE_DESC* GetResourceDesc() const
5432
{
5433
D3D12MA_ASSERT(Variant == VARIANT_WITH_STATE);
5434
return pResourceDesc;
5435
}
5436
const D3D12_RESOURCE_DESC*& AccessResourceDesc()
5437
{
5438
D3D12MA_ASSERT(Variant == VARIANT_WITH_STATE);
5439
return pResourceDesc;
5440
}
5441
const D3D12_RESOURCE_DESC* GetBaseResourceDesc() const
5442
{
5443
// D3D12_RESOURCE_DESC1 can be cast to D3D12_RESOURCE_DESC by discarding the new members at the end.
5444
return pResourceDesc;
5445
}
5446
D3D12_RESOURCE_STATES GetInitialResourceState() const
5447
{
5448
D3D12MA_ASSERT(Variant < VARIANT_WITH_LAYOUT);
5449
return InitialResourceState;
5450
}
5451
const D3D12_CLEAR_VALUE* GetOptimizedClearValue() const
5452
{
5453
return pOptimizedClearValue;
5454
}
5455
5456
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
5457
const D3D12_RESOURCE_DESC1* GetResourceDesc1() const
5458
{
5459
D3D12MA_ASSERT(Variant >= VARIANT_WITH_STATE_AND_DESC1);
5460
return pResourceDesc1;
5461
}
5462
const D3D12_RESOURCE_DESC1*& AccessResourceDesc1()
5463
{
5464
D3D12MA_ASSERT(Variant >= VARIANT_WITH_STATE_AND_DESC1);
5465
return pResourceDesc1;
5466
}
5467
#endif
5468
5469
#ifdef __ID3D12Device10_INTERFACE_DEFINED__
5470
D3D12_BARRIER_LAYOUT GetInitialLayout() const
5471
{
5472
D3D12MA_ASSERT(Variant >= VARIANT_WITH_LAYOUT);
5473
return InitialLayout;
5474
}
5475
UINT32 GetNumCastableFormats() const
5476
{
5477
D3D12MA_ASSERT(Variant >= VARIANT_WITH_LAYOUT);
5478
return NumCastableFormats;
5479
}
5480
DXGI_FORMAT* GetCastableFormats() const
5481
{
5482
D3D12MA_ASSERT(Variant >= VARIANT_WITH_LAYOUT);
5483
return pCastableFormats;
5484
}
5485
#endif
5486
5487
private:
5488
union
5489
{
5490
const D3D12_RESOURCE_DESC* pResourceDesc;
5491
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
5492
const D3D12_RESOURCE_DESC1* pResourceDesc1;
5493
#endif
5494
};
5495
union
5496
{
5497
D3D12_RESOURCE_STATES InitialResourceState;
5498
#ifdef __ID3D12Device10_INTERFACE_DEFINED__
5499
D3D12_BARRIER_LAYOUT InitialLayout;
5500
#endif
5501
};
5502
const D3D12_CLEAR_VALUE* pOptimizedClearValue;
5503
#ifdef __ID3D12Device10_INTERFACE_DEFINED__
5504
UINT32 NumCastableFormats;
5505
DXGI_FORMAT* pCastableFormats;
5506
#endif
5507
};
5508
5509
#ifndef _D3D12MA_BLOCK_VECTOR
5510
/*
5511
Sequence of NormalBlock. Represents memory blocks allocated for a specific
5512
heap type and possibly resource type (if only Tier 1 is supported).
5513
5514
Synchronized internally with a mutex.
5515
*/
5516
class BlockVector
5517
{
5518
friend class DefragmentationContextPimpl;
5519
D3D12MA_CLASS_NO_COPY(BlockVector)
5520
public:
5521
BlockVector(
5522
AllocatorPimpl* hAllocator,
5523
const D3D12_HEAP_PROPERTIES& heapProps,
5524
D3D12_HEAP_FLAGS heapFlags,
5525
UINT64 preferredBlockSize,
5526
size_t minBlockCount,
5527
size_t maxBlockCount,
5528
bool explicitBlockSize,
5529
UINT64 minAllocationAlignment,
5530
UINT32 algorithm,
5531
bool denyMsaaTextures,
5532
ID3D12ProtectedResourceSession* pProtectedSession,
5533
D3D12_RESIDENCY_PRIORITY residencyPriority);
5534
~BlockVector();
5535
D3D12_RESIDENCY_PRIORITY GetResidencyPriority() const { return m_ResidencyPriority; }
5536
5537
const D3D12_HEAP_PROPERTIES& GetHeapProperties() const { return m_HeapProps; }
5538
D3D12_HEAP_FLAGS GetHeapFlags() const { return m_HeapFlags; }
5539
UINT64 GetPreferredBlockSize() const { return m_PreferredBlockSize; }
5540
UINT32 GetAlgorithm() const { return m_Algorithm; }
5541
bool DeniesMsaaTextures() const { return m_DenyMsaaTextures; }
5542
// To be used only while the m_Mutex is locked. Used during defragmentation.
5543
size_t GetBlockCount() const { return m_Blocks.size(); }
5544
// To be used only while the m_Mutex is locked. Used during defragmentation.
5545
NormalBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
5546
D3D12MA_RW_MUTEX& GetMutex() { return m_Mutex; }
5547
5548
HRESULT CreateMinBlocks();
5549
bool IsEmpty();
5550
5551
HRESULT Allocate(
5552
UINT64 size,
5553
UINT64 alignment,
5554
const ALLOCATION_DESC& allocDesc,
5555
size_t allocationCount,
5556
Allocation** pAllocations);
5557
5558
void Free(Allocation* hAllocation);
5559
5560
HRESULT CreateResource(
5561
UINT64 size,
5562
UINT64 alignment,
5563
const ALLOCATION_DESC& allocDesc,
5564
const CREATE_RESOURCE_PARAMS& createParams,
5565
Allocation** ppAllocation,
5566
REFIID riidResource,
5567
void** ppvResource);
5568
5569
void AddStatistics(Statistics& inoutStats);
5570
void AddDetailedStatistics(DetailedStatistics& inoutStats);
5571
5572
void WriteBlockInfoToJson(JsonWriter& json);
5573
5574
private:
5575
AllocatorPimpl* const m_hAllocator;
5576
const D3D12_HEAP_PROPERTIES m_HeapProps;
5577
const D3D12_HEAP_FLAGS m_HeapFlags;
5578
const UINT64 m_PreferredBlockSize;
5579
const size_t m_MinBlockCount;
5580
const size_t m_MaxBlockCount;
5581
const bool m_ExplicitBlockSize;
5582
const UINT64 m_MinAllocationAlignment;
5583
const UINT32 m_Algorithm;
5584
const bool m_DenyMsaaTextures;
5585
ID3D12ProtectedResourceSession* const m_ProtectedSession;
5586
const D3D12_RESIDENCY_PRIORITY m_ResidencyPriority;
5587
/* There can be at most one allocation that is completely empty - a
5588
hysteresis to avoid pessimistic case of alternating creation and destruction
5589
of a ID3D12Heap. */
5590
bool m_HasEmptyBlock;
5591
D3D12MA_RW_MUTEX m_Mutex;
5592
// Incrementally sorted by sumFreeSize, ascending.
5593
Vector<NormalBlock*> m_Blocks;
5594
UINT m_NextBlockId;
5595
bool m_IncrementalSort = true;
5596
5597
// Disable incremental sorting when freeing allocations
5598
void SetIncrementalSort(bool val) { m_IncrementalSort = val; }
5599
5600
UINT64 CalcSumBlockSize() const;
5601
UINT64 CalcMaxBlockSize() const;
5602
5603
// Finds and removes given block from vector.
5604
void Remove(NormalBlock* pBlock);
5605
5606
// Performs single step in sorting m_Blocks. They may not be fully sorted
5607
// after this call.
5608
void IncrementallySortBlocks();
5609
void SortByFreeSize();
5610
5611
HRESULT AllocatePage(
5612
UINT64 size,
5613
UINT64 alignment,
5614
const ALLOCATION_DESC& allocDesc,
5615
Allocation** pAllocation);
5616
5617
HRESULT AllocateFromBlock(
5618
NormalBlock* pBlock,
5619
UINT64 size,
5620
UINT64 alignment,
5621
ALLOCATION_FLAGS allocFlags,
5622
void* pPrivateData,
5623
UINT32 strategy,
5624
Allocation** pAllocation);
5625
5626
HRESULT CommitAllocationRequest(
5627
AllocationRequest& allocRequest,
5628
NormalBlock* pBlock,
5629
UINT64 size,
5630
UINT64 alignment,
5631
void* pPrivateData,
5632
Allocation** pAllocation);
5633
5634
HRESULT CreateBlock(
5635
UINT64 blockSize,
5636
size_t* pNewBlockIndex);
5637
};
5638
#endif // _D3D12MA_BLOCK_VECTOR
5639
5640
#ifndef _D3D12MA_CURRENT_BUDGET_DATA
5641
class CurrentBudgetData
5642
{
5643
public:
5644
bool ShouldUpdateBudget() const { return m_OperationsSinceBudgetFetch >= 30; }
5645
5646
void GetStatistics(Statistics& outStats, UINT group) const;
5647
void GetBudget(bool useMutex,
5648
UINT64* outLocalUsage, UINT64* outLocalBudget,
5649
UINT64* outNonLocalUsage, UINT64* outNonLocalBudget);
5650
5651
#if D3D12MA_DXGI_1_4
5652
HRESULT UpdateBudget(IDXGIAdapter3* adapter3, bool useMutex);
5653
#endif
5654
5655
void AddAllocation(UINT group, UINT64 allocationBytes);
5656
void RemoveAllocation(UINT group, UINT64 allocationBytes);
5657
5658
void AddBlock(UINT group, UINT64 blockBytes);
5659
void RemoveBlock(UINT group, UINT64 blockBytes);
5660
5661
private:
5662
D3D12MA_ATOMIC_UINT32 m_BlockCount[DXGI_MEMORY_SEGMENT_GROUP_COUNT] = {};
5663
D3D12MA_ATOMIC_UINT32 m_AllocationCount[DXGI_MEMORY_SEGMENT_GROUP_COUNT] = {};
5664
D3D12MA_ATOMIC_UINT64 m_BlockBytes[DXGI_MEMORY_SEGMENT_GROUP_COUNT] = {};
5665
D3D12MA_ATOMIC_UINT64 m_AllocationBytes[DXGI_MEMORY_SEGMENT_GROUP_COUNT] = {};
5666
5667
D3D12MA_ATOMIC_UINT32 m_OperationsSinceBudgetFetch = {0};
5668
D3D12MA_RW_MUTEX m_BudgetMutex;
5669
UINT64 m_D3D12Usage[DXGI_MEMORY_SEGMENT_GROUP_COUNT] = {};
5670
UINT64 m_D3D12Budget[DXGI_MEMORY_SEGMENT_GROUP_COUNT] = {};
5671
UINT64 m_BlockBytesAtD3D12Fetch[DXGI_MEMORY_SEGMENT_GROUP_COUNT] = {};
5672
};
5673
5674
#ifndef _D3D12MA_CURRENT_BUDGET_DATA_FUNCTIONS
5675
void CurrentBudgetData::GetStatistics(Statistics& outStats, UINT group) const
5676
{
5677
outStats.BlockCount = m_BlockCount[group];
5678
outStats.AllocationCount = m_AllocationCount[group];
5679
outStats.BlockBytes = m_BlockBytes[group];
5680
outStats.AllocationBytes = m_AllocationBytes[group];
5681
}
5682
5683
void CurrentBudgetData::GetBudget(bool useMutex,
5684
UINT64* outLocalUsage, UINT64* outLocalBudget,
5685
UINT64* outNonLocalUsage, UINT64* outNonLocalBudget)
5686
{
5687
MutexLockRead lockRead(m_BudgetMutex, useMutex);
5688
5689
if (outLocalUsage)
5690
{
5691
const UINT64 D3D12Usage = m_D3D12Usage[DXGI_MEMORY_SEGMENT_GROUP_LOCAL_COPY];
5692
const UINT64 blockBytes = m_BlockBytes[DXGI_MEMORY_SEGMENT_GROUP_LOCAL_COPY];
5693
const UINT64 blockBytesAtD3D12Fetch = m_BlockBytesAtD3D12Fetch[DXGI_MEMORY_SEGMENT_GROUP_LOCAL_COPY];
5694
*outLocalUsage = D3D12Usage + blockBytes > blockBytesAtD3D12Fetch ?
5695
D3D12Usage + blockBytes - blockBytesAtD3D12Fetch : 0;
5696
}
5697
if (outLocalBudget)
5698
*outLocalBudget = m_D3D12Budget[DXGI_MEMORY_SEGMENT_GROUP_LOCAL_COPY];
5699
5700
if (outNonLocalUsage)
5701
{
5702
const UINT64 D3D12Usage = m_D3D12Usage[DXGI_MEMORY_SEGMENT_GROUP_NON_LOCAL_COPY];
5703
const UINT64 blockBytes = m_BlockBytes[DXGI_MEMORY_SEGMENT_GROUP_NON_LOCAL_COPY];
5704
const UINT64 blockBytesAtD3D12Fetch = m_BlockBytesAtD3D12Fetch[DXGI_MEMORY_SEGMENT_GROUP_NON_LOCAL_COPY];
5705
*outNonLocalUsage = D3D12Usage + blockBytes > blockBytesAtD3D12Fetch ?
5706
D3D12Usage + blockBytes - blockBytesAtD3D12Fetch : 0;
5707
}
5708
if (outNonLocalBudget)
5709
*outNonLocalBudget = m_D3D12Budget[DXGI_MEMORY_SEGMENT_GROUP_NON_LOCAL_COPY];
5710
}
5711
5712
#if D3D12MA_DXGI_1_4
5713
HRESULT CurrentBudgetData::UpdateBudget(IDXGIAdapter3* adapter3, bool useMutex)
5714
{
5715
D3D12MA_ASSERT(adapter3);
5716
5717
DXGI_QUERY_VIDEO_MEMORY_INFO infoLocal = {};
5718
DXGI_QUERY_VIDEO_MEMORY_INFO infoNonLocal = {};
5719
const HRESULT hrLocal = adapter3->QueryVideoMemoryInfo(0, DXGI_MEMORY_SEGMENT_GROUP_LOCAL, &infoLocal);
5720
const HRESULT hrNonLocal = adapter3->QueryVideoMemoryInfo(0, DXGI_MEMORY_SEGMENT_GROUP_NON_LOCAL, &infoNonLocal);
5721
5722
if (SUCCEEDED(hrLocal) || SUCCEEDED(hrNonLocal))
5723
{
5724
MutexLockWrite lockWrite(m_BudgetMutex, useMutex);
5725
5726
if (SUCCEEDED(hrLocal))
5727
{
5728
m_D3D12Usage[0] = infoLocal.CurrentUsage;
5729
m_D3D12Budget[0] = infoLocal.Budget;
5730
}
5731
if (SUCCEEDED(hrNonLocal))
5732
{
5733
m_D3D12Usage[1] = infoNonLocal.CurrentUsage;
5734
m_D3D12Budget[1] = infoNonLocal.Budget;
5735
}
5736
5737
m_BlockBytesAtD3D12Fetch[0] = m_BlockBytes[0];
5738
m_BlockBytesAtD3D12Fetch[1] = m_BlockBytes[1];
5739
m_OperationsSinceBudgetFetch = 0;
5740
}
5741
5742
return FAILED(hrLocal) ? hrLocal : hrNonLocal;
5743
}
5744
#endif // #if D3D12MA_DXGI_1_4
5745
5746
void CurrentBudgetData::AddAllocation(UINT group, UINT64 allocationBytes)
5747
{
5748
++m_AllocationCount[group];
5749
m_AllocationBytes[group] += allocationBytes;
5750
++m_OperationsSinceBudgetFetch;
5751
}
5752
5753
void CurrentBudgetData::RemoveAllocation(UINT group, UINT64 allocationBytes)
5754
{
5755
D3D12MA_ASSERT(m_AllocationBytes[group] >= allocationBytes);
5756
D3D12MA_ASSERT(m_AllocationCount[group] > 0);
5757
m_AllocationBytes[group] -= allocationBytes;
5758
--m_AllocationCount[group];
5759
++m_OperationsSinceBudgetFetch;
5760
}
5761
5762
void CurrentBudgetData::AddBlock(UINT group, UINT64 blockBytes)
5763
{
5764
++m_BlockCount[group];
5765
m_BlockBytes[group] += blockBytes;
5766
++m_OperationsSinceBudgetFetch;
5767
}
5768
5769
void CurrentBudgetData::RemoveBlock(UINT group, UINT64 blockBytes)
5770
{
5771
D3D12MA_ASSERT(m_BlockBytes[group] >= blockBytes);
5772
D3D12MA_ASSERT(m_BlockCount[group] > 0);
5773
m_BlockBytes[group] -= blockBytes;
5774
--m_BlockCount[group];
5775
++m_OperationsSinceBudgetFetch;
5776
}
5777
#endif // _D3D12MA_CURRENT_BUDGET_DATA_FUNCTIONS
5778
#endif // _D3D12MA_CURRENT_BUDGET_DATA
5779
5780
#ifndef _D3D12MA_DEFRAGMENTATION_CONTEXT_PIMPL
5781
class DefragmentationContextPimpl
5782
{
5783
D3D12MA_CLASS_NO_COPY(DefragmentationContextPimpl)
5784
public:
5785
DefragmentationContextPimpl(
5786
AllocatorPimpl* hAllocator,
5787
const DEFRAGMENTATION_DESC& desc,
5788
BlockVector* poolVector);
5789
~DefragmentationContextPimpl();
5790
5791
void GetStats(DEFRAGMENTATION_STATS& outStats) { outStats = m_GlobalStats; }
5792
const ALLOCATION_CALLBACKS& GetAllocs() const { return m_Moves.GetAllocs(); }
5793
5794
HRESULT DefragmentPassBegin(DEFRAGMENTATION_PASS_MOVE_INFO& moveInfo);
5795
HRESULT DefragmentPassEnd(DEFRAGMENTATION_PASS_MOVE_INFO& moveInfo);
5796
5797
private:
5798
// Max number of allocations to ignore due to size constraints before ending single pass
5799
static const UINT8 MAX_ALLOCS_TO_IGNORE = 16;
5800
enum class CounterStatus { Pass, Ignore, End };
5801
5802
struct FragmentedBlock
5803
{
5804
UINT32 data;
5805
NormalBlock* block;
5806
};
5807
struct StateBalanced
5808
{
5809
UINT64 avgFreeSize = 0;
5810
UINT64 avgAllocSize = UINT64_MAX;
5811
};
5812
struct MoveAllocationData
5813
{
5814
UINT64 size;
5815
UINT64 alignment;
5816
ALLOCATION_FLAGS flags;
5817
DEFRAGMENTATION_MOVE move = {};
5818
};
5819
5820
const UINT64 m_MaxPassBytes;
5821
const UINT32 m_MaxPassAllocations;
5822
5823
Vector<DEFRAGMENTATION_MOVE> m_Moves;
5824
5825
UINT8 m_IgnoredAllocs = 0;
5826
UINT32 m_Algorithm;
5827
UINT32 m_BlockVectorCount;
5828
BlockVector* m_PoolBlockVector;
5829
BlockVector** m_pBlockVectors;
5830
size_t m_ImmovableBlockCount = 0;
5831
DEFRAGMENTATION_STATS m_GlobalStats = { 0 };
5832
DEFRAGMENTATION_STATS m_PassStats = { 0 };
5833
void* m_AlgorithmState = NULL;
5834
5835
static MoveAllocationData GetMoveData(AllocHandle handle, BlockMetadata* metadata);
5836
CounterStatus CheckCounters(UINT64 bytes);
5837
bool IncrementCounters(UINT64 bytes);
5838
bool ReallocWithinBlock(BlockVector& vector, NormalBlock* block);
5839
bool AllocInOtherBlock(size_t start, size_t end, MoveAllocationData& data, BlockVector& vector);
5840
5841
bool ComputeDefragmentation(BlockVector& vector, size_t index);
5842
bool ComputeDefragmentation_Fast(BlockVector& vector);
5843
bool ComputeDefragmentation_Balanced(BlockVector& vector, size_t index, bool update);
5844
bool ComputeDefragmentation_Full(BlockVector& vector);
5845
5846
void UpdateVectorStatistics(BlockVector& vector, StateBalanced& state);
5847
};
5848
#endif // _D3D12MA_DEFRAGMENTATION_CONTEXT_PIMPL
5849
5850
#ifndef _D3D12MA_POOL_PIMPL
5851
class PoolPimpl
5852
{
5853
friend class Allocator;
5854
friend struct PoolListItemTraits;
5855
public:
5856
PoolPimpl(AllocatorPimpl* allocator, const POOL_DESC& desc);
5857
~PoolPimpl();
5858
5859
AllocatorPimpl* GetAllocator() const { return m_Allocator; }
5860
const POOL_DESC& GetDesc() const { return m_Desc; }
5861
bool AlwaysCommitted() const { return (m_Desc.Flags & POOL_FLAG_ALWAYS_COMMITTED) != 0; }
5862
bool SupportsCommittedAllocations() const { return m_Desc.BlockSize == 0; }
5863
LPCWSTR GetName() const { return m_Name; }
5864
5865
BlockVector* GetBlockVector() { return m_BlockVector; }
5866
CommittedAllocationList* GetCommittedAllocationList() { return SupportsCommittedAllocations() ? &m_CommittedAllocations : NULL; }
5867
5868
HRESULT Init();
5869
void GetStatistics(Statistics& outStats);
5870
void CalculateStatistics(DetailedStatistics& outStats);
5871
void AddDetailedStatistics(DetailedStatistics& inoutStats);
5872
void SetName(LPCWSTR Name);
5873
5874
private:
5875
AllocatorPimpl* m_Allocator; // Externally owned object.
5876
POOL_DESC m_Desc;
5877
BlockVector* m_BlockVector; // Owned object.
5878
CommittedAllocationList m_CommittedAllocations;
5879
wchar_t* m_Name;
5880
PoolPimpl* m_PrevPool = NULL;
5881
PoolPimpl* m_NextPool = NULL;
5882
5883
void FreeName();
5884
};
5885
5886
struct PoolListItemTraits
5887
{
5888
using ItemType = PoolPimpl;
5889
static ItemType* GetPrev(const ItemType* item) { return item->m_PrevPool; }
5890
static ItemType* GetNext(const ItemType* item) { return item->m_NextPool; }
5891
static ItemType*& AccessPrev(ItemType* item) { return item->m_PrevPool; }
5892
static ItemType*& AccessNext(ItemType* item) { return item->m_NextPool; }
5893
};
5894
#endif // _D3D12MA_POOL_PIMPL
5895
5896
5897
#ifndef _D3D12MA_ALLOCATOR_PIMPL
5898
class AllocatorPimpl
5899
{
5900
friend class Allocator;
5901
friend class Pool;
5902
public:
5903
std::atomic_uint32_t m_RefCount = {1};
5904
CurrentBudgetData m_Budget;
5905
5906
AllocatorPimpl(const ALLOCATION_CALLBACKS& allocationCallbacks, const ALLOCATOR_DESC& desc);
5907
~AllocatorPimpl();
5908
5909
ID3D12Device* GetDevice() const { return m_Device; }
5910
#ifdef __ID3D12Device1_INTERFACE_DEFINED__
5911
ID3D12Device1* GetDevice1() const { return m_Device1; }
5912
#endif
5913
#ifdef __ID3D12Device4_INTERFACE_DEFINED__
5914
ID3D12Device4* GetDevice4() const { return m_Device4; }
5915
#endif
5916
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
5917
ID3D12Device8* GetDevice8() const { return m_Device8; }
5918
#endif
5919
// Shortcut for "Allocation Callbacks", because this function is called so often.
5920
const ALLOCATION_CALLBACKS& GetAllocs() const { return m_AllocationCallbacks; }
5921
const D3D12_FEATURE_DATA_D3D12_OPTIONS& GetD3D12Options() const { return m_D3D12Options; }
5922
BOOL IsUMA() const { return m_D3D12Architecture.UMA; }
5923
BOOL IsCacheCoherentUMA() const { return m_D3D12Architecture.CacheCoherentUMA; }
5924
bool SupportsResourceHeapTier2() const { return m_D3D12Options.ResourceHeapTier >= D3D12_RESOURCE_HEAP_TIER_2; }
5925
bool IsGPUUploadHeapSupported() const { return m_GPUUploadHeapSupported != FALSE; }
5926
bool UseMutex() const { return m_UseMutex; }
5927
AllocationObjectAllocator& GetAllocationObjectAllocator() { return m_AllocationObjectAllocator; }
5928
UINT GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
5929
/*
5930
If SupportsResourceHeapTier2():
5931
0: D3D12_HEAP_TYPE_DEFAULT
5932
1: D3D12_HEAP_TYPE_UPLOAD
5933
2: D3D12_HEAP_TYPE_READBACK
5934
3: D3D12_HEAP_TYPE_GPU_UPLOAD
5935
else:
5936
0: D3D12_HEAP_TYPE_DEFAULT + buffer
5937
1: D3D12_HEAP_TYPE_DEFAULT + texture
5938
2: D3D12_HEAP_TYPE_DEFAULT + texture RT or DS
5939
3: D3D12_HEAP_TYPE_UPLOAD + buffer
5940
4: D3D12_HEAP_TYPE_UPLOAD + texture
5941
5: D3D12_HEAP_TYPE_UPLOAD + texture RT or DS
5942
6: D3D12_HEAP_TYPE_READBACK + buffer
5943
7: D3D12_HEAP_TYPE_READBACK + texture
5944
8: D3D12_HEAP_TYPE_READBACK + texture RT or DS
5945
9: D3D12_HEAP_TYPE_GPU_UPLOAD + buffer
5946
10: D3D12_HEAP_TYPE_GPU_UPLOAD + texture
5947
11: D3D12_HEAP_TYPE_GPU_UPLOAD + texture RT or DS
5948
*/
5949
UINT GetDefaultPoolCount() const { return SupportsResourceHeapTier2() ? 4 : 12; }
5950
BlockVector** GetDefaultPools() { return m_BlockVectors; }
5951
5952
HRESULT Init(const ALLOCATOR_DESC& desc);
5953
bool HeapFlagsFulfillResourceHeapTier(D3D12_HEAP_FLAGS flags) const;
5954
UINT StandardHeapTypeToMemorySegmentGroup(D3D12_HEAP_TYPE heapType) const;
5955
UINT HeapPropertiesToMemorySegmentGroup(const D3D12_HEAP_PROPERTIES& heapProps) const;
5956
UINT64 GetMemoryCapacity(UINT memorySegmentGroup) const;
5957
5958
HRESULT CreatePlacedResourceWrap(
5959
ID3D12Heap *pHeap,
5960
UINT64 HeapOffset,
5961
const CREATE_RESOURCE_PARAMS& createParams,
5962
REFIID riidResource,
5963
void** ppvResource);
5964
5965
HRESULT CreateResource(
5966
const ALLOCATION_DESC* pAllocDesc,
5967
const CREATE_RESOURCE_PARAMS& createParams,
5968
Allocation** ppAllocation,
5969
REFIID riidResource,
5970
void** ppvResource);
5971
5972
HRESULT CreateAliasingResource(
5973
Allocation* pAllocation,
5974
UINT64 AllocationLocalOffset,
5975
const CREATE_RESOURCE_PARAMS& createParams,
5976
REFIID riidResource,
5977
void** ppvResource);
5978
5979
HRESULT AllocateMemory(
5980
const ALLOCATION_DESC* pAllocDesc,
5981
const D3D12_RESOURCE_ALLOCATION_INFO* pAllocInfo,
5982
Allocation** ppAllocation);
5983
5984
// Unregisters allocation from the collection of dedicated allocations.
5985
// Allocation object must be deleted externally afterwards.
5986
void FreeCommittedMemory(Allocation* allocation);
5987
// Unregisters allocation from the collection of placed allocations.
5988
// Allocation object must be deleted externally afterwards.
5989
void FreePlacedMemory(Allocation* allocation);
5990
// Unregisters allocation from the collection of dedicated allocations and destroys associated heap.
5991
// Allocation object must be deleted externally afterwards.
5992
void FreeHeapMemory(Allocation* allocation);
5993
5994
void SetResidencyPriority(ID3D12Pageable* obj, D3D12_RESIDENCY_PRIORITY priority) const;
5995
5996
void SetCurrentFrameIndex(UINT frameIndex);
5997
// For more deailed stats use outCustomHeaps to access statistics divided into L0 and L1 group
5998
void CalculateStatistics(TotalStatistics& outStats, DetailedStatistics outCustomHeaps[2] = NULL);
5999
6000
void GetBudget(Budget* outLocalBudget, Budget* outNonLocalBudget);
6001
void GetBudgetForHeapType(Budget& outBudget, D3D12_HEAP_TYPE heapType);
6002
6003
void BuildStatsString(WCHAR** ppStatsString, BOOL detailedMap);
6004
void FreeStatsString(WCHAR* pStatsString);
6005
6006
private:
6007
using PoolList = IntrusiveLinkedList<PoolListItemTraits>;
6008
6009
const bool m_UseMutex;
6010
const bool m_AlwaysCommitted;
6011
const bool m_MsaaAlwaysCommitted;
6012
const bool m_PreferSmallBuffersCommitted;
6013
bool m_DefaultPoolsNotZeroed = false;
6014
ID3D12Device* m_Device; // AddRef
6015
#ifdef __ID3D12Device1_INTERFACE_DEFINED__
6016
ID3D12Device1* m_Device1 = NULL; // AddRef, optional
6017
#endif
6018
#ifdef __ID3D12Device4_INTERFACE_DEFINED__
6019
ID3D12Device4* m_Device4 = NULL; // AddRef, optional
6020
#endif
6021
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
6022
ID3D12Device8* m_Device8 = NULL; // AddRef, optional
6023
#endif
6024
#ifdef __ID3D12Device10_INTERFACE_DEFINED__
6025
ID3D12Device10* m_Device10 = NULL; // AddRef, optional
6026
#endif
6027
IDXGIAdapter* m_Adapter; // AddRef
6028
#if D3D12MA_DXGI_1_4
6029
IDXGIAdapter3* m_Adapter3 = NULL; // AddRef, optional
6030
#endif
6031
UINT64 m_PreferredBlockSize;
6032
ALLOCATION_CALLBACKS m_AllocationCallbacks;
6033
D3D12MA_ATOMIC_UINT32 m_CurrentFrameIndex;
6034
DXGI_ADAPTER_DESC m_AdapterDesc;
6035
D3D12_FEATURE_DATA_D3D12_OPTIONS m_D3D12Options;
6036
BOOL m_GPUUploadHeapSupported = FALSE;
6037
D3D12_FEATURE_DATA_ARCHITECTURE m_D3D12Architecture;
6038
AllocationObjectAllocator m_AllocationObjectAllocator;
6039
6040
D3D12MA_RW_MUTEX m_PoolsMutex[HEAP_TYPE_COUNT];
6041
PoolList m_Pools[HEAP_TYPE_COUNT];
6042
// Default pools.
6043
BlockVector* m_BlockVectors[DEFAULT_POOL_MAX_COUNT];
6044
CommittedAllocationList m_CommittedAllocations[STANDARD_HEAP_TYPE_COUNT];
6045
6046
/*
6047
Heuristics that decides whether a resource should better be placed in its own,
6048
dedicated allocation (committed resource rather than placed resource).
6049
*/
6050
template<typename D3D12_RESOURCE_DESC_T>
6051
bool PrefersCommittedAllocation(const D3D12_RESOURCE_DESC_T& resourceDesc,
6052
ALLOCATION_FLAGS strategy);
6053
6054
// Allocates and registers new committed resource with implicit heap, as dedicated allocation.
6055
// Creates and returns Allocation object and optionally D3D12 resource.
6056
HRESULT AllocateCommittedResource(
6057
const CommittedAllocationParameters& committedAllocParams,
6058
UINT64 resourceSize, bool withinBudget, void* pPrivateData,
6059
const CREATE_RESOURCE_PARAMS& createParams,
6060
Allocation** ppAllocation, REFIID riidResource, void** ppvResource);
6061
6062
// Allocates and registers new heap without any resources placed in it, as dedicated allocation.
6063
// Creates and returns Allocation object.
6064
HRESULT AllocateHeap(
6065
const CommittedAllocationParameters& committedAllocParams,
6066
const D3D12_RESOURCE_ALLOCATION_INFO& allocInfo, bool withinBudget,
6067
void* pPrivateData, Allocation** ppAllocation);
6068
6069
template<typename D3D12_RESOURCE_DESC_T>
6070
HRESULT CalcAllocationParams(const ALLOCATION_DESC& allocDesc, UINT64 allocSize,
6071
const D3D12_RESOURCE_DESC_T* resDesc, // Optional
6072
BlockVector*& outBlockVector, CommittedAllocationParameters& outCommittedAllocationParams, bool& outPreferCommitted);
6073
6074
// Returns UINT32_MAX if index cannot be calculcated.
6075
UINT CalcDefaultPoolIndex(const ALLOCATION_DESC& allocDesc, ResourceClass resourceClass) const;
6076
void CalcDefaultPoolParams(D3D12_HEAP_TYPE& outHeapType, D3D12_HEAP_FLAGS& outHeapFlags, UINT index) const;
6077
6078
// Registers Pool object in m_Pools.
6079
void RegisterPool(Pool* pool, D3D12_HEAP_TYPE heapType);
6080
// Unregisters Pool object from m_Pools.
6081
void UnregisterPool(Pool* pool, D3D12_HEAP_TYPE heapType);
6082
6083
HRESULT UpdateD3D12Budget();
6084
6085
D3D12_RESOURCE_ALLOCATION_INFO GetResourceAllocationInfoNative(const D3D12_RESOURCE_DESC& resourceDesc) const;
6086
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
6087
D3D12_RESOURCE_ALLOCATION_INFO GetResourceAllocationInfoNative(const D3D12_RESOURCE_DESC1& resourceDesc) const;
6088
#endif
6089
6090
template<typename D3D12_RESOURCE_DESC_T>
6091
D3D12_RESOURCE_ALLOCATION_INFO GetResourceAllocationInfo(D3D12_RESOURCE_DESC_T& inOutResourceDesc) const;
6092
6093
bool NewAllocationWithinBudget(D3D12_HEAP_TYPE heapType, UINT64 size);
6094
6095
// Writes object { } with data of given budget.
6096
static void WriteBudgetToJson(JsonWriter& json, const Budget& budget);
6097
};
6098
6099
#ifndef _D3D12MA_ALLOCATOR_PIMPL_FUNCTINOS
6100
AllocatorPimpl::AllocatorPimpl(const ALLOCATION_CALLBACKS& allocationCallbacks, const ALLOCATOR_DESC& desc)
6101
: m_UseMutex((desc.Flags & ALLOCATOR_FLAG_SINGLETHREADED) == 0),
6102
m_AlwaysCommitted((desc.Flags & ALLOCATOR_FLAG_ALWAYS_COMMITTED) != 0),
6103
m_MsaaAlwaysCommitted((desc.Flags & ALLOCATOR_FLAG_MSAA_TEXTURES_ALWAYS_COMMITTED) != 0),
6104
m_PreferSmallBuffersCommitted((desc.Flags & ALLOCATOR_FLAG_DONT_PREFER_SMALL_BUFFERS_COMMITTED) == 0),
6105
m_Device(desc.pDevice),
6106
m_Adapter(desc.pAdapter),
6107
m_PreferredBlockSize(desc.PreferredBlockSize != 0 ? desc.PreferredBlockSize : D3D12MA_DEFAULT_BLOCK_SIZE),
6108
m_AllocationCallbacks(allocationCallbacks),
6109
m_CurrentFrameIndex(0),
6110
// Below this line don't use allocationCallbacks but m_AllocationCallbacks!!!
6111
m_AllocationObjectAllocator(m_AllocationCallbacks, m_UseMutex)
6112
{
6113
// desc.pAllocationCallbacks intentionally ignored here, preprocessed by CreateAllocator.
6114
ZeroMemory(&m_D3D12Options, sizeof(m_D3D12Options));
6115
ZeroMemory(&m_D3D12Architecture, sizeof(m_D3D12Architecture));
6116
6117
ZeroMemory(m_BlockVectors, sizeof(m_BlockVectors));
6118
6119
for (UINT i = 0; i < STANDARD_HEAP_TYPE_COUNT; ++i)
6120
{
6121
m_CommittedAllocations[i].Init(
6122
m_UseMutex,
6123
IndexToStandardHeapType(i),
6124
NULL); // pool
6125
}
6126
6127
m_Device->AddRef();
6128
m_Adapter->AddRef();
6129
}
6130
6131
HRESULT AllocatorPimpl::Init(const ALLOCATOR_DESC& desc)
6132
{
6133
#if D3D12MA_DXGI_1_4
6134
desc.pAdapter->QueryInterface(D3D12MA_IID_PPV_ARGS(&m_Adapter3));
6135
#endif
6136
6137
#ifdef __ID3D12Device1_INTERFACE_DEFINED__
6138
m_Device->QueryInterface(D3D12MA_IID_PPV_ARGS(&m_Device1));
6139
#endif
6140
6141
#ifdef __ID3D12Device4_INTERFACE_DEFINED__
6142
m_Device->QueryInterface(D3D12MA_IID_PPV_ARGS(&m_Device4));
6143
#endif
6144
6145
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
6146
m_Device->QueryInterface(D3D12MA_IID_PPV_ARGS(&m_Device8));
6147
6148
if((desc.Flags & ALLOCATOR_FLAG_DEFAULT_POOLS_NOT_ZEROED) != 0)
6149
{
6150
D3D12_FEATURE_DATA_D3D12_OPTIONS7 options7 = {};
6151
if(SUCCEEDED(m_Device->CheckFeatureSupport(D3D12_FEATURE_D3D12_OPTIONS7, &options7, sizeof(options7))))
6152
{
6153
// DEFAULT_POOLS_NOT_ZEROED both supported and enabled by the user.
6154
m_DefaultPoolsNotZeroed = true;
6155
}
6156
}
6157
#endif
6158
6159
#ifdef __ID3D12Device10_INTERFACE_DEFINED__
6160
m_Device->QueryInterface(D3D12MA_IID_PPV_ARGS(&m_Device10));
6161
#endif
6162
6163
HRESULT hr = m_Adapter->GetDesc(&m_AdapterDesc);
6164
if (FAILED(hr))
6165
{
6166
return hr;
6167
}
6168
6169
hr = m_Device->CheckFeatureSupport(D3D12_FEATURE_D3D12_OPTIONS, &m_D3D12Options, sizeof(m_D3D12Options));
6170
if (FAILED(hr))
6171
{
6172
return hr;
6173
}
6174
#ifdef D3D12MA_FORCE_RESOURCE_HEAP_TIER
6175
m_D3D12Options.ResourceHeapTier = (D3D12MA_FORCE_RESOURCE_HEAP_TIER);
6176
#endif
6177
6178
#if D3D12MA_OPTIONS16_SUPPORTED
6179
{
6180
D3D12_FEATURE_DATA_D3D12_OPTIONS16 options16 = {};
6181
hr = m_Device->CheckFeatureSupport(D3D12_FEATURE_D3D12_OPTIONS16, &options16, sizeof(options16));
6182
if (SUCCEEDED(hr))
6183
{
6184
m_GPUUploadHeapSupported = options16.GPUUploadHeapSupported;
6185
}
6186
}
6187
#endif // #if D3D12MA_OPTIONS16_SUPPORTED
6188
6189
hr = m_Device->CheckFeatureSupport(D3D12_FEATURE_ARCHITECTURE, &m_D3D12Architecture, sizeof(m_D3D12Architecture));
6190
if (FAILED(hr))
6191
{
6192
m_D3D12Architecture.UMA = FALSE;
6193
m_D3D12Architecture.CacheCoherentUMA = FALSE;
6194
}
6195
6196
D3D12_HEAP_PROPERTIES heapProps = {};
6197
const UINT defaultPoolCount = GetDefaultPoolCount();
6198
for (UINT i = 0; i < defaultPoolCount; ++i)
6199
{
6200
D3D12_HEAP_FLAGS heapFlags;
6201
CalcDefaultPoolParams(heapProps.Type, heapFlags, i);
6202
6203
#if D3D12MA_CREATE_NOT_ZEROED_AVAILABLE
6204
if(m_DefaultPoolsNotZeroed)
6205
{
6206
heapFlags |= D3D12_HEAP_FLAG_CREATE_NOT_ZEROED;
6207
}
6208
#endif
6209
6210
m_BlockVectors[i] = D3D12MA_NEW(GetAllocs(), BlockVector)(
6211
this, // hAllocator
6212
heapProps, // heapType
6213
heapFlags, // heapFlags
6214
m_PreferredBlockSize,
6215
0, // minBlockCount
6216
SIZE_MAX, // maxBlockCount
6217
false, // explicitBlockSize
6218
D3D12MA_DEBUG_ALIGNMENT, // minAllocationAlignment
6219
0, // Default algorithm,
6220
m_MsaaAlwaysCommitted,
6221
NULL, // pProtectedSession
6222
D3D12_RESIDENCY_PRIORITY_NONE); // residencyPriority
6223
// No need to call m_pBlockVectors[i]->CreateMinBlocks here, becase minBlockCount is 0.
6224
}
6225
6226
#if D3D12MA_DXGI_1_4
6227
UpdateD3D12Budget();
6228
#endif
6229
6230
return S_OK;
6231
}
6232
6233
AllocatorPimpl::~AllocatorPimpl()
6234
{
6235
#ifdef __ID3D12Device10_INTERFACE_DEFINED__
6236
SAFE_RELEASE(m_Device10);
6237
#endif
6238
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
6239
SAFE_RELEASE(m_Device8);
6240
#endif
6241
#ifdef __ID3D12Device4_INTERFACE_DEFINED__
6242
SAFE_RELEASE(m_Device4);
6243
#endif
6244
#ifdef __ID3D12Device1_INTERFACE_DEFINED__
6245
SAFE_RELEASE(m_Device1);
6246
#endif
6247
#if D3D12MA_DXGI_1_4
6248
SAFE_RELEASE(m_Adapter3);
6249
#endif
6250
SAFE_RELEASE(m_Adapter);
6251
SAFE_RELEASE(m_Device);
6252
6253
for (UINT i = DEFAULT_POOL_MAX_COUNT; i--; )
6254
{
6255
D3D12MA_DELETE(GetAllocs(), m_BlockVectors[i]);
6256
}
6257
6258
for (UINT i = HEAP_TYPE_COUNT; i--; )
6259
{
6260
if (!m_Pools[i].IsEmpty())
6261
{
6262
D3D12MA_ASSERT(0 && "Unfreed pools found!");
6263
}
6264
}
6265
}
6266
6267
bool AllocatorPimpl::HeapFlagsFulfillResourceHeapTier(D3D12_HEAP_FLAGS flags) const
6268
{
6269
if (SupportsResourceHeapTier2())
6270
{
6271
return true;
6272
}
6273
else
6274
{
6275
const bool allowBuffers = (flags & D3D12_HEAP_FLAG_DENY_BUFFERS) == 0;
6276
const bool allowRtDsTextures = (flags & D3D12_HEAP_FLAG_DENY_RT_DS_TEXTURES) == 0;
6277
const bool allowNonRtDsTextures = (flags & D3D12_HEAP_FLAG_DENY_NON_RT_DS_TEXTURES) == 0;
6278
const uint8_t allowedGroupCount = (allowBuffers ? 1 : 0) + (allowRtDsTextures ? 1 : 0) + (allowNonRtDsTextures ? 1 : 0);
6279
return allowedGroupCount == 1;
6280
}
6281
}
6282
6283
UINT AllocatorPimpl::StandardHeapTypeToMemorySegmentGroup(D3D12_HEAP_TYPE heapType) const
6284
{
6285
D3D12MA_ASSERT(IsHeapTypeStandard(heapType));
6286
if (IsUMA())
6287
return DXGI_MEMORY_SEGMENT_GROUP_LOCAL_COPY;
6288
return (heapType == D3D12_HEAP_TYPE_DEFAULT || heapType == D3D12_HEAP_TYPE_GPU_UPLOAD_COPY) ?
6289
DXGI_MEMORY_SEGMENT_GROUP_LOCAL_COPY : DXGI_MEMORY_SEGMENT_GROUP_NON_LOCAL_COPY;
6290
}
6291
6292
UINT AllocatorPimpl::HeapPropertiesToMemorySegmentGroup(const D3D12_HEAP_PROPERTIES& heapProps) const
6293
{
6294
if (IsUMA())
6295
return DXGI_MEMORY_SEGMENT_GROUP_LOCAL_COPY;
6296
if (heapProps.MemoryPoolPreference == D3D12_MEMORY_POOL_UNKNOWN)
6297
return StandardHeapTypeToMemorySegmentGroup(heapProps.Type);
6298
return heapProps.MemoryPoolPreference == D3D12_MEMORY_POOL_L1 ?
6299
DXGI_MEMORY_SEGMENT_GROUP_LOCAL_COPY : DXGI_MEMORY_SEGMENT_GROUP_NON_LOCAL_COPY;
6300
}
6301
6302
UINT64 AllocatorPimpl::GetMemoryCapacity(UINT memorySegmentGroup) const
6303
{
6304
switch (memorySegmentGroup)
6305
{
6306
case DXGI_MEMORY_SEGMENT_GROUP_LOCAL_COPY:
6307
return IsUMA() ?
6308
m_AdapterDesc.DedicatedVideoMemory + m_AdapterDesc.SharedSystemMemory : m_AdapterDesc.DedicatedVideoMemory;
6309
case DXGI_MEMORY_SEGMENT_GROUP_NON_LOCAL_COPY:
6310
return IsUMA() ? 0 : m_AdapterDesc.SharedSystemMemory;
6311
default:
6312
D3D12MA_ASSERT(0);
6313
return UINT64_MAX;
6314
}
6315
}
6316
6317
HRESULT AllocatorPimpl::CreatePlacedResourceWrap(
6318
ID3D12Heap *pHeap,
6319
UINT64 HeapOffset,
6320
const CREATE_RESOURCE_PARAMS& createParams,
6321
REFIID riidResource,
6322
void** ppvResource)
6323
{
6324
#ifdef __ID3D12Device10_INTERFACE_DEFINED__
6325
if (createParams.Variant == CREATE_RESOURCE_PARAMS::VARIANT_WITH_LAYOUT)
6326
{
6327
if (!m_Device10)
6328
{
6329
return E_NOINTERFACE;
6330
}
6331
return m_Device10->CreatePlacedResource2(pHeap, HeapOffset,
6332
createParams.GetResourceDesc1(), createParams.GetInitialLayout(),
6333
createParams.GetOptimizedClearValue(), createParams.GetNumCastableFormats(),
6334
createParams.GetCastableFormats(), riidResource, ppvResource);
6335
} else
6336
#endif
6337
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
6338
if (createParams.Variant == CREATE_RESOURCE_PARAMS::VARIANT_WITH_STATE_AND_DESC1)
6339
{
6340
if (!m_Device8)
6341
{
6342
return E_NOINTERFACE;
6343
}
6344
return m_Device8->CreatePlacedResource1(pHeap, HeapOffset,
6345
createParams.GetResourceDesc1(), createParams.GetInitialResourceState(),
6346
createParams.GetOptimizedClearValue(), riidResource, ppvResource);
6347
} else
6348
#endif
6349
if (createParams.Variant == CREATE_RESOURCE_PARAMS::VARIANT_WITH_STATE)
6350
{
6351
return m_Device->CreatePlacedResource(pHeap, HeapOffset,
6352
createParams.GetResourceDesc(), createParams.GetInitialResourceState(),
6353
createParams.GetOptimizedClearValue(), riidResource, ppvResource);
6354
}
6355
else
6356
{
6357
D3D12MA_ASSERT(0);
6358
return E_INVALIDARG;
6359
}
6360
}
6361
6362
6363
HRESULT AllocatorPimpl::CreateResource(
6364
const ALLOCATION_DESC* pAllocDesc,
6365
const CREATE_RESOURCE_PARAMS& createParams,
6366
Allocation** ppAllocation,
6367
REFIID riidResource,
6368
void** ppvResource)
6369
{
6370
D3D12MA_ASSERT(pAllocDesc && createParams.GetBaseResourceDesc() && ppAllocation);
6371
6372
*ppAllocation = NULL;
6373
if (ppvResource)
6374
{
6375
*ppvResource = NULL;
6376
}
6377
6378
CREATE_RESOURCE_PARAMS finalCreateParams = createParams;
6379
D3D12_RESOURCE_DESC finalResourceDesc;
6380
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
6381
D3D12_RESOURCE_DESC1 finalResourceDesc1;
6382
#endif
6383
D3D12_RESOURCE_ALLOCATION_INFO resAllocInfo;
6384
if (createParams.Variant == CREATE_RESOURCE_PARAMS::VARIANT_WITH_STATE)
6385
{
6386
finalResourceDesc = *createParams.GetResourceDesc();
6387
finalCreateParams.AccessResourceDesc() = &finalResourceDesc;
6388
resAllocInfo = GetResourceAllocationInfo(finalResourceDesc);
6389
}
6390
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
6391
else if (createParams.Variant == CREATE_RESOURCE_PARAMS::VARIANT_WITH_STATE_AND_DESC1)
6392
{
6393
if (!m_Device8)
6394
{
6395
return E_NOINTERFACE;
6396
}
6397
finalResourceDesc1 = *createParams.GetResourceDesc1();
6398
finalCreateParams.AccessResourceDesc1() = &finalResourceDesc1;
6399
resAllocInfo = GetResourceAllocationInfo(finalResourceDesc1);
6400
}
6401
#endif
6402
#ifdef __ID3D12Device10_INTERFACE_DEFINED__
6403
else if (createParams.Variant == CREATE_RESOURCE_PARAMS::VARIANT_WITH_LAYOUT)
6404
{
6405
if (!m_Device10)
6406
{
6407
return E_NOINTERFACE;
6408
}
6409
finalResourceDesc1 = *createParams.GetResourceDesc1();
6410
finalCreateParams.AccessResourceDesc1() = &finalResourceDesc1;
6411
resAllocInfo = GetResourceAllocationInfo(finalResourceDesc1);
6412
}
6413
#endif
6414
else
6415
{
6416
D3D12MA_ASSERT(0);
6417
return E_INVALIDARG;
6418
}
6419
D3D12MA_ASSERT(IsPow2(resAllocInfo.Alignment));
6420
D3D12MA_ASSERT(resAllocInfo.SizeInBytes > 0);
6421
6422
BlockVector* blockVector = NULL;
6423
CommittedAllocationParameters committedAllocationParams = {};
6424
bool preferCommitted = false;
6425
6426
HRESULT hr;
6427
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
6428
if (createParams.Variant >= CREATE_RESOURCE_PARAMS::VARIANT_WITH_STATE_AND_DESC1)
6429
{
6430
hr = CalcAllocationParams<D3D12_RESOURCE_DESC1>(*pAllocDesc, resAllocInfo.SizeInBytes,
6431
createParams.GetResourceDesc1(),
6432
blockVector, committedAllocationParams, preferCommitted);
6433
}
6434
else
6435
#endif
6436
{
6437
hr = CalcAllocationParams<D3D12_RESOURCE_DESC>(*pAllocDesc, resAllocInfo.SizeInBytes,
6438
createParams.GetResourceDesc(),
6439
blockVector, committedAllocationParams, preferCommitted);
6440
}
6441
if (FAILED(hr))
6442
return hr;
6443
6444
const bool withinBudget = (pAllocDesc->Flags & ALLOCATION_FLAG_WITHIN_BUDGET) != 0;
6445
hr = E_INVALIDARG;
6446
if (committedAllocationParams.IsValid() && preferCommitted)
6447
{
6448
hr = AllocateCommittedResource(committedAllocationParams,
6449
resAllocInfo.SizeInBytes, withinBudget, pAllocDesc->pPrivateData,
6450
finalCreateParams, ppAllocation, riidResource, ppvResource);
6451
if (SUCCEEDED(hr))
6452
return hr;
6453
}
6454
if (blockVector != NULL)
6455
{
6456
hr = blockVector->CreateResource(resAllocInfo.SizeInBytes, resAllocInfo.Alignment,
6457
*pAllocDesc, finalCreateParams,
6458
ppAllocation, riidResource, ppvResource);
6459
if (SUCCEEDED(hr))
6460
return hr;
6461
}
6462
if (committedAllocationParams.IsValid() && !preferCommitted)
6463
{
6464
hr = AllocateCommittedResource(committedAllocationParams,
6465
resAllocInfo.SizeInBytes, withinBudget, pAllocDesc->pPrivateData,
6466
finalCreateParams, ppAllocation, riidResource, ppvResource);
6467
if (SUCCEEDED(hr))
6468
return hr;
6469
}
6470
return hr;
6471
}
6472
6473
HRESULT AllocatorPimpl::AllocateMemory(
6474
const ALLOCATION_DESC* pAllocDesc,
6475
const D3D12_RESOURCE_ALLOCATION_INFO* pAllocInfo,
6476
Allocation** ppAllocation)
6477
{
6478
*ppAllocation = NULL;
6479
6480
BlockVector* blockVector = NULL;
6481
CommittedAllocationParameters committedAllocationParams = {};
6482
bool preferCommitted = false;
6483
HRESULT hr = CalcAllocationParams<D3D12_RESOURCE_DESC>(*pAllocDesc, pAllocInfo->SizeInBytes,
6484
NULL, // pResDesc
6485
blockVector, committedAllocationParams, preferCommitted);
6486
if (FAILED(hr))
6487
return hr;
6488
6489
const bool withinBudget = (pAllocDesc->Flags & ALLOCATION_FLAG_WITHIN_BUDGET) != 0;
6490
hr = E_INVALIDARG;
6491
if (committedAllocationParams.IsValid() && preferCommitted)
6492
{
6493
hr = AllocateHeap(committedAllocationParams, *pAllocInfo, withinBudget, pAllocDesc->pPrivateData, ppAllocation);
6494
if (SUCCEEDED(hr))
6495
return hr;
6496
}
6497
if (blockVector != NULL)
6498
{
6499
hr = blockVector->Allocate(pAllocInfo->SizeInBytes, pAllocInfo->Alignment,
6500
*pAllocDesc, 1, (Allocation**)ppAllocation);
6501
if (SUCCEEDED(hr))
6502
return hr;
6503
}
6504
if (committedAllocationParams.IsValid() && !preferCommitted)
6505
{
6506
hr = AllocateHeap(committedAllocationParams, *pAllocInfo, withinBudget, pAllocDesc->pPrivateData, ppAllocation);
6507
if (SUCCEEDED(hr))
6508
return hr;
6509
}
6510
return hr;
6511
}
6512
6513
HRESULT AllocatorPimpl::CreateAliasingResource(
6514
Allocation* pAllocation,
6515
UINT64 AllocationLocalOffset,
6516
const CREATE_RESOURCE_PARAMS& createParams,
6517
REFIID riidResource,
6518
void** ppvResource)
6519
{
6520
*ppvResource = NULL;
6521
6522
CREATE_RESOURCE_PARAMS finalCreateParams = createParams;
6523
D3D12_RESOURCE_DESC finalResourceDesc;
6524
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
6525
D3D12_RESOURCE_DESC1 finalResourceDesc1;
6526
#endif
6527
D3D12_RESOURCE_ALLOCATION_INFO resAllocInfo;
6528
if (createParams.Variant == CREATE_RESOURCE_PARAMS::VARIANT_WITH_STATE)
6529
{
6530
finalResourceDesc = *createParams.GetResourceDesc();
6531
finalCreateParams.AccessResourceDesc() = &finalResourceDesc;
6532
resAllocInfo = GetResourceAllocationInfo(finalResourceDesc);
6533
}
6534
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
6535
else if (createParams.Variant == CREATE_RESOURCE_PARAMS::VARIANT_WITH_STATE_AND_DESC1)
6536
{
6537
if (!m_Device8)
6538
{
6539
return E_NOINTERFACE;
6540
}
6541
finalResourceDesc1 = *createParams.GetResourceDesc1();
6542
finalCreateParams.AccessResourceDesc1() = &finalResourceDesc1;
6543
resAllocInfo = GetResourceAllocationInfo(finalResourceDesc1);
6544
}
6545
#endif
6546
#ifdef __ID3D12Device10_INTERFACE_DEFINED__
6547
else if (createParams.Variant == CREATE_RESOURCE_PARAMS::VARIANT_WITH_LAYOUT)
6548
{
6549
if (!m_Device10)
6550
{
6551
return E_NOINTERFACE;
6552
}
6553
finalResourceDesc1 = *createParams.GetResourceDesc1();
6554
finalCreateParams.AccessResourceDesc1() = &finalResourceDesc1;
6555
resAllocInfo = GetResourceAllocationInfo(finalResourceDesc1);
6556
}
6557
#endif
6558
else
6559
{
6560
D3D12MA_ASSERT(0);
6561
return E_INVALIDARG;
6562
}
6563
D3D12MA_ASSERT(IsPow2(resAllocInfo.Alignment));
6564
D3D12MA_ASSERT(resAllocInfo.SizeInBytes > 0);
6565
6566
ID3D12Heap* const existingHeap = pAllocation->GetHeap();
6567
const UINT64 existingOffset = pAllocation->GetOffset();
6568
const UINT64 existingSize = pAllocation->GetSize();
6569
const UINT64 newOffset = existingOffset + AllocationLocalOffset;
6570
6571
if (existingHeap == NULL ||
6572
AllocationLocalOffset + resAllocInfo.SizeInBytes > existingSize ||
6573
newOffset % resAllocInfo.Alignment != 0)
6574
{
6575
return E_INVALIDARG;
6576
}
6577
6578
return CreatePlacedResourceWrap(existingHeap, newOffset, finalCreateParams, riidResource, ppvResource);
6579
}
6580
6581
void AllocatorPimpl::FreeCommittedMemory(Allocation* allocation)
6582
{
6583
D3D12MA_ASSERT(allocation && allocation->m_PackedData.GetType() == Allocation::TYPE_COMMITTED);
6584
6585
CommittedAllocationList* const allocList = allocation->m_Committed.list;
6586
allocList->Unregister(allocation);
6587
6588
const UINT memSegmentGroup = allocList->GetMemorySegmentGroup(this);
6589
const UINT64 allocSize = allocation->GetSize();
6590
m_Budget.RemoveAllocation(memSegmentGroup, allocSize);
6591
m_Budget.RemoveBlock(memSegmentGroup, allocSize);
6592
}
6593
6594
void AllocatorPimpl::FreePlacedMemory(Allocation* allocation)
6595
{
6596
D3D12MA_ASSERT(allocation && allocation->m_PackedData.GetType() == Allocation::TYPE_PLACED);
6597
6598
NormalBlock* const block = allocation->m_Placed.block;
6599
D3D12MA_ASSERT(block);
6600
BlockVector* const blockVector = block->GetBlockVector();
6601
D3D12MA_ASSERT(blockVector);
6602
m_Budget.RemoveAllocation(HeapPropertiesToMemorySegmentGroup(block->GetHeapProperties()), allocation->GetSize());
6603
blockVector->Free(allocation);
6604
}
6605
6606
void AllocatorPimpl::FreeHeapMemory(Allocation* allocation)
6607
{
6608
D3D12MA_ASSERT(allocation && allocation->m_PackedData.GetType() == Allocation::TYPE_HEAP);
6609
6610
CommittedAllocationList* const allocList = allocation->m_Committed.list;
6611
allocList->Unregister(allocation);
6612
SAFE_RELEASE(allocation->m_Heap.heap);
6613
6614
const UINT memSegmentGroup = allocList->GetMemorySegmentGroup(this);
6615
const UINT64 allocSize = allocation->GetSize();
6616
m_Budget.RemoveAllocation(memSegmentGroup, allocSize);
6617
m_Budget.RemoveBlock(memSegmentGroup, allocSize);
6618
}
6619
6620
void AllocatorPimpl::SetResidencyPriority(ID3D12Pageable* obj, D3D12_RESIDENCY_PRIORITY priority) const
6621
{
6622
#ifdef __ID3D12Device1_INTERFACE_DEFINED__
6623
if (priority != D3D12_RESIDENCY_PRIORITY_NONE && m_Device1)
6624
{
6625
// Intentionally ignoring the result.
6626
m_Device1->SetResidencyPriority(1, &obj, &priority);
6627
}
6628
#endif
6629
}
6630
6631
void AllocatorPimpl::SetCurrentFrameIndex(UINT frameIndex)
6632
{
6633
m_CurrentFrameIndex.store(frameIndex);
6634
6635
#if D3D12MA_DXGI_1_4
6636
UpdateD3D12Budget();
6637
#endif
6638
}
6639
6640
void AllocatorPimpl::CalculateStatistics(TotalStatistics& outStats, DetailedStatistics outCustomHeaps[2])
6641
{
6642
// Init stats
6643
for (size_t i = 0; i < HEAP_TYPE_COUNT; i++)
6644
ClearDetailedStatistics(outStats.HeapType[i]);
6645
for (size_t i = 0; i < DXGI_MEMORY_SEGMENT_GROUP_COUNT; i++)
6646
ClearDetailedStatistics(outStats.MemorySegmentGroup[i]);
6647
ClearDetailedStatistics(outStats.Total);
6648
if (outCustomHeaps)
6649
{
6650
ClearDetailedStatistics(outCustomHeaps[0]);
6651
ClearDetailedStatistics(outCustomHeaps[1]);
6652
}
6653
6654
// Process default pools. 4 standard heap types only. Add them to outStats.HeapType[i].
6655
if (SupportsResourceHeapTier2())
6656
{
6657
// DEFAULT, UPLOAD, READBACK, GPU_UPLOAD.
6658
for (size_t heapTypeIndex = 0; heapTypeIndex < STANDARD_HEAP_TYPE_COUNT; ++heapTypeIndex)
6659
{
6660
BlockVector* const pBlockVector = m_BlockVectors[heapTypeIndex];
6661
D3D12MA_ASSERT(pBlockVector);
6662
const size_t outputIndex = heapTypeIndex < 3 ? heapTypeIndex : 4; // GPU_UPLOAD 3 -> 4
6663
pBlockVector->AddDetailedStatistics(outStats.HeapType[outputIndex]);
6664
}
6665
}
6666
else
6667
{
6668
// DEFAULT, UPLOAD, READBACK.
6669
for (size_t heapTypeIndex = 0; heapTypeIndex < STANDARD_HEAP_TYPE_COUNT; ++heapTypeIndex)
6670
{
6671
for (size_t heapSubType = 0; heapSubType < 3; ++heapSubType)
6672
{
6673
BlockVector* const pBlockVector = m_BlockVectors[heapTypeIndex * 3 + heapSubType];
6674
D3D12MA_ASSERT(pBlockVector);
6675
6676
const size_t outputIndex = heapTypeIndex < 3 ? heapTypeIndex : 4; // GPU_UPLOAD 3 -> 4
6677
pBlockVector->AddDetailedStatistics(outStats.HeapType[outputIndex]);
6678
}
6679
}
6680
}
6681
6682
// Sum them up to memory segment groups.
6683
AddDetailedStatistics(
6684
outStats.MemorySegmentGroup[StandardHeapTypeToMemorySegmentGroup(D3D12_HEAP_TYPE_DEFAULT)],
6685
outStats.HeapType[0]);
6686
AddDetailedStatistics(
6687
outStats.MemorySegmentGroup[StandardHeapTypeToMemorySegmentGroup(D3D12_HEAP_TYPE_UPLOAD)],
6688
outStats.HeapType[1]);
6689
AddDetailedStatistics(
6690
outStats.MemorySegmentGroup[StandardHeapTypeToMemorySegmentGroup(D3D12_HEAP_TYPE_READBACK)],
6691
outStats.HeapType[2]);
6692
AddDetailedStatistics(
6693
outStats.MemorySegmentGroup[StandardHeapTypeToMemorySegmentGroup(D3D12_HEAP_TYPE_GPU_UPLOAD_COPY)],
6694
outStats.HeapType[4]);
6695
6696
// Process custom pools.
6697
DetailedStatistics tmpStats;
6698
for (size_t heapTypeIndex = 0; heapTypeIndex < HEAP_TYPE_COUNT; ++heapTypeIndex)
6699
{
6700
MutexLockRead lock(m_PoolsMutex[heapTypeIndex], m_UseMutex);
6701
PoolList& poolList = m_Pools[heapTypeIndex];
6702
for (PoolPimpl* pool = poolList.Front(); pool != NULL; pool = poolList.GetNext(pool))
6703
{
6704
const D3D12_HEAP_PROPERTIES& poolHeapProps = pool->GetDesc().HeapProperties;
6705
ClearDetailedStatistics(tmpStats);
6706
pool->AddDetailedStatistics(tmpStats);
6707
AddDetailedStatistics(
6708
outStats.HeapType[heapTypeIndex], tmpStats);
6709
6710
UINT memorySegment = HeapPropertiesToMemorySegmentGroup(poolHeapProps);
6711
AddDetailedStatistics(
6712
outStats.MemorySegmentGroup[memorySegment], tmpStats);
6713
6714
if (outCustomHeaps)
6715
AddDetailedStatistics(outCustomHeaps[memorySegment], tmpStats);
6716
}
6717
}
6718
6719
// Process committed allocations. standard heap types only.
6720
for (UINT heapTypeIndex = 0; heapTypeIndex < STANDARD_HEAP_TYPE_COUNT; ++heapTypeIndex)
6721
{
6722
ClearDetailedStatistics(tmpStats);
6723
m_CommittedAllocations[heapTypeIndex].AddDetailedStatistics(tmpStats);
6724
const size_t outputIndex = heapTypeIndex < 3 ? heapTypeIndex : 4; // GPU_UPLOAD 3 -> 4
6725
AddDetailedStatistics(
6726
outStats.HeapType[outputIndex], tmpStats);
6727
AddDetailedStatistics(
6728
outStats.MemorySegmentGroup[StandardHeapTypeToMemorySegmentGroup(IndexToStandardHeapType(heapTypeIndex))], tmpStats);
6729
}
6730
6731
// Sum up memory segment groups to totals.
6732
AddDetailedStatistics(outStats.Total, outStats.MemorySegmentGroup[0]);
6733
AddDetailedStatistics(outStats.Total, outStats.MemorySegmentGroup[1]);
6734
6735
D3D12MA_ASSERT(outStats.Total.Stats.BlockCount ==
6736
outStats.MemorySegmentGroup[0].Stats.BlockCount + outStats.MemorySegmentGroup[1].Stats.BlockCount);
6737
D3D12MA_ASSERT(outStats.Total.Stats.AllocationCount ==
6738
outStats.MemorySegmentGroup[0].Stats.AllocationCount + outStats.MemorySegmentGroup[1].Stats.AllocationCount);
6739
D3D12MA_ASSERT(outStats.Total.Stats.BlockBytes ==
6740
outStats.MemorySegmentGroup[0].Stats.BlockBytes + outStats.MemorySegmentGroup[1].Stats.BlockBytes);
6741
D3D12MA_ASSERT(outStats.Total.Stats.AllocationBytes ==
6742
outStats.MemorySegmentGroup[0].Stats.AllocationBytes + outStats.MemorySegmentGroup[1].Stats.AllocationBytes);
6743
D3D12MA_ASSERT(outStats.Total.UnusedRangeCount ==
6744
outStats.MemorySegmentGroup[0].UnusedRangeCount + outStats.MemorySegmentGroup[1].UnusedRangeCount);
6745
6746
D3D12MA_ASSERT(outStats.Total.Stats.BlockCount ==
6747
outStats.HeapType[0].Stats.BlockCount + outStats.HeapType[1].Stats.BlockCount +
6748
outStats.HeapType[2].Stats.BlockCount + outStats.HeapType[3].Stats.BlockCount +
6749
outStats.HeapType[4].Stats.BlockCount);
6750
D3D12MA_ASSERT(outStats.Total.Stats.AllocationCount ==
6751
outStats.HeapType[0].Stats.AllocationCount + outStats.HeapType[1].Stats.AllocationCount +
6752
outStats.HeapType[2].Stats.AllocationCount + outStats.HeapType[3].Stats.AllocationCount +
6753
outStats.HeapType[4].Stats.AllocationCount);
6754
D3D12MA_ASSERT(outStats.Total.Stats.BlockBytes ==
6755
outStats.HeapType[0].Stats.BlockBytes + outStats.HeapType[1].Stats.BlockBytes +
6756
outStats.HeapType[2].Stats.BlockBytes + outStats.HeapType[3].Stats.BlockBytes +
6757
outStats.HeapType[4].Stats.BlockBytes);
6758
D3D12MA_ASSERT(outStats.Total.Stats.AllocationBytes ==
6759
outStats.HeapType[0].Stats.AllocationBytes + outStats.HeapType[1].Stats.AllocationBytes +
6760
outStats.HeapType[2].Stats.AllocationBytes + outStats.HeapType[3].Stats.AllocationBytes +
6761
outStats.HeapType[4].Stats.AllocationBytes);
6762
D3D12MA_ASSERT(outStats.Total.UnusedRangeCount ==
6763
outStats.HeapType[0].UnusedRangeCount + outStats.HeapType[1].UnusedRangeCount +
6764
outStats.HeapType[2].UnusedRangeCount + outStats.HeapType[3].UnusedRangeCount +
6765
outStats.HeapType[4].UnusedRangeCount);
6766
}
6767
6768
void AllocatorPimpl::GetBudget(Budget* outLocalBudget, Budget* outNonLocalBudget)
6769
{
6770
if (outLocalBudget)
6771
m_Budget.GetStatistics(outLocalBudget->Stats, DXGI_MEMORY_SEGMENT_GROUP_LOCAL_COPY);
6772
if (outNonLocalBudget)
6773
m_Budget.GetStatistics(outNonLocalBudget->Stats, DXGI_MEMORY_SEGMENT_GROUP_NON_LOCAL_COPY);
6774
6775
#if D3D12MA_DXGI_1_4
6776
if (m_Adapter3)
6777
{
6778
if (!m_Budget.ShouldUpdateBudget())
6779
{
6780
m_Budget.GetBudget(m_UseMutex,
6781
outLocalBudget ? &outLocalBudget->UsageBytes : NULL,
6782
outLocalBudget ? &outLocalBudget->BudgetBytes : NULL,
6783
outNonLocalBudget ? &outNonLocalBudget->UsageBytes : NULL,
6784
outNonLocalBudget ? &outNonLocalBudget->BudgetBytes : NULL);
6785
}
6786
else
6787
{
6788
UpdateD3D12Budget();
6789
GetBudget(outLocalBudget, outNonLocalBudget); // Recursion
6790
}
6791
}
6792
else
6793
#endif
6794
{
6795
if (outLocalBudget)
6796
{
6797
outLocalBudget->UsageBytes = outLocalBudget->Stats.BlockBytes;
6798
outLocalBudget->BudgetBytes = GetMemoryCapacity(DXGI_MEMORY_SEGMENT_GROUP_LOCAL_COPY) * 8 / 10; // 80% heuristics.
6799
}
6800
if (outNonLocalBudget)
6801
{
6802
outNonLocalBudget->UsageBytes = outNonLocalBudget->Stats.BlockBytes;
6803
outNonLocalBudget->BudgetBytes = GetMemoryCapacity(DXGI_MEMORY_SEGMENT_GROUP_NON_LOCAL_COPY) * 8 / 10; // 80% heuristics.
6804
}
6805
}
6806
}
6807
6808
void AllocatorPimpl::GetBudgetForHeapType(Budget& outBudget, D3D12_HEAP_TYPE heapType)
6809
{
6810
switch (heapType)
6811
{
6812
case D3D12_HEAP_TYPE_DEFAULT:
6813
case D3D12_HEAP_TYPE_GPU_UPLOAD_COPY:
6814
GetBudget(&outBudget, NULL);
6815
break;
6816
case D3D12_HEAP_TYPE_UPLOAD:
6817
case D3D12_HEAP_TYPE_READBACK:
6818
GetBudget(NULL, &outBudget);
6819
break;
6820
default: D3D12MA_ASSERT(0);
6821
}
6822
}
6823
6824
void AllocatorPimpl::BuildStatsString(WCHAR** ppStatsString, BOOL detailedMap)
6825
{
6826
StringBuilder sb(GetAllocs());
6827
{
6828
Budget localBudget = {}, nonLocalBudget = {};
6829
GetBudget(&localBudget, &nonLocalBudget);
6830
6831
TotalStatistics stats;
6832
DetailedStatistics customHeaps[2];
6833
CalculateStatistics(stats, customHeaps);
6834
6835
JsonWriter json(GetAllocs(), sb);
6836
json.BeginObject();
6837
{
6838
json.WriteString(L"General");
6839
json.BeginObject();
6840
{
6841
json.WriteString(L"API");
6842
json.WriteString(L"Direct3D 12");
6843
6844
json.WriteString(L"GPU");
6845
json.WriteString(m_AdapterDesc.Description);
6846
6847
json.WriteString(L"DedicatedVideoMemory");
6848
json.WriteNumber((UINT64)m_AdapterDesc.DedicatedVideoMemory);
6849
json.WriteString(L"DedicatedSystemMemory");
6850
json.WriteNumber((UINT64)m_AdapterDesc.DedicatedSystemMemory);
6851
json.WriteString(L"SharedSystemMemory");
6852
json.WriteNumber((UINT64)m_AdapterDesc.SharedSystemMemory);
6853
6854
json.WriteString(L"ResourceHeapTier");
6855
json.WriteNumber(static_cast<UINT>(m_D3D12Options.ResourceHeapTier));
6856
6857
json.WriteString(L"ResourceBindingTier");
6858
json.WriteNumber(static_cast<UINT>(m_D3D12Options.ResourceBindingTier));
6859
6860
json.WriteString(L"TiledResourcesTier");
6861
json.WriteNumber(static_cast<UINT>(m_D3D12Options.TiledResourcesTier));
6862
6863
json.WriteString(L"TileBasedRenderer");
6864
json.WriteBool(m_D3D12Architecture.TileBasedRenderer);
6865
6866
json.WriteString(L"UMA");
6867
json.WriteBool(m_D3D12Architecture.UMA);
6868
json.WriteString(L"CacheCoherentUMA");
6869
json.WriteBool(m_D3D12Architecture.CacheCoherentUMA);
6870
6871
json.WriteString(L"GPUUploadHeapSupported");
6872
json.WriteBool(m_GPUUploadHeapSupported != FALSE);
6873
}
6874
json.EndObject();
6875
}
6876
{
6877
json.WriteString(L"Total");
6878
json.AddDetailedStatisticsInfoObject(stats.Total);
6879
}
6880
{
6881
json.WriteString(L"MemoryInfo");
6882
json.BeginObject();
6883
{
6884
json.WriteString(L"L0");
6885
json.BeginObject();
6886
{
6887
json.WriteString(L"Budget");
6888
WriteBudgetToJson(json, IsUMA() ? localBudget : nonLocalBudget); // When UMA device only L0 present as local
6889
6890
json.WriteString(L"Stats");
6891
json.AddDetailedStatisticsInfoObject(stats.MemorySegmentGroup[!IsUMA()]);
6892
6893
json.WriteString(L"MemoryPools");
6894
json.BeginObject();
6895
{
6896
if (IsUMA())
6897
{
6898
json.WriteString(L"DEFAULT");
6899
json.BeginObject();
6900
{
6901
json.WriteString(L"Stats");
6902
json.AddDetailedStatisticsInfoObject(stats.HeapType[0]);
6903
}
6904
json.EndObject();
6905
6906
if(IsGPUUploadHeapSupported())
6907
{
6908
json.WriteString(L"GPU_UPLOAD");
6909
json.BeginObject();
6910
{
6911
json.WriteString(L"Stats");
6912
json.AddDetailedStatisticsInfoObject(stats.HeapType[4]);
6913
}
6914
json.EndObject();
6915
}
6916
}
6917
json.WriteString(L"UPLOAD");
6918
json.BeginObject();
6919
{
6920
json.WriteString(L"Stats");
6921
json.AddDetailedStatisticsInfoObject(stats.HeapType[1]);
6922
}
6923
json.EndObject();
6924
6925
json.WriteString(L"READBACK");
6926
json.BeginObject();
6927
{
6928
json.WriteString(L"Stats");
6929
json.AddDetailedStatisticsInfoObject(stats.HeapType[2]);
6930
}
6931
json.EndObject();
6932
6933
json.WriteString(L"CUSTOM");
6934
json.BeginObject();
6935
{
6936
json.WriteString(L"Stats");
6937
json.AddDetailedStatisticsInfoObject(customHeaps[!IsUMA()]);
6938
}
6939
json.EndObject();
6940
}
6941
json.EndObject();
6942
}
6943
json.EndObject();
6944
if (!IsUMA())
6945
{
6946
json.WriteString(L"L1");
6947
json.BeginObject();
6948
{
6949
json.WriteString(L"Budget");
6950
WriteBudgetToJson(json, localBudget);
6951
6952
json.WriteString(L"Stats");
6953
json.AddDetailedStatisticsInfoObject(stats.MemorySegmentGroup[0]);
6954
6955
json.WriteString(L"MemoryPools");
6956
json.BeginObject();
6957
{
6958
json.WriteString(L"DEFAULT");
6959
json.BeginObject();
6960
{
6961
json.WriteString(L"Stats");
6962
json.AddDetailedStatisticsInfoObject(stats.HeapType[0]);
6963
}
6964
json.EndObject();
6965
6966
if(IsGPUUploadHeapSupported())
6967
{
6968
json.WriteString(L"GPU_UPLOAD");
6969
json.BeginObject();
6970
{
6971
json.WriteString(L"Stats");
6972
json.AddDetailedStatisticsInfoObject(stats.HeapType[4]);
6973
}
6974
json.EndObject();
6975
}
6976
6977
json.WriteString(L"CUSTOM");
6978
json.BeginObject();
6979
{
6980
json.WriteString(L"Stats");
6981
json.AddDetailedStatisticsInfoObject(customHeaps[0]);
6982
}
6983
json.EndObject();
6984
}
6985
json.EndObject();
6986
}
6987
json.EndObject();
6988
}
6989
}
6990
json.EndObject();
6991
}
6992
6993
if (detailedMap)
6994
{
6995
const auto writeHeapInfo = [&](BlockVector* blockVector, CommittedAllocationList* committedAllocs, bool customHeap)
6996
{
6997
D3D12MA_ASSERT(blockVector);
6998
6999
D3D12_HEAP_FLAGS flags = blockVector->GetHeapFlags();
7000
json.WriteString(L"Flags");
7001
json.BeginArray(true);
7002
{
7003
if (flags & D3D12_HEAP_FLAG_SHARED)
7004
json.WriteString(L"HEAP_FLAG_SHARED");
7005
if (flags & D3D12_HEAP_FLAG_ALLOW_DISPLAY)
7006
json.WriteString(L"HEAP_FLAG_ALLOW_DISPLAY");
7007
if (flags & D3D12_HEAP_FLAG_SHARED_CROSS_ADAPTER)
7008
json.WriteString(L"HEAP_FLAG_CROSS_ADAPTER");
7009
if (flags & D3D12_HEAP_FLAG_HARDWARE_PROTECTED)
7010
json.WriteString(L"HEAP_FLAG_HARDWARE_PROTECTED");
7011
if (flags & D3D12_HEAP_FLAG_ALLOW_WRITE_WATCH)
7012
json.WriteString(L"HEAP_FLAG_ALLOW_WRITE_WATCH");
7013
if (flags & D3D12_HEAP_FLAG_ALLOW_SHADER_ATOMICS)
7014
json.WriteString(L"HEAP_FLAG_ALLOW_SHADER_ATOMICS");
7015
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
7016
if (flags & D3D12_HEAP_FLAG_CREATE_NOT_RESIDENT)
7017
json.WriteString(L"HEAP_FLAG_CREATE_NOT_RESIDENT");
7018
if (flags & D3D12_HEAP_FLAG_CREATE_NOT_ZEROED)
7019
json.WriteString(L"HEAP_FLAG_CREATE_NOT_ZEROED");
7020
#endif
7021
7022
if (flags & D3D12_HEAP_FLAG_DENY_BUFFERS)
7023
json.WriteString(L"HEAP_FLAG_DENY_BUFFERS");
7024
if (flags & D3D12_HEAP_FLAG_DENY_RT_DS_TEXTURES)
7025
json.WriteString(L"HEAP_FLAG_DENY_RT_DS_TEXTURES");
7026
if (flags & D3D12_HEAP_FLAG_DENY_NON_RT_DS_TEXTURES)
7027
json.WriteString(L"HEAP_FLAG_DENY_NON_RT_DS_TEXTURES");
7028
7029
flags &= ~(D3D12_HEAP_FLAG_SHARED
7030
| D3D12_HEAP_FLAG_DENY_BUFFERS
7031
| D3D12_HEAP_FLAG_ALLOW_DISPLAY
7032
| D3D12_HEAP_FLAG_SHARED_CROSS_ADAPTER
7033
| D3D12_HEAP_FLAG_DENY_RT_DS_TEXTURES
7034
| D3D12_HEAP_FLAG_DENY_NON_RT_DS_TEXTURES
7035
| D3D12_HEAP_FLAG_HARDWARE_PROTECTED
7036
| D3D12_HEAP_FLAG_ALLOW_WRITE_WATCH
7037
| D3D12_HEAP_FLAG_ALLOW_SHADER_ATOMICS);
7038
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
7039
flags &= ~(D3D12_HEAP_FLAG_CREATE_NOT_RESIDENT
7040
| D3D12_HEAP_FLAG_CREATE_NOT_ZEROED);
7041
#endif
7042
if (flags != 0)
7043
json.WriteNumber((UINT)flags);
7044
7045
if (customHeap)
7046
{
7047
const D3D12_HEAP_PROPERTIES& properties = blockVector->GetHeapProperties();
7048
switch (properties.MemoryPoolPreference)
7049
{
7050
default:
7051
D3D12MA_ASSERT(0);
7052
case D3D12_MEMORY_POOL_UNKNOWN:
7053
json.WriteString(L"MEMORY_POOL_UNKNOWN");
7054
break;
7055
case D3D12_MEMORY_POOL_L0:
7056
json.WriteString(L"MEMORY_POOL_L0");
7057
break;
7058
case D3D12_MEMORY_POOL_L1:
7059
json.WriteString(L"MEMORY_POOL_L1");
7060
break;
7061
}
7062
switch (properties.CPUPageProperty)
7063
{
7064
default:
7065
D3D12MA_ASSERT(0);
7066
case D3D12_CPU_PAGE_PROPERTY_UNKNOWN:
7067
json.WriteString(L"CPU_PAGE_PROPERTY_UNKNOWN");
7068
break;
7069
case D3D12_CPU_PAGE_PROPERTY_NOT_AVAILABLE:
7070
json.WriteString(L"CPU_PAGE_PROPERTY_NOT_AVAILABLE");
7071
break;
7072
case D3D12_CPU_PAGE_PROPERTY_WRITE_COMBINE:
7073
json.WriteString(L"CPU_PAGE_PROPERTY_WRITE_COMBINE");
7074
break;
7075
case D3D12_CPU_PAGE_PROPERTY_WRITE_BACK:
7076
json.WriteString(L"CPU_PAGE_PROPERTY_WRITE_BACK");
7077
break;
7078
}
7079
}
7080
}
7081
json.EndArray();
7082
7083
json.WriteString(L"PreferredBlockSize");
7084
json.WriteNumber(blockVector->GetPreferredBlockSize());
7085
7086
json.WriteString(L"Blocks");
7087
blockVector->WriteBlockInfoToJson(json);
7088
7089
json.WriteString(L"DedicatedAllocations");
7090
json.BeginArray();
7091
if (committedAllocs)
7092
committedAllocs->BuildStatsString(json);
7093
json.EndArray();
7094
};
7095
7096
json.WriteString(L"DefaultPools");
7097
json.BeginObject();
7098
{
7099
if (SupportsResourceHeapTier2())
7100
{
7101
for (uint8_t heapType = 0; heapType < STANDARD_HEAP_TYPE_COUNT; ++heapType)
7102
{
7103
json.WriteString(StandardHeapTypeNames[heapType]);
7104
json.BeginObject();
7105
writeHeapInfo(m_BlockVectors[heapType], m_CommittedAllocations + heapType, false);
7106
json.EndObject();
7107
}
7108
}
7109
else
7110
{
7111
for (uint8_t heapType = 0; heapType < STANDARD_HEAP_TYPE_COUNT; ++heapType)
7112
{
7113
for (uint8_t heapSubType = 0; heapSubType < 3; ++heapSubType)
7114
{
7115
static const WCHAR* const heapSubTypeName[] = {
7116
L" - Buffers",
7117
L" - Textures",
7118
L" - Textures RT/DS",
7119
};
7120
json.BeginString(StandardHeapTypeNames[heapType]);
7121
json.EndString(heapSubTypeName[heapSubType]);
7122
7123
json.BeginObject();
7124
writeHeapInfo(m_BlockVectors[heapType * 3 + heapSubType], m_CommittedAllocations + heapType, false);
7125
json.EndObject();
7126
}
7127
}
7128
}
7129
}
7130
json.EndObject();
7131
7132
json.WriteString(L"CustomPools");
7133
json.BeginObject();
7134
for (uint8_t heapTypeIndex = 0; heapTypeIndex < HEAP_TYPE_COUNT; ++heapTypeIndex)
7135
{
7136
MutexLockRead mutex(m_PoolsMutex[heapTypeIndex], m_UseMutex);
7137
auto* item = m_Pools[heapTypeIndex].Front();
7138
if (item != NULL)
7139
{
7140
size_t index = 0;
7141
json.WriteString(HeapTypeNames[heapTypeIndex]);
7142
json.BeginArray();
7143
do
7144
{
7145
json.BeginObject();
7146
json.WriteString(L"Name");
7147
json.BeginString();
7148
json.ContinueString(index++);
7149
if (item->GetName())
7150
{
7151
json.ContinueString(L" - ");
7152
json.ContinueString(item->GetName());
7153
}
7154
json.EndString();
7155
7156
writeHeapInfo(item->GetBlockVector(), item->GetCommittedAllocationList(), heapTypeIndex == 3);
7157
json.EndObject();
7158
} while ((item = PoolList::GetNext(item)) != NULL);
7159
json.EndArray();
7160
}
7161
}
7162
json.EndObject();
7163
}
7164
json.EndObject();
7165
}
7166
7167
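// Prepend a UTF-16 BOM (0xFEFF) and append a terminating null; the caller releases the string via FreeStatsString().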
const size_t length = sb.GetLength();
7168
WCHAR* result = AllocateArray<WCHAR>(GetAllocs(), length + 2);
7169
result[0] = 0xFEFF;
7170
memcpy(result + 1, sb.GetData(), length * sizeof(WCHAR));
7171
result[length + 1] = L'\0';
7172
*ppStatsString = result;
7173
}
7174
7175
void AllocatorPimpl::FreeStatsString(WCHAR* pStatsString)
7176
{
7177
D3D12MA_ASSERT(pStatsString);
7178
Free(GetAllocs(), pStatsString);
7179
}
7180
7181
template<typename D3D12_RESOURCE_DESC_T>
7182
bool AllocatorPimpl::PrefersCommittedAllocation(const D3D12_RESOURCE_DESC_T& resourceDesc,
7183
ALLOCATION_FLAGS strategy)
7184
{
7185
// Prefer creating small buffers <= 32 KB as committed, because drivers pack them better,
7186
// while placed buffers require 64 KB alignment.
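// D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT is 64 KiB, so the size check below accepts buffers of at most 65536 / 2 = 32768 bytes.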
7187
if(resourceDesc.Dimension == D3D12_RESOURCE_DIMENSION_BUFFER &&
7188
resourceDesc.Width <= D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT / 2 &&
7189
strategy != ALLOCATION_FLAG_STRATEGY_MIN_TIME && // Creating as committed would be slower.
7190
m_PreferSmallBuffersCommitted)
7191
{
7192
return true;
7193
}
7194
7195
// Returning false for everything else is intentional. It may change in the future.
7196
return false;
7197
}
7198
7199
HRESULT AllocatorPimpl::AllocateCommittedResource(
7200
const CommittedAllocationParameters& committedAllocParams,
7201
UINT64 resourceSize, bool withinBudget, void* pPrivateData,
7202
const CREATE_RESOURCE_PARAMS& createParams,
7203
Allocation** ppAllocation, REFIID riidResource, void** ppvResource)
7204
{
7205
D3D12MA_ASSERT(committedAllocParams.IsValid());
7206
7207
HRESULT hr;
7208
ID3D12Resource* res = NULL;
7209
// Aliasing requested: allocate an explicit heap and place the resource in it.
7210
if (committedAllocParams.m_CanAlias)
7211
{
7212
D3D12_RESOURCE_ALLOCATION_INFO heapAllocInfo = {};
7213
heapAllocInfo.SizeInBytes = resourceSize;
7214
heapAllocInfo.Alignment = HeapFlagsToAlignment(committedAllocParams.m_HeapFlags, m_MsaaAlwaysCommitted);
7215
hr = AllocateHeap(committedAllocParams, heapAllocInfo, withinBudget, pPrivateData, ppAllocation);
7216
if (SUCCEEDED(hr))
7217
{
7218
hr = CreatePlacedResourceWrap((*ppAllocation)->GetHeap(), 0,
7219
createParams, D3D12MA_IID_PPV_ARGS(&res));
7220
if (SUCCEEDED(hr))
7221
{
7222
if (ppvResource != NULL)
7223
hr = res->QueryInterface(riidResource, ppvResource);
7224
if (SUCCEEDED(hr))
7225
{
7226
(*ppAllocation)->SetResourcePointer(res, createParams.GetBaseResourceDesc());
7227
return hr;
7228
}
7229
res->Release();
7230
}
7231
FreeHeapMemory(*ppAllocation);
7232
}
7233
return hr;
7234
}
7235
7236
if (withinBudget &&
7237
!NewAllocationWithinBudget(committedAllocParams.m_HeapProperties.Type, resourceSize))
7238
{
7239
return E_OUTOFMEMORY;
7240
}
7241
7242
/* D3D12 ERROR:
7243
* ID3D12Device::CreateCommittedResource:
7244
* When creating a committed resource, D3D12_HEAP_FLAGS must not have either
7245
* D3D12_HEAP_FLAG_DENY_NON_RT_DS_TEXTURES,
7246
* D3D12_HEAP_FLAG_DENY_RT_DS_TEXTURES,
7247
* nor D3D12_HEAP_FLAG_DENY_BUFFERS set.
7248
* These flags will be set automatically to correspond with the committed resource type.
7249
*
7250
* [ STATE_CREATION ERROR #640: CREATERESOURCEANDHEAP_INVALIDHEAPMISCFLAGS]
7251
*/
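// This is why RESOURCE_CLASS_HEAP_FLAGS is masked out of m_HeapFlags in every CreateCommittedResource* call below.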
7252
7253
#ifdef __ID3D12Device10_INTERFACE_DEFINED__
7254
if (createParams.Variant == CREATE_RESOURCE_PARAMS::VARIANT_WITH_LAYOUT)
7255
{
7256
if (!m_Device10)
7257
{
7258
return E_NOINTERFACE;
7259
}
7260
hr = m_Device10->CreateCommittedResource3(
7261
&committedAllocParams.m_HeapProperties,
7262
committedAllocParams.m_HeapFlags & ~RESOURCE_CLASS_HEAP_FLAGS,
7263
createParams.GetResourceDesc1(), createParams.GetInitialLayout(),
7264
createParams.GetOptimizedClearValue(), committedAllocParams.m_ProtectedSession,
7265
createParams.GetNumCastableFormats(), createParams.GetCastableFormats(),
7266
D3D12MA_IID_PPV_ARGS(&res));
7267
} else
7268
#endif
7269
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
7270
if (createParams.Variant == CREATE_RESOURCE_PARAMS::VARIANT_WITH_STATE_AND_DESC1)
7271
{
7272
if (!m_Device8)
7273
{
7274
return E_NOINTERFACE;
7275
}
7276
hr = m_Device8->CreateCommittedResource2(
7277
&committedAllocParams.m_HeapProperties,
7278
committedAllocParams.m_HeapFlags & ~RESOURCE_CLASS_HEAP_FLAGS,
7279
createParams.GetResourceDesc1(), createParams.GetInitialResourceState(),
7280
createParams.GetOptimizedClearValue(), committedAllocParams.m_ProtectedSession,
7281
D3D12MA_IID_PPV_ARGS(&res));
7282
} else
7283
#endif
7284
if (createParams.Variant == CREATE_RESOURCE_PARAMS::VARIANT_WITH_STATE)
7285
{
7286
#ifdef __ID3D12Device4_INTERFACE_DEFINED__
7287
if (m_Device4)
7288
{
7289
hr = m_Device4->CreateCommittedResource1(
7290
&committedAllocParams.m_HeapProperties,
7291
committedAllocParams.m_HeapFlags & ~RESOURCE_CLASS_HEAP_FLAGS,
7292
createParams.GetResourceDesc(), createParams.GetInitialResourceState(),
7293
createParams.GetOptimizedClearValue(), committedAllocParams.m_ProtectedSession,
7294
D3D12MA_IID_PPV_ARGS(&res));
7295
}
7296
else
7297
#endif
7298
{
7299
if (committedAllocParams.m_ProtectedSession == NULL)
7300
{
7301
hr = m_Device->CreateCommittedResource(
7302
&committedAllocParams.m_HeapProperties,
7303
committedAllocParams.m_HeapFlags & ~RESOURCE_CLASS_HEAP_FLAGS,
7304
createParams.GetResourceDesc(), createParams.GetInitialResourceState(),
7305
createParams.GetOptimizedClearValue(), D3D12MA_IID_PPV_ARGS(&res));
7306
}
7307
else
7308
hr = E_NOINTERFACE;
7309
}
7310
}
7311
else
7312
{
7313
D3D12MA_ASSERT(0);
7314
return E_INVALIDARG;
7315
}
7316
7317
if (SUCCEEDED(hr))
7318
{
7319
SetResidencyPriority(res, committedAllocParams.m_ResidencyPriority);
7320
7321
if (ppvResource != NULL)
7322
{
7323
hr = res->QueryInterface(riidResource, ppvResource);
7324
}
7325
if (SUCCEEDED(hr))
7326
{
7327
Allocation* alloc = m_AllocationObjectAllocator.Allocate(
7328
this, resourceSize, createParams.GetBaseResourceDesc()->Alignment);
7329
alloc->InitCommitted(committedAllocParams.m_List);
7330
alloc->SetResourcePointer(res, createParams.GetBaseResourceDesc());
7331
alloc->SetPrivateData(pPrivateData);
7332
7333
*ppAllocation = alloc;
7334
7335
committedAllocParams.m_List->Register(alloc);
7336
7337
const UINT memSegmentGroup = HeapPropertiesToMemorySegmentGroup(committedAllocParams.m_HeapProperties);
7338
m_Budget.AddBlock(memSegmentGroup, resourceSize);
7339
m_Budget.AddAllocation(memSegmentGroup, resourceSize);
7340
}
7341
else
7342
{
7343
res->Release();
7344
}
7345
}
7346
return hr;
7347
}
7348
7349
HRESULT AllocatorPimpl::AllocateHeap(
7350
const CommittedAllocationParameters& committedAllocParams,
7351
const D3D12_RESOURCE_ALLOCATION_INFO& allocInfo, bool withinBudget,
7352
void* pPrivateData, Allocation** ppAllocation)
7353
{
7354
D3D12MA_ASSERT(committedAllocParams.IsValid());
7355
7356
*ppAllocation = nullptr;
7357
7358
if (withinBudget &&
7359
!NewAllocationWithinBudget(committedAllocParams.m_HeapProperties.Type, allocInfo.SizeInBytes))
7360
{
7361
return E_OUTOFMEMORY;
7362
}
7363
7364
D3D12_HEAP_DESC heapDesc = {};
7365
heapDesc.SizeInBytes = allocInfo.SizeInBytes;
7366
heapDesc.Properties = committedAllocParams.m_HeapProperties;
7367
heapDesc.Alignment = allocInfo.Alignment;
7368
heapDesc.Flags = committedAllocParams.m_HeapFlags;
7369
7370
HRESULT hr;
7371
ID3D12Heap* heap = nullptr;
7372
#ifdef __ID3D12Device4_INTERFACE_DEFINED__
7373
if (m_Device4)
7374
hr = m_Device4->CreateHeap1(&heapDesc, committedAllocParams.m_ProtectedSession, D3D12MA_IID_PPV_ARGS(&heap));
7375
else
7376
#endif
7377
{
7378
if (committedAllocParams.m_ProtectedSession == NULL)
7379
hr = m_Device->CreateHeap(&heapDesc, D3D12MA_IID_PPV_ARGS(&heap));
7380
else
7381
hr = E_NOINTERFACE;
7382
}
7383
7384
if (SUCCEEDED(hr))
7385
{
7386
SetResidencyPriority(heap, committedAllocParams.m_ResidencyPriority);
7387
(*ppAllocation) = m_AllocationObjectAllocator.Allocate(this, allocInfo.SizeInBytes, allocInfo.Alignment);
7388
(*ppAllocation)->InitHeap(committedAllocParams.m_List, heap);
7389
(*ppAllocation)->SetPrivateData(pPrivateData);
7390
committedAllocParams.m_List->Register(*ppAllocation);
7391
7392
const UINT memSegmentGroup = HeapPropertiesToMemorySegmentGroup(committedAllocParams.m_HeapProperties);
7393
m_Budget.AddBlock(memSegmentGroup, allocInfo.SizeInBytes);
7394
m_Budget.AddAllocation(memSegmentGroup, allocInfo.SizeInBytes);
7395
}
7396
return hr;
7397
}
7398
7399
template<typename D3D12_RESOURCE_DESC_T>
7400
HRESULT AllocatorPimpl::CalcAllocationParams(const ALLOCATION_DESC& allocDesc, UINT64 allocSize,
7401
const D3D12_RESOURCE_DESC_T* resDesc,
7402
BlockVector*& outBlockVector, CommittedAllocationParameters& outCommittedAllocationParams, bool& outPreferCommitted)
7403
{
7404
outBlockVector = NULL;
7405
outCommittedAllocationParams = CommittedAllocationParameters();
7406
outPreferCommitted = false;
7407
7408
if (allocDesc.HeapType == D3D12_HEAP_TYPE_GPU_UPLOAD_COPY && !IsGPUUploadHeapSupported())
7409
return E_NOTIMPL;
7410
7411
bool msaaAlwaysCommitted;
7412
if (allocDesc.CustomPool != NULL)
7413
{
7414
PoolPimpl* const pool = allocDesc.CustomPool->m_Pimpl;
7415
7416
msaaAlwaysCommitted = pool->GetBlockVector()->DeniesMsaaTextures();
7417
if(!pool->AlwaysCommitted())
7418
outBlockVector = pool->GetBlockVector();
7419
7420
const auto& desc = pool->GetDesc();
7421
outCommittedAllocationParams.m_ProtectedSession = desc.pProtectedSession;
7422
outCommittedAllocationParams.m_HeapProperties = desc.HeapProperties;
7423
outCommittedAllocationParams.m_HeapFlags = desc.HeapFlags;
7424
outCommittedAllocationParams.m_List = pool->GetCommittedAllocationList();
7425
outCommittedAllocationParams.m_ResidencyPriority = pool->GetDesc().ResidencyPriority;
7426
}
7427
else
7428
{
7429
if (!IsHeapTypeStandard(allocDesc.HeapType))
7430
{
7431
return E_INVALIDARG;
7432
}
7433
msaaAlwaysCommitted = m_MsaaAlwaysCommitted;
7434
7435
outCommittedAllocationParams.m_HeapProperties = StandardHeapTypeToHeapProperties(allocDesc.HeapType);
7436
outCommittedAllocationParams.m_HeapFlags = allocDesc.ExtraHeapFlags;
7437
outCommittedAllocationParams.m_List = &m_CommittedAllocations[StandardHeapTypeToIndex(allocDesc.HeapType)];
7438
// outCommittedAllocationParams.m_ResidencyPriority intentionally left with default value.
7439
7440
const ResourceClass resourceClass = (resDesc != NULL) ?
7441
ResourceDescToResourceClass(*resDesc) : HeapFlagsToResourceClass(allocDesc.ExtraHeapFlags);
7442
const UINT defaultPoolIndex = CalcDefaultPoolIndex(allocDesc, resourceClass);
7443
if (defaultPoolIndex != UINT32_MAX)
7444
{
7445
outBlockVector = m_BlockVectors[defaultPoolIndex];
7446
const UINT64 preferredBlockSize = outBlockVector->GetPreferredBlockSize();
7447
if (allocSize > preferredBlockSize)
7448
{
7449
outBlockVector = NULL;
7450
}
7451
else if (allocSize > preferredBlockSize / 2)
7452
{
7453
// Heuristic: allocate committed memory if the requested size is greater than half of the preferred block size.
7454
outPreferCommitted = true;
7455
}
7456
}
7457
}
7458
7459
if ((allocDesc.Flags & ALLOCATION_FLAG_COMMITTED) != 0 ||
7460
m_AlwaysCommitted)
7461
{
7462
outBlockVector = NULL;
7463
}
7464
if ((allocDesc.Flags & ALLOCATION_FLAG_NEVER_ALLOCATE) != 0)
7465
{
7466
outCommittedAllocationParams.m_List = NULL;
7467
}
7468
outCommittedAllocationParams.m_CanAlias = allocDesc.Flags & ALLOCATION_FLAG_CAN_ALIAS;
7469
7470
if (resDesc != NULL)
7471
{
7472
if (resDesc->SampleDesc.Count > 1 && msaaAlwaysCommitted)
7473
outBlockVector = NULL;
7474
if (!outPreferCommitted && PrefersCommittedAllocation(*resDesc, allocDesc.Flags & ALLOCATION_FLAG_STRATEGY_MASK))
7475
outPreferCommitted = true;
7476
}
7477
7478
return (outBlockVector != NULL || outCommittedAllocationParams.m_List != NULL) ? S_OK : E_INVALIDARG;
7479
}
7480
7481
UINT AllocatorPimpl::CalcDefaultPoolIndex(const ALLOCATION_DESC& allocDesc, ResourceClass resourceClass) const
7482
{
7483
D3D12_HEAP_FLAGS extraHeapFlags = allocDesc.ExtraHeapFlags & ~RESOURCE_CLASS_HEAP_FLAGS;
7484
7485
#if D3D12MA_CREATE_NOT_ZEROED_AVAILABLE
7486
extraHeapFlags &= ~D3D12_HEAP_FLAG_CREATE_NOT_ZEROED;
7487
#endif
7488
7489
if (extraHeapFlags != 0)
7490
{
7491
return UINT32_MAX;
7492
}
7493
7494
UINT poolIndex = UINT_MAX;
7495
switch (allocDesc.HeapType)
7496
{
7497
case D3D12_HEAP_TYPE_DEFAULT: poolIndex = 0; break;
7498
case D3D12_HEAP_TYPE_UPLOAD: poolIndex = 1; break;
7499
case D3D12_HEAP_TYPE_READBACK: poolIndex = 2; break;
7500
case D3D12_HEAP_TYPE_GPU_UPLOAD_COPY: poolIndex = 3; break;
7501
default: D3D12MA_ASSERT(0);
7502
}
7503
7504
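// With resource heap tier 2 there is one default pool per standard heap type, so poolIndex is used directly.
// With tier 1 each heap type gets three pools: poolIndex * 3 + 0 for buffers, + 1 for non-RT/DS textures, + 2 for RT/DS textures.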
if (SupportsResourceHeapTier2())
7505
return poolIndex;
7506
else
7507
{
7508
switch (resourceClass)
7509
{
7510
case ResourceClass::Buffer:
7511
return poolIndex * 3;
7512
case ResourceClass::Non_RT_DS_Texture:
7513
return poolIndex * 3 + 1;
7514
case ResourceClass::RT_DS_Texture:
7515
return poolIndex * 3 + 2;
7516
default:
7517
return UINT32_MAX;
7518
}
7519
}
7520
}
7521
7522
void AllocatorPimpl::CalcDefaultPoolParams(D3D12_HEAP_TYPE& outHeapType, D3D12_HEAP_FLAGS& outHeapFlags, UINT index) const
7523
{
7524
outHeapType = D3D12_HEAP_TYPE_DEFAULT;
7525
outHeapFlags = D3D12_HEAP_FLAG_NONE;
7526
7527
if (!SupportsResourceHeapTier2())
7528
{
7529
switch (index % 3)
7530
{
7531
case 0:
7532
outHeapFlags = D3D12_HEAP_FLAG_DENY_RT_DS_TEXTURES | D3D12_HEAP_FLAG_DENY_NON_RT_DS_TEXTURES;
7533
break;
7534
case 1:
7535
outHeapFlags = D3D12_HEAP_FLAG_DENY_BUFFERS | D3D12_HEAP_FLAG_DENY_RT_DS_TEXTURES;
7536
break;
7537
case 2:
7538
outHeapFlags = D3D12_HEAP_FLAG_DENY_BUFFERS | D3D12_HEAP_FLAG_DENY_NON_RT_DS_TEXTURES;
7539
break;
7540
}
7541
7542
index /= 3;
7543
}
7544
7545
switch (index)
7546
{
7547
case 0:
7548
outHeapType = D3D12_HEAP_TYPE_DEFAULT;
7549
break;
7550
case 1:
7551
outHeapType = D3D12_HEAP_TYPE_UPLOAD;
7552
break;
7553
case 2:
7554
outHeapType = D3D12_HEAP_TYPE_READBACK;
7555
break;
7556
case 3:
7557
outHeapType = D3D12_HEAP_TYPE_GPU_UPLOAD_COPY;
7558
break;
7559
default:
7560
D3D12MA_ASSERT(0);
7561
}
7562
}
7563
7564
void AllocatorPimpl::RegisterPool(Pool* pool, D3D12_HEAP_TYPE heapType)
7565
{
7566
const UINT heapTypeIndex = (UINT)heapType - 1;
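// D3D12_HEAP_TYPE is 1-based (D3D12_HEAP_TYPE_DEFAULT == 1), so subtract 1 for a zero-based index into m_PoolsMutex/m_Pools.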
7567
7568
MutexLockWrite lock(m_PoolsMutex[heapTypeIndex], m_UseMutex);
7569
m_Pools[heapTypeIndex].PushBack(pool->m_Pimpl);
7570
}
7571
7572
void AllocatorPimpl::UnregisterPool(Pool* pool, D3D12_HEAP_TYPE heapType)
7573
{
7574
const UINT heapTypeIndex = (UINT)heapType - 1;
7575
7576
MutexLockWrite lock(m_PoolsMutex[heapTypeIndex], m_UseMutex);
7577
m_Pools[heapTypeIndex].Remove(pool->m_Pimpl);
7578
}
7579
7580
HRESULT AllocatorPimpl::UpdateD3D12Budget()
7581
{
7582
#if D3D12MA_DXGI_1_4
7583
if (m_Adapter3)
7584
return m_Budget.UpdateBudget(m_Adapter3, m_UseMutex);
7585
else
7586
return E_NOINTERFACE;
7587
#else
7588
return S_OK;
7589
#endif
7590
}
7591
7592
D3D12_RESOURCE_ALLOCATION_INFO AllocatorPimpl::GetResourceAllocationInfoNative(const D3D12_RESOURCE_DESC& resourceDesc) const
7593
{
7594
// Newer D3D12 headers declare GetResourceAllocationInfo with a
7595
// different signature depending on these macros.
7596
#if defined(_MSC_VER) || !defined(_WIN32)
7597
return m_Device->GetResourceAllocationInfo(0, 1, &resourceDesc);
7598
#else
7599
D3D12_RESOURCE_ALLOCATION_INFO retVal;
7600
return *m_Device->GetResourceAllocationInfo(&retVal, 0, 1, &resourceDesc);
7601
#endif
7602
}
7603
7604
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
7605
D3D12_RESOURCE_ALLOCATION_INFO AllocatorPimpl::GetResourceAllocationInfoNative(const D3D12_RESOURCE_DESC1& resourceDesc) const
7606
{
7607
D3D12MA_ASSERT(m_Device8 != NULL);
7608
D3D12_RESOURCE_ALLOCATION_INFO1 info1Unused;
7609
7610
// Newer D3D12 headers declare GetResourceAllocationInfo with a
7611
// different signature depending on these macros.
7612
#if defined(_MSC_VER) || !defined(_WIN32)
7613
return m_Device8->GetResourceAllocationInfo2(0, 1, &resourceDesc, &info1Unused);
7614
#else
7615
D3D12_RESOURCE_ALLOCATION_INFO retVal;
7616
return *m_Device8->GetResourceAllocationInfo2(&retVal, 0, 1, &resourceDesc, &info1Unused);
7617
#endif
7618
}
7619
#endif // #ifdef __ID3D12Device8_INTERFACE_DEFINED__
7620
7621
template<typename D3D12_RESOURCE_DESC_T>
7622
D3D12_RESOURCE_ALLOCATION_INFO AllocatorPimpl::GetResourceAllocationInfo(D3D12_RESOURCE_DESC_T& inOutResourceDesc) const
7623
{
7624
#ifdef __ID3D12Device1_INTERFACE_DEFINED__
7625
/* Optional optimization: Microsoft documentation says:
7626
https://docs.microsoft.com/en-us/windows/win32/api/d3d12/nf-d3d12-id3d12device-getresourceallocationinfo
7627
7628
Your application can forgo using GetResourceAllocationInfo for buffer resources
7629
(D3D12_RESOURCE_DIMENSION_BUFFER). Buffers have the same size on all adapters,
7630
which is merely the smallest multiple of 64KB that's greater or equal to
7631
D3D12_RESOURCE_DESC::Width.
7632
*/
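// Example: a buffer with Width = 100000 and Alignment = 0 returns { SizeInBytes = 131072, Alignment = 65536 } here without a device call.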
7633
if (inOutResourceDesc.Alignment == 0 &&
7634
inOutResourceDesc.Dimension == D3D12_RESOURCE_DIMENSION_BUFFER)
7635
{
7636
return {
7637
AlignUp<UINT64>(inOutResourceDesc.Width, D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT), // SizeInBytes
7638
D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT }; // Alignment
7639
}
7640
#endif // #ifdef __ID3D12Device1_INTERFACE_DEFINED__
7641
7642
#if D3D12MA_USE_SMALL_RESOURCE_PLACEMENT_ALIGNMENT
7643
if (inOutResourceDesc.Alignment == 0 &&
7644
inOutResourceDesc.Dimension == D3D12_RESOURCE_DIMENSION_TEXTURE2D &&
7645
(inOutResourceDesc.Flags & (D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET | D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL)) == 0
7646
#if D3D12MA_USE_SMALL_RESOURCE_PLACEMENT_ALIGNMENT == 1
7647
&& CanUseSmallAlignment(inOutResourceDesc)
7648
#endif
7649
)
7650
{
7651
/*
7652
The algorithm here is based on Microsoft sample: "Small Resources Sample"
7653
https://github.com/microsoft/DirectX-Graphics-Samples/tree/master/Samples/Desktop/D3D12SmallResources
7654
*/
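// The small alignments are 4 KiB (D3D12_SMALL_RESOURCE_PLACEMENT_ALIGNMENT) and 64 KiB for MSAA
// (D3D12_SMALL_MSAA_RESOURCE_PLACEMENT_ALIGNMENT), versus the defaults of 64 KiB and 4 MiB.
// The driver refuses the request by reporting the default alignment, which is detected below.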
7655
const UINT64 smallAlignmentToTry = inOutResourceDesc.SampleDesc.Count > 1 ?
7656
D3D12_SMALL_MSAA_RESOURCE_PLACEMENT_ALIGNMENT :
7657
D3D12_SMALL_RESOURCE_PLACEMENT_ALIGNMENT;
7658
inOutResourceDesc.Alignment = smallAlignmentToTry;
7659
const D3D12_RESOURCE_ALLOCATION_INFO smallAllocInfo = GetResourceAllocationInfoNative(inOutResourceDesc);
7660
// Check whether the requested alignment has been granted.
7661
if (smallAllocInfo.Alignment == smallAlignmentToTry)
7662
{
7663
return smallAllocInfo;
7664
}
7665
inOutResourceDesc.Alignment = 0; // Restore original
7666
}
7667
#endif // #if D3D12MA_USE_SMALL_RESOURCE_PLACEMENT_ALIGNMENT
7668
7669
return GetResourceAllocationInfoNative(inOutResourceDesc);
7670
}
7671
7672
bool AllocatorPimpl::NewAllocationWithinBudget(D3D12_HEAP_TYPE heapType, UINT64 size)
7673
{
7674
Budget budget = {};
7675
GetBudgetForHeapType(budget, heapType);
7676
return budget.UsageBytes + size <= budget.BudgetBytes;
7677
}
7678
7679
void AllocatorPimpl::WriteBudgetToJson(JsonWriter& json, const Budget& budget)
7680
{
7681
json.BeginObject();
7682
{
7683
json.WriteString(L"BudgetBytes");
7684
json.WriteNumber(budget.BudgetBytes);
7685
json.WriteString(L"UsageBytes");
7686
json.WriteNumber(budget.UsageBytes);
7687
}
7688
json.EndObject();
7689
}
7690
7691
#endif // _D3D12MA_ALLOCATOR_PIMPL
7692
#endif // _D3D12MA_ALLOCATOR_PIMPL
7693
7694
#ifndef _D3D12MA_VIRTUAL_BLOCK_PIMPL
7695
class VirtualBlockPimpl
7696
{
7697
public:
7698
const ALLOCATION_CALLBACKS m_AllocationCallbacks;
7699
const UINT64 m_Size;
7700
BlockMetadata* m_Metadata;
7701
7702
VirtualBlockPimpl(const ALLOCATION_CALLBACKS& allocationCallbacks, const VIRTUAL_BLOCK_DESC& desc);
7703
~VirtualBlockPimpl();
7704
};
7705
7706
#ifndef _D3D12MA_VIRTUAL_BLOCK_PIMPL_FUNCTIONS
7707
VirtualBlockPimpl::VirtualBlockPimpl(const ALLOCATION_CALLBACKS& allocationCallbacks, const VIRTUAL_BLOCK_DESC& desc)
7708
: m_AllocationCallbacks(allocationCallbacks), m_Size(desc.Size)
7709
{
7710
switch (desc.Flags & VIRTUAL_BLOCK_FLAG_ALGORITHM_MASK)
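// Flags value 0 selects the default TLSF metadata; VIRTUAL_BLOCK_FLAG_ALGORITHM_LINEAR selects the
// linear allocator. Unknown values assert and then fall through to the TLSF default.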
7711
{
7712
case VIRTUAL_BLOCK_FLAG_ALGORITHM_LINEAR:
7713
m_Metadata = D3D12MA_NEW(allocationCallbacks, BlockMetadata_Linear)(&m_AllocationCallbacks, true);
7714
break;
7715
default:
7716
D3D12MA_ASSERT(0);
7717
case 0:
7718
m_Metadata = D3D12MA_NEW(allocationCallbacks, BlockMetadata_TLSF)(&m_AllocationCallbacks, true);
7719
break;
7720
}
7721
m_Metadata->Init(m_Size);
7722
}
7723
7724
VirtualBlockPimpl::~VirtualBlockPimpl()
7725
{
7726
D3D12MA_DELETE(m_AllocationCallbacks, m_Metadata);
7727
}
7728
#endif // _D3D12MA_VIRTUAL_BLOCK_PIMPL_FUNCTIONS
7729
#endif // _D3D12MA_VIRTUAL_BLOCK_PIMPL
7730
7731
7732
#ifndef _D3D12MA_MEMORY_BLOCK_FUNCTIONS
7733
MemoryBlock::MemoryBlock(
7734
AllocatorPimpl* allocator,
7735
const D3D12_HEAP_PROPERTIES& heapProps,
7736
D3D12_HEAP_FLAGS heapFlags,
7737
UINT64 size,
7738
UINT id)
7739
: m_Allocator(allocator),
7740
m_HeapProps(heapProps),
7741
m_HeapFlags(heapFlags),
7742
m_Size(size),
7743
m_Id(id) {}
7744
7745
MemoryBlock::~MemoryBlock()
7746
{
7747
if (m_Heap)
7748
{
7749
m_Heap->Release();
7750
m_Allocator->m_Budget.RemoveBlock(
7751
m_Allocator->HeapPropertiesToMemorySegmentGroup(m_HeapProps), m_Size);
7752
}
7753
}
7754
7755
HRESULT MemoryBlock::Init(ID3D12ProtectedResourceSession* pProtectedSession, bool denyMsaaTextures)
7756
{
7757
D3D12MA_ASSERT(m_Heap == NULL && m_Size > 0);
7758
7759
D3D12_HEAP_DESC heapDesc = {};
7760
heapDesc.SizeInBytes = m_Size;
7761
heapDesc.Properties = m_HeapProps;
7762
heapDesc.Alignment = HeapFlagsToAlignment(m_HeapFlags, denyMsaaTextures);
7763
heapDesc.Flags = m_HeapFlags;
7764
7765
HRESULT hr;
7766
#ifdef __ID3D12Device4_INTERFACE_DEFINED__
7767
ID3D12Device4* const device4 = m_Allocator->GetDevice4();
7768
if (device4)
7769
hr = m_Allocator->GetDevice4()->CreateHeap1(&heapDesc, pProtectedSession, D3D12MA_IID_PPV_ARGS(&m_Heap));
7770
else
7771
#endif
7772
{
7773
if (pProtectedSession == NULL)
7774
hr = m_Allocator->GetDevice()->CreateHeap(&heapDesc, D3D12MA_IID_PPV_ARGS(&m_Heap));
7775
else
7776
hr = E_NOINTERFACE;
7777
}
7778
7779
if (SUCCEEDED(hr))
7780
{
7781
m_Allocator->m_Budget.AddBlock(
7782
m_Allocator->HeapPropertiesToMemorySegmentGroup(m_HeapProps), m_Size);
7783
}
7784
return hr;
7785
}
7786
#endif // _D3D12MA_MEMORY_BLOCK_FUNCTIONS
7787
7788
#ifndef _D3D12MA_NORMAL_BLOCK_FUNCTIONS
7789
NormalBlock::NormalBlock(
7790
AllocatorPimpl* allocator,
7791
BlockVector* blockVector,
7792
const D3D12_HEAP_PROPERTIES& heapProps,
7793
D3D12_HEAP_FLAGS heapFlags,
7794
UINT64 size,
7795
UINT id)
7796
: MemoryBlock(allocator, heapProps, heapFlags, size, id),
7797
m_pMetadata(NULL),
7798
m_BlockVector(blockVector) {}
7799
7800
NormalBlock::~NormalBlock()
7801
{
7802
if (m_pMetadata != NULL)
7803
{
7804
// Define macro D3D12MA_DEBUG_LOG to receive the list of the unfreed allocations.
7805
if (!m_pMetadata->IsEmpty())
7806
m_pMetadata->DebugLogAllAllocations();
7807
7808
// THIS IS THE MOST IMPORTANT ASSERT IN THE ENTIRE LIBRARY!
7809
// Hitting it means you have some memory leak - unreleased Allocation objects.
7810
D3D12MA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
7811
7812
D3D12MA_DELETE(m_Allocator->GetAllocs(), m_pMetadata);
7813
}
7814
}
7815
7816
HRESULT NormalBlock::Init(UINT32 algorithm, ID3D12ProtectedResourceSession* pProtectedSession, bool denyMsaaTextures)
7817
{
7818
HRESULT hr = MemoryBlock::Init(pProtectedSession, denyMsaaTextures);
7819
if (FAILED(hr))
7820
{
7821
return hr;
7822
}
7823
7824
switch (algorithm)
7825
{
7826
case POOL_FLAG_ALGORITHM_LINEAR:
7827
m_pMetadata = D3D12MA_NEW(m_Allocator->GetAllocs(), BlockMetadata_Linear)(&m_Allocator->GetAllocs(), false);
7828
break;
7829
default:
7830
D3D12MA_ASSERT(0);
7831
case 0:
7832
m_pMetadata = D3D12MA_NEW(m_Allocator->GetAllocs(), BlockMetadata_TLSF)(&m_Allocator->GetAllocs(), false);
7833
break;
7834
}
7835
m_pMetadata->Init(m_Size);
7836
7837
return hr;
7838
}
7839
7840
bool NormalBlock::Validate() const
7841
{
7842
D3D12MA_VALIDATE(GetHeap() &&
7843
m_pMetadata &&
7844
m_pMetadata->GetSize() != 0 &&
7845
m_pMetadata->GetSize() == GetSize());
7846
return m_pMetadata->Validate();
7847
}
7848
#endif // _D3D12MA_NORMAL_BLOCK_FUNCTIONS
7849
7850
#ifndef _D3D12MA_COMMITTED_ALLOCATION_LIST_FUNCTIONS
7851
void CommittedAllocationList::Init(bool useMutex, D3D12_HEAP_TYPE heapType, PoolPimpl* pool)
7852
{
7853
m_UseMutex = useMutex;
7854
m_HeapType = heapType;
7855
m_Pool = pool;
7856
}
7857
7858
CommittedAllocationList::~CommittedAllocationList()
7859
{
7860
if (!m_AllocationList.IsEmpty())
7861
{
7862
D3D12MA_ASSERT(0 && "Unfreed committed allocations found!");
7863
}
7864
}
7865
7866
UINT CommittedAllocationList::GetMemorySegmentGroup(AllocatorPimpl* allocator) const
7867
{
7868
if (m_Pool)
7869
return allocator->HeapPropertiesToMemorySegmentGroup(m_Pool->GetDesc().HeapProperties);
7870
else
7871
return allocator->StandardHeapTypeToMemorySegmentGroup(m_HeapType);
7872
}
7873
7874
void CommittedAllocationList::AddStatistics(Statistics& inoutStats)
7875
{
7876
MutexLockRead lock(m_Mutex, m_UseMutex);
7877
7878
for (Allocation* alloc = m_AllocationList.Front();
7879
alloc != NULL; alloc = m_AllocationList.GetNext(alloc))
7880
{
7881
const UINT64 size = alloc->GetSize();
7882
inoutStats.BlockCount++;
7883
inoutStats.AllocationCount++;
7884
inoutStats.BlockBytes += size;
7885
inoutStats.AllocationBytes += size;
7886
}
7887
}
7888
7889
void CommittedAllocationList::AddDetailedStatistics(DetailedStatistics& inoutStats)
7890
{
7891
MutexLockRead lock(m_Mutex, m_UseMutex);
7892
7893
for (Allocation* alloc = m_AllocationList.Front();
7894
alloc != NULL; alloc = m_AllocationList.GetNext(alloc))
7895
{
7896
const UINT64 size = alloc->GetSize();
7897
inoutStats.Stats.BlockCount++;
7898
inoutStats.Stats.BlockBytes += size;
7899
AddDetailedStatisticsAllocation(inoutStats, size);
7900
}
7901
}
7902
7903
void CommittedAllocationList::BuildStatsString(JsonWriter& json)
7904
{
7905
MutexLockRead lock(m_Mutex, m_UseMutex);
7906
7907
for (Allocation* alloc = m_AllocationList.Front();
7908
alloc != NULL; alloc = m_AllocationList.GetNext(alloc))
7909
{
7910
json.BeginObject(true);
7911
json.AddAllocationToObject(*alloc);
7912
json.EndObject();
7913
}
7914
}
7915
7916
void CommittedAllocationList::Register(Allocation* alloc)
7917
{
7918
MutexLockWrite lock(m_Mutex, m_UseMutex);
7919
m_AllocationList.PushBack(alloc);
7920
}
7921
7922
void CommittedAllocationList::Unregister(Allocation* alloc)
7923
{
7924
MutexLockWrite lock(m_Mutex, m_UseMutex);
7925
m_AllocationList.Remove(alloc);
7926
}
7927
#endif // _D3D12MA_COMMITTED_ALLOCATION_LIST_FUNCTIONS
7928
7929
#ifndef _D3D12MA_BLOCK_VECTOR_FUNCTIONS
7930
BlockVector::BlockVector(
7931
AllocatorPimpl* hAllocator,
7932
const D3D12_HEAP_PROPERTIES& heapProps,
7933
D3D12_HEAP_FLAGS heapFlags,
7934
UINT64 preferredBlockSize,
7935
size_t minBlockCount,
7936
size_t maxBlockCount,
7937
bool explicitBlockSize,
7938
UINT64 minAllocationAlignment,
7939
UINT32 algorithm,
7940
bool denyMsaaTextures,
7941
ID3D12ProtectedResourceSession* pProtectedSession,
7942
D3D12_RESIDENCY_PRIORITY residencyPriority)
7943
: m_hAllocator(hAllocator),
7944
m_HeapProps(heapProps),
7945
m_HeapFlags(heapFlags),
7946
m_PreferredBlockSize(preferredBlockSize),
7947
m_MinBlockCount(minBlockCount),
7948
m_MaxBlockCount(maxBlockCount),
7949
m_ExplicitBlockSize(explicitBlockSize),
7950
m_MinAllocationAlignment(minAllocationAlignment),
7951
m_Algorithm(algorithm),
7952
m_DenyMsaaTextures(denyMsaaTextures),
7953
m_ProtectedSession(pProtectedSession),
7954
m_ResidencyPriority(residencyPriority),
7955
m_HasEmptyBlock(false),
7956
m_Blocks(hAllocator->GetAllocs()),
7957
m_NextBlockId(0) {}
7958
7959
BlockVector::~BlockVector()
7960
{
7961
for (size_t i = m_Blocks.size(); i--; )
7962
{
7963
D3D12MA_DELETE(m_hAllocator->GetAllocs(), m_Blocks[i]);
7964
}
7965
}
7966
7967
HRESULT BlockVector::CreateMinBlocks()
7968
{
7969
for (size_t i = 0; i < m_MinBlockCount; ++i)
7970
{
7971
HRESULT hr = CreateBlock(m_PreferredBlockSize, NULL);
7972
if (FAILED(hr))
7973
{
7974
return hr;
7975
}
7976
}
7977
return S_OK;
7978
}
7979
7980
bool BlockVector::IsEmpty()
7981
{
7982
MutexLockRead lock(m_Mutex, m_hAllocator->UseMutex());
7983
return m_Blocks.empty();
7984
}
7985
7986
HRESULT BlockVector::Allocate(
7987
UINT64 size,
7988
UINT64 alignment,
7989
const ALLOCATION_DESC& allocDesc,
7990
size_t allocationCount,
7991
Allocation** pAllocations)
7992
{
7993
size_t allocIndex;
7994
HRESULT hr = S_OK;
7995
7996
{
7997
MutexLockWrite lock(m_Mutex, m_hAllocator->UseMutex());
7998
for (allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
7999
{
8000
hr = AllocatePage(
8001
size,
8002
alignment,
8003
allocDesc,
8004
pAllocations + allocIndex);
8005
if (FAILED(hr))
8006
{
8007
break;
8008
}
8009
}
8010
}
8011
8012
if (FAILED(hr))
8013
{
8014
// Free all already created allocations.
8015
while (allocIndex--)
8016
{
8017
Free(pAllocations[allocIndex]);
8018
}
8019
ZeroMemory(pAllocations, sizeof(Allocation*) * allocationCount);
8020
}
8021
8022
return hr;
8023
}
8024
8025
void BlockVector::Free(Allocation* hAllocation)
8026
{
8027
NormalBlock* pBlockToDelete = NULL;
8028
8029
bool budgetExceeded = false;
8030
if (IsHeapTypeStandard(m_HeapProps.Type))
8031
{
8032
Budget budget = {};
8033
m_hAllocator->GetBudgetForHeapType(budget, m_HeapProps.Type);
8034
budgetExceeded = budget.UsageBytes >= budget.BudgetBytes;
8035
}
8036
8037
// Scope for lock.
8038
{
8039
MutexLockWrite lock(m_Mutex, m_hAllocator->UseMutex());
8040
8041
NormalBlock* pBlock = hAllocation->m_Placed.block;
8042
8043
pBlock->m_pMetadata->Free(hAllocation->GetAllocHandle());
8044
D3D12MA_HEAVY_ASSERT(pBlock->Validate());
8045
8046
const size_t blockCount = m_Blocks.size();
8047
// pBlock became empty after this deallocation.
8048
if (pBlock->m_pMetadata->IsEmpty())
8049
{
8050
// We already have an empty block. We don't want two, so delete this one.
8051
if ((m_HasEmptyBlock || budgetExceeded) &&
8052
blockCount > m_MinBlockCount)
8053
{
8054
pBlockToDelete = pBlock;
8055
Remove(pBlock);
8056
}
8057
// We now have first empty block.
8058
else
8059
{
8060
m_HasEmptyBlock = true;
8061
}
8062
}
8063
// pBlock didn't become empty, but we have another empty block - find and free that one.
8064
// (This is optional; it is only a heuristic.)
8065
else if (m_HasEmptyBlock && blockCount > m_MinBlockCount)
8066
{
8067
NormalBlock* pLastBlock = m_Blocks.back();
8068
if (pLastBlock->m_pMetadata->IsEmpty())
8069
{
8070
pBlockToDelete = pLastBlock;
8071
m_Blocks.pop_back();
8072
m_HasEmptyBlock = false;
8073
}
8074
}
8075
8076
IncrementallySortBlocks();
8077
}
8078
8079
// Destruction of an empty block. Deferred until this point, outside of the mutex
8080
// lock, for performance reasons.
8081
if (pBlockToDelete != NULL)
8082
{
8083
D3D12MA_DELETE(m_hAllocator->GetAllocs(), pBlockToDelete);
8084
}
8085
}
8086
8087
HRESULT BlockVector::CreateResource(
8088
UINT64 size,
8089
UINT64 alignment,
8090
const ALLOCATION_DESC& allocDesc,
8091
const CREATE_RESOURCE_PARAMS& createParams,
8092
Allocation** ppAllocation,
8093
REFIID riidResource,
8094
void** ppvResource)
8095
{
8096
HRESULT hr = Allocate(size, alignment, allocDesc, 1, ppAllocation);
8097
if (SUCCEEDED(hr))
8098
{
8099
ID3D12Resource* res = NULL;
8100
hr = m_hAllocator->CreatePlacedResourceWrap(
8101
(*ppAllocation)->m_Placed.block->GetHeap(),
8102
(*ppAllocation)->GetOffset(),
8103
createParams,
8104
D3D12MA_IID_PPV_ARGS(&res));
8105
if (SUCCEEDED(hr))
8106
{
8107
if (ppvResource != NULL)
8108
{
8109
hr = res->QueryInterface(riidResource, ppvResource);
8110
}
8111
if (SUCCEEDED(hr))
8112
{
8113
(*ppAllocation)->SetResourcePointer(res, createParams.GetBaseResourceDesc());
8114
}
8115
else
8116
{
8117
res->Release();
8118
SAFE_RELEASE(*ppAllocation);
8119
}
8120
}
8121
else
8122
{
8123
SAFE_RELEASE(*ppAllocation);
8124
}
8125
}
8126
return hr;
8127
}
8128
8129
void BlockVector::AddStatistics(Statistics& inoutStats)
8130
{
8131
MutexLockRead lock(m_Mutex, m_hAllocator->UseMutex());
8132
8133
for (size_t i = 0; i < m_Blocks.size(); ++i)
8134
{
8135
const NormalBlock* const pBlock = m_Blocks[i];
8136
D3D12MA_ASSERT(pBlock);
8137
D3D12MA_HEAVY_ASSERT(pBlock->Validate());
8138
pBlock->m_pMetadata->AddStatistics(inoutStats);
8139
}
8140
}
8141
8142
void BlockVector::AddDetailedStatistics(DetailedStatistics& inoutStats)
8143
{
8144
MutexLockRead lock(m_Mutex, m_hAllocator->UseMutex());
8145
8146
for (size_t i = 0; i < m_Blocks.size(); ++i)
8147
{
8148
const NormalBlock* const pBlock = m_Blocks[i];
8149
D3D12MA_ASSERT(pBlock);
8150
D3D12MA_HEAVY_ASSERT(pBlock->Validate());
8151
pBlock->m_pMetadata->AddDetailedStatistics(inoutStats);
8152
}
8153
}
8154
8155
void BlockVector::WriteBlockInfoToJson(JsonWriter& json)
8156
{
8157
MutexLockRead lock(m_Mutex, m_hAllocator->UseMutex());
8158
8159
json.BeginObject();
8160
8161
for (size_t i = 0, count = m_Blocks.size(); i < count; ++i)
8162
{
8163
const NormalBlock* const pBlock = m_Blocks[i];
8164
D3D12MA_ASSERT(pBlock);
8165
D3D12MA_HEAVY_ASSERT(pBlock->Validate());
8166
json.BeginString();
8167
json.ContinueString(pBlock->GetId());
8168
json.EndString();
8169
8170
json.BeginObject();
8171
pBlock->m_pMetadata->WriteAllocationInfoToJson(json);
8172
json.EndObject();
8173
}
8174
8175
json.EndObject();
8176
}
8177
8178
UINT64 BlockVector::CalcSumBlockSize() const
8179
{
8180
UINT64 result = 0;
8181
for (size_t i = m_Blocks.size(); i--; )
8182
{
8183
result += m_Blocks[i]->m_pMetadata->GetSize();
8184
}
8185
return result;
8186
}
8187
8188
UINT64 BlockVector::CalcMaxBlockSize() const
8189
{
8190
UINT64 result = 0;
8191
for (size_t i = m_Blocks.size(); i--; )
8192
{
8193
result = D3D12MA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
8194
if (result >= m_PreferredBlockSize)
8195
{
8196
break;
8197
}
8198
}
8199
return result;
8200
}
8201
8202
void BlockVector::Remove(NormalBlock* pBlock)
8203
{
8204
for (size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
8205
{
8206
if (m_Blocks[blockIndex] == pBlock)
8207
{
8208
m_Blocks.remove(blockIndex);
8209
return;
8210
}
8211
}
8212
D3D12MA_ASSERT(0);
8213
}
8214
8215
void BlockVector::IncrementallySortBlocks()
8216
{
8217
if (!m_IncrementalSort)
8218
return;
8219
// Bubble sort only until first swap.
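// A single swap per call keeps m_Blocks approximately sorted by ascending free size,
// which AllocatePage relies on to fill the fullest blocks first.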
8220
for (size_t i = 1; i < m_Blocks.size(); ++i)
8221
{
8222
if (m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
8223
{
8224
D3D12MA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
8225
return;
8226
}
8227
}
8228
}
8229
8230
void BlockVector::SortByFreeSize()
8231
{
8232
D3D12MA_SORT(m_Blocks.begin(), m_Blocks.end(),
8233
[](auto* b1, auto* b2)
8234
{
8235
return b1->m_pMetadata->GetSumFreeSize() < b2->m_pMetadata->GetSumFreeSize();
8236
});
8237
}
8238
8239
HRESULT BlockVector::AllocatePage(
8240
UINT64 size,
8241
UINT64 alignment,
8242
const ALLOCATION_DESC& allocDesc,
8243
Allocation** pAllocation)
8244
{
8245
// Early reject: requested allocation size is larger that maximum block size for this block vector.
8246
if (size + D3D12MA_DEBUG_MARGIN > m_PreferredBlockSize)
8247
{
8248
return E_OUTOFMEMORY;
8249
}
8250
8251
UINT64 freeMemory = UINT64_MAX;
8252
if (IsHeapTypeStandard(m_HeapProps.Type))
8253
{
8254
Budget budget = {};
8255
m_hAllocator->GetBudgetForHeapType(budget, m_HeapProps.Type);
8256
freeMemory = (budget.UsageBytes < budget.BudgetBytes) ? (budget.BudgetBytes - budget.UsageBytes) : 0;
8257
}
8258
8259
const bool canCreateNewBlock =
8260
((allocDesc.Flags & ALLOCATION_FLAG_NEVER_ALLOCATE) == 0) &&
8261
(m_Blocks.size() < m_MaxBlockCount) &&
8262
// Even if we don't have to stay within budget with this allocation, when the
8263
// budget would be exceeded, we don't want to allocate new blocks, but always
8264
// create resources as committed.
8265
freeMemory >= size;
8266
8267
// 1. Search existing allocations
8268
{
8269
// Forward order in m_Blocks - prefer blocks with smallest amount of free space.
8270
for (size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
8271
{
8272
NormalBlock* const pCurrBlock = m_Blocks[blockIndex];
8273
D3D12MA_ASSERT(pCurrBlock);
8274
HRESULT hr = AllocateFromBlock(
8275
pCurrBlock,
8276
size,
8277
alignment,
8278
allocDesc.Flags,
8279
allocDesc.pPrivateData,
8280
allocDesc.Flags & ALLOCATION_FLAG_STRATEGY_MASK,
8281
pAllocation);
8282
if (SUCCEEDED(hr))
8283
{
8284
return hr;
8285
}
8286
}
8287
}
8288
8289
// 2. Try to create new block.
8290
if (canCreateNewBlock)
8291
{
8292
// Calculate optimal size for new block.
8293
UINT64 newBlockSize = m_PreferredBlockSize;
8294
UINT newBlockSizeShift = 0;
8295
8296
if (!m_ExplicitBlockSize)
8297
{
8298
// Allocate 1/8, 1/4, 1/2 as first blocks.
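// E.g. with a 64 MiB preferred block size and an empty vector, the first block created is 8 MiB;
// as existing blocks grow, newly created blocks approach the full preferred size.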
8299
const UINT64 maxExistingBlockSize = CalcMaxBlockSize();
8300
for (UINT i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
8301
{
8302
const UINT64 smallerNewBlockSize = newBlockSize / 2;
8303
if (smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
8304
{
8305
newBlockSize = smallerNewBlockSize;
8306
++newBlockSizeShift;
8307
}
8308
else
8309
{
8310
break;
8311
}
8312
}
8313
}
8314
8315
size_t newBlockIndex = 0;
8316
HRESULT hr = newBlockSize <= freeMemory ?
8317
CreateBlock(newBlockSize, &newBlockIndex) : E_OUTOFMEMORY;
8318
// Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
8319
if (!m_ExplicitBlockSize)
8320
{
8321
while (FAILED(hr) && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
8322
{
8323
const UINT64 smallerNewBlockSize = newBlockSize / 2;
8324
if (smallerNewBlockSize >= size)
8325
{
8326
newBlockSize = smallerNewBlockSize;
8327
++newBlockSizeShift;
8328
hr = newBlockSize <= freeMemory ?
8329
CreateBlock(newBlockSize, &newBlockIndex) : E_OUTOFMEMORY;
8330
}
8331
else
8332
{
8333
break;
8334
}
8335
}
8336
}
8337
8338
if (SUCCEEDED(hr))
8339
{
8340
NormalBlock* const pBlock = m_Blocks[newBlockIndex];
8341
D3D12MA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
8342
8343
hr = AllocateFromBlock(
8344
pBlock,
8345
size,
8346
alignment,
8347
allocDesc.Flags,
8348
allocDesc.pPrivateData,
8349
allocDesc.Flags & ALLOCATION_FLAG_STRATEGY_MASK,
8350
pAllocation);
8351
if (SUCCEEDED(hr))
8352
{
8353
return hr;
8354
}
8355
else
8356
{
8357
// Allocation from new block failed, possibly due to D3D12MA_DEBUG_MARGIN or alignment.
8358
return E_OUTOFMEMORY;
8359
}
8360
}
8361
}
8362
8363
return E_OUTOFMEMORY;
8364
}
8365
8366
HRESULT BlockVector::AllocateFromBlock(
8367
NormalBlock* pBlock,
8368
UINT64 size,
8369
UINT64 alignment,
8370
ALLOCATION_FLAGS allocFlags,
8371
void* pPrivateData,
8372
UINT32 strategy,
8373
Allocation** pAllocation)
8374
{
8375
alignment = D3D12MA_MAX(alignment, m_MinAllocationAlignment);
8376
8377
AllocationRequest currRequest = {};
8378
if (pBlock->m_pMetadata->CreateAllocationRequest(
8379
size,
8380
alignment,
8381
allocFlags & ALLOCATION_FLAG_UPPER_ADDRESS,
8382
strategy,
8383
&currRequest))
8384
{
8385
return CommitAllocationRequest(currRequest, pBlock, size, alignment, pPrivateData, pAllocation);
8386
}
8387
return E_OUTOFMEMORY;
8388
}
8389
8390
HRESULT BlockVector::CommitAllocationRequest(
8391
AllocationRequest& allocRequest,
8392
NormalBlock* pBlock,
8393
UINT64 size,
8394
UINT64 alignment,
8395
void* pPrivateData,
8396
Allocation** pAllocation)
8397
{
8398
// If this block was the empty one, it is about to stop being empty.
8399
if (pBlock->m_pMetadata->IsEmpty())
8400
m_HasEmptyBlock = false;
8401
8402
*pAllocation = m_hAllocator->GetAllocationObjectAllocator().Allocate(m_hAllocator, size, alignment);
8403
pBlock->m_pMetadata->Alloc(allocRequest, size, *pAllocation);
8404
8405
(*pAllocation)->InitPlaced(allocRequest.allocHandle, pBlock);
8406
(*pAllocation)->SetPrivateData(pPrivateData);
8407
8408
D3D12MA_HEAVY_ASSERT(pBlock->Validate());
8409
m_hAllocator->m_Budget.AddAllocation(m_hAllocator->HeapPropertiesToMemorySegmentGroup(m_HeapProps), size);
8410
8411
return S_OK;
8412
}
8413
8414
HRESULT BlockVector::CreateBlock(
8415
UINT64 blockSize,
8416
size_t* pNewBlockIndex)
8417
{
8418
NormalBlock* const pBlock = D3D12MA_NEW(m_hAllocator->GetAllocs(), NormalBlock)(
8419
m_hAllocator,
8420
this,
8421
m_HeapProps,
8422
m_HeapFlags,
8423
blockSize,
8424
m_NextBlockId++);
8425
HRESULT hr = pBlock->Init(m_Algorithm, m_ProtectedSession, m_DenyMsaaTextures);
8426
if (FAILED(hr))
8427
{
8428
D3D12MA_DELETE(m_hAllocator->GetAllocs(), pBlock);
8429
return hr;
8430
}
8431
8432
m_hAllocator->SetResidencyPriority(pBlock->GetHeap(), m_ResidencyPriority);
8433
8434
m_Blocks.push_back(pBlock);
8435
if (pNewBlockIndex != NULL)
8436
{
8437
*pNewBlockIndex = m_Blocks.size() - 1;
8438
}
8439
8440
return hr;
8441
}
8442
#endif // _D3D12MA_BLOCK_VECTOR_FUNCTIONS
8443
8444
#ifndef _D3D12MA_DEFRAGMENTATION_CONTEXT_PIMPL_FUNCTIONS
8445
DefragmentationContextPimpl::DefragmentationContextPimpl(
8446
AllocatorPimpl* hAllocator,
8447
const DEFRAGMENTATION_DESC& desc,
8448
BlockVector* poolVector)
8449
: m_MaxPassBytes(desc.MaxBytesPerPass == 0 ? UINT64_MAX : desc.MaxBytesPerPass),
8450
m_MaxPassAllocations(desc.MaxAllocationsPerPass == 0 ? UINT32_MAX : desc.MaxAllocationsPerPass),
8451
m_Moves(hAllocator->GetAllocs())
8452
{
8453
m_Algorithm = desc.Flags & DEFRAGMENTATION_FLAG_ALGORITHM_MASK;
8454
8455
if (poolVector != NULL)
8456
{
8457
m_BlockVectorCount = 1;
8458
m_PoolBlockVector = poolVector;
8459
m_pBlockVectors = &m_PoolBlockVector;
8460
m_PoolBlockVector->SetIncrementalSort(false);
8461
m_PoolBlockVector->SortByFreeSize();
8462
}
8463
else
8464
{
8465
m_BlockVectorCount = hAllocator->GetDefaultPoolCount();
8466
m_PoolBlockVector = NULL;
8467
m_pBlockVectors = hAllocator->GetDefaultPools();
8468
for (UINT32 i = 0; i < m_BlockVectorCount; ++i)
8469
{
8470
BlockVector* vector = m_pBlockVectors[i];
8471
if (vector != NULL)
8472
{
8473
vector->SetIncrementalSort(false);
8474
vector->SortByFreeSize();
8475
}
8476
}
8477
}
8478
8479
switch (m_Algorithm)
8480
{
8481
case 0: // Default algorithm
8482
m_Algorithm = DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED;
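// Intentional fallthrough: the default algorithm uses the BALANCED state set up below.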
8483
case DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED:
8484
{
8485
m_AlgorithmState = D3D12MA_NEW_ARRAY(hAllocator->GetAllocs(), StateBalanced, m_BlockVectorCount);
8486
break;
8487
}
8488
}
8489
}
8490
8491
DefragmentationContextPimpl::~DefragmentationContextPimpl()
8492
{
8493
if (m_PoolBlockVector != NULL)
8494
m_PoolBlockVector->SetIncrementalSort(true);
8495
else
8496
{
8497
for (UINT32 i = 0; i < m_BlockVectorCount; ++i)
8498
{
8499
BlockVector* vector = m_pBlockVectors[i];
8500
if (vector != NULL)
8501
vector->SetIncrementalSort(true);
8502
}
8503
}
8504
8505
if (m_AlgorithmState)
8506
{
8507
switch (m_Algorithm)
8508
{
8509
case DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED:
8510
D3D12MA_DELETE_ARRAY(m_Moves.GetAllocs(), reinterpret_cast<StateBalanced*>(m_AlgorithmState), m_BlockVectorCount);
8511
break;
8512
default:
8513
D3D12MA_ASSERT(0);
8514
}
8515
}
8516
}
8517
8518
HRESULT DefragmentationContextPimpl::DefragmentPassBegin(DEFRAGMENTATION_PASS_MOVE_INFO& moveInfo)
8519
{
8520
if (m_PoolBlockVector != NULL)
8521
{
8522
MutexLockWrite lock(m_PoolBlockVector->GetMutex(), m_PoolBlockVector->m_hAllocator->UseMutex());
8523
8524
if (m_PoolBlockVector->GetBlockCount() > 1)
8525
ComputeDefragmentation(*m_PoolBlockVector, 0);
8526
else if (m_PoolBlockVector->GetBlockCount() == 1)
8527
ReallocWithinBlock(*m_PoolBlockVector, m_PoolBlockVector->GetBlock(0));
8528
8529
// Setup index into block vector
8530
for (size_t i = 0; i < m_Moves.size(); ++i)
8531
m_Moves[i].pDstTmpAllocation->SetPrivateData(0);
8532
}
8533
else
8534
{
8535
for (UINT32 i = 0; i < m_BlockVectorCount; ++i)
8536
{
8537
if (m_pBlockVectors[i] != NULL)
8538
{
8539
MutexLockWrite lock(m_pBlockVectors[i]->GetMutex(), m_pBlockVectors[i]->m_hAllocator->UseMutex());
8540
8541
bool end = false;
8542
size_t movesOffset = m_Moves.size();
8543
if (m_pBlockVectors[i]->GetBlockCount() > 1)
8544
{
8545
end = ComputeDefragmentation(*m_pBlockVectors[i], i);
8546
}
8547
else if (m_pBlockVectors[i]->GetBlockCount() == 1)
8548
{
8549
end = ReallocWithinBlock(*m_pBlockVectors[i], m_pBlockVectors[i]->GetBlock(0));
8550
}
8551
8552
// Setup index into block vector
8553
for (; movesOffset < m_Moves.size(); ++movesOffset)
8554
m_Moves[movesOffset].pDstTmpAllocation->SetPrivateData(reinterpret_cast<void*>(static_cast<uintptr_t>(i)));
8555
8556
if (end)
8557
break;
8558
}
8559
}
8560
}
8561
8562
moveInfo.MoveCount = static_cast<UINT32>(m_Moves.size());
8563
if (moveInfo.MoveCount > 0)
8564
{
8565
moveInfo.pMoves = m_Moves.data();
8566
return S_FALSE;
8567
}
8568
8569
moveInfo.pMoves = NULL;
8570
return S_OK;
8571
}
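/*
Illustrative sketch only (not part of this file): the public D3D12MA::DefragmentationContext methods
BeginPass/EndPass, which wrap DefragmentPassBegin/DefragmentPassEnd above, are typically driven like this:

    D3D12MA::DEFRAGMENTATION_PASS_MOVE_INFO pass = {};
    for (;;)
    {
        if (defragCtx->BeginPass(&pass) == S_OK)
            break; // nothing left to move
        // Copy each pass.pMoves[i].pSrcAllocation's contents into pMoves[i].pDstTmpAllocation on the GPU,
        // or set pMoves[i].Operation to IGNORE/DESTROY, then wait for the copies to complete.
        if (defragCtx->EndPass(&pass) == S_OK)
            break; // defragmentation finished
    }
*/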
8572
8573
HRESULT DefragmentationContextPimpl::DefragmentPassEnd(DEFRAGMENTATION_PASS_MOVE_INFO& moveInfo)
8574
{
8575
D3D12MA_ASSERT(moveInfo.MoveCount > 0 ? moveInfo.pMoves != NULL : true);
8576
8577
HRESULT result = S_OK;
8578
Vector<FragmentedBlock> immovableBlocks(m_Moves.GetAllocs());
8579
8580
for (uint32_t i = 0; i < moveInfo.MoveCount; ++i)
8581
{
8582
DEFRAGMENTATION_MOVE& move = moveInfo.pMoves[i];
8583
size_t prevCount = 0, currentCount = 0;
8584
UINT64 freedBlockSize = 0;
8585
8586
UINT32 vectorIndex;
8587
BlockVector* vector;
8588
if (m_PoolBlockVector != NULL)
8589
{
8590
vectorIndex = 0;
8591
vector = m_PoolBlockVector;
8592
}
8593
else
8594
{
8595
vectorIndex = static_cast<UINT32>(reinterpret_cast<uintptr_t>(move.pDstTmpAllocation->GetPrivateData()));
8596
vector = m_pBlockVectors[vectorIndex];
8597
D3D12MA_ASSERT(vector != NULL);
8598
}
8599
8600
switch (move.Operation)
8601
{
8602
case DEFRAGMENTATION_MOVE_OPERATION_COPY:
8603
{
8604
move.pSrcAllocation->SwapBlockAllocation(move.pDstTmpAllocation);
8605
8606
// Scope for locks; Free has its own lock.
8607
{
8608
MutexLockRead lock(vector->GetMutex(), vector->m_hAllocator->UseMutex());
8609
prevCount = vector->GetBlockCount();
8610
freedBlockSize = move.pDstTmpAllocation->GetBlock()->m_pMetadata->GetSize();
8611
}
8612
move.pDstTmpAllocation->Release();
8613
{
8614
MutexLockRead lock(vector->GetMutex(), vector->m_hAllocator->UseMutex());
8615
currentCount = vector->GetBlockCount();
8616
}
8617
8618
result = S_FALSE;
8619
break;
8620
}
8621
case DEFRAGMENTATION_MOVE_OPERATION_IGNORE:
8622
{
8623
m_PassStats.BytesMoved -= move.pSrcAllocation->GetSize();
8624
--m_PassStats.AllocationsMoved;
8625
move.pDstTmpAllocation->Release();
8626
8627
NormalBlock* newBlock = move.pSrcAllocation->GetBlock();
8628
bool notPresent = true;
8629
for (const FragmentedBlock& block : immovableBlocks)
8630
{
8631
if (block.block == newBlock)
8632
{
8633
notPresent = false;
8634
break;
8635
}
8636
}
8637
if (notPresent)
8638
immovableBlocks.push_back({ vectorIndex, newBlock });
8639
break;
8640
}
8641
case DEFRAGMENTATION_MOVE_OPERATION_DESTROY:
8642
{
8643
m_PassStats.BytesMoved -= move.pSrcAllocation->GetSize();
8644
--m_PassStats.AllocationsMoved;
8645
// Scope for locks; Free has its own lock.
8646
{
8647
MutexLockRead lock(vector->GetMutex(), vector->m_hAllocator->UseMutex());
8648
prevCount = vector->GetBlockCount();
8649
freedBlockSize = move.pSrcAllocation->GetBlock()->m_pMetadata->GetSize();
8650
}
8651
move.pSrcAllocation->Release();
8652
{
8653
MutexLockRead lock(vector->GetMutex(), vector->m_hAllocator->UseMutex());
8654
currentCount = vector->GetBlockCount();
8655
}
8656
freedBlockSize *= prevCount - currentCount;
8657
8658
UINT64 dstBlockSize;
8659
{
8660
MutexLockRead lock(vector->GetMutex(), vector->m_hAllocator->UseMutex());
8661
dstBlockSize = move.pDstTmpAllocation->GetBlock()->m_pMetadata->GetSize();
8662
}
8663
move.pDstTmpAllocation->Release();
8664
{
8665
MutexLockRead lock(vector->GetMutex(), vector->m_hAllocator->UseMutex());
8666
freedBlockSize += dstBlockSize * (currentCount - vector->GetBlockCount());
8667
currentCount = vector->GetBlockCount();
8668
}
8669
8670
result = S_FALSE;
8671
break;
8672
}
8673
default:
8674
D3D12MA_ASSERT(0);
8675
}
8676
8677
if (prevCount > currentCount)
8678
{
8679
size_t freedBlocks = prevCount - currentCount;
8680
m_PassStats.HeapsFreed += static_cast<UINT32>(freedBlocks);
8681
m_PassStats.BytesFreed += freedBlockSize;
8682
}
8683
}
8684
moveInfo.MoveCount = 0;
8685
moveInfo.pMoves = NULL;
8686
m_Moves.clear();
8687
8688
// Update stats
8689
m_GlobalStats.AllocationsMoved += m_PassStats.AllocationsMoved;
8690
m_GlobalStats.BytesFreed += m_PassStats.BytesFreed;
8691
m_GlobalStats.BytesMoved += m_PassStats.BytesMoved;
8692
m_GlobalStats.HeapsFreed += m_PassStats.HeapsFreed;
8693
m_PassStats = { 0 };
8694
8695
// Move blocks with immovable allocations according to algorithm
8696
if (immovableBlocks.size() > 0)
8697
{
8698
// Move to the begining
8699
for (const FragmentedBlock& block : immovableBlocks)
8700
{
8701
BlockVector* vector = m_pBlockVectors[block.data];
8702
MutexLockWrite lock(vector->GetMutex(), vector->m_hAllocator->UseMutex());
8703
8704
for (size_t i = m_ImmovableBlockCount; i < vector->GetBlockCount(); ++i)
8705
{
8706
if (vector->GetBlock(i) == block.block)
8707
{
8708
D3D12MA_SWAP(vector->m_Blocks[i], vector->m_Blocks[m_ImmovableBlockCount++]);
8709
break;
8710
}
8711
}
8712
}
8713
}
8714
return result;
8715
}
8716
8717
bool DefragmentationContextPimpl::ComputeDefragmentation(BlockVector& vector, size_t index)
8718
{
8719
switch (m_Algorithm)
8720
{
8721
case DEFRAGMENTATION_FLAG_ALGORITHM_FAST:
8722
return ComputeDefragmentation_Fast(vector);
8723
default:
8724
D3D12MA_ASSERT(0);
8725
case DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED:
8726
return ComputeDefragmentation_Balanced(vector, index, true);
8727
case DEFRAGMENTATION_FLAG_ALGORITHM_FULL:
8728
return ComputeDefragmentation_Full(vector);
8729
}
8730
}
8731
8732
DefragmentationContextPimpl::MoveAllocationData DefragmentationContextPimpl::GetMoveData(
8733
AllocHandle handle, BlockMetadata* metadata)
8734
{
8735
MoveAllocationData moveData;
8736
moveData.move.pSrcAllocation = (Allocation*)metadata->GetAllocationPrivateData(handle);
8737
moveData.size = moveData.move.pSrcAllocation->GetSize();
8738
moveData.alignment = moveData.move.pSrcAllocation->GetAlignment();
8739
moveData.flags = ALLOCATION_FLAG_NONE;
8740
8741
return moveData;
8742
}
8743
8744
DefragmentationContextPimpl::CounterStatus DefragmentationContextPimpl::CheckCounters(UINT64 bytes)
8745
{
8746
// Ignore the allocation if it would exceed the maximum number of bytes moved per pass.
8747
if (m_PassStats.BytesMoved + bytes > m_MaxPassBytes)
8748
{
8749
if (++m_IgnoredAllocs < MAX_ALLOCS_TO_IGNORE)
8750
return CounterStatus::Ignore;
8751
else
8752
return CounterStatus::End;
8753
}
8754
return CounterStatus::Pass;
8755
}
8756
8757
bool DefragmentationContextPimpl::IncrementCounters(UINT64 bytes)
8758
{
8759
m_PassStats.BytesMoved += bytes;
8760
// Early return when max found
8761
if (++m_PassStats.AllocationsMoved >= m_MaxPassAllocations || m_PassStats.BytesMoved >= m_MaxPassBytes)
8762
{
8763
D3D12MA_ASSERT((m_PassStats.AllocationsMoved == m_MaxPassAllocations ||
8764
m_PassStats.BytesMoved == m_MaxPassBytes) && "Exceeded maximal pass threshold!");
8765
return true;
8766
}
8767
return false;
8768
}
8769
8770
bool DefragmentationContextPimpl::ReallocWithinBlock(BlockVector& vector, NormalBlock* block)
8771
{
8772
BlockMetadata* metadata = block->m_pMetadata;
8773
8774
for (AllocHandle handle = metadata->GetAllocationListBegin();
8775
handle != (AllocHandle)0;
8776
handle = metadata->GetNextAllocation(handle))
8777
{
8778
MoveAllocationData moveData = GetMoveData(handle, metadata);
8779
// Ignore newly created allocations by defragmentation algorithm
8780
if (moveData.move.pSrcAllocation->GetPrivateData() == this)
8781
continue;
8782
switch (CheckCounters(moveData.move.pSrcAllocation->GetSize()))
8783
{
8784
case CounterStatus::Ignore:
8785
continue;
8786
case CounterStatus::End:
8787
return true;
8788
default:
8789
D3D12MA_ASSERT(0);
8790
case CounterStatus::Pass:
8791
break;
8792
}
8793
8794
UINT64 offset = moveData.move.pSrcAllocation->GetOffset();
8795
if (offset != 0 && metadata->GetSumFreeSize() >= moveData.size)
8796
{
8797
AllocationRequest request = {};
8798
if (metadata->CreateAllocationRequest(
8799
moveData.size,
8800
moveData.alignment,
8801
false,
8802
ALLOCATION_FLAG_STRATEGY_MIN_OFFSET,
8803
&request))
8804
{
8805
if (metadata->GetAllocationOffset(request.allocHandle) < offset)
8806
{
8807
if (SUCCEEDED(vector.CommitAllocationRequest(
8808
request,
8809
block,
8810
moveData.size,
8811
moveData.alignment,
8812
this,
8813
&moveData.move.pDstTmpAllocation)))
8814
{
8815
m_Moves.push_back(moveData.move);
8816
if (IncrementCounters(moveData.size))
8817
return true;
8818
}
8819
}
8820
}
8821
}
8822
}
8823
return false;
8824
}
8825
8826
bool DefragmentationContextPimpl::AllocInOtherBlock(size_t start, size_t end, MoveAllocationData& data, BlockVector& vector)
8827
{
8828
for (; start < end; ++start)
8829
{
8830
NormalBlock* dstBlock = vector.GetBlock(start);
8831
if (dstBlock->m_pMetadata->GetSumFreeSize() >= data.size)
8832
{
8833
if (SUCCEEDED(vector.AllocateFromBlock(dstBlock,
8834
data.size,
8835
data.alignment,
8836
data.flags,
8837
this,
8838
0,
8839
&data.move.pDstTmpAllocation)))
8840
{
8841
m_Moves.push_back(data.move);
8842
if (IncrementCounters(data.size))
8843
return true;
8844
break;
8845
}
8846
}
8847
}
8848
return false;
8849
}
8850
8851
bool DefragmentationContextPimpl::ComputeDefragmentation_Fast(BlockVector& vector)
8852
{
8853
// Move only between blocks
8854
8855
// Go through allocations in last blocks and try to fit them inside first ones
8856
for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i)
8857
{
8858
BlockMetadata* metadata = vector.GetBlock(i)->m_pMetadata;
8859
8860
for (AllocHandle handle = metadata->GetAllocationListBegin();
8861
handle != (AllocHandle)0;
8862
handle = metadata->GetNextAllocation(handle))
8863
{
8864
MoveAllocationData moveData = GetMoveData(handle, metadata);
8865
// Ignore newly created allocations by defragmentation algorithm
8866
if (moveData.move.pSrcAllocation->GetPrivateData() == this)
8867
continue;
8868
switch (CheckCounters(moveData.move.pSrcAllocation->GetSize()))
8869
{
8870
case CounterStatus::Ignore:
8871
continue;
8872
case CounterStatus::End:
8873
return true;
8874
default:
8875
D3D12MA_ASSERT(0);
8876
case CounterStatus::Pass:
8877
break;
8878
}
8879
8880
// Check all previous blocks for free space
8881
if (AllocInOtherBlock(0, i, moveData, vector))
8882
return true;
8883
}
8884
}
8885
return false;
8886
}
8887
8888
bool DefragmentationContextPimpl::ComputeDefragmentation_Balanced(BlockVector& vector, size_t index, bool update)
8889
{
8890
// Go over every allocation and try to fit it in previous blocks at lowest offsets,
8891
// if not possible: realloc within single block to minimize offset (exclude offset == 0),
8892
// but only if there are noticable gaps between them (some heuristic, ex. average size of allocation in block)
8893
D3D12MA_ASSERT(m_AlgorithmState != NULL);
8894
8895
StateBalanced& vectorState = reinterpret_cast<StateBalanced*>(m_AlgorithmState)[index];
8896
if (update && vectorState.avgAllocSize == UINT64_MAX)
8897
UpdateVectorStatistics(vector, vectorState);
8898
8899
const size_t startMoveCount = m_Moves.size();
8900
UINT64 minimalFreeRegion = vectorState.avgFreeSize / 2;
8901
for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i)
8902
{
8903
NormalBlock* block = vector.GetBlock(i);
8904
BlockMetadata* metadata = block->m_pMetadata;
8905
UINT64 prevFreeRegionSize = 0;
8906
8907
for (AllocHandle handle = metadata->GetAllocationListBegin();
8908
handle != (AllocHandle)0;
8909
handle = metadata->GetNextAllocation(handle))
8910
{
8911
MoveAllocationData moveData = GetMoveData(handle, metadata);
8912
// Ignore newly created allocations by defragmentation algorithm
8913
if (moveData.move.pSrcAllocation->GetPrivateData() == this)
8914
continue;
8915
switch (CheckCounters(moveData.move.pSrcAllocation->GetSize()))
8916
{
8917
case CounterStatus::Ignore:
8918
continue;
8919
case CounterStatus::End:
8920
return true;
8921
default:
8922
D3D12MA_ASSERT(0);
8923
case CounterStatus::Pass:
8924
break;
8925
}
8926
8927
// Check all previous blocks for free space
8928
const size_t prevMoveCount = m_Moves.size();
8929
if (AllocInOtherBlock(0, i, moveData, vector))
8930
return true;
8931
8932
UINT64 nextFreeRegionSize = metadata->GetNextFreeRegionSize(handle);
8933
// If no room found then realloc within block for lower offset
8934
UINT64 offset = moveData.move.pSrcAllocation->GetOffset();
8935
if (prevMoveCount == m_Moves.size() && offset != 0 && metadata->GetSumFreeSize() >= moveData.size)
8936
{
8937
// Check if realloc will make sense
8938
if (prevFreeRegionSize >= minimalFreeRegion ||
8939
nextFreeRegionSize >= minimalFreeRegion ||
8940
moveData.size <= vectorState.avgFreeSize ||
8941
moveData.size <= vectorState.avgAllocSize)
8942
{
8943
AllocationRequest request = {};
8944
if (metadata->CreateAllocationRequest(
8945
moveData.size,
8946
moveData.alignment,
8947
false,
8948
ALLOCATION_FLAG_STRATEGY_MIN_OFFSET,
8949
&request))
8950
{
8951
if (metadata->GetAllocationOffset(request.allocHandle) < offset)
8952
{
8953
if (SUCCEEDED(vector.CommitAllocationRequest(
8954
request,
8955
block,
8956
moveData.size,
8957
moveData.alignment,
8958
this,
8959
&moveData.move.pDstTmpAllocation)))
8960
{
8961
m_Moves.push_back(moveData.move);
8962
if (IncrementCounters(moveData.size))
8963
return true;
8964
}
8965
}
8966
}
8967
}
8968
}
8969
prevFreeRegionSize = nextFreeRegionSize;
8970
}
8971
}
8972
8973
// No moves perfomed, update statistics to current vector state
8974
if (startMoveCount == m_Moves.size() && !update)
8975
{
8976
vectorState.avgAllocSize = UINT64_MAX;
8977
return ComputeDefragmentation_Balanced(vector, index, false);
8978
}
8979
return false;
8980
}
8981
8982
bool DefragmentationContextPimpl::ComputeDefragmentation_Full(BlockVector& vector)
8983
{
8984
// Go over every allocation and try to fit it in previous blocks at lowest offsets,
8985
// if not possible: realloc within single block to minimize offset (exclude offset == 0)
8986
8987
for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i)
8988
{
8989
NormalBlock* block = vector.GetBlock(i);
8990
BlockMetadata* metadata = block->m_pMetadata;
8991
8992
for (AllocHandle handle = metadata->GetAllocationListBegin();
8993
handle != (AllocHandle)0;
8994
handle = metadata->GetNextAllocation(handle))
8995
{
8996
MoveAllocationData moveData = GetMoveData(handle, metadata);
8997
// Ignore newly created allocations by defragmentation algorithm
8998
if (moveData.move.pSrcAllocation->GetPrivateData() == this)
8999
continue;
9000
switch (CheckCounters(moveData.move.pSrcAllocation->GetSize()))
9001
{
9002
case CounterStatus::Ignore:
9003
continue;
9004
case CounterStatus::End:
9005
return true;
9006
default:
9007
D3D12MA_ASSERT(0);
9008
case CounterStatus::Pass:
9009
break;
9010
}
9011
9012
// Check all previous blocks for free space
9013
const size_t prevMoveCount = m_Moves.size();
9014
if (AllocInOtherBlock(0, i, moveData, vector))
9015
return true;
9016
9017
// If no room found then realloc within block for lower offset
9018
UINT64 offset = moveData.move.pSrcAllocation->GetOffset();
9019
if (prevMoveCount == m_Moves.size() && offset != 0 && metadata->GetSumFreeSize() >= moveData.size)
9020
{
9021
AllocationRequest request = {};
9022
if (metadata->CreateAllocationRequest(
9023
moveData.size,
9024
moveData.alignment,
9025
false,
9026
ALLOCATION_FLAG_STRATEGY_MIN_OFFSET,
9027
&request))
9028
{
9029
if (metadata->GetAllocationOffset(request.allocHandle) < offset)
9030
{
9031
if (SUCCEEDED(vector.CommitAllocationRequest(
9032
request,
9033
block,
9034
moveData.size,
9035
moveData.alignment,
9036
this,
9037
&moveData.move.pDstTmpAllocation)))
9038
{
9039
m_Moves.push_back(moveData.move);
9040
if (IncrementCounters(moveData.size))
9041
return true;
9042
}
9043
}
9044
}
9045
}
9046
}
9047
}
9048
return false;
9049
}
9050
9051
void DefragmentationContextPimpl::UpdateVectorStatistics(BlockVector& vector, StateBalanced& state)
9052
{
9053
size_t allocCount = 0;
9054
size_t freeCount = 0;
9055
state.avgFreeSize = 0;
9056
state.avgAllocSize = 0;
9057
9058
for (size_t i = 0; i < vector.GetBlockCount(); ++i)
9059
{
9060
BlockMetadata* metadata = vector.GetBlock(i)->m_pMetadata;
9061
9062
allocCount += metadata->GetAllocationCount();
9063
freeCount += metadata->GetFreeRegionsCount();
9064
state.avgFreeSize += metadata->GetSumFreeSize();
9065
state.avgAllocSize += metadata->GetSize();
9066
}
9067
9068
state.avgAllocSize = (state.avgAllocSize - state.avgFreeSize) / allocCount;
9069
state.avgFreeSize /= freeCount;
9070
}
9071
#endif // _D3D12MA_DEFRAGMENTATION_CONTEXT_PIMPL_FUNCTIONS
9072
9073
#ifndef _D3D12MA_POOL_PIMPL_FUNCTIONS
9074
PoolPimpl::PoolPimpl(AllocatorPimpl* allocator, const POOL_DESC& desc)
9075
: m_Allocator(allocator),
9076
m_Desc(desc),
9077
m_BlockVector(NULL),
9078
m_Name(NULL)
9079
{
9080
const bool explicitBlockSize = desc.BlockSize != 0;
9081
const UINT64 preferredBlockSize = explicitBlockSize ? desc.BlockSize : D3D12MA_DEFAULT_BLOCK_SIZE;
9082
UINT maxBlockCount = desc.MaxBlockCount != 0 ? desc.MaxBlockCount : UINT_MAX;
9083
9084
#ifndef __ID3D12Device4_INTERFACE_DEFINED__
9085
D3D12MA_ASSERT(m_Desc.pProtectedSession == NULL);
9086
#endif
9087
9088
m_BlockVector = D3D12MA_NEW(allocator->GetAllocs(), BlockVector)(
9089
allocator, desc.HeapProperties, desc.HeapFlags,
9090
preferredBlockSize,
9091
desc.MinBlockCount, maxBlockCount,
9092
explicitBlockSize,
9093
D3D12MA_MAX(desc.MinAllocationAlignment, (UINT64)D3D12MA_DEBUG_ALIGNMENT),
9094
(desc.Flags & POOL_FLAG_ALGORITHM_MASK) != 0,
9095
(desc.Flags & POOL_FLAG_MSAA_TEXTURES_ALWAYS_COMMITTED) != 0,
9096
desc.pProtectedSession,
9097
desc.ResidencyPriority);
9098
}
9099
9100
PoolPimpl::~PoolPimpl()
9101
{
9102
D3D12MA_ASSERT(m_PrevPool == NULL && m_NextPool == NULL);
9103
FreeName();
9104
D3D12MA_DELETE(m_Allocator->GetAllocs(), m_BlockVector);
9105
}
9106
9107
HRESULT PoolPimpl::Init()
9108
{
9109
m_CommittedAllocations.Init(m_Allocator->UseMutex(), m_Desc.HeapProperties.Type, this);
9110
return m_BlockVector->CreateMinBlocks();
9111
}
9112
9113
void PoolPimpl::GetStatistics(Statistics& outStats)
9114
{
9115
ClearStatistics(outStats);
9116
m_BlockVector->AddStatistics(outStats);
9117
m_CommittedAllocations.AddStatistics(outStats);
9118
}
9119
9120
void PoolPimpl::CalculateStatistics(DetailedStatistics& outStats)
9121
{
9122
ClearDetailedStatistics(outStats);
9123
AddDetailedStatistics(outStats);
9124
}
9125
9126
void PoolPimpl::AddDetailedStatistics(DetailedStatistics& inoutStats)
9127
{
9128
m_BlockVector->AddDetailedStatistics(inoutStats);
9129
m_CommittedAllocations.AddDetailedStatistics(inoutStats);
9130
}
9131
9132
void PoolPimpl::SetName(LPCWSTR Name)
9133
{
9134
FreeName();
9135
9136
if (Name)
9137
{
9138
const size_t nameCharCount = wcslen(Name) + 1;
9139
m_Name = D3D12MA_NEW_ARRAY(m_Allocator->GetAllocs(), WCHAR, nameCharCount);
9140
memcpy(m_Name, Name, nameCharCount * sizeof(WCHAR));
9141
}
9142
}
9143
9144
void PoolPimpl::FreeName()
9145
{
9146
if (m_Name)
9147
{
9148
const size_t nameCharCount = wcslen(m_Name) + 1;
9149
D3D12MA_DELETE_ARRAY(m_Allocator->GetAllocs(), m_Name, nameCharCount);
9150
m_Name = NULL;
9151
}
9152
}
9153
#endif // _D3D12MA_POOL_PIMPL_FUNCTIONS
9154
9155
9156
#ifndef _D3D12MA_PUBLIC_INTERFACE
9157
HRESULT CreateAllocator(const ALLOCATOR_DESC* pDesc, Allocator** ppAllocator)
9158
{
9159
if (!pDesc || !ppAllocator || !pDesc->pDevice || !pDesc->pAdapter ||
9160
!(pDesc->PreferredBlockSize == 0 || (pDesc->PreferredBlockSize >= 16 && pDesc->PreferredBlockSize < 0x10000000000ull)))
9161
{
9162
D3D12MA_ASSERT(0 && "Invalid arguments passed to CreateAllocator.");
9163
return E_INVALIDARG;
9164
}
9165
9166
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
9167
9168
ALLOCATION_CALLBACKS allocationCallbacks;
9169
SetupAllocationCallbacks(allocationCallbacks, pDesc->pAllocationCallbacks);
9170
9171
*ppAllocator = D3D12MA_NEW(allocationCallbacks, Allocator)(allocationCallbacks, *pDesc);
9172
HRESULT hr = (*ppAllocator)->m_Pimpl->Init(*pDesc);
9173
if (FAILED(hr))
9174
{
9175
D3D12MA_DELETE(allocationCallbacks, *ppAllocator);
9176
*ppAllocator = NULL;
9177
}
9178
return hr;
9179
}
9180
9181
HRESULT CreateVirtualBlock(const VIRTUAL_BLOCK_DESC* pDesc, VirtualBlock** ppVirtualBlock)
9182
{
9183
if (!pDesc || !ppVirtualBlock)
9184
{
9185
D3D12MA_ASSERT(0 && "Invalid arguments passed to CreateVirtualBlock.");
9186
return E_INVALIDARG;
9187
}
9188
9189
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
9190
9191
ALLOCATION_CALLBACKS allocationCallbacks;
9192
SetupAllocationCallbacks(allocationCallbacks, pDesc->pAllocationCallbacks);
9193
9194
*ppVirtualBlock = D3D12MA_NEW(allocationCallbacks, VirtualBlock)(allocationCallbacks, *pDesc);
9195
return S_OK;
9196
}
9197
9198
#ifndef _D3D12MA_IUNKNOWN_IMPL_FUNCTIONS
9199
HRESULT STDMETHODCALLTYPE IUnknownImpl::QueryInterface(REFIID riid, void** ppvObject)
9200
{
9201
if (ppvObject == NULL)
9202
return E_POINTER;
9203
if (riid == IID_IUnknown)
9204
{
9205
++m_RefCount;
9206
*ppvObject = this;
9207
return S_OK;
9208
}
9209
*ppvObject = NULL;
9210
return E_NOINTERFACE;
9211
}
9212
9213
ULONG STDMETHODCALLTYPE IUnknownImpl::AddRef()
9214
{
9215
return ++m_RefCount;
9216
}
9217
9218
ULONG STDMETHODCALLTYPE IUnknownImpl::Release()
9219
{
9220
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
9221
9222
const uint32_t newRefCount = --m_RefCount;
9223
if (newRefCount == 0)
9224
ReleaseThis();
9225
return newRefCount;
9226
}
9227
#endif // _D3D12MA_IUNKNOWN_IMPL_FUNCTIONS
9228
9229
#ifndef _D3D12MA_ALLOCATION_FUNCTIONS
9230
void Allocation::PackedData::SetType(Type type)
9231
{
9232
const UINT u = (UINT)type;
9233
D3D12MA_ASSERT(u < (1u << 2));
9234
m_Type = u;
9235
}
9236
9237
void Allocation::PackedData::SetResourceDimension(D3D12_RESOURCE_DIMENSION resourceDimension)
9238
{
9239
const UINT u = (UINT)resourceDimension;
9240
D3D12MA_ASSERT(u < (1u << 3));
9241
m_ResourceDimension = u;
9242
}
9243
9244
void Allocation::PackedData::SetResourceFlags(D3D12_RESOURCE_FLAGS resourceFlags)
9245
{
9246
const UINT u = (UINT)resourceFlags;
9247
D3D12MA_ASSERT(u < (1u << 24));
9248
m_ResourceFlags = u;
9249
}
9250
9251
void Allocation::PackedData::SetTextureLayout(D3D12_TEXTURE_LAYOUT textureLayout)
9252
{
9253
const UINT u = (UINT)textureLayout;
9254
D3D12MA_ASSERT(u < (1u << 9));
9255
m_TextureLayout = u;
9256
}
9257
9258
UINT64 Allocation::GetOffset() const
9259
{
9260
switch (m_PackedData.GetType())
9261
{
9262
case TYPE_COMMITTED:
9263
case TYPE_HEAP:
9264
return 0;
9265
case TYPE_PLACED:
9266
return m_Placed.block->m_pMetadata->GetAllocationOffset(m_Placed.allocHandle);
9267
default:
9268
D3D12MA_ASSERT(0);
9269
return 0;
9270
}
9271
}
9272
9273
void Allocation::SetResource(ID3D12Resource* pResource)
9274
{
9275
if (pResource != m_Resource)
9276
{
9277
if (m_Resource)
9278
m_Resource->Release();
9279
m_Resource = pResource;
9280
if (m_Resource)
9281
m_Resource->AddRef();
9282
}
9283
}
9284
9285
ID3D12Heap* Allocation::GetHeap() const
9286
{
9287
switch (m_PackedData.GetType())
9288
{
9289
case TYPE_COMMITTED:
9290
return NULL;
9291
case TYPE_PLACED:
9292
return m_Placed.block->GetHeap();
9293
case TYPE_HEAP:
9294
return m_Heap.heap;
9295
default:
9296
D3D12MA_ASSERT(0);
9297
return 0;
9298
}
9299
}
9300
9301
void Allocation::SetName(LPCWSTR Name)
9302
{
9303
FreeName();
9304
9305
if (Name)
9306
{
9307
const size_t nameCharCount = wcslen(Name) + 1;
9308
m_Name = D3D12MA_NEW_ARRAY(m_Allocator->GetAllocs(), WCHAR, nameCharCount);
9309
memcpy(m_Name, Name, nameCharCount * sizeof(WCHAR));
9310
}
9311
}
9312
9313
void Allocation::ReleaseThis()
9314
{
9315
SAFE_RELEASE(m_Resource);
9316
9317
switch (m_PackedData.GetType())
9318
{
9319
case TYPE_COMMITTED:
9320
m_Allocator->FreeCommittedMemory(this);
9321
break;
9322
case TYPE_PLACED:
9323
m_Allocator->FreePlacedMemory(this);
9324
break;
9325
case TYPE_HEAP:
9326
m_Allocator->FreeHeapMemory(this);
9327
break;
9328
}
9329
9330
FreeName();
9331
9332
m_Allocator->GetAllocationObjectAllocator().Free(this);
9333
}
9334
9335
Allocation::Allocation(AllocatorPimpl* allocator, UINT64 size, UINT64 alignment)
9336
: m_Allocator{ allocator },
9337
m_Size{ size },
9338
m_Alignment{ alignment },
9339
m_Resource{ NULL },
9340
m_pPrivateData{ NULL },
9341
m_Name{ NULL }
9342
{
9343
D3D12MA_ASSERT(allocator);
9344
9345
m_PackedData.SetType(TYPE_COUNT);
9346
m_PackedData.SetResourceDimension(D3D12_RESOURCE_DIMENSION_UNKNOWN);
9347
m_PackedData.SetResourceFlags(D3D12_RESOURCE_FLAG_NONE);
9348
m_PackedData.SetTextureLayout(D3D12_TEXTURE_LAYOUT_UNKNOWN);
9349
}
9350
9351
void Allocation::InitCommitted(CommittedAllocationList* list)
9352
{
9353
m_PackedData.SetType(TYPE_COMMITTED);
9354
m_Committed.list = list;
9355
m_Committed.prev = NULL;
9356
m_Committed.next = NULL;
9357
}
9358
9359
void Allocation::InitPlaced(AllocHandle allocHandle, NormalBlock* block)
9360
{
9361
m_PackedData.SetType(TYPE_PLACED);
9362
m_Placed.allocHandle = allocHandle;
9363
m_Placed.block = block;
9364
}
9365
9366
void Allocation::InitHeap(CommittedAllocationList* list, ID3D12Heap* heap)
9367
{
9368
m_PackedData.SetType(TYPE_HEAP);
9369
m_Heap.list = list;
9370
m_Committed.prev = NULL;
9371
m_Committed.next = NULL;
9372
m_Heap.heap = heap;
9373
}
9374
9375
void Allocation::SwapBlockAllocation(Allocation* allocation)
9376
{
9377
D3D12MA_ASSERT(allocation != NULL);
9378
D3D12MA_ASSERT(m_PackedData.GetType() == TYPE_PLACED);
9379
D3D12MA_ASSERT(allocation->m_PackedData.GetType() == TYPE_PLACED);
9380
9381
D3D12MA_SWAP(m_Resource, allocation->m_Resource);
9382
m_Placed.block->m_pMetadata->SetAllocationPrivateData(m_Placed.allocHandle, allocation);
9383
D3D12MA_SWAP(m_Placed, allocation->m_Placed);
9384
m_Placed.block->m_pMetadata->SetAllocationPrivateData(m_Placed.allocHandle, this);
9385
}
9386
9387
AllocHandle Allocation::GetAllocHandle() const
9388
{
9389
switch (m_PackedData.GetType())
9390
{
9391
case TYPE_COMMITTED:
9392
case TYPE_HEAP:
9393
return (AllocHandle)0;
9394
case TYPE_PLACED:
9395
return m_Placed.allocHandle;
9396
default:
9397
D3D12MA_ASSERT(0);
9398
return (AllocHandle)0;
9399
}
9400
}
9401
9402
NormalBlock* Allocation::GetBlock()
9403
{
9404
switch (m_PackedData.GetType())
9405
{
9406
case TYPE_COMMITTED:
9407
case TYPE_HEAP:
9408
return NULL;
9409
case TYPE_PLACED:
9410
return m_Placed.block;
9411
default:
9412
D3D12MA_ASSERT(0);
9413
return NULL;
9414
}
9415
}
9416
9417
template<typename D3D12_RESOURCE_DESC_T>
9418
void Allocation::SetResourcePointer(ID3D12Resource* resource, const D3D12_RESOURCE_DESC_T* pResourceDesc)
9419
{
9420
D3D12MA_ASSERT(m_Resource == NULL && pResourceDesc);
9421
m_Resource = resource;
9422
m_PackedData.SetResourceDimension(pResourceDesc->Dimension);
9423
m_PackedData.SetResourceFlags(pResourceDesc->Flags);
9424
m_PackedData.SetTextureLayout(pResourceDesc->Layout);
9425
}
9426
9427
void Allocation::FreeName()
9428
{
9429
if (m_Name)
9430
{
9431
const size_t nameCharCount = wcslen(m_Name) + 1;
9432
D3D12MA_DELETE_ARRAY(m_Allocator->GetAllocs(), m_Name, nameCharCount);
9433
m_Name = NULL;
9434
}
9435
}
9436
#endif // _D3D12MA_ALLOCATION_FUNCTIONS
9437
9438
#ifndef _D3D12MA_DEFRAGMENTATION_CONTEXT_FUNCTIONS
9439
HRESULT DefragmentationContext::BeginPass(DEFRAGMENTATION_PASS_MOVE_INFO* pPassInfo)
9440
{
9441
D3D12MA_ASSERT(pPassInfo);
9442
return m_Pimpl->DefragmentPassBegin(*pPassInfo);
9443
}
9444
9445
HRESULT DefragmentationContext::EndPass(DEFRAGMENTATION_PASS_MOVE_INFO* pPassInfo)
9446
{
9447
D3D12MA_ASSERT(pPassInfo);
9448
return m_Pimpl->DefragmentPassEnd(*pPassInfo);
9449
}
9450
9451
void DefragmentationContext::GetStats(DEFRAGMENTATION_STATS* pStats)
9452
{
9453
D3D12MA_ASSERT(pStats);
9454
m_Pimpl->GetStats(*pStats);
9455
}
9456
9457
void DefragmentationContext::ReleaseThis()
9458
{
9459
D3D12MA_DELETE(m_Pimpl->GetAllocs(), this);
9460
}
9461
9462
DefragmentationContext::DefragmentationContext(AllocatorPimpl* allocator,
9463
const DEFRAGMENTATION_DESC& desc,
9464
BlockVector* poolVector)
9465
: m_Pimpl(D3D12MA_NEW(allocator->GetAllocs(), DefragmentationContextPimpl)(allocator, desc, poolVector)) {}
9466
9467
DefragmentationContext::~DefragmentationContext()
9468
{
9469
D3D12MA_DELETE(m_Pimpl->GetAllocs(), m_Pimpl);
9470
}
9471
#endif // _D3D12MA_DEFRAGMENTATION_CONTEXT_FUNCTIONS
9472
9473
#ifndef _D3D12MA_POOL_FUNCTIONS
9474
POOL_DESC Pool::GetDesc() const
9475
{
9476
return m_Pimpl->GetDesc();
9477
}
9478
9479
void Pool::GetStatistics(Statistics* pStats)
9480
{
9481
D3D12MA_ASSERT(pStats);
9482
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
9483
m_Pimpl->GetStatistics(*pStats);
9484
}
9485
9486
void Pool::CalculateStatistics(DetailedStatistics* pStats)
9487
{
9488
D3D12MA_ASSERT(pStats);
9489
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
9490
m_Pimpl->CalculateStatistics(*pStats);
9491
}
9492
9493
void Pool::SetName(LPCWSTR Name)
9494
{
9495
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
9496
m_Pimpl->SetName(Name);
9497
}
9498
9499
LPCWSTR Pool::GetName() const
9500
{
9501
return m_Pimpl->GetName();
9502
}
9503
9504
HRESULT Pool::BeginDefragmentation(const DEFRAGMENTATION_DESC* pDesc, DefragmentationContext** ppContext)
9505
{
9506
D3D12MA_ASSERT(pDesc && ppContext);
9507
9508
// Check for support
9509
if (m_Pimpl->GetBlockVector()->GetAlgorithm() & POOL_FLAG_ALGORITHM_LINEAR)
9510
return E_NOINTERFACE;
9511
if(m_Pimpl->AlwaysCommitted())
9512
return E_NOINTERFACE;
9513
9514
AllocatorPimpl* allocator = m_Pimpl->GetAllocator();
9515
*ppContext = D3D12MA_NEW(allocator->GetAllocs(), DefragmentationContext)(allocator, *pDesc, m_Pimpl->GetBlockVector());
9516
return S_OK;
9517
}
9518
9519
void Pool::ReleaseThis()
9520
{
9521
D3D12MA_DELETE(m_Pimpl->GetAllocator()->GetAllocs(), this);
9522
}
9523
9524
Pool::Pool(Allocator* allocator, const POOL_DESC& desc)
9525
: m_Pimpl(D3D12MA_NEW(allocator->m_Pimpl->GetAllocs(), PoolPimpl)(allocator->m_Pimpl, desc)) {}
9526
9527
Pool::~Pool()
9528
{
9529
m_Pimpl->GetAllocator()->UnregisterPool(this, m_Pimpl->GetDesc().HeapProperties.Type);
9530
9531
D3D12MA_DELETE(m_Pimpl->GetAllocator()->GetAllocs(), m_Pimpl);
9532
}
9533
#endif // _D3D12MA_POOL_FUNCTIONS
9534
9535
#ifndef _D3D12MA_ALLOCATOR_FUNCTIONS
9536
const D3D12_FEATURE_DATA_D3D12_OPTIONS& Allocator::GetD3D12Options() const
9537
{
9538
return m_Pimpl->GetD3D12Options();
9539
}
9540
9541
BOOL Allocator::IsUMA() const
9542
{
9543
return m_Pimpl->IsUMA();
9544
}
9545
9546
BOOL Allocator::IsCacheCoherentUMA() const
9547
{
9548
return m_Pimpl->IsCacheCoherentUMA();
9549
}
9550
9551
BOOL Allocator::IsGPUUploadHeapSupported() const
9552
{
9553
return m_Pimpl->IsGPUUploadHeapSupported();
9554
}
9555
9556
UINT64 Allocator::GetMemoryCapacity(UINT memorySegmentGroup) const
9557
{
9558
return m_Pimpl->GetMemoryCapacity(memorySegmentGroup);
9559
}
9560
9561
HRESULT Allocator::CreateResource(
9562
const ALLOCATION_DESC* pAllocDesc,
9563
const D3D12_RESOURCE_DESC* pResourceDesc,
9564
D3D12_RESOURCE_STATES InitialResourceState,
9565
const D3D12_CLEAR_VALUE* pOptimizedClearValue,
9566
Allocation** ppAllocation,
9567
REFIID riidResource,
9568
void** ppvResource)
9569
{
9570
if (!pAllocDesc || !pResourceDesc || !ppAllocation)
9571
{
9572
D3D12MA_ASSERT(0 && "Invalid arguments passed to Allocator::CreateResource.");
9573
return E_INVALIDARG;
9574
}
9575
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
9576
return m_Pimpl->CreateResource(
9577
pAllocDesc,
9578
CREATE_RESOURCE_PARAMS(pResourceDesc, InitialResourceState, pOptimizedClearValue),
9579
ppAllocation,
9580
riidResource,
9581
ppvResource);
9582
}
9583
9584
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
9585
HRESULT Allocator::CreateResource2(
9586
const ALLOCATION_DESC* pAllocDesc,
9587
const D3D12_RESOURCE_DESC1* pResourceDesc,
9588
D3D12_RESOURCE_STATES InitialResourceState,
9589
const D3D12_CLEAR_VALUE* pOptimizedClearValue,
9590
Allocation** ppAllocation,
9591
REFIID riidResource,
9592
void** ppvResource)
9593
{
9594
if (!pAllocDesc || !pResourceDesc || !ppAllocation)
9595
{
9596
D3D12MA_ASSERT(0 && "Invalid arguments passed to Allocator::CreateResource2.");
9597
return E_INVALIDARG;
9598
}
9599
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
9600
return m_Pimpl->CreateResource(
9601
pAllocDesc,
9602
CREATE_RESOURCE_PARAMS(pResourceDesc, InitialResourceState, pOptimizedClearValue),
9603
ppAllocation,
9604
riidResource,
9605
ppvResource);
9606
}
9607
#endif // #ifdef __ID3D12Device8_INTERFACE_DEFINED__
9608
9609
#ifdef __ID3D12Device10_INTERFACE_DEFINED__
9610
HRESULT Allocator::CreateResource3(
9611
const ALLOCATION_DESC* pAllocDesc,
9612
const D3D12_RESOURCE_DESC1* pResourceDesc,
9613
D3D12_BARRIER_LAYOUT InitialLayout,
9614
const D3D12_CLEAR_VALUE* pOptimizedClearValue,
9615
UINT32 NumCastableFormats,
9616
DXGI_FORMAT* pCastableFormats,
9617
Allocation** ppAllocation,
9618
REFIID riidResource,
9619
void** ppvResource)
9620
{
9621
if (!pAllocDesc || !pResourceDesc || !ppAllocation)
9622
{
9623
D3D12MA_ASSERT(0 && "Invalid arguments passed to Allocator::CreateResource3.");
9624
return E_INVALIDARG;
9625
}
9626
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
9627
return m_Pimpl->CreateResource(
9628
pAllocDesc,
9629
CREATE_RESOURCE_PARAMS(pResourceDesc, InitialLayout, pOptimizedClearValue, NumCastableFormats, pCastableFormats),
9630
ppAllocation,
9631
riidResource,
9632
ppvResource);
9633
}
9634
#endif // #ifdef __ID3D12Device10_INTERFACE_DEFINED__
9635
9636
HRESULT Allocator::AllocateMemory(
9637
const ALLOCATION_DESC* pAllocDesc,
9638
const D3D12_RESOURCE_ALLOCATION_INFO* pAllocInfo,
9639
Allocation** ppAllocation)
9640
{
9641
if (!ValidateAllocateMemoryParameters(pAllocDesc, pAllocInfo, ppAllocation))
9642
{
9643
D3D12MA_ASSERT(0 && "Invalid arguments passed to Allocator::AllocateMemory.");
9644
return E_INVALIDARG;
9645
}
9646
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
9647
return m_Pimpl->AllocateMemory(pAllocDesc, pAllocInfo, ppAllocation);
9648
}
9649
9650
HRESULT Allocator::CreateAliasingResource(
9651
Allocation* pAllocation,
9652
UINT64 AllocationLocalOffset,
9653
const D3D12_RESOURCE_DESC* pResourceDesc,
9654
D3D12_RESOURCE_STATES InitialResourceState,
9655
const D3D12_CLEAR_VALUE* pOptimizedClearValue,
9656
REFIID riidResource,
9657
void** ppvResource)
9658
{
9659
if (!pAllocation || !pResourceDesc || !ppvResource)
9660
{
9661
D3D12MA_ASSERT(0 && "Invalid arguments passed to Allocator::CreateAliasingResource.");
9662
return E_INVALIDARG;
9663
}
9664
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
9665
return m_Pimpl->CreateAliasingResource(
9666
pAllocation,
9667
AllocationLocalOffset,
9668
CREATE_RESOURCE_PARAMS(pResourceDesc, InitialResourceState, pOptimizedClearValue),
9669
riidResource,
9670
ppvResource);
9671
}
9672
9673
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
9674
HRESULT Allocator::CreateAliasingResource1(
9675
Allocation* pAllocation,
9676
UINT64 AllocationLocalOffset,
9677
const D3D12_RESOURCE_DESC1* pResourceDesc,
9678
D3D12_RESOURCE_STATES InitialResourceState,
9679
const D3D12_CLEAR_VALUE* pOptimizedClearValue,
9680
REFIID riidResource,
9681
void** ppvResource)
9682
{
9683
if (!pAllocation || !pResourceDesc || !ppvResource)
9684
{
9685
D3D12MA_ASSERT(0 && "Invalid arguments passed to Allocator::CreateAliasingResource.");
9686
return E_INVALIDARG;
9687
}
9688
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
9689
return m_Pimpl->CreateAliasingResource(
9690
pAllocation,
9691
AllocationLocalOffset,
9692
CREATE_RESOURCE_PARAMS(pResourceDesc, InitialResourceState, pOptimizedClearValue),
9693
riidResource,
9694
ppvResource);
9695
}
9696
#endif // #ifdef __ID3D12Device8_INTERFACE_DEFINED__
9697
9698
#ifdef __ID3D12Device10_INTERFACE_DEFINED__
9699
HRESULT Allocator::CreateAliasingResource2(
9700
Allocation* pAllocation,
9701
UINT64 AllocationLocalOffset,
9702
const D3D12_RESOURCE_DESC1* pResourceDesc,
9703
D3D12_BARRIER_LAYOUT InitialLayout,
9704
const D3D12_CLEAR_VALUE* pOptimizedClearValue,
9705
UINT32 NumCastableFormats,
9706
DXGI_FORMAT* pCastableFormats,
9707
REFIID riidResource,
9708
void** ppvResource)
9709
{
9710
if (!pAllocation || !pResourceDesc || !ppvResource)
9711
{
9712
D3D12MA_ASSERT(0 && "Invalid arguments passed to Allocator::CreateAliasingResource.");
9713
return E_INVALIDARG;
9714
}
9715
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
9716
return m_Pimpl->CreateAliasingResource(
9717
pAllocation,
9718
AllocationLocalOffset,
9719
CREATE_RESOURCE_PARAMS(pResourceDesc, InitialLayout, pOptimizedClearValue, NumCastableFormats, pCastableFormats),
9720
riidResource,
9721
ppvResource);
9722
}
9723
#endif // #ifdef __ID3D12Device10_INTERFACE_DEFINED__
9724
9725
HRESULT Allocator::CreatePool(
9726
const POOL_DESC* pPoolDesc,
9727
Pool** ppPool)
9728
{
9729
if (!pPoolDesc || !ppPool ||
9730
(pPoolDesc->MaxBlockCount > 0 && pPoolDesc->MaxBlockCount < pPoolDesc->MinBlockCount) ||
9731
(pPoolDesc->MinAllocationAlignment > 0 && !IsPow2(pPoolDesc->MinAllocationAlignment)))
9732
{
9733
D3D12MA_ASSERT(0 && "Invalid arguments passed to Allocator::CreatePool.");
9734
return E_INVALIDARG;
9735
}
9736
if ((pPoolDesc->Flags & POOL_FLAG_ALWAYS_COMMITTED) != 0 &&
9737
(pPoolDesc->BlockSize != 0 || pPoolDesc->MinBlockCount > 0))
9738
{
9739
D3D12MA_ASSERT(0 && "Invalid arguments passed to Allocator::CreatePool while POOL_FLAG_ALWAYS_COMMITTED is specified.");
9740
return E_INVALIDARG;
9741
}
9742
if (!m_Pimpl->HeapFlagsFulfillResourceHeapTier(pPoolDesc->HeapFlags))
9743
{
9744
D3D12MA_ASSERT(0 && "Invalid pPoolDesc->HeapFlags passed to Allocator::CreatePool. Did you forget to handle ResourceHeapTier=1?");
9745
return E_INVALIDARG;
9746
}
9747
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
9748
* ppPool = D3D12MA_NEW(m_Pimpl->GetAllocs(), Pool)(this, *pPoolDesc);
9749
HRESULT hr = (*ppPool)->m_Pimpl->Init();
9750
if (SUCCEEDED(hr))
9751
{
9752
m_Pimpl->RegisterPool(*ppPool, pPoolDesc->HeapProperties.Type);
9753
}
9754
else
9755
{
9756
D3D12MA_DELETE(m_Pimpl->GetAllocs(), *ppPool);
9757
*ppPool = NULL;
9758
}
9759
return hr;
9760
}
9761
9762
void Allocator::SetCurrentFrameIndex(UINT frameIndex)
9763
{
9764
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
9765
m_Pimpl->SetCurrentFrameIndex(frameIndex);
9766
}
9767
9768
void Allocator::GetBudget(Budget* pLocalBudget, Budget* pNonLocalBudget)
9769
{
9770
if (pLocalBudget == NULL && pNonLocalBudget == NULL)
9771
{
9772
return;
9773
}
9774
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
9775
m_Pimpl->GetBudget(pLocalBudget, pNonLocalBudget);
9776
}
9777
9778
void Allocator::CalculateStatistics(TotalStatistics* pStats)
9779
{
9780
D3D12MA_ASSERT(pStats);
9781
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
9782
m_Pimpl->CalculateStatistics(*pStats);
9783
}
9784
9785
void Allocator::BuildStatsString(WCHAR** ppStatsString, BOOL DetailedMap) const
9786
{
9787
D3D12MA_ASSERT(ppStatsString);
9788
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
9789
m_Pimpl->BuildStatsString(ppStatsString, DetailedMap);
9790
}
9791
9792
void Allocator::FreeStatsString(WCHAR* pStatsString) const
9793
{
9794
if (pStatsString != NULL)
9795
{
9796
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
9797
m_Pimpl->FreeStatsString(pStatsString);
9798
}
9799
}
9800
9801
void Allocator::BeginDefragmentation(const DEFRAGMENTATION_DESC* pDesc, DefragmentationContext** ppContext)
9802
{
9803
D3D12MA_ASSERT(pDesc && ppContext);
9804
9805
*ppContext = D3D12MA_NEW(m_Pimpl->GetAllocs(), DefragmentationContext)(m_Pimpl, *pDesc, NULL);
9806
}
9807
9808
void Allocator::ReleaseThis()
9809
{
9810
// Copy is needed because otherwise we would call destructor and invalidate the structure with callbacks before using it to free memory.
9811
const ALLOCATION_CALLBACKS allocationCallbacksCopy = m_Pimpl->GetAllocs();
9812
D3D12MA_DELETE(allocationCallbacksCopy, this);
9813
}
9814
9815
Allocator::Allocator(const ALLOCATION_CALLBACKS& allocationCallbacks, const ALLOCATOR_DESC& desc)
9816
: m_Pimpl(D3D12MA_NEW(allocationCallbacks, AllocatorPimpl)(allocationCallbacks, desc)) {}
9817
9818
Allocator::~Allocator()
9819
{
9820
D3D12MA_DELETE(m_Pimpl->GetAllocs(), m_Pimpl);
9821
}
9822
#endif // _D3D12MA_ALLOCATOR_FUNCTIONS
9823
9824
#ifndef _D3D12MA_VIRTUAL_BLOCK_FUNCTIONS
9825
BOOL VirtualBlock::IsEmpty() const
9826
{
9827
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
9828
return m_Pimpl->m_Metadata->IsEmpty() ? TRUE : FALSE;
9829
}
9830
9831
void VirtualBlock::GetAllocationInfo(VirtualAllocation allocation, VIRTUAL_ALLOCATION_INFO* pInfo) const
9832
{
9833
D3D12MA_ASSERT(allocation.AllocHandle != (AllocHandle)0 && pInfo);
9834
9835
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
9836
m_Pimpl->m_Metadata->GetAllocationInfo(allocation.AllocHandle, *pInfo);
9837
}
9838
9839
HRESULT VirtualBlock::Allocate(const VIRTUAL_ALLOCATION_DESC* pDesc, VirtualAllocation* pAllocation, UINT64* pOffset)
9840
{
9841
if (!pDesc || !pAllocation || pDesc->Size == 0 || !IsPow2(pDesc->Alignment))
9842
{
9843
D3D12MA_ASSERT(0 && "Invalid arguments passed to VirtualBlock::Allocate.");
9844
return E_INVALIDARG;
9845
}
9846
9847
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
9848
9849
const UINT64 alignment = pDesc->Alignment != 0 ? pDesc->Alignment : 1;
9850
AllocationRequest allocRequest = {};
9851
if (m_Pimpl->m_Metadata->CreateAllocationRequest(
9852
pDesc->Size,
9853
alignment,
9854
pDesc->Flags & VIRTUAL_ALLOCATION_FLAG_UPPER_ADDRESS,
9855
pDesc->Flags & VIRTUAL_ALLOCATION_FLAG_STRATEGY_MASK,
9856
&allocRequest))
9857
{
9858
m_Pimpl->m_Metadata->Alloc(allocRequest, pDesc->Size, pDesc->pPrivateData);
9859
D3D12MA_HEAVY_ASSERT(m_Pimpl->m_Metadata->Validate());
9860
pAllocation->AllocHandle = allocRequest.allocHandle;
9861
9862
if (pOffset)
9863
*pOffset = m_Pimpl->m_Metadata->GetAllocationOffset(allocRequest.allocHandle);
9864
return S_OK;
9865
}
9866
9867
pAllocation->AllocHandle = (AllocHandle)0;
9868
if (pOffset)
9869
*pOffset = UINT64_MAX;
9870
9871
return E_OUTOFMEMORY;
9872
}
9873
9874
void VirtualBlock::FreeAllocation(VirtualAllocation allocation)
9875
{
9876
if (allocation.AllocHandle == (AllocHandle)0)
9877
return;
9878
9879
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
9880
9881
m_Pimpl->m_Metadata->Free(allocation.AllocHandle);
9882
D3D12MA_HEAVY_ASSERT(m_Pimpl->m_Metadata->Validate());
9883
}
9884
9885
void VirtualBlock::Clear()
9886
{
9887
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
9888
9889
m_Pimpl->m_Metadata->Clear();
9890
D3D12MA_HEAVY_ASSERT(m_Pimpl->m_Metadata->Validate());
9891
}
9892
9893
void VirtualBlock::SetAllocationPrivateData(VirtualAllocation allocation, void* pPrivateData)
9894
{
9895
D3D12MA_ASSERT(allocation.AllocHandle != (AllocHandle)0);
9896
9897
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
9898
m_Pimpl->m_Metadata->SetAllocationPrivateData(allocation.AllocHandle, pPrivateData);
9899
}
9900
9901
void VirtualBlock::GetStatistics(Statistics* pStats) const
9902
{
9903
D3D12MA_ASSERT(pStats);
9904
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
9905
D3D12MA_HEAVY_ASSERT(m_Pimpl->m_Metadata->Validate());
9906
ClearStatistics(*pStats);
9907
m_Pimpl->m_Metadata->AddStatistics(*pStats);
9908
}
9909
9910
void VirtualBlock::CalculateStatistics(DetailedStatistics* pStats) const
9911
{
9912
D3D12MA_ASSERT(pStats);
9913
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
9914
D3D12MA_HEAVY_ASSERT(m_Pimpl->m_Metadata->Validate());
9915
ClearDetailedStatistics(*pStats);
9916
m_Pimpl->m_Metadata->AddDetailedStatistics(*pStats);
9917
}
9918
9919
void VirtualBlock::BuildStatsString(WCHAR** ppStatsString) const
9920
{
9921
D3D12MA_ASSERT(ppStatsString);
9922
9923
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
9924
9925
StringBuilder sb(m_Pimpl->m_AllocationCallbacks);
9926
{
9927
JsonWriter json(m_Pimpl->m_AllocationCallbacks, sb);
9928
D3D12MA_HEAVY_ASSERT(m_Pimpl->m_Metadata->Validate());
9929
json.BeginObject();
9930
m_Pimpl->m_Metadata->WriteAllocationInfoToJson(json);
9931
json.EndObject();
9932
} // Scope for JsonWriter
9933
9934
const size_t length = sb.GetLength();
9935
WCHAR* result = AllocateArray<WCHAR>(m_Pimpl->m_AllocationCallbacks, length + 1);
9936
memcpy(result, sb.GetData(), length * sizeof(WCHAR));
9937
result[length] = L'\0';
9938
*ppStatsString = result;
9939
}
9940
9941
void VirtualBlock::FreeStatsString(WCHAR* pStatsString) const
9942
{
9943
if (pStatsString != NULL)
9944
{
9945
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
9946
D3D12MA::Free(m_Pimpl->m_AllocationCallbacks, pStatsString);
9947
}
9948
}
9949
9950
void VirtualBlock::ReleaseThis()
9951
{
9952
// Copy is needed because otherwise we would call destructor and invalidate the structure with callbacks before using it to free memory.
9953
const ALLOCATION_CALLBACKS allocationCallbacksCopy = m_Pimpl->m_AllocationCallbacks;
9954
D3D12MA_DELETE(allocationCallbacksCopy, this);
9955
}
9956
9957
VirtualBlock::VirtualBlock(const ALLOCATION_CALLBACKS& allocationCallbacks, const VIRTUAL_BLOCK_DESC& desc)
9958
: m_Pimpl(D3D12MA_NEW(allocationCallbacks, VirtualBlockPimpl)(allocationCallbacks, desc)) {}
9959
9960
VirtualBlock::~VirtualBlock()
9961
{
9962
// THIS IS AN IMPORTANT ASSERT!
9963
// Hitting it means you have some memory leak - unreleased allocations in this virtual block.
9964
D3D12MA_ASSERT(m_Pimpl->m_Metadata->IsEmpty() && "Some allocations were not freed before destruction of this virtual block!");
9965
9966
D3D12MA_DELETE(m_Pimpl->m_AllocationCallbacks, m_Pimpl);
9967
}
9968
#endif // _D3D12MA_VIRTUAL_BLOCK_FUNCTIONS
9969
#endif // _D3D12MA_PUBLIC_INTERFACE
9970
} // namespace D3D12MA
9971
9972