// Source: GitHub repository stenzek/duckstation
// Path: blob/master/dep/d3d12ma/src/D3D12MemAlloc.cpp
1
//
2
// Copyright (c) 2019-2022 Advanced Micro Devices, Inc. All rights reserved.
3
//
4
// Permission is hereby granted, free of charge, to any person obtaining a copy
5
// of this software and associated documentation files (the "Software"), to deal
6
// in the Software without restriction, including without limitation the rights
7
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8
// copies of the Software, and to permit persons to whom the Software is
9
// furnished to do so, subject to the following conditions:
10
//
11
// The above copyright notice and this permission notice shall be included in
12
// all copies or substantial portions of the Software.
13
//
14
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20
// THE SOFTWARE.
21
//
22
23
#include "D3D12MemAlloc.h"
24
25
#include <combaseapi.h>
26
#include <mutex>
27
#include <algorithm>
28
#include <utility>
29
#include <cstdlib>
30
#include <cstdint>
31
#include <malloc.h> // for _aligned_malloc, _aligned_free
32
#ifndef _WIN32
33
#include <shared_mutex>
34
#endif
35
36
////////////////////////////////////////////////////////////////////////////////
37
////////////////////////////////////////////////////////////////////////////////
38
//
39
// Configuration Begin
40
//
41
////////////////////////////////////////////////////////////////////////////////
42
////////////////////////////////////////////////////////////////////////////////
43
#ifndef _D3D12MA_CONFIGURATION
44
45
#ifdef _WIN32
46
#if !defined(WINVER) || WINVER < 0x0600
47
#error Requires at least a WinAPI version supporting: client = Windows Vista, server = Windows Server 2008.
48
#endif
49
#endif
50
51
#ifndef D3D12MA_SORT
52
#define D3D12MA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
53
#endif
54
55
#ifndef D3D12MA_D3D12_HEADERS_ALREADY_INCLUDED
56
#include <dxgi.h>
57
#if D3D12MA_DXGI_1_4
58
#include <dxgi1_4.h>
59
#endif
60
#endif
61
62
#ifndef D3D12MA_ASSERT
63
#include <cassert>
64
#define D3D12MA_ASSERT(cond) assert(cond)
65
#endif
66
67
// Assert that will be called very often, e.g. inside data structures such as operator[].
68
// Making it non-empty can make the program slow.
69
#ifndef D3D12MA_HEAVY_ASSERT
70
#ifdef _DEBUG
71
#define D3D12MA_HEAVY_ASSERT(expr) //D3D12MA_ASSERT(expr)
72
#else
73
#define D3D12MA_HEAVY_ASSERT(expr)
74
#endif
75
#endif
76
77
#ifndef D3D12MA_DEBUG_ALIGNMENT
78
/*
79
Minimum alignment of all allocations, in bytes.
80
Set to more than 1 for debugging purposes only. Must be power of two.
81
*/
82
#define D3D12MA_DEBUG_ALIGNMENT (1)
83
#endif
84
85
#ifndef D3D12MA_DEBUG_MARGIN
86
// Minimum margin before and after every allocation, in bytes.
87
// Set nonzero for debugging purposes only.
88
#define D3D12MA_DEBUG_MARGIN (0)
89
#endif
90
91
#ifndef D3D12MA_DEBUG_GLOBAL_MUTEX
92
/*
93
Set this to 1 for debugging purposes only, to enable single mutex protecting all
94
entry calls to the library. Can be useful for debugging multithreading issues.
95
*/
96
#define D3D12MA_DEBUG_GLOBAL_MUTEX (0)
97
#endif
98
99
/*
100
Define this macro for debugging purposes only to force specific D3D12_RESOURCE_HEAP_TIER,
101
especially to test compatibility with D3D12_RESOURCE_HEAP_TIER_1 on modern GPUs.
102
*/
103
//#define D3D12MA_FORCE_RESOURCE_HEAP_TIER D3D12_RESOURCE_HEAP_TIER_1
104
105
#ifndef D3D12MA_DEFAULT_BLOCK_SIZE
106
/// Default size of a block allocated as single ID3D12Heap.
107
#define D3D12MA_DEFAULT_BLOCK_SIZE (64ull * 1024 * 1024)
108
#endif
109
110
#ifndef D3D12MA_DEBUG_LOG
111
#define D3D12MA_DEBUG_LOG(format, ...)
112
/*
113
#define D3D12MA_DEBUG_LOG(format, ...) do { \
114
wprintf(format, __VA_ARGS__); \
115
wprintf(L"\n"); \
116
} while(false)
117
*/
118
#endif
119
120
#endif // _D3D12MA_CONFIGURATION
121
////////////////////////////////////////////////////////////////////////////////
122
////////////////////////////////////////////////////////////////////////////////
123
//
124
// Configuration End
125
//
126
////////////////////////////////////////////////////////////////////////////////
127
////////////////////////////////////////////////////////////////////////////////
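// One possible way (a sketch, not part of the library) to override the configuration
// macros above: define them before this translation unit is compiled, e.g. via build
// flags or a small wrapper file that includes this .cpp. MyAssert is a hypothetical
// user-provided function.
//
// #define D3D12MA_ASSERT(cond) MyAssert(cond)
// #define D3D12MA_DEBUG_MARGIN (16)   // pad every allocation by 16 B while debugging
// #define D3D12MA_DEBUG_LOG(format, ...) do { \
//     wprintf(format, __VA_ARGS__); \
//     wprintf(L"\n"); \
// } while(false)
// #include "D3D12MemAlloc.cpp"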
128
129
#define D3D12MA_IID_PPV_ARGS(ppType) __uuidof(**(ppType)), reinterpret_cast<void**>(ppType)
130
131
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
132
#define D3D12MA_CREATE_NOT_ZEROED_AVAILABLE 1
133
#endif
134
135
namespace D3D12MA
136
{
137
static constexpr UINT HEAP_TYPE_COUNT = 4;
138
static constexpr UINT STANDARD_HEAP_TYPE_COUNT = 3; // Only DEFAULT, UPLOAD, READBACK.
139
static constexpr UINT DEFAULT_POOL_MAX_COUNT = 9;
140
static const UINT NEW_BLOCK_SIZE_SHIFT_MAX = 3;
141
// Minimum size of a free suballocation to register it in the free suballocation collection.
142
static const UINT64 MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
143
144
static const WCHAR* const HeapTypeNames[] =
145
{
146
L"DEFAULT",
147
L"UPLOAD",
148
L"READBACK",
149
L"CUSTOM",
150
};
151
static const WCHAR* const StandardHeapTypeNames[] =
152
{
153
L"DEFAULT",
154
L"UPLOAD",
155
L"READBACK",
156
};
157
158
static const D3D12_HEAP_FLAGS RESOURCE_CLASS_HEAP_FLAGS =
159
D3D12_HEAP_FLAG_DENY_BUFFERS | D3D12_HEAP_FLAG_DENY_RT_DS_TEXTURES | D3D12_HEAP_FLAG_DENY_NON_RT_DS_TEXTURES;
160
161
static const D3D12_RESIDENCY_PRIORITY D3D12_RESIDENCY_PRIORITY_NONE = D3D12_RESIDENCY_PRIORITY(0);
162
163
#ifndef _D3D12MA_ENUM_DECLARATIONS
164
165
// Local copy of this enum, as it is provided only by <dxgi1_4.h>, so it may not be available.
166
enum DXGI_MEMORY_SEGMENT_GROUP_COPY
167
{
168
DXGI_MEMORY_SEGMENT_GROUP_LOCAL_COPY = 0,
169
DXGI_MEMORY_SEGMENT_GROUP_NON_LOCAL_COPY = 1,
170
DXGI_MEMORY_SEGMENT_GROUP_COUNT
171
};
172
173
enum class ResourceClass
174
{
175
Unknown, Buffer, Non_RT_DS_Texture, RT_DS_Texture
176
};
177
178
enum SuballocationType
179
{
180
SUBALLOCATION_TYPE_FREE = 0,
181
SUBALLOCATION_TYPE_ALLOCATION = 1,
182
};
183
184
#endif // _D3D12MA_ENUM_DECLARATIONS
185
186
187
#ifndef _D3D12MA_FUNCTIONS
188
189
static void* DefaultAllocate(size_t Size, size_t Alignment, void* /*pPrivateData*/)
190
{
191
#ifdef _WIN32
192
return _aligned_malloc(Size, Alignment);
193
#else
194
return aligned_alloc(Alignment, Size);
195
#endif
196
}
197
static void DefaultFree(void* pMemory, void* /*pPrivateData*/)
198
{
199
#ifdef _WIN32
200
return _aligned_free(pMemory);
201
#else
202
return free(pMemory);
203
#endif
204
}
205
206
static void* Malloc(const ALLOCATION_CALLBACKS& allocs, size_t size, size_t alignment)
207
{
208
void* const result = (*allocs.pAllocate)(size, alignment, allocs.pPrivateData);
209
D3D12MA_ASSERT(result);
210
return result;
211
}
212
static void Free(const ALLOCATION_CALLBACKS& allocs, void* memory)
213
{
214
(*allocs.pFree)(memory, allocs.pPrivateData);
215
}
216
217
template<typename T>
218
static T* Allocate(const ALLOCATION_CALLBACKS& allocs)
219
{
220
return (T*)Malloc(allocs, sizeof(T), __alignof(T));
221
}
222
template<typename T>
223
static T* AllocateArray(const ALLOCATION_CALLBACKS& allocs, size_t count)
224
{
225
return (T*)Malloc(allocs, sizeof(T) * count, __alignof(T));
226
}
227
228
#define D3D12MA_NEW(allocs, type) new(D3D12MA::Allocate<type>(allocs))(type)
229
#define D3D12MA_NEW_ARRAY(allocs, type, count) new(D3D12MA::AllocateArray<type>((allocs), (count)))(type)
230
231
template<typename T>
232
void D3D12MA_DELETE(const ALLOCATION_CALLBACKS& allocs, T* memory)
233
{
234
if (memory)
235
{
236
memory->~T();
237
Free(allocs, memory);
238
}
239
}
240
template<typename T>
241
void D3D12MA_DELETE_ARRAY(const ALLOCATION_CALLBACKS& allocs, T* memory, size_t count)
242
{
243
if (memory)
244
{
245
for (size_t i = count; i--; )
246
{
247
memory[i].~T();
248
}
249
Free(allocs, memory);
250
}
251
}
252
253
static void SetupAllocationCallbacks(ALLOCATION_CALLBACKS& outAllocs, const ALLOCATION_CALLBACKS* allocationCallbacks)
254
{
255
if (allocationCallbacks)
256
{
257
outAllocs = *allocationCallbacks;
258
D3D12MA_ASSERT(outAllocs.pAllocate != NULL && outAllocs.pFree != NULL);
259
}
260
else
261
{
262
outAllocs.pAllocate = &DefaultAllocate;
263
outAllocs.pFree = &DefaultFree;
264
outAllocs.pPrivateData = NULL;
265
}
266
}
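// Hedged usage sketch (not part of the library): supplying custom callbacks through
// ALLOCATION_CALLBACKS so SetupAllocationCallbacks picks them up instead of the
// defaults. MyCounters, MyAllocate and MyFree are hypothetical; the signatures mirror
// DefaultAllocate / DefaultFree above (Windows path shown).
//
// struct MyCounters { size_t liveAllocs = 0; };
// static MyCounters g_Counters;
// static void* MyAllocate(size_t Size, size_t Alignment, void* pPrivateData)
// {
//     ++static_cast<MyCounters*>(pPrivateData)->liveAllocs;
//     return _aligned_malloc(Size, Alignment);
// }
// static void MyFree(void* pMemory, void* pPrivateData)
// {
//     if (pMemory) --static_cast<MyCounters*>(pPrivateData)->liveAllocs;
//     _aligned_free(pMemory);
// }
// ...
// D3D12MA::ALLOCATION_CALLBACKS cb = {};
// cb.pAllocate = &MyAllocate;
// cb.pFree = &MyFree;
// cb.pPrivateData = &g_Counters;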
267
268
#define SAFE_RELEASE(ptr) do { if(ptr) { (ptr)->Release(); (ptr) = NULL; } } while(false)
269
270
#define D3D12MA_VALIDATE(cond) do { if(!(cond)) { \
271
D3D12MA_ASSERT(0 && "Validation failed: " #cond); \
272
return false; \
273
} } while(false)
274
275
template<typename T>
276
static T D3D12MA_MIN(const T& a, const T& b) { return a <= b ? a : b; }
277
template<typename T>
278
static T D3D12MA_MAX(const T& a, const T& b) { return a <= b ? b : a; }
279
280
template<typename T>
281
static void D3D12MA_SWAP(T& a, T& b) { T tmp = a; a = b; b = tmp; }
282
283
// Scans integer for index of first nonzero bit from the Least Significant Bit (LSB). If mask is 0 then returns UINT8_MAX
284
static UINT8 BitScanLSB(UINT64 mask)
285
{
286
#if defined(_MSC_VER) && defined(_WIN64)
287
unsigned long pos;
288
if (_BitScanForward64(&pos, mask))
289
return static_cast<UINT8>(pos);
290
return UINT8_MAX;
291
#elif defined __GNUC__ || defined __clang__
292
return static_cast<UINT8>(__builtin_ffsll(mask)) - 1U;
293
#else
294
UINT8 pos = 0;
295
UINT64 bit = 1;
296
do
297
{
298
if (mask & bit)
299
return pos;
300
bit <<= 1;
301
} while (pos++ < 63);
302
return UINT8_MAX;
303
#endif
304
}
305
// Scans integer for index of first nonzero bit from the Least Significant Bit (LSB). If mask is 0 then returns UINT8_MAX
306
static UINT8 BitScanLSB(UINT32 mask)
307
{
308
#ifdef _MSC_VER
309
unsigned long pos;
310
if (_BitScanForward(&pos, mask))
311
return static_cast<UINT8>(pos);
312
return UINT8_MAX;
313
#elif defined __GNUC__ || defined __clang__
314
return static_cast<UINT8>(__builtin_ffs(mask)) - 1U;
315
#else
316
UINT8 pos = 0;
317
UINT32 bit = 1;
318
do
319
{
320
if (mask & bit)
321
return pos;
322
bit <<= 1;
323
} while (pos++ < 31);
324
return UINT8_MAX;
325
#endif
326
}
327
328
// Scans integer for index of first nonzero bit from the Most Significant Bit (MSB). If mask is 0 then returns UINT8_MAX
329
static UINT8 BitScanMSB(UINT64 mask)
330
{
331
#if defined(_MSC_VER) && defined(_WIN64)
332
unsigned long pos;
333
if (_BitScanReverse64(&pos, mask))
334
return static_cast<UINT8>(pos);
335
#elif defined __GNUC__ || defined __clang__
336
if (mask)
337
return 63 - static_cast<UINT8>(__builtin_clzll(mask));
338
#else
339
UINT8 pos = 63;
340
UINT64 bit = 1ULL << 63;
341
do
342
{
343
if (mask & bit)
344
return pos;
345
bit >>= 1;
346
} while (pos-- > 0);
347
#endif
348
return UINT8_MAX;
349
}
350
// Scans integer for index of first nonzero bit from the Most Significant Bit (MSB). If mask is 0 then returns UINT8_MAX
351
static UINT8 BitScanMSB(UINT32 mask)
352
{
353
#ifdef _MSC_VER
354
unsigned long pos;
355
if (_BitScanReverse(&pos, mask))
356
return static_cast<UINT8>(pos);
357
#elif defined __GNUC__ || defined __clang__
358
if (mask)
359
return 31 - static_cast<UINT8>(__builtin_clz(mask));
360
#else
361
UINT8 pos = 31;
362
UINT32 bit = 1UL << 31;
363
do
364
{
365
if (mask & bit)
366
return pos;
367
bit >>= 1;
368
} while (pos-- > 0);
369
#endif
370
return UINT8_MAX;
371
}
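// Worked examples for the bit scans above (informative only):
//   BitScanLSB(UINT64(0x28)) == 3          (lowest set bit of 0b101000)
//   BitScanMSB(UINT32(0x28)) == 5          (highest set bit of 0b101000)
//   BitScanLSB(UINT64(0))    == UINT8_MAX
//   BitScanMSB(UINT32(0))    == UINT8_MAX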
372
373
/*
374
Returns true if given number is a power of two.
375
T must be an unsigned integer type, or a signed integer that is always nonnegative.
376
For 0 returns true.
377
*/
378
template <typename T>
379
static bool IsPow2(T x) { return (x & (x - 1)) == 0; }
380
381
// Aligns given value up to nearest multiple of align value. For example: AlignUp(11, 8) = 16.
382
// Use types like UINT, uint64_t as T.
383
template <typename T>
384
static T AlignUp(T val, T alignment)
385
{
386
D3D12MA_HEAVY_ASSERT(IsPow2(alignment));
387
return (val + alignment - 1) & ~(alignment - 1);
388
}
389
// Aligns given value down to nearest multiple of align value. For example: AlignDown(11, 8) = 8.
390
// Use types like UINT, uint64_t as T.
391
template <typename T>
392
static T AlignDown(T val, T alignment)
393
{
394
D3D12MA_HEAVY_ASSERT(IsPow2(alignment));
395
return val & ~(alignment - 1);
396
}
397
398
// Division with mathematical rounding to the nearest integer.
399
template <typename T>
400
static T RoundDiv(T x, T y) { return (x + (y / (T)2)) / y; }
401
template <typename T>
402
static T DivideRoundingUp(T x, T y) { return (x + y - 1) / y; }
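// Worked examples for the helpers above (informative only):
//   IsPow2(64u)                   == true   (and, per the comment above, IsPow2(0) == true)
//   AlignUp<UINT>(11, 8)          == 16
//   AlignDown<UINT>(11, 8)        == 8
//   RoundDiv<UINT>(7, 2)          == 4      ((7 + 1) / 2)
//   DivideRoundingUp<UINT>(10, 4) == 3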
403
404
static WCHAR HexDigitToChar(UINT8 digit)
405
{
406
if(digit < 10)
407
return L'0' + digit;
408
else
409
return L'A' + (digit - 10);
410
}
411
412
/*
413
Performs binary search and returns iterator to first element that is greater or
414
equal to `key`, according to comparison `cmp`.
415
416
Cmp should return true if first argument is less than second argument.
417
418
Returned value is the found element, if present in the collection, or the place where
419
a new element with value `key` should be inserted.
420
*/
421
template <typename CmpLess, typename IterT, typename KeyT>
422
static IterT BinaryFindFirstNotLess(IterT beg, IterT end, const KeyT& key, const CmpLess& cmp)
423
{
424
size_t down = 0, up = (end - beg);
425
while (down < up)
426
{
427
const size_t mid = (down + up) / 2;
428
if (cmp(*(beg + mid), key))
429
{
430
down = mid + 1;
431
}
432
else
433
{
434
up = mid;
435
}
436
}
437
return beg + down;
438
}
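// Hedged usage sketch (not part of the library): lower-bound behavior on a sorted
// array, using a plain "less" functor. CmpUint is a hypothetical name.
//
// struct CmpUint { bool operator()(UINT a, UINT b) const { return a < b; } };
// UINT sorted[] = { 1, 3, 3, 7 };
// const UINT* it = BinaryFindFirstNotLess(sorted, sorted + 4, 3u, CmpUint{}); // -> &sorted[1]
// it = BinaryFindFirstNotLess(sorted, sorted + 4, 4u, CmpUint{});            // -> &sorted[3]
// it = BinaryFindFirstNotLess(sorted, sorted + 4, 8u, CmpUint{});            // -> sorted + 4 (end)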
439
440
/*
441
Performs binary search and returns iterator to an element that is equal to `key`,
442
according to comparison `cmp`.
443
444
Cmp should return true if first argument is less than second argument.
445
446
Returned value is the found element if present in the collection, or end if not
447
found.
448
*/
449
template<typename CmpLess, typename IterT, typename KeyT>
450
static IterT BinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp)
451
{
452
IterT it = BinaryFindFirstNotLess<CmpLess, IterT, KeyT>(beg, end, value, cmp);
453
if (it == end ||
454
(!cmp(*it, value) && !cmp(value, *it)))
455
{
456
return it;
457
}
458
return end;
459
}
460
461
static UINT StandardHeapTypeToIndex(D3D12_HEAP_TYPE type)
462
{
463
switch (type)
464
{
465
case D3D12_HEAP_TYPE_DEFAULT: return 0;
466
case D3D12_HEAP_TYPE_UPLOAD: return 1;
467
case D3D12_HEAP_TYPE_READBACK: return 2;
468
default: D3D12MA_ASSERT(0); return UINT_MAX;
469
}
470
}
471
472
static D3D12_HEAP_TYPE IndexToStandardHeapType(UINT heapTypeIndex)
473
{
474
switch(heapTypeIndex)
475
{
476
case 0: return D3D12_HEAP_TYPE_DEFAULT;
477
case 1: return D3D12_HEAP_TYPE_UPLOAD;
478
case 2: return D3D12_HEAP_TYPE_READBACK;
479
default: D3D12MA_ASSERT(0); return D3D12_HEAP_TYPE_CUSTOM;
480
}
481
}
482
483
static UINT64 HeapFlagsToAlignment(D3D12_HEAP_FLAGS flags, bool denyMsaaTextures)
484
{
485
/*
486
Documentation of D3D12_HEAP_DESC structure says:
487
488
- D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT defined as 64KB.
489
- D3D12_DEFAULT_MSAA_RESOURCE_PLACEMENT_ALIGNMENT defined as 4MB. An
490
application must decide whether the heap will contain multi-sample
491
anti-aliasing (MSAA), in which case, the application must choose [this flag].
492
493
https://docs.microsoft.com/en-us/windows/desktop/api/d3d12/ns-d3d12-d3d12_heap_desc
494
*/
495
496
if (denyMsaaTextures)
497
return D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT;
498
499
const D3D12_HEAP_FLAGS denyAllTexturesFlags =
500
D3D12_HEAP_FLAG_DENY_NON_RT_DS_TEXTURES | D3D12_HEAP_FLAG_DENY_RT_DS_TEXTURES;
501
const bool canContainAnyTextures =
502
(flags & denyAllTexturesFlags) != denyAllTexturesFlags;
503
return canContainAnyTextures ?
504
D3D12_DEFAULT_MSAA_RESOURCE_PLACEMENT_ALIGNMENT : D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT;
505
}
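// Worked examples for HeapFlagsToAlignment (informative only):
//   HeapFlagsToAlignment(D3D12_HEAP_FLAG_DENY_RT_DS_TEXTURES |
//                        D3D12_HEAP_FLAG_DENY_NON_RT_DS_TEXTURES, false)
//     -> D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT (64 KB): the heap can only hold buffers.
//   HeapFlagsToAlignment(D3D12_HEAP_FLAG_DENY_BUFFERS, false)
//     -> D3D12_DEFAULT_MSAA_RESOURCE_PLACEMENT_ALIGNMENT (4 MB): textures, possibly MSAA, are allowed.
//   Passing denyMsaaTextures == true always yields the 64 KB alignment.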
506
507
static ResourceClass HeapFlagsToResourceClass(D3D12_HEAP_FLAGS heapFlags)
508
{
509
const bool allowBuffers = (heapFlags & D3D12_HEAP_FLAG_DENY_BUFFERS) == 0;
510
const bool allowRtDsTextures = (heapFlags & D3D12_HEAP_FLAG_DENY_RT_DS_TEXTURES) == 0;
511
const bool allowNonRtDsTextures = (heapFlags & D3D12_HEAP_FLAG_DENY_NON_RT_DS_TEXTURES) == 0;
512
513
const uint8_t allowedGroupCount = (allowBuffers ? 1 : 0) + (allowRtDsTextures ? 1 : 0) + (allowNonRtDsTextures ? 1 : 0);
514
if (allowedGroupCount != 1)
515
return ResourceClass::Unknown;
516
517
if (allowRtDsTextures)
518
return ResourceClass::RT_DS_Texture;
519
if (allowNonRtDsTextures)
520
return ResourceClass::Non_RT_DS_Texture;
521
return ResourceClass::Buffer;
522
}
523
524
static bool IsHeapTypeStandard(D3D12_HEAP_TYPE type)
525
{
526
return type == D3D12_HEAP_TYPE_DEFAULT ||
527
type == D3D12_HEAP_TYPE_UPLOAD ||
528
type == D3D12_HEAP_TYPE_READBACK;
529
}
530
531
static D3D12_HEAP_PROPERTIES StandardHeapTypeToHeapProperties(D3D12_HEAP_TYPE type)
532
{
533
D3D12MA_ASSERT(IsHeapTypeStandard(type));
534
D3D12_HEAP_PROPERTIES result = {};
535
result.Type = type;
536
return result;
537
}
538
539
static bool IsFormatCompressed(DXGI_FORMAT format)
540
{
541
switch (format)
542
{
543
case DXGI_FORMAT_BC1_TYPELESS:
544
case DXGI_FORMAT_BC1_UNORM:
545
case DXGI_FORMAT_BC1_UNORM_SRGB:
546
case DXGI_FORMAT_BC2_TYPELESS:
547
case DXGI_FORMAT_BC2_UNORM:
548
case DXGI_FORMAT_BC2_UNORM_SRGB:
549
case DXGI_FORMAT_BC3_TYPELESS:
550
case DXGI_FORMAT_BC3_UNORM:
551
case DXGI_FORMAT_BC3_UNORM_SRGB:
552
case DXGI_FORMAT_BC4_TYPELESS:
553
case DXGI_FORMAT_BC4_UNORM:
554
case DXGI_FORMAT_BC4_SNORM:
555
case DXGI_FORMAT_BC5_TYPELESS:
556
case DXGI_FORMAT_BC5_UNORM:
557
case DXGI_FORMAT_BC5_SNORM:
558
case DXGI_FORMAT_BC6H_TYPELESS:
559
case DXGI_FORMAT_BC6H_UF16:
560
case DXGI_FORMAT_BC6H_SF16:
561
case DXGI_FORMAT_BC7_TYPELESS:
562
case DXGI_FORMAT_BC7_UNORM:
563
case DXGI_FORMAT_BC7_UNORM_SRGB:
564
return true;
565
default:
566
return false;
567
}
568
}
569
570
// Only some formats are supported. For others it returns 0.
571
static UINT GetBitsPerPixel(DXGI_FORMAT format)
572
{
573
switch (format)
574
{
575
case DXGI_FORMAT_R32G32B32A32_TYPELESS:
576
case DXGI_FORMAT_R32G32B32A32_FLOAT:
577
case DXGI_FORMAT_R32G32B32A32_UINT:
578
case DXGI_FORMAT_R32G32B32A32_SINT:
579
return 128;
580
case DXGI_FORMAT_R32G32B32_TYPELESS:
581
case DXGI_FORMAT_R32G32B32_FLOAT:
582
case DXGI_FORMAT_R32G32B32_UINT:
583
case DXGI_FORMAT_R32G32B32_SINT:
584
return 96;
585
case DXGI_FORMAT_R16G16B16A16_TYPELESS:
586
case DXGI_FORMAT_R16G16B16A16_FLOAT:
587
case DXGI_FORMAT_R16G16B16A16_UNORM:
588
case DXGI_FORMAT_R16G16B16A16_UINT:
589
case DXGI_FORMAT_R16G16B16A16_SNORM:
590
case DXGI_FORMAT_R16G16B16A16_SINT:
591
return 64;
592
case DXGI_FORMAT_R32G32_TYPELESS:
593
case DXGI_FORMAT_R32G32_FLOAT:
594
case DXGI_FORMAT_R32G32_UINT:
595
case DXGI_FORMAT_R32G32_SINT:
596
return 64;
597
case DXGI_FORMAT_R32G8X24_TYPELESS:
598
case DXGI_FORMAT_D32_FLOAT_S8X24_UINT:
599
case DXGI_FORMAT_R32_FLOAT_X8X24_TYPELESS:
600
case DXGI_FORMAT_X32_TYPELESS_G8X24_UINT:
601
return 64;
602
case DXGI_FORMAT_R10G10B10A2_TYPELESS:
603
case DXGI_FORMAT_R10G10B10A2_UNORM:
604
case DXGI_FORMAT_R10G10B10A2_UINT:
605
case DXGI_FORMAT_R11G11B10_FLOAT:
606
return 32;
607
case DXGI_FORMAT_R8G8B8A8_TYPELESS:
608
case DXGI_FORMAT_R8G8B8A8_UNORM:
609
case DXGI_FORMAT_R8G8B8A8_UNORM_SRGB:
610
case DXGI_FORMAT_R8G8B8A8_UINT:
611
case DXGI_FORMAT_R8G8B8A8_SNORM:
612
case DXGI_FORMAT_R8G8B8A8_SINT:
613
return 32;
614
case DXGI_FORMAT_R16G16_TYPELESS:
615
case DXGI_FORMAT_R16G16_FLOAT:
616
case DXGI_FORMAT_R16G16_UNORM:
617
case DXGI_FORMAT_R16G16_UINT:
618
case DXGI_FORMAT_R16G16_SNORM:
619
case DXGI_FORMAT_R16G16_SINT:
620
return 32;
621
case DXGI_FORMAT_R32_TYPELESS:
622
case DXGI_FORMAT_D32_FLOAT:
623
case DXGI_FORMAT_R32_FLOAT:
624
case DXGI_FORMAT_R32_UINT:
625
case DXGI_FORMAT_R32_SINT:
626
return 32;
627
case DXGI_FORMAT_R24G8_TYPELESS:
628
case DXGI_FORMAT_D24_UNORM_S8_UINT:
629
case DXGI_FORMAT_R24_UNORM_X8_TYPELESS:
630
case DXGI_FORMAT_X24_TYPELESS_G8_UINT:
631
return 32;
632
case DXGI_FORMAT_R8G8_TYPELESS:
633
case DXGI_FORMAT_R8G8_UNORM:
634
case DXGI_FORMAT_R8G8_UINT:
635
case DXGI_FORMAT_R8G8_SNORM:
636
case DXGI_FORMAT_R8G8_SINT:
637
return 16;
638
case DXGI_FORMAT_R16_TYPELESS:
639
case DXGI_FORMAT_R16_FLOAT:
640
case DXGI_FORMAT_D16_UNORM:
641
case DXGI_FORMAT_R16_UNORM:
642
case DXGI_FORMAT_R16_UINT:
643
case DXGI_FORMAT_R16_SNORM:
644
case DXGI_FORMAT_R16_SINT:
645
return 16;
646
case DXGI_FORMAT_R8_TYPELESS:
647
case DXGI_FORMAT_R8_UNORM:
648
case DXGI_FORMAT_R8_UINT:
649
case DXGI_FORMAT_R8_SNORM:
650
case DXGI_FORMAT_R8_SINT:
651
case DXGI_FORMAT_A8_UNORM:
652
return 8;
653
case DXGI_FORMAT_BC1_TYPELESS:
654
case DXGI_FORMAT_BC1_UNORM:
655
case DXGI_FORMAT_BC1_UNORM_SRGB:
656
return 4;
657
case DXGI_FORMAT_BC2_TYPELESS:
658
case DXGI_FORMAT_BC2_UNORM:
659
case DXGI_FORMAT_BC2_UNORM_SRGB:
660
return 8;
661
case DXGI_FORMAT_BC3_TYPELESS:
662
case DXGI_FORMAT_BC3_UNORM:
663
case DXGI_FORMAT_BC3_UNORM_SRGB:
664
return 8;
665
case DXGI_FORMAT_BC4_TYPELESS:
666
case DXGI_FORMAT_BC4_UNORM:
667
case DXGI_FORMAT_BC4_SNORM:
668
return 4;
669
case DXGI_FORMAT_BC5_TYPELESS:
670
case DXGI_FORMAT_BC5_UNORM:
671
case DXGI_FORMAT_BC5_SNORM:
672
return 8;
673
case DXGI_FORMAT_BC6H_TYPELESS:
674
case DXGI_FORMAT_BC6H_UF16:
675
case DXGI_FORMAT_BC6H_SF16:
676
return 8;
677
case DXGI_FORMAT_BC7_TYPELESS:
678
case DXGI_FORMAT_BC7_UNORM:
679
case DXGI_FORMAT_BC7_UNORM_SRGB:
680
return 8;
681
default:
682
return 0;
683
}
684
}
685
686
template<typename D3D12_RESOURCE_DESC_T>
687
static ResourceClass ResourceDescToResourceClass(const D3D12_RESOURCE_DESC_T& resDesc)
688
{
689
if (resDesc.Dimension == D3D12_RESOURCE_DIMENSION_BUFFER)
690
return ResourceClass::Buffer;
691
// Else: it's surely a texture.
692
const bool isRenderTargetOrDepthStencil =
693
(resDesc.Flags & (D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET | D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL)) != 0;
694
return isRenderTargetOrDepthStencil ? ResourceClass::RT_DS_Texture : ResourceClass::Non_RT_DS_Texture;
695
}
696
697
// This algorithm is overly conservative.
698
template<typename D3D12_RESOURCE_DESC_T>
699
static bool CanUseSmallAlignment(const D3D12_RESOURCE_DESC_T& resourceDesc)
700
{
701
if (resourceDesc.Dimension != D3D12_RESOURCE_DIMENSION_TEXTURE2D)
702
return false;
703
if ((resourceDesc.Flags & (D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET | D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL)) != 0)
704
return false;
705
if (resourceDesc.SampleDesc.Count > 1)
706
return false;
707
if (resourceDesc.DepthOrArraySize != 1)
708
return false;
709
710
UINT sizeX = (UINT)resourceDesc.Width;
711
UINT sizeY = resourceDesc.Height;
712
UINT bitsPerPixel = GetBitsPerPixel(resourceDesc.Format);
713
if (bitsPerPixel == 0)
714
return false;
715
716
if (IsFormatCompressed(resourceDesc.Format))
717
{
718
sizeX = DivideRoundingUp(sizeX, 4u);
719
sizeY = DivideRoundingUp(sizeY, 4u);
720
bitsPerPixel *= 16;
721
}
722
723
UINT tileSizeX = 0, tileSizeY = 0;
724
switch (bitsPerPixel)
725
{
726
case 8: tileSizeX = 64; tileSizeY = 64; break;
727
case 16: tileSizeX = 64; tileSizeY = 32; break;
728
case 32: tileSizeX = 32; tileSizeY = 32; break;
729
case 64: tileSizeX = 32; tileSizeY = 16; break;
730
case 128: tileSizeX = 16; tileSizeY = 16; break;
731
default: return false;
732
}
733
734
const UINT tileCount = DivideRoundingUp(sizeX, tileSizeX) * DivideRoundingUp(sizeY, tileSizeY);
735
return tileCount <= 16;
736
}
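// Worked example (informative only): a 256x256 DXGI_FORMAT_BC1_UNORM texture with no
// RT/DS flags, 1 sample and 1 array slice:
//   GetBitsPerPixel(BC1) = 4; the format is compressed, so sizeX = sizeY = 256/4 = 64
//   and bitsPerPixel = 4 * 16 = 64. A 64-bpp tile is 32x16, giving
//   DivideRoundingUp(64,32) * DivideRoundingUp(64,16) = 2 * 4 = 8 tiles <= 16,
//   so the function returns true and the small 4 KB placement alignment
//   (D3D12_SMALL_RESOURCE_PLACEMENT_ALIGNMENT) may be attempted.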
737
738
static bool ValidateAllocateMemoryParameters(
739
const ALLOCATION_DESC* pAllocDesc,
740
const D3D12_RESOURCE_ALLOCATION_INFO* pAllocInfo,
741
Allocation** ppAllocation)
742
{
743
return pAllocDesc &&
744
pAllocInfo &&
745
ppAllocation &&
746
(pAllocInfo->Alignment == 0 ||
747
pAllocInfo->Alignment == D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT ||
748
pAllocInfo->Alignment == D3D12_DEFAULT_MSAA_RESOURCE_PLACEMENT_ALIGNMENT) &&
749
pAllocInfo->SizeInBytes != 0 &&
750
pAllocInfo->SizeInBytes % (64ull * 1024) == 0;
751
}
752
753
#endif // _D3D12MA_FUNCTIONS
754
755
#ifndef _D3D12MA_STATISTICS_FUNCTIONS
756
757
static void ClearStatistics(Statistics& outStats)
758
{
759
outStats.BlockCount = 0;
760
outStats.AllocationCount = 0;
761
outStats.BlockBytes = 0;
762
outStats.AllocationBytes = 0;
763
}
764
765
static void ClearDetailedStatistics(DetailedStatistics& outStats)
766
{
767
ClearStatistics(outStats.Stats);
768
outStats.UnusedRangeCount = 0;
769
outStats.AllocationSizeMin = UINT64_MAX;
770
outStats.AllocationSizeMax = 0;
771
outStats.UnusedRangeSizeMin = UINT64_MAX;
772
outStats.UnusedRangeSizeMax = 0;
773
}
774
775
static void AddStatistics(Statistics& inoutStats, const Statistics& src)
776
{
777
inoutStats.BlockCount += src.BlockCount;
778
inoutStats.AllocationCount += src.AllocationCount;
779
inoutStats.BlockBytes += src.BlockBytes;
780
inoutStats.AllocationBytes += src.AllocationBytes;
781
}
782
783
static void AddDetailedStatistics(DetailedStatistics& inoutStats, const DetailedStatistics& src)
784
{
785
AddStatistics(inoutStats.Stats, src.Stats);
786
inoutStats.UnusedRangeCount += src.UnusedRangeCount;
787
inoutStats.AllocationSizeMin = D3D12MA_MIN(inoutStats.AllocationSizeMin, src.AllocationSizeMin);
788
inoutStats.AllocationSizeMax = D3D12MA_MAX(inoutStats.AllocationSizeMax, src.AllocationSizeMax);
789
inoutStats.UnusedRangeSizeMin = D3D12MA_MIN(inoutStats.UnusedRangeSizeMin, src.UnusedRangeSizeMin);
790
inoutStats.UnusedRangeSizeMax = D3D12MA_MAX(inoutStats.UnusedRangeSizeMax, src.UnusedRangeSizeMax);
791
}
792
793
static void AddDetailedStatisticsAllocation(DetailedStatistics& inoutStats, UINT64 size)
794
{
795
inoutStats.Stats.AllocationCount++;
796
inoutStats.Stats.AllocationBytes += size;
797
inoutStats.AllocationSizeMin = D3D12MA_MIN(inoutStats.AllocationSizeMin, size);
798
inoutStats.AllocationSizeMax = D3D12MA_MAX(inoutStats.AllocationSizeMax, size);
799
}
800
801
static void AddDetailedStatisticsUnusedRange(DetailedStatistics& inoutStats, UINT64 size)
802
{
803
inoutStats.UnusedRangeCount++;
804
inoutStats.UnusedRangeSizeMin = D3D12MA_MIN(inoutStats.UnusedRangeSizeMin, size);
805
inoutStats.UnusedRangeSizeMax = D3D12MA_MAX(inoutStats.UnusedRangeSizeMax, size);
806
}
807
808
#endif // _D3D12MA_STATISTICS_FUNCTIONS
809
810
811
#ifndef _D3D12MA_MUTEX
812
813
#ifndef D3D12MA_MUTEX
814
class Mutex
815
{
816
public:
817
void Lock() { m_Mutex.lock(); }
818
void Unlock() { m_Mutex.unlock(); }
819
820
private:
821
std::mutex m_Mutex;
822
};
823
#define D3D12MA_MUTEX Mutex
824
#endif
825
826
#ifndef D3D12MA_RW_MUTEX
827
#ifdef _WIN32
828
class RWMutex
829
{
830
public:
831
RWMutex() { InitializeSRWLock(&m_Lock); }
832
void LockRead() { AcquireSRWLockShared(&m_Lock); }
833
void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
834
void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
835
void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
836
837
private:
838
SRWLOCK m_Lock;
839
};
840
#else // #ifdef _WIN32
841
class RWMutex
842
{
843
public:
844
RWMutex() {}
845
void LockRead() { m_Mutex.lock_shared(); }
846
void UnlockRead() { m_Mutex.unlock_shared(); }
847
void LockWrite() { m_Mutex.lock(); }
848
void UnlockWrite() { m_Mutex.unlock(); }
849
850
private:
851
std::shared_timed_mutex m_Mutex;
852
};
853
#endif // #ifdef _WIN32
854
#define D3D12MA_RW_MUTEX RWMutex
855
#endif // #ifndef D3D12MA_RW_MUTEX
856
857
// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
858
struct MutexLock
859
{
860
D3D12MA_CLASS_NO_COPY(MutexLock);
861
public:
862
MutexLock(D3D12MA_MUTEX& mutex, bool useMutex = true) :
863
m_pMutex(useMutex ? &mutex : NULL)
864
{
865
if (m_pMutex) m_pMutex->Lock();
866
}
867
~MutexLock() { if (m_pMutex) m_pMutex->Unlock(); }
868
869
private:
870
D3D12MA_MUTEX* m_pMutex;
871
};
872
873
// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
874
struct MutexLockRead
875
{
876
D3D12MA_CLASS_NO_COPY(MutexLockRead);
877
public:
878
MutexLockRead(D3D12MA_RW_MUTEX& mutex, bool useMutex)
879
: m_pMutex(useMutex ? &mutex : NULL)
880
{
881
if(m_pMutex)
882
{
883
m_pMutex->LockRead();
884
}
885
}
886
~MutexLockRead() { if (m_pMutex) m_pMutex->UnlockRead(); }
887
888
private:
889
D3D12MA_RW_MUTEX* m_pMutex;
890
};
891
892
// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
893
struct MutexLockWrite
894
{
895
D3D12MA_CLASS_NO_COPY(MutexLockWrite);
896
public:
897
MutexLockWrite(D3D12MA_RW_MUTEX& mutex, bool useMutex)
898
: m_pMutex(useMutex ? &mutex : NULL)
899
{
900
if (m_pMutex) m_pMutex->LockWrite();
901
}
902
~MutexLockWrite() { if (m_pMutex) m_pMutex->UnlockWrite(); }
903
904
private:
905
D3D12MA_RW_MUTEX* m_pMutex;
906
};
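// Hedged usage sketch (not part of the library): the RAII helpers above guard a
// critical section for the current scope. m_Mutex and m_UseMutex are hypothetical
// members of some owning class.
//
// {
//     MutexLockRead lock(m_Mutex, m_UseMutex);  // shared lock while reading state
//     // ... read shared data ...
// } // unlocked here
// {
//     MutexLockWrite lock(m_Mutex, m_UseMutex); // exclusive lock while mutating state
//     // ... modify shared data ...
// }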
907
908
#if D3D12MA_DEBUG_GLOBAL_MUTEX
909
static D3D12MA_MUTEX g_DebugGlobalMutex;
910
#define D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK MutexLock debugGlobalMutexLock(g_DebugGlobalMutex, true);
911
#else
912
#define D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
913
#endif
914
#endif // _D3D12MA_MUTEX
915
916
#ifndef _D3D12MA_VECTOR
917
/*
918
Dynamically resizing contiguous array. Class with interface similar to std::vector.
919
T must be POD because constructors and destructors are not called and memcpy is
920
used for these objects.
921
*/
922
template<typename T>
923
class Vector
924
{
925
public:
926
using value_type = T;
927
using iterator = T*;
928
using const_iterator = const T*;
929
930
// allocationCallbacks externally owned, must outlive this object.
931
Vector(const ALLOCATION_CALLBACKS& allocationCallbacks);
932
Vector(size_t count, const ALLOCATION_CALLBACKS& allocationCallbacks);
933
Vector(const Vector<T>& src);
934
~Vector();
935
936
const ALLOCATION_CALLBACKS& GetAllocs() const { return m_AllocationCallbacks; }
937
bool empty() const { return m_Count == 0; }
938
size_t size() const { return m_Count; }
939
T* data() { return m_pArray; }
940
const T* data() const { return m_pArray; }
941
void clear(bool freeMemory = false) { resize(0, freeMemory); }
942
943
iterator begin() { return m_pArray; }
944
iterator end() { return m_pArray + m_Count; }
945
const_iterator cbegin() const { return m_pArray; }
946
const_iterator cend() const { return m_pArray + m_Count; }
947
const_iterator begin() const { return cbegin(); }
948
const_iterator end() const { return cend(); }
949
950
void push_front(const T& src) { insert(0, src); }
951
void push_back(const T& src);
952
void pop_front();
953
void pop_back();
954
955
T& front();
956
T& back();
957
const T& front() const;
958
const T& back() const;
959
960
void reserve(size_t newCapacity, bool freeMemory = false);
961
void resize(size_t newCount, bool freeMemory = false);
962
void insert(size_t index, const T& src);
963
void remove(size_t index);
964
965
template<typename CmpLess>
966
size_t InsertSorted(const T& value, const CmpLess& cmp);
967
template<typename CmpLess>
968
bool RemoveSorted(const T& value, const CmpLess& cmp);
969
970
Vector& operator=(const Vector<T>& rhs);
971
T& operator[](size_t index);
972
const T& operator[](size_t index) const;
973
974
private:
975
const ALLOCATION_CALLBACKS& m_AllocationCallbacks;
976
T* m_pArray;
977
size_t m_Count;
978
size_t m_Capacity;
979
};
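// Hedged usage sketch (not part of the library): Vector requires externally owned
// ALLOCATION_CALLBACKS and is only safe for trivially copyable element types.
// "allocs" is assumed to be a valid ALLOCATION_CALLBACKS instance; CmpLess is a
// hypothetical comparator.
//
// Vector<UINT64> sizes(allocs);
// sizes.push_back(64);
// sizes.push_back(256);
// struct CmpLess { bool operator()(UINT64 a, UINT64 b) const { return a < b; } };
// sizes.InsertSorted(128, CmpLess{});   // keeps the vector sorted: 64, 128, 256
// sizes.RemoveSorted(64, CmpLess{});    // binary search + remove -> 128, 256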
980
981
#ifndef _D3D12MA_VECTOR_FUNCTIONS
982
template<typename T>
983
Vector<T>::Vector(const ALLOCATION_CALLBACKS& allocationCallbacks)
984
: m_AllocationCallbacks(allocationCallbacks),
985
m_pArray(NULL),
986
m_Count(0),
987
m_Capacity(0) {}
988
989
template<typename T>
990
Vector<T>::Vector(size_t count, const ALLOCATION_CALLBACKS& allocationCallbacks)
991
: m_AllocationCallbacks(allocationCallbacks),
992
m_pArray(count ? AllocateArray<T>(allocationCallbacks, count) : NULL),
993
m_Count(count),
994
m_Capacity(count) {}
995
996
template<typename T>
997
Vector<T>::Vector(const Vector<T>& src)
998
: m_AllocationCallbacks(src.m_AllocationCallbacks),
999
m_pArray(src.m_Count ? AllocateArray<T>(src.m_AllocationCallbacks, src.m_Count) : NULL),
1000
m_Count(src.m_Count),
1001
m_Capacity(src.m_Count)
1002
{
1003
if (m_Count > 0)
1004
{
1005
memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
1006
}
1007
}
1008
1009
template<typename T>
1010
Vector<T>::~Vector()
1011
{
1012
Free(m_AllocationCallbacks, m_pArray);
1013
}
1014
1015
template<typename T>
1016
void Vector<T>::push_back(const T& src)
1017
{
1018
const size_t newIndex = size();
1019
resize(newIndex + 1);
1020
m_pArray[newIndex] = src;
1021
}
1022
1023
template<typename T>
1024
void Vector<T>::pop_front()
1025
{
1026
D3D12MA_HEAVY_ASSERT(m_Count > 0);
1027
remove(0);
1028
}
1029
1030
template<typename T>
1031
void Vector<T>::pop_back()
1032
{
1033
D3D12MA_HEAVY_ASSERT(m_Count > 0);
1034
resize(size() - 1);
1035
}
1036
1037
template<typename T>
1038
T& Vector<T>::front()
1039
{
1040
D3D12MA_HEAVY_ASSERT(m_Count > 0);
1041
return m_pArray[0];
1042
}
1043
1044
template<typename T>
1045
T& Vector<T>::back()
1046
{
1047
D3D12MA_HEAVY_ASSERT(m_Count > 0);
1048
return m_pArray[m_Count - 1];
1049
}
1050
1051
template<typename T>
1052
const T& Vector<T>::front() const
1053
{
1054
D3D12MA_HEAVY_ASSERT(m_Count > 0);
1055
return m_pArray[0];
1056
}
1057
1058
template<typename T>
1059
const T& Vector<T>::back() const
1060
{
1061
D3D12MA_HEAVY_ASSERT(m_Count > 0);
1062
return m_pArray[m_Count - 1];
1063
}
1064
1065
template<typename T>
1066
void Vector<T>::reserve(size_t newCapacity, bool freeMemory)
1067
{
1068
newCapacity = D3D12MA_MAX(newCapacity, m_Count);
1069
1070
if ((newCapacity < m_Capacity) && !freeMemory)
1071
{
1072
newCapacity = m_Capacity;
1073
}
1074
1075
if (newCapacity != m_Capacity)
1076
{
1077
T* const newArray = newCapacity ? AllocateArray<T>(m_AllocationCallbacks, newCapacity) : NULL;
1078
if (m_Count != 0)
1079
{
1080
memcpy(newArray, m_pArray, m_Count * sizeof(T));
1081
}
1082
Free(m_AllocationCallbacks, m_pArray);
1083
m_Capacity = newCapacity;
1084
m_pArray = newArray;
1085
}
1086
}
1087
1088
template<typename T>
1089
void Vector<T>::resize(size_t newCount, bool freeMemory)
1090
{
1091
size_t newCapacity = m_Capacity;
1092
if (newCount > m_Capacity)
1093
{
1094
newCapacity = D3D12MA_MAX(newCount, D3D12MA_MAX(m_Capacity * 3 / 2, (size_t)8));
1095
}
1096
else if (freeMemory)
1097
{
1098
newCapacity = newCount;
1099
}
1100
1101
if (newCapacity != m_Capacity)
1102
{
1103
T* const newArray = newCapacity ? AllocateArray<T>(m_AllocationCallbacks, newCapacity) : NULL;
1104
const size_t elementsToCopy = D3D12MA_MIN(m_Count, newCount);
1105
if (elementsToCopy != 0)
1106
{
1107
memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
1108
}
1109
Free(m_AllocationCallbacks, m_pArray);
1110
m_Capacity = newCapacity;
1111
m_pArray = newArray;
1112
}
1113
1114
m_Count = newCount;
1115
}
1116
1117
template<typename T>
1118
void Vector<T>::insert(size_t index, const T& src)
1119
{
1120
D3D12MA_HEAVY_ASSERT(index <= m_Count);
1121
const size_t oldCount = size();
1122
resize(oldCount + 1);
1123
if (index < oldCount)
1124
{
1125
memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
1126
}
1127
m_pArray[index] = src;
1128
}
1129
1130
template<typename T>
1131
void Vector<T>::remove(size_t index)
1132
{
1133
D3D12MA_HEAVY_ASSERT(index < m_Count);
1134
const size_t oldCount = size();
1135
if (index < oldCount - 1)
1136
{
1137
memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
1138
}
1139
resize(oldCount - 1);
1140
}
1141
1142
template<typename T> template<typename CmpLess>
1143
size_t Vector<T>::InsertSorted(const T& value, const CmpLess& cmp)
1144
{
1145
const size_t indexToInsert = BinaryFindFirstNotLess<CmpLess, iterator, T>(
1146
m_pArray,
1147
m_pArray + m_Count,
1148
value,
1149
cmp) - m_pArray;
1150
insert(indexToInsert, value);
1151
return indexToInsert;
1152
}
1153
1154
template<typename T> template<typename CmpLess>
1155
bool Vector<T>::RemoveSorted(const T& value, const CmpLess& cmp)
1156
{
1157
const iterator it = BinaryFindFirstNotLess(
1158
m_pArray,
1159
m_pArray + m_Count,
1160
value,
1161
cmp);
1162
if ((it != end()) && !cmp(*it, value) && !cmp(value, *it))
1163
{
1164
size_t indexToRemove = it - begin();
1165
remove(indexToRemove);
1166
return true;
1167
}
1168
return false;
1169
}
1170
1171
template<typename T>
1172
Vector<T>& Vector<T>::operator=(const Vector<T>& rhs)
1173
{
1174
if (&rhs != this)
1175
{
1176
resize(rhs.m_Count);
1177
if (m_Count != 0)
1178
{
1179
memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
1180
}
1181
}
1182
return *this;
1183
}
1184
1185
template<typename T>
1186
T& Vector<T>::operator[](size_t index)
1187
{
1188
D3D12MA_HEAVY_ASSERT(index < m_Count);
1189
return m_pArray[index];
1190
}
1191
1192
template<typename T>
1193
const T& Vector<T>::operator[](size_t index) const
1194
{
1195
D3D12MA_HEAVY_ASSERT(index < m_Count);
1196
return m_pArray[index];
1197
}
1198
#endif // _D3D12MA_VECTOR_FUNCTIONS
1199
#endif // _D3D12MA_VECTOR
1200
1201
#ifndef _D3D12MA_STRING_BUILDER
1202
class StringBuilder
1203
{
1204
public:
1205
StringBuilder(const ALLOCATION_CALLBACKS& allocationCallbacks) : m_Data(allocationCallbacks) {}
1206
1207
size_t GetLength() const { return m_Data.size(); }
1208
LPCWSTR GetData() const { return m_Data.data(); }
1209
1210
void Add(WCHAR ch) { m_Data.push_back(ch); }
1211
void Add(LPCWSTR str);
1212
void AddNewLine() { Add(L'\n'); }
1213
void AddNumber(UINT num);
1214
void AddNumber(UINT64 num);
1215
void AddPointer(const void* ptr);
1216
1217
private:
1218
Vector<WCHAR> m_Data;
1219
};
1220
1221
#ifndef _D3D12MA_STRING_BUILDER_FUNCTIONS
1222
void StringBuilder::Add(LPCWSTR str)
1223
{
1224
const size_t len = wcslen(str);
1225
if (len > 0)
1226
{
1227
const size_t oldCount = m_Data.size();
1228
m_Data.resize(oldCount + len);
1229
memcpy(m_Data.data() + oldCount, str, len * sizeof(WCHAR));
1230
}
1231
}
1232
1233
void StringBuilder::AddNumber(UINT num)
1234
{
1235
WCHAR buf[11];
1236
buf[10] = L'\0';
1237
WCHAR *p = &buf[10];
1238
do
1239
{
1240
*--p = L'0' + (num % 10);
1241
num /= 10;
1242
}
1243
while (num);
1244
Add(p);
1245
}
1246
1247
void StringBuilder::AddNumber(UINT64 num)
1248
{
1249
WCHAR buf[21];
1250
buf[20] = L'\0';
1251
WCHAR *p = &buf[20];
1252
do
1253
{
1254
*--p = L'0' + (num % 10);
1255
num /= 10;
1256
}
1257
while (num);
1258
Add(p);
1259
}
1260
1261
void StringBuilder::AddPointer(const void* ptr)
1262
{
1263
WCHAR buf[21];
1264
uintptr_t num = (uintptr_t)ptr;
1265
buf[20] = L'\0';
1266
WCHAR *p = &buf[20];
1267
do
1268
{
1269
*--p = HexDigitToChar((UINT8)(num & 0xF));
1270
num >>= 4;
1271
}
1272
while (num);
1273
Add(p);
1274
}
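// Informative examples of the formatting implemented above:
//   AddNumber(42u)            appends L"42"
//   AddNumber(UINT64(10000))  appends L"10000"
//   AddPointer((void*)0x2AF3) appends L"2AF3" (uppercase hex, no leading zeros, no 0x prefix)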
1275
1276
#endif // _D3D12MA_STRING_BUILDER_FUNCTIONS
1277
#endif // _D3D12MA_STRING_BUILDER
1278
1279
#ifndef _D3D12MA_JSON_WRITER
1280
/*
1281
Allows you to conveniently build a correct JSON document to be written to the
1282
StringBuilder passed to the constructor.
1283
*/
1284
class JsonWriter
1285
{
1286
public:
1287
// stringBuilder - string builder to write the document to. Must remain alive for the whole lifetime of this object.
1288
JsonWriter(const ALLOCATION_CALLBACKS& allocationCallbacks, StringBuilder& stringBuilder);
1289
~JsonWriter();
1290
1291
// Begins object by writing "{".
1292
// Inside an object, you must call pairs of WriteString and a value, e.g.:
1293
// j.BeginObject(true); j.WriteString(L"A"); j.WriteNumber(1u); j.WriteString(L"B"); j.WriteNumber(2u); j.EndObject();
1294
// Will write: { "A": 1, "B": 2 }
1295
void BeginObject(bool singleLine = false);
1296
// Ends object by writing "}".
1297
void EndObject();
1298
1299
// Begins array by writing "[".
1300
// Inside an array, you can write a sequence of any values.
1301
void BeginArray(bool singleLine = false);
1302
// Ends array by writing "]".
1303
void EndArray();
1304
1305
// Writes a string value inside "".
1306
// pStr can contain any UTF-16 characters, including '"', new line etc. - they will be properly escaped.
1307
void WriteString(LPCWSTR pStr);
1308
1309
// Begins writing a string value.
1310
// Call BeginString, ContinueString, ContinueString, ..., EndString instead of
1311
// WriteString to conveniently build the string content incrementally, made of
1312
// parts including numbers.
1313
void BeginString(LPCWSTR pStr = NULL);
1314
// Posts next part of an open string.
1315
void ContinueString(LPCWSTR pStr);
1316
// Posts next part of an open string. The number is converted to decimal characters.
1317
void ContinueString(UINT num);
1318
void ContinueString(UINT64 num);
1319
void ContinueString_Pointer(const void* ptr);
1320
// Posts next part of an open string. Pointer value is converted to characters
1321
// using "%p" formatting - shown as hexadecimal number, e.g.: 000000081276Ad00
1322
// void ContinueString_Pointer(const void* ptr);
1323
// Ends writing a string value by writing '"'.
1324
void EndString(LPCWSTR pStr = NULL);
1325
1326
// Writes a number value.
1327
void WriteNumber(UINT num);
1328
void WriteNumber(UINT64 num);
1329
// Writes a boolean value - false or true.
1330
void WriteBool(bool b);
1331
// Writes a null value.
1332
void WriteNull();
1333
1334
void AddAllocationToObject(const Allocation& alloc);
1335
void AddDetailedStatisticsInfoObject(const DetailedStatistics& stats);
1336
1337
private:
1338
static const WCHAR* const INDENT;
1339
1340
enum CollectionType
1341
{
1342
COLLECTION_TYPE_OBJECT,
1343
COLLECTION_TYPE_ARRAY,
1344
};
1345
struct StackItem
1346
{
1347
CollectionType type;
1348
UINT valueCount;
1349
bool singleLineMode;
1350
};
1351
1352
StringBuilder& m_SB;
1353
Vector<StackItem> m_Stack;
1354
bool m_InsideString;
1355
1356
void BeginValue(bool isString);
1357
void WriteIndent(bool oneLess = false);
1358
};
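// Hedged usage sketch (not part of the library): building a small document with the
// writer above. "allocs" is assumed to be a valid ALLOCATION_CALLBACKS instance.
//
// StringBuilder sb(allocs);
// {
//     JsonWriter json(allocs, sb);
//     json.BeginObject(true);          // single-line object
//     json.WriteString(L"BlockCount"); // key...
//     json.WriteNumber(10u);           // ...then value
//     json.WriteString(L"Mapped");
//     json.WriteBool(false);
//     json.EndObject();
// } // ~JsonWriter asserts that all objects/arrays were closed
// // sb.GetData() now contains: {"BlockCount": 10, "Mapped": false}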
1359
1360
#ifndef _D3D12MA_JSON_WRITER_FUNCTIONS
1361
const WCHAR* const JsonWriter::INDENT = L" ";
1362
1363
JsonWriter::JsonWriter(const ALLOCATION_CALLBACKS& allocationCallbacks, StringBuilder& stringBuilder)
1364
: m_SB(stringBuilder),
1365
m_Stack(allocationCallbacks),
1366
m_InsideString(false) {}
1367
1368
JsonWriter::~JsonWriter()
1369
{
1370
D3D12MA_ASSERT(!m_InsideString);
1371
D3D12MA_ASSERT(m_Stack.empty());
1372
}
1373
1374
void JsonWriter::BeginObject(bool singleLine)
1375
{
1376
D3D12MA_ASSERT(!m_InsideString);
1377
1378
BeginValue(false);
1379
m_SB.Add(L'{');
1380
1381
StackItem stackItem;
1382
stackItem.type = COLLECTION_TYPE_OBJECT;
1383
stackItem.valueCount = 0;
1384
stackItem.singleLineMode = singleLine;
1385
m_Stack.push_back(stackItem);
1386
}
1387
1388
void JsonWriter::EndObject()
1389
{
1390
D3D12MA_ASSERT(!m_InsideString);
1391
D3D12MA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
1392
D3D12MA_ASSERT(m_Stack.back().valueCount % 2 == 0);
1393
1394
WriteIndent(true);
1395
m_SB.Add(L'}');
1396
1397
m_Stack.pop_back();
1398
}
1399
1400
void JsonWriter::BeginArray(bool singleLine)
1401
{
1402
D3D12MA_ASSERT(!m_InsideString);
1403
1404
BeginValue(false);
1405
m_SB.Add(L'[');
1406
1407
StackItem stackItem;
1408
stackItem.type = COLLECTION_TYPE_ARRAY;
1409
stackItem.valueCount = 0;
1410
stackItem.singleLineMode = singleLine;
1411
m_Stack.push_back(stackItem);
1412
}
1413
1414
void JsonWriter::EndArray()
1415
{
1416
D3D12MA_ASSERT(!m_InsideString);
1417
D3D12MA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
1418
1419
WriteIndent(true);
1420
m_SB.Add(L']');
1421
1422
m_Stack.pop_back();
1423
}
1424
1425
void JsonWriter::WriteString(LPCWSTR pStr)
1426
{
1427
BeginString(pStr);
1428
EndString();
1429
}
1430
1431
void JsonWriter::BeginString(LPCWSTR pStr)
1432
{
1433
D3D12MA_ASSERT(!m_InsideString);
1434
1435
BeginValue(true);
1436
m_InsideString = true;
1437
m_SB.Add(L'"');
1438
if (pStr != NULL)
1439
{
1440
ContinueString(pStr);
1441
}
1442
}
1443
1444
void JsonWriter::ContinueString(LPCWSTR pStr)
1445
{
1446
D3D12MA_ASSERT(m_InsideString);
1447
D3D12MA_ASSERT(pStr);
1448
1449
for (const WCHAR *p = pStr; *p; ++p)
1450
{
1451
// the strings we encode are assumed to be in UTF-16LE format, the native
1452
// windows wide character Unicode format. In this encoding Unicode code
1453
// points U+0000 to U+D7FF and U+E000 to U+FFFF are encoded in two bytes,
1454
// and everything else takes more than two bytes. We will reject any
1455
// multi wchar character encodings for simplicity.
1456
UINT val = (UINT)*p;
1457
D3D12MA_ASSERT(((val <= 0xD7FF) || (0xE000 <= val && val <= 0xFFFF)) &&
1458
"Character not currently supported.");
1459
switch (*p)
1460
{
1461
case L'"': m_SB.Add(L'\\'); m_SB.Add(L'"'); break;
1462
case L'\\': m_SB.Add(L'\\'); m_SB.Add(L'\\'); break;
1463
case L'/': m_SB.Add(L'\\'); m_SB.Add(L'/'); break;
1464
case L'\b': m_SB.Add(L'\\'); m_SB.Add(L'b'); break;
1465
case L'\f': m_SB.Add(L'\\'); m_SB.Add(L'f'); break;
1466
case L'\n': m_SB.Add(L'\\'); m_SB.Add(L'n'); break;
1467
case L'\r': m_SB.Add(L'\\'); m_SB.Add(L'r'); break;
1468
case L'\t': m_SB.Add(L'\\'); m_SB.Add(L't'); break;
1469
default:
1470
// conservatively use encoding \uXXXX for any Unicode character
1471
// outside the 32..255 range.
1472
if (32 <= val && val < 256)
1473
m_SB.Add(*p);
1474
else
1475
{
1476
m_SB.Add(L'\\');
1477
m_SB.Add(L'u');
1478
for (UINT i = 0; i < 4; ++i)
1479
{
1480
UINT hexDigit = (val & 0xF000) >> 12;
1481
val <<= 4;
1482
if (hexDigit < 10)
1483
m_SB.Add(L'0' + (WCHAR)hexDigit);
1484
else
1485
m_SB.Add(L'A' + (WCHAR)(hexDigit - 10));
1486
}
1487
}
1488
break;
1489
}
1490
}
1491
}
1492
1493
void JsonWriter::ContinueString(UINT num)
1494
{
1495
D3D12MA_ASSERT(m_InsideString);
1496
m_SB.AddNumber(num);
1497
}
1498
1499
void JsonWriter::ContinueString(UINT64 num)
1500
{
1501
D3D12MA_ASSERT(m_InsideString);
1502
m_SB.AddNumber(num);
1503
}
1504
1505
void JsonWriter::ContinueString_Pointer(const void* ptr)
1506
{
1507
D3D12MA_ASSERT(m_InsideString);
1508
m_SB.AddPointer(ptr);
1509
}
1510
1511
void JsonWriter::EndString(LPCWSTR pStr)
1512
{
1513
D3D12MA_ASSERT(m_InsideString);
1514
1515
if (pStr)
1516
ContinueString(pStr);
1517
m_SB.Add(L'"');
1518
m_InsideString = false;
1519
}
1520
1521
void JsonWriter::WriteNumber(UINT num)
1522
{
1523
D3D12MA_ASSERT(!m_InsideString);
1524
BeginValue(false);
1525
m_SB.AddNumber(num);
1526
}
1527
1528
void JsonWriter::WriteNumber(UINT64 num)
1529
{
1530
D3D12MA_ASSERT(!m_InsideString);
1531
BeginValue(false);
1532
m_SB.AddNumber(num);
1533
}
1534
1535
void JsonWriter::WriteBool(bool b)
1536
{
1537
D3D12MA_ASSERT(!m_InsideString);
1538
BeginValue(false);
1539
if (b)
1540
m_SB.Add(L"true");
1541
else
1542
m_SB.Add(L"false");
1543
}
1544
1545
void JsonWriter::WriteNull()
1546
{
1547
D3D12MA_ASSERT(!m_InsideString);
1548
BeginValue(false);
1549
m_SB.Add(L"null");
1550
}
1551
1552
void JsonWriter::AddAllocationToObject(const Allocation& alloc)
1553
{
1554
WriteString(L"Type");
1555
switch (alloc.m_PackedData.GetResourceDimension()) {
1556
case D3D12_RESOURCE_DIMENSION_UNKNOWN:
1557
WriteString(L"UNKNOWN");
1558
break;
1559
case D3D12_RESOURCE_DIMENSION_BUFFER:
1560
WriteString(L"BUFFER");
1561
break;
1562
case D3D12_RESOURCE_DIMENSION_TEXTURE1D:
1563
WriteString(L"TEXTURE1D");
1564
break;
1565
case D3D12_RESOURCE_DIMENSION_TEXTURE2D:
1566
WriteString(L"TEXTURE2D");
1567
break;
1568
case D3D12_RESOURCE_DIMENSION_TEXTURE3D:
1569
WriteString(L"TEXTURE3D");
1570
break;
1571
default: D3D12MA_ASSERT(0); break;
1572
}
1573
1574
WriteString(L"Size");
1575
WriteNumber(alloc.GetSize());
1576
WriteString(L"Usage");
1577
WriteNumber((UINT)alloc.m_PackedData.GetResourceFlags());
1578
1579
void* privateData = alloc.GetPrivateData();
1580
if (privateData)
1581
{
1582
WriteString(L"CustomData");
1583
BeginString();
1584
ContinueString_Pointer(privateData);
1585
EndString();
1586
}
1587
1588
LPCWSTR name = alloc.GetName();
1589
if (name != NULL)
1590
{
1591
WriteString(L"Name");
1592
WriteString(name);
1593
}
1594
if (alloc.m_PackedData.GetTextureLayout())
1595
{
1596
WriteString(L"Layout");
1597
WriteNumber((UINT)alloc.m_PackedData.GetTextureLayout());
1598
}
1599
}
1600
1601
void JsonWriter::AddDetailedStatisticsInfoObject(const DetailedStatistics& stats)
1602
{
1603
BeginObject();
1604
1605
WriteString(L"BlockCount");
1606
WriteNumber(stats.Stats.BlockCount);
1607
WriteString(L"BlockBytes");
1608
WriteNumber(stats.Stats.BlockBytes);
1609
WriteString(L"AllocationCount");
1610
WriteNumber(stats.Stats.AllocationCount);
1611
WriteString(L"AllocationBytes");
1612
WriteNumber(stats.Stats.AllocationBytes);
1613
WriteString(L"UnusedRangeCount");
1614
WriteNumber(stats.UnusedRangeCount);
1615
1616
if (stats.Stats.AllocationCount > 1)
1617
{
1618
WriteString(L"AllocationSizeMin");
1619
WriteNumber(stats.AllocationSizeMin);
1620
WriteString(L"AllocationSizeMax");
1621
WriteNumber(stats.AllocationSizeMax);
1622
}
1623
if (stats.UnusedRangeCount > 1)
1624
{
1625
WriteString(L"UnusedRangeSizeMin");
1626
WriteNumber(stats.UnusedRangeSizeMin);
1627
WriteString(L"UnusedRangeSizeMax");
1628
WriteNumber(stats.UnusedRangeSizeMax);
1629
}
1630
EndObject();
1631
}
1632
1633
void JsonWriter::BeginValue(bool isString)
1634
{
1635
if (!m_Stack.empty())
1636
{
1637
StackItem& currItem = m_Stack.back();
1638
if (currItem.type == COLLECTION_TYPE_OBJECT && currItem.valueCount % 2 == 0)
1639
{
1640
D3D12MA_ASSERT(isString);
1641
}
1642
1643
if (currItem.type == COLLECTION_TYPE_OBJECT && currItem.valueCount % 2 == 1)
1644
{
1645
m_SB.Add(L':'); m_SB.Add(L' ');
1646
}
1647
else if (currItem.valueCount > 0)
1648
{
1649
m_SB.Add(L','); m_SB.Add(L' ');
1650
WriteIndent();
1651
}
1652
else
1653
{
1654
WriteIndent();
1655
}
1656
++currItem.valueCount;
1657
}
1658
}
1659
1660
void JsonWriter::WriteIndent(bool oneLess)
1661
{
1662
if (!m_Stack.empty() && !m_Stack.back().singleLineMode)
1663
{
1664
m_SB.AddNewLine();
1665
1666
size_t count = m_Stack.size();
1667
if (count > 0 && oneLess)
1668
{
1669
--count;
1670
}
1671
for (size_t i = 0; i < count; ++i)
1672
{
1673
m_SB.Add(INDENT);
1674
}
1675
}
1676
}
1677
#endif // _D3D12MA_JSON_WRITER_FUNCTIONS
1678
#endif // _D3D12MA_JSON_WRITER
1679
1680
#ifndef _D3D12MA_POOL_ALLOCATOR
1681
/*
1682
Allocator for objects of type T using a list of arrays (pools) to speed up
1683
allocation. Number of elements that can be allocated is not bounded because
1684
allocator can create multiple blocks.
1685
T's constructor and destructor are called explicitly in Alloc and Free (placement new /
1686
explicit destructor call), but not in Clear, which only releases the item blocks.
1687
*/
1688
template<typename T>
1689
class PoolAllocator
1690
{
1691
D3D12MA_CLASS_NO_COPY(PoolAllocator)
1692
public:
1693
// allocationCallbacks externally owned, must outlive this object.
1694
PoolAllocator(const ALLOCATION_CALLBACKS& allocationCallbacks, UINT firstBlockCapacity);
1695
~PoolAllocator() { Clear(); }
1696
1697
void Clear();
1698
template<typename... Types>
1699
T* Alloc(Types... args);
1700
void Free(T* ptr);
1701
1702
private:
1703
union Item
1704
{
1705
UINT NextFreeIndex; // UINT32_MAX means end of list.
1706
alignas(T) char Value[sizeof(T)];
1707
};
1708
1709
struct ItemBlock
1710
{
1711
Item* pItems;
1712
UINT Capacity;
1713
UINT FirstFreeIndex;
1714
};
1715
1716
const ALLOCATION_CALLBACKS& m_AllocationCallbacks;
1717
const UINT m_FirstBlockCapacity;
1718
Vector<ItemBlock> m_ItemBlocks;
1719
1720
ItemBlock& CreateNewBlock();
1721
};
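// Hedged usage sketch (not part of the library): Alloc forwards its arguments to T's
// constructor and Free destroys the object and recycles its slot. "allocs" is assumed
// to be a valid ALLOCATION_CALLBACKS instance; MyNode is hypothetical.
//
// struct MyNode { UINT64 offset; UINT64 size; MyNode(UINT64 o, UINT64 s) : offset(o), size(s) {} };
// PoolAllocator<MyNode> pool(allocs, 32);   // first block holds 32 items
// MyNode* n = pool.Alloc(0ull, 256ull);     // placement-new constructs MyNode{0, 256}
// pool.Free(n);                             // explicit destructor call, slot returned to the free list
// pool.Clear();                             // releases the underlying item blocks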
1722
1723
#ifndef _D3D12MA_POOL_ALLOCATOR_FUNCTIONS
1724
template<typename T>
1725
PoolAllocator<T>::PoolAllocator(const ALLOCATION_CALLBACKS& allocationCallbacks, UINT firstBlockCapacity)
1726
: m_AllocationCallbacks(allocationCallbacks),
1727
m_FirstBlockCapacity(firstBlockCapacity),
1728
m_ItemBlocks(allocationCallbacks)
1729
{
1730
D3D12MA_ASSERT(m_FirstBlockCapacity > 1);
1731
}
1732
1733
template<typename T>
1734
void PoolAllocator<T>::Clear()
1735
{
1736
for(size_t i = m_ItemBlocks.size(); i--; )
1737
{
1738
D3D12MA_DELETE_ARRAY(m_AllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
1739
}
1740
m_ItemBlocks.clear(true);
1741
}
1742
1743
template<typename T> template<typename... Types>
1744
T* PoolAllocator<T>::Alloc(Types... args)
1745
{
1746
for(size_t i = m_ItemBlocks.size(); i--; )
1747
{
1748
ItemBlock& block = m_ItemBlocks[i];
1749
// This block has some free items: Use first one.
1750
if(block.FirstFreeIndex != UINT32_MAX)
1751
{
1752
Item* const pItem = &block.pItems[block.FirstFreeIndex];
1753
block.FirstFreeIndex = pItem->NextFreeIndex;
1754
T* result = (T*)&pItem->Value;
1755
new(result)T(std::forward<Types>(args)...); // Explicit constructor call.
1756
return result;
1757
}
1758
}
1759
1760
// No block has free item: Create new one and use it.
1761
ItemBlock& newBlock = CreateNewBlock();
1762
Item* const pItem = &newBlock.pItems[0];
1763
newBlock.FirstFreeIndex = pItem->NextFreeIndex;
1764
T* result = (T*)pItem->Value;
1765
new(result)T(std::forward<Types>(args)...); // Explicit constructor call.
1766
return result;
1767
}
1768
1769
template<typename T>
1770
void PoolAllocator<T>::Free(T* ptr)
1771
{
1772
// Search all memory blocks to find ptr.
1773
for(size_t i = m_ItemBlocks.size(); i--; )
1774
{
1775
ItemBlock& block = m_ItemBlocks[i];
1776
1777
Item* pItemPtr;
1778
memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
1779
1780
// Check if pItemPtr is in address range of this block.
1781
if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
1782
{
1783
ptr->~T(); // Explicit destructor call.
1784
const UINT index = static_cast<UINT>(pItemPtr - block.pItems);
1785
pItemPtr->NextFreeIndex = block.FirstFreeIndex;
1786
block.FirstFreeIndex = index;
1787
return;
1788
}
1789
}
1790
D3D12MA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
1791
}
1792
1793
template<typename T>
1794
typename PoolAllocator<T>::ItemBlock& PoolAllocator<T>::CreateNewBlock()
1795
{
1796
const UINT newBlockCapacity = m_ItemBlocks.empty() ?
1797
m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;
1798
1799
const ItemBlock newBlock = {
1800
D3D12MA_NEW_ARRAY(m_AllocationCallbacks, Item, newBlockCapacity),
1801
newBlockCapacity,
1802
0 };
1803
1804
m_ItemBlocks.push_back(newBlock);
1805
1806
// Setup singly-linked list of all free items in this block.
1807
for(UINT i = 0; i < newBlockCapacity - 1; ++i)
1808
{
1809
newBlock.pItems[i].NextFreeIndex = i + 1;
1810
}
1811
newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX;
1812
return m_ItemBlocks.back();
1813
}
1814
#endif // _D3D12MA_POOL_ALLOCATOR_FUNCTIONS
1815
#endif // _D3D12MA_POOL_ALLOCATOR
1816
1817
#ifndef _D3D12MA_LIST
1818
/*
1819
Doubly linked list, with elements allocated out of PoolAllocator.
1820
Has custom interface, as well as STL-style interface, including iterator and
1821
const_iterator.
1822
*/
1823
template<typename T>
1824
class List
1825
{
1826
D3D12MA_CLASS_NO_COPY(List)
1827
public:
1828
struct Item
1829
{
1830
Item* pPrev;
1831
Item* pNext;
1832
T Value;
1833
};
1834
1835
class reverse_iterator;
1836
class const_reverse_iterator;
1837
class iterator
1838
{
1839
friend class List<T>;
1840
friend class const_iterator;
1841
1842
public:
1843
iterator() = default;
1844
iterator(const reverse_iterator& src)
1845
: m_pList(src.m_pList), m_pItem(src.m_pItem) {}
1846
1847
T& operator*() const;
1848
T* operator->() const;
1849
1850
iterator& operator++();
1851
iterator& operator--();
1852
iterator operator++(int);
1853
iterator operator--(int);
1854
1855
bool operator==(const iterator& rhs) const;
1856
bool operator!=(const iterator& rhs) const;
1857
1858
private:
1859
List<T>* m_pList = NULL;
1860
Item* m_pItem = NULL;
1861
1862
iterator(List<T>* pList, Item* pItem) : m_pList(pList), m_pItem(pItem) {}
1863
};
1864
1865
class reverse_iterator
1866
{
1867
friend class List<T>;
1868
friend class const_reverse_iterator;
1869
1870
public:
1871
reverse_iterator() = default;
1872
reverse_iterator(const iterator& src)
1873
: m_pList(src.m_pList), m_pItem(src.m_pItem) {}
1874
1875
T& operator*() const;
1876
T* operator->() const;
1877
1878
reverse_iterator& operator++();
1879
reverse_iterator& operator--();
1880
reverse_iterator operator++(int);
1881
reverse_iterator operator--(int);
1882
1883
bool operator==(const reverse_iterator& rhs) const;
1884
bool operator!=(const reverse_iterator& rhs) const;
1885
1886
private:
1887
List<T>* m_pList = NULL;
1888
Item* m_pItem = NULL;
1889
1890
reverse_iterator(List<T>* pList, Item* pItem)
1891
: m_pList(pList), m_pItem(pItem) {}
1892
};
1893
1894
class const_iterator
1895
{
1896
friend class List<T>;
1897
1898
public:
1899
const_iterator() = default;
1900
const_iterator(const iterator& src)
1901
: m_pList(src.m_pList), m_pItem(src.m_pItem) {}
1902
const_iterator(const reverse_iterator& src)
1903
: m_pList(src.m_pList), m_pItem(src.m_pItem) {}
1904
const_iterator(const const_reverse_iterator& src)
1905
: m_pList(src.m_pList), m_pItem(src.m_pItem) {}
1906
1907
iterator dropConst() const;
1908
const T& operator*() const;
1909
const T* operator->() const;
1910
1911
const_iterator& operator++();
1912
const_iterator& operator--();
1913
const_iterator operator++(int);
1914
const_iterator operator--(int);
1915
1916
bool operator==(const const_iterator& rhs) const;
1917
bool operator!=(const const_iterator& rhs) const;
1918
1919
private:
1920
const List<T>* m_pList = NULL;
1921
const Item* m_pItem = NULL;
1922
1923
const_iterator(const List<T>* pList, const Item* pItem)
1924
: m_pList(pList), m_pItem(pItem) {}
1925
};
1926
1927
class const_reverse_iterator
1928
{
1929
friend class List<T>;
1930
1931
public:
1932
const_reverse_iterator() = default;
1933
const_reverse_iterator(const iterator& src)
1934
: m_pList(src.m_pList), m_pItem(src.m_pItem) {}
1935
const_reverse_iterator(const reverse_iterator& src)
1936
: m_pList(src.m_pList), m_pItem(src.m_pItem) {}
1937
const_reverse_iterator(const const_iterator& src)
1938
: m_pList(src.m_pList), m_pItem(src.m_pItem) {}
1939
1940
reverse_iterator dropConst() const;
1941
const T& operator*() const;
1942
const T* operator->() const;
1943
1944
const_reverse_iterator& operator++();
1945
const_reverse_iterator& operator--();
1946
const_reverse_iterator operator++(int);
1947
const_reverse_iterator operator--(int);
1948
1949
bool operator==(const const_reverse_iterator& rhs) const;
1950
bool operator!=(const const_reverse_iterator& rhs) const;
1951
1952
private:
1953
const List<T>* m_pList = NULL;
1954
const Item* m_pItem = NULL;
1955
1956
const_reverse_iterator(const List<T>* pList, const Item* pItem)
1957
: m_pList(pList), m_pItem(pItem) {}
1958
};
1959
1960
// allocationCallbacks externally owned, must outlive this object.
1961
List(const ALLOCATION_CALLBACKS& allocationCallbacks);
1962
// Intentionally not calling Clear, because that would cause unnecessary
// computations to return all items to m_ItemAllocator as free.
1964
~List() = default;
1965
1966
size_t GetCount() const { return m_Count; }
1967
bool IsEmpty() const { return m_Count == 0; }
1968
1969
Item* Front() { return m_pFront; }
1970
const Item* Front() const { return m_pFront; }
1971
Item* Back() { return m_pBack; }
1972
const Item* Back() const { return m_pBack; }
1973
1974
bool empty() const { return IsEmpty(); }
1975
size_t size() const { return GetCount(); }
1976
void push_back(const T& value) { PushBack(value); }
1977
iterator insert(iterator it, const T& value) { return iterator(this, InsertBefore(it.m_pItem, value)); }
1978
void clear() { Clear(); }
1979
void erase(iterator it) { Remove(it.m_pItem); }
1980
1981
iterator begin() { return iterator(this, Front()); }
1982
iterator end() { return iterator(this, NULL); }
1983
reverse_iterator rbegin() { return reverse_iterator(this, Back()); }
1984
reverse_iterator rend() { return reverse_iterator(this, NULL); }
1985
1986
const_iterator cbegin() const { return const_iterator(this, Front()); }
1987
const_iterator cend() const { return const_iterator(this, NULL); }
1988
const_iterator begin() const { return cbegin(); }
1989
const_iterator end() const { return cend(); }
1990
1991
const_reverse_iterator crbegin() const { return const_reverse_iterator(this, Back()); }
1992
const_reverse_iterator crend() const { return const_reverse_iterator(this, NULL); }
1993
const_reverse_iterator rbegin() const { return crbegin(); }
1994
const_reverse_iterator rend() const { return crend(); }
1995
1996
Item* PushBack();
1997
Item* PushFront();
1998
Item* PushBack(const T& value);
1999
Item* PushFront(const T& value);
2000
void PopBack();
2001
void PopFront();
2002
2003
// Item can be null - it means PushBack.
2004
Item* InsertBefore(Item* pItem);
2005
// Item can be null - it means PushFront.
2006
Item* InsertAfter(Item* pItem);
2007
Item* InsertBefore(Item* pItem, const T& value);
2008
Item* InsertAfter(Item* pItem, const T& value);
2009
2010
void Clear();
2011
void Remove(Item* pItem);
2012
2013
private:
2014
const ALLOCATION_CALLBACKS& m_AllocationCallbacks;
2015
PoolAllocator<Item> m_ItemAllocator;
2016
Item* m_pFront;
2017
Item* m_pBack;
2018
size_t m_Count;
2019
};
2020
2021
#ifndef _D3D12MA_LIST_ITERATOR_FUNCTIONS
2022
template<typename T>
2023
T& List<T>::iterator::operator*() const
2024
{
2025
D3D12MA_HEAVY_ASSERT(m_pItem != NULL);
2026
return m_pItem->Value;
2027
}
2028
2029
template<typename T>
2030
T* List<T>::iterator::operator->() const
2031
{
2032
D3D12MA_HEAVY_ASSERT(m_pItem != NULL);
2033
return &m_pItem->Value;
2034
}
2035
2036
template<typename T>
2037
typename List<T>::iterator& List<T>::iterator::operator++()
2038
{
2039
D3D12MA_HEAVY_ASSERT(m_pItem != NULL);
2040
m_pItem = m_pItem->pNext;
2041
return *this;
2042
}
2043
2044
template<typename T>
2045
typename List<T>::iterator& List<T>::iterator::operator--()
2046
{
2047
if (m_pItem != NULL)
2048
{
2049
m_pItem = m_pItem->pPrev;
2050
}
2051
else
2052
{
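// Decrementing the end() iterator moves to the last element of the list.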
2053
D3D12MA_HEAVY_ASSERT(!m_pList->IsEmpty());
2054
m_pItem = m_pList->Back();
2055
}
2056
return *this;
2057
}
2058
2059
template<typename T>
2060
typename List<T>::iterator List<T>::iterator::operator++(int)
2061
{
2062
iterator result = *this;
2063
++* this;
2064
return result;
2065
}
2066
2067
template<typename T>
2068
typename List<T>::iterator List<T>::iterator::operator--(int)
2069
{
2070
iterator result = *this;
2071
--* this;
2072
return result;
2073
}
2074
2075
template<typename T>
2076
bool List<T>::iterator::operator==(const iterator& rhs) const
2077
{
2078
D3D12MA_HEAVY_ASSERT(m_pList == rhs.m_pList);
2079
return m_pItem == rhs.m_pItem;
2080
}
2081
2082
template<typename T>
2083
bool List<T>::iterator::operator!=(const iterator& rhs) const
2084
{
2085
D3D12MA_HEAVY_ASSERT(m_pList == rhs.m_pList);
2086
return m_pItem != rhs.m_pItem;
2087
}
2088
#endif // _D3D12MA_LIST_ITERATOR_FUNCTIONS
2089
2090
#ifndef _D3D12MA_LIST_REVERSE_ITERATOR_FUNCTIONS
2091
template<typename T>
2092
T& List<T>::reverse_iterator::operator*() const
2093
{
2094
D3D12MA_HEAVY_ASSERT(m_pItem != NULL);
2095
return m_pItem->Value;
2096
}
2097
2098
template<typename T>
2099
T* List<T>::reverse_iterator::operator->() const
2100
{
2101
D3D12MA_HEAVY_ASSERT(m_pItem != NULL);
2102
return &m_pItem->Value;
2103
}
2104
2105
template<typename T>
2106
typename List<T>::reverse_iterator& List<T>::reverse_iterator::operator++()
2107
{
2108
D3D12MA_HEAVY_ASSERT(m_pItem != NULL);
2109
m_pItem = m_pItem->pPrev;
2110
return *this;
2111
}
2112
2113
template<typename T>
2114
typename List<T>::reverse_iterator& List<T>::reverse_iterator::operator--()
2115
{
2116
if (m_pItem != NULL)
2117
{
2118
m_pItem = m_pItem->pNext;
2119
}
2120
else
2121
{
2122
D3D12MA_HEAVY_ASSERT(!m_pList->IsEmpty());
2123
m_pItem = m_pList->Front();
2124
}
2125
return *this;
2126
}
2127
2128
template<typename T>
2129
typename List<T>::reverse_iterator List<T>::reverse_iterator::operator++(int)
2130
{
2131
reverse_iterator result = *this;
2132
++* this;
2133
return result;
2134
}
2135
2136
template<typename T>
2137
typename List<T>::reverse_iterator List<T>::reverse_iterator::operator--(int)
2138
{
2139
reverse_iterator result = *this;
2140
--* this;
2141
return result;
2142
}
2143
2144
template<typename T>
2145
bool List<T>::reverse_iterator::operator==(const reverse_iterator& rhs) const
2146
{
2147
D3D12MA_HEAVY_ASSERT(m_pList == rhs.m_pList);
2148
return m_pItem == rhs.m_pItem;
2149
}
2150
2151
template<typename T>
2152
bool List<T>::reverse_iterator::operator!=(const reverse_iterator& rhs) const
2153
{
2154
D3D12MA_HEAVY_ASSERT(m_pList == rhs.m_pList);
2155
return m_pItem != rhs.m_pItem;
2156
}
2157
#endif // _D3D12MA_LIST_REVERSE_ITERATOR_FUNCTIONS
2158
2159
#ifndef _D3D12MA_LIST_CONST_ITERATOR_FUNCTIONS
2160
template<typename T>
2161
typename List<T>::iterator List<T>::const_iterator::dropConst() const
2162
{
2163
return iterator(const_cast<List<T>*>(m_pList), const_cast<Item*>(m_pItem));
2164
}
2165
2166
template<typename T>
2167
const T& List<T>::const_iterator::operator*() const
2168
{
2169
D3D12MA_HEAVY_ASSERT(m_pItem != NULL);
2170
return m_pItem->Value;
2171
}
2172
2173
template<typename T>
2174
const T* List<T>::const_iterator::operator->() const
2175
{
2176
D3D12MA_HEAVY_ASSERT(m_pItem != NULL);
2177
return &m_pItem->Value;
2178
}
2179
2180
template<typename T>
2181
typename List<T>::const_iterator& List<T>::const_iterator::operator++()
2182
{
2183
D3D12MA_HEAVY_ASSERT(m_pItem != NULL);
2184
m_pItem = m_pItem->pNext;
2185
return *this;
2186
}
2187
2188
template<typename T>
2189
typename List<T>::const_iterator& List<T>::const_iterator::operator--()
2190
{
2191
if (m_pItem != NULL)
2192
{
2193
m_pItem = m_pItem->pPrev;
2194
}
2195
else
2196
{
2197
D3D12MA_HEAVY_ASSERT(!m_pList->IsEmpty());
2198
m_pItem = m_pList->Back();
2199
}
2200
return *this;
2201
}
2202
2203
template<typename T>
2204
typename List<T>::const_iterator List<T>::const_iterator::operator++(int)
2205
{
2206
const_iterator result = *this;
2207
++* this;
2208
return result;
2209
}
2210
2211
template<typename T>
2212
typename List<T>::const_iterator List<T>::const_iterator::operator--(int)
2213
{
2214
const_iterator result = *this;
2215
--* this;
2216
return result;
2217
}
2218
2219
template<typename T>
2220
bool List<T>::const_iterator::operator==(const const_iterator& rhs) const
2221
{
2222
D3D12MA_HEAVY_ASSERT(m_pList == rhs.m_pList);
2223
return m_pItem == rhs.m_pItem;
2224
}
2225
2226
template<typename T>
2227
bool List<T>::const_iterator::operator!=(const const_iterator& rhs) const
2228
{
2229
D3D12MA_HEAVY_ASSERT(m_pList == rhs.m_pList);
2230
return m_pItem != rhs.m_pItem;
2231
}
2232
#endif // _D3D12MA_LIST_CONST_ITERATOR_FUNCTIONS
2233
2234
#ifndef _D3D12MA_LIST_CONST_REVERSE_ITERATOR_FUNCTIONS
2235
template<typename T>
2236
typename List<T>::reverse_iterator List<T>::const_reverse_iterator::dropConst() const
2237
{
2238
return reverse_iterator(const_cast<List<T>*>(m_pList), const_cast<Item*>(m_pItem));
2239
}
2240
2241
template<typename T>
2242
const T& List<T>::const_reverse_iterator::operator*() const
2243
{
2244
D3D12MA_HEAVY_ASSERT(m_pItem != NULL);
2245
return m_pItem->Value;
2246
}
2247
2248
template<typename T>
2249
const T* List<T>::const_reverse_iterator::operator->() const
2250
{
2251
D3D12MA_HEAVY_ASSERT(m_pItem != NULL);
2252
return &m_pItem->Value;
2253
}
2254
2255
template<typename T>
2256
typename List<T>::const_reverse_iterator& List<T>::const_reverse_iterator::operator++()
2257
{
2258
D3D12MA_HEAVY_ASSERT(m_pItem != NULL);
2259
m_pItem = m_pItem->pPrev;
2260
return *this;
2261
}
2262
2263
template<typename T>
2264
typename List<T>::const_reverse_iterator& List<T>::const_reverse_iterator::operator--()
2265
{
2266
if (m_pItem != NULL)
2267
{
2268
m_pItem = m_pItem->pNext;
2269
}
2270
else
2271
{
2272
D3D12MA_HEAVY_ASSERT(!m_pList->IsEmpty());
2273
m_pItem = m_pList->Front();
2274
}
2275
return *this;
2276
}
2277
2278
template<typename T>
2279
typename List<T>::const_reverse_iterator List<T>::const_reverse_iterator::operator++(int)
2280
{
2281
const_reverse_iterator result = *this;
2282
++* this;
2283
return result;
2284
}
2285
2286
template<typename T>
2287
typename List<T>::const_reverse_iterator List<T>::const_reverse_iterator::operator--(int)
2288
{
2289
const_reverse_iterator result = *this;
2290
--* this;
2291
return result;
2292
}
2293
2294
template<typename T>
2295
bool List<T>::const_reverse_iterator::operator==(const const_reverse_iterator& rhs) const
2296
{
2297
D3D12MA_HEAVY_ASSERT(m_pList == rhs.m_pList);
2298
return m_pItem == rhs.m_pItem;
2299
}
2300
2301
template<typename T>
2302
bool List<T>::const_reverse_iterator::operator!=(const const_reverse_iterator& rhs) const
2303
{
2304
D3D12MA_HEAVY_ASSERT(m_pList == rhs.m_pList);
2305
return m_pItem != rhs.m_pItem;
2306
}
2307
#endif // _D3D12MA_LIST_CONST_REVERSE_ITERATOR_FUNCTIONS
2308
2309
#ifndef _D3D12MA_LIST_FUNCTIONS
2310
template<typename T>
2311
List<T>::List(const ALLOCATION_CALLBACKS& allocationCallbacks)
2312
: m_AllocationCallbacks(allocationCallbacks),
2313
m_ItemAllocator(allocationCallbacks, 128),
2314
m_pFront(NULL),
2315
m_pBack(NULL),
2316
m_Count(0) {}
2317
2318
template<typename T>
2319
void List<T>::Clear()
2320
{
2321
if(!IsEmpty())
2322
{
2323
Item* pItem = m_pBack;
2324
while(pItem != NULL)
2325
{
2326
Item* const pPrevItem = pItem->pPrev;
2327
m_ItemAllocator.Free(pItem);
2328
pItem = pPrevItem;
2329
}
2330
m_pFront = NULL;
2331
m_pBack = NULL;
2332
m_Count = 0;
2333
}
2334
}
2335
2336
template<typename T>
2337
typename List<T>::Item* List<T>::PushBack()
2338
{
2339
Item* const pNewItem = m_ItemAllocator.Alloc();
2340
pNewItem->pNext = NULL;
2341
if(IsEmpty())
2342
{
2343
pNewItem->pPrev = NULL;
2344
m_pFront = pNewItem;
2345
m_pBack = pNewItem;
2346
m_Count = 1;
2347
}
2348
else
2349
{
2350
pNewItem->pPrev = m_pBack;
2351
m_pBack->pNext = pNewItem;
2352
m_pBack = pNewItem;
2353
++m_Count;
2354
}
2355
return pNewItem;
2356
}
2357
2358
template<typename T>
2359
typename List<T>::Item* List<T>::PushFront()
2360
{
2361
Item* const pNewItem = m_ItemAllocator.Alloc();
2362
pNewItem->pPrev = NULL;
2363
if(IsEmpty())
2364
{
2365
pNewItem->pNext = NULL;
2366
m_pFront = pNewItem;
2367
m_pBack = pNewItem;
2368
m_Count = 1;
2369
}
2370
else
2371
{
2372
pNewItem->pNext = m_pFront;
2373
m_pFront->pPrev = pNewItem;
2374
m_pFront = pNewItem;
2375
++m_Count;
2376
}
2377
return pNewItem;
2378
}
2379
2380
template<typename T>
2381
typename List<T>::Item* List<T>::PushBack(const T& value)
2382
{
2383
Item* const pNewItem = PushBack();
2384
pNewItem->Value = value;
2385
return pNewItem;
2386
}
2387
2388
template<typename T>
2389
typename List<T>::Item* List<T>::PushFront(const T& value)
2390
{
2391
Item* const pNewItem = PushFront();
2392
pNewItem->Value = value;
2393
return pNewItem;
2394
}
2395
2396
template<typename T>
2397
void List<T>::PopBack()
2398
{
2399
D3D12MA_HEAVY_ASSERT(m_Count > 0);
2400
Item* const pBackItem = m_pBack;
2401
Item* const pPrevItem = pBackItem->pPrev;
2402
if(pPrevItem != NULL)
2403
{
2404
pPrevItem->pNext = NULL;
2405
}
2406
m_pBack = pPrevItem;
2407
m_ItemAllocator.Free(pBackItem);
2408
--m_Count;
2409
}
2410
2411
template<typename T>
2412
void List<T>::PopFront()
2413
{
2414
D3D12MA_HEAVY_ASSERT(m_Count > 0);
2415
Item* const pFrontItem = m_pFront;
2416
Item* const pNextItem = pFrontItem->pNext;
2417
if(pNextItem != NULL)
2418
{
2419
pNextItem->pPrev = NULL;
2420
}
2421
m_pFront = pNextItem;
2422
m_ItemAllocator.Free(pFrontItem);
2423
--m_Count;
2424
}
2425
2426
template<typename T>
2427
void List<T>::Remove(Item* pItem)
2428
{
2429
D3D12MA_HEAVY_ASSERT(pItem != NULL);
2430
D3D12MA_HEAVY_ASSERT(m_Count > 0);
2431
2432
if(pItem->pPrev != NULL)
2433
{
2434
pItem->pPrev->pNext = pItem->pNext;
2435
}
2436
else
2437
{
2438
D3D12MA_HEAVY_ASSERT(m_pFront == pItem);
2439
m_pFront = pItem->pNext;
2440
}
2441
2442
if(pItem->pNext != NULL)
2443
{
2444
pItem->pNext->pPrev = pItem->pPrev;
2445
}
2446
else
2447
{
2448
D3D12MA_HEAVY_ASSERT(m_pBack == pItem);
2449
m_pBack = pItem->pPrev;
2450
}
2451
2452
m_ItemAllocator.Free(pItem);
2453
--m_Count;
2454
}
2455
2456
template<typename T>
2457
typename List<T>::Item* List<T>::InsertBefore(Item* pItem)
2458
{
2459
if(pItem != NULL)
2460
{
2461
Item* const prevItem = pItem->pPrev;
2462
Item* const newItem = m_ItemAllocator.Alloc();
2463
newItem->pPrev = prevItem;
2464
newItem->pNext = pItem;
2465
pItem->pPrev = newItem;
2466
if(prevItem != NULL)
2467
{
2468
prevItem->pNext = newItem;
2469
}
2470
else
2471
{
2472
D3D12MA_HEAVY_ASSERT(m_pFront == pItem);
2473
m_pFront = newItem;
2474
}
2475
++m_Count;
2476
return newItem;
2477
}
2478
else
2479
{
2480
return PushBack();
2481
}
2482
}
2483
2484
template<typename T>
2485
typename List<T>::Item* List<T>::InsertAfter(Item* pItem)
2486
{
2487
if(pItem != NULL)
2488
{
2489
Item* const nextItem = pItem->pNext;
2490
Item* const newItem = m_ItemAllocator.Alloc();
2491
newItem->pNext = nextItem;
2492
newItem->pPrev = pItem;
2493
pItem->pNext = newItem;
2494
if(nextItem != NULL)
2495
{
2496
nextItem->pPrev = newItem;
2497
}
2498
else
2499
{
2500
D3D12MA_HEAVY_ASSERT(m_pBack == pItem);
2501
m_pBack = newItem;
2502
}
2503
++m_Count;
2504
return newItem;
2505
}
2506
else
2507
return PushFront();
2508
}
2509
2510
template<typename T>
2511
typename List<T>::Item* List<T>::InsertBefore(Item* pItem, const T& value)
2512
{
2513
Item* const newItem = InsertBefore(pItem);
2514
newItem->Value = value;
2515
return newItem;
2516
}
2517
2518
template<typename T>
2519
typename List<T>::Item* List<T>::InsertAfter(Item* pItem, const T& value)
2520
{
2521
Item* const newItem = InsertAfter(pItem);
2522
newItem->Value = value;
2523
return newItem;
2524
}
2525
#endif // _D3D12MA_LIST_FUNCTIONS
2526
#endif // _D3D12MA_LIST
2527
2528
#ifndef _D3D12MA_INTRUSIVE_LINKED_LIST
/*
Expected interface of ItemTypeTraits:
struct MyItemTypeTraits
{
    using ItemType = MyItem;
    static ItemType* GetPrev(const ItemType* item) { return item->myPrevPtr; }
    static ItemType* GetNext(const ItemType* item) { return item->myNextPtr; }
    static ItemType*& AccessPrev(ItemType* item) { return item->myPrevPtr; }
    static ItemType*& AccessNext(ItemType* item) { return item->myNextPtr; }
};
*/
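/*
Illustrative sketch (not part of the library): a hypothetical item type matching
the traits above. The list links live inside the items themselves, so the list
performs no per-node allocation.

    struct MyItem
    {
        MyItem* myPrevPtr = NULL;
        MyItem* myNextPtr = NULL;
    };
    // MyItemTypeTraits as declared above.
    IntrusiveLinkedList<MyItemTypeTraits> list;
    MyItem a, b;
    list.PushBack(&a);
    list.InsertAfter(&a, &b);
    list.Remove(&b);
    list.RemoveAll(); // the list must be empty before it is destroyed
*/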
2540
template<typename ItemTypeTraits>
2541
class IntrusiveLinkedList
2542
{
2543
public:
2544
using ItemType = typename ItemTypeTraits::ItemType;
2545
static ItemType* GetPrev(const ItemType* item) { return ItemTypeTraits::GetPrev(item); }
2546
static ItemType* GetNext(const ItemType* item) { return ItemTypeTraits::GetNext(item); }
2547
2548
// Movable, not copyable.
2549
IntrusiveLinkedList() = default;
2550
IntrusiveLinkedList(const IntrusiveLinkedList&) = delete;
2551
IntrusiveLinkedList(IntrusiveLinkedList&& src);
2552
IntrusiveLinkedList& operator=(const IntrusiveLinkedList&) = delete;
2553
IntrusiveLinkedList& operator=(IntrusiveLinkedList&& src);
2554
~IntrusiveLinkedList() { D3D12MA_HEAVY_ASSERT(IsEmpty()); }
2555
2556
size_t GetCount() const { return m_Count; }
2557
bool IsEmpty() const { return m_Count == 0; }
2558
2559
ItemType* Front() { return m_Front; }
2560
ItemType* Back() { return m_Back; }
2561
const ItemType* Front() const { return m_Front; }
2562
const ItemType* Back() const { return m_Back; }
2563
2564
void PushBack(ItemType* item);
2565
void PushFront(ItemType* item);
2566
ItemType* PopBack();
2567
ItemType* PopFront();
2568
2569
// existingItem can be null - it means PushBack.
2570
void InsertBefore(ItemType* existingItem, ItemType* newItem);
2571
// existingItem can be null - it means PushFront.
2572
void InsertAfter(ItemType* existingItem, ItemType* newItem);
2573
2574
void Remove(ItemType* item);
2575
void RemoveAll();
2576
2577
private:
2578
ItemType* m_Front = NULL;
2579
ItemType* m_Back = NULL;
2580
size_t m_Count = 0;
2581
};
2582
2583
#ifndef _D3D12MA_INTRUSIVE_LINKED_LIST_FUNCTIONS
2584
template<typename ItemTypeTraits>
2585
IntrusiveLinkedList<ItemTypeTraits>::IntrusiveLinkedList(IntrusiveLinkedList&& src)
2586
: m_Front(src.m_Front), m_Back(src.m_Back), m_Count(src.m_Count)
2587
{
2588
src.m_Front = src.m_Back = NULL;
2589
src.m_Count = 0;
2590
}
2591
2592
template<typename ItemTypeTraits>
2593
IntrusiveLinkedList<ItemTypeTraits>& IntrusiveLinkedList<ItemTypeTraits>::operator=(IntrusiveLinkedList&& src)
2594
{
2595
if (&src != this)
2596
{
2597
D3D12MA_HEAVY_ASSERT(IsEmpty());
2598
m_Front = src.m_Front;
2599
m_Back = src.m_Back;
2600
m_Count = src.m_Count;
2601
src.m_Front = src.m_Back = NULL;
2602
src.m_Count = 0;
2603
}
2604
return *this;
2605
}
2606
2607
template<typename ItemTypeTraits>
2608
void IntrusiveLinkedList<ItemTypeTraits>::PushBack(ItemType* item)
2609
{
2610
D3D12MA_HEAVY_ASSERT(ItemTypeTraits::GetPrev(item) == NULL && ItemTypeTraits::GetNext(item) == NULL);
2611
if (IsEmpty())
2612
{
2613
m_Front = item;
2614
m_Back = item;
2615
m_Count = 1;
2616
}
2617
else
2618
{
2619
ItemTypeTraits::AccessPrev(item) = m_Back;
2620
ItemTypeTraits::AccessNext(m_Back) = item;
2621
m_Back = item;
2622
++m_Count;
2623
}
2624
}
2625
2626
template<typename ItemTypeTraits>
2627
void IntrusiveLinkedList<ItemTypeTraits>::PushFront(ItemType* item)
2628
{
2629
D3D12MA_HEAVY_ASSERT(ItemTypeTraits::GetPrev(item) == NULL && ItemTypeTraits::GetNext(item) == NULL);
2630
if (IsEmpty())
2631
{
2632
m_Front = item;
2633
m_Back = item;
2634
m_Count = 1;
2635
}
2636
else
2637
{
2638
ItemTypeTraits::AccessNext(item) = m_Front;
2639
ItemTypeTraits::AccessPrev(m_Front) = item;
2640
m_Front = item;
2641
++m_Count;
2642
}
2643
}
2644
2645
template<typename ItemTypeTraits>
2646
typename IntrusiveLinkedList<ItemTypeTraits>::ItemType* IntrusiveLinkedList<ItemTypeTraits>::PopBack()
2647
{
2648
D3D12MA_HEAVY_ASSERT(m_Count > 0);
2649
ItemType* const backItem = m_Back;
2650
ItemType* const prevItem = ItemTypeTraits::GetPrev(backItem);
2651
if (prevItem != NULL)
2652
{
2653
ItemTypeTraits::AccessNext(prevItem) = NULL;
2654
}
2655
m_Back = prevItem;
2656
--m_Count;
2657
ItemTypeTraits::AccessPrev(backItem) = NULL;
2658
ItemTypeTraits::AccessNext(backItem) = NULL;
2659
return backItem;
2660
}
2661
2662
template<typename ItemTypeTraits>
2663
typename IntrusiveLinkedList<ItemTypeTraits>::ItemType* IntrusiveLinkedList<ItemTypeTraits>::PopFront()
2664
{
2665
D3D12MA_HEAVY_ASSERT(m_Count > 0);
2666
ItemType* const frontItem = m_Front;
2667
ItemType* const nextItem = ItemTypeTraits::GetNext(frontItem);
2668
if (nextItem != NULL)
2669
{
2670
ItemTypeTraits::AccessPrev(nextItem) = NULL;
2671
}
2672
m_Front = nextItem;
2673
--m_Count;
2674
ItemTypeTraits::AccessPrev(frontItem) = NULL;
2675
ItemTypeTraits::AccessNext(frontItem) = NULL;
2676
return frontItem;
2677
}
2678
2679
template<typename ItemTypeTraits>
2680
void IntrusiveLinkedList<ItemTypeTraits>::InsertBefore(ItemType* existingItem, ItemType* newItem)
2681
{
2682
D3D12MA_HEAVY_ASSERT(newItem != NULL && ItemTypeTraits::GetPrev(newItem) == NULL && ItemTypeTraits::GetNext(newItem) == NULL);
2683
if (existingItem != NULL)
2684
{
2685
ItemType* const prevItem = ItemTypeTraits::GetPrev(existingItem);
2686
ItemTypeTraits::AccessPrev(newItem) = prevItem;
2687
ItemTypeTraits::AccessNext(newItem) = existingItem;
2688
ItemTypeTraits::AccessPrev(existingItem) = newItem;
2689
if (prevItem != NULL)
2690
{
2691
ItemTypeTraits::AccessNext(prevItem) = newItem;
2692
}
2693
else
2694
{
2695
D3D12MA_HEAVY_ASSERT(m_Front == existingItem);
2696
m_Front = newItem;
2697
}
2698
++m_Count;
2699
}
2700
else
2701
PushBack(newItem);
2702
}
2703
2704
template<typename ItemTypeTraits>
2705
void IntrusiveLinkedList<ItemTypeTraits>::InsertAfter(ItemType* existingItem, ItemType* newItem)
2706
{
2707
D3D12MA_HEAVY_ASSERT(newItem != NULL && ItemTypeTraits::GetPrev(newItem) == NULL && ItemTypeTraits::GetNext(newItem) == NULL);
2708
if (existingItem != NULL)
2709
{
2710
ItemType* const nextItem = ItemTypeTraits::GetNext(existingItem);
2711
ItemTypeTraits::AccessNext(newItem) = nextItem;
2712
ItemTypeTraits::AccessPrev(newItem) = existingItem;
2713
ItemTypeTraits::AccessNext(existingItem) = newItem;
2714
if (nextItem != NULL)
2715
{
2716
ItemTypeTraits::AccessPrev(nextItem) = newItem;
2717
}
2718
else
2719
{
2720
D3D12MA_HEAVY_ASSERT(m_Back == existingItem);
2721
m_Back = newItem;
2722
}
2723
++m_Count;
2724
}
2725
else
2726
return PushFront(newItem);
2727
}
2728
2729
template<typename ItemTypeTraits>
2730
void IntrusiveLinkedList<ItemTypeTraits>::Remove(ItemType* item)
2731
{
2732
D3D12MA_HEAVY_ASSERT(item != NULL && m_Count > 0);
2733
if (ItemTypeTraits::GetPrev(item) != NULL)
2734
{
2735
ItemTypeTraits::AccessNext(ItemTypeTraits::AccessPrev(item)) = ItemTypeTraits::GetNext(item);
2736
}
2737
else
2738
{
2739
D3D12MA_HEAVY_ASSERT(m_Front == item);
2740
m_Front = ItemTypeTraits::GetNext(item);
2741
}
2742
2743
if (ItemTypeTraits::GetNext(item) != NULL)
2744
{
2745
ItemTypeTraits::AccessPrev(ItemTypeTraits::AccessNext(item)) = ItemTypeTraits::GetPrev(item);
2746
}
2747
else
2748
{
2749
D3D12MA_HEAVY_ASSERT(m_Back == item);
2750
m_Back = ItemTypeTraits::GetPrev(item);
2751
}
2752
ItemTypeTraits::AccessPrev(item) = NULL;
2753
ItemTypeTraits::AccessNext(item) = NULL;
2754
--m_Count;
2755
}
2756
2757
template<typename ItemTypeTraits>
2758
void IntrusiveLinkedList<ItemTypeTraits>::RemoveAll()
2759
{
2760
if (!IsEmpty())
2761
{
2762
ItemType* item = m_Back;
2763
while (item != NULL)
2764
{
2765
ItemType* const prevItem = ItemTypeTraits::AccessPrev(item);
2766
ItemTypeTraits::AccessPrev(item) = NULL;
2767
ItemTypeTraits::AccessNext(item) = NULL;
2768
item = prevItem;
2769
}
2770
m_Front = NULL;
2771
m_Back = NULL;
2772
m_Count = 0;
2773
}
2774
}
2775
#endif // _D3D12MA_INTRUSIVE_LINKED_LIST_FUNCTIONS
2776
#endif // _D3D12MA_INTRUSIVE_LINKED_LIST
2777
2778
#ifndef _D3D12MA_ALLOCATION_OBJECT_ALLOCATOR
/*
Thread-safe wrapper over PoolAllocator free list, for allocation of Allocation objects.
*/
2782
class AllocationObjectAllocator
2783
{
2784
D3D12MA_CLASS_NO_COPY(AllocationObjectAllocator);
2785
public:
2786
AllocationObjectAllocator(const ALLOCATION_CALLBACKS& allocationCallbacks)
2787
: m_Allocator(allocationCallbacks, 1024) {}
2788
2789
template<typename... Types>
2790
Allocation* Allocate(Types... args);
2791
void Free(Allocation* alloc);
2792
2793
private:
2794
D3D12MA_MUTEX m_Mutex;
2795
PoolAllocator<Allocation> m_Allocator;
2796
};
2797
2798
#ifndef _D3D12MA_ALLOCATION_OBJECT_ALLOCATOR_FUNCTIONS
2799
template<typename... Types>
2800
Allocation* AllocationObjectAllocator::Allocate(Types... args)
2801
{
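// PoolAllocator itself is not thread-safe, so every Allocate()/Free() takes m_Mutex.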
2802
MutexLock mutexLock(m_Mutex);
2803
return m_Allocator.Alloc(std::forward<Types>(args)...);
2804
}
2805
2806
void AllocationObjectAllocator::Free(Allocation* alloc)
2807
{
2808
MutexLock mutexLock(m_Mutex);
2809
m_Allocator.Free(alloc);
2810
}
2811
#endif // _D3D12MA_ALLOCATION_OBJECT_ALLOCATOR_FUNCTIONS
2812
#endif // _D3D12MA_ALLOCATION_OBJECT_ALLOCATOR
2813
2814
#ifndef _D3D12MA_SUBALLOCATION
2815
/*
2816
Represents a region of NormalBlock that is either assigned and returned as
2817
allocated memory block or free.
2818
*/
2819
struct Suballocation
2820
{
2821
UINT64 offset;
2822
UINT64 size;
2823
void* privateData;
2824
SuballocationType type;
2825
};
2826
using SuballocationList = List<Suballocation>;
2827
2828
// Comparator for offsets.
2829
struct SuballocationOffsetLess
2830
{
2831
bool operator()(const Suballocation& lhs, const Suballocation& rhs) const
2832
{
2833
return lhs.offset < rhs.offset;
2834
}
2835
};
2836
2837
struct SuballocationOffsetGreater
2838
{
2839
bool operator()(const Suballocation& lhs, const Suballocation& rhs) const
2840
{
2841
return lhs.offset > rhs.offset;
2842
}
2843
};
2844
2845
struct SuballocationItemSizeLess
2846
{
2847
bool operator()(const SuballocationList::iterator lhs, const SuballocationList::iterator rhs) const
2848
{
2849
return lhs->size < rhs->size;
2850
}
2851
bool operator()(const SuballocationList::iterator lhs, UINT64 rhsSize) const
2852
{
2853
return lhs->size < rhsSize;
2854
}
2855
};
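/*
Illustrative sketch (not part of the library): these comparators are used to keep
suballocation containers ordered. For example, a hypothetical vector `suballocs`
of Suballocation could be sorted by offset with:

    D3D12MA_SORT(suballocs.begin(), suballocs.end(), SuballocationOffsetLess());

SuballocationItemSizeLess can additionally compare a list iterator against a plain
UINT64 size, which is what enables binary search by size over a sorted vector of
SuballocationList::iterator (see BinaryFindFirstNotLess used later in this file).
*/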
2856
#endif // _D3D12MA_SUBALLOCATION
2857
2858
#ifndef _D3D12MA_ALLOCATION_REQUEST
2859
/*
2860
Parameters of planned allocation inside a NormalBlock.
2861
*/
2862
struct AllocationRequest
2863
{
2864
AllocHandle allocHandle;
2865
UINT64 size;
2866
UINT64 algorithmData;
2867
UINT64 sumFreeSize; // Sum size of free items that overlap with proposed allocation.
2868
UINT64 sumItemSize; // Sum size of items to be made lost that overlap with the proposed allocation.
2869
SuballocationList::iterator item;
2870
BOOL zeroInitialized = FALSE; // TODO Implement proper handling in TLSF and Linear, using ZeroInitializedRange class.
2871
};
2872
#endif // _D3D12MA_ALLOCATION_REQUEST
2873
2874
#ifndef _D3D12MA_ZERO_INITIALIZED_RANGE
/*
Keeps track of the range of bytes that are surely initialized with zeros.
Everything outside of it is considered uninitialized memory that may contain
garbage data.

The range is left-inclusive.
*/
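/*
Illustrative sketch (not part of the library): how the tracked range evolves,
assuming a freshly reset object of size 100.

    ZeroInitializedRange r;
    r.Reset(100);                      // zero-initialized range is [0, 100)
    r.IsRangeZeroInitialized(10, 20);  // TRUE
    r.MarkRangeAsUsed(0, 30);          // range shrinks to [30, 100)
    r.IsRangeZeroInitialized(10, 20);  // FALSE
    r.MarkRangeAsUsed(90, 100);        // only one contiguous side can be kept;
                                       // the larger remainder [30, 90) survives
    r.IsRangeZeroInitialized(40, 50);  // still TRUE
*/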
2882
class ZeroInitializedRange
2883
{
2884
public:
2885
void Reset(UINT64 size);
2886
BOOL IsRangeZeroInitialized(UINT64 beg, UINT64 end) const;
2887
void MarkRangeAsUsed(UINT64 usedBeg, UINT64 usedEnd);
2888
2889
private:
2890
UINT64 m_ZeroBeg = 0, m_ZeroEnd = 0;
2891
};
2892
2893
#ifndef _D3D12MA_ZERO_INITIALIZED_RANGE_FUNCTIONS
2894
void ZeroInitializedRange::Reset(UINT64 size)
2895
{
2896
D3D12MA_ASSERT(size > 0);
2897
m_ZeroBeg = 0;
2898
m_ZeroEnd = size;
2899
}
2900
2901
BOOL ZeroInitializedRange::IsRangeZeroInitialized(UINT64 beg, UINT64 end) const
2902
{
2903
D3D12MA_ASSERT(beg < end);
2904
return m_ZeroBeg <= beg && end <= m_ZeroEnd;
2905
}
2906
2907
void ZeroInitializedRange::MarkRangeAsUsed(UINT64 usedBeg, UINT64 usedEnd)
2908
{
2909
D3D12MA_ASSERT(usedBeg < usedEnd);
2910
// No new bytes marked.
2911
if (usedEnd <= m_ZeroBeg || m_ZeroEnd <= usedBeg)
2912
{
2913
return;
2914
}
2915
// All bytes marked.
2916
if (usedBeg <= m_ZeroBeg && m_ZeroEnd <= usedEnd)
2917
{
2918
m_ZeroBeg = m_ZeroEnd = 0;
2919
}
2920
// Some bytes marked.
2921
else
2922
{
2923
const UINT64 remainingZeroBefore = usedBeg > m_ZeroBeg ? usedBeg - m_ZeroBeg : 0;
2924
const UINT64 remainingZeroAfter = usedEnd < m_ZeroEnd ? m_ZeroEnd - usedEnd : 0;
2925
D3D12MA_ASSERT(remainingZeroBefore > 0 || remainingZeroAfter > 0);
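// Only a single contiguous zero range is tracked, so keep the larger of the two
// remaining sides and give up on the other.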
2926
if (remainingZeroBefore > remainingZeroAfter)
2927
{
2928
m_ZeroEnd = usedBeg;
2929
}
2930
else
2931
{
2932
m_ZeroBeg = usedEnd;
2933
}
2934
}
2935
}
2936
#endif // _D3D12MA_ZERO_INITIALIZED_RANGE_FUNCTIONS
2937
#endif // _D3D12MA_ZERO_INITIALIZED_RANGE
2938
2939
#ifndef _D3D12MA_BLOCK_METADATA
/*
Data structure used for bookkeeping of allocations and unused ranges of memory
in a single ID3D12Heap memory block.
*/
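/*
Illustrative sketch (not part of the library): the intended call sequence for a
concrete BlockMetadata implementation, assuming `metadata` points to one and
`size`/`alignment`/`pPrivateData` describe the requested allocation.

    AllocationRequest request = {};
    if (metadata->CreateAllocationRequest(size, alignment,
            false, // upperAddress
            0,     // strategy
            &request))
    {
        metadata->Alloc(request, size, pPrivateData);
    }
    // ...later:
    metadata->Free(request.allocHandle);
*/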
2944
class BlockMetadata
2945
{
2946
public:
2947
BlockMetadata(const ALLOCATION_CALLBACKS* allocationCallbacks, bool isVirtual);
2948
virtual ~BlockMetadata() = default;
2949
2950
virtual void Init(UINT64 size) { m_Size = size; }
2951
// Validates all data structures inside this object. If not valid, returns false.
2952
virtual bool Validate() const = 0;
2953
UINT64 GetSize() const { return m_Size; }
2954
bool IsVirtual() const { return m_IsVirtual; }
2955
virtual size_t GetAllocationCount() const = 0;
2956
virtual size_t GetFreeRegionsCount() const = 0;
2957
virtual UINT64 GetSumFreeSize() const = 0;
2958
virtual UINT64 GetAllocationOffset(AllocHandle allocHandle) const = 0;
2959
// Returns true if this block is empty - contains only a single free suballocation.
2960
virtual bool IsEmpty() const = 0;
2961
2962
virtual void GetAllocationInfo(AllocHandle allocHandle, VIRTUAL_ALLOCATION_INFO& outInfo) const = 0;
2963
2964
// Tries to find a place for suballocation with given parameters inside this block.
2965
// If succeeded, fills pAllocationRequest and returns true.
2966
// If failed, returns false.
2967
virtual bool CreateAllocationRequest(
2968
UINT64 allocSize,
2969
UINT64 allocAlignment,
2970
bool upperAddress,
2971
UINT32 strategy,
2972
AllocationRequest* pAllocationRequest) = 0;
2973
2974
// Makes actual allocation based on request. Request must already be checked and valid.
2975
virtual void Alloc(
2976
const AllocationRequest& request,
2977
UINT64 allocSize,
2978
void* PrivateData) = 0;
2979
2980
virtual void Free(AllocHandle allocHandle) = 0;
2981
// Frees all allocations.
2982
// Careful! Don't call it if there are Allocation objects owned by pPrivateData of cleared allocations!
2983
virtual void Clear() = 0;
2984
2985
virtual AllocHandle GetAllocationListBegin() const = 0;
2986
virtual AllocHandle GetNextAllocation(AllocHandle prevAlloc) const = 0;
2987
virtual UINT64 GetNextFreeRegionSize(AllocHandle alloc) const = 0;
2988
virtual void* GetAllocationPrivateData(AllocHandle allocHandle) const = 0;
2989
virtual void SetAllocationPrivateData(AllocHandle allocHandle, void* privateData) = 0;
2990
2991
virtual void AddStatistics(Statistics& inoutStats) const = 0;
2992
virtual void AddDetailedStatistics(DetailedStatistics& inoutStats) const = 0;
2993
virtual void WriteAllocationInfoToJson(JsonWriter& json) const = 0;
2994
virtual void DebugLogAllAllocations() const = 0;
2995
2996
protected:
2997
const ALLOCATION_CALLBACKS* GetAllocs() const { return m_pAllocationCallbacks; }
2998
UINT64 GetDebugMargin() const { return IsVirtual() ? 0 : D3D12MA_DEBUG_MARGIN; }
2999
3000
void DebugLogAllocation(UINT64 offset, UINT64 size, void* privateData) const;
3001
void PrintDetailedMap_Begin(JsonWriter& json,
3002
UINT64 unusedBytes,
3003
size_t allocationCount,
3004
size_t unusedRangeCount) const;
3005
void PrintDetailedMap_Allocation(JsonWriter& json,
3006
UINT64 offset, UINT64 size, void* privateData) const;
3007
void PrintDetailedMap_UnusedRange(JsonWriter& json,
3008
UINT64 offset, UINT64 size) const;
3009
void PrintDetailedMap_End(JsonWriter& json) const;
3010
3011
private:
3012
UINT64 m_Size;
3013
bool m_IsVirtual;
3014
const ALLOCATION_CALLBACKS* m_pAllocationCallbacks;
3015
3016
D3D12MA_CLASS_NO_COPY(BlockMetadata);
3017
};
3018
3019
#ifndef _D3D12MA_BLOCK_METADATA_FUNCTIONS
3020
BlockMetadata::BlockMetadata(const ALLOCATION_CALLBACKS* allocationCallbacks, bool isVirtual)
3021
: m_Size(0),
3022
m_IsVirtual(isVirtual),
3023
m_pAllocationCallbacks(allocationCallbacks)
3024
{
3025
D3D12MA_ASSERT(allocationCallbacks);
3026
}
3027
3028
void BlockMetadata::DebugLogAllocation(UINT64 offset, UINT64 size, void* privateData) const
3029
{
3030
if (IsVirtual())
3031
{
3032
D3D12MA_DEBUG_LOG(L"UNFREED VIRTUAL ALLOCATION; Offset: %llu; Size: %llu; PrivateData: %p", offset, size, privateData);
3033
}
3034
else
3035
{
3036
D3D12MA_ASSERT(privateData != NULL);
3037
Allocation* allocation = reinterpret_cast<Allocation*>(privateData);
3038
3039
privateData = allocation->GetPrivateData();
3040
LPCWSTR name = allocation->GetName();
3041
3042
D3D12MA_DEBUG_LOG(L"UNFREED ALLOCATION; Offset: %llu; Size: %llu; PrivateData: %p; Name: %s",
3043
offset, size, privateData, name ? name : L"D3D12MA_Empty");
3044
}
3045
}
3046
3047
void BlockMetadata::PrintDetailedMap_Begin(JsonWriter& json,
3048
UINT64 unusedBytes, size_t allocationCount, size_t unusedRangeCount) const
3049
{
3050
json.WriteString(L"TotalBytes");
3051
json.WriteNumber(GetSize());
3052
3053
json.WriteString(L"UnusedBytes");
3054
json.WriteNumber(unusedBytes);
3055
3056
json.WriteString(L"Allocations");
3057
json.WriteNumber((UINT64)allocationCount);
3058
3059
json.WriteString(L"UnusedRanges");
3060
json.WriteNumber((UINT64)unusedRangeCount);
3061
3062
json.WriteString(L"Suballocations");
3063
json.BeginArray();
3064
}
3065
3066
void BlockMetadata::PrintDetailedMap_Allocation(JsonWriter& json,
3067
UINT64 offset, UINT64 size, void* privateData) const
3068
{
3069
json.BeginObject(true);
3070
3071
json.WriteString(L"Offset");
3072
json.WriteNumber(offset);
3073
3074
if (IsVirtual())
3075
{
3076
json.WriteString(L"Size");
3077
json.WriteNumber(size);
3078
if (privateData)
3079
{
3080
json.WriteString(L"CustomData");
3081
json.WriteNumber((uintptr_t)privateData);
3082
}
3083
}
3084
else
3085
{
3086
const Allocation* const alloc = (const Allocation*)privateData;
3087
D3D12MA_ASSERT(alloc);
3088
json.AddAllocationToObject(*alloc);
3089
}
3090
json.EndObject();
3091
}
3092
3093
void BlockMetadata::PrintDetailedMap_UnusedRange(JsonWriter& json,
3094
UINT64 offset, UINT64 size) const
3095
{
3096
json.BeginObject(true);
3097
3098
json.WriteString(L"Offset");
3099
json.WriteNumber(offset);
3100
3101
json.WriteString(L"Type");
3102
json.WriteString(L"FREE");
3103
3104
json.WriteString(L"Size");
3105
json.WriteNumber(size);
3106
3107
json.EndObject();
3108
}
3109
3110
void BlockMetadata::PrintDetailedMap_End(JsonWriter& json) const
3111
{
3112
json.EndArray();
3113
}
3114
#endif // _D3D12MA_BLOCK_METADATA_FUNCTIONS
3115
#endif // _D3D12MA_BLOCK_METADATA
3116
3117
#if 0
3118
#ifndef _D3D12MA_BLOCK_METADATA_GENERIC
3119
class BlockMetadata_Generic : public BlockMetadata
3120
{
3121
public:
3122
BlockMetadata_Generic(const ALLOCATION_CALLBACKS* allocationCallbacks, bool isVirtual);
3123
virtual ~BlockMetadata_Generic() = default;
3124
3125
size_t GetAllocationCount() const override { return m_Suballocations.size() - m_FreeCount; }
3126
UINT64 GetSumFreeSize() const override { return m_SumFreeSize; }
3127
UINT64 GetAllocationOffset(AllocHandle allocHandle) const override { return (UINT64)allocHandle - 1; }
3128
3129
void Init(UINT64 size) override;
3130
bool Validate() const override;
3131
bool IsEmpty() const override;
3132
void GetAllocationInfo(AllocHandle allocHandle, VIRTUAL_ALLOCATION_INFO& outInfo) const override;
3133
3134
bool CreateAllocationRequest(
3135
UINT64 allocSize,
3136
UINT64 allocAlignment,
3137
bool upperAddress,
3138
AllocationRequest* pAllocationRequest) override;
3139
3140
void Alloc(
3141
const AllocationRequest& request,
3142
UINT64 allocSize,
3143
void* privateData) override;
3144
3145
void Free(AllocHandle allocHandle) override;
3146
void Clear() override;
3147
3148
void SetAllocationPrivateData(AllocHandle allocHandle, void* privateData) override;
3149
3150
void AddStatistics(Statistics& inoutStats) const override;
3151
void AddDetailedStatistics(DetailedStatistics& inoutStats) const override;
3152
void WriteAllocationInfoToJson(JsonWriter& json) const override;
3153
3154
private:
3155
UINT m_FreeCount;
3156
UINT64 m_SumFreeSize;
3157
SuballocationList m_Suballocations;
3158
// Suballocations that are free and have size greater than certain threshold.
3159
// Sorted by size, ascending.
3160
Vector<SuballocationList::iterator> m_FreeSuballocationsBySize;
3161
ZeroInitializedRange m_ZeroInitializedRange;
3162
3163
SuballocationList::const_iterator FindAtOffset(UINT64 offset) const;
3164
bool ValidateFreeSuballocationList() const;
3165
3166
// Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
3167
// If yes, fills pAllocHandle and the other output parameters and returns true. If no, returns false.
3168
bool CheckAllocation(
3169
UINT64 allocSize,
3170
UINT64 allocAlignment,
3171
SuballocationList::const_iterator suballocItem,
3172
AllocHandle* pAllocHandle,
3173
UINT64* pSumFreeSize,
3174
UINT64* pSumItemSize,
3175
BOOL *pZeroInitialized) const;
3176
// Given free suballocation, it merges it with following one, which must also be free.
3177
void MergeFreeWithNext(SuballocationList::iterator item);
3178
// Releases given suballocation, making it free.
3179
// Merges it with adjacent free suballocations if applicable.
3180
// Returns iterator to new free suballocation at this place.
3181
SuballocationList::iterator FreeSuballocation(SuballocationList::iterator suballocItem);
3182
// Given free suballocation, it inserts it into sorted list of
3183
// m_FreeSuballocationsBySize if it's suitable.
3184
void RegisterFreeSuballocation(SuballocationList::iterator item);
3185
// Given free suballocation, it removes it from sorted list of
3186
// m_FreeSuballocationsBySize if it's suitable.
3187
void UnregisterFreeSuballocation(SuballocationList::iterator item);
3188
3189
D3D12MA_CLASS_NO_COPY(BlockMetadata_Generic)
3190
};
3191
3192
#ifndef _D3D12MA_BLOCK_METADATA_GENERIC_FUNCTIONS
3193
BlockMetadata_Generic::BlockMetadata_Generic(const ALLOCATION_CALLBACKS* allocationCallbacks, bool isVirtual)
3194
: BlockMetadata(allocationCallbacks, isVirtual),
3195
m_FreeCount(0),
3196
m_SumFreeSize(0),
3197
m_Suballocations(*allocationCallbacks),
3198
m_FreeSuballocationsBySize(*allocationCallbacks)
3199
{
3200
D3D12MA_ASSERT(allocationCallbacks);
3201
}
3202
3203
void BlockMetadata_Generic::Init(UINT64 size)
3204
{
3205
BlockMetadata::Init(size);
3206
m_ZeroInitializedRange.Reset(size);
3207
3208
m_FreeCount = 1;
3209
m_SumFreeSize = size;
3210
3211
Suballocation suballoc = {};
3212
suballoc.offset = 0;
3213
suballoc.size = size;
3214
suballoc.type = SUBALLOCATION_TYPE_FREE;
3215
suballoc.privateData = NULL;
3216
3217
D3D12MA_ASSERT(size > MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
3218
m_Suballocations.push_back(suballoc);
3219
SuballocationList::iterator suballocItem = m_Suballocations.end();
3220
--suballocItem;
3221
m_FreeSuballocationsBySize.push_back(suballocItem);
3222
}
3223
3224
bool BlockMetadata_Generic::Validate() const
3225
{
3226
D3D12MA_VALIDATE(!m_Suballocations.empty());
3227
3228
// Expected offset of new suballocation as calculated from previous ones.
3229
UINT64 calculatedOffset = 0;
3230
// Expected number of free suballocations as calculated from traversing their list.
3231
UINT calculatedFreeCount = 0;
3232
// Expected sum size of free suballocations as calculated from traversing their list.
3233
UINT64 calculatedSumFreeSize = 0;
3234
// Expected number of free suballocations that should be registered in
3235
// m_FreeSuballocationsBySize calculated from traversing their list.
3236
size_t freeSuballocationsToRegister = 0;
3237
// True if previous visited suballocation was free.
3238
bool prevFree = false;
3239
3240
for (const auto& subAlloc : m_Suballocations)
3241
{
3242
// Actual offset of this suballocation doesn't match expected one.
3243
D3D12MA_VALIDATE(subAlloc.offset == calculatedOffset);
3244
3245
const bool currFree = (subAlloc.type == SUBALLOCATION_TYPE_FREE);
3246
// Two adjacent free suballocations are invalid. They should be merged.
3247
D3D12MA_VALIDATE(!prevFree || !currFree);
3248
3249
const Allocation* const alloc = (Allocation*)subAlloc.privateData;
3250
if (!IsVirtual())
3251
{
3252
D3D12MA_VALIDATE(currFree == (alloc == NULL));
3253
}
3254
3255
if (currFree)
3256
{
3257
calculatedSumFreeSize += subAlloc.size;
3258
++calculatedFreeCount;
3259
if (subAlloc.size >= MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
3260
{
3261
++freeSuballocationsToRegister;
3262
}
3263
3264
// Margin required between allocations - every free space must be at least that large.
3265
D3D12MA_VALIDATE(subAlloc.size >= GetDebugMargin());
3266
}
3267
else
3268
{
3269
if (!IsVirtual())
3270
{
3271
D3D12MA_VALIDATE(alloc->GetOffset() == subAlloc.offset);
3272
D3D12MA_VALIDATE(alloc->GetSize() == subAlloc.size);
3273
}
3274
3275
// Margin required between allocations - previous allocation must be free.
3276
D3D12MA_VALIDATE(GetDebugMargin() == 0 || prevFree);
3277
}
3278
3279
calculatedOffset += subAlloc.size;
3280
prevFree = currFree;
3281
}
3282
3283
// Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
3284
// match expected one.
3285
D3D12MA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
3286
3287
UINT64 lastSize = 0;
3288
for (size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
3289
{
3290
SuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
3291
3292
// Only free suballocations can be registered in m_FreeSuballocationsBySize.
3293
D3D12MA_VALIDATE(suballocItem->type == SUBALLOCATION_TYPE_FREE);
3294
// They must be sorted by size ascending.
3295
D3D12MA_VALIDATE(suballocItem->size >= lastSize);
3296
3297
lastSize = suballocItem->size;
3298
}
3299
3300
// Check if totals match calculated values.
3301
D3D12MA_VALIDATE(ValidateFreeSuballocationList());
3302
D3D12MA_VALIDATE(calculatedOffset == GetSize());
3303
D3D12MA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
3304
D3D12MA_VALIDATE(calculatedFreeCount == m_FreeCount);
3305
3306
return true;
3307
}
3308
3309
bool BlockMetadata_Generic::IsEmpty() const
3310
{
3311
return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
3312
}
3313
3314
void BlockMetadata_Generic::GetAllocationInfo(AllocHandle allocHandle, VIRTUAL_ALLOCATION_INFO& outInfo) const
3315
{
3316
Suballocation& suballoc = *FindAtOffset((UINT64)allocHandle - 1).dropConst();
3317
outInfo.Offset = suballoc.offset;
3318
outInfo.Size = suballoc.size;
3319
outInfo.pPrivateData = suballoc.privateData;
3320
}
3321
3322
bool BlockMetadata_Generic::CreateAllocationRequest(
3323
UINT64 allocSize,
3324
UINT64 allocAlignment,
3325
bool upperAddress,
3326
AllocationRequest* pAllocationRequest)
3327
{
3328
D3D12MA_ASSERT(allocSize > 0);
3329
D3D12MA_ASSERT(!upperAddress && "ALLOCATION_FLAG_UPPER_ADDRESS can be used only with linear algorithm.");
3330
D3D12MA_ASSERT(pAllocationRequest != NULL);
3331
D3D12MA_HEAVY_ASSERT(Validate());
3332
3333
// There is not enough total free space in this block to fulfill the request: Early return.
3334
if (m_SumFreeSize < allocSize + GetDebugMargin())
3335
{
3336
return false;
3337
}
3338
3339
// New algorithm, efficiently searching freeSuballocationsBySize.
3340
const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
3341
if (freeSuballocCount > 0)
3342
{
3343
// Find first free suballocation with size not less than allocSize + GetDebugMargin().
3344
SuballocationList::iterator* const it = BinaryFindFirstNotLess(
3345
m_FreeSuballocationsBySize.data(),
3346
m_FreeSuballocationsBySize.data() + freeSuballocCount,
3347
allocSize + GetDebugMargin(),
3348
SuballocationItemSizeLess());
3349
size_t index = it - m_FreeSuballocationsBySize.data();
3350
for (; index < freeSuballocCount; ++index)
3351
{
3352
if (CheckAllocation(
3353
allocSize,
3354
allocAlignment,
3355
m_FreeSuballocationsBySize[index],
3356
&pAllocationRequest->allocHandle,
3357
&pAllocationRequest->sumFreeSize,
3358
&pAllocationRequest->sumItemSize,
3359
&pAllocationRequest->zeroInitialized))
3360
{
3361
pAllocationRequest->item = m_FreeSuballocationsBySize[index];
3362
return true;
3363
}
3364
}
3365
}
3366
3367
return false;
3368
}
3369
3370
void BlockMetadata_Generic::Alloc(
3371
const AllocationRequest& request,
3372
UINT64 allocSize,
3373
void* privateData)
3374
{
3375
D3D12MA_ASSERT(request.item != m_Suballocations.end());
3376
Suballocation& suballoc = *request.item;
3377
// Given suballocation is a free block.
3378
D3D12MA_ASSERT(suballoc.type == SUBALLOCATION_TYPE_FREE);
3379
// Given offset is inside this suballocation.
3380
UINT64 offset = (UINT64)request.allocHandle - 1;
3381
D3D12MA_ASSERT(offset >= suballoc.offset);
3382
const UINT64 paddingBegin = offset - suballoc.offset;
3383
D3D12MA_ASSERT(suballoc.size >= paddingBegin + allocSize);
3384
const UINT64 paddingEnd = suballoc.size - paddingBegin - allocSize;
3385
3386
// Unregister this free suballocation from m_FreeSuballocationsBySize and update
3387
// it to become used.
3388
UnregisterFreeSuballocation(request.item);
3389
3390
suballoc.offset = offset;
3391
suballoc.size = allocSize;
3392
suballoc.type = SUBALLOCATION_TYPE_ALLOCATION;
3393
suballoc.privateData = privateData;
3394
3395
// If there are any free bytes remaining at the end, insert new free suballocation after current one.
3396
if (paddingEnd)
3397
{
3398
Suballocation paddingSuballoc = {};
3399
paddingSuballoc.offset = offset + allocSize;
3400
paddingSuballoc.size = paddingEnd;
3401
paddingSuballoc.type = SUBALLOCATION_TYPE_FREE;
3402
SuballocationList::iterator next = request.item;
3403
++next;
3404
const SuballocationList::iterator paddingEndItem =
3405
m_Suballocations.insert(next, paddingSuballoc);
3406
RegisterFreeSuballocation(paddingEndItem);
3407
}
3408
3409
// If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
3410
if (paddingBegin)
3411
{
3412
Suballocation paddingSuballoc = {};
3413
paddingSuballoc.offset = offset - paddingBegin;
3414
paddingSuballoc.size = paddingBegin;
3415
paddingSuballoc.type = SUBALLOCATION_TYPE_FREE;
3416
const SuballocationList::iterator paddingBeginItem =
3417
m_Suballocations.insert(request.item, paddingSuballoc);
3418
RegisterFreeSuballocation(paddingBeginItem);
3419
}
3420
3421
// Update totals.
3422
m_FreeCount = m_FreeCount - 1;
3423
if (paddingBegin > 0)
3424
{
3425
++m_FreeCount;
3426
}
3427
if (paddingEnd > 0)
3428
{
3429
++m_FreeCount;
3430
}
3431
m_SumFreeSize -= allocSize;
3432
3433
m_ZeroInitializedRange.MarkRangeAsUsed(offset, offset + allocSize);
3434
}
3435
3436
void BlockMetadata_Generic::Free(AllocHandle allocHandle)
3437
{
3438
FreeSuballocation(FindAtOffset((UINT64)allocHandle - 1).dropConst());
3439
}
3440
3441
void BlockMetadata_Generic::Clear()
3442
{
3443
m_FreeCount = 1;
3444
m_SumFreeSize = GetSize();
3445
3446
m_Suballocations.clear();
3447
Suballocation suballoc = {};
3448
suballoc.offset = 0;
3449
suballoc.size = GetSize();
3450
suballoc.type = SUBALLOCATION_TYPE_FREE;
3451
m_Suballocations.push_back(suballoc);
3452
3453
m_FreeSuballocationsBySize.clear();
3454
m_FreeSuballocationsBySize.push_back(m_Suballocations.begin());
3455
}
3456
3457
SuballocationList::const_iterator BlockMetadata_Generic::FindAtOffset(UINT64 offset) const
3458
{
3459
const UINT64 last = m_Suballocations.crbegin()->offset;
3460
if (last == offset)
3461
return m_Suballocations.crbegin();
3462
const UINT64 first = m_Suballocations.cbegin()->offset;
3463
if (first == offset)
3464
return m_Suballocations.cbegin();
3465
3466
const size_t suballocCount = m_Suballocations.size();
3467
const UINT64 step = (last - first + m_Suballocations.cbegin()->size) / suballocCount;
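// step approximates the average suballocation size; it is used below to guess
// whether the requested offset lies closer to the front or the back of the list.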
3468
auto findSuballocation = [&](auto begin, auto end) -> SuballocationList::const_iterator
3469
{
3470
for (auto suballocItem = begin;
3471
suballocItem != end;
3472
++suballocItem)
3473
{
3474
const Suballocation& suballoc = *suballocItem;
3475
if (suballoc.offset == offset)
3476
return suballocItem;
3477
}
3478
D3D12MA_ASSERT(false && "Not found!");
3479
return m_Suballocations.end();
3480
};
3481
// If requested offset is closer to the end of range, search from the end
3482
if ((offset - first) > suballocCount * step / 2)
3483
{
3484
return findSuballocation(m_Suballocations.crbegin(), m_Suballocations.crend());
3485
}
3486
return findSuballocation(m_Suballocations.cbegin(), m_Suballocations.cend());
3487
}
3488
3489
bool BlockMetadata_Generic::ValidateFreeSuballocationList() const
3490
{
3491
UINT64 lastSize = 0;
3492
for (size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
3493
{
3494
const SuballocationList::iterator it = m_FreeSuballocationsBySize[i];
3495
3496
D3D12MA_VALIDATE(it->type == SUBALLOCATION_TYPE_FREE);
3497
D3D12MA_VALIDATE(it->size >= MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
3498
D3D12MA_VALIDATE(it->size >= lastSize);
3499
lastSize = it->size;
3500
}
3501
return true;
3502
}
3503
3504
bool BlockMetadata_Generic::CheckAllocation(
3505
UINT64 allocSize,
3506
UINT64 allocAlignment,
3507
SuballocationList::const_iterator suballocItem,
3508
AllocHandle* pAllocHandle,
3509
UINT64* pSumFreeSize,
3510
UINT64* pSumItemSize,
3511
BOOL* pZeroInitialized) const
3512
{
3513
D3D12MA_ASSERT(allocSize > 0);
3514
D3D12MA_ASSERT(suballocItem != m_Suballocations.cend());
3515
D3D12MA_ASSERT(pAllocHandle != NULL && pZeroInitialized != NULL);
3516
3517
*pSumFreeSize = 0;
3518
*pSumItemSize = 0;
3519
*pZeroInitialized = FALSE;
3520
3521
const Suballocation& suballoc = *suballocItem;
3522
D3D12MA_ASSERT(suballoc.type == SUBALLOCATION_TYPE_FREE);
3523
3524
*pSumFreeSize = suballoc.size;
3525
3526
// Size of this suballocation is too small for this request: Early return.
3527
if (suballoc.size < allocSize)
3528
{
3529
return false;
3530
}
3531
3532
// Start from an offset equal to the beginning of this suballocation, plus the debug margin of the previous allocation if present.
3533
UINT64 offset = suballoc.offset + (suballocItem == m_Suballocations.cbegin() ? 0 : GetDebugMargin());
3534
3535
// Apply alignment.
3536
offset = AlignUp(offset, allocAlignment);
3537
3538
// Calculate padding at the beginning based on current offset.
3539
const UINT64 paddingBegin = offset - suballoc.offset;
3540
3541
// Fail if requested size plus margin after is bigger than size of this suballocation.
3542
if (paddingBegin + allocSize + GetDebugMargin() > suballoc.size)
3543
{
3544
return false;
3545
}
3546
3547
// All tests passed: Success. Offset is already filled.
3548
*pZeroInitialized = m_ZeroInitializedRange.IsRangeZeroInitialized(offset, offset + allocSize);
3549
*pAllocHandle = (AllocHandle)(offset + 1);
3550
return true;
3551
}
3552
3553
void BlockMetadata_Generic::MergeFreeWithNext(SuballocationList::iterator item)
3554
{
3555
D3D12MA_ASSERT(item != m_Suballocations.end());
3556
D3D12MA_ASSERT(item->type == SUBALLOCATION_TYPE_FREE);
3557
3558
SuballocationList::iterator nextItem = item;
3559
++nextItem;
3560
D3D12MA_ASSERT(nextItem != m_Suballocations.end());
3561
D3D12MA_ASSERT(nextItem->type == SUBALLOCATION_TYPE_FREE);
3562
3563
item->size += nextItem->size;
3564
--m_FreeCount;
3565
m_Suballocations.erase(nextItem);
3566
}
3567
3568
SuballocationList::iterator BlockMetadata_Generic::FreeSuballocation(SuballocationList::iterator suballocItem)
3569
{
3570
// Change this suballocation to be marked as free.
3571
Suballocation& suballoc = *suballocItem;
3572
suballoc.type = SUBALLOCATION_TYPE_FREE;
3573
suballoc.privateData = NULL;
3574
3575
// Update totals.
3576
++m_FreeCount;
3577
m_SumFreeSize += suballoc.size;
3578
3579
// Merge with previous and/or next suballocation if it's also free.
3580
bool mergeWithNext = false;
3581
bool mergeWithPrev = false;
3582
3583
SuballocationList::iterator nextItem = suballocItem;
3584
++nextItem;
3585
if ((nextItem != m_Suballocations.end()) && (nextItem->type == SUBALLOCATION_TYPE_FREE))
3586
{
3587
mergeWithNext = true;
3588
}
3589
3590
SuballocationList::iterator prevItem = suballocItem;
3591
if (suballocItem != m_Suballocations.begin())
3592
{
3593
--prevItem;
3594
if (prevItem->type == SUBALLOCATION_TYPE_FREE)
3595
{
3596
mergeWithPrev = true;
3597
}
3598
}
3599
3600
if (mergeWithNext)
3601
{
3602
UnregisterFreeSuballocation(nextItem);
3603
MergeFreeWithNext(suballocItem);
3604
}
3605
3606
if (mergeWithPrev)
3607
{
3608
UnregisterFreeSuballocation(prevItem);
3609
MergeFreeWithNext(prevItem);
3610
RegisterFreeSuballocation(prevItem);
3611
return prevItem;
3612
}
3613
else
3614
{
3615
RegisterFreeSuballocation(suballocItem);
3616
return suballocItem;
3617
}
3618
}
3619
3620
void BlockMetadata_Generic::RegisterFreeSuballocation(SuballocationList::iterator item)
3621
{
3622
D3D12MA_ASSERT(item->type == SUBALLOCATION_TYPE_FREE);
3623
D3D12MA_ASSERT(item->size > 0);
3624
3625
// You may want to enable this validation at the beginning or at the end of
3626
// this function, depending on what you want to check.
3627
D3D12MA_HEAVY_ASSERT(ValidateFreeSuballocationList());
3628
3629
if (item->size >= MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
3630
{
3631
if (m_FreeSuballocationsBySize.empty())
3632
{
3633
m_FreeSuballocationsBySize.push_back(item);
3634
}
3635
else
3636
{
3637
m_FreeSuballocationsBySize.InsertSorted(item, SuballocationItemSizeLess());
3638
}
3639
}
3640
3641
//D3D12MA_HEAVY_ASSERT(ValidateFreeSuballocationList());
3642
}
3643
3644
void BlockMetadata_Generic::UnregisterFreeSuballocation(SuballocationList::iterator item)
3645
{
3646
D3D12MA_ASSERT(item->type == SUBALLOCATION_TYPE_FREE);
3647
D3D12MA_ASSERT(item->size > 0);
3648
3649
// You may want to enable this validation at the beginning or at the end of
3650
// this function, depending on what you want to check.
3651
D3D12MA_HEAVY_ASSERT(ValidateFreeSuballocationList());
3652
3653
if (item->size >= MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
3654
{
3655
SuballocationList::iterator* const it = BinaryFindFirstNotLess(
3656
m_FreeSuballocationsBySize.data(),
3657
m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
3658
item,
3659
SuballocationItemSizeLess());
3660
for (size_t index = it - m_FreeSuballocationsBySize.data();
3661
index < m_FreeSuballocationsBySize.size();
3662
++index)
3663
{
3664
if (m_FreeSuballocationsBySize[index] == item)
3665
{
3666
m_FreeSuballocationsBySize.remove(index);
3667
return;
3668
}
3669
D3D12MA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
3670
}
3671
D3D12MA_ASSERT(0 && "Not found.");
3672
}
3673
3674
//D3D12MA_HEAVY_ASSERT(ValidateFreeSuballocationList());
3675
}
3676
3677
void BlockMetadata_Generic::SetAllocationPrivateData(AllocHandle allocHandle, void* privateData)
3678
{
3679
Suballocation& suballoc = *FindAtOffset((UINT64)allocHandle - 1).dropConst();
3680
suballoc.privateData = privateData;
3681
}
3682
3683
void BlockMetadata_Generic::AddStatistics(Statistics& inoutStats) const
3684
{
3685
inoutStats.BlockCount++;
3686
inoutStats.AllocationCount += (UINT)m_Suballocations.size() - m_FreeCount;
3687
inoutStats.BlockBytes += GetSize();
3688
inoutStats.AllocationBytes += GetSize() - m_SumFreeSize;
3689
}
3690
3691
void BlockMetadata_Generic::AddDetailedStatistics(DetailedStatistics& inoutStats) const
3692
{
3693
inoutStats.Stats.BlockCount++;
3694
inoutStats.Stats.BlockBytes += GetSize();
3695
3696
for (const auto& suballoc : m_Suballocations)
3697
{
3698
if (suballoc.type == SUBALLOCATION_TYPE_FREE)
3699
AddDetailedStatisticsUnusedRange(inoutStats, suballoc.size);
3700
else
3701
AddDetailedStatisticsAllocation(inoutStats, suballoc.size);
3702
}
3703
}
3704
3705
void BlockMetadata_Generic::WriteAllocationInfoToJson(JsonWriter& json) const
3706
{
3707
PrintDetailedMap_Begin(json, GetSumFreeSize(), GetAllocationCount(), m_FreeCount);
3708
for (const auto& suballoc : m_Suballocations)
3709
{
3710
if (suballoc.type == SUBALLOCATION_TYPE_FREE)
3711
PrintDetailedMap_UnusedRange(json, suballoc.offset, suballoc.size);
3712
else
3713
PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.privateData);
3714
}
3715
PrintDetailedMap_End(json);
3716
}
3717
#endif // _D3D12MA_BLOCK_METADATA_GENERIC_FUNCTIONS
3718
#endif // _D3D12MA_BLOCK_METADATA_GENERIC
3719
#endif // #if 0
3720
3721
#ifndef _D3D12MA_BLOCK_METADATA_LINEAR
3722
class BlockMetadata_Linear : public BlockMetadata
3723
{
3724
public:
3725
BlockMetadata_Linear(const ALLOCATION_CALLBACKS* allocationCallbacks, bool isVirtual);
3726
virtual ~BlockMetadata_Linear() = default;
3727
3728
UINT64 GetSumFreeSize() const override { return m_SumFreeSize; }
3729
bool IsEmpty() const override { return GetAllocationCount() == 0; }
3730
UINT64 GetAllocationOffset(AllocHandle allocHandle) const override { return (UINT64)allocHandle - 1; };
3731
3732
void Init(UINT64 size) override;
3733
bool Validate() const override;
3734
size_t GetAllocationCount() const override;
3735
size_t GetFreeRegionsCount() const override;
3736
void GetAllocationInfo(AllocHandle allocHandle, VIRTUAL_ALLOCATION_INFO& outInfo) const override;
3737
3738
bool CreateAllocationRequest(
3739
UINT64 allocSize,
3740
UINT64 allocAlignment,
3741
bool upperAddress,
3742
UINT32 strategy,
3743
AllocationRequest* pAllocationRequest) override;
3744
3745
void Alloc(
3746
const AllocationRequest& request,
3747
UINT64 allocSize,
3748
void* privateData) override;
3749
3750
void Free(AllocHandle allocHandle) override;
3751
void Clear() override;
3752
3753
AllocHandle GetAllocationListBegin() const override;
3754
AllocHandle GetNextAllocation(AllocHandle prevAlloc) const override;
3755
UINT64 GetNextFreeRegionSize(AllocHandle alloc) const override;
3756
void* GetAllocationPrivateData(AllocHandle allocHandle) const override;
3757
void SetAllocationPrivateData(AllocHandle allocHandle, void* privateData) override;
3758
3759
void AddStatistics(Statistics& inoutStats) const override;
3760
void AddDetailedStatistics(DetailedStatistics& inoutStats) const override;
3761
void WriteAllocationInfoToJson(JsonWriter& json) const override;
3762
void DebugLogAllAllocations() const override;
3763
3764
private:
3765
/*
3766
There are two suballocation vectors, used in ping-pong way.
3767
The one with index m_1stVectorIndex is called 1st.
3768
The one with index (m_1stVectorIndex ^ 1) is called 2nd.
3769
2nd can be non-empty only when 1st is not empty.
3770
When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
3771
*/
3772
typedef Vector<Suballocation> SuballocationVectorType;
3773
3774
enum ALLOC_REQUEST_TYPE
3775
{
3776
ALLOC_REQUEST_UPPER_ADDRESS,
3777
ALLOC_REQUEST_END_OF_1ST,
3778
ALLOC_REQUEST_END_OF_2ND,
3779
};
3780
3781
enum SECOND_VECTOR_MODE
3782
{
3783
SECOND_VECTOR_EMPTY,
3784
/*
3785
Suballocations in 2nd vector are created later than the ones in 1st, but they
3786
all have smaller offset.
3787
*/
3788
SECOND_VECTOR_RING_BUFFER,
3789
/*
3790
Suballocations in 2nd vector are upper side of double stack.
3791
They all have offsets higher than those in 1st vector.
3792
Top of this stack means smaller offsets, but higher indices in this vector.
3793
*/
3794
SECOND_VECTOR_DOUBLE_STACK,
3795
};
3796
3797
UINT64 m_SumFreeSize;
3798
SuballocationVectorType m_Suballocations0, m_Suballocations1;
3799
UINT32 m_1stVectorIndex;
3800
SECOND_VECTOR_MODE m_2ndVectorMode;
3801
// Number of items in 1st vector with hAllocation = null at the beginning.
3802
size_t m_1stNullItemsBeginCount;
3803
// Number of other items in 1st vector with hAllocation = null somewhere in the middle.
3804
size_t m_1stNullItemsMiddleCount;
3805
// Number of items in 2nd vector with hAllocation = null.
3806
size_t m_2ndNullItemsCount;
3807
3808
SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
3809
SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
3810
const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
3811
const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
3812
3813
Suballocation& FindSuballocation(UINT64 offset) const;
3814
bool ShouldCompact1st() const;
3815
void CleanupAfterFree();
3816
3817
bool CreateAllocationRequest_LowerAddress(
3818
UINT64 allocSize,
3819
UINT64 allocAlignment,
3820
AllocationRequest* pAllocationRequest);
3821
bool CreateAllocationRequest_UpperAddress(
3822
UINT64 allocSize,
3823
UINT64 allocAlignment,
3824
AllocationRequest* pAllocationRequest);
3825
3826
D3D12MA_CLASS_NO_COPY(BlockMetadata_Linear)
3827
};
3828
3829
#ifndef _D3D12MA_BLOCK_METADATA_LINEAR_FUNCTIONS
3830
BlockMetadata_Linear::BlockMetadata_Linear(const ALLOCATION_CALLBACKS* allocationCallbacks, bool isVirtual)
3831
: BlockMetadata(allocationCallbacks, isVirtual),
3832
m_SumFreeSize(0),
3833
m_Suballocations0(*allocationCallbacks),
3834
m_Suballocations1(*allocationCallbacks),
3835
m_1stVectorIndex(0),
3836
m_2ndVectorMode(SECOND_VECTOR_EMPTY),
3837
m_1stNullItemsBeginCount(0),
3838
m_1stNullItemsMiddleCount(0),
3839
m_2ndNullItemsCount(0)
3840
{
3841
D3D12MA_ASSERT(allocationCallbacks);
3842
}
3843
3844
void BlockMetadata_Linear::Init(UINT64 size)
3845
{
3846
BlockMetadata::Init(size);
3847
m_SumFreeSize = size;
3848
}
3849
3850
bool BlockMetadata_Linear::Validate() const
3851
{
3852
D3D12MA_VALIDATE(GetSumFreeSize() <= GetSize());
3853
const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
3854
const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
3855
3856
D3D12MA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
3857
D3D12MA_VALIDATE(!suballocations1st.empty() ||
3858
suballocations2nd.empty() ||
3859
m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
3860
3861
if (!suballocations1st.empty())
3862
{
3863
// Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
3864
D3D12MA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].type != SUBALLOCATION_TYPE_FREE);
3865
// Null item at the end should be just pop_back().
3866
D3D12MA_VALIDATE(suballocations1st.back().type != SUBALLOCATION_TYPE_FREE);
3867
}
3868
if (!suballocations2nd.empty())
3869
{
3870
// Null item at the end should be just pop_back().
3871
D3D12MA_VALIDATE(suballocations2nd.back().type != SUBALLOCATION_TYPE_FREE);
3872
}
3873
3874
D3D12MA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
3875
D3D12MA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
3876
3877
UINT64 sumUsedSize = 0;
3878
const size_t suballoc1stCount = suballocations1st.size();
3879
UINT64 offset = 0;
3880
3881
if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
3882
{
3883
const size_t suballoc2ndCount = suballocations2nd.size();
3884
size_t nullItem2ndCount = 0;
3885
for (size_t i = 0; i < suballoc2ndCount; ++i)
3886
{
3887
const Suballocation& suballoc = suballocations2nd[i];
3888
const bool currFree = (suballoc.type == SUBALLOCATION_TYPE_FREE);
3889
3890
const Allocation* alloc = (Allocation*)suballoc.privateData;
3891
if (!IsVirtual())
3892
{
3893
D3D12MA_VALIDATE(currFree == (alloc == NULL));
3894
}
3895
D3D12MA_VALIDATE(suballoc.offset >= offset);
3896
3897
if (!currFree)
3898
{
3899
if (!IsVirtual())
3900
{
3901
D3D12MA_VALIDATE(GetAllocationOffset(alloc->GetAllocHandle()) == suballoc.offset);
3902
D3D12MA_VALIDATE(alloc->GetSize() == suballoc.size);
3903
}
3904
sumUsedSize += suballoc.size;
3905
}
3906
else
3907
{
3908
++nullItem2ndCount;
3909
}
3910
3911
offset = suballoc.offset + suballoc.size + GetDebugMargin();
3912
}
3913
3914
D3D12MA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
3915
}
3916
3917
for (size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
3918
{
3919
const Suballocation& suballoc = suballocations1st[i];
3920
D3D12MA_VALIDATE(suballoc.type == SUBALLOCATION_TYPE_FREE &&
3921
suballoc.privateData == NULL);
3922
}
3923
3924
size_t nullItem1stCount = m_1stNullItemsBeginCount;
3925
3926
for (size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
3927
{
3928
const Suballocation& suballoc = suballocations1st[i];
3929
const bool currFree = (suballoc.type == SUBALLOCATION_TYPE_FREE);
3930
3931
const Allocation* alloc = (Allocation*)suballoc.privateData;
3932
if (!IsVirtual())
3933
{
3934
D3D12MA_VALIDATE(currFree == (alloc == NULL));
3935
}
3936
D3D12MA_VALIDATE(suballoc.offset >= offset);
3937
D3D12MA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
3938
3939
if (!currFree)
3940
{
3941
if (!IsVirtual())
3942
{
3943
D3D12MA_VALIDATE(GetAllocationOffset(alloc->GetAllocHandle()) == suballoc.offset);
3944
D3D12MA_VALIDATE(alloc->GetSize() == suballoc.size);
3945
}
3946
sumUsedSize += suballoc.size;
3947
}
3948
else
3949
{
3950
++nullItem1stCount;
3951
}
3952
3953
offset = suballoc.offset + suballoc.size + GetDebugMargin();
3954
}
3955
D3D12MA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
3956
3957
if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
3958
{
3959
const size_t suballoc2ndCount = suballocations2nd.size();
3960
size_t nullItem2ndCount = 0;
3961
for (size_t i = suballoc2ndCount; i--; )
3962
{
3963
const Suballocation& suballoc = suballocations2nd[i];
3964
const bool currFree = (suballoc.type == SUBALLOCATION_TYPE_FREE);
3965
3966
const Allocation* alloc = (Allocation*)suballoc.privateData;
3967
if (!IsVirtual())
3968
{
3969
D3D12MA_VALIDATE(currFree == (alloc == NULL));
3970
}
3971
D3D12MA_VALIDATE(suballoc.offset >= offset);
3972
3973
if (!currFree)
3974
{
3975
if (!IsVirtual())
3976
{
3977
D3D12MA_VALIDATE(GetAllocationOffset(alloc->GetAllocHandle()) == suballoc.offset);
3978
D3D12MA_VALIDATE(alloc->GetSize() == suballoc.size);
3979
}
3980
sumUsedSize += suballoc.size;
3981
}
3982
else
3983
{
3984
++nullItem2ndCount;
3985
}
3986
3987
offset = suballoc.offset + suballoc.size + GetDebugMargin();
3988
}
3989
3990
D3D12MA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
3991
}
3992
3993
D3D12MA_VALIDATE(offset <= GetSize());
3994
D3D12MA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
3995
3996
return true;
3997
}
3998
3999
size_t BlockMetadata_Linear::GetAllocationCount() const
4000
{
4001
return AccessSuballocations1st().size() - m_1stNullItemsBeginCount - m_1stNullItemsMiddleCount +
4002
AccessSuballocations2nd().size() - m_2ndNullItemsCount;
4003
}
4004
4005
size_t BlockMetadata_Linear::GetFreeRegionsCount() const
4006
{
4007
// Function only used for defragmentation, which is disabled for this algorithm
4008
D3D12MA_ASSERT(0);
4009
return SIZE_MAX;
4010
}
4011
4012
void BlockMetadata_Linear::GetAllocationInfo(AllocHandle allocHandle, VIRTUAL_ALLOCATION_INFO& outInfo) const
4013
{
4014
const Suballocation& suballoc = FindSuballocation((UINT64)allocHandle - 1);
4015
outInfo.Offset = suballoc.offset;
4016
outInfo.Size = suballoc.size;
4017
outInfo.pPrivateData = suballoc.privateData;
4018
}
4019
4020
bool BlockMetadata_Linear::CreateAllocationRequest(
4021
UINT64 allocSize,
4022
UINT64 allocAlignment,
4023
bool upperAddress,
4024
UINT32 strategy,
4025
AllocationRequest* pAllocationRequest)
4026
{
4027
D3D12MA_ASSERT(allocSize > 0 && "Cannot allocate empty block!");
4028
D3D12MA_ASSERT(pAllocationRequest != NULL);
4029
D3D12MA_HEAVY_ASSERT(Validate());
4030
pAllocationRequest->size = allocSize;
4031
return upperAddress ?
4032
CreateAllocationRequest_UpperAddress(
4033
allocSize, allocAlignment, pAllocationRequest) :
4034
CreateAllocationRequest_LowerAddress(
4035
allocSize, allocAlignment, pAllocationRequest);
4036
}
4037
4038
void BlockMetadata_Linear::Alloc(
4039
const AllocationRequest& request,
4040
UINT64 allocSize,
4041
void* privateData)
4042
{
4043
UINT64 offset = (UINT64)request.allocHandle - 1;
4044
const Suballocation newSuballoc = { offset, request.size, privateData, SUBALLOCATION_TYPE_ALLOCATION };
4045
4046
switch (request.algorithmData)
4047
{
4048
case ALLOC_REQUEST_UPPER_ADDRESS:
4049
{
4050
D3D12MA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
4051
"CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
4052
SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
4053
suballocations2nd.push_back(newSuballoc);
4054
m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
4055
break;
4056
}
4057
case ALLOC_REQUEST_END_OF_1ST:
4058
{
4059
SuballocationVectorType& suballocations1st = AccessSuballocations1st();
4060
4061
D3D12MA_ASSERT(suballocations1st.empty() ||
4062
offset >= suballocations1st.back().offset + suballocations1st.back().size);
4063
// Check if it fits before the end of the block.
4064
D3D12MA_ASSERT(offset + request.size <= GetSize());
4065
4066
suballocations1st.push_back(newSuballoc);
4067
break;
4068
}
4069
case ALLOC_REQUEST_END_OF_2ND:
4070
{
4071
SuballocationVectorType& suballocations1st = AccessSuballocations1st();
4072
// New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
4073
D3D12MA_ASSERT(!suballocations1st.empty() &&
4074
offset + request.size <= suballocations1st[m_1stNullItemsBeginCount].offset);
4075
SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
4076
4077
switch (m_2ndVectorMode)
4078
{
4079
case SECOND_VECTOR_EMPTY:
4080
// First allocation from second part ring buffer.
4081
D3D12MA_ASSERT(suballocations2nd.empty());
4082
m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
4083
break;
4084
case SECOND_VECTOR_RING_BUFFER:
4085
// 2-part ring buffer is already started.
4086
D3D12MA_ASSERT(!suballocations2nd.empty());
4087
break;
4088
case SECOND_VECTOR_DOUBLE_STACK:
4089
D3D12MA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
4090
break;
4091
default:
4092
D3D12MA_ASSERT(0);
4093
}
4094
4095
suballocations2nd.push_back(newSuballoc);
4096
break;
4097
}
4098
default:
4099
D3D12MA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
4100
}
4101
m_SumFreeSize -= newSuballoc.size;
4102
}
4103
4104
void BlockMetadata_Linear::Free(AllocHandle allocHandle)
4105
{
4106
SuballocationVectorType& suballocations1st = AccessSuballocations1st();
4107
SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
4108
UINT64 offset = (UINT64)allocHandle - 1;
4109
4110
if (!suballocations1st.empty())
4111
{
4112
// First allocation: Mark it as next empty at the beginning.
4113
Suballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
4114
if (firstSuballoc.offset == offset)
4115
{
4116
firstSuballoc.type = SUBALLOCATION_TYPE_FREE;
4117
firstSuballoc.privateData = NULL;
4118
m_SumFreeSize += firstSuballoc.size;
4119
++m_1stNullItemsBeginCount;
4120
CleanupAfterFree();
4121
return;
4122
}
4123
}
4124
4125
// Last allocation in 2-part ring buffer or top of upper stack (same logic).
4126
if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
4127
m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
4128
{
4129
Suballocation& lastSuballoc = suballocations2nd.back();
4130
if (lastSuballoc.offset == offset)
4131
{
4132
m_SumFreeSize += lastSuballoc.size;
4133
suballocations2nd.pop_back();
4134
CleanupAfterFree();
4135
return;
4136
}
4137
}
4138
// Last allocation in 1st vector.
4139
else if (m_2ndVectorMode == SECOND_VECTOR_EMPTY)
4140
{
4141
Suballocation& lastSuballoc = suballocations1st.back();
4142
if (lastSuballoc.offset == offset)
4143
{
4144
m_SumFreeSize += lastSuballoc.size;
4145
suballocations1st.pop_back();
4146
CleanupAfterFree();
4147
return;
4148
}
4149
}
4150
4151
Suballocation refSuballoc;
4152
refSuballoc.offset = offset;
4153
// Rest of members stays uninitialized intentionally for better performance.
4154
4155
// Item from the middle of 1st vector.
4156
{
4157
const SuballocationVectorType::iterator it = BinaryFindSorted(
4158
suballocations1st.begin() + m_1stNullItemsBeginCount,
4159
suballocations1st.end(),
4160
refSuballoc,
4161
SuballocationOffsetLess());
4162
if (it != suballocations1st.end())
4163
{
4164
it->type = SUBALLOCATION_TYPE_FREE;
4165
it->privateData = NULL;
4166
++m_1stNullItemsMiddleCount;
4167
m_SumFreeSize += it->size;
4168
CleanupAfterFree();
4169
return;
4170
}
4171
}
4172
4173
if (m_2ndVectorMode != SECOND_VECTOR_EMPTY)
4174
{
4175
// Item from the middle of 2nd vector.
4176
const SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
4177
BinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, SuballocationOffsetLess()) :
4178
BinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, SuballocationOffsetGreater());
4179
if (it != suballocations2nd.end())
4180
{
4181
it->type = SUBALLOCATION_TYPE_FREE;
4182
it->privateData = NULL;
4183
++m_2ndNullItemsCount;
4184
m_SumFreeSize += it->size;
4185
CleanupAfterFree();
4186
return;
4187
}
4188
}
4189
4190
D3D12MA_ASSERT(0 && "Allocation to free not found in linear allocator!");
4191
}
4192
4193
void BlockMetadata_Linear::Clear()
4194
{
4195
m_SumFreeSize = GetSize();
4196
m_Suballocations0.clear();
4197
m_Suballocations1.clear();
4198
// Leaving m_1stVectorIndex unchanged - it doesn't matter.
4199
m_2ndVectorMode = SECOND_VECTOR_EMPTY;
4200
m_1stNullItemsBeginCount = 0;
4201
m_1stNullItemsMiddleCount = 0;
4202
m_2ndNullItemsCount = 0;
4203
}
4204
4205
AllocHandle BlockMetadata_Linear::GetAllocationListBegin() const
4206
{
4207
// Function only used for defragmentation, which is disabled for this algorithm
4208
D3D12MA_ASSERT(0);
4209
return (AllocHandle)0;
4210
}
4211
4212
AllocHandle BlockMetadata_Linear::GetNextAllocation(AllocHandle prevAlloc) const
4213
{
4214
// Function only used for defragmentation, which is disabled for this algorithm
4215
D3D12MA_ASSERT(0);
4216
return (AllocHandle)0;
4217
}
4218
4219
UINT64 BlockMetadata_Linear::GetNextFreeRegionSize(AllocHandle alloc) const
4220
{
4221
// Function only used for defragmentation, which is disabled for this algorithm
4222
D3D12MA_ASSERT(0);
4223
return 0;
4224
}
4225
4226
void* BlockMetadata_Linear::GetAllocationPrivateData(AllocHandle allocHandle) const
4227
{
4228
return FindSuballocation((UINT64)allocHandle - 1).privateData;
4229
}
4230
4231
void BlockMetadata_Linear::SetAllocationPrivateData(AllocHandle allocHandle, void* privateData)
4232
{
4233
Suballocation& suballoc = FindSuballocation((UINT64)allocHandle - 1);
4234
suballoc.privateData = privateData;
4235
}
4236
4237
void BlockMetadata_Linear::AddStatistics(Statistics& inoutStats) const
4238
{
4239
inoutStats.BlockCount++;
4240
inoutStats.AllocationCount += (UINT)GetAllocationCount();
4241
inoutStats.BlockBytes += GetSize();
4242
inoutStats.AllocationBytes += GetSize() - m_SumFreeSize;
4243
}
4244
4245
void BlockMetadata_Linear::AddDetailedStatistics(DetailedStatistics& inoutStats) const
4246
{
4247
inoutStats.Stats.BlockCount++;
4248
inoutStats.Stats.BlockBytes += GetSize();
4249
4250
const UINT64 size = GetSize();
4251
const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
4252
const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
4253
const size_t suballoc1stCount = suballocations1st.size();
4254
const size_t suballoc2ndCount = suballocations2nd.size();
4255
4256
UINT64 lastOffset = 0;
4257
if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
4258
{
4259
const UINT64 freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
4260
size_t nextAlloc2ndIndex = 0;
4261
while (lastOffset < freeSpace2ndTo1stEnd)
4262
{
4263
// Find next non-null allocation or move nextAllocIndex to the end.
4264
while (nextAlloc2ndIndex < suballoc2ndCount &&
4265
suballocations2nd[nextAlloc2ndIndex].privateData == NULL)
4266
{
4267
++nextAlloc2ndIndex;
4268
}
4269
4270
// Found non-null allocation.
4271
if (nextAlloc2ndIndex < suballoc2ndCount)
4272
{
4273
const Suballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
4274
4275
// 1. Process free space before this allocation.
4276
if (lastOffset < suballoc.offset)
4277
{
4278
// There is free space from lastOffset to suballoc.offset.
4279
const UINT64 unusedRangeSize = suballoc.offset - lastOffset;
4280
AddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
4281
}
4282
4283
// 2. Process this allocation.
4284
// There is allocation with suballoc.offset, suballoc.size.
4285
AddDetailedStatisticsAllocation(inoutStats, suballoc.size);
4286
4287
// 3. Prepare for next iteration.
4288
lastOffset = suballoc.offset + suballoc.size;
4289
++nextAlloc2ndIndex;
4290
}
4291
// We are at the end.
4292
else
4293
{
4294
// There is free space from lastOffset to freeSpace2ndTo1stEnd.
4295
if (lastOffset < freeSpace2ndTo1stEnd)
4296
{
4297
const UINT64 unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
4298
AddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
4299
}
4300
4301
// End of loop.
4302
lastOffset = freeSpace2ndTo1stEnd;
4303
}
4304
}
4305
}
4306
4307
size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
4308
const UINT64 freeSpace1stTo2ndEnd =
4309
m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
4310
while (lastOffset < freeSpace1stTo2ndEnd)
4311
{
4312
// Find next non-null allocation or move nextAllocIndex to the end.
4313
while (nextAlloc1stIndex < suballoc1stCount &&
4314
suballocations1st[nextAlloc1stIndex].privateData == NULL)
4315
{
4316
++nextAlloc1stIndex;
4317
}
4318
4319
// Found non-null allocation.
4320
if (nextAlloc1stIndex < suballoc1stCount)
4321
{
4322
const Suballocation& suballoc = suballocations1st[nextAlloc1stIndex];
4323
4324
// 1. Process free space before this allocation.
4325
if (lastOffset < suballoc.offset)
4326
{
4327
// There is free space from lastOffset to suballoc.offset.
4328
const UINT64 unusedRangeSize = suballoc.offset - lastOffset;
4329
AddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
4330
}
4331
4332
// 2. Process this allocation.
4333
// There is allocation with suballoc.offset, suballoc.size.
4334
AddDetailedStatisticsAllocation(inoutStats, suballoc.size);
4335
4336
// 3. Prepare for next iteration.
4337
lastOffset = suballoc.offset + suballoc.size;
4338
++nextAlloc1stIndex;
4339
}
4340
// We are at the end.
4341
else
4342
{
4343
// There is free space from lastOffset to freeSpace1stTo2ndEnd.
4344
if (lastOffset < freeSpace1stTo2ndEnd)
4345
{
4346
const UINT64 unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
4347
AddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
4348
}
4349
4350
// End of loop.
4351
lastOffset = freeSpace1stTo2ndEnd;
4352
}
4353
}
4354
4355
if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
4356
{
4357
size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
4358
while (lastOffset < size)
4359
{
4360
// Find next non-null allocation or move nextAllocIndex to the end.
4361
while (nextAlloc2ndIndex != SIZE_MAX &&
4362
suballocations2nd[nextAlloc2ndIndex].privateData == NULL)
4363
{
4364
--nextAlloc2ndIndex;
4365
}
4366
4367
// Found non-null allocation.
4368
if (nextAlloc2ndIndex != SIZE_MAX)
4369
{
4370
const Suballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
4371
4372
// 1. Process free space before this allocation.
4373
if (lastOffset < suballoc.offset)
4374
{
4375
// There is free space from lastOffset to suballoc.offset.
4376
const UINT64 unusedRangeSize = suballoc.offset - lastOffset;
4377
AddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
4378
}
4379
4380
// 2. Process this allocation.
4381
// There is allocation with suballoc.offset, suballoc.size.
4382
AddDetailedStatisticsAllocation(inoutStats, suballoc.size);
4383
4384
// 3. Prepare for next iteration.
4385
lastOffset = suballoc.offset + suballoc.size;
4386
--nextAlloc2ndIndex;
4387
}
4388
// We are at the end.
4389
else
4390
{
4391
// There is free space from lastOffset to size.
4392
if (lastOffset < size)
4393
{
4394
const UINT64 unusedRangeSize = size - lastOffset;
4395
AddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
4396
}
4397
4398
// End of loop.
4399
lastOffset = size;
4400
}
4401
}
4402
}
4403
}
4404
4405
void BlockMetadata_Linear::WriteAllocationInfoToJson(JsonWriter& json) const
4406
{
4407
const UINT64 size = GetSize();
4408
const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
4409
const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
4410
const size_t suballoc1stCount = suballocations1st.size();
4411
const size_t suballoc2ndCount = suballocations2nd.size();
4412
4413
// FIRST PASS
4414
4415
size_t unusedRangeCount = 0;
4416
UINT64 usedBytes = 0;
4417
4418
UINT64 lastOffset = 0;
4419
4420
size_t alloc2ndCount = 0;
4421
if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
4422
{
4423
const UINT64 freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
4424
size_t nextAlloc2ndIndex = 0;
4425
while (lastOffset < freeSpace2ndTo1stEnd)
4426
{
4427
// Find next non-null allocation or move nextAlloc2ndIndex to the end.
4428
while (nextAlloc2ndIndex < suballoc2ndCount &&
4429
suballocations2nd[nextAlloc2ndIndex].privateData == NULL)
4430
{
4431
++nextAlloc2ndIndex;
4432
}
4433
4434
// Found non-null allocation.
4435
if (nextAlloc2ndIndex < suballoc2ndCount)
4436
{
4437
const Suballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
4438
4439
// 1. Process free space before this allocation.
4440
if (lastOffset < suballoc.offset)
4441
{
4442
// There is free space from lastOffset to suballoc.offset.
4443
++unusedRangeCount;
4444
}
4445
4446
// 2. Process this allocation.
4447
// There is allocation with suballoc.offset, suballoc.size.
4448
++alloc2ndCount;
4449
usedBytes += suballoc.size;
4450
4451
// 3. Prepare for next iteration.
4452
lastOffset = suballoc.offset + suballoc.size;
4453
++nextAlloc2ndIndex;
4454
}
4455
// We are at the end.
4456
else
4457
{
4458
if (lastOffset < freeSpace2ndTo1stEnd)
4459
{
4460
// There is free space from lastOffset to freeSpace2ndTo1stEnd.
4461
++unusedRangeCount;
4462
}
4463
4464
// End of loop.
4465
lastOffset = freeSpace2ndTo1stEnd;
4466
}
4467
}
4468
}
4469
4470
size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
4471
size_t alloc1stCount = 0;
4472
const UINT64 freeSpace1stTo2ndEnd =
4473
m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
4474
while (lastOffset < freeSpace1stTo2ndEnd)
4475
{
4476
// Find next non-null allocation or move nextAllocIndex to the end.
4477
while (nextAlloc1stIndex < suballoc1stCount &&
4478
suballocations1st[nextAlloc1stIndex].privateData == NULL)
4479
{
4480
++nextAlloc1stIndex;
4481
}
4482
4483
// Found non-null allocation.
4484
if (nextAlloc1stIndex < suballoc1stCount)
4485
{
4486
const Suballocation& suballoc = suballocations1st[nextAlloc1stIndex];
4487
4488
// 1. Process free space before this allocation.
4489
if (lastOffset < suballoc.offset)
4490
{
4491
// There is free space from lastOffset to suballoc.offset.
4492
++unusedRangeCount;
4493
}
4494
4495
// 2. Process this allocation.
4496
// There is allocation with suballoc.offset, suballoc.size.
4497
++alloc1stCount;
4498
usedBytes += suballoc.size;
4499
4500
// 3. Prepare for next iteration.
4501
lastOffset = suballoc.offset + suballoc.size;
4502
++nextAlloc1stIndex;
4503
}
4504
// We are at the end.
4505
else
4506
{
4507
if (lastOffset < size)
4508
{
4509
// There is free space from lastOffset to freeSpace1stTo2ndEnd.
4510
++unusedRangeCount;
4511
}
4512
4513
// End of loop.
4514
lastOffset = freeSpace1stTo2ndEnd;
4515
}
4516
}
4517
4518
if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
4519
{
4520
size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
4521
while (lastOffset < size)
4522
{
4523
// Find next non-null allocation or move nextAlloc2ndIndex to the end.
4524
while (nextAlloc2ndIndex != SIZE_MAX &&
4525
suballocations2nd[nextAlloc2ndIndex].privateData == NULL)
4526
{
4527
--nextAlloc2ndIndex;
4528
}
4529
4530
// Found non-null allocation.
4531
if (nextAlloc2ndIndex != SIZE_MAX)
4532
{
4533
const Suballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
4534
4535
// 1. Process free space before this allocation.
4536
if (lastOffset < suballoc.offset)
4537
{
4538
// There is free space from lastOffset to suballoc.offset.
4539
++unusedRangeCount;
4540
}
4541
4542
// 2. Process this allocation.
4543
// There is allocation with suballoc.offset, suballoc.size.
4544
++alloc2ndCount;
4545
usedBytes += suballoc.size;
4546
4547
// 3. Prepare for next iteration.
4548
lastOffset = suballoc.offset + suballoc.size;
4549
--nextAlloc2ndIndex;
4550
}
4551
// We are at the end.
4552
else
4553
{
4554
if (lastOffset < size)
4555
{
4556
// There is free space from lastOffset to size.
4557
++unusedRangeCount;
4558
}
4559
4560
// End of loop.
4561
lastOffset = size;
4562
}
4563
}
4564
}
4565
4566
const UINT64 unusedBytes = size - usedBytes;
4567
PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
4568
4569
// SECOND PASS
4570
lastOffset = 0;
4571
if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
4572
{
4573
const UINT64 freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
4574
size_t nextAlloc2ndIndex = 0;
4575
while (lastOffset < freeSpace2ndTo1stEnd)
4576
{
4577
// Find next non-null allocation or move nextAlloc2ndIndex to the end.
4578
while (nextAlloc2ndIndex < suballoc2ndCount &&
4579
suballocations2nd[nextAlloc2ndIndex].privateData == NULL)
4580
{
4581
++nextAlloc2ndIndex;
4582
}
4583
4584
// Found non-null allocation.
4585
if (nextAlloc2ndIndex < suballoc2ndCount)
4586
{
4587
const Suballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
4588
4589
// 1. Process free space before this allocation.
4590
if (lastOffset < suballoc.offset)
4591
{
4592
// There is free space from lastOffset to suballoc.offset.
4593
const UINT64 unusedRangeSize = suballoc.offset - lastOffset;
4594
PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
4595
}
4596
4597
// 2. Process this allocation.
4598
// There is allocation with suballoc.offset, suballoc.size.
4599
PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.privateData);
4600
4601
// 3. Prepare for next iteration.
4602
lastOffset = suballoc.offset + suballoc.size;
4603
++nextAlloc2ndIndex;
4604
}
4605
// We are at the end.
4606
else
4607
{
4608
if (lastOffset < freeSpace2ndTo1stEnd)
4609
{
4610
// There is free space from lastOffset to freeSpace2ndTo1stEnd.
4611
const UINT64 unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
4612
PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
4613
}
4614
4615
// End of loop.
4616
lastOffset = freeSpace2ndTo1stEnd;
4617
}
4618
}
4619
}
4620
4621
nextAlloc1stIndex = m_1stNullItemsBeginCount;
4622
while (lastOffset < freeSpace1stTo2ndEnd)
4623
{
4624
// Find next non-null allocation or move nextAllocIndex to the end.
4625
while (nextAlloc1stIndex < suballoc1stCount &&
4626
suballocations1st[nextAlloc1stIndex].privateData == NULL)
4627
{
4628
++nextAlloc1stIndex;
4629
}
4630
4631
// Found non-null allocation.
4632
if (nextAlloc1stIndex < suballoc1stCount)
4633
{
4634
const Suballocation& suballoc = suballocations1st[nextAlloc1stIndex];
4635
4636
// 1. Process free space before this allocation.
4637
if (lastOffset < suballoc.offset)
4638
{
4639
// There is free space from lastOffset to suballoc.offset.
4640
const UINT64 unusedRangeSize = suballoc.offset - lastOffset;
4641
PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
4642
}
4643
4644
// 2. Process this allocation.
4645
// There is allocation with suballoc.offset, suballoc.size.
4646
PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.privateData);
4647
4648
// 3. Prepare for next iteration.
4649
lastOffset = suballoc.offset + suballoc.size;
4650
++nextAlloc1stIndex;
4651
}
4652
// We are at the end.
4653
else
4654
{
4655
if (lastOffset < freeSpace1stTo2ndEnd)
4656
{
4657
// There is free space from lastOffset to freeSpace1stTo2ndEnd.
4658
const UINT64 unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
4659
PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
4660
}
4661
4662
// End of loop.
4663
lastOffset = freeSpace1stTo2ndEnd;
4664
}
4665
}
4666
4667
if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
4668
{
4669
size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
4670
while (lastOffset < size)
4671
{
4672
// Find next non-null allocation or move nextAlloc2ndIndex to the end.
4673
while (nextAlloc2ndIndex != SIZE_MAX &&
4674
suballocations2nd[nextAlloc2ndIndex].privateData == NULL)
4675
{
4676
--nextAlloc2ndIndex;
4677
}
4678
4679
// Found non-null allocation.
4680
if (nextAlloc2ndIndex != SIZE_MAX)
4681
{
4682
const Suballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
4683
4684
// 1. Process free space before this allocation.
4685
if (lastOffset < suballoc.offset)
4686
{
4687
// There is free space from lastOffset to suballoc.offset.
4688
const UINT64 unusedRangeSize = suballoc.offset - lastOffset;
4689
PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
4690
}
4691
4692
// 2. Process this allocation.
4693
// There is allocation with suballoc.offset, suballoc.size.
4694
PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.privateData);
4695
4696
// 3. Prepare for next iteration.
4697
lastOffset = suballoc.offset + suballoc.size;
4698
--nextAlloc2ndIndex;
4699
}
4700
// We are at the end.
4701
else
4702
{
4703
if (lastOffset < size)
4704
{
4705
// There is free space from lastOffset to size.
4706
const UINT64 unusedRangeSize = size - lastOffset;
4707
PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
4708
}
4709
4710
// End of loop.
4711
lastOffset = size;
4712
}
4713
}
4714
}
4715
4716
PrintDetailedMap_End(json);
4717
}
4718
4719
void BlockMetadata_Linear::DebugLogAllAllocations() const
4720
{
4721
const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
4722
for (auto it = suballocations1st.begin() + m_1stNullItemsBeginCount; it != suballocations1st.end(); ++it)
4723
if (it->type != SUBALLOCATION_TYPE_FREE)
4724
DebugLogAllocation(it->offset, it->size, it->privateData);
4725
4726
const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
4727
for (auto it = suballocations2nd.begin(); it != suballocations2nd.end(); ++it)
4728
if (it->type != SUBALLOCATION_TYPE_FREE)
4729
DebugLogAllocation(it->offset, it->size, it->privateData);
4730
}
4731
4732
Suballocation& BlockMetadata_Linear::FindSuballocation(UINT64 offset) const
4733
{
4734
const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
4735
const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
4736
4737
Suballocation refSuballoc;
4738
refSuballoc.offset = offset;
4739
// Rest of members stays uninitialized intentionally for better performance.
4740
4741
// Item from the 1st vector.
4742
{
4743
const SuballocationVectorType::const_iterator it = BinaryFindSorted(
4744
suballocations1st.begin() + m_1stNullItemsBeginCount,
4745
suballocations1st.end(),
4746
refSuballoc,
4747
SuballocationOffsetLess());
4748
if (it != suballocations1st.end())
4749
{
4750
return const_cast<Suballocation&>(*it);
4751
}
4752
}
4753
4754
if (m_2ndVectorMode != SECOND_VECTOR_EMPTY)
4755
{
4756
// Rest of members stays uninitialized intentionally for better performance.
4757
const SuballocationVectorType::const_iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
4758
BinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, SuballocationOffsetLess()) :
4759
BinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, SuballocationOffsetGreater());
4760
if (it != suballocations2nd.end())
4761
{
4762
return const_cast<Suballocation&>(*it);
4763
}
4764
}
4765
4766
D3D12MA_ASSERT(0 && "Allocation not found in linear allocator!");
4767
return const_cast<Suballocation&>(suballocations1st.back()); // Should never occur.
4768
}
4769
4770
bool BlockMetadata_Linear::ShouldCompact1st() const
4771
{
4772
const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
4773
const size_t suballocCount = AccessSuballocations1st().size();
4774
return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
4775
}
4776
4777
void BlockMetadata_Linear::CleanupAfterFree()
4778
{
4779
SuballocationVectorType& suballocations1st = AccessSuballocations1st();
4780
SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
4781
4782
if (IsEmpty())
4783
{
4784
suballocations1st.clear();
4785
suballocations2nd.clear();
4786
m_1stNullItemsBeginCount = 0;
4787
m_1stNullItemsMiddleCount = 0;
4788
m_2ndNullItemsCount = 0;
4789
m_2ndVectorMode = SECOND_VECTOR_EMPTY;
4790
}
4791
else
4792
{
4793
const size_t suballoc1stCount = suballocations1st.size();
4794
const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
4795
D3D12MA_ASSERT(nullItem1stCount <= suballoc1stCount);
4796
4797
// Find more null items at the beginning of 1st vector.
4798
while (m_1stNullItemsBeginCount < suballoc1stCount &&
4799
suballocations1st[m_1stNullItemsBeginCount].type == SUBALLOCATION_TYPE_FREE)
4800
{
4801
++m_1stNullItemsBeginCount;
4802
--m_1stNullItemsMiddleCount;
4803
}
4804
4805
// Find more null items at the end of 1st vector.
4806
while (m_1stNullItemsMiddleCount > 0 &&
4807
suballocations1st.back().type == SUBALLOCATION_TYPE_FREE)
4808
{
4809
--m_1stNullItemsMiddleCount;
4810
suballocations1st.pop_back();
4811
}
4812
4813
// Find more null items at the end of 2nd vector.
4814
while (m_2ndNullItemsCount > 0 &&
4815
suballocations2nd.back().type == SUBALLOCATION_TYPE_FREE)
4816
{
4817
--m_2ndNullItemsCount;
4818
suballocations2nd.pop_back();
4819
}
4820
4821
// Find more null items at the beginning of 2nd vector.
4822
while (m_2ndNullItemsCount > 0 &&
4823
suballocations2nd[0].type == SUBALLOCATION_TYPE_FREE)
4824
{
4825
--m_2ndNullItemsCount;
4826
suballocations2nd.remove(0);
4827
}
4828
4829
if (ShouldCompact1st())
4830
{
4831
const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
4832
size_t srcIndex = m_1stNullItemsBeginCount;
4833
for (size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
4834
{
4835
while (suballocations1st[srcIndex].type == SUBALLOCATION_TYPE_FREE)
4836
{
4837
++srcIndex;
4838
}
4839
if (dstIndex != srcIndex)
4840
{
4841
suballocations1st[dstIndex] = suballocations1st[srcIndex];
4842
}
4843
++srcIndex;
4844
}
4845
suballocations1st.resize(nonNullItemCount);
4846
m_1stNullItemsBeginCount = 0;
4847
m_1stNullItemsMiddleCount = 0;
4848
}
4849
4850
// 2nd vector became empty.
4851
if (suballocations2nd.empty())
4852
{
4853
m_2ndVectorMode = SECOND_VECTOR_EMPTY;
4854
}
4855
4856
// 1st vector became empty.
4857
if (suballocations1st.size() - m_1stNullItemsBeginCount == 0)
4858
{
4859
suballocations1st.clear();
4860
m_1stNullItemsBeginCount = 0;
4861
4862
if (!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
4863
{
4864
// Swap 1st with 2nd. Now 2nd is empty.
4865
m_2ndVectorMode = SECOND_VECTOR_EMPTY;
4866
m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
4867
while (m_1stNullItemsBeginCount < suballocations2nd.size() &&
4868
suballocations2nd[m_1stNullItemsBeginCount].type == SUBALLOCATION_TYPE_FREE)
4869
{
4870
++m_1stNullItemsBeginCount;
4871
--m_1stNullItemsMiddleCount;
4872
}
4873
m_2ndNullItemsCount = 0;
4874
m_1stVectorIndex ^= 1;
4875
}
4876
}
4877
}
4878
4879
D3D12MA_HEAVY_ASSERT(Validate());
4880
}
4881
4882
bool BlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
4883
UINT64 allocSize,
4884
UINT64 allocAlignment,
4885
AllocationRequest* pAllocationRequest)
4886
{
4887
const UINT64 blockSize = GetSize();
4888
SuballocationVectorType& suballocations1st = AccessSuballocations1st();
4889
SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
4890
4891
if (m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
4892
{
4893
// Try to allocate at the end of 1st vector.
4894
4895
UINT64 resultBaseOffset = 0;
4896
if (!suballocations1st.empty())
4897
{
4898
const Suballocation& lastSuballoc = suballocations1st.back();
4899
resultBaseOffset = lastSuballoc.offset + lastSuballoc.size + GetDebugMargin();
4900
}
4901
4902
// Start from offset equal to beginning of free space.
4903
UINT64 resultOffset = resultBaseOffset;
4904
// Apply alignment.
4905
resultOffset = AlignUp(resultOffset, allocAlignment);
4906
4907
const UINT64 freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
4908
suballocations2nd.back().offset : blockSize;
4909
4910
// There is enough free space at the end after alignment.
4911
if (resultOffset + allocSize + GetDebugMargin() <= freeSpaceEnd)
4912
{
4913
// All tests passed: Success.
4914
pAllocationRequest->allocHandle = (AllocHandle)(resultOffset + 1);
4915
// pAllocationRequest->item, customData unused.
4916
pAllocationRequest->algorithmData = ALLOC_REQUEST_END_OF_1ST;
4917
return true;
4918
}
4919
}
4920
4921
// Wrap-around to end of 2nd vector. Try to allocate there, watching for the
4922
// beginning of 1st vector as the end of free space.
4923
if (m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
4924
{
4925
D3D12MA_ASSERT(!suballocations1st.empty());
4926
4927
UINT64 resultBaseOffset = 0;
4928
if (!suballocations2nd.empty())
4929
{
4930
const Suballocation& lastSuballoc = suballocations2nd.back();
4931
resultBaseOffset = lastSuballoc.offset + lastSuballoc.size + GetDebugMargin();
4932
}
4933
4934
// Start from offset equal to beginning of free space.
4935
UINT64 resultOffset = resultBaseOffset;
4936
4937
// Apply alignment.
4938
resultOffset = AlignUp(resultOffset, allocAlignment);
4939
4940
size_t index1st = m_1stNullItemsBeginCount;
4941
// There is enough free space at the end after alignment.
4942
if ((index1st == suballocations1st.size() && resultOffset + allocSize + GetDebugMargin() <= blockSize) ||
4943
(index1st < suballocations1st.size() && resultOffset + allocSize + GetDebugMargin() <= suballocations1st[index1st].offset))
4944
{
4945
// All tests passed: Success.
4946
pAllocationRequest->allocHandle = (AllocHandle)(resultOffset + 1);
4947
pAllocationRequest->algorithmData = ALLOC_REQUEST_END_OF_2ND;
4948
// pAllocationRequest->item, customData unused.
4949
return true;
4950
}
4951
}
4952
return false;
4953
}
4954
4955
bool BlockMetadata_Linear::CreateAllocationRequest_UpperAddress(
4956
UINT64 allocSize,
4957
UINT64 allocAlignment,
4958
AllocationRequest* pAllocationRequest)
4959
{
4960
const UINT64 blockSize = GetSize();
4961
SuballocationVectorType& suballocations1st = AccessSuballocations1st();
4962
SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
4963
4964
if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
4965
{
4966
D3D12MA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
4967
return false;
4968
}
4969
4970
// Try to allocate before 2nd.back(), or end of block if 2nd.empty().
4971
if (allocSize > blockSize)
4972
{
4973
return false;
4974
}
4975
UINT64 resultBaseOffset = blockSize - allocSize;
4976
if (!suballocations2nd.empty())
4977
{
4978
const Suballocation& lastSuballoc = suballocations2nd.back();
4979
resultBaseOffset = lastSuballoc.offset - allocSize;
4980
if (allocSize > lastSuballoc.offset)
4981
{
4982
return false;
4983
}
4984
}
4985
4986
// Start from offset equal to end of free space.
4987
UINT64 resultOffset = resultBaseOffset;
4988
// Apply debugMargin at the end.
4989
if (GetDebugMargin() > 0)
4990
{
4991
if (resultOffset < GetDebugMargin())
4992
{
4993
return false;
4994
}
4995
resultOffset -= GetDebugMargin();
4996
}
4997
4998
// Apply alignment.
4999
resultOffset = AlignDown(resultOffset, allocAlignment);
5000
// There is enough free space.
5001
const UINT64 endOf1st = !suballocations1st.empty() ?
5002
suballocations1st.back().offset + suballocations1st.back().size : 0;
5003
5004
if (endOf1st + GetDebugMargin() <= resultOffset)
5005
{
5006
// All tests passed: Success.
5007
pAllocationRequest->allocHandle = (AllocHandle)(resultOffset + 1);
5008
// pAllocationRequest->item unused.
5009
pAllocationRequest->algorithmData = ALLOC_REQUEST_UPPER_ADDRESS;
5010
return true;
5011
}
5012
return false;
5013
}
5014
#endif // _D3D12MA_BLOCK_METADATA_LINEAR_FUNCTIONS
5015
#endif // _D3D12MA_BLOCK_METADATA_LINEAR
5016
5017
#ifndef _D3D12MA_BLOCK_METADATA_TLSF
5018
class BlockMetadata_TLSF : public BlockMetadata
5019
{
5020
public:
5021
BlockMetadata_TLSF(const ALLOCATION_CALLBACKS* allocationCallbacks, bool isVirtual);
5022
virtual ~BlockMetadata_TLSF();
5023
5024
size_t GetAllocationCount() const override { return m_AllocCount; }
5025
size_t GetFreeRegionsCount() const override { return m_BlocksFreeCount + 1; }
5026
UINT64 GetSumFreeSize() const override { return m_BlocksFreeSize + m_NullBlock->size; }
5027
bool IsEmpty() const override { return m_NullBlock->offset == 0; }
5028
UINT64 GetAllocationOffset(AllocHandle allocHandle) const override { return ((Block*)allocHandle)->offset; };
5029
5030
void Init(UINT64 size) override;
5031
bool Validate() const override;
5032
void GetAllocationInfo(AllocHandle allocHandle, VIRTUAL_ALLOCATION_INFO& outInfo) const override;
5033
5034
bool CreateAllocationRequest(
5035
UINT64 allocSize,
5036
UINT64 allocAlignment,
5037
bool upperAddress,
5038
UINT32 strategy,
5039
AllocationRequest* pAllocationRequest) override;
5040
5041
void Alloc(
5042
const AllocationRequest& request,
5043
UINT64 allocSize,
5044
void* privateData) override;
5045
5046
void Free(AllocHandle allocHandle) override;
5047
void Clear() override;
5048
5049
AllocHandle GetAllocationListBegin() const override;
5050
AllocHandle GetNextAllocation(AllocHandle prevAlloc) const override;
5051
UINT64 GetNextFreeRegionSize(AllocHandle alloc) const override;
5052
void* GetAllocationPrivateData(AllocHandle allocHandle) const override;
5053
void SetAllocationPrivateData(AllocHandle allocHandle, void* privateData) override;
5054
5055
void AddStatistics(Statistics& inoutStats) const override;
5056
void AddDetailedStatistics(DetailedStatistics& inoutStats) const override;
5057
void WriteAllocationInfoToJson(JsonWriter& json) const override;
5058
void DebugLogAllAllocations() const override;
5059
5060
private:
5061
// According to original paper it should be preferable 4 or 5:
5062
// M. Masmano, I. Ripoll, A. Crespo, and J. Real "TLSF: a New Dynamic Memory Allocator for Real-Time Systems"
5063
// http://www.gii.upv.es/tlsf/files/ecrts04_tlsf.pdf
5064
static const UINT8 SECOND_LEVEL_INDEX = 5;
5065
static const UINT16 SMALL_BUFFER_SIZE = 256;
5066
static const UINT INITIAL_BLOCK_ALLOC_COUNT = 16;
5067
static const UINT8 MEMORY_CLASS_SHIFT = 7;
5068
static const UINT8 MAX_MEMORY_CLASSES = 65 - MEMORY_CLASS_SHIFT;
5069
5070
class Block
5071
{
5072
public:
5073
UINT64 offset;
5074
UINT64 size;
5075
Block* prevPhysical;
5076
Block* nextPhysical;
5077
5078
void MarkFree() { prevFree = NULL; }
5079
void MarkTaken() { prevFree = this; }
5080
bool IsFree() const { return prevFree != this; }
5081
void*& PrivateData() { D3D12MA_HEAVY_ASSERT(!IsFree()); return privateData; }
5082
Block*& PrevFree() { return prevFree; }
5083
Block*& NextFree() { D3D12MA_HEAVY_ASSERT(IsFree()); return nextFree; }
5084
5085
private:
5086
Block* prevFree; // Address of the same block here indicates that block is taken
5087
union
5088
{
5089
Block* nextFree;
5090
void* privateData;
5091
};
5092
};
5093
5094
size_t m_AllocCount = 0;
5095
// Total number of free blocks besides null block
5096
size_t m_BlocksFreeCount = 0;
5097
// Total size of free blocks excluding null block
5098
UINT64 m_BlocksFreeSize = 0;
5099
UINT32 m_IsFreeBitmap = 0;
5100
UINT8 m_MemoryClasses = 0;
5101
UINT32 m_InnerIsFreeBitmap[MAX_MEMORY_CLASSES];
5102
UINT32 m_ListsCount = 0;
5103
/*
5104
* 0: 0-3 lists for small buffers
5105
* 1+: 0-(2^SLI-1) lists for normal buffers
5106
*/
5107
Block** m_FreeList = NULL;
5108
PoolAllocator<Block> m_BlockAllocator;
5109
Block* m_NullBlock = NULL;
5110
5111
UINT8 SizeToMemoryClass(UINT64 size) const;
5112
UINT16 SizeToSecondIndex(UINT64 size, UINT8 memoryClass) const;
5113
UINT32 GetListIndex(UINT8 memoryClass, UINT16 secondIndex) const;
5114
UINT32 GetListIndex(UINT64 size) const;
5115
5116
void RemoveFreeBlock(Block* block);
5117
void InsertFreeBlock(Block* block);
5118
void MergeBlock(Block* block, Block* prev);
5119
5120
Block* FindFreeBlock(UINT64 size, UINT32& listIndex) const;
5121
bool CheckBlock(
5122
Block& block,
5123
UINT32 listIndex,
5124
UINT64 allocSize,
5125
UINT64 allocAlignment,
5126
AllocationRequest* pAllocationRequest);
5127
5128
D3D12MA_CLASS_NO_COPY(BlockMetadata_TLSF)
5129
};
5130
5131
#ifndef _D3D12MA_BLOCK_METADATA_TLSF_FUNCTIONS
5132
BlockMetadata_TLSF::BlockMetadata_TLSF(const ALLOCATION_CALLBACKS* allocationCallbacks, bool isVirtual)
5133
: BlockMetadata(allocationCallbacks, isVirtual),
5134
m_BlockAllocator(*allocationCallbacks, INITIAL_BLOCK_ALLOC_COUNT)
5135
{
5136
D3D12MA_ASSERT(allocationCallbacks);
5137
}
5138
5139
BlockMetadata_TLSF::~BlockMetadata_TLSF()
5140
{
5141
D3D12MA_DELETE_ARRAY(*GetAllocs(), m_FreeList, m_ListsCount);
5142
}
5143
5144
void BlockMetadata_TLSF::Init(UINT64 size)
5145
{
5146
BlockMetadata::Init(size);
5147
5148
m_NullBlock = m_BlockAllocator.Alloc();
5149
m_NullBlock->size = size;
5150
m_NullBlock->offset = 0;
5151
m_NullBlock->prevPhysical = NULL;
5152
m_NullBlock->nextPhysical = NULL;
5153
m_NullBlock->MarkFree();
5154
m_NullBlock->NextFree() = NULL;
5155
m_NullBlock->PrevFree() = NULL;
5156
UINT8 memoryClass = SizeToMemoryClass(size);
5157
UINT16 sli = SizeToSecondIndex(size, memoryClass);
5158
m_ListsCount = (memoryClass == 0 ? 0 : (memoryClass - 1) * (1UL << SECOND_LEVEL_INDEX) + sli) + 1;
5159
if (IsVirtual())
5160
m_ListsCount += 1UL << SECOND_LEVEL_INDEX;
5161
else
5162
m_ListsCount += 4;
5163
5164
m_MemoryClasses = memoryClass + 2;
5165
memset(m_InnerIsFreeBitmap, 0, MAX_MEMORY_CLASSES * sizeof(UINT32));
5166
5167
m_FreeList = D3D12MA_NEW_ARRAY(*GetAllocs(), Block*, m_ListsCount);
5168
memset(m_FreeList, 0, m_ListsCount * sizeof(Block*));
5169
}
5170
5171
bool BlockMetadata_TLSF::Validate() const
5172
{
5173
D3D12MA_VALIDATE(GetSumFreeSize() <= GetSize());
5174
5175
UINT64 calculatedSize = m_NullBlock->size;
5176
UINT64 calculatedFreeSize = m_NullBlock->size;
5177
size_t allocCount = 0;
5178
size_t freeCount = 0;
5179
5180
// Check integrity of free lists
5181
for (UINT32 list = 0; list < m_ListsCount; ++list)
5182
{
5183
Block* block = m_FreeList[list];
5184
if (block != NULL)
5185
{
5186
D3D12MA_VALIDATE(block->IsFree());
5187
D3D12MA_VALIDATE(block->PrevFree() == NULL);
5188
while (block->NextFree())
5189
{
5190
D3D12MA_VALIDATE(block->NextFree()->IsFree());
5191
D3D12MA_VALIDATE(block->NextFree()->PrevFree() == block);
5192
block = block->NextFree();
5193
}
5194
}
5195
}
5196
5197
D3D12MA_VALIDATE(m_NullBlock->nextPhysical == NULL);
5198
if (m_NullBlock->prevPhysical)
5199
{
5200
D3D12MA_VALIDATE(m_NullBlock->prevPhysical->nextPhysical == m_NullBlock);
5201
}
5202
5203
// Check all blocks
5204
UINT64 nextOffset = m_NullBlock->offset;
5205
for (Block* prev = m_NullBlock->prevPhysical; prev != NULL; prev = prev->prevPhysical)
5206
{
5207
D3D12MA_VALIDATE(prev->offset + prev->size == nextOffset);
5208
nextOffset = prev->offset;
5209
calculatedSize += prev->size;
5210
5211
UINT32 listIndex = GetListIndex(prev->size);
5212
if (prev->IsFree())
5213
{
5214
++freeCount;
5215
// Check if free block belongs to free list
5216
Block* freeBlock = m_FreeList[listIndex];
5217
D3D12MA_VALIDATE(freeBlock != NULL);
5218
5219
bool found = false;
5220
do
5221
{
5222
if (freeBlock == prev)
5223
found = true;
5224
5225
freeBlock = freeBlock->NextFree();
5226
} while (!found && freeBlock != NULL);
5227
5228
D3D12MA_VALIDATE(found);
5229
calculatedFreeSize += prev->size;
5230
}
5231
else
5232
{
5233
++allocCount;
5234
// Check if taken block is not on a free list
5235
Block* freeBlock = m_FreeList[listIndex];
5236
while (freeBlock)
5237
{
5238
D3D12MA_VALIDATE(freeBlock != prev);
5239
freeBlock = freeBlock->NextFree();
5240
}
5241
}
5242
5243
if (prev->prevPhysical)
5244
{
5245
D3D12MA_VALIDATE(prev->prevPhysical->nextPhysical == prev);
5246
}
5247
}
5248
5249
D3D12MA_VALIDATE(nextOffset == 0);
5250
D3D12MA_VALIDATE(calculatedSize == GetSize());
5251
D3D12MA_VALIDATE(calculatedFreeSize == GetSumFreeSize());
5252
D3D12MA_VALIDATE(allocCount == m_AllocCount);
5253
D3D12MA_VALIDATE(freeCount == m_BlocksFreeCount);
5254
5255
return true;
5256
}
5257
5258
void BlockMetadata_TLSF::GetAllocationInfo(AllocHandle allocHandle, VIRTUAL_ALLOCATION_INFO& outInfo) const
5259
{
5260
Block* block = (Block*)allocHandle;
5261
D3D12MA_ASSERT(!block->IsFree() && "Cannot get allocation info for free block!");
5262
outInfo.Offset = block->offset;
5263
outInfo.Size = block->size;
5264
outInfo.pPrivateData = block->PrivateData();
5265
}
5266
5267
bool BlockMetadata_TLSF::CreateAllocationRequest(
5268
UINT64 allocSize,
5269
UINT64 allocAlignment,
5270
bool upperAddress,
5271
UINT32 strategy,
5272
AllocationRequest* pAllocationRequest)
5273
{
5274
D3D12MA_ASSERT(allocSize > 0 && "Cannot allocate empty block!");
5275
D3D12MA_ASSERT(!upperAddress && "ALLOCATION_FLAG_UPPER_ADDRESS can be used only with linear algorithm.");
5276
D3D12MA_ASSERT(pAllocationRequest != NULL);
5277
D3D12MA_HEAVY_ASSERT(Validate());
5278
5279
allocSize += GetDebugMargin();
5280
// Quick check for too small pool
5281
if (allocSize > GetSumFreeSize())
5282
return false;
5283
5284
// If no free blocks in pool then check only null block
5285
if (m_BlocksFreeCount == 0)
5286
return CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, pAllocationRequest);
5287
5288
// Round up to the next block
5289
UINT64 sizeForNextList = allocSize;
5290
UINT16 smallSizeStep = SMALL_BUFFER_SIZE / (IsVirtual() ? 1 << SECOND_LEVEL_INDEX : 4);
5291
if (allocSize > SMALL_BUFFER_SIZE)
5292
{
5293
sizeForNextList += (1ULL << (BitScanMSB(allocSize) - SECOND_LEVEL_INDEX));
5294
}
5295
else if (allocSize > SMALL_BUFFER_SIZE - smallSizeStep)
5296
sizeForNextList = SMALL_BUFFER_SIZE + 1;
5297
else
5298
sizeForNextList += smallSizeStep;
5299
5300
UINT32 nextListIndex = 0;
5301
UINT32 prevListIndex = 0;
5302
Block* nextListBlock = NULL;
5303
Block* prevListBlock = NULL;
5304
5305
// Check blocks according to strategies
5306
if (strategy & ALLOCATION_FLAG_STRATEGY_MIN_TIME)
5307
{
5308
// Quick check for larger block first
5309
nextListBlock = FindFreeBlock(sizeForNextList, nextListIndex);
5310
if (nextListBlock != NULL && CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, pAllocationRequest))
5311
return true;
5312
5313
// If not fitted then null block
5314
if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, pAllocationRequest))
5315
return true;
5316
5317
// Null block failed, search larger bucket
5318
while (nextListBlock)
5319
{
5320
if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, pAllocationRequest))
5321
return true;
5322
nextListBlock = nextListBlock->NextFree();
5323
}
5324
5325
// Failed again, check best fit bucket
5326
prevListBlock = FindFreeBlock(allocSize, prevListIndex);
5327
while (prevListBlock)
5328
{
5329
if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, pAllocationRequest))
5330
return true;
5331
prevListBlock = prevListBlock->NextFree();
5332
}
5333
}
5334
else if (strategy & ALLOCATION_FLAG_STRATEGY_MIN_MEMORY)
5335
{
5336
// Check best fit bucket
5337
prevListBlock = FindFreeBlock(allocSize, prevListIndex);
5338
while (prevListBlock)
5339
{
5340
if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, pAllocationRequest))
5341
return true;
5342
prevListBlock = prevListBlock->NextFree();
5343
}
5344
5345
// If failed check null block
5346
if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, pAllocationRequest))
5347
return true;
5348
5349
// Check larger bucket
5350
nextListBlock = FindFreeBlock(sizeForNextList, nextListIndex);
5351
while (nextListBlock)
5352
{
5353
if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, pAllocationRequest))
5354
return true;
5355
nextListBlock = nextListBlock->NextFree();
5356
}
5357
}
5358
else if (strategy & ALLOCATION_FLAG_STRATEGY_MIN_OFFSET)
5359
{
5360
// Perform search from the start
5361
Vector<Block*> blockList(m_BlocksFreeCount, *GetAllocs());
5362
5363
size_t i = m_BlocksFreeCount;
5364
for (Block* block = m_NullBlock->prevPhysical; block != NULL; block = block->prevPhysical)
5365
{
5366
if (block->IsFree() && block->size >= allocSize)
5367
blockList[--i] = block;
5368
}
5369
5370
for (; i < m_BlocksFreeCount; ++i)
5371
{
5372
Block& block = *blockList[i];
5373
if (CheckBlock(block, GetListIndex(block.size), allocSize, allocAlignment, pAllocationRequest))
5374
return true;
5375
}
5376
5377
// If failed check null block
5378
if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, pAllocationRequest))
5379
return true;
5380
5381
// Whole range searched, no more memory
5382
return false;
5383
}
5384
else
5385
{
5386
// Check larger bucket
5387
nextListBlock = FindFreeBlock(sizeForNextList, nextListIndex);
5388
while (nextListBlock)
5389
{
5390
if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, pAllocationRequest))
5391
return true;
5392
nextListBlock = nextListBlock->NextFree();
5393
}
5394
5395
// If failed check null block
5396
if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, pAllocationRequest))
5397
return true;
5398
5399
// Check best fit bucket
5400
prevListBlock = FindFreeBlock(allocSize, prevListIndex);
5401
while (prevListBlock)
5402
{
5403
if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, pAllocationRequest))
5404
return true;
5405
prevListBlock = prevListBlock->NextFree();
5406
}
5407
}
5408
5409
// Worst case, full search has to be done
5410
while (++nextListIndex < m_ListsCount)
5411
{
5412
nextListBlock = m_FreeList[nextListIndex];
5413
while (nextListBlock)
5414
{
5415
if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, pAllocationRequest))
5416
return true;
5417
nextListBlock = nextListBlock->NextFree();
5418
}
5419
}
5420
5421
// No more memory sadly
5422
return false;
5423
}
5424
5425
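// Note: Alloc() consumes a request produced by CreateAllocationRequest() above. It pops the
// chosen block from its free list, splits off any leading padding needed to reach the aligned
// offset stored in request.algorithmData, splits the unused tail into a new free block (or a
// new null block), and, when D3D12MA_DEBUG_MARGIN is enabled, carves a small free guard block
// right after the allocation.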
void BlockMetadata_TLSF::Alloc(
5426
const AllocationRequest& request,
5427
UINT64 allocSize,
5428
void* privateData)
5429
{
5430
// Get block and pop it from the free list
5431
Block* currentBlock = (Block*)request.allocHandle;
5432
UINT64 offset = request.algorithmData;
5433
D3D12MA_ASSERT(currentBlock != NULL);
5434
D3D12MA_ASSERT(currentBlock->offset <= offset);
5435
5436
if (currentBlock != m_NullBlock)
5437
RemoveFreeBlock(currentBlock);
5438
5439
// Append missing alignment to prev block or create new one
5440
UINT64 missingAlignment = offset - currentBlock->offset;
5441
if (missingAlignment)
5442
{
5443
Block* prevBlock = currentBlock->prevPhysical;
5444
D3D12MA_ASSERT(prevBlock != NULL && "There should be no missing alignment at offset 0!");
5445
5446
if (prevBlock->IsFree() && prevBlock->size != GetDebugMargin())
5447
{
5448
UINT32 oldList = GetListIndex(prevBlock->size);
5449
prevBlock->size += missingAlignment;
5450
// Check if new size crosses list bucket
5451
if (oldList != GetListIndex(prevBlock->size))
5452
{
5453
prevBlock->size -= missingAlignment;
5454
RemoveFreeBlock(prevBlock);
5455
prevBlock->size += missingAlignment;
5456
InsertFreeBlock(prevBlock);
5457
}
5458
else
5459
m_BlocksFreeSize += missingAlignment;
5460
}
5461
else
5462
{
5463
Block* newBlock = m_BlockAllocator.Alloc();
5464
currentBlock->prevPhysical = newBlock;
5465
prevBlock->nextPhysical = newBlock;
5466
newBlock->prevPhysical = prevBlock;
5467
newBlock->nextPhysical = currentBlock;
5468
newBlock->size = missingAlignment;
5469
newBlock->offset = currentBlock->offset;
5470
newBlock->MarkTaken();
5471
5472
InsertFreeBlock(newBlock);
5473
}
5474
5475
currentBlock->size -= missingAlignment;
5476
currentBlock->offset += missingAlignment;
5477
}
5478
5479
UINT64 size = request.size + GetDebugMargin();
5480
if (currentBlock->size == size)
5481
{
5482
if (currentBlock == m_NullBlock)
5483
{
5484
// Set up new null block
5485
m_NullBlock = m_BlockAllocator.Alloc();
5486
m_NullBlock->size = 0;
5487
m_NullBlock->offset = currentBlock->offset + size;
5488
m_NullBlock->prevPhysical = currentBlock;
5489
m_NullBlock->nextPhysical = NULL;
5490
m_NullBlock->MarkFree();
5491
m_NullBlock->PrevFree() = NULL;
5492
m_NullBlock->NextFree() = NULL;
5493
currentBlock->nextPhysical = m_NullBlock;
5494
currentBlock->MarkTaken();
5495
}
5496
}
5497
else
5498
{
5499
D3D12MA_ASSERT(currentBlock->size > size && "Proper block already found, shouldn't find smaller one!");
5500
5501
// Create new free block
5502
Block* newBlock = m_BlockAllocator.Alloc();
5503
newBlock->size = currentBlock->size - size;
5504
newBlock->offset = currentBlock->offset + size;
5505
newBlock->prevPhysical = currentBlock;
5506
newBlock->nextPhysical = currentBlock->nextPhysical;
5507
currentBlock->nextPhysical = newBlock;
5508
currentBlock->size = size;
5509
5510
if (currentBlock == m_NullBlock)
5511
{
5512
m_NullBlock = newBlock;
5513
m_NullBlock->MarkFree();
5514
m_NullBlock->NextFree() = NULL;
5515
m_NullBlock->PrevFree() = NULL;
5516
currentBlock->MarkTaken();
5517
}
5518
else
5519
{
5520
newBlock->nextPhysical->prevPhysical = newBlock;
5521
newBlock->MarkTaken();
5522
InsertFreeBlock(newBlock);
5523
}
5524
}
5525
currentBlock->PrivateData() = privateData;
5526
5527
if (GetDebugMargin() > 0)
5528
{
5529
currentBlock->size -= GetDebugMargin();
5530
Block* newBlock = m_BlockAllocator.Alloc();
5531
newBlock->size = GetDebugMargin();
5532
newBlock->offset = currentBlock->offset + currentBlock->size;
5533
newBlock->prevPhysical = currentBlock;
5534
newBlock->nextPhysical = currentBlock->nextPhysical;
5535
newBlock->MarkTaken();
5536
currentBlock->nextPhysical->prevPhysical = newBlock;
5537
currentBlock->nextPhysical = newBlock;
5538
InsertFreeBlock(newBlock);
5539
}
5540
++m_AllocCount;
5541
}
5542
5543
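// Note: Free() returns a block to the metadata and greedily merges it with free physical
// neighbors: first the debug-margin guard block that follows it (if enabled), then a free
// predecessor, and finally the free successor or the trailing null block.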
void BlockMetadata_TLSF::Free(AllocHandle allocHandle)
5544
{
5545
Block* block = (Block*)allocHandle;
5546
Block* next = block->nextPhysical;
5547
D3D12MA_ASSERT(!block->IsFree() && "Block is already free!");
5548
5549
--m_AllocCount;
5550
if (GetDebugMargin() > 0)
5551
{
5552
RemoveFreeBlock(next);
5553
MergeBlock(next, block);
5554
block = next;
5555
next = next->nextPhysical;
5556
}
5557
5558
// Try merging
5559
Block* prev = block->prevPhysical;
5560
if (prev != NULL && prev->IsFree() && prev->size != GetDebugMargin())
5561
{
5562
RemoveFreeBlock(prev);
5563
MergeBlock(block, prev);
5564
}
5565
5566
if (!next->IsFree())
5567
InsertFreeBlock(block);
5568
else if (next == m_NullBlock)
5569
MergeBlock(m_NullBlock, block);
5570
else
5571
{
5572
RemoveFreeBlock(next);
5573
MergeBlock(next, block);
5574
InsertFreeBlock(next);
5575
}
5576
}
5577
5578
void BlockMetadata_TLSF::Clear()
5579
{
5580
m_AllocCount = 0;
5581
m_BlocksFreeCount = 0;
5582
m_BlocksFreeSize = 0;
5583
m_IsFreeBitmap = 0;
5584
m_NullBlock->offset = 0;
5585
m_NullBlock->size = GetSize();
5586
Block* block = m_NullBlock->prevPhysical;
5587
m_NullBlock->prevPhysical = NULL;
5588
while (block)
5589
{
5590
Block* prev = block->prevPhysical;
5591
m_BlockAllocator.Free(block);
5592
block = prev;
5593
}
5594
memset(m_FreeList, 0, m_ListsCount * sizeof(Block*));
5595
memset(m_InnerIsFreeBitmap, 0, m_MemoryClasses * sizeof(UINT32));
5596
}
5597
5598
AllocHandle BlockMetadata_TLSF::GetAllocationListBegin() const
5599
{
5600
if (m_AllocCount == 0)
5601
return (AllocHandle)0;
5602
5603
for (Block* block = m_NullBlock->prevPhysical; block; block = block->prevPhysical)
5604
{
5605
if (!block->IsFree())
5606
return (AllocHandle)block;
5607
}
5608
D3D12MA_ASSERT(false && "If m_AllocCount > 0 then should find any allocation!");
5609
return (AllocHandle)0;
5610
}
5611
5612
AllocHandle BlockMetadata_TLSF::GetNextAllocation(AllocHandle prevAlloc) const
5613
{
5614
Block* startBlock = (Block*)prevAlloc;
5615
D3D12MA_ASSERT(!startBlock->IsFree() && "Incorrect block!");
5616
5617
for (Block* block = startBlock->prevPhysical; block; block = block->prevPhysical)
5618
{
5619
if (!block->IsFree())
5620
return (AllocHandle)block;
5621
}
5622
return (AllocHandle)0;
5623
}
5624
5625
UINT64 BlockMetadata_TLSF::GetNextFreeRegionSize(AllocHandle alloc) const
5626
{
5627
Block* block = (Block*)alloc;
5628
D3D12MA_ASSERT(!block->IsFree() && "Incorrect block!");
5629
5630
if (block->prevPhysical)
5631
return block->prevPhysical->IsFree() ? block->prevPhysical->size : 0;
5632
return 0;
5633
}
5634
5635
void* BlockMetadata_TLSF::GetAllocationPrivateData(AllocHandle allocHandle) const
5636
{
5637
Block* block = (Block*)allocHandle;
5638
D3D12MA_ASSERT(!block->IsFree() && "Cannot get user data for free block!");
5639
return block->PrivateData();
5640
}
5641
5642
void BlockMetadata_TLSF::SetAllocationPrivateData(AllocHandle allocHandle, void* privateData)
5643
{
5644
Block* block = (Block*)allocHandle;
5645
D3D12MA_ASSERT(!block->IsFree() && "Trying to set user data for not allocated block!");
5646
block->PrivateData() = privateData;
5647
}
5648
5649
void BlockMetadata_TLSF::AddStatistics(Statistics& inoutStats) const
5650
{
5651
inoutStats.BlockCount++;
5652
inoutStats.AllocationCount += static_cast<UINT>(m_AllocCount);
5653
inoutStats.BlockBytes += GetSize();
5654
inoutStats.AllocationBytes += GetSize() - GetSumFreeSize();
5655
}
5656
5657
void BlockMetadata_TLSF::AddDetailedStatistics(DetailedStatistics& inoutStats) const
5658
{
5659
inoutStats.Stats.BlockCount++;
5660
inoutStats.Stats.BlockBytes += GetSize();
5661
5662
for (Block* block = m_NullBlock->prevPhysical; block != NULL; block = block->prevPhysical)
5663
{
5664
if (block->IsFree())
5665
AddDetailedStatisticsUnusedRange(inoutStats, block->size);
5666
else
5667
AddDetailedStatisticsAllocation(inoutStats, block->size);
5668
}
5669
5670
if (m_NullBlock->size > 0)
5671
AddDetailedStatisticsUnusedRange(inoutStats, m_NullBlock->size);
5672
}
5673
5674
void BlockMetadata_TLSF::WriteAllocationInfoToJson(JsonWriter& json) const
5675
{
5676
size_t blockCount = m_AllocCount + m_BlocksFreeCount;
5677
Vector<Block*> blockList(blockCount, *GetAllocs());
5678
5679
size_t i = blockCount;
5680
if (m_NullBlock->size > 0)
5681
{
5682
++blockCount;
5683
blockList.push_back(m_NullBlock);
5684
}
5685
for (Block* block = m_NullBlock->prevPhysical; block != NULL; block = block->prevPhysical)
5686
{
5687
blockList[--i] = block;
5688
}
5689
D3D12MA_ASSERT(i == 0);
5690
5691
PrintDetailedMap_Begin(json, GetSumFreeSize(), GetAllocationCount(), m_BlocksFreeCount + static_cast<bool>(m_NullBlock->size));
5692
for (; i < blockCount; ++i)
5693
{
5694
Block* block = blockList[i];
5695
if (block->IsFree())
5696
PrintDetailedMap_UnusedRange(json, block->offset, block->size);
5697
else
5698
PrintDetailedMap_Allocation(json, block->offset, block->size, block->PrivateData());
5699
}
5700
PrintDetailedMap_End(json);
5701
}
5702
5703
void BlockMetadata_TLSF::DebugLogAllAllocations() const
5704
{
5705
for (Block* block = m_NullBlock->prevPhysical; block != NULL; block = block->prevPhysical)
5706
{
5707
if (!block->IsFree())
5708
{
5709
DebugLogAllocation(block->offset, block->size, block->PrivateData());
5710
}
5711
}
5712
}
5713
5714
UINT8 BlockMetadata_TLSF::SizeToMemoryClass(UINT64 size) const
5715
{
5716
if (size > SMALL_BUFFER_SIZE)
5717
return BitScanMSB(size) - MEMORY_CLASS_SHIFT;
5718
return 0;
5719
}
5720
5721
UINT16 BlockMetadata_TLSF::SizeToSecondIndex(UINT64 size, UINT8 memoryClass) const
5722
{
5723
if (memoryClass == 0)
5724
{
5725
if (IsVirtual())
5726
return static_cast<UINT16>((size - 1) / 8);
5727
else
5728
return static_cast<UINT16>((size - 1) / 64);
5729
}
5730
return static_cast<UINT16>((size >> (memoryClass + MEMORY_CLASS_SHIFT - SECOND_LEVEL_INDEX)) ^ (1U << SECOND_LEVEL_INDEX));
5731
}
5732
5733
UINT32 BlockMetadata_TLSF::GetListIndex(UINT8 memoryClass, UINT16 secondIndex) const
5734
{
5735
if (memoryClass == 0)
5736
return secondIndex;
5737
5738
const UINT32 index = static_cast<UINT32>(memoryClass - 1) * (1 << SECOND_LEVEL_INDEX) + secondIndex;
5739
if (IsVirtual())
5740
return index + (1 << SECOND_LEVEL_INDEX);
5741
else
5742
return index + 4;
5743
}
5744
5745
UINT32 BlockMetadata_TLSF::GetListIndex(UINT64 size) const
5746
{
5747
UINT8 memoryClass = SizeToMemoryClass(size);
5748
return GetListIndex(memoryClass, SizeToSecondIndex(size, memoryClass));
5749
}
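// A minimal illustration of the two-level (TLSF) index math above, for the non-virtual case
// and the constants defined earlier in this file (SMALL_BUFFER_SIZE, MEMORY_CLASS_SHIFT,
// SECOND_LEVEL_INDEX):
// - size <= SMALL_BUFFER_SIZE: memoryClass == 0, secondIndex == (size - 1) / 64, and
//   listIndex == secondIndex, so the first few lists are linear 64-byte steps.
// - size > SMALL_BUFFER_SIZE: memoryClass == BitScanMSB(size) - MEMORY_CLASS_SHIFT,
//   secondIndex == the next SECOND_LEVEL_INDEX bits below the top set bit, and
//   listIndex == (memoryClass - 1) * (1 << SECOND_LEVEL_INDEX) + secondIndex + 4.
// Buckets therefore grow linearly inside each power-of-two range, and the range itself
// doubles with every memory class.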
5750
5751
void BlockMetadata_TLSF::RemoveFreeBlock(Block* block)
5752
{
5753
D3D12MA_ASSERT(block != m_NullBlock);
5754
D3D12MA_ASSERT(block->IsFree());
5755
5756
if (block->NextFree() != NULL)
5757
block->NextFree()->PrevFree() = block->PrevFree();
5758
if (block->PrevFree() != NULL)
5759
block->PrevFree()->NextFree() = block->NextFree();
5760
else
5761
{
5762
UINT8 memClass = SizeToMemoryClass(block->size);
5763
UINT16 secondIndex = SizeToSecondIndex(block->size, memClass);
5764
UINT32 index = GetListIndex(memClass, secondIndex);
5765
m_FreeList[index] = block->NextFree();
5766
if (block->NextFree() == NULL)
5767
{
5768
m_InnerIsFreeBitmap[memClass] &= ~(1U << secondIndex);
5769
if (m_InnerIsFreeBitmap[memClass] == 0)
5770
m_IsFreeBitmap &= ~(1UL << memClass);
5771
}
5772
}
5773
block->MarkTaken();
5774
block->PrivateData() = NULL;
5775
--m_BlocksFreeCount;
5776
m_BlocksFreeSize -= block->size;
5777
}
5778
5779
void BlockMetadata_TLSF::InsertFreeBlock(Block* block)
5780
{
5781
D3D12MA_ASSERT(block != m_NullBlock);
5782
D3D12MA_ASSERT(!block->IsFree() && "Cannot insert block twice!");
5783
5784
UINT8 memClass = SizeToMemoryClass(block->size);
5785
UINT16 secondIndex = SizeToSecondIndex(block->size, memClass);
5786
UINT32 index = GetListIndex(memClass, secondIndex);
5787
block->PrevFree() = NULL;
5788
block->NextFree() = m_FreeList[index];
5789
m_FreeList[index] = block;
5790
if (block->NextFree() != NULL)
5791
block->NextFree()->PrevFree() = block;
5792
else
5793
{
5794
m_InnerIsFreeBitmap[memClass] |= 1U << secondIndex;
5795
m_IsFreeBitmap |= 1UL << memClass;
5796
}
5797
++m_BlocksFreeCount;
5798
m_BlocksFreeSize += block->size;
5799
}
5800
5801
void BlockMetadata_TLSF::MergeBlock(Block* block, Block* prev)
5802
{
5803
D3D12MA_ASSERT(block->prevPhysical == prev && "Cannot merge separate physical regions!");
5804
D3D12MA_ASSERT(!prev->IsFree() && "Cannot merge block that belongs to free list!");
5805
5806
block->offset = prev->offset;
5807
block->size += prev->size;
5808
block->prevPhysical = prev->prevPhysical;
5809
if (block->prevPhysical)
5810
block->prevPhysical->nextPhysical = block;
5811
m_BlockAllocator.Free(prev);
5812
}
5813
5814
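// FindFreeBlock() is the classic TLSF lookup: mask the second-level bitmap to drop buckets
// smaller than 'size', and if nothing remains there, move up to the lowest larger memory
// class via the first-level bitmap. Both steps are O(1) bit scans.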
BlockMetadata_TLSF::Block* BlockMetadata_TLSF::FindFreeBlock(UINT64 size, UINT32& listIndex) const
5815
{
5816
UINT8 memoryClass = SizeToMemoryClass(size);
5817
UINT32 innerFreeMap = m_InnerIsFreeBitmap[memoryClass] & (~0U << SizeToSecondIndex(size, memoryClass));
5818
if (!innerFreeMap)
5819
{
5820
// Check higher levels for available blocks
5821
UINT32 freeMap = m_IsFreeBitmap & (~0UL << (memoryClass + 1));
5822
if (!freeMap)
5823
return NULL; // No more memory available
5824
5825
// Find lowest free region
5826
memoryClass = BitScanLSB(freeMap);
5827
innerFreeMap = m_InnerIsFreeBitmap[memoryClass];
5828
D3D12MA_ASSERT(innerFreeMap != 0);
5829
}
5830
// Find lowest free subregion
5831
listIndex = GetListIndex(memoryClass, BitScanLSB(innerFreeMap));
5832
return m_FreeList[listIndex];
5833
}
5834
5835
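// CheckBlock() tests whether 'block' can hold the request at the alignment-adjusted offset;
// on success it fills *pAllocationRequest and moves the block to the front of its free list
// (see the in-code comment below).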
bool BlockMetadata_TLSF::CheckBlock(
5836
Block& block,
5837
UINT32 listIndex,
5838
UINT64 allocSize,
5839
UINT64 allocAlignment,
5840
AllocationRequest* pAllocationRequest)
5841
{
5842
D3D12MA_ASSERT(block.IsFree() && "Block is already taken!");
5843
5844
UINT64 alignedOffset = AlignUp(block.offset, allocAlignment);
5845
if (block.size < allocSize + alignedOffset - block.offset)
5846
return false;
5847
5848
// Alloc successful
5849
pAllocationRequest->allocHandle = (AllocHandle)&block;
5850
pAllocationRequest->size = allocSize - GetDebugMargin();
5851
pAllocationRequest->algorithmData = alignedOffset;
5852
5853
// Place the block at the start of the list if it's a normal block
5854
if (listIndex != m_ListsCount && block.PrevFree())
5855
{
5856
block.PrevFree()->NextFree() = block.NextFree();
5857
if (block.NextFree())
5858
block.NextFree()->PrevFree() = block.PrevFree();
5859
block.PrevFree() = NULL;
5860
block.NextFree() = m_FreeList[listIndex];
5861
m_FreeList[listIndex] = &block;
5862
if (block.NextFree())
5863
block.NextFree()->PrevFree() = &block;
5864
}
5865
5866
return true;
5867
}
5868
#endif // _D3D12MA_BLOCK_METADATA_TLSF_FUNCTIONS
5869
#endif // _D3D12MA_BLOCK_METADATA_TLSF
5870
5871
#ifndef _D3D12MA_MEMORY_BLOCK
5872
/*
5873
Represents a single block of device memory (heap).
5874
Base class for inheritance.
5875
Thread-safety: This class must be externally synchronized.
5876
*/
5877
class MemoryBlock
5878
{
5879
public:
5880
// Creates the ID3D12Heap.
5881
MemoryBlock(
5882
AllocatorPimpl* allocator,
5883
const D3D12_HEAP_PROPERTIES& heapProps,
5884
D3D12_HEAP_FLAGS heapFlags,
5885
UINT64 size,
5886
UINT id);
5887
virtual ~MemoryBlock();
5888
5889
const D3D12_HEAP_PROPERTIES& GetHeapProperties() const { return m_HeapProps; }
5890
D3D12_HEAP_FLAGS GetHeapFlags() const { return m_HeapFlags; }
5891
UINT64 GetSize() const { return m_Size; }
5892
UINT GetId() const { return m_Id; }
5893
ID3D12Heap* GetHeap() const { return m_Heap; }
5894
5895
protected:
5896
AllocatorPimpl* const m_Allocator;
5897
const D3D12_HEAP_PROPERTIES m_HeapProps;
5898
const D3D12_HEAP_FLAGS m_HeapFlags;
5899
const UINT64 m_Size;
5900
const UINT m_Id;
5901
5902
HRESULT Init(ID3D12ProtectedResourceSession* pProtectedSession, bool denyMsaaTextures);
5903
5904
private:
5905
ID3D12Heap* m_Heap = NULL;
5906
5907
D3D12MA_CLASS_NO_COPY(MemoryBlock)
5908
};
5909
#endif // _D3D12MA_MEMORY_BLOCK
5910
5911
#ifndef _D3D12MA_NORMAL_BLOCK
5912
/*
5913
Represents a single block of device memory (heap) with all the data about its
5914
regions (aka suballocations, Allocation), assigned and free.
5915
Thread-safety: This class must be externally synchronized.
5916
*/
5917
class NormalBlock : public MemoryBlock
5918
{
5919
public:
5920
BlockMetadata* m_pMetadata;
5921
5922
NormalBlock(
5923
AllocatorPimpl* allocator,
5924
BlockVector* blockVector,
5925
const D3D12_HEAP_PROPERTIES& heapProps,
5926
D3D12_HEAP_FLAGS heapFlags,
5927
UINT64 size,
5928
UINT id);
5929
virtual ~NormalBlock();
5930
5931
BlockVector* GetBlockVector() const { return m_BlockVector; }
5932
5933
// 'algorithm' should be one of the *_ALGORITHM_* flags in enums POOL_FLAGS or VIRTUAL_BLOCK_FLAGS
5934
HRESULT Init(UINT32 algorithm, ID3D12ProtectedResourceSession* pProtectedSession, bool denyMsaaTextures);
5935
5936
// Validates all data structures inside this object. If not valid, returns false.
5937
bool Validate() const;
5938
5939
private:
5940
BlockVector* m_BlockVector;
5941
5942
D3D12MA_CLASS_NO_COPY(NormalBlock)
5943
};
5944
#endif // _D3D12MA_NORMAL_BLOCK
5945
5946
#ifndef _D3D12MA_COMMITTED_ALLOCATION_LIST_ITEM_TRAITS
5947
struct CommittedAllocationListItemTraits
5948
{
5949
using ItemType = Allocation;
5950
5951
static ItemType* GetPrev(const ItemType* item)
5952
{
5953
D3D12MA_ASSERT(item->m_PackedData.GetType() == Allocation::TYPE_COMMITTED || item->m_PackedData.GetType() == Allocation::TYPE_HEAP);
5954
return item->m_Committed.prev;
5955
}
5956
static ItemType* GetNext(const ItemType* item)
5957
{
5958
D3D12MA_ASSERT(item->m_PackedData.GetType() == Allocation::TYPE_COMMITTED || item->m_PackedData.GetType() == Allocation::TYPE_HEAP);
5959
return item->m_Committed.next;
5960
}
5961
static ItemType*& AccessPrev(ItemType* item)
5962
{
5963
D3D12MA_ASSERT(item->m_PackedData.GetType() == Allocation::TYPE_COMMITTED || item->m_PackedData.GetType() == Allocation::TYPE_HEAP);
5964
return item->m_Committed.prev;
5965
}
5966
static ItemType*& AccessNext(ItemType* item)
5967
{
5968
D3D12MA_ASSERT(item->m_PackedData.GetType() == Allocation::TYPE_COMMITTED || item->m_PackedData.GetType() == Allocation::TYPE_HEAP);
5969
return item->m_Committed.next;
5970
}
5971
};
5972
#endif // _D3D12MA_COMMITTED_ALLOCATION_LIST_ITEM_TRAITS
5973
5974
#ifndef _D3D12MA_COMMITTED_ALLOCATION_LIST
5975
/*
5976
Stores linked list of Allocation objects that are of TYPE_COMMITTED or TYPE_HEAP.
5977
Thread-safe, synchronized internally.
5978
*/
5979
class CommittedAllocationList
5980
{
5981
public:
5982
CommittedAllocationList() = default;
5983
void Init(bool useMutex, D3D12_HEAP_TYPE heapType, PoolPimpl* pool);
5984
~CommittedAllocationList();
5985
5986
D3D12_HEAP_TYPE GetHeapType() const { return m_HeapType; }
5987
PoolPimpl* GetPool() const { return m_Pool; }
5988
UINT GetMemorySegmentGroup(AllocatorPimpl* allocator) const;
5989
5990
void AddStatistics(Statistics& inoutStats);
5991
void AddDetailedStatistics(DetailedStatistics& inoutStats);
5992
// Writes JSON array with the list of allocations.
5993
void BuildStatsString(JsonWriter& json);
5994
5995
void Register(Allocation* alloc);
5996
void Unregister(Allocation* alloc);
5997
5998
private:
5999
using CommittedAllocationLinkedList = IntrusiveLinkedList<CommittedAllocationListItemTraits>;
6000
6001
bool m_UseMutex = true;
6002
D3D12_HEAP_TYPE m_HeapType = D3D12_HEAP_TYPE_CUSTOM;
6003
PoolPimpl* m_Pool = NULL;
6004
6005
D3D12MA_RW_MUTEX m_Mutex;
6006
CommittedAllocationLinkedList m_AllocationList;
6007
};
6008
#endif // _D3D12MA_COMMITTED_ALLOCATION_LIST
6009
6010
#ifndef _D3D12M_COMMITTED_ALLOCATION_PARAMETERS
6011
struct CommittedAllocationParameters
6012
{
6013
CommittedAllocationList* m_List = NULL;
6014
D3D12_HEAP_PROPERTIES m_HeapProperties = {};
6015
D3D12_HEAP_FLAGS m_HeapFlags = D3D12_HEAP_FLAG_NONE;
6016
ID3D12ProtectedResourceSession* m_ProtectedSession = NULL;
6017
bool m_CanAlias = false;
6018
D3D12_RESIDENCY_PRIORITY m_ResidencyPriority = D3D12_RESIDENCY_PRIORITY_NONE;
6019
6020
bool IsValid() const { return m_List != NULL; }
6021
};
6022
#endif // _D3D12M_COMMITTED_ALLOCATION_PARAMETERS
6023
6024
// Simple variant data structure to hold all possible variations of ID3D12Device*::CreateCommittedResource* and ID3D12Device*::CreatePlacedResource* arguments
6025
struct CREATE_RESOURCE_PARAMS
6026
{
6027
CREATE_RESOURCE_PARAMS() = delete;
6028
CREATE_RESOURCE_PARAMS(
6029
const D3D12_RESOURCE_DESC* pResourceDesc,
6030
D3D12_RESOURCE_STATES InitialResourceState,
6031
const D3D12_CLEAR_VALUE* pOptimizedClearValue)
6032
: Variant(VARIANT_WITH_STATE)
6033
, pResourceDesc(pResourceDesc)
6034
, InitialResourceState(InitialResourceState)
6035
, pOptimizedClearValue(pOptimizedClearValue)
6036
{
6037
}
6038
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
6039
CREATE_RESOURCE_PARAMS(
6040
const D3D12_RESOURCE_DESC1* pResourceDesc,
6041
D3D12_RESOURCE_STATES InitialResourceState,
6042
const D3D12_CLEAR_VALUE* pOptimizedClearValue)
6043
: Variant(VARIANT_WITH_STATE_AND_DESC1)
6044
, pResourceDesc1(pResourceDesc)
6045
, InitialResourceState(InitialResourceState)
6046
, pOptimizedClearValue(pOptimizedClearValue)
6047
{
6048
}
6049
#endif
6050
#ifdef __ID3D12Device10_INTERFACE_DEFINED__
6051
CREATE_RESOURCE_PARAMS(
6052
const D3D12_RESOURCE_DESC1* pResourceDesc,
6053
D3D12_BARRIER_LAYOUT InitialLayout,
6054
const D3D12_CLEAR_VALUE* pOptimizedClearValue,
6055
UINT32 NumCastableFormats,
6056
DXGI_FORMAT* pCastableFormats)
6057
: Variant(VARIANT_WITH_LAYOUT)
6058
, pResourceDesc1(pResourceDesc)
6059
, InitialLayout(InitialLayout)
6060
, pOptimizedClearValue(pOptimizedClearValue)
6061
, NumCastableFormats(NumCastableFormats)
6062
, pCastableFormats(pCastableFormats)
6063
{
6064
}
6065
#endif
6066
6067
enum VARIANT
6068
{
6069
VARIANT_INVALID = 0,
6070
VARIANT_WITH_STATE,
6071
VARIANT_WITH_STATE_AND_DESC1,
6072
VARIANT_WITH_LAYOUT
6073
};
6074
6075
VARIANT Variant = VARIANT_INVALID;
6076
6077
const D3D12_RESOURCE_DESC* GetResourceDesc() const
6078
{
6079
D3D12MA_ASSERT(Variant == VARIANT_WITH_STATE);
6080
return pResourceDesc;
6081
}
6082
const D3D12_RESOURCE_DESC*& AccessResourceDesc()
6083
{
6084
D3D12MA_ASSERT(Variant == VARIANT_WITH_STATE);
6085
return pResourceDesc;
6086
}
6087
const D3D12_RESOURCE_DESC* GetBaseResourceDesc() const
6088
{
6089
// D3D12_RESOURCE_DESC1 can be cast to D3D12_RESOURCE_DESC by discarding the new members at the end.
6090
return pResourceDesc;
6091
}
6092
D3D12_RESOURCE_STATES GetInitialResourceState() const
6093
{
6094
D3D12MA_ASSERT(Variant < VARIANT_WITH_LAYOUT);
6095
return InitialResourceState;
6096
}
6097
const D3D12_CLEAR_VALUE* GetOptimizedClearValue() const
6098
{
6099
return pOptimizedClearValue;
6100
}
6101
6102
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
6103
const D3D12_RESOURCE_DESC1* GetResourceDesc1() const
6104
{
6105
D3D12MA_ASSERT(Variant >= VARIANT_WITH_STATE_AND_DESC1);
6106
return pResourceDesc1;
6107
}
6108
const D3D12_RESOURCE_DESC1*& AccessResourceDesc1()
6109
{
6110
D3D12MA_ASSERT(Variant >= VARIANT_WITH_STATE_AND_DESC1);
6111
return pResourceDesc1;
6112
}
6113
#endif
6114
6115
#ifdef __ID3D12Device10_INTERFACE_DEFINED__
6116
D3D12_BARRIER_LAYOUT GetInitialLayout() const
6117
{
6118
D3D12MA_ASSERT(Variant >= VARIANT_WITH_LAYOUT);
6119
return InitialLayout;
6120
}
6121
UINT32 GetNumCastableFormats() const
6122
{
6123
D3D12MA_ASSERT(Variant >= VARIANT_WITH_LAYOUT);
6124
return NumCastableFormats;
6125
}
6126
DXGI_FORMAT* GetCastableFormats() const
6127
{
6128
D3D12MA_ASSERT(Variant >= VARIANT_WITH_LAYOUT);
6129
return pCastableFormats;
6130
}
6131
#endif
6132
6133
private:
6134
union
6135
{
6136
const D3D12_RESOURCE_DESC* pResourceDesc;
6137
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
6138
const D3D12_RESOURCE_DESC1* pResourceDesc1;
6139
#endif
6140
};
6141
union
6142
{
6143
D3D12_RESOURCE_STATES InitialResourceState;
6144
#ifdef __ID3D12Device10_INTERFACE_DEFINED__
6145
D3D12_BARRIER_LAYOUT InitialLayout;
6146
#endif
6147
};
6148
const D3D12_CLEAR_VALUE* pOptimizedClearValue;
6149
#ifdef __ID3D12Device10_INTERFACE_DEFINED__
6150
UINT32 NumCastableFormats;
6151
DXGI_FORMAT* pCastableFormats;
6152
#endif
6153
};
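// A minimal sketch of how the variants above are typically filled (illustrative only; the
// CD3DX12_RESOURCE_DESC helper comes from d3dx12.h and is not used by this file):
//
//   D3D12_RESOURCE_DESC desc = CD3DX12_RESOURCE_DESC::Buffer(1024);
//   CREATE_RESOURCE_PARAMS params(&desc, D3D12_RESOURCE_STATE_COMMON, NULL);
//   // params.Variant == VARIANT_WITH_STATE; AllocatorPimpl::CreateResource() later copies
//   // the desc into a local D3D12_RESOURCE_DESC and re-points AccessResourceDesc() at it.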
6154
6155
#ifndef _D3D12MA_BLOCK_VECTOR
6156
/*
6157
Sequence of NormalBlock. Represents memory blocks allocated for a specific
6158
heap type and possibly resource type (if only Tier 1 is supported).
6159
6160
Synchronized internally with a mutex.
6161
*/
6162
class BlockVector
6163
{
6164
friend class DefragmentationContextPimpl;
6165
D3D12MA_CLASS_NO_COPY(BlockVector)
6166
public:
6167
BlockVector(
6168
AllocatorPimpl* hAllocator,
6169
const D3D12_HEAP_PROPERTIES& heapProps,
6170
D3D12_HEAP_FLAGS heapFlags,
6171
UINT64 preferredBlockSize,
6172
size_t minBlockCount,
6173
size_t maxBlockCount,
6174
bool explicitBlockSize,
6175
UINT64 minAllocationAlignment,
6176
UINT32 algorithm,
6177
bool denyMsaaTextures,
6178
ID3D12ProtectedResourceSession* pProtectedSession,
6179
D3D12_RESIDENCY_PRIORITY residencyPriority);
6180
~BlockVector();
6181
D3D12_RESIDENCY_PRIORITY GetResidencyPriority() const { return m_ResidencyPriority; }
6182
6183
const D3D12_HEAP_PROPERTIES& GetHeapProperties() const { return m_HeapProps; }
6184
D3D12_HEAP_FLAGS GetHeapFlags() const { return m_HeapFlags; }
6185
UINT64 GetPreferredBlockSize() const { return m_PreferredBlockSize; }
6186
UINT32 GetAlgorithm() const { return m_Algorithm; }
6187
bool DeniesMsaaTextures() const { return m_DenyMsaaTextures; }
6188
// To be used only while the m_Mutex is locked. Used during defragmentation.
6189
size_t GetBlockCount() const { return m_Blocks.size(); }
6190
// To be used only while the m_Mutex is locked. Used during defragmentation.
6191
NormalBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
6192
D3D12MA_RW_MUTEX& GetMutex() { return m_Mutex; }
6193
6194
HRESULT CreateMinBlocks();
6195
bool IsEmpty();
6196
6197
HRESULT Allocate(
6198
UINT64 size,
6199
UINT64 alignment,
6200
const ALLOCATION_DESC& allocDesc,
6201
size_t allocationCount,
6202
Allocation** pAllocations);
6203
6204
void Free(Allocation* hAllocation);
6205
6206
HRESULT CreateResource(
6207
UINT64 size,
6208
UINT64 alignment,
6209
const ALLOCATION_DESC& allocDesc,
6210
const CREATE_RESOURCE_PARAMS& createParams,
6211
Allocation** ppAllocation,
6212
REFIID riidResource,
6213
void** ppvResource);
6214
6215
void AddStatistics(Statistics& inoutStats);
6216
void AddDetailedStatistics(DetailedStatistics& inoutStats);
6217
6218
void WriteBlockInfoToJson(JsonWriter& json);
6219
6220
private:
6221
AllocatorPimpl* const m_hAllocator;
6222
const D3D12_HEAP_PROPERTIES m_HeapProps;
6223
const D3D12_HEAP_FLAGS m_HeapFlags;
6224
const UINT64 m_PreferredBlockSize;
6225
const size_t m_MinBlockCount;
6226
const size_t m_MaxBlockCount;
6227
const bool m_ExplicitBlockSize;
6228
const UINT64 m_MinAllocationAlignment;
6229
const UINT32 m_Algorithm;
6230
const bool m_DenyMsaaTextures;
6231
ID3D12ProtectedResourceSession* const m_ProtectedSession;
6232
const D3D12_RESIDENCY_PRIORITY m_ResidencyPriority;
6233
/* There can be at most one block that is completely empty - a
6234
hysteresis to avoid the pessimistic case of alternating creation and destruction
6235
of an ID3D12Heap. */
6236
bool m_HasEmptyBlock;
6237
D3D12MA_RW_MUTEX m_Mutex;
6238
// Incrementally sorted by sumFreeSize, ascending.
6239
Vector<NormalBlock*> m_Blocks;
6240
UINT m_NextBlockId;
6241
bool m_IncrementalSort = true;
6242
6243
// Disable incremental sorting when freeing allocations
6244
void SetIncrementalSort(bool val) { m_IncrementalSort = val; }
6245
6246
UINT64 CalcSumBlockSize() const;
6247
UINT64 CalcMaxBlockSize() const;
6248
6249
// Finds and removes given block from vector.
6250
void Remove(NormalBlock* pBlock);
6251
6252
// Performs single step in sorting m_Blocks. They may not be fully sorted
6253
// after this call.
6254
void IncrementallySortBlocks();
6255
void SortByFreeSize();
6256
6257
HRESULT AllocatePage(
6258
UINT64 size,
6259
UINT64 alignment,
6260
const ALLOCATION_DESC& allocDesc,
6261
Allocation** pAllocation);
6262
6263
HRESULT AllocateFromBlock(
6264
NormalBlock* pBlock,
6265
UINT64 size,
6266
UINT64 alignment,
6267
ALLOCATION_FLAGS allocFlags,
6268
void* pPrivateData,
6269
UINT32 strategy,
6270
Allocation** pAllocation);
6271
6272
HRESULT CommitAllocationRequest(
6273
AllocationRequest& allocRequest,
6274
NormalBlock* pBlock,
6275
UINT64 size,
6276
UINT64 alignment,
6277
void* pPrivateData,
6278
Allocation** pAllocation);
6279
6280
HRESULT CreateBlock(
6281
UINT64 blockSize,
6282
size_t* pNewBlockIndex);
6283
};
6284
#endif // _D3D12MA_BLOCK_VECTOR
6285
6286
#ifndef _D3D12MA_CURRENT_BUDGET_DATA
6287
class CurrentBudgetData
6288
{
6289
public:
6290
bool ShouldUpdateBudget() const { return m_OperationsSinceBudgetFetch >= 30; }
6291
6292
void GetStatistics(Statistics& outStats, UINT group) const;
6293
void GetBudget(bool useMutex,
6294
UINT64* outLocalUsage, UINT64* outLocalBudget,
6295
UINT64* outNonLocalUsage, UINT64* outNonLocalBudget);
6296
6297
#if D3D12MA_DXGI_1_4
6298
HRESULT UpdateBudget(IDXGIAdapter3* adapter3, bool useMutex);
6299
#endif
6300
6301
void AddAllocation(UINT group, UINT64 allocationBytes);
6302
void RemoveAllocation(UINT group, UINT64 allocationBytes);
6303
6304
void AddBlock(UINT group, UINT64 blockBytes);
6305
void RemoveBlock(UINT group, UINT64 blockBytes);
6306
6307
private:
6308
D3D12MA_ATOMIC_UINT32 m_BlockCount[DXGI_MEMORY_SEGMENT_GROUP_COUNT] = {};
6309
D3D12MA_ATOMIC_UINT32 m_AllocationCount[DXGI_MEMORY_SEGMENT_GROUP_COUNT] = {};
6310
D3D12MA_ATOMIC_UINT64 m_BlockBytes[DXGI_MEMORY_SEGMENT_GROUP_COUNT] = {};
6311
D3D12MA_ATOMIC_UINT64 m_AllocationBytes[DXGI_MEMORY_SEGMENT_GROUP_COUNT] = {};
6312
6313
D3D12MA_ATOMIC_UINT32 m_OperationsSinceBudgetFetch = {0};
6314
D3D12MA_RW_MUTEX m_BudgetMutex;
6315
UINT64 m_D3D12Usage[DXGI_MEMORY_SEGMENT_GROUP_COUNT] = {};
6316
UINT64 m_D3D12Budget[DXGI_MEMORY_SEGMENT_GROUP_COUNT] = {};
6317
UINT64 m_BlockBytesAtD3D12Fetch[DXGI_MEMORY_SEGMENT_GROUP_COUNT] = {};
6318
};
6319
6320
#ifndef _D3D12MA_CURRENT_BUDGET_DATA_FUNCTIONS
6321
void CurrentBudgetData::GetStatistics(Statistics& outStats, UINT group) const
6322
{
6323
outStats.BlockCount = m_BlockCount[group];
6324
outStats.AllocationCount = m_AllocationCount[group];
6325
outStats.BlockBytes = m_BlockBytes[group];
6326
outStats.AllocationBytes = m_AllocationBytes[group];
6327
}
6328
6329
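// GetBudget() estimates current usage without querying DXGI on every call: it starts from the
// usage captured at the last UpdateBudget() fetch and adds whatever this allocator has
// allocated in blocks since then (m_BlockBytes - m_BlockBytesAtD3D12Fetch), clamped at zero.
// Callers are expected to refresh via UpdateBudget() once ShouldUpdateBudget() returns true.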
void CurrentBudgetData::GetBudget(bool useMutex,
6330
UINT64* outLocalUsage, UINT64* outLocalBudget,
6331
UINT64* outNonLocalUsage, UINT64* outNonLocalBudget)
6332
{
6333
MutexLockRead lockRead(m_BudgetMutex, useMutex);
6334
6335
if (outLocalUsage)
6336
{
6337
const UINT64 D3D12Usage = m_D3D12Usage[DXGI_MEMORY_SEGMENT_GROUP_LOCAL_COPY];
6338
const UINT64 blockBytes = m_BlockBytes[DXGI_MEMORY_SEGMENT_GROUP_LOCAL_COPY];
6339
const UINT64 blockBytesAtD3D12Fetch = m_BlockBytesAtD3D12Fetch[DXGI_MEMORY_SEGMENT_GROUP_LOCAL_COPY];
6340
*outLocalUsage = D3D12Usage + blockBytes > blockBytesAtD3D12Fetch ?
6341
D3D12Usage + blockBytes - blockBytesAtD3D12Fetch : 0;
6342
}
6343
if (outLocalBudget)
6344
*outLocalBudget = m_D3D12Budget[DXGI_MEMORY_SEGMENT_GROUP_LOCAL_COPY];
6345
6346
if (outNonLocalUsage)
6347
{
6348
const UINT64 D3D12Usage = m_D3D12Usage[DXGI_MEMORY_SEGMENT_GROUP_NON_LOCAL_COPY];
6349
const UINT64 blockBytes = m_BlockBytes[DXGI_MEMORY_SEGMENT_GROUP_NON_LOCAL_COPY];
6350
const UINT64 blockBytesAtD3D12Fetch = m_BlockBytesAtD3D12Fetch[DXGI_MEMORY_SEGMENT_GROUP_NON_LOCAL_COPY];
6351
*outNonLocalUsage = D3D12Usage + blockBytes > blockBytesAtD3D12Fetch ?
6352
D3D12Usage + blockBytes - blockBytesAtD3D12Fetch : 0;
6353
}
6354
if (outNonLocalBudget)
6355
*outNonLocalBudget = m_D3D12Budget[DXGI_MEMORY_SEGMENT_GROUP_NON_LOCAL_COPY];
6356
}
6357
6358
#if D3D12MA_DXGI_1_4
6359
HRESULT CurrentBudgetData::UpdateBudget(IDXGIAdapter3* adapter3, bool useMutex)
6360
{
6361
D3D12MA_ASSERT(adapter3);
6362
6363
DXGI_QUERY_VIDEO_MEMORY_INFO infoLocal = {};
6364
DXGI_QUERY_VIDEO_MEMORY_INFO infoNonLocal = {};
6365
const HRESULT hrLocal = adapter3->QueryVideoMemoryInfo(0, DXGI_MEMORY_SEGMENT_GROUP_LOCAL, &infoLocal);
6366
const HRESULT hrNonLocal = adapter3->QueryVideoMemoryInfo(0, DXGI_MEMORY_SEGMENT_GROUP_NON_LOCAL, &infoNonLocal);
6367
6368
if (SUCCEEDED(hrLocal) || SUCCEEDED(hrNonLocal))
6369
{
6370
MutexLockWrite lockWrite(m_BudgetMutex, useMutex);
6371
6372
if (SUCCEEDED(hrLocal))
6373
{
6374
m_D3D12Usage[0] = infoLocal.CurrentUsage;
6375
m_D3D12Budget[0] = infoLocal.Budget;
6376
}
6377
if (SUCCEEDED(hrNonLocal))
6378
{
6379
m_D3D12Usage[1] = infoNonLocal.CurrentUsage;
6380
m_D3D12Budget[1] = infoNonLocal.Budget;
6381
}
6382
6383
m_BlockBytesAtD3D12Fetch[0] = m_BlockBytes[0];
6384
m_BlockBytesAtD3D12Fetch[1] = m_BlockBytes[1];
6385
m_OperationsSinceBudgetFetch = 0;
6386
}
6387
6388
return FAILED(hrLocal) ? hrLocal : hrNonLocal;
6389
}
6390
#endif // #if D3D12MA_DXGI_1_4
6391
6392
void CurrentBudgetData::AddAllocation(UINT group, UINT64 allocationBytes)
6393
{
6394
++m_AllocationCount[group];
6395
m_AllocationBytes[group] += allocationBytes;
6396
++m_OperationsSinceBudgetFetch;
6397
}
6398
6399
void CurrentBudgetData::RemoveAllocation(UINT group, UINT64 allocationBytes)
6400
{
6401
D3D12MA_ASSERT(m_AllocationBytes[group] >= allocationBytes);
6402
D3D12MA_ASSERT(m_AllocationCount[group] > 0);
6403
m_AllocationBytes[group] -= allocationBytes;
6404
--m_AllocationCount[group];
6405
++m_OperationsSinceBudgetFetch;
6406
}
6407
6408
void CurrentBudgetData::AddBlock(UINT group, UINT64 blockBytes)
6409
{
6410
++m_BlockCount[group];
6411
m_BlockBytes[group] += blockBytes;
6412
++m_OperationsSinceBudgetFetch;
6413
}
6414
6415
void CurrentBudgetData::RemoveBlock(UINT group, UINT64 blockBytes)
6416
{
6417
D3D12MA_ASSERT(m_BlockBytes[group] >= blockBytes);
6418
D3D12MA_ASSERT(m_BlockCount[group] > 0);
6419
m_BlockBytes[group] -= blockBytes;
6420
--m_BlockCount[group];
6421
++m_OperationsSinceBudgetFetch;
6422
}
6423
#endif // _D3D12MA_CURRENT_BUDGET_DATA_FUNCTIONS
6424
#endif // _D3D12MA_CURRENT_BUDGET_DATA
6425
6426
#ifndef _D3D12MA_DEFRAGMENTATION_CONTEXT_PIMPL
6427
class DefragmentationContextPimpl
6428
{
6429
D3D12MA_CLASS_NO_COPY(DefragmentationContextPimpl)
6430
public:
6431
DefragmentationContextPimpl(
6432
AllocatorPimpl* hAllocator,
6433
const DEFRAGMENTATION_DESC& desc,
6434
BlockVector* poolVector);
6435
~DefragmentationContextPimpl();
6436
6437
void GetStats(DEFRAGMENTATION_STATS& outStats) { outStats = m_GlobalStats; }
6438
const ALLOCATION_CALLBACKS& GetAllocs() const { return m_Moves.GetAllocs(); }
6439
6440
HRESULT DefragmentPassBegin(DEFRAGMENTATION_PASS_MOVE_INFO& moveInfo);
6441
HRESULT DefragmentPassEnd(DEFRAGMENTATION_PASS_MOVE_INFO& moveInfo);
6442
6443
private:
6444
// Max number of allocations to ignore due to size constraints before ending single pass
6445
static const UINT8 MAX_ALLOCS_TO_IGNORE = 16;
6446
enum class CounterStatus { Pass, Ignore, End };
6447
6448
struct FragmentedBlock
6449
{
6450
UINT32 data;
6451
NormalBlock* block;
6452
};
6453
struct StateBalanced
6454
{
6455
UINT64 avgFreeSize = 0;
6456
UINT64 avgAllocSize = UINT64_MAX;
6457
};
6458
struct MoveAllocationData
6459
{
6460
UINT64 size;
6461
UINT64 alignment;
6462
ALLOCATION_FLAGS flags;
6463
DEFRAGMENTATION_MOVE move = {};
6464
};
6465
6466
const UINT64 m_MaxPassBytes;
6467
const UINT32 m_MaxPassAllocations;
6468
6469
Vector<DEFRAGMENTATION_MOVE> m_Moves;
6470
6471
UINT8 m_IgnoredAllocs = 0;
6472
UINT32 m_Algorithm;
6473
UINT32 m_BlockVectorCount;
6474
BlockVector* m_PoolBlockVector;
6475
BlockVector** m_pBlockVectors;
6476
size_t m_ImmovableBlockCount = 0;
6477
DEFRAGMENTATION_STATS m_GlobalStats = { 0 };
6478
DEFRAGMENTATION_STATS m_PassStats = { 0 };
6479
void* m_AlgorithmState = NULL;
6480
6481
static MoveAllocationData GetMoveData(AllocHandle handle, BlockMetadata* metadata);
6482
CounterStatus CheckCounters(UINT64 bytes);
6483
bool IncrementCounters(UINT64 bytes);
6484
bool ReallocWithinBlock(BlockVector& vector, NormalBlock* block);
6485
bool AllocInOtherBlock(size_t start, size_t end, MoveAllocationData& data, BlockVector& vector);
6486
6487
bool ComputeDefragmentation(BlockVector& vector, size_t index);
6488
bool ComputeDefragmentation_Fast(BlockVector& vector);
6489
bool ComputeDefragmentation_Balanced(BlockVector& vector, size_t index, bool update);
6490
bool ComputeDefragmentation_Full(BlockVector& vector);
6491
6492
void UpdateVectorStatistics(BlockVector& vector, StateBalanced& state);
6493
};
6494
#endif // _D3D12MA_DEFRAGMENTATION_CONTEXT_PIMPL
6495
6496
#ifndef _D3D12MA_POOL_PIMPL
6497
class PoolPimpl
6498
{
6499
friend class Allocator;
6500
friend struct PoolListItemTraits;
6501
public:
6502
PoolPimpl(AllocatorPimpl* allocator, const POOL_DESC& desc);
6503
~PoolPimpl();
6504
6505
AllocatorPimpl* GetAllocator() const { return m_Allocator; }
6506
const POOL_DESC& GetDesc() const { return m_Desc; }
6507
bool SupportsCommittedAllocations() const { return m_Desc.BlockSize == 0; }
6508
LPCWSTR GetName() const { return m_Name; }
6509
6510
BlockVector* GetBlockVector() { return m_BlockVector; }
6511
CommittedAllocationList* GetCommittedAllocationList() { return SupportsCommittedAllocations() ? &m_CommittedAllocations : NULL; }
6512
6513
HRESULT Init();
6514
void GetStatistics(Statistics& outStats);
6515
void CalculateStatistics(DetailedStatistics& outStats);
6516
void AddDetailedStatistics(DetailedStatistics& inoutStats);
6517
void SetName(LPCWSTR Name);
6518
6519
private:
6520
AllocatorPimpl* m_Allocator; // Externally owned object.
6521
POOL_DESC m_Desc;
6522
BlockVector* m_BlockVector; // Owned object.
6523
CommittedAllocationList m_CommittedAllocations;
6524
wchar_t* m_Name;
6525
PoolPimpl* m_PrevPool = NULL;
6526
PoolPimpl* m_NextPool = NULL;
6527
6528
void FreeName();
6529
};
6530
6531
struct PoolListItemTraits
6532
{
6533
using ItemType = PoolPimpl;
6534
static ItemType* GetPrev(const ItemType* item) { return item->m_PrevPool; }
6535
static ItemType* GetNext(const ItemType* item) { return item->m_NextPool; }
6536
static ItemType*& AccessPrev(ItemType* item) { return item->m_PrevPool; }
6537
static ItemType*& AccessNext(ItemType* item) { return item->m_NextPool; }
6538
};
6539
#endif // _D3D12MA_POOL_PIMPL
6540
6541
6542
#ifndef _D3D12MA_ALLOCATOR_PIMPL
6543
class AllocatorPimpl
6544
{
6545
friend class Allocator;
6546
friend class Pool;
6547
public:
6548
std::atomic_uint32_t m_RefCount = {1};
6549
CurrentBudgetData m_Budget;
6550
6551
AllocatorPimpl(const ALLOCATION_CALLBACKS& allocationCallbacks, const ALLOCATOR_DESC& desc);
6552
~AllocatorPimpl();
6553
6554
ID3D12Device* GetDevice() const { return m_Device; }
6555
#ifdef __ID3D12Device1_INTERFACE_DEFINED__
6556
ID3D12Device1* GetDevice1() const { return m_Device1; }
6557
#endif
6558
#ifdef __ID3D12Device4_INTERFACE_DEFINED__
6559
ID3D12Device4* GetDevice4() const { return m_Device4; }
6560
#endif
6561
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
6562
ID3D12Device8* GetDevice8() const { return m_Device8; }
6563
#endif
6564
// Shortcut for "Allocation Callbacks", because this function is called so often.
6565
const ALLOCATION_CALLBACKS& GetAllocs() const { return m_AllocationCallbacks; }
6566
const D3D12_FEATURE_DATA_D3D12_OPTIONS& GetD3D12Options() const { return m_D3D12Options; }
6567
BOOL IsUMA() const { return m_D3D12Architecture.UMA; }
6568
BOOL IsCacheCoherentUMA() const { return m_D3D12Architecture.CacheCoherentUMA; }
6569
bool SupportsResourceHeapTier2() const { return m_D3D12Options.ResourceHeapTier >= D3D12_RESOURCE_HEAP_TIER_2; }
6570
bool UseMutex() const { return m_UseMutex; }
6571
AllocationObjectAllocator& GetAllocationObjectAllocator() { return m_AllocationObjectAllocator; }
6572
UINT GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
6573
/*
6574
If SupportsResourceHeapTier2():
6575
0: D3D12_HEAP_TYPE_DEFAULT
6576
1: D3D12_HEAP_TYPE_UPLOAD
6577
2: D3D12_HEAP_TYPE_READBACK
6578
else:
6579
0: D3D12_HEAP_TYPE_DEFAULT + buffer
6580
1: D3D12_HEAP_TYPE_DEFAULT + texture
6581
2: D3D12_HEAP_TYPE_DEFAULT + texture RT or DS
6582
3: D3D12_HEAP_TYPE_UPLOAD + buffer
6583
4: D3D12_HEAP_TYPE_UPLOAD + texture
6584
5: D3D12_HEAP_TYPE_UPLOAD + texture RT or DS
6585
6: D3D12_HEAP_TYPE_READBACK + buffer
6586
7: D3D12_HEAP_TYPE_READBACK + texture
6587
8: D3D12_HEAP_TYPE_READBACK + texture RT or DS
6588
*/
6589
UINT GetDefaultPoolCount() const { return SupportsResourceHeapTier2() ? 3 : 9; }
6590
BlockVector** GetDefaultPools() { return m_BlockVectors; }
6591
6592
HRESULT Init(const ALLOCATOR_DESC& desc);
6593
bool HeapFlagsFulfillResourceHeapTier(D3D12_HEAP_FLAGS flags) const;
6594
UINT StandardHeapTypeToMemorySegmentGroup(D3D12_HEAP_TYPE heapType) const;
6595
UINT HeapPropertiesToMemorySegmentGroup(const D3D12_HEAP_PROPERTIES& heapProps) const;
6596
UINT64 GetMemoryCapacity(UINT memorySegmentGroup) const;
6597
6598
HRESULT CreatePlacedResourceWrap(
6599
ID3D12Heap *pHeap,
6600
UINT64 HeapOffset,
6601
const CREATE_RESOURCE_PARAMS& createParams,
6602
REFIID riidResource,
6603
void** ppvResource);
6604
6605
HRESULT CreateResource(
6606
const ALLOCATION_DESC* pAllocDesc,
6607
const CREATE_RESOURCE_PARAMS& createParams,
6608
Allocation** ppAllocation,
6609
REFIID riidResource,
6610
void** ppvResource);
6611
6612
HRESULT CreateAliasingResource(
6613
Allocation* pAllocation,
6614
UINT64 AllocationLocalOffset,
6615
const CREATE_RESOURCE_PARAMS& createParams,
6616
REFIID riidResource,
6617
void** ppvResource);
6618
6619
HRESULT AllocateMemory(
6620
const ALLOCATION_DESC* pAllocDesc,
6621
const D3D12_RESOURCE_ALLOCATION_INFO* pAllocInfo,
6622
Allocation** ppAllocation);
6623
6624
// Unregisters allocation from the collection of dedicated allocations.
6625
// Allocation object must be deleted externally afterwards.
6626
void FreeCommittedMemory(Allocation* allocation);
6627
// Unregisters allocation from the collection of placed allocations.
6628
// Allocation object must be deleted externally afterwards.
6629
void FreePlacedMemory(Allocation* allocation);
6630
// Unregisters allocation from the collection of dedicated allocations and destroys associated heap.
6631
// Allocation object must be deleted externally afterwards.
6632
void FreeHeapMemory(Allocation* allocation);
6633
6634
void SetResidencyPriority(ID3D12Pageable* obj, D3D12_RESIDENCY_PRIORITY priority) const;
6635
6636
void SetCurrentFrameIndex(UINT frameIndex);
6637
// For more detailed stats, use outCustomHeaps to access statistics divided into the L0 and L1 groups
6638
void CalculateStatistics(TotalStatistics& outStats, DetailedStatistics outCustomHeaps[2] = NULL);
6639
6640
void GetBudget(Budget* outLocalBudget, Budget* outNonLocalBudget);
6641
void GetBudgetForHeapType(Budget& outBudget, D3D12_HEAP_TYPE heapType);
6642
6643
void BuildStatsString(WCHAR** ppStatsString, BOOL detailedMap);
6644
void FreeStatsString(WCHAR* pStatsString);
6645
6646
private:
6647
using PoolList = IntrusiveLinkedList<PoolListItemTraits>;
6648
6649
const bool m_UseMutex;
6650
const bool m_AlwaysCommitted;
6651
const bool m_MsaaAlwaysCommitted;
6652
bool m_DefaultPoolsNotZeroed = false;
6653
ID3D12Device* m_Device; // AddRef
6654
#ifdef __ID3D12Device1_INTERFACE_DEFINED__
6655
ID3D12Device1* m_Device1 = NULL; // AddRef, optional
6656
#endif
6657
#ifdef __ID3D12Device4_INTERFACE_DEFINED__
6658
ID3D12Device4* m_Device4 = NULL; // AddRef, optional
6659
#endif
6660
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
6661
ID3D12Device8* m_Device8 = NULL; // AddRef, optional
6662
#endif
6663
#ifdef __ID3D12Device10_INTERFACE_DEFINED__
6664
ID3D12Device10* m_Device10 = NULL; // AddRef, optional
6665
#endif
6666
IDXGIAdapter* m_Adapter; // AddRef
6667
#if D3D12MA_DXGI_1_4
6668
IDXGIAdapter3* m_Adapter3 = NULL; // AddRef, optional
6669
#endif
6670
UINT64 m_PreferredBlockSize;
6671
ALLOCATION_CALLBACKS m_AllocationCallbacks;
6672
D3D12MA_ATOMIC_UINT32 m_CurrentFrameIndex;
6673
DXGI_ADAPTER_DESC m_AdapterDesc;
6674
D3D12_FEATURE_DATA_D3D12_OPTIONS m_D3D12Options;
6675
D3D12_FEATURE_DATA_ARCHITECTURE m_D3D12Architecture;
6676
AllocationObjectAllocator m_AllocationObjectAllocator;
6677
6678
D3D12MA_RW_MUTEX m_PoolsMutex[HEAP_TYPE_COUNT];
6679
PoolList m_Pools[HEAP_TYPE_COUNT];
6680
// Default pools.
6681
BlockVector* m_BlockVectors[DEFAULT_POOL_MAX_COUNT];
6682
CommittedAllocationList m_CommittedAllocations[STANDARD_HEAP_TYPE_COUNT];
6683
6684
/*
6685
Heuristic that decides whether a resource is better placed in its own,
6686
dedicated allocation (committed resource rather than placed resource).
6687
*/
6688
template<typename D3D12_RESOURCE_DESC_T>
6689
static bool PrefersCommittedAllocation(const D3D12_RESOURCE_DESC_T& resourceDesc);
6690
6691
// Allocates and registers new committed resource with implicit heap, as dedicated allocation.
6692
// Creates and returns Allocation object and optionally D3D12 resource.
6693
HRESULT AllocateCommittedResource(
6694
const CommittedAllocationParameters& committedAllocParams,
6695
UINT64 resourceSize, bool withinBudget, void* pPrivateData,
6696
const CREATE_RESOURCE_PARAMS& createParams,
6697
Allocation** ppAllocation, REFIID riidResource, void** ppvResource);
6698
6699
// Allocates and registers new heap without any resources placed in it, as dedicated allocation.
6700
// Creates and returns Allocation object.
6701
HRESULT AllocateHeap(
6702
const CommittedAllocationParameters& committedAllocParams,
6703
const D3D12_RESOURCE_ALLOCATION_INFO& allocInfo, bool withinBudget,
6704
void* pPrivateData, Allocation** ppAllocation);
6705
6706
template<typename D3D12_RESOURCE_DESC_T>
6707
HRESULT CalcAllocationParams(const ALLOCATION_DESC& allocDesc, UINT64 allocSize,
6708
const D3D12_RESOURCE_DESC_T* resDesc, // Optional
6709
BlockVector*& outBlockVector, CommittedAllocationParameters& outCommittedAllocationParams, bool& outPreferCommitted);
6710
6711
// Returns UINT32_MAX if the index cannot be calculated.
6712
UINT CalcDefaultPoolIndex(const ALLOCATION_DESC& allocDesc, ResourceClass resourceClass) const;
6713
void CalcDefaultPoolParams(D3D12_HEAP_TYPE& outHeapType, D3D12_HEAP_FLAGS& outHeapFlags, UINT index) const;
6714
6715
// Registers Pool object in m_Pools.
6716
void RegisterPool(Pool* pool, D3D12_HEAP_TYPE heapType);
6717
// Unregisters Pool object from m_Pools.
6718
void UnregisterPool(Pool* pool, D3D12_HEAP_TYPE heapType);
6719
6720
HRESULT UpdateD3D12Budget();
6721
6722
D3D12_RESOURCE_ALLOCATION_INFO GetResourceAllocationInfoNative(const D3D12_RESOURCE_DESC& resourceDesc) const;
6723
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
6724
D3D12_RESOURCE_ALLOCATION_INFO GetResourceAllocationInfoNative(const D3D12_RESOURCE_DESC1& resourceDesc) const;
6725
#endif
6726
6727
template<typename D3D12_RESOURCE_DESC_T>
6728
D3D12_RESOURCE_ALLOCATION_INFO GetResourceAllocationInfo(D3D12_RESOURCE_DESC_T& inOutResourceDesc) const;
6729
6730
bool NewAllocationWithinBudget(D3D12_HEAP_TYPE heapType, UINT64 size);
6731
6732
// Writes object { } with data of given budget.
6733
static void WriteBudgetToJson(JsonWriter& json, const Budget& budget);
6734
};
6735
6736
#ifndef _D3D12MA_ALLOCATOR_PIMPL_FUNCTINOS
6737
AllocatorPimpl::AllocatorPimpl(const ALLOCATION_CALLBACKS& allocationCallbacks, const ALLOCATOR_DESC& desc)
6738
: m_UseMutex((desc.Flags & ALLOCATOR_FLAG_SINGLETHREADED) == 0),
6739
m_AlwaysCommitted((desc.Flags & ALLOCATOR_FLAG_ALWAYS_COMMITTED) != 0),
6740
m_MsaaAlwaysCommitted((desc.Flags & ALLOCATOR_FLAG_MSAA_TEXTURES_ALWAYS_COMMITTED) != 0),
6741
m_Device(desc.pDevice),
6742
m_Adapter(desc.pAdapter),
6743
m_PreferredBlockSize(desc.PreferredBlockSize != 0 ? desc.PreferredBlockSize : D3D12MA_DEFAULT_BLOCK_SIZE),
6744
m_AllocationCallbacks(allocationCallbacks),
6745
m_CurrentFrameIndex(0),
6746
// Below this line don't use allocationCallbacks but m_AllocationCallbacks!!!
6747
m_AllocationObjectAllocator(m_AllocationCallbacks)
6748
{
6749
// desc.pAllocationCallbacks intentionally ignored here, preprocessed by CreateAllocator.
6750
ZeroMemory(&m_D3D12Options, sizeof(m_D3D12Options));
6751
ZeroMemory(&m_D3D12Architecture, sizeof(m_D3D12Architecture));
6752
6753
ZeroMemory(m_BlockVectors, sizeof(m_BlockVectors));
6754
6755
for (UINT i = 0; i < STANDARD_HEAP_TYPE_COUNT; ++i)
6756
{
6757
m_CommittedAllocations[i].Init(
6758
m_UseMutex,
6759
IndexToStandardHeapType(i),
6760
NULL); // pool
6761
}
6762
6763
m_Device->AddRef();
6764
m_Adapter->AddRef();
6765
}
6766
6767
HRESULT AllocatorPimpl::Init(const ALLOCATOR_DESC& desc)
6768
{
6769
#if D3D12MA_DXGI_1_4
6770
desc.pAdapter->QueryInterface(D3D12MA_IID_PPV_ARGS(&m_Adapter3));
6771
#endif
6772
6773
#ifdef __ID3D12Device1_INTERFACE_DEFINED__
6774
m_Device->QueryInterface(D3D12MA_IID_PPV_ARGS(&m_Device1));
6775
#endif
6776
6777
#ifdef __ID3D12Device4_INTERFACE_DEFINED__
6778
m_Device->QueryInterface(D3D12MA_IID_PPV_ARGS(&m_Device4));
6779
#endif
6780
6781
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
6782
m_Device->QueryInterface(D3D12MA_IID_PPV_ARGS(&m_Device8));
6783
6784
if((desc.Flags & ALLOCATOR_FLAG_DEFAULT_POOLS_NOT_ZEROED) != 0)
6785
{
6786
D3D12_FEATURE_DATA_D3D12_OPTIONS7 options7 = {};
6787
if(SUCCEEDED(m_Device->CheckFeatureSupport(D3D12_FEATURE_D3D12_OPTIONS7, &options7, sizeof(options7))))
6788
{
6789
// DEFAULT_POOLS_NOT_ZEROED both supported and enabled by the user.
6790
m_DefaultPoolsNotZeroed = true;
6791
}
6792
}
6793
#endif
6794
6795
#ifdef __ID3D12Device10_INTERFACE_DEFINED__
6796
m_Device->QueryInterface(D3D12MA_IID_PPV_ARGS(&m_Device10));
6797
#endif
6798
6799
HRESULT hr = m_Adapter->GetDesc(&m_AdapterDesc);
6800
if (FAILED(hr))
6801
{
6802
return hr;
6803
}
6804
6805
hr = m_Device->CheckFeatureSupport(D3D12_FEATURE_D3D12_OPTIONS, &m_D3D12Options, sizeof(m_D3D12Options));
6806
if (FAILED(hr))
6807
{
6808
return hr;
6809
}
6810
#ifdef D3D12MA_FORCE_RESOURCE_HEAP_TIER
6811
m_D3D12Options.ResourceHeapTier = (D3D12MA_FORCE_RESOURCE_HEAP_TIER);
6812
#endif
6813
6814
hr = m_Device->CheckFeatureSupport(D3D12_FEATURE_ARCHITECTURE, &m_D3D12Architecture, sizeof(m_D3D12Architecture));
6815
if (FAILED(hr))
6816
{
6817
m_D3D12Architecture.UMA = FALSE;
6818
m_D3D12Architecture.CacheCoherentUMA = FALSE;
6819
}
6820
6821
D3D12_HEAP_PROPERTIES heapProps = {};
6822
const UINT defaultPoolCount = GetDefaultPoolCount();
6823
for (UINT i = 0; i < defaultPoolCount; ++i)
6824
{
6825
D3D12_HEAP_FLAGS heapFlags;
6826
CalcDefaultPoolParams(heapProps.Type, heapFlags, i);
6827
6828
#if D3D12MA_CREATE_NOT_ZEROED_AVAILABLE
6829
if(m_DefaultPoolsNotZeroed)
6830
{
6831
heapFlags |= D3D12_HEAP_FLAG_CREATE_NOT_ZEROED;
6832
}
6833
#endif
6834
6835
m_BlockVectors[i] = D3D12MA_NEW(GetAllocs(), BlockVector)(
6836
this, // hAllocator
6837
heapProps, // heapType
6838
heapFlags, // heapFlags
6839
m_PreferredBlockSize,
6840
0, // minBlockCount
6841
SIZE_MAX, // maxBlockCount
6842
false, // explicitBlockSize
6843
D3D12MA_DEBUG_ALIGNMENT, // minAllocationAlignment
6844
0, // Default algorithm,
6845
m_MsaaAlwaysCommitted,
6846
NULL, // pProtectedSession
6847
D3D12_RESIDENCY_PRIORITY_NONE); // residencyPriority
6848
// No need to call m_BlockVectors[i]->CreateMinBlocks here, because minBlockCount is 0.
6849
}
6850
6851
#if D3D12MA_DXGI_1_4
6852
UpdateD3D12Budget();
6853
#endif
6854
6855
return S_OK;
6856
}
6857
6858
AllocatorPimpl::~AllocatorPimpl()
6859
{
6860
#ifdef __ID3D12Device10_INTERFACE_DEFINED__
6861
SAFE_RELEASE(m_Device10);
6862
#endif
6863
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
6864
SAFE_RELEASE(m_Device8);
6865
#endif
6866
#ifdef __ID3D12Device4_INTERFACE_DEFINED__
6867
SAFE_RELEASE(m_Device4);
6868
#endif
6869
#ifdef __ID3D12Device1_INTERFACE_DEFINED__
6870
SAFE_RELEASE(m_Device1);
6871
#endif
6872
#if D3D12MA_DXGI_1_4
6873
SAFE_RELEASE(m_Adapter3);
6874
#endif
6875
SAFE_RELEASE(m_Adapter);
6876
SAFE_RELEASE(m_Device);
6877
6878
for (UINT i = DEFAULT_POOL_MAX_COUNT; i--; )
6879
{
6880
D3D12MA_DELETE(GetAllocs(), m_BlockVectors[i]);
6881
}
6882
6883
for (UINT i = HEAP_TYPE_COUNT; i--; )
6884
{
6885
if (!m_Pools[i].IsEmpty())
6886
{
6887
D3D12MA_ASSERT(0 && "Unfreed pools found!");
6888
}
6889
}
6890
}
6891
6892
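// On D3D12_RESOURCE_HEAP_TIER_1 hardware a heap may hold only one resource category, so the
// heap flags must deny exactly two of the three categories. For example (illustrative):
// D3D12_HEAP_FLAG_DENY_RT_DS_TEXTURES | D3D12_HEAP_FLAG_DENY_NON_RT_DS_TEXTURES leaves only
// buffers allowed and is accepted below, while D3D12_HEAP_FLAG_NONE (all three categories
// allowed) is accepted only on tier 2.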
bool AllocatorPimpl::HeapFlagsFulfillResourceHeapTier(D3D12_HEAP_FLAGS flags) const
6893
{
6894
if (SupportsResourceHeapTier2())
6895
{
6896
return true;
6897
}
6898
else
6899
{
6900
const bool allowBuffers = (flags & D3D12_HEAP_FLAG_DENY_BUFFERS) == 0;
6901
const bool allowRtDsTextures = (flags & D3D12_HEAP_FLAG_DENY_RT_DS_TEXTURES) == 0;
6902
const bool allowNonRtDsTextures = (flags & D3D12_HEAP_FLAG_DENY_NON_RT_DS_TEXTURES) == 0;
6903
const uint8_t allowedGroupCount = (allowBuffers ? 1 : 0) + (allowRtDsTextures ? 1 : 0) + (allowNonRtDsTextures ? 1 : 0);
6904
return allowedGroupCount == 1;
6905
}
6906
}
6907
6908
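// Mapping of heap types to DXGI memory segment groups used for budget accounting: on UMA
// adapters everything belongs to the single LOCAL group; on discrete adapters
// D3D12_HEAP_TYPE_DEFAULT maps to LOCAL (video memory) while UPLOAD and READBACK map to
// NON_LOCAL (system memory visible to the GPU).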
UINT AllocatorPimpl::StandardHeapTypeToMemorySegmentGroup(D3D12_HEAP_TYPE heapType) const
6909
{
6910
D3D12MA_ASSERT(IsHeapTypeStandard(heapType));
6911
if (IsUMA())
6912
return DXGI_MEMORY_SEGMENT_GROUP_LOCAL_COPY;
6913
return heapType == D3D12_HEAP_TYPE_DEFAULT ?
6914
DXGI_MEMORY_SEGMENT_GROUP_LOCAL_COPY : DXGI_MEMORY_SEGMENT_GROUP_NON_LOCAL_COPY;
6915
}
6916
6917
UINT AllocatorPimpl::HeapPropertiesToMemorySegmentGroup(const D3D12_HEAP_PROPERTIES& heapProps) const
6918
{
6919
if (IsUMA())
6920
return DXGI_MEMORY_SEGMENT_GROUP_LOCAL_COPY;
6921
if (heapProps.MemoryPoolPreference == D3D12_MEMORY_POOL_UNKNOWN)
6922
return StandardHeapTypeToMemorySegmentGroup(heapProps.Type);
6923
return heapProps.MemoryPoolPreference == D3D12_MEMORY_POOL_L1 ?
6924
DXGI_MEMORY_SEGMENT_GROUP_LOCAL_COPY : DXGI_MEMORY_SEGMENT_GROUP_NON_LOCAL_COPY;
6925
}
6926
6927
UINT64 AllocatorPimpl::GetMemoryCapacity(UINT memorySegmentGroup) const
6928
{
6929
switch (memorySegmentGroup)
6930
{
6931
case DXGI_MEMORY_SEGMENT_GROUP_LOCAL_COPY:
6932
return IsUMA() ?
6933
m_AdapterDesc.DedicatedVideoMemory + m_AdapterDesc.SharedSystemMemory : m_AdapterDesc.DedicatedVideoMemory;
6934
case DXGI_MEMORY_SEGMENT_GROUP_NON_LOCAL_COPY:
6935
return IsUMA() ? 0 : m_AdapterDesc.SharedSystemMemory;
6936
default:
6937
D3D12MA_ASSERT(0);
6938
return UINT64_MAX;
6939
}
6940
}
6941
6942
HRESULT AllocatorPimpl::CreatePlacedResourceWrap(
6943
ID3D12Heap *pHeap,
6944
UINT64 HeapOffset,
6945
const CREATE_RESOURCE_PARAMS& createParams,
6946
REFIID riidResource,
6947
void** ppvResource)
6948
{
6949
#ifdef __ID3D12Device10_INTERFACE_DEFINED__
6950
if (createParams.Variant == CREATE_RESOURCE_PARAMS::VARIANT_WITH_LAYOUT)
6951
{
6952
if (!m_Device10)
6953
{
6954
return E_NOINTERFACE;
6955
}
6956
return m_Device10->CreatePlacedResource2(pHeap, HeapOffset,
6957
createParams.GetResourceDesc1(), createParams.GetInitialLayout(),
6958
createParams.GetOptimizedClearValue(), createParams.GetNumCastableFormats(),
6959
createParams.GetCastableFormats(), riidResource, ppvResource);
6960
} else
6961
#endif
6962
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
6963
if (createParams.Variant == CREATE_RESOURCE_PARAMS::VARIANT_WITH_STATE_AND_DESC1)
6964
{
6965
if (!m_Device8)
6966
{
6967
return E_NOINTERFACE;
6968
}
6969
return m_Device8->CreatePlacedResource1(pHeap, HeapOffset,
6970
createParams.GetResourceDesc1(), createParams.GetInitialResourceState(),
6971
createParams.GetOptimizedClearValue(), riidResource, ppvResource);
6972
} else
6973
#endif
6974
if (createParams.Variant == CREATE_RESOURCE_PARAMS::VARIANT_WITH_STATE)
6975
{
6976
return m_Device->CreatePlacedResource(pHeap, HeapOffset,
6977
createParams.GetResourceDesc(), createParams.GetInitialResourceState(),
6978
createParams.GetOptimizedClearValue(), riidResource, ppvResource);
6979
}
6980
else
6981
{
6982
D3D12MA_ASSERT(0);
6983
return E_INVALIDARG;
6984
}
6985
}
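// CreatePlacedResourceWrap dispatches to the newest CreatePlacedResource* entry point matching
// the parameter variant: CreatePlacedResource2 (ID3D12Device10, initial layout),
// CreatePlacedResource1 (ID3D12Device8, D3D12_RESOURCE_DESC1), or the base CreatePlacedResource.
// When the required device interface is missing it returns E_NOINTERFACE instead of silently
// downgrading to an older variant.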
6986
6987
6988
HRESULT AllocatorPimpl::CreateResource(
6989
const ALLOCATION_DESC* pAllocDesc,
6990
const CREATE_RESOURCE_PARAMS& createParams,
6991
Allocation** ppAllocation,
6992
REFIID riidResource,
6993
void** ppvResource)
6994
{
6995
D3D12MA_ASSERT(pAllocDesc && createParams.GetBaseResourceDesc() && ppAllocation);
6996
6997
*ppAllocation = NULL;
6998
if (ppvResource)
6999
{
7000
*ppvResource = NULL;
7001
}
7002
7003
CREATE_RESOURCE_PARAMS finalCreateParams = createParams;
7004
D3D12_RESOURCE_DESC finalResourceDesc;
7005
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
7006
D3D12_RESOURCE_DESC1 finalResourceDesc1;
7007
#endif
7008
D3D12_RESOURCE_ALLOCATION_INFO resAllocInfo;
7009
if (createParams.Variant == CREATE_RESOURCE_PARAMS::VARIANT_WITH_STATE)
7010
{
7011
finalResourceDesc = *createParams.GetResourceDesc();
7012
finalCreateParams.AccessResourceDesc() = &finalResourceDesc;
7013
resAllocInfo = GetResourceAllocationInfo(finalResourceDesc);
7014
}
7015
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
7016
else if (createParams.Variant == CREATE_RESOURCE_PARAMS::VARIANT_WITH_STATE_AND_DESC1)
7017
{
7018
if (!m_Device8)
7019
{
7020
return E_NOINTERFACE;
7021
}
7022
finalResourceDesc1 = *createParams.GetResourceDesc1();
7023
finalCreateParams.AccessResourceDesc1() = &finalResourceDesc1;
7024
resAllocInfo = GetResourceAllocationInfo(finalResourceDesc1);
7025
}
7026
#endif
7027
#ifdef __ID3D12Device10_INTERFACE_DEFINED__
7028
else if (createParams.Variant == CREATE_RESOURCE_PARAMS::VARIANT_WITH_LAYOUT)
7029
{
7030
if (!m_Device10)
7031
{
7032
return E_NOINTERFACE;
7033
}
7034
finalResourceDesc1 = *createParams.GetResourceDesc1();
7035
finalCreateParams.AccessResourceDesc1() = &finalResourceDesc1;
7036
resAllocInfo = GetResourceAllocationInfo(finalResourceDesc1);
7037
}
7038
#endif
7039
else
7040
{
7041
D3D12MA_ASSERT(0);
7042
return E_INVALIDARG;
7043
}
7044
D3D12MA_ASSERT(IsPow2(resAllocInfo.Alignment));
7045
D3D12MA_ASSERT(resAllocInfo.SizeInBytes > 0);
7046
7047
BlockVector* blockVector = NULL;
7048
CommittedAllocationParameters committedAllocationParams = {};
7049
bool preferCommitted = false;
7050
7051
HRESULT hr;
7052
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
7053
if (createParams.Variant >= CREATE_RESOURCE_PARAMS::VARIANT_WITH_STATE_AND_DESC1)
7054
{
7055
hr = CalcAllocationParams<D3D12_RESOURCE_DESC1>(*pAllocDesc, resAllocInfo.SizeInBytes,
7056
createParams.GetResourceDesc1(),
7057
blockVector, committedAllocationParams, preferCommitted);
7058
}
7059
else
7060
#endif
7061
{
7062
hr = CalcAllocationParams<D3D12_RESOURCE_DESC>(*pAllocDesc, resAllocInfo.SizeInBytes,
7063
createParams.GetResourceDesc(),
7064
blockVector, committedAllocationParams, preferCommitted);
7065
}
7066
if (FAILED(hr))
7067
return hr;
7068
7069
const bool withinBudget = (pAllocDesc->Flags & ALLOCATION_FLAG_WITHIN_BUDGET) != 0;
7070
hr = E_INVALIDARG;
7071
if (committedAllocationParams.IsValid() && preferCommitted)
7072
{
7073
hr = AllocateCommittedResource(committedAllocationParams,
7074
resAllocInfo.SizeInBytes, withinBudget, pAllocDesc->pPrivateData,
7075
finalCreateParams, ppAllocation, riidResource, ppvResource);
7076
if (SUCCEEDED(hr))
7077
return hr;
7078
}
7079
if (blockVector != NULL)
7080
{
7081
hr = blockVector->CreateResource(resAllocInfo.SizeInBytes, resAllocInfo.Alignment,
7082
*pAllocDesc, finalCreateParams,
7083
ppAllocation, riidResource, ppvResource);
7084
if (SUCCEEDED(hr))
7085
return hr;
7086
}
7087
if (committedAllocationParams.IsValid() && !preferCommitted)
7088
{
7089
hr = AllocateCommittedResource(committedAllocationParams,
7090
resAllocInfo.SizeInBytes, withinBudget, pAllocDesc->pPrivateData,
7091
finalCreateParams, ppAllocation, riidResource, ppvResource);
7092
if (SUCCEEDED(hr))
7093
return hr;
7094
}
7095
return hr;
7096
}
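// Allocation order used by CreateResource above: CalcAllocationParams decides which strategies
// are eligible. When a committed allocation is preferred (e.g. the resource exceeds half of the
// preferred block size), it is attempted first; otherwise a placed allocation from the matching
// block vector is tried first and a committed allocation serves as the fallback. The HRESULT of
// the last attempt is returned.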
7097
7098
HRESULT AllocatorPimpl::AllocateMemory(
7099
const ALLOCATION_DESC* pAllocDesc,
7100
const D3D12_RESOURCE_ALLOCATION_INFO* pAllocInfo,
7101
Allocation** ppAllocation)
7102
{
7103
*ppAllocation = NULL;
7104
7105
BlockVector* blockVector = NULL;
7106
CommittedAllocationParameters committedAllocationParams = {};
7107
bool preferCommitted = false;
7108
HRESULT hr = CalcAllocationParams<D3D12_RESOURCE_DESC>(*pAllocDesc, pAllocInfo->SizeInBytes,
7109
NULL, // pResDesc
7110
blockVector, committedAllocationParams, preferCommitted);
7111
if (FAILED(hr))
7112
return hr;
7113
7114
const bool withinBudget = (pAllocDesc->Flags & ALLOCATION_FLAG_WITHIN_BUDGET) != 0;
7115
hr = E_INVALIDARG;
7116
if (committedAllocationParams.IsValid() && preferCommitted)
7117
{
7118
hr = AllocateHeap(committedAllocationParams, *pAllocInfo, withinBudget, pAllocDesc->pPrivateData, ppAllocation);
7119
if (SUCCEEDED(hr))
7120
return hr;
7121
}
7122
if (blockVector != NULL)
7123
{
7124
hr = blockVector->Allocate(pAllocInfo->SizeInBytes, pAllocInfo->Alignment,
7125
*pAllocDesc, 1, (Allocation**)ppAllocation);
7126
if (SUCCEEDED(hr))
7127
return hr;
7128
}
7129
if (committedAllocationParams.IsValid() && !preferCommitted)
7130
{
7131
hr = AllocateHeap(committedAllocationParams, *pAllocInfo, withinBudget, pAllocDesc->pPrivateData, ppAllocation);
7132
if (SUCCEEDED(hr))
7133
return hr;
7134
}
7135
return hr;
7136
}
7137
7138
HRESULT AllocatorPimpl::CreateAliasingResource(
7139
Allocation* pAllocation,
7140
UINT64 AllocationLocalOffset,
7141
const CREATE_RESOURCE_PARAMS& createParams,
7142
REFIID riidResource,
7143
void** ppvResource)
7144
{
7145
*ppvResource = NULL;
7146
7147
CREATE_RESOURCE_PARAMS finalCreateParams = createParams;
7148
D3D12_RESOURCE_DESC finalResourceDesc;
7149
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
7150
D3D12_RESOURCE_DESC1 finalResourceDesc1;
7151
#endif
7152
D3D12_RESOURCE_ALLOCATION_INFO resAllocInfo;
7153
if (createParams.Variant == CREATE_RESOURCE_PARAMS::VARIANT_WITH_STATE)
7154
{
7155
finalResourceDesc = *createParams.GetResourceDesc();
7156
finalCreateParams.AccessResourceDesc() = &finalResourceDesc;
7157
resAllocInfo = GetResourceAllocationInfo(finalResourceDesc);
7158
}
7159
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
7160
else if (createParams.Variant == CREATE_RESOURCE_PARAMS::VARIANT_WITH_STATE_AND_DESC1)
7161
{
7162
if (!m_Device8)
7163
{
7164
return E_NOINTERFACE;
7165
}
7166
finalResourceDesc1 = *createParams.GetResourceDesc1();
7167
finalCreateParams.AccessResourceDesc1() = &finalResourceDesc1;
7168
resAllocInfo = GetResourceAllocationInfo(finalResourceDesc1);
7169
}
7170
#endif
7171
#ifdef __ID3D12Device10_INTERFACE_DEFINED__
7172
else if (createParams.Variant == CREATE_RESOURCE_PARAMS::VARIANT_WITH_LAYOUT)
7173
{
7174
if (!m_Device10)
7175
{
7176
return E_NOINTERFACE;
7177
}
7178
finalResourceDesc1 = *createParams.GetResourceDesc1();
7179
finalCreateParams.AccessResourceDesc1() = &finalResourceDesc1;
7180
resAllocInfo = GetResourceAllocationInfo(finalResourceDesc1);
7181
}
7182
#endif
7183
else
7184
{
7185
D3D12MA_ASSERT(0);
7186
return E_INVALIDARG;
7187
}
7188
D3D12MA_ASSERT(IsPow2(resAllocInfo.Alignment));
7189
D3D12MA_ASSERT(resAllocInfo.SizeInBytes > 0);
7190
7191
ID3D12Heap* const existingHeap = pAllocation->GetHeap();
7192
const UINT64 existingOffset = pAllocation->GetOffset();
7193
const UINT64 existingSize = pAllocation->GetSize();
7194
const UINT64 newOffset = existingOffset + AllocationLocalOffset;
7195
7196
if (existingHeap == NULL ||
7197
AllocationLocalOffset + resAllocInfo.SizeInBytes > existingSize ||
7198
newOffset % resAllocInfo.Alignment != 0)
7199
{
7200
return E_INVALIDARG;
7201
}
7202
7203
return CreatePlacedResourceWrap(existingHeap, newOffset, finalCreateParams, riidResource, ppvResource);
7204
}
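// Usage sketch (illustrative; assumes the public Allocator::CreateAliasingResource wrapper
// declared in D3D12MemAlloc.h): create an allocation with ALLOCATION_FLAG_CAN_ALIAS, then call
//   allocator->CreateAliasingResource(alloc, 0, &overlappingDesc, initialState,
//       NULL, IID_PPV_ARGS(&aliasRes));
// to place a second, overlapping resource in the same heap range. The validation above rejects
// requests that do not fit inside the existing allocation or that break the required alignment.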
7205
7206
void AllocatorPimpl::FreeCommittedMemory(Allocation* allocation)
7207
{
7208
D3D12MA_ASSERT(allocation && allocation->m_PackedData.GetType() == Allocation::TYPE_COMMITTED);
7209
7210
CommittedAllocationList* const allocList = allocation->m_Committed.list;
7211
allocList->Unregister(allocation);
7212
7213
const UINT memSegmentGroup = allocList->GetMemorySegmentGroup(this);
7214
const UINT64 allocSize = allocation->GetSize();
7215
m_Budget.RemoveAllocation(memSegmentGroup, allocSize);
7216
m_Budget.RemoveBlock(memSegmentGroup, allocSize);
7217
}
7218
7219
void AllocatorPimpl::FreePlacedMemory(Allocation* allocation)
7220
{
7221
D3D12MA_ASSERT(allocation && allocation->m_PackedData.GetType() == Allocation::TYPE_PLACED);
7222
7223
NormalBlock* const block = allocation->m_Placed.block;
7224
D3D12MA_ASSERT(block);
7225
BlockVector* const blockVector = block->GetBlockVector();
7226
D3D12MA_ASSERT(blockVector);
7227
m_Budget.RemoveAllocation(HeapPropertiesToMemorySegmentGroup(block->GetHeapProperties()), allocation->GetSize());
7228
blockVector->Free(allocation);
7229
}
7230
7231
void AllocatorPimpl::FreeHeapMemory(Allocation* allocation)
7232
{
7233
D3D12MA_ASSERT(allocation && allocation->m_PackedData.GetType() == Allocation::TYPE_HEAP);
7234
7235
CommittedAllocationList* const allocList = allocation->m_Committed.list;
7236
allocList->Unregister(allocation);
7237
SAFE_RELEASE(allocation->m_Heap.heap);
7238
7239
const UINT memSegmentGroup = allocList->GetMemorySegmentGroup(this);
7240
const UINT64 allocSize = allocation->GetSize();
7241
m_Budget.RemoveAllocation(memSegmentGroup, allocSize);
7242
m_Budget.RemoveBlock(memSegmentGroup, allocSize);
7243
}
7244
7245
void AllocatorPimpl::SetResidencyPriority(ID3D12Pageable* obj, D3D12_RESIDENCY_PRIORITY priority) const
7246
{
7247
#ifdef __ID3D12Device1_INTERFACE_DEFINED__
7248
if (priority != D3D12_RESIDENCY_PRIORITY_NONE && m_Device1)
7249
{
7250
// Intentionally ignoring the result.
7251
m_Device1->SetResidencyPriority(1, &obj, &priority);
7252
}
7253
#endif
7254
}
7255
7256
void AllocatorPimpl::SetCurrentFrameIndex(UINT frameIndex)
7257
{
7258
m_CurrentFrameIndex.store(frameIndex);
7259
7260
#if D3D12MA_DXGI_1_4
7261
UpdateD3D12Budget();
7262
#endif
7263
}
7264
7265
void AllocatorPimpl::CalculateStatistics(TotalStatistics& outStats, DetailedStatistics outCustomHeaps[2])
7266
{
7267
// Init stats
7268
for (size_t i = 0; i < HEAP_TYPE_COUNT; i++)
7269
ClearDetailedStatistics(outStats.HeapType[i]);
7270
for (size_t i = 0; i < DXGI_MEMORY_SEGMENT_GROUP_COUNT; i++)
7271
ClearDetailedStatistics(outStats.MemorySegmentGroup[i]);
7272
ClearDetailedStatistics(outStats.Total);
7273
if (outCustomHeaps)
7274
{
7275
ClearDetailedStatistics(outCustomHeaps[0]);
7276
ClearDetailedStatistics(outCustomHeaps[1]);
7277
}
7278
7279
// Process default pools. 3 standard heap types only. Add them to outStats.HeapType[i].
7280
if (SupportsResourceHeapTier2())
7281
{
7282
// DEFAULT, UPLOAD, READBACK.
7283
for (size_t heapTypeIndex = 0; heapTypeIndex < STANDARD_HEAP_TYPE_COUNT; ++heapTypeIndex)
7284
{
7285
BlockVector* const pBlockVector = m_BlockVectors[heapTypeIndex];
7286
D3D12MA_ASSERT(pBlockVector);
7287
pBlockVector->AddDetailedStatistics(outStats.HeapType[heapTypeIndex]);
7288
}
7289
}
7290
else
7291
{
7292
// DEFAULT, UPLOAD, READBACK.
7293
for (size_t heapTypeIndex = 0; heapTypeIndex < STANDARD_HEAP_TYPE_COUNT; ++heapTypeIndex)
7294
{
7295
for (size_t heapSubType = 0; heapSubType < 3; ++heapSubType)
7296
{
7297
BlockVector* const pBlockVector = m_BlockVectors[heapTypeIndex * 3 + heapSubType];
7298
D3D12MA_ASSERT(pBlockVector);
7299
pBlockVector->AddDetailedStatistics(outStats.HeapType[heapTypeIndex]);
7300
}
7301
}
7302
}
7303
7304
// Sum them up to memory segment groups.
7305
AddDetailedStatistics(
7306
outStats.MemorySegmentGroup[StandardHeapTypeToMemorySegmentGroup(D3D12_HEAP_TYPE_DEFAULT)],
7307
outStats.HeapType[0]);
7308
AddDetailedStatistics(
7309
outStats.MemorySegmentGroup[StandardHeapTypeToMemorySegmentGroup(D3D12_HEAP_TYPE_UPLOAD)],
7310
outStats.HeapType[1]);
7311
AddDetailedStatistics(
7312
outStats.MemorySegmentGroup[StandardHeapTypeToMemorySegmentGroup(D3D12_HEAP_TYPE_READBACK)],
7313
outStats.HeapType[2]);
7314
7315
// Process custom pools.
7316
DetailedStatistics tmpStats;
7317
for (size_t heapTypeIndex = 0; heapTypeIndex < HEAP_TYPE_COUNT; ++heapTypeIndex)
7318
{
7319
MutexLockRead lock(m_PoolsMutex[heapTypeIndex], m_UseMutex);
7320
PoolList& poolList = m_Pools[heapTypeIndex];
7321
for (PoolPimpl* pool = poolList.Front(); pool != NULL; pool = poolList.GetNext(pool))
7322
{
7323
const D3D12_HEAP_PROPERTIES& poolHeapProps = pool->GetDesc().HeapProperties;
7324
ClearDetailedStatistics(tmpStats);
7325
pool->AddDetailedStatistics(tmpStats);
7326
AddDetailedStatistics(
7327
outStats.HeapType[heapTypeIndex], tmpStats);
7328
7329
UINT memorySegment = HeapPropertiesToMemorySegmentGroup(poolHeapProps);
7330
AddDetailedStatistics(
7331
outStats.MemorySegmentGroup[memorySegment], tmpStats);
7332
7333
if (outCustomHeaps)
7334
AddDetailedStatistics(outCustomHeaps[memorySegment], tmpStats);
7335
}
7336
}
7337
7338
// Process committed allocations. 3 standard heap types only.
7339
for (UINT heapTypeIndex = 0; heapTypeIndex < STANDARD_HEAP_TYPE_COUNT; ++heapTypeIndex)
7340
{
7341
ClearDetailedStatistics(tmpStats);
7342
m_CommittedAllocations[heapTypeIndex].AddDetailedStatistics(tmpStats);
7343
AddDetailedStatistics(
7344
outStats.HeapType[heapTypeIndex], tmpStats);
7345
AddDetailedStatistics(
7346
outStats.MemorySegmentGroup[StandardHeapTypeToMemorySegmentGroup(IndexToStandardHeapType(heapTypeIndex))], tmpStats);
7347
}
7348
7349
// Sum up memory segment groups to totals.
7350
AddDetailedStatistics(outStats.Total, outStats.MemorySegmentGroup[0]);
7351
AddDetailedStatistics(outStats.Total, outStats.MemorySegmentGroup[1]);
7352
7353
D3D12MA_ASSERT(outStats.Total.Stats.BlockCount ==
7354
outStats.MemorySegmentGroup[0].Stats.BlockCount + outStats.MemorySegmentGroup[1].Stats.BlockCount);
7355
D3D12MA_ASSERT(outStats.Total.Stats.AllocationCount ==
7356
outStats.MemorySegmentGroup[0].Stats.AllocationCount + outStats.MemorySegmentGroup[1].Stats.AllocationCount);
7357
D3D12MA_ASSERT(outStats.Total.Stats.BlockBytes ==
7358
outStats.MemorySegmentGroup[0].Stats.BlockBytes + outStats.MemorySegmentGroup[1].Stats.BlockBytes);
7359
D3D12MA_ASSERT(outStats.Total.Stats.AllocationBytes ==
7360
outStats.MemorySegmentGroup[0].Stats.AllocationBytes + outStats.MemorySegmentGroup[1].Stats.AllocationBytes);
7361
D3D12MA_ASSERT(outStats.Total.UnusedRangeCount ==
7362
outStats.MemorySegmentGroup[0].UnusedRangeCount + outStats.MemorySegmentGroup[1].UnusedRangeCount);
7363
7364
D3D12MA_ASSERT(outStats.Total.Stats.BlockCount ==
7365
outStats.HeapType[0].Stats.BlockCount + outStats.HeapType[1].Stats.BlockCount +
7366
outStats.HeapType[2].Stats.BlockCount + outStats.HeapType[3].Stats.BlockCount);
7367
D3D12MA_ASSERT(outStats.Total.Stats.AllocationCount ==
7368
outStats.HeapType[0].Stats.AllocationCount + outStats.HeapType[1].Stats.AllocationCount +
7369
outStats.HeapType[2].Stats.AllocationCount + outStats.HeapType[3].Stats.AllocationCount);
7370
D3D12MA_ASSERT(outStats.Total.Stats.BlockBytes ==
7371
outStats.HeapType[0].Stats.BlockBytes + outStats.HeapType[1].Stats.BlockBytes +
7372
outStats.HeapType[2].Stats.BlockBytes + outStats.HeapType[3].Stats.BlockBytes);
7373
D3D12MA_ASSERT(outStats.Total.Stats.AllocationBytes ==
7374
outStats.HeapType[0].Stats.AllocationBytes + outStats.HeapType[1].Stats.AllocationBytes +
7375
outStats.HeapType[2].Stats.AllocationBytes + outStats.HeapType[3].Stats.AllocationBytes);
7376
D3D12MA_ASSERT(outStats.Total.UnusedRangeCount ==
7377
outStats.HeapType[0].UnusedRangeCount + outStats.HeapType[1].UnusedRangeCount +
7378
outStats.HeapType[2].UnusedRangeCount + outStats.HeapType[3].UnusedRangeCount);
7379
}
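// The assertions above cross-check the aggregation: the per-heap-type sums and the
// per-memory-segment-group sums must both add up to the same grand totals in outStats.Total.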
7380
7381
void AllocatorPimpl::GetBudget(Budget* outLocalBudget, Budget* outNonLocalBudget)
7382
{
7383
if (outLocalBudget)
7384
m_Budget.GetStatistics(outLocalBudget->Stats, DXGI_MEMORY_SEGMENT_GROUP_LOCAL_COPY);
7385
if (outNonLocalBudget)
7386
m_Budget.GetStatistics(outNonLocalBudget->Stats, DXGI_MEMORY_SEGMENT_GROUP_NON_LOCAL_COPY);
7387
7388
#if D3D12MA_DXGI_1_4
7389
if (m_Adapter3)
7390
{
7391
if (!m_Budget.ShouldUpdateBudget())
7392
{
7393
m_Budget.GetBudget(m_UseMutex,
7394
outLocalBudget ? &outLocalBudget->UsageBytes : NULL,
7395
outLocalBudget ? &outLocalBudget->BudgetBytes : NULL,
7396
outNonLocalBudget ? &outNonLocalBudget->UsageBytes : NULL,
7397
outNonLocalBudget ? &outNonLocalBudget->BudgetBytes : NULL);
7398
}
7399
else
7400
{
7401
UpdateD3D12Budget();
7402
GetBudget(outLocalBudget, outNonLocalBudget); // Recursion
7403
}
7404
}
7405
else
7406
#endif
7407
{
7408
if (outLocalBudget)
7409
{
7410
outLocalBudget->UsageBytes = outLocalBudget->Stats.BlockBytes;
7411
outLocalBudget->BudgetBytes = GetMemoryCapacity(DXGI_MEMORY_SEGMENT_GROUP_LOCAL_COPY) * 8 / 10; // 80% heuristics.
7412
}
7413
if (outNonLocalBudget)
7414
{
7415
outNonLocalBudget->UsageBytes = outNonLocalBudget->Stats.BlockBytes;
7416
outNonLocalBudget->BudgetBytes = GetMemoryCapacity(DXGI_MEMORY_SEGMENT_GROUP_NON_LOCAL_COPY) * 8 / 10; // 80% heuristics.
7417
}
7418
}
7419
}
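// Worked example for the 80% fallback above (assumed adapter numbers, used only when
// IDXGIAdapter3 budget queries are unavailable): with DedicatedVideoMemory = 8 GiB
// (8,589,934,592 bytes), BudgetBytes = 8,589,934,592 * 8 / 10 = 6,871,947,673 bytes (~6.4 GiB).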
7420
7421
void AllocatorPimpl::GetBudgetForHeapType(Budget& outBudget, D3D12_HEAP_TYPE heapType)
7422
{
7423
switch (heapType)
7424
{
7425
case D3D12_HEAP_TYPE_DEFAULT:
7426
GetBudget(&outBudget, NULL);
7427
break;
7428
case D3D12_HEAP_TYPE_UPLOAD:
7429
case D3D12_HEAP_TYPE_READBACK:
7430
GetBudget(NULL, &outBudget);
7431
break;
7432
default: D3D12MA_ASSERT(0);
7433
}
7434
}
7435
7436
void AllocatorPimpl::BuildStatsString(WCHAR** ppStatsString, BOOL detailedMap)
7437
{
7438
StringBuilder sb(GetAllocs());
7439
{
7440
Budget localBudget = {}, nonLocalBudget = {};
7441
GetBudget(&localBudget, &nonLocalBudget);
7442
7443
TotalStatistics stats;
7444
DetailedStatistics customHeaps[2];
7445
CalculateStatistics(stats, customHeaps);
7446
7447
JsonWriter json(GetAllocs(), sb);
7448
json.BeginObject();
7449
{
7450
json.WriteString(L"General");
7451
json.BeginObject();
7452
{
7453
json.WriteString(L"API");
7454
json.WriteString(L"Direct3D 12");
7455
7456
json.WriteString(L"GPU");
7457
json.WriteString(m_AdapterDesc.Description);
7458
7459
json.WriteString(L"DedicatedVideoMemory");
7460
json.WriteNumber((UINT64)m_AdapterDesc.DedicatedVideoMemory);
7461
json.WriteString(L"DedicatedSystemMemory");
7462
json.WriteNumber((UINT64)m_AdapterDesc.DedicatedSystemMemory);
7463
json.WriteString(L"SharedSystemMemory");
7464
json.WriteNumber((UINT64)m_AdapterDesc.SharedSystemMemory);
7465
7466
json.WriteString(L"ResourceHeapTier");
7467
json.WriteNumber(static_cast<UINT>(m_D3D12Options.ResourceHeapTier));
7468
7469
json.WriteString(L"ResourceBindingTier");
7470
json.WriteNumber(static_cast<UINT>(m_D3D12Options.ResourceBindingTier));
7471
7472
json.WriteString(L"TiledResourcesTier");
7473
json.WriteNumber(static_cast<UINT>(m_D3D12Options.TiledResourcesTier));
7474
7475
json.WriteString(L"TileBasedRenderer");
7476
json.WriteBool(m_D3D12Architecture.TileBasedRenderer);
7477
7478
json.WriteString(L"UMA");
7479
json.WriteBool(m_D3D12Architecture.UMA);
7480
json.WriteString(L"CacheCoherentUMA");
7481
json.WriteBool(m_D3D12Architecture.CacheCoherentUMA);
7482
}
7483
json.EndObject();
7484
}
7485
{
7486
json.WriteString(L"Total");
7487
json.AddDetailedStatisticsInfoObject(stats.Total);
7488
}
7489
{
7490
json.WriteString(L"MemoryInfo");
7491
json.BeginObject();
7492
{
7493
json.WriteString(L"L0");
7494
json.BeginObject();
7495
{
7496
json.WriteString(L"Budget");
7497
WriteBudgetToJson(json, IsUMA() ? localBudget : nonLocalBudget); // On a UMA device only L0 is present and it is reported as local.
7498
7499
json.WriteString(L"Stats");
7500
json.AddDetailedStatisticsInfoObject(stats.MemorySegmentGroup[!IsUMA()]);
7501
7502
json.WriteString(L"MemoryPools");
7503
json.BeginObject();
7504
{
7505
if (IsUMA())
7506
{
7507
json.WriteString(L"DEFAULT");
7508
json.BeginObject();
7509
{
7510
json.WriteString(L"Stats");
7511
json.AddDetailedStatisticsInfoObject(stats.HeapType[0]);
7512
}
7513
json.EndObject();
7514
}
7515
json.WriteString(L"UPLOAD");
7516
json.BeginObject();
7517
{
7518
json.WriteString(L"Stats");
7519
json.AddDetailedStatisticsInfoObject(stats.HeapType[1]);
7520
}
7521
json.EndObject();
7522
7523
json.WriteString(L"READBACK");
7524
json.BeginObject();
7525
{
7526
json.WriteString(L"Stats");
7527
json.AddDetailedStatisticsInfoObject(stats.HeapType[2]);
7528
}
7529
json.EndObject();
7530
7531
json.WriteString(L"CUSTOM");
7532
json.BeginObject();
7533
{
7534
json.WriteString(L"Stats");
7535
json.AddDetailedStatisticsInfoObject(customHeaps[!IsUMA()]);
7536
}
7537
json.EndObject();
7538
}
7539
json.EndObject();
7540
}
7541
json.EndObject();
7542
if (!IsUMA())
7543
{
7544
json.WriteString(L"L1");
7545
json.BeginObject();
7546
{
7547
json.WriteString(L"Budget");
7548
WriteBudgetToJson(json, localBudget);
7549
7550
json.WriteString(L"Stats");
7551
json.AddDetailedStatisticsInfoObject(stats.MemorySegmentGroup[0]);
7552
7553
json.WriteString(L"MemoryPools");
7554
json.BeginObject();
7555
{
7556
json.WriteString(L"DEFAULT");
7557
json.BeginObject();
7558
{
7559
json.WriteString(L"Stats");
7560
json.AddDetailedStatisticsInfoObject(stats.HeapType[0]);
7561
}
7562
json.EndObject();
7563
7564
json.WriteString(L"CUSTOM");
7565
json.BeginObject();
7566
{
7567
json.WriteString(L"Stats");
7568
json.AddDetailedStatisticsInfoObject(customHeaps[0]);
7569
}
7570
json.EndObject();
7571
}
7572
json.EndObject();
7573
}
7574
json.EndObject();
7575
}
7576
}
7577
json.EndObject();
7578
}
7579
7580
if (detailedMap)
7581
{
7582
const auto writeHeapInfo = [&](BlockVector* blockVector, CommittedAllocationList* committedAllocs, bool customHeap)
7583
{
7584
D3D12MA_ASSERT(blockVector);
7585
7586
D3D12_HEAP_FLAGS flags = blockVector->GetHeapFlags();
7587
json.WriteString(L"Flags");
7588
json.BeginArray(true);
7589
{
7590
if (flags & D3D12_HEAP_FLAG_SHARED)
7591
json.WriteString(L"HEAP_FLAG_SHARED");
7592
if (flags & D3D12_HEAP_FLAG_ALLOW_DISPLAY)
7593
json.WriteString(L"HEAP_FLAG_ALLOW_DISPLAY");
7594
if (flags & D3D12_HEAP_FLAG_SHARED_CROSS_ADAPTER)
7595
json.WriteString(L"HEAP_FLAG_CROSS_ADAPTER");
7596
if (flags & D3D12_HEAP_FLAG_HARDWARE_PROTECTED)
7597
json.WriteString(L"HEAP_FLAG_HARDWARE_PROTECTED");
7598
if (flags & D3D12_HEAP_FLAG_ALLOW_WRITE_WATCH)
7599
json.WriteString(L"HEAP_FLAG_ALLOW_WRITE_WATCH");
7600
if (flags & D3D12_HEAP_FLAG_ALLOW_SHADER_ATOMICS)
7601
json.WriteString(L"HEAP_FLAG_ALLOW_SHADER_ATOMICS");
7602
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
7603
if (flags & D3D12_HEAP_FLAG_CREATE_NOT_RESIDENT)
7604
json.WriteString(L"HEAP_FLAG_CREATE_NOT_RESIDENT");
7605
if (flags & D3D12_HEAP_FLAG_CREATE_NOT_ZEROED)
7606
json.WriteString(L"HEAP_FLAG_CREATE_NOT_ZEROED");
7607
#endif
7608
7609
if (flags & D3D12_HEAP_FLAG_DENY_BUFFERS)
7610
json.WriteString(L"HEAP_FLAG_DENY_BUFFERS");
7611
if (flags & D3D12_HEAP_FLAG_DENY_RT_DS_TEXTURES)
7612
json.WriteString(L"HEAP_FLAG_DENY_RT_DS_TEXTURES");
7613
if (flags & D3D12_HEAP_FLAG_DENY_NON_RT_DS_TEXTURES)
7614
json.WriteString(L"HEAP_FLAG_DENY_NON_RT_DS_TEXTURES");
7615
7616
flags &= ~(D3D12_HEAP_FLAG_SHARED
7617
| D3D12_HEAP_FLAG_DENY_BUFFERS
7618
| D3D12_HEAP_FLAG_ALLOW_DISPLAY
7619
| D3D12_HEAP_FLAG_SHARED_CROSS_ADAPTER
7620
| D3D12_HEAP_FLAG_DENY_RT_DS_TEXTURES
7621
| D3D12_HEAP_FLAG_DENY_NON_RT_DS_TEXTURES
7622
| D3D12_HEAP_FLAG_HARDWARE_PROTECTED
7623
| D3D12_HEAP_FLAG_ALLOW_WRITE_WATCH
7624
| D3D12_HEAP_FLAG_ALLOW_SHADER_ATOMICS);
7625
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
7626
flags &= ~(D3D12_HEAP_FLAG_CREATE_NOT_RESIDENT
7627
| D3D12_HEAP_FLAG_CREATE_NOT_ZEROED);
7628
#endif
7629
if (flags != 0)
7630
json.WriteNumber((UINT)flags);
7631
7632
if (customHeap)
7633
{
7634
const D3D12_HEAP_PROPERTIES& properties = blockVector->GetHeapProperties();
7635
switch (properties.MemoryPoolPreference)
7636
{
7637
default:
7638
D3D12MA_ASSERT(0);
7639
case D3D12_MEMORY_POOL_UNKNOWN:
7640
json.WriteString(L"MEMORY_POOL_UNKNOWN");
7641
break;
7642
case D3D12_MEMORY_POOL_L0:
7643
json.WriteString(L"MEMORY_POOL_L0");
7644
break;
7645
case D3D12_MEMORY_POOL_L1:
7646
json.WriteString(L"MEMORY_POOL_L1");
7647
break;
7648
}
7649
switch (properties.CPUPageProperty)
7650
{
7651
default:
7652
D3D12MA_ASSERT(0);
7653
case D3D12_CPU_PAGE_PROPERTY_UNKNOWN:
7654
json.WriteString(L"CPU_PAGE_PROPERTY_UNKNOWN");
7655
break;
7656
case D3D12_CPU_PAGE_PROPERTY_NOT_AVAILABLE:
7657
json.WriteString(L"CPU_PAGE_PROPERTY_NOT_AVAILABLE");
7658
break;
7659
case D3D12_CPU_PAGE_PROPERTY_WRITE_COMBINE:
7660
json.WriteString(L"CPU_PAGE_PROPERTY_WRITE_COMBINE");
7661
break;
7662
case D3D12_CPU_PAGE_PROPERTY_WRITE_BACK:
7663
json.WriteString(L"CPU_PAGE_PROPERTY_WRITE_BACK");
7664
break;
7665
}
7666
}
7667
}
7668
json.EndArray();
7669
7670
json.WriteString(L"PreferredBlockSize");
7671
json.WriteNumber(blockVector->GetPreferredBlockSize());
7672
7673
json.WriteString(L"Blocks");
7674
blockVector->WriteBlockInfoToJson(json);
7675
7676
json.WriteString(L"DedicatedAllocations");
7677
json.BeginArray();
7678
if (committedAllocs)
7679
committedAllocs->BuildStatsString(json);
7680
json.EndArray();
7681
};
7682
7683
json.WriteString(L"DefaultPools");
7684
json.BeginObject();
7685
{
7686
if (SupportsResourceHeapTier2())
7687
{
7688
for (uint8_t heapType = 0; heapType < STANDARD_HEAP_TYPE_COUNT; ++heapType)
7689
{
7690
json.WriteString(StandardHeapTypeNames[heapType]);
7691
json.BeginObject();
7692
writeHeapInfo(m_BlockVectors[heapType], m_CommittedAllocations + heapType, false);
7693
json.EndObject();
7694
}
7695
}
7696
else
7697
{
7698
for (uint8_t heapType = 0; heapType < STANDARD_HEAP_TYPE_COUNT; ++heapType)
7699
{
7700
for (uint8_t heapSubType = 0; heapSubType < 3; ++heapSubType)
7701
{
7702
static const WCHAR* const heapSubTypeName[] = {
7703
L" - Buffers",
7704
L" - Textures",
7705
L" - Textures RT/DS",
7706
};
7707
json.BeginString(StandardHeapTypeNames[heapType]);
7708
json.EndString(heapSubTypeName[heapSubType]);
7709
7710
json.BeginObject();
7711
writeHeapInfo(m_BlockVectors[heapType * 3 + heapSubType], m_CommittedAllocations + heapType, false);
7712
json.EndObject();
7713
}
7714
}
7715
}
7716
}
7717
json.EndObject();
7718
7719
json.WriteString(L"CustomPools");
7720
json.BeginObject();
7721
for (uint8_t heapTypeIndex = 0; heapTypeIndex < HEAP_TYPE_COUNT; ++heapTypeIndex)
7722
{
7723
MutexLockRead mutex(m_PoolsMutex[heapTypeIndex], m_UseMutex);
7724
auto* item = m_Pools[heapTypeIndex].Front();
7725
if (item != NULL)
7726
{
7727
size_t index = 0;
7728
json.WriteString(HeapTypeNames[heapTypeIndex]);
7729
json.BeginArray();
7730
do
7731
{
7732
json.BeginObject();
7733
json.WriteString(L"Name");
7734
json.BeginString();
7735
json.ContinueString(index++);
7736
if (item->GetName())
7737
{
7738
json.ContinueString(L" - ");
7739
json.ContinueString(item->GetName());
7740
}
7741
json.EndString();
7742
7743
writeHeapInfo(item->GetBlockVector(), item->GetCommittedAllocationList(), heapTypeIndex == 3);
7744
json.EndObject();
7745
} while ((item = PoolList::GetNext(item)) != NULL);
7746
json.EndArray();
7747
}
7748
}
7749
json.EndObject();
7750
}
7751
json.EndObject();
7752
}
7753
7754
const size_t length = sb.GetLength();
7755
WCHAR* result = AllocateArray<WCHAR>(GetAllocs(), length + 2);
7756
result[0] = 0xFEFF;
7757
memcpy(result + 1, sb.GetData(), length * sizeof(WCHAR));
7758
result[length + 1] = L'\0';
7759
*ppStatsString = result;
7760
}
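// The returned string starts with a UTF-16 BOM (0xFEFF) and is NUL-terminated; it is allocated
// through the library's allocation callbacks, so callers must release it with FreeStatsString
// rather than a generic deallocator.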
7761
7762
void AllocatorPimpl::FreeStatsString(WCHAR* pStatsString)
7763
{
7764
D3D12MA_ASSERT(pStatsString);
7765
Free(GetAllocs(), pStatsString);
7766
}
7767
7768
template<typename D3D12_RESOURCE_DESC_T>
7769
bool AllocatorPimpl::PrefersCommittedAllocation(const D3D12_RESOURCE_DESC_T& resourceDesc)
7770
{
7771
// Intentional. It may change in the future.
7772
return false;
7773
}
7774
7775
HRESULT AllocatorPimpl::AllocateCommittedResource(
7776
const CommittedAllocationParameters& committedAllocParams,
7777
UINT64 resourceSize, bool withinBudget, void* pPrivateData,
7778
const CREATE_RESOURCE_PARAMS& createParams,
7779
Allocation** ppAllocation, REFIID riidResource, void** ppvResource)
7780
{
7781
D3D12MA_ASSERT(committedAllocParams.IsValid());
7782
7783
HRESULT hr;
7784
ID3D12Resource* res = NULL;
7785
// Aliasing requested: allocate an explicit heap, then place the resource in it.
7786
if (committedAllocParams.m_CanAlias)
7787
{
7788
D3D12_RESOURCE_ALLOCATION_INFO heapAllocInfo = {};
7789
heapAllocInfo.SizeInBytes = resourceSize;
7790
heapAllocInfo.Alignment = HeapFlagsToAlignment(committedAllocParams.m_HeapFlags, m_MsaaAlwaysCommitted);
7791
hr = AllocateHeap(committedAllocParams, heapAllocInfo, withinBudget, pPrivateData, ppAllocation);
7792
if (SUCCEEDED(hr))
7793
{
7794
hr = CreatePlacedResourceWrap((*ppAllocation)->GetHeap(), 0,
7795
createParams, D3D12MA_IID_PPV_ARGS(&res));
7796
if (SUCCEEDED(hr))
7797
{
7798
if (ppvResource != NULL)
7799
hr = res->QueryInterface(riidResource, ppvResource);
7800
if (SUCCEEDED(hr))
7801
{
7802
(*ppAllocation)->SetResourcePointer(res, createParams.GetBaseResourceDesc());
7803
return hr;
7804
}
7805
res->Release();
7806
}
7807
FreeHeapMemory(*ppAllocation);
7808
}
7809
return hr;
7810
}
7811
7812
if (withinBudget &&
7813
!NewAllocationWithinBudget(committedAllocParams.m_HeapProperties.Type, resourceSize))
7814
{
7815
return E_OUTOFMEMORY;
7816
}
7817
7818
/* D3D12 ERROR:
7819
* ID3D12Device::CreateCommittedResource:
7820
* When creating a committed resource, D3D12_HEAP_FLAGS must not have either
7821
* D3D12_HEAP_FLAG_DENY_NON_RT_DS_TEXTURES,
7822
* D3D12_HEAP_FLAG_DENY_RT_DS_TEXTURES,
7823
* nor D3D12_HEAP_FLAG_DENY_BUFFERS set.
7824
* These flags will be set automatically to correspond with the committed resource type.
7825
*
7826
* [ STATE_CREATION ERROR #640: CREATERESOURCEANDHEAP_INVALIDHEAPMISCFLAGS]
7827
*/
7828
7829
#ifdef __ID3D12Device10_INTERFACE_DEFINED__
7830
if (createParams.Variant == CREATE_RESOURCE_PARAMS::VARIANT_WITH_LAYOUT)
7831
{
7832
if (!m_Device10)
7833
{
7834
return E_NOINTERFACE;
7835
}
7836
hr = m_Device10->CreateCommittedResource3(
7837
&committedAllocParams.m_HeapProperties,
7838
committedAllocParams.m_HeapFlags & ~RESOURCE_CLASS_HEAP_FLAGS,
7839
createParams.GetResourceDesc1(), createParams.GetInitialLayout(),
7840
createParams.GetOptimizedClearValue(), committedAllocParams.m_ProtectedSession,
7841
createParams.GetNumCastableFormats(), createParams.GetCastableFormats(),
7842
D3D12MA_IID_PPV_ARGS(&res));
7843
} else
7844
#endif
7845
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
7846
if (createParams.Variant == CREATE_RESOURCE_PARAMS::VARIANT_WITH_STATE_AND_DESC1)
7847
{
7848
if (!m_Device8)
7849
{
7850
return E_NOINTERFACE;
7851
}
7852
hr = m_Device8->CreateCommittedResource2(
7853
&committedAllocParams.m_HeapProperties,
7854
committedAllocParams.m_HeapFlags & ~RESOURCE_CLASS_HEAP_FLAGS,
7855
createParams.GetResourceDesc1(), createParams.GetInitialResourceState(),
7856
createParams.GetOptimizedClearValue(), committedAllocParams.m_ProtectedSession,
7857
D3D12MA_IID_PPV_ARGS(&res));
7858
} else
7859
#endif
7860
if (createParams.Variant == CREATE_RESOURCE_PARAMS::VARIANT_WITH_STATE)
7861
{
7862
#ifdef __ID3D12Device4_INTERFACE_DEFINED__
7863
if (m_Device4)
7864
{
7865
hr = m_Device4->CreateCommittedResource1(
7866
&committedAllocParams.m_HeapProperties,
7867
committedAllocParams.m_HeapFlags & ~RESOURCE_CLASS_HEAP_FLAGS,
7868
createParams.GetResourceDesc(), createParams.GetInitialResourceState(),
7869
createParams.GetOptimizedClearValue(), committedAllocParams.m_ProtectedSession,
7870
D3D12MA_IID_PPV_ARGS(&res));
7871
}
7872
else
7873
#endif
7874
{
7875
if (committedAllocParams.m_ProtectedSession == NULL)
7876
{
7877
hr = m_Device->CreateCommittedResource(
7878
&committedAllocParams.m_HeapProperties,
7879
committedAllocParams.m_HeapFlags & ~RESOURCE_CLASS_HEAP_FLAGS,
7880
createParams.GetResourceDesc(), createParams.GetInitialResourceState(),
7881
createParams.GetOptimizedClearValue(), D3D12MA_IID_PPV_ARGS(&res));
7882
}
7883
else
7884
hr = E_NOINTERFACE;
7885
}
7886
}
7887
else
7888
{
7889
D3D12MA_ASSERT(0);
7890
return E_INVALIDARG;
7891
}
7892
7893
if (SUCCEEDED(hr))
7894
{
7895
SetResidencyPriority(res, committedAllocParams.m_ResidencyPriority);
7896
7897
if (ppvResource != NULL)
7898
{
7899
hr = res->QueryInterface(riidResource, ppvResource);
7900
}
7901
if (SUCCEEDED(hr))
7902
{
7903
BOOL wasZeroInitialized = TRUE;
7904
#if D3D12MA_CREATE_NOT_ZEROED_AVAILABLE
7905
if((committedAllocParams.m_HeapFlags & D3D12_HEAP_FLAG_CREATE_NOT_ZEROED) != 0)
7906
{
7907
wasZeroInitialized = FALSE;
7908
}
7909
#endif
7910
7911
Allocation* alloc = m_AllocationObjectAllocator.Allocate(
7912
this, resourceSize, createParams.GetBaseResourceDesc()->Alignment, wasZeroInitialized);
7913
alloc->InitCommitted(committedAllocParams.m_List);
7914
alloc->SetResourcePointer(res, createParams.GetBaseResourceDesc());
7915
alloc->SetPrivateData(pPrivateData);
7916
7917
*ppAllocation = alloc;
7918
7919
committedAllocParams.m_List->Register(alloc);
7920
7921
const UINT memSegmentGroup = HeapPropertiesToMemorySegmentGroup(committedAllocParams.m_HeapProperties);
7922
m_Budget.AddBlock(memSegmentGroup, resourceSize);
7923
m_Budget.AddAllocation(memSegmentGroup, resourceSize);
7924
}
7925
else
7926
{
7927
res->Release();
7928
}
7929
}
7930
return hr;
7931
}
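// Summary of the committed path above: with ALLOCATION_FLAG_CAN_ALIAS the memory is allocated as
// an explicit heap and the resource is placed into it; otherwise the newest available
// CreateCommittedResource variant is used (CreateCommittedResource3 for the layout variant,
// CreateCommittedResource2 for D3D12_RESOURCE_DESC1, CreateCommittedResource1 when ID3D12Device4
// exists, else the base CreateCommittedResource, which cannot honor a protected session and then
// fails with E_NOINTERFACE).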
7932
7933
HRESULT AllocatorPimpl::AllocateHeap(
7934
const CommittedAllocationParameters& committedAllocParams,
7935
const D3D12_RESOURCE_ALLOCATION_INFO& allocInfo, bool withinBudget,
7936
void* pPrivateData, Allocation** ppAllocation)
7937
{
7938
D3D12MA_ASSERT(committedAllocParams.IsValid());
7939
7940
*ppAllocation = nullptr;
7941
7942
if (withinBudget &&
7943
!NewAllocationWithinBudget(committedAllocParams.m_HeapProperties.Type, allocInfo.SizeInBytes))
7944
{
7945
return E_OUTOFMEMORY;
7946
}
7947
7948
D3D12_HEAP_DESC heapDesc = {};
7949
heapDesc.SizeInBytes = allocInfo.SizeInBytes;
7950
heapDesc.Properties = committedAllocParams.m_HeapProperties;
7951
heapDesc.Alignment = allocInfo.Alignment;
7952
heapDesc.Flags = committedAllocParams.m_HeapFlags;
7953
7954
HRESULT hr;
7955
ID3D12Heap* heap = nullptr;
7956
#ifdef __ID3D12Device4_INTERFACE_DEFINED__
7957
if (m_Device4)
7958
hr = m_Device4->CreateHeap1(&heapDesc, committedAllocParams.m_ProtectedSession, D3D12MA_IID_PPV_ARGS(&heap));
7959
else
7960
#endif
7961
{
7962
if (committedAllocParams.m_ProtectedSession == NULL)
7963
hr = m_Device->CreateHeap(&heapDesc, D3D12MA_IID_PPV_ARGS(&heap));
7964
else
7965
hr = E_NOINTERFACE;
7966
}
7967
7968
if (SUCCEEDED(hr))
7969
{
7970
SetResidencyPriority(heap, committedAllocParams.m_ResidencyPriority);
7971
7972
BOOL wasZeroInitialized = TRUE;
7973
#if D3D12MA_CREATE_NOT_ZEROED_AVAILABLE
7974
if((heapDesc.Flags & D3D12_HEAP_FLAG_CREATE_NOT_ZEROED) != 0)
7975
{
7976
wasZeroInitialized = FALSE;
7977
}
7978
#endif
7979
7980
(*ppAllocation) = m_AllocationObjectAllocator.Allocate(this, allocInfo.SizeInBytes, allocInfo.Alignment, wasZeroInitialized);
7981
(*ppAllocation)->InitHeap(committedAllocParams.m_List, heap);
7982
(*ppAllocation)->SetPrivateData(pPrivateData);
7983
committedAllocParams.m_List->Register(*ppAllocation);
7984
7985
const UINT memSegmentGroup = HeapPropertiesToMemorySegmentGroup(committedAllocParams.m_HeapProperties);
7986
m_Budget.AddBlock(memSegmentGroup, allocInfo.SizeInBytes);
7987
m_Budget.AddAllocation(memSegmentGroup, allocInfo.SizeInBytes);
7988
}
7989
return hr;
7990
}
7991
7992
template<typename D3D12_RESOURCE_DESC_T>
7993
HRESULT AllocatorPimpl::CalcAllocationParams(const ALLOCATION_DESC& allocDesc, UINT64 allocSize,
7994
const D3D12_RESOURCE_DESC_T* resDesc,
7995
BlockVector*& outBlockVector, CommittedAllocationParameters& outCommittedAllocationParams, bool& outPreferCommitted)
7996
{
7997
outBlockVector = NULL;
7998
outCommittedAllocationParams = CommittedAllocationParameters();
7999
outPreferCommitted = false;
8000
8001
bool msaaAlwaysCommitted;
8002
if (allocDesc.CustomPool != NULL)
8003
{
8004
PoolPimpl* const pool = allocDesc.CustomPool->m_Pimpl;
8005
8006
msaaAlwaysCommitted = pool->GetBlockVector()->DeniesMsaaTextures();
8007
outBlockVector = pool->GetBlockVector();
8008
8009
const auto& desc = pool->GetDesc();
8010
outCommittedAllocationParams.m_ProtectedSession = desc.pProtectedSession;
8011
outCommittedAllocationParams.m_HeapProperties = desc.HeapProperties;
8012
outCommittedAllocationParams.m_HeapFlags = desc.HeapFlags;
8013
outCommittedAllocationParams.m_List = pool->GetCommittedAllocationList();
8014
outCommittedAllocationParams.m_ResidencyPriority = pool->GetDesc().ResidencyPriority;
8015
}
8016
else
8017
{
8018
if (!IsHeapTypeStandard(allocDesc.HeapType))
8019
{
8020
return E_INVALIDARG;
8021
}
8022
msaaAlwaysCommitted = m_MsaaAlwaysCommitted;
8023
8024
outCommittedAllocationParams.m_HeapProperties = StandardHeapTypeToHeapProperties(allocDesc.HeapType);
8025
outCommittedAllocationParams.m_HeapFlags = allocDesc.ExtraHeapFlags;
8026
outCommittedAllocationParams.m_List = &m_CommittedAllocations[StandardHeapTypeToIndex(allocDesc.HeapType)];
8027
// outCommittedAllocationParams.m_ResidencyPriority intentionally left with default value.
8028
8029
const ResourceClass resourceClass = (resDesc != NULL) ?
8030
ResourceDescToResourceClass(*resDesc) : HeapFlagsToResourceClass(allocDesc.ExtraHeapFlags);
8031
const UINT defaultPoolIndex = CalcDefaultPoolIndex(allocDesc, resourceClass);
8032
if (defaultPoolIndex != UINT32_MAX)
8033
{
8034
outBlockVector = m_BlockVectors[defaultPoolIndex];
8035
const UINT64 preferredBlockSize = outBlockVector->GetPreferredBlockSize();
8036
if (allocSize > preferredBlockSize)
8037
{
8038
outBlockVector = NULL;
8039
}
8040
else if (allocSize > preferredBlockSize / 2)
8041
{
8042
// Heuristics: Allocate committed memory if the requested size is greater than half of the preferred block size.
8043
outPreferCommitted = true;
8044
}
8045
}
8046
8047
const D3D12_HEAP_FLAGS extraHeapFlags = allocDesc.ExtraHeapFlags & ~RESOURCE_CLASS_HEAP_FLAGS;
8048
if (outBlockVector != NULL && extraHeapFlags != 0)
8049
{
8050
outBlockVector = NULL;
8051
}
8052
}
8053
8054
if ((allocDesc.Flags & ALLOCATION_FLAG_COMMITTED) != 0 ||
8055
m_AlwaysCommitted)
8056
{
8057
outBlockVector = NULL;
8058
}
8059
if ((allocDesc.Flags & ALLOCATION_FLAG_NEVER_ALLOCATE) != 0)
8060
{
8061
outCommittedAllocationParams.m_List = NULL;
8062
}
8063
outCommittedAllocationParams.m_CanAlias = allocDesc.Flags & ALLOCATION_FLAG_CAN_ALIAS;
8064
8065
if (resDesc != NULL)
8066
{
8067
if (resDesc->SampleDesc.Count > 1 && msaaAlwaysCommitted)
8068
outBlockVector = NULL;
8069
if (!outPreferCommitted && PrefersCommittedAllocation(*resDesc))
8070
outPreferCommitted = true;
8071
}
8072
8073
return (outBlockVector != NULL || outCommittedAllocationParams.m_List != NULL) ? S_OK : E_INVALIDARG;
8074
}
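// Decision summary for CalcAllocationParams: outside of custom pools, a default block vector is
// chosen only for a standard heap type with no extra heap flags left after masking and a request
// that fits in a block (size <= preferred block size); sizes above half a block prefer committed
// memory. ALLOCATION_FLAG_COMMITTED or an always-committed allocator disables suballocation,
// ALLOCATION_FLAG_NEVER_ALLOCATE disables committed allocations, and multisampled resources fall
// back to committed memory when the target pool denies MSAA textures.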
8075
8076
UINT AllocatorPimpl::CalcDefaultPoolIndex(const ALLOCATION_DESC& allocDesc, ResourceClass resourceClass) const
8077
{
8078
D3D12_HEAP_FLAGS extraHeapFlags = allocDesc.ExtraHeapFlags & ~RESOURCE_CLASS_HEAP_FLAGS;
8079
8080
#if D3D12MA_CREATE_NOT_ZEROED_AVAILABLE
8081
// If the allocator was created with ALLOCATOR_FLAG_DEFAULT_POOLS_NOT_ZEROED, also ignore
8082
// D3D12_HEAP_FLAG_CREATE_NOT_ZEROED.
8083
if(m_DefaultPoolsNotZeroed)
8084
{
8085
extraHeapFlags &= ~D3D12_HEAP_FLAG_CREATE_NOT_ZEROED;
8086
}
8087
#endif
8088
8089
if (extraHeapFlags != 0)
8090
{
8091
return UINT32_MAX;
8092
}
8093
8094
UINT poolIndex = UINT_MAX;
8095
switch (allocDesc.HeapType)
8096
{
8097
case D3D12_HEAP_TYPE_DEFAULT: poolIndex = 0; break;
8098
case D3D12_HEAP_TYPE_UPLOAD: poolIndex = 1; break;
8099
case D3D12_HEAP_TYPE_READBACK: poolIndex = 2; break;
8100
default: D3D12MA_ASSERT(0);
8101
}
8102
8103
if (SupportsResourceHeapTier2())
8104
return poolIndex;
8105
else
8106
{
8107
switch (resourceClass)
8108
{
8109
case ResourceClass::Buffer:
8110
return poolIndex * 3;
8111
case ResourceClass::Non_RT_DS_Texture:
8112
return poolIndex * 3 + 1;
8113
case ResourceClass::RT_DS_Texture:
8114
return poolIndex * 3 + 2;
8115
default:
8116
return UINT32_MAX;
8117
}
8118
}
8119
}
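// Default pool index layout implied above: on D3D12_RESOURCE_HEAP_TIER_2 the index equals the
// heap type slot (0 = DEFAULT, 1 = UPLOAD, 2 = READBACK); on tier 1 each heap type owns three
// block vectors and index = slot * 3 + {0 = buffers, 1 = non-RT/DS textures, 2 = RT/DS textures}.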
8120
8121
void AllocatorPimpl::CalcDefaultPoolParams(D3D12_HEAP_TYPE& outHeapType, D3D12_HEAP_FLAGS& outHeapFlags, UINT index) const
8122
{
8123
outHeapType = D3D12_HEAP_TYPE_DEFAULT;
8124
outHeapFlags = D3D12_HEAP_FLAG_NONE;
8125
8126
if (!SupportsResourceHeapTier2())
8127
{
8128
switch (index % 3)
8129
{
8130
case 0:
8131
outHeapFlags = D3D12_HEAP_FLAG_DENY_RT_DS_TEXTURES | D3D12_HEAP_FLAG_DENY_NON_RT_DS_TEXTURES;
8132
break;
8133
case 1:
8134
outHeapFlags = D3D12_HEAP_FLAG_DENY_BUFFERS | D3D12_HEAP_FLAG_DENY_RT_DS_TEXTURES;
8135
break;
8136
case 2:
8137
outHeapFlags = D3D12_HEAP_FLAG_DENY_BUFFERS | D3D12_HEAP_FLAG_DENY_NON_RT_DS_TEXTURES;
8138
break;
8139
}
8140
8141
index /= 3;
8142
}
8143
8144
switch (index)
8145
{
8146
case 0:
8147
outHeapType = D3D12_HEAP_TYPE_DEFAULT;
8148
break;
8149
case 1:
8150
outHeapType = D3D12_HEAP_TYPE_UPLOAD;
8151
break;
8152
case 2:
8153
outHeapType = D3D12_HEAP_TYPE_READBACK;
8154
break;
8155
default:
8156
D3D12MA_ASSERT(0);
8157
}
8158
}
8159
8160
void AllocatorPimpl::RegisterPool(Pool* pool, D3D12_HEAP_TYPE heapType)
8161
{
8162
const UINT heapTypeIndex = (UINT)heapType - 1;
8163
8164
MutexLockWrite lock(m_PoolsMutex[heapTypeIndex], m_UseMutex);
8165
m_Pools[heapTypeIndex].PushBack(pool->m_Pimpl);
8166
}
8167
8168
void AllocatorPimpl::UnregisterPool(Pool* pool, D3D12_HEAP_TYPE heapType)
8169
{
8170
const UINT heapTypeIndex = (UINT)heapType - 1;
8171
8172
MutexLockWrite lock(m_PoolsMutex[heapTypeIndex], m_UseMutex);
8173
m_Pools[heapTypeIndex].Remove(pool->m_Pimpl);
8174
}
8175
8176
HRESULT AllocatorPimpl::UpdateD3D12Budget()
8177
{
8178
#if D3D12MA_DXGI_1_4
8179
if (m_Adapter3)
8180
return m_Budget.UpdateBudget(m_Adapter3, m_UseMutex);
8181
else
8182
return E_NOINTERFACE;
8183
#else
8184
return S_OK;
8185
#endif
8186
}
8187
8188
D3D12_RESOURCE_ALLOCATION_INFO AllocatorPimpl::GetResourceAllocationInfoNative(const D3D12_RESOURCE_DESC& resourceDesc) const
8189
{
8190
return m_Device->GetResourceAllocationInfo(0, 1, &resourceDesc);
8191
}
8192
8193
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
8194
D3D12_RESOURCE_ALLOCATION_INFO AllocatorPimpl::GetResourceAllocationInfoNative(const D3D12_RESOURCE_DESC1& resourceDesc) const
8195
{
8196
D3D12MA_ASSERT(m_Device8 != NULL);
8197
D3D12_RESOURCE_ALLOCATION_INFO1 info1Unused;
8198
return m_Device8->GetResourceAllocationInfo2(0, 1, &resourceDesc, &info1Unused);
8199
}
8200
#endif // #ifdef __ID3D12Device8_INTERFACE_DEFINED__
8201
8202
template<typename D3D12_RESOURCE_DESC_T>
8203
D3D12_RESOURCE_ALLOCATION_INFO AllocatorPimpl::GetResourceAllocationInfo(D3D12_RESOURCE_DESC_T& inOutResourceDesc) const
8204
{
8205
#ifdef __ID3D12Device1_INTERFACE_DEFINED__
8206
/* Optional optimization: Microsoft documentation says:
8207
https://docs.microsoft.com/en-us/windows/win32/api/d3d12/nf-d3d12-id3d12device-getresourceallocationinfo
8208
8209
Your application can forgo using GetResourceAllocationInfo for buffer resources
8210
(D3D12_RESOURCE_DIMENSION_BUFFER). Buffers have the same size on all adapters,
8211
which is merely the smallest multiple of 64KB that's greater or equal to
8212
D3D12_RESOURCE_DESC::Width.
8213
*/
8214
if (inOutResourceDesc.Alignment == 0 &&
8215
inOutResourceDesc.Dimension == D3D12_RESOURCE_DIMENSION_BUFFER)
8216
{
8217
return {
8218
AlignUp<UINT64>(inOutResourceDesc.Width, D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT), // SizeInBytes
8219
D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT }; // Alignment
8220
}
8221
#endif // #ifdef __ID3D12Device1_INTERFACE_DEFINED__
8222
8223
#if D3D12MA_USE_SMALL_RESOURCE_PLACEMENT_ALIGNMENT
8224
if (inOutResourceDesc.Alignment == 0 &&
8225
inOutResourceDesc.Dimension == D3D12_RESOURCE_DIMENSION_TEXTURE2D &&
8226
(inOutResourceDesc.Flags & (D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET | D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL)) == 0
8227
#if D3D12MA_USE_SMALL_RESOURCE_PLACEMENT_ALIGNMENT == 1
8228
&& CanUseSmallAlignment(inOutResourceDesc)
8229
#endif
8230
)
8231
{
8232
/*
8233
The algorithm here is based on Microsoft sample: "Small Resources Sample"
8234
https://github.com/microsoft/DirectX-Graphics-Samples/tree/master/Samples/Desktop/D3D12SmallResources
8235
*/
8236
const UINT64 smallAlignmentToTry = inOutResourceDesc.SampleDesc.Count > 1 ?
8237
D3D12_SMALL_MSAA_RESOURCE_PLACEMENT_ALIGNMENT :
8238
D3D12_SMALL_RESOURCE_PLACEMENT_ALIGNMENT;
8239
inOutResourceDesc.Alignment = smallAlignmentToTry;
8240
const D3D12_RESOURCE_ALLOCATION_INFO smallAllocInfo = GetResourceAllocationInfoNative(inOutResourceDesc);
8241
// Check whether the requested alignment has been granted.
8242
if (smallAllocInfo.Alignment == smallAlignmentToTry)
8243
{
8244
return smallAllocInfo;
8245
}
8246
inOutResourceDesc.Alignment = 0; // Restore original
8247
}
8248
#endif // #if D3D12MA_USE_SMALL_RESOURCE_PLACEMENT_ALIGNMENT
8249
8250
return GetResourceAllocationInfoNative(inOutResourceDesc);
8251
}
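// Illustrative example for the buffer shortcut above (assumed size): a buffer with Width = 70,000
// bytes and Alignment = 0 is reported as SizeInBytes = 131,072 (two 64 KiB pages) with
// Alignment = 65,536, without querying the device at all.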
8252
8253
bool AllocatorPimpl::NewAllocationWithinBudget(D3D12_HEAP_TYPE heapType, UINT64 size)
8254
{
8255
Budget budget = {};
8256
GetBudgetForHeapType(budget, heapType);
8257
return budget.UsageBytes + size <= budget.BudgetBytes;
8258
}
8259
8260
void AllocatorPimpl::WriteBudgetToJson(JsonWriter& json, const Budget& budget)
8261
{
8262
json.BeginObject();
8263
{
8264
json.WriteString(L"BudgetBytes");
8265
json.WriteNumber(budget.BudgetBytes);
8266
json.WriteString(L"UsageBytes");
8267
json.WriteNumber(budget.UsageBytes);
8268
}
8269
json.EndObject();
8270
}
8271
8272
#endif // _D3D12MA_ALLOCATOR_PIMPL
8273
#endif // _D3D12MA_ALLOCATOR_PIMPL
8274
8275
#ifndef _D3D12MA_VIRTUAL_BLOCK_PIMPL
8276
class VirtualBlockPimpl
8277
{
8278
public:
8279
const ALLOCATION_CALLBACKS m_AllocationCallbacks;
8280
const UINT64 m_Size;
8281
BlockMetadata* m_Metadata;
8282
8283
VirtualBlockPimpl(const ALLOCATION_CALLBACKS& allocationCallbacks, const VIRTUAL_BLOCK_DESC& desc);
8284
~VirtualBlockPimpl();
8285
};
8286
8287
#ifndef _D3D12MA_VIRTUAL_BLOCK_PIMPL_FUNCTIONS
8288
VirtualBlockPimpl::VirtualBlockPimpl(const ALLOCATION_CALLBACKS& allocationCallbacks, const VIRTUAL_BLOCK_DESC& desc)
8289
: m_AllocationCallbacks(allocationCallbacks), m_Size(desc.Size)
8290
{
8291
switch (desc.Flags & VIRTUAL_BLOCK_FLAG_ALGORITHM_MASK)
8292
{
8293
case VIRTUAL_BLOCK_FLAG_ALGORITHM_LINEAR:
8294
m_Metadata = D3D12MA_NEW(allocationCallbacks, BlockMetadata_Linear)(&m_AllocationCallbacks, true);
8295
break;
8296
default:
8297
D3D12MA_ASSERT(0);
8298
case 0:
8299
m_Metadata = D3D12MA_NEW(allocationCallbacks, BlockMetadata_TLSF)(&m_AllocationCallbacks, true);
8300
break;
8301
}
8302
m_Metadata->Init(m_Size);
8303
}
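// Algorithm selection above: BlockMetadata_TLSF is the default; BlockMetadata_Linear is chosen
// only when VIRTUAL_BLOCK_FLAG_ALGORITHM_LINEAR is set. The fall-through after D3D12MA_ASSERT(0)
// is intentional, so an unrecognized algorithm value degrades to TLSF in release builds.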
8304
8305
VirtualBlockPimpl::~VirtualBlockPimpl()
8306
{
8307
D3D12MA_DELETE(m_AllocationCallbacks, m_Metadata);
8308
}
8309
#endif // _D3D12MA_VIRTUAL_BLOCK_PIMPL_FUNCTIONS
8310
#endif // _D3D12MA_VIRTUAL_BLOCK_PIMPL
8311
8312
8313
#ifndef _D3D12MA_MEMORY_BLOCK_FUNCTIONS
8314
MemoryBlock::MemoryBlock(
8315
AllocatorPimpl* allocator,
8316
const D3D12_HEAP_PROPERTIES& heapProps,
8317
D3D12_HEAP_FLAGS heapFlags,
8318
UINT64 size,
8319
UINT id)
8320
: m_Allocator(allocator),
8321
m_HeapProps(heapProps),
8322
m_HeapFlags(heapFlags),
8323
m_Size(size),
8324
m_Id(id) {}
8325
8326
MemoryBlock::~MemoryBlock()
8327
{
8328
if (m_Heap)
8329
{
8330
m_Heap->Release();
8331
m_Allocator->m_Budget.RemoveBlock(
8332
m_Allocator->HeapPropertiesToMemorySegmentGroup(m_HeapProps), m_Size);
8333
}
8334
}
8335
8336
HRESULT MemoryBlock::Init(ID3D12ProtectedResourceSession* pProtectedSession, bool denyMsaaTextures)
8337
{
8338
D3D12MA_ASSERT(m_Heap == NULL && m_Size > 0);
8339
8340
D3D12_HEAP_DESC heapDesc = {};
8341
heapDesc.SizeInBytes = m_Size;
8342
heapDesc.Properties = m_HeapProps;
8343
heapDesc.Alignment = HeapFlagsToAlignment(m_HeapFlags, denyMsaaTextures);
8344
heapDesc.Flags = m_HeapFlags;
8345
8346
HRESULT hr;
8347
#ifdef __ID3D12Device4_INTERFACE_DEFINED__
8348
ID3D12Device4* const device4 = m_Allocator->GetDevice4();
8349
if (device4)
8350
hr = m_Allocator->GetDevice4()->CreateHeap1(&heapDesc, pProtectedSession, D3D12MA_IID_PPV_ARGS(&m_Heap));
8351
else
8352
#endif
8353
{
8354
if (pProtectedSession == NULL)
8355
hr = m_Allocator->GetDevice()->CreateHeap(&heapDesc, D3D12MA_IID_PPV_ARGS(&m_Heap));
8356
else
8357
hr = E_NOINTERFACE;
8358
}
8359
8360
if (SUCCEEDED(hr))
8361
{
8362
m_Allocator->m_Budget.AddBlock(
8363
m_Allocator->HeapPropertiesToMemorySegmentGroup(m_HeapProps), m_Size);
8364
}
8365
return hr;
8366
}
8367
#endif // _D3D12MA_MEMORY_BLOCK_FUNCTIONS
8368
8369
#ifndef _D3D12MA_NORMAL_BLOCK_FUNCTIONS
8370
NormalBlock::NormalBlock(
8371
AllocatorPimpl* allocator,
8372
BlockVector* blockVector,
8373
const D3D12_HEAP_PROPERTIES& heapProps,
8374
D3D12_HEAP_FLAGS heapFlags,
8375
UINT64 size,
8376
UINT id)
8377
: MemoryBlock(allocator, heapProps, heapFlags, size, id),
8378
m_pMetadata(NULL),
8379
m_BlockVector(blockVector) {}
8380
8381
NormalBlock::~NormalBlock()
8382
{
8383
if (m_pMetadata != NULL)
8384
{
8385
// Define macro D3D12MA_DEBUG_LOG to receive the list of the unfreed allocations.
8386
if (!m_pMetadata->IsEmpty())
8387
m_pMetadata->DebugLogAllAllocations();
8388
8389
// THIS IS THE MOST IMPORTANT ASSERT IN THE ENTIRE LIBRARY!
8390
// Hitting it means you have some memory leak - unreleased Allocation objects.
8391
D3D12MA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
8392
8393
D3D12MA_DELETE(m_Allocator->GetAllocs(), m_pMetadata);
8394
}
8395
}
8396
8397
HRESULT NormalBlock::Init(UINT32 algorithm, ID3D12ProtectedResourceSession* pProtectedSession, bool denyMsaaTextures)
8398
{
8399
HRESULT hr = MemoryBlock::Init(pProtectedSession, denyMsaaTextures);
8400
if (FAILED(hr))
8401
{
8402
return hr;
8403
}
8404
8405
switch (algorithm)
8406
{
8407
case POOL_FLAG_ALGORITHM_LINEAR:
8408
m_pMetadata = D3D12MA_NEW(m_Allocator->GetAllocs(), BlockMetadata_Linear)(&m_Allocator->GetAllocs(), false);
8409
break;
8410
default:
8411
D3D12MA_ASSERT(0);
8412
case 0:
8413
m_pMetadata = D3D12MA_NEW(m_Allocator->GetAllocs(), BlockMetadata_TLSF)(&m_Allocator->GetAllocs(), false);
8414
break;
8415
}
8416
m_pMetadata->Init(m_Size);
8417
8418
return hr;
8419
}
8420
8421
bool NormalBlock::Validate() const
8422
{
8423
D3D12MA_VALIDATE(GetHeap() &&
8424
m_pMetadata &&
8425
m_pMetadata->GetSize() != 0 &&
8426
m_pMetadata->GetSize() == GetSize());
8427
return m_pMetadata->Validate();
8428
}
8429
#endif // _D3D12MA_NORMAL_BLOCK_FUNCTIONS
8430
8431
#ifndef _D3D12MA_COMMITTED_ALLOCATION_LIST_FUNCTIONS
8432
void CommittedAllocationList::Init(bool useMutex, D3D12_HEAP_TYPE heapType, PoolPimpl* pool)
8433
{
8434
m_UseMutex = useMutex;
8435
m_HeapType = heapType;
8436
m_Pool = pool;
8437
}
8438
8439
CommittedAllocationList::~CommittedAllocationList()
8440
{
8441
if (!m_AllocationList.IsEmpty())
8442
{
8443
D3D12MA_ASSERT(0 && "Unfreed committed allocations found!");
8444
}
8445
}
8446
8447
UINT CommittedAllocationList::GetMemorySegmentGroup(AllocatorPimpl* allocator) const
8448
{
8449
if (m_Pool)
8450
return allocator->HeapPropertiesToMemorySegmentGroup(m_Pool->GetDesc().HeapProperties);
8451
else
8452
return allocator->StandardHeapTypeToMemorySegmentGroup(m_HeapType);
8453
}
8454
8455
void CommittedAllocationList::AddStatistics(Statistics& inoutStats)
8456
{
8457
MutexLockRead lock(m_Mutex, m_UseMutex);
8458
8459
for (Allocation* alloc = m_AllocationList.Front();
8460
alloc != NULL; alloc = m_AllocationList.GetNext(alloc))
8461
{
8462
const UINT64 size = alloc->GetSize();
8463
inoutStats.BlockCount++;
8464
inoutStats.AllocationCount++;
8465
inoutStats.BlockBytes += size;
8466
inoutStats.AllocationBytes += size;
8467
}
8468
}
8469
8470
void CommittedAllocationList::AddDetailedStatistics(DetailedStatistics& inoutStats)
8471
{
8472
MutexLockRead lock(m_Mutex, m_UseMutex);
8473
8474
for (Allocation* alloc = m_AllocationList.Front();
8475
alloc != NULL; alloc = m_AllocationList.GetNext(alloc))
8476
{
8477
const UINT64 size = alloc->GetSize();
8478
inoutStats.Stats.BlockCount++;
8479
inoutStats.Stats.BlockBytes += size;
8480
AddDetailedStatisticsAllocation(inoutStats, size);
8481
}
8482
}
8483
8484
void CommittedAllocationList::BuildStatsString(JsonWriter& json)
8485
{
8486
MutexLockRead lock(m_Mutex, m_UseMutex);
8487
8488
for (Allocation* alloc = m_AllocationList.Front();
8489
alloc != NULL; alloc = m_AllocationList.GetNext(alloc))
8490
{
8491
json.BeginObject(true);
8492
json.AddAllocationToObject(*alloc);
8493
json.EndObject();
8494
}
8495
}
8496
8497
void CommittedAllocationList::Register(Allocation* alloc)
8498
{
8499
MutexLockWrite lock(m_Mutex, m_UseMutex);
8500
m_AllocationList.PushBack(alloc);
8501
}
8502
8503
void CommittedAllocationList::Unregister(Allocation* alloc)
8504
{
8505
MutexLockWrite lock(m_Mutex, m_UseMutex);
8506
m_AllocationList.Remove(alloc);
8507
}
8508
#endif // _D3D12MA_COMMITTED_ALLOCATION_LIST_FUNCTIONS
8509
8510
#ifndef _D3D12MA_BLOCK_VECTOR_FUNCTIONS
8511
BlockVector::BlockVector(
8512
AllocatorPimpl* hAllocator,
8513
const D3D12_HEAP_PROPERTIES& heapProps,
8514
D3D12_HEAP_FLAGS heapFlags,
8515
UINT64 preferredBlockSize,
8516
size_t minBlockCount,
8517
size_t maxBlockCount,
8518
bool explicitBlockSize,
8519
UINT64 minAllocationAlignment,
8520
UINT32 algorithm,
8521
bool denyMsaaTextures,
8522
ID3D12ProtectedResourceSession* pProtectedSession,
8523
D3D12_RESIDENCY_PRIORITY residencyPriority)
8524
: m_hAllocator(hAllocator),
8525
m_HeapProps(heapProps),
8526
m_HeapFlags(heapFlags),
8527
m_PreferredBlockSize(preferredBlockSize),
8528
m_MinBlockCount(minBlockCount),
8529
m_MaxBlockCount(maxBlockCount),
8530
m_ExplicitBlockSize(explicitBlockSize),
8531
m_MinAllocationAlignment(minAllocationAlignment),
8532
m_Algorithm(algorithm),
8533
m_DenyMsaaTextures(denyMsaaTextures),
8534
m_ProtectedSession(pProtectedSession),
8535
m_ResidencyPriority(residencyPriority),
8536
m_HasEmptyBlock(false),
8537
m_Blocks(hAllocator->GetAllocs()),
8538
m_NextBlockId(0) {}
8539
8540
BlockVector::~BlockVector()
8541
{
8542
for (size_t i = m_Blocks.size(); i--; )
8543
{
8544
D3D12MA_DELETE(m_hAllocator->GetAllocs(), m_Blocks[i]);
8545
}
8546
}
8547
8548
HRESULT BlockVector::CreateMinBlocks()
8549
{
8550
for (size_t i = 0; i < m_MinBlockCount; ++i)
8551
{
8552
HRESULT hr = CreateBlock(m_PreferredBlockSize, NULL);
8553
if (FAILED(hr))
8554
{
8555
return hr;
8556
}
8557
}
8558
return S_OK;
8559
}
8560
8561
bool BlockVector::IsEmpty()
8562
{
8563
MutexLockRead lock(m_Mutex, m_hAllocator->UseMutex());
8564
return m_Blocks.empty();
8565
}
8566
8567
HRESULT BlockVector::Allocate(
8568
UINT64 size,
8569
UINT64 alignment,
8570
const ALLOCATION_DESC& allocDesc,
8571
size_t allocationCount,
8572
Allocation** pAllocations)
8573
{
8574
size_t allocIndex;
8575
HRESULT hr = S_OK;
8576
8577
{
8578
MutexLockWrite lock(m_Mutex, m_hAllocator->UseMutex());
8579
for (allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
8580
{
8581
hr = AllocatePage(
8582
size,
8583
alignment,
8584
allocDesc,
8585
pAllocations + allocIndex);
8586
if (FAILED(hr))
8587
{
8588
break;
8589
}
8590
}
8591
}
8592
8593
if (FAILED(hr))
8594
{
8595
// Free all already created allocations.
8596
while (allocIndex--)
8597
{
8598
Free(pAllocations[allocIndex]);
8599
}
8600
ZeroMemory(pAllocations, sizeof(Allocation*) * allocationCount);
8601
}
8602
8603
return hr;
8604
}
8605
8606
void BlockVector::Free(Allocation* hAllocation)
8607
{
8608
NormalBlock* pBlockToDelete = NULL;
8609
8610
bool budgetExceeded = false;
8611
if (IsHeapTypeStandard(m_HeapProps.Type))
8612
{
8613
Budget budget = {};
8614
m_hAllocator->GetBudgetForHeapType(budget, m_HeapProps.Type);
8615
budgetExceeded = budget.UsageBytes >= budget.BudgetBytes;
8616
}
8617
8618
// Scope for lock.
8619
{
8620
MutexLockWrite lock(m_Mutex, m_hAllocator->UseMutex());
8621
8622
NormalBlock* pBlock = hAllocation->m_Placed.block;
8623
8624
pBlock->m_pMetadata->Free(hAllocation->GetAllocHandle());
8625
D3D12MA_HEAVY_ASSERT(pBlock->Validate());
8626
8627
const size_t blockCount = m_Blocks.size();
8628
// pBlock became empty after this deallocation.
8629
if (pBlock->m_pMetadata->IsEmpty())
8630
{
8631
// We already have an empty block. We don't want two, so delete this one.
8632
if ((m_HasEmptyBlock || budgetExceeded) &&
8633
blockCount > m_MinBlockCount)
8634
{
8635
pBlockToDelete = pBlock;
8636
Remove(pBlock);
8637
}
8638
// We now have first empty block.
8639
else
8640
{
8641
m_HasEmptyBlock = true;
8642
}
8643
}
8644
// pBlock didn't become empty, but we have another empty block - find and free that one.
8645
// (This is optional, heuristics.)
8646
else if (m_HasEmptyBlock && blockCount > m_MinBlockCount)
8647
{
8648
NormalBlock* pLastBlock = m_Blocks.back();
8649
if (pLastBlock->m_pMetadata->IsEmpty())
8650
{
8651
pBlockToDelete = pLastBlock;
8652
m_Blocks.pop_back();
8653
m_HasEmptyBlock = false;
8654
}
8655
}
8656
8657
IncrementallySortBlocks();
8658
}
8659
8660
// Destruction of a free block. Deferred until this point, outside of the mutex
8661
// lock, for performance reasons.
8662
if (pBlockToDelete != NULL)
8663
{
8664
D3D12MA_DELETE(m_hAllocator->GetAllocs(), pBlockToDelete);
8665
}
8666
}
8667
8668
HRESULT BlockVector::CreateResource(
8669
UINT64 size,
8670
UINT64 alignment,
8671
const ALLOCATION_DESC& allocDesc,
8672
const CREATE_RESOURCE_PARAMS& createParams,
8673
Allocation** ppAllocation,
8674
REFIID riidResource,
8675
void** ppvResource)
8676
{
8677
HRESULT hr = Allocate(size, alignment, allocDesc, 1, ppAllocation);
8678
if (SUCCEEDED(hr))
8679
{
8680
ID3D12Resource* res = NULL;
8681
hr = m_hAllocator->CreatePlacedResourceWrap(
8682
(*ppAllocation)->m_Placed.block->GetHeap(),
8683
(*ppAllocation)->GetOffset(),
8684
createParams,
8685
D3D12MA_IID_PPV_ARGS(&res));
8686
if (SUCCEEDED(hr))
8687
{
8688
if (ppvResource != NULL)
8689
{
8690
hr = res->QueryInterface(riidResource, ppvResource);
8691
}
8692
if (SUCCEEDED(hr))
8693
{
8694
(*ppAllocation)->SetResourcePointer(res, createParams.GetBaseResourceDesc());
8695
}
8696
else
8697
{
8698
res->Release();
8699
SAFE_RELEASE(*ppAllocation);
8700
}
8701
}
8702
else
8703
{
8704
SAFE_RELEASE(*ppAllocation);
8705
}
8706
}
8707
return hr;
8708
}
8709
8710
void BlockVector::AddStatistics(Statistics& inoutStats)
8711
{
8712
MutexLockRead lock(m_Mutex, m_hAllocator->UseMutex());
8713
8714
for (size_t i = 0; i < m_Blocks.size(); ++i)
8715
{
8716
const NormalBlock* const pBlock = m_Blocks[i];
8717
D3D12MA_ASSERT(pBlock);
8718
D3D12MA_HEAVY_ASSERT(pBlock->Validate());
8719
pBlock->m_pMetadata->AddStatistics(inoutStats);
8720
}
8721
}
8722
8723
void BlockVector::AddDetailedStatistics(DetailedStatistics& inoutStats)
8724
{
8725
MutexLockRead lock(m_Mutex, m_hAllocator->UseMutex());
8726
8727
for (size_t i = 0; i < m_Blocks.size(); ++i)
8728
{
8729
const NormalBlock* const pBlock = m_Blocks[i];
8730
D3D12MA_ASSERT(pBlock);
8731
D3D12MA_HEAVY_ASSERT(pBlock->Validate());
8732
pBlock->m_pMetadata->AddDetailedStatistics(inoutStats);
8733
}
8734
}
8735
8736
void BlockVector::WriteBlockInfoToJson(JsonWriter& json)
8737
{
8738
MutexLockRead lock(m_Mutex, m_hAllocator->UseMutex());
8739
8740
json.BeginObject();
8741
8742
for (size_t i = 0, count = m_Blocks.size(); i < count; ++i)
8743
{
8744
const NormalBlock* const pBlock = m_Blocks[i];
8745
D3D12MA_ASSERT(pBlock);
8746
D3D12MA_HEAVY_ASSERT(pBlock->Validate());
8747
json.BeginString();
8748
json.ContinueString(pBlock->GetId());
8749
json.EndString();
8750
8751
json.BeginObject();
8752
pBlock->m_pMetadata->WriteAllocationInfoToJson(json);
8753
json.EndObject();
8754
}
8755
8756
json.EndObject();
8757
}
8758
8759
UINT64 BlockVector::CalcSumBlockSize() const
8760
{
8761
UINT64 result = 0;
8762
for (size_t i = m_Blocks.size(); i--; )
8763
{
8764
result += m_Blocks[i]->m_pMetadata->GetSize();
8765
}
8766
return result;
8767
}
8768
8769
UINT64 BlockVector::CalcMaxBlockSize() const
8770
{
8771
UINT64 result = 0;
8772
for (size_t i = m_Blocks.size(); i--; )
8773
{
8774
result = D3D12MA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
8775
if (result >= m_PreferredBlockSize)
8776
{
8777
break;
8778
}
8779
}
8780
return result;
8781
}
8782
8783
void BlockVector::Remove(NormalBlock* pBlock)
8784
{
8785
for (size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
8786
{
8787
if (m_Blocks[blockIndex] == pBlock)
8788
{
8789
m_Blocks.remove(blockIndex);
8790
return;
8791
}
8792
}
8793
D3D12MA_ASSERT(0);
8794
}
8795
8796
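// The two sorting helpers below keep m_Blocks ordered by ascending free size, so that
// AllocatePage, which scans the vector front to back, prefers the fullest blocks first.
// IncrementallySortBlocks performs at most one adjacent swap per call (cheap enough to
// run on every Free), while SortByFreeSize does a full sort and is used by defragmentation.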
void BlockVector::IncrementallySortBlocks()
{
    if (!m_IncrementalSort)
        return;
    // Bubble sort only until first swap.
    for (size_t i = 1; i < m_Blocks.size(); ++i)
    {
        if (m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
        {
            D3D12MA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
            return;
        }
    }
}

void BlockVector::SortByFreeSize()
{
    D3D12MA_SORT(m_Blocks.begin(), m_Blocks.end(),
        [](auto* b1, auto* b2)
        {
            return b1->m_pMetadata->GetSumFreeSize() < b2->m_pMetadata->GetSumFreeSize();
        });
}
8819
8820
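// Illustrative example of the block sizing strategy used below (not from the original
// sources): with m_PreferredBlockSize = 64 MiB, an empty block vector, no explicit block
// size, and a 5 MiB request, the first loop halves the candidate size while it stays at
// least twice the request and larger than any existing block, so the first block created
// is 16 MiB rather than 64 MiB (assuming NEW_BLOCK_SIZE_SHIFT_MAX permits two halvings).
// If creating that block fails, the second loop retries with progressively smaller sizes
// down to the request size before giving up with E_OUTOFMEMORY.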
HRESULT BlockVector::AllocatePage(
8821
UINT64 size,
8822
UINT64 alignment,
8823
const ALLOCATION_DESC& allocDesc,
8824
Allocation** pAllocation)
8825
{
8826
// Early reject: requested allocation size is larger than the maximum block size for this block vector.
8827
if (size + D3D12MA_DEBUG_MARGIN > m_PreferredBlockSize)
8828
{
8829
return E_OUTOFMEMORY;
8830
}
8831
8832
UINT64 freeMemory = UINT64_MAX;
8833
if (IsHeapTypeStandard(m_HeapProps.Type))
8834
{
8835
Budget budget = {};
8836
m_hAllocator->GetBudgetForHeapType(budget, m_HeapProps.Type);
8837
freeMemory = (budget.UsageBytes < budget.BudgetBytes) ? (budget.BudgetBytes - budget.UsageBytes) : 0;
8838
}
8839
8840
const bool canCreateNewBlock =
8841
((allocDesc.Flags & ALLOCATION_FLAG_NEVER_ALLOCATE) == 0) &&
8842
(m_Blocks.size() < m_MaxBlockCount) &&
8843
// Even if we don't have to stay within budget with this allocation, when the
// budget would be exceeded we don't want to allocate new blocks, but instead
// always create resources as committed.
8846
freeMemory >= size;
8847
8848
// 1. Search existing allocations
8849
{
8850
// Forward order in m_Blocks - prefer blocks with smallest amount of free space.
8851
for (size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
8852
{
8853
NormalBlock* const pCurrBlock = m_Blocks[blockIndex];
8854
D3D12MA_ASSERT(pCurrBlock);
8855
HRESULT hr = AllocateFromBlock(
8856
pCurrBlock,
8857
size,
8858
alignment,
8859
allocDesc.Flags,
8860
allocDesc.pPrivateData,
8861
allocDesc.Flags & ALLOCATION_FLAG_STRATEGY_MASK,
8862
pAllocation);
8863
if (SUCCEEDED(hr))
8864
{
8865
return hr;
8866
}
8867
}
8868
}
8869
8870
// 2. Try to create new block.
8871
if (canCreateNewBlock)
8872
{
8873
// Calculate optimal size for new block.
8874
UINT64 newBlockSize = m_PreferredBlockSize;
8875
UINT newBlockSizeShift = 0;
8876
8877
if (!m_ExplicitBlockSize)
8878
{
8879
// Allocate 1/8, 1/4, 1/2 as first blocks.
8880
const UINT64 maxExistingBlockSize = CalcMaxBlockSize();
8881
for (UINT i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
8882
{
8883
const UINT64 smallerNewBlockSize = newBlockSize / 2;
8884
if (smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
8885
{
8886
newBlockSize = smallerNewBlockSize;
8887
++newBlockSizeShift;
8888
}
8889
else
8890
{
8891
break;
8892
}
8893
}
8894
}
8895
8896
size_t newBlockIndex = 0;
8897
HRESULT hr = newBlockSize <= freeMemory ?
8898
CreateBlock(newBlockSize, &newBlockIndex) : E_OUTOFMEMORY;
8899
// Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
8900
if (!m_ExplicitBlockSize)
8901
{
8902
while (FAILED(hr) && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
8903
{
8904
const UINT64 smallerNewBlockSize = newBlockSize / 2;
8905
if (smallerNewBlockSize >= size)
8906
{
8907
newBlockSize = smallerNewBlockSize;
8908
++newBlockSizeShift;
8909
hr = newBlockSize <= freeMemory ?
8910
CreateBlock(newBlockSize, &newBlockIndex) : E_OUTOFMEMORY;
8911
}
8912
else
8913
{
8914
break;
8915
}
8916
}
8917
}
8918
8919
if (SUCCEEDED(hr))
8920
{
8921
NormalBlock* const pBlock = m_Blocks[newBlockIndex];
8922
D3D12MA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
8923
8924
hr = AllocateFromBlock(
8925
pBlock,
8926
size,
8927
alignment,
8928
allocDesc.Flags,
8929
allocDesc.pPrivateData,
8930
allocDesc.Flags & ALLOCATION_FLAG_STRATEGY_MASK,
8931
pAllocation);
8932
if (SUCCEEDED(hr))
8933
{
8934
return hr;
8935
}
8936
else
8937
{
8938
// Allocation from new block failed, possibly due to D3D12MA_DEBUG_MARGIN or alignment.
8939
return E_OUTOFMEMORY;
8940
}
8941
}
8942
}
8943
8944
return E_OUTOFMEMORY;
8945
}
8946
8947
HRESULT BlockVector::AllocateFromBlock(
8948
NormalBlock* pBlock,
8949
UINT64 size,
8950
UINT64 alignment,
8951
ALLOCATION_FLAGS allocFlags,
8952
void* pPrivateData,
8953
UINT32 strategy,
8954
Allocation** pAllocation)
8955
{
8956
alignment = D3D12MA_MAX(alignment, m_MinAllocationAlignment);
8957
8958
AllocationRequest currRequest = {};
8959
if (pBlock->m_pMetadata->CreateAllocationRequest(
8960
size,
8961
alignment,
8962
allocFlags & ALLOCATION_FLAG_UPPER_ADDRESS,
8963
strategy,
8964
&currRequest))
8965
{
8966
return CommitAllocationRequest(currRequest, pBlock, size, alignment, pPrivateData, pAllocation);
8967
}
8968
return E_OUTOFMEMORY;
8969
}
8970
8971
HRESULT BlockVector::CommitAllocationRequest(
8972
AllocationRequest& allocRequest,
8973
NormalBlock* pBlock,
8974
UINT64 size,
8975
UINT64 alignment,
8976
void* pPrivateData,
8977
Allocation** pAllocation)
8978
{
8979
// We no longer have an empty Allocation.
8980
if (pBlock->m_pMetadata->IsEmpty())
8981
m_HasEmptyBlock = false;
8982
8983
*pAllocation = m_hAllocator->GetAllocationObjectAllocator().Allocate(m_hAllocator, size, alignment, allocRequest.zeroInitialized);
8984
pBlock->m_pMetadata->Alloc(allocRequest, size, *pAllocation);
8985
8986
(*pAllocation)->InitPlaced(allocRequest.allocHandle, pBlock);
8987
(*pAllocation)->SetPrivateData(pPrivateData);
8988
8989
D3D12MA_HEAVY_ASSERT(pBlock->Validate());
8990
m_hAllocator->m_Budget.AddAllocation(m_hAllocator->HeapPropertiesToMemorySegmentGroup(m_HeapProps), size);
8991
8992
return S_OK;
8993
}
8994
8995
HRESULT BlockVector::CreateBlock(
8996
UINT64 blockSize,
8997
size_t* pNewBlockIndex)
8998
{
8999
NormalBlock* const pBlock = D3D12MA_NEW(m_hAllocator->GetAllocs(), NormalBlock)(
9000
m_hAllocator,
9001
this,
9002
m_HeapProps,
9003
m_HeapFlags,
9004
blockSize,
9005
m_NextBlockId++);
9006
HRESULT hr = pBlock->Init(m_Algorithm, m_ProtectedSession, m_DenyMsaaTextures);
9007
if (FAILED(hr))
9008
{
9009
D3D12MA_DELETE(m_hAllocator->GetAllocs(), pBlock);
9010
return hr;
9011
}
9012
9013
m_hAllocator->SetResidencyPriority(pBlock->GetHeap(), m_ResidencyPriority);
9014
9015
m_Blocks.push_back(pBlock);
9016
if (pNewBlockIndex != NULL)
9017
{
9018
*pNewBlockIndex = m_Blocks.size() - 1;
9019
}
9020
9021
return hr;
9022
}
9023
#endif // _D3D12MA_BLOCK_VECTOR_FUNCTIONS
9024
9025
#ifndef _D3D12MA_DEFRAGMENTATION_CONTEXT_PIMPL_FUNCTIONS
9026
DefragmentationContextPimpl::DefragmentationContextPimpl(
9027
AllocatorPimpl* hAllocator,
9028
const DEFRAGMENTATION_DESC& desc,
9029
BlockVector* poolVector)
9030
: m_MaxPassBytes(desc.MaxBytesPerPass == 0 ? UINT64_MAX : desc.MaxBytesPerPass),
9031
m_MaxPassAllocations(desc.MaxAllocationsPerPass == 0 ? UINT32_MAX : desc.MaxAllocationsPerPass),
9032
m_Moves(hAllocator->GetAllocs())
9033
{
9034
m_Algorithm = desc.Flags & DEFRAGMENTATION_FLAG_ALGORITHM_MASK;
9035
9036
if (poolVector != NULL)
9037
{
9038
m_BlockVectorCount = 1;
9039
m_PoolBlockVector = poolVector;
9040
m_pBlockVectors = &m_PoolBlockVector;
9041
m_PoolBlockVector->SetIncrementalSort(false);
9042
m_PoolBlockVector->SortByFreeSize();
9043
}
9044
else
9045
{
9046
m_BlockVectorCount = hAllocator->GetDefaultPoolCount();
9047
m_PoolBlockVector = NULL;
9048
m_pBlockVectors = hAllocator->GetDefaultPools();
9049
for (UINT32 i = 0; i < m_BlockVectorCount; ++i)
9050
{
9051
BlockVector* vector = m_pBlockVectors[i];
9052
if (vector != NULL)
9053
{
9054
vector->SetIncrementalSort(false);
9055
vector->SortByFreeSize();
9056
}
9057
}
9058
}
9059
9060
switch (m_Algorithm)
9061
{
9062
case 0: // Default algorithm
9063
m_Algorithm = DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED;
9064
case DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED:
9065
{
9066
m_AlgorithmState = D3D12MA_NEW_ARRAY(hAllocator->GetAllocs(), StateBalanced, m_BlockVectorCount);
9067
break;
9068
}
9069
}
9070
}
9071
9072
DefragmentationContextPimpl::~DefragmentationContextPimpl()
9073
{
9074
if (m_PoolBlockVector != NULL)
9075
m_PoolBlockVector->SetIncrementalSort(true);
9076
else
9077
{
9078
for (UINT32 i = 0; i < m_BlockVectorCount; ++i)
9079
{
9080
BlockVector* vector = m_pBlockVectors[i];
9081
if (vector != NULL)
9082
vector->SetIncrementalSort(true);
9083
}
9084
}
9085
9086
if (m_AlgorithmState)
9087
{
9088
switch (m_Algorithm)
9089
{
9090
case DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED:
9091
D3D12MA_DELETE_ARRAY(m_Moves.GetAllocs(), reinterpret_cast<StateBalanced*>(m_AlgorithmState), m_BlockVectorCount);
9092
break;
9093
default:
9094
D3D12MA_ASSERT(0);
9095
}
9096
}
9097
}
9098
9099
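// BeginPass computes a list of proposed moves and returns S_FALSE when there is work for
// the caller (S_OK when defragmentation is finished); the caller then copies data from
// pSrcAllocation to pDstTmpAllocation (or overrides move.Operation) and calls EndPass to
// commit the pass. A minimal usage sketch through the public DefragmentationContext
// wrapper, assuming the caller performs the GPU copies and waits for them between the
// two calls, might look like:
//
//   DEFRAGMENTATION_PASS_MOVE_INFO pass = {};
//   while (defragCtx->BeginPass(&pass) == S_FALSE)
//   {
//       for (UINT32 i = 0; i < pass.MoveCount; ++i)
//       {
//           // Record and execute a copy from pass.pMoves[i].pSrcAllocation to
//           // pass.pMoves[i].pDstTmpAllocation, then recreate views/descriptors.
//       }
//       if (defragCtx->EndPass(&pass) == S_OK)
//           break;
//   }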
HRESULT DefragmentationContextPimpl::DefragmentPassBegin(DEFRAGMENTATION_PASS_MOVE_INFO& moveInfo)
9100
{
9101
if (m_PoolBlockVector != NULL)
9102
{
9103
MutexLockWrite lock(m_PoolBlockVector->GetMutex(), m_PoolBlockVector->m_hAllocator->UseMutex());
9104
9105
if (m_PoolBlockVector->GetBlockCount() > 1)
9106
ComputeDefragmentation(*m_PoolBlockVector, 0);
9107
else if (m_PoolBlockVector->GetBlockCount() == 1)
9108
ReallocWithinBlock(*m_PoolBlockVector, m_PoolBlockVector->GetBlock(0));
9109
9110
// Set up index into the block vector
9111
for (size_t i = 0; i < m_Moves.size(); ++i)
9112
m_Moves[i].pDstTmpAllocation->SetPrivateData(0);
9113
}
9114
else
9115
{
9116
for (UINT32 i = 0; i < m_BlockVectorCount; ++i)
9117
{
9118
if (m_pBlockVectors[i] != NULL)
9119
{
9120
MutexLockWrite lock(m_pBlockVectors[i]->GetMutex(), m_pBlockVectors[i]->m_hAllocator->UseMutex());
9121
9122
bool end = false;
9123
size_t movesOffset = m_Moves.size();
9124
if (m_pBlockVectors[i]->GetBlockCount() > 1)
9125
{
9126
end = ComputeDefragmentation(*m_pBlockVectors[i], i);
9127
}
9128
else if (m_pBlockVectors[i]->GetBlockCount() == 1)
9129
{
9130
end = ReallocWithinBlock(*m_pBlockVectors[i], m_pBlockVectors[i]->GetBlock(0));
9131
}
9132
9133
// Set up index into the block vector
9134
for (; movesOffset < m_Moves.size(); ++movesOffset)
9135
m_Moves[movesOffset].pDstTmpAllocation->SetPrivateData(reinterpret_cast<void*>(static_cast<uintptr_t>(i)));
9136
9137
if (end)
9138
break;
9139
}
9140
}
9141
}
9142
9143
moveInfo.MoveCount = static_cast<UINT32>(m_Moves.size());
9144
if (moveInfo.MoveCount > 0)
9145
{
9146
moveInfo.pMoves = m_Moves.data();
9147
return S_FALSE;
9148
}
9149
9150
moveInfo.pMoves = NULL;
9151
return S_OK;
9152
}
9153
9154
HRESULT DefragmentationContextPimpl::DefragmentPassEnd(DEFRAGMENTATION_PASS_MOVE_INFO& moveInfo)
9155
{
9156
D3D12MA_ASSERT(moveInfo.MoveCount > 0 ? moveInfo.pMoves != NULL : true);
9157
9158
HRESULT result = S_OK;
9159
Vector<FragmentedBlock> immovableBlocks(m_Moves.GetAllocs());
9160
9161
for (uint32_t i = 0; i < moveInfo.MoveCount; ++i)
9162
{
9163
DEFRAGMENTATION_MOVE& move = moveInfo.pMoves[i];
9164
size_t prevCount = 0, currentCount = 0;
9165
UINT64 freedBlockSize = 0;
9166
9167
UINT32 vectorIndex;
9168
BlockVector* vector;
9169
if (m_PoolBlockVector != NULL)
9170
{
9171
vectorIndex = 0;
9172
vector = m_PoolBlockVector;
9173
}
9174
else
9175
{
9176
vectorIndex = static_cast<UINT32>(reinterpret_cast<uintptr_t>(move.pDstTmpAllocation->GetPrivateData()));
9177
vector = m_pBlockVectors[vectorIndex];
9178
D3D12MA_ASSERT(vector != NULL);
9179
}
9180
9181
switch (move.Operation)
9182
{
9183
case DEFRAGMENTATION_MOVE_OPERATION_COPY:
9184
{
9185
move.pSrcAllocation->SwapBlockAllocation(move.pDstTmpAllocation);
9186
9187
// Scope for locks; Free has its own lock
9188
{
9189
MutexLockRead lock(vector->GetMutex(), vector->m_hAllocator->UseMutex());
9190
prevCount = vector->GetBlockCount();
9191
freedBlockSize = move.pDstTmpAllocation->GetBlock()->m_pMetadata->GetSize();
9192
}
9193
move.pDstTmpAllocation->Release();
9194
{
9195
MutexLockRead lock(vector->GetMutex(), vector->m_hAllocator->UseMutex());
9196
currentCount = vector->GetBlockCount();
9197
}
9198
9199
result = S_FALSE;
9200
break;
9201
}
9202
case DEFRAGMENTATION_MOVE_OPERATION_IGNORE:
9203
{
9204
m_PassStats.BytesMoved -= move.pSrcAllocation->GetSize();
9205
--m_PassStats.AllocationsMoved;
9206
move.pDstTmpAllocation->Release();
9207
9208
NormalBlock* newBlock = move.pSrcAllocation->GetBlock();
9209
bool notPresent = true;
9210
for (const FragmentedBlock& block : immovableBlocks)
9211
{
9212
if (block.block == newBlock)
9213
{
9214
notPresent = false;
9215
break;
9216
}
9217
}
9218
if (notPresent)
9219
immovableBlocks.push_back({ vectorIndex, newBlock });
9220
break;
9221
}
9222
case DEFRAGMENTATION_MOVE_OPERATION_DESTROY:
9223
{
9224
m_PassStats.BytesMoved -= move.pSrcAllocation->GetSize();
9225
--m_PassStats.AllocationsMoved;
9226
// Scope for locks; Free has its own lock
9227
{
9228
MutexLockRead lock(vector->GetMutex(), vector->m_hAllocator->UseMutex());
9229
prevCount = vector->GetBlockCount();
9230
freedBlockSize = move.pSrcAllocation->GetBlock()->m_pMetadata->GetSize();
9231
}
9232
move.pSrcAllocation->Release();
9233
{
9234
MutexLockRead lock(vector->GetMutex(), vector->m_hAllocator->UseMutex());
9235
currentCount = vector->GetBlockCount();
9236
}
9237
freedBlockSize *= prevCount - currentCount;
9238
9239
UINT64 dstBlockSize;
9240
{
9241
MutexLockRead lock(vector->GetMutex(), vector->m_hAllocator->UseMutex());
9242
dstBlockSize = move.pDstTmpAllocation->GetBlock()->m_pMetadata->GetSize();
9243
}
9244
move.pDstTmpAllocation->Release();
9245
{
9246
MutexLockRead lock(vector->GetMutex(), vector->m_hAllocator->UseMutex());
9247
freedBlockSize += dstBlockSize * (currentCount - vector->GetBlockCount());
9248
currentCount = vector->GetBlockCount();
9249
}
9250
9251
result = S_FALSE;
9252
break;
9253
}
9254
default:
9255
D3D12MA_ASSERT(0);
9256
}
9257
9258
if (prevCount > currentCount)
9259
{
9260
size_t freedBlocks = prevCount - currentCount;
9261
m_PassStats.HeapsFreed += static_cast<UINT32>(freedBlocks);
9262
m_PassStats.BytesFreed += freedBlockSize;
9263
}
9264
}
9265
moveInfo.MoveCount = 0;
9266
moveInfo.pMoves = NULL;
9267
m_Moves.clear();
9268
9269
// Update stats
9270
m_GlobalStats.AllocationsMoved += m_PassStats.AllocationsMoved;
9271
m_GlobalStats.BytesFreed += m_PassStats.BytesFreed;
9272
m_GlobalStats.BytesMoved += m_PassStats.BytesMoved;
9273
m_GlobalStats.HeapsFreed += m_PassStats.HeapsFreed;
9274
m_PassStats = { 0 };
9275
9276
// Move blocks with immovable allocations according to algorithm
9277
if (immovableBlocks.size() > 0)
9278
{
9279
// Move to the beginning
9280
for (const FragmentedBlock& block : immovableBlocks)
9281
{
9282
BlockVector* vector = m_pBlockVectors[block.data];
9283
MutexLockWrite lock(vector->GetMutex(), vector->m_hAllocator->UseMutex());
9284
9285
for (size_t i = m_ImmovableBlockCount; i < vector->GetBlockCount(); ++i)
9286
{
9287
if (vector->GetBlock(i) == block.block)
9288
{
9289
D3D12MA_SWAP(vector->m_Blocks[i], vector->m_Blocks[m_ImmovableBlockCount++]);
9290
break;
9291
}
9292
}
9293
}
9294
}
9295
return result;
9296
}
9297
9298
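// Dispatches to the selected defragmentation algorithm. Note that the default case
// asserts and then falls through to the balanced algorithm.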
bool DefragmentationContextPimpl::ComputeDefragmentation(BlockVector& vector, size_t index)
9299
{
9300
switch (m_Algorithm)
9301
{
9302
case DEFRAGMENTATION_FLAG_ALGORITHM_FAST:
9303
return ComputeDefragmentation_Fast(vector);
9304
default:
9305
D3D12MA_ASSERT(0);
9306
case DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED:
9307
return ComputeDefragmentation_Balanced(vector, index, true);
9308
case DEFRAGMENTATION_FLAG_ALGORITHM_FULL:
9309
return ComputeDefragmentation_Full(vector);
9310
}
9311
}
9312
9313
DefragmentationContextPimpl::MoveAllocationData DefragmentationContextPimpl::GetMoveData(
9314
AllocHandle handle, BlockMetadata* metadata)
9315
{
9316
MoveAllocationData moveData;
9317
moveData.move.pSrcAllocation = (Allocation*)metadata->GetAllocationPrivateData(handle);
9318
moveData.size = moveData.move.pSrcAllocation->GetSize();
9319
moveData.alignment = moveData.move.pSrcAllocation->GetAlignment();
9320
moveData.flags = ALLOCATION_FLAG_NONE;
9321
9322
return moveData;
9323
}
9324
9325
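// Per-pass limits: m_MaxPassBytes and m_MaxPassAllocations come from
// DEFRAGMENTATION_DESC (0 meaning unlimited). CheckCounters decides whether a candidate
// allocation is moved in this pass (Pass), skipped (Ignore), or whether the pass should
// stop entirely (End, after too many oversized candidates), and IncrementCounters ends
// the pass once either limit is reached.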
DefragmentationContextPimpl::CounterStatus DefragmentationContextPimpl::CheckCounters(UINT64 bytes)
9326
{
9327
// Ignore allocation if it will exceed max size for copy
9328
if (m_PassStats.BytesMoved + bytes > m_MaxPassBytes)
9329
{
9330
if (++m_IgnoredAllocs < MAX_ALLOCS_TO_IGNORE)
9331
return CounterStatus::Ignore;
9332
else
9333
return CounterStatus::End;
9334
}
9335
return CounterStatus::Pass;
9336
}
9337
9338
bool DefragmentationContextPimpl::IncrementCounters(UINT64 bytes)
9339
{
9340
m_PassStats.BytesMoved += bytes;
9341
// Early return when the maximum has been reached
9342
if (++m_PassStats.AllocationsMoved >= m_MaxPassAllocations || m_PassStats.BytesMoved >= m_MaxPassBytes)
9343
{
9344
D3D12MA_ASSERT((m_PassStats.AllocationsMoved == m_MaxPassAllocations ||
9345
m_PassStats.BytesMoved == m_MaxPassBytes) && "Exceeded maximal pass threshold!");
9346
return true;
9347
}
9348
return false;
9349
}
9350
9351
bool DefragmentationContextPimpl::ReallocWithinBlock(BlockVector& vector, NormalBlock* block)
9352
{
9353
BlockMetadata* metadata = block->m_pMetadata;
9354
9355
for (AllocHandle handle = metadata->GetAllocationListBegin();
9356
handle != (AllocHandle)0;
9357
handle = metadata->GetNextAllocation(handle))
9358
{
9359
MoveAllocationData moveData = GetMoveData(handle, metadata);
9360
// Ignore allocations newly created by the defragmentation algorithm
9361
if (moveData.move.pSrcAllocation->GetPrivateData() == this)
9362
continue;
9363
switch (CheckCounters(moveData.move.pSrcAllocation->GetSize()))
9364
{
9365
case CounterStatus::Ignore:
9366
continue;
9367
case CounterStatus::End:
9368
return true;
9369
default:
9370
D3D12MA_ASSERT(0);
9371
case CounterStatus::Pass:
9372
break;
9373
}
9374
9375
UINT64 offset = moveData.move.pSrcAllocation->GetOffset();
9376
if (offset != 0 && metadata->GetSumFreeSize() >= moveData.size)
9377
{
9378
AllocationRequest request = {};
9379
if (metadata->CreateAllocationRequest(
9380
moveData.size,
9381
moveData.alignment,
9382
false,
9383
ALLOCATION_FLAG_STRATEGY_MIN_OFFSET,
9384
&request))
9385
{
9386
if (metadata->GetAllocationOffset(request.allocHandle) < offset)
9387
{
9388
if (SUCCEEDED(vector.CommitAllocationRequest(
9389
request,
9390
block,
9391
moveData.size,
9392
moveData.alignment,
9393
this,
9394
&moveData.move.pDstTmpAllocation)))
9395
{
9396
m_Moves.push_back(moveData.move);
9397
if (IncrementCounters(moveData.size))
9398
return true;
9399
}
9400
}
9401
}
9402
}
9403
}
9404
return false;
9405
}
9406
9407
bool DefragmentationContextPimpl::AllocInOtherBlock(size_t start, size_t end, MoveAllocationData& data, BlockVector& vector)
9408
{
9409
for (; start < end; ++start)
9410
{
9411
NormalBlock* dstBlock = vector.GetBlock(start);
9412
if (dstBlock->m_pMetadata->GetSumFreeSize() >= data.size)
9413
{
9414
if (SUCCEEDED(vector.AllocateFromBlock(dstBlock,
9415
data.size,
9416
data.alignment,
9417
data.flags,
9418
this,
9419
0,
9420
&data.move.pDstTmpAllocation)))
9421
{
9422
m_Moves.push_back(data.move);
9423
if (IncrementCounters(data.size))
9424
return true;
9425
break;
9426
}
9427
}
9428
}
9429
return false;
9430
}
9431
9432
bool DefragmentationContextPimpl::ComputeDefragmentation_Fast(BlockVector& vector)
9433
{
9434
// Move only between blocks
9435
9436
// Go through allocations in last blocks and try to fit them inside first ones
9437
for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i)
9438
{
9439
BlockMetadata* metadata = vector.GetBlock(i)->m_pMetadata;
9440
9441
for (AllocHandle handle = metadata->GetAllocationListBegin();
9442
handle != (AllocHandle)0;
9443
handle = metadata->GetNextAllocation(handle))
9444
{
9445
MoveAllocationData moveData = GetMoveData(handle, metadata);
9446
// Ignore allocations newly created by the defragmentation algorithm
9447
if (moveData.move.pSrcAllocation->GetPrivateData() == this)
9448
continue;
9449
switch (CheckCounters(moveData.move.pSrcAllocation->GetSize()))
9450
{
9451
case CounterStatus::Ignore:
9452
continue;
9453
case CounterStatus::End:
9454
return true;
9455
default:
9456
D3D12MA_ASSERT(0);
9457
case CounterStatus::Pass:
9458
break;
9459
}
9460
9461
// Check all previous blocks for free space
9462
if (AllocInOtherBlock(0, i, moveData, vector))
9463
return true;
9464
}
9465
}
9466
return false;
9467
}
9468
9469
bool DefragmentationContextPimpl::ComputeDefragmentation_Balanced(BlockVector& vector, size_t index, bool update)
9470
{
9471
// Go over every allocation and try to fit it in previous blocks at lowest offsets,
9472
// if not possible: realloc within single block to minimize offset (exclude offset == 0),
9473
// but only if there are noticeable gaps between them (some heuristic, e.g. the average allocation size in the block)
9474
D3D12MA_ASSERT(m_AlgorithmState != NULL);
9475
9476
StateBalanced& vectorState = reinterpret_cast<StateBalanced*>(m_AlgorithmState)[index];
9477
if (update && vectorState.avgAllocSize == UINT64_MAX)
9478
UpdateVectorStatistics(vector, vectorState);
9479
9480
const size_t startMoveCount = m_Moves.size();
9481
UINT64 minimalFreeRegion = vectorState.avgFreeSize / 2;
9482
for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i)
9483
{
9484
NormalBlock* block = vector.GetBlock(i);
9485
BlockMetadata* metadata = block->m_pMetadata;
9486
UINT64 prevFreeRegionSize = 0;
9487
9488
for (AllocHandle handle = metadata->GetAllocationListBegin();
9489
handle != (AllocHandle)0;
9490
handle = metadata->GetNextAllocation(handle))
9491
{
9492
MoveAllocationData moveData = GetMoveData(handle, metadata);
9493
// Ignore allocations newly created by the defragmentation algorithm
9494
if (moveData.move.pSrcAllocation->GetPrivateData() == this)
9495
continue;
9496
switch (CheckCounters(moveData.move.pSrcAllocation->GetSize()))
9497
{
9498
case CounterStatus::Ignore:
9499
continue;
9500
case CounterStatus::End:
9501
return true;
9502
default:
9503
D3D12MA_ASSERT(0);
9504
case CounterStatus::Pass:
9505
break;
9506
}
9507
9508
// Check all previous blocks for free space
9509
const size_t prevMoveCount = m_Moves.size();
9510
if (AllocInOtherBlock(0, i, moveData, vector))
9511
return true;
9512
9513
UINT64 nextFreeRegionSize = metadata->GetNextFreeRegionSize(handle);
9514
// If no room found then realloc within block for lower offset
9515
UINT64 offset = moveData.move.pSrcAllocation->GetOffset();
9516
if (prevMoveCount == m_Moves.size() && offset != 0 && metadata->GetSumFreeSize() >= moveData.size)
9517
{
9518
// Check if realloc will make sense
9519
if (prevFreeRegionSize >= minimalFreeRegion ||
9520
nextFreeRegionSize >= minimalFreeRegion ||
9521
moveData.size <= vectorState.avgFreeSize ||
9522
moveData.size <= vectorState.avgAllocSize)
9523
{
9524
AllocationRequest request = {};
9525
if (metadata->CreateAllocationRequest(
9526
moveData.size,
9527
moveData.alignment,
9528
false,
9529
ALLOCATION_FLAG_STRATEGY_MIN_OFFSET,
9530
&request))
9531
{
9532
if (metadata->GetAllocationOffset(request.allocHandle) < offset)
9533
{
9534
if (SUCCEEDED(vector.CommitAllocationRequest(
9535
request,
9536
block,
9537
moveData.size,
9538
moveData.alignment,
9539
this,
9540
&moveData.move.pDstTmpAllocation)))
9541
{
9542
m_Moves.push_back(moveData.move);
9543
if (IncrementCounters(moveData.size))
9544
return true;
9545
}
9546
}
9547
}
9548
}
9549
}
9550
prevFreeRegionSize = nextFreeRegionSize;
9551
}
9552
}
9553
9554
// No moves performed, update statistics to the current vector state
9555
if (startMoveCount == m_Moves.size() && !update)
9556
{
9557
vectorState.avgAllocSize = UINT64_MAX;
9558
return ComputeDefragmentation_Balanced(vector, index, false);
9559
}
9560
return false;
9561
}
9562
9563
bool DefragmentationContextPimpl::ComputeDefragmentation_Full(BlockVector& vector)
9564
{
9565
// Go over every allocation and try to fit it in previous blocks at lowest offsets,
9566
// if not possible: realloc within single block to minimize offset (exclude offset == 0)
9567
9568
for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i)
9569
{
9570
NormalBlock* block = vector.GetBlock(i);
9571
BlockMetadata* metadata = block->m_pMetadata;
9572
9573
for (AllocHandle handle = metadata->GetAllocationListBegin();
9574
handle != (AllocHandle)0;
9575
handle = metadata->GetNextAllocation(handle))
9576
{
9577
MoveAllocationData moveData = GetMoveData(handle, metadata);
9578
// Ignore allocations newly created by the defragmentation algorithm
9579
if (moveData.move.pSrcAllocation->GetPrivateData() == this)
9580
continue;
9581
switch (CheckCounters(moveData.move.pSrcAllocation->GetSize()))
9582
{
9583
case CounterStatus::Ignore:
9584
continue;
9585
case CounterStatus::End:
9586
return true;
9587
default:
9588
D3D12MA_ASSERT(0);
9589
case CounterStatus::Pass:
9590
break;
9591
}
9592
9593
// Check all previous blocks for free space
9594
const size_t prevMoveCount = m_Moves.size();
9595
if (AllocInOtherBlock(0, i, moveData, vector))
9596
return true;
9597
9598
// If no room found then realloc within block for lower offset
9599
UINT64 offset = moveData.move.pSrcAllocation->GetOffset();
9600
if (prevMoveCount == m_Moves.size() && offset != 0 && metadata->GetSumFreeSize() >= moveData.size)
9601
{
9602
AllocationRequest request = {};
9603
if (metadata->CreateAllocationRequest(
9604
moveData.size,
9605
moveData.alignment,
9606
false,
9607
ALLOCATION_FLAG_STRATEGY_MIN_OFFSET,
9608
&request))
9609
{
9610
if (metadata->GetAllocationOffset(request.allocHandle) < offset)
9611
{
9612
if (SUCCEEDED(vector.CommitAllocationRequest(
9613
request,
9614
block,
9615
moveData.size,
9616
moveData.alignment,
9617
this,
9618
&moveData.move.pDstTmpAllocation)))
9619
{
9620
m_Moves.push_back(moveData.move);
9621
if (IncrementCounters(moveData.size))
9622
return true;
9623
}
9624
}
9625
}
9626
}
9627
}
9628
}
9629
return false;
9630
}
9631
9632
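// Computes the averages that drive the balanced heuristic:
//   avgAllocSize = (sum of block sizes - total free bytes) / allocation count
//   avgFreeSize  = total free bytes / free-region count
// As written, this expects the vector to contain at least one allocation and one free
// region when it is called.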
void DefragmentationContextPimpl::UpdateVectorStatistics(BlockVector& vector, StateBalanced& state)
9633
{
9634
size_t allocCount = 0;
9635
size_t freeCount = 0;
9636
state.avgFreeSize = 0;
9637
state.avgAllocSize = 0;
9638
9639
for (size_t i = 0; i < vector.GetBlockCount(); ++i)
9640
{
9641
BlockMetadata* metadata = vector.GetBlock(i)->m_pMetadata;
9642
9643
allocCount += metadata->GetAllocationCount();
9644
freeCount += metadata->GetFreeRegionsCount();
9645
state.avgFreeSize += metadata->GetSumFreeSize();
9646
state.avgAllocSize += metadata->GetSize();
9647
}
9648
9649
state.avgAllocSize = (state.avgAllocSize - state.avgFreeSize) / allocCount;
9650
state.avgFreeSize /= freeCount;
9651
}
9652
#endif // _D3D12MA_DEFRAGMENTATION_CONTEXT_PIMPL_FUNCTIONS
9653
9654
#ifndef _D3D12MA_POOL_PIMPL_FUNCTIONS
9655
PoolPimpl::PoolPimpl(AllocatorPimpl* allocator, const POOL_DESC& desc)
9656
: m_Allocator(allocator),
9657
m_Desc(desc),
9658
m_BlockVector(NULL),
9659
m_Name(NULL)
9660
{
9661
const bool explicitBlockSize = desc.BlockSize != 0;
9662
const UINT64 preferredBlockSize = explicitBlockSize ? desc.BlockSize : D3D12MA_DEFAULT_BLOCK_SIZE;
9663
UINT maxBlockCount = desc.MaxBlockCount != 0 ? desc.MaxBlockCount : UINT_MAX;
9664
9665
#ifndef __ID3D12Device4_INTERFACE_DEFINED__
9666
D3D12MA_ASSERT(m_Desc.pProtectedSession == NULL);
9667
#endif
9668
9669
m_BlockVector = D3D12MA_NEW(allocator->GetAllocs(), BlockVector)(
9670
allocator, desc.HeapProperties, desc.HeapFlags,
9671
preferredBlockSize,
9672
desc.MinBlockCount, maxBlockCount,
9673
explicitBlockSize,
9674
D3D12MA_MAX(desc.MinAllocationAlignment, (UINT64)D3D12MA_DEBUG_ALIGNMENT),
9675
(desc.Flags & POOL_FLAG_ALGORITHM_MASK) != 0,
9676
(desc.Flags & POOL_FLAG_MSAA_TEXTURES_ALWAYS_COMMITTED) != 0,
9677
desc.pProtectedSession,
9678
desc.ResidencyPriority);
9679
}
9680
9681
PoolPimpl::~PoolPimpl()
9682
{
9683
D3D12MA_ASSERT(m_PrevPool == NULL && m_NextPool == NULL);
9684
FreeName();
9685
D3D12MA_DELETE(m_Allocator->GetAllocs(), m_BlockVector);
9686
}
9687
9688
HRESULT PoolPimpl::Init()
9689
{
9690
m_CommittedAllocations.Init(m_Allocator->UseMutex(), m_Desc.HeapProperties.Type, this);
9691
return m_BlockVector->CreateMinBlocks();
9692
}
9693
9694
void PoolPimpl::GetStatistics(Statistics& outStats)
9695
{
9696
ClearStatistics(outStats);
9697
m_BlockVector->AddStatistics(outStats);
9698
m_CommittedAllocations.AddStatistics(outStats);
9699
}
9700
9701
void PoolPimpl::CalculateStatistics(DetailedStatistics& outStats)
9702
{
9703
ClearDetailedStatistics(outStats);
9704
AddDetailedStatistics(outStats);
9705
}
9706
9707
void PoolPimpl::AddDetailedStatistics(DetailedStatistics& inoutStats)
9708
{
9709
m_BlockVector->AddDetailedStatistics(inoutStats);
9710
m_CommittedAllocations.AddDetailedStatistics(inoutStats);
9711
}
9712
9713
void PoolPimpl::SetName(LPCWSTR Name)
9714
{
9715
FreeName();
9716
9717
if (Name)
9718
{
9719
const size_t nameCharCount = wcslen(Name) + 1;
9720
m_Name = D3D12MA_NEW_ARRAY(m_Allocator->GetAllocs(), WCHAR, nameCharCount);
9721
memcpy(m_Name, Name, nameCharCount * sizeof(WCHAR));
9722
}
9723
}
9724
9725
void PoolPimpl::FreeName()
9726
{
9727
if (m_Name)
9728
{
9729
const size_t nameCharCount = wcslen(m_Name) + 1;
9730
D3D12MA_DELETE_ARRAY(m_Allocator->GetAllocs(), m_Name, nameCharCount);
9731
m_Name = NULL;
9732
}
9733
}
9734
#endif // _D3D12MA_POOL_PIMPL_FUNCTIONS
9735
9736
9737
#ifndef _D3D12MA_PUBLIC_INTERFACE
9738
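// A minimal creation sketch (illustrative only; 'device' and 'adapter' are assumed to be
// a valid ID3D12Device* and IDXGIAdapter* owned by the caller):
//
//   D3D12MA::ALLOCATOR_DESC allocatorDesc = {};
//   allocatorDesc.pDevice = device;
//   allocatorDesc.pAdapter = adapter;
//   D3D12MA::Allocator* allocator = NULL;
//   HRESULT hr = D3D12MA::CreateAllocator(&allocatorDesc, &allocator);
//   // ... use allocator ...
//   if (allocator) allocator->Release();
//
// Note the validation below: PreferredBlockSize must be 0 (use the default) or lie in
// [16 B, 1 TiB).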
HRESULT CreateAllocator(const ALLOCATOR_DESC* pDesc, Allocator** ppAllocator)
9739
{
9740
if (!pDesc || !ppAllocator || !pDesc->pDevice || !pDesc->pAdapter ||
9741
!(pDesc->PreferredBlockSize == 0 || (pDesc->PreferredBlockSize >= 16 && pDesc->PreferredBlockSize < 0x10000000000ull)))
9742
{
9743
D3D12MA_ASSERT(0 && "Invalid arguments passed to CreateAllocator.");
9744
return E_INVALIDARG;
9745
}
9746
9747
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
9748
9749
ALLOCATION_CALLBACKS allocationCallbacks;
9750
SetupAllocationCallbacks(allocationCallbacks, pDesc->pAllocationCallbacks);
9751
9752
*ppAllocator = D3D12MA_NEW(allocationCallbacks, Allocator)(allocationCallbacks, *pDesc);
9753
HRESULT hr = (*ppAllocator)->m_Pimpl->Init(*pDesc);
9754
if (FAILED(hr))
9755
{
9756
D3D12MA_DELETE(allocationCallbacks, *ppAllocator);
9757
*ppAllocator = NULL;
9758
}
9759
return hr;
9760
}
9761
9762
HRESULT CreateVirtualBlock(const VIRTUAL_BLOCK_DESC* pDesc, VirtualBlock** ppVirtualBlock)
9763
{
9764
if (!pDesc || !ppVirtualBlock)
9765
{
9766
D3D12MA_ASSERT(0 && "Invalid arguments passed to CreateVirtualBlock.");
9767
return E_INVALIDARG;
9768
}
9769
9770
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
9771
9772
ALLOCATION_CALLBACKS allocationCallbacks;
9773
SetupAllocationCallbacks(allocationCallbacks, pDesc->pAllocationCallbacks);
9774
9775
*ppVirtualBlock = D3D12MA_NEW(allocationCallbacks, VirtualBlock)(allocationCallbacks, *pDesc);
9776
return S_OK;
9777
}
9778
9779
#ifndef _D3D12MA_IUNKNOWN_IMPL_FUNCTIONS
9780
HRESULT STDMETHODCALLTYPE IUnknownImpl::QueryInterface(REFIID riid, void** ppvObject)
9781
{
9782
if (ppvObject == NULL)
9783
return E_POINTER;
9784
if (riid == IID_IUnknown)
9785
{
9786
++m_RefCount;
9787
*ppvObject = this;
9788
return S_OK;
9789
}
9790
*ppvObject = NULL;
9791
return E_NOINTERFACE;
9792
}
9793
9794
ULONG STDMETHODCALLTYPE IUnknownImpl::AddRef()
9795
{
9796
return ++m_RefCount;
9797
}
9798
9799
ULONG STDMETHODCALLTYPE IUnknownImpl::Release()
9800
{
9801
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
9802
9803
const uint32_t newRefCount = --m_RefCount;
9804
if (newRefCount == 0)
9805
ReleaseThis();
9806
return newRefCount;
9807
}
9808
#endif // _D3D12MA_IUNKNOWN_IMPL_FUNCTIONS
9809
9810
#ifndef _D3D12MA_ALLOCATION_FUNCTIONS
9811
void Allocation::PackedData::SetType(Type type)
9812
{
9813
const UINT u = (UINT)type;
9814
D3D12MA_ASSERT(u < (1u << 2));
9815
m_Type = u;
9816
}
9817
9818
void Allocation::PackedData::SetResourceDimension(D3D12_RESOURCE_DIMENSION resourceDimension)
9819
{
9820
const UINT u = (UINT)resourceDimension;
9821
D3D12MA_ASSERT(u < (1u << 3));
9822
m_ResourceDimension = u;
9823
}
9824
9825
void Allocation::PackedData::SetResourceFlags(D3D12_RESOURCE_FLAGS resourceFlags)
9826
{
9827
const UINT u = (UINT)resourceFlags;
9828
D3D12MA_ASSERT(u < (1u << 24));
9829
m_ResourceFlags = u;
9830
}
9831
9832
void Allocation::PackedData::SetTextureLayout(D3D12_TEXTURE_LAYOUT textureLayout)
9833
{
9834
const UINT u = (UINT)textureLayout;
9835
D3D12MA_ASSERT(u < (1u << 9));
9836
m_TextureLayout = u;
9837
}
9838
9839
UINT64 Allocation::GetOffset() const
9840
{
9841
switch (m_PackedData.GetType())
9842
{
9843
case TYPE_COMMITTED:
9844
case TYPE_HEAP:
9845
return 0;
9846
case TYPE_PLACED:
9847
return m_Placed.block->m_pMetadata->GetAllocationOffset(m_Placed.allocHandle);
9848
default:
9849
D3D12MA_ASSERT(0);
9850
return 0;
9851
}
9852
}
9853
9854
void Allocation::SetResource(ID3D12Resource* pResource)
9855
{
9856
if (pResource != m_Resource)
9857
{
9858
if (m_Resource)
9859
m_Resource->Release();
9860
m_Resource = pResource;
9861
if (m_Resource)
9862
m_Resource->AddRef();
9863
}
9864
}
9865
9866
ID3D12Heap* Allocation::GetHeap() const
9867
{
9868
switch (m_PackedData.GetType())
9869
{
9870
case TYPE_COMMITTED:
9871
return NULL;
9872
case TYPE_PLACED:
9873
return m_Placed.block->GetHeap();
9874
case TYPE_HEAP:
9875
return m_Heap.heap;
9876
default:
9877
D3D12MA_ASSERT(0);
9878
return 0;
9879
}
9880
}
9881
9882
void Allocation::SetName(LPCWSTR Name)
9883
{
9884
FreeName();
9885
9886
if (Name)
9887
{
9888
const size_t nameCharCount = wcslen(Name) + 1;
9889
m_Name = D3D12MA_NEW_ARRAY(m_Allocator->GetAllocs(), WCHAR, nameCharCount);
9890
memcpy(m_Name, Name, nameCharCount * sizeof(WCHAR));
9891
}
9892
}
9893
9894
void Allocation::ReleaseThis()
9895
{
9896
if (this == NULL)
9897
{
9898
return;
9899
}
9900
9901
SAFE_RELEASE(m_Resource);
9902
9903
switch (m_PackedData.GetType())
9904
{
9905
case TYPE_COMMITTED:
9906
m_Allocator->FreeCommittedMemory(this);
9907
break;
9908
case TYPE_PLACED:
9909
m_Allocator->FreePlacedMemory(this);
9910
break;
9911
case TYPE_HEAP:
9912
m_Allocator->FreeHeapMemory(this);
9913
break;
9914
}
9915
9916
FreeName();
9917
9918
m_Allocator->GetAllocationObjectAllocator().Free(this);
9919
}
9920
9921
Allocation::Allocation(AllocatorPimpl* allocator, UINT64 size, UINT64 alignment, BOOL wasZeroInitialized)
9922
: m_Allocator{ allocator },
9923
m_Size{ size },
9924
m_Alignment{ alignment },
9925
m_Resource{ NULL },
9926
m_pPrivateData{ NULL },
9927
m_Name{ NULL }
9928
{
9929
D3D12MA_ASSERT(allocator);
9930
9931
m_PackedData.SetType(TYPE_COUNT);
9932
m_PackedData.SetResourceDimension(D3D12_RESOURCE_DIMENSION_UNKNOWN);
9933
m_PackedData.SetResourceFlags(D3D12_RESOURCE_FLAG_NONE);
9934
m_PackedData.SetTextureLayout(D3D12_TEXTURE_LAYOUT_UNKNOWN);
9935
m_PackedData.SetWasZeroInitialized(wasZeroInitialized);
9936
}
9937
9938
void Allocation::InitCommitted(CommittedAllocationList* list)
9939
{
9940
m_PackedData.SetType(TYPE_COMMITTED);
9941
m_Committed.list = list;
9942
m_Committed.prev = NULL;
9943
m_Committed.next = NULL;
9944
}
9945
9946
void Allocation::InitPlaced(AllocHandle allocHandle, NormalBlock* block)
9947
{
9948
m_PackedData.SetType(TYPE_PLACED);
9949
m_Placed.allocHandle = allocHandle;
9950
m_Placed.block = block;
9951
}
9952
9953
void Allocation::InitHeap(CommittedAllocationList* list, ID3D12Heap* heap)
9954
{
9955
m_PackedData.SetType(TYPE_HEAP);
9956
m_Heap.list = list;
9957
m_Committed.prev = NULL;
9958
m_Committed.next = NULL;
9959
m_Heap.heap = heap;
9960
}
9961
9962
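// Used by the defragmentation COPY operation: exchanges the ID3D12Resource and the
// placed-block binding between this (the live allocation the application keeps using)
// and the temporary destination allocation, then re-points the per-allocation private
// data stored in both blocks' metadata so each handle maps back to the right object.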
void Allocation::SwapBlockAllocation(Allocation* allocation)
9963
{
9964
D3D12MA_ASSERT(allocation != NULL);
9965
D3D12MA_ASSERT(m_PackedData.GetType() == TYPE_PLACED);
9966
D3D12MA_ASSERT(allocation->m_PackedData.GetType() == TYPE_PLACED);
9967
9968
D3D12MA_SWAP(m_Resource, allocation->m_Resource);
9969
m_PackedData.SetWasZeroInitialized(allocation->m_PackedData.WasZeroInitialized());
9970
m_Placed.block->m_pMetadata->SetAllocationPrivateData(m_Placed.allocHandle, allocation);
9971
D3D12MA_SWAP(m_Placed, allocation->m_Placed);
9972
m_Placed.block->m_pMetadata->SetAllocationPrivateData(m_Placed.allocHandle, this);
9973
}
9974
9975
AllocHandle Allocation::GetAllocHandle() const
9976
{
9977
switch (m_PackedData.GetType())
9978
{
9979
case TYPE_COMMITTED:
9980
case TYPE_HEAP:
9981
return (AllocHandle)0;
9982
case TYPE_PLACED:
9983
return m_Placed.allocHandle;
9984
default:
9985
D3D12MA_ASSERT(0);
9986
return (AllocHandle)0;
9987
}
9988
}
9989
9990
NormalBlock* Allocation::GetBlock()
9991
{
9992
switch (m_PackedData.GetType())
9993
{
9994
case TYPE_COMMITTED:
9995
case TYPE_HEAP:
9996
return NULL;
9997
case TYPE_PLACED:
9998
return m_Placed.block;
9999
default:
10000
D3D12MA_ASSERT(0);
10001
return NULL;
10002
}
10003
}
10004
10005
template<typename D3D12_RESOURCE_DESC_T>
10006
void Allocation::SetResourcePointer(ID3D12Resource* resource, const D3D12_RESOURCE_DESC_T* pResourceDesc)
10007
{
10008
D3D12MA_ASSERT(m_Resource == NULL && pResourceDesc);
10009
m_Resource = resource;
10010
m_PackedData.SetResourceDimension(pResourceDesc->Dimension);
10011
m_PackedData.SetResourceFlags(pResourceDesc->Flags);
10012
m_PackedData.SetTextureLayout(pResourceDesc->Layout);
10013
}
10014
10015
void Allocation::FreeName()
10016
{
10017
if (m_Name)
10018
{
10019
const size_t nameCharCount = wcslen(m_Name) + 1;
10020
D3D12MA_DELETE_ARRAY(m_Allocator->GetAllocs(), m_Name, nameCharCount);
10021
m_Name = NULL;
10022
}
10023
}
10024
#endif // _D3D12MA_ALLOCATION_FUNCTIONS
10025
10026
#ifndef _D3D12MA_DEFRAGMENTATION_CONTEXT_FUNCTIONS
10027
HRESULT DefragmentationContext::BeginPass(DEFRAGMENTATION_PASS_MOVE_INFO* pPassInfo)
10028
{
10029
D3D12MA_ASSERT(pPassInfo);
10030
return m_Pimpl->DefragmentPassBegin(*pPassInfo);
10031
}
10032
10033
HRESULT DefragmentationContext::EndPass(DEFRAGMENTATION_PASS_MOVE_INFO* pPassInfo)
10034
{
10035
D3D12MA_ASSERT(pPassInfo);
10036
return m_Pimpl->DefragmentPassEnd(*pPassInfo);
10037
}
10038
10039
void DefragmentationContext::GetStats(DEFRAGMENTATION_STATS* pStats)
10040
{
10041
D3D12MA_ASSERT(pStats);
10042
m_Pimpl->GetStats(*pStats);
10043
}
10044
10045
void DefragmentationContext::ReleaseThis()
10046
{
10047
if (this == NULL)
10048
{
10049
return;
10050
}
10051
10052
D3D12MA_DELETE(m_Pimpl->GetAllocs(), this);
10053
}
10054
10055
DefragmentationContext::DefragmentationContext(AllocatorPimpl* allocator,
10056
const DEFRAGMENTATION_DESC& desc,
10057
BlockVector* poolVector)
10058
: m_Pimpl(D3D12MA_NEW(allocator->GetAllocs(), DefragmentationContextPimpl)(allocator, desc, poolVector)) {}
10059
10060
DefragmentationContext::~DefragmentationContext()
10061
{
10062
D3D12MA_DELETE(m_Pimpl->GetAllocs(), m_Pimpl);
10063
}
10064
#endif // _D3D12MA_DEFRAGMENTATION_CONTEXT_FUNCTIONS
10065
10066
#ifndef _D3D12MA_POOL_FUNCTIONS
10067
POOL_DESC Pool::GetDesc() const
10068
{
10069
return m_Pimpl->GetDesc();
10070
}
10071
10072
void Pool::GetStatistics(Statistics* pStats)
10073
{
10074
D3D12MA_ASSERT(pStats);
10075
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
10076
m_Pimpl->GetStatistics(*pStats);
10077
}
10078
10079
void Pool::CalculateStatistics(DetailedStatistics* pStats)
10080
{
10081
D3D12MA_ASSERT(pStats);
10082
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
10083
m_Pimpl->CalculateStatistics(*pStats);
10084
}
10085
10086
void Pool::SetName(LPCWSTR Name)
10087
{
10088
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
10089
m_Pimpl->SetName(Name);
10090
}
10091
10092
LPCWSTR Pool::GetName() const
10093
{
10094
return m_Pimpl->GetName();
10095
}
10096
10097
HRESULT Pool::BeginDefragmentation(const DEFRAGMENTATION_DESC* pDesc, DefragmentationContext** ppContext)
10098
{
10099
D3D12MA_ASSERT(pDesc && ppContext);
10100
10101
// Check for support
10102
if (m_Pimpl->GetBlockVector()->GetAlgorithm() & POOL_FLAG_ALGORITHM_LINEAR)
10103
return E_NOINTERFACE;
10104
10105
AllocatorPimpl* allocator = m_Pimpl->GetAllocator();
10106
*ppContext = D3D12MA_NEW(allocator->GetAllocs(), DefragmentationContext)(allocator, *pDesc, m_Pimpl->GetBlockVector());
10107
return S_OK;
10108
}
10109
10110
void Pool::ReleaseThis()
10111
{
10112
if (this == NULL)
10113
{
10114
return;
10115
}
10116
10117
D3D12MA_DELETE(m_Pimpl->GetAllocator()->GetAllocs(), this);
10118
}
10119
10120
Pool::Pool(Allocator* allocator, const POOL_DESC& desc)
10121
: m_Pimpl(D3D12MA_NEW(allocator->m_Pimpl->GetAllocs(), PoolPimpl)(allocator->m_Pimpl, desc)) {}
10122
10123
Pool::~Pool()
10124
{
10125
m_Pimpl->GetAllocator()->UnregisterPool(this, m_Pimpl->GetDesc().HeapProperties.Type);
10126
10127
D3D12MA_DELETE(m_Pimpl->GetAllocator()->GetAllocs(), m_Pimpl);
10128
}
10129
#endif // _D3D12MA_POOL_FUNCTIONS
10130
10131
#ifndef _D3D12MA_ALLOCATOR_FUNCTIONS
10132
const D3D12_FEATURE_DATA_D3D12_OPTIONS& Allocator::GetD3D12Options() const
10133
{
10134
return m_Pimpl->GetD3D12Options();
10135
}
10136
10137
BOOL Allocator::IsUMA() const
10138
{
10139
return m_Pimpl->IsUMA();
10140
}
10141
10142
BOOL Allocator::IsCacheCoherentUMA() const
10143
{
10144
return m_Pimpl->IsCacheCoherentUMA();
10145
}
10146
10147
UINT64 Allocator::GetMemoryCapacity(UINT memorySegmentGroup) const
10148
{
10149
return m_Pimpl->GetMemoryCapacity(memorySegmentGroup);
10150
}
10151
10152
HRESULT Allocator::CreateResource(
10153
const ALLOCATION_DESC* pAllocDesc,
10154
const D3D12_RESOURCE_DESC* pResourceDesc,
10155
D3D12_RESOURCE_STATES InitialResourceState,
10156
const D3D12_CLEAR_VALUE* pOptimizedClearValue,
10157
Allocation** ppAllocation,
10158
REFIID riidResource,
10159
void** ppvResource)
10160
{
10161
if (!pAllocDesc || !pResourceDesc || !ppAllocation)
10162
{
10163
D3D12MA_ASSERT(0 && "Invalid arguments passed to Allocator::CreateResource.");
10164
return E_INVALIDARG;
10165
}
10166
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
10167
return m_Pimpl->CreateResource(
10168
pAllocDesc,
10169
CREATE_RESOURCE_PARAMS(pResourceDesc, InitialResourceState, pOptimizedClearValue),
10170
ppAllocation,
10171
riidResource,
10172
ppvResource);
10173
}
10174
10175
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
10176
HRESULT Allocator::CreateResource2(
10177
const ALLOCATION_DESC* pAllocDesc,
10178
const D3D12_RESOURCE_DESC1* pResourceDesc,
10179
D3D12_RESOURCE_STATES InitialResourceState,
10180
const D3D12_CLEAR_VALUE* pOptimizedClearValue,
10181
Allocation** ppAllocation,
10182
REFIID riidResource,
10183
void** ppvResource)
10184
{
10185
if (!pAllocDesc || !pResourceDesc || !ppAllocation)
10186
{
10187
D3D12MA_ASSERT(0 && "Invalid arguments passed to Allocator::CreateResource2.");
10188
return E_INVALIDARG;
10189
}
10190
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
10191
return m_Pimpl->CreateResource(
10192
pAllocDesc,
10193
CREATE_RESOURCE_PARAMS(pResourceDesc, InitialResourceState, pOptimizedClearValue),
10194
ppAllocation,
10195
riidResource,
10196
ppvResource);
10197
}
10198
#endif // #ifdef __ID3D12Device8_INTERFACE_DEFINED__
10199
10200
#ifdef __ID3D12Device10_INTERFACE_DEFINED__
10201
HRESULT Allocator::CreateResource3(
10202
const ALLOCATION_DESC* pAllocDesc,
10203
const D3D12_RESOURCE_DESC1* pResourceDesc,
10204
D3D12_BARRIER_LAYOUT InitialLayout,
10205
const D3D12_CLEAR_VALUE* pOptimizedClearValue,
10206
UINT32 NumCastableFormats,
10207
DXGI_FORMAT* pCastableFormats,
10208
Allocation** ppAllocation,
10209
REFIID riidResource,
10210
void** ppvResource)
10211
{
10212
if (!pAllocDesc || !pResourceDesc || !ppAllocation)
10213
{
10214
D3D12MA_ASSERT(0 && "Invalid arguments passed to Allocator::CreateResource3.");
10215
return E_INVALIDARG;
10216
}
10217
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
10218
return m_Pimpl->CreateResource(
10219
pAllocDesc,
10220
CREATE_RESOURCE_PARAMS(pResourceDesc, InitialLayout, pOptimizedClearValue, NumCastableFormats, pCastableFormats),
10221
ppAllocation,
10222
riidResource,
10223
ppvResource);
10224
}
10225
#endif // #ifdef __ID3D12Device10_INTERFACE_DEFINED__
10226
10227
HRESULT Allocator::AllocateMemory(
10228
const ALLOCATION_DESC* pAllocDesc,
10229
const D3D12_RESOURCE_ALLOCATION_INFO* pAllocInfo,
10230
Allocation** ppAllocation)
10231
{
10232
if (!ValidateAllocateMemoryParameters(pAllocDesc, pAllocInfo, ppAllocation))
10233
{
10234
D3D12MA_ASSERT(0 && "Invalid arguments passed to Allocator::AllocateMemory.");
10235
return E_INVALIDARG;
10236
}
10237
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
10238
return m_Pimpl->AllocateMemory(pAllocDesc, pAllocInfo, ppAllocation);
10239
}
10240
10241
HRESULT Allocator::CreateAliasingResource(
10242
Allocation* pAllocation,
10243
UINT64 AllocationLocalOffset,
10244
const D3D12_RESOURCE_DESC* pResourceDesc,
10245
D3D12_RESOURCE_STATES InitialResourceState,
10246
const D3D12_CLEAR_VALUE* pOptimizedClearValue,
10247
REFIID riidResource,
10248
void** ppvResource)
10249
{
10250
if (!pAllocation || !pResourceDesc || !ppvResource)
10251
{
10252
D3D12MA_ASSERT(0 && "Invalid arguments passed to Allocator::CreateAliasingResource.");
10253
return E_INVALIDARG;
10254
}
10255
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
10256
return m_Pimpl->CreateAliasingResource(
10257
pAllocation,
10258
AllocationLocalOffset,
10259
CREATE_RESOURCE_PARAMS(pResourceDesc, InitialResourceState, pOptimizedClearValue),
10260
riidResource,
10261
ppvResource);
10262
}
10263
10264
#ifdef __ID3D12Device8_INTERFACE_DEFINED__
10265
HRESULT Allocator::CreateAliasingResource1(
10266
Allocation* pAllocation,
10267
UINT64 AllocationLocalOffset,
10268
const D3D12_RESOURCE_DESC1* pResourceDesc,
10269
D3D12_RESOURCE_STATES InitialResourceState,
10270
const D3D12_CLEAR_VALUE* pOptimizedClearValue,
10271
REFIID riidResource,
10272
void** ppvResource)
10273
{
10274
if (!pAllocation || !pResourceDesc || !ppvResource)
10275
{
10276
D3D12MA_ASSERT(0 && "Invalid arguments passed to Allocator::CreateAliasingResource.");
10277
return E_INVALIDARG;
10278
}
10279
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
10280
return m_Pimpl->CreateAliasingResource(
10281
pAllocation,
10282
AllocationLocalOffset,
10283
CREATE_RESOURCE_PARAMS(pResourceDesc, InitialResourceState, pOptimizedClearValue),
10284
riidResource,
10285
ppvResource);
10286
}
10287
#endif // #ifdef __ID3D12Device8_INTERFACE_DEFINED__
10288
10289
#ifdef __ID3D12Device10_INTERFACE_DEFINED__
10290
HRESULT Allocator::CreateAliasingResource2(
10291
Allocation* pAllocation,
10292
UINT64 AllocationLocalOffset,
10293
const D3D12_RESOURCE_DESC1* pResourceDesc,
10294
D3D12_BARRIER_LAYOUT InitialLayout,
10295
const D3D12_CLEAR_VALUE* pOptimizedClearValue,
10296
UINT32 NumCastableFormats,
10297
DXGI_FORMAT* pCastableFormats,
10298
REFIID riidResource,
10299
void** ppvResource)
10300
{
10301
if (!pAllocation || !pResourceDesc || !ppvResource)
10302
{
10303
D3D12MA_ASSERT(0 && "Invalid arguments passed to Allocator::CreateAliasingResource.");
10304
return E_INVALIDARG;
10305
}
10306
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
10307
return m_Pimpl->CreateAliasingResource(
10308
pAllocation,
10309
AllocationLocalOffset,
10310
CREATE_RESOURCE_PARAMS(pResourceDesc, InitialLayout, pOptimizedClearValue, NumCastableFormats, pCastableFormats),
10311
riidResource,
10312
ppvResource);
10313
}
10314
#endif // #ifdef __ID3D12Device10_INTERFACE_DEFINED__
10315
10316
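// A minimal custom-pool sketch (illustrative only; field names beyond those validated
// below are taken from D3D12MemAlloc.h, and 'allocator' is assumed to be an already
// created D3D12MA::Allocator*):
//
//   D3D12MA::POOL_DESC poolDesc = {};
//   poolDesc.HeapProperties.Type = D3D12_HEAP_TYPE_DEFAULT;
//   poolDesc.HeapFlags = D3D12_HEAP_FLAG_ALLOW_ONLY_BUFFERS;
//   D3D12MA::Pool* pool = NULL;
//   HRESULT hr = allocator->CreatePool(&poolDesc, &pool);
//
//   D3D12MA::ALLOCATION_DESC allocDesc = {};
//   allocDesc.CustomPool = pool; // route subsequent allocations to the pool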
HRESULT Allocator::CreatePool(
10317
const POOL_DESC* pPoolDesc,
10318
Pool** ppPool)
10319
{
10320
if (!pPoolDesc || !ppPool ||
10321
(pPoolDesc->MaxBlockCount > 0 && pPoolDesc->MaxBlockCount < pPoolDesc->MinBlockCount) ||
10322
(pPoolDesc->MinAllocationAlignment > 0 && !IsPow2(pPoolDesc->MinAllocationAlignment)))
10323
{
10324
D3D12MA_ASSERT(0 && "Invalid arguments passed to Allocator::CreatePool.");
10325
return E_INVALIDARG;
10326
}
10327
if (!m_Pimpl->HeapFlagsFulfillResourceHeapTier(pPoolDesc->HeapFlags))
10328
{
10329
D3D12MA_ASSERT(0 && "Invalid pPoolDesc->HeapFlags passed to Allocator::CreatePool. Did you forget to handle ResourceHeapTier=1?");
10330
return E_INVALIDARG;
10331
}
10332
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
10333
* ppPool = D3D12MA_NEW(m_Pimpl->GetAllocs(), Pool)(this, *pPoolDesc);
10334
HRESULT hr = (*ppPool)->m_Pimpl->Init();
10335
if (SUCCEEDED(hr))
10336
{
10337
m_Pimpl->RegisterPool(*ppPool, pPoolDesc->HeapProperties.Type);
10338
}
10339
else
10340
{
10341
D3D12MA_DELETE(m_Pimpl->GetAllocs(), *ppPool);
10342
*ppPool = NULL;
10343
}
10344
return hr;
10345
}
10346
10347
void Allocator::SetCurrentFrameIndex(UINT frameIndex)
10348
{
10349
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
10350
m_Pimpl->SetCurrentFrameIndex(frameIndex);
10351
}
10352
10353
void Allocator::GetBudget(Budget* pLocalBudget, Budget* pNonLocalBudget)
10354
{
10355
if (pLocalBudget == NULL && pNonLocalBudget == NULL)
10356
{
10357
return;
10358
}
10359
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
10360
m_Pimpl->GetBudget(pLocalBudget, pNonLocalBudget);
10361
}
10362
10363
void Allocator::CalculateStatistics(TotalStatistics* pStats)
10364
{
10365
D3D12MA_ASSERT(pStats);
10366
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
10367
m_Pimpl->CalculateStatistics(*pStats);
10368
}
10369
10370
void Allocator::BuildStatsString(WCHAR** ppStatsString, BOOL DetailedMap) const
10371
{
10372
D3D12MA_ASSERT(ppStatsString);
10373
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
10374
m_Pimpl->BuildStatsString(ppStatsString, DetailedMap);
10375
}
10376
10377
void Allocator::FreeStatsString(WCHAR* pStatsString) const
10378
{
10379
if (pStatsString != NULL)
10380
{
10381
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
10382
m_Pimpl->FreeStatsString(pStatsString);
10383
}
10384
}
10385
10386
void Allocator::BeginDefragmentation(const DEFRAGMENTATION_DESC* pDesc, DefragmentationContext** ppContext)
10387
{
10388
D3D12MA_ASSERT(pDesc && ppContext);
10389
10390
*ppContext = D3D12MA_NEW(m_Pimpl->GetAllocs(), DefragmentationContext)(m_Pimpl, *pDesc, NULL);
10391
}
10392
10393
void Allocator::ReleaseThis()
10394
{
10395
// Copy is needed because otherwise we would call destructor and invalidate the structure with callbacks before using it to free memory.
10396
const ALLOCATION_CALLBACKS allocationCallbacksCopy = m_Pimpl->GetAllocs();
10397
D3D12MA_DELETE(allocationCallbacksCopy, this);
10398
}
10399
10400
Allocator::Allocator(const ALLOCATION_CALLBACKS& allocationCallbacks, const ALLOCATOR_DESC& desc)
10401
: m_Pimpl(D3D12MA_NEW(allocationCallbacks, AllocatorPimpl)(allocationCallbacks, desc)) {}
10402
10403
Allocator::~Allocator()
10404
{
10405
D3D12MA_DELETE(m_Pimpl->GetAllocs(), m_Pimpl);
10406
}
10407
#endif // _D3D12MA_ALLOCATOR_FUNCTIONS
10408
10409
#ifndef _D3D12MA_VIRTUAL_BLOCK_FUNCTIONS
10410
BOOL VirtualBlock::IsEmpty() const
10411
{
10412
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
10413
return m_Pimpl->m_Metadata->IsEmpty() ? TRUE : FALSE;
10414
}
10415
10416
void VirtualBlock::GetAllocationInfo(VirtualAllocation allocation, VIRTUAL_ALLOCATION_INFO* pInfo) const
10417
{
10418
D3D12MA_ASSERT(allocation.AllocHandle != (AllocHandle)0 && pInfo);
10419
10420
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
10421
m_Pimpl->m_Metadata->GetAllocationInfo(allocation.AllocHandle, *pInfo);
10422
}
10423
10424
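// A minimal virtual-allocation sketch (illustrative only; sizes are arbitrary and the
// struct fields are those declared in D3D12MemAlloc.h):
//
//   D3D12MA::VIRTUAL_BLOCK_DESC blockDesc = {};
//   blockDesc.Size = 1048576; // 1 MiB of "virtual" space, no GPU memory involved
//   D3D12MA::VirtualBlock* block = NULL;
//   D3D12MA::CreateVirtualBlock(&blockDesc, &block);
//
//   D3D12MA::VIRTUAL_ALLOCATION_DESC allocDesc = {};
//   allocDesc.Size = 4096;
//   D3D12MA::VirtualAllocation alloc;
//   UINT64 offset;
//   if (SUCCEEDED(block->Allocate(&allocDesc, &alloc, &offset)))
//   {
//       // ... use [offset, offset + 4096) in the caller's own resource ...
//       block->FreeAllocation(alloc);
//   }
//   block->Release();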
HRESULT VirtualBlock::Allocate(const VIRTUAL_ALLOCATION_DESC* pDesc, VirtualAllocation* pAllocation, UINT64* pOffset)
10425
{
10426
if (!pDesc || !pAllocation || pDesc->Size == 0 || !IsPow2(pDesc->Alignment))
10427
{
10428
D3D12MA_ASSERT(0 && "Invalid arguments passed to VirtualBlock::Allocate.");
10429
return E_INVALIDARG;
10430
}
10431
10432
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
10433
10434
const UINT64 alignment = pDesc->Alignment != 0 ? pDesc->Alignment : 1;
10435
AllocationRequest allocRequest = {};
10436
if (m_Pimpl->m_Metadata->CreateAllocationRequest(
10437
pDesc->Size,
10438
alignment,
10439
pDesc->Flags & VIRTUAL_ALLOCATION_FLAG_UPPER_ADDRESS,
10440
pDesc->Flags & VIRTUAL_ALLOCATION_FLAG_STRATEGY_MASK,
10441
&allocRequest))
10442
{
10443
m_Pimpl->m_Metadata->Alloc(allocRequest, pDesc->Size, pDesc->pPrivateData);
10444
D3D12MA_HEAVY_ASSERT(m_Pimpl->m_Metadata->Validate());
10445
pAllocation->AllocHandle = allocRequest.allocHandle;
10446
10447
if (pOffset)
10448
*pOffset = m_Pimpl->m_Metadata->GetAllocationOffset(allocRequest.allocHandle);
10449
return S_OK;
10450
}
10451
10452
pAllocation->AllocHandle = (AllocHandle)0;
10453
if (pOffset)
10454
*pOffset = UINT64_MAX;
10455
10456
return E_OUTOFMEMORY;
10457
}
10458
10459
void VirtualBlock::FreeAllocation(VirtualAllocation allocation)
10460
{
10461
if (allocation.AllocHandle == (AllocHandle)0)
10462
return;
10463
10464
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
10465
10466
m_Pimpl->m_Metadata->Free(allocation.AllocHandle);
10467
D3D12MA_HEAVY_ASSERT(m_Pimpl->m_Metadata->Validate());
10468
}
10469
10470
void VirtualBlock::Clear()
10471
{
10472
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
10473
10474
m_Pimpl->m_Metadata->Clear();
10475
D3D12MA_HEAVY_ASSERT(m_Pimpl->m_Metadata->Validate());
10476
}
10477
10478
void VirtualBlock::SetAllocationPrivateData(VirtualAllocation allocation, void* pPrivateData)
10479
{
10480
D3D12MA_ASSERT(allocation.AllocHandle != (AllocHandle)0);
10481
10482
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
10483
m_Pimpl->m_Metadata->SetAllocationPrivateData(allocation.AllocHandle, pPrivateData);
10484
}
10485
10486
void VirtualBlock::GetStatistics(Statistics* pStats) const
10487
{
10488
D3D12MA_ASSERT(pStats);
10489
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
10490
D3D12MA_HEAVY_ASSERT(m_Pimpl->m_Metadata->Validate());
10491
ClearStatistics(*pStats);
10492
m_Pimpl->m_Metadata->AddStatistics(*pStats);
10493
}
10494
10495
void VirtualBlock::CalculateStatistics(DetailedStatistics* pStats) const
10496
{
10497
D3D12MA_ASSERT(pStats);
10498
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
10499
D3D12MA_HEAVY_ASSERT(m_Pimpl->m_Metadata->Validate());
10500
ClearDetailedStatistics(*pStats);
10501
m_Pimpl->m_Metadata->AddDetailedStatistics(*pStats);
10502
}
10503
10504
void VirtualBlock::BuildStatsString(WCHAR** ppStatsString) const
10505
{
10506
D3D12MA_ASSERT(ppStatsString);
10507
10508
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
10509
10510
StringBuilder sb(m_Pimpl->m_AllocationCallbacks);
10511
{
10512
JsonWriter json(m_Pimpl->m_AllocationCallbacks, sb);
10513
D3D12MA_HEAVY_ASSERT(m_Pimpl->m_Metadata->Validate());
10514
json.BeginObject();
10515
m_Pimpl->m_Metadata->WriteAllocationInfoToJson(json);
10516
json.EndObject();
10517
} // Scope for JsonWriter
10518
10519
const size_t length = sb.GetLength();
10520
WCHAR* result = AllocateArray<WCHAR>(m_Pimpl->m_AllocationCallbacks, length + 1);
10521
memcpy(result, sb.GetData(), length * sizeof(WCHAR));
10522
result[length] = L'\0';
10523
*ppStatsString = result;
10524
}
10525
10526
void VirtualBlock::FreeStatsString(WCHAR* pStatsString) const
10527
{
10528
if (pStatsString != NULL)
10529
{
10530
D3D12MA_DEBUG_GLOBAL_MUTEX_LOCK
10531
D3D12MA::Free(m_Pimpl->m_AllocationCallbacks, pStatsString);
10532
}
10533
}
10534
10535
void VirtualBlock::ReleaseThis()
10536
{
10537
// Copy is needed because otherwise we would call destructor and invalidate the structure with callbacks before using it to free memory.
10538
const ALLOCATION_CALLBACKS allocationCallbacksCopy = m_Pimpl->m_AllocationCallbacks;
10539
D3D12MA_DELETE(allocationCallbacksCopy, this);
10540
}
10541
10542
VirtualBlock::VirtualBlock(const ALLOCATION_CALLBACKS& allocationCallbacks, const VIRTUAL_BLOCK_DESC& desc)
10543
: m_Pimpl(D3D12MA_NEW(allocationCallbacks, VirtualBlockPimpl)(allocationCallbacks, desc)) {}
10544
10545
VirtualBlock::~VirtualBlock()
10546
{
10547
// THIS IS AN IMPORTANT ASSERT!
10548
// Hitting it means you have some memory leak - unreleased allocations in this virtual block.
10549
D3D12MA_ASSERT(m_Pimpl->m_Metadata->IsEmpty() && "Some allocations were not freed before destruction of this virtual block!");
10550
10551
D3D12MA_DELETE(m_Pimpl->m_AllocationCallbacks, m_Pimpl);
10552
}
10553
#endif // _D3D12MA_VIRTUAL_BLOCK_FUNCTIONS
10554
#endif // _D3D12MA_PUBLIC_INTERFACE
10555
} // namespace D3D12MA