Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
godotengine
GitHub Repository: godotengine/godot
Path: blob/master/drivers/metal/metal_objects_shared.h
20919 views
1
/**************************************************************************/
2
/* metal_objects_shared.h */
3
/**************************************************************************/
4
/* This file is part of: */
5
/* GODOT ENGINE */
6
/* https://godotengine.org */
7
/**************************************************************************/
8
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
9
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
10
/* */
11
/* Permission is hereby granted, free of charge, to any person obtaining */
12
/* a copy of this software and associated documentation files (the */
13
/* "Software"), to deal in the Software without restriction, including */
14
/* without limitation the rights to use, copy, modify, merge, publish, */
15
/* distribute, sublicense, and/or sell copies of the Software, and to */
16
/* permit persons to whom the Software is furnished to do so, subject to */
17
/* the following conditions: */
18
/* */
19
/* The above copyright notice and this permission notice shall be */
20
/* included in all copies or substantial portions of the Software. */
21
/* */
22
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
23
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
24
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
25
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
26
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
27
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
28
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
29
/**************************************************************************/
30
31
#pragma once
32
33
#include "metal_device_properties.h"
34
#include "metal_utils.h"
35
#include "pixel_formats.h"
36
#include "sha256_digest.h"
37
38
#include <CoreFoundation/CoreFoundation.h>
39
#include <memory>
40
#include <optional>
41
42
class RenderingDeviceDriverMetal;
43
44
using RDC = RenderingDeviceCommons;
45
46
/// Bitfield describing which shader stages use a resource.
/// Values alias the RDD::SHADER_STAGE_*_BIT flags, so they can be combined
/// and compared directly with RenderingDeviceDriver stage masks.
enum ShaderStageUsage : uint32_t {
	None = 0,
	Vertex = RDD::SHADER_STAGE_VERTEX_BIT,
	Fragment = RDD::SHADER_STAGE_FRAGMENT_BIT,
	TesselationControl = RDD::SHADER_STAGE_TESSELATION_CONTROL_BIT,
	TesselationEvaluation = RDD::SHADER_STAGE_TESSELATION_EVALUATION_BIT,
	Compute = RDD::SHADER_STAGE_COMPUTE_BIT,
};
54
55
/// Merges additional stage bits into p_a and returns the updated reference.
_FORCE_INLINE_ ShaderStageUsage &operator|=(ShaderStageUsage &p_a, int p_b) {
	const uint32_t merged = uint32_t(p_a) | uint32_t(p_b);
	p_a = ShaderStageUsage(merged);
	return p_a;
}
59
60
/// Key identifying a unique clear-attachment pipeline configuration
/// (attachment formats, sample count, layered rendering). Hashable and
/// equality-comparable so it can serve as a pipeline-cache key.
struct ClearAttKey {
	const static uint32_t COLOR_COUNT = MAX_COLOR_ATTACHMENT_COUNT;
	const static uint32_t DEPTH_INDEX = COLOR_COUNT; // Depth slot immediately follows the color slots.
	const static uint32_t STENCIL_INDEX = DEPTH_INDEX + 1;
	const static uint32_t ATTACHMENT_COUNT = STENCIL_INDEX + 1;

	enum Flags : uint16_t {
		CLEAR_FLAGS_NONE = 0,
		CLEAR_FLAGS_LAYERED = 1 << 0, // Layered (multi-view) rendering enabled.
	};

	Flags flags = CLEAR_FLAGS_NONE;
	uint16_t sample_count = 0;
	// Pixel format per attachment slot; 0 (invalid format) marks an unused slot.
	uint16_t pixel_formats[ATTACHMENT_COUNT] = { 0 };

	_FORCE_INLINE_ void set_color_format(uint32_t p_idx, MTL::PixelFormat p_fmt) { pixel_formats[p_idx] = p_fmt; }
	_FORCE_INLINE_ void set_depth_format(MTL::PixelFormat p_fmt) { pixel_formats[DEPTH_INDEX] = p_fmt; }
	_FORCE_INLINE_ void set_stencil_format(MTL::PixelFormat p_fmt) { pixel_formats[STENCIL_INDEX] = p_fmt; }
	_FORCE_INLINE_ MTL::PixelFormat depth_format() const { return (MTL::PixelFormat)pixel_formats[DEPTH_INDEX]; }
	_FORCE_INLINE_ MTL::PixelFormat stencil_format() const { return (MTL::PixelFormat)pixel_formats[STENCIL_INDEX]; }
	_FORCE_INLINE_ void enable_layered_rendering() { flags::set(flags, CLEAR_FLAGS_LAYERED); }

	_FORCE_INLINE_ bool is_enabled(uint32_t p_idx) const { return pixel_formats[p_idx] != 0; }
	_FORCE_INLINE_ bool is_depth_enabled() const { return pixel_formats[DEPTH_INDEX] != 0; }
	_FORCE_INLINE_ bool is_stencil_enabled() const { return pixel_formats[STENCIL_INDEX] != 0; }
	_FORCE_INLINE_ bool is_layered_rendering_enabled() const { return flags::any(flags, CLEAR_FLAGS_LAYERED); }

	// Byte-wise comparison is valid because every member is uint16_t-based,
	// leaving no padding bytes in the struct.
	_FORCE_INLINE_ bool operator==(const ClearAttKey &p_rhs) const {
		return memcmp(this, &p_rhs, sizeof(ClearAttKey)) == 0;
	}

	/// Murmur3 hash over all fields, suitable for HashMap keying.
	uint32_t hash() const {
		uint32_t h = hash_murmur3_one_32(flags);
		h = hash_murmur3_one_32(sample_count, h);
		h = hash_murmur3_buffer(pixel_formats, ATTACHMENT_COUNT * sizeof(pixel_formats[0]), h);
		return hash_fmix32(h);
	}
};
98
99
#pragma mark - Ring Buffer
100
101
/// A ring buffer backed by MTLBuffer instances for transient GPU allocations.
/// Allocations are 16-byte aligned with a minimum size of 16 bytes.
/// When the current buffer is exhausted, a new buffer is allocated.
class API_AVAILABLE(macos(11.0), ios(14.0), tvos(14.0), visionos(2.0)) MDRingBuffer {
public:
	static constexpr uint32_t DEFAULT_BUFFER_SIZE = 512 * 1024;
	static constexpr uint32_t MIN_BLOCK_SIZE = 16;
	static constexpr uint32_t ALIGNMENT = 16;

	/// Result of a single allocation: a CPU pointer into shared storage, the
	/// backing buffer, the block's GPU address (when available) and its byte
	/// offset within the buffer.
	struct Allocation {
		void *ptr = nullptr;
		MTL::Buffer *buffer = nullptr;
		uint64_t gpu_address = 0; // Remains 0 when gpuAddress() is unavailable (pre macOS 13 / iOS 16).
		uint32_t offset = 0;

		_FORCE_INLINE_ bool is_valid() const { return ptr != nullptr; }
	};

private:
	MTL::Device *device = nullptr;
	LocalVector<MTL::Buffer *> buffers; // Backing segments; only released in the destructor.
	LocalVector<uint32_t> heads; // Next free byte offset within each segment.
	uint32_t current_segment = 0;
	uint32_t buffer_size = DEFAULT_BUFFER_SIZE;
	bool changed = false; // Set whenever a new segment is allocated; see is_changed().

	/// Allocates a new shared-storage, hazard-untracked segment and returns its index.
	_FORCE_INLINE_ uint32_t alloc_segment() {
		MTL::Buffer *buffer = device->newBuffer(buffer_size, MTL::ResourceStorageModeShared | MTL::ResourceHazardTrackingModeUntracked);
		buffers.push_back(buffer);
		heads.push_back(0);
		changed = true;

		return buffers.size() - 1;
	}

public:
	MDRingBuffer() = default;

	MDRingBuffer(MTL::Device *p_device, uint32_t p_buffer_size = DEFAULT_BUFFER_SIZE) :
			device(p_device), buffer_size(p_buffer_size) {}

	~MDRingBuffer() {
		for (MTL::Buffer *buffer : buffers) {
			buffer->release();
		}
	}

	/// Allocates a block of memory from the ring buffer.
	/// Returns an Allocation with the pointer, buffer, and offset.
	_FORCE_INLINE_ Allocation allocate(uint32_t p_size) {
		// Enforce the minimum block size and round up to the alignment.
		p_size = MAX(p_size, MIN_BLOCK_SIZE);
		p_size = (p_size + ALIGNMENT - 1) & ~(ALIGNMENT - 1);

		if (buffers.is_empty()) {
			alloc_segment();
		}

		uint32_t aligned_head = (heads[current_segment] + ALIGNMENT - 1) & ~(ALIGNMENT - 1);

		if (aligned_head + p_size > buffer_size) {
			// Current segment exhausted, try to find one with space or allocate new.
			bool found = false;
			for (uint32_t i = 0; i < buffers.size(); i++) {
				uint32_t ah = (heads[i] + ALIGNMENT - 1) & ~(ALIGNMENT - 1);
				if (ah + p_size <= buffer_size) {
					current_segment = i;
					aligned_head = ah;
					found = true;
					break;
				}
			}

			if (!found) {
				current_segment = alloc_segment();
				aligned_head = 0;
			}
		}

		MTL::Buffer *buffer = buffers[current_segment];
		Allocation alloc;
		alloc.buffer = buffer;
		alloc.offset = aligned_head;
		alloc.ptr = static_cast<uint8_t *>(buffer->contents()) + aligned_head;
		if (__builtin_available(macOS 13.0, iOS 16.0, tvOS 16.0, *)) {
			// gpuAddress() is only available on newer OS releases.
			alloc.gpu_address = buffer->gpuAddress() + aligned_head;
		}
		heads[current_segment] = aligned_head + p_size;

		return alloc;
	}

	/// Resets all segments for reuse. Call at frame boundaries when GPU work is complete.
	_FORCE_INLINE_ void reset() {
		for (uint32_t &head : heads) {
			head = 0;
		}
		current_segment = 0;
	}

	/// Returns true if new segments were allocated since the last clear_changed().
	_FORCE_INLINE_ bool is_changed() const { return changed; }

	/// Clears the changed flag.
	_FORCE_INLINE_ void clear_changed() { changed = false; }

	/// Returns a Span of all backing buffers.
	_FORCE_INLINE_ Span<MTL::Buffer *const> get_buffers() const {
		return Span<MTL::Buffer *const>(buffers.ptr(), buffers.size());
	}

	/// Returns the number of buffer segments currently allocated.
	_FORCE_INLINE_ uint32_t get_segment_count() const {
		return buffers.size();
	}
};
216
217
#pragma mark - Resource Factory
218
219
/// Builds the internal Metal pipeline objects the driver needs for clearing
/// attachments and issuing empty draws (functions, render pipeline states and
/// depth-stencil states).
class API_AVAILABLE(macos(11.0), ios(14.0), tvos(14.0), visionos(2.0)) MDResourceFactory {
private:
	MTL::Device *device;
	PixelFormats &pixel_formats;
	uint32_t max_buffer_count; // Device buffer-binding limit, used to place vertex buffers from the top down.

	/// Compiles p_source and returns the function named p_name; compile errors are reported through p_error.
	NS::SharedPtr<MTL::Function> new_func(NS::String *p_source, NS::String *p_name, NS::Error **p_error);
	NS::SharedPtr<MTL::Function> new_clear_vert_func(ClearAttKey &p_key);
	NS::SharedPtr<MTL::Function> new_clear_frag_func(ClearAttKey &p_key);
	const char *get_format_type_string(MTL::PixelFormat p_fmt) const;

	// Vertex buffers are bound at the top of the buffer index range, growing downward.
	_FORCE_INLINE_ uint32_t get_vertex_buffer_index(uint32_t p_binding) {
		return (max_buffer_count - 1) - p_binding;
	}

public:
	/// Creates a render pipeline state used to clear the attachments described by p_key.
	NS::SharedPtr<MTL::RenderPipelineState> new_clear_pipeline_state(ClearAttKey &p_key, NS::Error **p_error);
	/// Creates a render pipeline state for draws that produce no fragment output.
	NS::SharedPtr<MTL::RenderPipelineState> new_empty_draw_pipeline_state(ClearAttKey &p_key, NS::Error **p_error);
	NS::SharedPtr<MTL::DepthStencilState> new_depth_stencil_state(bool p_use_depth, bool p_use_stencil);

	MDResourceFactory(MTL::Device *p_device, PixelFormats &p_pixel_formats, uint32_t p_max_buffer_count) :
			device(p_device), pixel_formats(p_pixel_formats), max_buffer_count(p_max_buffer_count) {}
	~MDResourceFactory() = default;
};
243
244
/// Caches the lazily-created pipeline and depth-stencil states produced by
/// MDResourceFactory, keyed by attachment configuration (ClearAttKey).
class API_AVAILABLE(macos(11.0), ios(14.0), tvos(14.0), visionos(2.0)) MDResourceCache {
private:
	// Descriptive alias; the previous `typedef ... HashMap;` shadowed the
	// HashMap class-template name inside this class scope.
	typedef HashMap<ClearAttKey, NS::SharedPtr<MTL::RenderPipelineState>> PipelineStateMap;
	std::unique_ptr<MDResourceFactory> resource_factory;
	PipelineStateMap clear_states; // Pipelines for clearing attachments.
	PipelineStateMap empty_draw_states; // Pipelines for draws with no fragment output.

	// Depth-stencil state permutations used when clearing (depth and/or stencil).
	struct {
		NS::SharedPtr<MTL::DepthStencilState> all;
		NS::SharedPtr<MTL::DepthStencilState> depth_only;
		NS::SharedPtr<MTL::DepthStencilState> stencil_only;
		NS::SharedPtr<MTL::DepthStencilState> none;
	} clear_depth_stencil_state;

public:
	/// Returns the cached clear pipeline for p_key, creating it on first use.
	MTL::RenderPipelineState *get_clear_render_pipeline_state(ClearAttKey &p_key, NS::Error **p_error);
	/// Returns the cached empty-draw pipeline for p_key, creating it on first use.
	MTL::RenderPipelineState *get_empty_draw_pipeline_state(ClearAttKey &p_key, NS::Error **p_error);
	MTL::DepthStencilState *get_depth_stencil_state(bool p_use_depth, bool p_use_stencil);

	explicit MDResourceCache(MTL::Device *p_device, PixelFormats &p_pixel_formats, uint32_t p_max_buffer_count) :
			resource_factory(new MDResourceFactory(p_device, p_pixel_formats, p_max_buffer_count)) {}
	~MDResourceCache() = default;
};
267
268
/**
269
* Returns an index that can be used to map a shader stage to an index in a fixed-size array that is used for
270
* a single pipeline type.
271
*/
272
_FORCE_INLINE_ static uint32_t to_index(RDD::ShaderStage p_s) {
273
switch (p_s) {
274
case RenderingDeviceCommons::SHADER_STAGE_VERTEX:
275
case RenderingDeviceCommons::SHADER_STAGE_TESSELATION_CONTROL:
276
case RenderingDeviceCommons::SHADER_STAGE_TESSELATION_EVALUATION:
277
case RenderingDeviceCommons::SHADER_STAGE_COMPUTE:
278
default:
279
return 0;
280
case RenderingDeviceCommons::SHADER_STAGE_FRAGMENT:
281
return 1;
282
}
283
}
284
285
/// Holds the set of textures (one per attachment) and the pixel size of a
/// framebuffer used by a render pass.
class API_AVAILABLE(macos(11.0), ios(14.0), tvos(14.0), visionos(2.0)) MDFrameBuffer {
	Vector<MTL::Texture *> textures;

public:
	Size2i size;
	MDFrameBuffer(Vector<MTL::Texture *> p_textures, Size2i p_size) :
			textures(p_textures), size(p_size) {}
	MDFrameBuffer() {}

	/// Returns the texture at the given index.
	_ALWAYS_INLINE_ MTL::Texture *get_texture(uint32_t p_idx) const {
		return textures[p_idx];
	}

	/// Returns true if the texture at the given index is not nil.
	_ALWAYS_INLINE_ bool has_texture(uint32_t p_idx) const {
		return textures[p_idx] != nullptr;
	}

	/// Set the texture at the given index.
	_ALWAYS_INLINE_ void set_texture(uint32_t p_idx, MTL::Texture *p_texture) {
		textures.write[p_idx] = p_texture;
	}

	/// Unset or nil the texture at the given index.
	_ALWAYS_INLINE_ void unset_texture(uint32_t p_idx) {
		textures.write[p_idx] = nullptr;
	}

	/// Resizes buffers to the specified size.
	_ALWAYS_INLINE_ void set_texture_count(uint32_t p_size) {
		textures.resize(p_size);
	}

	virtual ~MDFrameBuffer() = default;
};
321
322
// Compare RDD::ShaderID keys by their underlying 64-bit handle value.
template <>
struct HashMapComparatorDefault<RDD::ShaderID> {
	static bool compare(const RDD::ShaderID &p_lhs, const RDD::ShaderID &p_rhs) {
		return p_lhs.id == p_rhs.id;
	}
};
328
329
// Compare RDD::BufferID keys by their underlying 64-bit handle value.
template <>
struct HashMapComparatorDefault<RDD::BufferID> {
	static bool compare(const RDD::BufferID &p_lhs, const RDD::BufferID &p_rhs) {
		return p_lhs.id == p_rhs.id;
	}
};
335
336
// Compare RDD::TextureID keys by their underlying 64-bit handle value.
template <>
struct HashMapComparatorDefault<RDD::TextureID> {
	static bool compare(const RDD::TextureID &p_lhs, const RDD::TextureID &p_rhs) {
		return p_lhs.id == p_rhs.id;
	}
};
342
343
// Hash RDD::BufferID keys via the default 64-bit integer hasher on the handle.
template <>
struct HashMapHasherDefaultImpl<RDD::BufferID> {
	static _FORCE_INLINE_ uint32_t hash(const RDD::BufferID &p_value) {
		return HashMapHasherDefaultImpl<uint64_t>::hash(p_value.id);
	}
};
349
350
// Hash RDD::TextureID keys via the default 64-bit integer hasher on the handle.
template <>
struct HashMapHasherDefaultImpl<RDD::TextureID> {
	static _FORCE_INLINE_ uint32_t hash(const RDD::TextureID &p_value) {
		return HashMapHasherDefaultImpl<uint64_t>::hash(p_value.id);
	}
};
356
357
namespace rid {

/// Recovers the driver object pointer wrapped by an opaque RDD::ID handle.
template <typename T>
_FORCE_INLINE_ T *get(RDD::ID p_id) {
	return reinterpret_cast<T *>(p_id.id);
}

/// Recovers the driver object pointer wrapped by a raw 64-bit handle.
template <typename T>
_FORCE_INLINE_ T *get(uint64_t p_id) {
	return reinterpret_cast<T *>(p_id);
}

} // namespace rid
370
371
#pragma mark - Render Pass Types
372
373
class MDRenderPass;
374
375
/// Bitmask describing which aspects (color/depth/stencil) an attachment serves.
enum class MDAttachmentType : uint8_t {
	None = 0,
	Color = 1 << 0,
	Depth = 1 << 1,
	Stencil = 1 << 2,
};
381
382
/// Merges the aspect bits of p_b into p_a and returns the updated reference.
_FORCE_INLINE_ MDAttachmentType &operator|=(MDAttachmentType &p_a, MDAttachmentType p_b) {
	p_a = MDAttachmentType(uint8_t(p_a) | uint8_t(p_b));
	return p_a;
}
386
387
/// Tests whether p_a and p_b share any aspect bits.
_FORCE_INLINE_ bool operator&(MDAttachmentType p_a, MDAttachmentType p_b) {
	return (uint8_t(p_a) & uint8_t(p_b)) != 0;
}
390
391
/// Describes a single subpass of a render pass: its attachment references and
/// the number of views rendered (for multi-view rendering).
struct MDSubpass {
	uint32_t subpass_index = 0;
	uint32_t view_count = 0;
	LocalVector<RDD::AttachmentReference> input_references;
	LocalVector<RDD::AttachmentReference> color_references;
	RDD::AttachmentReference depth_stencil_reference;
	LocalVector<RDD::AttachmentReference> resolve_references;

	/// Returns the Metal format capabilities the attachment at p_index must support for this subpass.
	MTLFmtCaps getRequiredFmtCapsForAttachmentAt(uint32_t p_index) const;
};
401
402
/// Describes a render-pass attachment: its format, aspect type, load/store
/// actions and the subpass range over which it is used.
struct API_AVAILABLE(macos(11.0), ios(14.0), tvos(14.0), visionos(2.0)) MDAttachment {
private:
	uint32_t index = 0;
	uint32_t firstUseSubpassIndex = 0;
	uint32_t lastUseSubpassIndex = 0;

public:
	MTL::PixelFormat format = MTL::PixelFormatInvalid;
	MDAttachmentType type = MDAttachmentType::None;
	MTL::LoadAction loadAction = MTL::LoadActionDontCare;
	MTL::StoreAction storeAction = MTL::StoreActionDontCare;
	MTL::LoadAction stencilLoadAction = MTL::LoadActionDontCare;
	MTL::StoreAction stencilStoreAction = MTL::StoreActionDontCare;
	uint32_t samples = 1;

	/*!
	 * @brief Returns true if this attachment is first used in the given subpass.
	 * @param p_subpass
	 * @return
	 */
	_FORCE_INLINE_ bool isFirstUseOf(MDSubpass const &p_subpass) const {
		return p_subpass.subpass_index == firstUseSubpassIndex;
	}

	/*!
	 * @brief Returns true if this attachment is last used in the given subpass.
	 * @param p_subpass
	 * @return
	 */
	_FORCE_INLINE_ bool isLastUseOf(MDSubpass const &p_subpass) const {
		return p_subpass.subpass_index == lastUseSubpassIndex;
	}

	/// Computes firstUseSubpassIndex / lastUseSubpassIndex from the pass's subpasses.
	void linkToSubpass(MDRenderPass const &p_pass);

	MTL::StoreAction getMTLStoreAction(MDSubpass const &p_subpass,
			bool p_is_rendering_entire_area,
			bool p_has_resolve,
			bool p_can_resolve,
			bool p_is_stencil) const;
	/// Configures p_desc (texture, load and store actions) for the given subpass.
	/// Returns true when the chosen load action is Clear, i.e. the caller must
	/// supply a clear value for this attachment.
	bool configureDescriptor(MTL::RenderPassAttachmentDescriptor *p_desc,
			PixelFormats &p_pf,
			MDSubpass const &p_subpass,
			MTL::Texture *p_attachment,
			bool p_is_rendering_entire_area,
			bool p_has_resolve,
			bool p_can_resolve,
			bool p_is_stencil) const {
		p_desc->setTexture(p_attachment);

		MTL::LoadAction load;
		if (!p_is_rendering_entire_area || !isFirstUseOf(p_subpass)) {
			// Partial render area, or a later subpass: existing contents must be preserved.
			load = MTL::LoadActionLoad;
		} else {
			load = p_is_stencil ? (MTL::LoadAction)stencilLoadAction : (MTL::LoadAction)loadAction;
		}

		p_desc->setLoadAction(load);

		MTL::PixelFormat mtlFmt = p_attachment->pixelFormat();
		bool isDepthFormat = p_pf.isDepthFormat(mtlFmt);
		bool isStencilFormat = p_pf.isStencilFormat(mtlFmt);
		if (isStencilFormat && !p_is_stencil && !isDepthFormat) {
			// Stencil-only format configured through the non-stencil path: nothing to store.
			p_desc->setStoreAction(MTL::StoreActionDontCare);
		} else {
			p_desc->setStoreAction(getMTLStoreAction(p_subpass, p_is_rendering_entire_area, p_has_resolve, p_can_resolve, p_is_stencil));
		}

		return load == MTL::LoadActionClear;
	}

	/** Returns whether this attachment should be cleared in the subpass. */
	bool shouldClear(MDSubpass const &p_subpass, bool p_is_stencil) const;
};
476
477
/// A render pass: its attachments and the subpasses that use them.
class API_AVAILABLE(macos(11.0), ios(14.0), tvos(14.0), visionos(2.0)) MDRenderPass {
public:
	Vector<MDAttachment> attachments;
	Vector<MDSubpass> subpasses;

	/// Sample count of the first attachment, or 1 when the pass has none.
	uint32_t get_sample_count() const {
		if (attachments.is_empty()) {
			return 1;
		}
		return attachments[0].samples;
	}

	MDRenderPass(Vector<MDAttachment> &p_attachments, Vector<MDSubpass> &p_subpasses);
};
488
489
#pragma mark - Command Buffer Helpers
490
491
/// Converts a Vector3i extent into an MTL::Size.
_FORCE_INLINE_ static MTL::Size MTLSizeFromVector3i(Vector3i p_size) {
	MTL::Size size;
	size.width = (NS::UInteger)p_size.x;
	size.height = (NS::UInteger)p_size.y;
	size.depth = (NS::UInteger)p_size.z;
	return size;
}
494
495
/// Converts a Vector3i coordinate into an MTL::Origin.
_FORCE_INLINE_ static MTL::Origin MTLOriginFromVector3i(Vector3i p_origin) {
	MTL::Origin origin;
	origin.x = (NS::UInteger)p_origin.x;
	origin.y = (NS::UInteger)p_origin.y;
	origin.z = (NS::UInteger)p_origin.z;
	return origin;
}
498
499
// Clamps the size so that the sum of the origin and size do not exceed the maximum size.
500
_FORCE_INLINE_ static MTL::Size clampMTLSize(MTL::Size p_size, MTL::Origin p_origin, MTL::Size p_max_size) {
501
MTL::Size clamped;
502
clamped.width = MIN(p_size.width, p_max_size.width - p_origin.x);
503
clamped.height = MIN(p_size.height, p_max_size.height - p_origin.y);
504
clamped.depth = MIN(p_size.depth, p_max_size.depth - p_origin.z);
505
return clamped;
506
}
507
508
API_AVAILABLE(macos(11.0), ios(14.0), tvos(14.0))
/// Returns true for texture types addressed with a slice/depth index
/// (includes 3D textures alongside the array texture types).
_FORCE_INLINE_ static bool isArrayTexture(MTL::TextureType p_type) {
	switch (p_type) {
		case MTL::TextureType3D:
		case MTL::TextureType2DArray:
		case MTL::TextureType2DMultisampleArray:
		case MTL::TextureType1DArray:
			return true;
		default:
			return false;
	}
}
515
516
/// Component-wise equality for MTL::Size.
_FORCE_INLINE_ static bool operator==(MTL::Size p_a, MTL::Size p_b) {
	if (p_a.width != p_b.width) {
		return false;
	}
	if (p_a.height != p_b.height) {
		return false;
	}
	return p_a.depth == p_b.depth;
}
519
520
#pragma mark - Pipeline Stage Conversion
521
522
GODOT_CLANG_WARNING_PUSH_AND_IGNORE("-Wunguarded-availability")
523
524
/// Maps RDD source (before-scope) pipeline stages to the Metal stages a
/// barrier must wait on.
_FORCE_INLINE_ static MTL::Stages convert_src_pipeline_stages_to_metal(BitField<RDD::PipelineStageBits> p_stages) {
	// TOP_OF_PIPE carries no meaning for a source scope.
	p_stages.clear_flag(RDD::PIPELINE_STAGE_TOP_OF_PIPE_BIT);

	// BOTTOM_OF_PIPE or ALL_COMMANDS means "all prior work must complete".
	if (p_stages & (RDD::PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT | RDD::PIPELINE_STAGE_ALL_COMMANDS_BIT)) {
		return MTL::StageAll;
	}

	MTL::Stages result = 0;

	// Compute dispatches.
	if (p_stages & RDD::PIPELINE_STAGE_COMPUTE_SHADER_BIT) {
		result |= MTL::StageDispatch;
	}

	// Transfer (blit) operations.
	if (p_stages & RDD::PIPELINE_STAGE_COPY_BIT) {
		result |= MTL::StageBlit;
	}

	// Anything that executes in the vertex pipeline maps to the vertex stage.
	if (p_stages & (RDD::PIPELINE_STAGE_DRAW_INDIRECT_BIT | RDD::PIPELINE_STAGE_VERTEX_INPUT_BIT | RDD::PIPELINE_STAGE_VERTEX_SHADER_BIT | RDD::PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT | RDD::PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | RDD::PIPELINE_STAGE_GEOMETRY_SHADER_BIT)) {
		result |= MTL::StageVertex;
	}

	// Fragment pipeline work, including resolve and clear_storage, which on
	// Metal run through the render pipeline.
	if (p_stages & (RDD::PIPELINE_STAGE_FRAGMENT_SHADER_BIT | RDD::PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | RDD::PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT | RDD::PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | RDD::PIPELINE_STAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT | RDD::PIPELINE_STAGE_FRAGMENT_DENSITY_PROCESS_BIT | RDD::PIPELINE_STAGE_RESOLVE_BIT | RDD::PIPELINE_STAGE_CLEAR_STORAGE_BIT)) {
		result |= MTL::StageFragment;
	}

	// ALL_GRAPHICS covers both render stages.
	if (p_stages & RDD::PIPELINE_STAGE_ALL_GRAPHICS_BIT) {
		result |= (MTL::StageVertex | MTL::StageFragment);
	}

	return result;
}
562
563
/// Maps RDD destination (after-scope) pipeline stages to the Metal stages
/// that must wait on a barrier.
_FORCE_INLINE_ static MTL::Stages convert_dst_pipeline_stages_to_metal(BitField<RDD::PipelineStageBits> p_stages) {
	// BOTTOM_OF_PIPE carries no meaning for a destination scope.
	p_stages.clear_flag(RDD::PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT);

	// TOP_OF_PIPE or ALL_COMMANDS means "wait before any work starts".
	if (p_stages & (RDD::PIPELINE_STAGE_ALL_COMMANDS_BIT | RDD::PIPELINE_STAGE_TOP_OF_PIPE_BIT)) {
		return MTL::StageAll;
	}

	MTL::Stages result = 0;

	// Compute dispatches.
	if (p_stages & RDD::PIPELINE_STAGE_COMPUTE_SHADER_BIT) {
		result |= MTL::StageDispatch;
	}

	// Transfer (blit) operations.
	if (p_stages & RDD::PIPELINE_STAGE_COPY_BIT) {
		result |= MTL::StageBlit;
	}

	// Anything that executes in the vertex pipeline maps to the vertex stage.
	if (p_stages & (RDD::PIPELINE_STAGE_DRAW_INDIRECT_BIT | RDD::PIPELINE_STAGE_VERTEX_INPUT_BIT | RDD::PIPELINE_STAGE_VERTEX_SHADER_BIT | RDD::PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT | RDD::PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | RDD::PIPELINE_STAGE_GEOMETRY_SHADER_BIT)) {
		result |= MTL::StageVertex;
	}

	// Fragment pipeline work, including resolve and clear_storage, which on
	// Metal run through the render pipeline.
	if (p_stages & (RDD::PIPELINE_STAGE_FRAGMENT_SHADER_BIT | RDD::PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | RDD::PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT | RDD::PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | RDD::PIPELINE_STAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT | RDD::PIPELINE_STAGE_FRAGMENT_DENSITY_PROCESS_BIT | RDD::PIPELINE_STAGE_RESOLVE_BIT | RDD::PIPELINE_STAGE_CLEAR_STORAGE_BIT)) {
		result |= MTL::StageFragment;
	}

	// ALL_GRAPHICS covers both render stages.
	if (p_stages & RDD::PIPELINE_STAGE_ALL_GRAPHICS_BIT) {
		result |= (MTL::StageVertex | MTL::StageFragment);
	}

	return result;
}
601
602
GODOT_CLANG_WARNING_POP
603
604
#pragma mark - Command Buffer Base
605
606
/// Identifies which kind of encoding (if any) a command buffer currently has open.
enum class MDCommandBufferStateType {
	None,
	Render,
	Compute,
	Blit, // Only used by Metal 3
};
612
613
/// Base struct for render state shared between MTL3 and MTL4 implementations.
struct RenderStateBase {
	LocalVector<MTL::Viewport> viewports;
	LocalVector<MTL::ScissorRect> scissors;
	// Unset when no blend constants have been specified.
	std::optional<Color> blend_constants;

	// Flags marking which pieces of render state must be re-encoded before the next draw.
	// clang-format off
	enum DirtyFlag : uint16_t {
		DIRTY_NONE     = 0,
		DIRTY_PIPELINE = 1 << 0,
		DIRTY_UNIFORMS = 1 << 1,
		DIRTY_PUSH     = 1 << 2,
		DIRTY_DEPTH    = 1 << 3,
		DIRTY_VERTEX   = 1 << 4,
		DIRTY_VIEWPORT = 1 << 5,
		DIRTY_SCISSOR  = 1 << 6,
		DIRTY_BLEND    = 1 << 7,
		DIRTY_RASTER   = 1 << 8,
		DIRTY_ALL      = (1 << 9) - 1,
	};
	// clang-format on
	BitField<DirtyFlag> dirty = DIRTY_NONE;
};
636
637
/// Abstract base class for Metal command buffers, shared between MTL3 and MTL4 implementations.
class API_AVAILABLE(macos(11.0), ios(14.0), tvos(14.0), visionos(2.0)) MDCommandBufferBase {
	// Objects kept alive until release_resources() runs (see retain_resource()).
	LocalVector<CFTypeRef> _retained_resources;

protected:
	// From RenderingDevice
	static constexpr uint32_t MAX_PUSH_CONSTANT_SIZE = 128;

	// Which encoder type is currently active.
	MDCommandBufferStateType type = MDCommandBufferStateType::None;

	// Staged push-constant bytes, their length, and the binding index they target.
	uint8_t push_constant_data[MAX_PUSH_CONSTANT_SIZE];
	uint32_t push_constant_data_len = 0;
	uint32_t push_constant_binding = UINT32_MAX;

	::RenderingDeviceDriverMetal *device_driver = nullptr;

	/// Releases everything collected via retain_resource().
	void release_resources();

	/// Called when push constants are modified to mark the appropriate dirty flags.
	virtual void mark_push_constants_dirty() = 0;

	/// Returns a reference to the render state base for viewport/scissor/blend operations.
	virtual RenderStateBase &get_render_state_base() = 0;

	/// Returns the view count for the current subpass.
	virtual uint32_t get_current_view_count() const = 0;

	/// Accessors for render pass state.
	virtual MDRenderPass *get_render_pass() const = 0;
	virtual MDFrameBuffer *get_frame_buffer() const = 0;
	virtual const MDSubpass &get_current_subpass() const = 0;
	virtual LocalVector<RDD::RenderPassClearValue> &get_clear_values() = 0;
	virtual const Rect2i &get_render_area() const = 0;
	virtual void end_render_encoding() = 0;

	// Helpers for generating clear-rectangle geometry and finishing passes.
	void _populate_vertices(simd::float4 *p_vertices, Size2i p_fb_size, VectorView<Rect2i> p_rects);
	uint32_t _populate_vertices(simd::float4 *p_vertices, uint32_t p_index, Rect2i const &p_rect, Size2i p_fb_size);
	void _end_render_pass();
	void _render_clear_render_area();

public:
	virtual ~MDCommandBufferBase() { release_resources(); }

	virtual void begin() = 0;
	virtual void commit() = 0;
	virtual void end() = 0;

	virtual void bind_pipeline(RDD::PipelineID p_pipeline) = 0;
	/// Stages push-constant data for the given shader; encoded on the next draw/dispatch.
	void encode_push_constant_data(RDD::ShaderID p_shader, VectorView<uint32_t> p_data);

	/// Keeps p_resource alive until the command buffer's resources are released.
	void retain_resource(CFTypeRef p_resource);

#pragma mark - Render Commands

	virtual void render_bind_uniform_sets(VectorView<RDD::UniformSetID> p_uniform_sets, RDD::ShaderID p_shader, uint32_t p_first_set_index, uint32_t p_set_count, uint32_t p_dynamic_offsets) = 0;
	virtual void render_clear_attachments(VectorView<RDD::AttachmentClear> p_attachment_clears, VectorView<Rect2i> p_rects) = 0;
	void render_set_viewport(VectorView<Rect2i> p_viewports);
	void render_set_scissor(VectorView<Rect2i> p_scissors);
	void render_set_blend_constants(const Color &p_constants);
	virtual void render_begin_pass(RDD::RenderPassID p_render_pass,
			RDD::FramebufferID p_frameBuffer,
			RDD::CommandBufferType p_cmd_buffer_type,
			const Rect2i &p_rect,
			VectorView<RDD::RenderPassClearValue> p_clear_values) = 0;
	virtual void render_next_subpass() = 0;
	virtual void render_draw(uint32_t p_vertex_count,
			uint32_t p_instance_count,
			uint32_t p_base_vertex,
			uint32_t p_first_instance) = 0;
	virtual void render_bind_vertex_buffers(uint32_t p_binding_count, const RDD::BufferID *p_buffers, const uint64_t *p_offsets, uint64_t p_dynamic_offsets) = 0;
	virtual void render_bind_index_buffer(RDD::BufferID p_buffer, RDD::IndexBufferFormat p_format, uint64_t p_offset) = 0;

	virtual void render_draw_indexed(uint32_t p_index_count,
			uint32_t p_instance_count,
			uint32_t p_first_index,
			int32_t p_vertex_offset,
			uint32_t p_first_instance) = 0;

	virtual void render_draw_indexed_indirect(RDD::BufferID p_indirect_buffer, uint64_t p_offset, uint32_t p_draw_count, uint32_t p_stride) = 0;
	virtual void render_draw_indexed_indirect_count(RDD::BufferID p_indirect_buffer, uint64_t p_offset, RDD::BufferID p_count_buffer, uint64_t p_count_buffer_offset, uint32_t p_max_draw_count, uint32_t p_stride) = 0;
	virtual void render_draw_indirect(RDD::BufferID p_indirect_buffer, uint64_t p_offset, uint32_t p_draw_count, uint32_t p_stride) = 0;
	virtual void render_draw_indirect_count(RDD::BufferID p_indirect_buffer, uint64_t p_offset, RDD::BufferID p_count_buffer, uint64_t p_count_buffer_offset, uint32_t p_max_draw_count, uint32_t p_stride) = 0;

	virtual void render_end_pass() = 0;

#pragma mark - Compute Commands

	virtual void compute_bind_uniform_sets(VectorView<RDD::UniformSetID> p_uniform_sets, RDD::ShaderID p_shader, uint32_t p_first_set_index, uint32_t p_set_count, uint32_t p_dynamic_offsets) = 0;
	virtual void compute_dispatch(uint32_t p_x_groups, uint32_t p_y_groups, uint32_t p_z_groups) = 0;
	virtual void compute_dispatch_indirect(RDD::BufferID p_indirect_buffer, uint64_t p_offset) = 0;

#pragma mark - Transfer

	virtual void resolve_texture(RDD::TextureID p_src_texture, RDD::TextureLayout p_src_texture_layout, uint32_t p_src_layer, uint32_t p_src_mipmap, RDD::TextureID p_dst_texture, RDD::TextureLayout p_dst_texture_layout, uint32_t p_dst_layer, uint32_t p_dst_mipmap) = 0;
	virtual void clear_color_texture(RDD::TextureID p_texture, RDD::TextureLayout p_texture_layout, const Color &p_color, const RDD::TextureSubresourceRange &p_subresources) = 0;
	virtual void clear_depth_stencil_texture(RDD::TextureID p_texture, RDD::TextureLayout p_texture_layout, float p_depth, uint8_t p_stencil, const RDD::TextureSubresourceRange &p_subresources) = 0;
	virtual void clear_buffer(RDD::BufferID p_buffer, uint64_t p_offset, uint64_t p_size) = 0;
	virtual void copy_buffer(RDD::BufferID p_src_buffer, RDD::BufferID p_dst_buffer, VectorView<RDD::BufferCopyRegion> p_regions) = 0;
	virtual void copy_texture(RDD::TextureID p_src_texture, RDD::TextureID p_dst_texture, VectorView<RDD::TextureCopyRegion> p_regions) = 0;
	virtual void copy_buffer_to_texture(RDD::BufferID p_src_buffer, RDD::TextureID p_dst_texture, VectorView<RDD::BufferTextureCopyRegion> p_regions) = 0;
	virtual void copy_texture_to_buffer(RDD::TextureID p_src_texture, RDD::BufferID p_dst_buffer, VectorView<RDD::BufferTextureCopyRegion> p_regions) = 0;

#pragma mark - Synchronization

	virtual void pipeline_barrier(BitField<RDD::PipelineStageBits> p_src_stages,
			BitField<RDD::PipelineStageBits> p_dst_stages,
			VectorView<RDD::MemoryAccessBarrier> p_memory_barriers,
			VectorView<RDD::BufferBarrier> p_buffer_barriers,
			VectorView<RDD::TextureBarrier> p_texture_barriers,
			VectorView<RDD::AccelerationStructureBarrier> p_acceleration_structure_barriers) = 0;

#pragma mark - Debugging

	virtual void begin_label(const char *p_label_name, const Color &p_color) = 0;
	virtual void end_label() = 0;
};
753
754
#pragma mark - Uniform Types
755
756
/// Describes a single uniform binding within a shader: its Metal binding
/// characteristics and its location in both the direct-slot and
/// argument-buffer binding models.
struct API_AVAILABLE(macos(11.0), ios(14.0), tvos(14.0), visionos(2.0)) UniformInfo {
	uint32_t binding;
	BitField<RDD::ShaderStage> active_stages;
	MTL::DataType dataType = MTL::DataTypeNone;
	MTL::BindingAccess access = MTL::BindingAccessReadOnly;
	MTL::ResourceUsage usage = 0;
	MTL::TextureType textureType = MTL::TextureType2D;
	uint32_t imageFormat = 0;
	uint32_t arrayLength = 0;
	bool isMultisampled = false; // Use a bool literal (was initialized with integer 0).

	/// Binding indices per resource class; UINT32_MAX marks an unused slot.
	struct Indexes {
		uint32_t buffer = UINT32_MAX;
		uint32_t texture = UINT32_MAX;
		uint32_t sampler = UINT32_MAX;
	};
	Indexes slot; // Direct (slot-based) binding indices.
	Indexes arg_buffer; // Argument-buffer relative indices.

	enum class IndexType {
		SLOT,
		ARG,
	};

	/// Returns the index set for the requested binding model.
	_FORCE_INLINE_ Indexes &get_indexes(IndexType p_type) {
		switch (p_type) {
			case IndexType::SLOT:
				return slot;
			case IndexType::ARG:
				return arg_buffer;
		}
	}
};
789
790
/// A shader's view of a single uniform set: its uniforms and which of them
/// are bound with dynamic offsets.
struct API_AVAILABLE(macos(11.0), ios(14.0), tvos(14.0), visionos(2.0)) UniformSet {
	LocalVector<UniformInfo> uniforms;
	// Indices into `uniforms` of the entries that use dynamic offsets.
	LocalVector<uint32_t> dynamic_uniforms;
	// Size in bytes of the set's backing buffer — presumably the argument
	// buffer; confirm against the driver implementation.
	uint32_t buffer_size = 0;
};
795
796
/// Packs per-set dynamic-offset layout data (a 4-bit offset and 4-bit count
/// per set) into a single 64-bit word for cheap copying and emptiness checks.
class API_AVAILABLE(macos(11.0), ios(14.0), tvos(14.0), visionos(2.0)) DynamicOffsetLayout {
	struct Data {
		uint8_t offset : 4; // First dynamic-offset index for the set (0-15).
		uint8_t count : 4; // Number of dynamic buffers in the set (0-15).
	};

	// NOTE(review): the union assumes MAX_DYNAMIC_BUFFERS * sizeof(Data) fits
	// in 8 bytes so `_val` covers the whole array — confirm if that constant
	// ever grows past 8.
	union {
		Data data[MAX_DYNAMIC_BUFFERS];
		uint64_t _val = 0;
	};

public:
	_FORCE_INLINE_ bool is_empty() const { return _val == 0; }

	_FORCE_INLINE_ uint32_t get_count(uint32_t p_set_index) const {
		return data[p_set_index].count;
	}

	_FORCE_INLINE_ uint32_t get_offset(uint32_t p_set_index) const {
		return data[p_set_index].offset;
	}

	_FORCE_INLINE_ void set_offset_count(uint32_t p_set_index, uint8_t p_offset, uint8_t p_count) {
		data[p_set_index].offset = p_offset;
		data[p_set_index].count = p_count;
	}

	// Bit shift (in a packed offsets word) for the given set and dynamic index;
	// each entry occupies 4 bits.
	_FORCE_INLINE_ uint32_t get_offset_index_shift(uint32_t p_set_index, uint32_t p_dynamic_index = 0) const {
		return (data[p_set_index].offset + p_dynamic_index) * 4u;
	}
};
827
828
#pragma mark - Shader Types

class MDLibrary; // Forward declaration for C++ code.
struct ShaderCacheEntry; // Forward declaration for C++ code.

/// Controls when a shader library is compiled.
enum class ShaderLoadStrategy {
	IMMEDIATE,
	LAZY,

	/// The default strategy is to load the shader immediately.
	DEFAULT = IMMEDIATE,
};
/// A Metal shader library.
842
class MDLibrary : public std::enable_shared_from_this<MDLibrary> {
843
protected:
844
ShaderCacheEntry *_entry = nullptr;
845
#ifdef DEV_ENABLED
846
NS::SharedPtr<NS::String> _original_source = nullptr;
847
#endif
848
849
MDLibrary(ShaderCacheEntry *p_entry
850
#ifdef DEV_ENABLED
851
,
852
NS::String *p_source
853
#endif
854
);
855
856
public:
857
virtual ~MDLibrary();
858
859
virtual MTL::Library *get_library() = 0;
860
virtual NS::Error *get_error() = 0;
861
virtual void set_label(NS::String *p_label);
862
#ifdef DEV_ENABLED
863
NS::String *get_original_source() const { return _original_source.get(); }
864
#endif
865
866
static std::shared_ptr<MDLibrary> create(ShaderCacheEntry *p_entry,
867
MTL::Device *p_device,
868
NS::String *p_source,
869
MTL::CompileOptions *p_options,
870
ShaderLoadStrategy p_strategy);
871
872
static std::shared_ptr<MDLibrary> create(ShaderCacheEntry *p_entry,
873
MTL::Device *p_device,
874
#ifdef DEV_ENABLED
875
NS::String *p_source,
876
#endif
877
dispatch_data_t p_data);
878
};
879
880
/// A cache entry for a Metal shader library.
881
struct ShaderCacheEntry {
882
RenderingDeviceDriverMetal &owner;
883
/// A hash of the Metal shader source code.
884
SHA256Digest key;
885
CharString name;
886
RD::ShaderStage stage = RD::SHADER_STAGE_VERTEX;
887
/// Weak reference to the library; allows cache lookup without preventing cleanup.
888
std::weak_ptr<MDLibrary> library;
889
890
/// Notify the cache that this entry is no longer needed.
891
void notify_free() const;
892
893
ShaderCacheEntry(RenderingDeviceDriverMetal &p_owner, SHA256Digest p_key) :
894
owner(p_owner), key(p_key) {
895
}
896
~ShaderCacheEntry() = default;
897
};
898
899
/// Base class for Metal shaders, holding the reflection data shared by the
/// render and compute variants.
class API_AVAILABLE(macos(11.0), ios(14.0), tvos(14.0), visionos(2.0)) MDShader {
public:
	CharString name;
	Vector<UniformSet> sets; // One entry per descriptor set.
	struct {
		BitField<RDD::ShaderStage> stages = {}; // Stages that consume the push constants.
		uint32_t binding = UINT32_MAX; // UINT32_MAX when the shader has no push constants.
		uint32_t size = 0;
	} push_constants;
	DynamicOffsetLayout dynamic_offset_layout;
	bool uses_argument_buffers = true;

	// Parameters are taken by value and moved into place (was copied).
	MDShader(CharString p_name, Vector<UniformSet> p_sets, bool p_uses_argument_buffers) :
			name(std::move(p_name)), sets(std::move(p_sets)), uses_argument_buffers(p_uses_argument_buffers) {}
	virtual ~MDShader() = default;
};
class API_AVAILABLE(macos(11.0), ios(14.0), tvos(14.0), visionos(2.0)) MDComputeShader final : public MDShader {
public:
	MTL::Size local = {}; // Local (threadgroup) dispatch size.

	std::shared_ptr<MDLibrary> kernel;

	MDComputeShader(CharString p_name, Vector<UniformSet> p_sets, bool p_uses_argument_buffers, std::shared_ptr<MDLibrary> p_kernel);
};
class API_AVAILABLE(macos(11.0), ios(14.0), tvos(14.0), visionos(2.0)) MDRenderShader final : public MDShader {
public:
	bool needs_view_mask_buffer = false;

	std::shared_ptr<MDLibrary> vert;
	std::shared_ptr<MDLibrary> frag;

	MDRenderShader(CharString p_name,
			Vector<UniformSet> p_sets,
			bool p_needs_view_mask_buffer,
			bool p_uses_argument_buffers,
			std::shared_ptr<MDLibrary> p_vert, std::shared_ptr<MDLibrary> p_frag);
};
#pragma mark - Uniform Set
940
941
enum StageResourceUsage : uint32_t {
942
ResourceUnused = 0,
943
VertexRead = (MTL::ResourceUsageRead << RDD::SHADER_STAGE_VERTEX * 2),
944
VertexWrite = (MTL::ResourceUsageWrite << RDD::SHADER_STAGE_VERTEX * 2),
945
FragmentRead = (MTL::ResourceUsageRead << RDD::SHADER_STAGE_FRAGMENT * 2),
946
FragmentWrite = (MTL::ResourceUsageWrite << RDD::SHADER_STAGE_FRAGMENT * 2),
947
TesselationControlRead = (MTL::ResourceUsageRead << RDD::SHADER_STAGE_TESSELATION_CONTROL * 2),
948
TesselationControlWrite = (MTL::ResourceUsageWrite << RDD::SHADER_STAGE_TESSELATION_CONTROL * 2),
949
TesselationEvaluationRead = (MTL::ResourceUsageRead << RDD::SHADER_STAGE_TESSELATION_EVALUATION * 2),
950
TesselationEvaluationWrite = (MTL::ResourceUsageWrite << RDD::SHADER_STAGE_TESSELATION_EVALUATION * 2),
951
ComputeRead = (MTL::ResourceUsageRead << RDD::SHADER_STAGE_COMPUTE * 2),
952
ComputeWrite = (MTL::ResourceUsageWrite << RDD::SHADER_STAGE_COMPUTE * 2),
953
};
954
955
typedef LocalVector<MTL::Resource *> ResourceVector;
956
typedef HashMap<StageResourceUsage, ResourceVector> ResourceUsageMap;
957
958
_FORCE_INLINE_ StageResourceUsage &operator|=(StageResourceUsage &p_a, uint32_t p_b) {
959
p_a = StageResourceUsage(uint32_t(p_a) | p_b);
960
return p_a;
961
}
962
963
_FORCE_INLINE_ StageResourceUsage stage_resource_usage(RDC::ShaderStage p_stage, MTL::ResourceUsage p_usage) {
964
return StageResourceUsage(p_usage << (p_stage * 2));
965
}
966
967
_FORCE_INLINE_ MTL::ResourceUsage resource_usage_for_stage(StageResourceUsage p_usage, RDC::ShaderStage p_stage) {
968
return MTL::ResourceUsage((p_usage >> (p_stage * 2)) & 0b11);
969
}
970
971
class API_AVAILABLE(macos(11.0), ios(14.0), tvos(14.0), visionos(2.0)) MDUniformSet {
public:
	NS::SharedPtr<MTL::Buffer> arg_buffer;
	Vector<uint8_t> arg_buffer_data; // Stored for dynamic uniform sets.
	ResourceUsageMap usage_to_resources; // Used by Metal 3 for resource tracking.
	Vector<RDD::BoundUniform> uniforms;
};
#pragma mark - Pipeline Types

enum class MDPipelineType {
	None,
	Render,
	Compute,
};
/// Common base for render and compute pipelines; `type` discriminates the
/// concrete subclass.
class API_AVAILABLE(macos(11.0), ios(14.0), tvos(14.0), visionos(2.0)) MDPipeline {
public:
	MDPipelineType type;

	explicit MDPipeline(MDPipelineType p_type) :
			type(p_type) {}
	virtual ~MDPipeline() = default;
};
class API_AVAILABLE(macos(11.0), ios(14.0), tvos(14.0), visionos(2.0)) MDRenderPipeline final : public MDPipeline {
public:
	NS::SharedPtr<MTL::RenderPipelineState> state;
	NS::SharedPtr<MTL::DepthStencilState> depth_stencil;
	uint32_t push_constant_size = 0;
	uint32_t push_constant_stages_mask = 0;
	SampleCount sample_count = SampleCount1;

	/// Fixed-function raster state captured at pipeline creation; pushed onto a
	/// render command encoder via apply().
	struct {
		MTL::CullMode cull_mode = MTL::CullModeNone;
		MTL::TriangleFillMode fill_mode = MTL::TriangleFillModeFill;
		MTL::DepthClipMode clip_mode = MTL::DepthClipModeClip;
		MTL::Winding winding = MTL::WindingClockwise;
		MTL::PrimitiveType render_primitive = MTL::PrimitiveTypePoint;

		struct {
			bool enabled = false;
		} depth_test;

		struct {
			bool enabled = false;
			float depth_bias = 0.0;
			float slope_scale = 0.0;
			float clamp = 0.0;

			// No-op unless enabled.
			template <typename T>
			_FORCE_INLINE_ void apply(T *p_enc) const {
				if (!enabled) {
					return;
				}
				p_enc->setDepthBias(depth_bias, slope_scale, clamp);
			}
		} depth_bias;

		struct {
			bool enabled = false;
			uint32_t front_reference = 0;
			uint32_t back_reference = 0;

			// No-op unless enabled.
			template <typename T>
			_FORCE_INLINE_ void apply(T *p_enc) const {
				if (!enabled) {
					return;
				}
				p_enc->setStencilReferenceValues(front_reference, back_reference);
			}
		} stencil;

		struct {
			bool enabled = false;
			float r = 0.0;
			float g = 0.0;
			float b = 0.0;
			float a = 0.0;

			// NOTE: applied unconditionally, unlike depth_bias and stencil.
			template <typename T>
			_FORCE_INLINE_ void apply(T *p_enc) const {
				p_enc->setBlendColor(r, g, b, a);
			}
		} blend;

		/// Applies the full raster state to the encoder.
		template <typename T>
		_FORCE_INLINE_ void apply(T *p_enc) const {
			p_enc->setCullMode(cull_mode);
			p_enc->setTriangleFillMode(fill_mode);
			p_enc->setDepthClipMode(clip_mode);
			p_enc->setFrontFacingWinding(winding);
			depth_bias.apply(p_enc);
			stencil.apply(p_enc);
			blend.apply(p_enc);
		}

	} raster_state;

	MDRenderShader *shader = nullptr;

	MDRenderPipeline() :
			MDPipeline(MDPipelineType::Render) {}
	~MDRenderPipeline() final = default;
};
class API_AVAILABLE(macos(11.0), ios(14.0), tvos(14.0), visionos(2.0)) MDComputePipeline final : public MDPipeline {
public:
	NS::SharedPtr<MTL::ComputePipelineState> state;
	struct {
		MTL::Size local = {}; // Local (threadgroup) dispatch size.
	} compute_state;

	MDComputeShader *shader = nullptr;

	explicit MDComputePipeline(NS::SharedPtr<MTL::ComputePipelineState> p_state) :
			MDPipeline(MDPipelineType::Compute), state(std::move(p_state)) {}
	~MDComputePipeline() final = default;
};