Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
godotengine
GitHub Repository: godotengine/godot
Path: blob/master/drivers/metal/metal3_objects.h
21175 views
1
/**************************************************************************/
2
/* metal3_objects.h */
3
/**************************************************************************/
4
/* This file is part of: */
5
/* GODOT ENGINE */
6
/* https://godotengine.org */
7
/**************************************************************************/
8
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
9
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
10
/* */
11
/* Permission is hereby granted, free of charge, to any person obtaining */
12
/* a copy of this software and associated documentation files (the */
13
/* "Software"), to deal in the Software without restriction, including */
14
/* without limitation the rights to use, copy, modify, merge, publish, */
15
/* distribute, sublicense, and/or sell copies of the Software, and to */
16
/* permit persons to whom the Software is furnished to do so, subject to */
17
/* the following conditions: */
18
/* */
19
/* The above copyright notice and this permission notice shall be */
20
/* included in all copies or substantial portions of the Software. */
21
/* */
22
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
23
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
24
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
25
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
26
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
27
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
28
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
29
/**************************************************************************/
30
31
#pragma once
32
33
/**************************************************************************/
34
/* */
35
/* Portions of this code were derived from MoltenVK. */
36
/* */
37
/* Copyright (c) 2015-2023 The Brenwill Workshop Ltd. */
38
/* (http://www.brenwill.com) */
39
/* */
40
/* Licensed under the Apache License, Version 2.0 (the "License"); */
41
/* you may not use this file except in compliance with the License. */
42
/* You may obtain a copy of the License at */
43
/* */
44
/* http://www.apache.org/licenses/LICENSE-2.0 */
45
/* */
46
/* Unless required by applicable law or agreed to in writing, software */
47
/* distributed under the License is distributed on an "AS IS" BASIS, */
48
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */
49
/* implied. See the License for the specific language governing */
50
/* permissions and limitations under the License. */
51
/**************************************************************************/
52
53
#include "metal_objects_shared.h"
54
55
#include "servers/rendering/rendering_device_driver.h"
56
57
#include <Metal/Metal.hpp>
58
59
#include <initializer_list>
60
#include <optional>
61
62
namespace MTL3 {
63
64
// These types are defined in the global namespace (metal_objects_shared.h / rendering_device_driver_metal.h)
65
using ::MDAttachment;
66
using ::MDAttachmentType;
67
using ::MDCommandBufferBase;
68
using ::MDCommandBufferStateType;
69
using ::MDFrameBuffer;
70
using ::MDRenderPass;
71
using ::MDRingBuffer;
72
using ::MDSubpass;
73
using ::RenderStateBase;
74
75
using ::DynamicOffsetLayout;
76
using ::MDComputePipeline;
77
using ::MDComputeShader;
78
using ::MDLibrary;
79
using ::MDPipeline;
80
using ::MDPipelineType;
81
using ::MDRenderPipeline;
82
using ::MDRenderShader;
83
using ::MDShader;
84
using ::MDUniformSet;
85
using ::ShaderCacheEntry;
86
using ::ShaderLoadStrategy;
87
using ::UniformInfo;
88
using ::UniformSet;
89
90
using RDM = ::RenderingDeviceDriverMetal;
91
92
struct ResourceUsageEntry {
93
StageResourceUsage usage = ResourceUnused;
94
uint32_t unused = 0;
95
96
ResourceUsageEntry() {}
97
ResourceUsageEntry(StageResourceUsage p_usage) :
98
usage(p_usage) {}
99
};
100
101
} // namespace MTL3
102
103
// Mark ResourceUsageEntry as zero-constructible so Godot's templated
// containers (e.g. HashMap) may zero-initialize storage for it instead of
// running element constructors; all members default to zero-equivalent values.
template <>
struct is_zero_constructible<MTL3::ResourceUsageEntry> : std::true_type {};
105
106
namespace MTL3 {
107
108
/*! Track the cumulative usage for a resource during a render or compute pass */
109
typedef HashMap<MTL::Resource *, ResourceUsageEntry> ResourceToStageUsage;
110
111
/*! Track resource and ensure they are resident prior to dispatch or draw commands.
112
*
113
* The primary purpose of this data structure is to track all the resources that must be made resident prior
114
* to issuing the next dispatch or draw command. It aggregates all resources used from argument buffers.
115
*
116
* As an optimization, this data structure also tracks previous usage for resources, so that
117
* it may avoid binding them again in later commands if the resource is already resident and its usage flagged.
118
*/
119
struct API_AVAILABLE(macos(11.0), ios(14.0), tvos(14.0)) ResourceTracker {
	// A constant specifying how many iterations a resource can remain in
	// the _previous HashSet before it will be removed permanently.
	//
	// Keeping them in the _previous HashMap reduces churn if resources are regularly
	// bound. 256 is arbitrary, but if an object remains unused for 256 encoders,
	// it will be released.
	static constexpr uint32_t RESOURCE_UNUSED_CLEANUP_COUNT = 256;

	// Used as a scratch buffer to periodically clean up resources from _previous.
	ResourceVector _scratch;
	// Tracks all resources and their prior usage for the duration of the encoder.
	ResourceToStageUsage _previous;
	// Tracks resources for the current command that must be made resident
	ResourceUsageMap _current;

	// Merge the resources (and their usage flags) from p_from into _current,
	// queuing them to be made resident before the next draw or dispatch.
	// Defined out-of-line.
	void merge_from(const ::ResourceUsageMap &p_from);
	// Flush _current to the render encoder, ensuring residency for the tracked
	// resources. NOTE(review): exact useResource semantics are in the
	// out-of-line definition — confirm there.
	void encode(MTL::RenderCommandEncoder *p_enc);
	// Compute-encoder variant of encode() above.
	void encode(MTL::ComputeCommandEncoder *p_enc);
	// Clear all tracked state, returning the tracker to its initial state.
	void reset();
};
140
141
struct BindingCache {
142
struct BufferBinding {
143
MTL::Buffer *buffer = nullptr;
144
NS::UInteger offset = 0;
145
146
bool operator!=(const BufferBinding &p_other) const {
147
return buffer != p_other.buffer || offset != p_other.offset;
148
}
149
};
150
151
LocalVector<MTL::Texture *> textures;
152
LocalVector<MTL::SamplerState *> samplers;
153
LocalVector<BufferBinding> buffers;
154
155
_FORCE_INLINE_ void clear() {
156
textures.clear();
157
samplers.clear();
158
buffers.clear();
159
}
160
161
private:
162
template <typename T>
163
_FORCE_INLINE_ void ensure_size(LocalVector<T> &p_vec, uint32_t p_required) {
164
if (p_vec.size() < p_required) {
165
p_vec.resize_initialized(p_required);
166
}
167
}
168
169
public:
170
_FORCE_INLINE_ bool update(NS::Range p_range, MTL::Texture *const *p_values) {
171
if (p_range.length == 0) {
172
return false;
173
}
174
uint32_t required = (uint32_t)(p_range.location + p_range.length);
175
ensure_size(textures, required);
176
bool changed = false;
177
for (NS::UInteger i = 0; i < p_range.length; ++i) {
178
uint32_t slot = (uint32_t)(p_range.location + i);
179
MTL::Texture *value = p_values[i];
180
if (textures[slot] != value) {
181
textures[slot] = value;
182
changed = true;
183
}
184
}
185
return changed;
186
}
187
188
_FORCE_INLINE_ bool update(NS::Range p_range, MTL::SamplerState *const *p_values) {
189
if (p_range.length == 0) {
190
return false;
191
}
192
uint32_t required = (uint32_t)(p_range.location + p_range.length);
193
ensure_size(samplers, required);
194
bool changed = false;
195
for (NS::UInteger i = 0; i < p_range.length; ++i) {
196
uint32_t slot = (uint32_t)(p_range.location + i);
197
MTL::SamplerState *value = p_values[i];
198
if (samplers[slot] != value) {
199
samplers[slot] = value;
200
changed = true;
201
}
202
}
203
return changed;
204
}
205
206
_FORCE_INLINE_ bool update(NS::Range p_range, MTL::Buffer *const *p_values, const NS::UInteger *p_offsets) {
207
if (p_range.length == 0) {
208
return false;
209
}
210
uint32_t required = (uint32_t)(p_range.location + p_range.length);
211
ensure_size(buffers, required);
212
BufferBinding *buffers_ptr = buffers.ptr() + p_range.location;
213
bool changed = false;
214
for (NS::UInteger i = 0; i < p_range.length; ++i) {
215
BufferBinding &binding = *buffers_ptr;
216
BufferBinding new_binding = {
217
.buffer = p_values[i],
218
.offset = p_offsets[i],
219
};
220
if (binding != new_binding) {
221
binding = new_binding;
222
changed = true;
223
}
224
++buffers_ptr;
225
}
226
return changed;
227
}
228
229
_FORCE_INLINE_ bool update(MTL::Buffer *p_buffer, NS::UInteger p_offset, uint32_t p_index) {
230
uint32_t required = p_index + 1;
231
ensure_size(buffers, required);
232
BufferBinding &binding = buffers.ptr()[p_index];
233
BufferBinding new_binding = {
234
.buffer = p_buffer,
235
.offset = p_offset,
236
};
237
if (binding != new_binding) {
238
binding = new_binding;
239
return true;
240
}
241
return false;
242
}
243
};
244
245
// A type used to encode resources directly to a MTLCommandEncoder
struct DirectEncoder {
	// Target encoder. NOTE(review): mode presumably selects whether the set()
	// implementations treat this as a render or compute encoder — confirm in
	// the out-of-line definitions.
	MTL::CommandEncoder *encoder;
	// Per-slot cache used to elide redundant bind calls.
	BindingCache &cache;
	enum Mode {
		RENDER,
		COMPUTE
	};
	Mode mode;

	// Bind a range of buffers with matching per-buffer offsets.
	void set(MTL::Buffer **p_buffers, const NS::UInteger *p_offsets, NS::Range p_range);
	// Bind a single buffer at slot p_index.
	void set(MTL::Buffer *p_buffer, NS::UInteger p_offset, uint32_t p_index);
	// Bind a range of textures.
	void set(MTL::Texture **p_textures, NS::Range p_range);
	// Bind a range of sampler states.
	void set(MTL::SamplerState **p_samplers, NS::Range p_range);

	DirectEncoder(MTL::CommandEncoder *p_encoder, BindingCache &p_cache, Mode p_mode) :
			encoder(p_encoder), cache(p_cache), mode(p_mode) {}
};
263
264
// Metal 3 implementation of a command buffer: wraps an MTL::CommandBuffer and
// tracks render / compute / blit encoder state, dirty flags, and barriers.
//
// Fix: uniform_set_mask is a uint64_t, but the mark_uniforms_dirty()
// implementations previously built the mask with `1 << i` (an int shift),
// which is incorrect (and UB) for set indices >= 31. All three sites now use
// `1ULL << i`.
class API_AVAILABLE(macos(11.0), ios(14.0), tvos(14.0)) MDCommandBuffer : public MDCommandBufferBase {
	friend class MDUniformSet;

private:
#pragma mark - Common State

	BindingCache binding_cache;

#pragma mark - Argument Buffer Ring Allocator

	using Alloc = MDRingBuffer::Allocation;

	// Used for argument buffers that contain dynamic uniforms.
	MDRingBuffer _scratch;

	/// Allocates from the ring buffer for dynamic argument buffers.
	Alloc allocate_arg_buffer(uint32_t p_size);

	struct {
		NS::SharedPtr<MTL::ResidencySet> rs;
	} _frame_state;

#pragma mark - Synchronization

	// Indices into the per-stage pending barrier arrays below.
	enum {
		STAGE_RENDER,
		STAGE_COMPUTE,
		STAGE_BLIT,
		STAGE_MAX,
	};
	bool use_barriers = false;
	MTL::Stages pending_after_stages[STAGE_MAX] = { 0, 0, 0 };
	MTL::Stages pending_before_queue_stages[STAGE_MAX] = { 0, 0, 0 };
	void _encode_barrier(MTL::CommandEncoder *p_enc);

	void reset();

	MTL::CommandQueue *queue = nullptr;
	NS::SharedPtr<MTL::CommandBuffer> commandBuffer;
	bool state_begin = false;

	// Lazily retrieves (creating if needed) the underlying MTL::CommandBuffer.
	MTL::CommandBuffer *command_buffer();

	void _end_compute_dispatch();
	void _end_blit();
	MTL::BlitCommandEncoder *_ensure_blit_encoder();

	// Direction selector for _copy_texture_buffer.
	enum class CopySource {
		Buffer,
		Texture,
	};
	void _copy_texture_buffer(CopySource p_source,
			RDD::TextureID p_texture,
			RDD::BufferID p_buffer,
			VectorView<RDD::BufferTextureCopyRegion> p_regions);

#pragma mark - Render

	void _render_set_dirty_state();
	void _render_bind_uniform_sets();
	void _bind_uniforms_argument_buffers(MDUniformSet *p_set, MDShader *p_shader, uint32_t p_set_index, uint32_t p_dynamic_offsets);
	void _bind_uniforms_direct(MDUniformSet *p_set, MDShader *p_shader, DirectEncoder p_enc, uint32_t p_set_index, uint32_t p_dynamic_offsets);

#pragma mark - Compute

	void _compute_set_dirty_state();
	void _compute_bind_uniform_sets();
	void _bind_uniforms_argument_buffers_compute(MDUniformSet *p_set, MDShader *p_shader, uint32_t p_set_index, uint32_t p_dynamic_offsets);

protected:
	// MDCommandBufferBase accessors, all backed by the render state below.
	void mark_push_constants_dirty() override;
	RenderStateBase &get_render_state_base() override { return render; }
	uint32_t get_current_view_count() const override { return render.get_subpass().view_count; }
	MDRenderPass *get_render_pass() const override { return render.pass; }
	MDFrameBuffer *get_frame_buffer() const override { return render.frameBuffer; }
	const MDSubpass &get_current_subpass() const override { return render.get_subpass(); }
	LocalVector<RDD::RenderPassClearValue> &get_clear_values() override { return render.clear_values; }
	const Rect2i &get_render_area() const override { return render.render_area; }
	void end_render_encoding() override { render.end_encoding(); }

public:
	// State specific to a render pass.
	struct RenderState : public RenderStateBase {
		MDRenderPass *pass = nullptr;
		MDFrameBuffer *frameBuffer = nullptr;
		MDRenderPipeline *pipeline = nullptr;
		LocalVector<RDD::RenderPassClearValue> clear_values;
		uint32_t current_subpass = UINT32_MAX;
		Rect2i render_area = {};
		bool is_rendering_entire_area = false;
		NS::SharedPtr<MTL::RenderPassDescriptor> desc;
		NS::SharedPtr<MTL::RenderCommandEncoder> encoder;
		MTL::Buffer *index_buffer = nullptr; // Buffer is owned by RDD.
		MTL::IndexType index_type = MTL::IndexTypeUInt16;
		uint32_t index_offset = 0;
		LocalVector<MTL::Buffer *> vertex_buffers;
		LocalVector<NS::UInteger> vertex_offsets;
		ResourceTracker resource_tracker;

		LocalVector<MDUniformSet *> uniform_sets;
		uint32_t dynamic_offsets = 0;
		// Bit mask of the uniform sets that are dirty, to prevent redundant binding.
		uint64_t uniform_set_mask = 0;

		_FORCE_INLINE_ void reset();
		void end_encoding();

		_ALWAYS_INLINE_ const MDSubpass &get_subpass() const {
			DEV_ASSERT(pass != nullptr);
			return pass->subpasses[current_subpass];
		}

		_FORCE_INLINE_ void mark_viewport_dirty() {
			if (viewports.is_empty()) {
				return;
			}
			dirty.set_flag(DirtyFlag::DIRTY_VIEWPORT);
		}

		_FORCE_INLINE_ void mark_scissors_dirty() {
			if (scissors.is_empty()) {
				return;
			}
			dirty.set_flag(DirtyFlag::DIRTY_SCISSOR);
		}

		_FORCE_INLINE_ void mark_vertex_dirty() {
			if (vertex_buffers.is_empty()) {
				return;
			}
			dirty.set_flag(DirtyFlag::DIRTY_VERTEX);
		}

		// Mark only the listed set indices dirty.
		_FORCE_INLINE_ void mark_uniforms_dirty(std::initializer_list<uint32_t> l) {
			if (uniform_sets.is_empty()) {
				return;
			}
			for (uint32_t i : l) {
				if (i < uniform_sets.size() && uniform_sets[i] != nullptr) {
					// 1ULL: mask is 64-bit; an int shift is UB for i >= 31.
					uniform_set_mask |= 1ULL << i;
				}
			}
			dirty.set_flag(DirtyFlag::DIRTY_UNIFORMS);
		}

		// Mark every bound set dirty.
		_FORCE_INLINE_ void mark_uniforms_dirty(void) {
			if (uniform_sets.is_empty()) {
				return;
			}
			for (uint32_t i = 0; i < uniform_sets.size(); i++) {
				if (uniform_sets[i] != nullptr) {
					// 1ULL: mask is 64-bit; an int shift is UB for i >= 31.
					uniform_set_mask |= 1ULL << i;
				}
			}
			dirty.set_flag(DirtyFlag::DIRTY_UNIFORMS);
		}

		_FORCE_INLINE_ void mark_blend_dirty() {
			if (!blend_constants.has_value()) {
				return;
			}
			dirty.set_flag(DirtyFlag::DIRTY_BLEND);
		}

		// Clamp p_rect so it lies entirely within render_area.
		MTL::ScissorRect clip_to_render_area(MTL::ScissorRect p_rect) const {
			uint32_t raLeft = render_area.position.x;
			uint32_t raRight = raLeft + render_area.size.width;
			uint32_t raBottom = render_area.position.y;
			uint32_t raTop = raBottom + render_area.size.height;

			p_rect.x = CLAMP(p_rect.x, raLeft, MAX(raRight - 1, raLeft));
			p_rect.y = CLAMP(p_rect.y, raBottom, MAX(raTop - 1, raBottom));
			p_rect.width = MIN(p_rect.width, raRight - p_rect.x);
			p_rect.height = MIN(p_rect.height, raTop - p_rect.y);

			return p_rect;
		}

		// Rect2i variant of the above.
		Rect2i clip_to_render_area(Rect2i p_rect) const {
			int32_t raLeft = render_area.position.x;
			int32_t raRight = raLeft + render_area.size.width;
			int32_t raBottom = render_area.position.y;
			int32_t raTop = raBottom + render_area.size.height;

			p_rect.position.x = CLAMP(p_rect.position.x, raLeft, MAX(raRight - 1, raLeft));
			p_rect.position.y = CLAMP(p_rect.position.y, raBottom, MAX(raTop - 1, raBottom));
			p_rect.size.width = MIN(p_rect.size.width, raRight - p_rect.position.x);
			p_rect.size.height = MIN(p_rect.size.height, raTop - p_rect.position.y);

			return p_rect;
		}

	} render;

	// State specific for a compute pass.
	struct ComputeState {
		MDComputePipeline *pipeline = nullptr;
		NS::SharedPtr<MTL::ComputeCommandEncoder> encoder;
		ResourceTracker resource_tracker;
		// clang-format off
		enum DirtyFlag: uint16_t {
			DIRTY_NONE     = 0,
			DIRTY_PIPELINE = 1 << 0, //! pipeline state
			DIRTY_UNIFORMS = 1 << 1, //! uniform sets
			DIRTY_PUSH     = 1 << 2, //! push constants
			DIRTY_ALL      = (1 << 3) - 1,
		};
		// clang-format on
		BitField<DirtyFlag> dirty = DIRTY_NONE;

		LocalVector<MDUniformSet *> uniform_sets;
		uint32_t dynamic_offsets = 0;
		// Bit mask of the uniform sets that are dirty, to prevent redundant binding.
		uint64_t uniform_set_mask = 0;

		_FORCE_INLINE_ void reset();
		void end_encoding();

		// Mark every bound set dirty.
		_FORCE_INLINE_ void mark_uniforms_dirty(void) {
			if (uniform_sets.is_empty()) {
				return;
			}
			for (uint32_t i = 0; i < uniform_sets.size(); i++) {
				if (uniform_sets[i] != nullptr) {
					// 1ULL: mask is 64-bit; an int shift is UB for i >= 31.
					uniform_set_mask |= 1ULL << i;
				}
			}
			dirty.set_flag(DirtyFlag::DIRTY_UNIFORMS);
		}
	} compute;

	// State specific to a blit pass.
	struct {
		NS::SharedPtr<MTL::BlitCommandEncoder> encoder;
		_FORCE_INLINE_ void reset() {
			encoder.reset();
		}
	} blit;

	_FORCE_INLINE_ MTL::CommandBuffer *get_command_buffer() const {
		return commandBuffer.get();
	}

	void begin() override;
	void commit() override;
	void end() override;

	void bind_pipeline(RDD::PipelineID p_pipeline) override;

#pragma mark - Render Commands

	void render_bind_uniform_sets(VectorView<RDD::UniformSetID> p_uniform_sets, RDD::ShaderID p_shader, uint32_t p_first_set_index, uint32_t p_set_count, uint32_t p_dynamic_offsets) override;
	void render_clear_attachments(VectorView<RDD::AttachmentClear> p_attachment_clears, VectorView<Rect2i> p_rects) override;
	void render_begin_pass(RDD::RenderPassID p_render_pass,
			RDD::FramebufferID p_frameBuffer,
			RDD::CommandBufferType p_cmd_buffer_type,
			const Rect2i &p_rect,
			VectorView<RDD::RenderPassClearValue> p_clear_values) override;
	void render_next_subpass() override;
	void render_draw(uint32_t p_vertex_count,
			uint32_t p_instance_count,
			uint32_t p_base_vertex,
			uint32_t p_first_instance) override;
	void render_bind_vertex_buffers(uint32_t p_binding_count, const RDD::BufferID *p_buffers, const uint64_t *p_offsets, uint64_t p_dynamic_offsets) override;
	void render_bind_index_buffer(RDD::BufferID p_buffer, RDD::IndexBufferFormat p_format, uint64_t p_offset) override;

	void render_draw_indexed(uint32_t p_index_count,
			uint32_t p_instance_count,
			uint32_t p_first_index,
			int32_t p_vertex_offset,
			uint32_t p_first_instance) override;

	void render_draw_indexed_indirect(RDD::BufferID p_indirect_buffer, uint64_t p_offset, uint32_t p_draw_count, uint32_t p_stride) override;
	void render_draw_indexed_indirect_count(RDD::BufferID p_indirect_buffer, uint64_t p_offset, RDD::BufferID p_count_buffer, uint64_t p_count_buffer_offset, uint32_t p_max_draw_count, uint32_t p_stride) override;
	void render_draw_indirect(RDD::BufferID p_indirect_buffer, uint64_t p_offset, uint32_t p_draw_count, uint32_t p_stride) override;
	void render_draw_indirect_count(RDD::BufferID p_indirect_buffer, uint64_t p_offset, RDD::BufferID p_count_buffer, uint64_t p_count_buffer_offset, uint32_t p_max_draw_count, uint32_t p_stride) override;

	void render_end_pass() override;

#pragma mark - Compute Commands

	void compute_bind_uniform_sets(VectorView<RDD::UniformSetID> p_uniform_sets, RDD::ShaderID p_shader, uint32_t p_first_set_index, uint32_t p_set_count, uint32_t p_dynamic_offsets) override;
	void compute_dispatch(uint32_t p_x_groups, uint32_t p_y_groups, uint32_t p_z_groups) override;
	void compute_dispatch_indirect(RDD::BufferID p_indirect_buffer, uint64_t p_offset) override;

#pragma mark - Transfer

private:
	MTL::RenderCommandEncoder *get_new_render_encoder_with_descriptor(MTL::RenderPassDescriptor *p_desc);

public:
	void resolve_texture(RDD::TextureID p_src_texture, RDD::TextureLayout p_src_texture_layout, uint32_t p_src_layer, uint32_t p_src_mipmap, RDD::TextureID p_dst_texture, RDD::TextureLayout p_dst_texture_layout, uint32_t p_dst_layer, uint32_t p_dst_mipmap) override;
	void clear_color_texture(RDD::TextureID p_texture, RDD::TextureLayout p_texture_layout, const Color &p_color, const RDD::TextureSubresourceRange &p_subresources) override;
	void clear_depth_stencil_texture(RDD::TextureID p_texture, RDD::TextureLayout p_texture_layout, float p_depth, uint8_t p_stencil, const RDD::TextureSubresourceRange &p_subresources) override;
	void clear_buffer(RDD::BufferID p_buffer, uint64_t p_offset, uint64_t p_size) override;
	void copy_buffer(RDD::BufferID p_src_buffer, RDD::BufferID p_dst_buffer, VectorView<RDD::BufferCopyRegion> p_regions) override;
	void copy_texture(RDD::TextureID p_src_texture, RDD::TextureID p_dst_texture, VectorView<RDD::TextureCopyRegion> p_regions) override;
	void copy_buffer_to_texture(RDD::BufferID p_src_buffer, RDD::TextureID p_dst_texture, VectorView<RDD::BufferTextureCopyRegion> p_regions) override;
	void copy_texture_to_buffer(RDD::TextureID p_src_texture, RDD::BufferID p_dst_buffer, VectorView<RDD::BufferTextureCopyRegion> p_regions) override;

#pragma mark - Synchronization

	void pipeline_barrier(BitField<RDD::PipelineStageBits> p_src_stages,
			BitField<RDD::PipelineStageBits> p_dst_stages,
			VectorView<RDD::MemoryAccessBarrier> p_memory_barriers,
			VectorView<RDD::BufferBarrier> p_buffer_barriers,
			VectorView<RDD::TextureBarrier> p_texture_barriers,
			VectorView<RDD::AccelerationStructureBarrier> p_acceleration_structure_barriers) override;

#pragma mark - Debugging

	void begin_label(const char *p_label_name, const Color &p_color) override;
	void end_label() override;

	MDCommandBuffer(MTL::CommandQueue *p_queue, ::RenderingDeviceDriverMetal *p_device_driver);
	MDCommandBuffer() = default;
};
580
581
} // namespace MTL3
582
583
// C++ helper to get mipmap level size from texture
584
// Compute the dimensions of mip level p_level of p_tex; each dimension is
// halved per level and clamped to a minimum of 1.
_FORCE_INLINE_ static MTL::Size mipmapLevelSizeFromTexture(MTL::Texture *p_tex, NS::UInteger p_level) {
	const auto scaled = [p_level](NS::UInteger p_dim) -> NS::UInteger {
		return MAX(p_dim >> p_level, 1UL);
	};
	MTL::Size size;
	size.width = scaled(p_tex->width());
	size.height = scaled(p_tex->height());
	size.depth = scaled(p_tex->depth());
	return size;
}
591
592