GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/gallium/drivers/freedreno/freedreno_resource.h
/*
 * Copyright (C) 2012 Rob Clark <[email protected]>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <[email protected]>
 */

#ifndef FREEDRENO_RESOURCE_H_
#define FREEDRENO_RESOURCE_H_

#include "util/list.h"
#include "util/simple_mtx.h"
#include "util/u_dump.h"
#include "util/u_range.h"
#include "util/u_transfer_helper.h"

#include "freedreno/fdl/freedreno_layout.h"
#include "freedreno_batch.h"
#include "freedreno_util.h"

#define PRSC_FMT \
   "p: target=%s, format=%s, %ux%ux%u, " \
   "array_size=%u, last_level=%u, " \
   "nr_samples=%u, usage=%u, bind=%x, flags=%x"
#define PRSC_ARGS(p) \
   (p), util_str_tex_target((p)->target, true), \
      util_format_short_name((p)->format), (p)->width0, (p)->height0, \
      (p)->depth0, (p)->array_size, (p)->last_level, (p)->nr_samples, \
      (p)->usage, (p)->bind, (p)->flags

enum fd_lrz_direction {
   FD_LRZ_UNKNOWN,
   /* Depth func less/less-than: */
   FD_LRZ_LESS,
   /* Depth func greater/greater-than: */
   FD_LRZ_GREATER,
};

/**
 * State related to batch/resource tracking.
 *
 * With threaded_context we need to support replace_buffer_storage, in
 * which case we can end up in transfer_map with tres->latest, but other
 * pipe_context APIs using the original prsc pointer. This allows TC to
 * not have to synchronize the front-end thread with the buffer storage
 * replacement called on the driver thread. But it complicates the batch/
 * resource tracking.
 *
 * To handle this, we need to split the tracking out into its own ref-
 * counted structure, so as needed both "versions" of the resource can
 * point to the same tracking.
 *
 * We could *almost* just push this down to fd_bo, except for a3xx/a4xx
 * hw queries, where we don't know up-front the size to allocate for
 * per-tile query results.
 */
struct fd_resource_tracking {
   struct pipe_reference reference;

   /* bitmask of in-flight batches which reference this resource. Note
    * that the batch doesn't hold a reference to resources (but instead
    * the fd_ringbuffer holds refs to the underlying fd_bo), but in case
    * the resource is destroyed we need to clean up the batch's weak
    * references to us.
    */
   uint32_t batch_mask;

   /* reference to batch that writes this resource: */
   struct fd_batch *write_batch;

   /* Set of batches whose batch-cache key references this resource.
    * We need to track this to know which batch-cache entries to
    * invalidate if, for example, the resource is invalidated or
    * shadowed.
    */
   uint32_t bc_batch_mask;
};

void __fd_resource_tracking_destroy(struct fd_resource_tracking *track);

static inline void
fd_resource_tracking_reference(struct fd_resource_tracking **ptr,
                               struct fd_resource_tracking *track)
{
   struct fd_resource_tracking *old_track = *ptr;

   if (pipe_reference(&(*ptr)->reference, &track->reference)) {
      assert(!old_track->write_batch);
      free(old_track);
   }

   *ptr = track;
}
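
/* Usage sketch (illustrative, not from the upstream header): after
 * threaded_context swaps in replacement buffer storage, both "versions"
 * of the resource can share one ref-counted tracking struct. Assuming a
 * hypothetical replacement resource `shadow':
 *
 *    fd_resource_tracking_reference(&shadow->track, rsc->track);
 *
 * If that drops the last reference to shadow's previous tracking (which
 * must have no pending write_batch), the old struct is freed, and both
 * resources then point at the same tracking.
 */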

/**
 * A resource (any buffer/texture/image/etc)
 */
struct fd_resource {
   struct threaded_resource b;
   struct fd_bo *bo; /* use fd_resource_set_bo() to write */
   enum pipe_format internal_format;
   struct fdl_layout layout;

   /* buffer range that has been initialized */
   struct util_range valid_buffer_range;
   bool valid;
   struct renderonly_scanout *scanout;

   /* reference to the resource holding stencil data for a z32_s8 texture */
   /* TODO rename to secondary or auxiliary? */
   struct fd_resource *stencil;

   struct fd_resource_tracking *track;

   simple_mtx_t lock;

   /* bitmask of state this resource could potentially dirty when rebound,
    * see rebind_resource()
    */
   enum fd_dirty_3d_state dirty;

   /* Sequence # incremented each time bo changes: */
   uint16_t seqno;

   /* Is this buffer a replacement created by threaded_context to avoid
    * a stall in the PIPE_MAP_DISCARD_WHOLE_RESOURCE|PIPE_MAP_WRITE case?
    * If so, it no longer "owns" its rsc->track, and so should not
    * invalidate when the rsc is destroyed.
    */
   bool is_replacement : 1;

   /* Uninitialized resources with UBWC format need their UBWC flag data
    * cleared before writes, as the UBWC state is read and used during
    * writes, so undefined UBWC flag data results in undefined results.
    */
   bool needs_ubwc_clear : 1;

   /*
    * LRZ
    *
    * TODO lrz width/height/pitch should probably also move to
    * fdl_layout
    */
   bool lrz_valid : 1;
   enum fd_lrz_direction lrz_direction : 2;
   uint16_t lrz_width; // for lrz clear, does this differ from lrz_pitch?
   uint16_t lrz_height;
   uint16_t lrz_pitch;
   struct fd_bo *lrz;
};

struct fd_memory_object {
   struct pipe_memory_object b;
   struct fd_bo *bo;
};

static inline struct fd_resource *
fd_resource(struct pipe_resource *ptex)
{
   return (struct fd_resource *)ptex;
}

static inline const struct fd_resource *
fd_resource_const(const struct pipe_resource *ptex)
{
   return (const struct fd_resource *)ptex;
}

static inline struct fd_memory_object *
fd_memory_object(struct pipe_memory_object *pmemobj)
{
   return (struct fd_memory_object *)pmemobj;
}

static inline bool
pending(struct fd_resource *rsc, bool write)
{
   /* if we have a pending GPU write, we are busy in any case: */
   if (rsc->track->write_batch)
      return true;

   /* if CPU wants to write, but we are pending a GPU read, we are busy: */
   if (write && rsc->track->batch_mask)
      return true;

   if (rsc->stencil && pending(rsc->stencil, write))
      return true;

   return false;
}
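
/* Usage sketch (illustrative, not from the upstream header): a typical
 * transfer_map-style caller combines pending() with resource_busy() below
 * to decide between flushing, stalling, or mapping directly, e.g.:
 *
 *    bool write = usage & PIPE_MAP_WRITE;
 *    if (pending(rsc, write)) {
 *       // unflushed rendering in a batch references the resource;
 *       // flush (or shadow the resource) before mapping
 *    } else if (resource_busy(rsc, write ? FD_BO_PREP_WRITE : FD_BO_PREP_READ)) {
 *       // the kernel still sees the bo busy; wait or shadow instead
 *    }
 */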

static inline bool
resource_busy(struct fd_resource *rsc, unsigned op)
{
   return fd_bo_cpu_prep(rsc->bo, NULL, op | FD_BO_PREP_NOSYNC) != 0;
}

int __fd_resource_wait(struct fd_context *ctx, struct fd_resource *rsc,
                       unsigned op, const char *func);
#define fd_resource_wait(ctx, rsc, op) \
   __fd_resource_wait(ctx, rsc, op, __func__)

static inline void
fd_resource_lock(struct fd_resource *rsc)
{
   simple_mtx_lock(&rsc->lock);
}

static inline void
fd_resource_unlock(struct fd_resource *rsc)
{
   simple_mtx_unlock(&rsc->lock);
}

static inline void
fd_resource_set_usage(struct pipe_resource *prsc, enum fd_dirty_3d_state usage)
{
   if (!prsc)
      return;
   struct fd_resource *rsc = fd_resource(prsc);
   /* Bits are only ever ORed in, and we expect many set_usage() per
    * resource, so do the quick check outside of the lock.
    */
   if (likely(rsc->dirty & usage))
      return;
   fd_resource_lock(rsc);
   rsc->dirty |= usage;
   fd_resource_unlock(rsc);
}
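
/* Usage sketch (illustrative, not from the upstream header): state-binding
 * code records which dirty bits a bound resource could later produce, e.g.
 * when binding a sampler view (FD_DIRTY_TEX assumed as the relevant bit):
 *
 *    fd_resource_set_usage(view->texture, FD_DIRTY_TEX);
 *
 * The unlocked early-out above is safe because bits are only ever ORed in;
 * the lock only serializes the read-modify-write of rsc->dirty itself.
 */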

static inline bool
has_depth(enum pipe_format format)
{
   const struct util_format_description *desc = util_format_description(format);
   return util_format_has_depth(desc);
}

struct fd_transfer {
   struct threaded_transfer b;
   struct pipe_resource *staging_prsc;
   struct pipe_box staging_box;
};

static inline struct fd_transfer *
fd_transfer(struct pipe_transfer *ptrans)
{
   return (struct fd_transfer *)ptrans;
}

static inline struct fdl_slice *
fd_resource_slice(struct fd_resource *rsc, unsigned level)
{
   assert(level <= rsc->b.b.last_level);
   return &rsc->layout.slices[level];
}

static inline uint32_t
fd_resource_layer_stride(struct fd_resource *rsc, unsigned level)
{
   return fdl_layer_stride(&rsc->layout, level);
}

/* get pitch (in bytes) for specified mipmap level */
static inline uint32_t
fd_resource_pitch(struct fd_resource *rsc, unsigned level)
{
   if (is_a2xx(fd_screen(rsc->b.b.screen)))
      return fdl2_pitch(&rsc->layout, level);

   return fdl_pitch(&rsc->layout, level);
}

/* get offset for specified mipmap level and texture/array layer */
static inline uint32_t
fd_resource_offset(struct fd_resource *rsc, unsigned level, unsigned layer)
{
   uint32_t offset = fdl_surface_offset(&rsc->layout, level, layer);
   debug_assert(offset < fd_bo_size(rsc->bo));
   return offset;
}

static inline uint32_t
fd_resource_ubwc_offset(struct fd_resource *rsc, unsigned level, unsigned layer)
{
   uint32_t offset = fdl_ubwc_offset(&rsc->layout, level, layer);
   debug_assert(offset < fd_bo_size(rsc->bo));
   return offset;
}
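
/* Worked example (illustrative, not from the upstream header): CPU-side
 * addressing of a texel row combines these helpers with fd_bo_map().
 * Assumes a linear level and some row index `y' within (level, layer):
 *
 *    uint8_t *base = (uint8_t *)fd_bo_map(rsc->bo);
 *    uint8_t *row  = base + fd_resource_offset(rsc, level, layer) +
 *                    y * fd_resource_pitch(rsc, level);
 */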

/* This might be a5xx specific, but higher mipmap levels are always linear: */
static inline bool
fd_resource_level_linear(const struct pipe_resource *prsc, int level)
{
   struct fd_screen *screen = fd_screen(prsc->screen);
   debug_assert(!is_a3xx(screen));

   return fdl_level_linear(&fd_resource_const(prsc)->layout, level);
}

static inline uint32_t
fd_resource_tile_mode(struct pipe_resource *prsc, int level)
{
   return fdl_tile_mode(&fd_resource(prsc)->layout, level);
}

static inline const char *
fd_resource_tile_mode_desc(const struct fd_resource *rsc, int level)
{
   return fdl_tile_mode_desc(&rsc->layout, level);
}

static inline bool
fd_resource_ubwc_enabled(struct fd_resource *rsc, int level)
{
   return fdl_ubwc_enabled(&rsc->layout, level);
}

/* access # of samples, with 0 normalized to 1 (which is what we care about
 * most of the time)
 */
static inline unsigned
fd_resource_nr_samples(struct pipe_resource *prsc)
{
   return MAX2(1, prsc->nr_samples);
}

void fd_resource_screen_init(struct pipe_screen *pscreen);
void fd_resource_context_init(struct pipe_context *pctx);

uint32_t fd_setup_slices(struct fd_resource *rsc);
void fd_resource_resize(struct pipe_resource *prsc, uint32_t sz);
void fd_replace_buffer_storage(struct pipe_context *ctx,
                               struct pipe_resource *dst,
                               struct pipe_resource *src,
                               unsigned num_rebinds,
                               uint32_t rebind_mask,
                               uint32_t delete_buffer_id) in_dt;
bool fd_resource_busy(struct pipe_screen *pscreen, struct pipe_resource *prsc,
                      unsigned usage);

void fd_resource_uncompress(struct fd_context *ctx,
                            struct fd_resource *rsc,
                            bool linear) assert_dt;
void fd_resource_dump(struct fd_resource *rsc, const char *name);

bool fd_render_condition_check(struct pipe_context *pctx) assert_dt;

static inline bool
fd_batch_references_resource(struct fd_batch *batch, struct fd_resource *rsc)
{
   return rsc->track->batch_mask & (1 << batch->idx);
}

static inline void
fd_batch_write_prep(struct fd_batch *batch, struct fd_resource *rsc) assert_dt
{
   if (unlikely(rsc->needs_ubwc_clear)) {
      batch->ctx->clear_ubwc(batch, rsc);
      rsc->needs_ubwc_clear = false;
   }
}

static inline void
fd_batch_resource_read(struct fd_batch *batch,
                       struct fd_resource *rsc) assert_dt
{
   /* Fast path: if we hit this then we know we don't have anyone else
    * writing to it (since both _write and _read flush other writers), and
    * that we've already recursed for stencil.
    */
   if (unlikely(!fd_batch_references_resource(batch, rsc)))
      fd_batch_resource_read_slowpath(batch, rsc);
}
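
/* Usage sketch (illustrative, not from the upstream header): draw-time
 * tracking typically preps resources that will be written and marks the
 * ones that will be read; assuming hypothetical `pfb'/`view' bindings:
 *
 *    fd_batch_write_prep(batch, fd_resource(pfb->cbufs[0]->texture));
 *    fd_batch_resource_read(batch, fd_resource(view->texture));
 *
 * The write-side tracking counterpart lives in freedreno_batch.h.
 */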

#endif /* FREEDRENO_RESOURCE_H_ */