Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/gallium/drivers/zink/zink_fence.c
4570 views
1
/*
2
* Copyright 2018 Collabora Ltd.
3
*
4
* Permission is hereby granted, free of charge, to any person obtaining a
5
* copy of this software and associated documentation files (the "Software"),
6
* to deal in the Software without restriction, including without limitation
7
* on the rights to use, copy, modify, merge, publish, distribute, sub
8
* license, and/or sell copies of the Software, and to permit persons to whom
9
* the Software is furnished to do so, subject to the following conditions:
10
*
11
* The above copyright notice and this permission notice (including the next
12
* paragraph) shall be included in all copies or substantial portions of the
13
* Software.
14
*
15
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18
* THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21
* USE OR OTHER DEALINGS IN THE SOFTWARE.
22
*/
23
24
#include "zink_batch.h"
25
#include "zink_context.h"
26
#include "zink_fence.h"
27
28
#include "zink_resource.h"
29
#include "zink_screen.h"
30
31
#include "util/set.h"
32
#include "util/u_memory.h"
33
34
static void
35
destroy_fence(struct zink_screen *screen, struct zink_tc_fence *mfence)
36
{
37
struct zink_batch_state *bs = zink_batch_state(mfence->fence);
38
mfence->fence = NULL;
39
zink_batch_state_reference(screen, &bs, NULL);
40
tc_unflushed_batch_token_reference(&mfence->tc_token, NULL);
41
FREE(mfence);
42
}
43
44
struct zink_tc_fence *
45
zink_create_tc_fence(void)
46
{
47
struct zink_tc_fence *mfence = CALLOC_STRUCT(zink_tc_fence);
48
if (!mfence)
49
return NULL;
50
pipe_reference_init(&mfence->reference, 1);
51
util_queue_fence_init(&mfence->ready);
52
return mfence;
53
}
54
55
struct pipe_fence_handle *
56
zink_create_tc_fence_for_tc(struct pipe_context *pctx, struct tc_unflushed_batch_token *tc_token)
57
{
58
struct zink_tc_fence *mfence = zink_create_tc_fence();
59
if (!mfence)
60
return NULL;
61
util_queue_fence_reset(&mfence->ready);
62
tc_unflushed_batch_token_reference(&mfence->tc_token, tc_token);
63
return (struct pipe_fence_handle*)mfence;
64
}
65
66
void
67
zink_fence_reference(struct zink_screen *screen,
68
struct zink_tc_fence **ptr,
69
struct zink_tc_fence *mfence)
70
{
71
if (pipe_reference(&(*ptr)->reference, &mfence->reference))
72
destroy_fence(screen, *ptr);
73
74
*ptr = mfence;
75
}
76
77
/* pipe_screen::fence_reference entry point: cast the opaque gallium handles
 * to zink types and forward to zink_fence_reference().
 */
static void
fence_reference(struct pipe_screen *pscreen,
                struct pipe_fence_handle **pptr,
                struct pipe_fence_handle *pfence)
{
   struct zink_screen *screen = zink_screen(pscreen);
   struct zink_tc_fence **ptr = (struct zink_tc_fence **)pptr;
   zink_fence_reference(screen, ptr, zink_tc_fence(pfence));
}
85
86
static bool
87
tc_fence_finish(struct zink_context *ctx, struct zink_tc_fence *mfence, uint64_t *timeout_ns)
88
{
89
if (!util_queue_fence_is_signalled(&mfence->ready)) {
90
int64_t abs_timeout = os_time_get_absolute_timeout(*timeout_ns);
91
if (mfence->tc_token) {
92
/* Ensure that zink_flush will be called for
93
* this mfence, but only if we're in the API thread
94
* where the context is current.
95
*
96
* Note that the batch containing the flush may already
97
* be in flight in the driver thread, so the mfence
98
* may not be ready yet when this call returns.
99
*/
100
threaded_context_flush(&ctx->base, mfence->tc_token, *timeout_ns == 0);
101
}
102
103
if (!timeout_ns)
104
return false;
105
106
/* this is a tc mfence, so we're just waiting on the queue mfence to complete
107
* after being signaled by the real mfence
108
*/
109
if (*timeout_ns == PIPE_TIMEOUT_INFINITE) {
110
util_queue_fence_wait(&mfence->ready);
111
} else {
112
if (!util_queue_fence_wait_timeout(&mfence->ready, abs_timeout))
113
return false;
114
}
115
if (*timeout_ns && *timeout_ns != PIPE_TIMEOUT_INFINITE) {
116
int64_t time_ns = os_time_get_nano();
117
*timeout_ns = abs_timeout > time_ns ? abs_timeout - time_ns : 0;
118
}
119
}
120
121
return true;
122
}
123
124
bool
125
zink_vkfence_wait(struct zink_screen *screen, struct zink_fence *fence, uint64_t timeout_ns)
126
{
127
if (screen->device_lost)
128
return true;
129
if (p_atomic_read(&fence->completed))
130
return true;
131
132
assert(fence->batch_id);
133
assert(fence->submitted);
134
135
bool success = false;
136
137
VkResult ret;
138
if (timeout_ns)
139
ret = vkWaitForFences(screen->dev, 1, &fence->fence, VK_TRUE, timeout_ns);
140
else
141
ret = vkGetFenceStatus(screen->dev, fence->fence);
142
success = zink_screen_handle_vkresult(screen, ret);
143
144
if (success) {
145
p_atomic_set(&fence->completed, true);
146
zink_batch_state(fence)->usage.usage = 0;
147
zink_screen_update_last_finished(screen, fence->batch_id);
148
}
149
return success;
150
}
151
152
/* Core fence-finish implementation: flush a deferred fence if needed, wait
 * for the threaded-context queue fence, then wait on the vk fence itself.
 *
 * Returns true when the fence is complete (or the wait is moot: lost device,
 * invalid flush, or the batch is known-finished), false on timeout.
 */
static bool
zink_fence_finish(struct zink_screen *screen, struct pipe_context *pctx, struct zink_tc_fence *mfence,
                  uint64_t timeout_ns)
{
   pctx = threaded_context_unwrap_sync(pctx);
   struct zink_context *ctx = zink_context(pctx);

   /* a lost device can never signal: report complete */
   if (screen->device_lost)
      return true;

   /* fence was deferred on this context: flush it now if it is still pending */
   if (pctx && mfence->deferred_ctx == pctx) {
      if (mfence->fence == ctx->deferred_fence) {
         zink_context(pctx)->batch.has_work = true;
         /* this must be the current batch */
         pctx->flush(pctx, NULL, !timeout_ns ? PIPE_FLUSH_ASYNC : 0);
         /* zero timeout: async flush kicked off, but nothing to wait on yet */
         if (!timeout_ns)
            return false;
      }
   }

   /* need to ensure the tc mfence has been flushed before we wait;
    * tc_fence_finish also decrements timeout_ns by the time spent waiting
    */
   bool tc_finish = tc_fence_finish(ctx, mfence, &timeout_ns);
   /* the submit thread hasn't finished yet */
   if (!tc_finish)
      return false;
   /* this was an invalid flush, just return completed */
   if (!mfence->fence)
      return true;

   struct zink_fence *fence = mfence->fence;

   /* batch states are reused; compare submit counts to detect reuse */
   unsigned submit_diff = zink_batch_state(mfence->fence)->submit_count - mfence->submit_count;
   /* this batch is known to have finished because it has been submitted more than 1 time
    * since the tc fence last saw it
    */
   if (submit_diff > 1)
      return true;

   /* cheap check against the screen's last-finished batch id before the vk wait */
   if (fence->submitted && zink_screen_check_last_finished(screen, fence->batch_id))
      return true;

   return zink_vkfence_wait(screen, fence, timeout_ns);
}
195
196
/* pipe_screen::fence_finish entry point: unwrap the gallium handles and
 * forward to zink_fence_finish().
 */
static bool
fence_finish(struct pipe_screen *pscreen, struct pipe_context *pctx,
             struct pipe_fence_handle *pfence, uint64_t timeout_ns)
{
   struct zink_screen *screen = zink_screen(pscreen);
   struct zink_tc_fence *mfence = zink_tc_fence(pfence);
   return zink_fence_finish(screen, pctx, mfence, timeout_ns);
}
203
204
/* Make this context wait server-side on @pfence: flush a fence deferred on
 * another context, then wait (infinitely) for the fence to finish.
 */
void
zink_fence_server_sync(struct pipe_context *pctx, struct pipe_fence_handle *pfence)
{
   struct zink_tc_fence *mfence = zink_tc_fence(pfence);

   /* fence deferred on this same context: it will be flushed in order anyway */
   if (pctx && mfence->deferred_ctx == pctx)
      return;

   if (mfence->deferred_ctx) {
      zink_context(pctx)->batch.has_work = true;
      /* this must be the current batch */
      pctx->flush(pctx, NULL, 0);
   }
   /* NOTE(review): pctx is NULL-checked above but dereferenced
    * unconditionally here (pctx->screen) — presumably callers never pass
    * NULL; confirm, or the guard above is misleading.
    */
   zink_fence_finish(zink_screen(pctx->screen), pctx, mfence, PIPE_TIMEOUT_INFINITE);
}
219
220
void
221
zink_screen_fence_init(struct pipe_screen *pscreen)
222
{
223
pscreen->fence_reference = fence_reference;
224
pscreen->fence_finish = fence_finish;
225
}
226
227