Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/virtio/vulkan/vn_ring.c
4560 views
1
/*
2
* Copyright 2021 Google LLC
3
* SPDX-License-Identifier: MIT
4
*/
5
6
#include "vn_ring.h"
7
8
#include "vn_renderer.h"
9
10
/* must be power-of-two */
11
#define VN_RING_BUFFER_SIZE (1u << 11)
12
#define VN_RING_BUFFER_MASK (VN_RING_BUFFER_SIZE - 1)
13
14
/* Flags stored in the shared-memory status word.
 *
 * VN_RING_STATUS_IDLE is checked by vn_ring_submit to decide whether the
 * caller must explicitly notify the renderer after a submission
 * (presumably the renderer sets it when it stops polling the ring —
 * confirm against the renderer side).
 */
enum vn_ring_status_flag {
   VN_RING_STATUS_IDLE = 1u << 0,
};
17
18
static uint32_t
vn_ring_load_head(const struct vn_ring *ring)
{
   /* Load the head (the renderer's read position) from shared memory.
    *
    * the renderer is expected to store the head with memory_order_release,
    * forming a release-acquire ordering
    */
   return atomic_load_explicit(ring->shared.head, memory_order_acquire);
}
26
27
static void
28
vn_ring_store_tail(struct vn_ring *ring)
29
{
30
/* the renderer is expected to load the tail with memory_order_acquire,
31
* forming a release-acquire ordering
32
*/
33
return atomic_store_explicit(ring->shared.tail, ring->cur,
34
memory_order_release);
35
}
36
37
static uint32_t
vn_ring_load_status(const struct vn_ring *ring)
{
   /* Load the renderer-owned status word (enum vn_ring_status_flag bits).
    *
    * this must be called and ordered after vn_ring_store_tail
    *
    * NOTE(review): presumably memory_order_seq_cst is used so this load
    * cannot be reordered before the preceding tail store — confirm
    */
   return atomic_load_explicit(ring->shared.status, memory_order_seq_cst);
}
43
44
static void
45
vn_ring_write_buffer(struct vn_ring *ring, const void *data, size_t size)
46
{
47
assert(ring->cur + size - vn_ring_load_head(ring) <= VN_RING_BUFFER_SIZE);
48
49
const size_t offset = ring->cur & VN_RING_BUFFER_MASK;
50
if (offset + size <= VN_RING_BUFFER_SIZE) {
51
memcpy(ring->shared.buffer + offset, data, size);
52
} else {
53
const size_t s = VN_RING_BUFFER_SIZE - offset;
54
memcpy(ring->shared.buffer + offset, data, s);
55
memcpy(ring->shared.buffer, data + s, size - s);
56
}
57
58
ring->cur += size;
59
}
60
61
static bool
vn_ring_ge_seqno(const struct vn_ring *ring, uint32_t a, uint32_t b)
{
   /* Return whether seqno a is greater than or equal to seqno b under
    * 32-bit wraparound, using ring->cur (the latest emitted seqno) as the
    * anchor: both branches below evaluate to true exactly when ring->cur
    * does NOT lie in the half-open modular interval [b, a).
    *
    * this can return false negative when not called fast enough (e.g., when
    * called once every couple hours), but following calls with larger a's
    * will correct itself
    *
    * TODO use real seqnos?
    */
   if (a >= b)
      return ring->cur >= a || ring->cur < b;
   else
      return ring->cur >= a && ring->cur < b;
}
75
76
static void
vn_ring_retire_submits(struct vn_ring *ring, uint32_t seqno)
{
   /* Retire pending submits whose seqnos have been reached by seqno
    * (typically a freshly observed ring head): release their shmem
    * references and move them to the free list for reuse by
    * vn_ring_get_submit.
    *
    * ring->submits is appended in submission order (see vn_ring_submit),
    * so stop at the first submit that has not been reached yet.
    */
   list_for_each_entry_safe(struct vn_ring_submit, submit, &ring->submits,
                            head) {
      if (!vn_ring_ge_seqno(ring, seqno, submit->seqno))
         break;

      /* drop the shmems kept alive for this submission */
      for (uint32_t i = 0; i < submit->shmem_count; i++)
         vn_renderer_shmem_unref(ring->renderer, submit->shmems[i]);

      list_del(&submit->head);
      list_add(&submit->head, &ring->free_submits);
   }
}
91
92
static uint32_t
vn_ring_wait_seqno(const struct vn_ring *ring, uint32_t seqno)
{
   /* Busy-wait until the ring head has passed seqno and return the last
    * observed head.
    *
    * A renderer wait incurs several hops and the renderer might poll
    * repeatedly anyway.  Let's just poll here, backing off via vn_relax.
    */
   uint32_t relax_iter = 0;
   while (true) {
      const uint32_t cur_head = vn_ring_load_head(ring);
      if (vn_ring_ge_seqno(ring, cur_head, seqno))
         return cur_head;

      vn_relax(&relax_iter);
   }
}
106
107
static uint32_t
108
vn_ring_wait_space(const struct vn_ring *ring, uint32_t size)
109
{
110
assert(size <= VN_RING_BUFFER_SIZE);
111
112
/* see the reasoning in vn_ring_wait_seqno */
113
uint32_t iter = 0;
114
do {
115
const uint32_t head = vn_ring_load_head(ring);
116
if (ring->cur + size - head <= VN_RING_BUFFER_SIZE)
117
return head;
118
vn_relax(&iter);
119
} while (true);
120
}
121
122
void
123
vn_ring_get_layout(size_t extra_size, struct vn_ring_layout *layout)
124
{
125
/* this can be changed/extended quite freely */
126
struct layout {
127
uint32_t head __attribute__((aligned(64)));
128
uint32_t tail __attribute__((aligned(64)));
129
uint32_t status __attribute__((aligned(64)));
130
131
uint8_t buffer[] __attribute__((aligned(64)));
132
};
133
const size_t buf_size = VN_RING_BUFFER_SIZE;
134
135
assert(buf_size && util_is_power_of_two_or_zero(buf_size));
136
137
layout->head_offset = offsetof(struct layout, head);
138
layout->tail_offset = offsetof(struct layout, tail);
139
layout->status_offset = offsetof(struct layout, status);
140
141
layout->buffer_offset = offsetof(struct layout, buffer);
142
layout->buffer_size = buf_size;
143
144
layout->extra_offset = layout->buffer_offset + layout->buffer_size;
145
layout->extra_size = extra_size;
146
147
layout->shmem_size = layout->extra_offset + layout->extra_size;
148
}
149
150
void
vn_ring_init(struct vn_ring *ring,
             struct vn_renderer *renderer,
             const struct vn_ring_layout *layout,
             void *shared)
{
   /* Initialize ring and carve the (zeroed) shared memory region into the
    * control words, ring buffer, and extra space described by layout (see
    * vn_ring_get_layout).
    */
   memset(ring, 0, sizeof(*ring));
   memset(shared, 0, layout->shmem_size);

   ring->renderer = renderer;

   /* pointers into the shared memory region */
   ring->shared.head = shared + layout->head_offset;
   ring->shared.tail = shared + layout->tail_offset;
   ring->shared.status = shared + layout->status_offset;
   ring->shared.buffer = shared + layout->buffer_offset;
   ring->shared.extra = shared + layout->extra_offset;

   list_inithead(&ring->submits);
   list_inithead(&ring->free_submits);
}
170
171
void
vn_ring_fini(struct vn_ring *ring)
{
   /* Retire everything up to the latest emitted seqno (ring->cur), which
    * must empty the pending list, then free all cached submits.
    */
   vn_ring_retire_submits(ring, ring->cur);
   assert(list_is_empty(&ring->submits));

   list_for_each_entry_safe(struct vn_ring_submit, submit,
                            &ring->free_submits, head)
      free(submit);
}
181
182
struct vn_ring_submit *
183
vn_ring_get_submit(struct vn_ring *ring, uint32_t shmem_count)
184
{
185
const uint32_t min_shmem_count = 2;
186
struct vn_ring_submit *submit;
187
188
/* TODO this could be simplified if we could omit shmem_count */
189
if (shmem_count <= min_shmem_count &&
190
!list_is_empty(&ring->free_submits)) {
191
submit =
192
list_first_entry(&ring->free_submits, struct vn_ring_submit, head);
193
list_del(&submit->head);
194
} else {
195
shmem_count = MAX2(shmem_count, min_shmem_count);
196
submit =
197
malloc(sizeof(*submit) + sizeof(submit->shmems[0]) * shmem_count);
198
}
199
200
return submit;
201
}
202
203
bool
vn_ring_submit(struct vn_ring *ring,
               struct vn_ring_submit *submit,
               const void *cs_data,
               size_t cs_size,
               uint32_t *seqno)
{
   /* Write a command stream into the ring and queue submit for later
    * retirement.  Stores the submission's seqno in *seqno and returns
    * true when the caller must notify the renderer (the renderer reported
    * itself idle).
    *
    * The statement order below is the synchronization protocol: write the
    * buffer, release-store the tail, then load the status (which must be
    * ordered after the tail store — see vn_ring_load_status).
    */
   /* reserve space; cur_seqno is the head observed while waiting */
   const uint32_t cur_seqno = vn_ring_wait_space(ring, cs_size);
   vn_ring_write_buffer(ring, cs_data, cs_size);
   vn_ring_store_tail(ring);
   const bool notify = vn_ring_load_status(ring) & VN_RING_STATUS_IDLE;

   /* opportunistically retire submits already passed by the observed head */
   vn_ring_retire_submits(ring, cur_seqno);

   /* ring->cur now points past this command stream */
   submit->seqno = ring->cur;
   list_addtail(&submit->head, &ring->submits);

   *seqno = submit->seqno;
   return notify;
}
223
224
/**
 * Wait (by polling) until the ring head has passed seqno.
 *
 * This is thread-safe.
 */
void
vn_ring_wait(const struct vn_ring *ring, uint32_t seqno)
{
   vn_ring_wait_seqno(ring, seqno);
}
232
233