GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/gallium/drivers/radeonsi/si_gpu_load.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/* The GPU load is measured as follows.
 *
 * There is a thread which samples the GRBM_STATUS register at a certain
 * frequency and the "busy" or "idle" counter is incremented based on
 * whether the GUI_ACTIVE bit is set or not.
 *
 * Then, the user can sample the counters twice and calculate the average
 * GPU load between the two samples.
 */
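/* A minimal usage sketch (not part of the original file) of that two-sample
 * scheme, assuming the caller already holds a struct si_screen pointer;
 * si_begin_counter() and si_end_counter() are defined at the end of this
 * file:
 *
 *    uint64_t begin = si_begin_counter(sscreen, SI_QUERY_GPU_LOAD);
 *    // ... let the GPU do some work ...
 *    unsigned load = si_end_counter(sscreen, SI_QUERY_GPU_LOAD, begin);
 *    // "load" is the average GPU load in percent over that interval.
 */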

#include "radeonsi/si_pipe.h"
#include "radeonsi/si_query.h"
#include "util/os_time.h"

/* For good accuracy at 1000 fps or lower. This will be inaccurate for higher
 * fps (there are too few samples per frame). */
#define SAMPLES_PER_SEC 10000
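/* At 10000 samples per second the sampling period is 1000000 / 10000 = 100 us,
 * i.e. roughly 10 samples per 1 ms frame at 1000 fps. */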

#define GRBM_STATUS 0x8010
#define TA_BUSY(x) (((x) >> 14) & 0x1)
#define GDS_BUSY(x) (((x) >> 15) & 0x1)
#define VGT_BUSY(x) (((x) >> 17) & 0x1)
#define IA_BUSY(x) (((x) >> 19) & 0x1)
#define SX_BUSY(x) (((x) >> 20) & 0x1)
#define WD_BUSY(x) (((x) >> 21) & 0x1)
#define SPI_BUSY(x) (((x) >> 22) & 0x1)
#define BCI_BUSY(x) (((x) >> 23) & 0x1)
#define SC_BUSY(x) (((x) >> 24) & 0x1)
#define PA_BUSY(x) (((x) >> 25) & 0x1)
#define DB_BUSY(x) (((x) >> 26) & 0x1)
#define CP_BUSY(x) (((x) >> 29) & 0x1)
#define CB_BUSY(x) (((x) >> 30) & 0x1)
#define GUI_ACTIVE(x) (((x) >> 31) & 0x1)

#define SRBM_STATUS2 0x0e4c
#define SDMA_BUSY(x) (((x) >> 5) & 0x1)

#define CP_STAT 0x8680
#define PFP_BUSY(x) (((x) >> 15) & 0x1)
#define MEQ_BUSY(x) (((x) >> 16) & 0x1)
#define ME_BUSY(x) (((x) >> 17) & 0x1)
#define SURFACE_SYNC_BUSY(x) (((x) >> 21) & 0x1)
#define DMA_BUSY(x) (((x) >> 22) & 0x1)
#define SCRATCH_RAM_BUSY(x) (((x) >> 24) & 0x1)

#define IDENTITY(x) x

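/* Used by si_update_mmio_counters(): evaluates the bit-test macro "mask"
 * against the local "value" register word and atomically bumps either the
 * busy or the idle counter of the given field. */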
#define UPDATE_COUNTER(field, mask) \
   do { \
      if (mask(value)) \
         p_atomic_inc(&counters->named.field.busy); \
      else \
         p_atomic_inc(&counters->named.field.idle); \
   } while (0)

static void si_update_mmio_counters(struct si_screen *sscreen, union si_mmio_counters *counters)
{
   uint32_t value = 0;
   bool gui_busy, sdma_busy = false;

   /* GRBM_STATUS */
   sscreen->ws->read_registers(sscreen->ws, GRBM_STATUS, 1, &value);

   UPDATE_COUNTER(ta, TA_BUSY);
   UPDATE_COUNTER(gds, GDS_BUSY);
   UPDATE_COUNTER(vgt, VGT_BUSY);
   UPDATE_COUNTER(ia, IA_BUSY);
   UPDATE_COUNTER(sx, SX_BUSY);
   UPDATE_COUNTER(wd, WD_BUSY);
   UPDATE_COUNTER(spi, SPI_BUSY);
   UPDATE_COUNTER(bci, BCI_BUSY);
   UPDATE_COUNTER(sc, SC_BUSY);
   UPDATE_COUNTER(pa, PA_BUSY);
   UPDATE_COUNTER(db, DB_BUSY);
   UPDATE_COUNTER(cp, CP_BUSY);
   UPDATE_COUNTER(cb, CB_BUSY);
   UPDATE_COUNTER(gui, GUI_ACTIVE);
   gui_busy = GUI_ACTIVE(value);

   if (sscreen->info.chip_class == GFX7 || sscreen->info.chip_class == GFX8) {
      /* SRBM_STATUS2 */
      sscreen->ws->read_registers(sscreen->ws, SRBM_STATUS2, 1, &value);

      UPDATE_COUNTER(sdma, SDMA_BUSY);
      sdma_busy = SDMA_BUSY(value);
   }

   if (sscreen->info.chip_class >= GFX8) {
      /* CP_STAT */
      sscreen->ws->read_registers(sscreen->ws, CP_STAT, 1, &value);

      UPDATE_COUNTER(pfp, PFP_BUSY);
      UPDATE_COUNTER(meq, MEQ_BUSY);
      UPDATE_COUNTER(me, ME_BUSY);
      UPDATE_COUNTER(surf_sync, SURFACE_SYNC_BUSY);
      UPDATE_COUNTER(cp_dma, DMA_BUSY);
      UPDATE_COUNTER(scratch_ram, SCRATCH_RAM_BUSY);
   }

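   /* Combined "GPU busy" state: the GPU counts as busy if either the graphics
    * pipeline (GUI_ACTIVE) or SDMA was busy in this sample. */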
   value = gui_busy || sdma_busy;
   UPDATE_COUNTER(gpu, IDENTITY);
}

#undef UPDATE_COUNTER

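/* Background sampling thread. Each iteration it nudges its sleep time up or
 * down so that the effective sampling rate converges on SAMPLES_PER_SEC. */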
static int si_gpu_load_thread(void *param)
{
   struct si_screen *sscreen = (struct si_screen *)param;
   const int period_us = 1000000 / SAMPLES_PER_SEC;
   int sleep_us = period_us;
   int64_t cur_time, last_time = os_time_get();

   while (!p_atomic_read(&sscreen->gpu_load_stop_thread)) {
      if (sleep_us)
         os_time_sleep(sleep_us);

      /* Make sure we sleep the ideal amount of time to match
       * the expected frequency. */
      cur_time = os_time_get();

      if (os_time_timeout(last_time, last_time + period_us, cur_time))
         sleep_us = MAX2(sleep_us - 1, 1);
      else
         sleep_us += 1;

      /*printf("Hz: %.1f\n", 1000000.0 / (cur_time - last_time));*/
      last_time = cur_time;

      /* Update the counters. */
      si_update_mmio_counters(sscreen, &sscreen->mmio_counters);
   }
   p_atomic_dec(&sscreen->gpu_load_stop_thread);
   return 0;
}

void si_gpu_load_kill_thread(struct si_screen *sscreen)
{
   if (!sscreen->gpu_load_thread)
      return;

   p_atomic_inc(&sscreen->gpu_load_stop_thread);
   thrd_join(sscreen->gpu_load_thread, NULL);
   sscreen->gpu_load_thread = 0;
}

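/* Lazily start the sampling thread (checked again under the mutex) and return
 * a snapshot of one busy/idle counter pair: busy in the low 32 bits, idle in
 * the high 32 bits. */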
static uint64_t si_read_mmio_counter(struct si_screen *sscreen, unsigned busy_index)
{
   /* Start the thread if needed. */
   if (!sscreen->gpu_load_thread) {
      simple_mtx_lock(&sscreen->gpu_load_mutex);
      /* Check again inside the mutex. */
      if (!sscreen->gpu_load_thread)
         sscreen->gpu_load_thread = u_thread_create(si_gpu_load_thread, sscreen);
      simple_mtx_unlock(&sscreen->gpu_load_mutex);
   }

   unsigned busy = p_atomic_read(&sscreen->mmio_counters.array[busy_index]);
   unsigned idle = p_atomic_read(&sscreen->mmio_counters.array[busy_index + 1]);

   return busy | ((uint64_t)idle << 32);
}

static unsigned si_end_mmio_counter(struct si_screen *sscreen, uint64_t begin, unsigned busy_index)
{
   uint64_t end = si_read_mmio_counter(sscreen, busy_index);
   unsigned busy = (end & 0xffffffff) - (begin & 0xffffffff);
   unsigned idle = (end >> 32) - (begin >> 32);

   /* Calculate the % of time the busy counter was being incremented.
    *
    * If no counters were incremented, return the current counter status.
    * It's for the case when the load is queried faster than
    * the counters are updated.
    */
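   /* For example, busy = 25 and idle = 75 over the interval yields
    * 25 * 100 / (25 + 75) = 25% load. */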
   if (idle || busy) {
      return busy * 100 / (busy + idle);
   } else {
      union si_mmio_counters counters;

      memset(&counters, 0, sizeof(counters));
      si_update_mmio_counters(sscreen, &counters);
      return counters.array[busy_index] ? 100 : 0;
   }
}

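/* Index of a field's busy counter within the flat counter array; the union
 * overlays the named per-block counters onto that array, so pointer
 * subtraction yields the array index. */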
#define BUSY_INDEX(sscreen, field) \
   (&sscreen->mmio_counters.named.field.busy - sscreen->mmio_counters.array)

static unsigned busy_index_from_type(struct si_screen *sscreen, unsigned type)
{
   switch (type) {
   case SI_QUERY_GPU_LOAD:
      return BUSY_INDEX(sscreen, gpu);
   case SI_QUERY_GPU_SHADERS_BUSY:
      return BUSY_INDEX(sscreen, spi);
   case SI_QUERY_GPU_TA_BUSY:
      return BUSY_INDEX(sscreen, ta);
   case SI_QUERY_GPU_GDS_BUSY:
      return BUSY_INDEX(sscreen, gds);
   case SI_QUERY_GPU_VGT_BUSY:
      return BUSY_INDEX(sscreen, vgt);
   case SI_QUERY_GPU_IA_BUSY:
      return BUSY_INDEX(sscreen, ia);
   case SI_QUERY_GPU_SX_BUSY:
      return BUSY_INDEX(sscreen, sx);
   case SI_QUERY_GPU_WD_BUSY:
      return BUSY_INDEX(sscreen, wd);
   case SI_QUERY_GPU_BCI_BUSY:
      return BUSY_INDEX(sscreen, bci);
   case SI_QUERY_GPU_SC_BUSY:
      return BUSY_INDEX(sscreen, sc);
   case SI_QUERY_GPU_PA_BUSY:
      return BUSY_INDEX(sscreen, pa);
   case SI_QUERY_GPU_DB_BUSY:
      return BUSY_INDEX(sscreen, db);
   case SI_QUERY_GPU_CP_BUSY:
      return BUSY_INDEX(sscreen, cp);
   case SI_QUERY_GPU_CB_BUSY:
      return BUSY_INDEX(sscreen, cb);
   case SI_QUERY_GPU_SDMA_BUSY:
      return BUSY_INDEX(sscreen, sdma);
   case SI_QUERY_GPU_PFP_BUSY:
      return BUSY_INDEX(sscreen, pfp);
   case SI_QUERY_GPU_MEQ_BUSY:
      return BUSY_INDEX(sscreen, meq);
   case SI_QUERY_GPU_ME_BUSY:
      return BUSY_INDEX(sscreen, me);
   case SI_QUERY_GPU_SURF_SYNC_BUSY:
      return BUSY_INDEX(sscreen, surf_sync);
   case SI_QUERY_GPU_CP_DMA_BUSY:
      return BUSY_INDEX(sscreen, cp_dma);
   case SI_QUERY_GPU_SCRATCH_RAM_BUSY:
      return BUSY_INDEX(sscreen, scratch_ram);
   default:
      unreachable("invalid query type");
   }
}

uint64_t si_begin_counter(struct si_screen *sscreen, unsigned type)
{
   unsigned busy_index = busy_index_from_type(sscreen, type);
   return si_read_mmio_counter(sscreen, busy_index);
}

unsigned si_end_counter(struct si_screen *sscreen, unsigned type, uint64_t begin)
{
   unsigned busy_index = busy_index_from_type(sscreen, type);
   return si_end_mmio_counter(sscreen, begin, busy_index);
}