GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/gallium/drivers/radeonsi/si_cp_reg_shadowing.c
/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "si_build_pm4.h"
#include "ac_debug.h"
#include "ac_shadowed_regs.h"
#include "util/u_memory.h"

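/* Emit a LOAD_*_REG packet that points the CP at one category of shadowed
 * registers (uconfig, context, or SH) inside the shadow buffer, so their
 * values can be reloaded from memory.
 */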
static void si_build_load_reg(struct si_screen *sscreen, struct si_pm4_state *pm4,
                              enum ac_reg_range_type type,
                              struct si_resource *shadow_regs)
{
   uint64_t gpu_address = shadow_regs->gpu_address;
   unsigned packet, num_ranges, offset;
   const struct ac_reg_range *ranges;

   ac_get_reg_ranges(sscreen->info.chip_class, sscreen->info.family,
                     type, &num_ranges, &ranges);

   switch (type) {
   case SI_REG_RANGE_UCONFIG:
      gpu_address += SI_SHADOWED_UCONFIG_REG_OFFSET;
      offset = CIK_UCONFIG_REG_OFFSET;
      packet = PKT3_LOAD_UCONFIG_REG;
      break;
   case SI_REG_RANGE_CONTEXT:
      gpu_address += SI_SHADOWED_CONTEXT_REG_OFFSET;
      offset = SI_CONTEXT_REG_OFFSET;
      packet = PKT3_LOAD_CONTEXT_REG;
      break;
   default:
      gpu_address += SI_SHADOWED_SH_REG_OFFSET;
      offset = SI_SH_REG_OFFSET;
      packet = PKT3_LOAD_SH_REG;
      break;
   }

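   /* Packet payload: shadow buffer address (lo dword, hi dword), then one
    * (register offset, count) pair per range, both in dwords.
    */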
   si_pm4_cmd_add(pm4, PKT3(packet, 1 + num_ranges * 2, 0));
   si_pm4_cmd_add(pm4, gpu_address);
   si_pm4_cmd_add(pm4, gpu_address >> 32);
   for (unsigned i = 0; i < num_ranges; i++) {
      si_pm4_cmd_add(pm4, (ranges[i].offset - offset) / 4);
      si_pm4_cmd_add(pm4, ranges[i].size / 4);
   }
}

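/* Build the IB preamble that turns on CP register shadowing: wait for idle,
 * flush caches, enable shadowing via CONTEXT_CONTROL, and load the initial
 * values of all shadowed register ranges from the shadow buffer.
 */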
static struct si_pm4_state *
si_create_shadowing_ib_preamble(struct si_context *sctx)
{
   struct si_pm4_state *pm4 = CALLOC_STRUCT(si_pm4_state);

   if (sctx->chip_class == GFX10) {
      /* SQ_NON_EVENT must be emitted before GE_PC_ALLOC is written. */
      si_pm4_cmd_add(pm4, PKT3(PKT3_EVENT_WRITE, 0, 0));
      si_pm4_cmd_add(pm4, EVENT_TYPE(V_028A90_SQ_NON_EVENT) | EVENT_INDEX(0));
   }

   if (sctx->screen->dpbb_allowed) {
      si_pm4_cmd_add(pm4, PKT3(PKT3_EVENT_WRITE, 0, 0));
      si_pm4_cmd_add(pm4, EVENT_TYPE(V_028A90_BREAK_BATCH) | EVENT_INDEX(0));
   }

   /* Wait for idle, because we'll update VGT ring pointers. */
   si_pm4_cmd_add(pm4, PKT3(PKT3_EVENT_WRITE, 0, 0));
   si_pm4_cmd_add(pm4, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));

   /* VGT_FLUSH is required even if VGT is idle. It resets VGT pointers. */
   si_pm4_cmd_add(pm4, PKT3(PKT3_EVENT_WRITE, 0, 0));
   si_pm4_cmd_add(pm4, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));

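   /* Flush and invalidate GPU caches with ACQUIRE_MEM over the full address
    * range. GFX10+ carries the cache actions in the extra GCR_CNTL dword;
    * GFX9 uses the legacy CP_COHER_CNTL flags instead.
    */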
   if (sctx->chip_class >= GFX10) {
      unsigned gcr_cntl = S_586_GL2_INV(1) | S_586_GL2_WB(1) |
                          S_586_GLM_INV(1) | S_586_GLM_WB(1) |
                          S_586_GL1_INV(1) | S_586_GLV_INV(1) |
                          S_586_GLK_INV(1) | S_586_GLI_INV(V_586_GLI_ALL);

      si_pm4_cmd_add(pm4, PKT3(PKT3_ACQUIRE_MEM, 6, 0));
      si_pm4_cmd_add(pm4, 0);          /* CP_COHER_CNTL */
      si_pm4_cmd_add(pm4, 0xffffffff); /* CP_COHER_SIZE */
      si_pm4_cmd_add(pm4, 0xffffff);   /* CP_COHER_SIZE_HI */
      si_pm4_cmd_add(pm4, 0);          /* CP_COHER_BASE */
      si_pm4_cmd_add(pm4, 0);          /* CP_COHER_BASE_HI */
      si_pm4_cmd_add(pm4, 0x0000000A); /* POLL_INTERVAL */
      si_pm4_cmd_add(pm4, gcr_cntl);   /* GCR_CNTL */
   } else if (sctx->chip_class == GFX9) {
      unsigned cp_coher_cntl = S_0301F0_SH_ICACHE_ACTION_ENA(1) |
                               S_0301F0_SH_KCACHE_ACTION_ENA(1) |
                               S_0301F0_TC_ACTION_ENA(1) |
                               S_0301F0_TCL1_ACTION_ENA(1) |
                               S_0301F0_TC_WB_ACTION_ENA(1);

      si_pm4_cmd_add(pm4, PKT3(PKT3_ACQUIRE_MEM, 5, 0));
      si_pm4_cmd_add(pm4, cp_coher_cntl); /* CP_COHER_CNTL */
      si_pm4_cmd_add(pm4, 0xffffffff);    /* CP_COHER_SIZE */
      si_pm4_cmd_add(pm4, 0xffffff);      /* CP_COHER_SIZE_HI */
      si_pm4_cmd_add(pm4, 0);             /* CP_COHER_BASE */
      si_pm4_cmd_add(pm4, 0);             /* CP_COHER_BASE_HI */
      si_pm4_cmd_add(pm4, 0x0000000A);    /* POLL_INTERVAL */
   } else {
      unreachable("invalid chip");
   }

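   /* PFP_SYNC_ME stalls the prefetch parser until the ME has processed the
    * packets above, so the register loads below can't overtake the flush.
    */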
   si_pm4_cmd_add(pm4, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
   si_pm4_cmd_add(pm4, 0);

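   /* CONTEXT_CONTROL enables both halves of the mechanism: the first dword
    * (CC0) selects which register classes the CP may load from memory, the
    * second (CC1) selects which ones it shadows back to memory.
    */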
   si_pm4_cmd_add(pm4, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
   si_pm4_cmd_add(pm4,
                  CC0_UPDATE_LOAD_ENABLES(1) |
                  CC0_LOAD_PER_CONTEXT_STATE(1) |
                  CC0_LOAD_CS_SH_REGS(1) |
                  CC0_LOAD_GFX_SH_REGS(1) |
                  CC0_LOAD_GLOBAL_UCONFIG(1));
   si_pm4_cmd_add(pm4,
                  CC1_UPDATE_SHADOW_ENABLES(1) |
                  CC1_SHADOW_PER_CONTEXT_STATE(1) |
                  CC1_SHADOW_CS_SH_REGS(1) |
                  CC1_SHADOW_GFX_SH_REGS(1) |
                  CC1_SHADOW_GLOBAL_UCONFIG(1));

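   /* Load the initial values of every shadowed register range. */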
   for (unsigned i = 0; i < SI_NUM_SHADOWED_REG_RANGES; i++)
      si_build_load_reg(sctx->screen, pm4, i, sctx->shadowed_regs);

   return pm4;
}

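/* Callback for ac_emulate_clear_state: write one consecutive run of context
 * registers.
 */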
static void si_set_context_reg_array(struct radeon_cmdbuf *cs, unsigned reg, unsigned num,
                                     const uint32_t *values)
{
   radeon_begin(cs);
   radeon_set_context_reg_seq(cs, reg, num);
   radeon_emit_array(cs, values, num);
   radeon_end();
}

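/* Enable CP register shadowing if mid-command-buffer preemption is enabled,
 * or if it is forced via the SHADOW_REGS debug flag.
 */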
void si_init_cp_reg_shadowing(struct si_context *sctx)
{
   if (sctx->screen->info.mid_command_buffer_preemption_enabled ||
       sctx->screen->debug_flags & DBG(SHADOW_REGS)) {
      sctx->shadowed_regs =
         si_aligned_buffer_create(sctx->b.screen,
                                  SI_RESOURCE_FLAG_UNMAPPABLE | SI_RESOURCE_FLAG_DRIVER_INTERNAL,
                                  PIPE_USAGE_DEFAULT,
                                  SI_SHADOWED_REG_BUFFER_SIZE,
                                  4096);
      if (!sctx->shadowed_regs)
         fprintf(stderr, "radeonsi: cannot create a shadowed_regs buffer\n");
   }

   si_init_cs_preamble_state(sctx, sctx->shadowed_regs != NULL);

   if (sctx->shadowed_regs) {
      /* We need to clear the shadowed reg buffer. */
      si_cp_dma_clear_buffer(sctx, &sctx->gfx_cs, &sctx->shadowed_regs->b.b,
                             0, sctx->shadowed_regs->bo_size, 0, SI_OP_SYNC_AFTER,
                             SI_COHERENCY_CP, L2_BYPASS);

      /* Create the shadowing preamble. */
      struct si_pm4_state *shadowing_preamble =
         si_create_shadowing_ib_preamble(sctx);

      /* Initialize the shadowed registers: emit the shadowing preamble,
       * replay CLEAR_STATE as plain register writes (so they get shadowed),
       * then emit the preamble state on top of it.
       */
      radeon_add_to_buffer_list(sctx, &sctx->gfx_cs, sctx->shadowed_regs,
                                RADEON_USAGE_READWRITE, RADEON_PRIO_DESCRIPTORS);
      si_pm4_emit(sctx, shadowing_preamble);
      ac_emulate_clear_state(&sctx->screen->info, &sctx->gfx_cs, si_set_context_reg_array);
      si_pm4_emit(sctx, sctx->cs_preamble_state);

      /* The register values are shadowed, so we won't need to set them again. */
      si_pm4_free_state(sctx, sctx->cs_preamble_state, ~0);
      sctx->cs_preamble_state = NULL;

      si_set_tracked_regs_to_clear_state(sctx);

      /* Set up preemption. The shadowing preamble will be executed as a preamble IB,
       * which will load register values from memory on a context switch.
       */
      sctx->ws->cs_setup_preemption(&sctx->gfx_cs, shadowing_preamble->pm4,
                                    shadowing_preamble->ndw);
      si_pm4_free_state(sctx, shadowing_preamble, ~0);
   }
}