GitHub Repository: torvalds/linux
Path: blob/master/drivers/accel/ivpu/ivpu_hw_ip.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2024 Intel Corporation
 */

#include "ivpu_drv.h"
#include "ivpu_fw.h"
#include "ivpu_gem.h"
#include "ivpu_hw.h"
#include "ivpu_hw_37xx_reg.h"
#include "ivpu_hw_40xx_reg.h"
#include "ivpu_hw_btrs.h"
#include "ivpu_hw_ip.h"
#include "ivpu_hw_reg_io.h"
#include "ivpu_mmu.h"
#include "ivpu_pm.h"

#define PWR_ISLAND_STATUS_TIMEOUT_US (5 * USEC_PER_MSEC)

#define TIM_SAFE_ENABLE 0xf1d0dead
#define TIM_WATCHDOG_RESET_VALUE 0xffffffff
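
/*
 * TIM_SAFE_ENABLE appears to act as a write-unlock token: it is written to
 * the TIM_SAFE register immediately before each watchdog register update
 * (see wdt_disable_37xx() and wdt_disable_40xx() below).
 */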

#define ICB_0_IRQ_MASK_37XX ((REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT)) | \
			     (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT)) | \
			     (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT)) | \
			     (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT)) | \
			     (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT)) | \
			     (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT)) | \
			     (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT)))

#define ICB_1_IRQ_MASK_37XX ((REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_2_INT)) | \
			     (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_3_INT)) | \
			     (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_4_INT)))

#define ICB_0_1_IRQ_MASK_37XX ((((u64)ICB_1_IRQ_MASK_37XX) << 32) | ICB_0_IRQ_MASK_37XX)

#define ICB_0_IRQ_MASK_40XX ((REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT)) | \
			     (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT)) | \
			     (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT)) | \
			     (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT)) | \
			     (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT)) | \
			     (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT)) | \
			     (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT)))

#define ICB_1_IRQ_MASK_40XX ((REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_2_INT)) | \
			     (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_3_INT)) | \
			     (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_4_INT)))

#define ICB_0_1_IRQ_MASK_40XX ((((u64)ICB_1_IRQ_MASK_40XX) << 32) | ICB_0_IRQ_MASK_40XX)

#define ITF_FIREWALL_VIOLATION_MASK_37XX ((REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_ROM_CMX)) | \
					  (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_DBG)) | \
					  (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_CTRL)) | \
					  (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, DEC400)) | \
					  (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_NCE)) | \
					  (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI)) | \
					  (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI_CMX)))

#define ITF_FIREWALL_VIOLATION_MASK_40XX ((REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, CSS_ROM_CMX)) | \
					  (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, CSS_DBG)) | \
					  (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, CSS_CTRL)) | \
					  (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, DEC400)) | \
					  (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, MSS_NCE)) | \
					  (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI)) | \
					  (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI_CMX)))

static int wait_for_ip_bar(struct ivpu_device *vdev)
{
	return REGV_POLL_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, AON, 0, 100);
}

static void host_ss_rst_clr(struct ivpu_device *vdev)
{
	u32 val = 0;

	val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, TOP_NOC, val);
	val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, DSS_MAS, val);
	val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, MSS_MAS, val);

	REGV_WR32(VPU_37XX_HOST_SS_CPR_RST_CLR, val);
}

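/*
 * NOC quiescence handshake helpers. The QREQN/QACCEPTN/QDENY registers
 * appear to follow the Arm Q-channel convention: the driver drives the
 * request (QREQN), then expects the accept (QACCEPTN) side to match the
 * requested state while the deny (QDENY) side stays 0. A mismatch or a
 * denied transition is reported as -EIO.
 */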
static int host_ss_noc_qreqn_check_37xx(struct ivpu_device *vdev, u32 exp_val)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QREQN);

	if (!REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, exp_val, val))
		return -EIO;

	return 0;
}

static int host_ss_noc_qreqn_check_40xx(struct ivpu_device *vdev, u32 exp_val)
{
	u32 val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QREQN);

	if (!REG_TEST_FLD_NUM(VPU_40XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, exp_val, val))
		return -EIO;

	return 0;
}

static int host_ss_noc_qreqn_check(struct ivpu_device *vdev, u32 exp_val)
{
	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
		return host_ss_noc_qreqn_check_37xx(vdev, exp_val);
	else
		return host_ss_noc_qreqn_check_40xx(vdev, exp_val);
}

static int host_ss_noc_qacceptn_check_37xx(struct ivpu_device *vdev, u32 exp_val)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QACCEPTN);

	if (!REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QACCEPTN, TOP_SOCMMIO, exp_val, val))
		return -EIO;

	return 0;
}

static int host_ss_noc_qacceptn_check_40xx(struct ivpu_device *vdev, u32 exp_val)
{
	u32 val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QACCEPTN);

	if (!REG_TEST_FLD_NUM(VPU_40XX_HOST_SS_NOC_QACCEPTN, TOP_SOCMMIO, exp_val, val))
		return -EIO;

	return 0;
}

static int host_ss_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
{
	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
		return host_ss_noc_qacceptn_check_37xx(vdev, exp_val);
	else
		return host_ss_noc_qacceptn_check_40xx(vdev, exp_val);
}

static int host_ss_noc_qdeny_check_37xx(struct ivpu_device *vdev, u32 exp_val)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QDENY);

	if (!REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QDENY, TOP_SOCMMIO, exp_val, val))
		return -EIO;

	return 0;
}

static int host_ss_noc_qdeny_check_40xx(struct ivpu_device *vdev, u32 exp_val)
{
	u32 val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QDENY);

	if (!REG_TEST_FLD_NUM(VPU_40XX_HOST_SS_NOC_QDENY, TOP_SOCMMIO, exp_val, val))
		return -EIO;

	return 0;
}

static int host_ss_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
{
	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
		return host_ss_noc_qdeny_check_37xx(vdev, exp_val);
	else
		return host_ss_noc_qdeny_check_40xx(vdev, exp_val);
}

static int top_noc_qreqn_check_37xx(struct ivpu_device *vdev, u32 exp_val)
{
	u32 val = REGV_RD32(VPU_37XX_TOP_NOC_QREQN);

	if (!REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QREQN, CPU_CTRL, exp_val, val) ||
	    !REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, exp_val, val))
		return -EIO;

	return 0;
}

static int top_noc_qreqn_check_40xx(struct ivpu_device *vdev, u32 exp_val)
{
	u32 val = REGV_RD32(VPU_40XX_TOP_NOC_QREQN);

	if (!REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QREQN, CPU_CTRL, exp_val, val) ||
	    !REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, exp_val, val))
		return -EIO;

	return 0;
}

static int top_noc_qreqn_check(struct ivpu_device *vdev, u32 exp_val)
{
	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
		return top_noc_qreqn_check_37xx(vdev, exp_val);
	else
		return top_noc_qreqn_check_40xx(vdev, exp_val);
}

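/*
 * Host subsystem bring-up. On 37xx this first waits for the IP bar to come
 * out of reset and clears the TOP_NOC/DSS_MAS/MSS_MAS resets; on all
 * generations it then verifies that the host-side NOC handshake is fully
 * idle (QREQN, QACCEPTN and QDENY all zero) before boot proceeds.
 */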
int ivpu_hw_ip_host_ss_configure(struct ivpu_device *vdev)
{
	int ret;

	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
		ret = wait_for_ip_bar(vdev);
		if (ret) {
			ivpu_err(vdev, "Timed out waiting for NPU IP bar\n");
			return ret;
		}
		host_ss_rst_clr(vdev);
	}

	ret = host_ss_noc_qreqn_check(vdev, 0x0);
	if (ret) {
		ivpu_err(vdev, "Failed qreqn check: %d\n", ret);
		return ret;
	}

	ret = host_ss_noc_qacceptn_check(vdev, 0x0);
	if (ret) {
		ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
		return ret;
	}

	ret = host_ss_noc_qdeny_check(vdev, 0x0);
	if (ret)
		ivpu_err(vdev, "Failed qdeny check: %d\n", ret);

	return ret;
}

static void idle_gen_drive_37xx(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_VPU_IDLE_GEN);

	if (enable)
		val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_VPU_IDLE_GEN, EN, val);
	else
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_VPU_IDLE_GEN, EN, val);

	REGV_WR32(VPU_37XX_HOST_SS_AON_VPU_IDLE_GEN, val);
}

static void idle_gen_drive_40xx(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_IDLE_GEN);

	if (enable)
		val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_IDLE_GEN, EN, val);
	else
		val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_IDLE_GEN, EN, val);

	REGV_WR32(VPU_40XX_HOST_SS_AON_IDLE_GEN, val);
}

void ivpu_hw_ip_idle_gen_enable(struct ivpu_device *vdev)
{
	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
		idle_gen_drive_37xx(vdev, true);
	else
		idle_gen_drive_40xx(vdev, true);
}

void ivpu_hw_ip_idle_gen_disable(struct ivpu_device *vdev)
{
	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
		idle_gen_drive_37xx(vdev, false);
	else
		idle_gen_drive_40xx(vdev, false);
}

static void
pwr_island_delay_set_50xx(struct ivpu_device *vdev, u32 post, u32 post1, u32 post2, u32 status)
{
	u32 val;

	val = REGV_RD32(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY);
	val = REG_SET_FLD_NUM(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY, POST_DLY, post, val);
	val = REG_SET_FLD_NUM(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY, POST1_DLY, post1, val);
	val = REG_SET_FLD_NUM(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY, POST2_DLY, post2, val);
	REGV_WR32(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY, val);

	val = REGV_RD32(VPU_50XX_HOST_SS_AON_PWR_ISLAND_STATUS_DLY);
	val = REG_SET_FLD_NUM(VPU_50XX_HOST_SS_AON_PWR_ISLAND_STATUS_DLY, STATUS_DLY, status, val);
	REGV_WR32(VPU_50XX_HOST_SS_AON_PWR_ISLAND_STATUS_DLY, val);
}

static void pwr_island_trickle_drive_37xx(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0);

	if (enable)
		val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val);
	else
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val);

	REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, val);
}

static void pwr_island_trickle_drive_40xx(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0);

	if (enable)
		val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, CSS_CPU, val);
	else
		val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, CSS_CPU, val);

	REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, val);
}

static void pwr_island_drive_37xx(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0);

	if (enable)
		val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val);
	else
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val);

	REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, val);
}

static void pwr_island_drive_40xx(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0);

	if (enable)
		val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, CSS_CPU, val);
	else
		val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, CSS_CPU, val);

	REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, val);
}

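/*
 * The trickle (weak) power switches are enabled first and given ~500 ns
 * before the main switches are turned on, presumably to limit inrush
 * current while the island rail charges.
 */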
static void pwr_island_enable(struct ivpu_device *vdev)
{
	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
		pwr_island_trickle_drive_37xx(vdev, true);
		ndelay(500);
		pwr_island_drive_37xx(vdev, true);
	} else {
		pwr_island_trickle_drive_40xx(vdev, true);
		ndelay(500);
		pwr_island_drive_40xx(vdev, true);
	}
}

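/*
 * With the punit_disabled workaround active the island status bit cannot be
 * relied on, so the poll is skipped and success is assumed.
 */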
static int wait_for_pwr_island_status(struct ivpu_device *vdev, u32 exp_val)
{
	if (IVPU_WA(punit_disabled))
		return 0;

	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
		return REGV_POLL_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_STATUS0, MSS_CPU, exp_val,
				     PWR_ISLAND_STATUS_TIMEOUT_US);
	else
		return REGV_POLL_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_STATUS0, CSS_CPU, exp_val,
				     PWR_ISLAND_STATUS_TIMEOUT_US);
}

static void pwr_island_isolation_drive_37xx(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0);

	if (enable)
		val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val);
	else
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val);

	REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, val);
}

static void pwr_island_isolation_drive_40xx(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0);

	if (enable)
		val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0, CSS_CPU, val);
	else
		val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0, CSS_CPU, val);

	REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0, val);
}

static void pwr_island_isolation_drive(struct ivpu_device *vdev, bool enable)
{
	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
		pwr_island_isolation_drive_37xx(vdev, enable);
	else
		pwr_island_isolation_drive_40xx(vdev, enable);
}

static void pwr_island_isolation_disable(struct ivpu_device *vdev)
{
	pwr_island_isolation_drive(vdev, false);
}

static void host_ss_clk_drive_37xx(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_SS_CPR_CLK_SET);

	if (enable) {
		val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, TOP_NOC, val);
		val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, DSS_MAS, val);
		val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, MSS_MAS, val);
	} else {
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, TOP_NOC, val);
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, DSS_MAS, val);
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, MSS_MAS, val);
	}

	REGV_WR32(VPU_37XX_HOST_SS_CPR_CLK_SET, val);
}

static void host_ss_clk_drive_40xx(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_40XX_HOST_SS_CPR_CLK_EN);

	if (enable) {
		val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, TOP_NOC, val);
		val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, DSS_MAS, val);
		val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, CSS_MAS, val);
	} else {
		val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, TOP_NOC, val);
		val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, DSS_MAS, val);
		val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, CSS_MAS, val);
	}

	REGV_WR32(VPU_40XX_HOST_SS_CPR_CLK_EN, val);
}

static void host_ss_clk_drive(struct ivpu_device *vdev, bool enable)
{
	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
		host_ss_clk_drive_37xx(vdev, enable);
	else
		host_ss_clk_drive_40xx(vdev, enable);
}

static void host_ss_clk_enable(struct ivpu_device *vdev)
{
	host_ss_clk_drive(vdev, true);
}

static void host_ss_rst_drive_37xx(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_SS_CPR_RST_SET);

	if (enable) {
		val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, TOP_NOC, val);
		val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, DSS_MAS, val);
		val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, MSS_MAS, val);
	} else {
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, TOP_NOC, val);
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, DSS_MAS, val);
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, MSS_MAS, val);
	}

	REGV_WR32(VPU_37XX_HOST_SS_CPR_RST_SET, val);
}

static void host_ss_rst_drive_40xx(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_40XX_HOST_SS_CPR_RST_EN);

	if (enable) {
		val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, TOP_NOC, val);
		val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, DSS_MAS, val);
		val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, CSS_MAS, val);
	} else {
		val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, TOP_NOC, val);
		val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, DSS_MAS, val);
		val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, CSS_MAS, val);
	}

	REGV_WR32(VPU_40XX_HOST_SS_CPR_RST_EN, val);
}

static void host_ss_rst_drive(struct ivpu_device *vdev, bool enable)
{
	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
		host_ss_rst_drive_37xx(vdev, enable);
	else
		host_ss_rst_drive_40xx(vdev, enable);
}

static void host_ss_rst_enable(struct ivpu_device *vdev)
{
	host_ss_rst_drive(vdev, true);
}

static void host_ss_noc_qreqn_top_socmmio_drive_37xx(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QREQN);

	if (enable)
		val = REG_SET_FLD(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
	else
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
	REGV_WR32(VPU_37XX_HOST_SS_NOC_QREQN, val);
}

static void host_ss_noc_qreqn_top_socmmio_drive_40xx(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QREQN);

	if (enable)
		val = REG_SET_FLD(VPU_40XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
	else
		val = REG_CLR_FLD(VPU_40XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
	REGV_WR32(VPU_40XX_HOST_SS_NOC_QREQN, val);
}

static void host_ss_noc_qreqn_top_socmmio_drive(struct ivpu_device *vdev, bool enable)
{
	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
		host_ss_noc_qreqn_top_socmmio_drive_37xx(vdev, enable);
	else
		host_ss_noc_qreqn_top_socmmio_drive_40xx(vdev, enable);
}

static int host_ss_axi_drive(struct ivpu_device *vdev, bool enable)
{
	int ret;

	host_ss_noc_qreqn_top_socmmio_drive(vdev, enable);

	ret = host_ss_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
	if (ret) {
		ivpu_err(vdev, "Failed HOST SS NOC QACCEPTN check: %d\n", ret);
		return ret;
	}

	ret = host_ss_noc_qdeny_check(vdev, 0x0);
	if (ret)
		ivpu_err(vdev, "Failed HOST SS NOC QDENY check: %d\n", ret);

	return ret;
}

static void top_noc_qreqn_drive_40xx(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_40XX_TOP_NOC_QREQN);

	if (enable) {
		val = REG_SET_FLD(VPU_40XX_TOP_NOC_QREQN, CPU_CTRL, val);
		val = REG_SET_FLD(VPU_40XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
	} else {
		val = REG_CLR_FLD(VPU_40XX_TOP_NOC_QREQN, CPU_CTRL, val);
		val = REG_CLR_FLD(VPU_40XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
	}

	REGV_WR32(VPU_40XX_TOP_NOC_QREQN, val);
}

static void top_noc_qreqn_drive_37xx(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_37XX_TOP_NOC_QREQN);

	if (enable) {
		val = REG_SET_FLD(VPU_37XX_TOP_NOC_QREQN, CPU_CTRL, val);
		val = REG_SET_FLD(VPU_37XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
	} else {
		val = REG_CLR_FLD(VPU_37XX_TOP_NOC_QREQN, CPU_CTRL, val);
		val = REG_CLR_FLD(VPU_37XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
	}

	REGV_WR32(VPU_37XX_TOP_NOC_QREQN, val);
}

static void top_noc_qreqn_drive(struct ivpu_device *vdev, bool enable)
{
	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
		top_noc_qreqn_drive_37xx(vdev, enable);
	else
		top_noc_qreqn_drive_40xx(vdev, enable);
}

int ivpu_hw_ip_host_ss_axi_enable(struct ivpu_device *vdev)
{
	return host_ss_axi_drive(vdev, true);
}

static int top_noc_qacceptn_check_37xx(struct ivpu_device *vdev, u32 exp_val)
{
	u32 val = REGV_RD32(VPU_37XX_TOP_NOC_QACCEPTN);

	if (!REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QACCEPTN, CPU_CTRL, exp_val, val) ||
	    !REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QACCEPTN, HOSTIF_L2CACHE, exp_val, val))
		return -EIO;

	return 0;
}

static int top_noc_qacceptn_check_40xx(struct ivpu_device *vdev, u32 exp_val)
{
	u32 val = REGV_RD32(VPU_40XX_TOP_NOC_QACCEPTN);

	if (!REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QACCEPTN, CPU_CTRL, exp_val, val) ||
	    !REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QACCEPTN, HOSTIF_L2CACHE, exp_val, val))
		return -EIO;

	return 0;
}

static int top_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
{
	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
		return top_noc_qacceptn_check_37xx(vdev, exp_val);
	else
		return top_noc_qacceptn_check_40xx(vdev, exp_val);
}

static int top_noc_qdeny_check_37xx(struct ivpu_device *vdev, u32 exp_val)
{
	u32 val = REGV_RD32(VPU_37XX_TOP_NOC_QDENY);

	if (!REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QDENY, CPU_CTRL, exp_val, val) ||
	    !REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QDENY, HOSTIF_L2CACHE, exp_val, val))
		return -EIO;

	return 0;
}

static int top_noc_qdeny_check_40xx(struct ivpu_device *vdev, u32 exp_val)
{
	u32 val = REGV_RD32(VPU_40XX_TOP_NOC_QDENY);

	if (!REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QDENY, CPU_CTRL, exp_val, val) ||
	    !REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QDENY, HOSTIF_L2CACHE, exp_val, val))
		return -EIO;

	return 0;
}

static int top_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
{
	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
		return top_noc_qdeny_check_37xx(vdev, exp_val);
	else
		return top_noc_qdeny_check_40xx(vdev, exp_val);
}

static int top_noc_drive(struct ivpu_device *vdev, bool enable)
{
	int ret;

	top_noc_qreqn_drive(vdev, enable);

	ret = top_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
	if (ret) {
		ivpu_err(vdev, "Failed TOP NOC QACCEPTN check: %d\n", ret);
		return ret;
	}

	ret = top_noc_qdeny_check(vdev, 0x0);
	if (ret)
		ivpu_err(vdev, "Failed TOP NOC QDENY check: %d\n", ret);

	return ret;
}

int ivpu_hw_ip_top_noc_enable(struct ivpu_device *vdev)
{
	return top_noc_drive(vdev, true);
}

static void dpu_active_drive_37xx(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_DPU_ACTIVE);

	if (enable)
		val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val);
	else
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val);

	REGV_WR32(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, val);
}

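/*
 * Power island delays are only programmable on 50xx and newer IP. The
 * values depend on the PCI device ID and on whether the PLL runs at the
 * high profiling frequency.
 */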
static void pwr_island_delay_set(struct ivpu_device *vdev)
{
	bool high = vdev->hw->pll.profiling_freq == PLL_PROFILING_FREQ_HIGH;
	u32 post, post1, post2, status;

	if (ivpu_hw_ip_gen(vdev) < IVPU_HW_IP_50XX)
		return;

	switch (ivpu_device_id(vdev)) {
	case PCI_DEVICE_ID_WCL:
	case PCI_DEVICE_ID_PTL_P:
		post = high ? 18 : 0;
		post1 = 0;
		post2 = 0;
		status = high ? 46 : 3;
		break;

	case PCI_DEVICE_ID_NVL:
		post = high ? 198 : 17;
		post1 = 0;
		post2 = high ? 198 : 17;
		status = 0;
		break;

	default:
		dump_stack();
		ivpu_err(vdev, "Unknown device ID\n");
		return;
	}

	pwr_island_delay_set_50xx(vdev, post, post1, post2, status);
}

int ivpu_hw_ip_pwr_domain_enable(struct ivpu_device *vdev)
{
	int ret;

	pwr_island_delay_set(vdev);
	pwr_island_enable(vdev);

	ret = wait_for_pwr_island_status(vdev, 0x1);
	if (ret) {
		ivpu_err(vdev, "Timed out waiting for power island status\n");
		return ret;
	}

	ret = top_noc_qreqn_check(vdev, 0x0);
	if (ret) {
		ivpu_err(vdev, "Failed TOP NOC QREQN check: %d\n", ret);
		return ret;
	}

	host_ss_clk_enable(vdev);
	pwr_island_isolation_disable(vdev);
	host_ss_rst_enable(vdev);

	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
		dpu_active_drive_37xx(vdev, true);

	return ret;
}

u64 ivpu_hw_ip_read_perf_timer_counter(struct ivpu_device *vdev)
{
	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
		return REGV_RD64(VPU_37XX_CPU_SS_TIM_PERF_FREE_CNT);
	else
		return REGV_RD64(VPU_40XX_CPU_SS_TIM_PERF_EXT_FREE_CNT);
}

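/*
 * Page-table-walker snoop overrides. Note the inverted field polarity
 * between generations: 37xx exposes *_NOSNOOP_OVERRIDE fields (a cleared
 * field keeps snooping enabled), while 40xx exposes *_SNOOP_OVERRIDE fields
 * (a set field keeps snooping enabled).
 */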
static void ivpu_hw_ip_snoop_disable_37xx(struct ivpu_device *vdev)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES);

	val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, NOSNOOP_OVERRIDE_EN, val);
	val = REG_CLR_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AW_NOSNOOP_OVERRIDE, val);

	if (ivpu_is_force_snoop_enabled(vdev))
		val = REG_CLR_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AR_NOSNOOP_OVERRIDE, val);
	else
		val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AR_NOSNOOP_OVERRIDE, val);

	REGV_WR32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, val);
}

static void ivpu_hw_ip_snoop_disable_40xx(struct ivpu_device *vdev)
{
	u32 val = REGV_RD32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES);

	val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, SNOOP_OVERRIDE_EN, val);
	val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AW_SNOOP_OVERRIDE, val);

	if (ivpu_is_force_snoop_enabled(vdev))
		val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AR_SNOOP_OVERRIDE, val);
	else
		val = REG_CLR_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AR_SNOOP_OVERRIDE, val);

	REGV_WR32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, val);
}

void ivpu_hw_ip_snoop_disable(struct ivpu_device *vdev)
{
	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
		return ivpu_hw_ip_snoop_disable_37xx(vdev);
	else
		return ivpu_hw_ip_snoop_disable_40xx(vdev);
}

static void ivpu_hw_ip_tbu_mmu_enable_37xx(struct ivpu_device *vdev)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_IF_TBU_MMUSSIDV);

	val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU0_AWMMUSSIDV, val);
	val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU0_ARMMUSSIDV, val);
	val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU2_AWMMUSSIDV, val);
	val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU2_ARMMUSSIDV, val);

	REGV_WR32(VPU_37XX_HOST_IF_TBU_MMUSSIDV, val);
}

static void ivpu_hw_ip_tbu_mmu_enable_40xx(struct ivpu_device *vdev)
{
	u32 val = REGV_RD32(VPU_40XX_HOST_IF_TBU_MMUSSIDV);

	val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU0_AWMMUSSIDV, val);
	val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU0_ARMMUSSIDV, val);
	val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU1_AWMMUSSIDV, val);
	val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU1_ARMMUSSIDV, val);
	val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU2_AWMMUSSIDV, val);
	val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU2_ARMMUSSIDV, val);

	REGV_WR32(VPU_40XX_HOST_IF_TBU_MMUSSIDV, val);
}

void ivpu_hw_ip_tbu_mmu_enable(struct ivpu_device *vdev)
{
	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
		return ivpu_hw_ip_tbu_mmu_enable_37xx(vdev);
	else
		return ivpu_hw_ip_tbu_mmu_enable_40xx(vdev);
}

static inline u64 get_entry_point_addr(struct ivpu_device *vdev)
{
	if (ivpu_fw_is_warm_boot(vdev))
		return vdev->fw->warm_boot_entry_point;
	else
		return vdev->fw->cold_boot_entry_point;
}

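/*
 * On 37xx the boot entry point is written shifted right by 9, i.e. as a
 * 512-byte-aligned address, which leaves the low bits of
 * LOADING_ADDRESS_LO free for control flags such as DONE.
 */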
static int soc_cpu_boot_37xx(struct ivpu_device *vdev)
{
	u32 val;

	val = REGV_RD32(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC);
	val = REG_SET_FLD(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RSTRUN0, val);

	val = REG_CLR_FLD(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RSTVEC, val);
	REGV_WR32(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);

	val = REG_SET_FLD(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RESUME0, val);
	REGV_WR32(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);

	val = REG_CLR_FLD(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RESUME0, val);
	REGV_WR32(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);

	val = get_entry_point_addr(vdev) >> 9;
	REGV_WR32(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, val);

	val = REG_SET_FLD(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, DONE, val);
	REGV_WR32(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, val);

	return 0;
}

static int cpu_noc_qacceptn_check_40xx(struct ivpu_device *vdev, u32 exp_val)
{
	u32 val = REGV_RD32(VPU_40XX_CPU_SS_CPR_NOC_QACCEPTN);

	if (!REG_TEST_FLD_NUM(VPU_40XX_CPU_SS_CPR_NOC_QACCEPTN, TOP_MMIO, exp_val, val))
		return -EIO;

	return 0;
}

static int cpu_noc_qdeny_check_40xx(struct ivpu_device *vdev, u32 exp_val)
{
	u32 val = REGV_RD32(VPU_40XX_CPU_SS_CPR_NOC_QDENY);

	if (!REG_TEST_FLD_NUM(VPU_40XX_CPU_SS_CPR_NOC_QDENY, TOP_MMIO, exp_val, val))
		return -EIO;

	return 0;
}

static void cpu_noc_top_mmio_drive_40xx(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_40XX_CPU_SS_CPR_NOC_QREQN);

	if (enable)
		val = REG_SET_FLD(VPU_40XX_CPU_SS_CPR_NOC_QREQN, TOP_MMIO, val);
	else
		val = REG_CLR_FLD(VPU_40XX_CPU_SS_CPR_NOC_QREQN, TOP_MMIO, val);
	REGV_WR32(VPU_40XX_CPU_SS_CPR_NOC_QREQN, val);
}

static int soc_cpu_drive_40xx(struct ivpu_device *vdev, bool enable)
{
	int ret;

	cpu_noc_top_mmio_drive_40xx(vdev, enable);

	ret = cpu_noc_qacceptn_check_40xx(vdev, enable ? 0x1 : 0x0);
	if (ret) {
		ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
		return ret;
	}

	ret = cpu_noc_qdeny_check_40xx(vdev, 0x0);
	if (ret)
		ivpu_err(vdev, "Failed qdeny check: %d\n", ret);

	return ret;
}

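/*
 * ffs(mask) - 1 is the bit position of the field's least significant bit,
 * so the shift below places the entry point into the IMAGE_LOCATION field
 * while DONE remains a separately settable flag.
 */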
static void soc_cpu_set_entry_point_40xx(struct ivpu_device *vdev, u64 entry_point)
{
	u64 val64;
	u32 val;

	val64 = entry_point;
	val64 <<= ffs(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO_IMAGE_LOCATION_MASK) - 1;
	REGV_WR64(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO, val64);

	val = REGV_RD32(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO);
	val = REG_SET_FLD(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO, DONE, val);
	REGV_WR32(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO, val);
}

static int soc_cpu_boot_40xx(struct ivpu_device *vdev)
{
	int ret;

	ret = soc_cpu_drive_40xx(vdev, true);
	if (ret) {
		ivpu_err(vdev, "Failed to enable SOC CPU: %d\n", ret);
		return ret;
	}

	soc_cpu_set_entry_point_40xx(vdev, get_entry_point_addr(vdev));

	return 0;
}

static int soc_cpu_boot_60xx(struct ivpu_device *vdev)
{
	REGV_WR64(VPU_40XX_HOST_SS_AON_RETENTION1, vdev->fw->mem_bp->vpu_addr);
	soc_cpu_set_entry_point_40xx(vdev, vdev->fw->cold_boot_entry_point);

	return 0;
}

int ivpu_hw_ip_soc_cpu_boot(struct ivpu_device *vdev)
{
	int ret;

	switch (ivpu_hw_ip_gen(vdev)) {
	case IVPU_HW_IP_37XX:
		ret = soc_cpu_boot_37xx(vdev);
		break;

	case IVPU_HW_IP_40XX:
	case IVPU_HW_IP_50XX:
		ret = soc_cpu_boot_40xx(vdev);
		break;

	default:
		ret = soc_cpu_boot_60xx(vdev);
	}

	if (ret)
		return ret;

	ivpu_dbg(vdev, PM, "Booting firmware, mode: %s\n",
		 ivpu_fw_is_warm_boot(vdev) ? "warm boot" : "cold boot");

	return 0;
}

static void wdt_disable_37xx(struct ivpu_device *vdev)
{
	u32 val;

	/* Enable writing and set non-zero WDT value */
	REGV_WR32(VPU_37XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
	REGV_WR32(VPU_37XX_CPU_SS_TIM_WATCHDOG, TIM_WATCHDOG_RESET_VALUE);

	/* Enable writing and disable watchdog timer */
	REGV_WR32(VPU_37XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
	REGV_WR32(VPU_37XX_CPU_SS_TIM_WDOG_EN, 0);

	/* Now clear the timeout interrupt */
	val = REGV_RD32(VPU_37XX_CPU_SS_TIM_GEN_CONFIG);
	val = REG_CLR_FLD(VPU_37XX_CPU_SS_TIM_GEN_CONFIG, WDOG_TO_INT_CLR, val);
	REGV_WR32(VPU_37XX_CPU_SS_TIM_GEN_CONFIG, val);
}

static void wdt_disable_40xx(struct ivpu_device *vdev)
{
	u32 val;

	REGV_WR32(VPU_40XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
	REGV_WR32(VPU_40XX_CPU_SS_TIM_WATCHDOG, TIM_WATCHDOG_RESET_VALUE);

	REGV_WR32(VPU_40XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
	REGV_WR32(VPU_40XX_CPU_SS_TIM_WDOG_EN, 0);

	val = REGV_RD32(VPU_40XX_CPU_SS_TIM_GEN_CONFIG);
	val = REG_CLR_FLD(VPU_40XX_CPU_SS_TIM_GEN_CONFIG, WDOG_TO_INT_CLR, val);
	REGV_WR32(VPU_40XX_CPU_SS_TIM_GEN_CONFIG, val);
}

void ivpu_hw_ip_wdt_disable(struct ivpu_device *vdev)
{
	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
		return wdt_disable_37xx(vdev);
	else
		return wdt_disable_40xx(vdev);
}

static u32 ipc_rx_count_get_37xx(struct ivpu_device *vdev)
{
	u32 count = readl(vdev->regv + VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT);

	return REG_GET_FLD(VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT, FILL_LEVEL, count);
}

static u32 ipc_rx_count_get_40xx(struct ivpu_device *vdev)
{
	u32 count = readl(vdev->regv + VPU_40XX_HOST_SS_TIM_IPC_FIFO_STAT);

	return REG_GET_FLD(VPU_40XX_HOST_SS_TIM_IPC_FIFO_STAT, FILL_LEVEL, count);
}

u32 ivpu_hw_ip_ipc_rx_count_get(struct ivpu_device *vdev)
{
	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
		return ipc_rx_count_get_37xx(vdev);
	else
		return ipc_rx_count_get_40xx(vdev);
}

void ivpu_hw_ip_irq_enable(struct ivpu_device *vdev)
{
	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
		REGV_WR32(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, ITF_FIREWALL_VIOLATION_MASK_37XX);
		REGV_WR64(VPU_37XX_HOST_SS_ICB_ENABLE_0, ICB_0_1_IRQ_MASK_37XX);
	} else {
		REGV_WR32(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, ITF_FIREWALL_VIOLATION_MASK_40XX);
		REGV_WR64(VPU_40XX_HOST_SS_ICB_ENABLE_0, ICB_0_1_IRQ_MASK_40XX);
	}
}

void ivpu_hw_ip_irq_disable(struct ivpu_device *vdev)
{
	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
		REGV_WR64(VPU_37XX_HOST_SS_ICB_ENABLE_0, 0x0ull);
		REGV_WR32(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, 0x0);
	} else {
		REGV_WR64(VPU_40XX_HOST_SS_ICB_ENABLE_0, 0x0ull);
		REGV_WR32(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, 0x0ul);
	}
}

static void diagnose_failure_37xx(struct ivpu_device *vdev)
{
	u32 reg = REGV_RD32(VPU_37XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK_37XX;

	if (ipc_rx_count_get_37xx(vdev))
		ivpu_err(vdev, "IPC FIFO queue not empty, missed IPC IRQ\n");

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, reg))
		ivpu_err(vdev, "WDT MSS timeout detected\n");

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, reg))
		ivpu_err(vdev, "WDT NCE timeout detected\n");

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, reg))
		ivpu_err(vdev, "NOC Firewall IRQ detected\n");
}

static void diagnose_failure_40xx(struct ivpu_device *vdev)
{
	u32 reg = REGV_RD32(VPU_40XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK_40XX;

	if (ipc_rx_count_get_40xx(vdev))
		ivpu_err(vdev, "IPC FIFO queue not empty, missed IPC IRQ\n");

	if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, reg))
		ivpu_err(vdev, "WDT MSS timeout detected\n");

	if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, reg))
		ivpu_err(vdev, "WDT NCE timeout detected\n");

	if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, reg))
		ivpu_err(vdev, "NOC Firewall IRQ detected\n");
}

void ivpu_hw_ip_diagnose_failure(struct ivpu_device *vdev)
{
	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
		diagnose_failure_37xx(vdev);
	else
		diagnose_failure_40xx(vdev);
}

void ivpu_hw_ip_irq_clear(struct ivpu_device *vdev)
{
	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
		REGV_WR64(VPU_37XX_HOST_SS_ICB_CLEAR_0, ICB_0_1_IRQ_MASK_37XX);
	else
		REGV_WR64(VPU_40XX_HOST_SS_ICB_CLEAR_0, ICB_0_1_IRQ_MASK_40XX);
}

static void irq_wdt_nce_handler(struct ivpu_device *vdev)
{
	ivpu_pm_trigger_recovery(vdev, "WDT NCE IRQ");
}

static void irq_wdt_mss_handler(struct ivpu_device *vdev)
{
	ivpu_hw_ip_wdt_disable(vdev);
	ivpu_pm_trigger_recovery(vdev, "WDT MSS IRQ");
}

static void irq_noc_firewall_handler(struct ivpu_device *vdev)
{
	atomic_inc(&vdev->hw->firewall_irq_counter);

	ivpu_dbg(vdev, IRQ, "NOC Firewall interrupt detected, counter %d\n",
		 atomic_read(&vdev->hw->firewall_irq_counter));
}

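/*
 * In both handlers below the latched ICB status bits are cleared before the
 * individual sources are dispatched, so an event that fires while a handler
 * runs re-latches the status and is picked up on the next invocation.
 */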
/* Handler for IRQs from NPU core */
bool ivpu_hw_ip_irq_handler_37xx(struct ivpu_device *vdev, int irq)
{
	u32 status = REGV_RD32(VPU_37XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK_37XX;

	if (!status)
		return false;

	REGV_WR32(VPU_37XX_HOST_SS_ICB_CLEAR_0, status);

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT, status))
		ivpu_mmu_irq_evtq_handler(vdev);

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status))
		ivpu_ipc_irq_handler(vdev);

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status))
		ivpu_dbg(vdev, IRQ, "MMU sync complete\n");

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT, status))
		ivpu_mmu_irq_gerr_handler(vdev);

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, status))
		irq_wdt_mss_handler(vdev);

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, status))
		irq_wdt_nce_handler(vdev);

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, status))
		irq_noc_firewall_handler(vdev);

	return true;
}

/* Handler for IRQs from NPU core */
bool ivpu_hw_ip_irq_handler_40xx(struct ivpu_device *vdev, int irq)
{
	u32 status = REGV_RD32(VPU_40XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK_40XX;

	if (!status)
		return false;

	REGV_WR32(VPU_40XX_HOST_SS_ICB_CLEAR_0, status);

	if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT, status))
		ivpu_mmu_irq_evtq_handler(vdev);

	if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status))
		ivpu_ipc_irq_handler(vdev);

	if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status))
		ivpu_dbg(vdev, IRQ, "MMU sync complete\n");

	if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT, status))
		ivpu_mmu_irq_gerr_handler(vdev);

	if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, status))
		irq_wdt_mss_handler(vdev);

	if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, status))
		irq_wdt_nce_handler(vdev);

	if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, status))
		irq_noc_firewall_handler(vdev);

	return true;
}

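/*
 * The doorbell registers form a contiguous, uniformly strided array: the
 * stride is derived from two consecutive register offsets and REGV_WR32I()
 * writes the db_id-th instance.
 */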
static void db_set_37xx(struct ivpu_device *vdev, u32 db_id)
{
	u32 reg_stride = VPU_37XX_CPU_SS_DOORBELL_1 - VPU_37XX_CPU_SS_DOORBELL_0;
	u32 val = REG_FLD(VPU_37XX_CPU_SS_DOORBELL_0, SET);

	REGV_WR32I(VPU_37XX_CPU_SS_DOORBELL_0, reg_stride, db_id, val);
}

static void db_set_40xx(struct ivpu_device *vdev, u32 db_id)
{
	u32 reg_stride = VPU_40XX_CPU_SS_DOORBELL_1 - VPU_40XX_CPU_SS_DOORBELL_0;
	u32 val = REG_FLD(VPU_40XX_CPU_SS_DOORBELL_0, SET);

	REGV_WR32I(VPU_40XX_CPU_SS_DOORBELL_0, reg_stride, db_id, val);
}

void ivpu_hw_ip_db_set(struct ivpu_device *vdev, u32 db_id)
{
	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
		db_set_37xx(vdev, db_id);
	else
		db_set_40xx(vdev, db_id);
}

u32 ivpu_hw_ip_ipc_rx_addr_get(struct ivpu_device *vdev)
{
	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
		return REGV_RD32(VPU_37XX_HOST_SS_TIM_IPC_FIFO_ATM);
	else
		return REGV_RD32(VPU_40XX_HOST_SS_TIM_IPC_FIFO_ATM);
}

void ivpu_hw_ip_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr)
{
	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
		REGV_WR32(VPU_37XX_CPU_SS_TIM_IPC_FIFO, vpu_addr);
	else
		REGV_WR32(VPU_40XX_CPU_SS_TIM_IPC_FIFO, vpu_addr);
}