Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/drivers/accel/ivpu/ivpu_hw_ip.c
26428 views
1
// SPDX-License-Identifier: GPL-2.0-only
2
/*
3
* Copyright (C) 2020-2024 Intel Corporation
4
*/
5
6
#include "ivpu_drv.h"
7
#include "ivpu_fw.h"
8
#include "ivpu_hw.h"
9
#include "ivpu_hw_37xx_reg.h"
10
#include "ivpu_hw_40xx_reg.h"
11
#include "ivpu_hw_btrs.h"
12
#include "ivpu_hw_ip.h"
13
#include "ivpu_hw_reg_io.h"
14
#include "ivpu_mmu.h"
15
#include "ivpu_pm.h"
16
17
/* Max time to poll for the power island status to reach the expected value */
#define PWR_ISLAND_STATUS_TIMEOUT_US (5 * USEC_PER_MSEC)

/* Magic key that unlocks writes to the watchdog timer registers */
#define TIM_SAFE_ENABLE 0xf1d0dead
/* Non-zero reload value written while the watchdog is being disabled */
#define TIM_WATCHDOG_RESET_VALUE 0xffffffff
22
/* IP interrupt sources handled by the driver, ICB status register 0 (37xx) */
#define ICB_0_IRQ_MASK_37XX ((REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT)) | \
			     (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT)) | \
			     (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT)) | \
			     (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT)) | \
			     (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT)) | \
			     (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT)) | \
			     (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT)))

/* IP interrupt sources handled by the driver, ICB status register 1 (37xx) */
#define ICB_1_IRQ_MASK_37XX ((REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_2_INT)) | \
			     (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_3_INT)) | \
			     (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_4_INT)))

/* Combined 64-bit mask: ICB_1 in the upper 32 bits, ICB_0 in the lower (37xx) */
#define ICB_0_1_IRQ_MASK_37XX ((((u64)ICB_1_IRQ_MASK_37XX) << 32) | ICB_0_IRQ_MASK_37XX)

/* IP interrupt sources handled by the driver, ICB status register 0 (40xx+) */
#define ICB_0_IRQ_MASK_40XX ((REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT)) | \
			     (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT)) | \
			     (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT)) | \
			     (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT)) | \
			     (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT)) | \
			     (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT)) | \
			     (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT)))

/* IP interrupt sources handled by the driver, ICB status register 1 (40xx+) */
#define ICB_1_IRQ_MASK_40XX ((REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_2_INT)) | \
			     (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_3_INT)) | \
			     (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_4_INT)))

/* Combined 64-bit mask: ICB_1 in the upper 32 bits, ICB_0 in the lower (40xx+) */
#define ICB_0_1_IRQ_MASK_40XX ((((u64)ICB_1_IRQ_MASK_40XX) << 32) | ICB_0_IRQ_MASK_40XX)

/* Interface firewall violation interrupt sources to enable (37xx) */
#define ITF_FIREWALL_VIOLATION_MASK_37XX ((REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_ROM_CMX)) | \
					  (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_DBG)) | \
					  (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_CTRL)) | \
					  (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, DEC400)) | \
					  (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_NCE)) | \
					  (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI)) | \
					  (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI_CMX)))

/* Interface firewall violation interrupt sources to enable (40xx+) */
#define ITF_FIREWALL_VIOLATION_MASK_40XX ((REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, CSS_ROM_CMX)) | \
					  (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, CSS_DBG)) | \
					  (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, CSS_CTRL)) | \
					  (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, DEC400)) | \
					  (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, MSS_NCE)) | \
					  (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI)) | \
					  (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI_CMX)))
65
66
/* Poll (up to 100 us) for the AON reset-clear bit to drop, i.e. the IP BAR is usable (37xx only) */
static int wait_for_ip_bar(struct ivpu_device *vdev)
{
	return REGV_POLL_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, AON, 0, 100);
}
70
71
/* Clear TOP_NOC, DSS and MSS master resets in the host subsystem CPR block (37xx) */
static void host_ss_rst_clr(struct ivpu_device *vdev)
{
	u32 val = 0;

	val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, TOP_NOC, val);
	val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, DSS_MAS, val);
	val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, MSS_MAS, val);

	REGV_WR32(VPU_37XX_HOST_SS_CPR_RST_CLR, val);
}
81
82
/* Return 0 if the TOP_SOCMMIO qreqn field equals @exp_val, -EIO otherwise (37xx) */
static int host_ss_noc_qreqn_check_37xx(struct ivpu_device *vdev, u32 exp_val)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QREQN);

	if (!REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, exp_val, val))
		return -EIO;

	return 0;
}
91
92
/* Return 0 if the TOP_SOCMMIO qreqn field equals @exp_val, -EIO otherwise (40xx+) */
static int host_ss_noc_qreqn_check_40xx(struct ivpu_device *vdev, u32 exp_val)
{
	u32 val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QREQN);

	if (!REG_TEST_FLD_NUM(VPU_40XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, exp_val, val))
		return -EIO;

	return 0;
}
101
102
/* Dispatch the host SS NOC qreqn check to the generation-specific helper */
static int host_ss_noc_qreqn_check(struct ivpu_device *vdev, u32 exp_val)
{
	if (ivpu_hw_ip_gen(vdev) != IVPU_HW_IP_37XX)
		return host_ss_noc_qreqn_check_40xx(vdev, exp_val);

	return host_ss_noc_qreqn_check_37xx(vdev, exp_val);
}
109
110
/* Return 0 if the TOP_SOCMMIO qacceptn field equals @exp_val, -EIO otherwise (37xx) */
static int host_ss_noc_qacceptn_check_37xx(struct ivpu_device *vdev, u32 exp_val)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QACCEPTN);

	if (!REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QACCEPTN, TOP_SOCMMIO, exp_val, val))
		return -EIO;

	return 0;
}
119
120
/* Return 0 if the TOP_SOCMMIO qacceptn field equals @exp_val, -EIO otherwise (40xx+) */
static int host_ss_noc_qacceptn_check_40xx(struct ivpu_device *vdev, u32 exp_val)
{
	u32 val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QACCEPTN);

	if (!REG_TEST_FLD_NUM(VPU_40XX_HOST_SS_NOC_QACCEPTN, TOP_SOCMMIO, exp_val, val))
		return -EIO;

	return 0;
}
129
130
/* Dispatch the host SS NOC qacceptn check to the generation-specific helper */
static int host_ss_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
{
	if (ivpu_hw_ip_gen(vdev) != IVPU_HW_IP_37XX)
		return host_ss_noc_qacceptn_check_40xx(vdev, exp_val);

	return host_ss_noc_qacceptn_check_37xx(vdev, exp_val);
}
137
138
/* Return 0 if the TOP_SOCMMIO qdeny field equals @exp_val, -EIO otherwise (37xx) */
static int host_ss_noc_qdeny_check_37xx(struct ivpu_device *vdev, u32 exp_val)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QDENY);

	if (!REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QDENY, TOP_SOCMMIO, exp_val, val))
		return -EIO;

	return 0;
}
147
148
/* Return 0 if the TOP_SOCMMIO qdeny field equals @exp_val, -EIO otherwise (40xx+) */
static int host_ss_noc_qdeny_check_40xx(struct ivpu_device *vdev, u32 exp_val)
{
	u32 val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QDENY);

	if (!REG_TEST_FLD_NUM(VPU_40XX_HOST_SS_NOC_QDENY, TOP_SOCMMIO, exp_val, val))
		return -EIO;

	return 0;
}
157
158
/* Dispatch the host SS NOC qdeny check to the generation-specific helper */
static int host_ss_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
{
	if (ivpu_hw_ip_gen(vdev) != IVPU_HW_IP_37XX)
		return host_ss_noc_qdeny_check_40xx(vdev, exp_val);

	return host_ss_noc_qdeny_check_37xx(vdev, exp_val);
}
165
166
/*
 * Return 0 if both CPU_CTRL and HOSTIF_L2CACHE qreqn fields equal @exp_val,
 * -EIO otherwise (37xx).
 * NOTE(review): "qrenqn" in the name looks like a typo for "qreqn"; renaming
 * would also require updating the dispatcher below.
 */
static int top_noc_qrenqn_check_37xx(struct ivpu_device *vdev, u32 exp_val)
{
	u32 val = REGV_RD32(VPU_37XX_TOP_NOC_QREQN);

	if (!REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QREQN, CPU_CTRL, exp_val, val) ||
	    !REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, exp_val, val))
		return -EIO;

	return 0;
}
176
177
/*
 * Return 0 if both CPU_CTRL and HOSTIF_L2CACHE qreqn fields equal @exp_val,
 * -EIO otherwise (40xx+). See the 37xx variant re: the "qrenqn" name typo.
 */
static int top_noc_qrenqn_check_40xx(struct ivpu_device *vdev, u32 exp_val)
{
	u32 val = REGV_RD32(VPU_40XX_TOP_NOC_QREQN);

	if (!REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QREQN, CPU_CTRL, exp_val, val) ||
	    !REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, exp_val, val))
		return -EIO;

	return 0;
}
187
188
/* Dispatch the TOP NOC qreqn check to the generation-specific helper */
static int top_noc_qreqn_check(struct ivpu_device *vdev, u32 exp_val)
{
	if (ivpu_hw_ip_gen(vdev) != IVPU_HW_IP_37XX)
		return top_noc_qrenqn_check_40xx(vdev, exp_val);

	return top_noc_qrenqn_check_37xx(vdev, exp_val);
}
195
196
/*
 * Initial host subsystem configuration: on 37xx wait for the IP BAR and clear
 * the CPR resets first, then verify the NOC handshake signals (qreqn,
 * qacceptn, qdeny) are all deasserted. Returns 0 on success, negative errno
 * on any failed check.
 */
int ivpu_hw_ip_host_ss_configure(struct ivpu_device *vdev)
{
	int ret;

	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
		ret = wait_for_ip_bar(vdev);
		if (ret) {
			ivpu_err(vdev, "Timed out waiting for NPU IP bar\n");
			return ret;
		}
		host_ss_rst_clr(vdev);
	}

	ret = host_ss_noc_qreqn_check(vdev, 0x0);
	if (ret) {
		ivpu_err(vdev, "Failed qreqn check: %d\n", ret);
		return ret;
	}

	ret = host_ss_noc_qacceptn_check(vdev, 0x0);
	if (ret) {
		ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
		return ret;
	}

	ret = host_ss_noc_qdeny_check(vdev, 0x0);
	if (ret)
		ivpu_err(vdev, "Failed qdeny check %d\n", ret);

	return ret;
}
227
228
/* Set or clear the HW idle generator enable bit (37xx) */
static void idle_gen_drive_37xx(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_VPU_IDLE_GEN);

	if (enable)
		val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_VPU_IDLE_GEN, EN, val);
	else
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_VPU_IDLE_GEN, EN, val);

	REGV_WR32(VPU_37XX_HOST_SS_AON_VPU_IDLE_GEN, val);
}
239
240
/* Set or clear the HW idle generator enable bit (40xx+) */
static void idle_gen_drive_40xx(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_IDLE_GEN);

	if (enable)
		val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_IDLE_GEN, EN, val);
	else
		val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_IDLE_GEN, EN, val);

	REGV_WR32(VPU_40XX_HOST_SS_AON_IDLE_GEN, val);
}
251
252
void ivpu_hw_ip_idle_gen_enable(struct ivpu_device *vdev)
253
{
254
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
255
idle_gen_drive_37xx(vdev, true);
256
else
257
idle_gen_drive_40xx(vdev, true);
258
}
259
260
void ivpu_hw_ip_idle_gen_disable(struct ivpu_device *vdev)
261
{
262
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
263
idle_gen_drive_37xx(vdev, false);
264
else
265
idle_gen_drive_40xx(vdev, false);
266
}
267
268
/*
 * Program the 50xx power island enable post-delays (POST/POST1/POST2) and
 * the status polling delay. Delay units are hardware-defined ticks — see the
 * register spec for the exact time base.
 */
static void
pwr_island_delay_set_50xx(struct ivpu_device *vdev, u32 post, u32 post1, u32 post2, u32 status)
{
	u32 val;

	val = REGV_RD32(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY);
	val = REG_SET_FLD_NUM(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY, POST_DLY, post, val);
	val = REG_SET_FLD_NUM(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY, POST1_DLY, post1, val);
	val = REG_SET_FLD_NUM(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY, POST2_DLY, post2, val);
	REGV_WR32(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY, val);

	val = REGV_RD32(VPU_50XX_HOST_SS_AON_PWR_ISLAND_STATUS_DLY);
	val = REG_SET_FLD_NUM(VPU_50XX_HOST_SS_AON_PWR_ISLAND_STATUS_DLY, STATUS_DLY, status, val);
	REGV_WR32(VPU_50XX_HOST_SS_AON_PWR_ISLAND_STATUS_DLY, val);
}
283
284
/* Set or clear the MSS_CPU power island trickle (pre-charge) enable (37xx) */
static void pwr_island_trickle_drive_37xx(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0);

	if (enable)
		val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val);
	else
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val);

	REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, val);
}
295
296
/* Set or clear the CSS_CPU power island trickle (pre-charge) enable (40xx+) */
static void pwr_island_trickle_drive_40xx(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0);

	if (enable)
		val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, CSS_CPU, val);
	else
		val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, CSS_CPU, val);

	REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, val);
}
307
308
/*
 * Set or clear the MSS_CPU power island enable (37xx).
 *
 * FIX: this function previously accessed the VPU_40XX power island register
 * (CSS_CPU field) even though pwr_island_enable() calls it only on 37xx
 * hardware; the register sets were swapped with pwr_island_drive_40xx().
 * It must drive the 37xx register, matching pwr_island_trickle_drive_37xx()
 * and wait_for_pwr_island_status() which both use the 37xx MSS_CPU island.
 */
static void pwr_island_drive_37xx(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0);

	if (enable)
		val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val);
	else
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val);

	REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, val);
}
319
320
/*
 * Set or clear the CSS_CPU power island enable (40xx+).
 *
 * FIX: this function previously accessed the VPU_37XX power island register
 * (MSS_CPU field) even though pwr_island_enable() calls it only on 40xx+
 * hardware; the register sets were swapped with pwr_island_drive_37xx().
 * It must drive the 40xx register, matching pwr_island_trickle_drive_40xx()
 * and wait_for_pwr_island_status() which both use the 40xx CSS_CPU island.
 */
static void pwr_island_drive_40xx(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0);

	if (enable)
		val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, CSS_CPU, val);
	else
		val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, CSS_CPU, val);

	REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, val);
}
331
332
/*
 * Power up the CPU power island: enable the trickle (pre-charge) path first,
 * wait 500 ns for it to settle, then enable the main island supply.
 * The trickle -> delay -> main ordering is required by the hardware.
 */
static void pwr_island_enable(struct ivpu_device *vdev)
{
	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
		pwr_island_trickle_drive_37xx(vdev, true);
		ndelay(500);
		pwr_island_drive_37xx(vdev, true);
	} else {
		pwr_island_trickle_drive_40xx(vdev, true);
		ndelay(500);
		pwr_island_drive_40xx(vdev, true);
	}
}
344
345
/*
 * Poll the CPU power island status bit until it equals @exp_val or the
 * timeout expires. Skipped (returns 0) when the punit_disabled workaround
 * is active, as the status never updates in that configuration.
 */
static int wait_for_pwr_island_status(struct ivpu_device *vdev, u32 exp_val)
{
	if (IVPU_WA(punit_disabled))
		return 0;

	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
		return REGV_POLL_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_STATUS0, MSS_CPU, exp_val,
				     PWR_ISLAND_STATUS_TIMEOUT_US);
	else
		return REGV_POLL_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_STATUS0, CSS_CPU, exp_val,
				     PWR_ISLAND_STATUS_TIMEOUT_US);
}
357
358
/* Set or clear the MSS_CPU power island isolation (37xx) */
static void pwr_island_isolation_drive_37xx(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0);

	if (enable)
		val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val);
	else
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val);

	REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, val);
}
369
370
/* Set or clear the CSS_CPU power island isolation (40xx+) */
static void pwr_island_isolation_drive_40xx(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0);

	if (enable)
		val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0, CSS_CPU, val);
	else
		val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0, CSS_CPU, val);

	REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0, val);
}
381
382
/* Dispatch the power island isolation drive to the generation-specific helper */
static void pwr_island_isolation_drive(struct ivpu_device *vdev, bool enable)
{
	if (ivpu_hw_ip_gen(vdev) != IVPU_HW_IP_37XX)
		pwr_island_isolation_drive_40xx(vdev, enable);
	else
		pwr_island_isolation_drive_37xx(vdev, enable);
}
389
390
/* Remove the CPU power island isolation (part of the power-up sequence) */
static void pwr_island_isolation_disable(struct ivpu_device *vdev)
{
	pwr_island_isolation_drive(vdev, false);
}
394
395
/* Gate or ungate the TOP_NOC, DSS and MSS master clocks (37xx) */
static void host_ss_clk_drive_37xx(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_SS_CPR_CLK_SET);

	if (enable) {
		val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, TOP_NOC, val);
		val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, DSS_MAS, val);
		val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, MSS_MAS, val);
	} else {
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, TOP_NOC, val);
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, DSS_MAS, val);
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, MSS_MAS, val);
	}

	REGV_WR32(VPU_37XX_HOST_SS_CPR_CLK_SET, val);
}
411
412
/* Gate or ungate the TOP_NOC, DSS and CSS master clocks (40xx+) */
static void host_ss_clk_drive_40xx(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_40XX_HOST_SS_CPR_CLK_EN);

	if (enable) {
		val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, TOP_NOC, val);
		val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, DSS_MAS, val);
		val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, CSS_MAS, val);
	} else {
		val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, TOP_NOC, val);
		val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, DSS_MAS, val);
		val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, CSS_MAS, val);
	}

	REGV_WR32(VPU_40XX_HOST_SS_CPR_CLK_EN, val);
}
428
429
/* Dispatch host SS clock gating to the generation-specific helper */
static void host_ss_clk_drive(struct ivpu_device *vdev, bool enable)
{
	if (ivpu_hw_ip_gen(vdev) != IVPU_HW_IP_37XX)
		host_ss_clk_drive_40xx(vdev, enable);
	else
		host_ss_clk_drive_37xx(vdev, enable);
}
436
437
/* Enable the host subsystem master clocks */
static void host_ss_clk_enable(struct ivpu_device *vdev)
{
	host_ss_clk_drive(vdev, true);
}
441
442
/* Assert or deassert the TOP_NOC, DSS and MSS master resets (37xx) */
static void host_ss_rst_drive_37xx(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_SS_CPR_RST_SET);

	if (enable) {
		val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, TOP_NOC, val);
		val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, DSS_MAS, val);
		val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, MSS_MAS, val);
	} else {
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, TOP_NOC, val);
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, DSS_MAS, val);
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, MSS_MAS, val);
	}

	REGV_WR32(VPU_37XX_HOST_SS_CPR_RST_SET, val);
}
458
459
/* Assert or deassert the TOP_NOC, DSS and CSS master resets (40xx+) */
static void host_ss_rst_drive_40xx(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_40XX_HOST_SS_CPR_RST_EN);

	if (enable) {
		val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, TOP_NOC, val);
		val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, DSS_MAS, val);
		val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, CSS_MAS, val);
	} else {
		val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, TOP_NOC, val);
		val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, DSS_MAS, val);
		val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, CSS_MAS, val);
	}

	REGV_WR32(VPU_40XX_HOST_SS_CPR_RST_EN, val);
}
475
476
/* Dispatch host SS reset control to the generation-specific helper */
static void host_ss_rst_drive(struct ivpu_device *vdev, bool enable)
{
	if (ivpu_hw_ip_gen(vdev) != IVPU_HW_IP_37XX)
		host_ss_rst_drive_40xx(vdev, enable);
	else
		host_ss_rst_drive_37xx(vdev, enable);
}
483
484
/* Release the host subsystem master resets (part of the power-up sequence) */
static void host_ss_rst_enable(struct ivpu_device *vdev)
{
	host_ss_rst_drive(vdev, true);
}
488
489
/* Drive the TOP_SOCMMIO qreqn handshake request bit (37xx) */
static void host_ss_noc_qreqn_top_socmmio_drive_37xx(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QREQN);

	if (enable)
		val = REG_SET_FLD(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
	else
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
	REGV_WR32(VPU_37XX_HOST_SS_NOC_QREQN, val);
}
499
500
/* Drive the TOP_SOCMMIO qreqn handshake request bit (40xx+) */
static void host_ss_noc_qreqn_top_socmmio_drive_40xx(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QREQN);

	if (enable)
		val = REG_SET_FLD(VPU_40XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
	else
		val = REG_CLR_FLD(VPU_40XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
	REGV_WR32(VPU_40XX_HOST_SS_NOC_QREQN, val);
}
510
511
/* Dispatch the TOP_SOCMMIO qreqn drive to the generation-specific helper */
static void host_ss_noc_qreqn_top_socmmio_drive(struct ivpu_device *vdev, bool enable)
{
	if (ivpu_hw_ip_gen(vdev) != IVPU_HW_IP_37XX)
		host_ss_noc_qreqn_top_socmmio_drive_40xx(vdev, enable);
	else
		host_ss_noc_qreqn_top_socmmio_drive_37xx(vdev, enable);
}
518
519
/*
 * Drive the host SS AXI NOC handshake: request the new state via qreqn, then
 * verify the NOC accepted it (qacceptn tracks the request) and did not deny
 * it. Returns 0 on success, negative errno on a failed handshake.
 */
static int host_ss_axi_drive(struct ivpu_device *vdev, bool enable)
{
	int ret;

	host_ss_noc_qreqn_top_socmmio_drive(vdev, enable);

	ret = host_ss_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
	if (ret) {
		ivpu_err(vdev, "Failed HOST SS NOC QACCEPTN check: %d\n", ret);
		return ret;
	}

	ret = host_ss_noc_qdeny_check(vdev, 0x0);
	if (ret)
		ivpu_err(vdev, "Failed HOST SS NOC QDENY check: %d\n", ret);

	return ret;
}
537
538
/* Drive the TOP NOC qreqn bits for CPU_CTRL and HOSTIF_L2CACHE (40xx+) */
static void top_noc_qreqn_drive_40xx(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_40XX_TOP_NOC_QREQN);

	if (enable) {
		val = REG_SET_FLD(VPU_40XX_TOP_NOC_QREQN, CPU_CTRL, val);
		val = REG_SET_FLD(VPU_40XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
	} else {
		val = REG_CLR_FLD(VPU_40XX_TOP_NOC_QREQN, CPU_CTRL, val);
		val = REG_CLR_FLD(VPU_40XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
	}

	REGV_WR32(VPU_40XX_TOP_NOC_QREQN, val);
}
552
553
/* Drive the TOP NOC qreqn bits for CPU_CTRL and HOSTIF_L2CACHE (37xx) */
static void top_noc_qreqn_drive_37xx(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_37XX_TOP_NOC_QREQN);

	if (enable) {
		val = REG_SET_FLD(VPU_37XX_TOP_NOC_QREQN, CPU_CTRL, val);
		val = REG_SET_FLD(VPU_37XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
	} else {
		val = REG_CLR_FLD(VPU_37XX_TOP_NOC_QREQN, CPU_CTRL, val);
		val = REG_CLR_FLD(VPU_37XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
	}

	REGV_WR32(VPU_37XX_TOP_NOC_QREQN, val);
}
567
568
/* Dispatch the TOP NOC qreqn drive to the generation-specific helper */
static void top_noc_qreqn_drive(struct ivpu_device *vdev, bool enable)
{
	if (ivpu_hw_ip_gen(vdev) != IVPU_HW_IP_37XX)
		top_noc_qreqn_drive_40xx(vdev, enable);
	else
		top_noc_qreqn_drive_37xx(vdev, enable);
}
575
576
/* Enable the host SS AXI interface via the NOC handshake; 0 on success */
int ivpu_hw_ip_host_ss_axi_enable(struct ivpu_device *vdev)
{
	return host_ss_axi_drive(vdev, true);
}
580
581
/* Return 0 if both TOP NOC qacceptn fields equal @exp_val, -EIO otherwise (37xx) */
static int top_noc_qacceptn_check_37xx(struct ivpu_device *vdev, u32 exp_val)
{
	u32 val = REGV_RD32(VPU_37XX_TOP_NOC_QACCEPTN);

	if (!REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QACCEPTN, CPU_CTRL, exp_val, val) ||
	    !REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QACCEPTN, HOSTIF_L2CACHE, exp_val, val))
		return -EIO;

	return 0;
}
591
592
/* Return 0 if both TOP NOC qacceptn fields equal @exp_val, -EIO otherwise (40xx+) */
static int top_noc_qacceptn_check_40xx(struct ivpu_device *vdev, u32 exp_val)
{
	u32 val = REGV_RD32(VPU_40XX_TOP_NOC_QACCEPTN);

	if (!REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QACCEPTN, CPU_CTRL, exp_val, val) ||
	    !REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QACCEPTN, HOSTIF_L2CACHE, exp_val, val))
		return -EIO;

	return 0;
}
602
603
/* Dispatch the TOP NOC qacceptn check to the generation-specific helper */
static int top_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
{
	if (ivpu_hw_ip_gen(vdev) != IVPU_HW_IP_37XX)
		return top_noc_qacceptn_check_40xx(vdev, exp_val);

	return top_noc_qacceptn_check_37xx(vdev, exp_val);
}
610
611
/* Return 0 if both TOP NOC qdeny fields equal @exp_val, -EIO otherwise (37xx) */
static int top_noc_qdeny_check_37xx(struct ivpu_device *vdev, u32 exp_val)
{
	u32 val = REGV_RD32(VPU_37XX_TOP_NOC_QDENY);

	if (!REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QDENY, CPU_CTRL, exp_val, val) ||
	    !REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QDENY, HOSTIF_L2CACHE, exp_val, val))
		return -EIO;

	return 0;
}
621
622
/* Return 0 if both TOP NOC qdeny fields equal @exp_val, -EIO otherwise (40xx+) */
static int top_noc_qdeny_check_40xx(struct ivpu_device *vdev, u32 exp_val)
{
	u32 val = REGV_RD32(VPU_40XX_TOP_NOC_QDENY);

	if (!REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QDENY, CPU_CTRL, exp_val, val) ||
	    !REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QDENY, HOSTIF_L2CACHE, exp_val, val))
		return -EIO;

	return 0;
}
632
633
/* Dispatch the TOP NOC qdeny check to the generation-specific helper */
static int top_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
{
	if (ivpu_hw_ip_gen(vdev) != IVPU_HW_IP_37XX)
		return top_noc_qdeny_check_40xx(vdev, exp_val);

	return top_noc_qdeny_check_37xx(vdev, exp_val);
}
640
641
/*
 * Drive the TOP NOC handshake: request the new state via qreqn, then verify
 * qacceptn tracked the request and qdeny stayed clear. Returns 0 on success,
 * negative errno on a failed handshake.
 */
static int top_noc_drive(struct ivpu_device *vdev, bool enable)
{
	int ret;

	top_noc_qreqn_drive(vdev, enable);

	ret = top_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
	if (ret) {
		ivpu_err(vdev, "Failed TOP NOC QACCEPTN check: %d\n", ret);
		return ret;
	}

	ret = top_noc_qdeny_check(vdev, 0x0);
	if (ret)
		ivpu_err(vdev, "Failed TOP NOC QDENY check: %d\n", ret);

	return ret;
}
659
660
/* Enable the TOP NOC via the handshake sequence; 0 on success */
int ivpu_hw_ip_top_noc_enable(struct ivpu_device *vdev)
{
	return top_noc_drive(vdev, true);
}
664
665
/* Set or clear the DPU_ACTIVE bit (37xx only; no 40xx+ equivalent here) */
static void dpu_active_drive_37xx(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_DPU_ACTIVE);

	if (enable)
		val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val);
	else
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val);

	REGV_WR32(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, val);
}
676
677
/*
 * Program per-device power island delays (50xx+ only; earlier generations
 * have no delay registers and return immediately). Delay values differ
 * between the high and default PLL profiling frequencies.
 */
static void pwr_island_delay_set(struct ivpu_device *vdev)
{
	bool high = vdev->hw->pll.profiling_freq == PLL_PROFILING_FREQ_HIGH;
	u32 post, post1, post2, status;

	if (ivpu_hw_ip_gen(vdev) < IVPU_HW_IP_50XX)
		return;

	switch (ivpu_device_id(vdev)) {
	case PCI_DEVICE_ID_WCL:
	case PCI_DEVICE_ID_PTL_P:
		post = high ? 18 : 0;
		post1 = 0;
		post2 = 0;
		status = high ? 46 : 3;
		break;

	default:
		/* NOTE(review): dump_stack() here looks like debugging leftover;
		 * the ivpu_err() alone should be enough — confirm and remove. */
		dump_stack();
		ivpu_err(vdev, "Unknown device ID\n");
		return;
	}

	pwr_island_delay_set_50xx(vdev, post, post1, post2, status);
}
702
703
/*
 * Power up the NPU CPU power domain. Order is hardware-mandated:
 * program delays (50xx+), enable the island, wait for its status, check the
 * TOP NOC is quiescent, then enable clocks, drop isolation, release resets,
 * and finally (37xx) flag the DPU active. Returns 0 on success.
 */
int ivpu_hw_ip_pwr_domain_enable(struct ivpu_device *vdev)
{
	int ret;

	pwr_island_delay_set(vdev);
	pwr_island_enable(vdev);

	ret = wait_for_pwr_island_status(vdev, 0x1);
	if (ret) {
		ivpu_err(vdev, "Timed out waiting for power island status\n");
		return ret;
	}

	ret = top_noc_qreqn_check(vdev, 0x0);
	if (ret) {
		ivpu_err(vdev, "Failed TOP NOC QREQN check %d\n", ret);
		return ret;
	}

	host_ss_clk_enable(vdev);
	pwr_island_isolation_disable(vdev);
	host_ss_rst_enable(vdev);

	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
		dpu_active_drive_37xx(vdev, true);

	return ret;
}
731
732
u64 ivpu_hw_ip_read_perf_timer_counter(struct ivpu_device *vdev)
733
{
734
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
735
return REGV_RD64(VPU_37XX_CPU_SS_TIM_PERF_FREE_CNT);
736
else
737
return REGV_RD64(VPU_40XX_CPU_SS_TIM_PERF_EXT_FREE_CNT);
738
}
739
740
/*
 * Configure page-table-walk snoop overrides (37xx): enable the no-snoop
 * override, keep writes snooped (clear AW no-snoop), and make reads
 * non-snooped unless force-snoop is enabled.
 */
static void ivpu_hw_ip_snoop_disable_37xx(struct ivpu_device *vdev)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES);

	val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, NOSNOOP_OVERRIDE_EN, val);
	val = REG_CLR_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AW_NOSNOOP_OVERRIDE, val);

	if (ivpu_is_force_snoop_enabled(vdev))
		val = REG_CLR_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AR_NOSNOOP_OVERRIDE, val);
	else
		val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AR_NOSNOOP_OVERRIDE, val);

	REGV_WR32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, val);
}
754
755
/*
 * Configure page-table-walk snoop overrides (40xx+). Note the 40xx field
 * polarity is inverted vs 37xx: these are SNOOP (not NOSNOOP) overrides,
 * so writes are snooped by setting AW and reads follow the force-snoop flag.
 */
static void ivpu_hw_ip_snoop_disable_40xx(struct ivpu_device *vdev)
{
	u32 val = REGV_RD32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES);

	val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, SNOOP_OVERRIDE_EN, val);
	val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AW_SNOOP_OVERRIDE, val);

	if (ivpu_is_force_snoop_enabled(vdev))
		val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AR_SNOOP_OVERRIDE, val);
	else
		val = REG_CLR_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AR_SNOOP_OVERRIDE, val);

	REGV_WR32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, val);
}
769
770
void ivpu_hw_ip_snoop_disable(struct ivpu_device *vdev)
771
{
772
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
773
return ivpu_hw_ip_snoop_disable_37xx(vdev);
774
else
775
return ivpu_hw_ip_snoop_disable_40xx(vdev);
776
}
777
778
/* Mark MMU stream IDs valid for TBU0 and TBU2 read/write channels (37xx) */
static void ivpu_hw_ip_tbu_mmu_enable_37xx(struct ivpu_device *vdev)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_IF_TBU_MMUSSIDV);

	val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU0_AWMMUSSIDV, val);
	val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU0_ARMMUSSIDV, val);
	val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU2_AWMMUSSIDV, val);
	val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU2_ARMMUSSIDV, val);

	REGV_WR32(VPU_37XX_HOST_IF_TBU_MMUSSIDV, val);
}
789
790
/* Mark MMU stream IDs valid for TBU0, TBU1 and TBU2 read/write channels (40xx+) */
static void ivpu_hw_ip_tbu_mmu_enable_40xx(struct ivpu_device *vdev)
{
	u32 val = REGV_RD32(VPU_40XX_HOST_IF_TBU_MMUSSIDV);

	val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU0_AWMMUSSIDV, val);
	val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU0_ARMMUSSIDV, val);
	val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU1_AWMMUSSIDV, val);
	val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU1_ARMMUSSIDV, val);
	val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU2_AWMMUSSIDV, val);
	val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU2_ARMMUSSIDV, val);

	REGV_WR32(VPU_40XX_HOST_IF_TBU_MMUSSIDV, val);
}
803
804
void ivpu_hw_ip_tbu_mmu_enable(struct ivpu_device *vdev)
805
{
806
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
807
return ivpu_hw_ip_tbu_mmu_enable_37xx(vdev);
808
else
809
return ivpu_hw_ip_tbu_mmu_enable_40xx(vdev);
810
}
811
812
/*
 * Boot the Leon RT CPU (37xx): pulse the resume IRQ via the vector register,
 * then program the FW entry point and set DONE. Each intermediate write is
 * required — the register sequence is hardware-mandated. Always returns 0.
 */
static int soc_cpu_boot_37xx(struct ivpu_device *vdev)
{
	u32 val;

	val = REGV_RD32(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC);
	val = REG_SET_FLD(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RSTRUN0, val);

	val = REG_CLR_FLD(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RSTVEC, val);
	REGV_WR32(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);

	/* Pulse the resume IRQ: set then clear */
	val = REG_SET_FLD(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RESUME0, val);
	REGV_WR32(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);

	val = REG_CLR_FLD(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RESUME0, val);
	REGV_WR32(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);

	/* Entry point is programmed >> 9, i.e. as a 512-byte-aligned address
	 * — presumably matching the register's address field width; confirm
	 * against the 37xx register spec. */
	val = vdev->fw->entry_point >> 9;
	REGV_WR32(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, val);

	val = REG_SET_FLD(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, DONE, val);
	REGV_WR32(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, val);

	ivpu_dbg(vdev, PM, "Booting firmware, mode: %s\n",
		 vdev->fw->entry_point == vdev->fw->cold_boot_entry_point ? "cold boot" : "resume");

	return 0;
}
839
840
/* Return 0 if the CPU NOC TOP_MMIO qacceptn field equals @exp_val, -EIO otherwise (40xx+) */
static int cpu_noc_qacceptn_check_40xx(struct ivpu_device *vdev, u32 exp_val)
{
	u32 val = REGV_RD32(VPU_40XX_CPU_SS_CPR_NOC_QACCEPTN);

	if (!REG_TEST_FLD_NUM(VPU_40XX_CPU_SS_CPR_NOC_QACCEPTN, TOP_MMIO, exp_val, val))
		return -EIO;

	return 0;
}
849
850
/* Return 0 if the CPU NOC TOP_MMIO qdeny field equals @exp_val, -EIO otherwise (40xx+) */
static int cpu_noc_qdeny_check_40xx(struct ivpu_device *vdev, u32 exp_val)
{
	u32 val = REGV_RD32(VPU_40XX_CPU_SS_CPR_NOC_QDENY);

	if (!REG_TEST_FLD_NUM(VPU_40XX_CPU_SS_CPR_NOC_QDENY, TOP_MMIO, exp_val, val))
		return -EIO;

	return 0;
}
859
860
/* Drive the CPU NOC TOP_MMIO qreqn handshake request bit (40xx+) */
static void cpu_noc_top_mmio_drive_40xx(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_40XX_CPU_SS_CPR_NOC_QREQN);

	if (enable)
		val = REG_SET_FLD(VPU_40XX_CPU_SS_CPR_NOC_QREQN, TOP_MMIO, val);
	else
		val = REG_CLR_FLD(VPU_40XX_CPU_SS_CPR_NOC_QREQN, TOP_MMIO, val);
	REGV_WR32(VPU_40XX_CPU_SS_CPR_NOC_QREQN, val);
}
870
871
/*
 * Drive the SOC CPU NOC handshake (40xx+): request via qreqn, then verify
 * qacceptn tracked the request and qdeny stayed clear. 0 on success.
 */
static int soc_cpu_drive_40xx(struct ivpu_device *vdev, bool enable)
{
	int ret;

	cpu_noc_top_mmio_drive_40xx(vdev, enable);

	ret = cpu_noc_qacceptn_check_40xx(vdev, enable ? 0x1 : 0x0);
	if (ret) {
		ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
		return ret;
	}

	ret = cpu_noc_qdeny_check_40xx(vdev, 0x0);
	if (ret)
		ivpu_err(vdev, "Failed qdeny check: %d\n", ret);

	return ret;
}
889
890
/* Enable the SOC CPU NOC interface (40xx+ only caller path); 0 on success */
static int soc_cpu_enable(struct ivpu_device *vdev)
{
	return soc_cpu_drive_40xx(vdev, true);
}
894
895
/*
 * Boot the SOC CPU (40xx+): enable its NOC interface, program the FW entry
 * point into the verification address register (shifted into the
 * IMAGE_LOCATION field), then set DONE to start execution. 0 on success.
 */
static int soc_cpu_boot_40xx(struct ivpu_device *vdev)
{
	int ret;
	u32 val;
	u64 val64;

	ret = soc_cpu_enable(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to enable SOC CPU: %d\n", ret);
		return ret;
	}

	/* Shift the entry point into the IMAGE_LOCATION field position */
	val64 = vdev->fw->entry_point;
	val64 <<= ffs(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO_IMAGE_LOCATION_MASK) - 1;
	REGV_WR64(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO, val64);

	val = REGV_RD32(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO);
	val = REG_SET_FLD(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO, DONE, val);
	REGV_WR32(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO, val);

	ivpu_dbg(vdev, PM, "Booting firmware, mode: %s\n",
		 ivpu_fw_is_cold_boot(vdev) ? "cold boot" : "resume");

	return 0;
}
920
921
int ivpu_hw_ip_soc_cpu_boot(struct ivpu_device *vdev)
922
{
923
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
924
return soc_cpu_boot_37xx(vdev);
925
else
926
return soc_cpu_boot_40xx(vdev);
927
}
928
929
/*
 * Disable the CPU watchdog (37xx). Each watchdog register write must be
 * preceded by writing the TIM_SAFE_ENABLE unlock key.
 */
static void wdt_disable_37xx(struct ivpu_device *vdev)
{
	u32 val;

	/* Enable writing and set non-zero WDT value */
	REGV_WR32(VPU_37XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
	REGV_WR32(VPU_37XX_CPU_SS_TIM_WATCHDOG, TIM_WATCHDOG_RESET_VALUE);

	/* Enable writing and disable watchdog timer */
	REGV_WR32(VPU_37XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
	REGV_WR32(VPU_37XX_CPU_SS_TIM_WDOG_EN, 0);

	/* Now clear the timeout interrupt */
	val = REGV_RD32(VPU_37XX_CPU_SS_TIM_GEN_CONFIG);
	val = REG_CLR_FLD(VPU_37XX_CPU_SS_TIM_GEN_CONFIG, WDOG_TO_INT_CLR, val);
	REGV_WR32(VPU_37XX_CPU_SS_TIM_GEN_CONFIG, val);
}
946
947
/*
 * Disable the CPU watchdog (40xx+): unlock, reload a non-zero value, unlock
 * again, disable the timer, then clear the pending timeout interrupt.
 */
static void wdt_disable_40xx(struct ivpu_device *vdev)
{
	u32 val;

	REGV_WR32(VPU_40XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
	REGV_WR32(VPU_40XX_CPU_SS_TIM_WATCHDOG, TIM_WATCHDOG_RESET_VALUE);

	REGV_WR32(VPU_40XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
	REGV_WR32(VPU_40XX_CPU_SS_TIM_WDOG_EN, 0);

	val = REGV_RD32(VPU_40XX_CPU_SS_TIM_GEN_CONFIG);
	val = REG_CLR_FLD(VPU_40XX_CPU_SS_TIM_GEN_CONFIG, WDOG_TO_INT_CLR, val);
	REGV_WR32(VPU_40XX_CPU_SS_TIM_GEN_CONFIG, val);
}
961
962
void ivpu_hw_ip_wdt_disable(struct ivpu_device *vdev)
963
{
964
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
965
return wdt_disable_37xx(vdev);
966
else
967
return wdt_disable_40xx(vdev);
968
}
969
970
/* Return the IPC RX FIFO fill level; plain readl, callable from IRQ context (37xx) */
static u32 ipc_rx_count_get_37xx(struct ivpu_device *vdev)
{
	u32 count = readl(vdev->regv + VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT);

	return REG_GET_FLD(VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT, FILL_LEVEL, count);
}
976
977
static u32 ipc_rx_count_get_40xx(struct ivpu_device *vdev)
978
{
979
u32 count = readl(vdev->regv + VPU_40XX_HOST_SS_TIM_IPC_FIFO_STAT);
980
981
return REG_GET_FLD(VPU_40XX_HOST_SS_TIM_IPC_FIFO_STAT, FILL_LEVEL, count);
982
}
983
984
u32 ivpu_hw_ip_ipc_rx_count_get(struct ivpu_device *vdev)
985
{
986
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
987
return ipc_rx_count_get_37xx(vdev);
988
else
989
return ipc_rx_count_get_40xx(vdev);
990
}
991
992
/*
 * Unmask NPU core interrupts: first the interface firewall violation IRQs,
 * then the full ICB (interrupt control block) source mask.
 */
void ivpu_hw_ip_irq_enable(struct ivpu_device *vdev)
{
	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
		REGV_WR32(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, ITF_FIREWALL_VIOLATION_MASK_37XX);
		REGV_WR64(VPU_37XX_HOST_SS_ICB_ENABLE_0, ICB_0_1_IRQ_MASK_37XX);
	} else {
		REGV_WR32(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, ITF_FIREWALL_VIOLATION_MASK_40XX);
		REGV_WR64(VPU_40XX_HOST_SS_ICB_ENABLE_0, ICB_0_1_IRQ_MASK_40XX);
	}
}
void ivpu_hw_ip_irq_disable(struct ivpu_device *vdev)
1004
{
1005
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
1006
REGV_WR64(VPU_37XX_HOST_SS_ICB_ENABLE_0, 0x0ull);
1007
REGV_WR32(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, 0x0);
1008
} else {
1009
REGV_WR64(VPU_40XX_HOST_SS_ICB_ENABLE_0, 0x0ull);
1010
REGV_WR32(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, 0x0ul);
1011
}
1012
}
1013
1014
static void diagnose_failure_37xx(struct ivpu_device *vdev)
1015
{
1016
u32 reg = REGV_RD32(VPU_37XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK_37XX;
1017
1018
if (ipc_rx_count_get_37xx(vdev))
1019
ivpu_err(vdev, "IPC FIFO queue not empty, missed IPC IRQ");
1020
1021
if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, reg))
1022
ivpu_err(vdev, "WDT MSS timeout detected\n");
1023
1024
if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, reg))
1025
ivpu_err(vdev, "WDT NCE timeout detected\n");
1026
1027
if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, reg))
1028
ivpu_err(vdev, "NOC Firewall irq detected\n");
1029
}
1030
1031
static void diagnose_failure_40xx(struct ivpu_device *vdev)
1032
{
1033
u32 reg = REGV_RD32(VPU_40XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK_40XX;
1034
1035
if (ipc_rx_count_get_40xx(vdev))
1036
ivpu_err(vdev, "IPC FIFO queue not empty, missed IPC IRQ");
1037
1038
if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, reg))
1039
ivpu_err(vdev, "WDT MSS timeout detected\n");
1040
1041
if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, reg))
1042
ivpu_err(vdev, "WDT NCE timeout detected\n");
1043
1044
if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, reg))
1045
ivpu_err(vdev, "NOC Firewall irq detected\n");
1046
}
1047
1048
void ivpu_hw_ip_diagnose_failure(struct ivpu_device *vdev)
1049
{
1050
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
1051
diagnose_failure_37xx(vdev);
1052
else
1053
diagnose_failure_40xx(vdev);
1054
}
1055
1056
void ivpu_hw_ip_irq_clear(struct ivpu_device *vdev)
1057
{
1058
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
1059
REGV_WR64(VPU_37XX_HOST_SS_ICB_CLEAR_0, ICB_0_1_IRQ_MASK_37XX);
1060
else
1061
REGV_WR64(VPU_40XX_HOST_SS_ICB_CLEAR_0, ICB_0_1_IRQ_MASK_40XX);
1062
}
1063
1064
/* NCE watchdog fired: request a device recovery. */
static void irq_wdt_nce_handler(struct ivpu_device *vdev)
{
	ivpu_pm_trigger_recovery(vdev, "WDT NCE IRQ");
}
/*
 * MSS watchdog fired: disable the watchdog, then request a device recovery.
 * NOTE(review): presumably the WDT is disabled first so it cannot fire again
 * while recovery is in progress - confirm against HW docs.
 */
static void irq_wdt_mss_handler(struct ivpu_device *vdev)
{
	ivpu_hw_ip_wdt_disable(vdev);
	ivpu_pm_trigger_recovery(vdev, "WDT MSS IRQ");
}
/*
 * NOC firewall violation: count the event and log it at debug level only;
 * no recovery is triggered here.
 */
static void irq_noc_firewall_handler(struct ivpu_device *vdev)
{
	atomic_inc(&vdev->hw->firewall_irq_counter);

	ivpu_dbg(vdev, IRQ, "NOC Firewall interrupt detected, counter %d\n",
		 atomic_read(&vdev->hw->firewall_irq_counter));
}
/*
 * Handler for IRQs from NPU core (37xx).
 * Returns true if any handled ICB interrupt source was pending.
 */
bool ivpu_hw_ip_irq_handler_37xx(struct ivpu_device *vdev, int irq)
{
	/* Only look at the sources this driver handles */
	u32 status = REGV_RD32(VPU_37XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK_37XX;

	if (!status)
		return false;

	/* Clear the latched sources before dispatching the handlers */
	REGV_WR32(VPU_37XX_HOST_SS_ICB_CLEAR_0, status);

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT, status))
		ivpu_mmu_irq_evtq_handler(vdev);

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status))
		ivpu_ipc_irq_handler(vdev);

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status))
		ivpu_dbg(vdev, IRQ, "MMU sync complete\n");

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT, status))
		ivpu_mmu_irq_gerr_handler(vdev);

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, status))
		irq_wdt_mss_handler(vdev);

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, status))
		irq_wdt_nce_handler(vdev);

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, status))
		irq_noc_firewall_handler(vdev);

	return true;
}
/*
 * Handler for IRQs from NPU core (40xx).
 * Returns true if any handled ICB interrupt source was pending.
 */
bool ivpu_hw_ip_irq_handler_40xx(struct ivpu_device *vdev, int irq)
{
	/* Only look at the sources this driver handles */
	u32 status = REGV_RD32(VPU_40XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK_40XX;

	if (!status)
		return false;

	/* Clear the latched sources before dispatching the handlers */
	REGV_WR32(VPU_40XX_HOST_SS_ICB_CLEAR_0, status);

	if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT, status))
		ivpu_mmu_irq_evtq_handler(vdev);

	if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status))
		ivpu_ipc_irq_handler(vdev);

	if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status))
		ivpu_dbg(vdev, IRQ, "MMU sync complete\n");

	if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT, status))
		ivpu_mmu_irq_gerr_handler(vdev);

	if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, status))
		irq_wdt_mss_handler(vdev);

	if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, status))
		irq_wdt_nce_handler(vdev);

	if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, status))
		irq_noc_firewall_handler(vdev);

	return true;
}
/* Ring doorbell db_id on 37xx by writing the SET bit into its register slot. */
static void db_set_37xx(struct ivpu_device *vdev, u32 db_id)
{
	u32 stride = VPU_37XX_CPU_SS_DOORBELL_1 - VPU_37XX_CPU_SS_DOORBELL_0;
	u32 set_bit = REG_FLD(VPU_37XX_CPU_SS_DOORBELL_0, SET);

	REGV_WR32I(VPU_37XX_CPU_SS_DOORBELL_0, stride, db_id, set_bit);
}
/* Ring doorbell db_id on 40xx by writing the SET bit into its register slot. */
static void db_set_40xx(struct ivpu_device *vdev, u32 db_id)
{
	u32 stride = VPU_40XX_CPU_SS_DOORBELL_1 - VPU_40XX_CPU_SS_DOORBELL_0;
	u32 set_bit = REG_FLD(VPU_40XX_CPU_SS_DOORBELL_0, SET);

	REGV_WR32I(VPU_40XX_CPU_SS_DOORBELL_0, stride, db_id, set_bit);
}
/* Ring doorbell db_id for the detected IP generation. */
void ivpu_hw_ip_db_set(struct ivpu_device *vdev, u32 db_id)
{
	if (ivpu_hw_ip_gen(vdev) != IVPU_HW_IP_37XX)
		db_set_40xx(vdev, db_id);
	else
		db_set_37xx(vdev, db_id);
}
u32 ivpu_hw_ip_ipc_rx_addr_get(struct ivpu_device *vdev)
1176
{
1177
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
1178
return REGV_RD32(VPU_37XX_HOST_SS_TIM_IPC_FIFO_ATM);
1179
else
1180
return REGV_RD32(VPU_40XX_HOST_SS_TIM_IPC_FIFO_ATM);
1181
}
1182
1183
/* Push an IPC TX message address (NPU address space) into the HW FIFO. */
void ivpu_hw_ip_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr)
{
	if (ivpu_hw_ip_gen(vdev) != IVPU_HW_IP_37XX)
		REGV_WR32(VPU_40XX_CPU_SS_TIM_IPC_FIFO, vpu_addr);
	else
		REGV_WR32(VPU_37XX_CPU_SS_TIM_IPC_FIFO, vpu_addr);
}