GitHub Repository: torvalds/linux
Path: blob/master/drivers/accel/ivpu/ivpu_hw_btrs.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2025 Intel Corporation
 */

#include <linux/units.h>

#include "ivpu_drv.h"
#include "ivpu_hw.h"
#include "ivpu_hw_btrs.h"
#include "ivpu_hw_btrs_lnl_reg.h"
#include "ivpu_hw_btrs_mtl_reg.h"
#include "ivpu_hw_reg_io.h"
#include "ivpu_pm.h"

#define BTRS_MTL_IRQ_MASK ((REG_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, ATS_ERR)) | \
			   (REG_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, UFI_ERR)))

#define BTRS_LNL_IRQ_MASK ((REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, ATS_ERR)) | \
			   (REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI0_ERR)) | \
			   (REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI1_ERR)) | \
			   (REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR0_ERR)) | \
			   (REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR1_ERR)) | \
			   (REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, SURV_ERR)))

#define BTRS_MTL_ALL_IRQ_MASK (BTRS_MTL_IRQ_MASK | (REG_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, \
							     FREQ_CHANGE)))

#define BTRS_IRQ_DISABLE_MASK ((u32)-1)

#define BTRS_LNL_ALL_IRQ_MASK ((u32)-1)


#define PLL_CDYN_DEFAULT 0x80
#define PLL_EPP_DEFAULT 0x80
#define PLL_CONFIG_DEFAULT 0x0
#define PLL_REF_CLK_FREQ 50000000ull
#define PLL_RATIO_TO_FREQ(x) ((x) * PLL_REF_CLK_FREQ)

#define PLL_TIMEOUT_US (1500 * USEC_PER_MSEC)
#define IDLE_TIMEOUT_US (5 * USEC_PER_MSEC)
#define TIMEOUT_US (150 * USEC_PER_MSEC)

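/*
 * For illustration, derived from the macros above: with a 50 MHz reference
 * clock, a workpoint ratio of 0x10 (16) gives PLL_RATIO_TO_FREQ(0x10) =
 * 16 * 50000000 = 800 MHz at the PLL output. The per-generation DPU dividers
 * are applied in pll_ratio_to_dpu_freq_mtl() and pll_ratio_to_dpu_freq_lnl()
 * below.
 */
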
/* Work point configuration values */
#define WP_CONFIG(tile, ratio) (((tile) << 8) | (ratio))
#define MTL_CONFIG_1_TILE 0x01
#define MTL_CONFIG_2_TILE 0x02
#define MTL_PLL_RATIO_5_3 0x01
#define MTL_PLL_RATIO_4_3 0x02
#define BTRS_MTL_TILE_FUSE_ENABLE_BOTH 0x0
#define BTRS_MTL_TILE_SKU_BOTH 0x3630

#define BTRS_LNL_TILE_MAX_NUM 6
#define BTRS_LNL_TILE_MAX_MASK 0x3f

#define WEIGHTS_DEFAULT 0xf711f711u
#define WEIGHTS_ATS_DEFAULT 0x0000f711u

#define DCT_REQ 0x2
#define DCT_ENABLE 0x1
#define DCT_DISABLE 0x0

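/*
 * For illustration, derived from the macro above: WP_CONFIG() packs the tile
 * configuration into bits 15:8 and the PLL ratio select into bits 7:0, so
 * WP_CONFIG(MTL_CONFIG_2_TILE, MTL_PLL_RATIO_4_3) == (0x02 << 8) | 0x02 ==
 * 0x0202.
 */
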
static u32 pll_ratio_to_dpu_freq(struct ivpu_device *vdev, u32 ratio);

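/*
 * Probe for the MTL interrupt-clear quirk: write all 1s to INTERRUPT_STAT and,
 * if the bits stay set, the register can only be cleared by writing 0s.
 * Returns true when the interrupt_clear_with_0 workaround must be used.
 */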
int ivpu_hw_btrs_irqs_clear_with_0_mtl(struct ivpu_device *vdev)
{
	REGB_WR32(VPU_HW_BTRS_MTL_INTERRUPT_STAT, BTRS_MTL_ALL_IRQ_MASK);
	if (REGB_RD32(VPU_HW_BTRS_MTL_INTERRUPT_STAT) == BTRS_MTL_ALL_IRQ_MASK) {
		/* Writing 1s does not clear the interrupt status register */
		REGB_WR32(VPU_HW_BTRS_MTL_INTERRUPT_STAT, 0x0);
		return true;
	}

	return false;
}

static void freq_ratios_init_mtl(struct ivpu_device *vdev)
{
	struct ivpu_hw_info *hw = vdev->hw;
	u32 fmin_fuse, fmax_fuse;

	fmin_fuse = REGB_RD32(VPU_HW_BTRS_MTL_FMIN_FUSE);
	hw->pll.min_ratio = REG_GET_FLD(VPU_HW_BTRS_MTL_FMIN_FUSE, MIN_RATIO, fmin_fuse);
	hw->pll.pn_ratio = REG_GET_FLD(VPU_HW_BTRS_MTL_FMIN_FUSE, PN_RATIO, fmin_fuse);

	fmax_fuse = REGB_RD32(VPU_HW_BTRS_MTL_FMAX_FUSE);
	hw->pll.max_ratio = REG_GET_FLD(VPU_HW_BTRS_MTL_FMAX_FUSE, MAX_RATIO, fmax_fuse);
}

static void freq_ratios_init_lnl(struct ivpu_device *vdev)
{
	struct ivpu_hw_info *hw = vdev->hw;
	u32 fmin_fuse, fmax_fuse;

	fmin_fuse = REGB_RD32(VPU_HW_BTRS_LNL_FMIN_FUSE);
	hw->pll.min_ratio = REG_GET_FLD(VPU_HW_BTRS_LNL_FMIN_FUSE, MIN_RATIO, fmin_fuse);
	hw->pll.pn_ratio = REG_GET_FLD(VPU_HW_BTRS_LNL_FMIN_FUSE, PN_RATIO, fmin_fuse);

	fmax_fuse = REGB_RD32(VPU_HW_BTRS_LNL_FMAX_FUSE);
	hw->pll.max_ratio = REG_GET_FLD(VPU_HW_BTRS_LNL_FMAX_FUSE, MAX_RATIO, fmax_fuse);
}

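/*
 * Clamp the fused limits read above against the ivpu_pll_min_ratio and
 * ivpu_pll_max_ratio overrides (module parameters declared elsewhere in this
 * driver), then force the nominal PN ratio into the resulting [min, max]
 * range.
 */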
void ivpu_hw_btrs_freq_ratios_init(struct ivpu_device *vdev)
{
	struct ivpu_hw_info *hw = vdev->hw;

	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
		freq_ratios_init_mtl(vdev);
	else
		freq_ratios_init_lnl(vdev);

	hw->pll.min_ratio = clamp_t(u8, ivpu_pll_min_ratio, hw->pll.min_ratio, hw->pll.max_ratio);
	hw->pll.max_ratio = clamp_t(u8, ivpu_pll_max_ratio, hw->pll.min_ratio, hw->pll.max_ratio);
	hw->pll.pn_ratio = clamp_t(u8, hw->pll.pn_ratio, hw->pll.min_ratio, hw->pll.max_ratio);
}

static bool tile_disable_check(u32 config)
{
	/* Allowed values: 0 or one bit from range 0-5 (6 tiles) */
	if (config == 0)
		return true;

	if (config > BIT(BTRS_LNL_TILE_MAX_NUM - 1))
		return false;

	if ((config & (config - 1)) == 0)
		return true;

	return false;
}

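/*
 * For illustration: config 0x0 (no tile disabled) and one-hot masks such as
 * 0x01 or 0x20 pass the (config & (config - 1)) power-of-two test above,
 * while 0x21 (two bits set) or 0x40 (bit 6, outside the 6-tile range) are
 * rejected.
 */
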
static int read_tile_config_fuse(struct ivpu_device *vdev, u32 *tile_fuse_config)
{
	u32 fuse;
	u32 config;

	fuse = REGB_RD32(VPU_HW_BTRS_LNL_TILE_FUSE);
	if (!REG_TEST_FLD(VPU_HW_BTRS_LNL_TILE_FUSE, VALID, fuse)) {
		ivpu_err(vdev, "Fuse: invalid (0x%x)\n", fuse);
		return -EIO;
	}

	config = REG_GET_FLD(VPU_HW_BTRS_LNL_TILE_FUSE, CONFIG, fuse);
	if (!tile_disable_check(config))
		ivpu_warn(vdev, "More than 1 tile disabled, tile fuse config mask: 0x%x\n", config);

	ivpu_dbg(vdev, MISC, "Tile disable config mask: 0x%x\n", config);

	*tile_fuse_config = config;
	return 0;
}

static int info_init_mtl(struct ivpu_device *vdev)
{
	struct ivpu_hw_info *hw = vdev->hw;

	hw->tile_fuse = BTRS_MTL_TILE_FUSE_ENABLE_BOTH;
	hw->sku = BTRS_MTL_TILE_SKU_BOTH;
	hw->config = WP_CONFIG(MTL_CONFIG_2_TILE, MTL_PLL_RATIO_4_3);

	return 0;
}

static int info_init_lnl(struct ivpu_device *vdev)
{
	struct ivpu_hw_info *hw = vdev->hw;
	u32 tile_fuse_config;
	int ret;

	ret = read_tile_config_fuse(vdev, &tile_fuse_config);
	if (ret)
		return ret;

	hw->tile_fuse = tile_fuse_config;
	hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT;

	return 0;
}

int ivpu_hw_btrs_info_init(struct ivpu_device *vdev)
{
	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
		return info_init_mtl(vdev);
	else
		return info_init_lnl(vdev);
}

static int wp_request_sync(struct ivpu_device *vdev)
{
	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
		return REGB_POLL_FLD(VPU_HW_BTRS_MTL_WP_REQ_CMD, SEND, 0, PLL_TIMEOUT_US);
	else
		return REGB_POLL_FLD(VPU_HW_BTRS_LNL_WP_REQ_CMD, SEND, 0, PLL_TIMEOUT_US);
}

static int wait_for_status_ready(struct ivpu_device *vdev, bool enable)
{
	u32 exp_val = enable ? 0x1 : 0x0;

	if (IVPU_WA(punit_disabled))
		return 0;

	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
		return REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_STATUS, READY, exp_val, PLL_TIMEOUT_US);
	else
		return REGB_POLL_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, READY, exp_val, PLL_TIMEOUT_US);
}

struct wp_request {
	u16 min;
	u16 max;
	u16 target;
	u16 cfg;
	u16 epp;
	u16 cdyn;
};

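/*
 * For illustration, using the values assigned in prepare_wp_request() below,
 * an "enable" request on an LNL part with default settings amounts to:
 *
 *	struct wp_request wp = {
 *		.min	= hw->pll.min_ratio,	(fused minimum ratio)
 *		.max	= hw->pll.max_ratio,	(fused maximum ratio)
 *		.target	= hw->pll.pn_ratio,	(nominal workpoint)
 *		.cfg	= PLL_CONFIG_DEFAULT,	(0x0)
 *		.epp	= PLL_EPP_DEFAULT,	(0x80)
 *		.cdyn	= PLL_CDYN_DEFAULT,	(0x80)
 *	};
 */
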
static void wp_request_mtl(struct ivpu_device *vdev, struct wp_request *wp)
{
	u32 val;

	val = REGB_RD32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD0);
	val = REG_SET_FLD_NUM(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD0, MIN_RATIO, wp->min, val);
	val = REG_SET_FLD_NUM(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD0, MAX_RATIO, wp->max, val);
	REGB_WR32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD0, val);

	val = REGB_RD32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD1);
	val = REG_SET_FLD_NUM(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD1, TARGET_RATIO, wp->target, val);
	val = REG_SET_FLD_NUM(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD1, EPP, PLL_EPP_DEFAULT, val);
	REGB_WR32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD1, val);

	val = REGB_RD32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD2);
	val = REG_SET_FLD_NUM(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD2, CONFIG, wp->cfg, val);
	REGB_WR32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD2, val);

	val = REGB_RD32(VPU_HW_BTRS_MTL_WP_REQ_CMD);
	val = REG_SET_FLD(VPU_HW_BTRS_MTL_WP_REQ_CMD, SEND, val);
	REGB_WR32(VPU_HW_BTRS_MTL_WP_REQ_CMD, val);
}

static void wp_request_lnl(struct ivpu_device *vdev, struct wp_request *wp)
{
	u32 val;

	val = REGB_RD32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD0);
	val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD0, MIN_RATIO, wp->min, val);
	val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD0, MAX_RATIO, wp->max, val);
	REGB_WR32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD0, val);

	val = REGB_RD32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD1);
	val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD1, TARGET_RATIO, wp->target, val);
	val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD1, EPP, wp->epp, val);
	REGB_WR32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD1, val);

	val = REGB_RD32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD2);
	val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD2, CONFIG, wp->cfg, val);
	val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD2, CDYN, wp->cdyn, val);
	REGB_WR32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD2, val);

	val = REGB_RD32(VPU_HW_BTRS_LNL_WP_REQ_CMD);
	val = REG_SET_FLD(VPU_HW_BTRS_LNL_WP_REQ_CMD, SEND, val);
	REGB_WR32(VPU_HW_BTRS_LNL_WP_REQ_CMD, val);
}

static void wp_request(struct ivpu_device *vdev, struct wp_request *wp)
{
	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
		wp_request_mtl(vdev, wp);
	else
		wp_request_lnl(vdev, wp);
}

static int wp_request_send(struct ivpu_device *vdev, struct wp_request *wp)
{
	int ret;

	ret = wp_request_sync(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to sync before workpoint request: %d\n", ret);
		return ret;
	}

	wp_request(vdev, wp);

	ret = wp_request_sync(vdev);
	if (ret)
		ivpu_err(vdev, "Failed to sync after workpoint request: %d\n", ret);

	return ret;
}

static void prepare_wp_request(struct ivpu_device *vdev, struct wp_request *wp, bool enable)
{
	struct ivpu_hw_info *hw = vdev->hw;

	wp->min = hw->pll.min_ratio;
	wp->max = hw->pll.max_ratio;

	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) {
		wp->target = enable ? hw->pll.pn_ratio : 0;
		wp->cfg = enable ? hw->config : 0;
		wp->cdyn = 0;
		wp->epp = 0;
	} else {
		wp->target = hw->pll.pn_ratio;
		wp->cfg = enable ? PLL_CONFIG_DEFAULT : 0;
		wp->cdyn = enable ? PLL_CDYN_DEFAULT : 0;
		wp->epp = enable ? PLL_EPP_DEFAULT : 0;
	}
}

static int wait_for_pll_lock(struct ivpu_device *vdev, bool enable)
{
	u32 exp_val = enable ? 0x1 : 0x0;

	if (ivpu_hw_btrs_gen(vdev) != IVPU_HW_BTRS_MTL)
		return 0;

	if (IVPU_WA(punit_disabled))
		return 0;

	return REGB_POLL_FLD(VPU_HW_BTRS_MTL_PLL_STATUS, LOCK, exp_val, PLL_TIMEOUT_US);
}

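/*
 * Workpoint request sequence, as implemented by the helpers above: wait for
 * any in-flight request to drain (WP_REQ_CMD SEND == 0), program the payload
 * registers, set SEND, wait for the P-unit to consume the request, then wait
 * for PLL lock (MTL only) and for VPU_STATUS READY to reflect the requested
 * state.
 */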
int ivpu_hw_btrs_wp_drive(struct ivpu_device *vdev, bool enable)
{
	struct wp_request wp;
	int ret;

	if (IVPU_WA(punit_disabled)) {
		ivpu_dbg(vdev, PM, "Skipping workpoint request\n");
		return 0;
	}

	prepare_wp_request(vdev, &wp, enable);

	ivpu_dbg(vdev, PM, "PLL workpoint request: %lu MHz, config: 0x%x, epp: 0x%x, cdyn: 0x%x\n",
		 pll_ratio_to_dpu_freq(vdev, wp.target) / HZ_PER_MHZ, wp.cfg, wp.epp, wp.cdyn);

	ret = wp_request_send(vdev, &wp);
	if (ret) {
		ivpu_err(vdev, "Failed to send workpoint request: %d\n", ret);
		return ret;
	}

	ret = wait_for_pll_lock(vdev, enable);
	if (ret) {
		ivpu_err(vdev, "Timed out waiting for PLL lock\n");
		return ret;
	}

	ret = wait_for_status_ready(vdev, enable);
	if (ret) {
		ivpu_err(vdev, "Timed out waiting for NPU ready status\n");
		return ret;
	}

	return 0;
}

static int d0i3_drive_mtl(struct ivpu_device *vdev, bool enable)
{
	int ret;
	u32 val;

	ret = REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
	if (ret) {
		ivpu_err(vdev, "Failed to sync before D0i3 transition: %d\n", ret);
		return ret;
	}

	val = REGB_RD32(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL);
	if (enable)
		val = REG_SET_FLD(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL, I3, val);
	else
		val = REG_CLR_FLD(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL, I3, val);
	REGB_WR32(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL, val);

	ret = REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
	if (ret)
		ivpu_err(vdev, "Failed to sync after D0i3 transition: %d\n", ret);

	return ret;
}

static int d0i3_drive_lnl(struct ivpu_device *vdev, bool enable)
{
	int ret;
	u32 val;

	ret = REGB_POLL_FLD(VPU_HW_BTRS_LNL_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
	if (ret) {
		ivpu_err(vdev, "Failed to sync before D0i3 transition: %d\n", ret);
		return ret;
	}

	val = REGB_RD32(VPU_HW_BTRS_LNL_D0I3_CONTROL);
	if (enable)
		val = REG_SET_FLD(VPU_HW_BTRS_LNL_D0I3_CONTROL, I3, val);
	else
		val = REG_CLR_FLD(VPU_HW_BTRS_LNL_D0I3_CONTROL, I3, val);
	REGB_WR32(VPU_HW_BTRS_LNL_D0I3_CONTROL, val);

	ret = REGB_POLL_FLD(VPU_HW_BTRS_LNL_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
	if (ret) {
		ivpu_err(vdev, "Failed to sync after D0i3 transition: %d\n", ret);
		return ret;
	}

	return 0;
}

static int d0i3_drive(struct ivpu_device *vdev, bool enable)
{
	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
		return d0i3_drive_mtl(vdev, enable);
	else
		return d0i3_drive_lnl(vdev, enable);
}

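/*
 * D0i3 transitions are requested through the buttress D0I3_CONTROL register
 * and complete asynchronously, hence the INPROGRESS polls on both sides of
 * the I3 bit update in d0i3_drive_mtl() and d0i3_drive_lnl() above.
 */
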
int ivpu_hw_btrs_d0i3_enable(struct ivpu_device *vdev)
{
	int ret;

	if (IVPU_WA(punit_disabled))
		return 0;

	ret = d0i3_drive(vdev, true);
	if (ret)
		ivpu_err(vdev, "Failed to enable D0i3: %d\n", ret);

	udelay(5); /* VPU requires 5 us to complete the transition */

	return ret;
}

int ivpu_hw_btrs_d0i3_disable(struct ivpu_device *vdev)
{
	int ret;

	if (IVPU_WA(punit_disabled))
		return 0;

	ret = d0i3_drive(vdev, false);
	if (ret)
		ivpu_err(vdev, "Failed to disable D0i3: %d\n", ret);

	return ret;
}

int ivpu_hw_btrs_wait_for_clock_res_own_ack(struct ivpu_device *vdev)
{
	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
		return 0;

	return REGB_POLL_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, CLOCK_RESOURCE_OWN_ACK, 1, TIMEOUT_US);
}

void ivpu_hw_btrs_set_port_arbitration_weights_lnl(struct ivpu_device *vdev)
{
	REGB_WR32(VPU_HW_BTRS_LNL_PORT_ARBITRATION_WEIGHTS, WEIGHTS_DEFAULT);
	REGB_WR32(VPU_HW_BTRS_LNL_PORT_ARBITRATION_WEIGHTS_ATS, WEIGHTS_ATS_DEFAULT);
}

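/*
 * IP reset handshake, common to both generations below: wait for any previous
 * reset to finish (TRIGGER == 0), set TRIGGER, then wait for the hardware to
 * clear it again. On LNL, clock relinquish is disabled first.
 */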
static int ip_reset_mtl(struct ivpu_device *vdev)
{
	int ret;
	u32 val;

	ret = REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
	if (ret) {
		ivpu_err(vdev, "Timed out waiting for TRIGGER bit\n");
		return ret;
	}

	val = REGB_RD32(VPU_HW_BTRS_MTL_VPU_IP_RESET);
	val = REG_SET_FLD(VPU_HW_BTRS_MTL_VPU_IP_RESET, TRIGGER, val);
	REGB_WR32(VPU_HW_BTRS_MTL_VPU_IP_RESET, val);

	ret = REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
	if (ret)
		ivpu_err(vdev, "Timed out waiting for RESET completion\n");

	return ret;
}

static int ip_reset_lnl(struct ivpu_device *vdev)
{
	int ret;
	u32 val;

	ivpu_hw_btrs_clock_relinquish_disable_lnl(vdev);

	ret = REGB_POLL_FLD(VPU_HW_BTRS_LNL_IP_RESET, TRIGGER, 0, TIMEOUT_US);
	if (ret) {
		ivpu_err(vdev, "Timed out waiting for TRIGGER bit\n");
		return ret;
	}

	val = REGB_RD32(VPU_HW_BTRS_LNL_IP_RESET);
	val = REG_SET_FLD(VPU_HW_BTRS_LNL_IP_RESET, TRIGGER, val);
	REGB_WR32(VPU_HW_BTRS_LNL_IP_RESET, val);

	ret = REGB_POLL_FLD(VPU_HW_BTRS_LNL_IP_RESET, TRIGGER, 0, TIMEOUT_US);
	if (ret)
		ivpu_err(vdev, "Timed out waiting for RESET completion\n");

	return ret;
}

int ivpu_hw_btrs_ip_reset(struct ivpu_device *vdev)
{
	if (IVPU_WA(punit_disabled))
		return 0;

	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
		return ip_reset_mtl(vdev);
	else
		return ip_reset_lnl(vdev);
}

void ivpu_hw_btrs_profiling_freq_reg_set_lnl(struct ivpu_device *vdev)
{
	u32 val = REGB_RD32(VPU_HW_BTRS_LNL_VPU_STATUS);

	if (vdev->hw->pll.profiling_freq == PLL_PROFILING_FREQ_DEFAULT)
		val = REG_CLR_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, PERF_CLK, val);
	else
		val = REG_SET_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, PERF_CLK, val);

	REGB_WR32(VPU_HW_BTRS_LNL_VPU_STATUS, val);
}

void ivpu_hw_btrs_ats_print_lnl(struct ivpu_device *vdev)
{
	ivpu_dbg(vdev, MISC, "Buttress ATS: %s\n",
		 REGB_RD32(VPU_HW_BTRS_LNL_HM_ATS) ? "Enable" : "Disable");
}

void ivpu_hw_btrs_clock_relinquish_disable_lnl(struct ivpu_device *vdev)
{
	u32 val = REGB_RD32(VPU_HW_BTRS_LNL_VPU_STATUS);

	val = REG_SET_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, DISABLE_CLK_RELINQUISH, val);
	REGB_WR32(VPU_HW_BTRS_LNL_VPU_STATUS, val);
}

bool ivpu_hw_btrs_is_idle(struct ivpu_device *vdev)
{
	u32 val;

	if (IVPU_WA(punit_disabled))
		return true;

	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) {
		val = REGB_RD32(VPU_HW_BTRS_MTL_VPU_STATUS);

		return REG_TEST_FLD(VPU_HW_BTRS_MTL_VPU_STATUS, READY, val) &&
		       REG_TEST_FLD(VPU_HW_BTRS_MTL_VPU_STATUS, IDLE, val);
	} else {
		val = REGB_RD32(VPU_HW_BTRS_LNL_VPU_STATUS);

		return REG_TEST_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, READY, val) &&
		       REG_TEST_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, IDLE, val);
	}
}

int ivpu_hw_btrs_wait_for_idle(struct ivpu_device *vdev)
{
	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
		return REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_STATUS, IDLE, 0x1, IDLE_TIMEOUT_US);
	else
		return REGB_POLL_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, IDLE, 0x1, IDLE_TIMEOUT_US);
}

static u32 pll_config_get_mtl(struct ivpu_device *vdev)
{
	return REGB_RD32(VPU_HW_BTRS_MTL_CURRENT_PLL);
}

static u32 pll_config_get_lnl(struct ivpu_device *vdev)
{
	return REGB_RD32(VPU_HW_BTRS_LNL_PLL_FREQ);
}

static u32 pll_ratio_to_dpu_freq_mtl(u16 ratio)
{
	return (PLL_RATIO_TO_FREQ(ratio) * 2) / 3;
}

static u32 pll_ratio_to_dpu_freq_lnl(u16 ratio)
{
	return PLL_RATIO_TO_FREQ(ratio) / 2;
}

static u32 pll_ratio_to_dpu_freq(struct ivpu_device *vdev, u32 ratio)
{
	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
		return pll_ratio_to_dpu_freq_mtl(ratio);
	else
		return pll_ratio_to_dpu_freq_lnl(ratio);
}

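/*
 * For illustration, using the helpers above: at ratio 0x20 (32) the PLL runs
 * at 32 * 50 MHz = 1600 MHz, which yields a DPU clock of 1600 * 2 / 3 ~= 1066
 * MHz on MTL and 1600 / 2 = 800 MHz on LNL.
 */
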
u32 ivpu_hw_btrs_dpu_max_freq_get(struct ivpu_device *vdev)
{
	return pll_ratio_to_dpu_freq(vdev, vdev->hw->pll.max_ratio);
}

u32 ivpu_hw_btrs_dpu_freq_get(struct ivpu_device *vdev)
{
	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
		return pll_ratio_to_dpu_freq_mtl(pll_config_get_mtl(vdev));
	else
		return pll_ratio_to_dpu_freq_lnl(pll_config_get_lnl(vdev));
}

/* Handler for IRQs from Buttress core (irqB) */
bool ivpu_hw_btrs_irq_handler_mtl(struct ivpu_device *vdev, int irq)
{
	u32 status = REGB_RD32(VPU_HW_BTRS_MTL_INTERRUPT_STAT) & BTRS_MTL_IRQ_MASK;
	bool schedule_recovery = false;

	if (!status)
		return false;

	if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, FREQ_CHANGE, status)) {
		u32 pll = pll_config_get_mtl(vdev);

		ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq, wp %08x, %lu MHz",
			 pll, pll_ratio_to_dpu_freq_mtl(pll) / HZ_PER_MHZ);
	}

	if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, ATS_ERR, status)) {
		ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(VPU_HW_BTRS_MTL_ATS_ERR_LOG_0));
		REGB_WR32(VPU_HW_BTRS_MTL_ATS_ERR_CLEAR, 0x1);
		schedule_recovery = true;
	}

	if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, UFI_ERR, status)) {
		u32 ufi_log = REGB_RD32(VPU_HW_BTRS_MTL_UFI_ERR_LOG);

		ivpu_err(vdev, "UFI_ERR irq (0x%08x) opcode: 0x%02lx axi_id: 0x%02lx cq_id: 0x%03lx",
			 ufi_log, REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, OPCODE, ufi_log),
			 REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, AXI_ID, ufi_log),
			 REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, CQ_ID, ufi_log));
		REGB_WR32(VPU_HW_BTRS_MTL_UFI_ERR_CLEAR, 0x1);
		schedule_recovery = true;
	}

	/* This must be done after interrupts are cleared at the source. */
	if (IVPU_WA(interrupt_clear_with_0))
		/*
		 * Writing 1 triggers an interrupt, so we can't perform read update write.
		 * Clear local interrupt status by writing 0 to all bits.
		 */
		REGB_WR32(VPU_HW_BTRS_MTL_INTERRUPT_STAT, 0x0);
	else
		REGB_WR32(VPU_HW_BTRS_MTL_INTERRUPT_STAT, status);

	if (schedule_recovery)
		ivpu_pm_trigger_recovery(vdev, "Buttress IRQ");

	return true;
}

/* Handler for IRQs from Buttress core (irqB) */
bool ivpu_hw_btrs_irq_handler_lnl(struct ivpu_device *vdev, int irq)
{
	u32 status = REGB_RD32(VPU_HW_BTRS_LNL_INTERRUPT_STAT) & BTRS_LNL_IRQ_MASK;
	bool schedule_recovery = false;

	if (!status)
		return false;

	if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, SURV_ERR, status)) {
		ivpu_dbg(vdev, IRQ, "Survivability IRQ\n");
		queue_work(system_wq, &vdev->irq_dct_work);
	}

	if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, FREQ_CHANGE, status)) {
		u32 pll = pll_config_get_lnl(vdev);

		ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq, wp %08x, %lu MHz",
			 pll, pll_ratio_to_dpu_freq_lnl(pll) / HZ_PER_MHZ);
	}

	if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, ATS_ERR, status)) {
		ivpu_err(vdev, "ATS_ERR LOG1 0x%08x ATS_ERR_LOG2 0x%08x\n",
			 REGB_RD32(VPU_HW_BTRS_LNL_ATS_ERR_LOG1),
			 REGB_RD32(VPU_HW_BTRS_LNL_ATS_ERR_LOG2));
		REGB_WR32(VPU_HW_BTRS_LNL_ATS_ERR_CLEAR, 0x1);
		schedule_recovery = true;
	}

	if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI0_ERR, status)) {
		ivpu_err(vdev, "CFI0_ERR 0x%08x", REGB_RD32(VPU_HW_BTRS_LNL_CFI0_ERR_LOG));
		REGB_WR32(VPU_HW_BTRS_LNL_CFI0_ERR_CLEAR, 0x1);
		schedule_recovery = true;
	}

	if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI1_ERR, status)) {
		ivpu_err(vdev, "CFI1_ERR 0x%08x", REGB_RD32(VPU_HW_BTRS_LNL_CFI1_ERR_LOG));
		REGB_WR32(VPU_HW_BTRS_LNL_CFI1_ERR_CLEAR, 0x1);
		schedule_recovery = true;
	}

	if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR0_ERR, status)) {
		ivpu_err(vdev, "IMR_ERR_CFI0 LOW: 0x%08x HIGH: 0x%08x",
			 REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI0_LOW),
			 REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI0_HIGH));
		REGB_WR32(VPU_HW_BTRS_LNL_IMR_ERR_CFI0_CLEAR, 0x1);
		schedule_recovery = true;
	}

	if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR1_ERR, status)) {
		ivpu_err(vdev, "IMR_ERR_CFI1 LOW: 0x%08x HIGH: 0x%08x",
			 REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI1_LOW),
			 REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI1_HIGH));
		REGB_WR32(VPU_HW_BTRS_LNL_IMR_ERR_CFI1_CLEAR, 0x1);
		schedule_recovery = true;
	}

	/* This must be done after interrupts are cleared at the source. */
	REGB_WR32(VPU_HW_BTRS_LNL_INTERRUPT_STAT, status);

	if (schedule_recovery)
		ivpu_pm_trigger_recovery(vdev, "Buttress IRQ");

	return true;
}

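/*
 * DCT (duty cycle throttling) requests from PCODE arrive via the
 * survivability interrupt handled above; the mailbox shadow register carries
 * the command and the requested state, and the driver reports the applied
 * duty cycle back through the mailbox status register (see
 * ivpu_hw_btrs_dct_set_status() below).
 */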
int ivpu_hw_btrs_dct_get_request(struct ivpu_device *vdev, bool *enable)
{
	u32 val = REGB_RD32(VPU_HW_BTRS_LNL_PCODE_MAILBOX_SHADOW);
	u32 cmd = REG_GET_FLD(VPU_HW_BTRS_LNL_PCODE_MAILBOX_SHADOW, CMD, val);
	u32 param1 = REG_GET_FLD(VPU_HW_BTRS_LNL_PCODE_MAILBOX_SHADOW, PARAM1, val);

	if (cmd != DCT_REQ) {
		ivpu_err_ratelimited(vdev, "Unsupported PCODE command: 0x%x\n", cmd);
		return -EBADR;
	}

	switch (param1) {
	case DCT_ENABLE:
		*enable = true;
		return 0;
	case DCT_DISABLE:
		*enable = false;
		return 0;
	default:
		ivpu_err_ratelimited(vdev, "Invalid PARAM1 value: %u\n", param1);
		return -EINVAL;
	}
}

void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u32 active_percent)
{
	u32 val = 0;
	u32 cmd = enable ? DCT_ENABLE : DCT_DISABLE;

	val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS, CMD, DCT_REQ, val);
	val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS, PARAM1, cmd, val);
	val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS, PARAM2, active_percent, val);

	REGB_WR32(VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS, val);
}

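/*
 * A minimal sketch of the resulting DCT flow, assuming a worker bound to
 * vdev->irq_dct_work as queued by the survivability IRQ above (the handler
 * itself lives outside this file):
 *
 *	bool enable;
 *
 *	if (!ivpu_hw_btrs_dct_get_request(vdev, &enable)) {
 *		(apply the requested duty cycle before acknowledging)
 *		ivpu_hw_btrs_dct_set_status(vdev, enable, active_percent);
 *	}
 */
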
u32 ivpu_hw_btrs_telemetry_offset_get(struct ivpu_device *vdev)
{
	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
		return REGB_RD32(VPU_HW_BTRS_MTL_VPU_TELEMETRY_OFFSET);
	else
		return REGB_RD32(VPU_HW_BTRS_LNL_VPU_TELEMETRY_OFFSET);
}

u32 ivpu_hw_btrs_telemetry_size_get(struct ivpu_device *vdev)
{
	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
		return REGB_RD32(VPU_HW_BTRS_MTL_VPU_TELEMETRY_SIZE);
	else
		return REGB_RD32(VPU_HW_BTRS_LNL_VPU_TELEMETRY_SIZE);
}

u32 ivpu_hw_btrs_telemetry_enable_get(struct ivpu_device *vdev)
{
	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
		return REGB_RD32(VPU_HW_BTRS_MTL_VPU_TELEMETRY_ENABLE);
	else
		return REGB_RD32(VPU_HW_BTRS_LNL_VPU_TELEMETRY_ENABLE);
}

void ivpu_hw_btrs_global_int_disable(struct ivpu_device *vdev)
{
	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
		REGB_WR32(VPU_HW_BTRS_MTL_GLOBAL_INT_MASK, 0x1);
	else
		REGB_WR32(VPU_HW_BTRS_LNL_GLOBAL_INT_MASK, 0x1);
}

void ivpu_hw_btrs_global_int_enable(struct ivpu_device *vdev)
{
	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
		REGB_WR32(VPU_HW_BTRS_MTL_GLOBAL_INT_MASK, 0x0);
	else
		REGB_WR32(VPU_HW_BTRS_LNL_GLOBAL_INT_MASK, 0x0);
}

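/*
 * The buttress interrupt masks are active-high: writing 1 to a LOCAL_INT_MASK
 * bit masks that source and GLOBAL_INT_MASK gates irqB delivery as a whole.
 * Enabling therefore writes the complement of the IRQ mask locally and 0
 * globally; disabling writes all 1s.
 */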
void ivpu_hw_btrs_irq_enable(struct ivpu_device *vdev)
{
	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) {
		REGB_WR32(VPU_HW_BTRS_MTL_LOCAL_INT_MASK, (u32)(~BTRS_MTL_IRQ_MASK));
		REGB_WR32(VPU_HW_BTRS_MTL_GLOBAL_INT_MASK, 0x0);
	} else {
		REGB_WR32(VPU_HW_BTRS_LNL_LOCAL_INT_MASK, (u32)(~BTRS_LNL_IRQ_MASK));
		REGB_WR32(VPU_HW_BTRS_LNL_GLOBAL_INT_MASK, 0x0);
	}
}

void ivpu_hw_btrs_irq_disable(struct ivpu_device *vdev)
{
	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) {
		REGB_WR32(VPU_HW_BTRS_MTL_GLOBAL_INT_MASK, 0x1);
		REGB_WR32(VPU_HW_BTRS_MTL_LOCAL_INT_MASK, BTRS_IRQ_DISABLE_MASK);
	} else {
		REGB_WR32(VPU_HW_BTRS_LNL_GLOBAL_INT_MASK, 0x1);
		REGB_WR32(VPU_HW_BTRS_LNL_LOCAL_INT_MASK, BTRS_IRQ_DISABLE_MASK);
	}
}

static void diagnose_failure_mtl(struct ivpu_device *vdev)
{
	u32 reg = REGB_RD32(VPU_HW_BTRS_MTL_INTERRUPT_STAT) & BTRS_MTL_IRQ_MASK;

	if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, ATS_ERR, reg))
		ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(VPU_HW_BTRS_MTL_ATS_ERR_LOG_0));

	if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, UFI_ERR, reg)) {
		u32 log = REGB_RD32(VPU_HW_BTRS_MTL_UFI_ERR_LOG);

		ivpu_err(vdev, "UFI_ERR irq (0x%08x) opcode: 0x%02lx axi_id: 0x%02lx cq_id: 0x%03lx",
			 log, REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, OPCODE, log),
			 REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, AXI_ID, log),
			 REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, CQ_ID, log));
	}
}

static void diagnose_failure_lnl(struct ivpu_device *vdev)
{
	u32 reg = REGB_RD32(VPU_HW_BTRS_LNL_INTERRUPT_STAT) & BTRS_LNL_IRQ_MASK;

	if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, ATS_ERR, reg)) {
		ivpu_err(vdev, "ATS_ERR_LOG1 0x%08x ATS_ERR_LOG2 0x%08x\n",
			 REGB_RD32(VPU_HW_BTRS_LNL_ATS_ERR_LOG1),
			 REGB_RD32(VPU_HW_BTRS_LNL_ATS_ERR_LOG2));
	}

	if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI0_ERR, reg))
		ivpu_err(vdev, "CFI0_ERR_LOG 0x%08x\n", REGB_RD32(VPU_HW_BTRS_LNL_CFI0_ERR_LOG));

	if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI1_ERR, reg))
		ivpu_err(vdev, "CFI1_ERR_LOG 0x%08x\n", REGB_RD32(VPU_HW_BTRS_LNL_CFI1_ERR_LOG));

	if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR0_ERR, reg))
		ivpu_err(vdev, "IMR_ERR_CFI0 LOW: 0x%08x HIGH: 0x%08x\n",
			 REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI0_LOW),
			 REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI0_HIGH));

	if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR1_ERR, reg))
		ivpu_err(vdev, "IMR_ERR_CFI1 LOW: 0x%08x HIGH: 0x%08x\n",
			 REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI1_LOW),
			 REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI1_HIGH));

	if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, SURV_ERR, reg))
		ivpu_err(vdev, "Survivability IRQ\n");
}

void ivpu_hw_btrs_diagnose_failure(struct ivpu_device *vdev)
{
	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
		return diagnose_failure_mtl(vdev);
	else
		return diagnose_failure_lnl(vdev);
}

int ivpu_hw_btrs_platform_read(struct ivpu_device *vdev)
{
	u32 reg = REGB_RD32(VPU_HW_BTRS_LNL_VPU_STATUS);

	return REG_GET_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, PLATFORM, reg);
}