GitHub Repository: torvalds/linux
Path: drivers/accel/ivpu/ivpu_hw.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 - 2024 Intel Corporation
 */

#include "ivpu_drv.h"
#include "ivpu_hw.h"
#include "ivpu_hw_btrs.h"
#include "ivpu_hw_ip.h"

#include <linux/dmi.h>
#include <linux/fault-inject.h>
#include <linux/pm_runtime.h>

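/*
 * Optional fault injection for HW accesses; configured through the fail_hw
 * module parameter and applied in ivpu_hw_init().
 */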
#ifdef CONFIG_FAULT_INJECTION
DECLARE_FAULT_ATTR(ivpu_hw_failure);

static char *ivpu_fail_hw;
module_param_named_unsafe(fail_hw, ivpu_fail_hw, charp, 0444);
MODULE_PARM_DESC(fail_hw, "<interval>,<probability>,<space>,<times>");
#endif

static char *platform_to_str(u32 platform)
{
	switch (platform) {
	case IVPU_PLATFORM_SILICON:
		return "SILICON";
	case IVPU_PLATFORM_SIMICS:
		return "SIMICS";
	case IVPU_PLATFORM_FPGA:
		return "FPGA";
	case IVPU_PLATFORM_HSLE:
		return "HSLE";
	default:
		return "Invalid platform";
	}
}

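/* Read the platform type from the buttress and cache it if valid */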
static void platform_init(struct ivpu_device *vdev)
{
	int platform = ivpu_hw_btrs_platform_read(vdev);

	ivpu_dbg(vdev, MISC, "Platform type: %s (%d)\n", platform_to_str(platform), platform);

	switch (platform) {
	case IVPU_PLATFORM_SILICON:
	case IVPU_PLATFORM_SIMICS:
	case IVPU_PLATFORM_FPGA:
	case IVPU_PLATFORM_HSLE:
		vdev->platform = platform;
		break;

	default:
		ivpu_err(vdev, "Invalid platform type: %d\n", platform);
		break;
	}
}

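/* Enable workarounds (WAs) based on HW generation, stepping and test mode */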
static void wa_init(struct ivpu_device *vdev)
{
	vdev->wa.punit_disabled = false;
	vdev->wa.clear_runtime_mem = false;

	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
		vdev->wa.interrupt_clear_with_0 = ivpu_hw_btrs_irqs_clear_with_0_mtl(vdev);

	if (ivpu_device_id(vdev) == PCI_DEVICE_ID_LNL &&
	    ivpu_revision(vdev) < IVPU_HW_IP_REV_LNL_B0)
		vdev->wa.disable_clock_relinquish = true;

	if (ivpu_test_mode & IVPU_TEST_MODE_CLK_RELINQ_ENABLE)
		vdev->wa.disable_clock_relinquish = false;

	if (ivpu_test_mode & IVPU_TEST_MODE_CLK_RELINQ_DISABLE)
		vdev->wa.disable_clock_relinquish = true;

	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
		vdev->wa.wp0_during_power_up = true;

	if (ivpu_test_mode & IVPU_TEST_MODE_D0I2_DISABLE)
		vdev->wa.disable_d0i2 = true;

	IVPU_PRINT_WA(punit_disabled);
	IVPU_PRINT_WA(clear_runtime_mem);
	IVPU_PRINT_WA(interrupt_clear_with_0);
	IVPU_PRINT_WA(disable_clock_relinquish);
	IVPU_PRINT_WA(wp0_during_power_up);
	IVPU_PRINT_WA(disable_d0i2);
}

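/*
 * Per-platform timeout values; -1 disables the given timeout. Simulation
 * platforms (FPGA, Simics) need much longer timeouts than silicon.
 */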
static void timeouts_init(struct ivpu_device *vdev)
{
	if (ivpu_test_mode & IVPU_TEST_MODE_DISABLE_TIMEOUTS) {
		vdev->timeout.boot = -1;
		vdev->timeout.jsm = -1;
		vdev->timeout.tdr = -1;
		vdev->timeout.inference = -1;
		vdev->timeout.autosuspend = -1;
		vdev->timeout.d0i3_entry_msg = -1;
	} else if (ivpu_is_fpga(vdev)) {
		vdev->timeout.boot = 50;
		vdev->timeout.jsm = 15000;
		vdev->timeout.tdr = 30000;
		vdev->timeout.inference = 900000;
		vdev->timeout.autosuspend = -1;
		vdev->timeout.d0i3_entry_msg = 500;
		vdev->timeout.state_dump_msg = 10000;
	} else if (ivpu_is_simics(vdev)) {
		vdev->timeout.boot = 50;
		vdev->timeout.jsm = 500;
		vdev->timeout.tdr = 10000;
		vdev->timeout.inference = 300000;
		vdev->timeout.autosuspend = 100;
		vdev->timeout.d0i3_entry_msg = 100;
		vdev->timeout.state_dump_msg = 10;
	} else {
		vdev->timeout.boot = 1000;
		vdev->timeout.jsm = 500;
		vdev->timeout.tdr = 2000;
		vdev->timeout.inference = 60000;
		if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
			vdev->timeout.autosuspend = 10;
		else
			vdev->timeout.autosuspend = 100;
		vdev->timeout.d0i3_entry_msg = 5;
		vdev->timeout.state_dump_msg = 100;
	}
}

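/* Per-priority-band grace periods and process quanta for the HW scheduler (HWS) */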
static void priority_bands_init(struct ivpu_device *vdev)
{
	/* Idle */
	vdev->hw->hws.grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE] = 0;
	vdev->hw->hws.process_grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE] = 50000;
	vdev->hw->hws.process_quantum[VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE] = 160000;
	/* Normal */
	vdev->hw->hws.grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL] = 50000;
	vdev->hw->hws.process_grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL] = 50000;
	vdev->hw->hws.process_quantum[VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL] = 300000;
	/* Focus */
	vdev->hw->hws.grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS] = 50000;
	vdev->hw->hws.process_grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS] = 50000;
	vdev->hw->hws.process_quantum[VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS] = 200000;
	/* Realtime */
	vdev->hw->hws.grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME] = 0;
	vdev->hw->hws.process_grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME] = 50000;
	vdev->hw->hws.process_quantum[VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME] = 200000;
}

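/*
 * NPU address ranges: 37XX has a dedicated DMA range; on later generations
 * the DMA range is shared with the user range.
 */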
static void memory_ranges_init(struct ivpu_device *vdev)
{
	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
		ivpu_hw_range_init(&vdev->hw->ranges.global, 0x80000000, SZ_512M);
		ivpu_hw_range_init(&vdev->hw->ranges.user, 0x88000000, 511 * SZ_1M);
		ivpu_hw_range_init(&vdev->hw->ranges.shave, 0x180000000, SZ_2G);
		ivpu_hw_range_init(&vdev->hw->ranges.dma, 0x200000000, SZ_128G);
	} else {
		ivpu_hw_range_init(&vdev->hw->ranges.global, 0x80000000, SZ_512M);
		ivpu_hw_range_init(&vdev->hw->ranges.shave, 0x80000000, SZ_2G);
		ivpu_hw_range_init(&vdev->hw->ranges.user, 0x100000000, SZ_256G);
		vdev->hw->ranges.dma = vdev->hw->ranges.user;
	}
}

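/* Workpoint (WP) requests are driven through the buttress */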
static int wp_enable(struct ivpu_device *vdev)
{
	return ivpu_hw_btrs_wp_drive(vdev, true);
}

static int wp_disable(struct ivpu_device *vdev)
{
	return ivpu_hw_btrs_wp_drive(vdev, false);
}

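/*
 * Power up the NPU: exit D0i3 and enable the workpoint, then bring up the
 * host subsystem, power domain, AXI and TOP NOC.
 */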
int ivpu_hw_power_up(struct ivpu_device *vdev)
{
	int ret;

	if (IVPU_WA(wp0_during_power_up)) {
		/* WP requests may fail when powering down, so issue WP 0 here */
		ret = wp_disable(vdev);
		if (ret)
			ivpu_warn(vdev, "Failed to disable workpoint: %d\n", ret);
	}

	ret = ivpu_hw_btrs_d0i3_disable(vdev);
	if (ret)
		ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret);

	ret = wp_enable(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to enable workpoint: %d\n", ret);
		return ret;
	}

	if (ivpu_hw_btrs_gen(vdev) >= IVPU_HW_BTRS_LNL) {
		if (IVPU_WA(disable_clock_relinquish))
			ivpu_hw_btrs_clock_relinquish_disable_lnl(vdev);
		ivpu_hw_btrs_profiling_freq_reg_set_lnl(vdev);
		ivpu_hw_btrs_ats_print_lnl(vdev);
	}

	ret = ivpu_hw_ip_host_ss_configure(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to configure host SS: %d\n", ret);
		return ret;
	}

	ivpu_hw_ip_idle_gen_disable(vdev);

	ret = ivpu_hw_btrs_wait_for_clock_res_own_ack(vdev);
	if (ret) {
		ivpu_err(vdev, "Timed out waiting for clock resource own ACK\n");
		return ret;
	}

	ret = ivpu_hw_ip_pwr_domain_enable(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to enable power domain: %d\n", ret);
		return ret;
	}

	ret = ivpu_hw_ip_host_ss_axi_enable(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to enable AXI: %d\n", ret);
		return ret;
	}

	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_LNL)
		ivpu_hw_btrs_set_port_arbitration_weights_lnl(vdev);

	ret = ivpu_hw_ip_top_noc_enable(vdev);
	if (ret)
		ivpu_err(vdev, "Failed to enable TOP NOC: %d\n", ret);

	return ret;
}

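/* Record host and NPU timestamps at D0i3 entry */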
static void save_d0i3_entry_timestamp(struct ivpu_device *vdev)
{
	vdev->hw->d0i3_entry_host_ts = ktime_get_boottime();
	vdev->hw->d0i3_entry_vpu_ts = ivpu_hw_ip_read_perf_timer_counter(vdev);
}

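/* Reset the NPU IP and disable the workpoint; returns -EIO if either step fails */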
int ivpu_hw_reset(struct ivpu_device *vdev)
{
	int ret = 0;

	if (ivpu_hw_btrs_ip_reset(vdev)) {
		ivpu_err(vdev, "Failed to reset NPU IP\n");
		ret = -EIO;
	}

	if (wp_disable(vdev)) {
		ivpu_err(vdev, "Failed to disable workpoint\n");
		ret = -EIO;
	}

	return ret;
}

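/* Record the D0i3 entry timestamps, reset the NPU and enter D0i3 */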
int ivpu_hw_power_down(struct ivpu_device *vdev)
{
	int ret = 0;

	save_d0i3_entry_timestamp(vdev);

	if (!ivpu_hw_is_idle(vdev))
		ivpu_warn(vdev, "NPU not idle during power down\n");

	if (ivpu_hw_reset(vdev)) {
		ivpu_err(vdev, "Failed to reset NPU\n");
		ret = -EIO;
	}

	if (ivpu_hw_btrs_d0i3_enable(vdev)) {
		ivpu_err(vdev, "Failed to enter D0I3\n");
		ret = -EIO;
	}

	return ret;
}

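/* Initialize cached HW state: buttress info, scheduling bands, ranges, WAs and timeouts */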
int ivpu_hw_init(struct ivpu_device *vdev)
{
	ivpu_hw_btrs_info_init(vdev);
	ivpu_hw_btrs_freq_ratios_init(vdev);
	priority_bands_init(vdev);
	memory_ranges_init(vdev);
	platform_init(vdev);
	wa_init(vdev);
	timeouts_init(vdev);
	atomic_set(&vdev->hw->firewall_irq_counter, 0);

#ifdef CONFIG_FAULT_INJECTION
	if (ivpu_fail_hw)
		setup_fault_attr(&ivpu_hw_failure, ivpu_fail_hw);
#endif

	return 0;
}

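/* Boot the firmware: disable snooping, enable the TBU MMU and start the SoC CPU */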
int ivpu_hw_boot_fw(struct ivpu_device *vdev)
{
	int ret;

	ivpu_hw_ip_snoop_disable(vdev);
	ivpu_hw_ip_tbu_mmu_enable(vdev);
	ret = ivpu_hw_ip_soc_cpu_boot(vdev);
	if (ret)
		ivpu_err(vdev, "Failed to boot SOC CPU: %d\n", ret);

	return ret;
}

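/* Select the PLL profiling frequency; 37XX supports only the default frequency */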
void ivpu_hw_profiling_freq_drive(struct ivpu_device *vdev, bool enable)
{
	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
		vdev->hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT;
		return;
	}

	if (enable)
		vdev->hw->pll.profiling_freq = PLL_PROFILING_FREQ_HIGH;
	else
		vdev->hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT;
}

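/* Bind the generation-specific IP and buttress interrupt handlers */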
void ivpu_irq_handlers_init(struct ivpu_device *vdev)
{
	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
		vdev->hw->irq.ip_irq_handler = ivpu_hw_ip_irq_handler_37xx;
	else
		vdev->hw->irq.ip_irq_handler = ivpu_hw_ip_irq_handler_40xx;

	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
		vdev->hw->irq.btrs_irq_handler = ivpu_hw_btrs_irq_handler_mtl;
	else
		vdev->hw->irq.btrs_irq_handler = ivpu_hw_btrs_irq_handler_lnl;
}

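/* Interrupts are disabled in the reverse order of enabling */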
void ivpu_hw_irq_enable(struct ivpu_device *vdev)
{
	ivpu_hw_ip_irq_enable(vdev);
	ivpu_hw_btrs_irq_enable(vdev);
}

void ivpu_hw_irq_disable(struct ivpu_device *vdev)
{
	ivpu_hw_btrs_irq_disable(vdev);
	ivpu_hw_ip_irq_disable(vdev);
}

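/*
 * Top-level IRQ dispatcher: handle buttress interrupts first, then IP
 * interrupts unless the device is idle and the buttress handler consumed
 * the interrupt.
 */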
irqreturn_t ivpu_hw_irq_handler(int irq, void *ptr)
{
	struct ivpu_device *vdev = ptr;
	bool ip_handled, btrs_handled;

	ivpu_hw_btrs_global_int_disable(vdev);

	btrs_handled = ivpu_hw_btrs_irq_handler(vdev, irq);
	if (!ivpu_hw_is_idle(vdev) || !btrs_handled)
		ip_handled = ivpu_hw_ip_irq_handler(vdev, irq);
	else
		ip_handled = false;

	/* Re-enable global interrupts to re-trigger MSI for pending interrupts */
	ivpu_hw_btrs_global_int_enable(vdev);

	if (!ip_handled && !btrs_handled)
		return IRQ_NONE;

	pm_runtime_mark_last_busy(vdev->drm.dev);
	return IRQ_HANDLED;
}