GitHub Repository: torvalds/linux
Path: blob/master/drivers/crypto/hisilicon/qm.c
1
// SPDX-License-Identifier: GPL-2.0
2
/* Copyright (c) 2019 HiSilicon Limited. */
3
#include <asm/page.h>
4
#include <linux/acpi.h>
5
#include <linux/bitmap.h>
6
#include <linux/dma-mapping.h>
7
#include <linux/idr.h>
8
#include <linux/io.h>
9
#include <linux/irqreturn.h>
10
#include <linux/log2.h>
11
#include <linux/pm_runtime.h>
12
#include <linux/seq_file.h>
13
#include <linux/slab.h>
14
#include <linux/uacce.h>
15
#include <linux/uaccess.h>
16
#include <uapi/misc/uacce/hisi_qm.h>
17
#include <linux/hisi_acc_qm.h>
18
#include "qm_common.h"
19
20
/* eq/aeq irq enable */
21
#define QM_VF_AEQ_INT_SOURCE 0x0
22
#define QM_VF_AEQ_INT_MASK 0x4
23
#define QM_VF_EQ_INT_SOURCE 0x8
24
#define QM_VF_EQ_INT_MASK 0xc
25
26
#define QM_IRQ_VECTOR_MASK GENMASK(15, 0)
27
#define QM_IRQ_TYPE_MASK GENMASK(15, 0)
28
#define QM_IRQ_TYPE_SHIFT 16
29
#define QM_ABN_IRQ_TYPE_MASK GENMASK(7, 0)
30
31
/* mailbox */
32
#define QM_MB_PING_ALL_VFS 0xffff
33
#define QM_MB_STATUS_MASK GENMASK(12, 9)
34
#define QM_MB_BUSY_MASK BIT(13)
35
#define QM_MB_MAX_WAIT_TIMEOUT USEC_PER_SEC
36
#define QM_MB_MAX_STOP_TIMEOUT (5 * USEC_PER_SEC)
37
38
/* sqc shift */
39
#define QM_SQ_HOP_NUM_SHIFT 0
40
#define QM_SQ_PAGE_SIZE_SHIFT 4
41
#define QM_SQ_BUF_SIZE_SHIFT 8
42
#define QM_SQ_SQE_SIZE_SHIFT 12
43
#define QM_SQ_PRIORITY_SHIFT 0
44
#define QM_SQ_ORDERS_SHIFT 4
45
#define QM_SQ_TYPE_SHIFT 8
46
#define QM_QC_PASID_ENABLE 0x1
47
#define QM_QC_PASID_ENABLE_SHIFT 7
48
49
#define QM_SQ_TYPE_MASK GENMASK(3, 0)
50
#define QM_SQ_TAIL_IDX(sqc) ((le16_to_cpu((sqc).w11) >> 6) & 0x1)
51
#define QM_SQC_DISABLE_QP (1U << 6)
52
#define QM_XQC_RANDOM_DATA 0xaaaa
53
54
/* cqc shift */
55
#define QM_CQ_HOP_NUM_SHIFT 0
56
#define QM_CQ_PAGE_SIZE_SHIFT 4
57
#define QM_CQ_BUF_SIZE_SHIFT 8
58
#define QM_CQ_CQE_SIZE_SHIFT 12
59
#define QM_CQ_PHASE_SHIFT 0
60
#define QM_CQ_FLAG_SHIFT 1
61
62
#define QM_CQE_PHASE(cqe) (le16_to_cpu((cqe)->w7) & 0x1)
63
#define QM_QC_CQE_SIZE 4
64
#define QM_CQ_TAIL_IDX(cqc) ((le16_to_cpu((cqc).w11) >> 6) & 0x1)
65
66
/* eqc shift */
67
#define QM_EQE_AEQE_SIZE (2UL << 12)
68
#define QM_EQC_PHASE_SHIFT 16
69
70
#define QM_EQE_PHASE(dw0) (((dw0) >> 16) & 0x1)
71
#define QM_EQE_CQN_MASK GENMASK(15, 0)
72
73
#define QM_AEQE_PHASE(dw0) (((dw0) >> 16) & 0x1)
74
#define QM_AEQE_TYPE_SHIFT 17
75
#define QM_AEQE_TYPE_MASK 0xf
76
#define QM_AEQE_CQN_MASK GENMASK(15, 0)
77
#define QM_CQ_OVERFLOW 0
78
#define QM_EQ_OVERFLOW 1
79
#define QM_CQE_ERROR 2
80
81
#define QM_XQ_DEPTH_SHIFT 16
82
#define QM_XQ_DEPTH_MASK GENMASK(15, 0)
83
84
#define QM_DOORBELL_CMD_SQ 0
85
#define QM_DOORBELL_CMD_CQ 1
86
#define QM_DOORBELL_CMD_EQ 2
87
#define QM_DOORBELL_CMD_AEQ 3
88
89
#define QM_DOORBELL_BASE_V1 0x340
90
#define QM_DB_CMD_SHIFT_V1 16
91
#define QM_DB_INDEX_SHIFT_V1 32
92
#define QM_DB_PRIORITY_SHIFT_V1 48
93
#define QM_PAGE_SIZE 0x0034
94
#define QM_QP_DB_INTERVAL 0x10000
95
#define QM_DB_TIMEOUT_CFG 0x100074
96
#define QM_DB_TIMEOUT_SET 0x1fffff
97
98
#define QM_MEM_START_INIT 0x100040
99
#define QM_MEM_INIT_DONE 0x100044
100
#define QM_VFT_CFG_RDY 0x10006c
101
#define QM_VFT_CFG_OP_WR 0x100058
102
#define QM_VFT_CFG_TYPE 0x10005c
103
#define QM_VFT_CFG 0x100060
104
#define QM_VFT_CFG_OP_ENABLE 0x100054
105
#define QM_PM_CTRL 0x100148
106
#define QM_IDLE_DISABLE BIT(9)
107
108
#define QM_SUB_VERSION_ID 0x210
109
110
#define QM_VFT_CFG_DATA_L 0x100064
111
#define QM_VFT_CFG_DATA_H 0x100068
112
#define QM_SQC_VFT_BUF_SIZE (7ULL << 8)
113
#define QM_SQC_VFT_SQC_SIZE (5ULL << 12)
114
#define QM_SQC_VFT_INDEX_NUMBER (1ULL << 16)
115
#define QM_SQC_VFT_START_SQN_SHIFT 28
116
#define QM_SQC_VFT_VALID (1ULL << 44)
117
#define QM_SQC_VFT_SQN_SHIFT 45
118
#define QM_CQC_VFT_BUF_SIZE (7ULL << 8)
119
#define QM_CQC_VFT_SQC_SIZE (5ULL << 12)
120
#define QM_CQC_VFT_INDEX_NUMBER (1ULL << 16)
121
#define QM_CQC_VFT_VALID (1ULL << 28)
122
123
#define QM_SQC_VFT_BASE_SHIFT_V2 28
124
#define QM_SQC_VFT_BASE_MASK_V2 GENMASK(15, 0)
125
#define QM_SQC_VFT_NUM_SHIFT_V2 45
126
#define QM_SQC_VFT_NUM_MASK_V2 GENMASK(9, 0)
127
#define QM_MAX_QC_TYPE 2
128
129
#define QM_ABNORMAL_INT_SOURCE 0x100000
130
#define QM_ABNORMAL_INT_MASK 0x100004
131
#define QM_ABNORMAL_INT_MASK_VALUE 0x7fff
132
#define QM_ABNORMAL_INT_STATUS 0x100008
133
#define QM_ABNORMAL_INT_SET 0x10000c
134
#define QM_ABNORMAL_INF00 0x100010
135
#define QM_FIFO_OVERFLOW_TYPE 0xc0
136
#define QM_FIFO_OVERFLOW_TYPE_SHIFT 6
137
#define QM_FIFO_OVERFLOW_VF 0x3f
138
#define QM_FIFO_OVERFLOW_QP_SHIFT 16
139
#define QM_ABNORMAL_INF01 0x100014
140
#define QM_DB_TIMEOUT_TYPE 0xc0
141
#define QM_DB_TIMEOUT_TYPE_SHIFT 6
142
#define QM_DB_TIMEOUT_VF 0x3f
143
#define QM_DB_TIMEOUT_QP_SHIFT 16
144
#define QM_ABNORMAL_INF02 0x100018
145
#define QM_AXI_POISON_ERR BIT(22)
146
#define QM_RAS_CE_ENABLE 0x1000ec
147
#define QM_RAS_FE_ENABLE 0x1000f0
148
#define QM_RAS_NFE_ENABLE 0x1000f4
149
#define QM_RAS_CE_THRESHOLD 0x1000f8
150
#define QM_RAS_CE_TIMES_PER_IRQ 1
151
#define QM_OOO_SHUTDOWN_SEL 0x1040f8
152
#define QM_AXI_RRESP_ERR BIT(0)
153
#define QM_DB_TIMEOUT BIT(10)
154
#define QM_OF_FIFO_OF BIT(11)
155
#define QM_RAS_AXI_ERROR (BIT(0) | BIT(1) | BIT(12))
156
157
#define QM_RESET_WAIT_TIMEOUT 400
158
#define QM_PEH_VENDOR_ID 0x1000d8
159
#define ACC_VENDOR_ID_VALUE 0x5a5a
160
#define QM_PEH_DFX_INFO0 0x1000fc
161
#define QM_PEH_DFX_INFO1 0x100100
162
#define QM_PEH_DFX_MASK (BIT(0) | BIT(2))
163
#define QM_PEH_MSI_FINISH_MASK GENMASK(19, 16)
164
#define ACC_PEH_SRIOV_CTRL_VF_MSE_SHIFT 3
165
#define ACC_PEH_MSI_DISABLE GENMASK(31, 0)
166
#define ACC_MASTER_GLOBAL_CTRL_SHUTDOWN 0x1
167
#define ACC_MASTER_TRANS_RETURN_RW 3
168
#define ACC_MASTER_TRANS_RETURN 0x300150
169
#define ACC_MASTER_GLOBAL_CTRL 0x300000
170
#define ACC_AM_CFG_PORT_WR_EN 0x30001c
171
#define ACC_AM_ROB_ECC_INT_STS 0x300104
172
#define ACC_ROB_ECC_ERR_MULTPL BIT(1)
173
#define QM_MSI_CAP_ENABLE BIT(16)
174
175
/* interfunction communication */
176
#define QM_IFC_READY_STATUS 0x100128
177
#define QM_IFC_INT_SET_P 0x100130
178
#define QM_IFC_INT_CFG 0x100134
179
#define QM_IFC_INT_SOURCE_P 0x100138
180
#define QM_IFC_INT_SOURCE_V 0x0020
181
#define QM_IFC_INT_MASK 0x0024
182
#define QM_IFC_INT_STATUS 0x0028
183
#define QM_IFC_INT_SET_V 0x002C
184
#define QM_PF2VF_PF_W 0x104700
185
#define QM_VF2PF_PF_R 0x104800
186
#define QM_VF2PF_VF_W 0x320
187
#define QM_PF2VF_VF_R 0x380
188
#define QM_IFC_SEND_ALL_VFS GENMASK(6, 0)
189
#define QM_IFC_INT_SOURCE_CLR GENMASK(63, 0)
190
#define QM_IFC_INT_SOURCE_MASK BIT(0)
191
#define QM_IFC_INT_DISABLE BIT(0)
192
#define QM_IFC_INT_STATUS_MASK BIT(0)
193
#define QM_IFC_INT_SET_MASK BIT(0)
194
#define QM_WAIT_DST_ACK 1000
195
#define QM_MAX_PF_WAIT_COUNT 20
196
#define QM_MAX_VF_WAIT_COUNT 40
197
#define QM_VF_RESET_WAIT_US 20000
198
#define QM_VF_RESET_WAIT_CNT 3000
199
#define QM_VF2PF_REG_SIZE 4
200
#define QM_IFC_CMD_MASK GENMASK(31, 0)
201
#define QM_IFC_DATA_SHIFT 32
202
#define QM_VF_RESET_WAIT_TIMEOUT_US \
203
(QM_VF_RESET_WAIT_US * QM_VF_RESET_WAIT_CNT)
204
205
#define POLL_PERIOD 10
206
#define POLL_TIMEOUT 1000
207
#define WAIT_PERIOD_US_MAX 200
208
#define WAIT_PERIOD_US_MIN 100
209
#define MAX_WAIT_COUNTS 1000
210
#define QM_CACHE_WB_START 0x204
211
#define QM_CACHE_WB_DONE 0x208
212
#define QM_FUNC_CAPS_REG 0x3100
213
#define QM_CAPBILITY_VERSION GENMASK(7, 0)
214
215
#define PCI_BAR_2 2
216
#define PCI_BAR_4 4
217
#define QMC_ALIGN(sz) ALIGN(sz, 32)
218
219
#define QM_DBG_READ_LEN 256
220
#define QM_PCI_COMMAND_INVALID ~0
221
#define QM_RESET_STOP_TX_OFFSET 1
222
#define QM_RESET_STOP_RX_OFFSET 2
223
224
#define WAIT_PERIOD 20
225
#define REMOVE_WAIT_DELAY 10
226
227
#define QM_QOS_PARAM_NUM 2
228
#define QM_QOS_MAX_VAL 1000
229
#define QM_QOS_RATE 100
230
#define QM_QOS_EXPAND_RATE 1000
231
#define QM_SHAPER_CIR_B_MASK GENMASK(7, 0)
232
#define QM_SHAPER_CIR_U_MASK GENMASK(10, 8)
233
#define QM_SHAPER_CIR_S_MASK GENMASK(14, 11)
234
#define QM_SHAPER_FACTOR_CIR_U_SHIFT 8
235
#define QM_SHAPER_FACTOR_CIR_S_SHIFT 11
236
#define QM_SHAPER_FACTOR_CBS_B_SHIFT 15
237
#define QM_SHAPER_FACTOR_CBS_S_SHIFT 19
238
#define QM_SHAPER_CBS_B 1
239
#define QM_SHAPER_VFT_OFFSET 6
240
#define QM_QOS_MIN_ERROR_RATE 5
241
#define QM_SHAPER_MIN_CBS_S 8
242
#define QM_QOS_TICK 0x300U
243
#define QM_QOS_DIVISOR_CLK 0x1f40U
244
#define QM_QOS_MAX_CIR_B 200
245
#define QM_QOS_MIN_CIR_B 100
246
#define QM_QOS_MAX_CIR_U 6
247
#define QM_AUTOSUSPEND_DELAY 3000
248
249
/* abnormal status value for stopping queue */
250
#define QM_STOP_QUEUE_FAIL 1
251
#define QM_DUMP_SQC_FAIL 3
252
#define QM_DUMP_CQC_FAIL 4
253
#define QM_FINISH_WAIT 5
254
255
#define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \
256
(((hop_num) << QM_CQ_HOP_NUM_SHIFT) | \
257
((pg_sz) << QM_CQ_PAGE_SIZE_SHIFT) | \
258
((buf_sz) << QM_CQ_BUF_SIZE_SHIFT) | \
259
((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))
260
261
#define QM_MK_CQC_DW3_V2(cqe_sz, cq_depth) \
262
((((u32)cq_depth) - 1) | ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))
263
264
#define QM_MK_SQC_W13(priority, orders, alg_type) \
265
(((priority) << QM_SQ_PRIORITY_SHIFT) | \
266
((orders) << QM_SQ_ORDERS_SHIFT) | \
267
(((alg_type) & QM_SQ_TYPE_MASK) << QM_SQ_TYPE_SHIFT))
268
269
#define QM_MK_SQC_DW3_V1(hop_num, pg_sz, buf_sz, sqe_sz) \
270
(((hop_num) << QM_SQ_HOP_NUM_SHIFT) | \
271
((pg_sz) << QM_SQ_PAGE_SIZE_SHIFT) | \
272
((buf_sz) << QM_SQ_BUF_SIZE_SHIFT) | \
273
((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))
274
275
#define QM_MK_SQC_DW3_V2(sqe_sz, sq_depth) \
276
((((u32)sq_depth) - 1) | ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))
277
278
enum vft_type {
279
SQC_VFT = 0,
280
CQC_VFT,
281
SHAPER_VFT,
282
};
283
284
enum qm_alg_type {
285
ALG_TYPE_0,
286
ALG_TYPE_1,
287
};
288
289
enum qm_ifc_cmd {
290
QM_PF_FLR_PREPARE = 0x01,
291
QM_PF_SRST_PREPARE,
292
QM_PF_RESET_DONE,
293
QM_VF_PREPARE_DONE,
294
QM_VF_PREPARE_FAIL,
295
QM_VF_START_DONE,
296
QM_VF_START_FAIL,
297
QM_PF_SET_QOS,
298
QM_VF_GET_QOS,
299
};
300
301
enum qm_basic_type {
302
QM_TOTAL_QP_NUM_CAP = 0x0,
303
QM_FUNC_MAX_QP_CAP,
304
QM_XEQ_DEPTH_CAP,
305
QM_QP_DEPTH_CAP,
306
QM_EQ_IRQ_TYPE_CAP,
307
QM_AEQ_IRQ_TYPE_CAP,
308
QM_ABN_IRQ_TYPE_CAP,
309
QM_PF2VF_IRQ_TYPE_CAP,
310
QM_PF_IRQ_NUM_CAP,
311
QM_VF_IRQ_NUM_CAP,
312
};
313
314
enum qm_cap_table_type {
315
QM_CAP_VF = 0x0,
316
QM_AEQE_NUM,
317
QM_SCQE_NUM,
318
QM_EQ_IRQ,
319
QM_AEQ_IRQ,
320
QM_ABNORMAL_IRQ,
321
QM_MB_IRQ,
322
MAX_IRQ_NUM,
323
EXT_BAR_INDEX,
324
};
325
326
static const struct hisi_qm_cap_query_info qm_cap_query_info[] = {
327
{QM_CAP_VF, "QM_CAP_VF ", 0x3100, 0x0, 0x0, 0x6F01},
328
{QM_AEQE_NUM, "QM_AEQE_NUM ", 0x3104, 0x800, 0x4000800, 0x4000800},
329
{QM_SCQE_NUM, "QM_SCQE_NUM ",
330
0x3108, 0x4000400, 0x4000400, 0x4000400},
331
{QM_EQ_IRQ, "QM_EQ_IRQ ", 0x310c, 0x10000, 0x10000, 0x10000},
332
{QM_AEQ_IRQ, "QM_AEQ_IRQ ", 0x3110, 0x0, 0x10001, 0x10001},
333
{QM_ABNORMAL_IRQ, "QM_ABNORMAL_IRQ ", 0x3114, 0x0, 0x10003, 0x10003},
334
{QM_MB_IRQ, "QM_MB_IRQ ", 0x3118, 0x0, 0x0, 0x10002},
335
{MAX_IRQ_NUM, "MAX_IRQ_NUM ", 0x311c, 0x10001, 0x40002, 0x40003},
336
{EXT_BAR_INDEX, "EXT_BAR_INDEX ", 0x3120, 0x0, 0x0, 0x14},
337
};
338
339
static const struct hisi_qm_cap_info qm_cap_info_comm[] = {
340
{QM_SUPPORT_DB_ISOLATION, 0x30, 0, BIT(0), 0x0, 0x0, 0x0},
341
{QM_SUPPORT_FUNC_QOS, 0x3100, 0, BIT(8), 0x0, 0x0, 0x1},
342
{QM_SUPPORT_STOP_QP, 0x3100, 0, BIT(9), 0x0, 0x0, 0x1},
343
{QM_SUPPORT_STOP_FUNC, 0x3100, 0, BIT(10), 0x0, 0x0, 0x1},
344
{QM_SUPPORT_MB_COMMAND, 0x3100, 0, BIT(11), 0x0, 0x0, 0x1},
345
{QM_SUPPORT_SVA_PREFETCH, 0x3100, 0, BIT(14), 0x0, 0x0, 0x1},
346
{QM_SUPPORT_DAE, 0x3100, 0, BIT(15), 0x0, 0x0, 0x0},
347
};
348
349
static const struct hisi_qm_cap_info qm_cap_info_pf[] = {
350
{QM_SUPPORT_RPM, 0x3100, 0, BIT(13), 0x0, 0x0, 0x1},
351
};
352
353
static const struct hisi_qm_cap_info qm_cap_info_vf[] = {
354
{QM_SUPPORT_RPM, 0x3100, 0, BIT(12), 0x0, 0x0, 0x0},
355
};
356
357
static const struct hisi_qm_cap_info qm_basic_info[] = {
358
{QM_TOTAL_QP_NUM_CAP, 0x100158, 0, GENMASK(10, 0), 0x1000, 0x400, 0x400},
359
{QM_FUNC_MAX_QP_CAP, 0x100158, 11, GENMASK(10, 0), 0x1000, 0x400, 0x400},
360
{QM_XEQ_DEPTH_CAP, 0x3104, 0, GENMASK(31, 0), 0x800, 0x4000800, 0x4000800},
361
{QM_QP_DEPTH_CAP, 0x3108, 0, GENMASK(31, 0), 0x4000400, 0x4000400, 0x4000400},
362
{QM_EQ_IRQ_TYPE_CAP, 0x310c, 0, GENMASK(31, 0), 0x10000, 0x10000, 0x10000},
363
{QM_AEQ_IRQ_TYPE_CAP, 0x3110, 0, GENMASK(31, 0), 0x0, 0x10001, 0x10001},
364
{QM_ABN_IRQ_TYPE_CAP, 0x3114, 0, GENMASK(31, 0), 0x0, 0x10003, 0x10003},
365
{QM_PF2VF_IRQ_TYPE_CAP, 0x3118, 0, GENMASK(31, 0), 0x0, 0x0, 0x10002},
366
{QM_PF_IRQ_NUM_CAP, 0x311c, 16, GENMASK(15, 0), 0x1, 0x4, 0x4},
367
{QM_VF_IRQ_NUM_CAP, 0x311c, 0, GENMASK(15, 0), 0x1, 0x2, 0x3},
368
};
369
370
struct qm_mailbox {
371
__le16 w0;
372
__le16 queue_num;
373
__le32 base_l;
374
__le32 base_h;
375
__le32 rsvd;
376
};
377
378
struct qm_doorbell {
379
__le16 queue_num;
380
__le16 cmd;
381
__le16 index;
382
__le16 priority;
383
};
384
385
struct hisi_qm_resource {
386
struct hisi_qm *qm;
387
int distance;
388
struct list_head list;
389
};
390
391
/**
392
* struct qm_hw_err - Structure describing the device errors
393
* @list: hardware error list
394
* @timestamp: timestamp when the error occurred
395
*/
396
struct qm_hw_err {
397
struct list_head list;
398
unsigned long long timestamp;
399
};
400
401
struct hisi_qm_hw_ops {
402
int (*get_vft)(struct hisi_qm *qm, u32 *base, u32 *number);
403
void (*qm_db)(struct hisi_qm *qm, u16 qn,
404
u8 cmd, u16 index, u8 priority);
405
int (*debug_init)(struct hisi_qm *qm);
406
void (*hw_error_init)(struct hisi_qm *qm);
407
void (*hw_error_uninit)(struct hisi_qm *qm);
408
enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm);
409
int (*set_msi)(struct hisi_qm *qm, bool set);
410
411
/* (u64)msg = (u32)data << 32 | (enum qm_ifc_cmd)cmd */
412
int (*set_ifc_begin)(struct hisi_qm *qm, enum qm_ifc_cmd cmd, u32 data, u32 fun_num);
413
void (*set_ifc_end)(struct hisi_qm *qm);
414
int (*get_ifc)(struct hisi_qm *qm, enum qm_ifc_cmd *cmd, u32 *data, u32 fun_num);
415
};
416
417
struct hisi_qm_hw_error {
418
u32 int_msk;
419
const char *msg;
420
};
421
422
static const struct hisi_qm_hw_error qm_hw_error[] = {
423
{ .int_msk = BIT(0), .msg = "qm_axi_rresp" },
424
{ .int_msk = BIT(1), .msg = "qm_axi_bresp" },
425
{ .int_msk = BIT(2), .msg = "qm_ecc_mbit" },
426
{ .int_msk = BIT(3), .msg = "qm_ecc_1bit" },
427
{ .int_msk = BIT(4), .msg = "qm_acc_get_task_timeout" },
428
{ .int_msk = BIT(5), .msg = "qm_acc_do_task_timeout" },
429
{ .int_msk = BIT(6), .msg = "qm_acc_wb_not_ready_timeout" },
430
{ .int_msk = BIT(7), .msg = "qm_sq_cq_vf_invalid" },
431
{ .int_msk = BIT(8), .msg = "qm_cq_vf_invalid" },
432
{ .int_msk = BIT(9), .msg = "qm_sq_vf_invalid" },
433
{ .int_msk = BIT(10), .msg = "qm_db_timeout" },
434
{ .int_msk = BIT(11), .msg = "qm_of_fifo_of" },
435
{ .int_msk = BIT(12), .msg = "qm_db_random_invalid" },
436
{ .int_msk = BIT(13), .msg = "qm_mailbox_timeout" },
437
{ .int_msk = BIT(14), .msg = "qm_flr_timeout" },
438
};
439
440
static const char * const qm_db_timeout[] = {
441
"sq", "cq", "eq", "aeq",
442
};
443
444
static const char * const qm_fifo_overflow[] = {
445
"cq", "eq", "aeq",
446
};
447
448
struct qm_typical_qos_table {
449
u32 start;
450
u32 end;
451
u32 val;
452
};
453
454
/* the qos step is 100 */
455
static struct qm_typical_qos_table shaper_cir_s[] = {
456
{100, 100, 4},
457
{200, 200, 3},
458
{300, 500, 2},
459
{600, 1000, 1},
460
{1100, 100000, 0},
461
};
462
463
static struct qm_typical_qos_table shaper_cbs_s[] = {
464
{100, 200, 9},
465
{300, 500, 11},
466
{600, 1000, 12},
467
{1100, 10000, 16},
468
{10100, 25000, 17},
469
{25100, 50000, 18},
470
{50100, 100000, 19}
471
};
472
473
static void qm_irqs_unregister(struct hisi_qm *qm);
474
static int qm_reset_device(struct hisi_qm *qm);
475
int hisi_qm_q_num_set(const char *val, const struct kernel_param *kp,
476
unsigned int device)
477
{
478
struct pci_dev *pdev;
479
u32 n, q_num;
480
int ret;
481
482
if (!val)
483
return -EINVAL;
484
485
pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, device, NULL);
486
if (!pdev) {
487
q_num = min_t(u32, QM_QNUM_V1, QM_QNUM_V2);
488
pr_info("No device found currently, suppose queue number is %u\n",
489
q_num);
490
} else {
491
if (pdev->revision == QM_HW_V1)
492
q_num = QM_QNUM_V1;
493
else
494
q_num = QM_QNUM_V2;
495
496
pci_dev_put(pdev);
497
}
498
499
ret = kstrtou32(val, 10, &n);
500
if (ret || n < QM_MIN_QNUM || n > q_num)
501
return -EINVAL;
502
503
return param_set_int(val, kp);
504
}
505
EXPORT_SYMBOL_GPL(hisi_qm_q_num_set);
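/*
 * Usage sketch (the wrapper, parameter and device-ID names below are
 * illustrative, not taken from this file): accelerator drivers typically wrap
 * hisi_qm_q_num_set() in a kernel_param_ops .set callback so a queue-number
 * module parameter is validated against the detected hardware revision.
 *
 *	static int pf_q_num_set(const char *val, const struct kernel_param *kp)
 *	{
 *		return hisi_qm_q_num_set(val, kp, EXAMPLE_PF_PCI_DEVICE_ID);
 *	}
 *
 *	static const struct kernel_param_ops pf_q_num_ops = {
 *		.set = pf_q_num_set,
 *		.get = param_get_int,
 *	};
 *
 *	static u32 pf_q_num = 64;
 *	module_param_cb(pf_q_num, &pf_q_num_ops, &pf_q_num, 0444);
 */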
506
507
static u32 qm_get_hw_error_status(struct hisi_qm *qm)
508
{
509
return readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
510
}
511
512
static u32 qm_get_dev_err_status(struct hisi_qm *qm)
513
{
514
return qm->err_ini->get_dev_hw_err_status(qm);
515
}
516
517
/* Check if the error causes the master ooo block */
518
static bool qm_check_dev_error(struct hisi_qm *qm)
519
{
520
struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
521
u32 err_status;
522
523
if (pf_qm->fun_type == QM_HW_VF)
524
return false;
525
526
err_status = qm_get_hw_error_status(pf_qm);
527
if (err_status & pf_qm->err_info.qm_err.shutdown_mask)
528
return true;
529
530
if (pf_qm->err_ini->dev_is_abnormal)
531
return pf_qm->err_ini->dev_is_abnormal(pf_qm);
532
533
return false;
534
}
535
536
static int qm_wait_reset_finish(struct hisi_qm *qm)
537
{
538
int delay = 0;
539
540
/* All reset requests need to be queued for processing */
541
while (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
542
msleep(++delay);
543
if (delay > QM_RESET_WAIT_TIMEOUT)
544
return -EBUSY;
545
}
546
547
return 0;
548
}
549
550
static int qm_reset_prepare_ready(struct hisi_qm *qm)
551
{
552
struct pci_dev *pdev = qm->pdev;
553
struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
554
555
/*
556
* PF and VF on the host do not support resetting at the
557
* same time on Kunpeng920.
558
*/
559
if (qm->ver < QM_HW_V3)
560
return qm_wait_reset_finish(pf_qm);
561
562
return qm_wait_reset_finish(qm);
563
}
564
565
static void qm_reset_bit_clear(struct hisi_qm *qm)
566
{
567
struct pci_dev *pdev = qm->pdev;
568
struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
569
570
if (qm->ver < QM_HW_V3)
571
clear_bit(QM_RESETTING, &pf_qm->misc_ctl);
572
573
clear_bit(QM_RESETTING, &qm->misc_ctl);
574
}
575
576
static void qm_mb_pre_init(struct qm_mailbox *mailbox, u8 cmd,
577
u64 base, u16 queue, bool op)
578
{
579
mailbox->w0 = cpu_to_le16((cmd) |
580
((op) ? 0x1 << QM_MB_OP_SHIFT : 0) |
581
(0x1 << QM_MB_BUSY_SHIFT));
582
mailbox->queue_num = cpu_to_le16(queue);
583
mailbox->base_l = cpu_to_le32(lower_32_bits(base));
584
mailbox->base_h = cpu_to_le32(upper_32_bits(base));
585
mailbox->rsvd = 0;
586
}
587
588
/*
589
* The mailbox is 128 bits and requires a single read/write operation.
590
* Since there is no general 128-bit IO memory access API in the current
591
* ARM64 architecture, this needs to be implemented in the driver.
592
*/
593
static struct qm_mailbox qm_mb_read(struct hisi_qm *qm)
594
{
595
struct qm_mailbox mailbox = {0};
596
597
#if IS_ENABLED(CONFIG_ARM64)
598
const void __iomem *fun_base = qm->io_base + QM_MB_CMD_SEND_BASE;
599
unsigned long tmp0, tmp1;
600
601
asm volatile("ldp %0, %1, %3\n"
602
"stp %0, %1, %2\n"
603
: "=&r" (tmp0),
604
"=&r" (tmp1),
605
"+Q" (mailbox)
606
: "Q" (*((char __iomem *)fun_base))
607
: "memory");
608
#endif
609
610
return mailbox;
611
}
612
613
/* 128 bits must be written to the hardware at one time to trigger a mailbox */
614
static void qm_mb_write(struct hisi_qm *qm, const void *src)
615
{
616
#if IS_ENABLED(CONFIG_ARM64)
617
void __iomem *fun_base = qm->io_base + QM_MB_CMD_SEND_BASE;
618
unsigned long tmp0, tmp1;
619
/*
620
* The dmb oshst instruction ensures that the data in the
621
* mailbox is written before it is sent to the hardware.
622
*/
623
asm volatile("ldp %0, %1, %3\n"
624
"dmb oshst\n"
625
"stp %0, %1, %2\n"
626
: "=&r" (tmp0),
627
"=&r" (tmp1),
628
"+Q" (*((char __iomem *)fun_base))
629
: "Q" (*((char *)src))
630
: "memory");
631
#endif
632
}
633
634
int hisi_qm_wait_mb_ready(struct hisi_qm *qm)
635
{
636
struct qm_mailbox mailbox = {0};
637
int ret;
638
639
ret = read_poll_timeout(qm_mb_read, mailbox,
640
!(le16_to_cpu(mailbox.w0) & QM_MB_BUSY_MASK),
641
POLL_PERIOD, POLL_TIMEOUT,
642
true, qm);
643
if (ret)
644
dev_err(&qm->pdev->dev, "QM mailbox is busy, cannot start a new operation!\n");
645
646
return ret;
647
}
648
EXPORT_SYMBOL_GPL(hisi_qm_wait_mb_ready);
649
650
static int qm_wait_mb_finish(struct hisi_qm *qm, struct qm_mailbox *mailbox, u32 wait_timeout)
651
{
652
struct device *dev = &qm->pdev->dev;
653
int ret;
654
655
ret = read_poll_timeout(qm_mb_read, *mailbox,
656
!(le16_to_cpu(mailbox->w0) & QM_MB_BUSY_MASK),
657
POLL_PERIOD, wait_timeout,
658
true, qm);
659
if (ret) {
660
dev_err(dev, "QM mailbox operation timeout!\n");
661
return ret;
662
}
663
664
if (le16_to_cpu(mailbox->w0) & QM_MB_STATUS_MASK) {
665
dev_err(dev, "QM mailbox operation failed!\n");
666
return -EIO;
667
}
668
669
return 0;
670
}
671
672
static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox, u32 wait_timeout)
673
{
674
int ret;
675
676
ret = hisi_qm_wait_mb_ready(qm);
677
if (ret)
678
goto mb_err_cnt_increase;
679
680
qm_mb_write(qm, mailbox);
681
682
ret = qm_wait_mb_finish(qm, mailbox, wait_timeout);
683
if (ret)
684
goto mb_err_cnt_increase;
685
686
return 0;
687
688
mb_err_cnt_increase:
689
atomic64_inc(&qm->debug.dfx.mb_err_cnt);
690
return ret;
691
}
692
693
int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
694
bool op)
695
{
696
struct qm_mailbox mailbox;
697
u32 wait_timeout;
698
int ret;
699
700
if (cmd == QM_MB_CMD_STOP_QP || cmd == QM_MB_CMD_FLUSH_QM)
701
wait_timeout = QM_MB_MAX_STOP_TIMEOUT;
702
else
703
wait_timeout = QM_MB_MAX_WAIT_TIMEOUT;
704
705
/* There is no need to continue if master OOO is blocked. */
706
if (qm_check_dev_error(qm)) {
707
dev_err(&qm->pdev->dev, "QM mailbox operation failed since the device is abnormal!\n");
708
return -EIO;
709
}
710
711
qm_mb_pre_init(&mailbox, cmd, dma_addr, queue, op);
712
713
mutex_lock(&qm->mailbox_lock);
714
ret = qm_mb_nolock(qm, &mailbox, wait_timeout);
715
mutex_unlock(&qm->mailbox_lock);
716
717
return ret;
718
}
719
EXPORT_SYMBOL_GPL(hisi_qm_mb);
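/*
 * Usage sketch: mailbox commands in this file go through this helper, e.g.
 * qm_stop_qp() below issues
 *
 *	hisi_qm_mb(qp->qm, QM_MB_CMD_STOP_QP, 0, qp->qp_id, 0);
 *
 * Commands that read data back use hisi_qm_mb_read() instead.
 */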
720
721
int hisi_qm_mb_read(struct hisi_qm *qm, u64 *base, u8 cmd, u16 queue)
722
{
723
struct qm_mailbox mailbox;
724
int ret;
725
726
qm_mb_pre_init(&mailbox, cmd, 0, queue, 1);
727
mutex_lock(&qm->mailbox_lock);
728
ret = qm_mb_nolock(qm, &mailbox, QM_MB_MAX_WAIT_TIMEOUT);
729
mutex_unlock(&qm->mailbox_lock);
730
if (ret)
731
return ret;
732
733
*base = le32_to_cpu(mailbox.base_l) |
734
((u64)le32_to_cpu(mailbox.base_h) << 32);
735
736
return 0;
737
}
738
EXPORT_SYMBOL_GPL(hisi_qm_mb_read);
739
740
/* op 0: set xqc information to hardware, 1: get xqc information from hardware. */
741
int qm_set_and_get_xqc(struct hisi_qm *qm, u8 cmd, void *xqc, u32 qp_id, bool op)
742
{
743
struct qm_mailbox mailbox;
744
dma_addr_t xqc_dma;
745
void *tmp_xqc;
746
size_t size;
747
int ret;
748
749
switch (cmd) {
750
case QM_MB_CMD_SQC:
751
size = sizeof(struct qm_sqc);
752
tmp_xqc = qm->xqc_buf.sqc;
753
xqc_dma = qm->xqc_buf.sqc_dma;
754
break;
755
case QM_MB_CMD_CQC:
756
size = sizeof(struct qm_cqc);
757
tmp_xqc = qm->xqc_buf.cqc;
758
xqc_dma = qm->xqc_buf.cqc_dma;
759
break;
760
case QM_MB_CMD_EQC:
761
size = sizeof(struct qm_eqc);
762
tmp_xqc = qm->xqc_buf.eqc;
763
xqc_dma = qm->xqc_buf.eqc_dma;
764
break;
765
case QM_MB_CMD_AEQC:
766
size = sizeof(struct qm_aeqc);
767
tmp_xqc = qm->xqc_buf.aeqc;
768
xqc_dma = qm->xqc_buf.aeqc_dma;
769
break;
770
default:
771
dev_err(&qm->pdev->dev, "unknown mailbox cmd %u\n", cmd);
772
return -EINVAL;
773
}
774
775
/* Setting xqc will fail if master OOO is blocked. */
776
if (qm_check_dev_error(qm)) {
777
dev_err(&qm->pdev->dev, "failed to send mailbox since the device is abnormal!\n");
778
return -EIO;
779
}
780
781
mutex_lock(&qm->mailbox_lock);
782
if (!op)
783
memcpy(tmp_xqc, xqc, size);
784
785
qm_mb_pre_init(&mailbox, cmd, xqc_dma, qp_id, op);
786
ret = qm_mb_nolock(qm, &mailbox, QM_MB_MAX_WAIT_TIMEOUT);
787
if (!ret && op)
788
memcpy(xqc, tmp_xqc, size);
789
790
mutex_unlock(&qm->mailbox_lock);
791
792
return ret;
793
}
794
795
static void qm_db_v1(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
796
{
797
u64 doorbell;
798
799
doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V1) |
800
((u64)index << QM_DB_INDEX_SHIFT_V1) |
801
((u64)priority << QM_DB_PRIORITY_SHIFT_V1);
802
803
writeq(doorbell, qm->io_base + QM_DOORBELL_BASE_V1);
804
}
805
806
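/*
 * V2 doorbell: SQ/CQ doorbells are written to the per-queue doorbell page
 * (db_io_base + qn * db_interval), while EQ/AEQ doorbells go to the main
 * register space. The 64-bit value packs the queue number, command, random
 * data, index and priority.
 */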
static void qm_db_v2(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
807
{
808
void __iomem *io_base = qm->io_base;
809
u16 randata = 0;
810
u64 doorbell;
811
812
if (cmd == QM_DOORBELL_CMD_SQ || cmd == QM_DOORBELL_CMD_CQ)
813
io_base = qm->db_io_base + (u64)qn * qm->db_interval +
814
QM_DOORBELL_SQ_CQ_BASE_V2;
815
else
816
io_base += QM_DOORBELL_EQ_AEQ_BASE_V2;
817
818
doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V2) |
819
((u64)randata << QM_DB_RAND_SHIFT_V2) |
820
((u64)index << QM_DB_INDEX_SHIFT_V2) |
821
((u64)priority << QM_DB_PRIORITY_SHIFT_V2);
822
823
writeq(doorbell, io_base);
824
}
825
826
static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
827
{
828
dev_dbg(&qm->pdev->dev, "QM doorbell request: qn=%u, cmd=%u, index=%u\n",
829
qn, cmd, index);
830
831
qm->ops->qm_db(qm, qn, cmd, index, priority);
832
}
833
834
static void qm_disable_clock_gate(struct hisi_qm *qm)
835
{
836
u32 val;
837
838
/* if qm enables clock gating in Kunpeng930, qos will be inaccurate. */
839
if (qm->ver < QM_HW_V3)
840
return;
841
842
val = readl(qm->io_base + QM_PM_CTRL);
843
val |= QM_IDLE_DISABLE;
844
writel(val, qm->io_base + QM_PM_CTRL);
845
}
846
847
static int qm_dev_mem_reset(struct hisi_qm *qm)
848
{
849
u32 val;
850
851
writel(0x1, qm->io_base + QM_MEM_START_INIT);
852
return readl_relaxed_poll_timeout(qm->io_base + QM_MEM_INIT_DONE, val,
853
val & BIT(0), POLL_PERIOD,
854
POLL_TIMEOUT);
855
}
856
857
/**
858
* hisi_qm_get_hw_info() - Get device information.
859
* @qm: The qm from which to get information.
860
* @info_table: Array for storing device information.
861
* @index: Index in info_table.
862
* @is_read: Whether to read from the register; 0 means reading from the register is not supported.
863
*
864
* This function returns device information the caller needs.
865
*/
866
u32 hisi_qm_get_hw_info(struct hisi_qm *qm,
867
const struct hisi_qm_cap_info *info_table,
868
u32 index, bool is_read)
869
{
870
u32 val;
871
872
switch (qm->ver) {
873
case QM_HW_V1:
874
return info_table[index].v1_val;
875
case QM_HW_V2:
876
return info_table[index].v2_val;
877
default:
878
if (!is_read)
879
return info_table[index].v3_val;
880
881
val = readl(qm->io_base + info_table[index].offset);
882
return (val >> info_table[index].shift) & info_table[index].mask;
883
}
884
}
885
EXPORT_SYMBOL_GPL(hisi_qm_get_hw_info);
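/*
 * Usage sketch: callers index one of the capability tables above, e.g.
 * qm_get_irq_num() below calls
 *
 *	hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF_IRQ_NUM_CAP, qm->cap_ver);
 *
 * For QM_HW_V1/V2 the fixed table value is returned; on newer hardware the
 * capability register is read when is_read is set, otherwise the v3 default
 * applies.
 */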
886
887
u32 hisi_qm_get_cap_value(struct hisi_qm *qm,
888
const struct hisi_qm_cap_query_info *info_table,
889
u32 index, bool is_read)
890
{
891
u32 val;
892
893
switch (qm->ver) {
894
case QM_HW_V1:
895
return info_table[index].v1_val;
896
case QM_HW_V2:
897
return info_table[index].v2_val;
898
default:
899
if (!is_read)
900
return info_table[index].v3_val;
901
902
val = readl(qm->io_base + info_table[index].offset);
903
return val;
904
}
905
}
906
EXPORT_SYMBOL_GPL(hisi_qm_get_cap_value);
907
908
static void qm_get_xqc_depth(struct hisi_qm *qm, u16 *low_bits,
909
u16 *high_bits, enum qm_basic_type type)
910
{
911
u32 depth;
912
913
depth = hisi_qm_get_hw_info(qm, qm_basic_info, type, qm->cap_ver);
914
*low_bits = depth & QM_XQ_DEPTH_MASK;
915
*high_bits = (depth >> QM_XQ_DEPTH_SHIFT) & QM_XQ_DEPTH_MASK;
916
}
917
918
int hisi_qm_set_algs(struct hisi_qm *qm, u64 alg_msk, const struct qm_dev_alg *dev_algs,
919
u32 dev_algs_size)
920
{
921
struct device *dev = &qm->pdev->dev;
922
char *algs, *ptr;
923
int i;
924
925
if (!qm->uacce)
926
return 0;
927
928
if (dev_algs_size >= QM_DEV_ALG_MAX_LEN) {
929
dev_err(dev, "algs size %u is equal to or larger than %d.\n",
930
dev_algs_size, QM_DEV_ALG_MAX_LEN);
931
return -EINVAL;
932
}
933
934
algs = devm_kzalloc(dev, QM_DEV_ALG_MAX_LEN, GFP_KERNEL);
935
if (!algs)
936
return -ENOMEM;
937
938
for (i = 0; i < dev_algs_size; i++)
939
if (alg_msk & dev_algs[i].alg_msk)
940
strcat(algs, dev_algs[i].alg);
941
942
ptr = strrchr(algs, '\n');
943
if (ptr)
944
*ptr = '\0';
945
946
qm->uacce->algs = algs;
947
948
return 0;
949
}
950
EXPORT_SYMBOL_GPL(hisi_qm_set_algs);
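/*
 * Illustrative caller sketch (the table below is an example, not taken from a
 * real driver): each qm_dev_alg entry pairs a capability bit with a
 * newline-terminated algorithm name; matching names are concatenated and the
 * trailing newline is stripped before being exposed via qm->uacce->algs.
 *
 *	static const struct qm_dev_alg example_dev_algs[] = {
 *		{ .alg_msk = BIT(0), .alg = "alg_a\n" },
 *		{ .alg_msk = BIT(1), .alg = "alg_b\n" },
 *	};
 *
 *	hisi_qm_set_algs(qm, alg_msk, example_dev_algs,
 *			 ARRAY_SIZE(example_dev_algs));
 */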
951
952
static u32 qm_get_irq_num(struct hisi_qm *qm)
953
{
954
if (qm->fun_type == QM_HW_PF)
955
return hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF_IRQ_NUM_CAP, qm->cap_ver);
956
957
return hisi_qm_get_hw_info(qm, qm_basic_info, QM_VF_IRQ_NUM_CAP, qm->cap_ver);
958
}
959
960
static int qm_pm_get_sync(struct hisi_qm *qm)
961
{
962
struct device *dev = &qm->pdev->dev;
963
int ret;
964
965
if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
966
return 0;
967
968
ret = pm_runtime_resume_and_get(dev);
969
if (ret < 0) {
970
dev_err(dev, "failed to get_sync(%d).\n", ret);
971
return ret;
972
}
973
974
return 0;
975
}
976
977
static void qm_pm_put_sync(struct hisi_qm *qm)
978
{
979
struct device *dev = &qm->pdev->dev;
980
981
if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
982
return;
983
984
pm_runtime_put_autosuspend(dev);
985
}
986
987
static void qm_cq_head_update(struct hisi_qp *qp)
988
{
989
if (qp->qp_status.cq_head == qp->cq_depth - 1) {
990
qp->qp_status.cqc_phase = !qp->qp_status.cqc_phase;
991
qp->qp_status.cq_head = 0;
992
} else {
993
qp->qp_status.cq_head++;
994
}
995
}
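/*
 * Completion queue convention used below: a CQE is valid while its phase bit
 * (QM_CQE_PHASE) matches qp_status.cqc_phase; qm_cq_head_update() toggles
 * cqc_phase each time the CQ head wraps, so entries left over from the
 * previous pass are ignored.
 */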
996
997
static void qm_poll_req_cb(struct hisi_qp *qp)
998
{
999
struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head;
1000
struct hisi_qm *qm = qp->qm;
1001
1002
while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) {
1003
dma_rmb();
1004
qp->req_cb(qp, qp->sqe + qm->sqe_size *
1005
le16_to_cpu(cqe->sq_head));
1006
qm_cq_head_update(qp);
1007
cqe = qp->cqe + qp->qp_status.cq_head;
1008
qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ,
1009
qp->qp_status.cq_head, 0);
1010
atomic_dec(&qp->qp_status.used);
1011
1012
cond_resched();
1013
}
1014
1015
/* set c_flag */
1016
qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, qp->qp_status.cq_head, 1);
1017
}
1018
1019
static void qm_work_process(struct work_struct *work)
1020
{
1021
struct hisi_qm_poll_data *poll_data =
1022
container_of(work, struct hisi_qm_poll_data, work);
1023
struct hisi_qm *qm = poll_data->qm;
1024
u16 eqe_num = poll_data->eqe_num;
1025
struct hisi_qp *qp;
1026
int i;
1027
1028
for (i = eqe_num - 1; i >= 0; i--) {
1029
qp = &qm->qp_array[poll_data->qp_finish_id[i]];
1030
if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP))
1031
continue;
1032
1033
if (qp->event_cb) {
1034
qp->event_cb(qp);
1035
continue;
1036
}
1037
1038
if (likely(qp->req_cb))
1039
qm_poll_req_cb(qp);
1040
}
1041
}
1042
1043
static void qm_get_complete_eqe_num(struct hisi_qm *qm)
1044
{
1045
struct qm_eqe *eqe = qm->eqe + qm->status.eq_head;
1046
struct hisi_qm_poll_data *poll_data = NULL;
1047
u32 dw0 = le32_to_cpu(eqe->dw0);
1048
u16 eq_depth = qm->eq_depth;
1049
u16 cqn, eqe_num = 0;
1050
1051
if (QM_EQE_PHASE(dw0) != qm->status.eqc_phase) {
1052
atomic64_inc(&qm->debug.dfx.err_irq_cnt);
1053
qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
1054
return;
1055
}
1056
1057
cqn = dw0 & QM_EQE_CQN_MASK;
1058
if (unlikely(cqn >= qm->qp_num))
1059
return;
1060
poll_data = &qm->poll_data[cqn];
1061
1062
do {
1063
poll_data->qp_finish_id[eqe_num] = dw0 & QM_EQE_CQN_MASK;
1064
eqe_num++;
1065
1066
if (qm->status.eq_head == eq_depth - 1) {
1067
qm->status.eqc_phase = !qm->status.eqc_phase;
1068
eqe = qm->eqe;
1069
qm->status.eq_head = 0;
1070
} else {
1071
eqe++;
1072
qm->status.eq_head++;
1073
}
1074
1075
dw0 = le32_to_cpu(eqe->dw0);
1076
if (QM_EQE_PHASE(dw0) != qm->status.eqc_phase)
1077
break;
1078
} while (eqe_num < (eq_depth >> 1) - 1);
1079
1080
poll_data->eqe_num = eqe_num;
1081
queue_work(qm->wq, &poll_data->work);
1082
qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
1083
}
1084
1085
static irqreturn_t qm_eq_irq(int irq, void *data)
1086
{
1087
struct hisi_qm *qm = data;
1088
1089
/* Get qp id of completed tasks and re-enable the interrupt */
1090
qm_get_complete_eqe_num(qm);
1091
1092
return IRQ_HANDLED;
1093
}
1094
1095
static irqreturn_t qm_mb_cmd_irq(int irq, void *data)
1096
{
1097
struct hisi_qm *qm = data;
1098
u32 val;
1099
1100
val = readl(qm->io_base + QM_IFC_INT_STATUS);
1101
val &= QM_IFC_INT_STATUS_MASK;
1102
if (!val)
1103
return IRQ_NONE;
1104
1105
if (test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl)) {
1106
dev_warn(&qm->pdev->dev, "Driver is down, message cannot be processed!\n");
1107
return IRQ_HANDLED;
1108
}
1109
1110
schedule_work(&qm->cmd_process);
1111
1112
return IRQ_HANDLED;
1113
}
1114
1115
static void qm_set_qp_disable(struct hisi_qp *qp, int offset)
1116
{
1117
u32 *addr;
1118
1119
if (qp->is_in_kernel)
1120
return;
1121
1122
addr = (u32 *)(qp->qdma.va + qp->qdma.size) - offset;
1123
*addr = 1;
1124
1125
/* make sure setup is completed */
1126
smp_wmb();
1127
}
1128
1129
static void qm_disable_qp(struct hisi_qm *qm, u32 qp_id)
1130
{
1131
struct hisi_qp *qp = &qm->qp_array[qp_id];
1132
1133
qm_set_qp_disable(qp, QM_RESET_STOP_TX_OFFSET);
1134
hisi_qm_stop_qp(qp);
1135
qm_set_qp_disable(qp, QM_RESET_STOP_RX_OFFSET);
1136
}
1137
1138
static void qm_reset_function(struct hisi_qm *qm)
1139
{
1140
struct device *dev = &qm->pdev->dev;
1141
int ret;
1142
1143
if (qm_check_dev_error(qm))
1144
return;
1145
1146
ret = qm_reset_prepare_ready(qm);
1147
if (ret) {
1148
dev_err(dev, "reset function not ready\n");
1149
return;
1150
}
1151
1152
ret = hisi_qm_stop(qm, QM_DOWN);
1153
if (ret) {
1154
dev_err(dev, "failed to stop qm when reset function\n");
1155
goto clear_bit;
1156
}
1157
1158
ret = hisi_qm_start(qm);
1159
if (ret)
1160
dev_err(dev, "failed to start qm when reset function\n");
1161
1162
clear_bit:
1163
qm_reset_bit_clear(qm);
1164
}
1165
1166
static irqreturn_t qm_aeq_thread(int irq, void *data)
1167
{
1168
struct hisi_qm *qm = data;
1169
struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head;
1170
u32 dw0 = le32_to_cpu(aeqe->dw0);
1171
u16 aeq_depth = qm->aeq_depth;
1172
u32 type, qp_id;
1173
1174
atomic64_inc(&qm->debug.dfx.aeq_irq_cnt);
1175
1176
while (QM_AEQE_PHASE(dw0) == qm->status.aeqc_phase) {
1177
type = (dw0 >> QM_AEQE_TYPE_SHIFT) & QM_AEQE_TYPE_MASK;
1178
qp_id = dw0 & QM_AEQE_CQN_MASK;
1179
1180
switch (type) {
1181
case QM_EQ_OVERFLOW:
1182
dev_err(&qm->pdev->dev, "eq overflow, reset function\n");
1183
qm_reset_function(qm);
1184
return IRQ_HANDLED;
1185
case QM_CQ_OVERFLOW:
1186
dev_err(&qm->pdev->dev, "cq overflow, stop qp(%u)\n",
1187
qp_id);
1188
fallthrough;
1189
case QM_CQE_ERROR:
1190
qm_disable_qp(qm, qp_id);
1191
break;
1192
default:
1193
dev_err(&qm->pdev->dev, "unknown error type %u\n",
1194
type);
1195
break;
1196
}
1197
1198
if (qm->status.aeq_head == aeq_depth - 1) {
1199
qm->status.aeqc_phase = !qm->status.aeqc_phase;
1200
aeqe = qm->aeqe;
1201
qm->status.aeq_head = 0;
1202
} else {
1203
aeqe++;
1204
qm->status.aeq_head++;
1205
}
1206
dw0 = le32_to_cpu(aeqe->dw0);
1207
}
1208
1209
qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0);
1210
1211
return IRQ_HANDLED;
1212
}
1213
1214
static void qm_init_qp_status(struct hisi_qp *qp)
1215
{
1216
struct hisi_qp_status *qp_status = &qp->qp_status;
1217
1218
qp_status->sq_tail = 0;
1219
qp_status->cq_head = 0;
1220
qp_status->cqc_phase = true;
1221
atomic_set(&qp_status->used, 0);
1222
}
1223
1224
static void qm_init_prefetch(struct hisi_qm *qm)
1225
{
1226
struct device *dev = &qm->pdev->dev;
1227
u32 page_type = 0x0;
1228
1229
if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
1230
return;
1231
1232
switch (PAGE_SIZE) {
1233
case SZ_4K:
1234
page_type = 0x0;
1235
break;
1236
case SZ_16K:
1237
page_type = 0x1;
1238
break;
1239
case SZ_64K:
1240
page_type = 0x2;
1241
break;
1242
default:
1243
dev_err(dev, "unsupported system page size: %lu, defaulting to 4KB",
1244
PAGE_SIZE);
1245
}
1246
1247
writel(page_type, qm->io_base + QM_PAGE_SIZE);
1248
}
1249
1250
/*
 * acc_shaper_para_calc() - Get the IR value by the QoS formula; the return
 * value is the expected calculated QoS.
 * The formula (if ir = 1 it means IR = 100 Mbps, if ir = 10000 it means
 * IR = 10 Gbps):
 *
 *              IR_b * (2 ^ IR_u) * 8000
 * IR(Mbps) = ----------------------------
 *                 Tick * (2 ^ IR_s)
 */
1260
static u32 acc_shaper_para_calc(u64 cir_b, u64 cir_u, u64 cir_s)
1261
{
1262
return ((cir_b * QM_QOS_DIVISOR_CLK) * (1 << cir_u)) /
1263
(QM_QOS_TICK * (1 << cir_s));
1264
}
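/*
 * Worked example: with QM_QOS_DIVISOR_CLK = 0x1f40 (8000) and
 * QM_QOS_TICK = 0x300 (768), acc_shaper_para_calc(154, 0, 4) returns
 * (154 * 8000 * 1) / (768 * 16) = 100, i.e. an IR of 100 Mbps.
 */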
1265
1266
static u32 acc_shaper_calc_cbs_s(u32 ir)
1267
{
1268
int table_size = ARRAY_SIZE(shaper_cbs_s);
1269
int i;
1270
1271
for (i = 0; i < table_size; i++) {
1272
if (ir >= shaper_cbs_s[i].start && ir <= shaper_cbs_s[i].end)
1273
return shaper_cbs_s[i].val;
1274
}
1275
1276
return QM_SHAPER_MIN_CBS_S;
1277
}
1278
1279
static u32 acc_shaper_calc_cir_s(u32 ir)
1280
{
1281
int table_size = ARRAY_SIZE(shaper_cir_s);
1282
int i;
1283
1284
for (i = 0; i < table_size; i++) {
1285
if (ir >= shaper_cir_s[i].start && ir <= shaper_cir_s[i].end)
1286
return shaper_cir_s[i].val;
1287
}
1288
1289
return 0;
1290
}
1291
1292
static int qm_get_shaper_para(u32 ir, struct qm_shaper_factor *factor)
1293
{
1294
u32 cir_b, cir_u, cir_s, ir_calc;
1295
u32 error_rate;
1296
1297
factor->cbs_s = acc_shaper_calc_cbs_s(ir);
1298
cir_s = acc_shaper_calc_cir_s(ir);
1299
1300
for (cir_b = QM_QOS_MIN_CIR_B; cir_b <= QM_QOS_MAX_CIR_B; cir_b++) {
1301
for (cir_u = 0; cir_u <= QM_QOS_MAX_CIR_U; cir_u++) {
1302
ir_calc = acc_shaper_para_calc(cir_b, cir_u, cir_s);
1303
1304
error_rate = QM_QOS_EXPAND_RATE * (u32)abs(ir_calc - ir) / ir;
1305
if (error_rate <= QM_QOS_MIN_ERROR_RATE) {
1306
factor->cir_b = cir_b;
1307
factor->cir_u = cir_u;
1308
factor->cir_s = cir_s;
1309
return 0;
1310
}
1311
}
1312
}
1313
1314
return -EINVAL;
1315
}
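/*
 * Worked example (derived from the tables and formula above): for ir = 100
 * (func_qos = 1, since QM_QOS_RATE = 100), cbs_s = 9 and cir_s = 4 come from
 * the tables, and the search settles on cir_b = 154, cir_u = 0, because
 * acc_shaper_para_calc(154, 0, 4) = 100 gives a zero error rate.
 */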
1316
1317
static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base,
1318
u32 number, struct qm_shaper_factor *factor)
1319
{
1320
u64 tmp = 0;
1321
1322
if (number > 0) {
1323
switch (type) {
1324
case SQC_VFT:
1325
if (qm->ver == QM_HW_V1) {
1326
tmp = QM_SQC_VFT_BUF_SIZE |
1327
QM_SQC_VFT_SQC_SIZE |
1328
QM_SQC_VFT_INDEX_NUMBER |
1329
QM_SQC_VFT_VALID |
1330
(u64)base << QM_SQC_VFT_START_SQN_SHIFT;
1331
} else {
1332
tmp = (u64)base << QM_SQC_VFT_START_SQN_SHIFT |
1333
QM_SQC_VFT_VALID |
1334
(u64)(number - 1) << QM_SQC_VFT_SQN_SHIFT;
1335
}
1336
break;
1337
case CQC_VFT:
1338
if (qm->ver == QM_HW_V1) {
1339
tmp = QM_CQC_VFT_BUF_SIZE |
1340
QM_CQC_VFT_SQC_SIZE |
1341
QM_CQC_VFT_INDEX_NUMBER |
1342
QM_CQC_VFT_VALID;
1343
} else {
1344
tmp = QM_CQC_VFT_VALID;
1345
}
1346
break;
1347
case SHAPER_VFT:
1348
if (factor) {
1349
tmp = factor->cir_b |
1350
(factor->cir_u << QM_SHAPER_FACTOR_CIR_U_SHIFT) |
1351
(factor->cir_s << QM_SHAPER_FACTOR_CIR_S_SHIFT) |
1352
(QM_SHAPER_CBS_B << QM_SHAPER_FACTOR_CBS_B_SHIFT) |
1353
(factor->cbs_s << QM_SHAPER_FACTOR_CBS_S_SHIFT);
1354
}
1355
break;
1356
/*
1357
* Note: The current logic only needs to handle the above three types
1358
* If new types are added, they need to be supplemented here,
1359
* otherwise undefined behavior may occur.
1360
*/
1361
default:
1362
break;
1363
}
1364
}
1365
1366
writel(lower_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_L);
1367
writel(upper_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_H);
1368
}
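/*
 * qm_set_vft_common() below programs one VFT entry: it waits for
 * QM_VFT_CFG_RDY, selects the table type and target function (the shaper
 * table also encodes the algorithm type), fills DATA_L/DATA_H through
 * qm_vft_data_cfg(), then triggers QM_VFT_CFG_OP_ENABLE and polls for ready
 * again.
 */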
1369
1370
static int qm_set_vft_common(struct hisi_qm *qm, enum vft_type type,
1371
u32 fun_num, u32 base, u32 number)
1372
{
1373
struct qm_shaper_factor *factor = NULL;
1374
unsigned int val;
1375
int ret;
1376
1377
if (type == SHAPER_VFT && test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
1378
factor = &qm->factor[fun_num];
1379
1380
ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
1381
val & BIT(0), POLL_PERIOD,
1382
POLL_TIMEOUT);
1383
if (ret)
1384
return ret;
1385
1386
writel(0x0, qm->io_base + QM_VFT_CFG_OP_WR);
1387
writel(type, qm->io_base + QM_VFT_CFG_TYPE);
1388
if (type == SHAPER_VFT)
1389
fun_num |= base << QM_SHAPER_VFT_OFFSET;
1390
1391
writel(fun_num, qm->io_base + QM_VFT_CFG);
1392
1393
qm_vft_data_cfg(qm, type, base, number, factor);
1394
1395
writel(0x0, qm->io_base + QM_VFT_CFG_RDY);
1396
writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE);
1397
1398
return readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
1399
val & BIT(0), POLL_PERIOD,
1400
POLL_TIMEOUT);
1401
}
1402
1403
static int qm_shaper_init_vft(struct hisi_qm *qm, u32 fun_num)
1404
{
1405
u32 qos = qm->factor[fun_num].func_qos;
1406
int ret, i;
1407
1408
ret = qm_get_shaper_para(qos * QM_QOS_RATE, &qm->factor[fun_num]);
1409
if (ret) {
1410
dev_err(&qm->pdev->dev, "failed to calculate shaper parameter!\n");
1411
return ret;
1412
}
1413
writel(qm->type_rate, qm->io_base + QM_SHAPER_CFG);
1414
for (i = ALG_TYPE_0; i <= ALG_TYPE_1; i++) {
1415
/* The base number of queue reuse for different alg type */
1416
ret = qm_set_vft_common(qm, SHAPER_VFT, fun_num, i, 1);
1417
if (ret)
1418
return ret;
1419
}
1420
1421
return 0;
1422
}
1423
1424
/* The config should be conducted after qm_dev_mem_reset() */
1425
static int qm_set_sqc_cqc_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
1426
u32 number)
1427
{
1428
int ret, i;
1429
1430
for (i = SQC_VFT; i <= CQC_VFT; i++) {
1431
ret = qm_set_vft_common(qm, i, fun_num, base, number);
1432
if (ret)
1433
return ret;
1434
}
1435
1436
/* init default shaper qos val */
1437
if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) {
1438
ret = qm_shaper_init_vft(qm, fun_num);
1439
if (ret)
1440
goto back_sqc_cqc;
1441
}
1442
1443
return 0;
1444
back_sqc_cqc:
1445
for (i = SQC_VFT; i <= CQC_VFT; i++)
1446
qm_set_vft_common(qm, i, fun_num, 0, 0);
1447
1448
return ret;
1449
}
1450
1451
static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number)
1452
{
1453
u64 sqc_vft;
1454
int ret;
1455
1456
ret = hisi_qm_mb_read(qm, &sqc_vft, QM_MB_CMD_SQC_VFT_V2, 0);
1457
if (ret)
1458
return ret;
1459
1460
*base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2);
1461
*number = (QM_SQC_VFT_NUM_MASK_V2 &
1462
(sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1;
1463
1464
return 0;
1465
}
1466
1467
static void qm_hw_error_init_v1(struct hisi_qm *qm)
1468
{
1469
writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
1470
}
1471
1472
static void qm_hw_error_cfg(struct hisi_qm *qm)
1473
{
1474
struct hisi_qm_err_mask *qm_err = &qm->err_info.qm_err;
1475
1476
qm->error_mask = qm_err->nfe | qm_err->ce | qm_err->fe;
1477
/* clear QM hw residual error source */
1478
writel(qm->error_mask, qm->io_base + QM_ABNORMAL_INT_SOURCE);
1479
1480
/* configure error type */
1481
writel(qm_err->ce, qm->io_base + QM_RAS_CE_ENABLE);
1482
writel(QM_RAS_CE_TIMES_PER_IRQ, qm->io_base + QM_RAS_CE_THRESHOLD);
1483
writel(qm_err->nfe, qm->io_base + QM_RAS_NFE_ENABLE);
1484
writel(qm_err->fe, qm->io_base + QM_RAS_FE_ENABLE);
1485
}
1486
1487
static void qm_hw_error_init_v2(struct hisi_qm *qm)
1488
{
1489
u32 irq_unmask;
1490
1491
qm_hw_error_cfg(qm);
1492
1493
irq_unmask = ~qm->error_mask;
1494
irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
1495
writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
1496
}
1497
1498
static void qm_hw_error_uninit_v2(struct hisi_qm *qm)
1499
{
1500
u32 irq_mask = qm->error_mask;
1501
1502
irq_mask |= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
1503
writel(irq_mask, qm->io_base + QM_ABNORMAL_INT_MASK);
1504
}
1505
1506
static void qm_hw_error_init_v3(struct hisi_qm *qm)
1507
{
1508
u32 irq_unmask;
1509
1510
qm_hw_error_cfg(qm);
1511
1512
/* enable closing master OOO when a hardware error happens */
1513
writel(qm->err_info.qm_err.shutdown_mask, qm->io_base + QM_OOO_SHUTDOWN_SEL);
1514
1515
irq_unmask = ~qm->error_mask;
1516
irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
1517
writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
1518
}
1519
1520
static void qm_hw_error_uninit_v3(struct hisi_qm *qm)
1521
{
1522
u32 irq_mask = qm->error_mask;
1523
1524
irq_mask |= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
1525
writel(irq_mask, qm->io_base + QM_ABNORMAL_INT_MASK);
1526
1527
/* disable closing master OOO when a hardware error happens */
1528
writel(0x0, qm->io_base + QM_OOO_SHUTDOWN_SEL);
1529
}
1530
1531
static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status)
1532
{
1533
const struct hisi_qm_hw_error *err;
1534
struct device *dev = &qm->pdev->dev;
1535
u32 reg_val, type, vf_num, qp_id;
1536
int i;
1537
1538
for (i = 0; i < ARRAY_SIZE(qm_hw_error); i++) {
1539
err = &qm_hw_error[i];
1540
if (!(err->int_msk & error_status))
1541
continue;
1542
1543
dev_err(dev, "%s [error status=0x%x] found\n",
1544
err->msg, err->int_msk);
1545
1546
if (err->int_msk & QM_DB_TIMEOUT) {
1547
reg_val = readl(qm->io_base + QM_ABNORMAL_INF01);
1548
type = (reg_val & QM_DB_TIMEOUT_TYPE) >>
1549
QM_DB_TIMEOUT_TYPE_SHIFT;
1550
vf_num = reg_val & QM_DB_TIMEOUT_VF;
1551
qp_id = reg_val >> QM_DB_TIMEOUT_QP_SHIFT;
1552
dev_err(dev, "qm %s doorbell timeout in function %u qp %u\n",
1553
qm_db_timeout[type], vf_num, qp_id);
1554
} else if (err->int_msk & QM_OF_FIFO_OF) {
1555
reg_val = readl(qm->io_base + QM_ABNORMAL_INF00);
1556
type = (reg_val & QM_FIFO_OVERFLOW_TYPE) >>
1557
QM_FIFO_OVERFLOW_TYPE_SHIFT;
1558
vf_num = reg_val & QM_FIFO_OVERFLOW_VF;
1559
qp_id = reg_val >> QM_FIFO_OVERFLOW_QP_SHIFT;
1560
if (type < ARRAY_SIZE(qm_fifo_overflow))
1561
dev_err(dev, "qm %s fifo overflow in function %u qp %u\n",
1562
qm_fifo_overflow[type], vf_num, qp_id);
1563
else
1564
dev_err(dev, "unknown error type\n");
1565
} else if (err->int_msk & QM_AXI_RRESP_ERR) {
1566
reg_val = readl(qm->io_base + QM_ABNORMAL_INF02);
1567
if (reg_val & QM_AXI_POISON_ERR)
1568
dev_err(dev, "qm axi poison error happened\n");
1569
}
1570
}
1571
}
1572
1573
static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
1574
{
1575
struct hisi_qm_err_mask *qm_err = &qm->err_info.qm_err;
1576
u32 error_status;
1577
1578
error_status = qm_get_hw_error_status(qm);
1579
if (error_status & qm->error_mask) {
1580
if (error_status & QM_ECC_MBIT)
1581
qm->err_status.is_qm_ecc_mbit = true;
1582
1583
qm_log_hw_error(qm, error_status);
1584
if (error_status & qm_err->reset_mask) {
1585
/* Disable the same error reporting until device is recovered. */
1586
writel(qm_err->nfe & (~error_status), qm->io_base + QM_RAS_NFE_ENABLE);
1587
return ACC_ERR_NEED_RESET;
1588
}
1589
1590
/* Clear error source if not need reset. */
1591
writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE);
1592
writel(qm_err->nfe, qm->io_base + QM_RAS_NFE_ENABLE);
1593
writel(qm_err->ce, qm->io_base + QM_RAS_CE_ENABLE);
1594
}
1595
1596
return ACC_ERR_RECOVERED;
1597
}
1598
1599
static void qm_clear_cmd_interrupt(struct hisi_qm *qm, u64 vf_mask)
1600
{
1601
u32 val;
1602
1603
if (qm->fun_type == QM_HW_PF)
1604
writeq(vf_mask, qm->io_base + QM_IFC_INT_SOURCE_P);
1605
1606
val = readl(qm->io_base + QM_IFC_INT_SOURCE_V);
1607
val |= QM_IFC_INT_SOURCE_MASK;
1608
writel(val, qm->io_base + QM_IFC_INT_SOURCE_V);
1609
}
1610
1611
static void qm_handle_vf_msg(struct hisi_qm *qm, u32 vf_id)
1612
{
1613
struct device *dev = &qm->pdev->dev;
1614
enum qm_ifc_cmd cmd;
1615
int ret;
1616
1617
ret = qm->ops->get_ifc(qm, &cmd, NULL, vf_id);
1618
if (ret) {
1619
dev_err(dev, "failed to get command from VF(%u)!\n", vf_id);
1620
return;
1621
}
1622
1623
switch (cmd) {
1624
case QM_VF_PREPARE_FAIL:
1625
dev_err(dev, "failed to stop VF(%u)!\n", vf_id);
1626
break;
1627
case QM_VF_START_FAIL:
1628
dev_err(dev, "failed to start VF(%u)!\n", vf_id);
1629
break;
1630
case QM_VF_PREPARE_DONE:
1631
case QM_VF_START_DONE:
1632
break;
1633
default:
1634
dev_err(dev, "unsupported command(0x%x) sent by VF(%u)!\n", cmd, vf_id);
1635
break;
1636
}
1637
}
1638
1639
static int qm_wait_vf_prepare_finish(struct hisi_qm *qm)
1640
{
1641
struct device *dev = &qm->pdev->dev;
1642
u32 vfs_num = qm->vfs_num;
1643
int cnt = 0;
1644
int ret = 0;
1645
u64 val;
1646
u32 i;
1647
1648
if (!qm->vfs_num || !test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
1649
return 0;
1650
1651
while (true) {
1652
val = readq(qm->io_base + QM_IFC_INT_SOURCE_P);
1653
/* All VFs send command to PF, break */
1654
if ((val & GENMASK(vfs_num, 1)) == GENMASK(vfs_num, 1))
1655
break;
1656
1657
if (++cnt > QM_MAX_PF_WAIT_COUNT) {
1658
ret = -EBUSY;
1659
break;
1660
}
1661
1662
msleep(QM_WAIT_DST_ACK);
1663
}
1664
1665
/* PF check VFs msg */
1666
for (i = 1; i <= vfs_num; i++) {
1667
if (val & BIT(i))
1668
qm_handle_vf_msg(qm, i);
1669
else
1670
dev_err(dev, "VF(%u) did not ping PF!\n", i);
1671
}
1672
1673
/* PF clear interrupt to ack VFs */
1674
qm_clear_cmd_interrupt(qm, val);
1675
1676
return ret;
1677
}
1678
1679
static void qm_trigger_vf_interrupt(struct hisi_qm *qm, u32 fun_num)
1680
{
1681
u32 val;
1682
1683
val = readl(qm->io_base + QM_IFC_INT_CFG);
1684
val &= ~QM_IFC_SEND_ALL_VFS;
1685
val |= fun_num;
1686
writel(val, qm->io_base + QM_IFC_INT_CFG);
1687
1688
val = readl(qm->io_base + QM_IFC_INT_SET_P);
1689
val |= QM_IFC_INT_SET_MASK;
1690
writel(val, qm->io_base + QM_IFC_INT_SET_P);
1691
}
1692
1693
static void qm_trigger_pf_interrupt(struct hisi_qm *qm)
1694
{
1695
u32 val;
1696
1697
val = readl(qm->io_base + QM_IFC_INT_SET_V);
1698
val |= QM_IFC_INT_SET_MASK;
1699
writel(val, qm->io_base + QM_IFC_INT_SET_V);
1700
}
1701
1702
static int qm_ping_single_vf(struct hisi_qm *qm, enum qm_ifc_cmd cmd, u32 data, u32 fun_num)
1703
{
1704
struct device *dev = &qm->pdev->dev;
1705
int cnt = 0;
1706
u64 val;
1707
int ret;
1708
1709
ret = qm->ops->set_ifc_begin(qm, cmd, data, fun_num);
1710
if (ret) {
1711
dev_err(dev, "failed to send command to vf(%u)!\n", fun_num);
1712
goto err_unlock;
1713
}
1714
1715
qm_trigger_vf_interrupt(qm, fun_num);
1716
while (true) {
1717
msleep(QM_WAIT_DST_ACK);
1718
val = readq(qm->io_base + QM_IFC_READY_STATUS);
1719
/* if VF respond, PF notifies VF successfully. */
1720
if (!(val & BIT(fun_num)))
1721
goto err_unlock;
1722
1723
if (++cnt > QM_MAX_PF_WAIT_COUNT) {
1724
dev_err(dev, "failed to get response from VF(%u)!\n", fun_num);
1725
ret = -ETIMEDOUT;
1726
break;
1727
}
1728
}
1729
1730
err_unlock:
1731
qm->ops->set_ifc_end(qm);
1732
return ret;
1733
}
1734
1735
static int qm_ping_all_vfs(struct hisi_qm *qm, enum qm_ifc_cmd cmd)
1736
{
1737
struct device *dev = &qm->pdev->dev;
1738
u32 vfs_num = qm->vfs_num;
1739
u64 val = 0;
1740
int cnt = 0;
1741
int ret;
1742
u32 i;
1743
1744
ret = qm->ops->set_ifc_begin(qm, cmd, 0, QM_MB_PING_ALL_VFS);
1745
if (ret) {
1746
dev_err(dev, "failed to send command(0x%x) to all vfs!\n", cmd);
1747
qm->ops->set_ifc_end(qm);
1748
return ret;
1749
}
1750
1751
qm_trigger_vf_interrupt(qm, QM_IFC_SEND_ALL_VFS);
1752
while (true) {
1753
msleep(QM_WAIT_DST_ACK);
1754
val = readq(qm->io_base + QM_IFC_READY_STATUS);
1755
/* If all VFs acked, PF notifies VFs successfully. */
1756
if (!(val & GENMASK(vfs_num, 1))) {
1757
qm->ops->set_ifc_end(qm);
1758
return 0;
1759
}
1760
1761
if (++cnt > QM_MAX_PF_WAIT_COUNT)
1762
break;
1763
}
1764
1765
qm->ops->set_ifc_end(qm);
1766
1767
/* Check which vf respond timeout. */
1768
for (i = 1; i <= vfs_num; i++) {
1769
if (val & BIT(i))
1770
dev_err(dev, "failed to get response from VF(%u)!\n", i);
1771
}
1772
1773
return -ETIMEDOUT;
1774
}
1775
1776
static int qm_ping_pf(struct hisi_qm *qm, enum qm_ifc_cmd cmd)
1777
{
1778
int cnt = 0;
1779
u32 val;
1780
int ret;
1781
1782
ret = qm->ops->set_ifc_begin(qm, cmd, 0, 0);
1783
if (ret) {
1784
dev_err(&qm->pdev->dev, "failed to send command(0x%x) to PF!\n", cmd);
1785
goto unlock;
1786
}
1787
1788
qm_trigger_pf_interrupt(qm);
1789
/* Waiting for PF response */
1790
while (true) {
1791
msleep(QM_WAIT_DST_ACK);
1792
val = readl(qm->io_base + QM_IFC_INT_SET_V);
1793
if (!(val & QM_IFC_INT_STATUS_MASK))
1794
break;
1795
1796
if (++cnt > QM_MAX_VF_WAIT_COUNT) {
1797
ret = -ETIMEDOUT;
1798
break;
1799
}
1800
}
1801
1802
unlock:
1803
qm->ops->set_ifc_end(qm);
1804
1805
return ret;
1806
}
1807
1808
static int qm_drain_qm(struct hisi_qm *qm)
1809
{
1810
return hisi_qm_mb(qm, QM_MB_CMD_FLUSH_QM, 0, 0, 0);
1811
}
1812
1813
static int qm_stop_qp(struct hisi_qp *qp)
1814
{
1815
return hisi_qm_mb(qp->qm, QM_MB_CMD_STOP_QP, 0, qp->qp_id, 0);
1816
}
1817
1818
static int qm_set_msi(struct hisi_qm *qm, bool set)
1819
{
1820
struct pci_dev *pdev = qm->pdev;
1821
1822
if (set) {
1823
pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64,
1824
0);
1825
} else {
1826
pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64,
1827
ACC_PEH_MSI_DISABLE);
1828
if (qm->err_status.is_qm_ecc_mbit ||
1829
qm->err_status.is_dev_ecc_mbit)
1830
return 0;
1831
1832
mdelay(1);
1833
if (readl(qm->io_base + QM_PEH_DFX_INFO0))
1834
return -EFAULT;
1835
}
1836
1837
return 0;
1838
}
1839
1840
static void qm_wait_msi_finish(struct hisi_qm *qm)
1841
{
1842
struct pci_dev *pdev = qm->pdev;
1843
u32 cmd = ~0;
1844
int cnt = 0;
1845
u32 val;
1846
int ret;
1847
1848
while (true) {
1849
pci_read_config_dword(pdev, pdev->msi_cap +
1850
PCI_MSI_PENDING_64, &cmd);
1851
if (!cmd)
1852
break;
1853
1854
if (++cnt > MAX_WAIT_COUNTS) {
1855
pci_warn(pdev, "failed to empty MSI PENDING!\n");
1856
break;
1857
}
1858
1859
udelay(1);
1860
}
1861
1862
ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_DFX_INFO0,
1863
val, !(val & QM_PEH_DFX_MASK),
1864
POLL_PERIOD, POLL_TIMEOUT);
1865
if (ret)
1866
pci_warn(pdev, "failed to empty PEH MSI!\n");
1867
1868
ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_DFX_INFO1,
1869
val, !(val & QM_PEH_MSI_FINISH_MASK),
1870
POLL_PERIOD, POLL_TIMEOUT);
1871
if (ret)
1872
pci_warn(pdev, "failed to finish MSI operation!\n");
1873
}
1874
1875
static int qm_set_msi_v3(struct hisi_qm *qm, bool set)
1876
{
1877
struct pci_dev *pdev = qm->pdev;
1878
int ret = -ETIMEDOUT;
1879
u32 cmd, i;
1880
1881
pci_read_config_dword(pdev, pdev->msi_cap, &cmd);
1882
if (set)
1883
cmd |= QM_MSI_CAP_ENABLE;
1884
else
1885
cmd &= ~QM_MSI_CAP_ENABLE;
1886
1887
pci_write_config_dword(pdev, pdev->msi_cap, cmd);
1888
if (set) {
1889
for (i = 0; i < MAX_WAIT_COUNTS; i++) {
1890
pci_read_config_dword(pdev, pdev->msi_cap, &cmd);
1891
if (cmd & QM_MSI_CAP_ENABLE)
1892
return 0;
1893
1894
udelay(1);
1895
}
1896
} else {
1897
udelay(WAIT_PERIOD_US_MIN);
1898
qm_wait_msi_finish(qm);
1899
ret = 0;
1900
}
1901
1902
return ret;
1903
}
1904
1905
static int qm_set_ifc_begin_v3(struct hisi_qm *qm, enum qm_ifc_cmd cmd, u32 data, u32 fun_num)
1906
{
1907
struct qm_mailbox mailbox;
1908
u64 msg;
1909
1910
msg = cmd | (u64)data << QM_IFC_DATA_SHIFT;
1911
1912
qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, msg, fun_num, 0);
1913
mutex_lock(&qm->mailbox_lock);
1914
return qm_mb_nolock(qm, &mailbox, QM_MB_MAX_WAIT_TIMEOUT);
1915
}
1916
1917
static void qm_set_ifc_end_v3(struct hisi_qm *qm)
1918
{
1919
mutex_unlock(&qm->mailbox_lock);
1920
}
1921
1922
static int qm_get_ifc_v3(struct hisi_qm *qm, enum qm_ifc_cmd *cmd, u32 *data, u32 fun_num)
1923
{
1924
u64 msg;
1925
int ret;
1926
1927
ret = hisi_qm_mb_read(qm, &msg, QM_MB_CMD_DST, fun_num);
1928
if (ret)
1929
return ret;
1930
1931
*cmd = msg & QM_IFC_CMD_MASK;
1932
1933
if (data)
1934
*data = msg >> QM_IFC_DATA_SHIFT;
1935
1936
return 0;
1937
}
1938
1939
static int qm_set_ifc_begin_v4(struct hisi_qm *qm, enum qm_ifc_cmd cmd, u32 data, u32 fun_num)
1940
{
1941
uintptr_t offset;
1942
u64 msg;
1943
1944
if (qm->fun_type == QM_HW_PF)
1945
offset = QM_PF2VF_PF_W;
1946
else
1947
offset = QM_VF2PF_VF_W;
1948
1949
msg = cmd | (u64)data << QM_IFC_DATA_SHIFT;
1950
1951
mutex_lock(&qm->ifc_lock);
1952
writeq(msg, qm->io_base + offset);
1953
1954
return 0;
1955
}
1956
1957
static void qm_set_ifc_end_v4(struct hisi_qm *qm)
1958
{
1959
mutex_unlock(&qm->ifc_lock);
1960
}
1961
1962
static u64 qm_get_ifc_pf(struct hisi_qm *qm, u32 fun_num)
1963
{
1964
uintptr_t offset;
1965
1966
offset = QM_VF2PF_PF_R + QM_VF2PF_REG_SIZE * fun_num;
1967
1968
return (u64)readl(qm->io_base + offset);
1969
}
1970
1971
static u64 qm_get_ifc_vf(struct hisi_qm *qm)
1972
{
1973
return readq(qm->io_base + QM_PF2VF_VF_R);
1974
}
1975
1976
static int qm_get_ifc_v4(struct hisi_qm *qm, enum qm_ifc_cmd *cmd, u32 *data, u32 fun_num)
1977
{
1978
u64 msg;
1979
1980
if (qm->fun_type == QM_HW_PF)
1981
msg = qm_get_ifc_pf(qm, fun_num);
1982
else
1983
msg = qm_get_ifc_vf(qm);
1984
1985
*cmd = msg & QM_IFC_CMD_MASK;
1986
1987
if (data)
1988
*data = msg >> QM_IFC_DATA_SHIFT;
1989
1990
return 0;
1991
}
1992
1993
static const struct hisi_qm_hw_ops qm_hw_ops_v1 = {
1994
.qm_db = qm_db_v1,
1995
.hw_error_init = qm_hw_error_init_v1,
1996
.set_msi = qm_set_msi,
1997
};
1998
1999
static const struct hisi_qm_hw_ops qm_hw_ops_v2 = {
2000
.get_vft = qm_get_vft_v2,
2001
.qm_db = qm_db_v2,
2002
.hw_error_init = qm_hw_error_init_v2,
2003
.hw_error_uninit = qm_hw_error_uninit_v2,
2004
.hw_error_handle = qm_hw_error_handle_v2,
2005
.set_msi = qm_set_msi,
2006
};
2007
2008
static const struct hisi_qm_hw_ops qm_hw_ops_v3 = {
2009
.get_vft = qm_get_vft_v2,
2010
.qm_db = qm_db_v2,
2011
.hw_error_init = qm_hw_error_init_v3,
2012
.hw_error_uninit = qm_hw_error_uninit_v3,
2013
.hw_error_handle = qm_hw_error_handle_v2,
2014
.set_msi = qm_set_msi_v3,
2015
.set_ifc_begin = qm_set_ifc_begin_v3,
2016
.set_ifc_end = qm_set_ifc_end_v3,
2017
.get_ifc = qm_get_ifc_v3,
2018
};
2019
2020
static const struct hisi_qm_hw_ops qm_hw_ops_v4 = {
2021
.get_vft = qm_get_vft_v2,
2022
.qm_db = qm_db_v2,
2023
.hw_error_init = qm_hw_error_init_v3,
2024
.hw_error_uninit = qm_hw_error_uninit_v3,
2025
.hw_error_handle = qm_hw_error_handle_v2,
2026
.set_msi = qm_set_msi_v3,
2027
.set_ifc_begin = qm_set_ifc_begin_v4,
2028
.set_ifc_end = qm_set_ifc_end_v4,
2029
.get_ifc = qm_get_ifc_v4,
2030
};
2031
2032
static void *qm_get_avail_sqe(struct hisi_qp *qp)
2033
{
2034
struct hisi_qp_status *qp_status = &qp->qp_status;
2035
u16 sq_tail = qp_status->sq_tail;
2036
2037
if (unlikely(atomic_read(&qp->qp_status.used) == qp->sq_depth - 1))
2038
return NULL;
2039
2040
return qp->sqe + sq_tail * qp->qm->sqe_size;
2041
}
2042
2043
static void hisi_qm_unset_hw_reset(struct hisi_qp *qp)
2044
{
2045
u64 *addr;
2046
2047
/* Use last 64 bits of DUS to reset status. */
2048
addr = (u64 *)(qp->qdma.va + qp->qdma.size) - QM_RESET_STOP_TX_OFFSET;
2049
*addr = 0;
2050
}
2051
2052
static struct hisi_qp *find_shareable_qp(struct hisi_qm *qm, u8 alg_type, bool is_in_kernel)
2053
{
2054
struct device *dev = &qm->pdev->dev;
2055
struct hisi_qp *share_qp = NULL;
2056
struct hisi_qp *qp;
2057
u32 ref_count = ~0;
2058
int i;
2059
2060
if (!is_in_kernel)
2061
goto queues_busy;
2062
2063
for (i = 0; i < qm->qp_num; i++) {
2064
qp = &qm->qp_array[i];
2065
if (qp->is_in_kernel && qp->alg_type == alg_type && qp->ref_count < ref_count) {
2066
ref_count = qp->ref_count;
2067
share_qp = qp;
2068
}
2069
}
2070
2071
if (share_qp) {
2072
share_qp->ref_count++;
2073
return share_qp;
2074
}
2075
2076
queues_busy:
2077
dev_info_ratelimited(dev, "All %u queues of QM are busy and no shareable queue\n",
2078
qm->qp_num);
2079
atomic64_inc(&qm->debug.dfx.create_qp_err_cnt);
2080
return ERR_PTR(-EBUSY);
2081
}
2082
2083
static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type, bool is_in_kernel)
2084
{
2085
struct device *dev = &qm->pdev->dev;
2086
struct hisi_qp *qp;
2087
int qp_id;
2088
2089
if (atomic_read(&qm->status.flags) == QM_STOP) {
2090
dev_info_ratelimited(dev, "failed to create qp as qm is stopped!\n");
2091
return ERR_PTR(-EPERM);
2092
}
2093
2094
/* Try to find a shareable queue when all queues are busy */
2095
if (qm->qp_in_used == qm->qp_num)
2096
return find_shareable_qp(qm, alg_type, is_in_kernel);
2097
2098
qp_id = idr_alloc_cyclic(&qm->qp_idr, NULL, 0, qm->qp_num, GFP_ATOMIC);
2099
if (qp_id < 0) {
2100
dev_info_ratelimited(dev, "All %u queues of QM are busy!\n",
2101
qm->qp_num);
2102
atomic64_inc(&qm->debug.dfx.create_qp_err_cnt);
2103
return ERR_PTR(-EBUSY);
2104
}
2105
2106
qp = &qm->qp_array[qp_id];
2107
hisi_qm_unset_hw_reset(qp);
2108
memset(qp->cqe, 0, sizeof(struct qm_cqe) * qp->cq_depth);
2109
2110
qp->event_cb = NULL;
2111
qp->req_cb = NULL;
2112
qp->alg_type = alg_type;
2113
qp->is_in_kernel = is_in_kernel;
2114
qm->qp_in_used++;
2115
qp->ref_count = 1;
2116
2117
return qp;
2118
}
2119
2120
/**
2121
* hisi_qm_create_qp() - Create a queue pair from qm.
2122
* @qm: The qm we create a qp from.
2123
* @alg_type: Accelerator specific algorithm type in sqc.
2124
*
2125
* Return created qp, negative error code if failed.
2126
*/
2127
static struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type)
2128
{
2129
struct hisi_qp *qp;
2130
int ret;
2131
2132
ret = qm_pm_get_sync(qm);
2133
if (ret)
2134
return ERR_PTR(ret);
2135
2136
down_write(&qm->qps_lock);
2137
qp = qm_create_qp_nolock(qm, alg_type, false);
2138
up_write(&qm->qps_lock);
2139
2140
if (IS_ERR(qp))
2141
qm_pm_put_sync(qm);
2142
2143
return qp;
2144
}
2145
2146
/**
2147
* hisi_qm_release_qp() - Release a qp back to its qm.
2148
* @qp: The qp we want to release.
2149
*
2150
* This function releases the resource of a qp.
2151
*/
2152
static void hisi_qm_release_qp(struct hisi_qp *qp)
2153
{
2154
struct hisi_qm *qm = qp->qm;
2155
2156
down_write(&qm->qps_lock);
2157
2158
qm->qp_in_used--;
2159
idr_remove(&qm->qp_idr, qp->qp_id);
2160
2161
up_write(&qm->qps_lock);
2162
2163
qm_pm_put_sync(qm);
2164
}
2165
2166
static int qm_sq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
2167
{
2168
struct hisi_qm *qm = qp->qm;
2169
enum qm_hw_ver ver = qm->ver;
2170
struct qm_sqc sqc = {0};
2171
2172
if (ver == QM_HW_V1) {
2173
sqc.dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size));
2174
sqc.w8 = cpu_to_le16(qp->sq_depth - 1);
2175
} else {
2176
sqc.dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size, qp->sq_depth));
2177
sqc.w8 = 0; /* rand_qc */
2178
}
2179
sqc.w13 = cpu_to_le16(QM_MK_SQC_W13(0, 1, qp->alg_type));
2180
sqc.base_l = cpu_to_le32(lower_32_bits(qp->sqe_dma));
2181
sqc.base_h = cpu_to_le32(upper_32_bits(qp->sqe_dma));
2182
sqc.cq_num = cpu_to_le16(qp_id);
2183
sqc.pasid = cpu_to_le16(pasid);
2184
2185
if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel)
2186
sqc.w11 = cpu_to_le16(QM_QC_PASID_ENABLE <<
2187
QM_QC_PASID_ENABLE_SHIFT);
2188
2189
return qm_set_and_get_xqc(qm, QM_MB_CMD_SQC, &sqc, qp_id, 0);
2190
}
2191
2192
static int qm_cq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
2193
{
2194
struct hisi_qm *qm = qp->qm;
2195
enum qm_hw_ver ver = qm->ver;
2196
struct qm_cqc cqc = {0};
2197
2198
if (ver == QM_HW_V1) {
2199
cqc.dw3 = cpu_to_le32(QM_MK_CQC_DW3_V1(0, 0, 0, QM_QC_CQE_SIZE));
2200
cqc.w8 = cpu_to_le16(qp->cq_depth - 1);
2201
} else {
2202
cqc.dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(QM_QC_CQE_SIZE, qp->cq_depth));
2203
cqc.w8 = 0; /* rand_qc */
2204
}
2205
/*
2206
* Enable request finishing interrupts by default,
2207
* so interrupts will be generated until this is
2208
* disabled.
2209
*/
2210
cqc.dw6 = cpu_to_le32(1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT);
2211
cqc.base_l = cpu_to_le32(lower_32_bits(qp->cqe_dma));
2212
cqc.base_h = cpu_to_le32(upper_32_bits(qp->cqe_dma));
2213
cqc.pasid = cpu_to_le16(pasid);
2214
2215
if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel)
2216
cqc.w11 = cpu_to_le16(QM_QC_PASID_ENABLE);
2217
2218
return qm_set_and_get_xqc(qm, QM_MB_CMD_CQC, &cqc, qp_id, 0);
2219
}
2220
2221
static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
2222
{
2223
int ret;
2224
2225
qm_init_qp_status(qp);
2226
2227
ret = qm_sq_ctx_cfg(qp, qp_id, pasid);
2228
if (ret)
2229
return ret;
2230
2231
return qm_cq_ctx_cfg(qp, qp_id, pasid);
2232
}
2233
2234
static int qm_start_qp_nolock(struct hisi_qp *qp, unsigned long arg)
2235
{
2236
struct hisi_qm *qm = qp->qm;
2237
struct device *dev = &qm->pdev->dev;
2238
int qp_id = qp->qp_id;
2239
u32 pasid = arg;
2240
int ret;
2241
2242
if (atomic_read(&qm->status.flags) == QM_STOP) {
2243
dev_info_ratelimited(dev, "failed to start qp as qm is stopped!\n");
2244
return -EPERM;
2245
}
2246
2247
ret = qm_qp_ctx_cfg(qp, qp_id, pasid);
2248
if (ret)
2249
return ret;
2250
2251
atomic_set(&qp->qp_status.flags, QP_START);
2252
dev_dbg(dev, "queue %d started\n", qp_id);
2253
2254
return 0;
2255
}
2256
2257
/**
2258
* hisi_qm_start_qp() - Start a qp into running.
2259
* @qp: The qp we want to start to run.
2260
* @arg: Accelerator specific argument.
2261
*
2262
* After this function, the qp can receive requests from users. Return 0 if
2263
* successful, negative error code if failed.
2264
*/
2265
int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg)
2266
{
2267
struct hisi_qm *qm = qp->qm;
2268
int ret;
2269
2270
down_write(&qm->qps_lock);
2271
ret = qm_start_qp_nolock(qp, arg);
2272
up_write(&qm->qps_lock);
2273
2274
return ret;
2275
}
2276
EXPORT_SYMBOL_GPL(hisi_qm_start_qp);
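/*
 * Illustrative usage sketch, not part of the driver: an in-kernel user that
 * has obtained queue pairs (e.g. via hisi_qm_alloc_qps_node()) starts each
 * one with a pasid of 0, since a real pasid is only meaningful for user
 * space (SVA) queues. The helper name "example_start_qps" is hypothetical.
 */
#if 0	/* usage sketch only */
static int example_start_qps(struct hisi_qp **qps, int num)
{
        int i, ret;

        for (i = 0; i < num; i++) {
                /* arg is the accelerator-specific pasid, 0 for in-kernel queues */
                ret = hisi_qm_start_qp(qps[i], 0);
                if (ret)
                        return ret;
        }

        return 0;
}
#endif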
2277
2278
/**
2279
* qp_stop_fail_cb() - call the request callback.
2280
* @qp: The qp that failed to stop.
2281
*
2282
* The callback function should be called whether the task completed or not.
2283
*/
2284
static void qp_stop_fail_cb(struct hisi_qp *qp)
2285
{
2286
int qp_used = atomic_read(&qp->qp_status.used);
2287
u16 cur_tail = qp->qp_status.sq_tail;
2288
u16 sq_depth = qp->sq_depth;
2289
u16 cur_head = (cur_tail + sq_depth - qp_used) % sq_depth;
2290
struct hisi_qm *qm = qp->qm;
2291
u16 pos;
2292
int i;
2293
2294
for (i = 0; i < qp_used; i++) {
2295
pos = (i + cur_head) % sq_depth;
2296
qp->req_cb(qp, qp->sqe + (u32)(qm->sqe_size * pos));
2297
qm_cq_head_update(qp);
2298
atomic_dec(&qp->qp_status.used);
2299
}
2300
}
2301
2302
static int qm_wait_qp_empty(struct hisi_qm *qm, u32 *state, u32 qp_id)
2303
{
2304
struct device *dev = &qm->pdev->dev;
2305
struct qm_sqc sqc;
2306
struct qm_cqc cqc;
2307
int ret, i = 0;
2308
2309
while (++i) {
2310
ret = qm_set_and_get_xqc(qm, QM_MB_CMD_SQC, &sqc, qp_id, 1);
2311
if (ret) {
2312
dev_err_ratelimited(dev, "Failed to dump sqc!\n");
2313
*state = QM_DUMP_SQC_FAIL;
2314
return ret;
2315
}
2316
2317
ret = qm_set_and_get_xqc(qm, QM_MB_CMD_CQC, &cqc, qp_id, 1);
2318
if (ret) {
2319
dev_err_ratelimited(dev, "Failed to dump cqc!\n");
2320
*state = QM_DUMP_CQC_FAIL;
2321
return ret;
2322
}
2323
2324
if ((sqc.tail == cqc.tail) &&
2325
(QM_SQ_TAIL_IDX(sqc) == QM_CQ_TAIL_IDX(cqc)))
2326
break;
2327
2328
if (i == MAX_WAIT_COUNTS) {
2329
dev_err(dev, "Fail to empty queue %u!\n", qp_id);
2330
*state = QM_STOP_QUEUE_FAIL;
2331
return -ETIMEDOUT;
2332
}
2333
2334
usleep_range(WAIT_PERIOD_US_MIN, WAIT_PERIOD_US_MAX);
2335
}
2336
2337
return 0;
2338
}
2339
2340
/**
2341
* qm_drain_qp() - Drain a qp.
2342
* @qp: The qp we want to drain.
2343
*
2344
* If the device does not support stopping a queue via mailbox,
2345
* determine whether the queue is drained by comparing the tail pointers of
2346
* the sq and cq.
2347
*/
2348
static int qm_drain_qp(struct hisi_qp *qp)
2349
{
2350
struct hisi_qm *qm = qp->qm;
2351
u32 state = 0;
2352
int ret;
2353
2354
/* No need to judge if master OOO is blocked. */
2355
if (qm_check_dev_error(qm))
2356
return 0;
2357
2358
/* HW V3 supports drain qp by device */
2359
if (test_bit(QM_SUPPORT_STOP_QP, &qm->caps)) {
2360
ret = qm_stop_qp(qp);
2361
if (ret) {
2362
dev_err(&qm->pdev->dev, "Failed to stop qp!\n");
2363
state = QM_STOP_QUEUE_FAIL;
2364
goto set_dev_state;
2365
}
2366
return ret;
2367
}
2368
2369
ret = qm_wait_qp_empty(qm, &state, qp->qp_id);
2370
if (ret)
2371
goto set_dev_state;
2372
2373
return 0;
2374
2375
set_dev_state:
2376
if (qm->debug.dev_dfx.dev_timeout)
2377
qm->debug.dev_dfx.dev_state = state;
2378
2379
return ret;
2380
}
2381
2382
static void qm_stop_qp_nolock(struct hisi_qp *qp)
2383
{
2384
struct hisi_qm *qm = qp->qm;
2385
struct device *dev = &qm->pdev->dev;
2386
int ret;
2387
2388
/*
2389
* Stopping and releasing a qp during reset is allowed. If the qp was
2390
* already stopped by the reset but still needs to be released, the
2391
* is_resetting flag should be cleared so that this qp will not
2392
* be restarted after the reset.
2393
*/
2394
if (atomic_read(&qp->qp_status.flags) != QP_START) {
2395
qp->is_resetting = false;
2396
return;
2397
}
2398
2399
atomic_set(&qp->qp_status.flags, QP_STOP);
2400
2401
/* V3 supports stopping the queue directly during FLR preparation */
2402
if (qm->ver < QM_HW_V3 || qm->status.stop_reason == QM_NORMAL) {
2403
ret = qm_drain_qp(qp);
2404
if (ret)
2405
dev_err(dev, "Failed to drain out data for stopping qp(%u)!\n", qp->qp_id);
2406
}
2407
2408
flush_workqueue(qm->wq);
2409
if (unlikely(qp->is_resetting && atomic_read(&qp->qp_status.used)))
2410
qp_stop_fail_cb(qp);
2411
2412
dev_dbg(dev, "stop queue %u!", qp->qp_id);
2413
}
2414
2415
/**
2416
* hisi_qm_stop_qp() - Stop a qp in qm.
2417
* @qp: The qp we want to stop.
2418
*
2419
* This function is reverse of hisi_qm_start_qp.
2420
*/
2421
void hisi_qm_stop_qp(struct hisi_qp *qp)
2422
{
2423
down_write(&qp->qm->qps_lock);
2424
qm_stop_qp_nolock(qp);
2425
up_write(&qp->qm->qps_lock);
2426
}
2427
EXPORT_SYMBOL_GPL(hisi_qm_stop_qp);
2428
2429
/**
2430
* hisi_qp_send() - Queue up a task in the hardware queue.
2431
* @qp: The qp in which to put the message.
2432
* @msg: The message.
2433
*
2434
* This function returns -EBUSY if the qp is currently full, and -EAGAIN
2435
* if the qm related to the qp is resetting.
2436
*
2437
* Note: This function may run concurrently with qm_irq_thread and ACC reset.
2438
* It has no race with qm_irq_thread. However, an ACC reset may happen during
2439
* hisi_qp_send; for performance reasons no lock is taken here. In that case
2440
* the current qm_db may fail to send, or the sent sqe may not be received.
2441
* The QM sync/async receive functions should handle the erroneous sqe, and the
2442
* ACC reset done function should clear the used sqes to 0.
2443
*/
2444
int hisi_qp_send(struct hisi_qp *qp, const void *msg)
2445
{
2446
struct hisi_qp_status *qp_status = &qp->qp_status;
2447
u16 sq_tail, sq_tail_next;
2448
void *sqe;
2449
2450
spin_lock_bh(&qp->qp_lock);
2451
if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP ||
2452
atomic_read(&qp->qm->status.flags) == QM_STOP ||
2453
qp->is_resetting)) {
2454
spin_unlock_bh(&qp->qp_lock);
2455
dev_info_ratelimited(&qp->qm->pdev->dev, "QP is stopped or resetting\n");
2456
return -EAGAIN;
2457
}
2458
2459
sqe = qm_get_avail_sqe(qp);
2460
if (!sqe) {
2461
spin_unlock_bh(&qp->qp_lock);
2462
return -EBUSY;
2463
}
2464
2465
sq_tail = qp_status->sq_tail;
2466
sq_tail_next = (sq_tail + 1) % qp->sq_depth;
2467
memcpy(sqe, msg, qp->qm->sqe_size);
2468
qp->msg[sq_tail] = msg;
2469
2470
qm_db(qp->qm, qp->qp_id, QM_DOORBELL_CMD_SQ, sq_tail_next, 0);
2471
atomic_inc(&qp->qp_status.used);
2472
qp_status->sq_tail = sq_tail_next;
2473
spin_unlock_bh(&qp->qp_lock);
2474
2475
return 0;
2476
}
2477
EXPORT_SYMBOL_GPL(hisi_qp_send);
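/*
 * Illustrative usage sketch, not part of the driver: callers are expected to
 * treat -EBUSY as "queue full, back off and retry" and -EAGAIN as "device
 * stopped or resetting". The bounded retry below and the helper name
 * "example_submit" are hypothetical; real users keep their own backlogs.
 */
#if 0	/* usage sketch only */
static int example_submit(struct hisi_qp *qp, const void *sqe)
{
        int i, ret;

        for (i = 0; i < 16; i++) {
                ret = hisi_qp_send(qp, sqe);
                if (ret != -EBUSY)
                        return ret;	/* 0 on success, -EAGAIN if stopped/resetting */
                cond_resched();		/* queue full: yield and retry */
        }

        return -EBUSY;
}
#endif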
2478
2479
static void hisi_qm_cache_wb(struct hisi_qm *qm)
2480
{
2481
unsigned int val;
2482
2483
if (qm->ver == QM_HW_V1)
2484
return;
2485
2486
writel(0x1, qm->io_base + QM_CACHE_WB_START);
2487
if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE,
2488
val, val & BIT(0), POLL_PERIOD,
2489
POLL_TIMEOUT))
2490
dev_err(&qm->pdev->dev, "QM writeback sqc cache fail!\n");
2491
}
2492
2493
static void qm_qp_event_notifier(struct hisi_qp *qp)
2494
{
2495
wake_up_interruptible(&qp->uacce_q->wait);
2496
}
2497
2498
/* This function returns free number of qp in qm. */
2499
static int hisi_qm_get_available_instances(struct uacce_device *uacce)
2500
{
2501
struct hisi_qm *qm = uacce->priv;
2502
int ret;
2503
2504
down_read(&qm->qps_lock);
2505
ret = qm->qp_num - qm->qp_in_used;
2506
up_read(&qm->qps_lock);
2507
2508
return ret;
2509
}
2510
2511
static void hisi_qm_set_hw_reset(struct hisi_qm *qm, int offset)
2512
{
2513
int i;
2514
2515
for (i = 0; i < qm->qp_num; i++)
2516
qm_set_qp_disable(&qm->qp_array[i], offset);
2517
}
2518
2519
static int hisi_qm_uacce_get_queue(struct uacce_device *uacce,
2520
unsigned long arg,
2521
struct uacce_queue *q)
2522
{
2523
struct hisi_qm *qm = uacce->priv;
2524
struct hisi_qp *qp;
2525
u8 alg_type = 0;
2526
2527
qp = hisi_qm_create_qp(qm, alg_type);
2528
if (IS_ERR(qp))
2529
return PTR_ERR(qp);
2530
2531
q->priv = qp;
2532
q->uacce = uacce;
2533
qp->uacce_q = q;
2534
qp->event_cb = qm_qp_event_notifier;
2535
qp->pasid = arg;
2536
2537
return 0;
2538
}
2539
2540
static void hisi_qm_uacce_put_queue(struct uacce_queue *q)
2541
{
2542
struct hisi_qp *qp = q->priv;
2543
2544
hisi_qm_release_qp(qp);
2545
}
2546
2547
/* map sq/cq/doorbell to user space */
2548
static int hisi_qm_uacce_mmap(struct uacce_queue *q,
2549
struct vm_area_struct *vma,
2550
struct uacce_qfile_region *qfr)
2551
{
2552
struct hisi_qp *qp = q->priv;
2553
struct hisi_qm *qm = qp->qm;
2554
resource_size_t phys_base = qm->db_phys_base +
2555
qp->qp_id * qm->db_interval;
2556
size_t sz = vma->vm_end - vma->vm_start;
2557
struct pci_dev *pdev = qm->pdev;
2558
struct device *dev = &pdev->dev;
2559
unsigned long vm_pgoff;
2560
int ret;
2561
2562
switch (qfr->type) {
2563
case UACCE_QFRT_MMIO:
2564
if (qm->ver == QM_HW_V1) {
2565
if (sz > PAGE_SIZE * QM_DOORBELL_PAGE_NR)
2566
return -EINVAL;
2567
} else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) {
2568
if (sz > PAGE_SIZE * (QM_DOORBELL_PAGE_NR +
2569
QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE))
2570
return -EINVAL;
2571
} else {
2572
if (sz > qm->db_interval)
2573
return -EINVAL;
2574
}
2575
2576
vm_flags_set(vma, VM_IO);
2577
2578
return remap_pfn_range(vma, vma->vm_start,
2579
phys_base >> PAGE_SHIFT,
2580
sz, pgprot_noncached(vma->vm_page_prot));
2581
case UACCE_QFRT_DUS:
2582
if (sz != qp->qdma.size)
2583
return -EINVAL;
2584
2585
/*
2586
* dma_mmap_coherent() requires vm_pgoff to be 0;
2587
* restore vm_pgoff to its initial value after mmap().
2588
*/
2589
vm_pgoff = vma->vm_pgoff;
2590
vma->vm_pgoff = 0;
2591
ret = dma_mmap_coherent(dev, vma, qp->qdma.va,
2592
qp->qdma.dma, sz);
2593
vma->vm_pgoff = vm_pgoff;
2594
return ret;
2595
2596
default:
2597
return -EINVAL;
2598
}
2599
}
2600
2601
static int hisi_qm_uacce_start_queue(struct uacce_queue *q)
2602
{
2603
struct hisi_qp *qp = q->priv;
2604
2605
return hisi_qm_start_qp(qp, qp->pasid);
2606
}
2607
2608
static void hisi_qm_uacce_stop_queue(struct uacce_queue *q)
2609
{
2610
struct hisi_qp *qp = q->priv;
2611
struct hisi_qm *qm = qp->qm;
2612
struct qm_dev_dfx *dev_dfx = &qm->debug.dev_dfx;
2613
u32 i = 0;
2614
2615
hisi_qm_stop_qp(qp);
2616
2617
if (!dev_dfx->dev_timeout || !dev_dfx->dev_state)
2618
return;
2619
2620
/*
2621
* After the queue fails to be stopped,
2622
* wait for a period of time before releasing the queue.
2623
*/
2624
while (++i) {
2625
msleep(WAIT_PERIOD);
2626
2627
/* Since dev_timeout may be modified, check i >= dev_timeout */
2628
if (i >= dev_dfx->dev_timeout) {
2629
dev_err(&qm->pdev->dev, "Stop q %u timeout, state %u\n",
2630
qp->qp_id, dev_dfx->dev_state);
2631
dev_dfx->dev_state = QM_FINISH_WAIT;
2632
break;
2633
}
2634
}
2635
}
2636
2637
static int hisi_qm_is_q_updated(struct uacce_queue *q)
2638
{
2639
struct hisi_qp *qp = q->priv;
2640
struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head;
2641
int updated = 0;
2642
2643
while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) {
2644
/* make sure to read data from memory */
2645
dma_rmb();
2646
qm_cq_head_update(qp);
2647
cqe = qp->cqe + qp->qp_status.cq_head;
2648
updated = 1;
2649
}
2650
2651
return updated;
2652
}
2653
2654
static void qm_set_sqctype(struct uacce_queue *q, u16 type)
2655
{
2656
struct hisi_qm *qm = q->uacce->priv;
2657
struct hisi_qp *qp = q->priv;
2658
2659
down_write(&qm->qps_lock);
2660
qp->alg_type = type;
2661
up_write(&qm->qps_lock);
2662
}
2663
2664
static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd,
2665
unsigned long arg)
2666
{
2667
struct hisi_qp *qp = q->priv;
2668
struct hisi_qp_info qp_info;
2669
struct hisi_qp_ctx qp_ctx;
2670
2671
if (cmd == UACCE_CMD_QM_SET_QP_CTX) {
2672
if (copy_from_user(&qp_ctx, (void __user *)arg,
2673
sizeof(struct hisi_qp_ctx)))
2674
return -EFAULT;
2675
2676
if (qp_ctx.qc_type > QM_MAX_QC_TYPE)
2677
return -EINVAL;
2678
2679
qm_set_sqctype(q, qp_ctx.qc_type);
2680
qp_ctx.id = qp->qp_id;
2681
2682
if (copy_to_user((void __user *)arg, &qp_ctx,
2683
sizeof(struct hisi_qp_ctx)))
2684
return -EFAULT;
2685
2686
return 0;
2687
} else if (cmd == UACCE_CMD_QM_SET_QP_INFO) {
2688
if (copy_from_user(&qp_info, (void __user *)arg,
2689
sizeof(struct hisi_qp_info)))
2690
return -EFAULT;
2691
2692
qp_info.sqe_size = qp->qm->sqe_size;
2693
qp_info.sq_depth = qp->sq_depth;
2694
qp_info.cq_depth = qp->cq_depth;
2695
2696
if (copy_to_user((void __user *)arg, &qp_info,
2697
sizeof(struct hisi_qp_info)))
2698
return -EFAULT;
2699
2700
return 0;
2701
}
2702
2703
return -EINVAL;
2704
}
2705
2706
/**
2707
* qm_hw_err_isolate() - Try to set the isolation status of the uacce device
2708
* according to user's configuration of error threshold.
2709
* @qm: the uacce device
2710
*/
2711
static int qm_hw_err_isolate(struct hisi_qm *qm)
2712
{
2713
struct qm_hw_err *err, *tmp, *hw_err;
2714
struct qm_err_isolate *isolate;
2715
u32 count = 0;
2716
2717
isolate = &qm->isolate_data;
2718
2719
#define SECONDS_PER_HOUR 3600
2720
2721
/* All the hw errs are processed by PF driver */
2722
if (qm->uacce->is_vf || isolate->is_isolate || !isolate->err_threshold)
2723
return 0;
2724
2725
hw_err = kzalloc(sizeof(*hw_err), GFP_KERNEL);
2726
if (!hw_err)
2727
return -ENOMEM;
2728
2729
/*
2730
* Time-stamp every slot AER error. When the next device AER error occurs,
2731
* check the AER error log: if the number of slot AER errors within the last
2732
* hour exceeds the configured error threshold, the isolated state is set
2733
* to true. AER error logs older than one hour are cleared.
2734
*/
2735
mutex_lock(&isolate->isolate_lock);
2736
hw_err->timestamp = jiffies;
2737
list_for_each_entry_safe(err, tmp, &isolate->qm_hw_errs, list) {
2738
if ((hw_err->timestamp - err->timestamp) / HZ >
2739
SECONDS_PER_HOUR) {
2740
list_del(&err->list);
2741
kfree(err);
2742
} else {
2743
count++;
2744
}
2745
}
2746
list_add(&hw_err->list, &isolate->qm_hw_errs);
2747
2748
if (count >= isolate->err_threshold)
2749
isolate->is_isolate = true;
2750
mutex_unlock(&isolate->isolate_lock);
2751
2752
return 0;
2753
}
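/*
 * Illustrative sketch, not part of the driver: the sliding-window policy
 * above only counts errors from the last hour and isolates the device once
 * the count reaches the user-configured threshold. A minimal standalone
 * form of that check, with hypothetical names, would be:
 */
#if 0	/* policy sketch only */
static bool example_should_isolate(struct list_head *errs, u32 threshold)
{
        struct qm_hw_err *err;
        u32 count = 0;

        list_for_each_entry(err, errs, list)
                if ((jiffies - err->timestamp) / HZ <= SECONDS_PER_HOUR)
                        count++;

        return threshold && count >= threshold;
}
#endif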
2754
2755
static void qm_hw_err_destroy(struct hisi_qm *qm)
2756
{
2757
struct qm_hw_err *err, *tmp;
2758
2759
list_for_each_entry_safe(err, tmp, &qm->isolate_data.qm_hw_errs, list) {
2760
list_del(&err->list);
2761
kfree(err);
2762
}
2763
}
2764
2765
static enum uacce_dev_state hisi_qm_get_isolate_state(struct uacce_device *uacce)
2766
{
2767
struct hisi_qm *qm = uacce->priv;
2768
struct hisi_qm *pf_qm;
2769
2770
if (uacce->is_vf)
2771
pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
2772
else
2773
pf_qm = qm;
2774
2775
return pf_qm->isolate_data.is_isolate ?
2776
UACCE_DEV_ISOLATE : UACCE_DEV_NORMAL;
2777
}
2778
2779
static int hisi_qm_isolate_threshold_write(struct uacce_device *uacce, u32 num)
2780
{
2781
struct hisi_qm *qm = uacce->priv;
2782
2783
/* Must be set by PF */
2784
if (uacce->is_vf)
2785
return -EPERM;
2786
2787
if (qm->isolate_data.is_isolate)
2788
return -EPERM;
2789
2790
mutex_lock(&qm->isolate_data.isolate_lock);
2791
qm->isolate_data.err_threshold = num;
2792
2793
/* After the policy is updated, the hardware error list needs to be reset */
2794
qm_hw_err_destroy(qm);
2795
mutex_unlock(&qm->isolate_data.isolate_lock);
2796
2797
return 0;
2798
}
2799
2800
static u32 hisi_qm_isolate_threshold_read(struct uacce_device *uacce)
2801
{
2802
struct hisi_qm *qm = uacce->priv;
2803
struct hisi_qm *pf_qm;
2804
2805
if (uacce->is_vf) {
2806
pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
2807
return pf_qm->isolate_data.err_threshold;
2808
}
2809
2810
return qm->isolate_data.err_threshold;
2811
}
2812
2813
static const struct uacce_ops uacce_qm_ops = {
2814
.get_available_instances = hisi_qm_get_available_instances,
2815
.get_queue = hisi_qm_uacce_get_queue,
2816
.put_queue = hisi_qm_uacce_put_queue,
2817
.start_queue = hisi_qm_uacce_start_queue,
2818
.stop_queue = hisi_qm_uacce_stop_queue,
2819
.mmap = hisi_qm_uacce_mmap,
2820
.ioctl = hisi_qm_uacce_ioctl,
2821
.is_q_updated = hisi_qm_is_q_updated,
2822
.get_isolate_state = hisi_qm_get_isolate_state,
2823
.isolate_err_threshold_write = hisi_qm_isolate_threshold_write,
2824
.isolate_err_threshold_read = hisi_qm_isolate_threshold_read,
2825
};
2826
2827
static void qm_remove_uacce(struct hisi_qm *qm)
2828
{
2829
struct uacce_device *uacce = qm->uacce;
2830
2831
if (qm->use_sva) {
2832
mutex_lock(&qm->isolate_data.isolate_lock);
2833
qm_hw_err_destroy(qm);
2834
mutex_unlock(&qm->isolate_data.isolate_lock);
2835
2836
uacce_remove(uacce);
2837
qm->uacce = NULL;
2838
}
2839
}
2840
2841
static void qm_uacce_api_ver_init(struct hisi_qm *qm)
2842
{
2843
struct uacce_device *uacce = qm->uacce;
2844
2845
switch (qm->ver) {
2846
case QM_HW_V1:
2847
uacce->api_ver = HISI_QM_API_VER_BASE;
2848
break;
2849
case QM_HW_V2:
2850
uacce->api_ver = HISI_QM_API_VER2_BASE;
2851
break;
2852
case QM_HW_V3:
2853
case QM_HW_V4:
2854
uacce->api_ver = HISI_QM_API_VER3_BASE;
2855
break;
2856
default:
2857
uacce->api_ver = HISI_QM_API_VER5_BASE;
2858
break;
2859
}
2860
}
2861
2862
static int qm_alloc_uacce(struct hisi_qm *qm)
2863
{
2864
struct pci_dev *pdev = qm->pdev;
2865
struct uacce_device *uacce;
2866
unsigned long mmio_page_nr;
2867
unsigned long dus_page_nr;
2868
u16 sq_depth, cq_depth;
2869
struct uacce_interface interface = {
2870
.flags = UACCE_DEV_SVA,
2871
.ops = &uacce_qm_ops,
2872
};
2873
int ret;
2874
2875
ret = strscpy(interface.name, dev_driver_string(&pdev->dev),
2876
sizeof(interface.name));
2877
if (ret < 0)
2878
return -ENAMETOOLONG;
2879
2880
uacce = uacce_alloc(&pdev->dev, &interface);
2881
if (IS_ERR(uacce))
2882
return PTR_ERR(uacce);
2883
2884
if (uacce->flags & UACCE_DEV_SVA) {
2885
qm->use_sva = true;
2886
} else {
2887
/* only consider sva case */
2888
qm_remove_uacce(qm);
2889
return -EINVAL;
2890
}
2891
2892
uacce->is_vf = pdev->is_virtfn;
2893
uacce->priv = qm;
2894
2895
if (qm->ver == QM_HW_V1)
2896
mmio_page_nr = QM_DOORBELL_PAGE_NR;
2897
else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps))
2898
mmio_page_nr = QM_DOORBELL_PAGE_NR +
2899
QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE;
2900
else
2901
mmio_page_nr = qm->db_interval / PAGE_SIZE;
2902
2903
qm_get_xqc_depth(qm, &sq_depth, &cq_depth, QM_QP_DEPTH_CAP);
2904
2905
/* Add one more page for device or qp status */
2906
dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * sq_depth +
2907
sizeof(struct qm_cqe) * cq_depth + PAGE_SIZE) >>
2908
PAGE_SHIFT;
2909
2910
uacce->qf_pg_num[UACCE_QFRT_MMIO] = mmio_page_nr;
2911
uacce->qf_pg_num[UACCE_QFRT_DUS] = dus_page_nr;
2912
2913
qm->uacce = uacce;
2914
qm_uacce_api_ver_init(qm);
2915
INIT_LIST_HEAD(&qm->isolate_data.qm_hw_errs);
2916
mutex_init(&qm->isolate_data.isolate_lock);
2917
2918
return 0;
2919
}
2920
2921
/**
2922
* qm_frozen() - Try to freeze the QM to cut off continuous queue requests. If
2923
* there are users on the QM, return failure without doing anything.
2924
* @qm: The qm to be frozen.
2925
*
2926
* This function freezes the QM, after which SR-IOV can be disabled.
2927
*/
2928
static int qm_frozen(struct hisi_qm *qm)
2929
{
2930
if (test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl))
2931
return 0;
2932
2933
down_write(&qm->qps_lock);
2934
2935
if (!qm->qp_in_used) {
2936
qm->qp_in_used = qm->qp_num;
2937
up_write(&qm->qps_lock);
2938
set_bit(QM_DRIVER_REMOVING, &qm->misc_ctl);
2939
return 0;
2940
}
2941
2942
up_write(&qm->qps_lock);
2943
2944
return -EBUSY;
2945
}
2946
2947
static int qm_try_frozen_vfs(struct pci_dev *pdev,
2948
struct hisi_qm_list *qm_list)
2949
{
2950
struct hisi_qm *qm, *vf_qm;
2951
struct pci_dev *dev;
2952
int ret = 0;
2953
2954
if (!qm_list || !pdev)
2955
return -EINVAL;
2956
2957
/* Try to freeze all the VFs before disabling SR-IOV */
2958
mutex_lock(&qm_list->lock);
2959
list_for_each_entry(qm, &qm_list->list, list) {
2960
dev = qm->pdev;
2961
if (dev == pdev)
2962
continue;
2963
if (pci_physfn(dev) == pdev) {
2964
vf_qm = pci_get_drvdata(dev);
2965
ret = qm_frozen(vf_qm);
2966
if (ret)
2967
goto frozen_fail;
2968
}
2969
}
2970
2971
frozen_fail:
2972
mutex_unlock(&qm_list->lock);
2973
2974
return ret;
2975
}
2976
2977
/**
2978
* hisi_qm_wait_task_finish() - Wait until the task is finished
2979
* when removing the driver.
2980
* @qm: The qm needed to wait for the task to finish.
2981
* @qm_list: The list of all available devices.
2982
*/
2983
void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
2984
{
2985
while (qm_frozen(qm) ||
2986
((qm->fun_type == QM_HW_PF) &&
2987
qm_try_frozen_vfs(qm->pdev, qm_list))) {
2988
msleep(WAIT_PERIOD);
2989
}
2990
2991
while (test_bit(QM_RST_SCHED, &qm->misc_ctl) ||
2992
test_bit(QM_RESETTING, &qm->misc_ctl))
2993
msleep(WAIT_PERIOD);
2994
2995
if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
2996
flush_work(&qm->cmd_process);
2997
2998
udelay(REMOVE_WAIT_DELAY);
2999
}
3000
EXPORT_SYMBOL_GPL(hisi_qm_wait_task_finish);
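/*
 * Illustrative usage sketch, not part of the driver: accelerator drivers
 * typically call hisi_qm_wait_task_finish() from their PCI .remove()
 * callback before stopping and uninitializing the qm. "example_devices"
 * and "example_remove" are hypothetical names.
 */
#if 0	/* usage sketch only */
static struct hisi_qm_list example_devices;

static void example_remove(struct pci_dev *pdev)
{
        struct hisi_qm *qm = pci_get_drvdata(pdev);

        hisi_qm_wait_task_finish(qm, &example_devices);
        hisi_qm_stop(qm, QM_NORMAL);
        hisi_qm_uninit(qm);
}
#endif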
3001
3002
static void hisi_qp_memory_uninit(struct hisi_qm *qm, int num)
3003
{
3004
struct device *dev = &qm->pdev->dev;
3005
struct hisi_qp *qp;
3006
int i;
3007
3008
for (i = num - 1; i >= 0; i--) {
3009
qp = &qm->qp_array[i];
3010
dma_free_coherent(dev, qp->qdma.size, qp->qdma.va, qp->qdma.dma);
3011
kfree(qp->msg);
3012
kfree(qm->poll_data[i].qp_finish_id);
3013
}
3014
3015
kfree(qm->poll_data);
3016
kfree(qm->qp_array);
3017
}
3018
3019
static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id,
3020
u16 sq_depth, u16 cq_depth)
3021
{
3022
struct device *dev = &qm->pdev->dev;
3023
size_t off = qm->sqe_size * sq_depth;
3024
struct hisi_qp *qp;
3025
int ret = -ENOMEM;
3026
3027
qm->poll_data[id].qp_finish_id = kcalloc(qm->qp_num, sizeof(u16),
3028
GFP_KERNEL);
3029
if (!qm->poll_data[id].qp_finish_id)
3030
return -ENOMEM;
3031
3032
qp = &qm->qp_array[id];
3033
qp->msg = kmalloc_array(sq_depth, sizeof(void *), GFP_KERNEL);
3034
if (!qp->msg)
3035
goto err_free_qp_finish_id;
3036
3037
qp->qdma.va = dma_alloc_coherent(dev, dma_size, &qp->qdma.dma,
3038
GFP_KERNEL);
3039
if (!qp->qdma.va)
3040
goto err_free_qp_msg;
3041
3042
qp->sqe = qp->qdma.va;
3043
qp->sqe_dma = qp->qdma.dma;
3044
qp->cqe = qp->qdma.va + off;
3045
qp->cqe_dma = qp->qdma.dma + off;
3046
qp->qdma.size = dma_size;
3047
qp->sq_depth = sq_depth;
3048
qp->cq_depth = cq_depth;
3049
qp->qm = qm;
3050
qp->qp_id = id;
3051
3052
spin_lock_init(&qp->qp_lock);
3053
spin_lock_init(&qp->backlog.lock);
3054
INIT_LIST_HEAD(&qp->backlog.list);
3055
3056
return 0;
3057
3058
err_free_qp_msg:
3059
kfree(qp->msg);
3060
err_free_qp_finish_id:
3061
kfree(qm->poll_data[id].qp_finish_id);
3062
return ret;
3063
}
3064
3065
static void hisi_qm_pre_init(struct hisi_qm *qm)
3066
{
3067
struct pci_dev *pdev = qm->pdev;
3068
3069
if (qm->ver == QM_HW_V1)
3070
qm->ops = &qm_hw_ops_v1;
3071
else if (qm->ver == QM_HW_V2)
3072
qm->ops = &qm_hw_ops_v2;
3073
else if (qm->ver == QM_HW_V3)
3074
qm->ops = &qm_hw_ops_v3;
3075
else
3076
qm->ops = &qm_hw_ops_v4;
3077
3078
pci_set_drvdata(pdev, qm);
3079
mutex_init(&qm->mailbox_lock);
3080
mutex_init(&qm->ifc_lock);
3081
init_rwsem(&qm->qps_lock);
3082
qm->qp_in_used = 0;
3083
if (test_bit(QM_SUPPORT_RPM, &qm->caps)) {
3084
if (!acpi_device_power_manageable(ACPI_COMPANION(&pdev->dev)))
3085
dev_info(&pdev->dev, "_PS0 and _PR0 are not defined");
3086
}
3087
}
3088
3089
static void qm_cmd_uninit(struct hisi_qm *qm)
3090
{
3091
u32 val;
3092
3093
if (!test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
3094
return;
3095
3096
val = readl(qm->io_base + QM_IFC_INT_MASK);
3097
val |= QM_IFC_INT_DISABLE;
3098
writel(val, qm->io_base + QM_IFC_INT_MASK);
3099
}
3100
3101
static void qm_cmd_init(struct hisi_qm *qm)
3102
{
3103
u32 val;
3104
3105
if (!test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
3106
return;
3107
3108
/* Clear communication interrupt source */
3109
qm_clear_cmd_interrupt(qm, QM_IFC_INT_SOURCE_CLR);
3110
3111
/* Enable pf to vf communication reg. */
3112
val = readl(qm->io_base + QM_IFC_INT_MASK);
3113
val &= ~QM_IFC_INT_DISABLE;
3114
writel(val, qm->io_base + QM_IFC_INT_MASK);
3115
}
3116
3117
static void qm_put_pci_res(struct hisi_qm *qm)
3118
{
3119
struct pci_dev *pdev = qm->pdev;
3120
3121
if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps))
3122
iounmap(qm->db_io_base);
3123
3124
iounmap(qm->io_base);
3125
pci_release_mem_regions(pdev);
3126
}
3127
3128
static void hisi_mig_region_clear(struct hisi_qm *qm)
3129
{
3130
u32 val;
3131
3132
/* Clear migration region set of PF */
3133
if (qm->fun_type == QM_HW_PF && qm->ver > QM_HW_V3) {
3134
val = readl(qm->io_base + QM_MIG_REGION_SEL);
3135
val &= ~QM_MIG_REGION_EN;
3136
writel(val, qm->io_base + QM_MIG_REGION_SEL);
3137
}
3138
}
3139
3140
static void hisi_mig_region_enable(struct hisi_qm *qm)
3141
{
3142
u32 val;
3143
3144
/* Select migration region of PF */
3145
if (qm->fun_type == QM_HW_PF && qm->ver > QM_HW_V3) {
3146
val = readl(qm->io_base + QM_MIG_REGION_SEL);
3147
val |= QM_MIG_REGION_EN;
3148
writel(val, qm->io_base + QM_MIG_REGION_SEL);
3149
}
3150
}
3151
3152
static void hisi_qm_pci_uninit(struct hisi_qm *qm)
3153
{
3154
struct pci_dev *pdev = qm->pdev;
3155
3156
pci_free_irq_vectors(pdev);
3157
hisi_mig_region_clear(qm);
3158
qm_put_pci_res(qm);
3159
pci_disable_device(pdev);
3160
}
3161
3162
static void hisi_qm_set_state(struct hisi_qm *qm, u8 state)
3163
{
3164
if (qm->ver > QM_HW_V2 && qm->fun_type == QM_HW_VF)
3165
writel(state, qm->io_base + QM_VF_STATE);
3166
}
3167
3168
static void hisi_qm_unint_work(struct hisi_qm *qm)
3169
{
3170
destroy_workqueue(qm->wq);
3171
}
3172
3173
static void hisi_qm_free_rsv_buf(struct hisi_qm *qm)
3174
{
3175
struct qm_dma *xqc_dma = &qm->xqc_buf.qcdma;
3176
struct device *dev = &qm->pdev->dev;
3177
3178
dma_free_coherent(dev, xqc_dma->size, xqc_dma->va, xqc_dma->dma);
3179
}
3180
3181
static void hisi_qm_memory_uninit(struct hisi_qm *qm)
3182
{
3183
struct device *dev = &qm->pdev->dev;
3184
3185
hisi_qp_memory_uninit(qm, qm->qp_num);
3186
hisi_qm_free_rsv_buf(qm);
3187
if (qm->qdma.va) {
3188
hisi_qm_cache_wb(qm);
3189
dma_free_coherent(dev, qm->qdma.size,
3190
qm->qdma.va, qm->qdma.dma);
3191
}
3192
3193
idr_destroy(&qm->qp_idr);
3194
3195
if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
3196
kfree(qm->factor);
3197
}
3198
3199
/**
3200
* hisi_qm_uninit() - Uninitialize qm.
3201
* @qm: The qm to be uninitialized.
3202
*
3203
* This function uninitializes the qm related device resources.
3204
*/
3205
void hisi_qm_uninit(struct hisi_qm *qm)
3206
{
3207
qm_cmd_uninit(qm);
3208
hisi_qm_unint_work(qm);
3209
3210
down_write(&qm->qps_lock);
3211
hisi_qm_memory_uninit(qm);
3212
hisi_qm_set_state(qm, QM_NOT_READY);
3213
up_write(&qm->qps_lock);
3214
3215
qm_remove_uacce(qm);
3216
qm_irqs_unregister(qm);
3217
hisi_qm_pci_uninit(qm);
3218
}
3219
EXPORT_SYMBOL_GPL(hisi_qm_uninit);
3220
3221
/**
3222
* hisi_qm_get_vft() - Get vft from a qm.
3223
* @qm: The qm we want to get its vft.
3224
* @base: The base number of queue in vft.
3225
* @number: The number of queues in vft.
3226
*
3227
* We can allocate multiple queues to a qm by configuring virtual function
3228
* table. We get the related configuration with this function. Normally, we call this
3229
* function in VF driver to get the queue information.
3230
*
3231
* qm hw v1 does not support this interface.
3232
*/
3233
static int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number)
3234
{
3235
if (!base || !number)
3236
return -EINVAL;
3237
3238
if (!qm->ops->get_vft) {
3239
dev_err(&qm->pdev->dev, "Don't support vft read!\n");
3240
return -EINVAL;
3241
}
3242
3243
return qm->ops->get_vft(qm, base, number);
3244
}
3245
3246
/**
3247
* hisi_qm_set_vft() - Set vft to a qm.
3248
* @qm: The qm we want to set its vft.
3249
* @fun_num: The function number.
3250
* @base: The base number of queue in vft.
3251
* @number: The number of queues in vft.
3252
*
3253
* This function is always called in the PF driver; it is used to assign queues
3254
* among PF and VFs.
3255
*
3256
* Assign queues A~B to PF: hisi_qm_set_vft(qm, 0, A, B - A + 1)
3257
* Assign queues A~B to VF: hisi_qm_set_vft(qm, 2, A, B - A + 1)
3258
* (VF function number 0x2)
3259
*/
3260
static int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
3261
u32 number)
3262
{
3263
u32 max_q_num = qm->ctrl_qp_num;
3264
3265
if (base >= max_q_num || number > max_q_num ||
3266
(base + number) > max_q_num)
3267
return -EINVAL;
3268
3269
return qm_set_sqc_cqc_vft(qm, fun_num, base, number);
3270
}
3271
3272
static void qm_init_eq_aeq_status(struct hisi_qm *qm)
3273
{
3274
struct hisi_qm_status *status = &qm->status;
3275
3276
status->eq_head = 0;
3277
status->aeq_head = 0;
3278
status->eqc_phase = true;
3279
status->aeqc_phase = true;
3280
}
3281
3282
static void qm_enable_eq_aeq_interrupts(struct hisi_qm *qm)
3283
{
3284
/* Clear eq/aeq interrupt source */
3285
qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0);
3286
qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
3287
3288
writel(0x0, qm->io_base + QM_VF_EQ_INT_MASK);
3289
writel(0x0, qm->io_base + QM_VF_AEQ_INT_MASK);
3290
}
3291
3292
static void qm_disable_eq_aeq_interrupts(struct hisi_qm *qm)
3293
{
3294
writel(0x1, qm->io_base + QM_VF_EQ_INT_MASK);
3295
writel(0x1, qm->io_base + QM_VF_AEQ_INT_MASK);
3296
}
3297
3298
static int qm_eq_ctx_cfg(struct hisi_qm *qm)
3299
{
3300
struct qm_eqc eqc = {0};
3301
3302
eqc.base_l = cpu_to_le32(lower_32_bits(qm->eqe_dma));
3303
eqc.base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma));
3304
if (qm->ver == QM_HW_V1)
3305
eqc.dw3 = cpu_to_le32(QM_EQE_AEQE_SIZE);
3306
eqc.dw6 = cpu_to_le32(((u32)qm->eq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT));
3307
3308
return qm_set_and_get_xqc(qm, QM_MB_CMD_EQC, &eqc, 0, 0);
3309
}
3310
3311
static int qm_aeq_ctx_cfg(struct hisi_qm *qm)
3312
{
3313
struct qm_aeqc aeqc = {0};
3314
3315
aeqc.base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma));
3316
aeqc.base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma));
3317
aeqc.dw6 = cpu_to_le32(((u32)qm->aeq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT));
3318
3319
return qm_set_and_get_xqc(qm, QM_MB_CMD_AEQC, &aeqc, 0, 0);
3320
}
3321
3322
static int qm_eq_aeq_ctx_cfg(struct hisi_qm *qm)
3323
{
3324
struct device *dev = &qm->pdev->dev;
3325
int ret;
3326
3327
qm_init_eq_aeq_status(qm);
3328
3329
/* Before starting the device, clear the memory and then configure it for the device to use. */
3330
memset(qm->qdma.va, 0, qm->qdma.size);
3331
3332
ret = qm_eq_ctx_cfg(qm);
3333
if (ret) {
3334
dev_err(dev, "Set eqc failed!\n");
3335
return ret;
3336
}
3337
3338
return qm_aeq_ctx_cfg(qm);
3339
}
3340
3341
static int __hisi_qm_start(struct hisi_qm *qm)
3342
{
3343
struct device *dev = &qm->pdev->dev;
3344
int ret;
3345
3346
if (!qm->qdma.va) {
3347
dev_err(dev, "qm qdma is NULL!\n");
3348
return -EINVAL;
3349
}
3350
3351
if (qm->fun_type == QM_HW_PF) {
3352
ret = hisi_qm_set_vft(qm, 0, qm->qp_base, qm->qp_num);
3353
if (ret)
3354
return ret;
3355
}
3356
3357
ret = qm_eq_aeq_ctx_cfg(qm);
3358
if (ret)
3359
return ret;
3360
3361
ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0);
3362
if (ret)
3363
return ret;
3364
3365
ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0);
3366
if (ret)
3367
return ret;
3368
3369
qm_init_prefetch(qm);
3370
qm_enable_eq_aeq_interrupts(qm);
3371
3372
return 0;
3373
}
3374
3375
/**
3376
* hisi_qm_start() - start qm
3377
* @qm: The qm to be started.
3378
*
3379
* This function starts a qm, then we can allocate qp from this qm.
3380
*/
3381
int hisi_qm_start(struct hisi_qm *qm)
3382
{
3383
struct device *dev = &qm->pdev->dev;
3384
int ret = 0;
3385
3386
down_write(&qm->qps_lock);
3387
3388
dev_dbg(dev, "qm start with %u queue pairs\n", qm->qp_num);
3389
3390
if (!qm->qp_num) {
3391
dev_err(dev, "qp_num should not be 0\n");
3392
ret = -EINVAL;
3393
goto err_unlock;
3394
}
3395
3396
ret = __hisi_qm_start(qm);
3397
if (ret)
3398
goto err_unlock;
3399
3400
atomic_set(&qm->status.flags, QM_WORK);
3401
hisi_qm_set_state(qm, QM_READY);
3402
3403
err_unlock:
3404
up_write(&qm->qps_lock);
3405
return ret;
3406
}
3407
EXPORT_SYMBOL_GPL(hisi_qm_start);
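/*
 * Illustrative usage sketch, not part of the driver: hisi_qm_start() must be
 * called with a non-zero qm->qp_num and with the qm memories already set up,
 * before any queue pair is created from the qm. "example_bringup" is a
 * hypothetical helper.
 */
#if 0	/* usage sketch only */
static int example_bringup(struct hisi_qm *qm)
{
        int ret;

        ret = hisi_qm_start(qm);
        if (ret)
                return ret;

        /* queue pairs can be created and started from this point on */
        return 0;
}
#endif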
3408
3409
static int qm_restart(struct hisi_qm *qm)
3410
{
3411
struct device *dev = &qm->pdev->dev;
3412
struct hisi_qp *qp;
3413
int ret, i;
3414
3415
ret = hisi_qm_start(qm);
3416
if (ret < 0)
3417
return ret;
3418
3419
down_write(&qm->qps_lock);
3420
for (i = 0; i < qm->qp_num; i++) {
3421
qp = &qm->qp_array[i];
3422
if (atomic_read(&qp->qp_status.flags) == QP_STOP &&
3423
qp->is_resetting == true && qp->is_in_kernel == true) {
3424
ret = qm_start_qp_nolock(qp, 0);
3425
if (ret < 0) {
3426
dev_err(dev, "Failed to start qp%d!\n", i);
3427
3428
up_write(&qm->qps_lock);
3429
return ret;
3430
}
3431
qp->is_resetting = false;
3432
}
3433
}
3434
up_write(&qm->qps_lock);
3435
3436
return 0;
3437
}
3438
3439
/* Stop started qps in reset flow */
3440
static void qm_stop_started_qp(struct hisi_qm *qm)
3441
{
3442
struct hisi_qp *qp;
3443
int i;
3444
3445
for (i = 0; i < qm->qp_num; i++) {
3446
qp = &qm->qp_array[i];
3447
if (atomic_read(&qp->qp_status.flags) == QP_START) {
3448
qp->is_resetting = true;
3449
qm_stop_qp_nolock(qp);
3450
}
3451
}
3452
}
3453
3454
/**
3455
* qm_invalid_queues() - invalidate all queues in use.
3456
* @qm: The qm in which the queues will be invalidated.
3457
*
3458
* This function invalidates all queues in use. If a doorbell command is sent
3459
* to the device from user space after the device is reset, the device discards
3460
* the doorbell command.
3461
*/
3462
static void qm_invalid_queues(struct hisi_qm *qm)
3463
{
3464
struct hisi_qp *qp;
3465
struct qm_sqc *sqc;
3466
struct qm_cqc *cqc;
3467
int i;
3468
3469
/*
3470
* For a normal stop, the queues do not need to be
3471
* invalidated.
3472
*/
3473
if (qm->status.stop_reason == QM_NORMAL)
3474
return;
3475
3476
if (qm->status.stop_reason == QM_DOWN)
3477
hisi_qm_cache_wb(qm);
3478
3479
for (i = 0; i < qm->qp_num; i++) {
3480
qp = &qm->qp_array[i];
3481
if (!qp->is_resetting)
3482
continue;
3483
3484
/* Modify random data and set the sqc close bit to invalidate the queue. */
3485
sqc = qm->sqc + i;
3486
cqc = qm->cqc + i;
3487
sqc->w8 = cpu_to_le16(QM_XQC_RANDOM_DATA);
3488
sqc->w13 = cpu_to_le16(QM_SQC_DISABLE_QP);
3489
cqc->w8 = cpu_to_le16(QM_XQC_RANDOM_DATA);
3490
if (qp->is_in_kernel)
3491
memset(qp->qdma.va, 0, qp->qdma.size);
3492
}
3493
}
3494
3495
/**
3496
* hisi_qm_stop() - Stop a qm.
3497
* @qm: The qm which will be stopped.
3498
* @r: The reason to stop qm.
3499
*
3500
* This function stops the qm and its qps, after which the qm cannot accept requests.
3501
* Related resources are not released at this state, we can use hisi_qm_start
3502
* to let qm start again.
3503
*/
3504
int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r)
3505
{
3506
struct device *dev = &qm->pdev->dev;
3507
int ret = 0;
3508
3509
down_write(&qm->qps_lock);
3510
3511
if (atomic_read(&qm->status.flags) == QM_STOP)
3512
goto err_unlock;
3513
3514
/* Stop all request sending first. */
3515
atomic_set(&qm->status.flags, QM_STOP);
3516
qm->status.stop_reason = r;
3517
3518
if (qm->status.stop_reason != QM_NORMAL) {
3519
hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
3520
/*
3521
* When performing soft reset, the hardware will no longer
3522
* do tasks, and the tasks in the device will be flushed
3523
* out directly since the master ooo is closed.
3524
*/
3525
if (test_bit(QM_SUPPORT_STOP_FUNC, &qm->caps) &&
3526
r != QM_SOFT_RESET) {
3527
ret = qm_drain_qm(qm);
3528
if (ret) {
3529
dev_err(dev, "failed to drain qm!\n");
3530
goto err_unlock;
3531
}
3532
}
3533
3534
qm_stop_started_qp(qm);
3535
3536
hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
3537
}
3538
3539
qm_disable_eq_aeq_interrupts(qm);
3540
if (qm->fun_type == QM_HW_PF) {
3541
ret = hisi_qm_set_vft(qm, 0, 0, 0);
3542
if (ret < 0) {
3543
dev_err(dev, "Failed to set vft!\n");
3544
ret = -EBUSY;
3545
goto err_unlock;
3546
}
3547
}
3548
3549
qm_invalid_queues(qm);
3550
qm->status.stop_reason = QM_NORMAL;
3551
3552
err_unlock:
3553
up_write(&qm->qps_lock);
3554
return ret;
3555
}
3556
EXPORT_SYMBOL_GPL(hisi_qm_stop);
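/*
 * Illustrative usage sketch, not part of the driver: the stop reason selects
 * how the queues are drained and invalidated; QM_NORMAL is used for a plain
 * stop, while reasons such as QM_DOWN are used on shutdown paths. The
 * function name "example_shutdown" is hypothetical.
 */
#if 0	/* usage sketch only */
static void example_shutdown(struct pci_dev *pdev)
{
        struct hisi_qm *qm = pci_get_drvdata(pdev);
        int ret;

        ret = hisi_qm_stop(qm, QM_DOWN);
        if (ret)
                dev_err(&pdev->dev, "failed to stop qm in shutdown!\n");
}
#endif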
3557
3558
static void qm_hw_error_init(struct hisi_qm *qm)
3559
{
3560
if (!qm->ops->hw_error_init) {
3561
dev_err(&qm->pdev->dev, "QM doesn't support hw error handling!\n");
3562
return;
3563
}
3564
3565
qm->ops->hw_error_init(qm);
3566
}
3567
3568
static void qm_hw_error_uninit(struct hisi_qm *qm)
3569
{
3570
if (!qm->ops->hw_error_uninit) {
3571
dev_err(&qm->pdev->dev, "Unexpected QM hw error uninit!\n");
3572
return;
3573
}
3574
3575
qm->ops->hw_error_uninit(qm);
3576
}
3577
3578
static enum acc_err_result qm_hw_error_handle(struct hisi_qm *qm)
3579
{
3580
if (!qm->ops->hw_error_handle) {
3581
dev_err(&qm->pdev->dev, "QM doesn't support hw error report!\n");
3582
return ACC_ERR_NONE;
3583
}
3584
3585
return qm->ops->hw_error_handle(qm);
3586
}
3587
3588
/**
3589
* hisi_qm_dev_err_init() - Initialize device error configuration.
3590
* @qm: The qm for which we want to do error initialization.
3591
*
3592
* Initialize QM and device error related configuration.
3593
*/
3594
void hisi_qm_dev_err_init(struct hisi_qm *qm)
3595
{
3596
if (qm->fun_type == QM_HW_VF)
3597
return;
3598
3599
qm_hw_error_init(qm);
3600
3601
if (!qm->err_ini->hw_err_enable) {
3602
dev_err(&qm->pdev->dev, "Device doesn't support hw error init!\n");
3603
return;
3604
}
3605
qm->err_ini->hw_err_enable(qm);
3606
}
3607
EXPORT_SYMBOL_GPL(hisi_qm_dev_err_init);
3608
3609
/**
3610
* hisi_qm_dev_err_uninit() - Uninitialize device error configuration.
3611
* @qm: The qm for which we want to do error uninitialization.
3612
*
3613
* Uninitialize QM and device error related configuration.
3614
*/
3615
void hisi_qm_dev_err_uninit(struct hisi_qm *qm)
3616
{
3617
if (qm->fun_type == QM_HW_VF)
3618
return;
3619
3620
qm_hw_error_uninit(qm);
3621
3622
if (!qm->err_ini->hw_err_disable) {
3623
dev_err(&qm->pdev->dev, "Unexpected device hw error uninit!\n");
3624
return;
3625
}
3626
qm->err_ini->hw_err_disable(qm);
3627
}
3628
EXPORT_SYMBOL_GPL(hisi_qm_dev_err_uninit);
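/*
 * Illustrative usage sketch, not part of the driver: on the PF, device error
 * reporting is enabled once after the qm is set up and disabled again before
 * a controller reset; both helpers return early on VFs. "example_err_cfg" is
 * a hypothetical name.
 */
#if 0	/* usage sketch only */
static void example_err_cfg(struct hisi_qm *qm, bool enable)
{
        if (enable)
                hisi_qm_dev_err_init(qm);
        else
                hisi_qm_dev_err_uninit(qm);
}
#endif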
3629
3630
static void qm_release_qp_nolock(struct hisi_qp *qp)
3631
{
3632
struct hisi_qm *qm = qp->qm;
3633
3634
if (--qp->ref_count)
3635
return;
3636
3637
qm->qp_in_used--;
3638
idr_remove(&qm->qp_idr, qp->qp_id);
3639
}
3640
3641
/**
3642
* hisi_qm_free_qps() - free multiple queue pairs.
3643
* @qps: The queue pairs to be freed.
3644
* @qp_num: The number of queue pairs.
3645
*/
3646
void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num)
3647
{
3648
int i;
3649
3650
if (!qps || qp_num <= 0)
3651
return;
3652
3653
down_write(&qps[0]->qm->qps_lock);
3654
3655
for (i = qp_num - 1; i >= 0; i--) {
3656
if (qps[i]->ref_count == 1)
3657
qm_stop_qp_nolock(qps[i]);
3658
3659
qm_release_qp_nolock(qps[i]);
3660
}
3661
3662
up_write(&qps[0]->qm->qps_lock);
3663
qm_pm_put_sync(qps[0]->qm);
3664
}
3665
EXPORT_SYMBOL_GPL(hisi_qm_free_qps);
3666
3667
static void qm_insert_sorted(struct list_head *head, struct hisi_qm_resource *res)
3668
{
3669
struct hisi_qm_resource *tmp;
3670
struct list_head *n = head;
3671
3672
list_for_each_entry(tmp, head, list) {
3673
if (res->distance < tmp->distance) {
3674
n = &tmp->list;
3675
break;
3676
}
3677
}
3678
list_add_tail(&res->list, n);
3679
}
3680
3681
static void free_list(struct list_head *head)
3682
{
3683
struct hisi_qm_resource *res, *tmp;
3684
3685
list_for_each_entry_safe(res, tmp, head, list) {
3686
list_del(&res->list);
3687
kfree(res);
3688
}
3689
}
3690
3691
static int qm_get_and_start_qp(struct hisi_qm *qm, int qp_num, struct hisi_qp **qps, u8 *alg_type)
3692
{
3693
int i, ret;
3694
3695
ret = qm_pm_get_sync(qm);
3696
if (ret)
3697
return ret;
3698
3699
down_write(&qm->qps_lock);
3700
for (i = 0; i < qp_num; i++) {
3701
qps[i] = qm_create_qp_nolock(qm, alg_type[i], true);
3702
if (IS_ERR(qps[i])) {
3703
ret = -ENODEV;
3704
goto stop_and_free;
3705
}
3706
3707
if (qps[i]->ref_count != 1)
3708
continue;
3709
3710
ret = qm_start_qp_nolock(qps[i], 0);
3711
if (ret) {
3712
qm_release_qp_nolock(qps[i]);
3713
goto stop_and_free;
3714
}
3715
}
3716
up_write(&qm->qps_lock);
3717
3718
return 0;
3719
3720
stop_and_free:
3721
for (i--; i >= 0; i--) {
3722
if (qps[i]->ref_count == 1)
3723
qm_stop_qp_nolock(qps[i]);
3724
3725
qm_release_qp_nolock(qps[i]);
3726
}
3727
up_write(&qm->qps_lock);
3728
qm_pm_put_sync(qm);
3729
3730
return ret;
3731
}
3732
3733
static int hisi_qm_sort_devices(int node, struct list_head *head,
3734
struct hisi_qm_list *qm_list)
3735
{
3736
struct hisi_qm_resource *res;
3737
struct hisi_qm *qm;
3738
struct device *dev;
3739
int dev_node;
3740
LIST_HEAD(non_full_list);
3741
LIST_HEAD(full_list);
3742
3743
list_for_each_entry(qm, &qm_list->list, list) {
3744
dev = &qm->pdev->dev;
3745
3746
dev_node = dev_to_node(dev);
3747
if (dev_node < 0)
3748
dev_node = 0;
3749
3750
res = kzalloc(sizeof(*res), GFP_KERNEL);
3751
if (!res)
3752
return -ENOMEM;
3753
3754
res->qm = qm;
3755
res->distance = node_distance(dev_node, node);
3756
3757
if (qm->qp_in_used == qm->qp_num)
3758
qm_insert_sorted(&full_list, res);
3759
else
3760
qm_insert_sorted(&non_full_list, res);
3761
}
3762
3763
list_splice_tail(&non_full_list, head);
3764
list_splice_tail(&full_list, head);
3765
3766
return 0;
3767
}
3768
3769
/**
3770
* hisi_qm_alloc_qps_node() - Create multiple queue pairs.
3771
* @qm_list: The list of all available devices.
3772
* @qp_num: The number of queue pairs to be created.
3773
* @alg_type: The algorithm type.
3774
* @node: The numa node.
3775
* @qps: The queue pairs to be created.
3776
*
3777
* This function sorts all available devices according to NUMA distance, then
3778
* tries to create all queue pairs from one device. If no device meets
3779
* the requirements, an error is returned.
3780
*/
3781
int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num,
3782
u8 *alg_type, int node, struct hisi_qp **qps)
3783
{
3784
struct hisi_qm_resource *tmp;
3785
int ret = -ENODEV;
3786
LIST_HEAD(head);
3787
3788
if (!qps || !qm_list || qp_num <= 0)
3789
return -EINVAL;
3790
3791
mutex_lock(&qm_list->lock);
3792
if (hisi_qm_sort_devices(node, &head, qm_list)) {
3793
mutex_unlock(&qm_list->lock);
3794
goto err;
3795
}
3796
3797
list_for_each_entry(tmp, &head, list) {
3798
ret = qm_get_and_start_qp(tmp->qm, qp_num, qps, alg_type);
3799
if (!ret)
3800
break;
3801
}
3802
3803
mutex_unlock(&qm_list->lock);
3804
if (ret)
3805
pr_info("Failed to create qps, node[%d], qp[%d]!\n",
3806
node, qp_num);
3807
3808
err:
3809
free_list(&head);
3810
return ret;
3811
}
3812
EXPORT_SYMBOL_GPL(hisi_qm_alloc_qps_node);
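/*
 * Illustrative usage sketch, not part of the driver: a kernel crypto driver
 * allocates a group of queue pairs close to a NUMA node, uses them, and then
 * frees them with hisi_qm_free_qps(). The helper name is hypothetical and the
 * alg_type values below are placeholders for the accelerator-specific type.
 */
#if 0	/* usage sketch only */
static int example_get_two_qps(struct hisi_qm_list *list, int node, struct hisi_qp **qps)
{
        u8 alg_type[2] = { 0, 0 };	/* accelerator-specific algorithm type */
        int ret;

        ret = hisi_qm_alloc_qps_node(list, 2, alg_type, node, qps);
        if (ret)
                return ret;

        /* ... submit work with hisi_qp_send() ... */

        hisi_qm_free_qps(qps, 2);
        return 0;
}
#endif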
3813
3814
static int qm_vf_q_assign(struct hisi_qm *qm, u32 num_vfs)
3815
{
3816
u32 remain_q_num, vfs_q_num, act_q_num, q_num, i, j;
3817
u32 max_qp_num = qm->max_qp_num;
3818
u32 q_base = qm->qp_num;
3819
int ret;
3820
3821
if (!num_vfs)
3822
return -EINVAL;
3823
3824
vfs_q_num = qm->ctrl_qp_num - qm->qp_num;
3825
3826
/* If vfs_q_num is less than num_vfs, return error. */
3827
if (vfs_q_num < num_vfs)
3828
return -EINVAL;
3829
3830
q_num = vfs_q_num / num_vfs;
3831
remain_q_num = vfs_q_num % num_vfs;
3832
3833
for (i = num_vfs; i > 0; i--) {
3834
/*
3835
* if q_num + remain_q_num > max_qp_num in last vf, divide the
3836
* remaining queues equally.
3837
*/
3838
if (i == num_vfs && q_num + remain_q_num <= max_qp_num) {
3839
act_q_num = q_num + remain_q_num;
3840
remain_q_num = 0;
3841
} else if (remain_q_num > 0) {
3842
act_q_num = q_num + 1;
3843
remain_q_num--;
3844
} else {
3845
act_q_num = q_num;
3846
}
3847
3848
act_q_num = min(act_q_num, max_qp_num);
3849
ret = hisi_qm_set_vft(qm, i, q_base, act_q_num);
3850
if (ret) {
3851
for (j = num_vfs; j > i; j--)
3852
hisi_qm_set_vft(qm, j, 0, 0);
3853
return ret;
3854
}
3855
q_base += act_q_num;
3856
}
3857
3858
return 0;
3859
}
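/*
 * Worked example for the assignment above (hypothetical numbers): with
 * vfs_q_num = 63 spare queues, num_vfs = 4 and a large max_qp_num,
 * q_num = 63 / 4 = 15 and remain_q_num = 63 % 4 = 3. The first VF handled
 * (i == num_vfs) takes q_num + remain_q_num = 18 queues, and the remaining
 * three VFs take 15 queues each, so all 63 queues are assigned. If 18 were
 * larger than max_qp_num, the 3 remaining queues would instead be spread
 * one per VF.
 */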
3860
3861
static void qm_clear_vft_config(struct hisi_qm *qm)
3862
{
3863
u32 i;
3864
3865
/*
3866
* When disabling SR-IOV, clear the configuration of each VF in the hardware
3867
* sequentially. Failure to clear a single VF should not affect the clearing
3868
* operation of other VFs.
3869
*/
3870
for (i = 1; i <= qm->vfs_num; i++)
3871
(void)hisi_qm_set_vft(qm, i, 0, 0);
3872
3873
qm->vfs_num = 0;
3874
}
3875
3876
static int qm_func_shaper_enable(struct hisi_qm *qm, u32 fun_index, u32 qos)
3877
{
3878
struct device *dev = &qm->pdev->dev;
3879
struct qm_shaper_factor t_factor;
3880
u32 ir = qos * QM_QOS_RATE;
3881
int ret, total_vfs, i;
3882
3883
total_vfs = pci_sriov_get_totalvfs(qm->pdev);
3884
if (fun_index > total_vfs)
3885
return -EINVAL;
3886
3887
memcpy(&t_factor, &qm->factor[fun_index], sizeof(t_factor));
3888
qm->factor[fun_index].func_qos = qos;
3889
3890
ret = qm_get_shaper_para(ir, &qm->factor[fun_index]);
3891
if (ret) {
3892
dev_err(dev, "failed to calculate shaper parameter!\n");
3893
return -EINVAL;
3894
}
3895
3896
for (i = ALG_TYPE_0; i <= ALG_TYPE_1; i++) {
3897
/* Different alg types reuse the same base number of queues */
3898
ret = qm_set_vft_common(qm, SHAPER_VFT, fun_index, i, 1);
3899
if (ret) {
3900
dev_err(dev, "type: %d, failed to set shaper vft!\n", i);
3901
goto back_func_qos;
3902
}
3903
}
3904
3905
return 0;
3906
3907
back_func_qos:
3908
memcpy(&qm->factor[fun_index], &t_factor, sizeof(t_factor));
3909
for (i--; i >= ALG_TYPE_0; i--) {
3910
ret = qm_set_vft_common(qm, SHAPER_VFT, fun_index, i, 1);
3911
if (ret)
3912
dev_err(dev, "failed to restore shaper vft during rollback!\n");
3913
}
3914
3915
return -EINVAL;
3916
}
3917
3918
static u32 qm_get_shaper_vft_qos(struct hisi_qm *qm, u32 fun_index)
3919
{
3920
u64 cir_u = 0, cir_b = 0, cir_s = 0;
3921
u64 shaper_vft, ir_calc, ir;
3922
unsigned int val;
3923
u32 error_rate;
3924
int ret;
3925
3926
ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
3927
val & BIT(0), POLL_PERIOD,
3928
POLL_TIMEOUT);
3929
if (ret)
3930
return 0;
3931
3932
writel(0x1, qm->io_base + QM_VFT_CFG_OP_WR);
3933
writel(SHAPER_VFT, qm->io_base + QM_VFT_CFG_TYPE);
3934
writel(fun_index, qm->io_base + QM_VFT_CFG);
3935
3936
writel(0x0, qm->io_base + QM_VFT_CFG_RDY);
3937
writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE);
3938
3939
ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
3940
val & BIT(0), POLL_PERIOD,
3941
POLL_TIMEOUT);
3942
if (ret)
3943
return 0;
3944
3945
shaper_vft = readl(qm->io_base + QM_VFT_CFG_DATA_L) |
3946
((u64)readl(qm->io_base + QM_VFT_CFG_DATA_H) << 32);
3947
3948
cir_b = shaper_vft & QM_SHAPER_CIR_B_MASK;
3949
cir_u = shaper_vft & QM_SHAPER_CIR_U_MASK;
3950
cir_u = cir_u >> QM_SHAPER_FACTOR_CIR_U_SHIFT;
3951
3952
cir_s = shaper_vft & QM_SHAPER_CIR_S_MASK;
3953
cir_s = cir_s >> QM_SHAPER_FACTOR_CIR_S_SHIFT;
3954
3955
ir_calc = acc_shaper_para_calc(cir_b, cir_u, cir_s);
3956
3957
ir = qm->factor[fun_index].func_qos * QM_QOS_RATE;
3958
3959
error_rate = QM_QOS_EXPAND_RATE * (u32)abs(ir_calc - ir) / ir;
3960
if (error_rate > QM_QOS_MIN_ERROR_RATE) {
3961
pci_err(qm->pdev, "error_rate: %u, get function qos is error!\n", error_rate);
3962
return 0;
3963
}
3964
3965
return ir;
3966
}
3967
3968
static void qm_vf_get_qos(struct hisi_qm *qm, u32 fun_num)
3969
{
3970
struct device *dev = &qm->pdev->dev;
3971
u32 qos;
3972
int ret;
3973
3974
qos = qm_get_shaper_vft_qos(qm, fun_num);
3975
if (!qos) {
3976
dev_err(dev, "function(%u) failed to get qos by PF!\n", fun_num);
3977
return;
3978
}
3979
3980
ret = qm_ping_single_vf(qm, QM_PF_SET_QOS, qos, fun_num);
3981
if (ret)
3982
dev_err(dev, "failed to send command(0x%x) to VF(%u)!\n", QM_PF_SET_QOS, fun_num);
3983
}
3984
3985
static int qm_vf_read_qos(struct hisi_qm *qm)
3986
{
3987
int cnt = 0;
3988
int ret = -EINVAL;
3989
3990
/* reset mailbox qos val */
3991
qm->mb_qos = 0;
3992
3993
/* vf ping pf to get function qos */
3994
ret = qm_ping_pf(qm, QM_VF_GET_QOS);
3995
if (ret) {
3996
pci_err(qm->pdev, "failed to send cmd to PF to get qos!\n");
3997
return ret;
3998
}
3999
4000
while (true) {
4001
msleep(QM_WAIT_DST_ACK);
4002
if (qm->mb_qos)
4003
break;
4004
4005
if (++cnt > QM_MAX_VF_WAIT_COUNT) {
4006
pci_err(qm->pdev, "PF ping VF timeout!\n");
4007
return -ETIMEDOUT;
4008
}
4009
}
4010
4011
return ret;
4012
}
4013
4014
static ssize_t qm_algqos_read(struct file *filp, char __user *buf,
4015
size_t count, loff_t *pos)
4016
{
4017
struct hisi_qm *qm = filp->private_data;
4018
char tbuf[QM_DBG_READ_LEN];
4019
u32 qos_val, ir;
4020
int ret;
4021
4022
ret = hisi_qm_get_dfx_access(qm);
4023
if (ret)
4024
return ret;
4025
4026
/* Mailbox and reset cannot be operated at the same time */
4027
if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
4028
pci_err(qm->pdev, "dev resetting, read alg qos failed!\n");
4029
ret = -EAGAIN;
4030
goto err_put_dfx_access;
4031
}
4032
4033
if (qm->fun_type == QM_HW_PF) {
4034
ir = qm_get_shaper_vft_qos(qm, 0);
4035
} else {
4036
ret = qm_vf_read_qos(qm);
4037
if (ret)
4038
goto err_get_status;
4039
ir = qm->mb_qos;
4040
}
4041
4042
qos_val = ir / QM_QOS_RATE;
4043
ret = scnprintf(tbuf, QM_DBG_READ_LEN, "%u\n", qos_val);
4044
4045
ret = simple_read_from_buffer(buf, count, pos, tbuf, ret);
4046
4047
err_get_status:
4048
clear_bit(QM_RESETTING, &qm->misc_ctl);
4049
err_put_dfx_access:
4050
hisi_qm_put_dfx_access(qm);
4051
return ret;
4052
}
4053
4054
static ssize_t qm_get_qos_value(struct hisi_qm *qm, const char *buf,
4055
unsigned long *val,
4056
unsigned int *fun_index)
4057
{
4058
const struct bus_type *bus_type = qm->pdev->dev.bus;
4059
char tbuf_bdf[QM_DBG_READ_LEN] = {0};
4060
char val_buf[QM_DBG_READ_LEN] = {0};
4061
struct pci_dev *pdev;
4062
struct device *dev;
4063
int ret;
4064
4065
ret = sscanf(buf, "%s %s", tbuf_bdf, val_buf);
4066
if (ret != QM_QOS_PARAM_NUM)
4067
return -EINVAL;
4068
4069
ret = kstrtoul(val_buf, 10, val);
4070
if (ret || *val == 0 || *val > QM_QOS_MAX_VAL) {
4071
pci_err(qm->pdev, "invalid qos value, please set 1~1000!\n");
4072
return -EINVAL;
4073
}
4074
4075
dev = bus_find_device_by_name(bus_type, NULL, tbuf_bdf);
4076
if (!dev) {
4077
pci_err(qm->pdev, "input pci bdf number is error!\n");
4078
return -ENODEV;
4079
}
4080
4081
pdev = container_of(dev, struct pci_dev, dev);
4082
if (pci_physfn(pdev) != qm->pdev) {
4083
pci_err(qm->pdev, "the pdev input does not match the pf!\n");
4084
put_device(dev);
4085
return -EINVAL;
4086
}
4087
4088
*fun_index = pdev->devfn;
4089
put_device(dev);
4090
4091
return 0;
4092
}
4093
4094
static ssize_t qm_algqos_write(struct file *filp, const char __user *buf,
4095
size_t count, loff_t *pos)
4096
{
4097
struct hisi_qm *qm = filp->private_data;
4098
char tbuf[QM_DBG_READ_LEN];
4099
unsigned int fun_index;
4100
unsigned long val;
4101
int len, ret;
4102
4103
if (*pos != 0)
4104
return 0;
4105
4106
if (count >= QM_DBG_READ_LEN)
4107
return -ENOSPC;
4108
4109
len = simple_write_to_buffer(tbuf, QM_DBG_READ_LEN - 1, pos, buf, count);
4110
if (len < 0)
4111
return len;
4112
4113
tbuf[len] = '\0';
4114
ret = qm_get_qos_value(qm, tbuf, &val, &fun_index);
4115
if (ret)
4116
return ret;
4117
4118
/* Mailbox and reset cannot be operated at the same time */
4119
if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
4120
pci_err(qm->pdev, "dev resetting, write alg qos failed!\n");
4121
return -EAGAIN;
4122
}
4123
4124
ret = qm_pm_get_sync(qm);
4125
if (ret) {
4126
ret = -EINVAL;
4127
goto err_get_status;
4128
}
4129
4130
ret = qm_func_shaper_enable(qm, fun_index, val);
4131
if (ret) {
4132
pci_err(qm->pdev, "failed to enable function shaper!\n");
4133
ret = -EINVAL;
4134
goto err_put_sync;
4135
}
4136
4137
pci_info(qm->pdev, "the qos value of function%u is set to %lu.\n",
4138
fun_index, val);
4139
ret = count;
4140
4141
err_put_sync:
4142
qm_pm_put_sync(qm);
4143
err_get_status:
4144
clear_bit(QM_RESETTING, &qm->misc_ctl);
4145
return ret;
4146
}
4147
4148
static const struct file_operations qm_algqos_fops = {
4149
.owner = THIS_MODULE,
4150
.open = simple_open,
4151
.read = qm_algqos_read,
4152
.write = qm_algqos_write,
4153
};
4154
4155
/**
4156
* hisi_qm_set_algqos_init() - Initialize function qos debugfs files.
4157
* @qm: The qm for which we want to add debugfs files.
4158
*
4159
* Create the function qos debugfs files; a VF pings the PF to get its function qos.
4160
*/
4161
void hisi_qm_set_algqos_init(struct hisi_qm *qm)
4162
{
4163
if (qm->fun_type == QM_HW_PF)
4164
debugfs_create_file("alg_qos", 0644, qm->debug.debug_root,
4165
qm, &qm_algqos_fops);
4166
else if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
4167
debugfs_create_file("alg_qos", 0444, qm->debug.debug_root,
4168
qm, &qm_algqos_fops);
4169
}
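/*
 * Usage sketch for the "alg_qos" node created above (the debugfs path and the
 * BDF below are examples; the real path depends on the accelerator driver's
 * debugfs root):
 *
 *	# read the current qos value of this function
 *	cat /sys/kernel/debug/<driver>/<bdf>/alg_qos
 *
 *	# on the PF, set the qos of a function by its BDF; the value range
 *	# 1~1000 is enforced by qm_get_qos_value() above
 *	echo "0000:35:00.1 500" > /sys/kernel/debug/<driver>/<bdf>/alg_qos
 */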
4170
4171
static void hisi_qm_init_vf_qos(struct hisi_qm *qm, int total_func)
4172
{
4173
int i;
4174
4175
for (i = 1; i <= total_func; i++)
4176
qm->factor[i].func_qos = QM_QOS_MAX_VAL;
4177
}
4178
4179
/**
4180
* hisi_qm_sriov_enable() - enable virtual functions
4181
* @pdev: the PCIe device
4182
* @max_vfs: the number of virtual functions to enable
4183
*
4184
* Returns the number of enabled VFs. If VFs are already enabled, or if
4185
* max_vfs exceeds the total number of VFs the device supports, it returns
4186
* failure.
4187
*/
4188
int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs)
4189
{
4190
struct hisi_qm *qm = pci_get_drvdata(pdev);
4191
int pre_existing_vfs, num_vfs, total_vfs, ret;
4192
4193
ret = qm_pm_get_sync(qm);
4194
if (ret)
4195
return ret;
4196
4197
total_vfs = pci_sriov_get_totalvfs(pdev);
4198
pre_existing_vfs = pci_num_vf(pdev);
4199
if (pre_existing_vfs) {
4200
pci_err(pdev, "%d VFs already enabled. Please disable pre-enabled VFs!\n",
4201
pre_existing_vfs);
4202
goto err_put_sync;
4203
}
4204
4205
if (max_vfs > total_vfs) {
4206
pci_err(pdev, "%d VFs is more than total VFs %d!\n", max_vfs, total_vfs);
4207
ret = -ERANGE;
4208
goto err_put_sync;
4209
}
4210
4211
num_vfs = max_vfs;
4212
4213
if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
4214
hisi_qm_init_vf_qos(qm, num_vfs);
4215
4216
ret = qm_vf_q_assign(qm, num_vfs);
4217
if (ret) {
4218
pci_err(pdev, "Can't assign queues for VF!\n");
4219
goto err_put_sync;
4220
}
4221
4222
qm->vfs_num = num_vfs;
4223
ret = pci_enable_sriov(pdev, num_vfs);
4224
if (ret) {
4225
pci_err(pdev, "Can't enable VF!\n");
4226
qm_clear_vft_config(qm);
4227
goto err_put_sync;
4228
}
4229
4230
pci_info(pdev, "VF enabled, vfs_num(=%d)!\n", num_vfs);
4231
4232
return num_vfs;
4233
4234
err_put_sync:
4235
qm_pm_put_sync(qm);
4236
return ret;
4237
}
4238
EXPORT_SYMBOL_GPL(hisi_qm_sriov_enable);
4239
4240
/**
4241
* hisi_qm_sriov_disable - disable virtual functions
4242
* @pdev: the PCI device.
4243
* @is_frozen: true when all the VFs are frozen.
4244
*
4245
* Return failure if there are VFs assigned already or a VF is in use.
4246
*/
4247
int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen)
4248
{
4249
struct hisi_qm *qm = pci_get_drvdata(pdev);
4250
4251
if (pci_vfs_assigned(pdev)) {
4252
pci_err(pdev, "Failed to disable VFs as VFs are assigned!\n");
4253
return -EPERM;
4254
}
4255
4256
/* While a VF is in use, SRIOV cannot be disabled. */
4257
if (!is_frozen && qm_try_frozen_vfs(pdev, qm->qm_list)) {
4258
pci_err(pdev, "Task is using its VF!\n");
4259
return -EBUSY;
4260
}
4261
4262
pci_disable_sriov(pdev);
4263
qm_clear_vft_config(qm);
4264
qm_pm_put_sync(qm);
4265
4266
return 0;
4267
}
4268
EXPORT_SYMBOL_GPL(hisi_qm_sriov_disable);
4269
4270
/**
4271
* hisi_qm_sriov_configure - configure the number of VFs
4272
* @pdev: The PCI device
4273
* @num_vfs: The number of VFs to enable
4274
*
4275
* Enable SR-IOV according to num_vfs, 0 means disable.
4276
*/
4277
int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs)
4278
{
4279
if (num_vfs == 0)
4280
return hisi_qm_sriov_disable(pdev, false);
4281
else
4282
return hisi_qm_sriov_enable(pdev, num_vfs);
4283
}
4284
EXPORT_SYMBOL_GPL(hisi_qm_sriov_configure);
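/*
 * Minimal sketch (not part of this driver) of how a QM-based accelerator
 * driver wires the helper above into its PCI driver, so that writing to the
 * standard sriov_numvfs sysfs attribute enables or disables VFs. The "demo"
 * names and the probe/remove callbacks are assumptions; only
 * hisi_qm_sriov_configure() is a real export.
 */
static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id);
static void demo_remove(struct pci_dev *pdev);

static struct pci_driver demo_pci_driver = {
	.name		 = "demo_qm_accel",
	.probe		 = demo_probe,
	.remove		 = demo_remove,
	.sriov_configure = hisi_qm_sriov_configure,
};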
4285
4286
static enum acc_err_result qm_dev_err_handle(struct hisi_qm *qm)
4287
{
4288
if (!qm->err_ini->get_err_result) {
4289
dev_err(&qm->pdev->dev, "Device doesn't support reset!\n");
4290
return ACC_ERR_NONE;
4291
}
4292
4293
return qm->err_ini->get_err_result(qm);
4294
}
4295
4296
static enum acc_err_result qm_process_dev_error(struct hisi_qm *qm)
4297
{
4298
enum acc_err_result qm_ret, dev_ret;
4299
4300
/* log qm error */
4301
qm_ret = qm_hw_error_handle(qm);
4302
4303
/* log device error */
4304
dev_ret = qm_dev_err_handle(qm);
4305
4306
return (qm_ret == ACC_ERR_NEED_RESET ||
4307
dev_ret == ACC_ERR_NEED_RESET) ?
4308
ACC_ERR_NEED_RESET : ACC_ERR_RECOVERED;
4309
}
4310
4311
/**
4312
* hisi_qm_dev_err_detected() - Get device and qm error status then log it.
4313
* @pdev: The PCI device which needs to report the error.
4314
* @state: The connectivity between CPU and device.
4315
*
4316
* We register this function into the PCIe AER handlers. It reports the device
4317
* or qm hardware error status when an error occurs.
4318
*/
4319
pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev,
4320
pci_channel_state_t state)
4321
{
4322
struct hisi_qm *qm = pci_get_drvdata(pdev);
4323
enum acc_err_result ret;
4324
4325
if (pdev->is_virtfn)
4326
return PCI_ERS_RESULT_NONE;
4327
4328
pci_info(pdev, "PCI error detected, state(=%u)!!\n", state);
4329
if (state == pci_channel_io_perm_failure)
4330
return PCI_ERS_RESULT_DISCONNECT;
4331
4332
ret = qm_process_dev_error(qm);
4333
if (ret == ACC_ERR_NEED_RESET)
4334
return PCI_ERS_RESULT_NEED_RESET;
4335
4336
return PCI_ERS_RESULT_RECOVERED;
4337
}
4338
EXPORT_SYMBOL_GPL(hisi_qm_dev_err_detected);
4339
4340
static int qm_check_req_recv(struct hisi_qm *qm)
4341
{
4342
struct pci_dev *pdev = qm->pdev;
4343
int ret;
4344
u32 val;
4345
4346
if (qm->ver >= QM_HW_V3)
4347
return 0;
4348
4349
writel(ACC_VENDOR_ID_VALUE, qm->io_base + QM_PEH_VENDOR_ID);
4350
ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val,
4351
(val == ACC_VENDOR_ID_VALUE),
4352
POLL_PERIOD, POLL_TIMEOUT);
4353
if (ret) {
4354
dev_err(&pdev->dev, "Fails to read QM reg!\n");
4355
return ret;
4356
}
4357
4358
writel(PCI_VENDOR_ID_HUAWEI, qm->io_base + QM_PEH_VENDOR_ID);
4359
ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val,
4360
(val == PCI_VENDOR_ID_HUAWEI),
4361
POLL_PERIOD, POLL_TIMEOUT);
4362
if (ret)
4363
dev_err(&pdev->dev, "Fails to read QM reg in the second time!\n");
4364
4365
return ret;
4366
}
4367
4368
static int qm_set_pf_mse(struct hisi_qm *qm, bool set)
4369
{
4370
struct pci_dev *pdev = qm->pdev;
4371
u16 cmd;
4372
int i;
4373
4374
pci_read_config_word(pdev, PCI_COMMAND, &cmd);
4375
if (set)
4376
cmd |= PCI_COMMAND_MEMORY;
4377
else
4378
cmd &= ~PCI_COMMAND_MEMORY;
4379
4380
pci_write_config_word(pdev, PCI_COMMAND, cmd);
4381
for (i = 0; i < MAX_WAIT_COUNTS; i++) {
4382
pci_read_config_word(pdev, PCI_COMMAND, &cmd);
4383
if (set == ((cmd & PCI_COMMAND_MEMORY) >> 1))
4384
return 0;
4385
4386
udelay(1);
4387
}
4388
4389
return -ETIMEDOUT;
4390
}
4391
4392
static int qm_set_vf_mse(struct hisi_qm *qm, bool set)
4393
{
4394
struct pci_dev *pdev = qm->pdev;
4395
u16 sriov_ctrl;
4396
int pos;
4397
int i;
4398
4399
/*
4400
* Since function qm_set_vf_mse is called only after SRIOV is enabled,
4401
* pci_find_ext_capability cannot return 0, so pos does not need to be
4402
* checked.
4403
*/
4404
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
4405
pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl);
4406
if (set)
4407
sriov_ctrl |= PCI_SRIOV_CTRL_MSE;
4408
else
4409
sriov_ctrl &= ~PCI_SRIOV_CTRL_MSE;
4410
pci_write_config_word(pdev, pos + PCI_SRIOV_CTRL, sriov_ctrl);
4411
4412
for (i = 0; i < MAX_WAIT_COUNTS; i++) {
4413
pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl);
4414
if (set == (sriov_ctrl & PCI_SRIOV_CTRL_MSE) >>
4415
ACC_PEH_SRIOV_CTRL_VF_MSE_SHIFT)
4416
return 0;
4417
4418
udelay(1);
4419
}
4420
4421
return -ETIMEDOUT;
4422
}
4423
4424
static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm)
4425
{
4426
u32 nfe_enb = 0;
4427
4428
/* Kunpeng930 hardware automatically closes master ooo when an NFE occurs */
4429
if (qm->ver >= QM_HW_V3)
4430
return;
4431
4432
if (!qm->err_status.is_dev_ecc_mbit &&
4433
qm->err_status.is_qm_ecc_mbit &&
4434
qm->err_ini->close_axi_master_ooo) {
4435
qm->err_ini->close_axi_master_ooo(qm);
4436
} else if (qm->err_status.is_dev_ecc_mbit &&
4437
!qm->err_status.is_qm_ecc_mbit &&
4438
!qm->err_ini->close_axi_master_ooo) {
4439
nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE);
4440
writel(nfe_enb & ~qm->err_info.qm_err.ecc_2bits_mask,
4441
qm->io_base + QM_RAS_NFE_ENABLE);
4442
writel(qm->err_info.qm_err.ecc_2bits_mask, qm->io_base + QM_ABNORMAL_INT_SET);
4443
}
4444
}
4445
4446
static int qm_vf_reset_prepare(struct hisi_qm *qm,
4447
enum qm_stop_reason stop_reason)
4448
{
4449
struct hisi_qm_list *qm_list = qm->qm_list;
4450
struct pci_dev *pdev = qm->pdev;
4451
struct pci_dev *virtfn;
4452
struct hisi_qm *vf_qm;
4453
int ret = 0;
4454
4455
mutex_lock(&qm_list->lock);
4456
list_for_each_entry(vf_qm, &qm_list->list, list) {
4457
virtfn = vf_qm->pdev;
4458
if (virtfn == pdev)
4459
continue;
4460
4461
if (pci_physfn(virtfn) == pdev) {
4462
/* save the VF's PCIe BAR configuration */
4463
pci_save_state(virtfn);
4464
4465
ret = hisi_qm_stop(vf_qm, stop_reason);
4466
if (ret)
4467
goto stop_fail;
4468
}
4469
}
4470
4471
stop_fail:
4472
mutex_unlock(&qm_list->lock);
4473
return ret;
4474
}
4475
4476
static int qm_try_stop_vfs(struct hisi_qm *qm, enum qm_ifc_cmd cmd,
4477
enum qm_stop_reason stop_reason)
4478
{
4479
struct pci_dev *pdev = qm->pdev;
4480
int ret;
4481
4482
if (!qm->vfs_num)
4483
return 0;
4484
4485
/* Kunpeng930 supports notifying VFs to stop before PF reset */
4486
if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) {
4487
ret = qm_ping_all_vfs(qm, cmd);
4488
if (ret)
4489
pci_err(pdev, "failed to send command to all VFs before PF reset!\n");
4490
} else {
4491
ret = qm_vf_reset_prepare(qm, stop_reason);
4492
if (ret)
4493
pci_err(pdev, "failed to prepare reset, ret = %d.\n", ret);
4494
}
4495
4496
return ret;
4497
}
4498
4499
static int qm_controller_reset_prepare(struct hisi_qm *qm)
4500
{
4501
struct pci_dev *pdev = qm->pdev;
4502
int ret;
4503
4504
if (qm->err_ini->set_priv_status) {
4505
ret = qm->err_ini->set_priv_status(qm);
4506
if (ret)
4507
return ret;
4508
}
4509
4510
ret = qm_reset_prepare_ready(qm);
4511
if (ret) {
4512
pci_err(pdev, "Controller reset not ready!\n");
4513
return ret;
4514
}
4515
4516
qm_dev_ecc_mbit_handle(qm);
4517
4518
/* PF obtains the information of VF by querying the register. */
4519
qm_cmd_uninit(qm);
4520
4521
/* Whether or not the VFs stop successfully, the soft reset will continue. */
4522
ret = qm_try_stop_vfs(qm, QM_PF_SRST_PREPARE, QM_SOFT_RESET);
4523
if (ret)
4524
pci_err(pdev, "failed to stop vfs by pf in soft reset.\n");
4525
4526
ret = hisi_qm_stop(qm, QM_SOFT_RESET);
4527
if (ret) {
4528
pci_err(pdev, "Fails to stop QM!\n");
4529
qm_reset_bit_clear(qm);
4530
return ret;
4531
}
4532
4533
if (qm->use_sva) {
4534
ret = qm_hw_err_isolate(qm);
4535
if (ret)
4536
pci_err(pdev, "failed to isolate hw err!\n");
4537
}
4538
4539
ret = qm_wait_vf_prepare_finish(qm);
4540
if (ret)
4541
pci_err(pdev, "failed to stop by vfs in soft reset!\n");
4542
4543
clear_bit(QM_RST_SCHED, &qm->misc_ctl);
4544
4545
return 0;
4546
}
4547
4548
static int qm_master_ooo_check(struct hisi_qm *qm)
4549
{
4550
u32 val;
4551
int ret;
4552
4553
/* Check the ooo register of the device before resetting the device. */
4554
writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN, qm->io_base + ACC_MASTER_GLOBAL_CTRL);
4555
ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN,
4556
val, (val == ACC_MASTER_TRANS_RETURN_RW),
4557
POLL_PERIOD, POLL_TIMEOUT);
4558
if (ret)
4559
pci_warn(qm->pdev, "Bus lock! Please reset system.\n");
4560
4561
return ret;
4562
}
4563
4564
static int qm_soft_reset_prepare(struct hisi_qm *qm)
4565
{
4566
struct pci_dev *pdev = qm->pdev;
4567
int ret;
4568
4569
/* Ensure all doorbells and mailboxes have been received by the QM */
4570
ret = qm_check_req_recv(qm);
4571
if (ret)
4572
return ret;
4573
4574
if (qm->vfs_num) {
4575
ret = qm_set_vf_mse(qm, false);
4576
if (ret) {
4577
pci_err(pdev, "Fails to disable vf MSE bit.\n");
4578
return ret;
4579
}
4580
}
4581
4582
ret = qm->ops->set_msi(qm, false);
4583
if (ret) {
4584
pci_err(pdev, "Fails to disable PEH MSI bit.\n");
4585
return ret;
4586
}
4587
4588
ret = qm_master_ooo_check(qm);
4589
if (ret)
4590
return ret;
4591
4592
if (qm->err_ini->close_sva_prefetch)
4593
qm->err_ini->close_sva_prefetch(qm);
4594
4595
ret = qm_set_pf_mse(qm, false);
4596
if (ret)
4597
pci_err(pdev, "Fails to disable pf MSE bit.\n");
4598
4599
return ret;
4600
}
4601
4602
static int qm_reset_device(struct hisi_qm *qm)
4603
{
4604
struct pci_dev *pdev = qm->pdev;
4605
4606
/* The reset related sub-control registers are not in PCI BAR */
4607
if (ACPI_HANDLE(&pdev->dev)) {
4608
unsigned long long value = 0;
4609
acpi_status s;
4610
4611
s = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev),
4612
qm->err_info.acpi_rst,
4613
NULL, &value);
4614
if (ACPI_FAILURE(s)) {
4615
pci_err(pdev, "NO controller reset method!\n");
4616
return -EIO;
4617
}
4618
4619
if (value) {
4620
pci_err(pdev, "Reset step %llu failed!\n", value);
4621
return -EIO;
4622
}
4623
4624
return 0;
4625
}
4626
4627
pci_err(pdev, "No reset method!\n");
4628
return -EINVAL;
4629
}
4630
4631
static int qm_soft_reset(struct hisi_qm *qm)
4632
{
4633
int ret;
4634
4635
ret = qm_soft_reset_prepare(qm);
4636
if (ret)
4637
return ret;
4638
4639
return qm_reset_device(qm);
4640
}
4641
4642
static int qm_vf_reset_done(struct hisi_qm *qm)
4643
{
4644
struct hisi_qm_list *qm_list = qm->qm_list;
4645
struct pci_dev *pdev = qm->pdev;
4646
struct pci_dev *virtfn;
4647
struct hisi_qm *vf_qm;
4648
int ret = 0;
4649
4650
mutex_lock(&qm_list->lock);
4651
list_for_each_entry(vf_qm, &qm_list->list, list) {
4652
virtfn = vf_qm->pdev;
4653
if (virtfn == pdev)
4654
continue;
4655
4656
if (pci_physfn(virtfn) == pdev) {
4657
/* restore the VF's PCIe BAR configuration */
4658
pci_restore_state(virtfn);
4659
4660
ret = qm_restart(vf_qm);
4661
if (ret)
4662
goto restart_fail;
4663
}
4664
}
4665
4666
restart_fail:
4667
mutex_unlock(&qm_list->lock);
4668
return ret;
4669
}
4670
4671
static int qm_try_start_vfs(struct hisi_qm *qm, enum qm_ifc_cmd cmd)
4672
{
4673
struct pci_dev *pdev = qm->pdev;
4674
int ret;
4675
4676
if (!qm->vfs_num)
4677
return 0;
4678
4679
ret = qm_vf_q_assign(qm, qm->vfs_num);
4680
if (ret) {
4681
pci_err(pdev, "failed to assign VFs, ret = %d.\n", ret);
4682
return ret;
4683
}
4684
4685
/* Kunpeng930 supports notifying VFs to start after PF reset. */
4686
if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) {
4687
ret = qm_ping_all_vfs(qm, cmd);
4688
if (ret)
4689
pci_warn(pdev, "failed to send cmd to all VFs after PF reset!\n");
4690
} else {
4691
ret = qm_vf_reset_done(qm);
4692
if (ret)
4693
pci_warn(pdev, "failed to start vfs, ret = %d.\n", ret);
4694
}
4695
4696
return ret;
4697
}
4698
4699
static int qm_dev_hw_init(struct hisi_qm *qm)
4700
{
4701
return qm->err_ini->hw_init(qm);
4702
}
4703
4704
static void qm_restart_prepare(struct hisi_qm *qm)
4705
{
4706
u32 value;
4707
4708
if (qm->ver >= QM_HW_V3)
4709
return;
4710
4711
if (!qm->err_status.is_qm_ecc_mbit &&
4712
!qm->err_status.is_dev_ecc_mbit)
4713
return;
4714
4715
/* temporarily close the OOO port used for PEH to write out MSI */
4716
value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN);
4717
writel(value & ~qm->err_info.msi_wr_port,
4718
qm->io_base + ACC_AM_CFG_PORT_WR_EN);
4719
4720
/* clear the dev ecc 2bit error source, if present */
4721
value = qm_get_dev_err_status(qm) & qm->err_info.dev_err.ecc_2bits_mask;
4722
if (value && qm->err_ini->clear_dev_hw_err_status)
4723
qm->err_ini->clear_dev_hw_err_status(qm, value);
4724
4725
/* clear QM ecc mbit error source */
4726
writel(qm->err_info.qm_err.ecc_2bits_mask, qm->io_base + QM_ABNORMAL_INT_SOURCE);
4727
4728
/* clear AM Reorder Buffer ecc mbit source */
4729
writel(ACC_ROB_ECC_ERR_MULTPL, qm->io_base + ACC_AM_ROB_ECC_INT_STS);
4730
}
4731
4732
static void qm_restart_done(struct hisi_qm *qm)
4733
{
4734
u32 value;
4735
4736
if (qm->ver >= QM_HW_V3)
4737
goto clear_flags;
4738
4739
if (!qm->err_status.is_qm_ecc_mbit &&
4740
!qm->err_status.is_dev_ecc_mbit)
4741
return;
4742
4743
/* open the OOO port for PEH to write out MSI */
4744
value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN);
4745
value |= qm->err_info.msi_wr_port;
4746
writel(value, qm->io_base + ACC_AM_CFG_PORT_WR_EN);
4747
4748
clear_flags:
4749
qm->err_status.is_qm_ecc_mbit = false;
4750
qm->err_status.is_dev_ecc_mbit = false;
4751
}
4752
4753
static void qm_disable_axi_error(struct hisi_qm *qm)
4754
{
4755
struct hisi_qm_err_mask *qm_err = &qm->err_info.qm_err;
4756
u32 val;
4757
4758
val = ~(qm->error_mask & (~QM_RAS_AXI_ERROR));
4759
writel(val, qm->io_base + QM_ABNORMAL_INT_MASK);
4760
if (qm->ver > QM_HW_V2)
4761
writel(qm_err->shutdown_mask & (~QM_RAS_AXI_ERROR),
4762
qm->io_base + QM_OOO_SHUTDOWN_SEL);
4763
4764
if (qm->err_ini->disable_axi_error)
4765
qm->err_ini->disable_axi_error(qm);
4766
}
4767
4768
static void qm_enable_axi_error(struct hisi_qm *qm)
4769
{
4770
/* clear axi error source */
4771
writel(QM_RAS_AXI_ERROR, qm->io_base + QM_ABNORMAL_INT_SOURCE);
4772
4773
writel(~qm->error_mask, qm->io_base + QM_ABNORMAL_INT_MASK);
4774
if (qm->ver > QM_HW_V2)
4775
writel(qm->err_info.qm_err.shutdown_mask, qm->io_base + QM_OOO_SHUTDOWN_SEL);
4776
4777
if (qm->err_ini->enable_axi_error)
4778
qm->err_ini->enable_axi_error(qm);
4779
}
4780
4781
static int qm_controller_reset_done(struct hisi_qm *qm)
4782
{
4783
struct pci_dev *pdev = qm->pdev;
4784
int ret;
4785
4786
ret = qm->ops->set_msi(qm, true);
4787
if (ret) {
4788
pci_err(pdev, "Fails to enable PEH MSI bit!\n");
4789
return ret;
4790
}
4791
4792
ret = qm_set_pf_mse(qm, true);
4793
if (ret) {
4794
pci_err(pdev, "Fails to enable pf MSE bit!\n");
4795
return ret;
4796
}
4797
4798
if (qm->vfs_num) {
4799
ret = qm_set_vf_mse(qm, true);
4800
if (ret) {
4801
pci_err(pdev, "Fails to enable vf MSE bit!\n");
4802
return ret;
4803
}
4804
}
4805
4806
ret = qm_dev_hw_init(qm);
4807
if (ret) {
4808
pci_err(pdev, "Failed to init device\n");
4809
return ret;
4810
}
4811
4812
qm_restart_prepare(qm);
4813
hisi_qm_dev_err_init(qm);
4814
qm_disable_axi_error(qm);
4815
if (qm->err_ini->open_axi_master_ooo)
4816
qm->err_ini->open_axi_master_ooo(qm);
4817
4818
ret = qm_dev_mem_reset(qm);
4819
if (ret) {
4820
pci_err(pdev, "failed to reset device memory\n");
4821
return ret;
4822
}
4823
4824
ret = qm_restart(qm);
4825
if (ret) {
4826
pci_err(pdev, "Failed to start QM!\n");
4827
return ret;
4828
}
4829
4830
ret = qm_try_start_vfs(qm, QM_PF_RESET_DONE);
4831
if (ret)
4832
pci_err(pdev, "failed to start vfs by pf in soft reset.\n");
4833
4834
ret = qm_wait_vf_prepare_finish(qm);
4835
if (ret)
4836
pci_err(pdev, "failed to start by vfs in soft reset!\n");
4837
qm_enable_axi_error(qm);
4838
qm_cmd_init(qm);
4839
qm_restart_done(qm);
4840
4841
qm_reset_bit_clear(qm);
4842
4843
return 0;
4844
}
4845
4846
static int qm_controller_reset(struct hisi_qm *qm)
4847
{
4848
struct pci_dev *pdev = qm->pdev;
4849
int ret;
4850
4851
pci_info(pdev, "Controller resetting...\n");
4852
4853
ret = qm_controller_reset_prepare(qm);
4854
if (ret) {
4855
hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
4856
hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
4857
clear_bit(QM_RST_SCHED, &qm->misc_ctl);
4858
return ret;
4859
}
4860
4861
hisi_qm_show_last_dfx_regs(qm);
4862
if (qm->err_ini->show_last_dfx_regs)
4863
qm->err_ini->show_last_dfx_regs(qm);
4864
4865
ret = qm_soft_reset(qm);
4866
if (ret)
4867
goto err_reset;
4868
4869
ret = qm_controller_reset_done(qm);
4870
if (ret)
4871
goto err_reset;
4872
4873
pci_info(pdev, "Controller reset complete\n");
4874
4875
return 0;
4876
4877
err_reset:
4878
pci_err(pdev, "Controller reset failed (%d)\n", ret);
4879
qm_reset_bit_clear(qm);
4880
4881
/* if resetting fails, isolate the device */
4882
if (qm->use_sva)
4883
qm->isolate_data.is_isolate = true;
4884
return ret;
4885
}
4886
4887
/**
4888
* hisi_qm_dev_slot_reset() - slot reset
4889
* @pdev: the PCIe device
4890
*
4891
* This function offers the QM-related PCIe device reset interface. Drivers that
4892
* use QM can use this function as slot_reset in their struct pci_error_handlers.
4893
*/
4894
pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev)
4895
{
4896
struct hisi_qm *qm = pci_get_drvdata(pdev);
4897
int ret;
4898
4899
if (pdev->is_virtfn)
4900
return PCI_ERS_RESULT_RECOVERED;
4901
4902
/* reset pcie device controller */
4903
ret = qm_controller_reset(qm);
4904
if (ret) {
4905
pci_err(pdev, "Controller reset failed (%d)\n", ret);
4906
return PCI_ERS_RESULT_DISCONNECT;
4907
}
4908
4909
return PCI_ERS_RESULT_RECOVERED;
4910
}
4911
EXPORT_SYMBOL_GPL(hisi_qm_dev_slot_reset);
4912
4913
void hisi_qm_reset_prepare(struct pci_dev *pdev)
4914
{
4915
struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
4916
struct hisi_qm *qm = pci_get_drvdata(pdev);
4917
u32 delay = 0;
4918
int ret;
4919
4920
hisi_qm_dev_err_uninit(pf_qm);
4921
4922
/*
4923
* Check whether there is an ECC mbit error. If there is, wait for the
4924
* soft reset to fix it.
4925
*/
4926
while (qm_check_dev_error(qm)) {
4927
msleep(++delay);
4928
if (delay > QM_RESET_WAIT_TIMEOUT)
4929
return;
4930
}
4931
4932
ret = qm_reset_prepare_ready(qm);
4933
if (ret) {
4934
pci_err(pdev, "FLR not ready!\n");
4935
return;
4936
}
4937
4938
/* PF obtains the information of VF by querying the register. */
4939
if (qm->fun_type == QM_HW_PF)
4940
qm_cmd_uninit(qm);
4941
4942
ret = qm_try_stop_vfs(qm, QM_PF_FLR_PREPARE, QM_DOWN);
4943
if (ret)
4944
pci_err(pdev, "failed to stop vfs by pf in FLR.\n");
4945
4946
ret = hisi_qm_stop(qm, QM_DOWN);
4947
if (ret) {
4948
pci_err(pdev, "Failed to stop QM, ret = %d.\n", ret);
4949
hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
4950
hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
4951
return;
4952
}
4953
4954
ret = qm_wait_vf_prepare_finish(qm);
4955
if (ret)
4956
pci_err(pdev, "failed to stop by vfs in FLR!\n");
4957
4958
pci_info(pdev, "FLR resetting...\n");
4959
}
4960
EXPORT_SYMBOL_GPL(hisi_qm_reset_prepare);
4961
4962
static bool qm_flr_reset_complete(struct pci_dev *pdev)
4963
{
4964
struct pci_dev *pf_pdev = pci_physfn(pdev);
4965
struct hisi_qm *qm = pci_get_drvdata(pf_pdev);
4966
u32 id;
4967
4968
pci_read_config_dword(qm->pdev, PCI_COMMAND, &id);
4969
if (id == QM_PCI_COMMAND_INVALID) {
4970
pci_err(pdev, "Device can not be used!\n");
4971
return false;
4972
}
4973
4974
return true;
4975
}
4976
4977
void hisi_qm_reset_done(struct pci_dev *pdev)
4978
{
4979
struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
4980
struct hisi_qm *qm = pci_get_drvdata(pdev);
4981
int ret;
4982
4983
if (qm->fun_type == QM_HW_PF) {
4984
ret = qm_dev_hw_init(qm);
4985
if (ret) {
4986
pci_err(pdev, "Failed to init PF, ret = %d.\n", ret);
4987
goto flr_done;
4988
}
4989
}
4990
4991
hisi_qm_dev_err_init(pf_qm);
4992
4993
ret = qm_restart(qm);
4994
if (ret) {
4995
pci_err(pdev, "Failed to start QM, ret = %d.\n", ret);
4996
goto flr_done;
4997
}
4998
4999
ret = qm_try_start_vfs(qm, QM_PF_RESET_DONE);
5000
if (ret)
5001
pci_err(pdev, "failed to start vfs by pf in FLR.\n");
5002
5003
ret = qm_wait_vf_prepare_finish(qm);
5004
if (ret)
5005
pci_err(pdev, "failed to start by vfs in FLR!\n");
5006
5007
flr_done:
5008
if (qm->fun_type == QM_HW_PF)
5009
qm_cmd_init(qm);
5010
5011
if (qm_flr_reset_complete(pdev))
5012
pci_info(pdev, "FLR reset complete\n");
5013
5014
qm_reset_bit_clear(qm);
5015
}
5016
EXPORT_SYMBOL_GPL(hisi_qm_reset_done);
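/*
 * Minimal sketch (not part of this driver): the exported AER/FLR hooks above
 * are normally grouped in a driver's struct pci_error_handlers and referenced
 * from its struct pci_driver through .err_handler. The table name is an
 * assumption; the four callbacks are the real exports.
 */
static const struct pci_error_handlers demo_err_handler = {
	.error_detected	= hisi_qm_dev_err_detected,
	.slot_reset	= hisi_qm_dev_slot_reset,
	.reset_prepare	= hisi_qm_reset_prepare,
	.reset_done	= hisi_qm_reset_done,
};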
5017
5018
static irqreturn_t qm_rsvd_irq(int irq, void *data)
5019
{
5020
struct hisi_qm *qm = data;
5021
5022
dev_info(&qm->pdev->dev, "Reserved interrupt, ignore!\n");
5023
5024
return IRQ_HANDLED;
5025
}
5026
5027
static irqreturn_t qm_abnormal_irq(int irq, void *data)
5028
{
5029
struct hisi_qm *qm = data;
5030
enum acc_err_result ret;
5031
5032
atomic64_inc(&qm->debug.dfx.abnormal_irq_cnt);
5033
ret = qm_process_dev_error(qm);
5034
if (ret == ACC_ERR_NEED_RESET &&
5035
!test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl) &&
5036
!test_and_set_bit(QM_RST_SCHED, &qm->misc_ctl))
5037
schedule_work(&qm->rst_work);
5038
5039
return IRQ_HANDLED;
5040
}
5041
5042
/**
5043
* hisi_qm_dev_shutdown() - Shutdown device.
5044
* @pdev: The device to be shut down.
5045
*
5046
* This function stops the qm when the OS shuts down or reboots.
5047
*/
5048
void hisi_qm_dev_shutdown(struct pci_dev *pdev)
5049
{
5050
struct hisi_qm *qm = pci_get_drvdata(pdev);
5051
int ret;
5052
5053
ret = hisi_qm_stop(qm, QM_DOWN);
5054
if (ret)
5055
dev_err(&pdev->dev, "Fail to stop qm in shutdown!\n");
5056
}
5057
EXPORT_SYMBOL_GPL(hisi_qm_dev_shutdown);
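/*
 * Sketch (assumption, not part of this driver): drivers usually point the PCI
 * shutdown callback straight at the helper above, for example:
 *
 *	static struct pci_driver demo_pci_driver = {
 *		...
 *		.shutdown = hisi_qm_dev_shutdown,
 *	};
 */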
5058
5059
static void hisi_qm_controller_reset(struct work_struct *rst_work)
5060
{
5061
struct hisi_qm *qm = container_of(rst_work, struct hisi_qm, rst_work);
5062
int ret;
5063
5064
ret = qm_pm_get_sync(qm);
5065
if (ret) {
5066
clear_bit(QM_RST_SCHED, &qm->misc_ctl);
5067
return;
5068
}
5069
5070
/* reset pcie device controller */
5071
ret = qm_controller_reset(qm);
5072
if (ret)
5073
dev_err(&qm->pdev->dev, "controller reset failed (%d)\n", ret);
5074
5075
qm_pm_put_sync(qm);
5076
}
5077
5078
static void qm_pf_reset_vf_prepare(struct hisi_qm *qm,
5079
enum qm_stop_reason stop_reason)
5080
{
5081
enum qm_ifc_cmd cmd = QM_VF_PREPARE_DONE;
5082
struct pci_dev *pdev = qm->pdev;
5083
int ret;
5084
5085
ret = qm_reset_prepare_ready(qm);
5086
if (ret) {
5087
dev_err(&pdev->dev, "reset prepare not ready!\n");
5088
atomic_set(&qm->status.flags, QM_STOP);
5089
cmd = QM_VF_PREPARE_FAIL;
5090
goto err_prepare;
5091
}
5092
5093
ret = hisi_qm_stop(qm, stop_reason);
5094
if (ret) {
5095
dev_err(&pdev->dev, "failed to stop QM, ret = %d.\n", ret);
5096
atomic_set(&qm->status.flags, QM_STOP);
5097
cmd = QM_VF_PREPARE_FAIL;
5098
goto err_prepare;
5099
} else {
5100
goto out;
5101
}
5102
5103
err_prepare:
5104
hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
5105
hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
5106
out:
5107
pci_save_state(pdev);
5108
ret = qm_ping_pf(qm, cmd);
5109
if (ret)
5110
dev_warn(&pdev->dev, "PF responds timeout in reset prepare!\n");
5111
}
5112
5113
static void qm_pf_reset_vf_done(struct hisi_qm *qm)
5114
{
5115
enum qm_ifc_cmd cmd = QM_VF_START_DONE;
5116
struct pci_dev *pdev = qm->pdev;
5117
int ret;
5118
5119
pci_restore_state(pdev);
5120
ret = hisi_qm_start(qm);
5121
if (ret) {
5122
dev_err(&pdev->dev, "failed to start QM, ret = %d.\n", ret);
5123
cmd = QM_VF_START_FAIL;
5124
}
5125
5126
qm_cmd_init(qm);
5127
ret = qm_ping_pf(qm, cmd);
5128
if (ret)
5129
dev_warn(&pdev->dev, "PF responds timeout in reset done!\n");
5130
5131
qm_reset_bit_clear(qm);
5132
}
5133
5134
static int qm_wait_pf_reset_finish(struct hisi_qm *qm)
5135
{
5136
struct device *dev = &qm->pdev->dev;
5137
u32 val, cmd;
5138
int ret;
5139
5140
/* Wait for reset to finish */
5141
ret = readl_relaxed_poll_timeout(qm->io_base + QM_IFC_INT_SOURCE_V, val,
5142
val == BIT(0), QM_VF_RESET_WAIT_US,
5143
QM_VF_RESET_WAIT_TIMEOUT_US);
5144
/* hardware completion status should be available by this time */
5145
if (ret) {
5146
dev_err(dev, "couldn't get reset done status from PF, timeout!\n");
5147
return -ETIMEDOUT;
5148
}
5149
5150
/*
5151
* Whether or not the message is received successfully, the VF needs
5152
* to ack the PF by clearing the interrupt.
5153
*/
5154
ret = qm->ops->get_ifc(qm, &cmd, NULL, 0);
5155
qm_clear_cmd_interrupt(qm, 0);
5156
if (ret) {
5157
dev_err(dev, "failed to get command from PF in reset done!\n");
5158
return ret;
5159
}
5160
5161
if (cmd != QM_PF_RESET_DONE) {
5162
dev_err(dev, "the command(0x%x) is not reset done!\n", cmd);
5163
ret = -EINVAL;
5164
}
5165
5166
return ret;
5167
}
5168
5169
static void qm_pf_reset_vf_process(struct hisi_qm *qm,
5170
enum qm_stop_reason stop_reason)
5171
{
5172
struct device *dev = &qm->pdev->dev;
5173
int ret;
5174
5175
dev_info(dev, "device reset start...\n");
5176
5177
/* The message is obtained by querying the register during resetting */
5178
qm_cmd_uninit(qm);
5179
qm_pf_reset_vf_prepare(qm, stop_reason);
5180
5181
ret = qm_wait_pf_reset_finish(qm);
5182
if (ret)
5183
goto err_get_status;
5184
5185
qm_pf_reset_vf_done(qm);
5186
5187
dev_info(dev, "device reset done.\n");
5188
5189
return;
5190
5191
err_get_status:
5192
qm_cmd_init(qm);
5193
qm_reset_bit_clear(qm);
5194
}
5195
5196
static void qm_handle_cmd_msg(struct hisi_qm *qm, u32 fun_num)
5197
{
5198
struct device *dev = &qm->pdev->dev;
5199
enum qm_ifc_cmd cmd;
5200
u32 data;
5201
int ret;
5202
5203
/*
5204
* Get the msg from the source by sending a mailbox. Whether or not the msg is
5205
* received, the destination needs to ack the source by clearing the interrupt.
5206
*/
5207
ret = qm->ops->get_ifc(qm, &cmd, &data, fun_num);
5208
qm_clear_cmd_interrupt(qm, BIT(fun_num));
5209
if (ret) {
5210
dev_err(dev, "failed to get command from source!\n");
5211
return;
5212
}
5213
5214
switch (cmd) {
5215
case QM_PF_FLR_PREPARE:
5216
qm_pf_reset_vf_process(qm, QM_DOWN);
5217
break;
5218
case QM_PF_SRST_PREPARE:
5219
qm_pf_reset_vf_process(qm, QM_SOFT_RESET);
5220
break;
5221
case QM_VF_GET_QOS:
5222
qm_vf_get_qos(qm, fun_num);
5223
break;
5224
case QM_PF_SET_QOS:
5225
qm->mb_qos = data;
5226
break;
5227
default:
5228
dev_err(dev, "unsupported command(0x%x) sent by function(%u)!\n", cmd, fun_num);
5229
break;
5230
}
5231
}
5232
5233
static void qm_cmd_process(struct work_struct *cmd_process)
5234
{
5235
struct hisi_qm *qm = container_of(cmd_process,
5236
struct hisi_qm, cmd_process);
5237
u32 vfs_num = qm->vfs_num;
5238
u64 val;
5239
u32 i;
5240
5241
if (qm->fun_type == QM_HW_PF) {
5242
val = readq(qm->io_base + QM_IFC_INT_SOURCE_P);
5243
if (!val)
5244
return;
5245
5246
for (i = 1; i <= vfs_num; i++) {
5247
if (val & BIT(i))
5248
qm_handle_cmd_msg(qm, i);
5249
}
5250
5251
return;
5252
}
5253
5254
qm_handle_cmd_msg(qm, 0);
5255
}
5256
5257
/**
5258
* hisi_qm_alg_register() - Register alg to crypto.
5259
* @qm: The qm to be added.
5260
* @qm_list: The qm list.
5261
* @guard: Guard of qp_num.
5262
*
5263
* Register the algorithm to crypto when the function satisfies the guard.
5264
*/
5265
int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list, int guard)
5266
{
5267
struct device *dev = &qm->pdev->dev;
5268
5269
if (qm->ver <= QM_HW_V2 && qm->use_sva) {
5270
dev_info(dev, "HW V2 not both use uacce sva mode and hardware crypto algs.\n");
5271
return 0;
5272
}
5273
5274
if (qm->qp_num < guard) {
5275
dev_info(dev, "qp_num is less than task need.\n");
5276
return 0;
5277
}
5278
5279
return qm_list->register_to_crypto(qm);
5280
}
5281
EXPORT_SYMBOL_GPL(hisi_qm_alg_register);
5282
5283
/**
5284
* hisi_qm_alg_unregister() - Unregister alg from crypto.
5285
* @qm: The qm to be deleted.
5286
* @qm_list: The qm list.
5287
* @guard: Guard of qp_num.
5288
*
5289
* Unregister the algorithm from crypto when the last function satisfies the guard.
5290
*/
5291
void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list, int guard)
5292
{
5293
if (qm->ver <= QM_HW_V2 && qm->use_sva)
5294
return;
5295
5296
if (qm->qp_num < guard)
5297
return;
5298
5299
qm_list->unregister_from_crypto(qm);
5300
}
5301
EXPORT_SYMBOL_GPL(hisi_qm_alg_unregister);
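/*
 * Minimal sketch (not part of this driver) of how a QM-based driver pairs the
 * two helpers above. "demo_devices" and DEMO_CTX_Q_NUM are assumptions
 * standing in for the driver's own hisi_qm_list (whose register_to_crypto()
 * and unregister_from_crypto() callbacks the driver must fill in) and its
 * minimum queue requirement, i.e. the "guard".
 */
static struct hisi_qm_list demo_devices;	/* assumed driver-wide qm list */
#define DEMO_CTX_Q_NUM	2			/* assumed minimum qp guard */

static int demo_register_algs(struct hisi_qm *qm)
{
	/* registers only if this function owns at least DEMO_CTX_Q_NUM qps */
	return hisi_qm_alg_register(qm, &demo_devices, DEMO_CTX_Q_NUM);
}

static void demo_unregister_algs(struct hisi_qm *qm)
{
	hisi_qm_alg_unregister(qm, &demo_devices, DEMO_CTX_Q_NUM);
}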
5302
5303
static void qm_unregister_abnormal_irq(struct hisi_qm *qm)
5304
{
5305
struct pci_dev *pdev = qm->pdev;
5306
u32 irq_vector, val;
5307
5308
if (qm->fun_type == QM_HW_VF && qm->ver < QM_HW_V3)
5309
return;
5310
5311
val = qm->cap_tables.qm_cap_table[QM_ABNORMAL_IRQ].cap_val;
5312
if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK))
5313
return;
5314
5315
irq_vector = val & QM_IRQ_VECTOR_MASK;
5316
free_irq(pci_irq_vector(pdev, irq_vector), qm);
5317
}
5318
5319
static int qm_register_abnormal_irq(struct hisi_qm *qm)
5320
{
5321
struct pci_dev *pdev = qm->pdev;
5322
u32 irq_vector, val;
5323
int ret;
5324
5325
val = qm->cap_tables.qm_cap_table[QM_ABNORMAL_IRQ].cap_val;
5326
if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK))
5327
return 0;
5328
irq_vector = val & QM_IRQ_VECTOR_MASK;
5329
5330
/* For a VF, this is a reserved interrupt in the V3 version. */
5331
if (qm->fun_type == QM_HW_VF) {
5332
if (qm->ver < QM_HW_V3)
5333
return 0;
5334
5335
ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_rsvd_irq,
5336
IRQF_NO_AUTOEN, qm->dev_name, qm);
5337
if (ret) {
5338
dev_err(&pdev->dev, "failed to request reserved irq, ret = %d!\n", ret);
5339
return ret;
5340
}
5341
return 0;
5342
}
5343
5344
ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_abnormal_irq, 0, qm->dev_name, qm);
5345
if (ret)
5346
dev_err(&qm->pdev->dev, "failed to request abnormal irq, ret = %d!\n", ret);
5347
5348
return ret;
5349
}
5350
5351
static void qm_unregister_mb_cmd_irq(struct hisi_qm *qm)
5352
{
5353
struct pci_dev *pdev = qm->pdev;
5354
u32 irq_vector, val;
5355
5356
val = qm->cap_tables.qm_cap_table[QM_MB_IRQ].cap_val;
5357
if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
5358
return;
5359
5360
irq_vector = val & QM_IRQ_VECTOR_MASK;
5361
free_irq(pci_irq_vector(pdev, irq_vector), qm);
5362
}
5363
5364
static int qm_register_mb_cmd_irq(struct hisi_qm *qm)
5365
{
5366
struct pci_dev *pdev = qm->pdev;
5367
u32 irq_vector, val;
5368
int ret;
5369
5370
val = qm->cap_tables.qm_cap_table[QM_MB_IRQ].cap_val;
5371
if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
5372
return 0;
5373
5374
irq_vector = val & QM_IRQ_VECTOR_MASK;
5375
ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_mb_cmd_irq, 0, qm->dev_name, qm);
5376
if (ret)
5377
dev_err(&pdev->dev, "failed to request function communication irq, ret = %d", ret);
5378
5379
return ret;
5380
}
5381
5382
static void qm_unregister_aeq_irq(struct hisi_qm *qm)
5383
{
5384
struct pci_dev *pdev = qm->pdev;
5385
u32 irq_vector, val;
5386
5387
val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ].cap_val;
5388
if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
5389
return;
5390
5391
irq_vector = val & QM_IRQ_VECTOR_MASK;
5392
free_irq(pci_irq_vector(pdev, irq_vector), qm);
5393
}
5394
5395
static int qm_register_aeq_irq(struct hisi_qm *qm)
5396
{
5397
struct pci_dev *pdev = qm->pdev;
5398
u32 irq_vector, val;
5399
int ret;
5400
5401
val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ].cap_val;
5402
if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
5403
return 0;
5404
5405
irq_vector = val & QM_IRQ_VECTOR_MASK;
5406
ret = request_threaded_irq(pci_irq_vector(pdev, irq_vector), NULL,
5407
qm_aeq_thread, IRQF_ONESHOT, qm->dev_name, qm);
5408
if (ret)
5409
dev_err(&pdev->dev, "failed to request eq irq, ret = %d", ret);
5410
5411
return ret;
5412
}
5413
5414
static void qm_unregister_eq_irq(struct hisi_qm *qm)
5415
{
5416
struct pci_dev *pdev = qm->pdev;
5417
u32 irq_vector, val;
5418
5419
val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ].cap_val;
5420
if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
5421
return;
5422
5423
irq_vector = val & QM_IRQ_VECTOR_MASK;
5424
free_irq(pci_irq_vector(pdev, irq_vector), qm);
5425
}
5426
5427
static int qm_register_eq_irq(struct hisi_qm *qm)
5428
{
5429
struct pci_dev *pdev = qm->pdev;
5430
u32 irq_vector, val;
5431
int ret;
5432
5433
val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ].cap_val;
5434
if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
5435
return 0;
5436
5437
irq_vector = val & QM_IRQ_VECTOR_MASK;
5438
ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_eq_irq, 0, qm->dev_name, qm);
5439
if (ret)
5440
dev_err(&pdev->dev, "failed to request eq irq, ret = %d", ret);
5441
5442
return ret;
5443
}
5444
5445
static void qm_irqs_unregister(struct hisi_qm *qm)
5446
{
5447
qm_unregister_mb_cmd_irq(qm);
5448
qm_unregister_abnormal_irq(qm);
5449
qm_unregister_aeq_irq(qm);
5450
qm_unregister_eq_irq(qm);
5451
}
5452
5453
static int qm_irqs_register(struct hisi_qm *qm)
5454
{
5455
int ret;
5456
5457
ret = qm_register_eq_irq(qm);
5458
if (ret)
5459
return ret;
5460
5461
ret = qm_register_aeq_irq(qm);
5462
if (ret)
5463
goto free_eq_irq;
5464
5465
ret = qm_register_abnormal_irq(qm);
5466
if (ret)
5467
goto free_aeq_irq;
5468
5469
ret = qm_register_mb_cmd_irq(qm);
5470
if (ret)
5471
goto free_abnormal_irq;
5472
5473
return 0;
5474
5475
free_abnormal_irq:
5476
qm_unregister_abnormal_irq(qm);
5477
free_aeq_irq:
5478
qm_unregister_aeq_irq(qm);
5479
free_eq_irq:
5480
qm_unregister_eq_irq(qm);
5481
return ret;
5482
}
5483
5484
static int qm_get_qp_num(struct hisi_qm *qm)
5485
{
5486
struct device *dev = &qm->pdev->dev;
5487
bool is_db_isolation;
5488
5489
/* VF's qp_num assigned by PF in v2, and VF can get qp_num by vft. */
5490
if (qm->fun_type == QM_HW_VF) {
5491
if (qm->ver != QM_HW_V1)
5492
/* v2 starts to support getting vft by mailbox */
5493
return hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
5494
5495
return 0;
5496
}
5497
5498
is_db_isolation = test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps);
5499
qm->ctrl_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info, QM_TOTAL_QP_NUM_CAP, true);
5500
qm->max_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info,
5501
QM_FUNC_MAX_QP_CAP, is_db_isolation);
5502
5503
if (qm->qp_num <= qm->max_qp_num)
5504
return 0;
5505
5506
if (test_bit(QM_MODULE_PARAM, &qm->misc_ctl)) {
5507
/* Check whether the set qp number is valid */
5508
dev_err(dev, "qp num(%u) is more than max qp num(%u)!\n",
5509
qm->qp_num, qm->max_qp_num);
5510
return -EINVAL;
5511
}
5512
5513
dev_info(dev, "Default qp num(%u) is too big, reset it to Function's max qp num(%u)!\n",
5514
qm->qp_num, qm->max_qp_num);
5515
qm->qp_num = qm->max_qp_num;
5516
qm->debug.curr_qm_qp_num = qm->qp_num;
5517
5518
return 0;
5519
}
5520
5521
static int qm_pre_store_caps(struct hisi_qm *qm)
5522
{
5523
struct hisi_qm_cap_record *qm_cap;
5524
struct pci_dev *pdev = qm->pdev;
5525
size_t i, size;
5526
5527
size = ARRAY_SIZE(qm_cap_query_info);
5528
qm_cap = devm_kcalloc(&pdev->dev, sizeof(*qm_cap), size, GFP_KERNEL);
5529
if (!qm_cap)
5530
return -ENOMEM;
5531
5532
for (i = 0; i < size; i++) {
5533
qm_cap[i].type = qm_cap_query_info[i].type;
5534
qm_cap[i].name = qm_cap_query_info[i].name;
5535
qm_cap[i].cap_val = hisi_qm_get_cap_value(qm, qm_cap_query_info,
5536
i, qm->cap_ver);
5537
}
5538
5539
qm->cap_tables.qm_cap_table = qm_cap;
5540
qm->cap_tables.qm_cap_size = size;
5541
5542
return 0;
5543
}
5544
5545
static int qm_get_hw_caps(struct hisi_qm *qm)
5546
{
5547
const struct hisi_qm_cap_info *cap_info = qm->fun_type == QM_HW_PF ?
5548
qm_cap_info_pf : qm_cap_info_vf;
5549
u32 size = qm->fun_type == QM_HW_PF ? ARRAY_SIZE(qm_cap_info_pf) :
5550
ARRAY_SIZE(qm_cap_info_vf);
5551
u32 val, i;
5552
5553
/* The doorbell isolation register is an independent register. */
5554
val = hisi_qm_get_hw_info(qm, qm_cap_info_comm, QM_SUPPORT_DB_ISOLATION, true);
5555
if (val)
5556
set_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps);
5557
5558
if (qm->ver >= QM_HW_V3) {
5559
val = readl(qm->io_base + QM_FUNC_CAPS_REG);
5560
qm->cap_ver = val & QM_CAPBILITY_VERSION;
5561
}
5562
5563
/* Get the PF/VF common capabilities */
5564
for (i = 1; i < ARRAY_SIZE(qm_cap_info_comm); i++) {
5565
val = hisi_qm_get_hw_info(qm, qm_cap_info_comm, i, qm->cap_ver);
5566
if (val)
5567
set_bit(qm_cap_info_comm[i].type, &qm->caps);
5568
}
5569
5570
/* Get the PF/VF-specific capabilities */
5571
for (i = 0; i < size; i++) {
5572
val = hisi_qm_get_hw_info(qm, cap_info, i, qm->cap_ver);
5573
if (val)
5574
set_bit(cap_info[i].type, &qm->caps);
5575
}
5576
5577
/* Fetch and save the value of qm capability registers */
5578
return qm_pre_store_caps(qm);
5579
}
5580
5581
static void qm_get_version(struct hisi_qm *qm)
5582
{
5583
struct pci_dev *pdev = qm->pdev;
5584
u32 sub_version_id;
5585
5586
qm->ver = pdev->revision;
5587
5588
if (pdev->revision == QM_HW_V3) {
5589
sub_version_id = readl(qm->io_base + QM_SUB_VERSION_ID);
5590
if (sub_version_id)
5591
qm->ver = sub_version_id;
5592
}
5593
}
5594
5595
static int qm_get_pci_res(struct hisi_qm *qm)
5596
{
5597
struct pci_dev *pdev = qm->pdev;
5598
struct device *dev = &pdev->dev;
5599
int ret;
5600
5601
ret = pci_request_mem_regions(pdev, qm->dev_name);
5602
if (ret < 0) {
5603
dev_err(dev, "Failed to request mem regions!\n");
5604
return ret;
5605
}
5606
5607
qm->phys_base = pci_resource_start(pdev, PCI_BAR_2);
5608
qm->io_base = ioremap(qm->phys_base, pci_resource_len(pdev, PCI_BAR_2));
5609
if (!qm->io_base) {
5610
ret = -EIO;
5611
goto err_request_mem_regions;
5612
}
5613
5614
qm_get_version(qm);
5615
5616
ret = qm_get_hw_caps(qm);
5617
if (ret)
5618
goto err_ioremap;
5619
5620
if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) {
5621
qm->db_interval = QM_QP_DB_INTERVAL;
5622
qm->db_phys_base = pci_resource_start(pdev, PCI_BAR_4);
5623
qm->db_io_base = ioremap(qm->db_phys_base,
5624
pci_resource_len(pdev, PCI_BAR_4));
5625
if (!qm->db_io_base) {
5626
ret = -EIO;
5627
goto err_ioremap;
5628
}
5629
} else {
5630
qm->db_phys_base = qm->phys_base;
5631
qm->db_io_base = qm->io_base;
5632
qm->db_interval = 0;
5633
}
5634
5635
hisi_qm_pre_init(qm);
5636
ret = qm_get_qp_num(qm);
5637
if (ret)
5638
goto err_db_ioremap;
5639
5640
return 0;
5641
5642
err_db_ioremap:
5643
if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps))
5644
iounmap(qm->db_io_base);
5645
err_ioremap:
5646
iounmap(qm->io_base);
5647
err_request_mem_regions:
5648
pci_release_mem_regions(pdev);
5649
return ret;
5650
}
5651
5652
static int qm_clear_device(struct hisi_qm *qm)
5653
{
5654
acpi_handle handle = ACPI_HANDLE(&qm->pdev->dev);
5655
int ret;
5656
5657
if (qm->fun_type == QM_HW_VF)
5658
return 0;
5659
5660
/* Device does not support reset, return */
5661
if (!qm->err_ini->err_info_init)
5662
return 0;
5663
qm->err_ini->err_info_init(qm);
5664
5665
if (!handle)
5666
return 0;
5667
5668
/* No reset method, return */
5669
if (!acpi_has_method(handle, qm->err_info.acpi_rst))
5670
return 0;
5671
5672
ret = qm_master_ooo_check(qm);
5673
if (ret) {
5674
writel(0x0, qm->io_base + ACC_MASTER_GLOBAL_CTRL);
5675
return ret;
5676
}
5677
5678
if (qm->err_ini->set_priv_status) {
5679
ret = qm->err_ini->set_priv_status(qm);
5680
if (ret) {
5681
writel(0x0, qm->io_base + ACC_MASTER_GLOBAL_CTRL);
5682
return ret;
5683
}
5684
}
5685
5686
return qm_reset_device(qm);
5687
}
5688
5689
static int hisi_qm_pci_init(struct hisi_qm *qm)
5690
{
5691
struct pci_dev *pdev = qm->pdev;
5692
struct device *dev = &pdev->dev;
5693
unsigned int num_vec;
5694
int ret;
5695
5696
ret = pci_enable_device_mem(pdev);
5697
if (ret < 0) {
5698
dev_err(dev, "Failed to enable device mem!\n");
5699
return ret;
5700
}
5701
5702
ret = qm_get_pci_res(qm);
5703
if (ret)
5704
goto err_disable_pcidev;
5705
5706
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
5707
if (ret < 0)
5708
goto err_get_pci_res;
5709
pci_set_master(pdev);
5710
5711
num_vec = qm_get_irq_num(qm);
5712
if (!num_vec) {
5713
dev_err(dev, "Device irq num is zero!\n");
5714
ret = -EINVAL;
5715
goto err_get_pci_res;
5716
}
5717
num_vec = roundup_pow_of_two(num_vec);
5718
ret = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSI);
5719
if (ret < 0) {
5720
dev_err(dev, "Failed to enable MSI vectors!\n");
5721
goto err_get_pci_res;
5722
}
5723
5724
ret = qm_clear_device(qm);
5725
if (ret)
5726
goto err_free_vectors;
5727
5728
return 0;
5729
5730
err_free_vectors:
5731
pci_free_irq_vectors(pdev);
5732
err_get_pci_res:
5733
qm_put_pci_res(qm);
5734
err_disable_pcidev:
5735
pci_disable_device(pdev);
5736
return ret;
5737
}
5738
5739
static int hisi_qm_init_work(struct hisi_qm *qm)
5740
{
5741
int i;
5742
5743
for (i = 0; i < qm->qp_num; i++)
5744
INIT_WORK(&qm->poll_data[i].work, qm_work_process);
5745
5746
if (qm->fun_type == QM_HW_PF)
5747
INIT_WORK(&qm->rst_work, hisi_qm_controller_reset);
5748
5749
if (qm->ver > QM_HW_V2)
5750
INIT_WORK(&qm->cmd_process, qm_cmd_process);
5751
5752
qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM |
5753
WQ_UNBOUND, num_online_cpus(),
5754
pci_name(qm->pdev));
5755
if (!qm->wq) {
5756
pci_err(qm->pdev, "failed to alloc workqueue!\n");
5757
return -ENOMEM;
5758
}
5759
5760
return 0;
5761
}
5762
5763
static int hisi_qp_alloc_memory(struct hisi_qm *qm)
5764
{
5765
struct device *dev = &qm->pdev->dev;
5766
u16 sq_depth, cq_depth;
5767
size_t qp_dma_size;
5768
int i, ret;
5769
5770
qm->qp_array = kcalloc(qm->qp_num, sizeof(struct hisi_qp), GFP_KERNEL);
5771
if (!qm->qp_array)
5772
return -ENOMEM;
5773
5774
qm->poll_data = kcalloc(qm->qp_num, sizeof(struct hisi_qm_poll_data), GFP_KERNEL);
5775
if (!qm->poll_data) {
5776
kfree(qm->qp_array);
5777
return -ENOMEM;
5778
}
5779
5780
qm_get_xqc_depth(qm, &sq_depth, &cq_depth, QM_QP_DEPTH_CAP);
5781
5782
/* one more page for device or qp statuses */
5783
qp_dma_size = qm->sqe_size * sq_depth + sizeof(struct qm_cqe) * cq_depth;
5784
qp_dma_size = PAGE_ALIGN(qp_dma_size) + PAGE_SIZE;
5785
for (i = 0; i < qm->qp_num; i++) {
5786
qm->poll_data[i].qm = qm;
5787
ret = hisi_qp_memory_init(qm, qp_dma_size, i, sq_depth, cq_depth);
5788
if (ret)
5789
goto err_init_qp_mem;
5790
5791
dev_dbg(dev, "allocate qp dma buf size=%zx)\n", qp_dma_size);
5792
}
5793
5794
return 0;
5795
err_init_qp_mem:
5796
hisi_qp_memory_uninit(qm, i);
5797
5798
return ret;
5799
}
5800
5801
static int hisi_qm_alloc_rsv_buf(struct hisi_qm *qm)
5802
{
5803
struct qm_rsv_buf *xqc_buf = &qm->xqc_buf;
5804
struct qm_dma *xqc_dma = &xqc_buf->qcdma;
5805
struct device *dev = &qm->pdev->dev;
5806
size_t off = 0;
5807
5808
#define QM_XQC_BUF_INIT(xqc_buf, type) do { \
5809
(xqc_buf)->type = ((xqc_buf)->qcdma.va + (off)); \
5810
(xqc_buf)->type##_dma = (xqc_buf)->qcdma.dma + (off); \
5811
off += QMC_ALIGN(sizeof(struct qm_##type)); \
5812
} while (0)
5813
5814
xqc_dma->size = QMC_ALIGN(sizeof(struct qm_eqc)) +
5815
QMC_ALIGN(sizeof(struct qm_aeqc)) +
5816
QMC_ALIGN(sizeof(struct qm_sqc)) +
5817
QMC_ALIGN(sizeof(struct qm_cqc));
5818
xqc_dma->va = dma_alloc_coherent(dev, xqc_dma->size,
5819
&xqc_dma->dma, GFP_KERNEL);
5820
if (!xqc_dma->va)
5821
return -ENOMEM;
5822
5823
QM_XQC_BUF_INIT(xqc_buf, eqc);
5824
QM_XQC_BUF_INIT(xqc_buf, aeqc);
5825
QM_XQC_BUF_INIT(xqc_buf, sqc);
5826
QM_XQC_BUF_INIT(xqc_buf, cqc);
5827
5828
return 0;
5829
}
5830
5831
static int hisi_qm_memory_init(struct hisi_qm *qm)
5832
{
5833
struct device *dev = &qm->pdev->dev;
5834
int ret, total_func;
5835
size_t off = 0;
5836
5837
if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) {
5838
total_func = pci_sriov_get_totalvfs(qm->pdev) + 1;
5839
qm->factor = kcalloc(total_func, sizeof(struct qm_shaper_factor), GFP_KERNEL);
5840
if (!qm->factor)
5841
return -ENOMEM;
5842
5843
/* Only the PF value needs to be initialized */
5844
qm->factor[0].func_qos = QM_QOS_MAX_VAL;
5845
}
5846
5847
#define QM_INIT_BUF(qm, type, num) do { \
5848
(qm)->type = ((qm)->qdma.va + (off)); \
5849
(qm)->type##_dma = (qm)->qdma.dma + (off); \
5850
off += QMC_ALIGN(sizeof(struct qm_##type) * (num)); \
5851
} while (0)
5852
5853
idr_init(&qm->qp_idr);
5854
qm_get_xqc_depth(qm, &qm->eq_depth, &qm->aeq_depth, QM_XEQ_DEPTH_CAP);
5855
qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * qm->eq_depth) +
5856
QMC_ALIGN(sizeof(struct qm_aeqe) * qm->aeq_depth) +
5857
QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) +
5858
QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num);
5859
qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size, &qm->qdma.dma,
5860
GFP_ATOMIC);
5861
dev_dbg(dev, "allocate qm dma buf size=%zx)\n", qm->qdma.size);
5862
if (!qm->qdma.va) {
5863
ret = -ENOMEM;
5864
goto err_destroy_idr;
5865
}
5866
5867
QM_INIT_BUF(qm, eqe, qm->eq_depth);
5868
QM_INIT_BUF(qm, aeqe, qm->aeq_depth);
5869
QM_INIT_BUF(qm, sqc, qm->qp_num);
5870
QM_INIT_BUF(qm, cqc, qm->qp_num);
5871
5872
ret = hisi_qm_alloc_rsv_buf(qm);
5873
if (ret)
5874
goto err_free_qdma;
5875
5876
ret = hisi_qp_alloc_memory(qm);
5877
if (ret)
5878
goto err_free_reserve_buf;
5879
5880
return 0;
5881
5882
err_free_reserve_buf:
5883
hisi_qm_free_rsv_buf(qm);
5884
err_free_qdma:
5885
dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma);
5886
err_destroy_idr:
5887
idr_destroy(&qm->qp_idr);
5888
if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
5889
kfree(qm->factor);
5890
5891
return ret;
5892
}
5893
5894
/**
5895
* hisi_qm_init() - Initialize the configuration of the qm.
5896
* @qm: The qm needing init.
5897
*
5898
* This function initializes the qm; hisi_qm_start() can then put the qm to work.
5899
*/
5900
int hisi_qm_init(struct hisi_qm *qm)
5901
{
5902
struct pci_dev *pdev = qm->pdev;
5903
struct device *dev = &pdev->dev;
5904
int ret;
5905
5906
ret = hisi_qm_pci_init(qm);
5907
if (ret)
5908
return ret;
5909
5910
ret = qm_irqs_register(qm);
5911
if (ret)
5912
goto err_pci_init;
5913
5914
if (qm->fun_type == QM_HW_PF) {
5915
/* Set the doorbell timeout to QM_DB_TIMEOUT_SET ns. */
5916
writel(QM_DB_TIMEOUT_SET, qm->io_base + QM_DB_TIMEOUT_CFG);
5917
qm_disable_clock_gate(qm);
5918
ret = qm_dev_mem_reset(qm);
5919
if (ret) {
5920
dev_err(dev, "failed to reset device memory\n");
5921
goto err_irq_register;
5922
}
5923
}
5924
5925
if (qm->mode == UACCE_MODE_SVA) {
5926
ret = qm_alloc_uacce(qm);
5927
if (ret < 0)
5928
dev_warn(dev, "fail to alloc uacce (%d)\n", ret);
5929
}
5930
5931
ret = hisi_qm_memory_init(qm);
5932
if (ret)
5933
goto err_alloc_uacce;
5934
5935
ret = hisi_qm_init_work(qm);
5936
if (ret)
5937
goto err_free_qm_memory;
5938
5939
qm_cmd_init(qm);
5940
hisi_mig_region_enable(qm);
5941
5942
return 0;
5943
5944
err_free_qm_memory:
5945
hisi_qm_memory_uninit(qm);
5946
err_alloc_uacce:
5947
qm_remove_uacce(qm);
5948
err_irq_register:
5949
qm_irqs_unregister(qm);
5950
err_pci_init:
5951
hisi_qm_pci_uninit(qm);
5952
return ret;
5953
}
5954
EXPORT_SYMBOL_GPL(hisi_qm_init);
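/*
 * Minimal probe-path sketch (assumption, not part of this driver): once the
 * driver has filled in qm->pdev, qm->fun_type, qm->sqe_size and friends,
 * bring-up follows this order. hisi_qm_init(), hisi_qm_start() and
 * hisi_qm_uninit() are the real exports.
 */
static int demo_qm_bring_up(struct hisi_qm *qm)
{
	int ret;

	ret = hisi_qm_init(qm);		/* PCI, IRQ, memory and workqueue setup */
	if (ret)
		return ret;

	ret = hisi_qm_start(qm);	/* put the initialized qm into work */
	if (ret)
		hisi_qm_uninit(qm);

	return ret;
}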
5955
5956
/**
5957
* hisi_qm_get_dfx_access() - Try to get dfx access.
5958
* @qm: pointer to accelerator device.
5959
*
5960
* Try to get dfx access, so that the user can read debug messages.
5961
*
5962
* If the device is suspended, return failure; otherwise
5963
* bump up the runtime PM usage counter.
5964
*/
5965
int hisi_qm_get_dfx_access(struct hisi_qm *qm)
5966
{
5967
struct device *dev = &qm->pdev->dev;
5968
5969
if (pm_runtime_suspended(dev)) {
5970
dev_info(dev, "can not read/write - device in suspended.\n");
5971
return -EAGAIN;
5972
}
5973
5974
return qm_pm_get_sync(qm);
5975
}
5976
EXPORT_SYMBOL_GPL(hisi_qm_get_dfx_access);
5977
5978
/**
5979
* hisi_qm_put_dfx_access() - Put dfx access.
5980
* @qm: pointer to accelerator device.
5981
*
5982
* Put dfx access, drop runtime PM usage counter.
5983
*/
5984
void hisi_qm_put_dfx_access(struct hisi_qm *qm)
5985
{
5986
qm_pm_put_sync(qm);
5987
}
5988
EXPORT_SYMBOL_GPL(hisi_qm_put_dfx_access);
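/*
 * Sketch of the intended pairing (it mirrors qm_algqos_read() above): a
 * debugfs reader grabs dfx access first so that the device cannot runtime
 * suspend underneath it, and drops the access when done. demo_show_regs() is
 * an assumed driver-side helper.
 */
static void demo_show_regs(struct hisi_qm *qm);

static int demo_dfx_show(struct hisi_qm *qm)
{
	int ret;

	ret = hisi_qm_get_dfx_access(qm);
	if (ret)
		return ret;

	demo_show_regs(qm);		/* hypothetical register dump */

	hisi_qm_put_dfx_access(qm);
	return 0;
}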
5989
5990
/**
5991
* hisi_qm_pm_init() - Initialize qm runtime PM.
5992
* @qm: pointer to accelerator device.
5993
*
5994
* Function that initializes qm runtime PM.
5995
*/
5996
void hisi_qm_pm_init(struct hisi_qm *qm)
5997
{
5998
struct device *dev = &qm->pdev->dev;
5999
6000
if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
6001
return;
6002
6003
pm_runtime_set_autosuspend_delay(dev, QM_AUTOSUSPEND_DELAY);
6004
pm_runtime_use_autosuspend(dev);
6005
pm_runtime_put_noidle(dev);
6006
}
6007
EXPORT_SYMBOL_GPL(hisi_qm_pm_init);
6008
6009
/**
6010
* hisi_qm_pm_uninit() - Uninitialize qm runtime PM.
6011
* @qm: pointer to accelerator device.
6012
*
6013
* Function that uninitializes qm runtime PM.
6014
*/
6015
void hisi_qm_pm_uninit(struct hisi_qm *qm)
6016
{
6017
struct device *dev = &qm->pdev->dev;
6018
6019
if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
6020
return;
6021
6022
pm_runtime_get_noresume(dev);
6023
pm_runtime_dont_use_autosuspend(dev);
6024
}
6025
EXPORT_SYMBOL_GPL(hisi_qm_pm_uninit);
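/*
 * Sketch (assumption): a driver calls hisi_qm_pm_init() at the tail of its
 * probe, once the device is fully set up, and hisi_qm_pm_uninit() at the
 * start of its remove path, so that the runtime PM usage counter adjusted by
 * the two helpers above stays balanced.
 */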
6026
6027
static int qm_prepare_for_suspend(struct hisi_qm *qm)
6028
{
6029
struct pci_dev *pdev = qm->pdev;
6030
int ret;
6031
6032
ret = qm->ops->set_msi(qm, false);
6033
if (ret) {
6034
pci_err(pdev, "failed to disable MSI before suspending!\n");
6035
return ret;
6036
}
6037
6038
ret = qm_master_ooo_check(qm);
6039
if (ret)
6040
return ret;
6041
6042
if (qm->err_ini->set_priv_status) {
6043
ret = qm->err_ini->set_priv_status(qm);
6044
if (ret)
6045
return ret;
6046
}
6047
6048
ret = qm_set_pf_mse(qm, false);
6049
if (ret)
6050
pci_err(pdev, "failed to disable MSE before suspending!\n");
6051
6052
return ret;
6053
}
6054
6055
static int qm_rebuild_for_resume(struct hisi_qm *qm)
6056
{
6057
struct pci_dev *pdev = qm->pdev;
6058
int ret;
6059
6060
ret = qm_set_pf_mse(qm, true);
6061
if (ret) {
6062
pci_err(pdev, "failed to enable MSE after resuming!\n");
6063
return ret;
6064
}
6065
6066
ret = qm->ops->set_msi(qm, true);
6067
if (ret) {
6068
pci_err(pdev, "failed to enable MSI after resuming!\n");
6069
return ret;
6070
}
6071
6072
ret = qm_dev_hw_init(qm);
6073
if (ret) {
6074
pci_err(pdev, "failed to init device after resuming\n");
6075
return ret;
6076
}
6077
6078
qm_cmd_init(qm);
6079
hisi_mig_region_enable(qm);
6080
hisi_qm_dev_err_init(qm);
6081
/* Set the doorbell timeout to QM_DB_TIMEOUT_SET ns. */
6082
writel(QM_DB_TIMEOUT_SET, qm->io_base + QM_DB_TIMEOUT_CFG);
6083
qm_disable_clock_gate(qm);
6084
ret = qm_dev_mem_reset(qm);
6085
if (ret)
6086
pci_err(pdev, "failed to reset device memory\n");
6087
6088
return ret;
6089
}
6090
6091
/**
6092
* hisi_qm_suspend() - Runtime suspend of given device.
6093
* @dev: device to suspend.
6094
*
6095
* Function that suspends the device.
6096
*/
6097
int hisi_qm_suspend(struct device *dev)
6098
{
6099
struct pci_dev *pdev = to_pci_dev(dev);
6100
struct hisi_qm *qm = pci_get_drvdata(pdev);
6101
int ret;
6102
6103
pci_info(pdev, "entering suspended state\n");
6104
6105
ret = hisi_qm_stop(qm, QM_NORMAL);
6106
if (ret) {
6107
pci_err(pdev, "failed to stop qm(%d)\n", ret);
6108
return ret;
6109
}
6110
6111
ret = qm_prepare_for_suspend(qm);
6112
if (ret)
6113
pci_err(pdev, "failed to prepare suspended(%d)\n", ret);
6114
6115
return ret;
6116
}
6117
EXPORT_SYMBOL_GPL(hisi_qm_suspend);
6118
6119
/**
6120
* hisi_qm_resume() - Runtime resume of given device.
6121
* @dev: device to resume.
6122
*
6123
* Function that resumes the device.
6124
*/
6125
int hisi_qm_resume(struct device *dev)
6126
{
6127
struct pci_dev *pdev = to_pci_dev(dev);
6128
struct hisi_qm *qm = pci_get_drvdata(pdev);
6129
int ret;
6130
6131
pci_info(pdev, "resuming from suspend state\n");
6132
6133
ret = qm_rebuild_for_resume(qm);
6134
if (ret) {
6135
pci_err(pdev, "failed to rebuild resume(%d)\n", ret);
6136
return ret;
6137
}
6138
6139
ret = hisi_qm_start(qm);
6140
if (ret) {
6141
if (qm_check_dev_error(qm)) {
6142
pci_info(pdev, "failed to start qm due to device error, device will be reset!\n");
6143
return 0;
6144
}
6145
6146
pci_err(pdev, "failed to start qm(%d)!\n", ret);
6147
}
6148
6149
return ret;
6150
}
6151
EXPORT_SYMBOL_GPL(hisi_qm_resume);
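/*
 * Minimal sketch (not part of this driver): the runtime PM entry points above
 * are usually hooked up through a dev_pm_ops table referenced from the PCI
 * driver's .driver.pm field. The table name is an assumption.
 */
static const struct dev_pm_ops demo_pm_ops = {
	SET_RUNTIME_PM_OPS(hisi_qm_suspend, hisi_qm_resume, NULL)
};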
6152
6153
MODULE_LICENSE("GPL v2");
6154
MODULE_AUTHOR("Zhou Wang <[email protected]>");
6155
MODULE_DESCRIPTION("HiSilicon Accelerator queue manager driver");
6156
6157