Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/drivers/crypto/inside-secure/safexcel.c
26285 views
1
// SPDX-License-Identifier: GPL-2.0
2
/*
3
* Copyright (C) 2017 Marvell
4
*
5
* Antoine Tenart <[email protected]>
6
*/
7
8
#include <linux/clk.h>
9
#include <linux/device.h>
10
#include <linux/dma-mapping.h>
11
#include <linux/dmapool.h>
12
#include <linux/firmware.h>
13
#include <linux/interrupt.h>
14
#include <linux/module.h>
15
#include <linux/of_platform.h>
16
#include <linux/of_irq.h>
17
#include <linux/pci.h>
18
#include <linux/platform_device.h>
19
#include <linux/workqueue.h>
20
21
#include <crypto/internal/aead.h>
22
#include <crypto/internal/hash.h>
23
#include <crypto/internal/skcipher.h>
24
25
#include "safexcel.h"
26
27
static u32 max_rings = EIP197_MAX_RINGS;
28
module_param(max_rings, uint, 0644);
29
MODULE_PARM_DESC(max_rings, "Maximum number of rings to use.");
30
31
static void eip197_trc_cache_setupvirt(struct safexcel_crypto_priv *priv)
32
{
33
int i;
34
35
/*
36
* Map all interfaces/rings to register index 0
37
* so they can share contexts. Without this, the EIP197 will
38
* assume each interface/ring to be in its own memory domain
39
* i.e. have its own subset of UNIQUE memory addresses.
40
* Which would cause records with the SAME memory address to
41
* use DIFFERENT cache buffers, causing both poor cache utilization
42
* AND serious coherence/invalidation issues.
43
*/
44
for (i = 0; i < 4; i++)
45
writel(0, priv->base + EIP197_FLUE_IFC_LUT(i));
46
47
/*
48
* Initialize other virtualization regs for cache
49
* These may not be in their reset state ...
50
*/
51
for (i = 0; i < priv->config.rings; i++) {
52
writel(0, priv->base + EIP197_FLUE_CACHEBASE_LO(i));
53
writel(0, priv->base + EIP197_FLUE_CACHEBASE_HI(i));
54
writel(EIP197_FLUE_CONFIG_MAGIC,
55
priv->base + EIP197_FLUE_CONFIG(i));
56
}
57
writel(0, priv->base + EIP197_FLUE_OFFSETS);
58
writel(0, priv->base + EIP197_FLUE_ARC4_OFFSET);
59
}
60
61
static void eip197_trc_cache_banksel(struct safexcel_crypto_priv *priv,
62
u32 addrmid, int *actbank)
63
{
64
u32 val;
65
int curbank;
66
67
curbank = addrmid >> 16;
68
if (curbank != *actbank) {
69
val = readl(priv->base + EIP197_CS_RAM_CTRL);
70
val = (val & ~EIP197_CS_BANKSEL_MASK) |
71
(curbank << EIP197_CS_BANKSEL_OFS);
72
writel(val, priv->base + EIP197_CS_RAM_CTRL);
73
*actbank = curbank;
74
}
75
}
76
77
static u32 eip197_trc_cache_probe(struct safexcel_crypto_priv *priv,
78
int maxbanks, u32 probemask, u32 stride)
79
{
80
u32 val, addrhi, addrlo, addrmid, addralias, delta, marker;
81
int actbank;
82
83
/*
84
* And probe the actual size of the physically attached cache data RAM
85
* Using a binary subdivision algorithm downto 32 byte cache lines.
86
*/
87
addrhi = 1 << (16 + maxbanks);
88
addrlo = 0;
89
actbank = min(maxbanks - 1, 0);
90
while ((addrhi - addrlo) > stride) {
91
/* write marker to lowest address in top half */
92
addrmid = (addrhi + addrlo) >> 1;
93
marker = (addrmid ^ 0xabadbabe) & probemask; /* Unique */
94
eip197_trc_cache_banksel(priv, addrmid, &actbank);
95
writel(marker,
96
priv->base + EIP197_CLASSIFICATION_RAMS +
97
(addrmid & 0xffff));
98
99
/* write invalid markers to possible aliases */
100
delta = 1 << __fls(addrmid);
101
while (delta >= stride) {
102
addralias = addrmid - delta;
103
eip197_trc_cache_banksel(priv, addralias, &actbank);
104
writel(~marker,
105
priv->base + EIP197_CLASSIFICATION_RAMS +
106
(addralias & 0xffff));
107
delta >>= 1;
108
}
109
110
/* read back marker from top half */
111
eip197_trc_cache_banksel(priv, addrmid, &actbank);
112
val = readl(priv->base + EIP197_CLASSIFICATION_RAMS +
113
(addrmid & 0xffff));
114
115
if ((val & probemask) == marker)
116
/* read back correct, continue with top half */
117
addrlo = addrmid;
118
else
119
/* not read back correct, continue with bottom half */
120
addrhi = addrmid;
121
}
122
return addrhi;
123
}
124
125
static void eip197_trc_cache_clear(struct safexcel_crypto_priv *priv,
126
int cs_rc_max, int cs_ht_wc)
127
{
128
int i;
129
u32 htable_offset, val, offset;
130
131
/* Clear all records in administration RAM */
132
for (i = 0; i < cs_rc_max; i++) {
133
offset = EIP197_CLASSIFICATION_RAMS + i * EIP197_CS_RC_SIZE;
134
135
writel(EIP197_CS_RC_NEXT(EIP197_RC_NULL) |
136
EIP197_CS_RC_PREV(EIP197_RC_NULL),
137
priv->base + offset);
138
139
val = EIP197_CS_RC_NEXT(i + 1) | EIP197_CS_RC_PREV(i - 1);
140
if (i == 0)
141
val |= EIP197_CS_RC_PREV(EIP197_RC_NULL);
142
else if (i == cs_rc_max - 1)
143
val |= EIP197_CS_RC_NEXT(EIP197_RC_NULL);
144
writel(val, priv->base + offset + 4);
145
/* must also initialize the address key due to ECC! */
146
writel(0, priv->base + offset + 8);
147
writel(0, priv->base + offset + 12);
148
}
149
150
/* Clear the hash table entries */
151
htable_offset = cs_rc_max * EIP197_CS_RC_SIZE;
152
for (i = 0; i < cs_ht_wc; i++)
153
writel(GENMASK(29, 0),
154
priv->base + EIP197_CLASSIFICATION_RAMS +
155
htable_offset + i * sizeof(u32));
156
}
157
158
static int eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
159
{
160
u32 val, dsize, asize;
161
int cs_rc_max, cs_ht_wc, cs_trc_rec_wc, cs_trc_lg_rec_wc;
162
int cs_rc_abs_max, cs_ht_sz;
163
int maxbanks;
164
165
/* Setup (dummy) virtualization for cache */
166
eip197_trc_cache_setupvirt(priv);
167
168
/*
169
* Enable the record cache memory access and
170
* probe the bank select width
171
*/
172
val = readl(priv->base + EIP197_CS_RAM_CTRL);
173
val &= ~EIP197_TRC_ENABLE_MASK;
174
val |= EIP197_TRC_ENABLE_0 | EIP197_CS_BANKSEL_MASK;
175
writel(val, priv->base + EIP197_CS_RAM_CTRL);
176
val = readl(priv->base + EIP197_CS_RAM_CTRL);
177
maxbanks = ((val&EIP197_CS_BANKSEL_MASK)>>EIP197_CS_BANKSEL_OFS) + 1;
178
179
/* Clear all ECC errors */
180
writel(0, priv->base + EIP197_TRC_ECCCTRL);
181
182
/*
183
* Make sure the cache memory is accessible by taking record cache into
184
* reset. Need data memory access here, not admin access.
185
*/
186
val = readl(priv->base + EIP197_TRC_PARAMS);
187
val |= EIP197_TRC_PARAMS_SW_RESET | EIP197_TRC_PARAMS_DATA_ACCESS;
188
writel(val, priv->base + EIP197_TRC_PARAMS);
189
190
/* Probed data RAM size in bytes */
191
dsize = eip197_trc_cache_probe(priv, maxbanks, 0xffffffff, 32);
192
193
/*
194
* Now probe the administration RAM size pretty much the same way
195
* Except that only the lower 30 bits are writable and we don't need
196
* bank selects
197
*/
198
val = readl(priv->base + EIP197_TRC_PARAMS);
199
/* admin access now */
200
val &= ~(EIP197_TRC_PARAMS_DATA_ACCESS | EIP197_CS_BANKSEL_MASK);
201
writel(val, priv->base + EIP197_TRC_PARAMS);
202
203
/* Probed admin RAM size in admin words */
204
asize = eip197_trc_cache_probe(priv, 0, 0x3fffffff, 16) >> 4;
205
206
/* Clear any ECC errors detected while probing! */
207
writel(0, priv->base + EIP197_TRC_ECCCTRL);
208
209
/* Sanity check probing results */
210
if (dsize < EIP197_MIN_DSIZE || asize < EIP197_MIN_ASIZE) {
211
dev_err(priv->dev, "Record cache probing failed (%d,%d).",
212
dsize, asize);
213
return -ENODEV;
214
}
215
216
/*
217
* Determine optimal configuration from RAM sizes
218
* Note that we assume that the physical RAM configuration is sane
219
* Therefore, we don't do any parameter error checking here ...
220
*/
221
222
/* For now, just use a single record format covering everything */
223
cs_trc_rec_wc = EIP197_CS_TRC_REC_WC;
224
cs_trc_lg_rec_wc = EIP197_CS_TRC_REC_WC;
225
226
/*
227
* Step #1: How many records will physically fit?
228
* Hard upper limit is 1023!
229
*/
230
cs_rc_abs_max = min_t(uint, ((dsize >> 2) / cs_trc_lg_rec_wc), 1023);
231
/* Step #2: Need at least 2 words in the admin RAM per record */
232
cs_rc_max = min_t(uint, cs_rc_abs_max, (asize >> 1));
233
/* Step #3: Determine log2 of hash table size */
234
cs_ht_sz = __fls(asize - cs_rc_max) - 2;
235
/* Step #4: determine current size of hash table in dwords */
236
cs_ht_wc = 16 << cs_ht_sz; /* dwords, not admin words */
237
/* Step #5: add back excess words and see if we can fit more records */
238
cs_rc_max = min_t(uint, cs_rc_abs_max, asize - (cs_ht_wc >> 2));
239
240
/* Clear the cache RAMs */
241
eip197_trc_cache_clear(priv, cs_rc_max, cs_ht_wc);
242
243
/* Disable the record cache memory access */
244
val = readl(priv->base + EIP197_CS_RAM_CTRL);
245
val &= ~EIP197_TRC_ENABLE_MASK;
246
writel(val, priv->base + EIP197_CS_RAM_CTRL);
247
248
/* Write head and tail pointers of the record free chain */
249
val = EIP197_TRC_FREECHAIN_HEAD_PTR(0) |
250
EIP197_TRC_FREECHAIN_TAIL_PTR(cs_rc_max - 1);
251
writel(val, priv->base + EIP197_TRC_FREECHAIN);
252
253
/* Configure the record cache #1 */
254
val = EIP197_TRC_PARAMS2_RC_SZ_SMALL(cs_trc_rec_wc) |
255
EIP197_TRC_PARAMS2_HTABLE_PTR(cs_rc_max);
256
writel(val, priv->base + EIP197_TRC_PARAMS2);
257
258
/* Configure the record cache #2 */
259
val = EIP197_TRC_PARAMS_RC_SZ_LARGE(cs_trc_lg_rec_wc) |
260
EIP197_TRC_PARAMS_BLK_TIMER_SPEED(1) |
261
EIP197_TRC_PARAMS_HTABLE_SZ(cs_ht_sz);
262
writel(val, priv->base + EIP197_TRC_PARAMS);
263
264
dev_info(priv->dev, "TRC init: %dd,%da (%dr,%dh)\n",
265
dsize, asize, cs_rc_max, cs_ht_wc + cs_ht_wc);
266
return 0;
267
}
268
269
static void eip197_init_firmware(struct safexcel_crypto_priv *priv)
270
{
271
int pe, i;
272
u32 val;
273
274
for (pe = 0; pe < priv->config.pes; pe++) {
275
/* Configure the token FIFO's */
276
writel(3, EIP197_PE(priv) + EIP197_PE_ICE_PUTF_CTRL(pe));
277
writel(0, EIP197_PE(priv) + EIP197_PE_ICE_PPTF_CTRL(pe));
278
279
/* Clear the ICE scratchpad memory */
280
val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));
281
val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER |
282
EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN |
283
EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS |
284
EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
285
writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));
286
287
/* clear the scratchpad RAM using 32 bit writes only */
288
for (i = 0; i < EIP197_NUM_OF_SCRATCH_BLOCKS; i++)
289
writel(0, EIP197_PE(priv) +
290
EIP197_PE_ICE_SCRATCH_RAM(pe) + (i << 2));
291
292
/* Reset the IFPP engine to make its program mem accessible */
293
writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
294
EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
295
EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
296
EIP197_PE(priv) + EIP197_PE_ICE_FPP_CTRL(pe));
297
298
/* Reset the IPUE engine to make its program mem accessible */
299
writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
300
EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
301
EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
302
EIP197_PE(priv) + EIP197_PE_ICE_PUE_CTRL(pe));
303
304
/* Enable access to all IFPP program memories */
305
writel(EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN,
306
EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));
307
308
/* bypass the OCE, if present */
309
if (priv->flags & EIP197_OCE)
310
writel(EIP197_DEBUG_OCE_BYPASS, EIP197_PE(priv) +
311
EIP197_PE_DEBUG(pe));
312
}
313
314
}
315
316
static int eip197_write_firmware(struct safexcel_crypto_priv *priv,
317
const struct firmware *fw)
318
{
319
u32 val;
320
int i;
321
322
/* Write the firmware */
323
for (i = 0; i < fw->size / sizeof(u32); i++) {
324
if (priv->data->fw_little_endian)
325
val = le32_to_cpu(((const __le32 *)fw->data)[i]);
326
else
327
val = be32_to_cpu(((const __be32 *)fw->data)[i]);
328
329
writel(val,
330
priv->base + EIP197_CLASSIFICATION_RAMS +
331
i * sizeof(val));
332
}
333
334
/* Exclude final 2 NOPs from size */
335
return i - EIP197_FW_TERMINAL_NOPS;
336
}
337
338
/*
339
* If FW is actual production firmware, then poll for its initialization
340
* to complete and check if it is good for the HW, otherwise just return OK.
341
*/
342
static bool poll_fw_ready(struct safexcel_crypto_priv *priv, int fpp)
343
{
344
int pe, pollcnt;
345
u32 base, pollofs;
346
347
if (fpp)
348
pollofs = EIP197_FW_FPP_READY;
349
else
350
pollofs = EIP197_FW_PUE_READY;
351
352
for (pe = 0; pe < priv->config.pes; pe++) {
353
base = EIP197_PE_ICE_SCRATCH_RAM(pe);
354
pollcnt = EIP197_FW_START_POLLCNT;
355
while (pollcnt &&
356
(readl_relaxed(EIP197_PE(priv) + base +
357
pollofs) != 1)) {
358
pollcnt--;
359
}
360
if (!pollcnt) {
361
dev_err(priv->dev, "FW(%d) for PE %d failed to start\n",
362
fpp, pe);
363
return false;
364
}
365
}
366
return true;
367
}
368
369
static bool eip197_start_firmware(struct safexcel_crypto_priv *priv,
370
int ipuesz, int ifppsz, int minifw)
371
{
372
int pe;
373
u32 val;
374
375
for (pe = 0; pe < priv->config.pes; pe++) {
376
/* Disable access to all program memory */
377
writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));
378
379
/* Start IFPP microengines */
380
if (minifw)
381
val = 0;
382
else
383
val = EIP197_PE_ICE_UENG_START_OFFSET((ifppsz - 1) &
384
EIP197_PE_ICE_UENG_INIT_ALIGN_MASK) |
385
EIP197_PE_ICE_UENG_DEBUG_RESET;
386
writel(val, EIP197_PE(priv) + EIP197_PE_ICE_FPP_CTRL(pe));
387
388
/* Start IPUE microengines */
389
if (minifw)
390
val = 0;
391
else
392
val = EIP197_PE_ICE_UENG_START_OFFSET((ipuesz - 1) &
393
EIP197_PE_ICE_UENG_INIT_ALIGN_MASK) |
394
EIP197_PE_ICE_UENG_DEBUG_RESET;
395
writel(val, EIP197_PE(priv) + EIP197_PE_ICE_PUE_CTRL(pe));
396
}
397
398
/* For miniFW startup, there is no initialization, so always succeed */
399
if (minifw)
400
return true;
401
402
/* Wait until all the firmwares have properly started up */
403
if (!poll_fw_ready(priv, 1))
404
return false;
405
if (!poll_fw_ready(priv, 0))
406
return false;
407
408
return true;
409
}
410
411
static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
412
{
413
const char *fw_name[] = {"ifpp.bin", "ipue.bin"};
414
const struct firmware *fw[FW_NB];
415
char fw_path[37], *dir = NULL;
416
int i, j, ret = 0, pe;
417
int ipuesz, ifppsz, minifw = 0;
418
419
if (priv->data->version == EIP197D_MRVL)
420
dir = "eip197d";
421
else if (priv->data->version == EIP197B_MRVL ||
422
priv->data->version == EIP197_DEVBRD)
423
dir = "eip197b";
424
else if (priv->data->version == EIP197C_MXL)
425
dir = "eip197c";
426
else
427
return -ENODEV;
428
429
retry_fw:
430
for (i = 0; i < FW_NB; i++) {
431
snprintf(fw_path, 37, "inside-secure/%s/%s", dir, fw_name[i]);
432
ret = firmware_request_nowarn(&fw[i], fw_path, priv->dev);
433
if (ret) {
434
if (minifw || priv->data->version != EIP197B_MRVL)
435
goto release_fw;
436
437
/* Fallback to the old firmware location for the
438
* EIP197b.
439
*/
440
ret = firmware_request_nowarn(&fw[i], fw_name[i],
441
priv->dev);
442
if (ret)
443
goto release_fw;
444
}
445
}
446
447
eip197_init_firmware(priv);
448
449
ifppsz = eip197_write_firmware(priv, fw[FW_IFPP]);
450
451
/* Enable access to IPUE program memories */
452
for (pe = 0; pe < priv->config.pes; pe++)
453
writel(EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN,
454
EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));
455
456
ipuesz = eip197_write_firmware(priv, fw[FW_IPUE]);
457
458
if (eip197_start_firmware(priv, ipuesz, ifppsz, minifw)) {
459
dev_dbg(priv->dev, "Firmware loaded successfully\n");
460
return 0;
461
}
462
463
ret = -ENODEV;
464
465
release_fw:
466
for (j = 0; j < i; j++)
467
release_firmware(fw[j]);
468
469
if (!minifw) {
470
/* Retry with minifw path */
471
dev_dbg(priv->dev, "Firmware set not (fully) present or init failed, falling back to BCLA mode\n");
472
dir = "eip197_minifw";
473
minifw = 1;
474
goto retry_fw;
475
}
476
477
dev_err(priv->dev, "Firmware load failed.\n");
478
479
return ret;
480
}
481
482
static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
483
{
484
u32 cd_size_rnd, val;
485
int i, cd_fetch_cnt;
486
487
cd_size_rnd = (priv->config.cd_size +
488
(BIT(priv->hwconfig.hwdataw) - 1)) >>
489
priv->hwconfig.hwdataw;
490
/* determine number of CD's we can fetch into the CD FIFO as 1 block */
491
if (priv->flags & SAFEXCEL_HW_EIP197) {
492
/* EIP197: try to fetch enough in 1 go to keep all pipes busy */
493
cd_fetch_cnt = (1 << priv->hwconfig.hwcfsize) / cd_size_rnd;
494
cd_fetch_cnt = min_t(uint, cd_fetch_cnt,
495
(priv->config.pes * EIP197_FETCH_DEPTH));
496
} else {
497
/* for the EIP97, just fetch all that fits minus 1 */
498
cd_fetch_cnt = ((1 << priv->hwconfig.hwcfsize) /
499
cd_size_rnd) - 1;
500
}
501
/*
502
* Since we're using command desc's way larger than formally specified,
503
* we need to check whether we can fit even 1 for low-end EIP196's!
504
*/
505
if (!cd_fetch_cnt) {
506
dev_err(priv->dev, "Unable to fit even 1 command desc!\n");
507
return -ENODEV;
508
}
509
510
for (i = 0; i < priv->config.rings; i++) {
511
/* ring base address */
512
writel(lower_32_bits(priv->ring[i].cdr.base_dma),
513
EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
514
writel(upper_32_bits(priv->ring[i].cdr.base_dma),
515
EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
516
517
writel(EIP197_xDR_DESC_MODE_64BIT | EIP197_CDR_DESC_MODE_ADCP |
518
(priv->config.cd_offset << 14) | priv->config.cd_size,
519
EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
520
writel(((cd_fetch_cnt *
521
(cd_size_rnd << priv->hwconfig.hwdataw)) << 16) |
522
(cd_fetch_cnt * (priv->config.cd_offset / sizeof(u32))),
523
EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);
524
525
/* Configure DMA tx control */
526
val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
527
val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
528
writel(val, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);
529
530
/* clear any pending interrupt */
531
writel(GENMASK(5, 0),
532
EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
533
}
534
535
return 0;
536
}
537
538
static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
539
{
540
u32 rd_size_rnd, val;
541
int i, rd_fetch_cnt;
542
543
/* determine number of RD's we can fetch into the FIFO as one block */
544
rd_size_rnd = (EIP197_RD64_FETCH_SIZE +
545
(BIT(priv->hwconfig.hwdataw) - 1)) >>
546
priv->hwconfig.hwdataw;
547
if (priv->flags & SAFEXCEL_HW_EIP197) {
548
/* EIP197: try to fetch enough in 1 go to keep all pipes busy */
549
rd_fetch_cnt = (1 << priv->hwconfig.hwrfsize) / rd_size_rnd;
550
rd_fetch_cnt = min_t(uint, rd_fetch_cnt,
551
(priv->config.pes * EIP197_FETCH_DEPTH));
552
} else {
553
/* for the EIP97, just fetch all that fits minus 1 */
554
rd_fetch_cnt = ((1 << priv->hwconfig.hwrfsize) /
555
rd_size_rnd) - 1;
556
}
557
558
for (i = 0; i < priv->config.rings; i++) {
559
/* ring base address */
560
writel(lower_32_bits(priv->ring[i].rdr.base_dma),
561
EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
562
writel(upper_32_bits(priv->ring[i].rdr.base_dma),
563
EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
564
565
writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 14) |
566
priv->config.rd_size,
567
EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
568
569
writel(((rd_fetch_cnt *
570
(rd_size_rnd << priv->hwconfig.hwdataw)) << 16) |
571
(rd_fetch_cnt * (priv->config.rd_offset / sizeof(u32))),
572
EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);
573
574
/* Configure DMA tx control */
575
val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
576
val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
577
val |= EIP197_HIA_xDR_WR_RES_BUF | EIP197_HIA_xDR_WR_CTRL_BUF;
578
writel(val,
579
EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);
580
581
/* clear any pending interrupt */
582
writel(GENMASK(7, 0),
583
EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);
584
585
/* enable ring interrupt */
586
val = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
587
val |= EIP197_RDR_IRQ(i);
588
writel(val, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
589
}
590
591
return 0;
592
}
593
594
static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
595
{
596
u32 val;
597
int i, ret, pe, opbuflo, opbufhi;
598
599
dev_dbg(priv->dev, "HW init: using %d pipe(s) and %d ring(s)\n",
600
priv->config.pes, priv->config.rings);
601
602
/*
603
* For EIP197's only set maximum number of TX commands to 2^5 = 32
604
* Skip for the EIP97 as it does not have this field.
605
*/
606
if (priv->flags & SAFEXCEL_HW_EIP197) {
607
val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
608
val |= EIP197_MST_CTRL_TX_MAX_CMD(5);
609
writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
610
}
611
612
/* Configure wr/rd cache values */
613
writel(EIP197_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) |
614
EIP197_MST_CTRL_WD_CACHE(WR_CACHE_4BITS),
615
EIP197_HIA_GEN_CFG(priv) + EIP197_MST_CTRL);
616
617
/* Interrupts reset */
618
619
/* Disable all global interrupts */
620
writel(0, EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ENABLE_CTRL);
621
622
/* Clear any pending interrupt */
623
writel(GENMASK(31, 0), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);
624
625
/* Processing Engine configuration */
626
for (pe = 0; pe < priv->config.pes; pe++) {
627
/* Data Fetch Engine configuration */
628
629
/* Reset all DFE threads */
630
writel(EIP197_DxE_THR_CTRL_RESET_PE,
631
EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));
632
633
if (priv->flags & EIP197_PE_ARB)
634
/* Reset HIA input interface arbiter (if present) */
635
writel(EIP197_HIA_RA_PE_CTRL_RESET,
636
EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));
637
638
/* DMA transfer size to use */
639
val = EIP197_HIA_DFE_CFG_DIS_DEBUG;
640
val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(6) |
641
EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(9);
642
val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(6) |
643
EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7);
644
val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS);
645
val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS);
646
writel(val, EIP197_HIA_DFE(priv) + EIP197_HIA_DFE_CFG(pe));
647
648
/* Leave the DFE threads reset state */
649
writel(0, EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));
650
651
/* Configure the processing engine thresholds */
652
writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
653
EIP197_PE_IN_xBUF_THRES_MAX(9),
654
EIP197_PE(priv) + EIP197_PE_IN_DBUF_THRES(pe));
655
writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
656
EIP197_PE_IN_xBUF_THRES_MAX(7),
657
EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES(pe));
658
659
if (priv->flags & SAFEXCEL_HW_EIP197)
660
/* enable HIA input interface arbiter and rings */
661
writel(EIP197_HIA_RA_PE_CTRL_EN |
662
GENMASK(priv->config.rings - 1, 0),
663
EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));
664
665
/* Data Store Engine configuration */
666
667
/* Reset all DSE threads */
668
writel(EIP197_DxE_THR_CTRL_RESET_PE,
669
EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
670
671
/* Wait for all DSE threads to complete */
672
while ((readl(EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_STAT(pe)) &
673
GENMASK(15, 12)) != GENMASK(15, 12))
674
;
675
676
/* DMA transfer size to use */
677
if (priv->hwconfig.hwnumpes > 4) {
678
opbuflo = 9;
679
opbufhi = 10;
680
} else {
681
opbuflo = 7;
682
opbufhi = 8;
683
}
684
val = EIP197_HIA_DSE_CFG_DIS_DEBUG;
685
val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(opbuflo) |
686
EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(opbufhi);
687
val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
688
val |= EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE;
689
/* FIXME: instability issues can occur for EIP97 but disabling
690
* it impacts performance.
691
*/
692
if (priv->flags & SAFEXCEL_HW_EIP197)
693
val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
694
writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG(pe));
695
696
/* Leave the DSE threads reset state */
697
writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
698
699
/* Configure the processing engine thresholds */
700
writel(EIP197_PE_OUT_DBUF_THRES_MIN(opbuflo) |
701
EIP197_PE_OUT_DBUF_THRES_MAX(opbufhi),
702
EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES(pe));
703
704
/* Processing Engine configuration */
705
706
/* Token & context configuration */
707
val = EIP197_PE_EIP96_TOKEN_CTRL_CTX_UPDATES |
708
EIP197_PE_EIP96_TOKEN_CTRL_NO_TOKEN_WAIT |
709
EIP197_PE_EIP96_TOKEN_CTRL_ENABLE_TIMEOUT;
710
writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_TOKEN_CTRL(pe));
711
712
/* H/W capabilities selection: just enable everything */
713
writel(EIP197_FUNCTION_ALL,
714
EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN(pe));
715
writel(EIP197_FUNCTION_ALL,
716
EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION2_EN(pe));
717
}
718
719
/* Command Descriptor Rings prepare */
720
for (i = 0; i < priv->config.rings; i++) {
721
/* Clear interrupts for this ring */
722
writel(GENMASK(31, 0),
723
EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CLR(i));
724
725
/* Disable external triggering */
726
writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);
727
728
/* Clear the pending prepared counter */
729
writel(EIP197_xDR_PREP_CLR_COUNT,
730
EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);
731
732
/* Clear the pending processed counter */
733
writel(EIP197_xDR_PROC_CLR_COUNT,
734
EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);
735
736
writel(0,
737
EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
738
writel(0,
739
EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);
740
741
writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset),
742
EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
743
}
744
745
/* Result Descriptor Ring prepare */
746
for (i = 0; i < priv->config.rings; i++) {
747
/* Disable external triggering*/
748
writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);
749
750
/* Clear the pending prepared counter */
751
writel(EIP197_xDR_PREP_CLR_COUNT,
752
EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);
753
754
/* Clear the pending processed counter */
755
writel(EIP197_xDR_PROC_CLR_COUNT,
756
EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);
757
758
writel(0,
759
EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
760
writel(0,
761
EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);
762
763
/* Ring size */
764
writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset),
765
EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
766
}
767
768
for (pe = 0; pe < priv->config.pes; pe++) {
769
/* Enable command descriptor rings */
770
writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
771
EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));
772
773
/* Enable result descriptor rings */
774
writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
775
EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
776
}
777
778
/* Clear any HIA interrupt */
779
writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);
780
781
if (priv->flags & EIP197_SIMPLE_TRC) {
782
writel(EIP197_STRC_CONFIG_INIT |
783
EIP197_STRC_CONFIG_LARGE_REC(EIP197_CS_TRC_REC_WC) |
784
EIP197_STRC_CONFIG_SMALL_REC(EIP197_CS_TRC_REC_WC),
785
priv->base + EIP197_STRC_CONFIG);
786
writel(EIP197_PE_EIP96_TOKEN_CTRL2_CTX_DONE,
787
EIP197_PE(priv) + EIP197_PE_EIP96_TOKEN_CTRL2(0));
788
} else if (priv->flags & SAFEXCEL_HW_EIP197) {
789
ret = eip197_trc_cache_init(priv);
790
if (ret)
791
return ret;
792
}
793
794
if (priv->flags & EIP197_ICE) {
795
ret = eip197_load_firmwares(priv);
796
if (ret)
797
return ret;
798
}
799
800
return safexcel_hw_setup_cdesc_rings(priv) ?:
801
safexcel_hw_setup_rdesc_rings(priv) ?:
802
0;
803
}
804
805
/* Called with ring's lock taken */
806
static void safexcel_try_push_requests(struct safexcel_crypto_priv *priv,
807
int ring)
808
{
809
int coal = min_t(int, priv->ring[ring].requests, EIP197_MAX_BATCH_SZ);
810
811
if (!coal)
812
return;
813
814
/* Configure when we want an interrupt */
815
writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
816
EIP197_HIA_RDR_THRESH_PROC_PKT(coal),
817
EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_THRESH);
818
}
819
820
void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
821
{
822
struct crypto_async_request *req, *backlog;
823
struct safexcel_context *ctx;
824
int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;
825
826
/* If a request wasn't properly dequeued because of a lack of resources,
827
* proceeded it first,
828
*/
829
req = priv->ring[ring].req;
830
backlog = priv->ring[ring].backlog;
831
if (req)
832
goto handle_req;
833
834
while (true) {
835
spin_lock_bh(&priv->ring[ring].queue_lock);
836
backlog = crypto_get_backlog(&priv->ring[ring].queue);
837
req = crypto_dequeue_request(&priv->ring[ring].queue);
838
spin_unlock_bh(&priv->ring[ring].queue_lock);
839
840
if (!req) {
841
priv->ring[ring].req = NULL;
842
priv->ring[ring].backlog = NULL;
843
goto finalize;
844
}
845
846
handle_req:
847
ctx = crypto_tfm_ctx(req->tfm);
848
ret = ctx->send(req, ring, &commands, &results);
849
if (ret)
850
goto request_failed;
851
852
if (backlog)
853
crypto_request_complete(backlog, -EINPROGRESS);
854
855
/* In case the send() helper did not issue any command to push
856
* to the engine because the input data was cached, continue to
857
* dequeue other requests as this is valid and not an error.
858
*/
859
if (!commands && !results)
860
continue;
861
862
cdesc += commands;
863
rdesc += results;
864
nreq++;
865
}
866
867
request_failed:
868
/* Not enough resources to handle all the requests. Bail out and save
869
* the request and the backlog for the next dequeue call (per-ring).
870
*/
871
priv->ring[ring].req = req;
872
priv->ring[ring].backlog = backlog;
873
874
finalize:
875
if (!nreq)
876
return;
877
878
spin_lock_bh(&priv->ring[ring].lock);
879
880
priv->ring[ring].requests += nreq;
881
882
if (!priv->ring[ring].busy) {
883
safexcel_try_push_requests(priv, ring);
884
priv->ring[ring].busy = true;
885
}
886
887
spin_unlock_bh(&priv->ring[ring].lock);
888
889
/* let the RDR know we have pending descriptors */
890
writel((rdesc * priv->config.rd_offset),
891
EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
892
893
/* let the CDR know we have pending descriptors */
894
writel((cdesc * priv->config.cd_offset),
895
EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
896
}
897
898
inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
899
void *rdp)
900
{
901
struct safexcel_result_desc *rdesc = rdp;
902
struct result_data_desc *result_data = rdp + priv->config.res_offset;
903
904
if (likely((!rdesc->last_seg) || /* Rest only valid if last seg! */
905
((!rdesc->descriptor_overflow) &&
906
(!rdesc->buffer_overflow) &&
907
(!result_data->error_code))))
908
return 0;
909
910
if (rdesc->descriptor_overflow)
911
dev_err(priv->dev, "Descriptor overflow detected");
912
913
if (rdesc->buffer_overflow)
914
dev_err(priv->dev, "Buffer overflow detected");
915
916
if (result_data->error_code & 0x4066) {
917
/* Fatal error (bits 1,2,5,6 & 14) */
918
dev_err(priv->dev,
919
"result descriptor error (%x)",
920
result_data->error_code);
921
922
return -EIO;
923
} else if (result_data->error_code &
924
(BIT(7) | BIT(4) | BIT(3) | BIT(0))) {
925
/*
926
* Give priority over authentication fails:
927
* Blocksize, length & overflow errors,
928
* something wrong with the input!
929
*/
930
return -EINVAL;
931
} else if (result_data->error_code & BIT(9)) {
932
/* Authentication failed */
933
return -EBADMSG;
934
}
935
936
/* All other non-fatal errors */
937
return -EINVAL;
938
}
939
940
inline void safexcel_rdr_req_set(struct safexcel_crypto_priv *priv,
941
int ring,
942
struct safexcel_result_desc *rdesc,
943
struct crypto_async_request *req)
944
{
945
int i = safexcel_ring_rdr_rdesc_index(priv, ring, rdesc);
946
947
priv->ring[ring].rdr_req[i] = req;
948
}
949
950
inline struct crypto_async_request *
951
safexcel_rdr_req_get(struct safexcel_crypto_priv *priv, int ring)
952
{
953
int i = safexcel_ring_first_rdr_index(priv, ring);
954
955
return priv->ring[ring].rdr_req[i];
956
}
957
958
void safexcel_complete(struct safexcel_crypto_priv *priv, int ring)
959
{
960
struct safexcel_command_desc *cdesc;
961
962
/* Acknowledge the command descriptors */
963
do {
964
cdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].cdr);
965
if (IS_ERR(cdesc)) {
966
dev_err(priv->dev,
967
"Could not retrieve the command descriptor\n");
968
return;
969
}
970
} while (!cdesc->last_seg);
971
}
972
973
int safexcel_invalidate_cache(struct crypto_async_request *async,
974
struct safexcel_crypto_priv *priv,
975
dma_addr_t ctxr_dma, int ring)
976
{
977
struct safexcel_command_desc *cdesc;
978
struct safexcel_result_desc *rdesc;
979
struct safexcel_token *dmmy;
980
int ret = 0;
981
982
/* Prepare command descriptor */
983
cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma,
984
&dmmy);
985
if (IS_ERR(cdesc))
986
return PTR_ERR(cdesc);
987
988
cdesc->control_data.type = EIP197_TYPE_EXTENDED;
989
cdesc->control_data.options = 0;
990
cdesc->control_data.context_lo &= ~EIP197_CONTEXT_SIZE_MASK;
991
cdesc->control_data.control0 = CONTEXT_CONTROL_INV_TR;
992
993
/* Prepare result descriptor */
994
rdesc = safexcel_add_rdesc(priv, ring, true, true, 0, 0);
995
996
if (IS_ERR(rdesc)) {
997
ret = PTR_ERR(rdesc);
998
goto cdesc_rollback;
999
}
1000
1001
safexcel_rdr_req_set(priv, ring, rdesc, async);
1002
1003
return ret;
1004
1005
cdesc_rollback:
1006
safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
1007
1008
return ret;
1009
}
1010
1011
static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv *priv,
1012
int ring)
1013
{
1014
struct crypto_async_request *req;
1015
struct safexcel_context *ctx;
1016
int ret, i, nreq, ndesc, tot_descs, handled = 0;
1017
bool should_complete;
1018
1019
handle_results:
1020
tot_descs = 0;
1021
1022
nreq = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
1023
nreq >>= EIP197_xDR_PROC_xD_PKT_OFFSET;
1024
nreq &= EIP197_xDR_PROC_xD_PKT_MASK;
1025
if (!nreq)
1026
goto requests_left;
1027
1028
for (i = 0; i < nreq; i++) {
1029
req = safexcel_rdr_req_get(priv, ring);
1030
1031
ctx = crypto_tfm_ctx(req->tfm);
1032
ndesc = ctx->handle_result(priv, ring, req,
1033
&should_complete, &ret);
1034
if (ndesc < 0) {
1035
dev_err(priv->dev, "failed to handle result (%d)\n",
1036
ndesc);
1037
goto acknowledge;
1038
}
1039
1040
if (should_complete) {
1041
local_bh_disable();
1042
crypto_request_complete(req, ret);
1043
local_bh_enable();
1044
}
1045
1046
tot_descs += ndesc;
1047
handled++;
1048
}
1049
1050
acknowledge:
1051
if (i)
1052
writel(EIP197_xDR_PROC_xD_PKT(i) |
1053
(tot_descs * priv->config.rd_offset),
1054
EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
1055
1056
/* If the number of requests overflowed the counter, try to proceed more
1057
* requests.
1058
*/
1059
if (nreq == EIP197_xDR_PROC_xD_PKT_MASK)
1060
goto handle_results;
1061
1062
requests_left:
1063
spin_lock_bh(&priv->ring[ring].lock);
1064
1065
priv->ring[ring].requests -= handled;
1066
safexcel_try_push_requests(priv, ring);
1067
1068
if (!priv->ring[ring].requests)
1069
priv->ring[ring].busy = false;
1070
1071
spin_unlock_bh(&priv->ring[ring].lock);
1072
}
1073
1074
static void safexcel_dequeue_work(struct work_struct *work)
1075
{
1076
struct safexcel_work_data *data =
1077
container_of(work, struct safexcel_work_data, work);
1078
1079
safexcel_dequeue(data->priv, data->ring);
1080
}
1081
1082
struct safexcel_ring_irq_data {
1083
struct safexcel_crypto_priv *priv;
1084
int ring;
1085
};
1086
1087
static irqreturn_t safexcel_irq_ring(int irq, void *data)
1088
{
1089
struct safexcel_ring_irq_data *irq_data = data;
1090
struct safexcel_crypto_priv *priv = irq_data->priv;
1091
int ring = irq_data->ring, rc = IRQ_NONE;
1092
u32 status, stat;
1093
1094
status = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLED_STAT(ring));
1095
if (!status)
1096
return rc;
1097
1098
/* RDR interrupts */
1099
if (status & EIP197_RDR_IRQ(ring)) {
1100
stat = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
1101
1102
if (unlikely(stat & EIP197_xDR_ERR)) {
1103
/*
1104
* Fatal error, the RDR is unusable and must be
1105
* reinitialized. This should not happen under
1106
* normal circumstances.
1107
*/
1108
dev_err(priv->dev, "RDR: fatal error.\n");
1109
} else if (likely(stat & EIP197_xDR_THRESH)) {
1110
rc = IRQ_WAKE_THREAD;
1111
}
1112
1113
/* ACK the interrupts */
1114
writel(stat & 0xff,
1115
EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
1116
}
1117
1118
/* ACK the interrupts */
1119
writel(status, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ACK(ring));
1120
1121
return rc;
1122
}
1123
1124
static irqreturn_t safexcel_irq_ring_thread(int irq, void *data)
1125
{
1126
struct safexcel_ring_irq_data *irq_data = data;
1127
struct safexcel_crypto_priv *priv = irq_data->priv;
1128
int ring = irq_data->ring;
1129
1130
safexcel_handle_result_descriptor(priv, ring);
1131
1132
queue_work(priv->ring[ring].workqueue,
1133
&priv->ring[ring].work_data.work);
1134
1135
return IRQ_HANDLED;
1136
}
1137
1138
static int safexcel_request_ring_irq(void *pdev, int irqid,
1139
int is_pci_dev,
1140
int ring_id,
1141
irq_handler_t handler,
1142
irq_handler_t threaded_handler,
1143
struct safexcel_ring_irq_data *ring_irq_priv)
1144
{
1145
int ret, irq, cpu;
1146
struct device *dev;
1147
1148
if (IS_ENABLED(CONFIG_PCI) && is_pci_dev) {
1149
struct pci_dev *pci_pdev = pdev;
1150
1151
dev = &pci_pdev->dev;
1152
irq = pci_irq_vector(pci_pdev, irqid);
1153
if (irq < 0) {
1154
dev_err(dev, "unable to get device MSI IRQ %d (err %d)\n",
1155
irqid, irq);
1156
return irq;
1157
}
1158
} else if (IS_ENABLED(CONFIG_OF)) {
1159
struct platform_device *plf_pdev = pdev;
1160
char irq_name[6] = {0}; /* "ringX\0" */
1161
1162
snprintf(irq_name, 6, "ring%d", irqid);
1163
dev = &plf_pdev->dev;
1164
irq = platform_get_irq_byname(plf_pdev, irq_name);
1165
1166
if (irq < 0)
1167
return irq;
1168
} else {
1169
return -ENXIO;
1170
}
1171
1172
ret = devm_request_threaded_irq(dev, irq, handler,
1173
threaded_handler, IRQF_ONESHOT,
1174
dev_name(dev), ring_irq_priv);
1175
if (ret) {
1176
dev_err(dev, "unable to request IRQ %d\n", irq);
1177
return ret;
1178
}
1179
1180
/* Set affinity */
1181
cpu = cpumask_local_spread(ring_id, NUMA_NO_NODE);
1182
irq_set_affinity_hint(irq, get_cpu_mask(cpu));
1183
1184
return irq;
1185
}
1186
1187
static struct safexcel_alg_template *safexcel_algs[] = {
1188
&safexcel_alg_ecb_des,
1189
&safexcel_alg_cbc_des,
1190
&safexcel_alg_ecb_des3_ede,
1191
&safexcel_alg_cbc_des3_ede,
1192
&safexcel_alg_ecb_aes,
1193
&safexcel_alg_cbc_aes,
1194
&safexcel_alg_ctr_aes,
1195
&safexcel_alg_md5,
1196
&safexcel_alg_sha1,
1197
&safexcel_alg_sha224,
1198
&safexcel_alg_sha256,
1199
&safexcel_alg_sha384,
1200
&safexcel_alg_sha512,
1201
&safexcel_alg_hmac_md5,
1202
&safexcel_alg_hmac_sha1,
1203
&safexcel_alg_hmac_sha224,
1204
&safexcel_alg_hmac_sha256,
1205
&safexcel_alg_hmac_sha384,
1206
&safexcel_alg_hmac_sha512,
1207
&safexcel_alg_authenc_hmac_sha1_cbc_aes,
1208
&safexcel_alg_authenc_hmac_sha224_cbc_aes,
1209
&safexcel_alg_authenc_hmac_sha256_cbc_aes,
1210
&safexcel_alg_authenc_hmac_sha384_cbc_aes,
1211
&safexcel_alg_authenc_hmac_sha512_cbc_aes,
1212
&safexcel_alg_authenc_hmac_sha1_cbc_des3_ede,
1213
&safexcel_alg_authenc_hmac_sha1_ctr_aes,
1214
&safexcel_alg_authenc_hmac_sha224_ctr_aes,
1215
&safexcel_alg_authenc_hmac_sha256_ctr_aes,
1216
&safexcel_alg_authenc_hmac_sha384_ctr_aes,
1217
&safexcel_alg_authenc_hmac_sha512_ctr_aes,
1218
&safexcel_alg_xts_aes,
1219
&safexcel_alg_gcm,
1220
&safexcel_alg_ccm,
1221
&safexcel_alg_cbcmac,
1222
&safexcel_alg_xcbcmac,
1223
&safexcel_alg_cmac,
1224
&safexcel_alg_chacha20,
1225
&safexcel_alg_chachapoly,
1226
&safexcel_alg_chachapoly_esp,
1227
&safexcel_alg_sm3,
1228
&safexcel_alg_hmac_sm3,
1229
&safexcel_alg_ecb_sm4,
1230
&safexcel_alg_cbc_sm4,
1231
&safexcel_alg_ctr_sm4,
1232
&safexcel_alg_authenc_hmac_sha1_cbc_sm4,
1233
&safexcel_alg_authenc_hmac_sm3_cbc_sm4,
1234
&safexcel_alg_authenc_hmac_sha1_ctr_sm4,
1235
&safexcel_alg_authenc_hmac_sm3_ctr_sm4,
1236
&safexcel_alg_sha3_224,
1237
&safexcel_alg_sha3_256,
1238
&safexcel_alg_sha3_384,
1239
&safexcel_alg_sha3_512,
1240
&safexcel_alg_hmac_sha3_224,
1241
&safexcel_alg_hmac_sha3_256,
1242
&safexcel_alg_hmac_sha3_384,
1243
&safexcel_alg_hmac_sha3_512,
1244
&safexcel_alg_authenc_hmac_sha1_cbc_des,
1245
&safexcel_alg_authenc_hmac_sha256_cbc_des3_ede,
1246
&safexcel_alg_authenc_hmac_sha224_cbc_des3_ede,
1247
&safexcel_alg_authenc_hmac_sha512_cbc_des3_ede,
1248
&safexcel_alg_authenc_hmac_sha384_cbc_des3_ede,
1249
&safexcel_alg_authenc_hmac_sha256_cbc_des,
1250
&safexcel_alg_authenc_hmac_sha224_cbc_des,
1251
&safexcel_alg_authenc_hmac_sha512_cbc_des,
1252
&safexcel_alg_authenc_hmac_sha384_cbc_des,
1253
&safexcel_alg_rfc4106_gcm,
1254
&safexcel_alg_rfc4543_gcm,
1255
&safexcel_alg_rfc4309_ccm,
1256
};
1257
1258
static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
1259
{
1260
int i, j, ret = 0;
1261
1262
for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
1263
safexcel_algs[i]->priv = priv;
1264
1265
/* Do we have all required base algorithms available? */
1266
if ((safexcel_algs[i]->algo_mask & priv->hwconfig.algo_flags) !=
1267
safexcel_algs[i]->algo_mask)
1268
/* No, so don't register this ciphersuite */
1269
continue;
1270
1271
if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
1272
ret = crypto_register_skcipher(&safexcel_algs[i]->alg.skcipher);
1273
else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
1274
ret = crypto_register_aead(&safexcel_algs[i]->alg.aead);
1275
else
1276
ret = crypto_register_ahash(&safexcel_algs[i]->alg.ahash);
1277
1278
if (ret)
1279
goto fail;
1280
}
1281
1282
return 0;
1283
1284
fail:
1285
for (j = 0; j < i; j++) {
1286
/* Do we have all required base algorithms available? */
1287
if ((safexcel_algs[j]->algo_mask & priv->hwconfig.algo_flags) !=
1288
safexcel_algs[j]->algo_mask)
1289
/* No, so don't unregister this ciphersuite */
1290
continue;
1291
1292
if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
1293
crypto_unregister_skcipher(&safexcel_algs[j]->alg.skcipher);
1294
else if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_AEAD)
1295
crypto_unregister_aead(&safexcel_algs[j]->alg.aead);
1296
else
1297
crypto_unregister_ahash(&safexcel_algs[j]->alg.ahash);
1298
}
1299
1300
return ret;
1301
}
1302
1303
static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv)
1304
{
1305
int i;
1306
1307
for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
1308
/* Do we have all required base algorithms available? */
1309
if ((safexcel_algs[i]->algo_mask & priv->hwconfig.algo_flags) !=
1310
safexcel_algs[i]->algo_mask)
1311
/* No, so don't unregister this ciphersuite */
1312
continue;
1313
1314
if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
1315
crypto_unregister_skcipher(&safexcel_algs[i]->alg.skcipher);
1316
else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
1317
crypto_unregister_aead(&safexcel_algs[i]->alg.aead);
1318
else
1319
crypto_unregister_ahash(&safexcel_algs[i]->alg.ahash);
1320
}
1321
}
1322
1323
static void safexcel_configure(struct safexcel_crypto_priv *priv)
1324
{
1325
u32 mask = BIT(priv->hwconfig.hwdataw) - 1;
1326
1327
priv->config.pes = priv->hwconfig.hwnumpes;
1328
priv->config.rings = min_t(u32, priv->hwconfig.hwnumrings, max_rings);
1329
/* Cannot currently support more rings than we have ring AICs! */
1330
priv->config.rings = min_t(u32, priv->config.rings,
1331
priv->hwconfig.hwnumraic);
1332
1333
priv->config.cd_size = EIP197_CD64_FETCH_SIZE;
1334
priv->config.cd_offset = (priv->config.cd_size + mask) & ~mask;
1335
priv->config.cdsh_offset = (EIP197_MAX_TOKENS + mask) & ~mask;
1336
1337
/* res token is behind the descr, but ofs must be rounded to buswdth */
1338
priv->config.res_offset = (EIP197_RD64_FETCH_SIZE + mask) & ~mask;
1339
/* now the size of the descr is this 1st part plus the result struct */
1340
priv->config.rd_size = priv->config.res_offset +
1341
EIP197_RD64_RESULT_SIZE;
1342
priv->config.rd_offset = (priv->config.rd_size + mask) & ~mask;
1343
1344
/* convert dwords to bytes */
1345
priv->config.cd_offset *= sizeof(u32);
1346
priv->config.cdsh_offset *= sizeof(u32);
1347
priv->config.rd_offset *= sizeof(u32);
1348
priv->config.res_offset *= sizeof(u32);
1349
}
1350
1351
static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
1352
{
1353
struct safexcel_register_offsets *offsets = &priv->offsets;
1354
1355
if (priv->flags & SAFEXCEL_HW_EIP197) {
1356
offsets->hia_aic = EIP197_HIA_AIC_BASE;
1357
offsets->hia_aic_g = EIP197_HIA_AIC_G_BASE;
1358
offsets->hia_aic_r = EIP197_HIA_AIC_R_BASE;
1359
offsets->hia_aic_xdr = EIP197_HIA_AIC_xDR_BASE;
1360
offsets->hia_dfe = EIP197_HIA_DFE_BASE;
1361
offsets->hia_dfe_thr = EIP197_HIA_DFE_THR_BASE;
1362
offsets->hia_dse = EIP197_HIA_DSE_BASE;
1363
offsets->hia_dse_thr = EIP197_HIA_DSE_THR_BASE;
1364
offsets->hia_gen_cfg = EIP197_HIA_GEN_CFG_BASE;
1365
offsets->pe = EIP197_PE_BASE;
1366
offsets->global = EIP197_GLOBAL_BASE;
1367
} else {
1368
offsets->hia_aic = EIP97_HIA_AIC_BASE;
1369
offsets->hia_aic_g = EIP97_HIA_AIC_G_BASE;
1370
offsets->hia_aic_r = EIP97_HIA_AIC_R_BASE;
1371
offsets->hia_aic_xdr = EIP97_HIA_AIC_xDR_BASE;
1372
offsets->hia_dfe = EIP97_HIA_DFE_BASE;
1373
offsets->hia_dfe_thr = EIP97_HIA_DFE_THR_BASE;
1374
offsets->hia_dse = EIP97_HIA_DSE_BASE;
1375
offsets->hia_dse_thr = EIP97_HIA_DSE_THR_BASE;
1376
offsets->hia_gen_cfg = EIP97_HIA_GEN_CFG_BASE;
1377
offsets->pe = EIP97_PE_BASE;
1378
offsets->global = EIP97_GLOBAL_BASE;
1379
}
1380
}
1381
1382
/*
1383
* Generic part of probe routine, shared by platform and PCI driver
1384
*
1385
* Assumes IO resources have been mapped, private data mem has been allocated,
1386
* clocks have been enabled, device pointer has been assigned etc.
1387
*
1388
*/
1389
static int safexcel_probe_generic(void *pdev,
1390
struct safexcel_crypto_priv *priv,
1391
int is_pci_dev)
1392
{
1393
struct device *dev = priv->dev;
1394
u32 peid, version, mask, val, hiaopt, hwopt, peopt;
1395
int i, ret, hwctg;
1396
1397
priv->context_pool = dmam_pool_create("safexcel-context", dev,
1398
sizeof(struct safexcel_context_record),
1399
1, 0);
1400
if (!priv->context_pool)
1401
return -ENOMEM;
1402
1403
/*
1404
* First try the EIP97 HIA version regs
1405
* For the EIP197, this is guaranteed to NOT return any of the test
1406
* values
1407
*/
1408
version = readl(priv->base + EIP97_HIA_AIC_BASE + EIP197_HIA_VERSION);
1409
1410
mask = 0; /* do not swap */
1411
if (EIP197_REG_LO16(version) == EIP197_HIA_VERSION_LE) {
1412
priv->hwconfig.hiaver = EIP197_VERSION_MASK(version);
1413
} else if (EIP197_REG_HI16(version) == EIP197_HIA_VERSION_BE) {
1414
/* read back byte-swapped, so complement byte swap bits */
1415
mask = EIP197_MST_CTRL_BYTE_SWAP_BITS;
1416
priv->hwconfig.hiaver = EIP197_VERSION_SWAP(version);
1417
} else {
1418
/* So it wasn't an EIP97 ... maybe it's an EIP197? */
1419
version = readl(priv->base + EIP197_HIA_AIC_BASE +
1420
EIP197_HIA_VERSION);
1421
if (EIP197_REG_LO16(version) == EIP197_HIA_VERSION_LE) {
1422
priv->hwconfig.hiaver = EIP197_VERSION_MASK(version);
1423
priv->flags |= SAFEXCEL_HW_EIP197;
1424
} else if (EIP197_REG_HI16(version) ==
1425
EIP197_HIA_VERSION_BE) {
1426
/* read back byte-swapped, so complement swap bits */
1427
mask = EIP197_MST_CTRL_BYTE_SWAP_BITS;
1428
priv->hwconfig.hiaver = EIP197_VERSION_SWAP(version);
1429
priv->flags |= SAFEXCEL_HW_EIP197;
1430
} else {
1431
return -ENODEV;
1432
}
1433
}
1434
1435
/* Now initialize the reg offsets based on the probing info so far */
1436
safexcel_init_register_offsets(priv);
1437
1438
/*
1439
* If the version was read byte-swapped, we need to flip the device
1440
* swapping Keep in mind here, though, that what we write will also be
1441
* byte-swapped ...
1442
*/
1443
if (mask) {
1444
val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
1445
val = val ^ (mask >> 24); /* toggle byte swap bits */
1446
writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
1447
}
1448
1449
/*
1450
* We're not done probing yet! We may fall through to here if no HIA
1451
* was found at all. So, with the endianness presumably correct now and
1452
* the offsets setup, *really* probe for the EIP97/EIP197.
1453
*/
1454
version = readl(EIP197_GLOBAL(priv) + EIP197_VERSION);
1455
if (((priv->flags & SAFEXCEL_HW_EIP197) &&
1456
(EIP197_REG_LO16(version) != EIP197_VERSION_LE) &&
1457
(EIP197_REG_LO16(version) != EIP196_VERSION_LE)) ||
1458
((!(priv->flags & SAFEXCEL_HW_EIP197) &&
1459
(EIP197_REG_LO16(version) != EIP97_VERSION_LE)))) {
1460
/*
1461
* We did not find the device that matched our initial probing
1462
* (or our initial probing failed) Report appropriate error.
1463
*/
1464
dev_err(priv->dev, "Probing for EIP97/EIP19x failed - no such device (read %08x)\n",
1465
version);
1466
return -ENODEV;
1467
}
1468
1469
priv->hwconfig.hwver = EIP197_VERSION_MASK(version);
1470
hwctg = version >> 28;
1471
peid = version & 255;
1472
1473
/* Detect EIP206 processing pipe */
1474
version = readl(EIP197_PE(priv) + + EIP197_PE_VERSION(0));
1475
if (EIP197_REG_LO16(version) != EIP206_VERSION_LE) {
1476
dev_err(priv->dev, "EIP%d: EIP206 not detected\n", peid);
1477
return -ENODEV;
1478
}
1479
priv->hwconfig.ppver = EIP197_VERSION_MASK(version);
1480
1481
/* Detect EIP96 packet engine and version */
1482
version = readl(EIP197_PE(priv) + EIP197_PE_EIP96_VERSION(0));
1483
if (EIP197_REG_LO16(version) != EIP96_VERSION_LE) {
1484
dev_err(dev, "EIP%d: EIP96 not detected.\n", peid);
1485
return -ENODEV;
1486
}
1487
priv->hwconfig.pever = EIP197_VERSION_MASK(version);
1488
1489
hwopt = readl(EIP197_GLOBAL(priv) + EIP197_OPTIONS);
1490
hiaopt = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_OPTIONS);
1491
1492
priv->hwconfig.icever = 0;
1493
priv->hwconfig.ocever = 0;
1494
priv->hwconfig.psever = 0;
1495
if (priv->flags & SAFEXCEL_HW_EIP197) {
1496
/* EIP197 */
1497
peopt = readl(EIP197_PE(priv) + EIP197_PE_OPTIONS(0));
1498
1499
priv->hwconfig.hwdataw = (hiaopt >> EIP197_HWDATAW_OFFSET) &
1500
EIP197_HWDATAW_MASK;
1501
priv->hwconfig.hwcfsize = ((hiaopt >> EIP197_CFSIZE_OFFSET) &
1502
EIP197_CFSIZE_MASK) +
1503
EIP197_CFSIZE_ADJUST;
1504
priv->hwconfig.hwrfsize = ((hiaopt >> EIP197_RFSIZE_OFFSET) &
1505
EIP197_RFSIZE_MASK) +
1506
EIP197_RFSIZE_ADJUST;
1507
priv->hwconfig.hwnumpes = (hiaopt >> EIP197_N_PES_OFFSET) &
1508
EIP197_N_PES_MASK;
1509
priv->hwconfig.hwnumrings = (hiaopt >> EIP197_N_RINGS_OFFSET) &
1510
EIP197_N_RINGS_MASK;
1511
if (hiaopt & EIP197_HIA_OPT_HAS_PE_ARB)
1512
priv->flags |= EIP197_PE_ARB;
1513
if (EIP206_OPT_ICE_TYPE(peopt) == 1) {
1514
priv->flags |= EIP197_ICE;
1515
/* Detect ICE EIP207 class. engine and version */
1516
version = readl(EIP197_PE(priv) +
1517
EIP197_PE_ICE_VERSION(0));
1518
if (EIP197_REG_LO16(version) != EIP207_VERSION_LE) {
1519
dev_err(dev, "EIP%d: ICE EIP207 not detected.\n",
1520
peid);
1521
return -ENODEV;
1522
}
1523
priv->hwconfig.icever = EIP197_VERSION_MASK(version);
1524
}
1525
if (EIP206_OPT_OCE_TYPE(peopt) == 1) {
1526
priv->flags |= EIP197_OCE;
1527
/* Detect EIP96PP packet stream editor and version */
1528
version = readl(EIP197_PE(priv) + EIP197_PE_PSE_VERSION(0));
1529
if (EIP197_REG_LO16(version) != EIP96_VERSION_LE) {
1530
dev_err(dev, "EIP%d: EIP96PP not detected.\n", peid);
1531
return -ENODEV;
1532
}
1533
priv->hwconfig.psever = EIP197_VERSION_MASK(version);
1534
/* Detect OCE EIP207 class. engine and version */
1535
version = readl(EIP197_PE(priv) +
1536
EIP197_PE_ICE_VERSION(0));
1537
if (EIP197_REG_LO16(version) != EIP207_VERSION_LE) {
1538
dev_err(dev, "EIP%d: OCE EIP207 not detected.\n",
1539
peid);
1540
return -ENODEV;
1541
}
1542
priv->hwconfig.ocever = EIP197_VERSION_MASK(version);
1543
}
1544
/* If not a full TRC, then assume simple TRC */
1545
if (!(hwopt & EIP197_OPT_HAS_TRC))
1546
priv->flags |= EIP197_SIMPLE_TRC;
1547
/* EIP197 always has SOME form of TRC */
1548
priv->flags |= EIP197_TRC_CACHE;
1549
} else {
1550
/* EIP97 */
1551
priv->hwconfig.hwdataw = (hiaopt >> EIP197_HWDATAW_OFFSET) &
1552
EIP97_HWDATAW_MASK;
1553
priv->hwconfig.hwcfsize = (hiaopt >> EIP97_CFSIZE_OFFSET) &
1554
EIP97_CFSIZE_MASK;
1555
priv->hwconfig.hwrfsize = (hiaopt >> EIP97_RFSIZE_OFFSET) &
1556
EIP97_RFSIZE_MASK;
1557
priv->hwconfig.hwnumpes = 1; /* by definition */
1558
priv->hwconfig.hwnumrings = (hiaopt >> EIP197_N_RINGS_OFFSET) &
1559
EIP197_N_RINGS_MASK;
1560
}
1561
1562
/* Scan for ring AIC's */
1563
for (i = 0; i < EIP197_MAX_RING_AIC; i++) {
1564
version = readl(EIP197_HIA_AIC_R(priv) +
1565
EIP197_HIA_AIC_R_VERSION(i));
1566
if (EIP197_REG_LO16(version) != EIP201_VERSION_LE)
1567
break;
1568
}
1569
priv->hwconfig.hwnumraic = i;
1570
/* Low-end EIP196 may not have any ring AIC's ... */
1571
if (!priv->hwconfig.hwnumraic) {
1572
dev_err(priv->dev, "No ring interrupt controller present!\n");
1573
return -ENODEV;
1574
}
1575
1576
/* Get supported algorithms from EIP96 transform engine */
1577
priv->hwconfig.algo_flags = readl(EIP197_PE(priv) +
1578
EIP197_PE_EIP96_OPTIONS(0));
1579
1580
/* Print single info line describing what we just detected */
1581
dev_info(priv->dev, "EIP%d:%x(%d,%d,%d,%d)-HIA:%x(%d,%d,%d),PE:%x/%x(alg:%08x)/%x/%x/%x\n",
1582
peid, priv->hwconfig.hwver, hwctg, priv->hwconfig.hwnumpes,
1583
priv->hwconfig.hwnumrings, priv->hwconfig.hwnumraic,
1584
priv->hwconfig.hiaver, priv->hwconfig.hwdataw,
1585
priv->hwconfig.hwcfsize, priv->hwconfig.hwrfsize,
1586
priv->hwconfig.ppver, priv->hwconfig.pever,
1587
priv->hwconfig.algo_flags, priv->hwconfig.icever,
1588
priv->hwconfig.ocever, priv->hwconfig.psever);
1589
1590
safexcel_configure(priv);
1591
1592
if (IS_ENABLED(CONFIG_PCI) && priv->data->version == EIP197_DEVBRD) {
1593
/*
1594
* Request MSI vectors for global + 1 per ring -
1595
* or just 1 for older dev images
1596
*/
1597
struct pci_dev *pci_pdev = pdev;
1598
1599
ret = pci_alloc_irq_vectors(pci_pdev,
1600
priv->config.rings + 1,
1601
priv->config.rings + 1,
1602
PCI_IRQ_MSI | PCI_IRQ_MSIX);
1603
if (ret < 0) {
1604
dev_err(dev, "Failed to allocate PCI MSI interrupts\n");
1605
return ret;
1606
}
1607
}
1608
1609
/* Register the ring IRQ handlers and configure the rings */
1610
priv->ring = devm_kcalloc(dev, priv->config.rings,
1611
sizeof(*priv->ring),
1612
GFP_KERNEL);
1613
if (!priv->ring)
1614
return -ENOMEM;
1615
1616
for (i = 0; i < priv->config.rings; i++) {
1617
char wq_name[9] = {0};
1618
int irq;
1619
struct safexcel_ring_irq_data *ring_irq;
1620
1621
ret = safexcel_init_ring_descriptors(priv,
1622
&priv->ring[i].cdr,
1623
&priv->ring[i].rdr);
1624
if (ret) {
1625
dev_err(dev, "Failed to initialize rings\n");
1626
goto err_cleanup_rings;
1627
}
1628
1629
priv->ring[i].rdr_req = devm_kcalloc(dev,
1630
EIP197_DEFAULT_RING_SIZE,
1631
sizeof(*priv->ring[i].rdr_req),
1632
GFP_KERNEL);
1633
if (!priv->ring[i].rdr_req) {
1634
ret = -ENOMEM;
1635
goto err_cleanup_rings;
1636
}
1637
1638
ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
1639
if (!ring_irq) {
1640
ret = -ENOMEM;
1641
goto err_cleanup_rings;
1642
}
1643
1644
ring_irq->priv = priv;
1645
ring_irq->ring = i;
1646
1647
irq = safexcel_request_ring_irq(pdev,
1648
EIP197_IRQ_NUMBER(i, is_pci_dev),
1649
is_pci_dev,
1650
i,
1651
safexcel_irq_ring,
1652
safexcel_irq_ring_thread,
1653
ring_irq);
1654
if (irq < 0) {
1655
dev_err(dev, "Failed to get IRQ ID for ring %d\n", i);
1656
ret = irq;
1657
goto err_cleanup_rings;
1658
}
1659
1660
priv->ring[i].irq = irq;
1661
priv->ring[i].work_data.priv = priv;
1662
priv->ring[i].work_data.ring = i;
1663
INIT_WORK(&priv->ring[i].work_data.work,
1664
safexcel_dequeue_work);
1665
1666
snprintf(wq_name, 9, "wq_ring%d", i);
1667
priv->ring[i].workqueue =
1668
create_singlethread_workqueue(wq_name);
1669
if (!priv->ring[i].workqueue) {
1670
ret = -ENOMEM;
1671
goto err_cleanup_rings;
1672
}
1673
1674
priv->ring[i].requests = 0;
1675
priv->ring[i].busy = false;
1676
1677
crypto_init_queue(&priv->ring[i].queue,
1678
EIP197_DEFAULT_RING_SIZE);
1679
1680
spin_lock_init(&priv->ring[i].lock);
1681
spin_lock_init(&priv->ring[i].queue_lock);
1682
}
1683
1684
atomic_set(&priv->ring_used, 0);
1685
1686
ret = safexcel_hw_init(priv);
1687
if (ret) {
1688
dev_err(dev, "HW init failed (%d)\n", ret);
1689
goto err_cleanup_rings;
1690
}
1691
1692
ret = safexcel_register_algorithms(priv);
1693
if (ret) {
1694
dev_err(dev, "Failed to register algorithms (%d)\n", ret);
1695
goto err_cleanup_rings;
1696
}
1697
1698
return 0;
1699
1700
err_cleanup_rings:
1701
	for (i = 0; i < priv->config.rings; i++) {
		if (priv->ring[i].irq)
			irq_set_affinity_hint(priv->ring[i].irq, NULL);
		if (priv->ring[i].workqueue)
			destroy_workqueue(priv->ring[i].workqueue);
	}

	return ret;
}

static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv)
{
	int i;

	for (i = 0; i < priv->config.rings; i++) {
		/* clear any pending interrupt */
		writel(GENMASK(5, 0), EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
		writel(GENMASK(7, 0), EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);

		/* Reset the CDR base address */
		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		/* Reset the RDR base address */
		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
	}
}

/* for Device Tree platform driver */

static int safexcel_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct safexcel_crypto_priv *priv;
	int ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;
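	/* Engine variant data is selected through the DT compatible string */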
	priv->data = (struct safexcel_priv_data *)of_device_get_match_data(dev);

	platform_set_drvdata(pdev, priv);

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base)) {
		dev_err(dev, "failed to get resource\n");
		return PTR_ERR(priv->base);
	}

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	ret = PTR_ERR_OR_ZERO(priv->clk);
	/* The clock isn't mandatory */
	if (ret != -ENOENT) {
		if (ret)
			return ret;

		ret = clk_prepare_enable(priv->clk);
		if (ret) {
			dev_err(dev, "unable to enable clk (%d)\n", ret);
			return ret;
		}
	}

	priv->reg_clk = devm_clk_get(&pdev->dev, "reg");
	ret = PTR_ERR_OR_ZERO(priv->reg_clk);
	/* The clock isn't mandatory */
	if (ret != -ENOENT) {
		if (ret)
			goto err_core_clk;

		ret = clk_prepare_enable(priv->reg_clk);
		if (ret) {
			dev_err(dev, "unable to enable reg clk (%d)\n", ret);
			goto err_core_clk;
		}
	}

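	/* The engine uses 64-bit addresses (ring bases are programmed as LO/HI pairs) */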
	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret)
		goto err_reg_clk;

	/* Generic EIP97/EIP197 device probing */
	ret = safexcel_probe_generic(pdev, priv, 0);
	if (ret)
		goto err_reg_clk;

	return 0;

err_reg_clk:
	clk_disable_unprepare(priv->reg_clk);
err_core_clk:
	clk_disable_unprepare(priv->clk);
	return ret;
}

static void safexcel_remove(struct platform_device *pdev)
{
	struct safexcel_crypto_priv *priv = platform_get_drvdata(pdev);
	int i;

	safexcel_unregister_algorithms(priv);
	safexcel_hw_reset_rings(priv);

	clk_disable_unprepare(priv->reg_clk);
	clk_disable_unprepare(priv->clk);

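	/* Release the per-ring IRQ affinity hints and workqueues */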
	for (i = 0; i < priv->config.rings; i++) {
		irq_set_affinity_hint(priv->ring[i].irq, NULL);
		destroy_workqueue(priv->ring[i].workqueue);
	}
}

static const struct safexcel_priv_data eip97ies_mrvl_data = {
	.version = EIP97IES_MRVL,
};

static const struct safexcel_priv_data eip197b_mrvl_data = {
	.version = EIP197B_MRVL,
};

static const struct safexcel_priv_data eip197d_mrvl_data = {
	.version = EIP197D_MRVL,
};

static const struct safexcel_priv_data eip197_devbrd_data = {
	.version = EIP197_DEVBRD,
};

static const struct safexcel_priv_data eip197c_mxl_data = {
	.version = EIP197C_MXL,
	.fw_little_endian = true,
};

static const struct of_device_id safexcel_of_match_table[] = {
	{
		.compatible = "inside-secure,safexcel-eip97ies",
		.data = &eip97ies_mrvl_data,
	},
	{
		.compatible = "inside-secure,safexcel-eip197b",
		.data = &eip197b_mrvl_data,
	},
	{
		.compatible = "inside-secure,safexcel-eip197d",
		.data = &eip197d_mrvl_data,
	},
	{
		.compatible = "inside-secure,safexcel-eip197c-mxl",
		.data = &eip197c_mxl_data,
	},
	/* For backward compatibility and intended for generic use */
	{
		.compatible = "inside-secure,safexcel-eip97",
		.data = &eip97ies_mrvl_data,
	},
	{
		.compatible = "inside-secure,safexcel-eip197",
		.data = &eip197b_mrvl_data,
	},
	{},
};

MODULE_DEVICE_TABLE(of, safexcel_of_match_table);

static struct platform_driver crypto_safexcel = {
	.probe = safexcel_probe,
	.remove = safexcel_remove,
	.driver = {
		.name = "crypto-safexcel",
		.of_match_table = safexcel_of_match_table,
	},
};

/* PCIE devices - i.e. Inside Secure development boards */

static int safexcel_pci_probe(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct safexcel_crypto_priv *priv;
	void __iomem *pciebase;
	int rc;
	u32 val;

	dev_dbg(dev, "Probing PCIE device: vendor %04x, device %04x, subv %04x, subdev %04x, ctxt %lx\n",
		ent->vendor, ent->device, ent->subvendor,
		ent->subdevice, ent->driver_data);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;
	priv->data = (struct safexcel_priv_data *)ent->driver_data;

	pci_set_drvdata(pdev, priv);

	/* enable the device */
	rc = pcim_enable_device(pdev);
	if (rc) {
		dev_err(dev, "Failed to enable PCI device\n");
		return rc;
	}

	/* take ownership of PCI BAR0 */
	rc = pcim_iomap_regions(pdev, 1, "crypto_safexcel");
	if (rc) {
		dev_err(dev, "Failed to map IO region for BAR0\n");
		return rc;
	}
	priv->base = pcim_iomap_table(pdev)[0];

	if (priv->data->version == EIP197_DEVBRD) {
		dev_dbg(dev, "Device identified as FPGA based development board - applying HW reset\n");

		rc = pcim_iomap_regions(pdev, 4, "crypto_safexcel");
		if (rc) {
			dev_err(dev, "Failed to map IO region for BAR4\n");
			return rc;
		}

		pciebase = pcim_iomap_table(pdev)[2];
		val = readl(pciebase + EIP197_XLX_IRQ_BLOCK_ID_ADDR);
		if ((val >> 16) == EIP197_XLX_IRQ_BLOCK_ID_VALUE) {
			dev_dbg(dev, "Detected Xilinx PCIE IRQ block version %d, multiple MSI support enabled\n",
				(val & 0xff));

			/* Setup MSI identity map mapping */
			writel(EIP197_XLX_USER_VECT_LUT0_IDENT,
			       pciebase + EIP197_XLX_USER_VECT_LUT0_ADDR);
			writel(EIP197_XLX_USER_VECT_LUT1_IDENT,
			       pciebase + EIP197_XLX_USER_VECT_LUT1_ADDR);
			writel(EIP197_XLX_USER_VECT_LUT2_IDENT,
			       pciebase + EIP197_XLX_USER_VECT_LUT2_ADDR);
			writel(EIP197_XLX_USER_VECT_LUT3_IDENT,
			       pciebase + EIP197_XLX_USER_VECT_LUT3_ADDR);

			/* Enable all device interrupts */
			writel(GENMASK(31, 0),
			       pciebase + EIP197_XLX_USER_INT_ENB_MSK);
		} else {
			dev_err(dev, "Unrecognised IRQ block identifier %x\n",
				val);
			return -ENODEV;
		}

		/* HW reset FPGA dev board */
		/* assert reset */
		writel(1, priv->base + EIP197_XLX_GPIO_BASE);
		wmb(); /* maintain strict ordering for accesses here */
		/* deassert reset */
		writel(0, priv->base + EIP197_XLX_GPIO_BASE);
		wmb(); /* maintain strict ordering for accesses here */
	}

	/* enable bus mastering */
	pci_set_master(pdev);

	/* Generic EIP97/EIP197 device probing */
	rc = safexcel_probe_generic(pdev, priv, 1);
	return rc;
}

static void safexcel_pci_remove(struct pci_dev *pdev)
{
	struct safexcel_crypto_priv *priv = pci_get_drvdata(pdev);
	int i;

	safexcel_unregister_algorithms(priv);

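	/* Stop the per-ring dequeue workqueues before resetting the rings */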
	for (i = 0; i < priv->config.rings; i++)
		destroy_workqueue(priv->ring[i].workqueue);

	safexcel_hw_reset_rings(priv);
}

static const struct pci_device_id safexcel_pci_ids[] = {
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_XILINX, 0x9038,
			       0x16ae, 0xc522),
		.driver_data = (kernel_ulong_t)&eip197_devbrd_data,
	},
	{},
};

MODULE_DEVICE_TABLE(pci, safexcel_pci_ids);

static struct pci_driver safexcel_pci_driver = {
	.name = "crypto-safexcel",
	.id_table = safexcel_pci_ids,
	.probe = safexcel_pci_probe,
	.remove = safexcel_pci_remove,
};

static int __init safexcel_init(void)
{
	int ret;

	/* Register PCI driver */
	ret = pci_register_driver(&safexcel_pci_driver);

	/* Register platform driver */
	if (IS_ENABLED(CONFIG_OF) && !ret) {
		ret = platform_driver_register(&crypto_safexcel);
		if (ret)
			pci_unregister_driver(&safexcel_pci_driver);
	}

	return ret;
}

static void __exit safexcel_exit(void)
{
	/* Unregister platform driver */
	if (IS_ENABLED(CONFIG_OF))
		platform_driver_unregister(&crypto_safexcel);

	/* Unregister PCI driver if successfully registered before */
	pci_unregister_driver(&safexcel_pci_driver);
}

module_init(safexcel_init);
module_exit(safexcel_exit);

MODULE_AUTHOR("Antoine Tenart <[email protected]>");
MODULE_AUTHOR("Ofer Heifetz <[email protected]>");
MODULE_AUTHOR("Igal Liberman <[email protected]>");
MODULE_DESCRIPTION("Support for SafeXcel cryptographic engines: EIP97 & EIP197");
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS("CRYPTO_INTERNAL");

MODULE_FIRMWARE("ifpp.bin");
MODULE_FIRMWARE("ipue.bin");
MODULE_FIRMWARE("inside-secure/eip197b/ifpp.bin");
MODULE_FIRMWARE("inside-secure/eip197b/ipue.bin");
MODULE_FIRMWARE("inside-secure/eip197d/ifpp.bin");
MODULE_FIRMWARE("inside-secure/eip197d/ipue.bin");
MODULE_FIRMWARE("inside-secure/eip197_minifw/ifpp.bin");
MODULE_FIRMWARE("inside-secure/eip197_minifw/ipue.bin");