Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/drivers/crypto/marvell/cesa/cesa.h
26288 views
1
/* SPDX-License-Identifier: GPL-2.0 */
2
#ifndef __MARVELL_CESA_H__
3
#define __MARVELL_CESA_H__
4
5
#include <crypto/internal/hash.h>
6
#include <crypto/internal/skcipher.h>
7
8
#include <linux/dma-direction.h>
9
#include <linux/dmapool.h>
10
11
#define CESA_ENGINE_OFF(i) (((i) * 0x2000))
12
13
#define CESA_TDMA_BYTE_CNT 0x800
14
#define CESA_TDMA_SRC_ADDR 0x810
15
#define CESA_TDMA_DST_ADDR 0x820
16
#define CESA_TDMA_NEXT_ADDR 0x830
17
18
#define CESA_TDMA_CONTROL 0x840
19
#define CESA_TDMA_DST_BURST GENMASK(2, 0)
20
#define CESA_TDMA_DST_BURST_32B 3
21
#define CESA_TDMA_DST_BURST_128B 4
22
#define CESA_TDMA_OUT_RD_EN BIT(4)
23
#define CESA_TDMA_SRC_BURST GENMASK(8, 6)
24
#define CESA_TDMA_SRC_BURST_32B (3 << 6)
25
#define CESA_TDMA_SRC_BURST_128B (4 << 6)
26
#define CESA_TDMA_CHAIN BIT(9)
27
#define CESA_TDMA_BYTE_SWAP BIT(11)
28
#define CESA_TDMA_NO_BYTE_SWAP BIT(11)
29
#define CESA_TDMA_EN BIT(12)
30
#define CESA_TDMA_FETCH_ND BIT(13)
31
#define CESA_TDMA_ACT BIT(14)
32
33
#define CESA_TDMA_CUR 0x870
34
#define CESA_TDMA_ERROR_CAUSE 0x8c8
35
#define CESA_TDMA_ERROR_MSK 0x8cc
36
37
#define CESA_TDMA_WINDOW_BASE(x) (((x) * 0x8) + 0xa00)
38
#define CESA_TDMA_WINDOW_CTRL(x) (((x) * 0x8) + 0xa04)
39
40
#define CESA_IVDIG(x) (0xdd00 + ((x) * 4) + \
41
(((x) < 5) ? 0 : 0x14))
42
43
#define CESA_SA_CMD 0xde00
44
#define CESA_SA_CMD_EN_CESA_SA_ACCL0 BIT(0)
45
#define CESA_SA_CMD_EN_CESA_SA_ACCL1 BIT(1)
46
#define CESA_SA_CMD_DISABLE_SEC BIT(2)
47
48
#define CESA_SA_DESC_P0 0xde04
49
50
#define CESA_SA_DESC_P1 0xde14
51
52
#define CESA_SA_CFG 0xde08
53
#define CESA_SA_CFG_STOP_DIG_ERR GENMASK(1, 0)
54
#define CESA_SA_CFG_DIG_ERR_CONT 0
55
#define CESA_SA_CFG_DIG_ERR_SKIP 1
56
#define CESA_SA_CFG_DIG_ERR_STOP 3
57
#define CESA_SA_CFG_CH0_W_IDMA BIT(7)
58
#define CESA_SA_CFG_CH1_W_IDMA BIT(8)
59
#define CESA_SA_CFG_ACT_CH0_IDMA BIT(9)
60
#define CESA_SA_CFG_ACT_CH1_IDMA BIT(10)
61
#define CESA_SA_CFG_MULTI_PKT BIT(11)
62
#define CESA_SA_CFG_PARA_DIS BIT(13)
63
64
#define CESA_SA_ACCEL_STATUS 0xde0c
65
#define CESA_SA_ST_ACT_0 BIT(0)
66
#define CESA_SA_ST_ACT_1 BIT(1)
67
68
/*
69
* CESA_SA_FPGA_INT_STATUS looks like an FPGA leftover and is documented only
70
* in Errata 4.12. It looks like that it was part of an IRQ-controller in FPGA
71
* and someone forgot to remove it while switching to the core and moving to
72
* CESA_SA_INT_STATUS.
73
*/
74
#define CESA_SA_FPGA_INT_STATUS 0xdd68
75
#define CESA_SA_INT_STATUS 0xde20
76
#define CESA_SA_INT_AUTH_DONE BIT(0)
77
#define CESA_SA_INT_DES_E_DONE BIT(1)
78
#define CESA_SA_INT_AES_E_DONE BIT(2)
79
#define CESA_SA_INT_AES_D_DONE BIT(3)
80
#define CESA_SA_INT_ENC_DONE BIT(4)
81
#define CESA_SA_INT_ACCEL0_DONE BIT(5)
82
#define CESA_SA_INT_ACCEL1_DONE BIT(6)
83
#define CESA_SA_INT_ACC0_IDMA_DONE BIT(7)
84
#define CESA_SA_INT_ACC1_IDMA_DONE BIT(8)
85
#define CESA_SA_INT_IDMA_DONE BIT(9)
86
#define CESA_SA_INT_IDMA_OWN_ERR BIT(10)
87
88
#define CESA_SA_INT_MSK 0xde24
89
90
#define CESA_SA_DESC_CFG_OP_MAC_ONLY 0
91
#define CESA_SA_DESC_CFG_OP_CRYPT_ONLY 1
92
#define CESA_SA_DESC_CFG_OP_MAC_CRYPT 2
93
#define CESA_SA_DESC_CFG_OP_CRYPT_MAC 3
94
#define CESA_SA_DESC_CFG_OP_MSK GENMASK(1, 0)
95
#define CESA_SA_DESC_CFG_MACM_SHA256 (1 << 4)
96
#define CESA_SA_DESC_CFG_MACM_HMAC_SHA256 (3 << 4)
97
#define CESA_SA_DESC_CFG_MACM_MD5 (4 << 4)
98
#define CESA_SA_DESC_CFG_MACM_SHA1 (5 << 4)
99
#define CESA_SA_DESC_CFG_MACM_HMAC_MD5 (6 << 4)
100
#define CESA_SA_DESC_CFG_MACM_HMAC_SHA1 (7 << 4)
101
#define CESA_SA_DESC_CFG_MACM_MSK GENMASK(6, 4)
102
#define CESA_SA_DESC_CFG_CRYPTM_DES (1 << 8)
103
#define CESA_SA_DESC_CFG_CRYPTM_3DES (2 << 8)
104
#define CESA_SA_DESC_CFG_CRYPTM_AES (3 << 8)
105
#define CESA_SA_DESC_CFG_CRYPTM_MSK GENMASK(9, 8)
106
#define CESA_SA_DESC_CFG_DIR_ENC (0 << 12)
107
#define CESA_SA_DESC_CFG_DIR_DEC (1 << 12)
108
#define CESA_SA_DESC_CFG_CRYPTCM_ECB (0 << 16)
109
#define CESA_SA_DESC_CFG_CRYPTCM_CBC (1 << 16)
110
#define CESA_SA_DESC_CFG_CRYPTCM_MSK BIT(16)
111
#define CESA_SA_DESC_CFG_3DES_EEE (0 << 20)
112
#define CESA_SA_DESC_CFG_3DES_EDE (1 << 20)
113
#define CESA_SA_DESC_CFG_AES_LEN_128 (0 << 24)
114
#define CESA_SA_DESC_CFG_AES_LEN_192 (1 << 24)
115
#define CESA_SA_DESC_CFG_AES_LEN_256 (2 << 24)
116
#define CESA_SA_DESC_CFG_AES_LEN_MSK GENMASK(25, 24)
117
#define CESA_SA_DESC_CFG_NOT_FRAG (0 << 30)
118
#define CESA_SA_DESC_CFG_FIRST_FRAG (1 << 30)
119
#define CESA_SA_DESC_CFG_LAST_FRAG (2 << 30)
120
#define CESA_SA_DESC_CFG_MID_FRAG (3 << 30)
121
#define CESA_SA_DESC_CFG_FRAG_MSK GENMASK(31, 30)
122
123
/*
124
* /-----------\ 0
125
* | ACCEL CFG | 4 * 8
126
* |-----------| 0x20
127
* | CRYPT KEY | 8 * 4
128
* |-----------| 0x40
129
* | IV IN | 4 * 4
130
* |-----------| 0x40 (inplace)
131
* | IV BUF | 4 * 4
132
* |-----------| 0x80
133
* | DATA IN | 16 * x (max ->max_req_size)
134
* |-----------| 0x80 (inplace operation)
135
* | DATA OUT | 16 * x (max ->max_req_size)
136
* \-----------/ SRAM size
137
*/
138
139
/*
140
* Hashing memory map:
141
* /-----------\ 0
142
* | ACCEL CFG | 4 * 8
143
* |-----------| 0x20
144
* | Inner IV | 8 * 4
145
* |-----------| 0x40
146
* | Outer IV | 8 * 4
147
* |-----------| 0x60
148
* | Output BUF| 8 * 4
149
* |-----------| 0x80
150
* | DATA IN | 64 * x (max ->max_req_size)
151
* \-----------/ SRAM size
152
*/
153
154
#define CESA_SA_CFG_SRAM_OFFSET 0x00
155
#define CESA_SA_DATA_SRAM_OFFSET 0x80
156
157
#define CESA_SA_CRYPT_KEY_SRAM_OFFSET 0x20
158
#define CESA_SA_CRYPT_IV_SRAM_OFFSET 0x40
159
160
#define CESA_SA_MAC_IIV_SRAM_OFFSET 0x20
161
#define CESA_SA_MAC_OIV_SRAM_OFFSET 0x40
162
#define CESA_SA_MAC_DIG_SRAM_OFFSET 0x60
163
164
#define CESA_SA_DESC_CRYPT_DATA(offset) \
165
cpu_to_le32((CESA_SA_DATA_SRAM_OFFSET + (offset)) | \
166
((CESA_SA_DATA_SRAM_OFFSET + (offset)) << 16))
167
168
#define CESA_SA_DESC_CRYPT_IV(offset) \
169
cpu_to_le32((CESA_SA_CRYPT_IV_SRAM_OFFSET + (offset)) | \
170
((CESA_SA_CRYPT_IV_SRAM_OFFSET + (offset)) << 16))
171
172
#define CESA_SA_DESC_CRYPT_KEY(offset) \
173
cpu_to_le32(CESA_SA_CRYPT_KEY_SRAM_OFFSET + (offset))
174
175
#define CESA_SA_DESC_MAC_DATA(offset) \
176
cpu_to_le32(CESA_SA_DATA_SRAM_OFFSET + (offset))
177
#define CESA_SA_DESC_MAC_DATA_MSK cpu_to_le32(GENMASK(15, 0))
178
179
#define CESA_SA_DESC_MAC_TOTAL_LEN(total_len) cpu_to_le32((total_len) << 16)
180
#define CESA_SA_DESC_MAC_TOTAL_LEN_MSK cpu_to_le32(GENMASK(31, 16))
181
182
#define CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX 0xffff
183
184
#define CESA_SA_DESC_MAC_DIGEST(offset) \
185
cpu_to_le32(CESA_SA_MAC_DIG_SRAM_OFFSET + (offset))
186
#define CESA_SA_DESC_MAC_DIGEST_MSK cpu_to_le32(GENMASK(15, 0))
187
188
#define CESA_SA_DESC_MAC_FRAG_LEN(frag_len) cpu_to_le32((frag_len) << 16)
189
#define CESA_SA_DESC_MAC_FRAG_LEN_MSK cpu_to_le32(GENMASK(31, 16))
190
191
#define CESA_SA_DESC_MAC_IV(offset) \
192
cpu_to_le32((CESA_SA_MAC_IIV_SRAM_OFFSET + (offset)) | \
193
((CESA_SA_MAC_OIV_SRAM_OFFSET + (offset)) << 16))
194
195
#define CESA_SA_SRAM_SIZE 2048
196
#define CESA_SA_SRAM_PAYLOAD_SIZE (cesa_dev->sram_size - \
197
CESA_SA_DATA_SRAM_OFFSET)
198
199
#define CESA_SA_DEFAULT_SRAM_SIZE 2048
200
#define CESA_SA_MIN_SRAM_SIZE 1024
201
202
#define CESA_SA_SRAM_MSK (2048 - 1)
203
204
#define CESA_MAX_HASH_BLOCK_SIZE 64
205
#define CESA_HASH_BLOCK_SIZE_MSK (CESA_MAX_HASH_BLOCK_SIZE - 1)
206
207
/**
208
* struct mv_cesa_sec_accel_desc - security accelerator descriptor
209
* @config: engine config
210
* @enc_p: input and output data pointers for a cipher operation
211
* @enc_len: cipher operation length
212
* @enc_key_p: cipher key pointer
213
* @enc_iv: cipher IV pointers
214
* @mac_src_p: input pointer and total hash length
215
* @mac_digest: digest pointer and hash operation length
216
* @mac_iv: hmac IV pointers
217
*
218
* Structure passed to the CESA engine to describe the crypto operation
219
* to be executed.
220
*/
221
struct mv_cesa_sec_accel_desc {
222
__le32 config;
223
__le32 enc_p;
224
__le32 enc_len;
225
__le32 enc_key_p;
226
__le32 enc_iv;
227
__le32 mac_src_p;
228
__le32 mac_digest;
229
__le32 mac_iv;
230
};
231
232
/**
233
* struct mv_cesa_skcipher_op_ctx - cipher operation context
234
* @key: cipher key
235
* @iv: cipher IV
236
*
237
* Context associated to a cipher operation.
238
*/
239
struct mv_cesa_skcipher_op_ctx {
240
__le32 key[8];
241
u32 iv[4];
242
};
243
244
/**
 * struct mv_cesa_hash_op_ctx - hash or hmac operation context
 * @iv: IV state (inner and outer IVs in the hmac case)
 * @hash: intermediate/final digest value
 *
 * Context associated to a hash or hmac operation.
 */
struct mv_cesa_hash_op_ctx {
	u32 iv[16];
	__le32 hash[8];
};
255
256
/**
257
* struct mv_cesa_op_ctx - crypto operation context
258
* @desc: CESA descriptor
259
* @ctx: context associated to the crypto operation
260
*
261
* Context associated to a crypto operation.
262
*/
263
struct mv_cesa_op_ctx {
264
struct mv_cesa_sec_accel_desc desc;
265
union {
266
struct mv_cesa_skcipher_op_ctx skcipher;
267
struct mv_cesa_hash_op_ctx hash;
268
} ctx;
269
};
270
271
/* TDMA descriptor flags */
272
#define CESA_TDMA_DST_IN_SRAM BIT(31)
273
#define CESA_TDMA_SRC_IN_SRAM BIT(30)
274
#define CESA_TDMA_END_OF_REQ BIT(29)
275
#define CESA_TDMA_BREAK_CHAIN BIT(28)
276
#define CESA_TDMA_SET_STATE BIT(27)
277
#define CESA_TDMA_TYPE_MSK GENMASK(26, 0)
278
#define CESA_TDMA_DUMMY 0
279
#define CESA_TDMA_DATA 1
280
#define CESA_TDMA_OP 2
281
#define CESA_TDMA_RESULT 3
282
283
/**
284
* struct mv_cesa_tdma_desc - TDMA descriptor
285
* @byte_cnt: number of bytes to transfer
286
* @src: DMA address of the source
287
* @dst: DMA address of the destination
288
* @next_dma: DMA address of the next TDMA descriptor
289
* @cur_dma: DMA address of this TDMA descriptor
290
* @next: pointer to the next TDMA descriptor
291
* @op: CESA operation attached to this TDMA descriptor
292
* @data: raw data attached to this TDMA descriptor
293
* @flags: flags describing the TDMA transfer. See the
294
* "TDMA descriptor flags" section above
295
*
296
* TDMA descriptor used to create a transfer chain describing a crypto
297
* operation.
298
*/
299
struct mv_cesa_tdma_desc {
300
__le32 byte_cnt;
301
union {
302
__le32 src;
303
u32 src_dma;
304
};
305
union {
306
__le32 dst;
307
u32 dst_dma;
308
};
309
__le32 next_dma;
310
311
/* Software state */
312
dma_addr_t cur_dma;
313
struct mv_cesa_tdma_desc *next;
314
union {
315
struct mv_cesa_op_ctx *op;
316
void *data;
317
};
318
u32 flags;
319
};
320
321
/**
322
* struct mv_cesa_sg_dma_iter - scatter-gather iterator
323
* @dir: transfer direction
324
* @sg: scatter list
325
* @offset: current position in the scatter list
326
* @op_offset: current position in the crypto operation
327
*
328
* Iterator used to iterate over a scatterlist while creating a TDMA chain for
329
* a crypto operation.
330
*/
331
struct mv_cesa_sg_dma_iter {
332
enum dma_data_direction dir;
333
struct scatterlist *sg;
334
unsigned int offset;
335
unsigned int op_offset;
336
};
337
338
/**
339
* struct mv_cesa_dma_iter - crypto operation iterator
340
* @len: the crypto operation length
341
* @offset: current position in the crypto operation
342
* @op_len: sub-operation length (the crypto engine can only act on 2kb
343
* chunks)
344
*
345
* Iterator used to create a TDMA chain for a given crypto operation.
346
*/
347
struct mv_cesa_dma_iter {
348
unsigned int len;
349
unsigned int offset;
350
unsigned int op_len;
351
};
352
353
/**
354
* struct mv_cesa_tdma_chain - TDMA chain
355
* @first: first entry in the TDMA chain
356
* @last: last entry in the TDMA chain
357
*
358
* Stores a TDMA chain for a specific crypto operation.
359
*/
360
struct mv_cesa_tdma_chain {
361
struct mv_cesa_tdma_desc *first;
362
struct mv_cesa_tdma_desc *last;
363
};
364
365
struct mv_cesa_engine;
366
367
/**
 * struct mv_cesa_caps - CESA device capabilities
 * @nengines: number of engines
 * @has_tdma: whether this device has a TDMA block
 * @cipher_algs: supported cipher algorithms
 * @ncipher_algs: number of supported cipher algorithms
 * @ahash_algs: supported hash algorithms
 * @nahash_algs: number of supported hash algorithms
 *
 * Structure used to describe CESA device capabilities.
 */
struct mv_cesa_caps {
	int nengines;
	bool has_tdma;
	struct skcipher_alg **cipher_algs;
	int ncipher_algs;
	struct ahash_alg **ahash_algs;
	int nahash_algs;
};
386
387
/**
388
* struct mv_cesa_dev_dma - DMA pools
389
* @tdma_desc_pool: TDMA desc pool
390
* @op_pool: crypto operation pool
391
* @cache_pool: data cache pool (used by hash implementation when the
392
* hash request is smaller than the hash block size)
393
* @padding_pool: padding pool (used by hash implementation when hardware
394
* padding cannot be used)
395
*
396
* Structure containing the different DMA pools used by this driver.
397
*/
398
struct mv_cesa_dev_dma {
399
struct dma_pool *tdma_desc_pool;
400
struct dma_pool *op_pool;
401
struct dma_pool *cache_pool;
402
struct dma_pool *padding_pool;
403
};
404
405
/**
 * struct mv_cesa_dev - CESA device
 * @caps: device capabilities
 * @regs: device registers
 * @dev: pointer to the underlying struct device
 * @sram_size: usable SRAM size
 * @lock: device lock
 * @engines: array of engines
 * @dma: dma pools
 *
 * Structure storing CESA device information.
 */
struct mv_cesa_dev {
	const struct mv_cesa_caps *caps;
	void __iomem *regs;
	struct device *dev;
	unsigned int sram_size;
	spinlock_t lock;
	struct mv_cesa_engine *engines;
	struct mv_cesa_dev_dma *dma;
};
425
426
/**
 * struct mv_cesa_engine - CESA engine
 * @id: engine id
 * @regs: engine registers
 * @sram: SRAM memory region
 * @sram_pool: SRAM memory region from pool
 * @sram_dma: DMA address of the SRAM memory region
 * @lock: engine lock
 * @req: current crypto request
 * @clk: engine clk
 * @zclk: engine zclk
 * @max_req_len: maximum chunk length (useful to create the TDMA chain)
 * @int_mask: interrupt mask cache
 * @pool: memory pool pointing to the memory region reserved in
 *	  SRAM
 * @queue: fifo of the pending crypto requests
 * @load: engine load counter, useful for load balancing
 * @chain_hw: list of the current tdma descriptors being processed
 *	      by the hardware.
 * @chain_sw: list of the current tdma descriptors that will be
 *	      submitted to the hardware.
 * @complete_queue: fifo of the processed requests by the engine
 * @irq: engine IRQ number
 *
 * Structure storing CESA engine information.
 */
struct mv_cesa_engine {
	int id;
	void __iomem *regs;
	union {
		void __iomem *sram;
		void *sram_pool;
	};
	dma_addr_t sram_dma;
	spinlock_t lock;
	struct crypto_async_request *req;
	struct clk *clk;
	struct clk *zclk;
	size_t max_req_len;
	u32 int_mask;
	struct gen_pool *pool;
	struct crypto_queue queue;
	atomic_t load;
	struct mv_cesa_tdma_chain chain_hw;
	struct mv_cesa_tdma_chain chain_sw;
	struct list_head complete_queue;
	int irq;
};
473
474
/**
 * struct mv_cesa_req_ops - CESA request operations
 * @process: process a request chunk result (should return 0 if the
 *	     operation is complete, -EINPROGRESS if it needs more steps or
 *	     an error code)
 * @step: launch the crypto operation on the next chunk
 * @cleanup: cleanup the crypto request (release associated data)
 * @complete: complete the request, i.e copy result or context from sram when
 *	      needed.
 */
struct mv_cesa_req_ops {
	int (*process)(struct crypto_async_request *req, u32 status);
	void (*step)(struct crypto_async_request *req);
	void (*cleanup)(struct crypto_async_request *req);
	void (*complete)(struct crypto_async_request *req);
};
490
491
/**
492
* struct mv_cesa_ctx - CESA operation context
493
* @ops: crypto operations
494
*
495
* Base context structure inherited by operation specific ones.
496
*/
497
struct mv_cesa_ctx {
498
const struct mv_cesa_req_ops *ops;
499
};
500
501
/**
502
* struct mv_cesa_hash_ctx - CESA hash operation context
503
* @base: base context structure
504
*
505
* Hash context structure.
506
*/
507
struct mv_cesa_hash_ctx {
508
struct mv_cesa_ctx base;
509
};
510
511
/**
 * struct mv_cesa_hmac_ctx - CESA hmac operation context
 * @base: base context structure
 * @iv: initialization vectors
 *
 * HMAC context structure.
 */
struct mv_cesa_hmac_ctx {
	struct mv_cesa_ctx base;
	__be32 iv[16];
};
522
523
/**
524
* enum mv_cesa_req_type - request type definitions
525
* @CESA_STD_REQ: standard request
526
* @CESA_DMA_REQ: DMA request
527
*/
528
enum mv_cesa_req_type {
529
CESA_STD_REQ,
530
CESA_DMA_REQ,
531
};
532
533
/**
534
* struct mv_cesa_req - CESA request
535
* @engine: engine associated with this request
536
* @chain: list of tdma descriptors associated with this request
537
*/
538
struct mv_cesa_req {
539
struct mv_cesa_engine *engine;
540
struct mv_cesa_tdma_chain chain;
541
};
542
543
/**
544
* struct mv_cesa_sg_std_iter - CESA scatter-gather iterator for standard
545
* requests
546
* @iter: sg mapping iterator
547
* @offset: current offset in the SG entry mapped in memory
548
*/
549
struct mv_cesa_sg_std_iter {
550
struct sg_mapping_iter iter;
551
unsigned int offset;
552
};
553
554
/**
 * struct mv_cesa_skcipher_std_req - cipher standard request
 * @op: operation context
 * @offset: current operation offset
 * @size: size of the crypto operation
 * @skip_ctx: when true the operation context copy can be skipped
 *	      (NOTE(review): presumably because it is already in SRAM —
 *	      confirm against mv_cesa_dma_add_op() usage)
 */
struct mv_cesa_skcipher_std_req {
	struct mv_cesa_op_ctx op;
	unsigned int offset;
	unsigned int size;
	bool skip_ctx;
};
566
567
/**
 * struct mv_cesa_skcipher_req - cipher request
 * @base: base request information
 * @std: standard (non-DMA) request information
 * @src_nents: number of entries in the src sg list
 * @dst_nents: number of entries in the dest sg list
 */
struct mv_cesa_skcipher_req {
	struct mv_cesa_req base;
	struct mv_cesa_skcipher_std_req std;
	int src_nents;
	int dst_nents;
};
579
580
/**
581
* struct mv_cesa_ahash_std_req - standard hash request
582
* @offset: current operation offset
583
*/
584
struct mv_cesa_ahash_std_req {
585
unsigned int offset;
586
};
587
588
/**
 * struct mv_cesa_ahash_dma_req - DMA hash request
 * @padding: padding buffer
 * @padding_dma: DMA address of the padding buffer
 * @cache: cache buffer
 * @cache_dma: DMA address of the cache buffer
 */
struct mv_cesa_ahash_dma_req {
	u8 *padding;
	dma_addr_t padding_dma;
	u8 *cache;
	dma_addr_t cache_dma;
};
600
601
/**
 * struct mv_cesa_ahash_req - hash request
 * @base: base request information
 * @req: type specific request information
 * @op_tmpl: operation template used to build per-chunk operations
 * @cache: cache buffer
 * @cache_ptr: write pointer in the cache buffer
 * @len: hash total length
 * @src_nents: number of entries in the scatterlist
 * @last_req: define whether the current operation is the last one
 *	      or not
 * @algo_le: whether the algorithm length padding is little-endian
 *	     (NOTE(review): presumably true for MD5, false for the SHA
 *	     family — confirm against the padding code)
 * @state: hash state
 */
struct mv_cesa_ahash_req {
	struct mv_cesa_req base;
	union {
		struct mv_cesa_ahash_dma_req dma;
		struct mv_cesa_ahash_std_req std;
	} req;
	struct mv_cesa_op_ctx op_tmpl;
	u8 cache[CESA_MAX_HASH_BLOCK_SIZE];
	unsigned int cache_ptr;
	u64 len;
	int src_nents;
	bool last_req;
	bool algo_le;
	u32 state[8];
};
627
628
/* CESA functions */
629
630
extern struct mv_cesa_dev *cesa_dev;
631
632
633
/* Queue a processed request on the engine's completion fifo. */
static inline void
mv_cesa_engine_enqueue_complete_request(struct mv_cesa_engine *engine,
					struct crypto_async_request *req)
{
	list_add_tail(&req->list, &engine->complete_queue);
}

/*
 * Pop the oldest processed request from the engine's completion fifo,
 * or return NULL when the fifo is empty.
 * NOTE(review): no locking here — callers presumably serialize access
 * to the engine; confirm at call sites.
 */
static inline struct crypto_async_request *
mv_cesa_engine_dequeue_complete_request(struct mv_cesa_engine *engine)
{
	struct crypto_async_request *req;

	req = list_first_entry_or_null(&engine->complete_queue,
				       struct crypto_async_request,
				       list);
	if (req)
		list_del(&req->list);

	return req;
}
653
654
655
static inline enum mv_cesa_req_type
656
mv_cesa_req_get_type(struct mv_cesa_req *req)
657
{
658
return req->chain.first ? CESA_DMA_REQ : CESA_STD_REQ;
659
}
660
661
/* Read-modify-write the config word: clear @mask bits, set @cfg bits. */
static inline void mv_cesa_update_op_cfg(struct mv_cesa_op_ctx *op,
					 u32 cfg, u32 mask)
{
	__le32 config = op->desc.config;

	config &= cpu_to_le32(~mask);
	config |= cpu_to_le32(cfg);
	op->desc.config = config;
}
667
668
/* Read back the engine config word in CPU endianness. */
static inline u32 mv_cesa_get_op_cfg(const struct mv_cesa_op_ctx *op)
{
	return le32_to_cpu(op->desc.config);
}

/* Overwrite the whole engine config word. */
static inline void mv_cesa_set_op_cfg(struct mv_cesa_op_ctx *op, u32 cfg)
{
	op->desc.config = cpu_to_le32(cfg);
}
677
678
/*
 * Rebase all SRAM pointers in the descriptor for the given engine:
 * each engine's SRAM is mapped at a (CESA_SA_SRAM_MSK + 1)-aligned DMA
 * address, so the low bits give this engine's offset.
 */
static inline void mv_cesa_adjust_op(struct mv_cesa_engine *engine,
				     struct mv_cesa_op_ctx *op)
{
	u32 offset = engine->sram_dma & CESA_SA_SRAM_MSK;

	op->desc.enc_p = CESA_SA_DESC_CRYPT_DATA(offset);
	op->desc.enc_key_p = CESA_SA_DESC_CRYPT_KEY(offset);
	op->desc.enc_iv = CESA_SA_DESC_CRYPT_IV(offset);
	/* mac_src_p/mac_digest also carry lengths: patch only the pointer bits. */
	op->desc.mac_src_p &= ~CESA_SA_DESC_MAC_DATA_MSK;
	op->desc.mac_src_p |= CESA_SA_DESC_MAC_DATA(offset);
	op->desc.mac_digest &= ~CESA_SA_DESC_MAC_DIGEST_MSK;
	op->desc.mac_digest |= CESA_SA_DESC_MAC_DIGEST(offset);
	op->desc.mac_iv = CESA_SA_DESC_MAC_IV(offset);
}
692
693
/* Set the cipher operation length (enc_len field of the descriptor). */
static inline void mv_cesa_set_crypt_op_len(struct mv_cesa_op_ctx *op, int len)
{
	op->desc.enc_len = cpu_to_le32(len);
}

/* Set the total MAC length (upper 16 bits of mac_src_p). */
static inline void mv_cesa_set_mac_op_total_len(struct mv_cesa_op_ctx *op,
						int len)
{
	op->desc.mac_src_p &= ~CESA_SA_DESC_MAC_TOTAL_LEN_MSK;
	op->desc.mac_src_p |= CESA_SA_DESC_MAC_TOTAL_LEN(len);
}

/* Set the MAC fragment length (upper 16 bits of mac_digest). */
static inline void mv_cesa_set_mac_op_frag_len(struct mv_cesa_op_ctx *op,
					       int len)
{
	op->desc.mac_digest &= ~CESA_SA_DESC_MAC_FRAG_LEN_MSK;
	op->desc.mac_digest |= CESA_SA_DESC_MAC_FRAG_LEN(len);
}
711
712
/* Program the interrupt mask register, skipping redundant MMIO writes. */
static inline void mv_cesa_set_int_mask(struct mv_cesa_engine *engine,
					u32 int_mask)
{
	if (int_mask == engine->int_mask)
		return;

	writel_relaxed(int_mask, engine->regs + CESA_SA_INT_MSK);
	engine->int_mask = int_mask;	/* keep the cached copy in sync */
}

/* Return the cached interrupt mask (no register read). */
static inline u32 mv_cesa_get_int_mask(struct mv_cesa_engine *engine)
{
	return engine->int_mask;
}
726
727
static inline bool mv_cesa_mac_op_is_first_frag(const struct mv_cesa_op_ctx *op)
728
{
729
return (mv_cesa_get_op_cfg(op) & CESA_SA_DESC_CFG_FRAG_MSK) ==
730
CESA_SA_DESC_CFG_FIRST_FRAG;
731
}
732
733
int mv_cesa_queue_req(struct crypto_async_request *req,
734
struct mv_cesa_req *creq);
735
736
struct crypto_async_request *
737
mv_cesa_dequeue_req_locked(struct mv_cesa_engine *engine,
738
struct crypto_async_request **backlog);
739
740
static inline struct mv_cesa_engine *mv_cesa_select_engine(int weight)
741
{
742
int i;
743
u32 min_load = U32_MAX;
744
struct mv_cesa_engine *selected = NULL;
745
746
for (i = 0; i < cesa_dev->caps->nengines; i++) {
747
struct mv_cesa_engine *engine = cesa_dev->engines + i;
748
u32 load = atomic_read(&engine->load);
749
750
if (load < min_load) {
751
min_load = load;
752
selected = engine;
753
}
754
}
755
756
atomic_add(weight, &selected->load);
757
758
return selected;
759
}
760
761
/*
 * Helper function that indicates whether a crypto request needs to be
 * cleaned up or not after being enqueued using mv_cesa_queue_req().
 */
static inline int mv_cesa_req_needs_cleanup(struct crypto_async_request *req,
					    int ret)
{
	switch (ret) {
	case -EINPROGRESS:
		/*
		 * The queue still had some space, the request was queued
		 * normally, so there's no need to clean it up.
		 */
		return false;
	case -EBUSY:
		/*
		 * The queue had no space left, but since the request is
		 * flagged with CRYPTO_TFM_REQ_MAY_BACKLOG, it was added to
		 * the backlog and will be processed later. There's no need
		 * to clean it up.
		 */
		return false;
	default:
		/* Request wasn't queued, we need to clean it up */
		return true;
	}
}
787
788
/* TDMA functions */
789
790
/* Split a request of @len bytes into SRAM-payload sized sub-operations. */
static inline void mv_cesa_req_dma_iter_init(struct mv_cesa_dma_iter *iter,
					     unsigned int len)
{
	iter->len = len;
	iter->op_len = min(len, CESA_SA_SRAM_PAYLOAD_SIZE);
	iter->offset = 0;
}

/* Start iterating at the beginning of the scatterlist @sg. */
static inline void mv_cesa_sg_dma_iter_init(struct mv_cesa_sg_dma_iter *iter,
					    struct scatterlist *sg,
					    enum dma_data_direction dir)
{
	iter->op_offset = 0;
	iter->offset = 0;
	iter->sg = sg;
	iter->dir = dir;
}

/*
 * Length of the next transfer: bounded both by what remains in the
 * current sub-operation and by what remains in the current sg entry.
 */
static inline unsigned int
mv_cesa_req_dma_iter_transfer_len(struct mv_cesa_dma_iter *iter,
				  struct mv_cesa_sg_dma_iter *sgiter)
{
	return min(iter->op_len - sgiter->op_offset,
		   sg_dma_len(sgiter->sg) - sgiter->offset);
}
815
816
bool mv_cesa_req_dma_iter_next_transfer(struct mv_cesa_dma_iter *chain,
817
struct mv_cesa_sg_dma_iter *sgiter,
818
unsigned int len);
819
820
/* Advance to the next sub-operation chunk. */
static inline bool mv_cesa_req_dma_iter_next_op(struct mv_cesa_dma_iter *iter)
{
	iter->offset += iter->op_len;
	iter->op_len = min(iter->len - iter->offset,
			   CESA_SA_SRAM_PAYLOAD_SIZE);

	/* Non-zero (true) while there is still data left to process. */
	return iter->op_len;
}
828
829
void mv_cesa_dma_step(struct mv_cesa_req *dreq);
830
831
/*
 * Translate the engine interrupt status into a request result.
 * The completion bit is checked first: an ownership error is only
 * reported once the IDMA transfer is done.
 */
static inline int mv_cesa_dma_process(struct mv_cesa_req *dreq,
				      u32 status)
{
	if (!(status & CESA_SA_INT_ACC0_IDMA_DONE))
		return -EINPROGRESS;

	if (status & CESA_SA_INT_IDMA_OWN_ERR)
		return -EINVAL;

	return 0;
}
842
843
void mv_cesa_dma_prepare(struct mv_cesa_req *dreq,
844
struct mv_cesa_engine *engine);
845
void mv_cesa_dma_cleanup(struct mv_cesa_req *dreq);
846
void mv_cesa_tdma_chain(struct mv_cesa_engine *engine,
847
struct mv_cesa_req *dreq);
848
int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status);
849
850
851
/* Reset a TDMA chain: first/last pointers back to NULL. */
static inline void
mv_cesa_tdma_desc_iter_init(struct mv_cesa_tdma_chain *chain)
{
	memset(chain, 0, sizeof(*chain));
}
856
857
int mv_cesa_dma_add_result_op(struct mv_cesa_tdma_chain *chain, dma_addr_t src,
858
u32 size, u32 flags, gfp_t gfp_flags);
859
860
struct mv_cesa_op_ctx *mv_cesa_dma_add_op(struct mv_cesa_tdma_chain *chain,
861
const struct mv_cesa_op_ctx *op_templ,
862
bool skip_ctx,
863
gfp_t flags);
864
865
int mv_cesa_dma_add_data_transfer(struct mv_cesa_tdma_chain *chain,
866
dma_addr_t dst, dma_addr_t src, u32 size,
867
u32 flags, gfp_t gfp_flags);
868
869
int mv_cesa_dma_add_dummy_launch(struct mv_cesa_tdma_chain *chain, gfp_t flags);
870
int mv_cesa_dma_add_dummy_end(struct mv_cesa_tdma_chain *chain, gfp_t flags);
871
872
int mv_cesa_dma_add_op_transfers(struct mv_cesa_tdma_chain *chain,
873
struct mv_cesa_dma_iter *dma_iter,
874
struct mv_cesa_sg_dma_iter *sgiter,
875
gfp_t gfp_flags);
876
877
size_t mv_cesa_sg_copy(struct mv_cesa_engine *engine,
878
struct scatterlist *sgl, unsigned int nents,
879
unsigned int sram_off, size_t buflen, off_t skip,
880
bool to_sram);
881
882
/* Copy @buflen bytes from @sgl into the engine SRAM; see mv_cesa_sg_copy(). */
static inline size_t mv_cesa_sg_copy_to_sram(struct mv_cesa_engine *engine,
					     struct scatterlist *sgl,
					     unsigned int nents,
					     unsigned int sram_off,
					     size_t buflen, off_t skip)
{
	return mv_cesa_sg_copy(engine, sgl, nents, sram_off, buflen, skip,
			       true);
}

/* Copy @buflen bytes from the engine SRAM into @sgl; see mv_cesa_sg_copy(). */
static inline size_t mv_cesa_sg_copy_from_sram(struct mv_cesa_engine *engine,
					       struct scatterlist *sgl,
					       unsigned int nents,
					       unsigned int sram_off,
					       size_t buflen, off_t skip)
{
	return mv_cesa_sg_copy(engine, sgl, nents, sram_off, buflen, skip,
			       false);
}
901
902
/* Algorithm definitions */
903
904
extern struct ahash_alg mv_md5_alg;
905
extern struct ahash_alg mv_sha1_alg;
906
extern struct ahash_alg mv_sha256_alg;
907
extern struct ahash_alg mv_ahmac_md5_alg;
908
extern struct ahash_alg mv_ahmac_sha1_alg;
909
extern struct ahash_alg mv_ahmac_sha256_alg;
910
911
extern struct skcipher_alg mv_cesa_ecb_des_alg;
912
extern struct skcipher_alg mv_cesa_cbc_des_alg;
913
extern struct skcipher_alg mv_cesa_ecb_des3_ede_alg;
914
extern struct skcipher_alg mv_cesa_cbc_des3_ede_alg;
915
extern struct skcipher_alg mv_cesa_ecb_aes_alg;
916
extern struct skcipher_alg mv_cesa_cbc_aes_alg;
917
918
#endif /* __MARVELL_CESA_H__ */
919
920