GitHub Repository: awilliam/linux-vfio
Path: blob/master/drivers/crypto/hifn_795x.c
1
/*
2
* 2007+ Copyright (c) Evgeniy Polyakov <[email protected]>
3
* All rights reserved.
4
*
5
* This program is free software; you can redistribute it and/or modify
6
* it under the terms of the GNU General Public License as published by
7
* the Free Software Foundation; either version 2 of the License, or
8
* (at your option) any later version.
9
*
10
* This program is distributed in the hope that it will be useful,
11
* but WITHOUT ANY WARRANTY; without even the implied warranty of
12
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13
* GNU General Public License for more details.
14
*
15
* You should have received a copy of the GNU General Public License
16
* along with this program; if not, write to the Free Software
17
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18
*/
19
20
#include <linux/kernel.h>
21
#include <linux/module.h>
22
#include <linux/moduleparam.h>
23
#include <linux/mod_devicetable.h>
24
#include <linux/interrupt.h>
25
#include <linux/pci.h>
26
#include <linux/slab.h>
27
#include <linux/delay.h>
28
#include <linux/mm.h>
29
#include <linux/dma-mapping.h>
30
#include <linux/scatterlist.h>
31
#include <linux/highmem.h>
32
#include <linux/crypto.h>
33
#include <linux/hw_random.h>
34
#include <linux/ktime.h>
35
36
#include <crypto/algapi.h>
37
#include <crypto/des.h>
38
39
#include <asm/kmap_types.h>
40
41
//#define HIFN_DEBUG
42
43
#ifdef HIFN_DEBUG
44
#define dprintk(f, a...) printk(f, ##a)
45
#else
46
#define dprintk(f, a...) do {} while (0)
47
#endif
48
49
static char hifn_pll_ref[sizeof("extNNN")] = "ext";
50
module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444);
51
MODULE_PARM_DESC(hifn_pll_ref,
52
"PLL reference clock (pci[freq] or ext[freq], default ext)");
53
54
static atomic_t hifn_dev_number;
55
56
#define ACRYPTO_OP_DECRYPT 0
57
#define ACRYPTO_OP_ENCRYPT 1
58
#define ACRYPTO_OP_HMAC 2
59
#define ACRYPTO_OP_RNG 3
60
61
#define ACRYPTO_MODE_ECB 0
62
#define ACRYPTO_MODE_CBC 1
63
#define ACRYPTO_MODE_CFB 2
64
#define ACRYPTO_MODE_OFB 3
65
66
#define ACRYPTO_TYPE_AES_128 0
67
#define ACRYPTO_TYPE_AES_192 1
68
#define ACRYPTO_TYPE_AES_256 2
69
#define ACRYPTO_TYPE_3DES 3
70
#define ACRYPTO_TYPE_DES 4
71
72
#define PCI_VENDOR_ID_HIFN 0x13A3
73
#define PCI_DEVICE_ID_HIFN_7955 0x0020
74
#define PCI_DEVICE_ID_HIFN_7956 0x001d
75
76
/* I/O region sizes */
77
78
#define HIFN_BAR0_SIZE 0x1000
79
#define HIFN_BAR1_SIZE 0x2000
80
#define HIFN_BAR2_SIZE 0x8000
81
82
/* DMA registers */
83
84
#define HIFN_DMA_CRA 0x0C /* DMA Command Ring Address */
85
#define HIFN_DMA_SDRA 0x1C /* DMA Source Data Ring Address */
86
#define HIFN_DMA_RRA 0x2C /* DMA Result Ring Address */
87
#define HIFN_DMA_DDRA 0x3C /* DMA Destination Data Ring Address */
88
#define HIFN_DMA_STCTL 0x40 /* DMA Status and Control */
89
#define HIFN_DMA_INTREN 0x44 /* DMA Interrupt Enable */
90
#define HIFN_DMA_CFG1 0x48 /* DMA Configuration #1 */
91
#define HIFN_DMA_CFG2 0x6C /* DMA Configuration #2 */
92
#define HIFN_CHIP_ID 0x98 /* Chip ID */
93
94
/*
95
* Processing Unit Registers (offset from BASEREG0)
96
*/
97
#define HIFN_0_PUDATA 0x00 /* Processing Unit Data */
98
#define HIFN_0_PUCTRL 0x04 /* Processing Unit Control */
99
#define HIFN_0_PUISR 0x08 /* Processing Unit Interrupt Status */
100
#define HIFN_0_PUCNFG 0x0c /* Processing Unit Configuration */
101
#define HIFN_0_PUIER 0x10 /* Processing Unit Interrupt Enable */
102
#define HIFN_0_PUSTAT 0x14 /* Processing Unit Status/Chip ID */
103
#define HIFN_0_FIFOSTAT 0x18 /* FIFO Status */
104
#define HIFN_0_FIFOCNFG 0x1c /* FIFO Configuration */
105
#define HIFN_0_SPACESIZE 0x20 /* Register space size */
106
107
/* Processing Unit Control Register (HIFN_0_PUCTRL) */
108
#define HIFN_PUCTRL_CLRSRCFIFO 0x0010 /* clear source fifo */
109
#define HIFN_PUCTRL_STOP 0x0008 /* stop pu */
110
#define HIFN_PUCTRL_LOCKRAM 0x0004 /* lock ram */
111
#define HIFN_PUCTRL_DMAENA 0x0002 /* enable dma */
112
#define HIFN_PUCTRL_RESET 0x0001 /* Reset processing unit */
113
114
/* Processing Unit Interrupt Status Register (HIFN_0_PUISR) */
115
#define HIFN_PUISR_CMDINVAL 0x8000 /* Invalid command interrupt */
116
#define HIFN_PUISR_DATAERR 0x4000 /* Data error interrupt */
117
#define HIFN_PUISR_SRCFIFO 0x2000 /* Source FIFO ready interrupt */
118
#define HIFN_PUISR_DSTFIFO 0x1000 /* Destination FIFO ready interrupt */
119
#define HIFN_PUISR_DSTOVER 0x0200 /* Destination overrun interrupt */
120
#define HIFN_PUISR_SRCCMD 0x0080 /* Source command interrupt */
121
#define HIFN_PUISR_SRCCTX 0x0040 /* Source context interrupt */
122
#define HIFN_PUISR_SRCDATA 0x0020 /* Source data interrupt */
123
#define HIFN_PUISR_DSTDATA 0x0010 /* Destination data interrupt */
124
#define HIFN_PUISR_DSTRESULT 0x0004 /* Destination result interrupt */
125
126
/* Processing Unit Configuration Register (HIFN_0_PUCNFG) */
127
#define HIFN_PUCNFG_DRAMMASK 0xe000 /* DRAM size mask */
128
#define HIFN_PUCNFG_DSZ_256K 0x0000 /* 256k dram */
129
#define HIFN_PUCNFG_DSZ_512K 0x2000 /* 512k dram */
130
#define HIFN_PUCNFG_DSZ_1M 0x4000 /* 1m dram */
131
#define HIFN_PUCNFG_DSZ_2M 0x6000 /* 2m dram */
132
#define HIFN_PUCNFG_DSZ_4M 0x8000 /* 4m dram */
133
#define HIFN_PUCNFG_DSZ_8M 0xa000 /* 8m dram */
134
#define HIFN_PUNCFG_DSZ_16M 0xc000 /* 16m dram */
135
#define HIFN_PUCNFG_DSZ_32M 0xe000 /* 32m dram */
136
#define HIFN_PUCNFG_DRAMREFRESH 0x1800 /* DRAM refresh rate mask */
137
#define HIFN_PUCNFG_DRFR_512 0x0000 /* 512 divisor of ECLK */
138
#define HIFN_PUCNFG_DRFR_256 0x0800 /* 256 divisor of ECLK */
139
#define HIFN_PUCNFG_DRFR_128 0x1000 /* 128 divisor of ECLK */
140
#define HIFN_PUCNFG_TCALLPHASES 0x0200 /* your guess is as good as mine... */
141
#define HIFN_PUCNFG_TCDRVTOTEM 0x0100 /* your guess is as good as mine... */
142
#define HIFN_PUCNFG_BIGENDIAN 0x0080 /* DMA big endian mode */
143
#define HIFN_PUCNFG_BUS32 0x0040 /* Bus width 32bits */
144
#define HIFN_PUCNFG_BUS16 0x0000 /* Bus width 16 bits */
145
#define HIFN_PUCNFG_CHIPID 0x0020 /* Allow chipid from PUSTAT */
146
#define HIFN_PUCNFG_DRAM 0x0010 /* Context RAM is DRAM */
147
#define HIFN_PUCNFG_SRAM 0x0000 /* Context RAM is SRAM */
148
#define HIFN_PUCNFG_COMPSING 0x0004 /* Enable single compression context */
149
#define HIFN_PUCNFG_ENCCNFG 0x0002 /* Encryption configuration */
150
151
/* Processing Unit Interrupt Enable Register (HIFN_0_PUIER) */
152
#define HIFN_PUIER_CMDINVAL 0x8000 /* Invalid command interrupt */
153
#define HIFN_PUIER_DATAERR 0x4000 /* Data error interrupt */
154
#define HIFN_PUIER_SRCFIFO 0x2000 /* Source FIFO ready interrupt */
155
#define HIFN_PUIER_DSTFIFO 0x1000 /* Destination FIFO ready interrupt */
156
#define HIFN_PUIER_DSTOVER 0x0200 /* Destination overrun interrupt */
157
#define HIFN_PUIER_SRCCMD 0x0080 /* Source command interrupt */
158
#define HIFN_PUIER_SRCCTX 0x0040 /* Source context interrupt */
159
#define HIFN_PUIER_SRCDATA 0x0020 /* Source data interrupt */
160
#define HIFN_PUIER_DSTDATA 0x0010 /* Destination data interrupt */
161
#define HIFN_PUIER_DSTRESULT 0x0004 /* Destination result interrupt */
162
163
/* Processing Unit Status Register/Chip ID (HIFN_0_PUSTAT) */
164
#define HIFN_PUSTAT_CMDINVAL 0x8000 /* Invalid command interrupt */
165
#define HIFN_PUSTAT_DATAERR 0x4000 /* Data error interrupt */
166
#define HIFN_PUSTAT_SRCFIFO 0x2000 /* Source FIFO ready interrupt */
167
#define HIFN_PUSTAT_DSTFIFO 0x1000 /* Destination FIFO ready interrupt */
168
#define HIFN_PUSTAT_DSTOVER 0x0200 /* Destination overrun interrupt */
169
#define HIFN_PUSTAT_SRCCMD 0x0080 /* Source command interrupt */
170
#define HIFN_PUSTAT_SRCCTX 0x0040 /* Source context interrupt */
171
#define HIFN_PUSTAT_SRCDATA 0x0020 /* Source data interrupt */
172
#define HIFN_PUSTAT_DSTDATA 0x0010 /* Destination data interrupt */
173
#define HIFN_PUSTAT_DSTRESULT 0x0004 /* Destination result interrupt */
174
#define HIFN_PUSTAT_CHIPREV 0x00ff /* Chip revision mask */
175
#define HIFN_PUSTAT_CHIPENA 0xff00 /* Chip enabled mask */
176
#define HIFN_PUSTAT_ENA_2 0x1100 /* Level 2 enabled */
177
#define HIFN_PUSTAT_ENA_1 0x1000 /* Level 1 enabled */
178
#define HIFN_PUSTAT_ENA_0 0x3000 /* Level 0 enabled */
179
#define HIFN_PUSTAT_REV_2 0x0020 /* 7751 PT6/2 */
180
#define HIFN_PUSTAT_REV_3 0x0030 /* 7751 PT6/3 */
181
182
/* FIFO Status Register (HIFN_0_FIFOSTAT) */
183
#define HIFN_FIFOSTAT_SRC 0x7f00 /* Source FIFO available */
184
#define HIFN_FIFOSTAT_DST 0x007f /* Destination FIFO available */
185
186
/* FIFO Configuration Register (HIFN_0_FIFOCNFG) */
187
#define HIFN_FIFOCNFG_THRESHOLD 0x0400 /* must be written as 1 */
188
189
/*
190
* DMA Interface Registers (offset from BASEREG1)
191
*/
192
#define HIFN_1_DMA_CRAR 0x0c /* DMA Command Ring Address */
193
#define HIFN_1_DMA_SRAR 0x1c /* DMA Source Ring Address */
194
#define HIFN_1_DMA_RRAR 0x2c /* DMA Result Ring Address */
195
#define HIFN_1_DMA_DRAR 0x3c /* DMA Destination Ring Address */
196
#define HIFN_1_DMA_CSR 0x40 /* DMA Status and Control */
197
#define HIFN_1_DMA_IER 0x44 /* DMA Interrupt Enable */
198
#define HIFN_1_DMA_CNFG 0x48 /* DMA Configuration */
199
#define HIFN_1_PLL 0x4c /* 795x: PLL config */
200
#define HIFN_1_7811_RNGENA 0x60 /* 7811: rng enable */
201
#define HIFN_1_7811_RNGCFG 0x64 /* 7811: rng config */
202
#define HIFN_1_7811_RNGDAT 0x68 /* 7811: rng data */
203
#define HIFN_1_7811_RNGSTS 0x6c /* 7811: rng status */
204
#define HIFN_1_7811_MIPSRST 0x94 /* 7811: MIPS reset */
205
#define HIFN_1_REVID 0x98 /* Revision ID */
206
#define HIFN_1_UNLOCK_SECRET1 0xf4
207
#define HIFN_1_UNLOCK_SECRET2 0xfc
208
#define HIFN_1_PUB_RESET 0x204 /* Public/RNG Reset */
209
#define HIFN_1_PUB_BASE 0x300 /* Public Base Address */
210
#define HIFN_1_PUB_OPLEN 0x304 /* Public Operand Length */
211
#define HIFN_1_PUB_OP 0x308 /* Public Operand */
212
#define HIFN_1_PUB_STATUS 0x30c /* Public Status */
213
#define HIFN_1_PUB_IEN 0x310 /* Public Interrupt enable */
214
#define HIFN_1_RNG_CONFIG 0x314 /* RNG config */
215
#define HIFN_1_RNG_DATA 0x318 /* RNG data */
216
#define HIFN_1_PUB_MEM 0x400 /* start of Public key memory */
217
#define HIFN_1_PUB_MEMEND 0xbff /* end of Public key memory */
218
219
/* DMA Status and Control Register (HIFN_1_DMA_CSR) */
220
#define HIFN_DMACSR_D_CTRLMASK 0xc0000000 /* Destination Ring Control */
221
#define HIFN_DMACSR_D_CTRL_NOP 0x00000000 /* Dest. Control: no-op */
222
#define HIFN_DMACSR_D_CTRL_DIS 0x40000000 /* Dest. Control: disable */
223
#define HIFN_DMACSR_D_CTRL_ENA 0x80000000 /* Dest. Control: enable */
224
#define HIFN_DMACSR_D_ABORT 0x20000000 /* Destination Ring PCI Abort */
225
#define HIFN_DMACSR_D_DONE 0x10000000 /* Destination Ring Done */
226
#define HIFN_DMACSR_D_LAST 0x08000000 /* Destination Ring Last */
227
#define HIFN_DMACSR_D_WAIT 0x04000000 /* Destination Ring Waiting */
228
#define HIFN_DMACSR_D_OVER 0x02000000 /* Destination Ring Overflow */
229
#define HIFN_DMACSR_R_CTRL 0x00c00000 /* Result Ring Control */
230
#define HIFN_DMACSR_R_CTRL_NOP 0x00000000 /* Result Control: no-op */
231
#define HIFN_DMACSR_R_CTRL_DIS 0x00400000 /* Result Control: disable */
232
#define HIFN_DMACSR_R_CTRL_ENA 0x00800000 /* Result Control: enable */
233
#define HIFN_DMACSR_R_ABORT 0x00200000 /* Result Ring PCI Abort */
234
#define HIFN_DMACSR_R_DONE 0x00100000 /* Result Ring Done */
235
#define HIFN_DMACSR_R_LAST 0x00080000 /* Result Ring Last */
236
#define HIFN_DMACSR_R_WAIT 0x00040000 /* Result Ring Waiting */
237
#define HIFN_DMACSR_R_OVER 0x00020000 /* Result Ring Overflow */
238
#define HIFN_DMACSR_S_CTRL 0x0000c000 /* Source Ring Control */
239
#define HIFN_DMACSR_S_CTRL_NOP 0x00000000 /* Source Control: no-op */
240
#define HIFN_DMACSR_S_CTRL_DIS 0x00004000 /* Source Control: disable */
241
#define HIFN_DMACSR_S_CTRL_ENA 0x00008000 /* Source Control: enable */
242
#define HIFN_DMACSR_S_ABORT 0x00002000 /* Source Ring PCI Abort */
243
#define HIFN_DMACSR_S_DONE 0x00001000 /* Source Ring Done */
244
#define HIFN_DMACSR_S_LAST 0x00000800 /* Source Ring Last */
245
#define HIFN_DMACSR_S_WAIT 0x00000400 /* Source Ring Waiting */
246
#define HIFN_DMACSR_ILLW 0x00000200 /* Illegal write (7811 only) */
247
#define HIFN_DMACSR_ILLR 0x00000100 /* Illegal read (7811 only) */
248
#define HIFN_DMACSR_C_CTRL 0x000000c0 /* Command Ring Control */
249
#define HIFN_DMACSR_C_CTRL_NOP 0x00000000 /* Command Control: no-op */
250
#define HIFN_DMACSR_C_CTRL_DIS 0x00000040 /* Command Control: disable */
251
#define HIFN_DMACSR_C_CTRL_ENA 0x00000080 /* Command Control: enable */
252
#define HIFN_DMACSR_C_ABORT 0x00000020 /* Command Ring PCI Abort */
253
#define HIFN_DMACSR_C_DONE 0x00000010 /* Command Ring Done */
254
#define HIFN_DMACSR_C_LAST 0x00000008 /* Command Ring Last */
255
#define HIFN_DMACSR_C_WAIT 0x00000004 /* Command Ring Waiting */
256
#define HIFN_DMACSR_PUBDONE 0x00000002 /* Public op done (7951 only) */
257
#define HIFN_DMACSR_ENGINE 0x00000001 /* Command Ring Engine IRQ */
258
259
/* DMA Interrupt Enable Register (HIFN_1_DMA_IER) */
260
#define HIFN_DMAIER_D_ABORT 0x20000000 /* Destination Ring PCI Abort */
261
#define HIFN_DMAIER_D_DONE 0x10000000 /* Destination Ring Done */
262
#define HIFN_DMAIER_D_LAST 0x08000000 /* Destination Ring Last */
263
#define HIFN_DMAIER_D_WAIT 0x04000000 /* Destination Ring Waiting */
264
#define HIFN_DMAIER_D_OVER 0x02000000 /* Destination Ring Overflow */
265
#define HIFN_DMAIER_R_ABORT 0x00200000 /* Result Ring PCI Abort */
266
#define HIFN_DMAIER_R_DONE 0x00100000 /* Result Ring Done */
267
#define HIFN_DMAIER_R_LAST 0x00080000 /* Result Ring Last */
268
#define HIFN_DMAIER_R_WAIT 0x00040000 /* Result Ring Waiting */
269
#define HIFN_DMAIER_R_OVER 0x00020000 /* Result Ring Overflow */
270
#define HIFN_DMAIER_S_ABORT 0x00002000 /* Source Ring PCI Abort */
271
#define HIFN_DMAIER_S_DONE 0x00001000 /* Source Ring Done */
272
#define HIFN_DMAIER_S_LAST 0x00000800 /* Source Ring Last */
273
#define HIFN_DMAIER_S_WAIT 0x00000400 /* Source Ring Waiting */
274
#define HIFN_DMAIER_ILLW 0x00000200 /* Illegal write (7811 only) */
275
#define HIFN_DMAIER_ILLR 0x00000100 /* Illegal read (7811 only) */
276
#define HIFN_DMAIER_C_ABORT 0x00000020 /* Command Ring PCI Abort */
277
#define HIFN_DMAIER_C_DONE 0x00000010 /* Command Ring Done */
278
#define HIFN_DMAIER_C_LAST 0x00000008 /* Command Ring Last */
279
#define HIFN_DMAIER_C_WAIT 0x00000004 /* Command Ring Waiting */
280
#define HIFN_DMAIER_PUBDONE 0x00000002 /* public op done (7951 only) */
281
#define HIFN_DMAIER_ENGINE 0x00000001 /* Engine IRQ */
282
283
/* DMA Configuration Register (HIFN_1_DMA_CNFG) */
284
#define HIFN_DMACNFG_BIGENDIAN 0x10000000 /* big endian mode */
285
#define HIFN_DMACNFG_POLLFREQ 0x00ff0000 /* Poll frequency mask */
286
#define HIFN_DMACNFG_UNLOCK 0x00000800
287
#define HIFN_DMACNFG_POLLINVAL 0x00000700 /* Invalid Poll Scalar */
288
#define HIFN_DMACNFG_LAST 0x00000010 /* Host control LAST bit */
289
#define HIFN_DMACNFG_MODE 0x00000004 /* DMA mode */
290
#define HIFN_DMACNFG_DMARESET 0x00000002 /* DMA Reset # */
291
#define HIFN_DMACNFG_MSTRESET 0x00000001 /* Master Reset # */
292
293
/* PLL configuration register */
294
#define HIFN_PLL_REF_CLK_HBI 0x00000000 /* HBI reference clock */
295
#define HIFN_PLL_REF_CLK_PLL 0x00000001 /* PLL reference clock */
296
#define HIFN_PLL_BP 0x00000002 /* Reference clock bypass */
297
#define HIFN_PLL_PK_CLK_HBI 0x00000000 /* PK engine HBI clock */
298
#define HIFN_PLL_PK_CLK_PLL 0x00000008 /* PK engine PLL clock */
299
#define HIFN_PLL_PE_CLK_HBI 0x00000000 /* PE engine HBI clock */
300
#define HIFN_PLL_PE_CLK_PLL 0x00000010 /* PE engine PLL clock */
301
#define HIFN_PLL_RESERVED_1 0x00000400 /* Reserved bit, must be 1 */
302
#define HIFN_PLL_ND_SHIFT 11 /* Clock multiplier shift */
303
#define HIFN_PLL_ND_MULT_2 0x00000000 /* PLL clock multiplier 2 */
304
#define HIFN_PLL_ND_MULT_4 0x00000800 /* PLL clock multiplier 4 */
305
#define HIFN_PLL_ND_MULT_6 0x00001000 /* PLL clock multiplier 6 */
306
#define HIFN_PLL_ND_MULT_8 0x00001800 /* PLL clock multiplier 8 */
307
#define HIFN_PLL_ND_MULT_10 0x00002000 /* PLL clock multiplier 10 */
308
#define HIFN_PLL_ND_MULT_12 0x00002800 /* PLL clock multiplier 12 */
309
#define HIFN_PLL_IS_1_8 0x00000000 /* charge pump (mult. 1-8) */
310
#define HIFN_PLL_IS_9_12 0x00010000 /* charge pump (mult. 9-12) */
311
312
#define HIFN_PLL_FCK_MAX 266 /* Maximum PLL frequency */
313
314
/* Public key reset register (HIFN_1_PUB_RESET) */
315
#define HIFN_PUBRST_RESET 0x00000001 /* reset public/rng unit */
316
317
/* Public base address register (HIFN_1_PUB_BASE) */
318
#define HIFN_PUBBASE_ADDR 0x00003fff /* base address */
319
320
/* Public operand length register (HIFN_1_PUB_OPLEN) */
321
#define HIFN_PUBOPLEN_MOD_M 0x0000007f /* modulus length mask */
322
#define HIFN_PUBOPLEN_MOD_S 0 /* modulus length shift */
323
#define HIFN_PUBOPLEN_EXP_M 0x0003ff80 /* exponent length mask */
324
#define HIFN_PUBOPLEN_EXP_S 7 /* exponent length shift */
325
#define HIFN_PUBOPLEN_RED_M 0x003c0000 /* reducend length mask */
326
#define HIFN_PUBOPLEN_RED_S 18 /* reducend length shift */
327
328
/* Public operation register (HIFN_1_PUB_OP) */
329
#define HIFN_PUBOP_AOFFSET_M 0x0000007f /* A offset mask */
330
#define HIFN_PUBOP_AOFFSET_S 0 /* A offset shift */
331
#define HIFN_PUBOP_BOFFSET_M 0x00000f80 /* B offset mask */
332
#define HIFN_PUBOP_BOFFSET_S 7 /* B offset shift */
333
#define HIFN_PUBOP_MOFFSET_M 0x0003f000 /* M offset mask */
334
#define HIFN_PUBOP_MOFFSET_S 12 /* M offset shift */
335
#define HIFN_PUBOP_OP_MASK 0x003c0000 /* Opcode: */
336
#define HIFN_PUBOP_OP_NOP 0x00000000 /* NOP */
337
#define HIFN_PUBOP_OP_ADD 0x00040000 /* ADD */
338
#define HIFN_PUBOP_OP_ADDC 0x00080000 /* ADD w/carry */
339
#define HIFN_PUBOP_OP_SUB 0x000c0000 /* SUB */
340
#define HIFN_PUBOP_OP_SUBC 0x00100000 /* SUB w/carry */
341
#define HIFN_PUBOP_OP_MODADD 0x00140000 /* Modular ADD */
342
#define HIFN_PUBOP_OP_MODSUB 0x00180000 /* Modular SUB */
343
#define HIFN_PUBOP_OP_INCA 0x001c0000 /* INC A */
344
#define HIFN_PUBOP_OP_DECA 0x00200000 /* DEC A */
345
#define HIFN_PUBOP_OP_MULT 0x00240000 /* MULT */
346
#define HIFN_PUBOP_OP_MODMULT 0x00280000 /* Modular MULT */
347
#define HIFN_PUBOP_OP_MODRED 0x002c0000 /* Modular RED */
348
#define HIFN_PUBOP_OP_MODEXP 0x00300000 /* Modular EXP */
349
350
/* Public status register (HIFN_1_PUB_STATUS) */
351
#define HIFN_PUBSTS_DONE 0x00000001 /* operation done */
352
#define HIFN_PUBSTS_CARRY 0x00000002 /* carry */
353
354
/* Public interrupt enable register (HIFN_1_PUB_IEN) */
355
#define HIFN_PUBIEN_DONE 0x00000001 /* operation done interrupt */
356
357
/* Random number generator config register (HIFN_1_RNG_CONFIG) */
358
#define HIFN_RNGCFG_ENA 0x00000001 /* enable rng */
359
360
#define HIFN_NAMESIZE 32
361
#define HIFN_MAX_RESULT_ORDER 5
362
363
#define HIFN_D_CMD_RSIZE 24*1
364
#define HIFN_D_SRC_RSIZE 80*1
365
#define HIFN_D_DST_RSIZE 80*1
366
#define HIFN_D_RES_RSIZE 24*1
367
368
#define HIFN_D_DST_DALIGN 4
369
370
#define HIFN_QUEUE_LENGTH (HIFN_D_CMD_RSIZE - 1)
371
372
#define AES_MIN_KEY_SIZE 16
373
#define AES_MAX_KEY_SIZE 32
374
375
#define HIFN_DES_KEY_LENGTH 8
376
#define HIFN_3DES_KEY_LENGTH 24
377
#define HIFN_MAX_CRYPT_KEY_LENGTH AES_MAX_KEY_SIZE
378
#define HIFN_IV_LENGTH 8
379
#define HIFN_AES_IV_LENGTH 16
380
#define HIFN_MAX_IV_LENGTH HIFN_AES_IV_LENGTH
381
382
#define HIFN_MAC_KEY_LENGTH 64
383
#define HIFN_MD5_LENGTH 16
384
#define HIFN_SHA1_LENGTH 20
385
#define HIFN_MAC_TRUNC_LENGTH 12
386
387
#define HIFN_MAX_COMMAND (8 + 8 + 8 + 64 + 260)
388
#define HIFN_MAX_RESULT (8 + 4 + 4 + 20 + 4)
389
#define HIFN_USED_RESULT 12
390
391
struct hifn_desc
392
{
393
volatile __le32 l;
394
volatile __le32 p;
395
};
396
397
struct hifn_dma {
398
struct hifn_desc cmdr[HIFN_D_CMD_RSIZE+1];
399
struct hifn_desc srcr[HIFN_D_SRC_RSIZE+1];
400
struct hifn_desc dstr[HIFN_D_DST_RSIZE+1];
401
struct hifn_desc resr[HIFN_D_RES_RSIZE+1];
402
403
u8 command_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_COMMAND];
404
u8 result_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_RESULT];
405
406
/*
407
* Our current positions for insertion and removal from the descriptor
408
* rings.
409
*/
410
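/* For each ring: i - insert index, u - entries in use, k - next index to clean. */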
volatile int cmdi, srci, dsti, resi;
411
volatile int cmdu, srcu, dstu, resu;
412
int cmdk, srck, dstk, resk;
413
};
414
415
#define HIFN_FLAG_CMD_BUSY (1<<0)
416
#define HIFN_FLAG_SRC_BUSY (1<<1)
417
#define HIFN_FLAG_DST_BUSY (1<<2)
418
#define HIFN_FLAG_RES_BUSY (1<<3)
419
#define HIFN_FLAG_OLD_KEY (1<<4)
420
421
#define HIFN_DEFAULT_ACTIVE_NUM 5
422
423
struct hifn_device
424
{
425
char name[HIFN_NAMESIZE];
426
427
int irq;
428
429
struct pci_dev *pdev;
430
void __iomem *bar[3];
431
432
void *desc_virt;
433
dma_addr_t desc_dma;
434
435
u32 dmareg;
436
437
void *sa[HIFN_D_RES_RSIZE];
438
439
spinlock_t lock;
440
441
u32 flags;
442
int active, started;
443
struct delayed_work work;
444
unsigned long reset;
445
unsigned long success;
446
unsigned long prev_success;
447
448
u8 snum;
449
450
struct tasklet_struct tasklet;
451
452
struct crypto_queue queue;
453
struct list_head alg_list;
454
455
unsigned int pk_clk_freq;
456
457
#ifdef CONFIG_CRYPTO_DEV_HIFN_795X_RNG
458
unsigned int rng_wait_time;
459
ktime_t rngtime;
460
struct hwrng rng;
461
#endif
462
};
463
464
#define HIFN_D_LENGTH 0x0000ffff
465
#define HIFN_D_NOINVALID 0x01000000
466
#define HIFN_D_MASKDONEIRQ 0x02000000
467
#define HIFN_D_DESTOVER 0x04000000
468
#define HIFN_D_OVER 0x08000000
469
#define HIFN_D_LAST 0x20000000
470
#define HIFN_D_JUMP 0x40000000
471
#define HIFN_D_VALID 0x80000000
472
473
struct hifn_base_command
474
{
475
volatile __le16 masks;
476
volatile __le16 session_num;
477
volatile __le16 total_source_count;
478
volatile __le16 total_dest_count;
479
};
480
481
#define HIFN_BASE_CMD_COMP 0x0100 /* enable compression engine */
482
#define HIFN_BASE_CMD_PAD 0x0200 /* enable padding engine */
483
#define HIFN_BASE_CMD_MAC 0x0400 /* enable MAC engine */
484
#define HIFN_BASE_CMD_CRYPT 0x0800 /* enable crypt engine */
485
#define HIFN_BASE_CMD_DECODE 0x2000
486
#define HIFN_BASE_CMD_SRCLEN_M 0xc000
487
#define HIFN_BASE_CMD_SRCLEN_S 14
488
#define HIFN_BASE_CMD_DSTLEN_M 0x3000
489
#define HIFN_BASE_CMD_DSTLEN_S 12
490
#define HIFN_BASE_CMD_LENMASK_HI 0x30000
491
#define HIFN_BASE_CMD_LENMASK_LO 0x0ffff
492
493
/*
494
* Structure to help build up the command data structure.
495
*/
496
struct hifn_crypt_command
497
{
498
volatile __le16 masks;
499
volatile __le16 header_skip;
500
volatile __le16 source_count;
501
volatile __le16 reserved;
502
};
503
504
#define HIFN_CRYPT_CMD_ALG_MASK 0x0003 /* algorithm: */
505
#define HIFN_CRYPT_CMD_ALG_DES 0x0000 /* DES */
506
#define HIFN_CRYPT_CMD_ALG_3DES 0x0001 /* 3DES */
507
#define HIFN_CRYPT_CMD_ALG_RC4 0x0002 /* RC4 */
508
#define HIFN_CRYPT_CMD_ALG_AES 0x0003 /* AES */
509
#define HIFN_CRYPT_CMD_MODE_MASK 0x0018 /* Encrypt mode: */
510
#define HIFN_CRYPT_CMD_MODE_ECB 0x0000 /* ECB */
511
#define HIFN_CRYPT_CMD_MODE_CBC 0x0008 /* CBC */
512
#define HIFN_CRYPT_CMD_MODE_CFB 0x0010 /* CFB */
513
#define HIFN_CRYPT_CMD_MODE_OFB 0x0018 /* OFB */
514
#define HIFN_CRYPT_CMD_CLR_CTX 0x0040 /* clear context */
515
#define HIFN_CRYPT_CMD_KSZ_MASK 0x0600 /* AES key size: */
516
#define HIFN_CRYPT_CMD_KSZ_128 0x0000 /* 128 bit */
517
#define HIFN_CRYPT_CMD_KSZ_192 0x0200 /* 192 bit */
518
#define HIFN_CRYPT_CMD_KSZ_256 0x0400 /* 256 bit */
519
#define HIFN_CRYPT_CMD_NEW_KEY 0x0800 /* expect new key */
520
#define HIFN_CRYPT_CMD_NEW_IV 0x1000 /* expect new iv */
521
#define HIFN_CRYPT_CMD_SRCLEN_M 0xc000
522
#define HIFN_CRYPT_CMD_SRCLEN_S 14
523
524
/*
525
* Structure to help build up the command data structure.
526
*/
527
struct hifn_mac_command
528
{
529
volatile __le16 masks;
530
volatile __le16 header_skip;
531
volatile __le16 source_count;
532
volatile __le16 reserved;
533
};
534
535
#define HIFN_MAC_CMD_ALG_MASK 0x0001
536
#define HIFN_MAC_CMD_ALG_SHA1 0x0000
537
#define HIFN_MAC_CMD_ALG_MD5 0x0001
538
#define HIFN_MAC_CMD_MODE_MASK 0x000c
539
#define HIFN_MAC_CMD_MODE_HMAC 0x0000
540
#define HIFN_MAC_CMD_MODE_SSL_MAC 0x0004
541
#define HIFN_MAC_CMD_MODE_HASH 0x0008
542
#define HIFN_MAC_CMD_MODE_FULL 0x0004
543
#define HIFN_MAC_CMD_TRUNC 0x0010
544
#define HIFN_MAC_CMD_RESULT 0x0020
545
#define HIFN_MAC_CMD_APPEND 0x0040
546
#define HIFN_MAC_CMD_SRCLEN_M 0xc000
547
#define HIFN_MAC_CMD_SRCLEN_S 14
548
549
/*
550
* MAC POS IPsec initiates authentication after encryption on encodes
551
* and before decryption on decodes.
552
*/
553
#define HIFN_MAC_CMD_POS_IPSEC 0x0200
554
#define HIFN_MAC_CMD_NEW_KEY 0x0800
555
556
struct hifn_comp_command
557
{
558
volatile __le16 masks;
559
volatile __le16 header_skip;
560
volatile __le16 source_count;
561
volatile __le16 reserved;
562
};
563
564
#define HIFN_COMP_CMD_SRCLEN_M 0xc000
565
#define HIFN_COMP_CMD_SRCLEN_S 14
566
#define HIFN_COMP_CMD_ONE 0x0100 /* must be one */
567
#define HIFN_COMP_CMD_CLEARHIST 0x0010 /* clear history */
568
#define HIFN_COMP_CMD_UPDATEHIST 0x0008 /* update history */
569
#define HIFN_COMP_CMD_LZS_STRIP0 0x0004 /* LZS: strip zero */
570
#define HIFN_COMP_CMD_MPPC_RESTART 0x0004 /* MPPC: restart */
571
#define HIFN_COMP_CMD_ALG_MASK 0x0001 /* compression mode: */
572
#define HIFN_COMP_CMD_ALG_MPPC 0x0001 /* MPPC */
573
#define HIFN_COMP_CMD_ALG_LZS 0x0000 /* LZS */
574
575
struct hifn_base_result
576
{
577
volatile __le16 flags;
578
volatile __le16 session;
579
volatile __le16 src_cnt; /* 15:0 of source count */
580
volatile __le16 dst_cnt; /* 15:0 of dest count */
581
};
582
583
#define HIFN_BASE_RES_DSTOVERRUN 0x0200 /* destination overrun */
584
#define HIFN_BASE_RES_SRCLEN_M 0xc000 /* 17:16 of source count */
585
#define HIFN_BASE_RES_SRCLEN_S 14
586
#define HIFN_BASE_RES_DSTLEN_M 0x3000 /* 17:16 of dest count */
587
#define HIFN_BASE_RES_DSTLEN_S 12
588
589
struct hifn_comp_result
590
{
591
volatile __le16 flags;
592
volatile __le16 crc;
593
};
594
595
#define HIFN_COMP_RES_LCB_M 0xff00 /* longitudinal check byte */
596
#define HIFN_COMP_RES_LCB_S 8
597
#define HIFN_COMP_RES_RESTART 0x0004 /* MPPC: restart */
598
#define HIFN_COMP_RES_ENDMARKER 0x0002 /* LZS: end marker seen */
599
#define HIFN_COMP_RES_SRC_NOTZERO 0x0001 /* source expired */
600
601
struct hifn_mac_result
602
{
603
volatile __le16 flags;
604
volatile __le16 reserved;
605
/* followed by 0, 6, 8, or 10 u16's of the MAC, then crypt */
606
};
607
608
#define HIFN_MAC_RES_MISCOMPARE 0x0002 /* compare failed */
609
#define HIFN_MAC_RES_SRC_NOTZERO 0x0001 /* source expired */
610
611
struct hifn_crypt_result
612
{
613
volatile __le16 flags;
614
volatile __le16 reserved;
615
};
616
617
#define HIFN_CRYPT_RES_SRC_NOTZERO 0x0001 /* source expired */
618
619
#ifndef HIFN_POLL_FREQUENCY
620
#define HIFN_POLL_FREQUENCY 0x1
621
#endif
622
623
#ifndef HIFN_POLL_SCALAR
624
#define HIFN_POLL_SCALAR 0x0
625
#endif
626
627
#define HIFN_MAX_SEGLEN 0xffff /* maximum dma segment len */
628
#define HIFN_MAX_DMALEN 0x3ffff /* maximum dma length */
629
630
struct hifn_crypto_alg
631
{
632
struct list_head entry;
633
struct crypto_alg alg;
634
struct hifn_device *dev;
635
};
636
637
#define ASYNC_SCATTERLIST_CACHE 16
638
639
#define ASYNC_FLAGS_MISALIGNED (1<<0)
640
641
struct hifn_cipher_walk
642
{
643
struct scatterlist cache[ASYNC_SCATTERLIST_CACHE];
644
u32 flags;
645
int num;
646
};
647
648
struct hifn_context
649
{
650
u8 key[HIFN_MAX_CRYPT_KEY_LENGTH];
651
struct hifn_device *dev;
652
unsigned int keysize;
653
};
654
655
struct hifn_request_context
656
{
657
u8 *iv;
658
unsigned int ivsize;
659
u8 op, type, mode, unused;
660
struct hifn_cipher_walk walk;
661
};
662
663
#define crypto_alg_to_hifn(a) container_of(a, struct hifn_crypto_alg, alg)
664
665
static inline u32 hifn_read_0(struct hifn_device *dev, u32 reg)
666
{
667
u32 ret;
668
669
ret = readl(dev->bar[0] + reg);
670
671
return ret;
672
}
673
674
static inline u32 hifn_read_1(struct hifn_device *dev, u32 reg)
675
{
676
u32 ret;
677
678
ret = readl(dev->bar[1] + reg);
679
680
return ret;
681
}
682
683
static inline void hifn_write_0(struct hifn_device *dev, u32 reg, u32 val)
684
{
685
writel((__force u32)cpu_to_le32(val), dev->bar[0] + reg);
686
}
687
688
static inline void hifn_write_1(struct hifn_device *dev, u32 reg, u32 val)
689
{
690
writel((__force u32)cpu_to_le32(val), dev->bar[1] + reg);
691
}
692
693
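/*
 * Poll the processing unit control register until the RESET bit clears,
 * giving up after 10000 x 1us.
 */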
static void hifn_wait_puc(struct hifn_device *dev)
694
{
695
int i;
696
u32 ret;
697
698
for (i=10000; i > 0; --i) {
699
ret = hifn_read_0(dev, HIFN_0_PUCTRL);
700
if (!(ret & HIFN_PUCTRL_RESET))
701
break;
702
703
udelay(1);
704
}
705
706
if (!i)
707
dprintk("%s: Failed to reset PUC unit.\n", dev->name);
708
}
709
710
static void hifn_reset_puc(struct hifn_device *dev)
711
{
712
hifn_write_0(dev, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
713
hifn_wait_puc(dev);
714
}
715
716
static void hifn_stop_device(struct hifn_device *dev)
717
{
718
hifn_write_1(dev, HIFN_1_DMA_CSR,
719
HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
720
HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS);
721
hifn_write_0(dev, HIFN_0_PUIER, 0);
722
hifn_write_1(dev, HIFN_1_DMA_IER, 0);
723
}
724
725
static void hifn_reset_dma(struct hifn_device *dev, int full)
726
{
727
hifn_stop_device(dev);
728
729
/*
730
* Setting poll frequency and others to 0.
731
*/
732
hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
733
HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
734
mdelay(1);
735
736
/*
737
* Reset DMA.
738
*/
739
if (full) {
740
hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
741
mdelay(1);
742
} else {
743
hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE |
744
HIFN_DMACNFG_MSTRESET);
745
hifn_reset_puc(dev);
746
}
747
748
hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
749
HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
750
751
hifn_reset_puc(dev);
752
}
753
754
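/*
 * Advance the card unlock signature by cnt steps: each step folds the
 * parity of (a & 0x80080125) into bit 0 of the left-shifted value.
 */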
static u32 hifn_next_signature(u_int32_t a, u_int cnt)
755
{
756
int i;
757
u32 v;
758
759
for (i = 0; i < cnt; i++) {
760
761
/* get the parity */
762
v = a & 0x80080125;
763
v ^= v >> 16;
764
v ^= v >> 8;
765
v ^= v >> 4;
766
v ^= v >> 2;
767
v ^= v >> 1;
768
769
a = (v & 1) ^ (a << 1);
770
}
771
772
return a;
773
}
774
775
static struct pci2id {
776
u_short pci_vendor;
777
u_short pci_prod;
778
char card_id[13];
779
} pci2id[] = {
780
{
781
PCI_VENDOR_ID_HIFN,
782
PCI_DEVICE_ID_HIFN_7955,
783
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
784
0x00, 0x00, 0x00, 0x00, 0x00 }
785
},
786
{
787
PCI_VENDOR_ID_HIFN,
788
PCI_DEVICE_ID_HIFN_7956,
789
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
790
0x00, 0x00, 0x00, 0x00, 0x00 }
791
}
792
};
793
794
#ifdef CONFIG_CRYPTO_DEV_HIFN_795X_RNG
795
static int hifn_rng_data_present(struct hwrng *rng, int wait)
796
{
797
struct hifn_device *dev = (struct hifn_device *)rng->priv;
798
s64 nsec;
799
800
nsec = ktime_to_ns(ktime_sub(ktime_get(), dev->rngtime));
801
nsec -= dev->rng_wait_time;
802
if (nsec <= 0)
803
return 1;
804
if (!wait)
805
return 0;
806
ndelay(nsec);
807
return 1;
808
}
809
810
static int hifn_rng_data_read(struct hwrng *rng, u32 *data)
811
{
812
struct hifn_device *dev = (struct hifn_device *)rng->priv;
813
814
*data = hifn_read_1(dev, HIFN_1_RNG_DATA);
815
dev->rngtime = ktime_get();
816
return 4;
817
}
818
819
static int hifn_register_rng(struct hifn_device *dev)
820
{
821
/*
822
* We must wait at least 256 Pk_clk cycles between two reads of the rng.
823
*/
824
dev->rng_wait_time = DIV_ROUND_UP(NSEC_PER_SEC, dev->pk_clk_freq) *
825
256;
826
827
dev->rng.name = dev->name;
828
dev->rng.data_present = hifn_rng_data_present;
829
dev->rng.data_read = hifn_rng_data_read;
830
dev->rng.priv = (unsigned long)dev;
831
832
return hwrng_register(&dev->rng);
833
}
834
835
static void hifn_unregister_rng(struct hifn_device *dev)
836
{
837
hwrng_unregister(&dev->rng);
838
}
839
#else
840
#define hifn_register_rng(dev) 0
841
#define hifn_unregister_rng(dev)
842
#endif
843
844
static int hifn_init_pubrng(struct hifn_device *dev)
845
{
846
int i;
847
848
hifn_write_1(dev, HIFN_1_PUB_RESET, hifn_read_1(dev, HIFN_1_PUB_RESET) |
849
HIFN_PUBRST_RESET);
850
851
for (i=100; i > 0; --i) {
852
mdelay(1);
853
854
if ((hifn_read_1(dev, HIFN_1_PUB_RESET) & HIFN_PUBRST_RESET) == 0)
855
break;
856
}
857
858
if (!i)
859
dprintk("Chip %s: Failed to initialise public key engine.\n",
860
dev->name);
861
else {
862
hifn_write_1(dev, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
863
dev->dmareg |= HIFN_DMAIER_PUBDONE;
864
hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg);
865
866
dprintk("Chip %s: Public key engine has been successfully "
867
"initialised.\n", dev->name);
868
}
869
870
/*
871
* Enable RNG engine.
872
*/
873
874
hifn_write_1(dev, HIFN_1_RNG_CONFIG,
875
hifn_read_1(dev, HIFN_1_RNG_CONFIG) | HIFN_RNGCFG_ENA);
876
dprintk("Chip %s: RNG engine has been successfully initialised.\n",
877
dev->name);
878
879
#ifdef CONFIG_CRYPTO_DEV_HIFN_795X_RNG
880
/* First value must be discarded */
881
hifn_read_1(dev, HIFN_1_RNG_DATA);
882
dev->rngtime = ktime_get();
883
#endif
884
return 0;
885
}
886
887
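/*
 * Enable the crypto engine: look up the card ID for this PCI device and
 * write the unlock sequence derived from it (via hifn_next_signature())
 * to the UNLOCK_SECRET registers.
 */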
static int hifn_enable_crypto(struct hifn_device *dev)
888
{
889
u32 dmacfg, addr;
890
char *offtbl = NULL;
891
int i;
892
893
for (i = 0; i < ARRAY_SIZE(pci2id); i++) {
894
if (pci2id[i].pci_vendor == dev->pdev->vendor &&
895
pci2id[i].pci_prod == dev->pdev->device) {
896
offtbl = pci2id[i].card_id;
897
break;
898
}
899
}
900
901
if (offtbl == NULL) {
902
dprintk("Chip %s: Unknown card!\n", dev->name);
903
return -ENODEV;
904
}
905
906
dmacfg = hifn_read_1(dev, HIFN_1_DMA_CNFG);
907
908
hifn_write_1(dev, HIFN_1_DMA_CNFG,
909
HIFN_DMACNFG_UNLOCK | HIFN_DMACNFG_MSTRESET |
910
HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
911
mdelay(1);
912
addr = hifn_read_1(dev, HIFN_1_UNLOCK_SECRET1);
913
mdelay(1);
914
hifn_write_1(dev, HIFN_1_UNLOCK_SECRET2, 0);
915
mdelay(1);
916
917
for (i=0; i<12; ++i) {
918
addr = hifn_next_signature(addr, offtbl[i] + 0x101);
919
hifn_write_1(dev, HIFN_1_UNLOCK_SECRET2, addr);
920
921
mdelay(1);
922
}
923
hifn_write_1(dev, HIFN_1_DMA_CNFG, dmacfg);
924
925
dprintk("Chip %s: %s.\n", dev->name, pci_name(dev->pdev));
926
927
return 0;
928
}
929
930
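/*
 * Point every command/result descriptor at its buffer inside struct
 * hifn_dma, make the extra descriptor at the end of each ring refer back
 * to that ring's start (the jump-back target used when the ring wraps),
 * and clear all ring indices.
 */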
static void hifn_init_dma(struct hifn_device *dev)
931
{
932
struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
933
u32 dptr = dev->desc_dma;
934
int i;
935
936
for (i=0; i<HIFN_D_CMD_RSIZE; ++i)
937
dma->cmdr[i].p = __cpu_to_le32(dptr +
938
offsetof(struct hifn_dma, command_bufs[i][0]));
939
for (i=0; i<HIFN_D_RES_RSIZE; ++i)
940
dma->resr[i].p = __cpu_to_le32(dptr +
941
offsetof(struct hifn_dma, result_bufs[i][0]));
942
943
/*
944
* Setup LAST descriptors.
945
*/
946
dma->cmdr[HIFN_D_CMD_RSIZE].p = __cpu_to_le32(dptr +
947
offsetof(struct hifn_dma, cmdr[0]));
948
dma->srcr[HIFN_D_SRC_RSIZE].p = __cpu_to_le32(dptr +
949
offsetof(struct hifn_dma, srcr[0]));
950
dma->dstr[HIFN_D_DST_RSIZE].p = __cpu_to_le32(dptr +
951
offsetof(struct hifn_dma, dstr[0]));
952
dma->resr[HIFN_D_RES_RSIZE].p = __cpu_to_le32(dptr +
953
offsetof(struct hifn_dma, resr[0]));
954
955
dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0;
956
dma->cmdi = dma->srci = dma->dsti = dma->resi = 0;
957
dma->cmdk = dma->srck = dma->dstk = dma->resk = 0;
958
}
959
960
/*
961
* Initialize the PLL. We need to know the frequency of the reference clock
962
* to calculate the optimal multiplier. For PCI we assume 66MHz, since that
963
* allows us to operate without the risk of overclocking the chip. If it
964
* actually uses 33MHz, the chip will operate at half the speed; this can be
965
* overridden by specifying the frequency as a module parameter (pci33).
966
*
967
* Unfortunately the PCI clock is not very suitable since the HIFN needs a
968
* stable clock and the PCI clock frequency may vary, so the default is the
969
* external clock. There is no way to find out its frequency, so we default to
970
* 66MHz since according to Mike Ham of HiFn, almost every board in existence
971
* has an external crystal populated at 66MHz.
972
*/
973
static void hifn_init_pll(struct hifn_device *dev)
974
{
975
unsigned int freq, m;
976
u32 pllcfg;
977
978
pllcfg = HIFN_1_PLL | HIFN_PLL_RESERVED_1;
979
980
if (strncmp(hifn_pll_ref, "ext", 3) == 0)
981
pllcfg |= HIFN_PLL_REF_CLK_PLL;
982
else
983
pllcfg |= HIFN_PLL_REF_CLK_HBI;
984
985
if (hifn_pll_ref[3] != '\0')
986
freq = simple_strtoul(hifn_pll_ref + 3, NULL, 10);
987
else {
988
freq = 66;
989
printk(KERN_INFO "hifn795x: assuming %uMHz clock speed, "
990
"override with hifn_pll_ref=%.3s<frequency>\n",
991
freq, hifn_pll_ref);
992
}
993
994
m = HIFN_PLL_FCK_MAX / freq;
995
996
pllcfg |= (m / 2 - 1) << HIFN_PLL_ND_SHIFT;
997
if (m <= 8)
998
pllcfg |= HIFN_PLL_IS_1_8;
999
else
1000
pllcfg |= HIFN_PLL_IS_9_12;
1001
1002
/* Select clock source and enable clock bypass */
1003
hifn_write_1(dev, HIFN_1_PLL, pllcfg |
1004
HIFN_PLL_PK_CLK_HBI | HIFN_PLL_PE_CLK_HBI | HIFN_PLL_BP);
1005
1006
/* Let the chip lock to the input clock */
1007
mdelay(10);
1008
1009
/* Disable clock bypass */
1010
hifn_write_1(dev, HIFN_1_PLL, pllcfg |
1011
HIFN_PLL_PK_CLK_HBI | HIFN_PLL_PE_CLK_HBI);
1012
1013
/* Switch the engines to the PLL */
1014
hifn_write_1(dev, HIFN_1_PLL, pllcfg |
1015
HIFN_PLL_PK_CLK_PLL | HIFN_PLL_PE_CLK_PLL);
1016
1017
/*
1018
* The Fpk_clk runs at half the total speed. Its frequency is needed to
1019
* calculate the minimum time between two reads of the rng. Since 33MHz
1020
* is actually 33.333... we overestimate the frequency here, resulting
1021
* in slightly larger intervals.
1022
*/
1023
dev->pk_clk_freq = 1000000 * (freq + 1) * m / 2;
1024
}
1025
1026
static void hifn_init_registers(struct hifn_device *dev)
1027
{
1028
u32 dptr = dev->desc_dma;
1029
1030
/* Initialization magic... */
1031
hifn_write_0(dev, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
1032
hifn_write_0(dev, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
1033
hifn_write_0(dev, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);
1034
1035
/* write all 4 ring address registers */
1036
hifn_write_1(dev, HIFN_1_DMA_CRAR, dptr +
1037
offsetof(struct hifn_dma, cmdr[0]));
1038
hifn_write_1(dev, HIFN_1_DMA_SRAR, dptr +
1039
offsetof(struct hifn_dma, srcr[0]));
1040
hifn_write_1(dev, HIFN_1_DMA_DRAR, dptr +
1041
offsetof(struct hifn_dma, dstr[0]));
1042
hifn_write_1(dev, HIFN_1_DMA_RRAR, dptr +
1043
offsetof(struct hifn_dma, resr[0]));
1044
1045
mdelay(2);
1046
#if 0
1047
hifn_write_1(dev, HIFN_1_DMA_CSR,
1048
HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
1049
HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
1050
HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
1051
HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
1052
HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
1053
HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
1054
HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
1055
HIFN_DMACSR_S_WAIT |
1056
HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
1057
HIFN_DMACSR_C_WAIT |
1058
HIFN_DMACSR_ENGINE |
1059
HIFN_DMACSR_PUBDONE);
1060
#else
1061
hifn_write_1(dev, HIFN_1_DMA_CSR,
1062
HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1063
HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA |
1064
HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
1065
HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
1066
HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
1067
HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
1068
HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
1069
HIFN_DMACSR_S_WAIT |
1070
HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
1071
HIFN_DMACSR_C_WAIT |
1072
HIFN_DMACSR_ENGINE |
1073
HIFN_DMACSR_PUBDONE);
1074
#endif
1075
hifn_read_1(dev, HIFN_1_DMA_CSR);
1076
1077
dev->dmareg |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
1078
HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
1079
HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
1080
HIFN_DMAIER_ENGINE;
1081
dev->dmareg &= ~HIFN_DMAIER_C_WAIT;
1082
1083
hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg);
1084
hifn_read_1(dev, HIFN_1_DMA_IER);
1085
#if 0
1086
hifn_write_0(dev, HIFN_0_PUCNFG, HIFN_PUCNFG_ENCCNFG |
1087
HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
1088
HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
1089
HIFN_PUCNFG_DRAM);
1090
#else
1091
hifn_write_0(dev, HIFN_0_PUCNFG, 0x10342);
1092
#endif
1093
hifn_init_pll(dev);
1094
1095
hifn_write_0(dev, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
1096
hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
1097
HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
1098
((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) |
1099
((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
1100
}
1101
1102
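/*
 * Write the base command: the low 16 bits of the source/destination
 * lengths go into the count fields, the upper bits are packed into
 * session_num together with the session number.
 */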
static int hifn_setup_base_command(struct hifn_device *dev, u8 *buf,
1103
unsigned dlen, unsigned slen, u16 mask, u8 snum)
1104
{
1105
struct hifn_base_command *base_cmd;
1106
u8 *buf_pos = buf;
1107
1108
base_cmd = (struct hifn_base_command *)buf_pos;
1109
base_cmd->masks = __cpu_to_le16(mask);
1110
base_cmd->total_source_count =
1111
__cpu_to_le16(slen & HIFN_BASE_CMD_LENMASK_LO);
1112
base_cmd->total_dest_count =
1113
__cpu_to_le16(dlen & HIFN_BASE_CMD_LENMASK_LO);
1114
1115
dlen >>= 16;
1116
slen >>= 16;
1117
base_cmd->session_num = __cpu_to_le16(snum |
1118
((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
1119
((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
1120
1121
return sizeof(struct hifn_base_command);
1122
}
1123
1124
static int hifn_setup_crypto_command(struct hifn_device *dev,
1125
u8 *buf, unsigned dlen, unsigned slen,
1126
u8 *key, int keylen, u8 *iv, int ivsize, u16 mode)
1127
{
1128
struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
1129
struct hifn_crypt_command *cry_cmd;
1130
u8 *buf_pos = buf;
1131
u16 cmd_len;
1132
1133
cry_cmd = (struct hifn_crypt_command *)buf_pos;
1134
1135
cry_cmd->source_count = __cpu_to_le16(dlen & 0xffff);
1136
dlen >>= 16;
1137
cry_cmd->masks = __cpu_to_le16(mode |
1138
((dlen << HIFN_CRYPT_CMD_SRCLEN_S) &
1139
HIFN_CRYPT_CMD_SRCLEN_M));
1140
cry_cmd->header_skip = 0;
1141
cry_cmd->reserved = 0;
1142
1143
buf_pos += sizeof(struct hifn_crypt_command);
1144
1145
dma->cmdu++;
1146
if (dma->cmdu > 1) {
1147
dev->dmareg |= HIFN_DMAIER_C_WAIT;
1148
hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg);
1149
}
1150
1151
if (keylen) {
1152
memcpy(buf_pos, key, keylen);
1153
buf_pos += keylen;
1154
}
1155
if (ivsize) {
1156
memcpy(buf_pos, iv, ivsize);
1157
buf_pos += ivsize;
1158
}
1159
1160
cmd_len = buf_pos - buf;
1161
1162
return cmd_len;
1163
}
1164
1165
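/*
 * Build the command buffer (base command plus, for encrypt/decrypt, the
 * crypto command with key and IV) in the current command slot and post
 * the corresponding command-ring descriptor.
 */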
static int hifn_setup_cmd_desc(struct hifn_device *dev,
1166
struct hifn_context *ctx, struct hifn_request_context *rctx,
1167
void *priv, unsigned int nbytes)
1168
{
1169
struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
1170
int cmd_len, sa_idx;
1171
u8 *buf, *buf_pos;
1172
u16 mask;
1173
1174
sa_idx = dma->cmdi;
1175
buf_pos = buf = dma->command_bufs[dma->cmdi];
1176
1177
mask = 0;
1178
switch (rctx->op) {
1179
case ACRYPTO_OP_DECRYPT:
1180
mask = HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE;
1181
break;
1182
case ACRYPTO_OP_ENCRYPT:
1183
mask = HIFN_BASE_CMD_CRYPT;
1184
break;
1185
case ACRYPTO_OP_HMAC:
1186
mask = HIFN_BASE_CMD_MAC;
1187
break;
1188
default:
1189
goto err_out;
1190
}
1191
1192
buf_pos += hifn_setup_base_command(dev, buf_pos, nbytes,
1193
nbytes, mask, dev->snum);
1194
1195
if (rctx->op == ACRYPTO_OP_ENCRYPT || rctx->op == ACRYPTO_OP_DECRYPT) {
1196
u16 md = 0;
1197
1198
if (ctx->keysize)
1199
md |= HIFN_CRYPT_CMD_NEW_KEY;
1200
if (rctx->iv && rctx->mode != ACRYPTO_MODE_ECB)
1201
md |= HIFN_CRYPT_CMD_NEW_IV;
1202
1203
switch (rctx->mode) {
1204
case ACRYPTO_MODE_ECB:
1205
md |= HIFN_CRYPT_CMD_MODE_ECB;
1206
break;
1207
case ACRYPTO_MODE_CBC:
1208
md |= HIFN_CRYPT_CMD_MODE_CBC;
1209
break;
1210
case ACRYPTO_MODE_CFB:
1211
md |= HIFN_CRYPT_CMD_MODE_CFB;
1212
break;
1213
case ACRYPTO_MODE_OFB:
1214
md |= HIFN_CRYPT_CMD_MODE_OFB;
1215
break;
1216
default:
1217
goto err_out;
1218
}
1219
1220
switch (rctx->type) {
1221
case ACRYPTO_TYPE_AES_128:
1222
if (ctx->keysize != 16)
1223
goto err_out;
1224
md |= HIFN_CRYPT_CMD_KSZ_128 |
1225
HIFN_CRYPT_CMD_ALG_AES;
1226
break;
1227
case ACRYPTO_TYPE_AES_192:
1228
if (ctx->keysize != 24)
1229
goto err_out;
1230
md |= HIFN_CRYPT_CMD_KSZ_192 |
1231
HIFN_CRYPT_CMD_ALG_AES;
1232
break;
1233
case ACRYPTO_TYPE_AES_256:
1234
if (ctx->keysize != 32)
1235
goto err_out;
1236
md |= HIFN_CRYPT_CMD_KSZ_256 |
1237
HIFN_CRYPT_CMD_ALG_AES;
1238
break;
1239
case ACRYPTO_TYPE_3DES:
1240
if (ctx->keysize != 24)
1241
goto err_out;
1242
md |= HIFN_CRYPT_CMD_ALG_3DES;
1243
break;
1244
case ACRYPTO_TYPE_DES:
1245
if (ctx->keysize != 8)
1246
goto err_out;
1247
md |= HIFN_CRYPT_CMD_ALG_DES;
1248
break;
1249
default:
1250
goto err_out;
1251
}
1252
1253
buf_pos += hifn_setup_crypto_command(dev, buf_pos,
1254
nbytes, nbytes, ctx->key, ctx->keysize,
1255
rctx->iv, rctx->ivsize, md);
1256
}
1257
1258
dev->sa[sa_idx] = priv;
1259
dev->started++;
1260
1261
cmd_len = buf_pos - buf;
1262
dma->cmdr[dma->cmdi].l = __cpu_to_le32(cmd_len | HIFN_D_VALID |
1263
HIFN_D_LAST | HIFN_D_MASKDONEIRQ);
1264
1265
if (++dma->cmdi == HIFN_D_CMD_RSIZE) {
1266
dma->cmdr[dma->cmdi].l = __cpu_to_le32(
1267
HIFN_D_VALID | HIFN_D_LAST |
1268
HIFN_D_MASKDONEIRQ | HIFN_D_JUMP);
1269
dma->cmdi = 0;
1270
} else
1271
dma->cmdr[dma->cmdi-1].l |= __cpu_to_le32(HIFN_D_VALID);
1272
1273
if (!(dev->flags & HIFN_FLAG_CMD_BUSY)) {
1274
hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
1275
dev->flags |= HIFN_FLAG_CMD_BUSY;
1276
}
1277
return 0;
1278
1279
err_out:
1280
return -EINVAL;
1281
}
1282
1283
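/*
 * Map one page for device reads and post it on the source ring; returns
 * the number of bytes covered by the descriptor.
 */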
static int hifn_setup_src_desc(struct hifn_device *dev, struct page *page,
1284
unsigned int offset, unsigned int size, int last)
1285
{
1286
struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
1287
int idx;
1288
dma_addr_t addr;
1289
1290
addr = pci_map_page(dev->pdev, page, offset, size, PCI_DMA_TODEVICE);
1291
1292
idx = dma->srci;
1293
1294
dma->srcr[idx].p = __cpu_to_le32(addr);
1295
dma->srcr[idx].l = __cpu_to_le32(size | HIFN_D_VALID |
1296
HIFN_D_MASKDONEIRQ | (last ? HIFN_D_LAST : 0));
1297
1298
if (++idx == HIFN_D_SRC_RSIZE) {
1299
dma->srcr[idx].l = __cpu_to_le32(HIFN_D_VALID |
1300
HIFN_D_JUMP | HIFN_D_MASKDONEIRQ |
1301
(last ? HIFN_D_LAST : 0));
1302
idx = 0;
1303
}
1304
1305
dma->srci = idx;
1306
dma->srcu++;
1307
1308
if (!(dev->flags & HIFN_FLAG_SRC_BUSY)) {
1309
hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
1310
dev->flags |= HIFN_FLAG_SRC_BUSY;
1311
}
1312
1313
return size;
1314
}
1315
1316
static void hifn_setup_res_desc(struct hifn_device *dev)
1317
{
1318
struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
1319
1320
dma->resr[dma->resi].l = __cpu_to_le32(HIFN_USED_RESULT |
1321
HIFN_D_VALID | HIFN_D_LAST);
1322
/*
1323
* dma->resr[dma->resi].l = __cpu_to_le32(HIFN_MAX_RESULT | HIFN_D_VALID |
1324
* HIFN_D_LAST);
1325
*/
1326
1327
if (++dma->resi == HIFN_D_RES_RSIZE) {
1328
dma->resr[HIFN_D_RES_RSIZE].l = __cpu_to_le32(HIFN_D_VALID |
1329
HIFN_D_JUMP | HIFN_D_MASKDONEIRQ | HIFN_D_LAST);
1330
dma->resi = 0;
1331
}
1332
1333
dma->resu++;
1334
1335
if (!(dev->flags & HIFN_FLAG_RES_BUSY)) {
1336
hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
1337
dev->flags |= HIFN_FLAG_RES_BUSY;
1338
}
1339
}
1340
1341
static void hifn_setup_dst_desc(struct hifn_device *dev, struct page *page,
1342
unsigned offset, unsigned size, int last)
1343
{
1344
struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
1345
int idx;
1346
dma_addr_t addr;
1347
1348
addr = pci_map_page(dev->pdev, page, offset, size, PCI_DMA_FROMDEVICE);
1349
1350
idx = dma->dsti;
1351
dma->dstr[idx].p = __cpu_to_le32(addr);
1352
dma->dstr[idx].l = __cpu_to_le32(size | HIFN_D_VALID |
1353
HIFN_D_MASKDONEIRQ | (last ? HIFN_D_LAST : 0));
1354
1355
if (++idx == HIFN_D_DST_RSIZE) {
1356
dma->dstr[idx].l = __cpu_to_le32(HIFN_D_VALID |
1357
HIFN_D_JUMP | HIFN_D_MASKDONEIRQ |
1358
(last ? HIFN_D_LAST : 0));
1359
idx = 0;
1360
}
1361
dma->dsti = idx;
1362
dma->dstu++;
1363
1364
if (!(dev->flags & HIFN_FLAG_DST_BUSY)) {
1365
hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
1366
dev->flags |= HIFN_FLAG_DST_BUSY;
1367
}
1368
}
1369
1370
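/*
 * Post source, destination, command and result descriptors for one
 * request. Misaligned destination chunks are redirected into the pages
 * cached in rctx->walk.
 */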
static int hifn_setup_dma(struct hifn_device *dev,
1371
struct hifn_context *ctx, struct hifn_request_context *rctx,
1372
struct scatterlist *src, struct scatterlist *dst,
1373
unsigned int nbytes, void *priv)
1374
{
1375
struct scatterlist *t;
1376
struct page *spage, *dpage;
1377
unsigned int soff, doff;
1378
unsigned int n, len;
1379
1380
n = nbytes;
1381
while (n) {
1382
spage = sg_page(src);
1383
soff = src->offset;
1384
len = min(src->length, n);
1385
1386
hifn_setup_src_desc(dev, spage, soff, len, n - len == 0);
1387
1388
src++;
1389
n -= len;
1390
}
1391
1392
t = &rctx->walk.cache[0];
1393
n = nbytes;
1394
while (n) {
1395
if (t->length && rctx->walk.flags & ASYNC_FLAGS_MISALIGNED) {
1396
BUG_ON(!sg_page(t));
1397
dpage = sg_page(t);
1398
doff = 0;
1399
len = t->length;
1400
} else {
1401
BUG_ON(!sg_page(dst));
1402
dpage = sg_page(dst);
1403
doff = dst->offset;
1404
len = dst->length;
1405
}
1406
len = min(len, n);
1407
1408
hifn_setup_dst_desc(dev, dpage, doff, len, n - len == 0);
1409
1410
dst++;
1411
t++;
1412
n -= len;
1413
}
1414
1415
hifn_setup_cmd_desc(dev, ctx, rctx, priv, nbytes);
1416
hifn_setup_res_desc(dev);
1417
return 0;
1418
}
1419
1420
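/*
 * Allocate up to num single-page scatterlist entries used to bounce
 * misaligned destination data; returns how many pages were allocated.
 */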
static int hifn_cipher_walk_init(struct hifn_cipher_walk *w,
1421
int num, gfp_t gfp_flags)
1422
{
1423
int i;
1424
1425
num = min(ASYNC_SCATTERLIST_CACHE, num);
1426
sg_init_table(w->cache, num);
1427
1428
w->num = 0;
1429
for (i=0; i<num; ++i) {
1430
struct page *page = alloc_page(gfp_flags);
1431
struct scatterlist *s;
1432
1433
if (!page)
1434
break;
1435
1436
s = &w->cache[i];
1437
1438
sg_set_page(s, page, PAGE_SIZE, 0);
1439
w->num++;
1440
}
1441
1442
return i;
1443
}
1444
1445
static void hifn_cipher_walk_exit(struct hifn_cipher_walk *w)
1446
{
1447
int i;
1448
1449
for (i=0; i<w->num; ++i) {
1450
struct scatterlist *s = &w->cache[i];
1451
1452
__free_page(sg_page(s));
1453
1454
s->length = 0;
1455
}
1456
1457
w->num = 0;
1458
}
1459
1460
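/*
 * Account for size bytes spread over the destination scatterlist:
 * decrement the remaining-destination and remaining-bytes counters and
 * return the number of scatterlist entries consumed.
 */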
static int ablkcipher_add(unsigned int *drestp, struct scatterlist *dst,
1461
unsigned int size, unsigned int *nbytesp)
1462
{
1463
unsigned int copy, drest = *drestp, nbytes = *nbytesp;
1464
int idx = 0;
1465
1466
if (drest < size || size > nbytes)
1467
return -EINVAL;
1468
1469
while (size) {
1470
copy = min3(drest, size, dst->length);
1471
1472
size -= copy;
1473
drest -= copy;
1474
nbytes -= copy;
1475
1476
dprintk("%s: copy: %u, size: %u, drest: %u, nbytes: %u.\n",
1477
__func__, copy, size, drest, nbytes);
1478
1479
dst++;
1480
idx++;
1481
}
1482
1483
*nbytesp = nbytes;
1484
*drestp = drest;
1485
1486
return idx;
1487
}
1488
1489
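/*
 * Walk the destination scatterlist and, for entries whose offset or
 * length is not HIFN_D_DST_DALIGN-aligned, set up a bounce entry in the
 * walk cache covering an aligned amount of data.
 */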
static int hifn_cipher_walk(struct ablkcipher_request *req,
1490
struct hifn_cipher_walk *w)
1491
{
1492
struct scatterlist *dst, *t;
1493
unsigned int nbytes = req->nbytes, offset, copy, diff;
1494
int idx, tidx, err;
1495
1496
tidx = idx = 0;
1497
offset = 0;
1498
while (nbytes) {
1499
if (idx >= w->num && (w->flags & ASYNC_FLAGS_MISALIGNED))
1500
return -EINVAL;
1501
1502
dst = &req->dst[idx];
1503
1504
dprintk("\n%s: dlen: %u, doff: %u, offset: %u, nbytes: %u.\n",
1505
__func__, dst->length, dst->offset, offset, nbytes);
1506
1507
if (!IS_ALIGNED(dst->offset, HIFN_D_DST_DALIGN) ||
1508
!IS_ALIGNED(dst->length, HIFN_D_DST_DALIGN) ||
1509
offset) {
1510
unsigned slen = min(dst->length - offset, nbytes);
1511
unsigned dlen = PAGE_SIZE;
1512
1513
t = &w->cache[idx];
1514
1515
err = ablkcipher_add(&dlen, dst, slen, &nbytes);
1516
if (err < 0)
1517
return err;
1518
1519
idx += err;
1520
1521
copy = slen & ~(HIFN_D_DST_DALIGN - 1);
1522
diff = slen & (HIFN_D_DST_DALIGN - 1);
1523
1524
if (dlen < nbytes) {
1525
/*
1526
* Destination page does not have enough space
1527
* to hold an additional block-sized chunk,
1528
* so we mark that page as containing only
1529
* blocksize aligned chunks:
1530
* t->length = (slen & ~(HIFN_D_DST_DALIGN - 1));
1531
* and increase number of bytes to be processed
1532
* in next chunk:
1533
* nbytes += diff;
1534
*/
1535
nbytes += diff;
1536
1537
/*
1538
* Temporary of course...
1539
* Kick author if you will catch this one.
1540
*/
1541
printk(KERN_ERR "%s: dlen: %u, nbytes: %u,"
1542
"slen: %u, offset: %u.\n",
1543
__func__, dlen, nbytes, slen, offset);
1544
printk(KERN_ERR "%s: please contact author to fix this "
1545
"issue, generally you should not catch "
1546
"this path under any condition but who "
1547
"knows how did you use crypto code.\n"
1548
"Thank you.\n", __func__);
1549
BUG();
1550
} else {
1551
copy += diff + nbytes;
1552
1553
dst = &req->dst[idx];
1554
1555
err = ablkcipher_add(&dlen, dst, nbytes, &nbytes);
1556
if (err < 0)
1557
return err;
1558
1559
idx += err;
1560
}
1561
1562
t->length = copy;
1563
t->offset = offset;
1564
} else {
1565
nbytes -= min(dst->length, nbytes);
1566
idx++;
1567
}
1568
1569
tidx++;
1570
}
1571
1572
return tidx;
1573
}
1574
1575
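/*
 * Top-level request setup: detect misaligned destination entries,
 * allocate bounce pages if needed, and queue the DMA descriptors under
 * the device lock, failing with -EAGAIN when the queue is full.
 */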
static int hifn_setup_session(struct ablkcipher_request *req)
1576
{
1577
struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
1578
struct hifn_request_context *rctx = ablkcipher_request_ctx(req);
1579
struct hifn_device *dev = ctx->dev;
1580
unsigned long dlen, flags;
1581
unsigned int nbytes = req->nbytes, idx = 0;
1582
int err = -EINVAL, sg_num;
1583
struct scatterlist *dst;
1584
1585
if (rctx->iv && !rctx->ivsize && rctx->mode != ACRYPTO_MODE_ECB)
1586
goto err_out_exit;
1587
1588
rctx->walk.flags = 0;
1589
1590
while (nbytes) {
1591
dst = &req->dst[idx];
1592
dlen = min(dst->length, nbytes);
1593
1594
if (!IS_ALIGNED(dst->offset, HIFN_D_DST_DALIGN) ||
1595
!IS_ALIGNED(dlen, HIFN_D_DST_DALIGN))
1596
rctx->walk.flags |= ASYNC_FLAGS_MISALIGNED;
1597
1598
nbytes -= dlen;
1599
idx++;
1600
}
1601
1602
if (rctx->walk.flags & ASYNC_FLAGS_MISALIGNED) {
1603
err = hifn_cipher_walk_init(&rctx->walk, idx, GFP_ATOMIC);
1604
if (err < 0)
1605
return err;
1606
}
1607
1608
sg_num = hifn_cipher_walk(req, &rctx->walk);
1609
if (sg_num < 0) {
1610
err = sg_num;
1611
goto err_out_exit;
1612
}
1613
1614
spin_lock_irqsave(&dev->lock, flags);
1615
if (dev->started + sg_num > HIFN_QUEUE_LENGTH) {
1616
err = -EAGAIN;
1617
goto err_out;
1618
}
1619
1620
err = hifn_setup_dma(dev, ctx, rctx, req->src, req->dst, req->nbytes, req);
1621
if (err)
1622
goto err_out;
1623
1624
dev->snum++;
1625
1626
dev->active = HIFN_DEFAULT_ACTIVE_NUM;
1627
spin_unlock_irqrestore(&dev->lock, flags);
1628
1629
return 0;
1630
1631
err_out:
1632
spin_unlock_irqrestore(&dev->lock, flags);
1633
err_out_exit:
1634
if (err) {
1635
printk("%s: iv: %p [%d], key: %p [%d], mode: %u, op: %u, "
1636
"type: %u, err: %d.\n",
1637
dev->name, rctx->iv, rctx->ivsize,
1638
ctx->key, ctx->keysize,
1639
rctx->mode, rctx->op, rctx->type, err);
1640
}
1641
1642
return err;
1643
}
1644
1645
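/*
 * Self-test: run an all-zero 16-byte block through AES-128 ECB with an
 * all-zero key and compare the result against the known FIPS vector.
 */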
static int hifn_test(struct hifn_device *dev, int encdec, u8 snum)
1646
{
1647
int n, err;
1648
u8 src[16];
1649
struct hifn_context ctx;
1650
struct hifn_request_context rctx;
1651
u8 fips_aes_ecb_from_zero[16] = {
1652
0x66, 0xE9, 0x4B, 0xD4,
1653
0xEF, 0x8A, 0x2C, 0x3B,
1654
0x88, 0x4C, 0xFA, 0x59,
1655
0xCA, 0x34, 0x2B, 0x2E};
1656
struct scatterlist sg;
1657
1658
memset(src, 0, sizeof(src));
1659
memset(ctx.key, 0, sizeof(ctx.key));
1660
1661
ctx.dev = dev;
1662
ctx.keysize = 16;
1663
rctx.ivsize = 0;
1664
rctx.iv = NULL;
1665
rctx.op = (encdec)?ACRYPTO_OP_ENCRYPT:ACRYPTO_OP_DECRYPT;
1666
rctx.mode = ACRYPTO_MODE_ECB;
1667
rctx.type = ACRYPTO_TYPE_AES_128;
1668
rctx.walk.cache[0].length = 0;
1669
1670
sg_init_one(&sg, &src, sizeof(src));
1671
1672
err = hifn_setup_dma(dev, &ctx, &rctx, &sg, &sg, sizeof(src), NULL);
1673
if (err)
1674
goto err_out;
1675
1676
dev->started = 0;
1677
msleep(200);
1678
1679
dprintk("%s: decoded: ", dev->name);
1680
for (n=0; n<sizeof(src); ++n)
1681
dprintk("%02x ", src[n]);
1682
dprintk("\n");
1683
dprintk("%s: FIPS : ", dev->name);
1684
for (n=0; n<sizeof(fips_aes_ecb_from_zero); ++n)
1685
dprintk("%02x ", fips_aes_ecb_from_zero[n]);
1686
dprintk("\n");
1687
1688
if (!memcmp(src, fips_aes_ecb_from_zero, sizeof(fips_aes_ecb_from_zero))) {
1689
printk(KERN_INFO "%s: AES 128 ECB test has been successfully "
1690
"passed.\n", dev->name);
1691
return 0;
1692
}
1693
1694
err_out:
1695
printk(KERN_INFO "%s: AES 128 ECB test has been failed.\n", dev->name);
1696
return -1;
1697
}
1698
1699
static int hifn_start_device(struct hifn_device *dev)
1700
{
1701
int err;
1702
1703
dev->started = dev->active = 0;
1704
hifn_reset_dma(dev, 1);
1705
1706
err = hifn_enable_crypto(dev);
1707
if (err)
1708
return err;
1709
1710
hifn_reset_puc(dev);
1711
1712
hifn_init_dma(dev);
1713
1714
hifn_init_registers(dev);
1715
1716
hifn_init_pubrng(dev);
1717
1718
return 0;
1719
}
1720
1721
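/*
 * Copy size bytes from the bounce buffer at saddr back into the
 * destination scatterlist, starting at offset within the first entry;
 * returns the number of scatterlist entries consumed.
 */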
static int ablkcipher_get(void *saddr, unsigned int *srestp, unsigned int offset,
		struct scatterlist *dst, unsigned int size, unsigned int *nbytesp)
{
	unsigned int srest = *srestp, nbytes = *nbytesp, copy;
	void *daddr;
	int idx = 0;

	if (srest < size || size > nbytes)
		return -EINVAL;

	while (size) {
		copy = min3(srest, dst->length, size);

		daddr = kmap_atomic(sg_page(dst), KM_IRQ0);
		memcpy(daddr + dst->offset + offset, saddr, copy);
		kunmap_atomic(daddr, KM_IRQ0);

		nbytes -= copy;
		size -= copy;
		srest -= copy;
		saddr += copy;
		offset = 0;

		dprintk("%s: copy: %u, size: %u, srest: %u, nbytes: %u.\n",
				__func__, copy, size, srest, nbytes);

		dst++;
		idx++;
	}

	*nbytesp = nbytes;
	*srestp = srest;

	return idx;
}

static inline void hifn_complete_sa(struct hifn_device *dev, int i)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	dev->sa[i] = NULL;
	dev->started--;
	if (dev->started < 0)
		printk("%s: started: %d.\n", __func__, dev->started);
	spin_unlock_irqrestore(&dev->lock, flags);
	BUG_ON(dev->started < 0);
}

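/*
 * Complete one request: for misaligned requests the data was processed
 * through the bounce pages in rctx->walk.cache, so copy it back into the
 * caller's destination scatterlist before signalling completion.
 */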
static void hifn_process_ready(struct ablkcipher_request *req, int error)
{
	struct hifn_request_context *rctx = ablkcipher_request_ctx(req);

	if (rctx->walk.flags & ASYNC_FLAGS_MISALIGNED) {
		unsigned int nbytes = req->nbytes;
		int idx = 0, err;
		struct scatterlist *dst, *t;
		void *saddr;

		while (nbytes) {
			t = &rctx->walk.cache[idx];
			dst = &req->dst[idx];

			dprintk("\n%s: sg_page(t): %p, t->length: %u, "
				"sg_page(dst): %p, dst->length: %u, "
				"nbytes: %u.\n",
				__func__, sg_page(t), t->length,
				sg_page(dst), dst->length, nbytes);

			if (!t->length) {
				nbytes -= min(dst->length, nbytes);
				idx++;
				continue;
			}

			saddr = kmap_atomic(sg_page(t), KM_SOFTIRQ0);

			err = ablkcipher_get(saddr, &t->length, t->offset,
					dst, nbytes, &nbytes);
			if (err < 0) {
				kunmap_atomic(saddr, KM_SOFTIRQ0);
				break;
			}

			idx += err;
			kunmap_atomic(saddr, KM_SOFTIRQ0);
		}

		hifn_cipher_walk_exit(&rctx->walk);
	}

	req->base.complete(&req->base, error);
}

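/*
 * Reclaim descriptors the engine has finished with (HIFN_D_VALID cleared)
 * from the result, source, command and destination rings, completing the
 * requests attached to freed result slots.
 */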
static void hifn_clear_rings(struct hifn_device *dev, int error)
{
	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
	int i, u;

	dprintk("%s: ring cleanup 1: i: %d.%d.%d.%d, u: %d.%d.%d.%d, "
			"k: %d.%d.%d.%d.\n",
			dev->name,
			dma->cmdi, dma->srci, dma->dsti, dma->resi,
			dma->cmdu, dma->srcu, dma->dstu, dma->resu,
			dma->cmdk, dma->srck, dma->dstk, dma->resk);

	i = dma->resk; u = dma->resu;
	while (u != 0) {
		if (dma->resr[i].l & __cpu_to_le32(HIFN_D_VALID))
			break;

		if (dev->sa[i]) {
			dev->success++;
			dev->reset = 0;
			hifn_process_ready(dev->sa[i], error);
			hifn_complete_sa(dev, i);
		}

		if (++i == HIFN_D_RES_RSIZE)
			i = 0;
		u--;
	}
	dma->resk = i; dma->resu = u;

	i = dma->srck; u = dma->srcu;
	while (u != 0) {
		if (dma->srcr[i].l & __cpu_to_le32(HIFN_D_VALID))
			break;
		if (++i == HIFN_D_SRC_RSIZE)
			i = 0;
		u--;
	}
	dma->srck = i; dma->srcu = u;

	i = dma->cmdk; u = dma->cmdu;
	while (u != 0) {
		if (dma->cmdr[i].l & __cpu_to_le32(HIFN_D_VALID))
			break;
		if (++i == HIFN_D_CMD_RSIZE)
			i = 0;
		u--;
	}
	dma->cmdk = i; dma->cmdu = u;

	i = dma->dstk; u = dma->dstu;
	while (u != 0) {
		if (dma->dstr[i].l & __cpu_to_le32(HIFN_D_VALID))
			break;
		if (++i == HIFN_D_DST_RSIZE)
			i = 0;
		u--;
	}
	dma->dstk = i; dma->dstu = u;

	dprintk("%s: ring cleanup 2: i: %d.%d.%d.%d, u: %d.%d.%d.%d, "
			"k: %d.%d.%d.%d.\n",
			dev->name,
			dma->cmdi, dma->srci, dma->dsti, dma->resi,
			dma->cmdu, dma->srcu, dma->dstu, dma->resu,
			dma->cmdk, dma->srck, dma->dstk, dma->resk);
}

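/*
 * Periodic (one second) watchdog: disables DMA engines that have gone
 * idle and, if no request completed for several consecutive ticks while
 * work was outstanding, fails everything in flight and restarts the
 * device.
 */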
static void hifn_work(struct work_struct *work)
{
	struct delayed_work *dw = to_delayed_work(work);
	struct hifn_device *dev = container_of(dw, struct hifn_device, work);
	unsigned long flags;
	int reset = 0;
	u32 r = 0;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->active == 0) {
		struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;

		if (dma->cmdu == 0 && (dev->flags & HIFN_FLAG_CMD_BUSY)) {
			dev->flags &= ~HIFN_FLAG_CMD_BUSY;
			r |= HIFN_DMACSR_C_CTRL_DIS;
		}
		if (dma->srcu == 0 && (dev->flags & HIFN_FLAG_SRC_BUSY)) {
			dev->flags &= ~HIFN_FLAG_SRC_BUSY;
			r |= HIFN_DMACSR_S_CTRL_DIS;
		}
		if (dma->dstu == 0 && (dev->flags & HIFN_FLAG_DST_BUSY)) {
			dev->flags &= ~HIFN_FLAG_DST_BUSY;
			r |= HIFN_DMACSR_D_CTRL_DIS;
		}
		if (dma->resu == 0 && (dev->flags & HIFN_FLAG_RES_BUSY)) {
			dev->flags &= ~HIFN_FLAG_RES_BUSY;
			r |= HIFN_DMACSR_R_CTRL_DIS;
		}
		if (r)
			hifn_write_1(dev, HIFN_1_DMA_CSR, r);
	} else
		dev->active--;

	if ((dev->prev_success == dev->success) && dev->started)
		reset = 1;
	dev->prev_success = dev->success;
	spin_unlock_irqrestore(&dev->lock, flags);

	if (reset) {
		if (++dev->reset >= 5) {
			int i;
			struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;

			printk("%s: r: %08x, active: %d, started: %d, "
				"success: %lu: qlen: %u/%u, reset: %d.\n",
				dev->name, r, dev->active, dev->started,
				dev->success, dev->queue.qlen, dev->queue.max_qlen,
				reset);

			printk("%s: res: ", __func__);
			for (i = 0; i < HIFN_D_RES_RSIZE; ++i) {
				printk("%x.%p ", dma->resr[i].l, dev->sa[i]);
				if (dev->sa[i]) {
					hifn_process_ready(dev->sa[i], -ENODEV);
					hifn_complete_sa(dev, i);
				}
			}
			printk("\n");

			hifn_reset_dma(dev, 1);
			hifn_stop_device(dev);
			hifn_start_device(dev);
			dev->reset = 0;
		}

		tasklet_schedule(&dev->tasklet);
	}

	schedule_delayed_work(&dev->work, HZ);
}

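/*
 * Interrupt handler: acknowledge the DMA status bits this device owns,
 * recover from overflow and abort conditions and kick the tasklet that
 * reaps completed descriptors.
 */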
static irqreturn_t hifn_interrupt(int irq, void *data)
{
	struct hifn_device *dev = (struct hifn_device *)data;
	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
	u32 dmacsr, restart;

	dmacsr = hifn_read_1(dev, HIFN_1_DMA_CSR);

	dprintk("%s: 1 dmacsr: %08x, dmareg: %08x, res: %08x [%d], "
			"i: %d.%d.%d.%d, u: %d.%d.%d.%d.\n",
		dev->name, dmacsr, dev->dmareg, dmacsr & dev->dmareg, dma->cmdi,
		dma->cmdi, dma->srci, dma->dsti, dma->resi,
		dma->cmdu, dma->srcu, dma->dstu, dma->resu);

	if ((dmacsr & dev->dmareg) == 0)
		return IRQ_NONE;

	hifn_write_1(dev, HIFN_1_DMA_CSR, dmacsr & dev->dmareg);

	if (dmacsr & HIFN_DMACSR_ENGINE)
		hifn_write_0(dev, HIFN_0_PUISR, hifn_read_0(dev, HIFN_0_PUISR));
	if (dmacsr & HIFN_DMACSR_PUBDONE)
		hifn_write_1(dev, HIFN_1_PUB_STATUS,
			hifn_read_1(dev, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);

	restart = dmacsr & (HIFN_DMACSR_R_OVER | HIFN_DMACSR_D_OVER);
	if (restart) {
		u32 puisr = hifn_read_0(dev, HIFN_0_PUISR);

		printk(KERN_WARNING "%s: overflow: r: %d, d: %d, puisr: %08x, d: %u.\n",
			dev->name, !!(dmacsr & HIFN_DMACSR_R_OVER),
			!!(dmacsr & HIFN_DMACSR_D_OVER),
			puisr, !!(puisr & HIFN_PUISR_DSTOVER));
		if (!!(puisr & HIFN_PUISR_DSTOVER))
			hifn_write_0(dev, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
		hifn_write_1(dev, HIFN_1_DMA_CSR, dmacsr & (HIFN_DMACSR_R_OVER |
					HIFN_DMACSR_D_OVER));
	}

	restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
			HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
	if (restart) {
		printk(KERN_WARNING "%s: abort: c: %d, s: %d, d: %d, r: %d.\n",
			dev->name, !!(dmacsr & HIFN_DMACSR_C_ABORT),
			!!(dmacsr & HIFN_DMACSR_S_ABORT),
			!!(dmacsr & HIFN_DMACSR_D_ABORT),
			!!(dmacsr & HIFN_DMACSR_R_ABORT));
		hifn_reset_dma(dev, 1);
		hifn_init_dma(dev);
		hifn_init_registers(dev);
	}

	if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->cmdu == 0)) {
		dprintk("%s: wait on command.\n", dev->name);
		dev->dmareg &= ~(HIFN_DMAIER_C_WAIT);
		hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg);
	}

	tasklet_schedule(&dev->tasklet);

	return IRQ_HANDLED;
}

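/*
 * Complete whatever is still sitting in the result ring (-ENODEV if the
 * descriptor was never processed by the engine) and fail every request
 * left on the backlog queue; used on device shutdown.
 */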
static void hifn_flush(struct hifn_device *dev)
{
	unsigned long flags;
	struct crypto_async_request *async_req;
	struct ablkcipher_request *req;
	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
	int i;

	for (i = 0; i < HIFN_D_RES_RSIZE; ++i) {
		struct hifn_desc *d = &dma->resr[i];

		if (dev->sa[i]) {
			hifn_process_ready(dev->sa[i],
				(d->l & __cpu_to_le32(HIFN_D_VALID)) ? -ENODEV : 0);
			hifn_complete_sa(dev, i);
		}
	}

	spin_lock_irqsave(&dev->lock, flags);
	while ((async_req = crypto_dequeue_request(&dev->queue))) {
		req = container_of(async_req, struct ablkcipher_request, base);
		spin_unlock_irqrestore(&dev->lock, flags);

		hifn_process_ready(req, -ENODEV);

		spin_lock_irqsave(&dev->lock, flags);
	}
	spin_unlock_irqrestore(&dev->lock, flags);
}

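/*
 * Remember the cipher key in the transform context; DES keys are
 * additionally checked for weakness when CRYPTO_TFM_REQ_WEAK_KEY is set.
 */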
static int hifn_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
		unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct hifn_context *ctx = crypto_tfm_ctx(tfm);
	struct hifn_device *dev = ctx->dev;

	if (len > HIFN_MAX_CRYPT_KEY_LENGTH) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	if (len == HIFN_DES_KEY_LENGTH) {
		u32 tmp[DES_EXPKEY_WORDS];
		int ret = des_ekey(tmp, key);

		if (unlikely(ret == 0) && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
			tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
			return -EINVAL;
		}
	}

	dev->flags &= ~HIFN_FLAG_OLD_KEY;

	memcpy(ctx->key, key, len);
	ctx->keysize = len;

	return 0;
}

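/*
 * Try to push the request straight to the hardware; if the rings are
 * full (-EAGAIN), put it on the software backlog queue instead.
 */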
static int hifn_handle_req(struct ablkcipher_request *req)
{
	struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
	struct hifn_device *dev = ctx->dev;
	int err = -EAGAIN;

	if (dev->started + DIV_ROUND_UP(req->nbytes, PAGE_SIZE) <= HIFN_QUEUE_LENGTH)
		err = hifn_setup_session(req);

	if (err == -EAGAIN) {
		unsigned long flags;

		spin_lock_irqsave(&dev->lock, flags);
		err = ablkcipher_enqueue_request(&dev->queue, req);
		spin_unlock_irqrestore(&dev->lock, flags);
	}

	return err;
}

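/*
 * Fill in the per-request context (operation, mode, cipher type, IV) and
 * hand the request over to hifn_handle_req(). The AES type is fixed up
 * here according to the key length stored at setkey time.
 */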
static int hifn_setup_crypto_req(struct ablkcipher_request *req, u8 op,
		u8 type, u8 mode)
{
	struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
	struct hifn_request_context *rctx = ablkcipher_request_ctx(req);
	unsigned ivsize;

	ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));

	if (req->info && mode != ACRYPTO_MODE_ECB) {
		if (type == ACRYPTO_TYPE_AES_128)
			ivsize = HIFN_AES_IV_LENGTH;
		else if (type == ACRYPTO_TYPE_DES)
			ivsize = HIFN_DES_KEY_LENGTH;
		else if (type == ACRYPTO_TYPE_3DES)
			ivsize = HIFN_3DES_KEY_LENGTH;
	}

	if (ctx->keysize != 16 && type == ACRYPTO_TYPE_AES_128) {
		if (ctx->keysize == 24)
			type = ACRYPTO_TYPE_AES_192;
		else if (ctx->keysize == 32)
			type = ACRYPTO_TYPE_AES_256;
	}

	rctx->op = op;
	rctx->mode = mode;
	rctx->type = type;
	rctx->iv = req->info;
	rctx->ivsize = ivsize;

	/*
	 * HEAVY TODO: needs to kick Herbert XU to write documentation.
	 * HEAVY TODO: needs to kick Herbert XU to write documentation.
	 * HEAVY TODO: needs to kick Herbert XU to write documentation.
	 */

	return hifn_handle_req(req);
}

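/*
 * Drain the software backlog into the hardware while there is room in
 * the descriptor rings, notifying backlogged requests with -EINPROGRESS.
 */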
static int hifn_process_queue(struct hifn_device *dev)
{
	struct crypto_async_request *async_req, *backlog;
	struct ablkcipher_request *req;
	unsigned long flags;
	int err = 0;

	while (dev->started < HIFN_QUEUE_LENGTH) {
		spin_lock_irqsave(&dev->lock, flags);
		backlog = crypto_get_backlog(&dev->queue);
		async_req = crypto_dequeue_request(&dev->queue);
		spin_unlock_irqrestore(&dev->lock, flags);

		if (!async_req)
			break;

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		req = container_of(async_req, struct ablkcipher_request, base);

		err = hifn_handle_req(req);
		if (err)
			break;
	}

	return err;
}

static int hifn_setup_crypto(struct ablkcipher_request *req, u8 op,
		u8 type, u8 mode)
{
	int err;
	struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
	struct hifn_device *dev = ctx->dev;

	err = hifn_setup_crypto_req(req, op, type, mode);
	if (err)
		return err;

	if (dev->started < HIFN_QUEUE_LENGTH && dev->queue.qlen)
		hifn_process_queue(dev);

	return -EINPROGRESS;
}

/*
 * AES encryption functions.
 */
static inline int hifn_encrypt_aes_ecb(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
			ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_ECB);
}
static inline int hifn_encrypt_aes_cbc(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
			ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CBC);
}
static inline int hifn_encrypt_aes_cfb(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
			ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CFB);
}
static inline int hifn_encrypt_aes_ofb(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
			ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_OFB);
}

/*
 * AES decryption functions.
 */
static inline int hifn_decrypt_aes_ecb(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
			ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_ECB);
}
static inline int hifn_decrypt_aes_cbc(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
			ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CBC);
}
static inline int hifn_decrypt_aes_cfb(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
			ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CFB);
}
static inline int hifn_decrypt_aes_ofb(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
			ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_OFB);
}

/*
 * DES encryption functions.
 */
static inline int hifn_encrypt_des_ecb(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
			ACRYPTO_TYPE_DES, ACRYPTO_MODE_ECB);
}
static inline int hifn_encrypt_des_cbc(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
			ACRYPTO_TYPE_DES, ACRYPTO_MODE_CBC);
}
static inline int hifn_encrypt_des_cfb(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
			ACRYPTO_TYPE_DES, ACRYPTO_MODE_CFB);
}
static inline int hifn_encrypt_des_ofb(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
			ACRYPTO_TYPE_DES, ACRYPTO_MODE_OFB);
}

/*
 * DES decryption functions.
 */
static inline int hifn_decrypt_des_ecb(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
			ACRYPTO_TYPE_DES, ACRYPTO_MODE_ECB);
}
static inline int hifn_decrypt_des_cbc(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
			ACRYPTO_TYPE_DES, ACRYPTO_MODE_CBC);
}
static inline int hifn_decrypt_des_cfb(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
			ACRYPTO_TYPE_DES, ACRYPTO_MODE_CFB);
}
static inline int hifn_decrypt_des_ofb(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
			ACRYPTO_TYPE_DES, ACRYPTO_MODE_OFB);
}

/*
 * 3DES encryption functions.
 */
static inline int hifn_encrypt_3des_ecb(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
			ACRYPTO_TYPE_3DES, ACRYPTO_MODE_ECB);
}
static inline int hifn_encrypt_3des_cbc(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
			ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CBC);
}
static inline int hifn_encrypt_3des_cfb(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
			ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CFB);
}
static inline int hifn_encrypt_3des_ofb(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
			ACRYPTO_TYPE_3DES, ACRYPTO_MODE_OFB);
}

/*
 * 3DES decryption functions.
 */
static inline int hifn_decrypt_3des_ecb(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
			ACRYPTO_TYPE_3DES, ACRYPTO_MODE_ECB);
}
static inline int hifn_decrypt_3des_cbc(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
			ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CBC);
}
static inline int hifn_decrypt_3des_cfb(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
			ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CFB);
}
static inline int hifn_decrypt_3des_ofb(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
			ACRYPTO_TYPE_3DES, ACRYPTO_MODE_OFB);
}

struct hifn_alg_template
{
	char name[CRYPTO_MAX_ALG_NAME];
	char drv_name[CRYPTO_MAX_ALG_NAME];
	unsigned int bsize;
	struct ablkcipher_alg ablkcipher;
};

static struct hifn_alg_template hifn_alg_templates[] = {
	/*
	 * 3DES ECB, CBC, CFB and OFB modes.
	 */
	{
		.name = "cfb(des3_ede)", .drv_name = "cfb-3des", .bsize = 8,
		.ablkcipher = {
			.min_keysize = HIFN_3DES_KEY_LENGTH,
			.max_keysize = HIFN_3DES_KEY_LENGTH,
			.setkey = hifn_setkey,
			.encrypt = hifn_encrypt_3des_cfb,
			.decrypt = hifn_decrypt_3des_cfb,
		},
	},
	{
		.name = "ofb(des3_ede)", .drv_name = "ofb-3des", .bsize = 8,
		.ablkcipher = {
			.min_keysize = HIFN_3DES_KEY_LENGTH,
			.max_keysize = HIFN_3DES_KEY_LENGTH,
			.setkey = hifn_setkey,
			.encrypt = hifn_encrypt_3des_ofb,
			.decrypt = hifn_decrypt_3des_ofb,
		},
	},
	{
		.name = "cbc(des3_ede)", .drv_name = "cbc-3des", .bsize = 8,
		.ablkcipher = {
			.ivsize = HIFN_IV_LENGTH,
			.min_keysize = HIFN_3DES_KEY_LENGTH,
			.max_keysize = HIFN_3DES_KEY_LENGTH,
			.setkey = hifn_setkey,
			.encrypt = hifn_encrypt_3des_cbc,
			.decrypt = hifn_decrypt_3des_cbc,
		},
	},
	{
		.name = "ecb(des3_ede)", .drv_name = "ecb-3des", .bsize = 8,
		.ablkcipher = {
			.min_keysize = HIFN_3DES_KEY_LENGTH,
			.max_keysize = HIFN_3DES_KEY_LENGTH,
			.setkey = hifn_setkey,
			.encrypt = hifn_encrypt_3des_ecb,
			.decrypt = hifn_decrypt_3des_ecb,
		},
	},

	/*
	 * DES ECB, CBC, CFB and OFB modes.
	 */
	{
		.name = "cfb(des)", .drv_name = "cfb-des", .bsize = 8,
		.ablkcipher = {
			.min_keysize = HIFN_DES_KEY_LENGTH,
			.max_keysize = HIFN_DES_KEY_LENGTH,
			.setkey = hifn_setkey,
			.encrypt = hifn_encrypt_des_cfb,
			.decrypt = hifn_decrypt_des_cfb,
		},
	},
	{
		.name = "ofb(des)", .drv_name = "ofb-des", .bsize = 8,
		.ablkcipher = {
			.min_keysize = HIFN_DES_KEY_LENGTH,
			.max_keysize = HIFN_DES_KEY_LENGTH,
			.setkey = hifn_setkey,
			.encrypt = hifn_encrypt_des_ofb,
			.decrypt = hifn_decrypt_des_ofb,
		},
	},
	{
		.name = "cbc(des)", .drv_name = "cbc-des", .bsize = 8,
		.ablkcipher = {
			.ivsize = HIFN_IV_LENGTH,
			.min_keysize = HIFN_DES_KEY_LENGTH,
			.max_keysize = HIFN_DES_KEY_LENGTH,
			.setkey = hifn_setkey,
			.encrypt = hifn_encrypt_des_cbc,
			.decrypt = hifn_decrypt_des_cbc,
		},
	},
	{
		.name = "ecb(des)", .drv_name = "ecb-des", .bsize = 8,
		.ablkcipher = {
			.min_keysize = HIFN_DES_KEY_LENGTH,
			.max_keysize = HIFN_DES_KEY_LENGTH,
			.setkey = hifn_setkey,
			.encrypt = hifn_encrypt_des_ecb,
			.decrypt = hifn_decrypt_des_ecb,
		},
	},

	/*
	 * AES ECB, CBC, CFB and OFB modes.
	 */
	{
		.name = "ecb(aes)", .drv_name = "ecb-aes", .bsize = 16,
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = hifn_setkey,
			.encrypt = hifn_encrypt_aes_ecb,
			.decrypt = hifn_decrypt_aes_ecb,
		},
	},
	{
		.name = "cbc(aes)", .drv_name = "cbc-aes", .bsize = 16,
		.ablkcipher = {
			.ivsize = HIFN_AES_IV_LENGTH,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = hifn_setkey,
			.encrypt = hifn_encrypt_aes_cbc,
			.decrypt = hifn_decrypt_aes_cbc,
		},
	},
	{
		.name = "cfb(aes)", .drv_name = "cfb-aes", .bsize = 16,
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = hifn_setkey,
			.encrypt = hifn_encrypt_aes_cfb,
			.decrypt = hifn_decrypt_aes_cfb,
		},
	},
	{
		.name = "ofb(aes)", .drv_name = "ofb-aes", .bsize = 16,
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = hifn_setkey,
			.encrypt = hifn_encrypt_aes_ofb,
			.decrypt = hifn_decrypt_aes_ofb,
		},
	},
};

static int hifn_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct hifn_crypto_alg *ha = crypto_alg_to_hifn(alg);
	struct hifn_context *ctx = crypto_tfm_ctx(tfm);

	ctx->dev = ha->dev;
	tfm->crt_ablkcipher.reqsize = sizeof(struct hifn_request_context);
	return 0;
}

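/*
 * Allocate a crypto_alg from one of the templates above, bind it to the
 * device and register it with the crypto API.
 */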
static int hifn_alg_alloc(struct hifn_device *dev, struct hifn_alg_template *t)
{
	struct hifn_crypto_alg *alg;
	int err;

	alg = kzalloc(sizeof(struct hifn_crypto_alg), GFP_KERNEL);
	if (!alg)
		return -ENOMEM;

	snprintf(alg->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s", t->name);
	snprintf(alg->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-%s",
			t->drv_name, dev->name);

	alg->alg.cra_priority = 300;
	alg->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
	alg->alg.cra_blocksize = t->bsize;
	alg->alg.cra_ctxsize = sizeof(struct hifn_context);
	alg->alg.cra_alignmask = 0;
	alg->alg.cra_type = &crypto_ablkcipher_type;
	alg->alg.cra_module = THIS_MODULE;
	alg->alg.cra_u.ablkcipher = t->ablkcipher;
	alg->alg.cra_init = hifn_cra_init;

	alg->dev = dev;

	list_add_tail(&alg->entry, &dev->alg_list);

	err = crypto_register_alg(&alg->alg);
	if (err) {
		list_del(&alg->entry);
		kfree(alg);
	}

	return err;
}

static void hifn_unregister_alg(struct hifn_device *dev)
{
	struct hifn_crypto_alg *a, *n;

	list_for_each_entry_safe(a, n, &dev->alg_list, entry) {
		list_del(&a->entry);
		crypto_unregister_alg(&a->alg);
		kfree(a);
	}
}

static int hifn_register_alg(struct hifn_device *dev)
{
	int i, err;

	for (i = 0; i < ARRAY_SIZE(hifn_alg_templates); ++i) {
		err = hifn_alg_alloc(dev, &hifn_alg_templates[i]);
		if (err)
			goto err_out_exit;
	}

	return 0;

err_out_exit:
	hifn_unregister_alg(dev);
	return err;
}

static void hifn_tasklet_callback(unsigned long data)
{
	struct hifn_device *dev = (struct hifn_device *)data;

	/*
	 * It is ok to call this without the lock held, although it modifies
	 * some parameters used in parallel (like dev->success): they are
	 * either used in process context or the update is atomic (like
	 * setting dev->sa[i] to NULL).
	 */
	hifn_clear_rings(dev, 0);

	if (dev->started < HIFN_QUEUE_LENGTH && dev->queue.qlen)
		hifn_process_queue(dev);
}

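/*
 * PCI probe: map the three BARs, allocate the DMA descriptor rings,
 * hook up the interrupt and the tasklet, run the AES self-test and
 * finally register the RNG and the cipher algorithms.
 */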
static int __devinit hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err, i;
	struct hifn_device *dev;
	char name[8];

	err = pci_enable_device(pdev);
	if (err)
		return err;
	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err)
		goto err_out_disable_pci_device;

	snprintf(name, sizeof(name), "hifn%d",
			atomic_inc_return(&hifn_dev_number) - 1);

	err = pci_request_regions(pdev, name);
	if (err)
		goto err_out_disable_pci_device;

	if (pci_resource_len(pdev, 0) < HIFN_BAR0_SIZE ||
	    pci_resource_len(pdev, 1) < HIFN_BAR1_SIZE ||
	    pci_resource_len(pdev, 2) < HIFN_BAR2_SIZE) {
		dprintk("%s: Broken hardware - I/O regions are too small.\n",
				pci_name(pdev));
		err = -ENODEV;
		goto err_out_free_regions;
	}

	dev = kzalloc(sizeof(struct hifn_device) + sizeof(struct crypto_alg),
			GFP_KERNEL);
	if (!dev) {
		err = -ENOMEM;
		goto err_out_free_regions;
	}

	INIT_LIST_HEAD(&dev->alg_list);

	snprintf(dev->name, sizeof(dev->name), "%s", name);
	spin_lock_init(&dev->lock);

	for (i = 0; i < 3; ++i) {
		unsigned long addr, size;

		addr = pci_resource_start(pdev, i);
		size = pci_resource_len(pdev, i);

		dev->bar[i] = ioremap_nocache(addr, size);
		if (!dev->bar[i]) {
			err = -ENOMEM;
			goto err_out_unmap_bars;
		}
	}

	dev->desc_virt = pci_alloc_consistent(pdev, sizeof(struct hifn_dma),
			&dev->desc_dma);
	if (!dev->desc_virt) {
		dprintk("Failed to allocate descriptor rings.\n");
		err = -ENOMEM;
		goto err_out_unmap_bars;
	}
	memset(dev->desc_virt, 0, sizeof(struct hifn_dma));

	dev->pdev = pdev;
	dev->irq = pdev->irq;

	for (i = 0; i < HIFN_D_RES_RSIZE; ++i)
		dev->sa[i] = NULL;

	pci_set_drvdata(pdev, dev);

	tasklet_init(&dev->tasklet, hifn_tasklet_callback, (unsigned long)dev);

	crypto_init_queue(&dev->queue, 1);

	err = request_irq(dev->irq, hifn_interrupt, IRQF_SHARED, dev->name, dev);
	if (err) {
		dprintk("Failed to request IRQ%d: err: %d.\n", dev->irq, err);
		dev->irq = 0;
		goto err_out_free_desc;
	}

	err = hifn_start_device(dev);
	if (err)
		goto err_out_free_irq;

	err = hifn_test(dev, 1, 0);
	if (err)
		goto err_out_stop_device;

	err = hifn_register_rng(dev);
	if (err)
		goto err_out_stop_device;

	err = hifn_register_alg(dev);
	if (err)
		goto err_out_unregister_rng;

	INIT_DELAYED_WORK(&dev->work, hifn_work);
	schedule_delayed_work(&dev->work, HZ);

	dprintk("HIFN crypto accelerator card at %s has been "
			"successfully registered as %s.\n",
			pci_name(pdev), dev->name);

	return 0;

err_out_unregister_rng:
	hifn_unregister_rng(dev);
err_out_stop_device:
	hifn_reset_dma(dev, 1);
	hifn_stop_device(dev);
err_out_free_irq:
	free_irq(dev->irq, dev);
	tasklet_kill(&dev->tasklet);
err_out_free_desc:
	pci_free_consistent(pdev, sizeof(struct hifn_dma),
			dev->desc_virt, dev->desc_dma);

err_out_unmap_bars:
	for (i = 0; i < 3; ++i)
		if (dev->bar[i])
			iounmap(dev->bar[i]);

err_out_free_regions:
	pci_release_regions(pdev);

err_out_disable_pci_device:
	pci_disable_device(pdev);

	return err;
}

static void __devexit hifn_remove(struct pci_dev *pdev)
{
	int i;
	struct hifn_device *dev;

	dev = pci_get_drvdata(pdev);

	if (dev) {
		cancel_delayed_work_sync(&dev->work);

		hifn_unregister_rng(dev);
		hifn_unregister_alg(dev);
		hifn_reset_dma(dev, 1);
		hifn_stop_device(dev);

		free_irq(dev->irq, dev);
		tasklet_kill(&dev->tasklet);

		hifn_flush(dev);

		pci_free_consistent(pdev, sizeof(struct hifn_dma),
				dev->desc_virt, dev->desc_dma);
		for (i = 0; i < 3; ++i)
			if (dev->bar[i])
				iounmap(dev->bar[i]);

		kfree(dev);
	}

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static struct pci_device_id hifn_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HIFN, PCI_DEVICE_ID_HIFN_7955) },
	{ PCI_DEVICE(PCI_VENDOR_ID_HIFN, PCI_DEVICE_ID_HIFN_7956) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, hifn_pci_tbl);

static struct pci_driver hifn_pci_driver = {
	.name = "hifn795x",
	.id_table = hifn_pci_tbl,
	.probe = hifn_probe,
	.remove = __devexit_p(hifn_remove),
};

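/*
 * Module init: the driver supports only 32-bit DMA addressing, and the
 * hifn_pll_ref parameter must name the reference clock source ("pci" or
 * "ext") with an optional frequency in MHz, e.g. "ext33".
 */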
static int __init hifn_init(void)
{
	unsigned int freq;
	int err;

	if (sizeof(dma_addr_t) > 4) {
		printk(KERN_INFO "HIFN supports only 32-bit addresses.\n");
		return -EINVAL;
	}

	if (strncmp(hifn_pll_ref, "ext", 3) &&
	    strncmp(hifn_pll_ref, "pci", 3)) {
		printk(KERN_ERR "hifn795x: invalid hifn_pll_ref clock, "
				"must be pci or ext\n");
		return -EINVAL;
	}

	/*
	 * For the 7955/7956 the reference clock frequency must be in the
	 * range of 20MHz-100MHz. For the 7954 the upper bound is 66.67MHz,
	 * but this chip is currently not supported.
	 */
	if (hifn_pll_ref[3] != '\0') {
		freq = simple_strtoul(hifn_pll_ref + 3, NULL, 10);
		if (freq < 20 || freq > 100) {
			printk(KERN_ERR "hifn795x: invalid hifn_pll_ref "
					"frequency, must be in the range "
					"of 20-100\n");
			return -EINVAL;
		}
	}

	err = pci_register_driver(&hifn_pci_driver);
	if (err < 0) {
		dprintk("Failed to register PCI driver for %s device.\n",
				hifn_pci_driver.name);
		return -ENODEV;
	}

	printk(KERN_INFO "Driver for HIFN 795x crypto accelerator chip "
			"has been successfully registered.\n");

	return 0;
}

static void __exit hifn_fini(void)
{
	pci_unregister_driver(&hifn_pci_driver);

	printk(KERN_INFO "Driver for HIFN 795x crypto accelerator chip "
			"has been successfully unregistered.\n");
}

module_init(hifn_init);
module_exit(hifn_fini);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Evgeniy Polyakov <[email protected]>");
MODULE_DESCRIPTION("Driver for HIFN 795x crypto accelerator chip.");