GitHub Repository: awilliam/linux-vfio
Path: blob/master/crypto/async_tx/async_pq.c
/*
 * Copyright(c) 2007 Yuri Tikhonov <[email protected]>
 * Copyright(c) 2009 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
#include <linux/gfp.h>

/**
 * pq_scribble_page - space to hold throwaway P or Q buffer for
 * synchronous gen_syndrome
 */
static struct page *pq_scribble_page;

/* the struct page *blocks[] parameter passed to async_gen_syndrome()
 * and async_syndrome_val() contains the 'P' destination address at
 * blocks[disks-2] and the 'Q' destination address at blocks[disks-1]
 *
 * note: these are macros as they are used as lvalues
 */
#define P(b, d) (b[d-2])
#define Q(b, d) (b[d-1])
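
/*
 * Illustrative sketch (editor's addition, not part of the original
 * file): how a caller might lay out 'blocks' for a hypothetical 6-disk
 * array, so that the P() and Q() macros above resolve to blocks[4] and
 * blocks[5].  All names here are invented for the example.
 */
#if 0
static void example_blocks_layout(struct page *data[4],
				  struct page *p, struct page *q)
{
	struct page *blocks[6];
	int disks = 6;
	int i;

	for (i = 0; i < disks - 2; i++)
		blocks[i] = data[i];	/* sources at idx 0..disks-3 */
	P(blocks, disks) = p;		/* destination blocks[disks-2] */
	Q(blocks, disks) = q;		/* destination blocks[disks-1] */
}
#endif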

/**
 * do_async_gen_syndrome - asynchronously calculate P and/or Q
 */
static __async_inline struct dma_async_tx_descriptor *
do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks,
		      const unsigned char *scfs, unsigned int offset, int disks,
		      size_t len, dma_addr_t *dma_src,
		      struct async_submit_ctl *submit)
{
	struct dma_async_tx_descriptor *tx = NULL;
	struct dma_device *dma = chan->device;
	enum dma_ctrl_flags dma_flags = 0;
	enum async_tx_flags flags_orig = submit->flags;
	dma_async_tx_callback cb_fn_orig = submit->cb_fn;
	void *cb_param_orig = submit->cb_param;	/* cb_param is data, not a callback */
	int src_cnt = disks - 2;
	unsigned char coefs[src_cnt];
	unsigned short pq_src_cnt;
	dma_addr_t dma_dest[2];
	int src_off = 0;
	int idx;
	int i;

	/* DMAs use destinations as sources, so use BIDIRECTIONAL mapping */
	if (P(blocks, disks))
		dma_dest[0] = dma_map_page(dma->dev, P(blocks, disks), offset,
					   len, DMA_BIDIRECTIONAL);
	else
		dma_flags |= DMA_PREP_PQ_DISABLE_P;
	if (Q(blocks, disks))
		dma_dest[1] = dma_map_page(dma->dev, Q(blocks, disks), offset,
					   len, DMA_BIDIRECTIONAL);
	else
		dma_flags |= DMA_PREP_PQ_DISABLE_Q;

	/* convert source addresses being careful to collapse 'empty'
	 * sources and update the coefficients accordingly
	 */
	for (i = 0, idx = 0; i < src_cnt; i++) {
		if (blocks[i] == NULL)
			continue;
		dma_src[idx] = dma_map_page(dma->dev, blocks[i], offset, len,
					    DMA_TO_DEVICE);
		coefs[idx] = scfs[i];
		idx++;
	}
	src_cnt = idx;

	while (src_cnt > 0) {
		submit->flags = flags_orig;
		pq_src_cnt = min(src_cnt, dma_maxpq(dma, dma_flags));
		/* if we are submitting additional pqs, leave the chain open,
		 * clear the callback parameters, and leave the destination
		 * buffers mapped
		 */
		if (src_cnt > pq_src_cnt) {
			submit->flags &= ~ASYNC_TX_ACK;
			submit->flags |= ASYNC_TX_FENCE;
			dma_flags |= DMA_COMPL_SKIP_DEST_UNMAP;
			submit->cb_fn = NULL;
			submit->cb_param = NULL;
		} else {
			dma_flags &= ~DMA_COMPL_SKIP_DEST_UNMAP;
			submit->cb_fn = cb_fn_orig;
			submit->cb_param = cb_param_orig;
			if (cb_fn_orig)
				dma_flags |= DMA_PREP_INTERRUPT;
		}
		if (submit->flags & ASYNC_TX_FENCE)
			dma_flags |= DMA_PREP_FENCE;

		/* Since we have clobbered the src_list we are committed
		 * to doing this asynchronously.  Drivers force forward
		 * progress in case they can not provide a descriptor
		 */
		for (;;) {
			tx = dma->device_prep_dma_pq(chan, dma_dest,
						     &dma_src[src_off],
						     pq_src_cnt,
						     &coefs[src_off], len,
						     dma_flags);
			if (likely(tx))
				break;
			async_tx_quiesce(&submit->depend_tx);
			dma_async_issue_pending(chan);
		}

		async_tx_submit(chan, tx, submit);
		submit->depend_tx = tx;

		/* drop completed sources */
		src_cnt -= pq_src_cnt;
		src_off += pq_src_cnt;

		dma_flags |= DMA_PREP_CONTINUE;
	}

	return tx;
}

/**
 * do_sync_gen_syndrome - synchronously calculate a raid6 syndrome
 */
static void
do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
		     size_t len, struct async_submit_ctl *submit)
{
	void **srcs;
	int i;

	if (submit->scribble)
		srcs = submit->scribble;
	else
		srcs = (void **) blocks;

	for (i = 0; i < disks; i++) {
		if (blocks[i] == NULL) {
			BUG_ON(i > disks - 3); /* P or Q can't be zero */
			srcs[i] = (void *) raid6_empty_zero_page;
		} else
			srcs[i] = page_address(blocks[i]) + offset;
	}
	raid6_call.gen_syndrome(disks, len, srcs);
	async_tx_sync_epilog(submit);
}
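
/*
 * Worked example (editor's addition, not part of the original file):
 * what gen_syndrome computes, expressed byte-wise.  P is the plain XOR
 * of the data bytes; Q accumulates the data multiplied by successive
 * powers of the generator {02} in GF(2^8) with primitive polynomial
 * 0x11d, evaluated via Horner's rule.  A sketch for illustration only;
 * the real implementations in lib/raid6 are vectorized.
 */
#if 0
static u8 gf256_mul2(u8 v)
{
	/* multiply by {02}: shift, then reduce by 0x1d if bit 7 was set */
	return (v << 1) ^ ((v & 0x80) ? 0x1d : 0);
}

static void example_gen_syndrome_bytewise(int data_disks, size_t len,
					  u8 **data, u8 *p, u8 *q)
{
	size_t off;
	int d;

	for (off = 0; off < len; off++) {
		u8 pv = 0, qv = 0;

		/* Horner: qv = ((D[n-1]*2 ^ D[n-2])*2 ^ ...)*2 ^ D[0] */
		for (d = data_disks - 1; d >= 0; d--) {
			pv ^= data[d][off];
			qv = gf256_mul2(qv) ^ data[d][off];
		}
		p[off] = pv;	/* P parity */
		q[off] = qv;	/* Q = sum of g^d * D[d] over GF(2^8) */
	}
}
#endif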

/**
 * async_gen_syndrome - asynchronously calculate a raid6 syndrome
 * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
 * @offset: common offset into each block (src and dest) to start transaction
 * @disks: number of blocks (including missing P or Q, see below)
 * @len: length of operation in bytes
 * @submit: submission/completion modifiers
 *
 * General note: This routine assumes a field of GF(2^8) with a
 * primitive polynomial of 0x11d and a generator of {02}.
 *
 * 'disks' note: callers can optionally omit either P or Q (but not
 * both) from the calculation by setting blocks[disks-2] or
 * blocks[disks-1] to NULL.  When P or Q is omitted 'len' must be <=
 * PAGE_SIZE as a temporary buffer of this size is used in the
 * synchronous path.  'disks' always accounts for both destination
 * buffers.  If any source buffers (blocks[i] where i < disks - 2) are
 * set to NULL those buffers will be replaced with the
 * raid6_empty_zero_page in the synchronous path and omitted in the
 * hardware-asynchronous path.
 *
 * 'blocks' note: if submit->scribble is NULL then the contents of
 * 'blocks' may be overwritten to perform address conversions
 * (dma_map_page() or page_address()).
 */
struct dma_async_tx_descriptor *
async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
		   size_t len, struct async_submit_ctl *submit)
{
	int src_cnt = disks - 2;
	struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
						      &P(blocks, disks), 2,
						      blocks, src_cnt, len);
	struct dma_device *device = chan ? chan->device : NULL;
	dma_addr_t *dma_src = NULL;

	BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks)));

	if (submit->scribble)
		dma_src = submit->scribble;
	else if (sizeof(dma_addr_t) <= sizeof(struct page *))
		dma_src = (dma_addr_t *) blocks;

	if (dma_src && device &&
	    (src_cnt <= dma_maxpq(device, 0) ||
	     dma_maxpq(device, DMA_PREP_CONTINUE) > 0) &&
	    is_dma_pq_aligned(device, offset, 0, len)) {
		/* run the p+q asynchronously */
		pr_debug("%s: (async) disks: %d len: %zu\n",
			 __func__, disks, len);
		return do_async_gen_syndrome(chan, blocks, raid6_gfexp, offset,
					     disks, len, dma_src, submit);
	}

	/* run the pq synchronously */
	pr_debug("%s: (sync) disks: %d len: %zu\n", __func__, disks, len);

	/* wait for any prerequisite operations */
	async_tx_quiesce(&submit->depend_tx);

	if (!P(blocks, disks)) {
		P(blocks, disks) = pq_scribble_page;
		BUG_ON(len + offset > PAGE_SIZE);
	}
	if (!Q(blocks, disks)) {
		Q(blocks, disks) = pq_scribble_page;
		BUG_ON(len + offset > PAGE_SIZE);
	}
	do_sync_gen_syndrome(blocks, offset, disks, len, submit);

	return NULL;
}
EXPORT_SYMBOL_GPL(async_gen_syndrome);
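
/*
 * Usage sketch (editor's addition, not part of the original file): how
 * a caller such as a raid driver might generate P/Q for one stripe.
 * 'blocks', 'scribble', 'stripe_complete' and 'ctx' are hypothetical
 * caller-supplied names, not symbols defined here.
 */
#if 0
static struct dma_async_tx_descriptor *
example_run_pq(struct page **blocks, int disks, addr_conv_t *scribble,
	       dma_async_tx_callback stripe_complete, void *ctx)
{
	struct async_submit_ctl submit;

	/* no dependency, ack when done, notify via stripe_complete() */
	init_async_submit(&submit, ASYNC_TX_ACK, NULL,
			  stripe_complete, ctx, scribble);
	return async_gen_syndrome(blocks, 0, disks, PAGE_SIZE, &submit);
}
#endif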

static inline struct dma_chan *
pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, size_t len)
{
#ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	return NULL;
#endif
	return async_tx_find_channel(submit, DMA_PQ_VAL, NULL, 0, blocks,
				     disks, len);
}

/**
 * async_syndrome_val - asynchronously validate a raid6 syndrome
 * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
 * @offset: common offset into each block (src and dest) to start transaction
 * @disks: number of blocks (including missing P or Q, see below)
 * @len: length of operation in bytes
 * @pqres: on val failure SUM_CHECK_P_RESULT and/or SUM_CHECK_Q_RESULT are set
 * @spare: temporary result buffer for the synchronous case
 * @submit: submission / completion modifiers
 *
 * The same notes from async_gen_syndrome apply to the 'blocks' and
 * 'disks' parameters of this routine.  The synchronous path requires a
 * temporary result buffer and submit->scribble to be specified.
 */
struct dma_async_tx_descriptor *
async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
		   size_t len, enum sum_check_flags *pqres, struct page *spare,
		   struct async_submit_ctl *submit)
{
	struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len);
	struct dma_device *device = chan ? chan->device : NULL;
	struct dma_async_tx_descriptor *tx;
	unsigned char coefs[disks-2];
	enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
	dma_addr_t *dma_src = NULL;
	int src_cnt = 0;

	BUG_ON(disks < 4);

	if (submit->scribble)
		dma_src = submit->scribble;
	else if (sizeof(dma_addr_t) <= sizeof(struct page *))
		dma_src = (dma_addr_t *) blocks;

	if (dma_src && device && disks <= dma_maxpq(device, 0) &&
	    is_dma_pq_aligned(device, offset, 0, len)) {
		struct device *dev = device->dev;
		dma_addr_t *pq = &dma_src[disks-2];
		int i;

		pr_debug("%s: (async) disks: %d len: %zu\n",
			 __func__, disks, len);
		if (!P(blocks, disks))
			dma_flags |= DMA_PREP_PQ_DISABLE_P;
		else
			pq[0] = dma_map_page(dev, P(blocks, disks),
					     offset, len,
					     DMA_TO_DEVICE);
		if (!Q(blocks, disks))
			dma_flags |= DMA_PREP_PQ_DISABLE_Q;
		else
			pq[1] = dma_map_page(dev, Q(blocks, disks),
					     offset, len,
					     DMA_TO_DEVICE);

		if (submit->flags & ASYNC_TX_FENCE)
			dma_flags |= DMA_PREP_FENCE;
		for (i = 0; i < disks-2; i++)
			if (likely(blocks[i])) {
				dma_src[src_cnt] = dma_map_page(dev, blocks[i],
								offset, len,
								DMA_TO_DEVICE);
				coefs[src_cnt] = raid6_gfexp[i];
				src_cnt++;
			}

		for (;;) {
			tx = device->device_prep_dma_pq_val(chan, pq, dma_src,
							    src_cnt,
							    coefs,
							    len, pqres,
							    dma_flags);
			if (likely(tx))
				break;
			async_tx_quiesce(&submit->depend_tx);
			dma_async_issue_pending(chan);
		}
		async_tx_submit(chan, tx, submit);

		return tx;
	} else {
		struct page *p_src = P(blocks, disks);
		struct page *q_src = Q(blocks, disks);
		enum async_tx_flags flags_orig = submit->flags;
		dma_async_tx_callback cb_fn_orig = submit->cb_fn;
		void *scribble = submit->scribble;
		void *cb_param_orig = submit->cb_param;
		void *p, *q, *s;

		pr_debug("%s: (sync) disks: %d len: %zu\n",
			 __func__, disks, len);

		/* caller must provide a temporary result buffer and
		 * allow the input parameters to be preserved
		 */
		BUG_ON(!spare || !scribble);

		/* wait for any prerequisite operations */
		async_tx_quiesce(&submit->depend_tx);

		/* recompute p and/or q into the temporary buffer and then
		 * check to see the result matches the current value
		 */
		tx = NULL;
		*pqres = 0;
		if (p_src) {
			init_async_submit(submit, ASYNC_TX_XOR_ZERO_DST, NULL,
					  NULL, NULL, scribble);
			tx = async_xor(spare, blocks, offset, disks-2, len, submit);
			async_tx_quiesce(&tx);
			p = page_address(p_src) + offset;
			s = page_address(spare) + offset;
			*pqres |= !!memcmp(p, s, len) << SUM_CHECK_P;
		}

		if (q_src) {
			P(blocks, disks) = NULL;
			Q(blocks, disks) = spare;
			init_async_submit(submit, 0, NULL, NULL, NULL, scribble);
			tx = async_gen_syndrome(blocks, offset, disks, len, submit);
			async_tx_quiesce(&tx);
			q = page_address(q_src) + offset;
			s = page_address(spare) + offset;
			*pqres |= !!memcmp(q, s, len) << SUM_CHECK_Q;
		}

		/* restore P, Q and submit */
		P(blocks, disks) = p_src;
		Q(blocks, disks) = q_src;

		submit->cb_fn = cb_fn_orig;
		submit->cb_param = cb_param_orig;
		submit->flags = flags_orig;
		async_tx_sync_epilog(submit);

		return NULL;
	}
}
EXPORT_SYMBOL_GPL(async_syndrome_val);
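
/*
 * Usage sketch (editor's addition, not part of the original file): how
 * a caller might check a stripe's parity.  'blocks', 'spare' and
 * 'scribble' are hypothetical caller-supplied names; error handling is
 * reduced to a single return code for brevity.
 */
#if 0
static int example_check_parity(struct page **blocks, int disks,
				struct page *spare, addr_conv_t *scribble)
{
	struct async_submit_ctl submit;
	struct dma_async_tx_descriptor *tx;
	enum sum_check_flags pqres = 0;

	init_async_submit(&submit, ASYNC_TX_ACK, NULL, NULL, NULL, scribble);
	tx = async_syndrome_val(blocks, 0, disks, PAGE_SIZE, &pqres,
				spare, &submit);
	async_tx_quiesce(&tx);	/* wait for validation to complete */

	if (pqres & SUM_CHECK_P_RESULT)
		pr_debug("P parity mismatch\n");
	if (pqres & SUM_CHECK_Q_RESULT)
		pr_debug("Q parity mismatch\n");

	return pqres ? -EILSEQ : 0;
}
#endif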

static int __init async_pq_init(void)
{
	pq_scribble_page = alloc_page(GFP_KERNEL);

	if (pq_scribble_page)
		return 0;

	pr_err("%s: failed to allocate required spare page\n", __func__);

	return -ENOMEM;
}

static void __exit async_pq_exit(void)
{
	put_page(pq_scribble_page);
}

module_init(async_pq_init);
module_exit(async_pq_exit);

MODULE_DESCRIPTION("asynchronous raid6 syndrome generation/validation");
MODULE_LICENSE("GPL");