GitHub Repository: awilliam/linux-vfio
Path: blob/master/drivers/infiniband/hw/qib/qib_sdma.c
/*
 * Copyright (c) 2007, 2008, 2009, 2010 QLogic Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/spinlock.h>
#include <linux/netdevice.h>

#include "qib.h"
#include "qib_common.h"

/* default pio off, sdma on */
static ushort sdma_descq_cnt = 256;
module_param_named(sdma_descq_cnt, sdma_descq_cnt, ushort, S_IRUGO);
MODULE_PARM_DESC(sdma_descq_cnt, "Number of SDMA descq entries");

/*
 * Bits defined in the send DMA descriptor.
 */
#define SDMA_DESC_LAST (1ULL << 11)
#define SDMA_DESC_FIRST (1ULL << 12)
#define SDMA_DESC_DMA_HEAD (1ULL << 13)
#define SDMA_DESC_USE_LARGE_BUF (1ULL << 14)
#define SDMA_DESC_INTR (1ULL << 15)
#define SDMA_DESC_COUNT_LSB 16
#define SDMA_DESC_GEN_LSB 30
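/*
 * Qword 0 of a descriptor also carries SDmaBufOffset in bits 10:0 and
 * SDmaDwordCount in bits 26:16; see make_sdma_desc() below.
 */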

char *qib_sdma_state_names[] = {
	[qib_sdma_state_s00_hw_down] = "s00_HwDown",
	[qib_sdma_state_s10_hw_start_up_wait] = "s10_HwStartUpWait",
	[qib_sdma_state_s20_idle] = "s20_Idle",
	[qib_sdma_state_s30_sw_clean_up_wait] = "s30_SwCleanUpWait",
	[qib_sdma_state_s40_hw_clean_up_wait] = "s40_HwCleanUpWait",
	[qib_sdma_state_s50_hw_halt_wait] = "s50_HwHaltWait",
	[qib_sdma_state_s99_running] = "s99_Running",
};

char *qib_sdma_event_names[] = {
	[qib_sdma_event_e00_go_hw_down] = "e00_GoHwDown",
	[qib_sdma_event_e10_go_hw_start] = "e10_GoHwStart",
	[qib_sdma_event_e20_hw_started] = "e20_HwStarted",
	[qib_sdma_event_e30_go_running] = "e30_GoRunning",
	[qib_sdma_event_e40_sw_cleaned] = "e40_SwCleaned",
	[qib_sdma_event_e50_hw_cleaned] = "e50_HwCleaned",
	[qib_sdma_event_e60_hw_halted] = "e60_HwHalted",
	[qib_sdma_event_e70_go_idle] = "e70_GoIdle",
	[qib_sdma_event_e7220_err_halted] = "e7220_ErrHalted",
	[qib_sdma_event_e7322_err_halted] = "e7322_ErrHalted",
	[qib_sdma_event_e90_timer_tick] = "e90_TimerTick",
};

/* declare all statics here rather than keep sorting */
static int alloc_sdma(struct qib_pportdata *);
static void sdma_complete(struct kref *);
static void sdma_finalput(struct qib_sdma_state *);
static void sdma_get(struct qib_sdma_state *);
static void sdma_put(struct qib_sdma_state *);
static void sdma_set_state(struct qib_pportdata *, enum qib_sdma_states);
static void sdma_start_sw_clean_up(struct qib_pportdata *);
static void sdma_sw_clean_up_task(unsigned long);
static void unmap_desc(struct qib_pportdata *, unsigned);

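/*
 * The sdma_state kref tracks whether the per-port state machine is
 * active: a reference is taken when the engine is started (the
 * e10_go_hw_start handling in __qib_sdma_process_event()) and released
 * in sdma_sw_tear_down(). sdma_finalput() drops the initial reference
 * from kref_init() and waits on the completion, so qib_teardown_sdma()
 * does not return while the state machine is still live.
 */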
static void sdma_get(struct qib_sdma_state *ss)
{
	kref_get(&ss->kref);
}

static void sdma_complete(struct kref *kref)
{
	struct qib_sdma_state *ss =
		container_of(kref, struct qib_sdma_state, kref);

	complete(&ss->comp);
}

static void sdma_put(struct qib_sdma_state *ss)
{
	kref_put(&ss->kref, sdma_complete);
}

static void sdma_finalput(struct qib_sdma_state *ss)
{
	sdma_put(ss);
	wait_for_completion(&ss->comp);
}

/*
 * Complete all the sdma requests on the active list, in the correct
 * order, and with appropriate processing. Called when cleaning up
 * after sdma shutdown, and when new sdma requests are submitted for
 * a link that is down. This matches what is done for requests
 * that complete normally; it's just the full list.
 *
 * Must be called with sdma_lock held.
 */
static void clear_sdma_activelist(struct qib_pportdata *ppd)
{
	struct qib_sdma_txreq *txp, *txp_next;

	list_for_each_entry_safe(txp, txp_next, &ppd->sdma_activelist, list) {
		list_del_init(&txp->list);
		if (txp->flags & QIB_SDMA_TXREQ_F_FREEDESC) {
			unsigned idx;

			idx = txp->start_idx;
			while (idx != txp->next_descq_idx) {
				unmap_desc(ppd, idx);
				if (++idx == ppd->sdma_descq_cnt)
					idx = 0;
			}
		}
		if (txp->callback)
			(*txp->callback)(txp, QIB_SDMA_TXREQ_S_ABORTED);
	}
}

static void sdma_sw_clean_up_task(unsigned long opaque)
{
	struct qib_pportdata *ppd = (struct qib_pportdata *) opaque;
	unsigned long flags;

	spin_lock_irqsave(&ppd->sdma_lock, flags);

	/*
	 * At this point, the following should always be true:
	 * - We are halted, so no more descriptors are getting retired.
	 * - We are not running, so no one is submitting new work.
	 * - Only we can send the e40_sw_cleaned, so we can't start
	 *   running again until we say so. So, the active list and
	 *   descq are ours to play with.
	 */

	/* Process all retired requests. */
	qib_sdma_make_progress(ppd);

	clear_sdma_activelist(ppd);

	/*
	 * Resync count of added and removed. It is VERY important that
	 * sdma_descq_removed NEVER decrement - user_sdma depends on it.
	 */
	ppd->sdma_descq_removed = ppd->sdma_descq_added;

	/*
	 * Reset our notion of head and tail.
	 * Note that the HW registers will be reset when switching states
	 * due to calling __qib_sdma_process_event() below.
	 */
	ppd->sdma_descq_tail = 0;
	ppd->sdma_descq_head = 0;
	ppd->sdma_head_dma[0] = 0;
	ppd->sdma_generation = 0;

	__qib_sdma_process_event(ppd, qib_sdma_event_e40_sw_cleaned);

	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
}

/*
 * This is called when changing to state qib_sdma_state_s10_hw_start_up_wait
 * as a result of send buffer errors or send DMA descriptor errors.
 * We want to disarm the buffers in these cases.
 */
static void sdma_hw_start_up(struct qib_pportdata *ppd)
{
	struct qib_sdma_state *ss = &ppd->sdma_state;
	unsigned bufno;

	for (bufno = ss->first_sendbuf; bufno < ss->last_sendbuf; ++bufno)
		ppd->dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_BUF(bufno));

	ppd->dd->f_sdma_hw_start_up(ppd);
}

static void sdma_sw_tear_down(struct qib_pportdata *ppd)
{
	struct qib_sdma_state *ss = &ppd->sdma_state;

	/* Releasing this reference means the state machine has stopped. */
	sdma_put(ss);
}

static void sdma_start_sw_clean_up(struct qib_pportdata *ppd)
{
	tasklet_hi_schedule(&ppd->sdma_sw_clean_up_task);
}

static void sdma_set_state(struct qib_pportdata *ppd,
	enum qib_sdma_states next_state)
{
	struct qib_sdma_state *ss = &ppd->sdma_state;
	struct sdma_set_state_action *action = ss->set_state_action;
	unsigned op = 0;

	/* debugging bookkeeping */
	ss->previous_state = ss->current_state;
	ss->previous_op = ss->current_op;

	ss->current_state = next_state;

	if (action[next_state].op_enable)
		op |= QIB_SDMA_SENDCTRL_OP_ENABLE;

	if (action[next_state].op_intenable)
		op |= QIB_SDMA_SENDCTRL_OP_INTENABLE;

	if (action[next_state].op_halt)
		op |= QIB_SDMA_SENDCTRL_OP_HALT;

	if (action[next_state].op_drain)
		op |= QIB_SDMA_SENDCTRL_OP_DRAIN;

	if (action[next_state].go_s99_running_tofalse)
		ss->go_s99_running = 0;

	if (action[next_state].go_s99_running_totrue)
		ss->go_s99_running = 1;

	ss->current_op = op;

	ppd->dd->f_sdma_sendctrl(ppd, ss->current_op);
}

static void unmap_desc(struct qib_pportdata *ppd, unsigned head)
{
	__le64 *descqp = &ppd->sdma_descq[head].qw[0];
	u64 desc[2];
	dma_addr_t addr;
	size_t len;

	desc[0] = le64_to_cpu(descqp[0]);
	desc[1] = le64_to_cpu(descqp[1]);

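	/*
	 * The mapped address is split across the two qwords (low 32 bits
	 * in the upper half of qword 0, upper bits in qword 1); the length
	 * in bytes comes from the 11-bit SDmaDwordCount field, shifted so
	 * the dword count becomes a byte count.
	 */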
	addr = (desc[1] << 32) | (desc[0] >> 32);
	len = (desc[0] >> 14) & (0x7ffULL << 2);
	dma_unmap_single(&ppd->dd->pcidev->dev, addr, len, DMA_TO_DEVICE);
}

static int alloc_sdma(struct qib_pportdata *ppd)
{
	ppd->sdma_descq_cnt = sdma_descq_cnt;
	if (!ppd->sdma_descq_cnt)
		ppd->sdma_descq_cnt = 256;

	/* Allocate memory for SendDMA descriptor FIFO */
	ppd->sdma_descq = dma_alloc_coherent(&ppd->dd->pcidev->dev,
		ppd->sdma_descq_cnt * sizeof(u64[2]), &ppd->sdma_descq_phys,
		GFP_KERNEL);

	if (!ppd->sdma_descq) {
		qib_dev_err(ppd->dd, "failed to allocate SendDMA descriptor FIFO memory\n");
		goto bail;
	}

	/* Allocate memory for DMA of head register to memory */
	ppd->sdma_head_dma = dma_alloc_coherent(&ppd->dd->pcidev->dev,
		PAGE_SIZE, &ppd->sdma_head_phys, GFP_KERNEL);
	if (!ppd->sdma_head_dma) {
		qib_dev_err(ppd->dd, "failed to allocate SendDMA head memory\n");
		goto cleanup_descq;
	}
	ppd->sdma_head_dma[0] = 0;
	return 0;

cleanup_descq:
	dma_free_coherent(&ppd->dd->pcidev->dev,
		ppd->sdma_descq_cnt * sizeof(u64[2]), (void *)ppd->sdma_descq,
		ppd->sdma_descq_phys);
	ppd->sdma_descq = NULL;
	ppd->sdma_descq_phys = 0;
bail:
	ppd->sdma_descq_cnt = 0;
	return -ENOMEM;
}

static void free_sdma(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;

	if (ppd->sdma_head_dma) {
		dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
			(void *)ppd->sdma_head_dma,
			ppd->sdma_head_phys);
		ppd->sdma_head_dma = NULL;
		ppd->sdma_head_phys = 0;
	}

	if (ppd->sdma_descq) {
		dma_free_coherent(&dd->pcidev->dev,
			ppd->sdma_descq_cnt * sizeof(u64[2]),
			ppd->sdma_descq, ppd->sdma_descq_phys);
		ppd->sdma_descq = NULL;
		ppd->sdma_descq_phys = 0;
	}
}

static inline void make_sdma_desc(struct qib_pportdata *ppd,
	u64 *sdmadesc, u64 addr, u64 dwlen,
	u64 dwoffset)
{

	WARN_ON(addr & 3);
	/* SDmaPhyAddr[47:32] */
	sdmadesc[1] = addr >> 32;
	/* SDmaPhyAddr[31:0] */
	sdmadesc[0] = (addr & 0xfffffffcULL) << 32;
	/* SDmaGeneration[1:0] */
	sdmadesc[0] |= (ppd->sdma_generation & 3ULL) <<
		SDMA_DESC_GEN_LSB;
	/* SDmaDwordCount[10:0] */
	sdmadesc[0] |= (dwlen & 0x7ffULL) << SDMA_DESC_COUNT_LSB;
	/* SDmaBufOffset[12:2] */
	sdmadesc[0] |= dwoffset & 0x7ffULL;
}

/* sdma_lock must be held */
int qib_sdma_make_progress(struct qib_pportdata *ppd)
{
	struct list_head *lp = NULL;
	struct qib_sdma_txreq *txp = NULL;
	struct qib_devdata *dd = ppd->dd;
	int progress = 0;
	u16 hwhead;
	u16 idx = 0;

	hwhead = dd->f_sdma_gethead(ppd);

	/* The reason for some of the complexity of this code is that
	 * not all descriptors have corresponding txps. So, we have to
	 * be able to skip over descs until we wander into the range of
	 * the next txp on the list.
	 */

	if (!list_empty(&ppd->sdma_activelist)) {
		lp = ppd->sdma_activelist.next;
		txp = list_entry(lp, struct qib_sdma_txreq, list);
		idx = txp->start_idx;
	}

	while (ppd->sdma_descq_head != hwhead) {
		/* if desc is part of this txp, unmap if needed */
		if (txp && (txp->flags & QIB_SDMA_TXREQ_F_FREEDESC) &&
		    (idx == ppd->sdma_descq_head)) {
			unmap_desc(ppd, ppd->sdma_descq_head);
			if (++idx == ppd->sdma_descq_cnt)
				idx = 0;
		}

		/* increment dequeued desc count */
		ppd->sdma_descq_removed++;

		/* advance head, wrap if needed */
		if (++ppd->sdma_descq_head == ppd->sdma_descq_cnt)
			ppd->sdma_descq_head = 0;

		/* if now past this txp's descs, do the callback */
		if (txp && txp->next_descq_idx == ppd->sdma_descq_head) {
			/* remove from active list */
			list_del_init(&txp->list);
			if (txp->callback)
				(*txp->callback)(txp, QIB_SDMA_TXREQ_S_OK);
			/* see if there is another txp */
			if (list_empty(&ppd->sdma_activelist))
				txp = NULL;
			else {
				lp = ppd->sdma_activelist.next;
				txp = list_entry(lp, struct qib_sdma_txreq,
					list);
				idx = txp->start_idx;
			}
		}
		progress = 1;
	}
	if (progress)
		qib_verbs_sdma_desc_avail(ppd, qib_sdma_descq_freecnt(ppd));
	return progress;
}

/*
 * This is called from interrupt context.
 */
void qib_sdma_intr(struct qib_pportdata *ppd)
{
	unsigned long flags;

	spin_lock_irqsave(&ppd->sdma_lock, flags);

	__qib_sdma_intr(ppd);

	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
}

void __qib_sdma_intr(struct qib_pportdata *ppd)
{
	if (__qib_sdma_running(ppd))
		qib_sdma_make_progress(ppd);
}

int qib_setup_sdma(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	unsigned long flags;
	int ret = 0;

	ret = alloc_sdma(ppd);
	if (ret)
		goto bail;

	/* set consistent sdma state */
	ppd->dd->f_sdma_init_early(ppd);
	spin_lock_irqsave(&ppd->sdma_lock, flags);
	sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
	spin_unlock_irqrestore(&ppd->sdma_lock, flags);

	/* set up reference counting */
	kref_init(&ppd->sdma_state.kref);
	init_completion(&ppd->sdma_state.comp);

	ppd->sdma_generation = 0;
	ppd->sdma_descq_head = 0;
	ppd->sdma_descq_removed = 0;
	ppd->sdma_descq_added = 0;

	INIT_LIST_HEAD(&ppd->sdma_activelist);

	tasklet_init(&ppd->sdma_sw_clean_up_task, sdma_sw_clean_up_task,
		(unsigned long)ppd);

	ret = dd->f_init_sdma_regs(ppd);
	if (ret)
		goto bail_alloc;

	qib_sdma_process_event(ppd, qib_sdma_event_e10_go_hw_start);

	return 0;

bail_alloc:
	qib_teardown_sdma(ppd);
bail:
	return ret;
}

void qib_teardown_sdma(struct qib_pportdata *ppd)
{
	qib_sdma_process_event(ppd, qib_sdma_event_e00_go_hw_down);

	/*
	 * This waits for the state machine to exit so it is not
	 * necessary to kill the sdma_sw_clean_up_task to make sure
	 * it is not running.
	 */
	sdma_finalput(&ppd->sdma_state);

	free_sdma(ppd);
}

int qib_sdma_running(struct qib_pportdata *ppd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ppd->sdma_lock, flags);
	ret = __qib_sdma_running(ppd);
	spin_unlock_irqrestore(&ppd->sdma_lock, flags);

	return ret;
}

/*
 * Complete a request when sdma is not running; this is likely only one
 * request, but to simplify the code, always queue it, then process the
 * full activelist. We process the entire list to ensure that this
 * particular request gets its callback, and in the correct order.
 * Must be called with sdma_lock held.
 */
static void complete_sdma_err_req(struct qib_pportdata *ppd,
	struct qib_verbs_txreq *tx)
{
	atomic_inc(&tx->qp->s_dma_busy);
	/* no sdma descriptors, so no unmap_desc */
	tx->txreq.start_idx = 0;
	tx->txreq.next_descq_idx = 0;
	list_add_tail(&tx->txreq.list, &ppd->sdma_activelist);
	clear_sdma_activelist(ppd);
}

/*
 * This function queues one IB packet onto the send DMA queue per call.
 * The caller is responsible for checking:
 * 1) The number of send DMA descriptor entries is less than the size of
 *    the descriptor queue.
 * 2) The IB SGE addresses and lengths are 32-bit aligned
 *    (except possibly the last SGE's length)
 * 3) The SGE addresses are suitable for passing to dma_map_single().
 */
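/*
 * Returns 0 if the packet was queued (or, when the engine is not
 * running, completed through the error path), or -EBUSY if no
 * descriptor space was available; in the -EBUSY case the request is
 * saved on qp->s_tx so it can be retried once descriptors free up.
 */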
int qib_sdma_verbs_send(struct qib_pportdata *ppd,
	struct qib_sge_state *ss, u32 dwords,
	struct qib_verbs_txreq *tx)
{
	unsigned long flags;
	struct qib_sge *sge;
	struct qib_qp *qp;
	int ret = 0;
	u16 tail;
	__le64 *descqp;
	u64 sdmadesc[2];
	u32 dwoffset;
	dma_addr_t addr;

	spin_lock_irqsave(&ppd->sdma_lock, flags);

retry:
	if (unlikely(!__qib_sdma_running(ppd))) {
		complete_sdma_err_req(ppd, tx);
		goto unlock;
	}

	if (tx->txreq.sg_count > qib_sdma_descq_freecnt(ppd)) {
		if (qib_sdma_make_progress(ppd))
			goto retry;
		if (ppd->dd->flags & QIB_HAS_SDMA_TIMEOUT)
			ppd->dd->f_sdma_set_desc_cnt(ppd,
				ppd->sdma_descq_cnt / 2);
		goto busy;
	}

	dwoffset = tx->hdr_dwords;
	make_sdma_desc(ppd, sdmadesc, (u64) tx->txreq.addr, dwoffset, 0);

	sdmadesc[0] |= SDMA_DESC_FIRST;
	if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)
		sdmadesc[0] |= SDMA_DESC_USE_LARGE_BUF;

	/* write to the descq */
	tail = ppd->sdma_descq_tail;
	descqp = &ppd->sdma_descq[tail].qw[0];
	*descqp++ = cpu_to_le64(sdmadesc[0]);
	*descqp++ = cpu_to_le64(sdmadesc[1]);

	/* increment the tail */
	if (++tail == ppd->sdma_descq_cnt) {
		tail = 0;
		descqp = &ppd->sdma_descq[0].qw[0];
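		/* wrapped: new descriptors carry the next generation value */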
		++ppd->sdma_generation;
	}

	tx->txreq.start_idx = tail;

	sge = &ss->sge;
	while (dwords) {
		u32 dw;
		u32 len;

		len = dwords << 2;
		if (len > sge->length)
			len = sge->length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		dw = (len + 3) >> 2;
		addr = dma_map_single(&ppd->dd->pcidev->dev, sge->vaddr,
			dw << 2, DMA_TO_DEVICE);
		if (dma_mapping_error(&ppd->dd->pcidev->dev, addr))
			goto unmap;
		sdmadesc[0] = 0;
		make_sdma_desc(ppd, sdmadesc, (u64) addr, dw, dwoffset);
		/* SDmaUseLargeBuf has to be set in every descriptor */
		if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)
			sdmadesc[0] |= SDMA_DESC_USE_LARGE_BUF;
		/* write to the descq */
		*descqp++ = cpu_to_le64(sdmadesc[0]);
		*descqp++ = cpu_to_le64(sdmadesc[1]);

		/* increment the tail */
		if (++tail == ppd->sdma_descq_cnt) {
			tail = 0;
			descqp = &ppd->sdma_descq[0].qw[0];
			++ppd->sdma_generation;
		}
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= QIB_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}

		dwoffset += dw;
		dwords -= dw;
	}

	if (!tail)
		descqp = &ppd->sdma_descq[ppd->sdma_descq_cnt].qw[0];
	descqp -= 2;
	descqp[0] |= cpu_to_le64(SDMA_DESC_LAST);
	if (tx->txreq.flags & QIB_SDMA_TXREQ_F_HEADTOHOST)
		descqp[0] |= cpu_to_le64(SDMA_DESC_DMA_HEAD);
	if (tx->txreq.flags & QIB_SDMA_TXREQ_F_INTREQ)
		descqp[0] |= cpu_to_le64(SDMA_DESC_INTR);

	atomic_inc(&tx->qp->s_dma_busy);
	tx->txreq.next_descq_idx = tail;
	ppd->dd->f_sdma_update_tail(ppd, tail);
	ppd->sdma_descq_added += tx->txreq.sg_count;
	list_add_tail(&tx->txreq.list, &ppd->sdma_activelist);
	goto unlock;

unmap:
	for (;;) {
		if (!tail)
			tail = ppd->sdma_descq_cnt - 1;
		else
			tail--;
		if (tail == ppd->sdma_descq_tail)
			break;
		unmap_desc(ppd, tail);
	}
	qp = tx->qp;
	qib_put_txreq(tx);
	spin_lock(&qp->r_lock);
	spin_lock(&qp->s_lock);
	if (qp->ibqp.qp_type == IB_QPT_RC) {
		/* XXX what about error sending RDMA read responses? */
		if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)
			qib_error_qp(qp, IB_WC_GENERAL_ERR);
	} else if (qp->s_wqe)
		qib_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->r_lock);
	/* return zero to process the next send work request */
	goto unlock;

busy:
	qp = tx->qp;
	spin_lock(&qp->s_lock);
	if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
		struct qib_ibdev *dev;

		/*
		 * If we couldn't queue the DMA request, save the info
		 * and try again later rather than destroying the
		 * buffer and undoing the side effects of the copy.
		 */
		tx->ss = ss;
		tx->dwords = dwords;
		qp->s_tx = tx;
		dev = &ppd->dd->verbs_dev;
		spin_lock(&dev->pending_lock);
		if (list_empty(&qp->iowait)) {
			struct qib_ibport *ibp;

			ibp = &ppd->ibport_data;
			ibp->n_dmawait++;
			qp->s_flags |= QIB_S_WAIT_DMA_DESC;
			list_add_tail(&qp->iowait, &dev->dmawait);
		}
		spin_unlock(&dev->pending_lock);
		qp->s_flags &= ~QIB_S_BUSY;
		spin_unlock(&qp->s_lock);
		ret = -EBUSY;
	} else {
		spin_unlock(&qp->s_lock);
		qib_put_txreq(tx);
	}
unlock:
	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
	return ret;
}

void qib_sdma_process_event(struct qib_pportdata *ppd,
	enum qib_sdma_events event)
{
	unsigned long flags;

	spin_lock_irqsave(&ppd->sdma_lock, flags);

	__qib_sdma_process_event(ppd, event);

	if (ppd->sdma_state.current_state == qib_sdma_state_s99_running)
		qib_verbs_sdma_desc_avail(ppd, qib_sdma_descq_freecnt(ppd));

	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
}

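/*
 * Run one event through the per-port SDMA state machine. Callers hold
 * sdma_lock; qib_sdma_process_event() is the locking wrapper.
 */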
void __qib_sdma_process_event(struct qib_pportdata *ppd,
	enum qib_sdma_events event)
{
	struct qib_sdma_state *ss = &ppd->sdma_state;

	switch (ss->current_state) {
	case qib_sdma_state_s00_hw_down:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			break;
		case qib_sdma_event_e30_go_running:
			/*
			 * If down, but running requested (usually the
			 * result of a link coming up), then we need to
			 * start up. This can happen when hw down is
			 * requested while bringing the link up with
			 * traffic active on the 7220, for example.
			 */
			ss->go_s99_running = 1;
			/* fall through and start dma engine */
		case qib_sdma_event_e10_go_hw_start:
			/* This reference means the state machine is started */
			sdma_get(&ppd->sdma_state);
			sdma_set_state(ppd,
				qib_sdma_state_s10_hw_start_up_wait);
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e40_sw_cleaned:
			sdma_sw_tear_down(ppd);
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			break;
		case qib_sdma_event_e70_go_idle:
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	case qib_sdma_state_s10_hw_start_up_wait:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			sdma_sw_tear_down(ppd);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			sdma_set_state(ppd, ss->go_s99_running ?
				qib_sdma_state_s99_running :
				qib_sdma_state_s20_idle);
			break;
		case qib_sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case qib_sdma_event_e40_sw_cleaned:
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			break;
		case qib_sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	case qib_sdma_state_s20_idle:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			sdma_sw_tear_down(ppd);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e30_go_running:
			sdma_set_state(ppd, qib_sdma_state_s99_running);
			ss->go_s99_running = 1;
			break;
		case qib_sdma_event_e40_sw_cleaned:
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			break;
		case qib_sdma_event_e70_go_idle:
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	case qib_sdma_state_s30_sw_clean_up_wait:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case qib_sdma_event_e40_sw_cleaned:
			sdma_set_state(ppd,
				qib_sdma_state_s10_hw_start_up_wait);
			sdma_hw_start_up(ppd);
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			break;
		case qib_sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	case qib_sdma_state_s40_hw_clean_up_wait:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case qib_sdma_event_e40_sw_cleaned:
			break;
		case qib_sdma_event_e50_hw_cleaned:
			sdma_set_state(ppd,
				qib_sdma_state_s30_sw_clean_up_wait);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e60_hw_halted:
			break;
		case qib_sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	case qib_sdma_state_s50_hw_halt_wait:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case qib_sdma_event_e40_sw_cleaned:
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			sdma_set_state(ppd,
				qib_sdma_state_s40_hw_clean_up_wait);
			ppd->dd->f_sdma_hw_clean_up(ppd);
			break;
		case qib_sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	case qib_sdma_state_s99_running:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e30_go_running:
			break;
		case qib_sdma_event_e40_sw_cleaned:
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			sdma_set_state(ppd,
				qib_sdma_state_s30_sw_clean_up_wait);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e70_go_idle:
			sdma_set_state(ppd, qib_sdma_state_s50_hw_halt_wait);
			ss->go_s99_running = 0;
			break;
		case qib_sdma_event_e7220_err_halted:
			sdma_set_state(ppd,
				qib_sdma_state_s30_sw_clean_up_wait);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e7322_err_halted:
			sdma_set_state(ppd, qib_sdma_state_s50_hw_halt_wait);
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;
	}

	ss->last_event = event;
}