GitHub Repository: awilliam/linux-vfio
Path: blob/master/drivers/infiniband/hw/amso1100/c2.c
/*
 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/prefetch.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>

#include <rdma/ib_smi.h>
#include "c2.h"
#include "c2_provider.h"

MODULE_AUTHOR("Tom Tucker <[email protected]>");
MODULE_DESCRIPTION("Ammasso AMSO1100 Low-level iWARP Driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
        | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;

static int debug = -1;          /* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static int c2_up(struct net_device *netdev);
static int c2_down(struct net_device *netdev);
static int c2_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
static void c2_tx_interrupt(struct net_device *netdev);
static void c2_rx_interrupt(struct net_device *netdev);
static irqreturn_t c2_interrupt(int irq, void *dev_id);
static void c2_tx_timeout(struct net_device *netdev);
static int c2_change_mtu(struct net_device *netdev, int new_mtu);
static void c2_reset(struct c2_port *c2_port);

static struct pci_device_id c2_pci_table[] = {
        { PCI_DEVICE(0x18b8, 0xb001) },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, c2_pci_table);

static void c2_print_macaddr(struct net_device *netdev)
{
        pr_debug("%s: MAC %pM, IRQ %u\n", netdev->name, netdev->dev_addr, netdev->irq);
}
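
/*
 * Each receive buffer holds an adapter-written struct c2_rxp_hdr
 * followed by the Ethernet frame, so the size chosen below is
 * MTU + Ethernet header + RXP header (plus NET_IP_ALIGN padding
 * for MTUs larger than the default RX_BUF_SIZE).
 */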
static void c2_set_rxbufsize(struct c2_port *c2_port)
{
        struct net_device *netdev = c2_port->netdev;

        if (netdev->mtu > RX_BUF_SIZE)
                c2_port->rx_buf_size =
                    netdev->mtu + ETH_HLEN + sizeof(struct c2_rxp_hdr) +
                    NET_IP_ALIGN;
        else
                c2_port->rx_buf_size = sizeof(struct c2_rxp_hdr) + RX_BUF_SIZE;
}

/*
 * Allocate TX ring elements and chain them together.
 * One-to-one association of adapter descriptors with ring elements.
 */
static int c2_tx_ring_alloc(struct c2_ring *tx_ring, void *vaddr,
                            dma_addr_t base, void __iomem * mmio_txp_ring)
{
        struct c2_tx_desc *tx_desc;
        struct c2_txp_desc __iomem *txp_desc;
        struct c2_element *elem;
        int i;

        tx_ring->start = kmalloc(sizeof(*elem) * tx_ring->count, GFP_KERNEL);
        if (!tx_ring->start)
                return -ENOMEM;

        elem = tx_ring->start;
        tx_desc = vaddr;
        txp_desc = mmio_txp_ring;
        for (i = 0; i < tx_ring->count; i++, elem++, tx_desc++, txp_desc++) {
                tx_desc->len = 0;
                tx_desc->status = 0;

                /* Set TXP_HTXD_UNINIT */
                __raw_writeq((__force u64) cpu_to_be64(0x1122334455667788ULL),
                             (void __iomem *) txp_desc + C2_TXP_ADDR);
                __raw_writew(0, (void __iomem *) txp_desc + C2_TXP_LEN);
                __raw_writew((__force u16) cpu_to_be16(TXP_HTXD_UNINIT),
                             (void __iomem *) txp_desc + C2_TXP_FLAGS);

                elem->skb = NULL;
                elem->ht_desc = tx_desc;
                elem->hw_desc = txp_desc;

                if (i == tx_ring->count - 1) {
                        elem->next = tx_ring->start;
                        tx_desc->next_offset = base;
                } else {
                        elem->next = elem + 1;
                        tx_desc->next_offset =
                            base + (i + 1) * sizeof(*tx_desc);
                }
        }

        tx_ring->to_use = tx_ring->to_clean = tx_ring->start;

        return 0;
}

/*
 * Allocate RX ring elements and chain them together.
 * One-to-one association of adapter descriptors with ring elements.
 */
static int c2_rx_ring_alloc(struct c2_ring *rx_ring, void *vaddr,
                            dma_addr_t base, void __iomem * mmio_rxp_ring)
{
        struct c2_rx_desc *rx_desc;
        struct c2_rxp_desc __iomem *rxp_desc;
        struct c2_element *elem;
        int i;

        rx_ring->start = kmalloc(sizeof(*elem) * rx_ring->count, GFP_KERNEL);
        if (!rx_ring->start)
                return -ENOMEM;

        elem = rx_ring->start;
        rx_desc = vaddr;
        rxp_desc = mmio_rxp_ring;
        for (i = 0; i < rx_ring->count; i++, elem++, rx_desc++, rxp_desc++) {
                rx_desc->len = 0;
                rx_desc->status = 0;

                /* Set RXP_HRXD_UNINIT */
                __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_OK),
                             (void __iomem *) rxp_desc + C2_RXP_STATUS);
                __raw_writew(0, (void __iomem *) rxp_desc + C2_RXP_COUNT);
                __raw_writew(0, (void __iomem *) rxp_desc + C2_RXP_LEN);
                __raw_writeq((__force u64) cpu_to_be64(0x99aabbccddeeffULL),
                             (void __iomem *) rxp_desc + C2_RXP_ADDR);
                __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_UNINIT),
                             (void __iomem *) rxp_desc + C2_RXP_FLAGS);

                elem->skb = NULL;
                elem->ht_desc = rx_desc;
                elem->hw_desc = rxp_desc;

                if (i == rx_ring->count - 1) {
                        elem->next = rx_ring->start;
                        rx_desc->next_offset = base;
                } else {
                        elem->next = elem + 1;
                        rx_desc->next_offset =
                            base + (i + 1) * sizeof(*rx_desc);
                }
        }

        rx_ring->to_use = rx_ring->to_clean = rx_ring->start;

        return 0;
}
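
/*
 * Note on buffer layout: the C2_RXP_LEN posted below excludes
 * sizeof(struct c2_rxp_hdr), since the adapter fills in the RXP
 * header at the start of the buffer and DMAs the frame after it;
 * c2_rx_interrupt() later skips past the header before netif_rx().
 */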
/* Setup buffer for receiving */
static inline int c2_rx_alloc(struct c2_port *c2_port, struct c2_element *elem)
{
        struct c2_dev *c2dev = c2_port->c2dev;
        struct c2_rx_desc *rx_desc = elem->ht_desc;
        struct sk_buff *skb;
        dma_addr_t mapaddr;
        u32 maplen;
        struct c2_rxp_hdr *rxp_hdr;

        skb = dev_alloc_skb(c2_port->rx_buf_size);
        if (unlikely(!skb)) {
                pr_debug("%s: out of memory for receive\n",
                         c2_port->netdev->name);
                return -ENOMEM;
        }

        /* Zero out the rxp hdr in the sk_buff */
        memset(skb->data, 0, sizeof(*rxp_hdr));

        skb->dev = c2_port->netdev;

        maplen = c2_port->rx_buf_size;
        mapaddr =
            pci_map_single(c2dev->pcidev, skb->data, maplen,
                           PCI_DMA_FROMDEVICE);

        /* Set the sk_buff RXP_header to RXP_HRXD_READY */
        rxp_hdr = (struct c2_rxp_hdr *) skb->data;
        rxp_hdr->flags = RXP_HRXD_READY;

        __raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
        __raw_writew((__force u16) cpu_to_be16((u16) maplen - sizeof(*rxp_hdr)),
                     elem->hw_desc + C2_RXP_LEN);
        __raw_writeq((__force u64) cpu_to_be64(mapaddr), elem->hw_desc + C2_RXP_ADDR);
        __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
                     elem->hw_desc + C2_RXP_FLAGS);

        elem->skb = skb;
        elem->mapaddr = mapaddr;
        elem->maplen = maplen;
        rx_desc->len = maplen;

        return 0;
}

/*
 * Allocate buffers for the Rx ring
 * For receive:  rx_ring.to_clean is next received frame
 */
static int c2_rx_fill(struct c2_port *c2_port)
{
        struct c2_ring *rx_ring = &c2_port->rx_ring;
        struct c2_element *elem;
        int ret = 0;

        elem = rx_ring->start;
        do {
                if (c2_rx_alloc(c2_port, elem)) {
                        ret = 1;
                        break;
                }
        } while ((elem = elem->next) != rx_ring->start);

        rx_ring->to_clean = rx_ring->start;
        return ret;
}

/* Free all buffers in RX ring, assumes receiver stopped */
static void c2_rx_clean(struct c2_port *c2_port)
{
        struct c2_dev *c2dev = c2_port->c2dev;
        struct c2_ring *rx_ring = &c2_port->rx_ring;
        struct c2_element *elem;
        struct c2_rx_desc *rx_desc;

        elem = rx_ring->start;
        do {
                rx_desc = elem->ht_desc;
                rx_desc->len = 0;

                __raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
                __raw_writew(0, elem->hw_desc + C2_RXP_COUNT);
                __raw_writew(0, elem->hw_desc + C2_RXP_LEN);
                __raw_writeq((__force u64) cpu_to_be64(0x99aabbccddeeffULL),
                             elem->hw_desc + C2_RXP_ADDR);
                __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_UNINIT),
                             elem->hw_desc + C2_RXP_FLAGS);

                if (elem->skb) {
                        pci_unmap_single(c2dev->pcidev, elem->mapaddr,
                                         elem->maplen, PCI_DMA_FROMDEVICE);
                        dev_kfree_skb(elem->skb);
                        elem->skb = NULL;
                }
        } while ((elem = elem->next) != rx_ring->start);
}

static inline int c2_tx_free(struct c2_dev *c2dev, struct c2_element *elem)
{
        struct c2_tx_desc *tx_desc = elem->ht_desc;

        tx_desc->len = 0;

        pci_unmap_single(c2dev->pcidev, elem->mapaddr, elem->maplen,
                         PCI_DMA_TODEVICE);

        if (elem->skb) {
                dev_kfree_skb_any(elem->skb);
                elem->skb = NULL;
        }

        return 0;
}
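
/*
 * Descriptors still flagged TXP_HTXD_READY below were posted but never
 * consumed by the adapter: they are retired as TXP_HTXD_DONE, counted
 * as tx_dropped, and the ring is rescanned until none remain.
 */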
/* Free all buffers in TX ring, assumes transmitter stopped */
static void c2_tx_clean(struct c2_port *c2_port)
{
        struct c2_ring *tx_ring = &c2_port->tx_ring;
        struct c2_element *elem;
        struct c2_txp_desc txp_htxd;
        int retry;
        unsigned long flags;

        spin_lock_irqsave(&c2_port->tx_lock, flags);

        elem = tx_ring->start;

        do {
                retry = 0;
                do {
                        txp_htxd.flags =
                            readw(elem->hw_desc + C2_TXP_FLAGS);

                        if (txp_htxd.flags == TXP_HTXD_READY) {
                                retry = 1;
                                __raw_writew(0,
                                             elem->hw_desc + C2_TXP_LEN);
                                __raw_writeq(0,
                                             elem->hw_desc + C2_TXP_ADDR);
                                __raw_writew((__force u16) cpu_to_be16(TXP_HTXD_DONE),
                                             elem->hw_desc + C2_TXP_FLAGS);
                                c2_port->netdev->stats.tx_dropped++;
                                break;
                        } else {
                                __raw_writew(0,
                                             elem->hw_desc + C2_TXP_LEN);
                                __raw_writeq((__force u64) cpu_to_be64(0x1122334455667788ULL),
                                             elem->hw_desc + C2_TXP_ADDR);
                                __raw_writew((__force u16) cpu_to_be16(TXP_HTXD_UNINIT),
                                             elem->hw_desc + C2_TXP_FLAGS);
                        }

                        c2_tx_free(c2_port->c2dev, elem);

                } while ((elem = elem->next) != tx_ring->start);
        } while (retry);

        c2_port->tx_avail = c2_port->tx_ring.count - 1;
        c2_port->c2dev->cur_tx = tx_ring->to_use - tx_ring->start;

        if (c2_port->tx_avail > MAX_SKB_FRAGS + 1)
                netif_wake_queue(c2_port->netdev);

        spin_unlock_irqrestore(&c2_port->tx_lock, flags);
}

/*
 * Process transmit descriptors marked 'DONE' by the firmware,
 * freeing up their unneeded sk_buffs.
 */
static void c2_tx_interrupt(struct net_device *netdev)
{
        struct c2_port *c2_port = netdev_priv(netdev);
        struct c2_dev *c2dev = c2_port->c2dev;
        struct c2_ring *tx_ring = &c2_port->tx_ring;
        struct c2_element *elem;
        struct c2_txp_desc txp_htxd;

        spin_lock(&c2_port->tx_lock);

        for (elem = tx_ring->to_clean; elem != tx_ring->to_use;
             elem = elem->next) {
                txp_htxd.flags =
                    be16_to_cpu((__force __be16) readw(elem->hw_desc + C2_TXP_FLAGS));

                if (txp_htxd.flags != TXP_HTXD_DONE)
                        break;

                if (netif_msg_tx_done(c2_port)) {
                        /* PCI reads are expensive in fast path */
                        txp_htxd.len =
                            be16_to_cpu((__force __be16) readw(elem->hw_desc + C2_TXP_LEN));
                        pr_debug("%s: tx done slot %3Zu status 0x%x len "
                                 "%5u bytes\n",
                                 netdev->name, elem - tx_ring->start,
                                 txp_htxd.flags, txp_htxd.len);
                }

                c2_tx_free(c2dev, elem);
                ++(c2_port->tx_avail);
        }

        tx_ring->to_clean = elem;

        if (netif_queue_stopped(netdev)
            && c2_port->tx_avail > MAX_SKB_FRAGS + 1)
                netif_wake_queue(netdev);

        spin_unlock(&c2_port->tx_lock);
}

static void c2_rx_error(struct c2_port *c2_port, struct c2_element *elem)
{
        struct c2_rx_desc *rx_desc = elem->ht_desc;
        struct c2_rxp_hdr *rxp_hdr = (struct c2_rxp_hdr *) elem->skb->data;

        if (rxp_hdr->status != RXP_HRXD_OK ||
            rxp_hdr->len > (rx_desc->len - sizeof(*rxp_hdr))) {
                pr_debug("BAD RXP_HRXD\n");
                pr_debug("  rx_desc : %p\n", rx_desc);
                pr_debug("    index : %Zu\n",
                         elem - c2_port->rx_ring.start);
                pr_debug("    len   : %u\n", rx_desc->len);
                pr_debug("  rxp_hdr : %p [PA %p]\n", rxp_hdr,
                         (void *) __pa((unsigned long) rxp_hdr));
                pr_debug("    flags : 0x%x\n", rxp_hdr->flags);
                pr_debug("    status: 0x%x\n", rxp_hdr->status);
                pr_debug("    len   : %u\n", rxp_hdr->len);
                pr_debug("    rsvd  : 0x%x\n", rxp_hdr->rsvd);
        }

        /* Setup the skb for reuse since we're dropping this pkt */
        elem->skb->data = elem->skb->head;
        skb_reset_tail_pointer(elem->skb);

        /* Zero out the rxp hdr in the sk_buff */
        memset(elem->skb->data, 0, sizeof(*rxp_hdr));

        /* Write the descriptor to the adapter's rx ring */
        __raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
        __raw_writew(0, elem->hw_desc + C2_RXP_COUNT);
        __raw_writew((__force u16) cpu_to_be16((u16) elem->maplen - sizeof(*rxp_hdr)),
                     elem->hw_desc + C2_RXP_LEN);
        __raw_writeq((__force u64) cpu_to_be64(elem->mapaddr),
                     elem->hw_desc + C2_RXP_ADDR);
        __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
                     elem->hw_desc + C2_RXP_FLAGS);

        pr_debug("packet dropped\n");
        c2_port->netdev->stats.rx_dropped++;
}
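
/*
 * The RX walk resumes at the index saved in c2dev->cur_rx and, when it
 * stops at the first descriptor not yet DONE, the new index is written
 * back to the adapter via C2_SET_CUR_RX().
 */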
static void c2_rx_interrupt(struct net_device *netdev)
{
        struct c2_port *c2_port = netdev_priv(netdev);
        struct c2_dev *c2dev = c2_port->c2dev;
        struct c2_ring *rx_ring = &c2_port->rx_ring;
        struct c2_element *elem;
        struct c2_rx_desc *rx_desc;
        struct c2_rxp_hdr *rxp_hdr;
        struct sk_buff *skb;
        dma_addr_t mapaddr;
        u32 maplen, buflen;
        unsigned long flags;

        spin_lock_irqsave(&c2dev->lock, flags);

        /* Begin where we left off */
        rx_ring->to_clean = rx_ring->start + c2dev->cur_rx;

        for (elem = rx_ring->to_clean; elem->next != rx_ring->to_clean;
             elem = elem->next) {
                rx_desc = elem->ht_desc;
                mapaddr = elem->mapaddr;
                maplen = elem->maplen;
                skb = elem->skb;
                rxp_hdr = (struct c2_rxp_hdr *) skb->data;

                if (rxp_hdr->flags != RXP_HRXD_DONE)
                        break;
                buflen = rxp_hdr->len;

                /* Sanity check the RXP header */
                if (rxp_hdr->status != RXP_HRXD_OK ||
                    buflen > (rx_desc->len - sizeof(*rxp_hdr))) {
                        c2_rx_error(c2_port, elem);
                        continue;
                }

                /*
                 * Allocate and map a new skb for replenishing the host
                 * RX desc
                 */
                if (c2_rx_alloc(c2_port, elem)) {
                        c2_rx_error(c2_port, elem);
                        continue;
                }

                /* Unmap the old skb */
                pci_unmap_single(c2dev->pcidev, mapaddr, maplen,
                                 PCI_DMA_FROMDEVICE);

                prefetch(skb->data);

                /*
                 * Skip past the leading 8 bytes comprising the
                 * "struct c2_rxp_hdr", prepended by the adapter
                 * to the usual Ethernet header ("struct ethhdr"),
                 * to the start of the raw Ethernet packet.
                 *
                 * Fix up the various fields in the sk_buff before
                 * passing it up to netif_rx().  The transfer size
                 * (in bytes) specified by the adapter len field of
                 * the "struct rxp_hdr_t" does NOT include the
                 * "sizeof(struct c2_rxp_hdr)".
                 */
                skb->data += sizeof(*rxp_hdr);
                skb_set_tail_pointer(skb, buflen);
                skb->len = buflen;
                skb->protocol = eth_type_trans(skb, netdev);

                netif_rx(skb);

                netdev->stats.rx_packets++;
                netdev->stats.rx_bytes += buflen;
        }

        /* Save where we left off */
        rx_ring->to_clean = elem;
        c2dev->cur_rx = elem - rx_ring->start;
        C2_SET_CUR_RX(c2dev, c2dev->cur_rx);

        spin_unlock_irqrestore(&c2dev->lock, flags);
}

/*
 * Handle netisr0 TX & RX interrupts.
 */
static irqreturn_t c2_interrupt(int irq, void *dev_id)
{
        unsigned int netisr0, dmaisr;
        int handled = 0;
        struct c2_dev *c2dev = (struct c2_dev *) dev_id;

        /* Process CCILNET interrupts */
        netisr0 = readl(c2dev->regs + C2_NISR0);
        if (netisr0) {

                /*
                 * There is an issue with the firmware that always
                 * provides the status of RX for both TX & RX
                 * interrupts.  So process both queues here.
                 */
                c2_rx_interrupt(c2dev->netdev);
                c2_tx_interrupt(c2dev->netdev);

                /* Clear the interrupt */
                writel(netisr0, c2dev->regs + C2_NISR0);
                handled++;
        }

        /* Process RNIC interrupts */
        dmaisr = readl(c2dev->regs + C2_DISR);
        if (dmaisr) {
                writel(dmaisr, c2dev->regs + C2_DISR);
                c2_rnic_interrupt(c2dev);
                handled++;
        }

        if (handled) {
                return IRQ_HANDLED;
        } else {
                return IRQ_NONE;
        }
}

static int c2_up(struct net_device *netdev)
{
        struct c2_port *c2_port = netdev_priv(netdev);
        struct c2_dev *c2dev = c2_port->c2dev;
        struct c2_element *elem;
        struct c2_rxp_hdr *rxp_hdr;
        struct in_device *in_dev;
        size_t rx_size, tx_size;
        int ret, i;
        unsigned int netimr0;

        if (netif_msg_ifup(c2_port))
                pr_debug("%s: enabling interface\n", netdev->name);

        /* Set the Rx buffer size based on MTU */
        c2_set_rxbufsize(c2_port);

        /* Allocate DMA'able memory for Tx/Rx host descriptor rings */
        rx_size = c2_port->rx_ring.count * sizeof(struct c2_rx_desc);
        tx_size = c2_port->tx_ring.count * sizeof(struct c2_tx_desc);

        c2_port->mem_size = tx_size + rx_size;
        c2_port->mem = pci_alloc_consistent(c2dev->pcidev, c2_port->mem_size,
                                            &c2_port->dma);
        if (c2_port->mem == NULL) {
                pr_debug("Unable to allocate memory for "
                         "host descriptor rings\n");
                return -ENOMEM;
        }

        memset(c2_port->mem, 0, c2_port->mem_size);

        /* Create the Rx host descriptor ring */
        if ((ret =
             c2_rx_ring_alloc(&c2_port->rx_ring, c2_port->mem, c2_port->dma,
                              c2dev->mmio_rxp_ring))) {
                pr_debug("Unable to create RX ring\n");
                goto bail0;
        }

        /* Allocate Rx buffers for the host descriptor ring */
        if (c2_rx_fill(c2_port)) {
                pr_debug("Unable to fill RX ring\n");
                goto bail1;
        }

        /* Create the Tx host descriptor ring */
        if ((ret = c2_tx_ring_alloc(&c2_port->tx_ring, c2_port->mem + rx_size,
                                    c2_port->dma + rx_size,
                                    c2dev->mmio_txp_ring))) {
                pr_debug("Unable to create TX ring\n");
                goto bail1;
        }

        /* Set the TX pointer to where we left off */
        c2_port->tx_avail = c2_port->tx_ring.count - 1;
        c2_port->tx_ring.to_use = c2_port->tx_ring.to_clean =
            c2_port->tx_ring.start + c2dev->cur_tx;

        /* missing: Initialize MAC */

        BUG_ON(c2_port->tx_ring.to_use != c2_port->tx_ring.to_clean);

        /* Reset the adapter, ensures the driver is in sync with the RXP */
        c2_reset(c2_port);

        /* Reset the READY bit in the sk_buff RXP headers & adapter HRXDQ */
        for (i = 0, elem = c2_port->rx_ring.start; i < c2_port->rx_ring.count;
             i++, elem++) {
                rxp_hdr = (struct c2_rxp_hdr *) elem->skb->data;
                rxp_hdr->flags = 0;
                __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
                             elem->hw_desc + C2_RXP_FLAGS);
        }

        /* Enable network packets */
        netif_start_queue(netdev);

        /* Enable IRQ */
        writel(0, c2dev->regs + C2_IDIS);
        netimr0 = readl(c2dev->regs + C2_NIMR0);
        netimr0 &= ~(C2_PCI_HTX_INT | C2_PCI_HRX_INT);
        writel(netimr0, c2dev->regs + C2_NIMR0);

        /* Tell the stack to ignore arp requests for ipaddrs bound to
         * other interfaces.  This is needed to prevent the host stack
         * from responding to arp requests to the ipaddr bound on the
         * rdma interface.
         */
        in_dev = in_dev_get(netdev);
        IN_DEV_CONF_SET(in_dev, ARP_IGNORE, 1);
        in_dev_put(in_dev);

        return 0;

bail1:
        c2_rx_clean(c2_port);
        kfree(c2_port->rx_ring.start);

bail0:
        pci_free_consistent(c2dev->pcidev, c2_port->mem_size, c2_port->mem,
                            c2_port->dma);

        return ret;
}

static int c2_down(struct net_device *netdev)
{
        struct c2_port *c2_port = netdev_priv(netdev);
        struct c2_dev *c2dev = c2_port->c2dev;

        if (netif_msg_ifdown(c2_port))
                pr_debug("%s: disabling interface\n",
                         netdev->name);

        /* Wait for all the queued packets to get sent */
        c2_tx_interrupt(netdev);

        /* Disable network packets */
        netif_stop_queue(netdev);

        /* Disable IRQs by clearing the interrupt mask */
        writel(1, c2dev->regs + C2_IDIS);
        writel(0, c2dev->regs + C2_NIMR0);

        /* missing: Stop transmitter */

        /* missing: Stop receiver */

        /* Reset the adapter, ensures the driver is in sync with the RXP */
        c2_reset(c2_port);

        /* missing: Turn off LEDs here */

        /* Free all buffers in the host descriptor rings */
        c2_tx_clean(c2_port);
        c2_rx_clean(c2_port);

        /* Free the host descriptor rings */
        kfree(c2_port->rx_ring.start);
        kfree(c2_port->tx_ring.start);
        pci_free_consistent(c2dev->pcidev, c2_port->mem_size, c2_port->mem,
                            c2_port->dma);

        return 0;
}

static void c2_reset(struct c2_port *c2_port)
{
        struct c2_dev *c2dev = c2_port->c2dev;
        unsigned int cur_rx = c2dev->cur_rx;

        /* Tell the hardware to quiesce */
        C2_SET_CUR_RX(c2dev, cur_rx | C2_PCI_HRX_QUI);

        /*
         * The hardware will reset the C2_PCI_HRX_QUI bit once
         * the RXP is quiesced.  Wait 2 seconds for this.
         */
        ssleep(2);

        cur_rx = C2_GET_CUR_RX(c2dev);

        if (cur_rx & C2_PCI_HRX_QUI)
                pr_debug("c2_reset: failed to quiesce the hardware!\n");

        cur_rx &= ~C2_PCI_HRX_QUI;

        c2dev->cur_rx = cur_rx;

        pr_debug("Current RX: %u\n", c2dev->cur_rx);
}
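
/*
 * TX queueing keeps MAX_SKB_FRAGS + 1 descriptors of headroom: the
 * queue is stopped once no more than that many remain (the worst case
 * for a single fragmented skb) and woken again from the TX-done path.
 */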
static int c2_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
        struct c2_port *c2_port = netdev_priv(netdev);
        struct c2_dev *c2dev = c2_port->c2dev;
        struct c2_ring *tx_ring = &c2_port->tx_ring;
        struct c2_element *elem;
        dma_addr_t mapaddr;
        u32 maplen;
        unsigned long flags;
        unsigned int i;

        spin_lock_irqsave(&c2_port->tx_lock, flags);

        if (unlikely(c2_port->tx_avail < (skb_shinfo(skb)->nr_frags + 1))) {
                netif_stop_queue(netdev);
                spin_unlock_irqrestore(&c2_port->tx_lock, flags);

                pr_debug("%s: Tx ring full when queue awake!\n",
                         netdev->name);
                return NETDEV_TX_BUSY;
        }

        maplen = skb_headlen(skb);
        mapaddr =
            pci_map_single(c2dev->pcidev, skb->data, maplen, PCI_DMA_TODEVICE);

        elem = tx_ring->to_use;
        elem->skb = skb;
        elem->mapaddr = mapaddr;
        elem->maplen = maplen;

        /* Tell HW to xmit */
        __raw_writeq((__force u64) cpu_to_be64(mapaddr),
                     elem->hw_desc + C2_TXP_ADDR);
        __raw_writew((__force u16) cpu_to_be16(maplen),
                     elem->hw_desc + C2_TXP_LEN);
        __raw_writew((__force u16) cpu_to_be16(TXP_HTXD_READY),
                     elem->hw_desc + C2_TXP_FLAGS);

        netdev->stats.tx_packets++;
        netdev->stats.tx_bytes += maplen;

        /* Loop thru additional data fragments and queue them */
        if (skb_shinfo(skb)->nr_frags) {
                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                        maplen = frag->size;
                        mapaddr =
                            pci_map_page(c2dev->pcidev, frag->page,
                                         frag->page_offset, maplen,
                                         PCI_DMA_TODEVICE);

                        elem = elem->next;
                        elem->skb = NULL;
                        elem->mapaddr = mapaddr;
                        elem->maplen = maplen;

                        /* Tell HW to xmit */
                        __raw_writeq((__force u64) cpu_to_be64(mapaddr),
                                     elem->hw_desc + C2_TXP_ADDR);
                        __raw_writew((__force u16) cpu_to_be16(maplen),
                                     elem->hw_desc + C2_TXP_LEN);
                        __raw_writew((__force u16) cpu_to_be16(TXP_HTXD_READY),
                                     elem->hw_desc + C2_TXP_FLAGS);

                        netdev->stats.tx_packets++;
                        netdev->stats.tx_bytes += maplen;
                }
        }

        tx_ring->to_use = elem->next;
        c2_port->tx_avail -= (skb_shinfo(skb)->nr_frags + 1);

        if (c2_port->tx_avail <= MAX_SKB_FRAGS + 1) {
                netif_stop_queue(netdev);
                if (netif_msg_tx_queued(c2_port))
                        pr_debug("%s: transmit queue full\n",
                                 netdev->name);
        }

        spin_unlock_irqrestore(&c2_port->tx_lock, flags);

        netdev->trans_start = jiffies;

        return NETDEV_TX_OK;
}

static void c2_tx_timeout(struct net_device *netdev)
{
        struct c2_port *c2_port = netdev_priv(netdev);

        if (netif_msg_timer(c2_port))
                pr_debug("%s: tx timeout\n", netdev->name);

        c2_tx_clean(c2_port);
}

static int c2_change_mtu(struct net_device *netdev, int new_mtu)
{
        int ret = 0;

        if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
                return -EINVAL;

        netdev->mtu = new_mtu;

        if (netif_running(netdev)) {
                c2_down(netdev);

                c2_up(netdev);
        }

        return ret;
}

static const struct net_device_ops c2_netdev = {
        .ndo_open               = c2_up,
        .ndo_stop               = c2_down,
        .ndo_start_xmit         = c2_xmit_frame,
        .ndo_tx_timeout         = c2_tx_timeout,
        .ndo_change_mtu         = c2_change_mtu,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
};

/* Initialize network device */
static struct net_device *c2_devinit(struct c2_dev *c2dev,
                                     void __iomem * mmio_addr)
{
        struct c2_port *c2_port = NULL;
        struct net_device *netdev = alloc_etherdev(sizeof(*c2_port));

        if (!netdev) {
                pr_debug("c2_port etherdev alloc failed");
                return NULL;
        }

        SET_NETDEV_DEV(netdev, &c2dev->pcidev->dev);

        netdev->netdev_ops = &c2_netdev;
        netdev->watchdog_timeo = C2_TX_TIMEOUT;
        netdev->irq = c2dev->pcidev->irq;

        c2_port = netdev_priv(netdev);
        c2_port->netdev = netdev;
        c2_port->c2dev = c2dev;
        c2_port->msg_enable = netif_msg_init(debug, default_msg);
        c2_port->tx_ring.count = C2_NUM_TX_DESC;
        c2_port->rx_ring.count = C2_NUM_RX_DESC;

        spin_lock_init(&c2_port->tx_lock);

        /* Copy our 48-bit ethernet hardware address */
        memcpy_fromio(netdev->dev_addr, mmio_addr + C2_REGS_ENADDR, 6);

        /* Validate the MAC address */
        if (!is_valid_ether_addr(netdev->dev_addr)) {
                pr_debug("Invalid MAC Address\n");
                c2_print_macaddr(netdev);
                free_netdev(netdev);
                return NULL;
        }

        c2dev->netdev = netdev;

        return netdev;
}
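
/*
 * PCI probe: resources are acquired in order and, on any failure,
 * released in reverse through the numbered bail labels at the end.
 */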
static int __devinit c2_probe(struct pci_dev *pcidev,
                              const struct pci_device_id *ent)
{
        int ret = 0, i;
        unsigned long reg0_start, reg0_flags, reg0_len;
        unsigned long reg2_start, reg2_flags, reg2_len;
        unsigned long reg4_start, reg4_flags, reg4_len;
        unsigned kva_map_size;
        struct net_device *netdev = NULL;
        struct c2_dev *c2dev = NULL;
        void __iomem *mmio_regs = NULL;

        printk(KERN_INFO PFX "AMSO1100 Gigabit Ethernet driver v%s loaded\n",
               DRV_VERSION);

        /* Enable PCI device */
        ret = pci_enable_device(pcidev);
        if (ret) {
                printk(KERN_ERR PFX "%s: Unable to enable PCI device\n",
                       pci_name(pcidev));
                goto bail0;
        }

        reg0_start = pci_resource_start(pcidev, BAR_0);
        reg0_len = pci_resource_len(pcidev, BAR_0);
        reg0_flags = pci_resource_flags(pcidev, BAR_0);

        reg2_start = pci_resource_start(pcidev, BAR_2);
        reg2_len = pci_resource_len(pcidev, BAR_2);
        reg2_flags = pci_resource_flags(pcidev, BAR_2);

        reg4_start = pci_resource_start(pcidev, BAR_4);
        reg4_len = pci_resource_len(pcidev, BAR_4);
        reg4_flags = pci_resource_flags(pcidev, BAR_4);

        pr_debug("BAR0 size = 0x%lX bytes\n", reg0_len);
        pr_debug("BAR2 size = 0x%lX bytes\n", reg2_len);
        pr_debug("BAR4 size = 0x%lX bytes\n", reg4_len);

        /* Make sure PCI base addrs are MMIO */
        if (!(reg0_flags & IORESOURCE_MEM) ||
            !(reg2_flags & IORESOURCE_MEM) || !(reg4_flags & IORESOURCE_MEM)) {
                printk(KERN_ERR PFX "PCI regions not an MMIO resource\n");
                ret = -ENODEV;
                goto bail1;
        }

        /* Check for weird/broken PCI region reporting */
        if ((reg0_len < C2_REG0_SIZE) ||
            (reg2_len < C2_REG2_SIZE) || (reg4_len < C2_REG4_SIZE)) {
                printk(KERN_ERR PFX "Invalid PCI region sizes\n");
                ret = -ENODEV;
                goto bail1;
        }

        /* Reserve PCI I/O and memory resources */
        ret = pci_request_regions(pcidev, DRV_NAME);
        if (ret) {
                printk(KERN_ERR PFX "%s: Unable to request regions\n",
                       pci_name(pcidev));
                goto bail1;
        }

        if ((sizeof(dma_addr_t) > 4)) {
                ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64));
                if (ret < 0) {
                        printk(KERN_ERR PFX "64b DMA configuration failed\n");
                        goto bail2;
                }
        } else {
                ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
                if (ret < 0) {
                        printk(KERN_ERR PFX "32b DMA configuration failed\n");
                        goto bail2;
                }
        }

        /* Enables bus-mastering on the device */
        pci_set_master(pcidev);

        /* Remap the adapter PCI registers in BAR4 */
        mmio_regs = ioremap_nocache(reg4_start + C2_PCI_REGS_OFFSET,
                                    sizeof(struct c2_adapter_pci_regs));
        if (!mmio_regs) {
                printk(KERN_ERR PFX
                       "Unable to remap adapter PCI registers in BAR4\n");
                ret = -EIO;
                goto bail2;
        }

        /* Validate PCI regs magic */
        for (i = 0; i < sizeof(c2_magic); i++) {
                if (c2_magic[i] != readb(mmio_regs + C2_REGS_MAGIC + i)) {
                        printk(KERN_ERR PFX "Downlevel Firmware boot loader "
                               "[%d/%Zd: got 0x%x, exp 0x%x]. Use the cc_flash "
                               "utility to update your boot loader\n",
                               i + 1, sizeof(c2_magic),
                               readb(mmio_regs + C2_REGS_MAGIC + i),
                               c2_magic[i]);
                        printk(KERN_ERR PFX "Adapter not claimed\n");
                        iounmap(mmio_regs);
                        ret = -EIO;
                        goto bail2;
                }
        }

        /* Validate the adapter version */
        if (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_VERS)) != C2_VERSION) {
                printk(KERN_ERR PFX "Version mismatch "
                       "[fw=%u, c2=%u], Adapter not claimed\n",
                       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_VERS)),
                       C2_VERSION);
                ret = -EINVAL;
                iounmap(mmio_regs);
                goto bail2;
        }

        /* Validate the adapter IVN */
        if (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_IVN)) != C2_IVN) {
                printk(KERN_ERR PFX "Downlevel firmware level. You should be using "
                       "the OpenIB device support kit. "
                       "[fw=0x%x, c2=0x%x], Adapter not claimed\n",
                       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_IVN)),
                       C2_IVN);
                ret = -EINVAL;
                iounmap(mmio_regs);
                goto bail2;
        }

        /* Allocate hardware structure */
        c2dev = (struct c2_dev *) ib_alloc_device(sizeof(*c2dev));
        if (!c2dev) {
                printk(KERN_ERR PFX "%s: Unable to alloc hardware struct\n",
                       pci_name(pcidev));
                ret = -ENOMEM;
                iounmap(mmio_regs);
                goto bail2;
        }

        memset(c2dev, 0, sizeof(*c2dev));
        spin_lock_init(&c2dev->lock);
        c2dev->pcidev = pcidev;
        c2dev->cur_tx = 0;
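
        /*
         * The HRX_CUR register holds an adapter-side address within the
         * HRXDQ; 0xffffc000 is evidently that queue's base address on
         * the adapter, so subtracting it recovers the descriptor index.
         */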
        /* Get the last RX index */
        c2dev->cur_rx =
            (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_HRX_CUR)) -
             0xffffc000) / sizeof(struct c2_rxp_desc);

        /* Request an interrupt line for the driver */
        ret = request_irq(pcidev->irq, c2_interrupt, IRQF_SHARED, DRV_NAME, c2dev);
        if (ret) {
                printk(KERN_ERR PFX "%s: requested IRQ %u is busy\n",
                       pci_name(pcidev), pcidev->irq);
                iounmap(mmio_regs);
                goto bail3;
        }

        /* Set driver specific data */
        pci_set_drvdata(pcidev, c2dev);

        /* Initialize network device */
        if ((netdev = c2_devinit(c2dev, mmio_regs)) == NULL) {
                iounmap(mmio_regs);
                goto bail4;
        }

        /* Save off the actual size prior to unmapping mmio_regs */
        kva_map_size = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_PCI_WINSIZE));

        /* Unmap the adapter PCI registers in BAR4 */
        iounmap(mmio_regs);

        /* Register network device */
        ret = register_netdev(netdev);
        if (ret) {
                printk(KERN_ERR PFX "Unable to register netdev, ret = %d\n",
                       ret);
                goto bail5;
        }

        /* Disable network packets */
        netif_stop_queue(netdev);

        /* Remap the adapter HRXDQ PA space to kernel VA space */
        c2dev->mmio_rxp_ring = ioremap_nocache(reg4_start + C2_RXP_HRXDQ_OFFSET,
                                               C2_RXP_HRXDQ_SIZE);
        if (!c2dev->mmio_rxp_ring) {
                printk(KERN_ERR PFX "Unable to remap MMIO HRXDQ region\n");
                ret = -EIO;
                goto bail6;
        }

        /* Remap the adapter HTXDQ PA space to kernel VA space */
        c2dev->mmio_txp_ring = ioremap_nocache(reg4_start + C2_TXP_HTXDQ_OFFSET,
                                               C2_TXP_HTXDQ_SIZE);
        if (!c2dev->mmio_txp_ring) {
                printk(KERN_ERR PFX "Unable to remap MMIO HTXDQ region\n");
                ret = -EIO;
                goto bail7;
        }

        /* Save off the current RX index in the last 4 bytes of the TXP Ring */
        C2_SET_CUR_RX(c2dev, c2dev->cur_rx);

        /* Remap the PCI registers in adapter BAR0 to kernel VA space */
        c2dev->regs = ioremap_nocache(reg0_start, reg0_len);
        if (!c2dev->regs) {
                printk(KERN_ERR PFX "Unable to remap BAR0\n");
                ret = -EIO;
                goto bail8;
        }

        /* Remap the PCI registers in adapter BAR4 to kernel VA space */
        c2dev->pa = reg4_start + C2_PCI_REGS_OFFSET;
        c2dev->kva = ioremap_nocache(reg4_start + C2_PCI_REGS_OFFSET,
                                     kva_map_size);
        if (!c2dev->kva) {
                printk(KERN_ERR PFX "Unable to remap BAR4\n");
                ret = -EIO;
                goto bail9;
        }

        /* Print out the MAC address */
        c2_print_macaddr(netdev);

        ret = c2_rnic_init(c2dev);
        if (ret) {
                printk(KERN_ERR PFX "c2_rnic_init failed: %d\n", ret);
                goto bail10;
        }

        if (c2_register_device(c2dev))
                goto bail10;

        return 0;

bail10:
        iounmap(c2dev->kva);

bail9:
        iounmap(c2dev->regs);

bail8:
        iounmap(c2dev->mmio_txp_ring);

bail7:
        iounmap(c2dev->mmio_rxp_ring);

bail6:
        unregister_netdev(netdev);

bail5:
        free_netdev(netdev);

bail4:
        free_irq(pcidev->irq, c2dev);

bail3:
        ib_dealloc_device(&c2dev->ibdev);

bail2:
        pci_release_regions(pcidev);

bail1:
        pci_disable_device(pcidev);

bail0:
        return ret;
}

static void __devexit c2_remove(struct pci_dev *pcidev)
{
        struct c2_dev *c2dev = pci_get_drvdata(pcidev);
        struct net_device *netdev = c2dev->netdev;

        /* Unregister with OpenIB */
        c2_unregister_device(c2dev);

        /* Clean up the RNIC resources */
        c2_rnic_term(c2dev);

        /* Remove network device from the kernel */
        unregister_netdev(netdev);

        /* Free network device */
        free_netdev(netdev);

        /* Free the interrupt line */
        free_irq(pcidev->irq, c2dev);

        /* missing: Turn LEDs off here */

        /* Unmap adapter PA space */
        iounmap(c2dev->kva);
        iounmap(c2dev->regs);
        iounmap(c2dev->mmio_txp_ring);
        iounmap(c2dev->mmio_rxp_ring);

        /* Free the hardware structure */
        ib_dealloc_device(&c2dev->ibdev);

        /* Release reserved PCI I/O and memory resources */
        pci_release_regions(pcidev);

        /* Disable PCI device */
        pci_disable_device(pcidev);

        /* Clear driver specific data */
        pci_set_drvdata(pcidev, NULL);
}

static struct pci_driver c2_pci_driver = {
        .name = DRV_NAME,
        .id_table = c2_pci_table,
        .probe = c2_probe,
        .remove = __devexit_p(c2_remove),
};

static int __init c2_init_module(void)
{
        return pci_register_driver(&c2_pci_driver);
}

static void __exit c2_exit_module(void)
{
        pci_unregister_driver(&c2_pci_driver);
}

module_init(c2_init_module);
module_exit(c2_exit_module);