/*

  he.c

  ForeRunnerHE ATM Adapter driver for ATM on Linux
  Copyright (C) 1999-2001  Naval Research Laboratory

  This library is free software; you can redistribute it and/or
  modify it under the terms of the GNU Lesser General Public
  License as published by the Free Software Foundation; either
  version 2.1 of the License, or (at your option) any later version.

  This library is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with this library; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

*/

/*

  he.c

  ForeRunnerHE ATM Adapter driver for ATM on Linux
  Copyright (C) 1999-2001  Naval Research Laboratory

  Permission to use, copy, modify and distribute this software and its
  documentation is hereby granted, provided that both the copyright
  notice and this permission notice appear in all copies of the software,
  derivative works or modified versions, and any portions thereof, and
  that both notices appear in supporting documentation.

  NRL ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION AND
  DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
  RESULTING FROM THE USE OF THIS SOFTWARE.

  This driver was written using the "Programmer's Reference Manual for
  ForeRunnerHE(tm)", MANU0361-01 - Rev. A, 08/21/98.

  AUTHORS:
	chas williams <[email protected]>
	eric kinzie <[email protected]>

  NOTES:
	4096 supported 'connections'
	group 0 is used for all traffic
	interrupt queue 0 is used for all interrupts
	aal0 support (based on work from [email protected])

 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <linux/atmdev.h>
#include <linux/atm.h>
#include <linux/sonet.h>

#undef USE_SCATTERGATHER
#undef USE_CHECKSUM_HW			/* still confused about this */
/* #undef HE_DEBUG */

#include "he.h"
#include "suni.h"
#include <linux/atm_he.h>

#define hprintk(fmt,args...)	printk(KERN_ERR DEV_LABEL "%d: " fmt, he_dev->number , ##args)

#ifdef HE_DEBUG
#define HPRINTK(fmt,args...)	printk(KERN_DEBUG DEV_LABEL "%d: " fmt, he_dev->number , ##args)
#else /* !HE_DEBUG */
#define HPRINTK(fmt,args...)	do { } while (0)
#endif /* HE_DEBUG */

/* declarations */

static int he_open(struct atm_vcc *vcc);
static void he_close(struct atm_vcc *vcc);
static int he_send(struct atm_vcc *vcc, struct sk_buff *skb);
static int he_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg);
static irqreturn_t he_irq_handler(int irq, void *dev_id);
static void he_tasklet(unsigned long data);
static int he_proc_read(struct atm_dev *dev, loff_t *pos, char *page);
static int he_start(struct atm_dev *dev);
static void he_stop(struct he_dev *dev);
static void he_phy_put(struct atm_dev *, unsigned char, unsigned long);
static unsigned char he_phy_get(struct atm_dev *, unsigned long);

static u8 read_prom_byte(struct he_dev *he_dev, int addr);

/* globals */

static struct he_dev *he_devs;
static bool disable64;
static short nvpibits = -1;
static short nvcibits = -1;
static short rx_skb_reserve = 16;
static bool irq_coalesce = true;
static bool sdh;

/* Read from EEPROM = 0000 0011b */
static unsigned int readtab[] = {
	CS_HIGH | CLK_HIGH,
	CS_LOW | CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH,	/* 1 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH	/* 1 */
};
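
/*
 * a minimal reading of the table above: the sequence bit-bangs the
 * 8-bit READ opcode (0000 0011b) onto the serial EEPROM, one bit per
 * rising clock edge -- chip select is toggled first, the six zeros are
 * clocked with SI low, and the final two ones with SI high
 */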

/* Clock to read from/write to the EEPROM */
static unsigned int clocktab[] = {
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW
};

static const struct atmdev_ops he_ops =
{
	.open =		he_open,
	.close =	he_close,
	.ioctl =	he_ioctl,
	.send =		he_send,
	.phy_put =	he_phy_put,
	.phy_get =	he_phy_get,
	.proc_read =	he_proc_read,
	.owner =	THIS_MODULE
};

#define he_writel(dev, val, reg)	do { writel(val, (dev)->membase + (reg)); wmb(); } while (0)
#define he_readl(dev, reg)		readl((dev)->membase + (reg))

/* section 2.12 connection memory access */

static __inline__ void
he_writel_internal(struct he_dev *he_dev, unsigned val, unsigned addr,
		   unsigned flags)
{
	he_writel(he_dev, val, CON_DAT);
	(void) he_readl(he_dev, CON_DAT);	/* flush posted writes */
	he_writel(he_dev, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
}
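
/*
 * note the access protocol implied above: connection memory (and the
 * mailbox) is reached indirectly -- data is staged in CON_DAT, the
 * address and direction are posted to CON_CTL, and the host spins on
 * CON_CTL_BUSY until the controller finishes the transfer
 */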

#define he_writel_rcm(dev, val, reg) 				\
			he_writel_internal(dev, val, reg, CON_CTL_RCM)

#define he_writel_tcm(dev, val, reg) 				\
			he_writel_internal(dev, val, reg, CON_CTL_TCM)

#define he_writel_mbox(dev, val, reg) 				\
			he_writel_internal(dev, val, reg, CON_CTL_MBOX)

static unsigned
he_readl_internal(struct he_dev *he_dev, unsigned addr, unsigned flags)
{
	he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
	return he_readl(he_dev, CON_DAT);
}

#define he_readl_rcm(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_RCM)

#define he_readl_tcm(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_TCM)

#define he_readl_mbox(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_MBOX)


/* figure 2.2 connection id */

#define he_mkcid(dev, vpi, vci)		(((vpi << (dev)->vcibits) | vci) & 0x1fff)
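
/*
 * illustrative only: the vpi is shifted above the vci field, so with,
 * say, 10 vcibits, vpi/vci 1.32 encodes as
 * ((1 << 10) | 32) & 0x1fff = 0x420
 */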

/* 2.5.1 per connection transmit state registers */

#define he_writel_tsr0(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 0)
#define he_readl_tsr0(dev, cid) \
		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 0)

#define he_writel_tsr1(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 1)

#define he_writel_tsr2(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 2)

#define he_writel_tsr3(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 3)

#define he_writel_tsr4(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 4)

	/* from page 2-20
	 *
	 * NOTE While the transmit connection is active, bits 23 through 0
	 *      of this register must not be written by the host.  Byte
	 *      enables should be used during normal operation when writing
	 *      the most significant byte.
	 */

#define he_writel_tsr4_upper(dev, val, cid) \
		he_writel_internal(dev, val, CONFIG_TSRA | (cid << 3) | 4, \
							CON_CTL_TCM \
							| CON_BYTE_DISABLE_2 \
							| CON_BYTE_DISABLE_1 \
							| CON_BYTE_DISABLE_0)

#define he_readl_tsr4(dev, cid) \
		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 4)

#define he_writel_tsr5(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 5)

#define he_writel_tsr6(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 6)

#define he_writel_tsr7(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 7)


#define he_writel_tsr8(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 0)

#define he_writel_tsr9(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 1)

#define he_writel_tsr10(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 2)

#define he_writel_tsr11(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 3)


#define he_writel_tsr12(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 0)

#define he_writel_tsr13(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 1)


#define he_writel_tsr14(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRD | cid)

#define he_writel_tsr14_upper(dev, val, cid) \
		he_writel_internal(dev, val, CONFIG_TSRD | cid, \
							CON_CTL_TCM \
							| CON_BYTE_DISABLE_2 \
							| CON_BYTE_DISABLE_1 \
							| CON_BYTE_DISABLE_0)

/* 2.7.1 per connection receive state registers */

#define he_writel_rsr0(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 0)
#define he_readl_rsr0(dev, cid) \
		he_readl_rcm(dev, 0x00000 | (cid << 3) | 0)

#define he_writel_rsr1(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 1)

#define he_writel_rsr2(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 2)

#define he_writel_rsr3(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 3)

#define he_writel_rsr4(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 4)

#define he_writel_rsr5(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 5)

#define he_writel_rsr6(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 6)

#define he_writel_rsr7(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 7)

static __inline__ struct atm_vcc*
__find_vcc(struct he_dev *he_dev, unsigned cid)
{
	struct hlist_head *head;
	struct atm_vcc *vcc;
	struct sock *s;
	short vpi;
	int vci;

	vpi = cid >> he_dev->vcibits;
	vci = cid & ((1 << he_dev->vcibits) - 1);
	head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];

	sk_for_each(s, head) {
		vcc = atm_sk(s);
		if (vcc->dev == he_dev->atm_dev &&
		    vcc->vci == vci && vcc->vpi == vpi &&
		    vcc->qos.rxtp.traffic_class != ATM_NONE) {
			return vcc;
		}
	}
	return NULL;
}

static int he_init_one(struct pci_dev *pci_dev,
		       const struct pci_device_id *pci_ent)
{
	struct atm_dev *atm_dev = NULL;
	struct he_dev *he_dev = NULL;
	int err = 0;

	printk(KERN_INFO "ATM he driver\n");

	if (pci_enable_device(pci_dev))
		return -EIO;
	if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32)) != 0) {
		printk(KERN_WARNING "he: no suitable dma available\n");
		err = -EIO;
		goto init_one_failure;
	}

	atm_dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &he_ops, -1, NULL);
	if (!atm_dev) {
		err = -ENODEV;
		goto init_one_failure;
	}
	pci_set_drvdata(pci_dev, atm_dev);

	he_dev = kzalloc(sizeof(struct he_dev),
			 GFP_KERNEL);
	if (!he_dev) {
		err = -ENOMEM;
		goto init_one_failure;
	}
	he_dev->pci_dev = pci_dev;
	he_dev->atm_dev = atm_dev;
	he_dev->atm_dev->dev_data = he_dev;
	atm_dev->dev_data = he_dev;
	he_dev->number = atm_dev->number;
	tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
	spin_lock_init(&he_dev->global_lock);

	if (he_start(atm_dev)) {
		he_stop(he_dev);
		err = -ENODEV;
		goto init_one_failure;
	}
	he_dev->next = NULL;
	if (he_devs)
		he_dev->next = he_devs;
	he_devs = he_dev;
	return 0;

init_one_failure:
	if (atm_dev)
		atm_dev_deregister(atm_dev);
	kfree(he_dev);
	pci_disable_device(pci_dev);
	return err;
}

static void he_remove_one(struct pci_dev *pci_dev)
{
	struct atm_dev *atm_dev;
	struct he_dev *he_dev;

	atm_dev = pci_get_drvdata(pci_dev);
	he_dev = HE_DEV(atm_dev);

	/* need to remove from he_devs */

	he_stop(he_dev);
	atm_dev_deregister(atm_dev);
	kfree(he_dev);

	pci_disable_device(pci_dev);
}


static unsigned
rate_to_atmf(unsigned rate)		/* cps to atm forum format */
{
#define NONZERO (1 << 14)

	unsigned exp = 0;

	if (rate == 0)
		return 0;

	rate <<= 9;
	while (rate > 0x3ff) {
		++exp;
		rate >>= 1;
	}

	return (NONZERO | (exp << 9) | (rate & 0x1ff));
}
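
/*
 * worked example: the atm forum format is a 5-bit exponent and a 9-bit
 * mantissa with an implied leading one, i.e. cps ~= 2^exp * (512 + man) / 512.
 * encoding 10 cps: 10 << 9 = 5120, three right shifts bring it to 640
 * (exp = 3, man = 640 & 0x1ff = 128), so the result is
 * NONZERO | (3 << 9) | 128 = 0x4680
 */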

static void he_init_rx_lbfp0(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->r0_startrow * he_dev->bytes_per_row;

	lbufd_index = 0;
	lbm_offset = he_readl(he_dev, RCMLBM_BA);

	he_writel(he_dev, lbufd_index, RLBF0_H);

	for (i = 0, lbuf_count = 0; i < he_dev->r0_numbuffs; ++i) {
		lbufd_index += 2;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 4;
	}

	he_writel(he_dev, lbufd_index - 2, RLBF0_T);
	he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C);
}

static void he_init_rx_lbfp1(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->r1_startrow * he_dev->bytes_per_row;

	lbufd_index = 1;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, RLBF1_H);

	for (i = 0, lbuf_count = 0; i < he_dev->r1_numbuffs; ++i) {
		lbufd_index += 2;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 4;
	}

	he_writel(he_dev, lbufd_index - 2, RLBF1_T);
	he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C);
}

static void he_init_tx_lbfp(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->tx_startrow * he_dev->bytes_per_row;

	lbufd_index = he_dev->r0_numbuffs + he_dev->r1_numbuffs;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, TLBF_H);

	for (i = 0, lbuf_count = 0; i < he_dev->tx_numbuffs; ++i) {
		lbufd_index += 1;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 2;
	}

	he_writel(he_dev, lbufd_index - 1, TLBF_T);
}

static int he_init_tpdrq(struct he_dev *he_dev)
{
	he_dev->tpdrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
						CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
						&he_dev->tpdrq_phys,
						GFP_KERNEL);
	if (he_dev->tpdrq_base == NULL) {
		hprintk("failed to alloc tpdrq\n");
		return -ENOMEM;
	}

	he_dev->tpdrq_tail = he_dev->tpdrq_base;
	he_dev->tpdrq_head = he_dev->tpdrq_base;

	he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H);
	he_writel(he_dev, 0, TPDRQ_T);
	he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S);

	return 0;
}

static void he_init_cs_block(struct he_dev *he_dev)
{
	unsigned clock, rate, delta;
	int reg;

	/* 5.1.7 cs block initialization */

	for (reg = 0; reg < 0x20; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_STTIM0 + reg);

	/* rate grid timer reload values */

	clock = he_is622(he_dev) ? 66667000 : 50000000;
	rate = he_dev->atm_dev->link_rate;
	delta = rate / 16 / 2;

	for (reg = 0; reg < 0x10; ++reg) {
		/* 2.4 internal transmit function
		 *
		 * we initialize the first row in the rate grid.
		 * values are period (in clock cycles) of timer
		 */
		unsigned period = clock / rate;

		he_writel_mbox(he_dev, period, CS_TGRLD0 + reg);
		rate -= delta;
	}

	if (he_is622(he_dev)) {
		/* table 5.2 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000800fa, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x000c33cb, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x0010101b, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x00181dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x00280600, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x023de8b3, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x1801, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x68b3, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x68b3, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0x14585, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		/* table 5.8 */
		he_writel_mbox(he_dev, 0x00159ece, CS_TFBSET);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5eb3, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe8b3, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdeb3, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRCEIL);

		/* table 5.9 */
		he_writel_mbox(he_dev, 0x5, CS_OTPPER);
		he_writel_mbox(he_dev, 0x14, CS_OTWPER);
	} else {
		/* table 5.1 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000400ea, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x00063388, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x00081018, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x000c1dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x0014051a, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x0235e4b1, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x4701, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x64b1, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x64b1, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0xf424, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		/* table 5.8 */
		he_writel_mbox(he_dev, 0x000563b7, CS_TFBSET);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5ab1, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe4b1, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdab1, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRCEIL);

		/* table 5.9 */
		he_writel_mbox(he_dev, 0x6, CS_OTPPER);
		he_writel_mbox(he_dev, 0x1e, CS_OTWPER);
	}

	he_writel_mbox(he_dev, 0x8, CS_OTTLIM);

	for (reg = 0; reg < 0x8; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_HGRRT0 + reg);

}

static int he_init_cs_block_rcm(struct he_dev *he_dev)
{
	unsigned (*rategrid)[16][16];
	unsigned rate, delta;
	int i, j, reg;

	unsigned rate_atmf, exp, man;
	unsigned long long rate_cps;
	int mult, buf, buf_limit = 4;

	rategrid = kmalloc( sizeof(unsigned) * 16 * 16, GFP_KERNEL);
	if (!rategrid)
		return -ENOMEM;

	/* initialize rate grid group table */

	for (reg = 0x0; reg < 0xff; ++reg)
		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

	/* initialize rate controller groups */

	for (reg = 0x100; reg < 0x1ff; ++reg)
		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

	/* initialize tNrm lookup table */

	/* the manual makes reference to a routine in a sample driver
	   for proper configuration; fortunately, we only need this
	   in order to support abr connection */

	/* initialize rate to group table */

	rate = he_dev->atm_dev->link_rate;
	delta = rate / 32;

	/*
	 * 2.4 transmit internal functions
	 *
	 * we construct a copy of the rate grid used by the scheduler
	 * in order to construct the rate to group table below
	 */

	for (j = 0; j < 16; j++) {
		(*rategrid)[0][j] = rate;
		rate -= delta;
	}

	for (i = 1; i < 16; i++)
		for (j = 0; j < 16; j++)
			if (i > 14)
				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 4;
			else
				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 2;
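
	/*
	 * so row 0 steps down from link_rate in increments of
	 * link_rate/32, and each later row runs at half the rate of the
	 * row above it (a quarter for row 15), giving 256 progressively
	 * slower entries
	 */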

	/*
	 * 2.4 transmit internal function
	 *
	 * this table maps the upper 5 bits of exponent and mantissa
	 * of the atm forum representation of the rate into an index
	 * on rate grid
	 */

	rate_atmf = 0;
	while (rate_atmf < 0x400) {
		man = (rate_atmf & 0x1f) << 4;
		exp = rate_atmf >> 5;

		/*
			instead of '/ 512', use '>> 9' to prevent a call
			to divdu3 on x86 platforms
		*/
		rate_cps = (unsigned long long) (1UL << exp) * (man + 512) >> 9;

		if (rate_cps < 10)
			rate_cps = 10;	/* 2.2.1 minimum payload rate is 10 cps */

		for (i = 255; i > 0; i--)
			if ((*rategrid)[i/16][i%16] >= rate_cps)
				break;	/* pick nearest rate instead? */

		/*
		 * each table entry is 16 bits: (rate grid index (8 bits)
		 * and a buffer limit (8 bits)
		 * there are two table entries in each 32-bit register
		 */

#ifdef notdef
		buf = rate_cps * he_dev->tx_numbuffs /
				(he_dev->atm_dev->link_rate * 2);
#else
		/* this is pretty, but avoids _divdu3 and is mostly correct */
		mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR;
		if (rate_cps > (272ULL * mult))
			buf = 4;
		else if (rate_cps > (204ULL * mult))
			buf = 3;
		else if (rate_cps > (136ULL * mult))
			buf = 2;
		else if (rate_cps > (68ULL * mult))
			buf = 1;
		else
			buf = 0;
#endif
		if (buf > buf_limit)
			buf = buf_limit;
		reg = (reg << 16) | ((i << 8) | buf);

#define RTGTBL_OFFSET 0x400

		if (rate_atmf & 0x1)
			he_writel_rcm(he_dev, reg,
				      CONFIG_RCMABR + RTGTBL_OFFSET + (rate_atmf >> 1));

		++rate_atmf;
	}

	kfree(rategrid);
	return 0;
}

static int he_init_group(struct he_dev *he_dev, int group)
{
	struct he_buff *heb, *next;
	dma_addr_t mapping;
	int i;

	he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
	he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
	he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
	he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
		  G0_RBPS_BS + (group * 32));

	/* bitmap table */
	he_dev->rbpl_table = bitmap_zalloc(RBPL_TABLE_SIZE, GFP_KERNEL);
	if (!he_dev->rbpl_table) {
		hprintk("unable to allocate rbpl bitmap table\n");
		return -ENOMEM;
	}

	/* rbpl_virt 64-bit pointers */
	he_dev->rbpl_virt = kmalloc_array(RBPL_TABLE_SIZE,
					  sizeof(*he_dev->rbpl_virt),
					  GFP_KERNEL);
	if (!he_dev->rbpl_virt) {
		hprintk("unable to allocate rbpl virt table\n");
		goto out_free_rbpl_table;
	}

	/* large buffer pool */
	he_dev->rbpl_pool = dma_pool_create("rbpl", &he_dev->pci_dev->dev,
					    CONFIG_RBPL_BUFSIZE, 64, 0);
	if (he_dev->rbpl_pool == NULL) {
		hprintk("unable to create rbpl pool\n");
		goto out_free_rbpl_virt;
	}

	he_dev->rbpl_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
					       CONFIG_RBPL_SIZE * sizeof(struct he_rbp),
					       &he_dev->rbpl_phys, GFP_KERNEL);
	if (he_dev->rbpl_base == NULL) {
		hprintk("failed to alloc rbpl_base\n");
		goto out_destroy_rbpl_pool;
	}

	INIT_LIST_HEAD(&he_dev->rbpl_outstanding);

	for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {

		heb = dma_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL, &mapping);
		if (!heb)
			goto out_free_rbpl;
		heb->mapping = mapping;
		list_add(&heb->entry, &he_dev->rbpl_outstanding);

		set_bit(i, he_dev->rbpl_table);
		he_dev->rbpl_virt[i] = heb;
		he_dev->rbpl_hint = i + 1;
		he_dev->rbpl_base[i].idx = i << RBP_IDX_OFFSET;
		he_dev->rbpl_base[i].phys = mapping + offsetof(struct he_buff, data);
	}
	he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1];

	he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32));
	he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail),
		  G0_RBPL_T + (group * 32));
	he_writel(he_dev, (CONFIG_RBPL_BUFSIZE - sizeof(struct he_buff))/4,
		  G0_RBPL_BS + (group * 32));
	he_writel(he_dev,
		  RBP_THRESH(CONFIG_RBPL_THRESH) |
		  RBP_QSIZE(CONFIG_RBPL_SIZE - 1) |
		  RBP_INT_ENB,
		  G0_RBPL_QI + (group * 32));

	/* rx buffer ready queue */

	he_dev->rbrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
					       CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
					       &he_dev->rbrq_phys, GFP_KERNEL);
	if (he_dev->rbrq_base == NULL) {
		hprintk("failed to allocate rbrq\n");
		goto out_free_rbpl;
	}

	he_dev->rbrq_head = he_dev->rbrq_base;
	he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16));
	he_writel(he_dev, 0, G0_RBRQ_H + (group * 16));
	he_writel(he_dev,
		  RBRQ_THRESH(CONFIG_RBRQ_THRESH) | RBRQ_SIZE(CONFIG_RBRQ_SIZE - 1),
		  G0_RBRQ_Q + (group * 16));
	if (irq_coalesce) {
		hprintk("coalescing interrupts\n");
		he_writel(he_dev, RBRQ_TIME(768) | RBRQ_COUNT(7),
			  G0_RBRQ_I + (group * 16));
	} else
		he_writel(he_dev, RBRQ_TIME(0) | RBRQ_COUNT(1),
			  G0_RBRQ_I + (group * 16));

	/* tx buffer ready queue */

	he_dev->tbrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
					       CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
					       &he_dev->tbrq_phys, GFP_KERNEL);
	if (he_dev->tbrq_base == NULL) {
		hprintk("failed to allocate tbrq\n");
		goto out_free_rbpq_base;
	}

	he_dev->tbrq_head = he_dev->tbrq_base;

	he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16));
	he_writel(he_dev, 0, G0_TBRQ_H + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_SIZE - 1, G0_TBRQ_S + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_THRESH, G0_TBRQ_THRESH + (group * 16));

	return 0;

out_free_rbpq_base:
	dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBRQ_SIZE *
			  sizeof(struct he_rbrq), he_dev->rbrq_base,
			  he_dev->rbrq_phys);
out_free_rbpl:
	list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
		dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);

	dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBPL_SIZE *
			  sizeof(struct he_rbp), he_dev->rbpl_base,
			  he_dev->rbpl_phys);
out_destroy_rbpl_pool:
	dma_pool_destroy(he_dev->rbpl_pool);
out_free_rbpl_virt:
	kfree(he_dev->rbpl_virt);
out_free_rbpl_table:
	bitmap_free(he_dev->rbpl_table);

	return -ENOMEM;
}

static int he_init_irq(struct he_dev *he_dev)
{
	int i;

	/* 2.9.3.5  tail offset for each interrupt queue is located after the
		    end of the interrupt queue */

	he_dev->irq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
					      (CONFIG_IRQ_SIZE + 1) * sizeof(struct he_irq),
					      &he_dev->irq_phys, GFP_KERNEL);
	if (he_dev->irq_base == NULL) {
		hprintk("failed to allocate irq\n");
		return -ENOMEM;
	}
	he_dev->irq_tailoffset = (unsigned *)
					&he_dev->irq_base[CONFIG_IRQ_SIZE];
	*he_dev->irq_tailoffset = 0;
	he_dev->irq_head = he_dev->irq_base;
	he_dev->irq_tail = he_dev->irq_base;

	for (i = 0; i < CONFIG_IRQ_SIZE; ++i)
		he_dev->irq_base[i].isw = ITYPE_INVALID;

	he_writel(he_dev, he_dev->irq_phys, IRQ0_BASE);
	he_writel(he_dev,
		  IRQ_SIZE(CONFIG_IRQ_SIZE) | IRQ_THRESH(CONFIG_IRQ_THRESH),
		  IRQ0_HEAD);
	he_writel(he_dev, IRQ_INT_A | IRQ_TYPE_LINE, IRQ0_CNTL);
	he_writel(he_dev, 0x0, IRQ0_DATA);

	he_writel(he_dev, 0x0, IRQ1_BASE);
	he_writel(he_dev, 0x0, IRQ1_HEAD);
	he_writel(he_dev, 0x0, IRQ1_CNTL);
	he_writel(he_dev, 0x0, IRQ1_DATA);

	he_writel(he_dev, 0x0, IRQ2_BASE);
	he_writel(he_dev, 0x0, IRQ2_HEAD);
	he_writel(he_dev, 0x0, IRQ2_CNTL);
	he_writel(he_dev, 0x0, IRQ2_DATA);

	he_writel(he_dev, 0x0, IRQ3_BASE);
	he_writel(he_dev, 0x0, IRQ3_HEAD);
	he_writel(he_dev, 0x0, IRQ3_CNTL);
	he_writel(he_dev, 0x0, IRQ3_DATA);

	/* 2.9.3.2 interrupt queue mapping registers */

	he_writel(he_dev, 0x0, GRP_10_MAP);
	he_writel(he_dev, 0x0, GRP_32_MAP);
	he_writel(he_dev, 0x0, GRP_54_MAP);
	he_writel(he_dev, 0x0, GRP_76_MAP);

	if (request_irq(he_dev->pci_dev->irq,
			he_irq_handler, IRQF_SHARED, DEV_LABEL, he_dev)) {
		hprintk("irq %d already in use\n", he_dev->pci_dev->irq);
		return -EINVAL;
	}

	he_dev->irq = he_dev->pci_dev->irq;

	return 0;
}

static int he_start(struct atm_dev *dev)
{
	struct he_dev *he_dev;
	struct pci_dev *pci_dev;
	unsigned long membase;

	u16 command;
	u32 gen_cntl_0, host_cntl, lb_swap;
	u8 cache_size, timer;

	unsigned err;
	unsigned int status, reg;
	int i, group;

	he_dev = HE_DEV(dev);
	pci_dev = he_dev->pci_dev;

	membase = pci_resource_start(pci_dev, 0);
	HPRINTK("membase = 0x%lx irq = %d.\n", membase, pci_dev->irq);

	/*
	 * pci bus controller initialization
	 */

	/* 4.3 pci bus controller-specific initialization */
	if (pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0) != 0) {
		hprintk("can't read GEN_CNTL_0\n");
		return -EINVAL;
	}
	gen_cntl_0 |= (MRL_ENB | MRM_ENB | IGNORE_TIMEOUT);
	if (pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0) != 0) {
		hprintk("can't write GEN_CNTL_0.\n");
		return -EINVAL;
	}

	if (pci_read_config_word(pci_dev, PCI_COMMAND, &command) != 0) {
		hprintk("can't read PCI_COMMAND.\n");
		return -EINVAL;
	}

	command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE);
	if (pci_write_config_word(pci_dev, PCI_COMMAND, command) != 0) {
		hprintk("can't enable memory.\n");
		return -EINVAL;
	}

	if (pci_read_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, &cache_size)) {
		hprintk("can't read cache line size?\n");
		return -EINVAL;
	}

	if (cache_size < 16) {
		cache_size = 16;
		if (pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, cache_size))
			hprintk("can't set cache line size to %d\n", cache_size);
	}

	if (pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &timer)) {
		hprintk("can't read latency timer?\n");
		return -EINVAL;
	}

	/* from table 3.9
	 *
	 * LAT_TIMER = 1 + AVG_LAT + BURST_SIZE/BUS_SIZE
	 *
	 * AVG_LAT: The average first data read/write latency [maximum 16 clock cycles]
	 * BURST_SIZE: 1536 bytes (read) for 622, 768 bytes (read) for 155 [192 clock cycles]
	 *
	 */
#define LAT_TIMER 209
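/* i.e. 1 + 16 + 192 = 209; both burst sizes above work out to 192
   clocks on their respective bus widths */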

	if (timer < LAT_TIMER) {
		HPRINTK("latency timer was %d, setting to %d\n", timer, LAT_TIMER);
		timer = LAT_TIMER;
		if (pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, timer))
			hprintk("can't set latency timer to %d\n", timer);
	}

	if (!(he_dev->membase = ioremap(membase, HE_REGMAP_SIZE))) {
		hprintk("can't set up page mapping\n");
		return -EINVAL;
	}

	/* 4.4 card reset */
	he_writel(he_dev, 0x0, RESET_CNTL);
	he_writel(he_dev, 0xff, RESET_CNTL);

	msleep(16);	/* 16 ms */
	status = he_readl(he_dev, RESET_CNTL);
	if ((status & BOARD_RST_STATUS) == 0) {
		hprintk("reset failed\n");
		return -EINVAL;
	}

	/* 4.5 set bus width */
	host_cntl = he_readl(he_dev, HOST_CNTL);
	if (host_cntl & PCI_BUS_SIZE64)
		gen_cntl_0 |= ENBL_64;
	else
		gen_cntl_0 &= ~ENBL_64;

	if (disable64 == 1) {
		hprintk("disabling 64-bit pci bus transfers\n");
		gen_cntl_0 &= ~ENBL_64;
	}

	if (gen_cntl_0 & ENBL_64)
		hprintk("64-bit transfers enabled\n");

	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

	/* 4.7 read prom contents */
	for (i = 0; i < PROD_ID_LEN; ++i)
		he_dev->prod_id[i] = read_prom_byte(he_dev, PROD_ID + i);

	he_dev->media = read_prom_byte(he_dev, MEDIA);

	for (i = 0; i < 6; ++i)
		dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i);

	hprintk("%s%s, %pM\n", he_dev->prod_id,
		he_dev->media & 0x40 ? "SM" : "MM", dev->esi);
	he_dev->atm_dev->link_rate = he_is622(he_dev) ?
						ATM_OC12_PCR : ATM_OC3_PCR;

	/* 4.6 set host endianness */
	lb_swap = he_readl(he_dev, LB_SWAP);
	if (he_is622(he_dev))
		lb_swap &= ~XFER_SIZE;		/* 4 cells */
	else
		lb_swap |= XFER_SIZE;		/* 8 cells */
#ifdef __BIG_ENDIAN
	lb_swap |= DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST;
#else
	lb_swap &= ~(DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST |
		     DATA_WR_SWAP | DATA_RD_SWAP | DESC_RD_SWAP);
#endif /* __BIG_ENDIAN */
	he_writel(he_dev, lb_swap, LB_SWAP);

	/* 4.8 sdram controller initialization */
	he_writel(he_dev, he_is622(he_dev) ? LB_64_ENB : 0x0, SDRAM_CTL);

	/* 4.9 initialize rnum value */
	lb_swap |= SWAP_RNUM_MAX(0xf);
	he_writel(he_dev, lb_swap, LB_SWAP);

	/* 4.10 initialize the interrupt queues */
	if ((err = he_init_irq(he_dev)) != 0)
		return err;

	/* 4.11 enable pci bus controller state machines */
	host_cntl |= (OUTFF_ENB | CMDFF_ENB |
		      QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB);
	he_writel(he_dev, host_cntl, HOST_CNTL);

	gen_cntl_0 |= INT_PROC_ENBL|INIT_ENB;
	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

	/*
	 * atm network controller initialization
	 */

	/* 5.1.1 generic configuration state */

	/*
	 *		local (cell) buffer memory map
	 *
	 *             HE155                          HE622
	 *
	 *        0 ____________1023       bytes  0 _______________________2047 bytes
	 *         |            |                  |                   |   |
	 *         |  utility   |                  |        rx0        |   |
	 *        5|____________|               255|___________________| u |
	 *        6|            |               256|                   | t |
	 *         |            |                  |                   | i |
	 *         |    rx0     |               row|        tx         | l |
	 *         |            |                  |                   | i |
	 *         |            |               767|___________________| t |
	 *      517|____________|               768|                   | y |
	 *  row 518|            |                  |        rx1        |   |
	 *         |            |              1023|___________________|___|
	 *         |            |
	 *         |    tx      |
	 *         |            |
	 *         |            |
	 *     1535|____________|
	 *     1536|            |
	 *         |    rx1     |
	 *     2047|____________|
	 *
	 */

	/* total 4096 connections */
	he_dev->vcibits = CONFIG_DEFAULT_VCIBITS;
	he_dev->vpibits = CONFIG_DEFAULT_VPIBITS;

	if (nvpibits != -1 && nvcibits != -1 && nvpibits+nvcibits != HE_MAXCIDBITS) {
		hprintk("nvpibits + nvcibits != %d\n", HE_MAXCIDBITS);
		return -ENODEV;
	}

	if (nvpibits != -1) {
		he_dev->vpibits = nvpibits;
		he_dev->vcibits = HE_MAXCIDBITS - nvpibits;
	}

	if (nvcibits != -1) {
		he_dev->vcibits = nvcibits;
		he_dev->vpibits = HE_MAXCIDBITS - nvcibits;
	}


	if (he_is622(he_dev)) {
		he_dev->cells_per_row = 40;
		he_dev->bytes_per_row = 2048;
		he_dev->r0_numrows = 256;
		he_dev->tx_numrows = 512;
		he_dev->r1_numrows = 256;
		he_dev->r0_startrow = 0;
		he_dev->tx_startrow = 256;
		he_dev->r1_startrow = 768;
	} else {
		he_dev->cells_per_row = 20;
		he_dev->bytes_per_row = 1024;
		he_dev->r0_numrows = 512;
		he_dev->tx_numrows = 1018;
		he_dev->r1_numrows = 512;
		he_dev->r0_startrow = 6;
		he_dev->tx_startrow = 518;
		he_dev->r1_startrow = 1536;
	}

	he_dev->cells_per_lbuf = 4;
	he_dev->buffer_limit = 4;
	he_dev->r0_numbuffs = he_dev->r0_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->r0_numbuffs > 2560)
		he_dev->r0_numbuffs = 2560;

	he_dev->r1_numbuffs = he_dev->r1_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->r1_numbuffs > 2560)
		he_dev->r1_numbuffs = 2560;

	he_dev->tx_numbuffs = he_dev->tx_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->tx_numbuffs > 5120)
		he_dev->tx_numbuffs = 5120;

	/* 5.1.2 configure hardware dependent registers */

	he_writel(he_dev,
		  SLICE_X(0x2) | ARB_RNUM_MAX(0xf) | TH_PRTY(0x3) |
		  RH_PRTY(0x3) | TL_PRTY(0x2) | RL_PRTY(0x1) |
		  (he_is622(he_dev) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) |
		  (he_is622(he_dev) ? NET_PREF(0x50) : NET_PREF(0x8c)),
		  LBARB);

	he_writel(he_dev, BANK_ON |
		  (he_is622(he_dev) ? (REF_RATE(0x384) | WIDE_DATA) : REF_RATE(0x150)),
		  SDRAMCON);

	he_writel(he_dev,
		  (he_is622(he_dev) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) |
		  RM_RW_WAIT(1), RCMCONFIG);
	he_writel(he_dev,
		  (he_is622(he_dev) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) |
		  TM_RW_WAIT(1), TCMCONFIG);

	he_writel(he_dev, he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD, LB_CONFIG);

	he_writel(he_dev,
		  (he_is622(he_dev) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) |
		  (he_is622(he_dev) ? RC_UT_MODE(0) : RC_UT_MODE(1)) |
		  RX_VALVP(he_dev->vpibits) |
		  RX_VALVC(he_dev->vcibits), RC_CONFIG);

	he_writel(he_dev, DRF_THRESH(0x20) |
		  (he_is622(he_dev) ? TX_UT_MODE(0) : TX_UT_MODE(1)) |
		  TX_VCI_MASK(he_dev->vcibits) |
		  LBFREE_CNT(he_dev->tx_numbuffs), TX_CONFIG);

	he_writel(he_dev, 0x0, TXAAL5_PROTO);

	he_writel(he_dev, PHY_INT_ENB |
		  (he_is622(he_dev) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)),
		  RH_CONFIG);

	/* 5.1.3 initialize connection memory */

	for (i = 0; i < TCM_MEM_SIZE; ++i)
		he_writel_tcm(he_dev, 0, i);

	for (i = 0; i < RCM_MEM_SIZE; ++i)
		he_writel_rcm(he_dev, 0, i);

	/*
	 *	transmit connection memory map
	 *
	 *                  tx memory
	 *          0x0 ___________________
	 *             |                   |
	 *             |                   |
	 *             |       TSRa        |
	 *             |                   |
	 *             |                   |
	 *       0x8000|___________________|
	 *             |                   |
	 *             |       TSRb        |
	 *       0xc000|___________________|
	 *             |                   |
	 *             |       TSRc        |
	 *       0xe000|___________________|
	 *             |       TSRd        |
	 *       0xf000|___________________|
	 *             |       tmABR       |
	 *      0x10000|___________________|
	 *             |                   |
	 *             |       tmTPD       |
	 *             |___________________|
	 *             |                   |
	 *                      ....
	 *      0x1ffff|___________________|
	 *
	 *
	 */

	he_writel(he_dev, CONFIG_TSRB, TSRB_BA);
	he_writel(he_dev, CONFIG_TSRC, TSRC_BA);
	he_writel(he_dev, CONFIG_TSRD, TSRD_BA);
	he_writel(he_dev, CONFIG_TMABR, TMABR_BA);
	he_writel(he_dev, CONFIG_TPDBA, TPD_BA);


	/*
	 *	receive connection memory map
	 *
	 *          0x0 ___________________
	 *             |                   |
	 *             |                   |
	 *             |       RSRa        |
	 *             |                   |
	 *             |                   |
	 *       0x8000|___________________|
	 *             |                   |
	 *             |      rx0/1        |
	 *             |       LBM         |   link lists of local
	 *             |       tx          |   buffer memory
	 *             |                   |
	 *       0xd000|___________________|
	 *             |                   |
	 *             |      rmABR        |
	 *       0xe000|___________________|
	 *             |                   |
	 *             |       RSRb        |
	 *             |___________________|
	 *             |                   |
	 *                      ....
	 *       0xffff|___________________|
	 */

	he_writel(he_dev, 0x08000, RCMLBM_BA);
	he_writel(he_dev, 0x0e000, RCMRSRB_BA);
	he_writel(he_dev, 0x0d800, RCMABR_BA);

	/* 5.1.4 initialize local buffer free pools linked lists */

	he_init_rx_lbfp0(he_dev);
	he_init_rx_lbfp1(he_dev);

	he_writel(he_dev, 0x0, RLBC_H);
	he_writel(he_dev, 0x0, RLBC_T);
	he_writel(he_dev, 0x0, RLBC_H2);

	he_writel(he_dev, 512, RXTHRSH);	/* 10% of r0+r1 buffers */
	he_writel(he_dev, 256, LITHRSH); 	/* 5% of r0+r1 buffers */

	he_init_tx_lbfp(he_dev);

	he_writel(he_dev, he_is622(he_dev) ? 0x104780 : 0x800, UBUFF_BA);

	/* 5.1.5 initialize intermediate receive queues */

	if (he_is622(he_dev)) {
		he_writel(he_dev, 0x000f, G0_INMQ_S);
		he_writel(he_dev, 0x200f, G0_INMQ_L);

		he_writel(he_dev, 0x001f, G1_INMQ_S);
		he_writel(he_dev, 0x201f, G1_INMQ_L);

		he_writel(he_dev, 0x002f, G2_INMQ_S);
		he_writel(he_dev, 0x202f, G2_INMQ_L);

		he_writel(he_dev, 0x003f, G3_INMQ_S);
		he_writel(he_dev, 0x203f, G3_INMQ_L);

		he_writel(he_dev, 0x004f, G4_INMQ_S);
		he_writel(he_dev, 0x204f, G4_INMQ_L);

		he_writel(he_dev, 0x005f, G5_INMQ_S);
		he_writel(he_dev, 0x205f, G5_INMQ_L);

		he_writel(he_dev, 0x006f, G6_INMQ_S);
		he_writel(he_dev, 0x206f, G6_INMQ_L);

		he_writel(he_dev, 0x007f, G7_INMQ_S);
		he_writel(he_dev, 0x207f, G7_INMQ_L);
	} else {
		he_writel(he_dev, 0x0000, G0_INMQ_S);
		he_writel(he_dev, 0x0008, G0_INMQ_L);

		he_writel(he_dev, 0x0001, G1_INMQ_S);
		he_writel(he_dev, 0x0009, G1_INMQ_L);

		he_writel(he_dev, 0x0002, G2_INMQ_S);
		he_writel(he_dev, 0x000a, G2_INMQ_L);

		he_writel(he_dev, 0x0003, G3_INMQ_S);
		he_writel(he_dev, 0x000b, G3_INMQ_L);

		he_writel(he_dev, 0x0004, G4_INMQ_S);
		he_writel(he_dev, 0x000c, G4_INMQ_L);

		he_writel(he_dev, 0x0005, G5_INMQ_S);
		he_writel(he_dev, 0x000d, G5_INMQ_L);

		he_writel(he_dev, 0x0006, G6_INMQ_S);
		he_writel(he_dev, 0x000e, G6_INMQ_L);

		he_writel(he_dev, 0x0007, G7_INMQ_S);
		he_writel(he_dev, 0x000f, G7_INMQ_L);
	}

	/* 5.1.6 application tunable parameters */

	he_writel(he_dev, 0x0, MCC);
	he_writel(he_dev, 0x0, OEC);
	he_writel(he_dev, 0x0, DCC);
	he_writel(he_dev, 0x0, CEC);

	/* 5.1.7 cs block initialization */

	he_init_cs_block(he_dev);

	/* 5.1.8 cs block connection memory initialization */

	if (he_init_cs_block_rcm(he_dev) < 0)
		return -ENOMEM;

	/* 5.1.10 initialize host structures */

	he_init_tpdrq(he_dev);

	he_dev->tpd_pool = dma_pool_create("tpd", &he_dev->pci_dev->dev,
					   sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
	if (he_dev->tpd_pool == NULL) {
		hprintk("unable to create tpd dma_pool\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&he_dev->outstanding_tpds);

	if (he_init_group(he_dev, 0) != 0)
		return -ENOMEM;

	for (group = 1; group < HE_NUM_GROUPS; ++group) {
		he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
			  G0_RBPS_BS + (group * 32));

		he_writel(he_dev, 0x0, G0_RBPL_S + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPL_T + (group * 32));
		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
			  G0_RBPL_QI + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPL_BS + (group * 32));

		he_writel(he_dev, 0x0, G0_RBRQ_ST + (group * 16));
		he_writel(he_dev, 0x0, G0_RBRQ_H + (group * 16));
		he_writel(he_dev, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0),
			  G0_RBRQ_Q + (group * 16));
		he_writel(he_dev, 0x0, G0_RBRQ_I + (group * 16));

		he_writel(he_dev, 0x0, G0_TBRQ_B_T + (group * 16));
		he_writel(he_dev, 0x0, G0_TBRQ_H + (group * 16));
		he_writel(he_dev, TBRQ_THRESH(0x1),
			  G0_TBRQ_THRESH + (group * 16));
		he_writel(he_dev, 0x0, G0_TBRQ_S + (group * 16));
	}

	/* host status page */

	he_dev->hsp = dma_alloc_coherent(&he_dev->pci_dev->dev,
					 sizeof(struct he_hsp),
					 &he_dev->hsp_phys, GFP_KERNEL);
	if (he_dev->hsp == NULL) {
		hprintk("failed to allocate host status page\n");
		return -ENOMEM;
	}
	he_writel(he_dev, he_dev->hsp_phys, HSP_BA);

	/* initialize framer */

#ifdef CONFIG_ATM_HE_USE_SUNI
	if (he_isMM(he_dev))
		suni_init(he_dev->atm_dev);
	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start)
		he_dev->atm_dev->phy->start(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

	if (sdh) {
		/* this really should be in suni.c but for now... */
		int val;

		val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM);
		val = (val & ~SUNI_TPOP_APM_S) | (SUNI_TPOP_S_SDH << SUNI_TPOP_APM_S_SHIFT);
		he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM);
		he_phy_put(he_dev->atm_dev, SUNI_TACP_IUCHP_CLP, SUNI_TACP_IUCHP);
	}

	/* 5.1.12 enable transmit and receive */

	reg = he_readl_mbox(he_dev, CS_ERCTL0);
	reg |= TX_ENABLE|ER_ENABLE;
	he_writel_mbox(he_dev, reg, CS_ERCTL0);

	reg = he_readl(he_dev, RC_CONFIG);
	reg |= RX_ENABLE;
	he_writel(he_dev, reg, RC_CONFIG);

	for (i = 0; i < HE_NUM_CS_STPER; ++i) {
		he_dev->cs_stper[i].inuse = 0;
		he_dev->cs_stper[i].pcr = -1;
	}
	he_dev->total_bw = 0;


	/* atm linux initialization */

	he_dev->atm_dev->ci_range.vpi_bits = he_dev->vpibits;
	he_dev->atm_dev->ci_range.vci_bits = he_dev->vcibits;

	he_dev->irq_peak = 0;
	he_dev->rbrq_peak = 0;
	he_dev->rbpl_peak = 0;
	he_dev->tbrq_peak = 0;

	HPRINTK("hell bent for leather!\n");

	return 0;
}

static void
he_stop(struct he_dev *he_dev)
{
	struct he_buff *heb, *next;
	struct pci_dev *pci_dev;
	u32 gen_cntl_0, reg;
	u16 command;

	pci_dev = he_dev->pci_dev;

	/* disable interrupts */

	if (he_dev->membase) {
		pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0);
		gen_cntl_0 &= ~(INT_PROC_ENBL | INIT_ENB);
		pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

		tasklet_disable(&he_dev->tasklet);

		/* disable recv and transmit */

		reg = he_readl_mbox(he_dev, CS_ERCTL0);
		reg &= ~(TX_ENABLE|ER_ENABLE);
		he_writel_mbox(he_dev, reg, CS_ERCTL0);

		reg = he_readl(he_dev, RC_CONFIG);
		reg &= ~(RX_ENABLE);
		he_writel(he_dev, reg, RC_CONFIG);
	}

#ifdef CONFIG_ATM_HE_USE_SUNI
	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->stop)
		he_dev->atm_dev->phy->stop(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

	if (he_dev->irq)
		free_irq(he_dev->irq, he_dev);

	if (he_dev->irq_base)
		dma_free_coherent(&he_dev->pci_dev->dev, (CONFIG_IRQ_SIZE + 1)
				  * sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);

	if (he_dev->hsp)
		dma_free_coherent(&he_dev->pci_dev->dev, sizeof(struct he_hsp),
				  he_dev->hsp, he_dev->hsp_phys);

	if (he_dev->rbpl_base) {
		list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
			dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);

		dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBPL_SIZE
				  * sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
	}

	kfree(he_dev->rbpl_virt);
	bitmap_free(he_dev->rbpl_table);
	dma_pool_destroy(he_dev->rbpl_pool);

	if (he_dev->rbrq_base)
		dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
				  he_dev->rbrq_base, he_dev->rbrq_phys);

	if (he_dev->tbrq_base)
		dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
				  he_dev->tbrq_base, he_dev->tbrq_phys);

	if (he_dev->tpdrq_base)
		dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
				  he_dev->tpdrq_base, he_dev->tpdrq_phys);

	dma_pool_destroy(he_dev->tpd_pool);

	if (he_dev->pci_dev) {
		pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
		command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
		pci_write_config_word(he_dev->pci_dev, PCI_COMMAND, command);
	}

	if (he_dev->membase)
		iounmap(he_dev->membase);
}

static struct he_tpd *
__alloc_tpd(struct he_dev *he_dev)
{
	struct he_tpd *tpd;
	dma_addr_t mapping;

	tpd = dma_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC, &mapping);
	if (tpd == NULL)
		return NULL;

	tpd->status = TPD_ADDR(mapping);
	tpd->reserved = 0;
	tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0;
	tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0;
	tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0;

	return tpd;
}

#define AAL5_LEN(buf,len) 						\
			((((unsigned char *)(buf))[(len)-6] << 8) |	\
				(((unsigned char *)(buf))[(len)-5]))
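
/*
 * the last eight bytes of an aal5 pdu are the trailer (UU, CPI, a
 * 2-byte length and a 4-byte crc), so the encoded pdu length sits at
 * byte offsets len-6 and len-5, as extracted above
 */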

/* 2.10.1.2 receive
 *
 * aal5 packets can optionally return the tcp checksum in the lower
 * 16 bits of the crc (RSR0_TCP_CKSUM)
 */

#define TCP_CKSUM(buf,len) 					\
			((((unsigned char *)(buf))[(len)-2] << 8) |	\
				(((unsigned char *)(buf))[(len)-1]))

static int
he_service_rbrq(struct he_dev *he_dev, int group)
{
	struct he_rbrq *rbrq_tail = (struct he_rbrq *)
				((unsigned long)he_dev->rbrq_base |
					he_dev->hsp->group[group].rbrq_tail);
	unsigned cid, lastcid = -1;
	struct sk_buff *skb;
	struct atm_vcc *vcc = NULL;
	struct he_vcc *he_vcc;
	struct he_buff *heb, *next;
	int i;
	int pdus_assembled = 0;
	int updated = 0;

	read_lock(&vcc_sklist_lock);
	while (he_dev->rbrq_head != rbrq_tail) {
		++updated;

		HPRINTK("%p rbrq%d 0x%x len=%d cid=0x%x %s%s%s%s%s%s\n",
			he_dev->rbrq_head, group,
			RBRQ_ADDR(he_dev->rbrq_head),
			RBRQ_BUFLEN(he_dev->rbrq_head),
			RBRQ_CID(he_dev->rbrq_head),
			RBRQ_CRC_ERR(he_dev->rbrq_head) ? " CRC_ERR" : "",
			RBRQ_LEN_ERR(he_dev->rbrq_head) ? " LEN_ERR" : "",
			RBRQ_END_PDU(he_dev->rbrq_head) ? " END_PDU" : "",
			RBRQ_AAL5_PROT(he_dev->rbrq_head) ? " AAL5_PROT" : "",
			RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "",
			RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : "");

		i = RBRQ_ADDR(he_dev->rbrq_head) >> RBP_IDX_OFFSET;
		heb = he_dev->rbpl_virt[i];

		cid = RBRQ_CID(he_dev->rbrq_head);
		if (cid != lastcid)
			vcc = __find_vcc(he_dev, cid);
		lastcid = cid;

		if (vcc == NULL || (he_vcc = HE_VCC(vcc)) == NULL) {
			hprintk("vcc/he_vcc == NULL (cid 0x%x)\n", cid);
			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
				clear_bit(i, he_dev->rbpl_table);
				list_del(&heb->entry);
				dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
			}

			goto next_rbrq_entry;
		}

		if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
			hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
			atomic_inc(&vcc->stats->rx_drop);
			goto return_host_buffers;
		}

		heb->len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;
		clear_bit(i, he_dev->rbpl_table);
		list_move_tail(&heb->entry, &he_vcc->buffers);
		he_vcc->pdu_len += heb->len;

		if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) {
			lastcid = -1;
			HPRINTK("wake_up rx_waitq (cid 0x%x)\n", cid);
			wake_up(&he_vcc->rx_waitq);
			goto return_host_buffers;
		}

		if (!RBRQ_END_PDU(he_dev->rbrq_head))
			goto next_rbrq_entry;

		if (RBRQ_LEN_ERR(he_dev->rbrq_head)
				|| RBRQ_CRC_ERR(he_dev->rbrq_head)) {
			HPRINTK("%s%s (%d.%d)\n",
				RBRQ_CRC_ERR(he_dev->rbrq_head)
							? "CRC_ERR " : "",
				RBRQ_LEN_ERR(he_dev->rbrq_head)
							? "LEN_ERR" : "",
							vcc->vpi, vcc->vci);
			atomic_inc(&vcc->stats->rx_err);
			goto return_host_buffers;
		}

		skb = atm_alloc_charge(vcc, he_vcc->pdu_len + rx_skb_reserve,
				       GFP_ATOMIC);
		if (!skb) {
			HPRINTK("charge failed (%d.%d)\n", vcc->vpi, vcc->vci);
			goto return_host_buffers;
		}

		if (rx_skb_reserve > 0)
			skb_reserve(skb, rx_skb_reserve);

		__net_timestamp(skb);

		list_for_each_entry(heb, &he_vcc->buffers, entry)
			skb_put_data(skb, &heb->data, heb->len);

		switch (vcc->qos.aal) {
		case ATM_AAL0:
			/* 2.10.1.5 raw cell receive */
			skb->len = ATM_AAL0_SDU;
			skb_set_tail_pointer(skb, skb->len);
			break;
		case ATM_AAL5:
			/* 2.10.1.2 aal5 receive */

			skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
			skb_set_tail_pointer(skb, skb->len);
#ifdef USE_CHECKSUM_HW
			if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
				skb->ip_summed = CHECKSUM_COMPLETE;
				skb->csum = TCP_CKSUM(skb->data,
						      he_vcc->pdu_len);
			}
#endif
			break;
		}

#ifdef should_never_happen
		if (skb->len > vcc->qos.rxtp.max_sdu)
			hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)! cid 0x%x\n", skb->len, vcc->qos.rxtp.max_sdu, cid);
#endif

#ifdef notdef
		ATM_SKB(skb)->vcc = vcc;
#endif
		spin_unlock(&he_dev->global_lock);
		vcc->push(vcc, skb);
		spin_lock(&he_dev->global_lock);

		atomic_inc(&vcc->stats->rx);

return_host_buffers:
		++pdus_assembled;

		list_for_each_entry_safe(heb, next, &he_vcc->buffers, entry)
			dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
		INIT_LIST_HEAD(&he_vcc->buffers);
		he_vcc->pdu_len = 0;

next_rbrq_entry:
		he_dev->rbrq_head = (struct he_rbrq *)
				((unsigned long) he_dev->rbrq_base |
					RBRQ_MASK(he_dev->rbrq_head + 1));

	}
	read_unlock(&vcc_sklist_lock);

	if (updated) {
		if (updated > he_dev->rbrq_peak)
			he_dev->rbrq_peak = updated;

		he_writel(he_dev, RBRQ_MASK(he_dev->rbrq_head),
			  G0_RBRQ_H + (group * 16));
	}

	return pdus_assembled;
}

static void
he_service_tbrq(struct he_dev *he_dev, int group)
{
	struct he_tbrq *tbrq_tail = (struct he_tbrq *)
				((unsigned long)he_dev->tbrq_base |
					he_dev->hsp->group[group].tbrq_tail);
	struct he_tpd *tpd;
	int slot, updated = 0;
	struct he_tpd *__tpd;

	/* 2.1.6 transmit buffer return queue */

	while (he_dev->tbrq_head != tbrq_tail) {
		++updated;

		HPRINTK("tbrq%d 0x%x%s%s\n",
			group,
			TBRQ_TPD(he_dev->tbrq_head),
			TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "",
			TBRQ_MULTIPLE(he_dev->tbrq_head) ? " MULTIPLE" : "");
		tpd = NULL;
		list_for_each_entry(__tpd, &he_dev->outstanding_tpds, entry) {
			if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) {
				tpd = __tpd;
				list_del(&__tpd->entry);
				break;
			}
		}

		if (tpd == NULL) {
			hprintk("unable to locate tpd for dma buffer %x\n",
				TBRQ_TPD(he_dev->tbrq_head));
			goto next_tbrq_entry;
		}

		if (TBRQ_EOS(he_dev->tbrq_head)) {
			HPRINTK("wake_up(tx_waitq) cid 0x%x\n",
				he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci));
			if (tpd->vcc)
				wake_up(&HE_VCC(tpd->vcc)->tx_waitq);

			goto next_tbrq_entry;
		}

		for (slot = 0; slot < TPD_MAXIOV; ++slot) {
			if (tpd->iovec[slot].addr)
				dma_unmap_single(&he_dev->pci_dev->dev,
						 tpd->iovec[slot].addr,
						 tpd->iovec[slot].len & TPD_LEN_MASK,
						 DMA_TO_DEVICE);
			if (tpd->iovec[slot].len & TPD_LST)
				break;

		}

		if (tpd->skb) {	/* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */
			if (tpd->vcc && tpd->vcc->pop)
				tpd->vcc->pop(tpd->vcc, tpd->skb);
			else
				dev_kfree_skb_any(tpd->skb);
		}

next_tbrq_entry:
		if (tpd)
			dma_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
		he_dev->tbrq_head = (struct he_tbrq *)
				((unsigned long) he_dev->tbrq_base |
					TBRQ_MASK(he_dev->tbrq_head + 1));
	}

	if (updated) {
		if (updated > he_dev->tbrq_peak)
			he_dev->tbrq_peak = updated;

		he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head),
			  G0_TBRQ_H + (group * 16));
	}
}

static void
he_service_rbpl(struct he_dev *he_dev, int group)
{
	struct he_rbp *new_tail;
	struct he_rbp *rbpl_head;
	struct he_buff *heb;
	dma_addr_t mapping;
	int i;
	int moved = 0;

	rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
					RBPL_MASK(he_readl(he_dev, G0_RBPL_S)));

	for (;;) {
		new_tail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
						RBPL_MASK(he_dev->rbpl_tail+1));

		/* table 3.42 -- rbpl_tail should never be set to rbpl_head */
		if (new_tail == rbpl_head)
			break;

		i = find_next_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE, he_dev->rbpl_hint);
		if (i > (RBPL_TABLE_SIZE - 1)) {
			i = find_first_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE);
			if (i > (RBPL_TABLE_SIZE - 1))
				break;
		}
		he_dev->rbpl_hint = i + 1;

		heb = dma_pool_alloc(he_dev->rbpl_pool, GFP_ATOMIC, &mapping);
		if (!heb)
			break;
		heb->mapping = mapping;
		list_add(&heb->entry, &he_dev->rbpl_outstanding);
		he_dev->rbpl_virt[i] = heb;
		set_bit(i, he_dev->rbpl_table);
		new_tail->idx = i << RBP_IDX_OFFSET;
		new_tail->phys = mapping + offsetof(struct he_buff, data);

		he_dev->rbpl_tail = new_tail;
		++moved;
	}

	if (moved)
		he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T);
}
1923
1924
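/*
 * Bottom half: drain the interrupt event queue filled in by
 * he_irq_handler() and dispatch on the type/group encoded in each
 * status word, then write the new queue tail back to the adapter
 * (with a read-back for the 8.1.2 posted-write errata).
 */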
static void
he_tasklet(unsigned long data)
{
	unsigned long flags;
	struct he_dev *he_dev = (struct he_dev *) data;
	int group, type;
	int updated = 0;

	HPRINTK("tasklet (0x%lx)\n", data);
	spin_lock_irqsave(&he_dev->global_lock, flags);

	while (he_dev->irq_head != he_dev->irq_tail) {
		++updated;

		type = ITYPE_TYPE(he_dev->irq_head->isw);
		group = ITYPE_GROUP(he_dev->irq_head->isw);

		switch (type) {
			case ITYPE_RBRQ_THRESH:
				HPRINTK("rbrq%d threshold\n", group);
				fallthrough;
			case ITYPE_RBRQ_TIMER:
				if (he_service_rbrq(he_dev, group))
					he_service_rbpl(he_dev, group);
				break;
			case ITYPE_TBRQ_THRESH:
				HPRINTK("tbrq%d threshold\n", group);
				fallthrough;
			case ITYPE_TPD_COMPLETE:
				he_service_tbrq(he_dev, group);
				break;
			case ITYPE_RBPL_THRESH:
				he_service_rbpl(he_dev, group);
				break;
			case ITYPE_RBPS_THRESH:
				/* shouldn't happen unless small buffers enabled */
				break;
			case ITYPE_PHY:
				HPRINTK("phy interrupt\n");
#ifdef CONFIG_ATM_HE_USE_SUNI
				spin_unlock_irqrestore(&he_dev->global_lock, flags);
				if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->interrupt)
					he_dev->atm_dev->phy->interrupt(he_dev->atm_dev);
				spin_lock_irqsave(&he_dev->global_lock, flags);
#endif
				break;
			case ITYPE_OTHER:
				switch (type|group) {
					case ITYPE_PARITY:
						hprintk("parity error\n");
						break;
					case ITYPE_ABORT:
						hprintk("abort 0x%x\n", he_readl(he_dev, ABORT_ADDR));
						break;
				}
				break;
			case ITYPE_TYPE(ITYPE_INVALID):
				/* see 8.1.1 -- check all queues */

				HPRINTK("isw not updated 0x%x\n", he_dev->irq_head->isw);

				he_service_rbrq(he_dev, 0);
				he_service_rbpl(he_dev, 0);
				he_service_tbrq(he_dev, 0);
				break;
			default:
				hprintk("bad isw 0x%x?\n", he_dev->irq_head->isw);
		}

		he_dev->irq_head->isw = ITYPE_INVALID;

		he_dev->irq_head = (struct he_irq *) NEXT_ENTRY(he_dev->irq_base, he_dev->irq_head, IRQ_MASK);
	}

	if (updated) {
		if (updated > he_dev->irq_peak)
			he_dev->irq_peak = updated;

		he_writel(he_dev,
			IRQ_SIZE(CONFIG_IRQ_SIZE) |
			IRQ_THRESH(CONFIG_IRQ_THRESH) |
			IRQ_TAIL(he_dev->irq_tail), IRQ0_HEAD);
		(void) he_readl(he_dev, INT_FIFO);	/* 8.1.2 controller errata; flush posted writes */
	}
	spin_unlock_irqrestore(&he_dev->global_lock, flags);
}

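/*
 * Hard interrupt handler: latch the adapter's interrupt queue tail,
 * ack the interrupt, and defer the real work to he_tasklet().
 */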
static irqreturn_t
he_irq_handler(int irq, void *dev_id)
{
	unsigned long flags;
	struct he_dev *he_dev = (struct he_dev *)dev_id;
	int handled = 0;

	if (he_dev == NULL)
		return IRQ_NONE;

	spin_lock_irqsave(&he_dev->global_lock, flags);

	he_dev->irq_tail = (struct he_irq *) (((unsigned long)he_dev->irq_base) |
						(*he_dev->irq_tailoffset << 2));

	if (he_dev->irq_tail == he_dev->irq_head) {
		HPRINTK("tailoffset not updated?\n");
		he_dev->irq_tail = (struct he_irq *) ((unsigned long)he_dev->irq_base |
			((he_readl(he_dev, IRQ0_BASE) & IRQ_MASK) << 2));
		(void) he_readl(he_dev, INT_FIFO);	/* 8.1.2 controller errata */
	}

#ifdef DEBUG
	if (he_dev->irq_head == he_dev->irq_tail /* && !IRQ_PENDING */)
		hprintk("spurious (or shared) interrupt?\n");
#endif

	if (he_dev->irq_head != he_dev->irq_tail) {
		handled = 1;
		tasklet_schedule(&he_dev->tasklet);
		he_writel(he_dev, INT_CLEAR_A, INT_FIFO);	/* clear interrupt */
		(void) he_readl(he_dev, INT_FIFO);		/* flush posted writes */
	}
	spin_unlock_irqrestore(&he_dev->global_lock, flags);
	return IRQ_RETVAL(handled);
}

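/*
 * Add one TPD to the transmit packet descriptor ready queue (TPDRQ).
 * The cached head pointer is refreshed from the adapter only when the
 * tail is about to catch up to it; if the ring is genuinely full, the
 * PDU is unmapped, freed and counted as a tx error (see the FIXME
 * below).  Called with he_dev->global_lock held.
 */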
static __inline__ void
__enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
{
	struct he_tpdrq *new_tail;

	HPRINTK("tpdrq %p cid 0x%x -> tpdrq_tail %p\n",
					tpd, cid, he_dev->tpdrq_tail);

	/* new_tail = he_dev->tpdrq_tail; */
	new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base |
					TPDRQ_MASK(he_dev->tpdrq_tail+1));

	/*
	 * check to see if we are about to set the tail == head
	 * if true, update the head pointer from the adapter
	 * to see if this is really the case (reading the queue
	 * head for every enqueue would be unnecessarily slow)
	 */

	if (new_tail == he_dev->tpdrq_head) {
		he_dev->tpdrq_head = (struct he_tpdrq *)
			(((unsigned long)he_dev->tpdrq_base) |
				TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H)));

		if (new_tail == he_dev->tpdrq_head) {
			int slot;

			hprintk("tpdrq full (cid 0x%x)\n", cid);
			/*
			 * FIXME
			 * push tpd onto a transmit backlog queue
			 * after service_tbrq, service the backlog
			 * for now, we just drop the pdu
			 */
			for (slot = 0; slot < TPD_MAXIOV; ++slot) {
				if (tpd->iovec[slot].addr)
					dma_unmap_single(&he_dev->pci_dev->dev,
						tpd->iovec[slot].addr,
						tpd->iovec[slot].len & TPD_LEN_MASK,
								DMA_TO_DEVICE);
			}
			if (tpd->skb) {
				if (tpd->vcc->pop)
					tpd->vcc->pop(tpd->vcc, tpd->skb);
				else
					dev_kfree_skb_any(tpd->skb);
				atomic_inc(&tpd->vcc->stats->tx_err);
			}
			dma_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
			return;
		}
	}

	/* 2.1.5 transmit packet descriptor ready queue */
	list_add_tail(&tpd->entry, &he_dev->outstanding_tpds);
	he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status);
	he_dev->tpdrq_tail->cid = cid;
	wmb();

	he_dev->tpdrq_tail = new_tail;

	he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T);
	(void) he_readl(he_dev, TPDRQ_T);	/* flush posted writes */
}

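/*
 * Open a vcc: allocate per-vcc state, then program the transmit
 * (TSR0..TSR14) and/or receive (RSR0/1/4) connection state for the
 * cid according to the requested traffic class and AAL.  CBR opens
 * also reserve link bandwidth and a CS_STPER rate-controller slot.
 */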
static int
he_open(struct atm_vcc *vcc)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(vcc->dev);
	struct he_vcc *he_vcc;
	int err = 0;
	unsigned cid, rsr0, rsr1, rsr4, tsr0, tsr0_aal, tsr4, period, reg, clock;
	short vpi = vcc->vpi;
	int vci = vcc->vci;

	if (vci == ATM_VCI_UNSPEC || vpi == ATM_VPI_UNSPEC)
		return 0;

	HPRINTK("open vcc %p %d.%d\n", vcc, vpi, vci);

	set_bit(ATM_VF_ADDR, &vcc->flags);

	cid = he_mkcid(he_dev, vpi, vci);

	he_vcc = kmalloc(sizeof(struct he_vcc), GFP_ATOMIC);
	if (he_vcc == NULL) {
		hprintk("unable to allocate he_vcc during open\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&he_vcc->buffers);
	he_vcc->pdu_len = 0;
	he_vcc->rc_index = -1;

	init_waitqueue_head(&he_vcc->rx_waitq);
	init_waitqueue_head(&he_vcc->tx_waitq);

	vcc->dev_data = he_vcc;

	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
		int pcr_goal;

		pcr_goal = atm_pcr_goal(&vcc->qos.txtp);
		if (pcr_goal == 0)
			pcr_goal = he_dev->atm_dev->link_rate;
		if (pcr_goal < 0)	/* means round down, technically */
			pcr_goal = -pcr_goal;

		HPRINTK("open tx cid 0x%x pcr_goal %d\n", cid, pcr_goal);

		switch (vcc->qos.aal) {
			case ATM_AAL5:
				tsr0_aal = TSR0_AAL5;
				tsr4 = TSR4_AAL5;
				break;
			case ATM_AAL0:
				tsr0_aal = TSR0_AAL0_SDU;
				tsr4 = TSR4_AAL0_SDU;
				break;
			default:
				err = -EINVAL;
				goto open_failed;
		}

		spin_lock_irqsave(&he_dev->global_lock, flags);
		tsr0 = he_readl_tsr0(he_dev, cid);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		if (TSR0_CONN_STATE(tsr0) != 0) {
			hprintk("cid 0x%x not idle (tsr0 = 0x%x)\n", cid, tsr0);
			err = -EBUSY;
			goto open_failed;
		}

		switch (vcc->qos.txtp.traffic_class) {
			case ATM_UBR:
				/* 2.3.3.1 open connection ubr */

				tsr0 = TSR0_UBR | TSR0_GROUP(0) | tsr0_aal |
					TSR0_USE_WMIN | TSR0_UPDATE_GER;
				break;

			case ATM_CBR:
				/* 2.3.3.2 open connection cbr */

				/* 8.2.3 cbr scheduler wrap problem -- limit to 90% total link rate */
				if ((he_dev->total_bw + pcr_goal)
					> (he_dev->atm_dev->link_rate * 9 / 10))
				{
					err = -EBUSY;
					goto open_failed;
				}

				spin_lock_irqsave(&he_dev->global_lock, flags);	/* also protects he_dev->cs_stper[] */

				/* find an unused cs_stper register */
				for (reg = 0; reg < HE_NUM_CS_STPER; ++reg)
					if (he_dev->cs_stper[reg].inuse == 0 ||
					    he_dev->cs_stper[reg].pcr == pcr_goal)
						break;

				if (reg == HE_NUM_CS_STPER) {
					err = -EBUSY;
					spin_unlock_irqrestore(&he_dev->global_lock, flags);
					goto open_failed;
				}

				he_dev->total_bw += pcr_goal;

				he_vcc->rc_index = reg;
				++he_dev->cs_stper[reg].inuse;
				he_dev->cs_stper[reg].pcr = pcr_goal;

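				/*
				 * program the rate controller: the scheduler
				 * clock is 66.67 MHz on 622 Mb/s cards and
				 * 50 MHz otherwise, so clock / pcr_goal is the
				 * per-cell period in clock ticks; half of that
				 * period, converted by rate_to_atmf(), is
				 * loaded into the CS_STPER register claimed
				 * above
				 */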
				clock = he_is622(he_dev) ? 66667000 : 50000000;
				period = clock / pcr_goal;

				HPRINTK("rc_index = %d period = %d\n",
								reg, period);

				he_writel_mbox(he_dev, rate_to_atmf(period/2),
							CS_STPER0 + reg);
				spin_unlock_irqrestore(&he_dev->global_lock, flags);

				tsr0 = TSR0_CBR | TSR0_GROUP(0) | tsr0_aal |
							TSR0_RC_INDEX(reg);

				break;
			default:
				err = -EINVAL;
				goto open_failed;
		}

		spin_lock_irqsave(&he_dev->global_lock, flags);

		he_writel_tsr0(he_dev, tsr0, cid);
		he_writel_tsr4(he_dev, tsr4 | 1, cid);
		he_writel_tsr1(he_dev, TSR1_MCR(rate_to_atmf(0)) |
					TSR1_PCR(rate_to_atmf(pcr_goal)), cid);
		he_writel_tsr2(he_dev, TSR2_ACR(rate_to_atmf(pcr_goal)), cid);
		he_writel_tsr9(he_dev, TSR9_OPEN_CONN, cid);

		he_writel_tsr3(he_dev, 0x0, cid);
		he_writel_tsr5(he_dev, 0x0, cid);
		he_writel_tsr6(he_dev, 0x0, cid);
		he_writel_tsr7(he_dev, 0x0, cid);
		he_writel_tsr8(he_dev, 0x0, cid);
		he_writel_tsr10(he_dev, 0x0, cid);
		he_writel_tsr11(he_dev, 0x0, cid);
		he_writel_tsr12(he_dev, 0x0, cid);
		he_writel_tsr13(he_dev, 0x0, cid);
		he_writel_tsr14(he_dev, 0x0, cid);
		(void) he_readl_tsr0(he_dev, cid);	/* flush posted writes */
		spin_unlock_irqrestore(&he_dev->global_lock, flags);
	}

	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
		unsigned aal;

		HPRINTK("open rx cid 0x%x (rx_waitq %p)\n", cid,
						&HE_VCC(vcc)->rx_waitq);

		switch (vcc->qos.aal) {
			case ATM_AAL5:
				aal = RSR0_AAL5;
				break;
			case ATM_AAL0:
				aal = RSR0_RAWCELL;
				break;
			default:
				err = -EINVAL;
				goto open_failed;
		}

		spin_lock_irqsave(&he_dev->global_lock, flags);

		rsr0 = he_readl_rsr0(he_dev, cid);
		if (rsr0 & RSR0_OPEN_CONN) {
			spin_unlock_irqrestore(&he_dev->global_lock, flags);

			hprintk("cid 0x%x not idle (rsr0 = 0x%x)\n", cid, rsr0);
			err = -EBUSY;
			goto open_failed;
		}

		rsr1 = RSR1_GROUP(0) | RSR1_RBPL_ONLY;
		rsr4 = RSR4_GROUP(0) | RSR4_RBPL_ONLY;
		rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ?
				(RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0;

#ifdef USE_CHECKSUM_HW
		if (vpi == 0 && vci >= ATM_NOT_RSV_VCI)
			rsr0 |= RSR0_TCP_CKSUM;
#endif

		he_writel_rsr4(he_dev, rsr4, cid);
		he_writel_rsr1(he_dev, rsr1, cid);
		/* 5.1.11 last parameter initialized should be
			  the open/closed indication in rsr0 */
		he_writel_rsr0(he_dev,
			rsr0 | RSR0_START_PDU | RSR0_OPEN_CONN | aal, cid);
		(void) he_readl_rsr0(he_dev, cid);	/* flush posted writes */

		spin_unlock_irqrestore(&he_dev->global_lock, flags);
	}

open_failed:

	if (err) {
		kfree(he_vcc);
		clear_bit(ATM_VF_ADDR, &vcc->flags);
	}
	else
		set_bit(ATM_VF_READY, &vcc->flags);

	return err;
}

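/*
 * Close a vcc.  The receive side issues RSR0_CLOSE_CONN plus an
 * RXCON_CLOSE mailbox command and sleeps until he_service_rbrq()
 * reports the close completion.  The transmit side first lets
 * in-flight PDUs drain, flushes the connection, then uses an
 * end-of-stream TPD (see below) to learn when the adapter is done.
 */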
static void
he_close(struct atm_vcc *vcc)
{
	unsigned long flags;
	DECLARE_WAITQUEUE(wait, current);
	struct he_dev *he_dev = HE_DEV(vcc->dev);
	struct he_tpd *tpd;
	unsigned cid;
	struct he_vcc *he_vcc = HE_VCC(vcc);
#define MAX_RETRY 30
	int retry = 0, sleep = 1, tx_inuse;

	HPRINTK("close vcc %p %d.%d\n", vcc, vcc->vpi, vcc->vci);

	clear_bit(ATM_VF_READY, &vcc->flags);
	cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);

	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
		int timeout;

		HPRINTK("close rx cid 0x%x\n", cid);

		/* 2.7.2.2 close receive operation */

		/* wait for previous close (if any) to finish */

		spin_lock_irqsave(&he_dev->global_lock, flags);
		while (he_readl(he_dev, RCC_STAT) & RCC_BUSY) {
			HPRINTK("close cid 0x%x RCC_BUSY\n", cid);
			udelay(250);
		}

		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&he_vcc->rx_waitq, &wait);

		he_writel_rsr0(he_dev, RSR0_CLOSE_CONN, cid);
		(void) he_readl_rsr0(he_dev, cid);	/* flush posted writes */
		he_writel_mbox(he_dev, cid, RXCON_CLOSE);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		timeout = schedule_timeout(30*HZ);

		remove_wait_queue(&he_vcc->rx_waitq, &wait);
		set_current_state(TASK_RUNNING);

		if (timeout == 0)
			hprintk("close rx timeout cid 0x%x\n", cid);

		HPRINTK("close rx cid 0x%x complete\n", cid);
	}

	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
		volatile unsigned tsr4, tsr0;
		int timeout;

		HPRINTK("close tx cid 0x%x\n", cid);

		/* 2.1.2
		 *
		 * ... the host must first stop queueing packets to the TPDRQ
		 * on the connection to be closed, then wait for all outstanding
		 * packets to be transmitted and their buffers returned to the
		 * TBRQ. When the last packet on the connection arrives in the
		 * TBRQ, the host issues the close command to the adapter.
		 */

		while (((tx_inuse = refcount_read(&sk_atm(vcc)->sk_wmem_alloc)) > 1) &&
		       (retry < MAX_RETRY)) {
			msleep(sleep);
			if (sleep < 250)
				sleep = sleep * 2;

			++retry;
		}

		if (tx_inuse > 1)
			hprintk("close tx cid 0x%x tx_inuse = %d\n", cid, tx_inuse);

		/* 2.3.1.1 generic close operations with flush */

		spin_lock_irqsave(&he_dev->global_lock, flags);
		he_writel_tsr4_upper(he_dev, TSR4_FLUSH_CONN, cid);
					/* also clears TSR4_SESSION_ENDED */

		switch (vcc->qos.txtp.traffic_class) {
			case ATM_UBR:
				he_writel_tsr1(he_dev,
					TSR1_MCR(rate_to_atmf(200000))
					| TSR1_PCR(0), cid);
				break;
			case ATM_CBR:
				he_writel_tsr14_upper(he_dev, TSR14_DELETE, cid);
				break;
		}
		(void) he_readl_tsr4(he_dev, cid);	/* flush posted writes */

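		/*
		 * queue a zero-length TPD with TPD_EOS | TPD_INT set: when it
		 * comes back through the TBRQ, he_service_tbrq() wakes
		 * tx_waitq, which tells us everything queued ahead of it on
		 * this connection has been processed
		 */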
		tpd = __alloc_tpd(he_dev);
		if (tpd == NULL) {
			hprintk("close tx he_alloc_tpd failed cid 0x%x\n", cid);
			goto close_tx_incomplete;
		}
		tpd->status |= TPD_EOS | TPD_INT;
		tpd->skb = NULL;
		tpd->vcc = vcc;
		wmb();

		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&he_vcc->tx_waitq, &wait);
		__enqueue_tpd(he_dev, tpd, cid);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		timeout = schedule_timeout(30*HZ);

		remove_wait_queue(&he_vcc->tx_waitq, &wait);
		set_current_state(TASK_RUNNING);

		spin_lock_irqsave(&he_dev->global_lock, flags);

		if (timeout == 0) {
			hprintk("close tx timeout cid 0x%x\n", cid);
			goto close_tx_incomplete;
		}

		while (!((tsr4 = he_readl_tsr4(he_dev, cid)) & TSR4_SESSION_ENDED)) {
			HPRINTK("close tx cid 0x%x !TSR4_SESSION_ENDED (tsr4 = 0x%x)\n", cid, tsr4);
			udelay(250);
		}

		while (TSR0_CONN_STATE(tsr0 = he_readl_tsr0(he_dev, cid)) != 0) {
			HPRINTK("close tx cid 0x%x TSR0_CONN_STATE != 0 (tsr0 = 0x%x)\n", cid, tsr0);
			udelay(250);
		}

close_tx_incomplete:

		if (vcc->qos.txtp.traffic_class == ATM_CBR) {
			int reg = he_vcc->rc_index;

			HPRINTK("cs_stper reg = %d\n", reg);

			if (he_dev->cs_stper[reg].inuse == 0)
				hprintk("cs_stper[%d].inuse = 0!\n", reg);
			else
				--he_dev->cs_stper[reg].inuse;

			he_dev->total_bw -= he_dev->cs_stper[reg].pcr;
		}
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		HPRINTK("close tx cid 0x%x complete\n", cid);
	}

	kfree(he_vcc);

	clear_bit(ATM_VF_ADDR, &vcc->flags);
}

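/*
 * Transmit one PDU: validate the length for the AAL in use, build a
 * TPD (scatter/gather iovecs under USE_SCATTERGATHER, otherwise one
 * mapping of the linear skb), and hand it to __enqueue_tpd().
 */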
static int
he_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(vcc->dev);
	unsigned cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
	struct he_tpd *tpd;
#ifdef USE_SCATTERGATHER
	int i, slot = 0;
#endif

#define HE_TPD_BUFSIZE 0xffff

	HPRINTK("send %d.%d\n", vcc->vpi, vcc->vci);

	if ((skb->len > HE_TPD_BUFSIZE) ||
	    ((vcc->qos.aal == ATM_AAL0) && (skb->len != ATM_AAL0_SDU))) {
		hprintk("buffer too large (or small) -- %d bytes\n", skb->len);
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		atomic_inc(&vcc->stats->tx_err);
		return -EINVAL;
	}

#ifndef USE_SCATTERGATHER
	if (skb_shinfo(skb)->nr_frags) {
		hprintk("no scatter/gather support\n");
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		atomic_inc(&vcc->stats->tx_err);
		return -EINVAL;
	}
#endif
	spin_lock_irqsave(&he_dev->global_lock, flags);

	tpd = __alloc_tpd(he_dev);
	if (tpd == NULL) {
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		atomic_inc(&vcc->stats->tx_err);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);
		return -ENOMEM;
	}

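	/*
	 * for AAL5 the adapter builds the cell headers itself; for AAL0 the
	 * caller hands us a whole cell (ATM_AAL0_SDU bytes), so lift PTI and
	 * CLP out of the fourth header byte and strip the header, leaving
	 * only the 48-byte payload for the adapter
	 */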
	if (vcc->qos.aal == ATM_AAL5)
		tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
	else {
		char *pti_clp = (void *) (skb->data + 3);
		int clp, pti;

		pti = (*pti_clp & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
		clp = (*pti_clp & ATM_HDR_CLP);
		tpd->status |= TPD_CELLTYPE(pti);
		if (clp)
			tpd->status |= TPD_CLP;

		skb_pull(skb, ATM_AAL0_SDU - ATM_CELL_PAYLOAD);
	}

#ifdef USE_SCATTERGATHER
	tpd->iovec[slot].addr = dma_map_single(&he_dev->pci_dev->dev, skb->data,
				skb_headlen(skb), DMA_TO_DEVICE);
	tpd->iovec[slot].len = skb_headlen(skb);
	++slot;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		if (slot == TPD_MAXIOV) {	/* queue tpd; start new tpd */
			tpd->vcc = vcc;
			tpd->skb = NULL;	/* not the last fragment
						   so don't ->push() yet */
			wmb();

			__enqueue_tpd(he_dev, tpd, cid);
			tpd = __alloc_tpd(he_dev);
			if (tpd == NULL) {
				if (vcc->pop)
					vcc->pop(vcc, skb);
				else
					dev_kfree_skb_any(skb);
				atomic_inc(&vcc->stats->tx_err);
				spin_unlock_irqrestore(&he_dev->global_lock, flags);
				return -ENOMEM;
			}
			tpd->status |= TPD_USERCELL;
			slot = 0;
		}

		tpd->iovec[slot].addr = skb_frag_dma_map(&he_dev->pci_dev->dev,
				frag, 0, skb_frag_size(frag), DMA_TO_DEVICE);
		tpd->iovec[slot].len = skb_frag_size(frag);
		++slot;
	}

	tpd->iovec[slot - 1].len |= TPD_LST;
#else
	tpd->address0 = dma_map_single(&he_dev->pci_dev->dev, skb->data, skb->len, DMA_TO_DEVICE);
	tpd->length0 = skb->len | TPD_LST;
#endif
	tpd->status |= TPD_INT;

	tpd->vcc = vcc;
	tpd->skb = skb;
	wmb();
	ATM_SKB(skb)->vcc = vcc;

	__enqueue_tpd(he_dev, tpd, cid);
	spin_unlock_irqrestore(&he_dev->global_lock, flags);

	atomic_inc(&vcc->stats->tx);

	return 0;
}

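/*
 * Device ioctl: HE_GET_REG lets a CAP_NET_ADMIN caller read a PCI,
 * RCM, TCM or mailbox register; everything else is passed through to
 * the PHY driver when one is attached.
 */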
static int
he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(atm_dev);
	struct he_ioctl_reg reg;
	int err = 0;

	switch (cmd) {
		case HE_GET_REG:
			if (!capable(CAP_NET_ADMIN))
				return -EPERM;

			if (copy_from_user(&reg, arg,
					   sizeof(struct he_ioctl_reg)))
				return -EFAULT;

			spin_lock_irqsave(&he_dev->global_lock, flags);
			switch (reg.type) {
				case HE_REGTYPE_PCI:
					if (reg.addr >= HE_REGMAP_SIZE) {
						err = -EINVAL;
						break;
					}

					reg.val = he_readl(he_dev, reg.addr);
					break;
				case HE_REGTYPE_RCM:
					reg.val =
						he_readl_rcm(he_dev, reg.addr);
					break;
				case HE_REGTYPE_TCM:
					reg.val =
						he_readl_tcm(he_dev, reg.addr);
					break;
				case HE_REGTYPE_MBOX:
					reg.val =
						he_readl_mbox(he_dev, reg.addr);
					break;
				default:
					err = -EINVAL;
					break;
			}
			spin_unlock_irqrestore(&he_dev->global_lock, flags);
			if (err == 0)
				if (copy_to_user(arg, &reg,
						 sizeof(struct he_ioctl_reg)))
					return -EFAULT;
			break;
		default:
#ifdef CONFIG_ATM_HE_USE_SUNI
			if (atm_dev->phy && atm_dev->phy->ioctl)
				err = atm_dev->phy->ioctl(atm_dev, cmd, arg);
#else /* CONFIG_ATM_HE_USE_SUNI */
			err = -EINVAL;
#endif /* CONFIG_ATM_HE_USE_SUNI */
			break;
	}

	return err;
}

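/*
 * PHY register accessors used by the SUNI framer code; FRAMER space is
 * register-indexed, hence the addr*4 byte offset.
 */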
static void
he_phy_put(struct atm_dev *atm_dev, unsigned char val, unsigned long addr)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(atm_dev);

	HPRINTK("phy_put(val 0x%x, addr 0x%lx)\n", val, addr);

	spin_lock_irqsave(&he_dev->global_lock, flags);
	he_writel(he_dev, val, FRAMER + (addr*4));
	(void) he_readl(he_dev, FRAMER + (addr*4));	/* flush posted writes */
	spin_unlock_irqrestore(&he_dev->global_lock, flags);
}

static unsigned char
he_phy_get(struct atm_dev *atm_dev, unsigned long addr)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(atm_dev);
	unsigned reg;

	spin_lock_irqsave(&he_dev->global_lock, flags);
	reg = he_readl(he_dev, FRAMER + (addr*4));
	spin_unlock_irqrestore(&he_dev->global_lock, flags);

	HPRINTK("phy_get(addr 0x%lx) = 0x%x\n", addr, reg);
	return reg;
}

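/*
 * /proc/atm read callback, emitting one line per *pos.  Note that the
 * cell counters are accumulated in static variables, so they aggregate
 * across reads -- and across adapters, if more than one is present.
 */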
static int
he_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(dev);
	int left, i;
#ifdef notdef
	struct he_rbrq *rbrq_tail;
	struct he_tpdrq *tpdrq_head;
	int rbpl_head, rbpl_tail;
#endif
	static long mcc = 0, oec = 0, dcc = 0, cec = 0;

	left = *pos;
	if (!left--)
		return sprintf(page, "ATM he driver\n");

	if (!left--)
		return sprintf(page, "%s%s\n\n",
			he_dev->prod_id, he_dev->media & 0x40 ? "SM" : "MM");

	if (!left--)
		return sprintf(page, "Mismatched Cells VPI/VCI Not Open Dropped Cells RCM Dropped Cells\n");

	spin_lock_irqsave(&he_dev->global_lock, flags);
	mcc += he_readl(he_dev, MCC);
	oec += he_readl(he_dev, OEC);
	dcc += he_readl(he_dev, DCC);
	cec += he_readl(he_dev, CEC);
	spin_unlock_irqrestore(&he_dev->global_lock, flags);

	if (!left--)
		return sprintf(page, "%16ld %16ld %13ld %17ld\n\n",
			mcc, oec, dcc, cec);

	if (!left--)
		return sprintf(page, "irq_size = %d inuse = ? peak = %d\n",
				CONFIG_IRQ_SIZE, he_dev->irq_peak);

	if (!left--)
		return sprintf(page, "tpdrq_size = %d inuse = ?\n",
						CONFIG_TPDRQ_SIZE);

	if (!left--)
		return sprintf(page, "rbrq_size = %d inuse = ? peak = %d\n",
				CONFIG_RBRQ_SIZE, he_dev->rbrq_peak);

	if (!left--)
		return sprintf(page, "tbrq_size = %d peak = %d\n",
					CONFIG_TBRQ_SIZE, he_dev->tbrq_peak);

#ifdef notdef
	rbpl_head = RBPL_MASK(he_readl(he_dev, G0_RBPL_S));
	rbpl_tail = RBPL_MASK(he_readl(he_dev, G0_RBPL_T));

	inuse = rbpl_head - rbpl_tail;
	if (inuse < 0)
		inuse += CONFIG_RBPL_SIZE * sizeof(struct he_rbp);
	inuse /= sizeof(struct he_rbp);

	if (!left--)
		return sprintf(page, "rbpl_size = %d inuse = %d\n\n",
						CONFIG_RBPL_SIZE, inuse);
#endif

	if (!left--)
		return sprintf(page, "rate controller periods (cbr)\n pcr #vc\n");

	for (i = 0; i < HE_NUM_CS_STPER; ++i)
		if (!left--)
			return sprintf(page, "cs_stper%-2d %8ld %3d\n", i,
						he_dev->cs_stper[i].pcr,
						he_dev->cs_stper[i].inuse);

	if (!left--)
		return sprintf(page, "total bw (cbr): %d (limit %d)\n",
			he_dev->total_bw, he_dev->atm_dev->link_rate * 9 / 10);

	return 0;
}

/* eeprom routines -- see 4.7 */

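/*
 * Bit-bang one byte out of the serial EEPROM via HOST_CNTL: send the
 * READ opcode from readtab[], clock out the eight address bits msb
 * first, then clock in the eight data bits, sampling ID_DOUT between
 * the paired half-clock writes from clocktab[].
 */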
static u8 read_prom_byte(struct he_dev *he_dev, int addr)
{
	u32 val = 0, tmp_read = 0;
	int i, j = 0;
	u8 byte_read = 0;

	val = readl(he_dev->membase + HOST_CNTL);
	val &= 0xFFFFE0FF;

	/* Turn on write enable */
	val |= 0x800;
	he_writel(he_dev, val, HOST_CNTL);

	/* Send READ instruction */
	for (i = 0; i < ARRAY_SIZE(readtab); i++) {
		he_writel(he_dev, val | readtab[i], HOST_CNTL);
		udelay(EEPROM_DELAY);
	}

	/* Next, we need to send the byte address to read from */
	for (i = 7; i >= 0; i--) {
		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
		udelay(EEPROM_DELAY);
		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
		udelay(EEPROM_DELAY);
	}

	j = 0;

	val &= 0xFFFFF7FF;	/* Turn off write enable */
	he_writel(he_dev, val, HOST_CNTL);

	/* Now, we can read data from the EEPROM by clocking it in */
	for (i = 7; i >= 0; i--) {
		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
		udelay(EEPROM_DELAY);
		tmp_read = he_readl(he_dev, HOST_CNTL);
		byte_read |= (unsigned char)
			((tmp_read & ID_DOUT) >> ID_DOFFSET << i);
		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
		udelay(EEPROM_DELAY);
	}

	he_writel(he_dev, val | ID_CS, HOST_CNTL);
	udelay(EEPROM_DELAY);

	return byte_read;
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("chas williams <[email protected]>");
MODULE_DESCRIPTION("ForeRunnerHE ATM Adapter driver");
module_param(disable64, bool, 0);
MODULE_PARM_DESC(disable64, "disable 64-bit pci bus transfers");
module_param(nvpibits, short, 0);
MODULE_PARM_DESC(nvpibits, "number of bits for vpi (default 0)");
module_param(nvcibits, short, 0);
MODULE_PARM_DESC(nvcibits, "number of bits for vci (default 12)");
module_param(rx_skb_reserve, short, 0);
MODULE_PARM_DESC(rx_skb_reserve, "padding for receive skb (default 16)");
module_param(irq_coalesce, bool, 0);
MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)");
module_param(sdh, bool, 0);
MODULE_PARM_DESC(sdh, "use SDH framing (default 0)");

static const struct pci_device_id he_pci_tbl[] = {
	{ PCI_VDEVICE(FORE, PCI_DEVICE_ID_FORE_HE), 0 },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, he_pci_tbl);

static struct pci_driver he_driver = {
	.name =		"he",
	.probe =	he_init_one,
	.remove =	he_remove_one,
	.id_table =	he_pci_tbl,
};

module_pci_driver(he_driver);