GitHub Repository: torvalds/linux
Path: blob/master/drivers/atm/he.c
/*

  he.c

  ForeRunnerHE ATM Adapter driver for ATM on Linux
  Copyright (C) 1999-2001  Naval Research Laboratory

  This library is free software; you can redistribute it and/or
  modify it under the terms of the GNU Lesser General Public
  License as published by the Free Software Foundation; either
  version 2.1 of the License, or (at your option) any later version.

  This library is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with this library; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

*/

/*

  he.c

  ForeRunnerHE ATM Adapter driver for ATM on Linux
  Copyright (C) 1999-2001  Naval Research Laboratory

  Permission to use, copy, modify and distribute this software and its
  documentation is hereby granted, provided that both the copyright
  notice and this permission notice appear in all copies of the software,
  derivative works or modified versions, and any portions thereof, and
  that both notices appear in supporting documentation.

  NRL ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION AND
  DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
  RESULTING FROM THE USE OF THIS SOFTWARE.

  This driver was written using the "Programmer's Reference Manual for
  ForeRunnerHE(tm)", MANU0361-01 - Rev. A, 08/21/98.

  AUTHORS:
	chas williams <[email protected]>
	eric kinzie <[email protected]>

  NOTES:
	4096 supported 'connections'
	group 0 is used for all traffic
	interrupt queue 0 is used for all interrupts
	aal0 support (based on work from [email protected])

*/

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <linux/atmdev.h>
#include <linux/atm.h>
#include <linux/sonet.h>

#undef USE_SCATTERGATHER
#undef USE_CHECKSUM_HW			/* still confused about this */
/* #undef HE_DEBUG */

#include "he.h"
#include "suni.h"
#include <linux/atm_he.h>

#define hprintk(fmt,args...)	printk(KERN_ERR DEV_LABEL "%d: " fmt, he_dev->number , ##args)

#ifdef HE_DEBUG
#define HPRINTK(fmt,args...)	printk(KERN_DEBUG DEV_LABEL "%d: " fmt, he_dev->number , ##args)
#else /* !HE_DEBUG */
#define HPRINTK(fmt,args...)	do { } while (0)
#endif /* HE_DEBUG */

/* declarations */

static int he_open(struct atm_vcc *vcc);
static void he_close(struct atm_vcc *vcc);
static int he_send(struct atm_vcc *vcc, struct sk_buff *skb);
static int he_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg);
static irqreturn_t he_irq_handler(int irq, void *dev_id);
static void he_tasklet(unsigned long data);
static int he_proc_read(struct atm_dev *dev, loff_t *pos, char *page);
static int he_start(struct atm_dev *dev);
static void he_stop(struct he_dev *dev);
static void he_phy_put(struct atm_dev *, unsigned char, unsigned long);
static unsigned char he_phy_get(struct atm_dev *, unsigned long);

static u8 read_prom_byte(struct he_dev *he_dev, int addr);

/* globals */

static struct he_dev *he_devs;
static bool disable64;
static short nvpibits = -1;
static short nvcibits = -1;
static short rx_skb_reserve = 16;
static bool irq_coalesce = true;
static bool sdh;

/* Read from EEPROM = 0000 0011b */
static unsigned int readtab[] = {
	CS_HIGH | CLK_HIGH,
	CS_LOW | CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH,	/* 1 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH	/* 1 */
};

/* Clock to read from/write to the EEPROM */
static unsigned int clocktab[] = {
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW
};

static const struct atmdev_ops he_ops =
{
	.open =		he_open,
	.close =	he_close,
	.ioctl =	he_ioctl,
	.send =		he_send,
	.phy_put =	he_phy_put,
	.phy_get =	he_phy_get,
	.proc_read =	he_proc_read,
	.owner =	THIS_MODULE
};

#define he_writel(dev, val, reg)	do { writel(val, (dev)->membase + (reg)); wmb(); } while (0)
#define he_readl(dev, reg)		readl((dev)->membase + (reg))

/* section 2.12 connection memory access */

static __inline__ void
he_writel_internal(struct he_dev *he_dev, unsigned val, unsigned addr,
							unsigned flags)
{
	he_writel(he_dev, val, CON_DAT);
	(void) he_readl(he_dev, CON_DAT);	/* flush posted writes */
	he_writel(he_dev, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
}
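
/*
 * illustrative note (editorial, not from the manual): a call such as
 * he_writel_tcm(dev, 0, 0x100) -- defined just below -- expands to the
 * sequence above: stage the data in CON_DAT, flush the posted write,
 * issue the address/command through CON_CTL, then spin until BUSY clears
 */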

#define he_writel_rcm(dev, val, reg) \
			he_writel_internal(dev, val, reg, CON_CTL_RCM)

#define he_writel_tcm(dev, val, reg) \
			he_writel_internal(dev, val, reg, CON_CTL_TCM)

#define he_writel_mbox(dev, val, reg) \
			he_writel_internal(dev, val, reg, CON_CTL_MBOX)

static unsigned
he_readl_internal(struct he_dev *he_dev, unsigned addr, unsigned flags)
{
	he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
	return he_readl(he_dev, CON_DAT);
}

#define he_readl_rcm(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_RCM)

#define he_readl_tcm(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_TCM)

#define he_readl_mbox(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_MBOX)


/* figure 2.2 connection id */

#define he_mkcid(dev, vpi, vci)		(((vpi << (dev)->vcibits) | vci) & 0x1fff)
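
/*
 * worked example (editorial): with vcibits = 10, vpi = 1 and vci = 32
 * yield cid = ((1 << 10) | 32) & 0x1fff = 0x420
 */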

/* 2.5.1 per connection transmit state registers */

#define he_writel_tsr0(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 0)
#define he_readl_tsr0(dev, cid) \
		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 0)

#define he_writel_tsr1(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 1)

#define he_writel_tsr2(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 2)

#define he_writel_tsr3(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 3)

#define he_writel_tsr4(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 4)

	/* from page 2-20
	 *
	 * NOTE While the transmit connection is active, bits 23 through 0
	 *      of this register must not be written by the host.  Byte
	 *      enables should be used during normal operation when writing
	 *      the most significant byte.
	 */

#define he_writel_tsr4_upper(dev, val, cid) \
		he_writel_internal(dev, val, CONFIG_TSRA | (cid << 3) | 4, \
							CON_CTL_TCM \
							| CON_BYTE_DISABLE_2 \
							| CON_BYTE_DISABLE_1 \
							| CON_BYTE_DISABLE_0)

#define he_readl_tsr4(dev, cid) \
		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 4)

#define he_writel_tsr5(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 5)

#define he_writel_tsr6(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 6)

#define he_writel_tsr7(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 7)


#define he_writel_tsr8(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 0)

#define he_writel_tsr9(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 1)

#define he_writel_tsr10(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 2)

#define he_writel_tsr11(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 3)


#define he_writel_tsr12(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 0)

#define he_writel_tsr13(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 1)


#define he_writel_tsr14(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRD | cid)

#define he_writel_tsr14_upper(dev, val, cid) \
		he_writel_internal(dev, val, CONFIG_TSRD | cid, \
							CON_CTL_TCM \
							| CON_BYTE_DISABLE_2 \
							| CON_BYTE_DISABLE_1 \
							| CON_BYTE_DISABLE_0)

/* 2.7.1 per connection receive state registers */

#define he_writel_rsr0(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 0)
#define he_readl_rsr0(dev, cid) \
		he_readl_rcm(dev, 0x00000 | (cid << 3) | 0)

#define he_writel_rsr1(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 1)

#define he_writel_rsr2(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 2)

#define he_writel_rsr3(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 3)

#define he_writel_rsr4(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 4)

#define he_writel_rsr5(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 5)

#define he_writel_rsr6(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 6)

#define he_writel_rsr7(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 7)

static __inline__ struct atm_vcc*
__find_vcc(struct he_dev *he_dev, unsigned cid)
{
	struct hlist_head *head;
	struct atm_vcc *vcc;
	struct sock *s;
	short vpi;
	int vci;

	vpi = cid >> he_dev->vcibits;
	vci = cid & ((1 << he_dev->vcibits) - 1);
	head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];

	sk_for_each(s, head) {
		vcc = atm_sk(s);
		if (vcc->dev == he_dev->atm_dev &&
		    vcc->vci == vci && vcc->vpi == vpi &&
		    vcc->qos.rxtp.traffic_class != ATM_NONE) {
			return vcc;
		}
	}
	return NULL;
}
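
/*
 * editorial note: this is the inverse of he_mkcid() above -- e.g. with
 * vcibits = 10, cid 0x420 splits back into vpi = 0x420 >> 10 = 1 and
 * vci = 0x420 & 0x3ff = 32 before the VCC hash chain is searched
 */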

static int he_init_one(struct pci_dev *pci_dev,
		       const struct pci_device_id *pci_ent)
{
	struct atm_dev *atm_dev = NULL;
	struct he_dev *he_dev = NULL;
	int err = 0;

	printk(KERN_INFO "ATM he driver\n");

	if (pci_enable_device(pci_dev))
		return -EIO;
	if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32)) != 0) {
		printk(KERN_WARNING "he: no suitable dma available\n");
		err = -EIO;
		goto init_one_failure;
	}

	atm_dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &he_ops, -1, NULL);
	if (!atm_dev) {
		err = -ENODEV;
		goto init_one_failure;
	}
	pci_set_drvdata(pci_dev, atm_dev);

	he_dev = kzalloc(sizeof(struct he_dev),
			 GFP_KERNEL);
	if (!he_dev) {
		err = -ENOMEM;
		goto init_one_failure;
	}
	he_dev->pci_dev = pci_dev;
	he_dev->atm_dev = atm_dev;
	he_dev->atm_dev->dev_data = he_dev;
	atm_dev->dev_data = he_dev;
	he_dev->number = atm_dev->number;
	tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
	spin_lock_init(&he_dev->global_lock);

	if (he_start(atm_dev)) {
		he_stop(he_dev);
		err = -ENODEV;
		goto init_one_failure;
	}
	he_dev->next = NULL;
	if (he_devs)
		he_dev->next = he_devs;
	he_devs = he_dev;
	return 0;

init_one_failure:
	if (atm_dev)
		atm_dev_deregister(atm_dev);
	kfree(he_dev);
	pci_disable_device(pci_dev);
	return err;
}

static void he_remove_one(struct pci_dev *pci_dev)
{
	struct atm_dev *atm_dev;
	struct he_dev *he_dev;

	atm_dev = pci_get_drvdata(pci_dev);
	he_dev = HE_DEV(atm_dev);

	/* need to remove from he_devs */

	he_stop(he_dev);
	atm_dev_deregister(atm_dev);
	kfree(he_dev);

	pci_disable_device(pci_dev);
}


static unsigned
rate_to_atmf(unsigned rate)		/* cps to atm forum format */
{
#define NONZERO (1 << 14)

	unsigned exp = 0;

	if (rate == 0)
		return 0;

	rate <<= 9;
	while (rate > 0x3ff) {
		++exp;
		rate >>= 1;
	}

	return (NONZERO | (exp << 9) | (rate & 0x1ff));
}
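
/*
 * worked example (editorial): rate_to_atmf(1000) shifts 1000 << 9 =
 * 512000, then halves nine times down to 1000 with exp = 9, returning
 * 0x4000 | (9 << 9) | (1000 & 0x1ff) = 0x53e8; decoding the atm forum
 * format gives 2^9 * (1 + 488/512) = 1000 cps again
 */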

static void he_init_rx_lbfp0(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->r0_startrow * he_dev->bytes_per_row;

	lbufd_index = 0;
	lbm_offset = he_readl(he_dev, RCMLBM_BA);

	he_writel(he_dev, lbufd_index, RLBF0_H);

	for (i = 0, lbuf_count = 0; i < he_dev->r0_numbuffs; ++i) {
		lbufd_index += 2;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 4;
	}

	he_writel(he_dev, lbufd_index - 2, RLBF0_T);
	he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C);
}

static void he_init_rx_lbfp1(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->r1_startrow * he_dev->bytes_per_row;

	lbufd_index = 1;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, RLBF1_H);

	for (i = 0, lbuf_count = 0; i < he_dev->r1_numbuffs; ++i) {
		lbufd_index += 2;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 4;
	}

	he_writel(he_dev, lbufd_index - 2, RLBF1_T);
	he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C);
}

static void he_init_tx_lbfp(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->tx_startrow * he_dev->bytes_per_row;

	lbufd_index = he_dev->r0_numbuffs + he_dev->r1_numbuffs;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, TLBF_H);

	for (i = 0, lbuf_count = 0; i < he_dev->tx_numbuffs; ++i) {
		lbufd_index += 1;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 2;
	}

	he_writel(he_dev, lbufd_index - 1, TLBF_T);
}

static int he_init_tpdrq(struct he_dev *he_dev)
{
	he_dev->tpdrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
						CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
						&he_dev->tpdrq_phys,
						GFP_KERNEL);
	if (he_dev->tpdrq_base == NULL) {
		hprintk("failed to alloc tpdrq\n");
		return -ENOMEM;
	}

	he_dev->tpdrq_tail = he_dev->tpdrq_base;
	he_dev->tpdrq_head = he_dev->tpdrq_base;

	he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H);
	he_writel(he_dev, 0, TPDRQ_T);
	he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S);

	return 0;
}

static void he_init_cs_block(struct he_dev *he_dev)
{
	unsigned clock, rate, delta;
	int reg;

	/* 5.1.7 cs block initialization */

	for (reg = 0; reg < 0x20; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_STTIM0 + reg);

	/* rate grid timer reload values */

	clock = he_is622(he_dev) ? 66667000 : 50000000;
	rate = he_dev->atm_dev->link_rate;
	delta = rate / 16 / 2;

	for (reg = 0; reg < 0x10; ++reg) {
		/* 2.4 internal transmit function
		 *
		 * we initialize the first row in the rate grid.
		 * values are period (in clock cycles) of timer
		 */
		unsigned period = clock / rate;

		he_writel_mbox(he_dev, period, CS_TGRLD0 + reg);
		rate -= delta;
	}

	if (he_is622(he_dev)) {
		/* table 5.2 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000800fa, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x000c33cb, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x0010101b, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x00181dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x00280600, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x023de8b3, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x1801, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x68b3, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x68b3, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0x14585, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		/* table 5.8 */
		he_writel_mbox(he_dev, 0x00159ece, CS_TFBSET);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5eb3, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe8b3, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdeb3, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRCEIL);

		/* table 5.9 */
		he_writel_mbox(he_dev, 0x5, CS_OTPPER);
		he_writel_mbox(he_dev, 0x14, CS_OTWPER);
	} else {
		/* table 5.1 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000400ea, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x00063388, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x00081018, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x000c1dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x0014051a, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x0235e4b1, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x4701, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x64b1, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x64b1, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0xf424, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		/* table 5.8 */
		he_writel_mbox(he_dev, 0x000563b7, CS_TFBSET);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5ab1, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe4b1, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdab1, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRCEIL);

		/* table 5.9 */
		he_writel_mbox(he_dev, 0x6, CS_OTPPER);
		he_writel_mbox(he_dev, 0x1e, CS_OTWPER);
	}

	he_writel_mbox(he_dev, 0x8, CS_OTTLIM);

	for (reg = 0; reg < 0x8; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_HGRRT0 + reg);

}

static int he_init_cs_block_rcm(struct he_dev *he_dev)
{
	unsigned (*rategrid)[16][16];
	unsigned rate, delta;
	int i, j, reg;

	unsigned rate_atmf, exp, man;
	unsigned long long rate_cps;
	int mult, buf, buf_limit = 4;

	rategrid = kmalloc(sizeof(unsigned) * 16 * 16, GFP_KERNEL);
	if (!rategrid)
		return -ENOMEM;

	/* initialize rate grid group table */

	for (reg = 0x0; reg < 0xff; ++reg)
		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

	/* initialize rate controller groups */

	for (reg = 0x100; reg < 0x1ff; ++reg)
		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

	/* initialize tNrm lookup table */

	/* the manual makes reference to a routine in a sample driver
	   for proper configuration; fortunately, we only need this
	   in order to support abr connection */

	/* initialize rate to group table */

	rate = he_dev->atm_dev->link_rate;
	delta = rate / 32;

	/*
	 * 2.4 transmit internal functions
	 *
	 * we construct a copy of the rate grid used by the scheduler
	 * in order to construct the rate to group table below
	 */

	for (j = 0; j < 16; j++) {
		(*rategrid)[0][j] = rate;
		rate -= delta;
	}

	for (i = 1; i < 16; i++)
		for (j = 0; j < 16; j++)
			if (i > 14)
				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 4;
			else
				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 2;
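
	/*
	 * illustrative numbers (editorial): on an OC-3 link (link_rate =
	 * ATM_OC3_PCR, roughly 353207 cps) row 0 steps down linearly by
	 * delta = link_rate / 32 per column; each later row holds half
	 * the row above it, except row 15, which holds a quarter
	 */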

	/*
	 * 2.4 transmit internal function
	 *
	 * this table maps the upper 5 bits of exponent and mantissa
	 * of the atm forum representation of the rate into an index
	 * on rate grid
	 */

	rate_atmf = 0;
	while (rate_atmf < 0x400) {
		man = (rate_atmf & 0x1f) << 4;
		exp = rate_atmf >> 5;

		/*
		   instead of '/ 512', use '>> 9' to prevent a call
		   to divdu3 on x86 platforms
		*/
		rate_cps = (unsigned long long) (1UL << exp) * (man + 512) >> 9;
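
		/*
		 * worked example (editorial): rate_atmf = 0x20 gives man = 0
		 * and exp = 1, so rate_cps = 2 * 512 >> 9 = 2 cps, which the
		 * clamp below raises to the 10 cps minimum
		 */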

		if (rate_cps < 10)
			rate_cps = 10;	/* 2.2.1 minimum payload rate is 10 cps */

		for (i = 255; i > 0; i--)
			if ((*rategrid)[i/16][i%16] >= rate_cps)
				break;	/* pick nearest rate instead? */

		/*
		 * each table entry is 16 bits: (rate grid index (8 bits)
		 * and a buffer limit (8 bits)
		 * there are two table entries in each 32-bit register
		 */

#ifdef notdef
		buf = rate_cps * he_dev->tx_numbuffs /
				(he_dev->atm_dev->link_rate * 2);
#else
		/* this is pretty, but avoids _divdu3 and is mostly correct */
		mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR;
		if (rate_cps > (272ULL * mult))
			buf = 4;
		else if (rate_cps > (204ULL * mult))
			buf = 3;
		else if (rate_cps > (136ULL * mult))
			buf = 2;
		else if (rate_cps > (68ULL * mult))
			buf = 1;
		else
			buf = 0;
#endif
		if (buf > buf_limit)
			buf = buf_limit;
		reg = (reg << 16) | ((i << 8) | buf);

#define RTGTBL_OFFSET 0x400

		if (rate_atmf & 0x1)
			he_writel_rcm(he_dev, reg,
				      CONFIG_RCMABR + RTGTBL_OFFSET + (rate_atmf >> 1));

		++rate_atmf;
	}

	kfree(rategrid);
	return 0;
}

static int he_init_group(struct he_dev *he_dev, int group)
{
	struct he_buff *heb, *next;
	dma_addr_t mapping;
	int i;

	he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
	he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
	he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
	he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
		  G0_RBPS_BS + (group * 32));

	/* bitmap table */
	he_dev->rbpl_table = bitmap_zalloc(RBPL_TABLE_SIZE, GFP_KERNEL);
	if (!he_dev->rbpl_table) {
		hprintk("unable to allocate rbpl bitmap table\n");
		return -ENOMEM;
	}

	/* rbpl_virt 64-bit pointers */
	he_dev->rbpl_virt = kmalloc_array(RBPL_TABLE_SIZE,
					  sizeof(*he_dev->rbpl_virt),
					  GFP_KERNEL);
	if (!he_dev->rbpl_virt) {
		hprintk("unable to allocate rbpl virt table\n");
		goto out_free_rbpl_table;
	}

	/* large buffer pool */
	he_dev->rbpl_pool = dma_pool_create("rbpl", &he_dev->pci_dev->dev,
					    CONFIG_RBPL_BUFSIZE, 64, 0);
	if (he_dev->rbpl_pool == NULL) {
		hprintk("unable to create rbpl pool\n");
		goto out_free_rbpl_virt;
	}

	he_dev->rbpl_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
					       CONFIG_RBPL_SIZE * sizeof(struct he_rbp),
					       &he_dev->rbpl_phys, GFP_KERNEL);
	if (he_dev->rbpl_base == NULL) {
		hprintk("failed to alloc rbpl_base\n");
		goto out_destroy_rbpl_pool;
	}

	INIT_LIST_HEAD(&he_dev->rbpl_outstanding);

	for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {

		heb = dma_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL, &mapping);
		if (!heb)
			goto out_free_rbpl;
		heb->mapping = mapping;
		list_add(&heb->entry, &he_dev->rbpl_outstanding);

		set_bit(i, he_dev->rbpl_table);
		he_dev->rbpl_virt[i] = heb;
		he_dev->rbpl_hint = i + 1;
		he_dev->rbpl_base[i].idx = i << RBP_IDX_OFFSET;
		he_dev->rbpl_base[i].phys = mapping + offsetof(struct he_buff, data);
	}
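
	/*
	 * editorial note: each pool buffer i is now tracked three ways --
	 * bit i in rbpl_table (free/in-use), the pointer in rbpl_virt[i],
	 * and the idx field (i << RBP_IDX_OFFSET) that lets the receive
	 * path map an address returned in the RBRQ back to its he_buff
	 */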
	he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1];

	he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32));
	he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail),
		  G0_RBPL_T + (group * 32));
	he_writel(he_dev, (CONFIG_RBPL_BUFSIZE - sizeof(struct he_buff))/4,
		  G0_RBPL_BS + (group * 32));
	he_writel(he_dev,
		  RBP_THRESH(CONFIG_RBPL_THRESH) |
		  RBP_QSIZE(CONFIG_RBPL_SIZE - 1) |
		  RBP_INT_ENB,
		  G0_RBPL_QI + (group * 32));

	/* rx buffer ready queue */

	he_dev->rbrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
					       CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
					       &he_dev->rbrq_phys, GFP_KERNEL);
	if (he_dev->rbrq_base == NULL) {
		hprintk("failed to allocate rbrq\n");
		goto out_free_rbpl;
	}

	he_dev->rbrq_head = he_dev->rbrq_base;
	he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16));
	he_writel(he_dev, 0, G0_RBRQ_H + (group * 16));
	he_writel(he_dev,
		  RBRQ_THRESH(CONFIG_RBRQ_THRESH) | RBRQ_SIZE(CONFIG_RBRQ_SIZE - 1),
		  G0_RBRQ_Q + (group * 16));
	if (irq_coalesce) {
		hprintk("coalescing interrupts\n");
		he_writel(he_dev, RBRQ_TIME(768) | RBRQ_COUNT(7),
			  G0_RBRQ_I + (group * 16));
	} else
		he_writel(he_dev, RBRQ_TIME(0) | RBRQ_COUNT(1),
			  G0_RBRQ_I + (group * 16));

	/* tx buffer ready queue */

	he_dev->tbrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
					       CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
					       &he_dev->tbrq_phys, GFP_KERNEL);
	if (he_dev->tbrq_base == NULL) {
		hprintk("failed to allocate tbrq\n");
		goto out_free_rbpq_base;
	}

	he_dev->tbrq_head = he_dev->tbrq_base;

	he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16));
	he_writel(he_dev, 0, G0_TBRQ_H + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_SIZE - 1, G0_TBRQ_S + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_THRESH, G0_TBRQ_THRESH + (group * 16));

	return 0;

out_free_rbpq_base:
	dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBRQ_SIZE *
			  sizeof(struct he_rbrq), he_dev->rbrq_base,
			  he_dev->rbrq_phys);
out_free_rbpl:
	list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
		dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);

	dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBPL_SIZE *
			  sizeof(struct he_rbp), he_dev->rbpl_base,
			  he_dev->rbpl_phys);
out_destroy_rbpl_pool:
	dma_pool_destroy(he_dev->rbpl_pool);
out_free_rbpl_virt:
	kfree(he_dev->rbpl_virt);
out_free_rbpl_table:
	bitmap_free(he_dev->rbpl_table);

	return -ENOMEM;
}

static int he_init_irq(struct he_dev *he_dev)
{
	int i;

	/* 2.9.3.5 tail offset for each interrupt queue is located after the
		   end of the interrupt queue */

	he_dev->irq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
					      (CONFIG_IRQ_SIZE + 1) * sizeof(struct he_irq),
					      &he_dev->irq_phys, GFP_KERNEL);
	if (he_dev->irq_base == NULL) {
		hprintk("failed to allocate irq\n");
		return -ENOMEM;
	}
	he_dev->irq_tailoffset = (unsigned *)
					&he_dev->irq_base[CONFIG_IRQ_SIZE];
	*he_dev->irq_tailoffset = 0;
	he_dev->irq_head = he_dev->irq_base;
	he_dev->irq_tail = he_dev->irq_base;

	for (i = 0; i < CONFIG_IRQ_SIZE; ++i)
		he_dev->irq_base[i].isw = ITYPE_INVALID;

	he_writel(he_dev, he_dev->irq_phys, IRQ0_BASE);
	he_writel(he_dev,
		  IRQ_SIZE(CONFIG_IRQ_SIZE) | IRQ_THRESH(CONFIG_IRQ_THRESH),
		  IRQ0_HEAD);
	he_writel(he_dev, IRQ_INT_A | IRQ_TYPE_LINE, IRQ0_CNTL);
	he_writel(he_dev, 0x0, IRQ0_DATA);

	he_writel(he_dev, 0x0, IRQ1_BASE);
	he_writel(he_dev, 0x0, IRQ1_HEAD);
	he_writel(he_dev, 0x0, IRQ1_CNTL);
	he_writel(he_dev, 0x0, IRQ1_DATA);

	he_writel(he_dev, 0x0, IRQ2_BASE);
	he_writel(he_dev, 0x0, IRQ2_HEAD);
	he_writel(he_dev, 0x0, IRQ2_CNTL);
	he_writel(he_dev, 0x0, IRQ2_DATA);

	he_writel(he_dev, 0x0, IRQ3_BASE);
	he_writel(he_dev, 0x0, IRQ3_HEAD);
	he_writel(he_dev, 0x0, IRQ3_CNTL);
	he_writel(he_dev, 0x0, IRQ3_DATA);

	/* 2.9.3.2 interrupt queue mapping registers */

	he_writel(he_dev, 0x0, GRP_10_MAP);
	he_writel(he_dev, 0x0, GRP_32_MAP);
	he_writel(he_dev, 0x0, GRP_54_MAP);
	he_writel(he_dev, 0x0, GRP_76_MAP);

	if (request_irq(he_dev->pci_dev->irq,
			he_irq_handler, IRQF_SHARED, DEV_LABEL, he_dev)) {
		hprintk("irq %d already in use\n", he_dev->pci_dev->irq);
		return -EINVAL;
	}

	he_dev->irq = he_dev->pci_dev->irq;

	return 0;
}

static int he_start(struct atm_dev *dev)
{
	struct he_dev *he_dev;
	struct pci_dev *pci_dev;
	unsigned long membase;

	u16 command;
	u32 gen_cntl_0, host_cntl, lb_swap;
	u8 cache_size, timer;

	unsigned err;
	unsigned int status, reg;
	int i, group;

	he_dev = HE_DEV(dev);
	pci_dev = he_dev->pci_dev;

	membase = pci_resource_start(pci_dev, 0);
	HPRINTK("membase = 0x%lx irq = %d.\n", membase, pci_dev->irq);

	/*
	 * pci bus controller initialization
	 */

	/* 4.3 pci bus controller-specific initialization */
	if (pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0) != 0) {
		hprintk("can't read GEN_CNTL_0\n");
		return -EINVAL;
	}
	gen_cntl_0 |= (MRL_ENB | MRM_ENB | IGNORE_TIMEOUT);
	if (pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0) != 0) {
		hprintk("can't write GEN_CNTL_0.\n");
		return -EINVAL;
	}

	if (pci_read_config_word(pci_dev, PCI_COMMAND, &command) != 0) {
		hprintk("can't read PCI_COMMAND.\n");
		return -EINVAL;
	}

	command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE);
	if (pci_write_config_word(pci_dev, PCI_COMMAND, command) != 0) {
		hprintk("can't enable memory.\n");
		return -EINVAL;
	}

	if (pci_read_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, &cache_size)) {
		hprintk("can't read cache line size?\n");
		return -EINVAL;
	}

	if (cache_size < 16) {
		cache_size = 16;
		if (pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, cache_size))
			hprintk("can't set cache line size to %d\n", cache_size);
	}

	if (pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &timer)) {
		hprintk("can't read latency timer?\n");
		return -EINVAL;
	}

	/* from table 3.9
	 *
	 * LAT_TIMER = 1 + AVG_LAT + BURST_SIZE/BUS_SIZE
	 *
	 * AVG_LAT: The average first data read/write latency [maximum 16 clock cycles]
	 * BURST_SIZE: 1536 bytes (read) for 622, 768 bytes (read) for 155 [192 clock cycles]
	 *
	 */
#define LAT_TIMER 209
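	/*
	 * editorial arithmetic: with the worst-case values quoted above
	 * this is 1 + 16 + 192 = 209 pci clocks, hence the constant
	 */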
	if (timer < LAT_TIMER) {
		HPRINTK("latency timer was %d, setting to %d\n", timer, LAT_TIMER);
		timer = LAT_TIMER;
		if (pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, timer))
			hprintk("can't set latency timer to %d\n", timer);
	}

	if (!(he_dev->membase = ioremap(membase, HE_REGMAP_SIZE))) {
		hprintk("can't set up page mapping\n");
		return -EINVAL;
	}

	/* 4.4 card reset */
	he_writel(he_dev, 0x0, RESET_CNTL);
	he_writel(he_dev, 0xff, RESET_CNTL);

	msleep(16);	/* 16 ms */
	status = he_readl(he_dev, RESET_CNTL);
	if ((status & BOARD_RST_STATUS) == 0) {
		hprintk("reset failed\n");
		return -EINVAL;
	}

	/* 4.5 set bus width */
	host_cntl = he_readl(he_dev, HOST_CNTL);
	if (host_cntl & PCI_BUS_SIZE64)
		gen_cntl_0 |= ENBL_64;
	else
		gen_cntl_0 &= ~ENBL_64;

	if (disable64 == 1) {
		hprintk("disabling 64-bit pci bus transfers\n");
		gen_cntl_0 &= ~ENBL_64;
	}

	if (gen_cntl_0 & ENBL_64)
		hprintk("64-bit transfers enabled\n");

	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

	/* 4.7 read prom contents */
	for (i = 0; i < PROD_ID_LEN; ++i)
		he_dev->prod_id[i] = read_prom_byte(he_dev, PROD_ID + i);

	he_dev->media = read_prom_byte(he_dev, MEDIA);

	for (i = 0; i < 6; ++i)
		dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i);

	hprintk("%s%s, %pM\n", he_dev->prod_id,
		he_dev->media & 0x40 ? "SM" : "MM", dev->esi);
	he_dev->atm_dev->link_rate = he_is622(he_dev) ?
						ATM_OC12_PCR : ATM_OC3_PCR;

	/* 4.6 set host endianness */
	lb_swap = he_readl(he_dev, LB_SWAP);
	if (he_is622(he_dev))
		lb_swap &= ~XFER_SIZE;		/* 4 cells */
	else
		lb_swap |= XFER_SIZE;		/* 8 cells */
#ifdef __BIG_ENDIAN
	lb_swap |= DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST;
#else
	lb_swap &= ~(DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST |
		     DATA_WR_SWAP | DATA_RD_SWAP | DESC_RD_SWAP);
#endif /* __BIG_ENDIAN */
	he_writel(he_dev, lb_swap, LB_SWAP);

	/* 4.8 sdram controller initialization */
	he_writel(he_dev, he_is622(he_dev) ? LB_64_ENB : 0x0, SDRAM_CTL);

	/* 4.9 initialize rnum value */
	lb_swap |= SWAP_RNUM_MAX(0xf);
	he_writel(he_dev, lb_swap, LB_SWAP);

	/* 4.10 initialize the interrupt queues */
	if ((err = he_init_irq(he_dev)) != 0)
		return err;

	/* 4.11 enable pci bus controller state machines */
	host_cntl |= (OUTFF_ENB | CMDFF_ENB |
		      QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB);
	he_writel(he_dev, host_cntl, HOST_CNTL);

	gen_cntl_0 |= INT_PROC_ENBL|INIT_ENB;
	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

	/*
	 * atm network controller initialization
	 */

	/* 5.1.1 generic configuration state */

	/*
	 * local (cell) buffer memory map
	 *
	 *             HE155                          HE622
	 *
	 *        0 ____________1023 bytes  0 _______________________2047 bytes
	 *         |            |            |                   |   |
	 *         |  utility   |            |        rx0        |   |
	 *        5|____________|         255|___________________| u |
	 *        6|            |         256|                   | t |
	 *         |            |            |                   | i |
	 *         |    rx0     |     row    |        tx         | l |
	 *         |            |            |                   | i |
	 *         |            |         767|___________________| t |
	 *      517|____________|         768|                   | y |
	 * row  518|            |            |        rx1        |   |
	 *         |            |        1023|___________________|___|
	 *         |            |
	 *         |    tx      |
	 *         |            |
	 *         |            |
	 *     1535|____________|
	 *     1536|            |
	 *         |    rx1     |
	 *     2047|____________|
	 *
	 */

	/* total 4096 connections */
	he_dev->vcibits = CONFIG_DEFAULT_VCIBITS;
	he_dev->vpibits = CONFIG_DEFAULT_VPIBITS;

	if (nvpibits != -1 && nvcibits != -1 && nvpibits+nvcibits != HE_MAXCIDBITS) {
		hprintk("nvpibits + nvcibits != %d\n", HE_MAXCIDBITS);
		return -ENODEV;
	}

	if (nvpibits != -1) {
		he_dev->vpibits = nvpibits;
		he_dev->vcibits = HE_MAXCIDBITS - nvpibits;
	}

	if (nvcibits != -1) {
		he_dev->vcibits = nvcibits;
		he_dev->vpibits = HE_MAXCIDBITS - nvcibits;
	}


	if (he_is622(he_dev)) {
		he_dev->cells_per_row = 40;
		he_dev->bytes_per_row = 2048;
		he_dev->r0_numrows = 256;
		he_dev->tx_numrows = 512;
		he_dev->r1_numrows = 256;
		he_dev->r0_startrow = 0;
		he_dev->tx_startrow = 256;
		he_dev->r1_startrow = 768;
	} else {
		he_dev->cells_per_row = 20;
		he_dev->bytes_per_row = 1024;
		he_dev->r0_numrows = 512;
		he_dev->tx_numrows = 1018;
		he_dev->r1_numrows = 512;
		he_dev->r0_startrow = 6;
		he_dev->tx_startrow = 518;
		he_dev->r1_startrow = 1536;
	}

	he_dev->cells_per_lbuf = 4;
	he_dev->buffer_limit = 4;
	he_dev->r0_numbuffs = he_dev->r0_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->r0_numbuffs > 2560)
		he_dev->r0_numbuffs = 2560;

	he_dev->r1_numbuffs = he_dev->r1_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->r1_numbuffs > 2560)
		he_dev->r1_numbuffs = 2560;

	he_dev->tx_numbuffs = he_dev->tx_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->tx_numbuffs > 5120)
		he_dev->tx_numbuffs = 5120;
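
	/*
	 * editorial arithmetic: on an HE155 this works out to
	 * r0 = 512 * 20 / 4 = 2560 buffers (right at the cap) and
	 * tx = 1018 * 20 / 4 = 5090 buffers (under the 5120 cap)
	 */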

	/* 5.1.2 configure hardware dependent registers */

	he_writel(he_dev,
		  SLICE_X(0x2) | ARB_RNUM_MAX(0xf) | TH_PRTY(0x3) |
		  RH_PRTY(0x3) | TL_PRTY(0x2) | RL_PRTY(0x1) |
		  (he_is622(he_dev) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) |
		  (he_is622(he_dev) ? NET_PREF(0x50) : NET_PREF(0x8c)),
		  LBARB);

	he_writel(he_dev, BANK_ON |
		  (he_is622(he_dev) ? (REF_RATE(0x384) | WIDE_DATA) : REF_RATE(0x150)),
		  SDRAMCON);

	he_writel(he_dev,
		  (he_is622(he_dev) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) |
		  RM_RW_WAIT(1), RCMCONFIG);
	he_writel(he_dev,
		  (he_is622(he_dev) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) |
		  TM_RW_WAIT(1), TCMCONFIG);

	he_writel(he_dev, he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD, LB_CONFIG);

	he_writel(he_dev,
		  (he_is622(he_dev) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) |
		  (he_is622(he_dev) ? RC_UT_MODE(0) : RC_UT_MODE(1)) |
		  RX_VALVP(he_dev->vpibits) |
		  RX_VALVC(he_dev->vcibits), RC_CONFIG);

	he_writel(he_dev, DRF_THRESH(0x20) |
		  (he_is622(he_dev) ? TX_UT_MODE(0) : TX_UT_MODE(1)) |
		  TX_VCI_MASK(he_dev->vcibits) |
		  LBFREE_CNT(he_dev->tx_numbuffs), TX_CONFIG);

	he_writel(he_dev, 0x0, TXAAL5_PROTO);

	he_writel(he_dev, PHY_INT_ENB |
		  (he_is622(he_dev) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)),
		  RH_CONFIG);

	/* 5.1.3 initialize connection memory */

	for (i = 0; i < TCM_MEM_SIZE; ++i)
		he_writel_tcm(he_dev, 0, i);

	for (i = 0; i < RCM_MEM_SIZE; ++i)
		he_writel_rcm(he_dev, 0, i);

	/*
	 *	transmit connection memory map
	 *
	 *                  tx memory
	 *          0x0 ___________________
	 *             |                   |
	 *             |                   |
	 *             |       TSRa        |
	 *             |                   |
	 *             |                   |
	 *       0x8000|___________________|
	 *             |                   |
	 *             |       TSRb        |
	 *       0xc000|___________________|
	 *             |                   |
	 *             |       TSRc        |
	 *       0xe000|___________________|
	 *             |       TSRd        |
	 *       0xf000|___________________|
	 *             |       tmABR       |
	 *      0x10000|___________________|
	 *             |                   |
	 *             |       tmTPD       |
	 *             |___________________|
	 *             |                   |
	 *                      ....
	 *      0x1ffff|___________________|
	 *
	 *
	 */

	he_writel(he_dev, CONFIG_TSRB, TSRB_BA);
	he_writel(he_dev, CONFIG_TSRC, TSRC_BA);
	he_writel(he_dev, CONFIG_TSRD, TSRD_BA);
	he_writel(he_dev, CONFIG_TMABR, TMABR_BA);
	he_writel(he_dev, CONFIG_TPDBA, TPD_BA);


	/*
	 *	receive connection memory map
	 *
	 *          0x0 ___________________
	 *             |                   |
	 *             |                   |
	 *             |       RSRa        |
	 *             |                   |
	 *             |                   |
	 *       0x8000|___________________|
	 *             |                   |
	 *             |   rx0/1           |
	 *             |       LBM         |   link lists of local
	 *             |            tx     |   buffer memory
	 *             |                   |
	 *       0xd000|___________________|
	 *             |                   |
	 *             |      rmABR        |
	 *       0xe000|___________________|
	 *             |                   |
	 *             |       RSRb        |
	 *             |___________________|
	 *             |                   |
	 *                      ....
	 *       0xffff|___________________|
	 */

	he_writel(he_dev, 0x08000, RCMLBM_BA);
	he_writel(he_dev, 0x0e000, RCMRSRB_BA);
	he_writel(he_dev, 0x0d800, RCMABR_BA);

	/* 5.1.4 initialize local buffer free pools linked lists */

	he_init_rx_lbfp0(he_dev);
	he_init_rx_lbfp1(he_dev);

	he_writel(he_dev, 0x0, RLBC_H);
	he_writel(he_dev, 0x0, RLBC_T);
	he_writel(he_dev, 0x0, RLBC_H2);

	he_writel(he_dev, 512, RXTHRSH);	/* 10% of r0+r1 buffers */
	he_writel(he_dev, 256, LITHRSH);	/* 5% of r0+r1 buffers */

	he_init_tx_lbfp(he_dev);

	he_writel(he_dev, he_is622(he_dev) ? 0x104780 : 0x800, UBUFF_BA);

	/* 5.1.5 initialize intermediate receive queues */

	if (he_is622(he_dev)) {
		he_writel(he_dev, 0x000f, G0_INMQ_S);
		he_writel(he_dev, 0x200f, G0_INMQ_L);

		he_writel(he_dev, 0x001f, G1_INMQ_S);
		he_writel(he_dev, 0x201f, G1_INMQ_L);

		he_writel(he_dev, 0x002f, G2_INMQ_S);
		he_writel(he_dev, 0x202f, G2_INMQ_L);

		he_writel(he_dev, 0x003f, G3_INMQ_S);
		he_writel(he_dev, 0x203f, G3_INMQ_L);

		he_writel(he_dev, 0x004f, G4_INMQ_S);
		he_writel(he_dev, 0x204f, G4_INMQ_L);

		he_writel(he_dev, 0x005f, G5_INMQ_S);
		he_writel(he_dev, 0x205f, G5_INMQ_L);

		he_writel(he_dev, 0x006f, G6_INMQ_S);
		he_writel(he_dev, 0x206f, G6_INMQ_L);

		he_writel(he_dev, 0x007f, G7_INMQ_S);
		he_writel(he_dev, 0x207f, G7_INMQ_L);
	} else {
		he_writel(he_dev, 0x0000, G0_INMQ_S);
		he_writel(he_dev, 0x0008, G0_INMQ_L);

		he_writel(he_dev, 0x0001, G1_INMQ_S);
		he_writel(he_dev, 0x0009, G1_INMQ_L);

		he_writel(he_dev, 0x0002, G2_INMQ_S);
		he_writel(he_dev, 0x000a, G2_INMQ_L);

		he_writel(he_dev, 0x0003, G3_INMQ_S);
		he_writel(he_dev, 0x000b, G3_INMQ_L);

		he_writel(he_dev, 0x0004, G4_INMQ_S);
		he_writel(he_dev, 0x000c, G4_INMQ_L);

		he_writel(he_dev, 0x0005, G5_INMQ_S);
		he_writel(he_dev, 0x000d, G5_INMQ_L);

		he_writel(he_dev, 0x0006, G6_INMQ_S);
		he_writel(he_dev, 0x000e, G6_INMQ_L);

		he_writel(he_dev, 0x0007, G7_INMQ_S);
		he_writel(he_dev, 0x000f, G7_INMQ_L);
	}

	/* 5.1.6 application tunable parameters */

	he_writel(he_dev, 0x0, MCC);
	he_writel(he_dev, 0x0, OEC);
	he_writel(he_dev, 0x0, DCC);
	he_writel(he_dev, 0x0, CEC);

	/* 5.1.7 cs block initialization */

	he_init_cs_block(he_dev);

	/* 5.1.8 cs block connection memory initialization */

	if (he_init_cs_block_rcm(he_dev) < 0)
		return -ENOMEM;

	/* 5.1.10 initialize host structures */

	he_init_tpdrq(he_dev);

	he_dev->tpd_pool = dma_pool_create("tpd", &he_dev->pci_dev->dev,
					   sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
	if (he_dev->tpd_pool == NULL) {
		hprintk("unable to create tpd dma_pool\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&he_dev->outstanding_tpds);

	if (he_init_group(he_dev, 0) != 0)
		return -ENOMEM;

	for (group = 1; group < HE_NUM_GROUPS; ++group) {
		he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
			  G0_RBPS_BS + (group * 32));

		he_writel(he_dev, 0x0, G0_RBPL_S + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPL_T + (group * 32));
		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
			  G0_RBPL_QI + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPL_BS + (group * 32));

		he_writel(he_dev, 0x0, G0_RBRQ_ST + (group * 16));
		he_writel(he_dev, 0x0, G0_RBRQ_H + (group * 16));
		he_writel(he_dev, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0),
			  G0_RBRQ_Q + (group * 16));
		he_writel(he_dev, 0x0, G0_RBRQ_I + (group * 16));

		he_writel(he_dev, 0x0, G0_TBRQ_B_T + (group * 16));
		he_writel(he_dev, 0x0, G0_TBRQ_H + (group * 16));
		he_writel(he_dev, TBRQ_THRESH(0x1),
			  G0_TBRQ_THRESH + (group * 16));
		he_writel(he_dev, 0x0, G0_TBRQ_S + (group * 16));
	}

	/* host status page */

	he_dev->hsp = dma_alloc_coherent(&he_dev->pci_dev->dev,
					 sizeof(struct he_hsp),
					 &he_dev->hsp_phys, GFP_KERNEL);
	if (he_dev->hsp == NULL) {
		hprintk("failed to allocate host status page\n");
		return -ENOMEM;
	}
	he_writel(he_dev, he_dev->hsp_phys, HSP_BA);

	/* initialize framer */

#ifdef CONFIG_ATM_HE_USE_SUNI
	if (he_isMM(he_dev))
		suni_init(he_dev->atm_dev);
	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start)
		he_dev->atm_dev->phy->start(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

	if (sdh) {
		/* this really should be in suni.c but for now... */
		int val;

		val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM);
		val = (val & ~SUNI_TPOP_APM_S) | (SUNI_TPOP_S_SDH << SUNI_TPOP_APM_S_SHIFT);
		he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM);
		he_phy_put(he_dev->atm_dev, SUNI_TACP_IUCHP_CLP, SUNI_TACP_IUCHP);
	}

	/* 5.1.12 enable transmit and receive */

	reg = he_readl_mbox(he_dev, CS_ERCTL0);
	reg |= TX_ENABLE|ER_ENABLE;
	he_writel_mbox(he_dev, reg, CS_ERCTL0);

	reg = he_readl(he_dev, RC_CONFIG);
	reg |= RX_ENABLE;
	he_writel(he_dev, reg, RC_CONFIG);

	for (i = 0; i < HE_NUM_CS_STPER; ++i) {
		he_dev->cs_stper[i].inuse = 0;
		he_dev->cs_stper[i].pcr = -1;
	}
	he_dev->total_bw = 0;


	/* atm linux initialization */

	he_dev->atm_dev->ci_range.vpi_bits = he_dev->vpibits;
	he_dev->atm_dev->ci_range.vci_bits = he_dev->vcibits;

	he_dev->irq_peak = 0;
	he_dev->rbrq_peak = 0;
	he_dev->rbpl_peak = 0;
	he_dev->tbrq_peak = 0;

	HPRINTK("hell bent for leather!\n");

	return 0;
}

static void
he_stop(struct he_dev *he_dev)
{
	struct he_buff *heb, *next;
	struct pci_dev *pci_dev;
	u32 gen_cntl_0, reg;
	u16 command;

	pci_dev = he_dev->pci_dev;

	/* disable interrupts */

	if (he_dev->membase) {
		pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0);
		gen_cntl_0 &= ~(INT_PROC_ENBL | INIT_ENB);
		pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

		tasklet_disable(&he_dev->tasklet);

		/* disable recv and transmit */

		reg = he_readl_mbox(he_dev, CS_ERCTL0);
		reg &= ~(TX_ENABLE|ER_ENABLE);
		he_writel_mbox(he_dev, reg, CS_ERCTL0);

		reg = he_readl(he_dev, RC_CONFIG);
		reg &= ~(RX_ENABLE);
		he_writel(he_dev, reg, RC_CONFIG);
	}

#ifdef CONFIG_ATM_HE_USE_SUNI
	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->stop)
		he_dev->atm_dev->phy->stop(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

	if (he_dev->irq)
		free_irq(he_dev->irq, he_dev);

	if (he_dev->irq_base)
		dma_free_coherent(&he_dev->pci_dev->dev, (CONFIG_IRQ_SIZE + 1)
				  * sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);

	if (he_dev->hsp)
		dma_free_coherent(&he_dev->pci_dev->dev, sizeof(struct he_hsp),
				  he_dev->hsp, he_dev->hsp_phys);

	if (he_dev->rbpl_base) {
		list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
			dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);

		dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBPL_SIZE
				  * sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
	}

	kfree(he_dev->rbpl_virt);
	bitmap_free(he_dev->rbpl_table);
	dma_pool_destroy(he_dev->rbpl_pool);

	if (he_dev->rbrq_base)
		dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
				  he_dev->rbrq_base, he_dev->rbrq_phys);

	if (he_dev->tbrq_base)
		dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
				  he_dev->tbrq_base, he_dev->tbrq_phys);

	if (he_dev->tpdrq_base)
		dma_free_coherent(&he_dev->pci_dev->dev,
				  CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
				  he_dev->tpdrq_base, he_dev->tpdrq_phys);

	dma_pool_destroy(he_dev->tpd_pool);

	if (he_dev->pci_dev) {
		pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
		command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
		pci_write_config_word(he_dev->pci_dev, PCI_COMMAND, command);
	}

	if (he_dev->membase)
		iounmap(he_dev->membase);
}

static struct he_tpd *
__alloc_tpd(struct he_dev *he_dev)
{
	struct he_tpd *tpd;
	dma_addr_t mapping;

	tpd = dma_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC, &mapping);
	if (tpd == NULL)
		return NULL;

	tpd->status = TPD_ADDR(mapping);
	tpd->reserved = 0;
	tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0;
	tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0;
	tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0;

	return tpd;
}

#define AAL5_LEN(buf,len)					\
			((((unsigned char *)(buf))[(len)-6] << 8) |	\
				(((unsigned char *)(buf))[(len)-5]))
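
/*
 * editorial note: the AAL5 trailer occupies the last 8 bytes of the
 * final cell -- UU, CPI, a 2-byte length field (bytes len-6 and len-5,
 * read above), then the 4-byte CRC whose low 16 bits TCP_CKSUM below
 * picks out of bytes len-2 and len-1
 */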

/* 2.10.1.2 receive
 *
 * aal5 packets can optionally return the tcp checksum in the lower
 * 16 bits of the crc (RSR0_TCP_CKSUM)
 */

#define TCP_CKSUM(buf,len)					\
	((((unsigned char *)(buf))[(len)-2] << 8) |		\
	 (((unsigned char *)(buf))[(len-1)]))

static int
he_service_rbrq(struct he_dev *he_dev, int group)
{
	struct he_rbrq *rbrq_tail = (struct he_rbrq *)
				((unsigned long)he_dev->rbrq_base |
					he_dev->hsp->group[group].rbrq_tail);
	unsigned cid, lastcid = -1;
	struct sk_buff *skb;
	struct atm_vcc *vcc = NULL;
	struct he_vcc *he_vcc;
	struct he_buff *heb, *next;
	int i;
	int pdus_assembled = 0;
	int updated = 0;

	read_lock(&vcc_sklist_lock);
	while (he_dev->rbrq_head != rbrq_tail) {
		++updated;

		HPRINTK("%p rbrq%d 0x%x len=%d cid=0x%x %s%s%s%s%s%s\n",
			he_dev->rbrq_head, group,
			RBRQ_ADDR(he_dev->rbrq_head),
			RBRQ_BUFLEN(he_dev->rbrq_head),
			RBRQ_CID(he_dev->rbrq_head),
			RBRQ_CRC_ERR(he_dev->rbrq_head) ? " CRC_ERR" : "",
			RBRQ_LEN_ERR(he_dev->rbrq_head) ? " LEN_ERR" : "",
			RBRQ_END_PDU(he_dev->rbrq_head) ? " END_PDU" : "",
			RBRQ_AAL5_PROT(he_dev->rbrq_head) ? " AAL5_PROT" : "",
			RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "",
			RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : "");

		i = RBRQ_ADDR(he_dev->rbrq_head) >> RBP_IDX_OFFSET;
		heb = he_dev->rbpl_virt[i];

		cid = RBRQ_CID(he_dev->rbrq_head);
		if (cid != lastcid)
			vcc = __find_vcc(he_dev, cid);
		lastcid = cid;

		if (vcc == NULL || (he_vcc = HE_VCC(vcc)) == NULL) {
			hprintk("vcc/he_vcc == NULL  (cid 0x%x)\n", cid);
			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
				clear_bit(i, he_dev->rbpl_table);
				list_del(&heb->entry);
				dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
			}

			goto next_rbrq_entry;
		}

		if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
			hprintk("HBUF_ERR!  (cid 0x%x)\n", cid);
			atomic_inc(&vcc->stats->rx_drop);
			goto return_host_buffers;
		}

		heb->len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;
		clear_bit(i, he_dev->rbpl_table);
		list_move_tail(&heb->entry, &he_vcc->buffers);
		he_vcc->pdu_len += heb->len;

		if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) {
			lastcid = -1;
			HPRINTK("wake_up rx_waitq  (cid 0x%x)\n", cid);
			wake_up(&he_vcc->rx_waitq);
			goto return_host_buffers;
		}

		if (!RBRQ_END_PDU(he_dev->rbrq_head))
			goto next_rbrq_entry;

		if (RBRQ_LEN_ERR(he_dev->rbrq_head)
				|| RBRQ_CRC_ERR(he_dev->rbrq_head)) {
			HPRINTK("%s%s (%d.%d)\n",
				RBRQ_CRC_ERR(he_dev->rbrq_head)
					? "CRC_ERR " : "",
				RBRQ_LEN_ERR(he_dev->rbrq_head)
					? "LEN_ERR" : "",
				vcc->vpi, vcc->vci);
			atomic_inc(&vcc->stats->rx_err);
			goto return_host_buffers;
		}

		skb = atm_alloc_charge(vcc, he_vcc->pdu_len + rx_skb_reserve,
				       GFP_ATOMIC);
		if (!skb) {
			HPRINTK("charge failed (%d.%d)\n", vcc->vpi, vcc->vci);
			goto return_host_buffers;
		}

		if (rx_skb_reserve > 0)
			skb_reserve(skb, rx_skb_reserve);

		__net_timestamp(skb);

		list_for_each_entry(heb, &he_vcc->buffers, entry)
			skb_put_data(skb, &heb->data, heb->len);

		switch (vcc->qos.aal) {
			case ATM_AAL0:
				/* 2.10.1.5 raw cell receive */
				skb->len = ATM_AAL0_SDU;
				skb_set_tail_pointer(skb, skb->len);
				break;
			case ATM_AAL5:
				/* 2.10.1.2 aal5 receive */

				skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
				skb_set_tail_pointer(skb, skb->len);
#ifdef USE_CHECKSUM_HW
				if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
					skb->ip_summed = CHECKSUM_COMPLETE;
					skb->csum = TCP_CKSUM(skb->data,
							      he_vcc->pdu_len);
				}
#endif
				break;
		}

#ifdef should_never_happen
		if (skb->len > vcc->qos.rxtp.max_sdu)
			hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)!  cid 0x%x\n", skb->len, vcc->qos.rxtp.max_sdu, cid);
#endif

#ifdef notdef
		ATM_SKB(skb)->vcc = vcc;
#endif
		spin_unlock(&he_dev->global_lock);
		vcc->push(vcc, skb);
		spin_lock(&he_dev->global_lock);

		atomic_inc(&vcc->stats->rx);

return_host_buffers:
		++pdus_assembled;

		list_for_each_entry_safe(heb, next, &he_vcc->buffers, entry)
			dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
		INIT_LIST_HEAD(&he_vcc->buffers);
		he_vcc->pdu_len = 0;

next_rbrq_entry:
		he_dev->rbrq_head = (struct he_rbrq *)
				((unsigned long) he_dev->rbrq_base |
					RBRQ_MASK(he_dev->rbrq_head + 1));

	}
	read_unlock(&vcc_sklist_lock);

	if (updated) {
		if (updated > he_dev->rbrq_peak)
			he_dev->rbrq_peak = updated;

		he_writel(he_dev, RBRQ_MASK(he_dev->rbrq_head),
			  G0_RBRQ_H + (group * 16));
	}

	return pdus_assembled;
}

static void
he_service_tbrq(struct he_dev *he_dev, int group)
{
	struct he_tbrq *tbrq_tail = (struct he_tbrq *)
				((unsigned long)he_dev->tbrq_base |
					he_dev->hsp->group[group].tbrq_tail);
	struct he_tpd *tpd;
	int slot, updated = 0;
	struct he_tpd *__tpd;

	/* 2.1.6 transmit buffer return queue */

	while (he_dev->tbrq_head != tbrq_tail) {
		++updated;

		HPRINTK("tbrq%d 0x%x%s%s\n",
			group,
			TBRQ_TPD(he_dev->tbrq_head),
			TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "",
			TBRQ_MULTIPLE(he_dev->tbrq_head) ? " MULTIPLE" : "");
		tpd = NULL;
		list_for_each_entry(__tpd, &he_dev->outstanding_tpds, entry) {
			if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) {
				tpd = __tpd;
				list_del(&__tpd->entry);
				break;
			}
		}

		if (tpd == NULL) {
			hprintk("unable to locate tpd for dma buffer %x\n",
				TBRQ_TPD(he_dev->tbrq_head));
			goto next_tbrq_entry;
		}

		if (TBRQ_EOS(he_dev->tbrq_head)) {
			HPRINTK("wake_up(tx_waitq) cid 0x%x\n",
				he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci));
			if (tpd->vcc)
				wake_up(&HE_VCC(tpd->vcc)->tx_waitq);

			goto next_tbrq_entry;
		}

		for (slot = 0; slot < TPD_MAXIOV; ++slot) {
			if (tpd->iovec[slot].addr)
				dma_unmap_single(&he_dev->pci_dev->dev,
					tpd->iovec[slot].addr,
					tpd->iovec[slot].len & TPD_LEN_MASK,
					DMA_TO_DEVICE);
			if (tpd->iovec[slot].len & TPD_LST)
				break;

		}

		if (tpd->skb) {	/* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */
			if (tpd->vcc && tpd->vcc->pop)
				tpd->vcc->pop(tpd->vcc, tpd->skb);
			else
				dev_kfree_skb_any(tpd->skb);
		}

next_tbrq_entry:
		if (tpd)
			dma_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
		he_dev->tbrq_head = (struct he_tbrq *)
				((unsigned long) he_dev->tbrq_base |
					TBRQ_MASK(he_dev->tbrq_head + 1));
	}

	if (updated) {
		if (updated > he_dev->tbrq_peak)
			he_dev->tbrq_peak = updated;

		he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head),
			  G0_TBRQ_H + (group * 16));
	}
}

static void
he_service_rbpl(struct he_dev *he_dev, int group)
{
	struct he_rbp *new_tail;
	struct he_rbp *rbpl_head;
	struct he_buff *heb;
	dma_addr_t mapping;
	int i;
	int moved = 0;

	rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
					RBPL_MASK(he_readl(he_dev, G0_RBPL_S)));

	for (;;) {
		new_tail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
						RBPL_MASK(he_dev->rbpl_tail+1));

		/* table 3.42 -- rbpl_tail should never be set to rbpl_head */
		if (new_tail == rbpl_head)
			break;

		i = find_next_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE, he_dev->rbpl_hint);
		if (i > (RBPL_TABLE_SIZE - 1)) {
			i = find_first_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE);
			if (i > (RBPL_TABLE_SIZE - 1))
				break;
		}
		he_dev->rbpl_hint = i + 1;

		heb = dma_pool_alloc(he_dev->rbpl_pool, GFP_ATOMIC, &mapping);
		if (!heb)
			break;
		heb->mapping = mapping;
		list_add(&heb->entry, &he_dev->rbpl_outstanding);
		he_dev->rbpl_virt[i] = heb;
		set_bit(i, he_dev->rbpl_table);
		new_tail->idx = i << RBP_IDX_OFFSET;
		new_tail->phys = mapping + offsetof(struct he_buff, data);

		he_dev->rbpl_tail = new_tail;
		++moved;
	}

	if (moved)
		he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T);
}
1924
1925
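/*
 * Deferred interrupt work.  he_tasklet() walks the interrupt status
 * queue that the adapter DMAs into host memory, dispatching on the
 * ITYPE of each status word: receive/transmit buffer return queues,
 * buffer pool refills, PHY events, and error reports.  An
 * ITYPE_INVALID entry means the status word has not been updated yet
 * (see 8.1.1), in which case all group 0 queues are polled.  The head
 * pointer is written back to the adapter once the queue is drained.
 */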
static void
he_tasklet(unsigned long data)
{
	unsigned long flags;
	struct he_dev *he_dev = (struct he_dev *) data;
	int group, type;
	int updated = 0;

	HPRINTK("tasklet (0x%lx)\n", data);
	spin_lock_irqsave(&he_dev->global_lock, flags);

	while (he_dev->irq_head != he_dev->irq_tail) {
		++updated;

		type = ITYPE_TYPE(he_dev->irq_head->isw);
		group = ITYPE_GROUP(he_dev->irq_head->isw);

		switch (type) {
			case ITYPE_RBRQ_THRESH:
				HPRINTK("rbrq%d threshold\n", group);
				fallthrough;
			case ITYPE_RBRQ_TIMER:
				if (he_service_rbrq(he_dev, group))
					he_service_rbpl(he_dev, group);
				break;
			case ITYPE_TBRQ_THRESH:
				HPRINTK("tbrq%d threshold\n", group);
				fallthrough;
			case ITYPE_TPD_COMPLETE:
				he_service_tbrq(he_dev, group);
				break;
			case ITYPE_RBPL_THRESH:
				he_service_rbpl(he_dev, group);
				break;
			case ITYPE_RBPS_THRESH:
				/* shouldn't happen unless small buffers enabled */
				break;
			case ITYPE_PHY:
				HPRINTK("phy interrupt\n");
#ifdef CONFIG_ATM_HE_USE_SUNI
				spin_unlock_irqrestore(&he_dev->global_lock, flags);
				if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->interrupt)
					he_dev->atm_dev->phy->interrupt(he_dev->atm_dev);
				spin_lock_irqsave(&he_dev->global_lock, flags);
#endif
				break;
			case ITYPE_OTHER:
				switch (type|group) {
					case ITYPE_PARITY:
						hprintk("parity error\n");
						break;
					case ITYPE_ABORT:
						hprintk("abort 0x%x\n", he_readl(he_dev, ABORT_ADDR));
						break;
				}
				break;
			case ITYPE_TYPE(ITYPE_INVALID):
				/* see 8.1.1 -- check all queues */

				HPRINTK("isw not updated 0x%x\n", he_dev->irq_head->isw);

				he_service_rbrq(he_dev, 0);
				he_service_rbpl(he_dev, 0);
				he_service_tbrq(he_dev, 0);
				break;
			default:
				hprintk("bad isw 0x%x?\n", he_dev->irq_head->isw);
		}

		he_dev->irq_head->isw = ITYPE_INVALID;

		he_dev->irq_head = (struct he_irq *) NEXT_ENTRY(he_dev->irq_base, he_dev->irq_head, IRQ_MASK);
	}

	if (updated) {
		if (updated > he_dev->irq_peak)
			he_dev->irq_peak = updated;

		he_writel(he_dev,
			IRQ_SIZE(CONFIG_IRQ_SIZE) |
			IRQ_THRESH(CONFIG_IRQ_THRESH) |
			IRQ_TAIL(he_dev->irq_tail), IRQ0_HEAD);
		(void) he_readl(he_dev, INT_FIFO);	/* 8.1.2 controller errata; flush posted writes */
	}
	spin_unlock_irqrestore(&he_dev->global_lock, flags);
}

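/*
 * Top half.  The adapter exports its interrupt queue tail as a word
 * offset in host memory (*irq_tailoffset); the handler only snapshots
 * it, acknowledges the interrupt, and defers the queue walk to the
 * tasklet above.  If the tail appears not to have been updated it is
 * re-read from IRQ0_BASE as a workaround (see the 8.1.2 errata notes).
 */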
static irqreturn_t
he_irq_handler(int irq, void *dev_id)
{
	unsigned long flags;
	struct he_dev *he_dev = (struct he_dev *)dev_id;
	int handled = 0;

	if (he_dev == NULL)
		return IRQ_NONE;

	spin_lock_irqsave(&he_dev->global_lock, flags);

	he_dev->irq_tail = (struct he_irq *) (((unsigned long)he_dev->irq_base) |
						(*he_dev->irq_tailoffset << 2));

	if (he_dev->irq_tail == he_dev->irq_head) {
		HPRINTK("tailoffset not updated?\n");
		he_dev->irq_tail = (struct he_irq *) ((unsigned long)he_dev->irq_base |
			((he_readl(he_dev, IRQ0_BASE) & IRQ_MASK) << 2));
		(void) he_readl(he_dev, INT_FIFO);	/* 8.1.2 controller errata */
	}

#ifdef DEBUG
	if (he_dev->irq_head == he_dev->irq_tail /* && !IRQ_PENDING */)
		hprintk("spurious (or shared) interrupt?\n");
#endif

	if (he_dev->irq_head != he_dev->irq_tail) {
		handled = 1;
		tasklet_schedule(&he_dev->tasklet);
		he_writel(he_dev, INT_CLEAR_A, INT_FIFO);	/* clear interrupt */
		(void) he_readl(he_dev, INT_FIFO);		/* flush posted writes */
	}
	spin_unlock_irqrestore(&he_dev->global_lock, flags);
	return IRQ_RETVAL(handled);
}

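/*
 * __enqueue_tpd() posts a transmit packet descriptor on the TPDRQ.
 * The head pointer is cached and only re-read from the adapter when
 * the tail is about to catch up with it; if the queue really is full
 * the PDU is dropped (see the FIXME below about a backlog queue).
 * Caller must hold he_dev->global_lock.
 */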
static __inline__ void
__enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
{
	struct he_tpdrq *new_tail;

	HPRINTK("tpdrq %p cid 0x%x -> tpdrq_tail %p\n",
					tpd, cid, he_dev->tpdrq_tail);

	/* new_tail = he_dev->tpdrq_tail; */
	new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base |
					TPDRQ_MASK(he_dev->tpdrq_tail+1));

	/*
	 * check to see if we are about to set the tail == head
	 * if true, update the head pointer from the adapter
	 * to see if this is really the case (reading the queue
	 * head for every enqueue would be unnecessarily slow)
	 */

	if (new_tail == he_dev->tpdrq_head) {
		he_dev->tpdrq_head = (struct he_tpdrq *)
			(((unsigned long)he_dev->tpdrq_base) |
				TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H)));

		if (new_tail == he_dev->tpdrq_head) {
			int slot;

			hprintk("tpdrq full (cid 0x%x)\n", cid);
			/*
			 * FIXME
			 * push tpd onto a transmit backlog queue
			 * after service_tbrq, service the backlog
			 * for now, we just drop the pdu
			 */
			for (slot = 0; slot < TPD_MAXIOV; ++slot) {
				if (tpd->iovec[slot].addr)
					dma_unmap_single(&he_dev->pci_dev->dev,
						tpd->iovec[slot].addr,
						tpd->iovec[slot].len & TPD_LEN_MASK,
							DMA_TO_DEVICE);
			}
			if (tpd->skb) {
				if (tpd->vcc->pop)
					tpd->vcc->pop(tpd->vcc, tpd->skb);
				else
					dev_kfree_skb_any(tpd->skb);
				atomic_inc(&tpd->vcc->stats->tx_err);
			}
			dma_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
			return;
		}
	}

	/* 2.1.5 transmit packet descriptor ready queue */
	list_add_tail(&tpd->entry, &he_dev->outstanding_tpds);
	he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status);
	he_dev->tpdrq_tail->cid = cid;
	wmb();

	he_dev->tpdrq_tail = new_tail;

	he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T);
	(void) he_readl(he_dev, TPDRQ_T);	/* flush posted writes */
}

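/*
 * he_open() programs the per-connection transmit (TSR) and receive
 * (RSR) state for a vcc.  For CBR connections a shared rate register
 * (cs_stper) is claimed and programmed with the cell period derived
 * from the SAR clock, e.g. (illustrative arithmetic only) on an OC-12
 * card a pcr_goal of 100000 cells/s gives
 *
 *	period = 66667000 / 100000 = 666 clocks per cell
 *
 * New CBR bandwidth is refused once it would push the total past 90%
 * of the link rate (the 8.2.3 scheduler wrap workaround).
 */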
static int
he_open(struct atm_vcc *vcc)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(vcc->dev);
	struct he_vcc *he_vcc;
	int err = 0;
	unsigned cid, rsr0, rsr1, rsr4, tsr0, tsr0_aal, tsr4, period, reg, clock;
	short vpi = vcc->vpi;
	int vci = vcc->vci;

	if (vci == ATM_VCI_UNSPEC || vpi == ATM_VPI_UNSPEC)
		return 0;

	HPRINTK("open vcc %p %d.%d\n", vcc, vpi, vci);

	set_bit(ATM_VF_ADDR, &vcc->flags);

	cid = he_mkcid(he_dev, vpi, vci);

	he_vcc = kmalloc(sizeof(struct he_vcc), GFP_ATOMIC);
	if (he_vcc == NULL) {
		hprintk("unable to allocate he_vcc during open\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&he_vcc->buffers);
	he_vcc->pdu_len = 0;
	he_vcc->rc_index = -1;

	init_waitqueue_head(&he_vcc->rx_waitq);
	init_waitqueue_head(&he_vcc->tx_waitq);

	vcc->dev_data = he_vcc;

	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
		int pcr_goal;

		pcr_goal = atm_pcr_goal(&vcc->qos.txtp);
		if (pcr_goal == 0)
			pcr_goal = he_dev->atm_dev->link_rate;
		if (pcr_goal < 0)	/* means round down, technically */
			pcr_goal = -pcr_goal;

		HPRINTK("open tx cid 0x%x pcr_goal %d\n", cid, pcr_goal);

		switch (vcc->qos.aal) {
			case ATM_AAL5:
				tsr0_aal = TSR0_AAL5;
				tsr4 = TSR4_AAL5;
				break;
			case ATM_AAL0:
				tsr0_aal = TSR0_AAL0_SDU;
				tsr4 = TSR4_AAL0_SDU;
				break;
			default:
				err = -EINVAL;
				goto open_failed;
		}

		spin_lock_irqsave(&he_dev->global_lock, flags);
		tsr0 = he_readl_tsr0(he_dev, cid);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		if (TSR0_CONN_STATE(tsr0) != 0) {
			hprintk("cid 0x%x not idle (tsr0 = 0x%x)\n", cid, tsr0);
			err = -EBUSY;
			goto open_failed;
		}

		switch (vcc->qos.txtp.traffic_class) {
			case ATM_UBR:
				/* 2.3.3.1 open connection ubr */

				tsr0 = TSR0_UBR | TSR0_GROUP(0) | tsr0_aal |
					TSR0_USE_WMIN | TSR0_UPDATE_GER;
				break;

			case ATM_CBR:
				/* 2.3.3.2 open connection cbr */

				/* 8.2.3 cbr scheduler wrap problem -- limit to 90% total link rate */
				if ((he_dev->total_bw + pcr_goal)
					> (he_dev->atm_dev->link_rate * 9 / 10))
				{
					err = -EBUSY;
					goto open_failed;
				}

				spin_lock_irqsave(&he_dev->global_lock, flags);	/* also protects he_dev->cs_stper[] */

				/* find an unused cs_stper register */
				for (reg = 0; reg < HE_NUM_CS_STPER; ++reg)
					if (he_dev->cs_stper[reg].inuse == 0 ||
					    he_dev->cs_stper[reg].pcr == pcr_goal)
						break;

				if (reg == HE_NUM_CS_STPER) {
					err = -EBUSY;
					spin_unlock_irqrestore(&he_dev->global_lock, flags);
					goto open_failed;
				}

				he_dev->total_bw += pcr_goal;

				he_vcc->rc_index = reg;
				++he_dev->cs_stper[reg].inuse;
				he_dev->cs_stper[reg].pcr = pcr_goal;

				clock = he_is622(he_dev) ? 66667000 : 50000000;
				period = clock / pcr_goal;

				HPRINTK("rc_index = %d period = %d\n",
								reg, period);

				he_writel_mbox(he_dev, rate_to_atmf(period/2),
							CS_STPER0 + reg);
				spin_unlock_irqrestore(&he_dev->global_lock, flags);

				tsr0 = TSR0_CBR | TSR0_GROUP(0) | tsr0_aal |
							TSR0_RC_INDEX(reg);

				break;
			default:
				err = -EINVAL;
				goto open_failed;
		}

		spin_lock_irqsave(&he_dev->global_lock, flags);

		he_writel_tsr0(he_dev, tsr0, cid);
		he_writel_tsr4(he_dev, tsr4 | 1, cid);
		he_writel_tsr1(he_dev, TSR1_MCR(rate_to_atmf(0)) |
					TSR1_PCR(rate_to_atmf(pcr_goal)), cid);
		he_writel_tsr2(he_dev, TSR2_ACR(rate_to_atmf(pcr_goal)), cid);
		he_writel_tsr9(he_dev, TSR9_OPEN_CONN, cid);

		he_writel_tsr3(he_dev, 0x0, cid);
		he_writel_tsr5(he_dev, 0x0, cid);
		he_writel_tsr6(he_dev, 0x0, cid);
		he_writel_tsr7(he_dev, 0x0, cid);
		he_writel_tsr8(he_dev, 0x0, cid);
		he_writel_tsr10(he_dev, 0x0, cid);
		he_writel_tsr11(he_dev, 0x0, cid);
		he_writel_tsr12(he_dev, 0x0, cid);
		he_writel_tsr13(he_dev, 0x0, cid);
		he_writel_tsr14(he_dev, 0x0, cid);
		(void) he_readl_tsr0(he_dev, cid);	/* flush posted writes */
		spin_unlock_irqrestore(&he_dev->global_lock, flags);
	}

	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
		unsigned aal;

		HPRINTK("open rx cid 0x%x (rx_waitq %p)\n", cid,
						&HE_VCC(vcc)->rx_waitq);

		switch (vcc->qos.aal) {
			case ATM_AAL5:
				aal = RSR0_AAL5;
				break;
			case ATM_AAL0:
				aal = RSR0_RAWCELL;
				break;
			default:
				err = -EINVAL;
				goto open_failed;
		}

		spin_lock_irqsave(&he_dev->global_lock, flags);

		rsr0 = he_readl_rsr0(he_dev, cid);
		if (rsr0 & RSR0_OPEN_CONN) {
			spin_unlock_irqrestore(&he_dev->global_lock, flags);

			hprintk("cid 0x%x not idle (rsr0 = 0x%x)\n", cid, rsr0);
			err = -EBUSY;
			goto open_failed;
		}

		rsr1 = RSR1_GROUP(0) | RSR1_RBPL_ONLY;
		rsr4 = RSR4_GROUP(0) | RSR4_RBPL_ONLY;
		rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ?
				(RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0;

#ifdef USE_CHECKSUM_HW
		if (vpi == 0 && vci >= ATM_NOT_RSV_VCI)
			rsr0 |= RSR0_TCP_CKSUM;
#endif

		he_writel_rsr4(he_dev, rsr4, cid);
		he_writel_rsr1(he_dev, rsr1, cid);
		/* 5.1.11 last parameter initialized should be
		   the open/closed indication in rsr0 */
		he_writel_rsr0(he_dev,
			rsr0 | RSR0_START_PDU | RSR0_OPEN_CONN | aal, cid);
		(void) he_readl_rsr0(he_dev, cid);	/* flush posted writes */

		spin_unlock_irqrestore(&he_dev->global_lock, flags);
	}

open_failed:

	if (err) {
		kfree(he_vcc);
		clear_bit(ATM_VF_ADDR, &vcc->flags);
	}
	else
		set_bit(ATM_VF_READY, &vcc->flags);

	return err;
}

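/*
 * he_close() tears a connection down in two phases.  The rx side
 * issues a close via the mailbox and sleeps on rx_waitq until the
 * receive path sees the closing entry.  The tx side first waits for
 * the socket's write allocation to drain (exponential backoff, 1 ms
 * doubling up to 250 ms, at most MAX_RETRY polls), flushes the
 * connection, then queues a zero-length end-of-stream TPD and sleeps
 * on tx_waitq until he_service_tbrq() signals completion.
 */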
static void
he_close(struct atm_vcc *vcc)
{
	unsigned long flags;
	DECLARE_WAITQUEUE(wait, current);
	struct he_dev *he_dev = HE_DEV(vcc->dev);
	struct he_tpd *tpd;
	unsigned cid;
	struct he_vcc *he_vcc = HE_VCC(vcc);
#define MAX_RETRY 30
	int retry = 0, sleep = 1, tx_inuse;

	HPRINTK("close vcc %p %d.%d\n", vcc, vcc->vpi, vcc->vci);

	clear_bit(ATM_VF_READY, &vcc->flags);
	cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);

	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
		int timeout;

		HPRINTK("close rx cid 0x%x\n", cid);

		/* 2.7.2.2 close receive operation */

		/* wait for previous close (if any) to finish */

		spin_lock_irqsave(&he_dev->global_lock, flags);
		while (he_readl(he_dev, RCC_STAT) & RCC_BUSY) {
			HPRINTK("close cid 0x%x RCC_BUSY\n", cid);
			udelay(250);
		}

		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&he_vcc->rx_waitq, &wait);

		he_writel_rsr0(he_dev, RSR0_CLOSE_CONN, cid);
		(void) he_readl_rsr0(he_dev, cid);	/* flush posted writes */
		he_writel_mbox(he_dev, cid, RXCON_CLOSE);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		timeout = schedule_timeout(30*HZ);

		remove_wait_queue(&he_vcc->rx_waitq, &wait);
		set_current_state(TASK_RUNNING);

		if (timeout == 0)
			hprintk("close rx timeout cid 0x%x\n", cid);

		HPRINTK("close rx cid 0x%x complete\n", cid);
	}

	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
		volatile unsigned tsr4, tsr0;
		int timeout;

		HPRINTK("close tx cid 0x%x\n", cid);

		/* 2.1.2
		 *
		 * ... the host must first stop queueing packets to the TPDRQ
		 * on the connection to be closed, then wait for all outstanding
		 * packets to be transmitted and their buffers returned to the
		 * TBRQ. When the last packet on the connection arrives in the
		 * TBRQ, the host issues the close command to the adapter.
		 */

		while (((tx_inuse = refcount_read(&sk_atm(vcc)->sk_wmem_alloc)) > 1) &&
		       (retry < MAX_RETRY)) {
			msleep(sleep);
			if (sleep < 250)
				sleep = sleep * 2;

			++retry;
		}

		if (tx_inuse > 1)
			hprintk("close tx cid 0x%x tx_inuse = %d\n", cid, tx_inuse);

		/* 2.3.1.1 generic close operations with flush */

		spin_lock_irqsave(&he_dev->global_lock, flags);
		he_writel_tsr4_upper(he_dev, TSR4_FLUSH_CONN, cid);
					/* also clears TSR4_SESSION_ENDED */

		switch (vcc->qos.txtp.traffic_class) {
			case ATM_UBR:
				he_writel_tsr1(he_dev,
					TSR1_MCR(rate_to_atmf(200000))
					| TSR1_PCR(0), cid);
				break;
			case ATM_CBR:
				he_writel_tsr14_upper(he_dev, TSR14_DELETE, cid);
				break;
		}
		(void) he_readl_tsr4(he_dev, cid);	/* flush posted writes */

		tpd = __alloc_tpd(he_dev);
		if (tpd == NULL) {
			hprintk("close tx he_alloc_tpd failed cid 0x%x\n", cid);
			goto close_tx_incomplete;
		}
		tpd->status |= TPD_EOS | TPD_INT;
		tpd->skb = NULL;
		tpd->vcc = vcc;
		wmb();

		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&he_vcc->tx_waitq, &wait);
		__enqueue_tpd(he_dev, tpd, cid);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		timeout = schedule_timeout(30*HZ);

		remove_wait_queue(&he_vcc->tx_waitq, &wait);
		set_current_state(TASK_RUNNING);

		spin_lock_irqsave(&he_dev->global_lock, flags);

		if (timeout == 0) {
			hprintk("close tx timeout cid 0x%x\n", cid);
			goto close_tx_incomplete;
		}

		while (!((tsr4 = he_readl_tsr4(he_dev, cid)) & TSR4_SESSION_ENDED)) {
			HPRINTK("close tx cid 0x%x !TSR4_SESSION_ENDED (tsr4 = 0x%x)\n", cid, tsr4);
			udelay(250);
		}

		while (TSR0_CONN_STATE(tsr0 = he_readl_tsr0(he_dev, cid)) != 0) {
			HPRINTK("close tx cid 0x%x TSR0_CONN_STATE != 0 (tsr0 = 0x%x)\n", cid, tsr0);
			udelay(250);
		}

close_tx_incomplete:

		if (vcc->qos.txtp.traffic_class == ATM_CBR) {
			int reg = he_vcc->rc_index;

			HPRINTK("cs_stper reg = %d\n", reg);

			if (he_dev->cs_stper[reg].inuse == 0)
				hprintk("cs_stper[%d].inuse = 0!\n", reg);
			else
				--he_dev->cs_stper[reg].inuse;

			he_dev->total_bw -= he_dev->cs_stper[reg].pcr;
		}
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		HPRINTK("close tx cid 0x%x complete\n", cid);
	}

	kfree(he_vcc);

	clear_bit(ATM_VF_ADDR, &vcc->flags);
}

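/*
 * he_send() wraps an skb in a TPD and posts it.  Without
 * USE_SCATTERGATHER the whole skb must be linear and is mapped as a
 * single DMA buffer.  For AAL0 the skb carries a full 53-byte raw
 * cell: the PTI and CLP bits are lifted out of the fourth header byte
 * into the TPD, then the 5-byte header is pulled off
 * (ATM_AAL0_SDU - ATM_CELL_PAYLOAD bytes) so only the 48-byte payload
 * is handed to the hardware.
 */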
static int
he_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(vcc->dev);
	unsigned cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
	struct he_tpd *tpd;
#ifdef USE_SCATTERGATHER
	int i, slot = 0;
#endif

#define HE_TPD_BUFSIZE 0xffff

	HPRINTK("send %d.%d\n", vcc->vpi, vcc->vci);

	if ((skb->len > HE_TPD_BUFSIZE) ||
	    ((vcc->qos.aal == ATM_AAL0) && (skb->len != ATM_AAL0_SDU))) {
		hprintk("buffer too large (or small) -- %d bytes\n", skb->len);
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		atomic_inc(&vcc->stats->tx_err);
		return -EINVAL;
	}

#ifndef USE_SCATTERGATHER
	if (skb_shinfo(skb)->nr_frags) {
		hprintk("no scatter/gather support\n");
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		atomic_inc(&vcc->stats->tx_err);
		return -EINVAL;
	}
#endif
	spin_lock_irqsave(&he_dev->global_lock, flags);

	tpd = __alloc_tpd(he_dev);
	if (tpd == NULL) {
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		atomic_inc(&vcc->stats->tx_err);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);
		return -ENOMEM;
	}

	if (vcc->qos.aal == ATM_AAL5)
		tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
	else {
		char *pti_clp = (void *) (skb->data + 3);
		int clp, pti;

		pti = (*pti_clp & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
		clp = (*pti_clp & ATM_HDR_CLP);
		tpd->status |= TPD_CELLTYPE(pti);
		if (clp)
			tpd->status |= TPD_CLP;

		skb_pull(skb, ATM_AAL0_SDU - ATM_CELL_PAYLOAD);
	}

#ifdef USE_SCATTERGATHER
	tpd->iovec[slot].addr = dma_map_single(&he_dev->pci_dev->dev, skb->data,
				skb_headlen(skb), DMA_TO_DEVICE);
	tpd->iovec[slot].len = skb_headlen(skb);
	++slot;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		if (slot == TPD_MAXIOV) {	/* queue tpd; start new tpd */
			tpd->vcc = vcc;
			tpd->skb = NULL;	/* not the last fragment
						   so don't ->push() yet */
			wmb();

			__enqueue_tpd(he_dev, tpd, cid);
			tpd = __alloc_tpd(he_dev);
			if (tpd == NULL) {
				if (vcc->pop)
					vcc->pop(vcc, skb);
				else
					dev_kfree_skb_any(skb);
				atomic_inc(&vcc->stats->tx_err);
				spin_unlock_irqrestore(&he_dev->global_lock, flags);
				return -ENOMEM;
			}
			tpd->status |= TPD_USERCELL;
			slot = 0;
		}

		tpd->iovec[slot].addr = skb_frag_dma_map(&he_dev->pci_dev->dev,
				frag, 0, skb_frag_size(frag), DMA_TO_DEVICE);
		tpd->iovec[slot].len = skb_frag_size(frag);
		++slot;
	}

	tpd->iovec[slot - 1].len |= TPD_LST;
#else
	tpd->address0 = dma_map_single(&he_dev->pci_dev->dev, skb->data, skb->len, DMA_TO_DEVICE);
	tpd->length0 = skb->len | TPD_LST;
#endif
	tpd->status |= TPD_INT;

	tpd->vcc = vcc;
	tpd->skb = skb;
	wmb();
	ATM_SKB(skb)->vcc = vcc;

	__enqueue_tpd(he_dev, tpd, cid);
	spin_unlock_irqrestore(&he_dev->global_lock, flags);

	atomic_inc(&vcc->stats->tx);

	return 0;
}

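/*
 * HE_GET_REG is a privileged debugging hook (dispatched from the
 * generic atm_dev ioctl path; see <linux/atm_he.h>) that reads one
 * word from the PCI register window, the receive or transmit
 * connection memory, or the mailbox registers.  Anything unrecognized
 * is handed down to the PHY's ioctl handler when one is attached.
 */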
static int
he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(atm_dev);
	struct he_ioctl_reg reg;
	int err = 0;

	switch (cmd) {
		case HE_GET_REG:
			if (!capable(CAP_NET_ADMIN))
				return -EPERM;

			if (copy_from_user(&reg, arg,
					   sizeof(struct he_ioctl_reg)))
				return -EFAULT;

			spin_lock_irqsave(&he_dev->global_lock, flags);
			switch (reg.type) {
				case HE_REGTYPE_PCI:
					if (reg.addr >= HE_REGMAP_SIZE) {
						err = -EINVAL;
						break;
					}

					reg.val = he_readl(he_dev, reg.addr);
					break;
				case HE_REGTYPE_RCM:
					reg.val =
						he_readl_rcm(he_dev, reg.addr);
					break;
				case HE_REGTYPE_TCM:
					reg.val =
						he_readl_tcm(he_dev, reg.addr);
					break;
				case HE_REGTYPE_MBOX:
					reg.val =
						he_readl_mbox(he_dev, reg.addr);
					break;
				default:
					err = -EINVAL;
					break;
			}
			spin_unlock_irqrestore(&he_dev->global_lock, flags);
			if (err == 0)
				if (copy_to_user(arg, &reg,
						 sizeof(struct he_ioctl_reg)))
					return -EFAULT;
			break;
		default:
#ifdef CONFIG_ATM_HE_USE_SUNI
			if (atm_dev->phy && atm_dev->phy->ioctl)
				err = atm_dev->phy->ioctl(atm_dev, cmd, arg);
#else /* CONFIG_ATM_HE_USE_SUNI */
			err = -EINVAL;
#endif /* CONFIG_ATM_HE_USE_SUNI */
			break;
	}

	return err;
}

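/*
 * PHY register accessors.  The SUNI framer's registers are mapped one
 * per 32-bit word starting at FRAMER, so a framer register number is
 * scaled by 4; these hooks are exported through the driver's
 * atmdev_ops so the PHY code can reach the framer.
 */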
static void
he_phy_put(struct atm_dev *atm_dev, unsigned char val, unsigned long addr)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(atm_dev);

	HPRINTK("phy_put(val 0x%x, addr 0x%lx)\n", val, addr);

	spin_lock_irqsave(&he_dev->global_lock, flags);
	he_writel(he_dev, val, FRAMER + (addr*4));
	(void) he_readl(he_dev, FRAMER + (addr*4));	/* flush posted writes */
	spin_unlock_irqrestore(&he_dev->global_lock, flags);
}


static unsigned char
he_phy_get(struct atm_dev *atm_dev, unsigned long addr)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(atm_dev);
	unsigned reg;

	spin_lock_irqsave(&he_dev->global_lock, flags);
	reg = he_readl(he_dev, FRAMER + (addr*4));
	spin_unlock_irqrestore(&he_dev->global_lock, flags);

	HPRINTK("phy_get(addr 0x%lx) = 0x%x\n", addr, reg);
	return reg;
}

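/*
 * /proc interface: each call emits one line selected by *pos.  Note
 * that the mcc/oec/dcc/cec accumulators are function-static, so the
 * cell error counts are effectively shared across all adapters served
 * by this driver.
 */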
static int
he_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(dev);
	int left, i;
#ifdef notdef
	struct he_rbrq *rbrq_tail;
	struct he_tpdrq *tpdrq_head;
	int rbpl_head, rbpl_tail;
#endif
	static long mcc = 0, oec = 0, dcc = 0, cec = 0;

	left = *pos;
	if (!left--)
		return sprintf(page, "ATM he driver\n");

	if (!left--)
		return sprintf(page, "%s%s\n\n",
			he_dev->prod_id, he_dev->media & 0x40 ? "SM" : "MM");

	if (!left--)
		return sprintf(page, "Mismatched Cells VPI/VCI Not Open Dropped Cells RCM Dropped Cells\n");

	spin_lock_irqsave(&he_dev->global_lock, flags);
	mcc += he_readl(he_dev, MCC);
	oec += he_readl(he_dev, OEC);
	dcc += he_readl(he_dev, DCC);
	cec += he_readl(he_dev, CEC);
	spin_unlock_irqrestore(&he_dev->global_lock, flags);

	if (!left--)
		return sprintf(page, "%16ld %16ld %13ld %17ld\n\n",
							mcc, oec, dcc, cec);

	if (!left--)
		return sprintf(page, "irq_size = %d inuse = ? peak = %d\n",
				CONFIG_IRQ_SIZE, he_dev->irq_peak);

	if (!left--)
		return sprintf(page, "tpdrq_size = %d inuse = ?\n",
						CONFIG_TPDRQ_SIZE);

	if (!left--)
		return sprintf(page, "rbrq_size = %d inuse = ? peak = %d\n",
				CONFIG_RBRQ_SIZE, he_dev->rbrq_peak);

	if (!left--)
		return sprintf(page, "tbrq_size = %d peak = %d\n",
					CONFIG_TBRQ_SIZE, he_dev->tbrq_peak);

#ifdef notdef
	rbpl_head = RBPL_MASK(he_readl(he_dev, G0_RBPL_S));
	rbpl_tail = RBPL_MASK(he_readl(he_dev, G0_RBPL_T));

	inuse = rbpl_head - rbpl_tail;
	if (inuse < 0)
		inuse += CONFIG_RBPL_SIZE * sizeof(struct he_rbp);
	inuse /= sizeof(struct he_rbp);

	if (!left--)
		return sprintf(page, "rbpl_size = %d inuse = %d\n\n",
					CONFIG_RBPL_SIZE, inuse);
#endif

	if (!left--)
		return sprintf(page, "rate controller periods (cbr)\n pcr #vc\n");

	for (i = 0; i < HE_NUM_CS_STPER; ++i)
		if (!left--)
			return sprintf(page, "cs_stper%-2d %8ld %3d\n", i,
						he_dev->cs_stper[i].pcr,
						he_dev->cs_stper[i].inuse);

	if (!left--)
		return sprintf(page, "total bw (cbr): %d (limit %d)\n",
			he_dev->total_bw, he_dev->atm_dev->link_rate * 10 / 9);

	return 0;
}

/* eeprom routines -- see 4.7 */

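/*
 * read_prom_byte() bit-bangs the serial EEPROM through HOST_CNTL: the
 * write-enable bit (0x800) gates the data-in line, readtab/clocktab
 * hold the precomputed control-bit sequences, and each address bit is
 * shifted out on a pair of clock edges with EEPROM_DELAY between
 * writes.  Eight clocked reads of ID_DOUT then assemble the result
 * MSB first.
 */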
static u8 read_prom_byte(struct he_dev *he_dev, int addr)
{
	u32 val = 0, tmp_read = 0;
	int i, j = 0;
	u8 byte_read = 0;

	val = readl(he_dev->membase + HOST_CNTL);
	val &= 0xFFFFE0FF;

	/* Turn on write enable */
	val |= 0x800;
	he_writel(he_dev, val, HOST_CNTL);

	/* Send READ instruction */
	for (i = 0; i < ARRAY_SIZE(readtab); i++) {
		he_writel(he_dev, val | readtab[i], HOST_CNTL);
		udelay(EEPROM_DELAY);
	}

	/* Next, we need to send the byte address to read from */
	for (i = 7; i >= 0; i--) {
		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
		udelay(EEPROM_DELAY);
		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
		udelay(EEPROM_DELAY);
	}

	j = 0;

	val &= 0xFFFFF7FF;	/* Turn off write enable */
	he_writel(he_dev, val, HOST_CNTL);

	/* Now, we can read data from the EEPROM by clocking it in */
	for (i = 7; i >= 0; i--) {
		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
		udelay(EEPROM_DELAY);
		tmp_read = he_readl(he_dev, HOST_CNTL);
		byte_read |= (unsigned char)
			((tmp_read & ID_DOUT) >> ID_DOFFSET << i);
		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
		udelay(EEPROM_DELAY);
	}

	he_writel(he_dev, val | ID_CS, HOST_CNTL);
	udelay(EEPROM_DELAY);

	return byte_read;
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("chas williams <[email protected]>");
MODULE_DESCRIPTION("ForeRunnerHE ATM Adapter driver");
module_param(disable64, bool, 0);
MODULE_PARM_DESC(disable64, "disable 64-bit pci bus transfers");
module_param(nvpibits, short, 0);
MODULE_PARM_DESC(nvpibits, "number of bits for vpi (default 0)");
module_param(nvcibits, short, 0);
MODULE_PARM_DESC(nvcibits, "number of bits for vci (default 12)");
module_param(rx_skb_reserve, short, 0);
MODULE_PARM_DESC(rx_skb_reserve, "padding for receive skb (default 16)");
module_param(irq_coalesce, bool, 0);
MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)");
module_param(sdh, bool, 0);
MODULE_PARM_DESC(sdh, "use SDH framing (default 0)");

static const struct pci_device_id he_pci_tbl[] = {
	{ PCI_VDEVICE(FORE, PCI_DEVICE_ID_FORE_HE), 0 },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, he_pci_tbl);

static struct pci_driver he_driver = {
	.name =		"he",
	.probe =	he_init_one,
	.remove =	he_remove_one,
	.id_table =	he_pci_tbl,
};

module_pci_driver(he_driver);