/******************************************************************************
iphase.c: Device driver for Interphase ATM PCI adapter cards
Author: Peter Wang <[email protected]>
Some fixes: Arnaldo Carvalho de Melo <[email protected]>
Interphase Corporation <www.iphase.com>
Version: 1.0
*******************************************************************************

This software may be used and distributed according to the terms
of the GNU General Public License (GPL), incorporated herein by reference.
Drivers based on this skeleton fall under the GPL and must retain
the authorship (implicit copyright) notice.

This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.

Modified from an incomplete driver for Interphase 5575 1KVC 1M card which
was originally written by Monalisa Agrawal at UNH. Now this driver
supports a variety of variants of Interphase ATM PCI (i)Chip adapter
card family (See www.iphase.com/products/ClassSheet.cfm?ClassID=ATM)
in terms of PHY type, the size of control memory and the size of
packet memory. The following are the change log and history:

Bugfix Mona's UBR driver.
Modify the basic memory allocation and DMA logic.
Port the driver to the latest kernel from 2.0.46.
Complete the ABR logic of the driver, and add the ABR work-
around for the hardware anomalies.
Add the CBR support.
Add the flow control logic to the driver to allow rate-limited VCs.
Add 4K VC support to the board with 512K control memory.
Add the support of all the variants of the Interphase ATM PCI
(i)Chip adapter cards including x575 (155M OC3 and UTP155), x525
(25M UTP25) and x531 (DS3 and E3).
Add SMP support.

Support and updates available at: ftp://ftp.iphase.com/pub/atm

*******************************************************************************/

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/atm.h>
#include <linux/atmdev.h>
#include <linux/ctype.h>
#include <linux/sonet.h>
#include <linux/skbuff.h>
#include <linux/time.h>
#include <linux/delay.h>
#include <linux/uio.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <linux/atomic.h>
#include <linux/uaccess.h>
#include <asm/string.h>
#include <asm/byteorder.h>
#include <linux/vmalloc.h>
#include <linux/jiffies.h>
#include <linux/nospec.h>
#include "iphase.h"
#include "suni.h"
#define swap_byte_order(x) (((x & 0xff) << 8) | ((x & 0xff00) >> 8))
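/* Illustrative note (added): swap_byte_order() exchanges the two bytes of a
 * 16-bit value, e.g. swap_byte_order(0x1234) == 0x3412. It is used below in
 * rx_dle_intr() to convert the AAL5 trailer length to host byte order. */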

#define PRIV(dev) ((struct suni_priv *) dev->phy_data)

static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr);
static void desc_dbg(IADEV *iadev);

static IADEV *ia_dev[8];
static struct atm_dev *_ia_dev[8];
static int iadev_count;
static void ia_led_timer(struct timer_list *unused);
static DEFINE_TIMER(ia_timer, ia_led_timer);
static int IA_TX_BUF = DFL_TX_BUFFERS, IA_TX_BUF_SZ = DFL_TX_BUF_SZ;
static int IA_RX_BUF = DFL_RX_BUFFERS, IA_RX_BUF_SZ = DFL_RX_BUF_SZ;
static uint IADebugFlag = /* IF_IADBG_ERR | IF_IADBG_CBR| IF_IADBG_INIT_ADAPTER
            |IF_IADBG_ABR | IF_IADBG_EVENT*/ 0;

module_param(IA_TX_BUF, int, 0);
module_param(IA_TX_BUF_SZ, int, 0);
module_param(IA_RX_BUF, int, 0);
module_param(IA_RX_BUF_SZ, int, 0);
module_param(IADebugFlag, uint, 0644);

MODULE_DESCRIPTION("Driver for Interphase ATM PCI NICs");
MODULE_LICENSE("GPL");

/**************************** IA_LIB **********************************/

static void ia_init_rtn_q (IARTN_Q *que)
{
    que->next = NULL;
    que->tail = NULL;
}

static void ia_enque_head_rtn_q (IARTN_Q *que, IARTN_Q * data)
{
    data->next = NULL;
    if (que->next == NULL)
        que->next = que->tail = data;
    else {
        data->next = que->next;
        que->next = data;
    }
    return;
}

static int ia_enque_rtn_q (IARTN_Q *que, struct desc_tbl_t data) {
    IARTN_Q *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
    if (!entry)
        return -ENOMEM;
    entry->data = data;
    entry->next = NULL;
    if (que->next == NULL)
        que->next = que->tail = entry;
    else {
        que->tail->next = entry;
        que->tail = que->tail->next;
    }
    return 1;
}

static IARTN_Q * ia_deque_rtn_q (IARTN_Q *que) {
    IARTN_Q *tmpdata;
    if (que->next == NULL)
        return NULL;
    tmpdata = que->next;
    if (que->next == que->tail)
        que->next = que->tail = NULL;
    else
        que->next = que->next->next;
    return tmpdata;
}

static void ia_hack_tcq(IADEV *dev) {

    u_short desc1;
    u_short tcq_wr;
    struct ia_vcc *iavcc_r = NULL;

    tcq_wr = readl(dev->seg_reg+TCQ_WR_PTR) & 0xffff;
    while (dev->host_tcq_wr != tcq_wr) {
        desc1 = *(u_short *)(dev->seg_ram + dev->host_tcq_wr);
        if (!desc1) ;
        else if (!dev->desc_tbl[desc1 -1].timestamp) {
            IF_ABR(printk(" Desc %d is reset at %ld\n", desc1 -1, jiffies);)
            *(u_short *) (dev->seg_ram + dev->host_tcq_wr) = 0;
        }
        else if (dev->desc_tbl[desc1 -1].timestamp) {
            if (!(iavcc_r = dev->desc_tbl[desc1 -1].iavcc)) {
                printk("IA: Fatal err in get_desc\n");
                continue;
            }
            iavcc_r->vc_desc_cnt--;
            dev->desc_tbl[desc1 -1].timestamp = 0;
            IF_EVENT(printk("ia_hack: return_q skb = 0x%p desc = %d\n",
                            dev->desc_tbl[desc1 -1].txskb, desc1);)
            if (iavcc_r->pcr < dev->rate_limit) {
                IA_SKB_STATE (dev->desc_tbl[desc1-1].txskb) |= IA_TX_DONE;
                if (ia_enque_rtn_q(&dev->tx_return_q, dev->desc_tbl[desc1 -1]) < 0)
                    printk("ia_hack_tcq: No memory available\n");
            }
            dev->desc_tbl[desc1 -1].iavcc = NULL;
            dev->desc_tbl[desc1 -1].txskb = NULL;
        }
        dev->host_tcq_wr += 2;
        if (dev->host_tcq_wr > dev->ffL.tcq_ed)
            dev->host_tcq_wr = dev->ffL.tcq_st;
    }
} /* ia_hack_tcq */

static u16 get_desc (IADEV *dev, struct ia_vcc *iavcc) {
    u_short desc_num, i;
    struct ia_vcc *iavcc_r = NULL;
    unsigned long delta;
    static unsigned long timer = 0;
    int ltimeout;

    ia_hack_tcq (dev);
    if ((time_after(jiffies,timer+50)) || ((dev->ffL.tcq_rd==dev->host_tcq_wr))) {
        timer = jiffies;
        i=0;
        while (i < dev->num_tx_desc) {
            if (!dev->desc_tbl[i].timestamp) {
                i++;
                continue;
            }
            ltimeout = dev->desc_tbl[i].iavcc->ltimeout;
            delta = jiffies - dev->desc_tbl[i].timestamp;
            if (delta >= ltimeout) {
                IF_ABR(printk("RECOVER run!! desc_tbl %d = %d delta = %ld, time = %ld\n", i, dev->desc_tbl[i].timestamp, delta, jiffies);)
                if (dev->ffL.tcq_rd == dev->ffL.tcq_st)
                    dev->ffL.tcq_rd = dev->ffL.tcq_ed;
                else
                    dev->ffL.tcq_rd -= 2;
                *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd) = i+1;
                if (!dev->desc_tbl[i].txskb || !(iavcc_r = dev->desc_tbl[i].iavcc))
                    printk("Fatal err, desc table vcc or skb is NULL\n");
                else
                    iavcc_r->vc_desc_cnt--;
                dev->desc_tbl[i].timestamp = 0;
                dev->desc_tbl[i].iavcc = NULL;
                dev->desc_tbl[i].txskb = NULL;
            }
            i++;
        } /* while */
    }
    if (dev->ffL.tcq_rd == dev->host_tcq_wr)
        return 0xFFFF;

    /* Get the next available descriptor number from TCQ */
    desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);

    while (!desc_num || (dev->desc_tbl[desc_num -1]).timestamp) {
        dev->ffL.tcq_rd += 2;
        if (dev->ffL.tcq_rd > dev->ffL.tcq_ed)
            dev->ffL.tcq_rd = dev->ffL.tcq_st;
        if (dev->ffL.tcq_rd == dev->host_tcq_wr)
            return 0xFFFF;
        desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
    }

    /* get system time */
    dev->desc_tbl[desc_num -1].timestamp = jiffies;
    return desc_num;
}

static void clear_lockup (struct atm_vcc *vcc, IADEV *dev) {
    u_char foundLockUp;
    vcstatus_t *vcstatus;
    u_short *shd_tbl;
    u_short tempCellSlot, tempFract;
    struct main_vc *abr_vc = (struct main_vc *)dev->MAIN_VC_TABLE_ADDR;
    struct ext_vc *eabr_vc = (struct ext_vc *)dev->EXT_VC_TABLE_ADDR;
    u_int i;

    if (vcc->qos.txtp.traffic_class == ATM_ABR) {
        vcstatus = (vcstatus_t *) &(dev->testTable[vcc->vci]->vc_status);
        vcstatus->cnt++;
        foundLockUp = 0;
        if (vcstatus->cnt == 0x05) {
            abr_vc += vcc->vci;
            eabr_vc += vcc->vci;
            if (eabr_vc->last_desc) {
                if ((abr_vc->status & 0x07) == ABR_STATE /* 0x2 */) {
                    /* wait for 10 microseconds */
                    udelay(10);
                    if ((eabr_vc->last_desc)&&((abr_vc->status & 0x07)==ABR_STATE))
                        foundLockUp = 1;
                }
                else {
                    tempCellSlot = abr_vc->last_cell_slot;
                    tempFract = abr_vc->fraction;
                    if ((tempCellSlot == dev->testTable[vcc->vci]->lastTime)
                        && (tempFract == dev->testTable[vcc->vci]->fract))
                        foundLockUp = 1;
                    dev->testTable[vcc->vci]->lastTime = tempCellSlot;
                    dev->testTable[vcc->vci]->fract = tempFract;
                }
            } /* last descriptor */
            vcstatus->cnt = 0;
        } /* vcstatus->cnt */

        if (foundLockUp) {
            IF_ABR(printk("LOCK UP found\n");)
            writew(0xFFFD, dev->seg_reg+MODE_REG_0);
            /* wait for 10 microseconds */
            udelay(10);
            abr_vc->status &= 0xFFF8;
            abr_vc->status |= 0x0001;  /* state is idle */
            shd_tbl = (u_short *)dev->ABR_SCHED_TABLE_ADDR;
            for (i = 0; ((i < dev->num_vc) && (shd_tbl[i])); i++);
            if (i < dev->num_vc)
                shd_tbl[i] = vcc->vci;
            else
                IF_ERR(printk("ABR Seg. may not continue on VC %x\n", vcc->vci);)
            writew(T_ONLINE, dev->seg_reg+MODE_REG_0);
            writew(~(TRANSMIT_DONE|TCQ_NOT_EMPTY), dev->seg_reg+SEG_MASK_REG);
            writew(TRANSMIT_DONE, dev->seg_reg+SEG_INTR_STATUS_REG);
            vcstatus->cnt = 0;
        } /* foundLockUp */

    } /* if an ABR VC */


}

/*
** Conversion of 24-bit cellrate (cells/sec) to 16-bit floating point format.
**
**  +----+----+------------------+-------------------------------+
**  |  R | NZ |  5-bit exponent  |        9-bit mantissa         |
**  +----+----+------------------+-------------------------------+
**
**    R = reserved (written as 0)
**    NZ = 0 if 0 cells/sec; 1 otherwise
**
**    if NZ = 1, rate = 1.mmmmmmmmm x 2^(eeeee) cells/sec
*/
static u16
cellrate_to_float(u32 cr)
{

#define NZ      0x4000
#define M_BITS  9       /* Number of bits in mantissa */
#define E_BITS  5       /* Number of bits in exponent */
#define M_MASK  0x1ff
#define E_MASK  0x1f
    u16 flot;
    u32 tmp = cr & 0x00ffffff;
    int i = 0;
    if (cr == 0)
        return 0;
    while (tmp != 1) {
        tmp >>= 1;
        i++;
    }
    if (i == M_BITS)
        flot = NZ | (i << M_BITS) | (cr & M_MASK);
    else if (i < M_BITS)
        flot = NZ | (i << M_BITS) | ((cr << (M_BITS - i)) & M_MASK);
    else
        flot = NZ | (i << M_BITS) | ((cr >> (i - M_BITS)) & M_MASK);
    return flot;
}
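/* Worked example (illustrative, added): for cr = 8192 cells/sec the loop
 * finds i = 13 (the highest set bit), so the encoding is
 * NZ | (13 << 9) | ((8192 >> 4) & 0x1ff) = 0x4000 | 0x1a00 | 0 = 0x5a00,
 * i.e. 1.000000000 x 2^13 = 8192 cells/sec. Mantissa bits below the top
 * nine are shifted out, so rates that are not exactly representable are
 * rounded down. */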

#if 0
/*
** Conversion of 16-bit floating point format to 24-bit cellrate (cells/sec).
*/
static u32
float_to_cellrate(u16 rate)
{
    u32 exp, mantissa, cps;
    if ((rate & NZ) == 0)
        return 0;
    exp = (rate >> M_BITS) & E_MASK;
    mantissa = rate & M_MASK;
    if (exp == 0)
        return 1;
    cps = (1 << M_BITS) | mantissa;
    if (exp == M_BITS)
        cps = cps;
    else if (exp > M_BITS)
        cps <<= (exp - M_BITS);
    else
        cps >>= (M_BITS - exp);
    return cps;
}
#endif

static void init_abr_vc (IADEV *dev, srv_cls_param_t *srv_p) {
    srv_p->class_type = ATM_ABR;
    srv_p->pcr  = dev->LineRate;
    srv_p->mcr  = 0;
    srv_p->icr  = 0x055cb7;
    srv_p->tbe  = 0xffffff;
    srv_p->frtt = 0x3a;
    srv_p->rif  = 0xf;
    srv_p->rdf  = 0xb;
    srv_p->nrm  = 0x4;
    srv_p->trm  = 0x7;
    srv_p->cdf  = 0x3;
    srv_p->adtf = 50;
}

static int
ia_open_abr_vc(IADEV *dev, srv_cls_param_t *srv_p,
               struct atm_vcc *vcc, u8 flag)
{
    f_vc_abr_entry *f_abr_vc;
    r_vc_abr_entry *r_abr_vc;
    u32 icr;
    u8 trm, nrm, crm;
    u16 adtf, air, *ptr16;
    f_abr_vc = (f_vc_abr_entry *)dev->MAIN_VC_TABLE_ADDR;
    f_abr_vc += vcc->vci;
    switch (flag) {
    case 1: /* FFRED initialization */
#if 0  /* sanity check */
        if (srv_p->pcr == 0)
            return INVALID_PCR;
        if (srv_p->pcr > dev->LineRate)
            srv_p->pcr = dev->LineRate;
        if ((srv_p->mcr + dev->sum_mcr) > dev->LineRate)
            return MCR_UNAVAILABLE;
        if (srv_p->mcr > srv_p->pcr)
            return INVALID_MCR;
        if (!(srv_p->icr))
            srv_p->icr = srv_p->pcr;
        if ((srv_p->icr < srv_p->mcr) || (srv_p->icr > srv_p->pcr))
            return INVALID_ICR;
        if ((srv_p->tbe < MIN_TBE) || (srv_p->tbe > MAX_TBE))
            return INVALID_TBE;
        if ((srv_p->frtt < MIN_FRTT) || (srv_p->frtt > MAX_FRTT))
            return INVALID_FRTT;
        if (srv_p->nrm > MAX_NRM)
            return INVALID_NRM;
        if (srv_p->trm > MAX_TRM)
            return INVALID_TRM;
        if (srv_p->adtf > MAX_ADTF)
            return INVALID_ADTF;
        else if (srv_p->adtf == 0)
            srv_p->adtf = 1;
        if (srv_p->cdf > MAX_CDF)
            return INVALID_CDF;
        if (srv_p->rif > MAX_RIF)
            return INVALID_RIF;
        if (srv_p->rdf > MAX_RDF)
            return INVALID_RDF;
#endif
        memset ((caddr_t)f_abr_vc, 0, sizeof(*f_abr_vc));
        f_abr_vc->f_vc_type = ABR;
        nrm = 2 << srv_p->nrm;        /* (2 ** (srv_p->nrm +1)) */
                                      /* i.e 2**n = 2 << (n-1) */
        f_abr_vc->f_nrm = nrm << 8 | nrm;
        trm = 100000/(2 << (16 - srv_p->trm));
        if (trm == 0) trm = 1;
        f_abr_vc->f_nrmexp = (((srv_p->nrm +1) & 0x0f) << 12)|(MRM << 8) | trm;
        crm = srv_p->tbe / nrm;
        if (crm == 0) crm = 1;
        f_abr_vc->f_crm = crm & 0xff;
        f_abr_vc->f_pcr = cellrate_to_float(srv_p->pcr);
        icr = min( srv_p->icr, (srv_p->tbe > srv_p->frtt) ?
                   ((srv_p->tbe/srv_p->frtt)*1000000) :
                   (1000000/(srv_p->frtt/srv_p->tbe)));
        f_abr_vc->f_icr = cellrate_to_float(icr);
        adtf = (10000 * srv_p->adtf)/8192;
        if (adtf == 0) adtf = 1;
        f_abr_vc->f_cdf = ((7 - srv_p->cdf) << 12 | adtf) & 0xfff;
        f_abr_vc->f_mcr = cellrate_to_float(srv_p->mcr);
        f_abr_vc->f_acr = f_abr_vc->f_icr;
        f_abr_vc->f_status = 0x0042;
        break;
    case 0: /* RFRED initialization */
        ptr16 = (u_short *)(dev->reass_ram + REASS_TABLE*dev->memSize);
        *(ptr16 + vcc->vci) = NO_AAL5_PKT | REASS_ABR;
        r_abr_vc = (r_vc_abr_entry*)(dev->reass_ram+ABR_VC_TABLE*dev->memSize);
        r_abr_vc += vcc->vci;
        r_abr_vc->r_status_rdf = (15 - srv_p->rdf) & 0x000f;
        air = srv_p->pcr << (15 - srv_p->rif);
        if (air == 0) air = 1;
        r_abr_vc->r_air = cellrate_to_float(air);
        dev->testTable[vcc->vci]->vc_status = VC_ACTIVE | VC_ABR;
        dev->sum_mcr += srv_p->mcr;
        dev->n_abr++;
        break;
    default:
        break;
    }
    return 0;
}
static int ia_cbr_setup (IADEV *dev, struct atm_vcc *vcc) {
    u32 rateLow=0, rateHigh, rate;
    int entries;
    struct ia_vcc *ia_vcc;

    int idealSlot =0, testSlot, toBeAssigned, inc;
    u32 spacing;
    u16 *SchedTbl, *TstSchedTbl;
    u16 cbrVC, vcIndex;
    u32 fracSlot = 0;
    u32 sp_mod = 0;
    u32 sp_mod2 = 0;

    /* IpAdjustTrafficParams */
    if (vcc->qos.txtp.max_pcr <= 0) {
        IF_ERR(printk("PCR for CBR not defined\n");)
        return -1;
    }
    rate = vcc->qos.txtp.max_pcr;
    entries = rate / dev->Granularity;
    IF_CBR(printk("CBR: CBR entries=0x%x for rate=0x%x & Gran=0x%x\n",
                  entries, rate, dev->Granularity);)
    if (entries < 1)
        IF_CBR(printk("CBR: Bandwidth smaller than granularity of CBR table\n");)
    rateLow  = entries * dev->Granularity;
    rateHigh = (entries + 1) * dev->Granularity;
    if (3*(rate - rateLow) > (rateHigh - rate))
        entries++;
    if (entries > dev->CbrRemEntries) {
        IF_CBR(printk("CBR: Not enough bandwidth to support this PCR.\n");)
        IF_CBR(printk("Entries = 0x%x, CbrRemEntries = 0x%x.\n",
                      entries, dev->CbrRemEntries);)
        return -EBUSY;
    }

    ia_vcc = INPH_IA_VCC(vcc);
    ia_vcc->NumCbrEntry = entries;
    dev->sum_mcr += entries * dev->Granularity;
    /* IaFFrednInsertCbrSched */
    // Starting at an arbitrary location, place the entries into the table
    // as smoothly as possible
    cbrVC   = 0;
    spacing = dev->CbrTotEntries / entries;
    sp_mod  = dev->CbrTotEntries % entries; // get modulo
    toBeAssigned = entries;
    fracSlot = 0;
    vcIndex  = vcc->vci;
    IF_CBR(printk("Vci=0x%x,Spacing=0x%x,Sp_mod=0x%x\n",vcIndex,spacing,sp_mod);)
    while (toBeAssigned)
    {
        // If this is the first time, start the table loading for this connection
        // as close to entryPoint as possible.
        if (toBeAssigned == entries)
        {
            idealSlot = dev->CbrEntryPt;
            dev->CbrEntryPt += 2;    // Adding 2 helps to prevent clumping
            if (dev->CbrEntryPt >= dev->CbrTotEntries)
                dev->CbrEntryPt -= dev->CbrTotEntries;  // Wrap if necessary
        } else {
            idealSlot += (u32)(spacing + fracSlot); // Point to the next location
                                                    // in the table that would be smoothest
            fracSlot = ((sp_mod + sp_mod2) / entries);  // get new integer part
            sp_mod2  = ((sp_mod + sp_mod2) % entries);  // calc new fractional part
        }
        if (idealSlot >= (int)dev->CbrTotEntries)
            idealSlot -= dev->CbrTotEntries;
        // Continuously check around this ideal value until a null
        // location is encountered.
        SchedTbl = (u16*)(dev->seg_ram+CBR_SCHED_TABLE*dev->memSize);
        inc = 0;
        testSlot = idealSlot;
        TstSchedTbl = (u16*)(SchedTbl+testSlot);  //set index and read in value
        IF_CBR(printk("CBR Testslot 0x%x AT Location 0x%p, NumToAssign=%d\n",
                      testSlot, TstSchedTbl,toBeAssigned);)
        memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
        while (cbrVC)  // If another VC at this location, we have to keep looking
        {
            inc++;
            testSlot = idealSlot - inc;
            if (testSlot < 0) { // Wrap if necessary
                testSlot += dev->CbrTotEntries;
                IF_CBR(printk("Testslot Wrap. STable Start=0x%p,Testslot=%d\n",
                              SchedTbl,testSlot);)
            }
            TstSchedTbl = (u16 *)(SchedTbl + testSlot);  // set table index
            memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
            if (!cbrVC)
                break;
            testSlot = idealSlot + inc;
            if (testSlot >= (int)dev->CbrTotEntries) { // Wrap if necessary
                testSlot -= dev->CbrTotEntries;
                IF_CBR(printk("TotCbrEntries=%d",dev->CbrTotEntries);)
                IF_CBR(printk(" Testslot=0x%x ToBeAssgned=%d\n",
                              testSlot, toBeAssigned);)
            }
            // set table index and read in value
            TstSchedTbl = (u16*)(SchedTbl + testSlot);
            IF_CBR(printk("Reading CBR Tbl from 0x%p, CbrVal=0x%x Iteration %d\n",
                          TstSchedTbl,cbrVC,inc);)
            memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
        } /* while */
        // Move this VCI number into this location of the CBR Sched table.
        memcpy((caddr_t)TstSchedTbl, (caddr_t)&vcIndex, sizeof(*TstSchedTbl));
        dev->CbrRemEntries--;
        toBeAssigned--;
    } /* while */

    /* IaFFrednCbrEnable */
    dev->NumEnabledCBR++;
    if (dev->NumEnabledCBR == 1) {
        writew((CBR_EN | UBR_EN | ABR_EN | (0x23 << 2)), dev->seg_reg+STPARMS);
        IF_CBR(printk("CBR is enabled\n");)
    }
    return 0;
}
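/* Illustrative numbers (added): with CbrTotEntries = 4096 and entries = 3,
 * spacing = 1365 and sp_mod = 1, so successive ideal slots advance by 1365
 * while the sp_mod/sp_mod2 bookkeeping spreads the one leftover slot across
 * the passes -- the CBR slots end up as evenly spaced as integer arithmetic
 * allows. An occupied slot is resolved by probing outward (idealSlot +/- inc)
 * until a free entry is found. */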
static void ia_cbrVc_close (struct atm_vcc *vcc) {
    IADEV *iadev;
    u16 *SchedTbl, NullVci = 0;
    u32 i, NumFound;

    iadev = INPH_IA_DEV(vcc->dev);
    iadev->NumEnabledCBR--;
    SchedTbl = (u16*)(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize);
    if (iadev->NumEnabledCBR == 0) {
        writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
        IF_CBR(printk("CBR support disabled\n");)
    }
    NumFound = 0;
    for (i=0; i < iadev->CbrTotEntries; i++)
    {
        if (*SchedTbl == vcc->vci) {
            iadev->CbrRemEntries++;
            *SchedTbl = NullVci;
            IF_CBR(NumFound++;)
        }
        SchedTbl++;
    }
    IF_CBR(printk("Exit ia_cbrVc_close, NumRemoved=%d\n",NumFound);)
}

static int ia_avail_descs(IADEV *iadev) {
    int tmp = 0;
    ia_hack_tcq(iadev);
    if (iadev->host_tcq_wr >= iadev->ffL.tcq_rd)
        tmp = (iadev->host_tcq_wr - iadev->ffL.tcq_rd) / 2;
    else
        tmp = (iadev->ffL.tcq_ed - iadev->ffL.tcq_rd + 2 + iadev->host_tcq_wr -
               iadev->ffL.tcq_st) / 2;
    return tmp;
}
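/* Note (added): the TCQ holds 16-bit descriptor numbers, so the pointers move
 * in 2-byte steps and the pointer difference is halved to count descriptors.
 * E.g., assuming (hypothetical values) tcq_st = 0x3100, tcq_ed = 0x31fe,
 * tcq_rd = 0x31f0 and host_tcq_wr = 0x3110, the wrapped count is
 * (0x31fe - 0x31f0 + 2 + 0x3110 - 0x3100) / 2 = 16 descriptors. */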

static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb);

static int ia_que_tx (IADEV *iadev) {
    struct sk_buff *skb;
    int num_desc;
    struct atm_vcc *vcc;
    num_desc = ia_avail_descs(iadev);

    while (num_desc && (skb = skb_dequeue(&iadev->tx_backlog))) {
        if (!(vcc = ATM_SKB(skb)->vcc)) {
            dev_kfree_skb_any(skb);
            printk("ia_que_tx: Null vcc\n");
            break;
        }
        if (!test_bit(ATM_VF_READY,&vcc->flags)) {
            dev_kfree_skb_any(skb);
            printk("Free the SKB on closed vci %d \n", vcc->vci);
            break;
        }
        if (ia_pkt_tx (vcc, skb)) {
            skb_queue_head(&iadev->tx_backlog, skb);
        }
        num_desc--;
    }
    return 0;
}

static void ia_tx_poll (IADEV *iadev) {
    struct atm_vcc *vcc = NULL;
    struct sk_buff *skb = NULL, *skb1 = NULL;
    struct ia_vcc *iavcc;
    IARTN_Q *rtne;

    ia_hack_tcq(iadev);
    while ((rtne = ia_deque_rtn_q(&iadev->tx_return_q))) {
        skb = rtne->data.txskb;
        if (!skb) {
            printk("ia_tx_poll: skb is null\n");
            goto out;
        }
        vcc = ATM_SKB(skb)->vcc;
        if (!vcc) {
            printk("ia_tx_poll: vcc is null\n");
            dev_kfree_skb_any(skb);
            goto out;
        }

        iavcc = INPH_IA_VCC(vcc);
        if (!iavcc) {
            printk("ia_tx_poll: iavcc is null\n");
            dev_kfree_skb_any(skb);
            goto out;
        }

        skb1 = skb_dequeue(&iavcc->txing_skb);
        while (skb1 && (skb1 != skb)) {
            if (!(IA_SKB_STATE(skb1) & IA_TX_DONE)) {
                printk("IA_tx_intr: Vci %d lost pkt!!!\n", vcc->vci);
            }
            IF_ERR(printk("Release the SKB not match\n");)
            if ((vcc->pop) && (skb1->len != 0))
            {
                vcc->pop(vcc, skb1);
                IF_EVENT(printk("Transmit Done - skb 0x%lx return\n",
                                (long)skb1);)
            }
            else
                dev_kfree_skb_any(skb1);
            skb1 = skb_dequeue(&iavcc->txing_skb);
        }
        if (!skb1) {
            IF_EVENT(printk("IA: Vci %d - skb not found requeued\n",vcc->vci);)
            ia_enque_head_rtn_q (&iadev->tx_return_q, rtne);
            break;
        }
        if ((vcc->pop) && (skb->len != 0))
        {
            vcc->pop(vcc, skb);
            IF_EVENT(printk("Tx Done - skb 0x%lx return\n",(long)skb);)
        }
        else
            dev_kfree_skb_any(skb);
        kfree(rtne);
    }
    ia_que_tx(iadev);
out:
    return;
}
#if 0
static void ia_eeprom_put (IADEV *iadev, u32 addr, u_short val)
{
    u32 t;
    int i;
    /*
     * Issue a command to enable writes to the NOVRAM
     */
    NVRAM_CMD (EXTEND + EWEN);
    NVRAM_CLR_CE;
    /*
     * issue the write command
     */
    NVRAM_CMD(IAWRITE + addr);
    /*
     * Send the data, starting with D15, then D14, and so on for 16 bits
     */
    for (i=15; i>=0; i--) {
        NVRAM_CLKOUT (val & 0x8000);
        val <<= 1;
    }
    NVRAM_CLR_CE;
    CFG_OR(NVCE);
    t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS);
    while (!(t & NVDO))
        t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS);

    NVRAM_CLR_CE;
    /*
     * disable writes again
     */
    NVRAM_CMD(EXTEND + EWDS)
    NVRAM_CLR_CE;
    CFG_AND(~NVDI);
}
#endif

static u16 ia_eeprom_get (IADEV *iadev, u32 addr)
{
    u_short val;
    u32 t;
    int i;
    /*
     * Read the first bit that was clocked with the falling edge of
     * the last command data clock
     */
    NVRAM_CMD(IAREAD + addr);
    /*
     * Now read the rest of the bits, the next bit read is D14, then D13,
     * and so on.
     */
    val = 0;
    for (i=15; i>=0; i--) {
        NVRAM_CLKIN(t);
        val |= (t << i);
    }
    NVRAM_CLR_CE;
    CFG_AND(~NVDI);
    return val;
}

static void ia_hw_type(IADEV *iadev) {
    u_short memType = ia_eeprom_get(iadev, 25);
    iadev->memType = memType;
    if ((memType & MEM_SIZE_MASK) == MEM_SIZE_1M) {
        iadev->num_tx_desc = IA_TX_BUF;
        iadev->tx_buf_sz   = IA_TX_BUF_SZ;
        iadev->num_rx_desc = IA_RX_BUF;
        iadev->rx_buf_sz   = IA_RX_BUF_SZ;
    } else if ((memType & MEM_SIZE_MASK) == MEM_SIZE_512K) {
        if (IA_TX_BUF == DFL_TX_BUFFERS)
            iadev->num_tx_desc = IA_TX_BUF / 2;
        else
            iadev->num_tx_desc = IA_TX_BUF;
        iadev->tx_buf_sz = IA_TX_BUF_SZ;
        if (IA_RX_BUF == DFL_RX_BUFFERS)
            iadev->num_rx_desc = IA_RX_BUF / 2;
        else
            iadev->num_rx_desc = IA_RX_BUF;
        iadev->rx_buf_sz = IA_RX_BUF_SZ;
    }
    else {
        if (IA_TX_BUF == DFL_TX_BUFFERS)
            iadev->num_tx_desc = IA_TX_BUF / 8;
        else
            iadev->num_tx_desc = IA_TX_BUF;
        iadev->tx_buf_sz = IA_TX_BUF_SZ;
        if (IA_RX_BUF == DFL_RX_BUFFERS)
            iadev->num_rx_desc = IA_RX_BUF / 8;
        else
            iadev->num_rx_desc = IA_RX_BUF;
        iadev->rx_buf_sz = IA_RX_BUF_SZ;
    }
    iadev->rx_pkt_ram = TX_PACKET_RAM + (iadev->num_tx_desc * iadev->tx_buf_sz);
    IF_INIT(printk("BUF: tx=%d,sz=%d rx=%d sz= %d rx_pkt_ram=%d\n",
                   iadev->num_tx_desc, iadev->tx_buf_sz, iadev->num_rx_desc,
                   iadev->rx_buf_sz, iadev->rx_pkt_ram);)

#if 0
    if ((memType & FE_MASK) == FE_SINGLE_MODE) {
        iadev->phy_type = PHY_OC3C_S;
    else if ((memType & FE_MASK) == FE_UTP_OPTION)
        iadev->phy_type = PHY_UTP155;
    else
        iadev->phy_type = PHY_OC3C_M;
#endif

    iadev->phy_type = memType & FE_MASK;
    IF_INIT(printk("memType = 0x%x iadev->phy_type = 0x%x\n",
                   memType,iadev->phy_type);)
    if (iadev->phy_type == FE_25MBIT_PHY)
        iadev->LineRate = (u32)(((25600000/8)*26)/(27*53));
    else if (iadev->phy_type == FE_DS3_PHY)
        iadev->LineRate = (u32)(((44736000/8)*26)/(27*53));
    else if (iadev->phy_type == FE_E3_PHY)
        iadev->LineRate = (u32)(((34368000/8)*26)/(27*53));
    else
        iadev->LineRate = (u32)(ATM_OC3_PCR);
    IF_INIT(printk("iadev->LineRate = %d \n", iadev->LineRate);)

}
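/* Note on the arithmetic above (added): LineRate converts the PHY bit rate to
 * a cell rate -- divide by 8 (bits to bytes) and by 53 (bytes per ATM cell),
 * scaled by 26/27, presumably to account for framing/overhead cells. For the
 * 25.6 Mbit PHY this gives ((25600000/8)*26)/(27*53) = 58141 cells/sec. */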

static u32 ia_phy_read32(struct iadev_priv *ia, unsigned int reg)
{
    return readl(ia->phy + (reg >> 2));
}

static void ia_phy_write32(struct iadev_priv *ia, unsigned int reg, u32 val)
{
    writel(val, ia->phy + (reg >> 2));
}
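/* Note (added): the PHY register macros appear to be byte offsets while
 * ia->phy is treated as a pointer to 32-bit words here, so reg >> 2 converts
 * the byte offset into a word index before the MMIO access. */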

static void ia_frontend_intr(struct iadev_priv *iadev)
{
    u32 status;

    if (iadev->phy_type & FE_25MBIT_PHY) {
        status = ia_phy_read32(iadev, MB25_INTR_STATUS);
        iadev->carrier_detect = (status & MB25_IS_GSB) ? 1 : 0;
    } else if (iadev->phy_type & FE_DS3_PHY) {
        ia_phy_read32(iadev, SUNI_DS3_FRM_INTR_STAT);
        status = ia_phy_read32(iadev, SUNI_DS3_FRM_STAT);
        iadev->carrier_detect = (status & SUNI_DS3_LOSV) ? 0 : 1;
    } else if (iadev->phy_type & FE_E3_PHY) {
        ia_phy_read32(iadev, SUNI_E3_FRM_MAINT_INTR_IND);
        status = ia_phy_read32(iadev, SUNI_E3_FRM_FRAM_INTR_IND_STAT);
        iadev->carrier_detect = (status & SUNI_E3_LOS) ? 0 : 1;
    } else {
        status = ia_phy_read32(iadev, SUNI_RSOP_STATUS);
        iadev->carrier_detect = (status & SUNI_LOSV) ? 0 : 1;
    }

    printk(KERN_INFO "IA: SUNI carrier %s\n",
           iadev->carrier_detect ? "detected" : "lost signal");
}

static void ia_mb25_init(struct iadev_priv *iadev)
{
#if 0
    mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC | MB25_MC_ENABLED;
#endif
    ia_phy_write32(iadev, MB25_MASTER_CTRL, MB25_MC_DRIC | MB25_MC_DREC);
    ia_phy_write32(iadev, MB25_DIAG_CONTROL, 0);

    iadev->carrier_detect =
        (ia_phy_read32(iadev, MB25_INTR_STATUS) & MB25_IS_GSB) ? 1 : 0;
}

struct ia_reg {
    u16 reg;
    u16 val;
};

static void ia_phy_write(struct iadev_priv *iadev,
                         const struct ia_reg *regs, int len)
{
    while (len--) {
        ia_phy_write32(iadev, regs->reg, regs->val);
        regs++;
    }
}

static void ia_suni_pm7345_init_ds3(struct iadev_priv *iadev)
{
    static const struct ia_reg suni_ds3_init[] = {
        { SUNI_DS3_FRM_INTR_ENBL, 0x17 },
        { SUNI_DS3_FRM_CFG,       0x01 },
        { SUNI_DS3_TRAN_CFG,      0x01 },
        { SUNI_CONFIG,            0 },
        { SUNI_SPLR_CFG,          0 },
        { SUNI_SPLT_CFG,          0 }
    };
    u32 status;

    status = ia_phy_read32(iadev, SUNI_DS3_FRM_STAT);
    iadev->carrier_detect = (status & SUNI_DS3_LOSV) ? 0 : 1;

    ia_phy_write(iadev, suni_ds3_init, ARRAY_SIZE(suni_ds3_init));
}

static void ia_suni_pm7345_init_e3(struct iadev_priv *iadev)
{
    static const struct ia_reg suni_e3_init[] = {
        { SUNI_E3_FRM_FRAM_OPTIONS,       0x04 },
        { SUNI_E3_FRM_MAINT_OPTIONS,      0x20 },
        { SUNI_E3_FRM_FRAM_INTR_ENBL,     0x1d },
        { SUNI_E3_FRM_MAINT_INTR_ENBL,    0x30 },
        { SUNI_E3_TRAN_STAT_DIAG_OPTIONS, 0 },
        { SUNI_E3_TRAN_FRAM_OPTIONS,      0x01 },
        { SUNI_CONFIG,                    SUNI_PM7345_E3ENBL },
        { SUNI_SPLR_CFG,                  0x41 },
        { SUNI_SPLT_CFG,                  0x41 }
    };
    u32 status;

    status = ia_phy_read32(iadev, SUNI_E3_FRM_FRAM_INTR_IND_STAT);
    iadev->carrier_detect = (status & SUNI_E3_LOS) ? 0 : 1;
    ia_phy_write(iadev, suni_e3_init, ARRAY_SIZE(suni_e3_init));
}

static void ia_suni_pm7345_init(struct iadev_priv *iadev)
{
    static const struct ia_reg suni_init[] = {
        /* Enable RSOP loss of signal interrupt. */
        { SUNI_INTR_ENBL,         0x28 },
        /* Clear error counters. */
        { SUNI_ID_RESET,          0 },
        /* Clear "PMCTST" in master test register. */
        { SUNI_MASTER_TEST,       0 },

        { SUNI_RXCP_CTRL,         0x2c },
        { SUNI_RXCP_FCTRL,        0x81 },

        { SUNI_RXCP_IDLE_PAT_H1,  0 },
        { SUNI_RXCP_IDLE_PAT_H2,  0 },
        { SUNI_RXCP_IDLE_PAT_H3,  0 },
        { SUNI_RXCP_IDLE_PAT_H4,  0x01 },

        { SUNI_RXCP_IDLE_MASK_H1, 0xff },
        { SUNI_RXCP_IDLE_MASK_H2, 0xff },
        { SUNI_RXCP_IDLE_MASK_H3, 0xff },
        { SUNI_RXCP_IDLE_MASK_H4, 0xfe },

        { SUNI_RXCP_CELL_PAT_H1,  0 },
        { SUNI_RXCP_CELL_PAT_H2,  0 },
        { SUNI_RXCP_CELL_PAT_H3,  0 },
        { SUNI_RXCP_CELL_PAT_H4,  0x01 },

        { SUNI_RXCP_CELL_MASK_H1, 0xff },
        { SUNI_RXCP_CELL_MASK_H2, 0xff },
        { SUNI_RXCP_CELL_MASK_H3, 0xff },
        { SUNI_RXCP_CELL_MASK_H4, 0xff },

        { SUNI_TXCP_CTRL,         0xa4 },
        { SUNI_TXCP_INTR_EN_STS,  0x10 },
        { SUNI_TXCP_IDLE_PAT_H5,  0x55 }
    };

    if (iadev->phy_type & FE_DS3_PHY)
        ia_suni_pm7345_init_ds3(iadev);
    else
        ia_suni_pm7345_init_e3(iadev);

    ia_phy_write(iadev, suni_init, ARRAY_SIZE(suni_init));

    ia_phy_write32(iadev, SUNI_CONFIG, ia_phy_read32(iadev, SUNI_CONFIG) &
                   ~(SUNI_PM7345_LLB | SUNI_PM7345_CLB |
                     SUNI_PM7345_DLB | SUNI_PM7345_PLB));
#ifdef __SNMP__
    suni_pm7345->suni_rxcp_intr_en_sts |= SUNI_OOCDE;
#endif /* __SNMP__ */
    return;
}


/***************************** IA_LIB END *****************************/

#ifdef CONFIG_ATM_IA_DEBUG
static int tcnter = 0;
static void xdump( u_char* cp, int length, char* prefix )
{
    int col, count;
    u_char prntBuf[120];
    u_char* pBuf = prntBuf;
    count = 0;
    while (count < length) {
        pBuf += sprintf( pBuf, "%s", prefix );
        for (col = 0; count + col < length && col < 16; col++) {
            if (col != 0 && (col % 4) == 0)
                pBuf += sprintf( pBuf, " " );
            pBuf += sprintf( pBuf, "%02X ", cp[count + col] );
        }
        while (col++ < 16) {      /* pad end of buffer with blanks */
            if ((col % 4) == 0)
                sprintf( pBuf, " " );
            pBuf += sprintf( pBuf, "   " );
        }
        pBuf += sprintf( pBuf, "  " );
        for (col = 0; count + col < length && col < 16; col++) {
            u_char c = cp[count + col];

            if (isascii(c) && isprint(c))
                pBuf += sprintf(pBuf, "%c", c);
            else
                pBuf += sprintf(pBuf, ".");
        }
        printk("%s\n", prntBuf);
        count += col;
        pBuf = prntBuf;
    }

} /* close xdump(... */
#endif /* CONFIG_ATM_IA_DEBUG */


static struct atm_dev *ia_boards = NULL;

#define ACTUAL_RAM_BASE \
    RAM_BASE*((iadev->mem)/(128 * 1024))
#define ACTUAL_SEG_RAM_BASE \
    IPHASE5575_FRAG_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))
#define ACTUAL_REASS_RAM_BASE \
    IPHASE5575_REASS_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))


/*-- some utilities and memory allocation stuff will come here -------------*/

static void desc_dbg(IADEV *iadev) {

    u_short tcq_wr_ptr, tcq_st_ptr, tcq_ed_ptr;
    u32 i;
    void __iomem *tmp;
    // regval = readl((u32)ia_cmds->maddr);
    tcq_wr_ptr = readw(iadev->seg_reg+TCQ_WR_PTR);
    printk("B_tcq_wr = 0x%x desc = %d last desc = %d\n",
           tcq_wr_ptr, readw(iadev->seg_ram+tcq_wr_ptr),
           readw(iadev->seg_ram+tcq_wr_ptr-2));
    printk(" host_tcq_wr = 0x%x  host_tcq_rd = 0x%x \n", iadev->host_tcq_wr,
           iadev->ffL.tcq_rd);
    tcq_st_ptr = readw(iadev->seg_reg+TCQ_ST_ADR);
    tcq_ed_ptr = readw(iadev->seg_reg+TCQ_ED_ADR);
    printk("tcq_st_ptr = 0x%x    tcq_ed_ptr = 0x%x \n", tcq_st_ptr, tcq_ed_ptr);
    i = 0;
    while (tcq_st_ptr != tcq_ed_ptr) {
        tmp = iadev->seg_ram+tcq_st_ptr;
        printk("TCQ slot %d desc = %d  Addr = %p\n", i++, readw(tmp), tmp);
        tcq_st_ptr += 2;
    }
    for (i=0; i <iadev->num_tx_desc; i++)
        printk("Desc_tbl[%d] = %d \n", i, iadev->desc_tbl[i].timestamp);
}


/*----------------------------- Receiving side stuff --------------------------*/

static void rx_excp_rcvd(struct atm_dev *dev)
{
#if 0 /* closing the receiving side will cause too many excp int */
    IADEV *iadev;
    u_short state;
    u_short excpq_rd_ptr;
    //u_short *ptr;
    int vci, error = 1;
    iadev = INPH_IA_DEV(dev);
    state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
    while ((state & EXCPQ_EMPTY) != EXCPQ_EMPTY)
    {   printk("state = %x \n", state);
        excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_RD_PTR) & 0xffff;
        printk("state = %x excpq_rd_ptr = %x \n", state, excpq_rd_ptr);
        if (excpq_rd_ptr == *(u16*)(iadev->reass_reg + EXCP_Q_WR_PTR))
            IF_ERR(printk("excpq_rd_ptr is wrong!!!\n");)
        // TODO: update exception stat
        vci = readw(iadev->reass_ram+excpq_rd_ptr);
        error = readw(iadev->reass_ram+excpq_rd_ptr+2) & 0x0007;
        // pwang_test
        excpq_rd_ptr += 4;
        if (excpq_rd_ptr > (readw(iadev->reass_reg + EXCP_Q_ED_ADR)& 0xffff))
            excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_ST_ADR)& 0xffff;
        writew( excpq_rd_ptr, iadev->reass_reg + EXCP_Q_RD_PTR);
        state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
    }
#endif
}

static void free_desc(struct atm_dev *dev, int desc)
{
    IADEV *iadev;
    iadev = INPH_IA_DEV(dev);
    writew(desc, iadev->reass_ram+iadev->rfL.fdq_wr);
    iadev->rfL.fdq_wr +=2;
    if (iadev->rfL.fdq_wr > iadev->rfL.fdq_ed)
        iadev->rfL.fdq_wr = iadev->rfL.fdq_st;
    writew(iadev->rfL.fdq_wr, iadev->reass_reg+FREEQ_WR_PTR);
}


static int rx_pkt(struct atm_dev *dev)
{
    IADEV *iadev;
    struct atm_vcc *vcc;
    unsigned short status;
    struct rx_buf_desc __iomem *buf_desc_ptr;
    int desc;
    struct dle* wr_ptr;
    int len;
    struct sk_buff *skb;
    u_int buf_addr, dma_addr;

    iadev = INPH_IA_DEV(dev);
    if (iadev->rfL.pcq_rd == (readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff))
    {
        printk(KERN_ERR DEV_LABEL "(itf %d) Receive queue empty\n", dev->number);
        return -EINVAL;
    }
    /* mask 1st 3 bits to get the actual descno. */
    desc = readw(iadev->reass_ram+iadev->rfL.pcq_rd) & 0x1fff;
    IF_RX(printk("reass_ram = %p iadev->rfL.pcq_rd = 0x%x desc = %d\n",
                 iadev->reass_ram, iadev->rfL.pcq_rd, desc);
          printk(" pcq_wr_ptr = 0x%x\n",
                 readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff);)
    /* update the read pointer - maybe we should do this at the end */
    if (iadev->rfL.pcq_rd == iadev->rfL.pcq_ed)
        iadev->rfL.pcq_rd = iadev->rfL.pcq_st;
    else
        iadev->rfL.pcq_rd += 2;
    writew(iadev->rfL.pcq_rd, iadev->reass_reg+PCQ_RD_PTR);

    /* get the buffer desc entry.
       update stuff. - doesn't seem to be any update necessary
    */
    buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
    /* make the ptr point to the corresponding buffer desc entry */
    buf_desc_ptr += desc;
    if (!desc || (desc > iadev->num_rx_desc) ||
        ((buf_desc_ptr->vc_index & 0xffff) >= iadev->num_vc)) {
        free_desc(dev, desc);
        IF_ERR(printk("IA: bad descriptor desc = %d \n", desc);)
        return -1;
    }
    vcc = iadev->rx_open[buf_desc_ptr->vc_index & 0xffff];
    if (!vcc)
    {
        free_desc(dev, desc);
        printk("IA: null vcc, drop PDU\n");
        return -1;
    }


    /* might want to check the status bits for errors */
    status = (u_short) (buf_desc_ptr->desc_mode);
    if (status & (RX_CER | RX_PTE | RX_OFL))
    {
        atomic_inc(&vcc->stats->rx_err);
        IF_ERR(printk("IA: bad packet, dropping it");)
        if (status & RX_CER) {
            IF_ERR(printk(" cause: packet CRC error\n");)
        }
        else if (status & RX_PTE) {
            IF_ERR(printk(" cause: packet time out\n");)
        }
        else {
            IF_ERR(printk(" cause: buffer overflow\n");)
        }
        goto out_free_desc;
    }

    /*
       build DLE.
    */

    buf_addr = (buf_desc_ptr->buf_start_hi << 16) | buf_desc_ptr->buf_start_lo;
    dma_addr = (buf_desc_ptr->dma_start_hi << 16) | buf_desc_ptr->dma_start_lo;
    len = dma_addr - buf_addr;
    if (len > iadev->rx_buf_sz) {
        printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
        atomic_inc(&vcc->stats->rx_err);
        goto out_free_desc;
    }

    if (!(skb = atm_alloc_charge(vcc, len, GFP_ATOMIC))) {
        if (vcc->vci < 32)
            printk("Drop control packets\n");
        goto out_free_desc;
    }
    skb_put(skb,len);
    // pwang_test
    ATM_SKB(skb)->vcc = vcc;
    ATM_DESC(skb) = desc;
    skb_queue_tail(&iadev->rx_dma_q, skb);

    /* Build the DLE structure */
    wr_ptr = iadev->rx_dle_q.write;
    wr_ptr->sys_pkt_addr = dma_map_single(&iadev->pci->dev, skb->data,
                                          len, DMA_FROM_DEVICE);
    wr_ptr->local_pkt_addr = buf_addr;
    wr_ptr->bytes = len;    /* We don't know this do we ?? */
    wr_ptr->mode = DMA_INT_ENABLE;

    /* should take care of wrap around here too. */
    if (++wr_ptr == iadev->rx_dle_q.end)
        wr_ptr = iadev->rx_dle_q.start;
    iadev->rx_dle_q.write = wr_ptr;
    udelay(1);
    /* Increment transaction counter */
    writel(1, iadev->dma+IPHASE5575_RX_COUNTER);
out: return 0;
out_free_desc:
    free_desc(dev, desc);
    goto out;
}

static void rx_intr(struct atm_dev *dev)
{
    IADEV *iadev;
    u_short status;
    u_short state, i;

    iadev = INPH_IA_DEV(dev);
    status = readl(iadev->reass_reg+REASS_INTR_STATUS_REG) & 0xffff;
    IF_EVENT(printk("rx_intr: status = 0x%x\n", status);)
    if (status & RX_PKT_RCVD)
    {
        /* do something */
        /* Basically received an interrupt for receiving a packet.
           A descriptor would have been written to the packet complete
           queue. Get all the descriptors and set up dma to move the
           packets till the packet complete queue is empty..
        */
        state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
        IF_EVENT(printk("Rx intr status: RX_PKT_RCVD %08x\n", status);)
        while (!(state & PCQ_EMPTY))
        {
            rx_pkt(dev);
            state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
        }
        iadev->rxing = 1;
    }
    if (status & RX_FREEQ_EMPT)
    {
        if (iadev->rxing) {
            iadev->rx_tmp_cnt = iadev->rx_pkt_cnt;
            iadev->rx_tmp_jif = jiffies;
            iadev->rxing = 0;
        }
        else if ((time_after(jiffies, iadev->rx_tmp_jif + 50)) &&
                 ((iadev->rx_pkt_cnt - iadev->rx_tmp_cnt) == 0)) {
            for (i = 1; i <= iadev->num_rx_desc; i++)
                free_desc(dev, i);
            printk("Test logic RUN!!!!\n");
            writew( ~(RX_FREEQ_EMPT|RX_EXCP_RCVD),iadev->reass_reg+REASS_MASK_REG);
            iadev->rxing = 1;
        }
        IF_EVENT(printk("Rx intr status: RX_FREEQ_EMPT %08x\n", status);)
    }

    if (status & RX_EXCP_RCVD)
    {
        /* probably need to handle the exception queue also. */
        IF_EVENT(printk("Rx intr status: RX_EXCP_RCVD %08x\n", status);)
        rx_excp_rcvd(dev);
    }


    if (status & RX_RAW_RCVD)
    {
        /* need to handle the raw incoming cells. This depends on
           whether we have programmed to receive the raw cells or not.
           Else ignore. */
        IF_EVENT(printk("Rx intr status: RX_RAW_RCVD %08x\n", status);)
    }
}


static void rx_dle_intr(struct atm_dev *dev)
{
    IADEV *iadev;
    struct atm_vcc *vcc;
    struct sk_buff *skb;
    int desc;
    u_short state;
    struct dle *dle, *cur_dle;
    u_int dle_lp;
    int len;
    iadev = INPH_IA_DEV(dev);

    /* free all the dles done, that is just update our own dle read pointer
       - do we really need to do this. Think not. */
    /* DMA is done, just get all the receive buffers from the rx dma queue
       and push them up to the higher layer protocol. Also free the desc
       associated with the buffer. */
    dle = iadev->rx_dle_q.read;
    dle_lp = readl(iadev->dma+IPHASE5575_RX_LIST_ADDR) & (sizeof(struct dle)*DLE_ENTRIES - 1);
    cur_dle = (struct dle*)(iadev->rx_dle_q.start + (dle_lp >> 4));
    while (dle != cur_dle)
    {
        /* free the DMAed skb */
        skb = skb_dequeue(&iadev->rx_dma_q);
        if (!skb)
            goto INCR_DLE;
        desc = ATM_DESC(skb);
        free_desc(dev, desc);

        if (!(len = skb->len))
        {
            printk("rx_dle_intr: skb len 0\n");
            dev_kfree_skb_any(skb);
        }
        else
        {
            struct cpcs_trailer *trailer;
            u_short length;
            struct ia_vcc *ia_vcc;

            dma_unmap_single(&iadev->pci->dev, iadev->rx_dle_q.write->sys_pkt_addr,
                             len, DMA_FROM_DEVICE);
            /* no VCC related housekeeping done as yet. lets see */
            vcc = ATM_SKB(skb)->vcc;
            if (!vcc) {
                printk("IA: null vcc\n");
                dev_kfree_skb_any(skb);
                goto INCR_DLE;
            }
            ia_vcc = INPH_IA_VCC(vcc);
            if (ia_vcc == NULL)
            {
                atomic_inc(&vcc->stats->rx_err);
                atm_return(vcc, skb->truesize);
                dev_kfree_skb_any(skb);
                goto INCR_DLE;
            }
            // get real pkt length  pwang_test
            trailer = (struct cpcs_trailer*)((u_char *)skb->data +
                                             skb->len - sizeof(*trailer));
            length = swap_byte_order(trailer->length);
            if ((length > iadev->rx_buf_sz) || (length >
                                                (skb->len - sizeof(struct cpcs_trailer))))
            {
                atomic_inc(&vcc->stats->rx_err);
                IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
                              length, skb->len);)
                atm_return(vcc, skb->truesize);
                dev_kfree_skb_any(skb);
                goto INCR_DLE;
            }
            skb_trim(skb, length);

            /* Display the packet */
            IF_RXPKT(printk("\nDmad Recvd data: len = %d \n", skb->len);
                     xdump(skb->data, skb->len, "RX: ");
                     printk("\n");)

            IF_RX(printk("rx_dle_intr: skb push");)
            vcc->push(vcc,skb);
            atomic_inc(&vcc->stats->rx);
            iadev->rx_pkt_cnt++;
        }
INCR_DLE:
        if (++dle == iadev->rx_dle_q.end)
            dle = iadev->rx_dle_q.start;
    }
    iadev->rx_dle_q.read = dle;

    /* if the interrupts are masked because there were no free desc available,
       unmask them now. */
    if (!iadev->rxing) {
        state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
        if (!(state & FREEQ_EMPTY)) {
            state = readl(iadev->reass_reg + REASS_MASK_REG) & 0xffff;
            writel(state & ~(RX_FREEQ_EMPT |/* RX_EXCP_RCVD |*/ RX_PKT_RCVD),
                   iadev->reass_reg+REASS_MASK_REG);
            iadev->rxing++;
        }
    }
}


static int open_rx(struct atm_vcc *vcc)
{
    IADEV *iadev;
    u_short __iomem *vc_table;
    u_short __iomem *reass_ptr;
    IF_EVENT(printk("iadev: open_rx %d.%d\n", vcc->vpi, vcc->vci);)

    if (vcc->qos.rxtp.traffic_class == ATM_NONE) return 0;
    iadev = INPH_IA_DEV(vcc->dev);
    if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
        if (iadev->phy_type & FE_25MBIT_PHY) {
            printk("IA: ABR not support\n");
            return -EINVAL;
        }
    }
    /* Make only this VCI in the vc table valid and let all
       others be invalid entries */
    vc_table = iadev->reass_ram+RX_VC_TABLE*iadev->memSize;
    vc_table += vcc->vci;
    /* mask the last 6 bits and OR it with 3 for 1K VCs */

    *vc_table = vcc->vci << 6;
    /* Also keep a list of open rx vcs so that we can attach them with
       incoming PDUs later. */
    if ((vcc->qos.rxtp.traffic_class == ATM_ABR) ||
        (vcc->qos.txtp.traffic_class == ATM_ABR))
    {
        srv_cls_param_t srv_p;
        init_abr_vc(iadev, &srv_p);
        ia_open_abr_vc(iadev, &srv_p, vcc, 0);
    }
    else { /* for UBR  later may need to add CBR logic */
        reass_ptr = iadev->reass_ram+REASS_TABLE*iadev->memSize;
        reass_ptr += vcc->vci;
        *reass_ptr = NO_AAL5_PKT;
    }

    if (iadev->rx_open[vcc->vci])
        printk(KERN_CRIT DEV_LABEL "(itf %d): VCI %d already open\n",
               vcc->dev->number, vcc->vci);
    iadev->rx_open[vcc->vci] = vcc;
    return 0;
}

static int rx_init(struct atm_dev *dev)
{
    IADEV *iadev;
    struct rx_buf_desc __iomem *buf_desc_ptr;
    unsigned long rx_pkt_start = 0;
    void *dle_addr;
    struct abr_vc_table *abr_vc_table;
    u16 *vc_table;
    u16 *reass_table;
    int i,j, vcsize_sel;
    u_short freeq_st_adr;
    u_short *freeq_start;

    iadev = INPH_IA_DEV(dev);
    // spin_lock_init(&iadev->rx_lock);

    /* Allocate 4k bytes - more aligned than needed (4k boundary) */
    dle_addr = dma_alloc_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE,
                                  &iadev->rx_dle_dma, GFP_KERNEL);
    if (!dle_addr) {
        printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
        goto err_out;
    }
    iadev->rx_dle_q.start = (struct dle *)dle_addr;
    iadev->rx_dle_q.read  = iadev->rx_dle_q.start;
    iadev->rx_dle_q.write = iadev->rx_dle_q.start;
    iadev->rx_dle_q.end   = (struct dle*)((unsigned long)dle_addr+sizeof(struct dle)*DLE_ENTRIES);
    /* the end of the dle q points to the entry after the last
       DLE that can be used. */

    /* write the upper 20 bits of the start address to rx list address register */
    /* We know this is 32bit bus addressed so the following is safe */
    writel(iadev->rx_dle_dma & 0xfffff000,
           iadev->dma + IPHASE5575_RX_LIST_ADDR);
    IF_INIT(printk("Tx Dle list addr: 0x%p value: 0x%0x\n",
                   iadev->dma+IPHASE5575_TX_LIST_ADDR,
                   readl(iadev->dma + IPHASE5575_TX_LIST_ADDR));
            printk("Rx Dle list addr: 0x%p value: 0x%0x\n",
                   iadev->dma+IPHASE5575_RX_LIST_ADDR,
                   readl(iadev->dma + IPHASE5575_RX_LIST_ADDR));)

    writew(0xffff, iadev->reass_reg+REASS_MASK_REG);
    writew(0, iadev->reass_reg+MODE_REG);
    writew(RESET_REASS, iadev->reass_reg+REASS_COMMAND_REG);

    /* Receive side control memory map
       -------------------------------

       Buffer descr   0x0000 (736 - 23K)
       VP Table       0x5c00 (256 - 512)
       Except q       0x5e00 (128 - 512)
       Free buffer q  0x6000 (1K - 2K)
       Packet comp q  0x6800 (1K - 2K)
       Reass Table    0x7000 (1K - 2K)
       VC Table       0x7800 (1K - 2K)
       ABR VC Table   0x8000 (1K - 32K)
    */

    /* Base address for Buffer Descriptor Table */
    writew(RX_DESC_BASE >> 16, iadev->reass_reg+REASS_DESC_BASE);
    /* Set the buffer size register */
    writew(iadev->rx_buf_sz, iadev->reass_reg+BUF_SIZE);

    /* Initialize each entry in the Buffer Descriptor Table */
    iadev->RX_DESC_BASE_ADDR = iadev->reass_ram+RX_DESC_BASE*iadev->memSize;
    buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
    memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
    buf_desc_ptr++;
    rx_pkt_start = iadev->rx_pkt_ram;
    for (i=1; i<=iadev->num_rx_desc; i++)
    {
        memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
        buf_desc_ptr->buf_start_hi = rx_pkt_start >> 16;
        buf_desc_ptr->buf_start_lo = rx_pkt_start & 0x0000ffff;
        buf_desc_ptr++;
        rx_pkt_start += iadev->rx_buf_sz;
    }
    IF_INIT(printk("Rx Buffer desc ptr: 0x%p\n", buf_desc_ptr);)
    i = FREE_BUF_DESC_Q*iadev->memSize;
    writew(i >> 16, iadev->reass_reg+REASS_QUEUE_BASE);
    writew(i, iadev->reass_reg+FREEQ_ST_ADR);
    writew(i+iadev->num_rx_desc*sizeof(u_short),
           iadev->reass_reg+FREEQ_ED_ADR);
    writew(i, iadev->reass_reg+FREEQ_RD_PTR);
    writew(i+iadev->num_rx_desc*sizeof(u_short),
           iadev->reass_reg+FREEQ_WR_PTR);
    /* Fill the FREEQ with all the free descriptors. */
    freeq_st_adr = readw(iadev->reass_reg+FREEQ_ST_ADR);
    freeq_start = (u_short *)(iadev->reass_ram+freeq_st_adr);
    for (i=1; i<=iadev->num_rx_desc; i++)
    {
        *freeq_start = (u_short)i;
        freeq_start++;
    }
    IF_INIT(printk("freeq_start: 0x%p\n", freeq_start);)
    /* Packet Complete Queue */
    i = (PKT_COMP_Q * iadev->memSize) & 0xffff;
    writew(i, iadev->reass_reg+PCQ_ST_ADR);
    writew(i+iadev->num_vc*sizeof(u_short), iadev->reass_reg+PCQ_ED_ADR);
    writew(i, iadev->reass_reg+PCQ_RD_PTR);
    writew(i, iadev->reass_reg+PCQ_WR_PTR);

    /* Exception Queue */
    i = (EXCEPTION_Q * iadev->memSize) & 0xffff;
    writew(i, iadev->reass_reg+EXCP_Q_ST_ADR);
    writew(i + NUM_RX_EXCP * sizeof(RX_ERROR_Q),
           iadev->reass_reg+EXCP_Q_ED_ADR);
    writew(i, iadev->reass_reg+EXCP_Q_RD_PTR);
    writew(i, iadev->reass_reg+EXCP_Q_WR_PTR);

    /* Load local copy of FREEQ and PCQ ptrs */
    iadev->rfL.fdq_st = readw(iadev->reass_reg+FREEQ_ST_ADR) & 0xffff;
    iadev->rfL.fdq_ed = readw(iadev->reass_reg+FREEQ_ED_ADR) & 0xffff;
    iadev->rfL.fdq_rd = readw(iadev->reass_reg+FREEQ_RD_PTR) & 0xffff;
    iadev->rfL.fdq_wr = readw(iadev->reass_reg+FREEQ_WR_PTR) & 0xffff;
    iadev->rfL.pcq_st = readw(iadev->reass_reg+PCQ_ST_ADR) & 0xffff;
    iadev->rfL.pcq_ed = readw(iadev->reass_reg+PCQ_ED_ADR) & 0xffff;
    iadev->rfL.pcq_rd = readw(iadev->reass_reg+PCQ_RD_PTR) & 0xffff;
    iadev->rfL.pcq_wr = readw(iadev->reass_reg+PCQ_WR_PTR) & 0xffff;

    IF_INIT(printk("INIT:pcq_st:0x%x pcq_ed:0x%x pcq_rd:0x%x pcq_wr:0x%x",
                   iadev->rfL.pcq_st, iadev->rfL.pcq_ed, iadev->rfL.pcq_rd,
                   iadev->rfL.pcq_wr);)
    /* just for check - no VP TBL */
    /* VP Table */
    /* writew(0x0b80, iadev->reass_reg+VP_LKUP_BASE); */
    /* initialize VP Table for invalid VPIs
       - I guess we can write all 1s or 0x000f in the entire memory
         space or something similar.
    */

    /* This seems to work and looks right to me too !!! */
    i = REASS_TABLE * iadev->memSize;
    writew((i >> 3), iadev->reass_reg+REASS_TABLE_BASE);
    /* initialize Reassembly table to I don't know what ???? */
    reass_table = (u16 *)(iadev->reass_ram+i);
    j = REASS_TABLE_SZ * iadev->memSize;
    for (i=0; i < j; i++)
        *reass_table++ = NO_AAL5_PKT;
    i = 8*1024;
    vcsize_sel = 0;
    while (i != iadev->num_vc) {
        i /= 2;
        vcsize_sel++;
    }
    i = RX_VC_TABLE * iadev->memSize;
    writew(((i>>3) & 0xfff8) | vcsize_sel, iadev->reass_reg+VC_LKUP_BASE);
    vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
    j = RX_VC_TABLE_SZ * iadev->memSize;
    for (i = 0; i < j; i++)
    {
        /* shift the reassembly pointer by 3 + lower 3 bits of
           vc_lkup_base register (=3 for 1K VCs) and the last byte
           is those low 3 bits.
           Shall program this later.
        */
        *vc_table = (i << 6) | 15;  /* for invalid VCI */
        vc_table++;
    }
    /* ABR VC table */
    i = ABR_VC_TABLE * iadev->memSize;
    writew(i >> 3, iadev->reass_reg+ABR_LKUP_BASE);

    i = ABR_VC_TABLE * iadev->memSize;
    abr_vc_table = (struct abr_vc_table *)(iadev->reass_ram+i);
    j = REASS_TABLE_SZ * iadev->memSize;
    memset ((char*)abr_vc_table, 0, j * sizeof(*abr_vc_table));
    for (i = 0; i < j; i++) {
        abr_vc_table->rdf = 0x0003;
        abr_vc_table->air = 0x5eb1;
        abr_vc_table++;
    }

    /* Initialize other registers */

    /* VP Filter Register set for VC Reassembly only */
    writew(0xff00, iadev->reass_reg+VP_FILTER);
    writew(0, iadev->reass_reg+XTRA_RM_OFFSET);
    writew(0x1, iadev->reass_reg+PROTOCOL_ID);

    /* Packet Timeout Count related Registers :
       Set packet timeout to occur in about 3 seconds
       Set Packet Aging Interval count register to overflow in about 4 us
    */
    writew(0xF6F8, iadev->reass_reg+PKT_TM_CNT);

    i = (j >> 6) & 0xFF;
    j += 2 * (j - 1);
    i |= ((j << 2) & 0xFF00);
    writew(i, iadev->reass_reg+TMOUT_RANGE);

    /* initialize the desc_tbl */
    for (i=0; i<iadev->num_tx_desc;i++)
        iadev->desc_tbl[i].timestamp = 0;

    /* to clear the interrupt status register - read it */
    readw(iadev->reass_reg+REASS_INTR_STATUS_REG);

    /* Mask Register - clear it */
    writew(~(RX_FREEQ_EMPT|RX_PKT_RCVD), iadev->reass_reg+REASS_MASK_REG);

    skb_queue_head_init(&iadev->rx_dma_q);
    iadev->rx_free_desc_qhead = NULL;

    iadev->rx_open = kcalloc(iadev->num_vc, sizeof(void *), GFP_KERNEL);
    if (!iadev->rx_open) {
        printk(KERN_ERR DEV_LABEL "itf %d couldn't get free page\n",
               dev->number);
        goto err_free_dle;
    }

    iadev->rxing = 1;
    iadev->rx_pkt_cnt = 0;
    /* Mode Register */
    writew(R_ONLINE, iadev->reass_reg+MODE_REG);
    return 0;

err_free_dle:
    dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
                      iadev->rx_dle_dma);
err_out:
    return -ENOMEM;
}


/*
   The memory map suggested in appendix A and the coding for it.
   Keeping it around just in case we change our mind later.

   Buffer descr     0x0000 (128 - 4K)
   UBR sched        0x1000 (1K - 4K)
   UBR Wait q       0x2000 (1K - 4K)
   Commn queues     0x3000 Packet Ready, Transmit comp(0x3100)
                           (128 - 256) each
   extended VC      0x4000 (1K - 8K)
   ABR sched        0x6000 and ABR wait queue (1K - 2K) each
   CBR sched        0x7000 (as needed)
   VC table         0x8000 (1K - 32K)
*/

static void tx_intr(struct atm_dev *dev)
{
    IADEV *iadev;
    unsigned short status;
    unsigned long flags;

    iadev = INPH_IA_DEV(dev);

    status = readl(iadev->seg_reg+SEG_INTR_STATUS_REG);
    if (status & TRANSMIT_DONE){

        IF_EVENT(printk("Transmit Done Intr logic run\n");)
        spin_lock_irqsave(&iadev->tx_lock, flags);
        ia_tx_poll(iadev);
        spin_unlock_irqrestore(&iadev->tx_lock, flags);
        writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
        if (iadev->close_pending)
            wake_up(&iadev->close_wait);
    }
    if (status & TCQ_NOT_EMPTY)
    {
        IF_EVENT(printk("TCQ_NOT_EMPTY int received\n");)
    }
}

static void tx_dle_intr(struct atm_dev *dev)
{
    IADEV *iadev;
    struct dle *dle, *cur_dle;
    struct sk_buff *skb;
    struct atm_vcc *vcc;
    struct ia_vcc *iavcc;
    u_int dle_lp;
    unsigned long flags;

    iadev = INPH_IA_DEV(dev);
    spin_lock_irqsave(&iadev->tx_lock, flags);
    dle = iadev->tx_dle_q.read;
    dle_lp = readl(iadev->dma+IPHASE5575_TX_LIST_ADDR) &
             (sizeof(struct dle)*DLE_ENTRIES - 1);
    cur_dle = (struct dle*)(iadev->tx_dle_q.start + (dle_lp >> 4));
    while (dle != cur_dle)
    {
        /* free the DMAed skb */
        skb = skb_dequeue(&iadev->tx_dma_q);
        if (!skb) break;

        /* Revenge of the 2 dle (skb + trailer) used in ia_pkt_tx() */
        if (!((dle - iadev->tx_dle_q.start)%(2*sizeof(struct dle)))) {
            dma_unmap_single(&iadev->pci->dev, dle->sys_pkt_addr, skb->len,
                             DMA_TO_DEVICE);
        }
        vcc = ATM_SKB(skb)->vcc;
        if (!vcc) {
            printk("tx_dle_intr: vcc is null\n");
            spin_unlock_irqrestore(&iadev->tx_lock, flags);
            dev_kfree_skb_any(skb);

            return;
        }
        iavcc = INPH_IA_VCC(vcc);
        if (!iavcc) {
            printk("tx_dle_intr: iavcc is null\n");
            spin_unlock_irqrestore(&iadev->tx_lock, flags);
            dev_kfree_skb_any(skb);
            return;
        }
        if (vcc->qos.txtp.pcr >= iadev->rate_limit) {
            if ((vcc->pop) && (skb->len != 0))
            {
                vcc->pop(vcc, skb);
            }
            else {
                dev_kfree_skb_any(skb);
            }
        }
        else { /* Hold the rate-limited skb for flow control */
            IA_SKB_STATE(skb) |= IA_DLED;
            skb_queue_tail(&iavcc->txing_skb, skb);
        }
        IF_EVENT(printk("tx_dle_intr: enque skb = 0x%p \n", skb);)
        if (++dle == iadev->tx_dle_q.end)
            dle = iadev->tx_dle_q.start;
    }
    iadev->tx_dle_q.read = dle;
    spin_unlock_irqrestore(&iadev->tx_lock, flags);
}


static int open_tx(struct atm_vcc *vcc)
{
    struct ia_vcc *ia_vcc;
    IADEV *iadev;
    struct main_vc *vc;
    struct ext_vc *evc;
    int ret;
    IF_EVENT(printk("iadev: open_tx entered vcc->vci = %d\n", vcc->vci);)
    if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0;
    iadev = INPH_IA_DEV(vcc->dev);

    if (iadev->phy_type & FE_25MBIT_PHY) {
        if (vcc->qos.txtp.traffic_class == ATM_ABR) {
            printk("IA: ABR not support\n");
            return -EINVAL;
        }
        if (vcc->qos.txtp.traffic_class == ATM_CBR) {
            printk("IA: CBR not support\n");
            return -EINVAL;
        }
    }
    ia_vcc = INPH_IA_VCC(vcc);
    memset((caddr_t)ia_vcc, 0, sizeof(*ia_vcc));
    if (vcc->qos.txtp.max_sdu >
        (iadev->tx_buf_sz - sizeof(struct cpcs_trailer))){
        printk("IA: SDU size over (%d) the configured SDU size %d\n",
               vcc->qos.txtp.max_sdu,iadev->tx_buf_sz);
        vcc->dev_data = NULL;
        kfree(ia_vcc);
        return -EINVAL;
    }
    ia_vcc->vc_desc_cnt = 0;
    ia_vcc->txing = 1;

    /* find pcr */
    if (vcc->qos.txtp.max_pcr == ATM_MAX_PCR)
        vcc->qos.txtp.pcr = iadev->LineRate;
    else if ((vcc->qos.txtp.max_pcr == 0)&&( vcc->qos.txtp.pcr <= 0))
        vcc->qos.txtp.pcr = iadev->LineRate;
    else if ((vcc->qos.txtp.max_pcr > vcc->qos.txtp.pcr) && (vcc->qos.txtp.max_pcr> 0))
        vcc->qos.txtp.pcr = vcc->qos.txtp.max_pcr;
    if (vcc->qos.txtp.pcr > iadev->LineRate)
        vcc->qos.txtp.pcr = iadev->LineRate;
    ia_vcc->pcr = vcc->qos.txtp.pcr;

    if (ia_vcc->pcr > (iadev->LineRate / 6) ) ia_vcc->ltimeout = HZ / 10;
    else if (ia_vcc->pcr > (iadev->LineRate / 130)) ia_vcc->ltimeout = HZ;
    else if (ia_vcc->pcr <= 170) ia_vcc->ltimeout = 16 * HZ;
    else ia_vcc->ltimeout = 2700 * HZ / ia_vcc->pcr;
    if (ia_vcc->pcr < iadev->rate_limit)
        skb_queue_head_init (&ia_vcc->txing_skb);
    if (ia_vcc->pcr < iadev->rate_limit) {
        struct sock *sk = sk_atm(vcc);

        if (vcc->qos.txtp.max_sdu != 0) {
            if (ia_vcc->pcr > 60000)
                sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 5;
            else if (ia_vcc->pcr > 2000)
                sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 4;
            else
                sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 3;
        }
        else
            sk->sk_sndbuf = 24576;
    }

    vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;
    evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;
    vc += vcc->vci;
    evc += vcc->vci;
    memset((caddr_t)vc, 0, sizeof(*vc));
    memset((caddr_t)evc, 0, sizeof(*evc));

    /* store the most significant 4 bits of vci as the last 4 bits
       of first part of atm header.
       store the last 12 bits of vci as first 12 bits of the second
       part of the atm header.
    */
    evc->atm_hdr1 = (vcc->vci >> 12) & 0x000f;
    evc->atm_hdr2 = (vcc->vci & 0x0fff) << 4;
1828
1829
/* check the following for different traffic classes */
1830
if (vcc->qos.txtp.traffic_class == ATM_UBR)
1831
{
1832
vc->type = UBR;
1833
vc->status = CRC_APPEND;
1834
vc->acr = cellrate_to_float(iadev->LineRate);
1835
if (vcc->qos.txtp.pcr > 0)
1836
vc->acr = cellrate_to_float(vcc->qos.txtp.pcr);
1837
IF_UBR(printk("UBR: txtp.pcr = 0x%x f_rate = 0x%x\n",
1838
vcc->qos.txtp.max_pcr,vc->acr);)
1839
}
1840
else if (vcc->qos.txtp.traffic_class == ATM_ABR)
1841
{ srv_cls_param_t srv_p;
1842
IF_ABR(printk("Tx ABR VCC\n");)
1843
init_abr_vc(iadev, &srv_p);
1844
if (vcc->qos.txtp.pcr > 0)
1845
srv_p.pcr = vcc->qos.txtp.pcr;
1846
if (vcc->qos.txtp.min_pcr > 0) {
1847
int tmpsum = iadev->sum_mcr+iadev->sum_cbr+vcc->qos.txtp.min_pcr;
1848
if (tmpsum > iadev->LineRate)
1849
return -EBUSY;
1850
srv_p.mcr = vcc->qos.txtp.min_pcr;
1851
iadev->sum_mcr += vcc->qos.txtp.min_pcr;
1852
}
1853
else srv_p.mcr = 0;
1854
if (vcc->qos.txtp.icr)
1855
srv_p.icr = vcc->qos.txtp.icr;
1856
if (vcc->qos.txtp.tbe)
1857
srv_p.tbe = vcc->qos.txtp.tbe;
1858
if (vcc->qos.txtp.frtt)
1859
srv_p.frtt = vcc->qos.txtp.frtt;
1860
if (vcc->qos.txtp.rif)
1861
srv_p.rif = vcc->qos.txtp.rif;
1862
if (vcc->qos.txtp.rdf)
1863
srv_p.rdf = vcc->qos.txtp.rdf;
1864
if (vcc->qos.txtp.nrm_pres)
1865
srv_p.nrm = vcc->qos.txtp.nrm;
1866
if (vcc->qos.txtp.trm_pres)
1867
srv_p.trm = vcc->qos.txtp.trm;
1868
if (vcc->qos.txtp.adtf_pres)
1869
srv_p.adtf = vcc->qos.txtp.adtf;
1870
if (vcc->qos.txtp.cdf_pres)
1871
srv_p.cdf = vcc->qos.txtp.cdf;
1872
if (srv_p.icr > srv_p.pcr)
1873
srv_p.icr = srv_p.pcr;
1874
IF_ABR(printk("ABR:vcc->qos.txtp.max_pcr = %d mcr = %d\n",
1875
srv_p.pcr, srv_p.mcr);)
1876
ia_open_abr_vc(iadev, &srv_p, vcc, 1);
1877
} else if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1878
if (iadev->phy_type & FE_25MBIT_PHY) {
1879
printk("IA: CBR not support\n");
1880
return -EINVAL;
1881
}
1882
if (vcc->qos.txtp.max_pcr > iadev->LineRate) {
1883
IF_CBR(printk("PCR is not available\n");)
1884
return -1;
1885
}
1886
vc->type = CBR;
1887
vc->status = CRC_APPEND;
1888
if ((ret = ia_cbr_setup (iadev, vcc)) < 0) {
1889
return ret;
1890
}
1891
} else {
1892
printk("iadev: Non UBR, ABR and CBR traffic not supported\n");
1893
}
1894
1895
iadev->testTable[vcc->vci]->vc_status |= VC_ACTIVE;
1896
IF_EVENT(printk("ia open_tx returning \n");)
1897
return 0;
1898
}
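
/*
 * Worked example for the atm_hdr1/atm_hdr2 split in open_tx() above
 * (editorial illustration only): the 16-bit VCI is stored across the
 * two header halves exactly as the comment there describes.  For
 * vcc->vci == 0x1234:
 *
 *	atm_hdr1 = (0x1234 >> 12) & 0x000f = 0x0001;   // top 4 VCI bits
 *	atm_hdr2 = (0x1234 & 0x0fff) << 4  = 0x2340;   // low 12 VCI bits
 *
 * so the VCI lands in its cell-header position, with the low nibble of
 * atm_hdr2 presumably left clear for the PTI/CLP bits.
 */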


static int tx_init(struct atm_dev *dev)
{
	IADEV *iadev;
	struct tx_buf_desc *buf_desc_ptr;
	unsigned int tx_pkt_start;
	void *dle_addr;
	int i;
	u_short tcq_st_adr;
	u_short *tcq_start;
	u_short prq_st_adr;
	u_short *prq_start;
	struct main_vc *vc;
	struct ext_vc *evc;
	u_short tmp16;
	u32 vcsize_sel;

	iadev = INPH_IA_DEV(dev);
	spin_lock_init(&iadev->tx_lock);

	IF_INIT(printk("Tx MASK REG: 0x%0x\n",
		       readw(iadev->seg_reg+SEG_MASK_REG));)

	/* Allocate 4k (boundary aligned) bytes */
	dle_addr = dma_alloc_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE,
				      &iadev->tx_dle_dma, GFP_KERNEL);
	if (!dle_addr) {
		printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
		goto err_out;
	}
	iadev->tx_dle_q.start = (struct dle*)dle_addr;
	iadev->tx_dle_q.read = iadev->tx_dle_q.start;
	iadev->tx_dle_q.write = iadev->tx_dle_q.start;
	iadev->tx_dle_q.end = (struct dle*)((unsigned long)dle_addr+sizeof(struct dle)*DLE_ENTRIES);

	/* write the upper 20 bits of the start address to tx list address register */
	writel(iadev->tx_dle_dma & 0xfffff000,
	       iadev->dma + IPHASE5575_TX_LIST_ADDR);
	writew(0xffff, iadev->seg_reg+SEG_MASK_REG);
	writew(0, iadev->seg_reg+MODE_REG_0);
	writew(RESET_SEG, iadev->seg_reg+SEG_COMMAND_REG);
	iadev->MAIN_VC_TABLE_ADDR = iadev->seg_ram+MAIN_VC_TABLE*iadev->memSize;
	iadev->EXT_VC_TABLE_ADDR = iadev->seg_ram+EXT_VC_TABLE*iadev->memSize;
	iadev->ABR_SCHED_TABLE_ADDR = iadev->seg_ram+ABR_SCHED_TABLE*iadev->memSize;

	/*
	   Transmit side control memory map
	   --------------------------------
	   Buffer descr	0x0000 (128 - 4K)
	   Commn queues	0x1000	Transmit comp, Packet ready(0x1400)
				(512 - 1K) each
				TCQ - 4K, PRQ - 5K
	   CBR Table	0x1800 (as needed) - 6K
	   UBR Table	0x3000 (1K - 4K) - 12K
	   UBR Wait queue 0x4000 (1K - 4K) - 16K
	   ABR sched	0x5000	and ABR wait queue (1K - 2K) each
				ABR Tbl - 20K, ABR Wq - 22K
	   extended VC	0x6000 (1K - 8K) - 24K
	   VC Table	0x8000 (1K - 32K) - 32K

	   Between 0x2000 (8K) and 0x3000 (12K) there is 4K space left for VBR Tbl
	   and Wait q, which can be allotted later.
	*/
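
	/*
	 * Editorial note: the offsets in the map above are for the
	 * smallest (memSize == 1) configuration; every table base below
	 * is computed as <TABLE_OFFSET> * iadev->memSize, so a 4K-VC
	 * board (memSize == 4, see ia_init()) simply stretches the same
	 * layout by a factor of 4.  Illustrative arithmetic, assuming
	 * MAIN_VC_TABLE in iphase.h is the 0x8000 offset from the map:
	 *
	 *	memSize == 1:  MAIN_VC_TABLE * 1 = 0x8000
	 *	memSize == 4:  MAIN_VC_TABLE * 4 = 0x20000
	 */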

	/* Buffer Descriptor Table Base address */
	writew(TX_DESC_BASE, iadev->seg_reg+SEG_DESC_BASE);

	/* initialize each entry in the buffer descriptor table */
	buf_desc_ptr = (struct tx_buf_desc *)(iadev->seg_ram+TX_DESC_BASE);
	memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
	buf_desc_ptr++;
	tx_pkt_start = TX_PACKET_RAM;
	for (i = 1; i <= iadev->num_tx_desc; i++)
	{
		memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
		buf_desc_ptr->desc_mode = AAL5;
		buf_desc_ptr->buf_start_hi = tx_pkt_start >> 16;
		buf_desc_ptr->buf_start_lo = tx_pkt_start & 0x0000ffff;
		buf_desc_ptr++;
		tx_pkt_start += iadev->tx_buf_sz;
	}
	iadev->tx_buf = kmalloc_array(iadev->num_tx_desc,
				      sizeof(*iadev->tx_buf),
				      GFP_KERNEL);
	if (!iadev->tx_buf) {
		printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
		goto err_free_dle;
	}
	for (i = 0; i < iadev->num_tx_desc; i++)
	{
		struct cpcs_trailer *cpcs;

		cpcs = kmalloc(sizeof(*cpcs), GFP_KERNEL|GFP_DMA);
		if (!cpcs) {
			printk(KERN_ERR DEV_LABEL " couldn't get freepage\n");
			goto err_free_tx_bufs;
		}
		iadev->tx_buf[i].cpcs = cpcs;
		iadev->tx_buf[i].dma_addr = dma_map_single(&iadev->pci->dev,
							   cpcs,
							   sizeof(*cpcs),
							   DMA_TO_DEVICE);
	}
	iadev->desc_tbl = kmalloc_array(iadev->num_tx_desc,
					sizeof(*iadev->desc_tbl),
					GFP_KERNEL);
	if (!iadev->desc_tbl) {
		printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
		goto err_free_all_tx_bufs;
	}

	/* Communication Queues base address */
	i = TX_COMP_Q * iadev->memSize;
	writew(i >> 16, iadev->seg_reg+SEG_QUEUE_BASE);

	/* Transmit Complete Queue */
	writew(i, iadev->seg_reg+TCQ_ST_ADR);
	writew(i, iadev->seg_reg+TCQ_RD_PTR);
	writew(i+iadev->num_tx_desc*sizeof(u_short), iadev->seg_reg+TCQ_WR_PTR);
	iadev->host_tcq_wr = i + iadev->num_tx_desc*sizeof(u_short);
	writew(i+2 * iadev->num_tx_desc * sizeof(u_short),
	       iadev->seg_reg+TCQ_ED_ADR);
	/* Fill the TCQ with all the free descriptors. */
	tcq_st_adr = readw(iadev->seg_reg+TCQ_ST_ADR);
	tcq_start = (u_short *)(iadev->seg_ram+tcq_st_adr);
	for (i = 1; i <= iadev->num_tx_desc; i++)
	{
		*tcq_start = (u_short)i;
		tcq_start++;
	}

	/* Packet Ready Queue */
	i = PKT_RDY_Q * iadev->memSize;
	writew(i, iadev->seg_reg+PRQ_ST_ADR);
	writew(i+2 * iadev->num_tx_desc * sizeof(u_short),
	       iadev->seg_reg+PRQ_ED_ADR);
	writew(i, iadev->seg_reg+PRQ_RD_PTR);
	writew(i, iadev->seg_reg+PRQ_WR_PTR);

	/* Load local copy of PRQ and TCQ ptrs */
	iadev->ffL.prq_st = readw(iadev->seg_reg+PRQ_ST_ADR) & 0xffff;
	iadev->ffL.prq_ed = readw(iadev->seg_reg+PRQ_ED_ADR) & 0xffff;
	iadev->ffL.prq_wr = readw(iadev->seg_reg+PRQ_WR_PTR) & 0xffff;

	iadev->ffL.tcq_st = readw(iadev->seg_reg+TCQ_ST_ADR) & 0xffff;
	iadev->ffL.tcq_ed = readw(iadev->seg_reg+TCQ_ED_ADR) & 0xffff;
	iadev->ffL.tcq_rd = readw(iadev->seg_reg+TCQ_RD_PTR) & 0xffff;

	/* Just for safety, initialize the PRQ to a known state: fill
	   every entry with 0 (i.e. no descriptor queued yet). */
	prq_st_adr = readw(iadev->seg_reg+PRQ_ST_ADR);
	prq_start = (u_short *)(iadev->seg_ram+prq_st_adr);
	for (i = 1; i <= iadev->num_tx_desc; i++)
	{
		*prq_start = (u_short)0;	/* 0 in all entries */
		prq_start++;
	}
	/* CBR Table */
	IF_INIT(printk("Start CBR Init\n");)
#if 1		/* for 1K VC board, CBR_PTR_BASE is 0 */
	writew(0, iadev->seg_reg+CBR_PTR_BASE);
#else		/* Charlie's logic is wrong? */
	tmp16 = (iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize)>>17;
	IF_INIT(printk("cbr_ptr_base = 0x%x ", tmp16);)
	writew(tmp16, iadev->seg_reg+CBR_PTR_BASE);
#endif

	IF_INIT(printk("value in register = 0x%x\n",
		       readw(iadev->seg_reg+CBR_PTR_BASE));)
	tmp16 = (CBR_SCHED_TABLE*iadev->memSize) >> 1;
	writew(tmp16, iadev->seg_reg+CBR_TAB_BEG);
	IF_INIT(printk("cbr_tab_beg = 0x%x in reg = 0x%x \n", tmp16,
		       readw(iadev->seg_reg+CBR_TAB_BEG));)
	writew(tmp16, iadev->seg_reg+CBR_TAB_END+1); // CBR_PTR;
	tmp16 = (CBR_SCHED_TABLE*iadev->memSize + iadev->num_vc*6 - 2) >> 1;
	writew(tmp16, iadev->seg_reg+CBR_TAB_END);
	IF_INIT(printk("iadev->seg_reg = 0x%p CBR_PTR_BASE = 0x%x\n",
		       iadev->seg_reg, readw(iadev->seg_reg+CBR_PTR_BASE));)
	IF_INIT(printk("CBR_TAB_BEG = 0x%x, CBR_TAB_END = 0x%x, CBR_PTR = 0x%x\n",
		       readw(iadev->seg_reg+CBR_TAB_BEG), readw(iadev->seg_reg+CBR_TAB_END),
		       readw(iadev->seg_reg+CBR_TAB_END+1));)

	/* Initialize the CBR Scheduling Table */
	memset_io(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize,
		  0, iadev->num_vc*6);
	iadev->CbrRemEntries = iadev->CbrTotEntries = iadev->num_vc*3;
	iadev->CbrEntryPt = 0;
	iadev->Granularity = MAX_ATM_155 / iadev->CbrTotEntries;
	iadev->NumEnabledCBR = 0;

	/* UBR scheduling Table and wait queue */
	/* initialize all bytes of UBR scheduler table and wait queue to 0
	   - SCHEDSZ is 1K (# of entries).
	   - UBR Table size is 4K
	   - UBR wait queue is 4K
	   since the table and wait queues are contiguous, all the bytes
	   can be initialized by one memset.
	*/

	vcsize_sel = 0;
	i = 8*1024;
	while (i != iadev->num_vc) {
		i /= 2;
		vcsize_sel++;
	}
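
	/*
	 * Editorial note: the loop above computes
	 * vcsize_sel = log2(8192 / num_vc) by repeated halving, e.g.
	 * num_vc == 4096 gives vcsize_sel == 1 and num_vc == 1024 (the
	 * 1K-VC boards) gives vcsize_sel == 3.  The result is packed
	 * into the low bits of VCT_BASE just below.
	 */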

	i = MAIN_VC_TABLE * iadev->memSize;
	writew(vcsize_sel | ((i >> 8) & 0xfff8), iadev->seg_reg+VCT_BASE);
	i = EXT_VC_TABLE * iadev->memSize;
	writew((i >> 8) & 0xfffe, iadev->seg_reg+VCTE_BASE);
	i = UBR_SCHED_TABLE * iadev->memSize;
	writew((i & 0xffff) >> 11, iadev->seg_reg+UBR_SBPTR_BASE);
	i = UBR_WAIT_Q * iadev->memSize;
	writew((i >> 7) & 0xffff, iadev->seg_reg+UBRWQ_BASE);
	memset((caddr_t)(iadev->seg_ram+UBR_SCHED_TABLE*iadev->memSize),
	       0, iadev->num_vc*8);
	/* ABR scheduling Table(0x5000-0x57ff) and wait queue(0x5800-0x5fff)*/
	/* initialize all bytes of ABR scheduler table and wait queue to 0
	   - SCHEDSZ is 1K (# of entries).
	   - ABR Table size is 2K
	   - ABR wait queue is 2K
	   since the table and wait queues are contiguous, all the bytes
	   can be initialized by one memset.
	*/
	i = ABR_SCHED_TABLE * iadev->memSize;
	writew((i >> 11) & 0xffff, iadev->seg_reg+ABR_SBPTR_BASE);
	i = ABR_WAIT_Q * iadev->memSize;
	writew((i >> 7) & 0xffff, iadev->seg_reg+ABRWQ_BASE);

	i = ABR_SCHED_TABLE*iadev->memSize;
	memset((caddr_t)(iadev->seg_ram+i), 0, iadev->num_vc*4);
	vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;
	evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;
	iadev->testTable = kmalloc_array(iadev->num_vc,
					 sizeof(*iadev->testTable),
					 GFP_KERNEL);
	if (!iadev->testTable) {
		printk("Get freepage failed\n");
		goto err_free_desc_tbl;
	}
	for (i = 0; i < iadev->num_vc; i++)
	{
		memset((caddr_t)vc, 0, sizeof(*vc));
		memset((caddr_t)evc, 0, sizeof(*evc));
		iadev->testTable[i] = kmalloc(sizeof(struct testTable_t),
					      GFP_KERNEL);
		if (!iadev->testTable[i])
			goto err_free_test_tables;
		iadev->testTable[i]->lastTime = 0;
		iadev->testTable[i]->fract = 0;
		iadev->testTable[i]->vc_status = VC_UBR;
		vc++;
		evc++;
	}

	/* Other Initialization */

	/* Max Rate Register */
	if (iadev->phy_type & FE_25MBIT_PHY) {
		writew(RATE25, iadev->seg_reg+MAXRATE);
		writew((UBR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
	}
	else {
		writew(cellrate_to_float(iadev->LineRate), iadev->seg_reg+MAXRATE);
		writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
	}
	/* Set Idle Header Registers to be sure */
	writew(0, iadev->seg_reg+IDLEHEADHI);
	writew(0, iadev->seg_reg+IDLEHEADLO);

	/* Program ABR UBR Priority Register as PRI_ABR_UBR_EQUAL */
	writew(0xaa00, iadev->seg_reg+ABRUBR_ARB);

	iadev->close_pending = 0;
	init_waitqueue_head(&iadev->close_wait);
	init_waitqueue_head(&iadev->timeout_wait);
	skb_queue_head_init(&iadev->tx_dma_q);
	ia_init_rtn_q(&iadev->tx_return_q);

	/* RM Cell Protocol ID and Message Type */
	writew(RM_TYPE_4_0, iadev->seg_reg+RM_TYPE);
	skb_queue_head_init(&iadev->tx_backlog);

	/* Mode Register 1 */
	writew(MODE_REG_1_VAL, iadev->seg_reg+MODE_REG_1);

	/* Mode Register 0 */
	writew(T_ONLINE, iadev->seg_reg+MODE_REG_0);

	/* Interrupt Status Register - read to clear */
	readw(iadev->seg_reg+SEG_INTR_STATUS_REG);

	/* Interrupt Mask Reg - don't mask TCQ_NOT_EMPTY interrupt generation */
	writew(~(TRANSMIT_DONE | TCQ_NOT_EMPTY), iadev->seg_reg+SEG_MASK_REG);
	writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
	iadev->tx_pkt_cnt = 0;
	iadev->rate_limit = iadev->LineRate / 3;

	return 0;

err_free_test_tables:
	while (--i >= 0)
		kfree(iadev->testTable[i]);
	kfree(iadev->testTable);
err_free_desc_tbl:
	kfree(iadev->desc_tbl);
err_free_all_tx_bufs:
	i = iadev->num_tx_desc;
err_free_tx_bufs:
	while (--i >= 0) {
		struct cpcs_trailer_desc *desc = iadev->tx_buf + i;

		dma_unmap_single(&iadev->pci->dev, desc->dma_addr,
				 sizeof(*desc->cpcs), DMA_TO_DEVICE);
		kfree(desc->cpcs);
	}
	kfree(iadev->tx_buf);
err_free_dle:
	dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
			  iadev->tx_dle_dma);
err_out:
	return -ENOMEM;
}

static irqreturn_t ia_int(int irq, void *dev_id)
{
	struct atm_dev *dev;
	IADEV *iadev;
	unsigned int status;
	int handled = 0;

	dev = dev_id;
	iadev = INPH_IA_DEV(dev);
	while ((status = readl(iadev->reg+IPHASE5575_BUS_STATUS_REG) & 0x7f))
	{
		handled = 1;
		IF_EVENT(printk("ia_int: status = 0x%x\n", status);)
		if (status & STAT_REASSINT)
		{
			/* do something */
			IF_EVENT(printk("REASSINT Bus status reg: %08x\n", status);)
			rx_intr(dev);
		}
		if (status & STAT_DLERINT)
		{
			/* Clear this bit by writing a 1 to it. */
			writel(STAT_DLERINT, iadev->reg + IPHASE5575_BUS_STATUS_REG);
			rx_dle_intr(dev);
		}
		if (status & STAT_SEGINT)
		{
			/* do something */
			IF_EVENT(printk("IA: tx_intr \n");)
			tx_intr(dev);
		}
		if (status & STAT_DLETINT)
		{
			writel(STAT_DLETINT, iadev->reg + IPHASE5575_BUS_STATUS_REG);
			tx_dle_intr(dev);
		}
		if (status & (STAT_FEINT | STAT_ERRINT | STAT_MARKINT))
		{
			if (status & STAT_FEINT)
				ia_frontend_intr(iadev);
		}
	}
	return IRQ_RETVAL(handled);
}
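
/*
 * Editorial note on the loop above (illustration only): the bus status
 * register is re-read until no interrupt source in the low 7 bits is
 * still asserted, so causes that arrive while earlier ones are being
 * serviced are not lost.  As the in-code comment notes, the DLE bits
 * are cleared by writing a 1 back, e.g.
 *
 *	writel(STAT_DLETINT, iadev->reg + IPHASE5575_BUS_STATUS_REG);
 *
 * clears only STAT_DLETINT and leaves the other status bits untouched.
 */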



/*----------------------------- entries --------------------------------*/
static int get_esi(struct atm_dev *dev)
{
	IADEV *iadev;
	int i;
	u32 mac1;
	u16 mac2;

	iadev = INPH_IA_DEV(dev);
	mac1 = cpu_to_be32(le32_to_cpu(readl(
				iadev->reg+IPHASE5575_MAC1)));
	mac2 = cpu_to_be16(le16_to_cpu(readl(iadev->reg+IPHASE5575_MAC2)));
	IF_INIT(printk("ESI: 0x%08x%04x\n", mac1, mac2);)
	for (i = 0; i < MAC1_LEN; i++)
		dev->esi[i] = mac1 >> (8*(MAC1_LEN-1-i));

	for (i = 0; i < MAC2_LEN; i++)
		dev->esi[i+MAC1_LEN] = mac2 >> (8*(MAC2_LEN-1-i));
	return 0;
}
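
/*
 * Worked example for get_esi() (editorial illustration, assuming
 * MAC1_LEN == 4 and MAC2_LEN == 2 so that ESI_LEN == 6): with
 * mac1 == 0x00204811 and mac2 == 0x2233, the shifts peel the bytes off
 * most-significant first, giving
 *
 *	esi[] = { 0x00, 0x20, 0x48, 0x11, 0x22, 0x33 }
 *
 * i.e. the 48-bit ESI that ia_init() prints as 00-20-48-11-22-33.
 */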

static int reset_sar(struct atm_dev *dev)
{
	IADEV *iadev;
	int i, error;
	unsigned int pci[64];

	iadev = INPH_IA_DEV(dev);
	for (i = 0; i < 64; i++) {
		error = pci_read_config_dword(iadev->pci, i * 4, &pci[i]);
		if (error != PCIBIOS_SUCCESSFUL)
			return error;
	}
	writel(0, iadev->reg+IPHASE5575_EXT_RESET);
	for (i = 0; i < 64; i++) {
		error = pci_write_config_dword(iadev->pci, i * 4, pci[i]);
		if (error != PCIBIOS_SUCCESSFUL)
			return error;
	}
	udelay(5);
	return 0;
}
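
/*
 * Editorial note: reset_sar() saves and restores all 64 dwords of PCI
 * configuration space by hand, which suggests that the write to
 * IPHASE5575_EXT_RESET resets the whole adapter, config registers
 * included.  The udelay(5) gives the SAR time to come out of reset
 * before the caller touches it again, and a failed config read or
 * write is propagated as the PCIBIOS error code.
 */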


static int ia_init(struct atm_dev *dev)
{
	IADEV *iadev;
	unsigned long real_base;
	void __iomem *base;
	unsigned short command;
	int error, i;

	/* The device has been identified and registered. Now we read
	   necessary configuration info like memory base address,
	   interrupt number etc */

	IF_INIT(printk(">ia_init\n");)
	dev->ci_range.vpi_bits = 0;
	dev->ci_range.vci_bits = NR_VCI_LD;

	iadev = INPH_IA_DEV(dev);
	real_base = pci_resource_start(iadev->pci, 0);
	iadev->irq = iadev->pci->irq;

	error = pci_read_config_word(iadev->pci, PCI_COMMAND, &command);
	if (error) {
		printk(KERN_ERR DEV_LABEL "(itf %d): init error 0x%x\n",
		       dev->number, error);
		return -EINVAL;
	}
	IF_INIT(printk(DEV_LABEL "(itf %d): rev.%d,realbase=0x%lx,irq=%d\n",
		       dev->number, iadev->pci->revision, real_base, iadev->irq);)

	/* find mapping size of board */

	iadev->pci_map_size = pci_resource_len(iadev->pci, 0);

	if (iadev->pci_map_size == 0x100000){
		iadev->num_vc = 4096;
		dev->ci_range.vci_bits = NR_VCI_4K_LD;
		iadev->memSize = 4;
	}
	else if (iadev->pci_map_size == 0x40000) {
		iadev->num_vc = 1024;
		iadev->memSize = 1;
	}
	else {
		printk("Unknown pci_map_size = 0x%x\n", iadev->pci_map_size);
		return -EINVAL;
	}
	IF_INIT(printk(DEV_LABEL "map size: %i\n", iadev->pci_map_size);)

	/* enable bus mastering */
	pci_set_master(iadev->pci);

	/*
	 * Delay at least 1us before doing any mem accesses (how 'bout 10?)
	 */
	udelay(10);

	/* map the physical address to a virtual address in address space */
	base = ioremap(real_base, iadev->pci_map_size);

	if (!base)
	{
		printk(DEV_LABEL " (itf %d): can't set up page mapping\n",
		       dev->number);
		return -ENOMEM;
	}
	IF_INIT(printk(DEV_LABEL " (itf %d): rev.%d,base=%p,irq=%d\n",
		       dev->number, iadev->pci->revision, base, iadev->irq);)

	/* filling the iphase dev structure */
	iadev->mem = iadev->pci_map_size / 2;
	iadev->real_base = real_base;
	iadev->base = base;

	/* Bus Interface Control Registers */
	iadev->reg = base + REG_BASE;
	/* Segmentation Control Registers */
	iadev->seg_reg = base + SEG_BASE;
	/* Reassembly Control Registers */
	iadev->reass_reg = base + REASS_BASE;
	/* Front end/ DMA control registers */
	iadev->phy = base + PHY_BASE;
	iadev->dma = base + PHY_BASE;
	/* RAM - Segmentation RAM and Reassembly RAM */
	iadev->ram = base + ACTUAL_RAM_BASE;
	iadev->seg_ram = base + ACTUAL_SEG_RAM_BASE;
	iadev->reass_ram = base + ACTUAL_REASS_RAM_BASE;

	/* let's print out the above */
	IF_INIT(printk("Base addrs: %p %p %p \n %p %p %p %p\n",
		       iadev->reg, iadev->seg_reg, iadev->reass_reg,
		       iadev->phy, iadev->ram, iadev->seg_ram,
		       iadev->reass_ram);)

	/* let's try reading the MAC address */
	error = get_esi(dev);
	if (error) {
		iounmap(iadev->base);
		return error;
	}
	printk("IA: ");
	for (i = 0; i < ESI_LEN; i++)
		printk("%s%02X", i ? "-" : "", dev->esi[i]);
	printk("\n");

	/* reset SAR */
	if (reset_sar(dev)) {
		iounmap(iadev->base);
		printk("IA: reset SAR failed, please try again\n");
		return 1;
	}
	return 0;
}

static void ia_update_stats(IADEV *iadev) {
	if (!iadev->carrier_detect)
		return;
	iadev->rx_cell_cnt += readw(iadev->reass_reg+CELL_CTR0)&0xffff;
	iadev->rx_cell_cnt += (readw(iadev->reass_reg+CELL_CTR1) & 0xffff) << 16;
	iadev->drop_rxpkt += readw(iadev->reass_reg + DRP_PKT_CNTR) & 0xffff;
	iadev->drop_rxcell += readw(iadev->reass_reg + ERR_CNTR) & 0xffff;
	iadev->tx_cell_cnt += readw(iadev->seg_reg + CELL_CTR_LO_AUTO)&0xffff;
	iadev->tx_cell_cnt += (readw(iadev->seg_reg+CELL_CTR_HIGH_AUTO)&0xffff)<<16;
}

static void ia_led_timer(struct timer_list *unused) {
	unsigned long flags;
	static u_char blinking[8] = {0, 0, 0, 0, 0, 0, 0, 0};
	u_char i;
	u32 ctrl_reg;
	for (i = 0; i < iadev_count; i++) {
		if (ia_dev[i]) {
			ctrl_reg = readl(ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
			if (blinking[i] == 0) {
				blinking[i]++;
				ctrl_reg &= (~CTRL_LED);
				writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
				ia_update_stats(ia_dev[i]);
			}
			else {
				blinking[i] = 0;
				ctrl_reg |= CTRL_LED;
				writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
				spin_lock_irqsave(&ia_dev[i]->tx_lock, flags);
				if (ia_dev[i]->close_pending)
					wake_up(&ia_dev[i]->close_wait);
				ia_tx_poll(ia_dev[i]);
				spin_unlock_irqrestore(&ia_dev[i]->tx_lock, flags);
			}
		}
	}
	mod_timer(&ia_timer, jiffies + HZ / 4);
}
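
/*
 * Editorial note: ia_timer re-arms itself every HZ/4 jiffies, so each
 * adapter's LED toggles four times a second (a ~2 Hz blink).  Stats are
 * pulled on the "off" half-cycle and ia_tx_poll() runs on the "on"
 * half-cycle, so each of those happens roughly twice per second per
 * board.
 */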

static void ia_phy_put(struct atm_dev *dev, unsigned char value,
		       unsigned long addr)
{
	writel(value, INPH_IA_DEV(dev)->phy+addr);
}

static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr)
{
	return readl(INPH_IA_DEV(dev)->phy+addr);
}

static void ia_free_tx(IADEV *iadev)
{
	int i;

	kfree(iadev->desc_tbl);
	for (i = 0; i < iadev->num_vc; i++)
		kfree(iadev->testTable[i]);
	kfree(iadev->testTable);
	for (i = 0; i < iadev->num_tx_desc; i++) {
		struct cpcs_trailer_desc *desc = iadev->tx_buf + i;

		dma_unmap_single(&iadev->pci->dev, desc->dma_addr,
				 sizeof(*desc->cpcs), DMA_TO_DEVICE);
		kfree(desc->cpcs);
	}
	kfree(iadev->tx_buf);
	dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
			  iadev->tx_dle_dma);
}

static void ia_free_rx(IADEV *iadev)
{
	kfree(iadev->rx_open);
	dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
			  iadev->rx_dle_dma);
}

static int ia_start(struct atm_dev *dev)
{
	IADEV *iadev;
	int error;
	unsigned char phy;
	u32 ctrl_reg;
	IF_EVENT(printk(">ia_start\n");)
	iadev = INPH_IA_DEV(dev);
	if (request_irq(iadev->irq, &ia_int, IRQF_SHARED, DEV_LABEL, dev)) {
		printk(KERN_ERR DEV_LABEL "(itf %d): IRQ%d is already in use\n",
		       dev->number, iadev->irq);
		error = -EAGAIN;
		goto err_out;
	}
	/* @@@ should release IRQ on error */
	/* enabling memory + master */
	if ((error = pci_write_config_word(iadev->pci,
					   PCI_COMMAND,
					   PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER)))
	{
		printk(KERN_ERR DEV_LABEL "(itf %d): can't enable memory+"
		       "master (0x%x)\n", dev->number, error);
		error = -EIO;
		goto err_free_irq;
	}
	udelay(10);

	/* Maybe we should reset the front end, initialize Bus Interface Control
	   Registers and see. */

	IF_INIT(printk("Bus ctrl reg: %08x\n",
		       readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)
	ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);
	ctrl_reg = (ctrl_reg & (CTRL_LED | CTRL_FE_RST))
		| CTRL_B8
		| CTRL_B16
		| CTRL_B32
		| CTRL_B48
		| CTRL_B64
		| CTRL_B128
		| CTRL_ERRMASK
		| CTRL_DLETMASK		/* should be removed later */
		| CTRL_DLERMASK
		| CTRL_SEGMASK
		| CTRL_REASSMASK
		| CTRL_FEMASK
		| CTRL_CSPREEMPT;

	writel(ctrl_reg, iadev->reg+IPHASE5575_BUS_CONTROL_REG);

	IF_INIT(printk("Bus ctrl reg after initializing: %08x\n",
		       readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));
		printk("Bus status reg after init: %08x\n",
		       readl(iadev->reg+IPHASE5575_BUS_STATUS_REG));)

	ia_hw_type(iadev);
	error = tx_init(dev);
	if (error)
		goto err_free_irq;
	error = rx_init(dev);
	if (error)
		goto err_free_tx;

	ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);
	writel(ctrl_reg | CTRL_FE_RST, iadev->reg+IPHASE5575_BUS_CONTROL_REG);
	IF_INIT(printk("Bus ctrl reg after initializing: %08x\n",
		       readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)
	phy = 0; /* resolve compiler complaint */
	IF_INIT (
		if ((phy = ia_phy_get(dev, 0)) == 0x30)
			printk("IA: pm5346,rev.%d\n", phy&0x0f);
		else
			printk("IA: utopia,rev.%0x\n", phy);)

	if (iadev->phy_type & FE_25MBIT_PHY)
		ia_mb25_init(iadev);
	else if (iadev->phy_type & (FE_DS3_PHY | FE_E3_PHY))
		ia_suni_pm7345_init(iadev);
	else {
		error = suni_init(dev);
		if (error)
			goto err_free_rx;
		if (dev->phy->start) {
			error = dev->phy->start(dev);
			if (error)
				goto err_free_rx;
		}
		/* Get iadev->carrier_detect status */
		ia_frontend_intr(iadev);
	}
	return 0;

err_free_rx:
	ia_free_rx(iadev);
err_free_tx:
	ia_free_tx(iadev);
err_free_irq:
	free_irq(iadev->irq, dev);
err_out:
	return error;
}

static void ia_close(struct atm_vcc *vcc)
{
	DEFINE_WAIT(wait);
	u16 *vc_table;
	IADEV *iadev;
	struct ia_vcc *ia_vcc;
	struct sk_buff *skb = NULL;
	struct sk_buff_head tmp_tx_backlog, tmp_vcc_backlog;
	unsigned long closetime, flags;

	iadev = INPH_IA_DEV(vcc->dev);
	ia_vcc = INPH_IA_VCC(vcc);
	if (!ia_vcc) return;

	IF_EVENT(printk("ia_close: ia_vcc->vc_desc_cnt = %d vci = %d\n",
			ia_vcc->vc_desc_cnt, vcc->vci);)
	clear_bit(ATM_VF_READY, &vcc->flags);
	skb_queue_head_init(&tmp_tx_backlog);
	skb_queue_head_init(&tmp_vcc_backlog);
	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
		iadev->close_pending++;
		prepare_to_wait(&iadev->timeout_wait, &wait, TASK_UNINTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(500));
		finish_wait(&iadev->timeout_wait, &wait);
		spin_lock_irqsave(&iadev->tx_lock, flags);
		while ((skb = skb_dequeue(&iadev->tx_backlog))) {
			if (ATM_SKB(skb)->vcc == vcc) {
				if (vcc->pop) vcc->pop(vcc, skb);
				else dev_kfree_skb_any(skb);
			}
			else
				skb_queue_tail(&tmp_tx_backlog, skb);
		}
		while ((skb = skb_dequeue(&tmp_tx_backlog)))
			skb_queue_tail(&iadev->tx_backlog, skb);
		IF_EVENT(printk("IA TX Done desc_cnt = %d\n", ia_vcc->vc_desc_cnt);)
		closetime = 300000 / ia_vcc->pcr;
		if (closetime == 0)
			closetime = 1;
		spin_unlock_irqrestore(&iadev->tx_lock, flags);
		wait_event_timeout(iadev->close_wait, (ia_vcc->vc_desc_cnt <= 0), closetime);
		spin_lock_irqsave(&iadev->tx_lock, flags);
		iadev->close_pending--;
		iadev->testTable[vcc->vci]->lastTime = 0;
		iadev->testTable[vcc->vci]->fract = 0;
		iadev->testTable[vcc->vci]->vc_status = VC_UBR;
		if (vcc->qos.txtp.traffic_class == ATM_ABR) {
			if (vcc->qos.txtp.min_pcr > 0)
				iadev->sum_mcr -= vcc->qos.txtp.min_pcr;
		}
		if (vcc->qos.txtp.traffic_class == ATM_CBR) {
			ia_vcc = INPH_IA_VCC(vcc);
			iadev->sum_mcr -= ia_vcc->NumCbrEntry*iadev->Granularity;
			ia_cbrVc_close(vcc);
		}
		spin_unlock_irqrestore(&iadev->tx_lock, flags);
	}

	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
		// reset reass table
		vc_table = (u16 *)(iadev->reass_ram+REASS_TABLE*iadev->memSize);
		vc_table += vcc->vci;
		*vc_table = NO_AAL5_PKT;
		// reset vc table
		vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
		vc_table += vcc->vci;
		*vc_table = (vcc->vci << 6) | 15;
		if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
			struct abr_vc_table __iomem *abr_vc_table =
				(iadev->reass_ram+ABR_VC_TABLE*iadev->memSize);
			abr_vc_table += vcc->vci;
			abr_vc_table->rdf = 0x0003;
			abr_vc_table->air = 0x5eb1;
		}
		// Drain the packets
		rx_dle_intr(vcc->dev);
		iadev->rx_open[vcc->vci] = NULL;
	}
	kfree(INPH_IA_VCC(vcc));
	ia_vcc = NULL;
	vcc->dev_data = NULL;
	clear_bit(ATM_VF_ADDR, &vcc->flags);
}

static int ia_open(struct atm_vcc *vcc)
{
	struct ia_vcc *ia_vcc;
	int error;
	if (!test_bit(ATM_VF_PARTIAL, &vcc->flags))
	{
		IF_EVENT(printk("ia: not partially allocated resources\n");)
		vcc->dev_data = NULL;
	}
	if (vcc->vci != ATM_VCI_UNSPEC && vcc->vpi != ATM_VPI_UNSPEC)
	{
		IF_EVENT(printk("iphase open: unspec part\n");)
		set_bit(ATM_VF_ADDR, &vcc->flags);
	}
	if (vcc->qos.aal != ATM_AAL5)
		return -EINVAL;
	IF_EVENT(printk(DEV_LABEL "(itf %d): open %d.%d\n",
			vcc->dev->number, vcc->vpi, vcc->vci);)

	/* Device dependent initialization */
	ia_vcc = kmalloc(sizeof(*ia_vcc), GFP_KERNEL);
	if (!ia_vcc) return -ENOMEM;
	vcc->dev_data = ia_vcc;

	if ((error = open_rx(vcc)))
	{
		IF_EVENT(printk("iadev: error in open_rx, closing\n");)
		ia_close(vcc);
		return error;
	}

	if ((error = open_tx(vcc)))
	{
		IF_EVENT(printk("iadev: error in open_tx, closing\n");)
		ia_close(vcc);
		return error;
	}

	set_bit(ATM_VF_READY, &vcc->flags);

#if 0
	{
		static u8 first = 1;
		if (first) {
			ia_timer.expires = jiffies + 3*HZ;
			add_timer(&ia_timer);
			first = 0;
		}
	}
#endif
	IF_EVENT(printk("ia open returning\n");)
	return 0;
}

static int ia_change_qos(struct atm_vcc *vcc, struct atm_qos *qos, int flags)
{
	IF_EVENT(printk(">ia_change_qos\n");)
	return 0;
}

static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
{
	IA_CMDBUF ia_cmds;
	IADEV *iadev;
	int i, board;
	u16 __user *tmps;
	IF_EVENT(printk(">ia_ioctl\n");)
	if (cmd != IA_CMD) {
		if (!dev->phy->ioctl) return -EINVAL;
		return dev->phy->ioctl(dev, cmd, arg);
	}
	if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT;
	board = ia_cmds.status;

	if ((board < 0) || (board > iadev_count))
		board = 0;
	board = array_index_nospec(board, iadev_count + 1);

	iadev = ia_dev[board];
	switch (ia_cmds.cmd) {
	case MEMDUMP:
	{
		switch (ia_cmds.sub_cmd) {
		case MEMDUMP_SEGREG:
			if (!capable(CAP_NET_ADMIN)) return -EPERM;
			tmps = (u16 __user *)ia_cmds.buf;
			for (i = 0; i < 0x80; i += 2, tmps++)
				if (put_user((u16)(readl(iadev->seg_reg+i) & 0xffff), tmps)) return -EFAULT;
			ia_cmds.status = 0;
			ia_cmds.len = 0x80;
			break;
		case MEMDUMP_REASSREG:
			if (!capable(CAP_NET_ADMIN)) return -EPERM;
			tmps = (u16 __user *)ia_cmds.buf;
			for (i = 0; i < 0x80; i += 2, tmps++)
				if (put_user((u16)(readl(iadev->reass_reg+i) & 0xffff), tmps)) return -EFAULT;
			ia_cmds.status = 0;
			ia_cmds.len = 0x80;
			break;
		case MEMDUMP_FFL:
		{
			ia_regs_t *regs_local;
			ffredn_t *ffL;
			rfredn_t *rfL;

			if (!capable(CAP_NET_ADMIN)) return -EPERM;
			regs_local = kmalloc(sizeof(*regs_local), GFP_KERNEL);
			if (!regs_local) return -ENOMEM;
			ffL = &regs_local->ffredn;
			rfL = &regs_local->rfredn;
			/* Copy real rfred registers into the local copy */
			for (i = 0; i < (sizeof(rfredn_t))/4; i++)
				((u_int *)rfL)[i] = readl(iadev->reass_reg + i) & 0xffff;
			/* Copy real ffred registers into the local copy */
			for (i = 0; i < (sizeof(ffredn_t))/4; i++)
				((u_int *)ffL)[i] = readl(iadev->seg_reg + i) & 0xffff;

			if (copy_to_user(ia_cmds.buf, regs_local, sizeof(ia_regs_t))) {
				kfree(regs_local);
				return -EFAULT;
			}
			kfree(regs_local);
			printk("Board %d registers dumped\n", board);
			ia_cmds.status = 0;
		}
			break;
		case READ_REG:
		{
			if (!capable(CAP_NET_ADMIN)) return -EPERM;
			desc_dbg(iadev);
			ia_cmds.status = 0;
		}
			break;
		case 0x6:
		{
			ia_cmds.status = 0;
			printk("skb = 0x%p\n", skb_peek(&iadev->tx_backlog));
			printk("rtn_q: 0x%p\n", ia_deque_rtn_q(&iadev->tx_return_q));
		}
			break;
		case 0x8:
		{
			struct k_sonet_stats *stats;
			stats = &PRIV(_ia_dev[board])->sonet_stats;
			printk("section_bip: %d\n", atomic_read(&stats->section_bip));
			printk("line_bip : %d\n", atomic_read(&stats->line_bip));
			printk("path_bip : %d\n", atomic_read(&stats->path_bip));
			printk("line_febe : %d\n", atomic_read(&stats->line_febe));
			printk("path_febe : %d\n", atomic_read(&stats->path_febe));
			printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
			printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
			printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
			printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
		}
			ia_cmds.status = 0;
			break;
		case 0x9:
			if (!capable(CAP_NET_ADMIN)) return -EPERM;
			for (i = 1; i <= iadev->num_rx_desc; i++)
				free_desc(_ia_dev[board], i);
			writew(~(RX_FREEQ_EMPT | RX_EXCP_RCVD),
			       iadev->reass_reg+REASS_MASK_REG);
			iadev->rxing = 1;

			ia_cmds.status = 0;
			break;

		case 0xb:
			if (!capable(CAP_NET_ADMIN)) return -EPERM;
			ia_frontend_intr(iadev);
			break;
		case 0xa:
			if (!capable(CAP_NET_ADMIN)) return -EPERM;
			{
				ia_cmds.status = 0;
				IADebugFlag = ia_cmds.maddr;
				printk("New debug option loaded\n");
			}
			break;
		default:
			ia_cmds.status = 0;
			break;
		}
	}
		break;
	default:
		break;

	}
	return 0;
}

static int ia_pkt_tx(struct atm_vcc *vcc, struct sk_buff *skb) {
	IADEV *iadev;
	struct dle *wr_ptr;
	struct tx_buf_desc __iomem *buf_desc_ptr;
	int desc;
	int comp_code;
	int total_len;
	struct cpcs_trailer *trailer;
	struct ia_vcc *iavcc;

	iadev = INPH_IA_DEV(vcc->dev);
	iavcc = INPH_IA_VCC(vcc);
	if (!iavcc->txing) {
		printk("discard packet on closed VC\n");
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		return 0;
	}

	if (skb->len > iadev->tx_buf_sz - 8) {
		printk("Transmit size over tx buffer size\n");
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		return 0;
	}
	if ((unsigned long)skb->data & 3) {
		printk("Misaligned SKB\n");
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		return 0;
	}
	/* Get a descriptor number from our free descriptor queue.
	   We get the descr number from the TCQ now, since I am using
	   the TCQ as a free buffer queue. Initially the TCQ is
	   initialized with all the descriptors and is hence full.
	*/
	desc = get_desc(iadev, iavcc);
	if (desc == 0xffff)
		return 1;
	comp_code = desc >> 13;
	desc &= 0x1fff;

	if ((desc == 0) || (desc > iadev->num_tx_desc))
	{
		IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
		atomic_inc(&vcc->stats->tx);
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		return 0;	/* return SUCCESS */
	}

	if (comp_code)
	{
		IF_ERR(printk(DEV_LABEL "send desc:%d completion code %d error\n",
			      desc, comp_code);)
	}

	/* remember the desc and vcc mapping */
	iavcc->vc_desc_cnt++;
	iadev->desc_tbl[desc-1].iavcc = iavcc;
	iadev->desc_tbl[desc-1].txskb = skb;
	IA_SKB_STATE(skb) = 0;

	iadev->ffL.tcq_rd += 2;
	if (iadev->ffL.tcq_rd > iadev->ffL.tcq_ed)
		iadev->ffL.tcq_rd = iadev->ffL.tcq_st;
	writew(iadev->ffL.tcq_rd, iadev->seg_reg+TCQ_RD_PTR);

	/* Put the descriptor number in the packet ready queue
	   and put the updated write pointer in the DLE field
	*/
	*(u16*)(iadev->seg_ram+iadev->ffL.prq_wr) = desc;

	iadev->ffL.prq_wr += 2;
	if (iadev->ffL.prq_wr > iadev->ffL.prq_ed)
		iadev->ffL.prq_wr = iadev->ffL.prq_st;

	/* Figure out the exact length of the packet and padding required to
	   make it aligned on a 48 byte boundary. */
	total_len = skb->len + sizeof(struct cpcs_trailer);
	total_len = ((total_len + 47) / 48) * 48;
	IF_TX(printk("ia packet len:%d padding:%d\n", total_len, total_len - skb->len);)

	/* Put the packet in a tx buffer */
	trailer = iadev->tx_buf[desc-1].cpcs;
	IF_TX(printk("Sent: skb = 0x%p skb->data: 0x%p len: %d, desc: %d\n",
		     skb, skb->data, skb->len, desc);)
	trailer->control = 0;
	/* big endian */
	trailer->length = ((skb->len & 0xff) << 8) | ((skb->len & 0xff00) >> 8);
	trailer->crc32 = 0;	/* not needed - dummy bytes */

	/* Display the packet */
	IF_TXPKT(printk("Sent data: len = %d MsgNum = %d\n",
			skb->len, tcnter++);
		 xdump(skb->data, skb->len, "TX: ");
		 printk("\n");)

	/* Build the buffer descriptor */
	buf_desc_ptr = iadev->seg_ram+TX_DESC_BASE;
	buf_desc_ptr += desc;	/* points to the corresponding entry */
	buf_desc_ptr->desc_mode = AAL5 | EOM_EN | APP_CRC32 | CMPL_INT;
	/* Huh? p.115 of the user's guide describes this as a read-only register */
	writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
	buf_desc_ptr->vc_index = vcc->vci;
	buf_desc_ptr->bytes = total_len;

	if (vcc->qos.txtp.traffic_class == ATM_ABR)
		clear_lockup(vcc, iadev);

	/* Build the DLE structure */
	wr_ptr = iadev->tx_dle_q.write;
	memset((caddr_t)wr_ptr, 0, sizeof(*wr_ptr));
	wr_ptr->sys_pkt_addr = dma_map_single(&iadev->pci->dev, skb->data,
					      skb->len, DMA_TO_DEVICE);
	wr_ptr->local_pkt_addr = (buf_desc_ptr->buf_start_hi << 16) |
				 buf_desc_ptr->buf_start_lo;
	/* wr_ptr->bytes = swap_byte_order(total_len); didn't seem to affect?? */
	wr_ptr->bytes = skb->len;

	/* hw bug - DLEs of 0x2d, 0x2e, 0x2f cause DMA lockup */
	if ((wr_ptr->bytes >> 2) == 0xb)
		wr_ptr->bytes = 0x30;

	wr_ptr->mode = TX_DLE_PSI;
	wr_ptr->prq_wr_ptr_data = 0;

	/* end is not to be used for the DLE q */
	if (++wr_ptr == iadev->tx_dle_q.end)
		wr_ptr = iadev->tx_dle_q.start;

	/* Build trailer dle */
	wr_ptr->sys_pkt_addr = iadev->tx_buf[desc-1].dma_addr;
	wr_ptr->local_pkt_addr = ((buf_desc_ptr->buf_start_hi << 16) |
				  buf_desc_ptr->buf_start_lo) + total_len - sizeof(struct cpcs_trailer);

	wr_ptr->bytes = sizeof(struct cpcs_trailer);
	wr_ptr->mode = DMA_INT_ENABLE;
	wr_ptr->prq_wr_ptr_data = iadev->ffL.prq_wr;

	/* end is not to be used for the DLE q */
	if (++wr_ptr == iadev->tx_dle_q.end)
		wr_ptr = iadev->tx_dle_q.start;

	iadev->tx_dle_q.write = wr_ptr;
	ATM_DESC(skb) = vcc->vci;
	skb_queue_tail(&iadev->tx_dma_q, skb);

	atomic_inc(&vcc->stats->tx);
	iadev->tx_pkt_cnt++;
	/* Increment transaction counter */
	writel(2, iadev->dma+IPHASE5575_TX_COUNTER);

#if 0
	/* add flow control logic */
	if (atomic_read(&vcc->stats->tx) % 20 == 0) {
		if (iavcc->vc_desc_cnt > 10) {
			vcc->tx_quota = vcc->tx_quota * 3 / 4;
			printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota);
			iavcc->flow_inc = -1;
			iavcc->saved_tx_quota = vcc->tx_quota;
		} else if ((iavcc->flow_inc < 0) && (iavcc->vc_desc_cnt < 3)) {
			// vcc->tx_quota = 3 * iavcc->saved_tx_quota / 4;
			printk("Tx2: vcc->tx_quota = %d \n", (u32)vcc->tx_quota);
			iavcc->flow_inc = 0;
		}
	}
#endif
	IF_TX(printk("ia send done\n");)
	return 0;
}
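
/*
 * Worked example for the AAL5 length arithmetic in ia_pkt_tx() above
 * (editorial illustration only): the payload plus the 8-byte CPCS
 * trailer is rounded up to a whole number of 48-byte cell payloads:
 *
 *	skb->len = 40:  total_len = ((40 + 8 + 47) / 48) * 48 = 48
 *	skb->len = 41:  total_len = ((41 + 8 + 47) / 48) * 48 = 96
 *	skb->len = 88:  total_len = ((88 + 8 + 47) / 48) * 48 = 96
 *
 * and trailer->length stores skb->len byte-swapped because the field is
 * big-endian on the wire, e.g. skb->len == 0x0028 is stored as 0x2800
 * on a little-endian host.
 */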

static int ia_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
	IADEV *iadev;
	unsigned long flags;

	iadev = INPH_IA_DEV(vcc->dev);
	if ((!skb) || (skb->len > (iadev->tx_buf_sz - sizeof(struct cpcs_trailer))))
	{
		if (!skb)
			printk(KERN_CRIT "null skb in ia_send\n");
		else dev_kfree_skb_any(skb);
		return -EINVAL;
	}
	spin_lock_irqsave(&iadev->tx_lock, flags);
	if (!test_bit(ATM_VF_READY, &vcc->flags)){
		dev_kfree_skb_any(skb);
		spin_unlock_irqrestore(&iadev->tx_lock, flags);
		return -EINVAL;
	}
	ATM_SKB(skb)->vcc = vcc;

	if (skb_peek(&iadev->tx_backlog)) {
		skb_queue_tail(&iadev->tx_backlog, skb);
	}
	else {
		if (ia_pkt_tx(vcc, skb)) {
			skb_queue_tail(&iadev->tx_backlog, skb);
		}
	}
	spin_unlock_irqrestore(&iadev->tx_lock, flags);
	return 0;
}
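
/*
 * Editorial note: ia_send() checks skb_peek(&iadev->tx_backlog) before
 * attempting ia_pkt_tx() so that a new packet can never overtake ones
 * already queued: if anything is backlogged the new skb joins the tail,
 * and only an empty backlog lets it go straight to the hardware (with a
 * failed ia_pkt_tx() falling back to the backlog as well).
 */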

static int ia_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
{
	int left = *pos, n;
	char *tmpPtr;
	IADEV *iadev = INPH_IA_DEV(dev);
	if (!left--) {
		if (iadev->phy_type == FE_25MBIT_PHY) {
			n = sprintf(page, " Board Type : Iphase5525-1KVC-128K\n");
			return n;
		}
		if (iadev->phy_type == FE_DS3_PHY)
			n = sprintf(page, " Board Type : Iphase-ATM-DS3");
		else if (iadev->phy_type == FE_E3_PHY)
			n = sprintf(page, " Board Type : Iphase-ATM-E3");
		else if (iadev->phy_type == FE_UTP_OPTION)
			n = sprintf(page, " Board Type : Iphase-ATM-UTP155");
		else
			n = sprintf(page, " Board Type : Iphase-ATM-OC3");
		tmpPtr = page + n;
		if (iadev->pci_map_size == 0x40000)
			n += sprintf(tmpPtr, "-1KVC-");
		else
			n += sprintf(tmpPtr, "-4KVC-");
		tmpPtr = page + n;
		if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_1M)
			n += sprintf(tmpPtr, "1M \n");
		else if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_512K)
			n += sprintf(tmpPtr, "512K\n");
		else
			n += sprintf(tmpPtr, "128K\n");
		return n;
	}
	if (!left) {
		return sprintf(page, " Number of Tx Buffer: %u\n"
				     " Size of Tx Buffer : %u\n"
				     " Number of Rx Buffer: %u\n"
				     " Size of Rx Buffer : %u\n"
				     " Packets Received : %u\n"
				     " Packets Transmitted: %u\n"
				     " Cells Received : %u\n"
				     " Cells Transmitted : %u\n"
				     " Board Dropped Cells: %u\n"
				     " Board Dropped Pkts : %u\n",
			       iadev->num_tx_desc, iadev->tx_buf_sz,
			       iadev->num_rx_desc, iadev->rx_buf_sz,
			       iadev->rx_pkt_cnt, iadev->tx_pkt_cnt,
			       iadev->rx_cell_cnt, iadev->tx_cell_cnt,
			       iadev->drop_rxcell, iadev->drop_rxpkt);
	}
	return 0;
}

static const struct atmdev_ops ops = {
	.open		= ia_open,
	.close		= ia_close,
	.ioctl		= ia_ioctl,
	.send		= ia_send,
	.phy_put	= ia_phy_put,
	.phy_get	= ia_phy_get,
	.change_qos	= ia_change_qos,
	.proc_read	= ia_proc_read,
	.owner		= THIS_MODULE,
};

static int ia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct atm_dev *dev;
	IADEV *iadev;
	int ret;

	iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
	if (!iadev) {
		ret = -ENOMEM;
		goto err_out;
	}

	iadev->pci = pdev;

	IF_INIT(printk("ia detected at bus:%d dev: %d function:%d\n",
		       pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));)
	if (pci_enable_device(pdev)) {
		ret = -ENODEV;
		goto err_out_free_iadev;
	}
	dev = atm_dev_register(DEV_LABEL, &pdev->dev, &ops, -1, NULL);
	if (!dev) {
		ret = -ENOMEM;
		goto err_out_disable_dev;
	}
	dev->dev_data = iadev;
	IF_INIT(printk(DEV_LABEL "registered at (itf :%d)\n", dev->number);)
	IF_INIT(printk("dev_id = 0x%p iadev->LineRate = %d \n", dev,
		       iadev->LineRate);)

	pci_set_drvdata(pdev, dev);

	ia_dev[iadev_count] = iadev;
	_ia_dev[iadev_count] = dev;
	iadev_count++;
	if (ia_init(dev) || ia_start(dev)) {
		IF_INIT(printk("IA register failed!\n");)
		iadev_count--;
		ia_dev[iadev_count] = NULL;
		_ia_dev[iadev_count] = NULL;
		ret = -EINVAL;
		goto err_out_deregister_dev;
	}
	IF_EVENT(printk("iadev_count = %d\n", iadev_count);)

	iadev->next_board = ia_boards;
	ia_boards = dev;

	return 0;

err_out_deregister_dev:
	atm_dev_deregister(dev);
err_out_disable_dev:
	pci_disable_device(pdev);
err_out_free_iadev:
	kfree(iadev);
err_out:
	return ret;
}

static void ia_remove_one(struct pci_dev *pdev)
{
	struct atm_dev *dev = pci_get_drvdata(pdev);
	IADEV *iadev = INPH_IA_DEV(dev);

	/* Disable phy interrupts */
	ia_phy_put(dev, ia_phy_get(dev, SUNI_RSOP_CIE) & ~(SUNI_RSOP_CIE_LOSE),
		   SUNI_RSOP_CIE);
	udelay(1);

	if (dev->phy && dev->phy->stop)
		dev->phy->stop(dev);

	/* De-register device */
	free_irq(iadev->irq, dev);
	iadev_count--;
	ia_dev[iadev_count] = NULL;
	_ia_dev[iadev_count] = NULL;
	IF_EVENT(printk("deregistering iav at (itf:%d)\n", dev->number);)
	atm_dev_deregister(dev);

	iounmap(iadev->base);
	pci_disable_device(pdev);

	ia_free_rx(iadev);
	ia_free_tx(iadev);

	kfree(iadev);
}

static const struct pci_device_id ia_pci_tbl[] = {
	{ PCI_VENDOR_ID_IPHASE, 0x0008, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_IPHASE, 0x0009, PCI_ANY_ID, PCI_ANY_ID, },
	{ 0,}
};
MODULE_DEVICE_TABLE(pci, ia_pci_tbl);

static struct pci_driver ia_driver = {
	.name		= DEV_LABEL,
	.id_table	= ia_pci_tbl,
	.probe		= ia_init_one,
	.remove		= ia_remove_one,
};

static int __init ia_module_init(void)
{
	int ret;

	ret = pci_register_driver(&ia_driver);
	if (ret >= 0) {
		ia_timer.expires = jiffies + 3*HZ;
		add_timer(&ia_timer);
	} else
		printk(KERN_ERR DEV_LABEL ": no adapter found\n");
	return ret;
}

static void __exit ia_module_exit(void)
{
	pci_unregister_driver(&ia_driver);

	timer_delete_sync(&ia_timer);
}

module_init(ia_module_init);
module_exit(ia_module_exit);