Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
awilliam
GitHub Repository: awilliam/linux-vfio
Path: blob/master/drivers/isdn/hardware/eicon/io.c
15115 views
1
2
/*
3
*
4
Copyright (c) Eicon Networks, 2002.
5
*
6
This source file is supplied for the use with
7
Eicon Networks range of DIVA Server Adapters.
8
*
9
Eicon File Revision : 2.1
10
*
11
This program is free software; you can redistribute it and/or modify
12
it under the terms of the GNU General Public License as published by
13
the Free Software Foundation; either version 2, or (at your option)
14
any later version.
15
*
16
This program is distributed in the hope that it will be useful,
17
but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
18
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
19
See the GNU General Public License for more details.
20
*
21
You should have received a copy of the GNU General Public License
22
along with this program; if not, write to the Free Software
23
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24
*
25
*/
26
#include "platform.h"
27
#include "di_defs.h"
28
#include "pc.h"
29
#include "pr_pc.h"
30
#include "divasync.h"
31
#define MIPS_SCOM
32
#include "pkmaint.h" /* pc_main.h, packed in os-dependent fashion */
33
#include "di.h"
34
#include "mi_pc.h"
35
#include "io.h"
36
/* Adapter tables are defined in the OS-specific part of the driver. */
extern ADAPTER * adapter[MAX_ADAPTER];
extern PISDN_ADAPTER IoAdapters[MAX_ADAPTER];
/* Real XDI request entry point (defined below). */
void request (PISDN_ADAPTER, ENTITY *);
/* XLOG / maintenance request handler (defined below). */
static void pcm_req (PISDN_ADAPTER, ENTITY *);
40
/* --------------------------------------------------------------------------
   local functions
   -------------------------------------------------------------------------- */
/*
 * Generate one per-adapter trampoline Request<N>() that forwards an ENTITY
 * request to the DIRequest handler of IoAdapters[N], if that slot is in use.
 * A request for an unused slot is silently dropped.
 */
#define ReqFunc(N) \
static void Request##N(ENTITY *e) \
{ if ( IoAdapters[N] ) (* IoAdapters[N]->DIRequest)(IoAdapters[N], e) ; }
ReqFunc(0)
ReqFunc(1)
ReqFunc(2)
ReqFunc(3)
ReqFunc(4)
ReqFunc(5)
ReqFunc(6)
ReqFunc(7)
ReqFunc(8)
ReqFunc(9)
ReqFunc(10)
ReqFunc(11)
ReqFunc(12)
ReqFunc(13)
ReqFunc(14)
ReqFunc(15)
/*
 * Request dispatch table, indexed by logical adapter number.
 * NOTE(review): exactly 16 trampolines are generated — assumes
 * MAX_ADAPTER == 16; confirm against io.h / di_defs.h.
 */
IDI_CALL Requests[MAX_ADAPTER] =
{ &Request0, &Request1, &Request2, &Request3,
  &Request4, &Request5, &Request6, &Request7,
  &Request8, &Request9, &Request10, &Request11,
  &Request12, &Request13, &Request14, &Request15
};
68
/*****************************************************************************/
/*
 * Feature advertisement of this XDI version to its clients: the first byte
 * is a bit mask of DIVA_XDI_EXTENDED_FEATURE_* flags (the DMA-related bits
 * are included only when compiled with DIVA_IDI_RX_DMA), followed by a
 * terminating zero byte.  Copied out by IDI_SYNC_REQ_XDI_GET_EXTENDED_FEATURES.
 */
static byte extended_xdi_features[DIVA_XDI_EXTENDED_FEATURES_MAX_SZ+1] = {
  (DIVA_XDI_EXTENDED_FEATURES_VALID |
   DIVA_XDI_EXTENDED_FEATURE_SDRAM_BAR |
   DIVA_XDI_EXTENDED_FEATURE_CAPI_PRMS |
#if defined(DIVA_IDI_RX_DMA)
   DIVA_XDI_EXTENDED_FEATURE_CMA |
   DIVA_XDI_EXTENDED_FEATURE_RX_DMA |
   DIVA_XDI_EXTENDED_FEATURE_MANAGEMENT_DMA |
#endif
   DIVA_XDI_EXTENDED_FEATURE_NO_CANCEL_RC),
  0
};
85
/*****************************************************************************/
86
/*
 * dump_xlog_buffer - recover the adapter's XLOG trace ring buffer and
 * emit it through DBG_FTL after a fatal condition.
 * @IoAdapter: adapter whose name/microcode id is printed in the banner
 * @xlogDesc:  descriptor of the XLOG buffer (buf/cnt/out fields are read)
 *
 * Each XLOG entry starts with a word holding the byte offset of the NEXT
 * entry; a zero word marks the wrap point back to the buffer start.  An
 * offset that does not lie beyond the current position indicates a
 * corrupted buffer and stops the dump.
 */
void
dump_xlog_buffer (PISDN_ADAPTER IoAdapter, Xdesc *xlogDesc)
{
 dword logLen ;
 word *Xlog   = xlogDesc->buf ;
 word logCnt  = xlogDesc->cnt ;
 word logOut  = xlogDesc->out / sizeof(*Xlog) ;  /* byte offset -> word index */
 DBG_FTL(("%s: ************* XLOG recovery (%d) *************",
          &IoAdapter->Name[0], (int)logCnt))
 DBG_FTL(("Microcode: %s", &IoAdapter->ProtocolIdString[0]))
 for ( ; logCnt > 0 ; --logCnt )
 {
  /* a zero link word means: wrap around to the start of the buffer */
  if ( !GET_WORD(&Xlog[logOut]) )
  {
   if ( --logCnt == 0 )
    break ;
   logOut = 0 ;
  }
  /* link must point forward; otherwise the buffer is damaged */
  if ( GET_WORD(&Xlog[logOut]) <= (logOut * sizeof(*Xlog)) )
  {
   if ( logCnt > 2 )
   {
    DBG_FTL(("Possibly corrupted XLOG: %d entries left",
             (int)logCnt))
   }
   break ;
  }
  /* entry length = next-entry offset minus current offset */
  logLen = (dword)(GET_WORD(&Xlog[logOut]) - (logOut * sizeof(*Xlog))) ;
  DBG_FTL_MXLOG(( (char *)&Xlog[logOut + 1], (dword)(logLen - 2) ))
  logOut = (GET_WORD(&Xlog[logOut]) + 1) / sizeof(*Xlog) ;
 }
 DBG_FTL(("%s: ***************** end of XLOG *****************",
          &IoAdapter->Name[0]))
}
120
/*****************************************************************************/
121
#if defined(XDI_USE_XLOG)
/*
 * Human-readable names for the MIPS Cause-register ExcCode values (0..31),
 * indexed by (cr & 0x7c) >> 2 in dump_trap_frame() below.
 * Fix: code 10 is the MIPS "Reserved Instruction" exception — the string
 * was previously misspelled "Reverd instruction".
 */
static char *(ExceptionCauseTable[]) =
{
 "Interrupt",
 "TLB mod /IBOUND",
 "TLB load /DBOUND",
 "TLB store",
 "Address error load",
 "Address error store",
 "Instruction load bus error",
 "Data load/store bus error",
 "Syscall",
 "Breakpoint",
 "Reserved instruction",
 "Coprocessor unusable",
 "Overflow",
 "TRAP",
 "VCEI",
 "Floating Point Exception",
 "CP2",
 "Reserved 17",
 "Reserved 18",
 "Reserved 19",
 "Reserved 20",
 "Reserved 21",
 "Reserved 22",
 "WATCH",
 "Reserved 24",
 "Reserved 25",
 "Reserved 26",
 "Reserved 27",
 "Reserved 28",
 "Reserved 29",
 "Reserved 30",
 "VCED"
} ;
#endif
158
/*
 * dump_trap_frame - print the exception frame of a trapped on-board MIPS CPU.
 * @IoAdapter:      adapter whose name/microcode id is printed in the banner
 * @exceptionFrame: card-memory image of the saved CPU state (MP_XCPTC layout)
 *
 * Reads the frame with READ_DWORD accessors (card memory, __iomem) and logs
 * status/cause/epc/vaddr plus the 32 general-purpose registers with their
 * conventional MIPS names, followed by hi/lo and the remaining frame words.
 * NOTE(review): ExceptionCauseTable above is only compiled when
 * XDI_USE_XLOG is defined, but it is referenced here unconditionally —
 * presumably DBG_FTL compiles away (or XDI_USE_XLOG is always set) in
 * configurations where the table is absent; confirm against io.h.
 */
void
dump_trap_frame (PISDN_ADAPTER IoAdapter, byte __iomem *exceptionFrame)
{
 MP_XCPTC __iomem *xcept = (MP_XCPTC __iomem *)exceptionFrame ;
 dword __iomem *regs;
 regs = &xcept->regs[0] ;
 DBG_FTL(("%s: ***************** CPU TRAPPED *****************",
          &IoAdapter->Name[0]))
 DBG_FTL(("Microcode: %s", &IoAdapter->ProtocolIdString[0]))
 /* ExcCode field: Cause-register bits 6..2 */
 DBG_FTL(("Cause: %s",
          ExceptionCauseTable[(READ_DWORD(&xcept->cr) & 0x0000007c) >> 2]))
 DBG_FTL(("sr    0x%08x cr    0x%08x epc   0x%08x vaddr 0x%08x",
          READ_DWORD(&xcept->sr), READ_DWORD(&xcept->cr),
          READ_DWORD(&xcept->epc), READ_DWORD(&xcept->vaddr)))
 DBG_FTL(("zero  0x%08x at    0x%08x v0    0x%08x v1    0x%08x",
          READ_DWORD(&regs[ 0]), READ_DWORD(&regs[ 1]),
          READ_DWORD(&regs[ 2]), READ_DWORD(&regs[ 3])))
 DBG_FTL(("a0    0x%08x a1    0x%08x a2    0x%08x a3    0x%08x",
          READ_DWORD(&regs[ 4]), READ_DWORD(&regs[ 5]),
          READ_DWORD(&regs[ 6]), READ_DWORD(&regs[ 7])))
 DBG_FTL(("t0    0x%08x t1    0x%08x t2    0x%08x t3    0x%08x",
          READ_DWORD(&regs[ 8]), READ_DWORD(&regs[ 9]),
          READ_DWORD(&regs[10]), READ_DWORD(&regs[11])))
 DBG_FTL(("t4    0x%08x t5    0x%08x t6    0x%08x t7    0x%08x",
          READ_DWORD(&regs[12]), READ_DWORD(&regs[13]),
          READ_DWORD(&regs[14]), READ_DWORD(&regs[15])))
 DBG_FTL(("s0    0x%08x s1    0x%08x s2    0x%08x s3    0x%08x",
          READ_DWORD(&regs[16]), READ_DWORD(&regs[17]),
          READ_DWORD(&regs[18]), READ_DWORD(&regs[19])))
 DBG_FTL(("s4    0x%08x s5    0x%08x s6    0x%08x s7    0x%08x",
          READ_DWORD(&regs[20]), READ_DWORD(&regs[21]),
          READ_DWORD(&regs[22]), READ_DWORD(&regs[23])))
 DBG_FTL(("t8    0x%08x t9    0x%08x k0    0x%08x k1    0x%08x",
          READ_DWORD(&regs[24]), READ_DWORD(&regs[25]),
          READ_DWORD(&regs[26]), READ_DWORD(&regs[27])))
 DBG_FTL(("gp    0x%08x sp    0x%08x s8    0x%08x ra    0x%08x",
          READ_DWORD(&regs[28]), READ_DWORD(&regs[29]),
          READ_DWORD(&regs[30]), READ_DWORD(&regs[31])))
 DBG_FTL(("md    0x%08x|%08x resvd 0x%08x class 0x%08x",
          READ_DWORD(&xcept->mdhi), READ_DWORD(&xcept->mdlo),
          READ_DWORD(&xcept->reseverd), READ_DWORD(&xcept->xclass)))
}
200
/* --------------------------------------------------------------------------
201
Real XDI Request function
202
-------------------------------------------------------------------------- */
203
/*
 * request - XDI request entry point for one adapter.
 * @IoAdapter: target adapter; may be NULL (unconfigured slot), in which
 *             case sync requests get empty answers and real requests are
 *             dropped with an FTL message
 * @e:         IDI entity carrying the request
 *
 * e->Req == 0 marks a synchronous "special function" call whose function
 * code is in e->Rc (the IDI_SYNC_REQ_* cases below); these are answered
 * immediately.  Any other Req is a real request: the entity gets an Id
 * slot if it has none yet, is marked busy and appended to the adapter's
 * request queue, and the request DPC is scheduled to process it.
 */
void request(PISDN_ADAPTER IoAdapter, ENTITY * e)
{
 byte i;
 diva_os_spin_lock_magic_t irql;
 /*
  * if the Req field in the entity structure is 0,
  * we treat this request as a special function call
  */
 if ( !e->Req )
 {
  IDI_SYNC_REQ *syncReq = (IDI_SYNC_REQ *)e ;
  switch (e->Rc)
  {
#if defined(DIVA_IDI_RX_DMA)
  /* allocate or free one DMA map descriptor; answers in-place in pI */
  case IDI_SYNC_REQ_DMA_DESCRIPTOR_OPERATION: {
   diva_xdi_dma_descriptor_operation_t* pI = \
    &syncReq->xdi_dma_descriptor_operation.info;
   if (!IoAdapter->dma_map) {
    /* no DMA map on this adapter: report failure */
    pI->operation         = -1;
    pI->descriptor_number = -1;
    return;
   }
   diva_os_enter_spin_lock (&IoAdapter->data_spin_lock, &irql, "dma_op");
   if (pI->operation == IDI_SYNC_REQ_DMA_DESCRIPTOR_ALLOC) {
    pI->descriptor_number = diva_alloc_dma_map_entry (\
     (struct _diva_dma_map_entry*)IoAdapter->dma_map);
    if (pI->descriptor_number >= 0) {
     dword dma_magic;
     void* local_addr;
     diva_get_dma_map_entry (\
      (struct _diva_dma_map_entry*)IoAdapter->dma_map,
      pI->descriptor_number,
      &local_addr, &dma_magic);
     pI->descriptor_address = local_addr;
     pI->descriptor_magic   = dma_magic;
     pI->operation          = 0;
    } else {
     pI->operation = -1;
    }
   } else if ((pI->operation == IDI_SYNC_REQ_DMA_DESCRIPTOR_FREE) &&
              (pI->descriptor_number >= 0)) {
    diva_free_dma_map_entry((struct _diva_dma_map_entry*)IoAdapter->dma_map,
                            pI->descriptor_number);
    pI->descriptor_number = -1;
    pI->operation         = 0;
   } else {
    /* unknown operation code or bad descriptor number */
    pI->descriptor_number = -1;
    pI->operation         = -1;
   }
   diva_os_leave_spin_lock (&IoAdapter->data_spin_lock, &irql, "dma_op");
  } return;
#endif
  /* report this adapter's position in the system */
  case IDI_SYNC_REQ_XDI_GET_LOGICAL_ADAPTER_NUMBER: {
   diva_xdi_get_logical_adapter_number_s_t *pI = \
    &syncReq->xdi_logical_adapter_number.info;
   pI->logical_adapter_number = IoAdapter->ANum;
   pI->controller             = IoAdapter->ControllerNumber;
   pI->total_controllers      = IoAdapter->Properties.Adapters;
  } return;
  /* copy the CAPI configuration flags, bounded by the caller's buffer */
  case IDI_SYNC_REQ_XDI_GET_CAPI_PARAMS: {
   diva_xdi_get_capi_parameters_t prms, *pI = &syncReq->xdi_capi_prms.info;
   memset (&prms, 0x00, sizeof(prms));
   /* never copy more than the caller provided room for */
   prms.structure_length = min_t(size_t, sizeof(prms), pI->structure_length);
   memset (pI, 0x00, pI->structure_length);
   prms.flag_dynamic_l1_down = (IoAdapter->capi_cfg.cfg_1 & \
    DIVA_XDI_CAPI_CFG_1_DYNAMIC_L1_ON) ? 1 : 0;
   prms.group_optimization_enabled = (IoAdapter->capi_cfg.cfg_1 & \
    DIVA_XDI_CAPI_CFG_1_GROUP_POPTIMIZATION_ON) ? 1 : 0;
   memcpy (pI, &prms, prms.structure_length);
  } return;
  case IDI_SYNC_REQ_XDI_GET_ADAPTER_SDRAM_BAR:
   syncReq->xdi_sdram_bar.info.bar = IoAdapter->sdram_bar;
   return;
  /* copy the extended_xdi_features table into the caller's buffer;
     bit 31 of buffer_length_in_bytes signals "buffer was too small" */
  case IDI_SYNC_REQ_XDI_GET_EXTENDED_FEATURES: {
   dword i;
   diva_xdi_get_extended_xdi_features_t* pI =\
    &syncReq->xdi_extended_features.info;
   pI->buffer_length_in_bytes &= ~0x80000000;
   if (pI->buffer_length_in_bytes && pI->features) {
    memset (pI->features, 0x00, pI->buffer_length_in_bytes);
   }
   for (i = 0; ((pI->features) && (i < pI->buffer_length_in_bytes) &&
                (i < DIVA_XDI_EXTENDED_FEATURES_MAX_SZ)); i++) {
    pI->features[i] = extended_xdi_features[i];
   }
   if ((pI->buffer_length_in_bytes < DIVA_XDI_EXTENDED_FEATURES_MAX_SZ) ||
       (!pI->features)) {
    pI->buffer_length_in_bytes =\
     (0x80000000 | DIVA_XDI_EXTENDED_FEATURES_MAX_SZ);
   }
  } return;
  case IDI_SYNC_REQ_XDI_GET_STREAM:
   if (IoAdapter) {
    diva_xdi_provide_istream_info (&IoAdapter->a,
                                   &syncReq->xdi_stream_info.info);
   } else {
    syncReq->xdi_stream_info.info.provided_service = 0;
   }
   return;
  case IDI_SYNC_REQ_GET_NAME:
   if ( IoAdapter )
   {
    strcpy (&syncReq->GetName.name[0], IoAdapter->Name) ;
    DBG_TRC(("xdi: Adapter %d / Name '%s'",
             IoAdapter->ANum, IoAdapter->Name))
    return ;
   }
   syncReq->GetName.name[0] = '\0' ;
   break ;
  case IDI_SYNC_REQ_GET_SERIAL:
   if ( IoAdapter )
   {
    syncReq->GetSerial.serial = IoAdapter->serialNo ;
    DBG_TRC(("xdi: Adapter %d / SerialNo %ld",
             IoAdapter->ANum, IoAdapter->serialNo))
    return ;
   }
   syncReq->GetSerial.serial = 0 ;
   break ;
  case IDI_SYNC_REQ_GET_CARDTYPE:
   if ( IoAdapter )
   {
    syncReq->GetCardType.cardtype = IoAdapter->cardType ;
    DBG_TRC(("xdi: Adapter %d / CardType %ld",
             IoAdapter->ANum, IoAdapter->cardType))
    return ;
   }
   syncReq->GetCardType.cardtype = 0 ;
   break ;
  /* both XLOG variants are served by pcm_req(); e->Ind = 0 means "no data" */
  case IDI_SYNC_REQ_GET_XLOG:
   if ( IoAdapter )
   {
    pcm_req (IoAdapter, e) ;
    return ;
   }
   e->Ind = 0 ;
   break ;
  case IDI_SYNC_REQ_GET_DBG_XLOG:
   if ( IoAdapter )
   {
    pcm_req (IoAdapter, e) ;
    return ;
   }
   e->Ind = 0 ;
   break ;
  case IDI_SYNC_REQ_GET_FEATURES:
   if ( IoAdapter )
   {
    syncReq->GetFeatures.features =
     (unsigned short)IoAdapter->features ;
    return ;
   }
   syncReq->GetFeatures.features = 0 ;
   break ;
  case IDI_SYNC_REQ_PORTDRV_HOOK:
   if ( IoAdapter )
   {
    DBG_TRC(("Xdi:IDI_SYNC_REQ_PORTDRV_HOOK - ignored"))
    return ;
   }
   break;
  }
  /* unknown sync request on a configured adapter: silently ignore */
  if ( IoAdapter )
  {
   return ;
  }
 }
 DBG_TRC(("xdi: Id 0x%x / Req 0x%x / Rc 0x%x", e->Id, e->Req, e->Rc))
 if ( !IoAdapter )
 {
  DBG_FTL(("xdi: uninitialized Adapter used - ignore request"))
  return ;
 }
 diva_os_enter_spin_lock (&IoAdapter->data_spin_lock, &irql, "data_req");
 /*
  * assign an entity
  */
 if ( !(e->Id &0x1f) )
 {
  if ( IoAdapter->e_count >= IoAdapter->e_max )
  {
   DBG_FTL(("xdi: all Ids in use (max=%d) --> Req ignored",
            IoAdapter->e_max))
   diva_os_leave_spin_lock (&IoAdapter->data_spin_lock, &irql, "data_req");
   return ;
  }
  /*
   * find a new free id — the e_count < e_max check above guarantees a
   * free slot exists (NOTE(review): assumes e_tbl is sized >= e_max)
   */
  for ( i = 1 ; IoAdapter->e_tbl[i].e ; ++i ) ;
  IoAdapter->e_tbl[i].e = e ;
  IoAdapter->e_count++ ;
  e->No       = (byte)i ;
  e->More     = 0 ;
  e->RCurrent = 0xff ;
 }
 else
 {
  i = e->No ;
 }
 /*
  * if the entity is still busy, ignore the request call
  */
 if ( e->More & XBUSY )
 {
  DBG_FTL(("xdi: Id 0x%x busy --> Req 0x%x ignored", e->Id, e->Req))
  if ( !IoAdapter->trapped && IoAdapter->trapFnc )
  {
   IoAdapter->trapFnc (IoAdapter) ;
   /*
    * First trap, also notify user if supported
    */
   if (IoAdapter->trapped && IoAdapter->os_trap_nfy_Fnc) {
    (*(IoAdapter->os_trap_nfy_Fnc))(IoAdapter, IoAdapter->ANum);
   }
  }
  diva_os_leave_spin_lock (&IoAdapter->data_spin_lock, &irql, "data_req");
  return ;
 }
 /*
  * initialize transmit status variables
  */
 e->More |= XBUSY ;
 e->More &= ~XMOREF ;
 e->XCurrent = 0 ;
 e->XOffset  = 0 ;
 /*
  * queue this entity in the adapter request queue
  */
 IoAdapter->e_tbl[i].next = 0 ;
 if ( IoAdapter->head )
 {
  IoAdapter->e_tbl[IoAdapter->tail].next = i ;
  IoAdapter->tail = i ;
 }
 else
 {
  IoAdapter->head = i ;
  IoAdapter->tail = i ;
 }
 /*
  * queue the DPC to process the request
  */
 diva_os_schedule_soft_isr (&IoAdapter->req_soft_isr);
 diva_os_leave_spin_lock (&IoAdapter->data_spin_lock, &irql, "data_req");
}
449
/* ---------------------------------------------------------------------
450
Main DPC routine
451
--------------------------------------------------------------------- */
452
/*
 * DIDpcRoutine - main DPC (soft ISR) routine of one adapter.
 * @psoft_isr: the soft-ISR object this routine was scheduled on (unused here)
 * @Context:   the PISDN_ADAPTER this DPC belongs to
 *
 * The in_dpc atomic counter serializes execution: only the caller that
 * increments it from 0 to 1 runs the loop; concurrent schedulings just
 * bump the counter, which makes the running loop execute additional
 * passes (the decrement at the loop bottom stays > 0).  Each pass tests
 * and clears the card interrupt, dispatches indications and pushes out
 * queued requests.  Afterwards, a pending XLOG request from pcm_req()
 * (I/O-mapped cards) is advanced by one step of its small state machine.
 */
void DIDpcRoutine (struct _diva_os_soft_isr* psoft_isr, void* Context) {
 PISDN_ADAPTER IoAdapter  = (PISDN_ADAPTER)Context ;
 ADAPTER* a        = &IoAdapter->a ;
 diva_os_atomic_t* pin_dpc = &IoAdapter->in_dpc;
 if (diva_os_atomic_increment (pin_dpc) == 1) {
  do {
   if ( IoAdapter->tst_irq (a) )
   {
    if ( !IoAdapter->Unavailable )
     IoAdapter->dpc (a) ;
    IoAdapter->clr_irq (a) ;
   }
   IoAdapter->out (a) ;
  } while (diva_os_atomic_decrement (pin_dpc) > 0);
  /* ----------------------------------------------------------------
     Look for XLOG request (cards with indirect addressing)
     ---------------------------------------------------------------- */
  if (IoAdapter->pcm_pending) {
   struct pc_maint *pcm;
   diva_os_spin_lock_magic_t OldIrql ;
   diva_os_enter_spin_lock (&IoAdapter->data_spin_lock,
                            &OldIrql,
                            "data_dpc");
   pcm = (struct pc_maint *)IoAdapter->pcm_data;
   switch (IoAdapter->pcm_pending) {
   case 1: /* ask card for XLOG */
    a->ram_out (a, &IoAdapter->pcm->rc, 0) ;
    a->ram_out (a, &IoAdapter->pcm->req, pcm->req) ;
    IoAdapter->pcm_pending = 2;
    break;
   case 2: /* Try to get XLOG from the card */
    if ((int)(a->ram_in (a, &IoAdapter->pcm->rc))) {
     a->ram_in_buffer (a, IoAdapter->pcm, pcm, sizeof(*pcm)) ;
     IoAdapter->pcm_pending = 3;
    }
    break;
   case 3: /* let XDI recovery XLOG */
    break;
   }
   diva_os_leave_spin_lock (&IoAdapter->data_spin_lock,
                            &OldIrql,
                            "data_dpc");
  }
  /* ---------------------------------------------------------------- */
 }
}
498
/* --------------------------------------------------------------------------
499
XLOG interface
500
-------------------------------------------------------------------------- */
501
/*
 * pcm_req - issue a maintenance (XLOG) request to the card and collect
 * the answer into the entity's Ind area.
 * @IoAdapter: target adapter
 * @e:         entity whose Ind field doubles as a struct pc_maint buffer
 *
 * Two card flavours:
 *  - CARD_MAE (I/O-mapped): port accesses are not atomic, so the request
 *    is handed to the DPC via pcm_data/pcm_pending (state machine in
 *    DIDpcRoutine) and polled here with diva_os_sleep(1) until the DPC
 *    reports completion (pcm_pending == 3) or the retry budget runs out.
 *  - memory-mapped: shared RAM can be accessed directly; write the
 *    request, then poll rc until the card answers.
 * On timeout the adapter is assumed trapped and trapFnc is invoked.
 */
static void
pcm_req (PISDN_ADAPTER IoAdapter, ENTITY *e)
{
 diva_os_spin_lock_magic_t OldIrql ;
 int i, rc ;
 ADAPTER *a = &IoAdapter->a ;
 struct pc_maint *pcm = (struct pc_maint *)&e->Ind ;
 /*
  * special handling of I/O based card interface
  * the memory access isn't an atomic operation !
  */
 if ( IoAdapter->Properties.Card == CARD_MAE )
 {
  diva_os_enter_spin_lock (&IoAdapter->data_spin_lock,
                           &OldIrql,
                           "data_pcm_1");
  IoAdapter->pcm_data = (void *)pcm;
  IoAdapter->pcm_pending = 1;
  diva_os_schedule_soft_isr (&IoAdapter->req_soft_isr);
  diva_os_leave_spin_lock (&IoAdapter->data_spin_lock,
                           &OldIrql,
                           "data_pcm_1");
  /* NOTE(review): rc is never set non-zero in this branch, so the loop
     always runs until the DPC signals completion or i reaches 0 */
  for ( rc = 0, i = (IoAdapter->trapped ? 3000 : 250) ; !rc && (i > 0) ; --i )
  {
   diva_os_sleep (1) ;
   if (IoAdapter->pcm_pending == 3) {
    /* DPC has copied the answer: clear the handshake state and return */
    diva_os_enter_spin_lock (&IoAdapter->data_spin_lock,
                             &OldIrql,
                             "data_pcm_3");
    IoAdapter->pcm_pending = 0;
    IoAdapter->pcm_data = NULL ;
    diva_os_leave_spin_lock (&IoAdapter->data_spin_lock,
                             &OldIrql,
                             "data_pcm_3");
    return ;
   }
   /* re-kick the DPC so the state machine keeps making progress */
   diva_os_enter_spin_lock (&IoAdapter->data_spin_lock,
                            &OldIrql,
                            "data_pcm_2");
   diva_os_schedule_soft_isr (&IoAdapter->req_soft_isr);
   diva_os_leave_spin_lock (&IoAdapter->data_spin_lock,
                            &OldIrql,
                            "data_pcm_2");
  }
  /* timed out: clean up handshake state and treat the card as trapped */
  diva_os_enter_spin_lock (&IoAdapter->data_spin_lock,
                           &OldIrql,
                           "data_pcm_4");
  IoAdapter->pcm_pending = 0;
  IoAdapter->pcm_data = NULL ;
  diva_os_leave_spin_lock (&IoAdapter->data_spin_lock,
                           &OldIrql,
                           "data_pcm_4");
  goto Trapped ;
 }
 /*
  * memory based shared ram is accessible from different
  * processors without disturbing concurrent processes.
  */
 a->ram_out (a, &IoAdapter->pcm->rc, 0) ;
 a->ram_out (a, &IoAdapter->pcm->req, pcm->req) ;
 for ( i = (IoAdapter->trapped ? 3000 : 250) ; --i > 0 ; )
 {
  diva_os_sleep (1) ;
  rc = (int)(a->ram_in (a, &IoAdapter->pcm->rc)) ;
  if ( rc )
  {
   a->ram_in_buffer (a, IoAdapter->pcm, pcm, sizeof(*pcm)) ;
   return ;
  }
 }
Trapped:
 if ( IoAdapter->trapFnc )
 {
  int trapped = IoAdapter->trapped;
  IoAdapter->trapFnc (IoAdapter) ;
  /*
   * First trap, also notify user if supported
   */
  if (!trapped && IoAdapter->trapped && IoAdapter->os_trap_nfy_Fnc) {
   (*(IoAdapter->os_trap_nfy_Fnc))(IoAdapter, IoAdapter->ANum);
  }
 }
}
584
/*------------------------------------------------------------------*/
585
/* ram access functions for memory mapped cards */
586
/*------------------------------------------------------------------*/
587
/* Read one byte from the memory-mapped shared RAM at offset 'addr'. */
byte mem_in (ADAPTER *a, void *addr)
{
 PISDN_ADAPTER IoAdapter = (PISDN_ADAPTER)a->io;
 volatile byte __iomem *ram = DIVA_OS_MEM_ATTACH_RAM(IoAdapter);
 byte result = READ_BYTE(ram + (unsigned long)addr);
 DIVA_OS_MEM_DETACH_RAM(IoAdapter, ram);
 return result;
}
595
/* Read one 16-bit word from the memory-mapped shared RAM at offset 'addr'. */
word mem_inw (ADAPTER *a, void *addr)
{
 PISDN_ADAPTER IoAdapter = (PISDN_ADAPTER)a->io;
 volatile byte __iomem *ram = DIVA_OS_MEM_ATTACH_RAM(IoAdapter);
 word result = READ_WORD(ram + (unsigned long)addr);
 DIVA_OS_MEM_DETACH_RAM(IoAdapter, ram);
 return result;
}
603
/* Read 'dwords' consecutive 32-bit words from shared RAM into 'data'. */
void mem_in_dw (ADAPTER *a, void *addr, dword* data, int dwords)
{
 PISDN_ADAPTER IoAdapter = (PISDN_ADAPTER)a->io;
 volatile byte __iomem *ram = DIVA_OS_MEM_ATTACH_RAM(IoAdapter);
 int n;
 for (n = 0; n < dwords; n++) {
  data[n] = READ_DWORD(ram + (unsigned long)addr);
  addr = ((byte *)addr) + 4;   /* advance the card-side offset */
 }
 DIVA_OS_MEM_DETACH_RAM(IoAdapter, ram);
}
612
/* Copy 'length' bytes from shared RAM offset 'addr' into 'buffer'. */
void mem_in_buffer (ADAPTER *a, void *addr, void *buffer, word length)
{
 PISDN_ADAPTER IoAdapter = (PISDN_ADAPTER)a->io;
 volatile byte __iomem *ram = DIVA_OS_MEM_ATTACH_RAM(IoAdapter);
 memcpy_fromio(buffer, ram + (unsigned long)addr, length);
 DIVA_OS_MEM_DETACH_RAM(IoAdapter, ram);
}
618
/*
 * mem_look_ahead - prefetch the card's receive buffer into the adapter's
 * local RBuffer copy and point the entity at that copy, so the indication
 * can be inspected without further card accesses.
 */
void mem_look_ahead (ADAPTER *a, PBUFFER *RBuffer, ENTITY *e)
{
 PISDN_ADAPTER IoAdapter = (PISDN_ADAPTER)a->io ;
 IoAdapter->RBuffer.length = mem_inw (a, &RBuffer->length) ;
 mem_in_buffer (a, RBuffer->P, IoAdapter->RBuffer.P,
                IoAdapter->RBuffer.length) ;
 e->RBuffer = (DBUFFER *)&IoAdapter->RBuffer ;
}
626
/* Write one byte to the memory-mapped shared RAM at offset 'addr'. */
void mem_out (ADAPTER *a, void *addr, byte data)
{
 PISDN_ADAPTER IoAdapter = (PISDN_ADAPTER)a->io;
 volatile byte __iomem *ram = DIVA_OS_MEM_ATTACH_RAM(IoAdapter);
 WRITE_BYTE(ram + (unsigned long)addr, data);
 DIVA_OS_MEM_DETACH_RAM(IoAdapter, ram);
}
632
/* Write one 16-bit word to the memory-mapped shared RAM at offset 'addr'. */
void mem_outw (ADAPTER *a, void *addr, word data)
{
 PISDN_ADAPTER IoAdapter = (PISDN_ADAPTER)a->io;
 volatile byte __iomem *ram = DIVA_OS_MEM_ATTACH_RAM(IoAdapter);
 WRITE_WORD(ram + (unsigned long)addr, data);
 DIVA_OS_MEM_DETACH_RAM(IoAdapter, ram);
}
638
/* Write 'dwords' consecutive 32-bit words from 'data' into shared RAM. */
void mem_out_dw (ADAPTER *a, void *addr, const dword* data, int dwords)
{
 PISDN_ADAPTER IoAdapter = (PISDN_ADAPTER)a->io;
 volatile byte __iomem *ram = DIVA_OS_MEM_ATTACH_RAM(IoAdapter);
 int n;
 for (n = 0; n < dwords; n++) {
  WRITE_DWORD(ram + (unsigned long)addr, data[n]);
  addr = ((byte *)addr) + 4;   /* advance the card-side offset */
 }
 DIVA_OS_MEM_DETACH_RAM(IoAdapter, ram);
}
648
/* Copy 'length' bytes from 'buffer' into shared RAM at offset 'addr'. */
void mem_out_buffer (ADAPTER *a, void *addr, void *buffer, word length)
{
 PISDN_ADAPTER IoAdapter = (PISDN_ADAPTER)a->io;
 volatile byte __iomem *ram = DIVA_OS_MEM_ATTACH_RAM(IoAdapter);
 memcpy_toio(ram + (unsigned long)addr, buffer, length);
 DIVA_OS_MEM_DETACH_RAM(IoAdapter, ram);
}
654
/* Increment the byte in shared RAM at offset 'addr' (read-modify-write). */
void mem_inc (ADAPTER *a, void *addr)
{
 PISDN_ADAPTER IoAdapter = (PISDN_ADAPTER)a->io;
 volatile byte __iomem *ram = DIVA_OS_MEM_ATTACH_RAM(IoAdapter);
 byte count = READ_BYTE(ram + (unsigned long)addr);
 WRITE_BYTE(ram + (unsigned long)addr, count + 1);
 DIVA_OS_MEM_DETACH_RAM(IoAdapter, ram);
}
661
/*------------------------------------------------------------------*/
662
/* ram access functions for io-mapped cards */
663
/*------------------------------------------------------------------*/
664
/* Read one byte from an I/O-mapped card: select the address via the
   address register at Port+4, then read the data register. */
byte io_in(ADAPTER * a, void * adr)
{
 PISDN_ADAPTER IoAdapter = (PISDN_ADAPTER)a->io;
 byte __iomem *base = DIVA_OS_MEM_ATTACH_PORT(IoAdapter);
 byte result;
 outppw(base + 4, (word)(unsigned long)adr);
 result = inpp(base);
 DIVA_OS_MEM_DETACH_PORT(IoAdapter, base);
 return result;
}
673
/* Read one 16-bit word from an I/O-mapped card: select the address via
   the address register at Port+4, then read the data register. */
word io_inw(ADAPTER * a, void * adr)
{
 PISDN_ADAPTER IoAdapter = (PISDN_ADAPTER)a->io;
 byte __iomem *base = DIVA_OS_MEM_ATTACH_PORT(IoAdapter);
 word result;
 outppw(base + 4, (word)(unsigned long)adr);
 result = inppw(base);
 DIVA_OS_MEM_DETACH_PORT(IoAdapter, base);
 return result;
}
682
/*
 * io_in_buffer - read 'len' bytes from an I/O-mapped card into 'buffer'.
 *
 * The port interface transfers words, so an odd start address is first
 * aligned by reading a single byte.  The remainder is then streamed with
 * inppw_buffer; the 'len+1' rounds an odd residual length up to a full
 * word transfer (NOTE(review): presumably inppw_buffer takes a byte count
 * and only stores 'len' bytes of the last word — confirm in platform.h).
 */
void io_in_buffer(ADAPTER * a, void * adr, void * buffer, word len)
{
 byte __iomem *Port = DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io);
 byte* P = (byte*)buffer;
 if ((long)adr & 1) {
  /* align to an even card address with a single byte read */
  outppw(Port+4, (word)(unsigned long)adr);
  *P = inpp(Port);
  P++;
  adr = ((byte *) adr) + 1;
  len--;
  if (!len) {
   DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
   return;
  }
 }
 outppw(Port+4, (word)(unsigned long)adr);
 inppw_buffer (Port, P, len+1);
 DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
}
701
/* Prefetch the receive buffer of an I/O-mapped card into the adapter's
   local RBuffer copy and point the entity at that copy. */
void io_look_ahead(ADAPTER * a, PBUFFER * RBuffer, ENTITY * e)
{
 PISDN_ADAPTER IoAdapter = (PISDN_ADAPTER)a->io;
 byte __iomem *base = DIVA_OS_MEM_ATTACH_PORT(IoAdapter);
 outppw(base + 4, (word)(unsigned long)RBuffer);
 IoAdapter->RBuffer.length = inppw(base);
 inppw_buffer (base, IoAdapter->RBuffer.P, IoAdapter->RBuffer.length + 1);
 e->RBuffer = (DBUFFER *)&IoAdapter->RBuffer;
 DIVA_OS_MEM_DETACH_PORT(IoAdapter, base);
}
710
/* Write one byte to an I/O-mapped card: select the address via the
   address register at Port+4, then write the data register. */
void io_out(ADAPTER * a, void * adr, byte data)
{
 PISDN_ADAPTER IoAdapter = (PISDN_ADAPTER)a->io;
 byte __iomem *base = DIVA_OS_MEM_ATTACH_PORT(IoAdapter);
 outppw(base + 4, (word)(unsigned long)adr);
 outpp(base, data);
 DIVA_OS_MEM_DETACH_PORT(IoAdapter, base);
}
717
/* Write one 16-bit word to an I/O-mapped card: select the address via
   the address register at Port+4, then write the data register. */
void io_outw(ADAPTER * a, void * adr, word data)
{
 PISDN_ADAPTER IoAdapter = (PISDN_ADAPTER)a->io;
 byte __iomem *base = DIVA_OS_MEM_ATTACH_PORT(IoAdapter);
 outppw(base + 4, (word)(unsigned long)adr);
 outppw(base, data);
 DIVA_OS_MEM_DETACH_PORT(IoAdapter, base);
}
724
/*
 * io_out_buffer - write 'len' bytes from 'buffer' to an I/O-mapped card.
 *
 * Mirror of io_in_buffer: an odd start address is first aligned with a
 * single byte write, then the rest is streamed with outppw_buffer.  The
 * 'len+1' rounds an odd residual length up to a full word transfer
 * (NOTE(review): see the matching remark at io_in_buffer — confirm the
 * inppw_buffer/outppw_buffer length convention in platform.h).
 */
void io_out_buffer(ADAPTER * a, void * adr, void * buffer, word len)
{
 byte __iomem *Port = DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io);
 byte* P = (byte*)buffer;
 if ((long)adr & 1) {
  /* align to an even card address with a single byte write */
  outppw(Port+4, (word)(unsigned long)adr);
  outpp(Port, *P);
  P++;
  adr = ((byte *) adr) + 1;
  len--;
  if (!len) {
   DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
   return;
  }
 }
 outppw(Port+4, (word)(unsigned long)adr);
 outppw_buffer (Port, P, len+1);
 DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
}
743
/* Increment one byte on an I/O-mapped card (read-modify-write).  The
   address register is re-loaded before the write-back, exactly as in the
   original sequence. */
void io_inc(ADAPTER * a, void * adr)
{
 PISDN_ADAPTER IoAdapter = (PISDN_ADAPTER)a->io;
 byte __iomem *base = DIVA_OS_MEM_ATTACH_PORT(IoAdapter);
 byte count;
 outppw(base + 4, (word)(unsigned long)adr);
 count = inpp(base);
 outppw(base + 4, (word)(unsigned long)adr);
 outpp(base, count + 1);
 DIVA_OS_MEM_DETACH_PORT(IoAdapter, base);
}
753
/*------------------------------------------------------------------*/
754
/* OS specific functions related to queuing of entities */
755
/*------------------------------------------------------------------*/
756
/* Release entity slot 'e_no' of the adapter's entity table, under the
   data spin lock, and decrement the count of assigned entities. */
void free_entity(ADAPTER * a, byte e_no)
{
 PISDN_ADAPTER IoAdapter = (PISDN_ADAPTER)a->io;
 diva_os_spin_lock_magic_t lock_magic;
 diva_os_enter_spin_lock (&IoAdapter->data_spin_lock, &lock_magic, "data_free");
 IoAdapter->e_tbl[e_no].e = NULL;
 IoAdapter->e_count--;
 diva_os_leave_spin_lock (&IoAdapter->data_spin_lock, &lock_magic, "data_free");
}
766
/* Push entity slot 'e_no' onto the head of the adapter's assign list,
   remembering the assign reference 'ref' for a later get_assign lookup. */
void assign_queue(ADAPTER * a, byte e_no, word ref)
{
 PISDN_ADAPTER IoAdapter = (PISDN_ADAPTER)a->io;
 diva_os_spin_lock_magic_t lock_magic;
 diva_os_enter_spin_lock (&IoAdapter->data_spin_lock, &lock_magic,
                          "data_assign");
 IoAdapter->e_tbl[e_no].assign_ref = ref;
 IoAdapter->e_tbl[e_no].next = (byte)IoAdapter->assign;
 IoAdapter->assign = e_no;
 diva_os_leave_spin_lock (&IoAdapter->data_spin_lock, &lock_magic,
                          "data_assign");
}
777
/* Walk the assign list for the entity slot whose assign_ref matches
   'ref'; returns the slot number, or 0 if no match is found. */
byte get_assign(ADAPTER * a, word ref)
{
 PISDN_ADAPTER IoAdapter = (PISDN_ADAPTER)a->io;
 diva_os_spin_lock_magic_t lock_magic;
 byte slot;
 diva_os_enter_spin_lock (&IoAdapter->data_spin_lock,
                          &lock_magic,
                          "data_assign_get");
 slot = (byte)IoAdapter->assign;
 while (slot && IoAdapter->e_tbl[slot].assign_ref != ref)
  slot = IoAdapter->e_tbl[slot].next;
 diva_os_leave_spin_lock (&IoAdapter->data_spin_lock,
                          &lock_magic,
                          "data_assign_get");
 return slot;
}
794
/* Append entity slot 'e_no' to the tail of the adapter's request queue
   (a singly linked list threaded through e_tbl[].next, with head/tail
   indices; 0 means "empty"). */
void req_queue(ADAPTER * a, byte e_no)
{
 PISDN_ADAPTER IoAdapter = (PISDN_ADAPTER)a->io;
 diva_os_spin_lock_magic_t lock_magic;
 diva_os_enter_spin_lock (&IoAdapter->data_spin_lock, &lock_magic,
                          "data_req_q");
 IoAdapter->e_tbl[e_no].next = 0;
 if (!IoAdapter->head) {
  /* queue was empty: new element becomes the head */
  IoAdapter->head = e_no;
 } else {
  IoAdapter->e_tbl[IoAdapter->tail].next = e_no;
 }
 IoAdapter->tail = e_no;
 diva_os_leave_spin_lock (&IoAdapter->data_spin_lock, &lock_magic,
                          "data_req_q");
}
811
/* Peek at the adapter's request queue: returns the slot number at the
   head, or 0 if the queue is empty. */
byte look_req(ADAPTER * a)
{
 return (byte)(((PISDN_ADAPTER)a->io)->head);
}
817
/* Pop the head element of the adapter's request queue; resets the tail
   index when the queue becomes empty. */
void next_req(ADAPTER * a)
{
 PISDN_ADAPTER IoAdapter = (PISDN_ADAPTER)a->io;
 diva_os_spin_lock_magic_t lock_magic;
 diva_os_enter_spin_lock (&IoAdapter->data_spin_lock, &lock_magic,
                          "data_req_next");
 IoAdapter->head = IoAdapter->e_tbl[IoAdapter->head].next;
 if (IoAdapter->head == 0)
  IoAdapter->tail = 0;
 diva_os_leave_spin_lock (&IoAdapter->data_spin_lock, &lock_magic,
                          "data_req_next");
}
827
/*------------------------------------------------------------------*/
828
/* memory map functions */
829
/*------------------------------------------------------------------*/
830
/* Map an entity slot number back to its ENTITY pointer (NULL if free). */
ENTITY * entity_ptr(ADAPTER * a, byte e_no)
{
 PISDN_ADAPTER IoAdapter = (PISDN_ADAPTER)a->io;
 return IoAdapter->e_tbl[e_no].e;
}
836
/* Return the entity's transmit buffer descriptor array as a void*. */
void * PTR_X(ADAPTER * a, ENTITY * e)
{
 return (void *)(e->X);
}
840
/* Return the entity's receive buffer descriptor array as a void*. */
void * PTR_R(ADAPTER * a, ENTITY * e)
{
 return (void *)(e->R);
}
844
/* Identity mapping: memory-mapped adapters use data pointers as-is. */
void * PTR_P(ADAPTER * a, ENTITY * e, void * P)
{
 return P;
}
848
/* Invoke the entity's completion callback, if both are present. */
void CALLBACK(ADAPTER * a, ENTITY * e)
{
 if (!e || !e->callback)
  return;
 e->callback (e) ;
}
853
854