/******************************************************************************

 © 1995-2003, 2004, 2005-2011 Freescale Semiconductor, Inc.
 All rights reserved.

 This is proprietary source code of Freescale Semiconductor Inc.,
 and its use is subject to the NetComm Device Drivers EULA.
 The copyright notice above does not evidence any actual or intended
 publication of such source code.

 ALTERNATIVELY, redistribution and use in source and binary forms, with
 or without modification, are permitted provided that the following
 conditions are met:
     * Redistributions of source code must retain the above copyright
       notice, this list of conditions and the following disclaimer.
     * Redistributions in binary form must reproduce the above copyright
       notice, this list of conditions and the following disclaimer in the
       documentation and/or other materials provided with the distribution.
     * Neither the name of Freescale Semiconductor nor the
       names of its contributors may be used to endorse or promote products
       derived from this software without specific prior written permission.

 THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *

 **************************************************************************/

/******************************************************************************
 @File          qman_low.h

 @Description   QM Low-level implementation
*//***************************************************************************/
#include "std_ext.h"
#include "core_ext.h"
#include "xx_ext.h"
#include "error_ext.h"

#include "qman_private.h"

/***************************/
/* Portal register assists */
/***************************/

/* Cache-inhibited register offsets */
#define REG_EQCR_PI_CINH    0x0000
#define REG_EQCR_CI_CINH    0x0004
#define REG_EQCR_ITR        0x0008
#define REG_DQRR_PI_CINH    0x0040
#define REG_DQRR_CI_CINH    0x0044
#define REG_DQRR_ITR        0x0048
#define REG_DQRR_DCAP       0x0050
#define REG_DQRR_SDQCR      0x0054
#define REG_DQRR_VDQCR      0x0058
#define REG_DQRR_PDQCR      0x005c
#define REG_MR_PI_CINH      0x0080
#define REG_MR_CI_CINH      0x0084
#define REG_MR_ITR          0x0088
#define REG_CFG             0x0100
#define REG_ISR             0x0e00
#define REG_IER             0x0e04
#define REG_ISDR            0x0e08
#define REG_IIR             0x0e0c
#define REG_ITPR            0x0e14

/* Cache-enabled register offsets */
#define CL_EQCR             0x0000
#define CL_DQRR             0x1000
#define CL_MR               0x2000
#define CL_EQCR_PI_CENA     0x3000
#define CL_EQCR_CI_CENA     0x3100
#define CL_DQRR_PI_CENA     0x3200
#define CL_DQRR_CI_CENA     0x3300
#define CL_MR_PI_CENA       0x3400
#define CL_MR_CI_CENA       0x3500
#define CL_RORI_CENA        0x3600
#define CL_CR               0x3800
#define CL_RR0              0x3900
#define CL_RR1              0x3940

static __inline__ void *ptr_ADD(void *a, uintptr_t b)
{
    return (void *)((uintptr_t)a + b);
}

/* The h/w design requires mappings to be size-aligned so that "add"s can be
 * reduced to "or"s. The primitives below do the same for s/w. */
/* Bitwise-OR two pointers */
static __inline__ void *ptr_OR(void *a, uintptr_t b)
{
    return (void *)((uintptr_t)a | b);
}
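
/*
 * Illustrative sketch, not part of the driver: because portal mappings are
 * size-aligned and register offsets are smaller than the mapping size, the
 * offset bits never overlap the base bits, so OR and ADD agree. E.g. for a
 * hypothetical 16KB-aligned base:
 *
 *     void *base = (void *)0x10004000;    // 16KB-aligned (assumed)
 *     ptr_OR(base, 0x40);                 // == (void *)0x10004040
 *     ptr_ADD(base, 0x40);                // == (void *)0x10004040, same result
 *
 * With an unaligned base the two would differ, which is why the h/w requires
 * the alignment in the first place.
 */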

/* Cache-inhibited register access */
static __inline__ uint32_t __qm_in(struct qm_addr *qm, uintptr_t offset)
{
    uint32_t *tmp = (uint32_t *)ptr_ADD(qm->addr_ci, offset);
    return GET_UINT32(*tmp);
}
static __inline__ void __qm_out(struct qm_addr *qm, uintptr_t offset, uint32_t val)
{
    uint32_t *tmp = (uint32_t *)ptr_ADD(qm->addr_ci, offset);
    WRITE_UINT32(*tmp, val);
}
#define qm_in(reg)          __qm_in(&portal->addr, REG_##reg)
#define qm_out(reg, val)    __qm_out(&portal->addr, REG_##reg, (uint32_t)val)
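
/*
 * For example (illustration only), in a function with a local variable
 * 'struct qm_portal *portal', qm_out(EQCR_ITR, 0) token-pastes to
 * __qm_out(&portal->addr, REG_EQCR_ITR, (uint32_t)0), i.e. a 32-bit write
 * at cache-inhibited offset 0x0008.
 */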

/* Convert 'n' cachelines to a pointer value for bitwise OR */
#define qm_cl(n)            ((n) << 6)
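
/*
 * E.g. qm_cl(2) == 2 << 6 == 0x80: the byte offset of the third 64-byte
 * cacheline in a ring, suitable as the second operand of ptr_OR()/ptr_ADD().
 */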

/* Cache-enabled (index) register access */
static __inline__ void __qm_cl_touch_ro(struct qm_addr *qm, uintptr_t offset)
{
    dcbt_ro(ptr_ADD(qm->addr_ce, offset));
}
static __inline__ void __qm_cl_touch_rw(struct qm_addr *qm, uintptr_t offset)
{
    dcbt_rw(ptr_ADD(qm->addr_ce, offset));
}
static __inline__ uint32_t __qm_cl_in(struct qm_addr *qm, uintptr_t offset)
{
    uint32_t *tmp = (uint32_t *)ptr_ADD(qm->addr_ce, offset);
    return GET_UINT32(*tmp);
}
static __inline__ void __qm_cl_out(struct qm_addr *qm, uintptr_t offset, uint32_t val)
{
    uint32_t *tmp = (uint32_t *)ptr_ADD(qm->addr_ce, offset);
    WRITE_UINT32(*tmp, val);
    dcbf(tmp);
}
static __inline__ void __qm_cl_invalidate(struct qm_addr *qm, uintptr_t offset)
{
    dcbi(ptr_ADD(qm->addr_ce, offset));
}
#define qm_cl_touch_ro(reg)     __qm_cl_touch_ro(&portal->addr, CL_##reg##_CENA)
#define qm_cl_touch_rw(reg)     __qm_cl_touch_rw(&portal->addr, CL_##reg##_CENA)
#define qm_cl_in(reg)           __qm_cl_in(&portal->addr, CL_##reg##_CENA)
#define qm_cl_out(reg, val)     __qm_cl_out(&portal->addr, CL_##reg##_CENA, val)
#define qm_cl_invalidate(reg)   __qm_cl_invalidate(&portal->addr, CL_##reg##_CENA)

/* Cyclic helper for rings. TODO: once we are able to do fine-grain perf
 * analysis, look at using the "extra" bit in the ring index registers to avoid
 * cyclic issues. */
static __inline__ uint8_t cyc_diff(uint8_t ringsize, uint8_t first, uint8_t last)
{
    /* 'first' is included, 'last' is excluded */
    if (first <= last)
        return (uint8_t)(last - first);
    return (uint8_t)(ringsize + last - first);
}
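
/*
 * Worked example (illustrative): on an 8-deep ring, cyc_diff(8, 6, 2)
 * takes the wraparound branch and yields (uint8_t)(8 + 2 - 6) == 4,
 * covering entries 6, 7, 0 and 1 ('first' inclusive, 'last' exclusive).
 */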

static __inline__ t_Error __qm_portal_bind(struct qm_portal *portal, uint8_t iface)
{
    t_Error ret = E_BUSY;
    if (!(portal->config.bound & iface)) {
        portal->config.bound |= iface;
        ret = E_OK;
    }
    return ret;
}

static __inline__ void __qm_portal_unbind(struct qm_portal *portal, uint8_t iface)
{
#ifdef QM_CHECKING
    ASSERT_COND(portal->config.bound & iface);
#endif /* QM_CHECKING */
    portal->config.bound &= ~iface;
}

/* ---------------- */
/* --- EQCR API --- */

/* It's safer to code in terms of the 'eqcr' object than the 'portal' object,
 * because the latter runs the risk of copy-n-paste errors from other code where
 * we could manipulate some other structure within 'portal'. */
/* #define EQCR_API_START() register struct qm_eqcr *eqcr = &portal->eqcr */

/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
#define EQCR_CARRYCLEAR(p) \
    (void *)((uintptr_t)(p) & (~(uintptr_t)(QM_EQCR_SIZE << 6)))
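
/*
 * Sketch of the wrap (assumes QM_EQCR_SIZE == 8, i.e. a 0x200-byte ring of
 * 64-byte entries): a size-aligned ring base has the 0x200 bit clear, so a
 * cursor bumped one entry past the end sits exactly at base + 0x200, and
 * masking off that "carry bit" lands it back on the ring base.
 */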

/* Bit-wise logic to convert a ring pointer to a ring index */
static __inline__ uint8_t EQCR_PTR2IDX(struct qm_eqcr_entry *e)
{
    return (uint8_t)(((uintptr_t)e >> 6) & (QM_EQCR_SIZE - 1));
}

/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
static __inline__ void EQCR_INC(struct qm_eqcr *eqcr)
{
    /* NB: this is odd-looking, but experiments show that it generates fast
     * code with essentially no branching overheads. We increment to the
     * next EQCR pointer and handle overflow and 'vbit'. */
    struct qm_eqcr_entry *partial = eqcr->cursor + 1;
    eqcr->cursor = EQCR_CARRYCLEAR(partial);
    if (partial != eqcr->cursor)
        eqcr->vbit ^= QM_EQCR_VERB_VBIT;
}

static __inline__ t_Error qm_eqcr_init(struct qm_portal *portal, e_QmPortalProduceMode pmode,
                                       e_QmPortalEqcrConsumeMode cmode)
{
    register struct qm_eqcr *eqcr = &portal->eqcr;
    uint32_t cfg;
    uint8_t pi;

    if (__qm_portal_bind(portal, QM_BIND_EQCR))
        return ERROR_CODE(E_BUSY);
    eqcr->ring = ptr_ADD(portal->addr.addr_ce, CL_EQCR);
    eqcr->ci = (uint8_t)(qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1));
    qm_cl_invalidate(EQCR_CI);
    pi = (uint8_t)(qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1));
    eqcr->cursor = eqcr->ring + pi;
    eqcr->vbit = (uint8_t)((qm_in(EQCR_PI_CINH) & QM_EQCR_SIZE) ?
                           QM_EQCR_VERB_VBIT : 0);
    eqcr->available = (uint8_t)(QM_EQCR_SIZE - 1 -
                                cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi));
    eqcr->ithresh = (uint8_t)qm_in(EQCR_ITR);

#ifdef QM_CHECKING
    eqcr->busy = 0;
    eqcr->pmode = pmode;
    eqcr->cmode = cmode;
#else
    UNUSED(cmode);
#endif /* QM_CHECKING */
    cfg = (qm_in(CFG) & 0x00ffffff) |
          ((pmode & 0x3) << 24);    /* QCSP_CFG::EPM */
    qm_out(CFG, cfg);
    return E_OK;
}

static __inline__ void qm_eqcr_finish(struct qm_portal *portal)
{
    register struct qm_eqcr *eqcr = &portal->eqcr;
    uint8_t pi = (uint8_t)(qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1));
    uint8_t ci = (uint8_t)(qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1));

#ifdef QM_CHECKING
    ASSERT_COND(!eqcr->busy);
#endif /* QM_CHECKING */
    if (pi != EQCR_PTR2IDX(eqcr->cursor))
        REPORT_ERROR(WARNING, E_INVALID_STATE, ("losing uncommitted EQCR entries"));
    if (ci != eqcr->ci)
        REPORT_ERROR(WARNING, E_INVALID_STATE, ("missing existing EQCR completions"));
    if (eqcr->ci != EQCR_PTR2IDX(eqcr->cursor))
        REPORT_ERROR(WARNING, E_INVALID_STATE, ("EQCR destroyed unquiesced"));
    __qm_portal_unbind(portal, QM_BIND_EQCR);
}

static __inline__ struct qm_eqcr_entry *qm_eqcr_start(struct qm_portal *portal)
{
    register struct qm_eqcr *eqcr = &portal->eqcr;
#ifdef QM_CHECKING
    ASSERT_COND(!eqcr->busy);
#endif /* QM_CHECKING */
    if (!eqcr->available)
        return NULL;
#ifdef QM_CHECKING
    eqcr->busy = 1;
#endif /* QM_CHECKING */
    dcbz_64(eqcr->cursor);
    return eqcr->cursor;
}

static __inline__ void qm_eqcr_abort(struct qm_portal *portal)
{
#ifdef QM_CHECKING
    register struct qm_eqcr *eqcr = &portal->eqcr;
    ASSERT_COND(eqcr->busy);
    eqcr->busy = 0;
#else
    UNUSED(portal);
#endif /* QM_CHECKING */
}

static __inline__ struct qm_eqcr_entry *qm_eqcr_pend_and_next(struct qm_portal *portal, uint8_t myverb)
{
    register struct qm_eqcr *eqcr = &portal->eqcr;
#ifdef QM_CHECKING
    ASSERT_COND(eqcr->busy);
    ASSERT_COND(eqcr->pmode != e_QmPortalPVB);
#endif /* QM_CHECKING */
    if (eqcr->available == 1)
        return NULL;
    eqcr->cursor->__dont_write_directly__verb = (uint8_t)(myverb | eqcr->vbit);
    dcbf_64(eqcr->cursor);
    EQCR_INC(eqcr);
    eqcr->available--;
    dcbz_64(eqcr->cursor);
    return eqcr->cursor;
}

#ifdef QM_CHECKING
#define EQCR_COMMIT_CHECKS(eqcr) \
    do { \
        ASSERT_COND(eqcr->busy); \
        ASSERT_COND(eqcr->cursor->orp == (eqcr->cursor->orp & 0x00ffffff)); \
        ASSERT_COND(eqcr->cursor->fqid == (eqcr->cursor->fqid & 0x00ffffff)); \
    } while(0)
#else
#define EQCR_COMMIT_CHECKS(eqcr)
#endif /* QM_CHECKING */

static __inline__ void qmPortalEqcrPciCommit(struct qm_portal *portal, uint8_t myverb)
{
    register struct qm_eqcr *eqcr = &portal->eqcr;
#ifdef QM_CHECKING
    EQCR_COMMIT_CHECKS(eqcr);
    ASSERT_COND(eqcr->pmode == e_QmPortalPCI);
#endif /* QM_CHECKING */
    eqcr->cursor->__dont_write_directly__verb = (uint8_t)(myverb | eqcr->vbit);
    EQCR_INC(eqcr);
    eqcr->available--;
    dcbf_64(eqcr->cursor);
    mb();
    qm_out(EQCR_PI_CINH, EQCR_PTR2IDX(eqcr->cursor));
#ifdef QM_CHECKING
    eqcr->busy = 0;
#endif /* QM_CHECKING */
}

static __inline__ void qmPortalEqcrPcePrefetch(struct qm_portal *portal)
{
#ifdef QM_CHECKING
    register struct qm_eqcr *eqcr = &portal->eqcr;
    ASSERT_COND(eqcr->pmode == e_QmPortalPCE);
#endif /* QM_CHECKING */
    qm_cl_invalidate(EQCR_PI);
    qm_cl_touch_rw(EQCR_PI);
}

static __inline__ void qmPortalEqcrPceCommit(struct qm_portal *portal, uint8_t myverb)
{
    register struct qm_eqcr *eqcr = &portal->eqcr;
#ifdef QM_CHECKING
    EQCR_COMMIT_CHECKS(eqcr);
    ASSERT_COND(eqcr->pmode == e_QmPortalPCE);
#endif /* QM_CHECKING */
    eqcr->cursor->__dont_write_directly__verb = (uint8_t)(myverb | eqcr->vbit);
    EQCR_INC(eqcr);
    eqcr->available--;
    dcbf_64(eqcr->cursor);
    wmb();
    qm_cl_out(EQCR_PI, EQCR_PTR2IDX(eqcr->cursor));
#ifdef QM_CHECKING
    eqcr->busy = 0;
#endif /* QM_CHECKING */
}

static __inline__ void qmPortalEqcrPvbCommit(struct qm_portal *portal, uint8_t myverb)
{
    register struct qm_eqcr *eqcr = &portal->eqcr;
    struct qm_eqcr_entry *eqcursor;
#ifdef QM_CHECKING
    EQCR_COMMIT_CHECKS(eqcr);
    ASSERT_COND(eqcr->pmode == e_QmPortalPVB);
#endif /* QM_CHECKING */
    rmb();
    eqcursor = eqcr->cursor;
    eqcursor->__dont_write_directly__verb = (uint8_t)(myverb | eqcr->vbit);
    dcbf_64(eqcursor);
    EQCR_INC(eqcr);
    eqcr->available--;
#ifdef QM_CHECKING
    eqcr->busy = 0;
#endif /* QM_CHECKING */
}
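
/*
 * Illustrative enqueue flow under the PVB produce mode (a sketch, not from
 * this file: the 'fd' field and the QM_EQCR_VERB_CMD_ENQUEUE verb are
 * assumptions about qman_private.h):
 *
 *     struct qm_eqcr_entry *eq = qm_eqcr_start(portal);
 *     if (eq) {
 *         eq->fqid = fqid;       // target frame queue
 *         eq->fd = *fd;          // frame descriptor payload (assumed field)
 *         qmPortalEqcrPvbCommit(portal, QM_EQCR_VERB_CMD_ENQUEUE);
 *     }
 *
 * The verb byte is written last, via the commit, because hardware treats a
 * matching valid-bit in the verb as "this entry is ready".
 */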

static __inline__ uint8_t qmPortalEqcrCciUpdate(struct qm_portal *portal)
{
    register struct qm_eqcr *eqcr = &portal->eqcr;
    uint8_t diff, old_ci = eqcr->ci;
#ifdef QM_CHECKING
    ASSERT_COND(eqcr->cmode == e_QmPortalEqcrCCI);
#endif /* QM_CHECKING */
    eqcr->ci = (uint8_t)(qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1));
    diff = cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
    eqcr->available += diff;
    return diff;
}

static __inline__ void qmPortalEqcrCcePrefetch(struct qm_portal *portal)
{
#ifdef QM_CHECKING
    register struct qm_eqcr *eqcr = &portal->eqcr;
    ASSERT_COND(eqcr->cmode == e_QmPortalEqcrCCE);
#endif /* QM_CHECKING */
    qm_cl_touch_ro(EQCR_CI);
}

static __inline__ uint8_t qmPortalEqcrCceUpdate(struct qm_portal *portal)
{
    register struct qm_eqcr *eqcr = &portal->eqcr;
    uint8_t diff, old_ci = eqcr->ci;
#ifdef QM_CHECKING
    ASSERT_COND(eqcr->cmode == e_QmPortalEqcrCCE);
#endif /* QM_CHECKING */
    eqcr->ci = (uint8_t)(qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1));
    qm_cl_invalidate(EQCR_CI);
    diff = cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
    eqcr->available += diff;
    return diff;
}

static __inline__ uint8_t qm_eqcr_get_ithresh(struct qm_portal *portal)
{
    register struct qm_eqcr *eqcr = &portal->eqcr;
    return eqcr->ithresh;
}

static __inline__ void qm_eqcr_set_ithresh(struct qm_portal *portal, uint8_t ithresh)
{
    register struct qm_eqcr *eqcr = &portal->eqcr;
    eqcr->ithresh = ithresh;
    qm_out(EQCR_ITR, ithresh);
}

static __inline__ uint8_t qm_eqcr_get_avail(struct qm_portal *portal)
{
    register struct qm_eqcr *eqcr = &portal->eqcr;
    return eqcr->available;
}

static __inline__ uint8_t qm_eqcr_get_fill(struct qm_portal *portal)
{
    register struct qm_eqcr *eqcr = &portal->eqcr;
    return (uint8_t)(QM_EQCR_SIZE - 1 - eqcr->available);
}

/* ---------------- */
/* --- DQRR API --- */

/* TODO: many possible improvements;
 * - look at changing the API to use pointer rather than index parameters now
 *   that 'cursor' is a pointer,
 * - consider moving other parameters to pointer if it could help (ci)
 */

/* It's safer to code in terms of the 'dqrr' object than the 'portal' object,
 * because the latter runs the risk of copy-n-paste errors from other code where
 * we could manipulate some other structure within 'portal'. */
/* #define DQRR_API_START() register struct qm_dqrr *dqrr = &portal->dqrr */

#define DQRR_CARRYCLEAR(p) \
    (void *)((uintptr_t)(p) & (~(uintptr_t)(QM_DQRR_SIZE << 6)))

static __inline__ uint8_t DQRR_PTR2IDX(struct qm_dqrr_entry *e)
{
    return (uint8_t)(((uintptr_t)e >> 6) & (QM_DQRR_SIZE - 1));
}

static __inline__ struct qm_dqrr_entry *DQRR_INC(struct qm_dqrr_entry *e)
{
    return DQRR_CARRYCLEAR(e + 1);
}

static __inline__ void qm_dqrr_set_maxfill(struct qm_portal *portal, uint8_t mf)
{
    qm_out(CFG, (qm_in(CFG) & 0xff0fffff) |
                ((mf & (QM_DQRR_SIZE - 1)) << 20));
}

static __inline__ t_Error qm_dqrr_init(struct qm_portal *portal, e_QmPortalDequeueMode dmode,
                                       e_QmPortalProduceMode pmode, e_QmPortalDqrrConsumeMode cmode,
                                       uint8_t max_fill, int stash_ring, int stash_data)
{
    register struct qm_dqrr *dqrr = &portal->dqrr;
    const struct qm_portal_config *config = &portal->config;
    uint32_t cfg;

    if (__qm_portal_bind(portal, QM_BIND_DQRR))
        return ERROR_CODE(E_BUSY);
    if ((stash_ring || stash_data) && (config->cpu == -1))
        return ERROR_CODE(E_INVALID_STATE);
    /* Make sure the DQRR will be idle when we enable */
    qm_out(DQRR_SDQCR, 0);
    qm_out(DQRR_VDQCR, 0);
    qm_out(DQRR_PDQCR, 0);
    dqrr->ring = ptr_ADD(portal->addr.addr_ce, CL_DQRR);
    dqrr->pi = (uint8_t)(qm_in(DQRR_PI_CINH) & (QM_DQRR_SIZE - 1));
    dqrr->ci = (uint8_t)(qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1));
    dqrr->cursor = dqrr->ring + dqrr->ci;
    dqrr->fill = cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
    dqrr->vbit = (uint8_t)((qm_in(DQRR_PI_CINH) & QM_DQRR_SIZE) ?
                           QM_DQRR_VERB_VBIT : 0);
    dqrr->ithresh = (uint8_t)qm_in(DQRR_ITR);

#ifdef QM_CHECKING
    dqrr->dmode = dmode;
    dqrr->pmode = pmode;
    dqrr->cmode = cmode;
    dqrr->flags = 0;
    if (stash_ring)
        dqrr->flags |= QM_DQRR_FLAG_RE;
    if (stash_data)
        dqrr->flags |= QM_DQRR_FLAG_SE;
#else
    UNUSED(pmode);
#endif /* QM_CHECKING */

    cfg = (qm_in(CFG) & 0xff000f00) |
          ((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */
          ((dmode & 1) << 18) |                     /* DP */
          ((cmode & 3) << 16) |                     /* DCM */
          (stash_ring ? 0x80 : 0) |                 /* RE */
          (0 ? 0x40 : 0) |                          /* Ignore RP */
          (stash_data ? 0x20 : 0) |                 /* SE */
          (0 ? 0x10 : 0);                           /* Ignore SP */
    qm_out(CFG, cfg);
    return E_OK;
}

static __inline__ void qm_dqrr_finish(struct qm_portal *portal)
{
    register struct qm_dqrr *dqrr = &portal->dqrr;
    if (dqrr->ci != DQRR_PTR2IDX(dqrr->cursor))
        REPORT_ERROR(WARNING, E_INVALID_STATE, ("Ignoring completed DQRR entries"));
    __qm_portal_unbind(portal, QM_BIND_DQRR);
}

static __inline__ struct qm_dqrr_entry *qm_dqrr_current(struct qm_portal *portal)
{
    register struct qm_dqrr *dqrr = &portal->dqrr;
    if (!dqrr->fill)
        return NULL;
    return dqrr->cursor;
}

static __inline__ uint8_t qm_dqrr_cursor(struct qm_portal *portal)
{
    register struct qm_dqrr *dqrr = &portal->dqrr;
    return DQRR_PTR2IDX(dqrr->cursor);
}

static __inline__ uint8_t qm_dqrr_next(struct qm_portal *portal)
{
    register struct qm_dqrr *dqrr = &portal->dqrr;
#ifdef QM_CHECKING
    ASSERT_COND(dqrr->fill);
#endif /* QM_CHECKING */
    dqrr->cursor = DQRR_INC(dqrr->cursor);
    return --dqrr->fill;
}

static __inline__ uint8_t qmPortalDqrrPciUpdate(struct qm_portal *portal)
{
    register struct qm_dqrr *dqrr = &portal->dqrr;
    uint8_t diff, old_pi = dqrr->pi;
#ifdef QM_CHECKING
    ASSERT_COND(dqrr->pmode == e_QmPortalPCI);
#endif /* QM_CHECKING */
    dqrr->pi = (uint8_t)(qm_in(DQRR_PI_CINH) & (QM_DQRR_SIZE - 1));
    diff = cyc_diff(QM_DQRR_SIZE, old_pi, dqrr->pi);
    dqrr->fill += diff;
    return diff;
}

static __inline__ void qmPortalDqrrPcePrefetch(struct qm_portal *portal)
{
#ifdef QM_CHECKING
    register struct qm_dqrr *dqrr = &portal->dqrr;
    ASSERT_COND(dqrr->pmode == e_QmPortalPCE);
#endif /* QM_CHECKING */
    qm_cl_invalidate(DQRR_PI);
    qm_cl_touch_ro(DQRR_PI);
}

static __inline__ uint8_t qmPortalDqrrPceUpdate(struct qm_portal *portal)
{
    register struct qm_dqrr *dqrr = &portal->dqrr;
    uint8_t diff, old_pi = dqrr->pi;
#ifdef QM_CHECKING
    ASSERT_COND(dqrr->pmode == e_QmPortalPCE);
#endif /* QM_CHECKING */
    dqrr->pi = (uint8_t)(qm_cl_in(DQRR_PI) & (QM_DQRR_SIZE - 1));
    diff = cyc_diff(QM_DQRR_SIZE, old_pi, dqrr->pi);
    dqrr->fill += diff;
    return diff;
}

static __inline__ void qmPortalDqrrPvbPrefetch(struct qm_portal *portal)
{
    register struct qm_dqrr *dqrr = &portal->dqrr;
#ifdef QM_CHECKING
    ASSERT_COND(dqrr->pmode == e_QmPortalPVB);
    /* If ring entries get stashed, don't invalidate/prefetch */
    if (!(dqrr->flags & QM_DQRR_FLAG_RE))
#endif /* QM_CHECKING */
        dcbit_ro(ptr_ADD(dqrr->ring, qm_cl(dqrr->pi)));
}

static __inline__ uint8_t qmPortalDqrrPvbUpdate(struct qm_portal *portal)
{
    register struct qm_dqrr *dqrr = &portal->dqrr;
    struct qm_dqrr_entry *res = ptr_ADD(dqrr->ring, qm_cl(dqrr->pi));
#ifdef QM_CHECKING
    ASSERT_COND(dqrr->pmode == e_QmPortalPVB);
#endif /* QM_CHECKING */
    if ((res->verb & QM_DQRR_VERB_VBIT) == dqrr->vbit) {
        dqrr->pi = (uint8_t)((dqrr->pi + 1) & (QM_DQRR_SIZE - 1));
        if (!dqrr->pi)
            dqrr->vbit ^= QM_DQRR_VERB_VBIT;
        dqrr->fill++;
        return 1;
    }
    return 0;
}

static __inline__ void qmPortalDqrrCciConsume(struct qm_portal *portal, uint8_t num)
{
    register struct qm_dqrr *dqrr = &portal->dqrr;
#ifdef QM_CHECKING
    ASSERT_COND(dqrr->cmode == e_QmPortalDqrrCCI);
#endif /* QM_CHECKING */
    dqrr->ci = (uint8_t)((dqrr->ci + num) & (QM_DQRR_SIZE - 1));
    qm_out(DQRR_CI_CINH, dqrr->ci);
}

static __inline__ void qmPortalDqrrCciConsumeToCurrent(struct qm_portal *portal)
{
    register struct qm_dqrr *dqrr = &portal->dqrr;
#ifdef QM_CHECKING
    ASSERT_COND(dqrr->cmode == e_QmPortalDqrrCCI);
#endif /* QM_CHECKING */
    dqrr->ci = DQRR_PTR2IDX(dqrr->cursor);
    qm_out(DQRR_CI_CINH, dqrr->ci);
}
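
/*
 * Sketch of a polling dequeue loop (illustrative only; assumes the portal
 * was configured with the PVB produce mode and CCI consume mode, and
 * 'handle_dqrr_entry' is a hypothetical caller-supplied handler):
 *
 *     struct qm_dqrr_entry *dq;
 *     qmPortalDqrrPvbUpdate(portal);
 *     while ((dq = qm_dqrr_current(portal)) != NULL) {
 *         handle_dqrr_entry(dq);
 *         qm_dqrr_next(portal);
 *     }
 *     qmPortalDqrrCciConsumeToCurrent(portal);
 */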

static __inline__ void qmPortalDqrrCcePrefetch(struct qm_portal *portal)
{
#ifdef QM_CHECKING
    register struct qm_dqrr *dqrr = &portal->dqrr;
    ASSERT_COND(dqrr->cmode == e_QmPortalDqrrCCE);
#endif /* QM_CHECKING */
    qm_cl_invalidate(DQRR_CI);
    qm_cl_touch_rw(DQRR_CI);
}

static __inline__ void qmPortalDqrrCceConsume(struct qm_portal *portal, uint8_t num)
{
    register struct qm_dqrr *dqrr = &portal->dqrr;
#ifdef QM_CHECKING
    ASSERT_COND(dqrr->cmode == e_QmPortalDqrrCCE);
#endif /* QM_CHECKING */
    dqrr->ci = (uint8_t)((dqrr->ci + num) & (QM_DQRR_SIZE - 1));
    qm_cl_out(DQRR_CI, dqrr->ci);
}

static __inline__ void qmPortalDqrrCceConsume_to_current(struct qm_portal *portal)
{
    register struct qm_dqrr *dqrr = &portal->dqrr;
#ifdef QM_CHECKING
    ASSERT_COND(dqrr->cmode == e_QmPortalDqrrCCE);
#endif /* QM_CHECKING */
    dqrr->ci = DQRR_PTR2IDX(dqrr->cursor);
    qm_cl_out(DQRR_CI, dqrr->ci);
}

static __inline__ void qmPortalDqrrDcaConsume1(struct qm_portal *portal, uint8_t idx, bool park)
{
#ifdef QM_CHECKING
    register struct qm_dqrr *dqrr = &portal->dqrr;
    ASSERT_COND(dqrr->cmode == e_QmPortalDqrrDCA);
#endif /* QM_CHECKING */
    ASSERT_COND(idx < QM_DQRR_SIZE);
    qm_out(DQRR_DCAP, (0 << 8) |                        /* S */
                      ((uint32_t)(park ? 1 : 0) << 6) | /* PK */
                      idx);                             /* DCAP_CI */
}

static __inline__ void qmPortalDqrrDcaConsume1ptr(struct qm_portal *portal,
                                                  struct qm_dqrr_entry *dq,
                                                  bool park)
{
    uint8_t idx = DQRR_PTR2IDX(dq);
#ifdef QM_CHECKING
    register struct qm_dqrr *dqrr = &portal->dqrr;

    ASSERT_COND(dqrr->cmode == e_QmPortalDqrrDCA);
    ASSERT_COND((dqrr->ring + idx) == dq);
    ASSERT_COND(idx < QM_DQRR_SIZE);
#endif /* QM_CHECKING */
    qm_out(DQRR_DCAP, (0 << 8) |                        /* DQRR_DCAP::S */
                      ((uint32_t)(park ? 1 : 0) << 6) | /* DQRR_DCAP::PK */
                      idx);                             /* DQRR_DCAP::DCAP_CI */
}

static __inline__ void qmPortalDqrrDcaConsumeN(struct qm_portal *portal, uint16_t bitmask)
{
#ifdef QM_CHECKING
    register struct qm_dqrr *dqrr = &portal->dqrr;
    ASSERT_COND(dqrr->cmode == e_QmPortalDqrrDCA);
#endif /* QM_CHECKING */
    qm_out(DQRR_DCAP, (1 << 8) |                  /* DQRR_DCAP::S */
                      ((uint32_t)bitmask << 16)); /* DQRR_DCAP::DCAP_CI */
}

static __inline__ uint8_t qmPortalDqrrDcaCci(struct qm_portal *portal)
{
#ifdef QM_CHECKING
    register struct qm_dqrr *dqrr = &portal->dqrr;
    ASSERT_COND(dqrr->cmode == e_QmPortalDqrrDCA);
#endif /* QM_CHECKING */
    return (uint8_t)(qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1));
}

static __inline__ void qmPortalDqrrDcaCcePrefetch(struct qm_portal *portal)
{
#ifdef QM_CHECKING
    register struct qm_dqrr *dqrr = &portal->dqrr;
    ASSERT_COND(dqrr->cmode == e_QmPortalDqrrDCA);
#endif /* QM_CHECKING */
    qm_cl_invalidate(DQRR_CI);
    qm_cl_touch_ro(DQRR_CI);
}

static __inline__ uint8_t qmPortalDqrrDcaCce(struct qm_portal *portal)
{
#ifdef QM_CHECKING
    register struct qm_dqrr *dqrr = &portal->dqrr;
    ASSERT_COND(dqrr->cmode == e_QmPortalDqrrDCA);
#endif /* QM_CHECKING */
    return (uint8_t)(qm_cl_in(DQRR_CI) & (QM_DQRR_SIZE - 1));
}

static __inline__ uint8_t qm_dqrr_get_ci(struct qm_portal *portal)
{
    register struct qm_dqrr *dqrr = &portal->dqrr;
#ifdef QM_CHECKING
    ASSERT_COND(dqrr->cmode != e_QmPortalDqrrDCA);
#endif /* QM_CHECKING */

    return dqrr->ci;
}

static __inline__ void qm_dqrr_park(struct qm_portal *portal, uint8_t idx)
{
#ifdef QM_CHECKING
    register struct qm_dqrr *dqrr = &portal->dqrr;
    ASSERT_COND(dqrr->cmode != e_QmPortalDqrrDCA);
#endif /* QM_CHECKING */

    qm_out(DQRR_DCAP, (0 << 8) |                        /* S */
                      (uint32_t)(1 << 6) |              /* PK */
                      (idx & (QM_DQRR_SIZE - 1)));      /* DCAP_CI */
}

static __inline__ void qm_dqrr_park_ci(struct qm_portal *portal)
{
    register struct qm_dqrr *dqrr = &portal->dqrr;
#ifdef QM_CHECKING
    ASSERT_COND(dqrr->cmode != e_QmPortalDqrrDCA);
#endif /* QM_CHECKING */
    qm_out(DQRR_DCAP, (0 << 8) |                        /* S */
                      (uint32_t)(1 << 6) |              /* PK */
                      (dqrr->ci & (QM_DQRR_SIZE - 1))); /* DCAP_CI */
}

static __inline__ void qm_dqrr_sdqcr_set(struct qm_portal *portal, uint32_t sdqcr)
{
    qm_out(DQRR_SDQCR, sdqcr);
}

static __inline__ uint32_t qm_dqrr_sdqcr_get(struct qm_portal *portal)
{
    return qm_in(DQRR_SDQCR);
}

static __inline__ void qm_dqrr_vdqcr_set(struct qm_portal *portal, uint32_t vdqcr)
{
    qm_out(DQRR_VDQCR, vdqcr);
}

static __inline__ uint32_t qm_dqrr_vdqcr_get(struct qm_portal *portal)
{
    return qm_in(DQRR_VDQCR);
}

static __inline__ void qm_dqrr_pdqcr_set(struct qm_portal *portal, uint32_t pdqcr)
{
    qm_out(DQRR_PDQCR, pdqcr);
}

static __inline__ uint32_t qm_dqrr_pdqcr_get(struct qm_portal *portal)
{
    return qm_in(DQRR_PDQCR);
}

static __inline__ uint8_t qm_dqrr_get_ithresh(struct qm_portal *portal)
{
    register struct qm_dqrr *dqrr = &portal->dqrr;
    return dqrr->ithresh;
}

static __inline__ void qm_dqrr_set_ithresh(struct qm_portal *portal, uint8_t ithresh)
{
    qm_out(DQRR_ITR, ithresh);
}

static __inline__ uint8_t qm_dqrr_get_maxfill(struct qm_portal *portal)
{
    return (uint8_t)((qm_in(CFG) & 0x00f00000) >> 20);
}

/* -------------- */
/* --- MR API --- */

/* It's safer to code in terms of the 'mr' object than the 'portal' object,
 * because the latter runs the risk of copy-n-paste errors from other code where
 * we could manipulate some other structure within 'portal'. */
/* #define MR_API_START() register struct qm_mr *mr = &portal->mr */

#define MR_CARRYCLEAR(p) \
    (void *)((uintptr_t)(p) & (~(uintptr_t)(QM_MR_SIZE << 6)))

static __inline__ uint8_t MR_PTR2IDX(struct qm_mr_entry *e)
{
    return (uint8_t)(((uintptr_t)e >> 6) & (QM_MR_SIZE - 1));
}

static __inline__ struct qm_mr_entry *MR_INC(struct qm_mr_entry *e)
{
    return MR_CARRYCLEAR(e + 1);
}

static __inline__ t_Error qm_mr_init(struct qm_portal *portal, e_QmPortalProduceMode pmode,
                                     e_QmPortalMrConsumeMode cmode)
{
    register struct qm_mr *mr = &portal->mr;
    uint32_t cfg;

    if (__qm_portal_bind(portal, QM_BIND_MR))
        return ERROR_CODE(E_BUSY);
    mr->ring = ptr_ADD(portal->addr.addr_ce, CL_MR);
    mr->pi = (uint8_t)(qm_in(MR_PI_CINH) & (QM_MR_SIZE - 1));
    mr->ci = (uint8_t)(qm_in(MR_CI_CINH) & (QM_MR_SIZE - 1));
    mr->cursor = mr->ring + mr->ci;
    mr->fill = cyc_diff(QM_MR_SIZE, mr->ci, mr->pi);
    mr->vbit = (uint8_t)((qm_in(MR_PI_CINH) & QM_MR_SIZE) ? QM_MR_VERB_VBIT : 0);
    mr->ithresh = (uint8_t)qm_in(MR_ITR);

#ifdef QM_CHECKING
    mr->pmode = pmode;
    mr->cmode = cmode;
#else
    UNUSED(pmode);
#endif /* QM_CHECKING */
    cfg = (qm_in(CFG) & 0xfffff0ff) |
          ((cmode & 1) << 8);   /* QCSP_CFG:MM */
    qm_out(CFG, cfg);
    return E_OK;
}

static __inline__ void qm_mr_finish(struct qm_portal *portal)
{
    register struct qm_mr *mr = &portal->mr;
    if (mr->ci != MR_PTR2IDX(mr->cursor))
        REPORT_ERROR(WARNING, E_INVALID_STATE, ("Ignoring completed MR entries"));
    __qm_portal_unbind(portal, QM_BIND_MR);
}

static __inline__ void qm_mr_current_prefetch(struct qm_portal *portal)
{
    register struct qm_mr *mr = &portal->mr;
    dcbt_ro(mr->cursor);
}

static __inline__ struct qm_mr_entry *qm_mr_current(struct qm_portal *portal)
{
    register struct qm_mr *mr = &portal->mr;
    if (!mr->fill)
        return NULL;
    return mr->cursor;
}

static __inline__ uint8_t qm_mr_cursor(struct qm_portal *portal)
{
    register struct qm_mr *mr = &portal->mr;
    return MR_PTR2IDX(mr->cursor);
}

static __inline__ uint8_t qm_mr_next(struct qm_portal *portal)
{
    register struct qm_mr *mr = &portal->mr;
#ifdef QM_CHECKING
    ASSERT_COND(mr->fill);
#endif /* QM_CHECKING */
    mr->cursor = MR_INC(mr->cursor);
    return --mr->fill;
}

static __inline__ uint8_t qmPortalMrPciUpdate(struct qm_portal *portal)
{
    register struct qm_mr *mr = &portal->mr;
    uint8_t diff, old_pi = mr->pi;
#ifdef QM_CHECKING
    ASSERT_COND(mr->pmode == e_QmPortalPCI);
#endif /* QM_CHECKING */
    /* Mask the index to the ring size, as the other update helpers do;
     * the raw register value also carries the wrap bit. */
    mr->pi = (uint8_t)(qm_in(MR_PI_CINH) & (QM_MR_SIZE - 1));
    diff = cyc_diff(QM_MR_SIZE, old_pi, mr->pi);
    mr->fill += diff;
    return diff;
}

static __inline__ void qmPortalMrPcePrefetch(struct qm_portal *portal)
{
#ifdef QM_CHECKING
    register struct qm_mr *mr = &portal->mr;
    ASSERT_COND(mr->pmode == e_QmPortalPCE);
#endif /* QM_CHECKING */
    qm_cl_invalidate(MR_PI);
    qm_cl_touch_ro(MR_PI);
}

static __inline__ uint8_t qmPortalMrPceUpdate(struct qm_portal *portal)
{
    register struct qm_mr *mr = &portal->mr;
    uint8_t diff, old_pi = mr->pi;
#ifdef QM_CHECKING
    ASSERT_COND(mr->pmode == e_QmPortalPCE);
#endif /* QM_CHECKING */
    mr->pi = (uint8_t)(qm_cl_in(MR_PI) & (QM_MR_SIZE - 1));
    diff = cyc_diff(QM_MR_SIZE, old_pi, mr->pi);
    mr->fill += diff;
    return diff;
}

static __inline__ void qmPortalMrPvbUpdate(struct qm_portal *portal)
{
    register struct qm_mr *mr = &portal->mr;
    struct qm_mr_entry *res = ptr_ADD(mr->ring, qm_cl(mr->pi));
#ifdef QM_CHECKING
    ASSERT_COND(mr->pmode == e_QmPortalPVB);
#endif /* QM_CHECKING */
    dcbit_ro(ptr_ADD(mr->ring, qm_cl(mr->pi)));
    if ((res->verb & QM_MR_VERB_VBIT) == mr->vbit) {
        mr->pi = (uint8_t)((mr->pi + 1) & (QM_MR_SIZE - 1));
        if (!mr->pi)
            mr->vbit ^= QM_MR_VERB_VBIT;
        mr->fill++;
    }
}

static __inline__ void qmPortalMrCciConsume(struct qm_portal *portal, uint8_t num)
{
    register struct qm_mr *mr = &portal->mr;
#ifdef QM_CHECKING
    ASSERT_COND(mr->cmode == e_QmPortalMrCCI);
#endif /* QM_CHECKING */
    mr->ci = (uint8_t)((mr->ci + num) & (QM_MR_SIZE - 1));
    qm_out(MR_CI_CINH, mr->ci);
}

static __inline__ void qmPortalMrCciConsumeToCurrent(struct qm_portal *portal)
{
    register struct qm_mr *mr = &portal->mr;
#ifdef QM_CHECKING
    ASSERT_COND(mr->cmode == e_QmPortalMrCCI);
#endif /* QM_CHECKING */
    mr->ci = MR_PTR2IDX(mr->cursor);
    qm_out(MR_CI_CINH, mr->ci);
}

static __inline__ void qmPortalMrCcePrefetch(struct qm_portal *portal)
{
#ifdef QM_CHECKING
    register struct qm_mr *mr = &portal->mr;
    ASSERT_COND(mr->cmode == e_QmPortalMrCCE);
#endif /* QM_CHECKING */
    qm_cl_invalidate(MR_CI);
    qm_cl_touch_rw(MR_CI);
}

static __inline__ void qmPortalMrCceConsume(struct qm_portal *portal, uint8_t num)
{
    register struct qm_mr *mr = &portal->mr;
#ifdef QM_CHECKING
    ASSERT_COND(mr->cmode == e_QmPortalMrCCE);
#endif /* QM_CHECKING */
    mr->ci = (uint8_t)((mr->ci + num) & (QM_MR_SIZE - 1));
    qm_cl_out(MR_CI, mr->ci);
}

static __inline__ void qmPortalMrCceConsumeToCurrent(struct qm_portal *portal)
{
    register struct qm_mr *mr = &portal->mr;
#ifdef QM_CHECKING
    ASSERT_COND(mr->cmode == e_QmPortalMrCCE);
#endif /* QM_CHECKING */
    mr->ci = MR_PTR2IDX(mr->cursor);
    qm_cl_out(MR_CI, mr->ci);
}

static __inline__ uint8_t qm_mr_get_ci(struct qm_portal *portal)
{
    register struct qm_mr *mr = &portal->mr;
    return mr->ci;
}

static __inline__ uint8_t qm_mr_get_ithresh(struct qm_portal *portal)
{
    register struct qm_mr *mr = &portal->mr;
    return mr->ithresh;
}

static __inline__ void qm_mr_set_ithresh(struct qm_portal *portal, uint8_t ithresh)
{
    qm_out(MR_ITR, ithresh);
}

/* ------------------------------ */
/* --- Management command API --- */

/* It's safer to code in terms of the 'mc' object than the 'portal' object,
 * because the latter runs the risk of copy-n-paste errors from other code where
 * we could manipulate some other structure within 'portal'. */
/* #define MC_API_START() register struct qm_mc *mc = &portal->mc */

static __inline__ t_Error qm_mc_init(struct qm_portal *portal)
{
    register struct qm_mc *mc = &portal->mc;
    if (__qm_portal_bind(portal, QM_BIND_MC))
        return ERROR_CODE(E_BUSY);
    mc->cr = ptr_ADD(portal->addr.addr_ce, CL_CR);
    mc->rr = ptr_ADD(portal->addr.addr_ce, CL_RR0);
    mc->rridx = (uint8_t)((mc->cr->__dont_write_directly__verb & QM_MCC_VERB_VBIT) ?
                          0 : 1);
    mc->vbit = (uint8_t)(mc->rridx ? QM_MCC_VERB_VBIT : 0);
#ifdef QM_CHECKING
    mc->state = mc_idle;
#endif /* QM_CHECKING */
    return E_OK;
}

static __inline__ void qm_mc_finish(struct qm_portal *portal)
{
#ifdef QM_CHECKING
    register struct qm_mc *mc = &portal->mc;
    ASSERT_COND(mc->state == mc_idle);
    if (mc->state != mc_idle)
        REPORT_ERROR(WARNING, E_INVALID_STATE, ("Losing incomplete MC command"));
#endif /* QM_CHECKING */
    __qm_portal_unbind(portal, QM_BIND_MC);
}

static __inline__ struct qm_mc_command *qm_mc_start(struct qm_portal *portal)
{
    register struct qm_mc *mc = &portal->mc;
#ifdef QM_CHECKING
    ASSERT_COND(mc->state == mc_idle);
    mc->state = mc_user;
#endif /* QM_CHECKING */
    dcbz_64(mc->cr);
    return mc->cr;
}

static __inline__ void qm_mc_abort(struct qm_portal *portal)
{
#ifdef QM_CHECKING
    register struct qm_mc *mc = &portal->mc;
    ASSERT_COND(mc->state == mc_user);
    mc->state = mc_idle;
#else
    UNUSED(portal);
#endif /* QM_CHECKING */
}

static __inline__ void qm_mc_commit(struct qm_portal *portal, uint8_t myverb)
{
    register struct qm_mc *mc = &portal->mc;
#ifdef QM_CHECKING
    ASSERT_COND(mc->state == mc_user);
#endif /* QM_CHECKING */
    rmb();
    mc->cr->__dont_write_directly__verb = (uint8_t)(myverb | mc->vbit);
    dcbf_64(mc->cr);
    dcbit_ro(mc->rr + mc->rridx);
#ifdef QM_CHECKING
    mc->state = mc_hw;
#endif /* QM_CHECKING */
}

static __inline__ struct qm_mc_result *qm_mc_result(struct qm_portal *portal)
{
    register struct qm_mc *mc = &portal->mc;
    struct qm_mc_result *rr = mc->rr + mc->rridx;
#ifdef QM_CHECKING
    ASSERT_COND(mc->state == mc_hw);
#endif /* QM_CHECKING */
    /* The inactive response register's verb byte always returns zero until
     * its command is submitted and completed. This includes the valid-bit,
     * in case you were wondering... */
    if (!rr->verb) {
        dcbit_ro(rr);
        return NULL;
    }
    mc->rridx ^= 1;
    mc->vbit ^= QM_MCC_VERB_VBIT;
#ifdef QM_CHECKING
    mc->state = mc_idle;
#endif /* QM_CHECKING */
    return rr;
}
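
/*
 * Illustrative MC command round-trip (a sketch; the 'queryfq' field and the
 * QM_MCC_VERB_QUERYFQ verb are assumptions about qman_private.h, not
 * definitions from this file):
 *
 *     struct qm_mc_command *cmd = qm_mc_start(portal);
 *     cmd->queryfq.fqid = fqid;                   // hypothetical field
 *     qm_mc_commit(portal, QM_MCC_VERB_QUERYFQ);  // hypothetical verb
 *     struct qm_mc_result *res;
 *     while ((res = qm_mc_result(portal)) == NULL)
 *         ;                                       // poll until hw writes verb
 *
 * qm_mc_result() returns NULL while the response verb byte is still zero,
 * so polling it is how completion is detected.
 */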

/* ------------------------------------- */
/* --- Portal interrupt register API --- */

static __inline__ t_Error qm_isr_init(struct qm_portal *portal)
{
    if (__qm_portal_bind(portal, QM_BIND_ISR))
        return ERROR_CODE(E_BUSY);
    return E_OK;
}

static __inline__ void qm_isr_finish(struct qm_portal *portal)
{
    __qm_portal_unbind(portal, QM_BIND_ISR);
}

static __inline__ void qm_isr_set_iperiod(struct qm_portal *portal, uint16_t iperiod)
{
    qm_out(ITPR, iperiod);
}

static __inline__ uint32_t __qm_isr_read(struct qm_portal *portal, enum qm_isr_reg n)
{
    return __qm_in(&portal->addr, REG_ISR + (n << 2));
}

static __inline__ void __qm_isr_write(struct qm_portal *portal, enum qm_isr_reg n, uint32_t val)
{
    __qm_out(&portal->addr, REG_ISR + (n << 2), val);
}
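
/*
 * Illustration (assumes enum qm_isr_reg enumerates the registers in their
 * hardware order): consecutive 'n' values select consecutive interrupt
 * registers, e.g. n == 1 gives REG_ISR + (1 << 2) == 0x0e04 == REG_IER,
 * so __qm_isr_write(portal, n, val) with n == 1 programs the enable mask.
 */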