Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
freebsd
GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/contrib/ncsw/Peripherals/QM/qm_portal_fqr.c
48378 views
1
/******************************************************************************
2
3
 © 1995-2003, 2004, 2005-2011 Freescale Semiconductor, Inc.
4
All rights reserved.
5
6
This is proprietary source code of Freescale Semiconductor Inc.,
7
and its use is subject to the NetComm Device Drivers EULA.
8
The copyright notice above does not evidence any actual or intended
9
publication of such source code.
10
11
ALTERNATIVELY, redistribution and use in source and binary forms, with
12
or without modification, are permitted provided that the following
13
conditions are met:
14
* Redistributions of source code must retain the above copyright
15
notice, this list of conditions and the following disclaimer.
16
* Redistributions in binary form must reproduce the above copyright
17
notice, this list of conditions and the following disclaimer in the
18
documentation and/or other materials provided with the distribution.
19
* Neither the name of Freescale Semiconductor nor the
20
names of its contributors may be used to endorse or promote products
21
derived from this software without specific prior written permission.
22
23
THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
24
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26
DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33
*
34
35
**************************************************************************/
36
/******************************************************************************
37
@File qm.c
38
39
@Description QM & Portal implementation
40
*//***************************************************************************/
41
#include <sys/cdefs.h>
42
#include <sys/types.h>
43
#include <machine/atomic.h>
44
45
#include "error_ext.h"
46
#include "std_ext.h"
47
#include "string_ext.h"
48
#include "mm_ext.h"
49
#include "qm.h"
50
#include "qman_low.h"
51
52
#include <machine/vmparam.h>
53
54
/****************************************/
55
/* static functions */
56
/****************************************/
57
58
/* Slow-path poll interval tuning (loop-iteration counts) for the portal. */
#define SLOW_POLL_IDLE 1000
#define SLOW_POLL_BUSY 10

/*
 * Context entries are 32-bit. The qman driver uses the pointer to the queue as
 * its context, and the pointer is 64-byte aligned, per the XX_MallocSmart()
 * call. Take advantage of this fact to shove a 64-bit kernel pointer into a
 * 32-bit context integer, and back.
 *
 * XXX: This depends on the fact that VM_MAX_KERNEL_ADDRESS is less than a
 * 35-bit count from VM_MIN_KERNEL_ADDRESS (32-bit context << 3 bits of
 * alignment, enforced by the CTASSERT below). If this ever changes, this
 * needs to be updated.
 */
CTASSERT((VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) < (1ULL << 35));
72
static inline uint32_t
73
aligned_int_from_ptr(const void *p)
74
{
75
uintptr_t ctx;
76
77
ctx = (uintptr_t)p;
78
KASSERT(ctx >= VM_MIN_KERNEL_ADDRESS, ("%p is too low!\n", p));
79
ctx -= VM_MIN_KERNEL_ADDRESS;
80
KASSERT((ctx & 0x07) == 0, ("Pointer %p is not 8-byte aligned!\n", p));
81
82
return (ctx >> 3);
83
}
84
85
static inline void *
86
ptr_from_aligned_int(uint32_t ctx)
87
{
88
uintptr_t p;
89
90
p = ctx;
91
p = VM_MIN_KERNEL_ADDRESS + (p << 3);
92
93
return ((void *)p);
94
}
95
96
/*
 * Issue a volatile dequeue command (VDQCR) for a parked or retired FQ.
 * The caller supplies the precedence/numframes bits in 'vdqcr'; the FQID
 * bits are filled in from p_Fq here. Sets QMAN_FQ_STATE_VDQCR on the FQ so
 * that DQRR processing can detect VDQCR completion later and clear it.
 */
static t_Error qman_volatile_dequeue(t_QmPortal *p_QmPortal,
                                     struct qman_fq *p_Fq,
                                     uint32_t vdqcr)
{
    /* VDQCR is only legal on parked or retired queues. */
    ASSERT_COND((p_Fq->state == qman_fq_state_parked) ||
        (p_Fq->state == qman_fq_state_retired));
    /* Caller must not pre-populate the FQID field or double-issue a VDQCR. */
    ASSERT_COND(!(vdqcr & QM_VDQCR_FQID_MASK));
    ASSERT_COND(!(p_Fq->flags & QMAN_FQ_STATE_VDQCR));

    vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | p_Fq->fqid;
    /* Lock ordering: portal lock first, then FQ lock (as elsewhere here). */
    NCSW_PLOCK(p_QmPortal);
    FQLOCK(p_Fq);
    p_Fq->flags |= QMAN_FQ_STATE_VDQCR;
    qm_dqrr_vdqcr_set(p_QmPortal->p_LowQmPortal, vdqcr);
    FQUNLOCK(p_Fq);
    PUNLOCK(p_QmPortal);

    return E_OK;
}
115
116
/* Map a management-command result code to its symbolic name (for logs). */
static const char *mcr_result_str(uint8_t result)
{
    if (result == QM_MCR_RESULT_NULL)
        return "QM_MCR_RESULT_NULL";
    if (result == QM_MCR_RESULT_OK)
        return "QM_MCR_RESULT_OK";
    if (result == QM_MCR_RESULT_ERR_FQID)
        return "QM_MCR_RESULT_ERR_FQID";
    if (result == QM_MCR_RESULT_ERR_FQSTATE)
        return "QM_MCR_RESULT_ERR_FQSTATE";
    if (result == QM_MCR_RESULT_ERR_NOTEMPTY)
        return "QM_MCR_RESULT_ERR_NOTEMPTY";
    if (result == QM_MCR_RESULT_PENDING)
        return "QM_MCR_RESULT_PENDING";
    return "<unknown MCR result>";
}
134
135
static t_Error qman_create_fq(t_QmPortal *p_QmPortal,
136
uint32_t fqid,
137
uint32_t flags,
138
struct qman_fq *p_Fq)
139
{
140
struct qm_fqd fqd;
141
struct qm_mcr_queryfq_np np;
142
struct qm_mc_command *p_Mcc;
143
struct qm_mc_result *p_Mcr;
144
145
p_Fq->fqid = fqid;
146
p_Fq->flags = flags;
147
p_Fq->state = qman_fq_state_oos;
148
p_Fq->cgr_groupid = 0;
149
if (!(flags & QMAN_FQ_FLAG_RECOVER) ||
150
(flags & QMAN_FQ_FLAG_NO_MODIFY))
151
return E_OK;
152
/* Everything else is RECOVER support */
153
NCSW_PLOCK(p_QmPortal);
154
p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
155
p_Mcc->queryfq.fqid = fqid;
156
qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_QUERYFQ);
157
while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
158
ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ);
159
if (p_Mcr->result != QM_MCR_RESULT_OK) {
160
PUNLOCK(p_QmPortal);
161
RETURN_ERROR(MAJOR, E_INVALID_STATE, ("QUERYFQ failed: %s", mcr_result_str(p_Mcr->result)));
162
}
163
fqd = p_Mcr->queryfq.fqd;
164
p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
165
p_Mcc->queryfq_np.fqid = fqid;
166
qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_QUERYFQ_NP);
167
while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
168
ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ_NP);
169
if (p_Mcr->result != QM_MCR_RESULT_OK) {
170
PUNLOCK(p_QmPortal);
171
RETURN_ERROR(MAJOR, E_INVALID_STATE, ("UERYFQ_NP failed: %s", mcr_result_str(p_Mcr->result)));
172
}
173
np = p_Mcr->queryfq_np;
174
/* Phew, have queryfq and queryfq_np results, stitch together
175
* the FQ object from those. */
176
p_Fq->cgr_groupid = fqd.cgid;
177
switch (np.state & QM_MCR_NP_STATE_MASK) {
178
case QM_MCR_NP_STATE_OOS:
179
break;
180
case QM_MCR_NP_STATE_RETIRED:
181
p_Fq->state = qman_fq_state_retired;
182
if (np.frm_cnt)
183
p_Fq->flags |= QMAN_FQ_STATE_NE;
184
break;
185
case QM_MCR_NP_STATE_TEN_SCHED:
186
case QM_MCR_NP_STATE_TRU_SCHED:
187
case QM_MCR_NP_STATE_ACTIVE:
188
p_Fq->state = qman_fq_state_sched;
189
if (np.state & QM_MCR_NP_STATE_R)
190
p_Fq->flags |= QMAN_FQ_STATE_CHANGING;
191
break;
192
case QM_MCR_NP_STATE_PARKED:
193
p_Fq->state = qman_fq_state_parked;
194
break;
195
default:
196
ASSERT_COND(FALSE);
197
}
198
if (fqd.fq_ctrl & QM_FQCTRL_CGE)
199
p_Fq->state |= QMAN_FQ_STATE_CGR_EN;
200
PUNLOCK(p_QmPortal);
201
202
return E_OK;
203
}
204
205
/*
 * Tear down a software FQ object. The FQ must already be quiesced (parked
 * or out-of-service); anything else trips the assertion at the bottom.
 * No locking is needed - quiescence is a pre-condition.
 */
static void qman_destroy_fq(struct qman_fq *p_Fq, uint32_t flags)
{
    /* We don't need to lock the FQ as it is a pre-condition that the FQ be
     * quiesced. Instead, run some checks. */
    UNUSED(flags);
    switch (p_Fq->state) {
    case qman_fq_state_parked:
        ASSERT_COND(flags & QMAN_FQ_DESTROY_PARKED);
        /* fallthrough - parked is also an acceptable terminal state */
    case qman_fq_state_oos:
        return;
    default:
        break;
    }
    /* Reached only for states other than parked/oos: invalid teardown. */
    ASSERT_COND(FALSE);
}
220
221
/*
 * Issue an INITFQ_PARKED or INITFQ_SCHED management command (selected by
 * QMAN_INITFQ_FLAG_SCHED in 'flags') to program the FQ descriptor from
 * p_Opts, then update the software FQ state to match.
 *
 * Returns E_OK on success, E_INVALID_VALUE for NO_MODIFY queues, E_BUSY if
 * the FQ is mid-transition or not in an initializable state, and
 * E_INVALID_STATE if the command itself fails.
 */
static t_Error qman_init_fq(t_QmPortal *p_QmPortal,
                            struct qman_fq *p_Fq,
                            uint32_t flags,
                            struct qm_mcc_initfq *p_Opts)
{
    struct qm_mc_command *p_Mcc;
    struct qm_mc_result *p_Mcr;
    uint8_t res, myverb = (uint8_t)((flags & QMAN_INITFQ_FLAG_SCHED) ?
        QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED);

    /* INITFQ only makes sense from OOS or parked. */
    SANITY_CHECK_RETURN_ERROR((p_Fq->state == qman_fq_state_oos) ||
        (p_Fq->state == qman_fq_state_parked),
        E_INVALID_STATE);

    if (p_Fq->flags & QMAN_FQ_FLAG_NO_MODIFY)
        return ERROR_CODE(E_INVALID_VALUE);
    /* Issue an INITFQ_[PARKED|SCHED] management command */
    NCSW_PLOCK(p_QmPortal);
    FQLOCK(p_Fq);
    /* Re-check state under the locks: it may have changed since the
     * unlocked sanity check above. */
    if ((p_Fq->flags & QMAN_FQ_STATE_CHANGING) ||
            ((p_Fq->state != qman_fq_state_oos) &&
             (p_Fq->state != qman_fq_state_parked))) {
        FQUNLOCK(p_Fq);
        PUNLOCK(p_QmPortal);
        return ERROR_CODE(E_BUSY);
    }
    p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
    Mem2IOCpy32((void*)&p_Mcc->initfq, p_Opts, sizeof(struct qm_mcc_initfq));
    qm_mc_commit(p_QmPortal->p_LowQmPortal, myverb);
    /* Busy-wait for the management-command result. */
    while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
    ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == myverb);
    res = p_Mcr->result;
    if (res != QM_MCR_RESULT_OK) {
        FQUNLOCK(p_Fq);
        PUNLOCK(p_QmPortal);
        RETURN_ERROR(MINOR, E_INVALID_STATE,("INITFQ failed: %s", mcr_result_str(res)));
    }

    /* Mirror the just-programmed descriptor fields into the SW object.
     * NOTE(review): p_Mcc is read back after qm_mc_commit(); this assumes
     * the command memory is still intact after completion - confirm against
     * the portal MC ring semantics. */
    if (p_Mcc->initfq.we_mask & QM_INITFQ_WE_FQCTRL) {
        if (p_Mcc->initfq.fqd.fq_ctrl & QM_FQCTRL_CGE)
            p_Fq->flags |= QMAN_FQ_STATE_CGR_EN;
        else
            p_Fq->flags &= ~QMAN_FQ_STATE_CGR_EN;
    }
    if (p_Mcc->initfq.we_mask & QM_INITFQ_WE_CGID)
        p_Fq->cgr_groupid = p_Mcc->initfq.fqd.cgid;
    p_Fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
        qman_fq_state_sched : qman_fq_state_parked;
    FQUNLOCK(p_Fq);
    PUNLOCK(p_QmPortal);
    return E_OK;
}
273
274
static t_Error qman_retire_fq(t_QmPortal *p_QmPortal,
275
struct qman_fq *p_Fq,
276
uint32_t *p_Flags,
277
bool drain)
278
{
279
struct qm_mc_command *p_Mcc;
280
struct qm_mc_result *p_Mcr;
281
t_Error err = E_OK;
282
uint8_t res;
283
284
SANITY_CHECK_RETURN_ERROR((p_Fq->state == qman_fq_state_parked) ||
285
(p_Fq->state == qman_fq_state_sched),
286
E_INVALID_STATE);
287
288
if (p_Fq->flags & QMAN_FQ_FLAG_NO_MODIFY)
289
return E_INVALID_VALUE;
290
NCSW_PLOCK(p_QmPortal);
291
FQLOCK(p_Fq);
292
if ((p_Fq->flags & QMAN_FQ_STATE_CHANGING) ||
293
(p_Fq->state == qman_fq_state_retired) ||
294
(p_Fq->state == qman_fq_state_oos)) {
295
err = E_BUSY;
296
goto out;
297
}
298
p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
299
p_Mcc->alterfq.fqid = p_Fq->fqid;
300
if (drain)
301
p_Mcc->alterfq.context_b = aligned_int_from_ptr(p_Fq);
302
qm_mc_commit(p_QmPortal->p_LowQmPortal,
303
(uint8_t)((drain)?QM_MCC_VERB_ALTER_RETIRE_CTXB:QM_MCC_VERB_ALTER_RETIRE));
304
while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
305
ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) ==
306
(drain)?QM_MCR_VERB_ALTER_RETIRE_CTXB:QM_MCR_VERB_ALTER_RETIRE);
307
res = p_Mcr->result;
308
if (res == QM_MCR_RESULT_OK)
309
{
310
/* Process 'fq' right away, we'll ignore FQRNI */
311
if (p_Mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY)
312
p_Fq->flags |= QMAN_FQ_STATE_NE;
313
if (p_Mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)
314
p_Fq->flags |= QMAN_FQ_STATE_ORL;
315
p_Fq->state = qman_fq_state_retired;
316
}
317
else if (res == QM_MCR_RESULT_PENDING)
318
p_Fq->flags |= QMAN_FQ_STATE_CHANGING;
319
else {
320
XX_Print("ALTER_RETIRE failed: %s\n",
321
mcr_result_str(res));
322
err = E_INVALID_STATE;
323
}
324
if (p_Flags)
325
*p_Flags = p_Fq->flags;
326
out:
327
FQUNLOCK(p_Fq);
328
PUNLOCK(p_QmPortal);
329
return err;
330
}
331
332
/*
 * Move a retired FQ out-of-service via the ALTER_OOS management command.
 * Fails with E_BUSY if the FQ still has frames/ORL pending (BLOCKOOS) or
 * is no longer retired, and E_INVALID_STATE if the command is rejected.
 */
static t_Error qman_oos_fq(t_QmPortal *p_QmPortal, struct qman_fq *p_Fq)
{
    struct qm_mc_command *p_Mcc;
    struct qm_mc_result *p_Mcr;
    uint8_t res;

    ASSERT_COND(p_Fq->state == qman_fq_state_retired);
    if (p_Fq->flags & QMAN_FQ_FLAG_NO_MODIFY)
        return ERROR_CODE(E_INVALID_VALUE);
    NCSW_PLOCK(p_QmPortal);
    FQLOCK(p_Fq);
    /* Re-check under the locks: OOS is blocked while NE/ORL/VDQCR pend. */
    if ((p_Fq->flags & QMAN_FQ_STATE_BLOCKOOS) ||
            (p_Fq->state != qman_fq_state_retired)) {
        FQUNLOCK(p_Fq);
        PUNLOCK(p_QmPortal);
        return ERROR_CODE(E_BUSY);
    }
    p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
    p_Mcc->alterfq.fqid = p_Fq->fqid;
    qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_ALTER_OOS);
    /* Busy-wait for the management-command result. */
    while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
    ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS);
    res = p_Mcr->result;
    if (res != QM_MCR_RESULT_OK) {
        FQUNLOCK(p_Fq);
        PUNLOCK(p_QmPortal);
        RETURN_ERROR(MINOR, E_INVALID_STATE, ("ALTER_OOS failed: %s\n", mcr_result_str(res)));
    }
    p_Fq->state = qman_fq_state_oos;

    FQUNLOCK(p_Fq);
    PUNLOCK(p_QmPortal);
    return E_OK;
}
366
367
/*
 * Move a parked FQ to the scheduled state via ALTER_SCHED. Fails with
 * E_BUSY if the FQ is mid-transition or no longer parked, and
 * E_INVALID_STATE if the command is rejected.
 */
static t_Error qman_schedule_fq(t_QmPortal *p_QmPortal, struct qman_fq *p_Fq)
{
    struct qm_mc_command *p_Mcc;
    struct qm_mc_result *p_Mcr;
    uint8_t res;

    ASSERT_COND(p_Fq->state == qman_fq_state_parked);
    if (p_Fq->flags & QMAN_FQ_FLAG_NO_MODIFY)
        return ERROR_CODE(E_INVALID_VALUE);
    /* Issue a ALTERFQ_SCHED management command */
    NCSW_PLOCK(p_QmPortal);
    FQLOCK(p_Fq);
    /* Re-check under the locks. */
    if ((p_Fq->flags & QMAN_FQ_STATE_CHANGING) ||
            (p_Fq->state != qman_fq_state_parked)) {
        FQUNLOCK(p_Fq);
        PUNLOCK(p_QmPortal);
        return ERROR_CODE(E_BUSY);
    }
    p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
    p_Mcc->alterfq.fqid = p_Fq->fqid;
    qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_ALTER_SCHED);
    /* Busy-wait for the management-command result. */
    while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
    ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED);
    res = p_Mcr->result;
    if (res != QM_MCR_RESULT_OK) {
        FQUNLOCK(p_Fq);
        PUNLOCK(p_QmPortal);
        RETURN_ERROR(MINOR, E_INVALID_STATE, ("ALTER_SCHED failed: %s\n", mcr_result_str(res)));
    }
    p_Fq->state = qman_fq_state_sched;

    FQUNLOCK(p_Fq);
    PUNLOCK(p_QmPortal);
    return E_OK;
}
402
403
/* Inline helper to reduce nesting in LoopMessageRing() */
404
/*
 * Apply a message-ring state-change notification (FQRL / FQRN / FQPN) to
 * the software FQ object, under the FQ lock. Called from LoopMessageRing.
 */
static __inline__ void fq_state_change(struct qman_fq *p_Fq,
                                       struct qm_mr_entry *p_Msg,
                                       uint8_t verb)
{
    FQLOCK(p_Fq);
    switch(verb) {
    case QM_MR_VERB_FQRL:
        /* ORL (order-restoration list) has drained. */
        ASSERT_COND(p_Fq->flags & QMAN_FQ_STATE_ORL);
        p_Fq->flags &= ~QMAN_FQ_STATE_ORL;
        break;
    case QM_MR_VERB_FQRN:
        /* Asynchronous retirement completed (was PENDING). */
        ASSERT_COND((p_Fq->state == qman_fq_state_parked) ||
            (p_Fq->state == qman_fq_state_sched));
        ASSERT_COND(p_Fq->flags & QMAN_FQ_STATE_CHANGING);
        p_Fq->flags &= ~QMAN_FQ_STATE_CHANGING;
        if (p_Msg->fq.fqs & QM_MR_FQS_NOTEMPTY)
            p_Fq->flags |= QMAN_FQ_STATE_NE;
        if (p_Msg->fq.fqs & QM_MR_FQS_ORLPRESENT)
            p_Fq->flags |= QMAN_FQ_STATE_ORL;
        p_Fq->state = qman_fq_state_retired;
        break;
    case QM_MR_VERB_FQPN:
        /* Park notification: scheduled -> parked. */
        ASSERT_COND(p_Fq->state == qman_fq_state_sched);
        ASSERT_COND(p_Fq->flags & QMAN_FQ_STATE_CHANGING);
        p_Fq->state = qman_fq_state_parked;
        /* No default case: callers only pass the three verbs above. */
    }
    FQUNLOCK(p_Fq);
}
432
433
/*
 * Mark one FQ of an FQR (frame-queue range) as fully drained. When the
 * last FQ of the range drains, move every retired FQ out-of-service,
 * destroy and free all the FQ objects, and - if a completion callback was
 * registered - notify the owner and free the FQR itself (returning its
 * FQID range to the pool).
 */
static t_Error freeDrainedFq(struct qman_fq *p_Fq)
{
    t_QmFqr *p_QmFqr;
    uint32_t i;

    ASSERT_COND(p_Fq);
    p_QmFqr = (t_QmFqr *)p_Fq->h_QmFqr;
    ASSERT_COND(p_QmFqr);

    /* Each FQ must be reported drained exactly once. */
    ASSERT_COND(!p_QmFqr->p_DrainedFqs[p_Fq->fqidOffset]);
    p_QmFqr->p_DrainedFqs[p_Fq->fqidOffset] = TRUE;
    p_QmFqr->numOfDrainedFqids++;
    if (p_QmFqr->numOfDrainedFqids == p_QmFqr->numOfFqids)
    {
        /* Whole range drained: OOS and free every FQ. */
        for (i=0;i<p_QmFqr->numOfFqids;i++)
        {
            if ((p_QmFqr->p_Fqs[i]->state == qman_fq_state_retired) &&
                    (qman_oos_fq(p_QmFqr->h_QmPortal, p_QmFqr->p_Fqs[i]) != E_OK))
                RETURN_ERROR(MAJOR, E_INVALID_STATE, ("qman_oos_fq() failed!"));
            qman_destroy_fq(p_QmFqr->p_Fqs[i], 0);
            XX_FreeSmart(p_QmFqr->p_Fqs[i]);
        }
        XX_Free(p_QmFqr->p_DrainedFqs);
        p_QmFqr->p_DrainedFqs = NULL;

        if (p_QmFqr->f_CompletionCB)
        {
            /* Owner requested full teardown on completion. */
            p_QmFqr->f_CompletionCB(p_QmFqr->h_App, p_QmFqr);
            XX_Free(p_QmFqr->p_Fqs);
            if (p_QmFqr->fqidBase)
                QmFqidPut(p_QmFqr->h_Qm, p_QmFqr->fqidBase);
            XX_Free(p_QmFqr);
        }
    }

    return E_OK;
}
470
471
/*
 * Drain a retired FQ. If it still holds frames (QMAN_FQ_STATE_NE), kick
 * off a till-empty volatile dequeue - the DQRR handlers will call
 * freeDrainedFq() when the FQ_EMPTY status eventually arrives. If it is
 * already empty, finish the teardown immediately.
 */
static t_Error drainRetiredFq(struct qman_fq *p_Fq)
{
    t_QmFqr *p_QmFqr;

    ASSERT_COND(p_Fq);
    p_QmFqr = (t_QmFqr *)p_Fq->h_QmFqr;
    ASSERT_COND(p_QmFqr);

    if (p_Fq->flags & QMAN_FQ_STATE_NE)
    {
        if (qman_volatile_dequeue(p_QmFqr->h_QmPortal, p_Fq,
                (QM_VDQCR_PRECEDENCE_VDQCR | QM_VDQCR_NUMFRAMES_TILLEMPTY)) != E_OK)

            RETURN_ERROR(MAJOR, E_INVALID_STATE, ("drain with volatile failed"));
        return E_OK;
    }
    else
        return freeDrainedFq(p_Fq);
}
490
491
static e_RxStoreResponse drainCB(t_Handle h_App,
492
t_Handle h_QmFqr,
493
t_Handle h_QmPortal,
494
uint32_t fqidOffset,
495
t_DpaaFD *p_Frame)
496
{
497
UNUSED(h_App);
498
UNUSED(h_QmFqr);
499
UNUSED(h_QmPortal);
500
UNUSED(fqidOffset);
501
UNUSED(p_Frame);
502
503
DBG(TRACE,("got fd for fqid %d", ((t_QmFqr *)h_QmFqr)->fqidBase + fqidOffset));
504
return e_RX_STORE_RESPONSE_CONTINUE;
505
}
506
507
/*
 * Default handler for DC-ERN (dedicated-channel enqueue rejection)
 * messages: unimplemented, just counts and reports each occurrence.
 */
static void cb_ern_dcErn(t_Handle h_App,
                         t_Handle h_QmPortal,
                         struct qman_fq *p_Fq,
                         const struct qm_mr_entry *p_Msg)
{
    static int cnt = 0;  /* running count of unhandled DC-ERN messages */

    UNUSED(h_App); UNUSED(h_QmPortal);
    UNUSED(p_Fq); UNUSED(p_Msg);

    XX_Print("cb_ern_dcErn_fqs() unimplemented %d\n", ++cnt);
}
520
521
/*
 * Default FQ state-change handler: once an FQ is retired and its ORL has
 * drained, start draining its remaining frames.
 */
static void cb_fqs(t_Handle h_App,
                   t_Handle h_QmPortal,
                   struct qman_fq *p_Fq,
                   const struct qm_mr_entry *p_Msg)
{
    UNUSED(h_App);
    UNUSED(h_QmPortal);
    UNUSED(p_Msg);

    /* Only act when fully retired with no order-restoration list pending. */
    if (p_Fq->state != qman_fq_state_retired)
        return;
    if (p_Fq->flags & QMAN_FQ_STATE_ORL)
        return;
    drainRetiredFq(p_Fq);
}
534
535
/*
 * Fallback message-ring handler for frames whose tag does not resolve to
 * an owned FQ object: log and drop.
 */
static void null_cb_mr(t_Handle h_App,
                       t_Handle h_QmPortal,
                       struct qman_fq *p_Fq,
                       const struct qm_mr_entry *p_Msg)
{
    t_QmPortal *p_Portal = (t_QmPortal *)h_QmPortal;
    int isDcErn;

    UNUSED(h_App);
    UNUSED(p_Fq);

    isDcErn = ((p_Msg->verb & QM_MR_VERB_DC_ERN) == QM_MR_VERB_DC_ERN);
    if (isDcErn)
        XX_Print("Ignoring unowned MR frame on cpu %d, dc-portal 0x%02x.\n",
            p_Portal->p_LowQmPortal->config.cpu,p_Msg->dcern.portal);
    else
        XX_Print("Ignoring unowned MR frame on cpu %d, verb 0x%02x.\n",
            p_Portal->p_LowQmPortal->config.cpu,p_Msg->verb);
}
551
552
/*
 * Slow-path interrupt service loop: handles congestion-state change (CSCI),
 * enqueue-ring interrupt (EQRI) and message-ring (MRI) events indicated in
 * 'is'. Returns the subset of 'is' bits that this routine services.
 */
static uint32_t LoopMessageRing(t_QmPortal *p_QmPortal, uint32_t is)
{
    struct qm_mr_entry *p_Msg;

    if (is & QM_PIRQ_CSCI) {
        struct qm_mc_result *p_Mcr;
        struct qman_cgrs tmp;
        uint32_t mask;
        unsigned int i, j;

        /* Query the current congestion state of all CGs. */
        NCSW_PLOCK(p_QmPortal);
        qm_mc_start(p_QmPortal->p_LowQmPortal);
        qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_QUERYCONGESTION);
        while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;

        /* cgrs[0] is the portal mask for its cg's, cgrs[1] is the
           previous state of cg's */
        for (i = 0; i < QM_MAX_NUM_OF_CGS/32; i++)
        {
            /* get current state */
            tmp.q.__state[i] = p_Mcr->querycongestion.state.__state[i];
            /* keep only cg's that are registered for this portal */
            tmp.q.__state[i] &= p_QmPortal->cgrs[0].q.__state[i];
            /* handle only cg's that changed their state from previous exception */
            tmp.q.__state[i] ^= p_QmPortal->cgrs[1].q.__state[i];
            /* update previous */
            p_QmPortal->cgrs[1].q.__state[i] = p_Mcr->querycongestion.state.__state[i];
        }
        PUNLOCK(p_QmPortal);

        /* if in interrupt */
        /* call the callback routines for any CG with a changed state */
        for (i = 0; i < QM_MAX_NUM_OF_CGS/32; i++)
            for(j=0, mask = 0x80000000; j<32 ; j++, mask>>=1)
            {
                if(tmp.q.__state[i] & mask)
                {
                    t_QmCg *p_QmCg = (t_QmCg *)(p_QmPortal->cgsHandles[i*32 + j]);
                    if(p_QmCg->f_Exception)
                        p_QmCg->f_Exception(p_QmCg->h_App, e_QM_EX_CG_STATE_CHANGE);
                }
            }

    }


    if (is & QM_PIRQ_EQRI) {
        /* Enqueue-ring interrupt: consume completed EQCR entries and
         * disable the interrupt threshold until re-armed. */
        NCSW_PLOCK(p_QmPortal);
        qmPortalEqcrCceUpdate(p_QmPortal->p_LowQmPortal);
        qm_eqcr_set_ithresh(p_QmPortal->p_LowQmPortal, 0);
        PUNLOCK(p_QmPortal);
    }

    if (is & QM_PIRQ_MRI) {
        /* Drain the message ring one entry at a time. */
mr_loop:
        qmPortalMrPvbUpdate(p_QmPortal->p_LowQmPortal);
        p_Msg = qm_mr_current(p_QmPortal->p_LowQmPortal);
        if (p_Msg) {
            /* Both decodings of the context are computed up front; only
             * the one matching the verb is meaningful. */
            struct qman_fq *p_FqFqs = ptr_from_aligned_int(p_Msg->fq.contextB);
            struct qman_fq *p_FqErn = ptr_from_aligned_int(p_Msg->ern.tag);
            uint8_t verb =(uint8_t)(p_Msg->verb & QM_MR_VERB_TYPE_MASK);
            t_QmRejectedFrameInfo rejectedFrameInfo;

            memset(&rejectedFrameInfo, 0, sizeof(t_QmRejectedFrameInfo));
            if (!(verb & QM_MR_VERB_DC_ERN))
            {
                /* Software ERN: classify the rejection reason. */
                switch(p_Msg->ern.rc)
                {
                    case(QM_MR_RC_CGR_TAILDROP):
                        rejectedFrameInfo.rejectionCode = e_QM_RC_CG_TAILDROP;
                        rejectedFrameInfo.cg.cgId = (uint8_t)p_FqErn->cgr_groupid;
                        break;
                    case(QM_MR_RC_WRED):
                        rejectedFrameInfo.rejectionCode = e_QM_RC_CG_WRED;
                        rejectedFrameInfo.cg.cgId = (uint8_t)p_FqErn->cgr_groupid;
                        break;
                    case(QM_MR_RC_FQ_TAILDROP):
                        rejectedFrameInfo.rejectionCode = e_QM_RC_FQ_TAILDROP;
                        rejectedFrameInfo.cg.cgId = (uint8_t)p_FqErn->cgr_groupid;
                        break;
                    case(QM_MR_RC_ERROR):
                        break;
                    default:
                        REPORT_ERROR(MINOR, E_NOT_SUPPORTED, ("Unknown rejection code"));
                }
                if (!p_FqErn)
                    p_QmPortal->p_NullCB->ern(p_QmPortal->h_App, NULL, p_QmPortal, 0, (t_DpaaFD*)&p_Msg->ern.fd, &rejectedFrameInfo);
                else
                    p_FqErn->cb.ern(p_FqErn->h_App, p_FqErn->h_QmFqr, p_QmPortal, p_FqErn->fqidOffset, (t_DpaaFD*)&p_Msg->ern.fd, &rejectedFrameInfo);
            } else if (verb == QM_MR_VERB_DC_ERN)
            {
                /* Dedicated-channel ERN. */
                if (!p_FqErn)
                    p_QmPortal->p_NullCB->dc_ern(NULL, p_QmPortal, NULL, p_Msg);
                else
                    p_FqErn->cb.dc_ern(p_FqErn->h_App, p_QmPortal, p_FqErn, p_Msg);
            } else
            {
                /* FQ state-change notifications. */
                if (verb == QM_MR_VERB_FQRNI)
                    ; /* we drop FQRNIs on the floor */
                else if (!p_FqFqs)
                    p_QmPortal->p_NullCB->fqs(NULL, p_QmPortal, NULL, p_Msg);
                else if ((verb == QM_MR_VERB_FQRN) ||
                         (verb == QM_MR_VERB_FQRL) ||
                         (verb == QM_MR_VERB_FQPN))
                {
                    fq_state_change(p_FqFqs, p_Msg, verb);
                    p_FqFqs->cb.fqs(p_FqFqs->h_App, p_QmPortal, p_FqFqs, p_Msg);
                }
            }
            qm_mr_next(p_QmPortal->p_LowQmPortal);
            qmPortalMrCciConsume(p_QmPortal->p_LowQmPortal, 1);

            goto mr_loop;
        }
    }

    return is & (QM_PIRQ_CSCI | QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI);
}
670
671
/*
 * General-purpose DQRR (dequeue response ring) processing loop. Dispatches
 * each entry to the owning FQ's dqrr callback (or the portal's null
 * callback when ownership cannot be resolved), handles volatile-dequeue
 * completion/draining, and consumes entries via DCA or CCI depending on
 * the portal's QMAN_PORTAL_FLAG_DCA option. Loops until the ring is empty
 * or a callback requests a pause.
 */
static void LoopDequeueRing(t_Handle h_QmPortal)
{
    struct qm_dqrr_entry *p_Dq;
    struct qman_fq *p_Fq;
    enum qman_cb_dqrr_result res = qman_cb_dqrr_consume;
    e_RxStoreResponse tmpRes;
    t_QmPortal *p_QmPortal = (t_QmPortal *)h_QmPortal;
    /* Only prefetch when RX stashing is not enabled for this portal. */
    int prefetch = !(p_QmPortal->options & QMAN_PORTAL_FLAG_RSTASH);

    while (res != qman_cb_dqrr_pause)
    {
        if (prefetch)
            qmPortalDqrrPvbPrefetch(p_QmPortal->p_LowQmPortal);
        qmPortalDqrrPvbUpdate(p_QmPortal->p_LowQmPortal);
        p_Dq = qm_dqrr_current(p_QmPortal->p_LowQmPortal);
        if (!p_Dq)
            break;
        /* contextB was programmed with aligned_int_from_ptr(fq). */
        p_Fq = ptr_from_aligned_int(p_Dq->contextB);
        if (p_Dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
            /* We only set QMAN_FQ_STATE_NE when retiring, so we only need
             * to check for clearing it when doing volatile dequeues. It's
             * one less thing to check in the critical path (SDQCR). */
            tmpRes = p_Fq->cb.dqrr(p_Fq->h_App, p_Fq->h_QmFqr, p_QmPortal, p_Fq->fqidOffset, (t_DpaaFD*)&p_Dq->fd);
            if (tmpRes == e_RX_STORE_RESPONSE_PAUSE)
                res = qman_cb_dqrr_pause;
            /* Check for VDQCR completion */
            if (p_Dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
                p_Fq->flags &= ~QMAN_FQ_STATE_VDQCR;
            if (p_Dq->stat & QM_DQRR_STAT_FQ_EMPTY)
            {
                /* Drain complete for this FQ. */
                p_Fq->flags &= ~QMAN_FQ_STATE_NE;
                freeDrainedFq(p_Fq);
            }
        }
        else
        {
            /* Interpret 'dq' from the owner's perspective. */
            /* use portal default handlers */
            ASSERT_COND(p_Dq->fqid);
            if (p_Fq)
            {
                tmpRes = p_Fq->cb.dqrr(p_Fq->h_App,
                                       p_Fq->h_QmFqr,
                                       p_QmPortal,
                                       p_Fq->fqidOffset,
                                       (t_DpaaFD*)&p_Dq->fd);
                if (tmpRes == e_RX_STORE_RESPONSE_PAUSE)
                    res = qman_cb_dqrr_pause;
                else if (p_Fq->state == qman_fq_state_waiting_parked)
                    res = qman_cb_dqrr_park;
            }
            else
            {
                tmpRes = p_QmPortal->p_NullCB->dqrr(p_QmPortal->h_App,
                                                    NULL,
                                                    p_QmPortal,
                                                    p_Dq->fqid,
                                                    (t_DpaaFD*)&p_Dq->fd);
                if (tmpRes == e_RX_STORE_RESPONSE_PAUSE)
                    res = qman_cb_dqrr_pause;
            }
        }

        /* Parking isn't possible unless HELDACTIVE was set. NB,
         * FORCEELIGIBLE implies HELDACTIVE, so we only need to
         * check for HELDACTIVE to cover both. */
        ASSERT_COND((p_Dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
            (res != qman_cb_dqrr_park));
        if (p_QmPortal->options & QMAN_PORTAL_FLAG_DCA) {
            /* Defer just means "skip it, I'll consume it myself later on" */
            if (res != qman_cb_dqrr_defer)
                qmPortalDqrrDcaConsume1ptr(p_QmPortal->p_LowQmPortal,
                                           p_Dq,
                                           (res == qman_cb_dqrr_park));
            qm_dqrr_next(p_QmPortal->p_LowQmPortal);
        } else {
            if (res == qman_cb_dqrr_park)
                /* The only thing to do for non-DCA is the park-request */
                qm_dqrr_park_ci(p_QmPortal->p_LowQmPortal);
            qm_dqrr_next(p_QmPortal->p_LowQmPortal);
            qmPortalDqrrCciConsume(p_QmPortal->p_LowQmPortal, 1);
        }
    }
}
755
756
/*
 * DQRR processing loop specialized for DCA-mode portals: identical to
 * LoopDequeueRing except the prefetch and non-DCA consume paths are
 * compiled out (entries are always consumed via discrete consumption
 * acknowledgement). Selected at portal setup via f_LoopDequeueRingCB.
 */
static void LoopDequeueRingDcaOptimized(t_Handle h_QmPortal)
{
    struct qm_dqrr_entry *p_Dq;
    struct qman_fq *p_Fq;
    enum qman_cb_dqrr_result res = qman_cb_dqrr_consume;
    e_RxStoreResponse tmpRes;
    t_QmPortal *p_QmPortal = (t_QmPortal *)h_QmPortal;

    while (res != qman_cb_dqrr_pause)
    {
        qmPortalDqrrPvbUpdate(p_QmPortal->p_LowQmPortal);
        p_Dq = qm_dqrr_current(p_QmPortal->p_LowQmPortal);
        if (!p_Dq)
            break;
        /* contextB was programmed with aligned_int_from_ptr(fq). */
        p_Fq = ptr_from_aligned_int(p_Dq->contextB);
        if (p_Dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
            /* We only set QMAN_FQ_STATE_NE when retiring, so we only need
             * to check for clearing it when doing volatile dequeues. It's
             * one less thing to check in the critical path (SDQCR). */
            tmpRes = p_Fq->cb.dqrr(p_Fq->h_App, p_Fq->h_QmFqr, p_QmPortal, p_Fq->fqidOffset, (t_DpaaFD*)&p_Dq->fd);
            if (tmpRes == e_RX_STORE_RESPONSE_PAUSE)
                res = qman_cb_dqrr_pause;
            /* Check for VDQCR completion */
            if (p_Dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
                p_Fq->flags &= ~QMAN_FQ_STATE_VDQCR;
            if (p_Dq->stat & QM_DQRR_STAT_FQ_EMPTY)
            {
                /* Drain complete for this FQ. */
                p_Fq->flags &= ~QMAN_FQ_STATE_NE;
                freeDrainedFq(p_Fq);
            }
        }
        else
        {
            /* Interpret 'dq' from the owner's perspective. */
            /* use portal default handlers */
            ASSERT_COND(p_Dq->fqid);
            if (p_Fq)
            {
                tmpRes = p_Fq->cb.dqrr(p_Fq->h_App,
                                       p_Fq->h_QmFqr,
                                       p_QmPortal,
                                       p_Fq->fqidOffset,
                                       (t_DpaaFD*)&p_Dq->fd);
                if (tmpRes == e_RX_STORE_RESPONSE_PAUSE)
                    res = qman_cb_dqrr_pause;
                else if (p_Fq->state == qman_fq_state_waiting_parked)
                    res = qman_cb_dqrr_park;
            }
            else
            {
                tmpRes = p_QmPortal->p_NullCB->dqrr(p_QmPortal->h_App,
                                                    NULL,
                                                    p_QmPortal,
                                                    p_Dq->fqid,
                                                    (t_DpaaFD*)&p_Dq->fd);
                if (tmpRes == e_RX_STORE_RESPONSE_PAUSE)
                    res = qman_cb_dqrr_pause;
            }
        }

        /* Parking isn't possible unless HELDACTIVE was set. NB,
         * FORCEELIGIBLE implies HELDACTIVE, so we only need to
         * check for HELDACTIVE to cover both. */
        ASSERT_COND((p_Dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
            (res != qman_cb_dqrr_park));
        /* Defer just means "skip it, I'll consume it myself later on" */
        if (res != qman_cb_dqrr_defer)
            qmPortalDqrrDcaConsume1ptr(p_QmPortal->p_LowQmPortal,
                                       p_Dq,
                                       (res == qman_cb_dqrr_park));
        qm_dqrr_next(p_QmPortal->p_LowQmPortal);
    }
}
829
830
/*
 * DQRR processing loop specialized for non-DCA portals: identical to
 * LoopDequeueRing except the prefetch and DCA consume paths are compiled
 * out (entries are always consumed via CCI, with an explicit park request
 * when a callback asks for parking). Selected at portal setup via
 * f_LoopDequeueRingCB.
 */
static void LoopDequeueRingOptimized(t_Handle h_QmPortal)
{
    struct qm_dqrr_entry *p_Dq;
    struct qman_fq *p_Fq;
    enum qman_cb_dqrr_result res = qman_cb_dqrr_consume;
    e_RxStoreResponse tmpRes;
    t_QmPortal *p_QmPortal = (t_QmPortal *)h_QmPortal;

    while (res != qman_cb_dqrr_pause)
    {
        qmPortalDqrrPvbUpdate(p_QmPortal->p_LowQmPortal);
        p_Dq = qm_dqrr_current(p_QmPortal->p_LowQmPortal);
        if (!p_Dq)
            break;
        /* contextB was programmed with aligned_int_from_ptr(fq). */
        p_Fq = ptr_from_aligned_int(p_Dq->contextB);
        if (p_Dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
            /* We only set QMAN_FQ_STATE_NE when retiring, so we only need
             * to check for clearing it when doing volatile dequeues. It's
             * one less thing to check in the critical path (SDQCR). */
            tmpRes = p_Fq->cb.dqrr(p_Fq->h_App, p_Fq->h_QmFqr, p_QmPortal, p_Fq->fqidOffset, (t_DpaaFD*)&p_Dq->fd);
            if (tmpRes == e_RX_STORE_RESPONSE_PAUSE)
                res = qman_cb_dqrr_pause;
            /* Check for VDQCR completion */
            if (p_Dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
                p_Fq->flags &= ~QMAN_FQ_STATE_VDQCR;
            if (p_Dq->stat & QM_DQRR_STAT_FQ_EMPTY)
            {
                /* Drain complete for this FQ. */
                p_Fq->flags &= ~QMAN_FQ_STATE_NE;
                freeDrainedFq(p_Fq);
            }
        }
        else
        {
            /* Interpret 'dq' from the owner's perspective. */
            /* use portal default handlers */
            ASSERT_COND(p_Dq->fqid);
            if (p_Fq)
            {
                tmpRes = p_Fq->cb.dqrr(p_Fq->h_App,
                                       p_Fq->h_QmFqr,
                                       p_QmPortal,
                                       p_Fq->fqidOffset,
                                       (t_DpaaFD*)&p_Dq->fd);
                if (tmpRes == e_RX_STORE_RESPONSE_PAUSE)
                    res = qman_cb_dqrr_pause;
                else if (p_Fq->state == qman_fq_state_waiting_parked)
                    res = qman_cb_dqrr_park;
            }
            else
            {
                tmpRes = p_QmPortal->p_NullCB->dqrr(p_QmPortal->h_App,
                                                    NULL,
                                                    p_QmPortal,
                                                    p_Dq->fqid,
                                                    (t_DpaaFD*)&p_Dq->fd);
                if (tmpRes == e_RX_STORE_RESPONSE_PAUSE)
                    res = qman_cb_dqrr_pause;
            }
        }

        /* Parking isn't possible unless HELDACTIVE was set. NB,
         * FORCEELIGIBLE implies HELDACTIVE, so we only need to
         * check for HELDACTIVE to cover both. */
        ASSERT_COND((p_Dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
            (res != qman_cb_dqrr_park));
        if (res == qman_cb_dqrr_park)
            /* The only thing to do for non-DCA is the park-request */
            qm_dqrr_park_ci(p_QmPortal->p_LowQmPortal);
        qm_dqrr_next(p_QmPortal->p_LowQmPortal);
        qmPortalDqrrCciConsume(p_QmPortal->p_LowQmPortal, 1);
    }
}
902
903
/* Portal interrupt handler */
904
/* Portal interrupt handler */
/*
 * Reads and clears the enabled interrupt-status bits, then runs the
 * fast-path DQRR loop and/or the slow-path message-ring loop depending on
 * the portal's IRQ_FAST / IRQ_SLOW options.
 */
static void portal_isr(void *ptr)
{
    t_QmPortal *p_QmPortal = ptr;
    uint32_t event = 0;
    uint32_t enableEvents = qm_isr_enable_read(p_QmPortal->p_LowQmPortal);

    DBG(TRACE, ("software-portal %d got interrupt", p_QmPortal->p_LowQmPortal->config.cpu));

    /* Only service events that are both raised and enabled. */
    event |= (qm_isr_status_read(p_QmPortal->p_LowQmPortal) &
        enableEvents);

    /* Acknowledge before processing so new events re-trigger. */
    qm_isr_status_clear(p_QmPortal->p_LowQmPortal, event);
    /* Only do fast-path handling if it's required */
    if (/*(event & QM_PIRQ_DQRI) &&*/
            (p_QmPortal->options & QMAN_PORTAL_FLAG_IRQ_FAST))
        p_QmPortal->f_LoopDequeueRingCB(p_QmPortal);
    if (p_QmPortal->options & QMAN_PORTAL_FLAG_IRQ_SLOW)
        LoopMessageRing(p_QmPortal, event);
}
923
924
925
/*
 * Query the non-programmable (runtime) fields of an FQ via QUERYFQ_NP and
 * copy the result into *p_Np. Returns E_OK on success or E_INVALID_STATE
 * if the management command fails (p_Np is untouched in that case).
 */
static t_Error qman_query_fq_np(t_QmPortal *p_QmPortal, struct qman_fq *p_Fq, struct qm_mcr_queryfq_np *p_Np)
{
    struct qm_mc_command *p_Mcc;
    struct qm_mc_result *p_Mcr;
    uint8_t res;

    NCSW_PLOCK(p_QmPortal);
    p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
    p_Mcc->queryfq_np.fqid = p_Fq->fqid;
    qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_QUERYFQ_NP);
    /* Busy-wait for the management-command result. */
    while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
    ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
    res = p_Mcr->result;
    if (res == QM_MCR_RESULT_OK)
        /* Copy out while still holding the portal lock. */
        *p_Np = p_Mcr->queryfq_np;
    PUNLOCK(p_QmPortal);
    if (res != QM_MCR_RESULT_OK)
        RETURN_ERROR(MINOR, E_INVALID_STATE, ("QUERYFQ_NP failed: %s\n", mcr_result_str(res)));
    return E_OK;
}
945
946
/* Return the hardware congestion-group id held by a CG handle. */
static uint8_t QmCgGetCgId(t_Handle h_QmCg)
{
    return ((t_QmCg *)h_QmCg)->id;
}
953
954
static t_Error qm_new_fq(t_QmPortal *p_QmPortal,
955
uint32_t fqid,
956
uint32_t fqidOffset,
957
uint32_t channel,
958
uint32_t wqid,
959
uint16_t count,
960
uint32_t flags,
961
t_QmFqrCongestionAvoidanceParams *p_CgParams,
962
t_QmContextA *p_ContextA,
963
t_QmContextB *p_ContextB,
964
bool initParked,
965
t_Handle h_QmFqr,
966
struct qman_fq **p_Fqs)
967
{
968
struct qman_fq *p_Fq = NULL;
969
struct qm_mcc_initfq fq_opts;
970
uint32_t i;
971
t_Error err = E_OK;
972
int gap, tmp;
973
uint32_t tmpA, tmpN, ta=0, tn=0, initFqFlag;
974
975
ASSERT_COND(p_QmPortal);
976
ASSERT_COND(count);
977
978
for(i=0;i<count;i++)
979
{
980
p_Fq = (struct qman_fq *)XX_MallocSmart(sizeof(struct qman_fq), 0, 64);
981
if (!p_Fq)
982
RETURN_ERROR(MAJOR, E_NO_MEMORY, ("FQ obj!!!"));
983
memset(p_Fq, 0, sizeof(struct qman_fq));
984
p_Fq->cb.dqrr = p_QmPortal->f_DfltFrame;
985
p_Fq->cb.ern = p_QmPortal->f_RejectedFrame;
986
p_Fq->cb.dc_ern = cb_ern_dcErn;
987
p_Fq->cb.fqs = cb_fqs;
988
p_Fq->h_App = p_QmPortal->h_App;
989
p_Fq->h_QmFqr = h_QmFqr;
990
p_Fq->fqidOffset = fqidOffset;
991
p_Fqs[i] = p_Fq;
992
if ((err = qman_create_fq(p_QmPortal,(uint32_t)(fqid + i), 0, p_Fqs[i])) != E_OK)
993
break;
994
}
995
996
if (err != E_OK)
997
{
998
for(i=0;i<count;i++)
999
if (p_Fqs[i])
1000
{
1001
XX_FreeSmart(p_Fqs[i]);
1002
p_Fqs[i] = NULL;
1003
}
1004
RETURN_ERROR(MINOR, err, ("Failed to create Fqs"));
1005
}
1006
1007
memset(&fq_opts,0,sizeof(fq_opts));
1008
fq_opts.fqid = fqid;
1009
fq_opts.count = (uint16_t)(count-1);
1010
fq_opts.we_mask |= QM_INITFQ_WE_DESTWQ;
1011
fq_opts.fqd.dest.channel = channel;
1012
fq_opts.fqd.dest.wq = wqid;
1013
fq_opts.we_mask |= QM_INITFQ_WE_FQCTRL;
1014
fq_opts.fqd.fq_ctrl = (uint16_t)flags;
1015
1016
if ((flags & QM_FQCTRL_CGE) || (flags & QM_FQCTRL_TDE))
1017
ASSERT_COND(p_CgParams);
1018
1019
if(flags & QM_FQCTRL_CGE)
1020
{
1021
ASSERT_COND(p_CgParams->h_QmCg);
1022
1023
/* CG OAC and FQ TD may not be configured at the same time. if both are required,
1024
than we configure CG first, and the FQ TD later - see below. */
1025
fq_opts.fqd.cgid = QmCgGetCgId(p_CgParams->h_QmCg);
1026
fq_opts.we_mask |= QM_INITFQ_WE_CGID;
1027
if(p_CgParams->overheadAccountingLength)
1028
{
1029
fq_opts.we_mask |= QM_INITFQ_WE_OAC;
1030
fq_opts.we_mask &= ~QM_INITFQ_WE_TDTHRESH;
1031
fq_opts.fqd.td_thresh = (uint16_t)(QM_FQD_TD_THRESH_OAC_EN | p_CgParams->overheadAccountingLength);
1032
}
1033
}
1034
if((flags & QM_FQCTRL_TDE) && (!p_CgParams->overheadAccountingLength))
1035
{
1036
ASSERT_COND(p_CgParams->fqTailDropThreshold);
1037
1038
fq_opts.we_mask |= QM_INITFQ_WE_TDTHRESH;
1039
1040
/* express thresh as ta*2^tn */
1041
gap = (int)p_CgParams->fqTailDropThreshold;
1042
for (tmpA=0 ; tmpA<256; tmpA++ )
1043
for (tmpN=0 ; tmpN<32; tmpN++ )
1044
{
1045
tmp = ABS((int)(p_CgParams->fqTailDropThreshold - tmpA*(1<<tmpN)));
1046
if (tmp < gap)
1047
{
1048
ta = tmpA;
1049
tn = tmpN;
1050
gap = tmp;
1051
}
1052
}
1053
fq_opts.fqd.td.exp = tn;
1054
fq_opts.fqd.td.mant = ta;
1055
}
1056
1057
if (p_ContextA)
1058
{
1059
fq_opts.we_mask |= QM_INITFQ_WE_CONTEXTA;
1060
memcpy((void*)&fq_opts.fqd.context_a, p_ContextA, sizeof(t_QmContextA));
1061
}
1062
/* If this FQ will not be used for tx, we can use contextB field */
1063
if (fq_opts.fqd.dest.channel < e_QM_FQ_CHANNEL_FMAN0_SP0)
1064
{
1065
fq_opts.we_mask |= QM_INITFQ_WE_CONTEXTB;
1066
fq_opts.fqd.context_b = aligned_int_from_ptr(p_Fqs[0]);
1067
}
1068
else if (p_ContextB) /* Tx-Queue */
1069
{
1070
fq_opts.we_mask |= QM_INITFQ_WE_CONTEXTB;
1071
memcpy((void*)&fq_opts.fqd.context_b, p_ContextB, sizeof(t_QmContextB));
1072
}
1073
1074
if((flags & QM_FQCTRL_TDE) && (p_CgParams->overheadAccountingLength))
1075
initFqFlag = 0;
1076
else
1077
initFqFlag = (uint32_t)(initParked?0:QMAN_INITFQ_FLAG_SCHED);
1078
1079
if ((err = qman_init_fq(p_QmPortal, p_Fqs[0], initFqFlag, &fq_opts)) != E_OK)
1080
{
1081
for(i=0;i<count;i++)
1082
if (p_Fqs[i])
1083
{
1084
XX_FreeSmart(p_Fqs[i]);
1085
p_Fqs[i] = NULL;
1086
}
1087
RETURN_ERROR(MINOR, err, ("Failed to init Fqs [%d-%d]", fqid, fqid+count-1));
1088
}
1089
1090
/* if both CG OAC and FQ TD are needed, we call qman_init_fq again, this time for the FQ TD only */
1091
if((flags & QM_FQCTRL_TDE) && (p_CgParams->overheadAccountingLength))
1092
{
1093
ASSERT_COND(p_CgParams->fqTailDropThreshold);
1094
1095
fq_opts.we_mask = QM_INITFQ_WE_TDTHRESH;
1096
1097
/* express thresh as ta*2^tn */
1098
gap = (int)p_CgParams->fqTailDropThreshold;
1099
for (tmpA=0 ; tmpA<256; tmpA++ )
1100
for (tmpN=0 ; tmpN<32; tmpN++ )
1101
{
1102
tmp = ABS((int)(p_CgParams->fqTailDropThreshold - tmpA*(1<<tmpN)));
1103
if (tmp < gap)
1104
{
1105
ta = tmpA;
1106
tn = tmpN;
1107
gap = tmp;
1108
}
1109
}
1110
fq_opts.fqd.td.exp = tn;
1111
fq_opts.fqd.td.mant = ta;
1112
if ((err = qman_init_fq(p_QmPortal, p_Fqs[0], (uint32_t)(initParked?0:QMAN_INITFQ_FLAG_SCHED), &fq_opts)) != E_OK)
1113
{
1114
for(i=0;i<count;i++)
1115
if (p_Fqs[i])
1116
{
1117
XX_FreeSmart(p_Fqs[i]);
1118
p_Fqs[i] = NULL;
1119
}
1120
RETURN_ERROR(MINOR, err, ("Failed to init Fqs"));
1121
}
1122
}
1123
1124
1125
for(i=1;i<count;i++)
1126
{
1127
memcpy(p_Fqs[i], p_Fqs[0], sizeof(struct qman_fq));
1128
p_Fqs[i]->fqid += i;
1129
}
1130
1131
return err;
1132
}
1133
1134
1135
/* Retire a frame queue and take it out-of-service, then destroy its
 * software object.  Fails (leaving the FQ in its current state) if the
 * retire is still in progress or the queue is not empty.
 * Returns E_OK on success, E_INVALID_STATE on any step failing. */
static t_Error qm_free_fq(t_QmPortal *p_QmPortal, struct qman_fq *p_Fq)
{
    uint32_t flags=0;

    if (qman_retire_fq(p_QmPortal, p_Fq, &flags, false) != E_OK)
        RETURN_ERROR(MAJOR, E_INVALID_STATE, ("qman_retire_fq() failed!"));

    /* Retirement was accepted but has not completed yet. */
    if (flags & QMAN_FQ_STATE_CHANGING)
        RETURN_ERROR(MAJOR, E_INVALID_STATE, ("fq %d currently in use, will be retired", p_Fq->fqid));

    /* Non-empty FQs cannot be moved out-of-service until drained. */
    if (flags & QMAN_FQ_STATE_NE)
        RETURN_ERROR(MAJOR, E_INVALID_STATE, ("qman_retire_fq() failed;" \
            "Frame Queue Not Empty, Need to dequeue"));

    if (qman_oos_fq(p_QmPortal, p_Fq) != E_OK)
        RETURN_ERROR(MAJOR, E_INVALID_STATE, ("qman_oos_fq() failed!"));

    qman_destroy_fq(p_Fq,0);

    return E_OK;
}
1156
1157
/* Disable dequeuing on this portal by forcing the DQRR max-fill to 0.
 * Nested calls are counted; only the first call (disable_count 0 -> 1)
 * touches hardware.  Presumably paired with a matching enable that
 * decrements the count -- the counterpart is outside this view. */
static void qman_disable_portal(t_QmPortal *p_QmPortal)
{
    NCSW_PLOCK(p_QmPortal);
    if (!(p_QmPortal->disable_count++))
        qm_dqrr_set_maxfill(p_QmPortal->p_LowQmPortal, 0);
    PUNLOCK(p_QmPortal);
}
1164
1165
1166
/* quiesce SDQCR/VDQCR, then drain till h/w wraps up anything it
1167
* was doing (5ms is more than enough to ensure it's done). */
1168
static void clean_dqrr_mr(t_QmPortal *p_QmPortal)
1169
{
1170
struct qm_dqrr_entry *p_Dq;
1171
struct qm_mr_entry *p_Msg;
1172
int idle = 0;
1173
1174
qm_dqrr_sdqcr_set(p_QmPortal->p_LowQmPortal, 0);
1175
qm_dqrr_vdqcr_set(p_QmPortal->p_LowQmPortal, 0);
1176
drain_loop:
1177
qmPortalDqrrPvbPrefetch(p_QmPortal->p_LowQmPortal);
1178
qmPortalDqrrPvbUpdate(p_QmPortal->p_LowQmPortal);
1179
qmPortalMrPvbUpdate(p_QmPortal->p_LowQmPortal);
1180
p_Dq = qm_dqrr_current(p_QmPortal->p_LowQmPortal);
1181
p_Msg = qm_mr_current(p_QmPortal->p_LowQmPortal);
1182
if (p_Dq) {
1183
qm_dqrr_next(p_QmPortal->p_LowQmPortal);
1184
qmPortalDqrrCciConsume(p_QmPortal->p_LowQmPortal, 1);
1185
}
1186
if (p_Msg) {
1187
qm_mr_next(p_QmPortal->p_LowQmPortal);
1188
qmPortalMrCciConsume(p_QmPortal->p_LowQmPortal, 1);
1189
}
1190
if (!p_Dq && !p_Msg) {
1191
if (++idle < 5) {
1192
XX_UDelay(1000);
1193
goto drain_loop;
1194
}
1195
} else {
1196
idle = 0;
1197
goto drain_loop;
1198
}
1199
}
1200
1201
/* Bring up all sub-blocks of a software portal (EQCR, DQRR, MR, MC, ISR),
 * program interrupt gating, optionally hook the IRQ, and verify the rings
 * are clean before enabling dequeues.  Cleanup on failure unwinds in
 * reverse init order via the goto ladder at the bottom.
 * Returns E_OK on success, E_INVALID_STATE (or the EQCR init error) on
 * failure. */
static t_Error qman_create_portal(t_QmPortal *p_QmPortal,
                                  uint32_t flags,
                                  uint32_t sdqcrFlags,
                                  uint8_t dqrrSize)
{
    const struct qm_portal_config *p_Config = &(p_QmPortal->p_LowQmPortal->config);
    int ret = 0;
    t_Error err;
    uint32_t isdr;

    if ((err = qm_eqcr_init(p_QmPortal->p_LowQmPortal, e_QmPortalPVB, e_QmPortalEqcrCCE)) != E_OK)
        RETURN_ERROR(MINOR, err, ("Qman EQCR initialization failed\n"));

    /* Push mode when an SDQCR is supplied, pull mode otherwise; DCA vs CCI
     * consumption and stashing options come from 'flags'. */
    if (qm_dqrr_init(p_QmPortal->p_LowQmPortal,
                     sdqcrFlags ? e_QmPortalDequeuePushMode : e_QmPortalDequeuePullMode,
                     e_QmPortalPVB,
                     (flags & QMAN_PORTAL_FLAG_DCA) ? e_QmPortalDqrrDCA : e_QmPortalDqrrCCI,
                     dqrrSize,
                     (flags & QMAN_PORTAL_FLAG_RSTASH) ? 1 : 0,
                     (flags & QMAN_PORTAL_FLAG_DSTASH) ? 1 : 0)) {
        REPORT_ERROR(MAJOR, E_INVALID_STATE, ("DQRR initialization failed"));
        goto fail_dqrr;
    }

    if (qm_mr_init(p_QmPortal->p_LowQmPortal, e_QmPortalPVB, e_QmPortalMrCCI)) {
        REPORT_ERROR(MAJOR, E_INVALID_STATE, ("MR initialization failed"));
        goto fail_mr;
    }
    if (qm_mc_init(p_QmPortal->p_LowQmPortal)) {
        REPORT_ERROR(MAJOR, E_INVALID_STATE, ("MC initialization failed"));
        goto fail_mc;
    }
    if (qm_isr_init(p_QmPortal->p_LowQmPortal)) {
        REPORT_ERROR(MAJOR, E_INVALID_STATE, ("ISR initialization failed"));
        goto fail_isr;
    }
    /* static interrupt-gating controls */
    qm_dqrr_set_ithresh(p_QmPortal->p_LowQmPortal, 12);
    qm_mr_set_ithresh(p_QmPortal->p_LowQmPortal, 4);
    qm_isr_set_iperiod(p_QmPortal->p_LowQmPortal, 100);
    p_QmPortal->options = flags;
    /* Mask everything (ISDR) and clear stale status before unmasking the
     * sources we care about step by step below. */
    isdr = 0xffffffff;
    qm_isr_status_clear(p_QmPortal->p_LowQmPortal, 0xffffffff);
    qm_isr_enable_write(p_QmPortal->p_LowQmPortal, DEFAULT_portalExceptions);
    qm_isr_disable_write(p_QmPortal->p_LowQmPortal, isdr);
    if (flags & QMAN_PORTAL_FLAG_IRQ)
    {
        XX_SetIntr(p_Config->irq, portal_isr, p_QmPortal);
        XX_EnableIntr(p_Config->irq);
        qm_isr_uninhibit(p_QmPortal->p_LowQmPortal);
    } else
        /* without IRQ, we can't block */
        flags &= ~QMAN_PORTAL_FLAG_WAIT;
    /* Need EQCR to be empty before continuing */
    isdr ^= QM_PIRQ_EQCI;
    qm_isr_disable_write(p_QmPortal->p_LowQmPortal, isdr);
    ret = qm_eqcr_get_fill(p_QmPortal->p_LowQmPortal);
    if (ret) {
        REPORT_ERROR(MAJOR, E_INVALID_STATE, ("EQCR unclean"));
        goto fail_eqcr_empty;
    }
    /* Unmask dequeue and message-ring interrupts, then verify both rings
     * are empty (a dirty ring means the portal was not properly torn down). */
    isdr ^= (QM_PIRQ_DQRI | QM_PIRQ_MRI);
    qm_isr_disable_write(p_QmPortal->p_LowQmPortal, isdr);
    if (qm_dqrr_current(p_QmPortal->p_LowQmPortal) != NULL)
    {
        REPORT_ERROR(MAJOR, E_INVALID_STATE, ("DQRR unclean"));
        goto fail_dqrr_mr_empty;
    }
    if (qm_mr_current(p_QmPortal->p_LowQmPortal) != NULL)
    {
        REPORT_ERROR(MAJOR, E_INVALID_STATE, ("MR unclean"));
        goto fail_dqrr_mr_empty;
    }
    qm_isr_disable_write(p_QmPortal->p_LowQmPortal, 0);
    /* Finally enable static dequeue commands (push mode). */
    qm_dqrr_sdqcr_set(p_QmPortal->p_LowQmPortal, sdqcrFlags);
    return E_OK;
fail_dqrr_mr_empty:
fail_eqcr_empty:
    qm_isr_finish(p_QmPortal->p_LowQmPortal);
fail_isr:
    qm_mc_finish(p_QmPortal->p_LowQmPortal);
fail_mc:
    qm_mr_finish(p_QmPortal->p_LowQmPortal);
fail_mr:
    qm_dqrr_finish(p_QmPortal->p_LowQmPortal);
fail_dqrr:
    qm_eqcr_finish(p_QmPortal->p_LowQmPortal);
    return ERROR_CODE(E_INVALID_STATE);
}
1290
1291
/* Tear down a software portal: detach the IRQ (if one was hooked) and
 * finish all sub-blocks in reverse order of qman_create_portal(). */
static void qman_destroy_portal(t_QmPortal *p_QmPortal)
{
    /* NB we do this to "quiesce" EQCR. If we add enqueue-completions or
     * something related to QM_PIRQ_EQCI, this may need fixing. */
    qmPortalEqcrCceUpdate(p_QmPortal->p_LowQmPortal);
    if (p_QmPortal->options & QMAN_PORTAL_FLAG_IRQ)
    {
        XX_DisableIntr(p_QmPortal->p_LowQmPortal->config.irq);
        XX_FreeIntr(p_QmPortal->p_LowQmPortal->config.irq);
    }
    qm_isr_finish(p_QmPortal->p_LowQmPortal);
    qm_mc_finish(p_QmPortal->p_LowQmPortal);
    qm_mr_finish(p_QmPortal->p_LowQmPortal);
    qm_dqrr_finish(p_QmPortal->p_LowQmPortal);
    qm_eqcr_finish(p_QmPortal->p_LowQmPortal);
}
1307
1308
/* Attempt to claim the next EQCR slot, refreshing the cached consumer
 * index first when the ring looks nearly full.  Returns NULL if the ring
 * is full. */
static inline struct qm_eqcr_entry *try_eq_start(t_QmPortal *p_QmPortal)
{
    struct qm_portal *p_Low = p_QmPortal->p_LowQmPortal;
    uint8_t freeSlots = qm_eqcr_get_avail(p_Low);

    if (freeSlots < EQCR_THRESH)
        qmPortalEqcrCceUpdate(p_Low);
    else if (freeSlots == EQCR_THRESH)
        qmPortalEqcrCcePrefetch(p_Low);
    return qm_eqcr_start(p_Low);
}
1322
1323
1324
/* Enqueue an ORP (order-restoration point) update command: advances the
 * sequence number of 'orpId' without enqueuing a frame.  The
 * QMAN_ENQUEUE_FLAG_NESN flag controls the NESN (advance Next Expected
 * Sequence Number) bit of the command.
 * Returns E_OK on success or E_BUSY if the EQCR ring is full. */
static t_Error qman_orp_update(t_QmPortal *p_QmPortal,
                               uint32_t orpId,
                               uint16_t orpSeqnum,
                               uint32_t flags)
{
    struct qm_eqcr_entry *p_Eq;

    NCSW_PLOCK(p_QmPortal);
    p_Eq = try_eq_start(p_QmPortal);
    if (!p_Eq)
    {
        PUNLOCK(p_QmPortal);
        return ERROR_CODE(E_BUSY);
    }

    if (flags & QMAN_ENQUEUE_FLAG_NESN)
        orpSeqnum |= QM_EQCR_SEQNUM_NESN;
    else
        /* No need to check 4 QMAN_ENQUEUE_FLAG_HOLE */
        orpSeqnum &= ~QM_EQCR_SEQNUM_NESN;
    p_Eq->seqnum = orpSeqnum;
    p_Eq->orp = orpId;
    /* Commit with the ORP verb; PVB commit makes the entry visible to h/w. */
    qmPortalEqcrPvbCommit(p_QmPortal->p_LowQmPortal, (uint8_t)QM_EQCR_VERB_ORP);

    PUNLOCK(p_QmPortal);
    return E_OK;
}
1351
1352
/* Validate FQR stashing parameters: each stash size must fit the QMan
 * contextA stash-size encoding, and when FQ-context stashing is requested
 * the context address must be non-NULL, cache-line aligned, and fit in
 * 40 bits.  Returns E_OK when valid, E_INVALID_VALUE otherwise. */
static __inline__ t_Error CheckStashParams(t_QmFqrParams *p_QmFqrParams)
{
    ASSERT_COND(p_QmFqrParams);

    if (p_QmFqrParams->stashingParams.frameAnnotationSize > QM_CONTEXTA_MAX_STASH_SIZE)
        RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Frame Annotation Size Exceeded Max Stash Size(%d)", QM_CONTEXTA_MAX_STASH_SIZE));
    if (p_QmFqrParams->stashingParams.frameDataSize > QM_CONTEXTA_MAX_STASH_SIZE)
        RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Frame Data Size Exceeded Max Stash Size(%d)", QM_CONTEXTA_MAX_STASH_SIZE));
    if (p_QmFqrParams->stashingParams.fqContextSize > QM_CONTEXTA_MAX_STASH_SIZE)
        RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Frame Context Size Exceeded Max Stash Size(%d)", QM_CONTEXTA_MAX_STASH_SIZE));
    if (p_QmFqrParams->stashingParams.fqContextSize)
    {
        /* typo fix: was "givven" */
        if (!p_QmFqrParams->stashingParams.fqContextAddr)
            RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("FQ Context Address Must be given"));
        if (!IS_ALIGNED(p_QmFqrParams->stashingParams.fqContextAddr, CACHELINE_SIZE))
            RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("FQ Context Address Must be aligned to %d", CACHELINE_SIZE));
        /* QMan stash addresses are limited to 40 bits. */
        if (p_QmFqrParams->stashingParams.fqContextAddr & 0xffffff0000000000LL)
            RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("FQ Context Address May be up to 40 bit"));
    }

    return E_OK;
}
1374
1375
/* Register a congestion group with this portal: mark its bit in the
 * registration mask and remember its handle for later notifications.
 * Returns E_BUSY if the CG id is already registered. */
static t_Error QmPortalRegisterCg(t_Handle h_QmPortal, t_Handle h_QmCg, uint8_t cgId)
{
    t_QmPortal *p_QmPortal = (t_QmPortal *)h_QmPortal;

    /* cgrs[0] is the mask of registered CG's*/
    if(p_QmPortal->cgrs[0].q.__state[cgId/32] & (0x80000000 >> (cgId % 32)))
        RETURN_ERROR(MINOR, E_BUSY, ("CG already used"));

    /* Bit 'cgId' counted from the MSB of each 32-bit word. */
    p_QmPortal->cgrs[0].q.__state[cgId/32] |= 0x80000000 >> (cgId % 32);
    p_QmPortal->cgsHandles[cgId] = h_QmCg;

    return E_OK;
}
1388
1389
/* Unregister a congestion group from this portal: clear its bit in the
 * registration mask and drop its handle.
 * Returns E_BUSY if the CG id was not registered. */
static t_Error QmPortalUnregisterCg(t_Handle h_QmPortal, uint8_t cgId)
{
    t_QmPortal *p_QmPortal = (t_QmPortal *)h_QmPortal;

    /* cgrs[0] is the mask of registered CG's*/
    if(!(p_QmPortal->cgrs[0].q.__state[cgId/32] & (0x80000000 >> (cgId % 32))))
        RETURN_ERROR(MINOR, E_BUSY, ("CG is not in use"));

    /* Bug fix: '~' binds tighter than '>>', so the original expression
     * '~0x80000000 >> (cgId % 32)' evaluated to (0x7fffffff >> shift) and
     * cleared unrelated CG bits.  Parenthesize so only this CG's bit
     * (MSB-relative, matching QmPortalRegisterCg) is cleared. */
    p_QmPortal->cgrs[0].q.__state[cgId/32] &= ~(0x80000000 >> (cgId % 32));
    p_QmPortal->cgsHandles[cgId] = NULL;

    return E_OK;
}
1402
1403
/* Return the software-portal id (the portal's cpu field) for this handle. */
static e_DpaaSwPortal QmPortalGetSwPortalId(t_Handle h_QmPortal)
{
    return (e_DpaaSwPortal)((t_QmPortal *)h_QmPortal)->p_LowQmPortal->config.cpu;
}
1409
1410
/* Encode a WRED curve into the QMan 32-bit curve-word format:
 * maxTh as ma*2^mn (8-bit mantissa, 5-bit exponent), the drop slope as
 * sa/2^sn, and the max drop probability as pn (units of 1/256, encoded
 * as maxP/4 - 1).  Fixed-point scaling (pres) avoids floating point.
 * Returns E_OK on success, E_INVALID_VALUE for out-of-range inputs. */
static t_Error CalcWredCurve(t_QmCgWredCurve *p_WredCurve, uint32_t *p_CurveWord)
{
    uint32_t maxP, roundDown, roundUp, tmpA, tmpN;
    uint32_t ma=0, mn=0, slope, sa=0, sn=0, pn;
    int pres = 1000;
    int gap, tmp;

    /* TODO - change maxTh to uint64_t?
    if(p_WredCurve->maxTh > (1<<39))
        RETURN_ERROR(MINOR, E_INVALID_VALUE, ("maxTh is not in range"));*/

    /* express maxTh as ma*2^mn (exhaustive search for the closest pair) */
    gap = (int)p_WredCurve->maxTh;
    for (tmpA=0 ; tmpA<256; tmpA++ )
        for (tmpN=0 ; tmpN<32; tmpN++ )
        {
            /* 1U: shifting a signed 1 by 31 is undefined behavior */
            tmp = ABS((int)(p_WredCurve->maxTh - tmpA*(1U<<tmpN)));
            if (tmp < gap)
            {
                ma = tmpA;
                mn = tmpN;
                gap = tmp;
            }
        }
    ASSERT_COND(ma <256);
    ASSERT_COND(mn <32);
    p_WredCurve->maxTh = ma*(1<<mn);

    if(p_WredCurve->maxTh <= p_WredCurve->minTh)
        RETURN_ERROR(MINOR, E_INVALID_VALUE, ("maxTh must be larger than minTh"));
    /* Bug fix: also reject 0 (division by zero below); message previously
     * read "mustn't be 1-64", the opposite of the intent. */
    if(!p_WredCurve->probabilityDenominator || p_WredCurve->probabilityDenominator > 64)
        RETURN_ERROR(MINOR, E_INVALID_VALUE, ("probabilityDenominator must be 1-64"));

    /* first we translate from Cisco probabilityDenominator
       to 256 fixed denominator, result must be divisible by 4. */
    /* we multiply by a fixed value to get better accuracy (without
       using floating point) */
    maxP = (uint32_t)(256*1000/p_WredCurve->probabilityDenominator);
    /* Bug fix: '%' and '*' have equal precedence, so the original
     * 'maxP % 4*pres' parsed as (maxP % 4)*pres and skipped rounding for
     * values like 25600 that are multiples of 4 but not of 4*pres. */
    if (maxP % (4*pres))
    {
        /* Round to the nearest multiple of 4*pres.  Bug fix: roundDown was
         * computed as maxP PLUS the remainder (the upper multiple), which
         * rounded the wrong way and made (maxP - roundDown) negative. */
        roundDown = maxP - (maxP % (4*pres));
        roundUp = roundDown + 4*pres;
        if((roundUp - maxP) > (maxP - roundDown))
            maxP = roundDown;
        else
            maxP = roundUp;
    }
    maxP = maxP/pres;
    ASSERT_COND(maxP <= 256);
    pn = (uint8_t)(maxP/4 - 1);

    if(maxP >= (p_WredCurve->maxTh - p_WredCurve->minTh))
        RETURN_ERROR(MINOR, E_INVALID_VALUE, ("Due to probabilityDenominator selected, maxTh-minTh must be larger than %d", maxP));

    pres = 1000000;
    slope = maxP*pres/(p_WredCurve->maxTh - p_WredCurve->minTh);
    /* express slope as sa/2^sn */
    gap = (int)slope;
    for (tmpA=(uint32_t)(64*pres) ; tmpA<128*pres; tmpA += pres )
        for (tmpN=7 ; tmpN<64; tmpN++ )
        {
            /* NOTE(review): tmpN%32 wraps the shift for tmpN>=32 even
             * though sn may legally reach 63 -- preserved from the
             * original; verify against the QMan WRED encoding. */
            tmp = ABS((int)(slope - tmpA/(1UL<<(tmpN%32))));
            if (tmp < gap)
            {
                sa = tmpA;
                sn = tmpN;
                gap = tmp;
            }
        }
    sa = sa/pres;
    ASSERT_COND(sa<128 && sa>=64);
    ASSERT_COND(sn<64 && sn>=7);

    *p_CurveWord = ((ma << 24) |
                    (mn << 19) |
                    (sa << 12) |
                    (sn << 6) |
                    pn);

    return E_OK;
}
1491
1492
/* Pull a single frame descriptor from the portal using a PDQCR command.
 * Writes the dequeue command, waits for hardware to accept it, then
 * busy-waits on the DQRR for the resulting entry and copies its FD into
 * *p_Frame (4 x 32-bit words).  Only valid on portals configured for
 * pull mode.  Returns E_OK with a frame, or E_EMPTY if the dequeue
 * completed without a valid FD.
 * NOTE(review): both wait loops spin with no timeout -- presumably
 * hardware always responds to a PDQCR; verify against the QMan spec. */
static t_Error QmPortalPullFrame(t_Handle h_QmPortal, uint32_t pdqcr, t_DpaaFD *p_Frame)
{
    t_QmPortal *p_QmPortal = (t_QmPortal *)h_QmPortal;
    struct qm_dqrr_entry *p_Dq;
    int prefetch;
    uint32_t *p_Dst, *p_Src;

    ASSERT_COND(p_QmPortal);
    ASSERT_COND(p_Frame);
    SANITY_CHECK_RETURN_ERROR(p_QmPortal->pullMode, E_INVALID_STATE);

    NCSW_PLOCK(p_QmPortal);

    /* Issue the pull-dequeue command and wait until h/w consumes it. */
    qm_dqrr_pdqcr_set(p_QmPortal->p_LowQmPortal, pdqcr);
    mb();
    while (qm_dqrr_pdqcr_get(p_QmPortal->p_LowQmPortal)) ;

    /* Software prefetch only needed when h/w ring stashing is off. */
    prefetch = !(p_QmPortal->options & QMAN_PORTAL_FLAG_RSTASH);
    while(TRUE)
    {
        if (prefetch)
            qmPortalDqrrPvbPrefetch(p_QmPortal->p_LowQmPortal);
        qmPortalDqrrPvbUpdate(p_QmPortal->p_LowQmPortal);
        p_Dq = qm_dqrr_current(p_QmPortal->p_LowQmPortal);
        if (!p_Dq)
            continue;
        ASSERT_COND(p_Dq->fqid);
        /* Copy the 16-byte frame descriptor word by word. */
        p_Dst = (uint32_t *)p_Frame;
        p_Src = (uint32_t *)&p_Dq->fd;
        p_Dst[0] = p_Src[0];
        p_Dst[1] = p_Src[1];
        p_Dst[2] = p_Src[2];
        p_Dst[3] = p_Src[3];
        /* Consume the DQRR entry (DCA or CCI per portal configuration). */
        if (p_QmPortal->options & QMAN_PORTAL_FLAG_DCA)
        {
            qmPortalDqrrDcaConsume1ptr(p_QmPortal->p_LowQmPortal,
                                       p_Dq,
                                       false);
            qm_dqrr_next(p_QmPortal->p_LowQmPortal);
        }
        else
        {
            qm_dqrr_next(p_QmPortal->p_LowQmPortal);
            qmPortalDqrrCciConsume(p_QmPortal->p_LowQmPortal, 1);
        }
        break;
    }

    PUNLOCK(p_QmPortal);

    /* The entry may signal "dequeue done, no frame available". */
    if (!(p_Dq->stat & QM_DQRR_STAT_FD_VALID))
        return ERROR_CODE(E_EMPTY);

    return E_OK;
}
1547
1548
1549
/****************************************/
1550
/* API Init unit functions */
1551
/****************************************/
1552
/* Allocate and configure a QM portal object from user parameters, filling
 * the driver-parameter structure with defaults that later QM_PORTAL_Config*
 * calls may override before QM_PORTAL_Init().
 * Returns the portal handle, or NULL on allocation/parameter failure. */
t_Handle QM_PORTAL_Config(t_QmPortalParam *p_QmPortalParam)
{
    t_QmPortal *p_QmPortal;
    uint32_t i;

    SANITY_CHECK_RETURN_VALUE(p_QmPortalParam, E_INVALID_HANDLE, NULL);
    SANITY_CHECK_RETURN_VALUE(p_QmPortalParam->swPortalId < DPAA_MAX_NUM_OF_SW_PORTALS, E_INVALID_VALUE, 0);

    p_QmPortal = (t_QmPortal *)XX_Malloc(sizeof(t_QmPortal));
    if (!p_QmPortal)
    {
        REPORT_ERROR(MAJOR, E_NO_MEMORY, ("Qm Portal obj!!!"));
        return NULL;
    }
    memset(p_QmPortal, 0, sizeof(t_QmPortal));

    p_QmPortal->p_LowQmPortal = (struct qm_portal *)XX_Malloc(sizeof(struct qm_portal));
    if (!p_QmPortal->p_LowQmPortal)
    {
        XX_Free(p_QmPortal);
        REPORT_ERROR(MAJOR, E_NO_MEMORY, ("Low qm p_QmPortal obj!!!"));
        return NULL;
    }
    memset(p_QmPortal->p_LowQmPortal, 0, sizeof(struct qm_portal));

    p_QmPortal->p_QmPortalDriverParams = (t_QmPortalDriverParams *)XX_Malloc(sizeof(t_QmPortalDriverParams));
    if (!p_QmPortal->p_QmPortalDriverParams)
    {
        XX_Free(p_QmPortal->p_LowQmPortal);
        XX_Free(p_QmPortal);
        REPORT_ERROR(MAJOR, E_NO_MEMORY, ("Qm Portal driver parameters"));
        return NULL;
    }
    memset(p_QmPortal->p_QmPortalDriverParams, 0, sizeof(t_QmPortalDriverParams));

    /* Cache-enabled / cache-inhibited register windows and portal identity. */
    p_QmPortal->p_LowQmPortal->addr.addr_ce = UINT_TO_PTR(p_QmPortalParam->ceBaseAddress);
    p_QmPortal->p_LowQmPortal->addr.addr_ci = UINT_TO_PTR(p_QmPortalParam->ciBaseAddress);
    p_QmPortal->p_LowQmPortal->config.irq = p_QmPortalParam->irq;
    p_QmPortal->p_LowQmPortal->config.bound = 0;
    p_QmPortal->p_LowQmPortal->config.cpu = (int)p_QmPortalParam->swPortalId;
    p_QmPortal->p_LowQmPortal->config.channel = (e_QmFQChannel)(e_QM_FQ_CHANNEL_SWPORTAL0 + p_QmPortalParam->swPortalId);
    p_QmPortal->p_LowQmPortal->bind_lock = XX_InitSpinlock();

    /* User-supplied callbacks and context. */
    p_QmPortal->h_Qm = p_QmPortalParam->h_Qm;
    p_QmPortal->f_DfltFrame = p_QmPortalParam->f_DfltFrame;
    p_QmPortal->f_RejectedFrame = p_QmPortalParam->f_RejectedFrame;
    p_QmPortal->h_App = p_QmPortalParam->h_App;

    /* Driver parameters start at their documented defaults. */
    p_QmPortal->p_QmPortalDriverParams->fdLiodnOffset = p_QmPortalParam->fdLiodnOffset;
    p_QmPortal->p_QmPortalDriverParams->dequeueDcaMode = DEFAULT_dequeueDcaMode;
    p_QmPortal->p_QmPortalDriverParams->dequeueUpToThreeFrames = DEFAULT_dequeueUpToThreeFrames;
    p_QmPortal->p_QmPortalDriverParams->commandType = DEFAULT_dequeueCommandType;
    p_QmPortal->p_QmPortalDriverParams->userToken = DEFAULT_dequeueUserToken;
    p_QmPortal->p_QmPortalDriverParams->specifiedWq = DEFAULT_dequeueSpecifiedWq;
    p_QmPortal->p_QmPortalDriverParams->dedicatedChannel = DEFAULT_dequeueDedicatedChannel;
    p_QmPortal->p_QmPortalDriverParams->dedicatedChannelHasPrecedenceOverPoolChannels =
        DEFAULT_dequeueDedicatedChannelHasPrecedenceOverPoolChannels;
    p_QmPortal->p_QmPortalDriverParams->poolChannelId = DEFAULT_dequeuePoolChannelId;
    p_QmPortal->p_QmPortalDriverParams->wqId = DEFAULT_dequeueWqId;
    for (i=0;i<QM_MAX_NUM_OF_POOL_CHANNELS;i++)
        p_QmPortal->p_QmPortalDriverParams->poolChannels[i] = FALSE;
    p_QmPortal->p_QmPortalDriverParams->dqrrSize = DEFAULT_dqrrSize;
    p_QmPortal->p_QmPortalDriverParams->pullMode = DEFAULT_pullMode;

    return p_QmPortal;
}
1618
1619
/* Initialize a configured portal: program per-portal parameters in the QM
 * common module, derive the portal flags (IRQ/DCA/stashing) and the SDQCR
 * word (push mode only), select the dequeue-ring polling callback, create
 * the hardware portal, and release the driver-parameter scratch struct.
 * Returns E_OK on success, an error code otherwise. */
t_Error QM_PORTAL_Init(t_Handle h_QmPortal)
{
    t_QmPortal *p_QmPortal = (t_QmPortal *)h_QmPortal;
    uint32_t i, flags=0, sdqcrFlags=0;
    t_Error err;
    t_QmInterModulePortalInitParams qmParams;

    SANITY_CHECK_RETURN_ERROR(p_QmPortal, E_INVALID_HANDLE);
    SANITY_CHECK_RETURN_ERROR(p_QmPortal->p_QmPortalDriverParams, E_INVALID_HANDLE);

    /* Program LIODNs and stashing destination in the common QM module. */
    memset(&qmParams, 0, sizeof(qmParams));
    qmParams.portalId = (uint8_t)p_QmPortal->p_LowQmPortal->config.cpu;
    qmParams.liodn = p_QmPortal->p_QmPortalDriverParams->fdLiodnOffset;
    qmParams.dqrrLiodn = p_QmPortal->p_QmPortalDriverParams->dqrrLiodn;
    qmParams.fdFqLiodn = p_QmPortal->p_QmPortalDriverParams->fdFqLiodn;
    qmParams.stashDestQueue = p_QmPortal->p_QmPortalDriverParams->stashDestQueue;
    if ((err = QmGetSetPortalParams(p_QmPortal->h_Qm, &qmParams)) != E_OK)
        RETURN_ERROR(MAJOR, err, NO_MSG);

    /* IRQ-driven operation only when an interrupt line was supplied. */
    flags = (uint32_t)(((p_QmPortal->p_LowQmPortal->config.irq == NO_IRQ) ?
            0 :
            (QMAN_PORTAL_FLAG_IRQ |
             QMAN_PORTAL_FLAG_IRQ_FAST |
             QMAN_PORTAL_FLAG_IRQ_SLOW)));
    flags |= ((p_QmPortal->p_QmPortalDriverParams->dequeueDcaMode) ? QMAN_PORTAL_FLAG_DCA : 0);
    flags |= (p_QmPortal->p_QmPortalDriverParams->dqrr)?QMAN_PORTAL_FLAG_RSTASH:0;
    flags |= (p_QmPortal->p_QmPortalDriverParams->fdFq)?QMAN_PORTAL_FLAG_DSTASH:0;

    p_QmPortal->pullMode = p_QmPortal->p_QmPortalDriverParams->pullMode;
    /* Push mode: build the static dequeue command from driver params. */
    if (!p_QmPortal->pullMode)
    {
        sdqcrFlags |= (p_QmPortal->p_QmPortalDriverParams->dequeueUpToThreeFrames) ? QM_SDQCR_COUNT_UPTO3 : QM_SDQCR_COUNT_EXACT1;
        sdqcrFlags |= QM_SDQCR_TOKEN_SET(p_QmPortal->p_QmPortalDriverParams->userToken);
        sdqcrFlags |= QM_SDQCR_TYPE_SET(p_QmPortal->p_QmPortalDriverParams->commandType);
        if (!p_QmPortal->p_QmPortalDriverParams->specifiedWq)
        {
            /* Channel-based dequeue: dedicated and/or pool channels. */
            /* sdqcrFlags |= QM_SDQCR_SOURCE_CHANNELS;*/ /* removed as the macro is '0' */
            sdqcrFlags |= (p_QmPortal->p_QmPortalDriverParams->dedicatedChannelHasPrecedenceOverPoolChannels) ? QM_SDQCR_DEDICATED_PRECEDENCE : 0;
            sdqcrFlags |= (p_QmPortal->p_QmPortalDriverParams->dedicatedChannel) ? QM_SDQCR_CHANNELS_DEDICATED : 0;
            for (i=0;i<QM_MAX_NUM_OF_POOL_CHANNELS;i++)
                sdqcrFlags |= ((p_QmPortal->p_QmPortalDriverParams->poolChannels[i]) ?
                    QM_SDQCR_CHANNELS_POOL(i+1) : 0);
        }
        else
        {
            /* Specific work-queue dequeue. */
            sdqcrFlags |= QM_SDQCR_SOURCE_SPECIFICWQ;
            sdqcrFlags |= (p_QmPortal->p_QmPortalDriverParams->dedicatedChannel) ?
                QM_SDQCR_SPECIFICWQ_DEDICATED : QM_SDQCR_SPECIFICWQ_POOL(p_QmPortal->p_QmPortalDriverParams->poolChannelId);
            sdqcrFlags |= QM_SDQCR_SPECIFICWQ_WQ(p_QmPortal->p_QmPortalDriverParams->wqId);
        }
    }
    /* Pick the ring-polling variant matching the stashing/DCA combination. */
    if ((flags & QMAN_PORTAL_FLAG_RSTASH) && (flags & QMAN_PORTAL_FLAG_DCA))
        p_QmPortal->f_LoopDequeueRingCB = LoopDequeueRingDcaOptimized;
    else if ((flags & QMAN_PORTAL_FLAG_RSTASH) && !(flags & QMAN_PORTAL_FLAG_DCA))
        p_QmPortal->f_LoopDequeueRingCB = LoopDequeueRingOptimized;
    else
        p_QmPortal->f_LoopDequeueRingCB = LoopDequeueRing;

    if ((!p_QmPortal->f_RejectedFrame) || (!p_QmPortal->f_DfltFrame))
        RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("f_RejectedFrame or f_DfltFrame callback not provided"));

    /* Fallback callback set used for FQs with no contextB back-pointer. */
    p_QmPortal->p_NullCB = (struct qman_fq_cb *)XX_Malloc(sizeof(struct qman_fq_cb));
    if (!p_QmPortal->p_NullCB)
        RETURN_ERROR(MAJOR, E_NO_MEMORY, ("FQ Null CB obj!!!"));
    memset(p_QmPortal->p_NullCB, 0, sizeof(struct qman_fq_cb));

    p_QmPortal->p_NullCB->dqrr = p_QmPortal->f_DfltFrame;
    p_QmPortal->p_NullCB->ern = p_QmPortal->f_RejectedFrame;
    p_QmPortal->p_NullCB->dc_ern = p_QmPortal->p_NullCB->fqs = null_cb_mr;

    if (qman_create_portal(p_QmPortal, flags, sdqcrFlags, p_QmPortal->p_QmPortalDriverParams->dqrrSize) != E_OK)
    {
        RETURN_ERROR(MAJOR, E_NO_MEMORY, ("create portal failed"));
    }

    QmSetPortalHandle(p_QmPortal->h_Qm, (t_Handle)p_QmPortal, (e_DpaaSwPortal)p_QmPortal->p_LowQmPortal->config.cpu);
    /* Config phase is over; release the scratch parameter block. */
    XX_Free(p_QmPortal->p_QmPortalDriverParams);
    p_QmPortal->p_QmPortalDriverParams = NULL;

    DBG(TRACE, ("Qman-Portal %d @ %p:%p",
        p_QmPortal->p_LowQmPortal->config.cpu,
        p_QmPortal->p_LowQmPortal->addr.addr_ce,
        p_QmPortal->p_LowQmPortal->addr.addr_ci
        ));

    DBG(TRACE, ("Qman-Portal %d phys @ 0x%016llx:0x%016llx",
        p_QmPortal->p_LowQmPortal->config.cpu,
        (uint64_t)XX_VirtToPhys(p_QmPortal->p_LowQmPortal->addr.addr_ce),
        (uint64_t)XX_VirtToPhys(p_QmPortal->p_LowQmPortal->addr.addr_ci)
        ));

    return E_OK;
}
1712
1713
/* Free a portal object: detach it from the QM common module, tear down
 * the hardware portal, and release all owned allocations (NULL CB set,
 * spinlock, low-level portal, and the driver-parameter block if Init()
 * was never reached). */
t_Error QM_PORTAL_Free(t_Handle h_QmPortal)
{
    t_QmPortal *p_QmPortal = (t_QmPortal *)h_QmPortal;

    if (!p_QmPortal)
        return ERROR_CODE(E_INVALID_HANDLE);

    ASSERT_COND(p_QmPortal->p_LowQmPortal);
    QmSetPortalHandle(p_QmPortal->h_Qm, NULL, (e_DpaaSwPortal)p_QmPortal->p_LowQmPortal->config.cpu);
    qman_destroy_portal(p_QmPortal);
    if (p_QmPortal->p_NullCB)
        XX_Free(p_QmPortal->p_NullCB);

    if (p_QmPortal->p_LowQmPortal->bind_lock)
        XX_FreeSpinlock(p_QmPortal->p_LowQmPortal->bind_lock);
    /* Still set if Free() is called before Init() completed. */
    if(p_QmPortal->p_QmPortalDriverParams)
        XX_Free(p_QmPortal->p_QmPortalDriverParams);
    XX_Free(p_QmPortal->p_LowQmPortal);
    XX_Free(p_QmPortal);

    return E_OK;
}
1735
1736
/* Select discrete-consumption-acknowledge (DCA) dequeue mode for this
 * portal.  Only valid between QM_PORTAL_Config() and QM_PORTAL_Init(). */
t_Error QM_PORTAL_ConfigDcaMode(t_Handle h_QmPortal, bool enable)
{
    t_QmPortal *p_Portal = (t_QmPortal *)h_QmPortal;

    SANITY_CHECK_RETURN_ERROR(p_Portal, E_INVALID_HANDLE);
    SANITY_CHECK_RETURN_ERROR(p_Portal->p_QmPortalDriverParams, E_INVALID_HANDLE);

    p_Portal->p_QmPortalDriverParams->dequeueDcaMode = enable;
    return E_OK;
}
1747
1748
/* Override the portal's stashing configuration (destination queue, LIODNs,
 * and per-resource stash enables).  Only valid between QM_PORTAL_Config()
 * and QM_PORTAL_Init(), while the driver-parameter block still exists. */
t_Error QM_PORTAL_ConfigStash(t_Handle h_QmPortal, t_QmPortalStashParam *p_StashParams)
{
    t_QmPortal *p_QmPortal = (t_QmPortal *)h_QmPortal;

    SANITY_CHECK_RETURN_ERROR(p_QmPortal, E_INVALID_HANDLE);
    SANITY_CHECK_RETURN_ERROR(p_QmPortal->p_QmPortalDriverParams, E_NULL_POINTER);
    SANITY_CHECK_RETURN_ERROR(p_StashParams, E_NULL_POINTER);

    p_QmPortal->p_QmPortalDriverParams->stashDestQueue = p_StashParams->stashDestQueue;
    p_QmPortal->p_QmPortalDriverParams->dqrrLiodn = p_StashParams->dqrrLiodn;
    p_QmPortal->p_QmPortalDriverParams->fdFqLiodn = p_StashParams->fdFqLiodn;
    p_QmPortal->p_QmPortalDriverParams->eqcr = p_StashParams->eqcr;
    p_QmPortal->p_QmPortalDriverParams->eqcrHighPri = p_StashParams->eqcrHighPri;
    p_QmPortal->p_QmPortalDriverParams->dqrr = p_StashParams->dqrr;
    p_QmPortal->p_QmPortalDriverParams->dqrrHighPri = p_StashParams->dqrrHighPri;
    p_QmPortal->p_QmPortalDriverParams->fdFq = p_StashParams->fdFq;
    p_QmPortal->p_QmPortalDriverParams->fdFqHighPri = p_StashParams->fdFqHighPri;
    p_QmPortal->p_QmPortalDriverParams->fdFqDrop = p_StashParams->fdFqDrop;

    return E_OK;
}
1769
1770
1771
/* Select pull-mode (PDQCR) versus push-mode (SDQCR) dequeuing.  Only
 * valid between QM_PORTAL_Config() and QM_PORTAL_Init(). */
t_Error QM_PORTAL_ConfigPullMode(t_Handle h_QmPortal, bool pullMode)
{
    t_QmPortal *p_Portal = (t_QmPortal *)h_QmPortal;

    SANITY_CHECK_RETURN_ERROR(p_Portal, E_INVALID_HANDLE);
    SANITY_CHECK_RETURN_ERROR(p_Portal->p_QmPortalDriverParams, E_NULL_POINTER);

    p_Portal->p_QmPortalDriverParams->pullMode = pullMode;
    return E_OK;
}
1782
1783
/* Add a pool channel to this portal's static dequeue command at runtime
 * by OR-ing its bit into the current SDQCR register value. */
t_Error QM_PORTAL_AddPoolChannel(t_Handle h_QmPortal, uint8_t poolChannelId)
{
    t_QmPortal *p_QmPortal = (t_QmPortal *)h_QmPortal;
    uint32_t sdqcrFlags;

    SANITY_CHECK_RETURN_ERROR(p_QmPortal, E_INVALID_HANDLE);
    SANITY_CHECK_RETURN_ERROR((poolChannelId < QM_MAX_NUM_OF_POOL_CHANNELS), E_INVALID_VALUE);

    /* Read-modify-write; pool channel bits are 1-based in the SDQCR. */
    sdqcrFlags = qm_dqrr_sdqcr_get(p_QmPortal->p_LowQmPortal);
    sdqcrFlags |= QM_SDQCR_CHANNELS_POOL(poolChannelId+1);
    qm_dqrr_sdqcr_set(p_QmPortal->p_LowQmPortal, sdqcrFlags);

    return E_OK;
}
1797
1798
/* Poll the portal for pending work under the portal lock.  Depending on
 * 'source', services the message ring (control frames: ERNs, FQ state
 * changes), the dequeue ring (data frames), or both. */
t_Error QM_PORTAL_Poll(t_Handle h_QmPortal, e_QmPortalPollSource source)
{
    t_QmPortal *p_QmPortal = (t_QmPortal *)h_QmPortal;

    SANITY_CHECK_RETURN_ERROR(p_QmPortal, E_INVALID_HANDLE);

    NCSW_PLOCK(p_QmPortal);

    if ((source == e_QM_PORTAL_POLL_SOURCE_CONTROL_FRAMES) ||
        (source == e_QM_PORTAL_POLL_SOURCE_BOTH))
    {
        /* Service the message ring, then acknowledge what was handled. */
        uint32_t is = qm_isr_status_read(p_QmPortal->p_LowQmPortal);
        uint32_t active = LoopMessageRing(p_QmPortal, is);
        if (active)
            qm_isr_status_clear(p_QmPortal->p_LowQmPortal, active);
    }
    if ((source == e_QM_PORTAL_POLL_SOURCE_DATA_FRAMES) ||
        (source == e_QM_PORTAL_POLL_SOURCE_BOTH))
        p_QmPortal->f_LoopDequeueRingCB((t_Handle)p_QmPortal);

    PUNLOCK(p_QmPortal);

    return E_OK;
}
1822
1823
/* Poll the dequeue ring for a single frame and return its descriptor and
 * owning-FQ context in *p_frameInfo.  The DQRR entry's contextB carries a
 * back-pointer to the software FQ object; if it is NULL (no object was
 * attached) the raw FQID and the portal's default context are reported
 * instead.  Returns E_OK with a frame, or E_EMPTY when the ring is empty. */
t_Error QM_PORTAL_PollFrame(t_Handle h_QmPortal, t_QmPortalFrameInfo *p_frameInfo)
{
    t_QmPortal *p_QmPortal = (t_QmPortal *)h_QmPortal;
    struct qm_dqrr_entry *p_Dq;
    struct qman_fq *p_Fq;
    int prefetch;

    SANITY_CHECK_RETURN_ERROR(p_QmPortal, E_INVALID_HANDLE);
    SANITY_CHECK_RETURN_ERROR(p_frameInfo, E_NULL_POINTER);

    NCSW_PLOCK(p_QmPortal);

    /* Software prefetch only needed when h/w ring stashing is off. */
    prefetch = !(p_QmPortal->options & QMAN_PORTAL_FLAG_RSTASH);
    if (prefetch)
        qmPortalDqrrPvbPrefetch(p_QmPortal->p_LowQmPortal);
    qmPortalDqrrPvbUpdate(p_QmPortal->p_LowQmPortal);
    p_Dq = qm_dqrr_current(p_QmPortal->p_LowQmPortal);
    if (!p_Dq)
    {
        PUNLOCK(p_QmPortal);
        return ERROR_CODE(E_EMPTY);
    }
    /* contextB was programmed (in qm_new_fq) to point at the FQ object. */
    p_Fq = ptr_from_aligned_int(p_Dq->contextB);
    ASSERT_COND(p_Dq->fqid);
    if (p_Fq)
    {
        p_frameInfo->h_App = p_Fq->h_App;
        p_frameInfo->h_QmFqr = p_Fq->h_QmFqr;
        p_frameInfo->fqidOffset = p_Fq->fqidOffset;
        memcpy((void*)&p_frameInfo->frame, (void*)&p_Dq->fd, sizeof(t_DpaaFD));
    }
    else
    {
        /* No software FQ object attached: report the raw FQID. */
        p_frameInfo->h_App = p_QmPortal->h_App;
        p_frameInfo->h_QmFqr = NULL;
        p_frameInfo->fqidOffset = p_Dq->fqid;
        memcpy((void*)&p_frameInfo->frame, (void*)&p_Dq->fd, sizeof(t_DpaaFD));
    }
    /* Consume the DQRR entry (DCA or CCI per portal configuration). */
    if (p_QmPortal->options & QMAN_PORTAL_FLAG_DCA) {
        qmPortalDqrrDcaConsume1ptr(p_QmPortal->p_LowQmPortal,
                                   p_Dq,
                                   false);
        qm_dqrr_next(p_QmPortal->p_LowQmPortal);
    } else {
        qm_dqrr_next(p_QmPortal->p_LowQmPortal);
        qmPortalDqrrCciConsume(p_QmPortal->p_LowQmPortal, 1);
    }

    PUNLOCK(p_QmPortal);

    return E_OK;
}
1875
1876
1877
/*
 * Create a Frame-Queue Range (FQR): allocate the FQR object, resolve a
 * portal, obtain an FQID range (or adopt a forced FQID in shadow mode),
 * and initialize one qman_fq object per FQID.
 *
 * Returns the FQR handle, or NULL on any failure (all partially-built
 * state is released via QM_FQR_Free).
 *
 * Fix vs. original: when no portal was supplied and QmGetPortalHandle()
 * failed, the old code used SANITY_CHECK_RETURN_VALUE and returned NULL
 * without freeing the already-allocated p_QmFqr — a memory leak. The
 * failure path now releases the object first.
 */
t_Handle QM_FQR_Create(t_QmFqrParams *p_QmFqrParams)
{
    t_QmFqr         *p_QmFqr;
    uint32_t        i, flags = 0;
    u_QmFqdContextA cnxtA;

    SANITY_CHECK_RETURN_VALUE(p_QmFqrParams, E_INVALID_HANDLE, NULL);
    SANITY_CHECK_RETURN_VALUE(p_QmFqrParams->h_Qm, E_INVALID_HANDLE, NULL);

    /* Shadow mode only mirrors a single, explicitly forced FQID. */
    if (p_QmFqrParams->shadowMode &&
        (!p_QmFqrParams->useForce || p_QmFqrParams->numOfFqids != 1))
    {
        REPORT_ERROR(MAJOR, E_CONFLICT, ("shadowMode must be use with useForce and numOfFqids==1!!!"));
        return NULL;
    }

    p_QmFqr = (t_QmFqr *)XX_MallocSmart(sizeof(t_QmFqr), 0, 64);
    if (!p_QmFqr)
    {
        REPORT_ERROR(MAJOR, E_NO_MEMORY, ("QM FQR obj!!!"));
        return NULL;
    }
    memset(p_QmFqr, 0, sizeof(t_QmFqr));

    p_QmFqr->h_Qm       = p_QmFqrParams->h_Qm;
    p_QmFqr->h_QmPortal = p_QmFqrParams->h_QmPortal;
    p_QmFqr->shadowMode = p_QmFqrParams->shadowMode;
    /* useForce with numOfFqids==0 means "exactly the one forced FQID". */
    p_QmFqr->numOfFqids = (p_QmFqrParams->useForce && !p_QmFqrParams->numOfFqids) ?
                              1 : p_QmFqrParams->numOfFqids;

    if (!p_QmFqr->h_QmPortal)
    {
        p_QmFqr->h_QmPortal = QmGetPortalHandle(p_QmFqr->h_Qm);
        if (!p_QmFqr->h_QmPortal)
        {
            /* Leak fix: release p_QmFqr before bailing out. */
            REPORT_ERROR(MAJOR, E_INVALID_HANDLE, ("QM portal!!!"));
            QM_FQR_Free(p_QmFqr);
            return NULL;
        }
    }

    p_QmFqr->p_Fqs = (struct qman_fq **)XX_Malloc(sizeof(struct qman_fq *) * p_QmFqr->numOfFqids);
    if (!p_QmFqr->p_Fqs)
    {
        REPORT_ERROR(MAJOR, E_NO_MEMORY, ("QM FQs obj!!!"));
        QM_FQR_Free(p_QmFqr);
        return NULL;
    }
    memset(p_QmFqr->p_Fqs, 0, sizeof(struct qman_fq *) * p_QmFqr->numOfFqids);

    if (p_QmFqr->shadowMode)
    {
        /* Shadow FQR: wrap a single FQ that is assumed to be already
         * scheduled by another owner; no hardware initialization here. */
        struct qman_fq *p_Fq = NULL;

        p_QmFqr->fqidBase = p_QmFqrParams->qs.frcQ.fqid;
        p_Fq = (struct qman_fq *)XX_MallocSmart(sizeof(struct qman_fq), 0, 64);
        if (!p_Fq)
        {
            REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FQ obj!!!"));
            QM_FQR_Free(p_QmFqr);
            return NULL;
        }
        memset(p_Fq, 0, sizeof(struct qman_fq));
        p_Fq->cb.dqrr = ((t_QmPortal*)p_QmFqr->h_QmPortal)->f_DfltFrame;
        p_Fq->cb.ern = ((t_QmPortal*)p_QmFqr->h_QmPortal)->f_RejectedFrame;
        p_Fq->cb.dc_ern = cb_ern_dcErn;
        p_Fq->cb.fqs = cb_fqs;
        p_Fq->h_App = ((t_QmPortal*)p_QmFqr->h_QmPortal)->h_App;
        p_Fq->h_QmFqr = p_QmFqr;
        p_Fq->state = qman_fq_state_sched;
        p_Fq->fqid = p_QmFqr->fqidBase;
        p_QmFqr->p_Fqs[0] = p_Fq;
    }
    else
    {
        p_QmFqr->channel = p_QmFqrParams->channel;
        p_QmFqr->workQueue = p_QmFqrParams->wq;

        p_QmFqr->fqidBase = QmFqidGet(p_QmFqr->h_Qm,
                                      p_QmFqr->numOfFqids,
                                      p_QmFqrParams->qs.nonFrcQs.align,
                                      p_QmFqrParams->useForce,
                                      p_QmFqrParams->qs.frcQ.fqid);
        if (p_QmFqr->fqidBase == (uint32_t)ILLEGAL_BASE)
        {
            REPORT_ERROR(CRITICAL,E_INVALID_STATE,("can't allocate a fqid"));
            QM_FQR_Free(p_QmFqr);
            return NULL;
        }

        /* Congestion avoidance needs a CG handle or an FQ tail-drop
         * threshold (or both); neither is a configuration error. */
        if(p_QmFqrParams->congestionAvoidanceEnable &&
           (p_QmFqrParams->congestionAvoidanceParams.h_QmCg == NULL) &&
           (p_QmFqrParams->congestionAvoidanceParams.fqTailDropThreshold == 0))
        {
            REPORT_ERROR(CRITICAL,E_INVALID_STATE,("NULL congestion group handle and no FQ Threshold"));
            QM_FQR_Free(p_QmFqr);
            return NULL;
        }
        if(p_QmFqrParams->congestionAvoidanceEnable)
        {
            if(p_QmFqrParams->congestionAvoidanceParams.h_QmCg)
                flags |= QM_FQCTRL_CGE;
            if(p_QmFqrParams->congestionAvoidanceParams.fqTailDropThreshold)
                flags |= QM_FQCTRL_TDE;
        }

        /*
        flags |= (p_QmFqrParams->holdActive) ? QM_FQCTRL_ORP : 0;
        flags |= (p_QmFqrParams->holdActive) ? QM_FQCTRL_CPCSTASH : 0;
        flags |= (p_QmFqrParams->holdActive) ? QM_FQCTRL_FORCESFDR : 0;
        flags |= (p_QmFqrParams->holdActive) ? QM_FQCTRL_AVOIDBLOCK : 0;
        */
        flags |= (p_QmFqrParams->holdActive) ? QM_FQCTRL_HOLDACTIVE : 0;
        flags |= (p_QmFqrParams->preferInCache) ? QM_FQCTRL_LOCKINCACHE : 0;

        if (p_QmFqrParams->useContextAForStash)
        {
            if (CheckStashParams(p_QmFqrParams) != E_OK)
            {
                REPORT_ERROR(CRITICAL,E_INVALID_STATE,NO_MSG);
                QM_FQR_Free(p_QmFqr);
                return NULL;
            }

            /* Context-A stashing sizes are expressed in cache lines. */
            memset(&cnxtA, 0, sizeof(cnxtA));
            cnxtA.stashing.annotation_cl = DIV_CEIL(p_QmFqrParams->stashingParams.frameAnnotationSize, CACHELINE_SIZE);
            cnxtA.stashing.data_cl = DIV_CEIL(p_QmFqrParams->stashingParams.frameDataSize, CACHELINE_SIZE);
            cnxtA.stashing.context_cl = DIV_CEIL(p_QmFqrParams->stashingParams.fqContextSize, CACHELINE_SIZE);
            cnxtA.context_hi = (uint8_t)((p_QmFqrParams->stashingParams.fqContextAddr >> 32) & 0xff);
            cnxtA.context_lo = (uint32_t)(p_QmFqrParams->stashingParams.fqContextAddr);
            flags |= QM_FQCTRL_CTXASTASHING;
        }

        /* Initialize every FQ in the range; any failure tears down the
         * whole FQR (already-created FQs are freed by QM_FQR_Free). */
        for(i=0;i<p_QmFqr->numOfFqids;i++)
            if (qm_new_fq(p_QmFqr->h_QmPortal,
                          p_QmFqr->fqidBase+i,
                          i,
                          p_QmFqr->channel,
                          p_QmFqr->workQueue,
                          1/*p_QmFqr->numOfFqids*/,
                          flags,
                          (p_QmFqrParams->congestionAvoidanceEnable ?
                              &p_QmFqrParams->congestionAvoidanceParams : NULL),
                          p_QmFqrParams->useContextAForStash ?
                              (t_QmContextA *)&cnxtA : p_QmFqrParams->p_ContextA,
                          p_QmFqrParams->p_ContextB,
                          p_QmFqrParams->initParked,
                          p_QmFqr,
                          &p_QmFqr->p_Fqs[i]) != E_OK)
            {
                QM_FQR_Free(p_QmFqr);
                return NULL;
            }
    }
    return p_QmFqr;
}
2028
2029
/*
 * Release an FQR and everything it owns: the per-FQID qman_fq objects
 * (and their hardware FQs when not in shadow mode), the FQ pointer array,
 * the FQID range, and finally the FQR object itself.
 */
t_Error QM_FQR_Free(t_Handle h_QmFqr)
{
    t_QmFqr  *p_Fqr = (t_QmFqr *)h_QmFqr;
    uint32_t i;

    if (!p_Fqr)
        return ERROR_CODE(E_INVALID_HANDLE);

    if (p_Fqr->p_Fqs)
    {
        for (i = 0; i < p_Fqr->numOfFqids; i++)
        {
            if (!p_Fqr->p_Fqs[i])
                continue;
            /* Shadow FQRs never own the hardware FQ. */
            if (!p_Fqr->shadowMode)
                qm_free_fq(p_Fqr->h_QmPortal, p_Fqr->p_Fqs[i]);
            XX_FreeSmart(p_Fqr->p_Fqs[i]);
        }
        XX_Free(p_Fqr->p_Fqs);
    }

    /* The FQID range is owned (and thus returned) only in non-shadow mode. */
    if (!p_Fqr->shadowMode && p_Fqr->fqidBase)
        QmFqidPut(p_Fqr->h_Qm, p_Fqr->fqidBase);

    XX_FreeSmart(p_Fqr);

    return E_OK;
}
2056
2057
t_Error QM_FQR_FreeWDrain(t_Handle h_QmFqr,
2058
t_QmFqrDrainedCompletionCB *f_CompletionCB,
2059
bool deliverFrame,
2060
t_QmReceivedFrameCallback *f_CallBack,
2061
t_Handle h_App)
2062
{
2063
t_QmFqr *p_QmFqr = (t_QmFqr *)h_QmFqr;
2064
uint32_t i;
2065
2066
if (!p_QmFqr)
2067
return ERROR_CODE(E_INVALID_HANDLE);
2068
2069
if (p_QmFqr->shadowMode)
2070
RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("QM_FQR_FreeWDrain can't be called to shadow FQR!!!. call QM_FQR_Free"));
2071
2072
p_QmFqr->p_DrainedFqs = (bool *)XX_Malloc(sizeof(bool) * p_QmFqr->numOfFqids);
2073
if (!p_QmFqr->p_DrainedFqs)
2074
RETURN_ERROR(MAJOR, E_NO_MEMORY, ("QM Drained-FQs obj!!!. Try to Free without draining"));
2075
memset(p_QmFqr->p_DrainedFqs, 0, sizeof(bool) * p_QmFqr->numOfFqids);
2076
2077
if (f_CompletionCB)
2078
{
2079
p_QmFqr->f_CompletionCB = f_CompletionCB;
2080
p_QmFqr->h_App = h_App;
2081
}
2082
2083
if (deliverFrame)
2084
{
2085
if (!f_CallBack)
2086
{
2087
REPORT_ERROR(MAJOR, E_NULL_POINTER, ("f_CallBack must be given."));
2088
XX_Free(p_QmFqr->p_DrainedFqs);
2089
return ERROR_CODE(E_NULL_POINTER);
2090
}
2091
QM_FQR_RegisterCB(p_QmFqr, f_CallBack, h_App);
2092
}
2093
else
2094
QM_FQR_RegisterCB(p_QmFqr, drainCB, h_App);
2095
2096
for (i=0;i<p_QmFqr->numOfFqids;i++)
2097
{
2098
if (qman_retire_fq(p_QmFqr->h_QmPortal, p_QmFqr->p_Fqs[i], 0, true) != E_OK)
2099
RETURN_ERROR(MAJOR, E_INVALID_STATE, ("qman_retire_fq() failed!"));
2100
2101
if (p_QmFqr->p_Fqs[i]->flags & QMAN_FQ_STATE_CHANGING)
2102
DBG(INFO, ("fq %d currently in use, will be retired", p_QmFqr->p_Fqs[i]->fqid));
2103
else
2104
drainRetiredFq(p_QmFqr->p_Fqs[i]);
2105
}
2106
2107
if (!p_QmFqr->f_CompletionCB)
2108
{
2109
while(p_QmFqr->p_DrainedFqs) ;
2110
DBG(TRACE, ("QM-FQR with base %d completed", p_QmFqr->fqidBase));
2111
XX_FreeSmart(p_QmFqr->p_Fqs);
2112
if (p_QmFqr->fqidBase)
2113
QmFqidPut(p_QmFqr->h_Qm, p_QmFqr->fqidBase);
2114
XX_FreeSmart(p_QmFqr);
2115
}
2116
2117
return E_OK;
2118
}
2119
2120
/*
 * Install the dequeue (DQRR) callback and application handle on every FQ
 * of the range. Takes effect for frames dequeued after the call returns.
 *
 * Fix vs. original: the loop index was a signed 'int' compared against the
 * uint32_t numOfFqids (signed/unsigned comparison); it is now uint32_t,
 * consistent with the other loops in this file.
 */
t_Error QM_FQR_RegisterCB(t_Handle h_QmFqr, t_QmReceivedFrameCallback *f_CallBack, t_Handle h_App)
{
    t_QmFqr  *p_QmFqr = (t_QmFqr *)h_QmFqr;
    uint32_t i;

    SANITY_CHECK_RETURN_ERROR(p_QmFqr, E_INVALID_HANDLE);

    for (i = 0; i < p_QmFqr->numOfFqids; i++)
    {
        p_QmFqr->p_Fqs[i]->cb.dqrr = f_CallBack;
        p_QmFqr->p_Fqs[i]->h_App   = h_App;
    }

    return E_OK;
}
2135
2136
/*
 * Enqueue one frame descriptor to FQ (fqidBase + fqidOffset) through the
 * portal's enqueue command ring (EQCR). Returns E_BUSY when no EQCR entry
 * is available. If h_QmPortal is NULL, the device's default portal is used.
 */
t_Error QM_FQR_Enqueue(t_Handle h_QmFqr, t_Handle h_QmPortal, uint32_t fqidOffset, t_DpaaFD *p_Frame)
{
    t_QmFqr              *p_QmFqr = (t_QmFqr *)h_QmFqr;
    t_QmPortal           *p_QmPortal;
    struct qm_eqcr_entry *p_Eq;
    uint32_t             *p_Dst, *p_Src;
    const struct qman_fq *p_Fq;

    SANITY_CHECK_RETURN_ERROR(p_QmFqr, E_INVALID_HANDLE);
    SANITY_CHECK_RETURN_ERROR((fqidOffset < p_QmFqr->numOfFqids), E_INVALID_VALUE);

    /* Fall back to the default portal when the caller supplies none. */
    if (!h_QmPortal)
    {
        SANITY_CHECK_RETURN_ERROR(p_QmFqr->h_Qm, E_INVALID_HANDLE);
        h_QmPortal = QmGetPortalHandle(p_QmFqr->h_Qm);
        SANITY_CHECK_RETURN_ERROR(h_QmPortal, E_INVALID_HANDLE);
    }
    p_QmPortal = (t_QmPortal *)h_QmPortal;

    p_Fq = p_QmFqr->p_Fqs[fqidOffset];

#ifdef QM_CHECKING
    /* Optional debug checks: reject enqueue-disabled FQs and FQs that are
     * retired/out-of-service (unless marked NO_MODIFY). */
    if (p_Fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE)
        RETURN_ERROR(MINOR, E_INVALID_VALUE, NO_MSG);
    if ((!(p_Fq->flags & QMAN_FQ_FLAG_NO_MODIFY)) &&
        ((p_Fq->state == qman_fq_state_retired) ||
         (p_Fq->state == qman_fq_state_oos)))
        return ERROR_CODE(E_BUSY);
#endif /* QM_CHECKING */

    NCSW_PLOCK(p_QmPortal);
    p_Eq = try_eq_start(p_QmPortal);
    if (!p_Eq)
    {
        /* EQCR is full. */
        PUNLOCK(p_QmPortal);
        return ERROR_CODE(E_BUSY);
    }

    p_Eq->fqid = p_Fq->fqid;
    /* Tag the entry with the FQ object so ERN/DC-ERN handling can map it
     * back to the driver's state. */
    p_Eq->tag = aligned_int_from_ptr(p_Fq);
    /* gcc does a dreadful job of the following;
     * eq->fd = *fd;
     * It causes the entire function to save/restore a wider range of
     * registers, and comes up with instruction-waste galore. This will do
     * until we can rework the function for better code-generation. */
    p_Dst = (uint32_t *)&p_Eq->fd;
    p_Src = (uint32_t *)p_Frame;
    p_Dst[0] = p_Src[0];
    p_Dst[1] = p_Src[1];
    p_Dst[2] = p_Src[2];
    p_Dst[3] = p_Src[3];

    /* Publish the entry: writing the verb (with the valid-bit handled by
     * the PVB commit) hands the entry to hardware. */
    qmPortalEqcrPvbCommit(p_QmPortal->p_LowQmPortal,
                          (uint8_t)(QM_EQCR_VERB_CMD_ENQUEUE/* |
                          (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT))*/));
    PUNLOCK(p_QmPortal);

    return E_OK;
}
2195
2196
2197
/*
 * Pull one frame from FQ (fqidBase + fqidOffset) with an unscheduled
 * (PDQCR) dequeue. Only legal while the FQ is parked or out-of-service.
 * If h_QmPortal is NULL, the device's default portal is used.
 */
t_Error QM_FQR_PullFrame(t_Handle h_QmFqr, t_Handle h_QmPortal, uint32_t fqidOffset, t_DpaaFD *p_Frame)
{
    t_QmFqr        *p_Fqr = (t_QmFqr *)h_QmFqr;
    struct qman_fq *p_Fq;
    uint32_t       pdqcr;

    SANITY_CHECK_RETURN_ERROR(p_Fqr, E_INVALID_HANDLE);
    SANITY_CHECK_RETURN_ERROR((fqidOffset < p_Fqr->numOfFqids), E_INVALID_VALUE);
    SANITY_CHECK_RETURN_ERROR(p_Frame, E_NULL_POINTER);

    p_Fq = p_Fqr->p_Fqs[fqidOffset];
    SANITY_CHECK_RETURN_ERROR((p_Fq->state == qman_fq_state_oos) ||
                              (p_Fq->state == qman_fq_state_parked),
                              E_INVALID_STATE);

    if (!h_QmPortal)
    {
        SANITY_CHECK_RETURN_ERROR(p_Fqr->h_Qm, E_INVALID_HANDLE);
        h_QmPortal = QmGetPortalHandle(p_Fqr->h_Qm);
        SANITY_CHECK_RETURN_ERROR(h_QmPortal, E_INVALID_HANDLE);
    }

    pdqcr = QM_PDQCR_MODE_UNSCHEDULED | QM_PDQCR_FQID(p_Fq->fqid);
    return QmPortalPullFrame(h_QmPortal, pdqcr, p_Frame);
}
2219
2220
/*
 * Re-schedule FQ (fqidBase + fqidOffset) so hardware resumes dequeueing
 * from it. If h_QmPortal is NULL, the device's default portal is used.
 */
t_Error QM_FQR_Resume(t_Handle h_QmFqr, t_Handle h_QmPortal, uint32_t fqidOffset)
{
    t_QmFqr *p_Fqr = (t_QmFqr *)h_QmFqr;

    SANITY_CHECK_RETURN_ERROR(p_Fqr, E_INVALID_HANDLE);
    SANITY_CHECK_RETURN_ERROR((fqidOffset < p_Fqr->numOfFqids), E_INVALID_VALUE);

    if (!h_QmPortal)
    {
        SANITY_CHECK_RETURN_ERROR(p_Fqr->h_Qm, E_INVALID_HANDLE);
        h_QmPortal = QmGetPortalHandle(p_Fqr->h_Qm);
        SANITY_CHECK_RETURN_ERROR(h_QmPortal, E_INVALID_HANDLE);
    }
    return qman_schedule_fq(h_QmPortal, p_Fqr->p_Fqs[fqidOffset]);
}
2235
2236
/*
 * Mark FQ (fqidBase + fqidOffset) as waiting-parked so it is parked at the
 * next opportunity. Requires the FQ to have been created with the
 * HOLDACTIVE option; no portal access is needed (h_QmPortal is unused).
 */
t_Error QM_FQR_Suspend(t_Handle h_QmFqr, t_Handle h_QmPortal, uint32_t fqidOffset)
{
    t_QmFqr        *p_Fqr = (t_QmFqr *)h_QmFqr;
    struct qman_fq *p_Fq;

    SANITY_CHECK_RETURN_ERROR(p_Fqr, E_INVALID_HANDLE);
    SANITY_CHECK_RETURN_ERROR((fqidOffset < p_Fqr->numOfFqids), E_INVALID_VALUE);

    p_Fq = p_Fqr->p_Fqs[fqidOffset];
    SANITY_CHECK_RETURN_ERROR((p_Fq->flags & QM_FQCTRL_HOLDACTIVE), E_INVALID_STATE);

    UNUSED(h_QmPortal);
    p_Fq->state = qman_fq_state_waiting_parked;

    return E_OK;
}
2249
2250
/* Return the base (lowest) FQID of the range, or 0 on a bad handle. */
uint32_t QM_FQR_GetFqid(t_Handle h_QmFqr)
{
    t_QmFqr *p_Fqr = (t_QmFqr *)h_QmFqr;

    SANITY_CHECK_RETURN_VALUE(p_Fqr, E_INVALID_HANDLE, 0);

    return p_Fqr->fqidBase;
}
2258
2259
/*
 * Read a counter (frame count or byte count) of FQ (fqidBase + fqidOffset)
 * by querying the FQD's non-programmable fields. Returns 0 on query failure
 * or invalid arguments. If h_QmPortal is NULL, the default portal is used.
 */
uint32_t QM_FQR_GetCounter(t_Handle h_QmFqr, t_Handle h_QmPortal, uint32_t fqidOffset, e_QmFqrCounters counter)
{
    t_QmFqr                  *p_Fqr = (t_QmFqr *)h_QmFqr;
    struct qm_mcr_queryfq_np queryfq_np;

    SANITY_CHECK_RETURN_VALUE(p_Fqr, E_INVALID_HANDLE, 0);
    SANITY_CHECK_RETURN_VALUE((fqidOffset < p_Fqr->numOfFqids), E_INVALID_VALUE, 0);

    if (!h_QmPortal)
    {
        SANITY_CHECK_RETURN_VALUE(p_Fqr->h_Qm, E_INVALID_HANDLE, 0);
        h_QmPortal = QmGetPortalHandle(p_Fqr->h_Qm);
        SANITY_CHECK_RETURN_VALUE(h_QmPortal, E_INVALID_HANDLE, 0);
    }

    if (qman_query_fq_np(h_QmPortal, p_Fqr->p_Fqs[fqidOffset], &queryfq_np) != E_OK)
        return 0;

    switch (counter)
    {
        case e_QM_FQR_COUNTERS_FRAME:
            return queryfq_np.frm_cnt;
        case e_QM_FQR_COUNTERS_BYTE:
            return queryfq_np.byte_cnt;
        default:
            break;
    }
    /* should never get here */
    ASSERT_COND(FALSE);

    return 0;
}
2289
2290
2291
/*
 * Create a congestion group (CG): allocate a CG id, register it on a
 * portal, build an INITCGR management command (frame/byte mode, WRED
 * curves, tail drop, CSCN targets, CS threshold) and commit it.
 * Returns the CG handle, or NULL on failure.
 */
t_Handle QM_CG_Create(t_QmCgParams *p_CgParams)
{
    t_QmCg               *p_QmCg;
    t_QmPortal           *p_QmPortal;
    t_Error              err;
    uint32_t             wredParams;
    uint32_t             tmpA, tmpN, ta=0, tn=0;
    int                  gap, tmp;
    struct qm_mc_command *p_Mcc;
    struct qm_mc_result  *p_Mcr;

    SANITY_CHECK_RETURN_VALUE(p_CgParams, E_INVALID_HANDLE, NULL);
    SANITY_CHECK_RETURN_VALUE(p_CgParams->h_Qm, E_INVALID_HANDLE, NULL);

    /* DC portals 2 and 3 cannot be congestion-notification targets. */
    if(p_CgParams->notifyDcPortal &&
       ((p_CgParams->dcPortalId == e_DPAA_DCPORTAL2) || (p_CgParams->dcPortalId == e_DPAA_DCPORTAL3)))
    {
        REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("notifyDcPortal is invalid for this DC Portal"));
        return NULL;
    }

    /* Fall back to the default portal when the caller supplies none. */
    if (!p_CgParams->h_QmPortal)
    {
        p_QmPortal = QmGetPortalHandle(p_CgParams->h_Qm);
        SANITY_CHECK_RETURN_VALUE(p_QmPortal, E_INVALID_STATE, NULL);
    }
    else
        p_QmPortal = p_CgParams->h_QmPortal;

    p_QmCg = (t_QmCg *)XX_Malloc(sizeof(t_QmCg));
    if (!p_QmCg)
    {
        REPORT_ERROR(MAJOR, E_NO_MEMORY, ("QM CG obj!!!"));
        return NULL;
    }
    memset(p_QmCg, 0, sizeof(t_QmCg));

    /* build CG struct */
    p_QmCg->h_Qm = p_CgParams->h_Qm;
    p_QmCg->h_QmPortal = p_QmPortal;
    p_QmCg->h_App = p_CgParams->h_App;
    err = QmGetCgId(p_CgParams->h_Qm, &p_QmCg->id);
    if (err)
    {
        XX_Free(p_QmCg);
        REPORT_ERROR(MAJOR, E_INVALID_STATE, ("QmGetCgId failed"));
        return NULL;
    }

    NCSW_PLOCK(p_QmPortal);
    p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
    p_Mcc->initcgr.cgid = p_QmCg->id;

    err = QmPortalRegisterCg(p_QmPortal, p_QmCg, p_QmCg->id);
    if (err)
    {
        /* NOTE(review): the CG id obtained above is not returned via
         * QmFreeCgId on this (or later) failure paths — looks like an id
         * leak; confirm against the allocator's semantics. */
        XX_Free(p_QmCg);
        PUNLOCK(p_QmPortal);
        REPORT_ERROR(MAJOR, E_INVALID_STATE, ("QmPortalRegisterCg failed"));
        return NULL;
    }

    /* Build CGR command */
    {
#ifdef QM_CGS_NO_FRAME_MODE
        /* Rev 1.0 silicon does not support frame-count mode. */
        t_QmRevisionInfo revInfo;

        QmGetRevision(p_QmCg->h_Qm, &revInfo);

        if (!((revInfo.majorRev == 1) && (revInfo.minorRev == 0)))
#endif /* QM_CGS_NO_FRAME_MODE */
            if (p_CgParams->frameCount)
            {
                p_Mcc->initcgr.we_mask |= QM_CGR_WE_MODE;
                p_Mcc->initcgr.cgr.frame_mode = QM_CGR_EN;
            }
    }

    if (p_CgParams->wredEnable)
    {
        /* Program one WRED curve per enabled color. */
        if (p_CgParams->wredParams.enableGreen)
        {
            err = CalcWredCurve(&p_CgParams->wredParams.greenCurve, &wredParams);
            if(err)
            {
                XX_Free(p_QmCg);
                PUNLOCK(p_QmPortal);
                REPORT_ERROR(MAJOR, err, NO_MSG);
                return NULL;
            }
            p_Mcc->initcgr.we_mask |= QM_CGR_WE_WR_EN_G | QM_CGR_WE_WR_PARM_G;
            p_Mcc->initcgr.cgr.wr_en_g = QM_CGR_EN;
            p_Mcc->initcgr.cgr.wr_parm_g.word = wredParams;
        }
        if (p_CgParams->wredParams.enableYellow)
        {
            err = CalcWredCurve(&p_CgParams->wredParams.yellowCurve, &wredParams);
            if(err)
            {
                XX_Free(p_QmCg);
                PUNLOCK(p_QmPortal);
                REPORT_ERROR(MAJOR, err, NO_MSG);
                return NULL;
            }
            p_Mcc->initcgr.we_mask |= QM_CGR_WE_WR_EN_Y | QM_CGR_WE_WR_PARM_Y;
            p_Mcc->initcgr.cgr.wr_en_y = QM_CGR_EN;
            p_Mcc->initcgr.cgr.wr_parm_y.word = wredParams;
        }
        if (p_CgParams->wredParams.enableRed)
        {
            err = CalcWredCurve(&p_CgParams->wredParams.redCurve, &wredParams);
            if(err)
            {
                XX_Free(p_QmCg);
                PUNLOCK(p_QmPortal);
                REPORT_ERROR(MAJOR, err, NO_MSG);
                return NULL;
            }
            p_Mcc->initcgr.we_mask |= QM_CGR_WE_WR_EN_R | QM_CGR_WE_WR_PARM_R;
            p_Mcc->initcgr.cgr.wr_en_r = QM_CGR_EN;
            p_Mcc->initcgr.cgr.wr_parm_r.word = wredParams;
        }
    }

    if (p_CgParams->tailDropEnable)
    {
        /* Tail drop is driven by the CS threshold; it must be non-zero. */
        if (!p_CgParams->threshold)
        {
            XX_Free(p_QmCg);
            PUNLOCK(p_QmPortal);
            REPORT_ERROR(MINOR, E_INVALID_STATE, ("tailDropThreshold must be configured if tailDropEnable "));
            return NULL;
        }
        p_Mcc->initcgr.cgr.cstd_en = QM_CGR_EN;
        p_Mcc->initcgr.we_mask |= QM_CGR_WE_CSTD_EN;
    }

    if (p_CgParams->threshold)
    {
        p_Mcc->initcgr.we_mask |= QM_CGR_WE_CS_THRES;
        p_QmCg->f_Exception = p_CgParams->f_Exception;
        if (p_QmCg->f_Exception || p_CgParams->notifyDcPortal)
        {
            p_Mcc->initcgr.cgr.cscn_en = QM_CGR_EN;
            p_Mcc->initcgr.we_mask |= QM_CGR_WE_CSCN_EN | QM_CGR_WE_CSCN_TARG;
            /* if SW - set target, if HW - if FM, set HW target, otherwize, set SW target */
            p_Mcc->initcgr.cgr.cscn_targ = 0;
            if (p_QmCg->f_Exception)
                p_Mcc->initcgr.cgr.cscn_targ = (uint32_t)QM_CGR_TARGET_SWP(QmPortalGetSwPortalId(p_QmCg->h_QmPortal));
            if (p_CgParams->notifyDcPortal)
                p_Mcc->initcgr.cgr.cscn_targ |= (uint32_t)QM_CGR_TARGET_DCP(p_CgParams->dcPortalId);
        }

        /* express thresh as ta*2^tn — exhaustive search for the closest
         * mantissa/exponent pair representable in the CGR threshold field. */
        gap = (int)p_CgParams->threshold;
        for (tmpA=0 ; tmpA<256; tmpA++ )
            for (tmpN=0 ; tmpN<32; tmpN++ )
            {
                tmp = ABS((int)(p_CgParams->threshold - tmpA*(1<<tmpN)));
                if (tmp < gap)
                {
                    ta = tmpA;
                    tn = tmpN;
                    gap = tmp;
                }
            }
        p_Mcc->initcgr.cgr.cs_thres.TA = ta;
        p_Mcc->initcgr.cgr.cs_thres.Tn = tn;
    }
    else if(p_CgParams->f_Exception)
    {
        XX_Free(p_QmCg);
        PUNLOCK(p_QmPortal);
        REPORT_ERROR(MINOR, E_INVALID_STATE, ("No threshold configured, but f_Exception defined"));
        return NULL;
    }

    /* Commit INITCGR and busy-wait for the management-command result. */
    qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_INITCGR);
    while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
    ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_INITCGR);
    if (p_Mcr->result != QM_MCR_RESULT_OK)
    {
        XX_Free(p_QmCg);
        PUNLOCK(p_QmPortal);
        REPORT_ERROR(MINOR, E_INVALID_STATE, ("INITCGR failed: %s", mcr_result_str(p_Mcr->result)));
        return NULL;
    }
    PUNLOCK(p_QmPortal);

    return p_QmCg;
}
2482
2483
/*
 * Tear down a congestion group: return its id to the allocator, unregister
 * it from the portal, and reset the CGR in hardware via a MODIFYCGR command
 * with the full write-enable mask (all fields written from the zeroed
 * command). Frees the CG object on success.
 */
t_Error QM_CG_Free(t_Handle h_QmCg)
{

    t_QmCg               *p_QmCg = (t_QmCg *)h_QmCg;
    t_Error              err;
    struct qm_mc_command *p_Mcc;
    struct qm_mc_result  *p_Mcr;
    t_QmPortal           *p_QmPortal;

    SANITY_CHECK_RETURN_ERROR(p_QmCg, E_INVALID_HANDLE);

    p_QmPortal = (t_QmPortal *)p_QmCg->h_QmPortal;

    NCSW_PLOCK(p_QmPortal);
    p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
    p_Mcc->initcgr.cgid = p_QmCg->id;
    p_Mcc->initcgr.we_mask = QM_CGR_WE_MASK;

    err = QmFreeCgId(p_QmCg->h_Qm, p_QmCg->id);
    if(err)
    {
        /* NOTE(review): the MC command started above is abandoned without
         * a commit on this path — confirm the low-level portal tolerates
         * an uncommitted qm_mc_start. */
        XX_Free(p_QmCg);
        PUNLOCK(p_QmPortal);
        RETURN_ERROR(MAJOR, E_INVALID_STATE, ("QmFreeCgId failed"));
    }

    err = QmPortalUnregisterCg(p_QmCg->h_QmPortal, p_QmCg->id);
    if(err)
    {
        XX_Free(p_QmCg);
        PUNLOCK(p_QmPortal);
        RETURN_ERROR(MAJOR, E_INVALID_STATE, ("QmPortalUnregisterCg failed"));
    }

    /* Commit MODIFYCGR and busy-wait for the result. */
    qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_MODIFYCGR);
    while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
    ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_MODIFYCGR);
    if (p_Mcr->result != QM_MCR_RESULT_OK)
    {
        /* NOTE(review): p_QmCg is NOT freed on this path although its id
         * was already released — verify intended ownership on failure. */
        PUNLOCK(p_QmPortal);
        RETURN_ERROR(MINOR, E_INVALID_STATE, ("INITCGR failed: %s", mcr_result_str(p_Mcr->result)));
    }
    PUNLOCK(p_QmPortal);

    XX_Free(p_QmCg);

    return E_OK;
}
2531
2532
/*
 * Enable or disable congestion-state-change notification (CSCN) for a CG
 * via a MODIFYCGR command. Only e_QM_EX_CG_STATE_CHANGE is a legal
 * exception; an exception callback must have been set at CG creation.
 */
t_Error QM_CG_SetException(t_Handle h_QmCg, e_QmExceptions exception, bool enable)
{
    t_QmCg               *p_QmCg = (t_QmCg *)h_QmCg;
    struct qm_mc_command *p_Mcc;
    struct qm_mc_result  *p_Mcr;
    t_QmPortal           *p_QmPortal;

    SANITY_CHECK_RETURN_ERROR(p_QmCg, E_INVALID_HANDLE);

    p_QmPortal = (t_QmPortal *)p_QmCg->h_QmPortal;
    if (!p_QmCg->f_Exception)
        RETURN_ERROR(MINOR, E_INVALID_VALUE, ("Either threshold or exception callback was not configured."));

    NCSW_PLOCK(p_QmPortal);
    p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
    p_Mcc->initcgr.cgid = p_QmCg->id;
    /* Only the CSCN-enable field is written by this command. */
    p_Mcc->initcgr.we_mask = QM_CGR_WE_CSCN_EN;

    if(exception == e_QM_EX_CG_STATE_CHANGE)
    {
        /* cscn_en stays 0 (disabled) when 'enable' is false; the command
         * struct was zeroed by qm_mc_start. */
        if(enable)
            p_Mcc->initcgr.cgr.cscn_en = QM_CGR_EN;
    }
    else
    {
        PUNLOCK(p_QmPortal);
        RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal exception"));
    }

    /* Commit MODIFYCGR and busy-wait for the result. */
    qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_MODIFYCGR);
    while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
    ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_MODIFYCGR);
    if (p_Mcr->result != QM_MCR_RESULT_OK)
    {
        PUNLOCK(p_QmPortal);
        RETURN_ERROR(MINOR, E_INVALID_STATE, ("INITCGR failed: %s", mcr_result_str(p_Mcr->result)));
    }
    PUNLOCK(p_QmPortal);

    return E_OK;
}
2573
2574
/*
 * Replace the WRED curve of one color on an existing CG. Two-phase
 * management-command sequence: QUERYCGR first to verify WRED is enabled
 * for the requested color, then MODIFYCGR carrying the new curve.
 */
t_Error QM_CG_ModifyWredCurve(t_Handle h_QmCg, t_QmCgModifyWredParams *p_QmCgModifyParams)
{
    t_QmCg               *p_QmCg = (t_QmCg *)h_QmCg;
    uint32_t             wredParams;
    struct qm_mc_command *p_Mcc;
    struct qm_mc_result  *p_Mcr;
    t_QmPortal           *p_QmPortal;
    t_Error              err = E_OK;

    SANITY_CHECK_RETURN_ERROR(p_QmCg, E_INVALID_HANDLE);

    p_QmPortal = (t_QmPortal *)p_QmCg->h_QmPortal;

    NCSW_PLOCK(p_QmPortal);
    p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
    p_Mcc->initcgr.cgid = p_QmCg->id;

    /* Phase 1: query current CGR state (busy-wait for the result). */
    qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_QUERYCGR);
    while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
    ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR);
    if (p_Mcr->result != QM_MCR_RESULT_OK)
    {
        PUNLOCK(p_QmPortal);
        RETURN_ERROR(MINOR, E_INVALID_STATE, ("QM_MCC_VERB_QUERYCGR failed: %s", mcr_result_str(p_Mcr->result)));
    }

    /* WRED must already be enabled for the color being modified. */
    switch(p_QmCgModifyParams->color)
    {
        case(e_QM_CG_COLOR_GREEN):
            if(!p_Mcr->querycgr.cgr.wr_en_g)
            {
                PUNLOCK(p_QmPortal);
                RETURN_ERROR(MINOR, E_INVALID_STATE, ("WRED is not enabled for green"));
            }
            break;
        case(e_QM_CG_COLOR_YELLOW):
            if(!p_Mcr->querycgr.cgr.wr_en_y)
            {
                PUNLOCK(p_QmPortal);
                RETURN_ERROR(MINOR, E_INVALID_STATE, ("WRED is not enabled for yellow"));
            }
            break;
        case(e_QM_CG_COLOR_RED):
            if(!p_Mcr->querycgr.cgr.wr_en_r)
            {
                PUNLOCK(p_QmPortal);
                RETURN_ERROR(MINOR, E_INVALID_STATE, ("WRED is not enabled for red"));
            }
            break;
    }

    /* Phase 2: build and commit the MODIFYCGR command with the new curve. */
    p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
    p_Mcc->initcgr.cgid = p_QmCg->id;

    switch(p_QmCgModifyParams->color)
    {
        case(e_QM_CG_COLOR_GREEN):
            err = CalcWredCurve(&p_QmCgModifyParams->wredParams, &wredParams);
            p_Mcc->initcgr.we_mask |= QM_CGR_WE_WR_EN_G | QM_CGR_WE_WR_PARM_G;
            p_Mcc->initcgr.cgr.wr_en_g = QM_CGR_EN;
            p_Mcc->initcgr.cgr.wr_parm_g.word = wredParams;
            break;
        case(e_QM_CG_COLOR_YELLOW):
            err = CalcWredCurve(&p_QmCgModifyParams->wredParams, &wredParams);
            p_Mcc->initcgr.we_mask |= QM_CGR_WE_WR_EN_Y | QM_CGR_WE_WR_PARM_Y;
            p_Mcc->initcgr.cgr.wr_en_y = QM_CGR_EN;
            p_Mcc->initcgr.cgr.wr_parm_y.word = wredParams;
            break;
        case(e_QM_CG_COLOR_RED):
            err = CalcWredCurve(&p_QmCgModifyParams->wredParams, &wredParams);
            p_Mcc->initcgr.we_mask |= QM_CGR_WE_WR_EN_R | QM_CGR_WE_WR_PARM_R;
            p_Mcc->initcgr.cgr.wr_en_r = QM_CGR_EN;
            p_Mcc->initcgr.cgr.wr_parm_r.word = wredParams;
            break;
    }
    if (err)
    {
        PUNLOCK(p_QmPortal);
        RETURN_ERROR(MINOR, err, NO_MSG);
    }

    qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_MODIFYCGR);
    while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
    ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_MODIFYCGR);
    if (p_Mcr->result != QM_MCR_RESULT_OK)
    {
        PUNLOCK(p_QmPortal);
        RETURN_ERROR(MINOR, E_INVALID_STATE, ("INITCGR failed: %s", mcr_result_str(p_Mcr->result)));
    }
    PUNLOCK(p_QmPortal);

    return E_OK;
}
2667
2668
/*
 * Change the congestion-state (tail-drop) threshold of an existing CG.
 * Two-phase sequence: QUERYCGR to verify tail drop is enabled, then
 * MODIFYCGR carrying the threshold encoded as TA*2^Tn.
 */
t_Error QM_CG_ModifyTailDropThreshold(t_Handle h_QmCg, uint32_t threshold)
{
    t_QmCg               *p_QmCg = (t_QmCg *)h_QmCg;
    struct qm_mc_command *p_Mcc;
    struct qm_mc_result  *p_Mcr;
    t_QmPortal           *p_QmPortal;
    uint32_t             tmpA, tmpN, ta=0, tn=0;
    int                  gap, tmp;

    SANITY_CHECK_RETURN_ERROR(p_QmCg, E_INVALID_HANDLE);

    p_QmPortal = (t_QmPortal *)p_QmCg->h_QmPortal;

    NCSW_PLOCK(p_QmPortal);
    p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
    p_Mcc->initcgr.cgid = p_QmCg->id;

    /* Phase 1: query current CGR state (busy-wait for the result). */
    qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_QUERYCGR);
    while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
    ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR);
    if (p_Mcr->result != QM_MCR_RESULT_OK)
    {
        PUNLOCK(p_QmPortal);
        RETURN_ERROR(MINOR, E_INVALID_STATE, ("QM_MCC_VERB_QUERYCGR failed: %s", mcr_result_str(p_Mcr->result)));
    }

    /* Tail drop must have been enabled at CG creation. */
    if(!p_Mcr->querycgr.cgr.cstd_en)
    {
        PUNLOCK(p_QmPortal);
        RETURN_ERROR(MINOR, E_INVALID_STATE, ("Tail Drop is not enabled!"));
    }

    /* Phase 2: write only the CS-threshold field. */
    p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
    p_Mcc->initcgr.cgid = p_QmCg->id;
    p_Mcc->initcgr.we_mask |= QM_CGR_WE_CS_THRES;

    /* express thresh as ta*2^tn — exhaustive search for the closest
     * mantissa/exponent pair representable in the CGR threshold field. */
    gap = (int)threshold;
    for (tmpA=0 ; tmpA<256; tmpA++ )
        for (tmpN=0 ; tmpN<32; tmpN++ )
        {
            tmp = ABS((int)(threshold - tmpA*(1<<tmpN)));
            if (tmp < gap)
            {
                ta = tmpA;
                tn = tmpN;
                gap = tmp;
            }
        }
    p_Mcc->initcgr.cgr.cs_thres.TA = ta;
    p_Mcc->initcgr.cgr.cs_thres.Tn = tn;

    qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_MODIFYCGR);
    while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
    ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_MODIFYCGR);
    if (p_Mcr->result != QM_MCR_RESULT_OK)
    {
        PUNLOCK(p_QmPortal);
        RETURN_ERROR(MINOR, E_INVALID_STATE, ("INITCGR failed: %s", mcr_result_str(p_Mcr->result)));
    }
    PUNLOCK(p_QmPortal);

    return E_OK;
}
2732
2733
2734