Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
freebsd
GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/contrib/ncsw/Peripherals/BM/bm_portal.c
48375 views
1
/******************************************************************************
2
3
© 1995-2003, 2004, 2005-2011 Freescale Semiconductor, Inc.
4
All rights reserved.
5
6
This is proprietary source code of Freescale Semiconductor Inc.,
7
and its use is subject to the NetComm Device Drivers EULA.
8
The copyright notice above does not evidence any actual or intended
9
publication of such source code.
10
11
ALTERNATIVELY, redistribution and use in source and binary forms, with
12
or without modification, are permitted provided that the following
13
conditions are met:
14
* Redistributions of source code must retain the above copyright
15
notice, this list of conditions and the following disclaimer.
16
* Redistributions in binary form must reproduce the above copyright
17
notice, this list of conditions and the following disclaimer in the
18
documentation and/or other materials provided with the distribution.
19
* Neither the name of Freescale Semiconductor nor the
20
names of its contributors may be used to endorse or promote products
21
derived from this software without specific prior written permission.
22
23
THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
24
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26
DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33
*
34
35
**************************************************************************/
36
/******************************************************************************
37
@File          bm_portal.c
38
39
@Description BM
40
*//***************************************************************************/
41
#include "error_ext.h"
42
#include "std_ext.h"
43
#include "string_ext.h"
44
#include "mem_ext.h"
45
#include "core_ext.h"
46
47
#include "bm.h"
48
49
50
#define __ERR_MODULE__ MODULE_BM
51
52
53
/****************************************/
54
/* static functions */
55
/****************************************/
56
57
static uint32_t __poll_portal_slow(t_BmPortal *p);
58
static void __poll_portal_fast(t_BmPortal *p);
59
60
/* Portal interrupt handler */
61
static void portal_isr(void *ptr)
62
{
63
t_BmPortal *portal = ptr;
64
/* Only do fast-path handling if it's required */
65
if (portal->flags & BMAN_PORTAL_FLAG_IRQ_FAST)
66
__poll_portal_fast(portal);
67
__poll_portal_slow(portal);
68
69
}
70
71
/**
72
* bman_create_portal - Manage a Bman s/w portal
73
* @portal: the s/w corenet portal to use
74
* @flags: bit-mask of BMAN_PORTAL_FLAG_*** options
75
* @pools: bit-array of buffer pools available to this portal
76
* @portal_ctx: opaque user-supplied data to be associated with the portal
77
*
78
* Creates a managed portal object. @irq is only used if @flags specifies
79
* BMAN_PORTAL_FLAG_IRQ. @pools is copied, so the caller can do as they please
80
* with it after the function returns. It will only be possible to configure
81
* buffer pool objects as "suppliers" if they are specified in @pools, and the
82
* driver will only track depletion state changes to the same subset of buffer
83
* pools. If @pools is NULL, buffer pool depletion state will not be tracked.
84
* If the BMAN_PORTAL_FLAG_RECOVER flag is specified, then the function will
85
* attempt to expire any existing RCR entries, otherwise the function will fail
86
* if RCR is non-empty. If the BMAN_PORTAL_FLAG_WAIT flag is set, the function
87
* is allowed to block waiting for expiration of RCR. BMAN_PORTAL_FLAG_WAIT_INT
88
* makes any blocking interruptible.
89
*/
90
91
/* Second-stage portal bring-up: selects RCR commit/update/prefetch
 * callbacks, initializes the RCR and MC sub-APIs, masks all per-pool
 * depletion notifications, clears stale interrupt status and
 * (optionally) hooks up the portal IRQ.
 *
 * NOTE(review): every failure path here XX_Free()s p_BmPortal itself,
 * yet the only visible caller (BM_PORTAL_Init) reacts to failure by
 * calling BM_PORTAL_Free() on the same handle, which frees it again —
 * looks like a double free; confirm and fix on one side. Also,
 * p_BmPortal->p_BmPortalLow is not freed on these paths (leak). */
static t_Error bman_create_portal(t_BmPortal *p_BmPortal,
                                  uint32_t flags,
                                  const struct bman_depletion *pools)
{
    int ret = 0;
    uint8_t bpid = 0;
    e_BmPortalRcrConsumeMode rcr_cmode;
    e_BmPortalProduceMode pmode;

    /* Produce mode is fixed to PVB; consume mode follows the CACHE flag. */
    pmode = e_BmPortalPVB;
    rcr_cmode = (flags & BMAN_PORTAL_FLAG_CACHE) ? e_BmPortalRcrCCE : e_BmPortalRcrCCI;

    /* Pick the commit callback matching the produce mode. */
    switch (pmode)
    {
        case e_BmPortalPCI:
            p_BmPortal->cbs[BM_RCR_RING].f_BmCommitCb = bm_rcr_pci_commit;
            break;
        case e_BmPortalPCE:
            p_BmPortal->cbs[BM_RCR_RING].f_BmCommitCb = bm_rcr_pce_commit;
            break;
        case e_BmPortalPVB:
            p_BmPortal->cbs[BM_RCR_RING].f_BmCommitCb = bm_rcr_pvb_commit;
            break;
    }
    /* Pick update/prefetch callbacks matching the consume mode. */
    switch (rcr_cmode)
    {
        case e_BmPortalRcrCCI:
            p_BmPortal->cbs[BM_RCR_RING].f_BmUpdateCb = bm_rcr_cci_update;
            p_BmPortal->cbs[BM_RCR_RING].f_BmPrefetchCb = NULL;
            break;
        case e_BmPortalRcrCCE:
            p_BmPortal->cbs[BM_RCR_RING].f_BmUpdateCb = bm_rcr_cce_update;
            p_BmPortal->cbs[BM_RCR_RING].f_BmPrefetchCb = bm_rcr_cce_prefetch;
            break;
    }

    if (bm_rcr_init(p_BmPortal->p_BmPortalLow, pmode, rcr_cmode)) {
        REPORT_ERROR(MAJOR, E_INVALID_STATE, ("RCR initialization failed"));
        goto fail_rcr;
    }
    if (bm_mc_init(p_BmPortal->p_BmPortalLow)) {
        REPORT_ERROR(MAJOR, E_INVALID_STATE, ("MC initialization failed"));
        goto fail_mc;
    }
    /* pools[0] = pools we care about; pools[1] = previously observed
     * depletion state (starts empty, updated by __poll_portal_slow). */
    p_BmPortal->pools[0] = *pools;
    bman_depletion_init(&p_BmPortal->pools[1]);
    while (bpid < BM_MAX_NUM_OF_POOLS) {
        /* Default to all BPIDs disabled, we enable as required
         * at run-time. */
        bm_isr_bscn_mask(p_BmPortal->p_BmPortalLow, bpid, 0);
        bpid++;
    }
    p_BmPortal->flags = flags;
    p_BmPortal->slowpoll = 0;
    p_BmPortal->rcrProd = p_BmPortal->rcrCons = 0;
    memset(&p_BmPortal->depletionPoolsTable, 0, sizeof(p_BmPortal->depletionPoolsTable));
    /* Write-to-clear any stale interrupt status bits */
    bm_isr_disable_write(p_BmPortal->p_BmPortalLow, 0xffffffff);
    bm_isr_status_clear(p_BmPortal->p_BmPortalLow, 0xffffffff);
    bm_isr_enable_write(p_BmPortal->p_BmPortalLow, BM_PIRQ_RCRI | BM_PIRQ_BSCN);
    if (flags & BMAN_PORTAL_FLAG_IRQ)
    {
        XX_SetIntr(p_BmPortal->irq, portal_isr, p_BmPortal);
        XX_EnableIntr(p_BmPortal->irq);
        /* Enable the bits that make sense */
        bm_isr_uninhibit(p_BmPortal->p_BmPortalLow);
    } else
        /* without IRQ, we can't block */
        flags &= ~BMAN_PORTAL_FLAG_WAIT;
    /* Need RCR to be empty before continuing */
    bm_isr_disable_write(p_BmPortal->p_BmPortalLow, (uint32_t)~BM_PIRQ_RCRI);
    /* NOTE(review): fill level is only sampled unless both RECOVER and
     * WAIT are set — presumably recovery-with-wait is allowed to drain
     * RCR later; confirm the intent. */
    if (!(flags & BMAN_PORTAL_FLAG_RECOVER) ||
        !(flags & BMAN_PORTAL_FLAG_WAIT))
        ret = bm_rcr_get_fill(p_BmPortal->p_BmPortalLow);
    if (ret) {
        REPORT_ERROR(MAJOR, E_INVALID_STATE, ("RCR unclean, need recovery"));
        goto fail_rcr_empty;
    }
    bm_isr_disable_write(p_BmPortal->p_BmPortalLow, 0);
    return E_OK;
fail_rcr_empty:
    bm_mc_finish(p_BmPortal->p_BmPortalLow);
fail_mc:
    bm_rcr_finish(p_BmPortal->p_BmPortalLow);
fail_rcr:
    XX_Free(p_BmPortal);
    return ERROR_CODE(E_INVALID_STATE);
}
179
180
/* Tear down a portal created by bman_create_portal(): flush RCR
 * consumption state, detach the IRQ if one was attached, shut down the
 * MC and RCR sub-APIs, and free the low-level portal object. */
static void bman_destroy_portal(t_BmPortal* p_Portal)
{
    struct bm_portal *p_Low = p_Portal->p_BmPortalLow;

    BmUpdate(p_Portal, BM_RCR_RING);
    if (p_Portal->flags & BMAN_PORTAL_FLAG_IRQ)
    {
        XX_DisableIntr(p_Portal->irq);
        XX_FreeIntr(p_Portal->irq);
    }
    bm_mc_finish(p_Low);
    bm_rcr_finish(p_Low);
    XX_Free(p_Low);
}
192
193
/* When release logic waits on available RCR space, we need a global waitqueue
194
* in the case of "affine" use (as the waits wake on different cpus which means
195
* different portals - so we can't wait on any per-portal waitqueue). */
196
197
/* Slow-path portal servicing: handles pool depletion state-change
 * notifications (BSCN) and RCR ring interrupts (RCRI). Returns the raw
 * interrupt status word read on entry. */
static uint32_t __poll_portal_slow(t_BmPortal* p_BmPortal)
{
    struct bman_depletion tmp;
    t_BmPool *p_BmPool;
    uint32_t ret,is = bm_isr_status_read(p_BmPortal->p_BmPortalLow);
    ret = is;

    /* There is a gotcha to be aware of. If we do the query before clearing
     * the status register, we may miss state changes that occur between the
     * two. If we write to clear the status register before the query, the
     * cache-enabled query command may overtake the status register write
     * unless we use a heavyweight sync (which we don't want). Instead, we
     * write-to-clear the status register then *read it back* before doing
     * the query, hence the odd while loop with the 'is' accumulation. */
    if (is & BM_PIRQ_BSCN) {
        uint32_t i, j;
        uint32_t __is;
        bm_isr_status_clear(p_BmPortal->p_BmPortalLow, BM_PIRQ_BSCN);
        while ((__is = bm_isr_status_read(p_BmPortal->p_BmPortalLow)) & BM_PIRQ_BSCN) {
            is |= __is;
            bm_isr_status_clear(p_BmPortal->p_BmPortalLow, BM_PIRQ_BSCN);
        }
        is &= ~BM_PIRQ_BSCN;
        /* Snapshot the current depletion state of all pools. */
        BmPortalQuery(p_BmPortal, &tmp, TRUE);
        /* Two 32-bit words cover the 64 possible pool ids. */
        for (i = 0; i < 2; i++) {
            uint32_t idx = i * 32;
            /* tmp is a mask of currently-depleted pools.
             * pools[0] is mask of those we care about.
             * pools[1] is our previous view (we only want to
             * be told about changes). */
            tmp.__state[i] &= p_BmPortal->pools[0].__state[i];
            if (tmp.__state[i] == p_BmPortal->pools[1].__state[i])
                /* fast-path, nothing to see, move along */
                continue;
            /* Walk each bit and fire the pool's depletion callback on
             * every before/after transition. */
            for (j = 0; j <= 31; j++, idx++) {
                int b4 = bman_depletion_get(&p_BmPortal->pools[1], (uint8_t)idx);
                int af = bman_depletion_get(&tmp, (uint8_t)idx);
                if (b4 == af)
                    continue;
                p_BmPool = p_BmPortal->depletionPoolsTable[idx];
                ASSERT_COND(p_BmPool->f_Depletion);
                p_BmPool->f_Depletion(p_BmPool->h_App, (bool)af);
            }
        }
        /* Remember this snapshot as the new "previous view". */
        p_BmPortal->pools[1] = tmp;
    }

    if (is & BM_PIRQ_RCRI) {
        /* Consume completed RCR entries and disarm the threshold. */
        NCSW_PLOCK(p_BmPortal);
        p_BmPortal->rcrCons += BmUpdate(p_BmPortal, BM_RCR_RING);
        bm_rcr_set_ithresh(p_BmPortal->p_BmPortalLow, 0);
        PUNLOCK(p_BmPortal);
        bm_isr_status_clear(p_BmPortal->p_BmPortalLow, BM_PIRQ_RCRI);
        is &= ~BM_PIRQ_RCRI;
    }

    /* There should be no status register bits left undefined */
    ASSERT_COND(!is);
    return ret;
}
257
258
/* Fast-path poll: currently a stub, reserved for optimised RCR
 * consumption tracking. */
static void __poll_portal_fast(t_BmPortal* p_BmPortal)
{
    /* No fast-path work is implemented yet. */
    UNUSED(p_BmPortal);
}
264
265
266
/* Arm the RCR interrupt threshold. When @check is non-zero, only write
 * the threshold if it is not already set. */
static __inline__ void rel_set_thresh(t_BmPortal *p_BmPortal, int check)
{
    struct bm_portal *p_Low = p_BmPortal->p_BmPortalLow;

    if (check && bm_rcr_get_ithresh(p_Low))
        return;
    bm_rcr_set_ithresh(p_Low, RCR_ITHRESH);
}
271
272
/* Used as a wait_event() expression. If it returns non-NULL, any lock will
273
* remain held. */
274
/* Try to claim an RCR entry for a release. On success the portal lock
 * remains HELD and the entry is returned; on failure the threshold
 * interrupt is armed, the lock is dropped, and NULL is returned. */
static struct bm_rcr_entry *try_rel_start(t_BmPortal *p_BmPortal)
{
    struct bm_rcr_entry *p_Entry;

    NCSW_PLOCK(p_BmPortal);
    /* Reclaim consumed entries first if the ring is running low. */
    if (bm_rcr_get_avail(p_BmPortal->p_BmPortalLow) < RCR_THRESH)
        BmUpdate(p_BmPortal, BM_RCR_RING);
    p_Entry = bm_rcr_start(p_BmPortal->p_BmPortalLow);
    if (p_Entry == NULL) {
        rel_set_thresh(p_BmPortal, 1);
        PUNLOCK(p_BmPortal);
    }
    return p_Entry;
}
288
289
/* Repeatedly attempt try_rel_start() (up to 100 times, 1ms apart)
 * until an RCR entry is claimed. On success *rel is non-NULL and the
 * portal lock is held (per try_rel_start's contract); on timeout
 * returns E_BUSY with *rel == NULL.
 *
 * BUG FIX: the original slept 1ms unconditionally after EVERY attempt,
 * including the successful one — i.e. it slept while holding the
 * portal lock just taken by try_rel_start(). Sleep only after a failed
 * attempt. */
static __inline__ t_Error wait_rel_start(t_BmPortal *p_BmPortal,
                                         struct bm_rcr_entry **rel,
                                         uint32_t flags)
{
    int tries = 100;

    UNUSED(flags);
    do {
        *rel = try_rel_start(p_BmPortal);
        if (*rel)
            break;
        XX_Sleep(1);
    } while (--tries);

    if (!(*rel))
        return ERROR_CODE(E_BUSY);

    return E_OK;
}
306
307
/* This copies Qman's eqcr_completed() routine, see that for details */
/* Returns non-zero once every release up to the captured producer
 * count @rcr_poll has been consumed; otherwise re-arms the RCR
 * interrupt threshold and returns 0. The top bits act as a wrap-around
 * disambiguator for the counter comparison. */
static int rel_completed(t_BmPortal *p_BmPortal, uint32_t rcr_poll)
{
    uint32_t tr_cons = p_BmPortal->rcrCons;
    if (rcr_poll & 0xc0000000) {
        /* Strip the wrap bits and flip the consumer's MSB so the
         * unsigned comparisons below remain meaningful across wrap. */
        rcr_poll &= 0x7fffffff;
        tr_cons ^= 0x80000000;
    }
    if (tr_cons >= rcr_poll)
        return 1;
    if ((rcr_poll - tr_cons) > BM_RCR_SIZE)
        return 1;
    if (!bm_rcr_get_fill(p_BmPortal->p_BmPortalLow))
        /* If RCR is empty, we must have completed */
        return 1;
    /* Not done yet — make sure the threshold interrupt stays armed. */
    rel_set_thresh(p_BmPortal, 0);
    return 0;
}
325
326
/* Commit @num buffers as a single-BPID RCR entry and drop the portal
 * lock. Must be entered with the portal lock held (it is taken by a
 * successful try_rel_start()). If BMAN_RELEASE_FLAG_WAIT_SYNC is set,
 * additionally check consumption progress after unlocking. */
static __inline__ void rel_commit(t_BmPortal *p_BmPortal, uint32_t flags,uint8_t num)
{
    uint32_t rcr_poll;

    BmCommit(p_BmPortal, BM_RCR_RING, (uint8_t)(BM_RCR_VERB_CMD_BPID_SINGLE | (num & BM_RCR_VERB_BUFCOUNT_MASK)));
    /* increment the producer count and capture it for SYNC */
    rcr_poll = ++p_BmPortal->rcrProd;
    if ((flags & BMAN_RELEASE_FLAG_WAIT_SYNC) ==
        BMAN_RELEASE_FLAG_WAIT_SYNC)
        /* Arm the threshold interrupt before unlocking so progress is
         * signalled. */
        rel_set_thresh(p_BmPortal, 1);
    PUNLOCK(p_BmPortal);
    if ((flags & BMAN_RELEASE_FLAG_WAIT_SYNC) !=
        BMAN_RELEASE_FLAG_WAIT_SYNC)
        return;
    /* NOTE(review): rel_completed() is invoked once here, not looped —
     * confirm whether WAIT_SYNC is expected to wait until completion
     * or merely perform a single progress check. */
    rel_completed(p_BmPortal, rcr_poll);
}
342
343
344
/****************************************/
345
/* Inter-Module functions */
346
/****************************************/
347
348
/**
349
* bman_release - Release buffer(s) to the buffer pool
350
* @p_BmPool: the buffer pool object to release to
351
* @bufs: an array of buffers to release
352
* @num: the number of buffers in @bufs (1-8)
353
* @flags: bit-mask of BMAN_RELEASE_FLAG_*** options
354
*
355
* Adds the given buffers to RCR entries. If the portal @p_BmPortal was created with the
356
* "COMPACT" flag, then it will be using a compaction algorithm to improve
357
* utilization of RCR. As such, these buffers may join an existing ring entry
358
* and/or it may not be issued right away so as to allow future releases to join
359
* the same ring entry. Use the BMAN_RELEASE_FLAG_NOW flag to override this
360
* behavior by committing the RCR entry (or entries) right away. If the RCR
361
* ring is full, the function will return -EBUSY unless BMAN_RELEASE_FLAG_WAIT
362
* is selected, in which case it will sleep waiting for space to become
363
* available in RCR. If the function receives a signal before such time (and
364
* BMAN_RELEASE_FLAG_WAIT_INT is set), the function returns -EINTR. Otherwise,
365
* it returns zero.
366
*/
367
368
t_Error BmPortalRelease(t_Handle h_BmPortal,
                        uint8_t bpid,
                        struct bm_buffer *bufs,
                        uint8_t num,
                        uint32_t flags)
{
    t_BmPortal *p_BmPortal = (t_BmPortal *)h_BmPortal;
    struct bm_rcr_entry *p_Rcr;
    uint8_t idx;

    SANITY_CHECK_RETURN_ERROR(p_BmPortal, E_INVALID_HANDLE);

    /* TODO: I'm ignoring BMAN_PORTAL_FLAG_COMPACT for now. */
    p_Rcr = try_rel_start(p_BmPortal);
    if (p_Rcr == NULL) {
        t_Error err;

        if (!(flags & BMAN_RELEASE_FLAG_WAIT))
            return ERROR_CODE(E_BUSY);
        err = wait_rel_start(p_BmPortal, &p_Rcr, flags);
        if (err)
            return err;
        ASSERT_COND(p_Rcr != NULL);
    }

    /* Fill in the ring entry: pool id plus the buffer addresses. */
    p_Rcr->bpid = bpid;
    for (idx = 0; idx < num; idx++) {
        p_Rcr->bufs[idx].hi = bufs[idx].hi;
        p_Rcr->bufs[idx].lo = bufs[idx].lo;
    }
    /* Issue the release command and wait for sync if requested. NB: the
     * commit can't fail, only waiting can. Don't propagate any failure if a
     * signal arrives, otherwise the caller can't distinguish whether the
     * release was issued or not. Code for user-space can check
     * signal_pending() after we return. */
    rel_commit(p_BmPortal, flags, num);
    return E_OK;
}
403
404
/* Acquire up to @num buffers from pool @bpid via the portal's
 * management-command interface. Returns the number of buffers actually
 * acquired (0..8); @bufs[0..count-1] are filled in. */
uint8_t BmPortalAcquire(t_Handle h_BmPortal,
                        uint8_t bpid,
                        struct bm_buffer *bufs,
                        uint8_t num)
{
    t_BmPortal *p_BmPortal = (t_BmPortal *)h_BmPortal;
    struct bm_portal *p_Low;
    struct bm_mc_command *p_Cmd;
    struct bm_mc_result *p_Res;
    uint8_t count, i;

    SANITY_CHECK_RETURN_VALUE(p_BmPortal, E_INVALID_HANDLE, 0);
    p_Low = p_BmPortal->p_BmPortalLow;
    NCSW_PLOCK(p_BmPortal);
    /* Build and issue the ACQUIRE management command. */
    p_Cmd = bm_mc_start(p_Low);
    p_Cmd->acquire.bpid = bpid;
    bm_mc_commit(p_Low,
                 (uint8_t)(BM_MCC_VERB_CMD_ACQUIRE |
                           (num & BM_MCC_VERB_ACQUIRE_BUFCOUNT)));
    /* Busy-poll until the hardware posts the result. */
    while (!(p_Res = bm_mc_result(p_Low))) ;
    /* The result verb reports how many buffers were actually granted. */
    count = (uint8_t)(p_Res->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT);
    ASSERT_COND(count <= 8);
    for (i = 0; i < count; i++) {
        bufs[i].bpid = bpid;
        bufs[i].hi = p_Res->acquire.bufs[i].hi;
        bufs[i].lo = p_Res->acquire.bufs[i].lo;
    }
    PUNLOCK(p_BmPortal);
    return count;
}
432
433
/* Query pool state through the portal's management-command interface.
 * Copies the depletion state into *p_Pools when @depletion is TRUE,
 * otherwise the availability state. */
t_Error BmPortalQuery(t_Handle h_BmPortal, struct bman_depletion *p_Pools, bool depletion)
{
    t_BmPortal *p_BmPortal = (t_BmPortal *)h_BmPortal;
    struct bm_mc_result *p_Res;

    SANITY_CHECK_RETURN_ERROR(p_BmPortal, E_INVALID_HANDLE);

    NCSW_PLOCK(p_BmPortal);
    /* Issue the QUERY command and busy-poll for its completion. */
    bm_mc_start(p_BmPortal->p_BmPortalLow);
    bm_mc_commit(p_BmPortal->p_BmPortalLow, BM_MCC_VERB_CMD_QUERY);
    while (!(p_Res = bm_mc_result(p_BmPortal->p_BmPortalLow))) ;
    *p_Pools = depletion ? p_Res->query.ds.state : p_Res->query.as.state;
    PUNLOCK(p_BmPortal);
    return E_OK;
}
451
452
/****************************************/
453
/* API Init unit functions */
454
/****************************************/
455
456
/* Allocate and zero a portal object plus its low-level portal and
 * driver-parameter structures, record addresses/IRQ/portal-id from
 * @p_BmPortalParam and install defaults. Returns the portal handle, or
 * NULL on allocation failure (no memory is leaked on any path). */
t_Handle BM_PORTAL_Config(t_BmPortalParam *p_BmPortalParam)
{
    t_BmPortal *p_BmPortal;

    SANITY_CHECK_RETURN_VALUE(p_BmPortalParam, E_INVALID_HANDLE, NULL);
    SANITY_CHECK_RETURN_VALUE(p_BmPortalParam->h_Bm, E_INVALID_HANDLE, NULL);

    p_BmPortal = (t_BmPortal *)XX_Malloc(sizeof(t_BmPortal));
    if (!p_BmPortal)
    {
        REPORT_ERROR(MAJOR, E_NO_MEMORY, ("Bm Portal obj!!!"));
        return NULL;
    }
    memset(p_BmPortal, 0, sizeof(t_BmPortal));

    p_BmPortal->p_BmPortalLow = (struct bm_portal *)XX_Malloc(sizeof(struct bm_portal));
    if (!p_BmPortal->p_BmPortalLow)
    {
        XX_Free(p_BmPortal);
        REPORT_ERROR(MAJOR, E_NO_MEMORY, ("Low bm portal obj!!!"));
        return NULL;
    }
    memset(p_BmPortal->p_BmPortalLow, 0, sizeof(struct bm_portal));

    p_BmPortal->p_BmPortalDriverParams = (t_BmPortalDriverParams *)XX_Malloc(sizeof(t_BmPortalDriverParams));
    if (!p_BmPortal->p_BmPortalDriverParams)
    {
        /* BUG FIX: free the inner object BEFORE the portal. The
         * original freed p_BmPortal first and then dereferenced the
         * freed pointer to free p_BmPortalLow (use-after-free). */
        XX_Free(p_BmPortal->p_BmPortalLow);
        XX_Free(p_BmPortal);
        REPORT_ERROR(MAJOR, E_NO_MEMORY, ("Bm Portal driver parameters"));
        return NULL;
    }
    memset(p_BmPortal->p_BmPortalDriverParams, 0, sizeof(t_BmPortalDriverParams));

    /* Cache-enabled / cache-inhibited register window base addresses. */
    p_BmPortal->p_BmPortalLow->addr.addr_ce = UINT_TO_PTR(p_BmPortalParam->ceBaseAddress);
    p_BmPortal->p_BmPortalLow->addr.addr_ci = UINT_TO_PTR(p_BmPortalParam->ciBaseAddress);
    p_BmPortal->cpu = (int)p_BmPortalParam->swPortalId;
    p_BmPortal->irq = p_BmPortalParam->irq;

    p_BmPortal->h_Bm = p_BmPortalParam->h_Bm;

    /* Defaults; may be overridden before BM_PORTAL_Init(). */
    p_BmPortal->p_BmPortalDriverParams->hwExtStructsMemAttr = DEFAULT_memAttr;
    bman_depletion_fill(&p_BmPortal->p_BmPortalDriverParams->mask);

    return p_BmPortal;
}
502
503
/* Initialize a portal previously configured by BM_PORTAL_Config():
 * derive creation flags, run second-stage bring-up, register the
 * portal with the BM object and discard the init-time parameters. */
t_Error BM_PORTAL_Init(t_Handle h_BmPortal)
{
    t_BmPortal *p_BmPortal = (t_BmPortal *)h_BmPortal;
    uint32_t flags = 0;

    SANITY_CHECK_RETURN_ERROR(p_BmPortal, E_INVALID_HANDLE);

    if (p_BmPortal->irq != NO_IRQ)
        flags |= BMAN_PORTAL_FLAG_IRQ;
    if (p_BmPortal->p_BmPortalDriverParams->hwExtStructsMemAttr & MEMORY_ATTR_CACHEABLE)
        flags |= BMAN_PORTAL_FLAG_CACHE;

    if (bman_create_portal(p_BmPortal, flags, &p_BmPortal->p_BmPortalDriverParams->mask) != E_OK)
    {
        /* NOTE(review): bman_create_portal() already XX_Free()s the
         * portal on its failure paths, so this call looks like a
         * double free — confirm and fix on one side. */
        BM_PORTAL_Free(p_BmPortal);
        RETURN_ERROR(MAJOR, E_NULL_POINTER, ("create portal failed"));
    }

    /* Register this portal with the BM object under its s/w portal id. */
    BmSetPortalHandle(p_BmPortal->h_Bm, (t_Handle)p_BmPortal, (e_DpaaSwPortal)p_BmPortal->cpu);

    /* Configuration phase is over; drop the init-time parameters. */
    XX_Free(p_BmPortal->p_BmPortalDriverParams);
    p_BmPortal->p_BmPortalDriverParams = NULL;

    DBG(TRACE,("Bman-Portal (%d) @ %p:%p\n",
               p_BmPortal->cpu,
               p_BmPortal->p_BmPortalLow->addr.addr_ce,
               p_BmPortal->p_BmPortalLow->addr.addr_ci
               ));

    DBG(TRACE,("Bman-Portal (%d) @ 0x%016llx:0x%016llx",
               p_BmPortal->cpu,
               (uint64_t)XX_VirtToPhys(p_BmPortal->p_BmPortalLow->addr.addr_ce),
               (uint64_t)XX_VirtToPhys(p_BmPortal->p_BmPortalLow->addr.addr_ci)
               ));

    return E_OK;
}
538
539
/* Release a portal: deregister it from the BM object, tear down the
 * hardware-facing state and free the portal object itself. */
t_Error BM_PORTAL_Free(t_Handle h_BmPortal)
{
    t_BmPortal *p_BmPortal = (t_BmPortal *)h_BmPortal;

    if (p_BmPortal == NULL)
        return ERROR_CODE(E_INVALID_HANDLE);

    /* Detach from the BM object before tearing the portal down. */
    BmSetPortalHandle(p_BmPortal->h_Bm, NULL, (e_DpaaSwPortal)p_BmPortal->cpu);
    bman_destroy_portal(p_BmPortal);
    XX_Free(p_BmPortal);
    return E_OK;
}
550
551
/* Override the memory attributes used for hardware extension
 * structures. Only valid between BM_PORTAL_Config() and
 * BM_PORTAL_Init() (driver params must still exist). */
t_Error BM_PORTAL_ConfigMemAttr(t_Handle h_BmPortal, uint32_t hwExtStructsMemAttr)
{
    t_BmPortal *p_BmPortal = (t_BmPortal *)h_BmPortal;

    SANITY_CHECK_RETURN_ERROR(p_BmPortal, E_INVALID_HANDLE);
    SANITY_CHECK_RETURN_ERROR(p_BmPortal->p_BmPortalDriverParams, E_INVALID_HANDLE);

    p_BmPortal->p_BmPortalDriverParams->hwExtStructsMemAttr = hwExtStructsMemAttr;

    return E_OK;
}
562
563