GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/blackfin/mm/sram-alloc.c

/*
 * SRAM allocator for Blackfin on-chip memory
 *
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/miscdevice.h>
#include <linux/ioport.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/rtc.h>
#include <linux/slab.h>
#include <asm/blackfin.h>
#include <asm/mem_map.h>
#include "blackfin_sram.h"

/* the data structure for L1 scratchpad and DATA SRAM */
struct sram_piece {
        void *paddr;
        int size;
        pid_t pid;
        struct sram_piece *next;
};
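
/*
 * The free and used pieces of each bank hang off dummy list heads
 * (only ->next of a head is used).  The free lists are kept sorted by
 * ascending start address so that neighbouring pieces can be coalesced
 * when they are freed.
 */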

/* each lock protects the free/used lists of one SRAM bank (per CPU) */
static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1sram_lock);
static DEFINE_PER_CPU(struct sram_piece, free_l1_ssram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_ssram_head);

#if L1_DATA_A_LENGTH != 0
static DEFINE_PER_CPU(struct sram_piece, free_l1_data_A_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_data_A_sram_head);
#endif

#if L1_DATA_B_LENGTH != 0
static DEFINE_PER_CPU(struct sram_piece, free_l1_data_B_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_data_B_sram_head);
#endif

#if L1_DATA_A_LENGTH || L1_DATA_B_LENGTH
static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1_data_sram_lock);
#endif

#if L1_CODE_LENGTH != 0
static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1_inst_sram_lock);
static DEFINE_PER_CPU(struct sram_piece, free_l1_inst_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_inst_sram_head);
#endif

#if L2_LENGTH != 0
static spinlock_t l2_sram_lock ____cacheline_aligned_in_smp;
static struct sram_piece free_l2_sram_head, used_l2_sram_head;
#endif

static struct kmem_cache *sram_piece_cache;

/* L1 Scratchpad SRAM initialization function */
static void __init l1sram_init(void)
{
        unsigned int cpu;
        unsigned long reserve;

#ifdef CONFIG_SMP
        reserve = 0;
#else
        reserve = sizeof(struct l1_scratch_task_info);
#endif

        for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
                per_cpu(free_l1_ssram_head, cpu).next =
                        kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
                if (!per_cpu(free_l1_ssram_head, cpu).next) {
                        printk(KERN_INFO "Failed to initialize Scratchpad data SRAM.\n");
                        return;
                }

                per_cpu(free_l1_ssram_head, cpu).next->paddr =
                        (void *)get_l1_scratch_start_cpu(cpu) + reserve;
                per_cpu(free_l1_ssram_head, cpu).next->size =
                        L1_SCRATCH_LENGTH - reserve;
                per_cpu(free_l1_ssram_head, cpu).next->pid = 0;
                per_cpu(free_l1_ssram_head, cpu).next->next = NULL;

                per_cpu(used_l1_ssram_head, cpu).next = NULL;

                spin_lock_init(&per_cpu(l1sram_lock, cpu));
                printk(KERN_INFO "Blackfin Scratchpad data SRAM: %d KB\n",
                        L1_SCRATCH_LENGTH >> 10);
        }
}

static void __init l1_data_sram_init(void)
{
#if L1_DATA_A_LENGTH != 0 || L1_DATA_B_LENGTH != 0
        unsigned int cpu;
#endif
#if L1_DATA_A_LENGTH != 0
        for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
                per_cpu(free_l1_data_A_sram_head, cpu).next =
                        kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
                if (!per_cpu(free_l1_data_A_sram_head, cpu).next) {
                        printk(KERN_INFO "Failed to initialize L1 Data A SRAM.\n");
                        return;
                }

                per_cpu(free_l1_data_A_sram_head, cpu).next->paddr =
                        (void *)get_l1_data_a_start_cpu(cpu) + (_ebss_l1 - _sdata_l1);
                per_cpu(free_l1_data_A_sram_head, cpu).next->size =
                        L1_DATA_A_LENGTH - (_ebss_l1 - _sdata_l1);
                per_cpu(free_l1_data_A_sram_head, cpu).next->pid = 0;
                per_cpu(free_l1_data_A_sram_head, cpu).next->next = NULL;

                per_cpu(used_l1_data_A_sram_head, cpu).next = NULL;

                printk(KERN_INFO "Blackfin L1 Data A SRAM: %d KB (%d KB free)\n",
                        L1_DATA_A_LENGTH >> 10,
                        per_cpu(free_l1_data_A_sram_head, cpu).next->size >> 10);
        }
#endif
#if L1_DATA_B_LENGTH != 0
        for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
                per_cpu(free_l1_data_B_sram_head, cpu).next =
                        kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
                if (!per_cpu(free_l1_data_B_sram_head, cpu).next) {
                        printk(KERN_INFO "Failed to initialize L1 Data B SRAM.\n");
                        return;
                }

                per_cpu(free_l1_data_B_sram_head, cpu).next->paddr =
                        (void *)get_l1_data_b_start_cpu(cpu) + (_ebss_b_l1 - _sdata_b_l1);
                per_cpu(free_l1_data_B_sram_head, cpu).next->size =
                        L1_DATA_B_LENGTH - (_ebss_b_l1 - _sdata_b_l1);
                per_cpu(free_l1_data_B_sram_head, cpu).next->pid = 0;
                per_cpu(free_l1_data_B_sram_head, cpu).next->next = NULL;

                per_cpu(used_l1_data_B_sram_head, cpu).next = NULL;

                printk(KERN_INFO "Blackfin L1 Data B SRAM: %d KB (%d KB free)\n",
                        L1_DATA_B_LENGTH >> 10,
                        per_cpu(free_l1_data_B_sram_head, cpu).next->size >> 10);
        }
#endif

#if L1_DATA_A_LENGTH != 0 || L1_DATA_B_LENGTH != 0
        for (cpu = 0; cpu < num_possible_cpus(); ++cpu)
                spin_lock_init(&per_cpu(l1_data_sram_lock, cpu));
#endif
}

static void __init l1_inst_sram_init(void)
{
#if L1_CODE_LENGTH != 0
        unsigned int cpu;
        for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
                per_cpu(free_l1_inst_sram_head, cpu).next =
                        kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
                if (!per_cpu(free_l1_inst_sram_head, cpu).next) {
                        printk(KERN_INFO "Failed to initialize L1 Instruction SRAM.\n");
                        return;
                }

                per_cpu(free_l1_inst_sram_head, cpu).next->paddr =
                        (void *)get_l1_code_start_cpu(cpu) + (_etext_l1 - _stext_l1);
                per_cpu(free_l1_inst_sram_head, cpu).next->size =
                        L1_CODE_LENGTH - (_etext_l1 - _stext_l1);
                per_cpu(free_l1_inst_sram_head, cpu).next->pid = 0;
                per_cpu(free_l1_inst_sram_head, cpu).next->next = NULL;

                per_cpu(used_l1_inst_sram_head, cpu).next = NULL;

                printk(KERN_INFO "Blackfin L1 Instruction SRAM: %d KB (%d KB free)\n",
                        L1_CODE_LENGTH >> 10,
                        per_cpu(free_l1_inst_sram_head, cpu).next->size >> 10);

                spin_lock_init(&per_cpu(l1_inst_sram_lock, cpu));
        }
#endif
}

static void __init l2_sram_init(void)
{
#if L2_LENGTH != 0
        free_l2_sram_head.next =
                kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
        if (!free_l2_sram_head.next) {
                printk(KERN_INFO "Failed to initialize L2 SRAM.\n");
                return;
        }

        free_l2_sram_head.next->paddr =
                (void *)L2_START + (_ebss_l2 - _stext_l2);
        free_l2_sram_head.next->size =
                L2_LENGTH - (_ebss_l2 - _stext_l2);
        free_l2_sram_head.next->pid = 0;
        free_l2_sram_head.next->next = NULL;

        used_l2_sram_head.next = NULL;

        printk(KERN_INFO "Blackfin L2 SRAM: %d KB (%d KB free)\n",
                L2_LENGTH >> 10,
                free_l2_sram_head.next->size >> 10);

        spin_lock_init(&l2_sram_lock);
#endif
}

static int __init bfin_sram_init(void)
{
        /* SLAB_PANIC: failure to create the piece cache is fatal at boot */
        sram_piece_cache = kmem_cache_create("sram_piece_cache",
                                sizeof(struct sram_piece),
                                0, SLAB_PANIC, NULL);

        l1sram_init();
        l1_data_sram_init();
        l1_inst_sram_init();
        l2_sram_init();

        return 0;
}
pure_initcall(bfin_sram_init);
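
/*
 * pure_initcall is the lowest of the numbered initcall levels, so these
 * allocators come up before core, arch, device and late initcalls that
 * may want on-chip SRAM.
 */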

/* SRAM allocate function: first-fit search of the free list */
static void *_sram_alloc(size_t size, struct sram_piece *pfree_head,
                struct sram_piece *pused_head)
{
        struct sram_piece *pslot, *plast, *pavail;

        if (size == 0 || !pfree_head || !pused_head)
                return NULL;

        /* round the request up to a multiple of 4 bytes */
        size = (size + 3) & ~3;

        pslot = pfree_head->next;
        plast = pfree_head;

        /* search for the first free piece large enough */
        while (pslot != NULL && size > pslot->size) {
                plast = pslot;
                pslot = pslot->next;
        }

        if (!pslot)
                return NULL;

        if (pslot->size == size) {
                /* exact fit: move the whole piece to the used list */
                plast->next = pslot->next;
                pavail = pslot;
        } else {
                /* split: carve the request off the front of the piece;
                 * use GFP_ATOMIC so our L1 allocator can be used atomically
                 */
                pavail = kmem_cache_alloc(sram_piece_cache, GFP_ATOMIC);

                if (!pavail)
                        return NULL;

                pavail->paddr = pslot->paddr;
                pavail->size = size;
                pslot->paddr += size;
                pslot->size -= size;
        }

        pavail->pid = current->pid;

        pslot = pused_head->next;
        plast = pused_head;

        /* insert the new piece into the used list, kept address-ordered */
        while (pslot != NULL && pavail->paddr < pslot->paddr) {
                plast = pslot;
                pslot = pslot->next;
        }

        pavail->next = pslot;
        plast->next = pavail;

        return pavail->paddr;
}
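
/*
 * Worked example of the split path above (illustrative numbers only):
 * with a single 1024-byte free piece at 0xff800000, a request for 102
 * bytes is rounded up to 104, a used piece [0xff800000, 0xff800068) is
 * carved off the front, and the free piece shrinks to 920 bytes
 * starting at 0xff800068.
 */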

/* Allocate the largest available block. */
static void *_sram_alloc_max(struct sram_piece *pfree_head,
                struct sram_piece *pused_head,
                unsigned long *psize)
{
        struct sram_piece *pslot, *pmax;

        if (!pfree_head || !pused_head)
                return NULL;

        pmax = pslot = pfree_head->next;

        /* search for the largest free piece */
        while (pslot != NULL) {
                if (pslot->size > pmax->size)
                        pmax = pslot;
                pslot = pslot->next;
        }

        if (!pmax)
                return NULL;

        *psize = pmax->size;

        return _sram_alloc(*psize, pfree_head, pused_head);
}

/* SRAM free function: return a piece to the free list, coalescing with
 * adjacent free pieces where possible
 */
static int _sram_free(const void *addr,
                struct sram_piece *pfree_head,
                struct sram_piece *pused_head)
{
        struct sram_piece *pslot, *plast, *pavail;

        if (!pfree_head || !pused_head)
                return -1;

        /* search the used list for the piece at this address */
        pslot = pused_head->next;
        plast = pused_head;

        while (pslot != NULL && pslot->paddr != addr) {
                plast = pslot;
                pslot = pslot->next;
        }

        if (!pslot)
                return -1;

        plast->next = pslot->next;
        pavail = pslot;
        pavail->pid = 0;

        /* insert the freed piece back into the address-ordered free list */
        pslot = pfree_head->next;
        plast = pfree_head;

        while (pslot != NULL && addr > pslot->paddr) {
                plast = pslot;
                pslot = pslot->next;
        }

        /* merge with the preceding free piece if they are contiguous */
        if (plast != pfree_head && plast->paddr + plast->size == pavail->paddr) {
                plast->size += pavail->size;
                kmem_cache_free(sram_piece_cache, pavail);
        } else {
                pavail->next = plast->next;
                plast->next = pavail;
                plast = pavail;
        }

        /* merge with the following free piece if they are contiguous */
        if (pslot && plast->paddr + plast->size == pslot->paddr) {
                plast->size += pslot->size;
                plast->next = pslot->next;
                kmem_cache_free(sram_piece_cache, pslot);
        }

        return 0;
}
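
/*
 * Coalescing example (illustrative): freeing a piece that sits directly
 * between two free pieces merges all three into one; the preceding free
 * piece first absorbs the freed piece, then the combined piece absorbs
 * the following one.
 */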

int sram_free(const void *addr)
{
#if L1_CODE_LENGTH != 0
        if (addr >= (void *)get_l1_code_start()
                 && addr < (void *)(get_l1_code_start() + L1_CODE_LENGTH))
                return l1_inst_sram_free(addr);
        else
#endif
#if L1_DATA_A_LENGTH != 0
        if (addr >= (void *)get_l1_data_a_start()
                 && addr < (void *)(get_l1_data_a_start() + L1_DATA_A_LENGTH))
                return l1_data_A_sram_free(addr);
        else
#endif
#if L1_DATA_B_LENGTH != 0
        if (addr >= (void *)get_l1_data_b_start()
                 && addr < (void *)(get_l1_data_b_start() + L1_DATA_B_LENGTH))
                return l1_data_B_sram_free(addr);
        else
#endif
#if L2_LENGTH != 0
        if (addr >= (void *)L2_START
                 && addr < (void *)(L2_START + L2_LENGTH))
                return l2_sram_free(addr);
        else
#endif
        return -1;
}
EXPORT_SYMBOL(sram_free);
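
/*
 * Note that sram_free() dispatches purely on the address range, so
 * callers need not remember which SRAM bank an allocation came from.
 */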

void *l1_data_A_sram_alloc(size_t size)
{
#if L1_DATA_A_LENGTH != 0
        unsigned long flags;
        void *addr;
        unsigned int cpu;

        cpu = smp_processor_id();
        spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

        addr = _sram_alloc(size, &per_cpu(free_l1_data_A_sram_head, cpu),
                        &per_cpu(used_l1_data_A_sram_head, cpu));

        spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);

        pr_debug("Allocated address in l1_data_A_sram_alloc is 0x%lx+0x%lx\n",
                 (unsigned long)addr, size);

        return addr;
#else
        return NULL;
#endif
}
EXPORT_SYMBOL(l1_data_A_sram_alloc);

int l1_data_A_sram_free(const void *addr)
{
#if L1_DATA_A_LENGTH != 0
        unsigned long flags;
        int ret;
        unsigned int cpu;

        cpu = smp_processor_id();
        spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

        ret = _sram_free(addr, &per_cpu(free_l1_data_A_sram_head, cpu),
                        &per_cpu(used_l1_data_A_sram_head, cpu));

        spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);

        return ret;
#else
        return -1;
#endif
}
EXPORT_SYMBOL(l1_data_A_sram_free);

void *l1_data_B_sram_alloc(size_t size)
{
#if L1_DATA_B_LENGTH != 0
        unsigned long flags;
        void *addr;
        unsigned int cpu;

        cpu = smp_processor_id();
        spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

        addr = _sram_alloc(size, &per_cpu(free_l1_data_B_sram_head, cpu),
                        &per_cpu(used_l1_data_B_sram_head, cpu));

        spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);

        pr_debug("Allocated address in l1_data_B_sram_alloc is 0x%lx+0x%lx\n",
                 (unsigned long)addr, size);

        return addr;
#else
        return NULL;
#endif
}
EXPORT_SYMBOL(l1_data_B_sram_alloc);

int l1_data_B_sram_free(const void *addr)
{
#if L1_DATA_B_LENGTH != 0
        unsigned long flags;
        int ret;
        unsigned int cpu;

        cpu = smp_processor_id();
        spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

        ret = _sram_free(addr, &per_cpu(free_l1_data_B_sram_head, cpu),
                        &per_cpu(used_l1_data_B_sram_head, cpu));

        spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);

        return ret;
#else
        return -1;
#endif
}
EXPORT_SYMBOL(l1_data_B_sram_free);

void *l1_data_sram_alloc(size_t size)
{
        /* try bank A first, then fall back to bank B */
        void *addr = l1_data_A_sram_alloc(size);

        if (!addr)
                addr = l1_data_B_sram_alloc(size);

        return addr;
}
EXPORT_SYMBOL(l1_data_sram_alloc);

void *l1_data_sram_zalloc(size_t size)
{
        void *addr = l1_data_sram_alloc(size);

        if (addr)
                memset(addr, 0x00, size);

        return addr;
}
EXPORT_SYMBOL(l1_data_sram_zalloc);

int l1_data_sram_free(const void *addr)
{
        int ret;
        ret = l1_data_A_sram_free(addr);
        if (ret == -1)
                ret = l1_data_B_sram_free(addr);
        return ret;
}
EXPORT_SYMBOL(l1_data_sram_free);
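
/*
 * Example (hypothetical caller, not part of this file): staging a small
 * zeroed lookup table in L1 data SRAM, transparently falling back from
 * bank A to bank B:
 *
 *	u16 *tbl = l1_data_sram_zalloc(256 * sizeof(*tbl));
 *	if (!tbl)
 *		return -ENOMEM;
 *	...
 *	l1_data_sram_free(tbl);
 */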

void *l1_inst_sram_alloc(size_t size)
{
#if L1_CODE_LENGTH != 0
        unsigned long flags;
        void *addr;
        unsigned int cpu;

        cpu = smp_processor_id();
        spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags);

        addr = _sram_alloc(size, &per_cpu(free_l1_inst_sram_head, cpu),
                        &per_cpu(used_l1_inst_sram_head, cpu));

        spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags);

        pr_debug("Allocated address in l1_inst_sram_alloc is 0x%lx+0x%lx\n",
                 (unsigned long)addr, size);

        return addr;
#else
        return NULL;
#endif
}
EXPORT_SYMBOL(l1_inst_sram_alloc);

int l1_inst_sram_free(const void *addr)
{
#if L1_CODE_LENGTH != 0
        unsigned long flags;
        int ret;
        unsigned int cpu;

        cpu = smp_processor_id();
        spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags);

        ret = _sram_free(addr, &per_cpu(free_l1_inst_sram_head, cpu),
                        &per_cpu(used_l1_inst_sram_head, cpu));

        spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags);

        return ret;
#else
        return -1;
#endif
}
EXPORT_SYMBOL(l1_inst_sram_free);

/* L1 Scratchpad memory allocate function */
void *l1sram_alloc(size_t size)
{
        unsigned long flags;
        void *addr;
        unsigned int cpu;

        cpu = smp_processor_id();
        spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);

        addr = _sram_alloc(size, &per_cpu(free_l1_ssram_head, cpu),
                        &per_cpu(used_l1_ssram_head, cpu));

        spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);

        return addr;
}

/* L1 Scratchpad memory allocate function: grab the largest free block */
void *l1sram_alloc_max(size_t *psize)
{
        unsigned long flags;
        void *addr;
        unsigned int cpu;

        cpu = smp_processor_id();
        spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);

        addr = _sram_alloc_max(&per_cpu(free_l1_ssram_head, cpu),
                        &per_cpu(used_l1_ssram_head, cpu), psize);

        spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);

        return addr;
}
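
/*
 * Example (hypothetical caller): grabbing whatever scratchpad remains
 * and learning its size via the out-parameter:
 *
 *	size_t avail;
 *	void *p = l1sram_alloc_max(&avail);
 *	if (p)
 *		... use avail bytes at p ...
 */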

/* L1 Scratchpad memory free function */
int l1sram_free(const void *addr)
{
        unsigned long flags;
        int ret;
        unsigned int cpu;

        cpu = smp_processor_id();
        spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);

        ret = _sram_free(addr, &per_cpu(free_l1_ssram_head, cpu),
                        &per_cpu(used_l1_ssram_head, cpu));

        spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);

        return ret;
}

void *l2_sram_alloc(size_t size)
{
#if L2_LENGTH != 0
        unsigned long flags;
        void *addr;

        spin_lock_irqsave(&l2_sram_lock, flags);

        addr = _sram_alloc(size, &free_l2_sram_head,
                        &used_l2_sram_head);

        spin_unlock_irqrestore(&l2_sram_lock, flags);

        pr_debug("Allocated address in l2_sram_alloc is 0x%lx+0x%lx\n",
                 (unsigned long)addr, size);

        return addr;
#else
        return NULL;
#endif
}
EXPORT_SYMBOL(l2_sram_alloc);

void *l2_sram_zalloc(size_t size)
{
        void *addr = l2_sram_alloc(size);

        if (addr)
                memset(addr, 0x00, size);

        return addr;
}
EXPORT_SYMBOL(l2_sram_zalloc);

int l2_sram_free(const void *addr)
{
#if L2_LENGTH != 0
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&l2_sram_lock, flags);

        ret = _sram_free(addr, &free_l2_sram_head,
                        &used_l2_sram_head);

        spin_unlock_irqrestore(&l2_sram_lock, flags);

        return ret;
#else
        return -1;
#endif
}
EXPORT_SYMBOL(l2_sram_free);

int sram_free_with_lsl(const void *addr)
{
        struct sram_list_struct *lsl, **tmp;
        struct mm_struct *mm = current->mm;
        int ret = -1;

        for (tmp = &mm->context.sram_list; *tmp; tmp = &(*tmp)->next)
                if ((*tmp)->addr == addr) {
                        lsl = *tmp;
                        ret = sram_free(addr);
                        *tmp = lsl->next;
                        kfree(lsl);
                        break;
                }

        return ret;
}
EXPORT_SYMBOL(sram_free_with_lsl);

/* Allocate memory and keep it in an L1 SRAM list (lsl) so that the
 * resources are tracked.  These are designed for userspace, so that
 * when a process exits we can safely reap its resources.
 */
void *sram_alloc_with_lsl(size_t size, unsigned long flags)
{
        void *addr = NULL;
        struct sram_list_struct *lsl = NULL;
        struct mm_struct *mm = current->mm;

        lsl = kzalloc(sizeof(struct sram_list_struct), GFP_KERNEL);
        if (!lsl)
                return NULL;

        if (flags & L1_INST_SRAM)
                addr = l1_inst_sram_alloc(size);

        if (addr == NULL && (flags & L1_DATA_A_SRAM))
                addr = l1_data_A_sram_alloc(size);

        if (addr == NULL && (flags & L1_DATA_B_SRAM))
                addr = l1_data_B_sram_alloc(size);

        if (addr == NULL && (flags & L2_SRAM))
                addr = l2_sram_alloc(size);

        if (addr == NULL) {
                kfree(lsl);
                return NULL;
        }
        lsl->addr = addr;
        lsl->length = size;
        lsl->next = mm->context.sram_list;
        mm->context.sram_list = lsl;
        return addr;
}
EXPORT_SYMBOL(sram_alloc_with_lsl);
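
/*
 * Example (hypothetical caller): a tracked, per-process allocation that
 * prefers L1 instruction SRAM and falls back to L2, reaped automatically
 * when the process exits:
 *
 *	void *p = sram_alloc_with_lsl(512, L1_INST_SRAM | L2_SRAM);
 *	...
 *	sram_free_with_lsl(p);
 */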

#ifdef CONFIG_PROC_FS
/* Once we get a real allocator, we'll throw all of this away.
 * Until then, we need some sort of visibility into the L1 alloc.
 */
/* Need to keep each line of output the same length.  Currently, that is
 * 44 bytes (including the newline).
 */
static int _sram_proc_show(struct seq_file *m, const char *desc,
                struct sram_piece *pfree_head,
                struct sram_piece *pused_head)
{
        struct sram_piece *pslot;

        if (!pfree_head || !pused_head)
                return -1;

        seq_printf(m, "--- SRAM %-14s Size   PID State     \n", desc);

        /* walk the used list, then the free list */
        pslot = pused_head->next;

        while (pslot != NULL) {
                seq_printf(m, "%p-%p %10i %5i %-10s\n",
                        pslot->paddr, pslot->paddr + pslot->size,
                        pslot->size, pslot->pid, "ALLOCATED");

                pslot = pslot->next;
        }

        pslot = pfree_head->next;

        while (pslot != NULL) {
                seq_printf(m, "%p-%p %10i %5i %-10s\n",
                        pslot->paddr, pslot->paddr + pslot->size,
                        pslot->size, pslot->pid, "FREE");

                pslot = pslot->next;
        }

        return 0;
}
static int sram_proc_show(struct seq_file *m, void *v)
{
        unsigned int cpu;

        for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
                if (_sram_proc_show(m, "Scratchpad",
                        &per_cpu(free_l1_ssram_head, cpu), &per_cpu(used_l1_ssram_head, cpu)))
                        goto not_done;
#if L1_DATA_A_LENGTH != 0
                if (_sram_proc_show(m, "L1 Data A",
                        &per_cpu(free_l1_data_A_sram_head, cpu),
                        &per_cpu(used_l1_data_A_sram_head, cpu)))
                        goto not_done;
#endif
#if L1_DATA_B_LENGTH != 0
                if (_sram_proc_show(m, "L1 Data B",
                        &per_cpu(free_l1_data_B_sram_head, cpu),
                        &per_cpu(used_l1_data_B_sram_head, cpu)))
                        goto not_done;
#endif
#if L1_CODE_LENGTH != 0
                if (_sram_proc_show(m, "L1 Instruction",
                        &per_cpu(free_l1_inst_sram_head, cpu),
                        &per_cpu(used_l1_inst_sram_head, cpu)))
                        goto not_done;
#endif
        }
#if L2_LENGTH != 0
        if (_sram_proc_show(m, "L2", &free_l2_sram_head, &used_l2_sram_head))
                goto not_done;
#endif
not_done:
        return 0;
}

static int sram_proc_open(struct inode *inode, struct file *file)
{
        return single_open(file, sram_proc_show, NULL);
}

static const struct file_operations sram_proc_ops = {
        .open = sram_proc_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int __init sram_proc_init(void)
{
        struct proc_dir_entry *ptr;

        ptr = proc_create("sram", S_IRUGO, NULL, &sram_proc_ops);
        if (!ptr) {
                printk(KERN_WARNING "unable to create /proc/sram\n");
                return -1;
        }
        return 0;
}
late_initcall(sram_proc_init);
#endif