GitHub Repository: awilliam/linux-vfio
Path: blob/master/sound/core/seq/seq_memory.c
/*
 *  ALSA sequencer Memory Manager
 *  Copyright (c) 1998 by Frank van de Pol <[email protected]>
 *                        Jaroslav Kysela <[email protected]>
 *                2000 by Takashi Iwai <[email protected]>
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <sound/core.h>

#include <sound/seq_kernel.h>
#include "seq_memory.h"
#include "seq_queue.h"
#include "seq_info.h"
#include "seq_lock.h"

static inline int snd_seq_pool_available(struct snd_seq_pool *pool)
{
        return pool->total_elements - atomic_read(&pool->counter);
}

static inline int snd_seq_output_ok(struct snd_seq_pool *pool)
{
        return snd_seq_pool_available(pool) >= pool->room;
}

/*
 * Variable length event:
 * An event such as sysex uses the variable length type.
 * The external data may be stored in three different formats.
 * 1) kernel space
 *    This is the normal case.
 *      ext.data.len = length
 *      ext.data.ptr = buffer pointer
 * 2) user space
 *    When an event is generated via read(), the external data is
 *    kept in user space until expanded.
 *      ext.data.len = length | SNDRV_SEQ_EXT_USRPTR
 *      ext.data.ptr = userspace pointer
 * 3) chained cells
 *    When the variable length event is enqueued (in prioq or fifo),
 *    the external data is decomposed into several cells.
 *      ext.data.len = length | SNDRV_SEQ_EXT_CHAINED
 *      ext.data.ptr = the additional cell head
 *         -> cell.next -> cell.next -> ..
 */
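
/*
 * For illustration, a hypothetical 256-byte sysex body would be tagged
 * as follows in each of the three formats above:
 *
 *   ev.data.ext.len = 256;                          // 1) kernel space
 *   ev.data.ext.len = 256 | SNDRV_SEQ_EXT_USRPTR;   // 2) user space
 *   ev.data.ext.len = 256 | SNDRV_SEQ_EXT_CHAINED;  // 3) chained cells
 *
 * In every case get_var_len() below recovers the raw length by masking
 * the flag bits off with ~SNDRV_SEQ_EXT_MASK.
 */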

/*
 * exported:
 * call dump function to expand external data.
 */

static int get_var_len(const struct snd_seq_event *event)
{
        if ((event->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARIABLE)
                return -EINVAL;

        return event->data.ext.len & ~SNDRV_SEQ_EXT_MASK;
}

int snd_seq_dump_var_event(const struct snd_seq_event *event,
                           snd_seq_dump_func_t func, void *private_data)
{
        int len, err;
        struct snd_seq_event_cell *cell;

        if ((len = get_var_len(event)) <= 0)
                return len;

        if (event->data.ext.len & SNDRV_SEQ_EXT_USRPTR) {
                char buf[32];
                char __user *curptr = (char __force __user *)event->data.ext.ptr;
                while (len > 0) {
                        int size = sizeof(buf);
                        if (len < size)
                                size = len;
                        if (copy_from_user(buf, curptr, size))
                                return -EFAULT;
                        err = func(private_data, buf, size);
                        if (err < 0)
                                return err;
                        curptr += size;
                        len -= size;
                }
                return 0;
        }
        if (! (event->data.ext.len & SNDRV_SEQ_EXT_CHAINED)) {
                return func(private_data, event->data.ext.ptr, len);
        }

        cell = (struct snd_seq_event_cell *)event->data.ext.ptr;
        for (; len > 0 && cell; cell = cell->next) {
                int size = sizeof(struct snd_seq_event);
                if (len < size)
                        size = len;
                err = func(private_data, &cell->event, size);
                if (err < 0)
                        return err;
                len -= size;
        }
        return 0;
}

EXPORT_SYMBOL(snd_seq_dump_var_event);
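
/*
 * Usage sketch: the dump callback receives each chunk in order, so a
 * hypothetical caller could walk an event's external data like this
 * ('sum_bytes' is illustrative, not part of the kernel API), assuming
 * the snd_seq_dump_func_t signature (void *ptr, void *buf, int count):
 *
 *   static int sum_bytes(void *priv, void *buf, int count)
 *   {
 *           *(int *)priv += count;
 *           return 0;
 *   }
 *
 *   int total = 0;
 *   err = snd_seq_dump_var_event(ev, sum_bytes, &total);
 *
 * A negative return from the callback aborts the walk and is passed
 * back to the caller.
 */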


/*
 * exported:
 * expand the variable length event to linear buffer space.
 */

static int seq_copy_in_kernel(char **bufptr, const void *src, int size)
{
        memcpy(*bufptr, src, size);
        *bufptr += size;
        return 0;
}

static int seq_copy_in_user(char __user **bufptr, const void *src, int size)
{
        if (copy_to_user(*bufptr, src, size))
                return -EFAULT;
        *bufptr += size;
        return 0;
}

int snd_seq_expand_var_event(const struct snd_seq_event *event, int count, char *buf,
                             int in_kernel, int size_aligned)
{
        int len, newlen;
        int err;

        if ((len = get_var_len(event)) < 0)
                return len;
        newlen = len;
        if (size_aligned > 0)
                newlen = roundup(len, size_aligned);
        if (count < newlen)
                return -EAGAIN;

        if (event->data.ext.len & SNDRV_SEQ_EXT_USRPTR) {
                if (! in_kernel)
                        return -EINVAL;
                if (copy_from_user(buf, (void __force __user *)event->data.ext.ptr, len))
                        return -EFAULT;
                return newlen;
        }
        err = snd_seq_dump_var_event(event,
                                     in_kernel ? (snd_seq_dump_func_t)seq_copy_in_kernel :
                                     (snd_seq_dump_func_t)seq_copy_in_user,
                                     &buf);
        return err < 0 ? err : newlen;
}

EXPORT_SYMBOL(snd_seq_expand_var_event);
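
/*
 * Usage sketch (hypothetical caller): expanding into a kernel buffer
 * with no size alignment; 'buf' and 'bufsize' are illustrative names.
 *
 *   int n = snd_seq_expand_var_event(ev, bufsize, buf, 1, 0);
 *   if (n < 0)
 *           return n;   // -EINVAL, -EFAULT, or -EAGAIN (buffer too small)
 *   // n bytes of external data are now linear in buf
 *
 * Note that -EAGAIN here signals that the destination is smaller than
 * the (possibly alignment-rounded) length, not that an unchanged retry
 * would succeed.
 */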

/*
 * release this cell, free extended data if available
 */

static inline void free_cell(struct snd_seq_pool *pool,
                             struct snd_seq_event_cell *cell)
{
        cell->next = pool->free;
        pool->free = cell;
        atomic_dec(&pool->counter);
}

void snd_seq_cell_free(struct snd_seq_event_cell * cell)
{
        unsigned long flags;
        struct snd_seq_pool *pool;

        if (snd_BUG_ON(!cell))
                return;
        pool = cell->pool;
        if (snd_BUG_ON(!pool))
                return;

        spin_lock_irqsave(&pool->lock, flags);
        free_cell(pool, cell);
        if (snd_seq_ev_is_variable(&cell->event)) {
                if (cell->event.data.ext.len & SNDRV_SEQ_EXT_CHAINED) {
                        struct snd_seq_event_cell *curp, *nextptr;
                        curp = cell->event.data.ext.ptr;
                        for (; curp; curp = nextptr) {
                                nextptr = curp->next;
                                curp->next = pool->free;
                                free_cell(pool, curp);
                        }
                }
        }
        if (waitqueue_active(&pool->output_sleep)) {
                /* has enough space now? */
                if (snd_seq_output_ok(pool))
                        wake_up(&pool->output_sleep);
        }
        spin_unlock_irqrestore(&pool->lock, flags);
}
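
/*
 * Both paths above push cells onto pool->free in LIFO order, so the
 * pool behaves as a stack of fixed-size cells.  Freeing the head cell
 * of a chained event with k extra cells therefore returns k + 1 cells
 * and decrements pool->counter k + 1 times, which is what re-opens
 * room for writers sleeping on output_sleep.
 */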


/*
 * allocate an event cell.
 */
static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
                              struct snd_seq_event_cell **cellp,
                              int nonblock, struct file *file)
{
        struct snd_seq_event_cell *cell;
        unsigned long flags;
        int err = -EAGAIN;
        wait_queue_t wait;

        if (pool == NULL)
                return -EINVAL;

        *cellp = NULL;

        init_waitqueue_entry(&wait, current);
        spin_lock_irqsave(&pool->lock, flags);
        if (pool->ptr == NULL) {        /* not initialized */
                snd_printd("seq: pool is not initialized\n");
                err = -EINVAL;
                goto __error;
        }
        while (pool->free == NULL && ! nonblock && ! pool->closing) {

                set_current_state(TASK_INTERRUPTIBLE);
                add_wait_queue(&pool->output_sleep, &wait);
                spin_unlock_irq(&pool->lock);
                schedule();
                spin_lock_irq(&pool->lock);
                remove_wait_queue(&pool->output_sleep, &wait);
                /* interrupted? */
                if (signal_pending(current)) {
                        err = -ERESTARTSYS;
                        goto __error;
                }
        }
        if (pool->closing) { /* closing.. */
                err = -ENOMEM;
                goto __error;
        }

        cell = pool->free;
        if (cell) {
                int used;
                pool->free = cell->next;
                atomic_inc(&pool->counter);
                used = atomic_read(&pool->counter);
                if (pool->max_used < used)
                        pool->max_used = used;
                pool->event_alloc_success++;
                /* clear cell pointers */
                cell->next = NULL;
                err = 0;
        } else
                pool->event_alloc_failures++;
        *cellp = cell;

__error:
        spin_unlock_irqrestore(&pool->lock, flags);
        return err;
}
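
/*
 * The loop above is the classic prepare-to-wait pattern: the task marks
 * itself TASK_INTERRUPTIBLE and joins output_sleep before dropping the
 * lock, so a wake_up() issued from snd_seq_cell_free() cannot be lost
 * between the emptiness check and schedule().  Nonblocking callers skip
 * the loop entirely and, with no free cell, return the initial
 * err = -EAGAIN.
 */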


/*
 * duplicate the event to a cell.
 * if the event has external data, the data is decomposed into
 * additional cells.
 */
int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
                      struct snd_seq_event_cell **cellp, int nonblock,
                      struct file *file)
{
        int ncells, err;
        unsigned int extlen;
        struct snd_seq_event_cell *cell;

        *cellp = NULL;

        ncells = 0;
        extlen = 0;
        if (snd_seq_ev_is_variable(event)) {
                extlen = event->data.ext.len & ~SNDRV_SEQ_EXT_MASK;
                ncells = (extlen + sizeof(struct snd_seq_event) - 1) / sizeof(struct snd_seq_event);
        }
        if (ncells >= pool->total_elements)
                return -ENOMEM;

        err = snd_seq_cell_alloc(pool, &cell, nonblock, file);
        if (err < 0)
                return err;

        /* copy the event */
        cell->event = *event;

        /* decompose */
        if (snd_seq_ev_is_variable(event)) {
                int len = extlen;
                int is_chained = event->data.ext.len & SNDRV_SEQ_EXT_CHAINED;
                int is_usrptr = event->data.ext.len & SNDRV_SEQ_EXT_USRPTR;
                struct snd_seq_event_cell *src, *tmp, *tail;
                char *buf;

                cell->event.data.ext.len = extlen | SNDRV_SEQ_EXT_CHAINED;
                cell->event.data.ext.ptr = NULL;

                src = (struct snd_seq_event_cell *)event->data.ext.ptr;
                buf = (char *)event->data.ext.ptr;
                tail = NULL;

                while (ncells-- > 0) {
                        int size = sizeof(struct snd_seq_event);
                        if (len < size)
                                size = len;
                        err = snd_seq_cell_alloc(pool, &tmp, nonblock, file);
                        if (err < 0)
                                goto __error;
                        if (cell->event.data.ext.ptr == NULL)
                                cell->event.data.ext.ptr = tmp;
                        if (tail)
                                tail->next = tmp;
                        tail = tmp;
                        /* copy chunk */
                        if (is_chained && src) {
                                tmp->event = src->event;
                                src = src->next;
                        } else if (is_usrptr) {
                                if (copy_from_user(&tmp->event, (char __force __user *)buf, size)) {
                                        err = -EFAULT;
                                        goto __error;
                                }
                        } else {
                                memcpy(&tmp->event, buf, size);
                        }
                        buf += size;
                        len -= size;
                }
        }

        *cellp = cell;
        return 0;

__error:
        snd_seq_cell_free(cell);
        return err;
}
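
/*
 * Worked example of the cell arithmetic above, assuming the typical
 * 28-byte struct snd_seq_event: a 100-byte sysex body gives
 *
 *   ncells = (100 + 28 - 1) / 28 = 4
 *
 * so the duplicated event occupies five cells in total: the head cell
 * (rewritten to the chained format) plus four chained data cells, each
 * carrying up to sizeof(struct snd_seq_event) bytes of payload.
 */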


/* poll wait */
int snd_seq_pool_poll_wait(struct snd_seq_pool *pool, struct file *file,
                           poll_table *wait)
{
        poll_wait(file, &pool->output_sleep, wait);
        return snd_seq_output_ok(pool);
}


/* allocate room for the specified number of events */
int snd_seq_pool_init(struct snd_seq_pool *pool)
{
        int cell;
        struct snd_seq_event_cell *cellptr;
        unsigned long flags;

        if (snd_BUG_ON(!pool))
                return -EINVAL;
        if (pool->ptr) /* should be atomic? */
                return 0;

        pool->ptr = vmalloc(sizeof(struct snd_seq_event_cell) * pool->size);
        if (pool->ptr == NULL) {
                snd_printd("seq: malloc for sequencer events failed\n");
                return -ENOMEM;
        }

        /* add new cells to the free cell list */
        spin_lock_irqsave(&pool->lock, flags);
        pool->free = NULL;

        for (cell = 0; cell < pool->size; cell++) {
                cellptr = pool->ptr + cell;
                cellptr->pool = pool;
                cellptr->next = pool->free;
                pool->free = cellptr;
        }
        pool->room = (pool->size + 1) / 2;

        /* init statistics */
        pool->max_used = 0;
        pool->total_elements = pool->size;
        spin_unlock_irqrestore(&pool->lock, flags);
        return 0;
}
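
/*
 * The room watermark set above is half the pool, rounded up: for a
 * hypothetical pool->size of 500 cells, room = 250, so
 * snd_seq_output_ok() reports the pool writable only while at least
 * half of the cells are free.  Both poll() (via
 * snd_seq_pool_poll_wait) and the writer wake-ups in
 * snd_seq_cell_free() key off this threshold.
 */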

/* remove events */
int snd_seq_pool_done(struct snd_seq_pool *pool)
{
        unsigned long flags;
        struct snd_seq_event_cell *ptr;
        int max_count = 5 * HZ;

        if (snd_BUG_ON(!pool))
                return -EINVAL;

        /* wait for closing all threads */
        spin_lock_irqsave(&pool->lock, flags);
        pool->closing = 1;
        spin_unlock_irqrestore(&pool->lock, flags);

        if (waitqueue_active(&pool->output_sleep))
                wake_up(&pool->output_sleep);

        while (atomic_read(&pool->counter) > 0) {
                if (max_count == 0) {
                        snd_printk(KERN_WARNING "snd_seq_pool_done timeout: %d cells remain\n", atomic_read(&pool->counter));
                        break;
                }
                schedule_timeout_uninterruptible(1);
                max_count--;
        }

        /* release all resources */
        spin_lock_irqsave(&pool->lock, flags);
        ptr = pool->ptr;
        pool->ptr = NULL;
        pool->free = NULL;
        pool->total_elements = 0;
        spin_unlock_irqrestore(&pool->lock, flags);

        vfree(ptr);

        spin_lock_irqsave(&pool->lock, flags);
        pool->closing = 0;
        spin_unlock_irqrestore(&pool->lock, flags);

        return 0;
}
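
/*
 * The drain loop above bounds the wait at max_count = 5 * HZ iterations
 * of schedule_timeout_uninterruptible(1), i.e. 5 * HZ jiffies in total,
 * which is roughly five seconds regardless of the configured HZ value.
 */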


/* init new memory pool */
struct snd_seq_pool *snd_seq_pool_new(int poolsize)
{
        struct snd_seq_pool *pool;

        /* create pool block */
        pool = kzalloc(sizeof(*pool), GFP_KERNEL);
        if (pool == NULL) {
                snd_printd("seq: malloc failed for pool\n");
                return NULL;
        }
        spin_lock_init(&pool->lock);
        pool->ptr = NULL;
        pool->free = NULL;
        pool->total_elements = 0;
        atomic_set(&pool->counter, 0);
        pool->closing = 0;
        init_waitqueue_head(&pool->output_sleep);

        pool->size = poolsize;

        /* init statistics */
        pool->max_used = 0;
        return pool;
}
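
/*
 * Lifecycle sketch (hypothetical caller, e.g. a sequencer client;
 * the pool size 500 is illustrative):
 *
 *   struct snd_seq_pool *pool = snd_seq_pool_new(500);
 *   if (!pool)
 *           return -ENOMEM;
 *   err = snd_seq_pool_init(pool);      // vmalloc cells, build free list
 *   ...
 *   err = snd_seq_event_dup(pool, ev, &cell, nonblock, file);
 *   ...
 *   snd_seq_cell_free(cell);            // return cells to the pool
 *   ...
 *   snd_seq_pool_delete(&pool);         // done + kfree, pointer cleared
 *
 * Note that snd_seq_pool_new() only allocates the control block; no
 * cells exist until snd_seq_pool_init() runs.
 */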

/* remove memory pool */
int snd_seq_pool_delete(struct snd_seq_pool **ppool)
{
        struct snd_seq_pool *pool = *ppool;

        *ppool = NULL;
        if (pool == NULL)
                return 0;
        snd_seq_pool_done(pool);
        kfree(pool);
        return 0;
}

/* initialize sequencer memory */
int __init snd_sequencer_memory_init(void)
{
        return 0;
}

/* release sequencer memory */
void __exit snd_sequencer_memory_done(void)
{
}


/* exported to seq_clientmgr.c */
void snd_seq_info_pool(struct snd_info_buffer *buffer,
                       struct snd_seq_pool *pool, char *space)
{
        if (pool == NULL)
                return;
        snd_iprintf(buffer, "%sPool size          : %d\n", space, pool->total_elements);
        snd_iprintf(buffer, "%sCells in use       : %d\n", space, atomic_read(&pool->counter));
        snd_iprintf(buffer, "%sPeak cells in use  : %d\n", space, pool->max_used);
        snd_iprintf(buffer, "%sAlloc success      : %d\n", space, pool->event_alloc_success);
        snd_iprintf(buffer, "%sAlloc failures     : %d\n", space, pool->event_alloc_failures);
}