GitHub Repository: torvalds/linux
Path: blob/master/sound/core/seq/seq_queue.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * ALSA sequencer Timing queue handling
 * Copyright (c) 1998-1999 by Frank van de Pol <[email protected]>
 *
 * MAJOR CHANGES
 *   Nov. 13, 1999  Takashi Iwai <[email protected]>
 *     - Queues are allocated dynamically via ioctl.
 *     - When owner client is deleted, all owned queues are deleted, too.
 *     - Owner of unlocked queue is kept unmodified even if it is
 *       manipulated by other clients.
 *     - Owner field in SET_QUEUE_OWNER ioctl must be identical with the
 *       caller client, i.e. changing owner to a third client is not
 *       allowed.
 *
 *   Aug. 30, 2000  Takashi Iwai
 *     - Queues are managed in a static array again, but in a better way.
 *       The API itself is identical.
 *     - The queue is locked when a struct snd_seq_queue pointer is returned
 *       via queueptr().  This pointer *MUST* be released afterward by
 *       queuefree(ptr).
 *     - Addition of experimental sync support.
 */
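
/*
 * A minimal usage sketch of the queueptr()/queuefree() protocol described
 * above (illustrative only; the queue id and error handling are assumed):
 *
 *	struct snd_seq_queue *q = queueptr(queueid);
 *	if (!q)
 *		return -EINVAL;
 *	... access q while holding the use_lock reference ...
 *	queuefree(q);	// must always be paired with queueptr()
 */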
24
25
#include <linux/init.h>
26
#include <linux/slab.h>
27
#include <sound/core.h>
28
29
#include "seq_memory.h"
30
#include "seq_queue.h"
31
#include "seq_clientmgr.h"
32
#include "seq_fifo.h"
33
#include "seq_timer.h"
34
#include "seq_info.h"
35
36
/* list of allocated queues */
37
static struct snd_seq_queue *queue_list[SNDRV_SEQ_MAX_QUEUES];
38
static DEFINE_SPINLOCK(queue_list_lock);
39
/* number of queues allocated */
40
static int num_queues;
41
42
int snd_seq_queue_get_cur_queues(void)
43
{
44
return num_queues;
45
}
46
47
/*----------------------------------------------------------------*/
48
49
/* assign queue id and insert to list */
50
static int queue_list_add(struct snd_seq_queue *q)
51
{
52
int i;
53
54
guard(spinlock_irqsave)(&queue_list_lock);
55
for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
56
if (! queue_list[i]) {
57
queue_list[i] = q;
58
q->queue = i;
59
num_queues++;
60
return i;
61
}
62
}
63
return -1;
64
}
65
66
static struct snd_seq_queue *queue_list_remove(int id, int client)
67
{
68
struct snd_seq_queue *q;
69
70
guard(spinlock_irqsave)(&queue_list_lock);
71
q = queue_list[id];
72
if (q) {
73
guard(spinlock)(&q->owner_lock);
74
if (q->owner == client) {
75
/* found */
76
q->klocked = 1;
77
queue_list[id] = NULL;
78
num_queues--;
79
return q;
80
}
81
}
82
return NULL;
83
}
84
85
/*----------------------------------------------------------------*/
86
87
/* create new queue (constructor) */
88
static struct snd_seq_queue *queue_new(int owner, int locked)
89
{
90
struct snd_seq_queue *q;
91
92
q = kzalloc(sizeof(*q), GFP_KERNEL);
93
if (!q)
94
return NULL;
95
96
spin_lock_init(&q->owner_lock);
97
spin_lock_init(&q->check_lock);
98
mutex_init(&q->timer_mutex);
99
snd_use_lock_init(&q->use_lock);
100
q->queue = -1;
101
102
q->tickq = snd_seq_prioq_new();
103
q->timeq = snd_seq_prioq_new();
104
q->timer = snd_seq_timer_new();
105
if (q->tickq == NULL || q->timeq == NULL || q->timer == NULL) {
106
snd_seq_prioq_delete(&q->tickq);
107
snd_seq_prioq_delete(&q->timeq);
108
snd_seq_timer_delete(&q->timer);
109
kfree(q);
110
return NULL;
111
}
112
113
q->owner = owner;
114
q->locked = locked;
115
q->klocked = 0;
116
117
return q;
118
}
119
120
/* delete queue (destructor) */
121
static void queue_delete(struct snd_seq_queue *q)
122
{
123
/* stop and release the timer */
124
mutex_lock(&q->timer_mutex);
125
snd_seq_timer_stop(q->timer);
126
snd_seq_timer_close(q);
127
mutex_unlock(&q->timer_mutex);
128
/* wait until access free */
129
snd_use_lock_sync(&q->use_lock);
130
/* release resources... */
131
snd_seq_prioq_delete(&q->tickq);
132
snd_seq_prioq_delete(&q->timeq);
133
snd_seq_timer_delete(&q->timer);
134
135
kfree(q);
136
}
137
138
139
/*----------------------------------------------------------------*/
140
141
/* delete all existing queues */
142
void snd_seq_queues_delete(void)
143
{
144
int i;
145
146
/* clear list */
147
for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
148
if (queue_list[i])
149
queue_delete(queue_list[i]);
150
}
151
}
152
153
static void queue_use(struct snd_seq_queue *queue, int client, int use);
154
155
/* allocate a new queue -
156
* return pointer to new queue or ERR_PTR(-errno) for error
157
* The new queue's use_lock is set to 1. It is the caller's responsibility to
158
* call snd_use_lock_free(&q->use_lock).
159
*/
160
struct snd_seq_queue *snd_seq_queue_alloc(int client, int locked, unsigned int info_flags)
161
{
162
struct snd_seq_queue *q;
163
164
q = queue_new(client, locked);
165
if (q == NULL)
166
return ERR_PTR(-ENOMEM);
167
q->info_flags = info_flags;
168
queue_use(q, client, 1);
169
snd_use_lock_use(&q->use_lock);
170
if (queue_list_add(q) < 0) {
171
snd_use_lock_free(&q->use_lock);
172
queue_delete(q);
173
return ERR_PTR(-ENOMEM);
174
}
175
return q;
176
}
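
/*
 * A minimal sketch of the allocation contract above (illustrative only;
 * the client number and locked flag are assumed):
 *
 *	struct snd_seq_queue *q = snd_seq_queue_alloc(client, 1, 0);
 *	if (IS_ERR(q))
 *		return PTR_ERR(q);
 *	... report q->queue to the caller ...
 *	snd_use_lock_free(&q->use_lock);	// drop the extra use reference
 */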

/* delete a queue - queue must be owned by the client */
int snd_seq_queue_delete(int client, int queueid)
{
	struct snd_seq_queue *q;

	if (queueid < 0 || queueid >= SNDRV_SEQ_MAX_QUEUES)
		return -EINVAL;
	q = queue_list_remove(queueid, client);
	if (q == NULL)
		return -EINVAL;
	queue_delete(q);

	return 0;
}


/* return pointer to queue structure for specified id */
struct snd_seq_queue *queueptr(int queueid)
{
	struct snd_seq_queue *q;

	if (queueid < 0 || queueid >= SNDRV_SEQ_MAX_QUEUES)
		return NULL;
	guard(spinlock_irqsave)(&queue_list_lock);
	q = queue_list[queueid];
	if (q)
		snd_use_lock_use(&q->use_lock);
	return q;
}

/* return the (first) queue matching with the specified name */
struct snd_seq_queue *snd_seq_queue_find_name(char *name)
{
	int i;
	struct snd_seq_queue *q;

	for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
		q = queueptr(i);
		if (q) {
			if (strncmp(q->name, name, sizeof(q->name)) == 0)
				return q;
			queuefree(q);
		}
	}
	return NULL;
}


/* -------------------------------------------------------- */

#define MAX_CELL_PROCESSES_IN_QUEUE	1000

void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop)
{
	struct snd_seq_event_cell *cell;
	snd_seq_tick_time_t cur_tick;
	snd_seq_real_time_t cur_time;
	int processed = 0;

	if (q == NULL)
		return;

	/* make this function non-reentrant */
	scoped_guard(spinlock_irqsave, &q->check_lock) {
		if (q->check_blocked) {
			q->check_again = 1;
			return;	/* other thread is already checking queues */
		}
		q->check_blocked = 1;
	}

 __again:
	/* Process tick queue... */
	cur_tick = snd_seq_timer_get_cur_tick(q->timer);
	for (;;) {
		cell = snd_seq_prioq_cell_out(q->tickq, &cur_tick);
		if (!cell)
			break;
		snd_seq_dispatch_event(cell, atomic, hop);
		if (++processed >= MAX_CELL_PROCESSES_IN_QUEUE)
			goto out; /* the rest processed at the next batch */
	}

	/* Process time queue... */
	cur_time = snd_seq_timer_get_cur_time(q->timer, false);
	for (;;) {
		cell = snd_seq_prioq_cell_out(q->timeq, &cur_time);
		if (!cell)
			break;
		snd_seq_dispatch_event(cell, atomic, hop);
		if (++processed >= MAX_CELL_PROCESSES_IN_QUEUE)
			goto out; /* the rest processed at the next batch */
	}

 out:
	/* free lock */
	scoped_guard(spinlock_irqsave, &q->check_lock) {
		if (q->check_again) {
			q->check_again = 0;
			if (processed < MAX_CELL_PROCESSES_IN_QUEUE)
				goto __again;
		}
		q->check_blocked = 0;
	}
}
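
/*
 * Note: the check_blocked/check_again pair above implements a simple
 * retrigger pattern: a concurrent caller that finds the check already in
 * progress merely sets check_again and returns, while the running caller
 * loops back to __again before clearing check_blocked.  Thus only one
 * thread dispatches events at a time, yet no trigger is lost.
 */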


/* enqueue an event to a single queue */
int snd_seq_enqueue_event(struct snd_seq_event_cell *cell, int atomic, int hop)
{
	int dest, err;
	struct snd_seq_queue *q;

	if (snd_BUG_ON(!cell))
		return -EINVAL;
	dest = cell->event.queue;	/* destination queue */
	q = queueptr(dest);
	if (q == NULL)
		return -EINVAL;
	/* handle relative time stamps, convert them into absolute */
	if ((cell->event.flags & SNDRV_SEQ_TIME_MODE_MASK) == SNDRV_SEQ_TIME_MODE_REL) {
		switch (cell->event.flags & SNDRV_SEQ_TIME_STAMP_MASK) {
		case SNDRV_SEQ_TIME_STAMP_TICK:
			cell->event.time.tick += q->timer->tick.cur_tick;
			break;

		case SNDRV_SEQ_TIME_STAMP_REAL:
			snd_seq_inc_real_time(&cell->event.time.time,
					      &q->timer->cur_time);
			break;
		}
		cell->event.flags &= ~SNDRV_SEQ_TIME_MODE_MASK;
		cell->event.flags |= SNDRV_SEQ_TIME_MODE_ABS;
	}
	/* enqueue event in the real-time or midi queue */
	switch (cell->event.flags & SNDRV_SEQ_TIME_STAMP_MASK) {
	case SNDRV_SEQ_TIME_STAMP_TICK:
		err = snd_seq_prioq_cell_in(q->tickq, cell);
		break;

	case SNDRV_SEQ_TIME_STAMP_REAL:
	default:
		err = snd_seq_prioq_cell_in(q->timeq, cell);
		break;
	}

	if (err < 0) {
		queuefree(q); /* unlock */
		return err;
	}

	/* trigger dispatching */
	snd_seq_check_queue(q, atomic, hop);

	queuefree(q); /* unlock */

	return 0;
}
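
/*
 * Worked example of the relative-to-absolute conversion above (values
 * assumed for illustration): a tick-stamped event carrying
 * SNDRV_SEQ_TIME_MODE_REL with time.tick = 96, enqueued while the queue
 * timer reads cur_tick = 384, is rewritten to an absolute stamp of
 * 384 + 96 = 480 ticks, and its mode flag is switched to
 * SNDRV_SEQ_TIME_MODE_ABS before insertion into the tick prioq.
 */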


/*----------------------------------------------------------------*/

static inline int check_access(struct snd_seq_queue *q, int client)
{
	return (q->owner == client) || (!q->locked && !q->klocked);
}

/* check if the client has permission to modify queue parameters.
 * if it does, lock the queue
 */
static int queue_access_lock(struct snd_seq_queue *q, int client)
{
	int access_ok;

	guard(spinlock_irqsave)(&q->owner_lock);
	access_ok = check_access(q, client);
	if (access_ok)
		q->klocked = 1;
	return access_ok;
}

/* unlock the queue */
static inline void queue_access_unlock(struct snd_seq_queue *q)
{
	guard(spinlock_irqsave)(&q->owner_lock);
	q->klocked = 0;
}

/* exported - only checking permission */
int snd_seq_queue_check_access(int queueid, int client)
{
	struct snd_seq_queue *q = queueptr(queueid);
	int access_ok;

	if (!q)
		return 0;
	scoped_guard(spinlock_irqsave, &q->owner_lock)
		access_ok = check_access(q, client);
	queuefree(q);
	return access_ok;
}
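
/*
 * Access rule summary for the helpers above: a client may modify a queue
 * if it is the owner, or if the queue is neither permanently locked
 * (q->locked, set by the owner) nor transiently kernel-locked (q->klocked,
 * held while another client is inside queue_access_lock()).  For example,
 * with owner = 128 and locked = 1, client 129 is refused while client 128
 * is admitted (client numbers assumed for illustration).
 */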

/*----------------------------------------------------------------*/

/*
 * change queue's owner and permission
 */
int snd_seq_queue_set_owner(int queueid, int client, int locked)
{
	struct snd_seq_queue *q = queueptr(queueid);

	if (q == NULL)
		return -EINVAL;

	if (!queue_access_lock(q, client)) {
		queuefree(q);
		return -EPERM;
	}

	scoped_guard(spinlock_irqsave, &q->owner_lock) {
		q->locked = locked ? 1 : 0;
		q->owner = client;
	}
	queue_access_unlock(q);
	queuefree(q);

	return 0;
}


/*----------------------------------------------------------------*/

/* open timer -
 * q->use mutex should be down before calling this function to avoid
 * conflict with snd_seq_queue_use()
 */
int snd_seq_queue_timer_open(int queueid)
{
	int result = 0;
	struct snd_seq_queue *queue;
	struct snd_seq_timer *tmr;

	queue = queueptr(queueid);
	if (queue == NULL)
		return -EINVAL;
	tmr = queue->timer;
	result = snd_seq_timer_open(queue);
	if (result < 0) {
		snd_seq_timer_defaults(tmr);
		result = snd_seq_timer_open(queue);
	}
	queuefree(queue);
	return result;
}

/* close timer -
 * q->use mutex should be down before calling this function
 */
int snd_seq_queue_timer_close(int queueid)
{
	struct snd_seq_queue *queue;
	int result = 0;

	queue = queueptr(queueid);
	if (queue == NULL)
		return -EINVAL;
	snd_seq_timer_close(queue);
	queuefree(queue);
	return result;
}

/* change queue tempo and ppq */
int snd_seq_queue_timer_set_tempo(int queueid, int client,
				  struct snd_seq_queue_tempo *info)
{
	struct snd_seq_queue *q = queueptr(queueid);
	int result;

	if (q == NULL)
		return -EINVAL;
	if (!queue_access_lock(q, client)) {
		queuefree(q);
		return -EPERM;
	}

	result = snd_seq_timer_set_tempo_ppq(q->timer, info->tempo, info->ppq,
					     info->tempo_base);
	if (result >= 0 && info->skew_base > 0)
		result = snd_seq_timer_set_skew(q->timer, info->skew_value,
						info->skew_base);
	queue_access_unlock(q);
	queuefree(q);
	return result;
}

/* use or unuse this queue */
static void queue_use(struct snd_seq_queue *queue, int client, int use)
{
	if (use) {
		if (!test_and_set_bit(client, queue->clients_bitmap))
			queue->clients++;
	} else {
		if (test_and_clear_bit(client, queue->clients_bitmap))
			queue->clients--;
	}
	if (queue->clients) {
		if (use && queue->clients == 1)
			snd_seq_timer_defaults(queue->timer);
		snd_seq_timer_open(queue);
	} else {
		snd_seq_timer_close(queue);
	}
}
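
/*
 * Note on the reference counting above: clients_bitmap records which
 * clients use the queue and queue->clients counts them, so the timer is
 * (re)opened while at least one user remains and closed on the last
 * unuse.  The first user also resets the timer to its defaults before
 * opening it.
 */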

/* use or unuse this queue -
 * if it is the first client, starts the timer.
 * if it is no longer used by any client, stop the timer.
 */
int snd_seq_queue_use(int queueid, int client, int use)
{
	struct snd_seq_queue *queue;

	queue = queueptr(queueid);
	if (queue == NULL)
		return -EINVAL;
	mutex_lock(&queue->timer_mutex);
	queue_use(queue, client, use);
	mutex_unlock(&queue->timer_mutex);
	queuefree(queue);
	return 0;
}

/*
 * check if queue is used by the client
 * return negative value if the queue is invalid.
 * return 0 if not used, 1 if used.
 */
int snd_seq_queue_is_used(int queueid, int client)
{
	struct snd_seq_queue *q;
	int result;

	q = queueptr(queueid);
	if (q == NULL)
		return -EINVAL; /* invalid queue */
	result = test_bit(client, q->clients_bitmap) ? 1 : 0;
	queuefree(q);
	return result;
}


/*----------------------------------------------------------------*/

/* final stage notification -
 * remove cells for the client that no longer exists (for non-owned queues)
 * or delete this queue (for owned queues)
 */
void snd_seq_queue_client_leave(int client)
{
	int i;
	struct snd_seq_queue *q;

	/* delete own queues from queue list */
	for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
		q = queue_list_remove(i, client);
		if (q)
			queue_delete(q);
	}

	/* remove cells from existing queues -
	 * they are not owned by this client
	 */
	for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
		q = queueptr(i);
		if (!q)
			continue;
		if (test_bit(client, q->clients_bitmap)) {
			snd_seq_prioq_leave(q->tickq, client, 0);
			snd_seq_prioq_leave(q->timeq, client, 0);
			snd_seq_queue_use(q->queue, client, 0);
		}
		queuefree(q);
	}
}



/*----------------------------------------------------------------*/

/* remove cells based on flush criteria */
void snd_seq_queue_remove_cells(int client, struct snd_seq_remove_events *info)
{
	int i;
	struct snd_seq_queue *q;

	for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
		q = queueptr(i);
		if (!q)
			continue;
		if (test_bit(client, q->clients_bitmap) &&
		    (!(info->remove_mode & SNDRV_SEQ_REMOVE_DEST) ||
		     q->queue == info->queue)) {
			snd_seq_prioq_remove_events(q->tickq, client, info);
			snd_seq_prioq_remove_events(q->timeq, client, info);
		}
		queuefree(q);
	}
}

/*----------------------------------------------------------------*/

/*
 * send events to all subscribed ports
 */
static void queue_broadcast_event(struct snd_seq_queue *q, struct snd_seq_event *ev,
				  int atomic, int hop)
{
	struct snd_seq_event sev;

	sev = *ev;

	sev.flags = SNDRV_SEQ_TIME_STAMP_TICK|SNDRV_SEQ_TIME_MODE_ABS;
	sev.time.tick = q->timer->tick.cur_tick;
	sev.queue = q->queue;
	sev.data.queue.queue = q->queue;

	/* broadcast events from Timer port */
	sev.source.client = SNDRV_SEQ_CLIENT_SYSTEM;
	sev.source.port = SNDRV_SEQ_PORT_SYSTEM_TIMER;
	sev.dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS;
	snd_seq_kernel_client_dispatch(SNDRV_SEQ_CLIENT_SYSTEM, &sev, atomic, hop);
}

/*
 * process a received queue-control event.
 * this function is exported for seq_sync.c.
 */
static void snd_seq_queue_process_event(struct snd_seq_queue *q,
					struct snd_seq_event *ev,
					int atomic, int hop)
{
	switch (ev->type) {
	case SNDRV_SEQ_EVENT_START:
		snd_seq_prioq_leave(q->tickq, ev->source.client, 1);
		snd_seq_prioq_leave(q->timeq, ev->source.client, 1);
		if (!snd_seq_timer_start(q->timer))
			queue_broadcast_event(q, ev, atomic, hop);
		break;

	case SNDRV_SEQ_EVENT_CONTINUE:
		if (!snd_seq_timer_continue(q->timer))
			queue_broadcast_event(q, ev, atomic, hop);
		break;

	case SNDRV_SEQ_EVENT_STOP:
		snd_seq_timer_stop(q->timer);
		queue_broadcast_event(q, ev, atomic, hop);
		break;

	case SNDRV_SEQ_EVENT_TEMPO:
		snd_seq_timer_set_tempo(q->timer, ev->data.queue.param.value);
		queue_broadcast_event(q, ev, atomic, hop);
		break;

	case SNDRV_SEQ_EVENT_SETPOS_TICK:
		if (snd_seq_timer_set_position_tick(q->timer, ev->data.queue.param.time.tick) == 0)
			queue_broadcast_event(q, ev, atomic, hop);
		break;

	case SNDRV_SEQ_EVENT_SETPOS_TIME:
		if (snd_seq_timer_set_position_time(q->timer, ev->data.queue.param.time.time) == 0)
			queue_broadcast_event(q, ev, atomic, hop);
		break;

	case SNDRV_SEQ_EVENT_QUEUE_SKEW:
		if (snd_seq_timer_set_skew(q->timer,
					   ev->data.queue.param.skew.value,
					   ev->data.queue.param.skew.base) == 0)
			queue_broadcast_event(q, ev, atomic, hop);
		break;
	}
}


/*
 * Queue control via timer control port:
 * this function is exported as a callback of timer port.
 */
int snd_seq_control_queue(struct snd_seq_event *ev, int atomic, int hop)
{
	struct snd_seq_queue *q;

	if (snd_BUG_ON(!ev))
		return -EINVAL;
	q = queueptr(ev->data.queue.queue);

	if (q == NULL)
		return -EINVAL;

	if (!queue_access_lock(q, ev->source.client)) {
		queuefree(q);
		return -EPERM;
	}

	snd_seq_queue_process_event(q, ev, atomic, hop);

	queue_access_unlock(q);
	queuefree(q);
	return 0;
}


/*----------------------------------------------------------------*/

#ifdef CONFIG_SND_PROC_FS
/* exported to seq_info.c */
void snd_seq_info_queues_read(struct snd_info_entry *entry,
			      struct snd_info_buffer *buffer)
{
	int i, bpm;
	struct snd_seq_queue *q;
	struct snd_seq_timer *tmr;
	bool locked;
	int owner;

	for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
		q = queueptr(i);
		if (!q)
			continue;

		tmr = q->timer;
		if (tmr->tempo)
			bpm = (60000 * tmr->tempo_base) / tmr->tempo;
		else
			bpm = 0;
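		/* Worked example of the BPM formula above (illustrative values
		 * assumed): with a tempo_base of 1000 ns, the tempo is
		 * expressed in microseconds per quarter note, so tempo =
		 * 500000 yields bpm = (60000 * 1000) / 500000 = 120.
		 */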

		scoped_guard(spinlock_irq, &q->owner_lock) {
			locked = q->locked;
			owner = q->owner;
		}

		snd_iprintf(buffer, "queue %d: [%s]\n", q->queue, q->name);
		snd_iprintf(buffer, "owned by client : %d\n", owner);
		snd_iprintf(buffer, "lock status : %s\n", locked ? "Locked" : "Free");
		snd_iprintf(buffer, "queued time events : %d\n", snd_seq_prioq_avail(q->timeq));
		snd_iprintf(buffer, "queued tick events : %d\n", snd_seq_prioq_avail(q->tickq));
		snd_iprintf(buffer, "timer state : %s\n", tmr->running ? "Running" : "Stopped");
		snd_iprintf(buffer, "timer PPQ : %d\n", tmr->ppq);
		snd_iprintf(buffer, "current tempo : %d\n", tmr->tempo);
		snd_iprintf(buffer, "tempo base : %d ns\n", tmr->tempo_base);
		snd_iprintf(buffer, "current BPM : %d\n", bpm);
		snd_iprintf(buffer, "current time : %d.%09d s\n", tmr->cur_time.tv_sec, tmr->cur_time.tv_nsec);
		snd_iprintf(buffer, "current tick : %d\n", tmr->tick.cur_tick);
		snd_iprintf(buffer, "\n");
		queuefree(q);
	}
}
#endif /* CONFIG_SND_PROC_FS */