Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
freebsd
GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/dev/acpica/Osd/OsdSynch.c
39537 views
1
/*-
2
* Copyright (c) 2000 Michael Smith
3
* Copyright (c) 2000 BSDi
4
* Copyright (c) 2007-2009 Jung-uk Kim <[email protected]>
5
* All rights reserved.
6
*
7
* Redistribution and use in source and binary forms, with or without
8
* modification, are permitted provided that the following conditions
9
* are met:
10
* 1. Redistributions of source code must retain the above copyright
11
* notice, this list of conditions and the following disclaimer.
12
* 2. Redistributions in binary form must reproduce the above copyright
13
* notice, this list of conditions and the following disclaimer in the
14
* documentation and/or other materials provided with the distribution.
15
*
16
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26
* SUCH DAMAGE.
27
*/
28
29
/*
30
* 6.1 : Mutual Exclusion and Synchronisation
31
*/
32
33
#include <sys/cdefs.h>
34
#include <contrib/dev/acpica/include/acpi.h>
35
#include <contrib/dev/acpica/include/accommon.h>
36
37
#include <sys/condvar.h>
38
#include <sys/kernel.h>
39
#include <sys/lock.h>
40
#include <sys/malloc.h>
41
#include <sys/mutex.h>
42
43
#define _COMPONENT ACPI_OS_SERVICES
44
ACPI_MODULE_NAME("SYNCH")
45
46
static MALLOC_DEFINE(M_ACPISEM, "acpisem", "ACPI semaphore");
47
48
/*
49
* Convert milliseconds to ticks.
50
*/
51
static int
52
timeout2hz(UINT16 Timeout)
53
{
54
struct timeval tv;
55
56
tv.tv_sec = (time_t)(Timeout / 1000);
57
tv.tv_usec = (suseconds_t)(Timeout % 1000) * 1000;
58
59
return (tvtohz(&tv));
60
}
61
62
/*
 * ACPI_SEMAPHORE
 *
 * Counting semaphore built from a mutex/condvar pair.
 */
struct acpi_sema {
	struct mtx	as_lock;	/* protects every field below */
	char		as_name[32];	/* name given to mtx_init()/cv_init() */
	struct cv	as_cv;		/* consumers sleep here for units */
	UINT32		as_maxunits;	/* unit cap, or ACPI_NO_UNIT_LIMIT */
	UINT32		as_units;	/* units currently available */
	int		as_waiters;	/* threads blocked in AcpiOsWaitSemaphore() */
	int		as_reset;	/* set by AcpiOsDeleteSemaphore() to evict waiters */
};
74
75
ACPI_STATUS
76
AcpiOsCreateSemaphore(UINT32 MaxUnits, UINT32 InitialUnits,
77
ACPI_SEMAPHORE *OutHandle)
78
{
79
struct acpi_sema *as;
80
81
ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
82
83
if (OutHandle == NULL || MaxUnits == 0 || InitialUnits > MaxUnits)
84
return_ACPI_STATUS (AE_BAD_PARAMETER);
85
86
if ((as = malloc(sizeof(*as), M_ACPISEM, M_NOWAIT | M_ZERO)) == NULL)
87
return_ACPI_STATUS (AE_NO_MEMORY);
88
89
snprintf(as->as_name, sizeof(as->as_name), "ACPI sema (%p)", as);
90
mtx_init(&as->as_lock, as->as_name, NULL, MTX_DEF);
91
cv_init(&as->as_cv, as->as_name);
92
as->as_maxunits = MaxUnits;
93
as->as_units = InitialUnits;
94
95
*OutHandle = (ACPI_SEMAPHORE)as;
96
97
ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "created %s, max %u, initial %u\n",
98
as->as_name, MaxUnits, InitialUnits));
99
100
return_ACPI_STATUS (AE_OK);
101
}
102
103
/*
 * Destroy a semaphore created by AcpiOsCreateSemaphore().
 *
 * If threads are still blocked on it, flag the semaphore as being
 * reset (waiters then fail out with AE_ERROR), wake them all, and
 * wait for the waiter count to drain before freeing.  If the drain
 * is interrupted by a signal, return AE_ERROR and leave the
 * semaphore intact.
 */
ACPI_STATUS
AcpiOsDeleteSemaphore(ACPI_SEMAPHORE Handle)
{
	struct acpi_sema *as = (struct acpi_sema *)Handle;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (as == NULL)
		return_ACPI_STATUS (AE_BAD_PARAMETER);

	mtx_lock(&as->as_lock);

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "delete %s\n", as->as_name));

	if (as->as_waiters > 0) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
		    "reset %s, units %u, waiters %d\n",
		    as->as_name, as->as_units, as->as_waiters));
		as->as_reset = 1;
		cv_broadcast(&as->as_cv);
		while (as->as_waiters > 0) {
			/*
			 * Nothing in this file does a wakeup() on
			 * &as->as_reset, so rely on the hz timeout to
			 * re-poll the waiter count about once a second.
			 */
			if (mtx_sleep(&as->as_reset, &as->as_lock,
			    PCATCH, "acsrst", hz) == EINTR) {
				ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				    "failed to reset %s, waiters %d\n",
				    as->as_name, as->as_waiters));
				mtx_unlock(&as->as_lock);
				return_ACPI_STATUS (AE_ERROR);
			}
			ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
			    "wait %s, units %u, waiters %d\n",
			    as->as_name, as->as_units, as->as_waiters));
		}
	}

	mtx_unlock(&as->as_lock);

	mtx_destroy(&as->as_lock);
	cv_destroy(&as->as_cv);
	free(as, M_ACPISEM);

	return_ACPI_STATUS (AE_OK);
}
146
147
/* True if semaphore "s" currently has at least "u" units available. */
#define ACPISEM_AVAIL(s, u)	((s)->as_units >= (u))

/*
 * Take "Units" units from the semaphore, blocking for at most
 * "Timeout" milliseconds.  Timeout may also be ACPI_DO_NOT_WAIT (fail
 * immediately if the units are unavailable) or ACPI_WAIT_FOREVER
 * (block until they become available).
 *
 * Returns AE_BAD_PARAMETER for a NULL handle or zero unit count,
 * AE_LIMIT if the request can never be satisfied, AE_TIME on timeout,
 * AE_ERROR if the sleep was interrupted by a signal or the semaphore
 * is being deleted, and AE_OK on success.
 */
ACPI_STATUS
AcpiOsWaitSemaphore(ACPI_SEMAPHORE Handle, UINT32 Units, UINT16 Timeout)
{
	struct acpi_sema *as = (struct acpi_sema *)Handle;
	int error, prevtick, slptick, tmo;
	ACPI_STATUS status = AE_OK;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (as == NULL || Units == 0)
		return_ACPI_STATUS (AE_BAD_PARAMETER);

	mtx_lock(&as->as_lock);

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
	    "get %u unit(s) from %s, units %u, waiters %d, timeout %u\n",
	    Units, as->as_name, as->as_units, as->as_waiters, Timeout));

	/* A request exceeding the semaphore's capacity fails up front. */
	if (as->as_maxunits != ACPI_NO_UNIT_LIMIT && as->as_maxunits < Units) {
		mtx_unlock(&as->as_lock);
		return_ACPI_STATUS (AE_LIMIT);
	}

	switch (Timeout) {
	case ACPI_DO_NOT_WAIT:
		if (!ACPISEM_AVAIL(as, Units))
			status = AE_TIME;
		break;
	case ACPI_WAIT_FOREVER:
		while (!ACPISEM_AVAIL(as, Units)) {
			as->as_waiters++;
			error = cv_wait_sig(&as->as_cv, &as->as_lock);
			as->as_waiters--;
			/* as_reset means the semaphore is being deleted. */
			if (error == EINTR || as->as_reset) {
				status = AE_ERROR;
				break;
			}
		}
		break;
	default:
		if (cold) {
			/*
			 * Just spin polling the semaphore once a
			 * millisecond.
			 */
			while (!ACPISEM_AVAIL(as, Units)) {
				if (Timeout == 0) {
					status = AE_TIME;
					break;
				}
				Timeout--;
				mtx_unlock(&as->as_lock);
				DELAY(1000);
				mtx_lock(&as->as_lock);
			}
			break;
		}
		tmo = timeout2hz(Timeout);
		while (!ACPISEM_AVAIL(as, Units)) {
			prevtick = ticks;
			as->as_waiters++;
			error = cv_timedwait_sig(&as->as_cv, &as->as_lock, tmo);
			as->as_waiters--;
			if (error == EINTR || as->as_reset) {
				status = AE_ERROR;
				break;
			}
			if (ACPISEM_AVAIL(as, Units))
				break;
			/*
			 * Charge the time already slept against the
			 * remaining budget; slptick < 0 guards against
			 * the tick counter wrapping.
			 */
			slptick = ticks - prevtick;
			if (slptick >= tmo || slptick < 0) {
				status = AE_TIME;
				break;
			}
			tmo -= slptick;
		}
	}
	if (ACPI_SUCCESS(status))
		as->as_units -= Units;

	mtx_unlock(&as->as_lock);

	return_ACPI_STATUS (status);
}
233
234
/*
 * Return "Units" units to the semaphore and wake sleeping consumers.
 *
 * Returns AE_BAD_PARAMETER for a NULL handle or zero count, AE_LIMIT
 * if the return would push the count past the semaphore's maximum,
 * and AE_OK otherwise.
 */
ACPI_STATUS
AcpiOsSignalSemaphore(ACPI_SEMAPHORE Handle, UINT32 Units)
{
	struct acpi_sema *as = (struct acpi_sema *)Handle;
	UINT32 i;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (as == NULL || Units == 0)
		return_ACPI_STATUS (AE_BAD_PARAMETER);

	mtx_lock(&as->as_lock);

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
	    "return %u units to %s, units %u, waiters %d\n",
	    Units, as->as_name, as->as_units, as->as_waiters));

	/*
	 * "as_maxunits - Units < as_units" is the overflow-safe form of
	 * "as_units + Units > as_maxunits".
	 */
	if (as->as_maxunits != ACPI_NO_UNIT_LIMIT &&
	    (as->as_maxunits < Units ||
	    as->as_maxunits - Units < as->as_units)) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
		    "exceeded max units %u\n", as->as_maxunits));
		mtx_unlock(&as->as_lock);
		return_ACPI_STATUS (AE_LIMIT);
	}

	as->as_units += Units;
	/* One cv_signal() per returned unit so several waiters can run. */
	if (as->as_waiters > 0 && ACPISEM_AVAIL(as, Units))
		for (i = 0; i < Units; i++)
			cv_signal(&as->as_cv);

	mtx_unlock(&as->as_lock);

	return_ACPI_STATUS (AE_OK);
}

#undef ACPISEM_AVAIL
271
272
/*
 * ACPI_MUTEX
 *
 * Recursive sleep mutex; am_nested counts acquisitions by the owning
 * thread beyond the first.
 */
struct acpi_mutex {
	struct mtx	am_lock;	/* protects every field below */
	char		am_name[32];	/* name given to mtx_init() */
	struct thread	*am_owner;	/* owning thread, NULL when free */
	int		am_nested;	/* recursion depth beyond first acquire */
	int		am_waiters;	/* threads sleeping in AcpiOsAcquireMutex() */
	int		am_reset;	/* set by AcpiOsDeleteMutex() to evict waiters */
};
283
284
ACPI_STATUS
285
AcpiOsCreateMutex(ACPI_MUTEX *OutHandle)
286
{
287
struct acpi_mutex *am;
288
289
ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
290
291
if (OutHandle == NULL)
292
return_ACPI_STATUS (AE_BAD_PARAMETER);
293
294
if ((am = malloc(sizeof(*am), M_ACPISEM, M_NOWAIT | M_ZERO)) == NULL)
295
return_ACPI_STATUS (AE_NO_MEMORY);
296
297
snprintf(am->am_name, sizeof(am->am_name), "ACPI mutex (%p)", am);
298
mtx_init(&am->am_lock, am->am_name, NULL, MTX_DEF);
299
300
ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "created %s\n", am->am_name));
301
302
*OutHandle = (ACPI_MUTEX)am;
303
304
return_ACPI_STATUS (AE_OK);
305
}
306
307
/* The mutex is free (no owning thread). */
#define ACPIMTX_AVAIL(m)	((m)->am_owner == NULL)
/* The calling thread already owns the mutex. */
#define ACPIMTX_OWNED(m)	((m)->am_owner == curthread)

/*
 * Destroy a mutex created by AcpiOsCreateMutex().
 *
 * If threads are still sleeping on the mutex, flag it as being reset
 * (waiters then fail out with AE_ERROR), wake them, and wait for the
 * waiter count to drain before freeing.  If that wait is interrupted
 * by a signal, simply return, leaving the mutex allocated.
 */
void
AcpiOsDeleteMutex(ACPI_MUTEX Handle)
{
	struct acpi_mutex *am = (struct acpi_mutex *)Handle;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (am == NULL) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "cannot delete null mutex\n"));
		return_VOID;
	}

	mtx_lock(&am->am_lock);

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "delete %s\n", am->am_name));

	if (am->am_waiters > 0) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
		    "reset %s, owner %p\n", am->am_name, am->am_owner));
		am->am_reset = 1;
		wakeup(am);
		while (am->am_waiters > 0) {
			/*
			 * Nothing in this file does a wakeup() on
			 * &am->am_reset, so rely on the hz timeout to
			 * re-poll the waiter count about once a second.
			 */
			if (mtx_sleep(&am->am_reset, &am->am_lock,
			    PCATCH, "acmrst", hz) == EINTR) {
				ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				    "failed to reset %s, waiters %d\n",
				    am->am_name, am->am_waiters));
				mtx_unlock(&am->am_lock);
				return_VOID;
			}
			if (ACPIMTX_AVAIL(am))
				ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				    "wait %s, waiters %d\n",
				    am->am_name, am->am_waiters));
			else
				ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				    "wait %s, owner %p, waiters %d\n",
				    am->am_name, am->am_owner, am->am_waiters));
		}
	}

	mtx_unlock(&am->am_lock);

	mtx_destroy(&am->am_lock);
	free(am, M_ACPISEM);
}
356
357
/*
 * Acquire the mutex, blocking for at most "Timeout" milliseconds.
 * Recursive acquisition by the owning thread just bumps am_nested.
 * Timeout may also be ACPI_DO_NOT_WAIT (fail immediately with
 * AE_TIME) or ACPI_WAIT_FOREVER (block indefinitely).
 *
 * Returns AE_BAD_PARAMETER for a NULL handle, AE_TIME on timeout,
 * AE_ERROR if interrupted by a signal or if the mutex is being
 * deleted, and AE_OK on success.
 */
ACPI_STATUS
AcpiOsAcquireMutex(ACPI_MUTEX Handle, UINT16 Timeout)
{
	struct acpi_mutex *am = (struct acpi_mutex *)Handle;
	int error, prevtick, slptick, tmo;
	ACPI_STATUS status = AE_OK;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (am == NULL)
		return_ACPI_STATUS (AE_BAD_PARAMETER);

	mtx_lock(&am->am_lock);

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "acquire %s\n", am->am_name));

	/* Recursive acquire by the current owner: count it and succeed. */
	if (ACPIMTX_OWNED(am)) {
		am->am_nested++;
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
		    "acquire nested %s, depth %d\n",
		    am->am_name, am->am_nested));
		mtx_unlock(&am->am_lock);
		return_ACPI_STATUS (AE_OK);
	}

	switch (Timeout) {
	case ACPI_DO_NOT_WAIT:
		if (!ACPIMTX_AVAIL(am))
			status = AE_TIME;
		break;
	case ACPI_WAIT_FOREVER:
		while (!ACPIMTX_AVAIL(am)) {
			am->am_waiters++;
			error = mtx_sleep(am, &am->am_lock, PCATCH, "acmtx", 0);
			am->am_waiters--;
			/* am_reset means the mutex is being deleted. */
			if (error == EINTR || am->am_reset) {
				status = AE_ERROR;
				break;
			}
		}
		break;
	default:
		if (cold) {
			/*
			 * Just spin polling the mutex once a
			 * millisecond.
			 */
			while (!ACPIMTX_AVAIL(am)) {
				if (Timeout == 0) {
					status = AE_TIME;
					break;
				}
				Timeout--;
				mtx_unlock(&am->am_lock);
				DELAY(1000);
				mtx_lock(&am->am_lock);
			}
			break;
		}
		tmo = timeout2hz(Timeout);
		while (!ACPIMTX_AVAIL(am)) {
			prevtick = ticks;
			am->am_waiters++;
			error = mtx_sleep(am, &am->am_lock, PCATCH,
			    "acmtx", tmo);
			am->am_waiters--;
			if (error == EINTR || am->am_reset) {
				status = AE_ERROR;
				break;
			}
			if (ACPIMTX_AVAIL(am))
				break;
			/*
			 * Charge the time already slept against the
			 * remaining budget; slptick < 0 guards against
			 * the tick counter wrapping.
			 */
			slptick = ticks - prevtick;
			if (slptick >= tmo || slptick < 0) {
				status = AE_TIME;
				break;
			}
			tmo -= slptick;
		}
	}
	if (ACPI_SUCCESS(status))
		am->am_owner = curthread;

	mtx_unlock(&am->am_lock);

	return_ACPI_STATUS (status);
}
444
445
/*
 * Release the mutex.  A nested acquisition just decrements the
 * recursion depth; the final release clears the owner and wakes one
 * sleeping waiter.  Releasing a mutex the caller does not own is
 * logged (under ACPI_DEBUG) but otherwise ignored.
 */
void
AcpiOsReleaseMutex(ACPI_MUTEX Handle)
{
	struct acpi_mutex *am = (struct acpi_mutex *)Handle;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (am == NULL) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
		    "cannot release null mutex\n"));
		return_VOID;
	}

	mtx_lock(&am->am_lock);

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "release %s\n", am->am_name));

	if (ACPIMTX_OWNED(am)) {
		if (am->am_nested > 0) {
			ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
			    "release nested %s, depth %d\n",
			    am->am_name, am->am_nested));
			am->am_nested--;
		} else
			am->am_owner = NULL;
	} else {
		if (ACPIMTX_AVAIL(am))
			ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
			    "release already available %s\n", am->am_name));
		else
			ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
			    "release unowned %s from %p, depth %d\n",
			    am->am_name, am->am_owner, am->am_nested));
	}
	/* Hand the mutex to one of the sleepers, if any. */
	if (am->am_waiters > 0 && ACPIMTX_AVAIL(am))
		wakeup_one(am);

	mtx_unlock(&am->am_lock);
}

#undef ACPIMTX_AVAIL
#undef ACPIMTX_OWNED
487
488
/*
 * ACPI_SPINLOCK
 *
 * Recursive spin lock; al_nested counts acquisitions beyond the first
 * by the lock's current owner.
 */
struct acpi_spinlock {
	struct mtx	al_lock;	/* the underlying MTX_SPIN mutex */
	char		al_name[32];	/* name given to mtx_init() */
	int		al_nested;	/* recursion depth beyond first acquire */
};
496
497
/*
 * Allocate and initialize a spin lock, returning its handle through
 * "OutHandle".
 */
ACPI_STATUS
AcpiOsCreateLock(ACPI_SPINLOCK *OutHandle)
{
	struct acpi_spinlock *al;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (OutHandle == NULL)
		return_ACPI_STATUS (AE_BAD_PARAMETER);

	if ((al = malloc(sizeof(*al), M_ACPISEM, M_NOWAIT | M_ZERO)) == NULL)
		return_ACPI_STATUS (AE_NO_MEMORY);

#ifdef ACPI_DEBUG
	/* Give ACPICA's two well-known global locks recognizable names. */
	if (OutHandle == &AcpiGbl_GpeLock)
		snprintf(al->al_name, sizeof(al->al_name), "ACPI lock (GPE)");
	else if (OutHandle == &AcpiGbl_HardwareLock)
		snprintf(al->al_name, sizeof(al->al_name), "ACPI lock (HW)");
	else
#endif
	snprintf(al->al_name, sizeof(al->al_name), "ACPI lock (%p)", al);
	mtx_init(&al->al_lock, al->al_name, NULL, MTX_SPIN);

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "created %s\n", al->al_name));

	*OutHandle = (ACPI_SPINLOCK)al;

	return_ACPI_STATUS (AE_OK);
}
526
527
void
528
AcpiOsDeleteLock(ACPI_SPINLOCK Handle)
529
{
530
struct acpi_spinlock *al = (struct acpi_spinlock *)Handle;
531
532
ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
533
534
if (al == NULL) {
535
ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
536
"cannot delete null spinlock\n"));
537
return_VOID;
538
}
539
540
ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "delete %s\n", al->al_name));
541
542
mtx_destroy(&al->al_lock);
543
free(al, M_ACPISEM);
544
}
545
546
/*
 * Acquire a spin lock; if the calling thread already holds it, just
 * bump the nesting count instead of spinning (which would deadlock).
 *
 * Always returns 0: the ACPI_CPU_FLAGS value is ignored by
 * AcpiOsReleaseLock() in this implementation.
 */
ACPI_CPU_FLAGS
AcpiOsAcquireLock(ACPI_SPINLOCK Handle)
{
	struct acpi_spinlock *al = (struct acpi_spinlock *)Handle;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (al == NULL) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
		    "cannot acquire null spinlock\n"));
		return (0);
	}

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "acquire %s\n", al->al_name));

	if (mtx_owned(&al->al_lock)) {
		al->al_nested++;
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
		    "acquire nested %s, depth %d\n",
		    al->al_name, al->al_nested));
	} else
		mtx_lock_spin(&al->al_lock);

	return (0);
}
571
572
/*
 * Release a spin lock taken by AcpiOsAcquireLock().  A recursive
 * acquisition just drops the nesting count; only the outermost
 * release unlocks the spin mutex.  "Flags" is accepted for API
 * compatibility but never used here.
 */
void
AcpiOsReleaseLock(ACPI_SPINLOCK Handle, ACPI_CPU_FLAGS Flags)
{
	struct acpi_spinlock *al = (struct acpi_spinlock *)Handle;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (al == NULL) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
		    "cannot release null spinlock\n"));
		return_VOID;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "release %s\n", al->al_name));

	if (mtx_owned(&al->al_lock)) {
		if (al->al_nested > 0) {
			ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
			    "release nested %s, depth %d\n",
			    al->al_name, al->al_nested));
			al->al_nested--;
		} else
			mtx_unlock_spin(&al->al_lock);
	} else
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
		    "cannot release unowned %s\n", al->al_name));
}
599
600
/* Section 5.2.10.1: global lock acquire/release functions */
601
602
/*
603
* Acquire the global lock. If busy, set the pending bit. The caller
604
* will wait for notification from the BIOS that the lock is available
605
* and then attempt to acquire it again.
606
*/
607
int
608
acpi_acquire_global_lock(volatile uint32_t *lock)
609
{
610
uint32_t new, old;
611
612
do {
613
old = *lock;
614
new = (old & ~ACPI_GLOCK_PENDING) | ACPI_GLOCK_OWNED;
615
if ((old & ACPI_GLOCK_OWNED) != 0)
616
new |= ACPI_GLOCK_PENDING;
617
} while (atomic_cmpset_32(lock, old, new) == 0);
618
619
return ((new & ACPI_GLOCK_PENDING) == 0);
620
}
621
622
/*
623
* Release the global lock, returning whether there is a waiter pending.
624
* If the BIOS set the pending bit, OSPM must notify the BIOS when it
625
* releases the lock.
626
*/
627
int
628
acpi_release_global_lock(volatile uint32_t *lock)
629
{
630
uint32_t new, old;
631
632
do {
633
old = *lock;
634
new = old & ~(ACPI_GLOCK_PENDING | ACPI_GLOCK_OWNED);
635
} while (atomic_cmpset_32(lock, old, new) == 0);
636
637
return ((old & ACPI_GLOCK_PENDING) != 0);
638
}
639
640