GitHub Repository: awilliam/linux-vfio
Path: blob/master/drivers/acpi/acpica/dsmethod.c
/******************************************************************************
 *
 * Module Name: dsmethod - Parser/Interpreter interface - control method parsing
 *
 *****************************************************************************/

/*
 * Copyright (C) 2000 - 2011, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */

#include <acpi/acpi.h>
#include "accommon.h"
#include "acdispat.h"
#include "acinterp.h"
#include "acnamesp.h"
#ifdef ACPI_DISASSEMBLER
#include <acpi/acdisasm.h>
#endif

#define _COMPONENT          ACPI_DISPATCHER
ACPI_MODULE_NAME("dsmethod")

/* Local prototypes */
static acpi_status
acpi_ds_create_method_mutex(union acpi_operand_object *method_desc);

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_method_error
 *
 * PARAMETERS:  Status              - Execution status
 *              walk_state          - Current state
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Called on method error. Invoke the global exception handler if
 *              present, dump the method data if the disassembler is configured
 *
 *              Note: Allows the exception handler to change the status code
 *
 ******************************************************************************/

acpi_status
acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state)
{
        ACPI_FUNCTION_ENTRY();

        /* Ignore AE_OK and control exception codes */

        if (ACPI_SUCCESS(status) || (status & AE_CODE_CONTROL)) {
                return (status);
        }

        /* Invoke the global exception handler */

        if (acpi_gbl_exception_handler) {

                /* Exit the interpreter, allow handler to execute methods */

                acpi_ex_exit_interpreter();

                /*
                 * Handler can map the exception code to anything it wants, including
                 * AE_OK, in which case the executing method will not be aborted.
                 */
                status = acpi_gbl_exception_handler(status,
                    walk_state->method_node ?
                    walk_state->method_node->name.integer : 0,
                    walk_state->opcode, walk_state->aml_offset, NULL);
                acpi_ex_enter_interpreter();
        }

        acpi_ds_clear_implicit_return(walk_state);

#ifdef ACPI_DISASSEMBLER
        if (ACPI_FAILURE(status)) {

                /* Display method locals/args if disassembler is present */

                acpi_dm_dump_method_info(status, walk_state, walk_state->op);
        }
#endif

        return (status);
}
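
/*
 * Illustrative sketch, added for exposition and not part of the original
 * ACPICA source: the global handler invoked above is registered with
 * acpi_install_exception_handler(). The handler name below is hypothetical.
 * Returning the incoming status leaves the abort decision unchanged, while
 * returning AE_OK would rescue the executing method.
 */
static acpi_status
ex_sketch_aml_exception_handler(acpi_status aml_status,
    acpi_name name, u16 opcode, u32 aml_offset, void *context)
{
        /* A real handler might log name/opcode/aml_offset before deciding */

        return (aml_status);
}

/*
 * Typical registration at OS initialization (sketch):
 * acpi_install_exception_handler(ex_sketch_aml_exception_handler);
 */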

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_create_method_mutex
 *
 * PARAMETERS:  obj_desc            - The method object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Create a mutex object for a serialized control method
 *
 ******************************************************************************/

static acpi_status
acpi_ds_create_method_mutex(union acpi_operand_object *method_desc)
{
        union acpi_operand_object *mutex_desc;
        acpi_status status;

        ACPI_FUNCTION_TRACE(ds_create_method_mutex);

        /* Create the new mutex object */

        mutex_desc = acpi_ut_create_internal_object(ACPI_TYPE_MUTEX);
        if (!mutex_desc) {
                return_ACPI_STATUS(AE_NO_MEMORY);
        }

        /* Create the actual OS Mutex */

        status = acpi_os_create_mutex(&mutex_desc->mutex.os_mutex);
        if (ACPI_FAILURE(status)) {
                return_ACPI_STATUS(status);
        }

        mutex_desc->mutex.sync_level = method_desc->method.sync_level;
        method_desc->method.mutex = mutex_desc;
        return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_begin_method_execution
 *
 * PARAMETERS:  method_node         - Node of the method
 *              obj_desc            - The method object
 *              walk_state          - Current state, NULL if not yet executing
 *                                    a method.
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Prepare a method for execution. Parses the method if necessary,
 *              increments the thread count, and waits at the method semaphore
 *              for clearance to execute.
 *
 ******************************************************************************/

acpi_status
acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
    union acpi_operand_object *obj_desc,
    struct acpi_walk_state *walk_state)
{
        acpi_status status = AE_OK;

        ACPI_FUNCTION_TRACE_PTR(ds_begin_method_execution, method_node);

        if (!method_node) {
                return_ACPI_STATUS(AE_NULL_ENTRY);
        }

        /* Prevent wraparound of thread count */

        if (obj_desc->method.thread_count == ACPI_UINT8_MAX) {
                ACPI_ERROR((AE_INFO,
                    "Method reached maximum reentrancy limit (255)"));
                return_ACPI_STATUS(AE_AML_METHOD_LIMIT);
        }

        /*
         * If this method is serialized, we need to acquire the method mutex.
         */
        if (obj_desc->method.info_flags & ACPI_METHOD_SERIALIZED) {
                /*
                 * Create a mutex for the method if it is defined to be Serialized
                 * and a mutex has not already been created. We defer the mutex creation
                 * until a method is actually executed, to minimize the object count
                 */
                if (!obj_desc->method.mutex) {
                        status = acpi_ds_create_method_mutex(obj_desc);
                        if (ACPI_FAILURE(status)) {
                                return_ACPI_STATUS(status);
                        }
                }

                /*
                 * The current_sync_level (per-thread) must be less than or equal to
                 * the sync level of the method. This mechanism provides some
                 * deadlock prevention
                 *
                 * Top-level method invocation has no walk state at this point
                 */
                if (walk_state &&
                    (walk_state->thread->current_sync_level >
                     obj_desc->method.mutex->mutex.sync_level)) {
                        ACPI_ERROR((AE_INFO,
                            "Cannot acquire Mutex for method [%4.4s], current SyncLevel is too large (%u)",
                            acpi_ut_get_node_name(method_node),
                            walk_state->thread->current_sync_level));

                        return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
                }

                /*
                 * Obtain the method mutex if necessary. Do not acquire mutex for a
                 * recursive call.
                 */
                if (!walk_state ||
                    !obj_desc->method.mutex->mutex.thread_id ||
                    (walk_state->thread->thread_id !=
                     obj_desc->method.mutex->mutex.thread_id)) {
                        /*
                         * Acquire the method mutex. This releases the interpreter if we
                         * block (and reacquires it before it returns)
                         */
                        status = acpi_ex_system_wait_mutex(
                            obj_desc->method.mutex->mutex.os_mutex,
                            ACPI_WAIT_FOREVER);
                        if (ACPI_FAILURE(status)) {
                                return_ACPI_STATUS(status);
                        }

                        /* Update the mutex and walk info and save the original sync_level */

                        if (walk_state) {
                                obj_desc->method.mutex->mutex.original_sync_level =
                                    walk_state->thread->current_sync_level;

                                obj_desc->method.mutex->mutex.thread_id =
                                    walk_state->thread->thread_id;
                                walk_state->thread->current_sync_level =
                                    obj_desc->method.sync_level;
                        } else {
                                obj_desc->method.mutex->mutex.original_sync_level =
                                    obj_desc->method.mutex->mutex.sync_level;
                        }
                }

                /* Always increase acquisition depth */

                obj_desc->method.mutex->mutex.acquisition_depth++;
        }

        /*
         * Allocate an Owner ID for this method, only if this is the first thread
         * to begin concurrent execution. We only need one owner_id, even if the
         * method is invoked recursively.
         */
        if (!obj_desc->method.owner_id) {
                status = acpi_ut_allocate_owner_id(&obj_desc->method.owner_id);
                if (ACPI_FAILURE(status)) {
                        goto cleanup;
                }
        }

        /*
         * Increment the method parse tree thread count since it has been
         * reentered one more time (even if it is the same thread)
         */
        obj_desc->method.thread_count++;
        return_ACPI_STATUS(status);

cleanup:
        /* On error, must release the method mutex (if present) */

        if (obj_desc->method.mutex) {
                acpi_os_release_mutex(obj_desc->method.mutex->mutex.os_mutex);
        }
        return_ACPI_STATUS(status);
}
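
/*
 * Illustrative sketch, added for exposition and not part of the original
 * ACPICA source: every OS-initiated control method evaluation, e.g. a driver
 * calling acpi_evaluate_object(), is bracketed by
 * acpi_ds_begin_method_execution() above and
 * acpi_ds_terminate_control_method() below. The helper name, the handle
 * argument, and the "_STA" pathname are hypothetical examples.
 */
static acpi_status ex_sketch_evaluate_sta(acpi_handle device_handle)
{
        struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
        acpi_status status;

        status = acpi_evaluate_object(device_handle, "_STA", NULL, &output);
        if (ACPI_SUCCESS(status) && output.pointer) {

                /* Caller owns the returned object buffer and must free it */

                ACPI_FREE(output.pointer);
        }
        return (status);
}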

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_call_control_method
 *
 * PARAMETERS:  Thread              - Info for this thread
 *              this_walk_state     - Current walk state
 *              Op                  - Current Op to be walked
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Transfer execution to a called control method
 *
 ******************************************************************************/

acpi_status
acpi_ds_call_control_method(struct acpi_thread_state *thread,
    struct acpi_walk_state *this_walk_state, union acpi_parse_object *op)
{
        acpi_status status;
        struct acpi_namespace_node *method_node;
        struct acpi_walk_state *next_walk_state = NULL;
        union acpi_operand_object *obj_desc;
        struct acpi_evaluate_info *info;
        u32 i;

        ACPI_FUNCTION_TRACE_PTR(ds_call_control_method, this_walk_state);

        ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
            "Calling method %p, currentstate=%p\n",
            this_walk_state->prev_op, this_walk_state));

        /*
         * Get the namespace entry for the control method we are about to call
         */
        method_node = this_walk_state->method_call_node;
        if (!method_node) {
                return_ACPI_STATUS(AE_NULL_ENTRY);
        }

        obj_desc = acpi_ns_get_attached_object(method_node);
        if (!obj_desc) {
                return_ACPI_STATUS(AE_NULL_OBJECT);
        }

        /* Init for new method, possibly wait on method mutex */

        status = acpi_ds_begin_method_execution(method_node, obj_desc,
            this_walk_state);
        if (ACPI_FAILURE(status)) {
                return_ACPI_STATUS(status);
        }

        /* Begin method parse/execution. Create a new walk state */

        next_walk_state = acpi_ds_create_walk_state(obj_desc->method.owner_id,
            NULL, obj_desc, thread);
        if (!next_walk_state) {
                status = AE_NO_MEMORY;
                goto cleanup;
        }

        /*
         * The resolved arguments were put on the previous walk state's operand
         * stack. Operands on the previous walk state stack always
         * start at index 0. Also, null terminate the list of arguments
         */
        this_walk_state->operands[this_walk_state->num_operands] = NULL;

        /*
         * Allocate and initialize the evaluation information block
         * TBD: this is somewhat inefficient, should change interface to
         * ds_init_aml_walk. For now, keeps this struct off the CPU stack
         */
        info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
        if (!info) {
                return_ACPI_STATUS(AE_NO_MEMORY);
        }

        info->parameters = &this_walk_state->operands[0];

        status = acpi_ds_init_aml_walk(next_walk_state, NULL, method_node,
            obj_desc->method.aml_start, obj_desc->method.aml_length,
            info, ACPI_IMODE_EXECUTE);

        ACPI_FREE(info);
        if (ACPI_FAILURE(status)) {
                goto cleanup;
        }

        /*
         * Delete the operands on the previous walkstate operand stack
         * (they were copied to new objects)
         */
        for (i = 0; i < obj_desc->method.param_count; i++) {
                acpi_ut_remove_reference(this_walk_state->operands[i]);
                this_walk_state->operands[i] = NULL;
        }

        /* Clear the operand stack */

        this_walk_state->num_operands = 0;

        ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
            "**** Begin nested execution of [%4.4s] **** WalkState=%p\n",
            method_node->name.ascii, next_walk_state));

        /* Invoke an internal method if necessary */

        if (obj_desc->method.info_flags & ACPI_METHOD_INTERNAL_ONLY) {
                status =
                    obj_desc->method.dispatch.implementation(next_walk_state);
                if (status == AE_OK) {
                        status = AE_CTRL_TERMINATE;
                }
        }

        return_ACPI_STATUS(status);

cleanup:

        /* On error, we must terminate the method properly */

        acpi_ds_terminate_control_method(obj_desc, next_walk_state);
        if (next_walk_state) {
                acpi_ds_delete_walk_state(next_walk_state);
        }

        return_ACPI_STATUS(status);
}
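
/*
 * Illustrative sketch, added for exposition and not part of the original
 * ACPICA source: a method flagged ACPI_METHOD_INTERNAL_ONLY (the predefined
 * _OSI method is the usual case) is not interpreted from AML. Its
 * dispatch.implementation callback runs directly, as in the hypothetical
 * implementation below; the AE_OK return is converted to AE_CTRL_TERMINATE
 * by acpi_ds_call_control_method() above.
 */
static acpi_status
ex_sketch_internal_method(struct acpi_walk_state *walk_state)
{
        /* Produce an Integer return object for the calling method */

        walk_state->return_desc = acpi_ut_create_integer_object((u64) 0);
        if (!walk_state->return_desc) {
                return (AE_NO_MEMORY);
        }
        return (AE_OK);
}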

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_restart_control_method
 *
 * PARAMETERS:  walk_state          - State for preempted method (caller)
 *              return_desc         - Return value from the called method
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Restart a method that was preempted by another (nested) method
 *              invocation. Handle the return value (if any) from the callee.
 *
 ******************************************************************************/

acpi_status
acpi_ds_restart_control_method(struct acpi_walk_state *walk_state,
    union acpi_operand_object *return_desc)
{
        acpi_status status;
        int same_as_implicit_return;

        ACPI_FUNCTION_TRACE_PTR(ds_restart_control_method, walk_state);

        ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
            "****Restart [%4.4s] Op %p ReturnValueFromCallee %p\n",
            acpi_ut_get_node_name(walk_state->method_node),
            walk_state->method_call_op, return_desc));

        ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
            "    ReturnFromThisMethodUsed?=%X ResStack %p Walk %p\n",
            walk_state->return_used,
            walk_state->results, walk_state));

        /* Did the called method return a value? */

        if (return_desc) {

                /* Is the implicit return object the same as the return desc? */

                same_as_implicit_return =
                    (walk_state->implicit_return_obj == return_desc);

                /* Are we actually going to use the return value? */

                if (walk_state->return_used) {

                        /* Save the return value from the previous method */

                        status = acpi_ds_result_push(return_desc, walk_state);
                        if (ACPI_FAILURE(status)) {
                                acpi_ut_remove_reference(return_desc);
                                return_ACPI_STATUS(status);
                        }

                        /*
                         * Save as THIS method's return value in case it is returned
                         * immediately to yet another method
                         */
                        walk_state->return_desc = return_desc;
                }

                /*
                 * The following code is the optional support for the so-called
                 * "implicit return". Some AML code assumes that the last value of the
                 * method is "implicitly" returned to the caller, in the absence of an
                 * explicit return value.
                 *
                 * Just save the last result of the method as the return value.
                 *
                 * NOTE: this is optional because the ASL language does not actually
                 * support this behavior.
                 */
                else if (!acpi_ds_do_implicit_return
                         (return_desc, walk_state, FALSE)
                         || same_as_implicit_return) {
                        /*
                         * Delete the return value if it will not be used by the
                         * calling method or remove one reference if the explicit return
                         * is the same as the implicit return value.
                         */
                        acpi_ut_remove_reference(return_desc);
                }
        }

        return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_terminate_control_method
 *
 * PARAMETERS:  method_desc         - Method object
 *              walk_state          - State associated with the method
 *
 * RETURN:      None
 *
 * DESCRIPTION: Terminate a control method. Delete everything that the method
 *              created, delete all locals and arguments, and delete the parse
 *              tree if requested.
 *
 * MUTEX:       Interpreter is locked
 *
 ******************************************************************************/

void
acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
    struct acpi_walk_state *walk_state)
{

        ACPI_FUNCTION_TRACE_PTR(ds_terminate_control_method, walk_state);

        /* method_desc is required, walk_state is optional */

        if (!method_desc) {
                return_VOID;
        }

        if (walk_state) {

                /* Delete all arguments and locals */

                acpi_ds_method_data_delete_all(walk_state);

                /*
                 * If method is serialized, release the mutex and restore the
                 * current sync level for this thread
                 */
                if (method_desc->method.mutex) {

                        /* Acquisition Depth handles recursive calls */

                        method_desc->method.mutex->mutex.acquisition_depth--;
                        if (!method_desc->method.mutex->mutex.acquisition_depth) {
                                walk_state->thread->current_sync_level =
                                    method_desc->method.mutex->mutex.original_sync_level;

                                acpi_os_release_mutex(
                                    method_desc->method.mutex->mutex.os_mutex);
                                method_desc->method.mutex->mutex.thread_id = 0;
                        }
                }

                /*
                 * Delete any namespace objects created anywhere within the
                 * namespace by the execution of this method. Unless:
                 * 1) This method is a module-level executable code method, in which
                 *    case we want make the objects permanent.
                 * 2) There are other threads executing the method, in which case we
                 *    will wait until the last thread has completed.
                 */
                if (!(method_desc->method.info_flags & ACPI_METHOD_MODULE_LEVEL)
                    && (method_desc->method.thread_count == 1)) {

                        /* Delete any direct children of (created by) this method */

                        acpi_ns_delete_namespace_subtree(walk_state->method_node);

                        /*
                         * Delete any objects that were created by this method
                         * elsewhere in the namespace (if any were created).
                         * Use of the ACPI_METHOD_MODIFIED_NAMESPACE optimizes the
                         * deletion such that we don't have to perform an entire
                         * namespace walk for every control method execution.
                         */
                        if (method_desc->method.info_flags &
                            ACPI_METHOD_MODIFIED_NAMESPACE) {
                                acpi_ns_delete_namespace_by_owner(
                                    method_desc->method.owner_id);
                                method_desc->method.info_flags &=
                                    ~ACPI_METHOD_MODIFIED_NAMESPACE;
                        }
                }
        }

        /* Decrement the thread count on the method */

        if (method_desc->method.thread_count) {
                method_desc->method.thread_count--;
        } else {
                ACPI_ERROR((AE_INFO, "Invalid zero thread count in method"));
        }

        /* Are there any other threads currently executing this method? */

        if (method_desc->method.thread_count) {
                /*
                 * Additional threads. Do not release the owner_id in this case,
                 * we immediately reuse it for the next thread executing this method
                 */
                ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
                    "*** Completed execution of one thread, %u threads remaining\n",
                    method_desc->method.thread_count));
        } else {
                /* This is the only executing thread for this method */

                /*
                 * Support to dynamically change a method from not_serialized to
                 * Serialized if it appears that the method is incorrectly written and
                 * does not support multiple thread execution. The best example of this
                 * is if such a method creates namespace objects and blocks. A second
                 * thread will fail with an AE_ALREADY_EXISTS exception.
                 *
                 * This code is here because we must wait until the last thread exits
                 * before marking the method as serialized.
                 */
                if (method_desc->method.info_flags &
                    ACPI_METHOD_SERIALIZED_PENDING) {
                        if (walk_state) {
                                ACPI_INFO((AE_INFO,
                                    "Marking method %4.4s as Serialized because of AE_ALREADY_EXISTS error",
                                    walk_state->method_node->name.ascii));
                        }

                        /*
                         * Method tried to create an object twice and was marked as
                         * "pending serialized". The probable cause is that the method
                         * cannot handle reentrancy.
                         *
                         * The method was created as not_serialized, but it tried to create
                         * a named object and then blocked, causing the second thread
                         * entrance to begin and then fail. Workaround this problem by
                         * marking the method permanently as Serialized when the last
                         * thread exits here.
                         */
                        method_desc->method.info_flags &=
                            ~ACPI_METHOD_SERIALIZED_PENDING;
                        method_desc->method.info_flags |=
                            ACPI_METHOD_SERIALIZED;
                        method_desc->method.sync_level = 0;
                }

                /* No more threads, we can free the owner_id */

                if (!(method_desc->method.info_flags & ACPI_METHOD_MODULE_LEVEL)) {
                        acpi_ut_release_owner_id(&method_desc->method.owner_id);
                }
        }

        return_VOID;
}