GitHub Repository: awilliam/linux-vfio
Path: blob/master/drivers/acpi/acpica/evgpe.c
/******************************************************************************
 *
 * Module Name: evgpe - General Purpose Event handling and dispatch
 *
 *****************************************************************************/

/*
 * Copyright (C) 2000 - 2011, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */

#include <acpi/acpi.h>
#include "accommon.h"
#include "acevents.h"
#include "acnamesp.h"

#define _COMPONENT          ACPI_EVENTS
ACPI_MODULE_NAME("evgpe")

/* Local prototypes */
static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context);

static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context);

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_update_gpe_enable_mask
 *
 * PARAMETERS:  gpe_event_info      - GPE to update
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Updates GPE register enable mask based upon whether there are
 *              runtime references to this GPE
 *
 ******************************************************************************/

acpi_status
acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info)
{
	struct acpi_gpe_register_info *gpe_register_info;
	u32 register_bit;

	ACPI_FUNCTION_TRACE(ev_update_gpe_enable_mask);

	gpe_register_info = gpe_event_info->register_info;
	if (!gpe_register_info) {
		return_ACPI_STATUS(AE_NOT_EXIST);
	}

	register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info,
						    gpe_register_info);

	/* Clear the run bit up front */

	ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit);

	/* Set the mask bit only if there are references to this GPE */

	if (gpe_event_info->runtime_count) {
		ACPI_SET_BIT(gpe_register_info->enable_for_run, (u8)register_bit);
	}

	return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_enable_gpe
 *
 * PARAMETERS:  gpe_event_info      - GPE to enable
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Clear a GPE of stale events and enable it.
 *
 ******************************************************************************/
acpi_status
acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE(ev_enable_gpe);

	/*
	 * We will only allow a GPE to be enabled if it has either an associated
	 * method (_Lxx/_Exx) or a handler, or is using the implicit notify
	 * feature. Otherwise, the GPE will be immediately disabled by
	 * acpi_ev_gpe_dispatch the first time it fires.
	 */
	if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
	    ACPI_GPE_DISPATCH_NONE) {
		return_ACPI_STATUS(AE_NO_HANDLER);
	}

	/* Clear the GPE (of stale events) */
	status = acpi_hw_clear_gpe(gpe_event_info);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Enable the requested GPE */
	status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE);

	return_ACPI_STATUS(status);
}


/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_add_gpe_reference
 *
 * PARAMETERS:  gpe_event_info      - Add a reference to this GPE
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Add a reference to a GPE. On the first reference, the GPE is
 *              hardware-enabled.
 *
 ******************************************************************************/

acpi_status acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
{
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE(ev_add_gpe_reference);

	if (gpe_event_info->runtime_count == ACPI_UINT8_MAX) {
		return_ACPI_STATUS(AE_LIMIT);
	}

	gpe_event_info->runtime_count++;
	if (gpe_event_info->runtime_count == 1) {

		/* Enable on first reference */

		status = acpi_ev_update_gpe_enable_mask(gpe_event_info);
		if (ACPI_SUCCESS(status)) {
			status = acpi_ev_enable_gpe(gpe_event_info);
		}

		if (ACPI_FAILURE(status)) {
			gpe_event_info->runtime_count--;
		}
	}

	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_remove_gpe_reference
 *
 * PARAMETERS:  gpe_event_info      - Remove a reference to this GPE
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Remove a reference to a GPE. When the last reference is
 *              removed, the GPE is hardware-disabled.
 *
 ******************************************************************************/

acpi_status acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
{
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE(ev_remove_gpe_reference);

	if (!gpe_event_info->runtime_count) {
		return_ACPI_STATUS(AE_LIMIT);
	}

	gpe_event_info->runtime_count--;
	if (!gpe_event_info->runtime_count) {

		/* Disable on last reference */

		status = acpi_ev_update_gpe_enable_mask(gpe_event_info);
		if (ACPI_SUCCESS(status)) {
			status = acpi_hw_low_set_gpe(gpe_event_info,
						     ACPI_GPE_DISABLE);
		}

		if (ACPI_FAILURE(status)) {
			gpe_event_info->runtime_count++;
		}
	}

	return_ACPI_STATUS(status);
}

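/*
 * Illustrative sketch, not from the original evgpe.c: how a caller might
 * pair the two reference-count helpers above. The gpe_event_info pointer is
 * assumed to have been obtained and validated elsewhere (for example via
 * acpi_ev_get_gpe_event_info below). Kept under #if 0 so it is never built.
 */
#if 0
static acpi_status acpi_ev_gpe_reference_example(struct acpi_gpe_event_info
						 *gpe_event_info)
{
	acpi_status status;

	/* The first reference hardware-enables the GPE */

	status = acpi_ev_add_gpe_reference(gpe_event_info);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	/* ... the GPE is now live and may be dispatched ... */

	/* Dropping the last reference hardware-disables the GPE again */

	return (acpi_ev_remove_gpe_reference(gpe_event_info));
}
#endif
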
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_low_get_gpe_info
 *
 * PARAMETERS:  gpe_number          - Raw GPE number
 *              gpe_block           - A GPE info block
 *
 * RETURN:      A GPE event_info struct. NULL if not a valid GPE (The gpe_number
 *              is not within the specified GPE block)
 *
 * DESCRIPTION: Returns the event_info struct associated with this GPE. This is
 *              the low-level implementation of ev_get_gpe_event_info.
 *
 ******************************************************************************/

struct acpi_gpe_event_info *acpi_ev_low_get_gpe_info(u32 gpe_number,
						      struct acpi_gpe_block_info
						      *gpe_block)
{
	u32 gpe_index;

	/*
	 * Validate that the gpe_number is within the specified gpe_block.
	 * (Two steps)
	 */
	if (!gpe_block || (gpe_number < gpe_block->block_base_number)) {
		return (NULL);
	}

	gpe_index = gpe_number - gpe_block->block_base_number;
	if (gpe_index >= gpe_block->gpe_count) {
		return (NULL);
	}

	return (&gpe_block->event_info[gpe_index]);
}


/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_get_gpe_event_info
 *
 * PARAMETERS:  gpe_device          - Device node. NULL for GPE0/GPE1
 *              gpe_number          - Raw GPE number
 *
 * RETURN:      A GPE event_info struct. NULL if not a valid GPE
 *
 * DESCRIPTION: Returns the event_info struct associated with this GPE.
 *              Validates the gpe_block and the gpe_number
 *
 *              Should be called only when the GPE lists are semaphore locked
 *              and not subject to change.
 *
 ******************************************************************************/

struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
							u32 gpe_number)
{
	union acpi_operand_object *obj_desc;
	struct acpi_gpe_event_info *gpe_info;
	u32 i;

	ACPI_FUNCTION_ENTRY();

	/* A NULL gpe_device means use the FADT-defined GPE block(s) */

	if (!gpe_device) {

		/* Examine GPE Block 0 and 1 (These blocks are permanent) */

		for (i = 0; i < ACPI_MAX_GPE_BLOCKS; i++) {
			gpe_info = acpi_ev_low_get_gpe_info(gpe_number,
							    acpi_gbl_gpe_fadt_blocks
							    [i]);
			if (gpe_info) {
				return (gpe_info);
			}
		}

		/* The gpe_number was not in the range of either FADT GPE block */

		return (NULL);
	}

	/* A Non-NULL gpe_device means this is a GPE Block Device */

	obj_desc = acpi_ns_get_attached_object((struct acpi_namespace_node *)
					       gpe_device);
	if (!obj_desc || !obj_desc->device.gpe_block) {
		return (NULL);
	}

	return (acpi_ev_low_get_gpe_info
		(gpe_number, obj_desc->device.gpe_block));
}

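/*
 * Illustrative sketch, not from the original evgpe.c: translating a raw GPE
 * number into its event_info entry. A NULL gpe_device selects the permanent
 * FADT-defined GPE0/GPE1 blocks, as described above; GPE number 0x10 is only
 * an example value, and the caller is assumed to hold the GPE list lock.
 * Kept under #if 0 so it is never built.
 */
#if 0
static struct acpi_gpe_event_info *acpi_ev_gpe_lookup_example(void)
{
	/* Look up GPE 0x10 in the FADT GPE blocks (returns NULL if invalid) */

	return (acpi_ev_get_gpe_event_info(NULL, 0x10));
}
#endif
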
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_gpe_detect
 *
 * PARAMETERS:  gpe_xrupt_list      - Interrupt block for this interrupt.
 *                                    Can have multiple GPE blocks attached.
 *
 * RETURN:      INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
 *
 * DESCRIPTION: Detect if any GP events have occurred. This function is
 *              executed at interrupt level.
 *
 ******************************************************************************/

u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
{
	acpi_status status;
	struct acpi_gpe_block_info *gpe_block;
	struct acpi_gpe_register_info *gpe_register_info;
	u32 int_status = ACPI_INTERRUPT_NOT_HANDLED;
	u8 enabled_status_byte;
	u32 status_reg;
	u32 enable_reg;
	acpi_cpu_flags flags;
	u32 i;
	u32 j;

	ACPI_FUNCTION_NAME(ev_gpe_detect);

	/* Check for the case where there are no GPEs */

	if (!gpe_xrupt_list) {
		return (int_status);
	}

	/*
	 * We need to obtain the GPE lock for both the data structs and registers
	 * Note: Not necessary to obtain the hardware lock, since the GPE
	 * registers are owned by the gpe_lock.
	 */
	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);

	/* Examine all GPE blocks attached to this interrupt level */

	gpe_block = gpe_xrupt_list->gpe_block_list_head;
	while (gpe_block) {
		/*
		 * Read all of the 8-bit GPE status and enable registers in this GPE
		 * block, saving all of them. Find all currently active GP events.
		 */
		for (i = 0; i < gpe_block->register_count; i++) {

			/* Get the next status/enable pair */

			gpe_register_info = &gpe_block->register_info[i];

			/*
			 * Optimization: If there are no GPEs enabled within this
			 * register, we can safely ignore the entire register.
			 */
			if (!(gpe_register_info->enable_for_run |
			      gpe_register_info->enable_for_wake)) {
				continue;
			}

			/* Read the Status Register */

			status =
			    acpi_hw_read(&status_reg,
					 &gpe_register_info->status_address);
			if (ACPI_FAILURE(status)) {
				goto unlock_and_exit;
			}

			/* Read the Enable Register */

			status =
			    acpi_hw_read(&enable_reg,
					 &gpe_register_info->enable_address);
			if (ACPI_FAILURE(status)) {
				goto unlock_and_exit;
			}

			ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
					  "Read GPE Register at GPE%02X: Status=%02X, Enable=%02X\n",
					  gpe_register_info->base_gpe_number,
					  status_reg, enable_reg));

			/* Check if there is anything active at all in this register */

			enabled_status_byte = (u8) (status_reg & enable_reg);
			if (!enabled_status_byte) {

				/* No active GPEs in this register, move on */

				continue;
			}

			/* Now look at the individual GPEs in this byte register */

			for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {

				/* Examine one GPE bit */

				if (enabled_status_byte & (1 << j)) {
					/*
					 * Found an active GPE. Dispatch the event to a handler
					 * or method.
					 */
					int_status |=
					    acpi_ev_gpe_dispatch(gpe_block->node,
								 &gpe_block->event_info[((acpi_size) i * ACPI_GPE_REGISTER_WIDTH) + j],
								 j + gpe_register_info->base_gpe_number);
				}
			}
		}

		gpe_block = gpe_block->next;
	}

unlock_and_exit:

	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
	return (int_status);
}

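/*
 * Worked example, not from the original evgpe.c, of the indexing used in the
 * dispatch call above. Assuming a block whose first register covers GPEs
 * 0x00-0x07, status/enable register i = 1 has base_gpe_number 0x08; an active
 * bit j = 3 in that register then maps to event_info index
 * (1 * ACPI_GPE_REGISTER_WIDTH) + 3 = 11 and to GPE number 0x08 + 3 = 0x0B,
 * since ACPI_GPE_REGISTER_WIDTH is 8.
 */
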
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_asynch_execute_gpe_method
 *
 * PARAMETERS:  Context (gpe_event_info) - Info for this GPE
 *
 * RETURN:      None
 *
 * DESCRIPTION: Perform the actual execution of a GPE control method. This
 *              function is called from an invocation of acpi_os_execute and
 *              therefore does NOT execute at interrupt level - so that
 *              the control method itself is not executed in the context of
 *              an interrupt handler.
 *
 ******************************************************************************/

static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
{
	struct acpi_gpe_event_info *gpe_event_info = context;
	acpi_status status;
	struct acpi_gpe_event_info *local_gpe_event_info;
	struct acpi_evaluate_info *info;
	struct acpi_gpe_notify_object *notify_object;

	ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method);

	/* Allocate a local GPE block */

	local_gpe_event_info =
	    ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_event_info));
	if (!local_gpe_event_info) {
		ACPI_EXCEPTION((AE_INFO, AE_NO_MEMORY, "while handling a GPE"));
		return_VOID;
	}

	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
	if (ACPI_FAILURE(status)) {
		ACPI_FREE(local_gpe_event_info);
		return_VOID;
	}

	/* Must revalidate the gpe_number/gpe_block */

	if (!acpi_ev_valid_gpe_event(gpe_event_info)) {
		status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
		ACPI_FREE(local_gpe_event_info);
		return_VOID;
	}

	/*
	 * Take a snapshot of the GPE info for this level - we copy the info to
	 * prevent a race condition with remove_handler/remove_block.
	 */
	ACPI_MEMCPY(local_gpe_event_info, gpe_event_info,
		    sizeof(struct acpi_gpe_event_info));

	status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
	if (ACPI_FAILURE(status)) {
		return_VOID;
	}

	/* Do the correct dispatch - normal method or implicit notify */

	switch (local_gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) {
	case ACPI_GPE_DISPATCH_NOTIFY:

		/*
		 * Implicit notify.
		 * Dispatch a DEVICE_WAKE notify to the appropriate handler.
		 * NOTE: the request is queued for execution after this method
		 * completes. The notify handlers are NOT invoked synchronously
		 * from this thread -- because handlers may in turn run other
		 * control methods.
		 */
		status = acpi_ev_queue_notify_request(
				local_gpe_event_info->dispatch.device.node,
				ACPI_NOTIFY_DEVICE_WAKE);

		notify_object = local_gpe_event_info->dispatch.device.next;
		while (ACPI_SUCCESS(status) && notify_object) {
			status = acpi_ev_queue_notify_request(
					notify_object->node,
					ACPI_NOTIFY_DEVICE_WAKE);
			notify_object = notify_object->next;
		}

		break;

	case ACPI_GPE_DISPATCH_METHOD:

		/* Allocate the evaluation information block */

		info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
		if (!info) {
			status = AE_NO_MEMORY;
		} else {
			/*
			 * Invoke the GPE Method (_Lxx, _Exx) i.e., evaluate the _Lxx/_Exx
			 * control method that corresponds to this GPE
			 */
			info->prefix_node =
			    local_gpe_event_info->dispatch.method_node;
			info->flags = ACPI_IGNORE_RETURN_VALUE;

			status = acpi_ns_evaluate(info);
			ACPI_FREE(info);
		}

		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"while evaluating GPE method [%4.4s]",
					acpi_ut_get_node_name
					(local_gpe_event_info->dispatch.
					 method_node)));
		}

		break;

	default:
		return_VOID;	/* Should never happen */
	}

	/* Defer enabling of GPE until all notify handlers are done */

	status = acpi_os_execute(OSL_NOTIFY_HANDLER,
				 acpi_ev_asynch_enable_gpe,
				 local_gpe_event_info);
	if (ACPI_FAILURE(status)) {
		ACPI_FREE(local_gpe_event_info);
	}
	return_VOID;
}


/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_asynch_enable_gpe
 *
 * PARAMETERS:  Context (gpe_event_info) - Info for this GPE
 *              Callback from acpi_os_execute
 *
 * RETURN:      None
 *
 * DESCRIPTION: Asynchronous clear/enable for GPE. This allows the GPE to
 *              complete (i.e., finish execution of Notify)
 *
 ******************************************************************************/

static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context)
{
	struct acpi_gpe_event_info *gpe_event_info = context;

	(void)acpi_ev_finish_gpe(gpe_event_info);

	ACPI_FREE(gpe_event_info);
	return;
}


/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_finish_gpe
 *
 * PARAMETERS:  gpe_event_info      - Info for this GPE
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Clear/Enable a GPE. Common code that is used after execution
 *              of a GPE method or a synchronous or asynchronous GPE handler.
 *
 ******************************************************************************/

acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info *gpe_event_info)
{
	acpi_status status;

	if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
	    ACPI_GPE_LEVEL_TRIGGERED) {
		/*
		 * GPE is level-triggered, we clear the GPE status bit after
		 * handling the event.
		 */
		status = acpi_hw_clear_gpe(gpe_event_info);
		if (ACPI_FAILURE(status)) {
			return (status);
		}
	}

	/*
	 * Enable this GPE, conditionally. This means that the GPE will
	 * only be physically enabled if the enable_for_run bit is set
	 * in the event_info.
	 */
	(void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_CONDITIONAL_ENABLE);
	return (AE_OK);
}


/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_gpe_dispatch
 *
 * PARAMETERS:  gpe_device          - Device node. NULL for GPE0/GPE1
 *              gpe_event_info      - Info for this GPE
 *              gpe_number          - Number relative to the parent GPE block
 *
 * RETURN:      INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
 *
 * DESCRIPTION: Dispatch a General Purpose Event to either a function (e.g. EC)
 *              or method (e.g. _Lxx/_Exx) handler.
 *
 *              This function executes at interrupt level.
 *
 ******************************************************************************/

u32
acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
		     struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
{
	acpi_status status;
	u32 return_value;

	ACPI_FUNCTION_TRACE(ev_gpe_dispatch);

	/* Invoke global event handler if present */

	acpi_gpe_count++;
	if (acpi_gbl_global_event_handler) {
		acpi_gbl_global_event_handler(ACPI_EVENT_TYPE_GPE, gpe_device,
					      gpe_number,
					      acpi_gbl_global_event_handler_context);
	}

	/*
	 * If edge-triggered, clear the GPE status bit now. Note that
	 * level-triggered events are cleared after the GPE is serviced.
	 */
	if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
	    ACPI_GPE_EDGE_TRIGGERED) {
		status = acpi_hw_clear_gpe(gpe_event_info);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Unable to clear GPE%02X", gpe_number));
			return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
		}
	}

	/*
	 * Always disable the GPE so that it does not keep firing before
	 * any asynchronous activity completes (either from the execution
	 * of a GPE method or an asynchronous GPE handler.)
	 *
	 * If there is no handler or method to run, just disable the
	 * GPE and leave it disabled permanently to prevent further such
	 * pointless events from firing.
	 */
	status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status,
				"Unable to disable GPE%02X", gpe_number));
		return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
	}

	/*
	 * Dispatch the GPE to either an installed handler or the control
	 * method associated with this GPE (_Lxx or _Exx). If a handler
	 * exists, we invoke it and do not attempt to run the method.
	 * If there is neither a handler nor a method, leave the GPE
	 * disabled.
	 */
	switch (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) {
	case ACPI_GPE_DISPATCH_HANDLER:

		/* Invoke the installed handler (at interrupt level) */

		return_value =
		    gpe_event_info->dispatch.handler->address(gpe_device,
							      gpe_number,
							      gpe_event_info->
							      dispatch.handler->
							      context);

		/* If requested, clear (if level-triggered) and reenable the GPE */

		if (return_value & ACPI_REENABLE_GPE) {
			(void)acpi_ev_finish_gpe(gpe_event_info);
		}
		break;

	case ACPI_GPE_DISPATCH_METHOD:
	case ACPI_GPE_DISPATCH_NOTIFY:

		/*
		 * Execute the method associated with the GPE
		 * NOTE: Level-triggered GPEs are cleared after the method completes.
		 */
		status = acpi_os_execute(OSL_GPE_HANDLER,
					 acpi_ev_asynch_execute_gpe_method,
					 gpe_event_info);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Unable to queue handler for GPE%2X - event disabled",
					gpe_number));
		}
		break;

	default:

		/*
		 * No handler or method to run!
		 * 03/2010: This case should no longer be possible. We will not allow
		 * a GPE to be enabled if it has no handler or method.
		 */
		ACPI_ERROR((AE_INFO,
			    "No handler or method for GPE%02X, disabling event",
			    gpe_number));

		break;
	}

	return_UINT32(ACPI_INTERRUPT_HANDLED);
}

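/*
 * Illustrative sketch, not from the original evgpe.c: the shape of a handler
 * reached through the ACPI_GPE_DISPATCH_HANDLER case above. Such a handler
 * runs at interrupt level; returning ACPI_REENABLE_GPE asks
 * acpi_ev_gpe_dispatch to clear (if level-triggered) and reenable the GPE via
 * acpi_ev_finish_gpe. The handler name and the empty body are placeholders.
 * Kept under #if 0 so it is never built.
 */
#if 0
static u32 example_gpe_handler(acpi_handle gpe_device, u32 gpe_number,
			       void *context)
{
	/* ... acknowledge and service the event in device hardware ... */

	/* Report the event handled and request that the GPE be reenabled */

	return (ACPI_INTERRUPT_HANDLED | ACPI_REENABLE_GPE);
}
#endif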