GitHub Repository: torvalds/linux
Path: blob/master/drivers/acpi/apei/ghes.c
1
// SPDX-License-Identifier: GPL-2.0-only
2
/*
3
* APEI Generic Hardware Error Source support
4
*
5
* Generic Hardware Error Source provides a way to report platform
6
* hardware errors (such as those from the chipset). It works in the
7
* so-called "Firmware First" mode: hardware errors are reported to
8
* firmware first, and firmware then reports them to Linux. This way,
9
* firmware can inspect non-standard hardware error registers or
10
* non-standard hardware links to produce richer hardware error
11
* information for Linux.
12
*
13
* For more information about Generic Hardware Error Source, please
14
* refer to ACPI Specification version 4.0, section 17.3.2.6
15
*
16
* Copyright 2010,2011 Intel Corp.
17
* Author: Huang Ying <[email protected]>
18
*/
19
20
#include <linux/arm_sdei.h>
21
#include <linux/kernel.h>
22
#include <linux/moduleparam.h>
23
#include <linux/init.h>
24
#include <linux/acpi.h>
25
#include <linux/bitfield.h>
26
#include <linux/io.h>
27
#include <linux/interrupt.h>
28
#include <linux/timer.h>
29
#include <linux/cper.h>
30
#include <linux/cleanup.h>
31
#include <linux/platform_device.h>
32
#include <linux/minmax.h>
33
#include <linux/mutex.h>
34
#include <linux/ratelimit.h>
35
#include <linux/vmalloc.h>
36
#include <linux/irq_work.h>
37
#include <linux/llist.h>
38
#include <linux/genalloc.h>
39
#include <linux/kfifo.h>
40
#include <linux/pci.h>
41
#include <linux/pfn.h>
42
#include <linux/aer.h>
43
#include <linux/nmi.h>
44
#include <linux/sched/clock.h>
45
#include <linux/uuid.h>
46
#include <linux/ras.h>
47
#include <linux/task_work.h>
48
#include <linux/vmcore_info.h>
49
50
#include <acpi/actbl1.h>
51
#include <acpi/ghes.h>
52
#include <acpi/apei.h>
53
#include <asm/fixmap.h>
54
#include <asm/tlbflush.h>
55
#include <cxl/event.h>
56
#include <ras/ras_event.h>
57
58
#include "apei-internal.h"
59
60
#define GHES_PFX "GHES: "
61
62
#define GHES_ESTATUS_MAX_SIZE 65536
63
#define GHES_ESOURCE_PREALLOC_MAX_SIZE 65536
64
65
#define GHES_ESTATUS_POOL_MIN_ALLOC_ORDER 3
66
67
/* This is just an estimation for memory pool allocation */
68
#define GHES_ESTATUS_CACHE_AVG_SIZE 512
69
70
#define GHES_ESTATUS_CACHES_SIZE 4
71
72
#define GHES_ESTATUS_IN_CACHE_MAX_NSEC 10000000000ULL
73
/* Prevent too many caches from being allocated because of RCU */
74
#define GHES_ESTATUS_CACHE_ALLOCED_MAX (GHES_ESTATUS_CACHES_SIZE * 3 / 2)
75
76
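/*
 * Each cached/queued error status block is stored immediately after its
 * ghes_estatus_cache/ghes_estatus_node header in a single allocation;
 * the *_FROM_* macros below return a pointer to that payload.
 */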
#define GHES_ESTATUS_CACHE_LEN(estatus_len) \
77
(sizeof(struct ghes_estatus_cache) + (estatus_len))
78
#define GHES_ESTATUS_FROM_CACHE(estatus_cache) \
79
((struct acpi_hest_generic_status *) \
80
((struct ghes_estatus_cache *)(estatus_cache) + 1))
81
82
#define GHES_ESTATUS_NODE_LEN(estatus_len) \
83
(sizeof(struct ghes_estatus_node) + (estatus_len))
84
#define GHES_ESTATUS_FROM_NODE(estatus_node) \
85
((struct acpi_hest_generic_status *) \
86
((struct ghes_estatus_node *)(estatus_node) + 1))
87
88
#define GHES_VENDOR_ENTRY_LEN(gdata_len) \
89
(sizeof(struct ghes_vendor_record_entry) + (gdata_len))
90
#define GHES_GDATA_FROM_VENDOR_ENTRY(vendor_entry) \
91
((struct acpi_hest_generic_data *) \
92
((struct ghes_vendor_record_entry *)(vendor_entry) + 1))
93
94
/*
95
* NMI-like notifications vary by architecture. Before the compiler can
96
* prune unused static functions, it needs a value for these enums.
97
*/
98
#ifndef CONFIG_ARM_SDE_INTERFACE
99
#define FIX_APEI_GHES_SDEI_NORMAL __end_of_fixed_addresses
100
#define FIX_APEI_GHES_SDEI_CRITICAL __end_of_fixed_addresses
101
#endif
102
103
static ATOMIC_NOTIFIER_HEAD(ghes_report_chain);
104
105
static inline bool is_hest_type_generic_v2(struct ghes *ghes)
106
{
107
return ghes->generic->header.type == ACPI_HEST_TYPE_GENERIC_ERROR_V2;
108
}
109
110
/*
111
* A platform may describe one error source for the handling of synchronous
112
* errors (e.g. MCE or SEA), or for handling asynchronous errors (e.g. SCI
113
* or External Interrupt). On x86, the HEST notifications are always
114
* asynchronous, so only SEA on ARM is delivered as a synchronous
115
* notification.
116
*/
117
static inline bool is_hest_sync_notify(struct ghes *ghes)
118
{
119
u8 notify_type = ghes->generic->notify.type;
120
121
return notify_type == ACPI_HEST_NOTIFY_SEA;
122
}
123
124
/*
125
* This driver isn't really modular, however for the time being,
126
* continuing to use module_param is the easiest way to remain
127
* compatible with existing boot arg use cases.
128
*/
129
bool ghes_disable;
130
module_param_named(disable, ghes_disable, bool, 0);
131
132
/*
133
* "ghes.edac_force_enable" forcibly enables ghes_edac and skips the platform
134
* check.
135
*/
136
static bool ghes_edac_force_enable;
137
module_param_named(edac_force_enable, ghes_edac_force_enable, bool, 0);
138
139
/*
140
* All error sources notified with HED (Hardware Error Device) share a
141
* single notifier callback, so they need to be linked and checked one
142
* by one. This holds true for NMI too.
143
*
144
* RCU is used for these lists, so ghes_list_mutex is only used for
145
* list changing, not for traversing.
146
*/
147
static LIST_HEAD(ghes_hed);
148
static DEFINE_MUTEX(ghes_list_mutex);
149
150
/*
151
* A list of GHES devices which are given to the corresponding EDAC driver
152
* ghes_edac for further use.
153
*/
154
static LIST_HEAD(ghes_devs);
155
static DEFINE_MUTEX(ghes_devs_mutex);
156
157
/*
158
* The memory area used to transfer hardware error information from
159
* BIOS to Linux can be determined only in the NMI, IRQ or timer
160
* handler, and general ioremap cannot be used in atomic context, so
161
* the fixmap is used instead.
162
*
163
* This spinlock is used to prevent the fixmap entry from being used
164
* simultaneously.
165
*/
166
static DEFINE_SPINLOCK(ghes_notify_lock_irq);
167
168
struct ghes_vendor_record_entry {
169
struct work_struct work;
170
int error_severity;
171
char vendor_record[];
172
};
173
174
static struct gen_pool *ghes_estatus_pool;
175
176
static struct ghes_estatus_cache __rcu *ghes_estatus_caches[GHES_ESTATUS_CACHES_SIZE];
177
static atomic_t ghes_estatus_cache_alloced;
178
179
static void __iomem *ghes_map(u64 pfn, enum fixed_addresses fixmap_idx)
180
{
181
phys_addr_t paddr;
182
pgprot_t prot;
183
184
paddr = PFN_PHYS(pfn);
185
prot = arch_apei_get_mem_attribute(paddr);
186
__set_fixmap(fixmap_idx, paddr, prot);
187
188
return (void __iomem *) __fix_to_virt(fixmap_idx);
189
}
190
191
static void ghes_unmap(void __iomem *vaddr, enum fixed_addresses fixmap_idx)
192
{
193
int _idx = virt_to_fix((unsigned long)vaddr);
194
195
WARN_ON_ONCE(fixmap_idx != _idx);
196
clear_fixmap(fixmap_idx);
197
}
198
199
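/*
 * Pre-size the pool for the estatus cache plus one worst-case error
 * status block per error source, so later allocations can be served
 * from atomic (including NMI) context without the normal allocators.
 */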
int ghes_estatus_pool_init(unsigned int num_ghes)
200
{
201
unsigned long addr, len;
202
int rc;
203
204
ghes_estatus_pool = gen_pool_create(GHES_ESTATUS_POOL_MIN_ALLOC_ORDER, -1);
205
if (!ghes_estatus_pool)
206
return -ENOMEM;
207
208
len = GHES_ESTATUS_CACHE_AVG_SIZE * GHES_ESTATUS_CACHE_ALLOCED_MAX;
209
len += (num_ghes * GHES_ESOURCE_PREALLOC_MAX_SIZE);
210
211
addr = (unsigned long)vmalloc(PAGE_ALIGN(len));
212
if (!addr)
213
goto err_pool_alloc;
214
215
rc = gen_pool_add(ghes_estatus_pool, addr, PAGE_ALIGN(len), -1);
216
if (rc)
217
goto err_pool_add;
218
219
return 0;
220
221
err_pool_add:
222
vfree((void *)addr);
223
224
err_pool_alloc:
225
gen_pool_destroy(ghes_estatus_pool);
226
227
return -ENOMEM;
228
}
229
230
/**
231
* ghes_estatus_pool_region_free - free previously allocated memory
232
* from the ghes_estatus_pool.
233
* @addr: address of memory to free.
234
* @size: size of memory to free.
235
*
236
* Returns none.
237
*/
238
void ghes_estatus_pool_region_free(unsigned long addr, u32 size)
239
{
240
gen_pool_free(ghes_estatus_pool, addr, size);
241
}
242
EXPORT_SYMBOL_GPL(ghes_estatus_pool_region_free);
243
244
static int map_gen_v2(struct ghes *ghes)
245
{
246
return apei_map_generic_address(&ghes->generic_v2->read_ack_register);
247
}
248
249
static void unmap_gen_v2(struct ghes *ghes)
250
{
251
apei_unmap_generic_address(&ghes->generic_v2->read_ack_register);
252
}
253
254
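/*
 * GHESv2 requires the OS to acknowledge consumption of the error status
 * block by updating the Read Ack register: keep the bits selected by
 * read_ack_preserve and write back the read_ack_write value.
 */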
static void ghes_ack_error(struct acpi_hest_generic_v2 *gv2)
255
{
256
int rc;
257
u64 val = 0;
258
259
rc = apei_read(&val, &gv2->read_ack_register);
260
if (rc)
261
return;
262
263
val &= gv2->read_ack_preserve << gv2->read_ack_register.bit_offset;
264
val |= gv2->read_ack_write << gv2->read_ack_register.bit_offset;
265
266
apei_write(val, &gv2->read_ack_register);
267
}
268
269
static struct ghes *ghes_new(struct acpi_hest_generic *generic)
270
{
271
struct ghes *ghes;
272
unsigned int error_block_length;
273
int rc;
274
275
ghes = kzalloc(sizeof(*ghes), GFP_KERNEL);
276
if (!ghes)
277
return ERR_PTR(-ENOMEM);
278
279
ghes->generic = generic;
280
if (is_hest_type_generic_v2(ghes)) {
281
rc = map_gen_v2(ghes);
282
if (rc)
283
goto err_free;
284
}
285
286
rc = apei_map_generic_address(&generic->error_status_address);
287
if (rc)
288
goto err_unmap_read_ack_addr;
289
error_block_length = generic->error_block_length;
290
if (error_block_length > GHES_ESTATUS_MAX_SIZE) {
291
pr_warn(FW_WARN GHES_PFX
292
"Error status block length is too long: %u for "
293
"generic hardware error source: %d.\n",
294
error_block_length, generic->header.source_id);
295
error_block_length = GHES_ESTATUS_MAX_SIZE;
296
}
297
ghes->estatus = kmalloc(error_block_length, GFP_KERNEL);
298
ghes->estatus_length = error_block_length;
299
if (!ghes->estatus) {
300
rc = -ENOMEM;
301
goto err_unmap_status_addr;
302
}
303
304
return ghes;
305
306
err_unmap_status_addr:
307
apei_unmap_generic_address(&generic->error_status_address);
308
err_unmap_read_ack_addr:
309
if (is_hest_type_generic_v2(ghes))
310
unmap_gen_v2(ghes);
311
err_free:
312
kfree(ghes);
313
return ERR_PTR(rc);
314
}
315
316
static void ghes_fini(struct ghes *ghes)
317
{
318
kfree(ghes->estatus);
319
apei_unmap_generic_address(&ghes->generic->error_status_address);
320
if (is_hest_type_generic_v2(ghes))
321
unmap_gen_v2(ghes);
322
}
323
324
static inline int ghes_severity(int severity)
325
{
326
switch (severity) {
327
case CPER_SEV_INFORMATIONAL:
328
return GHES_SEV_NO;
329
case CPER_SEV_CORRECTED:
330
return GHES_SEV_CORRECTED;
331
case CPER_SEV_RECOVERABLE:
332
return GHES_SEV_RECOVERABLE;
333
case CPER_SEV_FATAL:
334
return GHES_SEV_PANIC;
335
default:
336
/* Unknown, go panic */
337
return GHES_SEV_PANIC;
338
}
339
}
340
341
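/*
 * Copy between a kernel buffer and physical memory one page at a time,
 * since only a single fixmap slot is available for the temporary mapping.
 */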
static void ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len,
342
int from_phys,
343
enum fixed_addresses fixmap_idx)
344
{
345
void __iomem *vaddr;
346
u64 offset;
347
u32 trunk;
348
349
while (len > 0) {
350
offset = paddr - (paddr & PAGE_MASK);
351
vaddr = ghes_map(PHYS_PFN(paddr), fixmap_idx);
352
trunk = PAGE_SIZE - offset;
353
trunk = min(trunk, len);
354
if (from_phys)
355
memcpy_fromio(buffer, vaddr + offset, trunk);
356
else
357
memcpy_toio(vaddr + offset, buffer, trunk);
358
len -= trunk;
359
paddr += trunk;
360
buffer += trunk;
361
ghes_unmap(vaddr, fixmap_idx);
362
}
363
}
364
365
/* Check the top-level record header has an appropriate size. */
366
static int __ghes_check_estatus(struct ghes *ghes,
367
struct acpi_hest_generic_status *estatus)
368
{
369
u32 len = cper_estatus_len(estatus);
370
u32 max_len = min(ghes->generic->error_block_length,
371
ghes->estatus_length);
372
373
if (len < sizeof(*estatus)) {
374
pr_warn_ratelimited(FW_WARN GHES_PFX "Truncated error status block!\n");
375
return -EIO;
376
}
377
378
if (!len || len > max_len) {
379
pr_warn_ratelimited(FW_WARN GHES_PFX "Invalid error status block length!\n");
380
return -EIO;
381
}
382
383
if (cper_estatus_check_header(estatus)) {
384
pr_warn_ratelimited(FW_WARN GHES_PFX "Invalid CPER header!\n");
385
return -EIO;
386
}
387
388
return 0;
389
}
390
391
/* Read the CPER block, returning its address, and header in estatus. */
392
static int __ghes_peek_estatus(struct ghes *ghes,
393
struct acpi_hest_generic_status *estatus,
394
u64 *buf_paddr, enum fixed_addresses fixmap_idx)
395
{
396
struct acpi_hest_generic *g = ghes->generic;
397
int rc;
398
399
rc = apei_read(buf_paddr, &g->error_status_address);
400
if (rc) {
401
*buf_paddr = 0;
402
pr_warn_ratelimited(FW_WARN GHES_PFX
403
"Failed to read error status block address for hardware error source: %d.\n",
404
g->header.source_id);
405
return -EIO;
406
}
407
if (!*buf_paddr)
408
return -ENOENT;
409
410
ghes_copy_tofrom_phys(estatus, *buf_paddr, sizeof(*estatus), 1,
411
fixmap_idx);
412
if (!estatus->block_status) {
413
*buf_paddr = 0;
414
return -ENOENT;
415
}
416
417
return 0;
418
}
419
420
static int __ghes_read_estatus(struct acpi_hest_generic_status *estatus,
421
u64 buf_paddr, enum fixed_addresses fixmap_idx,
422
size_t buf_len)
423
{
424
ghes_copy_tofrom_phys(estatus, buf_paddr, buf_len, 1, fixmap_idx);
425
if (cper_estatus_check(estatus)) {
426
pr_warn_ratelimited(FW_WARN GHES_PFX
427
"Failed to read error status block!\n");
428
return -EIO;
429
}
430
431
return 0;
432
}
433
434
static int ghes_read_estatus(struct ghes *ghes,
435
struct acpi_hest_generic_status *estatus,
436
u64 *buf_paddr, enum fixed_addresses fixmap_idx)
437
{
438
int rc;
439
440
rc = __ghes_peek_estatus(ghes, estatus, buf_paddr, fixmap_idx);
441
if (rc)
442
return rc;
443
444
rc = __ghes_check_estatus(ghes, estatus);
445
if (rc)
446
return rc;
447
448
return __ghes_read_estatus(estatus, *buf_paddr, fixmap_idx,
449
cper_estatus_len(estatus));
450
}
451
452
static void ghes_clear_estatus(struct ghes *ghes,
453
struct acpi_hest_generic_status *estatus,
454
u64 buf_paddr, enum fixed_addresses fixmap_idx)
455
{
456
estatus->block_status = 0;
457
458
if (!buf_paddr)
459
return;
460
461
ghes_copy_tofrom_phys(estatus, buf_paddr,
462
sizeof(estatus->block_status), 0,
463
fixmap_idx);
464
465
/*
466
* GHESv2 type HEST entries introduce support for error acknowledgment,
467
* so only acknowledge the error if this support is present.
468
*/
469
if (is_hest_type_generic_v2(ghes))
470
ghes_ack_error(ghes->generic_v2);
471
}
472
473
/**
474
* struct ghes_task_work - for synchronous RAS event
475
*
476
* @twork: callback_head for task work
477
* @pfn: page frame number of corrupted page
478
* @flags: work control flags
479
*
480
* Structure to pass task work to be handled before
481
* returning to user-space via task_work_add().
482
*/
483
struct ghes_task_work {
484
struct callback_head twork;
485
u64 pfn;
486
int flags;
487
};
488
489
static void memory_failure_cb(struct callback_head *twork)
490
{
491
struct ghes_task_work *twcb = container_of(twork, struct ghes_task_work, twork);
492
int ret;
493
494
ret = memory_failure(twcb->pfn, twcb->flags);
495
gen_pool_free(ghes_estatus_pool, (unsigned long)twcb, sizeof(*twcb));
496
497
if (!ret || ret == -EHWPOISON || ret == -EOPNOTSUPP)
498
return;
499
500
pr_err("%#llx: Sending SIGBUS to %s:%d due to hardware memory corruption\n",
501
twcb->pfn, current->comm, task_pid_nr(current));
502
force_sig(SIGBUS);
503
}
504
505
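/*
 * For a synchronous error that must be handled before returning to user
 * space (MF_ACTION_REQUIRED on a task with an mm), queue the poisoned
 * page as task work; otherwise defer it to memory_failure_queue().
 */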
static bool ghes_do_memory_failure(u64 physical_addr, int flags)
506
{
507
struct ghes_task_work *twcb;
508
unsigned long pfn;
509
510
if (!IS_ENABLED(CONFIG_ACPI_APEI_MEMORY_FAILURE))
511
return false;
512
513
pfn = PHYS_PFN(physical_addr);
514
515
if (flags == MF_ACTION_REQUIRED && current->mm) {
516
twcb = (void *)gen_pool_alloc(ghes_estatus_pool, sizeof(*twcb));
517
if (!twcb)
518
return false;
519
520
twcb->pfn = pfn;
521
twcb->flags = flags;
522
init_task_work(&twcb->twork, memory_failure_cb);
523
task_work_add(current, &twcb->twork, TWA_RESUME);
524
return true;
525
}
526
527
memory_failure_queue(pfn, flags);
528
return true;
529
}
530
531
static bool ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata,
532
int sev, bool sync)
533
{
534
int flags = -1;
535
int sec_sev = ghes_severity(gdata->error_severity);
536
struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata);
537
538
if (!(mem_err->validation_bits & CPER_MEM_VALID_PA))
539
return false;
540
541
/* Only the following two cases can be handled properly for now. */
542
if (sec_sev == GHES_SEV_CORRECTED &&
543
(gdata->flags & CPER_SEC_ERROR_THRESHOLD_EXCEEDED))
544
flags = MF_SOFT_OFFLINE;
545
if (sev == GHES_SEV_RECOVERABLE && sec_sev == GHES_SEV_RECOVERABLE)
546
flags = sync ? MF_ACTION_REQUIRED : 0;
547
548
if (flags != -1)
549
return ghes_do_memory_failure(mem_err->physical_addr, flags);
550
551
return false;
552
}
553
554
static bool ghes_handle_arm_hw_error(struct acpi_hest_generic_data *gdata,
555
int sev, bool sync)
556
{
557
struct cper_sec_proc_arm *err = acpi_hest_get_payload(gdata);
558
int flags = sync ? MF_ACTION_REQUIRED : 0;
559
int length = gdata->error_data_length;
560
char error_type[120];
561
bool queued = false;
562
int sec_sev, i;
563
char *p;
564
565
sec_sev = ghes_severity(gdata->error_severity);
566
if (length >= sizeof(*err)) {
567
log_arm_hw_error(err, sec_sev);
568
} else {
569
pr_warn(FW_BUG "arm error length: %d\n", length);
570
pr_warn(FW_BUG "length is too small\n");
571
pr_warn(FW_BUG "firmware-generated error record is incorrect\n");
572
return false;
573
}
574
575
if (sev != GHES_SEV_RECOVERABLE || sec_sev != GHES_SEV_RECOVERABLE)
576
return false;
577
578
p = (char *)(err + 1);
579
length -= sizeof(err);
580
581
for (i = 0; i < err->err_info_num; i++) {
582
struct cper_arm_err_info *err_info;
583
bool is_cache, has_pa;
584
585
/* Ensure we have enough data for the error info header */
586
if (length < sizeof(*err_info))
587
break;
588
589
err_info = (struct cper_arm_err_info *)p;
590
591
/* Validate the claimed length before using it */
592
length -= err_info->length;
593
if (length < 0)
594
break;
595
596
is_cache = err_info->type & CPER_ARM_CACHE_ERROR;
597
has_pa = (err_info->validation_bits & CPER_ARM_INFO_VALID_PHYSICAL_ADDR);
598
599
/*
600
* The field (err_info->error_info & BIT(26)) is always set to 1 by
601
* some old HiSilicon Kunpeng920 firmware. We assume that firmware
602
* won't mix corrected errors into an uncorrected section, so don't
603
* filter out 'corrected' errors here.
604
*/
605
if (is_cache && has_pa) {
606
queued = ghes_do_memory_failure(err_info->physical_fault_addr, flags);
607
p += err_info->length;
608
continue;
609
}
610
611
cper_bits_to_str(error_type, sizeof(error_type),
612
FIELD_GET(CPER_ARM_ERR_TYPE_MASK, err_info->type),
613
cper_proc_error_type_strs,
614
ARRAY_SIZE(cper_proc_error_type_strs));
615
616
pr_warn_ratelimited(FW_WARN GHES_PFX
617
"Unhandled processor error type 0x%02x: %s%s\n",
618
err_info->type, error_type,
619
(err_info->type & ~CPER_ARM_ERR_TYPE_MASK) ? " with reserved bit(s)" : "");
620
p += err_info->length;
621
}
622
623
return queued;
624
}
625
626
/*
627
* PCIe AER errors need to be sent to the AER driver for reporting and
628
* recovery. The GHES severities map to the following AER severities and
629
* require the following handling:
630
*
631
* GHES_SEV_CORRECTED -> AER_CORRECTABLE
632
* These need to be reported by the AER driver but no recovery is
633
* necessary.
634
* GHES_SEV_RECOVERABLE -> AER_NONFATAL
635
* GHES_SEV_RECOVERABLE && CPER_SEC_RESET -> AER_FATAL
636
* These both need to be reported and recovered from by the AER driver.
637
* GHES_SEV_PANIC does not make it to this handling since the kernel must
638
* panic.
639
*/
640
static void ghes_handle_aer(struct acpi_hest_generic_data *gdata)
641
{
642
#ifdef CONFIG_ACPI_APEI_PCIEAER
643
struct cper_sec_pcie *pcie_err = acpi_hest_get_payload(gdata);
644
645
if (pcie_err->validation_bits & CPER_PCIE_VALID_DEVICE_ID &&
646
pcie_err->validation_bits & CPER_PCIE_VALID_AER_INFO) {
647
unsigned int devfn;
648
int aer_severity;
649
u8 *aer_info;
650
651
devfn = PCI_DEVFN(pcie_err->device_id.device,
652
pcie_err->device_id.function);
653
aer_severity = cper_severity_to_aer(gdata->error_severity);
654
655
/*
656
* If firmware reset the component to contain
657
* the error, we must reinitialize it before
658
* use, so treat it as a fatal AER error.
659
*/
660
if (gdata->flags & CPER_SEC_RESET)
661
aer_severity = AER_FATAL;
662
663
aer_info = (void *)gen_pool_alloc(ghes_estatus_pool,
664
sizeof(struct aer_capability_regs));
665
if (!aer_info)
666
return;
667
memcpy(aer_info, pcie_err->aer_info, sizeof(struct aer_capability_regs));
668
669
aer_recover_queue(pcie_err->device_id.segment,
670
pcie_err->device_id.bus,
671
devfn, aer_severity,
672
(struct aer_capability_regs *)
673
aer_info);
674
}
675
#endif
676
}
677
678
static BLOCKING_NOTIFIER_HEAD(vendor_record_notify_list);
679
680
int ghes_register_vendor_record_notifier(struct notifier_block *nb)
681
{
682
return blocking_notifier_chain_register(&vendor_record_notify_list, nb);
683
}
684
EXPORT_SYMBOL_GPL(ghes_register_vendor_record_notifier);
685
686
void ghes_unregister_vendor_record_notifier(struct notifier_block *nb)
687
{
688
blocking_notifier_chain_unregister(&vendor_record_notify_list, nb);
689
}
690
EXPORT_SYMBOL_GPL(ghes_unregister_vendor_record_notifier);
691
692
static void ghes_vendor_record_work_func(struct work_struct *work)
693
{
694
struct ghes_vendor_record_entry *entry;
695
struct acpi_hest_generic_data *gdata;
696
u32 len;
697
698
entry = container_of(work, struct ghes_vendor_record_entry, work);
699
gdata = GHES_GDATA_FROM_VENDOR_ENTRY(entry);
700
701
blocking_notifier_call_chain(&vendor_record_notify_list,
702
entry->error_severity, gdata);
703
704
len = GHES_VENDOR_ENTRY_LEN(acpi_hest_get_record_size(gdata));
705
gen_pool_free(ghes_estatus_pool, (unsigned long)entry, len);
706
}
707
708
static void ghes_defer_non_standard_event(struct acpi_hest_generic_data *gdata,
709
int sev)
710
{
711
struct acpi_hest_generic_data *copied_gdata;
712
struct ghes_vendor_record_entry *entry;
713
u32 len;
714
715
len = GHES_VENDOR_ENTRY_LEN(acpi_hest_get_record_size(gdata));
716
entry = (void *)gen_pool_alloc(ghes_estatus_pool, len);
717
if (!entry)
718
return;
719
720
copied_gdata = GHES_GDATA_FROM_VENDOR_ENTRY(entry);
721
memcpy(copied_gdata, gdata, acpi_hest_get_record_size(gdata));
722
entry->error_severity = sev;
723
724
INIT_WORK(&entry->work, ghes_vendor_record_work_func);
725
schedule_work(&entry->work);
726
}
727
728
/* Room for 8 entries */
729
#define CXL_CPER_PROT_ERR_FIFO_DEPTH 8
730
static DEFINE_KFIFO(cxl_cper_prot_err_fifo, struct cxl_cper_prot_err_work_data,
731
CXL_CPER_PROT_ERR_FIFO_DEPTH);
732
733
/* Synchronize schedule_work() with cxl_cper_prot_err_work changes */
734
static DEFINE_SPINLOCK(cxl_cper_prot_err_work_lock);
735
struct work_struct *cxl_cper_prot_err_work;
736
737
static void cxl_cper_post_prot_err(struct cxl_cper_sec_prot_err *prot_err,
738
int severity)
739
{
740
#ifdef CONFIG_ACPI_APEI_PCIEAER
741
struct cxl_cper_prot_err_work_data wd;
742
743
if (cxl_cper_sec_prot_err_valid(prot_err))
744
return;
745
746
guard(spinlock_irqsave)(&cxl_cper_prot_err_work_lock);
747
748
if (!cxl_cper_prot_err_work)
749
return;
750
751
if (cxl_cper_setup_prot_err_work_data(&wd, prot_err, severity))
752
return;
753
754
if (!kfifo_put(&cxl_cper_prot_err_fifo, wd)) {
755
pr_err_ratelimited("CXL CPER kfifo overflow\n");
756
return;
757
}
758
759
schedule_work(cxl_cper_prot_err_work);
760
#endif
761
}
762
763
int cxl_cper_register_prot_err_work(struct work_struct *work)
764
{
765
if (cxl_cper_prot_err_work)
766
return -EINVAL;
767
768
guard(spinlock)(&cxl_cper_prot_err_work_lock);
769
cxl_cper_prot_err_work = work;
770
return 0;
771
}
772
EXPORT_SYMBOL_NS_GPL(cxl_cper_register_prot_err_work, "CXL");
773
774
int cxl_cper_unregister_prot_err_work(struct work_struct *work)
775
{
776
if (cxl_cper_prot_err_work != work)
777
return -EINVAL;
778
779
guard(spinlock)(&cxl_cper_prot_err_work_lock);
780
cxl_cper_prot_err_work = NULL;
781
return 0;
782
}
783
EXPORT_SYMBOL_NS_GPL(cxl_cper_unregister_prot_err_work, "CXL");
784
785
int cxl_cper_prot_err_kfifo_get(struct cxl_cper_prot_err_work_data *wd)
786
{
787
return kfifo_get(&cxl_cper_prot_err_fifo, wd);
788
}
789
EXPORT_SYMBOL_NS_GPL(cxl_cper_prot_err_kfifo_get, "CXL");
790
791
/* Room for 8 entries for each of the 4 event log queues */
792
#define CXL_CPER_FIFO_DEPTH 32
793
DEFINE_KFIFO(cxl_cper_fifo, struct cxl_cper_work_data, CXL_CPER_FIFO_DEPTH);
794
795
/* Synchronize schedule_work() with cxl_cper_work changes */
796
static DEFINE_SPINLOCK(cxl_cper_work_lock);
797
struct work_struct *cxl_cper_work;
798
799
static void cxl_cper_post_event(enum cxl_event_type event_type,
800
struct cxl_cper_event_rec *rec)
801
{
802
struct cxl_cper_work_data wd;
803
804
if (rec->hdr.length <= sizeof(rec->hdr) ||
805
rec->hdr.length > sizeof(*rec)) {
806
pr_err(FW_WARN "CXL CPER Invalid section length (%u)\n",
807
rec->hdr.length);
808
return;
809
}
810
811
if (!(rec->hdr.validation_bits & CPER_CXL_COMP_EVENT_LOG_VALID)) {
812
pr_err(FW_WARN "CXL CPER invalid event\n");
813
return;
814
}
815
816
guard(spinlock_irqsave)(&cxl_cper_work_lock);
817
818
if (!cxl_cper_work)
819
return;
820
821
wd.event_type = event_type;
822
memcpy(&wd.rec, rec, sizeof(wd.rec));
823
824
if (!kfifo_put(&cxl_cper_fifo, wd)) {
825
pr_err_ratelimited("CXL CPER kfifo overflow\n");
826
return;
827
}
828
829
schedule_work(cxl_cper_work);
830
}
831
832
int cxl_cper_register_work(struct work_struct *work)
833
{
834
if (cxl_cper_work)
835
return -EINVAL;
836
837
guard(spinlock)(&cxl_cper_work_lock);
838
cxl_cper_work = work;
839
return 0;
840
}
841
EXPORT_SYMBOL_NS_GPL(cxl_cper_register_work, "CXL");
842
843
int cxl_cper_unregister_work(struct work_struct *work)
844
{
845
if (cxl_cper_work != work)
846
return -EINVAL;
847
848
guard(spinlock)(&cxl_cper_work_lock);
849
cxl_cper_work = NULL;
850
return 0;
851
}
852
EXPORT_SYMBOL_NS_GPL(cxl_cper_unregister_work, "CXL");
853
854
int cxl_cper_kfifo_get(struct cxl_cper_work_data *wd)
855
{
856
return kfifo_get(&cxl_cper_fifo, wd);
857
}
858
EXPORT_SYMBOL_NS_GPL(cxl_cper_kfifo_get, "CXL");
859
860
static void ghes_log_hwerr(int sev, guid_t *sec_type)
861
{
862
if (sev != CPER_SEV_RECOVERABLE)
863
return;
864
865
if (guid_equal(sec_type, &CPER_SEC_PROC_ARM) ||
866
guid_equal(sec_type, &CPER_SEC_PROC_GENERIC) ||
867
guid_equal(sec_type, &CPER_SEC_PROC_IA)) {
868
hwerr_log_error_type(HWERR_RECOV_CPU);
869
return;
870
}
871
872
if (guid_equal(sec_type, &CPER_SEC_CXL_PROT_ERR) ||
873
guid_equal(sec_type, &CPER_SEC_CXL_GEN_MEDIA_GUID) ||
874
guid_equal(sec_type, &CPER_SEC_CXL_DRAM_GUID) ||
875
guid_equal(sec_type, &CPER_SEC_CXL_MEM_MODULE_GUID)) {
876
hwerr_log_error_type(HWERR_RECOV_CXL);
877
return;
878
}
879
880
if (guid_equal(sec_type, &CPER_SEC_PCIE) ||
881
guid_equal(sec_type, &CPER_SEC_PCI_X_BUS)) {
882
hwerr_log_error_type(HWERR_RECOV_PCI);
883
return;
884
}
885
886
if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
887
hwerr_log_error_type(HWERR_RECOV_MEMORY);
888
return;
889
}
890
891
hwerr_log_error_type(HWERR_RECOV_OTHERS);
892
}
893
894
static void ghes_do_proc(struct ghes *ghes,
895
const struct acpi_hest_generic_status *estatus)
896
{
897
int sev, sec_sev;
898
struct acpi_hest_generic_data *gdata;
899
guid_t *sec_type;
900
const guid_t *fru_id = &guid_null;
901
char *fru_text = "";
902
bool queued = false;
903
bool sync = is_hest_sync_notify(ghes);
904
905
sev = ghes_severity(estatus->error_severity);
906
apei_estatus_for_each_section(estatus, gdata) {
907
sec_type = (guid_t *)gdata->section_type;
908
sec_sev = ghes_severity(gdata->error_severity);
909
if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID)
910
fru_id = (guid_t *)gdata->fru_id;
911
912
if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
913
fru_text = gdata->fru_text;
914
915
ghes_log_hwerr(sev, sec_type);
916
if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
917
struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata);
918
919
atomic_notifier_call_chain(&ghes_report_chain, sev, mem_err);
920
921
arch_apei_report_mem_error(sev, mem_err);
922
queued = ghes_handle_memory_failure(gdata, sev, sync);
923
} else if (guid_equal(sec_type, &CPER_SEC_PCIE)) {
924
ghes_handle_aer(gdata);
925
} else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
926
queued = ghes_handle_arm_hw_error(gdata, sev, sync);
927
} else if (guid_equal(sec_type, &CPER_SEC_CXL_PROT_ERR)) {
928
struct cxl_cper_sec_prot_err *prot_err = acpi_hest_get_payload(gdata);
929
930
cxl_cper_post_prot_err(prot_err, gdata->error_severity);
931
} else if (guid_equal(sec_type, &CPER_SEC_CXL_GEN_MEDIA_GUID)) {
932
struct cxl_cper_event_rec *rec = acpi_hest_get_payload(gdata);
933
934
cxl_cper_post_event(CXL_CPER_EVENT_GEN_MEDIA, rec);
935
} else if (guid_equal(sec_type, &CPER_SEC_CXL_DRAM_GUID)) {
936
struct cxl_cper_event_rec *rec = acpi_hest_get_payload(gdata);
937
938
cxl_cper_post_event(CXL_CPER_EVENT_DRAM, rec);
939
} else if (guid_equal(sec_type, &CPER_SEC_CXL_MEM_MODULE_GUID)) {
940
struct cxl_cper_event_rec *rec = acpi_hest_get_payload(gdata);
941
942
cxl_cper_post_event(CXL_CPER_EVENT_MEM_MODULE, rec);
943
} else {
944
void *err = acpi_hest_get_payload(gdata);
945
946
ghes_defer_non_standard_event(gdata, sev);
947
log_non_standard_event(sec_type, fru_id, fru_text,
948
sec_sev, err,
949
gdata->error_data_length);
950
}
951
}
952
953
/*
954
* If no memory failure work is queued for abnormal synchronous
955
* errors, do a force kill.
956
*/
957
if (sync && !queued) {
958
dev_err(ghes->dev,
959
HW_ERR GHES_PFX "%s:%d: synchronous unrecoverable error (SIGBUS)\n",
960
current->comm, task_pid_nr(current));
961
force_sig(SIGBUS);
962
}
963
}
964
965
static void __ghes_print_estatus(const char *pfx,
966
const struct acpi_hest_generic *generic,
967
const struct acpi_hest_generic_status *estatus)
968
{
969
static atomic_t seqno;
970
unsigned int curr_seqno;
971
char pfx_seq[64];
972
973
if (pfx == NULL) {
974
if (ghes_severity(estatus->error_severity) <=
975
GHES_SEV_CORRECTED)
976
pfx = KERN_WARNING;
977
else
978
pfx = KERN_ERR;
979
}
980
curr_seqno = atomic_inc_return(&seqno);
981
snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
982
printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
983
pfx_seq, generic->header.source_id);
984
cper_estatus_print(pfx_seq, estatus);
985
}
986
987
static int ghes_print_estatus(const char *pfx,
988
const struct acpi_hest_generic *generic,
989
const struct acpi_hest_generic_status *estatus)
990
{
991
/* Not more than 2 messages every 5 seconds */
992
static DEFINE_RATELIMIT_STATE(ratelimit_corrected, 5*HZ, 2);
993
static DEFINE_RATELIMIT_STATE(ratelimit_uncorrected, 5*HZ, 2);
994
struct ratelimit_state *ratelimit;
995
996
if (ghes_severity(estatus->error_severity) <= GHES_SEV_CORRECTED)
997
ratelimit = &ratelimit_corrected;
998
else
999
ratelimit = &ratelimit_uncorrected;
1000
if (__ratelimit(ratelimit)) {
1001
__ghes_print_estatus(pfx, generic, estatus);
1002
return 1;
1003
}
1004
return 0;
1005
}
1006
1007
/*
1008
* GHES error status reporting throttle, used to report more kinds of
1009
* errors instead of just the most frequently occurring ones.
1010
*/
1011
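/*
 * Return 1 if an identical error status block was already reported
 * within the last GHES_ESTATUS_IN_CACHE_MAX_NSEC (10 seconds), so the
 * caller may skip printing it again.
 */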
static int ghes_estatus_cached(struct acpi_hest_generic_status *estatus)
1012
{
1013
u32 len;
1014
int i, cached = 0;
1015
unsigned long long now;
1016
struct ghes_estatus_cache *cache;
1017
struct acpi_hest_generic_status *cache_estatus;
1018
1019
len = cper_estatus_len(estatus);
1020
rcu_read_lock();
1021
for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) {
1022
cache = rcu_dereference(ghes_estatus_caches[i]);
1023
if (cache == NULL)
1024
continue;
1025
if (len != cache->estatus_len)
1026
continue;
1027
cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
1028
if (memcmp(estatus, cache_estatus, len))
1029
continue;
1030
atomic_inc(&cache->count);
1031
now = sched_clock();
1032
if (now - cache->time_in < GHES_ESTATUS_IN_CACHE_MAX_NSEC)
1033
cached = 1;
1034
break;
1035
}
1036
rcu_read_unlock();
1037
return cached;
1038
}
1039
1040
static struct ghes_estatus_cache *ghes_estatus_cache_alloc(
1041
struct acpi_hest_generic *generic,
1042
struct acpi_hest_generic_status *estatus)
1043
{
1044
int alloced;
1045
u32 len, cache_len;
1046
struct ghes_estatus_cache *cache;
1047
struct acpi_hest_generic_status *cache_estatus;
1048
1049
alloced = atomic_add_return(1, &ghes_estatus_cache_alloced);
1050
if (alloced > GHES_ESTATUS_CACHE_ALLOCED_MAX) {
1051
atomic_dec(&ghes_estatus_cache_alloced);
1052
return NULL;
1053
}
1054
len = cper_estatus_len(estatus);
1055
cache_len = GHES_ESTATUS_CACHE_LEN(len);
1056
cache = (void *)gen_pool_alloc(ghes_estatus_pool, cache_len);
1057
if (!cache) {
1058
atomic_dec(&ghes_estatus_cache_alloced);
1059
return NULL;
1060
}
1061
cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
1062
memcpy(cache_estatus, estatus, len);
1063
cache->estatus_len = len;
1064
atomic_set(&cache->count, 0);
1065
cache->generic = generic;
1066
cache->time_in = sched_clock();
1067
return cache;
1068
}
1069
1070
static void ghes_estatus_cache_rcu_free(struct rcu_head *head)
1071
{
1072
struct ghes_estatus_cache *cache;
1073
u32 len;
1074
1075
cache = container_of(head, struct ghes_estatus_cache, rcu);
1076
len = cper_estatus_len(GHES_ESTATUS_FROM_CACHE(cache));
1077
len = GHES_ESTATUS_CACHE_LEN(len);
1078
gen_pool_free(ghes_estatus_pool, (unsigned long)cache, len);
1079
atomic_dec(&ghes_estatus_cache_alloced);
1080
}
1081
1082
static void
1083
ghes_estatus_cache_add(struct acpi_hest_generic *generic,
1084
struct acpi_hest_generic_status *estatus)
1085
{
1086
unsigned long long now, duration, period, max_period = 0;
1087
struct ghes_estatus_cache *cache, *new_cache;
1088
struct ghes_estatus_cache __rcu *victim;
1089
int i, slot = -1, count;
1090
1091
new_cache = ghes_estatus_cache_alloc(generic, estatus);
1092
if (!new_cache)
1093
return;
1094
1095
rcu_read_lock();
1096
now = sched_clock();
1097
for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) {
1098
cache = rcu_dereference(ghes_estatus_caches[i]);
1099
if (cache == NULL) {
1100
slot = i;
1101
break;
1102
}
1103
duration = now - cache->time_in;
1104
if (duration >= GHES_ESTATUS_IN_CACHE_MAX_NSEC) {
1105
slot = i;
1106
break;
1107
}
1108
count = atomic_read(&cache->count);
1109
period = duration;
1110
do_div(period, (count + 1));
1111
if (period > max_period) {
1112
max_period = period;
1113
slot = i;
1114
}
1115
}
1116
rcu_read_unlock();
1117
1118
if (slot != -1) {
1119
/*
1120
* Use release semantics to ensure that ghes_estatus_cached()
1121
* running on another CPU will see the updated cache fields if
1122
* it can see the new value of the pointer.
1123
*/
1124
victim = xchg_release(&ghes_estatus_caches[slot],
1125
RCU_INITIALIZER(new_cache));
1126
1127
/*
1128
* At this point, victim may point to a cached item different
1129
* from the one based on which we selected the slot. Instead of
1130
* going to the loop again to pick another slot, let's just
1131
* drop the other item anyway: this may cause a false cache
1132
* miss later on, but that won't cause any problems.
1133
*/
1134
if (victim)
1135
call_rcu(&unrcu_pointer(victim)->rcu,
1136
ghes_estatus_cache_rcu_free);
1137
}
1138
}
1139
1140
static void __ghes_panic(struct ghes *ghes,
1141
struct acpi_hest_generic_status *estatus,
1142
u64 buf_paddr, enum fixed_addresses fixmap_idx)
1143
{
1144
const char *msg = GHES_PFX "Fatal hardware error";
1145
1146
__ghes_print_estatus(KERN_EMERG, ghes->generic, estatus);
1147
1148
add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
1149
1150
ghes_clear_estatus(ghes, estatus, buf_paddr, fixmap_idx);
1151
1152
if (!panic_timeout)
1153
pr_emerg("%s but panic disabled\n", msg);
1154
1155
panic(msg);
1156
}
1157
1158
static int ghes_proc(struct ghes *ghes)
1159
{
1160
struct acpi_hest_generic_status *estatus = ghes->estatus;
1161
u64 buf_paddr;
1162
int rc;
1163
1164
rc = ghes_read_estatus(ghes, estatus, &buf_paddr, FIX_APEI_GHES_IRQ);
1165
if (rc)
1166
goto out;
1167
1168
if (ghes_severity(estatus->error_severity) >= GHES_SEV_PANIC)
1169
__ghes_panic(ghes, estatus, buf_paddr, FIX_APEI_GHES_IRQ);
1170
1171
if (!ghes_estatus_cached(estatus)) {
1172
if (ghes_print_estatus(NULL, ghes->generic, estatus))
1173
ghes_estatus_cache_add(ghes->generic, estatus);
1174
}
1175
ghes_do_proc(ghes, estatus);
1176
1177
out:
1178
ghes_clear_estatus(ghes, estatus, buf_paddr, FIX_APEI_GHES_IRQ);
1179
1180
return rc;
1181
}
1182
1183
static void ghes_add_timer(struct ghes *ghes)
1184
{
1185
struct acpi_hest_generic *g = ghes->generic;
1186
unsigned long expire;
1187
1188
if (!g->notify.poll_interval) {
1189
pr_warn(FW_WARN GHES_PFX "Poll interval is 0 for generic hardware error source: %d, disabled.\n",
1190
g->header.source_id);
1191
return;
1192
}
1193
expire = jiffies + msecs_to_jiffies(g->notify.poll_interval);
1194
ghes->timer.expires = round_jiffies_relative(expire);
1195
add_timer(&ghes->timer);
1196
}
1197
1198
static void ghes_poll_func(struct timer_list *t)
1199
{
1200
struct ghes *ghes = timer_container_of(ghes, t, timer);
1201
unsigned long flags;
1202
1203
spin_lock_irqsave(&ghes_notify_lock_irq, flags);
1204
ghes_proc(ghes);
1205
spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);
1206
if (!(ghes->flags & GHES_EXITING))
1207
ghes_add_timer(ghes);
1208
}
1209
1210
static irqreturn_t ghes_irq_func(int irq, void *data)
1211
{
1212
struct ghes *ghes = data;
1213
unsigned long flags;
1214
int rc;
1215
1216
spin_lock_irqsave(&ghes_notify_lock_irq, flags);
1217
rc = ghes_proc(ghes);
1218
spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);
1219
if (rc)
1220
return IRQ_NONE;
1221
1222
return IRQ_HANDLED;
1223
}
1224
1225
static int ghes_notify_hed(struct notifier_block *this, unsigned long event,
1226
void *data)
1227
{
1228
struct ghes *ghes;
1229
unsigned long flags;
1230
int ret = NOTIFY_DONE;
1231
1232
spin_lock_irqsave(&ghes_notify_lock_irq, flags);
1233
list_for_each_entry_rcu(ghes, &ghes_hed, list) {
1234
if (!ghes_proc(ghes))
1235
ret = NOTIFY_OK;
1236
}
1237
spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);
1238
1239
return ret;
1240
}
1241
1242
static struct notifier_block ghes_notifier_hed = {
1243
.notifier_call = ghes_notify_hed,
1244
};
1245
1246
/*
1247
* Handlers for CPER records may not be NMI safe. For example,
1248
* memory_failure_queue() takes spinlocks and calls schedule_work_on().
1249
* In any NMI-like handler, memory from ghes_estatus_pool is used to save
1250
* estatus, and added to the ghes_estatus_llist. irq_work_queue() causes
1251
* ghes_proc_in_irq() to run in IRQ context where each estatus in
1252
* ghes_estatus_llist is processed.
1253
*
1254
* Memory from the ghes_estatus_pool is also used with the ghes_estatus_cache
1255
* to suppress frequent messages.
1256
*/
1257
static struct llist_head ghes_estatus_llist;
1258
static struct irq_work ghes_proc_irq_work;
1259
1260
static void ghes_proc_in_irq(struct irq_work *irq_work)
1261
{
1262
struct llist_node *llnode, *next;
1263
struct ghes_estatus_node *estatus_node;
1264
struct acpi_hest_generic *generic;
1265
struct acpi_hest_generic_status *estatus;
1266
u32 len, node_len;
1267
1268
llnode = llist_del_all(&ghes_estatus_llist);
1269
/*
1270
* The estatus entries in the list are in reverse time order, so
1271
* restore the proper order.
1272
*/
1273
llnode = llist_reverse_order(llnode);
1274
while (llnode) {
1275
next = llnode->next;
1276
estatus_node = llist_entry(llnode, struct ghes_estatus_node,
1277
llnode);
1278
estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
1279
len = cper_estatus_len(estatus);
1280
node_len = GHES_ESTATUS_NODE_LEN(len);
1281
1282
ghes_do_proc(estatus_node->ghes, estatus);
1283
1284
if (!ghes_estatus_cached(estatus)) {
1285
generic = estatus_node->generic;
1286
if (ghes_print_estatus(NULL, generic, estatus))
1287
ghes_estatus_cache_add(generic, estatus);
1288
}
1289
gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node,
1290
node_len);
1291
1292
llnode = next;
1293
}
1294
}
1295
1296
static void ghes_print_queued_estatus(void)
1297
{
1298
struct llist_node *llnode;
1299
struct ghes_estatus_node *estatus_node;
1300
struct acpi_hest_generic *generic;
1301
struct acpi_hest_generic_status *estatus;
1302
1303
llnode = llist_del_all(&ghes_estatus_llist);
1304
/*
1305
* The estatus entries in the list are in reverse time order, so
1306
* restore the proper order.
1307
*/
1308
llnode = llist_reverse_order(llnode);
1309
while (llnode) {
1310
estatus_node = llist_entry(llnode, struct ghes_estatus_node,
1311
llnode);
1312
estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
1313
generic = estatus_node->generic;
1314
ghes_print_estatus(NULL, generic, estatus);
1315
llnode = llnode->next;
1316
}
1317
}
1318
1319
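/*
 * Called from NMI-like context: copy the error status block into memory
 * from ghes_estatus_pool and queue it on ghes_estatus_llist, so that
 * ghes_proc_in_irq() can process it later from IRQ context. Fatal
 * errors are handled immediately via __ghes_panic().
 */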
static int ghes_in_nmi_queue_one_entry(struct ghes *ghes,
1320
enum fixed_addresses fixmap_idx)
1321
{
1322
struct acpi_hest_generic_status *estatus, tmp_header;
1323
struct ghes_estatus_node *estatus_node;
1324
u32 len, node_len;
1325
u64 buf_paddr;
1326
int sev, rc;
1327
1328
if (!IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG))
1329
return -EOPNOTSUPP;
1330
1331
rc = __ghes_peek_estatus(ghes, &tmp_header, &buf_paddr, fixmap_idx);
1332
if (rc) {
1333
ghes_clear_estatus(ghes, &tmp_header, buf_paddr, fixmap_idx);
1334
return rc;
1335
}
1336
1337
rc = __ghes_check_estatus(ghes, &tmp_header);
1338
if (rc) {
1339
ghes_clear_estatus(ghes, &tmp_header, buf_paddr, fixmap_idx);
1340
return rc;
1341
}
1342
1343
len = cper_estatus_len(&tmp_header);
1344
node_len = GHES_ESTATUS_NODE_LEN(len);
1345
estatus_node = (void *)gen_pool_alloc(ghes_estatus_pool, node_len);
1346
if (!estatus_node)
1347
return -ENOMEM;
1348
1349
estatus_node->ghes = ghes;
1350
estatus_node->generic = ghes->generic;
1351
estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
1352
1353
if (__ghes_read_estatus(estatus, buf_paddr, fixmap_idx, len)) {
1354
ghes_clear_estatus(ghes, estatus, buf_paddr, fixmap_idx);
1355
rc = -ENOENT;
1356
goto no_work;
1357
}
1358
1359
sev = ghes_severity(estatus->error_severity);
1360
if (sev >= GHES_SEV_PANIC) {
1361
ghes_print_queued_estatus();
1362
__ghes_panic(ghes, estatus, buf_paddr, fixmap_idx);
1363
}
1364
1365
ghes_clear_estatus(ghes, &tmp_header, buf_paddr, fixmap_idx);
1366
1367
/* This error has been reported before, don't process it again. */
1368
if (ghes_estatus_cached(estatus))
1369
goto no_work;
1370
1371
llist_add(&estatus_node->llnode, &ghes_estatus_llist);
1372
1373
return rc;
1374
1375
no_work:
1376
gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node,
1377
node_len);
1378
1379
return rc;
1380
}
1381
1382
static int ghes_in_nmi_spool_from_list(struct list_head *rcu_list,
1383
enum fixed_addresses fixmap_idx)
1384
{
1385
int ret = -ENOENT;
1386
struct ghes *ghes;
1387
1388
rcu_read_lock();
1389
list_for_each_entry_rcu(ghes, rcu_list, list) {
1390
if (!ghes_in_nmi_queue_one_entry(ghes, fixmap_idx))
1391
ret = 0;
1392
}
1393
rcu_read_unlock();
1394
1395
if (IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG) && !ret)
1396
irq_work_queue(&ghes_proc_irq_work);
1397
1398
return ret;
1399
}
1400
1401
/**
1402
* ghes_has_active_errors - Check if there are active errors in error sources
1403
* @ghes_list: List of GHES entries to check for active errors
1404
*
1405
* This function iterates through all GHES entries in the given list and
1406
* checks if any of them has active error status by reading the error
1407
* status register.
1408
*
1409
* Return: true if at least one source has active error, false otherwise.
1410
*/
1411
static bool __maybe_unused ghes_has_active_errors(struct list_head *ghes_list)
1412
{
1413
struct ghes *ghes;
1414
1415
guard(rcu)();
1416
list_for_each_entry_rcu(ghes, ghes_list, list) {
1417
if (ghes->error_status_vaddr &&
1418
readl(ghes->error_status_vaddr))
1419
return true;
1420
}
1421
1422
return false;
1423
}
1424
1425
/**
1426
* ghes_map_error_status - Map error status address to virtual address
1427
* @ghes: pointer to GHES structure
1428
*
1429
* Reads the error status address from ACPI HEST table and maps it to a virtual
1430
* address that can be accessed by the kernel.
1431
*
1432
* Return: 0 on success, error code on failure.
1433
*/
1434
static int __maybe_unused ghes_map_error_status(struct ghes *ghes)
1435
{
1436
struct acpi_hest_generic *g = ghes->generic;
1437
u64 paddr;
1438
int rc;
1439
1440
rc = apei_read(&paddr, &g->error_status_address);
1441
if (rc)
1442
return rc;
1443
1444
ghes->error_status_vaddr =
1445
acpi_os_ioremap(paddr, sizeof(ghes->estatus->block_status));
1446
if (!ghes->error_status_vaddr)
1447
return -EINVAL;
1448
1449
return 0;
1450
}
1451
1452
/**
1453
* ghes_unmap_error_status - Unmap error status virtual address
1454
* @ghes: pointer to GHES structure
1455
*
1456
* Unmaps the error status address if it was previously mapped.
1457
*/
1458
static void __maybe_unused ghes_unmap_error_status(struct ghes *ghes)
1459
{
1460
if (ghes->error_status_vaddr) {
1461
iounmap(ghes->error_status_vaddr);
1462
ghes->error_status_vaddr = NULL;
1463
}
1464
}
1465
1466
#ifdef CONFIG_ACPI_APEI_SEA
1467
static LIST_HEAD(ghes_sea);
1468
1469
/*
1470
* Return 0 only if one of the SEA error sources successfully reported an error
1471
* record sent from the firmware.
1472
*/
1473
int ghes_notify_sea(void)
1474
{
1475
static DEFINE_RAW_SPINLOCK(ghes_notify_lock_sea);
1476
int rv;
1477
1478
if (!ghes_has_active_errors(&ghes_sea))
1479
return -ENOENT;
1480
1481
raw_spin_lock(&ghes_notify_lock_sea);
1482
rv = ghes_in_nmi_spool_from_list(&ghes_sea, FIX_APEI_GHES_SEA);
1483
raw_spin_unlock(&ghes_notify_lock_sea);
1484
1485
return rv;
1486
}
1487
1488
static int ghes_sea_add(struct ghes *ghes)
1489
{
1490
int rc;
1491
1492
rc = ghes_map_error_status(ghes);
1493
if (rc)
1494
return rc;
1495
1496
mutex_lock(&ghes_list_mutex);
1497
list_add_rcu(&ghes->list, &ghes_sea);
1498
mutex_unlock(&ghes_list_mutex);
1499
1500
return 0;
1501
}
1502
1503
static void ghes_sea_remove(struct ghes *ghes)
1504
{
1505
mutex_lock(&ghes_list_mutex);
1506
list_del_rcu(&ghes->list);
1507
mutex_unlock(&ghes_list_mutex);
1508
ghes_unmap_error_status(ghes);
1509
synchronize_rcu();
1510
}
1511
#else /* CONFIG_ACPI_APEI_SEA */
1512
static inline int ghes_sea_add(struct ghes *ghes) { return -EINVAL; }
1513
static inline void ghes_sea_remove(struct ghes *ghes) { }
1514
#endif /* CONFIG_ACPI_APEI_SEA */
1515
1516
#ifdef CONFIG_HAVE_ACPI_APEI_NMI
1517
/*
1518
* NMI may be triggered on any CPU, so ghes_in_nmi is used to ensure
1519
* there is only one concurrent reader.
1520
*/
1521
static atomic_t ghes_in_nmi = ATOMIC_INIT(0);
1522
1523
static LIST_HEAD(ghes_nmi);
1524
1525
static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
1526
{
1527
static DEFINE_RAW_SPINLOCK(ghes_notify_lock_nmi);
1528
int ret = NMI_DONE;
1529
1530
if (!ghes_has_active_errors(&ghes_nmi))
1531
return ret;
1532
1533
if (!atomic_add_unless(&ghes_in_nmi, 1, 1))
1534
return ret;
1535
1536
raw_spin_lock(&ghes_notify_lock_nmi);
1537
if (!ghes_in_nmi_spool_from_list(&ghes_nmi, FIX_APEI_GHES_NMI))
1538
ret = NMI_HANDLED;
1539
raw_spin_unlock(&ghes_notify_lock_nmi);
1540
1541
atomic_dec(&ghes_in_nmi);
1542
return ret;
1543
}
1544
1545
static int ghes_nmi_add(struct ghes *ghes)
1546
{
1547
int rc;
1548
1549
rc = ghes_map_error_status(ghes);
1550
if (rc)
1551
return rc;
1552
1553
mutex_lock(&ghes_list_mutex);
1554
if (list_empty(&ghes_nmi))
1555
register_nmi_handler(NMI_LOCAL, ghes_notify_nmi, 0, "ghes");
1556
list_add_rcu(&ghes->list, &ghes_nmi);
1557
mutex_unlock(&ghes_list_mutex);
1558
1559
return 0;
1560
}
1561
1562
static void ghes_nmi_remove(struct ghes *ghes)
1563
{
1564
mutex_lock(&ghes_list_mutex);
1565
list_del_rcu(&ghes->list);
1566
if (list_empty(&ghes_nmi))
1567
unregister_nmi_handler(NMI_LOCAL, "ghes");
1568
mutex_unlock(&ghes_list_mutex);
1569
1570
ghes_unmap_error_status(ghes);
1571
1572
/*
1573
* To synchronize with NMI handler, ghes can only be
1574
* freed after NMI handler finishes.
1575
*/
1576
synchronize_rcu();
1577
}
1578
#else /* CONFIG_HAVE_ACPI_APEI_NMI */
1579
static inline int ghes_nmi_add(struct ghes *ghes) { return -EINVAL; }
1580
static inline void ghes_nmi_remove(struct ghes *ghes) { }
1581
#endif /* CONFIG_HAVE_ACPI_APEI_NMI */
1582
1583
static void ghes_nmi_init_cxt(void)
1584
{
1585
init_irq_work(&ghes_proc_irq_work, ghes_proc_in_irq);
1586
}
1587
1588
static int __ghes_sdei_callback(struct ghes *ghes,
1589
enum fixed_addresses fixmap_idx)
1590
{
1591
if (!ghes_in_nmi_queue_one_entry(ghes, fixmap_idx)) {
1592
irq_work_queue(&ghes_proc_irq_work);
1593
1594
return 0;
1595
}
1596
1597
return -ENOENT;
1598
}
1599
1600
static int ghes_sdei_normal_callback(u32 event_num, struct pt_regs *regs,
1601
void *arg)
1602
{
1603
static DEFINE_RAW_SPINLOCK(ghes_notify_lock_sdei_normal);
1604
struct ghes *ghes = arg;
1605
int err;
1606
1607
raw_spin_lock(&ghes_notify_lock_sdei_normal);
1608
err = __ghes_sdei_callback(ghes, FIX_APEI_GHES_SDEI_NORMAL);
1609
raw_spin_unlock(&ghes_notify_lock_sdei_normal);
1610
1611
return err;
1612
}
1613
1614
static int ghes_sdei_critical_callback(u32 event_num, struct pt_regs *regs,
1615
void *arg)
1616
{
1617
static DEFINE_RAW_SPINLOCK(ghes_notify_lock_sdei_critical);
1618
struct ghes *ghes = arg;
1619
int err;
1620
1621
raw_spin_lock(&ghes_notify_lock_sdei_critical);
1622
err = __ghes_sdei_callback(ghes, FIX_APEI_GHES_SDEI_CRITICAL);
1623
raw_spin_unlock(&ghes_notify_lock_sdei_critical);
1624
1625
return err;
1626
}
1627
1628
static int apei_sdei_register_ghes(struct ghes *ghes)
1629
{
1630
if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE))
1631
return -EOPNOTSUPP;
1632
1633
return sdei_register_ghes(ghes, ghes_sdei_normal_callback,
1634
ghes_sdei_critical_callback);
1635
}
1636
1637
static int apei_sdei_unregister_ghes(struct ghes *ghes)
1638
{
1639
if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE))
1640
return -EOPNOTSUPP;
1641
1642
return sdei_unregister_ghes(ghes);
1643
}
1644
1645
static int ghes_probe(struct platform_device *ghes_dev)
1646
{
1647
struct acpi_hest_generic *generic;
1648
struct ghes *ghes = NULL;
1649
unsigned long flags;
1650
1651
int rc = -EINVAL;
1652
1653
generic = *(struct acpi_hest_generic **)ghes_dev->dev.platform_data;
1654
if (!generic->enabled)
1655
return -ENODEV;
1656
1657
switch (generic->notify.type) {
1658
case ACPI_HEST_NOTIFY_POLLED:
1659
case ACPI_HEST_NOTIFY_EXTERNAL:
1660
case ACPI_HEST_NOTIFY_SCI:
1661
case ACPI_HEST_NOTIFY_GSIV:
1662
case ACPI_HEST_NOTIFY_GPIO:
1663
break;
1664
1665
case ACPI_HEST_NOTIFY_SEA:
1666
if (!IS_ENABLED(CONFIG_ACPI_APEI_SEA)) {
1667
pr_warn(GHES_PFX "Generic hardware error source: %d notified via SEA is not supported\n",
1668
generic->header.source_id);
1669
rc = -ENOTSUPP;
1670
goto err;
1671
}
1672
break;
1673
case ACPI_HEST_NOTIFY_NMI:
1674
if (!IS_ENABLED(CONFIG_HAVE_ACPI_APEI_NMI)) {
1675
pr_warn(GHES_PFX "Generic hardware error source: %d notified via NMI interrupt is not supported!\n",
1676
generic->header.source_id);
1677
goto err;
1678
}
1679
break;
1680
case ACPI_HEST_NOTIFY_SOFTWARE_DELEGATED:
1681
if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE)) {
1682
pr_warn(GHES_PFX "Generic hardware error source: %d notified via SDE Interface is not supported!\n",
1683
generic->header.source_id);
1684
goto err;
1685
}
1686
break;
1687
case ACPI_HEST_NOTIFY_LOCAL:
1688
pr_warn(GHES_PFX "Generic hardware error source: %d notified via local interrupt is not supported!\n",
1689
generic->header.source_id);
1690
goto err;
1691
default:
1692
pr_warn(FW_WARN GHES_PFX "Unknown notification type: %u for generic hardware error source: %d\n",
1693
generic->notify.type, generic->header.source_id);
1694
goto err;
1695
}
1696
1697
rc = -EIO;
1698
if (generic->error_block_length <
1699
sizeof(struct acpi_hest_generic_status)) {
1700
pr_warn(FW_BUG GHES_PFX "Invalid error block length: %u for generic hardware error source: %d\n",
1701
generic->error_block_length, generic->header.source_id);
1702
goto err;
1703
}
1704
ghes = ghes_new(generic);
1705
if (IS_ERR(ghes)) {
1706
rc = PTR_ERR(ghes);
1707
ghes = NULL;
1708
goto err;
1709
}
1710
1711
switch (generic->notify.type) {
1712
case ACPI_HEST_NOTIFY_POLLED:
1713
timer_setup(&ghes->timer, ghes_poll_func, 0);
1714
ghes_add_timer(ghes);
1715
break;
1716
case ACPI_HEST_NOTIFY_EXTERNAL:
1717
/* External interrupt vector is GSI */
1718
rc = acpi_gsi_to_irq(generic->notify.vector, &ghes->irq);
1719
if (rc) {
1720
pr_err(GHES_PFX "Failed to map GSI to IRQ for generic hardware error source: %d\n",
1721
generic->header.source_id);
1722
goto err;
1723
}
1724
rc = request_irq(ghes->irq, ghes_irq_func, IRQF_SHARED,
1725
"GHES IRQ", ghes);
1726
if (rc) {
1727
pr_err(GHES_PFX "Failed to register IRQ for generic hardware error source: %d\n",
1728
generic->header.source_id);
1729
goto err;
1730
}
1731
break;
1732
1733
case ACPI_HEST_NOTIFY_SCI:
1734
case ACPI_HEST_NOTIFY_GSIV:
1735
case ACPI_HEST_NOTIFY_GPIO:
1736
mutex_lock(&ghes_list_mutex);
1737
if (list_empty(&ghes_hed))
1738
register_acpi_hed_notifier(&ghes_notifier_hed);
1739
list_add_rcu(&ghes->list, &ghes_hed);
1740
mutex_unlock(&ghes_list_mutex);
1741
break;
1742
1743
case ACPI_HEST_NOTIFY_SEA:
1744
rc = ghes_sea_add(ghes);
1745
if (rc)
1746
goto err;
1747
break;
1748
case ACPI_HEST_NOTIFY_NMI:
1749
rc = ghes_nmi_add(ghes);
1750
if (rc)
1751
goto err;
1752
break;
1753
case ACPI_HEST_NOTIFY_SOFTWARE_DELEGATED:
1754
rc = apei_sdei_register_ghes(ghes);
1755
if (rc)
1756
goto err;
1757
break;
1758
default:
1759
BUG();
1760
}
1761
1762
platform_set_drvdata(ghes_dev, ghes);
1763
1764
ghes->dev = &ghes_dev->dev;
1765
1766
mutex_lock(&ghes_devs_mutex);
1767
list_add_tail(&ghes->elist, &ghes_devs);
1768
mutex_unlock(&ghes_devs_mutex);
1769
1770
/* Handle any pending errors right away */
1771
spin_lock_irqsave(&ghes_notify_lock_irq, flags);
1772
ghes_proc(ghes);
1773
spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);
1774
1775
return 0;
1776
1777
err:
1778
if (ghes) {
1779
ghes_fini(ghes);
1780
kfree(ghes);
1781
}
1782
return rc;
1783
}
1784
1785
static void ghes_remove(struct platform_device *ghes_dev)
1786
{
1787
int rc;
1788
struct ghes *ghes;
1789
struct acpi_hest_generic *generic;
1790
1791
ghes = platform_get_drvdata(ghes_dev);
1792
generic = ghes->generic;
1793
1794
ghes->flags |= GHES_EXITING;
1795
switch (generic->notify.type) {
1796
case ACPI_HEST_NOTIFY_POLLED:
1797
timer_shutdown_sync(&ghes->timer);
1798
break;
1799
case ACPI_HEST_NOTIFY_EXTERNAL:
1800
free_irq(ghes->irq, ghes);
1801
break;
1802
1803
case ACPI_HEST_NOTIFY_SCI:
1804
case ACPI_HEST_NOTIFY_GSIV:
1805
case ACPI_HEST_NOTIFY_GPIO:
1806
mutex_lock(&ghes_list_mutex);
1807
list_del_rcu(&ghes->list);
1808
if (list_empty(&ghes_hed))
1809
unregister_acpi_hed_notifier(&ghes_notifier_hed);
1810
mutex_unlock(&ghes_list_mutex);
1811
synchronize_rcu();
1812
break;
1813
1814
case ACPI_HEST_NOTIFY_SEA:
1815
ghes_sea_remove(ghes);
1816
break;
1817
case ACPI_HEST_NOTIFY_NMI:
1818
ghes_nmi_remove(ghes);
1819
break;
1820
case ACPI_HEST_NOTIFY_SOFTWARE_DELEGATED:
1821
rc = apei_sdei_unregister_ghes(ghes);
1822
if (rc) {
1823
/*
1824
* Returning early results in a resource leak, but we're
1825
* only here if stopping the hardware failed.
1826
*/
1827
dev_err(&ghes_dev->dev, "Failed to unregister ghes (%pe)\n",
1828
ERR_PTR(rc));
1829
return;
1830
}
1831
break;
1832
default:
1833
BUG();
1834
break;
1835
}
1836
1837
ghes_fini(ghes);
1838
1839
mutex_lock(&ghes_devs_mutex);
1840
list_del(&ghes->elist);
1841
mutex_unlock(&ghes_devs_mutex);
1842
1843
kfree(ghes);
1844
}
1845
1846
static struct platform_driver ghes_platform_driver = {
1847
.driver = {
1848
.name = "GHES",
1849
},
1850
.probe = ghes_probe,
1851
.remove = ghes_remove,
1852
};
1853
1854
void __init acpi_ghes_init(void)
1855
{
1856
int rc;
1857
1858
acpi_sdei_init();
1859
1860
if (acpi_disabled)
1861
return;
1862
1863
switch (hest_disable) {
1864
case HEST_NOT_FOUND:
1865
return;
1866
case HEST_DISABLED:
1867
pr_info(GHES_PFX "HEST is not enabled!\n");
1868
return;
1869
default:
1870
break;
1871
}
1872
1873
if (ghes_disable) {
1874
pr_info(GHES_PFX "GHES is not enabled!\n");
1875
return;
1876
}
1877
1878
ghes_nmi_init_cxt();
1879
1880
rc = platform_driver_register(&ghes_platform_driver);
1881
if (rc)
1882
return;
1883
1884
rc = apei_osc_setup();
1885
if (rc == 0 && osc_sb_apei_support_acked)
1886
pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit and WHEA _OSC.\n");
1887
else if (rc == 0 && !osc_sb_apei_support_acked)
1888
pr_info(GHES_PFX "APEI firmware first mode is enabled by WHEA _OSC.\n");
1889
else if (rc && osc_sb_apei_support_acked)
1890
pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit.\n");
1891
else
1892
pr_info(GHES_PFX "Failed to enable APEI firmware first mode.\n");
1893
}
1894
1895
/*
1896
* Known x86 systems that prefer GHES error reporting:
1897
*/
1898
static struct acpi_platform_list plat_list[] = {
1899
{"HPE ", "Server ", 0, ACPI_SIG_FADT, all_versions},
1900
{"__ZX__", "EDK2 ", 3, ACPI_SIG_FADT, greater_than_or_equal},
1901
{"_BYO_ ", "BYOSOFT ", 3, ACPI_SIG_FADT, greater_than_or_equal},
1902
{ } /* End */
1903
};
1904
1905
struct list_head *ghes_get_devices(void)
1906
{
1907
int idx = -1;
1908
1909
if (IS_ENABLED(CONFIG_X86)) {
1910
idx = acpi_match_platform_list(plat_list);
1911
if (idx < 0) {
1912
if (!ghes_edac_force_enable)
1913
return NULL;
1914
1915
pr_warn_once("Force-loading ghes_edac on an unsupported platform. You're on your own!\n");
1916
}
1917
} else if (list_empty(&ghes_devs)) {
1918
return NULL;
1919
}
1920
1921
return &ghes_devs;
1922
}
1923
EXPORT_SYMBOL_GPL(ghes_get_devices);
1924
1925
void ghes_register_report_chain(struct notifier_block *nb)
1926
{
1927
atomic_notifier_chain_register(&ghes_report_chain, nb);
1928
}
1929
EXPORT_SYMBOL_GPL(ghes_register_report_chain);
1930
1931
void ghes_unregister_report_chain(struct notifier_block *nb)
1932
{
1933
atomic_notifier_chain_unregister(&ghes_report_chain, nb);
1934
}
1935
EXPORT_SYMBOL_GPL(ghes_unregister_report_chain);
1936
1937