// SPDX-License-Identifier: GPL-2.0-only
/*
 * APEI Generic Hardware Error Source support
 *
 * Generic Hardware Error Source (GHES) provides a way to report
 * platform hardware errors (such as those from the chipset). It works
 * in the so-called "Firmware First" mode: hardware errors are reported
 * to the firmware first, and the firmware then reports them to Linux.
 * This way, the firmware can inspect non-standard hardware error
 * registers or non-standard hardware links to produce richer error
 * information for Linux.
 *
 * For more information about Generic Hardware Error Source, please
 * refer to the ACPI Specification version 4.0, section 17.3.2.6.
 *
 * Copyright 2010,2011 Intel Corp.
 * Author: Huang Ying <ying.huang@intel.com>
 */
#include <linux/arm_sdei.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/cper.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
#include <linux/irq_work.h>
#include <linux/llist.h>
#include <linux/genalloc.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/aer.h>
#include <linux/nmi.h>
#include <linux/sched/clock.h>
#include <linux/uuid.h>
#include <linux/ras.h>
#include <linux/task_work.h>

#include <acpi/actbl1.h>
#include <acpi/ghes.h>
#include <acpi/apei.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <ras/ras_event.h>

#include "apei-internal.h"
#define GHES_PFX	"GHES: "

#define GHES_ESTATUS_MAX_SIZE		65536
#define GHES_ESOURCE_PREALLOC_MAX_SIZE	65536

#define GHES_ESTATUS_POOL_MIN_ALLOC_ORDER 3

/* This is just an estimate for the memory pool allocation */
#define GHES_ESTATUS_CACHE_AVG_SIZE	512

#define GHES_ESTATUS_CACHES_SIZE	4

#define GHES_ESTATUS_IN_CACHE_MAX_NSEC	10000000000ULL
/* Prevent too many caches from being allocated because of RCU */
#define GHES_ESTATUS_CACHE_ALLOCED_MAX	(GHES_ESTATUS_CACHES_SIZE * 3 / 2)
#define GHES_ESTATUS_CACHE_LEN(estatus_len)			\
	(sizeof(struct ghes_estatus_cache) + (estatus_len))
#define GHES_ESTATUS_FROM_CACHE(estatus_cache)			\
	((struct acpi_hest_generic_status *)			\
	 ((struct ghes_estatus_cache *)(estatus_cache) + 1))

#define GHES_ESTATUS_NODE_LEN(estatus_len)			\
	(sizeof(struct ghes_estatus_node) + (estatus_len))
#define GHES_ESTATUS_FROM_NODE(estatus_node)			\
	((struct acpi_hest_generic_status *)			\
	 ((struct ghes_estatus_node *)(estatus_node) + 1))

#define GHES_VENDOR_ENTRY_LEN(gdata_len)			\
	(sizeof(struct ghes_vendor_record_entry) + (gdata_len))
#define GHES_GDATA_FROM_VENDOR_ENTRY(vendor_entry)		\
	((struct acpi_hest_generic_data *)			\
	 ((struct ghes_vendor_record_entry *)(vendor_entry) + 1))
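
/*
 * Layout sketch (illustrative only): each *_FROM_* helper above relies on the
 * bookkeeping struct being immediately followed by its payload in a single
 * allocation, e.g. for an estatus node:
 *
 *	+--------------------------+-------------------------------------+
 *	| struct ghes_estatus_node | struct acpi_hest_generic_status ... |
 *	+--------------------------+-------------------------------------+
 *	^ gen_pool_alloc() result   ^ GHES_ESTATUS_FROM_NODE(node)
 *
 * so GHES_ESTATUS_NODE_LEN(len) is the size handed to gen_pool_alloc() and
 * gen_pool_free().
 */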
/*
 * NMI-like notifications vary by architecture. Before the compiler can
 * prune unused static functions, it needs a value for these enums.
 */
#ifndef CONFIG_ARM_SDE_INTERFACE
#define FIX_APEI_GHES_SDEI_NORMAL	__end_of_fixed_addresses
#define FIX_APEI_GHES_SDEI_CRITICAL	__end_of_fixed_addresses
#endif
static ATOMIC_NOTIFIER_HEAD(ghes_report_chain);

static inline bool is_hest_type_generic_v2(struct ghes *ghes)
{
	return ghes->generic->header.type == ACPI_HEST_TYPE_GENERIC_ERROR_V2;
}
/*
 * This driver isn't really modular; however, for the time being,
 * continuing to use module_param is the easiest way to remain
 * compatible with existing boot arg use cases.
 */
bool ghes_disable;
module_param_named(disable, ghes_disable, bool, 0);
113 * "ghes.edac_force_enable" forcibly enables ghes_edac and skips the platform
116 static bool ghes_edac_force_enable;
117 module_param_named(edac_force_enable, ghes_edac_force_enable, bool, 0);
/*
 * All error sources notified with HED (Hardware Error Device) share a
 * single notifier callback, so they need to be linked and checked one
 * by one. This holds true for NMI too.
 *
 * RCU is used for these lists, so ghes_list_mutex is only used for
 * list changing, not for traversing.
 */
static LIST_HEAD(ghes_hed);
static DEFINE_MUTEX(ghes_list_mutex);
/*
 * A list of GHES devices which are given to the corresponding EDAC driver
 * ghes_edac for further use.
 */
static LIST_HEAD(ghes_devs);
static DEFINE_MUTEX(ghes_devs_mutex);
/*
 * Because the memory area used to transfer hardware error information
 * from BIOS to Linux can be determined only in an NMI, IRQ or timer
 * handler, and general ioremap cannot be used in atomic context, the
 * fixmap is used instead.
 *
 * This spinlock is used to prevent the fixmap entry from being used
 * simultaneously.
 */
static DEFINE_SPINLOCK(ghes_notify_lock_irq);
struct ghes_vendor_record_entry {
	struct work_struct work;
	int error_severity;
	char vendor_record[];
};
static struct gen_pool *ghes_estatus_pool;

static struct ghes_estatus_cache __rcu *ghes_estatus_caches[GHES_ESTATUS_CACHES_SIZE];
static atomic_t ghes_estatus_cache_alloced;

static int ghes_panic_timeout __read_mostly = 30;
static void __iomem *ghes_map(u64 pfn, enum fixed_addresses fixmap_idx)
{
	phys_addr_t paddr;
	pgprot_t prot;

	paddr = PFN_PHYS(pfn);
	prot = arch_apei_get_mem_attribute(paddr);
	__set_fixmap(fixmap_idx, paddr, prot);

	return (void __iomem *) __fix_to_virt(fixmap_idx);
}

static void ghes_unmap(void __iomem *vaddr, enum fixed_addresses fixmap_idx)
{
	int _idx = virt_to_fix((unsigned long)vaddr);

	WARN_ON_ONCE(fixmap_idx != _idx);
	clear_fixmap(fixmap_idx);
}
int ghes_estatus_pool_init(unsigned int num_ghes)
	unsigned long addr, len;

	ghes_estatus_pool = gen_pool_create(GHES_ESTATUS_POOL_MIN_ALLOC_ORDER, -1);
	if (!ghes_estatus_pool)

	len = GHES_ESTATUS_CACHE_AVG_SIZE * GHES_ESTATUS_CACHE_ALLOCED_MAX;
	len += (num_ghes * GHES_ESOURCE_PREALLOC_MAX_SIZE);

	addr = (unsigned long)vmalloc(PAGE_ALIGN(len));

	rc = gen_pool_add(ghes_estatus_pool, addr, PAGE_ALIGN(len), -1);

	gen_pool_destroy(ghes_estatus_pool);
/**
 * ghes_estatus_pool_region_free - free previously allocated memory
 *				   from the ghes_estatus_pool.
 * @addr: address of memory to free.
 * @size: size of memory to free.
 */
void ghes_estatus_pool_region_free(unsigned long addr, u32 size)
{
	gen_pool_free(ghes_estatus_pool, addr, size);
}
EXPORT_SYMBOL_GPL(ghes_estatus_pool_region_free);
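
/*
 * Usage sketch (assumption, not code from this file): a consumer that is
 * handed a buffer carved out of ghes_estatus_pool - for example the AER info
 * copied in ghes_handle_aer() below - is expected to release it with
 * ghes_estatus_pool_region_free() once it is done with the data:
 *
 *	ghes_estatus_pool_region_free((unsigned long)aer_info,
 *				      sizeof(struct aer_capability_regs));
 */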
static int map_gen_v2(struct ghes *ghes)
{
	return apei_map_generic_address(&ghes->generic_v2->read_ack_register);
}

static void unmap_gen_v2(struct ghes *ghes)
{
	apei_unmap_generic_address(&ghes->generic_v2->read_ack_register);
}
static void ghes_ack_error(struct acpi_hest_generic_v2 *gv2)
{
	int rc;
	u64 val = 0;

	rc = apei_read(&val, &gv2->read_ack_register);
	if (rc)
		return;

	val &= gv2->read_ack_preserve << gv2->read_ack_register.bit_offset;
	val |= gv2->read_ack_write    << gv2->read_ack_register.bit_offset;

	apei_write(val, &gv2->read_ack_register);
}
static struct ghes *ghes_new(struct acpi_hest_generic *generic)
	unsigned int error_block_length;

	ghes = kzalloc(sizeof(*ghes), GFP_KERNEL);
		return ERR_PTR(-ENOMEM);

	ghes->generic = generic;
	if (is_hest_type_generic_v2(ghes)) {
		rc = map_gen_v2(ghes);

	rc = apei_map_generic_address(&generic->error_status_address);
		goto err_unmap_read_ack_addr;
	error_block_length = generic->error_block_length;
	if (error_block_length > GHES_ESTATUS_MAX_SIZE) {
		pr_warn(FW_WARN GHES_PFX
			"Error status block length is too long: %u for "
			"generic hardware error source: %d.\n",
			error_block_length, generic->header.source_id);
		error_block_length = GHES_ESTATUS_MAX_SIZE;
	ghes->estatus = kmalloc(error_block_length, GFP_KERNEL);
	if (!ghes->estatus) {
		goto err_unmap_status_addr;

err_unmap_status_addr:
	apei_unmap_generic_address(&generic->error_status_address);
err_unmap_read_ack_addr:
	if (is_hest_type_generic_v2(ghes))
static void ghes_fini(struct ghes *ghes)
{
	kfree(ghes->estatus);
	apei_unmap_generic_address(&ghes->generic->error_status_address);
	if (is_hest_type_generic_v2(ghes))
		unmap_gen_v2(ghes);
}
static inline int ghes_severity(int severity)
{
	switch (severity) {
	case CPER_SEV_INFORMATIONAL:
		return GHES_SEV_NO;
	case CPER_SEV_CORRECTED:
		return GHES_SEV_CORRECTED;
	case CPER_SEV_RECOVERABLE:
		return GHES_SEV_RECOVERABLE;
	case CPER_SEV_FATAL:
		return GHES_SEV_PANIC;
	default:
		/* Unknown, go panic */
		return GHES_SEV_PANIC;
	}
}
static void ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len,
				  enum fixed_addresses fixmap_idx)
	offset = paddr - (paddr & PAGE_MASK);
	vaddr = ghes_map(PHYS_PFN(paddr), fixmap_idx);
	trunk = PAGE_SIZE - offset;
	trunk = min(trunk, len);
		memcpy_fromio(buffer, vaddr + offset, trunk);
		memcpy_toio(vaddr + offset, buffer, trunk);
	ghes_unmap(vaddr, fixmap_idx);
/* Check the top-level record header has an appropriate size. */
static int __ghes_check_estatus(struct ghes *ghes,
				struct acpi_hest_generic_status *estatus)
	u32 len = cper_estatus_len(estatus);

	if (len < sizeof(*estatus)) {
		pr_warn_ratelimited(FW_WARN GHES_PFX "Truncated error status block!\n");

	if (len > ghes->generic->error_block_length) {
		pr_warn_ratelimited(FW_WARN GHES_PFX "Invalid error status block length!\n");

	if (cper_estatus_check_header(estatus)) {
		pr_warn_ratelimited(FW_WARN GHES_PFX "Invalid CPER header!\n");
/* Read the CPER block, returning its address, and header in estatus. */
static int __ghes_peek_estatus(struct ghes *ghes,
			       struct acpi_hest_generic_status *estatus,
			       u64 *buf_paddr, enum fixed_addresses fixmap_idx)
	struct acpi_hest_generic *g = ghes->generic;

	rc = apei_read(buf_paddr, &g->error_status_address);
		pr_warn_ratelimited(FW_WARN GHES_PFX
				    "Failed to read error status block address for hardware error source: %d.\n",
				    g->header.source_id);

	ghes_copy_tofrom_phys(estatus, *buf_paddr, sizeof(*estatus), 1,
	if (!estatus->block_status) {
static int __ghes_read_estatus(struct acpi_hest_generic_status *estatus,
			       u64 buf_paddr, enum fixed_addresses fixmap_idx,
	ghes_copy_tofrom_phys(estatus, buf_paddr, buf_len, 1, fixmap_idx);
	if (cper_estatus_check(estatus)) {
		pr_warn_ratelimited(FW_WARN GHES_PFX
				    "Failed to read error status block!\n");

static int ghes_read_estatus(struct ghes *ghes,
			     struct acpi_hest_generic_status *estatus,
			     u64 *buf_paddr, enum fixed_addresses fixmap_idx)

	rc = __ghes_peek_estatus(ghes, estatus, buf_paddr, fixmap_idx);

	rc = __ghes_check_estatus(ghes, estatus);

	return __ghes_read_estatus(estatus, *buf_paddr, fixmap_idx,
				   cper_estatus_len(estatus));
static void ghes_clear_estatus(struct ghes *ghes,
			       struct acpi_hest_generic_status *estatus,
			       u64 buf_paddr, enum fixed_addresses fixmap_idx)
	estatus->block_status = 0;

	ghes_copy_tofrom_phys(estatus, buf_paddr,
			      sizeof(estatus->block_status), 0,

	/*
	 * GHESv2 type HEST entries introduce support for error acknowledgment,
	 * so only acknowledge the error if this support is present.
	 */
	if (is_hest_type_generic_v2(ghes))
		ghes_ack_error(ghes->generic_v2);
/*
 * Called as task_work before returning to user-space.
 * Ensure any queued work has been done before we return to the context that
 * triggered the notification.
 */
static void ghes_kick_task_work(struct callback_head *head)
{
	struct acpi_hest_generic_status *estatus;
	struct ghes_estatus_node *estatus_node;
	u32 node_len;

	estatus_node = container_of(head, struct ghes_estatus_node, task_work);
	if (IS_ENABLED(CONFIG_ACPI_APEI_MEMORY_FAILURE))
		memory_failure_queue_kick(estatus_node->task_work_cpu);

	estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
	node_len = GHES_ESTATUS_NODE_LEN(cper_estatus_len(estatus));
	gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node, node_len);
}
static bool ghes_do_memory_failure(u64 physical_addr, int flags)

	if (!IS_ENABLED(CONFIG_ACPI_APEI_MEMORY_FAILURE))

	pfn = PHYS_PFN(physical_addr);
	if (!pfn_valid(pfn) && !arch_is_platform_page(physical_addr)) {
		pr_warn_ratelimited(FW_WARN GHES_PFX
				    "Invalid address in generic error data: %#llx\n",

	memory_failure_queue(pfn, flags);
static bool ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata,
	int sec_sev = ghes_severity(gdata->error_severity);
	struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata);

	if (!(mem_err->validation_bits & CPER_MEM_VALID_PA))

	/* iff the following two events can be handled properly by now */
	if (sec_sev == GHES_SEV_CORRECTED &&
	    (gdata->flags & CPER_SEC_ERROR_THRESHOLD_EXCEEDED))
		flags = MF_SOFT_OFFLINE;
	if (sev == GHES_SEV_RECOVERABLE && sec_sev == GHES_SEV_RECOVERABLE)

	return ghes_do_memory_failure(mem_err->physical_addr, flags);
static bool ghes_handle_arm_hw_error(struct acpi_hest_generic_data *gdata, int sev)
	struct cper_sec_proc_arm *err = acpi_hest_get_payload(gdata);

	log_arm_hw_error(err);

	sec_sev = ghes_severity(gdata->error_severity);
	if (sev != GHES_SEV_RECOVERABLE || sec_sev != GHES_SEV_RECOVERABLE)

	p = (char *)(err + 1);
	for (i = 0; i < err->err_info_num; i++) {
		struct cper_arm_err_info *err_info = (struct cper_arm_err_info *)p;
		bool is_cache = (err_info->type == CPER_ARM_CACHE_ERROR);
		bool has_pa = (err_info->validation_bits & CPER_ARM_INFO_VALID_PHYSICAL_ADDR);
		const char *error_type = "unknown error";

		/*
		 * The field (err_info->error_info & BIT(26)) is always set to
		 * 1 by some old HiSilicon Kunpeng920 firmware. We assume that
		 * firmware won't mix corrected errors into an uncorrected
		 * section, and don't filter out 'corrected' errors here.
		 */
		if (is_cache && has_pa) {
			queued = ghes_do_memory_failure(err_info->physical_fault_addr, 0);
			p += err_info->length;

		if (err_info->type < ARRAY_SIZE(cper_proc_error_type_strs))
			error_type = cper_proc_error_type_strs[err_info->type];

		pr_warn_ratelimited(FW_WARN GHES_PFX
				    "Unhandled processor error type: %s\n",

		p += err_info->length;
/*
 * PCIe AER errors need to be sent to the AER driver for reporting and
 * recovery. The GHES severities map to the following AER severities and
 * require the following handling:
 *
 * GHES_SEV_CORRECTABLE -> AER_CORRECTABLE
 *	These need to be reported by the AER driver but no recovery is
 *	necessary.
 * GHES_SEV_RECOVERABLE -> AER_NONFATAL
 * GHES_SEV_RECOVERABLE && CPER_SEC_RESET -> AER_FATAL
 *	These both need to be reported and recovered from by the AER driver.
 * GHES_SEV_PANIC does not make it to this handling since the kernel must
 *	panic.
 */
static void ghes_handle_aer(struct acpi_hest_generic_data *gdata)
#ifdef CONFIG_ACPI_APEI_PCIEAER
	struct cper_sec_pcie *pcie_err = acpi_hest_get_payload(gdata);

	if (pcie_err->validation_bits & CPER_PCIE_VALID_DEVICE_ID &&
	    pcie_err->validation_bits & CPER_PCIE_VALID_AER_INFO) {
		devfn = PCI_DEVFN(pcie_err->device_id.device,
				  pcie_err->device_id.function);
		aer_severity = cper_severity_to_aer(gdata->error_severity);

		/*
		 * If firmware reset the component to contain
		 * the error, we must reinitialize it before
		 * use, so treat it as a fatal AER error.
		 */
		if (gdata->flags & CPER_SEC_RESET)
			aer_severity = AER_FATAL;

		aer_info = (void *)gen_pool_alloc(ghes_estatus_pool,
						  sizeof(struct aer_capability_regs));

		memcpy(aer_info, pcie_err->aer_info, sizeof(struct aer_capability_regs));

		aer_recover_queue(pcie_err->device_id.segment,
				  pcie_err->device_id.bus,
				  (struct aer_capability_regs *)
static BLOCKING_NOTIFIER_HEAD(vendor_record_notify_list);

int ghes_register_vendor_record_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&vendor_record_notify_list, nb);
}
EXPORT_SYMBOL_GPL(ghes_register_vendor_record_notifier);

void ghes_unregister_vendor_record_notifier(struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&vendor_record_notify_list, nb);
}
EXPORT_SYMBOL_GPL(ghes_unregister_vendor_record_notifier);
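
/*
 * Illustrative sketch (not part of this driver): a platform driver that wants
 * to see non-standard (vendor-specific) CPER sections could hook the chain as
 * below. The callback runs in process context; @event carries the GHES
 * severity and @data points to the copied struct acpi_hest_generic_data.
 * All "example_*" names are hypothetical.
 *
 *	static int example_vendor_notify(struct notifier_block *nb,
 *					 unsigned long event, void *data)
 *	{
 *		struct acpi_hest_generic_data *gdata = data;
 *
 *		// inspect gdata->section_type and the payload here
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_vendor_nb = {
 *		.notifier_call = example_vendor_notify,
 *	};
 *
 *	ghes_register_vendor_record_notifier(&example_vendor_nb);
 */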
static void ghes_vendor_record_work_func(struct work_struct *work)
{
	struct ghes_vendor_record_entry *entry;
	struct acpi_hest_generic_data *gdata;
	u32 len;

	entry = container_of(work, struct ghes_vendor_record_entry, work);
	gdata = GHES_GDATA_FROM_VENDOR_ENTRY(entry);

	blocking_notifier_call_chain(&vendor_record_notify_list,
				     entry->error_severity, gdata);

	len = GHES_VENDOR_ENTRY_LEN(acpi_hest_get_record_size(gdata));
	gen_pool_free(ghes_estatus_pool, (unsigned long)entry, len);
}
static void ghes_defer_non_standard_event(struct acpi_hest_generic_data *gdata,
					  int sev)
{
	struct acpi_hest_generic_data *copied_gdata;
	struct ghes_vendor_record_entry *entry;
	u32 len;

	len = GHES_VENDOR_ENTRY_LEN(acpi_hest_get_record_size(gdata));
	entry = (void *)gen_pool_alloc(ghes_estatus_pool, len);
	if (!entry)
		return;

	copied_gdata = GHES_GDATA_FROM_VENDOR_ENTRY(entry);
	memcpy(copied_gdata, gdata, acpi_hest_get_record_size(gdata));
	entry->error_severity = sev;

	INIT_WORK(&entry->work, ghes_vendor_record_work_func);
	schedule_work(&entry->work);
}
static bool ghes_do_proc(struct ghes *ghes,
			 const struct acpi_hest_generic_status *estatus)
	struct acpi_hest_generic_data *gdata;
	const guid_t *fru_id = &guid_null;

	sev = ghes_severity(estatus->error_severity);
	apei_estatus_for_each_section(estatus, gdata) {
		sec_type = (guid_t *)gdata->section_type;
		sec_sev = ghes_severity(gdata->error_severity);
		if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID)
			fru_id = (guid_t *)gdata->fru_id;

		if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
			fru_text = gdata->fru_text;

		if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
			struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata);

			atomic_notifier_call_chain(&ghes_report_chain, sev, mem_err);

			arch_apei_report_mem_error(sev, mem_err);
			queued = ghes_handle_memory_failure(gdata, sev);
		else if (guid_equal(sec_type, &CPER_SEC_PCIE)) {
			ghes_handle_aer(gdata);
		else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
			queued = ghes_handle_arm_hw_error(gdata, sev);
			void *err = acpi_hest_get_payload(gdata);

			ghes_defer_non_standard_event(gdata, sev);
			log_non_standard_event(sec_type, fru_id, fru_text,
					       gdata->error_data_length);
static void __ghes_print_estatus(const char *pfx,
				 const struct acpi_hest_generic *generic,
				 const struct acpi_hest_generic_status *estatus)
	static atomic_t seqno;
	unsigned int curr_seqno;

	if (ghes_severity(estatus->error_severity) <=

	curr_seqno = atomic_inc_return(&seqno);
	snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
	printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
	       pfx_seq, generic->header.source_id);
	cper_estatus_print(pfx_seq, estatus);
static int ghes_print_estatus(const char *pfx,
			      const struct acpi_hest_generic *generic,
			      const struct acpi_hest_generic_status *estatus)
	/* Not more than 2 messages every 5 seconds */
	static DEFINE_RATELIMIT_STATE(ratelimit_corrected, 5*HZ, 2);
	static DEFINE_RATELIMIT_STATE(ratelimit_uncorrected, 5*HZ, 2);
	struct ratelimit_state *ratelimit;

	if (ghes_severity(estatus->error_severity) <= GHES_SEV_CORRECTED)
		ratelimit = &ratelimit_corrected;
	else
		ratelimit = &ratelimit_uncorrected;
	if (__ratelimit(ratelimit)) {
		__ghes_print_estatus(pfx, generic, estatus);
/*
 * GHES error status reporting throttle, to report more kinds of
 * errors instead of just the most frequently occurring ones.
 */
static int ghes_estatus_cached(struct acpi_hest_generic_status *estatus)
	unsigned long long now;
	struct ghes_estatus_cache *cache;
	struct acpi_hest_generic_status *cache_estatus;

	len = cper_estatus_len(estatus);
	for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) {
		cache = rcu_dereference(ghes_estatus_caches[i]);
		if (len != cache->estatus_len)
		cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
		if (memcmp(estatus, cache_estatus, len))
		atomic_inc(&cache->count);
		if (now - cache->time_in < GHES_ESTATUS_IN_CACHE_MAX_NSEC)
static struct ghes_estatus_cache *ghes_estatus_cache_alloc(
	struct acpi_hest_generic *generic,
	struct acpi_hest_generic_status *estatus)
	struct ghes_estatus_cache *cache;
	struct acpi_hest_generic_status *cache_estatus;

	alloced = atomic_add_return(1, &ghes_estatus_cache_alloced);
	if (alloced > GHES_ESTATUS_CACHE_ALLOCED_MAX) {
		atomic_dec(&ghes_estatus_cache_alloced);
	len = cper_estatus_len(estatus);
	cache_len = GHES_ESTATUS_CACHE_LEN(len);
	cache = (void *)gen_pool_alloc(ghes_estatus_pool, cache_len);
		atomic_dec(&ghes_estatus_cache_alloced);
	cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
	memcpy(cache_estatus, estatus, len);
	cache->estatus_len = len;
	atomic_set(&cache->count, 0);
	cache->generic = generic;
	cache->time_in = sched_clock();
static void ghes_estatus_cache_rcu_free(struct rcu_head *head)
{
	struct ghes_estatus_cache *cache;
	u32 len;

	cache = container_of(head, struct ghes_estatus_cache, rcu);
	len = cper_estatus_len(GHES_ESTATUS_FROM_CACHE(cache));
	len = GHES_ESTATUS_CACHE_LEN(len);
	gen_pool_free(ghes_estatus_pool, (unsigned long)cache, len);
	atomic_dec(&ghes_estatus_cache_alloced);
}
static void
ghes_estatus_cache_add(struct acpi_hest_generic *generic,
		       struct acpi_hest_generic_status *estatus)
	unsigned long long now, duration, period, max_period = 0;
	struct ghes_estatus_cache *cache, *new_cache;
	struct ghes_estatus_cache __rcu *victim;
	int i, slot = -1, count;

	new_cache = ghes_estatus_cache_alloc(generic, estatus);

	for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) {
		cache = rcu_dereference(ghes_estatus_caches[i]);
		duration = now - cache->time_in;
		if (duration >= GHES_ESTATUS_IN_CACHE_MAX_NSEC) {
		count = atomic_read(&cache->count);
		do_div(period, (count + 1));
		if (period > max_period) {

	/*
	 * Use release semantics to ensure that ghes_estatus_cached()
	 * running on another CPU will see the updated cache fields if
	 * it can see the new value of the pointer.
	 */
	victim = xchg_release(&ghes_estatus_caches[slot],
			      RCU_INITIALIZER(new_cache));

	/*
	 * At this point, victim may point to a cached item different
	 * from the one based on which we selected the slot. Instead of
	 * going to the loop again to pick another slot, let's just
	 * drop the other item anyway: this may cause a false cache
	 * miss later on, but that won't cause any problems.
	 */
	call_rcu(&unrcu_pointer(victim)->rcu,
		 ghes_estatus_cache_rcu_free);
static void __ghes_panic(struct ghes *ghes,
			 struct acpi_hest_generic_status *estatus,
			 u64 buf_paddr, enum fixed_addresses fixmap_idx)
{
	__ghes_print_estatus(KERN_EMERG, ghes->generic, estatus);

	ghes_clear_estatus(ghes, estatus, buf_paddr, fixmap_idx);

	/* reboot to log the error! */
	if (panic_timeout == 0)
		panic_timeout = ghes_panic_timeout;
	panic("Fatal hardware error!");
}
static int ghes_proc(struct ghes *ghes)
	struct acpi_hest_generic_status *estatus = ghes->estatus;

	rc = ghes_read_estatus(ghes, estatus, &buf_paddr, FIX_APEI_GHES_IRQ);

	if (ghes_severity(estatus->error_severity) >= GHES_SEV_PANIC)
		__ghes_panic(ghes, estatus, buf_paddr, FIX_APEI_GHES_IRQ);

	if (!ghes_estatus_cached(estatus)) {
		if (ghes_print_estatus(NULL, ghes->generic, estatus))
			ghes_estatus_cache_add(ghes->generic, estatus);
	ghes_do_proc(ghes, estatus);

	ghes_clear_estatus(ghes, estatus, buf_paddr, FIX_APEI_GHES_IRQ);
static void ghes_add_timer(struct ghes *ghes)
	struct acpi_hest_generic *g = ghes->generic;
	unsigned long expire;

	if (!g->notify.poll_interval) {
		pr_warn(FW_WARN GHES_PFX "Poll interval is 0 for generic hardware error source: %d, disabled.\n",
			g->header.source_id);

	expire = jiffies + msecs_to_jiffies(g->notify.poll_interval);
	ghes->timer.expires = round_jiffies_relative(expire);
	add_timer(&ghes->timer);
static void ghes_poll_func(struct timer_list *t)
{
	struct ghes *ghes = from_timer(ghes, t, timer);
	unsigned long flags;

	spin_lock_irqsave(&ghes_notify_lock_irq, flags);
	ghes_proc(ghes);
	spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);
	if (!(ghes->flags & GHES_EXITING))
		ghes_add_timer(ghes);
}
static irqreturn_t ghes_irq_func(int irq, void *data)
	struct ghes *ghes = data;

	spin_lock_irqsave(&ghes_notify_lock_irq, flags);
	rc = ghes_proc(ghes);
	spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);
static int ghes_notify_hed(struct notifier_block *this, unsigned long event,
	int ret = NOTIFY_DONE;

	spin_lock_irqsave(&ghes_notify_lock_irq, flags);
	list_for_each_entry_rcu(ghes, &ghes_hed, list) {
		if (!ghes_proc(ghes))
	spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);

static struct notifier_block ghes_notifier_hed = {
	.notifier_call = ghes_notify_hed,
};
/*
 * Handlers for CPER records may not be NMI safe. For example,
 * memory_failure_queue() takes spinlocks and calls schedule_work_on().
 * In any NMI-like handler, memory from ghes_estatus_pool is used to save
 * estatus, and added to the ghes_estatus_llist. irq_work_queue() causes
 * ghes_proc_in_irq() to run in IRQ context where each estatus in
 * ghes_estatus_llist is processed.
 *
 * Memory from the ghes_estatus_pool is also used with the ghes_estatus_cache
 * to suppress frequent messages.
 */
static struct llist_head ghes_estatus_llist;
static struct irq_work ghes_proc_irq_work;
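
/*
 * Rough flow for NMI-like notifications (summary of the comment above):
 *
 *   NMI/SEA/SDEI handler
 *     -> ghes_in_nmi_queue_one_entry(): copy the estatus into a node from
 *        ghes_estatus_pool and llist_add() it to ghes_estatus_llist
 *     -> irq_work_queue(&ghes_proc_irq_work)
 *   IRQ context: ghes_proc_in_irq()
 *     -> ghes_do_proc() for each queued node
 *     -> optionally task_work_add() so that ghes_kick_task_work() runs
 *        before returning to user-space
 */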
static void ghes_proc_in_irq(struct irq_work *irq_work)
	struct llist_node *llnode, *next;
	struct ghes_estatus_node *estatus_node;
	struct acpi_hest_generic *generic;
	struct acpi_hest_generic_status *estatus;
	bool task_work_pending;

	llnode = llist_del_all(&ghes_estatus_llist);
	/*
	 * Because the time order of estatus in the list is reversed,
	 * revert it back to the proper order.
	 */
	llnode = llist_reverse_order(llnode);
		next = llnode->next;
		estatus_node = llist_entry(llnode, struct ghes_estatus_node,
		estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
		len = cper_estatus_len(estatus);
		node_len = GHES_ESTATUS_NODE_LEN(len);
		task_work_pending = ghes_do_proc(estatus_node->ghes, estatus);
		if (!ghes_estatus_cached(estatus)) {
			generic = estatus_node->generic;
			if (ghes_print_estatus(NULL, generic, estatus))
				ghes_estatus_cache_add(generic, estatus);

		if (task_work_pending && current->mm) {
			estatus_node->task_work.func = ghes_kick_task_work;
			estatus_node->task_work_cpu = smp_processor_id();
			ret = task_work_add(current, &estatus_node->task_work,
				estatus_node->task_work.func = NULL;

		if (!estatus_node->task_work.func)
			gen_pool_free(ghes_estatus_pool,
				      (unsigned long)estatus_node, node_len);
static void ghes_print_queued_estatus(void)
	struct llist_node *llnode;
	struct ghes_estatus_node *estatus_node;
	struct acpi_hest_generic *generic;
	struct acpi_hest_generic_status *estatus;

	llnode = llist_del_all(&ghes_estatus_llist);
	/*
	 * Because the time order of estatus in the list is reversed,
	 * revert it back to the proper order.
	 */
	llnode = llist_reverse_order(llnode);
		estatus_node = llist_entry(llnode, struct ghes_estatus_node,
		estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
		generic = estatus_node->generic;
		ghes_print_estatus(NULL, generic, estatus);
		llnode = llnode->next;
static int ghes_in_nmi_queue_one_entry(struct ghes *ghes,
				       enum fixed_addresses fixmap_idx)
	struct acpi_hest_generic_status *estatus, tmp_header;
	struct ghes_estatus_node *estatus_node;

	if (!IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG))

	rc = __ghes_peek_estatus(ghes, &tmp_header, &buf_paddr, fixmap_idx);
		ghes_clear_estatus(ghes, &tmp_header, buf_paddr, fixmap_idx);

	rc = __ghes_check_estatus(ghes, &tmp_header);
		ghes_clear_estatus(ghes, &tmp_header, buf_paddr, fixmap_idx);

	len = cper_estatus_len(&tmp_header);
	node_len = GHES_ESTATUS_NODE_LEN(len);
	estatus_node = (void *)gen_pool_alloc(ghes_estatus_pool, node_len);

	estatus_node->ghes = ghes;
	estatus_node->generic = ghes->generic;
	estatus_node->task_work.func = NULL;
	estatus = GHES_ESTATUS_FROM_NODE(estatus_node);

	if (__ghes_read_estatus(estatus, buf_paddr, fixmap_idx, len)) {
		ghes_clear_estatus(ghes, estatus, buf_paddr, fixmap_idx);

	sev = ghes_severity(estatus->error_severity);
	if (sev >= GHES_SEV_PANIC) {
		ghes_print_queued_estatus();
		__ghes_panic(ghes, estatus, buf_paddr, fixmap_idx);

	ghes_clear_estatus(ghes, &tmp_header, buf_paddr, fixmap_idx);

	/* This error has been reported before, don't process it again. */
	if (ghes_estatus_cached(estatus))

	llist_add(&estatus_node->llnode, &ghes_estatus_llist);

	gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node,
static int ghes_in_nmi_spool_from_list(struct list_head *rcu_list,
				       enum fixed_addresses fixmap_idx)

	list_for_each_entry_rcu(ghes, rcu_list, list) {
		if (!ghes_in_nmi_queue_one_entry(ghes, fixmap_idx))

	if (IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG) && !ret)
		irq_work_queue(&ghes_proc_irq_work);
#ifdef CONFIG_ACPI_APEI_SEA
static LIST_HEAD(ghes_sea);

/*
 * Return 0 only if one of the SEA error sources successfully reported an error
 * record sent from the firmware.
 */
int ghes_notify_sea(void)
{
	static DEFINE_RAW_SPINLOCK(ghes_notify_lock_sea);
	int rv;

	raw_spin_lock(&ghes_notify_lock_sea);
	rv = ghes_in_nmi_spool_from_list(&ghes_sea, FIX_APEI_GHES_SEA);
	raw_spin_unlock(&ghes_notify_lock_sea);

	return rv;
}
static void ghes_sea_add(struct ghes *ghes)
{
	mutex_lock(&ghes_list_mutex);
	list_add_rcu(&ghes->list, &ghes_sea);
	mutex_unlock(&ghes_list_mutex);
}

static void ghes_sea_remove(struct ghes *ghes)
{
	mutex_lock(&ghes_list_mutex);
	list_del_rcu(&ghes->list);
	mutex_unlock(&ghes_list_mutex);
	synchronize_rcu();
}
#else /* CONFIG_ACPI_APEI_SEA */
static inline void ghes_sea_add(struct ghes *ghes) { }
static inline void ghes_sea_remove(struct ghes *ghes) { }
#endif /* CONFIG_ACPI_APEI_SEA */
#ifdef CONFIG_HAVE_ACPI_APEI_NMI
/*
 * NMI may be triggered on any CPU, so ghes_in_nmi is used for
 * having only one concurrent reader.
 */
static atomic_t ghes_in_nmi = ATOMIC_INIT(0);

static LIST_HEAD(ghes_nmi);

static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
	static DEFINE_RAW_SPINLOCK(ghes_notify_lock_nmi);

	if (!atomic_add_unless(&ghes_in_nmi, 1, 1))

	raw_spin_lock(&ghes_notify_lock_nmi);
	if (!ghes_in_nmi_spool_from_list(&ghes_nmi, FIX_APEI_GHES_NMI))
	raw_spin_unlock(&ghes_notify_lock_nmi);

	atomic_dec(&ghes_in_nmi);
static void ghes_nmi_add(struct ghes *ghes)
{
	mutex_lock(&ghes_list_mutex);
	if (list_empty(&ghes_nmi))
		register_nmi_handler(NMI_LOCAL, ghes_notify_nmi, 0, "ghes");
	list_add_rcu(&ghes->list, &ghes_nmi);
	mutex_unlock(&ghes_list_mutex);
}

static void ghes_nmi_remove(struct ghes *ghes)
{
	mutex_lock(&ghes_list_mutex);
	list_del_rcu(&ghes->list);
	if (list_empty(&ghes_nmi))
		unregister_nmi_handler(NMI_LOCAL, "ghes");
	mutex_unlock(&ghes_list_mutex);
	/*
	 * To synchronize with the NMI handler, ghes can only be
	 * freed after the NMI handler finishes.
	 */
	synchronize_rcu();
}
#else /* CONFIG_HAVE_ACPI_APEI_NMI */
static inline void ghes_nmi_add(struct ghes *ghes) { }
static inline void ghes_nmi_remove(struct ghes *ghes) { }
#endif /* CONFIG_HAVE_ACPI_APEI_NMI */
static void ghes_nmi_init_cxt(void)
{
	init_irq_work(&ghes_proc_irq_work, ghes_proc_in_irq);
}

static int __ghes_sdei_callback(struct ghes *ghes,
				enum fixed_addresses fixmap_idx)
	if (!ghes_in_nmi_queue_one_entry(ghes, fixmap_idx)) {
		irq_work_queue(&ghes_proc_irq_work);
static int ghes_sdei_normal_callback(u32 event_num, struct pt_regs *regs,
	static DEFINE_RAW_SPINLOCK(ghes_notify_lock_sdei_normal);
	struct ghes *ghes = arg;

	raw_spin_lock(&ghes_notify_lock_sdei_normal);
	err = __ghes_sdei_callback(ghes, FIX_APEI_GHES_SDEI_NORMAL);
	raw_spin_unlock(&ghes_notify_lock_sdei_normal);

static int ghes_sdei_critical_callback(u32 event_num, struct pt_regs *regs,
	static DEFINE_RAW_SPINLOCK(ghes_notify_lock_sdei_critical);
	struct ghes *ghes = arg;

	raw_spin_lock(&ghes_notify_lock_sdei_critical);
	err = __ghes_sdei_callback(ghes, FIX_APEI_GHES_SDEI_CRITICAL);
	raw_spin_unlock(&ghes_notify_lock_sdei_critical);
static int apei_sdei_register_ghes(struct ghes *ghes)
{
	if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE))
		return -EOPNOTSUPP;

	return sdei_register_ghes(ghes, ghes_sdei_normal_callback,
				  ghes_sdei_critical_callback);
}

static int apei_sdei_unregister_ghes(struct ghes *ghes)
{
	if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE))
		return -EOPNOTSUPP;

	return sdei_unregister_ghes(ghes);
}
static int ghes_probe(struct platform_device *ghes_dev)
	struct acpi_hest_generic *generic;
	struct ghes *ghes = NULL;
	unsigned long flags;

	generic = *(struct acpi_hest_generic **)ghes_dev->dev.platform_data;
	if (!generic->enabled)

	switch (generic->notify.type) {
	case ACPI_HEST_NOTIFY_POLLED:
	case ACPI_HEST_NOTIFY_EXTERNAL:
	case ACPI_HEST_NOTIFY_SCI:
	case ACPI_HEST_NOTIFY_GSIV:
	case ACPI_HEST_NOTIFY_GPIO:
	case ACPI_HEST_NOTIFY_SEA:
		if (!IS_ENABLED(CONFIG_ACPI_APEI_SEA)) {
			pr_warn(GHES_PFX "Generic hardware error source: %d notified via SEA is not supported\n",
				generic->header.source_id);
	case ACPI_HEST_NOTIFY_NMI:
		if (!IS_ENABLED(CONFIG_HAVE_ACPI_APEI_NMI)) {
			pr_warn(GHES_PFX "Generic hardware error source: %d notified via NMI interrupt is not supported!\n",
				generic->header.source_id);
	case ACPI_HEST_NOTIFY_SOFTWARE_DELEGATED:
		if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE)) {
			pr_warn(GHES_PFX "Generic hardware error source: %d notified via SDE Interface is not supported!\n",
				generic->header.source_id);
	case ACPI_HEST_NOTIFY_LOCAL:
		pr_warn(GHES_PFX "Generic hardware error source: %d notified via local interrupt is not supported!\n",
			generic->header.source_id);
		pr_warn(FW_WARN GHES_PFX "Unknown notification type: %u for generic hardware error source: %d\n",
			generic->notify.type, generic->header.source_id);

	if (generic->error_block_length <
	    sizeof(struct acpi_hest_generic_status)) {
		pr_warn(FW_BUG GHES_PFX "Invalid error block length: %u for generic hardware error source: %d\n",
			generic->error_block_length, generic->header.source_id);
	ghes = ghes_new(generic);

	switch (generic->notify.type) {
	case ACPI_HEST_NOTIFY_POLLED:
		timer_setup(&ghes->timer, ghes_poll_func, 0);
		ghes_add_timer(ghes);
	case ACPI_HEST_NOTIFY_EXTERNAL:
		/* External interrupt vector is GSI */
		rc = acpi_gsi_to_irq(generic->notify.vector, &ghes->irq);
			pr_err(GHES_PFX "Failed to map GSI to IRQ for generic hardware error source: %d\n",
			       generic->header.source_id);
		rc = request_irq(ghes->irq, ghes_irq_func, IRQF_SHARED,
			pr_err(GHES_PFX "Failed to register IRQ for generic hardware error source: %d\n",
			       generic->header.source_id);
	case ACPI_HEST_NOTIFY_SCI:
	case ACPI_HEST_NOTIFY_GSIV:
	case ACPI_HEST_NOTIFY_GPIO:
		mutex_lock(&ghes_list_mutex);
		if (list_empty(&ghes_hed))
			register_acpi_hed_notifier(&ghes_notifier_hed);
		list_add_rcu(&ghes->list, &ghes_hed);
		mutex_unlock(&ghes_list_mutex);
	case ACPI_HEST_NOTIFY_SEA:
	case ACPI_HEST_NOTIFY_NMI:
	case ACPI_HEST_NOTIFY_SOFTWARE_DELEGATED:
		rc = apei_sdei_register_ghes(ghes);

	platform_set_drvdata(ghes_dev, ghes);

	ghes->dev = &ghes_dev->dev;

	mutex_lock(&ghes_devs_mutex);
	list_add_tail(&ghes->elist, &ghes_devs);
	mutex_unlock(&ghes_devs_mutex);

	/* Handle any pending errors right away */
	spin_lock_irqsave(&ghes_notify_lock_irq, flags);
	spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);
static int ghes_remove(struct platform_device *ghes_dev)
	struct acpi_hest_generic *generic;

	ghes = platform_get_drvdata(ghes_dev);
	generic = ghes->generic;

	ghes->flags |= GHES_EXITING;
	switch (generic->notify.type) {
	case ACPI_HEST_NOTIFY_POLLED:
		timer_shutdown_sync(&ghes->timer);
	case ACPI_HEST_NOTIFY_EXTERNAL:
		free_irq(ghes->irq, ghes);
	case ACPI_HEST_NOTIFY_SCI:
	case ACPI_HEST_NOTIFY_GSIV:
	case ACPI_HEST_NOTIFY_GPIO:
		mutex_lock(&ghes_list_mutex);
		list_del_rcu(&ghes->list);
		if (list_empty(&ghes_hed))
			unregister_acpi_hed_notifier(&ghes_notifier_hed);
		mutex_unlock(&ghes_list_mutex);

	case ACPI_HEST_NOTIFY_SEA:
		ghes_sea_remove(ghes);
	case ACPI_HEST_NOTIFY_NMI:
		ghes_nmi_remove(ghes);
	case ACPI_HEST_NOTIFY_SOFTWARE_DELEGATED:
		rc = apei_sdei_unregister_ghes(ghes);

	mutex_lock(&ghes_devs_mutex);
	list_del(&ghes->elist);
	mutex_unlock(&ghes_devs_mutex);
static struct platform_driver ghes_platform_driver = {
	.probe		= ghes_probe,
	.remove		= ghes_remove,
};
void __init acpi_ghes_init(void)

	switch (hest_disable) {
	case HEST_NOT_FOUND:
		pr_info(GHES_PFX "HEST is not enabled!\n");

		pr_info(GHES_PFX "GHES is not enabled!\n");

	ghes_nmi_init_cxt();

	rc = platform_driver_register(&ghes_platform_driver);

	rc = apei_osc_setup();
	if (rc == 0 && osc_sb_apei_support_acked)
		pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit and WHEA _OSC.\n");
	else if (rc == 0 && !osc_sb_apei_support_acked)
		pr_info(GHES_PFX "APEI firmware first mode is enabled by WHEA _OSC.\n");
	else if (rc && osc_sb_apei_support_acked)
		pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit.\n");
		pr_info(GHES_PFX "Failed to enable APEI firmware first mode.\n");
/*
 * Known x86 systems that prefer GHES error reporting:
 */
static struct acpi_platform_list plat_list[] = {
	{"HPE ", "Server ", 0, ACPI_SIG_FADT, all_versions},
struct list_head *ghes_get_devices(void)

	if (IS_ENABLED(CONFIG_X86)) {
		idx = acpi_match_platform_list(plat_list);
			if (!ghes_edac_force_enable)

			pr_warn_once("Force-loading ghes_edac on an unsupported platform. You're on your own!\n");
	} else if (list_empty(&ghes_devs)) {

EXPORT_SYMBOL_GPL(ghes_get_devices);
void ghes_register_report_chain(struct notifier_block *nb)
{
	atomic_notifier_chain_register(&ghes_report_chain, nb);
}
EXPORT_SYMBOL_GPL(ghes_register_report_chain);

void ghes_unregister_report_chain(struct notifier_block *nb)
{
	atomic_notifier_chain_unregister(&ghes_report_chain, nb);
}
EXPORT_SYMBOL_GPL(ghes_unregister_report_chain);
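
/*
 * Illustrative sketch (not part of this file): a memory-error consumer such as
 * an EDAC driver could subscribe to the report chain as below. The callback
 * runs in atomic context; @sev carries the GHES severity and @data points to
 * a struct cper_sec_mem_err. All "example_*" names are hypothetical.
 *
 *	static int example_mem_err_notify(struct notifier_block *nb,
 *					  unsigned long sev, void *data)
 *	{
 *		struct cper_sec_mem_err *mem_err = data;
 *
 *		// decode mem_err->physical_addr etc. here
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_mem_err_nb = {
 *		.notifier_call = example_mem_err_notify,
 *	};
 *
 *	ghes_register_report_chain(&example_mem_err_nb);
 */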