2 * The file intends to implement the functions needed by EEH, which is
3 * built on IODA compliant chip. Actually, lots of functions related
4 * to EEH would be built based on the OPAL APIs.
6 * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2013.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
14 #include <linux/bootmem.h>
15 #include <linux/debugfs.h>
16 #include <linux/delay.h>
17 #include <linux/init.h>
19 #include <linux/irq.h>
20 #include <linux/kernel.h>
21 #include <linux/msi.h>
22 #include <linux/notifier.h>
23 #include <linux/pci.h>
24 #include <linux/string.h>
27 #include <asm/eeh_event.h>
29 #include <asm/iommu.h>
30 #include <asm/msi_bitmap.h>
32 #include <asm/pci-bridge.h>
33 #include <asm/ppc-pci.h>
/* One page of HUB diag-data, lazily allocated in ioda_eeh_post_init()
 * for IODA1 (P7IOC) only; PHB3 doesn't need it. */
39 static char *hub_diag = NULL;
/* Non-zero once the OPAL event notifier has been registered (done once
 * for the first PHB that reaches post-init). */
40 static int ioda_eeh_nb_init = 0;
42 static int ioda_eeh_event(struct notifier_block *nb,
43 unsigned long events, void *change)
45 uint64_t changed_evts = (uint64_t)change;
47 /* We simply send special EEH event */
48 if ((changed_evts & OPAL_EVENT_PCI_ERROR) &&
49 (events & OPAL_EVENT_PCI_ERROR))
50 eeh_send_failure_event(NULL);
/* Hooked into the OPAL notifier chain by ioda_eeh_post_init() */
55 static struct notifier_block ioda_eeh_nb = {
56 .notifier_call = ioda_eeh_event,
61 #ifdef CONFIG_DEBUG_FS
62 static int ioda_eeh_dbgfs_set(void *data, int offset, u64 val)
64 struct pci_controller *hose = data;
65 struct pnv_phb *phb = hose->private_data;
67 out_be64(phb->regs + offset, val);
71 static int ioda_eeh_dbgfs_get(void *data, int offset, u64 *val)
73 struct pci_controller *hose = data;
74 struct pnv_phb *phb = hose->private_data;
76 *val = in_be64(phb->regs + offset);
80 static int ioda_eeh_outb_dbgfs_set(void *data, u64 val)
82 return ioda_eeh_dbgfs_set(data, 0xD10, val);
85 static int ioda_eeh_outb_dbgfs_get(void *data, u64 *val)
87 return ioda_eeh_dbgfs_get(data, 0xD10, val);
90 static int ioda_eeh_inbA_dbgfs_set(void *data, u64 val)
92 return ioda_eeh_dbgfs_set(data, 0xD90, val);
95 static int ioda_eeh_inbA_dbgfs_get(void *data, u64 *val)
97 return ioda_eeh_dbgfs_get(data, 0xD90, val);
100 static int ioda_eeh_inbB_dbgfs_set(void *data, u64 val)
102 return ioda_eeh_dbgfs_set(data, 0xE10, val);
105 static int ioda_eeh_inbB_dbgfs_get(void *data, u64 *val)
107 return ioda_eeh_dbgfs_get(data, 0xE10, val);
/* debugfs file operations for the three error-injection registers */
110 DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_outb_dbgfs_ops, ioda_eeh_outb_dbgfs_get,
111 ioda_eeh_outb_dbgfs_set, "0x%llx\n");
112 DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_inbA_dbgfs_ops, ioda_eeh_inbA_dbgfs_get,
113 ioda_eeh_inbA_dbgfs_set, "0x%llx\n");
114 DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_inbB_dbgfs_ops, ioda_eeh_inbB_dbgfs_get,
115 ioda_eeh_inbB_dbgfs_set, "0x%llx\n");
116 #endif /* CONFIG_DEBUG_FS */
119 * ioda_eeh_post_init - Chip dependent post initialization
120 * @hose: PCI controller
122 * The function will be called after eeh PEs and devices
123 * have been built. That means the EEH is ready to supply
124 * service with I/O cache.
126 static int ioda_eeh_post_init(struct pci_controller *hose)
128 struct pnv_phb *phb = hose->private_data;
131 /* Register OPAL event notifier */
132 if (!ioda_eeh_nb_init) {
133 ret = opal_notifier_register(&ioda_eeh_nb);
135 pr_err("%s: Can't register OPAL event notifier (%d)\n",
140 ioda_eeh_nb_init = 1;
143 /* We needn't HUB diag-data on PHB3 */
144 if (phb->type == PNV_PHB_IODA1 && !hub_diag) {
145 hub_diag = (char *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
147 pr_err("%s: Out of memory !\n", __func__);
152 #ifdef CONFIG_DEBUG_FS
154 debugfs_create_file("err_injct_outbound", 0600,
156 &ioda_eeh_outb_dbgfs_ops);
157 debugfs_create_file("err_injct_inboundA", 0600,
159 &ioda_eeh_inbA_dbgfs_ops);
160 debugfs_create_file("err_injct_inboundB", 0600,
162 &ioda_eeh_inbB_dbgfs_ops);
166 phb->eeh_state |= PNV_EEH_STATE_ENABLED;
/**
 * ioda_eeh_set_option - Set EEH operation or I/O setting
 * @pe: EEH PE
 * @option: EEH_OPT_* value (disable, thaw MMIO, thaw DMA, ...)
 *
 * Enable or disable EEH option for the indicated PE.  The
 * function also can be used to enable I/O or DMA for the
 * PE by clearing the corresponding freeze through OPAL.
 *
 * NOTE(review): this is a numbered listing fragment -- several
 * original lines (declarations of ret/pe_no, switch header, breaks,
 * final return) are not shown here.
 */
180 static int ioda_eeh_set_option(struct eeh_pe *pe, int option)
184 struct pci_controller *hose = pe->phb;
185 struct pnv_phb *phb = hose->private_data;
/* Reject PE numbers outside the PHB's PE table */
187 /* Check on PE number */
188 if (pe->addr < 0 || pe->addr >= phb->ioda.total_pe) {
189 pr_err("%s: PE address %x out of range [0, %x] "
191 __func__, pe->addr, phb->ioda.total_pe,
192 hose->global_number);
198 case EEH_OPT_DISABLE:
/* Thaw MMIO: ask OPAL to clear the MMIO freeze on this PE */
204 case EEH_OPT_THAW_MMIO:
205 ret = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no,
206 OPAL_EEH_ACTION_CLEAR_FREEZE_MMIO);
208 pr_warning("%s: Failed to enable MMIO for "
209 "PHB#%x-PE#%x, err=%lld\n",
210 __func__, hose->global_number, pe_no, ret);
/* Thaw DMA: ask OPAL to clear the DMA freeze on this PE */
215 case EEH_OPT_THAW_DMA:
216 ret = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no,
217 OPAL_EEH_ACTION_CLEAR_FREEZE_DMA);
219 pr_warning("%s: Failed to enable DMA for "
220 "PHB#%x-PE#%x, err=%lld\n",
221 __func__, hose->global_number, pe_no, ret);
/* Unknown option: warn and (in full source) fall through to return */
227 pr_warning("%s: Invalid option %d\n", __func__, option);
/**
 * ioda_eeh_get_state - Retrieve the state of PE
 * @pe: EEH PE
 *
 * The PE's state should be retrieved from the PEEV, PEST
 * IODA tables.  Since the OPAL has exported the function
 * to do it, it'd better to use that.  Returns a bitmask of
 * EEH_STATE_* flags, or EEH_STATE_NOT_SUPPORT on failure.
 *
 * NOTE(review): numbered listing fragment -- declarations
 * (result/ret/fstate/pcierr/pe_no), the switch header and break
 * statements are among the lines not shown.
 */
242 static int ioda_eeh_get_state(struct eeh_pe *pe)
249 struct pci_controller *hose = pe->phb;
250 struct pnv_phb *phb = hose->private_data;
253 * Sanity check on PE address. The PHB PE address should
256 if (pe->addr < 0 || pe->addr >= phb->ioda.total_pe) {
257 pr_err("%s: PE address %x out of range [0, %x] "
259 __func__, pe->addr, phb->ioda.total_pe,
260 hose->global_number);
261 return EEH_STATE_NOT_SUPPORT;
264 /* Retrieve PE status through OPAL */
266 ret = opal_pci_eeh_freeze_status(phb->opal_id, pe_no,
267 &fstate, &pcierr, NULL);
269 pr_err("%s: Failed to get EEH status on "
270 "PHB#%x-PE#%x\n, err=%lld\n",
271 __func__, hose->global_number, pe_no, ret);
272 return EEH_STATE_NOT_SUPPORT;
/* PHB-wide PE: fully active unless OPAL reports a PHB error */
275 /* Check PHB status */
276 if (pe->type & EEH_PE_PHB) {
278 result &= ~EEH_STATE_RESET_ACTIVE;
280 if (pcierr != OPAL_EEH_PHB_ERROR) {
281 result |= EEH_STATE_MMIO_ACTIVE;
282 result |= EEH_STATE_DMA_ACTIVE;
283 result |= EEH_STATE_MMIO_ENABLED;
284 result |= EEH_STATE_DMA_ENABLED;
/* Map the OPAL freeze state onto EEH_STATE_* flag combinations */
290 /* Parse result out */
293 case OPAL_EEH_STOPPED_NOT_FROZEN:
294 result &= ~EEH_STATE_RESET_ACTIVE;
295 result |= EEH_STATE_MMIO_ACTIVE;
296 result |= EEH_STATE_DMA_ACTIVE;
297 result |= EEH_STATE_MMIO_ENABLED;
298 result |= EEH_STATE_DMA_ENABLED;
300 case OPAL_EEH_STOPPED_MMIO_FREEZE:
301 result &= ~EEH_STATE_RESET_ACTIVE;
302 result |= EEH_STATE_DMA_ACTIVE;
303 result |= EEH_STATE_DMA_ENABLED;
305 case OPAL_EEH_STOPPED_DMA_FREEZE:
306 result &= ~EEH_STATE_RESET_ACTIVE;
307 result |= EEH_STATE_MMIO_ACTIVE;
308 result |= EEH_STATE_MMIO_ENABLED;
310 case OPAL_EEH_STOPPED_MMIO_DMA_FREEZE:
311 result &= ~EEH_STATE_RESET_ACTIVE;
313 case OPAL_EEH_STOPPED_RESET:
314 result |= EEH_STATE_RESET_ACTIVE;
316 case OPAL_EEH_STOPPED_TEMP_UNAVAIL:
317 result |= EEH_STATE_UNAVAILABLE;
319 case OPAL_EEH_STOPPED_PERM_UNAVAIL:
320 result |= EEH_STATE_NOT_SUPPORT;
323 pr_warning("%s: Unexpected EEH status 0x%x "
325 __func__, fstate, hose->global_number, pe_no);
/*
 * Clear the frozen state of the given PE through OPAL, then read the
 * state back and verify the freeze is really gone.  Used before a
 * hot/fundamental reset of a non-PHB PE.
 *
 * NOTE(review): numbered listing fragment -- declarations
 * (phb/ret/fstate/pcierr/pe_no), error returns and closing braces
 * are among the lines not shown.
 */
331 static int ioda_eeh_pe_clear(struct eeh_pe *pe)
333 struct pci_controller *hose;
342 phb = pe->phb->private_data;
344 /* Clear the EEH error on the PE */
345 ret = opal_pci_eeh_freeze_clear(phb->opal_id,
346 pe_no, OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
348 pr_err("%s: Failed to clear EEH error for "
349 "PHB#%x-PE#%x, err=%lld\n",
350 __func__, hose->global_number, pe_no, ret);
355 * Read the PE state back and verify that the frozen
356 * state has been removed.
358 ret = opal_pci_eeh_freeze_status(phb->opal_id, pe_no,
359 &fstate, &pcierr, NULL);
361 pr_err("%s: Failed to get EEH status on "
362 "PHB#%x-PE#%x\n, err=%lld\n",
363 __func__, hose->global_number, pe_no, ret);
/* Anything but NOT_FROZEN here means the clear did not take effect */
367 if (fstate != OPAL_EEH_STOPPED_NOT_FROZEN) {
368 pr_err("%s: Frozen state not cleared on "
369 "PHB#%x-PE#%x, sts=%x\n",
370 __func__, hose->global_number, pe_no, fstate);
/*
 * Poll the PHB through OPAL until a pending reset request completes.
 * NOTE(review): listing fragment -- the surrounding poll loop (retry /
 * delay handling) and the final return are not shown here.
 */
377 static s64 ioda_eeh_phb_poll(struct pnv_phb *phb)
379 s64 rc = OPAL_HARDWARE;
382 rc = opal_pci_poll(phb->opal_id);
/*
 * Issue a complete PHB reset through OPAL (assert for hot/fundamental,
 * deassert for EEH_RESET_DEACTIVATE) and poll until the request is done.
 *
 * NOTE(review): numbered listing fragment -- the OPAL reset scope
 * arguments and the final return are among the lines not shown.
 */
392 static int ioda_eeh_phb_reset(struct pci_controller *hose, int option)
394 struct pnv_phb *phb = hose->private_data;
395 s64 rc = OPAL_HARDWARE;
397 pr_debug("%s: Reset PHB#%x, option=%d\n",
398 __func__, hose->global_number, option);
400 /* Issue PHB complete reset request */
401 if (option == EEH_RESET_FUNDAMENTAL ||
402 option == EEH_RESET_HOT)
403 rc = opal_pci_reset(phb->opal_id,
406 else if (option == EEH_RESET_DEACTIVATE)
407 rc = opal_pci_reset(phb->opal_id,
409 OPAL_DEASSERT_RESET);
414 * Poll state of the PHB until the request is done
417 rc = ioda_eeh_phb_poll(phb);
419 if (rc != OPAL_SUCCESS)
/*
 * Reset the PHB's root port through OPAL according to @option
 * (fundamental / hot / deactivate) and poll for completion.
 *
 * NOTE(review): numbered listing fragment -- the assert arguments and
 * final return are among the lines not shown.
 */
425 static int ioda_eeh_root_reset(struct pci_controller *hose, int option)
427 struct pnv_phb *phb = hose->private_data;
428 s64 rc = OPAL_SUCCESS;
430 pr_debug("%s: Reset PHB#%x, option=%d\n",
431 __func__, hose->global_number, option);
434 * During the reset deassert time, we needn't care
435 * the reset scope because the firmware does nothing
436 * for fundamental or hot reset during deassert phase.
438 if (option == EEH_RESET_FUNDAMENTAL)
439 rc = opal_pci_reset(phb->opal_id,
440 OPAL_PCI_FUNDAMENTAL_RESET,
442 else if (option == EEH_RESET_HOT)
443 rc = opal_pci_reset(phb->opal_id,
446 else if (option == EEH_RESET_DEACTIVATE)
447 rc = opal_pci_reset(phb->opal_id,
449 OPAL_DEASSERT_RESET);
453 /* Poll state of the PHB until the request is done */
454 rc = ioda_eeh_phb_poll(phb);
456 if (rc != OPAL_SUCCESS)
462 static int ioda_eeh_bridge_reset(struct pci_controller *hose,
463 struct pci_dev *dev, int option)
467 pr_debug("%s: Reset device %04x:%02x:%02x.%01x with option %d\n",
468 __func__, hose->global_number, dev->bus->number,
469 PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn), option);
472 case EEH_RESET_FUNDAMENTAL:
474 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
475 ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
476 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
478 case EEH_RESET_DEACTIVATE:
479 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
480 ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
481 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
/**
 * ioda_eeh_reset - Reset the indicated PE
 * @pe: EEH PE
 * @option: reset option
 *
 * Do reset on the indicated PE. For PCI bus sensitive PE,
 * we need to reset the parent p2p bridge. The PHB has to
 * be reinitialized if the p2p bridge is root bridge. For
 * PCI device sensitive PE, we will try to reset the device
 * through FLR. For now, we don't have OPAL APIs to do HARD
 * reset yet, so all reset would be SOFT (HOT) reset.
 *
 * NOTE(review): numbered listing fragment -- declarations
 * (dev/ret), braces and the final return are among the lines
 * not shown.
 */
500 static int ioda_eeh_reset(struct eeh_pe *pe, int option)
502 struct pci_controller *hose = pe->phb;
503 struct eeh_dev *edev;
508 * Anyway, we have to clear the problematic state for the
509 * corresponding PE. However, we needn't do it if the PE
510 * is PHB associated. That means the PHB is having fatal
511 * errors and it needs reset. Further more, the AIB interface
512 * isn't reliable any more.
514 if (!(pe->type & EEH_PE_PHB) &&
515 (option == EEH_RESET_HOT ||
516 option == EEH_RESET_FUNDAMENTAL)) {
517 ret = ioda_eeh_pe_clear(pe);
523 * The rules applied to reset, either fundamental or hot reset:
525 * We always reset the direct upstream bridge of the PE. If the
526 * direct upstream bridge isn't root bridge, we always take hot
527 * reset no matter what option (fundamental or hot) is. Otherwise,
528 * we should do the reset according to the required option.
530 if (pe->type & EEH_PE_PHB) {
531 ret = ioda_eeh_phb_reset(hose, option);
533 if (pe->type & EEH_PE_DEVICE) {
535 * If it's device PE, we didn't refer to the parent
536 * PCI bus yet. So we have to figure it out indirectly.
538 edev = list_first_entry(&pe->edevs,
539 struct eeh_dev, list);
540 dev = eeh_dev_to_pci_dev(edev);
541 dev = dev->bus->self;
544 * If it's bus PE, the parent PCI bus is already there
545 * and just pick it up.
551 * Do reset based on the fact that the direct upstream bridge
552 * is root bridge (port) or not.
/* Bus 0 means the upstream bridge is the root port: reset via PHB */
554 if (dev->bus->number == 0)
555 ret = ioda_eeh_root_reset(hose, option);
557 ret = ioda_eeh_bridge_reset(hose, dev, option);
/**
 * ioda_eeh_get_log - Retrieve error log
 * @pe: EEH PE
 * @severity: Severity level of the log
 * @drv_log: buffer to store the log
 * @len: space of the log buffer
 *
 * The function is used to retrieve error log from P7IOC.  The
 * diag-data is fetched into phb->diag.blob under phb->lock.
 *
 * NOTE(review): numbered listing fragment -- declarations
 * (ret/flags), error return and final return are among the lines
 * not shown.
 */
572 static int ioda_eeh_get_log(struct eeh_pe *pe, int severity,
573 char *drv_log, unsigned long len)
577 struct pci_controller *hose = pe->phb;
578 struct pnv_phb *phb = hose->private_data;
/* phb->diag.blob is shared; serialize access with the PHB lock */
580 spin_lock_irqsave(&phb->lock, flags);
582 ret = opal_pci_get_phb_diag_data2(phb->opal_id,
583 phb->diag.blob, PNV_PCI_DIAG_BUF_SIZE);
585 spin_unlock_irqrestore(&phb->lock, flags);
586 pr_warning("%s: Can't get log for PHB#%x-PE#%x (%lld)\n",
587 __func__, hose->global_number, pe->addr, ret);
592 * FIXME: We probably need log the error in somewhere.
593 * Lets make it up in future.
595 /* pr_info("%s", phb->diag.blob); */
597 spin_unlock_irqrestore(&phb->lock, flags);
/**
 * ioda_eeh_configure_bridge - Configure the PCI bridges for the indicated PE
 * @pe: EEH PE
 *
 * For particular PE, it might have included PCI bridges. In order
 * to make the PE work properly, those PCI bridges should be configured
 * correctly. However, we need do nothing on P7IOC since the reset
 * function will do everything that should be covered by the function.
 */
static int ioda_eeh_configure_bridge(struct eeh_pe *pe)
{
	return 0;
}
616 static void ioda_eeh_hub_diag_common(struct OpalIoP7IOCErrorData *data)
619 pr_info(" GEM XFIR: %016llx\n", data->gemXfir);
620 pr_info(" GEM RFIR: %016llx\n", data->gemRfir);
621 pr_info(" GEM RIRQFIR: %016llx\n", data->gemRirqfir);
622 pr_info(" GEM Mask: %016llx\n", data->gemMask);
623 pr_info(" GEM RWOF: %016llx\n", data->gemRwof);
626 pr_info(" LEM FIR: %016llx\n", data->lemFir);
627 pr_info(" LEM Error Mask: %016llx\n", data->lemErrMask);
628 pr_info(" LEM Action 0: %016llx\n", data->lemAction0);
629 pr_info(" LEM Action 1: %016llx\n", data->lemAction1);
630 pr_info(" LEM WOF: %016llx\n", data->lemWof);
633 static void ioda_eeh_hub_diag(struct pci_controller *hose)
635 struct pnv_phb *phb = hose->private_data;
636 struct OpalIoP7IOCErrorData *data;
639 data = (struct OpalIoP7IOCErrorData *)ioda_eeh_hub_diag;
640 rc = opal_pci_get_hub_diag_data(phb->hub_id, data, PAGE_SIZE);
641 if (rc != OPAL_SUCCESS) {
642 pr_warning("%s: Failed to get HUB#%llx diag-data (%ld)\n",
643 __func__, phb->hub_id, rc);
647 switch (data->type) {
648 case OPAL_P7IOC_DIAG_TYPE_RGC:
649 pr_info("P7IOC diag-data for RGC\n\n");
650 ioda_eeh_hub_diag_common(data);
651 pr_info(" RGC Status: %016llx\n", data->rgc.rgcStatus);
652 pr_info(" RGC LDCP: %016llx\n", data->rgc.rgcLdcp);
654 case OPAL_P7IOC_DIAG_TYPE_BI:
655 pr_info("P7IOC diag-data for BI %s\n\n",
656 data->bi.biDownbound ? "Downbound" : "Upbound");
657 ioda_eeh_hub_diag_common(data);
658 pr_info(" BI LDCP 0: %016llx\n", data->bi.biLdcp0);
659 pr_info(" BI LDCP 1: %016llx\n", data->bi.biLdcp1);
660 pr_info(" BI LDCP 2: %016llx\n", data->bi.biLdcp2);
661 pr_info(" BI Fence Status: %016llx\n", data->bi.biFenceStatus);
663 case OPAL_P7IOC_DIAG_TYPE_CI:
664 pr_info("P7IOC diag-data for CI Port %d\\nn",
666 ioda_eeh_hub_diag_common(data);
667 pr_info(" CI Port Status: %016llx\n", data->ci.ciPortStatus);
668 pr_info(" CI Port LDCP: %016llx\n", data->ci.ciPortLdcp);
670 case OPAL_P7IOC_DIAG_TYPE_MISC:
671 pr_info("P7IOC diag-data for MISC\n\n");
672 ioda_eeh_hub_diag_common(data);
674 case OPAL_P7IOC_DIAG_TYPE_I2C:
675 pr_info("P7IOC diag-data for I2C\n\n");
676 ioda_eeh_hub_diag_common(data);
679 pr_warning("%s: Invalid type of HUB#%llx diag-data (%d)\n",
680 __func__, phb->hub_id, data->type);
684 static void ioda_eeh_phb_diag(struct pci_controller *hose)
686 struct pnv_phb *phb = hose->private_data;
689 rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob,
690 PNV_PCI_DIAG_BUF_SIZE);
691 if (rc != OPAL_SUCCESS) {
692 pr_warning("%s: Failed to get diag-data for PHB#%x (%ld)\n",
693 __func__, hose->global_number, rc);
697 pnv_pci_dump_phb_diag_data(hose, phb->diag.blob);
700 static int ioda_eeh_get_phb_pe(struct pci_controller *hose,
703 struct eeh_pe *phb_pe;
705 phb_pe = eeh_phb_pe_get(hose);
707 pr_warning("%s Can't find PE for PHB#%d\n",
708 __func__, hose->global_number);
/*
 * Look up the device PE identified by @pe_no on @hose and return it
 * through @pe.  Builds a throw-away eeh_dev keyed by the PE config
 * address so eeh_pe_get() can do the search.
 *
 * NOTE(review): numbered listing fragment -- the local eeh_dev
 * declaration, phb assignment, *pe store and returns are among the
 * lines not shown.
 */
716 static int ioda_eeh_get_pe(struct pci_controller *hose,
717 u16 pe_no, struct eeh_pe **pe)
719 struct eeh_pe *phb_pe, *dev_pe;
722 /* Find the PHB PE */
723 if (ioda_eeh_get_phb_pe(hose, &phb_pe))
726 /* Find the PE according to PE# */
727 memset(&dev, 0, sizeof(struct eeh_dev));
729 dev.pe_config_addr = pe_no;
730 dev_pe = eeh_pe_get(&dev);
732 pr_warning("%s: Can't find PE for PHB#%x - PE#%x\n",
733 __func__, hose->global_number, pe_no);
/**
 * ioda_eeh_next_error - Retrieve next error for EEH core to handle
 * @pe: The affected PE
 *
 * The function is expected to be called by EEH core while it gets
 * special EEH event (without binding PE). The function calls to
 * OPAL APIs for next error to handle. The informational error is
 * handled internally by platform. However, the dead IOC, dead PHB,
 * fenced PHB and frozen PE should be handled by EEH core eventually.
 *
 * NOTE(review): numbered listing fragment -- declarations
 * (phb/frozen_pe_no/rc/ret), the switch header, continue/break
 * statements and the final return are among the lines not shown.
 */
751 static int ioda_eeh_next_error(struct eeh_pe **pe)
753 struct pci_controller *hose, *tmp;
756 u16 err_type, severity;
761 * While running here, it's safe to purge the event queue.
762 * And we should keep the cached OPAL notifier event sychronized
763 * between the kernel and firmware.
765 eeh_remove_event(NULL);
766 opal_notifier_update_evt(OPAL_EVENT_PCI_ERROR, 0x0ul);
/* Walk every PHB and ask OPAL for its highest-priority pending error */
768 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
770 * If the subordinate PCI buses of the PHB has been
771 * removed, we needn't take care of it any more.
773 phb = hose->private_data;
774 if (phb->eeh_state & PNV_EEH_STATE_REMOVED)
777 rc = opal_pci_next_error(phb->opal_id,
778 &frozen_pe_no, &err_type, &severity);
780 /* If OPAL API returns error, we needn't proceed */
781 if (rc != OPAL_SUCCESS) {
782 pr_devel("%s: Invalid return value on "
783 "PHB#%x (0x%lx) from opal_pci_next_error",
784 __func__, hose->global_number, rc);
788 /* If the PHB doesn't have error, stop processing */
789 if (err_type == OPAL_EEH_NO_ERROR ||
790 severity == OPAL_EEH_SEV_NO_ERROR) {
791 pr_devel("%s: No error found on PHB#%x\n",
792 __func__, hose->global_number);
797 * Processing the error. We're expecting the error with
798 * highest priority reported upon multiple errors on the
801 pr_devel("%s: Error (%d, %d, %llu) on PHB#%x\n",
802 __func__, err_type, severity,
803 frozen_pe_no, hose->global_number);
/* Dead IOC: mark every PHB removed; informational: just dump diag */
805 case OPAL_EEH_IOC_ERROR:
806 if (severity == OPAL_EEH_SEV_IOC_DEAD) {
807 list_for_each_entry_safe(hose, tmp,
808 &hose_list, list_node) {
809 phb = hose->private_data;
810 phb->eeh_state |= PNV_EEH_STATE_REMOVED;
813 pr_err("EEH: dead IOC detected\n");
816 } else if (severity == OPAL_EEH_SEV_INF) {
817 pr_info("EEH: IOC informative error "
819 ioda_eeh_hub_diag(hose);
/* Dead/fenced PHB: hand the PHB PE back to the EEH core */
823 case OPAL_EEH_PHB_ERROR:
824 if (severity == OPAL_EEH_SEV_PHB_DEAD) {
825 if (ioda_eeh_get_phb_pe(hose, pe))
828 pr_err("EEH: dead PHB#%x detected\n",
829 hose->global_number);
830 phb->eeh_state |= PNV_EEH_STATE_REMOVED;
833 } else if (severity == OPAL_EEH_SEV_PHB_FENCED) {
834 if (ioda_eeh_get_phb_pe(hose, pe))
837 pr_err("EEH: fenced PHB#%x detected\n",
838 hose->global_number);
841 } else if (severity == OPAL_EEH_SEV_INF) {
842 pr_info("EEH: PHB#%x informative error "
844 hose->global_number);
845 ioda_eeh_phb_diag(hose);
/* Frozen PE: resolve the PE from its number and report it */
849 case OPAL_EEH_PE_ERROR:
850 if (ioda_eeh_get_pe(hose, frozen_pe_no, pe))
853 pr_err("EEH: Frozen PE#%x on PHB#%x detected\n",
854 (*pe)->addr, (*pe)->phb->global_number);
/* EEH backend operations exported to the powernv platform code */
865 struct pnv_eeh_ops ioda_eeh_ops = {
866 .post_init = ioda_eeh_post_init,
867 .set_option = ioda_eeh_set_option,
868 .get_state = ioda_eeh_get_state,
869 .reset = ioda_eeh_reset,
870 .get_log = ioda_eeh_get_log,
871 .configure_bridge = ioda_eeh_configure_bridge,
872 .next_error = ioda_eeh_next_error