/*
 * This file implements the functions needed by EEH on IODA-compliant
 * chips. Most of the EEH functionality here is built on top of the
 * OPAL APIs.
 *
 * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2013.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/bootmem.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/msi.h>
#include <linux/notifier.h>
#include <linux/pci.h>
#include <linux/string.h>

#include <asm/eeh.h>
#include <asm/eeh_event.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/msi_bitmap.h>
#include <asm/opal.h>
#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>

#include "powernv.h"
#include "pci.h"
static char *hub_diag = NULL;
static int ioda_eeh_nb_init = 0;

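/*
 * OPAL event notifier: convert an incoming OPAL_EVENT_PCI_ERROR
 * notification into an EEH event so that the EEH core picks it up.
 */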
static int ioda_eeh_event(struct notifier_block *nb,
                          unsigned long events, void *change)
{
        uint64_t changed_evts = (uint64_t)change;

        /* We simply send a special EEH event */
        if ((changed_evts & OPAL_EVENT_PCI_ERROR) &&
            (events & OPAL_EVENT_PCI_ERROR))
                eeh_send_failure_event(NULL);

        return 0;
}

static struct notifier_block ioda_eeh_nb = {
        .notifier_call  = ioda_eeh_event,
};

#ifdef CONFIG_DEBUG_FS
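/*
 * Helpers behind the err_injct_* debugfs files: read or write the
 * PHB register at the given offset on behalf of the corresponding
 * debugfs attribute.
 */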
static int ioda_eeh_dbgfs_set(void *data, int offset, u64 val)
{
        struct pci_controller *hose = data;
        struct pnv_phb *phb = hose->private_data;

        out_be64(phb->regs + offset, val);
        return 0;
}

static int ioda_eeh_dbgfs_get(void *data, int offset, u64 *val)
{
        struct pci_controller *hose = data;
        struct pnv_phb *phb = hose->private_data;

        *val = in_be64(phb->regs + offset);
        return 0;
}

static int ioda_eeh_outb_dbgfs_set(void *data, u64 val)
{
        return ioda_eeh_dbgfs_set(data, 0xD10, val);
}

static int ioda_eeh_outb_dbgfs_get(void *data, u64 *val)
{
        return ioda_eeh_dbgfs_get(data, 0xD10, val);
}

static int ioda_eeh_inbA_dbgfs_set(void *data, u64 val)
{
        return ioda_eeh_dbgfs_set(data, 0xD90, val);
}

static int ioda_eeh_inbA_dbgfs_get(void *data, u64 *val)
{
        return ioda_eeh_dbgfs_get(data, 0xD90, val);
}

static int ioda_eeh_inbB_dbgfs_set(void *data, u64 val)
{
        return ioda_eeh_dbgfs_set(data, 0xE10, val);
}

static int ioda_eeh_inbB_dbgfs_get(void *data, u64 *val)
{
        return ioda_eeh_dbgfs_get(data, 0xE10, val);
}

DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_outb_dbgfs_ops, ioda_eeh_outb_dbgfs_get,
                        ioda_eeh_outb_dbgfs_set, "0x%llx\n");
DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_inbA_dbgfs_ops, ioda_eeh_inbA_dbgfs_get,
                        ioda_eeh_inbA_dbgfs_set, "0x%llx\n");
DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_inbB_dbgfs_ops, ioda_eeh_inbB_dbgfs_get,
                        ioda_eeh_inbB_dbgfs_set, "0x%llx\n");
#endif /* CONFIG_DEBUG_FS */

/**
 * ioda_eeh_post_init - Chip dependent post initialization
 * @hose: PCI controller
 *
 * The function will be called after EEH PEs and devices
 * have been built, which means EEH is ready to provide service.
 */
static int ioda_eeh_post_init(struct pci_controller *hose)
{
        struct pnv_phb *phb = hose->private_data;
        int ret;

        /* Register OPAL event notifier */
        if (!ioda_eeh_nb_init) {
                ret = opal_notifier_register(&ioda_eeh_nb);
                if (ret) {
                        pr_err("%s: Can't register OPAL event notifier (%d)\n",
                               __func__, ret);
                        return ret;
                }

                ioda_eeh_nb_init = 1;
        }

        /* We don't need HUB diag-data on PHB3 */
        if (phb->type == PNV_PHB_IODA1 && !hub_diag) {
                hub_diag = (char *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
                if (!hub_diag) {
                        pr_err("%s: Out of memory !\n", __func__);
                        return -ENOMEM;
                }
        }

#ifdef CONFIG_DEBUG_FS
        if (phb->dbgfs) {
                debugfs_create_file("err_injct_outbound", 0600,
                                    phb->dbgfs, hose,
                                    &ioda_eeh_outb_dbgfs_ops);
                debugfs_create_file("err_injct_inboundA", 0600,
                                    phb->dbgfs, hose,
                                    &ioda_eeh_inbA_dbgfs_ops);
                debugfs_create_file("err_injct_inboundB", 0600,
                                    phb->dbgfs, hose,
                                    &ioda_eeh_inbB_dbgfs_ops);
        }
#endif

        phb->eeh_state |= PNV_EEH_STATE_ENABLED;

        return 0;
}

/**
 * ioda_eeh_set_option - Set EEH operation or I/O setting
 * @pe: EEH PE
 * @option: options
 *
 * Enable or disable EEH option for the indicated PE. The
 * function can also be used to enable I/O or DMA for the PE.
 */
static int ioda_eeh_set_option(struct eeh_pe *pe, int option)
{
        s64 ret;
        u32 pe_no;
        struct pci_controller *hose = pe->phb;
        struct pnv_phb *phb = hose->private_data;

        /* Check on PE number */
        if (pe->addr < 0 || pe->addr >= phb->ioda.total_pe) {
                pr_err("%s: PE address %x out of range [0, %x] on PHB#%x\n",
                       __func__, pe->addr, phb->ioda.total_pe,
                       hose->global_number);
                return -EINVAL;
        }

        pe_no = pe->addr;
        switch (option) {
        case EEH_OPT_DISABLE:
                ret = -EEXIST;
                break;
        case EEH_OPT_ENABLE:
                ret = 0;
                break;
        case EEH_OPT_THAW_MMIO:
                ret = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no,
                                OPAL_EEH_ACTION_CLEAR_FREEZE_MMIO);
                if (ret) {
                        pr_warning("%s: Failed to enable MMIO for PHB#%x-PE#%x, err=%lld\n",
                                   __func__, hose->global_number, pe_no, ret);
                        return -EIO;
                }
                break;
        case EEH_OPT_THAW_DMA:
                ret = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no,
                                OPAL_EEH_ACTION_CLEAR_FREEZE_DMA);
                if (ret) {
                        pr_warning("%s: Failed to enable DMA for PHB#%x-PE#%x, err=%lld\n",
                                   __func__, hose->global_number, pe_no, ret);
                        return -EIO;
                }
                break;
        default:
                pr_warning("%s: Invalid option %d\n", __func__, option);
                return -EINVAL;
        }

        return ret;
}

/**
 * ioda_eeh_get_state - Retrieve the state of PE
 * @pe: EEH PE
 *
 * The PE's state is retrieved from the PEEV and PEST IODA
 * tables. Since OPAL already exports a function for that,
 * we simply use it.
 */
static int ioda_eeh_get_state(struct eeh_pe *pe)
{
        s64 ret = 0;
        u8 fstate;
        u16 pcierr;
        u32 pe_no;
        int result;
        struct pci_controller *hose = pe->phb;
        struct pnv_phb *phb = hose->private_data;

        /*
         * Sanity check on PE address. The PHB PE address
         * should be zero.
         */
        if (pe->addr < 0 || pe->addr >= phb->ioda.total_pe) {
                pr_err("%s: PE address %x out of range [0, %x] on PHB#%x\n",
                       __func__, pe->addr, phb->ioda.total_pe,
                       hose->global_number);
                return EEH_STATE_NOT_SUPPORT;
        }

        /* Retrieve PE status through OPAL */
        pe_no = pe->addr;
        ret = opal_pci_eeh_freeze_status(phb->opal_id, pe_no,
                        &fstate, &pcierr, NULL);
        if (ret) {
                pr_err("%s: Failed to get EEH status on PHB#%x-PE#%x, err=%lld\n",
                       __func__, hose->global_number, pe_no, ret);
                return EEH_STATE_NOT_SUPPORT;
        }

        /* Check PHB status */
        if (pe->type & EEH_PE_PHB) {
                result = 0;
                result &= ~EEH_STATE_RESET_ACTIVE;

                if (pcierr != OPAL_EEH_PHB_ERROR) {
                        result |= EEH_STATE_MMIO_ACTIVE;
                        result |= EEH_STATE_DMA_ACTIVE;
                        result |= EEH_STATE_MMIO_ENABLED;
                        result |= EEH_STATE_DMA_ENABLED;
                }

                return result;
        }

        /* Parse result out */
        result = 0;
        switch (fstate) {
        case OPAL_EEH_STOPPED_NOT_FROZEN:
                result &= ~EEH_STATE_RESET_ACTIVE;
                result |= EEH_STATE_MMIO_ACTIVE;
                result |= EEH_STATE_DMA_ACTIVE;
                result |= EEH_STATE_MMIO_ENABLED;
                result |= EEH_STATE_DMA_ENABLED;
                break;
        case OPAL_EEH_STOPPED_MMIO_FREEZE:
                result &= ~EEH_STATE_RESET_ACTIVE;
                result |= EEH_STATE_DMA_ACTIVE;
                result |= EEH_STATE_DMA_ENABLED;
                break;
        case OPAL_EEH_STOPPED_DMA_FREEZE:
                result &= ~EEH_STATE_RESET_ACTIVE;
                result |= EEH_STATE_MMIO_ACTIVE;
                result |= EEH_STATE_MMIO_ENABLED;
                break;
        case OPAL_EEH_STOPPED_MMIO_DMA_FREEZE:
                result &= ~EEH_STATE_RESET_ACTIVE;
                break;
        case OPAL_EEH_STOPPED_RESET:
                result |= EEH_STATE_RESET_ACTIVE;
                break;
        case OPAL_EEH_STOPPED_TEMP_UNAVAIL:
                result |= EEH_STATE_UNAVAILABLE;
                break;
        case OPAL_EEH_STOPPED_PERM_UNAVAIL:
                result |= EEH_STATE_NOT_SUPPORT;
                break;
        default:
                pr_warning("%s: Unexpected EEH status 0x%x on PHB#%x-PE#%x\n",
                           __func__, fstate, hose->global_number, pe_no);
        }

        return result;
}

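/*
 * Clear the frozen state of the given PE and verify, by reading the
 * freeze status back, that the PE is no longer frozen.
 */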
static int ioda_eeh_pe_clear(struct eeh_pe *pe)
{
        struct pci_controller *hose;
        struct pnv_phb *phb;
        u32 pe_no;
        u8 fstate;
        u16 pcierr;
        s64 ret;

        pe_no = pe->addr;
        hose = pe->phb;
        phb = pe->phb->private_data;

        /* Clear the EEH error on the PE */
        ret = opal_pci_eeh_freeze_clear(phb->opal_id,
                        pe_no, OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
        if (ret) {
                pr_err("%s: Failed to clear EEH error for PHB#%x-PE#%x, err=%lld\n",
                       __func__, hose->global_number, pe_no, ret);
                return -EIO;
        }

        /*
         * Read the PE state back and verify that the frozen
         * state has been removed.
         */
        ret = opal_pci_eeh_freeze_status(phb->opal_id, pe_no,
                        &fstate, &pcierr, NULL);
        if (ret) {
                pr_err("%s: Failed to get EEH status on PHB#%x-PE#%x, err=%lld\n",
                       __func__, hose->global_number, pe_no, ret);
                return -EIO;
        }

        if (fstate != OPAL_EEH_STOPPED_NOT_FROZEN) {
                pr_err("%s: Frozen state not cleared on PHB#%x-PE#%x, sts=%x\n",
                       __func__, hose->global_number, pe_no, fstate);
                return -EIO;
        }

        return 0;
}

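/*
 * Poll the PHB until the pending OPAL request completes. A positive
 * return value from opal_pci_poll() is the number of milliseconds to
 * wait before polling again.
 */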
static s64 ioda_eeh_phb_poll(struct pnv_phb *phb)
{
        s64 rc = OPAL_HARDWARE;

        while (1) {
                rc = opal_pci_poll(phb->opal_id);
                if (rc <= 0)
                        break;

                msleep(rc);
        }

        return rc;
}

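/*
 * Issue a complete reset to the PHB through OPAL and wait for the
 * request to finish.
 */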
static int ioda_eeh_phb_reset(struct pci_controller *hose, int option)
{
        struct pnv_phb *phb = hose->private_data;
        s64 rc = OPAL_HARDWARE;

        pr_debug("%s: Reset PHB#%x, option=%d\n",
                 __func__, hose->global_number, option);

        /* Issue PHB complete reset request */
        if (option == EEH_RESET_FUNDAMENTAL ||
            option == EEH_RESET_HOT)
                rc = opal_pci_reset(phb->opal_id,
                                OPAL_PHB_COMPLETE,
                                OPAL_ASSERT_RESET);
        else if (option == EEH_RESET_DEACTIVATE)
                rc = opal_pci_reset(phb->opal_id,
                                OPAL_PHB_COMPLETE,
                                OPAL_DEASSERT_RESET);
        if (rc < 0)
                return -EIO;

        /*
         * Poll state of the PHB until the request is done
         * successfully.
         */
        rc = ioda_eeh_phb_poll(phb);
        if (rc != OPAL_SUCCESS)
                return -EIO;

        return 0;
}

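/*
 * Reset the root port of the PHB by asserting or deasserting the
 * appropriate reset scope through OPAL, then wait for completion.
 */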
static int ioda_eeh_root_reset(struct pci_controller *hose, int option)
{
        struct pnv_phb *phb = hose->private_data;
        s64 rc = OPAL_SUCCESS;

        pr_debug("%s: Reset PHB#%x, option=%d\n",
                 __func__, hose->global_number, option);

        /*
         * During reset deassert we don't care about the reset
         * scope, because the firmware does nothing for fundamental
         * or hot resets during the deassert phase.
         */
        if (option == EEH_RESET_FUNDAMENTAL)
                rc = opal_pci_reset(phb->opal_id,
                                OPAL_PCI_FUNDAMENTAL_RESET,
                                OPAL_ASSERT_RESET);
        else if (option == EEH_RESET_HOT)
                rc = opal_pci_reset(phb->opal_id,
                                OPAL_PCI_HOT_RESET,
                                OPAL_ASSERT_RESET);
        else if (option == EEH_RESET_DEACTIVATE)
                rc = opal_pci_reset(phb->opal_id,
                                OPAL_PCI_HOT_RESET,
                                OPAL_DEASSERT_RESET);
        if (rc < 0)
                return -EIO;

        /* Poll state of the PHB until the request is done */
        rc = ioda_eeh_phb_poll(phb);
        if (rc != OPAL_SUCCESS)
                return -EIO;

        return 0;
}

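/*
 * Reset the secondary bus under the given PCI bridge by toggling the
 * secondary bus reset bit in the bridge control register.
 */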
static int ioda_eeh_bridge_reset(struct pci_controller *hose,
                struct pci_dev *dev, int option)
{
        u16 ctrl;

        pr_debug("%s: Reset device %04x:%02x:%02x.%01x with option %d\n",
                 __func__, hose->global_number, dev->bus->number,
                 PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn), option);

        switch (option) {
        case EEH_RESET_FUNDAMENTAL:
        case EEH_RESET_HOT:
                pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
                ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
                pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
                break;
        case EEH_RESET_DEACTIVATE:
                pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
                ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
                pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
                break;
        }

        return 0;
}

/**
 * ioda_eeh_reset - Reset the indicated PE
 * @pe: EEH PE
 * @option: reset option
 *
 * Do reset on the indicated PE. For a PCI bus sensitive PE,
 * we need to reset the parent p2p bridge. The PHB has to be
 * reinitialized if the p2p bridge is the root bridge. For a
 * PCI device sensitive PE, we will try to reset the device
 * through FLR. For now, we don't have OPAL APIs to do a HARD
 * reset yet, so all resets are SOFT (HOT) resets.
 */
static int ioda_eeh_reset(struct eeh_pe *pe, int option)
{
        struct pci_controller *hose = pe->phb;
        struct eeh_dev *edev;
        struct pci_dev *dev;
        int ret;

        /*
         * We have to clear the problematic state for the
         * corresponding PE. However, we don't need to do it if
         * the PE is PHB associated: in that case the PHB has
         * fatal errors and needs a reset anyway, and the AIB
         * interface isn't reliable any more.
         */
        if (!(pe->type & EEH_PE_PHB) &&
            (option == EEH_RESET_HOT ||
             option == EEH_RESET_FUNDAMENTAL)) {
                ret = ioda_eeh_pe_clear(pe);
                if (ret)
                        return -EIO;
        }

        /*
         * The rules applied to reset, either fundamental or hot reset:
         *
         * We always reset the direct upstream bridge of the PE. If the
         * direct upstream bridge isn't the root bridge, we always take
         * a hot reset no matter which option (fundamental or hot) was
         * requested. Otherwise, we do the reset according to the
         * requested option.
         */
        if (pe->type & EEH_PE_PHB) {
                ret = ioda_eeh_phb_reset(hose, option);
        } else {
                if (pe->type & EEH_PE_DEVICE) {
                        /*
                         * If it's a device PE, we don't have the parent
                         * PCI bus yet. So we have to figure it out
                         * indirectly.
                         */
                        edev = list_first_entry(&pe->edevs,
                                        struct eeh_dev, list);
                        dev = eeh_dev_to_pci_dev(edev);
                        dev = dev->bus->self;
                } else {
                        /*
                         * If it's a bus PE, the parent PCI bus is
                         * already there and we just pick it up.
                         */
                        dev = pe->bus->self;
                }

                /*
                 * Do the reset based on whether the direct upstream
                 * bridge is the root bridge (port) or not.
                 */
                if (dev->bus->number == 0)
                        ret = ioda_eeh_root_reset(hose, option);
                else
                        ret = ioda_eeh_bridge_reset(hose, dev, option);
        }

        return ret;
}

/**
 * ioda_eeh_get_log - Retrieve error log
 * @pe: EEH PE
 * @severity: Severity level of the log
 * @drv_log: buffer to store the log
 * @len: space of the log buffer
 *
 * The function is used to retrieve the error log from P7IOC.
 */
static int ioda_eeh_get_log(struct eeh_pe *pe, int severity,
                            char *drv_log, unsigned long len)
{
        s64 ret;
        unsigned long flags;
        struct pci_controller *hose = pe->phb;
        struct pnv_phb *phb = hose->private_data;

        spin_lock_irqsave(&phb->lock, flags);

        ret = opal_pci_get_phb_diag_data2(phb->opal_id,
                        phb->diag.blob, PNV_PCI_DIAG_BUF_SIZE);
        if (ret) {
                spin_unlock_irqrestore(&phb->lock, flags);
                pr_warning("%s: Can't get log for PHB#%x-PE#%x (%lld)\n",
                           __func__, hose->global_number, pe->addr, ret);
                return -EIO;
        }

        /*
         * FIXME: We probably need to log the error somewhere.
         * Let's sort that out in the future.
         */
        /* pr_info("%s", phb->diag.blob); */

        spin_unlock_irqrestore(&phb->lock, flags);

        return 0;
}

/**
 * ioda_eeh_configure_bridge - Configure the PCI bridges for the indicated PE
 * @pe: EEH PE
 *
 * A PE might include PCI bridges, and those bridges have to be
 * configured correctly for the PE to work properly. However, nothing
 * needs to be done on P7IOC since the reset function already covers
 * everything this function would have to do.
 */
static int ioda_eeh_configure_bridge(struct eeh_pe *pe)
{
        return 0;
}

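/* Dump the GEM and LEM registers common to all P7IOC diag-data types */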
static void ioda_eeh_hub_diag_common(struct OpalIoP7IOCErrorData *data)
{
        /* GEM */
        pr_info("  GEM XFIR:        %016llx\n", data->gemXfir);
        pr_info("  GEM RFIR:        %016llx\n", data->gemRfir);
        pr_info("  GEM RIRQFIR:     %016llx\n", data->gemRirqfir);
        pr_info("  GEM Mask:        %016llx\n", data->gemMask);
        pr_info("  GEM RWOF:        %016llx\n", data->gemRwof);

        /* LEM */
        pr_info("  LEM FIR:         %016llx\n", data->lemFir);
        pr_info("  LEM Error Mask:  %016llx\n", data->lemErrMask);
        pr_info("  LEM Action 0:    %016llx\n", data->lemAction0);
        pr_info("  LEM Action 1:    %016llx\n", data->lemAction1);
        pr_info("  LEM WOF:         %016llx\n", data->lemWof);
}

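/*
 * Retrieve the P7IOC hub diag-data through OPAL and dump it according
 * to its type (RGC, BI, CI, MISC or I2C).
 */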
static void ioda_eeh_hub_diag(struct pci_controller *hose)
{
        struct pnv_phb *phb = hose->private_data;
        struct OpalIoP7IOCErrorData *data;
        long rc;

        data = (struct OpalIoP7IOCErrorData *)hub_diag;
        rc = opal_pci_get_hub_diag_data(phb->hub_id, data, PAGE_SIZE);
        if (rc != OPAL_SUCCESS) {
                pr_warning("%s: Failed to get HUB#%llx diag-data (%ld)\n",
                           __func__, phb->hub_id, rc);
                return;
        }

        switch (data->type) {
        case OPAL_P7IOC_DIAG_TYPE_RGC:
                pr_info("P7IOC diag-data for RGC\n\n");
                ioda_eeh_hub_diag_common(data);
                pr_info("  RGC Status:      %016llx\n", data->rgc.rgcStatus);
                pr_info("  RGC LDCP:        %016llx\n", data->rgc.rgcLdcp);
                break;
        case OPAL_P7IOC_DIAG_TYPE_BI:
                pr_info("P7IOC diag-data for BI %s\n\n",
                        data->bi.biDownbound ? "Downbound" : "Upbound");
                ioda_eeh_hub_diag_common(data);
                pr_info("  BI LDCP 0:       %016llx\n", data->bi.biLdcp0);
                pr_info("  BI LDCP 1:       %016llx\n", data->bi.biLdcp1);
                pr_info("  BI LDCP 2:       %016llx\n", data->bi.biLdcp2);
                pr_info("  BI Fence Status: %016llx\n", data->bi.biFenceStatus);
                break;
        case OPAL_P7IOC_DIAG_TYPE_CI:
                pr_info("P7IOC diag-data for CI Port %d\n\n",
                        data->ci.ciPort);
                ioda_eeh_hub_diag_common(data);
                pr_info("  CI Port Status:  %016llx\n", data->ci.ciPortStatus);
                pr_info("  CI Port LDCP:    %016llx\n", data->ci.ciPortLdcp);
                break;
        case OPAL_P7IOC_DIAG_TYPE_MISC:
                pr_info("P7IOC diag-data for MISC\n\n");
                ioda_eeh_hub_diag_common(data);
                break;
        case OPAL_P7IOC_DIAG_TYPE_I2C:
                pr_info("P7IOC diag-data for I2C\n\n");
                ioda_eeh_hub_diag_common(data);
                break;
        default:
                pr_warning("%s: Invalid type of HUB#%llx diag-data (%d)\n",
                           __func__, phb->hub_id, data->type);
        }
}

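/* Dump P7IOC PHB diag-data, including the per-PE PEST entries */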
static void ioda_eeh_p7ioc_phb_diag(struct pci_controller *hose,
                                    struct OpalIoPhbErrorCommon *common)
{
        struct OpalIoP7IOCPhbErrorData *data;
        int i;

        data = (struct OpalIoP7IOCPhbErrorData *)common;

        pr_info("P7IOC PHB#%x Diag-data (Version: %d)\n\n",
                hose->global_number, common->version);

        pr_info("  brdgCtl:              %08x\n", data->brdgCtl);

        pr_info("  portStatusReg:        %08x\n", data->portStatusReg);
        pr_info("  rootCmplxStatus:      %08x\n", data->rootCmplxStatus);
        pr_info("  busAgentStatus:       %08x\n", data->busAgentStatus);

        pr_info("  deviceStatus:         %08x\n", data->deviceStatus);
        pr_info("  slotStatus:           %08x\n", data->slotStatus);
        pr_info("  linkStatus:           %08x\n", data->linkStatus);
        pr_info("  devCmdStatus:         %08x\n", data->devCmdStatus);
        pr_info("  devSecStatus:         %08x\n", data->devSecStatus);

        pr_info("  rootErrorStatus:      %08x\n", data->rootErrorStatus);
        pr_info("  uncorrErrorStatus:    %08x\n", data->uncorrErrorStatus);
        pr_info("  corrErrorStatus:      %08x\n", data->corrErrorStatus);
        pr_info("  tlpHdr1:              %08x\n", data->tlpHdr1);
        pr_info("  tlpHdr2:              %08x\n", data->tlpHdr2);
        pr_info("  tlpHdr3:              %08x\n", data->tlpHdr3);
        pr_info("  tlpHdr4:              %08x\n", data->tlpHdr4);
        pr_info("  sourceId:             %08x\n", data->sourceId);

        pr_info("  errorClass:           %016llx\n", data->errorClass);
        pr_info("  correlator:           %016llx\n", data->correlator);
        pr_info("  p7iocPlssr:           %016llx\n", data->p7iocPlssr);
        pr_info("  p7iocCsr:             %016llx\n", data->p7iocCsr);
        pr_info("  lemFir:               %016llx\n", data->lemFir);
        pr_info("  lemErrorMask:         %016llx\n", data->lemErrorMask);
        pr_info("  lemWOF:               %016llx\n", data->lemWOF);
        pr_info("  phbErrorStatus:       %016llx\n", data->phbErrorStatus);
        pr_info("  phbFirstErrorStatus:  %016llx\n", data->phbFirstErrorStatus);
        pr_info("  phbErrorLog0:         %016llx\n", data->phbErrorLog0);
        pr_info("  phbErrorLog1:         %016llx\n", data->phbErrorLog1);
        pr_info("  mmioErrorStatus:      %016llx\n", data->mmioErrorStatus);
        pr_info("  mmioFirstErrorStatus: %016llx\n", data->mmioFirstErrorStatus);
        pr_info("  mmioErrorLog0:        %016llx\n", data->mmioErrorLog0);
        pr_info("  mmioErrorLog1:        %016llx\n", data->mmioErrorLog1);
        pr_info("  dma0ErrorStatus:      %016llx\n", data->dma0ErrorStatus);
        pr_info("  dma0FirstErrorStatus: %016llx\n", data->dma0FirstErrorStatus);
        pr_info("  dma0ErrorLog0:        %016llx\n", data->dma0ErrorLog0);
        pr_info("  dma0ErrorLog1:        %016llx\n", data->dma0ErrorLog1);
        pr_info("  dma1ErrorStatus:      %016llx\n", data->dma1ErrorStatus);
        pr_info("  dma1FirstErrorStatus: %016llx\n", data->dma1FirstErrorStatus);
        pr_info("  dma1ErrorLog0:        %016llx\n", data->dma1ErrorLog0);
        pr_info("  dma1ErrorLog1:        %016llx\n", data->dma1ErrorLog1);

        for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++) {
                if ((data->pestA[i] >> 63) == 0 &&
                    (data->pestB[i] >> 63) == 0)
                        continue;

                pr_info("  PE[%3d] PESTA:        %016llx\n", i, data->pestA[i]);
                pr_info("          PESTB:        %016llx\n", data->pestB[i]);
        }
}

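/* Dump PHB3 PHB diag-data, including the per-PE PEST entries */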
static void ioda_eeh_phb3_phb_diag(struct pci_controller *hose,
                                   struct OpalIoPhbErrorCommon *common)
{
        struct OpalIoPhb3ErrorData *data;
        int i;

        data = (struct OpalIoPhb3ErrorData *)common;

        pr_info("PHB3 PHB#%x Diag-data (Version: %d)\n\n",
                hose->global_number, common->version);

        pr_info("  brdgCtl:              %08x\n", data->brdgCtl);

        pr_info("  portStatusReg:        %08x\n", data->portStatusReg);
        pr_info("  rootCmplxStatus:      %08x\n", data->rootCmplxStatus);
        pr_info("  busAgentStatus:       %08x\n", data->busAgentStatus);

        pr_info("  deviceStatus:         %08x\n", data->deviceStatus);
        pr_info("  slotStatus:           %08x\n", data->slotStatus);
        pr_info("  linkStatus:           %08x\n", data->linkStatus);
        pr_info("  devCmdStatus:         %08x\n", data->devCmdStatus);
        pr_info("  devSecStatus:         %08x\n", data->devSecStatus);

        pr_info("  rootErrorStatus:      %08x\n", data->rootErrorStatus);
        pr_info("  uncorrErrorStatus:    %08x\n", data->uncorrErrorStatus);
        pr_info("  corrErrorStatus:      %08x\n", data->corrErrorStatus);
        pr_info("  tlpHdr1:              %08x\n", data->tlpHdr1);
        pr_info("  tlpHdr2:              %08x\n", data->tlpHdr2);
        pr_info("  tlpHdr3:              %08x\n", data->tlpHdr3);
        pr_info("  tlpHdr4:              %08x\n", data->tlpHdr4);
        pr_info("  sourceId:             %08x\n", data->sourceId);
        pr_info("  errorClass:           %016llx\n", data->errorClass);
        pr_info("  correlator:           %016llx\n", data->correlator);
        pr_info("  nFir:                 %016llx\n", data->nFir);
        pr_info("  nFirMask:             %016llx\n", data->nFirMask);
        pr_info("  nFirWOF:              %016llx\n", data->nFirWOF);
        pr_info("  PhbPlssr:             %016llx\n", data->phbPlssr);
        pr_info("  PhbCsr:               %016llx\n", data->phbCsr);
        pr_info("  lemFir:               %016llx\n", data->lemFir);
        pr_info("  lemErrorMask:         %016llx\n", data->lemErrorMask);
        pr_info("  lemWOF:               %016llx\n", data->lemWOF);
        pr_info("  phbErrorStatus:       %016llx\n", data->phbErrorStatus);
        pr_info("  phbFirstErrorStatus:  %016llx\n", data->phbFirstErrorStatus);
        pr_info("  phbErrorLog0:         %016llx\n", data->phbErrorLog0);
        pr_info("  phbErrorLog1:         %016llx\n", data->phbErrorLog1);
        pr_info("  mmioErrorStatus:      %016llx\n", data->mmioErrorStatus);
        pr_info("  mmioFirstErrorStatus: %016llx\n", data->mmioFirstErrorStatus);
        pr_info("  mmioErrorLog0:        %016llx\n", data->mmioErrorLog0);
        pr_info("  mmioErrorLog1:        %016llx\n", data->mmioErrorLog1);
        pr_info("  dma0ErrorStatus:      %016llx\n", data->dma0ErrorStatus);
        pr_info("  dma0FirstErrorStatus: %016llx\n", data->dma0FirstErrorStatus);
        pr_info("  dma0ErrorLog0:        %016llx\n", data->dma0ErrorLog0);
        pr_info("  dma0ErrorLog1:        %016llx\n", data->dma0ErrorLog1);
        pr_info("  dma1ErrorStatus:      %016llx\n", data->dma1ErrorStatus);
        pr_info("  dma1FirstErrorStatus: %016llx\n", data->dma1FirstErrorStatus);
        pr_info("  dma1ErrorLog0:        %016llx\n", data->dma1ErrorLog0);
        pr_info("  dma1ErrorLog1:        %016llx\n", data->dma1ErrorLog1);

        for (i = 0; i < OPAL_PHB3_NUM_PEST_REGS; i++) {
                if ((data->pestA[i] >> 63) == 0 &&
                    (data->pestB[i] >> 63) == 0)
                        continue;

                pr_info("  PE[%3d] PESTA:        %016llx\n", i, data->pestA[i]);
                pr_info("          PESTB:        %016llx\n", data->pestB[i]);
        }
}

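/*
 * Fetch the PHB diag-data through OPAL and dispatch it to the
 * chip-specific dump routine based on the I/O chip type.
 */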
static void ioda_eeh_phb_diag(struct pci_controller *hose)
{
        struct pnv_phb *phb = hose->private_data;
        struct OpalIoPhbErrorCommon *common;
        long rc;

        common = (struct OpalIoPhbErrorCommon *)phb->diag.blob;
        rc = opal_pci_get_phb_diag_data2(phb->opal_id, common, PAGE_SIZE);
        if (rc != OPAL_SUCCESS) {
                pr_warning("%s: Failed to get diag-data for PHB#%x (%ld)\n",
                           __func__, hose->global_number, rc);
                return;
        }

        switch (common->ioType) {
        case OPAL_PHB_ERROR_DATA_TYPE_P7IOC:
                ioda_eeh_p7ioc_phb_diag(hose, common);
                break;
        case OPAL_PHB_ERROR_DATA_TYPE_PHB3:
                ioda_eeh_phb3_phb_diag(hose, common);
                break;
        default:
                pr_warning("%s: Unrecognized I/O chip %d\n",
                           __func__, common->ioType);
        }
}

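/* Look up the EEH PE that represents the PHB itself */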
static int ioda_eeh_get_phb_pe(struct pci_controller *hose,
                               struct eeh_pe **pe)
{
        struct eeh_pe *phb_pe;

        phb_pe = eeh_phb_pe_get(hose);
        if (!phb_pe) {
                pr_warning("%s: Can't find PE for PHB#%x\n",
                           __func__, hose->global_number);
                return -EEXIST;
        }

        *pe = phb_pe;
        return 0;
}

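/* Look up the EEH PE for the given PE number on the indicated PHB */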
static int ioda_eeh_get_pe(struct pci_controller *hose,
                           u16 pe_no, struct eeh_pe **pe)
{
        struct eeh_pe *phb_pe, *dev_pe;
        struct eeh_dev dev;

        /* Find the PHB PE */
        if (ioda_eeh_get_phb_pe(hose, &phb_pe))
                return -EEXIST;

        /* Find the PE according to PE# */
        memset(&dev, 0, sizeof(struct eeh_dev));
        dev.phb = hose;
        dev.pe_config_addr = pe_no;
        dev_pe = eeh_pe_get(&dev);
        if (!dev_pe) {
                pr_warning("%s: Can't find PE for PHB#%x - PE#%x\n",
                           __func__, hose->global_number, pe_no);
                return -EEXIST;
        }

        *pe = dev_pe;
        return 0;
}

/**
 * ioda_eeh_next_error - Retrieve next error for EEH core to handle
 * @pe: The affected PE
 *
 * The function is expected to be called by the EEH core when it
 * receives a special EEH event (one without a bound PE). It calls
 * the OPAL APIs to retrieve the next error to handle. Informational
 * errors are handled internally by the platform; however, a dead IOC,
 * dead PHB, fenced PHB or frozen PE has to be handled by the EEH core
 * eventually.
 */
static int ioda_eeh_next_error(struct eeh_pe **pe)
{
        struct pci_controller *hose, *tmp;
        struct pnv_phb *phb;
        u64 frozen_pe_no;
        u16 err_type, severity;
        long rc;
        int ret = 0;

        /*
         * While running here, it's safe to purge the event queue.
         * And we should keep the cached OPAL notifier event
         * synchronized between the kernel and firmware.
         */
        eeh_remove_event(NULL);
        opal_notifier_update_evt(OPAL_EVENT_PCI_ERROR, 0x0ul);

        list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
                /*
                 * If the subordinate PCI buses of the PHB have been
                 * removed, we no longer need to take care of it.
                 */
                phb = hose->private_data;
                if (phb->eeh_state & PNV_EEH_STATE_REMOVED)
                        continue;

                rc = opal_pci_next_error(phb->opal_id,
                                &frozen_pe_no, &err_type, &severity);

                /* If the OPAL API returns an error, we needn't proceed */
                if (rc != OPAL_SUCCESS) {
                        pr_devel("%s: Invalid return value on PHB#%x (0x%lx) from opal_pci_next_error\n",
                                 __func__, hose->global_number, rc);
                        continue;
                }

                /* If the PHB doesn't have an error, stop processing */
                if (err_type == OPAL_EEH_NO_ERROR ||
                    severity == OPAL_EEH_SEV_NO_ERROR) {
                        pr_devel("%s: No error found on PHB#%x\n",
                                 __func__, hose->global_number);
                        continue;
                }

                /*
                 * Process the error. We expect the error with the
                 * highest priority to be reported when there are
                 * multiple errors on the specific PHB.
                 */
                pr_devel("%s: Error (%d, %d, %llu) on PHB#%x\n",
                         __func__, err_type, severity,
                         frozen_pe_no, hose->global_number);

                /*
                 * Return codes handed back to the EEH core:
                 * 4 - dead IOC, 3 - dead PHB, 2 - fenced PHB,
                 * 1 - frozen PE, 0 - no error found.
                 */
                switch (err_type) {
                case OPAL_EEH_IOC_ERROR:
                        if (severity == OPAL_EEH_SEV_IOC_DEAD) {
                                list_for_each_entry_safe(hose, tmp,
                                                &hose_list, list_node) {
                                        phb = hose->private_data;
                                        phb->eeh_state |= PNV_EEH_STATE_REMOVED;
                                }

                                pr_err("EEH: dead IOC detected\n");
                                ret = 4;
                                goto out;
                        } else if (severity == OPAL_EEH_SEV_INF) {
                                pr_info("EEH: IOC informative error "
                                        "detected\n");
                                ioda_eeh_hub_diag(hose);
                        }

                        break;
                case OPAL_EEH_PHB_ERROR:
                        if (severity == OPAL_EEH_SEV_PHB_DEAD) {
                                if (ioda_eeh_get_phb_pe(hose, pe))
                                        break;

                                pr_err("EEH: dead PHB#%x detected\n",
                                       hose->global_number);
                                phb->eeh_state |= PNV_EEH_STATE_REMOVED;
                                ret = 3;
                                goto out;
                        } else if (severity == OPAL_EEH_SEV_PHB_FENCED) {
                                if (ioda_eeh_get_phb_pe(hose, pe))
                                        break;

                                pr_err("EEH: fenced PHB#%x detected\n",
                                       hose->global_number);
                                ret = 2;
                                goto out;
                        } else if (severity == OPAL_EEH_SEV_INF) {
                                pr_info("EEH: PHB#%x informative error "
                                        "detected\n",
                                        hose->global_number);
                                ioda_eeh_phb_diag(hose);
                        }

                        break;
                case OPAL_EEH_PE_ERROR:
                        if (ioda_eeh_get_pe(hose, frozen_pe_no, pe))
                                break;

                        pr_err("EEH: Frozen PE#%x on PHB#%x detected\n",
                               (*pe)->addr, (*pe)->phb->global_number);
                        ret = 1;
                        goto out;
                }
        }

out:
        return ret;
}

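/* EEH backend callbacks for IODA-based PowerNV PHBs */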
struct pnv_eeh_ops ioda_eeh_ops = {
        .post_init              = ioda_eeh_post_init,
        .set_option             = ioda_eeh_set_option,
        .get_state              = ioda_eeh_get_state,
        .reset                  = ioda_eeh_reset,
        .get_log                = ioda_eeh_get_log,
        .configure_bridge       = ioda_eeh_configure_bridge,
        .next_error             = ioda_eeh_next_error
};