1 // SPDX-License-Identifier: GPL-2.0-only
2 #include "amd64_edac.h"
3 #include <asm/amd_nb.h>
5 static struct edac_pci_ctl_info *pci_ctl;
8 * Set by command line parameter. If BIOS has enabled the ECC, this override is
9 * cleared to prevent this driver from re-enabling the hardware.
11 static int ecc_enable_override;
12 module_param(ecc_enable_override, int, 0644);
14 static struct msr __percpu *msrs;
16 static inline u32 get_umc_reg(struct amd64_pvt *pvt, u32 reg)
18 if (!pvt->flags.zn_regs_v2)
22 case UMCCH_ADDR_CFG: return UMCCH_ADDR_CFG_DDR5;
23 case UMCCH_ADDR_MASK_SEC: return UMCCH_ADDR_MASK_SEC_DDR5;
24 case UMCCH_DIMM_CFG: return UMCCH_DIMM_CFG_DDR5;
27 WARN_ONCE(1, "%s: unknown register 0x%x", __func__, reg);
32 static struct ecc_settings **ecc_stngs;
34 /* Device for the PCI component */
35 static struct device *pci_ctl_dev;
38 * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
39 * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
42 * FIXME: Produce a better mapping/linearisation.
44 static const struct scrubrate {
45 u32 scrubval; /* bit pattern for scrub rate */
46 u32 bandwidth; /* bandwidth consumed (bytes/sec) */
48 { 0x01, 1600000000UL},
70 { 0x00, 0UL}, /* scrubbing off */
73 int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
74 u32 *val, const char *func)
78 err = pci_read_config_dword(pdev, offset, val);
80 amd64_warn("%s: error reading F%dx%03x.\n",
81 func, PCI_FUNC(pdev->devfn), offset);
86 int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
87 u32 val, const char *func)
91 err = pci_write_config_dword(pdev, offset, val);
93 amd64_warn("%s: error writing to F%dx%03x.\n",
94 func, PCI_FUNC(pdev->devfn), offset);
100 * Select DCT to which PCI cfg accesses are routed
102 static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
106 amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
107 reg &= (pvt->model == 0x30) ? ~3 : ~1;
109 amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
114 * Depending on the family, F2 DCT reads need special handling:
116 * K8: has a single DCT only and no address offsets >= 0x100
118 * F10h: each DCT has its own set of regs
122 * F16h: has only 1 DCT
124 * F15h: we select which DCT we access using F1x10C[DctCfgSel]
126 static inline int amd64_read_dct_pci_cfg(struct amd64_pvt *pvt, u8 dct,
127 int offset, u32 *val)
131 if (dct || offset >= 0x100)
138 * Note: If ganging is enabled, barring the regs
139 * F2x[1,0]98 and F2x[1,0]9C, reads to F2x1xx
140 * return 0. (cf. Section 2.8.1 F10h BKDG)
142 if (dct_ganging_enabled(pvt))
151 * F15h: F2x1xx addresses do not map explicitly to DCT1.
152 * We should select which DCT we access using F1x10C[DctCfgSel]
154 dct = (dct && pvt->model == 0x30) ? 3 : dct;
155 f15h_select_dct(pvt, dct);
166 return amd64_read_pci_cfg(pvt->F2, offset, val);
170 * Memory scrubber control interface. For K8, memory scrubbing is handled by
171 * hardware and can involve L2 cache, dcache as well as the main memory. With
172 * F10, this is extended to L3 cache scrubbing on CPU models sporting that
175 * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
176 * (dram) over to cache lines. This is nasty, so we will use bandwidth in
177 * bytes/sec for the setting.
179 * Currently, we only do dram scrubbing. If the scrubbing is done in software on
180 * other archs, we might not have access to the caches directly.
184 * Scan the scrub rate mapping table for a close or matching bandwidth value to
185 * issue. If the requested rate is too big, then use the last maximum value found.
187 static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate)
193 * map the configured rate (new_bw) to a value specific to the AMD64
194 * memory controller and apply to register. Search for the first
195 * bandwidth entry that is less than or equal to the requested setting
196 * and program that. If at last entry, turn off DRAM scrubbing.
198 * If no suitable bandwidth is found, turn off DRAM scrubbing entirely
199 * by falling back to the last element in scrubrates[].
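 *
 * (Illustration: the loop below walks the table, ordered from the fastest
 * rate down to "off", skips scrubval encodings below min_rate and stops at
 * the first entry whose bandwidth is <= new_bw; that entry's scrubval is
 * programmed into SCRCTRL[4:0] and its bandwidth is returned.)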
201 for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
203 * skip scrub rates which aren't recommended
204 * (see F10 BKDG, F3x58)
206 if (scrubrates[i].scrubval < min_rate)
209 if (scrubrates[i].bandwidth <= new_bw)
213 scrubval = scrubrates[i].scrubval;
215 if (pvt->fam == 0x15 && pvt->model == 0x60) {
216 f15h_select_dct(pvt, 0);
217 pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
218 f15h_select_dct(pvt, 1);
219 pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
221 pci_write_bits32(pvt->F3, SCRCTRL, scrubval, 0x001F);
225 return scrubrates[i].bandwidth;
230 static int set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
232 struct amd64_pvt *pvt = mci->pvt_info;
233 u32 min_scrubrate = 0x5;
238 if (pvt->fam == 0x15) {
240 if (pvt->model < 0x10)
241 f15h_select_dct(pvt, 0);
243 if (pvt->model == 0x60)
246 return __set_scrub_rate(pvt, bw, min_scrubrate);
249 static int get_scrub_rate(struct mem_ctl_info *mci)
251 struct amd64_pvt *pvt = mci->pvt_info;
252 int i, retval = -EINVAL;
255 if (pvt->fam == 0x15) {
257 if (pvt->model < 0x10)
258 f15h_select_dct(pvt, 0);
260 if (pvt->model == 0x60)
261 amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval);
263 amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
265 amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
268 scrubval = scrubval & 0x001F;
270 for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
271 if (scrubrates[i].scrubval == scrubval) {
272 retval = scrubrates[i].bandwidth;
280 * returns true if the SysAddr given by sys_addr matches the
281 * DRAM base/limit associated with node_id
283 static bool base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, u8 nid)
287 /* The K8 treats this as a 40-bit value. However, bits 63-40 will be
288 * all ones if the most significant implemented address bit is 1.
289 * Here we discard bits 63-40. See section 3.4.2 of AMD publication
290 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
291 * Application Programming.
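 *
 * (Hypothetical example: a sign-extended SysAddr of 0xffffff8012345678 is
 * reduced to 0x0000008012345678 here before the base/limit comparison.)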
293 addr = sys_addr & 0x000000ffffffffffull;
295 return ((addr >= get_dram_base(pvt, nid)) &&
296 (addr <= get_dram_limit(pvt, nid)));
300 * Attempt to map a SysAddr to a node. On success, return a pointer to the
301 * mem_ctl_info structure for the node that the SysAddr maps to.
303 * On failure, return NULL.
305 static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
308 struct amd64_pvt *pvt;
313 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
314 * 3.4.4.2) registers to map the SysAddr to a node ID.
319 * The value of this field should be the same for all DRAM Base
320 * registers. Therefore we arbitrarily choose to read it from the
321 * register for node 0.
323 intlv_en = dram_intlv_en(pvt, 0);
326 for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
327 if (base_limit_match(pvt, sys_addr, node_id))
333 if (unlikely((intlv_en != 0x01) &&
334 (intlv_en != 0x03) &&
335 (intlv_en != 0x07))) {
336 amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);
340 bits = (((u32) sys_addr) >> 12) & intlv_en;
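/*
 * Illustration: with intlv_en == 0x03 (4-node interleave), SysAddr bits
 * [13:12] are compared against each node's DRAM IntlvSel field in the
 * loop below to find the owning node.
 */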
342 for (node_id = 0; ; ) {
343 if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
344 break; /* intlv_sel field matches */
346 if (++node_id >= DRAM_RANGES)
350 /* sanity test for sys_addr */
351 if (unlikely(!base_limit_match(pvt, sys_addr, node_id))) {
352 amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address "
353 "range for node %d with node interleaving enabled.\n",
354 __func__, sys_addr, node_id);
359 return edac_mc_find((int)node_id);
362 edac_dbg(2, "sys_addr 0x%lx doesn't match any node\n",
363 (unsigned long)sys_addr);
369 * compute the CS base address of the @csrow on the DRAM controller @dct.
370 * For details see F2x[5C:40] in the processor's BKDG
372 static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
373 u64 *base, u64 *mask)
375 u64 csbase, csmask, base_bits, mask_bits;
378 if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
379 csbase = pvt->csels[dct].csbases[csrow];
380 csmask = pvt->csels[dct].csmasks[csrow];
381 base_bits = GENMASK_ULL(31, 21) | GENMASK_ULL(15, 9);
382 mask_bits = GENMASK_ULL(29, 21) | GENMASK_ULL(15, 9);
386 * F16h and F15h, models 30h and later need two addr_shift values:
387 * 8 for high and 6 for low (cf. F16h BKDG).
389 } else if (pvt->fam == 0x16 ||
390 (pvt->fam == 0x15 && pvt->model >= 0x30)) {
391 csbase = pvt->csels[dct].csbases[csrow];
392 csmask = pvt->csels[dct].csmasks[csrow >> 1];
394 *base = (csbase & GENMASK_ULL(15, 5)) << 6;
395 *base |= (csbase & GENMASK_ULL(30, 19)) << 8;
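/*
 * i.e. DCSB bits [15:5] provide address bits [21:11] and DCSB bits
 * [30:19] provide address bits [38:27].
 */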
398 /* poke holes for the csmask */
399 *mask &= ~((GENMASK_ULL(15, 5) << 6) |
400 (GENMASK_ULL(30, 19) << 8));
402 *mask |= (csmask & GENMASK_ULL(15, 5)) << 6;
403 *mask |= (csmask & GENMASK_ULL(30, 19)) << 8;
407 csbase = pvt->csels[dct].csbases[csrow];
408 csmask = pvt->csels[dct].csmasks[csrow >> 1];
411 if (pvt->fam == 0x15)
412 base_bits = mask_bits =
413 GENMASK_ULL(30, 19) | GENMASK_ULL(13, 5);
415 base_bits = mask_bits =
416 GENMASK_ULL(28, 19) | GENMASK_ULL(13, 5);
419 *base = (csbase & base_bits) << addr_shift;
422 /* poke holes for the csmask */
423 *mask &= ~(mask_bits << addr_shift);
425 *mask |= (csmask & mask_bits) << addr_shift;
428 #define for_each_chip_select(i, dct, pvt) \
429 for (i = 0; i < pvt->csels[dct].b_cnt; i++)
431 #define chip_select_base(i, dct, pvt) \
432 pvt->csels[dct].csbases[i]
434 #define for_each_chip_select_mask(i, dct, pvt) \
435 for (i = 0; i < pvt->csels[dct].m_cnt; i++)
437 #define for_each_umc(i) \
438 for (i = 0; i < pvt->max_mcs; i++)
441 * @input_addr is an InputAddr associated with the node given by mci. Return the
442 * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
444 static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
446 struct amd64_pvt *pvt;
452 for_each_chip_select(csrow, 0, pvt) {
453 if (!csrow_enabled(csrow, 0, pvt))
456 get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
460 if ((input_addr & mask) == (base & mask)) {
461 edac_dbg(2, "InputAddr 0x%lx matches csrow %d (node %d)\n",
462 (unsigned long)input_addr, csrow,
468 edac_dbg(2, "no matching csrow for InputAddr 0x%lx (MC node %d)\n",
469 (unsigned long)input_addr, pvt->mc_node_id);
475 * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
476 * for the node represented by mci. Info is passed back in *hole_base,
477 * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if
478 * info is invalid. Info may be invalid for either of the following reasons:
480 * - The revision of the node is not E or greater. In this case, the DRAM Hole
481 * Address Register does not exist.
483 * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
484 * indicating that its contents are not valid.
486 * The values passed back in *hole_base, *hole_offset, and *hole_size are
487 * complete 32-bit values despite the fact that the bitfields in the DHAR
488 * only represent bits 31-24 of the base and offset values.
490 static int get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
491 u64 *hole_offset, u64 *hole_size)
493 struct amd64_pvt *pvt = mci->pvt_info;
495 /* only revE and later have the DRAM Hole Address Register */
496 if (pvt->fam == 0xf && pvt->ext_model < K8_REV_E) {
497 edac_dbg(1, " revision %d for node %d does not support DHAR\n",
498 pvt->ext_model, pvt->mc_node_id);
502 /* valid for Fam10h and above */
503 if (pvt->fam >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
504 edac_dbg(1, " Dram Memory Hoisting is DISABLED on this system\n");
508 if (!dhar_valid(pvt)) {
509 edac_dbg(1, " Dram Memory Hoisting is DISABLED on this node %d\n",
514 /* This node has Memory Hoisting */
516 /* +------------------+--------------------+--------------------+-----
517 * | memory | DRAM hole | relocated |
518 * | [0, (x - 1)] | [x, 0xffffffff] | addresses from |
520 * | | | [0x100000000, |
521 * | | | (0x100000000+ |
522 * | | | (0xffffffff-x))] |
523 * +------------------+--------------------+--------------------+-----
525 * Above is a diagram of physical memory showing the DRAM hole and the
526 * relocated addresses from the DRAM hole. As shown, the DRAM hole
527 * starts at address x (the base address) and extends through address
528 * 0xffffffff. The DRAM Hole Address Register (DHAR) relocates the
529 * addresses in the hole so that they start at 0x100000000.
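 *
 * (Hypothetical example: with a hole base x of 0x80000000, hole_size is
 * 0x80000000 (2 GB) and the relocated region spans
 * [0x100000000, 0x17fffffff].)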
532 *hole_base = dhar_base(pvt);
533 *hole_size = (1ULL << 32) - *hole_base;
535 *hole_offset = (pvt->fam > 0xf) ? f10_dhar_offset(pvt)
536 : k8_dhar_offset(pvt);
538 edac_dbg(1, " DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
539 pvt->mc_node_id, (unsigned long)*hole_base,
540 (unsigned long)*hole_offset, (unsigned long)*hole_size);
545 #ifdef CONFIG_EDAC_DEBUG
546 #define EDAC_DCT_ATTR_SHOW(reg) \
547 static ssize_t reg##_show(struct device *dev, \
548 struct device_attribute *mattr, char *data) \
550 struct mem_ctl_info *mci = to_mci(dev); \
551 struct amd64_pvt *pvt = mci->pvt_info; \
553 return sprintf(data, "0x%016llx\n", (u64)pvt->reg); \
556 EDAC_DCT_ATTR_SHOW(dhar);
557 EDAC_DCT_ATTR_SHOW(dbam0);
558 EDAC_DCT_ATTR_SHOW(top_mem);
559 EDAC_DCT_ATTR_SHOW(top_mem2);
561 static ssize_t dram_hole_show(struct device *dev, struct device_attribute *mattr,
564 struct mem_ctl_info *mci = to_mci(dev);
570 get_dram_hole_info(mci, &hole_base, &hole_offset, &hole_size);
572 return sprintf(data, "%llx %llx %llx\n", hole_base, hole_offset,
577 * update NUM_DBG_ATTRS in case you add new members
579 static DEVICE_ATTR(dhar, S_IRUGO, dhar_show, NULL);
580 static DEVICE_ATTR(dbam, S_IRUGO, dbam0_show, NULL);
581 static DEVICE_ATTR(topmem, S_IRUGO, top_mem_show, NULL);
582 static DEVICE_ATTR(topmem2, S_IRUGO, top_mem2_show, NULL);
583 static DEVICE_ATTR_RO(dram_hole);
585 static struct attribute *dbg_attrs[] = {
588 &dev_attr_topmem.attr,
589 &dev_attr_topmem2.attr,
590 &dev_attr_dram_hole.attr,
594 static const struct attribute_group dbg_group = {
598 static ssize_t inject_section_show(struct device *dev,
599 struct device_attribute *mattr, char *buf)
601 struct mem_ctl_info *mci = to_mci(dev);
602 struct amd64_pvt *pvt = mci->pvt_info;
603 return sprintf(buf, "0x%x\n", pvt->injection.section);
607 * store error injection section value which refers to one of 4 16-byte sections
608 * within a 64-byte cacheline
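 * (e.g. writing 2 selects bytes 32..47 of the cacheline)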
612 static ssize_t inject_section_store(struct device *dev,
613 struct device_attribute *mattr,
614 const char *data, size_t count)
616 struct mem_ctl_info *mci = to_mci(dev);
617 struct amd64_pvt *pvt = mci->pvt_info;
621 ret = kstrtoul(data, 10, &value);
626 amd64_warn("%s: invalid section 0x%lx\n", __func__, value);
630 pvt->injection.section = (u32) value;
634 static ssize_t inject_word_show(struct device *dev,
635 struct device_attribute *mattr, char *buf)
637 struct mem_ctl_info *mci = to_mci(dev);
638 struct amd64_pvt *pvt = mci->pvt_info;
639 return sprintf(buf, "0x%x\n", pvt->injection.word);
643 * store error injection word value which refers to one of 9 16-bit words of the
644 * 16-byte (128-bit + ECC bits) section
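 * (presumably words 0..7 cover the 128 data bits and word 8 the ECC bits)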
648 static ssize_t inject_word_store(struct device *dev,
649 struct device_attribute *mattr,
650 const char *data, size_t count)
652 struct mem_ctl_info *mci = to_mci(dev);
653 struct amd64_pvt *pvt = mci->pvt_info;
657 ret = kstrtoul(data, 10, &value);
662 amd64_warn("%s: invalid word 0x%lx\n", __func__, value);
666 pvt->injection.word = (u32) value;
670 static ssize_t inject_ecc_vector_show(struct device *dev,
671 struct device_attribute *mattr,
674 struct mem_ctl_info *mci = to_mci(dev);
675 struct amd64_pvt *pvt = mci->pvt_info;
676 return sprintf(buf, "0x%x\n", pvt->injection.bit_map);
680 * store 16-bit error injection vector which enables injecting errors into the
681 * corresponding bit within the error injection word above. When used during a
682 * DRAM ECC read, it holds the contents of the DRAM ECC bits.
684 static ssize_t inject_ecc_vector_store(struct device *dev,
685 struct device_attribute *mattr,
686 const char *data, size_t count)
688 struct mem_ctl_info *mci = to_mci(dev);
689 struct amd64_pvt *pvt = mci->pvt_info;
693 ret = kstrtoul(data, 16, &value);
697 if (value & 0xFFFF0000) {
698 amd64_warn("%s: invalid EccVector: 0x%lx\n", __func__, value);
702 pvt->injection.bit_map = (u32) value;
707 * Do a DRAM ECC read. Assemble staged values in the pvt area, format into
708 * fields needed by the injection registers and read the NB Array Data Port.
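 *
 * A typical (hypothetical) sysfs sequence against this mci's directory,
 * using the inject_* attributes defined below:
 *   echo 2    > inject_section
 *   echo 4    > inject_word
 *   echo 0x10 > inject_ecc_vector
 *   echo 1    > inject_read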
710 static ssize_t inject_read_store(struct device *dev,
711 struct device_attribute *mattr,
712 const char *data, size_t count)
714 struct mem_ctl_info *mci = to_mci(dev);
715 struct amd64_pvt *pvt = mci->pvt_info;
717 u32 section, word_bits;
720 ret = kstrtoul(data, 10, &value);
724 /* Form value to choose 16-byte section of cacheline */
725 section = F10_NB_ARRAY_DRAM | SET_NB_ARRAY_ADDR(pvt->injection.section);
727 amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section);
729 word_bits = SET_NB_DRAM_INJECTION_READ(pvt->injection);
731 /* Issue 'word' and 'bit' along with the READ request */
732 amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
734 edac_dbg(0, "section=0x%x word_bits=0x%x\n", section, word_bits);
740 * Do a DRAM ECC write. Assemble staged values in the pvt area and format into
741 * fields needed by the injection registers.
743 static ssize_t inject_write_store(struct device *dev,
744 struct device_attribute *mattr,
745 const char *data, size_t count)
747 struct mem_ctl_info *mci = to_mci(dev);
748 struct amd64_pvt *pvt = mci->pvt_info;
749 u32 section, word_bits, tmp;
753 ret = kstrtoul(data, 10, &value);
757 /* Form value to choose 16-byte section of cacheline */
758 section = F10_NB_ARRAY_DRAM | SET_NB_ARRAY_ADDR(pvt->injection.section);
760 amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section);
762 word_bits = SET_NB_DRAM_INJECTION_WRITE(pvt->injection);
764 pr_notice_once("Don't forget to decrease MCE polling interval in\n"
765 "/sys/bus/machinecheck/devices/machinecheck<CPUNUM>/check_interval\n"
766 "so that you can get the error report faster.\n");
768 on_each_cpu(disable_caches, NULL, 1);
770 /* Issue 'word' and 'bit' along with the WRITE request */
771 amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
774 /* wait until injection happens */
775 amd64_read_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, &tmp);
776 if (tmp & F10_NB_ARR_ECC_WR_REQ) {
781 on_each_cpu(enable_caches, NULL, 1);
783 edac_dbg(0, "section=0x%x word_bits=0x%x\n", section, word_bits);
789 * update NUM_INJ_ATTRS in case you add new members
792 static DEVICE_ATTR_RW(inject_section);
793 static DEVICE_ATTR_RW(inject_word);
794 static DEVICE_ATTR_RW(inject_ecc_vector);
795 static DEVICE_ATTR_WO(inject_write);
796 static DEVICE_ATTR_WO(inject_read);
798 static struct attribute *inj_attrs[] = {
799 &dev_attr_inject_section.attr,
800 &dev_attr_inject_word.attr,
801 &dev_attr_inject_ecc_vector.attr,
802 &dev_attr_inject_write.attr,
803 &dev_attr_inject_read.attr,
807 static umode_t inj_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
809 struct device *dev = kobj_to_dev(kobj);
810 struct mem_ctl_info *mci = container_of(dev, struct mem_ctl_info, dev);
811 struct amd64_pvt *pvt = mci->pvt_info;
813 /* Families which have this injection hardware */
814 if (pvt->fam >= 0x10 && pvt->fam <= 0x16)
820 static const struct attribute_group inj_group = {
822 .is_visible = inj_is_visible,
824 #endif /* CONFIG_EDAC_DEBUG */
827 * Return the DramAddr that the SysAddr given by @sys_addr maps to. It is
828 * assumed that sys_addr maps to the node given by mci.
830 * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
831 * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
832 * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
833 * then it is also involved in translating a SysAddr to a DramAddr. Sections
834 * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
835 * These parts of the documentation are unclear. I interpret them as follows:
837 * When node n receives a SysAddr, it processes the SysAddr as follows:
839 * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
840 * Limit registers for node n. If the SysAddr is not within the range
841 * specified by the base and limit values, then node n ignores the SysAddr
842 * (since it does not map to node n). Otherwise continue to step 2 below.
844 * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
845 * disabled so skip to step 3 below. Otherwise see if the SysAddr is within
846 * the range of relocated addresses (starting at 0x100000000) from the DRAM
847 * hole. If not, skip to step 3 below. Else get the value of the
848 * DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
849 * offset defined by this value from the SysAddr.
851 * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
852 * Base register for node n. To obtain the DramAddr, subtract the base
853 * address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
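 *
 * Hypothetical example for step 3: with DRAMBase = 0x100000000 for node n
 * and the DHAR disabled, SysAddr 0x123456789 yields DramAddr 0x23456789.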
855 static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
857 struct amd64_pvt *pvt = mci->pvt_info;
858 u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
861 dram_base = get_dram_base(pvt, pvt->mc_node_id);
863 ret = get_dram_hole_info(mci, &hole_base, &hole_offset, &hole_size);
865 if ((sys_addr >= (1ULL << 32)) &&
866 (sys_addr < ((1ULL << 32) + hole_size))) {
867 /* use DHAR to translate SysAddr to DramAddr */
868 dram_addr = sys_addr - hole_offset;
870 edac_dbg(2, "using DHAR to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
871 (unsigned long)sys_addr,
872 (unsigned long)dram_addr);
879 * Translate the SysAddr to a DramAddr as shown near the start of
880 * section 3.4.4 (p. 70). Although sys_addr is a 64-bit value, the k8
881 * only deals with 40-bit values. Therefore we discard bits 63-40 of
882 * sys_addr below. If bit 39 of sys_addr is 1 then the bits we
883 * discard are all 1s. Otherwise the bits we discard are all 0s. See
884 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
885 * Programmer's Manual Volume 1 Application Programming.
887 dram_addr = (sys_addr & GENMASK_ULL(39, 0)) - dram_base;
889 edac_dbg(2, "using DRAM Base register to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
890 (unsigned long)sys_addr, (unsigned long)dram_addr);
895 * @intlv_en is the value of the IntlvEn field from a DRAM Base register
896 * (section 3.4.4.1). Return the number of bits from a SysAddr that are used
897 * for node interleaving.
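 * (IntlvEn values 0x1, 0x3 and 0x7 select 1, 2 and 3 interleave bits
 * respectively; 0 means node interleaving is disabled, and other encodings
 * are treated as invalid elsewhere in this driver.)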
899 static int num_node_interleave_bits(unsigned intlv_en)
901 static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
904 BUG_ON(intlv_en > 7);
905 n = intlv_shift_table[intlv_en];
909 /* Translate the DramAddr given by @dram_addr to an InputAddr. */
910 static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
912 struct amd64_pvt *pvt;
919 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
920 * concerning translating a DramAddr to an InputAddr.
922 intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
923 input_addr = ((dram_addr >> intlv_shift) & GENMASK_ULL(35, 12)) +
926 edac_dbg(2, " Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
927 intlv_shift, (unsigned long)dram_addr,
928 (unsigned long)input_addr);
934 * Translate the SysAddr represented by @sys_addr to an InputAddr. It is
935 * assumed that @sys_addr maps to the node given by mci.
937 static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
942 dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));
944 edac_dbg(2, "SysAddr 0x%lx translates to InputAddr 0x%lx\n",
945 (unsigned long)sys_addr, (unsigned long)input_addr);
950 /* Map the Error address to a PAGE and PAGE OFFSET. */
951 static inline void error_address_to_page_and_offset(u64 error_address,
952 struct err_info *err)
954 err->page = (u32) (error_address >> PAGE_SHIFT);
955 err->offset = ((u32) error_address) & ~PAGE_MASK;
959 * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
960 * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
961 * of a node that detected an ECC memory error. mci represents the node that
962 * the error address maps to (possibly different from the node that detected
963 * the error). Return the number of the csrow that sys_addr maps to, or -1 on
966 static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
970 csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));
973 amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
974 "address 0x%lx\n", (unsigned long)sys_addr);
979 * See AMD PPR DF::LclNodeTypeMap
981 * This register gives information for nodes of the same type within a system.
983 * Reading this register from a GPU node will tell how many GPU nodes are in the
984 * system and what the lowest AMD Node ID value is for the GPU nodes. Use this
985 * info to fixup the Linux logical "Node ID" value set in the AMD NB code and EDAC.
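 *
 * Hypothetical example: if the GPU nodes report base_node_id = 8, a
 * hardware AMD Node ID of 9 is remapped below to Linux node 9 - 8 + 1 = 2.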
987 static struct local_node_map {
992 #define PCI_DEVICE_ID_AMD_MI200_DF_F1 0x14d1
993 #define REG_LOCAL_NODE_TYPE_MAP 0x144
995 /* Local Node Type Map (LNTM) fields */
996 #define LNTM_NODE_COUNT GENMASK(27, 16)
997 #define LNTM_BASE_NODE_ID GENMASK(11, 0)
999 static int gpu_get_node_map(void)
1001 struct pci_dev *pdev;
1006 * Node ID 0 is reserved for CPUs.
1007 * Therefore, a non-zero Node ID means we've already cached the values.
1009 if (gpu_node_map.base_node_id)
1012 pdev = pci_get_device(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_DF_F1, NULL);
1018 ret = pci_read_config_dword(pdev, REG_LOCAL_NODE_TYPE_MAP, &tmp);
1022 gpu_node_map.node_count = FIELD_GET(LNTM_NODE_COUNT, tmp);
1023 gpu_node_map.base_node_id = FIELD_GET(LNTM_BASE_NODE_ID, tmp);
1030 static int fixup_node_id(int node_id, struct mce *m)
1032 /* MCA_IPID[InstanceIdHi] gives the AMD Node ID for the bank. */
1033 u8 nid = (m->ipid >> 44) & 0xF;
1035 if (smca_get_bank_type(m->extcpu, m->bank) != SMCA_UMC_V2)
1038 /* Nodes below the GPU base node are CPU nodes and don't need a fixup. */
1039 if (nid < gpu_node_map.base_node_id)
1042 /* Convert the hardware-provided AMD Node ID to a Linux logical one. */
1043 return nid - gpu_node_map.base_node_id + 1;
1046 /* Protect the PCI config register pairs used for DF indirect access. */
1047 static DEFINE_MUTEX(df_indirect_mutex);
1050 * Data Fabric Indirect Access uses FICAA/FICAD.
1052 * Fabric Indirect Configuration Access Address (FICAA): Constructed based
1053 * on the device's Instance Id and the PCI function and register offset of
1054 * the desired register.
1056 * Fabric Indirect Configuration Access Data (FICAD): There are FICAD LO
1057 * and FICAD HI registers but so far we only need the LO register.
1059 * Use Instance Id 0xFF to indicate a broadcast read.
1061 #define DF_BROADCAST 0xFF
1062 static int __df_indirect_read(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo)
1068 if (node >= amd_nb_num())
1071 F4 = node_to_amd_nb(node)->link;
1075 ficaa = (instance_id == DF_BROADCAST) ? 0 : 1;
1076 ficaa |= reg & 0x3FC;
1077 ficaa |= (func & 0x7) << 11;
1078 ficaa |= instance_id << 16;
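/*
 * Resulting FICAA layout, as constructed above: bit 0 selects an instance
 * (vs. broadcast) access, bits [9:2] carry the register offset, bits
 * [13:11] the function and bits [23:16] the instance id.
 */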
1080 mutex_lock(&df_indirect_mutex);
1082 err = pci_write_config_dword(F4, 0x5C, ficaa);
1084 pr_warn("Error writing DF Indirect FICAA, FICAA=0x%x\n", ficaa);
1088 err = pci_read_config_dword(F4, 0x98, lo);
1090 pr_warn("Error reading DF Indirect FICAD LO, FICAA=0x%x.\n", ficaa);
1093 mutex_unlock(&df_indirect_mutex);
1099 static int df_indirect_read_instance(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo)
1101 return __df_indirect_read(node, func, reg, instance_id, lo);
1104 static int df_indirect_read_broadcast(u16 node, u8 func, u16 reg, u32 *lo)
1106 return __df_indirect_read(node, func, reg, DF_BROADCAST, lo);
1116 static int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr)
1118 u64 dram_base_addr, dram_limit_addr, dram_hole_base;
1120 u8 die_id_shift, die_id_mask, socket_id_shift, socket_id_mask;
1121 u8 intlv_num_dies, intlv_num_chan, intlv_num_sockets;
1122 u8 intlv_addr_sel, intlv_addr_bit;
1123 u8 num_intlv_bits, hashed_bit;
1124 u8 lgcy_mmio_hole_en, base = 0;
1125 u8 cs_mask, cs_id = 0;
1126 bool hash_enabled = false;
1128 struct addr_ctx ctx;
1130 memset(&ctx, 0, sizeof(ctx));
1132 /* Start from the normalized address */
1133 ctx.ret_addr = norm_addr;
1138 /* Read D18F0x1B4 (DramOffset), check if base 1 is used. */
1139 if (df_indirect_read_instance(nid, 0, 0x1B4, umc, &ctx.tmp))
1142 /* Remove HiAddrOffset from normalized address, if enabled: */
1143 if (ctx.tmp & BIT(0)) {
1144 u64 hi_addr_offset = (ctx.tmp & GENMASK_ULL(31, 20)) << 8;
1146 if (norm_addr >= hi_addr_offset) {
1147 ctx.ret_addr -= hi_addr_offset;
1152 /* Read D18F0x110 (DramBaseAddress). */
1153 if (df_indirect_read_instance(nid, 0, 0x110 + (8 * base), umc, &ctx.tmp))
1156 /* Check if address range is valid. */
1157 if (!(ctx.tmp & BIT(0))) {
1158 pr_err("%s: Invalid DramBaseAddress range: 0x%x.\n",
1163 lgcy_mmio_hole_en = ctx.tmp & BIT(1);
1164 intlv_num_chan = (ctx.tmp >> 4) & 0xF;
1165 intlv_addr_sel = (ctx.tmp >> 8) & 0x7;
1166 dram_base_addr = (ctx.tmp & GENMASK_ULL(31, 12)) << 16;
1168 /* {0, 1, 2, 3} map to address bits {8, 9, 10, 11} respectively */
1169 if (intlv_addr_sel > 3) {
1170 pr_err("%s: Invalid interleave address select %d.\n",
1171 __func__, intlv_addr_sel);
1175 /* Read D18F0x114 (DramLimitAddress). */
1176 if (df_indirect_read_instance(nid, 0, 0x114 + (8 * base), umc, &ctx.tmp))
1179 intlv_num_sockets = (ctx.tmp >> 8) & 0x1;
1180 intlv_num_dies = (ctx.tmp >> 10) & 0x3;
1181 dram_limit_addr = ((ctx.tmp & GENMASK_ULL(31, 12)) << 16) | GENMASK_ULL(27, 0);
1183 intlv_addr_bit = intlv_addr_sel + 8;
1185 /* Re-use intlv_num_chan by setting it equal to log2(#channels) */
1186 switch (intlv_num_chan) {
1187 case 0: intlv_num_chan = 0; break;
1188 case 1: intlv_num_chan = 1; break;
1189 case 3: intlv_num_chan = 2; break;
1190 case 5: intlv_num_chan = 3; break;
1191 case 7: intlv_num_chan = 4; break;
1193 case 8: intlv_num_chan = 1;
1194 hash_enabled = true;
1197 pr_err("%s: Invalid number of interleaved channels %d.\n",
1198 __func__, intlv_num_chan);
1202 num_intlv_bits = intlv_num_chan;
1204 if (intlv_num_dies > 2) {
1205 pr_err("%s: Invalid number of interleaved nodes/dies %d.\n",
1206 __func__, intlv_num_dies);
1210 num_intlv_bits += intlv_num_dies;
1212 /* Add a bit if sockets are interleaved. */
1213 num_intlv_bits += intlv_num_sockets;
1215 /* Assert num_intlv_bits <= 4 */
1216 if (num_intlv_bits > 4) {
1217 pr_err("%s: Invalid interleave bits %d.\n",
1218 __func__, num_intlv_bits);
1222 if (num_intlv_bits > 0) {
1223 u64 temp_addr_x, temp_addr_i, temp_addr_y;
1224 u8 die_id_bit, sock_id_bit, cs_fabric_id;
1227 * Read FabricBlockInstanceInformation3_CS[BlockFabricID].
1228 * This is the fabric id for this coherent slave. Use
1229 * umc/channel# as instance id of the coherent slave
1232 if (df_indirect_read_instance(nid, 0, 0x50, umc, &ctx.tmp))
1235 cs_fabric_id = (ctx.tmp >> 8) & 0xFF;
1238 /* If interleaved over more than 1 channel: */
1239 if (intlv_num_chan) {
1240 die_id_bit = intlv_num_chan;
1241 cs_mask = (1 << die_id_bit) - 1;
1242 cs_id = cs_fabric_id & cs_mask;
1245 sock_id_bit = die_id_bit;
1247 /* Read D18F1x208 (SystemFabricIdMask). */
1248 if (intlv_num_dies || intlv_num_sockets)
1249 if (df_indirect_read_broadcast(nid, 1, 0x208, &ctx.tmp))
1252 /* If interleaved over more than 1 die. */
1253 if (intlv_num_dies) {
1254 sock_id_bit = die_id_bit + intlv_num_dies;
1255 die_id_shift = (ctx.tmp >> 24) & 0xF;
1256 die_id_mask = (ctx.tmp >> 8) & 0xFF;
1258 cs_id |= ((cs_fabric_id & die_id_mask) >> die_id_shift) << die_id_bit;
1261 /* If interleaved over more than 1 socket. */
1262 if (intlv_num_sockets) {
1263 socket_id_shift = (ctx.tmp >> 28) & 0xF;
1264 socket_id_mask = (ctx.tmp >> 16) & 0xFF;
1266 cs_id |= ((cs_fabric_id & socket_id_mask) >> socket_id_shift) << sock_id_bit;
1270 * The pre-interleaved address consists of XXXXXXIIIYYYYY
1271 * where III is the ID for this CS, and XXXXXXYYYYY are the
1272 * address bits from the post-interleaved address.
1273 * "num_intlv_bits" has been calculated to tell us how many "I"
1274 * bits there are. "intlv_addr_bit" tells us how many "Y" bits
1275 * there are (where "I" starts).
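 *
 * Hypothetical example: with intlv_addr_bit = 8, num_intlv_bits = 2 and
 * cs_id = 1, a post-interleave address of 0x1234 is reassembled as
 * (0x1200 << 2) | (1 << 8) | 0x34 = 0x4934.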
1277 temp_addr_y = ctx.ret_addr & GENMASK_ULL(intlv_addr_bit - 1, 0);
1278 temp_addr_i = (cs_id << intlv_addr_bit);
1279 temp_addr_x = (ctx.ret_addr & GENMASK_ULL(63, intlv_addr_bit)) << num_intlv_bits;
1280 ctx.ret_addr = temp_addr_x | temp_addr_i | temp_addr_y;
1283 /* Add dram base address */
1284 ctx.ret_addr += dram_base_addr;
1286 /* If legacy MMIO hole enabled */
1287 if (lgcy_mmio_hole_en) {
1288 if (df_indirect_read_broadcast(nid, 0, 0x104, &ctx.tmp))
1291 dram_hole_base = ctx.tmp & GENMASK(31, 24);
1292 if (ctx.ret_addr >= dram_hole_base)
1293 ctx.ret_addr += (BIT_ULL(32) - dram_hole_base);
1297 /* Save some parentheses and grab ls-bit at the end. */
1298 hashed_bit = (ctx.ret_addr >> 12) ^
1299 (ctx.ret_addr >> 18) ^
1300 (ctx.ret_addr >> 21) ^
1301 (ctx.ret_addr >> 30) ^
1304 hashed_bit &= BIT(0);
1306 if (hashed_bit != ((ctx.ret_addr >> intlv_addr_bit) & BIT(0)))
1307 ctx.ret_addr ^= BIT(intlv_addr_bit);
1310 /* Is the calculated system address above the DRAM limit address? */
1311 if (ctx.ret_addr > dram_limit_addr)
1314 *sys_addr = ctx.ret_addr;
1321 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
1324 * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
1327 static unsigned long dct_determine_edac_cap(struct amd64_pvt *pvt)
1329 unsigned long edac_cap = EDAC_FLAG_NONE;
1332 bit = (pvt->fam > 0xf || pvt->ext_model >= K8_REV_F)
1336 if (pvt->dclr0 & BIT(bit))
1337 edac_cap = EDAC_FLAG_SECDED;
1342 static unsigned long umc_determine_edac_cap(struct amd64_pvt *pvt)
1344 u8 i, umc_en_mask = 0, dimm_ecc_en_mask = 0;
1345 unsigned long edac_cap = EDAC_FLAG_NONE;
1348 if (!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT))
1351 umc_en_mask |= BIT(i);
1353 /* UMC Configuration bit 12 (DimmEccEn) */
1354 if (pvt->umc[i].umc_cfg & BIT(12))
1355 dimm_ecc_en_mask |= BIT(i);
1358 if (umc_en_mask == dimm_ecc_en_mask)
1359 edac_cap = EDAC_FLAG_SECDED;
1365 * debug routine to display the memory sizes of all logical DIMMs and their
1368 static void dct_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
1370 u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
1371 u32 dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
1372 int dimm, size0, size1;
1374 if (pvt->fam == 0xf) {
1375 /* K8 families < revF not supported yet */
1376 if (pvt->ext_model < K8_REV_F)
1382 if (pvt->fam == 0x10) {
1383 dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1
1385 dcsb = (ctrl && !dct_ganging_enabled(pvt)) ?
1386 pvt->csels[1].csbases :
1387 pvt->csels[0].csbases;
1390 dcsb = pvt->csels[1].csbases;
1392 edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
1395 edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
1397 /* Dump memory sizes for DIMM and its CSROWs */
1398 for (dimm = 0; dimm < 4; dimm++) {
1400 if (dcsb[dimm * 2] & DCSB_CS_ENABLE)
1402 * For F15h M60h, we need a multiplier for LRDIMM cs_size
1403 * calculation. We pass dimm value to the dbam_to_cs
1404 * mapper so we can find the multiplier from the
1405 * corresponding DCSM.
1407 size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
1408 DBAM_DIMM(dimm, dbam),
1412 if (dcsb[dimm * 2 + 1] & DCSB_CS_ENABLE)
1413 size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
1414 DBAM_DIMM(dimm, dbam),
1417 amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
1419 dimm * 2 + 1, size1);
1424 static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
1426 edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);
1428 if (pvt->dram_type == MEM_LRDDR3) {
1429 u32 dcsm = pvt->csels[chan].csmasks[0];
1431 * It's assumed all LRDIMMs in a DCT are going to be of
1432 * the same 'type' until proven otherwise. So, use a cs
1433 * value of '0' here to get the dcsm value.
1435 edac_dbg(1, " LRDIMM %dx rank multiply\n", (dcsm & 0x3));
1438 edac_dbg(1, "All DIMMs support ECC:%s\n",
1439 (dclr & BIT(19)) ? "yes" : "no");
1442 edac_dbg(1, " PAR/ERR parity: %s\n",
1443 (dclr & BIT(8)) ? "enabled" : "disabled");
1445 if (pvt->fam == 0x10)
1446 edac_dbg(1, " DCT 128bit mode width: %s\n",
1447 (dclr & BIT(11)) ? "128b" : "64b");
1449 edac_dbg(1, " x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
1450 (dclr & BIT(12)) ? "yes" : "no",
1451 (dclr & BIT(13)) ? "yes" : "no",
1452 (dclr & BIT(14)) ? "yes" : "no",
1453 (dclr & BIT(15)) ? "yes" : "no");
1456 #define CS_EVEN_PRIMARY BIT(0)
1457 #define CS_ODD_PRIMARY BIT(1)
1458 #define CS_EVEN_SECONDARY BIT(2)
1459 #define CS_ODD_SECONDARY BIT(3)
1460 #define CS_3R_INTERLEAVE BIT(4)
1462 #define CS_EVEN (CS_EVEN_PRIMARY | CS_EVEN_SECONDARY)
1463 #define CS_ODD (CS_ODD_PRIMARY | CS_ODD_SECONDARY)
1465 static int umc_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt)
1470 if (csrow_enabled(2 * dimm, ctrl, pvt))
1471 cs_mode |= CS_EVEN_PRIMARY;
1473 if (csrow_enabled(2 * dimm + 1, ctrl, pvt))
1474 cs_mode |= CS_ODD_PRIMARY;
1476 /* Asymmetric dual-rank DIMM support. */
1477 if (csrow_sec_enabled(2 * dimm + 1, ctrl, pvt))
1478 cs_mode |= CS_ODD_SECONDARY;
1481 * 3 Rank interleaving support.
1482 * There should be only three bases enabled and their two masks should
1485 for_each_chip_select(base, ctrl, pvt)
1486 count += csrow_enabled(base, ctrl, pvt);
1489 pvt->csels[ctrl].csmasks[0] == pvt->csels[ctrl].csmasks[1]) {
1490 edac_dbg(1, "3R interleaving in use.\n");
1491 cs_mode |= CS_3R_INTERLEAVE;
1497 static int __addr_mask_to_cs_size(u32 addr_mask_orig, unsigned int cs_mode,
1498 int csrow_nr, int dimm)
1500 u32 msb, weight, num_zero_bits;
1501 u32 addr_mask_deinterleaved;
1505 * The number of zero bits in the mask is equal to the number of bits
1506 * in a full mask minus the number of bits in the current mask.
1508 * The MSB is the number of bits in the full mask because BIT[0] is
1511 * In the special 3 Rank interleaving case, a single bit is flipped
1512 * without swapping with the most significant bit. This can be handled
1513 * by keeping the MSB where it is and ignoring the single zero bit.
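 *
 * (Hypothetical example: an interleaved mask of 0x2fffe has msb = 17 and
 * weight = 16, i.e. one zero bit, and deinterleaves to 0x1fffe.)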
1515 msb = fls(addr_mask_orig) - 1;
1516 weight = hweight_long(addr_mask_orig);
1517 num_zero_bits = msb - weight - !!(cs_mode & CS_3R_INTERLEAVE);
1519 /* Take the number of zero bits off from the top of the mask. */
1520 addr_mask_deinterleaved = GENMASK_ULL(msb - num_zero_bits, 1);
1522 edac_dbg(1, "CS%d DIMM%d AddrMasks:\n", csrow_nr, dimm);
1523 edac_dbg(1, " Original AddrMask: 0x%x\n", addr_mask_orig);
1524 edac_dbg(1, " Deinterleaved AddrMask: 0x%x\n", addr_mask_deinterleaved);
1526 /* Register [31:1] = Address [39:9]. Size is in kBs here. */
1527 size = (addr_mask_deinterleaved >> 2) + 1;
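/* e.g. a deinterleaved mask of 0x1fffe decodes to 32768 kB (32 MB) here */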
1529 /* Return size in MBs. */
1533 static int umc_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
1534 unsigned int cs_mode, int csrow_nr)
1536 int cs_mask_nr = csrow_nr;
1540 /* No Chip Selects are enabled. */
1544 /* Requested size of an even CS but none are enabled. */
1545 if (!(cs_mode & CS_EVEN) && !(csrow_nr & 1))
1548 /* Requested size of an odd CS but none are enabled. */
1549 if (!(cs_mode & CS_ODD) && (csrow_nr & 1))
1553 * Family 17h introduced systems with one mask per DIMM,
1554 * and two Chip Selects per DIMM.
1556 * CS0 and CS1 -> MASK0 / DIMM0
1557 * CS2 and CS3 -> MASK1 / DIMM1
1559 * Family 19h Model 10h introduced systems with one mask per Chip Select,
1560 * and two Chip Selects per DIMM.
1562 * CS0 -> MASK0 -> DIMM0
1563 * CS1 -> MASK1 -> DIMM0
1564 * CS2 -> MASK2 -> DIMM1
1565 * CS3 -> MASK3 -> DIMM1
1567 * Keep the mask number equal to the Chip Select number for newer systems,
1568 * and shift the mask number for older systems.
1570 dimm = csrow_nr >> 1;
1572 if (!pvt->flags.zn_regs_v2)
1575 /* Asymmetric dual-rank DIMM support. */
1576 if ((csrow_nr & 1) && (cs_mode & CS_ODD_SECONDARY))
1577 addr_mask_orig = pvt->csels[umc].csmasks_sec[cs_mask_nr];
1579 addr_mask_orig = pvt->csels[umc].csmasks[cs_mask_nr];
1581 return __addr_mask_to_cs_size(addr_mask_orig, cs_mode, csrow_nr, dimm);
1584 static void umc_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
1586 int dimm, size0, size1, cs0, cs1, cs_mode;
1588 edac_printk(KERN_DEBUG, EDAC_MC, "UMC%d chip selects:\n", ctrl);
1590 for (dimm = 0; dimm < 2; dimm++) {
1594 cs_mode = umc_get_cs_mode(dimm, ctrl, pvt);
1596 size0 = umc_addr_mask_to_cs_size(pvt, ctrl, cs_mode, cs0);
1597 size1 = umc_addr_mask_to_cs_size(pvt, ctrl, cs_mode, cs1);
1599 amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
1605 static void umc_dump_misc_regs(struct amd64_pvt *pvt)
1607 struct amd64_umc *umc;
1608 u32 i, tmp, umc_base;
1611 umc_base = get_umc_base(i);
1614 edac_dbg(1, "UMC%d DIMM cfg: 0x%x\n", i, umc->dimm_cfg);
1615 edac_dbg(1, "UMC%d UMC cfg: 0x%x\n", i, umc->umc_cfg);
1616 edac_dbg(1, "UMC%d SDP ctrl: 0x%x\n", i, umc->sdp_ctrl);
1617 edac_dbg(1, "UMC%d ECC ctrl: 0x%x\n", i, umc->ecc_ctrl);
1619 amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ECC_BAD_SYMBOL, &tmp);
1620 edac_dbg(1, "UMC%d ECC bad symbol: 0x%x\n", i, tmp);
1622 amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_UMC_CAP, &tmp);
1623 edac_dbg(1, "UMC%d UMC cap: 0x%x\n", i, tmp);
1624 edac_dbg(1, "UMC%d UMC cap high: 0x%x\n", i, umc->umc_cap_hi);
1626 edac_dbg(1, "UMC%d ECC capable: %s, ChipKill ECC capable: %s\n",
1627 i, (umc->umc_cap_hi & BIT(30)) ? "yes" : "no",
1628 (umc->umc_cap_hi & BIT(31)) ? "yes" : "no");
1629 edac_dbg(1, "UMC%d All DIMMs support ECC: %s\n",
1630 i, (umc->umc_cfg & BIT(12)) ? "yes" : "no");
1631 edac_dbg(1, "UMC%d x4 DIMMs present: %s\n",
1632 i, (umc->dimm_cfg & BIT(6)) ? "yes" : "no");
1633 edac_dbg(1, "UMC%d x16 DIMMs present: %s\n",
1634 i, (umc->dimm_cfg & BIT(7)) ? "yes" : "no");
1636 if (umc->dram_type == MEM_LRDDR4 || umc->dram_type == MEM_LRDDR5) {
1637 amd_smn_read(pvt->mc_node_id,
1638 umc_base + get_umc_reg(pvt, UMCCH_ADDR_CFG),
1640 edac_dbg(1, "UMC%d LRDIMM %dx rank multiply\n",
1641 i, 1 << ((tmp >> 4) & 0x3));
1644 umc_debug_display_dimm_sizes(pvt, i);
1648 static void dct_dump_misc_regs(struct amd64_pvt *pvt)
1650 edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
1652 edac_dbg(1, " NB two channel DRAM capable: %s\n",
1653 (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");
1655 edac_dbg(1, " ECC capable: %s, ChipKill ECC capable: %s\n",
1656 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
1657 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
1659 debug_dump_dramcfg_low(pvt, pvt->dclr0, 0);
1661 edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);
1663 edac_dbg(1, "F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, offset: 0x%08x\n",
1664 pvt->dhar, dhar_base(pvt),
1665 (pvt->fam == 0xf) ? k8_dhar_offset(pvt)
1666 : f10_dhar_offset(pvt));
1668 dct_debug_display_dimm_sizes(pvt, 0);
1670 /* everything below this point is Fam10h and above */
1671 if (pvt->fam == 0xf)
1674 dct_debug_display_dimm_sizes(pvt, 1);
1676 /* Only if NOT ganged does dclr1 have valid info */
1677 if (!dct_ganging_enabled(pvt))
1678 debug_dump_dramcfg_low(pvt, pvt->dclr1, 1);
1680 edac_dbg(1, " DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
1682 amd64_info("using x%u syndromes.\n", pvt->ecc_sym_sz);
1686 * See BKDG, F2x[1,0][5C:40], F2[1,0][6C:60]
1688 static void dct_prep_chip_selects(struct amd64_pvt *pvt)
1690 if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
1691 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
1692 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
1693 } else if (pvt->fam == 0x15 && pvt->model == 0x30) {
1694 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4;
1695 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2;
1697 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
1698 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
1702 static void umc_prep_chip_selects(struct amd64_pvt *pvt)
1707 pvt->csels[umc].b_cnt = 4;
1708 pvt->csels[umc].m_cnt = pvt->flags.zn_regs_v2 ? 4 : 2;
1712 static void umc_read_base_mask(struct amd64_pvt *pvt)
1714 u32 umc_base_reg, umc_base_reg_sec;
1715 u32 umc_mask_reg, umc_mask_reg_sec;
1716 u32 base_reg, base_reg_sec;
1717 u32 mask_reg, mask_reg_sec;
1718 u32 *base, *base_sec;
1719 u32 *mask, *mask_sec;
1723 umc_base_reg = get_umc_base(umc) + UMCCH_BASE_ADDR;
1724 umc_base_reg_sec = get_umc_base(umc) + UMCCH_BASE_ADDR_SEC;
1726 for_each_chip_select(cs, umc, pvt) {
1727 base = &pvt->csels[umc].csbases[cs];
1728 base_sec = &pvt->csels[umc].csbases_sec[cs];
1730 base_reg = umc_base_reg + (cs * 4);
1731 base_reg_sec = umc_base_reg_sec + (cs * 4);
1733 if (!amd_smn_read(pvt->mc_node_id, base_reg, base))
1734 edac_dbg(0, " DCSB%d[%d]=0x%08x reg: 0x%x\n",
1735 umc, cs, *base, base_reg);
1737 if (!amd_smn_read(pvt->mc_node_id, base_reg_sec, base_sec))
1738 edac_dbg(0, " DCSB_SEC%d[%d]=0x%08x reg: 0x%x\n",
1739 umc, cs, *base_sec, base_reg_sec);
1742 umc_mask_reg = get_umc_base(umc) + UMCCH_ADDR_MASK;
1743 umc_mask_reg_sec = get_umc_base(umc) + get_umc_reg(pvt, UMCCH_ADDR_MASK_SEC);
1745 for_each_chip_select_mask(cs, umc, pvt) {
1746 mask = &pvt->csels[umc].csmasks[cs];
1747 mask_sec = &pvt->csels[umc].csmasks_sec[cs];
1749 mask_reg = umc_mask_reg + (cs * 4);
1750 mask_reg_sec = umc_mask_reg_sec + (cs * 4);
1752 if (!amd_smn_read(pvt->mc_node_id, mask_reg, mask))
1753 edac_dbg(0, " DCSM%d[%d]=0x%08x reg: 0x%x\n",
1754 umc, cs, *mask, mask_reg);
1756 if (!amd_smn_read(pvt->mc_node_id, mask_reg_sec, mask_sec))
1757 edac_dbg(0, " DCSM_SEC%d[%d]=0x%08x reg: 0x%x\n",
1758 umc, cs, *mask_sec, mask_reg_sec);
1764 * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
1766 static void dct_read_base_mask(struct amd64_pvt *pvt)
1770 for_each_chip_select(cs, 0, pvt) {
1771 int reg0 = DCSB0 + (cs * 4);
1772 int reg1 = DCSB1 + (cs * 4);
1773 u32 *base0 = &pvt->csels[0].csbases[cs];
1774 u32 *base1 = &pvt->csels[1].csbases[cs];
1776 if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0))
1777 edac_dbg(0, " DCSB0[%d]=0x%08x reg: F2x%x\n",
1780 if (pvt->fam == 0xf)
1783 if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1))
1784 edac_dbg(0, " DCSB1[%d]=0x%08x reg: F2x%x\n",
1785 cs, *base1, (pvt->fam == 0x10) ? reg1
1789 for_each_chip_select_mask(cs, 0, pvt) {
1790 int reg0 = DCSM0 + (cs * 4);
1791 int reg1 = DCSM1 + (cs * 4);
1792 u32 *mask0 = &pvt->csels[0].csmasks[cs];
1793 u32 *mask1 = &pvt->csels[1].csmasks[cs];
1795 if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0))
1796 edac_dbg(0, " DCSM0[%d]=0x%08x reg: F2x%x\n",
1799 if (pvt->fam == 0xf)
1802 if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1))
1803 edac_dbg(0, " DCSM1[%d]=0x%08x reg: F2x%x\n",
1804 cs, *mask1, (pvt->fam == 0x10) ? reg1
1809 static void umc_determine_memory_type(struct amd64_pvt *pvt)
1811 struct amd64_umc *umc;
1817 if (!(umc->sdp_ctrl & UMC_SDP_INIT)) {
1818 umc->dram_type = MEM_EMPTY;
1823 * Check if the system supports the "DDR Type" field in UMC Config
1824 * and has DDR5 DIMMs in use.
1826 if (pvt->flags.zn_regs_v2 && ((umc->umc_cfg & GENMASK(2, 0)) == 0x1)) {
1827 if (umc->dimm_cfg & BIT(5))
1828 umc->dram_type = MEM_LRDDR5;
1829 else if (umc->dimm_cfg & BIT(4))
1830 umc->dram_type = MEM_RDDR5;
1832 umc->dram_type = MEM_DDR5;
1834 if (umc->dimm_cfg & BIT(5))
1835 umc->dram_type = MEM_LRDDR4;
1836 else if (umc->dimm_cfg & BIT(4))
1837 umc->dram_type = MEM_RDDR4;
1839 umc->dram_type = MEM_DDR4;
1842 edac_dbg(1, " UMC%d DIMM type: %s\n", i, edac_mem_types[umc->dram_type]);
1846 static void dct_determine_memory_type(struct amd64_pvt *pvt)
1848 u32 dram_ctrl, dcsm;
1852 if (pvt->ext_model >= K8_REV_F)
1855 pvt->dram_type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
1859 if (pvt->dchr0 & DDR3_MODE)
1862 pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
1866 if (pvt->model < 0x60)
1870 * Model 0x60h needs special handling:
1872 * We use a Chip Select value of '0' to obtain dcsm.
1873 * Theoretically, it is possible to populate LRDIMMs of different
1874 * 'Rank' value on a DCT. But this is not the common case. So,
1875 * it's reasonable to assume all DIMMs are going to be of the same
1876 * 'type' until proven otherwise.
1878 amd64_read_dct_pci_cfg(pvt, 0, DRAM_CONTROL, &dram_ctrl);
1879 dcsm = pvt->csels[0].csmasks[0];
1881 if (((dram_ctrl >> 8) & 0x7) == 0x2)
1882 pvt->dram_type = MEM_DDR4;
1883 else if (pvt->dclr0 & BIT(16))
1884 pvt->dram_type = MEM_DDR3;
1885 else if (dcsm & 0x3)
1886 pvt->dram_type = MEM_LRDDR3;
1888 pvt->dram_type = MEM_RDDR3;
1896 WARN(1, KERN_ERR "%s: Family??? 0x%x\n", __func__, pvt->fam);
1897 pvt->dram_type = MEM_EMPTY;
1900 edac_dbg(1, " DIMM type: %s\n", edac_mem_types[pvt->dram_type]);
1904 pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
1907 /* On F10h and later ErrAddr is MC4_ADDR[47:1] */
1908 static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
1910 u16 mce_nid = topology_die_id(m->extcpu);
1911 struct mem_ctl_info *mci;
1916 mci = edac_mc_find(mce_nid);
1920 pvt = mci->pvt_info;
1922 if (pvt->fam == 0xf) {
1927 addr = m->addr & GENMASK_ULL(end_bit, start_bit);
1930 * Erratum 637 workaround
1932 if (pvt->fam == 0x15) {
1933 u64 cc6_base, tmp_addr;
1937 if ((addr & GENMASK_ULL(47, 24)) >> 24 != 0x00fdf7)
1941 amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_LIM, &tmp);
1942 intlv_en = tmp >> 21 & 0x7;
1944 /* add [47:27] + 3 trailing bits */
1945 cc6_base = (tmp & GENMASK_ULL(20, 0)) << 3;
1947 /* reverse and add DramIntlvEn */
1948 cc6_base |= intlv_en ^ 0x7;
1950 /* pin at [47:24] */
1954 return cc6_base | (addr & GENMASK_ULL(23, 0));
1956 amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp);
1959 tmp_addr = (addr & GENMASK_ULL(23, 12)) << __fls(intlv_en + 1);
1961 /* OR DramIntlvSel into bits [14:12] */
1962 tmp_addr |= (tmp & GENMASK_ULL(23, 21)) >> 9;
1964 /* add remaining [11:0] bits from original MC4_ADDR */
1965 tmp_addr |= addr & GENMASK_ULL(11, 0);
1967 return cc6_base | tmp_addr;
1973 static struct pci_dev *pci_get_related_function(unsigned int vendor,
1974 unsigned int device,
1975 struct pci_dev *related)
1977 struct pci_dev *dev = NULL;
1979 while ((dev = pci_get_device(vendor, device, dev))) {
1980 if (pci_domain_nr(dev->bus) == pci_domain_nr(related->bus) &&
1981 (dev->bus->number == related->bus->number) &&
1982 (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
1989 static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
1991 struct amd_northbridge *nb;
1992 struct pci_dev *f1 = NULL;
1993 unsigned int pci_func;
1994 int off = range << 3;
1997 amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off, &pvt->ranges[range].base.lo);
1998 amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);
2000 if (pvt->fam == 0xf)
2003 if (!dram_rw(pvt, range))
2006 amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off, &pvt->ranges[range].base.hi);
2007 amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);
2009 /* F15h: factor in CC6 save area by reading dst node's limit reg */
2010 if (pvt->fam != 0x15)
2013 nb = node_to_amd_nb(dram_dst_node(pvt, range));
2017 if (pvt->model == 0x60)
2018 pci_func = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1;
2019 else if (pvt->model == 0x30)
2020 pci_func = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1;
2022 pci_func = PCI_DEVICE_ID_AMD_15H_NB_F1;
2024 f1 = pci_get_related_function(nb->misc->vendor, pci_func, nb->misc);
2028 amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);
2030 pvt->ranges[range].lim.lo &= GENMASK_ULL(15, 0);
2032 /* {[39:27],111b} */
2033 pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;
2035 pvt->ranges[range].lim.hi &= GENMASK_ULL(7, 0);
2038 pvt->ranges[range].lim.hi |= llim >> 13;
2043 static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
2044 struct err_info *err)
2046 struct amd64_pvt *pvt = mci->pvt_info;
2048 error_address_to_page_and_offset(sys_addr, err);
2051 * Find out which node the error address belongs to. This may be
2052 * different from the node that detected the error.
2054 err->src_mci = find_mc_by_sys_addr(mci, sys_addr);
2055 if (!err->src_mci) {
2056 amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
2057 (unsigned long)sys_addr);
2058 err->err_code = ERR_NODE;
2062 /* Now map the sys_addr to a CSROW */
2063 err->csrow = sys_addr_to_csrow(err->src_mci, sys_addr);
2064 if (err->csrow < 0) {
2065 err->err_code = ERR_CSROW;
2069 /* CHIPKILL enabled */
2070 if (pvt->nbcfg & NBCFG_CHIPKILL) {
2071 err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
2072 if (err->channel < 0) {
2074 * Syndrome didn't map, so we don't know which of the
2075 * 2 DIMMs is in error. So we need to ID 'both' of them
2078 amd64_mc_warn(err->src_mci, "unknown syndrome 0x%04x - "
2079 "possible error reporting race\n",
2081 err->err_code = ERR_CHANNEL;
2086 * non-chipkill ecc mode
2088 * The k8 documentation is unclear about how to determine the
2089 * channel number when using non-chipkill memory. This method
2090 * was obtained from email communication with someone at AMD.
2091 * (Wish the email was placed in this comment - norsk)
2093 err->channel = ((sys_addr & BIT(3)) != 0);
2097 static int ddr2_cs_size(unsigned i, bool dct_width)
2103 else if (!(i & 0x1))
2106 shift = (i + 1) >> 1;
2108 return 128 << (shift + !!dct_width);
2111 static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
2112 unsigned cs_mode, int cs_mask_nr)
2114 u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
2116 if (pvt->ext_model >= K8_REV_F) {
2117 WARN_ON(cs_mode > 11);
2118 return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
2120 else if (pvt->ext_model >= K8_REV_D) {
2122 WARN_ON(cs_mode > 10);
2125 * the below calculation, besides trying to win an obfuscated C
2126 * contest, maps cs_mode values to DIMM chip select sizes. The
2129 * cs_mode CS size (mb)
2130 * ======= ============
2143 * Basically, it calculates a value with which to shift the
2144 * smallest CS size of 32MB.
2146 * ddr[23]_cs_size have a similar purpose.
2148 diff = cs_mode/3 + (unsigned)(cs_mode > 5);
2150 return 32 << (cs_mode - diff);
2153 WARN_ON(cs_mode > 6);
2154 return 32 << cs_mode;
2158 static int ddr3_cs_size(unsigned i, bool dct_width)
2163 if (i == 0 || i == 3 || i == 4)
2169 else if (!(i & 0x1))
2172 shift = (i + 1) >> 1;
2175 cs_size = (128 * (1 << !!dct_width)) << shift;
2180 static int ddr3_lrdimm_cs_size(unsigned i, unsigned rank_multiply)
2185 if (i < 4 || i == 6)
2189 else if (!(i & 0x1))
2192 shift = (i + 1) >> 1;
2195 cs_size = rank_multiply * (128 << shift);
2200 static int ddr4_cs_size(unsigned i)
2209 /* Min cs_size = 1G */
2210 cs_size = 1024 * (1 << (i >> 1));
2215 static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
2216 unsigned cs_mode, int cs_mask_nr)
2218 u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
2220 WARN_ON(cs_mode > 11);
2222 if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
2223 return ddr3_cs_size(cs_mode, dclr & WIDTH_128);
2225 return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
2229 * F15h supports only 64bit DCT interfaces
2231 static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
2232 unsigned cs_mode, int cs_mask_nr)
2234 WARN_ON(cs_mode > 12);
2236 return ddr3_cs_size(cs_mode, false);
2239 /* F15h M60h supports DDR4 mapping as well. */
2240 static int f15_m60h_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
2241 unsigned cs_mode, int cs_mask_nr)
2244 u32 dcsm = pvt->csels[dct].csmasks[cs_mask_nr];
2246 WARN_ON(cs_mode > 12);
2248 if (pvt->dram_type == MEM_DDR4) {
2252 cs_size = ddr4_cs_size(cs_mode);
2253 } else if (pvt->dram_type == MEM_LRDDR3) {
2254 unsigned rank_multiply = dcsm & 0xf;
2256 if (rank_multiply == 3)
2258 cs_size = ddr3_lrdimm_cs_size(cs_mode, rank_multiply);
2260 /* Minimum cs size is 512MB for F15h M60h */
2264 cs_size = ddr3_cs_size(cs_mode, false);
2271 * F16h and F15h model 30h have only limited cs_modes.
2273 static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
2274 unsigned cs_mode, int cs_mask_nr)
2276 WARN_ON(cs_mode > 12);
2278 if (cs_mode == 6 || cs_mode == 8 ||
2279 cs_mode == 9 || cs_mode == 12)
2282 return ddr3_cs_size(cs_mode, false);
2285 static void read_dram_ctl_register(struct amd64_pvt *pvt)
2288 if (pvt->fam == 0xf)
2291 if (!amd64_read_pci_cfg(pvt->F2, DCT_SEL_LO, &pvt->dct_sel_lo)) {
2292 edac_dbg(0, "F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
2293 pvt->dct_sel_lo, dct_sel_baseaddr(pvt));
2295 edac_dbg(0, " DCTs operate in %s mode\n",
2296 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));
2298 if (!dct_ganging_enabled(pvt))
2299 edac_dbg(0, " Address range split per DCT: %s\n",
2300 (dct_high_range_enabled(pvt) ? "yes" : "no"));
2302 edac_dbg(0, " data interleave for ECC: %s, DRAM cleared since last warm reset: %s\n",
2303 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
2304 (dct_memory_cleared(pvt) ? "yes" : "no"));
2306 edac_dbg(0, " channel interleave: %s, "
2307 "interleave bits selector: 0x%x\n",
2308 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
2309 dct_sel_interleave_addr(pvt));
2312 amd64_read_pci_cfg(pvt->F2, DCT_SEL_HI, &pvt->dct_sel_hi);
2316 * Determine channel (DCT) based on the interleaving mode (see F15h M30h BKDG,
2317 * 2.10.12 Memory Interleaving Modes).
2319 static u8 f15_m30h_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
2320 u8 intlv_en, int num_dcts_intlv,
2327 return (u8)(dct_sel);
2329 if (num_dcts_intlv == 2) {
2330 select = (sys_addr >> 8) & 0x3;
2331 channel = select ? 0x3 : 0;
2332 } else if (num_dcts_intlv == 4) {
2333 u8 intlv_addr = dct_sel_interleave_addr(pvt);
2334 switch (intlv_addr) {
2336 channel = (sys_addr >> 8) & 0x3;
2339 channel = (sys_addr >> 9) & 0x3;
2347 * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory
2348 * Interleaving Modes.
2350 static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
2351 bool hi_range_sel, u8 intlv_en)
2353 u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1;
2355 if (dct_ganging_enabled(pvt))
2359 return dct_sel_high;
2362 * see F2x110[DctSelIntLvAddr] - channel interleave mode
2364 if (dct_interleave_enabled(pvt)) {
2365 u8 intlv_addr = dct_sel_interleave_addr(pvt);
2367 /* return DCT select function: 0=DCT0, 1=DCT1 */
2369 return sys_addr >> 6 & 1;
2371 if (intlv_addr & 0x2) {
2372 u8 shift = intlv_addr & 0x1 ? 9 : 6;
2373 u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) & 1;
2375 return ((sys_addr >> shift) & 1) ^ temp;
2378 if (intlv_addr & 0x4) {
2379 u8 shift = intlv_addr & 0x1 ? 9 : 8;
2381 return (sys_addr >> shift) & 1;
2384 return (sys_addr >> (12 + hweight8(intlv_en))) & 1;
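/*
* Arithmetic illustration of the fallback above: with intlv_en = 0x7 (three
* interleave bits set), hweight8(intlv_en) = 3, so sys_addr bit 15 selects
* the DCT.
*/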
2387 if (dct_high_range_enabled(pvt))
2388 return ~dct_sel_high & 1;
2393 /* Convert the sys_addr to the normalized DCT address */
2394 static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
2395 u64 sys_addr, bool hi_rng,
2396 u32 dct_sel_base_addr)
2399 u64 dram_base = get_dram_base(pvt, range);
2400 u64 hole_off = f10_dhar_offset(pvt);
2401 u64 dct_sel_base_off = (u64)(pvt->dct_sel_hi & 0xFFFFFC00) << 16;
2406 * base address of high range is below 4Gb
2407 * (bits [47:27] at [31:11])
2408 * DRAM address space on this DCT is hoisted above 4Gb &&
2411 * remove hole offset from sys_addr
2413 * remove high range offset from sys_addr
2415 if ((!(dct_sel_base_addr >> 16) ||
2416 dct_sel_base_addr < dhar_base(pvt)) &&
2418 (sys_addr >= BIT_64(32)))
2419 chan_off = hole_off;
2421 chan_off = dct_sel_base_off;
2425 * we have a valid hole &&
2430 * remove dram base to normalize to DCT address
2432 if (dhar_valid(pvt) && (sys_addr >= BIT_64(32)))
2433 chan_off = hole_off;
2435 chan_off = dram_base;
2438 return (sys_addr & GENMASK_ULL(47,6)) - (chan_off & GENMASK_ULL(47,23));
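/*
* Descriptive note on the masks above: sys_addr is kept at 64-byte
* granularity (bits [47:6]), while the channel offset is taken at 8 MB
* granularity (bits [47:23]), since 2^23 = 8 MB.
*/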
2442 * checks if the csrow passed in is marked as SPARED, if so returns the new spare row.
2445 static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
2449 if (online_spare_swap_done(pvt, dct) &&
2450 csrow == online_spare_bad_dramcs(pvt, dct)) {
2452 for_each_chip_select(tmp_cs, dct, pvt) {
2453 if (chip_select_base(tmp_cs, dct, pvt) & 0x2) {
2463 * Iterate over the DRAM DCT "base" and "mask" registers looking for a
2464 * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
2467 * -EINVAL: NOT FOUND
2468 * 0..csrow = Chip-Select Row
2470 static int f1x_lookup_addr_in_dct(u64 in_addr, u8 nid, u8 dct)
2472 struct mem_ctl_info *mci;
2473 struct amd64_pvt *pvt;
2474 u64 cs_base, cs_mask;
2475 int cs_found = -EINVAL;
2478 mci = edac_mc_find(nid);
2482 pvt = mci->pvt_info;
2484 edac_dbg(1, "input addr: 0x%llx, DCT: %d\n", in_addr, dct);
2486 for_each_chip_select(csrow, dct, pvt) {
2487 if (!csrow_enabled(csrow, dct, pvt))
2490 get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);
2492 edac_dbg(1, " CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
2493 csrow, cs_base, cs_mask);
2497 edac_dbg(1, " (InputAddr & ~CSMask)=0x%llx (CSBase & ~CSMask)=0x%llx\n",
2498 (in_addr & cs_mask), (cs_base & cs_mask));
2500 if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
2501 if (pvt->fam == 0x15 && pvt->model >= 0x30) {
2505 cs_found = f10_process_possible_spare(pvt, dct, csrow);
2507 edac_dbg(1, " MATCH csrow=%d\n", cs_found);
2515 * See F2x10C. Non-interleaved graphics framebuffer memory under the 16G is
2516 * swapped with a region located at the bottom of memory so that the GPU can use
2517 * the interleaved region and thus two channels.
2519 static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
2521 u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;
2523 if (pvt->fam == 0x10) {
2524 /* only revC3 and revE have that feature */
2525 if (pvt->model < 4 || (pvt->model < 0xa && pvt->stepping < 3))
2529 amd64_read_pci_cfg(pvt->F2, SWAP_INTLV_REG, &swap_reg);
2531 if (!(swap_reg & 0x1))
2534 swap_base = (swap_reg >> 3) & 0x7f;
2535 swap_limit = (swap_reg >> 11) & 0x7f;
2536 rgn_size = (swap_reg >> 20) & 0x7f;
2537 tmp_addr = sys_addr >> 27;
2539 if (!(sys_addr >> 34) &&
2540 (((tmp_addr >= swap_base) &&
2541 (tmp_addr <= swap_limit)) ||
2542 (tmp_addr < rgn_size)))
2543 return sys_addr ^ (u64)swap_base << 27;
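/*
* Illustration of the swap above (hypothetical values, range checks assumed
* satisfied): with swap_base = 0x2 the XOR maps 0x00001000 to 0x10001000 and
* back, i.e. the framebuffer block and the block at the bottom of memory
* trade places in 128 MB units.
*/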
2548 /* For a given @dram_range, check if @sys_addr falls within it. */
2549 static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
2550 u64 sys_addr, int *chan_sel)
2552 int cs_found = -EINVAL;
2556 bool high_range = false;
2558 u8 node_id = dram_dst_node(pvt, range);
2559 u8 intlv_en = dram_intlv_en(pvt, range);
2560 u32 intlv_sel = dram_intlv_sel(pvt, range);
2562 edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
2563 range, sys_addr, get_dram_limit(pvt, range));
2565 if (dhar_valid(pvt) &&
2566 dhar_base(pvt) <= sys_addr &&
2567 sys_addr < BIT_64(32)) {
2568 amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
2573 if (intlv_en && (intlv_sel != ((sys_addr >> 12) & intlv_en)))
2576 sys_addr = f1x_swap_interleaved_region(pvt, sys_addr);
2578 dct_sel_base = dct_sel_baseaddr(pvt);
2581 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
2582 * select between DCT0 and DCT1.
2584 if (dct_high_range_enabled(pvt) &&
2585 !dct_ganging_enabled(pvt) &&
2586 ((sys_addr >> 27) >= (dct_sel_base >> 11)))
2589 channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en);
2591 chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr,
2592 high_range, dct_sel_base);
2594 /* Remove node interleaving, see F1x120 */
2596 chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) |
2597 (chan_addr & 0xfff);
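/*
* E.g. (arithmetic only) with intlv_en = 0x1 the shift above squeezes out
* chan_addr bit 12 and keeps the low 12 bits unchanged.
*/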
2599 /* remove channel interleave */
2600 if (dct_interleave_enabled(pvt) &&
2601 !dct_high_range_enabled(pvt) &&
2602 !dct_ganging_enabled(pvt)) {
2604 if (dct_sel_interleave_addr(pvt) != 1) {
2605 if (dct_sel_interleave_addr(pvt) == 0x3)
2607 chan_addr = ((chan_addr >> 10) << 9) |
2608 (chan_addr & 0x1ff);
2610 /* A[6] or hash 6 */
2611 chan_addr = ((chan_addr >> 7) << 6) |
2615 chan_addr = ((chan_addr >> 13) << 12) |
2616 (chan_addr & 0xfff);
2619 edac_dbg(1, " Normalized DCT addr: 0x%llx\n", chan_addr);
2621 cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);
2624 *chan_sel = channel;
2629 static int f15_m30h_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
2630 u64 sys_addr, int *chan_sel)
2632 int cs_found = -EINVAL;
2633 int num_dcts_intlv = 0;
2634 u64 chan_addr, chan_offset;
2635 u64 dct_base, dct_limit;
2636 u32 dct_cont_base_reg, dct_cont_limit_reg, tmp;
2637 u8 channel, alias_channel, leg_mmio_hole, dct_sel, dct_offset_en;
2639 u64 dhar_offset = f10_dhar_offset(pvt);
2640 u8 intlv_addr = dct_sel_interleave_addr(pvt);
2641 u8 node_id = dram_dst_node(pvt, range);
2642 u8 intlv_en = dram_intlv_en(pvt, range);
2644 amd64_read_pci_cfg(pvt->F1, DRAM_CONT_BASE, &dct_cont_base_reg);
2645 amd64_read_pci_cfg(pvt->F1, DRAM_CONT_LIMIT, &dct_cont_limit_reg);
2647 dct_offset_en = (u8) ((dct_cont_base_reg >> 3) & BIT(0));
2648 dct_sel = (u8) ((dct_cont_base_reg >> 4) & 0x7);
2650 edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
2651 range, sys_addr, get_dram_limit(pvt, range));
2653 if (!(get_dram_base(pvt, range) <= sys_addr) &&
2654 !(get_dram_limit(pvt, range) >= sys_addr))
2657 if (dhar_valid(pvt) &&
2658 dhar_base(pvt) <= sys_addr &&
2659 sys_addr < BIT_64(32)) {
2660 amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
2665 /* Verify sys_addr is within DCT Range. */
2666 dct_base = (u64) dct_sel_baseaddr(pvt);
2667 dct_limit = (dct_cont_limit_reg >> 11) & 0x1FFF;
2669 if (!(dct_cont_base_reg & BIT(0)) &&
2670 !(dct_base <= (sys_addr >> 27) &&
2671 dct_limit >= (sys_addr >> 27)))
2674 /* Verify the number of DCTs that participate in channel interleaving. */
2675 num_dcts_intlv = (int) hweight8(intlv_en);
2677 if (!(num_dcts_intlv % 2 == 0) || (num_dcts_intlv > 4))
2680 if (pvt->model >= 0x60)
2681 channel = f1x_determine_channel(pvt, sys_addr, false, intlv_en);
2683 channel = f15_m30h_determine_channel(pvt, sys_addr, intlv_en,
2684 num_dcts_intlv, dct_sel);
2686 /* Verify we stay within the MAX number of channels allowed */
2690 leg_mmio_hole = (u8) (dct_cont_base_reg >> 1 & BIT(0));
2692 /* Get normalized DCT addr */
2693 if (leg_mmio_hole && (sys_addr >= BIT_64(32)))
2694 chan_offset = dhar_offset;
2696 chan_offset = dct_base << 27;
2698 chan_addr = sys_addr - chan_offset;
2700 /* remove channel interleave */
2701 if (num_dcts_intlv == 2) {
2702 if (intlv_addr == 0x4)
2703 chan_addr = ((chan_addr >> 9) << 8) |
2705 else if (intlv_addr == 0x5)
2706 chan_addr = ((chan_addr >> 10) << 9) |
2707 (chan_addr & 0x1ff);
2711 } else if (num_dcts_intlv == 4) {
2712 if (intlv_addr == 0x4)
2713 chan_addr = ((chan_addr >> 10) << 8) |
2715 else if (intlv_addr == 0x5)
2716 chan_addr = ((chan_addr >> 11) << 9) |
2717 (chan_addr & 0x1ff);
2722 if (dct_offset_en) {
2723 amd64_read_pci_cfg(pvt->F1,
2724 DRAM_CONT_HIGH_OFF + (int) channel * 4,
2726 chan_addr += (u64) ((tmp >> 11) & 0xfff) << 27;
2729 f15h_select_dct(pvt, channel);
2731 edac_dbg(1, " Normalized DCT addr: 0x%llx\n", chan_addr);
2735 * if channel = 3, then alias it to 1. This is because, on F15h M30h,
2736 * there is support for 4 DCTs, but only 2 are currently functional.
2737 * They are DCT0 and DCT3. But we have read all registers of DCT3 into
2738 * pvt->csels[1]. So we need to use '1' here to get correct info.
2739 * Refer to F15h M30h BKDG Sections 2.10 and 2.10.3 for clarification.
2741 alias_channel = (channel == 3) ? 1 : channel;
2743 cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, alias_channel);
2746 *chan_sel = alias_channel;
2751 static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt,
2755 int cs_found = -EINVAL;
2758 for (range = 0; range < DRAM_RANGES; range++) {
2759 if (!dram_rw(pvt, range))
2762 if (pvt->fam == 0x15 && pvt->model >= 0x30)
2763 cs_found = f15_m30h_match_to_this_node(pvt, range,
2767 else if ((get_dram_base(pvt, range) <= sys_addr) &&
2768 (get_dram_limit(pvt, range) >= sys_addr)) {
2769 cs_found = f1x_match_to_this_node(pvt, range,
2770 sys_addr, chan_sel);
2779 * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
2780 * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
2782 * The @sys_addr is usually an error address received from the hardware (MCX_ADDR).
2785 static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
2786 struct err_info *err)
2788 struct amd64_pvt *pvt = mci->pvt_info;
2790 error_address_to_page_and_offset(sys_addr, err);
2792 err->csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &err->channel);
2793 if (err->csrow < 0) {
2794 err->err_code = ERR_CSROW;
2799 * We need the syndromes for channel detection only when we're
2800 * ganged. Otherwise @chan should already contain the channel at this point.
2803 if (dct_ganging_enabled(pvt))
2804 err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
2808 * These are tables of eigenvectors (one per line) which can be used for the
2809 * construction of the syndrome tables. The modified syndrome search algorithm
2810 * uses those to find the symbol in error and thus the DIMM.
2812 * Algorithm courtesy of Ross LaFetra from AMD.
2814 static const u16 x4_vectors[] = {
2815 0x2f57, 0x1afe, 0x66cc, 0xdd88,
2816 0x11eb, 0x3396, 0x7f4c, 0xeac8,
2817 0x0001, 0x0002, 0x0004, 0x0008,
2818 0x1013, 0x3032, 0x4044, 0x8088,
2819 0x106b, 0x30d6, 0x70fc, 0xe0a8,
2820 0x4857, 0xc4fe, 0x13cc, 0x3288,
2821 0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
2822 0x1f39, 0x251e, 0xbd6c, 0x6bd8,
2823 0x15c1, 0x2a42, 0x89ac, 0x4758,
2824 0x2b03, 0x1602, 0x4f0c, 0xca08,
2825 0x1f07, 0x3a0e, 0x6b04, 0xbd08,
2826 0x8ba7, 0x465e, 0x244c, 0x1cc8,
2827 0x2b87, 0x164e, 0x642c, 0xdc18,
2828 0x40b9, 0x80de, 0x1094, 0x20e8,
2829 0x27db, 0x1eb6, 0x9dac, 0x7b58,
2830 0x11c1, 0x2242, 0x84ac, 0x4c58,
2831 0x1be5, 0x2d7a, 0x5e34, 0xa718,
2832 0x4b39, 0x8d1e, 0x14b4, 0x28d8,
2833 0x4c97, 0xc87e, 0x11fc, 0x33a8,
2834 0x8e97, 0x497e, 0x2ffc, 0x1aa8,
2835 0x16b3, 0x3d62, 0x4f34, 0x8518,
2836 0x1e2f, 0x391a, 0x5cac, 0xf858,
2837 0x1d9f, 0x3b7a, 0x572c, 0xfe18,
2838 0x15f5, 0x2a5a, 0x5264, 0xa3b8,
2839 0x1dbb, 0x3b66, 0x715c, 0xe3f8,
2840 0x4397, 0xc27e, 0x17fc, 0x3ea8,
2841 0x1617, 0x3d3e, 0x6464, 0xb8b8,
2842 0x23ff, 0x12aa, 0xab6c, 0x56d8,
2843 0x2dfb, 0x1ba6, 0x913c, 0x7328,
2844 0x185d, 0x2ca6, 0x7914, 0x9e28,
2845 0x171b, 0x3e36, 0x7d7c, 0xebe8,
2846 0x4199, 0x82ee, 0x19f4, 0x2e58,
2847 0x4807, 0xc40e, 0x130c, 0x3208,
2848 0x1905, 0x2e0a, 0x5804, 0xac08,
2849 0x213f, 0x132a, 0xadfc, 0x5ba8,
2850 0x19a9, 0x2efe, 0xb5cc, 0x6f88,
2853 static const u16 x8_vectors[] = {
2854 0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
2855 0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
2856 0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
2857 0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
2858 0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
2859 0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
2860 0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
2861 0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
2862 0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
2863 0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
2864 0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
2865 0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
2866 0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
2867 0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
2868 0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
2869 0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
2870 0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
2871 0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
2872 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
2875 static int decode_syndrome(u16 syndrome, const u16 *vectors, unsigned num_vecs,
2878 unsigned int i, err_sym;
2880 for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
2882 unsigned v_idx = err_sym * v_dim;
2883 unsigned v_end = (err_sym + 1) * v_dim;
2885 /* walk over all 16 bits of the syndrome */
2886 for (i = 1; i < (1U << 16); i <<= 1) {
2888 /* if bit is set in that eigenvector... */
2889 if (v_idx < v_end && vectors[v_idx] & i) {
2890 u16 ev_comp = vectors[v_idx++];
2892 /* ... and bit set in the modified syndrome, */
2902 /* can't get to zero, move to next symbol */
2907 edac_dbg(0, "syndrome(%x) not found\n", syndrome);
2911 static int map_err_sym_to_channel(int err_sym, int sym_size)
2922 return err_sym >> 4;
2927 /* imaginary bits not in a DIMM */
2929 WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n",
2937 return err_sym >> 3;
2942 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
2944 struct amd64_pvt *pvt = mci->pvt_info;
2947 if (pvt->ecc_sym_sz == 8)
2948 err_sym = decode_syndrome(syndrome, x8_vectors,
2949 ARRAY_SIZE(x8_vectors),
2951 else if (pvt->ecc_sym_sz == 4)
2952 err_sym = decode_syndrome(syndrome, x4_vectors,
2953 ARRAY_SIZE(x4_vectors),
2956 amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz);
2960 return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
2963 static void __log_ecc_error(struct mem_ctl_info *mci, struct err_info *err,
2966 enum hw_event_mc_err_type err_type;
2970 err_type = HW_EVENT_ERR_CORRECTED;
2971 else if (ecc_type == 1)
2972 err_type = HW_EVENT_ERR_UNCORRECTED;
2973 else if (ecc_type == 3)
2974 err_type = HW_EVENT_ERR_DEFERRED;
2976 WARN(1, "Something is rotten in the state of Denmark.\n");
2980 switch (err->err_code) {
2985 string = "Failed to map error addr to a node";
2988 string = "Failed to map error addr to a csrow";
2991 string = "Unknown syndrome - possible error reporting race";
2994 string = "MCA_SYND not valid - unknown syndrome and csrow";
2997 string = "Cannot decode normalized address";
3000 string = "WTF error";
3004 edac_mc_handle_error(err_type, mci, 1,
3005 err->page, err->offset, err->syndrome,
3006 err->csrow, err->channel, -1,
3010 static inline void decode_bus_error(int node_id, struct mce *m)
3012 struct mem_ctl_info *mci;
3013 struct amd64_pvt *pvt;
3014 u8 ecc_type = (m->status >> 45) & 0x3;
3015 u8 xec = XEC(m->status, 0x1f);
3016 u16 ec = EC(m->status);
3018 struct err_info err;
3020 mci = edac_mc_find(node_id);
3024 pvt = mci->pvt_info;
3026 /* Bail out early if this was an 'observed' error */
3027 if (PP(ec) == NBSL_PP_OBS)
3030 /* Do only ECC errors */
3031 if (xec && xec != F10_NBSL_EXT_ERR_ECC)
3034 memset(&err, 0, sizeof(err));
3036 sys_addr = get_error_address(pvt, m);
3039 err.syndrome = extract_syndrome(m->status);
3041 pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, &err);
3043 __log_ecc_error(mci, &err, ecc_type);
3047 * To find the UMC channel represented by this bank we need to match on its
3048 * instance_id. The instance_id of a bank is held in the lower 32 bits of its IPID.
3051 * Currently, we can derive the channel number by looking at the 6th nibble in
3052 * the instance_id. For example, instance_id=0xYXXXXX where Y is the channel number.
3055 * For DRAM ECC errors, the Chip Select number is given in bits [2:0] of
3056 * the MCA_SYND[ErrorInformation] field.
3058 static void umc_get_err_info(struct mce *m, struct err_info *err)
3060 err->channel = (m->ipid & GENMASK(31, 0)) >> 20;
3061 err->csrow = m->synd & 0x7;
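/*
* Worked example (hypothetical IPID value): an instance_id of 0x150000 gives
* (0x150000 >> 20) = 0x1, i.e. channel 1; the chip select then comes from
* MCA_SYND[2:0] as described above.
*/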
3064 static void decode_umc_error(int node_id, struct mce *m)
3066 u8 ecc_type = (m->status >> 45) & 0x3;
3067 struct mem_ctl_info *mci;
3068 struct amd64_pvt *pvt;
3069 struct err_info err;
3072 node_id = fixup_node_id(node_id, m);
3074 mci = edac_mc_find(node_id);
3078 pvt = mci->pvt_info;
3080 memset(&err, 0, sizeof(err));
3082 if (m->status & MCI_STATUS_DEFERRED)
3085 if (!(m->status & MCI_STATUS_SYNDV)) {
3086 err.err_code = ERR_SYND;
3090 if (ecc_type == 2) {
3091 u8 length = (m->synd >> 18) & 0x3f;
3094 err.syndrome = (m->synd >> 32) & GENMASK(length - 1, 0);
3096 err.err_code = ERR_CHANNEL;
3099 pvt->ops->get_err_info(m, &err);
3101 if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
3102 err.err_code = ERR_NORM_ADDR;
3106 error_address_to_page_and_offset(sys_addr, &err);
3109 __log_ecc_error(mci, &err, ecc_type);
3113 * Use pvt->F3 which contains the F3 CPU PCI device to get the related
3114 * F1 (AddrMap) and F2 (Dct) devices. Return negative value on error.
3117 reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2)
3119 /* Reserve the ADDRESS MAP Device */
3120 pvt->F1 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
3122 edac_dbg(1, "F1 not found: device 0x%x\n", pci_id1);
3126 /* Reserve the DCT Device */
3127 pvt->F2 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
3129 pci_dev_put(pvt->F1);
3132 edac_dbg(1, "F2 not found: device 0x%x\n", pci_id2);
3137 pci_ctl_dev = &pvt->F2->dev;
3139 edac_dbg(1, "F1: %s\n", pci_name(pvt->F1));
3140 edac_dbg(1, "F2: %s\n", pci_name(pvt->F2));
3141 edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
3146 static void determine_ecc_sym_sz(struct amd64_pvt *pvt)
3148 pvt->ecc_sym_sz = 4;
3150 if (pvt->fam >= 0x10) {
3153 amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
3154 /* F16h has only DCT0, so no need to read dbam1. */
3155 if (pvt->fam != 0x16)
3156 amd64_read_dct_pci_cfg(pvt, 1, DBAM0, &pvt->dbam1);
3158 /* F10h, revD and later can do x8 ECC too. */
3159 if ((pvt->fam > 0x10 || pvt->model > 7) && tmp & BIT(25))
3160 pvt->ecc_sym_sz = 8;
3165 * Retrieve the hardware registers of the memory controller.
3167 static void umc_read_mc_regs(struct amd64_pvt *pvt)
3169 u8 nid = pvt->mc_node_id;
3170 struct amd64_umc *umc;
3173 /* Read registers from each UMC */
3176 umc_base = get_umc_base(i);
3179 amd_smn_read(nid, umc_base + get_umc_reg(pvt, UMCCH_DIMM_CFG), &umc->dimm_cfg);
3180 amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &umc->umc_cfg);
3181 amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &umc->sdp_ctrl);
3182 amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &umc->ecc_ctrl);
3183 amd_smn_read(nid, umc_base + UMCCH_UMC_CAP_HI, &umc->umc_cap_hi);
3188 * Retrieve the hardware registers of the memory controller (this includes the
3189 * 'Address Map' and 'Misc' device regs)
3191 static void dct_read_mc_regs(struct amd64_pvt *pvt)
3197 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
3198 * those are Read-As-Zero.
3200 rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
3201 edac_dbg(0, " TOP_MEM: 0x%016llx\n", pvt->top_mem);
3203 /* Check first whether TOP_MEM2 is enabled: */
3204 rdmsrl(MSR_AMD64_SYSCFG, msr_val);
3205 if (msr_val & BIT(21)) {
3206 rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
3207 edac_dbg(0, " TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
3209 edac_dbg(0, " TOP_MEM2 disabled\n");
3212 amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);
3214 read_dram_ctl_register(pvt);
3216 for (range = 0; range < DRAM_RANGES; range++) {
3219 /* read settings for this DRAM range */
3220 read_dram_base_limit_regs(pvt, range);
3222 rw = dram_rw(pvt, range);
3226 edac_dbg(1, " DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
3228 get_dram_base(pvt, range),
3229 get_dram_limit(pvt, range));
3231 edac_dbg(1, " IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
3232 dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
3233 (rw & 0x1) ? "R" : "-",
3234 (rw & 0x2) ? "W" : "-",
3235 dram_intlv_sel(pvt, range),
3236 dram_dst_node(pvt, range));
3239 amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
3240 amd64_read_dct_pci_cfg(pvt, 0, DBAM0, &pvt->dbam0);
3242 amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);
3244 amd64_read_dct_pci_cfg(pvt, 0, DCLR0, &pvt->dclr0);
3245 amd64_read_dct_pci_cfg(pvt, 0, DCHR0, &pvt->dchr0);
3247 if (!dct_ganging_enabled(pvt)) {
3248 amd64_read_dct_pci_cfg(pvt, 1, DCLR0, &pvt->dclr1);
3249 amd64_read_dct_pci_cfg(pvt, 1, DCHR0, &pvt->dchr1);
3252 determine_ecc_sym_sz(pvt);
3256 * NOTE: CPU Revision Dependent code
3259 * @csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
3260 * k8 private pointer to -->
3261 * DRAM Bank Address mapping register
3263 * DCL register where dual_channel_active is set
3265 * The DBAM register consists of 4 sets of 4 bits each, with the following definitions:
* Bits:   CSROWs
3268 * 0-3 CSROWs 0 and 1
3269 * 4-7 CSROWs 2 and 3
3270 * 8-11 CSROWs 4 and 5
3271 * 12-15 CSROWs 6 and 7
3273 * Values range from: 0 to 15
3274 * The meaning of the values depends on CPU revision and dual-channel state,
3275 * see the relevant BKDG for more info.
3277 * The memory controller provides for a total of only 8 CSROWs in its current
3278 * architecture. Each "pair" of CSROWs normally represents just one DIMM in
3279 * single channel or two (2) DIMMs in dual channel mode.
3281 * The following code logic collapses the various tables for CSROW based on CPU
* revision.
* Returns:
3285 * The number of PAGE_SIZE pages on the specified CSROW number it
* should hold.
3289 static u32 dct_get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
3291 u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;
3292 u32 cs_mode, nr_pages;
3295 cs_mode = DBAM_DIMM(csrow_nr, dbam);
3297 nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, csrow_nr);
3298 nr_pages <<= 20 - PAGE_SHIFT;
3300 edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n",
3301 csrow_nr, dct, cs_mode);
3302 edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);
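/*
* Example of the conversion above (assuming 4 KB pages, i.e. PAGE_SHIFT = 12):
* a 512 MB chip select becomes 512 << (20 - 12) = 131072 pages.
*/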
3307 static u32 umc_get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr_orig)
3309 int csrow_nr = csrow_nr_orig;
3310 u32 cs_mode, nr_pages;
3312 cs_mode = umc_get_cs_mode(csrow_nr >> 1, dct, pvt);
3314 nr_pages = umc_addr_mask_to_cs_size(pvt, dct, cs_mode, csrow_nr);
3315 nr_pages <<= 20 - PAGE_SHIFT;
3317 edac_dbg(0, "csrow: %d, channel: %d, cs_mode %d\n",
3318 csrow_nr_orig, dct, cs_mode);
3319 edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);
3324 static void umc_init_csrows(struct mem_ctl_info *mci)
3326 struct amd64_pvt *pvt = mci->pvt_info;
3327 enum edac_type edac_mode = EDAC_NONE;
3328 enum dev_type dev_type = DEV_UNKNOWN;
3329 struct dimm_info *dimm;
3332 if (mci->edac_ctl_cap & EDAC_FLAG_S16ECD16ED) {
3333 edac_mode = EDAC_S16ECD16ED;
3335 } else if (mci->edac_ctl_cap & EDAC_FLAG_S8ECD8ED) {
3336 edac_mode = EDAC_S8ECD8ED;
3338 } else if (mci->edac_ctl_cap & EDAC_FLAG_S4ECD4ED) {
3339 edac_mode = EDAC_S4ECD4ED;
3341 } else if (mci->edac_ctl_cap & EDAC_FLAG_SECDED) {
3342 edac_mode = EDAC_SECDED;
3346 for_each_chip_select(cs, umc, pvt) {
3347 if (!csrow_enabled(cs, umc, pvt))
3350 dimm = mci->csrows[cs]->channels[umc]->dimm;
3352 edac_dbg(1, "MC node: %d, csrow: %d\n",
3353 pvt->mc_node_id, cs);
3355 dimm->nr_pages = umc_get_csrow_nr_pages(pvt, umc, cs);
3356 dimm->mtype = pvt->umc[umc].dram_type;
3357 dimm->edac_mode = edac_mode;
3358 dimm->dtype = dev_type;
3365 * Initialize the array of csrow attribute instances, based on the values
3366 * from pci config hardware registers.
3368 static void dct_init_csrows(struct mem_ctl_info *mci)
3370 struct amd64_pvt *pvt = mci->pvt_info;
3371 enum edac_type edac_mode = EDAC_NONE;
3372 struct csrow_info *csrow;
3373 struct dimm_info *dimm;
3378 amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
3382 edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
3383 pvt->mc_node_id, val,
3384 !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
3387 * We iterate over DCT0 here but we look at DCT1 in parallel, if needed.
3389 for_each_chip_select(i, 0, pvt) {
3390 bool row_dct0 = !!csrow_enabled(i, 0, pvt);
3391 bool row_dct1 = false;
3393 if (pvt->fam != 0xf)
3394 row_dct1 = !!csrow_enabled(i, 1, pvt);
3396 if (!row_dct0 && !row_dct1)
3399 csrow = mci->csrows[i];
3401 edac_dbg(1, "MC node: %d, csrow: %d\n",
3402 pvt->mc_node_id, i);
3405 nr_pages = dct_get_csrow_nr_pages(pvt, 0, i);
3406 csrow->channels[0]->dimm->nr_pages = nr_pages;
3409 /* K8 has only one DCT */
3410 if (pvt->fam != 0xf && row_dct1) {
3411 int row_dct1_pages = dct_get_csrow_nr_pages(pvt, 1, i);
3413 csrow->channels[1]->dimm->nr_pages = row_dct1_pages;
3414 nr_pages += row_dct1_pages;
3417 edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages);
3419 /* Determine DIMM ECC mode: */
3420 if (pvt->nbcfg & NBCFG_ECC_ENABLE) {
3421 edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL)
3426 for (j = 0; j < pvt->max_mcs; j++) {
3427 dimm = csrow->channels[j]->dimm;
3428 dimm->mtype = pvt->dram_type;
3429 dimm->edac_mode = edac_mode;
3435 /* get all cores on this DCT */
3436 static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid)
3440 for_each_online_cpu(cpu)
3441 if (topology_die_id(cpu) == nid)
3442 cpumask_set_cpu(cpu, mask);
3445 /* check MCG_CTL on all the cpus on this node */
3446 static bool nb_mce_bank_enabled_on_node(u16 nid)
3452 if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
3453 amd64_warn("%s: Error allocating mask\n", __func__);
3457 get_cpus_on_this_dct_cpumask(mask, nid);
3459 rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
3461 for_each_cpu(cpu, mask) {
3462 struct msr *reg = per_cpu_ptr(msrs, cpu);
3463 nbe = reg->l & MSR_MCGCTL_NBE;
3465 edac_dbg(0, "core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
3467 (nbe ? "enabled" : "disabled"));
3475 free_cpumask_var(mask);
3479 static int toggle_ecc_err_reporting(struct ecc_settings *s, u16 nid, bool on)
3481 cpumask_var_t cmask;
3484 if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
3485 amd64_warn("%s: error allocating mask\n", __func__);
3489 get_cpus_on_this_dct_cpumask(cmask, nid);
3491 rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
3493 for_each_cpu(cpu, cmask) {
3495 struct msr *reg = per_cpu_ptr(msrs, cpu);
3498 if (reg->l & MSR_MCGCTL_NBE)
3499 s->flags.nb_mce_enable = 1;
3501 reg->l |= MSR_MCGCTL_NBE;
3504 * Turn off NB MCE reporting only when it was off before
3506 if (!s->flags.nb_mce_enable)
3507 reg->l &= ~MSR_MCGCTL_NBE;
3510 wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
3512 free_cpumask_var(cmask);
3517 static bool enable_ecc_error_reporting(struct ecc_settings *s, u16 nid,
3521 u32 value, mask = 0x3; /* UECC/CECC enable */
3523 if (toggle_ecc_err_reporting(s, nid, ON)) {
3524 amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
3528 amd64_read_pci_cfg(F3, NBCTL, &value);
3530 s->old_nbctl = value & mask;
3531 s->nbctl_valid = true;
3534 amd64_write_pci_cfg(F3, NBCTL, value);
3536 amd64_read_pci_cfg(F3, NBCFG, &value);
3538 edac_dbg(0, "1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
3539 nid, value, !!(value & NBCFG_ECC_ENABLE));
3541 if (!(value & NBCFG_ECC_ENABLE)) {
3542 amd64_warn("DRAM ECC disabled on this node, enabling...\n");
3544 s->flags.nb_ecc_prev = 0;
3546 /* Attempt to turn on DRAM ECC Enable */
3547 value |= NBCFG_ECC_ENABLE;
3548 amd64_write_pci_cfg(F3, NBCFG, value);
3550 amd64_read_pci_cfg(F3, NBCFG, &value);
3552 if (!(value & NBCFG_ECC_ENABLE)) {
3553 amd64_warn("Hardware rejected DRAM ECC enable, "
3554 "check memory DIMM configuration.\n");
3557 amd64_info("Hardware accepted DRAM ECC Enable\n");
3560 s->flags.nb_ecc_prev = 1;
3563 edac_dbg(0, "2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
3564 nid, value, !!(value & NBCFG_ECC_ENABLE));
3569 static void restore_ecc_error_reporting(struct ecc_settings *s, u16 nid,
3572 u32 value, mask = 0x3; /* UECC/CECC enable */
3574 if (!s->nbctl_valid)
3577 amd64_read_pci_cfg(F3, NBCTL, &value);
3579 value |= s->old_nbctl;
3581 amd64_write_pci_cfg(F3, NBCTL, value);
3583 /* restore previous BIOS DRAM ECC "off" setting we force-enabled */
3584 if (!s->flags.nb_ecc_prev) {
3585 amd64_read_pci_cfg(F3, NBCFG, &value);
3586 value &= ~NBCFG_ECC_ENABLE;
3587 amd64_write_pci_cfg(F3, NBCFG, value);
3590 /* restore the NB Enable MCGCTL bit */
3591 if (toggle_ecc_err_reporting(s, nid, OFF))
3592 amd64_warn("Error restoring NB MCGCTL settings!\n");
3595 static bool dct_ecc_enabled(struct amd64_pvt *pvt)
3597 u16 nid = pvt->mc_node_id;
3598 bool nb_mce_en = false;
3602 amd64_read_pci_cfg(pvt->F3, NBCFG, &value);
3604 ecc_en = !!(value & NBCFG_ECC_ENABLE);
3606 nb_mce_en = nb_mce_bank_enabled_on_node(nid);
3608 edac_dbg(0, "NB MCE bank disabled, set MSR 0x%08x[4] on node %d to enable.\n",
3609 MSR_IA32_MCG_CTL, nid);
3611 edac_dbg(3, "Node %d: DRAM ECC %s.\n", nid, (ecc_en ? "enabled" : "disabled"));
3613 if (!ecc_en || !nb_mce_en)
3619 static bool umc_ecc_enabled(struct amd64_pvt *pvt)
3621 u8 umc_en_mask = 0, ecc_en_mask = 0;
3622 u16 nid = pvt->mc_node_id;
3623 struct amd64_umc *umc;
3629 /* Only check enabled UMCs. */
3630 if (!(umc->sdp_ctrl & UMC_SDP_INIT))
3633 umc_en_mask |= BIT(i);
3635 if (umc->umc_cap_hi & UMC_ECC_ENABLED)
3636 ecc_en_mask |= BIT(i);
3639 /* Check whether at least one UMC is enabled: */
3641 ecc_en = umc_en_mask == ecc_en_mask;
3643 edac_dbg(0, "Node %d: No enabled UMCs.\n", nid);
3645 edac_dbg(3, "Node %d: DRAM ECC %s.\n", nid, (ecc_en ? "enabled" : "disabled"));
3654 umc_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
3656 u8 i, ecc_en = 1, cpk_en = 1, dev_x4 = 1, dev_x16 = 1;
3659 if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
3660 ecc_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_ENABLED);
3661 cpk_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_CHIPKILL_CAP);
3663 dev_x4 &= !!(pvt->umc[i].dimm_cfg & BIT(6));
3664 dev_x16 &= !!(pvt->umc[i].dimm_cfg & BIT(7));
3668 /* Set chipkill only if ECC is enabled: */
3670 mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
3676 mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
3678 mci->edac_ctl_cap |= EDAC_FLAG_S16ECD16ED;
3680 mci->edac_ctl_cap |= EDAC_FLAG_S8ECD8ED;
3684 static void dct_setup_mci_misc_attrs(struct mem_ctl_info *mci)
3686 struct amd64_pvt *pvt = mci->pvt_info;
3688 mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
3689 mci->edac_ctl_cap = EDAC_FLAG_NONE;
3691 if (pvt->nbcap & NBCAP_SECDED)
3692 mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
3694 if (pvt->nbcap & NBCAP_CHIPKILL)
3695 mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
3697 mci->edac_cap = dct_determine_edac_cap(pvt);
3698 mci->mod_name = EDAC_MOD_STR;
3699 mci->ctl_name = pvt->ctl_name;
3700 mci->dev_name = pci_name(pvt->F3);
3701 mci->ctl_page_to_phys = NULL;
3703 /* memory scrubber interface */
3704 mci->set_sdram_scrub_rate = set_scrub_rate;
3705 mci->get_sdram_scrub_rate = get_scrub_rate;
3707 dct_init_csrows(mci);
3710 static void umc_setup_mci_misc_attrs(struct mem_ctl_info *mci)
3712 struct amd64_pvt *pvt = mci->pvt_info;
3714 mci->mtype_cap = MEM_FLAG_DDR4 | MEM_FLAG_RDDR4;
3715 mci->edac_ctl_cap = EDAC_FLAG_NONE;
3717 umc_determine_edac_ctl_cap(mci, pvt);
3719 mci->edac_cap = umc_determine_edac_cap(pvt);
3720 mci->mod_name = EDAC_MOD_STR;
3721 mci->ctl_name = pvt->ctl_name;
3722 mci->dev_name = pci_name(pvt->F3);
3723 mci->ctl_page_to_phys = NULL;
3725 umc_init_csrows(mci);
3728 static int dct_hw_info_get(struct amd64_pvt *pvt)
3730 int ret = reserve_mc_sibling_devs(pvt, pvt->f1_id, pvt->f2_id);
3735 dct_prep_chip_selects(pvt);
3736 dct_read_base_mask(pvt);
3737 dct_read_mc_regs(pvt);
3738 dct_determine_memory_type(pvt);
3743 static int umc_hw_info_get(struct amd64_pvt *pvt)
3745 pvt->umc = kcalloc(pvt->max_mcs, sizeof(struct amd64_umc), GFP_KERNEL);
3749 umc_prep_chip_selects(pvt);
3750 umc_read_base_mask(pvt);
3751 umc_read_mc_regs(pvt);
3752 umc_determine_memory_type(pvt);
3758 * The CPUs have one channel per UMC, so UMC number is equivalent to a
3759 * channel number. The GPUs have 8 channels per UMC, so the UMC number no
3760 * longer works as a channel number.
3762 * The channel number within a GPU UMC is given in MCA_IPID[15:12].
3763 * However, the IDs are split such that two UMC values go to one UMC, and
3764 * the channel numbers are split in two groups of four.
3766 * Refer to comment on gpu_get_umc_base().
3769 * UMC0 CH[3:0] = 0x0005[3:0]000
3770 * UMC0 CH[7:4] = 0x0015[3:0]000
3771 * UMC1 CH[3:0] = 0x0025[3:0]000
3772 * UMC1 CH[7:4] = 0x0035[3:0]000
3774 static void gpu_get_err_info(struct mce *m, struct err_info *err)
3776 u8 ch = (m->ipid & GENMASK(31, 0)) >> 20;
3777 u8 phy = ((m->ipid >> 12) & 0xf);
3779 err->channel = ch % 2 ? phy + 4 : phy;
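/*
* Worked example (hypothetical IPID value): instance_id 0x153000 gives
* ch = 0x1 and phy = 0x3, so the reported channel is 3 + 4 = 7, i.e. UMC0 CH7
* in the 0x0015[3:0]000 range listed above.
*/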
3783 static int gpu_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
3784 unsigned int cs_mode, int csrow_nr)
3786 u32 addr_mask_orig = pvt->csels[umc].csmasks[csrow_nr];
3788 return __addr_mask_to_cs_size(addr_mask_orig, cs_mode, csrow_nr, csrow_nr >> 1);
3791 static void gpu_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
3793 int size, cs_mode, cs = 0;
3795 edac_printk(KERN_DEBUG, EDAC_MC, "UMC%d chip selects:\n", ctrl);
3797 cs_mode = CS_EVEN_PRIMARY | CS_ODD_PRIMARY;
3799 for_each_chip_select(cs, ctrl, pvt) {
3800 size = gpu_addr_mask_to_cs_size(pvt, ctrl, cs_mode, cs);
3801 amd64_info(EDAC_MC ": %d: %5dMB\n", cs, size);
3805 static void gpu_dump_misc_regs(struct amd64_pvt *pvt)
3807 struct amd64_umc *umc;
3813 edac_dbg(1, "UMC%d UMC cfg: 0x%x\n", i, umc->umc_cfg);
3814 edac_dbg(1, "UMC%d SDP ctrl: 0x%x\n", i, umc->sdp_ctrl);
3815 edac_dbg(1, "UMC%d ECC ctrl: 0x%x\n", i, umc->ecc_ctrl);
3816 edac_dbg(1, "UMC%d All HBMs support ECC: yes\n", i);
3818 gpu_debug_display_dimm_sizes(pvt, i);
3822 static u32 gpu_get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
3825 int cs_mode = CS_EVEN_PRIMARY | CS_ODD_PRIMARY;
3827 nr_pages = gpu_addr_mask_to_cs_size(pvt, dct, cs_mode, csrow_nr);
3828 nr_pages <<= 20 - PAGE_SHIFT;
3830 edac_dbg(0, "csrow: %d, channel: %d\n", csrow_nr, dct);
3831 edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);
3836 static void gpu_init_csrows(struct mem_ctl_info *mci)
3838 struct amd64_pvt *pvt = mci->pvt_info;
3839 struct dimm_info *dimm;
3843 for_each_chip_select(cs, umc, pvt) {
3844 if (!csrow_enabled(cs, umc, pvt))
3847 dimm = mci->csrows[umc]->channels[cs]->dimm;
3849 edac_dbg(1, "MC node: %d, csrow: %d\n",
3850 pvt->mc_node_id, cs);
3852 dimm->nr_pages = gpu_get_csrow_nr_pages(pvt, umc, cs);
3853 dimm->edac_mode = EDAC_SECDED;
3854 dimm->mtype = MEM_HBM2;
3855 dimm->dtype = DEV_X16;
3861 static void gpu_setup_mci_misc_attrs(struct mem_ctl_info *mci)
3863 struct amd64_pvt *pvt = mci->pvt_info;
3865 mci->mtype_cap = MEM_FLAG_HBM2;
3866 mci->edac_ctl_cap = EDAC_FLAG_SECDED;
3868 mci->edac_cap = EDAC_FLAG_EC;
3869 mci->mod_name = EDAC_MOD_STR;
3870 mci->ctl_name = pvt->ctl_name;
3871 mci->dev_name = pci_name(pvt->F3);
3872 mci->ctl_page_to_phys = NULL;
3874 gpu_init_csrows(mci);
3877 /* ECC is enabled by default on GPU nodes */
3878 static bool gpu_ecc_enabled(struct amd64_pvt *pvt)
3883 static inline u32 gpu_get_umc_base(u8 umc, u8 channel)
3886 * On CPUs, there is one channel per UMC, so UMC numbering equals
3887 * channel numbering. On GPUs, there are eight channels per UMC,
3888 * so the channel numbering is different from UMC numbering.
3890 * On CPU nodes channels are selected in 6th nibble
3891 * UMC chY[3:0]= [(chY*2 + 1) : (chY*2)]50000;
3893 * On GPU nodes channels are selected in 3rd nibble
3894 * HBM chX[3:0]= [Y ]5X[3:0]000;
3895 * HBM chX[7:4]= [Y+1]5X[3:0]000
/* Two UMC instance IDs map onto one physical UMC; see gpu_get_err_info() above. */
umc *= 2;
if (channel >= 4)
umc++;
3902 return 0x50000 + (umc << 20) + ((channel % 4) << 12);
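/*
* Example (assuming the instance-ID adjustment above): gpu_get_umc_base(1, 5)
* turns umc into 3, giving 0x50000 + (3 << 20) + (1 << 12) = 0x351000, i.e.
* the 0x0035[3:0]000 range noted before gpu_get_err_info().
*/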
3905 static void gpu_read_mc_regs(struct amd64_pvt *pvt)
3907 u8 nid = pvt->mc_node_id;
3908 struct amd64_umc *umc;
3911 /* Read registers from each UMC */
3913 umc_base = gpu_get_umc_base(i, 0);
3916 amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &umc->umc_cfg);
3917 amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &umc->sdp_ctrl);
3918 amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &umc->ecc_ctrl);
3922 static void gpu_read_base_mask(struct amd64_pvt *pvt)
3924 u32 base_reg, mask_reg;
3929 for_each_chip_select(cs, umc, pvt) {
3930 base_reg = gpu_get_umc_base(umc, cs) + UMCCH_BASE_ADDR;
3931 base = &pvt->csels[umc].csbases[cs];
3933 if (!amd_smn_read(pvt->mc_node_id, base_reg, base)) {
3934 edac_dbg(0, " DCSB%d[%d]=0x%08x reg: 0x%x\n",
3935 umc, cs, *base, base_reg);
3938 mask_reg = gpu_get_umc_base(umc, cs) + UMCCH_ADDR_MASK;
3939 mask = &pvt->csels[umc].csmasks[cs];
3941 if (!amd_smn_read(pvt->mc_node_id, mask_reg, mask)) {
3942 edac_dbg(0, " DCSM%d[%d]=0x%08x reg: 0x%x\n",
3943 umc, cs, *mask, mask_reg);
3949 static void gpu_prep_chip_selects(struct amd64_pvt *pvt)
3954 pvt->csels[umc].b_cnt = 8;
3955 pvt->csels[umc].m_cnt = 8;
3959 static int gpu_hw_info_get(struct amd64_pvt *pvt)
3963 ret = gpu_get_node_map();
3967 pvt->umc = kcalloc(pvt->max_mcs, sizeof(struct amd64_umc), GFP_KERNEL);
3971 gpu_prep_chip_selects(pvt);
3972 gpu_read_base_mask(pvt);
3973 gpu_read_mc_regs(pvt);
3978 static void hw_info_put(struct amd64_pvt *pvt)
3980 pci_dev_put(pvt->F1);
3981 pci_dev_put(pvt->F2);
3985 static struct low_ops umc_ops = {
3986 .hw_info_get = umc_hw_info_get,
3987 .ecc_enabled = umc_ecc_enabled,
3988 .setup_mci_misc_attrs = umc_setup_mci_misc_attrs,
3989 .dump_misc_regs = umc_dump_misc_regs,
3990 .get_err_info = umc_get_err_info,
3993 static struct low_ops gpu_ops = {
3994 .hw_info_get = gpu_hw_info_get,
3995 .ecc_enabled = gpu_ecc_enabled,
3996 .setup_mci_misc_attrs = gpu_setup_mci_misc_attrs,
3997 .dump_misc_regs = gpu_dump_misc_regs,
3998 .get_err_info = gpu_get_err_info,
4001 /* Use Family 16h versions for defaults and adjust as needed below. */
4002 static struct low_ops dct_ops = {
4003 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
4004 .dbam_to_cs = f16_dbam_to_chip_select,
4005 .hw_info_get = dct_hw_info_get,
4006 .ecc_enabled = dct_ecc_enabled,
4007 .setup_mci_misc_attrs = dct_setup_mci_misc_attrs,
4008 .dump_misc_regs = dct_dump_misc_regs,
4011 static int per_family_init(struct amd64_pvt *pvt)
4013 pvt->ext_model = boot_cpu_data.x86_model >> 4;
4014 pvt->stepping = boot_cpu_data.x86_stepping;
4015 pvt->model = boot_cpu_data.x86_model;
4016 pvt->fam = boot_cpu_data.x86;
4020 * Decide on which ops group to use here and do any family/model overrides below.
4023 if (pvt->fam >= 0x17)
4024 pvt->ops = &umc_ops;
4026 pvt->ops = &dct_ops;
4030 pvt->ctl_name = (pvt->ext_model >= K8_REV_F) ?
4031 "K8 revF or later" : "K8 revE or earlier";
4032 pvt->f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP;
4033 pvt->f2_id = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL;
4034 pvt->ops->map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow;
4035 pvt->ops->dbam_to_cs = k8_dbam_to_chip_select;
4039 pvt->ctl_name = "F10h";
4040 pvt->f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP;
4041 pvt->f2_id = PCI_DEVICE_ID_AMD_10H_NB_DRAM;
4042 pvt->ops->dbam_to_cs = f10_dbam_to_chip_select;
4046 switch (pvt->model) {
4048 pvt->ctl_name = "F15h_M30h";
4049 pvt->f1_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1;
4050 pvt->f2_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F2;
4053 pvt->ctl_name = "F15h_M60h";
4054 pvt->f1_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1;
4055 pvt->f2_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F2;
4056 pvt->ops->dbam_to_cs = f15_m60h_dbam_to_chip_select;
4059 /* Richland is only client */
4062 pvt->ctl_name = "F15h";
4063 pvt->f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1;
4064 pvt->f2_id = PCI_DEVICE_ID_AMD_15H_NB_F2;
4065 pvt->ops->dbam_to_cs = f15_dbam_to_chip_select;
4071 switch (pvt->model) {
4073 pvt->ctl_name = "F16h_M30h";
4074 pvt->f1_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F1;
4075 pvt->f2_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F2;
4078 pvt->ctl_name = "F16h";
4079 pvt->f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1;
4080 pvt->f2_id = PCI_DEVICE_ID_AMD_16H_NB_F2;
4086 switch (pvt->model) {
4088 pvt->ctl_name = "F17h_M10h";
4091 pvt->ctl_name = "F17h_M30h";
4095 pvt->ctl_name = "F17h_M60h";
4098 pvt->ctl_name = "F17h_M70h";
4101 pvt->ctl_name = "F17h";
4107 pvt->ctl_name = "F18h";
4111 switch (pvt->model) {
4113 pvt->ctl_name = "F19h";
4117 pvt->ctl_name = "F19h_M10h";
4119 pvt->flags.zn_regs_v2 = 1;
4122 pvt->ctl_name = "F19h_M20h";
4125 if (pvt->F3->device == PCI_DEVICE_ID_AMD_MI200_DF_F3) {
4126 pvt->ctl_name = "MI200";
4128 pvt->ops = &gpu_ops;
4130 pvt->ctl_name = "F19h_M30h";
4135 pvt->ctl_name = "F19h_M50h";
4138 pvt->ctl_name = "F19h_M60h";
4139 pvt->flags.zn_regs_v2 = 1;
4142 pvt->ctl_name = "F19h_M70h";
4143 pvt->flags.zn_regs_v2 = 1;
4146 pvt->ctl_name = "F19h_MA0h";
4148 pvt->flags.zn_regs_v2 = 1;
4154 switch (pvt->model) {
4156 pvt->ctl_name = "F1Ah";
4158 pvt->flags.zn_regs_v2 = 1;
4161 pvt->ctl_name = "F1Ah_M40h";
4162 pvt->flags.zn_regs_v2 = 1;
4168 amd64_err("Unsupported family!\n");
4175 static const struct attribute_group *amd64_edac_attr_groups[] = {
4176 #ifdef CONFIG_EDAC_DEBUG
4183 static int init_one_instance(struct amd64_pvt *pvt)
4185 struct mem_ctl_info *mci = NULL;
4186 struct edac_mc_layer layers[2];
4190 * On heterogeneous (GPU) nodes, the sizes of the EDAC CHIP_SELECT and CHANNEL
4191 * layers are swapped relative to CPU nodes so the geometry fits the layers.
4193 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
4194 layers[0].size = (pvt->F3->device == PCI_DEVICE_ID_AMD_MI200_DF_F3) ?
4195 pvt->max_mcs : pvt->csels[0].b_cnt;
4196 layers[0].is_virt_csrow = true;
4197 layers[1].type = EDAC_MC_LAYER_CHANNEL;
4198 layers[1].size = (pvt->F3->device == PCI_DEVICE_ID_AMD_MI200_DF_F3) ?
4199 pvt->csels[0].b_cnt : pvt->max_mcs;
4200 layers[1].is_virt_csrow = false;
4202 mci = edac_mc_alloc(pvt->mc_node_id, ARRAY_SIZE(layers), layers, 0);
4206 mci->pvt_info = pvt;
4207 mci->pdev = &pvt->F3->dev;
4209 pvt->ops->setup_mci_misc_attrs(mci);
4212 if (edac_mc_add_mc_with_groups(mci, amd64_edac_attr_groups)) {
4213 edac_dbg(1, "failed edac_mc_add_mc()\n");
4221 static bool instance_has_memory(struct amd64_pvt *pvt)
4223 bool cs_enabled = false;
4224 int cs = 0, dct = 0;
4226 for (dct = 0; dct < pvt->max_mcs; dct++) {
4227 for_each_chip_select(cs, dct, pvt)
4228 cs_enabled |= csrow_enabled(cs, dct, pvt);
4234 static int probe_one_instance(unsigned int nid)
4236 struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
4237 struct amd64_pvt *pvt = NULL;
4238 struct ecc_settings *s;
4242 s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
4248 pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
4252 pvt->mc_node_id = nid;
4255 ret = per_family_init(pvt);
4259 ret = pvt->ops->hw_info_get(pvt);
4264 if (!instance_has_memory(pvt)) {
4265 amd64_info("Node %d: No DIMMs detected.\n", nid);
4269 if (!pvt->ops->ecc_enabled(pvt)) {
4272 if (!ecc_enable_override)
4275 if (boot_cpu_data.x86 >= 0x17) {
4276 amd64_warn("Forcing ECC on is not recommended on newer systems. Please enable ECC in BIOS.");
4279 amd64_warn("Forcing ECC on!\n");
4281 if (!enable_ecc_error_reporting(s, nid, F3))
4285 ret = init_one_instance(pvt);
4287 amd64_err("Error probing instance: %d\n", nid);
4289 if (boot_cpu_data.x86 < 0x17)
4290 restore_ecc_error_reporting(s, nid, F3);
4295 amd64_info("%s detected (node %d).\n", pvt->ctl_name, pvt->mc_node_id);
4297 /* Display and decode various registers for debug purposes. */
4298 pvt->ops->dump_misc_regs(pvt);
4308 ecc_stngs[nid] = NULL;
4314 static void remove_one_instance(unsigned int nid)
4316 struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
4317 struct ecc_settings *s = ecc_stngs[nid];
4318 struct mem_ctl_info *mci;
4319 struct amd64_pvt *pvt;
4321 /* Remove from EDAC CORE tracking list */
4322 mci = edac_mc_del_mc(&F3->dev);
4326 pvt = mci->pvt_info;
4328 restore_ecc_error_reporting(s, nid, F3);
4330 kfree(ecc_stngs[nid]);
4331 ecc_stngs[nid] = NULL;
4333 /* Free the EDAC CORE resources */
4334 mci->pvt_info = NULL;
4341 static void setup_pci_device(void)
4346 pci_ctl = edac_pci_create_generic_ctl(pci_ctl_dev, EDAC_MOD_STR);
4348 pr_warn("%s(): Unable to create PCI control\n", __func__);
4349 pr_warn("%s(): PCI error report via EDAC not set\n", __func__);
4353 static const struct x86_cpu_id amd64_cpuids[] = {
4354 X86_MATCH_VENDOR_FAM(AMD, 0x0F, NULL),
4355 X86_MATCH_VENDOR_FAM(AMD, 0x10, NULL),
4356 X86_MATCH_VENDOR_FAM(AMD, 0x15, NULL),
4357 X86_MATCH_VENDOR_FAM(AMD, 0x16, NULL),
4358 X86_MATCH_VENDOR_FAM(AMD, 0x17, NULL),
4359 X86_MATCH_VENDOR_FAM(HYGON, 0x18, NULL),
4360 X86_MATCH_VENDOR_FAM(AMD, 0x19, NULL),
4361 X86_MATCH_VENDOR_FAM(AMD, 0x1A, NULL),
4364 MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids);
4366 static int __init amd64_edac_init(void)
4372 if (ghes_get_devices())
4375 owner = edac_get_owner();
4376 if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
4379 if (!x86_match_cpu(amd64_cpuids))
4388 ecc_stngs = kcalloc(amd_nb_num(), sizeof(ecc_stngs[0]), GFP_KERNEL);
4392 msrs = msrs_alloc();
4396 for (i = 0; i < amd_nb_num(); i++) {
4397 err = probe_one_instance(i);
4399 /* unwind properly */
4401 remove_one_instance(i);
4407 if (!edac_has_mcs()) {
4412 /* register stuff with EDAC MCE */
4413 if (boot_cpu_data.x86 >= 0x17) {
4414 amd_register_ecc_decoder(decode_umc_error);
4416 amd_register_ecc_decoder(decode_bus_error);
4420 #ifdef CONFIG_X86_32
4421 amd64_err("%s on 32-bit is unsupported. USE AT YOUR OWN RISK!\n", EDAC_MOD_STR);
4439 static void __exit amd64_edac_exit(void)
4444 edac_pci_release_generic_ctl(pci_ctl);
4446 /* unregister from EDAC MCE */
4447 if (boot_cpu_data.x86 >= 0x17)
4448 amd_unregister_ecc_decoder(decode_umc_error);
4450 amd_unregister_ecc_decoder(decode_bus_error);
4452 for (i = 0; i < amd_nb_num(); i++)
4453 remove_one_instance(i);
4464 module_init(amd64_edac_init);
4465 module_exit(amd64_edac_exit);
4467 MODULE_LICENSE("GPL");
4468 MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, Dave Peterson, Thayne Harbaugh; AMD");
4469 MODULE_DESCRIPTION("MC support for AMD64 memory controllers");
4471 module_param(edac_op_state, int, 0444);
4472 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");