1 // SPDX-License-Identifier: GPL-2.0-only
2 #include "amd64_edac.h"
3 #include <asm/amd_nb.h>
5 static struct edac_pci_ctl_info *pci_ctl;
8 * Set by command line parameter. If BIOS has enabled the ECC, this override is
9 * cleared to prevent re-enabling the hardware by this driver.
11 static int ecc_enable_override;
12 module_param(ecc_enable_override, int, 0644);
14 static struct msr __percpu *msrs;
16 static struct amd64_family_type *fam_type;
19 static struct ecc_settings **ecc_stngs;
21 /* Device for the PCI component */
22 static struct device *pci_ctl_dev;
25 * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
26 * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
27 * or-higher value'.
28 *
29 * FIXME: Produce a better mapping/linearisation.
31 static const struct scrubrate {
32 u32 scrubval; /* bit pattern for scrub rate */
33 u32 bandwidth; /* bandwidth consumed (bytes/sec) */
35 { 0x01, 1600000000UL},
57 { 0x00, 0UL}, /* scrubbing off */
60 int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
61 u32 *val, const char *func)
65 err = pci_read_config_dword(pdev, offset, val);
67 amd64_warn("%s: error reading F%dx%03x.\n",
68 func, PCI_FUNC(pdev->devfn), offset);
73 int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
74 u32 val, const char *func)
78 err = pci_write_config_dword(pdev, offset, val);
80 amd64_warn("%s: error writing to F%dx%03x.\n",
81 func, PCI_FUNC(pdev->devfn), offset);
87 * Select DCT to which PCI cfg accesses are routed
89 static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
93 amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
94 reg &= (pvt->model == 0x30) ? ~3 : ~1;
96 amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
101 * Depending on the family, F2 DCT reads need special handling:
103 * K8: has a single DCT only and no address offsets >= 0x100
105 * F10h: each DCT has its own set of regs
109 * F16h: has only 1 DCT
111 * F15h: we select which DCT we access using F1x10C[DctCfgSel]
113 static inline int amd64_read_dct_pci_cfg(struct amd64_pvt *pvt, u8 dct,
114 int offset, u32 *val)
118 if (dct || offset >= 0x100)
125 * Note: If ganging is enabled, barring the regs
126 * F2x[1,0]98 and F2x[1,0]9C; reads to F2x1xx
127 * return 0. (cf. Section 2.8.1 F10h BKDG)
129 if (dct_ganging_enabled(pvt))
138 * F15h: F2x1xx addresses do not map explicitly to DCT1.
139 * We should select which DCT we access using F1x10C[DctCfgSel]
141 dct = (dct && pvt->model == 0x30) ? 3 : dct;
142 f15h_select_dct(pvt, dct);
153 return amd64_read_pci_cfg(pvt->F2, offset, val);
157 * Memory scrubber control interface. For K8, memory scrubbing is handled by
158 * hardware and can involve L2 cache, dcache as well as the main memory. With
159 * F10, this is extended to L3 cache scrubbing on CPU models sporting that
160 * functionality.
162 * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
163 * (dram) over to cache lines. This is nasty, so we will use bandwidth in
164 * bytes/sec for the setting.
166 * Currently, we only do dram scrubbing. If the scrubbing is done in software on
167 * other archs, we might not have access to the caches directly.
170 static inline void __f17h_set_scrubval(struct amd64_pvt *pvt, u32 scrubval)
173 * Fam17h supports scrub values between 0x5 and 0x14. Also, the values
174 * are shifted down by 0x5, so scrubval 0x5 is written to the register
175 * as 0x0, scrubval 0x6 as 0x1, etc.
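/*
 * Worked example (illustrative, not from the BKDG): a scrubval of 0x10
 * lies inside the valid [0x5, 0x14] window and is written to the
 * register as 0x10 - 0x5 = 0xB.
 */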
177 if (scrubval >= 0x5 && scrubval <= 0x14) {
179 pci_write_bits32(pvt->F6, F17H_SCR_LIMIT_ADDR, scrubval, 0xF);
180 pci_write_bits32(pvt->F6, F17H_SCR_BASE_ADDR, 1, 0x1);
182 pci_write_bits32(pvt->F6, F17H_SCR_BASE_ADDR, 0, 0x1);
186 * Scan the scrub rate mapping table for a close or matching bandwidth value to
187 * issue. If the requested rate is too big, then use the last maximum value found.
189 static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate)
195 * map the configured rate (new_bw) to a value specific to the AMD64
196 * memory controller and apply to register. Search for the first
197 * bandwidth entry that is greater than or equal to the setting requested
198 * and program that. If at last entry, turn off DRAM scrubbing.
200 * If no suitable bandwidth is found, turn off DRAM scrubbing entirely
201 * by falling back to the last element in scrubrates[].
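/*
 * Worked example (illustrative): a requested new_bw of 0 matches no
 * recommended entry, so the loop below runs off the end and falls back
 * to the terminating { 0x00, 0UL } element, which disables scrubbing.
 */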
203 for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
205 * skip scrub rates which aren't recommended
206 * (see F10 BKDG, F3x58)
208 if (scrubrates[i].scrubval < min_rate)
211 if (scrubrates[i].bandwidth <= new_bw)
215 scrubval = scrubrates[i].scrubval;
218 __f17h_set_scrubval(pvt, scrubval);
219 } else if (pvt->fam == 0x15 && pvt->model == 0x60) {
220 f15h_select_dct(pvt, 0);
221 pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
222 f15h_select_dct(pvt, 1);
223 pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
225 pci_write_bits32(pvt->F3, SCRCTRL, scrubval, 0x001F);
229 return scrubrates[i].bandwidth;
234 static int set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
236 struct amd64_pvt *pvt = mci->pvt_info;
237 u32 min_scrubrate = 0x5;
242 if (pvt->fam == 0x15) {
244 if (pvt->model < 0x10)
245 f15h_select_dct(pvt, 0);
247 if (pvt->model == 0x60)
250 return __set_scrub_rate(pvt, bw, min_scrubrate);
253 static int get_scrub_rate(struct mem_ctl_info *mci)
255 struct amd64_pvt *pvt = mci->pvt_info;
256 int i, retval = -EINVAL;
260 amd64_read_pci_cfg(pvt->F6, F17H_SCR_BASE_ADDR, &scrubval);
261 if (scrubval & BIT(0)) {
262 amd64_read_pci_cfg(pvt->F6, F17H_SCR_LIMIT_ADDR, &scrubval);
268 } else if (pvt->fam == 0x15) {
270 if (pvt->model < 0x10)
271 f15h_select_dct(pvt, 0);
273 if (pvt->model == 0x60)
274 amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval);
276 amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
278 amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
281 scrubval = scrubval & 0x001F;
283 for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
284 if (scrubrates[i].scrubval == scrubval) {
285 retval = scrubrates[i].bandwidth;
293 * returns true if the SysAddr given by sys_addr matches the
294 * DRAM base/limit associated with node_id
296 static bool base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, u8 nid)
300 /* The K8 treats this as a 40-bit value. However, bits 63-40 will be
301 * all ones if the most significant implemented address bit is 1.
302 * Here we discard bits 63-40. See section 3.4.2 of AMD publication
303 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
304 * Application Programming.
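/*
 * Example (illustrative): a sign-extended SysAddr of 0xffffff8000000000
 * is masked down to addr = 0x0000008000000000 before the base/limit
 * comparison below.
 */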
306 addr = sys_addr & 0x000000ffffffffffull;
308 return ((addr >= get_dram_base(pvt, nid)) &&
309 (addr <= get_dram_limit(pvt, nid)));
313 * Attempt to map a SysAddr to a node. On success, return a pointer to the
314 * mem_ctl_info structure for the node that the SysAddr maps to.
316 * On failure, return NULL.
318 static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
319 u64 sys_addr)
321 struct amd64_pvt *pvt;
326 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
327 * 3.4.4.2) registers to map the SysAddr to a node ID.
332 * The value of this field should be the same for all DRAM Base
333 * registers. Therefore we arbitrarily choose to read it from the
334 * register for node 0.
336 intlv_en = dram_intlv_en(pvt, 0);
339 for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
340 if (base_limit_match(pvt, sys_addr, node_id))
346 if (unlikely((intlv_en != 0x01) &&
347 (intlv_en != 0x03) &&
348 (intlv_en != 0x07))) {
349 amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);
353 bits = (((u32) sys_addr) >> 12) & intlv_en;
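/*
 * Example (illustrative): with intlv_en = 0x03 (four-node interleave
 * over two bits), 'bits' holds SysAddr[13:12], which is then compared
 * against each node's IntlvSel field in the loop below.
 */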
355 for (node_id = 0; ; ) {
356 if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
357 break; /* intlv_sel field matches */
359 if (++node_id >= DRAM_RANGES)
363 /* sanity test for sys_addr */
364 if (unlikely(!base_limit_match(pvt, sys_addr, node_id))) {
365 amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address "
366 "range for node %d with node interleaving enabled.\n",
367 __func__, sys_addr, node_id);
372 return edac_mc_find((int)node_id);
375 edac_dbg(2, "sys_addr 0x%lx doesn't match any node\n",
376 (unsigned long)sys_addr);
382 * compute the CS base address of the @csrow on the DRAM controller @dct.
383 * For details see F2x[5C:40] in the processor's BKDG
385 static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
386 u64 *base, u64 *mask)
388 u64 csbase, csmask, base_bits, mask_bits;
391 if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
392 csbase = pvt->csels[dct].csbases[csrow];
393 csmask = pvt->csels[dct].csmasks[csrow];
394 base_bits = GENMASK_ULL(31, 21) | GENMASK_ULL(15, 9);
395 mask_bits = GENMASK_ULL(29, 21) | GENMASK_ULL(15, 9);
399 * F16h and F15h, models 30h and later need two addr_shift values:
400 * 8 for high and 6 for low (cf. F16h BKDG).
402 } else if (pvt->fam == 0x16 ||
403 (pvt->fam == 0x15 && pvt->model >= 0x30)) {
404 csbase = pvt->csels[dct].csbases[csrow];
405 csmask = pvt->csels[dct].csmasks[csrow >> 1];
407 *base = (csbase & GENMASK_ULL(15, 5)) << 6;
408 *base |= (csbase & GENMASK_ULL(30, 19)) << 8;
411 /* poke holes for the csmask */
412 *mask &= ~((GENMASK_ULL(15, 5) << 6) |
413 (GENMASK_ULL(30, 19) << 8));
415 *mask |= (csmask & GENMASK_ULL(15, 5)) << 6;
416 *mask |= (csmask & GENMASK_ULL(30, 19)) << 8;
420 csbase = pvt->csels[dct].csbases[csrow];
421 csmask = pvt->csels[dct].csmasks[csrow >> 1];
424 if (pvt->fam == 0x15)
425 base_bits = mask_bits =
426 GENMASK_ULL(30, 19) | GENMASK_ULL(13, 5);
428 base_bits = mask_bits =
429 GENMASK_ULL(28, 19) | GENMASK_ULL(13, 5);
432 *base = (csbase & base_bits) << addr_shift;
435 /* poke holes for the csmask */
436 *mask &= ~(mask_bits << addr_shift);
438 *mask |= (csmask & mask_bits) << addr_shift;
441 #define for_each_chip_select(i, dct, pvt) \
442 for (i = 0; i < pvt->csels[dct].b_cnt; i++)
444 #define chip_select_base(i, dct, pvt) \
445 pvt->csels[dct].csbases[i]
447 #define for_each_chip_select_mask(i, dct, pvt) \
448 for (i = 0; i < pvt->csels[dct].m_cnt; i++)
450 #define for_each_umc(i) \
451 for (i = 0; i < fam_type->max_mcs; i++)
454 * @input_addr is an InputAddr associated with the node given by mci. Return the
455 * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
457 static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
459 struct amd64_pvt *pvt;
465 for_each_chip_select(csrow, 0, pvt) {
466 if (!csrow_enabled(csrow, 0, pvt))
469 get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
473 if ((input_addr & mask) == (base & mask)) {
474 edac_dbg(2, "InputAddr 0x%lx matches csrow %d (node %d)\n",
475 (unsigned long)input_addr, csrow,
481 edac_dbg(2, "no matching csrow for InputAddr 0x%lx (MC node %d)\n",
482 (unsigned long)input_addr, pvt->mc_node_id);
488 * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
489 * for the node represented by mci. Info is passed back in *hole_base,
490 * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if
491 * info is invalid. Info may be invalid for either of the following reasons:
493 * - The revision of the node is not E or greater. In this case, the DRAM Hole
494 * Address Register does not exist.
496 * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
497 * indicating that its contents are not valid.
499 * The values passed back in *hole_base, *hole_offset, and *hole_size are
500 * complete 32-bit values despite the fact that the bitfields in the DHAR
501 * only represent bits 31-24 of the base and offset values.
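/*
 * Worked example (illustrative): a DHAR base field of 0xc0 yields
 * hole_base = 0xc0000000 and hole_size = 0x100000000 - 0xc0000000 =
 * 0x40000000, i.e. 1GB of hoisted addresses.
 */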
503 static int get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
504 u64 *hole_offset, u64 *hole_size)
506 struct amd64_pvt *pvt = mci->pvt_info;
508 /* only revE and later have the DRAM Hole Address Register */
509 if (pvt->fam == 0xf && pvt->ext_model < K8_REV_E) {
510 edac_dbg(1, " revision %d for node %d does not support DHAR\n",
511 pvt->ext_model, pvt->mc_node_id);
515 /* valid for Fam10h and above */
516 if (pvt->fam >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
517 edac_dbg(1, " Dram Memory Hoisting is DISABLED on this system\n");
521 if (!dhar_valid(pvt)) {
522 edac_dbg(1, " Dram Memory Hoisting is DISABLED on this node %d\n",
523 pvt->mc_node_id);
527 /* This node has Memory Hoisting */
529 /* +------------------+--------------------+--------------------+-----
530  * | memory           | DRAM hole          | relocated          |
531  * | [0, (x - 1)]     | [x, 0xffffffff]    | addresses from     |
532  * |                  |                    | DRAM hole          |
533  * |                  |                    | [0x100000000,      |
534  * |                  |                    |  (0x100000000+     |
535  * |                  |                    |   (0xffffffff-x))] |
536  * +------------------+--------------------+--------------------+-----
538 * Above is a diagram of physical memory showing the DRAM hole and the
539 * relocated addresses from the DRAM hole. As shown, the DRAM hole
540 * starts at address x (the base address) and extends through address
541 * 0xffffffff. The DRAM Hole Address Register (DHAR) relocates the
542 * addresses in the hole so that they start at 0x100000000.
545 *hole_base = dhar_base(pvt);
546 *hole_size = (1ULL << 32) - *hole_base;
548 *hole_offset = (pvt->fam > 0xf) ? f10_dhar_offset(pvt)
549 : k8_dhar_offset(pvt);
551 edac_dbg(1, " DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
552 pvt->mc_node_id, (unsigned long)*hole_base,
553 (unsigned long)*hole_offset, (unsigned long)*hole_size);
558 #ifdef CONFIG_EDAC_DEBUG
559 #define EDAC_DCT_ATTR_SHOW(reg) \
560 static ssize_t reg##_show(struct device *dev, \
561 struct device_attribute *mattr, char *data) \
563 struct mem_ctl_info *mci = to_mci(dev); \
564 struct amd64_pvt *pvt = mci->pvt_info; \
566 return sprintf(data, "0x%016llx\n", (u64)pvt->reg); \
569 EDAC_DCT_ATTR_SHOW(dhar);
570 EDAC_DCT_ATTR_SHOW(dbam0);
571 EDAC_DCT_ATTR_SHOW(top_mem);
572 EDAC_DCT_ATTR_SHOW(top_mem2);
574 static ssize_t dram_hole_show(struct device *dev, struct device_attribute *mattr,
575 char *data)
577 struct mem_ctl_info *mci = to_mci(dev);
583 get_dram_hole_info(mci, &hole_base, &hole_offset, &hole_size);
585 return sprintf(data, "%llx %llx %llx\n", hole_base, hole_offset,
586 hole_size);
590 * update NUM_DBG_ATTRS in case you add new members
592 static DEVICE_ATTR(dhar, S_IRUGO, dhar_show, NULL);
593 static DEVICE_ATTR(dbam, S_IRUGO, dbam0_show, NULL);
594 static DEVICE_ATTR(topmem, S_IRUGO, top_mem_show, NULL);
595 static DEVICE_ATTR(topmem2, S_IRUGO, top_mem2_show, NULL);
596 static DEVICE_ATTR_RO(dram_hole);
598 static struct attribute *dbg_attrs[] = {
599 &dev_attr_dhar.attr,
600 &dev_attr_dbam.attr,
601 &dev_attr_topmem.attr,
602 &dev_attr_topmem2.attr,
603 &dev_attr_dram_hole.attr,
607 static const struct attribute_group dbg_group = {
608 .attrs = dbg_attrs,
611 static ssize_t inject_section_show(struct device *dev,
612 struct device_attribute *mattr, char *buf)
614 struct mem_ctl_info *mci = to_mci(dev);
615 struct amd64_pvt *pvt = mci->pvt_info;
616 return sprintf(buf, "0x%x\n", pvt->injection.section);
620 * store error injection section value which refers to one of 4 16-byte sections
621 * within a 64-byte cacheline
625 static ssize_t inject_section_store(struct device *dev,
626 struct device_attribute *mattr,
627 const char *data, size_t count)
629 struct mem_ctl_info *mci = to_mci(dev);
630 struct amd64_pvt *pvt = mci->pvt_info;
634 ret = kstrtoul(data, 10, &value);
639 amd64_warn("%s: invalid section 0x%lx\n", __func__, value);
643 pvt->injection.section = (u32) value;
647 static ssize_t inject_word_show(struct device *dev,
648 struct device_attribute *mattr, char *buf)
650 struct mem_ctl_info *mci = to_mci(dev);
651 struct amd64_pvt *pvt = mci->pvt_info;
652 return sprintf(buf, "0x%x\n", pvt->injection.word);
656 * store error injection word value which refers to one of 9 16-bit words of the
657 * 16-byte (128-bit + ECC bits) section
661 static ssize_t inject_word_store(struct device *dev,
662 struct device_attribute *mattr,
663 const char *data, size_t count)
665 struct mem_ctl_info *mci = to_mci(dev);
666 struct amd64_pvt *pvt = mci->pvt_info;
670 ret = kstrtoul(data, 10, &value);
675 amd64_warn("%s: invalid word 0x%lx\n", __func__, value);
679 pvt->injection.word = (u32) value;
683 static ssize_t inject_ecc_vector_show(struct device *dev,
684 struct device_attribute *mattr,
685 char *buf)
687 struct mem_ctl_info *mci = to_mci(dev);
688 struct amd64_pvt *pvt = mci->pvt_info;
689 return sprintf(buf, "0x%x\n", pvt->injection.bit_map);
693 * store 16 bit error injection vector which enables injecting errors to the
694 * corresponding bit within the error injection word above. When used during a
695 * DRAM ECC read, it holds the contents of the DRAM ECC bits.
697 static ssize_t inject_ecc_vector_store(struct device *dev,
698 struct device_attribute *mattr,
699 const char *data, size_t count)
701 struct mem_ctl_info *mci = to_mci(dev);
702 struct amd64_pvt *pvt = mci->pvt_info;
706 ret = kstrtoul(data, 16, &value);
710 if (value & 0xFFFF0000) {
711 amd64_warn("%s: invalid EccVector: 0x%lx\n", __func__, value);
715 pvt->injection.bit_map = (u32) value;
720 * Do a DRAM ECC read. Assemble staged values in the pvt area, format into
721 * fields needed by the injection registers and read the NB Array Data Port.
723 static ssize_t inject_read_store(struct device *dev,
724 struct device_attribute *mattr,
725 const char *data, size_t count)
727 struct mem_ctl_info *mci = to_mci(dev);
728 struct amd64_pvt *pvt = mci->pvt_info;
730 u32 section, word_bits;
733 ret = kstrtoul(data, 10, &value);
737 /* Form value to choose 16-byte section of cacheline */
738 section = F10_NB_ARRAY_DRAM | SET_NB_ARRAY_ADDR(pvt->injection.section);
740 amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section);
742 word_bits = SET_NB_DRAM_INJECTION_READ(pvt->injection);
744 /* Issue 'word' and 'bit' along with the READ request */
745 amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
747 edac_dbg(0, "section=0x%x word_bits=0x%x\n", section, word_bits);
753 * Do a DRAM ECC write. Assemble staged values in the pvt area and format into
754 * fields needed by the injection registers.
756 static ssize_t inject_write_store(struct device *dev,
757 struct device_attribute *mattr,
758 const char *data, size_t count)
760 struct mem_ctl_info *mci = to_mci(dev);
761 struct amd64_pvt *pvt = mci->pvt_info;
762 u32 section, word_bits, tmp;
766 ret = kstrtoul(data, 10, &value);
770 /* Form value to choose 16-byte section of cacheline */
771 section = F10_NB_ARRAY_DRAM | SET_NB_ARRAY_ADDR(pvt->injection.section);
773 amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section);
775 word_bits = SET_NB_DRAM_INJECTION_WRITE(pvt->injection);
777 pr_notice_once("Don't forget to decrease MCE polling interval in\n"
778 "/sys/bus/machinecheck/devices/machinecheck<CPUNUM>/check_interval\n"
779 "so that you can get the error report faster.\n");
781 on_each_cpu(disable_caches, NULL, 1);
783 /* Issue 'word' and 'bit' along with the WRITE request */
784 amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
787 /* wait until injection happens */
788 amd64_read_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, &tmp);
789 if (tmp & F10_NB_ARR_ECC_WR_REQ) {
794 on_each_cpu(enable_caches, NULL, 1);
796 edac_dbg(0, "section=0x%x word_bits=0x%x\n", section, word_bits);
802 * update NUM_INJ_ATTRS in case you add new members
805 static DEVICE_ATTR_RW(inject_section);
806 static DEVICE_ATTR_RW(inject_word);
807 static DEVICE_ATTR_RW(inject_ecc_vector);
808 static DEVICE_ATTR_WO(inject_write);
809 static DEVICE_ATTR_WO(inject_read);
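/*
 * Typical injection sequence via these sysfs attributes (illustrative
 * sketch; the mc0 path is an assumption and depends on which node is
 * being targeted):
 *
 *   echo 0   > /sys/devices/system/edac/mc/mc0/inject_section
 *   echo 2   > /sys/devices/system/edac/mc/mc0/inject_word
 *   echo 0x1 > /sys/devices/system/edac/mc/mc0/inject_ecc_vector
 *   echo 1   > /sys/devices/system/edac/mc/mc0/inject_write
 *   echo 1   > /sys/devices/system/edac/mc/mc0/inject_read
 */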
811 static struct attribute *inj_attrs[] = {
812 &dev_attr_inject_section.attr,
813 &dev_attr_inject_word.attr,
814 &dev_attr_inject_ecc_vector.attr,
815 &dev_attr_inject_write.attr,
816 &dev_attr_inject_read.attr,
820 static umode_t inj_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
822 struct device *dev = kobj_to_dev(kobj);
823 struct mem_ctl_info *mci = container_of(dev, struct mem_ctl_info, dev);
824 struct amd64_pvt *pvt = mci->pvt_info;
826 /* Families which have that injection hw */
827 if (pvt->fam >= 0x10 && pvt->fam <= 0x16)
833 static const struct attribute_group inj_group = {
834 .attrs = inj_attrs,
835 .is_visible = inj_is_visible,
837 #endif /* CONFIG_EDAC_DEBUG */
840 * Return the DramAddr that the SysAddr given by @sys_addr maps to. It is
841 * assumed that sys_addr maps to the node given by mci.
843 * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
844 * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
845 * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
846 * then it is also involved in translating a SysAddr to a DramAddr. Sections
847 * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
848 * These parts of the documentation are unclear. I interpret them as follows:
850 * When node n receives a SysAddr, it processes the SysAddr as follows:
852 * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
853 * Limit registers for node n. If the SysAddr is not within the range
854 * specified by the base and limit values, then node n ignores the SysAddr
855 * (since it does not map to node n). Otherwise continue to step 2 below.
857 * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
858 * disabled so skip to step 3 below. Otherwise see if the SysAddr is within
859 * the range of relocated addresses (starting at 0x100000000) from the DRAM
860 * hole. If not, skip to step 3 below. Else get the value of the
861 * DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
862 * offset defined by this value from the SysAddr.
864 * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
865 * Base register for node n. To obtain the DramAddr, subtract the base
866 * address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
868 static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
870 struct amd64_pvt *pvt = mci->pvt_info;
871 u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
874 dram_base = get_dram_base(pvt, pvt->mc_node_id);
876 ret = get_dram_hole_info(mci, &hole_base, &hole_offset, &hole_size);
878 if ((sys_addr >= (1ULL << 32)) &&
879 (sys_addr < ((1ULL << 32) + hole_size))) {
880 /* use DHAR to translate SysAddr to DramAddr */
881 dram_addr = sys_addr - hole_offset;
883 edac_dbg(2, "using DHAR to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
884 (unsigned long)sys_addr,
885 (unsigned long)dram_addr);
892 * Translate the SysAddr to a DramAddr as shown near the start of
893 * section 3.4.4 (p. 70). Although sys_addr is a 64-bit value, the k8
894 * only deals with 40-bit values. Therefore we discard bits 63-40 of
895 * sys_addr below. If bit 39 of sys_addr is 1 then the bits we
896 * discard are all 1s. Otherwise the bits we discard are all 0s. See
897 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
898 * Programmer's Manual Volume 1 Application Programming.
900 dram_addr = (sys_addr & GENMASK_ULL(39, 0)) - dram_base;
902 edac_dbg(2, "using DRAM Base register to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
903 (unsigned long)sys_addr, (unsigned long)dram_addr);
908 * @intlv_en is the value of the IntlvEn field from a DRAM Base register
909 * (section 3.4.4.1). Return the number of bits from a SysAddr that are used
910 * for node interleaving.
912 static int num_node_interleave_bits(unsigned intlv_en)
914 static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
917 BUG_ON(intlv_en > 7);
918 n = intlv_shift_table[intlv_en];
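/*
 * Example (illustrative): intlv_en = 0x01/0x03/0x07 maps to 1/2/3
 * interleave bits (2-, 4- or 8-node interleaving); all other encodings
 * map to 0 bits.
 */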
922 /* Translate the DramAddr given by @dram_addr to an InputAddr. */
923 static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
925 struct amd64_pvt *pvt;
932 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
933 * concerning translating a DramAddr to an InputAddr.
935 intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
936 input_addr = ((dram_addr >> intlv_shift) & GENMASK_ULL(35, 12)) +
937 (dram_addr & 0xfff);
939 edac_dbg(2, " Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
940 intlv_shift, (unsigned long)dram_addr,
941 (unsigned long)input_addr);
947 * Translate the SysAddr represented by @sys_addr to an InputAddr. It is
948 * assumed that @sys_addr maps to the node given by mci.
950 static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
954 input_addr =
955 dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));
957 edac_dbg(2, "SysAddr 0x%lx translates to InputAddr 0x%lx\n",
958 (unsigned long)sys_addr, (unsigned long)input_addr);
963 /* Map the Error address to a PAGE and PAGE OFFSET. */
964 static inline void error_address_to_page_and_offset(u64 error_address,
965 struct err_info *err)
967 err->page = (u32) (error_address >> PAGE_SHIFT);
968 err->offset = ((u32) error_address) & ~PAGE_MASK;
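/*
 * Example (illustrative, 4K pages): error_address 0x12345678 yields
 * page 0x12345 and offset 0x678.
 */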
972 * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
973 * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
974 * of a node that detected an ECC memory error. mci represents the node that
975 * the error address maps to (possibly different from the node that detected
976 * the error). Return the number of the csrow that sys_addr maps to, or -1 on
977 * error.
979 static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
983 csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));
986 amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
987 "address 0x%lx\n", (unsigned long)sys_addr);
991 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
994 * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
995 * are ECC capable.
997 static unsigned long determine_edac_cap(struct amd64_pvt *pvt)
999 unsigned long edac_cap = EDAC_FLAG_NONE;
1003 u8 i, umc_en_mask = 0, dimm_ecc_en_mask = 0;
1006 if (!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT))
1009 umc_en_mask |= BIT(i);
1011 /* UMC Configuration bit 12 (DimmEccEn) */
1012 if (pvt->umc[i].umc_cfg & BIT(12))
1013 dimm_ecc_en_mask |= BIT(i);
1016 if (umc_en_mask == dimm_ecc_en_mask)
1017 edac_cap = EDAC_FLAG_SECDED;
1019 bit = (pvt->fam > 0xf || pvt->ext_model >= K8_REV_F)
1020 ? 19
1021 : 17;
1023 if (pvt->dclr0 & BIT(bit))
1024 edac_cap = EDAC_FLAG_SECDED;
1030 static void debug_display_dimm_sizes(struct amd64_pvt *, u8);
1032 static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
1034 edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);
1036 if (pvt->dram_type == MEM_LRDDR3) {
1037 u32 dcsm = pvt->csels[chan].csmasks[0];
1039 * It's assumed all LRDIMMs in a DCT are going to be of
1040 * the same 'type' until proven otherwise. So, use a cs
1041 * value of '0' here to get dcsm value.
1043 edac_dbg(1, " LRDIMM %dx rank multiply\n", (dcsm & 0x3));
1046 edac_dbg(1, "All DIMMs support ECC:%s\n",
1047 (dclr & BIT(19)) ? "yes" : "no");
1050 edac_dbg(1, " PAR/ERR parity: %s\n",
1051 (dclr & BIT(8)) ? "enabled" : "disabled");
1053 if (pvt->fam == 0x10)
1054 edac_dbg(1, " DCT 128bit mode width: %s\n",
1055 (dclr & BIT(11)) ? "128b" : "64b");
1057 edac_dbg(1, " x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
1058 (dclr & BIT(12)) ? "yes" : "no",
1059 (dclr & BIT(13)) ? "yes" : "no",
1060 (dclr & BIT(14)) ? "yes" : "no",
1061 (dclr & BIT(15)) ? "yes" : "no");
1064 #define CS_EVEN_PRIMARY BIT(0)
1065 #define CS_ODD_PRIMARY BIT(1)
1066 #define CS_EVEN_SECONDARY BIT(2)
1067 #define CS_ODD_SECONDARY BIT(3)
1069 #define CS_EVEN (CS_EVEN_PRIMARY | CS_EVEN_SECONDARY)
1070 #define CS_ODD (CS_ODD_PRIMARY | CS_ODD_SECONDARY)
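/*
 * Example (illustrative): a symmetric dual-rank DIMM reports
 * CS_EVEN_PRIMARY | CS_ODD_PRIMARY (0x3); an asymmetric dual-rank DIMM
 * whose second rank is described by the secondary mask additionally
 * sets CS_ODD_SECONDARY.
 */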
1072 static int f17_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt)
1076 if (csrow_enabled(2 * dimm, ctrl, pvt))
1077 cs_mode |= CS_EVEN_PRIMARY;
1079 if (csrow_enabled(2 * dimm + 1, ctrl, pvt))
1080 cs_mode |= CS_ODD_PRIMARY;
1082 /* Asymmetric dual-rank DIMM support. */
1083 if (csrow_sec_enabled(2 * dimm + 1, ctrl, pvt))
1084 cs_mode |= CS_ODD_SECONDARY;
1089 static void debug_display_dimm_sizes_df(struct amd64_pvt *pvt, u8 ctrl)
1091 int dimm, size0, size1, cs0, cs1, cs_mode;
1093 edac_printk(KERN_DEBUG, EDAC_MC, "UMC%d chip selects:\n", ctrl);
1095 for (dimm = 0; dimm < 2; dimm++) {
1096 cs0 = dimm * 2;
1097 cs1 = dimm * 2 + 1;
1099 cs_mode = f17_get_cs_mode(dimm, ctrl, pvt);
1101 size0 = pvt->ops->dbam_to_cs(pvt, ctrl, cs_mode, cs0);
1102 size1 = pvt->ops->dbam_to_cs(pvt, ctrl, cs_mode, cs1);
1104 amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
1105 cs0, size0, cs1, size1);
1110 static void __dump_misc_regs_df(struct amd64_pvt *pvt)
1112 struct amd64_umc *umc;
1113 u32 i, tmp, umc_base;
1116 umc_base = get_umc_base(i);
1119 edac_dbg(1, "UMC%d DIMM cfg: 0x%x\n", i, umc->dimm_cfg);
1120 edac_dbg(1, "UMC%d UMC cfg: 0x%x\n", i, umc->umc_cfg);
1121 edac_dbg(1, "UMC%d SDP ctrl: 0x%x\n", i, umc->sdp_ctrl);
1122 edac_dbg(1, "UMC%d ECC ctrl: 0x%x\n", i, umc->ecc_ctrl);
1124 amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ECC_BAD_SYMBOL, &tmp);
1125 edac_dbg(1, "UMC%d ECC bad symbol: 0x%x\n", i, tmp);
1127 amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_UMC_CAP, &tmp);
1128 edac_dbg(1, "UMC%d UMC cap: 0x%x\n", i, tmp);
1129 edac_dbg(1, "UMC%d UMC cap high: 0x%x\n", i, umc->umc_cap_hi);
1131 edac_dbg(1, "UMC%d ECC capable: %s, ChipKill ECC capable: %s\n",
1132 i, (umc->umc_cap_hi & BIT(30)) ? "yes" : "no",
1133 (umc->umc_cap_hi & BIT(31)) ? "yes" : "no");
1134 edac_dbg(1, "UMC%d All DIMMs support ECC: %s\n",
1135 i, (umc->umc_cfg & BIT(12)) ? "yes" : "no");
1136 edac_dbg(1, "UMC%d x4 DIMMs present: %s\n",
1137 i, (umc->dimm_cfg & BIT(6)) ? "yes" : "no");
1138 edac_dbg(1, "UMC%d x16 DIMMs present: %s\n",
1139 i, (umc->dimm_cfg & BIT(7)) ? "yes" : "no");
1141 if (pvt->dram_type == MEM_LRDDR4) {
1142 amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ADDR_CFG, &tmp);
1143 edac_dbg(1, "UMC%d LRDIMM %dx rank multiply\n",
1144 i, 1 << ((tmp >> 4) & 0x3));
1147 debug_display_dimm_sizes_df(pvt, i);
1150 edac_dbg(1, "F0x104 (DRAM Hole Address): 0x%08x, base: 0x%08x\n",
1151 pvt->dhar, dhar_base(pvt));
1154 /* Display and decode various NB registers for debug purposes. */
1155 static void __dump_misc_regs(struct amd64_pvt *pvt)
1157 edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
1159 edac_dbg(1, " NB two channel DRAM capable: %s\n",
1160 (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");
1162 edac_dbg(1, " ECC capable: %s, ChipKill ECC capable: %s\n",
1163 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
1164 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
1166 debug_dump_dramcfg_low(pvt, pvt->dclr0, 0);
1168 edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);
1170 edac_dbg(1, "F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, offset: 0x%08x\n",
1171 pvt->dhar, dhar_base(pvt),
1172 (pvt->fam == 0xf) ? k8_dhar_offset(pvt)
1173 : f10_dhar_offset(pvt));
1175 debug_display_dimm_sizes(pvt, 0);
1177 /* everything below this point is Fam10h and above */
1178 if (pvt->fam == 0xf)
1181 debug_display_dimm_sizes(pvt, 1);
1183 /* Only if NOT ganged does dclr1 have valid info */
1184 if (!dct_ganging_enabled(pvt))
1185 debug_dump_dramcfg_low(pvt, pvt->dclr1, 1);
1188 /* Display and decode various NB registers for debug purposes. */
1189 static void dump_misc_regs(struct amd64_pvt *pvt)
1192 __dump_misc_regs_df(pvt);
1194 __dump_misc_regs(pvt);
1196 edac_dbg(1, " DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
1198 amd64_info("using x%u syndromes.\n", pvt->ecc_sym_sz);
1202 * See BKDG, F2x[1,0][5C:40], F2x[1,0][6C:60]
1204 static void prep_chip_selects(struct amd64_pvt *pvt)
1206 if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
1207 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
1208 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
1209 } else if (pvt->fam == 0x15 && pvt->model == 0x30) {
1210 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4;
1211 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2;
1212 } else if (pvt->fam >= 0x17) {
1216 pvt->csels[umc].b_cnt = 4;
1217 pvt->csels[umc].m_cnt = 2;
1221 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
1222 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
1226 static void read_umc_base_mask(struct amd64_pvt *pvt)
1228 u32 umc_base_reg, umc_base_reg_sec;
1229 u32 umc_mask_reg, umc_mask_reg_sec;
1230 u32 base_reg, base_reg_sec;
1231 u32 mask_reg, mask_reg_sec;
1232 u32 *base, *base_sec;
1233 u32 *mask, *mask_sec;
1237 umc_base_reg = get_umc_base(umc) + UMCCH_BASE_ADDR;
1238 umc_base_reg_sec = get_umc_base(umc) + UMCCH_BASE_ADDR_SEC;
1240 for_each_chip_select(cs, umc, pvt) {
1241 base = &pvt->csels[umc].csbases[cs];
1242 base_sec = &pvt->csels[umc].csbases_sec[cs];
1244 base_reg = umc_base_reg + (cs * 4);
1245 base_reg_sec = umc_base_reg_sec + (cs * 4);
1247 if (!amd_smn_read(pvt->mc_node_id, base_reg, base))
1248 edac_dbg(0, " DCSB%d[%d]=0x%08x reg: 0x%x\n",
1249 umc, cs, *base, base_reg);
1251 if (!amd_smn_read(pvt->mc_node_id, base_reg_sec, base_sec))
1252 edac_dbg(0, " DCSB_SEC%d[%d]=0x%08x reg: 0x%x\n",
1253 umc, cs, *base_sec, base_reg_sec);
1256 umc_mask_reg = get_umc_base(umc) + UMCCH_ADDR_MASK;
1257 umc_mask_reg_sec = get_umc_base(umc) + UMCCH_ADDR_MASK_SEC;
1259 for_each_chip_select_mask(cs, umc, pvt) {
1260 mask = &pvt->csels[umc].csmasks[cs];
1261 mask_sec = &pvt->csels[umc].csmasks_sec[cs];
1263 mask_reg = umc_mask_reg + (cs * 4);
1264 mask_reg_sec = umc_mask_reg_sec + (cs * 4);
1266 if (!amd_smn_read(pvt->mc_node_id, mask_reg, mask))
1267 edac_dbg(0, " DCSM%d[%d]=0x%08x reg: 0x%x\n",
1268 umc, cs, *mask, mask_reg);
1270 if (!amd_smn_read(pvt->mc_node_id, mask_reg_sec, mask_sec))
1271 edac_dbg(0, " DCSM_SEC%d[%d]=0x%08x reg: 0x%x\n",
1272 umc, cs, *mask_sec, mask_reg_sec);
1278 * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
1280 static void read_dct_base_mask(struct amd64_pvt *pvt)
1284 prep_chip_selects(pvt);
1287 return read_umc_base_mask(pvt);
1289 for_each_chip_select(cs, 0, pvt) {
1290 int reg0 = DCSB0 + (cs * 4);
1291 int reg1 = DCSB1 + (cs * 4);
1292 u32 *base0 = &pvt->csels[0].csbases[cs];
1293 u32 *base1 = &pvt->csels[1].csbases[cs];
1295 if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0))
1296 edac_dbg(0, " DCSB0[%d]=0x%08x reg: F2x%x\n",
1299 if (pvt->fam == 0xf)
1302 if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1))
1303 edac_dbg(0, " DCSB1[%d]=0x%08x reg: F2x%x\n",
1304 cs, *base1, (pvt->fam == 0x10) ? reg1
1308 for_each_chip_select_mask(cs, 0, pvt) {
1309 int reg0 = DCSM0 + (cs * 4);
1310 int reg1 = DCSM1 + (cs * 4);
1311 u32 *mask0 = &pvt->csels[0].csmasks[cs];
1312 u32 *mask1 = &pvt->csels[1].csmasks[cs];
1314 if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0))
1315 edac_dbg(0, " DCSM0[%d]=0x%08x reg: F2x%x\n",
1318 if (pvt->fam == 0xf)
1321 if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1))
1322 edac_dbg(0, " DCSM1[%d]=0x%08x reg: F2x%x\n",
1323 cs, *mask1, (pvt->fam == 0x10) ? reg1
1328 static void determine_memory_type(struct amd64_pvt *pvt)
1330 u32 dram_ctrl, dcsm;
1333 if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(5))
1334 pvt->dram_type = MEM_LRDDR4;
1335 else if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(4))
1336 pvt->dram_type = MEM_RDDR4;
1338 pvt->dram_type = MEM_DDR4;
1344 if (pvt->ext_model >= K8_REV_F)
1347 pvt->dram_type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
1351 if (pvt->dchr0 & DDR3_MODE)
1354 pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
1358 if (pvt->model < 0x60)
1362 * Model 0x60h needs special handling:
1364 * We use a Chip Select value of '0' to obtain dcsm.
1365 * Theoretically, it is possible to populate LRDIMMs of different
1366 * 'Rank' value on a DCT. But this is not the common case. So,
1367 * it's reasonable to assume all DIMMs are going to be of the same
1368 * 'type' until proven otherwise.
1370 amd64_read_dct_pci_cfg(pvt, 0, DRAM_CONTROL, &dram_ctrl);
1371 dcsm = pvt->csels[0].csmasks[0];
1373 if (((dram_ctrl >> 8) & 0x7) == 0x2)
1374 pvt->dram_type = MEM_DDR4;
1375 else if (pvt->dclr0 & BIT(16))
1376 pvt->dram_type = MEM_DDR3;
1377 else if (dcsm & 0x3)
1378 pvt->dram_type = MEM_LRDDR3;
1380 pvt->dram_type = MEM_RDDR3;
1388 WARN(1, KERN_ERR "%s: Family??? 0x%x\n", __func__, pvt->fam);
1389 pvt->dram_type = MEM_EMPTY;
1394 pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
1397 /* Get the number of DCT channels the memory controller is using. */
1398 static int k8_early_channel_count(struct amd64_pvt *pvt)
1402 if (pvt->ext_model >= K8_REV_F)
1403 /* RevF (NPT) and later */
1404 flag = pvt->dclr0 & WIDTH_128;
1406 /* RevE and earlier */
1407 flag = pvt->dclr0 & REVE_WIDTH_128;
1412 return (flag) ? 2 : 1;
1415 /* On F10h and later ErrAddr is MC4_ADDR[47:1] */
1416 static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
1418 u16 mce_nid = topology_die_id(m->extcpu);
1419 struct mem_ctl_info *mci;
1424 mci = edac_mc_find(mce_nid);
1428 pvt = mci->pvt_info;
1430 if (pvt->fam == 0xf) {
1435 addr = m->addr & GENMASK_ULL(end_bit, start_bit);
1438 * Erratum 637 workaround
1440 if (pvt->fam == 0x15) {
1441 u64 cc6_base, tmp_addr;
1445 if ((addr & GENMASK_ULL(47, 24)) >> 24 != 0x00fdf7)
1449 amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_LIM, &tmp);
1450 intlv_en = tmp >> 21 & 0x7;
1452 /* add [47:27] + 3 trailing bits */
1453 cc6_base = (tmp & GENMASK_ULL(20, 0)) << 3;
1455 /* reverse and add DramIntlvEn */
1456 cc6_base |= intlv_en ^ 0x7;
1458 /* pin at [47:24] */
1459 cc6_base <<= 24;
1461 if (!intlv_en)
1462 return cc6_base | (addr & GENMASK_ULL(23, 0));
1464 amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp);
1467 tmp_addr = (addr & GENMASK_ULL(23, 12)) << __fls(intlv_en + 1);
1469 /* OR DramIntlvSel into bits [14:12] */
1470 tmp_addr |= (tmp & GENMASK_ULL(23, 21)) >> 9;
1472 /* add remaining [11:0] bits from original MC4_ADDR */
1473 tmp_addr |= addr & GENMASK_ULL(11, 0);
1475 return cc6_base | tmp_addr;
1481 static struct pci_dev *pci_get_related_function(unsigned int vendor,
1482 unsigned int device,
1483 struct pci_dev *related)
1485 struct pci_dev *dev = NULL;
1487 while ((dev = pci_get_device(vendor, device, dev))) {
1488 if (pci_domain_nr(dev->bus) == pci_domain_nr(related->bus) &&
1489 (dev->bus->number == related->bus->number) &&
1490 (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
1497 static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
1499 struct amd_northbridge *nb;
1500 struct pci_dev *f1 = NULL;
1501 unsigned int pci_func;
1502 int off = range << 3;
1505 amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off, &pvt->ranges[range].base.lo);
1506 amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);
1508 if (pvt->fam == 0xf)
1511 if (!dram_rw(pvt, range))
1514 amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off, &pvt->ranges[range].base.hi);
1515 amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);
1517 /* F15h: factor in CC6 save area by reading dst node's limit reg */
1518 if (pvt->fam != 0x15)
1521 nb = node_to_amd_nb(dram_dst_node(pvt, range));
1525 if (pvt->model == 0x60)
1526 pci_func = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1;
1527 else if (pvt->model == 0x30)
1528 pci_func = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1;
1530 pci_func = PCI_DEVICE_ID_AMD_15H_NB_F1;
1532 f1 = pci_get_related_function(nb->misc->vendor, pci_func, nb->misc);
1536 amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);
1538 pvt->ranges[range].lim.lo &= GENMASK_ULL(15, 0);
1540 /* {[39:27],111b} */
1541 pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;
1543 pvt->ranges[range].lim.hi &= GENMASK_ULL(7, 0);
1546 pvt->ranges[range].lim.hi |= llim >> 13;
1551 static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
1552 struct err_info *err)
1554 struct amd64_pvt *pvt = mci->pvt_info;
1556 error_address_to_page_and_offset(sys_addr, err);
1559 * Find out which node the error address belongs to. This may be
1560 * different from the node that detected the error.
1562 err->src_mci = find_mc_by_sys_addr(mci, sys_addr);
1563 if (!err->src_mci) {
1564 amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
1565 (unsigned long)sys_addr);
1566 err->err_code = ERR_NODE;
1570 /* Now map the sys_addr to a CSROW */
1571 err->csrow = sys_addr_to_csrow(err->src_mci, sys_addr);
1572 if (err->csrow < 0) {
1573 err->err_code = ERR_CSROW;
1577 /* CHIPKILL enabled */
1578 if (pvt->nbcfg & NBCFG_CHIPKILL) {
1579 err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
1580 if (err->channel < 0) {
1582 * Syndrome didn't map, so we don't know which of the
1583 * 2 DIMMs is in error. So we need to ID 'both' of them
1584 * as suspect.
1585 */
1586 amd64_mc_warn(err->src_mci, "unknown syndrome 0x%04x - "
1587 "possible error reporting race\n",
1588 err->syndrome);
1589 err->err_code = ERR_CHANNEL;
1594 * non-chipkill ecc mode
1596 * The k8 documentation is unclear about how to determine the
1597 * channel number when using non-chipkill memory. This method
1598 * was obtained from email communication with someone at AMD.
1599 * (Wish the email was placed in this comment - norsk)
1601 err->channel = ((sys_addr & BIT(3)) != 0);
1605 static int ddr2_cs_size(unsigned i, bool dct_width)
1611 else if (!(i & 0x1))
1612 shift = i >> 1;
1613 else
1614 shift = (i + 1) >> 1;
1616 return 128 << (shift + !!dct_width);
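/*
 * Example (illustrative): i = 4 is even, so shift = 4 >> 1 = 2, giving
 * 128 << 2 = 512MB on a 64-bit DCT and 128 << 3 = 1024MB in 128-bit mode.
 */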
1619 static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1620 unsigned cs_mode, int cs_mask_nr)
1622 u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
1624 if (pvt->ext_model >= K8_REV_F) {
1625 WARN_ON(cs_mode > 11);
1626 return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
1628 else if (pvt->ext_model >= K8_REV_D) {
1630 WARN_ON(cs_mode > 10);
1633 * the below calculation, besides trying to win an obfuscated C
1634 * contest, maps cs_mode values to DIMM chip select sizes. The
1635 * mappings are:
1636 *
1637 * cs_mode   CS size (mb)
1638 * =======   ============
1639 * 0         32
1640 * 1         64
1641 * 2         128
1642 * 3         128
1643 * 4         256
1644 * 5         512
1645 * 6         256
1646 * 7         512
1647 * 8         1024
1648 * 9         1024
1649 * 10        2048
1651 * Basically, it calculates a value with which to shift the
1652 * smallest CS size of 32MB.
1654 * ddr[23]_cs_size have a similar purpose.
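/*
 * Worked example (illustrative): cs_mode = 10 gives
 * diff = 10/3 + (10 > 5) = 3 + 1 = 4, so the size is
 * 32 << (10 - 4) = 2048MB, matching the table above.
 */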
1656 diff = cs_mode/3 + (unsigned)(cs_mode > 5);
1658 return 32 << (cs_mode - diff);
1661 WARN_ON(cs_mode > 6);
1662 return 32 << cs_mode;
1667 * Get the number of DCT channels in use.
1669 * Return:
1670 *	number of Memory Channels in operation
1671 * Pass back:
1672 *	contents of the DCL0_LOW register
1674 static int f1x_early_channel_count(struct amd64_pvt *pvt)
1676 int i, j, channels = 0;
1678 /* On F10h, if we are in 128 bit mode, then we are using 2 channels */
1679 if (pvt->fam == 0x10 && (pvt->dclr0 & WIDTH_128))
1683 * Need to check if in unganged mode: In such, there are 2 channels,
1684 * but they are not in 128 bit mode and thus the above 'dclr0' status
1685 * bit will be OFF.
1687 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
1688 * their CSEnable bit on. If so, then SINGLE DIMM case.
1690 edac_dbg(0, "Data width is not 128 bits - need more decoding\n");
1693 * Check DRAM Bank Address Mapping values for each DIMM to see if there
1694 * is more than just one DIMM present in unganged mode. Need to check
1695 * both controllers since DIMMs can be placed in either one.
1697 for (i = 0; i < 2; i++) {
1698 u32 dbam = (i ? pvt->dbam1 : pvt->dbam0);
1700 for (j = 0; j < 4; j++) {
1701 if (DBAM_DIMM(j, dbam) > 0) {
1711 amd64_info("MCT channel count: %d\n", channels);
1716 static int f17_early_channel_count(struct amd64_pvt *pvt)
1718 int i, channels = 0;
1720 /* SDP Control bit 31 (SdpInit) is clear for unused UMC channels */
1722 channels += !!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT);
1724 amd64_info("MCT channel count: %d\n", channels);
1729 static int ddr3_cs_size(unsigned i, bool dct_width)
1734 if (i == 0 || i == 3 || i == 4)
1740 else if (!(i & 0x1))
1741 shift = i >> 1;
1742 else
1743 shift = (i + 1) >> 1;
1746 cs_size = (128 * (1 << !!dct_width)) << shift;
1751 static int ddr3_lrdimm_cs_size(unsigned i, unsigned rank_multiply)
1756 if (i < 4 || i == 6)
1760 else if (!(i & 0x1))
1761 shift = i >> 1;
1762 else
1763 shift = (i + 1) >> 1;
1766 cs_size = rank_multiply * (128 << shift);
1771 static int ddr4_cs_size(unsigned i)
1780 /* Min cs_size = 1G */
1781 cs_size = 1024 * (1 << (i >> 1));
1786 static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1787 unsigned cs_mode, int cs_mask_nr)
1789 u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
1791 WARN_ON(cs_mode > 11);
1793 if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
1794 return ddr3_cs_size(cs_mode, dclr & WIDTH_128);
1796 return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
1800 * F15h supports only 64bit DCT interfaces
1802 static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1803 unsigned cs_mode, int cs_mask_nr)
1805 WARN_ON(cs_mode > 12);
1807 return ddr3_cs_size(cs_mode, false);
1810 /* F15h M60h supports DDR4 mapping as well. */
1811 static int f15_m60h_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1812 unsigned cs_mode, int cs_mask_nr)
1815 u32 dcsm = pvt->csels[dct].csmasks[cs_mask_nr];
1817 WARN_ON(cs_mode > 12);
1819 if (pvt->dram_type == MEM_DDR4) {
1823 cs_size = ddr4_cs_size(cs_mode);
1824 } else if (pvt->dram_type == MEM_LRDDR3) {
1825 unsigned rank_multiply = dcsm & 0xf;
1827 if (rank_multiply == 3)
1829 cs_size = ddr3_lrdimm_cs_size(cs_mode, rank_multiply);
1831 /* Minimum cs_size is 512MB for F15h M60h. */
1835 cs_size = ddr3_cs_size(cs_mode, false);
1842 * F16h and F15h model 30h have only limited cs_modes.
1844 static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1845 unsigned cs_mode, int cs_mask_nr)
1847 WARN_ON(cs_mode > 12);
1849 if (cs_mode == 6 || cs_mode == 8 ||
1850 cs_mode == 9 || cs_mode == 12)
1853 return ddr3_cs_size(cs_mode, false);
1856 static int f17_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
1857 unsigned int cs_mode, int csrow_nr)
1859 u32 addr_mask_orig, addr_mask_deinterleaved;
1860 u32 msb, weight, num_zero_bits;
1863 /* No Chip Selects are enabled. */
1867 /* Requested size of an even CS but none are enabled. */
1868 if (!(cs_mode & CS_EVEN) && !(csrow_nr & 1))
1871 /* Requested size of an odd CS but none are enabled. */
1872 if (!(cs_mode & CS_ODD) && (csrow_nr & 1))
1876 * There is one mask per DIMM, and two Chip Selects per DIMM.
1877 * CS0 and CS1 -> DIMM0
1878 * CS2 and CS3 -> DIMM1
1880 dimm = csrow_nr >> 1;
1882 /* Asymmetric dual-rank DIMM support. */
1883 if ((csrow_nr & 1) && (cs_mode & CS_ODD_SECONDARY))
1884 addr_mask_orig = pvt->csels[umc].csmasks_sec[dimm];
1886 addr_mask_orig = pvt->csels[umc].csmasks[dimm];
1889 * The number of zero bits in the mask is equal to the number of bits
1890 * in a full mask minus the number of bits in the current mask.
1892 * The MSB is the number of bits in the full mask because BIT[0] is
1893 * zero.
1894 */
1895 msb = fls(addr_mask_orig) - 1;
1896 weight = hweight_long(addr_mask_orig);
1897 num_zero_bits = msb - weight;
1899 /* Take the number of zero bits off from the top of the mask. */
1900 addr_mask_deinterleaved = GENMASK_ULL(msb - num_zero_bits, 1);
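/*
 * Worked example (illustrative): an 8GB rank interleaved with one other
 * chip select might report addr_mask_orig = 0x3fffffc (bit 1 punched
 * out for interleaving). Then msb = 25, weight = 24, num_zero_bits = 1,
 * and the deinterleaved mask becomes GENMASK_ULL(24, 1) = 0x1fffffe,
 * which the size arithmetic below turns into 8192MB.
 */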
1902 edac_dbg(1, "CS%d DIMM%d AddrMasks:\n", csrow_nr, dimm);
1903 edac_dbg(1, " Original AddrMask: 0x%x\n", addr_mask_orig);
1904 edac_dbg(1, " Deinterleaved AddrMask: 0x%x\n", addr_mask_deinterleaved);
1906 /* Register [31:1] = Address [39:9]. Size is in kBs here. */
1907 size = (addr_mask_deinterleaved >> 2) + 1;
1909 /* Return size in MBs. */
1913 static void read_dram_ctl_register(struct amd64_pvt *pvt)
1916 if (pvt->fam == 0xf)
1919 if (!amd64_read_pci_cfg(pvt->F2, DCT_SEL_LO, &pvt->dct_sel_lo)) {
1920 edac_dbg(0, "F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
1921 pvt->dct_sel_lo, dct_sel_baseaddr(pvt));
1923 edac_dbg(0, " DCTs operate in %s mode\n",
1924 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));
1926 if (!dct_ganging_enabled(pvt))
1927 edac_dbg(0, " Address range split per DCT: %s\n",
1928 (dct_high_range_enabled(pvt) ? "yes" : "no"));
1930 edac_dbg(0, " data interleave for ECC: %s, DRAM cleared since last warm reset: %s\n",
1931 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
1932 (dct_memory_cleared(pvt) ? "yes" : "no"));
1934 edac_dbg(0, " channel interleave: %s, "
1935 "interleave bits selector: 0x%x\n",
1936 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
1937 dct_sel_interleave_addr(pvt));
1940 amd64_read_pci_cfg(pvt->F2, DCT_SEL_HI, &pvt->dct_sel_hi);
1944 * Determine channel (DCT) based on the interleaving mode (see F15h M30h BKDG,
1945 * 2.10.12 Memory Interleaving Modes).
1947 static u8 f15_m30h_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
1948 u8 intlv_en, int num_dcts_intlv,
1949 u32 dct_sel)
1955 return (u8)(dct_sel);
1957 if (num_dcts_intlv == 2) {
1958 select = (sys_addr >> 8) & 0x3;
1959 channel = select ? 0x3 : 0;
1960 } else if (num_dcts_intlv == 4) {
1961 u8 intlv_addr = dct_sel_interleave_addr(pvt);
1962 switch (intlv_addr) {
1964 channel = (sys_addr >> 8) & 0x3;
1967 channel = (sys_addr >> 9) & 0x3;
1975 * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory
1976 * Interleaving Modes.
1978 static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
1979 bool hi_range_sel, u8 intlv_en)
1981 u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1;
1983 if (dct_ganging_enabled(pvt))
1987 return dct_sel_high;
1990 * see F2x110[DctSelIntLvAddr] - channel interleave mode
1992 if (dct_interleave_enabled(pvt)) {
1993 u8 intlv_addr = dct_sel_interleave_addr(pvt);
1995 /* return DCT select function: 0=DCT0, 1=DCT1 */
1997 return sys_addr >> 6 & 1;
1999 if (intlv_addr & 0x2) {
2000 u8 shift = intlv_addr & 0x1 ? 9 : 6;
2001 u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) & 1;
2003 return ((sys_addr >> shift) & 1) ^ temp;
2006 if (intlv_addr & 0x4) {
2007 u8 shift = intlv_addr & 0x1 ? 9 : 8;
2009 return (sys_addr >> shift) & 1;
2012 return (sys_addr >> (12 + hweight8(intlv_en))) & 1;
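/*
 * Example (illustrative): with 8-node interleaving (intlv_en = 0x7,
 * hweight8 = 3) the DCT select bit is taken from sys_addr bit 15, just
 * above the three node-interleave bits at [14:12].
 */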
2015 if (dct_high_range_enabled(pvt))
2016 return ~dct_sel_high & 1;
2021 /* Convert the sys_addr to the normalized DCT address */
2022 static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
2023 u64 sys_addr, bool hi_rng,
2024 u32 dct_sel_base_addr)
2027 u64 dram_base = get_dram_base(pvt, range);
2028 u64 hole_off = f10_dhar_offset(pvt);
2029 u64 dct_sel_base_off = (u64)(pvt->dct_sel_hi & 0xFFFFFC00) << 16;
2034 * base address of high range is below 4Gb
2035 * (bits [47:27] at [31:11])
2036 * DRAM address space on this DCT is hoisted above 4Gb &&
2039 * remove hole offset from sys_addr
2041 * remove high range offset from sys_addr
2043 if ((!(dct_sel_base_addr >> 16) ||
2044 dct_sel_base_addr < dhar_base(pvt)) &&
2045 dhar_valid(pvt) &&
2046 (sys_addr >= BIT_64(32)))
2047 chan_off = hole_off;
2049 chan_off = dct_sel_base_off;
2053 * we have a valid hole &&
2058 * remove dram base to normalize to DCT address
2060 if (dhar_valid(pvt) && (sys_addr >= BIT_64(32)))
2061 chan_off = hole_off;
2063 chan_off = dram_base;
2066 return (sys_addr & GENMASK_ULL(47, 6)) - (chan_off & GENMASK_ULL(47, 23));
2070 * checks if the csrow passed in is marked as SPARED, if so returns the new
2071 * spare row
2073 static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
2077 if (online_spare_swap_done(pvt, dct) &&
2078 csrow == online_spare_bad_dramcs(pvt, dct)) {
2080 for_each_chip_select(tmp_cs, dct, pvt) {
2081 if (chip_select_base(tmp_cs, dct, pvt) & 0x2) {
2091 * Iterate over the DRAM DCT "base" and "mask" registers looking for a
2092 * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
2094 * Return:
2095 *	-EINVAL: NOT FOUND
2096 *	0..csrow = Chip-Select Row
2098 static int f1x_lookup_addr_in_dct(u64 in_addr, u8 nid, u8 dct)
2100 struct mem_ctl_info *mci;
2101 struct amd64_pvt *pvt;
2102 u64 cs_base, cs_mask;
2103 int cs_found = -EINVAL;
2106 mci = edac_mc_find(nid);
2110 pvt = mci->pvt_info;
2112 edac_dbg(1, "input addr: 0x%llx, DCT: %d\n", in_addr, dct);
2114 for_each_chip_select(csrow, dct, pvt) {
2115 if (!csrow_enabled(csrow, dct, pvt))
2118 get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);
2120 edac_dbg(1, " CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
2121 csrow, cs_base, cs_mask);
2125 edac_dbg(1, " (InputAddr & ~CSMask)=0x%llx (CSBase & ~CSMask)=0x%llx\n",
2126 (in_addr & cs_mask), (cs_base & cs_mask));
2128 if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
2129 if (pvt->fam == 0x15 && pvt->model >= 0x30) {
2133 cs_found = f10_process_possible_spare(pvt, dct, csrow);
2135 edac_dbg(1, " MATCH csrow=%d\n", cs_found);
2143 * See F2x10C. Non-interleaved graphics framebuffer memory under the 16G is
2144 * swapped with a region located at the bottom of memory so that the GPU can use
2145 * the interleaved region and thus two channels.
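/*
 * Decoding sketch (illustrative): SWAP_INTLV_REG packs the swap base at
 * bits [9:3], the swap limit at [17:11] and the region size at [26:20],
 * all in 128MB (1 << 27) units; e.g. a swap_base field of 0x2 puts the
 * swapped region at physical address 0x10000000.
 */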
2147 static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
2149 u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;
2151 if (pvt->fam == 0x10) {
2152 /* only revC3 and revE have that feature */
2153 if (pvt->model < 4 || (pvt->model < 0xa && pvt->stepping < 3))
2157 amd64_read_pci_cfg(pvt->F2, SWAP_INTLV_REG, &swap_reg);
2159 if (!(swap_reg & 0x1))
2162 swap_base = (swap_reg >> 3) & 0x7f;
2163 swap_limit = (swap_reg >> 11) & 0x7f;
2164 rgn_size = (swap_reg >> 20) & 0x7f;
2165 tmp_addr = sys_addr >> 27;
2167 if (!(sys_addr >> 34) &&
2168 (((tmp_addr >= swap_base) &&
2169 (tmp_addr <= swap_limit)) ||
2170 (tmp_addr < rgn_size)))
2171 return sys_addr ^ (u64)swap_base << 27;
2176 /* For a given @dram_range, check if @sys_addr falls within it. */
2177 static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
2178 u64 sys_addr, int *chan_sel)
2180 int cs_found = -EINVAL;
2184 bool high_range = false;
2186 u8 node_id = dram_dst_node(pvt, range);
2187 u8 intlv_en = dram_intlv_en(pvt, range);
2188 u32 intlv_sel = dram_intlv_sel(pvt, range);
2190 edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
2191 range, sys_addr, get_dram_limit(pvt, range));
2193 if (dhar_valid(pvt) &&
2194 dhar_base(pvt) <= sys_addr &&
2195 sys_addr < BIT_64(32)) {
2196 amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
2197 sys_addr);
2201 if (intlv_en && (intlv_sel != ((sys_addr >> 12) & intlv_en)))
2204 sys_addr = f1x_swap_interleaved_region(pvt, sys_addr);
2206 dct_sel_base = dct_sel_baseaddr(pvt);
2209 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
2210 * select between DCT0 and DCT1.
2212 if (dct_high_range_enabled(pvt) &&
2213 !dct_ganging_enabled(pvt) &&
2214 ((sys_addr >> 27) >= (dct_sel_base >> 11)))
2217 channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en);
2219 chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr,
2220 high_range, dct_sel_base);
2222 /* Remove node interleaving, see F1x120 */
2224 chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) |
2225 (chan_addr & 0xfff);
2227 /* remove channel interleave */
2228 if (dct_interleave_enabled(pvt) &&
2229 !dct_high_range_enabled(pvt) &&
2230 !dct_ganging_enabled(pvt)) {
2232 if (dct_sel_interleave_addr(pvt) != 1) {
2233 if (dct_sel_interleave_addr(pvt) == 0x3)
2235 chan_addr = ((chan_addr >> 10) << 9) |
2236 (chan_addr & 0x1ff);
2238 /* A[6] or hash 6 */
2239 chan_addr = ((chan_addr >> 7) << 6) |
2240 (chan_addr & 0x3f);
2243 chan_addr = ((chan_addr >> 13) << 12) |
2244 (chan_addr & 0xfff);
2247 edac_dbg(1, " Normalized DCT addr: 0x%llx\n", chan_addr);
2249 cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);
2252 *chan_sel = channel;
2257 static int f15_m30h_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
2258 u64 sys_addr, int *chan_sel)
2260 int cs_found = -EINVAL;
2261 int num_dcts_intlv = 0;
2262 u64 chan_addr, chan_offset;
2263 u64 dct_base, dct_limit;
2264 u32 dct_cont_base_reg, dct_cont_limit_reg, tmp;
2265 u8 channel, alias_channel, leg_mmio_hole, dct_sel, dct_offset_en;
2267 u64 dhar_offset = f10_dhar_offset(pvt);
2268 u8 intlv_addr = dct_sel_interleave_addr(pvt);
2269 u8 node_id = dram_dst_node(pvt, range);
2270 u8 intlv_en = dram_intlv_en(pvt, range);
2272 amd64_read_pci_cfg(pvt->F1, DRAM_CONT_BASE, &dct_cont_base_reg);
2273 amd64_read_pci_cfg(pvt->F1, DRAM_CONT_LIMIT, &dct_cont_limit_reg);
2275 dct_offset_en = (u8) ((dct_cont_base_reg >> 3) & BIT(0));
2276 dct_sel = (u8) ((dct_cont_base_reg >> 4) & 0x7);
2278 edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
2279 range, sys_addr, get_dram_limit(pvt, range));
2281 if (!(get_dram_base(pvt, range) <= sys_addr) &&
2282 !(get_dram_limit(pvt, range) >= sys_addr))
2285 if (dhar_valid(pvt) &&
2286 dhar_base(pvt) <= sys_addr &&
2287 sys_addr < BIT_64(32)) {
2288 amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
2289 sys_addr);
2293 /* Verify sys_addr is within DCT Range. */
2294 dct_base = (u64) dct_sel_baseaddr(pvt);
2295 dct_limit = (dct_cont_limit_reg >> 11) & 0x1FFF;
2297 if (!(dct_cont_base_reg & BIT(0)) &&
2298 !(dct_base <= (sys_addr >> 27) &&
2299 dct_limit >= (sys_addr >> 27)))
2302 /* Verify number of dct's that participate in channel interleaving. */
2303 num_dcts_intlv = (int) hweight8(intlv_en);
2305 if (!(num_dcts_intlv % 2 == 0) || (num_dcts_intlv > 4))
2308 if (pvt->model >= 0x60)
2309 channel = f1x_determine_channel(pvt, sys_addr, false, intlv_en);
2311 channel = f15_m30h_determine_channel(pvt, sys_addr, intlv_en,
2312 num_dcts_intlv, dct_sel);
2314 /* Verify we stay within the MAX number of channels allowed */
2318 leg_mmio_hole = (u8) (dct_cont_base_reg >> 1 & BIT(0));
2320 /* Get normalized DCT addr */
2321 if (leg_mmio_hole && (sys_addr >= BIT_64(32)))
2322 chan_offset = dhar_offset;
2324 chan_offset = dct_base << 27;
2326 chan_addr = sys_addr - chan_offset;
2328 /* remove channel interleave */
2329 if (num_dcts_intlv == 2) {
2330 if (intlv_addr == 0x4)
2331 chan_addr = ((chan_addr >> 9) << 8) |
2332 (chan_addr & 0xff);
2333 else if (intlv_addr == 0x5)
2334 chan_addr = ((chan_addr >> 10) << 9) |
2335 (chan_addr & 0x1ff);
2339 } else if (num_dcts_intlv == 4) {
2340 if (intlv_addr == 0x4)
2341 chan_addr = ((chan_addr >> 10) << 8) |
2343 else if (intlv_addr == 0x5)
2344 chan_addr = ((chan_addr >> 11) << 9) |
2345 (chan_addr & 0x1ff);
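
	/*
	 * For illustration (example values): with two DCTs interleaved and
	 * intlv_addr == 0x5, address bit 9 selected the DCT, so it is
	 * squeezed out above: chan_addr = 0x10234 becomes
	 * ((0x10234 >> 10) << 9) | (0x10234 & 0x1ff) = 0x8034.
	 */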
	if (dct_offset_en) {
		amd64_read_pci_cfg(pvt->F1,
				   DRAM_CONT_HIGH_OFF + (int) channel * 4,
				   &tmp);
		chan_addr += (u64) ((tmp >> 11) & 0xfff) << 27;
	}

	f15h_select_dct(pvt, channel);

	edac_dbg(1, "   Normalized DCT addr: 0x%llx\n", chan_addr);

	/*
	 * Find Chip select:
	 * if channel = 3, then alias it to 1. This is because, in F15 M30h,
	 * there is support for 4 DCTs, but only 2 are currently functional.
	 * They are DCT0 and DCT3. But we have read all registers of DCT3 into
	 * pvt->csels[1]. So we need to use '1' here to get correct info.
	 * Refer to the F15h M30h BKDG, Sections 2.10 and 2.10.3, for
	 * clarification.
	 */
	alias_channel = (channel == 3) ? 1 : channel;

	cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, alias_channel);

	if (cs_found >= 0)
		*chan_sel = alias_channel;

	return cs_found;
}
static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt,
				       u64 sys_addr, int *chan_sel)
{
	int cs_found = -EINVAL;
	unsigned range;

	for (range = 0; range < DRAM_RANGES; range++) {
		if (!dram_rw(pvt, range))
			continue;

		if (pvt->fam == 0x15 && pvt->model >= 0x30)
			cs_found = f15_m30h_match_to_this_node(pvt, range,
							       sys_addr,
							       chan_sel);

		else if ((get_dram_base(pvt, range) <= sys_addr) &&
			 (get_dram_limit(pvt, range) >= sys_addr)) {
			cs_found = f1x_match_to_this_node(pvt, range,
							  sys_addr, chan_sel);
			if (cs_found >= 0)
				break;
		}
	}
	return cs_found;
}
/*
 * For reference see "2.8.5 Routing DRAM Requests" in the F10h BKDG. This code
 * maps a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
 *
 * The @sys_addr is usually an error address received from the hardware
 * (MCX_ADDR).
 */
static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
				     struct err_info *err)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	error_address_to_page_and_offset(sys_addr, err);

	err->csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &err->channel);
	if (err->csrow < 0) {
		err->err_code = ERR_CSROW;
		return;
	}

	/*
	 * We need the syndromes for channel detection only when we're
	 * ganged. Otherwise @chan should already contain the channel at
	 * this point.
	 */
	if (dct_ganging_enabled(pvt))
		err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
}
/*
 * debug routine to display the memory sizes of all logical DIMMs and their
 * CSROWs
 */
static void debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
{
	int dimm, size0, size1;
	u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
	u32 dbam  = ctrl ? pvt->dbam1 : pvt->dbam0;

	if (pvt->fam == 0xf) {
		/* K8 families < revF not supported yet */
		if (pvt->ext_model < K8_REV_F)
			return;
		else
			WARN_ON(ctrl != 0);
	}

	if (pvt->fam == 0x10) {
		dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1
							   : pvt->dbam0;
		dcsb = (ctrl && !dct_ganging_enabled(pvt)) ?
				 pvt->csels[1].csbases :
				 pvt->csels[0].csbases;
	} else if (ctrl) {
		dbam = pvt->dbam0;
		dcsb = pvt->csels[1].csbases;
	}
	edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
		 ctrl, dbam);

	edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);

	/* Dump memory sizes for DIMM and its CSROWs */
	for (dimm = 0; dimm < 4; dimm++) {

		size0 = 0;
		if (dcsb[dimm * 2] & DCSB_CS_ENABLE)
			/*
			 * For F15h M60h, we need a multiplier for the LRDIMM
			 * cs_size calculation. We pass the dimm value to the
			 * dbam_to_cs mapper so we can find the multiplier
			 * from the corresponding DCSM.
			 */
			size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
						     DBAM_DIMM(dimm, dbam),
						     dimm);

		size1 = 0;
		if (dcsb[dimm * 2 + 1] & DCSB_CS_ENABLE)
			size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
						     DBAM_DIMM(dimm, dbam),
						     dimm);

		amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
			   dimm * 2,     size0,
			   dimm * 2 + 1, size1);
	}
}
static struct amd64_family_type family_types[] = {
	[K8_CPUS] = {
		.ctl_name = "K8",
		.f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
		.f2_id = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
		.max_mcs = 2,
		.ops = {
			.early_channel_count	= k8_early_channel_count,
			.map_sysaddr_to_csrow	= k8_map_sysaddr_to_csrow,
			.dbam_to_cs		= k8_dbam_to_chip_select,
		}
	},
	[F10_CPUS] = {
		.ctl_name = "F10h",
		.f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
		.f2_id = PCI_DEVICE_ID_AMD_10H_NB_DRAM,
		.max_mcs = 2,
		.ops = {
			.early_channel_count	= f1x_early_channel_count,
			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
			.dbam_to_cs		= f10_dbam_to_chip_select,
		}
	},
	[F15_CPUS] = {
		.ctl_name = "F15h",
		.f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1,
		.f2_id = PCI_DEVICE_ID_AMD_15H_NB_F2,
		.max_mcs = 2,
		.ops = {
			.early_channel_count	= f1x_early_channel_count,
			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
			.dbam_to_cs		= f15_dbam_to_chip_select,
		}
	},
	[F15_M30H_CPUS] = {
		.ctl_name = "F15h_M30h",
		.f1_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1,
		.f2_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F2,
		.max_mcs = 2,
		.ops = {
			.early_channel_count	= f1x_early_channel_count,
			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
			.dbam_to_cs		= f16_dbam_to_chip_select,
		}
	},
	[F15_M60H_CPUS] = {
		.ctl_name = "F15h_M60h",
		.f1_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1,
		.f2_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F2,
		.max_mcs = 2,
		.ops = {
			.early_channel_count	= f1x_early_channel_count,
			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
			.dbam_to_cs		= f15_m60h_dbam_to_chip_select,
		}
	},
	[F16_CPUS] = {
		.ctl_name = "F16h",
		.f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1,
		.f2_id = PCI_DEVICE_ID_AMD_16H_NB_F2,
		.max_mcs = 2,
		.ops = {
			.early_channel_count	= f1x_early_channel_count,
			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
			.dbam_to_cs		= f16_dbam_to_chip_select,
		}
	},
	[F16_M30H_CPUS] = {
		.ctl_name = "F16h_M30h",
		.f1_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F1,
		.f2_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F2,
		.max_mcs = 2,
		.ops = {
			.early_channel_count	= f1x_early_channel_count,
			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
			.dbam_to_cs		= f16_dbam_to_chip_select,
		}
	},
	[F17_CPUS] = {
		.ctl_name = "F17h",
		.f0_id = PCI_DEVICE_ID_AMD_17H_DF_F0,
		.f6_id = PCI_DEVICE_ID_AMD_17H_DF_F6,
		.max_mcs = 2,
		.ops = {
			.early_channel_count	= f17_early_channel_count,
			.dbam_to_cs		= f17_addr_mask_to_cs_size,
		}
	},
	[F17_M10H_CPUS] = {
		.ctl_name = "F17h_M10h",
		.f0_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F0,
		.f6_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F6,
		.max_mcs = 2,
		.ops = {
			.early_channel_count	= f17_early_channel_count,
			.dbam_to_cs		= f17_addr_mask_to_cs_size,
		}
	},
	[F17_M30H_CPUS] = {
		.ctl_name = "F17h_M30h",
		.f0_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F0,
		.f6_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F6,
		.max_mcs = 8,
		.ops = {
			.early_channel_count	= f17_early_channel_count,
			.dbam_to_cs		= f17_addr_mask_to_cs_size,
		}
	},
	[F17_M60H_CPUS] = {
		.ctl_name = "F17h_M60h",
		.f0_id = PCI_DEVICE_ID_AMD_17H_M60H_DF_F0,
		.f6_id = PCI_DEVICE_ID_AMD_17H_M60H_DF_F6,
		.max_mcs = 2,
		.ops = {
			.early_channel_count	= f17_early_channel_count,
			.dbam_to_cs		= f17_addr_mask_to_cs_size,
		}
	},
	[F17_M70H_CPUS] = {
		.ctl_name = "F17h_M70h",
		.f0_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F0,
		.f6_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F6,
		.max_mcs = 2,
		.ops = {
			.early_channel_count	= f17_early_channel_count,
			.dbam_to_cs		= f17_addr_mask_to_cs_size,
		}
	},
	[F19_CPUS] = {
		.ctl_name = "F19h",
		.f0_id = PCI_DEVICE_ID_AMD_19H_DF_F0,
		.f6_id = PCI_DEVICE_ID_AMD_19H_DF_F6,
		.max_mcs = 8,
		.ops = {
			.early_channel_count	= f17_early_channel_count,
			.dbam_to_cs		= f17_addr_mask_to_cs_size,
		}
	},
};
/*
 * These are tables of eigenvectors (one per line) which can be used for the
 * construction of the syndrome tables. The modified syndrome search algorithm
 * uses those to find the symbol in error and thus the DIMM.
 *
 * Algorithm courtesy of Ross LaFetra from AMD.
 */
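/*
 * Layout note: each group of v_dim consecutive entries describes one error
 * symbol: 4 vectors per symbol in x4_vectors (144 entries, 36 symbols) and
 * 8 per symbol in x8_vectors (152 entries, 19 symbols).
 */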
static const u16 x4_vectors[] = {
	0x2f57, 0x1afe, 0x66cc, 0xdd88,
	0x11eb, 0x3396, 0x7f4c, 0xeac8,
	0x0001, 0x0002, 0x0004, 0x0008,
	0x1013, 0x3032, 0x4044, 0x8088,
	0x106b, 0x30d6, 0x70fc, 0xe0a8,
	0x4857, 0xc4fe, 0x13cc, 0x3288,
	0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
	0x1f39, 0x251e, 0xbd6c, 0x6bd8,
	0x15c1, 0x2a42, 0x89ac, 0x4758,
	0x2b03, 0x1602, 0x4f0c, 0xca08,
	0x1f07, 0x3a0e, 0x6b04, 0xbd08,
	0x8ba7, 0x465e, 0x244c, 0x1cc8,
	0x2b87, 0x164e, 0x642c, 0xdc18,
	0x40b9, 0x80de, 0x1094, 0x20e8,
	0x27db, 0x1eb6, 0x9dac, 0x7b58,
	0x11c1, 0x2242, 0x84ac, 0x4c58,
	0x1be5, 0x2d7a, 0x5e34, 0xa718,
	0x4b39, 0x8d1e, 0x14b4, 0x28d8,
	0x4c97, 0xc87e, 0x11fc, 0x33a8,
	0x8e97, 0x497e, 0x2ffc, 0x1aa8,
	0x16b3, 0x3d62, 0x4f34, 0x8518,
	0x1e2f, 0x391a, 0x5cac, 0xf858,
	0x1d9f, 0x3b7a, 0x572c, 0xfe18,
	0x15f5, 0x2a5a, 0x5264, 0xa3b8,
	0x1dbb, 0x3b66, 0x715c, 0xe3f8,
	0x4397, 0xc27e, 0x17fc, 0x3ea8,
	0x1617, 0x3d3e, 0x6464, 0xb8b8,
	0x23ff, 0x12aa, 0xab6c, 0x56d8,
	0x2dfb, 0x1ba6, 0x913c, 0x7328,
	0x185d, 0x2ca6, 0x7914, 0x9e28,
	0x171b, 0x3e36, 0x7d7c, 0xebe8,
	0x4199, 0x82ee, 0x19f4, 0x2e58,
	0x4807, 0xc40e, 0x130c, 0x3208,
	0x1905, 0x2e0a, 0x5804, 0xac08,
	0x213f, 0x132a, 0xadfc, 0x5ba8,
	0x19a9, 0x2efe, 0xb5cc, 0x6f88,
};
static const u16 x8_vectors[] = {
	0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
	0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
	0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
	0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
	0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
	0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
	0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
	0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
	0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
	0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
	0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
	0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
	0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
	0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
	0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
	0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
	0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
	0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
	0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
};
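
/*
 * decode_syndrome() below does a Gaussian-elimination style walk: for each
 * candidate symbol it tries to cancel the syndrome, bit by bit from LSB to
 * MSB, by XOR-ing in that symbol's vectors. Reaching zero returns the symbol
 * index; a set bit that cannot be cancelled moves on to the next symbol. For
 * example, a syndrome of 0x0001 matches the first vector of symbol 2 in
 * x4_vectors above and decodes to err_sym = 2 after a single XOR.
 */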
static int decode_syndrome(u16 syndrome, const u16 *vectors, unsigned num_vecs,
			   unsigned v_dim)
{
	unsigned int i, err_sym;

	for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
		u16 s = syndrome;
		unsigned v_idx =  err_sym * v_dim;
		unsigned v_end = (err_sym + 1) * v_dim;

		/* walk over all 16 bits of the syndrome */
		for (i = 1; i < (1U << 16); i <<= 1) {

			/* if bit is set in that eigenvector... */
			if (v_idx < v_end && vectors[v_idx] & i) {
				u16 ev_comp = vectors[v_idx++];

				/* ... and bit set in the modified syndrome, */
				if (s & i) {
					/* remove it. */
					s ^= ev_comp;

					if (!s)
						return err_sym;
				}

			} else if (s & i)
				/* can't get to zero, move to next symbol */
				break;
		}
	}

	edac_dbg(0, "syndrome(%x) not found\n", syndrome);
	return -1;
}
static int map_err_sym_to_channel(int err_sym, int sym_size)
{
	if (sym_size == 4)
		switch (err_sym) {
		case 0x20:
		case 0x21:
			return 0;
		case 0x22:
		case 0x23:
			return 1;
		default:
			return err_sym >> 4;
		}
	/* x8 symbols */
	else
		switch (err_sym) {
		/* imaginary bits not in a DIMM */
		case 0x10:
			WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n",
			     err_sym);
			return -1;
		case 0x11:
			return 0;
		case 0x12:
			return 1;
		default:
			return err_sym >> 3;
		}
}
static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	int err_sym = -1;

	if (pvt->ecc_sym_sz == 8)
		err_sym = decode_syndrome(syndrome, x8_vectors,
					  ARRAY_SIZE(x8_vectors),
					  pvt->ecc_sym_sz);
	else if (pvt->ecc_sym_sz == 4)
		err_sym = decode_syndrome(syndrome, x4_vectors,
					  ARRAY_SIZE(x4_vectors),
					  pvt->ecc_sym_sz);
	else {
		amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz);
		return err_sym;
	}

	return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
}
static void __log_ecc_error(struct mem_ctl_info *mci, struct err_info *err,
			    u8 ecc_type)
{
	enum hw_event_mc_err_type err_type;
	const char *string;

	if (ecc_type == 2)
		err_type = HW_EVENT_ERR_CORRECTED;
	else if (ecc_type == 1)
		err_type = HW_EVENT_ERR_UNCORRECTED;
	else if (ecc_type == 3)
		err_type = HW_EVENT_ERR_DEFERRED;
	else {
		WARN(1, "Something is rotten in the state of Denmark.\n");
		return;
	}

	switch (err->err_code) {
	case DECODE_OK:
		string = "";
		break;
	case ERR_NODE:
		string = "Failed to map error addr to a node";
		break;
	case ERR_CSROW:
		string = "Failed to map error addr to a csrow";
		break;
	case ERR_CHANNEL:
		string = "Unknown syndrome - possible error reporting race";
		break;
	case ERR_SYND:
		string = "MCA_SYND not valid - unknown syndrome and csrow";
		break;
	case ERR_NORM_ADDR:
		string = "Cannot decode normalized address";
		break;
	default:
		string = "WTF error";
		break;
	}

	edac_mc_handle_error(err_type, mci, 1,
			     err->page, err->offset, err->syndrome,
			     err->csrow, err->channel, -1,
			     string, "");
}
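
/*
 * The ecc_type values consumed by __log_ecc_error() above come from
 * MCA_STATUS[46:45]: 1 means uncorrected, 2 corrected; this driver also uses
 * 3 for deferred errors (set in decode_umc_error() from MCI_STATUS_DEFERRED).
 */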
static inline void decode_bus_error(int node_id, struct mce *m)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;
	u8 ecc_type = (m->status >> 45) & 0x3;
	u8 xec = XEC(m->status, 0x1f);
	u16 ec = EC(m->status);
	u64 sys_addr;
	struct err_info err;

	mci = edac_mc_find(node_id);
	if (!mci)
		return;

	pvt = mci->pvt_info;

	/* Bail out early if this was an 'observed' error */
	if (PP(ec) == NBSL_PP_OBS)
		return;

	/* Do only ECC errors */
	if (xec && xec != F10_NBSL_EXT_ERR_ECC)
		return;

	memset(&err, 0, sizeof(err));

	sys_addr = get_error_address(pvt, m);

	if (ecc_type == 2)
		err.syndrome = extract_syndrome(m->status);

	pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, &err);

	__log_ecc_error(mci, &err, ecc_type);
}
/*
 * To find the UMC channel represented by this bank we need to match on its
 * instance_id. The instance_id of a bank is held in the lower 32 bits of its
 * IPID.
 *
 * Currently, we can derive the channel number by looking at the 6th nibble in
 * the instance_id. For example, instance_id=0xYXXXXX where Y is the channel
 * number.
 */
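/*
 * For illustration (made-up instance_ids): 0x150000 yields channel 1 and
 * 0x250000 yields channel 2, since the shift by 20 extracts that nibble.
 */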
static int find_umc_channel(struct mce *m)
{
	return (m->ipid & GENMASK(31, 0)) >> 20;
}
static void decode_umc_error(int node_id, struct mce *m)
{
	u8 ecc_type = (m->status >> 45) & 0x3;
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;
	struct err_info err;
	u64 sys_addr;

	mci = edac_mc_find(node_id);
	if (!mci)
		return;

	pvt = mci->pvt_info;

	memset(&err, 0, sizeof(err));

	if (m->status & MCI_STATUS_DEFERRED)
		ecc_type = 3;

	err.channel = find_umc_channel(m);

	if (!(m->status & MCI_STATUS_SYNDV)) {
		err.err_code = ERR_SYND;
		goto log_error;
	}
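
	/*
	 * As consumed below, MCA_SYND carries the syndrome length (in bits)
	 * in bits [23:18] and the syndrome value itself starting at bit 32.
	 */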
	if (ecc_type == 2) {
		u8 length = (m->synd >> 18) & 0x3f;

		if (length)
			err.syndrome = (m->synd >> 32) & GENMASK(length - 1, 0);
		else
			err.err_code = ERR_CHANNEL;
	}

	err.csrow = m->synd & 0x7;

	if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
		err.err_code = ERR_NORM_ADDR;
		goto log_error;
	}

	error_address_to_page_and_offset(sys_addr, &err);

log_error:
	__log_ecc_error(mci, &err, ecc_type);
}
/*
 * Use pvt->F3 which contains the F3 CPU PCI device to get the related
 * F1 (AddrMap) and F2 (Dct) devices. Return negative value on error.
 * Reserve F0 and F6 on systems with a UMC.
 */
static int
reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2)
{
	if (pvt->umc) {
		pvt->F0 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
		if (!pvt->F0) {
			edac_dbg(1, "F0 not found, device 0x%x\n", pci_id1);
			return -ENODEV;
		}

		pvt->F6 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
		if (!pvt->F6) {
			pci_dev_put(pvt->F0);
			pvt->F0 = NULL;

			edac_dbg(1, "F6 not found: device 0x%x\n", pci_id2);
			return -ENODEV;
		}

		if (!pci_ctl_dev)
			pci_ctl_dev = &pvt->F0->dev;

		edac_dbg(1, "F0: %s\n", pci_name(pvt->F0));
		edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
		edac_dbg(1, "F6: %s\n", pci_name(pvt->F6));

		return 0;
	}

	/* Reserve the ADDRESS MAP Device */
	pvt->F1 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
	if (!pvt->F1) {
		edac_dbg(1, "F1 not found: device 0x%x\n", pci_id1);
		return -ENODEV;
	}

	/* Reserve the DCT Device */
	pvt->F2 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
	if (!pvt->F2) {
		pci_dev_put(pvt->F1);
		pvt->F1 = NULL;

		edac_dbg(1, "F2 not found: device 0x%x\n", pci_id2);
		return -ENODEV;
	}

	if (!pci_ctl_dev)
		pci_ctl_dev = &pvt->F2->dev;

	edac_dbg(1, "F1: %s\n", pci_name(pvt->F1));
	edac_dbg(1, "F2: %s\n", pci_name(pvt->F2));
	edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));

	return 0;
}
static void free_mc_sibling_devs(struct amd64_pvt *pvt)
{
	if (pvt->umc) {
		pci_dev_put(pvt->F0);
		pci_dev_put(pvt->F6);
	} else {
		pci_dev_put(pvt->F1);
		pci_dev_put(pvt->F2);
	}
}
static void determine_ecc_sym_sz(struct amd64_pvt *pvt)
{
	pvt->ecc_sym_sz = 4;

	if (pvt->umc) {
		u8 i;

		for_each_umc(i) {
			/* Check enabled channels only: */
			if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
				if (pvt->umc[i].ecc_ctrl & BIT(9)) {
					pvt->ecc_sym_sz = 16;
					return;
				} else if (pvt->umc[i].ecc_ctrl & BIT(7)) {
					pvt->ecc_sym_sz = 8;
					return;
				}
			}
		}
	} else if (pvt->fam >= 0x10) {
		u32 tmp;

		amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
		/* F16h has only DCT0, so no need to read dbam1. */
		if (pvt->fam != 0x16)
			amd64_read_dct_pci_cfg(pvt, 1, DBAM0, &pvt->dbam1);

		/* F10h, revD and later can do x8 ECC too. */
		if ((pvt->fam > 0x10 || pvt->model > 7) && tmp & BIT(25))
			pvt->ecc_sym_sz = 8;
	}
}
/*
 * Retrieve the hardware registers of the memory controller.
 */
static void __read_mc_regs_df(struct amd64_pvt *pvt)
{
	u8 nid = pvt->mc_node_id;
	struct amd64_umc *umc;
	u32 i, umc_base;

	/* Read registers from each UMC */
	for_each_umc(i) {

		umc_base = get_umc_base(i);
		umc = &pvt->umc[i];

		amd_smn_read(nid, umc_base + UMCCH_DIMM_CFG, &umc->dimm_cfg);
		amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &umc->umc_cfg);
		amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &umc->sdp_ctrl);
		amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &umc->ecc_ctrl);
		amd_smn_read(nid, umc_base + UMCCH_UMC_CAP_HI, &umc->umc_cap_hi);
	}
}
/*
 * Retrieve the hardware registers of the memory controller (this includes the
 * 'Address Map' and 'Misc' device regs)
 */
static void read_mc_regs(struct amd64_pvt *pvt)
{
	unsigned int range;
	u64 msr_val;

	/*
	 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
	 * those are Read-As-Zero.
	 */
	rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
	edac_dbg(0, "  TOP_MEM:  0x%016llx\n", pvt->top_mem);

	/* Check first whether TOP_MEM2 is enabled: */
	rdmsrl(MSR_AMD64_SYSCFG, msr_val);
	if (msr_val & BIT(21)) {
		rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
		edac_dbg(0, "  TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
	} else {
		edac_dbg(0, "  TOP_MEM2 disabled\n");
	}

	if (pvt->umc) {
		__read_mc_regs_df(pvt);
		amd64_read_pci_cfg(pvt->F0, DF_DHAR, &pvt->dhar);

		goto skip;
	}

	amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);

	read_dram_ctl_register(pvt);

	for (range = 0; range < DRAM_RANGES; range++) {
		u8 rw;

		/* read settings for this DRAM range */
		read_dram_base_limit_regs(pvt, range);

		rw = dram_rw(pvt, range);
		if (!rw)
			continue;

		edac_dbg(1, "  DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
			 range,
			 get_dram_base(pvt, range),
			 get_dram_limit(pvt, range));

		edac_dbg(1, "   IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
			 dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
			 (rw & 0x1) ? "R" : "-",
			 (rw & 0x2) ? "W" : "-",
			 dram_intlv_sel(pvt, range),
			 dram_dst_node(pvt, range));
	}

	amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
	amd64_read_dct_pci_cfg(pvt, 0, DBAM0, &pvt->dbam0);

	amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);

	amd64_read_dct_pci_cfg(pvt, 0, DCLR0, &pvt->dclr0);
	amd64_read_dct_pci_cfg(pvt, 0, DCHR0, &pvt->dchr0);

	if (!dct_ganging_enabled(pvt)) {
		amd64_read_dct_pci_cfg(pvt, 1, DCLR0, &pvt->dclr1);
		amd64_read_dct_pci_cfg(pvt, 1, DCHR0, &pvt->dchr1);
	}

skip:
	read_dct_base_mask(pvt);

	determine_memory_type(pvt);
	edac_dbg(1, "  DIMM type: %s\n", edac_mem_types[pvt->dram_type]);

	determine_ecc_sym_sz(pvt);
}
/*
 * NOTE: CPU Revision Dependent code
 *
 * Input:
 *	@csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
 *	k8 private pointer to -->
 *			DRAM Bank Address mapping register
 *			node_id
 *			DCL register where dual_channel_active is
 *
 * The DBAM register consists of 4 sets of 4 bits each, with the following
 * definitions:
 *
 * Bits:	CSROWs
 * 0-3		CSROWs 0 and 1
 * 4-7		CSROWs 2 and 3
 * 8-11		CSROWs 4 and 5
 * 12-15	CSROWs 6 and 7
 *
 * Values range from: 0 to 15
 * The meaning of the values depends on CPU revision and dual-channel state,
 * see the relevant BKDG for more info.
 *
 * The memory controller provides a total of only 8 CSROWs in its current
 * architecture. Each "pair" of CSROWs normally represents just one DIMM in
 * single channel or two (2) DIMMs in dual channel mode.
 *
 * The following code logic collapses the various tables for CSROW based on CPU
 * revision.
 *
 * Returns:
 *	The number of PAGE_SIZE pages on the specified CSROW number it
 *	encompasses
 */
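/*
 * For illustration (example register value): with dbam = 0x00003210,
 * DBAM_DIMM() extracts one nibble per DIMM pair: DIMM0 -> 0x0, DIMM1 -> 0x1,
 * DIMM2 -> 0x2, DIMM3 -> 0x3; each nibble is then translated to a chip
 * select size via ->dbam_to_cs().
 */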
static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr_orig)
{
	u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;
	int csrow_nr = csrow_nr_orig;
	u32 cs_mode, nr_pages;

	if (!pvt->umc) {
		csrow_nr >>= 1;
		cs_mode = DBAM_DIMM(csrow_nr, dbam);
	} else {
		cs_mode = f17_get_cs_mode(csrow_nr >> 1, dct, pvt);
	}

	nr_pages   = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, csrow_nr);
	nr_pages <<= 20 - PAGE_SHIFT;
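
	/*
	 * dbam_to_cs() returns the chip select size in MB; shifting by
	 * (20 - PAGE_SHIFT) converts MB to PAGE_SIZE pages, e.g. a 2048 MB
	 * csrow yields 2048 << 8 = 524288 4K pages on x86-64.
	 */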
	edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n",
		 csrow_nr_orig, dct, cs_mode);
	edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);

	return nr_pages;
}
static int init_csrows_df(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	enum edac_type edac_mode = EDAC_NONE;
	enum dev_type dev_type = DEV_UNKNOWN;
	struct dimm_info *dimm;
	int empty = 1;
	u8 umc, cs;

	if (mci->edac_ctl_cap & EDAC_FLAG_S16ECD16ED) {
		edac_mode = EDAC_S16ECD16ED;
		dev_type = DEV_X16;
	} else if (mci->edac_ctl_cap & EDAC_FLAG_S8ECD8ED) {
		edac_mode = EDAC_S8ECD8ED;
		dev_type = DEV_X8;
	} else if (mci->edac_ctl_cap & EDAC_FLAG_S4ECD4ED) {
		edac_mode = EDAC_S4ECD4ED;
		dev_type = DEV_X4;
	} else if (mci->edac_ctl_cap & EDAC_FLAG_SECDED) {
		edac_mode = EDAC_SECDED;
	}

	for_each_umc(umc) {
		for_each_chip_select(cs, umc, pvt) {
			if (!csrow_enabled(cs, umc, pvt))
				continue;

			empty = 0;
			dimm = mci->csrows[cs]->channels[umc]->dimm;

			edac_dbg(1, "MC node: %d, csrow: %d\n",
				 pvt->mc_node_id, cs);

			dimm->nr_pages = get_csrow_nr_pages(pvt, umc, cs);
			dimm->mtype = pvt->dram_type;
			dimm->edac_mode = edac_mode;
			dimm->dtype = dev_type;
			dimm->grain = 64;
		}
	}

	return empty;
}
/*
 * Initialize the array of csrow attribute instances, based on the values
 * from pci config hardware registers.
 */
static int init_csrows(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	enum edac_type edac_mode = EDAC_NONE;
	struct csrow_info *csrow;
	struct dimm_info *dimm;
	int i, j, empty = 1;
	int nr_pages = 0;
	u32 val;

	if (pvt->umc)
		return init_csrows_df(mci);

	amd64_read_pci_cfg(pvt->F3, NBCFG, &val);

	pvt->nbcfg = val;

	edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
		 pvt->mc_node_id, val,
		 !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));

	/*
	 * We iterate over DCT0 here but we look at DCT1 in parallel, if needed.
	 */
	for_each_chip_select(i, 0, pvt) {
		bool row_dct0 = !!csrow_enabled(i, 0, pvt);
		bool row_dct1 = false;

		if (pvt->fam != 0xf)
			row_dct1 = !!csrow_enabled(i, 1, pvt);

		if (!row_dct0 && !row_dct1)
			continue;

		csrow = mci->csrows[i];
		empty = 0;

		edac_dbg(1, "MC node: %d, csrow: %d\n",
			 pvt->mc_node_id, i);

		if (row_dct0) {
			nr_pages = get_csrow_nr_pages(pvt, 0, i);
			csrow->channels[0]->dimm->nr_pages = nr_pages;
		}

		/* K8 has only one DCT */
		if (pvt->fam != 0xf && row_dct1) {
			int row_dct1_pages = get_csrow_nr_pages(pvt, 1, i);

			csrow->channels[1]->dimm->nr_pages = row_dct1_pages;
			nr_pages += row_dct1_pages;
		}

		edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages);

		/* Determine DIMM ECC mode: */
		if (pvt->nbcfg & NBCFG_ECC_ENABLE) {
			edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL)
					? EDAC_S4ECD4ED
					: EDAC_SECDED;
		}

		for (j = 0; j < pvt->channel_count; j++) {
			dimm = csrow->channels[j]->dimm;
			dimm->mtype = pvt->dram_type;
			dimm->edac_mode = edac_mode;
			dimm->grain = 64;
		}
	}

	return empty;
}
/* get all cores on this DCT */
static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid)
{
	int cpu;

	for_each_online_cpu(cpu)
		if (topology_die_id(cpu) == nid)
			cpumask_set_cpu(cpu, mask);
}
/* check MCG_CTL on all the cpus on this node */
static bool nb_mce_bank_enabled_on_node(u16 nid)
{
	cpumask_var_t mask;
	int cpu, nbe;
	bool ret = false;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
		amd64_warn("%s: Error allocating mask\n", __func__);
		return false;
	}

	get_cpus_on_this_dct_cpumask(mask, nid);

	rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);

	for_each_cpu(cpu, mask) {
		struct msr *reg = per_cpu_ptr(msrs, cpu);
		nbe = reg->l & MSR_MCGCTL_NBE;

		edac_dbg(0, "core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
			 cpu, reg->q,
			 (nbe ? "enabled" : "disabled"));

		if (!nbe)
			goto out;
	}
	ret = true;

out:
	free_cpumask_var(mask);
	return ret;
}
static int toggle_ecc_err_reporting(struct ecc_settings *s, u16 nid, bool on)
{
	cpumask_var_t cmask;
	int cpu;

	if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
		amd64_warn("%s: error allocating mask\n", __func__);
		return -ENOMEM;
	}

	get_cpus_on_this_dct_cpumask(cmask, nid);

	rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);

	for_each_cpu(cpu, cmask) {

		struct msr *reg = per_cpu_ptr(msrs, cpu);

		if (on) {
			if (reg->l & MSR_MCGCTL_NBE)
				s->flags.nb_mce_enable = 1;

			reg->l |= MSR_MCGCTL_NBE;
		} else {
			/*
			 * Turn off NB MCE reporting only when it was off
			 * before.
			 */
			if (!s->flags.nb_mce_enable)
				reg->l &= ~MSR_MCGCTL_NBE;
		}
	}
	wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);

	free_cpumask_var(cmask);

	return 0;
}
static bool enable_ecc_error_reporting(struct ecc_settings *s, u16 nid,
				       struct pci_dev *F3)
{
	bool ret = true;
	u32 value, mask = 0x3;		/* UECC/CECC enable */

	if (toggle_ecc_err_reporting(s, nid, ON)) {
		amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
		return false;
	}

	amd64_read_pci_cfg(F3, NBCTL, &value);

	s->old_nbctl   = value & mask;
	s->nbctl_valid = true;

	value |= mask;
	amd64_write_pci_cfg(F3, NBCTL, value);

	amd64_read_pci_cfg(F3, NBCFG, &value);

	edac_dbg(0, "1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
		 nid, value, !!(value & NBCFG_ECC_ENABLE));

	if (!(value & NBCFG_ECC_ENABLE)) {
		amd64_warn("DRAM ECC disabled on this node, enabling...\n");

		s->flags.nb_ecc_prev = 0;

		/* Attempt to turn on DRAM ECC Enable */
		value |= NBCFG_ECC_ENABLE;
		amd64_write_pci_cfg(F3, NBCFG, value);

		amd64_read_pci_cfg(F3, NBCFG, &value);

		if (!(value & NBCFG_ECC_ENABLE)) {
			amd64_warn("Hardware rejected DRAM ECC enable, "
				   "check memory DIMM configuration.\n");
			ret = false;
		} else {
			amd64_info("Hardware accepted DRAM ECC Enable\n");
		}
	} else {
		s->flags.nb_ecc_prev = 1;
	}

	edac_dbg(0, "2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
		 nid, value, !!(value & NBCFG_ECC_ENABLE));

	return ret;
}
static void restore_ecc_error_reporting(struct ecc_settings *s, u16 nid,
					struct pci_dev *F3)
{
	u32 value, mask = 0x3;		/* UECC/CECC enable */

	if (!s->nbctl_valid)
		return;

	amd64_read_pci_cfg(F3, NBCTL, &value);
	value &= ~mask;
	value |= s->old_nbctl;

	amd64_write_pci_cfg(F3, NBCTL, value);

	/* restore previous BIOS DRAM ECC "off" setting we force-enabled */
	if (!s->flags.nb_ecc_prev) {
		amd64_read_pci_cfg(F3, NBCFG, &value);
		value &= ~NBCFG_ECC_ENABLE;
		amd64_write_pci_cfg(F3, NBCFG, value);
	}

	/* restore the NB Enable MCGCTL bit */
	if (toggle_ecc_err_reporting(s, nid, OFF))
		amd64_warn("Error restoring NB MCGCTL settings!\n");
}
static bool ecc_enabled(struct amd64_pvt *pvt)
{
	u16 nid = pvt->mc_node_id;
	bool nb_mce_en = false;
	u8 ecc_en = 0, i;
	u32 value;

	if (boot_cpu_data.x86 >= 0x17) {
		u8 umc_en_mask = 0, ecc_en_mask = 0;
		struct amd64_umc *umc;

		for_each_umc(i) {
			umc = &pvt->umc[i];

			/* Only check enabled UMCs. */
			if (!(umc->sdp_ctrl & UMC_SDP_INIT))
				continue;

			umc_en_mask |= BIT(i);

			if (umc->umc_cap_hi & UMC_ECC_ENABLED)
				ecc_en_mask |= BIT(i);
		}
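
		/*
		 * For illustration: with UMC0 and UMC1 initialized,
		 * umc_en_mask is 0x3; ECC counts as enabled only if
		 * ecc_en_mask is also 0x3, i.e. every enabled UMC
		 * reports UMC_ECC_ENABLED.
		 */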
		/* Check whether at least one UMC is enabled: */
		if (umc_en_mask)
			ecc_en = umc_en_mask == ecc_en_mask;
		else
			edac_dbg(0, "Node %d: No enabled UMCs.\n", nid);

		/* Assume UMC MCA banks are enabled. */
		nb_mce_en = true;
	} else {
		amd64_read_pci_cfg(pvt->F3, NBCFG, &value);

		ecc_en = !!(value & NBCFG_ECC_ENABLE);

		nb_mce_en = nb_mce_bank_enabled_on_node(nid);
		if (!nb_mce_en)
			edac_dbg(0, "NB MCE bank disabled, set MSR 0x%08x[4] on node %d to enable.\n",
				 MSR_IA32_MCG_CTL, nid);
	}

	edac_dbg(3, "Node %d: DRAM ECC %s.\n", nid, (ecc_en ? "enabled" : "disabled"));

	if (!ecc_en || !nb_mce_en)
		return false;
	else
		return true;
}
static inline void
f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
{
	u8 i, ecc_en = 1, cpk_en = 1, dev_x4 = 1, dev_x16 = 1;

	for_each_umc(i) {
		if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
			ecc_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_ENABLED);
			cpk_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_CHIPKILL_CAP);

			dev_x4 &= !!(pvt->umc[i].dimm_cfg & BIT(6));
			dev_x16 &= !!(pvt->umc[i].dimm_cfg & BIT(7));
		}
	}

	/* Set chipkill only if ECC is enabled: */
	if (ecc_en) {
		mci->edac_ctl_cap |= EDAC_FLAG_SECDED;

		if (!cpk_en)
			return;

		if (dev_x4)
			mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
		else if (dev_x16)
			mci->edac_ctl_cap |= EDAC_FLAG_S16ECD16ED;
		else
			mci->edac_ctl_cap |= EDAC_FLAG_S8ECD8ED;
	}
}
static void setup_mci_misc_attrs(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	mci->mtype_cap		= MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
	mci->edac_ctl_cap	= EDAC_FLAG_NONE;

	if (pvt->umc) {
		f17h_determine_edac_ctl_cap(mci, pvt);
	} else {
		if (pvt->nbcap & NBCAP_SECDED)
			mci->edac_ctl_cap |= EDAC_FLAG_SECDED;

		if (pvt->nbcap & NBCAP_CHIPKILL)
			mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
	}

	mci->edac_cap		= determine_edac_cap(pvt);
	mci->mod_name		= EDAC_MOD_STR;
	mci->ctl_name		= fam_type->ctl_name;
	mci->dev_name		= pci_name(pvt->F3);
	mci->ctl_page_to_phys	= NULL;

	/* memory scrubber interface */
	mci->set_sdram_scrub_rate = set_scrub_rate;
	mci->get_sdram_scrub_rate = get_scrub_rate;
}
/*
 * returns a pointer to the family descriptor on success, NULL otherwise.
 */
static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
{
	pvt->ext_model  = boot_cpu_data.x86_model >> 4;
	pvt->stepping	= boot_cpu_data.x86_stepping;
	pvt->model	= boot_cpu_data.x86_model;
	pvt->fam	= boot_cpu_data.x86;

	switch (pvt->fam) {
	case 0xf:
		fam_type	= &family_types[K8_CPUS];
		pvt->ops	= &family_types[K8_CPUS].ops;
		break;

	case 0x10:
		fam_type	= &family_types[F10_CPUS];
		pvt->ops	= &family_types[F10_CPUS].ops;
		break;

	case 0x15:
		if (pvt->model == 0x30) {
			fam_type = &family_types[F15_M30H_CPUS];
			pvt->ops = &family_types[F15_M30H_CPUS].ops;
			break;
		} else if (pvt->model == 0x60) {
			fam_type = &family_types[F15_M60H_CPUS];
			pvt->ops = &family_types[F15_M60H_CPUS].ops;
			break;
		/* Richland is only client */
		} else if (pvt->model == 0x13) {
			return NULL;
		} else {
			fam_type	= &family_types[F15_CPUS];
			pvt->ops	= &family_types[F15_CPUS].ops;
		}
		break;

	case 0x16:
		if (pvt->model == 0x30) {
			fam_type = &family_types[F16_M30H_CPUS];
			pvt->ops = &family_types[F16_M30H_CPUS].ops;
			break;
		}
		fam_type	= &family_types[F16_CPUS];
		pvt->ops	= &family_types[F16_CPUS].ops;
		break;

	case 0x17:
		if (pvt->model >= 0x10 && pvt->model <= 0x2f) {
			fam_type = &family_types[F17_M10H_CPUS];
			pvt->ops = &family_types[F17_M10H_CPUS].ops;
			break;
		} else if (pvt->model >= 0x30 && pvt->model <= 0x3f) {
			fam_type = &family_types[F17_M30H_CPUS];
			pvt->ops = &family_types[F17_M30H_CPUS].ops;
			break;
		} else if (pvt->model >= 0x60 && pvt->model <= 0x6f) {
			fam_type = &family_types[F17_M60H_CPUS];
			pvt->ops = &family_types[F17_M60H_CPUS].ops;
			break;
		} else if (pvt->model >= 0x70 && pvt->model <= 0x7f) {
			fam_type = &family_types[F17_M70H_CPUS];
			pvt->ops = &family_types[F17_M70H_CPUS].ops;
			break;
		}
		fallthrough;
	case 0x18:
		fam_type	= &family_types[F17_CPUS];
		pvt->ops	= &family_types[F17_CPUS].ops;

		if (pvt->fam == 0x18)
			family_types[F17_CPUS].ctl_name = "F18h";
		break;

	case 0x19:
		if (pvt->model >= 0x20 && pvt->model <= 0x2f) {
			fam_type = &family_types[F17_M70H_CPUS];
			pvt->ops = &family_types[F17_M70H_CPUS].ops;
			fam_type->ctl_name = "F19h_M20h";
			break;
		}

		fam_type	= &family_types[F19_CPUS];
		pvt->ops	= &family_types[F19_CPUS].ops;
		family_types[F19_CPUS].ctl_name = "F19h";
		break;

	default:
		amd64_err("Unsupported family!\n");
		return NULL;
	}

	return fam_type;
}
static const struct attribute_group *amd64_edac_attr_groups[] = {
#ifdef CONFIG_EDAC_DEBUG
	&dbg_group,
	&inj_group,
#endif
	NULL
};
static int hw_info_get(struct amd64_pvt *pvt)
{
	u16 pci_id1, pci_id2;
	int ret;

	if (pvt->fam >= 0x17) {
		pvt->umc = kcalloc(fam_type->max_mcs, sizeof(struct amd64_umc), GFP_KERNEL);
		if (!pvt->umc)
			return -ENOMEM;

		pci_id1 = fam_type->f0_id;
		pci_id2 = fam_type->f6_id;
	} else {
		pci_id1 = fam_type->f1_id;
		pci_id2 = fam_type->f2_id;
	}

	ret = reserve_mc_sibling_devs(pvt, pci_id1, pci_id2);
	if (ret)
		return ret;

	read_mc_regs(pvt);

	return 0;
}

static void hw_info_put(struct amd64_pvt *pvt)
{
	if (pvt->F0 || pvt->F1)
		free_mc_sibling_devs(pvt);

	kfree(pvt->umc);
}
static int init_one_instance(struct amd64_pvt *pvt)
{
	struct mem_ctl_info *mci = NULL;
	struct edac_mc_layer layers[2];
	int ret = -EINVAL;

	/*
	 * We need to determine how many memory channels there are. Then use
	 * that information for calculating the size of the dynamic instance
	 * tables in the 'mci' structure.
	 */
	pvt->channel_count = pvt->ops->early_channel_count(pvt);
	if (pvt->channel_count < 0)
		return ret;

	ret = -ENOMEM;
	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = pvt->csels[0].b_cnt;
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;

	/*
	 * Always allocate two channels since we can have setups with DIMMs on
	 * only one channel. Also, this simplifies handling later for the price
	 * of a couple of KBs tops.
	 */
	layers[1].size = fam_type->max_mcs;
	layers[1].is_virt_csrow = false;
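
	/*
	 * The resulting geometry is chip selects x channels, e.g. 8 chip
	 * selects by 2 channels on a typical pre-F17h node (example numbers;
	 * the actual counts come from pvt->csels[] and fam_type->max_mcs).
	 */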
	mci = edac_mc_alloc(pvt->mc_node_id, ARRAY_SIZE(layers), layers, 0);
	if (!mci)
		return ret;

	mci->pvt_info = pvt;
	mci->pdev = &pvt->F3->dev;

	setup_mci_misc_attrs(mci);

	if (init_csrows(mci))
		mci->edac_cap = EDAC_FLAG_NONE;

	ret = -ENODEV;
	if (edac_mc_add_mc_with_groups(mci, amd64_edac_attr_groups)) {
		edac_dbg(1, "failed edac_mc_add_mc()\n");
		edac_mc_free(mci);
		return ret;
	}

	return 0;
}
static bool instance_has_memory(struct amd64_pvt *pvt)
{
	bool cs_enabled = false;
	int cs = 0, dct = 0;

	for (dct = 0; dct < fam_type->max_mcs; dct++) {
		for_each_chip_select(cs, dct, pvt)
			cs_enabled |= csrow_enabled(cs, dct, pvt);
	}

	return cs_enabled;
}
static int probe_one_instance(unsigned int nid)
{
	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
	struct amd64_pvt *pvt = NULL;
	struct ecc_settings *s;
	int ret;

	ret = -ENOMEM;
	s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
	if (!s)
		goto err_out;

	ecc_stngs[nid] = s;

	pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
	if (!pvt)
		goto err_settings;

	pvt->mc_node_id	= nid;
	pvt->F3 = F3;

	ret = -ENODEV;
	fam_type = per_family_init(pvt);
	if (!fam_type)
		goto err_enable;

	ret = hw_info_get(pvt);
	if (ret < 0)
		goto err_enable;

	ret = 0;
	if (!instance_has_memory(pvt)) {
		amd64_info("Node %d: No DIMMs detected.\n", nid);
		goto err_enable;
	}

	if (!ecc_enabled(pvt)) {
		ret = -ENODEV;

		if (!ecc_enable_override)
			goto err_enable;

		if (boot_cpu_data.x86 >= 0x17) {
			amd64_warn("Forcing ECC on is not recommended on newer systems. Please enable ECC in BIOS.");
			goto err_enable;
		} else
			amd64_warn("Forcing ECC on!\n");

		if (!enable_ecc_error_reporting(s, nid, F3))
			goto err_enable;
	}

	ret = init_one_instance(pvt);
	if (ret < 0) {
		amd64_err("Error probing instance: %d\n", nid);

		if (boot_cpu_data.x86 < 0x17)
			restore_ecc_error_reporting(s, nid, F3);

		goto err_enable;
	}

	amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
		   (pvt->fam == 0xf ?
			(pvt->ext_model >= K8_REV_F ? "revF or later "
						    : "revE or earlier ")
			: ""), pvt->mc_node_id);

	dump_misc_regs(pvt);

	return ret;

err_enable:
	hw_info_put(pvt);
	kfree(pvt);

err_settings:
	kfree(s);
	ecc_stngs[nid] = NULL;

err_out:
	return ret;
}
static void remove_one_instance(unsigned int nid)
{
	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
	struct ecc_settings *s = ecc_stngs[nid];
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;

	/* Remove from EDAC CORE tracking list */
	mci = edac_mc_del_mc(&F3->dev);
	if (!mci)
		return;

	pvt = mci->pvt_info;

	restore_ecc_error_reporting(s, nid, F3);

	kfree(ecc_stngs[nid]);
	ecc_stngs[nid] = NULL;

	/* Free the EDAC CORE resources */
	mci->pvt_info = NULL;

	hw_info_put(pvt);
	kfree(pvt);
	edac_mc_free(mci);
}
static void setup_pci_device(void)
{
	if (pci_ctl)
		return;

	pci_ctl = edac_pci_create_generic_ctl(pci_ctl_dev, EDAC_MOD_STR);
	if (!pci_ctl) {
		pr_warn("%s(): Unable to create PCI control\n", __func__);
		pr_warn("%s(): PCI error report via EDAC not set\n", __func__);
	}
}
static const struct x86_cpu_id amd64_cpuids[] = {
	X86_MATCH_VENDOR_FAM(AMD,	0x0F, NULL),
	X86_MATCH_VENDOR_FAM(AMD,	0x10, NULL),
	X86_MATCH_VENDOR_FAM(AMD,	0x15, NULL),
	X86_MATCH_VENDOR_FAM(AMD,	0x16, NULL),
	X86_MATCH_VENDOR_FAM(AMD,	0x17, NULL),
	X86_MATCH_VENDOR_FAM(HYGON,	0x18, NULL),
	X86_MATCH_VENDOR_FAM(AMD,	0x19, NULL),
	{ }
};
MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids);
static int __init amd64_edac_init(void)
{
	const char *owner;
	int err = -ENODEV;
	int i;

	owner = edac_get_owner();
	if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
		return -EBUSY;

	if (!x86_match_cpu(amd64_cpuids))
		return -ENODEV;

	if (amd_cache_northbridges() < 0)
		return -ENODEV;

	opstate_init();

	err = -ENOMEM;
	ecc_stngs = kcalloc(amd_nb_num(), sizeof(ecc_stngs[0]), GFP_KERNEL);
	if (!ecc_stngs)
		goto err_free;

	msrs = msrs_alloc();
	if (!msrs)
		goto err_free;

	for (i = 0; i < amd_nb_num(); i++) {
		err = probe_one_instance(i);
		if (err) {
			/* unwind properly */
			while (--i >= 0)
				remove_one_instance(i);

			goto err_pci;
		}
	}

	if (!edac_has_mcs()) {
		err = -ENODEV;
		goto err_pci;
	}

	/* register stuff with EDAC MCE */
	if (boot_cpu_data.x86 >= 0x17)
		amd_register_ecc_decoder(decode_umc_error);
	else
		amd_register_ecc_decoder(decode_bus_error);

	setup_pci_device();

#ifdef CONFIG_X86_32
	amd64_err("%s on 32-bit is unsupported. USE AT YOUR OWN RISK!\n", EDAC_MOD_STR);
#endif

	printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);

	return 0;

err_pci:
	pci_ctl_dev = NULL;

	msrs_free(msrs);
	msrs = NULL;

err_free:
	kfree(ecc_stngs);
	ecc_stngs = NULL;

	return err;
}
static void __exit amd64_edac_exit(void)
{
	int i;

	if (pci_ctl)
		edac_pci_release_generic_ctl(pci_ctl);

	/* unregister from EDAC MCE */
	if (boot_cpu_data.x86 >= 0x17)
		amd_unregister_ecc_decoder(decode_umc_error);
	else
		amd_unregister_ecc_decoder(decode_bus_error);

	for (i = 0; i < amd_nb_num(); i++)
		remove_one_instance(i);

	kfree(ecc_stngs);
	ecc_stngs = NULL;

	pci_ctl_dev = NULL;

	msrs_free(msrs);
	msrs = NULL;
}
module_init(amd64_edac_init);
module_exit(amd64_edac_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
	      "Dave Peterson, Thayne Harbaugh");
MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
		   EDAC_AMD64_VERSION);

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");