#include "amd64_edac.h"
#include <asm/amd_nb.h>
static struct edac_pci_ctl_info *pci_ctl;
static int report_gart_errors;
module_param(report_gart_errors, int, 0644);
/*
 * Set by command line parameter. If BIOS has enabled the ECC, this override is
 * cleared to prevent re-enabling the hardware by this driver.
 */
static int ecc_enable_override;
module_param(ecc_enable_override, int, 0644);
static struct msr __percpu *msrs;

/* Per-node stuff */
static struct ecc_settings **ecc_stngs;
/*
 * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
 * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
 * or higher value'.
 *
 * FIXME: Produce a better mapping/linearisation.
 */
static const struct scrubrate {
	u32 scrubval;		/* bit pattern for scrub rate */
	u32 bandwidth;		/* bandwidth consumed (bytes/sec) */
} scrubrates[] = {
	{ 0x01, 1600000000UL},
	{ 0x02, 800000000UL},
	{ 0x03, 400000000UL},
	{ 0x04, 200000000UL},
	{ 0x05, 100000000UL},
	{ 0x06, 50000000UL},
	{ 0x07, 25000000UL},
	{ 0x08, 12284069UL},
	{ 0x09, 6274509UL},
	{ 0x0A, 3121951UL},
	{ 0x0B, 1560975UL},
	{ 0x0C, 781440UL},
	{ 0x0D, 390720UL},
	{ 0x0E, 195300UL},
	{ 0x0F, 97650UL},
	{ 0x10, 48854UL},
	{ 0x11, 24427UL},
	{ 0x12, 12213UL},
	{ 0x13, 6101UL},
	{ 0x14, 3051UL},
	{ 0x15, 1523UL},
	{ 0x16, 761UL},
	{ 0x00, 0UL},		/* scrubbing off */
};
int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
			       u32 *val, const char *func)
{
	int err = 0;

	err = pci_read_config_dword(pdev, offset, val);
	if (err)
		amd64_warn("%s: error reading F%dx%03x.\n",
			   func, PCI_FUNC(pdev->devfn), offset);

	return err;
}
int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
				u32 val, const char *func)
{
	int err = 0;

	err = pci_write_config_dword(pdev, offset, val);
	if (err)
		amd64_warn("%s: error writing to F%dx%03x.\n",
			   func, PCI_FUNC(pdev->devfn), offset);

	return err;
}
/*
 * Select DCT to which PCI cfg accesses are routed
 */
static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
{
	u32 reg = 0;

	amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
	reg &= (pvt->model == 0x30) ? ~3 : ~1;
	reg |= dct;
	amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
}
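/*
 * For illustration: on model 0x30 the low two bits of F1x10C select among
 * DCTs 0-3 (hence the ~3 mask above), so a hypothetical call
 * f15h_select_dct(pvt, 3) routes subsequent F2 config accesses to DCT3;
 * on the other F15h models only bit 0 is used and the selector toggles
 * between DCT0 and DCT1.
 */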
/*
 * Depending on the family, F2 DCT reads need special handling:
 *
 * K8: has a single DCT only and no address offsets >= 0x100
 *
 * F10h: each DCT has its own set of regs
 *	DCT0 -> F2x040..
 *	DCT1 -> F2x140..
 *
 * F16h: has only 1 DCT
 *
 * F15h: we select which DCT we access using F1x10C[DctCfgSel]
 */
static inline int amd64_read_dct_pci_cfg(struct amd64_pvt *pvt, u8 dct,
					 int offset, u32 *val)
{
	switch (pvt->fam) {
	case 0xf:
		if (dct || offset >= 0x100)
			return -EINVAL;
		break;

	case 0x10:
		if (dct) {
			/*
			 * Note: If ganging is enabled, barring the regs
			 * F2x[1,0]98 and F2x[1,0]9C, reads to F2x1xx
			 * return 0. (cf. Section 2.8.1 of the F10h BKDG)
			 */
			if (dct_ganging_enabled(pvt))
				return 0;

			offset += 0x100;
		}
		break;

	case 0x15:
		/*
		 * F15h: F2x1xx addresses do not map explicitly to DCT1.
		 * We should select which DCT we access using F1x10C[DctCfgSel]
		 */
		dct = (dct && pvt->model == 0x30) ? 3 : dct;
		f15h_select_dct(pvt, dct);
		break;

	case 0x16:
		if (dct)
			return -EINVAL;
		break;

	default:
		break;
	}
	return amd64_read_pci_cfg(pvt->F2, offset, val);
}
/*
 * Memory scrubber control interface. For K8, memory scrubbing is handled by
 * hardware and can involve L2 cache, dcache as well as the main memory. With
 * F10, this is extended to L3 cache scrubbing on CPU models sporting that
 * functionality.
 *
 * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
 * (dram) over to cache lines. This is nasty, so we will use bandwidth in
 * bytes/sec for the setting.
 *
 * Currently, we only do dram scrubbing. If the scrubbing is done in software on
 * other archs, we might not have access to the caches directly.
 */
/*
 * Scan the scrub rate mapping table for a close or matching bandwidth value to
 * issue. If the requested rate is too big, use the last maximum value found.
 */
static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate)
{
	u32 scrubval;
	int i;

	/*
	 * Map the configured rate (new_bw) to a value specific to the AMD64
	 * memory controller and apply to register. Search for the first
	 * bandwidth entry that is greater than or equal to the setting
	 * requested and program that. If at last entry, turn off DRAM scrubbing.
	 *
	 * If no suitable bandwidth is found, turn off DRAM scrubbing entirely
	 * by falling back to the last element in scrubrates[].
	 */
	for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
		/*
		 * skip scrub rates which aren't recommended
		 * (see F10 BKDG, F3x58)
		 */
		if (scrubrates[i].scrubval < min_rate)
			continue;

		if (scrubrates[i].bandwidth <= new_bw)
			break;
	}

	scrubval = scrubrates[i].scrubval;

	if (pvt->fam == 0x15 && pvt->model == 0x60) {
		f15h_select_dct(pvt, 0);
		pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
		f15h_select_dct(pvt, 1);
		pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
	} else {
		pci_write_bits32(pvt->F3, SCRCTRL, scrubval, 0x001F);
	}

	if (scrubval)
		return scrubrates[i].bandwidth;

	return 0;
}
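/*
 * Worked example (hypothetical request): new_bw = 500000000 and
 * min_rate = 0x5 skip entries 0x01-0x04 (their scrubval is below
 * min_rate); the first remaining entry with bandwidth <= new_bw is
 * { 0x05, 100000000UL }, so 0x05 is programmed and 100000000 is
 * reported back as the effective rate.
 */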
static int set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u32 min_scrubrate = 0x5;

	if (pvt->fam == 0xf)
		min_scrubrate = 0x0;

	if (pvt->fam == 0x15) {
		/* Erratum #505 */
		if (pvt->model < 0x10)
			f15h_select_dct(pvt, 0);

		if (pvt->model == 0x60)
			min_scrubrate = 0x6;
	}
	return __set_scrub_rate(pvt, bw, min_scrubrate);
}
static int get_scrub_rate(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u32 scrubval = 0;
	int i, retval = -EINVAL;

	if (pvt->fam == 0x15) {
		/* Erratum #505 */
		if (pvt->model < 0x10)
			f15h_select_dct(pvt, 0);

		if (pvt->model == 0x60)
			amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval);
	} else
		amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);

	scrubval = scrubval & 0x001F;

	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
		if (scrubrates[i].scrubval == scrubval) {
			retval = scrubrates[i].bandwidth;
			break;
		}
	}
	return retval;
}
/*
 * returns true if the SysAddr given by sys_addr matches the
 * DRAM base/limit associated with node_id
 */
static bool base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, u8 nid)
{
	u64 addr;

	/* The K8 treats this as a 40-bit value. However, bits 63-40 will be
	 * all ones if the most significant implemented address bit is 1.
	 * Here we discard bits 63-40. See section 3.4.2 of AMD publication
	 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
	 * Application Programming.
	 */
	addr = sys_addr & 0x000000ffffffffffull;

	return ((addr >= get_dram_base(pvt, nid)) &&
		(addr <= get_dram_limit(pvt, nid)));
}
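/*
 * For illustration: the sign-extended bits 63-40 are dropped, so a
 * sys_addr of 0xffffff8012345678 is compared as 0x8012345678 against
 * the node's DRAM base and limit.
 */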
/*
 * Attempt to map a SysAddr to a node. On success, return a pointer to the
 * mem_ctl_info structure for the node that the SysAddr maps to.
 *
 * On failure, return NULL.
 */
static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
						u64 sys_addr)
{
	struct amd64_pvt *pvt;
	u8 node_id;
	u32 intlv_en, bits;

	/*
	 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
	 * 3.4.4.2) registers to map the SysAddr to a node ID.
	 */
	pvt = mci->pvt_info;

	/*
	 * The value of this field should be the same for all DRAM Base
	 * registers. Therefore we arbitrarily choose to read it from the
	 * register for node 0.
	 */
	intlv_en = dram_intlv_en(pvt, 0);

	if (intlv_en == 0) {
		for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
			if (base_limit_match(pvt, sys_addr, node_id))
				goto found;
		}
		goto err_no_match;
	}

	if (unlikely((intlv_en != 0x01) &&
		     (intlv_en != 0x03) &&
		     (intlv_en != 0x07))) {
		amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);
		return NULL;
	}

	bits = (((u32) sys_addr) >> 12) & intlv_en;

	for (node_id = 0; ; ) {
		if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
			break;	/* intlv_sel field matches */

		if (++node_id >= DRAM_RANGES)
			goto err_no_match;
	}

	/* sanity test for sys_addr */
	if (unlikely(!base_limit_match(pvt, sys_addr, node_id))) {
		amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address "
			   "range for node %d with node interleaving enabled.\n",
			   __func__, sys_addr, node_id);
		return NULL;
	}

found:
	return edac_mc_find((int)node_id);

err_no_match:
	edac_dbg(2, "sys_addr 0x%lx doesn't match any node\n",
		 (unsigned long)sys_addr);

	return NULL;
}
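/*
 * For illustration: with intlv_en == 0x03, two SysAddr bits select among
 * four interleaved nodes, e.g. sys_addr = 0x12345000 yields
 * bits = (0x12345000 >> 12) & 0x3 = 1, so the node whose IntlvSel field
 * equals 1 claims the address.
 */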
/*
 * compute the CS base address of the @csrow on the DRAM controller @dct.
 * For details see F2x[5C:40] in the processor's BKDG
 */
static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
				 u64 *base, u64 *mask)
{
	u64 csbase, csmask, base_bits, mask_bits;
	u8 addr_shift;

	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
		csbase		= pvt->csels[dct].csbases[csrow];
		csmask		= pvt->csels[dct].csmasks[csrow];
		base_bits	= GENMASK_ULL(31, 21) | GENMASK_ULL(15, 9);
		mask_bits	= GENMASK_ULL(29, 21) | GENMASK_ULL(15, 9);
		addr_shift	= 4;

	/*
	 * F16h and F15h, models 30h and later need two addr_shift values:
	 * 8 for high and 6 for low (cf. F16h BKDG).
	 */
	} else if (pvt->fam == 0x16 ||
		  (pvt->fam == 0x15 && pvt->model >= 0x30)) {
		csbase		= pvt->csels[dct].csbases[csrow];
		csmask		= pvt->csels[dct].csmasks[csrow >> 1];

		*base  = (csbase & GENMASK_ULL(15, 5)) << 6;
		*base |= (csbase & GENMASK_ULL(30, 19)) << 8;

		*mask = ~0ULL;
		/* poke holes for the csmask */
		*mask &= ~((GENMASK_ULL(15, 5) << 6) |
			   (GENMASK_ULL(30, 19) << 8));

		*mask |= (csmask & GENMASK_ULL(15, 5)) << 6;
		*mask |= (csmask & GENMASK_ULL(30, 19)) << 8;

		return;
	} else {
		csbase		= pvt->csels[dct].csbases[csrow];
		csmask		= pvt->csels[dct].csmasks[csrow >> 1];
		addr_shift	= 8;

		if (pvt->fam == 0x15)
			base_bits = mask_bits =
				GENMASK_ULL(30, 19) | GENMASK_ULL(13, 5);
		else
			base_bits = mask_bits =
				GENMASK_ULL(28, 19) | GENMASK_ULL(13, 5);
	}

	*base = (csbase & base_bits) << addr_shift;

	*mask = ~0ULL;
	/* poke holes for the csmask */
	*mask &= ~(mask_bits << addr_shift);
	/* OR them in */
	*mask |= (csmask & mask_bits) << addr_shift;
}
#define for_each_chip_select(i, dct, pvt) \
	for (i = 0; i < pvt->csels[dct].b_cnt; i++)

#define chip_select_base(i, dct, pvt) \
	pvt->csels[dct].csbases[i]

#define for_each_chip_select_mask(i, dct, pvt) \
	for (i = 0; i < pvt->csels[dct].m_cnt; i++)
/*
 * @input_addr is an InputAddr associated with the node given by mci. Return the
 * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
 */
static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
{
	struct amd64_pvt *pvt;
	int csrow;
	u64 base, mask;

	pvt = mci->pvt_info;

	for_each_chip_select(csrow, 0, pvt) {
		if (!csrow_enabled(csrow, 0, pvt))
			continue;

		get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);

		mask = ~mask;

		if ((input_addr & mask) == (base & mask)) {
			edac_dbg(2, "InputAddr 0x%lx matches csrow %d (node %d)\n",
				 (unsigned long)input_addr, csrow,
				 pvt->mc_node_id);

			return csrow;
		}
	}
	edac_dbg(2, "no matching csrow for InputAddr 0x%lx (MC node %d)\n",
		 (unsigned long)input_addr, pvt->mc_node_id);

	return -1;
}
/*
 * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
 * for the node represented by mci. Info is passed back in *hole_base,
 * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if
 * info is invalid. Info may be invalid for either of the following reasons:
 *
 * - The revision of the node is not E or greater. In this case, the DRAM Hole
 *   Address Register does not exist.
 *
 * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
 *   indicating that its contents are not valid.
 *
 * The values passed back in *hole_base, *hole_offset, and *hole_size are
 * complete 32-bit values despite the fact that the bitfields in the DHAR
 * only represent bits 31-24 of the base and offset values.
 */
int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
			     u64 *hole_offset, u64 *hole_size)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	/* only revE and later have the DRAM Hole Address Register */
	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_E) {
		edac_dbg(1, "  revision %d for node %d does not support DHAR\n",
			 pvt->ext_model, pvt->mc_node_id);
		return 1;
	}

	/* valid for Fam10h and above */
	if (pvt->fam >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
		edac_dbg(1, "  Dram Memory Hoisting is DISABLED on this system\n");
		return 1;
	}

	if (!dhar_valid(pvt)) {
		edac_dbg(1, "  Dram Memory Hoisting is DISABLED on this node %d\n",
			 pvt->mc_node_id);
		return 1;
	}

	/* This node has Memory Hoisting */

	/* +------------------+--------------------+--------------------+-----
	 * | memory           | DRAM hole          | relocated          |
	 * | [0, (x - 1)]     | [x, 0xffffffff]    | addresses from     |
	 * |                  |                    | DRAM hole          |
	 * |                  |                    | [0x100000000,      |
	 * |                  |                    |  (0x100000000+     |
	 * |                  |                    |   (0xffffffff-x))] |
	 * +------------------+--------------------+--------------------+-----
	 *
	 * Above is a diagram of physical memory showing the DRAM hole and the
	 * relocated addresses from the DRAM hole. As shown, the DRAM hole
	 * starts at address x (the base address) and extends through address
	 * 0xffffffff. The DRAM Hole Address Register (DHAR) relocates the
	 * addresses in the hole so that they start at 0x100000000.
	 */

	*hole_base = dhar_base(pvt);
	*hole_size = (1ULL << 32) - *hole_base;

	*hole_offset = (pvt->fam > 0xf) ? f10_dhar_offset(pvt)
					: k8_dhar_offset(pvt);

	edac_dbg(1, "  DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
		 pvt->mc_node_id, (unsigned long)*hole_base,
		 (unsigned long)*hole_offset, (unsigned long)*hole_size);

	return 0;
}
EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
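/*
 * Worked example (hypothetical DHAR contents): with dhar_base() ==
 * 0xc0000000, *hole_base = 0xc0000000 and *hole_size = 0x100000000 -
 * 0xc0000000 = 0x40000000 (1G). The DRAM behind the hole is then visible
 * at [0x100000000, 0x13fffffff], and *hole_offset is the amount
 * sys_addr_to_dram_addr() below subtracts to undo the hoisting.
 */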
/*
 * Return the DramAddr that the SysAddr given by @sys_addr maps to. It is
 * assumed that sys_addr maps to the node given by mci.
 *
 * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
 * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
 * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
 * then it is also involved in translating a SysAddr to a DramAddr. Sections
 * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
 * These parts of the documentation are unclear. I interpret them as follows:
 *
 * When node n receives a SysAddr, it processes the SysAddr as follows:
 *
 * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
 *    Limit registers for node n. If the SysAddr is not within the range
 *    specified by the base and limit values, then node n ignores the Sysaddr
 *    (since it does not map to node n). Otherwise continue to step 2 below.
 *
 * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
 *    disabled so skip to step 3 below. Otherwise see if the SysAddr is within
 *    the range of relocated addresses (starting at 0x100000000) from the DRAM
 *    hole. If not, skip to step 3 below. Else get the value of the
 *    DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
 *    offset defined by this value from the SysAddr.
 *
 * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
 *    Base register for node n. To obtain the DramAddr, subtract the base
 *    address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
 */
static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
	int ret;

	dram_base = get_dram_base(pvt, pvt->mc_node_id);

	ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
				       &hole_size);
	if (!ret) {
		if ((sys_addr >= (1ULL << 32)) &&
		    (sys_addr < ((1ULL << 32) + hole_size))) {
			/* use DHAR to translate SysAddr to DramAddr */
			dram_addr = sys_addr - hole_offset;

			edac_dbg(2, "using DHAR to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
				 (unsigned long)sys_addr,
				 (unsigned long)dram_addr);

			return dram_addr;
		}
	}

	/*
	 * Translate the SysAddr to a DramAddr as shown near the start of
	 * section 3.4.4 (p. 70). Although sys_addr is a 64-bit value, the k8
	 * only deals with 40-bit values. Therefore we discard bits 63-40 of
	 * sys_addr below. If bit 39 of sys_addr is 1 then the bits we
	 * discard are all 1s. Otherwise the bits we discard are all 0s. See
	 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
	 * Programmer's Manual Volume 1 Application Programming.
	 */
	dram_addr = (sys_addr & GENMASK_ULL(39, 0)) - dram_base;

	edac_dbg(2, "using DRAM Base register to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
		 (unsigned long)sys_addr, (unsigned long)dram_addr);
	return dram_addr;
}
/*
 * @intlv_en is the value of the IntlvEn field from a DRAM Base register
 * (section 3.4.4.1). Return the number of bits from a SysAddr that are used
 * for node interleaving.
 */
static int num_node_interleave_bits(unsigned intlv_en)
{
	static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
	int n;

	BUG_ON(intlv_en > 7);
	n = intlv_shift_table[intlv_en];
	return n;
}
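/*
 * I.e. the valid IntlvEn encodings map as 0x1 -> 1 bit, 0x3 -> 2 bits and
 * 0x7 -> 3 bits of SysAddr used for node interleave selection; the table
 * slots for invalid encodings (2, 4, 5, 6) simply yield 0 bits.
 */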
/* Translate the DramAddr given by @dram_addr to an InputAddr. */
static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
{
	struct amd64_pvt *pvt;
	int intlv_shift;
	u64 input_addr;

	pvt = mci->pvt_info;

	/*
	 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
	 * concerning translating a DramAddr to an InputAddr.
	 */
	intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
	input_addr = ((dram_addr >> intlv_shift) & GENMASK_ULL(35, 12)) +
		      (dram_addr & 0xfff);

	edac_dbg(2, "  Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
		 intlv_shift, (unsigned long)dram_addr,
		 (unsigned long)input_addr);

	return input_addr;
}
/*
 * Translate the SysAddr represented by @sys_addr to an InputAddr. It is
 * assumed that @sys_addr maps to the node given by mci.
 */
static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
{
	u64 input_addr;

	input_addr =
	    dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));

	edac_dbg(2, "SysAddr 0x%lx translates to InputAddr 0x%lx\n",
		 (unsigned long)sys_addr, (unsigned long)input_addr);

	return input_addr;
}
/* Map the Error address to a PAGE and PAGE OFFSET. */
static inline void error_address_to_page_and_offset(u64 error_address,
						    struct err_info *err)
{
	err->page = (u32) (error_address >> PAGE_SHIFT);
	err->offset = ((u32) error_address) & ~PAGE_MASK;
}
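/*
 * For illustration, with 4K pages (PAGE_SHIFT == 12): an error_address
 * of 0x123456789 maps to page 0x123456 and offset 0x789.
 */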
/*
 * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
 * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
 * of a node that detected an ECC memory error. mci represents the node that
 * the error address maps to (possibly different from the node that detected
 * the error). Return the number of the csrow that sys_addr maps to, or -1 on
 * error.
 */
static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
{
	int csrow;

	csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));

	if (csrow == -1)
		amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
				  "address 0x%lx\n", (unsigned long)sys_addr);
	return csrow;
}
static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
/*
 * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
 * are ECC capable.
 */
static unsigned long determine_edac_cap(struct amd64_pvt *pvt)
{
	u8 bit;
	unsigned long edac_cap = EDAC_FLAG_NONE;

	bit = (pvt->fam > 0xf || pvt->ext_model >= K8_REV_F)
		? 19
		: 17;

	if (pvt->dclr0 & BIT(bit))
		edac_cap = EDAC_FLAG_SECDED;

	return edac_cap;
}
static void debug_display_dimm_sizes(struct amd64_pvt *, u8);

static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
{
	edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);

	if (pvt->dram_type == MEM_LRDDR3) {
		u32 dcsm = pvt->csels[chan].csmasks[0];
		/*
		 * It's assumed all LRDIMMs in a DCT are going to be of
		 * same 'type' until proven otherwise. So, use a cs
		 * value of '0' here to get dcsm value.
		 */
		edac_dbg(1, " LRDIMM %dx rank multiply\n", (dcsm & 0x3));
	}

	edac_dbg(1, "All DIMMs support ECC:%s\n",
		    (dclr & BIT(19)) ? "yes" : "no");

	edac_dbg(1, "  PAR/ERR parity: %s\n",
		 (dclr & BIT(8)) ? "enabled" : "disabled");

	if (pvt->fam == 0x10)
		edac_dbg(1, "  DCT 128bit mode width: %s\n",
			 (dclr & BIT(11)) ? "128b" : "64b");

	edac_dbg(1, "  x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
		 (dclr & BIT(12)) ? "yes" : "no",
		 (dclr & BIT(13)) ? "yes" : "no",
		 (dclr & BIT(14)) ? "yes" : "no",
		 (dclr & BIT(15)) ? "yes" : "no");
}
/* Display and decode various NB registers for debug purposes. */
static void dump_misc_regs(struct amd64_pvt *pvt)
{
	edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);

	edac_dbg(1, "  NB two channel DRAM capable: %s\n",
		 (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");

	edac_dbg(1, "  ECC capable: %s, ChipKill ECC capable: %s\n",
		 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
		 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");

	debug_dump_dramcfg_low(pvt, pvt->dclr0, 0);

	edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);

	edac_dbg(1, "F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, offset: 0x%08x\n",
		 pvt->dhar, dhar_base(pvt),
		 (pvt->fam == 0xf) ? k8_dhar_offset(pvt)
				   : f10_dhar_offset(pvt));

	edac_dbg(1, "  DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");

	debug_display_dimm_sizes(pvt, 0);

	/* everything below this point is Fam10h and above */
	if (pvt->fam == 0xf)
		return;

	debug_display_dimm_sizes(pvt, 1);

	amd64_info("using %s syndromes.\n", ((pvt->ecc_sym_sz == 8) ? "x8" : "x4"));

	/* Only if NOT ganged does dclr1 have valid info */
	if (!dct_ganging_enabled(pvt))
		debug_dump_dramcfg_low(pvt, pvt->dclr1, 1);
}
/*
 * See BKDG, F2x[1,0][5C:40], F2[1,0][6C:60]
 */
static void prep_chip_selects(struct amd64_pvt *pvt)
{
	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
	} else if (pvt->fam == 0x15 && pvt->model == 0x30) {
		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4;
		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2;
	} else {
		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
	}
}
/*
 * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
 */
static void read_dct_base_mask(struct amd64_pvt *pvt)
{
	int cs;

	prep_chip_selects(pvt);

	for_each_chip_select(cs, 0, pvt) {
		int reg0   = DCSB0 + (cs * 4);
		int reg1   = DCSB1 + (cs * 4);
		u32 *base0 = &pvt->csels[0].csbases[cs];
		u32 *base1 = &pvt->csels[1].csbases[cs];

		if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0))
			edac_dbg(0, "  DCSB0[%d]=0x%08x reg: F2x%x\n",
				 cs, *base0, reg0);

		if (pvt->fam == 0xf)
			continue;

		if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1))
			edac_dbg(0, "  DCSB1[%d]=0x%08x reg: F2x%x\n",
				 cs, *base1, (pvt->fam == 0x10) ? reg1
								: reg0);
	}

	for_each_chip_select_mask(cs, 0, pvt) {
		int reg0   = DCSM0 + (cs * 4);
		int reg1   = DCSM1 + (cs * 4);
		u32 *mask0 = &pvt->csels[0].csmasks[cs];
		u32 *mask1 = &pvt->csels[1].csmasks[cs];

		if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0))
			edac_dbg(0, "    DCSM0[%d]=0x%08x reg: F2x%x\n",
				 cs, *mask0, reg0);

		if (pvt->fam == 0xf)
			continue;

		if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1))
			edac_dbg(0, "    DCSM1[%d]=0x%08x reg: F2x%x\n",
				 cs, *mask1, (pvt->fam == 0x10) ? reg1
								: reg0);
	}
}
static void determine_memory_type(struct amd64_pvt *pvt)
{
	u32 dram_ctrl, dcsm;

	switch (pvt->fam) {
	case 0xf:
		if (pvt->ext_model >= K8_REV_F)
			goto ddr3;

		pvt->dram_type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
		return;

	case 0x10:
		if (pvt->dchr0 & DDR3_MODE)
			goto ddr3;

		pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
		return;

	case 0x15:
		if (pvt->model < 0x60)
			goto ddr3;

		/*
		 * Model 0x60h needs special handling:
		 *
		 * We use a Chip Select value of '0' to obtain dcsm.
		 * Theoretically, it is possible to populate LRDIMMs of different
		 * 'Rank' value on a DCT. But this is not the common case. So,
		 * it's reasonable to assume all DIMMs are going to be of same
		 * 'type' until proven otherwise.
		 */
		amd64_read_dct_pci_cfg(pvt, 0, DRAM_CONTROL, &dram_ctrl);
		dcsm = pvt->csels[0].csmasks[0];

		if (((dram_ctrl >> 8) & 0x7) == 0x2)
			pvt->dram_type = MEM_DDR4;
		else if (pvt->dclr0 & BIT(16))
			pvt->dram_type = MEM_DDR3;
		else if (dcsm & 0x3)
			pvt->dram_type = MEM_LRDDR3;
		else
			pvt->dram_type = MEM_RDDR3;

		return;

	case 0x16:
		goto ddr3;

	default:
		WARN(1, KERN_ERR "%s: Family??? 0x%x\n", __func__, pvt->fam);
		pvt->dram_type = MEM_EMPTY;
	}
	return;

ddr3:
	pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
}
/* Get the number of DCT channels the memory controller is using. */
static int k8_early_channel_count(struct amd64_pvt *pvt)
{
	int flag;

	if (pvt->ext_model >= K8_REV_F)
		/* RevF (NPT) and later */
		flag = pvt->dclr0 & WIDTH_128;
	else
		/* RevE and earlier */
		flag = pvt->dclr0 & REVE_WIDTH_128;

	/* not used */
	pvt->dclr1 = 0;

	return (flag) ? 2 : 1;
}
/* On F10h and later ErrAddr is MC4_ADDR[47:1] */
static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
{
	u16 mce_nid = amd_get_nb_id(m->extcpu);
	struct mem_ctl_info *mci;
	u8 start_bit = 1;
	u8 end_bit   = 47;
	u64 addr;

	mci = edac_mc_find(mce_nid);
	if (!mci)
		return 0;

	pvt = mci->pvt_info;

	if (pvt->fam == 0xf) {
		start_bit = 3;
		end_bit   = 39;
	}

	addr = m->addr & GENMASK_ULL(end_bit, start_bit);

	/*
	 * Erratum 637 workaround
	 */
	if (pvt->fam == 0x15) {
		u64 cc6_base, tmp_addr;
		u32 tmp;
		u16 intlv_en;

		if ((addr & GENMASK_ULL(47, 24)) >> 24 != 0x00fdf7)
			return addr;

		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_LIM, &tmp);
		intlv_en = tmp >> 21 & 0x7;

		/* add [47:27] + 3 trailing bits */
		cc6_base  = (tmp & GENMASK_ULL(20, 0)) << 3;

		/* reverse and add DramIntlvEn */
		cc6_base |= intlv_en ^ 0x7;

		/* pin at [47:24] */
		cc6_base <<= 24;

		if (!intlv_en)
			return cc6_base | (addr & GENMASK_ULL(23, 0));

		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp);

							/* faster log2 */
		tmp_addr  = (addr & GENMASK_ULL(23, 12)) << __fls(intlv_en + 1);

		/* OR DramIntlvSel into bits [14:12] */
		tmp_addr |= (tmp & GENMASK_ULL(23, 21)) >> 9;

		/* add remaining [11:0] bits from original MC4_ADDR */
		tmp_addr |= addr & GENMASK_ULL(11, 0);

		return cc6_base | tmp_addr;
	}

	return addr;
}
static struct pci_dev *pci_get_related_function(unsigned int vendor,
						unsigned int device,
						struct pci_dev *related)
{
	struct pci_dev *dev = NULL;

	while ((dev = pci_get_device(vendor, device, dev))) {
		if (pci_domain_nr(dev->bus) == pci_domain_nr(related->bus) &&
		    (dev->bus->number == related->bus->number) &&
		    (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
			break;
	}

	return dev;
}
static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
{
	struct amd_northbridge *nb;
	struct pci_dev *f1 = NULL;
	unsigned int pci_func;
	int off = range << 3;
	u32 llim;

	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off,  &pvt->ranges[range].base.lo);
	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);

	if (pvt->fam == 0xf)
		return;

	if (!dram_rw(pvt, range))
		return;

	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off,  &pvt->ranges[range].base.hi);
	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);

	/* F15h: factor in CC6 save area by reading dst node's limit reg */
	if (pvt->fam != 0x15)
		return;

	nb = node_to_amd_nb(dram_dst_node(pvt, range));
	if (WARN_ON(!nb))
		return;

	if (pvt->model == 0x60)
		pci_func = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1;
	else if (pvt->model == 0x30)
		pci_func = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1;
	else
		pci_func = PCI_DEVICE_ID_AMD_15H_NB_F1;

	f1 = pci_get_related_function(nb->misc->vendor, pci_func, nb->misc);
	if (WARN_ON(!f1))
		return;

	amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);

	pvt->ranges[range].lim.lo &= GENMASK_ULL(15, 0);

				    /* {[39:27],111b} */
	pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;

	pvt->ranges[range].lim.hi &= GENMASK_ULL(7, 0);

				    /* [47:40] */
	pvt->ranges[range].lim.hi |= llim >> 13;

	pci_dev_put(f1);
}
static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
				    struct err_info *err)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	error_address_to_page_and_offset(sys_addr, err);

	/*
	 * Find out which node the error address belongs to. This may be
	 * different from the node that detected the error.
	 */
	err->src_mci = find_mc_by_sys_addr(mci, sys_addr);
	if (!err->src_mci) {
		amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
			     (unsigned long)sys_addr);
		err->err_code = ERR_NODE;
		return;
	}

	/* Now map the sys_addr to a CSROW */
	err->csrow = sys_addr_to_csrow(err->src_mci, sys_addr);
	if (err->csrow < 0) {
		err->err_code = ERR_CSROW;
		return;
	}

	/* CHIPKILL enabled */
	if (pvt->nbcfg & NBCFG_CHIPKILL) {
		err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
		if (err->channel < 0) {
			/*
			 * Syndrome didn't map, so we don't know which of the
			 * 2 DIMMs is in error. So we need to ID 'both' of them
			 * as suspect.
			 */
			amd64_mc_warn(err->src_mci, "unknown syndrome 0x%04x - "
				      "possible error reporting race\n",
				      err->syndrome);
			err->err_code = ERR_CHANNEL;
			return;
		}
	} else {
		/*
		 * non-chipkill ecc mode
		 *
		 * The k8 documentation is unclear about how to determine the
		 * channel number when using non-chipkill memory. This method
		 * was obtained from email communication with someone at AMD.
		 * (Wish the email was placed in this comment - norsk)
		 */
		err->channel = ((sys_addr & BIT(3)) != 0);
	}
}
static int ddr2_cs_size(unsigned i, bool dct_width)
{
	unsigned shift = 0;

	if (i <= 2)
		shift = i;
	else if (!(i & 0x1))
		shift = i >> 1;
	else
		shift = (i + 1) >> 1;

	return 128 << (shift + !!dct_width);
}
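/*
 * Worked example: ddr2_cs_size(5, false) takes the odd branch,
 * shift = (5 + 1) >> 1 = 3, giving 128 << 3 = 1024 (MB); a 128-bit wide
 * DCT (dct_width true) doubles that to 2048.
 */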
static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				  unsigned cs_mode, int cs_mask_nr)
{
	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;

	if (pvt->ext_model >= K8_REV_F) {
		WARN_ON(cs_mode > 11);
		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
	}
	else if (pvt->ext_model >= K8_REV_D) {
		unsigned diff;
		WARN_ON(cs_mode > 10);

		/*
		 * the below calculation, besides trying to win an obfuscated C
		 * contest, maps cs_mode values to DIMM chip select sizes. The
		 * mappings are:
		 *
		 * cs_mode	CS size (mb)
		 * =======	============
		 * 0		32
		 * 1		64
		 * 2		128
		 * 3		128
		 * 4		256
		 * 5		512
		 * 6		256
		 * 7		512
		 * 8		1024
		 * 9		1024
		 * 10		2048
		 *
		 * Basically, it calculates a value with which to shift the
		 * smallest CS size of 32MB.
		 *
		 * ddr[23]_cs_size have a similar purpose.
		 */
		diff = cs_mode/3 + (unsigned)(cs_mode > 5);

		return 32 << (cs_mode - diff);
	}
	else {
		WARN_ON(cs_mode > 6);
		return 32 << cs_mode;
	}
}
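/*
 * Worked example of the revD formula above: cs_mode = 7 gives
 * diff = 7/3 + (7 > 5) = 3, so 32 << (7 - 3) = 512MB, matching the
 * table; cs_mode = 10 gives diff = 4 and 32 << 6 = 2048MB.
 */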
/*
 * Get the number of DCT channels in use.
 *
 * Return:
 *	number of Memory Channels in operation
 * Pass back:
 *	contents of the DCL0_LOW register
 */
static int f1x_early_channel_count(struct amd64_pvt *pvt)
{
	int i, j, channels = 0;

	/* On F10h, if we are in 128 bit mode, then we are using 2 channels */
	if (pvt->fam == 0x10 && (pvt->dclr0 & WIDTH_128))
		return 2;

	/*
	 * Need to check if in unganged mode: In such, there are 2 channels,
	 * but they are not in 128 bit mode and thus the above 'dclr0' status
	 * bit will be OFF.
	 *
	 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
	 * their CSEnable bit on. If so, then SINGLE DIMM case.
	 */
	edac_dbg(0, "Data width is not 128 bits - need more decoding\n");

	/*
	 * Check DRAM Bank Address Mapping values for each DIMM to see if there
	 * is more than just one DIMM present in unganged mode. Need to check
	 * both controllers since DIMMs can be placed in either one.
	 */
	for (i = 0; i < 2; i++) {
		u32 dbam = (i ? pvt->dbam1 : pvt->dbam0);

		for (j = 0; j < 4; j++) {
			if (DBAM_DIMM(j, dbam) > 0) {
				channels++;
				break;
			}
		}
	}

	if (channels > 2)
		channels = 2;

	amd64_info("MCT channel count: %d\n", channels);

	return channels;
}
static int f17_early_channel_count(struct amd64_pvt *pvt)
{
	int i, channels = 0;

	/* SDP Control bit 31 (SdpInit) is clear for unused UMC channels */
	for (i = 0; i < NUM_UMCS; i++)
		channels += !!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT);

	amd64_info("MCT channel count: %d\n", channels);

	return channels;
}
static int ddr3_cs_size(unsigned i, bool dct_width)
{
	unsigned shift = 0;
	int cs_size = 0;

	if (i == 0 || i == 3 || i == 4)
		cs_size = -1;
	else if (i <= 2)
		shift = i;
	else if (i == 12)
		shift = 7;
	else if (!(i & 0x1))
		shift = i >> 1;
	else
		shift = (i + 1) >> 1;

	if (cs_size != -1)
		cs_size = (128 * (1 << !!dct_width)) << shift;

	return cs_size;
}
static int ddr3_lrdimm_cs_size(unsigned i, unsigned rank_multiply)
{
	unsigned shift = 0;
	int cs_size = 0;

	if (i < 4 || i == 6)
		cs_size = -1;
	else if (i == 12)
		shift = 7;
	else if (!(i & 0x1))
		shift = i >> 1;
	else
		shift = (i + 1) >> 1;

	if (cs_size != -1)
		cs_size = rank_multiply * (128 << shift);

	return cs_size;
}
static int ddr4_cs_size(unsigned i)
{
	int cs_size = 0;

	if (i == 0)
		cs_size = -1;
	else if (i == 1)
		cs_size = 1024;
	else
		/* Min cs_size = 1G */
		cs_size = 1024 * (1 << (i >> 1));

	return cs_size;
}
static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				   unsigned cs_mode, int cs_mask_nr)
{
	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;

	WARN_ON(cs_mode > 11);

	if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
		return ddr3_cs_size(cs_mode, dclr & WIDTH_128);
	else
		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
}
/*
 * F15h supports only 64bit DCT interfaces
 */
static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				   unsigned cs_mode, int cs_mask_nr)
{
	WARN_ON(cs_mode > 12);

	return ddr3_cs_size(cs_mode, false);
}
/* F15h M60h supports DDR4 mapping as well.. */
static int f15_m60h_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
					unsigned cs_mode, int cs_mask_nr)
{
	int cs_size;
	u32 dcsm = pvt->csels[dct].csmasks[cs_mask_nr];

	WARN_ON(cs_mode > 12);

	if (pvt->dram_type == MEM_DDR4) {
		if (cs_mode > 9)
			return -1;

		cs_size = ddr4_cs_size(cs_mode);
	} else if (pvt->dram_type == MEM_LRDDR3) {
		unsigned rank_multiply = dcsm & 0xf;

		if (rank_multiply == 3)
			rank_multiply = 4;
		cs_size = ddr3_lrdimm_cs_size(cs_mode, rank_multiply);
	} else {
		/* Minimum cs size is 512mb for F15h M60h */
		if (cs_mode > 10)
			return -1;

		cs_size = ddr3_cs_size(cs_mode, false);
	}

	return cs_size;
}
/*
 * F16h and F15h model 30h have only limited cs_modes.
 */
static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				   unsigned cs_mode, int cs_mask_nr)
{
	WARN_ON(cs_mode > 12);

	if (cs_mode == 6 || cs_mode == 8 ||
	    cs_mode == 9 || cs_mode == 12)
		return -1;
	else
		return ddr3_cs_size(cs_mode, false);
}
static int f17_base_addr_to_cs_size(struct amd64_pvt *pvt, u8 umc,
				    unsigned int cs_mode, int csrow_nr)
{
	u32 base_addr = pvt->csels[umc].csbases[csrow_nr];

	/* Each mask is used for every two base addresses. */
	u32 addr_mask = pvt->csels[umc].csmasks[csrow_nr >> 1];

	/* Register [31:1] = Address [39:9]. Size is in kBs here. */
	u32 size = ((addr_mask >> 1) - (base_addr >> 1) + 1) >> 1;

	edac_dbg(1, "BaseAddr: 0x%x, AddrMask: 0x%x\n", base_addr, addr_mask);

	/* Return size in MBs. */
	return size >> 10;
}
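/*
 * Worked example (hypothetical register values): base_addr = 0x0 and
 * addr_mask = 0x01ffffff give size = ((0x00ffffff - 0x0) + 1) >> 1 =
 * 0x00800000 kB, which the final >> 10 turns into 8192 (MB).
 */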
static void read_dram_ctl_register(struct amd64_pvt *pvt)
{

	if (pvt->fam == 0xf)
		return;

	if (!amd64_read_pci_cfg(pvt->F2, DCT_SEL_LO, &pvt->dct_sel_lo)) {
		edac_dbg(0, "F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
			 pvt->dct_sel_lo, dct_sel_baseaddr(pvt));

		edac_dbg(0, "  DCTs operate in %s mode\n",
			 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));

		if (!dct_ganging_enabled(pvt))
			edac_dbg(0, "  Address range split per DCT: %s\n",
				 (dct_high_range_enabled(pvt) ? "yes" : "no"));

		edac_dbg(0, "  data interleave for ECC: %s, DRAM cleared since last warm reset: %s\n",
			 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
			 (dct_memory_cleared(pvt) ? "yes" : "no"));

		edac_dbg(0, "  channel interleave: %s, "
			 "interleave bits selector: 0x%x\n",
			 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
			 dct_sel_interleave_addr(pvt));
	}

	amd64_read_pci_cfg(pvt->F2, DCT_SEL_HI, &pvt->dct_sel_hi);
}
/*
 * Determine channel (DCT) based on the interleaving mode (see F15h M30h BKDG,
 * 2.10.12 Memory Interleaving Modes).
 */
static u8 f15_m30h_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
				     u8 intlv_en, int num_dcts_intlv,
				     u32 dct_sel)
{
	u8 channel = 0;
	u8 select;

	if (!(intlv_en))
		return (u8)(dct_sel);

	if (num_dcts_intlv == 2) {
		select = (sys_addr >> 8) & 0x3;
		channel = select ? 0x3 : 0;
	} else if (num_dcts_intlv == 4) {
		u8 intlv_addr = dct_sel_interleave_addr(pvt);
		switch (intlv_addr) {
		case 0x4:
			channel = (sys_addr >> 8) & 0x3;
			break;
		case 0x5:
			channel = (sys_addr >> 9) & 0x3;
			break;
		}
	}
	return channel;
}
/*
 * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9
 * Memory Interleaving Modes.
 */
static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
				bool hi_range_sel, u8 intlv_en)
{
	u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1;

	if (dct_ganging_enabled(pvt))
		return 0;

	if (hi_range_sel)
		return dct_sel_high;

	/*
	 * see F2x110[DctSelIntLvAddr] - channel interleave mode
	 */
	if (dct_interleave_enabled(pvt)) {
		u8 intlv_addr = dct_sel_interleave_addr(pvt);

		/* return DCT select function: 0=DCT0, 1=DCT1 */
		if (!intlv_addr)
			return sys_addr >> 6 & 1;

		if (intlv_addr & 0x2) {
			u8 shift = intlv_addr & 0x1 ? 9 : 6;
			u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) & 1;

			return ((sys_addr >> shift) & 1) ^ temp;
		}

		if (intlv_addr & 0x4) {
			u8 shift = intlv_addr & 0x1 ? 9 : 8;

			return (sys_addr >> shift) & 1;
		}

		return (sys_addr >> (12 + hweight8(intlv_en))) & 1;
	}

	if (dct_high_range_enabled(pvt))
		return ~dct_sel_high & 1;

	return 0;
}
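/*
 * For illustration: with DctSelIntLvAddr == 0 the DCTs interleave on
 * bit 6, i.e. consecutive 64-byte lines alternate between the DCTs:
 * sys_addr 0x40 selects DCT1, sys_addr 0x80 selects DCT0 again.
 */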
/* Convert the sys_addr to the normalized DCT address */
static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
				 u64 sys_addr, bool hi_rng,
				 u32 dct_sel_base_addr)
{
	u64 chan_off;
	u64 dram_base		= get_dram_base(pvt, range);
	u64 hole_off		= f10_dhar_offset(pvt);
	u64 dct_sel_base_off	= (u64)(pvt->dct_sel_hi & 0xFFFFFC00) << 16;

	if (hi_rng) {
		/*
		 * if
		 * base address of high range is below 4Gb
		 * (bits [47:27] at [31:11])
		 * DRAM address space on this DCT is hoisted above 4Gb	&&
		 * sys_addr > 4Gb
		 *
		 *	remove hole offset from sys_addr
		 * else
		 *	remove high range offset from sys_addr
		 */
		if ((!(dct_sel_base_addr >> 16) ||
		     dct_sel_base_addr < dhar_base(pvt)) &&
		    dhar_valid(pvt) &&
		    (sys_addr >= BIT_64(32)))
			chan_off = hole_off;
		else
			chan_off = dct_sel_base_off;
	} else {
		/*
		 * if
		 * we have a valid hole		&&
		 * sys_addr > 4Gb
		 *
		 *	remove hole
		 * else
		 *	remove dram base to normalize to DCT address
		 */
		if (dhar_valid(pvt) && (sys_addr >= BIT_64(32)))
			chan_off = hole_off;
		else
			chan_off = dram_base;
	}

	return (sys_addr & GENMASK_ULL(47, 6)) - (chan_off & GENMASK_ULL(47, 23));
}
/*
 * checks if the csrow passed in is marked as SPARED, if so returns the new
 * spare row
 */
static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
{
	int tmp_cs;

	if (online_spare_swap_done(pvt, dct) &&
	    csrow == online_spare_bad_dramcs(pvt, dct)) {

		for_each_chip_select(tmp_cs, dct, pvt) {
			if (chip_select_base(tmp_cs, dct, pvt) & 0x2) {
				csrow = tmp_cs;
				break;
			}
		}
	}
	return csrow;
}
/*
 * Iterate over the DRAM DCT "base" and "mask" registers looking for a
 * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
 *
 * Return:
 *	-EINVAL:  NOT FOUND
 *	0..csrow = Chip-Select Row
 */
static int f1x_lookup_addr_in_dct(u64 in_addr, u8 nid, u8 dct)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;
	u64 cs_base, cs_mask;
	int cs_found = -EINVAL;
	int csrow;

	mci = edac_mc_find(nid);
	if (!mci)
		return cs_found;

	pvt = mci->pvt_info;

	edac_dbg(1, "input addr: 0x%llx, DCT: %d\n", in_addr, dct);

	for_each_chip_select(csrow, dct, pvt) {
		if (!csrow_enabled(csrow, dct, pvt))
			continue;

		get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);

		edac_dbg(1, "    CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
			 csrow, cs_base, cs_mask);

		cs_mask = ~cs_mask;

		edac_dbg(1, "    (InputAddr & ~CSMask)=0x%llx (CSBase & ~CSMask)=0x%llx\n",
			 (in_addr & cs_mask), (cs_base & cs_mask));

		if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
			if (pvt->fam == 0x15 && pvt->model >= 0x30) {
				cs_found = csrow;
				break;
			}
			cs_found = f10_process_possible_spare(pvt, dct, csrow);

			edac_dbg(1, " MATCH csrow=%d\n", cs_found);
			break;
		}
	}
	return cs_found;
}
/*
 * See F2x10C. Non-interleaved graphics framebuffer memory under the 16G is
 * swapped with a region located at the bottom of memory so that the GPU can use
 * the interleaved region and thus two channels.
 */
static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
{
	u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;

	if (pvt->fam == 0x10) {
		/* only revC3 and revE have that feature */
		if (pvt->model < 4 || (pvt->model < 0xa && pvt->stepping < 3))
			return sys_addr;
	}

	amd64_read_pci_cfg(pvt->F2, SWAP_INTLV_REG, &swap_reg);

	if (!(swap_reg & 0x1))
		return sys_addr;

	swap_base	= (swap_reg >> 3) & 0x7f;
	swap_limit	= (swap_reg >> 11) & 0x7f;
	rgn_size	= (swap_reg >> 20) & 0x7f;
	tmp_addr	= sys_addr >> 27;

	if (!(sys_addr >> 34) &&
	    (((tmp_addr >= swap_base) &&
	     (tmp_addr <= swap_limit)) ||
	     (tmp_addr < rgn_size)))
		return sys_addr ^ (u64)swap_base << 27;

	return sys_addr;
}
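/*
 * For illustration (hypothetical swap_reg contents): with swap_base ==
 * 0x01 and an address inside the swapped window, the XOR with
 * (u64)swap_base << 27 mirrors sys_addr 0x0 to 0x08000000 and back,
 * i.e. the swap granularity is 128M.
 */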
/* For a given @dram_range, check if @sys_addr falls within it. */
static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
				  u64 sys_addr, int *chan_sel)
{
	int cs_found = -EINVAL;
	u64 chan_addr;
	u32 dct_sel_base;
	u8 channel;
	bool high_range = false;

	u8 node_id    = dram_dst_node(pvt, range);
	u8 intlv_en   = dram_intlv_en(pvt, range);
	u32 intlv_sel = dram_intlv_sel(pvt, range);

	edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
		 range, sys_addr, get_dram_limit(pvt, range));

	if (dhar_valid(pvt) &&
	    dhar_base(pvt) <= sys_addr &&
	    sys_addr < BIT_64(32)) {
		amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
			    sys_addr);
		return -EINVAL;
	}

	if (intlv_en && (intlv_sel != ((sys_addr >> 12) & intlv_en)))
		return -EINVAL;

	sys_addr = f1x_swap_interleaved_region(pvt, sys_addr);

	dct_sel_base = dct_sel_baseaddr(pvt);

	/*
	 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
	 * select between DCT0 and DCT1.
	 */
	if (dct_high_range_enabled(pvt) &&
	   !dct_ganging_enabled(pvt) &&
	   ((sys_addr >> 27) >= (dct_sel_base >> 11)))
		high_range = true;

	channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en);

	chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr,
					  high_range, dct_sel_base);

	/* Remove node interleaving, see F1x120 */
	if (intlv_en)
		chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) |
			    (chan_addr & 0xfff);

	/* remove channel interleave */
	if (dct_interleave_enabled(pvt) &&
	   !dct_high_range_enabled(pvt) &&
	   !dct_ganging_enabled(pvt)) {

		if (dct_sel_interleave_addr(pvt) != 1) {
			if (dct_sel_interleave_addr(pvt) == 0x3)
				/* hash 9 */
				chan_addr = ((chan_addr >> 10) << 9) |
					     (chan_addr & 0x1ff);
			else
				/* A[6] or hash 6 */
				chan_addr = ((chan_addr >> 7) << 6) |
					     (chan_addr & 0x3f);
		} else
			/* A[12] */
			chan_addr = ((chan_addr >> 13) << 12) |
				     (chan_addr & 0xfff);
	}

	edac_dbg(1, "   Normalized DCT addr: 0x%llx\n", chan_addr);

	cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);

	if (cs_found >= 0)
		*chan_sel = channel;

	return cs_found;
}
static int f15_m30h_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
					u64 sys_addr, int *chan_sel)
{
	int cs_found = -EINVAL;
	int num_dcts_intlv = 0;
	u64 chan_addr, chan_offset;
	u64 dct_base, dct_limit;
	u32 dct_cont_base_reg, dct_cont_limit_reg, tmp;
	u8 channel, alias_channel, leg_mmio_hole, dct_sel, dct_offset_en;

	u64 dhar_offset		= f10_dhar_offset(pvt);
	u8 intlv_addr		= dct_sel_interleave_addr(pvt);
	u8 node_id		= dram_dst_node(pvt, range);
	u8 intlv_en		= dram_intlv_en(pvt, range);

	amd64_read_pci_cfg(pvt->F1, DRAM_CONT_BASE, &dct_cont_base_reg);
	amd64_read_pci_cfg(pvt->F1, DRAM_CONT_LIMIT, &dct_cont_limit_reg);

	dct_offset_en		= (u8) ((dct_cont_base_reg >> 3) & BIT(0));
	dct_sel			= (u8) ((dct_cont_base_reg >> 4) & 0x7);

	edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
		 range, sys_addr, get_dram_limit(pvt, range));

	if (!(get_dram_base(pvt, range)  <= sys_addr) &&
	    !(get_dram_limit(pvt, range) >= sys_addr))
		return -EINVAL;

	if (dhar_valid(pvt) &&
	    dhar_base(pvt) <= sys_addr &&
	    sys_addr < BIT_64(32)) {
		amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
			    sys_addr);
		return -EINVAL;
	}

	/* Verify sys_addr is within DCT Range. */
	dct_base  = (u64) dct_sel_baseaddr(pvt);
	dct_limit = (dct_cont_limit_reg >> 11) & 0x1FFF;

	if (!(dct_cont_base_reg & BIT(0)) &&
	    !(dct_base <= (sys_addr >> 27) &&
	      dct_limit >= (sys_addr >> 27)))
		return -EINVAL;

	/* Verify number of dct's that participate in channel interleaving. */
	num_dcts_intlv = (int) hweight8(intlv_en);

	if (!(num_dcts_intlv % 2 == 0) || (num_dcts_intlv > 4))
		return -EINVAL;

	if (pvt->model >= 0x60)
		channel = f1x_determine_channel(pvt, sys_addr, false, intlv_en);
	else
		channel = f15_m30h_determine_channel(pvt, sys_addr, intlv_en,
						     num_dcts_intlv, dct_sel);

	/* Verify we stay within the MAX number of channels allowed */
	if (channel > 3)
		return -EINVAL;

	leg_mmio_hole = (u8) (dct_cont_base_reg >> 1 & BIT(0));

	/* Get normalized DCT addr */
	if (leg_mmio_hole && (sys_addr >= BIT_64(32)))
		chan_offset = dhar_offset;
	else
		chan_offset = dct_base << 27;

	chan_addr = sys_addr - chan_offset;

	/* remove channel interleave */
	if (num_dcts_intlv == 2) {
		if (intlv_addr == 0x4)
			chan_addr = ((chan_addr >> 9) << 8) |
						(chan_addr & 0xff);
		else if (intlv_addr == 0x5)
			chan_addr = ((chan_addr >> 10) << 9) |
						(chan_addr & 0x1ff);
		else
			return -EINVAL;

	} else if (num_dcts_intlv == 4) {
		if (intlv_addr == 0x4)
			chan_addr = ((chan_addr >> 10) << 8) |
							(chan_addr & 0xff);
		else if (intlv_addr == 0x5)
			chan_addr = ((chan_addr >> 11) << 9) |
							(chan_addr & 0x1ff);
		else
			return -EINVAL;
	}

	if (dct_offset_en) {
		amd64_read_pci_cfg(pvt->F1,
				   DRAM_CONT_HIGH_OFF + (int) channel * 4,
				   &tmp);
		chan_addr += (u64) ((tmp >> 11) & 0xfff) << 27;
	}

	f15h_select_dct(pvt, channel);

	edac_dbg(1, "   Normalized DCT addr: 0x%llx\n", chan_addr);

	/*
	 * Find Chip select:
	 * if channel = 3, then alias it to 1. This is because, in F15 M30h,
	 * there is support for 4 DCT's, but only 2 are currently functional.
	 * They are DCT0 and DCT3. But we have read all registers of DCT3 into
	 * pvt->csels[1]. So we need to use '1' here to get correct info.
	 * Refer F15 M30h BKDG Section 2.10 and 2.10.3 for clarifications.
	 */
	alias_channel = (channel == 3) ? 1 : channel;

	cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, alias_channel);

	if (cs_found >= 0)
		*chan_sel = alias_channel;

	return cs_found;
}
static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt,
					u64 sys_addr, int *chan_sel)
{
	int cs_found = -EINVAL;
	unsigned range;

	for (range = 0; range < DRAM_RANGES; range++) {
		if (!dram_rw(pvt, range))
			continue;

		if (pvt->fam == 0x15 && pvt->model >= 0x30)
			cs_found = f15_m30h_match_to_this_node(pvt, range,
							       sys_addr,
							       chan_sel);

		else if ((get_dram_base(pvt, range)  <= sys_addr) &&
			 (get_dram_limit(pvt, range) >= sys_addr)) {
			cs_found = f1x_match_to_this_node(pvt, range,
							  sys_addr, chan_sel);
			if (cs_found >= 0)
				break;
		}
	}
	return cs_found;
}
/*
 * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
 * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
 *
 * The @sys_addr is usually an error address received from the hardware
 * (MCX_ADDR).
 */
static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
				     struct err_info *err)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	error_address_to_page_and_offset(sys_addr, err);

	err->csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &err->channel);
	if (err->csrow < 0) {
		err->err_code = ERR_CSROW;
		return;
	}

	/*
	 * We need the syndromes for channel detection only when we're
	 * ganged. Otherwise @chan should already contain the channel at
	 * this point.
	 */
	if (dct_ganging_enabled(pvt))
		err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
}
/*
 * debug routine to display the memory sizes of all logical DIMMs and its
 * CSROWs
 */
static void debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
{
	int dimm, size0, size1;
	u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
	u32 dbam  = ctrl ? pvt->dbam1 : pvt->dbam0;

	if (pvt->fam == 0xf) {
		/* K8 families < revF not supported yet */
		if (pvt->ext_model < K8_REV_F)
			return;
		else
			WARN_ON(ctrl != 0);
	}

	if (pvt->fam == 0x10) {
		dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1
							   : pvt->dbam0;
		dcsb = (ctrl && !dct_ganging_enabled(pvt)) ?
				 pvt->csels[1].csbases :
				 pvt->csels[0].csbases;
	} else if (ctrl) {
		dbam = pvt->dbam0;
		dcsb = pvt->csels[1].csbases;
	}
	edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
		 ctrl, dbam);

	edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);

	/* Dump memory sizes for DIMM and its CSROWs */
	for (dimm = 0; dimm < 4; dimm++) {

		size0 = 0;
		if (dcsb[dimm*2] & DCSB_CS_ENABLE)
			/* For f15m60h, need multiplier for LRDIMM cs_size
			 * calculation. We pass 'dimm' value to the dbam_to_cs
			 * mapper so we can find the multiplier from the
			 * corresponding DCSM.
			 */
			size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
						     DBAM_DIMM(dimm, dbam),
						     dimm);

		size1 = 0;
		if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
			size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
						     DBAM_DIMM(dimm, dbam),
						     dimm);

		amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
				dimm * 2,     size0,
				dimm * 2 + 1, size1);
	}
}
static struct amd64_family_type family_types[] = {
	[K8_CPUS] = {
		.ctl_name = "K8",
		.f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
		.f2_id = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
		.ops = {
			.early_channel_count	= k8_early_channel_count,
			.map_sysaddr_to_csrow	= k8_map_sysaddr_to_csrow,
			.dbam_to_cs		= k8_dbam_to_chip_select,
		}
	},
	[F10_CPUS] = {
		.ctl_name = "F10h",
		.f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
		.f2_id = PCI_DEVICE_ID_AMD_10H_NB_DRAM,
		.ops = {
			.early_channel_count	= f1x_early_channel_count,
			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
			.dbam_to_cs		= f10_dbam_to_chip_select,
		}
	},
	[F15_CPUS] = {
		.ctl_name = "F15h",
		.f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1,
		.f2_id = PCI_DEVICE_ID_AMD_15H_NB_F2,
		.ops = {
			.early_channel_count	= f1x_early_channel_count,
			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
			.dbam_to_cs		= f15_dbam_to_chip_select,
		}
	},
	[F15_M30H_CPUS] = {
		.ctl_name = "F15h_M30h",
		.f1_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1,
		.f2_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F2,
		.ops = {
			.early_channel_count	= f1x_early_channel_count,
			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
			.dbam_to_cs		= f16_dbam_to_chip_select,
		}
	},
	[F15_M60H_CPUS] = {
		.ctl_name = "F15h_M60h",
		.f1_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1,
		.f2_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F2,
		.ops = {
			.early_channel_count	= f1x_early_channel_count,
			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
			.dbam_to_cs		= f15_m60h_dbam_to_chip_select,
		}
	},
	[F16_CPUS] = {
		.ctl_name = "F16h",
		.f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1,
		.f2_id = PCI_DEVICE_ID_AMD_16H_NB_F2,
		.ops = {
			.early_channel_count	= f1x_early_channel_count,
			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
			.dbam_to_cs		= f16_dbam_to_chip_select,
		}
	},
	[F16_M30H_CPUS] = {
		.ctl_name = "F16h_M30h",
		.f1_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F1,
		.f2_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F2,
		.ops = {
			.early_channel_count	= f1x_early_channel_count,
			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
			.dbam_to_cs		= f16_dbam_to_chip_select,
		}
	},
	[F17_CPUS] = {
		.ctl_name = "F17h",
		.f0_id = PCI_DEVICE_ID_AMD_17H_DF_F0,
		.f6_id = PCI_DEVICE_ID_AMD_17H_DF_F6,
		.ops = {
			.early_channel_count	= f17_early_channel_count,
			.dbam_to_cs		= f17_base_addr_to_cs_size,
		}
	},
};
/*
 * These are tables of eigenvectors (one per line) which can be used for the
 * construction of the syndrome tables. The modified syndrome search algorithm
 * uses those to find the symbol in error and thus the DIMM.
 *
 * Algorithm courtesy of Ross LaFetra from AMD.
 */
static const u16 x4_vectors[] = {
	0x2f57, 0x1afe, 0x66cc, 0xdd88,
	0x11eb, 0x3396, 0x7f4c, 0xeac8,
	0x0001, 0x0002, 0x0004, 0x0008,
	0x1013, 0x3032, 0x4044, 0x8088,
	0x106b, 0x30d6, 0x70fc, 0xe0a8,
	0x4857, 0xc4fe, 0x13cc, 0x3288,
	0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
	0x1f39, 0x251e, 0xbd6c, 0x6bd8,
	0x15c1, 0x2a42, 0x89ac, 0x4758,
	0x2b03, 0x1602, 0x4f0c, 0xca08,
	0x1f07, 0x3a0e, 0x6b04, 0xbd08,
	0x8ba7, 0x465e, 0x244c, 0x1cc8,
	0x2b87, 0x164e, 0x642c, 0xdc18,
	0x40b9, 0x80de, 0x1094, 0x20e8,
	0x27db, 0x1eb6, 0x9dac, 0x7b58,
	0x11c1, 0x2242, 0x84ac, 0x4c58,
	0x1be5, 0x2d7a, 0x5e34, 0xa718,
	0x4b39, 0x8d1e, 0x14b4, 0x28d8,
	0x4c97, 0xc87e, 0x11fc, 0x33a8,
	0x8e97, 0x497e, 0x2ffc, 0x1aa8,
	0x16b3, 0x3d62, 0x4f34, 0x8518,
	0x1e2f, 0x391a, 0x5cac, 0xf858,
	0x1d9f, 0x3b7a, 0x572c, 0xfe18,
	0x15f5, 0x2a5a, 0x5264, 0xa3b8,
	0x1dbb, 0x3b66, 0x715c, 0xe3f8,
	0x4397, 0xc27e, 0x17fc, 0x3ea8,
	0x1617, 0x3d3e, 0x6464, 0xb8b8,
	0x23ff, 0x12aa, 0xab6c, 0x56d8,
	0x2dfb, 0x1ba6, 0x913c, 0x7328,
	0x185d, 0x2ca6, 0x7914, 0x9e28,
	0x171b, 0x3e36, 0x7d7c, 0xebe8,
	0x4199, 0x82ee, 0x19f4, 0x2e58,
	0x4807, 0xc40e, 0x130c, 0x3208,
	0x1905, 0x2e0a, 0x5804, 0xac08,
	0x213f, 0x132a, 0xadfc, 0x5ba8,
	0x19a9, 0x2efe, 0xb5cc, 0x6f88,
};
static const u16 x8_vectors[] = {
	0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
	0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
	0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
	0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
	0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
	0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
	0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
	0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
	0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
	0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
	0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
	0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
	0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
	0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
	0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
	0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
	0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
	0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
	0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
};
static int decode_syndrome(u16 syndrome, const u16 *vectors, unsigned num_vecs,
			   unsigned v_dim)
{
	unsigned int i, err_sym;

	for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
		u16 s = syndrome;
		unsigned v_idx = err_sym * v_dim;
		unsigned v_end = (err_sym + 1) * v_dim;

		/* walk over all 16 bits of the syndrome */
		for (i = 1; i < (1U << 16); i <<= 1) {

			/* if bit is set in that eigenvector... */
			if (v_idx < v_end && vectors[v_idx] & i) {
				u16 ev_comp = vectors[v_idx++];

				/* ... and bit set in the modified syndrome, */
				if (s & i) {
					/* remove it. */
					s ^= ev_comp;

					if (!s)
						return err_sym;
				}

			} else if (s & i)
				/* can't get to zero, move to next symbol */
				break;
		}
	}

	edac_dbg(0, "syndrome(%x) not found\n", syndrome);
	return -1;
}
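/*
 * Worked example (hypothetical syndrome): 0x0003 run against x4_vectors
 * fails to cancel within the first two eigenvector groups, but is the
 * XOR of 0x0001 and 0x0002 in the third group, so err_sym = 2 is
 * returned; map_err_sym_to_channel(2, 4) below then yields channel 0.
 */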
static int map_err_sym_to_channel(int err_sym, int sym_size)
{
	if (sym_size == 4)
		switch (err_sym) {
		case 0x20:
		case 0x21:
			return 0;
		case 0x40:
		case 0x41:
			return 1;
		default:
			return err_sym >> 4;
		}
	/* x8 symbols */
	else
		switch (err_sym) {
		/* imaginary bits not in a DIMM */
		case 0x10:
			WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n",
			     err_sym);
			return -1;
		case 0x11:
			return 0;
		case 0x12:
			return 1;
		default:
			return err_sym >> 3;
		}
}
static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	int err_sym = -1;

	if (pvt->ecc_sym_sz == 8)
		err_sym = decode_syndrome(syndrome, x8_vectors,
					  ARRAY_SIZE(x8_vectors),
					  pvt->ecc_sym_sz);
	else if (pvt->ecc_sym_sz == 4)
		err_sym = decode_syndrome(syndrome, x4_vectors,
					  ARRAY_SIZE(x4_vectors),
					  pvt->ecc_sym_sz);
	else {
		amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz);
		return err_sym;
	}

	return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
}
static void __log_ecc_error(struct mem_ctl_info *mci, struct err_info *err,
			    u8 ecc_type)
{
	enum hw_event_mc_err_type err_type;
	const char *string;

	if (ecc_type == 2)
		err_type = HW_EVENT_ERR_CORRECTED;
	else if (ecc_type == 1)
		err_type = HW_EVENT_ERR_UNCORRECTED;
	else if (ecc_type == 3)
		err_type = HW_EVENT_ERR_DEFERRED;
	else {
		WARN(1, "Something is rotten in the state of Denmark.\n");
		return;
	}

	switch (err->err_code) {
	case DECODE_OK:
		string = "";
		break;
	case ERR_NODE:
		string = "Failed to map error addr to a node";
		break;
	case ERR_CSROW:
		string = "Failed to map error addr to a csrow";
		break;
	case ERR_CHANNEL:
		string = "unknown syndrome - possible error reporting race";
		break;
	default:
		string = "WTF error";
		break;
	}

	edac_mc_handle_error(err_type, mci, 1,
			     err->page, err->offset, err->syndrome,
			     err->csrow, err->channel, -1,
			     string, "");
}
static inline void decode_bus_error(int node_id, struct mce *m)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;
	u8 ecc_type = (m->status >> 45) & 0x3;
	u8 xec = XEC(m->status, 0x1f);
	u16 ec = EC(m->status);
	u64 sys_addr;
	struct err_info err;

	mci = edac_mc_find(node_id);
	if (!mci)
		return;

	pvt = mci->pvt_info;

	/* Bail out early if this was an 'observed' error */
	if (PP(ec) == NBSL_PP_OBS)
		return;

	/* Do only ECC errors */
	if (xec && xec != F10_NBSL_EXT_ERR_ECC)
		return;

	memset(&err, 0, sizeof(err));

	sys_addr = get_error_address(pvt, m);

	if (ecc_type == 2)
		err.syndrome = extract_syndrome(m->status);

	pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, &err);

	__log_ecc_error(mci, &err, ecc_type);
}
/*
 * Use pvt->F3 which contains the F3 CPU PCI device to get the related
 * F1 (AddrMap) and F2 (Dct) devices. Return negative value on error.
 * Reserve F0 and F6 on systems with a UMC.
 */
static int
reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2)
{
	if (pvt->umc) {
		pvt->F0 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
		if (!pvt->F0) {
			amd64_err("error F0 device not found: vendor %x device 0x%x (broken BIOS?)\n",
				  PCI_VENDOR_ID_AMD, pci_id1);
			return -ENODEV;
		}

		pvt->F6 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
		if (!pvt->F6) {
			pci_dev_put(pvt->F0);
			pvt->F0 = NULL;

			amd64_err("error F6 device not found: vendor %x device 0x%x (broken BIOS?)\n",
				  PCI_VENDOR_ID_AMD, pci_id2);
			return -ENODEV;
		}
		edac_dbg(1, "F0: %s\n", pci_name(pvt->F0));
		edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
		edac_dbg(1, "F6: %s\n", pci_name(pvt->F6));

		return 0;
	}

	/* Reserve the ADDRESS MAP Device */
	pvt->F1 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
	if (!pvt->F1) {
		amd64_err("error address map device not found: vendor %x device 0x%x (broken BIOS?)\n",
			  PCI_VENDOR_ID_AMD, pci_id1);
		return -ENODEV;
	}

	/* Reserve the DCT Device */
	pvt->F2 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
	if (!pvt->F2) {
		pci_dev_put(pvt->F1);
		pvt->F1 = NULL;

		amd64_err("error F2 device not found: vendor %x device 0x%x (broken BIOS?)\n",
			  PCI_VENDOR_ID_AMD, pci_id2);
		return -ENODEV;
	}

	edac_dbg(1, "F1: %s\n", pci_name(pvt->F1));
	edac_dbg(1, "F2: %s\n", pci_name(pvt->F2));
	edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));

	return 0;
}
static void free_mc_sibling_devs(struct amd64_pvt *pvt)
{
	if (pvt->umc) {
		pci_dev_put(pvt->F0);
		pci_dev_put(pvt->F6);
	} else {
		pci_dev_put(pvt->F1);
		pci_dev_put(pvt->F2);
	}
}
2345 * Retrieve the hardware registers of the memory controller (this includes the
2346 * 'Address Map' and 'Misc' device regs)
2348 static void read_mc_regs(struct amd64_pvt *pvt)
2355 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
2356 * those are Read-As-Zero
2358 rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
2359 edac_dbg(0, " TOP_MEM: 0x%016llx\n", pvt->top_mem);
2361 /* check first whether TOP_MEM2 is enabled */
2362 rdmsrl(MSR_K8_SYSCFG, msr_val);
2363 if (msr_val & (1U << 21)) {
2364 rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
2365 edac_dbg(0, " TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
2367 edac_dbg(0, " TOP_MEM2 disabled\n");
	amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);

	read_dram_ctl_register(pvt);

	for (range = 0; range < DRAM_RANGES; range++) {
		u8 rw;

		/* read settings for this DRAM range */
		read_dram_base_limit_regs(pvt, range);

		rw = dram_rw(pvt, range);
		if (!rw)
			continue;

		edac_dbg(1, "  DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
			 range,
			 get_dram_base(pvt, range),
			 get_dram_limit(pvt, range));

		edac_dbg(1, "   IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
			 dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
			 (rw & 0x1) ? "R" : "-",
			 (rw & 0x2) ? "W" : "-",
			 dram_intlv_sel(pvt, range),
			 dram_dst_node(pvt, range));
	}
	read_dct_base_mask(pvt);

	amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
	amd64_read_dct_pci_cfg(pvt, 0, DBAM0, &pvt->dbam0);

	amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);

	amd64_read_dct_pci_cfg(pvt, 0, DCLR0, &pvt->dclr0);
	amd64_read_dct_pci_cfg(pvt, 0, DCHR0, &pvt->dchr0);

	if (!dct_ganging_enabled(pvt)) {
		amd64_read_dct_pci_cfg(pvt, 1, DCLR0, &pvt->dclr1);
		amd64_read_dct_pci_cfg(pvt, 1, DCHR0, &pvt->dchr1);
	}

	pvt->ecc_sym_sz = 4;
	determine_memory_type(pvt);
	edac_dbg(1, "  DIMM type: %s\n", edac_mem_types[pvt->dram_type]);

	if (pvt->fam >= 0x10) {
		amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);

		/* F16h has only DCT0, so no need to read dbam1 */
		if (pvt->fam != 0x16)
			amd64_read_dct_pci_cfg(pvt, 1, DBAM0, &pvt->dbam1);

		/* F10h, revD and later can do x8 ECC too */
		if ((pvt->fam > 0x10 || pvt->model > 7) && tmp & BIT(25))
			pvt->ecc_sym_sz = 8;
	}

	dump_misc_regs(pvt);
}
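
/*
 * A minimal sketch (not part of the driver; helper name is hypothetical) of
 * how the TOP_MEM/TOP_MEM2 values read above split the physical address
 * space: DRAM sits below TOP_MEM, the MMIO hole runs from there up to 4GB,
 * and DRAM resumes at 4GB up to TOP_MEM2 when the latter is enabled.
 */
static inline bool example_addr_is_dram(struct amd64_pvt *pvt, u64 addr)
{
	if (addr < pvt->top_mem)
		return true;

	/* pvt->top_mem2 is only valid when SYSCFG[21] was set above */
	return pvt->top_mem2 && addr >= BIT_ULL(32) && addr < pvt->top_mem2;
}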
/*
 * NOTE: CPU Revision Dependent code
 *
 * Input:
 *	@csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
 *	k8 private pointer to -->
 *			DRAM Bank Address mapping register
 *			node_id
 *			DCL register where dual_channel_active is
 *
 * The DBAM register consists of 4 fields of 4 bits each:
 *
 * Bits:	CSROWs
 * 0-3		CSROWs 0 and 1
 * 4-7		CSROWs 2 and 3
 * 8-11		CSROWs 4 and 5
 * 12-15	CSROWs 6 and 7
 *
 * Values range from 0 to 15.
 * The meaning of the values depends on CPU revision and dual-channel state;
 * see the relevant BKDG for more info.
 *
 * The memory controller provides a total of only 8 CSROWs in its current
 * architecture. Each "pair" of CSROWs normally represents just one DIMM in
 * single channel or two (2) DIMMs in dual channel mode.
 *
 * The following code logic collapses the various tables for CSROW based on
 * CPU revision.
 *
 * Returns:
 *	The number of PAGE_SIZE pages on the specified CSROW number it
 *	encompasses
 */
static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
{
	u32 cs_mode, nr_pages;
	u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;

	/*
	 * The math on this doesn't look right on the surface because x/2*4 can
	 * be simplified to x*2 but this expression makes use of the fact that
	 * it is integral math where 1/2=0. This intermediate value becomes the
	 * number of bits to shift the DBAM register to extract the proper
	 * CSROW field.
	 */
	cs_mode = DBAM_DIMM(csrow_nr / 2, dbam);

	nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, (csrow_nr / 2))
						<< (20 - PAGE_SHIFT);

	edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n",
		 csrow_nr, dct, cs_mode);
	edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);

	return nr_pages;
}
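
/*
 * Worked example of the field extraction above, assuming DBAM_DIMM() is the
 * usual "shift by 4 * pair-index and mask" accessor. The helper below is a
 * hypothetical stand-alone restatement for illustration, not driver code.
 */
static inline u32 example_dbam_field(u32 dbam, int csrow_nr)
{
	/*
	 * CSROW pair N occupies DBAM bits [4N+3:4N]; integer division by 2
	 * maps both rows of a pair to the same field, e.g. csrow_nr 5 ->
	 * pair 2 -> bits 8-11.
	 */
	return (dbam >> ((csrow_nr / 2) * 4)) & 0xF;
}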
/*
 * Initialize the array of csrow attribute instances, based on the values
 * from pci config hardware registers.
 */
static int init_csrows(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	struct csrow_info *csrow;
	struct dimm_info *dimm;
	enum edac_type edac_mode;
	int i, j, empty = 1;
	int nr_pages = 0;
	u32 val;

	amd64_read_pci_cfg(pvt->F3, NBCFG, &val);

	pvt->nbcfg = val;

	edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
		 pvt->mc_node_id, val,
		 !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));

	/*
	 * We iterate over DCT0 here but we look at DCT1 in parallel, if needed.
	 */
	for_each_chip_select(i, 0, pvt) {
		bool row_dct0 = !!csrow_enabled(i, 0, pvt);
		bool row_dct1 = false;

		if (pvt->fam != 0xf)
			row_dct1 = !!csrow_enabled(i, 1, pvt);

		if (!row_dct0 && !row_dct1)
			continue;

		csrow = mci->csrows[i];
		empty = 0;

		edac_dbg(1, "MC node: %d, csrow: %d\n",
			 pvt->mc_node_id, i);

		if (row_dct0) {
			nr_pages = get_csrow_nr_pages(pvt, 0, i);
			csrow->channels[0]->dimm->nr_pages = nr_pages;
		}

		/* K8 has only one DCT */
		if (pvt->fam != 0xf && row_dct1) {
			int row_dct1_pages = get_csrow_nr_pages(pvt, 1, i);

			csrow->channels[1]->dimm->nr_pages = row_dct1_pages;
			nr_pages += row_dct1_pages;
		}

		edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages);

		/*
		 * determine whether CHIPKILL or JUST ECC or NO ECC is operating
		 */
		if (pvt->nbcfg & NBCFG_ECC_ENABLE)
			edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL) ?
				    EDAC_S4ECD4ED : EDAC_SECDED;
		else
			edac_mode = EDAC_NONE;

		for (j = 0; j < pvt->channel_count; j++) {
			dimm = csrow->channels[j]->dimm;
			dimm->mtype = pvt->dram_type;
			dimm->edac_mode = edac_mode;
		}
	}

	return empty;
}
/* get all cores on this DCT */
static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid)
{
	int cpu;

	for_each_online_cpu(cpu)
		if (amd_get_nb_id(cpu) == nid)
			cpumask_set_cpu(cpu, mask);
}
/* check MCG_CTL on all the cpus on this node */
static bool nb_mce_bank_enabled_on_node(u16 nid)
{
	cpumask_var_t mask;
	int cpu, nbe;
	bool ret = false;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
		amd64_warn("%s: Error allocating mask\n", __func__);
		return false;
	}

	get_cpus_on_this_dct_cpumask(mask, nid);

	rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);

	for_each_cpu(cpu, mask) {
		struct msr *reg = per_cpu_ptr(msrs, cpu);
		nbe = reg->l & MSR_MCGCTL_NBE;

		edac_dbg(0, "core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
			 cpu, reg->q,
			 (nbe ? "enabled" : "disabled"));

		if (!nbe)
			goto out;
	}
	ret = true;

out:
	free_cpumask_var(mask);
	return ret;
}
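
/*
 * The same NBE state can be spot-checked from userspace, e.g. with the
 * msr-tools package (assuming MSR_IA32_MCG_CTL is MSR 0x17b, the value
 * printed by the notice in ecc_enabled() below):
 *
 *	rdmsr -p 0 0x17b	# bit 4 is MSR_MCGCTL_NBE
 */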
static int toggle_ecc_err_reporting(struct ecc_settings *s, u16 nid, bool on)
{
	cpumask_var_t cmask;
	int cpu;

	if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
		amd64_warn("%s: error allocating mask\n", __func__);
		return -ENOMEM;
	}

	get_cpus_on_this_dct_cpumask(cmask, nid);

	rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);

	for_each_cpu(cpu, cmask) {
		struct msr *reg = per_cpu_ptr(msrs, cpu);

		if (on) {
			if (reg->l & MSR_MCGCTL_NBE)
				s->flags.nb_mce_enable = 1;

			reg->l |= MSR_MCGCTL_NBE;
		} else {
			/*
			 * Turn off NB MCE reporting only when it was off before
			 */
			if (!s->flags.nb_mce_enable)
				reg->l &= ~MSR_MCGCTL_NBE;
		}
	}
	wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);

	free_cpumask_var(cmask);

	return 0;
}
static bool enable_ecc_error_reporting(struct ecc_settings *s, u16 nid,
				       struct pci_dev *F3)
{
	bool ret = true;
	u32 value, mask = 0x3;		/* UECC/CECC enable */

	if (toggle_ecc_err_reporting(s, nid, ON)) {
		amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
		return false;
	}

	amd64_read_pci_cfg(F3, NBCTL, &value);

	s->old_nbctl   = value & mask;
	s->nbctl_valid = true;

	value |= mask;
	amd64_write_pci_cfg(F3, NBCTL, value);

	amd64_read_pci_cfg(F3, NBCFG, &value);

	edac_dbg(0, "1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
		 nid, value, !!(value & NBCFG_ECC_ENABLE));

	if (!(value & NBCFG_ECC_ENABLE)) {
		amd64_warn("DRAM ECC disabled on this node, enabling...\n");

		s->flags.nb_ecc_prev = 0;

		/* Attempt to turn on DRAM ECC Enable */
		value |= NBCFG_ECC_ENABLE;
		amd64_write_pci_cfg(F3, NBCFG, value);

		amd64_read_pci_cfg(F3, NBCFG, &value);

		if (!(value & NBCFG_ECC_ENABLE)) {
			amd64_warn("Hardware rejected DRAM ECC enable, check memory DIMM configuration.\n");
			ret = false;
		} else {
			amd64_info("Hardware accepted DRAM ECC Enable\n");
		}
	} else {
		s->flags.nb_ecc_prev = 1;
	}

	edac_dbg(0, "2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
		 nid, value, !!(value & NBCFG_ECC_ENABLE));

	return ret;
}
static void restore_ecc_error_reporting(struct ecc_settings *s, u16 nid,
					struct pci_dev *F3)
{
	u32 value, mask = 0x3;		/* UECC/CECC enable */

	if (!s->nbctl_valid)
		return;

	amd64_read_pci_cfg(F3, NBCTL, &value);
	value &= ~mask;
	value |= s->old_nbctl;

	amd64_write_pci_cfg(F3, NBCTL, value);

	/* restore previous BIOS DRAM ECC "off" setting we force-enabled */
	if (!s->flags.nb_ecc_prev) {
		amd64_read_pci_cfg(F3, NBCFG, &value);
		value &= ~NBCFG_ECC_ENABLE;
		amd64_write_pci_cfg(F3, NBCFG, value);
	}

	/* restore the NB Enable MCGCTL bit */
	if (toggle_ecc_err_reporting(s, nid, OFF))
		amd64_warn("Error restoring NB MCGCTL settings!\n");
}
/*
 * EDAC requires that the BIOS have ECC enabled before
 * taking over the processing of ECC errors. A command line
 * option allows forcing hardware ECC on later, in
 * enable_ecc_error_reporting().
 */
static const char *ecc_msg =
	"ECC disabled in the BIOS or no ECC capability, module will not load.\n"
	" Either enable ECC checking or force module loading by setting "
	"'ecc_enable_override'.\n"
	" (Note that use of the override may cause unknown side effects.)\n";
static bool ecc_enabled(struct pci_dev *F3, u16 nid)
{
	bool nb_mce_en = false;
	u8 ecc_en = 0, i;
	u32 value;

	if (boot_cpu_data.x86 >= 0x17) {
		u8 umc_en_mask = 0, ecc_en_mask = 0;

		for (i = 0; i < NUM_UMCS; i++) {
			u32 base = get_umc_base(i);

			/* Only check enabled UMCs. */
			if (amd_smn_read(nid, base + UMCCH_SDP_CTRL, &value))
				continue;

			if (!(value & UMC_SDP_INIT))
				continue;

			umc_en_mask |= BIT(i);

			if (amd_smn_read(nid, base + UMCCH_UMC_CAP_HI, &value))
				continue;

			if (value & UMC_ECC_ENABLED)
				ecc_en_mask |= BIT(i);
		}

		/* Check whether at least one UMC is enabled: */
		if (umc_en_mask)
			ecc_en = umc_en_mask == ecc_en_mask;

		/* Assume UMC MCA banks are enabled. */
		nb_mce_en = true;
	} else {
		amd64_read_pci_cfg(F3, NBCFG, &value);

		ecc_en = !!(value & NBCFG_ECC_ENABLE);

		nb_mce_en = nb_mce_bank_enabled_on_node(nid);
		if (!nb_mce_en)
			amd64_notice("NB MCE bank disabled, set MSR 0x%08x[4] on node %d to enable.\n",
				     MSR_IA32_MCG_CTL, nid);
	}

	amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled"));

	if (!ecc_en || !nb_mce_en) {
		amd64_notice("%s", ecc_msg);
		return false;
	}

	return true;
}
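
/*
 * Illustration of the UMC mask comparison above (made-up values): with UMC0
 * and UMC1 past SDP init, umc_en_mask ends up 0x3, and DRAM ECC counts as
 * enabled only if both UMCs also report UMC_ECC_ENABLED so that ecc_en_mask
 * is likewise 0x3. A single ECC-less UMC makes the masks differ and ECC is
 * treated as disabled for the whole node.
 */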
static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
				 struct amd64_family_type *fam)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	mci->mtype_cap		= MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
	mci->edac_ctl_cap	= EDAC_FLAG_NONE;

	if (pvt->nbcap & NBCAP_SECDED)
		mci->edac_ctl_cap |= EDAC_FLAG_SECDED;

	if (pvt->nbcap & NBCAP_CHIPKILL)
		mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;

	mci->edac_cap		= determine_edac_cap(pvt);
	mci->mod_name		= EDAC_MOD_STR;
	mci->mod_ver		= EDAC_AMD64_VERSION;
	mci->ctl_name		= fam->ctl_name;
	mci->dev_name		= pci_name(pvt->F3);
	mci->ctl_page_to_phys	= NULL;

	/* memory scrubber interface */
	mci->set_sdram_scrub_rate = set_scrub_rate;
	mci->get_sdram_scrub_rate = get_scrub_rate;
}
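
/*
 * The two scrubber callbacks above back the per-controller sysfs node,
 * assuming the standard EDAC core sysfs layout, e.g. for controller 0:
 *
 *	cat /sys/devices/system/edac/mc/mc0/sdram_scrub_rate
 */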
/*
 * returns a pointer to the family descriptor on success, NULL otherwise.
 */
static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
{
	struct amd64_family_type *fam_type = NULL;

	pvt->ext_model	= boot_cpu_data.x86_model >> 4;
	pvt->stepping	= boot_cpu_data.x86_mask;
	pvt->model	= boot_cpu_data.x86_model;
	pvt->fam	= boot_cpu_data.x86;

	switch (pvt->fam) {
	case 0xf:
		fam_type = &family_types[K8_CPUS];
		pvt->ops = &family_types[K8_CPUS].ops;
		break;

	case 0x10:
		fam_type = &family_types[F10_CPUS];
		pvt->ops = &family_types[F10_CPUS].ops;
		break;

	case 0x15:
		if (pvt->model == 0x30) {
			fam_type = &family_types[F15_M30H_CPUS];
			pvt->ops = &family_types[F15_M30H_CPUS].ops;
			break;
		} else if (pvt->model == 0x60) {
			fam_type = &family_types[F15_M60H_CPUS];
			pvt->ops = &family_types[F15_M60H_CPUS].ops;
			break;
		}

		fam_type = &family_types[F15_CPUS];
		pvt->ops = &family_types[F15_CPUS].ops;
		break;

	case 0x16:
		if (pvt->model == 0x30) {
			fam_type = &family_types[F16_M30H_CPUS];
			pvt->ops = &family_types[F16_M30H_CPUS].ops;
			break;
		}
		fam_type = &family_types[F16_CPUS];
		pvt->ops = &family_types[F16_CPUS].ops;
		break;

	case 0x17:
		fam_type = &family_types[F17_CPUS];
		pvt->ops = &family_types[F17_CPUS].ops;
		break;

	default:
		amd64_err("Unsupported family!\n");
		return NULL;
	}

	amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
		   (pvt->fam == 0xf ?
			(pvt->ext_model >= K8_REV_F ? "revF or later "
						    : "revE or earlier ")
			: ""), pvt->mc_node_id);

	return fam_type;
}
static const struct attribute_group *amd64_edac_attr_groups[] = {
#ifdef CONFIG_EDAC_DEBUG
	&amd64_edac_dbg_group,
#endif
#ifdef CONFIG_EDAC_AMD64_ERROR_INJECTION
	&amd64_edac_inj_group,
#endif
	NULL
};
static int init_one_instance(unsigned int nid)
{
	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
	struct amd64_family_type *fam_type = NULL;
	struct mem_ctl_info *mci = NULL;
	struct edac_mc_layer layers[2];
	struct amd64_pvt *pvt = NULL;
	u16 pci_id1, pci_id2;
	int err = 0, ret;

	ret = -ENOMEM;
	pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
	if (!pvt)
		goto err_ret;

	pvt->mc_node_id	= nid;
	pvt->F3 = F3;

	ret = -EINVAL;
	fam_type = per_family_init(pvt);
	if (!fam_type)
		goto err_free;

	if (pvt->fam >= 0x17) {
		pvt->umc = kcalloc(NUM_UMCS, sizeof(struct amd64_umc), GFP_KERNEL);
		if (!pvt->umc) {
			ret = -ENOMEM;
			goto err_free;
		}
		pci_id1 = fam_type->f0_id;
		pci_id2 = fam_type->f6_id;
	} else {
		pci_id1 = fam_type->f1_id;
		pci_id2 = fam_type->f2_id;
	}

	err = reserve_mc_sibling_devs(pvt, pci_id1, pci_id2);
	if (err)
		goto err_post_init;

	read_mc_regs(pvt);

	/*
	 * We need to determine how many memory channels there are. Then use
	 * that information for calculating the size of the dynamic instance
	 * tables in the 'mci' structure.
	 */
	ret = -EINVAL;
	pvt->channel_count = pvt->ops->early_channel_count(pvt);
	if (pvt->channel_count < 0)
		goto err_siblings;

	ret = -ENOMEM;
	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = pvt->csels[0].b_cnt;
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;
	/*
	 * Always allocate two channels since we can have setups with DIMMs on
	 * only one channel. Also, this simplifies handling later for the price
	 * of a couple of KBs tops.
	 */
	layers[1].size = 2;
	layers[1].is_virt_csrow = false;

	mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, 0);
	if (!mci)
		goto err_siblings;

	mci->pvt_info = pvt;
	mci->pdev = &pvt->F3->dev;

	setup_mci_misc_attrs(mci, fam_type);

	if (init_csrows(mci))
		mci->edac_cap = EDAC_FLAG_NONE;

	ret = -ENODEV;
	if (edac_mc_add_mc_with_groups(mci, amd64_edac_attr_groups)) {
		edac_dbg(1, "failed edac_mc_add_mc()\n");
		goto err_add_mc;
	}

	/* register stuff with EDAC MCE */
	if (report_gart_errors)
		amd_report_gart_errors(true);

	amd_register_ecc_decoder(decode_bus_error);

	return 0;

err_add_mc:
	edac_mc_free(mci);
err_siblings:
	free_mc_sibling_devs(pvt);
err_post_init:
	if (pvt->fam >= 0x17)
		kfree(pvt->umc);
err_free:
	kfree(pvt);
err_ret:
	return ret;
}
static int probe_one_instance(unsigned int nid)
{
	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
	struct ecc_settings *s;
	int ret;

	ret = -ENOMEM;
	s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
	if (!s)
		goto err_out;

	ecc_stngs[nid] = s;

	if (!ecc_enabled(F3, nid)) {
		ret = 0;

		if (!ecc_enable_override)
			goto err_enable;

		if (boot_cpu_data.x86 >= 0x17) {
			amd64_warn("Forcing ECC on is not recommended on newer systems. Please enable ECC in BIOS.");
			goto err_enable;
		} else
			amd64_warn("Forcing ECC on!\n");

		if (!enable_ecc_error_reporting(s, nid, F3))
			goto err_enable;
	}

	ret = init_one_instance(nid);
	if (ret < 0) {
		amd64_err("Error probing instance: %d\n", nid);

		if (boot_cpu_data.x86 < 0x17)
			restore_ecc_error_reporting(s, nid, F3);

		goto err_enable;
	}

	return ret;

err_enable:
	kfree(s);
	ecc_stngs[nid] = NULL;
err_out:
	return ret;
}
static void remove_one_instance(unsigned int nid)
{
	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
	struct ecc_settings *s = ecc_stngs[nid];
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;

	mci = find_mci_by_dev(&F3->dev);
	WARN_ON(!mci);

	/* Remove from EDAC CORE tracking list */
	mci = edac_mc_del_mc(&F3->dev);
	if (!mci)
		return;

	pvt = mci->pvt_info;

	restore_ecc_error_reporting(s, nid, F3);

	free_mc_sibling_devs(pvt);

	/* unregister from EDAC MCE */
	amd_report_gart_errors(false);
	amd_unregister_ecc_decoder(decode_bus_error);

	kfree(ecc_stngs[nid]);
	ecc_stngs[nid] = NULL;

	/* Free the EDAC CORE resources */
	mci->pvt_info = NULL;

	kfree(pvt);
	edac_mc_free(mci);
}
static void setup_pci_device(void)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;

	if (pci_ctl)
		return;

	mci = edac_mc_find(0);
	if (!mci)
		return;

	pvt = mci->pvt_info;
	if (pvt->umc)
		pci_ctl = edac_pci_create_generic_ctl(&pvt->F0->dev, EDAC_MOD_STR);
	else
		pci_ctl = edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);
	if (!pci_ctl) {
		pr_warn("%s(): Unable to create PCI control\n", __func__);
		pr_warn("%s(): PCI error report via EDAC not set\n", __func__);
	}
}
static const struct x86_cpu_id amd64_cpuids[] = {
	{ X86_VENDOR_AMD, 0xF,  X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
	{ X86_VENDOR_AMD, 0x10, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
	{ X86_VENDOR_AMD, 0x15, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
	{ X86_VENDOR_AMD, 0x16, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
	{ X86_VENDOR_AMD, 0x17, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
	{ }
};
MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids);
static int __init amd64_edac_init(void)
{
	int err = -ENODEV;
	int i;

	if (amd_cache_northbridges() < 0)
		return -ENODEV;

	opstate_init();

	err = -ENOMEM;
	ecc_stngs = kzalloc(amd_nb_num() * sizeof(ecc_stngs[0]), GFP_KERNEL);
	if (!ecc_stngs)
		goto err_free;

	msrs = msrs_alloc();
	if (!msrs)
		goto err_free;

	err = -ENODEV;
	for (i = 0; i < amd_nb_num(); i++)
		if (probe_one_instance(i)) {
			/* unwind properly */
			while (--i >= 0)
				remove_one_instance(i);

			goto err_pci;
		}

	setup_pci_device();

#ifdef CONFIG_X86_32
	amd64_err("%s on 32-bit is unsupported. USE AT YOUR OWN RISK!\n", EDAC_MOD_STR);
#endif

	printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);

	return 0;

err_pci:
	msrs_free(msrs);
	msrs = NULL;
err_free:
	kfree(ecc_stngs);
	ecc_stngs = NULL;

	return err;
}
static void __exit amd64_edac_exit(void)
{
	int i;

	if (pci_ctl)
		edac_pci_release_generic_ctl(pci_ctl);

	for (i = 0; i < amd_nb_num(); i++)
		remove_one_instance(i);

	kfree(ecc_stngs);
	ecc_stngs = NULL;

	msrs_free(msrs);
	msrs = NULL;
}
module_init(amd64_edac_init);
module_exit(amd64_edac_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
	      "Dave Peterson, Thayne Harbaugh");
MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
		   EDAC_AMD64_VERSION);

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");