// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for Pondicherry2 memory controller.
 *
 * Copyright (c) 2016, Intel Corporation.
 *
 * [Derived from sb_edac.c]
 *
 * Translation of system physical addresses to DIMM addresses
 * is a two-stage process:
 *
 * First the Pondicherry 2 memory controller handles slice and channel interleaving
 * in "sys2pmi()". This is (almost) completely common between platforms.
 *
 * Then a platform specific dunit (DIMM unit) completes the process to provide DIMM,
 * rank, bank, row and column using the appropriate "dunit_ops" functions/parameters.
 */
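/*
 * Illustrative flow (summary of the above, not from the hardware docs):
 *   system address --sys2pmi()--> (PMI channel index, PMI address)
 *     --dunit_ops->pmi2mem()--> {DIMM, rank, bank, row, column}
 */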
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/edac.h>
#include <linux/mmzone.h>
#include <linux/smp.h>
#include <linux/bitmap.h>
#include <linux/math64.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_data/x86/p2sb.h>

#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/processor.h>
#include <asm/mce.h>

#include "edac_mc.h"
#include "edac_module.h"
#include "pnd2_edac.h"
#define EDAC_MOD_STR		"pnd2_edac"

#define APL_NUM_CHANNELS	4
#define DNV_NUM_CHANNELS	2
#define DNV_MAX_DIMMS		2 /* Max DIMMs per channel */

enum type {
	APL,
	DNV, /* All requests go to PMI CH0 on each slice (CH1 disabled) */
};

struct dram_addr {
	int chan;
	int dimm;
	int rank;
	int bank;
	int row;
	int col;
};

struct pnd2_pvt {
	int dimm_geom[APL_NUM_CHANNELS];
	u64 tolm, tohm;
};
/*
 * System address space is divided into multiple regions with
 * different interleave rules in each. The as0/as1 regions
 * have no interleaving at all. The as2 region is interleaved
 * between two channels. The mot region is magic and may overlap
 * other regions, with its interleave rules taking precedence.
 * Addresses not in any of these regions are interleaved across
 * all four channels.
 */
static struct region {
	u64	base;
	u64	limit;
	u8	enabled;
} mot, as0, as1, as2;
static struct dunit_ops {
	char *name;
	enum type type;
	int pmiaddr_shift;
	int pmiidx_shift;
	int channels;
	int dimms_per_channel;
	int (*rd_reg)(int port, int off, int op, void *data, size_t sz, char *name);
	int (*get_registers)(void);
	int (*check_ecc)(void);
	void (*mk_region)(char *name, struct region *rp, void *asym);
	void (*get_dimm_config)(struct mem_ctl_info *mci);
	int (*pmi2mem)(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
		       struct dram_addr *daddr, char *msg);
} *ops;
static struct mem_ctl_info *pnd2_mci;

#define PND2_MSG_SIZE	256

/* Debug macros */
#define pnd2_printk(level, fmt, arg...)			\
	edac_printk(level, "pnd2", fmt, ##arg)

#define pnd2_mc_printk(mci, level, fmt, arg...)	\
	edac_mc_chipset_printk(mci, level, "pnd2", fmt, ##arg)
#define MOT_CHAN_INTLV_BIT_1SLC_2CH 12
#define MOT_CHAN_INTLV_BIT_2SLC_2CH 13
#define SELECTOR_DISABLED (-1)
#define _4GB (1ul << 32)

#define PMI_ADDRESS_WIDTH	31
#define PND_MAX_PHYS_BIT	39

#define APL_ASYMSHIFT		28
#define DNV_ASYMSHIFT		31
#define CH_HASH_MASK_LSB	6
#define SLICE_HASH_MASK_LSB	6
#define MOT_SLC_INTLV_BIT	12
#define LOG2_PMI_ADDR_GRANULARITY	5
#define MOT_SHIFT	24

#define GET_BITFIELD(v, lo, hi)	(((v) & GENMASK_ULL(hi, lo)) >> (lo))
#define U64_LSHIFT(val, s)	((u64)(val) << (s))
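/*
 * Illustrative examples (not part of the original source):
 * GET_BITFIELD(0xabcd, 4, 7) == 0xc; U64_LSHIFT(0x3, 32) == 0x300000000.
 */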
/*
 * On Apollo Lake we access memory controller registers via a
 * side-band mailbox style interface in a hidden PCI device
 * configuration space.
 */
static struct pci_bus	*p2sb_bus;
#define P2SB_DEVFN	PCI_DEVFN(0xd, 0)
#define P2SB_ADDR_OFF	0xd0
#define P2SB_DATA_OFF	0xd4
#define P2SB_STAT_OFF	0xd8
#define P2SB_ROUT_OFF	0xda
#define P2SB_EADD_OFF	0xdc
#define P2SB_HIDE_OFF	0xe1

#define P2SB_BUSY	1
#define P2SB_READ(size, off, ptr) \
	pci_bus_read_config_##size(p2sb_bus, P2SB_DEVFN, off, ptr)
#define P2SB_WRITE(size, off, val) \
	pci_bus_write_config_##size(p2sb_bus, P2SB_DEVFN, off, val)
static bool p2sb_is_busy(u16 *status)
{
	P2SB_READ(word, P2SB_STAT_OFF, status);

	return !!(*status & P2SB_BUSY);
}
static int _apl_rd_reg(int port, int off, int op, u32 *data)
{
	int retries = 0xff, ret;
	u16 status;
	u8 hidden;

	/* Unhide the P2SB device, if it's hidden */
	P2SB_READ(byte, P2SB_HIDE_OFF, &hidden);
	if (hidden)
		P2SB_WRITE(byte, P2SB_HIDE_OFF, 0);

	if (p2sb_is_busy(&status)) {
		ret = -EAGAIN;
		goto out;
	}

	P2SB_WRITE(dword, P2SB_ADDR_OFF, (port << 24) | off);
	P2SB_WRITE(dword, P2SB_DATA_OFF, 0);
	P2SB_WRITE(dword, P2SB_EADD_OFF, 0);
	P2SB_WRITE(word, P2SB_ROUT_OFF, 0);
	P2SB_WRITE(word, P2SB_STAT_OFF, (op << 8) | P2SB_BUSY);

	while (p2sb_is_busy(&status)) {
		if (retries-- == 0) {
			ret = -EBUSY;
			goto out;
		}
	}

	P2SB_READ(dword, P2SB_DATA_OFF, data);
	ret = (status >> 1) & 0x3;
out:
	/* Hide the P2SB device, if it was hidden before */
	if (hidden)
		P2SB_WRITE(byte, P2SB_HIDE_OFF, hidden);

	return ret;
}
static int apl_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
{
	int ret = 0;

	edac_dbg(2, "Read %s port=%x off=%x op=%x\n", name, port, off, op);
	switch (sz) {
	case 8:
		ret = _apl_rd_reg(port, off + 4, op, (u32 *)(data + 4));
		fallthrough;
	case 4:
		ret |= _apl_rd_reg(port, off, op, (u32 *)data);
		pnd2_printk(KERN_DEBUG, "%s=%x%08x ret=%d\n", name,
			    sz == 8 ? *((u32 *)(data + 4)) : 0, *((u32 *)data), ret);
		break;
	}

	return ret;
}
static u64 get_mem_ctrl_hub_base_addr(void)
{
	struct b_cr_mchbar_lo_pci lo;
	struct b_cr_mchbar_hi_pci hi;
	struct pci_dev *pdev;

	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
	if (pdev) {
		pci_read_config_dword(pdev, 0x48, (u32 *)&lo);
		pci_read_config_dword(pdev, 0x4c, (u32 *)&hi);
		pci_dev_put(pdev);
	} else {
		return 0;
	}

	if (!lo.enable) {
		edac_dbg(2, "MMIO via memory controller hub base address is disabled!\n");
		return 0;
	}

	return U64_LSHIFT(hi.base, 32) | U64_LSHIFT(lo.base, 15);
}
#define DNV_MCHBAR_SIZE		0x8000
#define DNV_SB_PORT_SIZE	0x10000
static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
{
	struct pci_dev *pdev;
	void __iomem *base;
	struct resource r;
	int ret;

	if (op == 4) {
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
		if (!pdev)
			return -ENODEV;

		pci_read_config_dword(pdev, off, data);
		pci_dev_put(pdev);
	} else {
		/* MMIO via memory controller hub base address */
		if (op == 0 && port == 0x4c) {
			memset(&r, 0, sizeof(r));

			r.start = get_mem_ctrl_hub_base_addr();
			if (!r.start)
				return -ENODEV;
			r.end = r.start + DNV_MCHBAR_SIZE - 1;
		} else {
			/* MMIO via sideband register base address */
			ret = p2sb_bar(NULL, 0, &r);
			if (ret)
				return ret;

			r.start += (port << 16);
			r.end = r.start + DNV_SB_PORT_SIZE - 1;
		}

		base = ioremap(r.start, resource_size(&r));
		if (!base)
			return -ENODEV;

		if (sz == 8)
			*(u64 *)data = readq(base + off);
		else
			*(u32 *)data = readl(base + off);

		iounmap(base);
	}

	edac_dbg(2, "Read %s=%.8x_%.8x\n", name,
		 (sz == 8) ? *(u32 *)(data + 4) : 0, *(u32 *)data);

	return 0;
}
#define RD_REGP(regp, regname, port)	\
	ops->rd_reg(port,		\
		regname##_offset,	\
		regname##_r_opcode,	\
		regp, sizeof(struct regname),	\
		#regname)

#define RD_REG(regp, regname)		\
	ops->rd_reg(regname ## _port,	\
		regname##_offset,	\
		regname##_r_opcode,	\
		regp, sizeof(struct regname),	\
		#regname)
static u64 top_lm, top_hm;
static bool two_slices;
static bool two_channels; /* Both PMI channels in one slice enabled */

static u8 sym_chan_mask;
static u8 asym_chan_mask;
static u8 chan_mask;

static int slice_selector = -1;
static int chan_selector = -1;
static u64 slice_hash_mask;
static u64 chan_hash_mask;
static void mk_region(char *name, struct region *rp, u64 base, u64 limit)
{
	rp->base = base;
	rp->limit = limit;
	rp->enabled = 1;
	edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, limit);
}
static void mk_region_mask(char *name, struct region *rp, u64 base, u64 mask)
{
	if (mask == 0) {
		pr_info(FW_BUG "MOT mask cannot be zero\n");
		return;
	}
	if (mask != GENMASK_ULL(PND_MAX_PHYS_BIT, __ffs(mask))) {
		pr_info(FW_BUG "MOT mask not power of two\n");
		return;
	}
	if (base & ~mask) {
		pr_info(FW_BUG "MOT region base/mask alignment error\n");
		return;
	}
	rp->base = base;
	rp->limit = (base | ~mask) & GENMASK_ULL(PND_MAX_PHYS_BIT, 0);
	rp->enabled = 1;
	edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, rp->limit);
}
static bool in_region(struct region *rp, u64 addr)
{
	if (!rp->enabled)
		return false;

	return rp->base <= addr && addr <= rp->limit;
}
static int gen_sym_mask(struct b_cr_slice_channel_hash *p)
{
	int mask = 0;

	if (!p->slice_0_mem_disabled)
		mask |= p->sym_slice0_channel_enabled;

	if (!p->slice_1_disabled)
		mask |= p->sym_slice1_channel_enabled << 2;

	if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
		mask &= 0x5;

	return mask;
}
static int gen_asym_mask(struct b_cr_slice_channel_hash *p,
			 struct b_cr_asym_mem_region0_mchbar *as0,
			 struct b_cr_asym_mem_region1_mchbar *as1,
			 struct b_cr_asym_2way_mem_region_mchbar *as2way)
{
	const int intlv[] = { 0x5, 0xA, 0x3, 0xC };
	int mask = 0;

	if (as2way->asym_2way_interleave_enable)
		mask = intlv[as2way->asym_2way_intlv_mode];
	if (as0->slice0_asym_enable)
		mask |= (1 << as0->slice0_asym_channel_select);
	if (as1->slice1_asym_enable)
		mask |= (4 << as1->slice1_asym_channel_select);
	if (p->slice_0_mem_disabled)
		mask &= 0xc;
	if (p->slice_1_disabled)
		mask &= 0x3;
	if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
		mask &= 0x5;

	return mask;
}
static struct b_cr_tolud_pci tolud;
static struct b_cr_touud_lo_pci touud_lo;
static struct b_cr_touud_hi_pci touud_hi;
static struct b_cr_asym_mem_region0_mchbar asym0;
static struct b_cr_asym_mem_region1_mchbar asym1;
static struct b_cr_asym_2way_mem_region_mchbar asym_2way;
static struct b_cr_mot_out_base_mchbar mot_base;
static struct b_cr_mot_out_mask_mchbar mot_mask;
static struct b_cr_slice_channel_hash chash;
/* Apollo Lake dunit */
/*
 * Validated on board with just two DIMMs in the [0] and [2] positions
 * in this array. Other port numbers match the documentation, but caution
 * is advised.
 */
static const int apl_dports[APL_NUM_CHANNELS] = { 0x18, 0x10, 0x11, 0x19 };
static struct d_cr_drp0 drp0[APL_NUM_CHANNELS];
/* Denverton dunit */
static const int dnv_dports[DNV_NUM_CHANNELS] = { 0x10, 0x12 };
static struct d_cr_dsch dsch;
static struct d_cr_ecc_ctrl ecc_ctrl[DNV_NUM_CHANNELS];
static struct d_cr_drp drp[DNV_NUM_CHANNELS];
static struct d_cr_dmap dmap[DNV_NUM_CHANNELS];
static struct d_cr_dmap1 dmap1[DNV_NUM_CHANNELS];
static struct d_cr_dmap2 dmap2[DNV_NUM_CHANNELS];
static struct d_cr_dmap3 dmap3[DNV_NUM_CHANNELS];
static struct d_cr_dmap4 dmap4[DNV_NUM_CHANNELS];
static struct d_cr_dmap5 dmap5[DNV_NUM_CHANNELS];
static void apl_mk_region(char *name, struct region *rp, void *asym)
{
	struct b_cr_asym_mem_region0_mchbar *a = asym;

	mk_region(name, rp,
		  U64_LSHIFT(a->slice0_asym_base, APL_ASYMSHIFT),
		  U64_LSHIFT(a->slice0_asym_limit, APL_ASYMSHIFT) +
		  GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
}

static void dnv_mk_region(char *name, struct region *rp, void *asym)
{
	struct b_cr_asym_mem_region_denverton *a = asym;

	mk_region(name, rp,
		  U64_LSHIFT(a->slice_asym_base, DNV_ASYMSHIFT),
		  U64_LSHIFT(a->slice_asym_limit, DNV_ASYMSHIFT) +
		  GENMASK_ULL(DNV_ASYMSHIFT - 1, 0));
}
static int apl_get_registers(void)
{
	int ret = -ENODEV;
	int i;

	if (RD_REG(&asym_2way, b_cr_asym_2way_mem_region_mchbar))
		return -ENODEV;

	/*
	 * RD_REGP() will fail for unpopulated or non-existent
	 * DIMM slots. Return success if we find at least one DIMM.
	 */
	for (i = 0; i < APL_NUM_CHANNELS; i++)
		if (!RD_REGP(&drp0[i], d_cr_drp0, apl_dports[i]))
			ret = 0;

	return ret;
}
static int dnv_get_registers(void)
{
	int i;

	if (RD_REG(&dsch, d_cr_dsch))
		return -ENODEV;

	for (i = 0; i < DNV_NUM_CHANNELS; i++)
		if (RD_REGP(&ecc_ctrl[i], d_cr_ecc_ctrl, dnv_dports[i]) ||
		    RD_REGP(&drp[i], d_cr_drp, dnv_dports[i]) ||
		    RD_REGP(&dmap[i], d_cr_dmap, dnv_dports[i]) ||
		    RD_REGP(&dmap1[i], d_cr_dmap1, dnv_dports[i]) ||
		    RD_REGP(&dmap2[i], d_cr_dmap2, dnv_dports[i]) ||
		    RD_REGP(&dmap3[i], d_cr_dmap3, dnv_dports[i]) ||
		    RD_REGP(&dmap4[i], d_cr_dmap4, dnv_dports[i]) ||
		    RD_REGP(&dmap5[i], d_cr_dmap5, dnv_dports[i]))
			return -ENODEV;

	return 0;
}
/*
 * Read all the h/w config registers once here (they don't
 * change at run time). Figure out which address ranges have
 * which interleave characteristics.
 */
static int get_registers(void)
{
	const int intlv[] = { 10, 11, 12, 12 };

	if (RD_REG(&tolud, b_cr_tolud_pci) ||
	    RD_REG(&touud_lo, b_cr_touud_lo_pci) ||
	    RD_REG(&touud_hi, b_cr_touud_hi_pci) ||
	    RD_REG(&asym0, b_cr_asym_mem_region0_mchbar) ||
	    RD_REG(&asym1, b_cr_asym_mem_region1_mchbar) ||
	    RD_REG(&mot_base, b_cr_mot_out_base_mchbar) ||
	    RD_REG(&mot_mask, b_cr_mot_out_mask_mchbar) ||
	    RD_REG(&chash, b_cr_slice_channel_hash))
		return -ENODEV;

	if (ops->get_registers())
		return -ENODEV;

	if (ops->type == DNV) {
		/* PMI channel idx (always 0) for asymmetric region */
		asym0.slice0_asym_channel_select = 0;
		asym1.slice1_asym_channel_select = 0;
		/* PMI channel bitmap (always 1) for symmetric region */
		chash.sym_slice0_channel_enabled = 0x1;
		chash.sym_slice1_channel_enabled = 0x1;
	}

	if (asym0.slice0_asym_enable)
		ops->mk_region("as0", &as0, &asym0);

	if (asym1.slice1_asym_enable)
		ops->mk_region("as1", &as1, &asym1);

	if (asym_2way.asym_2way_interleave_enable) {
		mk_region("as2way", &as2,
			  U64_LSHIFT(asym_2way.asym_2way_base, APL_ASYMSHIFT),
			  U64_LSHIFT(asym_2way.asym_2way_limit, APL_ASYMSHIFT) +
			  GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
	}

	if (mot_base.imr_en) {
		mk_region_mask("mot", &mot,
			       U64_LSHIFT(mot_base.mot_out_base, MOT_SHIFT),
			       U64_LSHIFT(mot_mask.mot_out_mask, MOT_SHIFT));
	}

	top_lm = U64_LSHIFT(tolud.tolud, 20);
	top_hm = U64_LSHIFT(touud_hi.touud, 32) | U64_LSHIFT(touud_lo.touud, 20);

	two_slices = !chash.slice_1_disabled &&
		     !chash.slice_0_mem_disabled &&
		     (chash.sym_slice0_channel_enabled != 0) &&
		     (chash.sym_slice1_channel_enabled != 0);
	two_channels = !chash.ch_1_disabled &&
		       !chash.enable_pmi_dual_data_mode &&
		       ((chash.sym_slice0_channel_enabled == 3) ||
		       (chash.sym_slice1_channel_enabled == 3));

	sym_chan_mask = gen_sym_mask(&chash);
	asym_chan_mask = gen_asym_mask(&chash, &asym0, &asym1, &asym_2way);
	chan_mask = sym_chan_mask | asym_chan_mask;

	if (two_slices && !two_channels) {
		if (chash.hvm_mode)
			slice_selector = 29;
		else
			slice_selector = intlv[chash.interleave_mode];
	} else if (!two_slices && two_channels) {
		if (chash.hvm_mode)
			chan_selector = 29;
		else
			chan_selector = intlv[chash.interleave_mode];
	} else if (two_slices && two_channels) {
		if (chash.hvm_mode) {
			slice_selector = 29;
			chan_selector = 30;
		} else {
			slice_selector = intlv[chash.interleave_mode];
			chan_selector = intlv[chash.interleave_mode] + 1;
		}
	}

	if (two_slices) {
		if (!chash.hvm_mode)
			slice_hash_mask = chash.slice_hash_mask << SLICE_HASH_MASK_LSB;
		if (!two_channels)
			slice_hash_mask |= BIT_ULL(slice_selector);
	}

	if (two_channels) {
		if (!chash.hvm_mode)
			chan_hash_mask = chash.ch_hash_mask << CH_HASH_MASK_LSB;
		if (!two_slices)
			chan_hash_mask |= BIT_ULL(chan_selector);
	}

	return 0;
}
/* Get a contiguous memory address (remove the MMIO gap) */
static u64 remove_mmio_gap(u64 sys)
{
	return (sys < _4GB) ? sys : sys - (_4GB - top_lm);
}
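/*
 * Illustrative example (hypothetical values): with top_lm at 2GB the MMIO gap
 * spans [2GB, 4GB), so system address 5GB maps to contiguous address
 * 5GB - (4GB - 2GB) = 3GB.
 */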
/* Squeeze out one address bit, shift upper part down to fill gap */
static void remove_addr_bit(u64 *addr, int bitidx)
{
	u64 mask;

	if (bitidx == -1)
		return;

	mask = (1ull << bitidx) - 1;
	*addr = ((*addr >> 1) & ~mask) | (*addr & mask);
}
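/*
 * Illustrative example: remove_addr_bit(&a, 2) with a == 0b101101 drops bit 2
 * and shifts the higher bits down one place, leaving a == 0b10101.
 */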
/* XOR all the bits from addr specified in mask */
static int hash_by_mask(u64 addr, u64 mask)
{
	u64 result = addr & mask;

	result = (result >> 32) ^ result;
	result = (result >> 16) ^ result;
	result = (result >> 8) ^ result;
	result = (result >> 4) ^ result;
	result = (result >> 2) ^ result;
	result = (result >> 1) ^ result;

	return (int)result & 1;
}
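/*
 * Illustrative example: hash_by_mask(0xc4, 0xc0) keeps bits 7 and 6 (both
 * set), folds them together with XOR and returns 0; an odd number of set
 * bits under the mask would return 1.
 */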
/*
 * First stage decode. Take the system address and figure out which
 * second stage will deal with it based on interleave modes.
 */
static int sys2pmi(const u64 addr, u32 *pmiidx, u64 *pmiaddr, char *msg)
{
	u64 contig_addr, contig_base, contig_offset, contig_base_adj;
	int mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
					 MOT_CHAN_INTLV_BIT_1SLC_2CH;
	int slice_intlv_bit_rm = SELECTOR_DISABLED;
	int chan_intlv_bit_rm = SELECTOR_DISABLED;
	/* Determine if address is in the MOT region. */
	bool mot_hit = in_region(&mot, addr);
	/* Calculate the number of symmetric channels enabled. */
	int sym_channels = hweight8(sym_chan_mask);

	/*
	 * The amount we need to shift the asym base can be determined by the
	 * number of enabled symmetric channels.
	 * NOTE: This can only work because symmetric memory is not supposed
	 * to do a 3-way interleave.
	 */
	int sym_chan_shift = sym_channels >> 1;

	/* Give up if address is out of range, or in MMIO gap */
	if (addr >= (1ul << PND_MAX_PHYS_BIT) ||
	    (addr >= top_lm && addr < _4GB) || addr >= top_hm) {
		snprintf(msg, PND2_MSG_SIZE, "Error address 0x%llx is not DRAM", addr);
		return -EINVAL;
	}

	/* Get a contiguous memory address (remove the MMIO gap) */
	contig_addr = remove_mmio_gap(addr);

	if (in_region(&as0, addr)) {
		*pmiidx = asym0.slice0_asym_channel_select;

		contig_base = remove_mmio_gap(as0.base);
		contig_offset = contig_addr - contig_base;
		contig_base_adj = (contig_base >> sym_chan_shift) *
				  ((chash.sym_slice0_channel_enabled >> (*pmiidx & 1)) & 1);
		contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
	} else if (in_region(&as1, addr)) {
		*pmiidx = 2u + asym1.slice1_asym_channel_select;

		contig_base = remove_mmio_gap(as1.base);
		contig_offset = contig_addr - contig_base;
		contig_base_adj = (contig_base >> sym_chan_shift) *
				  ((chash.sym_slice1_channel_enabled >> (*pmiidx & 1)) & 1);
		contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
	} else if (in_region(&as2, addr) && (asym_2way.asym_2way_intlv_mode == 0x3ul)) {
		bool channel1;

		mot_intlv_bit = MOT_CHAN_INTLV_BIT_1SLC_2CH;
		*pmiidx = (asym_2way.asym_2way_intlv_mode & 1) << 1;
		channel1 = mot_hit ? ((bool)((addr >> mot_intlv_bit) & 1)) :
			hash_by_mask(contig_addr, chan_hash_mask);
		*pmiidx |= (u32)channel1;

		contig_base = remove_mmio_gap(as2.base);
		chan_intlv_bit_rm = mot_hit ? mot_intlv_bit : chan_selector;
		contig_offset = contig_addr - contig_base;
		remove_addr_bit(&contig_offset, chan_intlv_bit_rm);
		contig_addr = (contig_base >> sym_chan_shift) + contig_offset;
	} else {
		/* Otherwise we're in normal, boring symmetric mode. */
		*pmiidx = 0u;

		if (two_slices) {
			bool slice1;

			if (mot_hit) {
				slice_intlv_bit_rm = MOT_SLC_INTLV_BIT;
				slice1 = (addr >> MOT_SLC_INTLV_BIT) & 1;
			} else {
				slice_intlv_bit_rm = slice_selector;
				slice1 = hash_by_mask(addr, slice_hash_mask);
			}

			*pmiidx = (u32)slice1 << 1;
		}

		if (two_channels) {
			bool channel1;

			mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
						     MOT_CHAN_INTLV_BIT_1SLC_2CH;

			if (mot_hit) {
				chan_intlv_bit_rm = mot_intlv_bit;
				channel1 = (addr >> mot_intlv_bit) & 1;
			} else {
				chan_intlv_bit_rm = chan_selector;
				channel1 = hash_by_mask(contig_addr, chan_hash_mask);
			}

			*pmiidx |= (u32)channel1;
		}
	}

	/* Remove the chan_selector bit first */
	remove_addr_bit(&contig_addr, chan_intlv_bit_rm);
	/* Remove the slice bit (we remove it second because it must be lower) */
	remove_addr_bit(&contig_addr, slice_intlv_bit_rm);
	*pmiaddr = contig_addr;

	return 0;
}
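/*
 * Illustrative walk-through (hypothetical configuration): with two slices and
 * two channels enabled, a symmetric-region address ends up with
 * *pmiidx == (slice << 1) | channel, and the slice/channel interleave bits
 * are squeezed out of the address before it is handed to the platform's
 * pmi2mem() as a PMI address.
 */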
/* Translate PMI address to memory (rank, row, bank, column) */
#define C(n) (0x10 | (n))	/* column */
#define B(n) (0x20 | (n))	/* bank */
#define R(n) (0x40 | (n))	/* row */
#define RS   (0x80)		/* rank */
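/*
 * Illustrative note: each dimm_geometry.bits[] entry encodes a bit type in
 * the high nibble and a bit index in the low nibble, e.g. B(1) == 0x21
 * decodes as type "bank" (0x20), index 1. apl_pmi2mem() splits them with
 * "& ~0xf" and "& 0xf".
 */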
static struct dimm_geometry {
	u8	addrdec;
	u8	dden;
	u8	dwid;
	u8	rowbits, colbits;
	u16	bits[PMI_ADDRESS_WIDTH];
} dimms[] = {
	{
		.addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X16,
		.rowbits = 15, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
			R(10), C(7),  C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			0,     0,     0,     0
		}
	},
	{
		.addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X8,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
			R(10), C(7),  C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X16,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
			R(10), C(7),  C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X8,
		.rowbits = 16, .colbits = 11,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
			R(10), C(7),  C(8),  C(9),  R(11), RS,    C(11), R(12), R(13),
			R(14), R(15), 0,     0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X16,
		.rowbits = 15, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
			R(9),  R(10), C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			0,     0,     0,     0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X8,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
			R(9),  R(10), C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X16,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
			R(9),  R(10), C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X8,
		.rowbits = 16, .colbits = 11,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
			R(9),  R(10), C(8),  C(9),  R(11), RS,    C(11), R(12), R(13),
			R(14), R(15), 0,     0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X16,
		.rowbits = 15, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
			R(8),  R(9),  R(10), C(9),  R(11), RS,    R(12), R(13), R(14),
			0,     0,     0,     0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X8,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
			R(8),  R(9),  R(10), C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X16,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
			R(8),  R(9),  R(10), C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X8,
		.rowbits = 16, .colbits = 11,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
			R(8),  R(9),  R(10), C(9),  R(11), RS,    C(11), R(12), R(13),
			R(14), R(15), 0,     0
		}
	}
};
static int bank_hash(u64 pmiaddr, int idx, int shft)
{
	int bhash = 0;

	switch (idx) {
	case 0:
		bhash ^= ((pmiaddr >> (12 + shft)) ^ (pmiaddr >> (9 + shft))) & 1;
		break;
	case 1:
		bhash ^= (((pmiaddr >> (10 + shft)) ^ (pmiaddr >> (8 + shft))) & 1) << 1;
		bhash ^= ((pmiaddr >> 22) & 1) << 1;
		break;
	case 2:
		bhash ^= (((pmiaddr >> (13 + shft)) ^ (pmiaddr >> (11 + shft))) & 1) << 2;
		break;
	}

	return bhash;
}

static int rank_hash(u64 pmiaddr)
{
	return ((pmiaddr >> 16) ^ (pmiaddr >> 10)) & 1;
}
/* Second stage decode. Compute rank, bank, row & column. */
static int apl_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
		       struct dram_addr *daddr, char *msg)
{
	struct d_cr_drp0 *cr_drp0 = &drp0[pmiidx];
	struct pnd2_pvt *pvt = mci->pvt_info;
	int g = pvt->dimm_geom[pmiidx];
	struct dimm_geometry *d = &dimms[g];
	int column = 0, bank = 0, row = 0, rank = 0;
	int i, idx, type, skiprs = 0;

	for (i = 0; i < PMI_ADDRESS_WIDTH; i++) {
		int bit = (pmiaddr >> i) & 1;

		if (i + skiprs >= PMI_ADDRESS_WIDTH) {
			snprintf(msg, PND2_MSG_SIZE, "Bad dimm_geometry[] table\n");
			return -EINVAL;
		}

		type = d->bits[i + skiprs] & ~0xf;
		idx = d->bits[i + skiprs] & 0xf;

		/*
		 * On single rank DIMMs ignore the rank select bit
		 * and shift remainder of "bits[]" down one place.
		 */
		if (type == RS && (cr_drp0->rken0 + cr_drp0->rken1) == 1) {
			skiprs = 1;
			type = d->bits[i + skiprs] & ~0xf;
			idx = d->bits[i + skiprs] & 0xf;
		}

		switch (type) {
		case C(0):
			column |= (bit << idx);
			break;
		case B(0):
			bank |= (bit << idx);
			if (cr_drp0->bahen)
				bank ^= bank_hash(pmiaddr, idx, d->addrdec);
			break;
		case R(0):
			row |= (bit << idx);
			break;
		case RS:
			rank = bit;
			if (cr_drp0->rsien)
				rank ^= rank_hash(pmiaddr);
			break;
		default:
			if (bit) {
				snprintf(msg, PND2_MSG_SIZE, "Bad translation\n");
				return -EINVAL;
			}
			break;
		}
	}

	daddr->col = column;
	daddr->bank = bank;
	daddr->row = row;
	daddr->rank = rank;
	daddr->dimm = 0;

	return 0;
}
/* Pluck bit "in" from pmiaddr and return value shifted to bit "out" */
#define dnv_get_bit(pmi, in, out) ((int)(((pmi) >> (in)) & 1u) << (out))
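/*
 * Illustrative example: dnv_get_bit(0x40, 6, 1) extracts bit 6 of the PMI
 * address (set in 0x40) and returns it shifted to bit position 1, i.e. 0x2.
 */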
static int dnv_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
		       struct dram_addr *daddr, char *msg)
{
	/* Rank 0 or 1 */
	daddr->rank = dnv_get_bit(pmiaddr, dmap[pmiidx].rs0 + 13, 0);
	/* Rank 2 or 3 */
	daddr->rank |= dnv_get_bit(pmiaddr, dmap[pmiidx].rs1 + 13, 1);

	/*
	 * Normally ranks 0,1 are DIMM0, and 2,3 are DIMM1, but we
	 * flip them if DIMM1 is larger than DIMM0.
	 */
	daddr->dimm = (daddr->rank >= 2) ^ drp[pmiidx].dimmflip;

	daddr->bank = dnv_get_bit(pmiaddr, dmap[pmiidx].ba0 + 6, 0);
	daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].ba1 + 6, 1);
	daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg0 + 6, 2);
	if (dsch.ddr4en)
		daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg1 + 6, 3);
	if (dmap1[pmiidx].bxor) {
		if (dsch.ddr4en) {
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 0);
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 1);
			if (dsch.chan_width == 0)
				/* 64/72 bit dram channel width */
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
			else
				/* 32/40 bit dram channel width */
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 3);
		} else {
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 0);
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 1);
			if (dsch.chan_width == 0)
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
			else
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
		}
	}

	daddr->row = dnv_get_bit(pmiaddr, dmap2[pmiidx].row0 + 6, 0);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row1 + 6, 1);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 2);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row3 + 6, 3);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row4 + 6, 4);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row5 + 6, 5);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 6);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 7);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row8 + 6, 8);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row9 + 6, 9);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row10 + 6, 10);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row11 + 6, 11);
	daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row12 + 6, 12);
	daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row13 + 6, 13);
	if (dmap4[pmiidx].row14 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row14 + 6, 14);
	if (dmap4[pmiidx].row15 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row15 + 6, 15);
	if (dmap4[pmiidx].row16 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row16 + 6, 16);
	if (dmap4[pmiidx].row17 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row17 + 6, 17);

	daddr->col = dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 3);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 4);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca5 + 6, 5);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca6 + 6, 6);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca7 + 6, 7);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca8 + 6, 8);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca9 + 6, 9);
	if (!dsch.ddr4en && dmap1[pmiidx].ca11 != 0x3f)
		daddr->col |= dnv_get_bit(pmiaddr, dmap1[pmiidx].ca11 + 13, 11);

	return 0;
}
static int check_channel(int ch)
{
	if (drp0[ch].dramtype != 0) {
		pnd2_printk(KERN_INFO, "Unsupported DIMM in channel %d\n", ch);
		return 1;
	} else if (drp0[ch].eccen == 0) {
		pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
		return 1;
	}
	return 0;
}

static int apl_check_ecc_active(void)
{
	int i, ret = 0;

	/* Check dramtype and ECC mode for each present DIMM */
	for (i = 0; i < APL_NUM_CHANNELS; i++)
		if (chan_mask & BIT(i))
			ret += check_channel(i);
	return ret ? -EINVAL : 0;
}
#define DIMMS_PRESENT(d) ((d)->rken0 + (d)->rken1 + (d)->rken2 + (d)->rken3)

static int check_unit(int ch)
{
	struct d_cr_drp *d = &drp[ch];

	if (DIMMS_PRESENT(d) && !ecc_ctrl[ch].eccen) {
		pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
		return 1;
	}
	return 0;
}

static int dnv_check_ecc_active(void)
{
	int i, ret = 0;

	for (i = 0; i < DNV_NUM_CHANNELS; i++)
		ret += check_unit(i);
	return ret ? -EINVAL : 0;
}
static int get_memory_error_data(struct mem_ctl_info *mci, u64 addr,
				 struct dram_addr *daddr, char *msg)
{
	u64 pmiaddr;
	u32 pmiidx;
	int ret;

	ret = sys2pmi(addr, &pmiidx, &pmiaddr, msg);
	if (ret)
		return ret;

	pmiaddr >>= ops->pmiaddr_shift;
	/* pmi channel idx to dimm channel idx */
	pmiidx >>= ops->pmiidx_shift;
	daddr->chan = pmiidx;

	ret = ops->pmi2mem(mci, pmiaddr, pmiidx, daddr, msg);
	if (ret)
		return ret;

	edac_dbg(0, "SysAddr=%llx PmiAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
		 addr, pmiaddr, daddr->chan, daddr->dimm, daddr->rank, daddr->bank, daddr->row, daddr->col);

	return 0;
}
static void pnd2_mce_output_error(struct mem_ctl_info *mci, const struct mce *m,
				  struct dram_addr *daddr)
{
	enum hw_event_mc_err_type tp_event;
	char *optype, msg[PND2_MSG_SIZE];
	bool ripv = m->mcgstatus & MCG_STATUS_RIPV;
	bool overflow = m->status & MCI_STATUS_OVER;
	bool uc_err = m->status & MCI_STATUS_UC;
	bool recov = m->status & MCI_STATUS_S;
	u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
	u32 mscod = GET_BITFIELD(m->status, 16, 31);
	u32 errcode = GET_BITFIELD(m->status, 0, 15);
	u32 optypenum = GET_BITFIELD(m->status, 4, 6);
	int rc;

	tp_event = uc_err ? (ripv ? HW_EVENT_ERR_UNCORRECTED : HW_EVENT_ERR_FATAL) :
			    HW_EVENT_ERR_CORRECTED;

	/*
	 * According to Table 15-9 of the Intel Architecture spec vol 3A,
	 * memory errors should fit in this mask:
	 *	000f 0000 1mmm cccc (binary)
	 * where:
	 *	f = Correction Report Filtering Bit. If 1, subsequent errors
	 *	    won't be shown
	 *	mmm = error type
	 *	cccc = channel
	 * If the mask doesn't match, report an error to the parsing logic
	 */
	if (!((errcode & 0xef80) == 0x80)) {
		optype = "Can't parse: it is not a mem";
	} else {
		switch (optypenum) {
		case 0:
			optype = "generic undef request error";
			break;
		case 1:
			optype = "memory read error";
			break;
		case 2:
			optype = "memory write error";
			break;
		case 3:
			optype = "addr/cmd error";
			break;
		case 4:
			optype = "memory scrubbing error";
			break;
		default:
			optype = "reserved";
			break;
		}
	}

	/* Only decode errors with a valid address (ADDRV) */
	if (!(m->status & MCI_STATUS_ADDRV))
		return;

	rc = get_memory_error_data(mci, m->addr, daddr, msg);
	if (rc)
		goto address_error;

	snprintf(msg, sizeof(msg),
		 "%s%s err_code:%04x:%04x channel:%d DIMM:%d rank:%d row:%d bank:%d col:%d",
		 overflow ? " OVERFLOW" : "", (uc_err && recov) ? " recoverable" : "", mscod,
		 errcode, daddr->chan, daddr->dimm, daddr->rank, daddr->row, daddr->bank, daddr->col);

	edac_dbg(0, "%s\n", msg);

	/* Call the helper to output message */
	edac_mc_handle_error(tp_event, mci, core_err_cnt, m->addr >> PAGE_SHIFT,
			     m->addr & ~PAGE_MASK, 0, daddr->chan, daddr->dimm, -1, optype, msg);

	return;

address_error:
	edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0, -1, -1, -1, msg, "");
}
static void apl_get_dimm_config(struct mem_ctl_info *mci)
{
	struct pnd2_pvt *pvt = mci->pvt_info;
	struct dimm_info *dimm;
	struct d_cr_drp0 *d;
	u64 capacity;
	int i, g;

	for (i = 0; i < APL_NUM_CHANNELS; i++) {
		if (!(chan_mask & BIT(i)))
			continue;

		dimm = edac_get_dimm(mci, i, 0, 0);
		if (!dimm) {
			edac_dbg(0, "No allocated DIMM for channel %d\n", i);
			continue;
		}

		d = &drp0[i];
		for (g = 0; g < ARRAY_SIZE(dimms); g++)
			if (dimms[g].addrdec == d->addrdec &&
			    dimms[g].dden == d->dden &&
			    dimms[g].dwid == d->dwid)
				break;

		if (g == ARRAY_SIZE(dimms)) {
			edac_dbg(0, "Channel %d: unrecognized DIMM\n", i);
			continue;
		}

		pvt->dimm_geom[i] = g;
		capacity = (d->rken0 + d->rken1) * 8 * (1ul << dimms[g].rowbits) *
			   (1ul << dimms[g].colbits);
		edac_dbg(0, "Channel %d: %lld MByte DIMM\n", i, capacity >> (20 - 3));
		dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
		dimm->grain = 32;
		dimm->dtype = (d->dwid == 0) ? DEV_X8 : DEV_X16;
		dimm->mtype = MEM_DDR3;
		dimm->edac_mode = EDAC_SECDED;
		snprintf(dimm->label, sizeof(dimm->label), "Slice#%d_Chan#%d", i / 2, i % 2);
	}
}
static const int dnv_dtypes[] = {
	DEV_X8, DEV_X4, DEV_X16, DEV_UNKNOWN
};

static void dnv_get_dimm_config(struct mem_ctl_info *mci)
{
	int i, j, ranks_of_dimm[DNV_MAX_DIMMS], banks, rowbits, colbits, memtype;
	struct dimm_info *dimm;
	struct d_cr_drp *d;
	u64 capacity;

	if (dsch.ddr4en) {
		memtype = MEM_DDR4;
		banks = 16;
		colbits = 10;
	} else {
		memtype = MEM_DDR3;
		banks = 8;
	}

	for (i = 0; i < DNV_NUM_CHANNELS; i++) {
		if (dmap4[i].row14 == 31)
			rowbits = 14;
		else if (dmap4[i].row15 == 31)
			rowbits = 15;
		else if (dmap4[i].row16 == 31)
			rowbits = 16;
		else if (dmap4[i].row17 == 31)
			rowbits = 17;
		else
			rowbits = 18;

		if (memtype == MEM_DDR3) {
			if (dmap1[i].ca11 != 0x3f)
				colbits = 12;
			else
				colbits = 10;
		}

		d = &drp[i];
		/* DIMM0 is present if rank0 and/or rank1 is enabled */
		ranks_of_dimm[0] = d->rken0 + d->rken1;
		/* DIMM1 is present if rank2 and/or rank3 is enabled */
		ranks_of_dimm[1] = d->rken2 + d->rken3;

		for (j = 0; j < DNV_MAX_DIMMS; j++) {
			if (!ranks_of_dimm[j])
				continue;

			dimm = edac_get_dimm(mci, i, j, 0);
			if (!dimm) {
				edac_dbg(0, "No allocated DIMM for channel %d DIMM %d\n", i, j);
				continue;
			}

			capacity = ranks_of_dimm[j] * banks * (1ul << rowbits) * (1ul << colbits);
			edac_dbg(0, "Channel %d DIMM %d: %lld MByte DIMM\n", i, j, capacity >> (20 - 3));
			dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
			dimm->grain = 32;
			dimm->dtype = dnv_dtypes[j ? d->dimmdwid0 : d->dimmdwid1];
			dimm->mtype = memtype;
			dimm->edac_mode = EDAC_SECDED;
			snprintf(dimm->label, sizeof(dimm->label), "Chan#%d_DIMM#%d", i, j);
		}
	}
}
static int pnd2_register_mci(struct mem_ctl_info **ppmci)
{
	struct edac_mc_layer layers[2];
	struct mem_ctl_info *mci;
	struct pnd2_pvt *pvt;
	int rc;

	rc = ops->check_ecc();
	if (rc < 0)
		return rc;

	/* Allocate a new MC control structure */
	layers[0].type = EDAC_MC_LAYER_CHANNEL;
	layers[0].size = ops->channels;
	layers[0].is_virt_csrow = false;
	layers[1].type = EDAC_MC_LAYER_SLOT;
	layers[1].size = ops->dimms_per_channel;
	layers[1].is_virt_csrow = true;
	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
	if (!mci)
		return -ENOMEM;

	pvt = mci->pvt_info;
	memset(pvt, 0, sizeof(*pvt));

	mci->mod_name = EDAC_MOD_STR;
	mci->dev_name = ops->name;
	mci->ctl_name = "Pondicherry2";

	/* Get dimm basic config and the memory layout */
	ops->get_dimm_config(mci);

	if (edac_mc_add_mc(mci)) {
		edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
		edac_mc_free(mci);
		return -EINVAL;
	}

	*ppmci = mci;

	return 0;
}
static void pnd2_unregister_mci(struct mem_ctl_info *mci)
{
	if (unlikely(!mci || !mci->pvt_info)) {
		pnd2_printk(KERN_ERR, "Couldn't find mci handler\n");
		return;
	}

	/* Remove MC sysfs nodes */
	edac_mc_del_mc(NULL);
	edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
	edac_mc_free(mci);
}
/*
 * Callback function registered with core kernel mce code.
 * Called once for each logged error.
 */
static int pnd2_mce_check_error(struct notifier_block *nb, unsigned long val, void *data)
{
	struct mce *mce = (struct mce *)data;
	struct mem_ctl_info *mci;
	struct dram_addr daddr;
	char *type;

	mci = pnd2_mci;
	if (!mci || (mce->kflags & MCE_HANDLED_CEC))
		return NOTIFY_DONE;

	/*
	 * Just let mcelog handle it if the error is
	 * outside the memory controller. A memory error
	 * is indicated by bit 7 = 1 and bits 8-11,13-15 = 0.
	 * bit 12 has a special meaning.
	 */
	if ((mce->status & 0xefff) >> 7 != 1)
		return NOTIFY_DONE;

	if (mce->mcgstatus & MCG_STATUS_MCIP)
		type = "Exception";
	else
		type = "Event";

	pnd2_mc_printk(mci, KERN_INFO, "HANDLING MCE MEMORY ERROR\n");
	pnd2_mc_printk(mci, KERN_INFO, "CPU %u: Machine Check %s: %llx Bank %u: %llx\n",
		       mce->extcpu, type, mce->mcgstatus, mce->bank, mce->status);
	pnd2_mc_printk(mci, KERN_INFO, "TSC %llx ", mce->tsc);
	pnd2_mc_printk(mci, KERN_INFO, "ADDR %llx ", mce->addr);
	pnd2_mc_printk(mci, KERN_INFO, "MISC %llx ", mce->misc);
	pnd2_mc_printk(mci, KERN_INFO, "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n",
		       mce->cpuvendor, mce->cpuid, mce->time, mce->socketid, mce->apicid);

	pnd2_mce_output_error(mci, mce, &daddr);

	/* Advise mcelog that the error was handled */
	mce->kflags |= MCE_HANDLED_EDAC;
	return NOTIFY_OK;
}
static struct notifier_block pnd2_mce_dec = {
	.notifier_call	= pnd2_mce_check_error,
	.priority	= MCE_PRIO_EDAC,
};
#ifdef CONFIG_EDAC_DEBUG
/*
 * Write an address to this file to exercise the address decode
 * logic in this driver.
 */
static u64 pnd2_fake_addr;
#define PND2_BLOB_SIZE 1024
static char pnd2_result[PND2_BLOB_SIZE];
static struct dentry *pnd2_test;
static struct debugfs_blob_wrapper pnd2_blob = {
	.data = pnd2_result,
	.size = 0
};

static int debugfs_u64_set(void *data, u64 val)
{
	struct dram_addr daddr;
	struct mce m;

	*(u64 *)data = val;
	m.mcgstatus = 0;
	/* ADDRV + MemRd + Unknown channel */
	m.status = MCI_STATUS_ADDRV + 0x9f;
	m.addr = val;
	pnd2_mce_output_error(pnd2_mci, &m, &daddr);
	snprintf(pnd2_blob.data, PND2_BLOB_SIZE,
		 "SysAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
		 m.addr, daddr.chan, daddr.dimm, daddr.rank, daddr.bank, daddr.row, daddr.col);
	pnd2_blob.size = strlen(pnd2_blob.data);

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");

static void setup_pnd2_debug(void)
{
	pnd2_test = edac_debugfs_create_dir("pnd2_test");
	edac_debugfs_create_file("pnd2_debug_addr", 0200, pnd2_test,
				 &pnd2_fake_addr, &fops_u64_wo);
	debugfs_create_blob("pnd2_debug_results", 0400, pnd2_test, &pnd2_blob);
}

static void teardown_pnd2_debug(void)
{
	debugfs_remove_recursive(pnd2_test);
}
#else
static void setup_pnd2_debug(void) {}
static void teardown_pnd2_debug(void) {}
#endif /* CONFIG_EDAC_DEBUG */
static int pnd2_probe(void)
{
	int rc;

	edac_dbg(2, "\n");
	rc = get_registers();
	if (rc)
		return rc;

	return pnd2_register_mci(&pnd2_mci);
}

static void pnd2_remove(void)
{
	edac_dbg(0, "\n");
	pnd2_unregister_mci(pnd2_mci);
}
static struct dunit_ops apl_ops = {
	.name			= "pnd2/apl",
	.type			= APL,
	.pmiaddr_shift		= LOG2_PMI_ADDR_GRANULARITY,
	.pmiidx_shift		= 0,
	.channels		= APL_NUM_CHANNELS,
	.dimms_per_channel	= 1,
	.rd_reg			= apl_rd_reg,
	.get_registers		= apl_get_registers,
	.check_ecc		= apl_check_ecc_active,
	.mk_region		= apl_mk_region,
	.get_dimm_config	= apl_get_dimm_config,
	.pmi2mem		= apl_pmi2mem,
};

static struct dunit_ops dnv_ops = {
	.name			= "pnd2/dnv",
	.type			= DNV,
	.pmiaddr_shift		= 0,
	.pmiidx_shift		= 1,
	.channels		= DNV_NUM_CHANNELS,
	.dimms_per_channel	= 2,
	.rd_reg			= dnv_rd_reg,
	.get_registers		= dnv_get_registers,
	.check_ecc		= dnv_check_ecc_active,
	.mk_region		= dnv_mk_region,
	.get_dimm_config	= dnv_get_dimm_config,
	.pmi2mem		= dnv_pmi2mem,
};
static const struct x86_cpu_id pnd2_cpuids[] = {
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT,	&apl_ops),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D,	&dnv_ops),
	{ }
};
MODULE_DEVICE_TABLE(x86cpu, pnd2_cpuids);
static int __init pnd2_init(void)
{
	const struct x86_cpu_id *id;
	const char *owner;
	int rc;

	edac_dbg(2, "\n");

	owner = edac_get_owner();
	if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
		return -EBUSY;

	if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
		return -ENODEV;

	id = x86_match_cpu(pnd2_cpuids);
	if (!id)
		return -ENODEV;

	ops = (struct dunit_ops *)id->driver_data;

	if (ops->type == APL) {
		p2sb_bus = pci_find_bus(0, 0);
		if (!p2sb_bus)
			return -ENODEV;
	}

	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
	opstate_init();

	rc = pnd2_probe();
	if (rc < 0) {
		pnd2_printk(KERN_ERR, "Failed to register device with error %d.\n", rc);
		return rc;
	}

	if (!pnd2_mci)
		return -ENODEV;

	mce_register_decode_chain(&pnd2_mce_dec);
	setup_pnd2_debug();

	return 0;
}
static void __exit pnd2_exit(void)
{
	edac_dbg(2, "\n");
	teardown_pnd2_debug();
	mce_unregister_decode_chain(&pnd2_mce_dec);
	pnd2_remove();
}
module_init(pnd2_init);
module_exit(pnd2_exit);

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Tony Luck");
MODULE_DESCRIPTION("MC Driver for Intel SoC using Pondicherry memory controller");