/*
 * edac_mc kernel module
 * (C) 2005, 2006 Linux Networx (http://lnxi.com)
 * This file may be distributed under the terms of the
 * GNU General Public License.
 *
 * Written by Thayne Harbaugh
 * Based on work by Dan Hollis <goemon at anime dot net> and others.
 *	http://www.anime.net/~goemon/linux-ecc/
 *
 * Modified by Dave Peterson and Doug Thompson
 */
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/ctype.h>
#include <linux/edac.h>
#include <linux/bitops.h>
#include <linux/uaccess.h>
#include <asm/page.h>
#include "edac_mc.h"
#include "edac_module.h"
#include <ras/ras_event.h>
#ifdef CONFIG_EDAC_ATOMIC_SCRUB
#include <asm/edac.h>
#else
#define edac_atomic_scrub(va, size) do { } while (0)
#endif
int edac_op_state = EDAC_OPSTATE_INVAL;
EXPORT_SYMBOL_GPL(edac_op_state);

static int edac_report = EDAC_REPORTING_ENABLED;

/* lock to memory controller's control array */
static DEFINE_MUTEX(mem_ctls_mutex);
static LIST_HEAD(mc_devices);
/*
 * Used to lock EDAC MC to just one module, avoiding two drivers (e.g.
 * apei/ghes and i7core_edac) being used at the same time.
 */
static const char *edac_mc_owner;
int edac_get_report_status(void)
{
	return edac_report;
}
EXPORT_SYMBOL_GPL(edac_get_report_status);
void edac_set_report_status(int new)
{
	if (new == EDAC_REPORTING_ENABLED ||
	    new == EDAC_REPORTING_DISABLED ||
	    new == EDAC_REPORTING_FORCE)
		edac_report = new;
}
EXPORT_SYMBOL_GPL(edac_set_report_status);
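/*
 * Illustrative caller pattern (not from this file): a firmware-first error
 * handler can keep other EDAC consumers quiet unless reporting was forced,
 * e.g.:
 *
 *	if (edac_get_report_status() != EDAC_REPORTING_FORCE)
 *		edac_set_report_status(EDAC_REPORTING_DISABLED);
 */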
static int edac_report_set(const char *str, const struct kernel_param *kp)
{
	if (!str)
		return -EINVAL;

	if (!strncmp(str, "on", 2))
		edac_report = EDAC_REPORTING_ENABLED;
	else if (!strncmp(str, "off", 3))
		edac_report = EDAC_REPORTING_DISABLED;
	else if (!strncmp(str, "force", 5))
		edac_report = EDAC_REPORTING_FORCE;

	return 0;
}
static int edac_report_get(char *buffer, const struct kernel_param *kp)
{
	int ret = 0;

	switch (edac_report) {
	case EDAC_REPORTING_ENABLED:
		ret = sprintf(buffer, "on");
		break;
	case EDAC_REPORTING_DISABLED:
		ret = sprintf(buffer, "off");
		break;
	case EDAC_REPORTING_FORCE:
		ret = sprintf(buffer, "force");
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
static const struct kernel_param_ops edac_report_ops = {
	.set = edac_report_set,
	.get = edac_report_get,
};

module_param_cb(edac_report, &edac_report_ops, &edac_report, 0644);
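/*
 * The callbacks above back a writable module parameter: the reporting mode
 * can be chosen on the kernel command line (edac_report=on|off|force) or
 * changed at runtime through sysfs (on a typical build,
 * /sys/module/edac_core/parameters/edac_report).
 */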
unsigned edac_dimm_info_location(struct dimm_info *dimm, char *buf,
				 unsigned len)
{
	struct mem_ctl_info *mci = dimm->mci;
	int i, n, count = 0;
	char *p = buf;

	for (i = 0; i < mci->n_layers; i++) {
		n = snprintf(p, len, "%s %d ",
			     edac_layer_name[mci->layers[i].type],
			     dimm->location[i]);
		p += n;
		len -= n;
		count += n;
		if (!len)
			break;
	}

	return count;
}
#ifdef CONFIG_EDAC_DEBUG

static void edac_mc_dump_channel(struct rank_info *chan)
{
	edac_dbg(4, "  channel->chan_idx = %d\n", chan->chan_idx);
	edac_dbg(4, "    channel = %p\n", chan);
	edac_dbg(4, "    channel->csrow = %p\n", chan->csrow);
	edac_dbg(4, "    channel->dimm = %p\n", chan->dimm);
}
static void edac_mc_dump_dimm(struct dimm_info *dimm, int number)
{
	char location[80];

	edac_dimm_info_location(dimm, location, sizeof(location));

	edac_dbg(4, "%s%i: %smapped as virtual row %d, chan %d\n",
		 dimm->mci->csbased ? "rank" : "dimm",
		 number, location, dimm->csrow, dimm->cschannel);
	edac_dbg(4, "  dimm = %p\n", dimm);
	edac_dbg(4, "  dimm->label = '%s'\n", dimm->label);
	edac_dbg(4, "  dimm->nr_pages = 0x%x\n", dimm->nr_pages);
	edac_dbg(4, "  dimm->grain = %d\n", dimm->grain);
}
static void edac_mc_dump_csrow(struct csrow_info *csrow)
{
	edac_dbg(4, "csrow->csrow_idx = %d\n", csrow->csrow_idx);
	edac_dbg(4, "  csrow = %p\n", csrow);
	edac_dbg(4, "  csrow->first_page = 0x%lx\n", csrow->first_page);
	edac_dbg(4, "  csrow->last_page = 0x%lx\n", csrow->last_page);
	edac_dbg(4, "  csrow->page_mask = 0x%lx\n", csrow->page_mask);
	edac_dbg(4, "  csrow->nr_channels = %d\n", csrow->nr_channels);
	edac_dbg(4, "  csrow->channels = %p\n", csrow->channels);
	edac_dbg(4, "  csrow->mci = %p\n", csrow->mci);
}
static void edac_mc_dump_mci(struct mem_ctl_info *mci)
{
	edac_dbg(3, "\tmci = %p\n", mci);
	edac_dbg(3, "\tmci->mtype_cap = %lx\n", mci->mtype_cap);
	edac_dbg(3, "\tmci->edac_ctl_cap = %lx\n", mci->edac_ctl_cap);
	edac_dbg(3, "\tmci->edac_cap = %lx\n", mci->edac_cap);
	edac_dbg(4, "\tmci->edac_check = %p\n", mci->edac_check);
	edac_dbg(3, "\tmci->nr_csrows = %d, csrows = %p\n",
		 mci->nr_csrows, mci->csrows);
	edac_dbg(3, "\tmci->nr_dimms = %d, dimms = %p\n",
		 mci->tot_dimms, mci->dimms);
	edac_dbg(3, "\tdev = %p\n", mci->pdev);
	edac_dbg(3, "\tmod_name:ctl_name = %s:%s\n",
		 mci->mod_name, mci->ctl_name);
	edac_dbg(3, "\tpvt_info = %p\n\n", mci->pvt_info);
}

#endif				/* CONFIG_EDAC_DEBUG */
const char * const edac_mem_types[] = {
	[MEM_EMPTY]	= "Empty",
	[MEM_RESERVED]	= "Reserved",
	[MEM_UNKNOWN]	= "Unknown",
	[MEM_FPM]	= "FPM",
	[MEM_EDO]	= "EDO",
	[MEM_BEDO]	= "BEDO",
	[MEM_SDR]	= "Unbuffered-SDR",
	[MEM_RDR]	= "Registered-SDR",
	[MEM_DDR]	= "Unbuffered-DDR",
	[MEM_RDDR]	= "Registered-DDR",
	[MEM_RMBS]	= "RMBS",
	[MEM_DDR2]	= "Unbuffered-DDR2",
	[MEM_FB_DDR2]	= "FullyBuffered-DDR2",
	[MEM_RDDR2]	= "Registered-DDR2",
	[MEM_XDR]	= "XDR",
	[MEM_DDR3]	= "Unbuffered-DDR3",
	[MEM_RDDR3]	= "Registered-DDR3",
	[MEM_LRDDR3]	= "Load-Reduced-DDR3-RAM",
	[MEM_DDR4]	= "Unbuffered-DDR4",
	[MEM_RDDR4]	= "Registered-DDR4",
	[MEM_LRDDR4]	= "Load-Reduced-DDR4-RAM",
	[MEM_NVDIMM]	= "Non-volatile-RAM",
};
EXPORT_SYMBOL_GPL(edac_mem_types);
/**
 * edac_align_ptr - Prepares the pointer offsets for a single-shot allocation
 * @p:		pointer to a pointer with the memory offset to be used. At
 *		return, this will be incremented to point to the next offset
 * @size:	Size of the data structure to be reserved
 * @n_elems:	Number of elements that should be reserved
 *
 * If 'size' is a constant, the compiler will optimize this whole function
 * down to either a no-op or the addition of a constant to the value of '*p'.
 *
 * The 'p' pointer is absolutely needed to keep the proper advancing
 * further in memory to the proper offsets when allocating the struct along
 * with its embedded structs, as edac_device_alloc_ctl_info() does it,
 * for example.
 *
 * At return, the pointer 'p' will be incremented to be used on a next call
 * to this function.
 */
void *edac_align_ptr(void **p, unsigned size, int n_elems)
{
	unsigned align, r;
	void *ptr = *p;

	*p += size * n_elems;
	/*
	 * 'p' can possibly be an unaligned item X such that sizeof(X) is
	 * 'size'. Adjust 'p' so that its alignment is at least as
	 * stringent as what the compiler would provide for X and return
	 * the aligned result.
	 * Here we assume that the alignment of a "long long" is the most
	 * stringent alignment that the compiler will ever provide by
	 * default.
	 * As far as I know, this is a reasonable assumption.
	 */
	if (size > sizeof(long))
		align = sizeof(long long);
	else if (size > sizeof(int))
		align = sizeof(long);
	else if (size > sizeof(short))
		align = sizeof(int);
	else if (size > sizeof(char))
		align = sizeof(short);
	else
		return ptr;

	r = (unsigned long)ptr % align;

	if (r == 0)
		return ptr;

	*p += align - r;

	return (void *)(((unsigned long)ptr) + align - r);
}
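/*
 * Usage sketch (illustrative): callers invoke edac_align_ptr() twice per
 * member - first against a NULL base to compute aligned offsets and the
 * total size, then they rebase the offsets into the real allocation:
 *
 *	void *ptr = NULL, *pvt;
 *	struct mem_ctl_info *mci;
 *
 *	mci = edac_align_ptr(&ptr, sizeof(*mci), 1);
 *	pvt = edac_align_ptr(&ptr, sz_pvt, 1);
 *	mci = kzalloc((unsigned long)pvt + sz_pvt, GFP_KERNEL);
 *
 * edac_mc_alloc() below uses exactly this pattern.
 */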
static void _edac_mc_free(struct mem_ctl_info *mci)
{
	int i, chn, row;
	struct csrow_info *csr;
	const unsigned int tot_dimms = mci->tot_dimms;
	const unsigned int tot_channels = mci->num_cschannel;
	const unsigned int tot_csrows = mci->nr_csrows;

	if (mci->dimms) {
		for (i = 0; i < tot_dimms; i++)
			kfree(mci->dimms[i]);
		kfree(mci->dimms);
	}

	if (mci->csrows) {
		for (row = 0; row < tot_csrows; row++) {
			csr = mci->csrows[row];
			if (csr) {
				if (csr->channels) {
					for (chn = 0; chn < tot_channels; chn++)
						kfree(csr->channels[chn]);
					kfree(csr->channels);
				}
				kfree(csr);
			}
		}
		kfree(mci->csrows);
	}
	kfree(mci);
}
struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
				   unsigned n_layers,
				   struct edac_mc_layer *layers,
				   unsigned sz_pvt)
{
	struct mem_ctl_info *mci;
	struct edac_mc_layer *layer;
	struct csrow_info *csr;
	struct rank_info *chan;
	struct dimm_info *dimm;
	u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS];
	unsigned pos[EDAC_MAX_LAYERS];
	unsigned size, tot_dimms = 1, count = 1;
	unsigned tot_csrows = 1, tot_channels = 1, tot_errcount = 0;
	void *pvt, *p, *ptr = NULL;
	int i, j, row, chn, n, len, off;
	bool per_rank = false;

	BUG_ON(n_layers > EDAC_MAX_LAYERS || n_layers == 0);
	/*
	 * Calculate the total amount of dimms and csrows/cschannels while
	 * in the old API emulation mode
	 */
	for (i = 0; i < n_layers; i++) {
		tot_dimms *= layers[i].size;
		if (layers[i].is_virt_csrow)
			tot_csrows *= layers[i].size;
		else
			tot_channels *= layers[i].size;

		if (layers[i].type == EDAC_MC_LAYER_CHIP_SELECT)
			per_rank = true;
	}
	/* Figure out the offsets of the various items from the start of an mc
	 * structure. We want the alignment of each item to be at least as
	 * stringent as what the compiler would provide if we could simply
	 * hardcode everything into a single struct.
	 */
	mci = edac_align_ptr(&ptr, sizeof(*mci), 1);
	layer = edac_align_ptr(&ptr, sizeof(*layer), n_layers);
	for (i = 0; i < n_layers; i++) {
		count *= layers[i].size;
		edac_dbg(4, "errcount layer %d size %d\n", i, count);
		ce_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count);
		ue_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count);
		tot_errcount += 2 * count;
	}

	edac_dbg(4, "allocating %d error counters\n", tot_errcount);
	pvt = edac_align_ptr(&ptr, sz_pvt, 1);
	size = ((unsigned long)pvt) + sz_pvt;

	edac_dbg(1, "allocating %u bytes for mci data (%d %s, %d csrows/channels)\n",
		 size,
		 tot_dimms,
		 per_rank ? "ranks" : "dimms",
		 tot_csrows * tot_channels);

	mci = kzalloc(size, GFP_KERNEL);
	if (mci == NULL)
		return NULL;
	/* Adjust pointers so they point within the memory we just allocated
	 * rather than an imaginary chunk of memory located at address 0.
	 */
	layer = (struct edac_mc_layer *)(((char *)mci) + ((unsigned long)layer));
	for (i = 0; i < n_layers; i++) {
		mci->ce_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ce_per_layer[i]));
		mci->ue_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ue_per_layer[i]));
	}
	pvt = sz_pvt ? (((char *)mci) + ((unsigned long)pvt)) : NULL;

	/* setup index and various internal pointers */
	mci->mc_idx = mc_num;
	mci->tot_dimms = tot_dimms;
	mci->pvt_info = pvt;
	mci->n_layers = n_layers;
	mci->layers = layer;
	memcpy(mci->layers, layers, sizeof(*layer) * n_layers);
	mci->nr_csrows = tot_csrows;
	mci->num_cschannel = tot_channels;
	mci->csbased = per_rank;
	/*
	 * Allocate and fill the csrow/channels structs
	 */
	mci->csrows = kcalloc(tot_csrows, sizeof(*mci->csrows), GFP_KERNEL);
	if (!mci->csrows)
		goto error;
	for (row = 0; row < tot_csrows; row++) {
		csr = kzalloc(sizeof(**mci->csrows), GFP_KERNEL);
		if (!csr)
			goto error;
		mci->csrows[row] = csr;
		csr->csrow_idx = row;
		csr->mci = mci;
		csr->nr_channels = tot_channels;
		csr->channels = kcalloc(tot_channels, sizeof(*csr->channels),
					GFP_KERNEL);
		if (!csr->channels)
			goto error;

		for (chn = 0; chn < tot_channels; chn++) {
			chan = kzalloc(sizeof(**csr->channels), GFP_KERNEL);
			if (!chan)
				goto error;
			csr->channels[chn] = chan;
			chan->chan_idx = chn;
			chan->csrow = csr;
		}
	}
	/*
	 * Allocate and fill the dimm structs
	 */
	mci->dimms = kcalloc(tot_dimms, sizeof(*mci->dimms), GFP_KERNEL);
	if (!mci->dimms)
		goto error;

	memset(&pos, 0, sizeof(pos));
	row = 0;
	chn = 0;
	for (i = 0; i < tot_dimms; i++) {
		chan = mci->csrows[row]->channels[chn];
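		/*
		 * EDAC_DIMM_OFF() linearizes the per-layer coordinates in
		 * pos[] into a single index into mci->dimms, in row-major
		 * order (the lowest layer varies fastest).
		 */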
		off = EDAC_DIMM_OFF(layer, n_layers, pos[0], pos[1], pos[2]);
		if (off < 0 || off >= tot_dimms) {
			edac_mc_printk(mci, KERN_ERR, "EDAC core bug: EDAC_DIMM_OFF is trying to do an illegal data access\n");
			goto error;
		}

		dimm = kzalloc(sizeof(**mci->dimms), GFP_KERNEL);
		if (!dimm)
			goto error;
		mci->dimms[off] = dimm;
		dimm->mci = mci;
		/*
		 * Copy DIMM location and initialize it.
		 */
		len = sizeof(dimm->label);
		p = dimm->label;
		n = snprintf(p, len, "mc#%u", mc_num);
		p += n;
		len -= n;
		for (j = 0; j < n_layers; j++) {
			n = snprintf(p, len, "%s#%u",
				     edac_layer_name[layers[j].type],
				     pos[j]);
			p += n;
			len -= n;
			dimm->location[j] = pos[j];

			if (len <= 0)
				break;
		}
		/* Link it to the csrows old API data */
		chan->dimm = dimm;
		dimm->csrow = row;
		dimm->cschannel = chn;

		/* Increment csrow location */
		if (layers[0].is_virt_csrow) {
			chn++;
			if (chn == tot_channels) {
				chn = 0;
				row++;
			}
		} else {
			row++;
			if (row == tot_csrows) {
				row = 0;
				chn++;
			}
		}

		/* Increment dimm location */
		for (j = n_layers - 1; j >= 0; j--) {
			pos[j]++;
			if (pos[j] < layers[j].size)
				break;
			pos[j] = 0;
		}
	}

	mci->op_state = OP_ALLOC;

	return mci;

error:
	_edac_mc_free(mci);

	return NULL;
}
EXPORT_SYMBOL_GPL(edac_mc_alloc);
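/*
 * Example (hypothetical driver code, not part of this file): a controller
 * with 4 chip-select rows of 2 channels each could be described as:
 *
 *	struct edac_mc_layer layers[2];
 *
 *	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
 *	layers[0].size = 4;
 *	layers[0].is_virt_csrow = true;
 *	layers[1].type = EDAC_MC_LAYER_CHANNEL;
 *	layers[1].size = 2;
 *	layers[1].is_virt_csrow = false;
 *	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
 *			    sizeof(struct my_pvt));
 *
 * where "struct my_pvt" stands in for the driver's private data.
 */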
void edac_mc_free(struct mem_ctl_info *mci)
{
	edac_dbg(1, "\n");

	/* If we're not yet registered with sysfs free only what was allocated
	 * in edac_mc_alloc().
	 */
	if (!device_is_registered(&mci->dev)) {
		_edac_mc_free(mci);
		return;
	}

	/* the mci instance is freed here, when the sysfs object is dropped */
	edac_unregister_sysfs(mci);
}
EXPORT_SYMBOL_GPL(edac_mc_free);
bool edac_has_mcs(void)
{
	bool ret;

	mutex_lock(&mem_ctls_mutex);

	ret = !list_empty(&mc_devices);

	mutex_unlock(&mem_ctls_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(edac_has_mcs);
/* Caller must hold mem_ctls_mutex */
static struct mem_ctl_info *__find_mci_by_dev(struct device *dev)
{
	struct mem_ctl_info *mci;
	struct list_head *item;

	edac_dbg(3, "\n");

	list_for_each(item, &mc_devices) {
		mci = list_entry(item, struct mem_ctl_info, link);

		if (mci->pdev == dev)
			return mci;
	}

	return NULL;
}
/**
 * find_mci_by_dev
 *
 *	scan list of controllers looking for the one that manages
 *	the 'dev' device
 * @dev: pointer to a struct device related with the MCI
 */
struct mem_ctl_info *find_mci_by_dev(struct device *dev)
{
	struct mem_ctl_info *ret;

	mutex_lock(&mem_ctls_mutex);
	ret = __find_mci_by_dev(dev);
	mutex_unlock(&mem_ctls_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(find_mci_by_dev);
/*
 * edac_mc_workq_function
 *	performs the operation scheduled by a workq request
 */
static void edac_mc_workq_function(struct work_struct *work_req)
{
	struct delayed_work *d_work = to_delayed_work(work_req);
	struct mem_ctl_info *mci = to_edac_mem_ctl_work(d_work);

	mutex_lock(&mem_ctls_mutex);

	if (mci->op_state != OP_RUNNING_POLL) {
		mutex_unlock(&mem_ctls_mutex);
		return;
	}

	if (edac_op_state == EDAC_OPSTATE_POLL)
		mci->edac_check(mci);

	mutex_unlock(&mem_ctls_mutex);

	/* Queue ourselves again. */
	edac_queue_work(&mci->work, msecs_to_jiffies(edac_mc_get_poll_msec()));
}
/*
 * edac_mc_reset_delay_period(unsigned long value)
 *
 *	user space has updated our poll period value, need to
 *	reset our workq delays
 */
void edac_mc_reset_delay_period(unsigned long value)
{
	struct mem_ctl_info *mci;
	struct list_head *item;

	mutex_lock(&mem_ctls_mutex);

	list_for_each(item, &mc_devices) {
		mci = list_entry(item, struct mem_ctl_info, link);

		if (mci->op_state == OP_RUNNING_POLL)
			edac_mod_work(&mci->work, value);
	}
	mutex_unlock(&mem_ctls_mutex);
}
/* Return 0 on success, 1 on failure.
 * Before calling this function, caller must
 * assign a unique value to mci->mc_idx.
 *
 *	locking model:
 *
 *		called with the mem_ctls_mutex lock held
 */
static int add_mc_to_global_list(struct mem_ctl_info *mci)
{
	struct list_head *item, *insert_before;
	struct mem_ctl_info *p;

	insert_before = &mc_devices;

	p = __find_mci_by_dev(mci->pdev);
	if (unlikely(p != NULL))
		goto fail0;

	list_for_each(item, &mc_devices) {
		p = list_entry(item, struct mem_ctl_info, link);

		if (p->mc_idx >= mci->mc_idx) {
			if (unlikely(p->mc_idx == mci->mc_idx))
				goto fail1;

			insert_before = item;
			break;
		}
	}

	list_add_tail_rcu(&mci->link, insert_before);
	return 0;

fail0:
	edac_printk(KERN_WARNING, EDAC_MC,
		"%s (%s) %s %s already assigned %d\n", dev_name(p->pdev),
		edac_dev_name(mci), p->mod_name, p->ctl_name, p->mc_idx);
	return 1;

fail1:
	edac_printk(KERN_WARNING, EDAC_MC,
		"bug in low-level driver: attempt to assign\n"
		"    duplicate mc_idx %d in %s()\n", p->mc_idx, __func__);
	return 1;
}
static int del_mc_from_global_list(struct mem_ctl_info *mci)
{
	list_del_rcu(&mci->link);

	/* these are for safe removal of devices from global list while
	 * NMI handlers may be traversing list
	 */
	synchronize_rcu();
	INIT_LIST_HEAD(&mci->link);

	return list_empty(&mc_devices);
}
struct mem_ctl_info *edac_mc_find(int idx)
{
	struct mem_ctl_info *mci;
	struct list_head *item;

	mutex_lock(&mem_ctls_mutex);

	list_for_each(item, &mc_devices) {
		mci = list_entry(item, struct mem_ctl_info, link);
		if (mci->mc_idx == idx)
			goto unlock;
	}

	mci = NULL;
unlock:
	mutex_unlock(&mem_ctls_mutex);
	return mci;
}
EXPORT_SYMBOL(edac_mc_find);
const char *edac_get_owner(void)
{
	return edac_mc_owner;
}
EXPORT_SYMBOL_GPL(edac_get_owner);
/* FIXME - should a warning be printed if no error detection? correction? */
int edac_mc_add_mc_with_groups(struct mem_ctl_info *mci,
			       const struct attribute_group **groups)
{
	int ret = -EINVAL;
	edac_dbg(0, "\n");

#ifdef CONFIG_EDAC_DEBUG
	if (edac_debug_level >= 3)
		edac_mc_dump_mci(mci);

	if (edac_debug_level >= 4) {
		int i;

		for (i = 0; i < mci->nr_csrows; i++) {
			struct csrow_info *csrow = mci->csrows[i];
			u32 nr_pages = 0;
			int j;

			for (j = 0; j < csrow->nr_channels; j++)
				nr_pages += csrow->channels[j]->dimm->nr_pages;
			if (!nr_pages)
				continue;
			edac_mc_dump_csrow(csrow);
			for (j = 0; j < csrow->nr_channels; j++)
				if (csrow->channels[j]->dimm->nr_pages)
					edac_mc_dump_channel(csrow->channels[j]);
		}
		for (i = 0; i < mci->tot_dimms; i++)
			if (mci->dimms[i]->nr_pages)
				edac_mc_dump_dimm(mci->dimms[i], i);
	}
#endif
	mutex_lock(&mem_ctls_mutex);

	if (edac_mc_owner && edac_mc_owner != mci->mod_name) {
		ret = -EPERM;
		goto fail0;
	}

	if (add_mc_to_global_list(mci))
		goto fail0;

	/* set load time so that error rate can be tracked */
	mci->start_time = jiffies;

	mci->bus = edac_get_sysfs_subsys();

	if (edac_create_sysfs_mci_device(mci, groups)) {
		edac_mc_printk(mci, KERN_WARNING,
			"failed to create sysfs device\n");
		goto fail1;
	}

	if (mci->edac_check) {
		mci->op_state = OP_RUNNING_POLL;

		INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
		edac_queue_work(&mci->work, msecs_to_jiffies(edac_mc_get_poll_msec()));
	} else {
		mci->op_state = OP_RUNNING_INTERRUPT;
	}

	/* Report action taken */
	edac_mc_printk(mci, KERN_INFO,
		"Giving out device to module %s controller %s: DEV %s (%s)\n",
		mci->mod_name, mci->ctl_name, mci->dev_name,
		edac_op_state_to_string(mci->op_state));

	edac_mc_owner = mci->mod_name;

	mutex_unlock(&mem_ctls_mutex);
	return 0;

fail1:
	del_mc_from_global_list(mci);

fail0:
	mutex_unlock(&mem_ctls_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(edac_mc_add_mc_with_groups);
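/*
 * Typical driver flow around this entry point (illustrative): probe code
 * calls edac_mc_alloc(), fills mci->pdev, mci->mod_name, mci->ctl_name,
 * the DIMM data and - for polled operation - mci->edac_check, then
 * registers via edac_mc_add_mc_with_groups() (or the edac_mc_add_mc()
 * wrapper). On failure the driver must call edac_mc_free() itself, as the
 * core has not taken ownership of the mci.
 */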
struct mem_ctl_info *edac_mc_del_mc(struct device *dev)
{
	struct mem_ctl_info *mci;

	edac_dbg(0, "\n");

	mutex_lock(&mem_ctls_mutex);

	/* find the requested mci struct in the global list */
	mci = __find_mci_by_dev(dev);
	if (mci == NULL) {
		mutex_unlock(&mem_ctls_mutex);
		return NULL;
	}

	/* mark MCI offline: */
	mci->op_state = OP_OFFLINE;

	if (del_mc_from_global_list(mci))
		edac_mc_owner = NULL;

	mutex_unlock(&mem_ctls_mutex);

	if (mci->edac_check)
		edac_stop_work(&mci->work);

	/* remove from sysfs */
	edac_remove_sysfs_mci_device(mci);

	edac_printk(KERN_INFO, EDAC_MC,
		"Removed device %d for %s %s: DEV %s\n", mci->mc_idx,
		mci->mod_name, mci->ctl_name, edac_dev_name(mci));

	return mci;
}
EXPORT_SYMBOL_GPL(edac_mc_del_mc);
static void edac_mc_scrub_block(unsigned long page, unsigned long offset,
				u32 size)
{
	struct page *pg;
	void *virt_addr;
	unsigned long flags = 0;

	edac_dbg(3, "\n");

	/* ECC error page was not in our memory. Ignore it. */
	if (!pfn_valid(page))
		return;

	/* Find the actual page structure then map it and fix */
	pg = pfn_to_page(page);

	if (PageHighMem(pg))
		local_irq_save(flags);

	virt_addr = kmap_atomic(pg);

	/* Perform architecture specific atomic scrub operation */
	edac_atomic_scrub(virt_addr + offset, size);

	/* Unmap and complete */
	kunmap_atomic(virt_addr);

	if (PageHighMem(pg))
		local_irq_restore(flags);
}
/* FIXME - should return -1 */
int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page)
{
	struct csrow_info **csrows = mci->csrows;
	int row, i, j, n;

	edac_dbg(1, "MC%d: 0x%lx\n", mci->mc_idx, page);
	row = -1;

	for (i = 0; i < mci->nr_csrows; i++) {
		struct csrow_info *csrow = csrows[i];
		n = 0;
		for (j = 0; j < csrow->nr_channels; j++) {
			struct dimm_info *dimm = csrow->channels[j]->dimm;
			n += dimm->nr_pages;
		}
		if (n == 0)
			continue;

		edac_dbg(3, "MC%d: first(0x%lx) page(0x%lx) last(0x%lx) mask(0x%lx)\n",
			 mci->mc_idx,
			 csrow->first_page, page, csrow->last_page,
			 csrow->page_mask);

		if ((page >= csrow->first_page) &&
		    (page <= csrow->last_page) &&
		    ((page & csrow->page_mask) ==
		     (csrow->first_page & csrow->page_mask))) {
			row = i;
			break;
		}
	}

	if (row == -1)
		edac_mc_printk(mci, KERN_ERR,
			"could not look up page error address %lx\n",
			(unsigned long)page);

	return row;
}
EXPORT_SYMBOL_GPL(edac_mc_find_csrow_by_page);
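/*
 * Illustrative use: a driver that decodes a physical error address from
 * its hardware registers converts it to a page frame number first, e.g.
 * edac_mc_find_csrow_by_page(mci, err_addr >> PAGE_SHIFT).
 */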
const char *edac_layer_name[] = {
	[EDAC_MC_LAYER_BRANCH] = "branch",
	[EDAC_MC_LAYER_CHANNEL] = "channel",
	[EDAC_MC_LAYER_SLOT] = "slot",
	[EDAC_MC_LAYER_CHIP_SELECT] = "csrow",
	[EDAC_MC_LAYER_ALL_MEM] = "memory",
};
EXPORT_SYMBOL_GPL(edac_layer_name);
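/*
 * The helpers below maintain the per-layer error counters. The counters
 * for layer i form a flat array indexed in row-major order over the first
 * i + 1 layer coordinates, i.e.
 *
 *	index = (pos[0] * size[1] + pos[1]) * size[2] + pos[2]
 *
 * which is what the running "index" computation in their loops implements.
 */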
static void edac_inc_ce_error(struct mem_ctl_info *mci,
			      bool enable_per_layer_report,
			      const int pos[EDAC_MAX_LAYERS],
			      const u16 count)
{
	int i, index = 0;

	mci->ce_mc += count;

	if (!enable_per_layer_report) {
		mci->ce_noinfo_count += count;
		return;
	}

	for (i = 0; i < mci->n_layers; i++) {
		if (pos[i] < 0)
			break;
		index += pos[i];
		mci->ce_per_layer[i][index] += count;

		if (i < mci->n_layers - 1)
			index *= mci->layers[i + 1].size;
	}
}
static void edac_inc_ue_error(struct mem_ctl_info *mci,
			      bool enable_per_layer_report,
			      const int pos[EDAC_MAX_LAYERS],
			      const u16 count)
{
	int i, index = 0;

	mci->ue_mc += count;

	if (!enable_per_layer_report) {
		mci->ue_noinfo_count += count;
		return;
	}

	for (i = 0; i < mci->n_layers; i++) {
		if (pos[i] < 0)
			break;
		index += pos[i];
		mci->ue_per_layer[i][index] += count;

		if (i < mci->n_layers - 1)
			index *= mci->layers[i + 1].size;
	}
}
static void edac_ce_error(struct mem_ctl_info *mci,
			  const u16 error_count,
			  const int pos[EDAC_MAX_LAYERS],
			  const char *msg,
			  const char *location,
			  const char *label,
			  const char *detail,
			  const char *other_detail,
			  const bool enable_per_layer_report,
			  const unsigned long page_frame_number,
			  const unsigned long offset_in_page,
			  long grain)
{
	unsigned long remapped_page;
	char *msg_aux = "";

	if (*msg)
		msg_aux = " ";

	if (edac_mc_get_log_ce()) {
		if (other_detail && *other_detail)
			edac_mc_printk(mci, KERN_WARNING,
				       "%d CE %s%son %s (%s %s - %s)\n",
				       error_count, msg, msg_aux, label,
				       location, detail, other_detail);
		else
			edac_mc_printk(mci, KERN_WARNING,
				       "%d CE %s%son %s (%s %s)\n",
				       error_count, msg, msg_aux, label,
				       location, detail);
	}
	edac_inc_ce_error(mci, enable_per_layer_report, pos, error_count);

	if (mci->scrub_mode == SCRUB_SW_SRC) {
		/*
		 * Some memory controllers (called MCs below) can remap
		 * memory so that it is still available at a different
		 * address when PCI devices map into memory.
		 * MCs that can't do this lose the memory where PCI
		 * devices are mapped. This mapping is MC-dependent
		 * and so we call back into the MC driver for it to
		 * map the MC page to a physical (CPU) page which can
		 * then be mapped to a virtual page - which can then
		 * be scrubbed.
		 */
		remapped_page = mci->ctl_page_to_phys ?
			mci->ctl_page_to_phys(mci, page_frame_number) :
			page_frame_number;

		edac_mc_scrub_block(remapped_page,
				    offset_in_page, grain);
	}
}
static void edac_ue_error(struct mem_ctl_info *mci,
			  const u16 error_count,
			  const int pos[EDAC_MAX_LAYERS],
			  const char *msg,
			  const char *location,
			  const char *label,
			  const char *detail,
			  const char *other_detail,
			  const bool enable_per_layer_report)
{
	char *msg_aux = "";

	if (*msg)
		msg_aux = " ";

	if (edac_mc_get_log_ue()) {
		if (other_detail && *other_detail)
			edac_mc_printk(mci, KERN_WARNING,
				       "%d UE %s%son %s (%s %s - %s)\n",
				       error_count, msg, msg_aux, label,
				       location, detail, other_detail);
		else
			edac_mc_printk(mci, KERN_WARNING,
				       "%d UE %s%son %s (%s %s)\n",
				       error_count, msg, msg_aux, label,
				       location, detail);
	}

	if (edac_mc_get_panic_on_ue()) {
		if (other_detail && *other_detail)
			panic("UE %s%son %s (%s%s - %s)\n",
			      msg, msg_aux, label, location, detail, other_detail);
		else
			panic("UE %s%son %s (%s%s)\n",
			      msg, msg_aux, label, location, detail);
	}

	edac_inc_ue_error(mci, enable_per_layer_report, pos, error_count);
}
void edac_raw_mc_handle_error(const enum hw_event_mc_err_type type,
			      struct mem_ctl_info *mci,
			      struct edac_raw_error_desc *e)
{
	char detail[80];
	int pos[EDAC_MAX_LAYERS] = { e->top_layer, e->mid_layer, e->low_layer };

	/* Memory type dependent details about the error */
	if (type == HW_EVENT_ERR_CORRECTED) {
		snprintf(detail, sizeof(detail),
			"page:0x%lx offset:0x%lx grain:%ld syndrome:0x%lx",
			e->page_frame_number, e->offset_in_page,
			e->grain, e->syndrome);
		edac_ce_error(mci, e->error_count, pos, e->msg, e->location, e->label,
			      detail, e->other_detail, e->enable_per_layer_report,
			      e->page_frame_number, e->offset_in_page, e->grain);
	} else {
		snprintf(detail, sizeof(detail),
			"page:0x%lx offset:0x%lx grain:%ld",
			e->page_frame_number, e->offset_in_page, e->grain);

		edac_ue_error(mci, e->error_count, pos, e->msg, e->location, e->label,
			      detail, e->other_detail, e->enable_per_layer_report);
	}
}
EXPORT_SYMBOL_GPL(edac_raw_mc_handle_error);
void edac_mc_handle_error(const enum hw_event_mc_err_type type,
			  struct mem_ctl_info *mci,
			  const u16 error_count,
			  const unsigned long page_frame_number,
			  const unsigned long offset_in_page,
			  const unsigned long syndrome,
			  const int top_layer,
			  const int mid_layer,
			  const int low_layer,
			  const char *msg,
			  const char *other_detail)
{
	char *p;
	int row = -1, chan = -1;
	int pos[EDAC_MAX_LAYERS] = { top_layer, mid_layer, low_layer };
	int i, n_labels = 0;
	u8 grain_bits;
	struct edac_raw_error_desc *e = &mci->error_desc;

	edac_dbg(3, "MC%d\n", mci->mc_idx);

	/* Fills the error report buffer */
	memset(e, 0, sizeof(*e));
	e->error_count = error_count;
	e->top_layer = top_layer;
	e->mid_layer = mid_layer;
	e->low_layer = low_layer;
	e->page_frame_number = page_frame_number;
	e->offset_in_page = offset_in_page;
	e->syndrome = syndrome;
	e->msg = msg;
	e->other_detail = other_detail;
	/*
	 * Check if the event report is consistent and if the memory
	 * location is known. If it is known, enable_per_layer_report will be
	 * true, the DIMM(s) label info will be filled and the per-layer
	 * error counters will be incremented.
	 */
	for (i = 0; i < mci->n_layers; i++) {
		if (pos[i] >= (int)mci->layers[i].size) {
			edac_mc_printk(mci, KERN_ERR,
				       "INTERNAL ERROR: %s value is out of range (%d >= %d)\n",
				       edac_layer_name[mci->layers[i].type],
				       pos[i], mci->layers[i].size);
			/*
			 * Instead of just returning it, let's use what's
			 * known about the error. The increment routines and
			 * the DIMM filter logic will do the right thing by
			 * pointing the likely damaged DIMMs.
			 */
			pos[i] = -1;
		}
		if (pos[i] >= 0)
			e->enable_per_layer_report = true;
	}
	/*
	 * Get the dimm label/grain that applies to the match criteria.
	 * As the error algorithm may not be able to point to just one memory
	 * stick, the logic here will get all possible labels that could
	 * potentially be affected by the error.
	 * On FB-DIMM memory controllers, for uncorrected errors, it is common
	 * to have only the MC channel and the MC dimm (also called "branch")
	 * but the channel is not known, as the memory is arranged in pairs,
	 * where each memory belongs to a separate channel within the same
	 * branch.
	 */
	p = e->label;
	*p = '\0';

	for (i = 0; i < mci->tot_dimms; i++) {
		struct dimm_info *dimm = mci->dimms[i];

		if (top_layer >= 0 && top_layer != dimm->location[0])
			continue;
		if (mid_layer >= 0 && mid_layer != dimm->location[1])
			continue;
		if (low_layer >= 0 && low_layer != dimm->location[2])
			continue;

		/* get the max grain, over the error match range */
		if (dimm->grain > e->grain)
			e->grain = dimm->grain;

		/*
		 * If the error is memory-controller wide, there's no need to
		 * seek for the affected DIMMs because the whole
		 * channel/memory controller/... may be affected.
		 * Also, don't show errors for empty DIMM slots.
		 */
		if (e->enable_per_layer_report && dimm->nr_pages) {
			if (n_labels >= EDAC_MAX_LABELS) {
				e->enable_per_layer_report = false;
				break;
			}
			n_labels++;
			if (p != e->label) {
				strcpy(p, OTHER_LABEL);
				p += strlen(OTHER_LABEL);
			}
			strcpy(p, dimm->label);
			p += strlen(p);
			*p = '\0';

			/*
			 * get csrow/channel of the DIMM, in order to allow
			 * incrementing the compat API counters
			 */
			edac_dbg(4, "%s csrows map: (%d,%d)\n",
				 mci->csbased ? "rank" : "dimm",
				 dimm->csrow, dimm->cschannel);
			if (row == -1)
				row = dimm->csrow;
			else if (row >= 0 && row != dimm->csrow)
				row = -2;

			if (chan == -1)
				chan = dimm->cschannel;
			else if (chan >= 0 && chan != dimm->cschannel)
				chan = -2;
		}
	}
	if (!e->enable_per_layer_report) {
		strcpy(e->label, "any memory");
	} else {
		edac_dbg(4, "csrow/channel to increment: (%d,%d)\n", row, chan);
		if (p == e->label)
			strcpy(e->label, "unknown memory");
		if (type == HW_EVENT_ERR_CORRECTED) {
			if (row >= 0) {
				mci->csrows[row]->ce_count += error_count;
				if (chan >= 0)
					mci->csrows[row]->channels[chan]->ce_count += error_count;
			}
		} else {
			if (row >= 0)
				mci->csrows[row]->ue_count += error_count;
		}
	}
	/* Fill the RAM location data */
	p = e->location;

	for (i = 0; i < mci->n_layers; i++) {
		if (pos[i] < 0)
			continue;

		p += sprintf(p, "%s:%d ",
			     edac_layer_name[mci->layers[i].type],
			     pos[i]);
	}
	if (p > e->location)
		*(p - 1) = '\0';

	/* Report the error via the trace interface */
	grain_bits = fls_long(e->grain) + 1;

	if (IS_ENABLED(CONFIG_RAS))
		trace_mc_event(type, e->msg, e->label, e->error_count,
			       mci->mc_idx, e->top_layer, e->mid_layer,
			       e->low_layer,
			       (e->page_frame_number << PAGE_SHIFT) | e->offset_in_page,
			       grain_bits, e->syndrome, e->other_detail);

	edac_raw_mc_handle_error(type, mci, e);
}
EXPORT_SYMBOL_GPL(edac_mc_handle_error);
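/*
 * Example call (hypothetical values): a driver that decoded a single
 * corrected error at csrow 2, channel 1 could report it as:
 *
 *	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
 *			     pfn, offset, syndrome,
 *			     2, 1, -1, "read error", "");
 *
 * Layers that could not be decoded are passed as -1, which makes the
 * label logic above match every DIMM along that layer.
 */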