2 * Intel e752x Memory Controller kernel module
3 * (C) 2004 Linux Networx (http://lnxi.com)
4 * This file may be distributed under the terms of the
5 * GNU General Public License.
7 * Implement support for the e7520, E7525, e7320 and i3100 memory controllers.
10 * https://www.intel.in/content/www/in/en/chipsets/e7525-memory-controller-hub-datasheet.html
11 * ftp://download.intel.com/design/intarch/datashts/31345803.pdf
13 * Written by Tom Zimmerman
16 * Thayne Harbaugh at realmsys.com (?)
17 * Wang Zhenyu at intel.com
18 * Dave Jiang at mvista.com
22 #include <linux/module.h>
23 #include <linux/init.h>
24 #include <linux/pci.h>
25 #include <linux/pci_ids.h>
26 #include <linux/edac.h>
27 #include "edac_module.h"
29 #define EDAC_MOD_STR "e752x_edac"
31 static int report_non_memory_errors;
32 static int force_function_unhide;
33 static int sysbus_parity = -1;
35 static struct edac_pci_ctl_info *e752x_pci;
37 #define e752x_printk(level, fmt, arg...) \
38 edac_printk(level, "e752x", fmt, ##arg)
40 #define e752x_mc_printk(mci, level, fmt, arg...) \
41 edac_mc_chipset_printk(mci, level, "e752x", fmt, ##arg)
43 #ifndef PCI_DEVICE_ID_INTEL_7520_0
44 #define PCI_DEVICE_ID_INTEL_7520_0 0x3590
45 #endif /* PCI_DEVICE_ID_INTEL_7520_0 */
47 #ifndef PCI_DEVICE_ID_INTEL_7520_1_ERR
48 #define PCI_DEVICE_ID_INTEL_7520_1_ERR 0x3591
49 #endif /* PCI_DEVICE_ID_INTEL_7520_1_ERR */
51 #ifndef PCI_DEVICE_ID_INTEL_7525_0
52 #define PCI_DEVICE_ID_INTEL_7525_0 0x359E
53 #endif /* PCI_DEVICE_ID_INTEL_7525_0 */
55 #ifndef PCI_DEVICE_ID_INTEL_7525_1_ERR
56 #define PCI_DEVICE_ID_INTEL_7525_1_ERR 0x3593
57 #endif /* PCI_DEVICE_ID_INTEL_7525_1_ERR */
59 #ifndef PCI_DEVICE_ID_INTEL_7320_0
60 #define PCI_DEVICE_ID_INTEL_7320_0 0x3592
61 #endif /* PCI_DEVICE_ID_INTEL_7320_0 */
63 #ifndef PCI_DEVICE_ID_INTEL_7320_1_ERR
64 #define PCI_DEVICE_ID_INTEL_7320_1_ERR 0x3593
65 #endif /* PCI_DEVICE_ID_INTEL_7320_1_ERR */
67 #ifndef PCI_DEVICE_ID_INTEL_3100_0
68 #define PCI_DEVICE_ID_INTEL_3100_0 0x35B0
69 #endif /* PCI_DEVICE_ID_INTEL_3100_0 */
71 #ifndef PCI_DEVICE_ID_INTEL_3100_1_ERR
72 #define PCI_DEVICE_ID_INTEL_3100_1_ERR 0x35B1
73 #endif /* PCI_DEVICE_ID_INTEL_3100_1_ERR */
75 #define E752X_NR_CSROWS 8 /* number of csrows */
77 /* E752X register addresses - device 0 function 0 */
78 #define E752X_MCHSCRB 0x52 /* Memory Scrub register (16b) */
80 * 6:5 Scrub Completion Count
81 * 3:2 Scrub Rate (i3100 only)
83 * 1:0 Scrub Mode enable
86 #define E752X_DRB 0x60 /* DRAM row boundary register (8b) */
87 #define E752X_DRA 0x70 /* DRAM row attribute register (8b) */
89 * 31:30 Device width row 7
90 * 01=x8 10=x4 11=x8 DDR2
91 * 27:26 Device width row 6
92 * 23:22 Device width row 5
93 * 19:18 Device width row 4
94 * 15:14 Device width row 3
95 * 11:10 Device width row 2
96 * 7:6 Device width row 1
97 * 3:2 Device width row 0
99 #define E752X_DRC 0x7C /* DRAM controller mode reg (32b) */
100 /* FIXME:IS THIS RIGHT? */
102 * 22 Number channels 0=1,1=2
103 * 19:18 DRB Granularity 32/64MB
105 #define E752X_DRM 0x80 /* Dimm mapping register */
106 #define E752X_DDRCSR 0x9A /* DDR control and status reg (16b) */
108 * 14:12 1 single A, 2 single B, 3 dual
110 #define E752X_TOLM 0xC4 /* DRAM top of low memory reg (16b) */
111 #define E752X_REMAPBASE 0xC6 /* DRAM remap base address reg (16b) */
112 #define E752X_REMAPLIMIT 0xC8 /* DRAM remap limit address reg (16b) */
113 #define E752X_REMAPOFFSET 0xCA /* DRAM remap limit offset reg (16b) */
115 /* E752X register addresses - device 0 function 1 */
116 #define E752X_FERR_GLOBAL 0x40 /* Global first error register (32b) */
117 #define E752X_NERR_GLOBAL 0x44 /* Global next error register (32b) */
118 #define E752X_HI_FERR 0x50 /* Hub interface first error reg (8b) */
119 #define E752X_HI_NERR 0x52 /* Hub interface next error reg (8b) */
120 #define E752X_HI_ERRMASK 0x54 /* Hub interface error mask reg (8b) */
121 #define E752X_HI_SMICMD 0x5A /* Hub interface SMI command reg (8b) */
122 #define E752X_SYSBUS_FERR 0x60 /* System buss first error reg (16b) */
123 #define E752X_SYSBUS_NERR 0x62 /* System buss next error reg (16b) */
124 #define E752X_SYSBUS_ERRMASK 0x64 /* System buss error mask reg (16b) */
125 #define E752X_SYSBUS_SMICMD 0x6A /* System buss SMI command reg (16b) */
126 #define E752X_BUF_FERR 0x70 /* Memory buffer first error reg (8b) */
127 #define E752X_BUF_NERR 0x72 /* Memory buffer next error reg (8b) */
128 #define E752X_BUF_ERRMASK 0x74 /* Memory buffer error mask reg (8b) */
129 #define E752X_BUF_SMICMD 0x7A /* Memory buffer SMI cmd reg (8b) */
130 #define E752X_DRAM_FERR 0x80 /* DRAM first error register (16b) */
131 #define E752X_DRAM_NERR 0x82 /* DRAM next error register (16b) */
132 #define E752X_DRAM_ERRMASK 0x84 /* DRAM error mask register (8b) */
133 #define E752X_DRAM_SMICMD 0x8A /* DRAM SMI command register (8b) */
134 #define E752X_DRAM_RETR_ADD 0xAC /* DRAM Retry address register (32b) */
135 #define E752X_DRAM_SEC1_ADD 0xA0 /* DRAM first correctable memory */
136 /* error address register (32b) */
139 * 30:2 CE address (64 byte block 34:6)
143 #define E752X_DRAM_SEC2_ADD 0xC8 /* DRAM first correctable memory */
144 /* error address register (32b) */
147 * 30:2 CE address (64 byte block 34:6)
151 #define E752X_DRAM_DED_ADD 0xA4 /* DRAM first uncorrectable memory */
152 /* error address register (32b) */
155 * 30:2 CE address (64 byte block 34:6)
159 #define E752X_DRAM_SCRB_ADD 0xA8 /* DRAM 1st uncorrectable scrub mem */
160 /* error address register (32b) */
163 * 30:2 CE address (64 byte block 34:6)
167 #define E752X_DRAM_SEC1_SYNDROME 0xC4 /* DRAM first correctable memory */
168 /* error syndrome register (16b) */
169 #define E752X_DRAM_SEC2_SYNDROME 0xC6 /* DRAM second correctable memory */
170 /* error syndrome register (16b) */
171 #define E752X_DEVPRES1 0xF4 /* Device Present 1 register (8b) */
173 /* 3100 IMCH specific register addresses - device 0 function 1 */
174 #define I3100_NSI_FERR 0x48 /* NSI first error reg (32b) */
175 #define I3100_NSI_NERR 0x4C /* NSI next error reg (32b) */
176 #define I3100_NSI_SMICMD 0x54 /* NSI SMI command register (32b) */
177 #define I3100_NSI_EMASK 0x90 /* NSI error mask register (32b) */
179 /* ICH5R register addresses - device 30 function 0 */
180 #define ICH5R_PCI_STAT 0x06 /* PCI status register (16b) */
181 #define ICH5R_PCI_2ND_STAT 0x1E /* PCI status secondary reg (16b) */
182 #define ICH5R_PCI_BRIDGE_CTL 0x3E /* PCI bridge control register (16b) */
192 * Those chips support single-rank and dual-rank memories only.
194 * On e752x chips, the odd rows are present only on dual-rank memories.
195 * Dividing the rank by two will provide the dimm#
197 * i3100 MC has a different mapping: it supports only 4 ranks.
199 * The mapping is (from 1 to n):
200 * slot single-ranked double-ranked
201 * dimm #1 -> rank #4 NA
202 * dimm #2 -> rank #3 NA
203 * dimm #3 -> rank #2 Ranks 2 and 3
204 * dimm #4 -> rank #1 Ranks 1 and 4
206 * FIXME: The current mapping for i3100 considers that it supports up to 8
207 * ranks/channel, but datasheet says that the MC supports only 4 ranks.
/* Tail of struct e752x_pvt: handles for device 0 function 0 (control)
 * and function 1 (error reporting).  NOTE(review): the struct header
 * and several members are elided from this view. */
211 struct pci_dev *dev_d0f0;
212 struct pci_dev *dev_d0f1;
219 const struct e752x_dev_info *dev_info;
/* Per-chipset identification: PCI device IDs plus a display name. */
222 struct e752x_dev_info {
225 const char *ctl_name;
/* Snapshot of the chipset error registers, filled by
 * e752x_get_error_info() and decoded by e752x_process_error_info().
 * NOTE(review): several members are elided from this view. */
228 struct e752x_error_info {
231 u32 nsi_ferr; /* 3100 only */
232 u32 nsi_nerr; /* 3100 only */
233 u8 hi_ferr; /* all but 3100 */
234 u8 hi_nerr; /* all but 3100 */
243 u16 dram_sec1_syndrome;
244 u16 dram_sec2_syndrome;
/* Table of supported chipsets, indexed by the probe's dev_idx.
 * Each entry pairs the control device (d0f0) with its error-reporting
 * companion device (d0f1). */
250 static const struct e752x_dev_info e752x_devs[] = {
252 .err_dev = PCI_DEVICE_ID_INTEL_7520_1_ERR,
253 .ctl_dev = PCI_DEVICE_ID_INTEL_7520_0,
254 .ctl_name = "E7520"},
256 .err_dev = PCI_DEVICE_ID_INTEL_7525_1_ERR,
257 .ctl_dev = PCI_DEVICE_ID_INTEL_7525_0,
258 .ctl_name = "E7525"},
260 .err_dev = PCI_DEVICE_ID_INTEL_7320_1_ERR,
261 .ctl_dev = PCI_DEVICE_ID_INTEL_7320_0,
262 .ctl_name = "E7320"},
264 .err_dev = PCI_DEVICE_ID_INTEL_3100_1_ERR,
265 .ctl_dev = PCI_DEVICE_ID_INTEL_3100_0,
269 /* Valid scrub rates for the e752x/3100 hardware memory scrubber. We
270 * map the scrubbing bandwidth to a hardware register value. The 'set'
271 * operation finds the 'matching or higher value'. Note that scrubbing
272 * on the e752x can only be enabled/disabled. The 3100 supports
273 * a normal and fast mode.
276 #define SDRATE_EOT 0xFFFFFFFF
279 u32 bandwidth; /* bandwidth consumed by scrubbing in bytes/sec */
280 u16 scrubval; /* register value for scrub rate */
283 /* Rate below assumes same performance as i3100 using PC3200 DDR2 in
284 * normal mode. e752x bridges don't support choosing normal or fast mode,
285 * so the scrubbing bandwidth value isn't all that important - scrubbing is
/* e752x scrub table: hardware only supports on/off, so a single
 * nominal non-zero bandwidth is listed.  Terminated by SDRATE_EOT. */
288 static const struct scrubrate scrubrates_e752x[] = {
289 {0, 0x00}, /* Scrubbing Off */
290 {500000, 0x02}, /* Scrubbing On */
291 {SDRATE_EOT, 0x00} /* End of Table */
294 /* Fast mode: 2 GByte PC3200 DDR2 scrubbed in 33s = 63161283 bytes/s
295 * Normal mode: 125 (32000 / 256) times slower than fast mode.
/* i3100 scrub table: off, normal (32k-clock) and fast (256-clock)
 * modes, mapped to approximate bytes/sec.  Terminated by SDRATE_EOT. */
297 static const struct scrubrate scrubrates_i3100[] = {
298 {0, 0x00}, /* Scrubbing Off */
299 {500000, 0x0a}, /* Normal mode - 32k clocks */
300 {62500000, 0x06}, /* Fast mode - 256 clocks */
301 {SDRATE_EOT, 0x00} /* End of Table */
/* Translate an EDAC page number to a physical page, honouring the
 * TOLM / remap window of the chipset.  Pages below TOLM and pages in
 * [4GiB, remapbase) map 1:1; pages above TOLM are shifted into the
 * remap window.  NOTE(review): the early-return statements for the
 * first two branches are on lines elided from this view. */
304 static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci,
308 struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
/* below top-of-low-memory: identity mapping */
312 if (page < pvt->tolm)
/* 0x100000 pages * 4KiB = the 4 GiB boundary */
315 if ((page >= 0x100000) && (page < pvt->remapbase))
318 remap = (page - pvt->tolm) + pvt->remapbase;
320 if (remap < pvt->remaplimit)
/* fall-through: address is outside every known window */
323 e752x_printk(KERN_ERR, "Invalid page %lx - out of range\n", page);
324 return pvt->tolm - 1;
/* Decode and report one correctable DRAM error (SEC) to the EDAC core.
 * In symmetric (dual-channel interleaved) mode the csrow is derived
 * from address bits 14:13 and validated against pvt->map[]; otherwise
 * it is looked up by page. */
327 static void do_process_ce(struct mem_ctl_info *mci, u16 error_one,
328 u32 sec1_add, u16 sec1_syndrome)
334 struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
338 /* convert the addr to 4k page */
339 page = sec1_add >> (PAGE_SHIFT - 4);
341 /* FIXME - check for -1 */
342 if (pvt->mc_symmetric) {
343 /* chip select are bits 14 & 13 */
344 row = ((page >> 1) & 3);
345 e752x_printk(KERN_WARNING,
346 "Test row %d Table %d %d %d %d %d %d %d %d\n", row,
347 pvt->map[0], pvt->map[1], pvt->map[2], pvt->map[3],
348 pvt->map[4], pvt->map[5], pvt->map[6],
351 /* test for channel remapping */
352 for (i = 0; i < 8; i++) {
353 if (pvt->map[i] == row)
357 e752x_printk(KERN_WARNING, "Test computed row %d\n", i);
/* reached when the computed row is absent from the remap table */
362 e752x_mc_printk(mci, KERN_WARNING,
363 "row %d not found in remap table\n",
/* asymmetric mode: resolve the csrow from the page number */
366 row = edac_mc_find_csrow_by_page(mci, page);
368 /* 0 = channel A, 1 = channel B */
369 channel = !(error_one & 1);
371 /* e752x mc reads 34:6 of the DRAM linear address */
372 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
373 page, offset_in_page(sec1_add << 4), sec1_syndrome,
/* CE wrapper: flags that an error was found and, when handling is
 * enabled, forwards to do_process_ce().  NOTE(review): the
 * *error_found / handle_error bookkeeping lines are elided here. */
378 static inline void process_ce(struct mem_ctl_info *mci, u16 error_one,
379 u32 sec1_add, u16 sec1_syndrome, int *error_found,
385 do_process_ce(mci, error_one, sec1_add, sec1_syndrome);
/* Decode and report uncorrectable DRAM errors (DED).  Bits 0x0202 of
 * error_one select a UE seen on a normal read (address in ded_add);
 * bits 0x0404 select a UE found by the hardware scrubber (address in
 * scrb_add).  Both paths resolve the csrow the same way as the CE
 * path and log through edac_mc_handle_error(). */
388 static void do_process_ue(struct mem_ctl_info *mci, u16 error_one,
389 u32 ded_add, u32 scrb_add)
391 u32 error_2b, block_page;
393 struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
397 if (error_one & 0x0202) {
400 /* convert to 4k address */
401 block_page = error_2b >> (PAGE_SHIFT - 4);
403 row = pvt->mc_symmetric ?
404 /* chip select are bits 14 & 13 */
405 ((block_page >> 1) & 3) :
406 edac_mc_find_csrow_by_page(mci, block_page);
408 /* e752x mc reads 34:6 of the DRAM linear address */
409 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
411 offset_in_page(error_2b << 4), 0,
413 "e752x UE from Read", "");
416 if (error_one & 0x0404) {
419 /* convert to 4k address */
420 block_page = error_2b >> (PAGE_SHIFT - 4);
422 row = pvt->mc_symmetric ?
423 /* chip select are bits 14 & 13 */
424 ((block_page >> 1) & 3) :
425 edac_mc_find_csrow_by_page(mci, block_page);
427 /* e752x mc reads 34:6 of the DRAM linear address */
428 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
430 offset_in_page(error_2b << 4), 0,
/* NOTE(review): "Scruber" is a typo but it is a user-visible log
 * string; left unchanged to keep log output identical. */
432 "e752x UE from Scruber", "");
/* UE wrapper: flags the error and forwards to do_process_ue() when
 * handling is enabled (bookkeeping lines elided from this view). */
436 static inline void process_ue(struct mem_ctl_info *mci, u16 error_one,
437 u32 ded_add, u32 scrb_add, int *error_found,
443 do_process_ue(mci, error_one, ded_add, scrb_add);
/* Report a UE detected on a memory write for which the chipset logs
 * no address information: all location fields are passed as zero. */
446 static inline void process_ue_no_info_wr(struct mem_ctl_info *mci,
447 int *error_found, int handle_error)
455 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
457 "e752x UE log memory write", "");
/* Log a correctable "memory read retry" event: a DED that the
 * controller recovered from by retrying the read.  Resolves the csrow
 * like the CE path and reports via printk only (no EDAC counter). */
460 static void do_process_ded_retry(struct mem_ctl_info *mci, u16 error,
465 struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
467 error_1b = retry_add;
468 page = error_1b >> (PAGE_SHIFT - 4); /* convert the addr to 4k page */
470 /* chip select are bits 14 & 13 */
471 row = pvt->mc_symmetric ? ((page >> 1) & 3) :
472 edac_mc_find_csrow_by_page(mci, page);
474 e752x_mc_printk(mci, KERN_WARNING,
475 "CE page 0x%lx, row %d : Memory read retry\n",
476 (long unsigned int)page, row);
/* Retry wrapper: flags the error and forwards to
 * do_process_ded_retry() (bookkeeping lines elided from this view). */
479 static inline void process_ded_retry(struct mem_ctl_info *mci, u16 error,
480 u32 retry_add, int *error_found,
486 do_process_ded_retry(mci, error, retry_add);
/* Report that the correctable-error count crossed the hardware
 * threshold; no address information is available for this event. */
489 static inline void process_threshold_ce(struct mem_ctl_info *mci, u16 error,
490 int *error_found, int handle_error)
495 e752x_mc_printk(mci, KERN_WARNING, "Memory threshold CE\n");
/* Names for the 11 FERR/NERR_GLOBAL error-source bits; index matches
 * bit position.  NOTE(review): most entries are elided from this view. */
498 static char *global_message[11] = {
506 "HUB or NS Interface",
508 "DRAM Controller", /* 9th entry */
/* Severity prefix, indexed by the 'fatal' flag (0 = non-fatal). */
514 static char *fatal_message[2] = { "Non-Fatal ", "Fatal " };
/* Walk the 11 global error bits and log each one that is set.
 * Non-DRAM sources are reported only when the
 * report_non_memory_errors module parameter is enabled. */
516 static void do_global_error(int fatal, u32 errors)
520 for (i = 0; i < 11; i++) {
521 if (errors & (1 << i)) {
522 /* If the error is from DRAM Controller OR
523 * we are to report ALL errors, then
526 if ((i == DRAM_ENTRY) || report_non_memory_errors)
527 e752x_printk(KERN_WARNING, "%sError %s\n",
528 fatal_message[fatal],
/* Global-error wrapper: flags the error and forwards to
 * do_global_error() (bookkeeping lines elided from this view). */
534 static inline void global_error(int fatal, u32 errors, int *error_found,
540 do_global_error(fatal, errors);
/* Names for the 7 hub-interface error bits; index matches bit position. */
543 static char *hub_message[7] = {
544 "HI Address or Command Parity", "HI Illegal Access",
545 "HI Internal Parity", "Out of Range Access",
546 "HI Data Parity", "Enhanced Config Access",
547 "Hub Interface Target Abort"
550 static void do_hub_error(int fatal, u8 errors)
554 for (i = 0; i < 7; i++) {
555 if (errors & (1 << i))
556 e752x_printk(KERN_WARNING, "%sError %s\n",
557 fatal_message[fatal], hub_message[i]);
/* Hub-error wrapper: flags the error and forwards to do_hub_error()
 * (bookkeeping lines elided from this view). */
561 static inline void hub_error(int fatal, u8 errors, int *error_found,
567 do_hub_error(fatal, errors);
/* i3100 NSI (north-south interface) error classification masks and
 * per-bit message strings.  Index matches bit position; reserved bits
 * are elided from this view. */
570 #define NSI_FATAL_MASK 0x0c080081
571 #define NSI_NON_FATAL_MASK 0x23a0ba64
572 #define NSI_ERR_MASK (NSI_FATAL_MASK | NSI_NON_FATAL_MASK)
574 static char *nsi_message[30] = {
575 "NSI Link Down", /* NSI_FERR/NSI_NERR bit 0, fatal error */
577 "NSI Parity Error", /* bit 2, non-fatal */
580 "Correctable Error Message", /* bit 5, non-fatal */
581 "Non-Fatal Error Message", /* bit 6, non-fatal */
582 "Fatal Error Message", /* bit 7, fatal */
584 "Receiver Error", /* bit 9, non-fatal */
586 "Bad TLP", /* bit 11, non-fatal */
587 "Bad DLLP", /* bit 12, non-fatal */
588 "REPLAY_NUM Rollover", /* bit 13, non-fatal */
590 "Replay Timer Timeout", /* bit 15, non-fatal */
594 "Data Link Protocol Error", /* bit 19, fatal */
596 "Poisoned TLP", /* bit 21, non-fatal */
598 "Completion Timeout", /* bit 23, non-fatal */
599 "Completer Abort", /* bit 24, non-fatal */
600 "Unexpected Completion", /* bit 25, non-fatal */
601 "Receiver Overflow", /* bit 26, fatal */
602 "Malformed TLP", /* bit 27, fatal */
604 "Unsupported Request" /* bit 29, non-fatal */
607 static void do_nsi_error(int fatal, u32 errors)
611 for (i = 0; i < 30; i++) {
612 if (errors & (1 << i))
613 printk(KERN_WARNING "%sError %s\n",
614 fatal_message[fatal], nsi_message[i]);
/* NSI-error wrapper: flags the error and forwards to do_nsi_error()
 * (bookkeeping lines elided from this view). */
618 static inline void nsi_error(int fatal, u32 errors, int *error_found,
624 do_nsi_error(fatal, errors);
/* Names for the 4 memory-buffer (PMWB) parity-error bits; index
 * matches bit position. */
627 static char *membuf_message[4] = {
628 "Internal PMWB to DRAM parity",
629 "Internal PMWB to System Bus Parity",
630 "Internal System Bus or IO to PMWB Parity",
631 "Internal DRAM to PMWB Parity"
634 static void do_membuf_error(u8 errors)
638 for (i = 0; i < 4; i++) {
639 if (errors & (1 << i))
640 e752x_printk(KERN_WARNING, "Non-Fatal Error %s\n",
/* Memory-buffer wrapper: flags the error and forwards to
 * do_membuf_error() (bookkeeping lines elided from this view). */
645 static inline void membuf_error(u8 errors, int *error_found, int handle_error)
650 do_membuf_error(errors);
/* Names for the 10 front-side-bus error bits; index matches bit
 * position.  NOTE(review): some entries are elided from this view. */
653 static char *sysbus_message[10] = {
654 "Addr or Request Parity",
655 "Data Strobe Glitch",
656 "Addr Strobe Glitch",
659 "Non DRAM Lock Error",
662 "IO Subsystem Parity"
665 static void do_sysbus_error(int fatal, u32 errors)
669 for (i = 0; i < 10; i++) {
670 if (errors & (1 << i))
671 e752x_printk(KERN_WARNING, "%sError System Bus %s\n",
672 fatal_message[fatal], sysbus_message[i]);
/* System-bus wrapper: flags the error and forwards to
 * do_sysbus_error() (bookkeeping lines elided from this view). */
676 static inline void sysbus_error(int fatal, u32 errors, int *error_found,
682 do_sysbus_error(fatal, errors);
/* Decode the hub-interface first/next error bytes captured in *info.
 * Mask 0x2b selects the fatal bits, 0x54 the non-fatal bits of the
 * 7-bit error field. */
685 static void e752x_check_hub_interface(struct e752x_error_info *info,
686 int *error_found, int handle_error)
690 //pci_read_config_byte(dev,E752X_HI_FERR,&stat8);
692 stat8 = info->hi_ferr;
694 if (stat8 & 0x7f) { /* Error, so process */
698 hub_error(1, stat8 & 0x2b, error_found, handle_error);
701 hub_error(0, stat8 & 0x54, error_found, handle_error);
703 //pci_read_config_byte(dev,E752X_HI_NERR,&stat8);
/* repeat for the "next error" register snapshot */
705 stat8 = info->hi_nerr;
707 if (stat8 & 0x7f) { /* Error, so process */
711 hub_error(1, stat8 & 0x2b, error_found, handle_error);
714 hub_error(0, stat8 & 0x54, error_found, handle_error);
/* Decode the i3100 NSI first/next error words captured in *info,
 * splitting each into its fatal and non-fatal components. */
718 static void e752x_check_ns_interface(struct e752x_error_info *info,
719 int *error_found, int handle_error)
723 stat32 = info->nsi_ferr;
724 if (stat32 & NSI_ERR_MASK) { /* Error, so process */
725 if (stat32 & NSI_FATAL_MASK) /* check for fatal errors */
726 nsi_error(1, stat32 & NSI_FATAL_MASK, error_found,
728 if (stat32 & NSI_NON_FATAL_MASK) /* check for non-fatal ones */
729 nsi_error(0, stat32 & NSI_NON_FATAL_MASK, error_found,
/* repeat for the "next error" register snapshot */
732 stat32 = info->nsi_nerr;
733 if (stat32 & NSI_ERR_MASK) {
734 if (stat32 & NSI_FATAL_MASK)
735 nsi_error(1, stat32 & NSI_FATAL_MASK, error_found,
737 if (stat32 & NSI_NON_FATAL_MASK)
738 nsi_error(0, stat32 & NSI_NON_FATAL_MASK, error_found,
/* Decode the system-bus FERR/NERR pair.  The two 16-bit registers are
 * packed into one 32-bit word (FERR low, NERR high); each half is
 * split into fatal (mask 0x087) and non-fatal (mask 0x378) bits. */
743 static void e752x_check_sysbus(struct e752x_error_info *info,
744 int *error_found, int handle_error)
748 //pci_read_config_dword(dev,E752X_SYSBUS_FERR,&stat32);
749 stat32 = info->sysbus_ferr + (info->sysbus_nerr << 16);
752 return; /* no errors */
754 error32 = (stat32 >> 16) & 0x3ff;
755 stat32 = stat32 & 0x3ff;
758 sysbus_error(1, stat32 & 0x087, error_found, handle_error);
761 sysbus_error(0, stat32 & 0x378, error_found, handle_error);
762 sysbus_error(1, error32 & 0x087, error_found, handle_error);
767 sysbus_error(0, error32 & 0x378, error_found, handle_error);
/* Decode the memory-buffer first/next error bytes; only the low four
 * bits are defined (one per PMWB parity path). */
770 static void e752x_check_membuf(struct e752x_error_info *info,
771 int *error_found, int handle_error)
775 stat8 = info->buf_ferr;
777 if (stat8 & 0x0f) { /* Error, so process */
779 membuf_error(stat8, error_found, handle_error);
/* repeat for the "next error" register snapshot */
782 stat8 = info->buf_nerr;
784 if (stat8 & 0x0f) { /* Error, so process */
786 membuf_error(stat8, error_found, handle_error);
/* Decode the DRAM first/next error registers and dispatch each event
 * class to its handler.  The bit masks pair channel A (low byte) with
 * channel B (high byte): 0x0101 CE, 0x4040 write UE without address,
 * 0x2020 read retry, 0x0808 CE threshold, 0x0606 read/scrub UE. */
790 static void e752x_check_dram(struct mem_ctl_info *mci,
791 struct e752x_error_info *info, int *error_found,
794 u16 error_one, error_next;
796 error_one = info->dram_ferr;
797 error_next = info->dram_nerr;
799 /* decode and report errors */
800 if (error_one & 0x0101) /* check first error correctable */
801 process_ce(mci, error_one, info->dram_sec1_add,
802 info->dram_sec1_syndrome, error_found, handle_error);
804 if (error_next & 0x0101) /* check next error correctable */
805 process_ce(mci, error_next, info->dram_sec2_add,
806 info->dram_sec2_syndrome, error_found, handle_error);
808 if (error_one & 0x4040)
809 process_ue_no_info_wr(mci, error_found, handle_error);
811 if (error_next & 0x4040)
812 process_ue_no_info_wr(mci, error_found, handle_error);
814 if (error_one & 0x2020)
815 process_ded_retry(mci, error_one, info->dram_retr_add,
816 error_found, handle_error);
818 if (error_next & 0x2020)
819 process_ded_retry(mci, error_next, info->dram_retr_add,
820 error_found, handle_error);
822 if (error_one & 0x0808)
823 process_threshold_ce(mci, error_one, error_found, handle_error);
825 if (error_next & 0x0808)
826 process_threshold_ce(mci, error_next, error_found,
829 if (error_one & 0x0606)
830 process_ue(mci, error_one, info->dram_ded_add,
831 info->dram_scrb_add, error_found, handle_error);
833 if (error_next & 0x0606)
834 process_ue(mci, error_next, info->dram_ded_add,
835 info->dram_scrb_add, error_found, handle_error);
/* Snapshot all error registers from d0f1 into *info, then write each
 * non-zero register back to itself: these are write-1-to-clear, so
 * this acknowledges the captured errors.  The i3100 reads NSI
 * registers where other chipsets read hub-interface registers. */
838 static void e752x_get_error_info(struct mem_ctl_info *mci,
839 struct e752x_error_info *info)
842 struct e752x_pvt *pvt;
844 memset(info, 0, sizeof(*info));
845 pvt = (struct e752x_pvt *)mci->pvt_info;
847 pci_read_config_dword(dev, E752X_FERR_GLOBAL, &info->ferr_global);
/* first-error bank: only read the detail registers when the global
 * register says something was captured */
849 if (info->ferr_global) {
850 if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) {
851 pci_read_config_dword(dev, I3100_NSI_FERR,
855 pci_read_config_byte(dev, E752X_HI_FERR,
859 pci_read_config_word(dev, E752X_SYSBUS_FERR,
861 pci_read_config_byte(dev, E752X_BUF_FERR, &info->buf_ferr);
862 pci_read_config_word(dev, E752X_DRAM_FERR, &info->dram_ferr);
863 pci_read_config_dword(dev, E752X_DRAM_SEC1_ADD,
864 &info->dram_sec1_add);
865 pci_read_config_word(dev, E752X_DRAM_SEC1_SYNDROME,
866 &info->dram_sec1_syndrome);
867 pci_read_config_dword(dev, E752X_DRAM_DED_ADD,
868 &info->dram_ded_add);
869 pci_read_config_dword(dev, E752X_DRAM_SCRB_ADD,
870 &info->dram_scrb_add);
871 pci_read_config_dword(dev, E752X_DRAM_RETR_ADD,
872 &info->dram_retr_add);
874 /* ignore the reserved bits just in case */
875 if (info->hi_ferr & 0x7f)
876 pci_write_config_byte(dev, E752X_HI_FERR,
879 if (info->nsi_ferr & NSI_ERR_MASK)
880 pci_write_config_dword(dev, I3100_NSI_FERR,
883 if (info->sysbus_ferr)
884 pci_write_config_word(dev, E752X_SYSBUS_FERR,
887 if (info->buf_ferr & 0x0f)
888 pci_write_config_byte(dev, E752X_BUF_FERR,
892 pci_write_bits16(pvt->dev_d0f1, E752X_DRAM_FERR,
893 info->dram_ferr, info->dram_ferr);
895 pci_write_config_dword(dev, E752X_FERR_GLOBAL,
/* next-error bank: same pattern for the NERR registers */
899 pci_read_config_dword(dev, E752X_NERR_GLOBAL, &info->nerr_global);
901 if (info->nerr_global) {
902 if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) {
903 pci_read_config_dword(dev, I3100_NSI_NERR,
907 pci_read_config_byte(dev, E752X_HI_NERR,
911 pci_read_config_word(dev, E752X_SYSBUS_NERR,
913 pci_read_config_byte(dev, E752X_BUF_NERR, &info->buf_nerr);
914 pci_read_config_word(dev, E752X_DRAM_NERR, &info->dram_nerr);
915 pci_read_config_dword(dev, E752X_DRAM_SEC2_ADD,
916 &info->dram_sec2_add);
917 pci_read_config_word(dev, E752X_DRAM_SEC2_SYNDROME,
918 &info->dram_sec2_syndrome);
920 if (info->hi_nerr & 0x7f)
921 pci_write_config_byte(dev, E752X_HI_NERR,
924 if (info->nsi_nerr & NSI_ERR_MASK)
925 pci_write_config_dword(dev, I3100_NSI_NERR,
928 if (info->sysbus_nerr)
929 pci_write_config_word(dev, E752X_SYSBUS_NERR,
932 if (info->buf_nerr & 0x0f)
933 pci_write_config_byte(dev, E752X_BUF_NERR,
937 pci_write_bits16(pvt->dev_d0f1, E752X_DRAM_NERR,
938 info->dram_nerr, info->dram_nerr);
940 pci_write_config_dword(dev, E752X_NERR_GLOBAL,
/* Decode a previously captured register snapshot.  The global FERR /
 * NERR words carry fatal bits at 28:18 and non-fatal bits at 14:4;
 * each non-zero field is reported, then every per-source checker runs. */
945 static int e752x_process_error_info(struct mem_ctl_info *mci,
946 struct e752x_error_info *info,
953 error32 = (info->ferr_global >> 18) & 0x3ff;
954 stat32 = (info->ferr_global >> 4) & 0x7ff;
957 global_error(1, error32, &error_found, handle_errors);
960 global_error(0, stat32, &error_found, handle_errors);
/* repeat for the "next error" global register */
962 error32 = (info->nerr_global >> 18) & 0x3ff;
963 stat32 = (info->nerr_global >> 4) & 0x7ff;
966 global_error(1, error32, &error_found, handle_errors);
969 global_error(0, stat32, &error_found, handle_errors);
971 e752x_check_hub_interface(info, &error_found, handle_errors);
972 e752x_check_ns_interface(info, &error_found, handle_errors);
973 e752x_check_sysbus(info, &error_found, handle_errors);
974 e752x_check_membuf(info, &error_found, handle_errors);
975 e752x_check_dram(mci, info, &error_found, handle_errors);
/* EDAC poll callback: capture the current error registers and decode
 * them with handling enabled. */
979 static void e752x_check(struct mem_ctl_info *mci)
981 struct e752x_error_info info;
983 e752x_get_error_info(mci, &info);
984 e752x_process_error_info(mci, &info, 1);
987 /* Program byte/sec bandwidth scrub rate to hardware */
/* EDAC callback: program the scrub-rate register for the requested
 * bytes/sec bandwidth; returns the bandwidth actually selected.
 * NOTE(review): the failure return for an out-of-table request is on
 * an elided line. */
988 static int set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 new_bw)
990 const struct scrubrate *scrubrates;
991 struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
992 struct pci_dev *pdev = pvt->dev_d0f0;
995 if (pvt->dev_info->ctl_dev == PCI_DEVICE_ID_INTEL_3100_0)
996 scrubrates = scrubrates_i3100;
998 scrubrates = scrubrates_e752x;
1000 /* Translate the desired scrub rate to a e752x/3100 register value.
1001 * Search for the bandwidth that is equal or greater than the
1002 * desired rate and program the corresponding register value.
1004 for (i = 0; scrubrates[i].bandwidth != SDRATE_EOT; i++)
1005 if (scrubrates[i].bandwidth >= new_bw)
1008 if (scrubrates[i].bandwidth == SDRATE_EOT)
1011 pci_write_config_word(pdev, E752X_MCHSCRB, scrubrates[i].scrubval);
1013 return scrubrates[i].bandwidth;
1016 /* Convert current scrub rate value into byte/sec bandwidth */
/* EDAC callback: read the scrub-rate register back and translate it
 * to bytes/sec via the chipset's rate table.  An unrecognized
 * register value is reported and treated as an error (the failure
 * return is on an elided line). */
1017 static int get_sdram_scrub_rate(struct mem_ctl_info *mci)
1019 const struct scrubrate *scrubrates;
1020 struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
1021 struct pci_dev *pdev = pvt->dev_d0f0;
1025 if (pvt->dev_info->ctl_dev == PCI_DEVICE_ID_INTEL_3100_0)
1026 scrubrates = scrubrates_i3100;
1028 scrubrates = scrubrates_e752x;
1030 /* Find the bandwidth matching the memory scrubber configuration */
1031 pci_read_config_word(pdev, E752X_MCHSCRB, &scrubval);
1032 scrubval = scrubval & 0x0f;
1034 for (i = 0; scrubrates[i].bandwidth != SDRATE_EOT; i++)
1035 if (scrubrates[i].scrubval == scrubval)
1038 if (scrubrates[i].bandwidth == SDRATE_EOT) {
1039 e752x_printk(KERN_WARNING,
1040 "Invalid sdram scrub control value: 0x%x\n", scrubval);
1043 return scrubrates[i].bandwidth;
1047 /* Return 1 if dual channel mode is active. Else return 0. */
1048 static inline int dual_channel_active(u16 ddrcsr)
1050 return (((ddrcsr >> 12) & 3) == 3);
1053 /* Remap csrow index numbers if map_type is "reverse"
/* Map a logical csrow index to the physical one; depends on
 * pvt->map_type.  NOTE(review): the function body is almost entirely
 * elided from this view — only the pvt lookup is visible. */
1055 static inline int remap_csrow_index(struct mem_ctl_info *mci, int index)
1057 struct e752x_pvt *pvt = mci->pvt_info;
/* Populate mci's csrow/dimm layout from the DRA (device width), DRC
 * (ECC mode) and DRB (cumulative row boundary) registers.  Rows whose
 * DRB value equals the previous one are unpopulated and skipped. */
1065 static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
1068 struct csrow_info *csrow;
1069 enum edac_type edac_mode;
1070 unsigned long last_cumul_size;
1071 int index, mem_dev, drc_chan;
1072 int drc_drbg; /* DRB granularity 0=64mb, 1=128mb */
1073 int drc_ddim; /* DRAM Data Integrity Mode 0=none, 2=edac */
1075 u32 dra, drc, cumul_size, i, nr_pages;
/* assemble the 32-bit DRA value from its four byte registers */
1078 for (index = 0; index < 4; index++) {
1080 pci_read_config_byte(pdev, E752X_DRA + index, &dra_reg);
1081 dra |= dra_reg << (index * 8);
1083 pci_read_config_dword(pdev, E752X_DRC, &drc);
1084 drc_chan = dual_channel_active(ddrcsr) ? 1 : 0;
1085 drc_drbg = drc_chan + 1; /* 128 in dual mode, 64 in single */
1086 drc_ddim = (drc >> 20) & 0x3;
1088 /* The dram row boundary (DRB) reg values are boundary address for
1089 * each DRAM row with a granularity of 64 or 128MB (single/dual
1090 * channel operation). DRB regs are cumulative; therefore DRB7 will
1091 * contain the total memory contained in all eight rows.
1093 for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
1094 /* mem_dev 0=x8, 1=x4 */
1095 mem_dev = (dra >> (index * 4 + 2)) & 0x3;
1096 csrow = mci->csrows[remap_csrow_index(mci, index)];
1098 mem_dev = (mem_dev == 2);
1099 pci_read_config_byte(pdev, E752X_DRB + index, &value);
1100 /* convert a 128 or 64 MiB DRB to a page size. */
1101 cumul_size = value << (25 + drc_drbg - PAGE_SHIFT);
1102 edac_dbg(3, "(%d) cumul_size 0x%x\n", index, cumul_size);
1103 if (cumul_size == last_cumul_size)
1104 continue; /* not populated */
1106 csrow->first_page = last_cumul_size;
1107 csrow->last_page = cumul_size - 1;
1108 nr_pages = cumul_size - last_cumul_size;
1109 last_cumul_size = cumul_size;
1112 * if single channel or x8 devices then SECDED
1113 * if dual channel and x4 then S4ECD4ED
1116 if (drc_chan && mem_dev) {
1117 edac_mode = EDAC_S4ECD4ED;
1118 mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
1120 edac_mode = EDAC_SECDED;
1121 mci->edac_cap |= EDAC_FLAG_SECDED;
/* reached when drc_ddim reports no data-integrity mode */
1124 edac_mode = EDAC_NONE;
1125 for (i = 0; i < csrow->nr_channels; i++) {
1126 struct dimm_info *dimm = csrow->channels[i]->dimm;
1128 edac_dbg(3, "Initializing rank at (%i,%i)\n", index, i);
1129 dimm->nr_pages = nr_pages / csrow->nr_channels;
1130 dimm->grain = 1 << 12; /* 4KiB - resolution of CELOG */
1131 dimm->mtype = MEM_RDDR; /* only one type supported */
1132 dimm->dtype = mem_dev ? DEV_X4 : DEV_X8;
1133 dimm->edac_mode = edac_mode;
/* Build pvt->map[]: for each DIMM slot (even row index), compare
 * consecutive cumulative DRB values to decide whether the slot is
 * empty (0xff), single-sided, or double-sided. */
1138 static void e752x_init_mem_map_table(struct pci_dev *pdev,
1139 struct e752x_pvt *pvt)
1142 u8 value, last, row;
1147 for (index = 0; index < 8; index += 2) {
1148 pci_read_config_byte(pdev, E752X_DRB + index, &value);
1149 /* test if there is a dimm in this slot */
1150 if (value == last) {
1151 /* no dimm in the slot, so flag it as empty */
1152 pvt->map[index] = 0xff;
1153 pvt->map[index + 1] = 0xff;
1154 } else { /* there is a dimm in the slot */
1155 pvt->map[index] = row;
1158 /* test the next value to see if the dimm is double
1161 pci_read_config_byte(pdev, E752X_DRB + index + 1,
1164 /* the dimm is single sided, so flag as empty */
1165 /* this is a double sided dimm to save the next row #*/
1166 pvt->map[index + 1] = (value == last) ? 0xff : row;
1173 /* Return 0 on success or 1 on failure. */
/* Acquire references to the d0f1 error device and the d0f0 control
 * device.  If d0f1 is hidden by the BIOS it is re-scanned on pdev's
 * bus.  Returns 0 on success or 1 on failure (per the comment above);
 * the error-path labels/returns are on elided lines. */
1174 static int e752x_get_devs(struct pci_dev *pdev, int dev_idx,
1175 struct e752x_pvt *pvt)
1177 pvt->dev_d0f1 = pci_get_device(PCI_VENDOR_ID_INTEL,
1178 pvt->dev_info->err_dev, NULL);
1180 if (pvt->dev_d0f1 == NULL) {
1181 pvt->dev_d0f1 = pci_scan_single_device(pdev->bus,
/* take a reference on the freshly scanned device */
1183 pci_dev_get(pvt->dev_d0f1);
1186 if (pvt->dev_d0f1 == NULL) {
1187 e752x_printk(KERN_ERR, "error reporting device not found:"
1188 "vendor %x device 0x%x (broken BIOS?)\n",
1189 PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].err_dev);
1193 pvt->dev_d0f0 = pci_get_device(PCI_VENDOR_ID_INTEL,
1194 e752x_devs[dev_idx].ctl_dev,
1197 if (pvt->dev_d0f0 == NULL)
/* error path: drop the d0f1 reference taken above */
1203 pci_dev_put(pvt->dev_d0f1);
1207 /* Setup system bus parity mask register.
1208 * Sysbus parity supported on:
1209 * e7320/e7520/e7525 + Xeon
/* Configure the system-bus parity error mask: parity checking is
 * enabled (mask 0x0000) only when the CPU supports it (Xeon) or the
 * sysbus_parity module parameter forces it; otherwise the parity
 * bits (0x0309) are masked off. */
1211 static void e752x_init_sysbus_parity_mask(struct e752x_pvt *pvt)
1213 char *cpu_id = cpu_data(0).x86_model_id;
1214 struct pci_dev *dev = pvt->dev_d0f1;
1217 /* Allow module parameter override, else see if CPU supports parity */
1218 if (sysbus_parity != -1) {
1219 enable = sysbus_parity;
1220 } else if (cpu_id[0] && !strstr(cpu_id, "Xeon")) {
1221 e752x_printk(KERN_INFO, "System Bus Parity not "
1222 "supported by CPU, disabling\n");
1227 pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x0000);
1229 pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x0309);
/* Clear all error masks and SMI command registers on d0f1 so that
 * errors are reported to the OS rather than masked or routed to SMI
 * by the BIOS.  The i3100 uses the NSI registers instead of the
 * hub-interface ones. */
1232 static void e752x_init_error_reporting_regs(struct e752x_pvt *pvt)
1234 struct pci_dev *dev;
1236 dev = pvt->dev_d0f1;
1237 /* Turn off error disable & SMI in case the BIOS turned it on */
1238 if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) {
1239 pci_write_config_dword(dev, I3100_NSI_EMASK, 0);
1240 pci_write_config_dword(dev, I3100_NSI_SMICMD, 0);
1242 pci_write_config_byte(dev, E752X_HI_ERRMASK, 0x00);
1243 pci_write_config_byte(dev, E752X_HI_SMICMD, 0x00);
1246 e752x_init_sysbus_parity_mask(pvt);
1248 pci_write_config_word(dev, E752X_SYSBUS_SMICMD, 0x00);
1249 pci_write_config_byte(dev, E752X_BUF_ERRMASK, 0x00);
1250 pci_write_config_byte(dev, E752X_BUF_SMICMD, 0x00);
1251 pci_write_config_byte(dev, E752X_DRAM_ERRMASK, 0x00);
1252 pci_write_config_byte(dev, E752X_DRAM_SMICMD, 0x00);
/* Probe one e752x/i3100 host bridge: allocate and populate an EDAC MC,
 * map csrows, register it, and set up error reporting.
 * Returns 0 on success, negative on failure. */
1255 static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
1259 struct mem_ctl_info *mci;
1260 struct edac_mc_layer layers[2];
1261 struct e752x_pvt *pvt;
1263 int drc_chan; /* Number of channels 0=1chan,1=2chan */
1264 struct e752x_error_info discard;
1266 edac_dbg(0, "mci\n");
1267 edac_dbg(0, "Starting Probe1\n");
1269 /* check to see if device 0 function 1 is enabled; if it isn't, we
1270 * assume the BIOS has reserved it for a reason and is expecting
1271 * exclusive access, we take care not to violate that assumption and
1272 * fail the probe. */
1273 pci_read_config_byte(pdev, E752X_DEVPRES1, &stat8);
1274 if (!force_function_unhide && !(stat8 & (1 << 5))) {
1275 printk(KERN_INFO "Contact your BIOS vendor to see if the "
1276 "E752x error registers can be safely un-hidden\n");
	/* force_function_unhide path: write DEVPRES1 back with the
	 * function-present bit — presumably set just above (not visible
	 * in this view), to un-hide D0:F1. */
1280 pci_write_config_byte(pdev, E752X_DEVPRES1, stat8);
1282 pci_read_config_word(pdev, E752X_DDRCSR, &ddrcsr);
1283 /* FIXME: should check >>12 or 0xf, true for all? */
1284 /* Dual channel = 1, Single channel = 0 */
1285 drc_chan = dual_channel_active(ddrcsr);
	/* Two-layer MC model: csrows x channels. */
1287 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
1288 layers[0].size = E752X_NR_CSROWS;
1289 layers[0].is_virt_csrow = true;
1290 layers[1].type = EDAC_MC_LAYER_CHANNEL;
1291 layers[1].size = drc_chan + 1;
1292 layers[1].is_virt_csrow = false;
1293 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
1297 edac_dbg(3, "init mci\n");
1298 mci->mtype_cap = MEM_FLAG_RDDR;
1299 /* 3100 IMCH supports SECDEC only */
1300 mci->edac_ctl_cap = (dev_idx == I3100) ? EDAC_FLAG_SECDED :
1301 (EDAC_FLAG_NONE | EDAC_FLAG_SECDED | EDAC_FLAG_S4ECD4ED);
1302 /* FIXME - what if different memory types are in different csrows? */
1303 mci->mod_name = EDAC_MOD_STR;
1304 mci->pdev = &pdev->dev;
1306 edac_dbg(3, "init pvt\n");
1307 pvt = (struct e752x_pvt *)mci->pvt_info;
1308 pvt->dev_info = &e752x_devs[dev_idx];
	/* Bit 4 of DDRCSR => symmetric (interleaved) DIMM population. */
1309 pvt->mc_symmetric = ((ddrcsr & 0x10) != 0);
	/* Take references on the D0:F0/D0:F1 companion devices. */
1311 if (e752x_get_devs(pdev, dev_idx, pvt)) {
1316 edac_dbg(3, "more mci init\n");
1317 mci->ctl_name = pvt->dev_info->ctl_name;
1318 mci->dev_name = pci_name(pdev);
1319 mci->edac_check = e752x_check;
1320 mci->ctl_page_to_phys = ctl_page_to_phys;
1321 mci->set_sdram_scrub_rate = set_sdram_scrub_rate;
1322 mci->get_sdram_scrub_rate = get_sdram_scrub_rate;
1324 /* set the map type. 1 = normal, 0 = reversed
1325 * Must be set before e752x_init_csrows in case csrow mapping
 * is reversed. */
1328 pci_read_config_byte(pdev, E752X_DRM, &stat8);
1329 pvt->map_type = ((stat8 & 0x0f) > ((stat8 >> 4) & 0x0f));
1331 e752x_init_csrows(mci, pdev, ddrcsr);
1332 e752x_init_mem_map_table(pdev, pvt);
1334 if (dev_idx == I3100)
1335 mci->edac_cap = EDAC_FLAG_SECDED; /* the only mode supported */
1337 mci->edac_cap |= EDAC_FLAG_NONE;
1338 edac_dbg(3, "tolm, remapbase, remaplimit\n");
1340 /* load the top of low memory, remap base, and remap limit vars */
1341 pci_read_config_word(pdev, E752X_TOLM, &pci_data);
1342 pvt->tolm = ((u32) pci_data) << 4;
1343 pci_read_config_word(pdev, E752X_REMAPBASE, &pci_data);
1344 pvt->remapbase = ((u32) pci_data) << 14;
1345 pci_read_config_word(pdev, E752X_REMAPLIMIT, &pci_data);
1346 pvt->remaplimit = ((u32) pci_data) << 14;
1347 e752x_printk(KERN_INFO,
1348 "tolm = %x, remapbase = %x, remaplimit = %x\n",
1349 pvt->tolm, pvt->remapbase, pvt->remaplimit);
1351 /* Here we assume that we will never see multiple instances of this
1352 * type of memory controller. The ID is therefore hardcoded to 0.
 */
1354 if (edac_mc_add_mc(mci)) {
1355 edac_dbg(3, "failed edac_mc_add_mc()\n");
	/* Only after the MC is registered: unmask hardware errors and
	 * flush any stale error state left over from the BIOS. */
1359 e752x_init_error_reporting_regs(pvt);
1360 e752x_get_error_info(mci, &discard); /* clear other MCH errors */
1362 /* allocating generic PCI control info */
1363 e752x_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
	/* PCI control failure is non-fatal: warn and continue. */
1366 "%s(): Unable to create PCI control\n", __func__);
1368 "%s(): PCI error report via EDAC not setup\n",
1372 /* get this far and it's successful */
1373 edac_dbg(3, "success\n");
	/* Error path: release the companion-device references taken by
	 * e752x_get_devs(). */
1377 pci_dev_put(pvt->dev_d0f0);
1378 pci_dev_put(pvt->dev_d0f1);
1384 /* returns count (>= 0), or negative on error */
1385 static int e752x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1389 /* wake up and enable device */
1390 if (pci_enable_device(pdev) < 0)
	/* ent->driver_data carries the e752x_devs index from the id table */
1393 return e752x_probe1(pdev, ent->driver_data);
/* Undo e752x_probe1 for one device: release the EDAC PCI control, remove
 * the MC, and drop the companion-device references. */
1396 static void e752x_remove_one(struct pci_dev *pdev)
1398 struct mem_ctl_info *mci;
1399 struct e752x_pvt *pvt;
	/* Tear down the generic PCI error-reporting control first. */
1404 edac_pci_release_generic_ctl(e752x_pci);
	/* Unregister the MC; nothing more to do if none was registered. */
1406 if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
	/* Drop the D0:F0 / D0:F1 references taken during probe. */
1409 pvt = (struct e752x_pvt *)mci->pvt_info;
1410 pci_dev_put(pvt->dev_d0f0);
1411 pci_dev_put(pvt->dev_d0f1);
/* PCI IDs this driver binds to. NOTE(review): each entry's trailing
 * .driver_data field (the e752x_devs index) is not visible in this view. */
1415 static const struct pci_device_id e752x_pci_tbl[] = {
1417 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1420 PCI_VEND_DEV(INTEL, 7525_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1423 PCI_VEND_DEV(INTEL, 7320_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1426 PCI_VEND_DEV(INTEL, 3100_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1430 } /* 0 terminated list. */
1433 MODULE_DEVICE_TABLE(pci, e752x_pci_tbl);
/* PCI driver glue: binds e752x_pci_tbl devices to probe/remove above. */
1435 static struct pci_driver e752x_driver = {
1436 .name = EDAC_MOD_STR,
1437 .probe = e752x_init_one,
1438 .remove = e752x_remove_one,
1439 .id_table = e752x_pci_tbl,
/* Module entry point: register the PCI driver. */
1442 static int __init e752x_init(void)
1448 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
	/* NOTE(review): the opstate_init() call itself is not visible in
	 * this view — confirm it precedes driver registration. */
1451 pci_rc = pci_register_driver(&e752x_driver);
	/* Normalize: any non-negative return from registration means 0. */
1452 return (pci_rc < 0) ? pci_rc : 0;
/* Module exit point: unregister the PCI driver (triggers remove_one). */
1455 static void __exit e752x_exit(void)
1458 pci_unregister_driver(&e752x_driver);
1461 module_init(e752x_init);
1462 module_exit(e752x_exit);
1464 MODULE_LICENSE("GPL");
1465 MODULE_AUTHOR("Linux Networx (http://lnxi.com) Tom Zimmerman\n");
1466 MODULE_DESCRIPTION("MC support for Intel e752x/3100 memory controllers");
/* Read-only (0444) unless noted; values are sampled at load time. */
1468 module_param(force_function_unhide, int, 0444);
1469 MODULE_PARM_DESC(force_function_unhide, "if BIOS sets Dev0:Fun1 up as hidden:"
1470 " 1=force unhide and hope BIOS doesn't fight driver for "
1471 "Dev0:Fun1 access");
1473 module_param(edac_op_state, int, 0444);
1474 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
	/* sysbus_parity defaults to -1 => auto-detect from CPU model id. */
1476 module_param(sysbus_parity, int, 0444);
1477 MODULE_PARM_DESC(sysbus_parity, "0=disable system bus parity checking,"
1478 " 1=enable system bus parity checking, default=auto-detect");
	/* 0644: deliberately runtime-writable via sysfs, unlike the others. */
1479 module_param(report_non_memory_errors, int, 0644);
1480 MODULE_PARM_DESC(report_non_memory_errors, "0=disable non-memory error "
1481 "reporting, 1=enable non-memory error reporting");