drivers/edac/highbank_mc_edac.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2011-2012 Calxeda, Inc.
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/edac.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/of_platform.h>
#include <linux/uaccess.h>

#include "edac_module.h"

/* DDR Ctrlr Error Registers */

#define HB_DDR_ECC_ERR_BASE             0x128
#define MW_DDR_ECC_ERR_BASE             0x1b4

#define HB_DDR_ECC_OPT                  0x00
#define HB_DDR_ECC_U_ERR_ADDR           0x08
#define HB_DDR_ECC_U_ERR_STAT           0x0c
#define HB_DDR_ECC_U_ERR_DATAL          0x10
#define HB_DDR_ECC_U_ERR_DATAH          0x14
#define HB_DDR_ECC_C_ERR_ADDR           0x18
#define HB_DDR_ECC_C_ERR_STAT           0x1c
#define HB_DDR_ECC_C_ERR_DATAL          0x20
#define HB_DDR_ECC_C_ERR_DATAH          0x24

#define HB_DDR_ECC_OPT_MODE_MASK        0x3
#define HB_DDR_ECC_OPT_FWC              0x100
#define HB_DDR_ECC_OPT_XOR_SHIFT        16

/* DDR Ctrlr Interrupt Registers */

#define HB_DDR_ECC_INT_BASE             0x180
#define MW_DDR_ECC_INT_BASE             0x218

#define HB_DDR_ECC_INT_STATUS           0x00
#define HB_DDR_ECC_INT_ACK              0x04

#define HB_DDR_ECC_INT_STAT_CE          0x8
#define HB_DDR_ECC_INT_STAT_DOUBLE_CE   0x10
#define HB_DDR_ECC_INT_STAT_UE          0x20
#define HB_DDR_ECC_INT_STAT_DOUBLE_UE   0x40

struct hb_mc_drvdata {
        void __iomem *mc_err_base;
        void __iomem *mc_int_base;
};

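/*
 * DDR controller ECC interrupt handler. Uncorrectable and correctable
 * errors are reported to the EDAC core with the failing address split
 * into page number and page offset; for correctable errors the syndrome
 * is taken from bits [15:8] of the status register. Writing the status
 * back to the ACK register clears the error and the interrupt.
 */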
static irqreturn_t highbank_mc_err_handler(int irq, void *dev_id)
{
        struct mem_ctl_info *mci = dev_id;
        struct hb_mc_drvdata *drvdata = mci->pvt_info;
        u32 status, err_addr;

        /* Read the interrupt status register */
        status = readl(drvdata->mc_int_base + HB_DDR_ECC_INT_STATUS);

        if (status & HB_DDR_ECC_INT_STAT_UE) {
                err_addr = readl(drvdata->mc_err_base + HB_DDR_ECC_U_ERR_ADDR);
                edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
                                     err_addr >> PAGE_SHIFT,
                                     err_addr & ~PAGE_MASK, 0,
                                     0, 0, -1,
                                     mci->ctl_name, "");
        }
        if (status & HB_DDR_ECC_INT_STAT_CE) {
                u32 syndrome = readl(drvdata->mc_err_base + HB_DDR_ECC_C_ERR_STAT);
                syndrome = (syndrome >> 8) & 0xff;
                err_addr = readl(drvdata->mc_err_base + HB_DDR_ECC_C_ERR_ADDR);
                edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
                                     err_addr >> PAGE_SHIFT,
                                     err_addr & ~PAGE_MASK, syndrome,
                                     0, 0, -1,
                                     mci->ctl_name, "");
        }

        /* clear the error, clears the interrupt */
        writel(status, drvdata->mc_int_base + HB_DDR_ECC_INT_ACK);
        return IRQ_HANDLED;
}

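/*
 * Inject an ECC error: the requested syndrome is XORed into the check
 * bits and the FWC bit is set in the ECC option register, so that a
 * subsequent memory access reports the corresponding error through the
 * interrupt handler above.
 */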
static void highbank_mc_err_inject(struct mem_ctl_info *mci, u8 synd)
{
        struct hb_mc_drvdata *pdata = mci->pvt_info;
        u32 reg;

        reg = readl(pdata->mc_err_base + HB_DDR_ECC_OPT);
        reg &= HB_DDR_ECC_OPT_MODE_MASK;
        reg |= (synd << HB_DDR_ECC_OPT_XOR_SHIFT) | HB_DDR_ECC_OPT_FWC;
        writel(reg, pdata->mc_err_base + HB_DDR_ECC_OPT);
}

#define to_mci(k) container_of(k, struct mem_ctl_info, dev)

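/*
 * Write-only sysfs attribute for error injection. The value written is
 * parsed as a hex syndrome and handed to highbank_mc_err_inject(), e.g.
 * (the memory controller instance number may differ):
 *
 *   echo 0x73 > /sys/devices/system/edac/mc/mc0/inject_ctrl
 */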
static ssize_t highbank_mc_inject_ctrl(struct device *dev,
        struct device_attribute *attr, const char *buf, size_t count)
{
        struct mem_ctl_info *mci = to_mci(dev);
        u8 synd;

        if (kstrtou8(buf, 16, &synd))
                return -EINVAL;

        highbank_mc_err_inject(mci, synd);

        return count;
}

static DEVICE_ATTR(inject_ctrl, S_IWUSR, NULL, highbank_mc_inject_ctrl);

static struct attribute *highbank_dev_attrs[] = {
        &dev_attr_inject_ctrl.attr,
        NULL
};

ATTRIBUTE_GROUPS(highbank_dev);

struct hb_mc_settings {
        int     err_offset;
        int     int_offset;
};

static struct hb_mc_settings hb_settings = {
        .err_offset = HB_DDR_ECC_ERR_BASE,
        .int_offset = HB_DDR_ECC_INT_BASE,
};

static struct hb_mc_settings mw_settings = {
        .err_offset = MW_DDR_ECC_ERR_BASE,
        .int_offset = MW_DDR_ECC_INT_BASE,
};

static const struct of_device_id hb_ddr_ctrl_of_match[] = {
        { .compatible = "calxeda,hb-ddr-ctrl",          .data = &hb_settings },
        { .compatible = "calxeda,ecx-2000-ddr-ctrl",    .data = &mw_settings },
        {},
};
MODULE_DEVICE_TABLE(of, hb_ddr_ctrl_of_match);

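/*
 * The driver binds to a DDR controller node in the device tree. A
 * minimal sketch of such a node is shown below; the reg and interrupts
 * values are illustrative placeholders, the real ones come from the
 * board's device tree:
 *
 *   memory-controller@fff00000 {
 *           compatible = "calxeda,hb-ddr-ctrl";
 *           reg = <0xfff00000 0x1000>;
 *           interrupts = <0 91 4>;
 *   };
 */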
static int highbank_mc_probe(struct platform_device *pdev)
{
        const struct of_device_id *id;
        const struct hb_mc_settings *settings;
        struct edac_mc_layer layers[2];
        struct mem_ctl_info *mci;
        struct hb_mc_drvdata *drvdata;
        struct dimm_info *dimm;
        struct resource *r;
        void __iomem *base;
        u32 control;
        int irq;
        int res = 0;

        id = of_match_device(hb_ddr_ctrl_of_match, &pdev->dev);
        if (!id)
                return -ENODEV;

        layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
        layers[0].size = 1;
        layers[0].is_virt_csrow = true;
        layers[1].type = EDAC_MC_LAYER_CHANNEL;
        layers[1].size = 1;
        layers[1].is_virt_csrow = false;
        mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
                            sizeof(struct hb_mc_drvdata));
        if (!mci)
                return -ENOMEM;

        mci->pdev = &pdev->dev;
        drvdata = mci->pvt_info;
        platform_set_drvdata(pdev, mci);

        if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) {
                res = -ENOMEM;
                goto free;
        }

        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!r) {
                dev_err(&pdev->dev, "Unable to get mem resource\n");
                res = -ENODEV;
                goto err;
        }

        if (!devm_request_mem_region(&pdev->dev, r->start,
                                     resource_size(r), dev_name(&pdev->dev))) {
                dev_err(&pdev->dev, "Error while requesting mem region\n");
                res = -EBUSY;
                goto err;
        }

        base = devm_ioremap(&pdev->dev, r->start, resource_size(r));
        if (!base) {
                dev_err(&pdev->dev, "Unable to map regs\n");
                res = -ENOMEM;
                goto err;
        }

        settings = id->data;
        drvdata->mc_err_base = base + settings->err_offset;
        drvdata->mc_int_base = base + settings->int_offset;

        control = readl(drvdata->mc_err_base + HB_DDR_ECC_OPT) & 0x3;
        if (!control || (control == 0x2)) {
                dev_err(&pdev->dev, "No ECC present, or ECC disabled\n");
                res = -ENODEV;
                goto err;
        }

        mci->mtype_cap = MEM_FLAG_DDR3;
        mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
        mci->edac_cap = EDAC_FLAG_SECDED;
        mci->mod_name = pdev->dev.driver->name;
        mci->ctl_name = id->compatible;
        mci->dev_name = dev_name(&pdev->dev);
        mci->scrub_mode = SCRUB_SW_SRC;

        /* Only a single 4GB DIMM is supported */
        dimm = *mci->dimms;
        dimm->nr_pages = (~0UL >> PAGE_SHIFT) + 1;
        dimm->grain = 8;
        dimm->dtype = DEV_X8;
        dimm->mtype = MEM_DDR3;
        dimm->edac_mode = EDAC_SECDED;

        res = edac_mc_add_mc_with_groups(mci, highbank_dev_groups);
        if (res < 0)
                goto err;

        irq = platform_get_irq(pdev, 0);
        res = devm_request_irq(&pdev->dev, irq, highbank_mc_err_handler,
                               0, dev_name(&pdev->dev), mci);
        if (res < 0) {
                dev_err(&pdev->dev, "Unable to request irq %d\n", irq);
                goto err2;
        }

        devres_close_group(&pdev->dev, NULL);
        return 0;
err2:
        edac_mc_del_mc(&pdev->dev);
err:
        devres_release_group(&pdev->dev, NULL);
free:
        edac_mc_free(mci);
        return res;
}

static int highbank_mc_remove(struct platform_device *pdev)
{
        struct mem_ctl_info *mci = platform_get_drvdata(pdev);

        edac_mc_del_mc(&pdev->dev);
        edac_mc_free(mci);
        return 0;
}

static struct platform_driver highbank_mc_edac_driver = {
        .probe = highbank_mc_probe,
        .remove = highbank_mc_remove,
        .driver = {
                .name = "hb_mc_edac",
                .of_match_table = hb_ddr_ctrl_of_match,
        },
};

module_platform_driver(highbank_mc_edac_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Calxeda, Inc.");
MODULE_DESCRIPTION("EDAC Driver for Calxeda Highbank");