drivers/cxl/acpi.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2021 Intel Corporation. All rights reserved. */
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/pci.h>
#include "cxl.h"

static struct acpi_table_header *acpi_cedt;

/* Encoding defined in CXL 2.0 8.2.5.12.7 HDM Decoder Control Register */
#define CFMWS_INTERLEAVE_WAYS(x)        (1 << (x)->interleave_ways)
#define CFMWS_INTERLEAVE_GRANULARITY(x) ((x)->granularity + 8)
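/*
 * Example (following the CEDT CFMWS field encodings): an encoded
 * interleave_ways of 2 selects 1 << 2 == 4-way interleave, and an
 * encoded granularity of 0 corresponds to 2^(0 + 8) == 256 bytes.
 */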

static unsigned long cfmws_to_decoder_flags(int restrictions)
{
        unsigned long flags = 0;

        if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_TYPE2)
                flags |= CXL_DECODER_F_TYPE2;
        if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_TYPE3)
                flags |= CXL_DECODER_F_TYPE3;
        if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_VOLATILE)
                flags |= CXL_DECODER_F_RAM;
        if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_PMEM)
                flags |= CXL_DECODER_F_PMEM;
        if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_FIXED)
                flags |= CXL_DECODER_F_LOCK;

        return flags;
}

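/*
 * Validate a CFMWS entry before a decoder is registered for it: only
 * modulo interleave arithmetic is supported, base and size must be
 * 256MB aligned, and the entry must be long enough to hold its
 * interleave target list.
 */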
static int cxl_acpi_cfmws_verify(struct device *dev,
                                 struct acpi_cedt_cfmws *cfmws)
{
        int expected_len;

        if (cfmws->interleave_arithmetic != ACPI_CEDT_CFMWS_ARITHMETIC_MODULO) {
                dev_err(dev, "CFMWS Unsupported Interleave Arithmetic\n");
                return -EINVAL;
        }

        if (!IS_ALIGNED(cfmws->base_hpa, SZ_256M)) {
                dev_err(dev, "CFMWS Base HPA not 256MB aligned\n");
                return -EINVAL;
        }

        if (!IS_ALIGNED(cfmws->window_size, SZ_256M)) {
                dev_err(dev, "CFMWS Window Size not 256MB aligned\n");
                return -EINVAL;
        }

        expected_len = struct_size((cfmws), interleave_targets,
                                   CFMWS_INTERLEAVE_WAYS(cfmws));

        if (cfmws->header.length < expected_len) {
                dev_err(dev, "CFMWS length %d less than expected %d\n",
                        cfmws->header.length, expected_len);
                return -EINVAL;
        }

        if (cfmws->header.length > expected_len)
                dev_dbg(dev, "CFMWS length %d greater than expected %d\n",
                        cfmws->header.length, expected_len);

        return 0;
}

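/*
 * Walk the CEDT and register a root decoder with the root port for
 * every CFMWS entry that passes verification. Malformed entries are
 * skipped so that the remaining windows can still be enumerated.
 */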
static void cxl_add_cfmws_decoders(struct device *dev,
                                   struct cxl_port *root_port)
{
        struct acpi_cedt_cfmws *cfmws;
        struct cxl_decoder *cxld;
        acpi_size len, cur = 0;
        void *cedt_subtable;
        unsigned long flags;
        int rc;

        len = acpi_cedt->length - sizeof(*acpi_cedt);
        cedt_subtable = acpi_cedt + 1;

        while (cur < len) {
                struct acpi_cedt_header *c = cedt_subtable + cur;

                if (c->type != ACPI_CEDT_TYPE_CFMWS) {
                        cur += c->length;
                        continue;
                }

                cfmws = cedt_subtable + cur;

                if (cfmws->header.length < sizeof(*cfmws)) {
                        dev_warn_once(dev,
                                      "CFMWS entry skipped: invalid length:%u\n",
                                      cfmws->header.length);
                        cur += c->length;
                        continue;
                }

                rc = cxl_acpi_cfmws_verify(dev, cfmws);
                if (rc) {
                        dev_err(dev, "CFMWS range %#llx-%#llx not registered\n",
                                cfmws->base_hpa, cfmws->base_hpa +
                                cfmws->window_size - 1);
                        cur += c->length;
                        continue;
                }

                flags = cfmws_to_decoder_flags(cfmws->restrictions);
                cxld = devm_cxl_add_decoder(dev, root_port,
                                            CFMWS_INTERLEAVE_WAYS(cfmws),
                                            cfmws->base_hpa, cfmws->window_size,
                                            CFMWS_INTERLEAVE_WAYS(cfmws),
                                            CFMWS_INTERLEAVE_GRANULARITY(cfmws),
                                            CXL_DECODER_EXPANDER,
                                            flags);

                if (IS_ERR(cxld)) {
                        dev_err(dev, "Failed to add decoder for %#llx-%#llx\n",
                                cfmws->base_hpa, cfmws->base_hpa +
                                cfmws->window_size - 1);
                } else {
                        dev_dbg(dev, "add: %s range %#llx-%#llx\n",
                                dev_name(&cxld->dev), cfmws->base_hpa,
                                cfmws->base_hpa + cfmws->window_size - 1);
                }
                cur += c->length;
        }
}

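/*
 * Find the CHBS entry whose UID matches the host bridge _UID. Warn on
 * (and ignore) duplicate UIDs; return ERR_PTR(-ENODEV) if no entry
 * matches.
 */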
static struct acpi_cedt_chbs *cxl_acpi_match_chbs(struct device *dev, u32 uid)
{
        struct acpi_cedt_chbs *chbs, *chbs_match = NULL;
        acpi_size len, cur = 0;
        void *cedt_subtable;

        len = acpi_cedt->length - sizeof(*acpi_cedt);
        cedt_subtable = acpi_cedt + 1;

        while (cur < len) {
                struct acpi_cedt_header *c = cedt_subtable + cur;

                if (c->type != ACPI_CEDT_TYPE_CHBS) {
                        cur += c->length;
                        continue;
                }

                chbs = cedt_subtable + cur;

                if (chbs->header.length < sizeof(*chbs)) {
                        dev_warn_once(dev,
                                      "CHBS entry skipped: invalid length:%u\n",
                                      chbs->header.length);
                        cur += c->length;
                        continue;
                }

                if (chbs->uid != uid) {
                        cur += c->length;
                        continue;
                }

                if (chbs_match) {
                        dev_warn_once(dev,
                                      "CHBS entry skipped: duplicate UID:%u\n",
                                      uid);
                        cur += c->length;
                        continue;
                }

                chbs_match = chbs;
                cur += c->length;
        }

        return chbs_match ? chbs_match : ERR_PTR(-ENODEV);
}

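/* A missing CHBS maps to CXL_RESOURCE_NONE rather than a hard failure */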
static resource_size_t get_chbcr(struct acpi_cedt_chbs *chbs)
{
        return IS_ERR(chbs) ? CXL_RESOURCE_NONE : chbs->base;
}

struct cxl_walk_context {
        struct device *dev;
        struct pci_bus *root;
        struct cxl_port *port;
        int error;
        int count;
};

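/*
 * pci_walk_bus() callback: register each PCIe Root Port found directly
 * on the host bridge's root bus as a CXL dport of the enclosing port.
 */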
static int match_add_root_ports(struct pci_dev *pdev, void *data)
{
        struct cxl_walk_context *ctx = data;
        struct pci_bus *root_bus = ctx->root;
        struct cxl_port *port = ctx->port;
        int type = pci_pcie_type(pdev);
        struct device *dev = ctx->dev;
        u32 lnkcap, port_num;
        int rc;

        if (pdev->bus != root_bus)
                return 0;
        if (!pci_is_pcie(pdev))
                return 0;
        if (type != PCI_EXP_TYPE_ROOT_PORT)
                return 0;
        if (pci_read_config_dword(pdev, pci_pcie_cap(pdev) + PCI_EXP_LNKCAP,
                                  &lnkcap) != PCIBIOS_SUCCESSFUL)
                return 0;

        /* TODO walk DVSEC to find component register base */
        port_num = FIELD_GET(PCI_EXP_LNKCAP_PN, lnkcap);
        rc = cxl_add_dport(port, &pdev->dev, port_num, CXL_RESOURCE_NONE);
        if (rc) {
                ctx->error = rc;
                return rc;
        }
        ctx->count++;

        dev_dbg(dev, "add dport%d: %s\n", port_num, dev_name(&pdev->dev));

        return 0;
}

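/* Look up the dport previously registered for @dev under @port, if any */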
static struct cxl_dport *find_dport_by_dev(struct cxl_port *port, struct device *dev)
{
        struct cxl_dport *dport;

        device_lock(&port->dev);
        list_for_each_entry(dport, &port->dports, list)
                if (dport->dport == dev) {
                        device_unlock(&port->dev);
                        return dport;
                }

        device_unlock(&port->dev);
        return NULL;
}

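/* ACPI0016 is the _HID of a CXL host bridge */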
static struct acpi_device *to_cxl_host_bridge(struct device *dev)
{
        struct acpi_device *adev = to_acpi_device(dev);

        if (strcmp(acpi_device_hid(adev), "ACPI0016") == 0)
                return adev;
        return NULL;
}

/*
 * A host bridge is a dport to a CFMWS decoder and a uport to the
 * dports (PCIe Root Ports) within the host bridge.
 */
static int add_host_bridge_uport(struct device *match, void *arg)
{
        struct acpi_device *bridge = to_cxl_host_bridge(match);
        struct cxl_port *root_port = arg;
        struct device *host = root_port->dev.parent;
        struct acpi_pci_root *pci_root;
        struct cxl_walk_context ctx;
        struct cxl_decoder *cxld;
        struct cxl_dport *dport;
        struct cxl_port *port;

        if (!bridge)
                return 0;

        pci_root = acpi_pci_find_root(bridge->handle);
        if (!pci_root)
                return -ENXIO;

        dport = find_dport_by_dev(root_port, match);
        if (!dport) {
                dev_dbg(host, "host bridge expected and not found\n");
                return -ENODEV;
        }

        port = devm_cxl_add_port(host, match, dport->component_reg_phys,
                                 root_port);
        if (IS_ERR(port))
                return PTR_ERR(port);
        dev_dbg(host, "%s: add: %s\n", dev_name(match), dev_name(&port->dev));

        ctx = (struct cxl_walk_context){
                .dev = host,
                .root = pci_root->bus,
                .port = port,
        };
        pci_walk_bus(pci_root->bus, match_add_root_ports, &ctx);

        if (ctx.count == 0)
                return -ENODEV;
        if (ctx.error)
                return ctx.error;

        /* TODO: Scan CHBCR for HDM Decoder resources */

        /*
         * In the single-port host-bridge case there are no HDM decoders
         * in the CHBCR and a 1:1 passthrough decode is implied.
         */
        if (ctx.count == 1) {
                cxld = devm_cxl_add_passthrough_decoder(host, port);
                if (IS_ERR(cxld))
                        return PTR_ERR(cxld);

                dev_dbg(host, "add: %s\n", dev_name(&cxld->dev));
        }

        return 0;
}

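/*
 * Register a host bridge as a dport of the CXL root port, using its
 * ACPI _UID to look up the matching CHBS for the component register
 * base. A missing CHBS is tolerated; see get_chbcr().
 */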
static int add_host_bridge_dport(struct device *match, void *arg)
{
        int rc;
        acpi_status status;
        unsigned long long uid;
        struct acpi_cedt_chbs *chbs;
        struct cxl_port *root_port = arg;
        struct device *host = root_port->dev.parent;
        struct acpi_device *bridge = to_cxl_host_bridge(match);

        if (!bridge)
                return 0;

        status = acpi_evaluate_integer(bridge->handle, METHOD_NAME__UID, NULL,
                                       &uid);
        if (status != AE_OK) {
                dev_err(host, "unable to retrieve _UID of %s\n",
                        dev_name(match));
                return -ENODEV;
        }

        chbs = cxl_acpi_match_chbs(host, uid);
        if (IS_ERR(chbs))
                dev_dbg(host, "No CHBS found for Host Bridge: %s\n",
                        dev_name(match));

        rc = cxl_add_dport(root_port, match, uid, get_chbcr(chbs));
        if (rc) {
                dev_err(host, "failed to add downstream port: %s\n",
                        dev_name(match));
                return rc;
        }
        dev_dbg(host, "add dport%llu: %s\n", uid, dev_name(match));
        return 0;
}

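/*
 * Register an nvdimm-bridge under the root port once the first
 * pmem-capable root decoder is found; returning 1 stops the
 * device_for_each_child() iteration.
 */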
static int add_root_nvdimm_bridge(struct device *match, void *data)
{
        struct cxl_decoder *cxld;
        struct cxl_port *root_port = data;
        struct cxl_nvdimm_bridge *cxl_nvb;
        struct device *host = root_port->dev.parent;

        if (!is_root_decoder(match))
                return 0;

        cxld = to_cxl_decoder(match);
        if (!(cxld->flags & CXL_DECODER_F_PMEM))
                return 0;

        cxl_nvb = devm_cxl_add_nvdimm_bridge(host, root_port);
        if (IS_ERR(cxl_nvb)) {
                dev_dbg(host, "failed to register pmem\n");
                return PTR_ERR(cxl_nvb);
        }
        dev_dbg(host, "%s: add: %s\n", dev_name(&root_port->dev),
                dev_name(&cxl_nvb->dev));
        return 1;
}

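/*
 * Probe order: add the ACPI0017 device as the CXL root port, register
 * each ACPI0016 host bridge as a dport, publish CFMWS windows as root
 * decoders, then descend into each host bridge as a uport to its PCIe
 * Root Ports. The CEDT is only needed during probe and is released on
 * exit.
 */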
static int cxl_acpi_probe(struct platform_device *pdev)
{
        int rc;
        acpi_status status;
        struct cxl_port *root_port;
        struct device *host = &pdev->dev;
        struct acpi_device *adev = ACPI_COMPANION(host);

        root_port = devm_cxl_add_port(host, host, CXL_RESOURCE_NONE, NULL);
        if (IS_ERR(root_port))
                return PTR_ERR(root_port);
        dev_dbg(host, "add: %s\n", dev_name(&root_port->dev));

        status = acpi_get_table(ACPI_SIG_CEDT, 0, &acpi_cedt);
        if (ACPI_FAILURE(status))
                return -ENXIO;

        rc = bus_for_each_dev(adev->dev.bus, NULL, root_port,
                              add_host_bridge_dport);
        if (rc)
                goto out;

        cxl_add_cfmws_decoders(host, root_port);

        /*
         * Root level scanned with host-bridge as dports, now scan host-bridges
         * for their role as CXL uports to their CXL-capable PCIe Root Ports.
         */
        rc = bus_for_each_dev(adev->dev.bus, NULL, root_port,
                              add_host_bridge_uport);
        if (rc)
                goto out;

        if (IS_ENABLED(CONFIG_CXL_PMEM))
                rc = device_for_each_child(&root_port->dev, root_port,
                                           add_root_nvdimm_bridge);

out:
        acpi_put_table(acpi_cedt);
        if (rc < 0)
                return rc;
        return 0;
}

static const struct acpi_device_id cxl_acpi_ids[] = {
        { "ACPI0017", 0 },
        { "", 0 },
};
MODULE_DEVICE_TABLE(acpi, cxl_acpi_ids);

static struct platform_driver cxl_acpi_driver = {
        .probe = cxl_acpi_probe,
        .driver = {
                .name = KBUILD_MODNAME,
                .acpi_match_table = cxl_acpi_ids,
        },
};

module_platform_driver(cxl_acpi_driver);
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(CXL);