cxl/pci: Rename pci.h to cxlpci.h
[platform/kernel/linux-starfive.git] / drivers / cxl / acpi.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright(c) 2021 Intel Corporation. All rights reserved. */
3 #include <linux/platform_device.h>
4 #include <linux/module.h>
5 #include <linux/device.h>
6 #include <linux/kernel.h>
7 #include <linux/acpi.h>
8 #include <linux/pci.h>
9 #include "cxlpci.h"
10 #include "cxl.h"
11
12 /* Encode defined in CXL 2.0 8.2.5.12.7 HDM Decoder Control Register */
13 #define CFMWS_INTERLEAVE_WAYS(x)        (1 << (x)->interleave_ways)
14 #define CFMWS_INTERLEAVE_GRANULARITY(x) ((x)->granularity + 8)
15
16 static unsigned long cfmws_to_decoder_flags(int restrictions)
17 {
18         unsigned long flags = 0;
19
20         if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_TYPE2)
21                 flags |= CXL_DECODER_F_TYPE2;
22         if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_TYPE3)
23                 flags |= CXL_DECODER_F_TYPE3;
24         if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_VOLATILE)
25                 flags |= CXL_DECODER_F_RAM;
26         if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_PMEM)
27                 flags |= CXL_DECODER_F_PMEM;
28         if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_FIXED)
29                 flags |= CXL_DECODER_F_LOCK;
30
31         return flags;
32 }
33
34 static int cxl_acpi_cfmws_verify(struct device *dev,
35                                  struct acpi_cedt_cfmws *cfmws)
36 {
37         int expected_len;
38
39         if (cfmws->interleave_arithmetic != ACPI_CEDT_CFMWS_ARITHMETIC_MODULO) {
40                 dev_err(dev, "CFMWS Unsupported Interleave Arithmetic\n");
41                 return -EINVAL;
42         }
43
44         if (!IS_ALIGNED(cfmws->base_hpa, SZ_256M)) {
45                 dev_err(dev, "CFMWS Base HPA not 256MB aligned\n");
46                 return -EINVAL;
47         }
48
49         if (!IS_ALIGNED(cfmws->window_size, SZ_256M)) {
50                 dev_err(dev, "CFMWS Window Size not 256MB aligned\n");
51                 return -EINVAL;
52         }
53
54         if (CFMWS_INTERLEAVE_WAYS(cfmws) > CXL_DECODER_MAX_INTERLEAVE) {
55                 dev_err(dev, "CFMWS Interleave Ways (%d) too large\n",
56                         CFMWS_INTERLEAVE_WAYS(cfmws));
57                 return -EINVAL;
58         }
59
60         expected_len = struct_size((cfmws), interleave_targets,
61                                    CFMWS_INTERLEAVE_WAYS(cfmws));
62
63         if (cfmws->header.length < expected_len) {
64                 dev_err(dev, "CFMWS length %d less than expected %d\n",
65                         cfmws->header.length, expected_len);
66                 return -EINVAL;
67         }
68
69         if (cfmws->header.length > expected_len)
70                 dev_dbg(dev, "CFMWS length %d greater than expected %d\n",
71                         cfmws->header.length, expected_len);
72
73         return 0;
74 }
75
/* Context handed to the CFMWS sub-table walker (cxl_parse_cfmws()) */
struct cxl_cfmws_context {
	struct device *dev;		/* device used for log messages */
	struct cxl_port *root_port;	/* root port hosting the root decoders */
};
80
81 static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
82                            const unsigned long end)
83 {
84         int target_map[CXL_DECODER_MAX_INTERLEAVE];
85         struct cxl_cfmws_context *ctx = arg;
86         struct cxl_port *root_port = ctx->root_port;
87         struct device *dev = ctx->dev;
88         struct acpi_cedt_cfmws *cfmws;
89         struct cxl_decoder *cxld;
90         int rc, i;
91
92         cfmws = (struct acpi_cedt_cfmws *) header;
93
94         rc = cxl_acpi_cfmws_verify(dev, cfmws);
95         if (rc) {
96                 dev_err(dev, "CFMWS range %#llx-%#llx not registered\n",
97                         cfmws->base_hpa,
98                         cfmws->base_hpa + cfmws->window_size - 1);
99                 return 0;
100         }
101
102         for (i = 0; i < CFMWS_INTERLEAVE_WAYS(cfmws); i++)
103                 target_map[i] = cfmws->interleave_targets[i];
104
105         cxld = cxl_root_decoder_alloc(root_port, CFMWS_INTERLEAVE_WAYS(cfmws));
106         if (IS_ERR(cxld))
107                 return 0;
108
109         cxld->flags = cfmws_to_decoder_flags(cfmws->restrictions);
110         cxld->target_type = CXL_DECODER_EXPANDER;
111         cxld->platform_res = (struct resource)DEFINE_RES_MEM(cfmws->base_hpa,
112                                                              cfmws->window_size);
113         cxld->interleave_ways = CFMWS_INTERLEAVE_WAYS(cfmws);
114         cxld->interleave_granularity = CFMWS_INTERLEAVE_GRANULARITY(cfmws);
115
116         rc = cxl_decoder_add(cxld, target_map);
117         if (rc)
118                 put_device(&cxld->dev);
119         else
120                 rc = cxl_decoder_autoremove(dev, cxld);
121         if (rc) {
122                 dev_err(dev, "Failed to add decoder for %pr\n",
123                         &cxld->platform_res);
124                 return 0;
125         }
126         dev_dbg(dev, "add: %s node: %d range %pr\n", dev_name(&cxld->dev),
127                 phys_to_target_node(cxld->platform_res.start),
128                 &cxld->platform_res);
129
130         return 0;
131 }
132
133 __mock int match_add_root_ports(struct pci_dev *pdev, void *data)
134 {
135         resource_size_t creg = CXL_RESOURCE_NONE;
136         struct cxl_walk_context *ctx = data;
137         struct pci_bus *root_bus = ctx->root;
138         struct cxl_port *port = ctx->port;
139         int type = pci_pcie_type(pdev);
140         struct device *dev = ctx->dev;
141         struct cxl_register_map map;
142         u32 lnkcap, port_num;
143         int rc;
144
145         if (pdev->bus != root_bus)
146                 return 0;
147         if (!pci_is_pcie(pdev))
148                 return 0;
149         if (type != PCI_EXP_TYPE_ROOT_PORT)
150                 return 0;
151         if (pci_read_config_dword(pdev, pci_pcie_cap(pdev) + PCI_EXP_LNKCAP,
152                                   &lnkcap) != PCIBIOS_SUCCESSFUL)
153                 return 0;
154
155         /* The driver doesn't rely on component registers for Root Ports yet. */
156         rc = cxl_find_regblock(pdev, CXL_REGLOC_RBI_COMPONENT, &map);
157         if (!rc)
158                 dev_info(&pdev->dev, "No component register block found\n");
159
160         creg = cxl_regmap_to_base(pdev, &map);
161
162         port_num = FIELD_GET(PCI_EXP_LNKCAP_PN, lnkcap);
163         rc = cxl_add_dport(port, &pdev->dev, port_num, creg);
164         if (rc) {
165                 ctx->error = rc;
166                 return rc;
167         }
168         ctx->count++;
169
170         dev_dbg(dev, "add dport%d: %s\n", port_num, dev_name(&pdev->dev));
171
172         return 0;
173 }
174
175 static struct cxl_dport *find_dport_by_dev(struct cxl_port *port, struct device *dev)
176 {
177         struct cxl_dport *dport;
178
179         cxl_device_lock(&port->dev);
180         list_for_each_entry(dport, &port->dports, list)
181                 if (dport->dport == dev) {
182                         cxl_device_unlock(&port->dev);
183                         return dport;
184                 }
185
186         cxl_device_unlock(&port->dev);
187         return NULL;
188 }
189
190 __mock struct acpi_device *to_cxl_host_bridge(struct device *host,
191                                               struct device *dev)
192 {
193         struct acpi_device *adev = to_acpi_device(dev);
194
195         if (!acpi_pci_find_root(adev->handle))
196                 return NULL;
197
198         if (strcmp(acpi_device_hid(adev), "ACPI0016") == 0)
199                 return adev;
200         return NULL;
201 }
202
/*
 * A host bridge is a dport to a CFMWS decode and it is a uport to the
 * dport (PCIe Root Ports) in the host bridge.
 */
static int add_host_bridge_uport(struct device *match, void *arg)
{
	struct cxl_port *root_port = arg;
	struct device *host = root_port->dev.parent;
	struct acpi_device *bridge = to_cxl_host_bridge(host, match);
	struct acpi_pci_root *pci_root;
	struct cxl_walk_context ctx;
	int single_port_map[1], rc;
	struct cxl_decoder *cxld;
	struct cxl_dport *dport;
	struct cxl_port *port;

	/* Not a CXL host bridge: nothing to do, keep iterating */
	if (!bridge)
		return 0;

	/* add_host_bridge_dport() should already have registered this dport */
	dport = find_dport_by_dev(root_port, match);
	if (!dport) {
		dev_dbg(host, "host bridge expected and not found\n");
		return 0;
	}

	/*
	 * Note that this lookup already succeeded in
	 * to_cxl_host_bridge(), so no need to check for failure here
	 */
	pci_root = acpi_pci_find_root(bridge->handle);
	rc = devm_cxl_register_pci_bus(host, match, pci_root->bus);
	if (rc)
		return rc;

	/* Add the host bridge itself as a port below the CXL root port */
	port = devm_cxl_add_port(host, match, dport->component_reg_phys,
				 root_port);
	if (IS_ERR(port))
		return PTR_ERR(port);
	dev_dbg(host, "%s: add: %s\n", dev_name(match), dev_name(&port->dev));

	/* Enumerate the bridge's Root Ports as dports of the new port */
	ctx = (struct cxl_walk_context){
		.dev = host,
		.root = pci_root->bus,
		.port = port,
	};
	pci_walk_bus(pci_root->bus, match_add_root_ports, &ctx);

	if (ctx.count == 0)
		return -ENODEV;
	if (ctx.error)
		return ctx.error;
	/* Multi-ported bridges are handled via their HDM decoders later */
	if (ctx.count > 1)
		return 0;

	/* TODO: Scan CHBCR for HDM Decoder resources */

	/*
	 * Per the CXL specification (8.2.5.12 CXL HDM Decoder Capability
	 * Structure) single ported host-bridges need not publish a decoder
	 * capability when a passthrough decode can be assumed, i.e. all
	 * transactions that the uport sees are claimed and passed to the single
	 * dport. Disable the range until the first CXL region is enumerated /
	 * activated.
	 */
	cxld = cxl_switch_decoder_alloc(port, 1);
	if (IS_ERR(cxld))
		return PTR_ERR(cxld);

	/* ctx.count == 1, so the list holds exactly one dport */
	cxl_device_lock(&port->dev);
	dport = list_first_entry(&port->dports, typeof(*dport), list);
	cxl_device_unlock(&port->dev);

	single_port_map[0] = dport->port_id;

	rc = cxl_decoder_add(cxld, single_port_map);
	if (rc)
		put_device(&cxld->dev);	/* drop the allocation reference */
	else
		rc = cxl_decoder_autoremove(host, cxld);

	if (rc == 0)
		dev_dbg(host, "add: %s\n", dev_name(&cxld->dev));
	return rc;
}
287
/* Context for resolving a host bridge _UID to its CHBCR via the CEDT CHBS */
struct cxl_chbs_context {
	struct device *dev;		/* device used for log messages */
	unsigned long long uid;		/* host bridge _UID to match */
	resource_size_t chbcr;		/* out: CHBS base, 0 if not found */
};
293
294 static int cxl_get_chbcr(union acpi_subtable_headers *header, void *arg,
295                          const unsigned long end)
296 {
297         struct cxl_chbs_context *ctx = arg;
298         struct acpi_cedt_chbs *chbs;
299
300         if (ctx->chbcr)
301                 return 0;
302
303         chbs = (struct acpi_cedt_chbs *) header;
304
305         if (ctx->uid != chbs->uid)
306                 return 0;
307         ctx->chbcr = chbs->base;
308
309         return 0;
310 }
311
312 static int add_host_bridge_dport(struct device *match, void *arg)
313 {
314         int rc;
315         acpi_status status;
316         unsigned long long uid;
317         struct cxl_chbs_context ctx;
318         struct cxl_port *root_port = arg;
319         struct device *host = root_port->dev.parent;
320         struct acpi_device *bridge = to_cxl_host_bridge(host, match);
321
322         if (!bridge)
323                 return 0;
324
325         status = acpi_evaluate_integer(bridge->handle, METHOD_NAME__UID, NULL,
326                                        &uid);
327         if (status != AE_OK) {
328                 dev_err(host, "unable to retrieve _UID of %s\n",
329                         dev_name(match));
330                 return -ENODEV;
331         }
332
333         ctx = (struct cxl_chbs_context) {
334                 .dev = host,
335                 .uid = uid,
336         };
337         acpi_table_parse_cedt(ACPI_CEDT_TYPE_CHBS, cxl_get_chbcr, &ctx);
338
339         if (ctx.chbcr == 0) {
340                 dev_warn(host, "No CHBS found for Host Bridge: %s\n",
341                          dev_name(match));
342                 return 0;
343         }
344
345         cxl_device_lock(&root_port->dev);
346         rc = cxl_add_dport(root_port, match, uid, ctx.chbcr);
347         cxl_device_unlock(&root_port->dev);
348         if (rc) {
349                 dev_err(host, "failed to add downstream port: %s\n",
350                         dev_name(match));
351                 return rc;
352         }
353         dev_dbg(host, "add dport%llu: %s\n", uid, dev_name(match));
354         return 0;
355 }
356
357 static int add_root_nvdimm_bridge(struct device *match, void *data)
358 {
359         struct cxl_decoder *cxld;
360         struct cxl_port *root_port = data;
361         struct cxl_nvdimm_bridge *cxl_nvb;
362         struct device *host = root_port->dev.parent;
363
364         if (!is_root_decoder(match))
365                 return 0;
366
367         cxld = to_cxl_decoder(match);
368         if (!(cxld->flags & CXL_DECODER_F_PMEM))
369                 return 0;
370
371         cxl_nvb = devm_cxl_add_nvdimm_bridge(host, root_port);
372         if (IS_ERR(cxl_nvb)) {
373                 dev_dbg(host, "failed to register pmem\n");
374                 return PTR_ERR(cxl_nvb);
375         }
376         dev_dbg(host, "%s: add: %s\n", dev_name(&root_port->dev),
377                 dev_name(&cxl_nvb->dev));
378         return 1;
379 }
380
/*
 * Probe the ACPI0017 device: build the CXL root port, enumerate host
 * bridges as dports, register root decoders from the CEDT CFMWS, then
 * descend into each host bridge's PCIe Root Ports.
 */
static int cxl_acpi_probe(struct platform_device *pdev)
{
	int rc;
	struct cxl_port *root_port;
	struct device *host = &pdev->dev;
	struct acpi_device *adev = ACPI_COMPANION(host);
	struct cxl_cfmws_context ctx;

	/* The root port anchors the whole CXL port topology */
	root_port = devm_cxl_add_port(host, host, CXL_RESOURCE_NONE, NULL);
	if (IS_ERR(root_port))
		return PTR_ERR(root_port);
	dev_dbg(host, "add: %s\n", dev_name(&root_port->dev));

	/* Pass 1: register each host bridge as a dport of the root port */
	rc = bus_for_each_dev(adev->dev.bus, NULL, root_port,
			      add_host_bridge_dport);
	if (rc < 0)
		return rc;

	/* Register a root decoder per CEDT CFMWS window */
	ctx = (struct cxl_cfmws_context) {
		.dev = host,
		.root_port = root_port,
	};
	acpi_table_parse_cedt(ACPI_CEDT_TYPE_CFMWS, cxl_parse_cfmws, &ctx);

	/*
	 * Root level scanned with host-bridge as dports, now scan host-bridges
	 * for their role as CXL uports to their CXL-capable PCIe Root Ports.
	 */
	rc = bus_for_each_dev(adev->dev.bus, NULL, root_port,
			      add_host_bridge_uport);
	if (rc < 0)
		return rc;

	/*
	 * Optional: when CONFIG_CXL_PMEM is off, rc keeps the (>= 0) value
	 * from the uport scan above and the check below is a no-op.
	 */
	if (IS_ENABLED(CONFIG_CXL_PMEM))
		rc = device_for_each_child(&root_port->dev, root_port,
					   add_root_nvdimm_bridge);
	if (rc < 0)
		return rc;

	return 0;
}
422
/* Bind to the ACPI0017 "CXL Root Object" device */
static const struct acpi_device_id cxl_acpi_ids[] = {
	{ "ACPI0017" },
	{ },
};
MODULE_DEVICE_TABLE(acpi, cxl_acpi_ids);

static struct platform_driver cxl_acpi_driver = {
	.probe = cxl_acpi_probe,
	.driver = {
		.name = KBUILD_MODNAME,
		.acpi_match_table = cxl_acpi_ids,
	},
};

module_platform_driver(cxl_acpi_driver);
MODULE_LICENSE("GPL v2");
/* Symbols used from the cxl_core and ACPI exported namespaces */
MODULE_IMPORT_NS(CXL);
MODULE_IMPORT_NS(ACPI);