// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/memregion.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <cxlmem.h>
#include <cxlpci.h>
#include <cxl.h>
#include "core.h"
/**
 * DOC: cxl core
 *
 * The CXL core provides a set of interfaces that can be consumed by CXL aware
 * drivers. The interfaces allow for creation, modification, and destruction of
 * regions, memory devices, ports, and decoders. CXL aware drivers must register
 * with the CXL core via these interfaces in order to be able to participate in
 * cross-device interleave coordination. The CXL core also establishes and
 * maintains the bridge to the nvdimm subsystem.
 *
 * In addition, the CXL core introduces a sysfs hierarchy to control the
 * devices that are instantiated by the core.
 */
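/*
 * An illustrative sketch of the topology the core registers (instance
 * numbers are examples; the name formats follow the dev_set_name() calls
 * in this file, and nesting reflects device parentage, not the flat
 * /sys/bus/cxl/devices listing):
 *
 *	root0                      platform CXL root (cxl_port)
 *	├── decoder0.0             root decoder
 *	└── port1                  host bridge or switch (cxl_port)
 *	    ├── dport0             link to the downstream port device
 *	    ├── decoder1.0         switch decoder
 *	    └── endpoint2          memdev-backed terminal cxl_port
 */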
static DEFINE_IDA(cxl_port_ida);
static DEFINE_XARRAY(cxl_root_buses);
static ssize_t devtype_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sysfs_emit(buf, "%s\n", dev->type->name);
}
static DEVICE_ATTR_RO(devtype);
static int cxl_device_id(const struct device *dev)
{
	if (dev->type == &cxl_nvdimm_bridge_type)
		return CXL_DEVICE_NVDIMM_BRIDGE;
	if (dev->type == &cxl_nvdimm_type)
		return CXL_DEVICE_NVDIMM;
	if (dev->type == CXL_PMEM_REGION_TYPE())
		return CXL_DEVICE_PMEM_REGION;
	if (dev->type == CXL_DAX_REGION_TYPE())
		return CXL_DEVICE_DAX_REGION;
	if (is_cxl_port(dev)) {
		if (is_cxl_root(to_cxl_port(dev)))
			return CXL_DEVICE_ROOT;
		return CXL_DEVICE_PORT;
	}
	if (is_cxl_memdev(dev))
		return CXL_DEVICE_MEMORY_EXPANDER;
	if (dev->type == CXL_REGION_TYPE())
		return CXL_DEVICE_REGION;
	return 0;
}
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	return sysfs_emit(buf, CXL_MODALIAS_FMT "\n", cxl_device_id(dev));
}
static DEVICE_ATTR_RO(modalias);
static struct attribute *cxl_base_attributes[] = {
	&dev_attr_devtype.attr,
	&dev_attr_modalias.attr,
	NULL,
};

struct attribute_group cxl_base_attribute_group = {
	.attrs = cxl_base_attributes,
};
static ssize_t start_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);

	return sysfs_emit(buf, "%#llx\n", cxld->hpa_range.start);
}
static DEVICE_ATTR_ADMIN_RO(start);
static ssize_t size_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);

	return sysfs_emit(buf, "%#llx\n", range_len(&cxld->hpa_range));
}
static DEVICE_ATTR_RO(size);
#define CXL_DECODER_FLAG_ATTR(name, flag)                            \
static ssize_t name##_show(struct device *dev,                       \
			   struct device_attribute *attr, char *buf) \
{                                                                    \
	struct cxl_decoder *cxld = to_cxl_decoder(dev);              \
                                                                     \
	return sysfs_emit(buf, "%s\n",                               \
			  (cxld->flags & (flag)) ? "1" : "0");       \
}                                                                    \
static DEVICE_ATTR_RO(name)
CXL_DECODER_FLAG_ATTR(cap_pmem, CXL_DECODER_F_PMEM);
CXL_DECODER_FLAG_ATTR(cap_ram, CXL_DECODER_F_RAM);
CXL_DECODER_FLAG_ATTR(cap_type2, CXL_DECODER_F_TYPE2);
CXL_DECODER_FLAG_ATTR(cap_type3, CXL_DECODER_F_TYPE3);
CXL_DECODER_FLAG_ATTR(locked, CXL_DECODER_F_LOCK);
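/*
 * For reference, CXL_DECODER_FLAG_ATTR(cap_pmem, CXL_DECODER_F_PMEM)
 * expands to (roughly):
 *
 *	static ssize_t cap_pmem_show(struct device *dev,
 *				     struct device_attribute *attr, char *buf)
 *	{
 *		struct cxl_decoder *cxld = to_cxl_decoder(dev);
 *
 *		return sysfs_emit(buf, "%s\n",
 *				  (cxld->flags & CXL_DECODER_F_PMEM) ? "1" : "0");
 *	}
 *	static DEVICE_ATTR_RO(cap_pmem);
 */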
static ssize_t target_type_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);

	switch (cxld->target_type) {
	case CXL_DECODER_ACCELERATOR:
		return sysfs_emit(buf, "accelerator\n");
	case CXL_DECODER_EXPANDER:
		return sysfs_emit(buf, "expander\n");
	}
	return -ENXIO;
}
static DEVICE_ATTR_RO(target_type);
static ssize_t emit_target_list(struct cxl_switch_decoder *cxlsd, char *buf)
{
	struct cxl_decoder *cxld = &cxlsd->cxld;
	ssize_t offset = 0;
	int i, rc = 0;

	for (i = 0; i < cxld->interleave_ways; i++) {
		struct cxl_dport *dport = cxlsd->target[i];
		struct cxl_dport *next = NULL;

		if (!dport)
			break;

		if (i + 1 < cxld->interleave_ways)
			next = cxlsd->target[i + 1];
		rc = sysfs_emit_at(buf, offset, "%d%s", dport->port_id,
				   next ? "," : "");
		if (rc < 0)
			return rc;
		offset += rc;
	}

	return offset;
}
static ssize_t target_list_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev);
	ssize_t offset;
	unsigned int seq;
	int rc;

	do {
		seq = read_seqbegin(&cxlsd->target_lock);
		rc = emit_target_list(cxlsd, buf);
	} while (read_seqretry(&cxlsd->target_lock, seq));

	if (rc < 0)
		return rc;
	offset = rc;

	rc = sysfs_emit_at(buf, offset, "\n");
	if (rc < 0)
		return rc;

	return offset + rc;
}
static DEVICE_ATTR_RO(target_list);
static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);

	return sysfs_emit(buf, "%s\n", cxl_decoder_mode_name(cxled->mode));
}

static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t len)
{
	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
	enum cxl_decoder_mode mode;
	ssize_t rc;

	if (sysfs_streq(buf, "pmem"))
		mode = CXL_DECODER_PMEM;
	else if (sysfs_streq(buf, "ram"))
		mode = CXL_DECODER_RAM;
	else
		return -EINVAL;

	rc = cxl_dpa_set_mode(cxled, mode);
	if (rc)
		return rc;

	return len;
}
static DEVICE_ATTR_RW(mode);
static ssize_t dpa_resource_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
	u64 base = cxl_dpa_resource_start(cxled);

	return sysfs_emit(buf, "%#llx\n", base);
}
static DEVICE_ATTR_RO(dpa_resource);
static ssize_t dpa_size_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
	resource_size_t size = cxl_dpa_size(cxled);

	return sysfs_emit(buf, "%pa\n", &size);
}

static ssize_t dpa_size_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
	unsigned long long size;
	ssize_t rc;

	rc = kstrtoull(buf, 0, &size);
	if (rc)
		return rc;

	if (!IS_ALIGNED(size, SZ_256M))
		return -EINVAL;

	rc = cxl_dpa_free(cxled);
	if (rc)
		return rc;

	if (size == 0)
		return len;

	rc = cxl_dpa_alloc(cxled, size);
	if (rc)
		return rc;

	return len;
}
static DEVICE_ATTR_RW(dpa_size);
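/*
 * Illustrative sysfs flow for provisioning decoder capacity ("decoder3.0"
 * is a hypothetical instance name):
 *
 *	# free any prior allocation, pick a mode, then reserve 256MiB of DPA
 *	echo 0 > /sys/bus/cxl/devices/decoder3.0/dpa_size
 *	echo pmem > /sys/bus/cxl/devices/decoder3.0/mode
 *	echo 0x10000000 > /sys/bus/cxl/devices/decoder3.0/dpa_size
 *
 * Per dpa_size_store() above, writes must be SZ_256M aligned, and a
 * non-zero write implicitly frees the previous allocation before
 * allocating anew.
 */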
static ssize_t interleave_granularity_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);

	return sysfs_emit(buf, "%d\n", cxld->interleave_granularity);
}

static DEVICE_ATTR_RO(interleave_granularity);
static ssize_t interleave_ways_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);

	return sysfs_emit(buf, "%d\n", cxld->interleave_ways);
}

static DEVICE_ATTR_RO(interleave_ways);
static struct attribute *cxl_decoder_base_attrs[] = {
	&dev_attr_start.attr,
	&dev_attr_size.attr,
	&dev_attr_locked.attr,
	&dev_attr_interleave_granularity.attr,
	&dev_attr_interleave_ways.attr,
	NULL,
};

static struct attribute_group cxl_decoder_base_attribute_group = {
	.attrs = cxl_decoder_base_attrs,
};
static struct attribute *cxl_decoder_root_attrs[] = {
	&dev_attr_cap_pmem.attr,
	&dev_attr_cap_ram.attr,
	&dev_attr_cap_type2.attr,
	&dev_attr_cap_type3.attr,
	&dev_attr_target_list.attr,
	SET_CXL_REGION_ATTR(create_pmem_region)
	SET_CXL_REGION_ATTR(create_ram_region)
	SET_CXL_REGION_ATTR(delete_region)
	NULL,
};
static bool can_create_pmem(struct cxl_root_decoder *cxlrd)
{
	unsigned long flags = CXL_DECODER_F_TYPE3 | CXL_DECODER_F_PMEM;

	return (cxlrd->cxlsd.cxld.flags & flags) == flags;
}

static bool can_create_ram(struct cxl_root_decoder *cxlrd)
{
	unsigned long flags = CXL_DECODER_F_TYPE3 | CXL_DECODER_F_RAM;

	return (cxlrd->cxlsd.cxld.flags & flags) == flags;
}
static umode_t cxl_root_decoder_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);

	if (a == CXL_REGION_ATTR(create_pmem_region) && !can_create_pmem(cxlrd))
		return 0;

	if (a == CXL_REGION_ATTR(create_ram_region) && !can_create_ram(cxlrd))
		return 0;

	if (a == CXL_REGION_ATTR(delete_region) &&
	    !(can_create_pmem(cxlrd) || can_create_ram(cxlrd)))
		return 0;

	return a->mode;
}
static struct attribute_group cxl_decoder_root_attribute_group = {
	.attrs = cxl_decoder_root_attrs,
	.is_visible = cxl_root_decoder_visible,
};

static const struct attribute_group *cxl_decoder_root_attribute_groups[] = {
	&cxl_decoder_root_attribute_group,
	&cxl_decoder_base_attribute_group,
	&cxl_base_attribute_group,
	NULL,
};
static struct attribute *cxl_decoder_switch_attrs[] = {
	&dev_attr_target_type.attr,
	&dev_attr_target_list.attr,
	SET_CXL_REGION_ATTR(region)
	NULL,
};

static struct attribute_group cxl_decoder_switch_attribute_group = {
	.attrs = cxl_decoder_switch_attrs,
};

static const struct attribute_group *cxl_decoder_switch_attribute_groups[] = {
	&cxl_decoder_switch_attribute_group,
	&cxl_decoder_base_attribute_group,
	&cxl_base_attribute_group,
	NULL,
};
static struct attribute *cxl_decoder_endpoint_attrs[] = {
	&dev_attr_target_type.attr,
	&dev_attr_mode.attr,
	&dev_attr_dpa_size.attr,
	&dev_attr_dpa_resource.attr,
	SET_CXL_REGION_ATTR(region)
	NULL,
};

static struct attribute_group cxl_decoder_endpoint_attribute_group = {
	.attrs = cxl_decoder_endpoint_attrs,
};

static const struct attribute_group *cxl_decoder_endpoint_attribute_groups[] = {
	&cxl_decoder_base_attribute_group,
	&cxl_decoder_endpoint_attribute_group,
	&cxl_base_attribute_group,
	NULL,
};
static void __cxl_decoder_release(struct cxl_decoder *cxld)
{
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);

	ida_free(&port->decoder_ida, cxld->id);
	put_device(&port->dev);
}

static void cxl_endpoint_decoder_release(struct device *dev)
{
	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);

	__cxl_decoder_release(&cxled->cxld);
	kfree(cxled);
}

static void cxl_switch_decoder_release(struct device *dev)
{
	struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev);

	__cxl_decoder_release(&cxlsd->cxld);
	kfree(cxlsd);
}
struct cxl_root_decoder *to_cxl_root_decoder(struct device *dev)
{
	if (dev_WARN_ONCE(dev, !is_root_decoder(dev),
			  "not a cxl_root_decoder device\n"))
		return NULL;
	return container_of(dev, struct cxl_root_decoder, cxlsd.cxld.dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_root_decoder, CXL);
static void cxl_root_decoder_release(struct device *dev)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);

	if (atomic_read(&cxlrd->region_id) >= 0)
		memregion_free(atomic_read(&cxlrd->region_id));
	__cxl_decoder_release(&cxlrd->cxlsd.cxld);
	kfree(cxlrd);
}
static const struct device_type cxl_decoder_endpoint_type = {
	.name = "cxl_decoder_endpoint",
	.release = cxl_endpoint_decoder_release,
	.groups = cxl_decoder_endpoint_attribute_groups,
};

static const struct device_type cxl_decoder_switch_type = {
	.name = "cxl_decoder_switch",
	.release = cxl_switch_decoder_release,
	.groups = cxl_decoder_switch_attribute_groups,
};

static const struct device_type cxl_decoder_root_type = {
	.name = "cxl_decoder_root",
	.release = cxl_root_decoder_release,
	.groups = cxl_decoder_root_attribute_groups,
};
bool is_endpoint_decoder(struct device *dev)
{
	return dev->type == &cxl_decoder_endpoint_type;
}
EXPORT_SYMBOL_NS_GPL(is_endpoint_decoder, CXL);

bool is_root_decoder(struct device *dev)
{
	return dev->type == &cxl_decoder_root_type;
}
EXPORT_SYMBOL_NS_GPL(is_root_decoder, CXL);

bool is_switch_decoder(struct device *dev)
{
	return is_root_decoder(dev) || dev->type == &cxl_decoder_switch_type;
}
EXPORT_SYMBOL_NS_GPL(is_switch_decoder, CXL);
struct cxl_decoder *to_cxl_decoder(struct device *dev)
{
	if (dev_WARN_ONCE(dev,
			  !is_switch_decoder(dev) && !is_endpoint_decoder(dev),
			  "not a cxl_decoder device\n"))
		return NULL;
	return container_of(dev, struct cxl_decoder, dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_decoder, CXL);
struct cxl_endpoint_decoder *to_cxl_endpoint_decoder(struct device *dev)
{
	if (dev_WARN_ONCE(dev, !is_endpoint_decoder(dev),
			  "not a cxl_endpoint_decoder device\n"))
		return NULL;
	return container_of(dev, struct cxl_endpoint_decoder, cxld.dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_endpoint_decoder, CXL);
struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev)
{
	if (dev_WARN_ONCE(dev, !is_switch_decoder(dev),
			  "not a cxl_switch_decoder device\n"))
		return NULL;
	return container_of(dev, struct cxl_switch_decoder, cxld.dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_switch_decoder, CXL);
static void cxl_ep_release(struct cxl_ep *ep)
{
	put_device(ep->ep);
	kfree(ep);
}

static void cxl_ep_remove(struct cxl_port *port, struct cxl_ep *ep)
{
	if (!ep)
		return;
	xa_erase(&port->endpoints, (unsigned long) ep->ep);
	cxl_ep_release(ep);
}
static void cxl_port_release(struct device *dev)
{
	struct cxl_port *port = to_cxl_port(dev);
	unsigned long index;
	struct cxl_ep *ep;

	xa_for_each(&port->endpoints, index, ep)
		cxl_ep_remove(port, ep);
	xa_destroy(&port->endpoints);
	xa_destroy(&port->dports);
	xa_destroy(&port->regions);
	ida_free(&cxl_port_ida, port->id);
	kfree(port);
}
static const struct attribute_group *cxl_port_attribute_groups[] = {
	&cxl_base_attribute_group,
	NULL,
};

static const struct device_type cxl_port_type = {
	.name = "cxl_port",
	.release = cxl_port_release,
	.groups = cxl_port_attribute_groups,
};
bool is_cxl_port(const struct device *dev)
{
	return dev->type == &cxl_port_type;
}
EXPORT_SYMBOL_NS_GPL(is_cxl_port, CXL);
struct cxl_port *to_cxl_port(const struct device *dev)
{
	if (dev_WARN_ONCE(dev, dev->type != &cxl_port_type,
			  "not a cxl_port device\n"))
		return NULL;
	return container_of(dev, struct cxl_port, dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_port, CXL);
static void unregister_port(void *_port)
{
	struct cxl_port *port = _port;
	struct cxl_port *parent;
	struct device *lock_dev;

	if (is_cxl_root(port))
		parent = NULL;
	else
		parent = to_cxl_port(port->dev.parent);

	/*
	 * CXL root ports and the first level of ports are unregistered
	 * under the platform firmware device lock; all other ports are
	 * unregistered while holding their parent port lock.
	 */
	if (!parent)
		lock_dev = port->uport;
	else if (is_cxl_root(parent))
		lock_dev = parent->uport;
	else
		lock_dev = &parent->dev;

	device_lock_assert(lock_dev);
	port->dead = true;
	device_unregister(&port->dev);
}
static void cxl_unlink_uport(void *_port)
{
	struct cxl_port *port = _port;

	sysfs_remove_link(&port->dev.kobj, "uport");
}

static int devm_cxl_link_uport(struct device *host, struct cxl_port *port)
{
	int rc;

	rc = sysfs_create_link(&port->dev.kobj, &port->uport->kobj, "uport");
	if (rc)
		return rc;
	return devm_add_action_or_reset(host, cxl_unlink_uport, port);
}
static void cxl_unlink_parent_dport(void *_port)
{
	struct cxl_port *port = _port;

	sysfs_remove_link(&port->dev.kobj, "parent_dport");
}

static int devm_cxl_link_parent_dport(struct device *host,
				      struct cxl_port *port,
				      struct cxl_dport *parent_dport)
{
	int rc;

	if (!parent_dport)
		return 0;

	rc = sysfs_create_link(&port->dev.kobj, &parent_dport->dport->kobj,
			       "parent_dport");
	if (rc)
		return rc;
	return devm_add_action_or_reset(host, cxl_unlink_parent_dport, port);
}
static struct lock_class_key cxl_port_key;
static struct cxl_port *cxl_port_alloc(struct device *uport,
				       resource_size_t component_reg_phys,
				       struct cxl_dport *parent_dport)
{
	struct cxl_port *port;
	struct device *dev;
	int rc;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return ERR_PTR(-ENOMEM);

	rc = ida_alloc(&cxl_port_ida, GFP_KERNEL);
	if (rc < 0)
		goto err;
	port->id = rc;
	port->uport = uport;

	/*
	 * The top-level cxl_port "cxl_root" does not have a cxl_port as
	 * its parent and it does not have any corresponding component
	 * registers as its decode is described by a fixed platform
	 * description.
	 */
	dev = &port->dev;
	if (parent_dport) {
		struct cxl_port *parent_port = parent_dport->port;
		struct cxl_port *iter;

		dev->parent = &parent_port->dev;
		port->depth = parent_port->depth + 1;
		port->parent_dport = parent_dport;

		/*
		 * walk to the host bridge, or the first ancestor that knows
		 * the host bridge
		 */
		iter = port;
		while (!iter->host_bridge &&
		       !is_cxl_root(to_cxl_port(iter->dev.parent)))
			iter = to_cxl_port(iter->dev.parent);
		if (iter->host_bridge)
			port->host_bridge = iter->host_bridge;
		else if (parent_dport->rch)
			port->host_bridge = parent_dport->dport;
		else
			port->host_bridge = iter->uport;
		dev_dbg(uport, "host-bridge: %s\n", dev_name(port->host_bridge));
	} else
		dev->parent = uport;

	port->component_reg_phys = component_reg_phys;
	ida_init(&port->decoder_ida);
	port->hdm_end = -1;
	port->commit_end = -1;
	xa_init(&port->dports);
	xa_init(&port->endpoints);
	xa_init(&port->regions);

	device_initialize(dev);
	lockdep_set_class_and_subclass(&dev->mutex, &cxl_port_key, port->depth);
	device_set_pm_not_required(dev);
	dev->bus = &cxl_bus_type;
	dev->type = &cxl_port_type;

	return port;

err:
	kfree(port);
	return ERR_PTR(rc);
}
static struct cxl_port *__devm_cxl_add_port(struct device *host,
					    struct device *uport,
					    resource_size_t component_reg_phys,
					    struct cxl_dport *parent_dport)
{
	struct cxl_port *port;
	struct device *dev;
	int rc;

	port = cxl_port_alloc(uport, component_reg_phys, parent_dport);
	if (IS_ERR(port))
		return port;

	dev = &port->dev;
	if (is_cxl_memdev(uport))
		rc = dev_set_name(dev, "endpoint%d", port->id);
	else if (parent_dport)
		rc = dev_set_name(dev, "port%d", port->id);
	else
		rc = dev_set_name(dev, "root%d", port->id);
	if (rc)
		goto err;

	rc = device_add(dev);
	if (rc)
		goto err;

	rc = devm_add_action_or_reset(host, unregister_port, port);
	if (rc)
		return ERR_PTR(rc);

	rc = devm_cxl_link_uport(host, port);
	if (rc)
		return ERR_PTR(rc);

	rc = devm_cxl_link_parent_dport(host, port, parent_dport);
	if (rc)
		return ERR_PTR(rc);

	return port;

err:
	put_device(dev);
	return ERR_PTR(rc);
}
/**
 * devm_cxl_add_port - register a cxl_port in CXL memory decode hierarchy
 * @host: host device for devm operations
 * @uport: "physical" device implementing this upstream port
 * @component_reg_phys: (optional) for configurable cxl_port instances
 * @parent_dport: next hop up in the CXL memory decode hierarchy
 */
struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport,
				   resource_size_t component_reg_phys,
				   struct cxl_dport *parent_dport)
{
	struct cxl_port *port, *parent_port;

	port = __devm_cxl_add_port(host, uport, component_reg_phys,
				   parent_dport);

	parent_port = parent_dport ? parent_dport->port : NULL;
	if (IS_ERR(port)) {
		/* @port is an ERR_PTR in this branch, do not dereference it */
		dev_dbg(uport, "Failed to add%s%s%s: %ld\n",
			parent_port ? " port to " : "",
			parent_port ? dev_name(&parent_port->dev) : "",
			parent_port ? "" : " root port",
			PTR_ERR(port));
	} else {
		dev_dbg(uport, "%s added%s%s%s\n",
			dev_name(&port->dev),
			parent_port ? " to " : "",
			parent_port ? dev_name(&parent_port->dev) : "",
			parent_port ? "" : " (root port)");
	}

	return port;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_port, CXL);
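/*
 * Usage sketch (illustrative only, error handling elided; "host" and
 * "bridge_dev" are hypothetical): a platform driver builds the top of the
 * hierarchy by adding a root port, attaching a dport for a host bridge,
 * then adding the host bridge port against that dport:
 *
 *	struct cxl_port *root_port, *hb_port;
 *	struct cxl_dport *dport;
 *
 *	root_port = devm_cxl_add_port(host, host, CXL_RESOURCE_NONE, NULL);
 *	dport = devm_cxl_add_dport(root_port, bridge_dev, 0, CXL_RESOURCE_NONE);
 *	hb_port = devm_cxl_add_port(host, bridge_dev, component_reg_phys, dport);
 */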
struct pci_bus *cxl_port_to_pci_bus(struct cxl_port *port)
{
	/* There is no pci_bus associated with a CXL platform-root port */
	if (is_cxl_root(port))
		return NULL;

	if (dev_is_pci(port->uport)) {
		struct pci_dev *pdev = to_pci_dev(port->uport);

		return pdev->subordinate;
	}

	return xa_load(&cxl_root_buses, (unsigned long)port->uport);
}
EXPORT_SYMBOL_NS_GPL(cxl_port_to_pci_bus, CXL);
static void unregister_pci_bus(void *uport)
{
	xa_erase(&cxl_root_buses, (unsigned long)uport);
}

int devm_cxl_register_pci_bus(struct device *host, struct device *uport,
			      struct pci_bus *bus)
{
	int rc;

	if (dev_is_pci(uport))
		return -EINVAL;

	rc = xa_insert(&cxl_root_buses, (unsigned long)uport, bus, GFP_KERNEL);
	if (rc)
		return rc;
	return devm_add_action_or_reset(host, unregister_pci_bus, uport);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_register_pci_bus, CXL);
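/*
 * Usage sketch (hypothetical names): a platform driver that enumerates a
 * non-PCI root port associates the firmware device with the PCI root bus
 * it describes, so cxl_port_to_pci_bus() can resolve it later:
 *
 *	rc = devm_cxl_register_pci_bus(host, &adev->dev, pci_root->bus);
 */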
static bool dev_is_cxl_root_child(struct device *dev)
{
	struct cxl_port *port, *parent;

	if (!is_cxl_port(dev))
		return false;

	port = to_cxl_port(dev);
	if (is_cxl_root(port))
		return false;

	parent = to_cxl_port(port->dev.parent);
	if (is_cxl_root(parent))
		return true;

	return false;
}
/* Find a 2nd level CXL port that has a dport that is an ancestor of @match */
static int match_root_child(struct device *dev, const void *match)
{
	const struct device *iter = NULL;
	struct cxl_dport *dport;
	struct cxl_port *port;

	if (!dev_is_cxl_root_child(dev))
		return 0;

	port = to_cxl_port(dev);
	iter = match;
	while (iter) {
		dport = cxl_find_dport_by_dev(port, iter);
		if (dport)
			break;
		iter = iter->parent;
	}

	return !!iter;
}
struct cxl_port *find_cxl_root(struct device *dev)
{
	struct device *port_dev;
	struct cxl_port *root;

	port_dev = bus_find_device(&cxl_bus_type, NULL, dev, match_root_child);
	if (!port_dev)
		return NULL;

	root = to_cxl_port(port_dev->parent);
	get_device(&root->dev);
	put_device(port_dev);
	return root;
}
EXPORT_SYMBOL_NS_GPL(find_cxl_root, CXL);
static struct cxl_dport *find_dport(struct cxl_port *port, int id)
{
	struct cxl_dport *dport;
	unsigned long index;

	device_lock_assert(&port->dev);
	xa_for_each(&port->dports, index, dport)
		if (dport->port_id == id)
			return dport;
	return NULL;
}
static int add_dport(struct cxl_port *port, struct cxl_dport *new)
{
	struct cxl_dport *dup;
	int rc;

	device_lock_assert(&port->dev);
	dup = find_dport(port, new->port_id);
	if (dup) {
		dev_err(&port->dev,
			"unable to add dport%d-%s non-unique port id (%s)\n",
			new->port_id, dev_name(new->dport),
			dev_name(dup->dport));
		return -EBUSY;
	}

	rc = xa_insert(&port->dports, (unsigned long)new->dport, new,
		       GFP_KERNEL);
	if (rc)
		return rc;

	port->nr_dports++;
	return 0;
}
/*
 * Since root-level CXL dports cannot be enumerated by PCI they are not
 * enumerated by the common port driver that acquires the port lock over
 * dport add/remove. Instead, root dports are manually added by a
 * platform driver and cond_cxl_root_lock() is used to take the missing
 * port lock in that case.
 */
static void cond_cxl_root_lock(struct cxl_port *port)
{
	if (is_cxl_root(port))
		device_lock(&port->dev);
}

static void cond_cxl_root_unlock(struct cxl_port *port)
{
	if (is_cxl_root(port))
		device_unlock(&port->dev);
}
static void cxl_dport_remove(void *data)
{
	struct cxl_dport *dport = data;
	struct cxl_port *port = dport->port;

	xa_erase(&port->dports, (unsigned long) dport->dport);
	put_device(dport->dport);
}

static void cxl_dport_unlink(void *data)
{
	struct cxl_dport *dport = data;
	struct cxl_port *port = dport->port;
	char link_name[CXL_TARGET_STRLEN];

	sprintf(link_name, "dport%d", dport->port_id);
	sysfs_remove_link(&port->dev.kobj, link_name);
}
static struct cxl_dport *
__devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev,
		     int port_id, resource_size_t component_reg_phys,
		     resource_size_t rcrb)
{
	char link_name[CXL_TARGET_STRLEN];
	struct cxl_dport *dport;
	struct device *host;
	int rc;

	if (is_cxl_root(port))
		host = port->uport;
	else
		host = &port->dev;

	if (!host->driver) {
		dev_WARN_ONCE(&port->dev, 1, "dport:%s bad devm context\n",
			      dev_name(dport_dev));
		return ERR_PTR(-ENXIO);
	}

	if (snprintf(link_name, CXL_TARGET_STRLEN, "dport%d", port_id) >=
	    CXL_TARGET_STRLEN)
		return ERR_PTR(-EINVAL);

	dport = devm_kzalloc(host, sizeof(*dport), GFP_KERNEL);
	if (!dport)
		return ERR_PTR(-ENOMEM);

	dport->dport = dport_dev;
	dport->port_id = port_id;
	dport->component_reg_phys = component_reg_phys;
	dport->port = port;
	if (rcrb != CXL_RESOURCE_NONE)
		dport->rch = true;
	dport->rcrb = rcrb;

	cond_cxl_root_lock(port);
	rc = add_dport(port, dport);
	cond_cxl_root_unlock(port);
	if (rc)
		return ERR_PTR(rc);

	get_device(dport_dev);
	rc = devm_add_action_or_reset(host, cxl_dport_remove, dport);
	if (rc)
		return ERR_PTR(rc);

	rc = sysfs_create_link(&port->dev.kobj, &dport_dev->kobj, link_name);
	if (rc)
		return ERR_PTR(rc);

	rc = devm_add_action_or_reset(host, cxl_dport_unlink, dport);
	if (rc)
		return ERR_PTR(rc);

	return dport;
}
/**
 * devm_cxl_add_dport - append VH downstream port data to a cxl_port
 * @port: the cxl_port that references this dport
 * @dport_dev: firmware or PCI device representing the dport
 * @port_id: identifier for this dport in a decoder's target list
 * @component_reg_phys: optional location of CXL component registers
 *
 * Note that dports are appended to the devm release actions of either the
 * port's host (for root ports) or the port itself (for switch ports).
 */
struct cxl_dport *devm_cxl_add_dport(struct cxl_port *port,
				     struct device *dport_dev, int port_id,
				     resource_size_t component_reg_phys)
{
	struct cxl_dport *dport;

	dport = __devm_cxl_add_dport(port, dport_dev, port_id,
				     component_reg_phys, CXL_RESOURCE_NONE);
	if (IS_ERR(dport)) {
		dev_dbg(dport_dev, "failed to add dport to %s: %ld\n",
			dev_name(&port->dev), PTR_ERR(dport));
	} else {
		dev_dbg(dport_dev, "dport added to %s\n",
			dev_name(&port->dev));
	}

	return dport;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_dport, CXL);
/**
 * devm_cxl_add_rch_dport - append RCH downstream port data to a cxl_port
 * @port: the cxl_port that references this dport
 * @dport_dev: firmware or PCI device representing the dport
 * @port_id: identifier for this dport in a decoder's target list
 * @component_reg_phys: optional location of CXL component registers
 * @rcrb: mandatory location of a Root Complex Register Block
 *
 * See CXL 3.0 9.11.8 CXL Devices Attached to an RCH
 */
struct cxl_dport *devm_cxl_add_rch_dport(struct cxl_port *port,
					 struct device *dport_dev, int port_id,
					 resource_size_t component_reg_phys,
					 resource_size_t rcrb)
{
	struct cxl_dport *dport;

	if (rcrb == CXL_RESOURCE_NONE) {
		dev_dbg(&port->dev, "failed to add RCH dport, missing RCRB\n");
		return ERR_PTR(-EINVAL);
	}

	dport = __devm_cxl_add_dport(port, dport_dev, port_id,
				     component_reg_phys, rcrb);
	if (IS_ERR(dport)) {
		dev_dbg(dport_dev, "failed to add RCH dport to %s: %ld\n",
			dev_name(&port->dev), PTR_ERR(dport));
	} else {
		dev_dbg(dport_dev, "RCH dport added to %s\n",
			dev_name(&port->dev));
	}

	return dport;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_rch_dport, CXL);
static int add_ep(struct cxl_ep *new)
{
	struct cxl_port *port = new->dport->port;
	int rc;

	device_lock(&port->dev);
	if (port->dead) {
		device_unlock(&port->dev);
		return -ENXIO;
	}
	rc = xa_insert(&port->endpoints, (unsigned long)new->ep, new,
		       GFP_KERNEL);
	device_unlock(&port->dev);

	return rc;
}
/**
 * cxl_add_ep - register an endpoint's interest in a port
 * @dport: the dport that routes to @ep_dev
 * @ep_dev: device representing the endpoint
 *
 * Intermediate CXL ports are scanned based on the arrival of endpoints.
 * When those endpoints depart the port can be destroyed once all
 * endpoints that care about that port have been removed.
 */
static int cxl_add_ep(struct cxl_dport *dport, struct device *ep_dev)
{
	struct cxl_ep *ep;
	int rc;

	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	ep->ep = get_device(ep_dev);
	ep->dport = dport;

	rc = add_ep(ep);
	if (rc)
		cxl_ep_release(ep);
	return rc;
}
struct cxl_find_port_ctx {
	const struct device *dport_dev;
	const struct cxl_port *parent_port;
	struct cxl_dport **dport;
};
static int match_port_by_dport(struct device *dev, const void *data)
{
	const struct cxl_find_port_ctx *ctx = data;
	struct cxl_dport *dport;
	struct cxl_port *port;

	if (!is_cxl_port(dev))
		return 0;
	if (ctx->parent_port && dev->parent != &ctx->parent_port->dev)
		return 0;

	port = to_cxl_port(dev);
	dport = cxl_find_dport_by_dev(port, ctx->dport_dev);
	if (ctx->dport)
		*ctx->dport = dport;
	return dport != NULL;
}
static struct cxl_port *__find_cxl_port(struct cxl_find_port_ctx *ctx)
{
	struct device *dev;

	if (!ctx->dport_dev)
		return NULL;

	dev = bus_find_device(&cxl_bus_type, NULL, ctx, match_port_by_dport);
	if (dev)
		return to_cxl_port(dev);
	return NULL;
}
static struct cxl_port *find_cxl_port(struct device *dport_dev,
				      struct cxl_dport **dport)
{
	struct cxl_find_port_ctx ctx = {
		.dport_dev = dport_dev,
		.dport = dport,
	};
	struct cxl_port *port;

	port = __find_cxl_port(&ctx);
	return port;
}
static struct cxl_port *find_cxl_port_at(struct cxl_port *parent_port,
					 struct device *dport_dev,
					 struct cxl_dport **dport)
{
	struct cxl_find_port_ctx ctx = {
		.dport_dev = dport_dev,
		.parent_port = parent_port,
		.dport = dport,
	};
	struct cxl_port *port;

	port = __find_cxl_port(&ctx);
	return port;
}
/*
 * All users of grandparent() are using it to walk PCIe-like switch port
 * hierarchy. A PCIe switch is made up of a bridge device representing the
 * upstream switch port and N bridges representing downstream switch ports. When
 * bridges stack, the grandparent of a downstream switch port is another
 * downstream switch port in the immediate ancestor switch.
 */
static struct device *grandparent(struct device *dev)
{
	if (dev && dev->parent)
		return dev->parent->parent;
	return NULL;
}
static void delete_endpoint(void *data)
{
	struct cxl_memdev *cxlmd = data;
	struct cxl_port *endpoint = dev_get_drvdata(&cxlmd->dev);
	struct cxl_port *parent_port;
	struct device *parent;

	parent_port = cxl_mem_find_port(cxlmd, NULL);
	if (!parent_port)
		goto out;
	parent = &parent_port->dev;

	device_lock(parent);
	if (parent->driver && !endpoint->dead) {
		devm_release_action(parent, cxl_unlink_parent_dport, endpoint);
		devm_release_action(parent, cxl_unlink_uport, endpoint);
		devm_release_action(parent, unregister_port, endpoint);
	}
	device_unlock(parent);
	put_device(parent);
out:
	put_device(&endpoint->dev);
}
int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint)
{
	struct device *dev = &cxlmd->dev;

	get_device(&endpoint->dev);
	dev_set_drvdata(dev, endpoint);
	cxlmd->depth = endpoint->depth;
	return devm_add_action_or_reset(dev, delete_endpoint, cxlmd);
}
EXPORT_SYMBOL_NS_GPL(cxl_endpoint_autoremove, CXL);
/*
 * The natural end of life of a non-root 'cxl_port' is when its parent port goes
 * through a ->remove() event ("top-down" unregistration). The unnatural trigger
 * for a port to be unregistered is when all memdevs beneath that port have gone
 * through ->remove(). This "bottom-up" removal selectively removes individual
 * child ports manually. This depends on devm_cxl_add_port() not to change its
 * devm action registration order, and for dports to have already been
 * destroyed by reap_dports().
 */
static void delete_switch_port(struct cxl_port *port)
{
	devm_release_action(port->dev.parent, cxl_unlink_parent_dport, port);
	devm_release_action(port->dev.parent, cxl_unlink_uport, port);
	devm_release_action(port->dev.parent, unregister_port, port);
}
static void reap_dports(struct cxl_port *port)
{
	struct cxl_dport *dport;
	unsigned long index;

	device_lock_assert(&port->dev);

	xa_for_each(&port->dports, index, dport) {
		devm_release_action(&port->dev, cxl_dport_unlink, dport);
		devm_release_action(&port->dev, cxl_dport_remove, dport);
		devm_kfree(&port->dev, dport);
	}
}
struct detach_ctx {
	struct cxl_memdev *cxlmd;
	int depth;
};

static int port_has_memdev(struct device *dev, const void *data)
{
	const struct detach_ctx *ctx = data;
	struct cxl_port *port;

	if (!is_cxl_port(dev))
		return 0;

	port = to_cxl_port(dev);
	if (port->depth != ctx->depth)
		return 0;

	return !!cxl_ep_load(port, ctx->cxlmd);
}
static void cxl_detach_ep(void *data)
{
	struct cxl_memdev *cxlmd = data;

	for (int i = cxlmd->depth - 1; i >= 1; i--) {
		struct cxl_port *port, *parent_port;
		struct detach_ctx ctx = {
			.cxlmd = cxlmd,
			.depth = i,
		};
		struct device *dev;
		struct cxl_ep *ep;
		bool died = false;

		dev = bus_find_device(&cxl_bus_type, NULL, &ctx,
				      port_has_memdev);
		if (!dev)
			continue;

		port = to_cxl_port(dev);

		parent_port = to_cxl_port(port->dev.parent);
		device_lock(&parent_port->dev);
		device_lock(&port->dev);
		ep = cxl_ep_load(port, cxlmd);
		dev_dbg(&cxlmd->dev, "disconnect %s from %s\n",
			ep ? dev_name(ep->ep) : "", dev_name(&port->dev));
		cxl_ep_remove(port, ep);
		if (ep && !port->dead && xa_empty(&port->endpoints) &&
		    !is_cxl_root(parent_port) && parent_port->dev.driver) {
			/*
			 * This was the last ep attached to a dynamically
			 * enumerated port. Block new cxl_add_ep() and garbage
			 * collect the port.
			 */
			died = true;
			port->dead = true;
			reap_dports(port);
		}
		device_unlock(&port->dev);

		if (died) {
			dev_dbg(&cxlmd->dev, "delete %s\n",
				dev_name(&port->dev));
			delete_switch_port(port);
		}
		put_device(&port->dev);
		device_unlock(&parent_port->dev);
	}
}
static resource_size_t find_component_registers(struct device *dev)
{
	struct cxl_register_map map;
	struct pci_dev *pdev;

	/*
	 * Theoretically, CXL component registers can be hosted on a
	 * non-PCI device; in practice, only cxl_test hits this case.
	 */
	if (!dev_is_pci(dev))
		return CXL_RESOURCE_NONE;

	pdev = to_pci_dev(dev);

	cxl_find_regblock(pdev, CXL_REGLOC_RBI_COMPONENT, &map);
	return map.resource;
}
static int add_port_attach_ep(struct cxl_memdev *cxlmd,
			      struct device *uport_dev,
			      struct device *dport_dev)
{
	struct device *dparent = grandparent(dport_dev);
	struct cxl_port *port, *parent_port = NULL;
	struct cxl_dport *dport, *parent_dport;
	resource_size_t component_reg_phys;
	int rc;

	if (!dparent) {
		/*
		 * The iteration reached the topology root without finding the
		 * CXL-root 'cxl_port' on a previous iteration, fail for now to
		 * be re-probed after platform driver attaches.
		 */
		dev_dbg(&cxlmd->dev, "%s is a root dport\n",
			dev_name(dport_dev));
		return -ENXIO;
	}

	parent_port = find_cxl_port(dparent, &parent_dport);
	if (!parent_port) {
		/* iterate to create this parent_port */
		return -EAGAIN;
	}

	device_lock(&parent_port->dev);
	if (!parent_port->dev.driver) {
		dev_warn(&cxlmd->dev,
			 "port %s:%s disabled, failed to enumerate CXL.mem\n",
			 dev_name(&parent_port->dev), dev_name(uport_dev));
		port = ERR_PTR(-ENXIO);
		goto out;
	}

	port = find_cxl_port_at(parent_port, dport_dev, &dport);
	if (!port) {
		component_reg_phys = find_component_registers(uport_dev);
		port = devm_cxl_add_port(&parent_port->dev, uport_dev,
					 component_reg_phys, parent_dport);
		/* retry find to pick up the new dport information */
		if (!IS_ERR(port))
			port = find_cxl_port_at(parent_port, dport_dev, &dport);
	}
out:
	device_unlock(&parent_port->dev);

	if (IS_ERR(port))
		rc = PTR_ERR(port);
	else {
		dev_dbg(&cxlmd->dev, "add to new port %s:%s\n",
			dev_name(&port->dev), dev_name(port->uport));
		rc = cxl_add_ep(dport, &cxlmd->dev);
		if (rc == -EBUSY) {
			/*
			 * "can't" happen, but this error code means
			 * something to the caller, so translate it.
			 */
			rc = -ENXIO;
		}
		put_device(&port->dev);
	}

	put_device(&parent_port->dev);
	return rc;
}
int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd)
{
	struct device *dev = &cxlmd->dev;
	struct device *iter;
	int rc;

	/*
	 * Skip intermediate port enumeration in the RCH case; there
	 * are no ports in between a host bridge and an endpoint.
	 */
	if (cxlmd->cxlds->rcd)
		return 0;

	rc = devm_add_action_or_reset(&cxlmd->dev, cxl_detach_ep, cxlmd);
	if (rc)
		return rc;

	/*
	 * Scan for and add all cxl_ports in this device's ancestry.
	 * Repeat until no more ports are added. Abort if a port add
	 * attempt fails.
	 */
retry:
	for (iter = dev; iter; iter = grandparent(iter)) {
		struct device *dport_dev = grandparent(iter);
		struct device *uport_dev;
		struct cxl_dport *dport;
		struct cxl_port *port;

		if (!dport_dev)
			return 0;

		uport_dev = dport_dev->parent;
		if (!uport_dev) {
			dev_warn(dev, "at %s no parent for dport: %s\n",
				 dev_name(iter), dev_name(dport_dev));
			return -ENXIO;
		}

		dev_dbg(dev, "scan: iter: %s dport_dev: %s parent: %s\n",
			dev_name(iter), dev_name(dport_dev),
			dev_name(uport_dev));
		port = find_cxl_port(dport_dev, &dport);
		if (port) {
			dev_dbg(&cxlmd->dev,
				"found already registered port %s:%s\n",
				dev_name(&port->dev), dev_name(port->uport));
			rc = cxl_add_ep(dport, &cxlmd->dev);

			/*
			 * If the endpoint already exists in the port's list,
			 * that's ok, it was added on a previous pass.
			 * Otherwise, retry in add_port_attach_ep() after taking
			 * the parent_port lock as the current port may be being
			 * reaped.
			 */
			if (rc && rc != -EBUSY) {
				put_device(&port->dev);
				return rc;
			}

			/* Any more ports to add between this one and the root? */
			if (!dev_is_cxl_root_child(&port->dev)) {
				put_device(&port->dev);
				continue;
			}

			put_device(&port->dev);
			return 0;
		}

		rc = add_port_attach_ep(cxlmd, uport_dev, dport_dev);
		/* port missing, try to add parent */
		if (rc == -EAGAIN)
			continue;
		/* failed to add ep or port */
		if (rc)
			return rc;
		/* port added, new descendants possible, start over */
		goto retry;
	}

	return 0;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_ports, CXL);
struct cxl_port *cxl_mem_find_port(struct cxl_memdev *cxlmd,
				   struct cxl_dport **dport)
{
	return find_cxl_port(grandparent(&cxlmd->dev), dport);
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_find_port, CXL);
static int decoder_populate_targets(struct cxl_switch_decoder *cxlsd,
				    struct cxl_port *port, int *target_map)
{
	int i, rc = 0;

	if (!target_map)
		return 0;

	device_lock_assert(&port->dev);

	if (xa_empty(&port->dports))
		return -EINVAL;

	write_seqlock(&cxlsd->target_lock);
	for (i = 0; i < cxlsd->nr_targets; i++) {
		struct cxl_dport *dport = find_dport(port, target_map[i]);

		if (!dport) {
			rc = -ENXIO;
			break;
		}
		cxlsd->target[i] = dport;
	}
	write_sequnlock(&cxlsd->target_lock);

	return rc;
}
struct cxl_dport *cxl_hb_modulo(struct cxl_root_decoder *cxlrd, int pos)
{
	struct cxl_switch_decoder *cxlsd = &cxlrd->cxlsd;
	struct cxl_decoder *cxld = &cxlsd->cxld;
	int iw;

	iw = cxld->interleave_ways;
	if (dev_WARN_ONCE(&cxld->dev, iw != cxlsd->nr_targets,
			  "misconfigured root decoder\n"))
		return NULL;

	return cxlrd->cxlsd.target[pos % iw];
}
EXPORT_SYMBOL_NS_GPL(cxl_hb_modulo, CXL);
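/*
 * Worked example: with a 4-way interleaved root decoder (iw == 4 and
 * nr_targets == 4), region position 0 maps to target[0], position 1 to
 * target[1], and position 5 wraps back to target[1] (5 % 4 == 1).
 */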
static struct lock_class_key cxl_decoder_key;
/**
 * cxl_decoder_init - Common decoder setup / initialization
 * @port: owning port of this decoder
 * @cxld: common decoder properties to initialize
 *
 * A port may contain one or more decoders. Each of those decoders
 * enables some address space for CXL.mem utilization. A decoder is
 * expected to be configured by the caller before registering it via
 * cxl_decoder_add().
 */
static int cxl_decoder_init(struct cxl_port *port, struct cxl_decoder *cxld)
{
	struct device *dev;
	int rc;

	rc = ida_alloc(&port->decoder_ida, GFP_KERNEL);
	if (rc < 0)
		return rc;

	/* need parent to stick around to release the id */
	get_device(&port->dev);
	cxld->id = rc;

	dev = &cxld->dev;
	device_initialize(dev);
	lockdep_set_class(&dev->mutex, &cxl_decoder_key);
	device_set_pm_not_required(dev);
	dev->parent = &port->dev;
	dev->bus = &cxl_bus_type;

	/* Pre initialize an "empty" decoder */
	cxld->interleave_ways = 1;
	cxld->interleave_granularity = PAGE_SIZE;
	cxld->target_type = CXL_DECODER_EXPANDER;
	cxld->hpa_range = (struct range) {
		.start = 0,
		.end = -1,
	};

	return 0;
}
static int cxl_switch_decoder_init(struct cxl_port *port,
				   struct cxl_switch_decoder *cxlsd,
				   int nr_targets)
{
	if (nr_targets > CXL_DECODER_MAX_INTERLEAVE)
		return -EINVAL;

	cxlsd->nr_targets = nr_targets;
	seqlock_init(&cxlsd->target_lock);
	return cxl_decoder_init(port, &cxlsd->cxld);
}
/**
 * cxl_root_decoder_alloc - Allocate a root level decoder
 * @port: owning CXL root of this decoder
 * @nr_targets: static number of downstream targets
 * @calc_hb: which host bridge covers the n'th position by granularity
 *
 * Return: A new cxl decoder to be registered by cxl_decoder_add(). A
 * 'CXL root' decoder is one that decodes from a top-level / static platform
 * firmware description of CXL resources into a CXL standard decode
 * topology.
 */
struct cxl_root_decoder *cxl_root_decoder_alloc(struct cxl_port *port,
						unsigned int nr_targets,
						cxl_calc_hb_fn calc_hb)
{
	struct cxl_root_decoder *cxlrd;
	struct cxl_switch_decoder *cxlsd;
	struct cxl_decoder *cxld;
	int rc;

	if (!is_cxl_root(port))
		return ERR_PTR(-EINVAL);

	cxlrd = kzalloc(struct_size(cxlrd, cxlsd.target, nr_targets),
			GFP_KERNEL);
	if (!cxlrd)
		return ERR_PTR(-ENOMEM);

	cxlsd = &cxlrd->cxlsd;
	rc = cxl_switch_decoder_init(port, cxlsd, nr_targets);
	if (rc) {
		kfree(cxlrd);
		return ERR_PTR(rc);
	}

	cxlrd->calc_hb = calc_hb;
	mutex_init(&cxlrd->range_lock);

	cxld = &cxlsd->cxld;
	cxld->dev.type = &cxl_decoder_root_type;
	/*
	 * cxl_root_decoder_release() special cases negative ids to
	 * detect memregion_alloc() failures.
	 */
	atomic_set(&cxlrd->region_id, -1);
	rc = memregion_alloc(GFP_KERNEL);
	if (rc < 0) {
		put_device(&cxld->dev);
		return ERR_PTR(rc);
	}

	atomic_set(&cxlrd->region_id, rc);
	return cxlrd;
}
EXPORT_SYMBOL_NS_GPL(cxl_root_decoder_alloc, CXL);
/**
 * cxl_switch_decoder_alloc - Allocate a switch level decoder
 * @port: owning CXL switch port of this decoder
 * @nr_targets: max number of dynamically addressable downstream targets
 *
 * Return: A new cxl decoder to be registered by cxl_decoder_add(). A
 * 'switch' decoder is any decoder that can be enumerated by PCIe
 * topology and the HDM Decoder Capability. This includes the decoders
 * that sit between Switch Upstream Ports / Switch Downstream Ports and
 * Host Bridges / Root Ports.
 */
struct cxl_switch_decoder *cxl_switch_decoder_alloc(struct cxl_port *port,
						    unsigned int nr_targets)
{
	struct cxl_switch_decoder *cxlsd;
	struct cxl_decoder *cxld;
	int rc;

	if (is_cxl_root(port) || is_cxl_endpoint(port))
		return ERR_PTR(-EINVAL);

	cxlsd = kzalloc(struct_size(cxlsd, target, nr_targets), GFP_KERNEL);
	if (!cxlsd)
		return ERR_PTR(-ENOMEM);

	rc = cxl_switch_decoder_init(port, cxlsd, nr_targets);
	if (rc) {
		kfree(cxlsd);
		return ERR_PTR(rc);
	}

	cxld = &cxlsd->cxld;
	cxld->dev.type = &cxl_decoder_switch_type;
	return cxlsd;
}
EXPORT_SYMBOL_NS_GPL(cxl_switch_decoder_alloc, CXL);
/**
 * cxl_endpoint_decoder_alloc - Allocate an endpoint decoder
 * @port: owning port of this decoder
 *
 * Return: A new cxl decoder to be registered by cxl_decoder_add()
 */
struct cxl_endpoint_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port)
{
	struct cxl_endpoint_decoder *cxled;
	struct cxl_decoder *cxld;
	int rc;

	if (!is_cxl_endpoint(port))
		return ERR_PTR(-EINVAL);

	cxled = kzalloc(sizeof(*cxled), GFP_KERNEL);
	if (!cxled)
		return ERR_PTR(-ENOMEM);

	cxled->pos = -1;
	cxld = &cxled->cxld;
	rc = cxl_decoder_init(port, cxld);
	if (rc) {
		kfree(cxled);
		return ERR_PTR(rc);
	}

	cxld->dev.type = &cxl_decoder_endpoint_type;
	return cxled;
}
EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_alloc, CXL);
/**
 * cxl_decoder_add_locked - Add a decoder with targets
 * @cxld: The cxl decoder allocated by cxl_<type>_decoder_alloc()
 * @target_map: A list of downstream ports that this decoder can direct memory
 *              traffic to. These numbers should correspond with the port number
 *              in the PCIe Link Capabilities structure.
 *
 * Certain types of decoders may not have any targets. The main example of this
 * is an endpoint device. A more awkward example is a hostbridge whose root
 * ports get hot added (technically possible, though unlikely).
 *
 * This is the locked variant of cxl_decoder_add().
 *
 * Context: Process context. Expects the device lock of the port that owns the
 *	    decoder to be held.
 *
 * Return: Negative error code if the decoder wasn't properly configured; else
 *	   returns 0.
 */
int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map)
{
	struct cxl_port *port;
	struct device *dev;
	int rc;

	if (WARN_ON_ONCE(!cxld))
		return -EINVAL;

	if (WARN_ON_ONCE(IS_ERR(cxld)))
		return PTR_ERR(cxld);

	if (cxld->interleave_ways < 1)
		return -EINVAL;

	dev = &cxld->dev;

	port = to_cxl_port(cxld->dev.parent);
	if (!is_endpoint_decoder(dev)) {
		struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev);

		rc = decoder_populate_targets(cxlsd, port, target_map);
		if (rc && (cxld->flags & CXL_DECODER_F_ENABLE)) {
			dev_err(&port->dev,
				"Failed to populate active decoder targets\n");
			return rc;
		}
	}

	rc = dev_set_name(dev, "decoder%d.%d", port->id, cxld->id);
	if (rc)
		return rc;

	return device_add(dev);
}
EXPORT_SYMBOL_NS_GPL(cxl_decoder_add_locked, CXL);
/**
 * cxl_decoder_add - Add a decoder with targets
 * @cxld: The cxl decoder allocated by cxl_<type>_decoder_alloc()
 * @target_map: A list of downstream ports that this decoder can direct memory
 *              traffic to. These numbers should correspond with the port number
 *              in the PCIe Link Capabilities structure.
 *
 * This is the unlocked variant of cxl_decoder_add_locked().
 * See cxl_decoder_add_locked().
 *
 * Context: Process context. Takes and releases the device lock of the port that
 *	    owns the decoder.
 */
int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map)
{
	struct cxl_port *port;
	int rc;

	if (WARN_ON_ONCE(!cxld))
		return -EINVAL;

	if (WARN_ON_ONCE(IS_ERR(cxld)))
		return PTR_ERR(cxld);

	port = to_cxl_port(cxld->dev.parent);

	device_lock(&port->dev);
	rc = cxl_decoder_add_locked(cxld, target_map);
	device_unlock(&port->dev);

	return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_decoder_add, CXL);
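/*
 * Allocation/registration sketch for a switch decoder (illustrative only,
 * error handling elided; the target_map entries are hypothetical port
 * ids):
 *
 *	int target_map[2] = { 0, 1 };
 *	struct cxl_switch_decoder *cxlsd;
 *
 *	cxlsd = cxl_switch_decoder_alloc(port, ARRAY_SIZE(target_map));
 *	cxlsd->cxld.interleave_ways = 2;
 *	rc = cxl_decoder_add(&cxlsd->cxld, target_map);
 *	if (rc)
 *		put_device(&cxlsd->cxld.dev);
 *	else
 *		rc = cxl_decoder_autoremove(host, &cxlsd->cxld);
 */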
static void cxld_unregister(void *dev)
{
	struct cxl_endpoint_decoder *cxled;

	if (is_endpoint_decoder(dev)) {
		cxled = to_cxl_endpoint_decoder(dev);
		cxl_decoder_kill_region(cxled);
	}

	device_unregister(dev);
}

int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld)
{
	return devm_add_action_or_reset(host, cxld_unregister, &cxld->dev);
}
EXPORT_SYMBOL_NS_GPL(cxl_decoder_autoremove, CXL);
/**
 * __cxl_driver_register - register a driver for the cxl bus
 * @cxl_drv: cxl driver structure to attach
 * @owner: owning module/driver
 * @modname: KBUILD_MODNAME for parent driver
 */
int __cxl_driver_register(struct cxl_driver *cxl_drv, struct module *owner,
			  const char *modname)
{
	if (!cxl_drv->probe) {
		pr_debug("%s ->probe() must be specified\n", modname);
		return -EINVAL;
	}

	if (!cxl_drv->name) {
		pr_debug("%s ->name must be specified\n", modname);
		return -EINVAL;
	}

	if (!cxl_drv->id) {
		pr_debug("%s ->id must be specified\n", modname);
		return -EINVAL;
	}

	cxl_drv->drv.bus = &cxl_bus_type;
	cxl_drv->drv.owner = owner;
	cxl_drv->drv.mod_name = modname;
	cxl_drv->drv.name = cxl_drv->name;

	return driver_register(&cxl_drv->drv);
}
EXPORT_SYMBOL_NS_GPL(__cxl_driver_register, CXL);

void cxl_driver_unregister(struct cxl_driver *cxl_drv)
{
	driver_unregister(&cxl_drv->drv);
}
EXPORT_SYMBOL_NS_GPL(cxl_driver_unregister, CXL);
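/*
 * Registration sketch: CXL aware drivers typically use the
 * module_cxl_driver() convenience macro from cxl.h rather than calling
 * __cxl_driver_register() directly (the names below are hypothetical):
 *
 *	static struct cxl_driver example_driver = {
 *		.name = "example",
 *		.probe = example_probe,
 *		.remove = example_remove,
 *		.id = CXL_DEVICE_PORT,
 *	};
 *	module_cxl_driver(example_driver);
 *	MODULE_IMPORT_NS(CXL);
 */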
static int cxl_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	return add_uevent_var(env, "MODALIAS=" CXL_MODALIAS_FMT,
			      cxl_device_id(dev));
}

static int cxl_bus_match(struct device *dev, struct device_driver *drv)
{
	return cxl_device_id(dev) == to_cxl_drv(drv)->id;
}
static int cxl_bus_probe(struct device *dev)
{
	int rc;

	rc = to_cxl_drv(dev->driver)->probe(dev);
	dev_dbg(dev, "probe: %d\n", rc);
	return rc;
}

static void cxl_bus_remove(struct device *dev)
{
	struct cxl_driver *cxl_drv = to_cxl_drv(dev->driver);

	if (cxl_drv->remove)
		cxl_drv->remove(dev);
}
static struct workqueue_struct *cxl_bus_wq;

static void cxl_bus_rescan_queue(struct work_struct *w)
{
	int rc = bus_rescan_devices(&cxl_bus_type);

	pr_debug("CXL bus rescan result: %d\n", rc);
}

void cxl_bus_rescan(void)
{
	static DECLARE_WORK(rescan_work, cxl_bus_rescan_queue);

	queue_work(cxl_bus_wq, &rescan_work);
}
EXPORT_SYMBOL_NS_GPL(cxl_bus_rescan, CXL);
void cxl_bus_drain(void)
{
	drain_workqueue(cxl_bus_wq);
}
EXPORT_SYMBOL_NS_GPL(cxl_bus_drain, CXL);

bool schedule_cxl_memdev_detach(struct cxl_memdev *cxlmd)
{
	return queue_work(cxl_bus_wq, &cxlmd->detach_work);
}
EXPORT_SYMBOL_NS_GPL(schedule_cxl_memdev_detach, CXL);
/* for user tooling to ensure port disable work has completed */
static ssize_t flush_store(const struct bus_type *bus, const char *buf, size_t count)
{
	if (sysfs_streq(buf, "1")) {
		flush_workqueue(cxl_bus_wq);
		return count;
	}

	return -EINVAL;
}

static BUS_ATTR_WO(flush);
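/*
 * Example: user tooling (e.g. a test harness) synchronizes with port
 * teardown by writing "1" to the bus-level attribute:
 *
 *	echo 1 > /sys/bus/cxl/flush
 */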
static struct attribute *cxl_bus_attributes[] = {
	&bus_attr_flush.attr,
	NULL,
};

static struct attribute_group cxl_bus_attribute_group = {
	.attrs = cxl_bus_attributes,
};
static const struct attribute_group *cxl_bus_attribute_groups[] = {
	&cxl_bus_attribute_group,
	NULL,
};

struct bus_type cxl_bus_type = {
	.name = "cxl",
	.uevent = cxl_bus_uevent,
	.match = cxl_bus_match,
	.probe = cxl_bus_probe,
	.remove = cxl_bus_remove,
	.bus_groups = cxl_bus_attribute_groups,
};
EXPORT_SYMBOL_NS_GPL(cxl_bus_type, CXL);
static struct dentry *cxl_debugfs;

struct dentry *cxl_debugfs_create_dir(const char *dir)
{
	return debugfs_create_dir(dir, cxl_debugfs);
}
EXPORT_SYMBOL_NS_GPL(cxl_debugfs_create_dir, CXL);
static __init int cxl_core_init(void)
{
	int rc;

	cxl_debugfs = debugfs_create_dir("cxl", NULL);

	cxl_mbox_init();

	rc = cxl_memdev_init();
	if (rc)
		return rc;

	cxl_bus_wq = alloc_ordered_workqueue("cxl_port", 0);
	if (!cxl_bus_wq) {
		rc = -ENOMEM;
		goto err_wq;
	}

	rc = bus_register(&cxl_bus_type);
	if (rc)
		goto err_bus;

	rc = cxl_region_init();
	if (rc)
		goto err_region;

	return 0;

err_region:
	bus_unregister(&cxl_bus_type);
err_bus:
	destroy_workqueue(cxl_bus_wq);
err_wq:
	cxl_memdev_exit();
	return rc;
}
static void cxl_core_exit(void)
{
	cxl_region_exit();
	bus_unregister(&cxl_bus_type);
	destroy_workqueue(cxl_bus_wq);
	cxl_memdev_exit();
	debugfs_remove_recursive(cxl_debugfs);
}

subsys_initcall(cxl_core_init);
module_exit(cxl_core_exit);
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(CXL);