// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
#include <linux/platform_device.h>
#include <linux/memregion.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <cxlmem.h>
#include <cxlpci.h>
#include <cxl.h>
#include "core.h"

/**
 * DOC: cxl core
 *
 * The CXL core provides a set of interfaces that can be consumed by CXL aware
 * drivers. The interfaces allow for creation, modification, and destruction of
 * regions, memory devices, ports, and decoders. CXL aware drivers must register
 * with the CXL core via these interfaces in order to be able to participate in
 * cross-device interleave coordination. The CXL core also establishes and
 * maintains the bridge to the nvdimm subsystem.
 *
 * CXL core introduces sysfs hierarchy to control the devices that are
 * instantiated by the core.
 */
/*
 * All changes to the interleave configuration occur with this lock held
 * for write.
 */
DECLARE_RWSEM(cxl_region_rwsem);

static DEFINE_IDA(cxl_port_ida);
static DEFINE_XARRAY(cxl_root_buses);
static ssize_t devtype_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sysfs_emit(buf, "%s\n", dev->type->name);
}
static DEVICE_ATTR_RO(devtype);

static int cxl_device_id(const struct device *dev)
{
	if (dev->type == &cxl_nvdimm_bridge_type)
		return CXL_DEVICE_NVDIMM_BRIDGE;
	if (dev->type == &cxl_nvdimm_type)
		return CXL_DEVICE_NVDIMM;
	if (dev->type == CXL_PMEM_REGION_TYPE())
		return CXL_DEVICE_PMEM_REGION;
	if (dev->type == CXL_DAX_REGION_TYPE())
		return CXL_DEVICE_DAX_REGION;
	if (is_cxl_port(dev)) {
		if (is_cxl_root(to_cxl_port(dev)))
			return CXL_DEVICE_ROOT;
		return CXL_DEVICE_PORT;
	}
	if (is_cxl_memdev(dev))
		return CXL_DEVICE_MEMORY_EXPANDER;
	if (dev->type == CXL_REGION_TYPE())
		return CXL_DEVICE_REGION;
	if (dev->type == &cxl_pmu_type)
		return CXL_DEVICE_PMU;
	return 0;
}

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	return sysfs_emit(buf, CXL_MODALIAS_FMT "\n", cxl_device_id(dev));
}
static DEVICE_ATTR_RO(modalias);

static struct attribute *cxl_base_attributes[] = {
	&dev_attr_devtype.attr,
	&dev_attr_modalias.attr,
	NULL,
};

struct attribute_group cxl_base_attribute_group = {
	.attrs = cxl_base_attributes,
};
static ssize_t start_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);

	return sysfs_emit(buf, "%#llx\n", cxld->hpa_range.start);
}
static DEVICE_ATTR_ADMIN_RO(start);

static ssize_t size_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);

	return sysfs_emit(buf, "%#llx\n", range_len(&cxld->hpa_range));
}
static DEVICE_ATTR_RO(size);

#define CXL_DECODER_FLAG_ATTR(name, flag)                            \
static ssize_t name##_show(struct device *dev,                       \
			   struct device_attribute *attr, char *buf) \
{                                                                    \
	struct cxl_decoder *cxld = to_cxl_decoder(dev);              \
                                                                     \
	return sysfs_emit(buf, "%s\n",                               \
			  (cxld->flags & (flag)) ? "1" : "0");       \
}                                                                    \
static DEVICE_ATTR_RO(name)

CXL_DECODER_FLAG_ATTR(cap_pmem, CXL_DECODER_F_PMEM);
CXL_DECODER_FLAG_ATTR(cap_ram, CXL_DECODER_F_RAM);
CXL_DECODER_FLAG_ATTR(cap_type2, CXL_DECODER_F_TYPE2);
CXL_DECODER_FLAG_ATTR(cap_type3, CXL_DECODER_F_TYPE3);
CXL_DECODER_FLAG_ATTR(locked, CXL_DECODER_F_LOCK);

static ssize_t target_type_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);

	switch (cxld->target_type) {
	case CXL_DECODER_DEVMEM:
		return sysfs_emit(buf, "accelerator\n");
	case CXL_DECODER_HOSTONLYMEM:
		return sysfs_emit(buf, "expander\n");
	}
	return -ENXIO;
}
static DEVICE_ATTR_RO(target_type);
static ssize_t emit_target_list(struct cxl_switch_decoder *cxlsd, char *buf)
{
	struct cxl_decoder *cxld = &cxlsd->cxld;
	ssize_t offset = 0;
	int i, rc = 0;

	for (i = 0; i < cxld->interleave_ways; i++) {
		struct cxl_dport *dport = cxlsd->target[i];
		struct cxl_dport *next = NULL;

		if (!dport)
			break;

		if (i + 1 < cxld->interleave_ways)
			next = cxlsd->target[i + 1];
		rc = sysfs_emit_at(buf, offset, "%d%s", dport->port_id,
				   next ? "," : "");
		if (rc < 0)
			return rc;
		offset += rc;
	}

	return offset;
}

static ssize_t target_list_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev);
	ssize_t offset;
	unsigned int seq;
	int rc;

	do {
		seq = read_seqbegin(&cxlsd->target_lock);
		rc = emit_target_list(cxlsd, buf);
	} while (read_seqretry(&cxlsd->target_lock, seq));

	if (rc < 0)
		return rc;
	offset = rc;

	rc = sysfs_emit_at(buf, offset, "\n");
	if (rc < 0)
		return rc;

	return offset + rc;
}
static DEVICE_ATTR_RO(target_list);
static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);

	return sysfs_emit(buf, "%s\n", cxl_decoder_mode_name(cxled->mode));
}

static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t len)
{
	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
	enum cxl_decoder_mode mode;
	ssize_t rc;

	if (sysfs_streq(buf, "pmem"))
		mode = CXL_DECODER_PMEM;
	else if (sysfs_streq(buf, "ram"))
		mode = CXL_DECODER_RAM;
	else
		return -EINVAL;

	rc = cxl_dpa_set_mode(cxled, mode);
	if (rc)
		return rc;

	return len;
}
static DEVICE_ATTR_RW(mode);

static ssize_t dpa_resource_show(struct device *dev, struct device_attribute *attr,
				 char *buf)
{
	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
	u64 base = cxl_dpa_resource_start(cxled);

	return sysfs_emit(buf, "%#llx\n", base);
}
static DEVICE_ATTR_RO(dpa_resource);

static ssize_t dpa_size_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
	resource_size_t size = cxl_dpa_size(cxled);

	return sysfs_emit(buf, "%pa\n", &size);
}

static ssize_t dpa_size_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
	unsigned long long size;
	ssize_t rc;

	rc = kstrtoull(buf, 0, &size);
	if (rc)
		return rc;

	if (!IS_ALIGNED(size, SZ_256M))
		return -EINVAL;

	rc = cxl_dpa_free(cxled);
	if (rc)
		return rc;

	if (size == 0)
		return len;

	rc = cxl_dpa_alloc(cxled, size);
	if (rc)
		return rc;

	return len;
}
static DEVICE_ATTR_RW(dpa_size);
static ssize_t interleave_granularity_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);

	return sysfs_emit(buf, "%d\n", cxld->interleave_granularity);
}
static DEVICE_ATTR_RO(interleave_granularity);

static ssize_t interleave_ways_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);

	return sysfs_emit(buf, "%d\n", cxld->interleave_ways);
}
static DEVICE_ATTR_RO(interleave_ways);

static struct attribute *cxl_decoder_base_attrs[] = {
	&dev_attr_start.attr,
	&dev_attr_size.attr,
	&dev_attr_locked.attr,
	&dev_attr_interleave_granularity.attr,
	&dev_attr_interleave_ways.attr,
	NULL,
};

static struct attribute_group cxl_decoder_base_attribute_group = {
	.attrs = cxl_decoder_base_attrs,
};

static struct attribute *cxl_decoder_root_attrs[] = {
	&dev_attr_cap_pmem.attr,
	&dev_attr_cap_ram.attr,
	&dev_attr_cap_type2.attr,
	&dev_attr_cap_type3.attr,
	&dev_attr_target_list.attr,
	SET_CXL_REGION_ATTR(create_pmem_region)
	SET_CXL_REGION_ATTR(create_ram_region)
	SET_CXL_REGION_ATTR(delete_region)
	NULL,
};
static bool can_create_pmem(struct cxl_root_decoder *cxlrd)
{
	unsigned long flags = CXL_DECODER_F_TYPE3 | CXL_DECODER_F_PMEM;

	return (cxlrd->cxlsd.cxld.flags & flags) == flags;
}

static bool can_create_ram(struct cxl_root_decoder *cxlrd)
{
	unsigned long flags = CXL_DECODER_F_TYPE3 | CXL_DECODER_F_RAM;

	return (cxlrd->cxlsd.cxld.flags & flags) == flags;
}

static umode_t cxl_root_decoder_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);

	if (a == CXL_REGION_ATTR(create_pmem_region) && !can_create_pmem(cxlrd))
		return 0;

	if (a == CXL_REGION_ATTR(create_ram_region) && !can_create_ram(cxlrd))
		return 0;

	if (a == CXL_REGION_ATTR(delete_region) &&
	    !(can_create_pmem(cxlrd) || can_create_ram(cxlrd)))
		return 0;

	return a->mode;
}

static struct attribute_group cxl_decoder_root_attribute_group = {
	.attrs = cxl_decoder_root_attrs,
	.is_visible = cxl_root_decoder_visible,
};

static const struct attribute_group *cxl_decoder_root_attribute_groups[] = {
	&cxl_decoder_root_attribute_group,
	&cxl_decoder_base_attribute_group,
	&cxl_base_attribute_group,
	NULL,
};

static struct attribute *cxl_decoder_switch_attrs[] = {
	&dev_attr_target_type.attr,
	&dev_attr_target_list.attr,
	SET_CXL_REGION_ATTR(region)
	NULL,
};

static struct attribute_group cxl_decoder_switch_attribute_group = {
	.attrs = cxl_decoder_switch_attrs,
};

static const struct attribute_group *cxl_decoder_switch_attribute_groups[] = {
	&cxl_decoder_switch_attribute_group,
	&cxl_decoder_base_attribute_group,
	&cxl_base_attribute_group,
	NULL,
};

static struct attribute *cxl_decoder_endpoint_attrs[] = {
	&dev_attr_target_type.attr,
	&dev_attr_mode.attr,
	&dev_attr_dpa_size.attr,
	&dev_attr_dpa_resource.attr,
	SET_CXL_REGION_ATTR(region)
	NULL,
};

static struct attribute_group cxl_decoder_endpoint_attribute_group = {
	.attrs = cxl_decoder_endpoint_attrs,
};

static const struct attribute_group *cxl_decoder_endpoint_attribute_groups[] = {
	&cxl_decoder_base_attribute_group,
	&cxl_decoder_endpoint_attribute_group,
	&cxl_base_attribute_group,
	NULL,
};
static void __cxl_decoder_release(struct cxl_decoder *cxld)
{
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);

	ida_free(&port->decoder_ida, cxld->id);
	put_device(&port->dev);
}

static void cxl_endpoint_decoder_release(struct device *dev)
{
	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);

	__cxl_decoder_release(&cxled->cxld);
	kfree(cxled);
}

static void cxl_switch_decoder_release(struct device *dev)
{
	struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev);

	__cxl_decoder_release(&cxlsd->cxld);
	kfree(cxlsd);
}

struct cxl_root_decoder *to_cxl_root_decoder(struct device *dev)
{
	if (dev_WARN_ONCE(dev, !is_root_decoder(dev),
			  "not a cxl_root_decoder device\n"))
		return NULL;
	return container_of(dev, struct cxl_root_decoder, cxlsd.cxld.dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_root_decoder, CXL);

static void cxl_root_decoder_release(struct device *dev)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);

	if (atomic_read(&cxlrd->region_id) >= 0)
		memregion_free(atomic_read(&cxlrd->region_id));
	__cxl_decoder_release(&cxlrd->cxlsd.cxld);
	kfree(cxlrd);
}

static const struct device_type cxl_decoder_endpoint_type = {
	.name = "cxl_decoder_endpoint",
	.release = cxl_endpoint_decoder_release,
	.groups = cxl_decoder_endpoint_attribute_groups,
};

static const struct device_type cxl_decoder_switch_type = {
	.name = "cxl_decoder_switch",
	.release = cxl_switch_decoder_release,
	.groups = cxl_decoder_switch_attribute_groups,
};

static const struct device_type cxl_decoder_root_type = {
	.name = "cxl_decoder_root",
	.release = cxl_root_decoder_release,
	.groups = cxl_decoder_root_attribute_groups,
};

bool is_endpoint_decoder(struct device *dev)
{
	return dev->type == &cxl_decoder_endpoint_type;
}
EXPORT_SYMBOL_NS_GPL(is_endpoint_decoder, CXL);

bool is_root_decoder(struct device *dev)
{
	return dev->type == &cxl_decoder_root_type;
}
EXPORT_SYMBOL_NS_GPL(is_root_decoder, CXL);

bool is_switch_decoder(struct device *dev)
{
	return is_root_decoder(dev) || dev->type == &cxl_decoder_switch_type;
}
EXPORT_SYMBOL_NS_GPL(is_switch_decoder, CXL);

struct cxl_decoder *to_cxl_decoder(struct device *dev)
{
	if (dev_WARN_ONCE(dev,
			  !is_switch_decoder(dev) && !is_endpoint_decoder(dev),
			  "not a cxl_decoder device\n"))
		return NULL;
	return container_of(dev, struct cxl_decoder, dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_decoder, CXL);

struct cxl_endpoint_decoder *to_cxl_endpoint_decoder(struct device *dev)
{
	if (dev_WARN_ONCE(dev, !is_endpoint_decoder(dev),
			  "not a cxl_endpoint_decoder device\n"))
		return NULL;
	return container_of(dev, struct cxl_endpoint_decoder, cxld.dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_endpoint_decoder, CXL);

struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev)
{
	if (dev_WARN_ONCE(dev, !is_switch_decoder(dev),
			  "not a cxl_switch_decoder device\n"))
		return NULL;
	return container_of(dev, struct cxl_switch_decoder, cxld.dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_switch_decoder, CXL);
static void cxl_ep_release(struct cxl_ep *ep)
{
	put_device(ep->ep);
	kfree(ep);
}

static void cxl_ep_remove(struct cxl_port *port, struct cxl_ep *ep)
{
	if (!ep)
		return;
	xa_erase(&port->endpoints, (unsigned long) ep->ep);
	cxl_ep_release(ep);
}

static void cxl_port_release(struct device *dev)
{
	struct cxl_port *port = to_cxl_port(dev);
	unsigned long index;
	struct cxl_ep *ep;

	xa_for_each(&port->endpoints, index, ep)
		cxl_ep_remove(port, ep);
	xa_destroy(&port->endpoints);
	xa_destroy(&port->dports);
	xa_destroy(&port->regions);
	ida_free(&cxl_port_ida, port->id);
	kfree(port);
}

static const struct attribute_group *cxl_port_attribute_groups[] = {
	&cxl_base_attribute_group,
	NULL,
};

static const struct device_type cxl_port_type = {
	.name = "cxl_port",
	.release = cxl_port_release,
	.groups = cxl_port_attribute_groups,
};

bool is_cxl_port(const struct device *dev)
{
	return dev->type == &cxl_port_type;
}
EXPORT_SYMBOL_NS_GPL(is_cxl_port, CXL);

struct cxl_port *to_cxl_port(const struct device *dev)
{
	if (dev_WARN_ONCE(dev, dev->type != &cxl_port_type,
			  "not a cxl_port device\n"))
		return NULL;
	return container_of(dev, struct cxl_port, dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_port, CXL);
static void unregister_port(void *_port)
{
	struct cxl_port *port = _port;
	struct cxl_port *parent;
	struct device *lock_dev;

	if (is_cxl_root(port))
		parent = NULL;
	else
		parent = to_cxl_port(port->dev.parent);

	/*
	 * CXL root ports and the first level of ports are unregistered
	 * under the platform firmware device lock; all other ports are
	 * unregistered while holding their parent port lock.
	 */
	if (!parent)
		lock_dev = port->uport_dev;
	else if (is_cxl_root(parent))
		lock_dev = parent->uport_dev;
	else
		lock_dev = &parent->dev;

	device_lock_assert(lock_dev);
	port->dead = true;
	device_unregister(&port->dev);
}

static void cxl_unlink_uport(void *_port)
{
	struct cxl_port *port = _port;

	sysfs_remove_link(&port->dev.kobj, "uport");
}

static int devm_cxl_link_uport(struct device *host, struct cxl_port *port)
{
	int rc;

	rc = sysfs_create_link(&port->dev.kobj, &port->uport_dev->kobj,
			       "uport");
	if (rc)
		return rc;
	return devm_add_action_or_reset(host, cxl_unlink_uport, port);
}

static void cxl_unlink_parent_dport(void *_port)
{
	struct cxl_port *port = _port;

	sysfs_remove_link(&port->dev.kobj, "parent_dport");
}

static int devm_cxl_link_parent_dport(struct device *host,
				      struct cxl_port *port,
				      struct cxl_dport *parent_dport)
{
	int rc;

	if (!parent_dport)
		return 0;

	rc = sysfs_create_link(&port->dev.kobj, &parent_dport->dport_dev->kobj,
			       "parent_dport");
	if (rc)
		return rc;
	return devm_add_action_or_reset(host, cxl_unlink_parent_dport, port);
}
static struct lock_class_key cxl_port_key;

static struct cxl_port *cxl_port_alloc(struct device *uport_dev,
				       resource_size_t component_reg_phys,
				       struct cxl_dport *parent_dport)
{
	struct cxl_port *port;
	struct device *dev;
	int rc;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return ERR_PTR(-ENOMEM);

	rc = ida_alloc(&cxl_port_ida, GFP_KERNEL);
	if (rc < 0)
		goto err;
	port->id = rc;
	port->uport_dev = uport_dev;

	/*
	 * The top-level cxl_port "cxl_root" does not have a cxl_port as
	 * its parent and it does not have any corresponding component
	 * registers as its decode is described by a fixed platform
	 * description.
	 */
	dev = &port->dev;
	if (parent_dport) {
		struct cxl_port *parent_port = parent_dport->port;
		struct cxl_port *iter;

		dev->parent = &parent_port->dev;
		port->depth = parent_port->depth + 1;
		port->parent_dport = parent_dport;

		/*
		 * walk to the host bridge, or the first ancestor that knows
		 * the host bridge
		 */
		iter = port;
		while (!iter->host_bridge &&
		       !is_cxl_root(to_cxl_port(iter->dev.parent)))
			iter = to_cxl_port(iter->dev.parent);
		if (iter->host_bridge)
			port->host_bridge = iter->host_bridge;
		else if (parent_dport->rch)
			port->host_bridge = parent_dport->dport_dev;
		else
			port->host_bridge = iter->uport_dev;
		dev_dbg(uport_dev, "host-bridge: %s\n",
			dev_name(port->host_bridge));
	} else
		dev->parent = uport_dev;

	port->component_reg_phys = component_reg_phys;
	ida_init(&port->decoder_ida);
	port->hdm_end = -1;
	port->commit_end = -1;
	xa_init(&port->dports);
	xa_init(&port->endpoints);
	xa_init(&port->regions);

	device_initialize(dev);
	lockdep_set_class_and_subclass(&dev->mutex, &cxl_port_key, port->depth);
	device_set_pm_not_required(dev);
	dev->bus = &cxl_bus_type;
	dev->type = &cxl_port_type;

	return port;

err:
	kfree(port);
	return ERR_PTR(rc);
}
static int cxl_setup_comp_regs(struct device *host, struct cxl_register_map *map,
			       resource_size_t component_reg_phys)
{
	if (component_reg_phys == CXL_RESOURCE_NONE)
		return 0;

	*map = (struct cxl_register_map) {
		.host = host,
		.reg_type = CXL_REGLOC_RBI_COMPONENT,
		.resource = component_reg_phys,
		.max_size = CXL_COMPONENT_REG_BLOCK_SIZE,
	};

	return cxl_setup_regs(map);
}

static int cxl_port_setup_regs(struct cxl_port *port,
			       resource_size_t component_reg_phys)
{
	if (dev_is_platform(port->uport_dev))
		return 0;
	return cxl_setup_comp_regs(&port->dev, &port->comp_map,
				   component_reg_phys);
}

static int cxl_dport_setup_regs(struct device *host, struct cxl_dport *dport,
				resource_size_t component_reg_phys)
{
	int rc;

	if (dev_is_platform(dport->dport_dev))
		return 0;

	/*
	 * use @dport->dport_dev for the context for error messages during
	 * register probing, and fixup @host after the fact, since @host may be
	 * NULL.
	 */
	rc = cxl_setup_comp_regs(dport->dport_dev, &dport->comp_map,
				 component_reg_phys);
	dport->comp_map.host = host;
	return rc;
}
static struct cxl_port *__devm_cxl_add_port(struct device *host,
					    struct device *uport_dev,
					    resource_size_t component_reg_phys,
					    struct cxl_dport *parent_dport)
{
	struct cxl_port *port;
	struct device *dev;
	int rc;

	port = cxl_port_alloc(uport_dev, component_reg_phys, parent_dport);
	if (IS_ERR(port))
		return port;

	dev = &port->dev;
	if (is_cxl_memdev(uport_dev))
		rc = dev_set_name(dev, "endpoint%d", port->id);
	else if (parent_dport)
		rc = dev_set_name(dev, "port%d", port->id);
	else
		rc = dev_set_name(dev, "root%d", port->id);
	if (rc)
		goto err;

	rc = cxl_port_setup_regs(port, component_reg_phys);
	if (rc)
		goto err;

	rc = device_add(dev);
	if (rc)
		goto err;

	rc = devm_add_action_or_reset(host, unregister_port, port);
	if (rc)
		return ERR_PTR(rc);

	rc = devm_cxl_link_uport(host, port);
	if (rc)
		return ERR_PTR(rc);

	rc = devm_cxl_link_parent_dport(host, port, parent_dport);
	if (rc)
		return ERR_PTR(rc);

	return port;

err:
	put_device(dev);
	return ERR_PTR(rc);
}

/**
 * devm_cxl_add_port - register a cxl_port in CXL memory decode hierarchy
 * @host: host device for devm operations
 * @uport_dev: "physical" device implementing this upstream port
 * @component_reg_phys: (optional) for configurable cxl_port instances
 * @parent_dport: next hop up in the CXL memory decode hierarchy
 */
struct cxl_port *devm_cxl_add_port(struct device *host,
				   struct device *uport_dev,
				   resource_size_t component_reg_phys,
				   struct cxl_dport *parent_dport)
{
	struct cxl_port *port, *parent_port;

	port = __devm_cxl_add_port(host, uport_dev, component_reg_phys,
				   parent_dport);

	parent_port = parent_dport ? parent_dport->port : NULL;
	if (IS_ERR(port)) {
		dev_dbg(uport_dev, "Failed to add%s%s%s: %ld\n",
			parent_port ? " port to " : "",
			parent_port ? dev_name(&parent_port->dev) : "",
			parent_port ? "" : " root port",
			PTR_ERR(port));
	} else {
		dev_dbg(uport_dev, "%s added%s%s%s\n",
			dev_name(&port->dev),
			parent_port ? " to " : "",
			parent_port ? dev_name(&parent_port->dev) : "",
			parent_port ? "" : " (root port)");
	}

	return port;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_port, CXL);
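
/*
 * Usage sketch (illustrative, not part of this file): a platform driver
 * builds the hierarchy top-down. The CXL root has no parent dport and no
 * component registers; the names below are hypothetical:
 *
 *	root = devm_cxl_add_port(host, host, CXL_RESOURCE_NONE, NULL);
 *	dport = devm_cxl_add_dport(root, hb_dev, 0, component_reg_phys);
 *	hb = devm_cxl_add_port(host, hb_dev, component_reg_phys, dport);
 *
 * Each deeper port passes the dport discovered at its parent as
 * @parent_dport.
 */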
struct pci_bus *cxl_port_to_pci_bus(struct cxl_port *port)
{
	/* There is no pci_bus associated with a CXL platform-root port */
	if (is_cxl_root(port))
		return NULL;

	if (dev_is_pci(port->uport_dev)) {
		struct pci_dev *pdev = to_pci_dev(port->uport_dev);

		return pdev->subordinate;
	}

	return xa_load(&cxl_root_buses, (unsigned long)port->uport_dev);
}
EXPORT_SYMBOL_NS_GPL(cxl_port_to_pci_bus, CXL);

static void unregister_pci_bus(void *uport_dev)
{
	xa_erase(&cxl_root_buses, (unsigned long)uport_dev);
}

int devm_cxl_register_pci_bus(struct device *host, struct device *uport_dev,
			      struct pci_bus *bus)
{
	int rc;

	if (dev_is_pci(uport_dev))
		return -EINVAL;

	rc = xa_insert(&cxl_root_buses, (unsigned long)uport_dev, bus,
		       GFP_KERNEL);
	if (rc)
		return rc;
	return devm_add_action_or_reset(host, unregister_pci_bus, uport_dev);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_register_pci_bus, CXL);

static bool dev_is_cxl_root_child(struct device *dev)
{
	struct cxl_port *port, *parent;

	if (!is_cxl_port(dev))
		return false;

	port = to_cxl_port(dev);
	if (is_cxl_root(port))
		return false;

	parent = to_cxl_port(port->dev.parent);
	if (is_cxl_root(parent))
		return true;

	return false;
}

struct cxl_port *find_cxl_root(struct cxl_port *port)
{
	struct cxl_port *iter = port;

	while (iter && !is_cxl_root(iter))
		iter = to_cxl_port(iter->dev.parent);

	if (!iter)
		return NULL;
	get_device(&iter->dev);
	return iter;
}
EXPORT_SYMBOL_NS_GPL(find_cxl_root, CXL);
static struct cxl_dport *find_dport(struct cxl_port *port, int id)
{
	struct cxl_dport *dport;
	unsigned long index;

	device_lock_assert(&port->dev);
	xa_for_each(&port->dports, index, dport)
		if (dport->port_id == id)
			return dport;
	return NULL;
}

static int add_dport(struct cxl_port *port, struct cxl_dport *dport)
{
	struct cxl_dport *dup;
	int rc;

	device_lock_assert(&port->dev);
	dup = find_dport(port, dport->port_id);
	if (dup) {
		dev_err(&port->dev,
			"unable to add dport%d-%s non-unique port id (%s)\n",
			dport->port_id, dev_name(dport->dport_dev),
			dev_name(dup->dport_dev));
		return -EBUSY;
	}

	rc = xa_insert(&port->dports, (unsigned long)dport->dport_dev, dport,
		       GFP_KERNEL);
	if (rc)
		return rc;

	port->nr_dports++;
	return 0;
}

/*
 * Since root-level CXL dports cannot be enumerated by PCI they are not
 * enumerated by the common port driver that acquires the port lock over
 * dport add/remove. Instead, root dports are manually added by a
 * platform driver and cond_cxl_root_lock() is used to take the missing
 * port lock in that case.
 */
static void cond_cxl_root_lock(struct cxl_port *port)
{
	if (is_cxl_root(port))
		device_lock(&port->dev);
}

static void cond_cxl_root_unlock(struct cxl_port *port)
{
	if (is_cxl_root(port))
		device_unlock(&port->dev);
}

static void cxl_dport_remove(void *data)
{
	struct cxl_dport *dport = data;
	struct cxl_port *port = dport->port;

	xa_erase(&port->dports, (unsigned long) dport->dport_dev);
	put_device(dport->dport_dev);
}

static void cxl_dport_unlink(void *data)
{
	struct cxl_dport *dport = data;
	struct cxl_port *port = dport->port;
	char link_name[CXL_TARGET_STRLEN];

	sprintf(link_name, "dport%d", dport->port_id);
	sysfs_remove_link(&port->dev.kobj, link_name);
}
static struct cxl_dport *
__devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev,
		     int port_id, resource_size_t component_reg_phys,
		     resource_size_t rcrb)
{
	char link_name[CXL_TARGET_STRLEN];
	struct cxl_dport *dport;
	struct device *host;
	int rc;

	if (is_cxl_root(port))
		host = port->uport_dev;
	else
		host = &port->dev;

	if (!host->driver) {
		dev_WARN_ONCE(&port->dev, 1, "dport:%s bad devm context\n",
			      dev_name(dport_dev));
		return ERR_PTR(-ENXIO);
	}

	if (snprintf(link_name, CXL_TARGET_STRLEN, "dport%d", port_id) >=
	    CXL_TARGET_STRLEN)
		return ERR_PTR(-EINVAL);

	dport = devm_kzalloc(host, sizeof(*dport), GFP_KERNEL);
	if (!dport)
		return ERR_PTR(-ENOMEM);

	dport->dport_dev = dport_dev;
	dport->port_id = port_id;
	dport->port = port;

	if (rcrb == CXL_RESOURCE_NONE) {
		rc = cxl_dport_setup_regs(&port->dev, dport,
					  component_reg_phys);
		if (rc)
			return ERR_PTR(rc);
	} else {
		dport->rcrb.base = rcrb;
		component_reg_phys = __rcrb_to_component(dport_dev, &dport->rcrb,
							 CXL_RCRB_DOWNSTREAM);
		if (component_reg_phys == CXL_RESOURCE_NONE) {
			dev_warn(dport_dev, "Invalid Component Registers in RCRB");
			return ERR_PTR(-ENXIO);
		}

		/*
		 * RCH @dport is not ready to map until associated with its
		 * memdev.
		 */
		rc = cxl_dport_setup_regs(NULL, dport, component_reg_phys);
		if (rc)
			return ERR_PTR(rc);

		dport->rch = true;
	}

	if (component_reg_phys != CXL_RESOURCE_NONE)
		dev_dbg(dport_dev, "Component Registers found for dport: %pa\n",
			&component_reg_phys);

	cond_cxl_root_lock(port);
	rc = add_dport(port, dport);
	cond_cxl_root_unlock(port);
	if (rc)
		return ERR_PTR(rc);

	get_device(dport_dev);
	rc = devm_add_action_or_reset(host, cxl_dport_remove, dport);
	if (rc)
		return ERR_PTR(rc);

	rc = sysfs_create_link(&port->dev.kobj, &dport_dev->kobj, link_name);
	if (rc)
		return ERR_PTR(rc);

	rc = devm_add_action_or_reset(host, cxl_dport_unlink, dport);
	if (rc)
		return ERR_PTR(rc);

	return dport;
}

/**
 * devm_cxl_add_dport - append VH downstream port data to a cxl_port
 * @port: the cxl_port that references this dport
 * @dport_dev: firmware or PCI device representing the dport
 * @port_id: identifier for this dport in a decoder's target list
 * @component_reg_phys: optional location of CXL component registers
 *
 * Note that dports are appended to the devm release actions of either
 * the port's host (for root ports), or the port itself (for switch
 * ports).
 */
struct cxl_dport *devm_cxl_add_dport(struct cxl_port *port,
				     struct device *dport_dev, int port_id,
				     resource_size_t component_reg_phys)
{
	struct cxl_dport *dport;

	dport = __devm_cxl_add_dport(port, dport_dev, port_id,
				     component_reg_phys, CXL_RESOURCE_NONE);
	if (IS_ERR(dport)) {
		dev_dbg(dport_dev, "failed to add dport to %s: %ld\n",
			dev_name(&port->dev), PTR_ERR(dport));
	} else {
		dev_dbg(dport_dev, "dport added to %s\n",
			dev_name(&port->dev));
	}

	return dport;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_dport, CXL);
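
/*
 * Sketch (assumed caller context, names hypothetical): a port driver
 * enumerating PCIe downstream switch ports would register each bridge
 * as a dport, keyed by its PCIe port number:
 *
 *	dport = devm_cxl_add_dport(port, &pdev->dev, port_num,
 *				   component_reg_phys);
 *	if (IS_ERR(dport))
 *		return PTR_ERR(dport);
 */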
/**
 * devm_cxl_add_rch_dport - append RCH downstream port data to a cxl_port
 * @port: the cxl_port that references this dport
 * @dport_dev: firmware or PCI device representing the dport
 * @port_id: identifier for this dport in a decoder's target list
 * @rcrb: mandatory location of a Root Complex Register Block
 *
 * See CXL 3.0 9.11.8 CXL Devices Attached to an RCH
 */
struct cxl_dport *devm_cxl_add_rch_dport(struct cxl_port *port,
					 struct device *dport_dev, int port_id,
					 resource_size_t rcrb)
{
	struct cxl_dport *dport;

	if (rcrb == CXL_RESOURCE_NONE) {
		dev_dbg(&port->dev, "failed to add RCH dport, missing RCRB\n");
		return ERR_PTR(-EINVAL);
	}

	dport = __devm_cxl_add_dport(port, dport_dev, port_id,
				     CXL_RESOURCE_NONE, rcrb);
	if (IS_ERR(dport)) {
		dev_dbg(dport_dev, "failed to add RCH dport to %s: %ld\n",
			dev_name(&port->dev), PTR_ERR(dport));
	} else {
		dev_dbg(dport_dev, "RCH dport added to %s\n",
			dev_name(&port->dev));
	}

	return dport;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_rch_dport, CXL);

static int add_ep(struct cxl_ep *new)
{
	struct cxl_port *port = new->dport->port;
	int rc;

	device_lock(&port->dev);
	if (port->dead) {
		device_unlock(&port->dev);
		return -ENXIO;
	}
	rc = xa_insert(&port->endpoints, (unsigned long)new->ep, new,
		       GFP_KERNEL);
	device_unlock(&port->dev);

	return rc;
}

/**
 * cxl_add_ep - register an endpoint's interest in a port
 * @dport: the dport that routes to @ep_dev
 * @ep_dev: device representing the endpoint
 *
 * Intermediate CXL ports are scanned based on the arrival of endpoints.
 * When those endpoints depart the port can be destroyed once all
 * endpoints that care about that port have been removed.
 */
static int cxl_add_ep(struct cxl_dport *dport, struct device *ep_dev)
{
	struct cxl_ep *ep;
	int rc;

	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	ep->ep = get_device(ep_dev);
	ep->dport = dport;

	rc = add_ep(ep);
	if (rc)
		cxl_ep_release(ep);
	return rc;
}
struct cxl_find_port_ctx {
	const struct device *dport_dev;
	const struct cxl_port *parent_port;
	struct cxl_dport **dport;
};

static int match_port_by_dport(struct device *dev, const void *data)
{
	const struct cxl_find_port_ctx *ctx = data;
	struct cxl_dport *dport;
	struct cxl_port *port;

	if (!is_cxl_port(dev))
		return 0;
	if (ctx->parent_port && dev->parent != &ctx->parent_port->dev)
		return 0;

	port = to_cxl_port(dev);
	dport = cxl_find_dport_by_dev(port, ctx->dport_dev);
	if (ctx->dport)
		*ctx->dport = dport;
	return dport != NULL;
}

static struct cxl_port *__find_cxl_port(struct cxl_find_port_ctx *ctx)
{
	struct device *dev;

	if (!ctx->dport_dev)
		return NULL;

	dev = bus_find_device(&cxl_bus_type, NULL, ctx, match_port_by_dport);
	if (dev)
		return to_cxl_port(dev);
	return NULL;
}

static struct cxl_port *find_cxl_port(struct device *dport_dev,
				      struct cxl_dport **dport)
{
	struct cxl_find_port_ctx ctx = {
		.dport_dev = dport_dev,
		.dport = dport,
	};
	struct cxl_port *port;

	port = __find_cxl_port(&ctx);
	return port;
}

static struct cxl_port *find_cxl_port_at(struct cxl_port *parent_port,
					 struct device *dport_dev,
					 struct cxl_dport **dport)
{
	struct cxl_find_port_ctx ctx = {
		.dport_dev = dport_dev,
		.parent_port = parent_port,
		.dport = dport,
	};
	struct cxl_port *port;

	port = __find_cxl_port(&ctx);
	return port;
}

/*
 * All users of grandparent() are using it to walk PCIe-like switch port
 * hierarchy. A PCIe switch consists of a bridge device representing the
 * upstream switch port and N bridges representing downstream switch ports. When
 * bridges stack the grand-parent of a downstream switch port is another
 * downstream switch port in the immediate ancestor switch.
 */
static struct device *grandparent(struct device *dev)
{
	if (dev && dev->parent)
		return dev->parent->parent;
	return NULL;
}
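
/*
 * Sketch of the hierarchy grandparent() walks, skipping the intervening
 * upstream/downstream bridge pair at each hop:
 *
 *	root port / ancestor DSP		<- grandparent(dev)
 *	  upstream switch port (USP) bridge	<- dev->parent
 *	    downstream switch port (DSP) bridge	<- dev
 */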
static struct device *endpoint_host(struct cxl_port *endpoint)
{
	struct cxl_port *port = to_cxl_port(endpoint->dev.parent);

	if (is_cxl_root(port))
		return port->uport_dev;
	return &port->dev;
}

static void delete_endpoint(void *data)
{
	struct cxl_memdev *cxlmd = data;
	struct cxl_port *endpoint = cxlmd->endpoint;
	struct device *host = endpoint_host(endpoint);

	device_lock(host);
	if (host->driver && !endpoint->dead) {
		devm_release_action(host, cxl_unlink_parent_dport, endpoint);
		devm_release_action(host, cxl_unlink_uport, endpoint);
		devm_release_action(host, unregister_port, endpoint);
	}
	cxlmd->endpoint = NULL;
	device_unlock(host);
	put_device(&endpoint->dev);
	put_device(host);
}

int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint)
{
	struct device *host = endpoint_host(endpoint);
	struct device *dev = &cxlmd->dev;

	get_device(host);
	get_device(&endpoint->dev);
	cxlmd->endpoint = endpoint;
	cxlmd->depth = endpoint->depth;
	return devm_add_action_or_reset(dev, delete_endpoint, cxlmd);
}
EXPORT_SYMBOL_NS_GPL(cxl_endpoint_autoremove, CXL);

/*
 * The natural end of life of a non-root 'cxl_port' is when its parent port goes
 * through a ->remove() event ("top-down" unregistration). The unnatural trigger
 * for a port to be unregistered is when all memdevs beneath that port have gone
 * through ->remove(). This "bottom-up" removal selectively removes individual
 * child ports manually. This depends on devm_cxl_add_port() to not change its
 * devm action registration order, and for dports to have already been
 * destroyed by reap_dports().
 */
static void delete_switch_port(struct cxl_port *port)
{
	devm_release_action(port->dev.parent, cxl_unlink_parent_dport, port);
	devm_release_action(port->dev.parent, cxl_unlink_uport, port);
	devm_release_action(port->dev.parent, unregister_port, port);
}
static void reap_dports(struct cxl_port *port)
{
	struct cxl_dport *dport;
	unsigned long index;

	device_lock_assert(&port->dev);

	xa_for_each(&port->dports, index, dport) {
		devm_release_action(&port->dev, cxl_dport_unlink, dport);
		devm_release_action(&port->dev, cxl_dport_remove, dport);
		devm_kfree(&port->dev, dport);
	}
}

struct detach_ctx {
	struct cxl_memdev *cxlmd;
	int depth;
};

static int port_has_memdev(struct device *dev, const void *data)
{
	const struct detach_ctx *ctx = data;
	struct cxl_port *port;

	if (!is_cxl_port(dev))
		return 0;

	port = to_cxl_port(dev);
	if (port->depth != ctx->depth)
		return 0;

	return !!cxl_ep_load(port, ctx->cxlmd);
}

static void cxl_detach_ep(void *data)
{
	struct cxl_memdev *cxlmd = data;

	for (int i = cxlmd->depth - 1; i >= 1; i--) {
		struct cxl_port *port, *parent_port;
		struct detach_ctx ctx = {
			.cxlmd = cxlmd,
			.depth = i,
		};
		struct device *dev;
		struct cxl_ep *ep;
		bool died = false;

		dev = bus_find_device(&cxl_bus_type, NULL, &ctx,
				      port_has_memdev);
		if (!dev)
			continue;

		port = to_cxl_port(dev);
		parent_port = to_cxl_port(port->dev.parent);
		device_lock(&parent_port->dev);
		device_lock(&port->dev);
		ep = cxl_ep_load(port, cxlmd);
		dev_dbg(&cxlmd->dev, "disconnect %s from %s\n",
			ep ? dev_name(ep->ep) : "", dev_name(&port->dev));
		cxl_ep_remove(port, ep);
		if (ep && !port->dead && xa_empty(&port->endpoints) &&
		    !is_cxl_root(parent_port) && parent_port->dev.driver) {
			/*
			 * This was the last ep attached to a dynamically
			 * enumerated port. Block new cxl_add_ep() and garbage
			 * collect the port.
			 */
			died = true;
			port->dead = true;
			reap_dports(port);
		}
		device_unlock(&port->dev);

		if (died) {
			dev_dbg(&cxlmd->dev, "delete %s\n",
				dev_name(&port->dev));
			delete_switch_port(port);
		}
		put_device(&port->dev);
		device_unlock(&parent_port->dev);
	}
}
static resource_size_t find_component_registers(struct device *dev)
{
	struct cxl_register_map map;
	struct pci_dev *pdev;

	/*
	 * Theoretically, CXL component registers can be hosted on a
	 * non-PCI device; in practice, only cxl_test hits this case.
	 */
	if (!dev_is_pci(dev))
		return CXL_RESOURCE_NONE;

	pdev = to_pci_dev(dev);

	cxl_find_regblock(pdev, CXL_REGLOC_RBI_COMPONENT, &map);
	return map.resource;
}

static int add_port_attach_ep(struct cxl_memdev *cxlmd,
			      struct device *uport_dev,
			      struct device *dport_dev)
{
	struct device *dparent = grandparent(dport_dev);
	struct cxl_port *port, *parent_port = NULL;
	struct cxl_dport *dport, *parent_dport;
	resource_size_t component_reg_phys;
	int rc;

	if (!dparent) {
		/*
		 * The iteration reached the topology root without finding the
		 * CXL-root 'cxl_port' on a previous iteration, fail for now to
		 * be re-probed after platform driver attaches.
		 */
		dev_dbg(&cxlmd->dev, "%s is a root dport\n",
			dev_name(dport_dev));
		return -ENXIO;
	}

	parent_port = find_cxl_port(dparent, &parent_dport);
	if (!parent_port) {
		/* iterate to create this parent_port */
		return -EAGAIN;
	}

	device_lock(&parent_port->dev);
	if (!parent_port->dev.driver) {
		dev_warn(&cxlmd->dev,
			 "port %s:%s disabled, failed to enumerate CXL.mem\n",
			 dev_name(&parent_port->dev), dev_name(uport_dev));
		port = ERR_PTR(-ENXIO);
		goto out;
	}

	port = find_cxl_port_at(parent_port, dport_dev, &dport);
	if (!port) {
		component_reg_phys = find_component_registers(uport_dev);
		port = devm_cxl_add_port(&parent_port->dev, uport_dev,
					 component_reg_phys, parent_dport);
		/* retry find to pick up the new dport information */
		if (!IS_ERR(port))
			port = find_cxl_port_at(parent_port, dport_dev, &dport);
	}
out:
	device_unlock(&parent_port->dev);

	if (IS_ERR(port))
		rc = PTR_ERR(port);
	else {
		dev_dbg(&cxlmd->dev, "add to new port %s:%s\n",
			dev_name(&port->dev), dev_name(port->uport_dev));
		rc = cxl_add_ep(dport, &cxlmd->dev);
		if (rc == -EBUSY) {
			/*
			 * "can't" happen, but this error code means
			 * something to the caller, so translate it.
			 */
			rc = -ENXIO;
		}
		put_device(&port->dev);
	}

	put_device(&parent_port->dev);
	return rc;
}
int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd)
{
	struct device *dev = &cxlmd->dev;
	struct device *iter;
	int rc;

	/*
	 * Skip intermediate port enumeration in the RCH case, there
	 * are no ports in between a host bridge and an endpoint.
	 */
	if (cxlmd->cxlds->rcd)
		return 0;

	rc = devm_add_action_or_reset(&cxlmd->dev, cxl_detach_ep, cxlmd);
	if (rc)
		return rc;

	/*
	 * Scan for and add all cxl_ports in this device's ancestry.
	 * Repeat until no more ports are added. Abort if a port add
	 * attempt fails.
	 */
retry:
	for (iter = dev; iter; iter = grandparent(iter)) {
		struct device *dport_dev = grandparent(iter);
		struct device *uport_dev;
		struct cxl_dport *dport;
		struct cxl_port *port;

		if (!dport_dev)
			return 0;

		uport_dev = dport_dev->parent;
		if (!uport_dev) {
			dev_warn(dev, "at %s no parent for dport: %s\n",
				 dev_name(iter), dev_name(dport_dev));
			return -ENXIO;
		}

		dev_dbg(dev, "scan: iter: %s dport_dev: %s parent: %s\n",
			dev_name(iter), dev_name(dport_dev),
			dev_name(uport_dev));
		port = find_cxl_port(dport_dev, &dport);
		if (port) {
			dev_dbg(&cxlmd->dev,
				"found already registered port %s:%s\n",
				dev_name(&port->dev),
				dev_name(port->uport_dev));
			rc = cxl_add_ep(dport, &cxlmd->dev);

			/*
			 * If the endpoint already exists in the port's list,
			 * that's ok, it was added on a previous pass.
			 * Otherwise, retry in add_port_attach_ep() after taking
			 * the parent_port lock, as the current port may be in
			 * the process of being reaped.
			 */
			if (rc && rc != -EBUSY) {
				put_device(&port->dev);
				return rc;
			}

			/* Any more ports to add between this one and the root? */
			if (!dev_is_cxl_root_child(&port->dev)) {
				put_device(&port->dev);
				continue;
			}

			put_device(&port->dev);
			return 0;
		}

		rc = add_port_attach_ep(cxlmd, uport_dev, dport_dev);
		/* port missing, try to add parent */
		if (rc == -EAGAIN)
			continue;
		/* failed to add ep or port */
		if (rc)
			return rc;
		/* port added, new descendants possible, start over */
		goto retry;
	}

	return 0;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_ports, CXL);
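
/*
 * Worked example (hypothetical topology): for
 * memdev -> DSP -> USP -> root port -> host bridge, an early pass finds
 * the already-registered host-bridge port, add_port_attach_ep() then
 * instantiates the switch port, and the scan restarts via the retry
 * label so the endpoint can register interest in the new port on the
 * next pass.
 */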
struct cxl_port *cxl_pci_find_port(struct pci_dev *pdev,
				   struct cxl_dport **dport)
{
	return find_cxl_port(pdev->dev.parent, dport);
}
EXPORT_SYMBOL_NS_GPL(cxl_pci_find_port, CXL);

struct cxl_port *cxl_mem_find_port(struct cxl_memdev *cxlmd,
				   struct cxl_dport **dport)
{
	return find_cxl_port(grandparent(&cxlmd->dev), dport);
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_find_port, CXL);

static int decoder_populate_targets(struct cxl_switch_decoder *cxlsd,
				    struct cxl_port *port, int *target_map)
{
	int i, rc = 0;

	if (!target_map)
		return 0;

	device_lock_assert(&port->dev);

	if (xa_empty(&port->dports))
		return -EINVAL;

	write_seqlock(&cxlsd->target_lock);
	for (i = 0; i < cxlsd->nr_targets; i++) {
		struct cxl_dport *dport = find_dport(port, target_map[i]);

		if (!dport) {
			rc = -ENXIO;
			break;
		}
		cxlsd->target[i] = dport;
	}
	write_sequnlock(&cxlsd->target_lock);

	return rc;
}

struct cxl_dport *cxl_hb_modulo(struct cxl_root_decoder *cxlrd, int pos)
{
	struct cxl_switch_decoder *cxlsd = &cxlrd->cxlsd;
	struct cxl_decoder *cxld = &cxlsd->cxld;
	int iw;

	iw = cxld->interleave_ways;
	if (dev_WARN_ONCE(&cxld->dev, iw != cxlsd->nr_targets,
			  "misconfigured root decoder\n"))
		return NULL;

	return cxlrd->cxlsd.target[pos % iw];
}
EXPORT_SYMBOL_NS_GPL(cxl_hb_modulo, CXL);
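
/*
 * For example, a 2-way interleaved root decoder (iw == nr_targets == 2)
 * maps positions 0, 1, 2, 3 to target[0], target[1], target[0],
 * target[1] respectively: pos % iw simply alternates between the two
 * host bridges.
 */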
static struct lock_class_key cxl_decoder_key;

/**
 * cxl_decoder_init - Common decoder setup / initialization
 * @port: owning port of this decoder
 * @cxld: common decoder properties to initialize
 *
 * A port may contain one or more decoders. Each of those decoders
 * enables some address space for CXL.mem utilization. A decoder is
 * expected to be configured by the caller before registering via
 * cxl_decoder_add().
 */
static int cxl_decoder_init(struct cxl_port *port, struct cxl_decoder *cxld)
{
	struct device *dev;
	int rc;

	rc = ida_alloc(&port->decoder_ida, GFP_KERNEL);
	if (rc < 0)
		return rc;

	/* need parent to stick around to release the id */
	get_device(&port->dev);
	cxld->id = rc;

	dev = &cxld->dev;
	device_initialize(dev);
	lockdep_set_class(&dev->mutex, &cxl_decoder_key);
	device_set_pm_not_required(dev);
	dev->parent = &port->dev;
	dev->bus = &cxl_bus_type;

	/* Pre initialize an "empty" decoder */
	cxld->interleave_ways = 1;
	cxld->interleave_granularity = PAGE_SIZE;
	cxld->target_type = CXL_DECODER_HOSTONLYMEM;
	cxld->hpa_range = (struct range) {
		.start = 0,
		.end = -1,
	};

	return 0;
}

static int cxl_switch_decoder_init(struct cxl_port *port,
				   struct cxl_switch_decoder *cxlsd,
				   int nr_targets)
{
	if (nr_targets > CXL_DECODER_MAX_INTERLEAVE)
		return -EINVAL;

	cxlsd->nr_targets = nr_targets;
	seqlock_init(&cxlsd->target_lock);
	return cxl_decoder_init(port, &cxlsd->cxld);
}

/**
 * cxl_root_decoder_alloc - Allocate a root level decoder
 * @port: owning CXL root of this decoder
 * @nr_targets: static number of downstream targets
 * @calc_hb: which host bridge covers the n'th position by granularity
 *
 * Return: A new cxl decoder to be registered by cxl_decoder_add(). A
 * 'CXL root' decoder is one that decodes from a top-level / static platform
 * firmware description of CXL resources into a CXL standard decode
 * topology.
 */
struct cxl_root_decoder *cxl_root_decoder_alloc(struct cxl_port *port,
						unsigned int nr_targets,
						cxl_calc_hb_fn calc_hb)
{
	struct cxl_root_decoder *cxlrd;
	struct cxl_switch_decoder *cxlsd;
	struct cxl_decoder *cxld;
	int rc;

	if (!is_cxl_root(port))
		return ERR_PTR(-EINVAL);

	cxlrd = kzalloc(struct_size(cxlrd, cxlsd.target, nr_targets),
			GFP_KERNEL);
	if (!cxlrd)
		return ERR_PTR(-ENOMEM);

	cxlsd = &cxlrd->cxlsd;
	rc = cxl_switch_decoder_init(port, cxlsd, nr_targets);
	if (rc) {
		kfree(cxlrd);
		return ERR_PTR(rc);
	}

	cxlrd->calc_hb = calc_hb;
	mutex_init(&cxlrd->range_lock);

	cxld = &cxlsd->cxld;
	cxld->dev.type = &cxl_decoder_root_type;
	/*
	 * cxl_root_decoder_release() special cases negative ids to
	 * detect memregion_alloc() failures.
	 */
	atomic_set(&cxlrd->region_id, -1);
	rc = memregion_alloc(GFP_KERNEL);
	if (rc < 0) {
		put_device(&cxld->dev);
		return ERR_PTR(rc);
	}

	atomic_set(&cxlrd->region_id, rc);
	return cxlrd;
}
EXPORT_SYMBOL_NS_GPL(cxl_root_decoder_alloc, CXL);
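
/*
 * Usage sketch (assuming a platform driver translating a static decode
 * description; names hypothetical):
 *
 *	cxlrd = cxl_root_decoder_alloc(root_port, ways, cxl_hb_modulo);
 *	if (IS_ERR(cxlrd))
 *		return PTR_ERR(cxlrd);
 *	cxld = &cxlrd->cxlsd.cxld;
 *	cxld->hpa_range = (struct range) {
 *		.start = base,
 *		.end = base + size - 1,
 *	};
 *	cxld->interleave_ways = ways;
 *	rc = cxl_decoder_add(cxld, target_map);
 */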
/**
 * cxl_switch_decoder_alloc - Allocate a switch level decoder
 * @port: owning CXL switch port of this decoder
 * @nr_targets: max number of dynamically addressable downstream targets
 *
 * Return: A new cxl decoder to be registered by cxl_decoder_add(). A
 * 'switch' decoder is any decoder that can be enumerated by PCIe
 * topology and the HDM Decoder Capability. This includes the decoders
 * that sit between Switch Upstream Ports / Switch Downstream Ports and
 * Host Bridges / Root Ports.
 */
struct cxl_switch_decoder *cxl_switch_decoder_alloc(struct cxl_port *port,
						    unsigned int nr_targets)
{
	struct cxl_switch_decoder *cxlsd;
	struct cxl_decoder *cxld;
	int rc;

	if (is_cxl_root(port) || is_cxl_endpoint(port))
		return ERR_PTR(-EINVAL);

	cxlsd = kzalloc(struct_size(cxlsd, target, nr_targets), GFP_KERNEL);
	if (!cxlsd)
		return ERR_PTR(-ENOMEM);

	rc = cxl_switch_decoder_init(port, cxlsd, nr_targets);
	if (rc) {
		kfree(cxlsd);
		return ERR_PTR(rc);
	}

	cxld = &cxlsd->cxld;
	cxld->dev.type = &cxl_decoder_switch_type;
	return cxlsd;
}
EXPORT_SYMBOL_NS_GPL(cxl_switch_decoder_alloc, CXL);

/**
 * cxl_endpoint_decoder_alloc - Allocate an endpoint decoder
 * @port: owning port of this decoder
 *
 * Return: A new cxl decoder to be registered by cxl_decoder_add()
 */
struct cxl_endpoint_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port)
{
	struct cxl_endpoint_decoder *cxled;
	struct cxl_decoder *cxld;
	int rc;

	if (!is_cxl_endpoint(port))
		return ERR_PTR(-EINVAL);

	cxled = kzalloc(sizeof(*cxled), GFP_KERNEL);
	if (!cxled)
		return ERR_PTR(-ENOMEM);

	cxled->pos = -1;
	cxld = &cxled->cxld;
	rc = cxl_decoder_init(port, cxld);
	if (rc) {
		kfree(cxled);
		return ERR_PTR(rc);
	}

	cxld->dev.type = &cxl_decoder_endpoint_type;
	return cxled;
}
EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_alloc, CXL);
/**
 * cxl_decoder_add_locked - Add a decoder with targets
 * @cxld: The cxl decoder allocated by cxl_<type>_decoder_alloc()
 * @target_map: A list of downstream ports that this decoder can direct memory
 *              traffic to. These numbers should correspond with the port number
 *              in the PCIe Link Capabilities structure.
 *
 * Certain types of decoders may not have any targets. The main example of this
 * is an endpoint device. A more awkward example is a host bridge whose root
 * ports get hot added (technically possible, though unlikely).
 *
 * This is the locked variant of cxl_decoder_add().
 *
 * Context: Process context. Expects the device lock of the port that owns the
 *	    decoder to be held.
 *
 * Return: Negative error code if the decoder wasn't properly configured; else
 *	   returns 0.
 */
int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map)
{
	struct cxl_port *port;
	struct device *dev;
	int rc;

	if (WARN_ON_ONCE(!cxld))
		return -EINVAL;

	if (WARN_ON_ONCE(IS_ERR(cxld)))
		return PTR_ERR(cxld);

	if (cxld->interleave_ways < 1)
		return -EINVAL;

	dev = &cxld->dev;

	port = to_cxl_port(cxld->dev.parent);
	if (!is_endpoint_decoder(dev)) {
		struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev);

		rc = decoder_populate_targets(cxlsd, port, target_map);
		if (rc && (cxld->flags & CXL_DECODER_F_ENABLE)) {
			dev_err(&port->dev,
				"Failed to populate active decoder targets\n");
			return rc;
		}
	}

	rc = dev_set_name(dev, "decoder%d.%d", port->id, cxld->id);
	if (rc)
		return rc;

	return device_add(dev);
}
EXPORT_SYMBOL_NS_GPL(cxl_decoder_add_locked, CXL);

/**
 * cxl_decoder_add - Add a decoder with targets
 * @cxld: The cxl decoder allocated by cxl_<type>_decoder_alloc()
 * @target_map: A list of downstream ports that this decoder can direct memory
 *              traffic to. These numbers should correspond with the port number
 *              in the PCIe Link Capabilities structure.
 *
 * This is the unlocked variant of cxl_decoder_add_locked().
 * See cxl_decoder_add_locked().
 *
 * Context: Process context. Takes and releases the device lock of the port that
 *	    owns the decoder.
 */
int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map)
{
	struct cxl_port *port;
	int rc;

	if (WARN_ON_ONCE(!cxld))
		return -EINVAL;

	if (WARN_ON_ONCE(IS_ERR(cxld)))
		return PTR_ERR(cxld);

	port = to_cxl_port(cxld->dev.parent);

	device_lock(&port->dev);
	rc = cxl_decoder_add_locked(cxld, target_map);
	device_unlock(&port->dev);

	return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_decoder_add, CXL);
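
/*
 * A typical caller pairs cxl_decoder_add() with cxl_decoder_autoremove()
 * (below) so the decoder is unregistered when @host is removed, e.g.
 * (sketch):
 *
 *	rc = cxl_decoder_add(cxld, target_map);
 *	if (rc)
 *		put_device(&cxld->dev);
 *	else
 *		rc = cxl_decoder_autoremove(host, cxld);
 */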
static void cxld_unregister(void *dev)
{
	struct cxl_endpoint_decoder *cxled;

	if (is_endpoint_decoder(dev)) {
		cxled = to_cxl_endpoint_decoder(dev);
		cxl_decoder_kill_region(cxled);
	}

	device_unregister(dev);
}

int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld)
{
	return devm_add_action_or_reset(host, cxld_unregister, &cxld->dev);
}
EXPORT_SYMBOL_NS_GPL(cxl_decoder_autoremove, CXL);

/**
 * __cxl_driver_register - register a driver for the cxl bus
 * @cxl_drv: cxl driver structure to attach
 * @owner: owning module/driver
 * @modname: KBUILD_MODNAME for parent driver
 */
int __cxl_driver_register(struct cxl_driver *cxl_drv, struct module *owner,
			  const char *modname)
{
	if (!cxl_drv->probe) {
		pr_debug("%s ->probe() must be specified\n", modname);
		return -EINVAL;
	}

	if (!cxl_drv->name) {
		pr_debug("%s ->name must be specified\n", modname);
		return -EINVAL;
	}

	if (!cxl_drv->id) {
		pr_debug("%s ->id must be specified\n", modname);
		return -EINVAL;
	}

	cxl_drv->drv.bus = &cxl_bus_type;
	cxl_drv->drv.owner = owner;
	cxl_drv->drv.mod_name = modname;
	cxl_drv->drv.name = cxl_drv->name;

	return driver_register(&cxl_drv->drv);
}
EXPORT_SYMBOL_NS_GPL(__cxl_driver_register, CXL);
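
/*
 * Sketch of a minimal driver registration (assuming the
 * cxl_driver_register() / module_cxl_driver() helpers from cxl.h; the
 * driver below is hypothetical):
 *
 *	static struct cxl_driver cxl_example_driver = {
 *		.name = "cxl_example",
 *		.probe = cxl_example_probe,
 *		.id = CXL_DEVICE_PORT,
 *	};
 *	module_cxl_driver(cxl_example_driver);
 */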
void cxl_driver_unregister(struct cxl_driver *cxl_drv)
{
	driver_unregister(&cxl_drv->drv);
}
EXPORT_SYMBOL_NS_GPL(cxl_driver_unregister, CXL);

static int cxl_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	return add_uevent_var(env, "MODALIAS=" CXL_MODALIAS_FMT,
			      cxl_device_id(dev));
}

static int cxl_bus_match(struct device *dev, struct device_driver *drv)
{
	return cxl_device_id(dev) == to_cxl_drv(drv)->id;
}

static int cxl_bus_probe(struct device *dev)
{
	int rc;

	rc = to_cxl_drv(dev->driver)->probe(dev);
	dev_dbg(dev, "probe: %d\n", rc);
	return rc;
}

static void cxl_bus_remove(struct device *dev)
{
	struct cxl_driver *cxl_drv = to_cxl_drv(dev->driver);

	if (cxl_drv->remove)
		cxl_drv->remove(dev);
}

static struct workqueue_struct *cxl_bus_wq;

static void cxl_bus_rescan_queue(struct work_struct *w)
{
	int rc = bus_rescan_devices(&cxl_bus_type);

	pr_debug("CXL bus rescan result: %d\n", rc);
}

void cxl_bus_rescan(void)
{
	static DECLARE_WORK(rescan_work, cxl_bus_rescan_queue);

	queue_work(cxl_bus_wq, &rescan_work);
}
EXPORT_SYMBOL_NS_GPL(cxl_bus_rescan, CXL);

void cxl_bus_drain(void)
{
	drain_workqueue(cxl_bus_wq);
}
EXPORT_SYMBOL_NS_GPL(cxl_bus_drain, CXL);

bool schedule_cxl_memdev_detach(struct cxl_memdev *cxlmd)
{
	return queue_work(cxl_bus_wq, &cxlmd->detach_work);
}
EXPORT_SYMBOL_NS_GPL(schedule_cxl_memdev_detach, CXL);

/* for user tooling to ensure port disable work has completed */
static ssize_t flush_store(const struct bus_type *bus, const char *buf, size_t count)
{
	if (sysfs_streq(buf, "1")) {
		flush_workqueue(cxl_bus_wq);
		return count;
	}

	return -EINVAL;
}
static BUS_ATTR_WO(flush);
static struct attribute *cxl_bus_attributes[] = {
	&bus_attr_flush.attr,
	NULL,
};

static struct attribute_group cxl_bus_attribute_group = {
	.attrs = cxl_bus_attributes,
};

static const struct attribute_group *cxl_bus_attribute_groups[] = {
	&cxl_bus_attribute_group,
	NULL,
};

struct bus_type cxl_bus_type = {
	.name = "cxl",
	.uevent = cxl_bus_uevent,
	.match = cxl_bus_match,
	.probe = cxl_bus_probe,
	.remove = cxl_bus_remove,
	.bus_groups = cxl_bus_attribute_groups,
};
EXPORT_SYMBOL_NS_GPL(cxl_bus_type, CXL);

static struct dentry *cxl_debugfs;

struct dentry *cxl_debugfs_create_dir(const char *dir)
{
	return debugfs_create_dir(dir, cxl_debugfs);
}
EXPORT_SYMBOL_NS_GPL(cxl_debugfs_create_dir, CXL);

static __init int cxl_core_init(void)
{
	int rc;

	cxl_debugfs = debugfs_create_dir("cxl", NULL);

	cxl_mbox_init();

	rc = cxl_memdev_init();
	if (rc)
		return rc;

	cxl_bus_wq = alloc_ordered_workqueue("cxl_port", 0);
	if (!cxl_bus_wq) {
		rc = -ENOMEM;
		goto err_wq;
	}

	rc = bus_register(&cxl_bus_type);
	if (rc)
		goto err_bus;

	rc = cxl_region_init();
	if (rc)
		goto err_region;

	return 0;

err_region:
	bus_unregister(&cxl_bus_type);
err_bus:
	destroy_workqueue(cxl_bus_wq);
err_wq:
	cxl_memdev_exit();
	return rc;
}

static void cxl_core_exit(void)
{
	cxl_region_exit();
	bus_unregister(&cxl_bus_type);
	destroy_workqueue(cxl_bus_wq);
	cxl_memdev_exit();
	debugfs_remove_recursive(cxl_debugfs);
}

subsys_initcall(cxl_core_init);
module_exit(cxl_core_exit);
MODULE_LICENSE("GPL v2");