// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
#include <linux/platform_device.h>
#include <linux/memregion.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <cxlmem.h>
#include <cxlpci.h>
#include <cxl.h>
#include "core.h"

/**
 * DOC: cxl core
 *
 * The CXL core provides a set of interfaces that can be consumed by CXL-aware
 * drivers. The interfaces allow for creation, modification, and destruction of
 * regions, memory devices, ports, and decoders. CXL-aware drivers must
 * register with the CXL core via these interfaces in order to be able to
 * participate in cross-device interleave coordination. The CXL core also
 * establishes and maintains the bridge to the nvdimm subsystem.
 *
 * The CXL core introduces a sysfs hierarchy to control the devices that are
 * instantiated by the core.
 */

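/*
 * Illustrative sketch (not part of the original file): a minimal example of
 * how a hypothetical platform driver could register with the core by adding
 * a CXL root port during probe. The example_probe() name is an assumption
 * for illustration only; the API usage follows devm_cxl_add_port() below.
 *
 *	static int example_probe(struct platform_device *pdev)
 *	{
 *		struct cxl_port *root_port;
 *
 *		root_port = devm_cxl_add_port(&pdev->dev, &pdev->dev,
 *					      CXL_RESOURCE_NONE, NULL);
 *		if (IS_ERR(root_port))
 *			return PTR_ERR(root_port);
 *		return 0;
 *	}
 */
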
static DEFINE_IDA(cxl_port_ida);
static DEFINE_XARRAY(cxl_root_buses);

static ssize_t devtype_show(struct device *dev, struct device_attribute *attr,
                            char *buf)
{
        return sysfs_emit(buf, "%s\n", dev->type->name);
}
static DEVICE_ATTR_RO(devtype);

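/*
 * Map a device to the CXL_DEVICE_* id used to build its modalias; returns
 * 0 for device types that have no CXL device id.
 */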
static int cxl_device_id(const struct device *dev)
{
        if (dev->type == &cxl_nvdimm_bridge_type)
                return CXL_DEVICE_NVDIMM_BRIDGE;
        if (dev->type == &cxl_nvdimm_type)
                return CXL_DEVICE_NVDIMM;
        if (dev->type == CXL_PMEM_REGION_TYPE())
                return CXL_DEVICE_PMEM_REGION;
        if (dev->type == CXL_DAX_REGION_TYPE())
                return CXL_DEVICE_DAX_REGION;
        if (is_cxl_port(dev)) {
                if (is_cxl_root(to_cxl_port(dev)))
                        return CXL_DEVICE_ROOT;
                return CXL_DEVICE_PORT;
        }
        if (is_cxl_memdev(dev))
                return CXL_DEVICE_MEMORY_EXPANDER;
        if (dev->type == CXL_REGION_TYPE())
                return CXL_DEVICE_REGION;
        if (dev->type == &cxl_pmu_type)
                return CXL_DEVICE_PMU;
        return 0;
}

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        return sysfs_emit(buf, CXL_MODALIAS_FMT "\n", cxl_device_id(dev));
}
static DEVICE_ATTR_RO(modalias);

static struct attribute *cxl_base_attributes[] = {
        &dev_attr_devtype.attr,
        &dev_attr_modalias.attr,
        NULL,
};

struct attribute_group cxl_base_attribute_group = {
        .attrs = cxl_base_attributes,
};

static ssize_t start_show(struct device *dev, struct device_attribute *attr,
                          char *buf)
{
        struct cxl_decoder *cxld = to_cxl_decoder(dev);

        return sysfs_emit(buf, "%#llx\n", cxld->hpa_range.start);
}
static DEVICE_ATTR_ADMIN_RO(start);

static ssize_t size_show(struct device *dev, struct device_attribute *attr,
                        char *buf)
{
        struct cxl_decoder *cxld = to_cxl_decoder(dev);

        return sysfs_emit(buf, "%#llx\n", range_len(&cxld->hpa_range));
}
static DEVICE_ATTR_RO(size);

#define CXL_DECODER_FLAG_ATTR(name, flag)                            \
static ssize_t name##_show(struct device *dev,                       \
                           struct device_attribute *attr, char *buf) \
{                                                                    \
        struct cxl_decoder *cxld = to_cxl_decoder(dev);              \
                                                                     \
        return sysfs_emit(buf, "%s\n",                               \
                          (cxld->flags & (flag)) ? "1" : "0");       \
}                                                                    \
static DEVICE_ATTR_RO(name)

CXL_DECODER_FLAG_ATTR(cap_pmem, CXL_DECODER_F_PMEM);
CXL_DECODER_FLAG_ATTR(cap_ram, CXL_DECODER_F_RAM);
CXL_DECODER_FLAG_ATTR(cap_type2, CXL_DECODER_F_TYPE2);
CXL_DECODER_FLAG_ATTR(cap_type3, CXL_DECODER_F_TYPE3);
CXL_DECODER_FLAG_ATTR(locked, CXL_DECODER_F_LOCK);

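/*
 * The attributes stamped out above surface as read-only per-decoder sysfs
 * files (cap_pmem, cap_ram, cap_type2, cap_type3, locked) in the decoder's
 * device directory on the cxl bus.
 */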
static ssize_t target_type_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        struct cxl_decoder *cxld = to_cxl_decoder(dev);

        switch (cxld->target_type) {
        case CXL_DECODER_DEVMEM:
                return sysfs_emit(buf, "accelerator\n");
        case CXL_DECODER_HOSTONLYMEM:
                return sysfs_emit(buf, "expander\n");
        }
        return -ENXIO;
}
static DEVICE_ATTR_RO(target_type);

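/*
 * Emit @cxlsd's programmed targets as a comma-separated list of dport
 * port_ids, in interleave order, stopping at the first unset target.
 */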
static ssize_t emit_target_list(struct cxl_switch_decoder *cxlsd, char *buf)
{
        struct cxl_decoder *cxld = &cxlsd->cxld;
        ssize_t offset = 0;
        int i, rc = 0;

        for (i = 0; i < cxld->interleave_ways; i++) {
                struct cxl_dport *dport = cxlsd->target[i];
                struct cxl_dport *next = NULL;

                if (!dport)
                        break;

                if (i + 1 < cxld->interleave_ways)
                        next = cxlsd->target[i + 1];
                rc = sysfs_emit_at(buf, offset, "%d%s", dport->port_id,
                                   next ? "," : "");
                if (rc < 0)
                        return rc;
                offset += rc;
        }

        return offset;
}

static ssize_t target_list_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev);
        ssize_t offset;
        unsigned int seq;
        int rc;

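        /*
         * Read under the target_lock seqlock so a concurrent target list
         * update (see decoder_populate_targets()) cannot produce a torn
         * snapshot.
         */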
        do {
                seq = read_seqbegin(&cxlsd->target_lock);
                rc = emit_target_list(cxlsd, buf);
        } while (read_seqretry(&cxlsd->target_lock, seq));

        if (rc < 0)
                return rc;
        offset = rc;

        rc = sysfs_emit_at(buf, offset, "\n");
        if (rc < 0)
                return rc;

        return offset + rc;
}
static DEVICE_ATTR_RO(target_list);

static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
                         char *buf)
{
        struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);

        return sysfs_emit(buf, "%s\n", cxl_decoder_mode_name(cxled->mode));
}

static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
                          const char *buf, size_t len)
{
        struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
        enum cxl_decoder_mode mode;
        ssize_t rc;

        if (sysfs_streq(buf, "pmem"))
                mode = CXL_DECODER_PMEM;
        else if (sysfs_streq(buf, "ram"))
                mode = CXL_DECODER_RAM;
        else
                return -EINVAL;

        rc = cxl_dpa_set_mode(cxled, mode);
        if (rc)
                return rc;

        return len;
}
static DEVICE_ATTR_RW(mode);

static ssize_t dpa_resource_show(struct device *dev, struct device_attribute *attr,
                            char *buf)
{
        struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
        u64 base = cxl_dpa_resource_start(cxled);

        return sysfs_emit(buf, "%#llx\n", base);
}
static DEVICE_ATTR_RO(dpa_resource);

static ssize_t dpa_size_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
        resource_size_t size = cxl_dpa_size(cxled);

        return sysfs_emit(buf, "%pa\n", &size);
}

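/*
 * Writing a size first releases the decoder's current DPA allocation and
 * then, for a non-zero size, attempts a new 256MB-aligned allocation.
 */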
static ssize_t dpa_size_store(struct device *dev, struct device_attribute *attr,
                              const char *buf, size_t len)
{
        struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
        unsigned long long size;
        ssize_t rc;

        rc = kstrtoull(buf, 0, &size);
        if (rc)
                return rc;

        if (!IS_ALIGNED(size, SZ_256M))
                return -EINVAL;

        rc = cxl_dpa_free(cxled);
        if (rc)
                return rc;

        if (size == 0)
                return len;

        rc = cxl_dpa_alloc(cxled, size);
        if (rc)
                return rc;

        return len;
}
static DEVICE_ATTR_RW(dpa_size);

static ssize_t interleave_granularity_show(struct device *dev,
                                           struct device_attribute *attr,
                                           char *buf)
{
        struct cxl_decoder *cxld = to_cxl_decoder(dev);

        return sysfs_emit(buf, "%d\n", cxld->interleave_granularity);
}

static DEVICE_ATTR_RO(interleave_granularity);

static ssize_t interleave_ways_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        struct cxl_decoder *cxld = to_cxl_decoder(dev);

        return sysfs_emit(buf, "%d\n", cxld->interleave_ways);
}

static DEVICE_ATTR_RO(interleave_ways);

static struct attribute *cxl_decoder_base_attrs[] = {
        &dev_attr_start.attr,
        &dev_attr_size.attr,
        &dev_attr_locked.attr,
        &dev_attr_interleave_granularity.attr,
        &dev_attr_interleave_ways.attr,
        NULL,
};

static struct attribute_group cxl_decoder_base_attribute_group = {
        .attrs = cxl_decoder_base_attrs,
};

static struct attribute *cxl_decoder_root_attrs[] = {
        &dev_attr_cap_pmem.attr,
        &dev_attr_cap_ram.attr,
        &dev_attr_cap_type2.attr,
        &dev_attr_cap_type3.attr,
        &dev_attr_target_list.attr,
        SET_CXL_REGION_ATTR(create_pmem_region)
        SET_CXL_REGION_ATTR(create_ram_region)
        SET_CXL_REGION_ATTR(delete_region)
        NULL,
};

static bool can_create_pmem(struct cxl_root_decoder *cxlrd)
{
        unsigned long flags = CXL_DECODER_F_TYPE3 | CXL_DECODER_F_PMEM;

        return (cxlrd->cxlsd.cxld.flags & flags) == flags;
}

static bool can_create_ram(struct cxl_root_decoder *cxlrd)
{
        unsigned long flags = CXL_DECODER_F_TYPE3 | CXL_DECODER_F_RAM;

        return (cxlrd->cxlsd.cxld.flags & flags) == flags;
}

static umode_t cxl_root_decoder_visible(struct kobject *kobj, struct attribute *a, int n)
{
        struct device *dev = kobj_to_dev(kobj);
        struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);

        if (a == CXL_REGION_ATTR(create_pmem_region) && !can_create_pmem(cxlrd))
                return 0;

        if (a == CXL_REGION_ATTR(create_ram_region) && !can_create_ram(cxlrd))
                return 0;

        if (a == CXL_REGION_ATTR(delete_region) &&
            !(can_create_pmem(cxlrd) || can_create_ram(cxlrd)))
                return 0;

        return a->mode;
}

static struct attribute_group cxl_decoder_root_attribute_group = {
        .attrs = cxl_decoder_root_attrs,
        .is_visible = cxl_root_decoder_visible,
};

static const struct attribute_group *cxl_decoder_root_attribute_groups[] = {
        &cxl_decoder_root_attribute_group,
        &cxl_decoder_base_attribute_group,
        &cxl_base_attribute_group,
        NULL,
};

static struct attribute *cxl_decoder_switch_attrs[] = {
        &dev_attr_target_type.attr,
        &dev_attr_target_list.attr,
        SET_CXL_REGION_ATTR(region)
        NULL,
};

static struct attribute_group cxl_decoder_switch_attribute_group = {
        .attrs = cxl_decoder_switch_attrs,
};

static const struct attribute_group *cxl_decoder_switch_attribute_groups[] = {
        &cxl_decoder_switch_attribute_group,
        &cxl_decoder_base_attribute_group,
        &cxl_base_attribute_group,
        NULL,
};

static struct attribute *cxl_decoder_endpoint_attrs[] = {
        &dev_attr_target_type.attr,
        &dev_attr_mode.attr,
        &dev_attr_dpa_size.attr,
        &dev_attr_dpa_resource.attr,
        SET_CXL_REGION_ATTR(region)
        NULL,
};

static struct attribute_group cxl_decoder_endpoint_attribute_group = {
        .attrs = cxl_decoder_endpoint_attrs,
};

static const struct attribute_group *cxl_decoder_endpoint_attribute_groups[] = {
        &cxl_decoder_base_attribute_group,
        &cxl_decoder_endpoint_attribute_group,
        &cxl_base_attribute_group,
        NULL,
};

static void __cxl_decoder_release(struct cxl_decoder *cxld)
{
        struct cxl_port *port = to_cxl_port(cxld->dev.parent);

        ida_free(&port->decoder_ida, cxld->id);
        put_device(&port->dev);
}

static void cxl_endpoint_decoder_release(struct device *dev)
{
        struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);

        __cxl_decoder_release(&cxled->cxld);
        kfree(cxled);
}

static void cxl_switch_decoder_release(struct device *dev)
{
        struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev);

        __cxl_decoder_release(&cxlsd->cxld);
        kfree(cxlsd);
}

struct cxl_root_decoder *to_cxl_root_decoder(struct device *dev)
{
        if (dev_WARN_ONCE(dev, !is_root_decoder(dev),
                          "not a cxl_root_decoder device\n"))
                return NULL;
        return container_of(dev, struct cxl_root_decoder, cxlsd.cxld.dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_root_decoder, CXL);

static void cxl_root_decoder_release(struct device *dev)
{
        struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);

        if (atomic_read(&cxlrd->region_id) >= 0)
                memregion_free(atomic_read(&cxlrd->region_id));
        __cxl_decoder_release(&cxlrd->cxlsd.cxld);
        kfree(cxlrd);
}

static const struct device_type cxl_decoder_endpoint_type = {
        .name = "cxl_decoder_endpoint",
        .release = cxl_endpoint_decoder_release,
        .groups = cxl_decoder_endpoint_attribute_groups,
};

static const struct device_type cxl_decoder_switch_type = {
        .name = "cxl_decoder_switch",
        .release = cxl_switch_decoder_release,
        .groups = cxl_decoder_switch_attribute_groups,
};

static const struct device_type cxl_decoder_root_type = {
        .name = "cxl_decoder_root",
        .release = cxl_root_decoder_release,
        .groups = cxl_decoder_root_attribute_groups,
};

bool is_endpoint_decoder(struct device *dev)
{
        return dev->type == &cxl_decoder_endpoint_type;
}
EXPORT_SYMBOL_NS_GPL(is_endpoint_decoder, CXL);

bool is_root_decoder(struct device *dev)
{
        return dev->type == &cxl_decoder_root_type;
}
EXPORT_SYMBOL_NS_GPL(is_root_decoder, CXL);

bool is_switch_decoder(struct device *dev)
{
        return is_root_decoder(dev) || dev->type == &cxl_decoder_switch_type;
}
EXPORT_SYMBOL_NS_GPL(is_switch_decoder, CXL);

struct cxl_decoder *to_cxl_decoder(struct device *dev)
{
        if (dev_WARN_ONCE(dev,
                          !is_switch_decoder(dev) && !is_endpoint_decoder(dev),
                          "not a cxl_decoder device\n"))
                return NULL;
        return container_of(dev, struct cxl_decoder, dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_decoder, CXL);

struct cxl_endpoint_decoder *to_cxl_endpoint_decoder(struct device *dev)
{
        if (dev_WARN_ONCE(dev, !is_endpoint_decoder(dev),
                          "not a cxl_endpoint_decoder device\n"))
                return NULL;
        return container_of(dev, struct cxl_endpoint_decoder, cxld.dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_endpoint_decoder, CXL);

struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev)
{
        if (dev_WARN_ONCE(dev, !is_switch_decoder(dev),
                          "not a cxl_switch_decoder device\n"))
                return NULL;
        return container_of(dev, struct cxl_switch_decoder, cxld.dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_switch_decoder, CXL);

static void cxl_ep_release(struct cxl_ep *ep)
{
        put_device(ep->ep);
        kfree(ep);
}

static void cxl_ep_remove(struct cxl_port *port, struct cxl_ep *ep)
{
        if (!ep)
                return;
        xa_erase(&port->endpoints, (unsigned long) ep->ep);
        cxl_ep_release(ep);
}

static void cxl_port_release(struct device *dev)
{
        struct cxl_port *port = to_cxl_port(dev);
        unsigned long index;
        struct cxl_ep *ep;

        xa_for_each(&port->endpoints, index, ep)
                cxl_ep_remove(port, ep);
        xa_destroy(&port->endpoints);
        xa_destroy(&port->dports);
        xa_destroy(&port->regions);
        ida_free(&cxl_port_ida, port->id);
        kfree(port);
}

static const struct attribute_group *cxl_port_attribute_groups[] = {
        &cxl_base_attribute_group,
        NULL,
};

static const struct device_type cxl_port_type = {
        .name = "cxl_port",
        .release = cxl_port_release,
        .groups = cxl_port_attribute_groups,
};

bool is_cxl_port(const struct device *dev)
{
        return dev->type == &cxl_port_type;
}
EXPORT_SYMBOL_NS_GPL(is_cxl_port, CXL);

struct cxl_port *to_cxl_port(const struct device *dev)
{
        if (dev_WARN_ONCE(dev, dev->type != &cxl_port_type,
                          "not a cxl_port device\n"))
                return NULL;
        return container_of(dev, struct cxl_port, dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_port, CXL);

static void unregister_port(void *_port)
{
        struct cxl_port *port = _port;
        struct cxl_port *parent;
        struct device *lock_dev;

        if (is_cxl_root(port))
                parent = NULL;
        else
                parent = to_cxl_port(port->dev.parent);

        /*
         * The CXL root port and the first level of ports are unregistered
         * under the platform firmware device lock; all other ports are
         * unregistered while holding their parent port lock.
         */
        if (!parent)
                lock_dev = port->uport_dev;
        else if (is_cxl_root(parent))
                lock_dev = parent->uport_dev;
        else
                lock_dev = &parent->dev;

        device_lock_assert(lock_dev);
        port->dead = true;
        device_unregister(&port->dev);
}

static void cxl_unlink_uport(void *_port)
{
        struct cxl_port *port = _port;

        sysfs_remove_link(&port->dev.kobj, "uport");
}

static int devm_cxl_link_uport(struct device *host, struct cxl_port *port)
{
        int rc;

        rc = sysfs_create_link(&port->dev.kobj, &port->uport_dev->kobj,
                               "uport");
        if (rc)
                return rc;
        return devm_add_action_or_reset(host, cxl_unlink_uport, port);
}

static void cxl_unlink_parent_dport(void *_port)
{
        struct cxl_port *port = _port;

        sysfs_remove_link(&port->dev.kobj, "parent_dport");
}

static int devm_cxl_link_parent_dport(struct device *host,
                                      struct cxl_port *port,
                                      struct cxl_dport *parent_dport)
{
        int rc;

        if (!parent_dport)
                return 0;

        rc = sysfs_create_link(&port->dev.kobj, &parent_dport->dport_dev->kobj,
                               "parent_dport");
        if (rc)
                return rc;
        return devm_add_action_or_reset(host, cxl_unlink_parent_dport, port);
}

static struct lock_class_key cxl_port_key;

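/*
 * Allocate and minimally initialize a cxl_port: reserve an id, resolve the
 * port's host bridge by walking up from @parent_dport, and give the device
 * lock a lockdep subclass keyed to the port's depth in the topology.
 */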
static struct cxl_port *cxl_port_alloc(struct device *uport_dev,
                                       resource_size_t component_reg_phys,
                                       struct cxl_dport *parent_dport)
{
        struct cxl_port *port;
        struct device *dev;
        int rc;

        port = kzalloc(sizeof(*port), GFP_KERNEL);
        if (!port)
                return ERR_PTR(-ENOMEM);

        rc = ida_alloc(&cxl_port_ida, GFP_KERNEL);
        if (rc < 0)
                goto err;
        port->id = rc;
        port->uport_dev = uport_dev;

        /*
         * The top-level cxl_port "cxl_root" does not have a cxl_port as
         * its parent, and it does not have any corresponding component
         * registers, as its decode is described by a fixed platform
         * description.
         */
        dev = &port->dev;
        if (parent_dport) {
                struct cxl_port *parent_port = parent_dport->port;
                struct cxl_port *iter;

                dev->parent = &parent_port->dev;
                port->depth = parent_port->depth + 1;
                port->parent_dport = parent_dport;

                /*
                 * walk to the host bridge, or the first ancestor that knows
                 * the host bridge
                 */
                iter = port;
                while (!iter->host_bridge &&
                       !is_cxl_root(to_cxl_port(iter->dev.parent)))
                        iter = to_cxl_port(iter->dev.parent);
                if (iter->host_bridge)
                        port->host_bridge = iter->host_bridge;
                else if (parent_dport->rch)
                        port->host_bridge = parent_dport->dport_dev;
                else
                        port->host_bridge = iter->uport_dev;
                dev_dbg(uport_dev, "host-bridge: %s\n",
                        dev_name(port->host_bridge));
        } else
                dev->parent = uport_dev;

        port->component_reg_phys = component_reg_phys;
        ida_init(&port->decoder_ida);
        port->hdm_end = -1;
        port->commit_end = -1;
        xa_init(&port->dports);
        xa_init(&port->endpoints);
        xa_init(&port->regions);

        device_initialize(dev);
        lockdep_set_class_and_subclass(&dev->mutex, &cxl_port_key, port->depth);
        device_set_pm_not_required(dev);
        dev->bus = &cxl_bus_type;
        dev->type = &cxl_port_type;

        return port;

err:
        kfree(port);
        return ERR_PTR(rc);
}

static int cxl_setup_comp_regs(struct device *dev, struct cxl_register_map *map,
                               resource_size_t component_reg_phys)
{
        if (component_reg_phys == CXL_RESOURCE_NONE)
                return 0;

        *map = (struct cxl_register_map) {
                .dev = dev,
                .reg_type = CXL_REGLOC_RBI_COMPONENT,
                .resource = component_reg_phys,
                .max_size = CXL_COMPONENT_REG_BLOCK_SIZE,
        };

        return cxl_setup_regs(map);
}

static int cxl_port_setup_regs(struct cxl_port *port,
                        resource_size_t component_reg_phys)
{
        if (dev_is_platform(port->uport_dev))
                return 0;
        return cxl_setup_comp_regs(&port->dev, &port->comp_map,
                                   component_reg_phys);
}

static int cxl_dport_setup_regs(struct cxl_dport *dport,
                                resource_size_t component_reg_phys)
{
        if (dev_is_platform(dport->dport_dev))
                return 0;
        return cxl_setup_comp_regs(dport->dport_dev, &dport->comp_map,
                                   component_reg_phys);
}

static struct cxl_port *__devm_cxl_add_port(struct device *host,
                                            struct device *uport_dev,
                                            resource_size_t component_reg_phys,
                                            struct cxl_dport *parent_dport)
{
        struct cxl_port *port;
        struct device *dev;
        int rc;

        port = cxl_port_alloc(uport_dev, component_reg_phys, parent_dport);
        if (IS_ERR(port))
                return port;

        dev = &port->dev;
        if (is_cxl_memdev(uport_dev))
                rc = dev_set_name(dev, "endpoint%d", port->id);
        else if (parent_dport)
                rc = dev_set_name(dev, "port%d", port->id);
        else
                rc = dev_set_name(dev, "root%d", port->id);
        if (rc)
                goto err;

        rc = cxl_port_setup_regs(port, component_reg_phys);
        if (rc)
                goto err;

        rc = device_add(dev);
        if (rc)
                goto err;

        rc = devm_add_action_or_reset(host, unregister_port, port);
        if (rc)
                return ERR_PTR(rc);

        rc = devm_cxl_link_uport(host, port);
        if (rc)
                return ERR_PTR(rc);

        rc = devm_cxl_link_parent_dport(host, port, parent_dport);
        if (rc)
                return ERR_PTR(rc);

        return port;

err:
        put_device(dev);
        return ERR_PTR(rc);
}

/**
 * devm_cxl_add_port - register a cxl_port in CXL memory decode hierarchy
 * @host: host device for devm operations
 * @uport_dev: "physical" device implementing this upstream port
 * @component_reg_phys: (optional) for configurable cxl_port instances
 * @parent_dport: next hop up in the CXL memory decode hierarchy
 */
struct cxl_port *devm_cxl_add_port(struct device *host,
                                   struct device *uport_dev,
                                   resource_size_t component_reg_phys,
                                   struct cxl_dport *parent_dport)
{
        struct cxl_port *port, *parent_port;

        port = __devm_cxl_add_port(host, uport_dev, component_reg_phys,
                                   parent_dport);

        parent_port = parent_dport ? parent_dport->port : NULL;
        if (IS_ERR(port)) {
                dev_dbg(uport_dev, "Failed to add%s%s%s: %ld\n",
                        parent_port ? " port to " : "",
                        parent_port ? dev_name(&parent_port->dev) : "",
                        parent_port ? "" : " root port",
                        PTR_ERR(port));
        } else {
                dev_dbg(uport_dev, "%s added%s%s%s\n",
                        dev_name(&port->dev),
                        parent_port ? " to " : "",
                        parent_port ? dev_name(&parent_port->dev) : "",
                        parent_port ? "" : " (root port)");
        }

        return port;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_port, CXL);

struct pci_bus *cxl_port_to_pci_bus(struct cxl_port *port)
{
        /* There is no pci_bus associated with a CXL platform-root port */
        if (is_cxl_root(port))
                return NULL;

        if (dev_is_pci(port->uport_dev)) {
                struct pci_dev *pdev = to_pci_dev(port->uport_dev);

                return pdev->subordinate;
        }

        return xa_load(&cxl_root_buses, (unsigned long)port->uport_dev);
}
EXPORT_SYMBOL_NS_GPL(cxl_port_to_pci_bus, CXL);

static void unregister_pci_bus(void *uport_dev)
{
        xa_erase(&cxl_root_buses, (unsigned long)uport_dev);
}

int devm_cxl_register_pci_bus(struct device *host, struct device *uport_dev,
                              struct pci_bus *bus)
{
        int rc;

        if (dev_is_pci(uport_dev))
                return -EINVAL;

        rc = xa_insert(&cxl_root_buses, (unsigned long)uport_dev, bus,
                       GFP_KERNEL);
        if (rc)
                return rc;
        return devm_add_action_or_reset(host, unregister_pci_bus, uport_dev);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_register_pci_bus, CXL);

static bool dev_is_cxl_root_child(struct device *dev)
{
        struct cxl_port *port, *parent;

        if (!is_cxl_port(dev))
                return false;

        port = to_cxl_port(dev);
        if (is_cxl_root(port))
                return false;

        parent = to_cxl_port(port->dev.parent);
        if (is_cxl_root(parent))
                return true;

        return false;
}

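/*
 * Walk up from @port to the CXL root port. Returns the root with a device
 * reference held (the caller is responsible for put_device()), or NULL if
 * no root is reachable from @port.
 */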
struct cxl_port *find_cxl_root(struct cxl_port *port)
{
        struct cxl_port *iter = port;

        while (iter && !is_cxl_root(iter))
                iter = to_cxl_port(iter->dev.parent);

        if (!iter)
                return NULL;
        get_device(&iter->dev);
        return iter;
}
EXPORT_SYMBOL_NS_GPL(find_cxl_root, CXL);

static struct cxl_dport *find_dport(struct cxl_port *port, int id)
{
        struct cxl_dport *dport;
        unsigned long index;

        device_lock_assert(&port->dev);
        xa_for_each(&port->dports, index, dport)
                if (dport->port_id == id)
                        return dport;
        return NULL;
}

static int add_dport(struct cxl_port *port, struct cxl_dport *dport)
{
        struct cxl_dport *dup;
        int rc;

        device_lock_assert(&port->dev);
        dup = find_dport(port, dport->port_id);
        if (dup) {
                dev_err(&port->dev,
                        "unable to add dport%d-%s non-unique port id (%s)\n",
                        dport->port_id, dev_name(dport->dport_dev),
                        dev_name(dup->dport_dev));
                return -EBUSY;
        }

        rc = xa_insert(&port->dports, (unsigned long)dport->dport_dev, dport,
                       GFP_KERNEL);
        if (rc)
                return rc;

        port->nr_dports++;
        return 0;
}

/*
 * Since root-level CXL dports cannot be enumerated by PCI, they are not
 * enumerated by the common port driver that acquires the port lock over
 * dport add/remove. Instead, root dports are manually added by a
 * platform driver, and cond_cxl_root_lock() is used to take the missing
 * port lock in that case.
 */
static void cond_cxl_root_lock(struct cxl_port *port)
{
        if (is_cxl_root(port))
                device_lock(&port->dev);
}

static void cond_cxl_root_unlock(struct cxl_port *port)
{
        if (is_cxl_root(port))
                device_unlock(&port->dev);
}

static void cxl_dport_remove(void *data)
{
        struct cxl_dport *dport = data;
        struct cxl_port *port = dport->port;

        xa_erase(&port->dports, (unsigned long) dport->dport_dev);
        put_device(dport->dport_dev);
}

static void cxl_dport_unlink(void *data)
{
        struct cxl_dport *dport = data;
        struct cxl_port *port = dport->port;
        char link_name[CXL_TARGET_STRLEN];

        sprintf(link_name, "dport%d", dport->port_id);
        sysfs_remove_link(&port->dev.kobj, link_name);
}

static struct cxl_dport *
__devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev,
                     int port_id, resource_size_t component_reg_phys,
                     resource_size_t rcrb)
{
        char link_name[CXL_TARGET_STRLEN];
        struct cxl_dport *dport;
        struct device *host;
        int rc;

        if (is_cxl_root(port))
                host = port->uport_dev;
        else
                host = &port->dev;

        if (!host->driver) {
                dev_WARN_ONCE(&port->dev, 1, "dport:%s bad devm context\n",
                              dev_name(dport_dev));
                return ERR_PTR(-ENXIO);
        }

        if (snprintf(link_name, CXL_TARGET_STRLEN, "dport%d", port_id) >=
            CXL_TARGET_STRLEN)
                return ERR_PTR(-EINVAL);

        dport = devm_kzalloc(host, sizeof(*dport), GFP_KERNEL);
        if (!dport)
                return ERR_PTR(-ENOMEM);

        if (rcrb != CXL_RESOURCE_NONE) {
                dport->rcrb.base = rcrb;
                component_reg_phys = __rcrb_to_component(dport_dev, &dport->rcrb,
                                                         CXL_RCRB_DOWNSTREAM);
                if (component_reg_phys == CXL_RESOURCE_NONE) {
                        dev_warn(dport_dev, "Invalid Component Registers in RCRB");
                        return ERR_PTR(-ENXIO);
                }

                dport->rch = true;
        }

        if (component_reg_phys != CXL_RESOURCE_NONE)
                dev_dbg(dport_dev, "Component Registers found for dport: %pa\n",
                        &component_reg_phys);

        dport->dport_dev = dport_dev;
        dport->port_id = port_id;
        dport->port = port;

        rc = cxl_dport_setup_regs(dport, component_reg_phys);
        if (rc)
                return ERR_PTR(rc);

        cond_cxl_root_lock(port);
        rc = add_dport(port, dport);
        cond_cxl_root_unlock(port);
        if (rc)
                return ERR_PTR(rc);

        get_device(dport_dev);
        rc = devm_add_action_or_reset(host, cxl_dport_remove, dport);
        if (rc)
                return ERR_PTR(rc);

        rc = sysfs_create_link(&port->dev.kobj, &dport_dev->kobj, link_name);
        if (rc)
                return ERR_PTR(rc);

        rc = devm_add_action_or_reset(host, cxl_dport_unlink, dport);
        if (rc)
                return ERR_PTR(rc);

        return dport;
}

/**
 * devm_cxl_add_dport - append VH downstream port data to a cxl_port
 * @port: the cxl_port that references this dport
 * @dport_dev: firmware or PCI device representing the dport
 * @port_id: identifier for this dport in a decoder's target list
 * @component_reg_phys: optional location of CXL component registers
 *
 * Note that dports are appended to the devm release actions of either
 * the port's host (for root ports) or the port itself (for switch
 * ports).
 */
struct cxl_dport *devm_cxl_add_dport(struct cxl_port *port,
                                     struct device *dport_dev, int port_id,
                                     resource_size_t component_reg_phys)
{
        struct cxl_dport *dport;

        dport = __devm_cxl_add_dport(port, dport_dev, port_id,
                                     component_reg_phys, CXL_RESOURCE_NONE);
        if (IS_ERR(dport)) {
                dev_dbg(dport_dev, "failed to add dport to %s: %ld\n",
                        dev_name(&port->dev), PTR_ERR(dport));
        } else {
                dev_dbg(dport_dev, "dport added to %s\n",
                        dev_name(&port->dev));
        }

        return dport;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_dport, CXL);

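/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * platform driver registering a VH root dport for a host bridge it has
 * discovered. "bridge_dev" and the port id of 0 are assumptions for
 * illustration only.
 *
 *	struct cxl_dport *dport;
 *
 *	dport = devm_cxl_add_dport(root_port, bridge_dev, 0,
 *				   CXL_RESOURCE_NONE);
 *	if (IS_ERR(dport))
 *		return PTR_ERR(dport);
 */
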
/**
 * devm_cxl_add_rch_dport - append RCH downstream port data to a cxl_port
 * @port: the cxl_port that references this dport
 * @dport_dev: firmware or PCI device representing the dport
 * @port_id: identifier for this dport in a decoder's target list
 * @rcrb: mandatory location of a Root Complex Register Block
 *
 * See CXL 3.0 9.11.8 CXL Devices Attached to an RCH
 */
struct cxl_dport *devm_cxl_add_rch_dport(struct cxl_port *port,
                                         struct device *dport_dev, int port_id,
                                         resource_size_t rcrb)
{
        struct cxl_dport *dport;

        if (rcrb == CXL_RESOURCE_NONE) {
                dev_dbg(&port->dev, "failed to add RCH dport, missing RCRB\n");
                return ERR_PTR(-EINVAL);
        }

        dport = __devm_cxl_add_dport(port, dport_dev, port_id,
                                     CXL_RESOURCE_NONE, rcrb);
        if (IS_ERR(dport)) {
                dev_dbg(dport_dev, "failed to add RCH dport to %s: %ld\n",
                        dev_name(&port->dev), PTR_ERR(dport));
        } else {
                dev_dbg(dport_dev, "RCH dport added to %s\n",
                        dev_name(&port->dev));
        }

        return dport;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_rch_dport, CXL);

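/*
 * Register @new on its parent port's endpoint xarray. Fails with -ENXIO if
 * the port is already on its way to being unregistered (port->dead).
 */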
static int add_ep(struct cxl_ep *new)
{
        struct cxl_port *port = new->dport->port;
        int rc;

        device_lock(&port->dev);
        if (port->dead) {
                device_unlock(&port->dev);
                return -ENXIO;
        }
        rc = xa_insert(&port->endpoints, (unsigned long)new->ep, new,
                       GFP_KERNEL);
        device_unlock(&port->dev);

        return rc;
}

/**
 * cxl_add_ep - register an endpoint's interest in a port
 * @dport: the dport that routes to @ep_dev
 * @ep_dev: device representing the endpoint
 *
 * Intermediate CXL ports are scanned based on the arrival of endpoints.
 * When those endpoints depart, the port can be destroyed once all
 * endpoints that care about that port have been removed.
 */
static int cxl_add_ep(struct cxl_dport *dport, struct device *ep_dev)
{
        struct cxl_ep *ep;
        int rc;

        ep = kzalloc(sizeof(*ep), GFP_KERNEL);
        if (!ep)
                return -ENOMEM;

        ep->ep = get_device(ep_dev);
        ep->dport = dport;

        rc = add_ep(ep);
        if (rc)
                cxl_ep_release(ep);
        return rc;
}

struct cxl_find_port_ctx {
        const struct device *dport_dev;
        const struct cxl_port *parent_port;
        struct cxl_dport **dport;
};

static int match_port_by_dport(struct device *dev, const void *data)
{
        const struct cxl_find_port_ctx *ctx = data;
        struct cxl_dport *dport;
        struct cxl_port *port;

        if (!is_cxl_port(dev))
                return 0;
        if (ctx->parent_port && dev->parent != &ctx->parent_port->dev)
                return 0;

        port = to_cxl_port(dev);
        dport = cxl_find_dport_by_dev(port, ctx->dport_dev);
        if (ctx->dport)
                *ctx->dport = dport;
        return dport != NULL;
}

static struct cxl_port *__find_cxl_port(struct cxl_find_port_ctx *ctx)
{
        struct device *dev;

        if (!ctx->dport_dev)
                return NULL;

        dev = bus_find_device(&cxl_bus_type, NULL, ctx, match_port_by_dport);
        if (dev)
                return to_cxl_port(dev);
        return NULL;
}

static struct cxl_port *find_cxl_port(struct device *dport_dev,
                                      struct cxl_dport **dport)
{
        struct cxl_find_port_ctx ctx = {
                .dport_dev = dport_dev,
                .dport = dport,
        };
        struct cxl_port *port;

        port = __find_cxl_port(&ctx);
        return port;
}

static struct cxl_port *find_cxl_port_at(struct cxl_port *parent_port,
                                         struct device *dport_dev,
                                         struct cxl_dport **dport)
{
        struct cxl_find_port_ctx ctx = {
                .dport_dev = dport_dev,
                .parent_port = parent_port,
                .dport = dport,
        };
        struct cxl_port *port;

        port = __find_cxl_port(&ctx);
        return port;
}

/*
 * All users of grandparent() are using it to walk PCIe-like switch port
 * hierarchy. A PCIe switch consists of a bridge device representing the
 * upstream switch port and N bridges representing downstream switch ports.
 * When bridges stack, the grandparent of a downstream switch port is
 * another downstream switch port in the immediate ancestor switch.
 */
static struct device *grandparent(struct device *dev)
{
        if (dev && dev->parent)
                return dev->parent->parent;
        return NULL;
}

static void delete_endpoint(void *data)
{
        struct cxl_memdev *cxlmd = data;
        struct cxl_port *endpoint = cxlmd->endpoint;
        struct cxl_port *parent_port;
        struct device *parent;

        parent_port = cxl_mem_find_port(cxlmd, NULL);
        if (!parent_port)
                goto out;
        parent = &parent_port->dev;

        device_lock(parent);
        if (parent->driver && !endpoint->dead) {
                devm_release_action(parent, cxl_unlink_parent_dport, endpoint);
                devm_release_action(parent, cxl_unlink_uport, endpoint);
                devm_release_action(parent, unregister_port, endpoint);
        }
        cxlmd->endpoint = NULL;
        device_unlock(parent);
        put_device(parent);
out:
        put_device(&endpoint->dev);
}

int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint)
{
        struct device *dev = &cxlmd->dev;

        get_device(&endpoint->dev);
        cxlmd->endpoint = endpoint;
        cxlmd->depth = endpoint->depth;
        return devm_add_action_or_reset(dev, delete_endpoint, cxlmd);
}
EXPORT_SYMBOL_NS_GPL(cxl_endpoint_autoremove, CXL);

/*
 * The natural end of life of a non-root 'cxl_port' is when its parent port
 * goes through a ->remove() event ("top-down" unregistration). The unnatural
 * trigger for a port to be unregistered is when all memdevs beneath that
 * port have gone through ->remove(). This "bottom-up" removal selectively
 * removes individual child ports manually. This depends on
 * devm_cxl_add_port() to not change its devm action registration order, and
 * for dports to have already been destroyed by reap_dports().
 */
static void delete_switch_port(struct cxl_port *port)
{
        devm_release_action(port->dev.parent, cxl_unlink_parent_dport, port);
        devm_release_action(port->dev.parent, cxl_unlink_uport, port);
        devm_release_action(port->dev.parent, unregister_port, port);
}

static void reap_dports(struct cxl_port *port)
{
        struct cxl_dport *dport;
        unsigned long index;

        device_lock_assert(&port->dev);

        xa_for_each(&port->dports, index, dport) {
                devm_release_action(&port->dev, cxl_dport_unlink, dport);
                devm_release_action(&port->dev, cxl_dport_remove, dport);
                devm_kfree(&port->dev, dport);
        }
}

struct detach_ctx {
        struct cxl_memdev *cxlmd;
        int depth;
};

static int port_has_memdev(struct device *dev, const void *data)
{
        const struct detach_ctx *ctx = data;
        struct cxl_port *port;

        if (!is_cxl_port(dev))
                return 0;

        port = to_cxl_port(dev);
        if (port->depth != ctx->depth)
                return 0;

        return !!cxl_ep_load(port, ctx->cxlmd);
}

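/*
 * Walk the topology bottom-up (deepest ports first) and drop @cxlmd's
 * endpoint registration from each port. A port whose last endpoint departs
 * is marked dead, its dports are reaped, and the port is deleted.
 */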
static void cxl_detach_ep(void *data)
{
        struct cxl_memdev *cxlmd = data;

        for (int i = cxlmd->depth - 1; i >= 1; i--) {
                struct cxl_port *port, *parent_port;
                struct detach_ctx ctx = {
                        .cxlmd = cxlmd,
                        .depth = i,
                };
                struct device *dev;
                struct cxl_ep *ep;
                bool died = false;

                dev = bus_find_device(&cxl_bus_type, NULL, &ctx,
                                      port_has_memdev);
                if (!dev)
                        continue;
                port = to_cxl_port(dev);

                parent_port = to_cxl_port(port->dev.parent);
                device_lock(&parent_port->dev);
                device_lock(&port->dev);
                ep = cxl_ep_load(port, cxlmd);
                dev_dbg(&cxlmd->dev, "disconnect %s from %s\n",
                        ep ? dev_name(ep->ep) : "", dev_name(&port->dev));
                cxl_ep_remove(port, ep);
                if (ep && !port->dead && xa_empty(&port->endpoints) &&
                    !is_cxl_root(parent_port) && parent_port->dev.driver) {
                        /*
                         * This was the last ep attached to a dynamically
                         * enumerated port. Block new cxl_add_ep() and garbage
                         * collect the port.
                         */
                        died = true;
                        port->dead = true;
                        reap_dports(port);
                }
                device_unlock(&port->dev);

                if (died) {
                        dev_dbg(&cxlmd->dev, "delete %s\n",
                                dev_name(&port->dev));
                        delete_switch_port(port);
                }
                put_device(&port->dev);
                device_unlock(&parent_port->dev);
        }
}

static resource_size_t find_component_registers(struct device *dev)
{
        struct cxl_register_map map;
        struct pci_dev *pdev;

        /*
         * Theoretically, CXL component registers can be hosted on a
         * non-PCI device; in practice, only cxl_test hits this case.
         */
        if (!dev_is_pci(dev))
                return CXL_RESOURCE_NONE;

        pdev = to_pci_dev(dev);

        cxl_find_regblock(pdev, CXL_REGLOC_RBI_COMPONENT, &map);
        return map.resource;
}

static int add_port_attach_ep(struct cxl_memdev *cxlmd,
                              struct device *uport_dev,
                              struct device *dport_dev)
{
        struct device *dparent = grandparent(dport_dev);
        struct cxl_port *port, *parent_port = NULL;
        struct cxl_dport *dport, *parent_dport;
        resource_size_t component_reg_phys;
        int rc;

        if (!dparent) {
                /*
                 * The iteration reached the topology root without finding the
                 * CXL-root 'cxl_port' on a previous iteration; fail for now to
                 * be re-probed after the platform driver attaches.
                 */
                dev_dbg(&cxlmd->dev, "%s is a root dport\n",
                        dev_name(dport_dev));
                return -ENXIO;
        }

        parent_port = find_cxl_port(dparent, &parent_dport);
        if (!parent_port) {
                /* iterate to create this parent_port */
                return -EAGAIN;
        }

        device_lock(&parent_port->dev);
        if (!parent_port->dev.driver) {
                dev_warn(&cxlmd->dev,
                         "port %s:%s disabled, failed to enumerate CXL.mem\n",
                         dev_name(&parent_port->dev), dev_name(uport_dev));
                port = ERR_PTR(-ENXIO);
                goto out;
        }

        port = find_cxl_port_at(parent_port, dport_dev, &dport);
        if (!port) {
                component_reg_phys = find_component_registers(uport_dev);
                port = devm_cxl_add_port(&parent_port->dev, uport_dev,
                                         component_reg_phys, parent_dport);
                /* retry find to pick up the new dport information */
                if (!IS_ERR(port))
                        port = find_cxl_port_at(parent_port, dport_dev, &dport);
        }
out:
        device_unlock(&parent_port->dev);

        if (IS_ERR(port))
                rc = PTR_ERR(port);
        else {
                dev_dbg(&cxlmd->dev, "add to new port %s:%s\n",
                        dev_name(&port->dev), dev_name(port->uport_dev));
                rc = cxl_add_ep(dport, &cxlmd->dev);
                if (rc == -EBUSY) {
                        /*
                         * "can't" happen, but this error code means
                         * something to the caller, so translate it.
                         */
                        rc = -ENXIO;
                }
                put_device(&port->dev);
        }

        put_device(&parent_port->dev);
        return rc;
}

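/*
 * Walk from @cxlmd up through its PCI ancestry and add a cxl_port for each
 * switch level found, registering the memdev's interest in each port along
 * the way. Restarts whenever a new port is added, since new descendants may
 * have become enumerable.
 */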
int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd)
{
        struct device *dev = &cxlmd->dev;
        struct device *iter;
        int rc;

        /*
         * Skip intermediate port enumeration in the RCH case; there
         * are no ports in between a host bridge and an endpoint.
         */
        if (cxlmd->cxlds->rcd)
                return 0;

        rc = devm_add_action_or_reset(&cxlmd->dev, cxl_detach_ep, cxlmd);
        if (rc)
                return rc;

        /*
         * Scan for and add all cxl_ports in this device's ancestry.
         * Repeat until no more ports are added. Abort if a port add
         * attempt fails.
         */
retry:
        for (iter = dev; iter; iter = grandparent(iter)) {
                struct device *dport_dev = grandparent(iter);
                struct device *uport_dev;
                struct cxl_dport *dport;
                struct cxl_port *port;

                if (!dport_dev)
                        return 0;

                uport_dev = dport_dev->parent;
                if (!uport_dev) {
                        dev_warn(dev, "at %s no parent for dport: %s\n",
                                 dev_name(iter), dev_name(dport_dev));
                        return -ENXIO;
                }

                dev_dbg(dev, "scan: iter: %s dport_dev: %s parent: %s\n",
                        dev_name(iter), dev_name(dport_dev),
                        dev_name(uport_dev));
                port = find_cxl_port(dport_dev, &dport);
                if (port) {
                        dev_dbg(&cxlmd->dev,
                                "found already registered port %s:%s\n",
                                dev_name(&port->dev),
                                dev_name(port->uport_dev));
                        rc = cxl_add_ep(dport, &cxlmd->dev);

                        /*
                         * If the endpoint already exists in the port's list,
                         * that's ok, it was added on a previous pass.
                         * Otherwise, retry in add_port_attach_ep() after
                         * taking the parent_port lock, as the current port
                         * may be being reaped.
                         */
                        if (rc && rc != -EBUSY) {
                                put_device(&port->dev);
                                return rc;
                        }

                        /* Any more ports to add between this one and the root? */
                        if (!dev_is_cxl_root_child(&port->dev)) {
                                put_device(&port->dev);
                                continue;
                        }

                        put_device(&port->dev);
                        return 0;
                }

                rc = add_port_attach_ep(cxlmd, uport_dev, dport_dev);
                /* port missing, try to add parent */
                if (rc == -EAGAIN)
                        continue;
                /* failed to add ep or port */
                if (rc)
                        return rc;
                /* port added, new descendants possible, start over */
                goto retry;
        }

        return 0;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_ports, CXL);

struct cxl_port *cxl_pci_find_port(struct pci_dev *pdev,
                                   struct cxl_dport **dport)
{
        return find_cxl_port(pdev->dev.parent, dport);
}
EXPORT_SYMBOL_NS_GPL(cxl_pci_find_port, CXL);

struct cxl_port *cxl_mem_find_port(struct cxl_memdev *cxlmd,
                                   struct cxl_dport **dport)
{
        return find_cxl_port(grandparent(&cxlmd->dev), dport);
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_find_port, CXL);

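/*
 * Program the switch decoder's target list from @target_map, an array of
 * dport port_ids in interleave order, under the target seqlock so readers
 * like target_list_show() see a consistent snapshot.
 */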
1543 static int decoder_populate_targets(struct cxl_switch_decoder *cxlsd,
1544                                     struct cxl_port *port, int *target_map)
1545 {
1546         int i, rc = 0;
1547
1548         if (!target_map)
1549                 return 0;
1550
1551         device_lock_assert(&port->dev);
1552
1553         if (xa_empty(&port->dports))
1554                 return -EINVAL;
1555
1556         write_seqlock(&cxlsd->target_lock);
1557         for (i = 0; i < cxlsd->nr_targets; i++) {
1558                 struct cxl_dport *dport = find_dport(port, target_map[i]);
1559
1560                 if (!dport) {
1561                         rc = -ENXIO;
1562                         break;
1563                 }
1564                 cxlsd->target[i] = dport;
1565         }
1566         write_sequnlock(&cxlsd->target_lock);
1567
1568         return rc;
1569 }
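
/*
 * A minimal sketch (modeled on the reader in the region code, not taken
 * from this file) of the seqlock read side that pairs with the
 * write_seqlock() above. Readers retry instead of blocking the rare
 * target-list update:
 *
 *	struct cxl_dport *dport;
 *	unsigned int seq;
 *
 *	do {
 *		seq = read_seqbegin(&cxlsd->target_lock);
 *		dport = cxlsd->target[pos];
 *	} while (read_seqretry(&cxlsd->target_lock, seq));
 */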
1570
1571 struct cxl_dport *cxl_hb_modulo(struct cxl_root_decoder *cxlrd, int pos)
1572 {
1573         struct cxl_switch_decoder *cxlsd = &cxlrd->cxlsd;
1574         struct cxl_decoder *cxld = &cxlsd->cxld;
1575         int iw;
1576
1577         iw = cxld->interleave_ways;
1578         if (dev_WARN_ONCE(&cxld->dev, iw != cxlsd->nr_targets,
1579                           "misconfigured root decoder\n"))
1580                 return NULL;
1581
1582         return cxlrd->cxlsd.target[pos % iw];
1583 }
1584 EXPORT_SYMBOL_NS_GPL(cxl_hb_modulo, CXL);
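
/*
 * Worked example of the modulo routing above: with interleave_ways == 4
 * and target[] == { HB0, HB1, HB2, HB3 }, region position 5 selects
 * target[5 % 4] == target[1] == HB1, i.e. host bridge selection simply
 * cycles through the target list by position.
 */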
1585
1586 static struct lock_class_key cxl_decoder_key;
1587
1588 /**
1589  * cxl_decoder_init - Common decoder setup / initialization
1590  * @port: owning port of this decoder
1591  * @cxld: common decoder properties to initialize
1592  *
1593  * A port may contain one or more decoders. Each of those decoders
1594  * enables some address space for CXL.mem utilization. A decoder is
1595  * expected to be configured by the caller before registering via
1596  * cxl_decoder_add().
1597  */
1598 static int cxl_decoder_init(struct cxl_port *port, struct cxl_decoder *cxld)
1599 {
1600         struct device *dev;
1601         int rc;
1602
1603         rc = ida_alloc(&port->decoder_ida, GFP_KERNEL);
1604         if (rc < 0)
1605                 return rc;
1606
1607         /* need parent to stick around to release the id */
1608         get_device(&port->dev);
1609         cxld->id = rc;
1610
1611         dev = &cxld->dev;
1612         device_initialize(dev);
1613         lockdep_set_class(&dev->mutex, &cxl_decoder_key);
1614         device_set_pm_not_required(dev);
1615         dev->parent = &port->dev;
1616         dev->bus = &cxl_bus_type;
1617
1618         /* Pre-initialize an "empty" decoder */
1619         cxld->interleave_ways = 1;
1620         cxld->interleave_granularity = PAGE_SIZE;
1621         cxld->target_type = CXL_DECODER_HOSTONLYMEM;
1622         cxld->hpa_range = (struct range) {
1623                 .start = 0,
1624                 .end = -1,
1625         };
1626
1627         return 0;
1628 }
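
/*
 * Note on the defaults above: { .start = 0, .end = -1 } is the
 * conventional "empty" range. range_len() computes end - start + 1,
 * which evaluates to 0, so an unconfigured decoder reports zero size.
 */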
1629
1630 static int cxl_switch_decoder_init(struct cxl_port *port,
1631                                    struct cxl_switch_decoder *cxlsd,
1632                                    int nr_targets)
1633 {
1634         if (nr_targets > CXL_DECODER_MAX_INTERLEAVE)
1635                 return -EINVAL;
1636
1637         cxlsd->nr_targets = nr_targets;
1638         seqlock_init(&cxlsd->target_lock);
1639         return cxl_decoder_init(port, &cxlsd->cxld);
1640 }
1641
1642 /**
1643  * cxl_root_decoder_alloc - Allocate a root level decoder
1644  * @port: owning CXL root of this decoder
1645  * @nr_targets: static number of downstream targets
1646  * @calc_hb: which host bridge covers the nth position by granularity
1647  *
1648  * Return: A new cxl decoder to be registered by cxl_decoder_add(). A
1649  * 'CXL root' decoder is one that decodes from a top-level / static platform
1650  * firmware description of CXL resources into a CXL standard decode
1651  * topology.
1652  */
1653 struct cxl_root_decoder *cxl_root_decoder_alloc(struct cxl_port *port,
1654                                                 unsigned int nr_targets,
1655                                                 cxl_calc_hb_fn calc_hb)
1656 {
1657         struct cxl_root_decoder *cxlrd;
1658         struct cxl_switch_decoder *cxlsd;
1659         struct cxl_decoder *cxld;
1660         int rc;
1661
1662         if (!is_cxl_root(port))
1663                 return ERR_PTR(-EINVAL);
1664
1665         cxlrd = kzalloc(struct_size(cxlrd, cxlsd.target, nr_targets),
1666                         GFP_KERNEL);
1667         if (!cxlrd)
1668                 return ERR_PTR(-ENOMEM);
1669
1670         cxlsd = &cxlrd->cxlsd;
1671         rc = cxl_switch_decoder_init(port, cxlsd, nr_targets);
1672         if (rc) {
1673                 kfree(cxlrd);
1674                 return ERR_PTR(rc);
1675         }
1676
1677         cxlrd->calc_hb = calc_hb;
1678         mutex_init(&cxlrd->range_lock);
1679
1680         cxld = &cxlsd->cxld;
1681         cxld->dev.type = &cxl_decoder_root_type;
1682         /*
1683          * cxl_root_decoder_release() special cases negative ids to
1684          * detect memregion_alloc() failures.
1685          */
1686         atomic_set(&cxlrd->region_id, -1);
1687         rc = memregion_alloc(GFP_KERNEL);
1688         if (rc < 0) {
1689                 put_device(&cxld->dev);
1690                 return ERR_PTR(rc);
1691         }
1692
1693         atomic_set(&cxlrd->region_id, rc);
1694         return cxlrd;
1695 }
1696 EXPORT_SYMBOL_NS_GPL(cxl_root_decoder_alloc, CXL);
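
/*
 * A hedged usage sketch, modeled on (not copied from) the cxl_acpi
 * CFMWS parsing: the caller programs the decoder from the platform
 * description and then registers it:
 *
 *	cxlrd = cxl_root_decoder_alloc(root_port, ways, cxl_hb_modulo);
 *	if (IS_ERR(cxlrd))
 *		return PTR_ERR(cxlrd);
 *
 *	cxld = &cxlrd->cxlsd.cxld;
 *	cxld->hpa_range = (struct range) {
 *		.start = base,
 *		.end = base + size - 1,
 *	};
 *	cxld->interleave_ways = ways;
 *	... fill target_map[] with host bridge ids ...
 *
 *	rc = cxl_decoder_add(cxld, target_map);
 *	if (rc)
 *		put_device(&cxld->dev);
 */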
1697
1698 /**
1699  * cxl_switch_decoder_alloc - Allocate a switch level decoder
1700  * @port: owning CXL switch port of this decoder
1701  * @nr_targets: max number of dynamically addressable downstream targets
1702  *
1703  * Return: A new cxl decoder to be registered by cxl_decoder_add(). A
1704  * 'switch' decoder is any decoder that can be enumerated by PCIe
1705  * topology and the HDM Decoder Capability. This includes the decoders
1706  * that sit between Switch Upstream Ports / Switch Downstream Ports and
1707  * Host Bridges / Root Ports.
1708  */
1709 struct cxl_switch_decoder *cxl_switch_decoder_alloc(struct cxl_port *port,
1710                                                     unsigned int nr_targets)
1711 {
1712         struct cxl_switch_decoder *cxlsd;
1713         struct cxl_decoder *cxld;
1714         int rc;
1715
1716         if (is_cxl_root(port) || is_cxl_endpoint(port))
1717                 return ERR_PTR(-EINVAL);
1718
1719         cxlsd = kzalloc(struct_size(cxlsd, target, nr_targets), GFP_KERNEL);
1720         if (!cxlsd)
1721                 return ERR_PTR(-ENOMEM);
1722
1723         rc = cxl_switch_decoder_init(port, cxlsd, nr_targets);
1724         if (rc) {
1725                 kfree(cxlsd);
1726                 return ERR_PTR(rc);
1727         }
1728
1729         cxld = &cxlsd->cxld;
1730         cxld->dev.type = &cxl_decoder_switch_type;
1731         return cxlsd;
1732 }
1733 EXPORT_SYMBOL_NS_GPL(cxl_switch_decoder_alloc, CXL);
1734
1735 /**
1736  * cxl_endpoint_decoder_alloc - Allocate an endpoint decoder
1737  * @port: owning port of this decoder
1738  *
1739  * Return: A new cxl decoder to be registered by cxl_decoder_add()
1740  */
1741 struct cxl_endpoint_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port)
1742 {
1743         struct cxl_endpoint_decoder *cxled;
1744         struct cxl_decoder *cxld;
1745         int rc;
1746
1747         if (!is_cxl_endpoint(port))
1748                 return ERR_PTR(-EINVAL);
1749
1750         cxled = kzalloc(sizeof(*cxled), GFP_KERNEL);
1751         if (!cxled)
1752                 return ERR_PTR(-ENOMEM);
1753
1754         cxled->pos = -1;
1755         cxld = &cxled->cxld;
1756         rc = cxl_decoder_init(port, cxld);
1757         if (rc) {
1758                 kfree(cxled);
1759                 return ERR_PTR(rc);
1760         }
1761
1762         cxld->dev.type = &cxl_decoder_endpoint_type;
1763         return cxled;
1764 }
1765 EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_alloc, CXL);
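
/*
 * Endpoint decoders carry no target list (the endpoint is the decode
 * terminus), and cxled->pos == -1 marks "not yet part of a region". A
 * minimal registration sketch, assuming HDM enumeration has already
 * configured the decoder:
 *
 *	cxled = cxl_endpoint_decoder_alloc(port);
 *	if (IS_ERR(cxled))
 *		return PTR_ERR(cxled);
 *	rc = cxl_decoder_add(&cxled->cxld, NULL);	/* no target_map */
 */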
1766
1767 /**
1768  * cxl_decoder_add_locked - Add a decoder with targets
1769  * @cxld: The cxl decoder allocated by cxl_<type>_decoder_alloc()
1770  * @target_map: A list of downstream ports that this decoder can direct memory
1771  *              traffic to. These numbers should correspond with the port number
1772  *              in the PCIe Link Capabilities structure.
1773  *
1774  * Certain types of decoders may not have any targets. The main example of this
1775  * is an endpoint device. A more awkward example is a host bridge whose root
1776  * ports get hot-added (technically possible, though unlikely).
1777  *
1778  * This is the locked variant of cxl_decoder_add().
1779  *
1780  * Context: Process context. Expects the device lock of the port that owns the
1781  *          @cxld to be held.
1782  *
1783  * Return: Negative error code if the decoder wasn't properly configured; else
1784  *         returns 0.
1785  */
1786 int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map)
1787 {
1788         struct cxl_port *port;
1789         struct device *dev;
1790         int rc;
1791
1792         if (WARN_ON_ONCE(!cxld))
1793                 return -EINVAL;
1794
1795         if (WARN_ON_ONCE(IS_ERR(cxld)))
1796                 return PTR_ERR(cxld);
1797
1798         if (cxld->interleave_ways < 1)
1799                 return -EINVAL;
1800
1801         dev = &cxld->dev;
1802
1803         port = to_cxl_port(cxld->dev.parent);
1804         if (!is_endpoint_decoder(dev)) {
1805                 struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev);
1806
1807                 rc = decoder_populate_targets(cxlsd, port, target_map);
1808                 if (rc && (cxld->flags & CXL_DECODER_F_ENABLE)) {
1809                         dev_err(&port->dev,
1810                                 "Failed to populate active decoder targets\n");
1811                         return rc;
1812                 }
1813         }
1814
1815         rc = dev_set_name(dev, "decoder%d.%d", port->id, cxld->id);
1816         if (rc)
1817                 return rc;
1818
1819         return device_add(dev);
1820 }
1821 EXPORT_SYMBOL_NS_GPL(cxl_decoder_add_locked, CXL);
1822
1823 /**
1824  * cxl_decoder_add - Add a decoder with targets
1825  * @cxld: The cxl decoder allocated by cxl_<type>_decoder_alloc()
1826  * @target_map: A list of downstream ports that this decoder can direct memory
1827  *              traffic to. These numbers should correspond with the port number
1828  *              in the PCIe Link Capabilities structure.
1829  *
1830  * This is the unlocked variant of cxl_decoder_add_locked(); see that
1831  * function for the full description and return values.
1832  *
1833  * Context: Process context. Takes and releases the device lock of the port that
1834  *          owns the @cxld.
1835  */
1836 int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map)
1837 {
1838         struct cxl_port *port;
1839         int rc;
1840
1841         if (WARN_ON_ONCE(!cxld))
1842                 return -EINVAL;
1843
1844         if (WARN_ON_ONCE(IS_ERR(cxld)))
1845                 return PTR_ERR(cxld);
1846
1847         port = to_cxl_port(cxld->dev.parent);
1848
1849         device_lock(&port->dev);
1850         rc = cxl_decoder_add_locked(cxld, target_map);
1851         device_unlock(&port->dev);
1852
1853         return rc;
1854 }
1855 EXPORT_SYMBOL_NS_GPL(cxl_decoder_add, CXL);
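
/*
 * A hedged end-to-end sketch, modeled on the HDM decoder enumeration
 * path, tying alloc, add, and devm teardown together. @host is the
 * devm parent (typically the owning port's device):
 *
 *	cxlsd = cxl_switch_decoder_alloc(port, nr_targets);
 *	if (IS_ERR(cxlsd))
 *		return PTR_ERR(cxlsd);
 *	... read target_map[] from the HDM decoder registers ...
 *	rc = cxl_decoder_add(&cxlsd->cxld, target_map);
 *	if (rc)
 *		put_device(&cxlsd->cxld.dev);	/* drops the only reference */
 *	else
 *		rc = cxl_decoder_autoremove(host, &cxlsd->cxld);
 */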
1856
1857 static void cxld_unregister(void *dev)
1858 {
1859         struct cxl_endpoint_decoder *cxled;
1860
1861         if (is_endpoint_decoder(dev)) {
1862                 cxled = to_cxl_endpoint_decoder(dev);
1863                 cxl_decoder_kill_region(cxled);
1864         }
1865
1866         device_unregister(dev);
1867 }
1868
1869 int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld)
1870 {
1871         return devm_add_action_or_reset(host, cxld_unregister, &cxld->dev);
1872 }
1873 EXPORT_SYMBOL_NS_GPL(cxl_decoder_autoremove, CXL);
1874
1875 /**
1876  * __cxl_driver_register - register a driver for the cxl bus
1877  * @cxl_drv: cxl driver structure to attach
1878  * @owner: owning module/driver
1879  * @modname: KBUILD_MODNAME for parent driver
1880  */
1881 int __cxl_driver_register(struct cxl_driver *cxl_drv, struct module *owner,
1882                           const char *modname)
1883 {
1884         if (!cxl_drv->probe) {
1885                 pr_debug("%s ->probe() must be specified\n", modname);
1886                 return -EINVAL;
1887         }
1888
1889         if (!cxl_drv->name) {
1890                 pr_debug("%s ->name must be specified\n", modname);
1891                 return -EINVAL;
1892         }
1893
1894         if (!cxl_drv->id) {
1895                 pr_debug("%s ->id must be specified\n", modname);
1896                 return -EINVAL;
1897         }
1898
1899         cxl_drv->drv.bus = &cxl_bus_type;
1900         cxl_drv->drv.owner = owner;
1901         cxl_drv->drv.mod_name = modname;
1902         cxl_drv->drv.name = cxl_drv->name;
1903
1904         return driver_register(&cxl_drv->drv);
1905 }
1906 EXPORT_SYMBOL_NS_GPL(__cxl_driver_register, CXL);
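
/*
 * A minimal, hypothetical registration sketch (the my_* names are
 * invented). cxl_driver_register() and module_cxl_driver() are the
 * cxl.h wrappers that supply THIS_MODULE / KBUILD_MODNAME, and the
 * namespaced exports require MODULE_IMPORT_NS(CXL):
 *
 *	static int my_probe(struct device *dev)
 *	{
 *		return 0;
 *	}
 *
 *	static struct cxl_driver my_driver = {
 *		.name = "my_cxl",
 *		.probe = my_probe,
 *		.id = CXL_DEVICE_PORT,
 *	};
 *	module_cxl_driver(my_driver);
 *	MODULE_IMPORT_NS(CXL);
 */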
1907
1908 void cxl_driver_unregister(struct cxl_driver *cxl_drv)
1909 {
1910         driver_unregister(&cxl_drv->drv);
1911 }
1912 EXPORT_SYMBOL_NS_GPL(cxl_driver_unregister, CXL);
1913
1914 static int cxl_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
1915 {
1916         return add_uevent_var(env, "MODALIAS=" CXL_MODALIAS_FMT,
1917                               cxl_device_id(dev));
1918 }
1919
1920 static int cxl_bus_match(struct device *dev, struct device_driver *drv)
1921 {
1922         return cxl_device_id(dev) == to_cxl_drv(drv)->id;
1923 }
1924
1925 static int cxl_bus_probe(struct device *dev)
1926 {
1927         int rc;
1928
1929         rc = to_cxl_drv(dev->driver)->probe(dev);
1930         dev_dbg(dev, "probe: %d\n", rc);
1931         return rc;
1932 }
1933
1934 static void cxl_bus_remove(struct device *dev)
1935 {
1936         struct cxl_driver *cxl_drv = to_cxl_drv(dev->driver);
1937
1938         if (cxl_drv->remove)
1939                 cxl_drv->remove(dev);
1940 }
1941
1942 static struct workqueue_struct *cxl_bus_wq;
1943
1944 static void cxl_bus_rescan_queue(struct work_struct *w)
1945 {
1946         int rc = bus_rescan_devices(&cxl_bus_type);
1947
1948         pr_debug("CXL bus rescan result: %d\n", rc);
1949 }
1950
1951 void cxl_bus_rescan(void)
1952 {
1953         static DECLARE_WORK(rescan_work, cxl_bus_rescan_queue);
1954
1955         queue_work(cxl_bus_wq, &rescan_work);
1956 }
1957 EXPORT_SYMBOL_NS_GPL(cxl_bus_rescan, CXL);
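
/*
 * rescan_work above is a single static work item, so concurrent
 * cxl_bus_rescan() calls coalesce: queue_work() returns false while the
 * item is still pending, and one bus_rescan_devices() pass services all
 * callers.
 */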
1958
1959 void cxl_bus_drain(void)
1960 {
1961         drain_workqueue(cxl_bus_wq);
1962 }
1963 EXPORT_SYMBOL_NS_GPL(cxl_bus_drain, CXL);
1964
1965 bool schedule_cxl_memdev_detach(struct cxl_memdev *cxlmd)
1966 {
1967         return queue_work(cxl_bus_wq, &cxlmd->detach_work);
1968 }
1969 EXPORT_SYMBOL_NS_GPL(schedule_cxl_memdev_detach, CXL);
1970
1971 /* for user tooling to ensure port disable work has completed */
1972 static ssize_t flush_store(const struct bus_type *bus, const char *buf, size_t count)
1973 {
1974         if (sysfs_streq(buf, "1")) {
1975                 flush_workqueue(cxl_bus_wq);
1976                 return count;
1977         }
1978
1979         return -EINVAL;
1980 }
1981
1982 static BUS_ATTR_WO(flush);
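
/*
 * User-space side of the flush attribute above, assuming the standard
 * sysfs mount point:
 *
 *	# echo 1 > /sys/bus/cxl/flush
 *
 * The write blocks until previously queued port add/remove work
 * completes; any value other than "1" fails with -EINVAL.
 */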
1983
1984 static struct attribute *cxl_bus_attributes[] = {
1985         &bus_attr_flush.attr,
1986         NULL,
1987 };
1988
1989 static struct attribute_group cxl_bus_attribute_group = {
1990         .attrs = cxl_bus_attributes,
1991 };
1992
1993 static const struct attribute_group *cxl_bus_attribute_groups[] = {
1994         &cxl_bus_attribute_group,
1995         NULL,
1996 };
1997
1998 struct bus_type cxl_bus_type = {
1999         .name = "cxl",
2000         .uevent = cxl_bus_uevent,
2001         .match = cxl_bus_match,
2002         .probe = cxl_bus_probe,
2003         .remove = cxl_bus_remove,
2004         .bus_groups = cxl_bus_attribute_groups,
2005 };
2006 EXPORT_SYMBOL_NS_GPL(cxl_bus_type, CXL);
2007
2008 static struct dentry *cxl_debugfs;
2009
2010 struct dentry *cxl_debugfs_create_dir(const char *dir)
2011 {
2012         return debugfs_create_dir(dir, cxl_debugfs);
2013 }
2014 EXPORT_SYMBOL_NS_GPL(cxl_debugfs_create_dir, CXL);
2015
2016 static __init int cxl_core_init(void)
2017 {
2018         int rc;
2019
2020         cxl_debugfs = debugfs_create_dir("cxl", NULL);
2021
2022         cxl_mbox_init();
2023
2024         rc = cxl_memdev_init();
2025         if (rc)
2026                 return rc;
2027
2028         cxl_bus_wq = alloc_ordered_workqueue("cxl_port", 0);
2029         if (!cxl_bus_wq) {
2030                 rc = -ENOMEM;
2031                 goto err_wq;
2032         }
2033
2034         rc = bus_register(&cxl_bus_type);
2035         if (rc)
2036                 goto err_bus;
2037
2038         rc = cxl_region_init();
2039         if (rc)
2040                 goto err_region;
2041
2042         return 0;
2043
2044 err_region:
2045         bus_unregister(&cxl_bus_type);
2046 err_bus:
2047         destroy_workqueue(cxl_bus_wq);
2048 err_wq:
2049         cxl_memdev_exit();
2050         return rc;
2051 }
2052
2053 static void cxl_core_exit(void)
2054 {
2055         cxl_region_exit();
2056         bus_unregister(&cxl_bus_type);
2057         destroy_workqueue(cxl_bus_wq);
2058         cxl_memdev_exit();
2059         debugfs_remove_recursive(cxl_debugfs);
2060 }
2061
2062 subsys_initcall(cxl_core_init);
2063 module_exit(cxl_core_exit);
2064 MODULE_LICENSE("GPL v2");