// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include <linux/memregion.h>
#include <linux/genalloc.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/uuid.h>
#include <linux/sort.h>
#include <linux/idr.h>
#include <cxlmem.h>
#include <cxl.h>
#include "core.h"
/**
 * DOC: cxl core region
 *
 * CXL Regions represent mapped memory capacity in system physical address
 * space. Whereas the CXL Root Decoders identify the bounds of potential CXL
 * Memory ranges, Regions represent the active mapped capacity by the HDM
 * Decoder Capability structures throughout the Host Bridges, Switches, and
 * Endpoints in the topology.
 *
 * Region configuration has ordering constraints. UUID may be set at any time
 * but is only visible for persistent regions.
 * 1. Interleave granularity
 * 2. Interleave size
 * 3. Decoder targets
 */

/*
 * All changes to the interleave configuration occur with this lock held
 * for write.
 */
static DECLARE_RWSEM(cxl_region_rwsem);
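
/*
 * Illustrative sysfs flow honoring the ordering constraints above. The
 * decoder and region names below are examples only; actual names vary by
 * platform:
 *
 *	region=$(cat /sys/bus/cxl/devices/decoder0.0/create_pmem_region)
 *	echo $region > /sys/bus/cxl/devices/decoder0.0/create_pmem_region
 *	echo 256 > /sys/bus/cxl/devices/$region/interleave_granularity
 *	echo 1 > /sys/bus/cxl/devices/$region/interleave_ways
 *	uuidgen > /sys/bus/cxl/devices/$region/uuid
 *	echo $((256 << 20)) > /sys/bus/cxl/devices/$region/size
 *	echo decoder2.0 > /sys/bus/cxl/devices/$region/target0
 *	echo 1 > /sys/bus/cxl/devices/$region/commit
 */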
static struct cxl_region *to_cxl_region(struct device *dev);
static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	ssize_t rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;

	if (cxlr->mode != CXL_DECODER_PMEM)
		rc = sysfs_emit(buf, "\n");
	else
		rc = sysfs_emit(buf, "%pUb\n", &p->uuid);
	up_read(&cxl_region_rwsem);

	return rc;
}
static int is_dup(struct device *match, void *data)
{
	struct cxl_region_params *p;
	struct cxl_region *cxlr;
	uuid_t *uuid = data;

	if (!is_cxl_region(match))
		return 0;

	lockdep_assert_held(&cxl_region_rwsem);
	cxlr = to_cxl_region(match);
	p = &cxlr->params;

	if (uuid_equal(&p->uuid, uuid)) {
		dev_dbg(match, "already has uuid: %pUb\n", uuid);
		return -EBUSY;
	}

	return 0;
}
static ssize_t uuid_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t len)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	uuid_t temp;
	ssize_t rc;

	if (len != UUID_STRING_LEN + 1)
		return -EINVAL;

	rc = uuid_parse(buf, &temp);
	if (rc)
		return rc;

	if (uuid_is_null(&temp))
		return -EINVAL;

	rc = down_write_killable(&cxl_region_rwsem);
	if (rc)
		return rc;

	if (uuid_equal(&p->uuid, &temp))
		goto out;

	rc = -EBUSY;
	if (p->state >= CXL_CONFIG_ACTIVE)
		goto out;

	rc = bus_for_each_dev(&cxl_bus_type, NULL, &temp, is_dup);
	if (rc < 0)
		goto out;

	uuid_copy(&p->uuid, &temp);
out:
	up_write(&cxl_region_rwsem);

	if (rc)
		return rc;
	return len;
}
static DEVICE_ATTR_RW(uuid);
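
/*
 * Note on the uuid_store() length check above: a valid write is exactly
 * UUID_STRING_LEN + 1 (37) bytes, i.e. the 36-character UUID string plus a
 * trailing newline, so e.g. `uuidgen > uuid` from a shell succeeds while an
 * unterminated 36-byte write is rejected with -EINVAL.
 */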
static struct cxl_region_ref *cxl_rr_load(struct cxl_port *port,
					  struct cxl_region *cxlr)
{
	return xa_load(&port->regions, (unsigned long)cxlr);
}
static int cxl_region_invalidate_memregion(struct cxl_region *cxlr)
{
	if (!cpu_cache_has_invalidate_memregion()) {
		if (IS_ENABLED(CONFIG_CXL_REGION_INVALIDATION_TEST)) {
			dev_warn_once(
				&cxlr->dev,
				"Bypassing cpu_cache_invalidate_memregion() for testing!\n");
			return 0;
		} else {
			dev_err(&cxlr->dev,
				"Failed to synchronize CPU cache state\n");
			return -ENXIO;
		}
	}

	cpu_cache_invalidate_memregion(IORES_DESC_CXL);
	return 0;
}
static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
{
	struct cxl_region_params *p = &cxlr->params;
	int i, rc = 0;

	/*
	 * Before region teardown attempt to flush, and if the flush
	 * fails cancel the region teardown for data consistency
	 * concerns
	 */
	rc = cxl_region_invalidate_memregion(cxlr);
	if (rc)
		return rc;

	for (i = count - 1; i >= 0; i--) {
		struct cxl_endpoint_decoder *cxled = p->targets[i];
		struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
		struct cxl_port *iter = cxled_to_port(cxled);
		struct cxl_dev_state *cxlds = cxlmd->cxlds;
		struct cxl_ep *ep;

		if (cxlds->rcd)
			goto endpoint_reset;

		while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
			iter = to_cxl_port(iter->dev.parent);

		for (ep = cxl_ep_load(iter, cxlmd); iter;
		     iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
			struct cxl_region_ref *cxl_rr;
			struct cxl_decoder *cxld;

			cxl_rr = cxl_rr_load(iter, cxlr);
			cxld = cxl_rr->decoder;
			if (cxld->reset)
				rc = cxld->reset(cxld);
			if (rc)
				return rc;
			set_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);
		}

endpoint_reset:
		rc = cxled->cxld.reset(&cxled->cxld);
		if (rc)
			return rc;
		set_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);
	}

	/* all decoders associated with this region have been torn down */
	clear_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);

	return 0;
}
static int commit_decoder(struct cxl_decoder *cxld)
{
	struct cxl_switch_decoder *cxlsd = NULL;

	if (cxld->commit)
		return cxld->commit(cxld);

	if (is_switch_decoder(&cxld->dev))
		cxlsd = to_cxl_switch_decoder(&cxld->dev);

	if (dev_WARN_ONCE(&cxld->dev, !cxlsd || cxlsd->nr_targets > 1,
			  "->commit() is required\n"))
		return -ENXIO;
	return 0;
}
static int cxl_region_decode_commit(struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;
	int i, rc = 0;

	for (i = 0; i < p->nr_targets; i++) {
		struct cxl_endpoint_decoder *cxled = p->targets[i];
		struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
		struct cxl_region_ref *cxl_rr;
		struct cxl_decoder *cxld;
		struct cxl_port *iter;
		struct cxl_ep *ep;

		/* commit bottom up */
		for (iter = cxled_to_port(cxled); !is_cxl_root(iter);
		     iter = to_cxl_port(iter->dev.parent)) {
			cxl_rr = cxl_rr_load(iter, cxlr);
			cxld = cxl_rr->decoder;
			rc = commit_decoder(cxld);
			if (rc)
				break;
		}

		if (rc) {
			/* programming @iter failed, teardown */
			for (ep = cxl_ep_load(iter, cxlmd); ep && iter;
			     iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
				cxl_rr = cxl_rr_load(iter, cxlr);
				cxld = cxl_rr->decoder;
				if (cxld->reset)
					cxld->reset(cxld);
			}

			cxled->cxld.reset(&cxled->cxld);
			goto err;
		}
	}

	return 0;

err:
	/* undo the targets that were successfully committed */
	cxl_region_decode_reset(cxlr, i);
	return rc;
}
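
/*
 * Programming order note (for illustration): for an endpoint attached through
 * a switch and a host bridge, the loop above commits the endpoint port's
 * decoder first, then each ancestor port's decoder up to, but not including,
 * the CXL root. On failure, the decoders already committed for that target
 * and all previously committed targets are reset.
 */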
static ssize_t commit_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t len)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	bool commit;
	ssize_t rc;

	rc = kstrtobool(buf, &commit);
	if (rc)
		return rc;

	rc = down_write_killable(&cxl_region_rwsem);
	if (rc)
		return rc;

	/* Already in the requested state? */
	if (commit && p->state >= CXL_CONFIG_COMMIT)
		goto out;
	if (!commit && p->state < CXL_CONFIG_COMMIT)
		goto out;

	/* Not ready to commit? */
	if (commit && p->state < CXL_CONFIG_ACTIVE) {
		rc = -ENXIO;
		goto out;
	}

	/*
	 * Invalidate caches before region setup to drop any speculative
	 * consumption of this address space
	 */
	rc = cxl_region_invalidate_memregion(cxlr);
	if (rc)
		goto out;

	if (commit) {
		rc = cxl_region_decode_commit(cxlr);
		if (rc == 0)
			p->state = CXL_CONFIG_COMMIT;
	} else {
		p->state = CXL_CONFIG_RESET_PENDING;
		up_write(&cxl_region_rwsem);
		device_release_driver(&cxlr->dev);
		down_write(&cxl_region_rwsem);

		/*
		 * The lock was dropped, so need to revalidate that the reset is
		 * still pending.
		 */
		if (p->state == CXL_CONFIG_RESET_PENDING) {
			rc = cxl_region_decode_reset(cxlr, p->interleave_ways);
			/*
			 * Revert to committed since there may still be active
			 * decoders associated with this region, or move forward
			 * to active to mark the reset successful
			 */
			if (rc)
				p->state = CXL_CONFIG_COMMIT;
			else
				p->state = CXL_CONFIG_ACTIVE;
		}
	}

out:
	up_write(&cxl_region_rwsem);

	if (rc)
		return rc;
	return len;
}
static ssize_t commit_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	ssize_t rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;
	rc = sysfs_emit(buf, "%d\n", p->state >= CXL_CONFIG_COMMIT);
	up_read(&cxl_region_rwsem);

	return rc;
}
static DEVICE_ATTR_RW(commit);
static umode_t cxl_region_visible(struct kobject *kobj, struct attribute *a,
				  int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cxl_region *cxlr = to_cxl_region(dev);

	/*
	 * Support tooling that expects to find a 'uuid' attribute for all
	 * regions regardless of mode.
	 */
	if (a == &dev_attr_uuid.attr && cxlr->mode != CXL_DECODER_PMEM)
		return 0444;
	return a->mode;
}
static ssize_t interleave_ways_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	ssize_t rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;
	rc = sysfs_emit(buf, "%d\n", p->interleave_ways);
	up_read(&cxl_region_rwsem);

	return rc;
}

static const struct attribute_group *get_cxl_region_target_group(void);
static ssize_t interleave_ways_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t len)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent);
	struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	unsigned int val, save;
	int rc;
	u8 iw;

	rc = kstrtouint(buf, 0, &val);
	if (rc)
		return rc;

	rc = ways_to_eiw(val, &iw);
	if (rc)
		return rc;

	/*
	 * Even for x3, x6, and x12 interleaves the region interleave must be a
	 * power of 2 multiple of the host bridge interleave.
	 */
	if (!is_power_of_2(val / cxld->interleave_ways) ||
	    (val % cxld->interleave_ways)) {
		dev_dbg(&cxlr->dev, "invalid interleave: %d\n", val);
		return -EINVAL;
	}

	rc = down_write_killable(&cxl_region_rwsem);
	if (rc)
		return rc;
	if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
		rc = -EBUSY;
		goto out;
	}

	save = p->interleave_ways;
	p->interleave_ways = val;
	rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_target_group());
	if (rc)
		p->interleave_ways = save;
out:
	up_write(&cxl_region_rwsem);
	if (rc)
		return rc;
	return len;
}
static DEVICE_ATTR_RW(interleave_ways);
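
/*
 * Worked example of the power-of-2 multiple rule above (illustrative): with a
 * x2 host bridge decode, valid region interleave ways are 2, 4, 8, and 16;
 * with a x3 host bridge decode, valid region ways are 3, 6, and 12, i.e.
 * 3 * {1, 2, 4}.
 */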
static ssize_t interleave_granularity_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	ssize_t rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;
	rc = sysfs_emit(buf, "%d\n", p->interleave_granularity);
	up_read(&cxl_region_rwsem);

	return rc;
}

static ssize_t interleave_granularity_store(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t len)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent);
	struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	int rc, val;
	u16 ig;

	rc = kstrtoint(buf, 0, &val);
	if (rc)
		return rc;

	rc = granularity_to_eig(val, &ig);
	if (rc)
		return rc;

	/*
	 * When the host-bridge is interleaved, disallow region granularity !=
	 * root granularity. Regions with a granularity less than the root
	 * interleave result in needing multiple endpoints to support a single
	 * slot in the interleave (possible to support in the future). Regions
	 * with a granularity greater than the root interleave result in invalid
	 * DPA translations (invalid to support).
	 */
	if (cxld->interleave_ways > 1 && val != cxld->interleave_granularity)
		return -EINVAL;

	rc = down_write_killable(&cxl_region_rwsem);
	if (rc)
		return rc;
	if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
		rc = -EBUSY;
		goto out;
	}

	p->interleave_granularity = val;
out:
	up_write(&cxl_region_rwsem);
	if (rc)
		return rc;
	return len;
}
static DEVICE_ATTR_RW(interleave_granularity);
static ssize_t resource_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	u64 resource = -1ULL;
	ssize_t rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;
	if (p->res)
		resource = p->res->start;
	rc = sysfs_emit(buf, "%#llx\n", resource);
	up_read(&cxl_region_rwsem);

	return rc;
}
static DEVICE_ATTR_RO(resource);

static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct cxl_region *cxlr = to_cxl_region(dev);

	return sysfs_emit(buf, "%s\n", cxl_decoder_mode_name(cxlr->mode));
}
static DEVICE_ATTR_RO(mode);
static int alloc_hpa(struct cxl_region *cxlr, resource_size_t size)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
	struct cxl_region_params *p = &cxlr->params;
	struct resource *res;
	u32 remainder = 0;

	lockdep_assert_held_write(&cxl_region_rwsem);

	/* Nothing to do... */
	if (p->res && resource_size(p->res) == size)
		return 0;

	/* To change size the old size must be freed first */
	if (p->res)
		return -EBUSY;

	if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE)
		return -EBUSY;

	/* ways, granularity and uuid (if PMEM) need to be set before HPA */
	if (!p->interleave_ways || !p->interleave_granularity ||
	    (cxlr->mode == CXL_DECODER_PMEM && uuid_is_null(&p->uuid)))
		return -ENXIO;

	div_u64_rem(size, SZ_256M * p->interleave_ways, &remainder);
	if (remainder)
		return -EINVAL;

	res = alloc_free_mem_region(cxlrd->res, size, SZ_256M,
				    dev_name(&cxlr->dev));
	if (IS_ERR(res)) {
		dev_dbg(&cxlr->dev, "failed to allocate HPA: %ld\n",
			PTR_ERR(res));
		return PTR_ERR(res);
	}

	p->res = res;
	p->state = CXL_CONFIG_INTERLEAVE_ACTIVE;

	return 0;
}
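
/*
 * Example of the alignment check above (illustrative): a x4 region must be
 * sized in multiples of SZ_256M * 4 == 1GB, so a 2GB request is valid while
 * a 512MB request fails the div_u64_rem() check with -EINVAL.
 */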
static void cxl_region_iomem_release(struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;

	if (device_is_registered(&cxlr->dev))
		lockdep_assert_held_write(&cxl_region_rwsem);
	if (p->res) {
		/*
		 * Autodiscovered regions may not have been able to insert
		 * their resource.
		 */
		if (p->res->parent)
			remove_resource(p->res);
		kfree(p->res);
		p->res = NULL;
	}
}

static int free_hpa(struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;

	lockdep_assert_held_write(&cxl_region_rwsem);

	if (!p->res)
		return 0;

	if (p->state >= CXL_CONFIG_ACTIVE)
		return -EBUSY;

	cxl_region_iomem_release(cxlr);
	p->state = CXL_CONFIG_IDLE;
	return 0;
}
static ssize_t size_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t len)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	u64 val;
	int rc;

	rc = kstrtou64(buf, 0, &val);
	if (rc)
		return rc;

	rc = down_write_killable(&cxl_region_rwsem);
	if (rc)
		return rc;

	if (val)
		rc = alloc_hpa(cxlr, val);
	else
		rc = free_hpa(cxlr);
	up_write(&cxl_region_rwsem);

	if (rc)
		return rc;

	return len;
}

static ssize_t size_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	u64 size = 0;
	ssize_t rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;
	if (p->res)
		size = resource_size(p->res);
	rc = sysfs_emit(buf, "%#llx\n", size);
	up_read(&cxl_region_rwsem);

	return rc;
}
static DEVICE_ATTR_RW(size);
static struct attribute *cxl_region_attrs[] = {
	&dev_attr_uuid.attr,
	&dev_attr_commit.attr,
	&dev_attr_interleave_ways.attr,
	&dev_attr_interleave_granularity.attr,
	&dev_attr_resource.attr,
	&dev_attr_size.attr,
	&dev_attr_mode.attr,
	NULL,
};

static const struct attribute_group cxl_region_group = {
	.attrs = cxl_region_attrs,
	.is_visible = cxl_region_visible,
};
static size_t show_targetN(struct cxl_region *cxlr, char *buf, int pos)
{
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_endpoint_decoder *cxled;
	int rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;

	if (pos >= p->interleave_ways) {
		dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
			p->interleave_ways);
		rc = -ENXIO;
		goto out;
	}

	cxled = p->targets[pos];
	if (!cxled)
		rc = sysfs_emit(buf, "\n");
	else
		rc = sysfs_emit(buf, "%s\n", dev_name(&cxled->cxld.dev));
out:
	up_read(&cxl_region_rwsem);

	return rc;
}
static int match_free_decoder(struct device *dev, void *data)
{
	struct cxl_decoder *cxld;
	int *id = data;

	if (!is_switch_decoder(dev))
		return 0;

	cxld = to_cxl_decoder(dev);

	/* enforce ordered allocation */
	if (cxld->id != *id)
		return 0;

	if (!cxld->region)
		return 1;

	(*id)++;

	return 0;
}

static struct cxl_decoder *cxl_region_find_decoder(struct cxl_port *port,
						   struct cxl_region *cxlr)
{
	struct device *dev;
	int id = 0;

	dev = device_find_child(&port->dev, &id, match_free_decoder);
	if (!dev)
		return NULL;
	/*
	 * This decoder is pinned (stays registered) as long as the endpoint
	 * decoder is registered, and endpoint decoder unregistration holds the
	 * cxl_region_rwsem over unregister events, so no need to hold on to
	 * this extra reference.
	 */
	put_device(dev);
	return to_cxl_decoder(dev);
}
static struct cxl_region_ref *alloc_region_ref(struct cxl_port *port,
					       struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_region_ref *cxl_rr, *iter;
	unsigned long index;
	int rc;

	xa_for_each(&port->regions, index, iter) {
		struct cxl_region_params *ip = &iter->region->params;

		if (!ip->res)
			continue;

		if (ip->res->start > p->res->start) {
			dev_dbg(&cxlr->dev,
				"%s: HPA order violation %s:%pr vs %pr\n",
				dev_name(&port->dev),
				dev_name(&iter->region->dev), ip->res, p->res);
			return ERR_PTR(-EBUSY);
		}
	}

	cxl_rr = kzalloc(sizeof(*cxl_rr), GFP_KERNEL);
	if (!cxl_rr)
		return ERR_PTR(-ENOMEM);
	cxl_rr->port = port;
	cxl_rr->region = cxlr;
	cxl_rr->nr_targets = 1;
	xa_init(&cxl_rr->endpoints);

	rc = xa_insert(&port->regions, (unsigned long)cxlr, cxl_rr, GFP_KERNEL);
	if (rc) {
		dev_dbg(&cxlr->dev,
			"%s: failed to track region reference: %d\n",
			dev_name(&port->dev), rc);
		kfree(cxl_rr);
		return ERR_PTR(rc);
	}

	return cxl_rr;
}
static void cxl_rr_free_decoder(struct cxl_region_ref *cxl_rr)
{
	struct cxl_region *cxlr = cxl_rr->region;
	struct cxl_decoder *cxld = cxl_rr->decoder;

	if (!cxld)
		return;

	dev_WARN_ONCE(&cxlr->dev, cxld->region != cxlr, "region mismatch\n");
	if (cxld->region == cxlr) {
		cxld->region = NULL;
		put_device(&cxlr->dev);
	}
}

static void free_region_ref(struct cxl_region_ref *cxl_rr)
{
	struct cxl_port *port = cxl_rr->port;
	struct cxl_region *cxlr = cxl_rr->region;

	cxl_rr_free_decoder(cxl_rr);
	xa_erase(&port->regions, (unsigned long)cxlr);
	xa_destroy(&cxl_rr->endpoints);
	kfree(cxl_rr);
}
static int cxl_rr_ep_add(struct cxl_region_ref *cxl_rr,
			 struct cxl_endpoint_decoder *cxled)
{
	int rc;
	struct cxl_port *port = cxl_rr->port;
	struct cxl_region *cxlr = cxl_rr->region;
	struct cxl_decoder *cxld = cxl_rr->decoder;
	struct cxl_ep *ep = cxl_ep_load(port, cxled_to_memdev(cxled));

	if (ep) {
		rc = xa_insert(&cxl_rr->endpoints, (unsigned long)cxled, ep,
			       GFP_KERNEL);
		if (rc)
			return rc;
	}
	cxl_rr->nr_eps++;

	if (!cxld->region) {
		cxld->region = cxlr;
		get_device(&cxlr->dev);
	}

	return 0;
}
static int cxl_rr_alloc_decoder(struct cxl_port *port, struct cxl_region *cxlr,
				struct cxl_endpoint_decoder *cxled,
				struct cxl_region_ref *cxl_rr)
{
	struct cxl_decoder *cxld;

	if (port == cxled_to_port(cxled))
		cxld = &cxled->cxld;
	else
		cxld = cxl_region_find_decoder(port, cxlr);
	if (!cxld) {
		dev_dbg(&cxlr->dev, "%s: no decoder available\n",
			dev_name(&port->dev));
		return -EBUSY;
	}

	if (cxld->region) {
		dev_dbg(&cxlr->dev, "%s: %s already attached to %s\n",
			dev_name(&port->dev), dev_name(&cxld->dev),
			dev_name(&cxld->region->dev));
		return -EBUSY;
	}

	/*
	 * Endpoints should already match the region type, but backstop that
	 * assumption with an assertion. Switch-decoders change mapping-type
	 * based on what is mapped when they are assigned to a region.
	 */
	dev_WARN_ONCE(&cxlr->dev,
		      port == cxled_to_port(cxled) &&
			      cxld->target_type != cxlr->type,
		      "%s:%s mismatch decoder type %d -> %d\n",
		      dev_name(&cxled_to_memdev(cxled)->dev),
		      dev_name(&cxld->dev), cxld->target_type, cxlr->type);
	cxld->target_type = cxlr->type;
	cxl_rr->decoder = cxld;
	return 0;
}
/**
 * cxl_port_attach_region() - track a region's interest in a port by endpoint
 * @port: port to add a new region reference 'struct cxl_region_ref'
 * @cxlr: region to attach to @port
 * @cxled: endpoint decoder used to create or further pin a region reference
 * @pos: interleave position of @cxled in @cxlr
 *
 * The attach event is an opportunity to validate CXL decode setup
 * constraints and record metadata needed for programming HDM decoders,
 * in particular decoder target lists.
 *
 * The steps are:
 *
 * - validate that there are no other regions with a higher HPA already
 *   associated with @port
 * - establish a region reference if one is not already present
 *
 *   - additionally allocate a decoder instance that will host @cxlr on
 *     @port
 *
 * - pin the region reference by the endpoint
 * - account for how many entries in @port's target list are needed to
 *   cover all of the added endpoints.
 */
static int cxl_port_attach_region(struct cxl_port *port,
				  struct cxl_region *cxlr,
				  struct cxl_endpoint_decoder *cxled, int pos)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_ep *ep = cxl_ep_load(port, cxlmd);
	struct cxl_region_ref *cxl_rr;
	bool nr_targets_inc = false;
	struct cxl_decoder *cxld;
	unsigned long index;
	int rc = -EBUSY;

	lockdep_assert_held_write(&cxl_region_rwsem);

	cxl_rr = cxl_rr_load(port, cxlr);
	if (cxl_rr) {
		struct cxl_ep *ep_iter;
		int found = 0;

		/*
		 * Walk the existing endpoints that have been attached to
		 * @cxlr at @port and see if they share the same 'next' port
		 * in the downstream direction. I.e. endpoints that share common
		 * upstream switch.
		 */
		xa_for_each(&cxl_rr->endpoints, index, ep_iter) {
			if (ep_iter == ep)
				continue;
			if (ep_iter->next == ep->next) {
				found++;
				break;
			}
		}

		/*
		 * New target port, or @port is an endpoint port that always
		 * accounts its own local decode as a target.
		 */
		if (!found || !ep->next) {
			cxl_rr->nr_targets++;
			nr_targets_inc = true;
		}
	} else {
		cxl_rr = alloc_region_ref(port, cxlr);
		if (IS_ERR(cxl_rr)) {
			dev_dbg(&cxlr->dev,
				"%s: failed to allocate region reference\n",
				dev_name(&port->dev));
			return PTR_ERR(cxl_rr);
		}
		nr_targets_inc = true;

		rc = cxl_rr_alloc_decoder(port, cxlr, cxled, cxl_rr);
		if (rc)
			goto out_erase;
	}
	cxld = cxl_rr->decoder;

	rc = cxl_rr_ep_add(cxl_rr, cxled);
	if (rc) {
		dev_dbg(&cxlr->dev,
			"%s: failed to track endpoint %s:%s reference\n",
			dev_name(&port->dev), dev_name(&cxlmd->dev),
			dev_name(&cxld->dev));
		goto out_erase;
	}

	dev_dbg(&cxlr->dev,
		"%s:%s %s add: %s:%s @ %d next: %s nr_eps: %d nr_targets: %d\n",
		dev_name(port->uport), dev_name(&port->dev),
		dev_name(&cxld->dev), dev_name(&cxlmd->dev),
		dev_name(&cxled->cxld.dev), pos,
		ep ? ep->next ? dev_name(ep->next->uport) :
				dev_name(&cxlmd->dev) :
		     "none",
		cxl_rr->nr_eps, cxl_rr->nr_targets);

	return 0;
out_erase:
	if (nr_targets_inc)
		cxl_rr->nr_targets--;
	if (cxl_rr->nr_eps == 0)
		free_region_ref(cxl_rr);
	return rc;
}
static void cxl_port_detach_region(struct cxl_port *port,
				   struct cxl_region *cxlr,
				   struct cxl_endpoint_decoder *cxled)
{
	struct cxl_region_ref *cxl_rr;
	struct cxl_ep *ep = NULL;

	lockdep_assert_held_write(&cxl_region_rwsem);

	cxl_rr = cxl_rr_load(port, cxlr);
	if (!cxl_rr)
		return;

	/*
	 * Endpoint ports do not carry cxl_ep references, and they
	 * never target more than one endpoint by definition
	 */
	if (cxl_rr->decoder == &cxled->cxld)
		cxl_rr->nr_eps--;
	else
		ep = xa_erase(&cxl_rr->endpoints, (unsigned long)cxled);
	if (ep) {
		struct cxl_ep *ep_iter;
		unsigned long index;
		int found = 0;

		cxl_rr->nr_eps--;
		xa_for_each(&cxl_rr->endpoints, index, ep_iter) {
			if (ep_iter->next == ep->next) {
				found++;
				break;
			}
		}
		if (!found)
			cxl_rr->nr_targets--;
	}

	if (cxl_rr->nr_eps == 0)
		free_region_ref(cxl_rr);
}
static int check_last_peer(struct cxl_endpoint_decoder *cxled,
			   struct cxl_ep *ep, struct cxl_region_ref *cxl_rr,
			   int distance)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_region *cxlr = cxl_rr->region;
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_endpoint_decoder *cxled_peer;
	struct cxl_port *port = cxl_rr->port;
	struct cxl_memdev *cxlmd_peer;
	struct cxl_ep *ep_peer;
	int pos = cxled->pos;

	/*
	 * If this position wants to share a dport with the last endpoint mapped
	 * then that endpoint, at index 'position - distance', must also be
	 * mapped by this dport.
	 */
	if (pos < distance) {
		dev_dbg(&cxlr->dev, "%s:%s: cannot host %s:%s at %d\n",
			dev_name(port->uport), dev_name(&port->dev),
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos);
		return -ENXIO;
	}
	cxled_peer = p->targets[pos - distance];
	cxlmd_peer = cxled_to_memdev(cxled_peer);
	ep_peer = cxl_ep_load(port, cxlmd_peer);
	if (ep->dport != ep_peer->dport) {
		dev_dbg(&cxlr->dev,
			"%s:%s: %s:%s pos %d mismatched peer %s:%s\n",
			dev_name(port->uport), dev_name(&port->dev),
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos,
			dev_name(&cxlmd_peer->dev),
			dev_name(&cxled_peer->cxld.dev));
		return -ENXIO;
	}

	return 0;
}
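
/*
 * Illustrative distance example: in a 4-way region whose endpoints reach a
 * port through 2 dports (distance = 4 / 2 = 2), position 2 may only share a
 * dport with position 0, and position 3 only with position 1.
 */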
static int cxl_port_setup_targets(struct cxl_port *port,
				  struct cxl_region *cxlr,
				  struct cxl_endpoint_decoder *cxled)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
	int parent_iw, parent_ig, ig, iw, rc, inc = 0, pos = cxled->pos;
	struct cxl_port *parent_port = to_cxl_port(port->dev.parent);
	struct cxl_region_ref *cxl_rr = cxl_rr_load(port, cxlr);
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_ep *ep = cxl_ep_load(port, cxlmd);
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_decoder *cxld = cxl_rr->decoder;
	struct cxl_switch_decoder *cxlsd;
	u16 eig, peig;
	u8 eiw, peiw;

	/*
	 * While root level decoders support x3, x6, x12, switch level
	 * decoders only support powers of 2 up to x16.
	 */
	if (!is_power_of_2(cxl_rr->nr_targets)) {
		dev_dbg(&cxlr->dev, "%s:%s: invalid target count %d\n",
			dev_name(port->uport), dev_name(&port->dev),
			cxl_rr->nr_targets);
		return -EINVAL;
	}

	cxlsd = to_cxl_switch_decoder(&cxld->dev);
	if (cxl_rr->nr_targets_set) {
		int i, distance;

		/*
		 * Passthrough decoders impose no distance requirements between
		 * peers
		 */
		if (cxl_rr->nr_targets == 1)
			distance = 0;
		else
			distance = p->nr_targets / cxl_rr->nr_targets;
		for (i = 0; i < cxl_rr->nr_targets_set; i++)
			if (ep->dport == cxlsd->target[i]) {
				rc = check_last_peer(cxled, ep, cxl_rr,
						     distance);
				if (rc)
					return rc;
				goto out_target_set;
			}
		goto add_target;
	}

	if (is_cxl_root(parent_port)) {
		parent_ig = cxlrd->cxlsd.cxld.interleave_granularity;
		parent_iw = cxlrd->cxlsd.cxld.interleave_ways;
		/*
		 * For purposes of address bit routing, use power-of-2 math for
		 * switch ports.
		 */
		if (!is_power_of_2(parent_iw))
			parent_iw /= 3;
	} else {
		struct cxl_region_ref *parent_rr;
		struct cxl_decoder *parent_cxld;

		parent_rr = cxl_rr_load(parent_port, cxlr);
		parent_cxld = parent_rr->decoder;
		parent_ig = parent_cxld->interleave_granularity;
		parent_iw = parent_cxld->interleave_ways;
	}

	rc = granularity_to_eig(parent_ig, &peig);
	if (rc) {
		dev_dbg(&cxlr->dev, "%s:%s: invalid parent granularity: %d\n",
			dev_name(parent_port->uport),
			dev_name(&parent_port->dev), parent_ig);
		return rc;
	}

	rc = ways_to_eiw(parent_iw, &peiw);
	if (rc) {
		dev_dbg(&cxlr->dev, "%s:%s: invalid parent interleave: %d\n",
			dev_name(parent_port->uport),
			dev_name(&parent_port->dev), parent_iw);
		return rc;
	}

	iw = cxl_rr->nr_targets;
	rc = ways_to_eiw(iw, &eiw);
	if (rc) {
		dev_dbg(&cxlr->dev, "%s:%s: invalid port interleave: %d\n",
			dev_name(port->uport), dev_name(&port->dev), iw);
		return rc;
	}

	/*
	 * If @parent_port is masking address bits, pick the next unused address
	 * bit to route @port's targets.
	 */
	if (parent_iw > 1 && cxl_rr->nr_targets > 1) {
		u32 address_bit = max(peig + peiw, eiw + peig);

		eig = address_bit - eiw + 1;
	} else {
		eig = peig;
	}

	rc = eig_to_granularity(eig, &ig);
	if (rc) {
		dev_dbg(&cxlr->dev, "%s:%s: invalid interleave: %d\n",
			dev_name(port->uport), dev_name(&port->dev),
			256 << eig);
		return rc;
	}

	if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
		if (cxld->interleave_ways != iw ||
		    cxld->interleave_granularity != ig ||
		    cxld->hpa_range.start != p->res->start ||
		    cxld->hpa_range.end != p->res->end ||
		    ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)) {
			dev_err(&cxlr->dev,
				"%s:%s %s expected iw: %d ig: %d %pr\n",
				dev_name(port->uport), dev_name(&port->dev),
				__func__, iw, ig, p->res);
			dev_err(&cxlr->dev,
				"%s:%s %s got iw: %d ig: %d state: %s %#llx:%#llx\n",
				dev_name(port->uport), dev_name(&port->dev),
				__func__, cxld->interleave_ways,
				cxld->interleave_granularity,
				(cxld->flags & CXL_DECODER_F_ENABLE) ?
					"enabled" :
					"disabled",
				cxld->hpa_range.start, cxld->hpa_range.end);
			return -ENXIO;
		}
	} else {
		cxld->interleave_ways = iw;
		cxld->interleave_granularity = ig;
		cxld->hpa_range = (struct range) {
			.start = p->res->start,
			.end = p->res->end,
		};
	}
	dev_dbg(&cxlr->dev, "%s:%s iw: %d ig: %d\n", dev_name(port->uport),
		dev_name(&port->dev), iw, ig);
add_target:
	if (cxl_rr->nr_targets_set == cxl_rr->nr_targets) {
		dev_dbg(&cxlr->dev,
			"%s:%s: targets full trying to add %s:%s at %d\n",
			dev_name(port->uport), dev_name(&port->dev),
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos);
		return -ENXIO;
	}
	if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
		if (cxlsd->target[cxl_rr->nr_targets_set] != ep->dport) {
			dev_dbg(&cxlr->dev, "%s:%s: %s expected %s at %d\n",
				dev_name(port->uport), dev_name(&port->dev),
				dev_name(&cxlsd->cxld.dev),
				dev_name(ep->dport->dport),
				cxl_rr->nr_targets_set);
			return -ENXIO;
		}
	} else
		cxlsd->target[cxl_rr->nr_targets_set] = ep->dport;
	inc = 1;
out_target_set:
	cxl_rr->nr_targets_set += inc;
	dev_dbg(&cxlr->dev, "%s:%s target[%d] = %s for %s:%s @ %d\n",
		dev_name(port->uport), dev_name(&port->dev),
		cxl_rr->nr_targets_set - 1, dev_name(ep->dport->dport),
		dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos);

	return 0;
}
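
/*
 * Worked example of the address bit selection above (illustrative, using
 * granularity = 256 << eig): a x2 parent at 256B granularity (peig = 0,
 * peiw = 1) feeding a x2 port (eiw = 1) yields
 * address_bit = max(0 + 1, 1 + 0) = 1 and eig = 1 - 1 + 1 = 1, i.e. 512B
 * granularity, so the port routes on host address bit 9 while the parent
 * consumes bit 8.
 */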
static void cxl_port_reset_targets(struct cxl_port *port,
				   struct cxl_region *cxlr)
{
	struct cxl_region_ref *cxl_rr = cxl_rr_load(port, cxlr);
	struct cxl_decoder *cxld;

	/*
	 * After the last endpoint has been detached the entire cxl_rr may now
	 * be gone.
	 */
	if (!cxl_rr)
		return;
	cxl_rr->nr_targets_set = 0;

	cxld = cxl_rr->decoder;
	cxld->hpa_range = (struct range) {
		.start = 0,
		.end = -1,
	};
}
static void cxl_region_teardown_targets(struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_endpoint_decoder *cxled;
	struct cxl_dev_state *cxlds;
	struct cxl_memdev *cxlmd;
	struct cxl_port *iter;
	struct cxl_ep *ep;
	int i;

	/*
	 * In the auto-discovery case skip automatic teardown since the
	 * address space is already active
	 */
	if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags))
		return;

	for (i = 0; i < p->nr_targets; i++) {
		cxled = p->targets[i];
		cxlmd = cxled_to_memdev(cxled);
		cxlds = cxlmd->cxlds;

		if (cxlds->rcd)
			continue;

		iter = cxled_to_port(cxled);
		while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
			iter = to_cxl_port(iter->dev.parent);

		for (ep = cxl_ep_load(iter, cxlmd); iter;
		     iter = ep->next, ep = cxl_ep_load(iter, cxlmd))
			cxl_port_reset_targets(iter, cxlr);
	}
}
static int cxl_region_setup_targets(struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_endpoint_decoder *cxled;
	struct cxl_dev_state *cxlds;
	int i, rc, rch = 0, vh = 0;
	struct cxl_memdev *cxlmd;
	struct cxl_port *iter;
	struct cxl_ep *ep;

	for (i = 0; i < p->nr_targets; i++) {
		cxled = p->targets[i];
		cxlmd = cxled_to_memdev(cxled);
		cxlds = cxlmd->cxlds;

		/* validate that all targets agree on topology */
		if (!cxlds->rcd) {
			vh++;
		} else {
			rch++;
			continue;
		}

		iter = cxled_to_port(cxled);
		while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
			iter = to_cxl_port(iter->dev.parent);

		/*
		 * Descend the topology tree programming / validating
		 * targets while looking for conflicts.
		 */
		for (ep = cxl_ep_load(iter, cxlmd); iter;
		     iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
			rc = cxl_port_setup_targets(iter, cxlr, cxled);
			if (rc) {
				cxl_region_teardown_targets(cxlr);
				return rc;
			}
		}
	}

	if (rch && vh) {
		dev_err(&cxlr->dev, "mismatched CXL topologies detected\n");
		cxl_region_teardown_targets(cxlr);
		return -ENXIO;
	}

	return 0;
}
static int cxl_region_validate_position(struct cxl_region *cxlr,
					struct cxl_endpoint_decoder *cxled,
					int pos)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_region_params *p = &cxlr->params;
	int i;

	if (pos < 0 || pos >= p->interleave_ways) {
		dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
			p->interleave_ways);
		return -ENXIO;
	}

	if (p->targets[pos] == cxled)
		return 0;

	if (p->targets[pos]) {
		struct cxl_endpoint_decoder *cxled_target = p->targets[pos];
		struct cxl_memdev *cxlmd_target = cxled_to_memdev(cxled_target);

		dev_dbg(&cxlr->dev, "position %d already assigned to %s:%s\n",
			pos, dev_name(&cxlmd_target->dev),
			dev_name(&cxled_target->cxld.dev));
		return -EBUSY;
	}

	for (i = 0; i < p->interleave_ways; i++) {
		struct cxl_endpoint_decoder *cxled_target;
		struct cxl_memdev *cxlmd_target;

		cxled_target = p->targets[i];
		if (!cxled_target)
			continue;

		cxlmd_target = cxled_to_memdev(cxled_target);
		if (cxlmd_target == cxlmd) {
			dev_dbg(&cxlr->dev,
				"%s already specified at position %d via: %s\n",
				dev_name(&cxlmd->dev), pos,
				dev_name(&cxled_target->cxld.dev));
			return -EBUSY;
		}
	}

	return 0;
}
static int cxl_region_attach_position(struct cxl_region *cxlr,
				      struct cxl_root_decoder *cxlrd,
				      struct cxl_endpoint_decoder *cxled,
				      const struct cxl_dport *dport, int pos)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_port *iter;
	int rc;

	if (cxlrd->calc_hb(cxlrd, pos) != dport) {
		dev_dbg(&cxlr->dev, "%s:%s invalid target position for %s\n",
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
			dev_name(&cxlrd->cxlsd.cxld.dev));
		return -ENXIO;
	}

	for (iter = cxled_to_port(cxled); !is_cxl_root(iter);
	     iter = to_cxl_port(iter->dev.parent)) {
		rc = cxl_port_attach_region(iter, cxlr, cxled, pos);
		if (rc)
			goto err;
	}

	return 0;

err:
	for (iter = cxled_to_port(cxled); !is_cxl_root(iter);
	     iter = to_cxl_port(iter->dev.parent))
		cxl_port_detach_region(iter, cxlr, cxled);
	return rc;
}
static int cxl_region_attach_auto(struct cxl_region *cxlr,
				  struct cxl_endpoint_decoder *cxled, int pos)
{
	struct cxl_region_params *p = &cxlr->params;

	if (cxled->state != CXL_DECODER_STATE_AUTO) {
		dev_err(&cxlr->dev,
			"%s: unable to add decoder to autodetected region\n",
			dev_name(&cxled->cxld.dev));
		return -EINVAL;
	}

	if (pos >= 0) {
		dev_dbg(&cxlr->dev, "%s: expected auto position, not %d\n",
			dev_name(&cxled->cxld.dev), pos);
		return -EINVAL;
	}

	if (p->nr_targets >= p->interleave_ways) {
		dev_err(&cxlr->dev, "%s: no more target slots available\n",
			dev_name(&cxled->cxld.dev));
		return -ENXIO;
	}

	/*
	 * Temporarily record the endpoint decoder into the target array. Yes,
	 * this means that userspace can view devices in the wrong position
	 * before the region activates, and must be careful to understand when
	 * it might be racing region autodiscovery.
	 */
	pos = p->nr_targets;
	p->targets[pos] = cxled;
	cxled->pos = pos;
	p->nr_targets++;

	return 0;
}
static struct cxl_port *next_port(struct cxl_port *port)
{
	if (!port->parent_dport)
		return NULL;
	return port->parent_dport->port;
}

static int decoder_match_range(struct device *dev, void *data)
{
	struct cxl_endpoint_decoder *cxled = data;
	struct cxl_switch_decoder *cxlsd;

	if (!is_switch_decoder(dev))
		return 0;

	cxlsd = to_cxl_switch_decoder(dev);
	return range_contains(&cxlsd->cxld.hpa_range, &cxled->cxld.hpa_range);
}
static void find_positions(const struct cxl_switch_decoder *cxlsd,
			   const struct cxl_port *iter_a,
			   const struct cxl_port *iter_b, int *a_pos,
			   int *b_pos)
{
	int i;

	for (i = 0, *a_pos = -1, *b_pos = -1; i < cxlsd->nr_targets; i++) {
		if (cxlsd->target[i] == iter_a->parent_dport)
			*a_pos = i;
		else if (cxlsd->target[i] == iter_b->parent_dport)
			*b_pos = i;
		if (*a_pos >= 0 && *b_pos >= 0)
			break;
	}
}
static int cmp_decode_pos(const void *a, const void *b)
{
	struct cxl_endpoint_decoder *cxled_a = *(typeof(cxled_a) *)a;
	struct cxl_endpoint_decoder *cxled_b = *(typeof(cxled_b) *)b;
	struct cxl_memdev *cxlmd_a = cxled_to_memdev(cxled_a);
	struct cxl_memdev *cxlmd_b = cxled_to_memdev(cxled_b);
	struct cxl_port *port_a = cxled_to_port(cxled_a);
	struct cxl_port *port_b = cxled_to_port(cxled_b);
	struct cxl_port *iter_a, *iter_b, *port = NULL;
	struct cxl_switch_decoder *cxlsd;
	struct device *dev;
	int a_pos, b_pos;
	unsigned int seq;

	/* Exit early if any prior sorting failed */
	if (cxled_a->pos < 0 || cxled_b->pos < 0)
		return 0;

	/*
	 * Walk up the hierarchy to find a shared port, find the decoder that
	 * maps the range, compare the relative position of those dport
	 * mappings.
	 */
	for (iter_a = port_a; iter_a; iter_a = next_port(iter_a)) {
		struct cxl_port *next_a, *next_b;

		next_a = next_port(iter_a);
		if (!next_a)
			break;

		for (iter_b = port_b; iter_b; iter_b = next_port(iter_b)) {
			next_b = next_port(iter_b);
			if (next_a != next_b)
				continue;
			port = next_a;
			break;
		}

		if (port)
			break;
	}

	if (!port) {
		dev_err(cxlmd_a->dev.parent,
			"failed to find shared port with %s\n",
			dev_name(cxlmd_b->dev.parent));
		goto err;
	}

	dev = device_find_child(&port->dev, cxled_a, decoder_match_range);
	if (!dev) {
		struct range *range = &cxled_a->cxld.hpa_range;

		dev_err(port->uport,
			"failed to find decoder that maps %#llx-%#llx\n",
			range->start, range->end);
		goto err;
	}

	cxlsd = to_cxl_switch_decoder(dev);
	do {
		seq = read_seqbegin(&cxlsd->target_lock);
		find_positions(cxlsd, iter_a, iter_b, &a_pos, &b_pos);
	} while (read_seqretry(&cxlsd->target_lock, seq));

	put_device(dev);

	if (a_pos < 0 || b_pos < 0) {
		dev_err(port->uport,
			"failed to find shared decoder for %s and %s\n",
			dev_name(cxlmd_a->dev.parent),
			dev_name(cxlmd_b->dev.parent));
		goto err;
	}

	dev_dbg(port->uport, "%s comes %s %s\n", dev_name(cxlmd_a->dev.parent),
		a_pos - b_pos < 0 ? "before" : "after",
		dev_name(cxlmd_b->dev.parent));

	return a_pos - b_pos;
err:
	cxled_a->pos = -1;
	return 0;
}
static int cxl_region_sort_targets(struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;
	int i, rc = 0;

	sort(p->targets, p->nr_targets, sizeof(p->targets[0]), cmp_decode_pos,
	     NULL);

	for (i = 0; i < p->nr_targets; i++) {
		struct cxl_endpoint_decoder *cxled = p->targets[i];

		/*
		 * Record that sorting failed, but still continue to restore
		 * cxled->pos with its ->targets[] position so that follow-on
		 * code paths can reliably do p->targets[cxled->pos] to
		 * self-reference their entry.
		 */
		if (cxled->pos < 0)
			rc = -ENXIO;
		cxled->pos = i;
	}

	dev_dbg(&cxlr->dev, "region sort %s\n", rc ? "failed" : "successful");
	return rc;
}
static int cxl_region_attach(struct cxl_region *cxlr,
			     struct cxl_endpoint_decoder *cxled, int pos)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_port *ep_port, *root_port;
	struct cxl_dport *dport;
	int rc = -ENXIO;

	if (cxled->mode != cxlr->mode) {
		dev_dbg(&cxlr->dev, "%s region mode: %d mismatch: %d\n",
			dev_name(&cxled->cxld.dev), cxlr->mode, cxled->mode);
		return -EINVAL;
	}

	if (cxled->mode == CXL_DECODER_DEAD) {
		dev_dbg(&cxlr->dev, "%s dead\n", dev_name(&cxled->cxld.dev));
		return -ENODEV;
	}

	/* all full of members, or interleave config not established? */
	if (p->state > CXL_CONFIG_INTERLEAVE_ACTIVE) {
		dev_dbg(&cxlr->dev, "region already active\n");
		return -EBUSY;
	} else if (p->state < CXL_CONFIG_INTERLEAVE_ACTIVE) {
		dev_dbg(&cxlr->dev, "interleave config missing\n");
		return -ENXIO;
	}

	ep_port = cxled_to_port(cxled);
	root_port = cxlrd_to_port(cxlrd);
	dport = cxl_find_dport_by_dev(root_port, ep_port->host_bridge);
	if (!dport) {
		dev_dbg(&cxlr->dev, "%s:%s invalid target for %s\n",
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
			dev_name(cxlr->dev.parent));
		return -ENXIO;
	}

	if (cxled->cxld.target_type != cxlr->type) {
		dev_dbg(&cxlr->dev, "%s:%s type mismatch: %d vs %d\n",
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
			cxled->cxld.target_type, cxlr->type);
		return -ENXIO;
	}

	if (!cxled->dpa_res) {
		dev_dbg(&cxlr->dev, "%s:%s: missing DPA allocation.\n",
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev));
		return -ENXIO;
	}

	if (resource_size(cxled->dpa_res) * p->interleave_ways !=
	    resource_size(p->res)) {
		dev_dbg(&cxlr->dev,
			"%s:%s: decoder-size-%#llx * ways-%d != region-size-%#llx\n",
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
			(u64)resource_size(cxled->dpa_res), p->interleave_ways,
			(u64)resource_size(p->res));
		return -EINVAL;
	}

	if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
		int i;

		rc = cxl_region_attach_auto(cxlr, cxled, pos);
		if (rc)
			return rc;

		/* await more targets to arrive... */
		if (p->nr_targets < p->interleave_ways)
			return 0;

		/*
		 * All targets are here, which implies all PCI enumeration that
		 * affects this region has been completed. Walk the topology to
		 * sort the devices into their relative region decode position.
		 */
		rc = cxl_region_sort_targets(cxlr);
		if (rc)
			return rc;

		for (i = 0; i < p->nr_targets; i++) {
			cxled = p->targets[i];
			ep_port = cxled_to_port(cxled);
			dport = cxl_find_dport_by_dev(root_port,
						      ep_port->host_bridge);
			rc = cxl_region_attach_position(cxlr, cxlrd, cxled,
							dport, i);
			if (rc)
				return rc;
		}

		rc = cxl_region_setup_targets(cxlr);
		if (rc)
			return rc;

		/*
		 * If target setup succeeds in the autodiscovery case
		 * then the region is already committed.
		 */
		p->state = CXL_CONFIG_COMMIT;

		return 0;
	}

	rc = cxl_region_validate_position(cxlr, cxled, pos);
	if (rc)
		return rc;

	rc = cxl_region_attach_position(cxlr, cxlrd, cxled, dport, pos);
	if (rc)
		return rc;

	p->targets[pos] = cxled;
	cxled->pos = pos;
	p->nr_targets++;

	if (p->nr_targets == p->interleave_ways) {
		rc = cxl_region_setup_targets(cxlr);
		if (rc)
			goto err_decrement;
		p->state = CXL_CONFIG_ACTIVE;
	}

	cxled->cxld.interleave_ways = p->interleave_ways;
	cxled->cxld.interleave_granularity = p->interleave_granularity;
	cxled->cxld.hpa_range = (struct range) {
		.start = p->res->start,
		.end = p->res->end,
	};

	return 0;

err_decrement:
	p->nr_targets--;
	cxled->pos = -1;
	p->targets[pos] = NULL;
	return rc;
}
static int cxl_region_detach(struct cxl_endpoint_decoder *cxled)
{
	struct cxl_port *iter, *ep_port = cxled_to_port(cxled);
	struct cxl_region *cxlr = cxled->cxld.region;
	struct cxl_region_params *p;
	int rc = 0;

	lockdep_assert_held_write(&cxl_region_rwsem);

	if (!cxlr)
		return 0;

	p = &cxlr->params;
	get_device(&cxlr->dev);

	if (p->state > CXL_CONFIG_ACTIVE) {
		/*
		 * TODO: tear down all impacted regions if a device is
		 * removed out of order
		 */
		rc = cxl_region_decode_reset(cxlr, p->interleave_ways);
		if (rc)
			goto out;
		p->state = CXL_CONFIG_ACTIVE;
	}

	for (iter = ep_port; !is_cxl_root(iter);
	     iter = to_cxl_port(iter->dev.parent))
		cxl_port_detach_region(iter, cxlr, cxled);

	if (cxled->pos < 0 || cxled->pos >= p->interleave_ways ||
	    p->targets[cxled->pos] != cxled) {
		struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);

		dev_WARN_ONCE(&cxlr->dev, 1, "expected %s:%s at position %d\n",
			      dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
			      cxled->pos);
		goto out;
	}

	if (p->state == CXL_CONFIG_ACTIVE) {
		p->state = CXL_CONFIG_INTERLEAVE_ACTIVE;
		cxl_region_teardown_targets(cxlr);
	}
	p->targets[cxled->pos] = NULL;
	p->nr_targets--;
	cxled->cxld.hpa_range = (struct range) {
		.start = 0,
		.end = -1,
	};

	/* notify the region driver that one of its targets has departed */
	up_write(&cxl_region_rwsem);
	device_release_driver(&cxlr->dev);
	down_write(&cxl_region_rwsem);
out:
	put_device(&cxlr->dev);
	return rc;
}
void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled)
{
	down_write(&cxl_region_rwsem);
	cxled->mode = CXL_DECODER_DEAD;
	cxl_region_detach(cxled);
	up_write(&cxl_region_rwsem);
}
static int attach_target(struct cxl_region *cxlr,
			 struct cxl_endpoint_decoder *cxled, int pos,
			 unsigned int state)
{
	int rc = 0;

	if (state == TASK_INTERRUPTIBLE)
		rc = down_write_killable(&cxl_region_rwsem);
	else
		down_write(&cxl_region_rwsem);
	if (rc)
		return rc;

	down_read(&cxl_dpa_rwsem);
	rc = cxl_region_attach(cxlr, cxled, pos);
	up_read(&cxl_dpa_rwsem);
	up_write(&cxl_region_rwsem);
	return rc;
}
static int detach_target(struct cxl_region *cxlr, int pos)
{
	struct cxl_region_params *p = &cxlr->params;
	int rc;

	rc = down_write_killable(&cxl_region_rwsem);
	if (rc)
		return rc;

	if (pos >= p->interleave_ways) {
		dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
			p->interleave_ways);
		rc = -ENXIO;
		goto out;
	}

	if (!p->targets[pos]) {
		rc = 0;
		goto out;
	}

	rc = cxl_region_detach(p->targets[pos]);
out:
	up_write(&cxl_region_rwsem);
	return rc;
}
static size_t store_targetN(struct cxl_region *cxlr, const char *buf, int pos,
			    size_t len)
{
	int rc;

	if (sysfs_streq(buf, "\n"))
		rc = detach_target(cxlr, pos);
	else {
		struct device *dev;

		dev = bus_find_device_by_name(&cxl_bus_type, NULL, buf);
		if (!dev)
			return -ENODEV;

		if (!is_endpoint_decoder(dev)) {
			rc = -EINVAL;
			goto out;
		}

		rc = attach_target(cxlr, to_cxl_endpoint_decoder(dev), pos,
				   TASK_INTERRUPTIBLE);
out:
		put_device(dev);
	}

	if (rc < 0)
		return rc;
	return len;
}
#define TARGET_ATTR_RW(n)						\
static ssize_t target##n##_show(					\
	struct device *dev, struct device_attribute *attr, char *buf)	\
{									\
	return show_targetN(to_cxl_region(dev), buf, (n));		\
}									\
static ssize_t target##n##_store(struct device *dev,			\
				 struct device_attribute *attr,		\
				 const char *buf, size_t len)		\
{									\
	return store_targetN(to_cxl_region(dev), buf, (n), len);	\
}									\
static DEVICE_ATTR_RW(target##n)

TARGET_ATTR_RW(0);
TARGET_ATTR_RW(1);
TARGET_ATTR_RW(2);
TARGET_ATTR_RW(3);
TARGET_ATTR_RW(4);
TARGET_ATTR_RW(5);
TARGET_ATTR_RW(6);
TARGET_ATTR_RW(7);
TARGET_ATTR_RW(8);
TARGET_ATTR_RW(9);
TARGET_ATTR_RW(10);
TARGET_ATTR_RW(11);
TARGET_ATTR_RW(12);
TARGET_ATTR_RW(13);
TARGET_ATTR_RW(14);
TARGET_ATTR_RW(15);

static struct attribute *target_attrs[] = {
	&dev_attr_target0.attr,
	&dev_attr_target1.attr,
	&dev_attr_target2.attr,
	&dev_attr_target3.attr,
	&dev_attr_target4.attr,
	&dev_attr_target5.attr,
	&dev_attr_target6.attr,
	&dev_attr_target7.attr,
	&dev_attr_target8.attr,
	&dev_attr_target9.attr,
	&dev_attr_target10.attr,
	&dev_attr_target11.attr,
	&dev_attr_target12.attr,
	&dev_attr_target13.attr,
	&dev_attr_target14.attr,
	&dev_attr_target15.attr,
	NULL,
};
static umode_t cxl_region_target_visible(struct kobject *kobj,
					 struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;

	if (n < p->interleave_ways)
		return a->mode;
	return 0;
}

static const struct attribute_group cxl_region_target_group = {
	.attrs = target_attrs,
	.is_visible = cxl_region_target_visible,
};

static const struct attribute_group *get_cxl_region_target_group(void)
{
	return &cxl_region_target_group;
}
static const struct attribute_group *region_groups[] = {
	&cxl_base_attribute_group,
	&cxl_region_group,
	&cxl_region_target_group,
	NULL,
};
static void cxl_region_release(struct device *dev)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent);
	struct cxl_region *cxlr = to_cxl_region(dev);
	int id = atomic_read(&cxlrd->region_id);

	/*
	 * Try to reuse the recently idled id rather than the cached
	 * next id to prevent the region id space from increasing
	 * unnecessarily.
	 */
	if (cxlr->id < id)
		if (atomic_try_cmpxchg(&cxlrd->region_id, &id, cxlr->id)) {
			memregion_free(id);
			goto out;
		}

	memregion_free(cxlr->id);
out:
	put_device(dev->parent);
	kfree(cxlr);
}
const struct device_type cxl_region_type = {
	.name = "cxl_region",
	.release = cxl_region_release,
	.groups = region_groups
};

bool is_cxl_region(struct device *dev)
{
	return dev->type == &cxl_region_type;
}
EXPORT_SYMBOL_NS_GPL(is_cxl_region, CXL);

static struct cxl_region *to_cxl_region(struct device *dev)
{
	if (dev_WARN_ONCE(dev, dev->type != &cxl_region_type,
			  "not a cxl_region device\n"))
		return NULL;

	return container_of(dev, struct cxl_region, dev);
}
static void unregister_region(void *dev)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	int i;

	device_del(dev);

	/*
	 * Now that region sysfs is shutdown, the parameter block is
	 * read-only, so no need to hold the region rwsem to access the
	 * region parameters.
	 */
	for (i = 0; i < p->interleave_ways; i++)
		detach_target(cxlr, i);

	cxl_region_iomem_release(cxlr);
	put_device(dev);
}
static struct lock_class_key cxl_region_key;

static struct cxl_region *cxl_region_alloc(struct cxl_root_decoder *cxlrd, int id)
{
	struct cxl_region *cxlr;
	struct device *dev;

	cxlr = kzalloc(sizeof(*cxlr), GFP_KERNEL);
	if (!cxlr) {
		memregion_free(id);
		return ERR_PTR(-ENOMEM);
	}

	dev = &cxlr->dev;
	device_initialize(dev);
	lockdep_set_class(&dev->mutex, &cxl_region_key);
	dev->parent = &cxlrd->cxlsd.cxld.dev;
	/*
	 * Keep root decoder pinned through cxl_region_release to fixup
	 * region id allocations
	 */
	get_device(dev->parent);
	device_set_pm_not_required(dev);
	dev->bus = &cxl_bus_type;
	dev->type = &cxl_region_type;
	cxlr->id = id;

	return cxlr;
}
/**
 * devm_cxl_add_region - Adds a region to a decoder
 * @cxlrd: root decoder
 * @id: memregion id to create, or memregion_free() on failure
 * @mode: mode for the endpoint decoders of this region
 * @type: select whether this is an expander or accelerator (type-2 or type-3)
 *
 * This is the second step of region initialization. Regions exist within an
 * address space which is mapped by a @cxlrd.
 *
 * Return: the new region on success, else an ERR_PTR() encoded negative error
 * code. The region will be named "regionZ" where Z is the unique region number.
 */
static struct cxl_region *devm_cxl_add_region(struct cxl_root_decoder *cxlrd,
					      int id,
					      enum cxl_decoder_mode mode,
					      enum cxl_decoder_type type)
{
	struct cxl_port *port = to_cxl_port(cxlrd->cxlsd.cxld.dev.parent);
	struct cxl_region *cxlr;
	struct device *dev;
	int rc;

	switch (mode) {
	case CXL_DECODER_RAM:
	case CXL_DECODER_PMEM:
		break;
	default:
		dev_err(&cxlrd->cxlsd.cxld.dev, "unsupported mode %d\n", mode);
		return ERR_PTR(-EINVAL);
	}

	cxlr = cxl_region_alloc(cxlrd, id);
	if (IS_ERR(cxlr))
		return cxlr;
	cxlr->mode = mode;
	cxlr->type = type;

	dev = &cxlr->dev;
	rc = dev_set_name(dev, "region%d", id);
	if (rc)
		goto err;

	rc = device_add(dev);
	if (rc)
		goto err;

	rc = devm_add_action_or_reset(port->uport, unregister_region, cxlr);
	if (rc)
		return ERR_PTR(rc);

	dev_dbg(port->uport, "%s: created %s\n",
		dev_name(&cxlrd->cxlsd.cxld.dev), dev_name(dev));
	return cxlr;

err:
	put_device(dev);
	return ERR_PTR(rc);
}
static ssize_t __create_region_show(struct cxl_root_decoder *cxlrd, char *buf)
{
	return sysfs_emit(buf, "region%u\n", atomic_read(&cxlrd->region_id));
}

static ssize_t create_pmem_region_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	return __create_region_show(to_cxl_root_decoder(dev), buf);
}

static ssize_t create_ram_region_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	return __create_region_show(to_cxl_root_decoder(dev), buf);
}

static struct cxl_region *__create_region(struct cxl_root_decoder *cxlrd,
					  enum cxl_decoder_mode mode, int id)
{
	int rc;

	rc = memregion_alloc(GFP_KERNEL);
	if (rc < 0)
		return ERR_PTR(rc);

	if (atomic_cmpxchg(&cxlrd->region_id, id, rc) != id) {
		memregion_free(rc);
		return ERR_PTR(-EBUSY);
	}

	return devm_cxl_add_region(cxlrd, id, mode, CXL_DECODER_HOSTONLYMEM);
}
static ssize_t create_pmem_region_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t len)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
	struct cxl_region *cxlr;
	int rc, id;

	rc = sscanf(buf, "region%d\n", &id);
	if (rc != 1)
		return -EINVAL;

	cxlr = __create_region(cxlrd, CXL_DECODER_PMEM, id);
	if (IS_ERR(cxlr))
		return PTR_ERR(cxlr);

	return len;
}
DEVICE_ATTR_RW(create_pmem_region);

static ssize_t create_ram_region_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t len)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
	struct cxl_region *cxlr;
	int rc, id;

	rc = sscanf(buf, "region%d\n", &id);
	if (rc != 1)
		return -EINVAL;

	cxlr = __create_region(cxlrd, CXL_DECODER_RAM, id);
	if (IS_ERR(cxlr))
		return PTR_ERR(cxlr);

	return len;
}
DEVICE_ATTR_RW(create_ram_region);
static ssize_t region_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);
	ssize_t rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;

	if (cxld->region)
		rc = sysfs_emit(buf, "%s\n", dev_name(&cxld->region->dev));
	else
		rc = sysfs_emit(buf, "\n");
	up_read(&cxl_region_rwsem);

	return rc;
}
DEVICE_ATTR_RO(region);
static struct cxl_region *
cxl_find_region_by_name(struct cxl_root_decoder *cxlrd, const char *name)
{
	struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
	struct device *region_dev;

	region_dev = device_find_child_by_name(&cxld->dev, name);
	if (!region_dev)
		return ERR_PTR(-ENODEV);

	return to_cxl_region(region_dev);
}

static ssize_t delete_region_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t len)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
	struct cxl_port *port = to_cxl_port(dev->parent);
	struct cxl_region *cxlr;

	cxlr = cxl_find_region_by_name(cxlrd, buf);
	if (IS_ERR(cxlr))
		return PTR_ERR(cxlr);

	devm_release_action(port->uport, unregister_region, cxlr);
	put_device(&cxlr->dev);

	return len;
}
DEVICE_ATTR_WO(delete_region);
static void cxl_pmem_region_release(struct device *dev)
{
	struct cxl_pmem_region *cxlr_pmem = to_cxl_pmem_region(dev);
	int i;

	for (i = 0; i < cxlr_pmem->nr_mappings; i++) {
		struct cxl_memdev *cxlmd = cxlr_pmem->mapping[i].cxlmd;

		put_device(&cxlmd->dev);
	}

	kfree(cxlr_pmem);
}

static const struct attribute_group *cxl_pmem_region_attribute_groups[] = {
	&cxl_base_attribute_group,
	NULL,
};

const struct device_type cxl_pmem_region_type = {
	.name = "cxl_pmem_region",
	.release = cxl_pmem_region_release,
	.groups = cxl_pmem_region_attribute_groups,
};

bool is_cxl_pmem_region(struct device *dev)
{
	return dev->type == &cxl_pmem_region_type;
}
EXPORT_SYMBOL_NS_GPL(is_cxl_pmem_region, CXL);

struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev)
{
	if (dev_WARN_ONCE(dev, !is_cxl_pmem_region(dev),
			  "not a cxl_pmem_region device\n"))
		return NULL;
	return container_of(dev, struct cxl_pmem_region, dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_pmem_region, CXL);
struct cxl_poison_context {
	struct cxl_port *port;
	enum cxl_decoder_mode mode;
	u64 offset;
};

static int cxl_get_poison_unmapped(struct cxl_memdev *cxlmd,
				   struct cxl_poison_context *ctx)
{
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	u64 offset, length;
	int rc = 0;

	/*
	 * Collect poison for the remaining unmapped resources
	 * after poison is collected by committed endpoints.
	 *
	 * Knowing that PMEM must always follow RAM, get poison
	 * for unmapped resources based on the last decoder's mode:
	 *	ram: scan remains of ram range, then any pmem range
	 *	pmem: scan remains of pmem range
	 */

	if (ctx->mode == CXL_DECODER_RAM) {
		offset = ctx->offset;
		length = resource_size(&cxlds->ram_res) - offset;
		rc = cxl_mem_get_poison(cxlmd, offset, length, NULL);
		if (rc == -EFAULT)
			rc = 0;
		if (rc)
			return rc;
	}
	if (ctx->mode == CXL_DECODER_PMEM) {
		offset = ctx->offset;
		length = resource_size(&cxlds->dpa_res) - offset;
		if (!length)
			return 0;
	} else if (resource_size(&cxlds->pmem_res)) {
		offset = cxlds->pmem_res.start;
		length = resource_size(&cxlds->pmem_res);
	} else {
		return 0;
	}

	return cxl_mem_get_poison(cxlmd, offset, length, NULL);
}
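
/*
 * Illustrative example of the scan policy above: for a device partitioned as
 * ram 0-256M and pmem 256M-512M where the last committed decoder ends at
 * 128M in ram, the unmapped scan covers ram 128M-256M and then all of pmem;
 * had the last committed decoder been in pmem, only the pmem remainder would
 * be scanned.
 */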
static int poison_by_decoder(struct device *dev, void *arg)
{
	struct cxl_poison_context *ctx = arg;
	struct cxl_endpoint_decoder *cxled;
	struct cxl_memdev *cxlmd;
	u64 offset, length;
	int rc = 0;

	if (!is_endpoint_decoder(dev))
		return rc;

	cxled = to_cxl_endpoint_decoder(dev);
	if (!cxled->dpa_res || !resource_size(cxled->dpa_res))
		return rc;

	/*
	 * Regions are only created with single mode decoders: pmem or ram.
	 * Linux does not support mixed mode decoders. This means that
	 * reading poison per endpoint decoder adheres to the requirement
	 * that poison reads of pmem and ram must be separated.
	 * CXL 3.0 Spec 8.2.9.8.4.1
	 */
	if (cxled->mode == CXL_DECODER_MIXED) {
		dev_dbg(dev, "poison list read unsupported in mixed mode\n");
		return rc;
	}

	cxlmd = cxled_to_memdev(cxled);
	if (cxled->skip) {
		offset = cxled->dpa_res->start - cxled->skip;
		length = cxled->skip;
		rc = cxl_mem_get_poison(cxlmd, offset, length, NULL);
		if (rc == -EFAULT && cxled->mode == CXL_DECODER_RAM)
			rc = 0;
		if (rc)
			return rc;
	}

	offset = cxled->dpa_res->start;
	length = cxled->dpa_res->end - offset + 1;
	rc = cxl_mem_get_poison(cxlmd, offset, length, cxled->cxld.region);
	if (rc == -EFAULT && cxled->mode == CXL_DECODER_RAM)
		rc = 0;
	if (rc)
		return rc;

	/* Iterate until commit_end is reached */
	if (cxled->cxld.id == ctx->port->commit_end) {
		ctx->offset = cxled->dpa_res->end + 1;
		ctx->mode = cxled->mode;
		return 1;
	}

	return 0;
}
int cxl_get_poison_by_endpoint(struct cxl_port *port)
{
	struct cxl_poison_context ctx;
	int rc = 0;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;

	ctx = (struct cxl_poison_context) {
		.port = port
	};

	rc = device_for_each_child(&port->dev, &ctx, poison_by_decoder);
	if (rc == 1)
		rc = cxl_get_poison_unmapped(to_cxl_memdev(port->uport), &ctx);

	up_read(&cxl_region_rwsem);
	return rc;
}
static struct lock_class_key cxl_pmem_region_key;

static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_nvdimm_bridge *cxl_nvb;
	struct cxl_pmem_region *cxlr_pmem;
	struct device *dev;
	int i;

	down_read(&cxl_region_rwsem);
	if (p->state != CXL_CONFIG_COMMIT) {
		cxlr_pmem = ERR_PTR(-ENXIO);
		goto out;
	}

	cxlr_pmem = kzalloc(struct_size(cxlr_pmem, mapping, p->nr_targets),
			    GFP_KERNEL);
	if (!cxlr_pmem) {
		cxlr_pmem = ERR_PTR(-ENOMEM);
		goto out;
	}

	cxlr_pmem->hpa_range.start = p->res->start;
	cxlr_pmem->hpa_range.end = p->res->end;

	/* Snapshot the region configuration underneath the cxl_region_rwsem */
	cxlr_pmem->nr_mappings = p->nr_targets;
	for (i = 0; i < p->nr_targets; i++) {
		struct cxl_endpoint_decoder *cxled = p->targets[i];
		struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
		struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i];

		/*
		 * Regions never span CXL root devices, so by definition the
		 * bridge for one device is the same for all.
		 */
		if (i == 0) {
			cxl_nvb = cxl_find_nvdimm_bridge(cxlmd);
			if (!cxl_nvb) {
				kfree(cxlr_pmem);
				cxlr_pmem = ERR_PTR(-ENODEV);
				goto out;
			}
			cxlr->cxl_nvb = cxl_nvb;
		}
		m->cxlmd = cxlmd;
		get_device(&cxlmd->dev);
		m->start = cxled->dpa_res->start;
		m->size = resource_size(cxled->dpa_res);
		m->position = i;
	}

	dev = &cxlr_pmem->dev;
	cxlr_pmem->cxlr = cxlr;
	cxlr->cxlr_pmem = cxlr_pmem;
	device_initialize(dev);
	lockdep_set_class(&dev->mutex, &cxl_pmem_region_key);
	device_set_pm_not_required(dev);
	dev->parent = &cxlr->dev;
	dev->bus = &cxl_bus_type;
	dev->type = &cxl_pmem_region_type;
out:
	up_read(&cxl_region_rwsem);

	return cxlr_pmem;
}
static void cxl_dax_region_release(struct device *dev)
{
	struct cxl_dax_region *cxlr_dax = to_cxl_dax_region(dev);

	kfree(cxlr_dax);
}

static const struct attribute_group *cxl_dax_region_attribute_groups[] = {
	&cxl_base_attribute_group,
	NULL,
};

const struct device_type cxl_dax_region_type = {
	.name = "cxl_dax_region",
	.release = cxl_dax_region_release,
	.groups = cxl_dax_region_attribute_groups,
};

static bool is_cxl_dax_region(struct device *dev)
{
	return dev->type == &cxl_dax_region_type;
}

struct cxl_dax_region *to_cxl_dax_region(struct device *dev)
{
	if (dev_WARN_ONCE(dev, !is_cxl_dax_region(dev),
			  "not a cxl_dax_region device\n"))
		return NULL;
	return container_of(dev, struct cxl_dax_region, dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_dax_region, CXL);
static struct lock_class_key cxl_dax_region_key;

static struct cxl_dax_region *cxl_dax_region_alloc(struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_dax_region *cxlr_dax;
	struct device *dev;

	down_read(&cxl_region_rwsem);
	if (p->state != CXL_CONFIG_COMMIT) {
		cxlr_dax = ERR_PTR(-ENXIO);
		goto out;
	}

	cxlr_dax = kzalloc(sizeof(*cxlr_dax), GFP_KERNEL);
	if (!cxlr_dax) {
		cxlr_dax = ERR_PTR(-ENOMEM);
		goto out;
	}

	cxlr_dax->hpa_range.start = p->res->start;
	cxlr_dax->hpa_range.end = p->res->end;

	dev = &cxlr_dax->dev;
	cxlr_dax->cxlr = cxlr;
	device_initialize(dev);
	lockdep_set_class(&dev->mutex, &cxl_dax_region_key);
	device_set_pm_not_required(dev);
	dev->parent = &cxlr->dev;
	dev->bus = &cxl_bus_type;
	dev->type = &cxl_dax_region_type;
out:
	up_read(&cxl_region_rwsem);

	return cxlr_dax;
}
static void cxlr_pmem_unregister(void *_cxlr_pmem)
{
	struct cxl_pmem_region *cxlr_pmem = _cxlr_pmem;
	struct cxl_region *cxlr = cxlr_pmem->cxlr;
	struct cxl_nvdimm_bridge *cxl_nvb = cxlr->cxl_nvb;

	/*
	 * Either the bridge is in ->remove() context under the device_lock(),
	 * or cxlr_release_nvdimm() is cancelling the bridge's release action
	 * for @cxlr_pmem and doing it itself (while manually holding the bridge
	 * lock).
	 */
	device_lock_assert(&cxl_nvb->dev);
	cxlr->cxlr_pmem = NULL;
	cxlr_pmem->cxlr = NULL;
	device_unregister(&cxlr_pmem->dev);
}

static void cxlr_release_nvdimm(void *_cxlr)
{
	struct cxl_region *cxlr = _cxlr;
	struct cxl_nvdimm_bridge *cxl_nvb = cxlr->cxl_nvb;

	device_lock(&cxl_nvb->dev);
	if (cxlr->cxlr_pmem)
		devm_release_action(&cxl_nvb->dev, cxlr_pmem_unregister,
				    cxlr->cxlr_pmem);
	device_unlock(&cxl_nvb->dev);
	cxlr->cxl_nvb = NULL;
	put_device(&cxl_nvb->dev);
}
2589 * devm_cxl_add_pmem_region() - add a cxl_region-to-nd_region bridge
2590 * @cxlr: parent CXL region for this pmem region bridge device
2592 * Return: 0 on success negative error code on failure.
static int devm_cxl_add_pmem_region(struct cxl_region *cxlr)
{
	struct cxl_pmem_region *cxlr_pmem;
	struct cxl_nvdimm_bridge *cxl_nvb;
	struct device *dev;
	int rc;

	cxlr_pmem = cxl_pmem_region_alloc(cxlr);
	if (IS_ERR(cxlr_pmem))
		return PTR_ERR(cxlr_pmem);
	cxl_nvb = cxlr->cxl_nvb;

	dev = &cxlr_pmem->dev;
	rc = dev_set_name(dev, "pmem_region%d", cxlr->id);
	if (rc)
		goto err;

	rc = device_add(dev);
	if (rc)
		goto err;

	dev_dbg(&cxlr->dev, "%s: register %s\n", dev_name(dev->parent),
		dev_name(dev));

	device_lock(&cxl_nvb->dev);
	if (cxl_nvb->dev.driver)
		rc = devm_add_action_or_reset(&cxl_nvb->dev,
					      cxlr_pmem_unregister, cxlr_pmem);
	else
		rc = -ENXIO;
	device_unlock(&cxl_nvb->dev);

	if (rc)
		goto err_bridge;

	/* @cxlr carries a reference on @cxl_nvb until cxlr_release_nvdimm */
	return devm_add_action_or_reset(&cxlr->dev, cxlr_release_nvdimm, cxlr);

err:
	put_device(dev);
err_bridge:
	put_device(&cxl_nvb->dev);
	cxlr->cxl_nvb = NULL;
	return rc;
}
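
/*
 * The dax analogue of devm_cxl_add_pmem_region(): no nvdimm bridge is
 * involved, so teardown is a single devm-managed unregister action
 * rather than the bridge/region action pair above.
 */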
static void cxlr_dax_unregister(void *_cxlr_dax)
{
	struct cxl_dax_region *cxlr_dax = _cxlr_dax;

	device_unregister(&cxlr_dax->dev);
}

static int devm_cxl_add_dax_region(struct cxl_region *cxlr)
{
	struct cxl_dax_region *cxlr_dax;
	struct device *dev;
	int rc;

	cxlr_dax = cxl_dax_region_alloc(cxlr);
	if (IS_ERR(cxlr_dax))
		return PTR_ERR(cxlr_dax);

	dev = &cxlr_dax->dev;
	rc = dev_set_name(dev, "dax_region%d", cxlr->id);
	if (rc)
		goto err;

	rc = device_add(dev);
	if (rc)
		goto err;

	dev_dbg(&cxlr->dev, "%s: register %s\n", dev_name(dev->parent),
		dev_name(dev));

	return devm_add_action_or_reset(&cxlr->dev, cxlr_dax_unregister,
					cxlr_dax);

err:
	put_device(dev);
	return rc;
}
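
/*
 * Matchers for device_find_child(): a non-zero return selects the child,
 * and the driver core hands it back with a reference held, which
 * cxl_add_to_region() below balances with put_device().
 */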
static int match_decoder_by_range(struct device *dev, void *data)
{
	struct range *r1, *r2 = data;
	struct cxl_root_decoder *cxlrd;

	if (!is_root_decoder(dev))
		return 0;

	cxlrd = to_cxl_root_decoder(dev);
	r1 = &cxlrd->cxlsd.cxld.hpa_range;
	return range_contains(r1, r2);
}

static int match_region_by_range(struct device *dev, void *data)
{
	struct cxl_region_params *p;
	struct cxl_region *cxlr;
	struct range *r = data;
	int rc = 0;

	if (!is_cxl_region(dev))
		return 0;

	cxlr = to_cxl_region(dev);
	p = &cxlr->params;

	down_read(&cxl_region_rwsem);
	if (p->res && p->res->start == r->start && p->res->end == r->end)
		rc = 1;
	up_read(&cxl_region_rwsem);

	return rc;
}

/* Establish an empty region covering the given HPA range */
static struct cxl_region *construct_region(struct cxl_root_decoder *cxlrd,
					   struct cxl_endpoint_decoder *cxled)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_port *port = cxlrd_to_port(cxlrd);
	struct range *hpa = &cxled->cxld.hpa_range;
	struct cxl_region_params *p;
	struct cxl_region *cxlr;
	struct resource *res;
	int rc;

	/* Retry -EBUSY: a racing region creation may consume the cached id */
	do {
		cxlr = __create_region(cxlrd, cxled->mode,
				       atomic_read(&cxlrd->region_id));
	} while (IS_ERR(cxlr) && PTR_ERR(cxlr) == -EBUSY);

	if (IS_ERR(cxlr)) {
		dev_err(cxlmd->dev.parent,
			"%s:%s: %s failed assign region: %ld\n",
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
			__func__, PTR_ERR(cxlr));
		return cxlr;
	}

	down_write(&cxl_region_rwsem);
	p = &cxlr->params;
	if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
		dev_err(cxlmd->dev.parent,
			"%s:%s: %s autodiscovery interrupted\n",
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
			__func__);
		rc = -EBUSY;
		goto err;
	}

	set_bit(CXL_REGION_F_AUTO, &cxlr->flags);

	res = kmalloc(sizeof(*res), GFP_KERNEL);
	if (!res) {
		rc = -ENOMEM;
		goto err;
	}

	*res = DEFINE_RES_MEM_NAMED(hpa->start, range_len(hpa),
				    dev_name(&cxlr->dev));
	rc = insert_resource(cxlrd->res, res);
	if (rc) {
		/*
		 * Platform-firmware may not have split resources like "System
		 * RAM" on CXL window boundaries, see cxl_region_iomem_release()
		 */
		dev_warn(cxlmd->dev.parent,
			 "%s:%s: %s %s cannot insert resource\n",
			 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
			 __func__, dev_name(&cxlr->dev));
	}

	p->res = res;
	p->interleave_ways = cxled->cxld.interleave_ways;
	p->interleave_granularity = cxled->cxld.interleave_granularity;
	p->state = CXL_CONFIG_INTERLEAVE_ACTIVE;

	rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_target_group());
	if (rc)
		goto err;

	dev_dbg(cxlmd->dev.parent, "%s:%s: %s %s res: %pr iw: %d ig: %d\n",
		dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), __func__,
		dev_name(&cxlr->dev), p->res, p->interleave_ways,
		p->interleave_granularity);

	/* ...to match put_device() in cxl_add_to_region() */
	get_device(&cxlr->dev);
	up_write(&cxl_region_rwsem);

	return cxlr;

err:
	up_write(&cxl_region_rwsem);
	devm_release_action(port->uport, unregister_region, cxlr);
	return ERR_PTR(rc);
}
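
/**
 * cxl_add_to_region() - attach an endpoint decoder to its platform region
 * @root: root CXL port below which to search for a matching CXL window
 * @cxled: endpoint decoder with a platform-established HPA range
 *
 * Find the root decoder whose window contains @cxled's HPA range, find or
 * construct the region for that range, and attach @cxled to it. If the
 * attach completes the region configuration, probe the region device.
 *
 * Return: 0 on success, negative error code on failure.
 */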
int cxl_add_to_region(struct cxl_port *root, struct cxl_endpoint_decoder *cxled)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct range *hpa = &cxled->cxld.hpa_range;
	struct cxl_decoder *cxld = &cxled->cxld;
	struct device *cxlrd_dev, *region_dev;
	struct cxl_root_decoder *cxlrd;
	struct cxl_region_params *p;
	struct cxl_region *cxlr;
	bool attach = false;
	int rc;

	cxlrd_dev = device_find_child(&root->dev, &cxld->hpa_range,
				      match_decoder_by_range);
	if (!cxlrd_dev) {
		dev_err(cxlmd->dev.parent,
			"%s:%s no CXL window for range %#llx:%#llx\n",
			dev_name(&cxlmd->dev), dev_name(&cxld->dev),
			cxld->hpa_range.start, cxld->hpa_range.end);
		return -ENXIO;
	}

	cxlrd = to_cxl_root_decoder(cxlrd_dev);

	/*
	 * Ensure that if multiple threads race to construct_region() for @hpa
	 * one does the construction and the others add to that.
	 */
	mutex_lock(&cxlrd->range_lock);
	region_dev = device_find_child(&cxlrd->cxlsd.cxld.dev, hpa,
				       match_region_by_range);
	if (!region_dev) {
		cxlr = construct_region(cxlrd, cxled);
		region_dev = &cxlr->dev;
	} else
		cxlr = to_cxl_region(region_dev);
	mutex_unlock(&cxlrd->range_lock);

	rc = PTR_ERR_OR_ZERO(cxlr);
	if (rc)
		goto out;

	attach_target(cxlr, cxled, -1, TASK_UNINTERRUPTIBLE);

	down_read(&cxl_region_rwsem);
	p = &cxlr->params;
	attach = p->state == CXL_CONFIG_COMMIT;
	up_read(&cxl_region_rwsem);

	if (attach) {
		/*
		 * If device_attach() fails the range may still be active via
		 * the platform-firmware memory map, otherwise the driver for
		 * regions is local to this file, so driver matching can't fail.
		 */
		if (device_attach(&cxlr->dev) < 0)
			dev_err(&cxlr->dev, "failed to enable, range: %pr\n",
				p->res);
	}

	put_device(region_dev);
out:
	put_device(cxlrd_dev);
	return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_add_to_region, CXL);
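
/*
 * Illustrative sketch (not part of this file) of how an endpoint-port
 * driver might feed platform-enabled decoders into cxl_add_to_region();
 * loosely modeled on the in-tree caller in drivers/cxl/port.c, but this
 * exact helper is hypothetical:
 *
 *	static int example_discover_region(struct device *dev, void *root)
 *	{
 *		struct cxl_endpoint_decoder *cxled;
 *
 *		if (!is_endpoint_decoder(dev))
 *			return 0;
 *
 *		cxled = to_cxl_endpoint_decoder(dev);
 *		if (cxled->state != CXL_DECODER_STATE_AUTO)
 *			return 0;
 *
 *		if (cxl_add_to_region(root, cxled))
 *			dev_dbg(dev, "failed to add to region\n");
 *		return 0;
 *	}
 */
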
/* A non-zero return stops the walk_iomem_res_desc() walk and flags the hit */
static int is_system_ram(struct resource *res, void *arg)
{
	struct cxl_region *cxlr = arg;
	struct cxl_region_params *p = &cxlr->params;

	dev_dbg(&cxlr->dev, "%pr has System RAM: %pr\n", p->res, res);
	return 1;
}

static int cxl_region_probe(struct device *dev)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	int rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc) {
		dev_dbg(&cxlr->dev, "probe interrupted\n");
		return rc;
	}

	if (p->state < CXL_CONFIG_COMMIT) {
		dev_dbg(&cxlr->dev, "config state: %d\n", p->state);
		rc = -ENXIO;
		goto out;
	}

	if (test_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags)) {
		dev_err(&cxlr->dev,
			"failed to activate, re-commit region and retry\n");
		rc = -ENXIO;
		goto out;
	}

	/*
	 * From this point on any path that changes the region's state away from
	 * CXL_CONFIG_COMMIT is also responsible for releasing the driver.
	 */
out:
	up_read(&cxl_region_rwsem);

	if (rc)
		return rc;

	switch (cxlr->mode) {
	case CXL_DECODER_PMEM:
		return devm_cxl_add_pmem_region(cxlr);
	case CXL_DECODER_RAM:
		/*
		 * The region cannot be managed by CXL if any portion of
		 * it is already online as 'System RAM'
		 */
		if (walk_iomem_res_desc(IORES_DESC_NONE,
					IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY,
					p->res->start, p->res->end, cxlr,
					is_system_ram) > 0)
			return 0;
		return devm_cxl_add_dax_region(cxlr);
	default:
		dev_dbg(&cxlr->dev, "unsupported region mode: %d\n",
			cxlr->mode);
		return -ENXIO;
	}
}

static struct cxl_driver cxl_region_driver = {
	.name = "cxl_region",
	.probe = cxl_region_probe,
	.id = CXL_DEVICE_REGION,
};

int cxl_region_init(void)
{
	return cxl_driver_register(&cxl_region_driver);
}

void cxl_region_exit(void)
{
	cxl_driver_unregister(&cxl_region_driver);
}

MODULE_IMPORT_NS(CXL);
MODULE_IMPORT_NS(DEVMEM);
MODULE_ALIAS_CXL(CXL_DEVICE_REGION);