// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) 2023 Addiva Elektronik
 * Author: Tobias Waldekranz <tobias@waldekranz.com>
 */
14 #include <dm/device-internal.h>
21 * struct blkmap_slice - Region mapped to a blkmap
23 * Common data for a region mapped to a blkmap, specialized by each
26 * @node: List node used to associate this slice with a blkmap
27 * @blknr: Start block number of the mapping
28 * @blkcnt: Number of blocks covered by this mapping
31 struct list_head node;
37 * @read: - Read from slice
39 * @read.bm: Blkmap to which this slice belongs
40 * @read.bms: This slice
41 * @read.blknr: Start block number to read from
42 * @read.blkcnt: Number of blocks to read
43 * @read.buffer: Buffer to store read data to
45 ulong (*read)(struct blkmap *bm, struct blkmap_slice *bms,
46 lbaint_t blknr, lbaint_t blkcnt, void *buffer);
49 * @write: - Write to slice
51 * @write.bm: Blkmap to which this slice belongs
52 * @write.bms: This slice
53 * @write.blknr: Start block number to write to
54 * @write.blkcnt: Number of blocks to write
55 * @write.buffer: Data to be written
57 ulong (*write)(struct blkmap *bm, struct blkmap_slice *bms,
58 lbaint_t blknr, lbaint_t blkcnt, const void *buffer);
61 * @destroy: - Tear down slice
63 * @read.bm: Blkmap to which this slice belongs
64 * @read.bms: This slice
66 void (*destroy)(struct blkmap *bm, struct blkmap_slice *bms);
70 * struct blkmap - Block map
72 * Data associated with a blkmap.
74 * @label: Human readable name of this blkmap
75 * @blk: Underlying block device
76 * @slices: List of slices associated with this blkmap
81 struct list_head slices;
84 static bool blkmap_slice_contains(struct blkmap_slice *bms, lbaint_t blknr)
86 return (blknr >= bms->blknr) && (blknr < (bms->blknr + bms->blkcnt));
89 static bool blkmap_slice_available(struct blkmap *bm, struct blkmap_slice *new)
91 struct blkmap_slice *bms;
95 last = new->blknr + new->blkcnt - 1;
97 list_for_each_entry(bms, &bm->slices, node) {
98 if (blkmap_slice_contains(bms, first) ||
99 blkmap_slice_contains(bms, last) ||
100 blkmap_slice_contains(new, bms->blknr) ||
101 blkmap_slice_contains(new, bms->blknr + bms->blkcnt - 1))
108 static int blkmap_slice_add(struct blkmap *bm, struct blkmap_slice *new)
110 struct blk_desc *bd = dev_get_uclass_plat(bm->blk);
111 struct list_head *insert = &bm->slices;
112 struct blkmap_slice *bms;
114 if (!blkmap_slice_available(bm, new))
117 list_for_each_entry(bms, &bm->slices, node) {
118 if (bms->blknr < new->blknr)
125 list_add_tail(&new->node, insert);
127 /* Disk might have grown, update the size */
128 bms = list_last_entry(&bm->slices, struct blkmap_slice, node);
129 bd->lba = bms->blknr + bms->blkcnt;
134 * struct blkmap_linear - Linear mapping to other block device
136 * @slice: Common map data
137 * @blk: Target block device of this mapping
138 * @blknr: Start block number of the target device
140 struct blkmap_linear {
141 struct blkmap_slice slice;
147 static ulong blkmap_linear_read(struct blkmap *bm, struct blkmap_slice *bms,
148 lbaint_t blknr, lbaint_t blkcnt, void *buffer)
150 struct blkmap_linear *bml = container_of(bms, struct blkmap_linear, slice);
152 return blk_read(bml->blk, bml->blknr + blknr, blkcnt, buffer);
155 static ulong blkmap_linear_write(struct blkmap *bm, struct blkmap_slice *bms,
156 lbaint_t blknr, lbaint_t blkcnt,
159 struct blkmap_linear *bml = container_of(bms, struct blkmap_linear, slice);
161 return blk_write(bml->blk, bml->blknr + blknr, blkcnt, buffer);
164 int blkmap_map_linear(struct udevice *dev, lbaint_t blknr, lbaint_t blkcnt,
165 struct udevice *lblk, lbaint_t lblknr)
167 struct blkmap *bm = dev_get_plat(dev);
168 struct blkmap_linear *linear;
169 struct blk_desc *bd, *lbd;
172 bd = dev_get_uclass_plat(bm->blk);
173 lbd = dev_get_uclass_plat(lblk);
174 if (lbd->blksz != bd->blksz)
175 /* We could support block size translation, but we
180 linear = malloc(sizeof(*linear));
184 *linear = (struct blkmap_linear) {
189 .read = blkmap_linear_read,
190 .write = blkmap_linear_write,
197 err = blkmap_slice_add(bm, &linear->slice);
205 * struct blkmap_mem - Memory mapping
207 * @slice: Common map data
208 * @addr: Target memory region of this mapping
209 * @remapped: True if @addr is backed by a physical to virtual memory
210 * mapping that must be torn down at the end of this mapping's
214 struct blkmap_slice slice;
219 static ulong blkmap_mem_read(struct blkmap *bm, struct blkmap_slice *bms,
220 lbaint_t blknr, lbaint_t blkcnt, void *buffer)
222 struct blkmap_mem *bmm = container_of(bms, struct blkmap_mem, slice);
223 struct blk_desc *bd = dev_get_uclass_plat(bm->blk);
226 src = bmm->addr + (blknr << bd->log2blksz);
227 memcpy(buffer, src, blkcnt << bd->log2blksz);
231 static ulong blkmap_mem_write(struct blkmap *bm, struct blkmap_slice *bms,
232 lbaint_t blknr, lbaint_t blkcnt,
235 struct blkmap_mem *bmm = container_of(bms, struct blkmap_mem, slice);
236 struct blk_desc *bd = dev_get_uclass_plat(bm->blk);
239 dst = bmm->addr + (blknr << bd->log2blksz);
240 memcpy(dst, buffer, blkcnt << bd->log2blksz);
244 static void blkmap_mem_destroy(struct blkmap *bm, struct blkmap_slice *bms)
246 struct blkmap_mem *bmm = container_of(bms, struct blkmap_mem, slice);
249 unmap_sysmem(bmm->addr);
252 int __blkmap_map_mem(struct udevice *dev, lbaint_t blknr, lbaint_t blkcnt,
253 void *addr, bool remapped)
255 struct blkmap *bm = dev_get_plat(dev);
256 struct blkmap_mem *bmm;
259 bmm = malloc(sizeof(*bmm));
263 *bmm = (struct blkmap_mem) {
268 .read = blkmap_mem_read,
269 .write = blkmap_mem_write,
270 .destroy = blkmap_mem_destroy,
274 .remapped = remapped,
277 err = blkmap_slice_add(bm, &bmm->slice);
284 int blkmap_map_mem(struct udevice *dev, lbaint_t blknr, lbaint_t blkcnt,
287 return __blkmap_map_mem(dev, blknr, blkcnt, addr, false);
290 int blkmap_map_pmem(struct udevice *dev, lbaint_t blknr, lbaint_t blkcnt,
293 struct blkmap *bm = dev_get_plat(dev);
294 struct blk_desc *bd = dev_get_uclass_plat(bm->blk);
298 addr = map_sysmem(paddr, blkcnt << bd->log2blksz);
302 err = __blkmap_map_mem(dev, blknr, blkcnt, addr, true);
309 static ulong blkmap_blk_read_slice(struct blkmap *bm, struct blkmap_slice *bms,
310 lbaint_t blknr, lbaint_t blkcnt,
315 nr = blknr - bms->blknr;
316 cnt = (blkcnt < bms->blkcnt) ? blkcnt : bms->blkcnt;
317 return bms->read(bm, bms, nr, cnt, buffer);
320 static ulong blkmap_blk_read(struct udevice *dev, lbaint_t blknr,
321 lbaint_t blkcnt, void *buffer)
323 struct blk_desc *bd = dev_get_uclass_plat(dev);
324 struct blkmap *bm = dev_get_plat(dev->parent);
325 struct blkmap_slice *bms;
326 lbaint_t cnt, total = 0;
328 list_for_each_entry(bms, &bm->slices, node) {
329 if (!blkmap_slice_contains(bms, blknr))
332 cnt = blkmap_blk_read_slice(bm, bms, blknr, blkcnt, buffer);
335 buffer += cnt << bd->log2blksz;
342 static ulong blkmap_blk_write_slice(struct blkmap *bm, struct blkmap_slice *bms,
343 lbaint_t blknr, lbaint_t blkcnt,
348 nr = blknr - bms->blknr;
349 cnt = (blkcnt < bms->blkcnt) ? blkcnt : bms->blkcnt;
350 return bms->write(bm, bms, nr, cnt, buffer);
353 static ulong blkmap_blk_write(struct udevice *dev, lbaint_t blknr,
354 lbaint_t blkcnt, const void *buffer)
356 struct blk_desc *bd = dev_get_uclass_plat(dev);
357 struct blkmap *bm = dev_get_plat(dev->parent);
358 struct blkmap_slice *bms;
359 lbaint_t cnt, total = 0;
361 list_for_each_entry(bms, &bm->slices, node) {
362 if (!blkmap_slice_contains(bms, blknr))
365 cnt = blkmap_blk_write_slice(bm, bms, blknr, blkcnt, buffer);
368 buffer += cnt << bd->log2blksz;
375 static const struct blk_ops blkmap_blk_ops = {
376 .read = blkmap_blk_read,
377 .write = blkmap_blk_write,
380 U_BOOT_DRIVER(blkmap_blk) = {
381 .name = "blkmap_blk",
383 .ops = &blkmap_blk_ops,
386 int blkmap_dev_bind(struct udevice *dev)
388 struct blkmap *bm = dev_get_plat(dev);
392 err = blk_create_devicef(dev, "blkmap_blk", "blk", UCLASS_BLKMAP,
393 dev_seq(dev), 512, 0, &bm->blk);
395 return log_msg_ret("blk", err);
397 INIT_LIST_HEAD(&bm->slices);
399 bd = dev_get_uclass_plat(bm->blk);
400 snprintf(bd->vendor, BLK_VEN_SIZE, "U-Boot");
401 snprintf(bd->product, BLK_PRD_SIZE, "blkmap");
402 snprintf(bd->revision, BLK_REV_SIZE, "1.0");
404 /* EFI core isn't keen on zero-sized disks, so we lie. This is
405 * updated with the correct size once the user adds a
413 int blkmap_dev_unbind(struct udevice *dev)
415 struct blkmap *bm = dev_get_plat(dev);
416 struct blkmap_slice *bms, *tmp;
419 list_for_each_entry_safe(bms, tmp, &bm->slices, node) {
420 list_del(&bms->node);
424 err = device_remove(bm->blk, DM_REMOVE_NORMAL);
428 return device_unbind(bm->blk);
431 U_BOOT_DRIVER(blkmap_root) = {
432 .name = "blkmap_dev",
434 .bind = blkmap_dev_bind,
435 .unbind = blkmap_dev_unbind,
436 .plat_auto = sizeof(struct blkmap),
439 struct udevice *blkmap_from_label(const char *label)
445 uclass_id_foreach_dev(UCLASS_BLKMAP, dev, uc) {
446 bm = dev_get_plat(dev);
447 if (bm->label && !strcmp(label, bm->label))
454 int blkmap_create(const char *label, struct udevice **devp)
456 char *hname, *hlabel;
462 dev = blkmap_from_label(label);
468 hlabel = strdup(label);
474 namelen = strlen("blkmap-") + strlen(label) + 1;
475 hname = malloc(namelen);
478 goto err_free_hlabel;
481 strlcpy(hname, "blkmap-", namelen);
482 strlcat(hname, label, namelen);
484 err = device_bind_driver(dm_root(), "blkmap_dev", hname, &dev);
488 device_set_name_alloced(dev);
489 bm = dev_get_plat(dev);
505 int blkmap_destroy(struct udevice *dev)
509 err = device_remove(dev, DM_REMOVE_NORMAL);
513 return device_unbind(dev);
516 UCLASS_DRIVER(blkmap) = {