 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004 - 2006 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/miscdevice.h>
#include <linux/sched/mm.h>
#include <linux/init.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/dm-ioctl.h>
#include <linux/hdreg.h>
#include <linux/compat.h>
#include <linux/nospec.h>

#include <linux/uaccess.h>
#include <linux/ima.h>

#define DM_MSG_PREFIX "ioctl"
#define DM_DRIVER_EMAIL "dm-devel@redhat.com"
	 * poll will wait until the global event number is greater than this value.
	volatile unsigned global_event_nr;
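/*
 * Illustrative sketch (not part of the driver): the counter above backs
 * DM_DEV_ARM_POLL.  Userspace typically snapshots the global event number
 * and then poll()s the control node until a newer event arrives; field and
 * macro names are from <linux/dm-ioctl.h>, error handling omitted:
 *
 *	int fd = open("/dev/mapper/control", O_RDWR);
 *	struct dm_ioctl dmi = {
 *		.version = { DM_VERSION_MAJOR, DM_VERSION_MINOR, DM_VERSION_PATCHLEVEL },
 *		.data_size = sizeof(dmi),
 *	};
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	ioctl(fd, DM_DEV_ARM_POLL, &dmi);	// remember the current event number
 *	poll(&pfd, 1, -1);			// returns once a newer event exists
 */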
/*-----------------------------------------------------------------
 * The ioctl interface needs to be able to look up devices by
 * either name or uuid.
 *---------------------------------------------------------------*/
	struct rb_node name_node;
	struct rb_node uuid_node;
	struct mapped_device *md;
	struct dm_table *new_map;
	struct dm_target_versions *vers, *old_vers;

static struct rb_root name_rb_tree = RB_ROOT;
static struct rb_root uuid_rb_tree = RB_ROOT;

static void dm_hash_remove_all(bool keep_open_devices, bool mark_deferred, bool only_deferred);

 * Guards access to both hash tables.
static DECLARE_RWSEM(_hash_lock);

 * Protects use of mdptr to obtain hash cell name and uuid from mapped device.
static DEFINE_MUTEX(dm_hash_cells_mutex);

static void dm_hash_exit(void)
	dm_hash_remove_all(false, false, false);

/*-----------------------------------------------------------------
 * Code for looking up a device by name
 *---------------------------------------------------------------*/
static struct hash_cell *__get_name_cell(const char *str)
	struct rb_node *n = name_rb_tree.rb_node;

		struct hash_cell *hc = container_of(n, struct hash_cell, name_node);
		int c = strcmp(hc->name, str);
		n = c >= 0 ? n->rb_left : n->rb_right;

static struct hash_cell *__get_uuid_cell(const char *str)
	struct rb_node *n = uuid_rb_tree.rb_node;

		struct hash_cell *hc = container_of(n, struct hash_cell, uuid_node);
		int c = strcmp(hc->uuid, str);
		n = c >= 0 ? n->rb_left : n->rb_right;

static void __unlink_name(struct hash_cell *hc)
		hc->name_set = false;
		rb_erase(&hc->name_node, &name_rb_tree);

static void __unlink_uuid(struct hash_cell *hc)
		hc->uuid_set = false;
		rb_erase(&hc->uuid_node, &uuid_rb_tree);

static void __link_name(struct hash_cell *new_hc)
	struct rb_node **n, *parent;

	__unlink_name(new_hc);

	new_hc->name_set = true;

	n = &name_rb_tree.rb_node;

		struct hash_cell *hc = container_of(*n, struct hash_cell, name_node);
		int c = strcmp(hc->name, new_hc->name);
		n = c >= 0 ? &hc->name_node.rb_left : &hc->name_node.rb_right;

	rb_link_node(&new_hc->name_node, parent, n);
	rb_insert_color(&new_hc->name_node, &name_rb_tree);

static void __link_uuid(struct hash_cell *new_hc)
	struct rb_node **n, *parent;

	__unlink_uuid(new_hc);

	new_hc->uuid_set = true;

	n = &uuid_rb_tree.rb_node;

		struct hash_cell *hc = container_of(*n, struct hash_cell, uuid_node);
		int c = strcmp(hc->uuid, new_hc->uuid);
		n = c > 0 ? &hc->uuid_node.rb_left : &hc->uuid_node.rb_right;

	rb_link_node(&new_hc->uuid_node, parent, n);
	rb_insert_color(&new_hc->uuid_node, &uuid_rb_tree);

static struct hash_cell *__get_dev_cell(uint64_t dev)
	struct mapped_device *md;
	struct hash_cell *hc;

	md = dm_get_md(huge_decode_dev(dev));

	hc = dm_get_mdptr(md);
/*-----------------------------------------------------------------
 * Inserting, removing and renaming a device.
 *---------------------------------------------------------------*/
static struct hash_cell *alloc_cell(const char *name, const char *uuid,
				    struct mapped_device *md)
	struct hash_cell *hc;

	hc = kmalloc(sizeof(*hc), GFP_KERNEL);

	hc->name = kstrdup(name, GFP_KERNEL);

		hc->uuid = kstrdup(uuid, GFP_KERNEL);

	hc->name_set = hc->uuid_set = false;

static void free_cell(struct hash_cell *hc)

 * The kdev_t and uuid of a device can never change once it is
 * initially inserted.
static int dm_hash_insert(const char *name, const char *uuid, struct mapped_device *md)
	struct hash_cell *cell, *hc;

	 * Allocate the new cells.
	cell = alloc_cell(name, uuid, md);

	 * Insert the cell into both hash tables.
	down_write(&_hash_lock);
	hc = __get_name_cell(name);

		hc = __get_uuid_cell(uuid);

	mutex_lock(&dm_hash_cells_mutex);
	dm_set_mdptr(md, cell);
	mutex_unlock(&dm_hash_cells_mutex);
	up_write(&_hash_lock);

	up_write(&_hash_lock);

static struct dm_table *__hash_remove(struct hash_cell *hc)
	struct dm_table *table;

	/* remove from the dev trees */

	mutex_lock(&dm_hash_cells_mutex);
	dm_set_mdptr(hc->md, NULL);
	mutex_unlock(&dm_hash_cells_mutex);

	table = dm_get_live_table(hc->md, &srcu_idx);
		dm_table_event(table);
	dm_put_live_table(hc->md, srcu_idx);
static void dm_hash_remove_all(bool keep_open_devices, bool mark_deferred, bool only_deferred)
	struct hash_cell *hc;
	struct mapped_device *md;

	down_write(&_hash_lock);

	for (n = rb_first(&name_rb_tree); n; n = rb_next(n)) {
		hc = container_of(n, struct hash_cell, name_node);

		if (keep_open_devices &&
		    dm_lock_for_deletion(md, mark_deferred, only_deferred)) {

		t = __hash_remove(hc);

		up_write(&_hash_lock);

		dm_ima_measure_on_device_remove(md, true);

		if (likely(keep_open_devices))
			dm_put(md);
		else
			dm_destroy_immediate(md);

		 * Some mapped devices may be using other mapped
		 * devices, so repeat until we make no further
		 * progress. If a new mapped device is created
		 * here it will also get removed.

	up_write(&_hash_lock);

		DMWARN("remove_all left %d open device(s)", dev_skipped);

 * Set the uuid of a hash_cell that isn't already set.
static void __set_cell_uuid(struct hash_cell *hc, char *new_uuid)
	mutex_lock(&dm_hash_cells_mutex);
	mutex_unlock(&dm_hash_cells_mutex);

 * Changes the name of a hash_cell and returns the old name for
 * the caller to free.
static char *__change_cell_name(struct hash_cell *hc, char *new_name)
	 * Rename and move the name cell.
	mutex_lock(&dm_hash_cells_mutex);
	mutex_unlock(&dm_hash_cells_mutex);
static struct mapped_device *dm_hash_rename(struct dm_ioctl *param,
	char *new_data, *old_name = NULL;
	struct hash_cell *hc;
	struct dm_table *table;
	struct mapped_device *md;
	unsigned change_uuid = (param->flags & DM_UUID_FLAG) ? 1 : 0;

	new_data = kstrdup(new, GFP_KERNEL);
		return ERR_PTR(-ENOMEM);

	down_write(&_hash_lock);

	if (change_uuid)
		hc = __get_uuid_cell(new);
	else
		hc = __get_name_cell(new);
		DMWARN("Unable to change %s on mapped device %s to one that "
		       "already exists: %s",
		       change_uuid ? "uuid" : "name",
		       param->name, new);
		up_write(&_hash_lock);
		return ERR_PTR(-EBUSY);

	 * Is there such a device as 'old' ?
	hc = __get_name_cell(param->name);
		DMWARN("Unable to rename non-existent device, %s to %s%s",
		       param->name, change_uuid ? "uuid " : "", new);
		up_write(&_hash_lock);
		return ERR_PTR(-ENXIO);

	 * Does this device already have a uuid?
	if (change_uuid && hc->uuid) {
		DMWARN("Unable to change uuid of mapped device %s to %s "
		       "because uuid is already set to %s",
		       param->name, new, hc->uuid);
		up_write(&_hash_lock);
		return ERR_PTR(-EINVAL);

	if (change_uuid)
		__set_cell_uuid(hc, new_data);
	else
		old_name = __change_cell_name(hc, new_data);

	 * Wake up any dm event waiters.
	table = dm_get_live_table(hc->md, &srcu_idx);
		dm_table_event(table);
	dm_put_live_table(hc->md, srcu_idx);

	if (!dm_kobject_uevent(hc->md, KOBJ_CHANGE, param->event_nr))
		param->flags |= DM_UEVENT_GENERATED_FLAG;

	dm_ima_measure_on_device_rename(md);

	up_write(&_hash_lock);

void dm_deferred_remove(void)
	dm_hash_remove_all(true, false, true);

/*-----------------------------------------------------------------
 * Implementation of the ioctl commands
 *---------------------------------------------------------------*/
 * All the ioctl commands get dispatched to functions with this prototype.
typedef int (*ioctl_fn)(struct file *filp, struct dm_ioctl *param, size_t param_size);

static int remove_all(struct file *filp, struct dm_ioctl *param, size_t param_size)
	dm_hash_remove_all(true, !!(param->flags & DM_DEFERRED_REMOVE), false);
	param->data_size = 0;
 * Round up the ptr to an 8-byte boundary.
static inline size_t align_val(size_t val)
	return (val + ALIGN_MASK) & ~ALIGN_MASK;

static inline void *align_ptr(void *ptr)
	return (void *)align_val((size_t)ptr);
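/*
 * Illustrative note (not in the original source): with the 8-byte alignment
 * used here, align_val() rounds a size up to the next multiple of 8, e.g.
 * align_val(13) == 16 and align_val(16) == 16, assuming ALIGN_MASK is 7.
 */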
 * Retrieves the data payload buffer from an already allocated struct dm_ioctl.
static void *get_result_buffer(struct dm_ioctl *param, size_t param_size,
	param->data_start = align_ptr(param + 1) - (void *) param;

	if (param->data_start < param_size)
		*len = param_size - param->data_start;

	return ((void *) param) + param->data_start;
static bool filter_device(struct hash_cell *hc, const char *pfx_name, const char *pfx_uuid)
	size_t val_len, pfx_len;

	val_len = strlen(val);
	pfx_len = strnlen(pfx_name, DM_NAME_LEN);
	if (pfx_len > val_len)
	if (memcmp(val, pfx_name, pfx_len))

	val = hc->uuid ? hc->uuid : "";
	val_len = strlen(val);
	pfx_len = strnlen(pfx_uuid, DM_UUID_LEN);
	if (pfx_len > val_len)
	if (memcmp(val, pfx_uuid, pfx_len))
static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_size)
	struct hash_cell *hc;
	size_t len, needed = 0;
	struct gendisk *disk;
	struct dm_name_list *orig_nl, *nl, *old_nl = NULL;

	down_write(&_hash_lock);

	 * Loop through all the devices working out how much space we need.
	for (n = rb_first(&name_rb_tree); n; n = rb_next(n)) {
		hc = container_of(n, struct hash_cell, name_node);
		if (!filter_device(hc, param->name, param->uuid))
		needed += align_val(offsetof(struct dm_name_list, name) + strlen(hc->name) + 1);
		needed += align_val(sizeof(uint32_t) * 2);
		if (param->flags & DM_UUID_FLAG && hc->uuid)
			needed += align_val(strlen(hc->uuid) + 1);

	 * Grab our output buffer.
	nl = orig_nl = get_result_buffer(param, param_size, &len);
	if (len < needed || len < sizeof(nl->dev)) {
		param->flags |= DM_BUFFER_FULL_FLAG;
	param->data_size = param->data_start + needed;

	nl->dev = 0;	/* Flags no data */

	 * Now loop through filling out the names.
	for (n = rb_first(&name_rb_tree); n; n = rb_next(n)) {
		hc = container_of(n, struct hash_cell, name_node);
		if (!filter_device(hc, param->name, param->uuid))
			old_nl->next = (uint32_t) ((void *) nl -
						   (void *) old_nl);
		disk = dm_disk(hc->md);
		nl->dev = huge_encode_dev(disk_devt(disk));
		strcpy(nl->name, hc->name);

		event_nr = align_ptr(nl->name + strlen(hc->name) + 1);
		event_nr[0] = dm_get_event_nr(hc->md);

		uuid_ptr = align_ptr(event_nr + 2);
		if (param->flags & DM_UUID_FLAG) {
			event_nr[1] |= DM_NAME_LIST_FLAG_HAS_UUID;
			strcpy(uuid_ptr, hc->uuid);
			uuid_ptr = align_ptr(uuid_ptr + strlen(hc->uuid) + 1);
			event_nr[1] |= DM_NAME_LIST_FLAG_DOESNT_HAVE_UUID;

	 * If mismatch happens, security may be compromised due to buffer
	 * overflow, so it's better to crash.
	BUG_ON((char *)nl - (char *)orig_nl != needed);

	up_write(&_hash_lock);
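/*
 * Illustrative sketch (not part of the driver): userspace walks the records
 * that list_devices() packs above by following the relative 'next' offsets,
 * stopping at next == 0; a first record with dev == 0 means the list is
 * empty.  Assumes 'dmi' is the struct dm_ioctl returned by DM_LIST_DEVICES.
 *
 *	struct dm_name_list *nl = (void *)dmi + dmi->data_start;
 *	if (nl->dev) {
 *		for (;;) {
 *			printf("%s\n", nl->name);
 *			if (!nl->next)
 *				break;
 *			nl = (void *)nl + nl->next;
 *		}
 *	}
 */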
static void list_version_get_needed(struct target_type *tt, void *needed_param)
	size_t *needed = needed_param;

	*needed += sizeof(struct dm_target_versions);
	*needed += strlen(tt->name);
	*needed += ALIGN_MASK;

static void list_version_get_info(struct target_type *tt, void *param)
	struct vers_iter *info = param;

	/* Check space - it might have changed since the first iteration */
	if ((char *)info->vers + sizeof(tt->version) + strlen(tt->name) + 1 >
	    info->end) {
		info->flags = DM_BUFFER_FULL_FLAG;

		info->old_vers->next = (uint32_t) ((void *)info->vers -
						   (void *)info->old_vers);
	info->vers->version[0] = tt->version[0];
	info->vers->version[1] = tt->version[1];
	info->vers->version[2] = tt->version[2];
	info->vers->next = 0;
	strcpy(info->vers->name, tt->name);

	info->old_vers = info->vers;
	info->vers = align_ptr(((void *) ++info->vers) + strlen(tt->name) + 1);

static int __list_versions(struct dm_ioctl *param, size_t param_size, const char *name)
	size_t len, needed = 0;
	struct dm_target_versions *vers;
	struct vers_iter iter_info;
	struct target_type *tt = NULL;

		tt = dm_get_target_type(name);
	 * Loop through all the devices working out how much space we need.
	if (!tt)
		dm_target_iterate(list_version_get_needed, &needed);
	else
		list_version_get_needed(tt, &needed);

	 * Grab our output buffer.
	vers = get_result_buffer(param, param_size, &len);
		param->flags |= DM_BUFFER_FULL_FLAG;
	param->data_size = param->data_start + needed;

	iter_info.param_size = param_size;
	iter_info.old_vers = NULL;
	iter_info.vers = vers;
	iter_info.end = (char *)vers+len;

	 * Now loop through filling out the names & versions.
	if (!tt)
		dm_target_iterate(list_version_get_info, &iter_info);
	else
		list_version_get_info(tt, &iter_info);
	param->flags |= iter_info.flags;

		dm_put_target_type(tt);
static int list_versions(struct file *filp, struct dm_ioctl *param, size_t param_size)
	return __list_versions(param, param_size, NULL);

static int get_target_version(struct file *filp, struct dm_ioctl *param, size_t param_size)
	return __list_versions(param, param_size, param->name);

static int check_name(const char *name)
	if (strchr(name, '/')) {
		DMWARN("invalid device name");

 * On successful return, the caller must not attempt to acquire
 * _hash_lock without first calling dm_put_live_table, because dm_table_destroy
 * waits for this dm_put_live_table and could be called under this lock.
static struct dm_table *dm_get_inactive_table(struct mapped_device *md, int *srcu_idx)
	struct hash_cell *hc;
	struct dm_table *table = NULL;

	/* increment rcu count, we don't care about the table pointer */
	dm_get_live_table(md, srcu_idx);

	down_read(&_hash_lock);
	hc = dm_get_mdptr(md);
	if (!hc || hc->md != md) {
		DMWARN("device has been removed from the dev hash table.");

	up_read(&_hash_lock);

static struct dm_table *dm_get_live_or_inactive_table(struct mapped_device *md,
						      struct dm_ioctl *param,
	return (param->flags & DM_QUERY_INACTIVE_TABLE_FLAG) ?
		dm_get_inactive_table(md, srcu_idx) : dm_get_live_table(md, srcu_idx);
 * Fills in a dm_ioctl structure, ready for sending back to userland.
static void __dev_status(struct mapped_device *md, struct dm_ioctl *param)
	struct gendisk *disk = dm_disk(md);
	struct dm_table *table;

	param->flags &= ~(DM_SUSPEND_FLAG | DM_READONLY_FLAG |
			  DM_ACTIVE_PRESENT_FLAG | DM_INTERNAL_SUSPEND_FLAG);

	if (dm_suspended_md(md))
		param->flags |= DM_SUSPEND_FLAG;

	if (dm_suspended_internally_md(md))
		param->flags |= DM_INTERNAL_SUSPEND_FLAG;

	if (dm_test_deferred_remove_flag(md))
		param->flags |= DM_DEFERRED_REMOVE;

	param->dev = huge_encode_dev(disk_devt(disk));

	 * Yes, this will be out of date by the time it gets back
	 * to userland, but it is still very useful for debugging.
	param->open_count = dm_open_count(md);

	param->event_nr = dm_get_event_nr(md);
	param->target_count = 0;

	table = dm_get_live_table(md, &srcu_idx);
		if (!(param->flags & DM_QUERY_INACTIVE_TABLE_FLAG)) {
			if (get_disk_ro(disk))
				param->flags |= DM_READONLY_FLAG;
			param->target_count = dm_table_get_num_targets(table);

		param->flags |= DM_ACTIVE_PRESENT_FLAG;
	dm_put_live_table(md, srcu_idx);

	if (param->flags & DM_QUERY_INACTIVE_TABLE_FLAG) {
		table = dm_get_inactive_table(md, &srcu_idx);
			if (!(dm_table_get_mode(table) & FMODE_WRITE))
				param->flags |= DM_READONLY_FLAG;
			param->target_count = dm_table_get_num_targets(table);
		dm_put_live_table(md, srcu_idx);

static int dev_create(struct file *filp, struct dm_ioctl *param, size_t param_size)
	int r, m = DM_ANY_MINOR;
	struct mapped_device *md;

	r = check_name(param->name);

	if (param->flags & DM_PERSISTENT_DEV_FLAG)
		m = MINOR(huge_decode_dev(param->dev));

	r = dm_create(m, &md);

	r = dm_hash_insert(param->name, *param->uuid ? param->uuid : NULL, md);

	param->flags &= ~DM_INACTIVE_PRESENT_FLAG;

	__dev_status(md, param);
 * Always use UUID for lookups if it's present, otherwise use name or dev.
static struct hash_cell *__find_device_hash_cell(struct dm_ioctl *param)
	struct hash_cell *hc = NULL;

		if (*param->name || param->dev)

		hc = __get_uuid_cell(param->uuid);

	} else if (*param->name) {

		hc = __get_name_cell(param->name);

	} else if (param->dev) {
		hc = __get_dev_cell(param->dev);

	 * Sneakily write in both the name and the uuid
	 * while we have the cell.
	strlcpy(param->name, hc->name, sizeof(param->name));
	if (hc->uuid)
		strlcpy(param->uuid, hc->uuid, sizeof(param->uuid));
	else
		param->uuid[0] = '\0';

	if (hc->new_map)
		param->flags |= DM_INACTIVE_PRESENT_FLAG;
	else
		param->flags &= ~DM_INACTIVE_PRESENT_FLAG;

static struct mapped_device *find_device(struct dm_ioctl *param)
	struct hash_cell *hc;
	struct mapped_device *md = NULL;

	down_read(&_hash_lock);
	hc = __find_device_hash_cell(param);
	up_read(&_hash_lock);

static int dev_remove(struct file *filp, struct dm_ioctl *param, size_t param_size)
	struct hash_cell *hc;
	struct mapped_device *md;

	down_write(&_hash_lock);
	hc = __find_device_hash_cell(param);
		DMDEBUG_LIMIT("device doesn't appear to be in the dev hash table.");
		up_write(&_hash_lock);

	 * Ensure the device is not open and nothing further can open it.
	r = dm_lock_for_deletion(md, !!(param->flags & DM_DEFERRED_REMOVE), false);
		if (r == -EBUSY && param->flags & DM_DEFERRED_REMOVE) {
			up_write(&_hash_lock);
		DMDEBUG_LIMIT("unable to remove open device %s", hc->name);
		up_write(&_hash_lock);

	t = __hash_remove(hc);
	up_write(&_hash_lock);

	param->flags &= ~DM_DEFERRED_REMOVE;

	dm_ima_measure_on_device_remove(md, false);

	if (!dm_kobject_uevent(md, KOBJ_REMOVE, param->event_nr))
		param->flags |= DM_UEVENT_GENERATED_FLAG;

 * Check a string doesn't overrun the chunk of
 * memory we copied from userland.
static int invalid_str(char *str, void *end)
	while ((void *) str < end)
static int dev_rename(struct file *filp, struct dm_ioctl *param, size_t param_size)
	char *new_data = (char *) param + param->data_start;
	struct mapped_device *md;
	unsigned change_uuid = (param->flags & DM_UUID_FLAG) ? 1 : 0;

	if (new_data < param->data ||
	    invalid_str(new_data, (void *) param + param_size) || !*new_data ||
	    strlen(new_data) > (change_uuid ? DM_UUID_LEN - 1 : DM_NAME_LEN - 1)) {
		DMWARN("Invalid new mapped device name or uuid string supplied.");

	r = check_name(new_data);

	md = dm_hash_rename(param, new_data);

	__dev_status(md, param);

static int dev_set_geometry(struct file *filp, struct dm_ioctl *param, size_t param_size)
	struct mapped_device *md;
	struct hd_geometry geometry;
	unsigned long indata[4];
	char *geostr = (char *) param + param->data_start;

	md = find_device(param);

	if (geostr < param->data ||
	    invalid_str(geostr, (void *) param + param_size)) {
		DMWARN("Invalid geometry supplied.");

	x = sscanf(geostr, "%lu %lu %lu %lu%c", indata,
		   indata + 1, indata + 2, indata + 3, &dummy);

		DMWARN("Unable to interpret geometry settings.");

	if (indata[0] > 65535 || indata[1] > 255 ||
	    indata[2] > 255 || indata[3] > ULONG_MAX) {
		DMWARN("Geometry exceeds range limits.");

	geometry.cylinders = indata[0];
	geometry.heads = indata[1];
	geometry.sectors = indata[2];
	geometry.start = indata[3];

	r = dm_set_geometry(md, &geometry);

	param->data_size = 0;

static int do_suspend(struct dm_ioctl *param)
	unsigned suspend_flags = DM_SUSPEND_LOCKFS_FLAG;
	struct mapped_device *md;

	md = find_device(param);

	if (param->flags & DM_SKIP_LOCKFS_FLAG)
		suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG;
	if (param->flags & DM_NOFLUSH_FLAG)
		suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG;

	if (!dm_suspended_md(md)) {
		r = dm_suspend(md, suspend_flags);

	__dev_status(md, param);
static int do_resume(struct dm_ioctl *param)
	unsigned suspend_flags = DM_SUSPEND_LOCKFS_FLAG;
	struct hash_cell *hc;
	struct mapped_device *md;
	struct dm_table *new_map, *old_map = NULL;

	down_write(&_hash_lock);

	hc = __find_device_hash_cell(param);
		DMDEBUG_LIMIT("device doesn't appear to be in the dev hash table.");
		up_write(&_hash_lock);

	new_map = hc->new_map;

	param->flags &= ~DM_INACTIVE_PRESENT_FLAG;

	up_write(&_hash_lock);

	/* Do we need to load a new map ? */

		/* Suspend if it isn't already suspended */
		if (param->flags & DM_SKIP_LOCKFS_FLAG)
			suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG;
		if (param->flags & DM_NOFLUSH_FLAG)
			suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG;
		if (!dm_suspended_md(md))
			dm_suspend(md, suspend_flags);

		old_map = dm_swap_table(md, new_map);
		if (IS_ERR(old_map)) {
			dm_table_destroy(new_map);
			return PTR_ERR(old_map);

		if (dm_table_get_mode(new_map) & FMODE_WRITE)
			set_disk_ro(dm_disk(md), 0);
		else
			set_disk_ro(dm_disk(md), 1);

	if (dm_suspended_md(md)) {
			dm_ima_measure_on_device_resume(md, new_map ? true : false);

			if (!dm_kobject_uevent(md, KOBJ_CHANGE, param->event_nr))
				param->flags |= DM_UEVENT_GENERATED_FLAG;

	 * Since dm_swap_table synchronizes RCU, nobody should be in
	 * read-side critical section already.
		dm_table_destroy(old_map);

		__dev_status(md, param);

 * Set or unset the suspension state of a device.
 * If the device already is in the requested state we just return its status.
static int dev_suspend(struct file *filp, struct dm_ioctl *param, size_t param_size)
	if (param->flags & DM_SUSPEND_FLAG)
		return do_suspend(param);

	return do_resume(param);

 * Copies device info back to user space, used by
 * the create and info ioctls.
static int dev_status(struct file *filp, struct dm_ioctl *param, size_t param_size)
	struct mapped_device *md;

	md = find_device(param);

	__dev_status(md, param);
 * Build up the status struct for each target
static void retrieve_status(struct dm_table *table,
			    struct dm_ioctl *param, size_t param_size)
	unsigned int i, num_targets;
	struct dm_target_spec *spec;
	char *outbuf, *outptr;
	size_t remaining, len, used = 0;
	unsigned status_flags = 0;

	outptr = outbuf = get_result_buffer(param, param_size, &len);

	if (param->flags & DM_STATUS_TABLE_FLAG)
		type = STATUSTYPE_TABLE;
	else if (param->flags & DM_IMA_MEASUREMENT_FLAG)
		type = STATUSTYPE_IMA;
	else
		type = STATUSTYPE_INFO;

	/* Get all the target info */
	num_targets = dm_table_get_num_targets(table);
	for (i = 0; i < num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(table, i);

		remaining = len - (outptr - outbuf);
		if (remaining <= sizeof(struct dm_target_spec)) {
			param->flags |= DM_BUFFER_FULL_FLAG;

		spec = (struct dm_target_spec *) outptr;

		spec->sector_start = ti->begin;
		spec->length = ti->len;
		strncpy(spec->target_type, ti->type->name,
			sizeof(spec->target_type) - 1);

		outptr += sizeof(struct dm_target_spec);
		remaining = len - (outptr - outbuf);
		if (remaining <= 0) {
			param->flags |= DM_BUFFER_FULL_FLAG;

		/* Get the status/table string from the target driver */
		if (ti->type->status) {
			if (param->flags & DM_NOFLUSH_FLAG)
				status_flags |= DM_STATUS_NOFLUSH_FLAG;
			ti->type->status(ti, type, status_flags, outptr, remaining);

		l = strlen(outptr) + 1;
		if (l == remaining) {
			param->flags |= DM_BUFFER_FULL_FLAG;

		used = param->data_start + (outptr - outbuf);

		outptr = align_ptr(outptr);
		spec->next = outptr - outbuf;

		param->data_size = used;

	param->target_count = num_targets;
 * Wait for a device to report an event
static int dev_wait(struct file *filp, struct dm_ioctl *param, size_t param_size)
	struct mapped_device *md;
	struct dm_table *table;

	md = find_device(param);

	 * Wait for a notification event
	if (dm_wait_event(md, param->event_nr)) {

	 * The userland program is going to want to know what
	 * changed to trigger the event, so we may as well tell
	 * him and save an ioctl.
	__dev_status(md, param);

	table = dm_get_live_or_inactive_table(md, param, &srcu_idx);
		retrieve_status(table, param, param_size);
	dm_put_live_table(md, srcu_idx);

 * Remember the global event number and make it possible to poll
 * for further events.
static int dev_arm_poll(struct file *filp, struct dm_ioctl *param, size_t param_size)
	struct dm_file *priv = filp->private_data;

	priv->global_event_nr = atomic_read(&dm_global_event_nr);

static inline fmode_t get_mode(struct dm_ioctl *param)
	fmode_t mode = FMODE_READ | FMODE_WRITE;

	if (param->flags & DM_READONLY_FLAG)

static int next_target(struct dm_target_spec *last, uint32_t next, void *end,
		       struct dm_target_spec **spec, char **target_params)
	*spec = (struct dm_target_spec *) ((unsigned char *) last + next);
	*target_params = (char *) (*spec + 1);

	if (*spec < (last + 1))

	return invalid_str(*target_params, end);
static int populate_table(struct dm_table *table,
			  struct dm_ioctl *param, size_t param_size)
	struct dm_target_spec *spec = (struct dm_target_spec *) param;
	uint32_t next = param->data_start;
	void *end = (void *) param + param_size;
	char *target_params;

	if (!param->target_count) {
		DMWARN("populate_table: no targets specified");

	for (i = 0; i < param->target_count; i++) {

		r = next_target(spec, next, end, &spec, &target_params);
			DMWARN("unable to find target");

		r = dm_table_add_target(table, spec->target_type,
					(sector_t) spec->sector_start,
					(sector_t) spec->length,
					target_params);
			DMWARN("error adding target to table");

	return dm_table_complete(table);
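/*
 * Illustrative sketch (not part of the driver): the parameter block that
 * populate_table()/next_target() walk is a chain of struct dm_target_spec
 * records, each followed by its NUL-terminated parameter string, starting at
 * param->data_start.  Userspace might build a one-target "linear" table like
 * this (field names from <linux/dm-ioctl.h>; sizes and device path are made
 * up for the example):
 *
 *	struct dm_target_spec *spec = (void *)dmi + dmi->data_start;
 *	spec->sector_start = 0;
 *	spec->length = 8192;		// length in 512-byte sectors
 *	spec->next = 0;			// only consulted if another target follows
 *	strcpy(spec->target_type, "linear");
 *	strcpy((char *)(spec + 1), "/dev/sdb1 0");
 *	dmi->target_count = 1;
 */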
static bool is_valid_type(enum dm_queue_mode cur, enum dm_queue_mode new)
	    (cur == DM_TYPE_BIO_BASED && new == DM_TYPE_DAX_BIO_BASED))
static int table_load(struct file *filp, struct dm_ioctl *param, size_t param_size)
	struct hash_cell *hc;
	struct dm_table *t, *old_map = NULL;
	struct mapped_device *md;
	struct target_type *immutable_target_type;

	md = find_device(param);

	r = dm_table_create(&t, get_mode(param), param->target_count, md);

	/* Protect md->type and md->queue against concurrent table loads. */
	dm_lock_md_type(md);
	r = populate_table(t, param, param_size);
		goto err_unlock_md_type;

	dm_ima_measure_on_table_load(t, STATUSTYPE_IMA);

	immutable_target_type = dm_get_immutable_target_type(md);
	if (immutable_target_type &&
	    (immutable_target_type != dm_table_get_immutable_target_type(t)) &&
	    !dm_table_get_wildcard_target(t)) {
		DMWARN("can't replace immutable target type %s",
		       immutable_target_type->name);
		goto err_unlock_md_type;

	if (dm_get_md_type(md) == DM_TYPE_NONE) {
		/* setup md->queue to reflect md's type (may block) */
		r = dm_setup_md_queue(md, t);
			DMWARN("unable to set up device queue for new table.");
			goto err_unlock_md_type;
	} else if (!is_valid_type(dm_get_md_type(md), dm_table_get_type(t))) {
		DMWARN("can't change device type (old=%u vs new=%u) after initial table load.",
		       dm_get_md_type(md), dm_table_get_type(t));
		goto err_unlock_md_type;

	dm_unlock_md_type(md);

	/* stage inactive table */
	down_write(&_hash_lock);
	hc = dm_get_mdptr(md);
	if (!hc || hc->md != md) {
		DMWARN("device has been removed from the dev hash table.");
		up_write(&_hash_lock);
		goto err_destroy_table;

		old_map = hc->new_map;
	up_write(&_hash_lock);

	param->flags |= DM_INACTIVE_PRESENT_FLAG;
	__dev_status(md, param);

		dm_table_destroy(old_map);

	dm_unlock_md_type(md);

	dm_table_destroy(t);
static int table_clear(struct file *filp, struct dm_ioctl *param, size_t param_size)
	struct hash_cell *hc;
	struct mapped_device *md;
	struct dm_table *old_map = NULL;
	bool has_new_map = false;

	down_write(&_hash_lock);

	hc = __find_device_hash_cell(param);
		DMDEBUG_LIMIT("device doesn't appear to be in the dev hash table.");
		up_write(&_hash_lock);

		old_map = hc->new_map;

	param->flags &= ~DM_INACTIVE_PRESENT_FLAG;

	__dev_status(hc->md, param);

	up_write(&_hash_lock);

		dm_table_destroy(old_map);

	dm_ima_measure_on_table_clear(md, has_new_map);

 * Retrieves a list of devices used by a particular dm device.
static void retrieve_deps(struct dm_table *table,
			  struct dm_ioctl *param, size_t param_size)
	unsigned int count = 0;
	struct list_head *tmp;

	struct dm_dev_internal *dd;
	struct dm_target_deps *deps;

	deps = get_result_buffer(param, param_size, &len);

	 * Count the devices.
	list_for_each (tmp, dm_table_get_devices(table))

	 * Check we have enough space.
	needed = struct_size(deps, dev, count);
		param->flags |= DM_BUFFER_FULL_FLAG;

	 * Fill in the devices.
	deps->count = count;

	list_for_each_entry (dd, dm_table_get_devices(table), list)
		deps->dev[count++] = huge_encode_dev(dd->dm_dev->bdev->bd_dev);

	param->data_size = param->data_start + needed;
static int table_deps(struct file *filp, struct dm_ioctl *param, size_t param_size)
	struct mapped_device *md;
	struct dm_table *table;

	md = find_device(param);

	__dev_status(md, param);

	table = dm_get_live_or_inactive_table(md, param, &srcu_idx);
		retrieve_deps(table, param, param_size);
	dm_put_live_table(md, srcu_idx);
 * Return the status of a device as a text string for each target.
static int table_status(struct file *filp, struct dm_ioctl *param, size_t param_size)
	struct mapped_device *md;
	struct dm_table *table;

	md = find_device(param);

	__dev_status(md, param);

	table = dm_get_live_or_inactive_table(md, param, &srcu_idx);
		retrieve_status(table, param, param_size);
	dm_put_live_table(md, srcu_idx);
 * Process device-mapper dependent messages. Messages prefixed with '@'
 * are processed by the DM core. All others are delivered to the target.
 * Returns a number <= 1 if message was processed by device mapper.
 * Returns 2 if message should be delivered to the target.
static int message_for_md(struct mapped_device *md, unsigned argc, char **argv,
			  char *result, unsigned maxlen)
		return 2; /* no '@' prefix, deliver to target */

	if (!strcasecmp(argv[0], "@cancel_deferred_remove")) {
			DMERR("Invalid arguments for @cancel_deferred_remove");
		return dm_cancel_deferred_remove(md);

	r = dm_stats_message(md, argc, argv, result, maxlen);

	DMERR("Unsupported message sent to DM core: %s", argv[0]);

 * Pass a message to the target that's at the supplied device offset.
static int target_message(struct file *filp, struct dm_ioctl *param, size_t param_size)
	struct mapped_device *md;
	struct dm_table *table;
	struct dm_target *ti;
	struct dm_target_msg *tmsg = (void *) param + param->data_start;
	char *result = get_result_buffer(param, param_size, &maxlen);

	md = find_device(param);

	if (tmsg < (struct dm_target_msg *) param->data ||
	    invalid_str(tmsg->message, (void *) param + param_size)) {
		DMWARN("Invalid target message parameters.");

	r = dm_split_args(&argc, &argv, tmsg->message);
		DMWARN("Failed to split target message parameters");

		DMWARN("Empty message received.");

	r = message_for_md(md, argc, argv, result, maxlen);

	table = dm_get_live_table(md, &srcu_idx);

	if (dm_deleting_md(md)) {

	ti = dm_table_find_target(table, tmsg->sector);
		DMWARN("Target message sector outside device.");
	} else if (ti->type->message)
		r = ti->type->message(ti, argc, argv, result, maxlen);
	else
		DMWARN("Target type does not support messages");

	dm_put_live_table(md, srcu_idx);

	__dev_status(md, param);

		param->flags |= DM_DATA_OUT_FLAG;
		if (dm_message_test_buffer_overflow(result, maxlen))
			param->flags |= DM_BUFFER_FULL_FLAG;
		else
			param->data_size = param->data_start + strlen(result) + 1;
 * The ioctl parameter block consists of two parts, a dm_ioctl struct
 * followed by a data buffer. This flag is set if the second part,
 * which has a variable size, is not used by the function processing
 * the ioctl.
#define IOCTL_FLAGS_NO_PARAMS		1
#define IOCTL_FLAGS_ISSUE_GLOBAL_EVENT	2
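/*
 * Illustrative sketch (not part of the driver): every command is passed as a
 * single block starting with struct dm_ioctl; data_start is the offset of the
 * variable-sized payload within that block and data_size covers the whole
 * thing, which is how copy_params() and get_result_buffer() slice it up:
 *
 *	offset 0          : struct dm_ioctl (version, flags, name, uuid, ...)
 *	offset data_start : command-specific payload (table specs, message,
 *	                    geometry string, or room for results)
 *	offset data_size  : end of the block
 */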
/*-----------------------------------------------------------------
 * Implementation of open/close/ioctl on the special char device.
 *---------------------------------------------------------------*/
static ioctl_fn lookup_ioctl(unsigned int cmd, int *ioctl_flags)
	static const struct {
		{DM_VERSION_CMD, 0, NULL}, /* version is dealt with elsewhere */
		{DM_REMOVE_ALL_CMD, IOCTL_FLAGS_NO_PARAMS | IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, remove_all},
		{DM_LIST_DEVICES_CMD, 0, list_devices},
		{DM_DEV_CREATE_CMD, IOCTL_FLAGS_NO_PARAMS | IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, dev_create},
		{DM_DEV_REMOVE_CMD, IOCTL_FLAGS_NO_PARAMS | IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, dev_remove},
		{DM_DEV_RENAME_CMD, IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, dev_rename},
		{DM_DEV_SUSPEND_CMD, IOCTL_FLAGS_NO_PARAMS, dev_suspend},
		{DM_DEV_STATUS_CMD, IOCTL_FLAGS_NO_PARAMS, dev_status},
		{DM_DEV_WAIT_CMD, 0, dev_wait},
		{DM_TABLE_LOAD_CMD, 0, table_load},
		{DM_TABLE_CLEAR_CMD, IOCTL_FLAGS_NO_PARAMS, table_clear},
		{DM_TABLE_DEPS_CMD, 0, table_deps},
		{DM_TABLE_STATUS_CMD, 0, table_status},
		{DM_LIST_VERSIONS_CMD, 0, list_versions},
		{DM_TARGET_MSG_CMD, 0, target_message},
		{DM_DEV_SET_GEOMETRY_CMD, 0, dev_set_geometry},
		{DM_DEV_ARM_POLL, IOCTL_FLAGS_NO_PARAMS, dev_arm_poll},
		{DM_GET_TARGET_VERSION, 0, get_target_version},

	if (unlikely(cmd >= ARRAY_SIZE(_ioctls)))

	cmd = array_index_nospec(cmd, ARRAY_SIZE(_ioctls));
	*ioctl_flags = _ioctls[cmd].flags;
	return _ioctls[cmd].fn;
 * As well as checking the version compatibility this always
 * copies the kernel interface version out.
static int check_version(unsigned int cmd, struct dm_ioctl __user *user)
	uint32_t version[3];

	if (copy_from_user(version, user->version, sizeof(version)))

	if ((DM_VERSION_MAJOR != version[0]) ||
	    (DM_VERSION_MINOR < version[1])) {
		DMWARN("ioctl interface mismatch: "
		       "kernel(%u.%u.%u), user(%u.%u.%u), cmd(%d)",
		       DM_VERSION_MAJOR, DM_VERSION_MINOR,
		       DM_VERSION_PATCHLEVEL,
		       version[0], version[1], version[2], cmd);

	 * Fill in the kernel version.
	version[0] = DM_VERSION_MAJOR;
	version[1] = DM_VERSION_MINOR;
	version[2] = DM_VERSION_PATCHLEVEL;
	if (copy_to_user(user->version, version, sizeof(version)))

#define DM_PARAMS_MALLOC	0x0001	/* Params allocated with kvmalloc() */
#define DM_WIPE_BUFFER		0x0010	/* Wipe input buffer before returning from ioctl */

static void free_params(struct dm_ioctl *param, size_t param_size, int param_flags)
	if (param_flags & DM_WIPE_BUFFER)
		memset(param, 0, param_size);

	if (param_flags & DM_PARAMS_MALLOC)
static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kernel,
		       int ioctl_flags, struct dm_ioctl **param, int *param_flags)
	struct dm_ioctl *dmi;

	const size_t minimum_data_size = offsetof(struct dm_ioctl, data);

	if (copy_from_user(param_kernel, user, minimum_data_size))

	if (param_kernel->data_size < minimum_data_size)

	secure_data = param_kernel->flags & DM_SECURE_DATA_FLAG;

	*param_flags = secure_data ? DM_WIPE_BUFFER : 0;

	if (ioctl_flags & IOCTL_FLAGS_NO_PARAMS) {
		dmi->data_size = minimum_data_size;

	 * Use __GFP_HIGH to avoid low memory issues when a device is
	 * suspended and the ioctl is needed to resume it.
	 * Use kmalloc() rather than vmalloc() when we can.
	noio_flag = memalloc_noio_save();
	dmi = kvmalloc(param_kernel->data_size, GFP_KERNEL | __GFP_HIGH);
	memalloc_noio_restore(noio_flag);

		if (secure_data && clear_user(user, param_kernel->data_size))

	*param_flags |= DM_PARAMS_MALLOC;

	/* Copy from param_kernel (which was already copied from user) */
	memcpy(dmi, param_kernel, minimum_data_size);

	if (copy_from_user(&dmi->data, (char __user *)user + minimum_data_size,
			   param_kernel->data_size - minimum_data_size))

	/* Wipe the user buffer so we do not return it to userspace */
	if (secure_data && clear_user(user, param_kernel->data_size))

	free_params(dmi, param_kernel->data_size, *param_flags);

static int validate_params(uint cmd, struct dm_ioctl *param)
	/* Always clear this flag */
	param->flags &= ~DM_BUFFER_FULL_FLAG;
	param->flags &= ~DM_UEVENT_GENERATED_FLAG;
	param->flags &= ~DM_SECURE_DATA_FLAG;
	param->flags &= ~DM_DATA_OUT_FLAG;

	/* Ignores parameters */
	if (cmd == DM_REMOVE_ALL_CMD ||
	    cmd == DM_LIST_DEVICES_CMD ||
	    cmd == DM_LIST_VERSIONS_CMD)

	if (cmd == DM_DEV_CREATE_CMD) {
		if (!*param->name) {
			DMWARN("name not supplied when creating device");
	} else if (*param->uuid && *param->name) {
		DMWARN("only supply one of name or uuid, cmd(%u)", cmd);

	/* Ensure strings are terminated */
	param->name[DM_NAME_LEN - 1] = '\0';
	param->uuid[DM_UUID_LEN - 1] = '\0';
static int ctl_ioctl(struct file *file, uint command, struct dm_ioctl __user *user)
	struct dm_ioctl *param;

	size_t input_param_size;
	struct dm_ioctl param_kernel;

	/* only root can play with this */
	if (!capable(CAP_SYS_ADMIN))

	if (_IOC_TYPE(command) != DM_IOCTL)

	cmd = _IOC_NR(command);

	 * Check the interface version passed in. This also
	 * writes out the kernel's interface version.
	r = check_version(cmd, user);

	 * Nothing more to do for the version command.
	if (cmd == DM_VERSION_CMD)

	fn = lookup_ioctl(cmd, &ioctl_flags);
		DMWARN("dm_ctl_ioctl: unknown command 0x%x", command);

	 * Copy the parameters into kernel space.
	r = copy_params(user, &param_kernel, ioctl_flags, &param, &param_flags);

	input_param_size = param->data_size;
	r = validate_params(cmd, param);

	param->data_size = offsetof(struct dm_ioctl, data);
	r = fn(file, param, input_param_size);

	if (unlikely(param->flags & DM_BUFFER_FULL_FLAG) &&
	    unlikely(ioctl_flags & IOCTL_FLAGS_NO_PARAMS))
		DMERR("ioctl %d tried to output some data but has IOCTL_FLAGS_NO_PARAMS set", cmd);

	if (!r && ioctl_flags & IOCTL_FLAGS_ISSUE_GLOBAL_EVENT)
		dm_issue_global_event();

	 * Copy the results back to userland.
	if (!r && copy_to_user(user, param, param->data_size))

	free_params(param, input_param_size, param_flags);
static long dm_ctl_ioctl(struct file *file, uint command, ulong u)
	return (long)ctl_ioctl(file, command, (struct dm_ioctl __user *)u);

#ifdef CONFIG_COMPAT
static long dm_compat_ctl_ioctl(struct file *file, uint command, ulong u)
	return (long)dm_ctl_ioctl(file, command, (ulong) compat_ptr(u));

#define dm_compat_ctl_ioctl NULL

static int dm_open(struct inode *inode, struct file *filp)
	struct dm_file *priv;

	r = nonseekable_open(inode, filp);

	priv = filp->private_data = kmalloc(sizeof(struct dm_file), GFP_KERNEL);

	priv->global_event_nr = atomic_read(&dm_global_event_nr);

static int dm_release(struct inode *inode, struct file *filp)
	kfree(filp->private_data);

static __poll_t dm_poll(struct file *filp, poll_table *wait)
	struct dm_file *priv = filp->private_data;

	poll_wait(filp, &dm_global_eventq, wait);

	if ((int)(atomic_read(&dm_global_event_nr) - priv->global_event_nr) > 0)

static const struct file_operations _ctl_fops = {
	.release = dm_release,
	.unlocked_ioctl = dm_ctl_ioctl,
	.compat_ioctl = dm_compat_ctl_ioctl,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,

static struct miscdevice _dm_misc = {
	.minor = MAPPER_CTRL_MINOR,
	.nodename = DM_DIR "/" DM_CONTROL_NODE,

MODULE_ALIAS_MISCDEV(MAPPER_CTRL_MINOR);
MODULE_ALIAS("devname:" DM_DIR "/" DM_CONTROL_NODE);
 * Create misc character device and link to DM_DIR/control.
int __init dm_interface_init(void)
	r = misc_register(&_dm_misc);
		DMERR("misc_register failed for control device");

	DMINFO("%d.%d.%d%s initialised: %s", DM_VERSION_MAJOR,
	       DM_VERSION_MINOR, DM_VERSION_PATCHLEVEL, DM_VERSION_EXTRA,
	       DM_DRIVER_EMAIL);

void dm_interface_exit(void)
	misc_deregister(&_dm_misc);

 * dm_copy_name_and_uuid - Copy mapped device name & uuid into supplied buffers
 * @md: Pointer to mapped_device
 * @name: Buffer (size DM_NAME_LEN) for name
 * @uuid: Buffer (size DM_UUID_LEN) for uuid or empty string if uuid not defined
int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid)
	struct hash_cell *hc;

	mutex_lock(&dm_hash_cells_mutex);
	hc = dm_get_mdptr(md);
	if (!hc || hc->md != md) {

		strcpy(name, hc->name);
		strcpy(uuid, hc->uuid ? : "");

	mutex_unlock(&dm_hash_cells_mutex);

EXPORT_SYMBOL_GPL(dm_copy_name_and_uuid);
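/*
 * Illustrative sketch (not from the original file): a target that wants its
 * device's name for log messages could call the exported helper above from
 * its constructor or status path, with ti being the struct dm_target it was
 * given:
 *
 *	char name[DM_NAME_LEN], uuid[DM_UUID_LEN];
 *
 *	if (!dm_copy_name_and_uuid(dm_table_get_md(ti->table), name, uuid))
 *		DMINFO("bound to %s (uuid: %s)", name, uuid[0] ? uuid : "none");
 */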
 * dm_early_create - create a mapped device in early boot.
 *
 * @dmi: Contains main information of the device mapping to be created.
 * @spec_array: array of pointers to struct dm_target_spec. Describes the
 * mapping table of the device.
 * @target_params_array: array of strings with the parameters to a specific
 * target.
 *
 * Instead of having the struct dm_target_spec and the parameters for every
 * target embedded at the end of struct dm_ioctl (as performed in a normal
 * ioctl), pass them as arguments, so the caller doesn't need to serialize them.
 * The size of the spec_array and target_params_array is given by
 * @dmi->target_count.
 * This function is supposed to be called in early boot, so locking mechanisms
 * to protect against concurrent loads are not required.
int __init dm_early_create(struct dm_ioctl *dmi,
			   struct dm_target_spec **spec_array,
			   char **target_params_array)
	int r, m = DM_ANY_MINOR;
	struct dm_table *t, *old_map;
	struct mapped_device *md;

	if (!dmi->target_count)

	r = check_name(dmi->name);

	if (dmi->flags & DM_PERSISTENT_DEV_FLAG)
		m = MINOR(huge_decode_dev(dmi->dev));

	/* alloc dm device */
	r = dm_create(m, &md);

	r = dm_hash_insert(dmi->name, *dmi->uuid ? dmi->uuid : NULL, md);
		goto err_destroy_dm;

	r = dm_table_create(&t, get_mode(dmi), dmi->target_count, md);
		goto err_hash_remove;

	for (i = 0; i < dmi->target_count; i++) {
		r = dm_table_add_target(t, spec_array[i]->target_type,
					(sector_t) spec_array[i]->sector_start,
					(sector_t) spec_array[i]->length,
					target_params_array[i]);
			DMWARN("error adding target to table");
			goto err_destroy_table;

	r = dm_table_complete(t);
		goto err_destroy_table;

	/* setup md->queue to reflect md's type (may block) */
	r = dm_setup_md_queue(md, t);
		DMWARN("unable to set up device queue for new table.");
		goto err_destroy_table;

	old_map = dm_swap_table(md, t);
	if (IS_ERR(old_map)) {
		r = PTR_ERR(old_map);
		goto err_destroy_table;
	set_disk_ro(dm_disk(md), !!(dmi->flags & DM_READONLY_FLAG));

		goto err_destroy_table;

	DMINFO("%s (%s) is ready", md->disk->disk_name, dmi->name);

	dm_table_destroy(t);

	(void) __hash_remove(__get_name_cell(dmi->name));
	/* release reference from __get_name_cell */
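/*
 * Illustrative sketch (not from the original file): dm_early_create() is
 * driven from early-boot code rather than the ioctl path, so the caller
 * builds the spec and parameter arrays directly.  A hypothetical single
 * "linear" mapping might be set up like this (names and sizes invented for
 * the example):
 *
 *	struct dm_ioctl dmi = { .name = "boot-map", .target_count = 1 };
 *	struct dm_target_spec spec = { .sector_start = 0, .length = 8192 };
 *	struct dm_target_spec *specs[] = { &spec };
 *	char *params[] = { "/dev/sda2 0" };
 *
 *	strcpy(spec.target_type, "linear");
 *	int r = dm_early_create(&dmi, specs, params);
 */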