2 * snippets copied from device-mapper dmsetup.c
3 * Copyright (c) 2004, 2005 Christophe Varoqui
4 * Copyright (c) 2005 Kiyoshi Ueda, NEC
5 * Copyright (c) 2005 Patrick Caulfield, Redhat
11 #include <libdevmapper.h>
21 #include "devmapper.h"
24 #include "log_pthread.h"
25 #include <sys/types.h>
/* Polling granularity used by dm_mapname(): it retries MAX_WAIT * LOOPS_PER_SEC
 * times, sleeping 1/LOOPS_PER_SEC second between attempts. */
29 #define LOOPS_PER_SEC 5
/* Device-mapper UUID of a multipath map is the WWID prefixed with "mpath-";
 * UUID_PREFIX_LEN must equal strlen(UUID_PREFIX). */
31 #define UUID_PREFIX "mpath-"
32 #define UUID_PREFIX_LEN 6
/* Verbosity threshold consulted by dm_write_log(); set from multipath's
 * configured verbosity (initialisation not visible in this excerpt). */
34 static int dm_conf_verbosity;
36 #ifdef LIBDM_API_DEFERRED
/* Forward declaration: cancel pending deferred removes on all partition
 * maps stacked on "mapname" (definition near the end of this file). */
37 static int dm_cancel_remove_partmaps(const char * mapname);
/* Forward declaration: iterate over every partition map layered on top of
 * "mapname" and invoke partmap_func(partmap_name, data) for each.
 * (The third parameter of the prototype is not visible in this excerpt.) */
40 static int do_foreach_partmaps(const char * mapname,
41 int (*partmap_func)(const char *, void *),
/* Compatibility stubs for libdevmapper builds without udev-cookie support:
 * provide no-op versions of the cookie/udev-sync API so the rest of the
 * file can call them unconditionally.  (Stub bodies are not visible here;
 * presumably they return success / do nothing -- verify in full source.) */
44 #ifndef LIBDM_API_COOKIE
45 static inline int dm_task_set_cookie(struct dm_task *dmt, uint32_t *c, int a)
50 void dm_udev_wait(unsigned int c)
54 void dm_udev_set_sync_support(int c)
/*
 * Log callback registered with libdevmapper (see dm_log_init() below).
 * Routes libdevmapper's own messages through multipath logging: messages
 * are dropped when the configured verbosity threshold is <= 3 or the
 * message level exceeds it.  Several lines are missing from this excerpt;
 * the choice between the stdout path and the condlog/log_safe path is not
 * visible -- presumably it depends on whether the logging thread / daemon
 * mode is active (TODO confirm against full source).
 */
61 dm_write_log (int level, const char *file, int line, const char *f, ...)
69 thres = dm_conf_verbosity;
70 if (thres <= 3 || level > thres)
/* stdout path: prefix each message with a "Mon dd HH:MM:SS | " timestamp. */
76 time_t t = time(NULL);
77 struct tm *tb = localtime(&t);
80 strftime(buff, sizeof(buff), "%b %d %H:%M:%S", tb);
81 buff[sizeof(buff)-1] = '\0';
83 fprintf(stdout, "%s | ", buff);
85 fprintf(stdout, "libdevmapper: %s(%i): ", file, line);
86 vfprintf(stdout, f, ap);
87 fprintf(stdout, "\n");
/* logging-infrastructure path: emit the origin via condlog() and the
 * formatted message via log_safe(); note the +3 level offset, matching
 * the "v + 3" passed to dm_log_init_verbose() below. */
89 condlog(level, "libdevmapper: %s(%i): ", file, line);
90 log_safe(level + 3, f, ap);
/* Register dm_write_log() as libdevmapper's logger; libdm verbosity runs
 * 3 levels above multipath verbosity (mirrors the offsets in dm_write_log). */
99 dm_log_init(&dm_write_log);
100 dm_log_init_verbose(v + 3);
/*
 * Minimum required libdevmapper version, chosen by which optional APIs
 * were detected at build time: deferred remove needs 1.02.89, the udev
 * subsystem flags 1.02.82, udev cookies 1.02.38, otherwise 1.02.8.
 */
108 #if defined(LIBDM_API_DEFERRED)
109 int minv[3] = {1, 2, 89};
110 #elif defined(DM_SUBSYSTEM_UDEV_FLAG0)
111 int minv[3] = {1, 2, 82};
112 #elif defined(LIBDM_API_COOKIE)
113 int minv[3] = {1, 2, 38};
115 int minv[3] = {1, 2, 8};
/* Parse the runtime library version string "X.Y.Z ..." into v[]. */
118 dm_get_library_version(version, sizeof(version));
119 condlog(3, "libdevmapper version %s", version);
120 if (sscanf(version, "%d.%d.%d ", &v[0], &v[1], &v[2]) != 3) {
121 condlog(0, "invalid libdevmapper version %s", version);
/* NOTE(review): "if VERSION_GE(v, minv)" has no parentheses of its own,
 * so VERSION_GE must expand to a fully parenthesized expression; the
 * success branch is not visible in this excerpt. */
125 if VERSION_GE(v, minv)
127 condlog(0, "libdevmapper version must be >= %d.%.2d.%.2d",
128 minv[0], minv[1], minv[2]);
/*
 * Look up the kernel driver version of target "str" (e.g. "multipath")
 * via DM_DEVICE_LIST_VERSIONS and store it in version[0..2].
 * Returns nonzero on failure (exact return statements not visible here).
 */
133 dm_drv_version (unsigned int * version, char * str)
137 struct dm_versions *target;
138 struct dm_versions *last_target;
145 if (!(dmt = dm_task_create(DM_DEVICE_LIST_VERSIONS)))
148 dm_task_no_open_count(dmt);
150 if (!dm_task_run(dmt)) {
151 condlog(0, "Can not communicate with kernel DM");
154 target = dm_task_get_versions(dmt);
/* Walk the variable-length target list: each entry stores the byte offset
 * of the next entry in ->next; the list terminates when the offset is 0,
 * i.e. when the advanced pointer equals the previous one. */
157 last_target = target;
158 if (!strncmp(str, target->name, strlen(str))) {
162 target = (void *) target + target->next;
163 } while (last_target != target);
166 condlog(0, "DM %s kernel driver not loaded", str);
175 dm_task_destroy(dmt);
/* Kernel-side prerequisite check: the dm-multipath target must be loaded
 * and at least version 1.0.3 (request-based multipath capable). */
182 unsigned int minv[3] = {1, 0, 3};
183 unsigned int version[3] = {0, 0, 0};
184 unsigned int * v = version;
186 if (dm_drv_version(v, TGT_MPATH)) {
187 /* in doubt return not capable */
191 /* test request based multipath capability */
192 condlog(3, "DM multipath kernel driver v%u.%u.%u",
195 if VERSION_GE(v, minv)
198 condlog(0, "DM multipath kernel driver must be >= v%u.%u.%u",
199 minv[0], minv[1], minv[2]);
/* Combined prerequisite entry point: library check happens earlier (not
 * visible); here it delegates to the kernel driver check. */
208 return dm_drv_prereq();
/* True for both deferred-remove states (scheduled or already in progress). */
211 #define do_deferred(x) ((x) == DEFERRED_REMOVE_ON || (x) == DEFERRED_REMOVE_IN_PROGRESS)
/*
 * Run a simple (no-table) device-mapper ioctl "task" against map "name".
 * Waits on a udev cookie only for RESUME/REMOVE when need_sync is set;
 * no_flush is honored only when libdevmapper supports it, and deferred
 * remove only with LIBDM_API_DEFERRED.  Returns the dm_task_run() result.
 */
214 dm_simplecmd (int task, const char *name, int no_flush, int need_sync, uint16_t udev_flags, int deferred_remove) {
216 int udev_wait_flag = (need_sync && (task == DM_DEVICE_RESUME ||
217 task == DM_DEVICE_REMOVE));
221 if (!(dmt = dm_task_create (task)))
224 if (!dm_task_set_name (dmt, name))
227 dm_task_no_open_count(dmt);
228 dm_task_skip_lockfs(dmt); /* for DM_DEVICE_RESUME */
229 #ifdef LIBDM_API_FLUSH
231 dm_task_no_flush(dmt); /* for DM_DEVICE_SUSPEND/RESUME */
233 #ifdef LIBDM_API_DEFERRED
234 if (do_deferred(deferred_remove))
235 dm_task_deferred_remove(dmt);
/* Set a udev cookie so we can wait for udev rule processing after the
 * ioctl; DM_UDEV_DISABLE_LIBRARY_FALLBACK prevents libdm from creating
 * the node itself when udev is expected to do it. */
237 if (udev_wait_flag &&
238 !dm_task_set_cookie(dmt, &cookie,
239 DM_UDEV_DISABLE_LIBRARY_FALLBACK | udev_flags))
242 r = dm_task_run (dmt);
245 dm_udev_wait(cookie);
247 dm_task_destroy (dmt);
/* Convenience wrapper: synchronous simple command, flushing I/O. */
252 dm_simplecmd_flush (int task, const char *name, uint16_t udev_flags) {
253 return dm_simplecmd(task, name, 0, 1, udev_flags, 0);
/* Convenience wrapper: synchronous simple command, suppressing the flush. */
257 dm_simplecmd_noflush (int task, const char *name, uint16_t udev_flags) {
258 return dm_simplecmd(task, name, 1, 1, udev_flags, 0);
/* Remove a map, optionally synchronous and/or deferred. */
262 dm_device_remove (const char *name, int needsync, int deferred_remove) {
263 return dm_simplecmd(DM_DEVICE_REMOVE, name, 0, needsync, 0,
/*
 * Load a single-target table ("target" driver, text "params", length
 * mpp->size sectors) for map mpp->alias via DM_DEVICE_CREATE or
 * DM_DEVICE_RELOAD.  On CREATE it also sets the "mpath-<wwid>" UUID,
 * ownership/mode attributes, and waits on a udev cookie.
 * "ro" selects a read-only table (the dm_task_set_ro call is in a line
 * not visible in this excerpt).  Returns the dm_task_run() result.
 */
268 dm_addmap (int task, const char *target, struct multipath *mpp,
269 char * params, int ro) {
272 char *prefixed_uuid = NULL;
275 if (!(dmt = dm_task_create (task)))
278 if (!dm_task_set_name (dmt, mpp->alias))
281 if (!dm_task_add_target (dmt, 0, mpp->size, target, params))
/* The dm UUID can only be set at create time; it is the WWID with the
 * "mpath-" prefix. */
287 if (task == DM_DEVICE_CREATE) {
288 if (strlen(mpp->wwid) > 0) {
289 prefixed_uuid = MALLOC(UUID_PREFIX_LEN +
290 strlen(mpp->wwid) + 1);
291 if (!prefixed_uuid) {
292 condlog(0, "cannot create prefixed uuid : %s",
296 sprintf(prefixed_uuid, UUID_PREFIX "%s", mpp->wwid);
297 if (!dm_task_set_uuid(dmt, prefixed_uuid))
300 dm_task_skip_lockfs(dmt);
301 #ifdef LIBDM_API_FLUSH
302 dm_task_no_flush(dmt);
/* Apply optional mode/uid/gid attributes configured on the multipath. */
306 if (mpp->attribute_flags & (1 << ATTR_MODE) &&
307 !dm_task_set_mode(dmt, mpp->mode))
309 if (mpp->attribute_flags & (1 << ATTR_UID) &&
310 !dm_task_set_uid(dmt, mpp->uid))
312 if (mpp->attribute_flags & (1 << ATTR_GID) &&
313 !dm_task_set_gid(dmt, mpp->gid))
315 condlog(4, "%s: %s [0 %llu %s %s]", mpp->alias,
316 task == DM_DEVICE_RELOAD ? "reload" : "addmap", mpp->size,
319 dm_task_no_open_count(dmt);
/* Only CREATE triggers udev events worth waiting for; RELOAD cookies are
 * only released on the following RESUME (see dm_addmap_reload). */
321 if (task == DM_DEVICE_CREATE &&
322 !dm_task_set_cookie(dmt, &cookie,
323 DM_UDEV_DISABLE_LIBRARY_FALLBACK))
326 r = dm_task_run (dmt);
328 if (task == DM_DEVICE_CREATE)
329 dm_udev_wait(cookie);
335 dm_task_destroy (dmt);
/*
 * Create a multipath map, retrying read-only (ro=1) if the read/write
 * attempt fails.  A failed CREATE can leave a created-but-empty device
 * behind (CREATE = DM_DEV_CREATE + DM_TABLE_LOAD); clean that up.
 */
341 dm_addmap_create (struct multipath *mpp, char * params) {
344 for (ro = 0; ro <= 1; ro++) {
347 if (dm_addmap(DM_DEVICE_CREATE, TGT_MPATH, mpp, params, ro))
350 * DM_DEVICE_CREATE is actually DM_DEV_CREATE + DM_TABLE_LOAD.
351 * Failing the second part leaves an empty map. Clean it up.
354 if (dm_map_present(mpp->alias)) {
355 condlog(3, "%s: failed to load map (a path might be in use)", mpp->alias);
356 dm_flush_map_nosync(mpp->alias);
359 condlog(3, "%s: failed to load map, error %d",
/*
 * Reload the table of an existing map.  A RELOAD cookie is only released
 * by the subsequent RESUME, so the udev synchronisation happens on the
 * RESUME step.  Retry logic between the two dm_addmap calls is in lines
 * not visible here (presumably a read-only retry -- verify in full source).
 */
371 dm_addmap_reload (struct multipath *mpp, char *params, int flush)
374 uint16_t udev_flags = flush ? 0 : MPATH_UDEV_RELOAD_FLAG;
377 * DM_DEVICE_RELOAD cannot wait on a cookie, as
378 * the cookie will only ever be released after an
379 * DM_DEVICE_RESUME. So call DM_DEVICE_RESUME
380 * after each successful call to DM_DEVICE_RELOAD.
382 r = dm_addmap(DM_DEVICE_RELOAD, TGT_MPATH, mpp, params, ADDMAP_RW);
386 r = dm_addmap(DM_DEVICE_RELOAD, TGT_MPATH, mpp,
390 r = dm_simplecmd(DM_DEVICE_RESUME, mpp->alias, flush,
/* Return whether a device-mapper device named "str" exists (DM_DEVICE_INFO;
 * the info.exists test is in a line not visible in this excerpt). */
396 dm_map_present (const char * str)
402 if (!(dmt = dm_task_create(DM_DEVICE_INFO)))
405 if (!dm_task_set_name(dmt, str))
408 dm_task_no_open_count(dmt);
410 if (!dm_task_run(dmt))
413 if (!dm_task_get_info(dmt, &info))
419 dm_task_destroy(dmt);
/*
 * Fetch the (first target's) table line of map "name": its length in
 * sectors into *size and the parameter string into outparams
 * (PARAMS_SIZE buffer supplied by the caller).
 */
424 dm_get_map(const char * name, unsigned long long * size, char * outparams)
428 uint64_t start, length;
429 char *target_type = NULL;
432 if (!(dmt = dm_task_create(DM_DEVICE_TABLE)))
435 if (!dm_task_set_name(dmt, name))
438 dm_task_no_open_count(dmt);
440 if (!dm_task_run(dmt))
443 /* Fetch 1st target */
/* NOTE(review): "¶ms" below looks like a mojibake of "&params"
 * (HTML-entity corruption in this excerpt) -- restore from pristine source. */
444 dm_get_next_target(dmt, NULL, &start, &length,
445 &target_type, ¶ms);
/* NOTE(review): snprintf returns the would-be length; "<= PARAMS_SIZE"
 * accepts a truncated copy (return == PARAMS_SIZE means truncation).
 * Should likely be "< PARAMS_SIZE" -- confirm intent in full source. */
454 if (snprintf(outparams, PARAMS_SIZE, "%s", params) <= PARAMS_SIZE)
457 dm_task_destroy(dmt);
/*
 * Copy the raw device-mapper UUID of map "name" (including any "mpath-"
 * prefix) into "uuid".  NOTE(review): the strcpy below is unbounded; the
 * caller's buffer is presumably WWID_SIZE (see dm_get_uuid) -- confirm
 * that dm UUIDs cannot exceed it.
 */
462 dm_get_prefixed_uuid(const char *name, char *uuid)
468 dmt = dm_task_create(DM_DEVICE_INFO);
472 if (!dm_task_set_name (dmt, name))
475 if (!dm_task_run(dmt))
478 uuidtmp = dm_task_get_uuid(dmt);
480 strcpy(uuid, uuidtmp);
486 dm_task_destroy(dmt);
/* Return the WWID of map "name": the dm UUID with the "mpath-" prefix
 * stripped when present, otherwise the UUID verbatim. */
491 dm_get_uuid(char *name, char *uuid)
493 char uuidtmp[WWID_SIZE];
495 if (dm_get_prefixed_uuid(name, uuidtmp))
498 if (!strncmp(uuidtmp, UUID_PREFIX, UUID_PREFIX_LEN))
499 strcpy(uuid, uuidtmp + UUID_PREFIX_LEN);
501 strcpy(uuid, uuidtmp);
/* Compare two maps by UUID suffix: equal when both UUIDs contain a
 * "mpath-..." suffix and those suffixes match exactly (this makes a
 * partition map, e.g. "part1-mpath-XYZ", compare equal to its parent
 * "mpath-XYZ"). */
508 * 0 : if both uuids end with same suffix which starts with UUID_PREFIX
512 dm_compare_uuid(const char* mapname1, const char* mapname2)
515 char uuid1[WWID_SIZE], uuid2[WWID_SIZE];
517 if (dm_get_prefixed_uuid(mapname1, uuid1))
520 if (dm_get_prefixed_uuid(mapname2, uuid2))
523 p1 = strstr(uuid1, UUID_PREFIX);
524 p2 = strstr(uuid2, UUID_PREFIX);
525 if (p1 && p2 && !strcmp(p1, p2))
/*
 * Fetch the kernel status line of the first target of map "name" into
 * outstatus (PARAMS_SIZE buffer supplied by the caller).
 */
532 dm_get_status(char * name, char * outstatus)
536 uint64_t start, length;
540 if (!(dmt = dm_task_create(DM_DEVICE_STATUS)))
543 if (!dm_task_set_name(dmt, name))
546 dm_task_no_open_count(dmt);
548 if (!dm_task_run(dmt))
551 /* Fetch 1st target */
552 dm_get_next_target(dmt, NULL, &start, &length,
553 &target_type, &status);
/* NOTE(review): same "<= PARAMS_SIZE" truncation-acceptance issue as in
 * dm_get_map() -- return == PARAMS_SIZE means the copy was truncated. */
555 if (snprintf(outstatus, PARAMS_SIZE, "%s", status) <= PARAMS_SIZE)
559 condlog(0, "%s: error getting map status string", name);
561 dm_task_destroy(dmt);
/*
 * Test whether the first target of map "name" has target type "type"
 * (e.g. TGT_MPATH or TGT_PART); the return-value conventions are in
 * lines not visible here.
 */
572 dm_type(const char * name, char * type)
576 uint64_t start, length;
577 char *target_type = NULL;
580 if (!(dmt = dm_task_create(DM_DEVICE_TABLE)))
583 if (!dm_task_set_name(dmt, name))
586 dm_task_no_open_count(dmt);
588 if (!dm_task_run(dmt))
591 /* Fetch 1st target */
/* NOTE(review): "¶ms" is mojibake for "&params" -- restore from
 * pristine source. */
592 dm_get_next_target(dmt, NULL, &start, &length,
593 &target_type, ¶ms);
597 else if (!strcmp(target_type, type))
601 dm_task_destroy(dmt);
/*
 * Decide whether map "name" is a multipath map: it must exist, its dm
 * UUID must start with "mpath-", and its first (and only) target must be
 * of type TGT_MPATH.
 */
606 dm_is_mpath(const char * name)
611 uint64_t start, length;
612 char *target_type = NULL;
616 if (!(dmt = dm_task_create(DM_DEVICE_TABLE)))
619 if (!dm_task_set_name(dmt, name))
622 dm_task_no_open_count(dmt);
624 if (!dm_task_run(dmt))
627 if (!dm_task_get_info(dmt, &info) || !info.exists)
630 uuid = dm_task_get_uuid(dmt);
632 if (!uuid || strncmp(uuid, UUID_PREFIX, UUID_PREFIX_LEN) != 0)
635 /* Fetch 1st target */
/* NOTE(review): "¶ms" is mojibake for "&params". */
636 dm_get_next_target(dmt, NULL, &start, &length, &target_type, ¶ms);
638 if (!target_type || strcmp(target_type, TGT_MPATH) != 0)
643 dm_task_destroy(dmt);
/*
 * Format map "mapname"'s device number as "major:minor" into dev_t[len].
 * NOTE(review): "> len" lets a result of exactly len through, but snprintf
 * truncates (and NUL-terminates) at len-1 in that case; ">= len" would be
 * the strict check -- confirm intent in full source.
 */
648 dm_dev_t (const char * mapname, char * dev_t, int len)
654 if (!(dmt = dm_task_create(DM_DEVICE_INFO)))
657 if (!dm_task_set_name(dmt, mapname))
660 if (!dm_task_run(dmt))
663 if (!dm_task_get_info(dmt, &info) || !info.exists)
666 if (snprintf(dev_t, len, "%i:%i", info.major, info.minor) > len)
671 dm_task_destroy(dmt);
/* Return the open count of map "mapname" from DM_DEVICE_INFO
 * (the value extraction is in a line not visible in this excerpt). */
676 dm_get_opencount (const char * mapname)
682 if (!(dmt = dm_task_create(DM_DEVICE_INFO)))
685 if (!dm_task_set_name(dmt, mapname))
688 if (!dm_task_run(dmt))
691 if (!dm_task_get_info(dmt, &info))
699 dm_task_destroy(dmt)
/* Return the major number of map "mapname" (same DM_DEVICE_INFO pattern). */
704 dm_get_major (char * mapname)
710 if (!(dmt = dm_task_create(DM_DEVICE_INFO)))
713 if (!dm_task_set_name(dmt, mapname))
716 if (!dm_task_run(dmt))
719 if (!dm_task_get_info(dmt, &info))
727 dm_task_destroy(dmt);
/* Return the minor number of map "mapname" (same DM_DEVICE_INFO pattern). */
732 dm_get_minor (char * mapname)
738 if (!(dmt = dm_task_create(DM_DEVICE_INFO)))
741 if (!dm_task_set_name(dmt, mapname))
744 if (!dm_task_run(dmt))
747 if (!dm_task_get_info(dmt, &info))
755 dm_task_destroy(dmt);
/*
 * Test whether map "name" is held open by anything other than its own
 * partition maps: counts partitions via do_foreach_partmaps (recursing
 * through this same function as the callback) and compares against the
 * map's open count.  "data", when non-NULL, receives the partition count
 * for the parent invocation.
 */
760 partmap_in_use(const char *name, void *data)
762 int part_count, *ret_count = (int *)data;
763 int open_count = dm_get_opencount(name);
769 if (do_foreach_partmaps(name, partmap_in_use, &part_count))
771 if (open_count != part_count) {
772 condlog(2, "%s: map in use", name);
/*
 * Tear down multipath map "mapname": verify it is a multipath map, refuse
 * if it (or a partition) is in use (unless doing a deferred remove),
 * remove its partition maps, then remove the map itself.
 */
780 _dm_flush_map (const char * mapname, int need_sync, int deferred_remove)
784 if (!dm_is_mpath(mapname))
785 return 0; /* nothing to do */
787 /* If you aren't doing a deferred remove, make sure that no
788 * devices are in use */
789 if (!do_deferred(deferred_remove) && partmap_in_use(mapname, NULL))
792 if (dm_remove_partmaps(mapname, need_sync, deferred_remove))
795 if (!do_deferred(deferred_remove) && dm_get_opencount(mapname)) {
796 condlog(2, "%s: map in use", mapname);
800 r = dm_device_remove(mapname, need_sync, deferred_remove);
/* A deferred remove leaves the map present until its last opener closes. */
803 if (do_deferred(deferred_remove) && dm_map_present(mapname)) {
804 condlog(4, "multipath map %s remove deferred",
808 condlog(4, "multipath map %s removed", mapname);
/* Flush a map that has no usable paths left.  With deferred-remove
 * support the caller's deferred_remove mode is honored; without it the
 * parameter is accepted but ignored (always immediate). */
814 #ifdef LIBDM_API_DEFERRED
817 dm_flush_map_nopaths(const char * mapname, int deferred_remove)
819 return _dm_flush_map(mapname, 1, deferred_remove);
825 dm_flush_map_nopaths(const char * mapname, int deferred_remove)
827 return _dm_flush_map(mapname, 1, 0);
/*
 * Remove a map safely even if queue_if_no_path is active: first disable
 * queueing (outstanding I/O would otherwise block the suspend forever),
 * suspend, then flush.  If the flush fails, resume the map and restore
 * queue_if_no_path so the device is left as it was found.
 */
833 dm_suspend_and_flush_map (const char * mapname)
835 int s = 0, queue_if_no_path = 0;
836 unsigned long long mapsize;
837 char params[PARAMS_SIZE] = {0};
839 if (!dm_is_mpath(mapname))
840 return 0; /* nothing to do */
842 if (!dm_get_map(mapname, &mapsize, params)) {
843 if (strstr(params, "queue_if_no_path"))
844 queue_if_no_path = 1;
847 if (queue_if_no_path)
848 s = dm_queue_if_no_path((char *)mapname, 0);
849 /* Leave queue_if_no_path alone if unset failed */
851 queue_if_no_path = 0;
853 s = dm_simplecmd_flush(DM_DEVICE_SUSPEND, mapname, 0);
855 if (!dm_flush_map(mapname)) {
856 condlog(4, "multipath map %s removed", mapname);
/* Flush failed: undo the suspend and re-enable queueing if we disabled it. */
859 condlog(2, "failed to remove multipath map %s", mapname);
860 dm_simplecmd_noflush(DM_DEVICE_RESUME, mapname, 0);
861 if (queue_if_no_path)
862 s = dm_queue_if_no_path((char *)mapname, 1);
/*
 * Interior of a flush-all function (its header is not visible in this
 * excerpt -- presumably dm_flush_maps): list every dm device and try to
 * suspend-and-flush each, OR-ing the failures into r.  The name list is
 * walked via the per-entry byte offset "next".
 */
871 struct dm_names *names;
874 if (!(dmt = dm_task_create (DM_DEVICE_LIST)))
877 dm_task_no_open_count(dmt);
879 if (!dm_task_run (dmt))
882 if (!(names = dm_task_get_names (dmt)))
889 r |= dm_suspend_and_flush_map(names->name);
891 names = (void *) names + next;
895 dm_task_destroy (dmt);
/*
 * Send a target message (DM_DEVICE_TARGET_MSG, sector 0) to map "mapname".
 * Used for all multipath runtime controls below (fail/reinstate path,
 * queueing mode, path-group switching).
 */
900 dm_message(const char * mapname, char * message)
905 if (!(dmt = dm_task_create(DM_DEVICE_TARGET_MSG)))
908 if (!dm_task_set_name(dmt, mapname))
911 if (!dm_task_set_sector(dmt, 0))
914 if (!dm_task_set_message(dmt, message))
917 dm_task_no_open_count(dmt);
919 if (!dm_task_run(dmt))
925 condlog(0, "DM message failed [%s]", message);
927 dm_task_destroy(dmt);
/* Mark path "path" (a dev_t string) failed in map "mapname".
 * NOTE(review): the "> 32" truncation checks below allow a result of
 * exactly 32 (which snprintf truncates); ">= 32" would be strict. */
932 dm_fail_path(char * mapname, char * path)
936 if (snprintf(message, 32, "fail_path %s", path) > 32)
939 return dm_message(mapname, message);
/* Reinstate a previously failed path in map "mapname". */
943 dm_reinstate_path(char * mapname, char * path)
947 if (snprintf(message, 32, "reinstate_path %s", path) > 32)
950 return dm_message(mapname, message);
/* Enable or disable queue_if_no_path on the map. */
954 dm_queue_if_no_path(char *mapname, int enable)
959 message = "queue_if_no_path";
961 message = "fail_if_no_path";
963 return dm_message(mapname, message);
/* Send "<msg>_group <index>" -- shared body of the group controls below. */
967 dm_groupmsg (char * msg, char * mapname, int index)
971 if (snprintf(message, 32, "%s_group %i", msg, index) > 32)
974 return dm_message(mapname, message);
978 dm_switchgroup(char * mapname, int index)
980 return dm_groupmsg("switch", mapname, index);
984 dm_enablegroup(char * mapname, int index)
986 return dm_groupmsg("enable", mapname, index);
990 dm_disablegroup(char * mapname, int index)
992 return dm_groupmsg("disable", mapname, index);
/*
 * Populate vector "mp" with a struct multipath for every multipath map
 * currently known to device-mapper: alias, size, WWID and dm_info are
 * filled in for each.  Non-multipath dm devices are skipped.
 */
996 dm_get_maps (vector mp)
998 struct multipath * mpp;
1000 struct dm_task *dmt;
1001 struct dm_names *names;
1007 if (!(dmt = dm_task_create(DM_DEVICE_LIST)))
1010 dm_task_no_open_count(dmt);
1012 if (!dm_task_run(dmt))
1015 if (!(names = dm_task_get_names(dmt)))
/* An empty device list is not an error. */
1019 r = 0; /* this is perfectly valid */
1024 if (!dm_is_mpath(names->name))
1027 mpp = alloc_multipath();
1032 mpp->alias = STRDUP(names->name);
1037 if (dm_get_map(names->name, &mpp->size, NULL))
1040 dm_get_uuid(names->name, mpp->wwid);
1041 dm_get_info(names->name, &mpp->dmi);
1043 if (!vector_alloc_slot(mp))
1046 vector_set_slot(mp, mpp);
/* Advance to the next name entry via its byte offset. */
1050 names = (void *) names + next;
/* Error path: release the partially built multipath, keeping paths. */
1056 free_multipath(mpp, KEEP_PATHS);
1058 dm_task_destroy (dmt);
/* Return the current dm event number of map "name" (used to wait for the
 * next table event); standard DM_DEVICE_INFO query. */
1063 dm_geteventnr (char *name)
1065 struct dm_task *dmt;
1066 struct dm_info info;
1069 if (!(dmt = dm_task_create(DM_DEVICE_INFO)))
1072 if (!dm_task_set_name(dmt, name))
1075 dm_task_no_open_count(dmt);
1077 if (!dm_task_run(dmt))
1080 if (!dm_task_get_info(dmt, &info))
1084 event = info.event_nr;
1087 dm_task_destroy(dmt);
/*
 * Resolve a (major, minor) pair to a freshly allocated map name
 * (STRDUP'ed; caller frees).  Retries up to MAX_WAIT seconds at
 * LOOPS_PER_SEC attempts per second because the device map may not yet
 * be ready when called from the uevent path.  Returns NULL on failure.
 */
1093 dm_mapname(int major, int minor)
1095 char * response = NULL;
1097 struct dm_task *dmt;
1099 int loop = MAX_WAIT * LOOPS_PER_SEC;
1101 if (!(dmt = dm_task_create(DM_DEVICE_STATUS)))
1104 if (!dm_task_set_major(dmt, major) ||
1105 !dm_task_set_minor(dmt, minor))
1108 dm_task_no_open_count(dmt);
1111 * device map might not be ready when we get here from
1112 * daemon uev_trigger -> uev_add_map
1115 r = dm_task_run(dmt);
1120 usleep(1000 * 1000 / LOOPS_PER_SEC);
1124 condlog(0, "%i:%i: timeout fetching map name", major, minor);
1128 map = dm_task_get_name(dmt);
1129 if (map && strlen(map))
1130 response = STRDUP((char *)dm_task_get_name(dmt));
1132 dm_task_destroy(dmt);
/* Error path: destroy the task and report the failure. */
1135 dm_task_destroy(dmt);
1136 condlog(0, "%i:%i: error fetching map name", major, minor);
/*
 * Invoke partmap_func(name, data) for every dm device that is a partition
 * map ("linear" target, TGT_PART) sitting on top of "mapname".  A device
 * qualifies when its uuid shares the "mpath-..." suffix with mapname AND
 * its table maps over mapname's major:minor.  Iteration stops with an
 * error if any callback returns nonzero.
 */
1141 do_foreach_partmaps (const char * mapname,
1142 int (*partmap_func)(const char *, void *),
1145 struct dm_task *dmt;
1146 struct dm_names *names;
1148 char params[PARAMS_SIZE];
1149 unsigned long long size;
1153 if (!(dmt = dm_task_create(DM_DEVICE_LIST)))
1156 dm_task_no_open_count(dmt);
1158 if (!dm_task_run(dmt))
1161 if (!(names = dm_task_get_names(dmt)))
/* An empty device list is not an error. */
1165 r = 0; /* this is perfectly valid */
1169 if (dm_dev_t(mapname, &dev_t[0], 32))
1175 * if devmap target is "linear"
1177 (dm_type(names->name, TGT_PART) > 0) &&
1180 * and both uuid end with same suffix starting
1183 (!dm_compare_uuid(names->name, mapname)) &&
1186 * and we can fetch the map table from the kernel
/* NOTE(review): "¶ms[0]" is mojibake for "&params[0]". */
1188 !dm_get_map(names->name, &size, ¶ms[0]) &&
1191 * and the table maps over the multipath map
1193 strstr(params, dev_t)
1195 if (partmap_func(names->name, data) != 0)
/* Advance to the next name entry via its byte offset. */
1200 names = (void *) names + next;
1205 dm_task_destroy (dmt);
/* Options threaded through do_foreach_partmaps() into remove_partmap().
 * (The need_sync member's declaration line is not visible in this excerpt.) */
1209 struct remove_data {
1211 int deferred_remove;
/*
 * Callback: remove one partition map.  If it is still open, first remove
 * any maps stacked on it (recursively), and -- unless a deferred remove
 * is requested -- give up when it remains open afterwards.
 */
1215 remove_partmap(const char *name, void *data)
1217 struct remove_data *rd = (struct remove_data *)data;
1219 if (dm_get_opencount(name)) {
1220 dm_remove_partmaps(name, rd->need_sync, rd->deferred_remove);
1221 if (!do_deferred(rd->deferred_remove) &&
1222 dm_get_opencount(name)) {
1223 condlog(2, "%s: map in use", name);
1227 condlog(4, "partition map %s removed", name);
1228 dm_device_remove(name, rd->need_sync, rd->deferred_remove);
/* Remove all partition maps stacked on "mapname". */
1233 dm_remove_partmaps (const char * mapname, int need_sync, int deferred_remove)
1235 struct remove_data rd = { need_sync, deferred_remove };
1236 return do_foreach_partmaps(mapname, remove_partmap, &rd);
1239 #ifdef LIBDM_API_DEFERRED
/*
 * Callback: cancel a pending deferred remove on one partition map (and,
 * recursively, on anything stacked on it while it is still open).
 */
1242 cancel_remove_partmap (const char *name, void *unused)
1244 if (dm_get_opencount(name))
1245 dm_cancel_remove_partmaps(name);
1246 if (dm_message(name, "@cancel_deferred_remove") != 0)
1247 condlog(0, "%s: can't cancel deferred remove: %s", name,
/* Return the deferred_remove flag from the map's dm_info (nonzero when a
 * deferred remove is pending). */
1253 dm_get_deferred_remove (char * mapname)
1256 struct dm_task *dmt;
1257 struct dm_info info;
1259 if (!(dmt = dm_task_create(DM_DEVICE_INFO)))
1262 if (!dm_task_set_name(dmt, mapname))
1265 if (!dm_task_run(dmt))
1268 if (!dm_task_get_info(dmt, &info))
1271 r = info.deferred_remove;
1273 dm_task_destroy(dmt);
/* Cancel deferred removes on every partition map of "mapname". */
1278 dm_cancel_remove_partmaps(const char * mapname) {
1279 return do_foreach_partmaps(mapname, cancel_remove_partmap, NULL);
/*
 * Cancel a deferred remove on mpp and its partition maps, and downgrade
 * the in-memory state from IN_PROGRESS back to ON.  A no-op when the
 * kernel reports no deferred remove pending.
 */
1283 dm_cancel_deferred_remove (struct multipath *mpp)
1287 if (!dm_get_deferred_remove(mpp->alias))
1289 if (mpp->deferred_remove == DEFERRED_REMOVE_IN_PROGRESS)
1290 mpp->deferred_remove = DEFERRED_REMOVE_ON;
1292 dm_cancel_remove_partmaps(mpp->alias);
1293 r = dm_message(mpp->alias, "@cancel_deferred_remove");
1295 condlog(0, "%s: can't cancel deferred remove: %s", mpp->alias,
1298 condlog(2, "%s: canceled deferred remove", mpp->alias);
/* Stub for builds without LIBDM_API_DEFERRED (body not visible here). */
1305 dm_cancel_deferred_remove (struct multipath *mpp)
/* Allocate a zeroed(?) struct dm_info -- MALLOC semantics defined
 * elsewhere; the function name line is not visible in this excerpt
 * (presumably alloc_dminfo, used by dm_get_info below). */
1312 static struct dm_info *
1315 return MALLOC(sizeof(struct dm_info));
/*
 * Fill *dmi with the dm_info of map "mapname", allocating it first when
 * the caller passes a NULL pointer.  On failure the info struct is
 * zeroed (the free/NULL-out of an allocation we made is in lines not
 * visible in this excerpt).
 */
1319 dm_get_info (char * mapname, struct dm_info ** dmi)
1322 struct dm_task *dmt = NULL;
1328 *dmi = alloc_dminfo();
1333 if (!(dmt = dm_task_create(DM_DEVICE_INFO)))
1336 if (!dm_task_set_name(dmt, mapname))
1339 dm_task_no_open_count(dmt);
1341 if (!dm_task_run(dmt))
1344 if (!dm_task_get_info(dmt, *dmi))
/* Error path: don't leave stale data in the caller's info struct. */
1350 memset(*dmi, 0, sizeof(struct dm_info));
1356 dm_task_destroy(dmt);
/* Old name / new name / delimiter bundle threaded through
 * do_foreach_partmaps() into rename_partmap() (member declarations are
 * in lines not visible in this excerpt). */
1361 struct rename_data {
/*
 * Callback: rename one partition map from "<old><suffix>" to
 * "<new><delim><digits...>".  Skips the non-digit part of the suffix
 * (e.g. a "p" or "-part" separator) and keeps the partition number.
 */
1368 rename_partmap (const char *name, void *data)
1370 char buff[PARAMS_SIZE];
1372 struct rename_data *rd = (struct rename_data *)data;
1374 if (strncmp(name, rd->old, strlen(rd->old)) != 0)
1376 for (offset = strlen(rd->old); name[offset] && !(isdigit(name[offset])); offset++); /* do nothing */
1377 snprintf(buff, PARAMS_SIZE, "%s%s%s", rd->new, rd->delim,
1379 dm_rename(name, buff, rd->delim);
1380 condlog(4, "partition map %s renamed", name);
/*
 * Rename all partition maps of "old" to match the new map name; the
 * delimiter handling when "new" ends in a digit is partially hidden by
 * missing lines (presumably forcing a "p" separator -- verify in full
 * source).
 */
1385 dm_rename_partmaps (const char * old, char * new, char *delim)
1387 struct rename_data rd;
1394 if (isdigit(new[strlen(new)-1]))
1398 return do_foreach_partmaps(old, rename_partmap, &rd);
/*
 * Rename map "old" to "new" (first renaming its partition maps), waiting
 * on a udev cookie so the device nodes are settled before returning.
 */
1402 dm_rename (const char * old, char * new, char *delim)
1405 struct dm_task *dmt;
1408 if (dm_rename_partmaps(old, new, delim))
1411 if (!(dmt = dm_task_create(DM_DEVICE_RENAME)))
1414 if (!dm_task_set_name(dmt, old))
1417 if (!dm_task_set_newname(dmt, new))
1420 dm_task_no_open_count(dmt);
1422 if (!dm_task_set_cookie(dmt, &cookie,
1423 DM_UDEV_DISABLE_LIBRARY_FALLBACK))
1425 r = dm_task_run(dmt);
1427 dm_udev_wait(cookie);
1430 dm_task_destroy(dmt);
/*
 * In the table line "table", replace dependency string "dep" with
 * "newdep" (splicing via a strdup'ed working copy; the actual strcpy/
 * strcat splice lines are not visible in this excerpt).  NOTE(review):
 * strstr() may return NULL when dep is absent; the only visible caller
 * (dm_reassign_table) guards with strstr(params, old) first -- confirm
 * the missing lines also handle p == NULL.
 */
1435 void dm_reassign_deps(char *table, char *dep, char *newdep)
1440 newtable = strdup(table);
1443 p = strstr(newtable, dep);
1444 n = table + (p - newtable);
1446 n += strlen(newdep);
/*
 * Rewrite the table of device "name", substituting dependency "old" with
 * "new" in every non-multipath target's parameters, then reload and
 * resume the device.  Used to repoint stacked devices (e.g. LVM linear
 * targets) from path devices onto the multipath device.
 */
1452 int dm_reassign_table(const char *name, char *old, char *new)
1454 int r = 0, modified = 0;
1455 uint64_t start, length;
1456 struct dm_task *dmt, *reload_dmt;
1457 char *target, *params = NULL;
1461 if (!(dmt = dm_task_create(DM_DEVICE_TABLE)))
1464 if (!dm_task_set_name(dmt, name))
1467 dm_task_no_open_count(dmt);
1469 if (!dm_task_run(dmt))
/* Build a parallel RELOAD task carrying the (possibly rewritten) targets. */
1471 if (!(reload_dmt = dm_task_create(DM_DEVICE_RELOAD)))
1473 if (!dm_task_set_name(reload_dmt, name))
1477 next = dm_get_next_target(dmt, next, &start, &length,
/* Work on a private copy of the params string so the substitution cannot
 * corrupt libdm's buffer. */
1479 buff = strdup(params);
1481 condlog(3, "%s: failed to replace target %s, "
1482 "out of memory", name, target);
/* Only rewrite non-multipath targets whose params mention "old". */
1485 if (strcmp(target, TGT_MPATH) && strstr(params, old)) {
1486 condlog(3, "%s: replace target %s %s",
1487 name, target, buff);
1488 dm_reassign_deps(buff, old, new);
1489 condlog(3, "%s: with target %s %s",
1490 name, target, buff);
1493 dm_task_add_target(reload_dmt, start, length, target, buff);
1498 dm_task_no_open_count(reload_dmt);
1500 if (!dm_task_run(reload_dmt)) {
1501 condlog(3, "%s: failed to reassign targets", name);
/* The reload only takes effect on resume; keep udev from re-running the
 * full add rules via the RELOAD flag. */
1504 dm_simplecmd_noflush(DM_DEVICE_RESUME, name,
1505 MPATH_UDEV_RELOAD_FLAG);
1510 dm_task_destroy(reload_dmt);
1512 dm_task_destroy(dmt);
1518 * Reassign existing device-mapper table(s) to not use
1519 * the block devices but point to the multipathed
/*
 * For every dependency (major:minor) of map "mapname", scan sysfs holders
 * and reassign any table that still references the underlying path
 * devices so it points at the multipath device instead.
 */
1522 int dm_reassign(const char *mapname)
1524 struct dm_deps *deps;
1525 struct dm_task *dmt;
1526 struct dm_info info;
1527 char dev_t[32], dm_dep[32];
1530 if (dm_dev_t(mapname, &dev_t[0], 32)) {
1531 condlog(3, "%s: failed to get device number", mapname);
1535 if (!(dmt = dm_task_create(DM_DEVICE_DEPS))) {
1536 condlog(3, "%s: couldn't make dm task", mapname);
1540 if (!dm_task_set_name(dmt, mapname))
1543 dm_task_no_open_count(dmt);
1545 if (!dm_task_run(dmt))
1548 if (!dm_task_get_info(dmt, &info))
1551 if (!(deps = dm_task_get_deps(dmt)))
/* Walk each dependency device and let sysfs_check_holders() repoint any
 * holder tables from dm_dep onto dev_t (the multipath device). */
1557 for (i = 0; i < deps->count; i++) {
1558 sprintf(dm_dep, "%d:%d",
1559 major(deps->device[i]),
1560 minor(deps->device[i]));
1561 sysfs_check_holders(dm_dep, dev_t);
1564 dm_task_destroy (dmt);
1571 int dm_setgeometry(struct multipath *mpp)
1573 struct dm_task *dmt;
1575 char heads[4], sectors[4];
1576 char cylinders[10], start[32];
1582 pp = first_path(mpp);
1584 condlog(3, "%s: no path for geometry", mpp->alias);
1587 if (pp->geom.cylinders == 0 ||
1588 pp->geom.heads == 0 ||
1589 pp->geom.sectors == 0) {
1590 condlog(3, "%s: invalid geometry on %s", mpp->alias, pp->dev);
1594 if (!(dmt = dm_task_create(DM_DEVICE_SET_GEOMETRY)))
1597 if (!dm_task_set_name(dmt, mpp->alias))
1600 dm_task_no_open_count(dmt);
1602 /* What a sick interface ... */
1603 snprintf(heads, 4, "%u", pp->geom.heads);
1604 snprintf(sectors, 4, "%u", pp->geom.sectors);
1605 snprintf(cylinders, 10, "%u", pp->geom.cylinders);
1606 snprintf(start, 32, "%lu", pp->geom.start);
1607 if (!dm_task_set_geometry(dmt, cylinders, heads, sectors, start)) {
1608 condlog(3, "%s: Failed to set geometry", mpp->alias);
1612 r = dm_task_run(dmt);
1614 dm_task_destroy(dmt);