2 * snippets copied from device-mapper dmsetup.c
3 * Copyright (c) 2004, 2005 Christophe Varoqui
4 * Copyright (c) 2005 Kiyoshi Ueda, NEC
5 * Copyright (c) 2005 Patrick Caulfield, Redhat
11 #include <libdevmapper.h>
16 #include <sys/sysmacros.h>
17 #include <linux/dm-ioctl.h>
24 #include "devmapper.h"
29 #include "time-util.h"
31 #include "log_pthread.h"
32 #include <sys/types.h>
36 #define LOOPS_PER_SEC 5
38 #define INVALID_VERSION ~0U
39 static unsigned int dm_library_version[3] = { INVALID_VERSION, };
40 static unsigned int dm_kernel_version[3] = { INVALID_VERSION, };
41 static unsigned int dm_mpath_target_version[3] = { INVALID_VERSION, };
43 static pthread_once_t dm_initialized = PTHREAD_ONCE_INIT;
44 static pthread_once_t versions_initialized = PTHREAD_ONCE_INIT;
45 static pthread_mutex_t libmp_dm_lock = PTHREAD_MUTEX_INITIALIZER;
47 static int dm_conf_verbosity;
49 #ifdef LIBDM_API_DEFERRED
50 static int dm_cancel_remove_partmaps(const char * mapname);
51 #define __DR_UNUSED__ /* empty */
53 #define __DR_UNUSED__ __attribute__((unused))
56 static int do_foreach_partmaps(const char * mapname,
57 int (*partmap_func)(const char *, void *),
60 #ifndef LIBDM_API_COOKIE
61 static inline int dm_task_set_cookie(struct dm_task *dmt, uint32_t *c, int a)
66 static void libmp_udev_wait(unsigned int c)
70 static void dm_udev_set_sync_support(int c)
74 static void libmp_udev_wait(unsigned int c)
76 pthread_mutex_lock(&libmp_dm_lock);
77 pthread_cleanup_push(cleanup_mutex, &libmp_dm_lock);
79 pthread_cleanup_pop(1);
83 int libmp_dm_task_run(struct dm_task *dmt)
87 pthread_mutex_lock(&libmp_dm_lock);
88 pthread_cleanup_push(cleanup_mutex, &libmp_dm_lock);
90 pthread_cleanup_pop(1);
94 __attribute__((format(printf, 4, 5))) static void
95 dm_write_log (int level, const char *file, int line, const char *f, ...)
100 * libdm uses the same log levels as syslog,
101 * except that EMERG/ALERT are not used
103 if (level > LOG_DEBUG)
106 if (level > dm_conf_verbosity)
110 if (logsink != LOGSINK_SYSLOG) {
111 if (logsink == LOGSINK_STDERR_WITH_TIME) {
115 get_monotonic_time(&ts);
116 safe_sprintf(buff, "%ld.%06ld",
117 (long)ts.tv_sec, ts.tv_nsec/1000);
118 fprintf(stderr, "%s | ", buff);
120 fprintf(stderr, "libdevmapper: %s(%i): ", file, line);
121 vfprintf(stderr, f, ap);
122 fprintf(stderr, "\n");
124 condlog(level >= LOG_ERR ? level - LOG_ERR : 0,
125 "libdevmapper: %s(%i): ", file, line);
126 log_safe(level, f, ap);
133 static void dm_init(int v)
136 * This maps libdm's standard loglevel _LOG_WARN (= 4), which is rather
137 * quiet in practice, to multipathd's default verbosity 2
139 dm_conf_verbosity = v + 2;
140 dm_log_init(&dm_write_log);
143 static void init_dm_library_version(void)
148 dm_get_library_version(version, sizeof(version));
149 if (sscanf(version, "%u.%u.%u ", &v[0], &v[1], &v[2]) != 3) {
150 condlog(0, "invalid libdevmapper version %s", version);
153 memcpy(dm_library_version, v, sizeof(dm_library_version));
154 condlog(3, "libdevmapper version %u.%.2u.%.2u",
155 dm_library_version[0], dm_library_version[1],
156 dm_library_version[2]);
163 #if defined(LIBDM_API_HOLD_CONTROL)
164 unsigned int minv[3] = {1, 2, 111};
165 #elif defined(LIBDM_API_GET_ERRNO)
166 unsigned int minv[3] = {1, 2, 99};
167 #elif defined(LIBDM_API_DEFERRED)
168 unsigned int minv[3] = {1, 2, 89};
169 #elif defined(DM_SUBSYSTEM_UDEV_FLAG0)
170 unsigned int minv[3] = {1, 2, 82};
171 #elif defined(LIBDM_API_COOKIE)
172 unsigned int minv[3] = {1, 2, 38};
174 unsigned int minv[3] = {1, 2, 8};
177 if (VERSION_GE(dm_library_version, minv))
179 condlog(0, "libdevmapper version must be >= %u.%.2u.%.2u",
180 minv[0], minv[1], minv[2]);
184 static void init_dm_drv_version(void)
189 if (!dm_driver_version(buff, sizeof(buff))) {
190 condlog(0, "cannot get kernel dm version");
193 if (sscanf(buff, "%u.%u.%u ", &v[0], &v[1], &v[2]) != 3) {
194 condlog(0, "invalid kernel dm version '%s'", buff);
197 memcpy(dm_kernel_version, v, sizeof(dm_library_version));
198 condlog(3, "kernel device mapper v%u.%u.%u",
199 dm_kernel_version[0],
200 dm_kernel_version[1],
201 dm_kernel_version[2]);
204 static int dm_tgt_version (unsigned int *version, char *str)
208 struct dm_versions *target;
209 struct dm_versions *last_target;
213 * We have to call dm_task_create() and not libmp_dm_task_create()
214 * here to avoid a recursive invocation of
215 * pthread_once(&dm_initialized), which would cause a deadlock.
217 if (!(dmt = dm_task_create(DM_DEVICE_LIST_VERSIONS)))
220 dm_task_no_open_count(dmt);
222 if (!libmp_dm_task_run(dmt)) {
223 dm_log_error(2, DM_DEVICE_LIST_VERSIONS, dmt);
224 condlog(0, "Can not communicate with kernel DM");
227 target = dm_task_get_versions(dmt);
230 last_target = target;
231 if (!strncmp(str, target->name, strlen(str))) {
235 target = (void *) target + target->next;
236 } while (last_target != target);
239 condlog(0, "DM %s kernel driver not loaded", str);
248 dm_task_destroy(dmt);
252 static void init_dm_mpath_version(void)
254 if (!dm_tgt_version(dm_mpath_target_version, TGT_MPATH))
255 condlog(3, "DM multipath kernel driver v%u.%u.%u",
256 dm_mpath_target_version[0],
257 dm_mpath_target_version[1],
258 dm_mpath_target_version[2]);
261 static int dm_tgt_prereq (unsigned int *ver)
263 unsigned int minv[3] = {1, 0, 3};
265 if (VERSION_GE(dm_mpath_target_version, minv)) {
267 ver[0] = dm_mpath_target_version[0];
268 ver[1] = dm_mpath_target_version[1];
269 ver[2] = dm_mpath_target_version[2];
274 condlog(0, "DM multipath kernel driver must be >= v%u.%u.%u",
275 minv[0], minv[1], minv[2]);
279 static void _init_versions(void)
281 /* Can't use condlog here because of how VERSION_STRING is defined */
282 if (3 <= libmp_verbosity)
283 dlog(3, VERSION_STRING);
284 init_dm_library_version();
285 init_dm_drv_version();
286 init_dm_mpath_version();
289 static int init_versions(void) {
290 pthread_once(&versions_initialized, _init_versions);
291 return (dm_library_version[0] == INVALID_VERSION ||
292 dm_kernel_version[0] == INVALID_VERSION ||
293 dm_mpath_target_version[0] == INVALID_VERSION);
296 int dm_prereq(unsigned int *v)
302 return dm_tgt_prereq(v);
305 int libmp_get_version(int which, unsigned int version[3])
307 unsigned int *src_version;
311 case DM_LIBRARY_VERSION:
312 src_version = dm_library_version;
314 case DM_KERNEL_VERSION:
315 src_version = dm_kernel_version;
317 case DM_MPATH_TARGET_VERSION:
318 src_version = dm_mpath_target_version;
320 case MULTIPATH_VERSION:
321 version[0] = (VERSION_CODE >> 16) & 0xff;
322 version[1] = (VERSION_CODE >> 8) & 0xff;
323 version[2] = VERSION_CODE & 0xff;
326 condlog(0, "%s: invalid value for 'which'", __func__);
329 if (src_version[0] == INVALID_VERSION)
331 memcpy(version, src_version, 3 * sizeof(*version));
335 static int libmp_dm_udev_sync = 0;
337 void libmp_udev_set_sync_support(int on)
339 libmp_dm_udev_sync = !!on;
342 static bool libmp_dm_init_called;
343 void libmp_dm_exit(void)
345 if (!libmp_dm_init_called)
348 /* switch back to default libdm logging */
350 #ifdef LIBDM_API_HOLD_CONTROL
351 /* make sure control fd is closed in dm_lib_release() */
352 dm_hold_control_dev(0);
356 static void libmp_dm_init(void)
358 unsigned int version[3];
360 if (dm_prereq(version))
362 dm_init(libmp_verbosity);
363 #ifdef LIBDM_API_HOLD_CONTROL
364 dm_hold_control_dev(1);
366 dm_udev_set_sync_support(libmp_dm_udev_sync);
367 libmp_dm_init_called = true;
370 static void _do_skip_libmp_dm_init(void)
374 void skip_libmp_dm_init(void)
376 pthread_once(&dm_initialized, _do_skip_libmp_dm_init);
380 libmp_dm_task_create(int task)
382 pthread_once(&dm_initialized, libmp_dm_init);
383 return dm_task_create(task);
386 #define do_deferred(x) ((x) == DEFERRED_REMOVE_ON || (x) == DEFERRED_REMOVE_IN_PROGRESS)
389 dm_simplecmd (int task, const char *name, int no_flush, int need_sync,
390 uint16_t udev_flags, int deferred_remove __DR_UNUSED__) {
392 int udev_wait_flag = ((need_sync || udev_flags) &&
393 (task == DM_DEVICE_RESUME ||
394 task == DM_DEVICE_REMOVE));
398 if (!(dmt = libmp_dm_task_create (task)))
401 if (!dm_task_set_name (dmt, name))
404 dm_task_no_open_count(dmt);
405 dm_task_skip_lockfs(dmt); /* for DM_DEVICE_RESUME */
406 #ifdef LIBDM_API_FLUSH
408 dm_task_no_flush(dmt); /* for DM_DEVICE_SUSPEND/RESUME */
410 #ifdef LIBDM_API_DEFERRED
411 if (do_deferred(deferred_remove))
412 dm_task_deferred_remove(dmt);
414 if (udev_wait_flag &&
415 !dm_task_set_cookie(dmt, &cookie,
416 DM_UDEV_DISABLE_LIBRARY_FALLBACK | udev_flags))
419 r = libmp_dm_task_run (dmt);
421 dm_log_error(2, task, dmt);
424 libmp_udev_wait(cookie);
426 dm_task_destroy (dmt);
int dm_simplecmd_flush (int task, const char *name, uint16_t udev_flags)
{
	/*
	 * Convenience wrapper around dm_simplecmd():
	 * no_flush = 0 (allow flushing), need_sync = 1 (wait for udev),
	 * deferred_remove = 0.
	 */
	return dm_simplecmd(task, name, 0, 1, udev_flags, 0);
}
int dm_simplecmd_noflush (int task, const char *name, uint16_t udev_flags)
{
	/*
	 * Convenience wrapper around dm_simplecmd():
	 * no_flush = 1 (suppress flushing), need_sync = 1 (wait for udev),
	 * deferred_remove = 0.
	 */
	return dm_simplecmd(task, name, 1, 1, udev_flags, 0);
}
441 dm_device_remove (const char *name, int needsync, int deferred_remove) {
442 return dm_simplecmd(DM_DEVICE_REMOVE, name, 0, needsync, 0,
447 dm_addmap (int task, const char *target, struct multipath *mpp,
448 char * params, int ro, uint16_t udev_flags) {
451 char *prefixed_uuid = NULL;
454 if (task == DM_DEVICE_CREATE && strlen(mpp->wwid) == 0) {
455 condlog(1, "%s: refusing to create map with empty WWID",
460 /* Need to add this here to allow 0 to be passed in udev_flags */
461 udev_flags |= DM_UDEV_DISABLE_LIBRARY_FALLBACK;
463 if (!(dmt = libmp_dm_task_create (task)))
466 if (!dm_task_set_name (dmt, mpp->alias))
469 if (!dm_task_add_target (dmt, 0, mpp->size, target, params))
475 if (task == DM_DEVICE_CREATE) {
476 prefixed_uuid = calloc(1, UUID_PREFIX_LEN +
477 strlen(mpp->wwid) + 1);
478 if (!prefixed_uuid) {
479 condlog(0, "cannot create prefixed uuid : %s",
483 sprintf(prefixed_uuid, UUID_PREFIX "%s", mpp->wwid);
484 if (!dm_task_set_uuid(dmt, prefixed_uuid))
486 dm_task_skip_lockfs(dmt);
487 #ifdef LIBDM_API_FLUSH
488 dm_task_no_flush(dmt);
492 if (mpp->attribute_flags & (1 << ATTR_MODE) &&
493 !dm_task_set_mode(dmt, mpp->mode))
495 if (mpp->attribute_flags & (1 << ATTR_UID) &&
496 !dm_task_set_uid(dmt, mpp->uid))
498 if (mpp->attribute_flags & (1 << ATTR_GID) &&
499 !dm_task_set_gid(dmt, mpp->gid))
501 condlog(2, "%s: %s [0 %llu %s %s]", mpp->alias,
502 task == DM_DEVICE_RELOAD ? "reload" : "addmap", mpp->size,
505 dm_task_no_open_count(dmt);
507 if (task == DM_DEVICE_CREATE &&
508 !dm_task_set_cookie(dmt, &cookie, udev_flags))
511 r = libmp_dm_task_run (dmt);
513 dm_log_error(2, task, dmt);
515 if (task == DM_DEVICE_CREATE)
516 libmp_udev_wait(cookie);
522 dm_task_destroy (dmt);
525 mpp->need_reload = false;
529 static uint16_t build_udev_flags(const struct multipath *mpp, int reload)
531 /* DM_UDEV_DISABLE_LIBRARY_FALLBACK is added in dm_addmap */
532 return (mpp->skip_kpartx == SKIP_KPARTX_ON ?
533 MPATH_UDEV_NO_KPARTX_FLAG : 0) |
534 ((count_active_pending_paths(mpp) == 0 ||
535 mpp->ghost_delay_tick > 0) ?
536 MPATH_UDEV_NO_PATHS_FLAG : 0) |
537 (reload && !mpp->force_udev_reload ?
538 MPATH_UDEV_RELOAD_FLAG : 0);
541 int dm_addmap_create (struct multipath *mpp, char * params)
544 uint16_t udev_flags = build_udev_flags(mpp, 0);
546 for (ro = 0; ro <= 1; ro++) {
549 if (dm_addmap(DM_DEVICE_CREATE, TGT_MPATH, mpp, params, ro,
551 if (unmark_failed_wwid(mpp->wwid) ==
553 mpp->needs_paths_uevent = 1;
557 * DM_DEVICE_CREATE is actually DM_DEV_CREATE + DM_TABLE_LOAD.
558 * Failing the second part leaves an empty map. Clean it up.
561 if (dm_map_present(mpp->alias)) {
562 condlog(3, "%s: failed to load map (a path might be in use)", mpp->alias);
563 dm_flush_map_nosync(mpp->alias);
566 condlog(3, "%s: failed to load map, error %d",
571 if (mark_failed_wwid(mpp->wwid) == WWID_FAILED_CHANGED)
572 mpp->needs_paths_uevent = 1;
579 int dm_addmap_reload(struct multipath *mpp, char *params, int flush)
582 uint16_t udev_flags = build_udev_flags(mpp, 1);
585 * DM_DEVICE_RELOAD cannot wait on a cookie, as
586 * the cookie will only ever be released after an
587 * DM_DEVICE_RESUME. So call DM_DEVICE_RESUME
588 * after each successful call to DM_DEVICE_RELOAD.
590 if (!mpp->force_readonly)
591 r = dm_addmap(DM_DEVICE_RELOAD, TGT_MPATH, mpp, params,
594 if (!mpp->force_readonly && errno != EROFS)
596 r = dm_addmap(DM_DEVICE_RELOAD, TGT_MPATH, mpp,
597 params, ADDMAP_RO, 0);
600 r = dm_simplecmd(DM_DEVICE_RESUME, mpp->alias, !flush,
605 /* If the resume failed, dm will leave the device suspended, and
606 * drop the new table, so doing a second resume will try using
607 * the original table */
608 if (dm_is_suspended(mpp->alias))
609 dm_simplecmd(DM_DEVICE_RESUME, mpp->alias, !flush, 1,
615 do_get_info(const char *name, struct dm_info *info)
620 if (!(dmt = libmp_dm_task_create(DM_DEVICE_INFO)))
623 if (!dm_task_set_name(dmt, name))
626 dm_task_no_open_count(dmt);
628 if (!libmp_dm_task_run(dmt)) {
629 dm_log_error(3, DM_DEVICE_INFO, dmt);
633 if (!dm_task_get_info(dmt, info))
641 dm_task_destroy(dmt);
645 int dm_map_present(const char * str)
649 return (do_get_info(str, &info) == 0);
652 int dm_get_map(const char *name, unsigned long long *size, char **outparams)
656 uint64_t start, length;
657 char *target_type = NULL;
660 if (!(dmt = libmp_dm_task_create(DM_DEVICE_TABLE)))
663 if (!dm_task_set_name(dmt, name))
666 dm_task_no_open_count(dmt);
669 if (!libmp_dm_task_run(dmt)) {
670 dm_log_error(3, DM_DEVICE_TABLE, dmt);
671 if (dm_task_get_errno(dmt) == ENXIO)
677 /* Fetch 1st target */
678 if (dm_get_next_target(dmt, NULL, &start, &length,
679 &target_type, ¶ms) != NULL)
680 /* more than one target */
689 *outparams = strdup(params);
690 r = *outparams ? DMP_OK : DMP_ERR;
694 dm_task_destroy(dmt);
699 dm_get_prefixed_uuid(const char *name, char *uuid, int uuid_len)
705 dmt = libmp_dm_task_create(DM_DEVICE_INFO);
709 if (!dm_task_set_name (dmt, name))
712 if (!libmp_dm_task_run(dmt)) {
713 dm_log_error(3, DM_DEVICE_INFO, dmt);
717 uuidtmp = dm_task_get_uuid(dmt);
719 strlcpy(uuid, uuidtmp, uuid_len);
725 dm_task_destroy(dmt);
729 int dm_get_uuid(const char *name, char *uuid, int uuid_len)
731 char tmp[DM_UUID_LEN];
733 if (dm_get_prefixed_uuid(name, tmp, sizeof(tmp)))
736 if (!strncmp(tmp, UUID_PREFIX, UUID_PREFIX_LEN))
737 strlcpy(uuid, tmp + UUID_PREFIX_LEN, uuid_len);
745 is_mpath_part(const char *part_name, const char *map_name)
748 char part_uuid[DM_UUID_LEN], map_uuid[DM_UUID_LEN];
750 if (dm_get_prefixed_uuid(part_name, part_uuid, sizeof(part_uuid)))
753 if (dm_get_prefixed_uuid(map_name, map_uuid, sizeof(map_uuid)))
756 if (strncmp(part_uuid, "part", 4) != 0)
759 p = strstr(part_uuid, UUID_PREFIX);
760 if (p && !strcmp(p, map_uuid))
766 int dm_get_status(const char *name, char **outstatus)
770 uint64_t start, length;
771 char *target_type = NULL;
774 if (!(dmt = libmp_dm_task_create(DM_DEVICE_STATUS)))
777 if (!dm_task_set_name(dmt, name))
780 dm_task_no_open_count(dmt);
783 if (!libmp_dm_task_run(dmt)) {
784 dm_log_error(3, DM_DEVICE_STATUS, dmt);
785 if (dm_task_get_errno(dmt) == ENXIO)
791 /* Fetch 1st target */
792 if (dm_get_next_target(dmt, NULL, &start, &length,
793 &target_type, &status) != NULL)
796 if (!target_type || strcmp(target_type, TGT_MPATH) != 0)
800 condlog(2, "get null status.");
807 *outstatus = strdup(status);
808 r = *outstatus ? DMP_OK : DMP_ERR;
812 condlog(0, "%s: error getting map status string", name);
814 dm_task_destroy(dmt);
822 * -1 : empty map, or more than 1 target
824 int dm_type(const char *name, char *type)
828 uint64_t start, length;
829 char *target_type = NULL;
832 if (!(dmt = libmp_dm_task_create(DM_DEVICE_TABLE)))
835 if (!dm_task_set_name(dmt, name))
838 dm_task_no_open_count(dmt);
840 if (!libmp_dm_task_run(dmt)) {
841 dm_log_error(3, DM_DEVICE_TABLE, dmt);
845 /* Fetch 1st target */
846 if (dm_get_next_target(dmt, NULL, &start, &length,
847 &target_type, ¶ms) != NULL)
848 /* multiple targets */
850 else if (!target_type)
852 else if (!strcmp(target_type, type))
856 dm_task_destroy(dmt);
862 * 1 : is multipath device
863 * 0 : is not multipath device
866 int dm_is_mpath(const char *name)
871 uint64_t start, length;
872 char *target_type = NULL;
876 if (!(dmt = libmp_dm_task_create(DM_DEVICE_TABLE)))
879 if (!dm_task_set_name(dmt, name))
882 dm_task_no_open_count(dmt);
884 if (!libmp_dm_task_run(dmt)) {
885 dm_log_error(3, DM_DEVICE_TABLE, dmt);
889 if (!dm_task_get_info(dmt, &info))
897 uuid = dm_task_get_uuid(dmt);
899 if (!uuid || strncmp(uuid, UUID_PREFIX, UUID_PREFIX_LEN) != 0)
902 /* Fetch 1st target */
903 if (dm_get_next_target(dmt, NULL, &start, &length, &target_type,
905 /* multiple targets */
908 if (!target_type || strcmp(target_type, TGT_MPATH) != 0)
913 dm_task_destroy(dmt);
916 condlog(3, "%s: dm command failed in %s: %s", name, __FUNCTION__, strerror(errno));
922 * 1 : map with uuid exists
923 * 0 : map with uuid doesn't exist
927 dm_map_present_by_uuid(const char *uuid)
931 char prefixed_uuid[WWID_SIZE + UUID_PREFIX_LEN];
934 if (!uuid || uuid[0] == '\0')
937 if (safe_sprintf(prefixed_uuid, UUID_PREFIX "%s", uuid))
940 if (!(dmt = libmp_dm_task_create(DM_DEVICE_INFO)))
943 dm_task_no_open_count(dmt);
945 if (!dm_task_set_uuid(dmt, prefixed_uuid))
948 if (!libmp_dm_task_run(dmt)) {
949 dm_log_error(3, DM_DEVICE_INFO, dmt);
953 if (!dm_task_get_info(dmt, &info))
959 dm_task_destroy(dmt);
962 condlog(3, "%s: dm command failed in %s: %s", uuid,
963 __FUNCTION__, strerror(errno));
968 dm_dev_t (const char * mapname, char * dev_t, int len)
972 if (do_get_info(mapname, &info) != 0)
975 if (snprintf(dev_t, len, "%i:%i", info.major, info.minor) > len)
982 dm_get_opencount (const char * mapname)
988 if (!(dmt = libmp_dm_task_create(DM_DEVICE_INFO)))
991 if (!dm_task_set_name(dmt, mapname))
994 if (!libmp_dm_task_run(dmt)) {
995 dm_log_error(3, DM_DEVICE_INFO, dmt);
999 if (!dm_task_get_info(dmt, &info))
1005 r = info.open_count;
1007 dm_task_destroy(dmt);
1012 dm_get_major_minor(const char *name, int *major, int *minor)
1014 struct dm_info info;
1016 if (do_get_info(name, &info) != 0)
1019 *major = info.major;
1020 *minor = info.minor;
1025 has_partmap(const char *name __attribute__((unused)),
1026 void *data __attribute__((unused)))
1032 partmap_in_use(const char *name, void *data)
1034 int part_count, *ret_count = (int *)data;
1035 int open_count = dm_get_opencount(name);
1041 if (do_foreach_partmaps(name, partmap_in_use, &part_count))
1043 if (open_count != part_count) {
1044 condlog(2, "%s: map in use", name);
1051 int _dm_flush_map (const char * mapname, int need_sync, int deferred_remove,
1052 int need_suspend, int retries)
1055 int queue_if_no_path = 0;
1057 unsigned long long mapsize;
1058 char *params = NULL;
1060 if (dm_is_mpath(mapname) != 1)
1061 return 0; /* nothing to do */
1063 /* if the device currently has no partitions, do not
1064 run kpartx on it if you fail to delete it */
1065 if (do_foreach_partmaps(mapname, has_partmap, NULL) == 0)
1066 udev_flags |= MPATH_UDEV_NO_KPARTX_FLAG;
1068 /* If you aren't doing a deferred remove, make sure that no
1069 * devices are in use */
1070 if (!do_deferred(deferred_remove) && partmap_in_use(mapname, NULL))
1074 dm_get_map(mapname, &mapsize, ¶ms) == DMP_OK &&
1075 strstr(params, "queue_if_no_path")) {
1076 if (!dm_queue_if_no_path(mapname, 0))
1077 queue_if_no_path = 1;
1079 /* Leave queue_if_no_path alone if unset failed */
1080 queue_if_no_path = -1;
1085 if (dm_remove_partmaps(mapname, need_sync, deferred_remove))
1088 if (!do_deferred(deferred_remove) && dm_get_opencount(mapname)) {
1089 condlog(2, "%s: map in use", mapname);
1094 if (need_suspend && queue_if_no_path != -1)
1095 dm_simplecmd_flush(DM_DEVICE_SUSPEND, mapname, 0);
1097 r = dm_device_remove(mapname, need_sync, deferred_remove);
1100 if (do_deferred(deferred_remove)
1101 && dm_map_present(mapname)) {
1102 condlog(4, "multipath map %s remove deferred",
1106 condlog(4, "multipath map %s removed", mapname);
1109 condlog(2, "failed to remove multipath map %s",
1111 if (need_suspend && queue_if_no_path != -1) {
1112 dm_simplecmd_noflush(DM_DEVICE_RESUME,
1113 mapname, udev_flags);
1118 } while (retries-- > 0);
1120 if (queue_if_no_path == 1)
1121 dm_queue_if_no_path(mapname, 1);
1126 #ifdef LIBDM_API_DEFERRED
1129 dm_flush_map_nopaths(const char * mapname, int deferred_remove)
1131 return _dm_flush_map(mapname, 1, deferred_remove, 0, 0);
1137 dm_flush_map_nopaths(const char * mapname,
1138 int deferred_remove __attribute__((unused)))
1140 return _dm_flush_map(mapname, 1, 0, 0, 0);
1145 int dm_flush_maps (int need_suspend, int retries)
1148 struct dm_task *dmt;
1149 struct dm_names *names;
1152 if (!(dmt = libmp_dm_task_create (DM_DEVICE_LIST)))
1155 dm_task_no_open_count(dmt);
1157 if (!libmp_dm_task_run (dmt)) {
1158 dm_log_error(3, DM_DEVICE_LIST, dmt);
1162 if (!(names = dm_task_get_names (dmt)))
1171 r |= dm_suspend_and_flush_map(names->name, retries);
1173 r |= dm_flush_map(names->name);
1175 names = (void *) names + next;
1179 dm_task_destroy (dmt);
1184 dm_message(const char * mapname, char * message)
1187 struct dm_task *dmt;
1189 if (!(dmt = libmp_dm_task_create(DM_DEVICE_TARGET_MSG)))
1192 if (!dm_task_set_name(dmt, mapname))
1195 if (!dm_task_set_sector(dmt, 0))
1198 if (!dm_task_set_message(dmt, message))
1201 dm_task_no_open_count(dmt);
1203 if (!libmp_dm_task_run(dmt)) {
1204 dm_log_error(2, DM_DEVICE_TARGET_MSG, dmt);
1211 condlog(0, "DM message failed [%s]", message);
1213 dm_task_destroy(dmt);
/*
 * Send a "fail_path <path>" target message to map @mapname, marking
 * path device @path as failed in the multipath target.
 *
 * Returns 1 if the message doesn't fit the buffer, otherwise the
 * result of dm_message().
 */
int
dm_fail_path(const char * mapname, char * path)
{
	char message[32];

	/*
	 * snprintf() returns the untruncated length; a result of
	 * sizeof(message) or more means the message was truncated
	 * (the original "> 32" check let the exact-fit truncation
	 * case through).
	 */
	if (snprintf(message, sizeof(message), "fail_path %s", path) >=
	    (int)sizeof(message))
		return 1;

	return dm_message(mapname, message);
}
/*
 * Send a "reinstate_path <path>" target message to map @mapname,
 * bringing path device @path back into service.
 *
 * Returns 1 if the message doesn't fit the buffer, otherwise the
 * result of dm_message().
 */
int
dm_reinstate_path(const char * mapname, char * path)
{
	char message[32];

	/* >= catches the exact-fit truncation case that "> 32" missed */
	if (snprintf(message, sizeof(message), "reinstate_path %s", path) >=
	    (int)sizeof(message))
		return 1;

	return dm_message(mapname, message);
}
/*
 * Toggle the multipath target's queue_if_no_path feature on map
 * @mapname: enable != 0 sends "queue_if_no_path", otherwise
 * "fail_if_no_path". Returns the result of dm_message().
 */
int
dm_queue_if_no_path(const char *mapname, int enable)
{
	char *message = enable ? "queue_if_no_path" : "fail_if_no_path";

	return dm_message(mapname, message);
}
/*
 * Build and send a "<msg>_group <index>" target message to map
 * @mapname (e.g. "switch_group 1"). Used by the dm_switchgroup /
 * dm_enablegroup / dm_disablegroup wrappers.
 *
 * Returns 1 if the message doesn't fit the buffer, otherwise the
 * result of dm_message().
 */
static int
dm_groupmsg (const char * msg, const char * mapname, int index)
{
	char message[32];

	/*
	 * snprintf() returns the untruncated length; >= sizeof(message)
	 * means truncation (the original "> 32" check accepted the
	 * exact-fit truncation case).
	 */
	if (snprintf(message, sizeof(message), "%s_group %i", msg, index) >=
	    (int)sizeof(message))
		return 1;

	return dm_message(mapname, message);
}
/* Ask the multipath target of @mapname to switch the active path
 * group to @index ("switch_group <index>" message). */
int
dm_switchgroup(const char * mapname, int index)
{
	return dm_groupmsg("switch", mapname, index);
}
/* Enable path group @index of map @mapname
 * ("enable_group <index>" message). */
int
dm_enablegroup(const char * mapname, int index)
{
	return dm_groupmsg("enable", mapname, index);
}
/* Disable path group @index of map @mapname
 * ("disable_group <index>" message). */
int
dm_disablegroup(const char * mapname, int index)
{
	return dm_groupmsg("disable", mapname, index);
}
1281 struct multipath *dm_get_multipath(const char *name)
1283 struct multipath *mpp = NULL;
1285 mpp = alloc_multipath();
1289 mpp->alias = strdup(name);
1294 if (dm_get_map(name, &mpp->size, NULL) != DMP_OK)
1297 if (dm_get_uuid(name, mpp->wwid, WWID_SIZE) != 0)
1298 condlog(2, "%s: failed to get uuid for %s", __func__, name);
1299 if (dm_get_info(name, &mpp->dmi) != 0)
1300 condlog(2, "%s: failed to get info for %s", __func__, name);
1304 free_multipath(mpp, KEEP_PATHS);
1309 dm_get_maps (vector mp)
1311 struct multipath * mpp;
1313 struct dm_task *dmt;
1314 struct dm_names *names;
1320 if (!(dmt = libmp_dm_task_create(DM_DEVICE_LIST)))
1323 dm_task_no_open_count(dmt);
1325 if (!libmp_dm_task_run(dmt)) {
1326 dm_log_error(3, DM_DEVICE_LIST, dmt);
1330 if (!(names = dm_task_get_names(dmt)))
1334 r = 0; /* this is perfectly valid */
1339 if (dm_is_mpath(names->name) != 1)
1342 mpp = dm_get_multipath(names->name);
1346 if (!vector_alloc_slot(mp)) {
1347 free_multipath(mpp, KEEP_PATHS);
1351 vector_set_slot(mp, mpp);
1355 names = (void *) names + next;
1361 dm_task_destroy (dmt);
1366 dm_geteventnr (const char *name)
1368 struct dm_info info;
1370 if (do_get_info(name, &info) != 0)
1373 return info.event_nr;
1377 dm_is_suspended(const char *name)
1379 struct dm_info info;
1381 if (do_get_info(name, &info) != 0)
1384 return info.suspended;
1388 dm_mapname(int major, int minor)
1390 char * response = NULL;
1392 struct dm_task *dmt;
1395 if (!(dmt = libmp_dm_task_create(DM_DEVICE_STATUS)))
1398 if (!dm_task_set_major(dmt, major) ||
1399 !dm_task_set_minor(dmt, minor))
1402 dm_task_no_open_count(dmt);
1403 r = libmp_dm_task_run(dmt);
1405 dm_log_error(2, DM_DEVICE_STATUS, dmt);
1409 map = dm_task_get_name(dmt);
1410 if (map && strlen(map))
1411 response = strdup((const char *)map);
1413 dm_task_destroy(dmt);
1416 dm_task_destroy(dmt);
1417 condlog(0, "%i:%i: error fetching map name", major, minor);
1422 do_foreach_partmaps (const char * mapname,
1423 int (*partmap_func)(const char *, void *),
1426 struct dm_task *dmt;
1427 struct dm_names *names;
1429 char *params = NULL;
1430 unsigned long long size;
1435 if (!(dmt = libmp_dm_task_create(DM_DEVICE_LIST)))
1438 dm_task_no_open_count(dmt);
1440 if (!libmp_dm_task_run(dmt)) {
1441 dm_log_error(3, DM_DEVICE_LIST, dmt);
1445 if (!(names = dm_task_get_names(dmt)))
1449 r = 0; /* this is perfectly valid */
1453 if (dm_dev_t(mapname, &dev_t[0], 32))
1459 * if there is only a single "linear" target
1461 (dm_type(names->name, TGT_PART) == 1) &&
1464 * and the uuid of the target is a partition of the
1465 * uuid of the multipath device
1467 is_mpath_part(names->name, mapname) &&
1470 * and we can fetch the map table from the kernel
1472 dm_get_map(names->name, &size, ¶ms) == DMP_OK &&
1475 * and the table maps over the multipath map
1477 (p = strstr(params, dev_t)) &&
1478 !isdigit(*(p + strlen(dev_t)))
1480 if (partmap_func(names->name, data) != 0)
1487 names = (void *) names + next;
1493 dm_task_destroy (dmt);
1497 struct remove_data {
1499 int deferred_remove;
1503 remove_partmap(const char *name, void *data)
1505 struct remove_data *rd = (struct remove_data *)data;
1507 if (dm_get_opencount(name)) {
1508 dm_remove_partmaps(name, rd->need_sync, rd->deferred_remove);
1509 if (!do_deferred(rd->deferred_remove) &&
1510 dm_get_opencount(name)) {
1511 condlog(2, "%s: map in use", name);
1515 condlog(4, "partition map %s removed", name);
1516 dm_device_remove(name, rd->need_sync, rd->deferred_remove);
1521 dm_remove_partmaps (const char * mapname, int need_sync, int deferred_remove)
1523 struct remove_data rd = { need_sync, deferred_remove };
1524 return do_foreach_partmaps(mapname, remove_partmap, &rd);
1527 #ifdef LIBDM_API_DEFERRED
1530 cancel_remove_partmap (const char *name, void *unused __attribute__((unused)))
1532 if (dm_get_opencount(name))
1533 dm_cancel_remove_partmaps(name);
1534 if (dm_message(name, "@cancel_deferred_remove") != 0)
1535 condlog(0, "%s: can't cancel deferred remove: %s", name,
1541 dm_get_deferred_remove (const char * mapname)
1543 struct dm_info info;
1545 if (do_get_info(mapname, &info) != 0)
1548 return info.deferred_remove;
1552 dm_cancel_remove_partmaps(const char * mapname) {
1553 return do_foreach_partmaps(mapname, cancel_remove_partmap, NULL);
1557 dm_cancel_deferred_remove (struct multipath *mpp)
1561 if (!dm_get_deferred_remove(mpp->alias))
1563 if (mpp->deferred_remove == DEFERRED_REMOVE_IN_PROGRESS)
1564 mpp->deferred_remove = DEFERRED_REMOVE_ON;
1566 dm_cancel_remove_partmaps(mpp->alias);
1567 r = dm_message(mpp->alias, "@cancel_deferred_remove");
1569 condlog(0, "%s: can't cancel deferred remove: %s", mpp->alias,
1572 condlog(2, "%s: canceled deferred remove", mpp->alias);
1579 dm_cancel_deferred_remove (struct multipath *mpp __attribute__((unused)))
1586 static struct dm_info *
1589 return calloc(1, sizeof(struct dm_info));
1593 dm_get_info (const char * mapname, struct dm_info ** dmi)
1599 *dmi = alloc_dminfo();
1604 if (do_get_info(mapname, *dmi) != 0) {
1612 struct rename_data {
1619 rename_partmap (const char *name, void *data)
1623 struct rename_data *rd = (struct rename_data *)data;
1625 if (strncmp(name, rd->old, strlen(rd->old)) != 0)
1627 for (offset = strlen(rd->old); name[offset] && !(isdigit(name[offset])); offset++); /* do nothing */
1628 if (asprintf(&buff, "%s%s%s", rd->new, rd->delim, name + offset) >= 0) {
1629 dm_rename(name, buff, rd->delim, SKIP_KPARTX_OFF);
1631 condlog(4, "partition map %s renamed", name);
1633 condlog(1, "failed to rename partition map %s", name);
1638 dm_rename_partmaps (const char * old, char * new, char *delim)
1640 struct rename_data rd;
1648 if (isdigit(new[strlen(new)-1]))
1653 return do_foreach_partmaps(old, rename_partmap, &rd);
1657 dm_rename (const char * old, char * new, char *delim, int skip_kpartx)
1660 struct dm_task *dmt;
1661 uint32_t cookie = 0;
1662 uint16_t udev_flags = DM_UDEV_DISABLE_LIBRARY_FALLBACK | ((skip_kpartx == SKIP_KPARTX_ON)? MPATH_UDEV_NO_KPARTX_FLAG : 0);
1664 if (dm_rename_partmaps(old, new, delim))
1667 if (!(dmt = libmp_dm_task_create(DM_DEVICE_RENAME)))
1670 if (!dm_task_set_name(dmt, old))
1673 if (!dm_task_set_newname(dmt, new))
1676 dm_task_no_open_count(dmt);
1678 if (!dm_task_set_cookie(dmt, &cookie, udev_flags))
1680 r = libmp_dm_task_run(dmt);
1682 dm_log_error(2, DM_DEVICE_RENAME, dmt);
1684 libmp_udev_wait(cookie);
1687 dm_task_destroy(dmt);
1692 void dm_reassign_deps(char *table, const char *dep, const char *newdep)
1697 newtable = strdup(table);
1700 p = strstr(newtable, dep);
1701 n = table + (p - newtable);
1703 n += strlen(newdep);
1709 int dm_reassign_table(const char *name, char *old, char *new)
1711 int r = 0, modified = 0;
1712 uint64_t start, length;
1713 struct dm_task *dmt, *reload_dmt;
1714 char *target, *params = NULL;
1718 if (!(dmt = libmp_dm_task_create(DM_DEVICE_TABLE)))
1721 if (!dm_task_set_name(dmt, name))
1724 dm_task_no_open_count(dmt);
1726 if (!libmp_dm_task_run(dmt)) {
1727 dm_log_error(3, DM_DEVICE_TABLE, dmt);
1730 if (!(reload_dmt = libmp_dm_task_create(DM_DEVICE_RELOAD)))
1732 if (!dm_task_set_name(reload_dmt, name))
1736 next = dm_get_next_target(dmt, next, &start, &length,
1738 buff = strdup(params);
1740 condlog(3, "%s: failed to replace target %s, "
1741 "out of memory", name, target);
1744 if (strcmp(target, TGT_MPATH) && strstr(params, old)) {
1745 condlog(3, "%s: replace target %s %s",
1746 name, target, buff);
1747 dm_reassign_deps(buff, old, new);
1748 condlog(3, "%s: with target %s %s",
1749 name, target, buff);
1752 dm_task_add_target(reload_dmt, start, length, target, buff);
1757 dm_task_no_open_count(reload_dmt);
1759 if (!libmp_dm_task_run(reload_dmt)) {
1760 dm_log_error(3, DM_DEVICE_RELOAD, reload_dmt);
1761 condlog(3, "%s: failed to reassign targets", name);
1764 dm_simplecmd_noflush(DM_DEVICE_RESUME, name,
1765 MPATH_UDEV_RELOAD_FLAG);
1770 dm_task_destroy(reload_dmt);
1772 dm_task_destroy(dmt);
1778 * Reassign existing device-mapper table(s) to not use
1779 * the block devices but point to the multipathed
1782 int dm_reassign(const char *mapname)
1784 struct dm_deps *deps;
1785 struct dm_task *dmt;
1786 struct dm_info info;
1787 char dev_t[32], dm_dep[32];
1791 if (dm_dev_t(mapname, &dev_t[0], 32)) {
1792 condlog(3, "%s: failed to get device number", mapname);
1796 if (!(dmt = libmp_dm_task_create(DM_DEVICE_DEPS))) {
1797 condlog(3, "%s: couldn't make dm task", mapname);
1801 if (!dm_task_set_name(dmt, mapname))
1804 dm_task_no_open_count(dmt);
1806 if (!libmp_dm_task_run(dmt)) {
1807 dm_log_error(3, DM_DEVICE_DEPS, dmt);
1811 if (!dm_task_get_info(dmt, &info))
1814 if (!(deps = dm_task_get_deps(dmt)))
1820 for (i = 0; i < deps->count; i++) {
1821 sprintf(dm_dep, "%d:%d",
1822 major(deps->device[i]),
1823 minor(deps->device[i]));
1824 sysfs_check_holders(dm_dep, dev_t);
1829 dm_task_destroy (dmt);
1833 int dm_setgeometry(struct multipath *mpp)
1835 struct dm_task *dmt;
1837 char heads[4], sectors[4];
1838 char cylinders[10], start[32];
1844 pp = first_path(mpp);
1846 condlog(3, "%s: no path for geometry", mpp->alias);
1849 if (pp->geom.cylinders == 0 ||
1850 pp->geom.heads == 0 ||
1851 pp->geom.sectors == 0) {
1852 condlog(3, "%s: invalid geometry on %s", mpp->alias, pp->dev);
1856 if (!(dmt = libmp_dm_task_create(DM_DEVICE_SET_GEOMETRY)))
1859 if (!dm_task_set_name(dmt, mpp->alias))
1862 dm_task_no_open_count(dmt);
1864 /* What a sick interface ... */
1865 snprintf(heads, 4, "%u", pp->geom.heads);
1866 snprintf(sectors, 4, "%u", pp->geom.sectors);
1867 snprintf(cylinders, 10, "%u", pp->geom.cylinders);
1868 snprintf(start, 32, "%lu", pp->geom.start);
1869 if (!dm_task_set_geometry(dmt, cylinders, heads, sectors, start)) {
1870 condlog(3, "%s: Failed to set geometry", mpp->alias);
1874 r = libmp_dm_task_run(dmt);
1876 dm_log_error(3, DM_DEVICE_SET_GEOMETRY, dmt);
1878 dm_task_destroy(dmt);