2 * snippets copied from device-mapper dmsetup.c
3 * Copyright (c) 2004, 2005 Christophe Varoqui
4 * Copyright (c) 2005 Kiyoshi Ueda, NEC
5 * Copyright (c) 2005 Patrick Caulfield, Redhat
11 #include <libdevmapper.h>
16 #include <sys/sysmacros.h>
17 #include <linux/dm-ioctl.h>
24 #include "devmapper.h"
29 #include "time-util.h"
31 #include "log_pthread.h"
32 #include <sys/types.h>
/* NOTE(review): this file is a line-sampled fragment; original statements are
 * missing between the numbered lines. Comments describe visible code only. */
/* Polling granularity for wait loops — presumably used by a udev wait helper;
 * TODO confirm against the full file. */
36 #define LOOPS_PER_SEC 5
/* Sentinel meaning "version not yet fetched"; only element [0] is checked. */
38 #define INVALID_VERSION ~0U
/* Cached {major, minor, patch} triples, filled once by the init_* helpers. */
39 static unsigned int dm_library_version[3] = { INVALID_VERSION, };
40 static unsigned int dm_kernel_version[3] = { INVALID_VERSION, };
41 static unsigned int dm_mpath_target_version[3] = { INVALID_VERSION, };
/* One-time initializers and the global lock serializing libdm ioctl use. */
43 static pthread_once_t dm_initialized = PTHREAD_ONCE_INIT;
44 static pthread_once_t versions_initialized = PTHREAD_ONCE_INIT;
45 static pthread_mutex_t libmp_dm_lock = PTHREAD_MUTEX_INITIALIZER;
/* Verbosity threshold applied to messages coming from libdevmapper. */
47 static int dm_conf_verbosity;
/* __DR_UNUSED__ marks the deferred_remove parameter unused when the libdm
 * deferred-remove API is absent; empty otherwise. */
49 #ifdef LIBDM_API_DEFERRED
50 static int dm_cancel_remove_partmaps(const char * mapname);
51 #define __DR_UNUSED__ /* empty */
53 #define __DR_UNUSED__ __attribute__((unused))
/* Iterate over the kpartx partition maps stacked on 'mapname', invoking
 * partmap_func on each; forward declaration (definition later in file). */
56 static int do_foreach_partmaps(const char * mapname,
57 int (*partmap_func)(const char *, void *),
/* Without cookie support, provide no-op stand-ins so callers need no #ifdefs. */
60 #ifndef LIBDM_API_COOKIE
61 static inline int dm_task_set_cookie(struct dm_task *dmt, uint32_t *c, int a)
66 static void libmp_udev_wait(unsigned int c)
70 static void dm_udev_set_sync_support(int c)
/* Wait for the udev cookie 'c' under the global libdm lock; the cleanup
 * handler guarantees the mutex is released even on thread cancellation. */
74 static void libmp_udev_wait(unsigned int c)
76 pthread_mutex_lock(&libmp_dm_lock);
77 pthread_cleanup_push(cleanup_mutex, &libmp_dm_lock);
79 pthread_cleanup_pop(1);
/* Run a prepared dm task with libmp_dm_lock held — all libdm ioctls in this
 * file are funneled through here to serialize access. */
83 int libmp_dm_task_run(struct dm_task *dmt)
87 pthread_mutex_lock(&libmp_dm_lock);
88 pthread_cleanup_push(cleanup_mutex, &libmp_dm_lock);
90 pthread_cleanup_pop(1);
/* Log callback registered with libdevmapper (see dm_init below). Filters by
 * dm_conf_verbosity, then writes either to stderr (optionally prefixed with a
 * monotonic timestamp) or through the syslog-safe condlog/log_safe path. */
94 __attribute__((format(printf, 4, 5))) static void
95 dm_write_log (int level, const char *file, int line, const char *f, ...)
100 * libdm uses the same log levels as syslog,
101 * except that EMERG/ALERT are not used
103 if (level > LOG_DEBUG)
106 if (level > dm_conf_verbosity)
110 if (logsink != LOGSINK_SYSLOG) {
111 if (logsink == LOGSINK_STDERR_WITH_TIME) {
115 get_monotonic_time(&ts);
116 safe_sprintf(buff, "%ld.%06ld",
/* tv_nsec/1000 converts nanoseconds to microseconds for the %06ld field. */
117 (long)ts.tv_sec, ts.tv_nsec/1000);
118 fprintf(stderr, "%s | ", buff);
120 fprintf(stderr, "libdevmapper: %s(%i): ", file, line);
121 vfprintf(stderr, f, ap);
122 fprintf(stderr, "\n");
/* Syslog path: clamp libdm's LOG_ERR-and-up levels onto condlog level 0. */
124 condlog(level >= LOG_ERR ? level - LOG_ERR : 0,
125 "libdevmapper: %s(%i): ", file, line);
126 log_safe(level, f, ap);
/* Install dm_write_log as libdm's logger and derive its verbosity cutoff
 * from the multipath verbosity 'v'. */
133 static void dm_init(int v)
136 * This maps libdm's standard loglevel _LOG_WARN (= 4), which is rather
137 * quiet in practice, to multipathd's default verbosity 2
139 dm_conf_verbosity = v + 2;
140 dm_log_init(&dm_write_log);
/* Parse the libdevmapper version string into dm_library_version[]; on parse
 * failure the cached triple stays INVALID_VERSION (checked by init_versions). */
143 static void init_dm_library_version(void)
148 dm_get_library_version(version, sizeof(version));
149 if (sscanf(version, "%u.%u.%u ", &v[0], &v[1], &v[2]) != 3) {
150 condlog(0, "invalid libdevmapper version %s", version);
153 memcpy(dm_library_version, v, sizeof(dm_library_version));
154 condlog(3, "libdevmapper version %u.%.2u.%.2u",
155 dm_library_version[0], dm_library_version[1],
156 dm_library_version[2]);
/* Minimum libdm version required, chosen from the newest API feature that was
 * detected at build time (each feature implies a known release). */
163 #if defined(LIBDM_API_HOLD_CONTROL)
164 unsigned int minv[3] = {1, 2, 111};
165 #elif defined(LIBDM_API_GET_ERRNO)
166 unsigned int minv[3] = {1, 2, 99};
167 #elif defined(LIBDM_API_DEFERRED)
168 unsigned int minv[3] = {1, 2, 89};
169 #elif defined(DM_SUBSYSTEM_UDEV_FLAG0)
170 unsigned int minv[3] = {1, 2, 82};
171 #elif defined(LIBDM_API_COOKIE)
172 unsigned int minv[3] = {1, 2, 38};
174 unsigned int minv[3] = {1, 2, 8};
177 if (VERSION_GE(dm_library_version, minv))
179 condlog(0, "libdevmapper version must be >= %u.%.2u.%.2u",
180 minv[0], minv[1], minv[2]);
/* Fetch and parse the kernel device-mapper driver version into
 * dm_kernel_version[]; leaves it INVALID_VERSION on any failure. */
184 static void init_dm_drv_version(void)
189 if (!dm_driver_version(buff, sizeof(buff))) {
190 condlog(0, "cannot get kernel dm version");
193 if (sscanf(buff, "%u.%u.%u ", &v[0], &v[1], &v[2]) != 3) {
194 condlog(0, "invalid kernel dm version '%s'", buff);
/* NOTE(review): sizeof(dm_library_version) is used to size the copy into
 * dm_kernel_version — harmless since both arrays are unsigned int[3], but
 * sizeof(dm_kernel_version) would be the self-documenting choice. */
197 memcpy(dm_kernel_version, v, sizeof(dm_library_version));
198 condlog(3, "kernel device mapper v%u.%u.%u",
199 dm_kernel_version[0],
200 dm_kernel_version[1],
201 dm_kernel_version[2]);
/* Look up the kernel target named 'str' (e.g. "multipath") via
 * DM_DEVICE_LIST_VERSIONS and store its version triple in 'version'.
 * Returns nonzero and logs if the target is not loaded — TODO confirm the
 * exact return convention; the error paths are elided in this fragment. */
204 static int dm_tgt_version (unsigned int *version, char *str)
208 struct dm_versions *target;
209 struct dm_versions *last_target;
213 * We have to call dm_task_create() and not libmp_dm_task_create()
214 * here to avoid a recursive invocation of
215 * pthread_once(&dm_initialized), which would cause a deadlock.
217 if (!(dmt = dm_task_create(DM_DEVICE_LIST_VERSIONS)))
220 dm_task_no_open_count(dmt);
222 if (!libmp_dm_task_run(dmt)) {
223 dm_log_error(2, DM_DEVICE_LIST_VERSIONS, dmt);
224 condlog(0, "Can not communicate with kernel DM");
227 target = dm_task_get_versions(dmt);
230 last_target = target;
/* Match by prefix: target->name may carry extra characters after 'str'. */
231 if (!strncmp(str, target->name, strlen(str))) {
/* The versions list is a packed buffer: 'next' is a byte offset from the
 * current entry, and an entry pointing at itself terminates the walk. */
235 target = (void *) target + target->next;
236 } while (last_target != target);
239 condlog(0, "DM %s kernel driver not loaded", str);
248 dm_task_destroy(dmt);
/* Cache the multipath target's kernel version; logged only on success. */
252 static void init_dm_mpath_version(void)
254 if (!dm_tgt_version(dm_mpath_target_version, TGT_MPATH))
255 condlog(3, "DM multipath kernel driver v%u.%u.%u",
256 dm_mpath_target_version[0],
257 dm_mpath_target_version[1],
258 dm_mpath_target_version[2]);
/* Verify the multipath target is at least v1.0.3 and, if so, copy its
 * version triple out to the caller through 'ver'. */
261 static int dm_tgt_prereq (unsigned int *ver)
263 unsigned int minv[3] = {1, 0, 3};
265 if (VERSION_GE(dm_mpath_target_version, minv)) {
267 ver[0] = dm_mpath_target_version[0];
268 ver[1] = dm_mpath_target_version[1];
269 ver[2] = dm_mpath_target_version[2];
274 condlog(0, "DM multipath kernel driver must be >= v%u.%u.%u",
275 minv[0], minv[1], minv[2]);
/* pthread_once worker: populate all three cached version triples. */
279 static void _init_versions(void)
281 /* Can't use condlog here because of how VERSION_STRING is defined */
282 if (3 <= libmp_verbosity)
283 dlog(3, VERSION_STRING);
284 init_dm_library_version();
285 init_dm_drv_version();
286 init_dm_mpath_version();
/* Ensure versions are initialized exactly once; returns nonzero (failure)
 * if any of the three lookups left its sentinel in place. */
289 static int init_versions(void) {
290 pthread_once(&versions_initialized, _init_versions);
291 return (dm_library_version[0] == INVALID_VERSION ||
292 dm_kernel_version[0] == INVALID_VERSION ||
293 dm_mpath_target_version[0] == INVALID_VERSION);
/* Public prerequisite check: version lookup plus target minimum check;
 * fills 'v' with the multipath target version on success. */
296 int dm_prereq(unsigned int *v)
302 return dm_tgt_prereq(v);
/* Copy one of the cached version triples (or the packed multipath VERSION_CODE)
 * into version[3], selected by 'which'; error paths elided in this fragment. */
305 int libmp_get_version(int which, unsigned int version[3])
307 unsigned int *src_version;
311 case DM_LIBRARY_VERSION:
312 src_version = dm_library_version;
314 case DM_KERNEL_VERSION:
315 src_version = dm_kernel_version;
317 case DM_MPATH_TARGET_VERSION:
318 src_version = dm_mpath_target_version;
/* Multipath's own version is packed one byte per component in VERSION_CODE. */
320 case MULTIPATH_VERSION:
321 version[0] = (VERSION_CODE >> 16) & 0xff;
322 version[1] = (VERSION_CODE >> 8) & 0xff;
323 version[2] = VERSION_CODE & 0xff;
326 condlog(0, "%s: invalid value for 'which'", __func__);
/* A still-INVALID cache means the earlier lookup failed. */
329 if (src_version[0] == INVALID_VERSION)
331 memcpy(version, src_version, 3 * sizeof(*version));
/* Whether udev-cookie synchronization is requested; latched before init. */
335 static int libmp_dm_udev_sync = 0;
337 void libmp_udev_set_sync_support(int on)
339 libmp_dm_udev_sync = !!on;
/* Tracks whether libmp_dm_init ran, so exit teardown is skipped otherwise. */
342 static bool libmp_dm_init_called;
/* Undo libmp_dm_init: restore default libdm logging and release the held
 * control fd (when the hold-control API exists). */
343 void libmp_dm_exit(void)
345 if (!libmp_dm_init_called)
348 /* switch back to default libdm logging */
350 #ifdef LIBDM_API_HOLD_CONTROL
351 /* make sure control fd is closed in dm_lib_release() */
352 dm_hold_control_dev(0);
/* pthread_once worker: check prerequisites, hook up logging, keep the
 * control fd open, and propagate the udev sync setting into libdm. */
356 static void libmp_dm_init(void)
358 unsigned int version[3];
360 if (dm_prereq(version))
362 dm_init(libmp_verbosity);
363 #ifdef LIBDM_API_HOLD_CONTROL
364 dm_hold_control_dev(1);
366 dm_udev_set_sync_support(libmp_dm_udev_sync);
367 libmp_dm_init_called = true;
/* Consume the once-control with a no-op so a later libmp_dm_task_create
 * will NOT run libmp_dm_init (used by tools that must not touch dm). */
370 static void _do_skip_libmp_dm_init(void)
374 void skip_libmp_dm_init(void)
376 pthread_once(&dm_initialized, _do_skip_libmp_dm_init);
/* All dm tasks in this file are created through this wrapper so that
 * initialization happens lazily, exactly once. */
380 libmp_dm_task_create(int task)
382 pthread_once(&dm_initialized, libmp_dm_init);
383 return dm_task_create(task);
/* True when a deferred remove is requested or already pending. */
386 #define do_deferred(x) ((x) == DEFERRED_REMOVE_ON || (x) == DEFERRED_REMOVE_IN_PROGRESS)
/* Run a simple (no-table) dm ioctl such as SUSPEND/RESUME/REMOVE on 'name'.
 * Optionally disables flushing, arms deferred remove, and synchronizes with
 * udev via a cookie for the task types that emit uevents. */
389 dm_simplecmd (int task, const char *name, int no_flush, int need_sync,
390 uint16_t udev_flags, int deferred_remove __DR_UNUSED__) {
/* Only RESUME and REMOVE generate uevents worth waiting on. */
392 int udev_wait_flag = ((need_sync || udev_flags) &&
393 (task == DM_DEVICE_RESUME ||
394 task == DM_DEVICE_REMOVE));
398 if (!(dmt = libmp_dm_task_create (task)))
401 if (!dm_task_set_name (dmt, name))
404 dm_task_no_open_count(dmt);
405 dm_task_skip_lockfs(dmt); /* for DM_DEVICE_RESUME */
406 #ifdef LIBDM_API_FLUSH
408 dm_task_no_flush(dmt); /* for DM_DEVICE_SUSPEND/RESUME */
410 #ifdef LIBDM_API_DEFERRED
411 if (do_deferred(deferred_remove))
412 dm_task_deferred_remove(dmt);
414 if (udev_wait_flag &&
415 !dm_task_set_cookie(dmt, &cookie,
416 DM_UDEV_DISABLE_LIBRARY_FALLBACK | udev_flags))
419 r = libmp_dm_task_run (dmt);
421 dm_log_error(2, task, dmt);
/* Wait for udev to finish processing the event tied to our cookie. */
424 libmp_udev_wait(cookie);
426 dm_task_destroy (dmt);
/* Convenience wrappers: flush vs. no-flush, both udev-synchronized. */
430 int dm_simplecmd_flush (int task, const char *name, uint16_t udev_flags)
432 return dm_simplecmd(task, name, 0, 1, udev_flags, 0);
435 int dm_simplecmd_noflush (int task, const char *name, uint16_t udev_flags)
437 return dm_simplecmd(task, name, 1, 1, udev_flags, 0);
/* Remove a map, honoring the caller's sync and deferred-remove choices. */
441 dm_device_remove (const char *name, int needsync, int deferred_remove) {
442 return dm_simplecmd(DM_DEVICE_REMOVE, name, 0, needsync, 0,
/* Create or reload the table of mpp->alias with the given target/params.
 * For DM_DEVICE_CREATE, also sets the "mpath-"-prefixed uuid and waits on a
 * udev cookie; 'ro' loads the table read-only. Cleanup/label lines elided. */
447 dm_addmap (int task, const char *target, struct multipath *mpp,
448 char * params, int ro, uint16_t udev_flags) {
451 char *prefixed_uuid = NULL;
/* Never create a map whose uuid would be just the bare prefix. */
454 if (task == DM_DEVICE_CREATE && strlen(mpp->wwid) == 0) {
455 condlog(1, "%s: refusing to create map with empty WWID",
460 /* Need to add this here to allow 0 to be passed in udev_flags */
461 udev_flags |= DM_UDEV_DISABLE_LIBRARY_FALLBACK;
463 if (!(dmt = libmp_dm_task_create (task)))
466 if (!dm_task_set_name (dmt, mpp->alias))
469 if (!dm_task_add_target (dmt, 0, mpp->size, target, params))
475 if (task == DM_DEVICE_CREATE) {
476 if (asprintf(&prefixed_uuid, UUID_PREFIX "%s", mpp->wwid) < 0) {
477 condlog(0, "cannot create prefixed uuid : %s",
481 if (!dm_task_set_uuid(dmt, prefixed_uuid))
483 dm_task_skip_lockfs(dmt);
484 #ifdef LIBDM_API_FLUSH
485 dm_task_no_flush(dmt);
/* Apply mode/uid/gid only when the corresponding attribute flag is set. */
489 if (mpp->attribute_flags & (1 << ATTR_MODE) &&
490 !dm_task_set_mode(dmt, mpp->mode))
492 if (mpp->attribute_flags & (1 << ATTR_UID) &&
493 !dm_task_set_uid(dmt, mpp->uid))
495 if (mpp->attribute_flags & (1 << ATTR_GID) &&
496 !dm_task_set_gid(dmt, mpp->gid))
498 condlog(2, "%s: %s [0 %llu %s %s]", mpp->alias,
499 task == DM_DEVICE_RELOAD ? "reload" : "addmap", mpp->size,
502 dm_task_no_open_count(dmt);
/* RELOAD cannot use cookies (released only on RESUME) — CREATE only. */
504 if (task == DM_DEVICE_CREATE &&
505 !dm_task_set_cookie(dmt, &cookie, udev_flags))
508 r = libmp_dm_task_run (dmt);
510 dm_log_error(2, task, dmt);
512 if (task == DM_DEVICE_CREATE)
513 libmp_udev_wait(cookie);
519 dm_task_destroy (dmt);
/* A successful table push means any pending reload has been applied. */
522 mpp->need_reload = false;
/* Compute the MPATH_UDEV_* flag set for a map create/reload event:
 * suppress kpartx, mark no-usable-paths, and tag plain reloads. */
526 static uint16_t build_udev_flags(const struct multipath *mpp, int reload)
528 /* DM_UDEV_DISABLE_LIBRARY_FALLBACK is added in dm_addmap */
529 return (mpp->skip_kpartx == SKIP_KPARTX_ON ?
530 MPATH_UDEV_NO_KPARTX_FLAG : 0) |
531 ((count_active_pending_paths(mpp) == 0 ||
532 mpp->ghost_delay_tick > 0) ?
533 MPATH_UDEV_NO_PATHS_FLAG : 0) |
534 (reload && !mpp->force_udev_reload ?
535 MPATH_UDEV_RELOAD_FLAG : 0);
/* Create a new multipath map, first read-write then retrying read-only
 * (ro loop) — presumably to cope with EROFS; TODO confirm from full file.
 * On failure, clean up the empty half-created map and record the wwid. */
538 int dm_addmap_create (struct multipath *mpp, char * params)
541 uint16_t udev_flags = build_udev_flags(mpp, 0);
543 for (ro = 0; ro <= 1; ro++) {
546 if (dm_addmap(DM_DEVICE_CREATE, TGT_MPATH, mpp, params, ro,
548 if (unmark_failed_wwid(mpp->wwid) ==
550 mpp->needs_paths_uevent = 1;
554 * DM_DEVICE_CREATE is actually DM_DEV_CREATE + DM_TABLE_LOAD.
555 * Failing the second part leaves an empty map. Clean it up.
558 if (dm_map_present(mpp->alias)) {
559 condlog(3, "%s: failed to load map (a path might be in use)", mpp->alias);
560 dm_flush_map_nosync(mpp->alias);
563 condlog(3, "%s: failed to load map, error %d",
568 if (mark_failed_wwid(mpp->wwid) == WWID_FAILED_CHANGED)
569 mpp->needs_paths_uevent = 1;
/* Reload an existing map's table, then resume it. Falls back to a
 * read-only reload on EROFS; resumes again if the device stayed suspended. */
576 int dm_addmap_reload(struct multipath *mpp, char *params, int flush)
579 uint16_t udev_flags = build_udev_flags(mpp, 1);
582 * DM_DEVICE_RELOAD cannot wait on a cookie, as
583 * the cookie will only ever be released after an
584 * DM_DEVICE_RESUME. So call DM_DEVICE_RESUME
585 * after each successful call to DM_DEVICE_RELOAD.
587 if (!mpp->force_readonly)
588 r = dm_addmap(DM_DEVICE_RELOAD, TGT_MPATH, mpp, params,
591 if (!mpp->force_readonly && errno != EROFS)
593 r = dm_addmap(DM_DEVICE_RELOAD, TGT_MPATH, mpp,
594 params, ADDMAP_RO, 0);
597 r = dm_simplecmd(DM_DEVICE_RESUME, mpp->alias, !flush,
602 /* If the resume failed, dm will leave the device suspended, and
603 * drop the new table, so doing a second resume will try using
604 * the original table */
605 if (dm_is_suspended(mpp->alias))
606 dm_simplecmd(DM_DEVICE_RESUME, mpp->alias, !flush, 1,
/* True when this mpp's cached dm_info has been filled in. */
612 has_dm_info(const struct multipath *mpp)
614 return (mpp && mpp->dmi.exists != 0);
/* Fetch struct dm_info for map 'name' via DM_DEVICE_INFO; returns 0 on
 * success — error/cleanup lines elided in this fragment. */
618 dm_get_info(const char *name, struct dm_info *info)
626 if (!(dmt = libmp_dm_task_create(DM_DEVICE_INFO)))
629 if (!dm_task_set_name(dmt, name))
632 dm_task_no_open_count(dmt);
634 if (!libmp_dm_task_run(dmt)) {
635 dm_log_error(3, DM_DEVICE_INFO, dmt);
639 if (!dm_task_get_info(dmt, info))
647 dm_task_destroy(dmt);
/* A map exists iff an INFO ioctl on it succeeds. */
651 int dm_map_present(const char * str)
655 return (dm_get_info(str, &info) == 0);
/* Fetch the (single) table line of map 'name'. On DMP_OK, *size gets the
 * map length and *outparams (if non-NULL) a strdup'd params string the
 * caller must free. Returns DMP_* codes; some branches elided here. */
658 int dm_get_map(const char *name, unsigned long long *size, char **outparams)
662 uint64_t start, length;
663 char *target_type = NULL;
666 if (!(dmt = libmp_dm_task_create(DM_DEVICE_TABLE)))
669 if (!dm_task_set_name(dmt, name))
672 dm_task_no_open_count(dmt);
675 if (!libmp_dm_task_run(dmt)) {
676 dm_log_error(3, DM_DEVICE_TABLE, dmt);
/* ENXIO means the map doesn't exist — distinguished from other errors. */
677 if (dm_task_get_errno(dmt) == ENXIO)
683 /* Fetch 1st target */
/* A non-NULL "next" handle means the table has more than one target,
 * which a well-formed multipath map never has. */
684 if (dm_get_next_target(dmt, NULL, &start, &length,
685 &target_type, &params) != NULL)
686 /* more than one target */
695 *outparams = strdup(params);
696 r = *outparams ? DMP_OK : DMP_ERR;
700 dm_task_destroy(dmt);
/* Copy the raw (still-prefixed) dm uuid of 'name' into uuid[uuid_len]. */
705 dm_get_prefixed_uuid(const char *name, char *uuid, int uuid_len)
711 dmt = libmp_dm_task_create(DM_DEVICE_INFO);
715 if (!dm_task_set_name (dmt, name))
718 if (!libmp_dm_task_run(dmt)) {
719 dm_log_error(3, DM_DEVICE_INFO, dmt);
723 uuidtmp = dm_task_get_uuid(dmt);
725 strlcpy(uuid, uuidtmp, uuid_len);
731 dm_task_destroy(dmt);
/* Like dm_get_prefixed_uuid, but strips the "mpath-" prefix when present. */
735 int dm_get_uuid(const char *name, char *uuid, int uuid_len)
737 char tmp[DM_UUID_LEN];
739 if (dm_get_prefixed_uuid(name, tmp, sizeof(tmp)))
742 if (!strncmp(tmp, UUID_PREFIX, UUID_PREFIX_LEN))
743 strlcpy(uuid, tmp + UUID_PREFIX_LEN, uuid_len);
/* Decide whether 'part_name' is a kpartx partition of 'map_name': its uuid
 * must start with "part" and contain map_name's full prefixed uuid. */
751 is_mpath_part(const char *part_name, const char *map_name)
754 char part_uuid[DM_UUID_LEN], map_uuid[DM_UUID_LEN];
756 if (dm_get_prefixed_uuid(part_name, part_uuid, sizeof(part_uuid)))
759 if (dm_get_prefixed_uuid(map_name, map_uuid, sizeof(map_uuid)))
762 if (strncmp(part_uuid, "part", 4) != 0)
765 p = strstr(part_uuid, UUID_PREFIX);
766 if (p && !strcmp(p, map_uuid))
/* Fetch the kernel status line of map 'name' (DM_DEVICE_STATUS). On DMP_OK,
 * *outstatus receives a strdup'd string the caller frees. Verifies the map
 * has exactly one target and that it is the multipath target. */
772 int dm_get_status(const char *name, char **outstatus)
776 uint64_t start, length;
777 char *target_type = NULL;
780 if (!(dmt = libmp_dm_task_create(DM_DEVICE_STATUS)))
783 if (!dm_task_set_name(dmt, name))
786 dm_task_no_open_count(dmt);
789 if (!libmp_dm_task_run(dmt)) {
790 dm_log_error(3, DM_DEVICE_STATUS, dmt);
791 if (dm_task_get_errno(dmt) == ENXIO)
797 /* Fetch 1st target */
798 if (dm_get_next_target(dmt, NULL, &start, &length,
799 &target_type, &status) != NULL)
802 if (!target_type || strcmp(target_type, TGT_MPATH) != 0)
806 condlog(2, "get null status.");
813 *outstatus = strdup(status);
814 r = *outstatus ? DMP_OK : DMP_ERR;
818 condlog(0, "%s: error getting map status string", name);
820 dm_task_destroy(dmt);
/* (return-code doc continues from lines elided in this fragment) */
828 * -1 : empty map, or more than 1 target
/* Check whether map 'name' has a single target of the given 'type'. */
830 int dm_type(const char *name, char *type)
834 uint64_t start, length;
835 char *target_type = NULL;
838 if (!(dmt = libmp_dm_task_create(DM_DEVICE_TABLE)))
841 if (!dm_task_set_name(dmt, name))
844 dm_task_no_open_count(dmt);
846 if (!libmp_dm_task_run(dmt)) {
847 dm_log_error(3, DM_DEVICE_TABLE, dmt);
851 /* Fetch 1st target */
852 if (dm_get_next_target(dmt, NULL, &start, &length,
853 &target_type, &params) != NULL)
854 /* multiple targets */
856 else if (!target_type)
858 else if (!strcmp(target_type, type))
862 dm_task_destroy(dmt);
868 * 1 : is multipath device
869 * 0 : is not multipath device
/* A map counts as multipath only if its uuid starts with "mpath-" AND its
 * single table target is the multipath target. */
872 int dm_is_mpath(const char *name)
877 uint64_t start, length;
878 char *target_type = NULL;
882 if (!(dmt = libmp_dm_task_create(DM_DEVICE_TABLE)))
885 if (!dm_task_set_name(dmt, name))
888 dm_task_no_open_count(dmt);
890 if (!libmp_dm_task_run(dmt)) {
891 dm_log_error(3, DM_DEVICE_TABLE, dmt);
895 if (!dm_task_get_info(dmt, &info))
903 uuid = dm_task_get_uuid(dmt);
905 if (!uuid || strncmp(uuid, UUID_PREFIX, UUID_PREFIX_LEN) != 0)
908 /* Fetch 1st target */
909 if (dm_get_next_target(dmt, NULL, &start, &length, &target_type,
911 /* multiple targets */
914 if (!target_type || strcmp(target_type, TGT_MPATH) != 0)
919 dm_task_destroy(dmt);
922 condlog(3, "%s: dm command failed in %s: %s", name, __FUNCTION__, strerror(errno));
928 * 1 : map with uuid exists
929 * 0 : map with uuid doesn't exist
/* Probe for a map by its wwid: prepend UUID_PREFIX and issue a uuid-keyed
 * DM_DEVICE_INFO. Error/cleanup lines elided in this fragment. */
933 dm_map_present_by_uuid(const char *uuid)
937 char prefixed_uuid[WWID_SIZE + UUID_PREFIX_LEN];
940 if (!uuid || uuid[0] == '\0')
943 if (safe_sprintf(prefixed_uuid, UUID_PREFIX "%s", uuid))
946 if (!(dmt = libmp_dm_task_create(DM_DEVICE_INFO)))
949 dm_task_no_open_count(dmt);
951 if (!dm_task_set_uuid(dmt, prefixed_uuid))
954 if (!libmp_dm_task_run(dmt)) {
955 dm_log_error(3, DM_DEVICE_INFO, dmt);
959 if (!dm_task_get_info(dmt, &info))
965 dm_task_destroy(dmt);
968 condlog(3, "%s: dm command failed in %s: %s", uuid,
969 __FUNCTION__, strerror(errno));
/* Format 'mapname's major:minor into dev_t[len]. */
974 dm_dev_t (const char * mapname, char * dev_t, int len)
978 if (dm_get_info(mapname, &info) != 0)
/* NOTE(review): snprintf truncation is normally ret >= len, not ret > len —
 * the ret == len case (exact-fit truncation) slips through here. Confirm
 * against callers' buffer sizes before changing. */
981 if (snprintf(dev_t, len, "%i:%i", info.major, info.minor) > len)
/* Return the kernel open count of 'mapname', or an error value (elided). */
988 dm_get_opencount (const char * mapname)
994 if (!(dmt = libmp_dm_task_create(DM_DEVICE_INFO)))
997 if (!dm_task_set_name(dmt, mapname))
1000 if (!libmp_dm_task_run(dmt)) {
1001 dm_log_error(3, DM_DEVICE_INFO, dmt);
1005 if (!dm_task_get_info(dmt, &info))
1011 r = info.open_count;
1013 dm_task_destroy(dmt);
/* Fill *major/*minor for map 'name' from its dm_info. */
1018 dm_get_major_minor(const char *name, int *major, int *minor)
1020 struct dm_info info;
1022 if (dm_get_info(name, &info) != 0)
1025 *major = info.major;
1026 *minor = info.minor;
/* do_foreach_partmaps callback: existence probe — presumably returns a
 * value that stops iteration once any partition map is found. */
1031 has_partmap(const char *name __attribute__((unused)),
1032 void *data __attribute__((unused)))
/* Decide whether 'name' or any of its partition maps is held open: compare
 * the map's open count against the number of partition maps stacked on it
 * (each partition holds one reference). */
1038 partmap_in_use(const char *name, void *data)
1040 int part_count, *ret_count = (int *)data;
1041 int open_count = dm_get_opencount(name);
1047 if (do_foreach_partmaps(name, partmap_in_use, &part_count))
1049 if (open_count != part_count) {
1050 condlog(2, "%s: map in use", name);
/* Tear down multipath map 'mapname' and its partition maps. Optionally
 * suspends first, disables queue_if_no_path so outstanding I/O can fail,
 * retries up to 'retries' times, and restores queueing if removal fails. */
1057 int _dm_flush_map (const char * mapname, int need_sync, int deferred_remove,
1058 int need_suspend, int retries)
1061 int queue_if_no_path = 0;
1063 unsigned long long mapsize;
1064 char *params = NULL;
1066 if (dm_is_mpath(mapname) != 1)
1067 return 0; /* nothing to do */
1069 /* if the device currently has no partitions, do not
1070 run kpartx on it if you fail to delete it */
1071 if (do_foreach_partmaps(mapname, has_partmap, NULL) == 0)
1072 udev_flags |= MPATH_UDEV_NO_KPARTX_FLAG;
1074 /* If you aren't doing a deferred remove, make sure that no
1075 * devices are in use */
1076 if (!do_deferred(deferred_remove) && partmap_in_use(mapname, NULL))
/* Turn off queue_if_no_path so pending I/O errors out instead of hanging
 * the suspend; remember the prior state to restore it on failure. */
1080 dm_get_map(mapname, &mapsize, &params) == DMP_OK &&
1081 strstr(params, "queue_if_no_path")) {
1082 if (!dm_queue_if_no_path(mapname, 0))
1083 queue_if_no_path = 1;
1085 /* Leave queue_if_no_path alone if unset failed */
1086 queue_if_no_path = -1;
1091 if (dm_remove_partmaps(mapname, need_sync, deferred_remove))
1094 if (!do_deferred(deferred_remove) && dm_get_opencount(mapname)) {
1095 condlog(2, "%s: map in use", mapname);
1100 if (need_suspend && queue_if_no_path != -1)
1101 dm_simplecmd_flush(DM_DEVICE_SUSPEND, mapname, 0);
1103 r = dm_device_remove(mapname, need_sync, deferred_remove);
1106 if (do_deferred(deferred_remove)
1107 && dm_map_present(mapname)) {
1108 condlog(4, "multipath map %s remove deferred",
1112 condlog(4, "multipath map %s removed", mapname);
1115 condlog(2, "failed to remove multipath map %s",
/* On failure, resume the map we suspended so it isn't left wedged. */
1117 if (need_suspend && queue_if_no_path != -1) {
1118 dm_simplecmd_noflush(DM_DEVICE_RESUME,
1119 mapname, udev_flags);
1124 } while (retries-- > 0);
/* Restore queueing only if we were the ones who turned it off. */
1126 if (queue_if_no_path == 1)
1127 dm_queue_if_no_path(mapname, 1);
/* Deferred-capable and fallback variants of "flush a pathless map". */
1132 #ifdef LIBDM_API_DEFERRED
1135 dm_flush_map_nopaths(const char * mapname, int deferred_remove)
1137 return _dm_flush_map(mapname, 1, deferred_remove, 0, 0);
1143 dm_flush_map_nopaths(const char * mapname,
1144 int deferred_remove __attribute__((unused)))
1146 return _dm_flush_map(mapname, 1, 0, 0, 0);
/* Flush every multipath map on the system: list all dm devices and flush
 * each, ORing failures into the result. The names list is a packed buffer
 * walked via byte offsets (see the 'next' pointer arithmetic). */
1151 int dm_flush_maps (int need_suspend, int retries)
1154 struct dm_task *dmt;
1155 struct dm_names *names;
1158 if (!(dmt = libmp_dm_task_create (DM_DEVICE_LIST)))
1161 dm_task_no_open_count(dmt);
1163 if (!libmp_dm_task_run (dmt)) {
1164 dm_log_error(3, DM_DEVICE_LIST, dmt);
1168 if (!(names = dm_task_get_names (dmt)))
1177 r |= dm_suspend_and_flush_map(names->name, retries);
1179 r |= dm_flush_map(names->name);
1181 names = (void *) names + next;
1185 dm_task_destroy (dmt);
/* Send a target message ('message') to sector 0 of map 'mapname' via
 * DM_DEVICE_TARGET_MSG; used for fail_path/reinstate/group messages. */
1190 dm_message(const char * mapname, char * message)
1193 struct dm_task *dmt;
1195 if (!(dmt = libmp_dm_task_create(DM_DEVICE_TARGET_MSG)))
1198 if (!dm_task_set_name(dmt, mapname))
1201 if (!dm_task_set_sector(dmt, 0))
1204 if (!dm_task_set_message(dmt, message))
1207 dm_task_no_open_count(dmt);
1209 if (!libmp_dm_task_run(dmt)) {
1210 dm_log_error(2, DM_DEVICE_TARGET_MSG, dmt);
1217 condlog(0, "DM message failed [%s]", message);
1219 dm_task_destroy(dmt);
/* Thin wrappers that format a multipath target message and send it.
 * NOTE(review): the snprintf guards use "> 32" where ">= 32" would also
 * catch exact-fit truncation; pre-existing pattern, confirm before changing. */
1224 dm_fail_path(const char * mapname, char * path)
1228 if (snprintf(message, 32, "fail_path %s", path) > 32)
1231 return dm_message(mapname, message);
1235 dm_reinstate_path(const char * mapname, char * path)
1239 if (snprintf(message, 32, "reinstate_path %s", path) > 32)
1242 return dm_message(mapname, message);
/* enable=1 turns queueing on; enable=0 makes I/O fail when pathless. */
1246 dm_queue_if_no_path(const char *mapname, int enable)
1251 message = "queue_if_no_path";
1253 message = "fail_if_no_path";
1255 return dm_message(mapname, message);
/* Generic "<msg>_group <index>" message plus the three public variants. */
1259 dm_groupmsg (const char * msg, const char * mapname, int index)
1263 if (snprintf(message, 32, "%s_group %i", msg, index) > 32)
1266 return dm_message(mapname, message);
1270 dm_switchgroup(const char * mapname, int index)
1272 return dm_groupmsg("switch", mapname, index);
1276 dm_enablegroup(const char * mapname, int index)
1278 return dm_groupmsg("enable", mapname, index);
1282 dm_disablegroup(const char * mapname, int index)
1284 return dm_groupmsg("disable", mapname, index);
/* Allocate and minimally populate a struct multipath for dm map 'name':
 * alias, size, wwid and dm_info. Returns NULL on allocation/size failure
 * (cleanup path frees via free_multipath, keeping paths). */
1287 struct multipath *dm_get_multipath(const char *name)
1289 struct multipath *mpp = NULL;
1291 mpp = alloc_multipath();
1295 mpp->alias = strdup(name);
1300 if (dm_get_map(name, &mpp->size, NULL) != DMP_OK)
/* uuid/info failures are tolerated — logged but not fatal. */
1303 if (dm_get_uuid(name, mpp->wwid, WWID_SIZE) != 0)
1304 condlog(2, "%s: failed to get uuid for %s", __func__, name);
1305 if (dm_get_info(name, &mpp->dmi) != 0)
1306 condlog(2, "%s: failed to get info for %s", __func__, name);
1310 free_multipath(mpp, KEEP_PATHS);
/* Populate vector 'mp' with a struct multipath for every multipath map
 * currently known to device-mapper. */
1315 dm_get_maps (vector mp)
1317 struct multipath * mpp;
1319 struct dm_task *dmt;
1320 struct dm_names *names;
1326 if (!(dmt = libmp_dm_task_create(DM_DEVICE_LIST)))
1329 dm_task_no_open_count(dmt);
1331 if (!libmp_dm_task_run(dmt)) {
1332 dm_log_error(3, DM_DEVICE_LIST, dmt);
1336 if (!(names = dm_task_get_names(dmt)))
1340 r = 0; /* this is perfectly valid */
/* Skip non-multipath maps (linear partitions, LVM volumes, ...). */
1345 if (dm_is_mpath(names->name) != 1)
1348 mpp = dm_get_multipath(names->name);
1352 if (!vector_alloc_slot(mp)) {
1353 free_multipath(mpp, KEEP_PATHS);
1357 vector_set_slot(mp, mpp);
1361 names = (void *) names + next;
1367 dm_task_destroy (dmt);
/* Accessors for single dm_info fields of a named map. */
1372 dm_geteventnr (const char *name)
1374 struct dm_info info;
1376 if (dm_get_info(name, &info) != 0)
1379 return info.event_nr;
1383 dm_is_suspended(const char *name)
1385 struct dm_info info;
1387 if (dm_get_info(name, &info) != 0)
1390 return info.suspended;
/* Reverse lookup: return a strdup'd map name for major:minor, or NULL.
 * Caller frees the returned string. */
1394 dm_mapname(int major, int minor)
1396 char * response = NULL;
1398 struct dm_task *dmt;
1401 if (!(dmt = libmp_dm_task_create(DM_DEVICE_STATUS)))
1404 if (!dm_task_set_major(dmt, major) ||
1405 !dm_task_set_minor(dmt, minor))
1408 dm_task_no_open_count(dmt);
1409 r = libmp_dm_task_run(dmt);
1411 dm_log_error(2, DM_DEVICE_STATUS, dmt);
1415 map = dm_task_get_name(dmt);
1416 if (map && strlen(map))
1417 response = strdup((const char *)map);
1419 dm_task_destroy(dmt);
1422 dm_task_destroy(dmt);
1423 condlog(0, "%i:%i: error fetching map name", major, minor);
/* Walk all dm devices and call partmap_func(name, data) for each one that
 * is a kpartx partition of 'mapname'. A device qualifies when it is a
 * single linear target, its uuid marks it a partition of mapname's uuid,
 * and its table maps onto mapname's major:minor. */
1428 do_foreach_partmaps (const char * mapname,
1429 int (*partmap_func)(const char *, void *),
1432 struct dm_task *dmt;
1433 struct dm_names *names;
1435 char *params = NULL;
1436 unsigned long long size;
1441 if (!(dmt = libmp_dm_task_create(DM_DEVICE_LIST)))
1444 dm_task_no_open_count(dmt);
1446 if (!libmp_dm_task_run(dmt)) {
1447 dm_log_error(3, DM_DEVICE_LIST, dmt);
1451 if (!(names = dm_task_get_names(dmt)))
1455 r = 0; /* this is perfectly valid */
1459 if (dm_dev_t(mapname, &dev_t[0], 32))
1465 * if there is only a single "linear" target
1467 (dm_type(names->name, TGT_PART) == 1) &&
1470 * and the uuid of the target is a partition of the
1471 * uuid of the multipath device
1473 is_mpath_part(names->name, mapname) &&
1476 * and we can fetch the map table from the kernel
1478 dm_get_map(names->name, &size, &params) == DMP_OK &&
1481 * and the table maps over the multipath map
1483 (p = strstr(params, dev_t)) &&
/* The isdigit check prevents "8:1" matching inside "8:16". */
1484 !isdigit(*(p + strlen(dev_t)))
1486 if (partmap_func(names->name, data) != 0)
1493 names = (void *) names + next;
1499 dm_task_destroy (dmt);
/* Options threaded through do_foreach_partmaps to remove_partmap. */
1503 struct remove_data {
1505 int deferred_remove;
/* Callback: remove one partition map, first recursing into any maps
 * stacked on top of it; refuses (non-deferred) removal while still open. */
1509 remove_partmap(const char *name, void *data)
1511 struct remove_data *rd = (struct remove_data *)data;
1513 if (dm_get_opencount(name)) {
1514 dm_remove_partmaps(name, rd->need_sync, rd->deferred_remove);
1515 if (!do_deferred(rd->deferred_remove) &&
1516 dm_get_opencount(name)) {
1517 condlog(2, "%s: map in use", name);
1521 condlog(4, "partition map %s removed", name);
1522 dm_device_remove(name, rd->need_sync, rd->deferred_remove);
/* Remove every partition map stacked on 'mapname'. */
1527 dm_remove_partmaps (const char * mapname, int need_sync, int deferred_remove)
1529 struct remove_data rd = { need_sync, deferred_remove };
1530 return do_foreach_partmaps(mapname, remove_partmap, &rd);
/* Deferred-remove support (only when libdm provides the API). */
1533 #ifdef LIBDM_API_DEFERRED
/* Callback: cancel a pending deferred remove on one partition map (and,
 * recursively, on maps stacked above it while it is still open). */
1536 cancel_remove_partmap (const char *name, void *unused __attribute__((unused)))
1538 if (dm_get_opencount(name))
1539 dm_cancel_remove_partmaps(name);
1540 if (dm_message(name, "@cancel_deferred_remove") != 0)
1541 condlog(0, "%s: can't cancel deferred remove: %s", name,
/* Report whether a deferred remove is pending on 'mapname'. */
1547 dm_get_deferred_remove (const char * mapname)
1549 struct dm_info info;
1551 if (dm_get_info(mapname, &info) != 0)
1554 return info.deferred_remove;
1558 dm_cancel_remove_partmaps(const char * mapname) {
1559 return do_foreach_partmaps(mapname, cancel_remove_partmap, NULL);
/* Cancel a deferred remove on mpp and its partition maps, downgrading the
 * recorded state from IN_PROGRESS back to ON. */
1563 dm_cancel_deferred_remove (struct multipath *mpp)
1567 if (!dm_get_deferred_remove(mpp->alias))
1569 if (mpp->deferred_remove == DEFERRED_REMOVE_IN_PROGRESS)
1570 mpp->deferred_remove = DEFERRED_REMOVE_ON;
1572 dm_cancel_remove_partmaps(mpp->alias);
1573 r = dm_message(mpp->alias, "@cancel_deferred_remove");
1575 condlog(0, "%s: can't cancel deferred remove: %s", mpp->alias,
1578 condlog(2, "%s: canceled deferred remove", mpp->alias);
/* Stub when the deferred-remove API is unavailable. */
1585 dm_cancel_deferred_remove (struct multipath *mpp __attribute__((unused)))
/* Old name, new name, and delimiter passed to rename_partmap. */
1592 struct rename_data {
/* Callback: rename one partition map from "<old><delim><N>" to
 * "<new><delim><N>", preserving the trailing partition number. */
1599 rename_partmap (const char *name, void *data)
1603 struct rename_data *rd = (struct rename_data *)data;
1605 if (strncmp(name, rd->old, strlen(rd->old)) != 0)
/* Skip past any non-digit separator characters to find the partition
 * number suffix. */
1607 for (offset = strlen(rd->old); name[offset] && !(isdigit(name[offset])); offset++); /* do nothing */
1608 if (asprintf(&buff, "%s%s%s", rd->new, rd->delim, name + offset) >= 0) {
1609 dm_rename(name, buff, rd->delim, SKIP_KPARTX_OFF);
1611 condlog(4, "partition map %s renamed", name);
1613 condlog(1, "failed to rename partition map %s", name);
/* Rename all partition maps of 'old'; a digit-terminated new name needs a
 * delimiter (e.g. "p") so "mpatha1" doesn't become ambiguous. */
1618 dm_rename_partmaps (const char * old, char * new, char *delim)
1620 struct rename_data rd;
1628 if (isdigit(new[strlen(new)-1]))
1633 return do_foreach_partmaps(old, rename_partmap, &rd);
/* Rename a multipath map (partition maps first, then the map itself),
 * synchronizing with udev via a cookie. */
1637 dm_rename (const char * old, char * new, char *delim, int skip_kpartx)
1640 struct dm_task *dmt;
1641 uint32_t cookie = 0;
1642 uint16_t udev_flags = DM_UDEV_DISABLE_LIBRARY_FALLBACK | ((skip_kpartx == SKIP_KPARTX_ON)? MPATH_UDEV_NO_KPARTX_FLAG : 0);
1644 if (dm_rename_partmaps(old, new, delim))
1647 if (!(dmt = libmp_dm_task_create(DM_DEVICE_RENAME)))
1650 if (!dm_task_set_name(dmt, old))
1653 if (!dm_task_set_newname(dmt, new))
1656 dm_task_no_open_count(dmt);
1658 if (!dm_task_set_cookie(dmt, &cookie, udev_flags))
1660 r = libmp_dm_task_run(dmt);
1662 dm_log_error(2, DM_DEVICE_RENAME, dmt);
1664 libmp_udev_wait(cookie);
1667 dm_task_destroy(dmt);
/* Rewrite 'table' in place, replacing dependency string 'dep' with 'newdep'.
 * Works on a strdup'd copy to locate the match, then splices into the
 * original buffer — intermediate lines elided in this fragment. */
1672 void dm_reassign_deps(char *table, const char *dep, const char *newdep)
1677 newtable = strdup(table);
1680 p = strstr(newtable, dep);
/* Translate the match offset from the copy back into 'table'. */
1681 n = table + (p - newtable);
1683 n += strlen(newdep);
/* Reload map 'name' with every non-multipath target's params rewritten to
 * reference 'new' instead of 'old', then resume to activate the new table. */
1689 int dm_reassign_table(const char *name, char *old, char *new)
1691 int r = 0, modified = 0;
1692 uint64_t start, length;
1693 struct dm_task *dmt, *reload_dmt;
1694 char *target, *params = NULL;
1698 if (!(dmt = libmp_dm_task_create(DM_DEVICE_TABLE)))
1701 if (!dm_task_set_name(dmt, name))
1704 dm_task_no_open_count(dmt);
1706 if (!libmp_dm_task_run(dmt)) {
1707 dm_log_error(3, DM_DEVICE_TABLE, dmt);
1710 if (!(reload_dmt = libmp_dm_task_create(DM_DEVICE_RELOAD)))
1712 if (!dm_task_set_name(reload_dmt, name))
/* Iterate every target of the existing table. */
1716 next = dm_get_next_target(dmt, next, &start, &length,
1718 buff = strdup(params);
1720 condlog(3, "%s: failed to replace target %s, "
1721 "out of memory", name, target);
/* Only rewrite non-multipath targets whose params mention 'old'. */
1724 if (strcmp(target, TGT_MPATH) && strstr(params, old)) {
1725 condlog(3, "%s: replace target %s %s",
1726 name, target, buff);
1727 dm_reassign_deps(buff, old, new);
1728 condlog(3, "%s: with target %s %s",
1729 name, target, buff);
1732 dm_task_add_target(reload_dmt, start, length, target, buff);
1737 dm_task_no_open_count(reload_dmt);
1739 if (!libmp_dm_task_run(reload_dmt)) {
1740 dm_log_error(3, DM_DEVICE_RELOAD, reload_dmt);
1741 condlog(3, "%s: failed to reassign targets", name);
/* A reload only takes effect after a resume. */
1744 dm_simplecmd_noflush(DM_DEVICE_RESUME, name,
1745 MPATH_UDEV_RELOAD_FLAG);
1750 dm_task_destroy(reload_dmt);
1752 dm_task_destroy(dmt);
1758 * Reassign existing device-mapper table(s) to not use
1759 * the block devices but point to the multipathed
/* For each dm device depending on 'mapname', check its holders in sysfs so
 * stacked tables can be re-pointed at the multipath device. */
1762 int dm_reassign(const char *mapname)
1764 struct dm_deps *deps;
1765 struct dm_task *dmt;
1766 struct dm_info info;
1767 char dev_t[32], dm_dep[32];
1771 if (dm_dev_t(mapname, &dev_t[0], 32)) {
1772 condlog(3, "%s: failed to get device number", mapname);
1776 if (!(dmt = libmp_dm_task_create(DM_DEVICE_DEPS))) {
1777 condlog(3, "%s: couldn't make dm task", mapname);
1781 if (!dm_task_set_name(dmt, mapname))
1784 dm_task_no_open_count(dmt);
1786 if (!libmp_dm_task_run(dmt)) {
1787 dm_log_error(3, DM_DEVICE_DEPS, dmt);
1791 if (!dm_task_get_info(dmt, &info))
1794 if (!(deps = dm_task_get_deps(dmt)))
/* Walk every dependency's dev_t and let sysfs_check_holders do the
 * per-holder reassignment work. */
1800 for (i = 0; i < deps->count; i++) {
1801 sprintf(dm_dep, "%d:%d",
1802 major(deps->device[i]),
1803 minor(deps->device[i]));
1804 sysfs_check_holders(dm_dep, dev_t);
1809 dm_task_destroy (dmt);
1813 int dm_setgeometry(struct multipath *mpp)
1815 struct dm_task *dmt;
1817 char heads[4], sectors[4];
1818 char cylinders[10], start[32];
1824 pp = first_path(mpp);
1826 condlog(3, "%s: no path for geometry", mpp->alias);
1829 if (pp->geom.cylinders == 0 ||
1830 pp->geom.heads == 0 ||
1831 pp->geom.sectors == 0) {
1832 condlog(3, "%s: invalid geometry on %s", mpp->alias, pp->dev);
1836 if (!(dmt = libmp_dm_task_create(DM_DEVICE_SET_GEOMETRY)))
1839 if (!dm_task_set_name(dmt, mpp->alias))
1842 dm_task_no_open_count(dmt);
1844 /* What a sick interface ... */
1845 snprintf(heads, 4, "%u", pp->geom.heads);
1846 snprintf(sectors, 4, "%u", pp->geom.sectors);
1847 snprintf(cylinders, 10, "%u", pp->geom.cylinders);
1848 snprintf(start, 32, "%lu", pp->geom.start);
1849 if (!dm_task_set_geometry(dmt, cylinders, heads, sectors, start)) {
1850 condlog(3, "%s: Failed to set geometry", mpp->alias);
1854 r = libmp_dm_task_run(dmt);
1856 dm_log_error(3, DM_DEVICE_SET_GEOMETRY, dmt);
1858 dm_task_destroy(dmt);