2 * Copyright (c) 2004, 2005 Christophe Varoqui
3 * Copyright (c) 2005 Kiyoshi Ueda, NEC
4 * Copyright (c) 2005 Benjamin Marzinski, Redhat
5 * Copyright (c) 2005 Edward Goggin, EMC
9 #include <libdevmapper.h>
12 #include <sys/types.h>
16 #include <sys/resource.h>
18 #include <linux/oom.h>
22 #include <systemd/sd-daemon.h>
24 #include <semaphore.h>
31 #include "time-util.h"
39 static int use_watchdog;
55 #include "blacklist.h"
56 #include "structs_vec.h"
58 #include "devmapper.h"
61 #include "discovery.h"
65 #include "switchgroup.h"
67 #include "configure.h"
70 #include "pgpolicies.h"
74 #include "mpath_cmd.h"
75 #include "mpath_persist.h"
77 #include "prioritizers/alua_rtpg.h"
84 #include "cli_handlers.h"
88 #include "../third-party/valgrind/drd.h"
90 #define FILE_NAME_SIZE 256
93 #define LOG_MSG(a, b) \
96 condlog(a, "%s: %s - path offline", pp->mpp->alias, pp->dev); \
98 condlog(a, "%s: %s - %s", pp->mpp->alias, pp->dev, b); \
101 struct mpath_event_param
104 struct multipath *mpp;
107 unsigned int mpath_mx_alloc_len;
111 int bindings_read_only;
113 enum daemon_status running_state = DAEMON_INIT;
115 pthread_mutex_t config_lock = PTHREAD_MUTEX_INITIALIZER;
116 pthread_cond_t config_cond;
119 * global copy of vecs for use in sig handlers
121 struct vectors * gvecs;
125 struct config *multipath_conf;
127 /* Local variables */
128 static volatile sig_atomic_t exit_sig;
129 static volatile sig_atomic_t reconfig_sig;
130 static volatile sig_atomic_t log_reset_sig;
135 switch (running_state) {
140 case DAEMON_CONFIGURE:
146 case DAEMON_SHUTDOWN:
153 * I love you too, systemd ...
156 sd_notify_status(void)
158 switch (running_state) {
160 return "STATUS=init";
162 return "STATUS=startup";
163 case DAEMON_CONFIGURE:
164 return "STATUS=configure";
166 return "STATUS=idle";
168 return "STATUS=running";
169 case DAEMON_SHUTDOWN:
170 return "STATUS=shutdown";
/*
 * Thread-cancellation cleanup handler: releases config_lock so a
 * thread cancelled while waiting on config_cond does not leave the
 * mutex held. Pushed via pthread_cleanup_push() in set_config_state()
 * and uev_trigger().
 * NOTE(review): source is a line-sampled fragment; this function's
 * braces/body lines are not all visible here.
 */
175 static void config_cleanup(void *arg)
177 pthread_mutex_unlock(&config_lock);
/*
 * Publish a new daemon state. Under config_lock: if the state actually
 * changed, update the global running_state and wake every waiter on
 * config_cond; the new status is also reported to systemd through
 * sd_notify(). NOTE(review): whether the sd_notify() call sits inside
 * the state-changed branch is not visible in this fragment — confirm
 * against the full source.
 */
180 void post_config_state(enum daemon_status state)
182 pthread_mutex_lock(&config_lock);
183 if (state != running_state) {
184 running_state = state;
185 pthread_cond_broadcast(&config_cond);
187 sd_notify(0, sd_notify_status());
190 pthread_mutex_unlock(&config_lock);
/*
 * Request a daemon state transition. If the daemon is not currently
 * IDLE, wait on config_cond with a CLOCK_MONOTONIC-based deadline
 * (pthread_cond_timedwait) before switching; then set running_state,
 * wake other waiters, and notify systemd. The config_cleanup handler
 * pushed here guarantees config_lock is released even if the thread
 * is cancelled while waiting; pthread_cleanup_pop(1) runs it (i.e.
 * unlocks) on the normal path as well.
 * NOTE(review): the timeout value added to `ts` and the return-value
 * handling of the timedwait are in lines missing from this fragment.
 */
193 int set_config_state(enum daemon_status state)
197 pthread_cleanup_push(config_cleanup, NULL);
198 pthread_mutex_lock(&config_lock);
199 if (running_state != state) {
200 if (running_state != DAEMON_IDLE) {
203 clock_gettime(CLOCK_MONOTONIC, &ts);
205 rc = pthread_cond_timedwait(&config_cond,
209 running_state = state;
210 pthread_cond_broadcast(&config_cond);
212 sd_notify(0, sd_notify_status());
216 pthread_cleanup_pop(1);
/*
 * RCU read-side accessor for the global configuration pointer
 * (multipath_conf). Every caller in this file pairs it with
 * put_multipath_config() once it is done reading the config.
 */
220 struct config *get_multipath_config(void)
223 return rcu_dereference(multipath_conf);
226 void put_multipath_config(struct config *conf)
232 need_switch_pathgroup (struct multipath * mpp, int refresh)
234 struct pathgroup * pgp;
239 if (!mpp || mpp->pgfailback == -FAILBACK_MANUAL)
243 * Refresh path priority values
246 vector_foreach_slot (mpp->pg, pgp, i) {
247 vector_foreach_slot (pgp->paths, pp, j) {
248 conf = get_multipath_config();
249 pathinfo(pp, conf, DI_PRIO);
250 put_multipath_config(conf);
255 if (!mpp->pg || VECTOR_SIZE(mpp->paths) == 0)
258 mpp->bestpg = select_path_group(mpp);
260 if (mpp->bestpg != mpp->nextpg)
/*
 * Switch the map to its precomputed best path group (mpp->bestpg,
 * set by need_switch_pathgroup()/select_path_group()), bumping the
 * switch-group statistics counter and logging the transition.
 */
267 switch_pathgroup (struct multipath * mpp)
269 mpp->stat_switchgroup++;
270 dm_switchgroup(mpp->alias, mpp->bestpg);
271 condlog(2, "%s: switch to path group #%i",
272 mpp->alias, mpp->bestpg);
276 coalesce_maps(struct vectors *vecs, vector nmpv)
278 struct multipath * ompp;
279 vector ompv = vecs->mpvec;
280 unsigned int i, reassign_maps;
283 conf = get_multipath_config();
284 reassign_maps = conf->reassign_maps;
285 put_multipath_config(conf);
286 vector_foreach_slot (ompv, ompp, i) {
287 condlog(3, "%s: coalesce map", ompp->alias);
288 if (!find_mp_by_wwid(nmpv, ompp->wwid)) {
290 * remove all current maps not allowed by the
291 * current configuration
293 if (dm_flush_map(ompp->alias)) {
294 condlog(0, "%s: unable to flush devmap",
297 * may be just because the device is open
299 if (setup_multipath(vecs, ompp) != 0) {
303 if (!vector_alloc_slot(nmpv))
306 vector_set_slot(nmpv, ompp);
308 vector_del_slot(ompv, i);
313 condlog(2, "%s devmap removed", ompp->alias);
315 } else if (reassign_maps) {
316 condlog(3, "%s: Reassign existing device-mapper"
317 " devices", ompp->alias);
318 dm_reassign(ompp->alias);
/*
 * Reconcile the kernel device-mapper view of each path with the
 * checker's view for one map:
 *  - paths the checker sees usable (UP/GHOST) but dm has failed
 *    (or undefined) are reinstated;
 *  - paths dm has active (or undefined) but the checker sees
 *    DOWN/SHAKY are failed in dm.
 * Paths in UNCHECKED/WILD/DELAYED state are left alone (the visible
 * condition at 336-338 guards them; its `continue` line is missing
 * from this fragment).
 */
325 sync_map_state(struct multipath *mpp)
327 struct pathgroup *pgp;
334 vector_foreach_slot (mpp->pg, pgp, i){
335 vector_foreach_slot (pgp->paths, pp, j){
336 if (pp->state == PATH_UNCHECKED ||
337 pp->state == PATH_WILD ||
338 pp->state == PATH_DELAYED)
340 if ((pp->dmstate == PSTATE_FAILED ||
341 pp->dmstate == PSTATE_UNDEF) &&
342 (pp->state == PATH_UP || pp->state == PATH_GHOST))
343 dm_reinstate_path(mpp->alias, pp->dev_t);
344 else if ((pp->dmstate == PSTATE_ACTIVE ||
345 pp->dmstate == PSTATE_UNDEF) &&
346 (pp->state == PATH_DOWN ||
347 pp->state == PATH_SHAKY))
348 dm_fail_path(mpp->alias, pp->dev_t);
/*
 * Iterate over every map in mpvec — presumably applying
 * sync_map_state() to each; the loop body line is missing from this
 * fragment, so confirm against the full source.
 */
354 sync_maps_state(vector mpvec)
357 struct multipath *mpp;
359 vector_foreach_slot (mpvec, mpp, i)
364 flush_map(struct multipath * mpp, struct vectors * vecs, int nopaths)
369 r = dm_flush_map_nopaths(mpp->alias, mpp->deferred_remove);
371 r = dm_flush_map(mpp->alias);
373 * clear references to this map before flushing so we can ignore
374 * the spurious uevent we may generate with the dm_flush_map call below
378 * May not really be an error -- if the map was already flushed
379 * from the device mapper by dmsetup(8) for instance.
382 condlog(0, "%s: can't flush", mpp->alias);
384 condlog(2, "%s: devmap deferred remove", mpp->alias);
385 mpp->deferred_remove = DEFERRED_REMOVE_IN_PROGRESS;
391 condlog(2, "%s: map flushed", mpp->alias);
394 orphan_paths(vecs->pathvec, mpp);
395 remove_map_and_stop_waiter(mpp, vecs, 1);
401 update_map (struct multipath *mpp, struct vectors *vecs)
404 char params[PARAMS_SIZE] = {0};
407 condlog(4, "%s: updating new map", mpp->alias);
408 if (adopt_paths(vecs->pathvec, mpp)) {
409 condlog(0, "%s: failed to adopt paths for new map update",
414 verify_paths(mpp, vecs);
415 mpp->flush_on_last_del = FLUSH_UNDEF;
416 mpp->action = ACT_RELOAD;
418 if (setup_map(mpp, params, PARAMS_SIZE)) {
419 condlog(0, "%s: failed to setup new map in update", mpp->alias);
423 if (domap(mpp, params, 1) <= 0 && retries-- > 0) {
424 condlog(0, "%s: map_udate sleep", mpp->alias);
431 if (setup_multipath(vecs, mpp))
437 condlog(0, "%s: failed reload in new map update", mpp->alias);
442 uev_add_map (struct uevent * uev, struct vectors * vecs)
445 int major = -1, minor = -1, rc;
447 condlog(3, "%s: add map (uevent)", uev->kernel);
448 alias = uevent_get_dm_name(uev);
450 condlog(3, "%s: No DM_NAME in uevent", uev->kernel);
451 major = uevent_get_major(uev);
452 minor = uevent_get_minor(uev);
453 alias = dm_mapname(major, minor);
455 condlog(2, "%s: mapname not found for %d:%d",
456 uev->kernel, major, minor);
460 pthread_cleanup_push(cleanup_lock, &vecs->lock);
462 pthread_testcancel();
463 rc = ev_add_map(uev->kernel, alias, vecs);
464 lock_cleanup_pop(vecs->lock);
470 ev_add_map (char * dev, char * alias, struct vectors * vecs)
473 struct multipath * mpp;
475 int r = 1, delayed_reconfig, reassign_maps;
478 map_present = dm_map_present(alias);
480 if (map_present && !dm_is_mpath(alias)) {
481 condlog(4, "%s: not a multipath map", alias);
485 mpp = find_mp_by_alias(vecs->mpvec, alias);
488 if (mpp->wait_for_udev > 1) {
489 condlog(2, "%s: performing delayed actions",
491 if (update_map(mpp, vecs))
492 /* setup multipathd removed the map */
495 conf = get_multipath_config();
496 delayed_reconfig = conf->delayed_reconfig;
497 reassign_maps = conf->reassign_maps;
498 put_multipath_config(conf);
499 if (mpp->wait_for_udev) {
500 mpp->wait_for_udev = 0;
501 if (delayed_reconfig &&
502 !need_to_delay_reconfig(vecs)) {
503 condlog(2, "reconfigure (delayed)");
504 set_config_state(DAEMON_CONFIGURE);
509 * Not really an error -- we generate our own uevent
510 * if we create a multipath mapped device as a result
514 condlog(3, "%s: Reassign existing device-mapper devices",
520 condlog(2, "%s: adding map", alias);
523 * now we can register the map
526 if ((mpp = add_map_without_path(vecs, alias))) {
528 condlog(2, "%s: devmap %s registered", alias, dev);
531 condlog(2, "%s: uev_add_map failed", dev);
535 r = get_refwwid(CMD_NONE, dev, DEV_DEVMAP, vecs->pathvec, &refwwid);
538 r = coalesce_paths(vecs, NULL, refwwid, FORCE_RELOAD_NONE,
544 condlog(2, "%s: devmap %s added", alias, dev);
546 condlog(2, "%s: uev_add_map %s blacklisted", alias, dev);
548 condlog(0, "%s: uev_add_map %s failed", alias, dev);
555 uev_remove_map (struct uevent * uev, struct vectors * vecs)
559 struct multipath *mpp;
561 condlog(2, "%s: remove map (uevent)", uev->kernel);
562 alias = uevent_get_dm_name(uev);
564 condlog(3, "%s: No DM_NAME in uevent, ignoring", uev->kernel);
567 minor = uevent_get_minor(uev);
569 pthread_cleanup_push(cleanup_lock, &vecs->lock);
571 pthread_testcancel();
572 mpp = find_mp_by_minor(vecs->mpvec, minor);
575 condlog(2, "%s: devmap not registered, can't remove",
579 if (strcmp(mpp->alias, alias)) {
580 condlog(2, "%s: minor number mismatch (map %d, event %d)",
581 mpp->alias, mpp->dmi->minor, minor);
585 orphan_paths(vecs->pathvec, mpp);
586 remove_map_and_stop_waiter(mpp, vecs, 1);
588 lock_cleanup_pop(vecs->lock);
593 /* Called from CLI handler */
/*
 * Remove the map identified by dm minor number: look it up in
 * vecs->mpvec, verify the alias matches the event (guards against
 * minor-number reuse races), then flush it via flush_map(..., 0).
 * Caller (the CLI layer) is expected to hold the vecs lock.
 * NOTE(review): the early-return lines for the not-found and
 * mismatch branches are missing from this fragment.
 */
595 ev_remove_map (char * devname, char * alias, int minor, struct vectors * vecs)
597 struct multipath * mpp;
599 mpp = find_mp_by_minor(vecs->mpvec, minor);
602 condlog(2, "%s: devmap not registered, can't remove",
606 if (strcmp(mpp->alias, alias)) {
607 condlog(2, "%s: minor number mismatch (map %d, event %d)",
608 mpp->alias, mpp->dmi->minor, minor);
611 return flush_map(mpp, vecs, 0);
615 uev_add_path (struct uevent *uev, struct vectors * vecs, int need_do_map)
621 condlog(2, "%s: add path (uevent)", uev->kernel);
622 if (strstr(uev->kernel, "..") != NULL) {
624 * Don't allow relative device names in the pathvec
626 condlog(0, "%s: path name is invalid", uev->kernel);
630 pthread_cleanup_push(cleanup_lock, &vecs->lock);
632 pthread_testcancel();
633 pp = find_path_by_dev(vecs->pathvec, uev->kernel);
637 condlog(0, "%s: spurious uevent, path already in pathvec",
639 if (!pp->mpp && !strlen(pp->wwid)) {
640 condlog(3, "%s: reinitialize path", uev->kernel);
641 udev_device_unref(pp->udev);
642 pp->udev = udev_device_ref(uev->udev);
643 conf = get_multipath_config();
644 r = pathinfo(pp, conf,
645 DI_ALL | DI_BLACKLIST);
646 put_multipath_config(conf);
647 if (r == PATHINFO_OK)
648 ret = ev_add_path(pp, vecs, need_do_map);
649 else if (r == PATHINFO_SKIPPED) {
650 condlog(3, "%s: remove blacklisted path",
652 i = find_slot(vecs->pathvec, (void *)pp);
654 vector_del_slot(vecs->pathvec, i);
657 condlog(0, "%s: failed to reinitialize path",
663 lock_cleanup_pop(vecs->lock);
668 * get path vital state
670 conf = get_multipath_config();
671 ret = alloc_path_with_pathinfo(conf, uev->udev,
672 uev->wwid, DI_ALL, &pp);
673 put_multipath_config(conf);
675 if (ret == PATHINFO_SKIPPED)
677 condlog(3, "%s: failed to get path info", uev->kernel);
680 pthread_cleanup_push(cleanup_lock, &vecs->lock);
682 pthread_testcancel();
683 ret = store_path(vecs->pathvec, pp);
685 conf = get_multipath_config();
686 pp->checkint = conf->checkint;
687 put_multipath_config(conf);
688 ret = ev_add_path(pp, vecs, need_do_map);
690 condlog(0, "%s: failed to store path info, "
696 lock_cleanup_pop(vecs->lock);
706 ev_add_path (struct path * pp, struct vectors * vecs, int need_do_map)
708 struct multipath * mpp;
709 char params[PARAMS_SIZE] = {0};
711 int start_waiter = 0;
715 * need path UID to go any further
717 if (strlen(pp->wwid) == 0) {
718 condlog(0, "%s: failed to get path uid", pp->dev);
719 goto fail; /* leave path added to pathvec */
721 mpp = find_mp_by_wwid(vecs->mpvec, pp->wwid);
722 if (mpp && mpp->wait_for_udev &&
723 (pathcount(mpp, PATH_UP) > 0 ||
724 (pathcount(mpp, PATH_GHOST) > 0 && pp->tpgs != TPGS_IMPLICIT))) {
725 /* if wait_for_udev is set and valid paths exist */
726 condlog(2, "%s: delaying path addition until %s is fully initialized", pp->dev, mpp->alias);
727 mpp->wait_for_udev = 2;
728 orphan_path(pp, "waiting for create to complete");
735 if (pp->size && mpp->size != pp->size) {
736 condlog(0, "%s: failed to add new path %s, "
737 "device size mismatch",
738 mpp->alias, pp->dev);
739 int i = find_slot(vecs->pathvec, (void *)pp);
741 vector_del_slot(vecs->pathvec, i);
746 condlog(4,"%s: adopting all paths for path %s",
747 mpp->alias, pp->dev);
748 if (adopt_paths(vecs->pathvec, mpp))
749 goto fail; /* leave path added to pathvec */
751 verify_paths(mpp, vecs);
752 mpp->flush_on_last_del = FLUSH_UNDEF;
753 mpp->action = ACT_RELOAD;
755 if (!should_multipath(pp, vecs->pathvec)) {
756 orphan_path(pp, "only one path");
759 condlog(4,"%s: creating new map", pp->dev);
760 if ((mpp = add_map_with_path(vecs, pp, 1))) {
761 mpp->action = ACT_CREATE;
763 * We don't depend on ACT_CREATE, as domap will
764 * set it to ACT_NOTHING when complete.
769 goto fail; /* leave path added to pathvec */
772 /* persistent reservation check*/
773 mpath_pr_event_handle(pp);
778 if (!dm_map_present(mpp->alias)) {
779 mpp->action = ACT_CREATE;
783 * push the map to the device-mapper
785 if (setup_map(mpp, params, PARAMS_SIZE)) {
786 condlog(0, "%s: failed to setup map for addition of new "
787 "path %s", mpp->alias, pp->dev);
791 * reload the map for the multipath mapped device
794 ret = domap(mpp, params, 1);
796 if (ret < 0 && retries-- > 0) {
797 condlog(0, "%s: retry domap for addition of new "
798 "path %s", mpp->alias, pp->dev);
802 condlog(0, "%s: failed in domap for addition of new "
803 "path %s", mpp->alias, pp->dev);
805 * deal with asynchronous uevents :((
807 if (mpp->action == ACT_RELOAD && retries-- > 0) {
808 condlog(0, "%s: ev_add_path sleep", mpp->alias);
810 update_mpp_paths(mpp, vecs->pathvec);
813 else if (mpp->action == ACT_RELOAD)
814 condlog(0, "%s: giving up reload", mpp->alias);
821 * update our state from kernel regardless of create or reload
823 if (setup_multipath(vecs, mpp))
824 goto fail; /* if setup_multipath fails, it removes the map */
828 if ((mpp->action == ACT_CREATE ||
829 (mpp->action == ACT_NOTHING && start_waiter && !mpp->waiter)) &&
830 start_waiter_thread(mpp, vecs))
834 condlog(2, "%s [%s]: path added to devmap %s",
835 pp->dev, pp->dev_t, mpp->alias);
841 remove_map(mpp, vecs, 1);
843 orphan_path(pp, "failed to add path");
/*
 * Handle a "remove" uevent for a path device: under vecs->lock
 * (cancellation-safe via cleanup_lock), look the path up by kernel
 * name and delegate to ev_remove_path(). A path that is no longer in
 * pathvec is not an error — it may have been purged earlier.
 */
848 uev_remove_path (struct uevent *uev, struct vectors * vecs, int need_do_map)
853 condlog(2, "%s: remove path (uevent)", uev->kernel);
854 pthread_cleanup_push(cleanup_lock, &vecs->lock);
856 pthread_testcancel();
857 pp = find_path_by_dev(vecs->pathvec, uev->kernel);
859 ret = ev_remove_path(pp, vecs, need_do_map);
860 lock_cleanup_pop(vecs->lock);
862 /* Not an error; path might have been purged earlier */
863 condlog(0, "%s: path already removed", uev->kernel);
870 ev_remove_path (struct path *pp, struct vectors * vecs, int need_do_map)
872 struct multipath * mpp;
874 char params[PARAMS_SIZE] = {0};
877 * avoid referring to the map of an orphaned path
879 if ((mpp = pp->mpp)) {
881 * transform the mp->pg vector of vectors of paths
882 * into a mp->params string to feed the device-mapper
884 if (update_mpp_paths(mpp, vecs->pathvec)) {
885 condlog(0, "%s: failed to update paths",
889 if ((i = find_slot(mpp->paths, (void *)pp)) != -1)
890 vector_del_slot(mpp->paths, i);
893 * remove the map IFF removing the last path
895 if (VECTOR_SIZE(mpp->paths) == 0) {
896 char alias[WWID_SIZE];
899 * flush_map will fail if the device is open
901 strncpy(alias, mpp->alias, WWID_SIZE);
902 if (mpp->flush_on_last_del == FLUSH_ENABLED) {
903 condlog(2, "%s Last path deleted, disabling queueing", mpp->alias);
905 mpp->no_path_retry = NO_PATH_RETRY_FAIL;
906 mpp->flush_on_last_del = FLUSH_IN_PROGRESS;
907 mpp->stat_map_failures++;
908 dm_queue_if_no_path(mpp->alias, 0);
910 if (!flush_map(mpp, vecs, 1)) {
911 condlog(2, "%s: removed map after"
912 " removing all paths",
918 * Not an error, continue
922 if (setup_map(mpp, params, PARAMS_SIZE)) {
923 condlog(0, "%s: failed to setup map for"
924 " removal of path %s", mpp->alias, pp->dev);
928 if (mpp->wait_for_udev) {
929 mpp->wait_for_udev = 2;
938 mpp->action = ACT_RELOAD;
939 if (domap(mpp, params, 1) <= 0) {
940 condlog(0, "%s: failed in domap for "
941 "removal of path %s",
942 mpp->alias, pp->dev);
946 * update our state from kernel
948 if (setup_multipath(vecs, mpp))
952 condlog(2, "%s [%s]: path removed from map %s",
953 pp->dev, pp->dev_t, mpp->alias);
958 if ((i = find_slot(vecs->pathvec, (void *)pp)) != -1)
959 vector_del_slot(vecs->pathvec, i);
966 remove_map_and_stop_waiter(mpp, vecs, 1);
971 uev_update_path (struct uevent *uev, struct vectors * vecs)
976 int disable_changed_wwids;
977 int needs_reinit = 0;
979 conf = get_multipath_config();
980 disable_changed_wwids = conf->disable_changed_wwids;
981 put_multipath_config(conf);
983 ro = uevent_get_disk_ro(uev);
985 pthread_cleanup_push(cleanup_lock, &vecs->lock);
987 pthread_testcancel();
989 pp = find_path_by_dev(vecs->pathvec, uev->kernel);
991 struct multipath *mpp = pp->mpp;
993 if (disable_changed_wwids &&
994 (strlen(pp->wwid) || pp->wwid_changed)) {
995 char wwid[WWID_SIZE];
997 strcpy(wwid, pp->wwid);
998 get_uid(pp, pp->state, uev->udev);
999 if (strcmp(wwid, pp->wwid) != 0) {
1000 condlog(0, "%s: path wwid changed from '%s' to '%s'. disallowing", uev->kernel, wwid, pp->wwid);
1001 strcpy(pp->wwid, wwid);
1002 if (!pp->wwid_changed) {
1003 pp->wwid_changed = 1;
1006 dm_fail_path(pp->mpp->alias, pp->dev_t);
1010 pp->wwid_changed = 0;
1013 if (pp->initialized == INIT_REQUESTED_UDEV)
1015 else if (mpp && ro >= 0) {
1016 condlog(2, "%s: update path write_protect to '%d' (uevent)", uev->kernel, ro);
1018 if (mpp->wait_for_udev)
1019 mpp->wait_for_udev = 2;
1022 pp->mpp->force_readonly = 1;
1023 retval = reload_map(vecs, mpp, 0, 1);
1024 pp->mpp->force_readonly = 0;
1025 condlog(2, "%s: map %s reloaded (retval %d)",
1026 uev->kernel, mpp->alias, retval);
1031 lock_cleanup_pop(vecs->lock);
1033 /* If the path is blacklisted, print a debug/non-default verbosity message. */
1035 int flag = DI_SYSFS | DI_WWID;
1037 conf = get_multipath_config();
1038 retval = alloc_path_with_pathinfo(conf, uev->udev, uev->wwid, flag, NULL);
1039 put_multipath_config(conf);
1041 if (retval == PATHINFO_SKIPPED) {
1042 condlog(3, "%s: spurious uevent, path is blacklisted", uev->kernel);
1047 condlog(0, "%s: spurious uevent, path not found", uev->kernel);
1050 retval = uev_add_path(uev, vecs, 1);
1055 map_discovery (struct vectors * vecs)
1057 struct multipath * mpp;
1060 if (dm_get_maps(vecs->mpvec))
1063 vector_foreach_slot (vecs->mpvec, mpp, i)
1064 if (setup_multipath(vecs, mpp))
1071 uxsock_trigger (char * str, char ** reply, int * len, bool is_root,
1072 void * trigger_data)
1074 struct vectors * vecs;
1079 vecs = (struct vectors *)trigger_data;
1081 if ((str != NULL) && (is_root == false) &&
1082 (strncmp(str, "list", strlen("list")) != 0) &&
1083 (strncmp(str, "show", strlen("show")) != 0)) {
1084 *reply = STRDUP("permission deny: need to be root");
1086 *len = strlen(*reply) + 1;
1090 r = parse_cmd(str, reply, len, vecs, uxsock_timeout);
1094 *reply = STRDUP("timeout\n");
1096 *reply = STRDUP("fail\n");
1098 *len = strlen(*reply) + 1;
1101 else if (!r && *len == 0) {
1102 *reply = STRDUP("ok\n");
1104 *len = strlen(*reply) + 1;
1107 /* else if (r < 0) leave *reply alone */
1113 uev_trigger (struct uevent * uev, void * trigger_data)
1116 struct vectors * vecs;
1117 struct uevent *merge_uev, *tmp;
1119 vecs = (struct vectors *)trigger_data;
1121 pthread_cleanup_push(config_cleanup, NULL);
1122 pthread_mutex_lock(&config_lock);
1123 if (running_state != DAEMON_IDLE &&
1124 running_state != DAEMON_RUNNING)
1125 pthread_cond_wait(&config_cond, &config_lock);
1126 pthread_cleanup_pop(1);
1128 if (running_state == DAEMON_SHUTDOWN)
1133 * Add events are ignored here as the tables
1134 * are not fully initialised then.
1136 if (!strncmp(uev->kernel, "dm-", 3)) {
1137 if (!strncmp(uev->action, "change", 6)) {
1138 r = uev_add_map(uev, vecs);
1141 if (!strncmp(uev->action, "remove", 6)) {
1142 r = uev_remove_map(uev, vecs);
1149 * path add/remove/change event, add/remove maybe merged
1151 list_for_each_entry_safe(merge_uev, tmp, &uev->merge_node, node) {
1152 if (!strncmp(merge_uev->action, "add", 3))
1153 r += uev_add_path(merge_uev, vecs, 0);
1154 if (!strncmp(merge_uev->action, "remove", 6))
1155 r += uev_remove_path(merge_uev, vecs, 0);
1158 if (!strncmp(uev->action, "add", 3))
1159 r += uev_add_path(uev, vecs, 1);
1160 if (!strncmp(uev->action, "remove", 6))
1161 r += uev_remove_path(uev, vecs, 1);
1162 if (!strncmp(uev->action, "change", 6))
1163 r += uev_update_path(uev, vecs);
/*
 * Thread-cancellation cleanup handler: unregisters the calling thread
 * from liburcu. Paired with rcu_register_thread() in the thread entry
 * points below (ueventloop, uevqloop, uxlsnrloop).
 */
1169 static void rcu_unregister(void *param)
1171 rcu_unregister_thread();
/*
 * Uevent listener thread entry point. Registers the thread with RCU
 * (unregistered again via the pushed cleanup handler, which also runs
 * on normal exit through pthread_cleanup_pop(1)) and blocks in
 * uevent_listen() on the given udev context; a non-zero return is
 * logged as a startup failure.
 */
1175 ueventloop (void * ap)
1177 struct udev *udev = ap;
1179 pthread_cleanup_push(rcu_unregister, NULL);
1180 rcu_register_thread();
1181 if (uevent_listen(udev))
1182 condlog(0, "error starting uevent listener");
1183 pthread_cleanup_pop(1);
/*
 * Uevent dispatcher thread entry point: drains the queued uevents and
 * feeds them to uev_trigger() with `ap` (the vectors) as trigger data.
 * RCU registration/unregistration mirrors ueventloop().
 */
1188 uevqloop (void * ap)
1190 pthread_cleanup_push(rcu_unregister, NULL);
1191 rcu_register_thread();
1192 if (uevent_dispatch(&uev_trigger, ap))
1193 condlog(0, "error starting uevent dispatcher");
1194 pthread_cleanup_pop(1);
1198 uxlsnrloop (void * ap)
1201 condlog(1, "Failed to init uxsock listener");
1204 pthread_cleanup_push(rcu_unregister, NULL);
1205 rcu_register_thread();
1206 set_handler_callback(LIST+PATHS, cli_list_paths);
1207 set_handler_callback(LIST+PATHS+FMT, cli_list_paths_fmt);
1208 set_handler_callback(LIST+PATHS+RAW+FMT, cli_list_paths_raw);
1209 set_handler_callback(LIST+PATH, cli_list_path);
1210 set_handler_callback(LIST+MAPS, cli_list_maps);
1211 set_unlocked_handler_callback(LIST+STATUS, cli_list_status);
1212 set_unlocked_handler_callback(LIST+DAEMON, cli_list_daemon);
1213 set_handler_callback(LIST+MAPS+STATUS, cli_list_maps_status);
1214 set_handler_callback(LIST+MAPS+STATS, cli_list_maps_stats);
1215 set_handler_callback(LIST+MAPS+FMT, cli_list_maps_fmt);
1216 set_handler_callback(LIST+MAPS+RAW+FMT, cli_list_maps_raw);
1217 set_handler_callback(LIST+MAPS+TOPOLOGY, cli_list_maps_topology);
1218 set_handler_callback(LIST+TOPOLOGY, cli_list_maps_topology);
1219 set_handler_callback(LIST+MAPS+JSON, cli_list_maps_json);
1220 set_handler_callback(LIST+MAP+TOPOLOGY, cli_list_map_topology);
1221 set_handler_callback(LIST+MAP+FMT, cli_list_map_fmt);
1222 set_handler_callback(LIST+MAP+RAW+FMT, cli_list_map_fmt);
1223 set_handler_callback(LIST+MAP+JSON, cli_list_map_json);
1224 set_handler_callback(LIST+CONFIG, cli_list_config);
1225 set_handler_callback(LIST+BLACKLIST, cli_list_blacklist);
1226 set_handler_callback(LIST+DEVICES, cli_list_devices);
1227 set_handler_callback(LIST+WILDCARDS, cli_list_wildcards);
1228 set_handler_callback(RESET+MAPS+STATS, cli_reset_maps_stats);
1229 set_handler_callback(RESET+MAP+STATS, cli_reset_map_stats);
1230 set_handler_callback(ADD+PATH, cli_add_path);
1231 set_handler_callback(DEL+PATH, cli_del_path);
1232 set_handler_callback(ADD+MAP, cli_add_map);
1233 set_handler_callback(DEL+MAP, cli_del_map);
1234 set_handler_callback(SWITCH+MAP+GROUP, cli_switch_group);
1235 set_unlocked_handler_callback(RECONFIGURE, cli_reconfigure);
1236 set_handler_callback(SUSPEND+MAP, cli_suspend);
1237 set_handler_callback(RESUME+MAP, cli_resume);
1238 set_handler_callback(RESIZE+MAP, cli_resize);
1239 set_handler_callback(RELOAD+MAP, cli_reload);
1240 set_handler_callback(RESET+MAP, cli_reassign);
1241 set_handler_callback(REINSTATE+PATH, cli_reinstate);
1242 set_handler_callback(FAIL+PATH, cli_fail);
1243 set_handler_callback(DISABLEQ+MAP, cli_disable_queueing);
1244 set_handler_callback(RESTOREQ+MAP, cli_restore_queueing);
1245 set_handler_callback(DISABLEQ+MAPS, cli_disable_all_queueing);
1246 set_handler_callback(RESTOREQ+MAPS, cli_restore_all_queueing);
1247 set_unlocked_handler_callback(QUIT, cli_quit);
1248 set_unlocked_handler_callback(SHUTDOWN, cli_shutdown);
1249 set_handler_callback(GETPRSTATUS+MAP, cli_getprstatus);
1250 set_handler_callback(SETPRSTATUS+MAP, cli_setprstatus);
1251 set_handler_callback(UNSETPRSTATUS+MAP, cli_unsetprstatus);
1252 set_handler_callback(FORCEQ+DAEMON, cli_force_no_daemon_q);
1253 set_handler_callback(RESTOREQ+DAEMON, cli_restore_no_daemon_q);
1256 uxsock_listen(&uxsock_trigger, ap);
1257 pthread_cleanup_pop(1);
1264 post_config_state(DAEMON_SHUTDOWN);
/*
 * Mark a path as failed in the kernel dm table after the checker
 * reported it down, and update the map's queueing-mode accounting.
 * NOTE(review): the del_active guard lines are missing from this
 * fragment — presumably update_queue_mode_del_path() is only called
 * when del_active is set; confirm against the full source.
 */
1268 fail_path (struct path * pp, int del_active)
1273 condlog(2, "checker failed path %s in map %s",
1274 pp->dev_t, pp->mpp->alias);
1276 dm_fail_path(pp->mpp->alias, pp->dev_t);
1278 update_queue_mode_del_path(pp->mpp);
1282 * caller must have locked the path list before calling that function
/*
 * Ask device-mapper to reinstate a previously failed path; logs the
 * outcome and updates the map's queueing-mode accounting on success.
 * NOTE(review): the add_active guard lines are missing from this
 * fragment — presumably update_queue_mode_add_path() is gated on
 * add_active; confirm against the full source.
 */
1285 reinstate_path (struct path * pp, int add_active)
1292 if (dm_reinstate_path(pp->mpp->alias, pp->dev_t)) {
1293 condlog(0, "%s: reinstate failed", pp->dev_t);
1296 condlog(2, "%s: reinstated", pp->dev_t);
1298 update_queue_mode_add_path(pp->mpp);
/*
 * Re-enable the path group containing pp if dm currently has it
 * disabled. Bails out early when the map has no pathgroup vector or
 * the path's pgindex is unset (pgindex is 1-based; 0 means "not yet
 * assigned" — see the original comment below).
 */
1304 enable_group(struct path * pp)
1306 struct pathgroup * pgp;
1309 * if path is added through uev_add_path, pgindex can be unset.
1310 * next update_strings() will set it, upon map reload event.
1312 * we can safely return here, because upon map reload, all
1313 * PG will be enabled.
1315 if (!pp->mpp->pg || !pp->pgindex)
1318 pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
1320 if (pgp->status == PGSTATE_DISABLED) {
1321 condlog(2, "%s: enable group #%i", pp->mpp->alias, pp->pgindex);
1322 dm_enablegroup(pp->mpp->alias, pp->pgindex);
/*
 * Periodic garbage collector: drop any multipath entry whose dm map
 * no longer exists in the kernel, tearing down its waiter thread and
 * removing it from vecs (remove_map_and_stop_waiter).
 */
1327 mpvec_garbage_collector (struct vectors * vecs)
1329 struct multipath * mpp;
1335 vector_foreach_slot (vecs->mpvec, mpp, i) {
1336 if (mpp && mpp->alias && !dm_map_present(mpp->alias)) {
1337 condlog(2, "%s: remove dead map", mpp->alias);
1338 remove_map_and_stop_waiter(mpp, vecs, 1);
1344 /* This is called after a path has started working again. If the multipath
1345 * device for this path uses the followover failback type, and this is the
1346 * best pathgroup, and this is the first path in the pathgroup to come back
1347 * up, then switch to this pathgroup */
/*
 * Decide whether a followover-mode map should fail back now that
 * path pp came up: only when the map uses FAILBACK_FOLLOWOVER, pp's
 * group is the best group, and — per the loop below — no other path
 * in that group was already up (all siblings were DOWN/SHAKY per the
 * checker). Return statements fall in lines missing from this
 * fragment.
 */
1349 followover_should_failback(struct path * pp)
1351 struct pathgroup * pgp;
1355 if (pp->mpp->pgfailback != -FAILBACK_FOLLOWOVER ||
1356 !pp->mpp->pg || !pp->pgindex ||
1357 pp->pgindex != pp->mpp->bestpg)
1360 pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
1361 vector_foreach_slot(pgp->paths, pp1, i) {
1364 if (pp1->chkrstate != PATH_DOWN && pp1->chkrstate != PATH_SHAKY)
/*
 * Per-tick handling of maps still waiting for their creation uevent:
 * when a map's uev_wait_tick expires, give up waiting (re-enabling
 * reloads), run any delayed actions via update_map(), and — if
 * delayed_reconfig is set and a timeout occurred — kick off the
 * deferred reconfigure by moving the daemon to DAEMON_CONFIGURE.
 */
1371 missing_uev_wait_tick(struct vectors *vecs)
1373 struct multipath * mpp;
1375 int timed_out = 0, delayed_reconfig;
1376 struct config *conf;
1378 vector_foreach_slot (vecs->mpvec, mpp, i) {
1379 if (mpp->wait_for_udev && --mpp->uev_wait_tick <= 0) {
1381 condlog(0, "%s: timeout waiting on creation uevent. enabling reloads", mpp->alias);
1382 if (mpp->wait_for_udev > 1 && update_map(mpp, vecs)) {
1383 /* update_map removed map */
1387 mpp->wait_for_udev = 0;
1391 conf = get_multipath_config();
1392 delayed_reconfig = conf->delayed_reconfig;
1393 put_multipath_config(conf);
1394 if (timed_out && delayed_reconfig &&
1395 !need_to_delay_reconfig(vecs)) {
1396 condlog(2, "reconfigure (delayed)");
1397 set_config_state(DAEMON_CONFIGURE);
/*
 * Per-tick countdown for maps with a positive (deferred) failback
 * interval: decrement failback_tick and, when it reaches zero and a
 * better path group is available, switch to it.
 * (Function name keeps the historical "defered" spelling; renaming
 * would break callers.)
 */
1402 defered_failback_tick (vector mpvec)
1404 struct multipath * mpp;
1407 vector_foreach_slot (mpvec, mpp, i) {
1409 * deferred failback getting sooner
1411 if (mpp->pgfailback > 0 && mpp->failback_tick > 0) {
1412 mpp->failback_tick--;
1414 if (!mpp->failback_tick && need_switch_pathgroup(mpp, 1))
1415 switch_pathgroup(mpp);
/*
 * Per-tick countdown for maps queueing I/O with no active path:
 * account queueing time, and once retry_tick reaches zero, disable
 * queue_if_no_path in dm (so queued I/O errors out) and record the
 * map failure in the statistics.
 */
1421 retry_count_tick(vector mpvec)
1423 struct multipath *mpp;
1426 vector_foreach_slot (mpvec, mpp, i) {
1427 if (mpp->retry_tick > 0) {
1428 mpp->stat_total_queueing_time++;
1429 condlog(4, "%s: Retrying.. No active path", mpp->alias);
1430 if(--mpp->retry_tick == 0) {
1431 mpp->stat_map_failures++;
1432 dm_queue_if_no_path(mpp->alias, 0);
1433 condlog(2, "%s: Disable queueing", mpp->alias);
/*
 * Refresh path priorities. When refresh_all is set (the branch guard
 * is in a missing line), re-run the prioritizer (pathinfo DI_PRIO)
 * for every path of every group of pp's map, tracking whether any
 * priority changed. Otherwise refresh only pp itself, skipping the
 * prioritizer for DOWN paths, and report whether its priority moved.
 * Return statements fall in lines missing from this fragment.
 */
1439 int update_prio(struct path *pp, int refresh_all)
1443 struct pathgroup * pgp;
1444 int i, j, changed = 0;
1445 struct config *conf;
1448 vector_foreach_slot (pp->mpp->pg, pgp, i) {
1449 vector_foreach_slot (pgp->paths, pp1, j) {
1450 oldpriority = pp1->priority;
1451 conf = get_multipath_config();
1452 pathinfo(pp1, conf, DI_PRIO);
1453 put_multipath_config(conf);
1454 if (pp1->priority != oldpriority)
1460 oldpriority = pp->priority;
1461 conf = get_multipath_config();
1462 if (pp->state != PATH_DOWN)
1463 pathinfo(pp, conf, DI_PRIO);
1464 put_multipath_config(conf);
1466 if (pp->priority == oldpriority)
/*
 * Reload the dm table for mpp (reload_map), refresh our in-memory
 * state from the kernel (setup_multipath), then re-sync per-path dm
 * states (sync_map_state). Error-return lines for the two failing
 * branches are missing from this fragment.
 */
1471 int update_path_groups(struct multipath *mpp, struct vectors *vecs, int refresh)
1473 if (reload_map(vecs, mpp, refresh, 1))
1477 if (setup_multipath(vecs, mpp) != 0)
1479 sync_map_state(mpp);
/*
 * Run the checker's repair hook on a DOWN path (non-DOWN paths are
 * skipped by the guard below) and log the checker's message via
 * LOG_MSG.
 */
1484 void repair_path(struct path * pp)
1486 if (pp->state != PATH_DOWN)
1489 checker_repair(&pp->checker);
1490 LOG_MSG(1, checker_message(&pp->checker));
/*
 * san_path_err_* policy for flaky paths. Active only when all three
 * tunables (san_path_err_threshold / _forget_rate / _recovery_time)
 * are positive on the map; otherwise the function is a no-op (the
 * early-return line is missing from this fragment).
 *
 * Behavior visible below:
 *  - a path already delayed (disable_reinstate set) is reinstated
 *    early if the clock is unreadable or it is the last active path,
 *    or normally once san_path_err_recovery_time seconds have passed;
 *  - a working path slowly "forgets" past failures at the configured
 *    forget rate;
 *  - each new failure increments path_failures, and once the count
 *    exceeds san_path_err_threshold the path is parked (delayed) with
 *    the timestamp recorded in dis_reinstate_time.
 * The reinstate_path label and the function's return statements fall
 * in lines missing from this fragment.
 */
1493 static int check_path_reinstate_state(struct path * pp) {
1494 struct timespec curr_time;
1495 if (!((pp->mpp->san_path_err_threshold > 0) &&
1496 (pp->mpp->san_path_err_forget_rate > 0) &&
1497 (pp->mpp->san_path_err_recovery_time >0))) {
1501 if (pp->disable_reinstate) {
1502 /* If we don't know how much time has passed, automatically
1503 * reinstate the path, just to be safe. Also, if there are
1504 * no other usable paths, reinstate the path
1506 if (clock_gettime(CLOCK_MONOTONIC, &curr_time) != 0 ||
1507 pp->mpp->nr_active == 0) {
1508 condlog(2, "%s : reinstating path early", pp->dev);
1509 goto reinstate_path;
1511 if ((curr_time.tv_sec - pp->dis_reinstate_time ) > pp->mpp->san_path_err_recovery_time) {
1512 condlog(2,"%s : reinstate the path after err recovery time", pp->dev);
1513 goto reinstate_path;
1517 /* forget errors on a working path */
1518 if ((pp->state == PATH_UP || pp->state == PATH_GHOST) &&
1519 pp->path_failures > 0) {
1520 if (pp->san_path_err_forget_rate > 0){
1521 pp->san_path_err_forget_rate--;
1523 /* for every san_path_err_forget_rate number of
1524 * successful path checks decrement path_failures by 1
1526 pp->path_failures--;
1527 pp->san_path_err_forget_rate = pp->mpp->san_path_err_forget_rate;
1532 /* If the path isn't recovering from a failed state, do nothing */
1533 if (pp->state != PATH_DOWN && pp->state != PATH_SHAKY &&
1534 pp->state != PATH_TIMEOUT)
1537 if (pp->path_failures == 0)
1538 pp->san_path_err_forget_rate = pp->mpp->san_path_err_forget_rate;
1540 pp->path_failures++;
1542 /* if we don't know the current time, we don't know how long to
1543 * delay the path, so there's no point in checking if we should
1546 if (clock_gettime(CLOCK_MONOTONIC, &curr_time) != 0)
1548 /* when path failures have exceeded the san_path_err_threshold
1549 * place the path in delayed state till san_path_err_recovery_time
1550 * so that the customer can rectify the issue within this time. After
1551 * the completion of san_path_err_recovery_time it should
1552 * automatically reinstate the path
1554 if (pp->path_failures > pp->mpp->san_path_err_threshold) {
1555 condlog(2, "%s : hit error threshold. Delaying path reinstatement", pp->dev);
1556 pp->dis_reinstate_time = curr_time.tv_sec;
1557 pp->disable_reinstate = 1;
1564 pp->path_failures = 0;
1565 pp->disable_reinstate = 0;
1566 pp->san_path_err_forget_rate = 0;
1571 * Returns '1' if the path has been checked, '-1' if it was blacklisted
/*
 * check_path() - run one checker cycle on a single path.
 *
 * Called from checkerloop() with vecs->lock held. Decrements the path's
 * tick by @ticks and, when due, queries the checker, synchronizes with
 * kernel state, reinstates/fails the path in device-mapper, and adjusts
 * the polling interval. Returns 0 if the path was not checked yet,
 * 1 if checked, -1 if it was blacklisted (per the comment above).
 *
 * NOTE(review): several original lines are elided in this extract;
 * comments below annotate only the code that is visible here.
 */
1575 check_path (struct vectors * vecs, struct path * pp, int ticks)
1578 int new_path_up = 0;
1579 int chkr_new_path_up = 0;
1581 int disable_reinstate = 0;
1582 int oldchkrstate = pp->chkrstate;
1583 int retrigger_tries, checkint;
1584 struct config *conf;
/* An initialized path with no owning map is skipped (orphan). */
1587 if ((pp->initialized == INIT_OK ||
1588 pp->initialized == INIT_REQUESTED_UDEV) && !pp->mpp)
/* Count down the per-path timer; don't let it go negative. */
1592 pp->tick -= (pp->tick > ticks) ? ticks : pp->tick;
1594 return 0; /* don't check this path yet */
/* Snapshot config values under RCU-style get/put. */
1596 conf = get_multipath_config();
1597 retrigger_tries = conf->retrigger_tries;
1598 checkint = conf->checkint;
1599 put_multipath_config(conf);
/*
 * Path never got udev-initialized: poke udev with a synthetic
 * "change" event, up to retrigger_tries times.
 */
1600 if (!pp->mpp && pp->initialized == INIT_MISSING_UDEV &&
1601 pp->retriggers < retrigger_tries) {
1602 condlog(2, "%s: triggering change event to reinitialize",
1604 pp->initialized = INIT_REQUESTED_UDEV;
1606 sysfs_attr_set_value(pp->udev, "uevent", "change",
1612 * provision a next check soonest,
1613 * in case we exit abnormaly from here
1615 pp->tick = checkint;
1617 newstate = path_offline(pp);
1619 * Wait for uevent for removed paths;
1620 * some LLDDs like zfcp keep paths unavailable
1621 * without sending uevents.
1623 if (newstate == PATH_REMOVED)
1624 newstate = PATH_DOWN;
/* Device node is present: ask the actual path checker. */
1626 if (newstate == PATH_UP) {
1627 conf = get_multipath_config();
1628 newstate = get_state(pp, conf, 1);
1629 put_multipath_config(conf);
1631 checker_clear_message(&pp->checker);
/* A changed WWID means this is no longer the same device. */
1633 if (pp->wwid_changed) {
1634 condlog(2, "%s: path wwid has changed. Refusing to use",
1636 newstate = PATH_DOWN;
1639 if (newstate == PATH_WILD || newstate == PATH_UNCHECKED) {
1640 condlog(2, "%s: unusable path", pp->dev);
1641 conf = get_multipath_config();
1642 pathinfo(pp, conf, 0);
1643 put_multipath_config(conf);
/*
 * Path came up without a WWID recorded: redo full discovery and
 * add it to a map (or drop it if blacklisted).
 */
1647 if (!strlen(pp->wwid) && pp->initialized != INIT_MISSING_UDEV &&
1648 (newstate == PATH_UP || newstate == PATH_GHOST)) {
1649 condlog(2, "%s: add missing path", pp->dev);
1650 conf = get_multipath_config();
1651 ret = pathinfo(pp, conf, DI_ALL | DI_BLACKLIST);
1652 if (ret == PATHINFO_OK) {
1653 ev_add_path(pp, vecs, 1);
1655 } else if (ret == PATHINFO_SKIPPED) {
1656 put_multipath_config(conf);
1659 put_multipath_config(conf);
1664 * Async IO in flight. Keep the previous path state
1665 * and reschedule as soon as possible
1667 if (newstate == PATH_PENDING) {
1672 * Synchronize with kernel state
1674 if (update_multipath_strings(pp->mpp, vecs->pathvec, 1)) {
1675 condlog(1, "%s: Could not synchronize with kernel state",
1677 pp->dmstate = PSTATE_UNDEF;
1679 /* if update_multipath_strings orphaned the path, quit early */
/* san_path_err_* policy may delay reinstatement of a flapping path. */
1683 if ((newstate == PATH_UP || newstate == PATH_GHOST) &&
1684 check_path_reinstate_state(pp)) {
1685 pp->state = PATH_DELAYED;
/* delay_wait_checks: hold a recovered path back while map has others. */
1689 if ((newstate == PATH_UP || newstate == PATH_GHOST) &&
1690 pp->wait_checks > 0) {
1691 if (pp->mpp->nr_active > 0) {
1692 pp->state = PATH_DELAYED;
1696 pp->wait_checks = 0;
1700 * don't reinstate failed path, if its in stand-by
1701 * and if target supports only implicit tpgs mode.
1702 * this will prevent unnecessary i/o by dm on stand-by
1703 * paths if there are no other active paths in map.
1705 disable_reinstate = (newstate == PATH_GHOST &&
1706 pp->mpp->nr_active == 0 &&
1707 pp->tpgs == TPGS_IMPLICIT) ? 1 : 0;
1709 pp->chkrstate = newstate;
/* --- state transition handling --- */
1710 if (newstate != pp->state) {
1711 int oldstate = pp->state;
1712 pp->state = newstate;
1714 LOG_MSG(1, checker_message(&pp->checker));
1717 * upon state change, reset the checkint
1718 * to the shortest delay
1720 conf = get_multipath_config();
1721 pp->checkint = conf->checkint;
1722 put_multipath_config(conf);
1724 if (newstate != PATH_UP && newstate != PATH_GHOST) {
1726 * proactively fail path in the DM
1728 if (oldstate == PATH_UP ||
1729 oldstate == PATH_GHOST) {
1731 if (pp->mpp->delay_wait_checks > 0 &&
1732 pp->watch_checks > 0) {
1733 pp->wait_checks = pp->mpp->delay_wait_checks;
1734 pp->watch_checks = 0;
1740 * cancel scheduled failback
1742 pp->mpp->failback_tick = 0;
1744 pp->mpp->stat_path_failures++;
/* Path transitioned to usable: handle PR and reinstatement. */
1749 if(newstate == PATH_UP || newstate == PATH_GHOST){
1750 if ( pp->mpp && pp->mpp->prflag ){
1752 * Check Persistent Reservation.
1754 condlog(2, "%s: checking persistent reservation "
1755 "registration", pp->dev);
1756 mpath_pr_event_handle(pp);
1761 * reinstate this path
1763 if (oldstate != PATH_UP &&
1764 oldstate != PATH_GHOST) {
1765 if (pp->mpp->delay_watch_checks > 0)
1766 pp->watch_checks = pp->mpp->delay_watch_checks;
1769 if (pp->watch_checks > 0)
1773 if (!disable_reinstate && reinstate_path(pp, add_active)) {
1774 condlog(3, "%s: reload map", pp->dev);
1775 ev_add_path(pp, vecs, 1);
1781 if (oldchkrstate != PATH_UP && oldchkrstate != PATH_GHOST)
1782 chkr_new_path_up = 1;
1785 * if at least one path is up in a group, and
1786 * the group is disabled, re-enable it
1788 if (newstate == PATH_UP)
/* --- no state change, path still up: kernel-state repair + backoff --- */
1791 else if (newstate == PATH_UP || newstate == PATH_GHOST) {
1792 if ((pp->dmstate == PSTATE_FAILED ||
1793 pp->dmstate == PSTATE_UNDEF) &&
1794 !disable_reinstate) {
1795 /* Clear IO errors */
1796 if (reinstate_path(pp, 0)) {
1797 condlog(3, "%s: reload map", pp->dev);
1798 ev_add_path(pp, vecs, 1);
1803 unsigned int max_checkint;
1804 LOG_MSG(4, checker_message(&pp->checker));
1805 conf = get_multipath_config();
1806 max_checkint = conf->max_checkint;
1807 put_multipath_config(conf);
1808 if (pp->checkint != max_checkint) {
1810 * double the next check delay.
1811 * max at conf->max_checkint
1813 if (pp->checkint < (max_checkint / 2))
1814 pp->checkint = 2 * pp->checkint;
1816 pp->checkint = max_checkint;
1818 condlog(4, "%s: delay next check %is",
1819 pp->dev_t, pp->checkint);
1821 if (pp->watch_checks > 0)
1823 pp->tick = pp->checkint;
1826 else if (newstate == PATH_DOWN) {
1827 int log_checker_err;
1829 conf = get_multipath_config();
1830 log_checker_err = conf->log_checker_err;
1831 put_multipath_config(conf);
/* "once" mode logs repeats at a quieter level than "always". */
1832 if (log_checker_err == LOG_CHKR_ERR_ONCE)
1833 LOG_MSG(3, checker_message(&pp->checker));
1835 LOG_MSG(2, checker_message(&pp->checker));
1838 pp->state = newstate;
/* Defer prio/failback work while a udev event is pending for the map. */
1841 if (pp->mpp->wait_for_udev)
1844 * path prio refreshing
1846 condlog(4, "path prio refresh");
/* Prio changed under group_by_prio + immediate failback: regroup now. */
1848 if (update_prio(pp, new_path_up) &&
1849 (pp->mpp->pgpolicyfn == (pgpolicyfn *)group_by_prio) &&
1850 pp->mpp->pgfailback == -FAILBACK_IMMEDIATE)
1851 update_path_groups(pp->mpp, vecs, !new_path_up);
1852 else if (need_switch_pathgroup(pp->mpp, 0)) {
1853 if (pp->mpp->pgfailback > 0 &&
1854 (new_path_up || pp->mpp->failback_tick <= 0))
1855 pp->mpp->failback_tick =
1856 pp->mpp->pgfailback + 1;
1857 else if (pp->mpp->pgfailback == -FAILBACK_IMMEDIATE ||
1858 (chkr_new_path_up && followover_should_failback(pp)))
1859 switch_pathgroup(pp->mpp);
/*
 * init_path_check_interval() - seed every known path's polling
 * interval from the configured default checkint.
 * NOTE(review): loop-variable declarations are elided in this extract.
 */
1864 static void init_path_check_interval(struct vectors *vecs)
1866 struct config *conf;
1870 vector_foreach_slot (vecs->pathvec, pp, i) {
/* get/put per iteration so a concurrent reconfigure is honored. */
1871 conf = get_multipath_config();
1872 pp->checkint = conf->checkint;
1873 put_multipath_config(conf);
/*
 * checkerloop() - main path-checker thread body.
 *
 * @ap: struct vectors * (paths + maps), cast from the pthread arg.
 *
 * Forever: compute elapsed ticks since the last pass, kick the systemd
 * watchdog, run check_path() on every path under vecs->lock, run the
 * per-map tick handlers and garbage collection, then sleep roughly one
 * second (exactly, when strict_timing is set, via SIGALRM + sigwait).
 *
 * NOTE(review): many original lines are elided in this extract.
 */
1878 checkerloop (void *ap)
1880 struct vectors *vecs;
1884 struct itimerval timer_tick_it;
1885 struct timespec last_time;
1886 struct config *conf;
/* Register with liburcu and pin memory; cleanup handler unregisters. */
1888 pthread_cleanup_push(rcu_unregister, NULL);
1889 rcu_register_thread();
1890 mlockall(MCL_CURRENT | MCL_FUTURE);
1891 vecs = (struct vectors *)ap;
1892 condlog(2, "path checkers start up");
1894 /* Tweak start time for initial path check */
1895 if (clock_gettime(CLOCK_MONOTONIC, &last_time) != 0)
1896 last_time.tv_sec = 0;
1898 last_time.tv_sec -= 1;
1901 struct timespec diff_time, start_time, end_time;
1902 int num_paths = 0, ticks = 0, signo, strict_timing, rc = 0;
/* tv_sec == 0 is used as a "clock unavailable" sentinel below. */
1905 if (clock_gettime(CLOCK_MONOTONIC, &start_time) != 0)
1906 start_time.tv_sec = 0;
1907 if (start_time.tv_sec && last_time.tv_sec) {
1908 timespecsub(&start_time, &last_time, &diff_time);
1909 condlog(4, "tick (%lu.%06lu secs)",
1910 diff_time.tv_sec, diff_time.tv_nsec / 1000);
1911 last_time = start_time;
1912 ticks = diff_time.tv_sec;
1915 condlog(4, "tick (%d ticks)", ticks);
1919 sd_notify(0, "WATCHDOG=1");
1921 rc = set_config_state(DAEMON_RUNNING);
1922 if (rc == ETIMEDOUT) {
1923 condlog(4, "timeout waiting for DAEMON_IDLE");
/* --- pass 1: check every path, dropping blacklisted ones --- */
1927 pthread_cleanup_push(cleanup_lock, &vecs->lock);
1929 pthread_testcancel();
1930 vector_foreach_slot (vecs->pathvec, pp, i) {
1931 rc = check_path(vecs, pp, ticks);
1933 vector_del_slot(vecs->pathvec, i);
1939 lock_cleanup_pop(vecs->lock);
/* --- pass 2: per-map timers (failback, retries, udev waits) --- */
1941 pthread_cleanup_push(cleanup_lock, &vecs->lock);
1943 pthread_testcancel();
1944 defered_failback_tick(vecs->mpvec);
1945 retry_count_tick(vecs->mpvec);
1946 missing_uev_wait_tick(vecs);
1947 lock_cleanup_pop(vecs->lock);
/* --- pass 3: reap maps with no remaining paths --- */
1952 pthread_cleanup_push(cleanup_lock, &vecs->lock);
1954 pthread_testcancel();
1955 condlog(4, "map garbage collection");
1956 mpvec_garbage_collector(vecs);
1958 lock_cleanup_pop(vecs->lock);
/* Measure how long this iteration took. */
1961 diff_time.tv_nsec = 0;
1962 if (start_time.tv_sec &&
1963 clock_gettime(CLOCK_MONOTONIC, &end_time) == 0) {
1964 timespecsub(&end_time, &start_time, &diff_time);
1966 unsigned int max_checkint;
1968 condlog(3, "checked %d path%s in %lu.%06lu secs",
1969 num_paths, num_paths > 1 ? "s" : "",
1971 diff_time.tv_nsec / 1000);
1972 conf = get_multipath_config();
1973 max_checkint = conf->max_checkint;
1974 put_multipath_config(conf);
1975 if (diff_time.tv_sec > max_checkint)
1976 condlog(1, "path checkers took longer "
1977 "than %lu seconds, consider "
1978 "increasing max_polling_interval",
1983 post_config_state(DAEMON_IDLE);
1984 conf = get_multipath_config();
1985 strict_timing = conf->strict_timing;
1986 put_multipath_config(conf);
/* strict_timing: arm a one-shot ITIMER_REAL for the loop remainder. */
1990 timer_tick_it.it_interval.tv_sec = 0;
1991 timer_tick_it.it_interval.tv_usec = 0;
1992 if (diff_time.tv_nsec) {
1993 timer_tick_it.it_value.tv_sec = 0;
/*
 * NOTE(review): this stores a NANOSECOND remainder into
 * it_value.tv_usec, which setitimer() interprets as
 * MICROseconds (and >999999 is invalid per POSIX).
 * Looks like a missing "/ 1000" — confirm against upstream.
 */
1994 timer_tick_it.it_value.tv_usec =
1995 1000UL * 1000 * 1000 - diff_time.tv_nsec;
1997 timer_tick_it.it_value.tv_sec = 1;
1998 timer_tick_it.it_value.tv_usec = 0;
2000 setitimer(ITIMER_REAL, &timer_tick_it, NULL);
/* Block in sigwait() until the SIGALRM from the timer arrives. */
2003 sigaddset(&mask, SIGALRM);
2004 condlog(3, "waiting for %lu.%06lu secs",
2005 timer_tick_it.it_value.tv_sec,
2006 timer_tick_it.it_value.tv_usec);
2007 if (sigwait(&mask, &signo) != 0) {
2008 condlog(3, "sigwait failed with error %d",
/* sigwait failure: fall back to non-strict sleeping. */
2010 conf = get_multipath_config();
2011 conf->strict_timing = 0;
2012 put_multipath_config(conf);
2017 pthread_cleanup_pop(1);
/*
 * configure() - (re)build the full path and map state from scratch.
 *
 * @vecs:          global path/map vectors (lock held by caller).
 * @start_waiters: nonzero to spawn a dm-event waiter thread per map.
 *
 * Discovers paths from sysfs and maps from device-mapper, coalesces
 * paths into maps (pushing changed tables into dm), prunes maps that
 * no longer apply, and installs the freshly built map vector.
 * NOTE(review): error-return and cleanup lines are elided here.
 */
2022 configure (struct vectors * vecs, int start_waiters)
2024 struct multipath * mpp;
2028 struct config *conf;
/* First call uses WEAK reload; persists across calls (static). */
2029 static int force_reload = FORCE_RELOAD_WEAK;
2031 if (!vecs->pathvec && !(vecs->pathvec = vector_alloc())) {
2032 condlog(0, "couldn't allocate path vec in configure");
2036 if (!vecs->mpvec && !(vecs->mpvec = vector_alloc())) {
2037 condlog(0, "couldn't allocate multipath vec in configure");
2041 if (!(mpvec = vector_alloc())) {
2042 condlog(0, "couldn't allocate new maps vec in configure");
2047 * probe for current path (from sysfs) and map (from dm) sets
2049 ret = path_discovery(vecs->pathvec, DI_ALL);
2051 condlog(0, "configure failed at path discovery");
/* Drop blacklisted paths; seed checkint on the survivors. */
2055 vector_foreach_slot (vecs->pathvec, pp, i){
2056 conf = get_multipath_config();
2057 if (filter_path(conf, pp) > 0){
2058 vector_del_slot(vecs->pathvec, i);
2063 pp->checkint = conf->checkint;
2064 put_multipath_config(conf);
2066 if (map_discovery(vecs)) {
2067 condlog(0, "configure failed at map discovery");
2072 * create new set of maps & push changed ones into dm
2073 * In the first call, use FORCE_RELOAD_WEAK to avoid making
2074 * superfluous ACT_RELOAD ioctls. Later calls are done
2075 * with FORCE_RELOAD_YES.
2077 ret = coalesce_paths(vecs, mpvec, NULL, force_reload, CMD_NONE);
2078 if (force_reload == FORCE_RELOAD_WEAK)
2079 force_reload = FORCE_RELOAD_YES;
2081 condlog(0, "configure failed while coalescing paths");
2086 * may need to remove some maps which are no longer relevant
2087 * e.g., due to blacklist changes in conf file
2089 if (coalesce_maps(vecs, mpvec)) {
2090 condlog(0, "configure failed while coalescing maps");
/* Push path states into dm and record the WWIDs we now manage. */
2096 sync_maps_state(mpvec);
2097 vector_foreach_slot(mpvec, mpp, i){
2098 remember_wwid(mpp->wwid);
2103 * purge dm of old maps
2108 * save new set of maps formed by considering current path state
2110 vector_free(vecs->mpvec);
2111 vecs->mpvec = mpvec;
2114 * start dm event waiter threads for these new maps
2116 vector_foreach_slot(vecs->mpvec, mpp, i) {
2117 if (setup_multipath(vecs, mpp)) {
2121 if (start_waiters) {
2122 if (start_waiter_thread(mpp, vecs)) {
2123 remove_map(mpp, vecs, 1);
/*
 * need_to_delay_reconfig() - decide whether a reconfigure must wait.
 * Scans the map vector; a map still waiting on a udev event
 * (wait_for_udev) blocks reconfiguration. An empty map vector never
 * delays. NOTE(review): return statements are elided in this extract.
 */
2132 need_to_delay_reconfig(struct vectors * vecs)
2134 struct multipath *mpp;
2137 if (!VECTOR_SIZE(vecs->mpvec))
2140 vector_foreach_slot(vecs->mpvec, mpp, i) {
2141 if (mpp->wait_for_udev)
/*
 * rcu_free_config() - call_rcu() callback that frees a struct config
 * once all RCU readers have finished with it. Recovers the enclosing
 * config from its embedded rcu_head via container_of().
 */
2147 void rcu_free_config(struct rcu_head *head)
2149 struct config *conf = container_of(head, struct config, rcu);
/*
 * reconfigure() - reload multipath.conf and rebuild all state.
 *
 * Tears down the existing maps/waiters and path vector, loads a fresh
 * config, re-applies command-line overrides (-B, -n, verbosity), then
 * publishes the new config via RCU and schedules the old one for
 * deferred freeing. NOTE(review): the configure() call and return
 * handling are elided in this extract.
 */
2155 reconfigure (struct vectors * vecs)
2157 struct config * old, *conf;
2159 conf = load_config(DEFAULT_CONFIGFILE);
2164 * free old map and path vectors ... they use old conf state
2166 if (VECTOR_SIZE(vecs->mpvec))
2167 remove_maps_and_stop_waiters(vecs);
2169 free_pathvec(vecs->pathvec, FREE_PATHS);
2170 vecs->pathvec = NULL;
2172 /* Re-read any timezone changes */
2175 dm_drv_version(conf->version, TGT_MPATH);
/* Command-line flags override file settings, as at startup. */
2177 conf->verbosity = verbosity;
2178 if (bindings_read_only)
2179 conf->bindings_read_only = bindings_read_only;
2180 if (conf->find_multipaths) {
2181 condlog(2, "find_multipaths is set: -n is implied");
2182 ignore_new_devs = 1;
2184 if (ignore_new_devs)
2185 conf->ignore_new_devs = ignore_new_devs;
2186 uxsock_timeout = conf->uxsock_timeout;
/* Swap in the new config; old one is freed after the RCU grace period. */
2188 old = rcu_dereference(multipath_conf);
2189 rcu_assign_pointer(multipath_conf, conf);
2190 call_rcu(&old->rcu, rcu_free_config);
/*
 * init_vecs() - allocate and initialize the global vectors container
 * (path vector, map vector, and their shared mutex).
 * NOTE(review): allocation-failure handling is elided in this extract.
 */
2198 static struct vectors *
2201 struct vectors * vecs;
2203 vecs = (struct vectors *)MALLOC(sizeof(struct vectors));
2208 pthread_mutex_init(&vecs->lock.mutex, NULL);
/*
 * signal_set() - install @func as the sigaction handler for @signo.
 * Returns the previously installed handler (SIG_ERR semantics for the
 * failure path are elided in this extract).
 */
2214 signal_set(int signo, void (*func) (int))
2217 struct sigaction sig;
2218 struct sigaction osig;
2220 sig.sa_handler = func;
2221 sigemptyset(&sig.sa_mask);
2224 r = sigaction(signo, &sig, &osig);
2229 return (osig.sa_handler);
/*
 * handle_signals() - act on the volatile flags set by the async signal
 * handlers (exit_sig, reconfig_sig, log_reset_sig). Runs in normal
 * thread context, so it may take locks and call non-async-safe code.
 * NOTE(review): the flag tests and flag resets are partly elided here.
 */
2233 handle_signals(void)
2236 condlog(2, "exit (signal)");
2240 condlog(2, "reconfigure (signal)");
2241 set_config_state(DAEMON_CONFIGURE);
2243 if (log_reset_sig) {
2244 condlog(2, "reset log (signal)");
/* logq_lock protects the shared log queue against the log thread. */
2245 pthread_mutex_lock(&logq_lock);
2246 log_reset("multipathd");
2247 pthread_mutex_unlock(&logq_lock);
2275 condlog(3, "SIGUSR2 received");
2284 sigaddset(&set, SIGUSR2);
2285 pthread_sigmask(SIG_SETMASK, &set, NULL);
2287 signal_set(SIGHUP, sighup);
2288 signal_set(SIGUSR1, sigusr1);
2289 signal_set(SIGUSR2, sigusr2);
2290 signal_set(SIGINT, sigend);
2291 signal_set(SIGTERM, sigend);
2292 signal_set(SIGPIPE, sigend);
2299 static struct sched_param sched_param = {
2300 .sched_priority = 99
2303 res = sched_setscheduler (0, SCHED_RR, &sched_param);
2306 condlog(LOG_WARNING, "Could not set SCHED_RR at priority 99");
2313 #ifdef OOM_SCORE_ADJ_MIN
2315 char *file = "/proc/self/oom_score_adj";
2316 int score = OOM_SCORE_ADJ_MIN;
2319 char *file = "/proc/self/oom_adj";
2320 int score = OOM_ADJUST_MIN;
2326 envp = getenv("OOMScoreAdjust");
2328 condlog(3, "Using systemd provided OOMScoreAdjust");
2332 if (stat(file, &st) == 0){
2333 fp = fopen(file, "w");
2335 condlog(0, "couldn't fopen %s : %s", file,
2339 fprintf(fp, "%i", score);
2343 if (errno != ENOENT) {
2344 condlog(0, "couldn't stat %s : %s", file,
2348 #ifdef OOM_ADJUST_MIN
2349 file = "/proc/self/oom_adj";
2350 score = OOM_ADJUST_MIN;
2355 condlog(0, "couldn't adjust oom score");
/*
 * child() - the daemon's real main routine (runs in the daemonized
 * child, or directly in foreground mode).
 *
 * Sets up logging, pidfile, config, checkers/prioritizers, fd limits
 * and the systemd watchdog; spawns the uevent, cli-listener, checker
 * and uevent-dispatcher threads; then loops servicing DAEMON_CONFIGURE
 * requests until DAEMON_SHUTDOWN, and finally tears everything down.
 *
 * NOTE(review): many lines (gotos, error exits, lock calls) are elided
 * in this extract; comments annotate only what is visible.
 */
2359 child (void * param)
2361 pthread_t check_thr, uevent_thr, uxlsnr_thr, uevq_thr;
2362 pthread_attr_t log_attr, misc_attr, uevent_attr;
2363 struct vectors * vecs;
2364 struct multipath * mpp;
2367 unsigned long checkint;
2368 int startup_done = 0;
2372 struct config *conf;
2375 mlockall(MCL_CURRENT | MCL_FUTURE);
/* Per-purpose thread stacks; waiter threads are created detached. */
2379 setup_thread_attr(&misc_attr, 64 * 1024, 0);
2380 setup_thread_attr(&uevent_attr, DEFAULT_UEVENT_STACKSIZE * 1024, 0);
2381 setup_thread_attr(&waiter_attr, 32 * 1024, 1);
2384 setup_thread_attr(&log_attr, 64 * 1024, 0);
2385 log_thread_start(&log_attr);
2386 pthread_attr_destroy(&log_attr);
2388 pid_fd = pidfile_create(DEFAULT_PIDFILE, daemon_pid);
2390 condlog(1, "failed to create pidfile");
2396 post_config_state(DAEMON_START);
2398 condlog(2, "--------start up--------");
2399 condlog(2, "read " DEFAULT_CONFIGFILE);
2401 conf = load_config(DEFAULT_CONFIGFILE);
/* Apply command-line overrides before publishing the config. */
2406 conf->verbosity = verbosity;
2407 if (bindings_read_only)
2408 conf->bindings_read_only = bindings_read_only;
2409 if (ignore_new_devs)
2410 conf->ignore_new_devs = ignore_new_devs;
2411 uxsock_timeout = conf->uxsock_timeout;
2412 rcu_assign_pointer(multipath_conf, conf);
2413 dm_init(conf->verbosity);
2414 dm_drv_version(conf->version, TGT_MPATH);
2415 if (init_checkers(conf->multipath_dir)) {
2416 condlog(0, "failed to initialize checkers");
2419 if (init_prio(conf->multipath_dir)) {
2420 condlog(0, "failed to initialize prioritizers");
2424 setlogmask(LOG_UPTO(conf->verbosity + 3));
/* systemd's LimitNOFILE wins over conf->max_fds when present. */
2426 envp = getenv("LimitNOFILE");
2429 condlog(2,"Using systemd provided open fds limit of %s", envp);
2430 } else if (conf->max_fds) {
2431 struct rlimit fd_limit;
2433 if (getrlimit(RLIMIT_NOFILE, &fd_limit) < 0) {
2434 condlog(0, "can't get open fds limit: %s",
2436 fd_limit.rlim_cur = 0;
2437 fd_limit.rlim_max = 0;
2439 if (fd_limit.rlim_cur < conf->max_fds) {
2440 fd_limit.rlim_cur = conf->max_fds;
2441 if (fd_limit.rlim_max < conf->max_fds)
2442 fd_limit.rlim_max = conf->max_fds;
2443 if (setrlimit(RLIMIT_NOFILE, &fd_limit) < 0) {
2444 condlog(0, "can't set open fds limit to "
2446 fd_limit.rlim_cur, fd_limit.rlim_max,
2449 condlog(3, "set open fds limit to %lu/%lu",
2450 fd_limit.rlim_cur, fd_limit.rlim_max);
2456 vecs = gvecs = init_vecs();
2463 dm_udev_set_sync_support(0);
/* Derive checkint/max_checkint from the systemd watchdog period. */
2465 envp = getenv("WATCHDOG_USEC");
2466 if (envp && sscanf(envp, "%lu", &checkint) == 1) {
2467 /* Value is in microseconds */
2468 conf->max_checkint = checkint / 1000000;
2469 /* Rescale checkint */
2470 if (conf->checkint > conf->max_checkint)
2471 conf->checkint = conf->max_checkint;
2473 conf->checkint = conf->max_checkint / 4;
2474 condlog(3, "enabling watchdog, interval %d max %d",
2475 conf->checkint, conf->max_checkint);
2476 use_watchdog = conf->checkint;
2480 * Startup done, invalidate configuration
2485 * Signal start of configuration
2487 post_config_state(DAEMON_CONFIGURE);
2489 init_path_check_interval(vecs);
2492 * Start uevent listener early to catch events
2494 if ((rc = pthread_create(&uevent_thr, &uevent_attr, ueventloop, udev))) {
2495 condlog(0, "failed to create uevent thread: %d", rc);
2498 pthread_attr_destroy(&uevent_attr);
2499 if ((rc = pthread_create(&uxlsnr_thr, &misc_attr, uxlsnrloop, vecs))) {
2500 condlog(0, "failed to create cli listener: %d", rc);
2507 if ((rc = pthread_create(&check_thr, &misc_attr, checkerloop, vecs))) {
2508 condlog(0,"failed to create checker loop thread: %d", rc);
2511 if ((rc = pthread_create(&uevq_thr, &misc_attr, uevqloop, vecs))) {
2512 condlog(0, "failed to create uevent dispatcher: %d", rc);
2515 pthread_attr_destroy(&misc_attr);
/*
 * Main state loop: wait on config_cond until a CONFIGURE or
 * SHUTDOWN request arrives, then service it.
 */
2517 while (running_state != DAEMON_SHUTDOWN) {
2518 pthread_cleanup_push(config_cleanup, NULL);
2519 pthread_mutex_lock(&config_lock);
2520 if (running_state != DAEMON_CONFIGURE &&
2521 running_state != DAEMON_SHUTDOWN) {
2522 pthread_cond_wait(&config_cond, &config_lock);
2524 pthread_cleanup_pop(1);
2525 if (running_state == DAEMON_CONFIGURE) {
2526 pthread_cleanup_push(cleanup_lock, &vecs->lock);
2528 pthread_testcancel();
/* If udev events are pending, mark reconfig as delayed instead. */
2529 if (!need_to_delay_reconfig(vecs)) {
2532 conf = get_multipath_config();
2533 conf->delayed_reconfig = 1;
2534 put_multipath_config(conf);
2536 lock_cleanup_pop(vecs->lock);
2537 post_config_state(DAEMON_IDLE);
2539 if (!startup_done) {
2540 sd_notify(0, "READY=1");
/* --- shutdown: stop queueing if configured, tear down maps --- */
2548 conf = get_multipath_config();
2549 if (conf->queue_without_daemon == QUE_NO_DAEMON_OFF)
2550 vector_foreach_slot(vecs->mpvec, mpp, i)
2551 dm_queue_if_no_path(mpp->alias, 0);
2552 put_multipath_config(conf);
2553 remove_maps_and_stop_waiters(vecs);
2554 unlock(&vecs->lock);
2556 pthread_cancel(check_thr);
2557 pthread_cancel(uevent_thr);
2558 pthread_cancel(uxlsnr_thr);
2559 pthread_cancel(uevq_thr);
2561 pthread_join(check_thr, NULL);
2562 pthread_join(uevent_thr, NULL);
2563 pthread_join(uxlsnr_thr, NULL);
2564 pthread_join(uevq_thr, NULL);
2567 free_pathvec(vecs->pathvec, FREE_PATHS);
2568 vecs->pathvec = NULL;
2569 unlock(&vecs->lock);
2571 pthread_mutex_destroy(&vecs->lock.mutex);
2581 /* We're done here */
2582 condlog(3, "unlink pidfile");
2583 unlink(DEFAULT_PIDFILE);
2585 condlog(2, "--------shut down-------");
2591 * Freeing config must be done after condlog() and dm_lib_exit(),
2592 * because logging functions like dlog() and dm_write_log()
2593 * reference the config.
2595 conf = rcu_dereference(multipath_conf);
2596 rcu_assign_pointer(multipath_conf, NULL);
2597 call_rcu(&conf->rcu, rcu_free_config);
2600 pthread_attr_destroy(&waiter_attr);
2602 dbg_free_final(NULL);
/* Report exit status to systemd (0 = clean, 1 = error path). */
2606 sd_notify(0, "ERRNO=0");
2612 sd_notify(0, "ERRNO=1");
2625 if( (pid = fork()) < 0){
2626 fprintf(stderr, "Failed first fork : %s\n", strerror(errno));
2634 if ( (pid = fork()) < 0)
2635 fprintf(stderr, "Failed second fork : %s\n", strerror(errno));
2640 fprintf(stderr, "cannot chdir to '/', continuing\n");
2642 dev_null_fd = open("/dev/null", O_RDWR);
2643 if (dev_null_fd < 0){
2644 fprintf(stderr, "cannot open /dev/null for input & output : %s\n",
2649 close(STDIN_FILENO);
2650 if (dup(dev_null_fd) < 0) {
2651 fprintf(stderr, "cannot dup /dev/null to stdin : %s\n",
2655 close(STDOUT_FILENO);
2656 if (dup(dev_null_fd) < 0) {
2657 fprintf(stderr, "cannot dup /dev/null to stdout : %s\n",
2661 close(STDERR_FILENO);
2662 if (dup(dev_null_fd) < 0) {
2663 fprintf(stderr, "cannot dup /dev/null to stderr : %s\n",
2668 daemon_pid = getpid();
/*
 * main() - entry point. Parses options, and either acts as a one-shot
 * CLI client (-k / trailing command words, forwarded over the unix
 * socket via uxclnt()) or becomes the daemon by calling child().
 * Requires root. NOTE(review): several option cases and the daemonize
 * branch are elided in this extract.
 */
2673 main (int argc, char *argv[])
2675 extern char *optarg;
2680 struct config *conf;
/* Valgrind/DRD annotations: these racy reads are intentional. */
2682 ANNOTATE_BENIGN_RACE_SIZED(&multipath_conf, sizeof(multipath_conf),
2683 "Manipulated through RCU");
2684 ANNOTATE_BENIGN_RACE_SIZED(&running_state, sizeof(running_state),
2685 "Suppress complaints about unprotected running_state reads");
2686 ANNOTATE_BENIGN_RACE_SIZED(&uxsock_timeout, sizeof(uxsock_timeout),
2687 "Suppress complaints about this scalar variable");
2691 if (getuid() != 0) {
2692 fprintf(stderr, "need to be root\n");
2696 /* make sure we don't lock any path */
2698 fprintf(stderr, "can't chdir to root directory : %s\n",
2700 umask(umask(077) | 022);
/* config_cond must use CLOCK_MONOTONIC for timed waits. */
2702 pthread_cond_init_mono(&config_cond);
2706 while ((arg = getopt(argc, argv, ":dsv:k::Bn")) != EOF ) {
2712 //debug=1; /* ### comment me out ### */
2715 if (sizeof(optarg) > sizeof(char *) ||
2716 !isdigit(optarg[0]))
2719 verbosity = atoi(optarg);
/* -k: interactive client mode — talk to a running daemon. */
2725 conf = load_config(DEFAULT_CONFIGFILE);
2729 conf->verbosity = verbosity;
2730 uxsock_timeout = conf->uxsock_timeout;
2731 uxclnt(optarg, uxsock_timeout + 100);
2735 bindings_read_only = 1;
2738 ignore_new_devs = 1;
2741 fprintf(stderr, "Invalid argument '-%c'\n",
/* Trailing args: join them into one command line for the daemon. */
2746 if (optind < argc) {
2751 conf = load_config(DEFAULT_CONFIGFILE);
2755 conf->verbosity = verbosity;
2756 uxsock_timeout = conf->uxsock_timeout;
2757 memset(cmd, 0x0, CMDSIZE);
2758 while (optind < argc) {
/* Quote words containing spaces so the daemon re-tokenizes them. */
2759 if (strchr(argv[optind], ' '))
2760 c += snprintf(c, s + CMDSIZE - c, "\"%s\" ", argv[optind]);
2762 c += snprintf(c, s + CMDSIZE - c, "%s ", argv[optind]);
2765 c += snprintf(c, s + CMDSIZE - c, "\n");
2766 uxclnt(s, uxsock_timeout + 100);
2772 if (!isatty(fileno(stdout)))
2773 setbuf(stdout, NULL);
2775 daemon_pid = getpid();
/* Foreground path: run the daemon body directly. */
2787 return (child(NULL));
/*
 * mpath_pr_event_handler_fn() - thread body that re-registers a
 * persistent reservation key on a newly recovered path.
 *
 * @pathp: struct path * of the path that just came back up.
 *
 * Reads the current registered keys (PR IN / READ KEYS); if the map's
 * reservation_key is absent from the response, registers it on this
 * path with PR OUT / REGISTER AND IGNORE EXISTING KEY.
 * NOTE(review): returns, frees, and some loop bodies are elided in
 * this extract.
 */
2790 void * mpath_pr_event_handler_fn (void * pathp )
2792 struct multipath * mpp;
2793 int i,j, ret, isFound;
2794 struct path * pp = (struct path *)pathp;
2795 unsigned char *keyp;
2797 struct prout_param_descriptor *param;
2798 struct prin_resp *resp;
2802 resp = mpath_alloc_prin_response(MPATH_PRIN_RKEY_SA);
2804 condlog(0,"%s Alloc failed for prin response", pp->dev);
2808 ret = prin_do_scsi_ioctl(pp->dev, MPATH_PRIN_RKEY_SA, resp, 0);
2809 if (ret != MPATH_PR_SUCCESS )
2811 condlog(0,"%s : pr in read keys service action failed. Error=%d", pp->dev, ret);
2815 condlog(3, " event pr=%d addlen=%d",resp->prin_descriptor.prin_readkeys.prgeneration,
2816 resp->prin_descriptor.prin_readkeys.additional_length );
/* additional_length == 0 means no keys registered on the device. */
2818 if (resp->prin_descriptor.prin_readkeys.additional_length == 0 )
2820 condlog(1, "%s: No key found. Device may not be registered.", pp->dev);
2821 ret = MPATH_PR_SUCCESS;
/* Build the 64-bit prkey byte-by-byte from the map's key. */
2825 keyp = (unsigned char *)mpp->reservation_key;
2826 for (j = 0; j < 8; ++j) {
2832 condlog(2, "Multipath reservation_key: 0x%" PRIx64 " ", prkey);
/* Scan the 8-byte key list for our key (keys are 8 bytes each). */
2835 for (i = 0; i < resp->prin_descriptor.prin_readkeys.additional_length/8; i++ )
2837 condlog(2, "PR IN READKEYS[%d] reservation key:",i);
2838 dumpHex((char *)&resp->prin_descriptor.prin_readkeys.key_list[i*8], 8 , -1);
2839 if (!memcmp(mpp->reservation_key, &resp->prin_descriptor.prin_readkeys.key_list[i*8], 8))
2841 condlog(2, "%s: pr key found in prin readkeys response", mpp->alias);
2848 condlog(0, "%s: Either device not registered or ", pp->dev);
2849 condlog(0, "host is not authorised for registration. Skip path");
2850 ret = MPATH_PR_OTHER;
/* NOTE(review): malloc() return is not checked before memset/use. */
2854 param= malloc(sizeof(struct prout_param_descriptor));
2855 memset(param, 0 , sizeof(struct prout_param_descriptor));
/* Serialize prkey big-endian into the service-action key field. */
2857 for (j = 7; j >= 0; --j) {
2858 param->sa_key[j] = (prkey & 0xff);
2861 param->num_transportid = 0;
2863 condlog(3, "device %s:%s", pp->dev, pp->mpp->wwid);
2865 ret = prout_do_scsi_ioctl(pp->dev, MPATH_PROUT_REG_IGN_SA, 0, 0, param, 0);
2866 if (ret != MPATH_PR_SUCCESS )
2868 condlog(0,"%s: Reservation registration failed. Error: %d", pp->dev, ret);
2878 int mpath_pr_event_handle(struct path *pp)
2882 pthread_attr_t attr;
2883 struct multipath * mpp;
2887 if (!mpp->reservation_key)
2890 pthread_attr_init(&attr);
2891 pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
2893 rc = pthread_create(&thread, NULL , mpath_pr_event_handler_fn, pp);
2895 condlog(0, "%s: ERROR; return code from pthread_create() is %d", pp->dev, rc);
2898 pthread_attr_destroy(&attr);
2899 rc = pthread_join(thread, NULL);