2 * Copyright (c) 2004, 2005 Christophe Varoqui
3 * Copyright (c) 2005 Kiyoshi Ueda, NEC
4 * Copyright (c) 2005 Benjamin Marzinski, Redhat
5 * Copyright (c) 2005 Edward Goggin, EMC
9 #include <libdevmapper.h>
12 #include <sys/types.h>
16 #include <sys/resource.h>
18 #include <linux/oom.h>
19 #include <mpath_persist.h>
38 #include <blacklist.h>
39 #include <structs_vec.h>
41 #include <devmapper.h>
44 #include <discovery.h>
48 #include <switchgroup.h>
50 #include <configure.h>
52 #include <pgpolicies.h>
60 #include "cli_handlers.h"
/*
 * File-scope constants, macros and daemon-wide globals.
 * NOTE(review): this extract is garbled — each line carries a stray
 * leading number and several lines are missing; compare with upstream.
 */
64 #define FILE_NAME_SIZE 256
/* LOG_MSG: log path state; one branch for offline paths, one with the
 * checker message 'b'. Backslash-continued macro (body lines missing). */
67 #define LOG_MSG(a, b) \
70 condlog(a, "%s: %s - path offline", pp->mpp->alias, pp->dev); \
72 condlog(a, "%s: %s - %s", pp->mpp->alias, pp->dev, b); \
75 struct mpath_event_param
78 struct multipath *mpp;
81 unsigned int mpath_mx_alloc_len;
/* condvar/mutex pair used by exit_daemon() to wake child()'s
 * pthread_cond_wait and begin shutdown */
83 pthread_cond_t exit_cond = PTHREAD_COND_INITIALIZER;
84 pthread_mutex_t exit_mutex = PTHREAD_MUTEX_INITIALIZER;
87 enum daemon_status running_state;
91 * global copy of vecs for use in sig handlers
93 struct vectors * gvecs;
/*
 * need_switch_pathgroup - decide whether mpp should switch to its best
 * path group.  With 'refresh' set, re-reads every path's priority
 * (pathinfo DI_PRIO) before calling select_path_group().  Compares the
 * resulting bestpg against nextpg.  (Fragment: return type, braces and
 * return statements are missing from this extract.)
 */
96 need_switch_pathgroup (struct multipath * mpp, int refresh)
98 struct pathgroup * pgp;
/* manual failback policy: never switch automatically */
102 if (!mpp || mpp->pgfailback == -FAILBACK_MANUAL)
106 * Refresh path priority values
109 vector_foreach_slot (mpp->pg, pgp, i)
110 vector_foreach_slot (pgp->paths, pp, j)
111 pathinfo(pp, conf->hwtable, DI_PRIO);
113 mpp->bestpg = select_path_group(mpp);
115 if (mpp->bestpg != mpp->nextpg)
/*
 * switch_pathgroup - tell device-mapper to activate mpp->bestpg for
 * this map, bumping the stat_switchgroup counter and logging the move.
 */
122 switch_pathgroup (struct multipath * mpp)
124 mpp->stat_switchgroup++;
125 dm_switchgroup(mpp->alias, mpp->bestpg);
126 condlog(2, "%s: switch to path group #%i",
127 mpp->alias, mpp->bestpg);
/*
 * coalesce_maps - reconcile the old map vector (vecs->mpvec) with the
 * new one (nmpv): flush maps no longer allowed by the configuration;
 * a map that cannot be flushed (e.g. still open) is kept by moving it
 * into nmpv.  With conf->reassign_maps set, surviving maps are handed
 * to dm_reassign().  (Fragment: several lines missing.)
 */
131 coalesce_maps(struct vectors *vecs, vector nmpv)
133 struct multipath * ompp;
134 vector ompv = vecs->mpvec;
138 vector_foreach_slot (ompv, ompp, i) {
139 if (!find_mp_by_wwid(nmpv, ompp->wwid)) {
141 * remove all current maps not allowed by the
142 * current configuration
144 if (dm_flush_map(ompp->alias)) {
145 condlog(0, "%s: unable to flush devmap",
/* flush failed — likely the device is open; keep the map in nmpv */
148 * may be just because the device is open
150 if (!vector_alloc_slot(nmpv))
153 vector_set_slot(nmpv, ompp);
154 setup_multipath(vecs, ompp);
155 /* drop it from the old vector so it isn't freed twice */
156 if ((j = find_slot(ompv, (void *)ompp)) != -1)
157 vector_del_slot(ompv, j);
163 condlog(2, "%s devmap removed", ompp->alias);
165 } else if (conf->reassign_maps) {
166 condlog(3, "%s: Reassign existing device-mapper"
167 " devices", ompp->alias);
168 dm_reassign(ompp->alias);
/*
 * sync_map_state - push the checker-observed path states into the
 * kernel map: reinstate paths the checker sees as UP/GHOST but DM has
 * failed (or undefined), and fail paths the checker sees as DOWN/SHAKY
 * but DM still has active.  UNCHECKED/WILD paths are left alone.
 */
175 sync_map_state(struct multipath *mpp)
177 struct pathgroup *pgp;
184 vector_foreach_slot (mpp->pg, pgp, i){
185 vector_foreach_slot (pgp->paths, pp, j){
/* state unknown to the checker — do not touch the kernel state */
186 if (pp->state == PATH_UNCHECKED ||
187 pp->state == PATH_WILD)
189 if ((pp->dmstate == PSTATE_FAILED ||
190 pp->dmstate == PSTATE_UNDEF) &&
191 (pp->state == PATH_UP || pp->state == PATH_GHOST))
192 dm_reinstate_path(mpp->alias, pp->dev_t);
193 else if ((pp->dmstate == PSTATE_ACTIVE ||
194 pp->dmstate == PSTATE_UNDEF) &&
195 (pp->state == PATH_DOWN ||
196 pp->state == PATH_SHAKY))
197 dm_fail_path(mpp->alias, pp->dev_t);
/*
 * sync_maps_state - apply sync_map_state() to every map in mpvec.
 * (Fragment: the per-slot call body is missing from this extract.)
 */
203 sync_maps_state(vector mpvec)
206 struct multipath *mpp;
208 vector_foreach_slot (mpvec, mpp, i)
/*
 * flush_map - remove the DM map for mpp, then orphan its paths and
 * stop its waiter thread.  A dm_flush_map() failure is logged but may
 * be benign (map already removed externally, e.g. via dmsetup).
 */
213 flush_map(struct multipath * mpp, struct vectors * vecs)
216 * clear references to this map before flushing so we can ignore
217 * the spurious uevent we may generate with the dm_flush_map call below
219 if (dm_flush_map(mpp->alias)) {
221 * May not really be an error -- if the map was already flushed
222 * from the device mapper by dmsetup(8) for instance.
224 condlog(0, "%s: can't flush", mpp->alias);
229 condlog(2, "%s: devmap removed", mpp->alias);
232 orphan_paths(vecs->pathvec, mpp);
233 remove_map_and_stop_waiter(mpp, vecs, 1);
/*
 * uev_add_map - uevent entry point for a DM map add/change event.
 * Resolves the map alias from DM_NAME, falling back to dm_mapname()
 * via the event's major:minor, then delegates to ev_add_map().
 */
239 uev_add_map (struct uevent * uev, struct vectors * vecs)
242 int major = -1, minor = -1, rc;
244 condlog(3, "%s: add map (uevent)", uev->kernel);
245 alias = uevent_get_dm_name(uev);
/* no DM_NAME in the uevent — derive the alias from major:minor */
247 condlog(3, "%s: No DM_NAME in uevent", uev->kernel);
248 major = uevent_get_major(uev);
249 minor = uevent_get_minor(uev);
250 alias = dm_mapname(major, minor);
252 condlog(2, "%s: mapname not found for %d:%d",
253 uev->kernel, major, minor);
257 rc = ev_add_map(uev->kernel, alias, vecs);
/*
 * ev_add_map - register a multipath map named 'alias' (kernel device
 * 'dev').  Ignores non-multipath DM targets.  If the map is already
 * known, optionally reassigns DM devices; otherwise registers it via
 * add_map_without_path(), or falls back to coalesce_paths() on the
 * refwwid.  (Fragment: several lines missing.)
 */
263 ev_add_map (char * dev, char * alias, struct vectors * vecs)
266 struct multipath * mpp;
270 map_present = dm_map_present(alias);
/* present in DM but not a multipath target: not ours to manage */
272 if (map_present && dm_type(alias, TGT_MPATH) <= 0) {
273 condlog(4, "%s: not a multipath map", alias);
277 mpp = find_mp_by_alias(vecs->mpvec, alias);
281 * Not really an error -- we generate our own uevent
282 * if we create a multipath mapped device as a result
285 if (conf->reassign_maps) {
286 condlog(3, "%s: Reassign existing device-mapper devices",
292 condlog(2, "%s: adding map", alias);
295 * now we can register the map
297 if (map_present && (mpp = add_map_without_path(vecs, alias))) {
299 condlog(2, "%s: devmap %s registered", alias, dev);
/* map not in DM yet: resolve the wwid and build it from paths */
302 refwwid = get_refwwid(dev, DEV_DEVMAP, vecs->pathvec);
305 r = coalesce_paths(vecs, NULL, refwwid, 0);
310 condlog(2, "%s: devmap %s added", alias, dev);
312 condlog(0, "%s: uev_add_map %s failed", alias, dev);
/*
 * uev_remove_map - uevent entry point for a DM map remove event.
 * Looks the map up by minor number, sanity-checks the alias against
 * the event's DM_NAME, then orphans paths and stops the waiter.
 */
319 uev_remove_map (struct uevent * uev, struct vectors * vecs)
323 struct multipath *mpp;
325 condlog(2, "%s: remove map (uevent)", uev->kernel);
326 alias = uevent_get_dm_name(uev);
328 condlog(3, "%s: No DM_NAME in uevent, ignoring", uev->kernel);
331 minor = uevent_get_minor(uev);
332 mpp = find_mp_by_minor(vecs->mpvec, minor);
335 condlog(2, "%s: devmap not registered, can't remove",
/* alias/minor disagree: the event is for a different (stale) map */
339 if (strcmp(mpp->alias, alias)) {
340 condlog(2, "%s: minor number mismatch (map %d, event %d)",
341 mpp->alias, mpp->dmi->minor, minor);
345 orphan_paths(vecs->pathvec, mpp);
346 remove_map_and_stop_waiter(mpp, vecs, 1);
/*
 * ev_remove_map - CLI/event helper: find the registered map by minor,
 * verify the alias matches, and flush it via flush_map().
 */
353 ev_remove_map (char * devname, char * alias, int minor, struct vectors * vecs)
355 struct multipath * mpp;
357 mpp = find_mp_by_minor(vecs->mpvec, minor);
360 condlog(2, "%s: devmap not registered, can't remove",
364 if (strcmp(mpp->alias, alias)) {
365 condlog(2, "%s: minor number mismatch (map %d, event %d)",
366 mpp->alias, mpp->dmi->minor, minor);
369 return flush_map(mpp, vecs);
373 uev_add_path (struct uevent *uev, struct vectors * vecs)
375 condlog(2, "%s: add path (uevent)", uev->kernel);
376 return (ev_add_path(dev->kernel, vecs) != 1)? 0 : 1;
/*
 * ev_add_path - add the path named 'devname' to the daemon state and
 * to its multipath map: validate the name, gather path info, filter
 * blacklisted paths, then either reload an existing map (by wwid) or
 * create a new one, push it to device-mapper, and resync state.
 * Failure paths generally leave the path in pathvec (see fail labels).
 * (Fragment: braces, labels and several statements are missing.)
 */
386 ev_add_path (char * devname, struct vectors * vecs)
388 struct multipath * mpp;
390 char empty_buff[WWID_SIZE] = {0};
391 char params[PARAMS_SIZE] = {0};
393 int start_waiter = 0;
/* reject names containing ".." — no relative device names allowed */
395 if (strstr(devname, "..") != NULL) {
397 * Don't allow relative device names in the pathvec
399 condlog(0, "%s: path name is invalid", devname);
403 pp = find_path_by_dev(vecs->pathvec, devname);
406 condlog(0, "%s: spurious uevent, path already in pathvec",
413 * get path vital state
415 if (!(pp = store_pathinfo(vecs->pathvec, conf->hwtable,
417 condlog(0, "%s: failed to store path info", devname);
420 pp->checkint = conf->checkint;
/* a path without a wwid cannot be mapped to a multipath device */
424 * need path UID to go any further
426 if (memcmp(empty_buff, pp->wwid, WWID_SIZE) == 0) {
427 condlog(0, "%s: failed to get path uid", devname);
428 goto fail; /* leave path added to pathvec */
/* blacklisted by config: drop the freshly stored path again */
430 if (filter_path(conf, pp) > 0){
431 int i = find_slot(vecs->pathvec, (void *)pp);
433 vector_del_slot(vecs->pathvec, i);
437 mpp = pp->mpp = find_mp_by_wwid(vecs->mpvec, pp->wwid);
/* existing map: refuse zero-size paths or size mismatches */
440 if ((!pp->size) || (mpp->size != pp->size)) {
442 condlog(0, "%s: failed to add new path %s, "
446 condlog(0, "%s: failed to add new path %s, "
447 "device size mismatch",
449 int i = find_slot(vecs->pathvec, (void *)pp);
451 vector_del_slot(vecs->pathvec, i);
456 condlog(4,"%s: adopting all paths for path %s",
457 mpp->alias, pp->dev);
458 if (adopt_paths(vecs->pathvec, mpp, 1))
459 goto fail; /* leave path added to pathvec */
461 verify_paths(mpp, vecs, NULL);
462 mpp->flush_on_last_del = FLUSH_UNDEF;
463 mpp->action = ACT_RELOAD;
/* no existing map: a zero-size device cannot seed a new map */
467 condlog(0, "%s: failed to create new map,"
468 " %s device size is 0 ", devname, pp->dev);
469 int i = find_slot(vecs->pathvec, (void *)pp);
471 vector_del_slot(vecs->pathvec, i);
476 condlog(4,"%s: creating new map", pp->dev);
477 if ((mpp = add_map_with_path(vecs, pp, 1))) {
478 mpp->action = ACT_CREATE;
480 * We don't depend on ACT_CREATE, as domap will
481 * set it to ACT_NOTHING when complete.
486 goto fail; /* leave path added to pathvec */
489 /* persistent reservation check */
490 mpath_pr_event_handle(pp);
493 * push the map to the device-mapper
495 if (setup_map(mpp, params, PARAMS_SIZE)) {
496 condlog(0, "%s: failed to setup map for addition of new "
497 "path %s", mpp->alias, devname);
501 * reload the map for the multipath mapped device
503 if (domap(mpp, params) <= 0) {
504 condlog(0, "%s: failed in domap for addition of new "
505 "path %s", mpp->alias, devname);
/* reload raced with another uevent: back off and retry */
507 * deal with asynchronous uevents :((
509 if (mpp->action == ACT_RELOAD && retries-- > 0) {
510 condlog(0, "%s: uev_add_path sleep", mpp->alias);
512 update_mpp_paths(mpp, vecs->pathvec);
515 else if (mpp->action == ACT_RELOAD)
516 condlog(0, "%s: giving up reload", mpp->alias);
523 * update our state from kernel regardless of create or reload
525 if (setup_multipath(vecs, mpp))
526 goto fail; /* if setup_multipath fails, it removes the map */
/* freshly created maps (or reused ones without a waiter) need a
 * DM-event waiter thread */
530 if ((mpp->action == ACT_CREATE ||
531 (mpp->action == ACT_NOTHING && start_waiter && !mpp->waiter)) &&
532 start_waiter_thread(mpp, vecs))
536 condlog(2, "%s path added to devmap %s", devname, mpp->alias);
543 remove_map(mpp, vecs, 1);
/*
 * uev_remove_path - uevent entry point for a block device "remove"
 * event; delegates to ev_remove_path() with the kernel device name.
 */
550 uev_remove_path (struct uevent *uev, struct vectors * vecs)
554 condlog(2, "%s: remove path (uevent)", uev->kernel);
555 return ev_remove_path(uev->kernel, vecs);
/*
 * ev_remove_path - remove the path 'devname' from its map and from
 * pathvec.  If it was the map's last path, the map itself is flushed
 * (queueing disabled first when flush_on_last_del is enabled);
 * otherwise the map is reloaded without the path.
 * (Fragment: braces, returns and some statements are missing.)
 */
559 ev_remove_path (char * devname, struct vectors * vecs)
561 struct multipath * mpp;
564 char params[PARAMS_SIZE] = {0};
566 pp = find_path_by_dev(vecs->pathvec, devname);
569 /* Not an error; path might have been purged earlier */
570 condlog(0, "%s: path already removed", devname);
575 * avoid referring to the map of an orphaned path
577 if ((mpp = pp->mpp)) {
579 * transform the mp->pg vector of vectors of paths
580 * into a mp->params string to feed the device-mapper
582 if (update_mpp_paths(mpp, vecs->pathvec)) {
583 condlog(0, "%s: failed to update paths",
587 if ((i = find_slot(mpp->paths, (void *)pp)) != -1)
588 vector_del_slot(mpp->paths, i);
591 * remove the map IFF removing the last path
593 if (VECTOR_SIZE(mpp->paths) == 0) {
594 char alias[WWID_SIZE];
597 * flush_map will fail if the device is open
/* NOTE(review): strncpy does not guarantee NUL-termination when
 * mpp->alias is >= WWID_SIZE chars — confirm alias length bound
 * upstream or terminate explicitly. */
599 strncpy(alias, mpp->alias, WWID_SIZE);
600 if (mpp->flush_on_last_del == FLUSH_ENABLED) {
601 condlog(2, "%s Last path deleted, disabling queueing", mpp->alias);
603 mpp->no_path_retry = NO_PATH_RETRY_FAIL;
604 mpp->flush_on_last_del = FLUSH_IN_PROGRESS;
605 dm_queue_if_no_path(mpp->alias, 0);
607 if (!flush_map(mpp, vecs)) {
608 condlog(2, "%s: removed map after"
609 " removing all paths",
615 * Not an error, continue
/* paths remain: rebuild the table and reload the map without pp */
619 if (setup_map(mpp, params, PARAMS_SIZE)) {
620 condlog(0, "%s: failed to setup map for"
621 " removal of path %s", mpp->alias,
628 mpp->action = ACT_RELOAD;
629 if (domap(mpp, params) <= 0) {
630 condlog(0, "%s: failed in domap for "
631 "removal of path %s",
632 mpp->alias, devname);
636 * update our state from kernel
638 if (setup_multipath(vecs, mpp)) {
643 condlog(2, "%s: path removed from map %s",
644 devname, mpp->alias);
649 if ((i = find_slot(vecs->pathvec, (void *)pp)) != -1)
650 vector_del_slot(vecs->pathvec, i);
657 remove_map_and_stop_waiter(mpp, vecs, 1);
/*
 * uev_update_path - uevent "change" handler for an existing path.
 * Reads the DISK_RO property and, when the path is known, reloads its
 * map via reload_map().  (Fragment: several lines missing.)
 */
662 uev_update_path (struct uevent *uev, struct vectors * vecs)
666 ro = uevent_get_disk_ro(uev);
671 condlog(2, "%s: update path write_protect to '%d' (uevent)",
673 pp = find_path_by_dev(vecs->pathvec, uev->kernel);
675 condlog(0, "%s: spurious uevent, path not found",
680 retval = reload_map(vecs, pp->mpp);
682 condlog(2, "%s: map %s reloaded (retval %d)",
683 uev->kernel, pp->mpp->alias, retval);
/*
 * map_discovery - populate vecs->mpvec from device-mapper
 * (dm_get_maps) and run setup_multipath() on each discovered map.
 */
691 map_discovery (struct vectors * vecs)
693 struct multipath * mpp;
696 if (dm_get_maps(vecs->mpvec))
699 vector_foreach_slot (vecs->mpvec, mpp, i)
700 if (setup_multipath(vecs, mpp))
/*
 * uxsock_trigger - CLI socket callback: parse one command string under
 * the vecs lock and produce a reply.  On error with no reply yet,
 * replies "fail\n"; on success with an empty reply, "ok\n"; negative
 * parse results leave *reply untouched.
 */
707 uxsock_trigger (char * str, char ** reply, int * len, void * trigger_data)
709 struct vectors * vecs;
714 vecs = (struct vectors *)trigger_data;
/* take the vecs lock with a cancellation-safe cleanup handler */
716 pthread_cleanup_push(cleanup_lock, &vecs->lock);
718 pthread_testcancel();
720 r = parse_cmd(str, reply, len, vecs);
723 *reply = STRDUP("fail\n");
724 *len = strlen(*reply) + 1;
727 else if (!r && *len == 0) {
728 *reply = STRDUP("ok\n");
729 *len = strlen(*reply) + 1;
732 /* else if (r < 0) leave *reply alone */
734 lock_cleanup_pop(vecs->lock);
/*
 * uev_discard - filter uevents by devpath: keep only whole block
 * devices ("/block/<name>"); discard events without "/block/" and
 * events for partitions ("/block/<name>/<part>").
 */
739 uev_discard(char * devpath)
745 * keep only block devices, discard partitions
747 tmp = strstr(devpath, "/block/");
749 condlog(4, "no /block/ in '%s'", devpath);
/* second sscanf matching two components means a partition node */
752 if (sscanf(tmp, "/block/%10s", a) != 1 ||
753 sscanf(tmp, "/block/%10[^/]/%10s", a, b) == 2) {
754 condlog(4, "discard event on %s", devpath);
/*
 * uev_trigger - central uevent dispatcher.  Under the vecs lock,
 * routes "dm-*" events to the map handlers (change/remove) and other
 * block-device events to the path handlers (add/remove/change),
 * after devnode blacklist filtering.
 */
761 uev_trigger (struct uevent * uev, void * trigger_data)
764 struct vectors * vecs;
766 vecs = (struct vectors *)trigger_data;
768 if (uev_discard(uev->devpath))
771 pthread_cleanup_push(cleanup_lock, &vecs->lock);
773 pthread_testcancel();
/* dm-* device: a map-level event.  "add" is ignored because the
 * table is not fully initialised at that point. */
777 * Add events are ignored here as the tables
778 * are not fully initialised then.
780 if (!strncmp(uev->kernel, "dm-", 3)) {
781 if (!strncmp(uev->action, "change", 6)) {
782 r = uev_add_map(uev, vecs);
785 if (!strncmp(uev->action, "remove", 6)) {
786 r = uev_remove_map(uev, vecs);
793 * path add/remove event
795 if (filter_devnode(conf->blist_devnode, conf->elist_devnode,
799 if (!strncmp(uev->action, "add", 3)) {
800 r = uev_add_path(uev, vecs);
803 if (!strncmp(uev->action, "remove", 6)) {
804 r = uev_remove_path(uev, vecs);
807 if (!strncmp(uev->action, "change", 6)) {
808 r = uev_update_path(uev, vecs);
813 lock_cleanup_pop(vecs->lock);
/*
 * ueventloop - thread entry: block SIGUSR1/SIGHUP for this thread and
 * run the uevent listener (listener start call missing from extract).
 */
818 ueventloop (void * ap)
820 block_signal(SIGUSR1, NULL);
821 block_signal(SIGHUP, NULL);
824 condlog(0, "error starting uevent listener");
/*
 * NOTE(review): fragment of the uevent-dispatcher thread entry
 * (presumably uevqloop — its header line is missing from this
 * extract).  Blocks SIGUSR1/SIGHUP, then runs uevent_dispatch()
 * feeding events to uev_trigger.
 */
832 block_signal(SIGUSR1, NULL);
833 block_signal(SIGHUP, NULL);
835 if (uevent_dispatch(&uev_trigger, ap))
836 condlog(0, "error starting uevent dispatcher");
/*
 * uxlsnrloop - CLI listener thread entry: block signals, register all
 * CLI command handlers (keyword -> callback), then serve the unix
 * socket via uxsock_listen() with uxsock_trigger as the parser.
 */
841 uxlsnrloop (void * ap)
843 block_signal(SIGUSR1, NULL);
844 block_signal(SIGHUP, NULL);
/* command table: keyword combinations map to cli_* handlers */
849 set_handler_callback(LIST+PATHS, cli_list_paths);
850 set_handler_callback(LIST+PATHS+FMT, cli_list_paths_fmt);
851 set_handler_callback(LIST+MAPS, cli_list_maps);
852 set_handler_callback(LIST+STATUS, cli_list_status);
853 set_handler_callback(LIST+DAEMON, cli_list_daemon);
854 set_handler_callback(LIST+MAPS+STATUS, cli_list_maps_status);
855 set_handler_callback(LIST+MAPS+STATS, cli_list_maps_stats);
856 set_handler_callback(LIST+MAPS+FMT, cli_list_maps_fmt);
857 set_handler_callback(LIST+MAPS+TOPOLOGY, cli_list_maps_topology);
858 set_handler_callback(LIST+TOPOLOGY, cli_list_maps_topology);
859 set_handler_callback(LIST+MAP+TOPOLOGY, cli_list_map_topology);
860 set_handler_callback(LIST+CONFIG, cli_list_config);
861 set_handler_callback(LIST+BLACKLIST, cli_list_blacklist);
862 set_handler_callback(LIST+DEVICES, cli_list_devices);
863 set_handler_callback(LIST+WILDCARDS, cli_list_wildcards);
864 set_handler_callback(ADD+PATH, cli_add_path);
865 set_handler_callback(DEL+PATH, cli_del_path);
866 set_handler_callback(ADD+MAP, cli_add_map);
867 set_handler_callback(DEL+MAP, cli_del_map);
868 set_handler_callback(SWITCH+MAP+GROUP, cli_switch_group);
869 set_handler_callback(RECONFIGURE, cli_reconfigure);
870 set_handler_callback(SUSPEND+MAP, cli_suspend);
871 set_handler_callback(RESUME+MAP, cli_resume);
872 set_handler_callback(RESIZE+MAP, cli_resize);
873 set_handler_callback(RELOAD+MAP, cli_reload);
874 set_handler_callback(RESET+MAP, cli_reassign);
875 set_handler_callback(REINSTATE+PATH, cli_reinstate);
876 set_handler_callback(FAIL+PATH, cli_fail);
877 set_handler_callback(DISABLEQ+MAP, cli_disable_queueing);
878 set_handler_callback(RESTOREQ+MAP, cli_restore_queueing);
879 set_handler_callback(DISABLEQ+MAPS, cli_disable_all_queueing);
880 set_handler_callback(RESTOREQ+MAPS, cli_restore_all_queueing);
881 set_handler_callback(QUIT, cli_quit);
882 set_handler_callback(SHUTDOWN, cli_shutdown);
883 set_handler_callback(GETPRSTATUS+MAP, cli_getprstatus);
884 set_handler_callback(SETPRSTATUS+MAP, cli_setprstatus);
885 set_handler_callback(UNSETPRSTATUS+MAP, cli_unsetprstatus);
888 uxsock_listen(&uxsock_trigger, ap);
/*
 * exit_daemon - request daemon shutdown: unlink the pidfile and signal
 * exit_cond so child()'s pthread_cond_wait returns and teardown runs.
 */
894 exit_daemon (int status)
897 fprintf(stderr, "bad exit status. see daemon.log\n");
899 condlog(3, "unlink pidfile");
900 unlink(DEFAULT_PIDFILE);
/* wake the main thread blocked on exit_cond in child() */
902 pthread_mutex_lock(&exit_mutex);
903 pthread_cond_signal(&exit_cond);
904 pthread_mutex_unlock(&exit_mutex);
/*
 * NOTE(review): fragment of a function switching on the daemon state
 * machine (running_state) — its header and case bodies are missing
 * from this extract.
 */
912 switch (running_state) {
917 case DAEMON_CONFIGURE:
921 case DAEMON_SHUTDOWN:
/*
 * fail_path - mark a checker-failed path as failed in device-mapper
 * and update the map's queueing mode accordingly.
 */
928 fail_path (struct path * pp, int del_active)
933 condlog(2, "checker failed path %s in map %s",
934 pp->dev_t, pp->mpp->alias);
936 dm_fail_path(pp->mpp->alias, pp->dev_t);
938 update_queue_mode_del_path(pp->mpp);
942 * caller must have locked the path list before calling that function
/*
 * reinstate_path - tell device-mapper to reinstate pp; on success,
 * optionally account the path as active again in its map.
 */
945 reinstate_path (struct path * pp, int add_active)
950 if (dm_reinstate_path(pp->mpp->alias, pp->dev_t))
951 condlog(0, "%s: reinstate failed", pp->dev_t);
953 condlog(2, "%s: reinstated", pp->dev_t);
955 update_queue_mode_add_path(pp->mpp);
/*
 * enable_group - re-enable pp's path group in device-mapper if it is
 * currently disabled.  Safe no-op when pgindex is not yet set (path
 * added via uev_add_path before the next map reload).
 */
960 enable_group(struct path * pp)
962 struct pathgroup * pgp;
965 * if path is added through uev_add_path, pgindex can be unset.
966 * next update_strings() will set it, upon map reload event.
968 * we can safely return here, because upon map reload, all
969 * PG will be enabled.
971 if (!pp->mpp->pg || !pp->pgindex)
/* pgindex is 1-based; the pg vector is 0-based */
974 pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
976 if (pgp->status == PGSTATE_DISABLED) {
977 condlog(2, "%s: enable group #%i", pp->mpp->alias, pp->pgindex);
978 dm_enablegroup(pp->mpp->alias, pp->pgindex);
/*
 * mpvec_garbage_collector - drop registered maps whose DM device has
 * disappeared, stopping their waiter threads.
 */
983 mpvec_garbage_collector (struct vectors * vecs)
985 struct multipath * mpp;
991 vector_foreach_slot (vecs->mpvec, mpp, i) {
992 if (mpp && mpp->alias && !dm_map_present(mpp->alias)) {
993 condlog(2, "%s: remove dead map", mpp->alias);
994 remove_map_and_stop_waiter(mpp, vecs, 1);
/*
 * defered_failback_tick - per-second tick for deferred failback:
 * count each map's failback_tick down and, when it hits zero and the
 * best group differs, switch path groups.
 */
1001 defered_failback_tick (vector mpvec)
1003 struct multipath * mpp;
1006 vector_foreach_slot (mpvec, mpp, i) {
1008 * defered failback getting sooner
1010 if (mpp->pgfailback > 0 && mpp->failback_tick > 0) {
1011 mpp->failback_tick--;
1013 if (!mpp->failback_tick && need_switch_pathgroup(mpp, 1))
1014 switch_pathgroup(mpp);
/*
 * retry_count_tick - per-second tick for no-path retry: while a map is
 * queueing with no active path, count retry_tick down; at zero,
 * disable queue_if_no_path so pending I/O fails instead of hanging.
 */
1020 retry_count_tick(vector mpvec)
1022 struct multipath *mpp;
1025 vector_foreach_slot (mpvec, mpp, i) {
1026 if (mpp->retry_tick) {
1027 mpp->stat_total_queueing_time++;
1028 condlog(4, "%s: Retrying.. No active path", mpp->alias);
1029 if(--mpp->retry_tick == 0) {
1030 dm_queue_if_no_path(mpp->alias, 0);
1031 condlog(2, "%s: Disable queueing", mpp->alias);
/*
 * update_prio - refresh path priorities; with refresh_all, re-reads
 * every path in every group of pp's map and reports whether any
 * priority changed, otherwise only pp's own priority is refreshed.
 * (Fragment: returns and braces are missing from this extract.)
 */
1037 int update_prio(struct path *pp, int refresh_all)
1041 struct pathgroup * pgp;
1042 int i, j, changed = 0;
1045 vector_foreach_slot (pp->mpp->pg, pgp, i) {
1046 vector_foreach_slot (pgp->paths, pp1, j) {
1047 oldpriority = pp1->priority;
1048 pathinfo(pp1, conf->hwtable, DI_PRIO);
1049 if (pp1->priority != oldpriority)
/* single-path refresh: compare new priority against the old one */
1055 oldpriority = pp->priority;
1056 pathinfo(pp, conf->hwtable, DI_PRIO);
1058 if (pp->priority == oldpriority)
/*
 * update_path_groups - rebuild the map's path groups (optionally
 * refreshing path priorities first), reload it into device-mapper
 * with ACT_RELOAD, then resync daemon state and kernel path states.
 */
1063 int update_path_groups(struct multipath *mpp, struct vectors *vecs, int refresh)
1067 char params[PARAMS_SIZE];
1069 update_mpp_paths(mpp, vecs->pathvec);
1071 vector_foreach_slot (mpp->paths, pp, i)
1072 pathinfo(pp, conf->hwtable, DI_PRIO);
1075 if (setup_map(mpp, params, PARAMS_SIZE))
1078 mpp->action = ACT_RELOAD;
1079 if (domap(mpp, params) <= 0) {
1080 condlog(0, "%s: failed to update map : %s", mpp->alias,
1085 if (setup_multipath(vecs, mpp) != 0)
1087 sync_map_state(mpp);
/*
 * check_path - run one checker cycle for a single path: rate-limited
 * by pp->tick, it determines the new path state, synchronizes with the
 * kernel map, fails or reinstates the path in DM on state changes,
 * adapts the check interval, and triggers priority-based group
 * switching / failback scheduling.
 * (Fragment: braces, returns and some statements are missing.)
 */
1093 check_path (struct vectors * vecs, struct path * pp)
1096 int new_path_up = 0;
/* tick countdown: only check this path when its timer expires */
1101 if (pp->tick && --pp->tick)
1102 return; /* don't check this path yet */
1105 * provision a next check soonest,
1106 * in case we exit abnormaly from here
1108 pp->tick = conf->checkint;
1110 newstate = path_offline(pp);
1111 if (newstate == PATH_UP)
1112 newstate = get_state(pp, 1);
1114 if (newstate == PATH_WILD || newstate == PATH_UNCHECKED) {
1115 condlog(2, "%s: unusable path", pp->dev);
1116 pathinfo(pp, conf->hwtable, 0);
1120 * Async IO in flight. Keep the previous path state
1121 * and reschedule as soon as possible
1123 if (newstate == PATH_PENDING) {
1128 * Synchronize with kernel state
1130 if (update_multipath_strings(pp->mpp, vecs->pathvec)) {
1131 condlog(1, "%s: Could not synchronize with kernel state\n",
1133 pp->dmstate = PSTATE_UNDEF;
/* state transition: log it, reset the check interval, and update DM */
1135 if (newstate != pp->state) {
1136 int oldstate = pp->state;
1137 pp->state = newstate;
1138 LOG_MSG(1, checker_message(&pp->checker));
1141 * upon state change, reset the checkint
1142 * to the shortest delay
1144 pp->checkint = conf->checkint;
1146 if (newstate == PATH_DOWN || newstate == PATH_SHAKY) {
1148 * proactively fail path in the DM
1150 if (oldstate == PATH_UP ||
1151 oldstate == PATH_GHOST)
1157 * cancel scheduled failback
1159 pp->mpp->failback_tick = 0;
1161 pp->mpp->stat_path_failures++;
/* path came (back) up: optionally re-check persistent reservation
 * registration, then reinstate it in DM */
1165 if(newstate == PATH_UP || newstate == PATH_GHOST){
1166 if ( pp->mpp && pp->mpp->prflag ){
1168 * Check Persistent Reservation.
1170 condlog(2, "%s: checking persistent reservation "
1171 "registration", pp->dev);
1172 mpath_pr_event_handle(pp);
1177 * reinstate this path
1179 if (oldstate != PATH_UP &&
1180 oldstate != PATH_GHOST)
1181 reinstate_path(pp, 1);
1183 reinstate_path(pp, 0);
1188 * if at least one path is up in a group, and
1189 * the group is disabled, re-enable it
1191 if (newstate == PATH_UP)
/* no transition but path healthy: clear lingering DM failures and
 * back off the check interval exponentially up to max_checkint */
1194 else if (newstate == PATH_UP || newstate == PATH_GHOST) {
1195 if (pp->dmstate == PSTATE_FAILED ||
1196 pp->dmstate == PSTATE_UNDEF) {
1197 /* Clear IO errors */
1198 reinstate_path(pp, 0);
1200 LOG_MSG(4, checker_message(&pp->checker));
1202 * double the next check delay.
1203 * max at conf->max_checkint
1205 if (pp->checkint < (conf->max_checkint / 2))
1206 pp->checkint = 2 * pp->checkint;
1208 pp->checkint = conf->max_checkint;
1210 pp->tick = pp->checkint;
1211 condlog(4, "%s: delay next check %is",
1212 pp->dev_t, pp->tick);
1215 else if (newstate == PATH_DOWN) {
1216 if (conf->log_checker_err == LOG_CHKR_ERR_ONCE)
1217 LOG_MSG(3, checker_message(&pp->checker));
1219 LOG_MSG(2, checker_message(&pp->checker));
1222 pp->state = newstate;
1225 * path prio refreshing
1227 condlog(4, "path prio refresh");
/* priority changed under group_by_prio with immediate failback:
 * regroup; otherwise schedule or perform a pathgroup switch */
1229 if (update_prio(pp, new_path_up) &&
1230 (pp->mpp->pgpolicyfn == (pgpolicyfn *)group_by_prio) &&
1231 pp->mpp->pgfailback == -FAILBACK_IMMEDIATE)
1232 update_path_groups(pp->mpp, vecs, !new_path_up);
1233 else if (need_switch_pathgroup(pp->mpp, 0)) {
1234 if (pp->mpp->pgfailback > 0 &&
1235 (new_path_up || pp->mpp->failback_tick <= 0))
1236 pp->mpp->failback_tick =
1237 pp->mpp->pgfailback + 1;
1238 else if (pp->mpp->pgfailback == -FAILBACK_IMMEDIATE)
1239 switch_pathgroup(pp->mpp);
/*
 * checkerloop - path-checker thread entry: lock memory, initialise
 * each path's check interval, then loop forever checking every path,
 * ticking failback/retry counters and garbage-collecting dead maps,
 * all under the vecs lock.  (Fragment: loop construct lines missing.)
 */
1244 checkerloop (void *ap)
1246 struct vectors *vecs;
1252 mlockall(MCL_CURRENT | MCL_FUTURE);
1253 vecs = (struct vectors *)ap;
1254 condlog(2, "path checkers start up");
1257 * init the path check interval
1259 vector_foreach_slot (vecs->pathvec, pp, i) {
1260 pp->checkint = conf->checkint;
/* hold SIGHUP while holding the lock; restore the mask after */
1264 block_signal(SIGHUP, &old);
1265 pthread_cleanup_push(cleanup_lock, &vecs->lock);
1267 pthread_testcancel();
1270 if (vecs->pathvec) {
1271 vector_foreach_slot (vecs->pathvec, pp, i) {
1272 check_path(vecs, pp);
1276 defered_failback_tick(vecs->mpvec);
1277 retry_count_tick(vecs->mpvec);
1282 condlog(4, "map garbage collection");
1283 mpvec_garbage_collector(vecs);
1287 lock_cleanup_pop(vecs->lock);
1288 pthread_sigmask(SIG_SETMASK, &old, NULL);
/*
 * configure - full (re)configuration: discover paths from sysfs and
 * maps from DM, filter blacklisted paths, coalesce paths into a new
 * map vector, reconcile it with the old one, sync states, swap the
 * map vector in, and optionally start waiter threads for each map.
 * (Fragment: returns and some statements are missing.)
 */
1295 configure (struct vectors * vecs, int start_waiters)
1297 struct multipath * mpp;
1302 if (!vecs->pathvec && !(vecs->pathvec = vector_alloc()))
1305 if (!vecs->mpvec && !(vecs->mpvec = vector_alloc()))
1308 if (!(mpvec = vector_alloc()))
1312 * probe for current path (from sysfs) and map (from dm) sets
1314 path_discovery(vecs->pathvec, conf, DI_ALL);
1316 vector_foreach_slot (vecs->pathvec, pp, i){
1317 if (filter_path(conf, pp) > 0){
1318 vector_del_slot(vecs->pathvec, i);
1323 pp->checkint = conf->checkint;
1325 if (map_discovery(vecs))
1329 * create new set of maps & push changed ones into dm
1331 if (coalesce_paths(vecs, mpvec, NULL, 1))
1335 * may need to remove some maps which are no longer relevant
1336 * e.g., due to blacklist changes in conf file
1338 if (coalesce_maps(vecs, mpvec))
1343 sync_maps_state(mpvec);
1344 vector_foreach_slot(mpvec, mpp, i){
1349 * purge dm of old maps
1354 * save new set of maps formed by considering current path state
1356 vector_free(vecs->mpvec);
1357 vecs->mpvec = mpvec;
1360 * start dm event waiter threads for these new maps
1362 vector_foreach_slot(vecs->mpvec, mpp, i) {
1363 if (setup_multipath(vecs, mpp))
1366 if (start_waiter_thread(mpp, vecs))
/*
 * reconfigure - reload the config file and rebuild all state: tear
 * down maps/waiters and the path vector (they reference the old conf),
 * then load DEFAULT_CONFIGFILE, preserving the old verbosity.
 * (Fragment: the re-configure call and frees are missing.)
 */
1373 reconfigure (struct vectors * vecs)
1375 struct config * old = conf;
1379 * free old map and path vectors ... they use old conf state
1381 if (VECTOR_SIZE(vecs->mpvec))
1382 remove_maps_and_stop_waiters(vecs);
1384 if (VECTOR_SIZE(vecs->pathvec))
1385 free_pathvec(vecs->pathvec, FREE_PATHS);
1387 vecs->pathvec = NULL;
1390 if (!load_config(DEFAULT_CONFIGFILE)) {
1391 conf->verbosity = old->verbosity;
/*
 * init_vecs - allocate and initialise the global vectors container:
 * the struct itself plus its recursive-depth mutex lock.
 * Returns NULL on allocation failure.
 */
static struct vectors *
1404 struct vectors * vecs;
1406 vecs = (struct vectors *)MALLOC(sizeof(struct vectors));
1412 (pthread_mutex_t *)MALLOC(sizeof(pthread_mutex_t));
1414 if (!vecs->lock.mutex)
1417 pthread_mutex_init(vecs->lock.mutex, NULL);
1418 vecs->lock.depth = 0;
1424 condlog(0, "failed to init paths");
/*
 * signal_set - install 'func' as the handler for 'signo' via
 * sigaction(2) with an empty blocked-mask; returns the previous
 * handler (SIG_ERR path missing from this extract).
 */
1429 signal_set(int signo, void (*func) (int))
1432 struct sigaction sig;
1433 struct sigaction osig;
1435 sig.sa_handler = func;
1436 sigemptyset(&sig.sa_mask);
1439 r = sigaction(signo, &sig, &osig);
1444 return (osig.sa_handler);
/*
 * NOTE(review): fragments of the signal handlers and signal_init().
 * SIGHUP triggers a reconfigure when the daemon is DAEMON_RUNNING;
 * SIGUSR1 is logged; signal_init installs the handlers and ignores
 * SIGPIPE.  Handler headers are missing from this extract.
 */
1450 condlog(2, "reconfigure (SIGHUP)");
1452 if (running_state != DAEMON_RUNNING)
1457 unlock(gvecs->lock);
1460 dbg_free_final(NULL);
1473 condlog(3, "SIGUSR1 received");
/* signal_init: wire up the process-wide signal dispositions */
1479 signal_set(SIGHUP, sighup);
1480 signal_set(SIGUSR1, sigusr1);
1481 signal_set(SIGINT, sigend);
1482 signal_set(SIGTERM, sigend);
1483 signal(SIGPIPE, SIG_IGN);
/*
 * setscheduler - raise the daemon to real-time round-robin scheduling
 * (SCHED_RR, priority 99); failure is logged, not fatal.
 */
static struct sched_param sched_param = {
1491 .sched_priority = 99
1494 res = sched_setscheduler (0, SCHED_RR, &sched_param);
1497 condlog(LOG_WARNING, "Could not set SCHED_RR at priority 99");
/*
 * set_oom_adj - make the daemon immune to the OOM killer by writing
 * OOM_SCORE_ADJ_MIN to /proc/self/oom_score_adj, falling back to the
 * legacy /proc/self/oom_adj interface when the file does not exist.
 * (Fragment: the retry loop and fclose are missing from this extract.)
 */
char *file = "/proc/self/oom_score_adj";
1506 int score = OOM_SCORE_ADJ_MIN;
1511 if (stat(file, &st) == 0){
1512 fp = fopen(file, "w");
1514 condlog(0, "couldn't fopen %s : %s", file,
1518 fprintf(fp, "%i", score);
/* ENOENT means old kernel: retry with the legacy oom_adj file */
1522 if (errno != ENOENT) {
1523 condlog(0, "couldn't stat %s : %s", file,
1527 file = "/proc/self/oom_adj";
1528 score = OOM_ADJUST_MIN;
1530 condlog(0, "couldn't adjust oom score");
/*
 * child - the daemon main body (runs in the forked child): set up
 * logging and config, raise fd limits, init vecs and sysfs, spawn the
 * uevent/CLI/checker/dispatcher threads, write the pidfile, then block
 * on exit_cond until exit_daemon() is called, and finally tear
 * everything down.  (Fragment: error labels and frees are missing.)
 */
1534 child (void * param)
1536 pthread_t check_thr, uevent_thr, uxlsnr_thr, uevq_thr;
1537 pthread_attr_t log_attr, misc_attr;
1538 struct vectors * vecs;
1539 struct multipath * mpp;
/* pin all current and future pages to avoid paging the daemon out */
1543 mlockall(MCL_CURRENT | MCL_FUTURE);
1545 setup_thread_attr(&misc_attr, 64 * 1024, 1);
1546 setup_thread_attr(&waiter_attr, 32 * 1024, 1);
1549 setup_thread_attr(&log_attr, 64 * 1024, 0);
1550 log_thread_start(&log_attr);
1551 pthread_attr_destroy(&log_attr);
1554 running_state = DAEMON_START;
1556 condlog(2, "--------start up--------");
1557 condlog(2, "read " DEFAULT_CONFIGFILE);
1559 if (load_config(DEFAULT_CONFIGFILE))
1562 if (init_checkers()) {
1563 condlog(0, "failed to initialize checkers");
1567 condlog(0, "failed to initialize prioritizers");
1571 setlogmask(LOG_UPTO(conf->verbosity + 3));
/* raise RLIMIT_NOFILE when the config asks for more open fds */
1573 if (conf->max_fds) {
1574 struct rlimit fd_limit;
1576 if (getrlimit(RLIMIT_NOFILE, &fd_limit) < 0) {
1577 condlog(0, "can't get open fds limit: %s\n",
1579 fd_limit.rlim_cur = 0;
1580 fd_limit.rlim_max = 0;
1582 if (fd_limit.rlim_cur < conf->max_fds) {
1583 fd_limit.rlim_cur = conf->max_fds;
1584 if (fd_limit.rlim_max < conf->max_fds)
1585 fd_limit.rlim_max = conf->max_fds;
1586 if (setrlimit(RLIMIT_NOFILE, &fd_limit) < 0) {
1587 condlog(0, "can't set open fds limit to "
1589 fd_limit.rlim_cur, fd_limit.rlim_max,
1592 condlog(3, "set open fds limit to %lu/%lu\n",
1593 fd_limit.rlim_cur, fd_limit.rlim_max);
1602 vecs = gvecs = init_vecs();
1607 if (sysfs_init(conf->sysfs_dir, FILE_NAME_SIZE)) {
1608 condlog(0, "can not find sysfs mount point");
1612 udev_set_sync_support(0);
1614 * Start uevent listener early to catch events
1616 if ((rc = pthread_create(&uevent_thr, &misc_attr, ueventloop, vecs))) {
1617 condlog(0, "failed to create uevent thread: %d", rc);
1620 if ((rc = pthread_create(&uxlsnr_thr, &misc_attr, uxlsnrloop, vecs))) {
1621 condlog(0, "failed to create cli listener: %d", rc);
1625 * fetch and configure both paths and multipaths
1628 running_state = DAEMON_CONFIGURE;
1630 if (configure(vecs, 1)) {
1632 condlog(0, "failure during configuration");
1640 if ((rc = pthread_create(&check_thr, &misc_attr, checkerloop, vecs))) {
1641 condlog(0,"failed to create checker loop thread: %d", rc);
1644 if ((rc = pthread_create(&uevq_thr, &misc_attr, uevqloop, vecs))) {
1645 condlog(0, "failed to create uevent dispatcher: %d", rc);
1648 pthread_attr_destroy(&misc_attr);
1650 pthread_mutex_lock(&exit_mutex);
1651 /* Startup complete, create logfile */
1652 if (pidfile_create(DEFAULT_PIDFILE, daemon_pid))
1653 /* Ignore errors, we can live without */
1654 condlog(1, "failed to create pidfile");
/* block here until exit_daemon() signals exit_cond */
1656 running_state = DAEMON_RUNNING;
1657 pthread_cond_wait(&exit_cond, &exit_mutex);
1662 running_state = DAEMON_SHUTDOWN;
1663 block_signal(SIGHUP, NULL);
1665 if (conf->queue_without_daemon == QUE_NO_DAEMON_OFF)
1666 vector_foreach_slot(vecs->mpvec, mpp, i)
1667 dm_queue_if_no_path(mpp->alias, 0);
1668 remove_maps_and_stop_waiters(vecs);
1671 pthread_cancel(check_thr);
1672 pthread_cancel(uevent_thr);
1673 pthread_cancel(uxlsnr_thr);
1674 pthread_cancel(uevq_thr);
1679 free_pathvec(vecs->pathvec, FREE_PATHS);
1680 vecs->pathvec = NULL;
1682 /* Now all the waitevent threads will start rushing in. */
1683 while (vecs->lock.depth > 0) {
1684 sleep (1); /* This is weak. */
1685 condlog(3,"Have %d wait event checkers threads to de-alloc, waiting..\n", vecs->lock.depth);
1687 pthread_mutex_destroy(vecs->lock.mutex);
1688 FREE(vecs->lock.mutex);
1689 vecs->lock.depth = 0;
1690 vecs->lock.mutex = NULL;
1700 /* We're done here */
1701 condlog(3, "unlink pidfile");
1702 unlink(DEFAULT_PIDFILE);
1704 condlog(2, "--------shut down-------");
1710 * Freeing config must be done after condlog() and dm_lib_exit(),
1711 * because logging functions like dlog() and dm_write_log()
1712 * reference the config.
1718 dbg_free_final(NULL);
/*
 * daemonize - classic double-fork daemonization: fork twice, chdir to
 * "/", and redirect stdin/stdout/stderr to /dev/null, recording the
 * daemon pid.  (Fragment: returns/exits between forks are missing.)
 */
if( (pid = fork()) < 0){
1731 fprintf(stderr, "Failed first fork : %s\n", strerror(errno));
1739 if ( (pid = fork()) < 0)
1740 fprintf(stderr, "Failed second fork : %s\n", strerror(errno));
1745 fprintf(stderr, "cannot chdir to '/', continuing\n");
1747 dev_null_fd = open("/dev/null", O_RDWR);
1748 if (dev_null_fd < 0){
1749 fprintf(stderr, "cannot open /dev/null for input & output : %s\n",
/* detach the standard streams from the controlling terminal */
1754 close(STDIN_FILENO);
1756 close(STDOUT_FILENO);
1758 close(STDERR_FILENO);
1761 daemon_pid = getpid();
/*
 * main - entry point: require root, set a safe umask, allocate the
 * config, parse -d/-v/-k options (with -k forwarding the remaining
 * argv as one CLI command string), then run child() (daemonization
 * lines are missing from this extract).
 */
main (int argc, char *argv[])
1768 extern char *optarg;
1774 running_state = DAEMON_INIT;
1777 if (getuid() != 0) {
1778 fprintf(stderr, "need to be root\n");
1782 /* make sure we don't lock any path */
1784 umask(umask(077) | 022);
1786 conf = alloc_config();
1791 while ((arg = getopt(argc, argv, ":dv:k::")) != EOF ) {
1795 //debug=1; /* ### comment me out ### */
1798 if (sizeof(optarg) > sizeof(char *) ||
1799 !isdigit(optarg[0]))
1802 conf->verbosity = atoi(optarg);
/* -k: forward trailing args to the daemon CLI as a single string,
 * quoting args that contain spaces */
1811 if (optind < argc) {
1816 while (optind < argc) {
1817 if (strchr(argv[optind], ' '))
1818 c += snprintf(c, s + CMDSIZE - c, "\"%s\" ", argv[optind]);
1820 c += snprintf(c, s + CMDSIZE - c, "%s ", argv[optind]);
1823 c += snprintf(c, s + CMDSIZE - c, "\n");
1841 return (child(NULL));
/*
 * mpath_pr_event_handler_fn - worker thread body: verify this path's
 * SCSI persistent-reservation registration.  Reads the registered keys
 * (PR IN / READ KEYS); if the map's reservation key is absent,
 * re-registers it via PR OUT / REGISTER AND IGNORE EXISTING KEY.
 * (Fragment: declarations, labels and frees are missing.)
 */
void * mpath_pr_event_handler_fn (void * pathp )
1846 struct multipath * mpp;
1847 int i,j, ret, isFound;
1848 struct path * pp = (struct path *)pathp;
1849 unsigned char *keyp;
1851 struct prout_param_descriptor *param;
1852 struct prin_resp *resp;
1856 resp = mpath_alloc_prin_response(MPATH_PRIN_RKEY_SA);
1858 condlog(0,"%s Alloc failed for prin response \n", pp->dev);
1862 ret = prin_do_scsi_ioctl(pp->dev, MPATH_PRIN_RKEY_SA, resp, 0);
1863 if (ret != MPATH_PR_SUCCESS )
1865 condlog(0,"%s : pr in read keys service action failed. Error=%d\n", pp->dev, ret);
1869 condlog(3, " event pr=%d addlen=%d\n",resp->prin_descriptor.prin_readkeys.prgeneration,
1870 resp->prin_descriptor.prin_readkeys.additional_length );
/* zero additional_length: no keys registered on the device */
1872 if (resp->prin_descriptor.prin_readkeys.additional_length == 0 )
1874 condlog(1, "%s: No key found. Device may not be registered.", pp->dev);
1875 ret = MPATH_PR_SUCCESS;
/* build the 64-bit reservation key from the map's key bytes */
1879 keyp = (unsigned char *)mpp->reservation_key;
1880 for (j = 0; j < 8; ++j) {
1886 condlog(2, "Multipath reservation_key: 0x%" PRIx64 " ", prkey);
/* scan the returned 8-byte keys for the map's reservation key */
1889 for (i = 0; i < resp->prin_descriptor.prin_readkeys.additional_length/8; i++ )
1891 condlog(2, "PR IN READKEYS[%d] reservation key:\n",i);
1892 dumpHex((char *)&resp->prin_descriptor.prin_readkeys.key_list[i*8], 8 , -1);
1893 if (!memcmp(mpp->reservation_key, &resp->prin_descriptor.prin_readkeys.key_list[i*8], 8))
1895 condlog(2, "%s: pr key found in prin readkeys response", mpp->alias);
1902 condlog(0, "%s: Either device not registered or ", pp->dev);
1903 condlog(0, "host is not authorised for registration. Skip path\n");
1904 ret = MPATH_PR_OTHER;
/* NOTE(review): malloc result is used unchecked below — confirm
 * upstream handles allocation failure here. */
1908 param= malloc(sizeof(struct prout_param_descriptor));
1909 memset(param, 0 , sizeof(struct prout_param_descriptor));
1911 for (j = 7; j >= 0; --j) {
1912 param->sa_key[j] = (prkey & 0xff);
1915 param->num_transportid = 0;
1917 condlog(3, "device %s:%s \n", pp->dev, pp->mpp->wwid);
1919 ret = prout_do_scsi_ioctl(pp->dev, MPATH_PROUT_REG_IGN_SA, 0, 0, param, 0);
1920 if (ret != MPATH_PR_SUCCESS )
1922 condlog(0,"%s: Reservation registration failed. Error: %d\n", pp->dev, ret);
1932 int mpath_pr_event_handle(struct path *pp)
1936 pthread_attr_t attr;
1937 struct multipath * mpp;
1941 if (!mpp->reservation_key)
1944 pthread_attr_init(&attr);
1945 pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
1947 rc = pthread_create(&thread, NULL , mpath_pr_event_handler_fn, pp);
1949 condlog(0, "%s: ERROR; return code from pthread_create() is %d\n", pp->dev, rc);
1952 pthread_attr_destroy(&attr);
1953 rc = pthread_join(thread, NULL);