/*
 * Copyright (c) 2004, 2005 Christophe Varoqui
 * Copyright (c) 2005 Kiyoshi Ueda, NEC
 * Copyright (c) 2005 Benjamin Marzinski, Redhat
 * Copyright (c) 2005 Edward Goggin, EMC
 */
9 #include <libdevmapper.h>
12 #include <sys/types.h>
16 #include <sys/resource.h>
36 #include <blacklist.h>
37 #include <structs_vec.h>
39 #include <devmapper.h>
42 #include <discovery.h>
46 #include <switchgroup.h>
48 #include <configure.h>
56 #include "cli_handlers.h"
60 #define FILE_NAME_SIZE 256
/*
 * Log the checker message 'b' for the path 'pp' (expected in the caller's
 * scope) at verbosity 'a', but only when the message is non-empty.
 *
 * Wrapped in do { } while (0) so the macro is a single statement: the
 * original bare if-statement form could silently capture a following
 * "else" (dangling-else) and misbehave inside unbraced if/else bodies.
 * Arguments are parenthesized to survive expression arguments.
 */
#define LOG_MSG(a, b)						\
	do {							\
		if (strlen(b))					\
			condlog((a), "%s: %s", pp->dev, (b));	\
	} while (0)
/*
 * Daemon shutdown handshake: the main child thread waits on exit_cond
 * (under exit_mutex); exit_daemon() signals it to trigger teardown.
 */
66 pthread_cond_t exit_cond = PTHREAD_COND_INITIALIZER;
67 pthread_mutex_t exit_mutex = PTHREAD_MUTEX_INITIALIZER;
72 * global copy of vecs for use in sig handlers
74 struct vectors * gvecs;
77 need_switch_pathgroup (struct multipath * mpp, int refresh)
79 struct pathgroup * pgp;
83 if (!mpp || mpp->pgfailback == -FAILBACK_MANUAL)
87 * Refresh path priority values
90 vector_foreach_slot (mpp->pg, pgp, i)
91 vector_foreach_slot (pgp->paths, pp, j)
92 pathinfo(pp, conf->hwtable, DI_PRIO);
94 mpp->bestpg = select_path_group(mpp);
96 if (mpp->bestpg != mpp->nextpg)
103 switch_pathgroup (struct multipath * mpp)
105 mpp->stat_switchgroup++;
106 dm_switchgroup(mpp->alias, mpp->bestpg);
107 condlog(2, "%s: switch to path group #%i",
108 mpp->alias, mpp->bestpg);
112 coalesce_maps(struct vectors *vecs, vector nmpv)
114 struct multipath * ompp;
115 vector ompv = vecs->mpvec;
119 vector_foreach_slot (ompv, ompp, i) {
120 if (!find_mp_by_wwid(nmpv, ompp->wwid)) {
122 * remove all current maps not allowed by the
123 * current configuration
125 if (dm_flush_map(ompp->alias)) {
126 condlog(0, "%s: unable to flush devmap",
129 * may be just because the device is open
131 if (!vector_alloc_slot(nmpv))
134 vector_set_slot(nmpv, ompp);
135 setup_multipath(vecs, ompp);
137 if ((j = find_slot(ompv, (void *)ompp)) != -1)
138 vector_del_slot(ompv, j);
144 condlog(2, "%s devmap removed", ompp->alias);
152 sync_map_state(struct multipath *mpp)
154 struct pathgroup *pgp;
161 vector_foreach_slot (mpp->pg, pgp, i){
162 vector_foreach_slot (pgp->paths, pp, j){
163 if (pp->state == PATH_UNCHECKED ||
164 pp->state == PATH_WILD)
166 if ((pp->dmstate == PSTATE_FAILED ||
167 pp->dmstate == PSTATE_UNDEF) &&
168 (pp->state == PATH_UP || pp->state == PATH_GHOST))
169 dm_reinstate_path(mpp->alias, pp->dev_t);
170 else if ((pp->dmstate == PSTATE_ACTIVE ||
171 pp->dmstate == PSTATE_UNDEF) &&
172 (pp->state == PATH_DOWN ||
173 pp->state == PATH_SHAKY))
174 dm_fail_path(mpp->alias, pp->dev_t);
180 sync_maps_state(vector mpvec)
183 struct multipath *mpp;
185 vector_foreach_slot (mpvec, mpp, i)
190 flush_map(struct multipath * mpp, struct vectors * vecs)
193 * clear references to this map before flushing so we can ignore
194 * the spurious uevent we may generate with the dm_flush_map call below
196 if (dm_flush_map(mpp->alias)) {
198 * May not really be an error -- if the map was already flushed
199 * from the device mapper by dmsetup(8) for instance.
201 condlog(0, "%s: can't flush", mpp->alias);
206 condlog(2, "%s: devmap removed", mpp->alias);
209 orphan_paths(vecs->pathvec, mpp);
210 remove_map_and_stop_waiter(mpp, vecs, 1);
216 uev_add_map (struct sysfs_device * dev, struct vectors * vecs)
218 condlog(2, "%s: add map (uevent)", dev->kernel);
219 return ev_add_map(dev, vecs);
/*
 * ev_add_map: register a device-mapper map with the daemon.
 *
 * Resolves major:minor from the sysfs "dev" attribute, maps it to a dm
 * alias, and -- when the map exists in the kernel and is a multipath
 * target -- adds it to vecs->mpvec; otherwise falls back to creating
 * the map via coalesce_paths() on the reference wwid.
 *
 * NOTE(review): this listing is missing interior lines (locals, braces,
 * returns); comments only describe what the visible code shows.
 */
223 ev_add_map (struct sysfs_device * dev, struct vectors * vecs)
229 	struct multipath * mpp;
/* sysfs "dev" attribute holds the "major:minor" string */
233 	dev_t = sysfs_attr_get_value(dev->devpath, "dev");
235 	if (!dev_t || sscanf(dev_t, "%d:%d", &major, &minor) != 2)
238 	alias = dm_mapname(major, minor);
243 	map_present = dm_map_present(alias);
/* ignore dm devices that are not multipath targets */
245 	if (map_present && dm_type(alias, TGT_MPATH) <= 0) {
246 		condlog(4, "%s: not a multipath map", alias);
251 	mpp = find_mp_by_alias(vecs->mpvec, alias);
255 	 * Not really an error -- we generate our own uevent
256 	 * if we create a multipath mapped device as a result
259 		condlog(0, "%s: devmap already registered",
266 	 * now we can register the map
268 	if (map_present && (mpp = add_map_without_path(vecs, minor, alias))) {
270 		condlog(2, "%s: devmap %s added", alias, dev->kernel);
/* map not in the kernel yet: try to create it from its reference wwid */
273 	refwwid = get_refwwid(dev->kernel, DEV_DEVMAP, vecs->pathvec);
276 		r = coalesce_paths(vecs, NULL, refwwid, 0);
281 		condlog(2, "%s: devmap %s added", alias, dev->kernel);
283 	condlog(0, "%s: uev_add_map %s failed", alias, dev->kernel);
291 uev_remove_map (struct sysfs_device * dev, struct vectors * vecs)
293 condlog(2, "%s: remove map (uevent)", dev->kernel);
294 return ev_remove_map(dev->kernel, vecs);
298 ev_remove_map (char * devname, struct vectors * vecs)
300 struct multipath * mpp;
302 mpp = find_mp_by_str(vecs->mpvec, devname);
305 condlog(2, "%s: devmap not registered, can't remove",
309 flush_map(mpp, vecs);
315 uev_umount_map (struct sysfs_device * dev, struct vectors * vecs)
317 struct multipath * mpp;
319 condlog(2, "%s: umount map (uevent)", dev->kernel);
321 mpp = find_mp_by_str(vecs->mpvec, dev->kernel);
326 update_mpp_paths(mpp, vecs->pathvec);
327 verify_paths(mpp, vecs, NULL);
329 if (!VECTOR_SIZE(mpp->paths))
330 flush_map(mpp, vecs);
336 uev_add_path (struct sysfs_device * dev, struct vectors * vecs)
338 condlog(2, "%s: add path (uevent)", dev->kernel);
339 return (ev_add_path(dev->kernel, vecs) != 1)? 0 : 1;
/*
 * ev_add_path: add a block-device path to the daemon state and to its
 * multipath map (creating or reloading the map as needed).
 *
 * Visible flow: validate the name, reject duplicates, store pathinfo,
 * require a wwid, apply blacklist filtering, then either adopt the path
 * into an existing map (ACT_RELOAD) or create a new one (ACT_CREATE),
 * push the table to device-mapper, and resync daemon state.
 *
 * NOTE(review): interior lines (locals, braces, returns) are missing
 * from this listing; comments only annotate the visible code.
 */
350 ev_add_path (char * devname, struct vectors * vecs)
352 	struct multipath * mpp;
354 	char empty_buff[WWID_SIZE] = {0};
/* reject relative names so nothing escapes the device directory */
356 	if (strstr(devname, "..") != NULL) {
358 	 * Don't allow relative device names in the pathvec
360 		condlog(0, "%s: path name is invalid", devname);
364 	pp = find_path_by_dev(vecs->pathvec, devname);
367 		condlog(0, "%s: spurious uevent, path already in pathvec",
374 	 * get path vital state
376 	if (!(pp = store_pathinfo(vecs->pathvec, conf->hwtable,
378 		condlog(0, "%s: failed to store path info", devname);
381 	pp->checkint = conf->checkint;
385 	 * need path UID to go any further
/* an all-zero wwid means the uid callout failed */
387 	if (memcmp(empty_buff, pp->wwid, WWID_SIZE) == 0) {
388 		condlog(0, "%s: failed to get path uid", devname);
389 		return 1; /* leave path added to pathvec */
/* blacklisted by config: drop the freshly stored path again */
391 	if (filter_path(conf, pp) > 0){
392 		int i = find_slot(vecs->pathvec, (void *)pp);
394 			vector_del_slot(vecs->pathvec, i);
398 	mpp = pp->mpp = find_mp_by_wwid(vecs->mpvec, pp->wwid);
401 		condlog(4,"%s: adopting all paths for path %s",
402 			mpp->alias, pp->dev);
403 		if (adopt_paths(vecs->pathvec, mpp))
404 			return 1; /* leave path added to pathvec */
406 		verify_paths(mpp, vecs, NULL);
407 		mpp->flush_on_last_del = FLUSH_UNDEF;
408 		mpp->action = ACT_RELOAD;
/* no existing map for this wwid: build a new one around the path */
411 		condlog(4,"%s: creating new map", pp->dev);
412 		if ((mpp = add_map_with_path(vecs, pp, 1)))
413 			mpp->action = ACT_CREATE;
415 			return 1; /* leave path added to pathvec */
419 	 * push the map to the device-mapper
421 	if (setup_map(mpp)) {
422 		condlog(0, "%s: failed to setup map for addition of new "
423 			"path %s", mpp->alias, devname);
427 	 * reload the map for the multipath mapped device
429 	if (domap(mpp) <= 0) {
430 		condlog(0, "%s: failed in domap for addition of new "
431 			"path %s", mpp->alias, devname);
433 		 * deal with asynchronous uevents :((
435 		if (mpp->action == ACT_RELOAD) {
436 			condlog(0, "%s: uev_add_path sleep", mpp->alias);
438 			update_mpp_paths(mpp, vecs->pathvec);
447 	 * update our state from kernel regardless of create or reload
449 	if (setup_multipath(vecs, mpp))
/* a newly created map needs its dm-event waiter thread */
454 	if (mpp->action == ACT_CREATE &&
455 	    start_waiter_thread(mpp, vecs))
458 	condlog(2, "%s path added to devmap %s", devname, mpp->alias);
/* failure path: tear the map back down */
462 	remove_map(mpp, vecs, 1);
467 uev_remove_path (struct sysfs_device * dev, struct vectors * vecs)
471 condlog(2, "%s: remove path (uevent)", dev->kernel);
472 retval = ev_remove_path(dev->kernel, vecs);
474 sysfs_device_put(dev);
/*
 * ev_remove_path: remove a path from the daemon state and from its map.
 *
 * Visible flow: look the path up (a miss is benign), refresh the map's
 * path list, drop the path; if it was the last path either flush the
 * whole map (honoring flush_on_last_del) or reload the map without it,
 * then resync daemon state and finally drop the path from pathvec.
 *
 * NOTE(review): interior lines (locals, braces, returns) are missing
 * from this listing; comments only annotate the visible code.
 */
480 ev_remove_path (char * devname, struct vectors * vecs)
482 	struct multipath * mpp;
486 	pp = find_path_by_dev(vecs->pathvec, devname);
489 	/* Not an error; path might have been purged earlier */
490 		condlog(0, "%s: path already removed", devname);
495 	 * avoid referring to the map of an orphaned path
497 	if ((mpp = pp->mpp)) {
499 		 * transform the mp->pg vector of vectors of paths
500 		 * into a mp->params string to feed the device-mapper
502 		if (update_mpp_paths(mpp, vecs->pathvec)) {
503 			condlog(0, "%s: failed to update paths",
505 		if ((i = find_slot(mpp->paths, (void *)pp)) != -1)
508 			vector_del_slot(mpp->paths, i);
511 		 * remove the map IFF removing the last path
513 		if (VECTOR_SIZE(mpp->paths) == 0) {
514 			char alias[WWID_SIZE];
517 			 * flush_map will fail if the device is open
/* NOTE(review): strncpy may leave alias unterminated if
 * mpp->alias is >= WWID_SIZE chars -- confirm upstream fix */
519 			strncpy(alias, mpp->alias, WWID_SIZE);
520 			if (mpp->flush_on_last_del == FLUSH_ENABLED) {
521 				condlog(2, "%s Last path deleted, disabling queueing", mpp->alias);
523 				mpp->no_path_retry = NO_PATH_RETRY_FAIL;
524 				mpp->flush_on_last_del = FLUSH_IN_PROGRESS;
525 				dm_queue_if_no_path(mpp->alias, 0);
527 			if (!flush_map(mpp, vecs)) {
528 				condlog(2, "%s: removed map after"
529 					" removing all paths",
535 			 * Not an error, continue
/* paths remain: rebuild the table and reload the map without pp */
539 		if (setup_map(mpp)) {
540 			condlog(0, "%s: failed to setup map for"
541 				" removal of path %s", mpp->alias,
548 		mpp->action = ACT_RELOAD;
549 		if (domap(mpp) <= 0) {
550 			condlog(0, "%s: failed in domap for "
551 				"removal of path %s",
552 				mpp->alias, devname);
556 		 * update our state from kernel
558 		if (setup_multipath(vecs, mpp)) {
563 		condlog(2, "%s: path removed from map %s",
564 			devname, mpp->alias);
/* finally drop the path object itself from the daemon's pathvec */
569 	if ((i = find_slot(vecs->pathvec, (void *)pp)) != -1)
570 		vector_del_slot(vecs->pathvec, i);
/* failure path: tear the map down and stop its waiter */
577 	remove_map_and_stop_waiter(mpp, vecs, 1);
582 map_discovery (struct vectors * vecs)
584 struct multipath * mpp;
587 if (dm_get_maps(vecs->mpvec))
590 vector_foreach_slot (vecs->mpvec, mpp, i)
591 if (setup_multipath(vecs, mpp))
/*
 * uxsock_trigger: handle one CLI command received on the unix socket.
 *
 * Parses the command under the vecs lock (with a cancellation cleanup
 * handler so the lock is released if the thread is cancelled) and
 * normalizes the reply: "fail\n" on error, "ok\n" when the handler
 * produced no output, and the handler's own reply otherwise.
 *
 * NOTE(review): interior lines (locals, lock call, returns) are missing
 * from this listing; comments only annotate the visible code.
 */
598 uxsock_trigger (char * str, char ** reply, int * len, void * trigger_data)
600 	struct vectors * vecs;
605 	vecs = (struct vectors *)trigger_data;
/* ensure the lock is dropped even on thread cancellation */
607 	pthread_cleanup_push(cleanup_lock, &vecs->lock);
610 	r = parse_cmd(str, reply, len, vecs);
613 		*reply = STRDUP("fail\n");
614 		*len = strlen(*reply) + 1;
617 	else if (!r && *len == 0) {
618 		*reply = STRDUP("ok\n");
619 		*len = strlen(*reply) + 1;
622 	/* else if (r < 0) leave *reply alone */
624 	lock_cleanup_pop(vecs->lock);
/*
 * Decide whether a uevent devpath should be discarded.
 *
 * Keeps only whole block devices: returns 1 (discard) when the devpath
 * has no "/block/" component or when it names a partition (an extra
 * path component after the device name), 0 (keep) otherwise.
 *
 * The %10s / %10[^/] conversions bound the scans to 10 characters, so
 * the buffers need 11 bytes for the terminating NUL.
 */
static int
uev_discard(char * devpath)
{
	char *tmp;
	char a[11], b[11];

	/*
	 * keep only block devices, discard partitions
	 */
	tmp = strstr(devpath, "/block/");
	if (tmp == NULL) {
		condlog(4, "no /block/ in '%s'", devpath);
		return 1;
	}
	if (sscanf(tmp, "/block/%10s", a) != 1 ||
	    sscanf(tmp, "/block/%10[^/]/%10s", a, b) == 2) {
		condlog(4, "discard event on %s", devpath);
		return 1;
	}
	return 0;
}
/*
 * uev_trigger: dispatch a kernel uevent to the right map/path handler.
 *
 * Discards partition events, resolves the sysfs device, then routes:
 * dm-* devices get map change/remove/umount handling (dm "add" events
 * are ignored because the tables are not initialised yet); everything
 * else is treated as a path add/remove after devnode filtering.
 *
 * NOTE(review): interior lines (locking, returns, braces) are missing
 * from this listing; comments only annotate the visible code.
 */
651 uev_trigger (struct uevent * uev, void * trigger_data)
654 	struct sysfs_device *sysdev;
655 	struct vectors * vecs;
657 	vecs = (struct vectors *)trigger_data;
/* ignore partitions and non-block devpaths early */
659 	if (uev_discard(uev->devpath))
662 	sysdev = sysfs_device_get(uev->devpath);
670 	 * Add events are ignored here as the tables
671 	 * are not fully initialised then.
673 	if (!strncmp(sysdev->kernel, "dm-", 3)) {
674 		if (!strncmp(uev->action, "change", 6)) {
675 			r = uev_add_map(sysdev, vecs);
678 		if (!strncmp(uev->action, "remove", 6)) {
679 			r = uev_remove_map(sysdev, vecs);
682 		if (!strncmp(uev->action, "umount", 6)) {
683 			r = uev_umount_map(sysdev, vecs);
690 	 * path add/remove event
/* honor the devnode blacklist/exceptions before touching paths */
692 	if (filter_devnode(conf->blist_devnode, conf->elist_devnode,
696 	if (!strncmp(uev->action, "add", 3)) {
697 		r = uev_add_path(sysdev, vecs);
700 	if (!strncmp(uev->action, "remove", 6)) {
701 		r = uev_remove_path(sysdev, vecs);
711 ueventloop (void * ap)
713 block_signal(SIGUSR1, NULL);
714 block_signal(SIGHUP, NULL);
716 if (uevent_listen(&uev_trigger, ap))
717 fprintf(stderr, "error starting uevent listener");
/*
 * uxlsnrloop: thread entry point for the CLI unix-socket listener.
 *
 * Blocks SIGUSR1/SIGHUP in this thread, registers every CLI command
 * handler, then serves the socket via uxsock_listen() with
 * uxsock_trigger() as the command callback.
 *
 * NOTE(review): interior lines (handler-table allocation, return) are
 * missing from this listing; comments only annotate the visible code.
 */
723 uxlsnrloop (void * ap)
725 	block_signal(SIGUSR1, NULL);
726 	block_signal(SIGHUP, NULL);
/* register one callback per CLI keyword combination */
731 	set_handler_callback(LIST+PATHS, cli_list_paths);
732 	set_handler_callback(LIST+PATHS+FMT, cli_list_paths_fmt);
733 	set_handler_callback(LIST+MAPS, cli_list_maps);
734 	set_handler_callback(LIST+STATUS, cli_list_status);
735 	set_handler_callback(LIST+MAPS+STATUS, cli_list_maps_status);
736 	set_handler_callback(LIST+MAPS+STATS, cli_list_maps_stats);
737 	set_handler_callback(LIST+MAPS+FMT, cli_list_maps_fmt);
738 	set_handler_callback(LIST+MAPS+TOPOLOGY, cli_list_maps_topology);
739 	set_handler_callback(LIST+TOPOLOGY, cli_list_maps_topology);
740 	set_handler_callback(LIST+MAP+TOPOLOGY, cli_list_map_topology);
741 	set_handler_callback(LIST+CONFIG, cli_list_config);
742 	set_handler_callback(LIST+BLACKLIST, cli_list_blacklist);
743 	set_handler_callback(LIST+DEVICES, cli_list_devices);
744 	set_handler_callback(LIST+WILDCARDS, cli_list_wildcards);
745 	set_handler_callback(ADD+PATH, cli_add_path);
746 	set_handler_callback(DEL+PATH, cli_del_path);
747 	set_handler_callback(ADD+MAP, cli_add_map);
748 	set_handler_callback(DEL+MAP, cli_del_map);
749 	set_handler_callback(SWITCH+MAP+GROUP, cli_switch_group);
750 	set_handler_callback(RECONFIGURE, cli_reconfigure);
751 	set_handler_callback(SUSPEND+MAP, cli_suspend);
752 	set_handler_callback(RESUME+MAP, cli_resume);
753 	set_handler_callback(RESIZE+MAP, cli_resize);
754 	set_handler_callback(REINSTATE+PATH, cli_reinstate);
755 	set_handler_callback(FAIL+PATH, cli_fail);
756 	set_handler_callback(DISABLEQ+MAP, cli_disable_queueing);
757 	set_handler_callback(RESTOREQ+MAP, cli_restore_queueing);
758 	set_handler_callback(DISABLEQ+MAPS, cli_disable_all_queueing);
759 	set_handler_callback(RESTOREQ+MAPS, cli_restore_all_queueing);
760 	set_handler_callback(QUIT, cli_quit);
/* serve CLI commands until shutdown */
763 	uxsock_listen(&uxsock_trigger, ap);
769 exit_daemon (int status)
772 fprintf(stderr, "bad exit status. see daemon.log\n");
774 condlog(3, "unlink pidfile");
775 unlink(DEFAULT_PIDFILE);
777 pthread_mutex_lock(&exit_mutex);
778 pthread_cond_signal(&exit_cond);
779 pthread_mutex_unlock(&exit_mutex);
785 fail_path (struct path * pp, int del_active)
790 condlog(2, "checker failed path %s in map %s",
791 pp->dev_t, pp->mpp->alias);
793 dm_fail_path(pp->mpp->alias, pp->dev_t);
795 update_queue_mode_del_path(pp->mpp);
799 * caller must have locked the path list before calling that function
802 reinstate_path (struct path * pp, int add_active)
807 if (dm_reinstate_path(pp->mpp->alias, pp->dev_t))
808 condlog(0, "%s: reinstate failed", pp->dev_t);
810 condlog(2, "%s: reinstated", pp->dev_t);
812 update_queue_mode_add_path(pp->mpp);
817 enable_group(struct path * pp)
819 struct pathgroup * pgp;
822 * if path is added through uev_add_path, pgindex can be unset.
823 * next update_strings() will set it, upon map reload event.
825 * we can safely return here, because upon map reload, all
826 * PG will be enabled.
828 if (!pp->mpp->pg || !pp->pgindex)
831 pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
833 if (pgp->status == PGSTATE_DISABLED) {
834 condlog(2, "%s: enable group #%i", pp->mpp->alias, pp->pgindex);
835 dm_enablegroup(pp->mpp->alias, pp->pgindex);
840 mpvec_garbage_collector (struct vectors * vecs)
842 struct multipath * mpp;
848 vector_foreach_slot (vecs->mpvec, mpp, i) {
849 if (mpp && mpp->alias && !dm_map_present(mpp->alias)) {
850 condlog(2, "%s: remove dead map", mpp->alias);
851 remove_map_and_stop_waiter(mpp, vecs, 1);
858 defered_failback_tick (vector mpvec)
860 struct multipath * mpp;
863 vector_foreach_slot (mpvec, mpp, i) {
865 * defered failback getting sooner
867 if (mpp->pgfailback > 0 && mpp->failback_tick > 0) {
868 mpp->failback_tick--;
870 if (!mpp->failback_tick && need_switch_pathgroup(mpp, 1))
871 switch_pathgroup(mpp);
877 retry_count_tick(vector mpvec)
879 struct multipath *mpp;
882 vector_foreach_slot (mpvec, mpp, i) {
883 if (mpp->retry_tick) {
884 mpp->stat_total_queueing_time++;
885 condlog(4, "%s: Retrying.. No active path", mpp->alias);
886 if(--mpp->retry_tick == 0) {
887 dm_queue_if_no_path(mpp->alias, 0);
888 condlog(2, "%s: Disable queueing", mpp->alias);
/*
 * check_path: run one checker cycle for a single path.
 *
 * Visible flow: honor the per-path tick countdown, (re)select a checker
 * if none is set, run the checker (or mark PATH_DOWN if sysfs says the
 * device is offline), then on a state change fail or reinstate the path
 * in the DM, adjust queueing/failback bookkeeping, and schedule the
 * next check; on a stable good state the check interval backs off
 * exponentially up to conf->max_checkint.
 *
 * NOTE(review): interior lines (locals, braces, returns) are missing
 * from this listing; comments only annotate the visible code.
 */
895 check_path (struct vectors * vecs, struct path * pp)
902 	if (pp->tick && --pp->tick)
903 		return; /* don't check this path yet */
906 	 * provision a next check soonest,
907 	 * in case we exit abnormaly from here
909 	pp->tick = conf->checkint;
/* lazily (re)select a checker for the path */
911 	if (!checker_selected(&pp->checker)) {
912 		pathinfo(pp, conf->hwtable, DI_SYSFS);
915 	if (!checker_selected(&pp->checker)) {
916 		condlog(0, "%s: checker is not set", pp->dev);
920 	 * Set checker in async mode.
921 	 * Honored only by checker implementing the said mode.
923 	checker_set_async(&pp->checker);
/* an offline sysfs state overrides the checker verdict */
925 	if (path_offline(pp))
926 		newstate = PATH_DOWN;
928 		newstate = checker_check(&pp->checker);
931 		condlog(2, "%s: unusable path", pp->dev);
932 		pathinfo(pp, conf->hwtable, 0);
936 	 * Async IO in flight. Keep the previous path state
937 	 * and reschedule as soon as possible
939 	if (newstate == PATH_PENDING) {
/* state transition handling */
943 	if (newstate != pp->state) {
944 		int oldstate = pp->state;
945 		pp->state = newstate;
946 		LOG_MSG(1, checker_message(&pp->checker));
949 		 * upon state change, reset the checkint
950 		 * to the shortest delay
952 		pp->checkint = conf->checkint;
954 		if (newstate == PATH_DOWN || newstate == PATH_SHAKY ||
955 		    update_multipath_strings(pp->mpp, vecs->pathvec)) {
957 			 * proactively fail path in the DM
959 			if (oldstate == PATH_UP ||
960 			    oldstate == PATH_GHOST)
966 			 * cancel scheduled failback
968 			pp->mpp->failback_tick = 0;
970 			pp->mpp->stat_path_failures++;
975 		 * reinstate this path
977 		if (oldstate != PATH_UP &&
978 		    oldstate != PATH_GHOST)
979 			reinstate_path(pp, 1);
981 			reinstate_path(pp, 0);
984 		 * schedule [defered] failback
986 		if (pp->mpp->pgfailback > 0)
987 			pp->mpp->failback_tick =
988 				pp->mpp->pgfailback + 1;
989 		else if (pp->mpp->pgfailback == -FAILBACK_IMMEDIATE &&
990 			 need_switch_pathgroup(pp->mpp, 1))
991 			switch_pathgroup(pp->mpp);
994 		 * if at least one path is up in a group, and
995 		 * the group is disabled, re-enable it
997 		if (newstate == PATH_UP)
/* stable good state: exponential back-off of the check interval */
1000 	else if (newstate == PATH_UP || newstate == PATH_GHOST) {
1001 		LOG_MSG(4, checker_message(&pp->checker));
1003 		 * double the next check delay.
1004 		 * max at conf->max_checkint
1006 		if (pp->checkint < (conf->max_checkint / 2))
1007 			pp->checkint = 2 * pp->checkint;
1009 			pp->checkint = conf->max_checkint;
1011 		pp->tick = pp->checkint;
1012 		condlog(4, "%s: delay next check %is",
1013 			pp->dev_t, pp->tick);
1015 	else if (newstate == PATH_DOWN)
1016 		LOG_MSG(2, checker_message(&pp->checker));
1018 	pp->state = newstate;
1021 	 * path prio refreshing
1023 	condlog(4, "path prio refresh");
1024 	pathinfo(pp, conf->hwtable, DI_PRIO);
1027 	 * pathgroup failback policy
1029 	if (need_switch_pathgroup(pp->mpp, 0)) {
1030 		if (pp->mpp->pgfailback > 0 &&
1031 		    pp->mpp->failback_tick <= 0)
1032 			pp->mpp->failback_tick =
1033 				pp->mpp->pgfailback + 1;
1034 		else if (pp->mpp->pgfailback ==
1035 			 -FAILBACK_IMMEDIATE)
1036 			switch_pathgroup(pp->mpp);
/*
 * checkerloop: thread entry point for the periodic path-checker loop.
 *
 * Visible flow: lock memory, initialize every path's check interval,
 * then (under the vecs lock, with SIGHUP blocked for the critical
 * section and a cancellation cleanup handler) check each path, run the
 * deferred-failback and retry-count ticks, and periodically garbage
 * collect dead maps.
 *
 * NOTE(review): interior lines (loop construct, sleep, returns) are
 * missing from this listing; comments only annotate the visible code.
 */
1041 checkerloop (void *ap)
1043 	struct vectors *vecs;
/* avoid being paged out while monitoring storage paths */
1049 	mlockall(MCL_CURRENT | MCL_FUTURE);
1050 	vecs = (struct vectors *)ap;
1051 	condlog(2, "path checkers start up");
1054 	 * init the path check interval
1056 	vector_foreach_slot (vecs->pathvec, pp, i) {
1057 		pp->checkint = conf->checkint;
/* keep SIGHUP out of the checker critical section */
1061 	block_signal(SIGHUP, &old);
1062 	pthread_cleanup_push(cleanup_lock, &vecs->lock);
1066 	if (vecs->pathvec) {
1067 		vector_foreach_slot (vecs->pathvec, pp, i) {
1068 			check_path(vecs, pp);
1072 	defered_failback_tick(vecs->mpvec);
1073 	retry_count_tick(vecs->mpvec);
1078 	condlog(4, "map garbage collection");
1079 	mpvec_garbage_collector(vecs);
1083 	lock_cleanup_pop(vecs->lock);
1084 	pthread_sigmask(SIG_SETMASK, &old, NULL);
/*
 * configure: full path and map (re)discovery.
 *
 * Visible flow: allocate the path/map vectors if needed, discover paths
 * from sysfs (filtering blacklisted ones) and maps from dm, coalesce
 * paths into a new map set, remove maps no longer allowed by the
 * configuration, sync path states with the kernel, install the new map
 * vector, and optionally start a dm-event waiter thread per map.
 *
 * NOTE(review): interior lines (locals, error returns, braces) are
 * missing from this listing; comments only annotate the visible code.
 */
1091 configure (struct vectors * vecs, int start_waiters)
1093 	struct multipath * mpp;
1098 	if (!vecs->pathvec && !(vecs->pathvec = vector_alloc()))
1101 	if (!vecs->mpvec && !(vecs->mpvec = vector_alloc()))
1104 	if (!(mpvec = vector_alloc()))
1108 	 * probe for current path (from sysfs) and map (from dm) sets
1110 	path_discovery(vecs->pathvec, conf, DI_ALL);
/* drop blacklisted paths before building maps */
1112 	vector_foreach_slot (vecs->pathvec, pp, i){
1113 		if (filter_path(conf, pp) > 0){
1114 			vector_del_slot(vecs->pathvec, i);
1119 		pp->checkint = conf->checkint;
1121 	if (map_discovery(vecs))
1125 	 * create new set of maps & push changed ones into dm
1127 	if (coalesce_paths(vecs, mpvec, NULL, 0))
1131 	 * may need to remove some maps which are no longer relevant
1132 	 * e.g., due to blacklist changes in conf file
1134 	if (coalesce_maps(vecs, mpvec))
1139 	sync_maps_state(mpvec);
1142 	 * purge dm of old maps
1147 	 * save new set of maps formed by considering current path state
1149 	vector_free(vecs->mpvec);
1150 	vecs->mpvec = mpvec;
1153 	 * start dm event waiter threads for these new maps
1155 	vector_foreach_slot(vecs->mpvec, mpp, i) {
1156 		if (setup_multipath(vecs, mpp))
1159 		if (start_waiter_thread(mpp, vecs))
/*
 * reconfigure: reload the configuration file and rebuild all state.
 *
 * Visible flow: tear down the old maps and paths (they reference the
 * old config), reload the config, carry the verbosity over from the
 * old config, and re-apply default check intervals.
 *
 * NOTE(review): interior lines (the final configure() call, returns)
 * are missing from this listing; comments only annotate the visible
 * code.
 */
1166 reconfigure (struct vectors * vecs)
1168 	struct config * old = conf;
1171 	 * free old map and path vectors ... they use old conf state
1173 	if (VECTOR_SIZE(vecs->mpvec))
1174 		remove_maps_and_stop_waiters(vecs);
1176 	if (VECTOR_SIZE(vecs->pathvec))
1177 		free_pathvec(vecs->pathvec, FREE_PATHS);
1179 	vecs->pathvec = NULL;
1182 	if (load_config(DEFAULT_CONFIGFILE))
/* keep the verbosity the daemon was started with */
1185 	conf->verbosity = old->verbosity;
1187 	if (!conf->checkint) {
1188 		conf->checkint = DEFAULT_CHECKINT;
1189 		conf->max_checkint = MAX_CHECKINT(conf->checkint);
/*
 * init_vecs: allocate and initialize the daemon's global vectors
 * structure (path/map vectors plus their mutex).
 *
 * NOTE(review): interior lines (NULL checks, returns, the path init
 * call site) are missing from this listing; comments only annotate the
 * visible code.
 */
1196 static struct vectors *
1199 	struct vectors * vecs;
1201 	vecs = (struct vectors *)MALLOC(sizeof(struct vectors));
1207 		(pthread_mutex_t *)MALLOC(sizeof(pthread_mutex_t));
1209 	if (!vecs->lock.mutex)
1212 	pthread_mutex_init(vecs->lock.mutex, NULL);
1213 	vecs->lock.depth = 0;
1219 	condlog(0, "failed to init paths");
1224 signal_set(int signo, void (*func) (int))
1227 struct sigaction sig;
1228 struct sigaction osig;
1230 sig.sa_handler = func;
1231 sigemptyset(&sig.sa_mask);
1234 r = sigaction(signo, &sig, &osig);
1239 return (osig.sa_handler);
1245 condlog(2, "reconfigure (SIGHUP)");
1249 unlock(gvecs->lock);
1252 dbg_free_final(NULL);
1265 condlog(3, "SIGUSR1 received");
1271 signal_set(SIGHUP, sighup);
1272 signal_set(SIGUSR1, sigusr1);
1273 signal_set(SIGINT, sigend);
1274 signal_set(SIGTERM, sigend);
1275 signal(SIGPIPE, SIG_IGN);
1282 static struct sched_param sched_param = {
1283 .sched_priority = 99
1286 res = sched_setscheduler (0, SCHED_RR, &sched_param);
1289 condlog(LOG_WARNING, "Could not set SCHED_RR at priority 99");
/*
 * Write 'val' to /proc/self/oom_adj so the OOM killer (de)prioritizes
 * this daemon. Silently returns if the file cannot be opened (e.g. on
 * kernels without that interface).
 */
static void
set_oom_adj (int val)
{
	FILE *fp;

	fp = fopen("/proc/self/oom_adj", "w");

	if (!fp)
		return;

	fprintf(fp, "%i", val);
	fclose(fp);
}
1308 setup_thread_attr(pthread_attr_t *attr, size_t stacksize, int detached)
1310 if (pthread_attr_init(attr)) {
1311 fprintf(stderr, "can't initialize thread attr: %s\n",
1315 if (stacksize < PTHREAD_STACK_MIN)
1316 stacksize = PTHREAD_STACK_MIN;
1318 if (pthread_attr_setstacksize(attr, stacksize)) {
1319 fprintf(stderr, "can't set thread stack size to %lu: %s\n",
1320 (unsigned long)stacksize, strerror(errno));
1323 if (detached && pthread_attr_setdetachstate(attr,
1324 PTHREAD_CREATE_DETACHED)) {
1325 fprintf(stderr, "can't set thread to detached: %s\n",
1332 child (void * param)
1334 pthread_t check_thr, uevent_thr, uxlsnr_thr;
1335 pthread_attr_t log_attr, misc_attr;
1336 struct vectors * vecs;
1338 mlockall(MCL_CURRENT | MCL_FUTURE);
1340 setup_thread_attr(&misc_attr, 64 * 1024, 1);
1341 setup_thread_attr(&waiter_attr, 32 * 1024, 1);
1344 setup_thread_attr(&log_attr, 64 * 1024, 0);
1345 log_thread_start(&log_attr);
1346 pthread_attr_destroy(&log_attr);
1349 condlog(2, "--------start up--------");
1350 condlog(2, "read " DEFAULT_CONFIGFILE);
1352 if (load_config(DEFAULT_CONFIGFILE))
1355 if (init_checkers()) {
1356 condlog(0, "failed to initialize checkers");
1360 condlog(0, "failed to initialize prioritizers");
1364 setlogmask(LOG_UPTO(conf->verbosity + 3));
1367 * fill the voids left in the config file
1369 if (!conf->checkint) {
1370 conf->checkint = DEFAULT_CHECKINT;
1371 conf->max_checkint = MAX_CHECKINT(conf->checkint);
1374 if (conf->max_fds) {
1375 struct rlimit fd_limit;
1377 fd_limit.rlim_cur = conf->max_fds;
1378 fd_limit.rlim_max = conf->max_fds;
1379 if (setrlimit(RLIMIT_NOFILE, &fd_limit) < 0)
1380 condlog(0, "can't set open fds limit to %d : %s\n",
1381 conf->max_fds, strerror(errno));
1384 if (pidfile_create(DEFAULT_PIDFILE, getpid())) {
1393 vecs = gvecs = init_vecs();
1398 if (sysfs_init(conf->sysfs_dir, FILE_NAME_SIZE)) {
1399 condlog(0, "can not find sysfs mount point");
1404 * fetch and configure both paths and multipaths
1406 if (configure(vecs, 1)) {
1407 condlog(0, "failure during configuration");
1413 pthread_create(&check_thr, &misc_attr, checkerloop, vecs);
1414 pthread_create(&uevent_thr, &misc_attr, ueventloop, vecs);
1415 pthread_create(&uxlsnr_thr, &misc_attr, uxlsnrloop, vecs);
1416 pthread_attr_destroy(&misc_attr);
1418 pthread_cond_wait(&exit_cond, &exit_mutex);
1423 block_signal(SIGHUP, NULL);
1425 remove_maps_and_stop_waiters(vecs);
1426 free_pathvec(vecs->pathvec, FREE_PATHS);
1428 pthread_cancel(check_thr);
1429 pthread_cancel(uevent_thr);
1430 pthread_cancel(uxlsnr_thr);
1436 free_handlers(handlers);
1441 /* Now all the waitevent threads will start rushing in. */
1442 while (vecs->lock.depth > 0) {
1443 sleep (1); /* This is weak. */
1444 condlog(3,"Have %d wait event checkers threads to de-alloc, waiting..\n", vecs->lock.depth);
1446 pthread_mutex_destroy(vecs->lock.mutex);
1447 FREE(vecs->lock.mutex);
1448 vecs->lock.depth = 0;
1449 vecs->lock.mutex = NULL;
1453 condlog(2, "--------shut down-------");
1464 * Freeing config must be done after condlog() and dm_lib_exit(),
1465 * because logging functions like dlog() and dm_write_log()
1466 * reference the config.
1472 dbg_free_final(NULL);
1484 if( (pid = fork()) < 0){
1485 fprintf(stderr, "Failed first fork : %s\n", strerror(errno));
1493 if ( (pid = fork()) < 0)
1494 fprintf(stderr, "Failed second fork : %s\n", strerror(errno));
1498 in_fd = open("/dev/null", O_RDONLY);
1500 fprintf(stderr, "cannot open /dev/null for input : %s\n",
1504 out_fd = open("/dev/console", O_WRONLY);
1506 fprintf(stderr, "cannot open /dev/console for output : %s\n",
1511 close(STDIN_FILENO);
1513 close(STDOUT_FILENO);
1515 close(STDERR_FILENO);
1521 fprintf(stderr, "cannot chdir to '/', continuing\n");
1527 main (int argc, char *argv[])
1529 extern char *optarg;
1537 if (getuid() != 0) {
1538 fprintf(stderr, "need to be root\n");
1542 /* make sure we don't lock any path */
1544 umask(umask(077) | 022);
1546 conf = alloc_config();
1551 while ((arg = getopt(argc, argv, ":dv:k::")) != EOF ) {
1555 //debug=1; /* ### comment me out ### */
1558 if (sizeof(optarg) > sizeof(char *) ||
1559 !isdigit(optarg[0]))
1562 conf->verbosity = atoi(optarg);
1571 if (optind < argc) {
1576 while (optind < argc) {
1577 if (strchr(argv[optind], ' '))
1578 c += snprintf(c, s + CMDSIZE - c, "\"%s\" ", argv[optind]);
1580 c += snprintf(c, s + CMDSIZE - c, "%s ", argv[optind]);
1583 c += snprintf(c, s + CMDSIZE - c, "\n");
1601 return (child(NULL));