/*
 * Copyright (c) 2004, 2005 Christophe Varoqui
 * Copyright (c) 2005 Kiyoshi Ueda, NEC
 * Copyright (c) 2005 Benjamin Marzinski, Redhat
 * Copyright (c) 2005 Edward Goggin, EMC
9 #include <libdevmapper.h>
12 #include <sys/types.h>
16 #include <sys/resource.h>
36 #include <blacklist.h>
37 #include <structs_vec.h>
39 #include <devmapper.h>
42 #include <discovery.h>
46 #include <switchgroup.h>
48 #include <configure.h>
56 #include "cli_handlers.h"
60 #define FILE_NAME_SIZE 256
/*
 * Log a checker message for the path 'pp' (which must be in scope at
 * the call site) at verbosity 'a', but only when the message is
 * non-empty.
 *
 * Wrapped in do { } while (0) so the macro expands to a single
 * statement: the original bare-if form silently captured a following
 * 'else' (dangling-else) and left a stray empty statement after the
 * caller's semicolon.
 */
#define LOG_MSG(a, b) \
do { \
	if (strlen(b)) \
		condlog(a, "%s: %s", pp->dev, b); \
} while (0)
66 pthread_cond_t exit_cond = PTHREAD_COND_INITIALIZER;
67 pthread_mutex_t exit_mutex = PTHREAD_MUTEX_INITIALIZER;
72 * global copy of vecs for use in sig handlers
74 struct vectors * gvecs;
77 need_switch_pathgroup (struct multipath * mpp, int refresh)
79 struct pathgroup * pgp;
83 if (!mpp || mpp->pgfailback == -FAILBACK_MANUAL)
87 * Refresh path priority values
90 vector_foreach_slot (mpp->pg, pgp, i)
91 vector_foreach_slot (pgp->paths, pp, j)
92 pathinfo(pp, conf->hwtable, DI_PRIO);
94 mpp->bestpg = select_path_group(mpp);
96 if (mpp->bestpg != mpp->nextpg)
103 switch_pathgroup (struct multipath * mpp)
105 mpp->stat_switchgroup++;
106 dm_switchgroup(mpp->alias, mpp->bestpg);
107 condlog(2, "%s: switch to path group #%i",
108 mpp->alias, mpp->bestpg);
112 coalesce_maps(struct vectors *vecs, vector nmpv)
114 struct multipath * ompp;
115 vector ompv = vecs->mpvec;
119 vector_foreach_slot (ompv, ompp, i) {
120 if (!find_mp_by_wwid(nmpv, ompp->wwid)) {
122 * remove all current maps not allowed by the
123 * current configuration
125 if (dm_flush_map(ompp->alias)) {
126 condlog(0, "%s: unable to flush devmap",
129 * may be just because the device is open
131 if (!vector_alloc_slot(nmpv))
134 vector_set_slot(nmpv, ompp);
135 setup_multipath(vecs, ompp);
137 if ((j = find_slot(ompv, (void *)ompp)) != -1)
138 vector_del_slot(ompv, j);
144 condlog(2, "%s devmap removed", ompp->alias);
152 sync_map_state(struct multipath *mpp)
154 struct pathgroup *pgp;
161 vector_foreach_slot (mpp->pg, pgp, i){
162 vector_foreach_slot (pgp->paths, pp, j){
163 if (pp->state == PATH_UNCHECKED ||
164 pp->state == PATH_WILD)
166 if ((pp->dmstate == PSTATE_FAILED ||
167 pp->dmstate == PSTATE_UNDEF) &&
168 (pp->state == PATH_UP || pp->state == PATH_GHOST))
169 dm_reinstate_path(mpp->alias, pp->dev_t);
170 else if ((pp->dmstate == PSTATE_ACTIVE ||
171 pp->dmstate == PSTATE_UNDEF) &&
172 (pp->state == PATH_DOWN ||
173 pp->state == PATH_SHAKY))
174 dm_fail_path(mpp->alias, pp->dev_t);
180 sync_maps_state(vector mpvec)
183 struct multipath *mpp;
185 vector_foreach_slot (mpvec, mpp, i)
190 flush_map(struct multipath * mpp, struct vectors * vecs)
193 * clear references to this map before flushing so we can ignore
194 * the spurious uevent we may generate with the dm_flush_map call below
196 if (dm_flush_map(mpp->alias)) {
198 * May not really be an error -- if the map was already flushed
199 * from the device mapper by dmsetup(8) for instance.
201 condlog(0, "%s: can't flush", mpp->alias);
206 condlog(2, "%s: devmap removed", mpp->alias);
209 orphan_paths(vecs->pathvec, mpp);
210 remove_map_and_stop_waiter(mpp, vecs, 1);
216 uev_add_map (struct sysfs_device * dev, struct vectors * vecs)
218 condlog(2, "%s: add map (uevent)", dev->kernel);
219 return ev_add_map(dev, vecs);
223 ev_add_map (struct sysfs_device * dev, struct vectors * vecs)
229 struct multipath * mpp;
233 dev_t = sysfs_attr_get_value(dev->devpath, "dev");
235 if (!dev_t || sscanf(dev_t, "%d:%d", &major, &minor) != 2)
238 alias = dm_mapname(major, minor);
243 map_present = dm_map_present(alias);
245 if (map_present && dm_type(alias, TGT_MPATH) <= 0) {
246 condlog(4, "%s: not a multipath map", alias);
251 mpp = find_mp_by_alias(vecs->mpvec, alias);
255 * Not really an error -- we generate our own uevent
256 * if we create a multipath mapped device as a result
259 condlog(0, "%s: devmap already registered",
266 * now we can register the map
268 if (map_present && (mpp = add_map_without_path(vecs, minor, alias))) {
270 condlog(2, "%s: devmap %s added", alias, dev->kernel);
273 refwwid = get_refwwid(dev->kernel, DEV_DEVMAP, vecs->pathvec);
276 r = coalesce_paths(vecs, NULL, refwwid, 0);
281 condlog(2, "%s: devmap %s added", alias, dev->kernel);
283 condlog(0, "%s: uev_add_map %s failed", alias, dev->kernel);
291 uev_remove_map (struct sysfs_device * dev, struct vectors * vecs)
293 condlog(2, "%s: remove map (uevent)", dev->kernel);
294 return ev_remove_map(dev->kernel, vecs);
298 ev_remove_map (char * devname, struct vectors * vecs)
300 struct multipath * mpp;
302 mpp = find_mp_by_str(vecs->mpvec, devname);
305 condlog(2, "%s: devmap not registered, can't remove",
309 flush_map(mpp, vecs);
315 uev_umount_map (struct sysfs_device * dev, struct vectors * vecs)
317 struct multipath * mpp;
319 condlog(2, "%s: umount map (uevent)", dev->kernel);
321 mpp = find_mp_by_str(vecs->mpvec, dev->kernel);
326 update_mpp_paths(mpp, vecs->pathvec);
327 verify_paths(mpp, vecs, NULL);
329 if (!VECTOR_SIZE(mpp->paths))
330 flush_map(mpp, vecs);
336 uev_add_path (struct sysfs_device * dev, struct vectors * vecs)
338 condlog(2, "%s: add path (uevent)", dev->kernel);
339 return (ev_add_path(dev->kernel, vecs) != 1)? 0 : 1;
350 ev_add_path (char * devname, struct vectors * vecs)
352 struct multipath * mpp;
354 char empty_buff[WWID_SIZE] = {0};
356 pp = find_path_by_dev(vecs->pathvec, devname);
359 condlog(0, "%s: spurious uevent, path already in pathvec",
366 * get path vital state
368 if (!(pp = store_pathinfo(vecs->pathvec, conf->hwtable,
370 condlog(0, "%s: failed to store path info", devname);
373 pp->checkint = conf->checkint;
377 * need path UID to go any further
379 if (memcmp(empty_buff, pp->wwid, WWID_SIZE) == 0) {
380 condlog(0, "%s: failed to get path uid", devname);
381 return 1; /* leave path added to pathvec */
383 if (filter_path(conf, pp) > 0){
384 int i = find_slot(vecs->pathvec, (void *)pp);
386 vector_del_slot(vecs->pathvec, i);
390 mpp = pp->mpp = find_mp_by_wwid(vecs->mpvec, pp->wwid);
393 condlog(4,"%s: adopting all paths for path %s",
394 mpp->alias, pp->dev);
395 if (adopt_paths(vecs->pathvec, mpp))
396 return 1; /* leave path added to pathvec */
398 verify_paths(mpp, vecs, NULL);
399 mpp->flush_on_last_del = FLUSH_UNDEF;
400 mpp->action = ACT_RELOAD;
403 condlog(4,"%s: creating new map", pp->dev);
404 if ((mpp = add_map_with_path(vecs, pp, 1)))
405 mpp->action = ACT_CREATE;
407 return 1; /* leave path added to pathvec */
411 * push the map to the device-mapper
413 if (setup_map(mpp)) {
414 condlog(0, "%s: failed to setup map for addition of new "
415 "path %s", mpp->alias, devname);
419 * reload the map for the multipath mapped device
421 if (domap(mpp) <= 0) {
422 condlog(0, "%s: failed in domap for addition of new "
423 "path %s", mpp->alias, devname);
425 * deal with asynchronous uevents :((
427 if (mpp->action == ACT_RELOAD) {
428 condlog(0, "%s: uev_add_path sleep", mpp->alias);
430 update_mpp_paths(mpp, vecs->pathvec);
439 * update our state from kernel regardless of create or reload
441 if (setup_multipath(vecs, mpp))
446 if (mpp->action == ACT_CREATE &&
447 start_waiter_thread(mpp, vecs))
450 condlog(2, "%s path added to devmap %s", devname, mpp->alias);
454 remove_map(mpp, vecs, 1);
459 uev_remove_path (struct sysfs_device * dev, struct vectors * vecs)
463 condlog(2, "%s: remove path (uevent)", dev->kernel);
464 retval = ev_remove_path(dev->kernel, vecs);
466 sysfs_device_put(dev);
472 ev_remove_path (char * devname, struct vectors * vecs)
474 struct multipath * mpp;
478 pp = find_path_by_dev(vecs->pathvec, devname);
481 /* Not an error; path might have been purged earlier */
482 condlog(0, "%s: path already removed", devname);
487 * avoid referring to the map of an orphaned path
489 if ((mpp = pp->mpp)) {
491 * transform the mp->pg vector of vectors of paths
492 * into a mp->params string to feed the device-mapper
494 if (update_mpp_paths(mpp, vecs->pathvec)) {
495 condlog(0, "%s: failed to update paths",
499 if ((i = find_slot(mpp->paths, (void *)pp)) != -1)
500 vector_del_slot(mpp->paths, i);
503 * remove the map IFF removing the last path
505 if (VECTOR_SIZE(mpp->paths) == 0) {
506 char alias[WWID_SIZE];
509 * flush_map will fail if the device is open
511 strncpy(alias, mpp->alias, WWID_SIZE);
512 if (mpp->flush_on_last_del == FLUSH_ENABLED) {
513 condlog(2, "%s Last path deleted, disabling queueing", mpp->alias);
515 mpp->no_path_retry = NO_PATH_RETRY_FAIL;
516 mpp->flush_on_last_del = FLUSH_IN_PROGRESS;
517 dm_queue_if_no_path(mpp->alias, 0);
519 if (!flush_map(mpp, vecs)) {
520 condlog(2, "%s: removed map after"
521 " removing all paths",
527 * Not an error, continue
531 if (setup_map(mpp)) {
532 condlog(0, "%s: failed to setup map for"
533 " removal of path %s", mpp->alias,
540 mpp->action = ACT_RELOAD;
541 if (domap(mpp) <= 0) {
542 condlog(0, "%s: failed in domap for "
543 "removal of path %s",
544 mpp->alias, devname);
548 * update our state from kernel
550 if (setup_multipath(vecs, mpp)) {
555 condlog(2, "%s: path removed from map %s",
556 devname, mpp->alias);
561 if ((i = find_slot(vecs->pathvec, (void *)pp)) != -1)
562 vector_del_slot(vecs->pathvec, i);
569 remove_map_and_stop_waiter(mpp, vecs, 1);
574 map_discovery (struct vectors * vecs)
576 struct multipath * mpp;
579 if (dm_get_maps(vecs->mpvec))
582 vector_foreach_slot (vecs->mpvec, mpp, i)
583 if (setup_multipath(vecs, mpp))
590 uxsock_trigger (char * str, char ** reply, int * len, void * trigger_data)
592 struct vectors * vecs;
597 vecs = (struct vectors *)trigger_data;
599 pthread_cleanup_push(cleanup_lock, &vecs->lock);
602 r = parse_cmd(str, reply, len, vecs);
605 *reply = STRDUP("fail\n");
606 *len = strlen(*reply) + 1;
609 else if (!r && *len == 0) {
610 *reply = STRDUP("ok\n");
611 *len = strlen(*reply) + 1;
614 /* else if (r < 0) leave *reply alone */
616 lock_cleanup_pop(vecs->lock);
621 uev_discard(char * devpath)
627 * keep only block devices, discard partitions
629 tmp = strstr(devpath, "/block/");
631 condlog(4, "no /block/ in '%s'", devpath);
634 if (sscanf(tmp, "/block/%10s", a) != 1 ||
635 sscanf(tmp, "/block/%10[^/]/%10s", a, b) == 2) {
636 condlog(4, "discard event on %s", devpath);
643 uev_trigger (struct uevent * uev, void * trigger_data)
646 struct sysfs_device *sysdev;
647 struct vectors * vecs;
649 vecs = (struct vectors *)trigger_data;
651 if (uev_discard(uev->devpath))
654 sysdev = sysfs_device_get(uev->devpath);
662 * Add events are ignored here as the tables
663 * are not fully initialised then.
665 if (!strncmp(sysdev->kernel, "dm-", 3)) {
666 if (!strncmp(uev->action, "change", 6)) {
667 r = uev_add_map(sysdev, vecs);
670 if (!strncmp(uev->action, "remove", 6)) {
671 r = uev_remove_map(sysdev, vecs);
674 if (!strncmp(uev->action, "umount", 6)) {
675 r = uev_umount_map(sysdev, vecs);
682 * path add/remove event
684 if (filter_devnode(conf->blist_devnode, conf->elist_devnode,
688 if (!strncmp(uev->action, "add", 3)) {
689 r = uev_add_path(sysdev, vecs);
692 if (!strncmp(uev->action, "remove", 6)) {
693 r = uev_remove_path(sysdev, vecs);
703 ueventloop (void * ap)
705 block_signal(SIGUSR1, NULL);
706 block_signal(SIGHUP, NULL);
708 if (uevent_listen(&uev_trigger, ap))
709 fprintf(stderr, "error starting uevent listener");
715 uxlsnrloop (void * ap)
717 block_signal(SIGUSR1, NULL);
718 block_signal(SIGHUP, NULL);
723 set_handler_callback(LIST+PATHS, cli_list_paths);
724 set_handler_callback(LIST+PATHS+FMT, cli_list_paths_fmt);
725 set_handler_callback(LIST+MAPS, cli_list_maps);
726 set_handler_callback(LIST+STATUS, cli_list_status);
727 set_handler_callback(LIST+MAPS+STATUS, cli_list_maps_status);
728 set_handler_callback(LIST+MAPS+STATS, cli_list_maps_stats);
729 set_handler_callback(LIST+MAPS+FMT, cli_list_maps_fmt);
730 set_handler_callback(LIST+MAPS+TOPOLOGY, cli_list_maps_topology);
731 set_handler_callback(LIST+TOPOLOGY, cli_list_maps_topology);
732 set_handler_callback(LIST+MAP+TOPOLOGY, cli_list_map_topology);
733 set_handler_callback(LIST+CONFIG, cli_list_config);
734 set_handler_callback(LIST+BLACKLIST, cli_list_blacklist);
735 set_handler_callback(LIST+DEVICES, cli_list_devices);
736 set_handler_callback(LIST+WILDCARDS, cli_list_wildcards);
737 set_handler_callback(ADD+PATH, cli_add_path);
738 set_handler_callback(DEL+PATH, cli_del_path);
739 set_handler_callback(ADD+MAP, cli_add_map);
740 set_handler_callback(DEL+MAP, cli_del_map);
741 set_handler_callback(SWITCH+MAP+GROUP, cli_switch_group);
742 set_handler_callback(RECONFIGURE, cli_reconfigure);
743 set_handler_callback(SUSPEND+MAP, cli_suspend);
744 set_handler_callback(RESUME+MAP, cli_resume);
745 set_handler_callback(RESIZE+MAP, cli_resize);
746 set_handler_callback(REINSTATE+PATH, cli_reinstate);
747 set_handler_callback(FAIL+PATH, cli_fail);
748 set_handler_callback(DISABLEQ+MAP, cli_disable_queueing);
749 set_handler_callback(RESTOREQ+MAP, cli_restore_queueing);
750 set_handler_callback(DISABLEQ+MAPS, cli_disable_all_queueing);
751 set_handler_callback(RESTOREQ+MAPS, cli_restore_all_queueing);
752 set_handler_callback(QUIT, cli_quit);
754 uxsock_listen(&uxsock_trigger, ap);
760 exit_daemon (int status)
763 fprintf(stderr, "bad exit status. see daemon.log\n");
765 condlog(3, "unlink pidfile");
766 unlink(DEFAULT_PIDFILE);
768 pthread_mutex_lock(&exit_mutex);
769 pthread_cond_signal(&exit_cond);
770 pthread_mutex_unlock(&exit_mutex);
776 fail_path (struct path * pp, int del_active)
781 condlog(2, "checker failed path %s in map %s",
782 pp->dev_t, pp->mpp->alias);
784 dm_fail_path(pp->mpp->alias, pp->dev_t);
786 update_queue_mode_del_path(pp->mpp);
790 * caller must have locked the path list before calling that function
793 reinstate_path (struct path * pp, int add_active)
798 if (dm_reinstate_path(pp->mpp->alias, pp->dev_t))
799 condlog(0, "%s: reinstate failed", pp->dev_t);
801 condlog(2, "%s: reinstated", pp->dev_t);
803 update_queue_mode_add_path(pp->mpp);
808 enable_group(struct path * pp)
810 struct pathgroup * pgp;
813 * if path is added through uev_add_path, pgindex can be unset.
814 * next update_strings() will set it, upon map reload event.
816 * we can safely return here, because upon map reload, all
817 * PG will be enabled.
819 if (!pp->mpp->pg || !pp->pgindex)
822 pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
824 if (pgp->status == PGSTATE_DISABLED) {
825 condlog(2, "%s: enable group #%i", pp->mpp->alias, pp->pgindex);
826 dm_enablegroup(pp->mpp->alias, pp->pgindex);
831 mpvec_garbage_collector (struct vectors * vecs)
833 struct multipath * mpp;
839 vector_foreach_slot (vecs->mpvec, mpp, i) {
840 if (mpp && mpp->alias && !dm_map_present(mpp->alias)) {
841 condlog(2, "%s: remove dead map", mpp->alias);
842 remove_map_and_stop_waiter(mpp, vecs, 1);
849 defered_failback_tick (vector mpvec)
851 struct multipath * mpp;
854 vector_foreach_slot (mpvec, mpp, i) {
856 * defered failback getting sooner
858 if (mpp->pgfailback > 0 && mpp->failback_tick > 0) {
859 mpp->failback_tick--;
861 if (!mpp->failback_tick && need_switch_pathgroup(mpp, 1))
862 switch_pathgroup(mpp);
868 retry_count_tick(vector mpvec)
870 struct multipath *mpp;
873 vector_foreach_slot (mpvec, mpp, i) {
874 if (mpp->retry_tick) {
875 mpp->stat_total_queueing_time++;
876 condlog(4, "%s: Retrying.. No active path", mpp->alias);
877 if(--mpp->retry_tick == 0) {
878 dm_queue_if_no_path(mpp->alias, 0);
879 condlog(2, "%s: Disable queueing", mpp->alias);
886 check_path (struct vectors * vecs, struct path * pp)
893 if (pp->tick && --pp->tick)
894 return; /* don't check this path yet */
897 * provision a next check soonest,
898 * in case we exit abnormaly from here
900 pp->tick = conf->checkint;
902 if (!checker_selected(&pp->checker)) {
903 pathinfo(pp, conf->hwtable, DI_SYSFS);
906 if (!checker_selected(&pp->checker)) {
907 condlog(0, "%s: checker is not set", pp->dev);
911 * Set checker in async mode.
912 * Honored only by checker implementing the said mode.
914 checker_set_async(&pp->checker);
916 if (path_offline(pp))
917 newstate = PATH_DOWN;
919 newstate = checker_check(&pp->checker);
922 condlog(2, "%s: unusable path", pp->dev);
923 pathinfo(pp, conf->hwtable, 0);
927 * Async IO in flight. Keep the previous path state
928 * and reschedule as soon as possible
930 if (newstate == PATH_PENDING) {
934 if (newstate != pp->state) {
935 int oldstate = pp->state;
936 pp->state = newstate;
937 LOG_MSG(1, checker_message(&pp->checker));
940 * upon state change, reset the checkint
941 * to the shortest delay
943 pp->checkint = conf->checkint;
945 if (newstate == PATH_DOWN || newstate == PATH_SHAKY ||
946 update_multipath_strings(pp->mpp, vecs->pathvec)) {
948 * proactively fail path in the DM
950 if (oldstate == PATH_UP ||
951 oldstate == PATH_GHOST)
957 * cancel scheduled failback
959 pp->mpp->failback_tick = 0;
961 pp->mpp->stat_path_failures++;
966 * reinstate this path
968 if (oldstate != PATH_UP &&
969 oldstate != PATH_GHOST)
970 reinstate_path(pp, 1);
972 reinstate_path(pp, 0);
975 * schedule [defered] failback
977 if (pp->mpp->pgfailback > 0)
978 pp->mpp->failback_tick =
979 pp->mpp->pgfailback + 1;
980 else if (pp->mpp->pgfailback == -FAILBACK_IMMEDIATE &&
981 need_switch_pathgroup(pp->mpp, 1))
982 switch_pathgroup(pp->mpp);
985 * if at least one path is up in a group, and
986 * the group is disabled, re-enable it
988 if (newstate == PATH_UP)
991 else if (newstate == PATH_UP || newstate == PATH_GHOST) {
992 LOG_MSG(4, checker_message(&pp->checker));
994 * double the next check delay.
995 * max at conf->max_checkint
997 if (pp->checkint < (conf->max_checkint / 2))
998 pp->checkint = 2 * pp->checkint;
1000 pp->checkint = conf->max_checkint;
1002 pp->tick = pp->checkint;
1003 condlog(4, "%s: delay next check %is",
1004 pp->dev_t, pp->tick);
1006 else if (newstate == PATH_DOWN)
1007 LOG_MSG(2, checker_message(&pp->checker));
1009 pp->state = newstate;
1012 * path prio refreshing
1014 condlog(4, "path prio refresh");
1015 pathinfo(pp, conf->hwtable, DI_PRIO);
1018 * pathgroup failback policy
1020 if (need_switch_pathgroup(pp->mpp, 0)) {
1021 if (pp->mpp->pgfailback > 0 &&
1022 pp->mpp->failback_tick <= 0)
1023 pp->mpp->failback_tick =
1024 pp->mpp->pgfailback + 1;
1025 else if (pp->mpp->pgfailback ==
1026 -FAILBACK_IMMEDIATE)
1027 switch_pathgroup(pp->mpp);
1032 checkerloop (void *ap)
1034 struct vectors *vecs;
1040 mlockall(MCL_CURRENT | MCL_FUTURE);
1041 vecs = (struct vectors *)ap;
1042 condlog(2, "path checkers start up");
1045 * init the path check interval
1047 vector_foreach_slot (vecs->pathvec, pp, i) {
1048 pp->checkint = conf->checkint;
1052 block_signal(SIGHUP, &old);
1053 pthread_cleanup_push(cleanup_lock, &vecs->lock);
1057 if (vecs->pathvec) {
1058 vector_foreach_slot (vecs->pathvec, pp, i) {
1059 check_path(vecs, pp);
1063 defered_failback_tick(vecs->mpvec);
1064 retry_count_tick(vecs->mpvec);
1069 condlog(4, "map garbage collection");
1070 mpvec_garbage_collector(vecs);
1074 lock_cleanup_pop(vecs->lock);
1075 pthread_sigmask(SIG_SETMASK, &old, NULL);
1082 configure (struct vectors * vecs, int start_waiters)
1084 struct multipath * mpp;
1089 if (!vecs->pathvec && !(vecs->pathvec = vector_alloc()))
1092 if (!vecs->mpvec && !(vecs->mpvec = vector_alloc()))
1095 if (!(mpvec = vector_alloc()))
1099 * probe for current path (from sysfs) and map (from dm) sets
1101 path_discovery(vecs->pathvec, conf, DI_ALL);
1103 vector_foreach_slot (vecs->pathvec, pp, i){
1104 if (filter_path(conf, pp) > 0){
1105 vector_del_slot(vecs->pathvec, i);
1110 pp->checkint = conf->checkint;
1112 if (map_discovery(vecs))
1116 * create new set of maps & push changed ones into dm
1118 if (coalesce_paths(vecs, mpvec, NULL, 0))
1122 * may need to remove some maps which are no longer relevant
1123 * e.g., due to blacklist changes in conf file
1125 if (coalesce_maps(vecs, mpvec))
1130 sync_maps_state(mpvec);
1133 * purge dm of old maps
1138 * save new set of maps formed by considering current path state
1140 vector_free(vecs->mpvec);
1141 vecs->mpvec = mpvec;
1144 * start dm event waiter threads for these new maps
1146 vector_foreach_slot(vecs->mpvec, mpp, i) {
1147 if (setup_multipath(vecs, mpp))
1150 if (start_waiter_thread(mpp, vecs))
1157 reconfigure (struct vectors * vecs)
1159 struct config * old = conf;
1162 * free old map and path vectors ... they use old conf state
1164 if (VECTOR_SIZE(vecs->mpvec))
1165 remove_maps_and_stop_waiters(vecs);
1167 if (VECTOR_SIZE(vecs->pathvec))
1168 free_pathvec(vecs->pathvec, FREE_PATHS);
1170 vecs->pathvec = NULL;
1173 if (load_config(DEFAULT_CONFIGFILE))
1176 conf->verbosity = old->verbosity;
1178 if (!conf->checkint) {
1179 conf->checkint = DEFAULT_CHECKINT;
1180 conf->max_checkint = MAX_CHECKINT(conf->checkint);
1187 static struct vectors *
1190 struct vectors * vecs;
1192 vecs = (struct vectors *)MALLOC(sizeof(struct vectors));
1198 (pthread_mutex_t *)MALLOC(sizeof(pthread_mutex_t));
1200 if (!vecs->lock.mutex)
1203 pthread_mutex_init(vecs->lock.mutex, NULL);
1204 vecs->lock.depth = 0;
1210 condlog(0, "failed to init paths");
1215 signal_set(int signo, void (*func) (int))
1218 struct sigaction sig;
1219 struct sigaction osig;
1221 sig.sa_handler = func;
1222 sigemptyset(&sig.sa_mask);
1225 r = sigaction(signo, &sig, &osig);
1230 return (osig.sa_handler);
1236 condlog(2, "reconfigure (SIGHUP)");
1240 unlock(gvecs->lock);
1243 dbg_free_final(NULL);
1256 condlog(3, "SIGUSR1 received");
1262 signal_set(SIGHUP, sighup);
1263 signal_set(SIGUSR1, sigusr1);
1264 signal_set(SIGINT, sigend);
1265 signal_set(SIGTERM, sigend);
1266 signal(SIGPIPE, SIG_IGN);
1273 static struct sched_param sched_param = {
1274 .sched_priority = 99
1277 res = sched_setscheduler (0, SCHED_RR, &sched_param);
1280 condlog(LOG_WARNING, "Could not set SCHED_RR at priority 99");
1285 set_oom_adj (int val)
1289 fp = fopen("/proc/self/oom_adj", "w");
1294 fprintf(fp, "%i", val);
1299 setup_thread_attr(pthread_attr_t *attr, size_t stacksize, int detached)
1301 if (pthread_attr_init(attr)) {
1302 fprintf(stderr, "can't initialize thread attr: %s\n",
1306 if (stacksize < PTHREAD_STACK_MIN)
1307 stacksize = PTHREAD_STACK_MIN;
1309 if (pthread_attr_setstacksize(attr, stacksize)) {
1310 fprintf(stderr, "can't set thread stack size to %lu: %s\n",
1311 (unsigned long)stacksize, strerror(errno));
1314 if (detached && pthread_attr_setdetachstate(attr,
1315 PTHREAD_CREATE_DETACHED)) {
1316 fprintf(stderr, "can't set thread to detached: %s\n",
1323 child (void * param)
1325 pthread_t check_thr, uevent_thr, uxlsnr_thr;
1326 pthread_attr_t log_attr, misc_attr;
1327 struct vectors * vecs;
1329 mlockall(MCL_CURRENT | MCL_FUTURE);
1331 setup_thread_attr(&misc_attr, 64 * 1024, 1);
1332 setup_thread_attr(&waiter_attr, 32 * 1024, 1);
1335 setup_thread_attr(&log_attr, 64 * 1024, 0);
1336 log_thread_start(&log_attr);
1337 pthread_attr_destroy(&log_attr);
1340 condlog(2, "--------start up--------");
1341 condlog(2, "read " DEFAULT_CONFIGFILE);
1343 if (load_config(DEFAULT_CONFIGFILE))
1346 if (init_checkers()) {
1347 condlog(0, "failed to initialize checkers");
1351 condlog(0, "failed to initialize prioritizers");
1355 setlogmask(LOG_UPTO(conf->verbosity + 3));
1358 * fill the voids left in the config file
1360 if (!conf->checkint) {
1361 conf->checkint = DEFAULT_CHECKINT;
1362 conf->max_checkint = MAX_CHECKINT(conf->checkint);
1365 if (conf->max_fds) {
1366 struct rlimit fd_limit;
1367 if (conf->max_fds > 0) {
1368 fd_limit.rlim_cur = conf->max_fds;
1369 fd_limit.rlim_max = conf->max_fds;
1372 fd_limit.rlim_cur = RLIM_INFINITY;
1373 fd_limit.rlim_max = RLIM_INFINITY;
1375 if (setrlimit(RLIMIT_NOFILE, &fd_limit) < 0)
1376 condlog(0, "can't set open fds limit to %d : %s\n",
1377 conf->max_fds, strerror(errno));
1380 if (pidfile_create(DEFAULT_PIDFILE, getpid())) {
1389 vecs = gvecs = init_vecs();
1394 if (sysfs_init(conf->sysfs_dir, FILE_NAME_SIZE)) {
1395 condlog(0, "can not find sysfs mount point");
1400 * fetch and configure both paths and multipaths
1402 if (configure(vecs, 1)) {
1403 condlog(0, "failure during configuration");
1409 pthread_create(&check_thr, &misc_attr, checkerloop, vecs);
1410 pthread_create(&uevent_thr, &misc_attr, ueventloop, vecs);
1411 pthread_create(&uxlsnr_thr, &misc_attr, uxlsnrloop, vecs);
1412 pthread_attr_destroy(&misc_attr);
1414 pthread_cond_wait(&exit_cond, &exit_mutex);
1419 block_signal(SIGHUP, NULL);
1421 remove_maps_and_stop_waiters(vecs);
1422 free_pathvec(vecs->pathvec, FREE_PATHS);
1424 pthread_cancel(check_thr);
1425 pthread_cancel(uevent_thr);
1426 pthread_cancel(uxlsnr_thr);
1432 free_handlers(handlers);
1437 /* Now all the waitevent threads will start rushing in. */
1438 while (vecs->lock.depth > 0) {
1439 sleep (1); /* This is weak. */
1440 condlog(3,"Have %d wait event checkers threads to de-alloc, waiting..\n", vecs->lock.depth);
1442 pthread_mutex_destroy(vecs->lock.mutex);
1443 FREE(vecs->lock.mutex);
1444 vecs->lock.depth = 0;
1445 vecs->lock.mutex = NULL;
1449 condlog(2, "--------shut down-------");
1460 * Freeing config must be done after condlog() and dm_lib_exit(),
1461 * because logging functions like dlog() and dm_write_log()
1462 * reference the config.
1468 dbg_free_final(NULL);
1480 if( (pid = fork()) < 0){
1481 fprintf(stderr, "Failed first fork : %s\n", strerror(errno));
1489 if ( (pid = fork()) < 0)
1490 fprintf(stderr, "Failed second fork : %s\n", strerror(errno));
1494 in_fd = open("/dev/null", O_RDONLY);
1496 fprintf(stderr, "cannot open /dev/null for input : %s\n",
1500 out_fd = open("/dev/console", O_WRONLY);
1502 fprintf(stderr, "cannot open /dev/console for output : %s\n",
1507 close(STDIN_FILENO);
1509 close(STDOUT_FILENO);
1511 close(STDERR_FILENO);
1517 fprintf(stderr, "cannot chdir to '/', continuing\n");
1523 main (int argc, char *argv[])
1525 extern char *optarg;
1533 if (getuid() != 0) {
1534 fprintf(stderr, "need to be root\n");
1538 /* make sure we don't lock any path */
1540 umask(umask(077) | 022);
1542 conf = alloc_config();
1547 while ((arg = getopt(argc, argv, ":dv:k::")) != EOF ) {
1551 //debug=1; /* ### comment me out ### */
1554 if (sizeof(optarg) > sizeof(char *) ||
1555 !isdigit(optarg[0]))
1558 conf->verbosity = atoi(optarg);
1567 if (optind < argc) {
1572 while (optind < argc) {
1573 if (strchr(argv[optind], ' '))
1574 c += snprintf(c, s + CMDSIZE - c, "\"%s\" ", argv[optind]);
1576 c += snprintf(c, s + CMDSIZE - c, "%s ", argv[optind]);
1579 c += snprintf(c, s + CMDSIZE - c, "\n");
1597 return (child(NULL));