2 * Copyright (c) 2004, 2005 Christophe Varoqui
3 * Copyright (c) 2005 Kiyoshi Ueda, NEC
4 * Copyright (c) 2005 Benjamin Marzinski, Redhat
5 * Copyright (c) 2005 Edward Goggin, EMC
9 #include <libdevmapper.h>
12 #include <sys/types.h>
16 #include <sys/resource.h>
36 #include <blacklist.h>
37 #include <structs_vec.h>
39 #include <devmapper.h>
42 #include <discovery.h>
46 #include <switchgroup.h>
48 #include <configure.h>
56 #include "cli_handlers.h"
60 #define FILE_NAME_SIZE 256
63 #define LOG_MSG(a,b) \
64 if (strlen(b)) condlog(a, "%s: %s", pp->dev, b);
/*
 * Condition/mutex pair used to park the main child() thread until
 * exit_daemon() signals shutdown.
 */
66 pthread_cond_t exit_cond = PTHREAD_COND_INITIALIZER;
67 pthread_mutex_t exit_mutex = PTHREAD_MUTEX_INITIALIZER;
72 * global copy of vecs for use in sig handlers
74 struct vectors * gvecs;
/*
 * Decide whether map "mpp" should fail back to a different path group.
 * Refreshes per-path priorities (DI_PRIO) across every path group,
 * recomputes the best group via select_path_group(), and compares it
 * against the currently scheduled nextpg.  Returns early for a NULL map
 * or a map configured for manual failback.
 * NOTE(review): parts of this body (returns/braces) are elided in this
 * listing; the "refresh" parameter presumably gates the DI_PRIO pass —
 * confirm against the full source.
 */
77 need_switch_pathgroup (struct multipath * mpp, int refresh)
79 struct pathgroup * pgp;
83 if (!mpp || mpp->pgfailback == -FAILBACK_MANUAL)
87 * Refresh path priority values
90 vector_foreach_slot (mpp->pg, pgp, i)
91 vector_foreach_slot (pgp->paths, pp, j)
92 pathinfo(pp, conf->hwtable, DI_PRIO);
94 mpp->bestpg = select_path_group(mpp);
96 if (mpp->bestpg != mpp->nextpg)
/*
 * Ask device-mapper (dm_switchgroup) to make mpp->bestpg the active
 * path group for the map, bumping the per-map switch statistic and
 * logging the transition.
 */
103 switch_pathgroup (struct multipath * mpp)
105 mpp->stat_switchgroup++;
106 dm_switchgroup(mpp->alias, mpp->bestpg);
107 condlog(2, "%s: switch to path group #%i",
108 mpp->alias, mpp->bestpg);
/*
 * Remove every map in vecs->mpvec whose wwid is absent from the freshly
 * built map set "nmpv" (i.e. maps no longer allowed by the current
 * configuration).  If dm_flush_map() fails — typically because the
 * device is still open — the old map is kept alive instead: it is moved
 * into nmpv, re-synced with setup_multipath(), and dropped from the old
 * vector so it is not freed.
 */
112 coalesce_maps(struct vectors *vecs, vector nmpv)
114 struct multipath * ompp;
115 vector ompv = vecs->mpvec;
119 vector_foreach_slot (ompv, ompp, i) {
120 if (!find_mp_by_wwid(nmpv, ompp->wwid)) {
122 * remove all current maps not allowed by the
123 * current configuration
125 if (dm_flush_map(ompp->alias)) {
126 condlog(0, "%s: unable to flush devmap",
129 * may be just because the device is open
131 if (!vector_alloc_slot(nmpv))
134 vector_set_slot(nmpv, ompp);
135 setup_multipath(vecs, ompp);
137 if ((j = find_slot(ompv, (void *)ompp)) != -1)
138 vector_del_slot(ompv, j);
144 condlog(2, "%s devmap removed", ompp->alias);
/*
 * Reconcile the kernel's device-mapper view of each path in "mpp" with
 * the checker's view: paths the checker sees as usable (UP/GHOST) but
 * dm has FAILED/UNDEF are reinstated; paths the checker sees as
 * DOWN/SHAKY but dm has ACTIVE/UNDEF are failed.  Paths whose checker
 * state is still UNCHECKED or WILD are skipped (presumably via a
 * "continue" elided from this listing).
 */
152 sync_map_state(struct multipath *mpp)
154 struct pathgroup *pgp;
161 vector_foreach_slot (mpp->pg, pgp, i){
162 vector_foreach_slot (pgp->paths, pp, j){
163 if (pp->state == PATH_UNCHECKED ||
164 pp->state == PATH_WILD)
166 if ((pp->dmstate == PSTATE_FAILED ||
167 pp->dmstate == PSTATE_UNDEF) &&
168 (pp->state == PATH_UP || pp->state == PATH_GHOST))
169 dm_reinstate_path(mpp->alias, pp->dev_t);
170 else if ((pp->dmstate == PSTATE_ACTIVE ||
171 pp->dmstate == PSTATE_UNDEF) &&
172 (pp->state == PATH_DOWN ||
173 pp->state == PATH_SHAKY))
174 dm_fail_path(mpp->alias, pp->dev_t);
/*
 * Apply the per-map kernel/checker state reconciliation to every map in
 * "mpvec" (the loop body — presumably sync_map_state(mpp) — is elided
 * from this listing).
 */
180 sync_maps_state(vector mpvec)
183 struct multipath *mpp;
185 vector_foreach_slot (mpvec, mpp, i)
/*
 * Flush map "mpp" from device-mapper and tear down our bookkeeping:
 * orphan its paths and remove the map plus its waitevent thread.
 * A dm_flush_map() failure is logged but may be benign (e.g. the map
 * was already removed externally via dmsetup).
 */
190 flush_map(struct multipath * mpp, struct vectors * vecs)
193 * clear references to this map before flushing so we can ignore
194 * the spurious uevent we may generate with the dm_flush_map call below
196 if (dm_flush_map(mpp->alias)) {
198 * May not really be an error -- if the map was already flushed
199 * from the device mapper by dmsetup(8) for instance.
201 condlog(0, "%s: can't flush", mpp->alias);
206 condlog(2, "%s: devmap removed", mpp->alias);
209 orphan_paths(vecs->pathvec, mpp);
210 remove_map_and_stop_waiter(mpp, vecs, 1);
/* uevent wrapper: log the "add map" event and delegate to ev_add_map(). */
216 uev_add_map (struct sysfs_device * dev, struct vectors * vecs)
218 condlog(2, "%s: add map (uevent)", dev->kernel);
219 return ev_add_map(dev, vecs);
/*
 * Register a device-mapper map announced by a uevent.  Resolves the dm
 * major:minor from the sysfs "dev" attribute, maps it to an alias via
 * dm_mapname(), and ignores non-multipath tables (dm_type != TGT_MPATH).
 * An alias already present in vecs->mpvec means we generated the uevent
 * ourselves when creating the map — not an error.  Otherwise the map is
 * adopted with add_map_without_path(); if the map is not present in dm,
 * it falls back to coalescing paths for the map's refwwid.
 */
223 ev_add_map (struct sysfs_device * dev, struct vectors * vecs)
229 struct multipath * mpp;
233 dev_t = sysfs_attr_get_value(dev->devpath, "dev");
235 if (!dev_t || sscanf(dev_t, "%d:%d", &major, &minor) != 2)
238 alias = dm_mapname(major, minor);
243 map_present = dm_map_present(alias);
245 if (map_present && dm_type(alias, TGT_MPATH) <= 0) {
246 condlog(4, "%s: not a multipath map", alias);
251 mpp = find_mp_by_alias(vecs->mpvec, alias);
255 * Not really an error -- we generate our own uevent
256 * if we create a multipath mapped device as a result
259 condlog(0, "%s: devmap already registered",
266 * now we can register the map
268 if (map_present && (mpp = add_map_without_path(vecs, minor, alias))) {
270 condlog(2, "%s: devmap %s added", alias, dev->kernel);
273 refwwid = get_refwwid(dev->kernel, DEV_DEVMAP, vecs->pathvec);
276 r = coalesce_paths(vecs, NULL, refwwid, 0);
281 condlog(2, "%s: devmap %s added", alias, dev->kernel);
283 condlog(0, "%s: uev_add_map %s failed", alias, dev->kernel);
/* uevent wrapper: log the "remove map" event and delegate to ev_remove_map(). */
291 uev_remove_map (struct sysfs_device * dev, struct vectors * vecs)
293 condlog(2, "%s: remove map (uevent)", dev->kernel);
294 return ev_remove_map(dev->kernel, vecs);
/*
 * Remove the map named "devname": look it up in vecs->mpvec and flush
 * it via flush_map().  An unknown name is logged and ignored.
 */
298 ev_remove_map (char * devname, struct vectors * vecs)
300 struct multipath * mpp;
302 mpp = find_mp_by_str(vecs->mpvec, devname);
305 condlog(2, "%s: devmap not registered, can't remove",
309 flush_map(mpp, vecs);
/*
 * Handle a map "umount" uevent: refresh the map's path list against the
 * global pathvec, verify the paths, and flush the map if it no longer
 * has any paths.
 */
315 uev_umount_map (struct sysfs_device * dev, struct vectors * vecs)
317 struct multipath * mpp;
319 condlog(2, "%s: umount map (uevent)", dev->kernel);
321 mpp = find_mp_by_str(vecs->mpvec, dev->kernel);
326 update_mpp_paths(mpp, vecs->pathvec);
327 verify_paths(mpp, vecs, NULL);
329 if (!VECTOR_SIZE(mpp->paths))
330 flush_map(mpp, vecs);
/*
 * uevent wrapper: delegate to ev_add_path() and normalize its result —
 * returns 1 only when ev_add_path() returned exactly 1, 0 otherwise.
 */
336 uev_add_path (struct sysfs_device * dev, struct vectors * vecs)
338 condlog(2, "%s: add path (uevent)", dev->kernel);
339 return (ev_add_path(dev->kernel, vecs) != 1)? 0 : 1;
/*
 * Bring a new path "devname" into the daemon:
 *  - reject duplicates already present in vecs->pathvec;
 *  - gather path info (store_pathinfo) and set the check interval;
 *  - a path with no wwid yet stays in pathvec (return 1) so a later
 *    event can complete it; blacklisted paths are dropped again;
 *  - if a map with the same wwid exists, adopt the path into it and
 *    reload (ACT_RELOAD); otherwise create a new map (ACT_CREATE);
 *  - push the resulting table to device-mapper (setup_map + domap),
 *    resync daemon state (setup_multipath), and start the waitevent
 *    thread for newly created maps.
 * On domap failure the error path depends on the action taken; the
 * remove_map() at the bottom is the create-failure cleanup.
 */
350 ev_add_path (char * devname, struct vectors * vecs)
352 struct multipath * mpp;
354 char empty_buff[WWID_SIZE] = {0};
356 pp = find_path_by_dev(vecs->pathvec, devname);
359 condlog(0, "%s: spurious uevent, path already in pathvec",
366 * get path vital state
368 if (!(pp = store_pathinfo(vecs->pathvec, conf->hwtable,
370 condlog(0, "%s: failed to store path info", devname);
373 pp->checkint = conf->checkint;
377 * need path UID to go any further
379 if (memcmp(empty_buff, pp->wwid, WWID_SIZE) == 0) {
380 condlog(0, "%s: failed to get path uid", devname);
381 return 1; /* leave path added to pathvec */
383 if (filter_path(conf, pp) > 0){
384 int i = find_slot(vecs->pathvec, (void *)pp);
386 vector_del_slot(vecs->pathvec, i);
390 mpp = pp->mpp = find_mp_by_wwid(vecs->mpvec, pp->wwid);
393 if (adopt_paths(vecs->pathvec, mpp))
394 return 1; /* leave path added to pathvec */
396 verify_paths(mpp, vecs, NULL);
397 mpp->flush_on_last_del = FLUSH_UNDEF;
398 mpp->action = ACT_RELOAD;
401 if ((mpp = add_map_with_path(vecs, pp, 1)))
402 mpp->action = ACT_CREATE;
404 return 1; /* leave path added to pathvec */
408 * push the map to the device-mapper
410 if (setup_map(mpp)) {
411 condlog(0, "%s: failed to setup map for addition of new "
412 "path %s", mpp->alias, devname);
416 * reload the map for the multipath mapped device
418 if (domap(mpp) <= 0) {
419 condlog(0, "%s: failed in domap for addition of new "
420 "path %s", mpp->alias, devname);
422 * deal with asynchronous uevents :((
424 if (mpp->action == ACT_RELOAD) {
425 condlog(0, "%s: uev_add_path sleep", mpp->alias);
427 update_mpp_paths(mpp, vecs->pathvec);
436 * update our state from kernel regardless of create or reload
438 if (setup_multipath(vecs, mpp))
443 if (mpp->action == ACT_CREATE &&
444 start_waiter_thread(mpp, vecs))
447 condlog(2, "%s path added to devmap %s", devname, mpp->alias);
451 remove_map(mpp, vecs, 1);
/*
 * uevent wrapper: delegate to ev_remove_path(), then drop the sysfs
 * device reference taken by the uevent layer (sysfs_device_put).
 */
456 uev_remove_path (struct sysfs_device * dev, struct vectors * vecs)
460 condlog(2, "%s: remove path (uevent)", dev->kernel);
461 retval = ev_remove_path(dev->kernel, vecs);
463 sysfs_device_put(dev);
/*
 * Remove path "devname" from the daemon and from its map (if any):
 *  - unknown paths (spurious uevent) are logged and ignored;
 *  - for a path still attached to a map, refresh the map's params
 *    string, drop the path from mpp->paths, and either
 *      (a) flush the whole map when this was its last path — first
 *          disabling queue_if_no_path when flush_on_last_del is set,
 *          so the flush is not blocked by queued I/O — or
 *      (b) reload the map without the path (setup_map + domap with
 *          ACT_RELOAD) and resync state via setup_multipath();
 *  - finally the path is removed from vecs->pathvec; the
 *    remove_map_and_stop_waiter() at the bottom is an error-path
 *    cleanup for the map.
 */
469 ev_remove_path (char * devname, struct vectors * vecs)
471 struct multipath * mpp;
475 pp = find_path_by_dev(vecs->pathvec, devname);
478 condlog(0, "%s: spurious uevent, path not in pathvec", devname);
483 * avoid referring to the map of an orphaned path
485 if ((mpp = pp->mpp)) {
487 * transform the mp->pg vector of vectors of paths
488 * into a mp->params string to feed the device-mapper
490 if (update_mpp_paths(mpp, vecs->pathvec)) {
491 condlog(0, "%s: failed to update paths",
495 if ((i = find_slot(mpp->paths, (void *)pp)) != -1)
496 vector_del_slot(mpp->paths, i);
499 * remove the map IFF removing the last path
501 if (VECTOR_SIZE(mpp->paths) == 0) {
502 char alias[WWID_SIZE];
505 * flush_map will fail if the device is open
507 strncpy(alias, mpp->alias, WWID_SIZE);
/* NOTE(review): strncpy leaves "alias" unterminated when mpp->alias is
 * WWID_SIZE bytes or longer — confirm alias length invariants upstream. */
508 if (mpp->flush_on_last_del == FLUSH_ENABLED) {
509 condlog(2, "%s Last path deleted, disabling queueing", mpp->alias);
511 mpp->no_path_retry = NO_PATH_RETRY_FAIL;
512 mpp->flush_on_last_del = FLUSH_IN_PROGRESS;
513 dm_queue_if_no_path(mpp->alias, 0);
515 if (!flush_map(mpp, vecs)) {
516 condlog(2, "%s: removed map after"
517 " removing all paths",
523 * Not an error, continue
527 if (setup_map(mpp)) {
528 condlog(0, "%s: failed to setup map for"
529 " removal of path %s", mpp->alias,
536 mpp->action = ACT_RELOAD;
537 if (domap(mpp) <= 0) {
538 condlog(0, "%s: failed in domap for "
539 "removal of path %s",
540 mpp->alias, devname);
544 * update our state from kernel
546 if (setup_multipath(vecs, mpp)) {
551 condlog(2, "%s: path removed from map %s",
552 devname, mpp->alias);
557 if ((i = find_slot(vecs->pathvec, (void *)pp)) != -1)
558 vector_del_slot(vecs->pathvec, i);
565 remove_map_and_stop_waiter(mpp, vecs, 1);
/*
 * Populate vecs->mpvec from the maps currently known to device-mapper
 * (dm_get_maps) and resync daemon state for each with setup_multipath().
 */
570 map_discovery (struct vectors * vecs)
572 struct multipath * mpp;
575 if (dm_get_maps(vecs->mpvec))
578 vector_foreach_slot (vecs->mpvec, mpp, i)
579 if (setup_multipath(vecs, mpp))
/*
 * Callback for the unix-socket CLI listener: parse the command string
 * under the vecs lock (with a cancellation cleanup handler pushed so
 * the lock is released if the thread is cancelled).  Synthesizes a
 * "fail\n" reply on error and "ok\n" when the handler succeeded without
 * producing output; a negative handler result leaves *reply untouched.
 */
586 uxsock_trigger (char * str, char ** reply, int * len, void * trigger_data)
588 struct vectors * vecs;
593 vecs = (struct vectors *)trigger_data;
595 pthread_cleanup_push(cleanup_lock, &vecs->lock);
598 r = parse_cmd(str, reply, len, vecs);
601 *reply = STRDUP("fail\n");
602 *len = strlen(*reply) + 1;
605 else if (!r && *len == 0) {
606 *reply = STRDUP("ok\n");
607 *len = strlen(*reply) + 1;
610 /* else if (r < 0) leave *reply alone */
612 lock_cleanup_pop(vecs->lock);
/*
 * Filter uevents by devpath: accept only whole block devices.  Anything
 * without "/block/" in the path, or with a second path component after
 * the device name (i.e. a partition), is discarded.  The %10s / %10[^/]
 * scans bound the copied names; a and b are presumably at least 11
 * bytes each — confirm their declarations (elided here).
 */
617 uev_discard(char * devpath)
623 * keep only block devices, discard partitions
625 tmp = strstr(devpath, "/block/");
627 condlog(4, "no /block/ in '%s'", devpath);
630 if (sscanf(tmp, "/block/%10s", a) != 1 ||
631 sscanf(tmp, "/block/%10[^/]/%10s", a, b) == 2) {
632 condlog(4, "discard event on %s", devpath);
/*
 * Dispatch a uevent to the right handler:
 *  - "dm-*" devices: "change" -> uev_add_map (dm "add" events are
 *    ignored because tables are not fully initialised yet),
 *    "remove" -> uev_remove_map, "umount" -> uev_umount_map;
 *  - other block devices: apply the devnode blacklist/exceptions
 *    filter, then "add" -> uev_add_path, "remove" -> uev_remove_path.
 */
639 uev_trigger (struct uevent * uev, void * trigger_data)
642 struct sysfs_device *sysdev;
643 struct vectors * vecs;
645 vecs = (struct vectors *)trigger_data;
647 if (uev_discard(uev->devpath))
650 sysdev = sysfs_device_get(uev->devpath);
658 * Add events are ignored here as the tables
659 * are not fully initialised then.
661 if (!strncmp(sysdev->kernel, "dm-", 3)) {
662 if (!strncmp(uev->action, "change", 6)) {
663 r = uev_add_map(sysdev, vecs);
666 if (!strncmp(uev->action, "remove", 6)) {
667 r = uev_remove_map(sysdev, vecs);
670 if (!strncmp(uev->action, "umount", 6)) {
671 r = uev_umount_map(sysdev, vecs);
678 * path add/remove event
680 if (filter_devnode(conf->blist_devnode, conf->elist_devnode,
684 if (!strncmp(uev->action, "add", 3)) {
685 r = uev_add_path(sysdev, vecs);
688 if (!strncmp(uev->action, "remove", 6)) {
689 r = uev_remove_path(sysdev, vecs);
/*
 * Thread entry: block SIGUSR1/SIGHUP in this thread (they are handled
 * elsewhere) and run the uevent listener with uev_trigger as callback.
 */
699 ueventloop (void * ap)
701 block_signal(SIGUSR1, NULL);
702 block_signal(SIGHUP, NULL);
704 if (uevent_listen(&uev_trigger, ap))
705 fprintf(stderr, "error starting uevent listener");
/*
 * Thread entry for the CLI unix socket: block SIGUSR1/SIGHUP, register
 * every interactive command handler (list/add/del/switch/suspend/
 * resume/resize/reinstate/fail/queueing control/quit), then serve
 * requests via uxsock_listen with uxsock_trigger as the dispatcher.
 */
711 uxlsnrloop (void * ap)
713 block_signal(SIGUSR1, NULL);
714 block_signal(SIGHUP, NULL);
719 set_handler_callback(LIST+PATHS, cli_list_paths);
720 set_handler_callback(LIST+PATHS+FMT, cli_list_paths_fmt);
721 set_handler_callback(LIST+MAPS, cli_list_maps);
722 set_handler_callback(LIST+STATUS, cli_list_status);
723 set_handler_callback(LIST+MAPS+STATUS, cli_list_maps_status);
724 set_handler_callback(LIST+MAPS+STATS, cli_list_maps_stats);
725 set_handler_callback(LIST+MAPS+FMT, cli_list_maps_fmt);
726 set_handler_callback(LIST+MAPS+TOPOLOGY, cli_list_maps_topology);
727 set_handler_callback(LIST+TOPOLOGY, cli_list_maps_topology);
728 set_handler_callback(LIST+MAP+TOPOLOGY, cli_list_map_topology);
729 set_handler_callback(LIST+CONFIG, cli_list_config);
730 set_handler_callback(LIST+BLACKLIST, cli_list_blacklist);
731 set_handler_callback(LIST+DEVICES, cli_list_devices);
732 set_handler_callback(LIST+WILDCARDS, cli_list_wildcards);
733 set_handler_callback(ADD+PATH, cli_add_path);
734 set_handler_callback(DEL+PATH, cli_del_path);
735 set_handler_callback(ADD+MAP, cli_add_map);
736 set_handler_callback(DEL+MAP, cli_del_map);
737 set_handler_callback(SWITCH+MAP+GROUP, cli_switch_group);
738 set_handler_callback(RECONFIGURE, cli_reconfigure);
739 set_handler_callback(SUSPEND+MAP, cli_suspend);
740 set_handler_callback(RESUME+MAP, cli_resume);
741 set_handler_callback(RESIZE+MAP, cli_resize);
742 set_handler_callback(REINSTATE+PATH, cli_reinstate);
743 set_handler_callback(FAIL+PATH, cli_fail);
744 set_handler_callback(DISABLEQ+MAP, cli_disable_queueing);
745 set_handler_callback(RESTOREQ+MAP, cli_restore_queueing);
746 set_handler_callback(DISABLEQ+MAPS, cli_disable_all_queueing);
747 set_handler_callback(RESTOREQ+MAPS, cli_restore_all_queueing);
748 set_handler_callback(QUIT, cli_quit);
750 uxsock_listen(&uxsock_trigger, ap);
/*
 * Initiate daemon shutdown: log a hint on bad status, unlink the
 * pidfile, and signal exit_cond (under exit_mutex) to wake the main
 * child() thread blocked in pthread_cond_wait.
 */
756 exit_daemon (int status)
759 fprintf(stderr, "bad exit status. see daemon.log\n");
761 condlog(3, "unlink pidfile");
762 unlink(DEFAULT_PIDFILE);
764 pthread_mutex_lock(&exit_mutex);
765 pthread_cond_signal(&exit_cond);
766 pthread_mutex_unlock(&exit_mutex);
/*
 * Mark path "pp" failed in its map: log, tell device-mapper
 * (dm_fail_path), and update queueing bookkeeping for the map.
 * The del_active flag presumably gates the queue-mode update — confirm
 * against the full source (elided here).
 */
772 fail_path (struct path * pp, int del_active)
777 condlog(2, "checker failed path %s in map %s",
778 pp->dev_t, pp->mpp->alias);
780 dm_fail_path(pp->mpp->alias, pp->dev_t);
782 update_queue_mode_del_path(pp->mpp);
786 * caller must have locked the path list before calling that function
/*
 * Reinstate path "pp" in device-mapper, logging success or failure, and
 * update the map's queueing bookkeeping.  add_active presumably gates
 * the queue-mode update — confirm against the full source.
 */
789 reinstate_path (struct path * pp, int add_active)
794 if (dm_reinstate_path(pp->mpp->alias, pp->dev_t))
795 condlog(0, "%s: reinstate failed", pp->dev_t);
797 condlog(2, "%s: reinstated", pp->dev_t);
799 update_queue_mode_add_path(pp->mpp);
/*
 * Re-enable the path group containing "pp" if device-mapper currently
 * has it disabled.  Safely returns when pgindex is not yet set (a path
 * freshly added via uev_add_path): the next map reload re-enables all
 * groups anyway.  pgindex is 1-based, hence the "- 1" on lookup.
 */
804 enable_group(struct path * pp)
806 struct pathgroup * pgp;
809 * if path is added through uev_add_path, pgindex can be unset.
810 * next update_strings() will set it, upon map reload event.
812 * we can safely return here, because upon map reload, all
813 * PG will be enabled.
815 if (!pp->mpp->pg || !pp->pgindex)
818 pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
820 if (pgp->status == PGSTATE_DISABLED) {
821 condlog(2, "%s: enable group #%i", pp->mpp->alias, pp->pgindex);
822 dm_enablegroup(pp->mpp->alias, pp->pgindex);
/*
 * Periodic sweep: drop any map in vecs->mpvec that device-mapper no
 * longer knows about (dm_map_present fails), stopping its waitevent
 * thread as well.
 */
827 mpvec_garbage_collector (struct vectors * vecs)
829 struct multipath * mpp;
835 vector_foreach_slot (vecs->mpvec, mpp, i) {
836 if (mpp && mpp->alias && !dm_map_present(mpp->alias)) {
837 condlog(2, "%s: remove dead map", mpp->alias);
838 remove_map_and_stop_waiter(mpp, vecs, 1);
/*
 * Per-second tick for deferred failback: count down each map's
 * failback_tick and, when it reaches zero, switch path groups if
 * need_switch_pathgroup() (with priority refresh) still says so.
 */
845 defered_failback_tick (vector mpvec)
847 struct multipath * mpp;
850 vector_foreach_slot (mpvec, mpp, i) {
852 * defered failback getting sooner
854 if (mpp->pgfailback > 0 && mpp->failback_tick > 0) {
855 mpp->failback_tick--;
857 if (!mpp->failback_tick && need_switch_pathgroup(mpp, 1))
858 switch_pathgroup(mpp);
/*
 * Per-second tick for no_path_retry: while a map is queueing with no
 * active path, accumulate queueing-time stats and count down
 * retry_tick; when it expires, disable queue_if_no_path so pending I/O
 * fails instead of queueing forever.
 */
864 retry_count_tick(vector mpvec)
866 struct multipath *mpp;
869 vector_foreach_slot (mpvec, mpp, i) {
870 if (mpp->retry_tick) {
871 mpp->stat_total_queueing_time++;
872 condlog(4, "%s: Retrying.. No active path", mpp->alias);
873 if(--mpp->retry_tick == 0) {
874 dm_queue_if_no_path(mpp->alias, 0);
875 condlog(2, "%s: Disable queueing", mpp->alias);
/*
 * Per-path checker tick.  Skips the path until its countdown (pp->tick)
 * expires, then:
 *  - lazily selects a checker via pathinfo(DI_SYSFS) and sets it async;
 *  - determines newstate from sysfs offline status or the checker;
 *  - PATH_PENDING (async I/O still in flight) keeps the previous state;
 *  - on a state change: reset checkint to the shortest interval; for a
 *    transition to DOWN/SHAKY (or if refreshing the map strings fails),
 *    proactively fail the path in dm, cancel any scheduled failback and
 *    bump failure stats; for a transition to usable, reinstate the path,
 *    schedule deferred failback or switch groups immediately per the
 *    map's pgfailback policy, and re-enable the path's group if needed;
 *  - with no state change and a still-good path, exponentially back off
 *    the check interval up to conf->max_checkint;
 *  - finally refresh path priority (DI_PRIO) and apply the map's
 *    failback policy if a better group is available.
 */
882 check_path (struct vectors * vecs, struct path * pp)
889 if (pp->tick && --pp->tick)
890 return; /* don't check this path yet */
893 * provision a next check soonest,
894 * in case we exit abnormaly from here
896 pp->tick = conf->checkint;
898 if (!checker_selected(&pp->checker)) {
899 pathinfo(pp, conf->hwtable, DI_SYSFS);
902 if (!checker_selected(&pp->checker)) {
903 condlog(0, "%s: checker is not set", pp->dev);
907 * Set checker in async mode.
908 * Honored only by checker implementing the said mode.
910 checker_set_async(&pp->checker);
912 if (path_offline(pp))
913 newstate = PATH_DOWN;
915 newstate = checker_check(&pp->checker);
918 condlog(2, "%s: unusable path", pp->dev);
919 pathinfo(pp, conf->hwtable, 0);
923 * Async IO in flight. Keep the previous path state
924 * and reschedule as soon as possible
926 if (newstate == PATH_PENDING) {
930 if (newstate != pp->state) {
931 int oldstate = pp->state;
932 pp->state = newstate;
933 LOG_MSG(1, checker_message(&pp->checker));
936 * upon state change, reset the checkint
937 * to the shortest delay
939 pp->checkint = conf->checkint;
941 if (newstate == PATH_DOWN || newstate == PATH_SHAKY ||
942 update_multipath_strings(pp->mpp, vecs->pathvec)) {
944 * proactively fail path in the DM
946 if (oldstate == PATH_UP ||
947 oldstate == PATH_GHOST)
953 * cancel scheduled failback
955 pp->mpp->failback_tick = 0;
957 pp->mpp->stat_path_failures++;
962 * reinstate this path
964 if (oldstate != PATH_UP &&
965 oldstate != PATH_GHOST)
966 reinstate_path(pp, 1);
968 reinstate_path(pp, 0);
971 * schedule [defered] failback
973 if (pp->mpp->pgfailback > 0)
974 pp->mpp->failback_tick =
975 pp->mpp->pgfailback + 1;
976 else if (pp->mpp->pgfailback == -FAILBACK_IMMEDIATE &&
977 need_switch_pathgroup(pp->mpp, 1))
978 switch_pathgroup(pp->mpp);
981 * if at least one path is up in a group, and
982 * the group is disabled, re-enable it
984 if (newstate == PATH_UP)
987 else if (newstate == PATH_UP || newstate == PATH_GHOST) {
988 LOG_MSG(4, checker_message(&pp->checker));
990 * double the next check delay.
991 * max at conf->max_checkint
993 if (pp->checkint < (conf->max_checkint / 2))
994 pp->checkint = 2 * pp->checkint;
996 pp->checkint = conf->max_checkint;
998 pp->tick = pp->checkint;
999 condlog(4, "%s: delay next check %is",
1000 pp->dev_t, pp->tick);
1002 else if (newstate == PATH_DOWN)
1003 LOG_MSG(2, checker_message(&pp->checker));
1005 pp->state = newstate;
1008 * path prio refreshing
1010 condlog(4, "path prio refresh");
1011 pathinfo(pp, conf->hwtable, DI_PRIO);
1014 * pathgroup failback policy
1016 if (need_switch_pathgroup(pp->mpp, 0)) {
1017 if (pp->mpp->pgfailback > 0 &&
1018 pp->mpp->failback_tick <= 0)
1019 pp->mpp->failback_tick =
1020 pp->mpp->pgfailback + 1;
1021 else if (pp->mpp->pgfailback ==
1022 -FAILBACK_IMMEDIATE)
1023 switch_pathgroup(pp->mpp);
/*
 * Main path-checker thread: lock memory, seed every path's check
 * interval, then loop (outer loop elided in this listing) — under the
 * vecs lock (cleanup handler pushed for cancellation safety) run
 * check_path() on every path, drive the deferred-failback and
 * no-path-retry countdowns, and periodically garbage-collect dead maps.
 * SIGHUP is blocked around the critical section and the previous mask
 * restored afterwards.
 */
1028 checkerloop (void *ap)
1030 struct vectors *vecs;
1036 mlockall(MCL_CURRENT | MCL_FUTURE);
1037 vecs = (struct vectors *)ap;
1038 condlog(2, "path checkers start up");
1041 * init the path check interval
1043 vector_foreach_slot (vecs->pathvec, pp, i) {
1044 pp->checkint = conf->checkint;
1048 block_signal(SIGHUP, &old);
1049 pthread_cleanup_push(cleanup_lock, &vecs->lock);
1053 if (vecs->pathvec) {
1054 vector_foreach_slot (vecs->pathvec, pp, i) {
1055 check_path(vecs, pp);
1059 defered_failback_tick(vecs->mpvec);
1060 retry_count_tick(vecs->mpvec);
1065 condlog(4, "map garbage collection");
1066 mpvec_garbage_collector(vecs);
1070 lock_cleanup_pop(vecs->lock);
1071 pthread_sigmask(SIG_SETMASK, &old, NULL);
/*
 * Full (re)configuration pass: allocate path/map vectors as needed,
 * discover paths from sysfs (DI_ALL) and drop blacklisted ones, seed
 * check intervals, discover existing dm maps, coalesce paths into a new
 * map set (pushing changed maps into dm), prune maps no longer allowed
 * by the configuration (coalesce_maps), sync kernel state, swap the new
 * map vector in for the old, and — when start_waiters is set — resync
 * each map and start its dm-event waiter thread.
 */
1078 configure (struct vectors * vecs, int start_waiters)
1080 struct multipath * mpp;
1085 if (!vecs->pathvec && !(vecs->pathvec = vector_alloc()))
1088 if (!vecs->mpvec && !(vecs->mpvec = vector_alloc()))
1091 if (!(mpvec = vector_alloc()))
1095 * probe for current path (from sysfs) and map (from dm) sets
1097 path_discovery(vecs->pathvec, conf, DI_ALL);
1099 vector_foreach_slot (vecs->pathvec, pp, i){
1100 if (filter_path(conf, pp) > 0){
1101 vector_del_slot(vecs->pathvec, i);
1106 pp->checkint = conf->checkint;
1108 if (map_discovery(vecs))
1112 * create new set of maps & push changed ones into dm
1114 if (coalesce_paths(vecs, mpvec, NULL, 0))
1118 * may need to remove some maps which are no longer relevant
1119 * e.g., due to blacklist changes in conf file
1121 if (coalesce_maps(vecs, mpvec))
1126 sync_maps_state(mpvec)
1129 * purge dm of old maps
1134 * save new set of maps formed by considering current path state
1136 vector_free(vecs->mpvec);
1137 vecs->mpvec = mpvec;
1140 * start dm event waiter threads for these new maps
1142 vector_foreach_slot(vecs->mpvec, mpp, i) {
1143 if (setup_multipath(vecs, mpp))
1146 if (start_waiter_thread(mpp, vecs))
/*
 * SIGHUP / CLI "reconfigure": tear down all maps (and their waiter
 * threads) and all paths, since they reference the old config; reload
 * DEFAULT_CONFIGFILE, carrying the old verbosity over; and re-default
 * checkint/max_checkint when the new config leaves them unset.  The
 * follow-up configure() call is elided from this listing.
 */
1153 reconfigure (struct vectors * vecs)
1155 struct config * old = conf;
1158 * free old map and path vectors ... they use old conf state
1160 if (VECTOR_SIZE(vecs->mpvec))
1161 remove_maps_and_stop_waiters(vecs);
1163 if (VECTOR_SIZE(vecs->pathvec))
1164 free_pathvec(vecs->pathvec, FREE_PATHS);
1166 vecs->pathvec = NULL;
1169 if (load_config(DEFAULT_CONFIGFILE))
1172 conf->verbosity = old->verbosity;
1174 if (!conf->checkint) {
1175 conf->checkint = DEFAULT_CHECKINT;
1176 conf->max_checkint = MAX_CHECKINT(conf->checkint);
/*
 * Allocate and initialize the global vectors container: the struct
 * itself, its lock mutex (MALLOC + pthread_mutex_init) and lock depth.
 * Error paths and vector allocation are partially elided here; the
 * condlog hints that path initialization can fail.
 */
1183 static struct vectors *
1186 struct vectors * vecs;
1188 vecs = (struct vectors *)MALLOC(sizeof(struct vectors));
1194 (pthread_mutex_t *)MALLOC(sizeof(pthread_mutex_t));
1196 if (!vecs->lock.mutex)
1199 pthread_mutex_init(vecs->lock.mutex, NULL);
1200 vecs->lock.depth = 0;
1206 condlog(0, "failed to init paths");
/*
 * sigaction()-based replacement for signal(): install "func" as the
 * handler for "signo" with an empty mask and return the previous
 * handler (or presumably SIG_ERR on failure — the error branch is
 * elided from this listing).
 */
1211 signal_set(int signo, void (*func) (int))
1214 struct sigaction sig;
1215 struct sigaction osig;
1217 sig.sa_handler = func;
1218 sigemptyset(&sig.sa_mask);
1221 r = sigaction(signo, &sig, &osig);
1226 return (osig.sa_handler);
/*
 * Fragments of the signal handlers and their installation:
 *  - SIGHUP handler: logs and triggers reconfigure() under gvecs->lock
 *    (the lock acquisition is elided here; only the unlock is visible),
 *    with a debug-build dbg_free_final() call;
 *  - SIGUSR1 handler: just logs receipt;
 *  - setsignals(): installs the handlers and ignores SIGPIPE.
 */
1232 condlog(2, "reconfigure (SIGHUP)");
1236 unlock(gvecs->lock);
1239 dbg_free_final(NULL);
1252 condlog(3, "SIGUSR1 received");
1258 signal_set(SIGHUP, sighup);
1259 signal_set(SIGUSR1, sigusr1);
1260 signal_set(SIGINT, sigend);
1261 signal_set(SIGTERM, sigend);
1262 signal(SIGPIPE, SIG_IGN);
/*
 * Request real-time round-robin scheduling (SCHED_RR, priority 99) for
 * the daemon; failure is logged but non-fatal.
 */
1269 static struct sched_param sched_param = {
1270 .sched_priority = 99
1273 res = sched_setscheduler (0, SCHED_RR, &sched_param);
1276 condlog(LOG_WARNING, "Could not set SCHED_RR at priority 99");
/*
 * Write "val" to /proc/self/oom_adj to adjust the kernel OOM-killer
 * score for this process (fclose/error handling elided here).
 */
1281 set_oom_adj (int val)
1285 fp = fopen("/proc/self/oom_adj", "w");
1290 fprintf(fp, "%i", val);
/*
 * Initialize a pthread attribute object with at least
 * PTHREAD_STACK_MIN of stack (clamping smaller requests up) and,
 * optionally, detached state.  Failures are reported to stderr.
 */
1295 setup_thread_attr(pthread_attr_t *attr, size_t stacksize, int detached)
1297 if (pthread_attr_init(attr)) {
1298 fprintf(stderr, "can't initialize thread attr: %s\n",
1302 if (stacksize < PTHREAD_STACK_MIN)
1303 stacksize = PTHREAD_STACK_MIN;
1305 if (pthread_attr_setstacksize(attr, stacksize)) {
1306 fprintf(stderr, "can't set thread stack size to %lu: %s\n",
1307 (unsigned long)stacksize, strerror(errno));
1310 if (detached && pthread_attr_setdetachstate(attr,
1311 PTHREAD_CREATE_DETACHED)) {
1312 fprintf(stderr, "can't set thread to detached: %s\n",
/*
 * Daemon main body (runs after fork in daemon mode, or directly):
 *  - lock memory, prepare thread attrs, start the log thread;
 *  - load the config, init checkers and prioritizers, fill config
 *    defaults (checkint/max_checkint), apply the max_fds rlimit;
 *  - create the pidfile, init the global vectors, mount sysfs;
 *  - run the initial configure() pass, then spawn the checker, uevent
 *    and CLI-listener threads;
 *  - block on exit_cond until exit_daemon() signals shutdown
 *    (NOTE(review): pthread_cond_wait requires exit_mutex to be held —
 *    the lock call is not visible in this listing, confirm it exists);
 *  - teardown: block SIGHUP, remove maps/paths, cancel the three
 *    threads, free CLI handlers, wait for waitevent threads to drain
 *    the lock depth (acknowledged as weak), destroy the lock, and log
 *    shutdown.  Config is freed only after the last condlog because
 *    the logging functions reference it.
 */
1319 child (void * param)
1321 pthread_t check_thr, uevent_thr, uxlsnr_thr;
1322 pthread_attr_t log_attr, misc_attr;
1323 struct vectors * vecs;
1325 mlockall(MCL_CURRENT | MCL_FUTURE);
1327 setup_thread_attr(&misc_attr, 64 * 1024, 1);
1328 setup_thread_attr(&waiter_attr, 32 * 1024, 1);
1331 setup_thread_attr(&log_attr, 64 * 1024, 0);
1332 log_thread_start(&log_attr);
1333 pthread_attr_destroy(&log_attr);
1336 condlog(2, "--------start up--------");
1337 condlog(2, "read " DEFAULT_CONFIGFILE);
1339 if (load_config(DEFAULT_CONFIGFILE))
1342 if (init_checkers()) {
1343 condlog(0, "failed to initialize checkers");
1347 condlog(0, "failed to initialize prioritizers");
1351 setlogmask(LOG_UPTO(conf->verbosity + 3));
1354 * fill the voids left in the config file
1356 if (!conf->checkint) {
1357 conf->checkint = DEFAULT_CHECKINT;
1358 conf->max_checkint = MAX_CHECKINT(conf->checkint);
1361 if (conf->max_fds) {
1362 struct rlimit fd_limit;
1363 if (conf->max_fds > 0) {
1364 fd_limit.rlim_cur = conf->max_fds;
1365 fd_limit.rlim_max = conf->max_fds;
1368 fd_limit.rlim_cur = RLIM_INFINITY;
1369 fd_limit.rlim_max = RLIM_INFINITY;
1371 if (setrlimit(RLIMIT_NOFILE, &fd_limit) < 0)
1372 condlog(0, "can't set open fds limit to %d : %s\n",
1373 conf->max_fds, strerror(errno));
1376 if (pidfile_create(DEFAULT_PIDFILE, getpid())) {
1385 vecs = gvecs = init_vecs();
1390 if (sysfs_init(conf->sysfs_dir, FILE_NAME_SIZE)) {
1391 condlog(0, "can not find sysfs mount point");
1396 * fetch and configure both paths and multipaths
1398 if (configure(vecs, 1)) {
1399 condlog(0, "failure during configuration");
1405 pthread_create(&check_thr, &misc_attr, checkerloop, vecs);
1406 pthread_create(&uevent_thr, &misc_attr, ueventloop, vecs);
1407 pthread_create(&uxlsnr_thr, &misc_attr, uxlsnrloop, vecs);
1408 pthread_attr_destroy(&misc_attr);
1410 pthread_cond_wait(&exit_cond, &exit_mutex);
1415 block_signal(SIGHUP, NULL);
1417 remove_maps_and_stop_waiters(vecs);
1418 free_pathvec(vecs->pathvec, FREE_PATHS);
1420 pthread_cancel(check_thr);
1421 pthread_cancel(uevent_thr);
1422 pthread_cancel(uxlsnr_thr);
1428 free_handlers(handlers);
1433 /* Now all the waitevent threads will start rushing in. */
1434 while (vecs->lock.depth > 0) {
1435 sleep (1); /* This is weak. */
1436 condlog(3,"Have %d wait event checkers threads to de-alloc, waiting..\n", vecs->lock.depth);
1438 pthread_mutex_destroy(vecs->lock.mutex);
1439 FREE(vecs->lock.mutex);
1440 vecs->lock.depth = 0;
1441 vecs->lock.mutex = NULL;
1445 condlog(2, "--------shut down-------");
1456 * Freeing config must be done after condlog() and dm_lib_exit(),
1457 * because logging functions like dlog() and dm_write_log()
1458 * reference the config.
1464 dbg_free_final(NULL);
/*
 * Daemonize fragment: classic double-fork, then redirect stdin from
 * /dev/null, stdout/stderr to /dev/console (failures reported on the
 * still-open stderr), close the original standard descriptors, and
 * chdir to "/" (failure is non-fatal).
 */
1476 if( (pid = fork()) < 0){
1477 fprintf(stderr, "Failed first fork : %s\n", strerror(errno));
1485 if ( (pid = fork()) < 0)
1486 fprintf(stderr, "Failed second fork : %s\n", strerror(errno));
1490 in_fd = open("/dev/null", O_RDONLY);
1492 fprintf(stderr, "cannot open /dev/null for input : %s\n",
1496 out_fd = open("/dev/console", O_WRONLY);
1498 fprintf(stderr, "cannot open /dev/console for output : %s\n",
1503 close(STDIN_FILENO);
1505 close(STDOUT_FILENO);
1507 close(STDERR_FILENO);
1513 fprintf(stderr, "cannot chdir to '/', continuing\n");
1519 main (int argc, char *argv[])
1521 extern char *optarg;
1529 if (getuid() != 0) {
1530 fprintf(stderr, "need to be root\n");
1534 /* make sure we don't lock any path */
1536 umask(umask(077) | 022);
1538 conf = alloc_config();
1543 while ((arg = getopt(argc, argv, ":dv:k::")) != EOF ) {
1547 //debug=1; /* ### comment me out ### */
1550 if (sizeof(optarg) > sizeof(char *) ||
1551 !isdigit(optarg[0]))
1554 conf->verbosity = atoi(optarg);
1563 if (optind < argc) {
1568 while (optind < argc) {
1569 if (strchr(argv[optind], ' '))
1570 c += snprintf(c, s + CMDSIZE - c, "\"%s\" ", argv[optind]);
1572 c += snprintf(c, s + CMDSIZE - c, "%s ", argv[optind]);
1575 c += snprintf(c, s + CMDSIZE - c, "\n");
1593 return (child(NULL));