2 * Copyright (c) 2004, 2005 Christophe Varoqui
3 * Copyright (c) 2005 Kiyoshi Ueda, NEC
4 * Copyright (c) 2005 Benjamin Marzinski, Redhat
5 * Copyright (c) 2005 Edward Goggin, EMC
9 #include <libdevmapper.h>
12 #include <sys/types.h>
16 #include <sys/resource.h>
35 #include <blacklist.h>
36 #include <structs_vec.h>
38 #include <devmapper.h>
41 #include <discovery.h>
45 #include <switchgroup.h>
47 #include <configure.h>
55 #include "cli_handlers.h"
#define FILE_NAME_SIZE 256

/*
 * Log the checker message "b" for the path "pp" (which must be in
 * scope at the expansion site) at verbosity level "a", but only when
 * the message is non-empty.  NOTE: "b" is evaluated twice (strlen()
 * and condlog()), so callers must pass a side-effect-free expression.
 *
 * Wrapped in do { } while (0) so the expansion behaves as exactly one
 * statement: the previous bare "if (...) ...;" form silently paired
 * with a following "else" and mis-nested when used unbraced inside an
 * if/else chain.
 */
#define LOG_MSG(a,b) \
	do { if (strlen(b)) condlog(a, "%s: %s", pp->dev, b); } while (0)
/*
 * Daemon shutdown rendezvous: exit_daemon() signals exit_cond under
 * exit_mutex; the main child() thread waits on it before tearing down.
 */
65 pthread_cond_t exit_cond = PTHREAD_COND_INITIALIZER;
66 pthread_mutex_t exit_mutex = PTHREAD_MUTEX_INITIALIZER;
71  * global copy of vecs for use in sig handlers
73 struct vectors * gvecs;
/*
 * Decide whether mpp should switch to a better path group: skip maps
 * configured for manual failback, optionally refresh per-path priority
 * values (DI_PRIO), recompute mpp->bestpg with select_path_group() and
 * compare it against mpp->nextpg.
 * NOTE(review): extraction gaps hide the return statements and the use
 * of the "refresh" argument — confirm against the full source.
 */
76 need_switch_pathgroup (struct multipath * mpp, int refresh)
78 	struct pathgroup * pgp;
82 	if (!mpp || mpp->pgfailback == -FAILBACK_MANUAL)
86 	 * Refresh path priority values
89 	vector_foreach_slot (mpp->pg, pgp, i)
90 		vector_foreach_slot (pgp->paths, pp, j)
91 			pathinfo(pp, conf->hwtable, DI_PRIO);
93 	mpp->bestpg = select_path_group(mpp);
95 	if (mpp->bestpg != mpp->nextpg)
/*
 * Ask the kernel (via dm_switchgroup) to activate mpp->bestpg and
 * bump the per-map switch-group statistics counter.
 */
102 switch_pathgroup (struct multipath * mpp)
104 	mpp->stat_switchgroup++;
105 	dm_switchgroup(mpp->alias, mpp->bestpg);
106 	condlog(2, "%s: switch to path group #%i",
107 		 mpp->alias, mpp->bestpg);
/*
 * Reconcile the currently registered maps (vecs->mpvec) with the new
 * set "nmpv": any old map with no matching wwid in nmpv is flushed
 * from the device-mapper.  A failed flush (e.g. the device is open)
 * keeps the map: it is re-inserted into nmpv and removed from the old
 * vector so it survives the swap.
 * NOTE(review): extraction gaps hide the return paths and error
 * handling around vector_alloc_slot() — confirm against full source.
 */
111 coalesce_maps(struct vectors *vecs, vector nmpv)
113 	struct multipath * ompp;
114 	vector ompv = vecs->mpvec;
118 	vector_foreach_slot (ompv, ompp, i) {
119 		if (!find_mp_by_wwid(nmpv, ompp->wwid)) {
121 			 * remove all current maps not allowed by the
122 			 * current configuration
124 			if (dm_flush_map(ompp->alias, DEFAULT_TARGET)) {
125 				condlog(0, "%s: unable to flush devmap",
128 				 * may be just because the device is open
130 				if (!vector_alloc_slot(nmpv))
133 				vector_set_slot(nmpv, ompp);
134 				setup_multipath(vecs, ompp);
136 				if ((j = find_slot(ompv, (void *)ompp)) != -1)
137 					vector_del_slot(ompv, j);
143 			condlog(3, "%s devmap removed", ompp->alias);
/*
 * Push the daemon's view of each path's state down to the kernel map:
 * paths the checker sees as usable (UP/GHOST) but the DM has failed or
 * undefined are reinstated; paths the checker sees as DOWN/SHAKY but
 * the DM has active or undefined are failed.  Unchecked paths are
 * skipped.
 */
151 sync_map_state(struct multipath *mpp)
153 	struct pathgroup *pgp;
157 	vector_foreach_slot (mpp->pg, pgp, i){
158 		vector_foreach_slot (pgp->paths, pp, j){
159 			if (pp->state <= PATH_UNCHECKED)
161 			if ((pp->dmstate == PSTATE_FAILED ||
162 			     pp->dmstate == PSTATE_UNDEF) &&
163 			    (pp->state == PATH_UP || pp->state == PATH_GHOST))
164 				dm_reinstate_path(mpp->alias, pp->dev_t);
165 			else if ((pp->dmstate == PSTATE_ACTIVE ||
166 				  pp->dmstate == PSTATE_UNDEF) &&
167 				 (pp->state == PATH_DOWN ||
168 				  pp->state == PATH_SHAKY))
169 				dm_fail_path(mpp->alias, pp->dev_t);
/*
 * Apply sync_map_state() to every map in the vector.
 */
175 sync_maps_state(vector mpvec)
178 	struct multipath *mpp;
180 	vector_foreach_slot (mpvec, mpp, i)
/*
 * Remove a map from the device-mapper, then drop the daemon's state
 * for it: orphan its paths, and remove the map plus its event waiter
 * thread.  A dm_flush_map() failure is logged but may be benign (the
 * map could already have been flushed externally, e.g. by dmsetup).
 * NOTE(review): extraction gaps hide the return values.
 */
185 flush_map(struct multipath * mpp, struct vectors * vecs)
188 	 * clear references to this map before flushing so we can ignore
189 	 * the spurious uevent we may generate with the dm_flush_map call below
191 	if (dm_flush_map(mpp->alias, DEFAULT_TARGET)) {
193 	 	 * May not really be an error -- if the map was already flushed
194 		 * from the device mapper by dmsetup(8) for instance.
196 		condlog(0, "%s: can't flush", mpp->alias);
201 		condlog(3, "%s: devmap removed", mpp->alias);
204 	orphan_paths(vecs->pathvec, mpp);
205 	remove_map_and_stop_waiter(mpp, vecs, 1);
/*
 * uevent entry point for a map "add" event: log it and delegate to
 * ev_add_map().
 */
211 uev_add_map (struct sysfs_device * dev, struct vectors * vecs)
213 	condlog(2, "%s: add map (uevent)", dev->kernel);
214 	return ev_add_map(dev, vecs);
/*
 * Register a device-mapper map with the daemon.  Resolves major:minor
 * from the sysfs "dev" attribute, looks up the dm alias, and ignores
 * non-multipath targets.  If the map is already in vecs->mpvec this is
 * treated as the expected self-generated uevent.  Otherwise the map is
 * added without paths (map already present in dm) or created by
 * coalescing paths matching the map's reference wwid.
 * NOTE(review): extraction gaps hide returns and cleanup (alias is
 * allocated by dm_mapname() — verify it is freed on all paths).
 */
218 ev_add_map (struct sysfs_device * dev, struct vectors * vecs)
224 	struct multipath * mpp;
228 	dev_t = sysfs_attr_get_value(dev->devpath, "dev");
230 	if (!dev_t || sscanf(dev_t, "%d:%d", &major, &minor) != 2)
233 	alias = dm_mapname(major, minor);
238 	map_present = dm_map_present(alias);
240 	if (map_present && dm_type(alias, DEFAULT_TARGET) <= 0) {
241 		condlog(4, "%s: not a multipath map", alias);
245 	mpp = find_mp_by_alias(vecs->mpvec, alias);
249 		 * Not really an error -- we generate our own uevent
250 		 * if we create a multipath mapped device as a result
253 		condlog(0, "%s: devmap already registered",
259 	 * now we can register the map
261 	if (map_present && (mpp = add_map_without_path(vecs, minor, alias))) {
263 		condlog(3, "%s: devmap %s added", alias, dev->kernel);
266 	refwwid = get_refwwid(dev->kernel, DEV_DEVMAP, vecs->pathvec);
269 		r = coalesce_paths(vecs, NULL, refwwid);
274 		condlog(3, "%s: devmap %s added", alias, dev->kernel);
276 		condlog(0, "%s: uev_add_map %s failed", alias, dev->kernel);
/*
 * uevent entry point for a map "remove" event: log it and delegate to
 * ev_remove_map() with the kernel device name.
 */
283 uev_remove_map (struct sysfs_device * dev, struct vectors * vecs)
285 	condlog(2, "%s: remove map (uevent)", dev->kernel);
286 	return ev_remove_map(dev->kernel, vecs);
/*
 * Unregister a map by device name: look it up in vecs->mpvec and
 * flush it.  A miss is only logged — the map was never registered.
 */
290 ev_remove_map (char * devname, struct vectors * vecs)
292 	struct multipath * mpp;
294 	mpp = find_mp_by_str(vecs->mpvec, devname);
297 		condlog(3, "%s: devmap not registered, can't remove",
301 	flush_map(mpp, vecs);
/*
 * uevent entry point for a map "umount" event: refresh the map's path
 * list, verify the paths, and flush the map if it no longer has any.
 */
307 uev_umount_map (struct sysfs_device * dev, struct vectors * vecs)
309 	struct multipath * mpp;
311 	condlog(2, "%s: umount map (uevent)", dev->kernel);
313 	mpp = find_mp_by_str(vecs->mpvec, dev->kernel);
318 	update_mpp_paths(mpp, vecs->pathvec);
319 	verify_paths(mpp, vecs, NULL);
321 	if (!VECTOR_SIZE(mpp->paths))
322 		flush_map(mpp, vecs);
/*
 * uevent entry point for a path "add" event: delegate to ev_add_path()
 * and collapse its result to 0 (ev_add_path() == 1 means "path kept in
 * pathvec") or 1 otherwise.
 */
328 uev_add_path (struct sysfs_device * dev, struct vectors * vecs)
330 	condlog(2, "%s: add path (uevent)", dev->kernel);
331 	return (ev_add_path(dev->kernel, vecs) != 1)? 0 : 1;
/*
 * Add a path to the daemon and to its multipath map.  Steps: reject
 * duplicates already in pathvec; store pathinfo; require a non-empty
 * wwid; drop blacklisted paths (filter_path); then either adopt the
 * path into an existing map (ACT_RELOAD) or create a new map around it
 * (ACT_CREATE); finally setup_map(), domap(), resync state from the
 * kernel and, for new maps, start the dm-event waiter thread.
 * "return 1" sites deliberately leave the path in pathvec so a later
 * event can retry.  NOTE(review): gaps hide several returns and the
 * sleep in the ACT_RELOAD async-uevent workaround — confirm.
 */
342 ev_add_path (char * devname, struct vectors * vecs)
344 	struct multipath * mpp;
346 	char empty_buff[WWID_SIZE] = {0};
348 	pp = find_path_by_dev(vecs->pathvec, devname);
351 		condlog(0, "%s: spurious uevent, path already in pathvec",
358 	 * get path vital state
360 	if (!(pp = store_pathinfo(vecs->pathvec, conf->hwtable,
362 		condlog(0, "%s: failed to store path info", devname);
365 	pp->checkint = conf->checkint;
369 	 * need path UID to go any further
371 	if (memcmp(empty_buff, pp->wwid, WWID_SIZE) == 0) {
372 		condlog(0, "%s: failed to get path uid", devname);
373 		return 1; /* leave path added to pathvec */
375 	if (filter_path(conf, pp) > 0){
376 		int i = find_slot(vecs->pathvec, (void *)pp);
378 			vector_del_slot(vecs->pathvec, i);
382 	mpp = pp->mpp = find_mp_by_wwid(vecs->mpvec, pp->wwid);
385 		if (adopt_paths(vecs->pathvec, mpp))
386 			return 1; /* leave path added to pathvec */
388 		verify_paths(mpp, vecs, NULL);
389 		mpp->action = ACT_RELOAD;
392 		if ((mpp = add_map_with_path(vecs, pp, 1)))
393 			mpp->action = ACT_CREATE;
395 			return 1; /* leave path added to pathvec */
399 	 * push the map to the device-mapper
401 	if (setup_map(mpp)) {
402 		condlog(0, "%s: failed to setup map for addition of new "
403 			"path %s", mpp->alias, devname);
407 	 * reload the map for the multipath mapped device
409 	if (domap(mpp) <= 0) {
410 		condlog(0, "%s: failed in domap for addition of new "
411 			"path %s", mpp->alias, devname);
413 		 * deal with asynchronous uevents :((
415 		if (mpp->action == ACT_RELOAD) {
416 			condlog(0, "%s: uev_add_path sleep", mpp->alias);
418 			update_mpp_paths(mpp, vecs->pathvec);
427 	 * update our state from kernel regardless of create or reload
429 	if (setup_multipath(vecs, mpp))
434 	if (mpp->action == ACT_CREATE &&
435 	    start_waiter_thread(mpp, vecs))
438 	condlog(3, "%s path added to devmap %s", devname, mpp->alias);
442 	remove_map(mpp, vecs, 1);
/*
 * uevent entry point for a path "remove" event: delegate to
 * ev_remove_path() and release the sysfs device reference taken for
 * this path.
 */
447 uev_remove_path (struct sysfs_device * dev, struct vectors * vecs)
451 	condlog(2, "%s: remove path (uevent)", dev->kernel);
452 	retval = ev_remove_path(dev->kernel, vecs);
454 	sysfs_device_put(dev);
/*
 * Remove a path from the daemon and from its map.  If other paths
 * remain (pathcount > 1) the map is reloaded without the path:
 * rebuild mpp->paths, drop the path, re-run setup_map()/domap()
 * (ACT_RELOAD) and resync state from the kernel; should the path list
 * have emptied anyway, flush the whole map.  If this was the last
 * path, flush the map directly.  In both flush cases the alias is
 * copied to a local buffer first because flush_map() frees mpp.
 * The path is finally deleted from vecs->pathvec so a later removal's
 * update_mpp_paths() cannot find it again.
 * NOTE(review): gaps hide returns, the orphaned-path branch, and
 * rpvec's role; strncpy(alias, ..., WWID_SIZE) may leave the copy
 * unterminated if mpp->alias is WWID_SIZE long — verify upstream.
 */
460 ev_remove_path (char * devname, struct vectors * vecs)
462 	struct multipath * mpp;
467 	pp = find_path_by_dev(vecs->pathvec, devname);
470 		condlog(0, "%s: spurious uevent, path not in pathvec", devname);
475 	 * avoid referring to the map of an orphanned path
477 	if ((mpp = pp->mpp)) {
480 		 * remove the map IFF removing the last path
482 		if (pathcount(mpp, PATH_WILD) > 1) {
483 			vector rpvec = vector_alloc();
486 			 * transform the mp->pg vector of vectors of paths
487 			 * into a mp->params string to feed the device-mapper
489 			update_mpp_paths(mpp, vecs->pathvec);
490 			if ((i = find_slot(mpp->paths, (void *)pp)) != -1)
491 				vector_del_slot(mpp->paths, i);
493 			if (VECTOR_SIZE(mpp->paths) == 0) {
494 				char alias[WWID_SIZE];
497 				 * flush_map will fail if the device is open
499 				strncpy(alias, mpp->alias, WWID_SIZE);
500 				if (flush_map(mpp, vecs))
503 					condlog(3, "%s: removed map after removing"
504 						" multiple paths", alias);
507 				if (setup_map(mpp)) {
508 					condlog(0, "%s: failed to setup map for"
509 						" removal of path %s", mpp->alias, devname);
510 					free_pathvec(rpvec, KEEP_PATHS);
516 				mpp->action = ACT_RELOAD;
517 				if (domap(mpp) <= 0) {
518 					condlog(0, "%s: failed in domap for "
519 						"removal of path %s",
520 						mpp->alias, devname);
522 					 * Delete path from pathvec so that
523 					 * update_mpp_paths wont find it later
524 					 * when/if another path is removed.
526 					if ((i = find_slot(vecs->pathvec, (void *)pp)) != -1)
527 						vector_del_slot(vecs->pathvec, i);
532 				 * update our state from kernel
534 				if (setup_multipath(vecs, mpp)) {
535 					free_pathvec(rpvec, KEEP_PATHS);
540 				condlog(3, "%s: path removed from map %s",
541 					devname, mpp->alias);
543 			free_pathvec(rpvec, KEEP_PATHS);
546 			char alias[WWID_SIZE];
549 			 * flush_map will fail if the device is open
551 			strncpy(alias, mpp->alias, WWID_SIZE);
552 			if (flush_map(mpp, vecs))
555 				condlog(3, "%s: removed map", alias);
560 	if ((i = find_slot(vecs->pathvec, (void *)pp)) != -1)
561 		vector_del_slot(vecs->pathvec, i);
568 		remove_map_and_stop_waiter(mpp, vecs, 1);
/*
 * Discover existing multipath maps from the device-mapper
 * (dm_get_maps) and initialize daemon state for each via
 * setup_multipath().
 */
573 map_discovery (struct vectors * vecs)
575 	struct multipath * mpp;
578 	if (dm_get_maps(vecs->mpvec, "multipath"))
581 	vector_foreach_slot (vecs->mpvec, mpp, i)
582 		if (setup_multipath(vecs, mpp))
/*
 * Unix-socket command callback: parse the CLI command string under the
 * vecs lock (with a cancellation-safe cleanup handler) and normalize
 * the reply — "fail\n" on error with no reply, "ok\n" on success with
 * an empty reply, otherwise whatever parse_cmd() produced.
 */
589 uxsock_trigger (char * str, char ** reply, int * len, void * trigger_data)
591 	struct vectors * vecs;
596 	vecs = (struct vectors *)trigger_data;
598 	pthread_cleanup_push(cleanup_lock, vecs->lock);
601 	r = parse_cmd(str, reply, len, vecs);
604 		*reply = STRDUP("fail\n");
605 		*len = strlen(*reply) + 1;
608 	else if (!r && *len == 0) {
609 		*reply = STRDUP("ok\n");
610 		*len = strlen(*reply) + 1;
613 	/* else if (r < 0) leave *reply alone */
615 	lock_cleanup_pop(vecs->lock);
/*
 * Filter uevents by devpath: accept only whole block devices
 * ("/block/<dev>") and discard partitions ("/block/<dev>/<part>") or
 * anything else.  %10s bounds the scanned names to the local buffers.
 */
620 uev_discard(char * devpath)
625 	 * keep only block devices, discard partitions
627 	if (sscanf(devpath, "/block/%10s", a) != 1 ||
628 	    sscanf(devpath, "/block/%10[^/]/%10s", a, b) == 2) {
629 		condlog(4, "discard event on %s", devpath);
/*
 * Main uevent dispatcher.  Discards uninteresting devpaths, resolves
 * the sysfs device, then routes: "dm-*" devices handle map events
 * (change -> add map, remove, umount; raw "add" is ignored because dm
 * tables are not complete yet at that point), everything else is a
 * path event (add/remove) after devnode blacklist filtering.
 * NOTE(review): extraction gaps hide the goto/return plumbing.
 */
636 uev_trigger (struct uevent * uev, void * trigger_data)
639 	struct sysfs_device *sysdev;
640 	struct vectors * vecs;
642 	vecs = (struct vectors *)trigger_data;
644 	if (uev_discard(uev->devpath))
647 	sysdev = sysfs_device_get(uev->devpath);
655 	 * Add events are ignored here as the tables
656 	 * are not fully initialised then.
658 	if (!strncmp(sysdev->kernel, "dm-", 3)) {
659 		if (!strncmp(uev->action, "change", 6)) {
660 			r = uev_add_map(sysdev, vecs);
663 		if (!strncmp(uev->action, "remove", 6)) {
664 			r = uev_remove_map(sysdev, vecs);
667 		if (!strncmp(uev->action, "umount", 6)) {
668 			r = uev_umount_map(sysdev, vecs);
675 	 * path add/remove event
677 	if (filter_devnode(conf->blist_devnode, conf->elist_devnode,
681 	if (!strncmp(uev->action, "add", 3)) {
682 		r = uev_add_path(sysdev, vecs);
685 	if (!strncmp(uev->action, "remove", 6)) {
686 		r = uev_remove_path(sysdev, vecs);
/*
 * Thread entry point: run the uevent listener with uev_trigger() as
 * the callback; only reached on listener start failure.
 */
696 ueventloop (void * ap)
698 	if (uevent_listen(&uev_trigger, ap))
699 		fprintf(stderr, "error starting uevent listener");
/*
 * Thread entry point for the CLI unix socket: register every command
 * handler (list/add/del/switch/reconfigure/suspend/resume/reinstate/
 * fail), then block in uxsock_listen() dispatching to uxsock_trigger().
 */
705 uxlsnrloop (void * ap)
710 	set_handler_callback(LIST+PATHS, cli_list_paths);
711 	set_handler_callback(LIST+MAPS, cli_list_maps);
712 	set_handler_callback(LIST+MAPS+STATUS, cli_list_maps_status);
713 	set_handler_callback(LIST+MAPS+STATS, cli_list_maps_stats);
714 	set_handler_callback(LIST+MAPS+TOPOLOGY, cli_list_maps_topology);
715 	set_handler_callback(LIST+TOPOLOGY, cli_list_maps_topology);
716 	set_handler_callback(LIST+MAP+TOPOLOGY, cli_list_map_topology);
717 	set_handler_callback(LIST+CONFIG, cli_list_config);
718 	set_handler_callback(LIST+BLACKLIST, cli_list_blacklist);
719 	set_handler_callback(LIST+DEVICES, cli_list_devices);
720 	set_handler_callback(ADD+PATH, cli_add_path);
721 	set_handler_callback(DEL+PATH, cli_del_path);
722 	set_handler_callback(ADD+MAP, cli_add_map);
723 	set_handler_callback(DEL+MAP, cli_del_map);
724 	set_handler_callback(SWITCH+MAP+GROUP, cli_switch_group);
725 	set_handler_callback(RECONFIGURE, cli_reconfigure);
726 	set_handler_callback(SUSPEND+MAP, cli_suspend);
727 	set_handler_callback(RESUME+MAP, cli_resume);
728 	set_handler_callback(REINSTATE+PATH, cli_reinstate);
729 	set_handler_callback(FAIL+PATH, cli_fail);
731 	uxsock_listen(&uxsock_trigger, ap);
/*
 * Request daemon shutdown: report a bad exit status, remove the pid
 * file, and wake the main thread blocked on exit_cond in child().
 * NOTE(review): gaps hide the exit_mutex locking around the signal.
 */
737 exit_daemon (int status)
740 		fprintf(stderr, "bad exit status. see daemon.log\n");
742 	condlog(3, "unlink pidfile");
743 	unlink(DEFAULT_PIDFILE);
746 	pthread_cond_signal(&exit_cond);
/*
 * Proactively fail a path in the device-mapper after the checker
 * declared it bad; del_active presumably controls the queue-mode
 * active-path accounting (update_queue_mode_del_path) — confirm.
 */
753 fail_path (struct path * pp, int del_active)
758 	condlog(2, "checker failed path %s in map %s",
759 		 pp->dev_t, pp->mpp->alias);
761 	dm_fail_path(pp->mpp->alias, pp->dev_t);
763 		update_queue_mode_del_path(pp->mpp);
767  * caller must have locked the path list before calling that function
/*
 * Reinstate a path in the device-mapper once the checker sees it
 * usable again; add_active presumably drives the queue-mode
 * active-path accounting (update_queue_mode_add_path) — confirm.
 */
770 reinstate_path (struct path * pp, int add_active)
775 	if (dm_reinstate_path(pp->mpp->alias, pp->dev_t))
776 		condlog(0, "%s: reinstate failed", pp->dev_t);
778 		condlog(2, "%s: reinstated", pp->dev_t);
780 			update_queue_mode_add_path(pp->mpp);
/*
 * Re-enable the path group containing pp if the DM currently has it
 * disabled.  Safe no-op when pgindex is unset (path freshly added via
 * uev_add_path): the pending map reload will enable all groups anyway.
 * pgindex is 1-based, hence the "- 1" when indexing mpp->pg.
 */
785 enable_group(struct path * pp)
787 	struct pathgroup * pgp;
790 	 * if path is added through uev_add_path, pgindex can be unset.
791 	 * next update_strings() will set it, upon map reload event.
793 	 * we can safely return here, because upon map reload, all
794 	 * PG will be enabled.
796 	if (!pp->mpp->pg || !pp->pgindex)
799 	pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
801 	if (pgp->status == PGSTATE_DISABLED) {
802 		condlog(2, "%s: enable group #%i", pp->mpp->alias, pp->pgindex);
803 		dm_enablegroup(pp->mpp->alias, pp->pgindex);
/*
 * Drop daemon state for maps that no longer exist in the
 * device-mapper: any registered map whose alias dm_map_present()
 * rejects is removed along with its waiter thread.
 */
808 mpvec_garbage_collector (struct vectors * vecs)
810 	struct multipath * mpp;
813 	vector_foreach_slot (vecs->mpvec, mpp, i) {
814 		if (mpp && mpp->alias && !dm_map_present(mpp->alias)) {
815 			condlog(2, "%s: remove dead map", mpp->alias);
816 			remove_map_and_stop_waiter(mpp, vecs, 1);
/*
 * Per-second tick for deferred failback: count each map's
 * failback_tick down and, when it reaches zero, switch to the best
 * path group if need_switch_pathgroup() (with priority refresh) still
 * says so.  Only maps with a positive pgfailback delay participate.
 */
823 defered_failback_tick (vector mpvec)
825 	struct multipath * mpp;
828 	vector_foreach_slot (mpvec, mpp, i) {
830 		 * defered failback getting sooner
832 		if (mpp->pgfailback > 0 && mpp->failback_tick > 0) {
833 			mpp->failback_tick--;
835 			if (!mpp->failback_tick && need_switch_pathgroup(mpp, 1))
836 				switch_pathgroup(mpp);
/*
 * Per-second tick for no-path retry: while a map is queueing with no
 * active path, count retry_tick down (accumulating queueing-time
 * stats) and disable queue_if_no_path when the retries are exhausted.
 */
842 retry_count_tick(vector mpvec)
844 	struct multipath *mpp;
847 	vector_foreach_slot (mpvec, mpp, i) {
848 		if (mpp->retry_tick) {
849 			mpp->stat_total_queueing_time++;
850 			condlog(4, "%s: Retrying.. No active path", mpp->alias);
851 			if(--mpp->retry_tick == 0) {
852 				dm_queue_if_no_path(mpp->alias, 0);
853 				condlog(2, "%s: Disable queueing", mpp->alias);
/*
 * Run one checker cycle for a single path.  Flow: honor the per-path
 * tick (skip until it expires, re-arm defensively first); lazily
 * select a checker via sysfs pathinfo; run the (possibly async)
 * checker; on PATH_PENDING keep the previous state and retry soon.
 * On a state transition: reset checkint, proactively fail the path in
 * DM when it went down from UP/GHOST (cancelling any scheduled
 * failback), or reinstate it when it came up, then schedule immediate
 * or deferred group failback per mpp->pgfailback and re-enable a
 * disabled group.  On a stable UP/GHOST state, back off by doubling
 * checkint up to conf->max_checkint.  Finally refresh path priority
 * and apply the failback policy for the whole map.
 * NOTE(review): extraction gaps hide several returns and branch
 * boundaries — treat the flow description above as approximate.
 */
860 check_path (struct vectors * vecs, struct path * pp)
867 	if (pp->tick && --pp->tick)
868 		return; /* don't check this path yet */
871 	 * provision a next check soonest,
872 	 * in case we exit abnormaly from here
874 	pp->tick = conf->checkint;
876 	if (!checker_selected(&pp->checker)) {
877 		pathinfo(pp, conf->hwtable, DI_SYSFS);
880 	if (!checker_selected(&pp->checker)) {
881 		condlog(0, "%s: checker is not set", pp->dev);
885 	 * Set checker in async mode.
886 	 * Honored only by checker implementing the said mode.
888 	checker_set_async(&pp->checker);
890 	newstate = checker_check(&pp->checker);
893 		condlog(2, "%s: unusable path", pp->dev);
894 		pathinfo(pp, conf->hwtable, 0);
898 	 * Async IO in flight. Keep the previous path state
899 	 * and reschedule as soon as possible
901 	if (newstate == PATH_PENDING) {
905 	if (newstate != pp->state) {
906 		int oldstate = pp->state;
907 		pp->state = newstate;
908 		LOG_MSG(1, checker_message(&pp->checker));
911 		 * upon state change, reset the checkint
912 		 * to the shortest delay
914 		pp->checkint = conf->checkint;
916 		if (newstate == PATH_DOWN || newstate == PATH_SHAKY ||
917 		    update_multipath_strings(pp->mpp, vecs->pathvec)) {
919 			 * proactively fail path in the DM
921 			if (oldstate == PATH_UP ||
922 			    oldstate == PATH_GHOST)
928 			 * cancel scheduled failback
930 			pp->mpp->failback_tick = 0;
932 			pp->mpp->stat_path_failures++;
937 		 * reinstate this path
939 		if (oldstate != PATH_UP &&
940 		    oldstate != PATH_GHOST)
941 			reinstate_path(pp, 1);
943 			reinstate_path(pp, 0);
946 		 * schedule [defered] failback
948 		if (pp->mpp->pgfailback > 0)
949 			pp->mpp->failback_tick =
950 				pp->mpp->pgfailback + 1;
951 		else if (pp->mpp->pgfailback == -FAILBACK_IMMEDIATE &&
952 			 need_switch_pathgroup(pp->mpp, 1))
953 			switch_pathgroup(pp->mpp);
956 		 * if at least one path is up in a group, and
957 		 * the group is disabled, re-enable it
959 		if (newstate == PATH_UP)
962 	else if (newstate == PATH_UP || newstate == PATH_GHOST) {
963 		LOG_MSG(4, checker_message(&pp->checker));
965 		 * double the next check delay.
966 		 * max at conf->max_checkint
968 		if (pp->checkint < (conf->max_checkint / 2))
969 			pp->checkint = 2 * pp->checkint;
971 			pp->checkint = conf->max_checkint;
973 		pp->tick = pp->checkint;
974 		condlog(4, "%s: delay next check %is",
975 			pp->dev_t, pp->tick);
977 	else if (newstate == PATH_DOWN)
978 		LOG_MSG(2, checker_message(&pp->checker));
980 	pp->state = newstate;
983 	 * path prio refreshing
985 	condlog(4, "path prio refresh");
986 	pathinfo(pp, conf->hwtable, DI_PRIO);
989 	 * pathgroup failback policy
991 	if (need_switch_pathgroup(pp->mpp, 0)) {
992 		if (pp->mpp->pgfailback > 0 &&
993 		    pp->mpp->failback_tick <= 0)
994 			pp->mpp->failback_tick =
995 				pp->mpp->pgfailback + 1;
996 		else if (pp->mpp->pgfailback ==
998 			switch_pathgroup(pp->mpp);
/*
 * Path-checker thread entry point.  Locks its pages in RAM
 * (mlockall), seeds every path's check interval, then loops forever
 * under the vecs lock (cancellation-safe via cleanup push/pop):
 * check every path, run the deferred-failback and retry ticks, and
 * periodically garbage-collect dead maps.
 * NOTE(review): gaps hide the loop construct and the sleep between
 * iterations.
 */
1003 checkerloop (void *ap)
1005 	struct vectors *vecs;
1010 	mlockall(MCL_CURRENT | MCL_FUTURE);
1011 	vecs = (struct vectors *)ap;
1012 	condlog(2, "path checkers start up");
1015 	 * init the path check interval
1017 	vector_foreach_slot (vecs->pathvec, pp, i) {
1018 		pp->checkint = conf->checkint;
1022 		pthread_cleanup_push(cleanup_lock, vecs->lock);
1026 		vector_foreach_slot (vecs->pathvec, pp, i) {
1027 			check_path(vecs, pp);
1029 		defered_failback_tick(vecs->mpvec);
1030 		retry_count_tick(vecs->mpvec);
1035 			condlog(4, "map garbage collection");
1036 			mpvec_garbage_collector(vecs);
1040 		lock_cleanup_pop(vecs->lock);
/*
 * Full (re)configuration pass: ensure the path and map vectors exist,
 * discover paths from sysfs (dropping blacklisted ones and seeding
 * checkint), discover maps from dm, coalesce paths into a new map set,
 * prune maps no longer allowed by the configuration, sync path states
 * into the kernel, swap in the new map vector, and finally run
 * setup_multipath() plus (when start_waiters is set) a dm-event waiter
 * thread for each map.
 * NOTE(review): gaps hide error returns and the "purge dm of old
 * maps" step's implementation.
 */
1047 configure (struct vectors * vecs, int start_waiters)
1049 	struct multipath * mpp;
1054 	if (!vecs->pathvec && !(vecs->pathvec = vector_alloc()))
1057 	if (!vecs->mpvec && !(vecs->mpvec = vector_alloc()))
1060 	if (!(mpvec = vector_alloc()))
1064 	 * probe for current path (from sysfs) and map (from dm) sets
1066 	path_discovery(vecs->pathvec, conf, DI_ALL);
1068 	vector_foreach_slot (vecs->pathvec, pp, i){
1069 		if (filter_path(conf, pp) > 0){
1070 			vector_del_slot(vecs->pathvec, i);
1075 			pp->checkint = conf->checkint;
1077 	if (map_discovery(vecs))
1081 	 * create new set of maps & push changed ones into dm
1083 	if (coalesce_paths(vecs, mpvec, NULL))
1087 	 * may need to remove some maps which are no longer relevant
1088 	 * e.g., due to blacklist changes in conf file
1090 	if (coalesce_maps(vecs, mpvec))
1095 	sync_maps_state(mpvec);
1098 	 * purge dm of old maps
1103 	 * save new set of maps formed by considering current path state
1105 	vector_free(vecs->mpvec);
1106 	vecs->mpvec = mpvec;
1109 	 * start dm event waiter threads for these new maps
1111 	vector_foreach_slot(vecs->mpvec, mpp, i) {
1112 		if (setup_multipath(vecs, mpp))
1115 		if (start_waiter_thread(mpp, vecs))
/*
 * SIGHUP/CLI reconfiguration: tear down all maps (with their waiter
 * threads) and all paths since they reference the old config, reload
 * the config file, carry the old verbosity over, re-apply checkint
 * defaults, and (in the hidden tail) rerun configure().
 */
1122 reconfigure (struct vectors * vecs)
1124 	struct config * old = conf;
1127 	 * free old map and path vectors ... they use old conf state
1129 	if (VECTOR_SIZE(vecs->mpvec))
1130 		remove_maps_and_stop_waiters(vecs);
1132 	if (VECTOR_SIZE(vecs->pathvec))
1133 		free_pathvec(vecs->pathvec, FREE_PATHS);
1135 	vecs->pathvec = NULL;
1138 	if (load_config(DEFAULT_CONFIGFILE))
1141 	conf->verbosity = old->verbosity;
1143 	if (!conf->checkint) {
1144 		conf->checkint = DEFAULT_CHECKINT;
1145 		conf->max_checkint = MAX_CHECKINT(conf->checkint);
/*
 * Allocate and initialize the global vectors container: the struct
 * itself, its pthread mutex lock, and (in hidden lines) the path/map
 * vectors; logs and returns NULL on allocation failure.
 */
1152 static struct vectors *
1155 	struct vectors * vecs;
1157 	vecs = (struct vectors *)MALLOC(sizeof(struct vectors));
1163 		(pthread_mutex_t *)MALLOC(sizeof(pthread_mutex_t));
1168 	pthread_mutex_init(vecs->lock, NULL);
1174 	condlog(0, "failed to init paths");
/*
 * signal(2)-style wrapper over sigaction(): install func as the
 * handler for signo with an empty blocked-signal mask, returning the
 * previous handler.  NOTE(review): gaps hide sa_flags initialization
 * and the sigaction() error branch — confirm against full source.
 */
1179 signal_set(int signo, void (*func) (int))
1182 	struct sigaction sig;
1183 	struct sigaction osig;
1185 	sig.sa_handler = func;
1186 	sigemptyset(&sig.sa_mask);
1189 	r = sigaction(signo, &sig, &osig);
1194 		return (osig.sa_handler);
/* Fragment of the SIGHUP handler: reconfigure under gvecs->lock. */
1200 	condlog(2, "reconfigure (SIGHUP)");
1204 	unlock(gvecs->lock);
/* Fragment of the termination handler: free debug allocations. */
1207 	dbg_free_final(NULL);
/* Fragment of the SIGUSR1 handler: log-only notification. */
1220 	condlog(3, "SIGUSR1 received");
/*
 * signals(): install daemon signal dispositions — reconfigure on
 * SIGHUP, notify on SIGUSR1, clean exit on SIGINT/SIGTERM, and ignore
 * SIGPIPE so a dead CLI socket peer cannot kill the daemon.
 */
1226 	signal_set(SIGHUP, sighup);
1227 	signal_set(SIGUSR1, sigusr1);
1228 	signal_set(SIGINT, sigend);
1229 	signal_set(SIGTERM, sigend);
1230 	signal(SIGPIPE, SIG_IGN);
/*
 * Request real-time round-robin scheduling at maximum priority (99)
 * for the daemon; failure is only warned about, not fatal.
 */
1237 	static struct sched_param sched_param = {
1238 		.sched_priority = 99
1241 	res = sched_setscheduler (0, SCHED_RR, &sched_param);
1244 		condlog(LOG_WARNING, "Could not set SCHED_RR at priority 99");
/*
 * Write "val" to /proc/self/oom_adj so the OOM killer treats the
 * daemon accordingly (typically to shield it from being killed).
 * NOTE(review): gaps hide the fopen-failure branch and fclose.
 */
1249 set_oom_adj (int val)
1253 	fp = fopen("/proc/self/oom_adj", "w");
1258 	fprintf(fp, "%i", val);
/*
 * Daemon main body (runs after daemonize): lock memory, load the
 * config, init checkers and prioritizers, apply config defaults
 * (checkint, RLIMIT_NOFILE from max_fds), write the pid file, build
 * the global vectors, mount-check sysfs, run the initial configure(),
 * then start the three service threads (checker loop, uevent loop,
 * CLI socket loop) detached with 64 KiB stacks and block on exit_cond
 * until exit_daemon() fires.  Teardown: stop maps/waiters, free
 * paths, cancel threads, free CLI handlers, destroy the lock, and
 * free the config last because the loggers still reference it.
 */
1263 child (void * param)
1265 	pthread_t check_thr, uevent_thr, uxlsnr_thr;
1266 	pthread_attr_t attr;
1267 	struct vectors * vecs;
1269 	mlockall(MCL_CURRENT | MCL_FUTURE);
1274 	condlog(2, "--------start up--------");
1275 	condlog(2, "read " DEFAULT_CONFIGFILE);
1277 	if (load_config(DEFAULT_CONFIGFILE))
1280 	if (init_checkers()) {
1281 		condlog(0, "failed to initialize checkers");
1285 		condlog(0, "failed to initialize prioritizers");
1289 	setlogmask(LOG_UPTO(conf->verbosity + 3));
1292 	 * fill the voids left in the config file
1294 	if (!conf->checkint) {
1295 		conf->checkint = DEFAULT_CHECKINT;
1296 		conf->max_checkint = MAX_CHECKINT(conf->checkint);
1299 	if (conf->max_fds) {
1300 		struct rlimit fd_limit;
1301 		if (conf->max_fds > 0) {
1302 			fd_limit.rlim_cur = conf->max_fds;
1303 			fd_limit.rlim_max = conf->max_fds;
1306 			fd_limit.rlim_cur = RLIM_INFINITY;
1307 			fd_limit.rlim_max = RLIM_INFINITY;
1309 		if (setrlimit(RLIMIT_NOFILE, &fd_limit) < 0)
1310 			condlog(0, "can't set open fds limit to %d : %s\n",
1311 				conf->max_fds, strerror(errno));
1314 	if (pidfile_create(DEFAULT_PIDFILE, getpid())) {
1323 	vecs = gvecs = init_vecs();
1328 	if (sysfs_init(conf->sysfs_dir, FILE_NAME_SIZE)) {
1329 		condlog(0, "can not find sysfs mount point");
1334 	 * fetch and configure both paths and multipaths
1336 	if (configure(vecs, 1)) {
1337 		condlog(0, "failure during configuration");
1344 	pthread_attr_init(&attr);
1345 	pthread_attr_setstacksize(&attr, 64 * 1024);
1346 	pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
1348 	pthread_create(&check_thr, &attr, checkerloop, vecs);
1349 	pthread_create(&uevent_thr, &attr, ueventloop, vecs);
1350 	pthread_create(&uxlsnr_thr, &attr, uxlsnrloop, vecs);
1352 	pthread_cond_wait(&exit_cond, &exit_mutex);
1358 	remove_maps_and_stop_waiters(vecs);
1359 	free_pathvec(vecs->pathvec, FREE_PATHS);
1361 	pthread_cancel(check_thr);
1362 	pthread_cancel(uevent_thr);
1363 	pthread_cancel(uxlsnr_thr);
1369 	free_handlers(handlers);
1374 	pthread_mutex_destroy(vecs->lock);
1380 	condlog(2, "--------shut down-------");
1389 	 * Freeing config must be done after condlog() and dm_lib_exit(),
1390 	 * because logging functions like dlog() and dm_write_log()
1391 	 * reference the config.
1397 	dbg_free_final(NULL);
/*
 * Fragment of daemonize(): classic double-fork to detach from the
 * controlling terminal, then redirect stdin to /dev/null and
 * stdout/stderr to /dev/console before closing the inherited
 * standard descriptors.  NOTE(review): gaps hide setsid()/exit paths.
 */
1409 	if( (pid = fork()) < 0){
1410 		fprintf(stderr, "Failed first fork : %s\n", strerror(errno));
1418 	if ( (pid = fork()) < 0)
1419 		fprintf(stderr, "Failed second fork : %s\n", strerror(errno));
1423 	in_fd = open("/dev/null", O_RDONLY);
1425 		fprintf(stderr, "cannot open /dev/null for input : %s\n",
1429 	out_fd = open("/dev/console", O_WRONLY);
1431 		fprintf(stderr, "cannot open /dev/console for output : %s\n",
1436 	close(STDIN_FILENO);
1438 	close(STDOUT_FILENO);
1440 	close(STDERR_FILENO);
/*
 * Entry point: require root, set a restrictive umask, allocate the
 * global config, parse -d (foreground/debug), -v <level> (verbosity,
 * digit-checked before atoi) and -k (CLI mode) options, then hand
 * control to child() (daemonizing first unless -d — hidden by gaps).
 */
1451 main (int argc, char *argv[])
1453 	extern char *optarg;
1461 	if (getuid() != 0) {
1462 		fprintf(stderr, "need to be root\n");
1466 	/* make sure we don't lock any path */
1468 	umask(umask(077) | 022);
1470 	conf = alloc_config();
1475 	while ((arg = getopt(argc, argv, ":dv:k::")) != EOF ) {
1479 			//debug=1; /* ### comment me out ### */
1482 			if (sizeof(optarg) > sizeof(char *) ||
1483 			    !isdigit(optarg[0]))
1486 			conf->verbosity = atoi(optarg);
1509 	return (child(NULL));