3 #include <libdevmapper.h>
10 #include <sysfs/libsysfs.h>
11 #include <sysfs/dlist.h>
17 #include <path_state.h>
28 #include <blacklist.h>
33 #include <devmapper.h>
35 #include <discovery.h>
39 #include <switchgroup.h>
40 #include <path_state.h>
48 #include "cli_handlers.h"
50 #define FILE_NAME_SIZE 256
53 #define LOG_MSG(a,b) \
55 condlog(a, "%s: %s", pp->dev_t, b); \
56 memset(b, 0, MAX_CHECKER_MSG_SIZE); \
61 fprintf(stderr, "%s:%s(%i) lock %p\n", __FILE__, __FUNCTION__, __LINE__, a); \
64 fprintf(stderr, "%s:%s(%i) unlock %p\n", __FILE__, __FUNCTION__, __LINE__, a); \
65 pthread_mutex_unlock(a)
66 #define lock_cleanup_pop(a) \
67 fprintf(stderr, "%s:%s(%i) unlock %p\n", __FILE__, __FUNCTION__, __LINE__, a); \
68 pthread_cleanup_pop(1);
70 #define lock(a) pthread_mutex_lock(a)
71 #define unlock(a) pthread_mutex_unlock(a)
72 #define lock_cleanup_pop(a) pthread_cleanup_pop(1);
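/*
 * the lock helpers come in two flavors: the stderr-tracing variants above
 * (presumably selected by a debug #ifdef) and the plain ones that map
 * straight onto the pthread primitives
 */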
75 pthread_cond_t exit_cond = PTHREAD_COND_INITIALIZER;
76 pthread_mutex_t exit_mutex = PTHREAD_MUTEX_INITIALIZER;
85 char mapname[WWID_SIZE];
89 static struct event_thread *
93 struct event_thread * wp;
95 wp = (struct event_thread *)MALLOC(sizeof(struct event_thread));
101 free_waiter (void * data)
103 struct event_thread * wp = (struct event_thread *)data;
106 dm_task_destroy(wp->dmt);
111 stop_waiter_thread (struct multipath * mpp, struct vectors * vecs)
113 struct event_thread * wp = (struct event_thread *)mpp->waiter;
117 condlog(3, "%s: no waiter thread", mpp->alias);
123 condlog(3, "%s: thread not started", mpp->alias);
126 condlog(2, "%s: stop event checker thread", wp->mapname);
127 pthread_kill(thread, SIGHUP);
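/*
 * SIGHUP interrupts the waiter's dm_task_run() wait: waiteventloop() below
 * unblocks SIGHUP around that wait, apparently so this kill can wake it up
 */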
131 cleanup_lock (void * data)
133 pthread_mutex_unlock((pthread_mutex_t *)data);
137 adopt_paths (struct vectors * vecs, struct multipath * mpp)
145 vector_foreach_slot (vecs->pathvec, pp, i) {
146 if (!strncmp(mpp->wwid, pp->wwid, WWID_SIZE)) {
147 condlog(4, "%s ownership set", pp->dev_t);
154 orphan_path (struct path * pp)
158 pp->dmstate = PSTATE_UNDEF;
159 pp->checker_context = NULL;
170 orphan_paths (struct vectors * vecs, struct multipath * mpp)
175 vector_foreach_slot (vecs->pathvec, pp, i) {
176 if (pp->mpp == mpp) {
177 condlog(4, "%s is orphaned", pp->dev_t);
184 update_multipath_table (struct multipath *mpp, vector pathvec)
189 if (dm_get_map(mpp->alias, &mpp->size, mpp->params))
192 if (disassemble_map(pathvec, mpp->params, mpp))
199 update_multipath_status (struct multipath *mpp)
204 if(dm_get_status(mpp->alias, mpp->status))
207 if (disassemble_status(mpp->status, mpp))
214 update_multipath_strings (struct multipath *mpp, vector pathvec)
218 mpp->selector = NULL;
223 mpp->features = NULL;
226 if (mpp->hwhandler) {
227 FREE(mpp->hwhandler);
228 mpp->hwhandler = NULL;
231 free_pgvec(mpp->pg, KEEP_PATHS);
234 if (update_multipath_table(mpp, pathvec))
237 if (update_multipath_status(mpp))
244 set_multipath_wwid (struct multipath * mpp)
249 dm_get_uuid(mpp->alias, mpp->wwid);
253 pathcount (struct multipath *mpp, int state)
255 struct pathgroup *pgp;
260 vector_foreach_slot (mpp->pg, pgp, i)
261 vector_foreach_slot (pgp->paths, pp, j)
262 if (pp->state == state)
268 * mpp->no_path_retry:
269 * -2 (QUEUE) : queue_if_no_path enabled, never turned off
270 * -1 (FAIL) : fail_if_no_path
271 * 0 (UNDEF) : nothing
272 * >0 : queue_if_no_path enabled, turned off after polling n times
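* e.g. no_path_retry 5 with a 5 second checkint yields retry_tick = 26 below,
* i.e. roughly 25 seconds of queueing (assuming the checker loop ticks once a
* second) before retry_count_tick() turns queueing off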
275 update_queue_mode_del_path(struct multipath *mpp)
277 if (--mpp->nr_active == 0 && mpp->no_path_retry > 0) {
280 * meaning of +1: retry_tick may be decremented in
281 * checkerloop before starting retry.
283 mpp->retry_tick = mpp->no_path_retry * conf->checkint + 1;
284 condlog(1, "%s: Entering recovery mode: max_retries=%d",
285 mpp->alias, mpp->no_path_retry);
287 condlog(2, "%s: remaining active paths: %d", mpp->alias, mpp->nr_active);
291 update_queue_mode_add_path(struct multipath *mpp)
293 if (mpp->nr_active++ == 0 && mpp->no_path_retry > 0) {
294 /* come back to normal mode from retry mode */
296 dm_queue_if_no_path(mpp->alias, 1);
297 condlog(2, "%s: queue_if_no_path enabled", mpp->alias);
298 condlog(1, "%s: Recovered to normal mode", mpp->alias);
300 condlog(2, "%s: remaining active paths: %d", mpp->alias, mpp->nr_active);
304 set_no_path_retry(struct multipath *mpp)
307 mpp->nr_active = pathcount(mpp, PATH_UP);
308 select_no_path_retry(mpp);
310 switch (mpp->no_path_retry) {
311 case NO_PATH_RETRY_UNDEF:
313 case NO_PATH_RETRY_FAIL:
314 dm_queue_if_no_path(mpp->alias, 0);
316 case NO_PATH_RETRY_QUEUE:
317 dm_queue_if_no_path(mpp->alias, 1);
320 dm_queue_if_no_path(mpp->alias, 1);
321 if (mpp->nr_active == 0) {
322 /* Enter retry mode */
323 mpp->retry_tick = mpp->no_path_retry * conf->checkint;
324 condlog(1, "%s: Entering recovery mode: max_retries=%d",
325 mpp->alias, mpp->no_path_retry);
331 static struct hwentry *
332 extract_hwe_from_path(struct multipath * mpp)
335 struct pathgroup * pgp;
337 pgp = VECTOR_SLOT(mpp->pg, 0);
338 pp = VECTOR_SLOT(pgp->paths, 0);
344 remove_map (struct multipath * mpp, struct vectors * vecs)
348 stop_waiter_thread(mpp, vecs);
351 * clear references to this map
353 orphan_paths(vecs, mpp);
356 * purge the multipath vector
358 i = find_slot(vecs->mpvec, (void *)mpp);
359 vector_del_slot(vecs->mpvec, i);
364 free_multipath(mpp, KEEP_PATHS);
369 remove_maps (struct vectors * vecs)
372 struct multipath * mpp;
374 vector_foreach_slot (vecs->mpvec, mpp, i) {
375 remove_map(mpp, vecs);
379 vector_free(vecs->mpvec);
384 setup_multipath (struct vectors * vecs, struct multipath * mpp)
386 set_multipath_wwid(mpp);
387 mpp->mpe = find_mpe(mpp->wwid);
388 condlog(4, "discovered map %s", mpp->alias);
390 if (update_multipath_strings(mpp, vecs->pathvec))
393 adopt_paths(vecs, mpp);
394 select_pgfailback(mpp);
395 mpp->hwe = extract_hwe_from_path(mpp);
396 set_no_path_retry(mpp);
400 condlog(0, "%s: failed to setup multipath", mpp->alias);
401 remove_map(mpp, vecs);
406 need_switch_pathgroup (struct multipath * mpp, int refresh)
408 struct pathgroup * pgp;
412 if (!mpp || mpp->pgfailback == -FAILBACK_MANUAL)
416 * Refresh path priority values
419 vector_foreach_slot (mpp->pg, pgp, i)
420 vector_foreach_slot (pgp->paths, pp, j)
421 pathinfo(pp, conf->hwtable, DI_PRIO);
423 select_path_group(mpp); /* sets mpp->nextpg */
424 pgp = VECTOR_SLOT(mpp->pg, mpp->nextpg - 1);
426 if (pgp && pgp->status != PGSTATE_ACTIVE)
433 switch_pathgroup (struct multipath * mpp)
435 struct pathgroup * pgp;
437 pgp = VECTOR_SLOT(mpp->pg, mpp->nextpg - 1);
439 if (pgp && pgp->status != PGSTATE_ACTIVE) {
440 dm_switchgroup(mpp->alias, mpp->nextpg);
441 condlog(2, "%s: switch to path group #%i",
442 mpp->alias, mpp->nextpg);
447 update_multipath (struct vectors *vecs, char *mapname)
449 struct multipath *mpp;
450 struct pathgroup *pgp;
455 mpp = find_mp(vecs->mpvec, mapname);
460 free_pgvec(mpp->pg, KEEP_PATHS);
463 if (setup_multipath(vecs, mpp))
464 goto out; /* mpp freed in setup_multipath */
467 * compare checkers states with DM states
469 vector_foreach_slot (mpp->pg, pgp, i) {
470 vector_foreach_slot (pgp->paths, pp, j) {
471 if (pp->dmstate != PSTATE_FAILED)
474 if (pp->state != PATH_DOWN) {
475 condlog(2, "%s: mark as failed", pp->dev_t);
476 pp->state = PATH_DOWN;
477 update_queue_mode_del_path(mpp);
481 * schedule the next check earlier
483 if (pp->tick > conf->checkint)
484 pp->tick = conf->checkint;
491 condlog(0, "failed to update multipath");
496 static sigset_t unblock_sighup(void)
501 sigaddset(&set, SIGHUP);
502 pthread_sigmask(SIG_UNBLOCK, &set, &old);
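/* the old mask is handed back so the caller can restore it once the wait is over */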
507 * returns the reschedule delay
508 * negative means *stop*
511 waiteventloop (struct event_thread * waiter)
517 if (!waiter->event_nr)
518 waiter->event_nr = dm_geteventnr(waiter->mapname);
520 if (!(waiter->dmt = dm_task_create(DM_DEVICE_WAITEVENT)))
523 if (!dm_task_set_name(waiter->dmt, waiter->mapname)) {
524 dm_task_destroy(waiter->dmt);
528 if (waiter->event_nr && !dm_task_set_event_nr(waiter->dmt,
530 dm_task_destroy(waiter->dmt);
534 dm_task_no_open_count(waiter->dmt);
536 /* accept wait interruption */
537 set = unblock_sighup();
539 /* interruption spits messages */
543 r = dm_task_run(waiter->dmt);
545 /* wait is over : event or interrupt */
546 pthread_sigmask(SIG_SETMASK, &set, NULL);
549 if (!r) /* wait interrupted by signal */
552 dm_task_destroy(waiter->dmt);
560 condlog(3, "%s: devmap event #%i",
561 waiter->mapname, waiter->event_nr);
566 * 1) a table reload, which means our mpp structure is
567 * obsolete : refresh it through update_multipath()
568 * 2) a path failed by DM : mark as such through
570 * 3) map has gone away : stop the thread.
571 * 4) a path reinstate : nothing to do
572 * 5) a switch group : nothing to do
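* update_multipath() below handles 1) and 2); a non-zero return from it is
* treated as "map gone" (case 3) and makes this waiter thread stop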
574 pthread_cleanup_push(cleanup_lock, waiter->vecs->lock);
575 lock(waiter->vecs->lock);
576 r = update_multipath(waiter->vecs, waiter->mapname);
577 lock_cleanup_pop(waiter->vecs->lock);
580 return -1; /* stop the thread */
582 event_nr = dm_geteventnr(waiter->mapname);
584 if (waiter->event_nr == event_nr)
585 return 1; /* upon problem reschedule 1s later */
587 waiter->event_nr = event_nr;
589 return -1; /* never reached */
593 waitevent (void * et)
596 struct event_thread *waiter;
598 mlockall(MCL_CURRENT | MCL_FUTURE);
600 waiter = (struct event_thread *)et;
601 pthread_cleanup_push(free_waiter, et);
604 r = waiteventloop(waiter);
612 pthread_cleanup_pop(1);
617 start_waiter_thread (struct multipath * mpp, struct vectors * vecs)
620 struct event_thread * wp;
625 if (pthread_attr_init(&attr))
628 pthread_attr_setstacksize(&attr, 32 * 1024);
629 pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
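/* one detached waiter thread with a small 32k stack is spawned per multipath map */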
636 mpp->waiter = (void *)wp;
637 strncpy(wp->mapname, mpp->alias, WWID_SIZE);
640 if (pthread_create(&wp->thread, &attr, waitevent, wp)) {
641 condlog(0, "%s: cannot create event checker", wp->mapname);
644 condlog(2, "%s: event checker started", wp->mapname);
651 condlog(0, "failed to start waiter thread");
656 uev_add_map (char * devname, struct vectors * vecs)
659 char dev_t[BLK_DEV_SIZE];
661 struct multipath * mpp;
663 if (sysfs_get_dev(sysfs_path, devname, dev_t, BLK_DEV_SIZE))
666 if (sscanf(dev_t, "%d:%d", &major, &minor) != 2)
669 alias = dm_mapname(major, minor);
674 if (!dm_type(alias, DEFAULT_TARGET)) {
675 condlog(4, "%s: not a multipath map", alias);
680 mpp = find_mp(vecs->mpvec, alias);
684 * this should not happen,
685 * we missed a remove map event (not sent ?)
687 condlog(2, "%s: already registered", alias);
688 remove_map(mpp, vecs);
692 * now we can allocate
694 mpp = alloc_multipath();
702 if (setup_multipath(vecs, mpp))
703 return 1; /* mpp freed in setup_multipath */
705 if (!vector_alloc_slot(vecs->mpvec))
708 vector_set_slot(vecs->mpvec, mpp);
709 adopt_paths(vecs, mpp);
711 if (start_waiter_thread(mpp, vecs))
716 condlog(2, "%s: add devmap failed", mpp->alias);
717 remove_map(mpp, vecs);
722 uev_remove_map (char * devname, struct vectors * vecs)
725 struct multipath * mpp;
727 if (sscanf(devname, "dm-%d", &minor) != 1)
730 mpp = find_mp_by_minor(vecs->mpvec, minor);
733 condlog(3, "%s: devmap not registered, can't remove",
738 condlog(2, "remove %s devmap", mpp->alias);
739 remove_map(mpp, vecs);
745 uev_add_path (char * devname, struct vectors * vecs)
749 pp = find_path_by_dev(vecs->pathvec, devname);
752 condlog(3, "%s: already in pathvec");
755 pp = store_pathinfo(vecs->pathvec, conf->hwtable,
756 devname, DI_SYSFS | DI_WWID);
759 condlog(0, "%s: failed to store path info", devname);
763 condlog(2, "%s: path checker registered", devname);
764 pp->mpp = find_mp_by_wwid(vecs->mpvec, pp->wwid);
767 condlog(4, "%s: ownership set to %s",
768 pp->dev_t, pp->mpp->alias);
770 condlog(4, "%s: orphaned", pp->dev_t);
778 uev_remove_path (char * devname, struct vectors * vecs)
783 pp = find_path_by_dev(vecs->pathvec, devname);
786 condlog(3, "%s: not in pathvec", devname);
790 if (pp->mpp && pp->state == PATH_UP)
791 update_queue_mode_del_path(pp->mpp);
793 condlog(2, "remove %s path checker", devname);
794 i = find_slot(vecs->pathvec, (void *)pp);
795 vector_del_slot(vecs->pathvec, i);
802 show_paths (char ** r, int * len, struct vectors * vecs)
808 struct path_layout pl;
810 get_path_layout(&pl, vecs->pathvec);
811 reply = MALLOC(MAX_REPLY_LEN);
818 if (VECTOR_SIZE(vecs->pathvec) > 0)
819 c += snprint_path_header(c, reply + MAX_REPLY_LEN - c,
820 PRINT_PATH_CHECKER, &pl);
822 vector_foreach_slot(vecs->pathvec, pp, i)
823 c += snprint_path(c, reply + MAX_REPLY_LEN - c,
824 PRINT_PATH_CHECKER, pp, &pl);
827 *len = (int)(c - reply + 1);
832 show_maps (char ** r, int *len, struct vectors * vecs)
835 struct multipath * mpp;
838 struct map_layout ml;
840 get_map_layout(&ml, vecs->mpvec);
841 reply = MALLOC(MAX_REPLY_LEN);
847 if (VECTOR_SIZE(vecs->mpvec) > 0)
848 c += snprint_map_header(c, reply + MAX_REPLY_LEN - c,
849 PRINT_MAP_FAILBACK, &ml);
851 vector_foreach_slot(vecs->mpvec, mpp, i)
852 c += snprint_map(c, reply + MAX_REPLY_LEN - c,
853 PRINT_MAP_FAILBACK, mpp, &ml);
856 *len = (int)(c - reply + 1);
861 dump_pathvec (char ** r, int * len, struct vectors * vecs)
868 *len = VECTOR_SIZE(vecs->pathvec) * sizeof(struct path);
869 reply = (char *)MALLOC(*len);
877 vector_foreach_slot (vecs->pathvec, pp, i) {
878 memcpy((void *)p, pp, sizeof(struct path));
879 p += sizeof(struct path);
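/*
 * raw binary dump: struct path objects are copied back to back, so whatever
 * reads this reply must share the daemon's struct path layout
 */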
882 /* return negative to hint caller not to add "ok" to the dump */
887 map_discovery (struct vectors * vecs)
890 struct multipath * mpp;
892 if (dm_get_maps(vecs->mpvec, "multipath"))
895 vector_foreach_slot (vecs->mpvec, mpp, i) {
896 if (setup_multipath(vecs, mpp))
898 mpp->minor = dm_get_minor(mpp->alias);
899 start_waiter_thread(mpp, vecs);
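/* at startup every discovered map is set up and gets its own event waiter thread */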
906 reconfigure (struct vectors * vecs)
908 struct config * old = conf;
909 struct multipath * mpp;
915 if (load_config(DEFAULT_CONFIGFILE)) {
917 condlog(2, "reconfigure failed, continue with old config");
920 conf->verbosity = old->verbosity;
923 vector_foreach_slot (vecs->mpvec, mpp, i) {
924 mpp->mpe = find_mpe(mpp->wwid);
925 mpp->hwe = extract_hwe_from_path(mpp);
926 adopt_paths(vecs, mpp);
927 set_no_path_retry(mpp);
929 vector_foreach_slot (vecs->pathvec, pp, i) {
934 condlog(2, "reconfigured");
939 uxsock_trigger (char * str, char ** reply, int * len, void * trigger_data)
941 struct vectors * vecs;
946 vecs = (struct vectors *)trigger_data;
948 pthread_cleanup_push(cleanup_lock, vecs->lock);
951 r = parse_cmd(str, reply, len, vecs);
954 *reply = STRDUP("fail\n");
955 *len = strlen(*reply) + 1;
958 else if (!r && *len == 0) {
959 *reply = STRDUP("ok\n");
960 *len = strlen(*reply) + 1;
963 /* else if (r < 0) leave *reply alone */
965 lock_cleanup_pop(vecs->lock);
970 uev_discard(char * devpath)
975 * keep only block devices, discard partitions
977 if (sscanf(devpath, "/block/%10s", a) != 1 ||
978 sscanf(devpath, "/block/%10[^/]/%10s", a, b) == 2) {
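/*
 * e.g. "/block/sda" passes the filter, while "/block/sda/sda1" matches
 * the second pattern and is discarded as a partition
 */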
979 condlog(4, "discard event on %s", devpath);
986 uev_trigger (struct uevent * uev, void * trigger_data)
990 struct vectors * vecs;
992 vecs = (struct vectors *)trigger_data;
994 if (uev_discard(uev->devpath))
997 basename(uev->devpath, devname);
1001 * device map add/remove event
1003 if (!strncmp(devname, "dm-", 3)) {
1004 if (!strncmp(uev->action, "add", 3)) {
1005 r = uev_add_map(devname, vecs);
1009 if (!strncmp(uev->action, "remove", 6)) {
1010 r = uev_remove_map(devname, vecs);
1018 * path add/remove event
1020 if (blacklist(conf->blist, devname))
1023 if (!strncmp(uev->action, "add", 3)) {
1024 r = uev_add_path(devname, vecs);
1027 if (!strncmp(uev->action, "remove", 6)) {
1028 r = uev_remove_path(devname, vecs);
1038 ueventloop (void * ap)
1040 if (uevent_listen(&uev_trigger, ap))
1041 fprintf(stderr, "error starting uevent listener\n");
1047 uxlsnrloop (void * ap)
1052 if (alloc_handlers())
1055 add_handler(LIST+PATHS, cli_list_paths);
1056 add_handler(LIST+MAPS, cli_list_maps);
1057 add_handler(ADD+PATH, cli_add_path);
1058 add_handler(DEL+PATH, cli_del_path);
1059 add_handler(ADD+MAP, cli_add_map);
1060 add_handler(DEL+MAP, cli_del_map);
1061 add_handler(SWITCH+MAP+GROUP, cli_switch_group);
1062 add_handler(DUMP+PATHVEC, cli_dump_pathvec);
1063 add_handler(RECONFIGURE, cli_reconfigure);
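/*
 * these keys presumably map to the interactive commands accepted on the unix
 * socket ("list paths", "add path ...", "reconfigure", ...), dispatched by
 * parse_cmd() from uxsock_trigger()
 */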
1065 uxsock_listen(&uxsock_trigger, ap);
1071 exit_daemon (int status)
1074 fprintf(stderr, "bad exit status. see daemon.log\n");
1076 condlog(3, "unlink pidfile");
1077 unlink(DEFAULT_PIDFILE);
1080 pthread_cond_signal(&exit_cond);
1081 unlock(&exit_mutex);
1087 fail_path (struct path * pp)
1092 condlog(2, "checker failed path %s in map %s",
1093 pp->dev_t, pp->mpp->alias);
1095 dm_fail_path(pp->mpp->alias, pp->dev_t);
1096 update_queue_mode_del_path(pp->mpp);
1100 * caller must have locked the path list before calling this function
1103 reinstate_path (struct path * pp)
1108 if (dm_reinstate(pp->mpp->alias, pp->dev_t))
1109 condlog(0, "%s: reinstate failed", pp->dev_t);
1111 condlog(2, "%s: reinstated", pp->dev_t);
1112 update_queue_mode_add_path(pp->mpp);
1117 enable_group(struct path * pp)
1119 struct pathgroup * pgp;
1122 * if the path was added through uev_add_path, pgindex can be unset.
1123 * the next update_multipath_strings() will set it, upon the map reload event.
1125 * we can safely return here, because upon map reload all
1126 * path groups will be enabled.
1131 pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
1133 if (pgp->status == PGSTATE_DISABLED) {
1134 condlog(2, "%s: enable group #%i", pp->mpp->alias, pp->pgindex);
1135 dm_enablegroup(pp->mpp->alias, pp->pgindex);
1140 mpvec_garbage_collector (struct vectors * vecs)
1142 struct multipath * mpp;
1145 vector_foreach_slot (vecs->mpvec, mpp, i) {
1146 if (mpp && mpp->alias && !dm_map_present(mpp->alias)) {
1147 condlog(2, "%s: remove dead map", mpp->alias);
1148 remove_map(mpp, vecs);
1155 defered_failback_tick (vector mpvec)
1157 struct multipath * mpp;
1160 vector_foreach_slot (mpvec, mpp, i) {
1162 * deferred failback gets one step closer
1164 if (mpp->pgfailback > 0 && mpp->failback_tick > 0) {
1165 mpp->failback_tick--;
1167 if (!mpp->failback_tick && need_switch_pathgroup(mpp, 1))
1168 switch_pathgroup(mpp);
1174 retry_count_tick(vector mpvec)
1176 struct multipath *mpp;
1179 vector_foreach_slot (mpvec, mpp, i) {
1180 if (mpp->retry_tick) {
1181 condlog(4, "%s: Retrying.. No active path", mpp->alias);
1182 if(--mpp->retry_tick == 0) {
1183 dm_queue_if_no_path(mpp->alias, 0);
1184 condlog(2, "%s: Disable queueing", mpp->alias);
1191 checkerloop (void *ap)
1193 struct vectors *vecs;
1197 char checker_msg[MAX_CHECKER_MSG_SIZE];
1199 mlockall(MCL_CURRENT | MCL_FUTURE);
1201 memset(checker_msg, 0, MAX_CHECKER_MSG_SIZE);
1202 vecs = (struct vectors *)ap;
1204 condlog(2, "path checkers start up");
1207 * init the path check interval
1209 vector_foreach_slot (vecs->pathvec, pp, i) {
1210 pp->checkint = conf->checkint;
1214 pthread_cleanup_push(cleanup_lock, vecs->lock);
1218 vector_foreach_slot (vecs->pathvec, pp, i) {
1222 if (pp->tick && --pp->tick)
1223 continue; /* don't check this path yet */
1226 * provision the next check as soon as possible,
1227 * in case we exit abnormally from here
1229 pp->tick = conf->checkint;
1232 pathinfo(pp, conf->hwtable, DI_SYSFS);
1237 condlog(0, "%s: checkfn is void", pp->dev);
1240 newstate = pp->checkfn(pp->fd, checker_msg,
1241 &pp->checker_context);
1243 if (newstate != pp->state) {
1244 pp->state = newstate;
1245 LOG_MSG(1, checker_msg);
1248 * upon state change, reset the checkint
1249 * to the shortest delay
1251 pp->checkint = conf->checkint;
1253 if (newstate == PATH_DOWN ||
1254 newstate == PATH_SHAKY) {
1256 * proactively fail path in the DM
1261 * cancel scheduled failback
1263 pp->mpp->failback_tick = 0;
1269 * reinstate this path
1274 * need to switch group ?
1276 update_multipath_strings(pp->mpp,
1280 * schedule deferred failback
1282 if (pp->mpp->pgfailback > 0)
1283 pp->mpp->failback_tick =
1284 pp->mpp->pgfailback + 1;
1285 else if (pp->mpp->pgfailback == -FAILBACK_IMMEDIATE &&
1286 need_switch_pathgroup(pp->mpp, 1))
1287 switch_pathgroup(pp->mpp);
1290 * if at least one path is up in a group, and
1291 * the group is disabled, re-enable it
1293 if (newstate == PATH_UP)
1296 else if (newstate == PATH_UP || newstate == PATH_GHOST) {
1297 LOG_MSG(4, checker_msg);
1299 * double the next check delay.
1300 * max at conf->max_checkint
1302 if (pp->checkint < (conf->max_checkint / 2))
1303 pp->checkint = 2 * pp->checkint;
1305 pp->checkint = conf->max_checkint;
1307 pp->tick = pp->checkint;
1308 condlog(4, "%s: delay next check %is",
1309 pp->dev_t, pp->tick);
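/*
 * e.g. starting from checkint 5 with max_checkint 20, a path that stays up
 * is re-checked after 10, then 20, then every 20 seconds
 */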
1312 pp->state = newstate;
1315 * path prio refreshing
1317 condlog(4, "path prio refresh");
1318 pathinfo(pp, conf->hwtable, DI_PRIO);
1320 if (need_switch_pathgroup(pp->mpp, 0)) {
1321 if (pp->mpp->pgfailback > 0)
1322 pp->mpp->failback_tick =
1323 pp->mpp->pgfailback + 1;
1324 else if (pp->mpp->pgfailback ==
1325 -FAILBACK_IMMEDIATE)
1326 switch_pathgroup(pp->mpp);
1329 defered_failback_tick(vecs->mpvec);
1330 retry_count_tick(vecs->mpvec);
1335 condlog(4, "map garbage collection");
1336 mpvec_garbage_collector(vecs);
1340 lock_cleanup_pop(vecs->lock);
1346 static struct vectors *
1349 struct vectors * vecs;
1351 vecs = (struct vectors *)MALLOC(sizeof(struct vectors));
1357 (pthread_mutex_t *)MALLOC(sizeof(pthread_mutex_t));
1362 vecs->pathvec = vector_alloc();
1367 vecs->mpvec = vector_alloc();
1372 pthread_mutex_init(vecs->lock, NULL);
1377 vector_free(vecs->pathvec);
1382 condlog(0, "failed to init paths");
1387 signal_set(int signo, void (*func) (int))
1390 struct sigaction sig;
1391 struct sigaction osig;
1393 sig.sa_handler = func;
1394 sigemptyset(&sig.sa_mask);
1397 r = sigaction(signo, &sig, &osig);
1402 return (osig.sa_handler);
1408 condlog(3, "SIGHUP received");
1411 dbg_free_final(NULL);
1424 signal_set(SIGHUP, sighup);
1425 signal_set(SIGINT, sigend);
1426 signal_set(SIGTERM, sigend);
1427 /* SIGKILL cannot be caught or handled, so no handler is installed for it */
1434 static struct sched_param sched_param = {
1438 res = sched_setscheduler (0, SCHED_RR, &sched_param);
1441 condlog(LOG_WARNING, "Could not set SCHED_RR at priority 99");
1446 set_oom_adj (int val)
1450 fp = fopen("/proc/self/oom_adj", "w");
1455 fprintf(fp, "%i", val);
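/* a negative value makes the OOM killer less likely to pick the daemon; -17 disables it entirely */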
1460 child (void * param)
1462 pthread_t check_thr, uevent_thr, uxlsnr_thr;
1463 pthread_attr_t attr;
1464 struct vectors * vecs;
1466 mlockall(MCL_CURRENT | MCL_FUTURE);
1471 condlog(2, "--------start up--------");
1472 condlog(2, "read " DEFAULT_CONFIGFILE);
1474 if (load_config(DEFAULT_CONFIGFILE))
1477 setlogmask(LOG_UPTO(conf->verbosity + 3));
1480 * fill the voids left in the config file
1482 if (!conf->checkint) {
1483 conf->checkint = CHECKINT;
1484 conf->max_checkint = MAX_CHECKINT;
1487 if (pidfile_create(DEFAULT_PIDFILE, getpid())) {
1496 vecs = init_paths();
1501 if (sysfs_get_mnt_path(sysfs_path, FILE_NAME_SIZE)) {
1502 condlog(0, "can not find sysfs mount point");
1507 * fetch paths and multipaths lists
1508 * no paths and/or no multipaths are valid scenarios
1509 * vectors maintenance will be driven by events
1511 path_discovery(vecs->pathvec, conf, DI_SYSFS | DI_WWID | DI_CHECKER);
1512 map_discovery(vecs);
1517 pthread_attr_init(&attr);
1518 pthread_attr_setstacksize(&attr, 64 * 1024);
1519 pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
1521 pthread_create(&check_thr, &attr, checkerloop, vecs);
1522 pthread_create(&uevent_thr, &attr, ueventloop, vecs);
1523 pthread_create(&uxlsnr_thr, &attr, uxlsnrloop, vecs);
1525 pthread_cond_wait(&exit_cond, &exit_mutex);
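/*
 * the child parks here until exit_daemon() signals exit_cond, then falls
 * through to the teardown below
 */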
1532 free_pathvec(vecs->pathvec, FREE_PATHS);
1534 pthread_cancel(check_thr);
1535 pthread_cancel(uevent_thr);
1536 pthread_cancel(uxlsnr_thr);
1540 free_handlers(handlers);
1545 pthread_mutex_destroy(vecs->lock);
1553 condlog(2, "--------shut down-------");
1559 dbg_free_final(NULL);
1566 main (int argc, char *argv[])
1568 extern char *optarg;
1575 if (getuid() != 0) {
1576 fprintf(stderr, "need to be root\n");
1580 /* make sure we don't lock any path */
1582 umask(umask(077) | 022);
1584 conf = alloc_config();
1589 while ((arg = getopt(argc, argv, ":dv:k::")) != EOF ) {
1593 //debug=1; /* ### comment me out ### */
1596 if (strlen(optarg) > 1 ||
1597 !isdigit(optarg[0]))
1600 conf->verbosity = atoi(optarg);
1623 return (child(NULL));