3 #include <libdevmapper.h>
10 #include <sysfs/libsysfs.h>
11 #include <sysfs/dlist.h>
17 #include <path_state.h>
28 #include <blacklist.h>
33 #include <devmapper.h>
35 #include <discovery.h>
39 #include <switchgroup.h>
48 #include "cli_handlers.h"
50 #define FILE_NAME_SIZE 256
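/*
 * LOG_MSG() below logs the checker message for a path and then clears
 * the buffer, so a stale message is not repeated on the next check.
 */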
53 #define LOG_MSG(a,b) \
55 condlog(a, "%s: %s", pp->dev_t, b); \
56 memset(b, 0, MAX_CHECKER_MSG_SIZE); \
61 fprintf(stderr, "%s:%s(%i) lock %p\n", __FILE__, __FUNCTION__, __LINE__, a); \
64 fprintf(stderr, "%s:%s(%i) unlock %p\n", __FILE__, __FUNCTION__, __LINE__, a); \
65 pthread_mutex_unlock(a)
66 #define lock_cleanup_pop(a) \
67 fprintf(stderr, "%s:%s(%i) unlock %p\n", __FILE__, __FUNCTION__, __LINE__, a); \
68 pthread_cleanup_pop(1);
70 #define lock(a) pthread_mutex_lock(a)
71 #define unlock(a) pthread_mutex_unlock(a)
72 #define lock_cleanup_pop(a) pthread_cleanup_pop(1);
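/*
 * two flavours of the locking helpers: the fprintf() variants above
 * trace every lock/unlock with file:function(line) for debugging, while
 * the plain variants just wrap the pthread calls. lock_cleanup_pop(1)
 * pops and runs the handler installed with pthread_cleanup_push(), i.e.
 * cleanup_lock(), which releases the mutex.
 */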
75 pthread_cond_t exit_cond = PTHREAD_COND_INITIALIZER;
76 pthread_mutex_t exit_mutex = PTHREAD_MUTEX_INITIALIZER;
85 char mapname[WWID_SIZE];
89 static struct event_thread *
93 struct event_thread * wp;
95 wp = (struct event_thread *)MALLOC(sizeof(struct event_thread));
101 free_waiter (void * data)
103 struct event_thread * wp = (struct event_thread *)data;
106 dm_task_destroy(wp->dmt);
111 stop_waiter_thread (struct multipath * mpp, struct vectors * vecs)
113 struct event_thread * wp = (struct event_thread *)mpp->waiter;
117 condlog(3, "%s: no waiter thread", mpp->alias);
123 condlog(3, "%s: thread not started", mpp->alias);
126 condlog(2, "%s: stop event checker thread", wp->mapname);
127 pthread_kill(thread, SIGHUP);
131 cleanup_lock (void * data)
133 pthread_mutex_unlock((pthread_mutex_t *)data);
137 adopt_paths (struct vectors * vecs, struct multipath * mpp)
145 vector_foreach_slot (vecs->pathvec, pp, i) {
146 if (!strncmp(mpp->wwid, pp->wwid, WWID_SIZE)) {
147 condlog(4, "%s ownership set", pp->dev_t);
154 orphan_path (struct path * pp)
158 pp->dmstate = PSTATE_UNDEF;
159 pp->checker_context = NULL;
170 orphan_paths (struct vectors * vecs, struct multipath * mpp)
175 vector_foreach_slot (vecs->pathvec, pp, i) {
176 if (pp->mpp == mpp) {
177 condlog(4, "%s is orphaned", pp->dev_t);
184 update_multipath_table (struct multipath *mpp, vector pathvec)
189 if (dm_get_map(mpp->alias, &mpp->size, mpp->params))
192 if (disassemble_map(pathvec, mpp->params, mpp))
199 update_multipath_status (struct multipath *mpp)
204 if(dm_get_status(mpp->alias, mpp->status))
207 if (disassemble_status(mpp->status, mpp))
214 update_multipath_strings (struct multipath *mpp, vector pathvec)
218 mpp->selector = NULL;
223 mpp->features = NULL;
226 if (mpp->hwhandler) {
227 FREE(mpp->hwhandler);
228 mpp->hwhandler = NULL;
231 free_pgvec(mpp->pg, KEEP_PATHS);
234 if (update_multipath_table(mpp, pathvec))
237 if (update_multipath_status(mpp))
244 set_multipath_wwid (struct multipath * mpp)
249 dm_get_uuid(mpp->alias, mpp->wwid);
253 pathcount (struct multipath *mpp, int state)
255 struct pathgroup *pgp;
260 vector_foreach_slot (mpp->pg, pgp, i)
261 vector_foreach_slot (pgp->paths, pp, j)
262 if (pp->state == state)
268 * mpp->no_path_retry:
269 * -2 (QUEUE) : queue_if_no_path enabled, never turned off
270 * -1 (FAIL) : fail_if_no_path
271 * 0 (UNDEF) : nothing
272 * >0 : queue_if_no_path enabled, turned off after polling n times
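 *
 * e.g. with no_path_retry = 5 and conf->checkint = 5, the last active
 * path failing arms retry_tick = 5 * 5 + 1; queueing then survives about
 * five polling intervals (~25s) before retry_count_tick() turns it off.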
275 update_queue_mode_del_path(struct multipath *mpp)
277 if (--mpp->nr_active == 0 && mpp->no_path_retry > 0) {
280 * meaning of +1: retry_tick may be decremented in
281 * checkerloop before starting retry.
283 mpp->retry_tick = mpp->no_path_retry * conf->checkint + 1;
284 condlog(1, "%s: Entering recovery mode: max_retries=%d",
285 mpp->alias, mpp->no_path_retry);
287 condlog(2, "%s: remaining active paths: %d", mpp->alias, mpp->nr_active);
291 update_queue_mode_add_path(struct multipath *mpp)
293 if (mpp->nr_active++ == 0 && mpp->no_path_retry > 0) {
294 /* come back to normal mode from retry mode */
296 dm_queue_if_no_path(mpp->alias, 1);
297 condlog(2, "%s: queue_if_no_path enabled", mpp->alias);
298 condlog(1, "%s: Recovered to normal mode", mpp->alias);
300 condlog(2, "%s: remaining active paths: %d", mpp->alias, mpp->nr_active);
304 set_no_path_retry(struct multipath *mpp)
307 mpp->nr_active = pathcount(mpp, PATH_UP);
308 select_no_path_retry(mpp);
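/*
 * the switch below maps the policy onto the map: UNDEF leaves the
 * device-mapper setting alone, FAIL clears queue_if_no_path, QUEUE sets
 * it unconditionally, and a positive retry count sets it and, if no
 * path is active, arms retry_tick (recovery mode).
 */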
310 switch (mpp->no_path_retry) {
311 case NO_PATH_RETRY_UNDEF:
313 case NO_PATH_RETRY_FAIL:
314 dm_queue_if_no_path(mpp->alias, 0);
316 case NO_PATH_RETRY_QUEUE:
317 dm_queue_if_no_path(mpp->alias, 1);
320 dm_queue_if_no_path(mpp->alias, 1);
321 if (mpp->nr_active == 0) {
322 /* Enter retry mode */
323 mpp->retry_tick = mpp->no_path_retry * conf->checkint;
324 condlog(1, "%s: Entering recovery mode: max_retries=%d",
325 mpp->alias, mpp->no_path_retry);
331 static struct hwentry *
332 extract_hwe_from_path(struct multipath * mpp)
335 struct pathgroup * pgp;
337 pgp = VECTOR_SLOT(mpp->pg, 0);
338 pp = VECTOR_SLOT(pgp->paths, 0);
344 remove_map (struct multipath * mpp, struct vectors * vecs)
348 stop_waiter_thread(mpp, vecs);
351 * clear references to this map
353 orphan_paths(vecs, mpp);
356 * purge the multipath vector
358 i = find_slot(vecs->mpvec, (void *)mpp);
359 vector_del_slot(vecs->mpvec, i);
364 free_multipath(mpp, KEEP_PATHS);
369 remove_maps (struct vectors * vecs)
372 struct multipath * mpp;
374 vector_foreach_slot (vecs->mpvec, mpp, i) {
375 remove_map(mpp, vecs);
379 vector_free(vecs->mpvec);
384 setup_multipath (struct vectors * vecs, struct multipath * mpp)
386 set_multipath_wwid(mpp);
387 mpp->mpe = find_mpe(mpp->wwid);
388 condlog(4, "discovered map %s", mpp->alias);
390 if (update_multipath_strings(mpp, vecs->pathvec))
393 adopt_paths(vecs, mpp);
394 select_pgfailback(mpp);
395 mpp->hwe = extract_hwe_from_path(mpp);
396 set_no_path_retry(mpp);
400 condlog(0, "%s: failed to setup multipath", mpp->alias);
401 remove_map(mpp, vecs);
406 need_switch_pathgroup (struct multipath * mpp, int refresh)
408 struct pathgroup * pgp;
412 if (!mpp || mpp->pgfailback == -FAILBACK_MANUAL)
416 * Refresh path priority values
419 vector_foreach_slot (mpp->pg, pgp, i)
420 vector_foreach_slot (pgp->paths, pp, j)
421 pathinfo(pp, conf->hwtable, DI_PRIO);
423 select_path_group(mpp); /* sets mpp->nextpg */
424 pgp = VECTOR_SLOT(mpp->pg, mpp->nextpg - 1);
426 if (pgp && pgp->status != PGSTATE_ACTIVE)
433 switch_pathgroup (struct multipath * mpp)
435 struct pathgroup * pgp;
437 pgp = VECTOR_SLOT(mpp->pg, mpp->nextpg - 1);
439 if (pgp && pgp->status != PGSTATE_ACTIVE) {
440 dm_switchgroup(mpp->alias, mpp->nextpg);
441 condlog(2, "%s: switch to path group #%i",
442 mpp->alias, mpp->nextpg);
447 update_multipath (struct vectors *vecs, char *mapname)
449 struct multipath *mpp;
450 struct pathgroup *pgp;
455 mpp = find_mp(vecs->mpvec, mapname);
460 free_pgvec(mpp->pg, KEEP_PATHS);
463 if (setup_multipath(vecs, mpp))
464 goto out; /* mpp freed in setup_multipath */
467 * compare checkers states with DM states
469 vector_foreach_slot (mpp->pg, pgp, i) {
470 vector_foreach_slot (pgp->paths, pp, j) {
471 if (pp->dmstate != PSTATE_FAILED)
474 if (pp->state != PATH_DOWN) {
475 condlog(2, "%s: mark as failed", pp->dev_t);
476 pp->state = PATH_DOWN;
477 update_queue_mode_del_path(mpp);
481 * schedule the next check earlier
483 if (pp->tick > conf->checkint)
484 pp->tick = conf->checkint;
491 condlog(0, "failed to update multipath");
496 static sigset_t unblock_sighup(void)
501 sigaddset(&set, SIGHUP);
502 pthread_sigmask(SIG_UNBLOCK, &set, &old);
507 * returns the reschedule delay
508 * negative means *stop*
511 waiteventloop (struct event_thread * waiter)
517 if (!waiter->event_nr)
518 waiter->event_nr = dm_geteventnr(waiter->mapname);
520 if (!(waiter->dmt = dm_task_create(DM_DEVICE_WAITEVENT)))
523 if (!dm_task_set_name(waiter->dmt, waiter->mapname)) {
524 dm_task_destroy(waiter->dmt);
528 if (waiter->event_nr && !dm_task_set_event_nr(waiter->dmt,
530 dm_task_destroy(waiter->dmt);
534 dm_task_no_open_count(waiter->dmt);
536 /* accept wait interruption */
537 set = unblock_sighup();
539 /* an interruption produces log messages */
543 r = dm_task_run(waiter->dmt);
545 /* wait is over : event or interrupt */
546 pthread_sigmask(SIG_SETMASK, &set, NULL);
549 if (!r) /* wait interrupted by signal */
552 dm_task_destroy(waiter->dmt);
560 condlog(3, "%s: devmap event #%i",
561 waiter->mapname, waiter->event_nr);
566 * 1) a table reload, which means our mpp structure is
567 * obsolete : refresh it through update_multipath()
568 * 2) a path failed by DM : mark as such through
570 * 3) map has gone away : stop the thread.
571 * 4) a path reinstate : nothing to do
572 * 5) a switch group : nothing to do
574 pthread_cleanup_push(cleanup_lock, waiter->vecs->lock);
575 lock(waiter->vecs->lock);
576 r = update_multipath(waiter->vecs, waiter->mapname);
577 lock_cleanup_pop(waiter->vecs->lock);
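/*
 * the cleanup_push/lock ... lock_cleanup_pop sequence above guarantees
 * the vecs lock is dropped even if this thread is cancelled while
 * update_multipath() runs; cleanup_lock() simply unlocks the mutex.
 */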
580 return -1; /* stop the thread */
582 event_nr = dm_geteventnr(waiter->mapname);
584 if (waiter->event_nr == event_nr)
585 return 1; /* on problems, reschedule 1s later */
587 waiter->event_nr = event_nr;
589 return -1; /* never reached */
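/*
 * the elided body of waitevent() below presumably consumes this return
 * value per the contract documented above waiteventloop(), roughly:
 *
 *	while (1) {
 *		r = waiteventloop(waiter);
 *		if (r < 0)
 *			break;		(stop the thread)
 *		sleep(r);		(reschedule delay, in seconds)
 *	}
 */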
593 waitevent (void * et)
596 struct event_thread *waiter;
598 mlockall(MCL_CURRENT | MCL_FUTURE);
600 waiter = (struct event_thread *)et;
601 pthread_cleanup_push(free_waiter, et);
604 r = waiteventloop(waiter);
612 pthread_cleanup_pop(1);
617 start_waiter_thread (struct multipath * mpp, struct vectors * vecs)
620 struct event_thread * wp;
625 if (pthread_attr_init(&attr))
628 pthread_attr_setstacksize(&attr, 32 * 1024);
629 pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
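/*
 * event waiter threads run detached on a small 32k stack, so they are
 * never joined; their per-thread state is released by the free_waiter()
 * cleanup handler pushed in waitevent().
 */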
636 mpp->waiter = (void *)wp;
637 strncpy(wp->mapname, mpp->alias, WWID_SIZE);
640 if (pthread_create(&wp->thread, &attr, waitevent, wp)) {
641 condlog(0, "%s: cannot create event checker", wp->mapname);
644 condlog(2, "%s: event checker started", wp->mapname);
651 condlog(0, "failed to start waiter thread");
656 uev_add_map (char * devname, struct vectors * vecs)
659 char dev_t[BLK_DEV_SIZE];
661 struct multipath * mpp;
663 if (sysfs_get_dev(sysfs_path, devname, dev_t, BLK_DEV_SIZE))
666 if (sscanf(dev_t, "%d:%d", &major, &minor) != 2)
669 alias = dm_mapname(major, minor);
674 if (!dm_type(alias, DEFAULT_TARGET)) {
675 condlog(4, "%s: not a multipath map", alias);
680 mpp = find_mp(vecs->mpvec, alias);
684 * this should not happen:
685 * we missed a remove-map event (never sent?)
687 condlog(2, "%s: already registered", alias);
688 remove_map(mpp, vecs);
692 * now we can allocate
694 mpp = alloc_multipath();
702 if (setup_multipath(vecs, mpp))
703 return 1; /* mpp freed in setup_multipath */
705 if (!vector_alloc_slot(vecs->mpvec))
708 vector_set_slot(vecs->mpvec, mpp);
709 adopt_paths(vecs, mpp);
711 if (start_waiter_thread(mpp, vecs))
716 condlog(2, "%s: add devmap failed", mpp->alias);
717 remove_map(mpp, vecs);
722 uev_remove_map (char * devname, struct vectors * vecs)
725 struct multipath * mpp;
727 if (sscanf(devname, "dm-%d", &minor) != 1)
730 mpp = find_mp_by_minor(vecs->mpvec, minor);
733 condlog(3, "%s: devmap not registered, can't remove",
738 condlog(2, "remove %s devmap", mpp->alias);
739 remove_map(mpp, vecs);
745 uev_add_path (char * devname, struct vectors * vecs)
749 pp = find_path_by_dev(vecs->pathvec, devname);
752 condlog(3, "%s: already in pathvec", devname);
755 pp = store_pathinfo(vecs->pathvec, conf->hwtable,
756 devname, DI_SYSFS | DI_WWID);
759 condlog(0, "%s: failed to store path info", devname);
763 condlog(2, "%s: path checker registered", devname);
764 pp->mpp = find_mp_by_wwid(vecs->mpvec, pp->wwid);
767 condlog(4, "%s: ownership set to %s",
768 pp->dev_t, pp->mpp->alias);
770 condlog(4, "%s: orphaned", pp->dev_t);
778 uev_remove_path (char * devname, struct vectors * vecs)
783 pp = find_path_by_dev(vecs->pathvec, devname);
786 condlog(3, "%s: not in pathvec", devname);
790 if (pp->mpp && pp->state == PATH_UP)
791 update_queue_mode_del_path(pp->mpp);
793 condlog(2, "remove %s path checker", devname);
794 i = find_slot(vecs->pathvec, (void *)pp);
795 vector_del_slot(vecs->pathvec, i);
802 show_paths (char ** r, int * len, struct vectors * vecs)
808 struct path_layout pl;
810 get_path_layout(&pl, vecs->pathvec);
811 reply = MALLOC(MAX_REPLY_LEN);
818 if (VECTOR_SIZE(vecs->pathvec) > 0)
819 c += snprint_path_header(c, reply + MAX_REPLY_LEN - c,
820 PRINT_PATH_CHECKER, &pl);
822 vector_foreach_slot(vecs->pathvec, pp, i)
823 c += snprint_path(c, reply + MAX_REPLY_LEN - c,
824 PRINT_PATH_CHECKER, pp, &pl);
827 *len = (int)(c - reply + 1);
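/*
 * the snprint_*() calls above append into a fixed MAX_REPLY_LEN buffer;
 * "reply + MAX_REPLY_LEN - c" is the space still available, and the +1
 * in *len accounts for the trailing NUL. show_maps() below uses the
 * same idiom.
 */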
832 show_maps (char ** r, int *len, struct vectors * vecs)
835 struct multipath * mpp;
838 struct map_layout ml;
840 get_map_layout(&ml, vecs->mpvec);
841 reply = MALLOC(MAX_REPLY_LEN);
847 if (VECTOR_SIZE(vecs->mpvec) > 0)
848 c += snprint_map_header(c, reply + MAX_REPLY_LEN - c,
849 PRINT_MAP_FAILBACK, &ml);
851 vector_foreach_slot(vecs->mpvec, mpp, i)
852 c += snprint_map(c, reply + MAX_REPLY_LEN - c,
853 PRINT_MAP_FAILBACK, mpp, &ml);
856 *len = (int)(c - reply + 1);
861 dump_pathvec (char ** r, int * len, struct vectors * vecs)
868 *len = VECTOR_SIZE(vecs->pathvec) * sizeof(struct path);
869 reply = (char *)MALLOC(*len);
877 vector_foreach_slot (vecs->pathvec, pp, i) {
878 memcpy((void *)p, pp, sizeof(struct path));
879 p += sizeof(struct path);
882 /* return negative to hint caller not to add "ok" to the dump */
887 map_discovery (struct vectors * vecs)
890 struct multipath * mpp;
892 if (dm_get_maps(vecs->mpvec, "multipath"))
895 vector_foreach_slot (vecs->mpvec, mpp, i) {
896 if (setup_multipath(vecs, mpp))
898 mpp->minor = dm_get_minor(mpp->alias);
899 start_waiter_thread(mpp, vecs);
906 reconfigure (struct vectors * vecs)
908 struct config * old = conf;
909 struct multipath * mpp;
915 if (load_config(DEFAULT_CONFIGFILE)) {
917 condlog(2, "reconfigure failed, continue with old config");
920 conf->verbosity = old->verbosity;
923 vector_foreach_slot (vecs->mpvec, mpp, i) {
924 mpp->mpe = find_mpe(mpp->wwid);
925 mpp->hwe = extract_hwe_from_path(mpp);
926 adopt_paths(vecs, mpp);
927 set_no_path_retry(mpp);
929 vector_foreach_slot (vecs->pathvec, pp, i) {
934 condlog(2, "reconfigured");
939 uxsock_trigger (char * str, char ** reply, int * len, void * trigger_data)
941 struct vectors * vecs;
946 vecs = (struct vectors *)trigger_data;
948 pthread_cleanup_push(cleanup_lock, vecs->lock);
951 r = parse_cmd(str, reply, len, vecs);
954 *reply = STRDUP("fail\n");
955 *len = strlen(*reply) + 1;
958 else if (!r && *len == 0) {
959 *reply = STRDUP("ok\n");
960 *len = strlen(*reply) + 1;
963 /* else if (r < 0) leave *reply alone */
965 lock_cleanup_pop(vecs->lock);
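/*
 * reply protocol: r > 0 means the command failed ("fail\n"), r == 0 with
 * no reply text means success ("ok\n"), and r < 0 means the cli handler
 * already filled *reply/*len itself and it is passed through untouched.
 */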
970 uev_discard(char * devpath)
975 * keep only block devices, discard partitions
977 if (sscanf(devpath, "/block/%10s", a) != 1 ||
978 sscanf(devpath, "/block/%10[^/]/%10s", a, b) == 2) {
979 condlog(4, "discard event on %s", devpath);
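/*
 * e.g. "/block/sda" matches only the first pattern and is kept, while
 * "/block/sda/sda1" also matches the second and is discarded as a
 * partition event.
 */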
986 uev_trigger (struct uevent * uev, void * trigger_data)
990 struct vectors * vecs;
992 vecs = (struct vectors *)trigger_data;
994 if (uev_discard(uev->devpath))
997 basename(uev->devpath, devname);
1001 * device map add/remove event
1003 if (!strncmp(devname, "dm-", 3)) {
1004 if (!strncmp(uev->action, "add", 3)) {
1005 r = uev_add_map(devname, vecs);
1009 if (!strncmp(uev->action, "remove", 6)) {
1010 r = uev_remove_map(devname, vecs);
1018 * path add/remove event
1020 if (blacklist(conf->blist, devname))
1023 if (!strncmp(uev->action, "add", 3)) {
1024 r = uev_add_path(devname, vecs);
1027 if (!strncmp(uev->action, "remove", 6)) {
1028 r = uev_remove_path(devname, vecs);
1038 ueventloop (void * ap)
1040 if (uevent_listen(&uev_trigger, ap))
1041 fprintf(stderr, "error starting uevent listener\n");
1047 uxlsnrloop (void * ap)
1052 if (alloc_handlers())
1055 add_handler(LIST+PATHS, cli_list_paths);
1056 add_handler(LIST+MAPS, cli_list_maps);
1057 add_handler(ADD+PATH, cli_add_path);
1058 add_handler(DEL+PATH, cli_del_path);
1059 add_handler(ADD+MAP, cli_add_map);
1060 add_handler(DEL+MAP, cli_del_map);
1061 add_handler(SWITCH+MAP+GROUP, cli_switch_group);
1062 add_handler(DUMP+PATHVEC, cli_dump_pathvec);
1063 add_handler(RECONFIGURE, cli_reconfigure);
1064 add_handler(SUSPEND+MAP, cli_suspend);
1065 add_handler(RESUME+MAP, cli_resume);
1067 uxsock_listen(&uxsock_trigger, ap);
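/*
 * the add_handler() calls above bind composed keyword codes to cli
 * callbacks; these presumably correspond to interactive commands such
 * as "list paths", "add path <dev>" or "reconfigure", which
 * uxsock_listen() then serves over the daemon's unix socket.
 */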
1073 exit_daemon (int status)
1076 fprintf(stderr, "bad exit status. see daemon.log\n");
1078 condlog(3, "unlink pidfile");
1079 unlink(DEFAULT_PIDFILE);
1082 pthread_cond_signal(&exit_cond);
1083 unlock(&exit_mutex);
1089 fail_path (struct path * pp)
1094 condlog(2, "checker failed path %s in map %s",
1095 pp->dev_t, pp->mpp->alias);
1097 dm_fail_path(pp->mpp->alias, pp->dev_t);
1098 update_queue_mode_del_path(pp->mpp);
1102 * caller must have locked the path list before calling this function
1105 reinstate_path (struct path * pp)
1110 if (dm_reinstate(pp->mpp->alias, pp->dev_t))
1111 condlog(0, "%s: reinstate failed", pp->dev_t);
1113 condlog(2, "%s: reinstated", pp->dev_t);
1114 update_queue_mode_add_path(pp->mpp);
1119 enable_group(struct path * pp)
1121 struct pathgroup * pgp;
1124 * if the path was added through uev_add_path, pgindex can be unset.
1125 * the next update_multipath_strings() will set it upon a map reload event.
1127 * we can safely return here, because upon map reload, all
1128 * PGs will be enabled.
1133 pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
1135 if (pgp->status == PGSTATE_DISABLED) {
1136 condlog(2, "%s: enable group #%i", pp->mpp->alias, pp->pgindex);
1137 dm_enablegroup(pp->mpp->alias, pp->pgindex);
1142 mpvec_garbage_collector (struct vectors * vecs)
1144 struct multipath * mpp;
1147 vector_foreach_slot (vecs->mpvec, mpp, i) {
1148 if (mpp && mpp->alias && !dm_map_present(mpp->alias)) {
1149 condlog(2, "%s: remove dead map", mpp->alias);
1150 remove_map(mpp, vecs);
1157 defered_failback_tick (vector mpvec)
1159 struct multipath * mpp;
1162 vector_foreach_slot (mpvec, mpp, i) {
1164 * the deferred failback gets closer
1166 if (mpp->pgfailback > 0 && mpp->failback_tick > 0) {
1167 mpp->failback_tick--;
1169 if (!mpp->failback_tick && need_switch_pathgroup(mpp, 1))
1170 switch_pathgroup(mpp);
1176 retry_count_tick(vector mpvec)
1178 struct multipath *mpp;
1181 vector_foreach_slot (mpvec, mpp, i) {
1182 if (mpp->retry_tick) {
1183 condlog(4, "%s: Retrying.. No active path", mpp->alias);
1184 if(--mpp->retry_tick == 0) {
1185 dm_queue_if_no_path(mpp->alias, 0);
1186 condlog(2, "%s: Disable queueing", mpp->alias);
1193 checkerloop (void *ap)
1195 struct vectors *vecs;
1199 char checker_msg[MAX_CHECKER_MSG_SIZE];
1201 mlockall(MCL_CURRENT | MCL_FUTURE);
1203 memset(checker_msg, 0, MAX_CHECKER_MSG_SIZE);
1204 vecs = (struct vectors *)ap;
1206 condlog(2, "path checkers start up");
1209 * init the path check interval
1211 vector_foreach_slot (vecs->pathvec, pp, i) {
1212 pp->checkint = conf->checkint;
1216 pthread_cleanup_push(cleanup_lock, vecs->lock);
1220 vector_foreach_slot (vecs->pathvec, pp, i) {
1224 if (pp->tick && --pp->tick)
1225 continue; /* don't check this path yet */
1228 * provision the next check at the soonest interval,
1229 * in case we exit abnormally from here
1231 pp->tick = conf->checkint;
1234 pathinfo(pp, conf->hwtable, DI_SYSFS);
1239 condlog(0, "%s: checkfn is void", pp->dev);
1242 newstate = pp->checkfn(pp->fd, checker_msg,
1243 &pp->checker_context);
1245 if (newstate != pp->state) {
1246 pp->state = newstate;
1247 LOG_MSG(1, checker_msg);
1250 * upon state change, reset the checkint
1251 * to the shortest delay
1253 pp->checkint = conf->checkint;
1255 if (newstate == PATH_DOWN ||
1256 newstate == PATH_SHAKY) {
1258 * proactively fail path in the DM
1263 * cancel scheduled failback
1265 pp->mpp->failback_tick = 0;
1271 * reinstate this path
1276 * need to switch group ?
1278 update_multipath_strings(pp->mpp,
1282 * schedule defered failback
1284 if (pp->mpp->pgfailback > 0)
1285 pp->mpp->failback_tick =
1286 pp->mpp->pgfailback + 1;
1287 else if (pp->mpp->pgfailback == -FAILBACK_IMMEDIATE &&
1288 need_switch_pathgroup(pp->mpp, 1))
1289 switch_pathgroup(pp->mpp);
1292 * if at least one path is up in a group, and
1293 * the group is disabled, re-enable it
1295 if (newstate == PATH_UP)
1298 else if (newstate == PATH_UP || newstate == PATH_GHOST) {
1299 LOG_MSG(4, checker_msg);
1301 * double the next check delay,
1302 * capped at conf->max_checkint
1304 if (pp->checkint < (conf->max_checkint / 2))
1305 pp->checkint = 2 * pp->checkint;
1307 pp->checkint = conf->max_checkint;
1309 pp->tick = pp->checkint;
1310 condlog(4, "%s: delay next check %is",
1311 pp->dev_t, pp->tick);
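/*
 * the re-check interval doubles from checkint up to max_checkint
 * (e.g. 5 -> 10 -> 20 with checkint = 5, max_checkint = 20); any state
 * change resets it back to conf->checkint, as handled above.
 */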
1314 pp->state = newstate;
1317 * path prio refreshing
1319 condlog(4, "path prio refresh");
1320 pathinfo(pp, conf->hwtable, DI_PRIO);
1322 if (need_switch_pathgroup(pp->mpp, 0)) {
1323 if (pp->mpp->pgfailback > 0)
1324 pp->mpp->failback_tick =
1325 pp->mpp->pgfailback + 1;
1326 else if (pp->mpp->pgfailback ==
1327 -FAILBACK_IMMEDIATE)
1328 switch_pathgroup(pp->mpp);
1331 defered_failback_tick(vecs->mpvec);
1332 retry_count_tick(vecs->mpvec);
1337 condlog(4, "map garbage collection");
1338 mpvec_garbage_collector(vecs);
1342 lock_cleanup_pop(vecs->lock);
1348 static struct vectors *
1351 struct vectors * vecs;
1353 vecs = (struct vectors *)MALLOC(sizeof(struct vectors));
1359 (pthread_mutex_t *)MALLOC(sizeof(pthread_mutex_t));
1364 vecs->pathvec = vector_alloc();
1369 vecs->mpvec = vector_alloc();
1374 pthread_mutex_init(vecs->lock, NULL);
1379 vector_free(vecs->pathvec);
1384 condlog(0, "failed to init paths");
1389 signal_set(int signo, void (*func) (int))
1392 struct sigaction sig;
1393 struct sigaction osig;
1395 sig.sa_handler = func;
1396 sigemptyset(&sig.sa_mask);
1399 r = sigaction(signo, &sig, &osig);
1404 return (osig.sa_handler);
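/*
 * sigaction()-based replacement for signal(): installs func for signo
 * and hands back the previously installed handler.
 */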
1410 condlog(3, "SIGHUP received");
1413 dbg_free_final(NULL);
1426 signal_set(SIGHUP, sighup);
1427 signal_set(SIGINT, sigend);
1428 signal_set(SIGTERM, sigend);
1429 signal_set(SIGKILL, sigend); /* no effect: SIGKILL cannot be caught */
1436 static struct sched_param sched_param = {
1440 res = sched_setscheduler (0, SCHED_RR, &sched_param);
1443 condlog(LOG_WARNING, "Could not set SCHED_RR at priority 99");
1448 set_oom_adj (int val)
1452 fp = fopen("/proc/self/oom_adj", "w");
1457 fprintf(fp, "%i", val);
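/*
 * /proc/self/oom_adj is the legacy knob for biasing the kernel OOM
 * killer; a negative value makes multipathd less likely to be killed
 * under memory pressure (newer kernels expose oom_score_adj instead).
 */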
1462 child (void * param)
1464 pthread_t check_thr, uevent_thr, uxlsnr_thr;
1465 pthread_attr_t attr;
1466 struct vectors * vecs;
1468 mlockall(MCL_CURRENT | MCL_FUTURE);
1473 condlog(2, "--------start up--------");
1474 condlog(2, "read " DEFAULT_CONFIGFILE);
1476 if (load_config(DEFAULT_CONFIGFILE))
1479 setlogmask(LOG_UPTO(conf->verbosity + 3));
1482 * fill the voids left in the config file
1484 if (!conf->checkint) {
1485 conf->checkint = CHECKINT;
1486 conf->max_checkint = MAX_CHECKINT;
1489 if (pidfile_create(DEFAULT_PIDFILE, getpid())) {
1498 vecs = init_paths();
1503 if (sysfs_get_mnt_path(sysfs_path, FILE_NAME_SIZE)) {
1504 condlog(0, "can not find sysfs mount point");
1509 * fetch paths and multipaths lists
1510 * no paths and/or no multipaths are valid scenarios
1511 * vectors maintenance will be driven by events
1513 path_discovery(vecs->pathvec, conf, DI_SYSFS | DI_WWID | DI_CHECKER);
1514 map_discovery(vecs);
1519 pthread_attr_init(&attr);
1520 pthread_attr_setstacksize(&attr, 64 * 1024);
1521 pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
1523 pthread_create(&check_thr, &attr, checkerloop, vecs);
1524 pthread_create(&uevent_thr, &attr, ueventloop, vecs);
1525 pthread_create(&uxlsnr_thr, &attr, uxlsnrloop, vecs);
1527 pthread_cond_wait(&exit_cond, &exit_mutex);
1534 free_pathvec(vecs->pathvec, FREE_PATHS);
1536 pthread_cancel(check_thr);
1537 pthread_cancel(uevent_thr);
1538 pthread_cancel(uxlsnr_thr);
1542 free_handlers(handlers);
1547 pthread_mutex_destroy(vecs->lock);
1555 condlog(2, "--------shut down-------");
1561 dbg_free_final(NULL);
1568 main (int argc, char *argv[])
1570 extern char *optarg;
1577 if (getuid() != 0) {
1578 fprintf(stderr, "need to be root\n");
1582 /* make sure we don't lock any path */
1584 umask(umask(077) | 022);
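/*
 * read the current umask (temporarily setting it to 077) and restore it
 * with the group/other write bits forced on, so nothing the daemon
 * creates is ever group- or other-writable.
 */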
1586 conf = alloc_config();
1591 while ((arg = getopt(argc, argv, ":dv:k::")) != EOF ) {
1595 //debug=1; /* ### comment me out ### */
1598 if (strlen(optarg) > 1 || /* verbosity is a single digit */
1599     !isdigit(optarg[0]))
1602 conf->verbosity = atoi(optarg);
1625 return (child(NULL));