3 #include <libdevmapper.h>
10 #include <sysfs/libsysfs.h>
11 #include <sysfs/dlist.h>
17 #include <path_state.h>
28 #include <blacklist.h>
33 #include <devmapper.h>
35 #include <discovery.h>
39 #include <switchgroup.h>
48 #include "cli_handlers.h"
50 #define FILE_NAME_SIZE 256
53 #define LOG_MSG(a,b) \
55 condlog(a, "%s: %s", pp->dev_t, b); \
56 memset(b, 0, MAX_CHECKER_MSG_SIZE); \
61 fprintf(stderr, "%s:%s(%i) lock %p\n", __FILE__, __FUNCTION__, __LINE__, a); \
64 fprintf(stderr, "%s:%s(%i) unlock %p\n", __FILE__, __FUNCTION__, __LINE__, a); \
65 pthread_mutex_unlock(a)
66 #define lock_cleanup_pop(a) \
67 fprintf(stderr, "%s:%s(%i) unlock %p\n", __FILE__, __FUNCTION__, __LINE__, a); \
68 pthread_cleanup_pop(1);
70 #define lock(a) pthread_mutex_lock(a)
71 #define unlock(a) pthread_mutex_unlock(a)
72 #define lock_cleanup_pop(a) pthread_cleanup_pop(1);
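/*
 * illustrative usage note (an addition, not in the original source):
 * lock() and lock_cleanup_pop() are meant to be paired with
 * pthread_cleanup_push(cleanup_lock, ...), as in waiteventloop() and
 * checkerloop() below:
 *
 *	pthread_cleanup_push(cleanup_lock, vecs->lock);
 *	lock(vecs->lock);
 *	...
 *	lock_cleanup_pop(vecs->lock);
 *
 * so the vectors lock is released even if the holding thread is
 * cancelled while it holds it.
 */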
75 pthread_cond_t exit_cond = PTHREAD_COND_INITIALIZER;
76 pthread_mutex_t exit_mutex = PTHREAD_MUTEX_INITIALIZER;
85 char mapname[WWID_SIZE];
89 static struct event_thread *
93 struct event_thread * wp;
95 wp = (struct event_thread *)MALLOC(sizeof(struct event_thread));
101 free_waiter (void * data)
103 struct event_thread * wp = (struct event_thread *)data;
106 dm_task_destroy(wp->dmt);
111 stop_waiter_thread (struct multipath * mpp, struct vectors * vecs)
113 struct event_thread * wp = (struct event_thread *)mpp->waiter;
117 condlog(3, "%s: no waiter thread", mpp->alias);
123 condlog(3, "%s: thread not started", mpp->alias);
126 condlog(2, "%s: stop event checker thread", wp->mapname);
127 pthread_kill(thread, SIGHUP);
131 cleanup_lock (void * data)
133 pthread_mutex_unlock((pthread_mutex_t *)data);
137 adopt_paths (struct vectors * vecs, struct multipath * mpp)
145 vector_foreach_slot (vecs->pathvec, pp, i) {
146 if (!strncmp(mpp->wwid, pp->wwid, WWID_SIZE)) {
147 condlog(4, "%s ownership set", pp->dev_t);
154 orphan_path (struct path * pp)
158 pp->dmstate = PSTATE_UNDEF;
159 pp->checker_context = NULL;
170 orphan_paths (struct vectors * vecs, struct multipath * mpp)
175 vector_foreach_slot (vecs->pathvec, pp, i) {
176 if (pp->mpp == mpp) {
177 condlog(4, "%s is orphaned", pp->dev_t);
184 update_multipath_table (struct multipath *mpp, vector pathvec)
189 if (dm_get_map(mpp->alias, &mpp->size, mpp->params))
192 if (disassemble_map(pathvec, mpp->params, mpp))
199 update_multipath_status (struct multipath *mpp)
204 if (dm_get_status(mpp->alias, mpp->status))
207 if (disassemble_status(mpp->status, mpp))
214 update_multipath_strings (struct multipath *mpp, vector pathvec)
218 mpp->selector = NULL;
223 mpp->features = NULL;
226 if (mpp->hwhandler) {
227 FREE(mpp->hwhandler);
228 mpp->hwhandler = NULL;
231 free_pgvec(mpp->pg, KEEP_PATHS);
234 if (update_multipath_table(mpp, pathvec))
237 if (update_multipath_status(mpp))
244 set_multipath_wwid (struct multipath * mpp)
249 dm_get_uuid(mpp->alias, mpp->wwid);
253 pathcount (struct multipath *mpp, int state)
255 struct pathgroup *pgp;
260 vector_foreach_slot (mpp->pg, pgp, i)
261 vector_foreach_slot (pgp->paths, pp, j)
262 if (pp->state == state)
268 * mpp->no_path_retry:
269 * -2 (QUEUE) : queue_if_no_path enabled, never turned off
270 * -1 (FAIL) : fail_if_no_path
271 * 0 (UNDEF) : nothing
272 * >0 : queue_if_no_path enabled, turned off after polling n times
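*
* illustrative example (an addition, assuming a conf->checkint of 5
* seconds): "no_path_retry 12" sets retry_tick = 12 * 5 + 1 = 61 when
* the last active path goes away; retry_count_tick() then counts it
* down once per checkerloop pass and finally calls
* dm_queue_if_no_path(mpp->alias, 0) to stop queueing.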
275 update_queue_mode_del_path(struct multipath *mpp)
277 if (--mpp->nr_active == 0 && mpp->no_path_retry > 0) {
280 * meaning of +1: retry_tick may be decremented in
281 * checkerloop before starting retry.
283 mpp->retry_tick = mpp->no_path_retry * conf->checkint + 1;
284 condlog(1, "%s: Entering recovery mode: max_retries=%d",
285 mpp->alias, mpp->no_path_retry);
287 condlog(2, "%s: remaining active paths: %d", mpp->alias, mpp->nr_active);
291 update_queue_mode_add_path(struct multipath *mpp)
293 if (mpp->nr_active++ == 0 && mpp->no_path_retry > 0) {
294 /* come back to normal mode from retry mode */
296 dm_queue_if_no_path(mpp->alias, 1);
297 condlog(2, "%s: queue_if_no_path enabled", mpp->alias);
298 condlog(1, "%s: Recovered to normal mode", mpp->alias);
300 condlog(2, "%s: remaining active paths: %d", mpp->alias, mpp->nr_active);
304 set_no_path_retry(struct multipath *mpp)
307 mpp->nr_active = pathcount(mpp, PATH_UP);
308 select_no_path_retry(mpp);
310 switch (mpp->no_path_retry) {
311 case NO_PATH_RETRY_UNDEF:
313 case NO_PATH_RETRY_FAIL:
314 dm_queue_if_no_path(mpp->alias, 0);
316 case NO_PATH_RETRY_QUEUE:
317 dm_queue_if_no_path(mpp->alias, 1);
320 dm_queue_if_no_path(mpp->alias, 1);
321 if (mpp->nr_active == 0) {
322 /* Enter retry mode */
323 mpp->retry_tick = mpp->no_path_retry * conf->checkint;
324 condlog(1, "%s: Entering recovery mode: max_retries=%d",
325 mpp->alias, mpp->no_path_retry);
331 static struct hwentry *
332 extract_hwe_from_path(struct multipath * mpp)
335 struct pathgroup * pgp;
337 pgp = VECTOR_SLOT(mpp->pg, 0);
338 pp = VECTOR_SLOT(pgp->paths, 0);
344 remove_map (struct multipath * mpp, struct vectors * vecs)
348 stop_waiter_thread(mpp, vecs);
351 * clear references to this map
353 orphan_paths(vecs, mpp);
356 * purge the multipath vector
358 i = find_slot(vecs->mpvec, (void *)mpp);
359 vector_del_slot(vecs->mpvec, i);
364 free_multipath(mpp, KEEP_PATHS);
369 remove_maps (struct vectors * vecs)
372 struct multipath * mpp;
374 vector_foreach_slot (vecs->mpvec, mpp, i) {
375 remove_map(mpp, vecs);
379 vector_free(vecs->mpvec);
384 setup_multipath (struct vectors * vecs, struct multipath * mpp)
386 set_multipath_wwid(mpp);
387 mpp->mpe = find_mpe(mpp->wwid);
388 condlog(4, "discovered map %s", mpp->alias);
390 if (update_multipath_strings(mpp, vecs->pathvec))
393 adopt_paths(vecs, mpp);
394 select_pgfailback(mpp);
395 mpp->hwe = extract_hwe_from_path(mpp);
396 set_no_path_retry(mpp);
400 condlog(0, "%s: failed to setup multipath", mpp->alias);
401 remove_map(mpp, vecs);
406 need_switch_pathgroup (struct multipath * mpp, int refresh)
408 struct pathgroup * pgp;
412 if (!mpp || mpp->pgfailback == -FAILBACK_MANUAL)
416 * Refresh path priority values
419 vector_foreach_slot (mpp->pg, pgp, i)
420 vector_foreach_slot (pgp->paths, pp, j)
421 pathinfo(pp, conf->hwtable, DI_PRIO);
423 select_path_group(mpp); /* sets mpp->nextpg */
424 pgp = VECTOR_SLOT(mpp->pg, mpp->nextpg - 1);
426 if (pgp && pgp->status != PGSTATE_ACTIVE)
433 switch_pathgroup (struct multipath * mpp)
435 struct pathgroup * pgp;
437 pgp = VECTOR_SLOT(mpp->pg, mpp->nextpg - 1);
439 if (pgp && pgp->status != PGSTATE_ACTIVE) {
440 dm_switchgroup(mpp->alias, mpp->nextpg);
441 condlog(2, "%s: switch to path group #%i",
442 mpp->alias, mpp->nextpg);
447 update_multipath (struct vectors *vecs, char *mapname)
449 struct multipath *mpp;
450 struct pathgroup *pgp;
455 mpp = find_mp_by_alias(vecs->mpvec, mapname);
460 free_pgvec(mpp->pg, KEEP_PATHS);
463 if (setup_multipath(vecs, mpp))
464 goto out; /* mpp freed in setup_multipath */
467 * compare checker states with DM states
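* (for example, a path the DM already flagged PSTATE_FAILED while the
* checker still reports PATH_UP is forced to PATH_DOWN below, and the
* queueing bookkeeping is updated accordingly)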
469 vector_foreach_slot (mpp->pg, pgp, i) {
470 vector_foreach_slot (pgp->paths, pp, j) {
471 if (pp->dmstate != PSTATE_FAILED)
474 if (pp->state != PATH_DOWN) {
475 condlog(2, "%s: mark as failed", pp->dev_t);
476 pp->state = PATH_DOWN;
477 update_queue_mode_del_path(mpp);
481 * schedule the next check earlier
483 if (pp->tick > conf->checkint)
484 pp->tick = conf->checkint;
491 condlog(0, "failed to update multipath");
496 static sigset_t unblock_sighup(void)
501 sigaddset(&set, SIGHUP);
502 pthread_sigmask(SIG_UNBLOCK, &set, &old);
507 * returns the reschedule delay in seconds;
508 * a negative value means *stop* the thread
511 waiteventloop (struct event_thread * waiter)
517 if (!waiter->event_nr)
518 waiter->event_nr = dm_geteventnr(waiter->mapname);
520 if (!(waiter->dmt = dm_task_create(DM_DEVICE_WAITEVENT)))
523 if (!dm_task_set_name(waiter->dmt, waiter->mapname)) {
524 dm_task_destroy(waiter->dmt);
528 if (waiter->event_nr && !dm_task_set_event_nr(waiter->dmt,
530 dm_task_destroy(waiter->dmt);
534 dm_task_no_open_count(waiter->dmt);
536 /* accept wait interruption */
537 set = unblock_sighup();
539 /* interruption spits messages */
543 r = dm_task_run(waiter->dmt);
545 /* wait is over : event or interrupt */
546 pthread_sigmask(SIG_SETMASK, &set, NULL);
549 if (!r) /* wait interrupted by signal */
552 dm_task_destroy(waiter->dmt);
560 condlog(3, "%s: devmap event #%i",
561 waiter->mapname, waiter->event_nr);
566 * 1) a table reload, which means our mpp structure is
567 * obsolete : refresh it through update_multipath()
568 * 2) a path failed by DM : mark as such through
570 * 3) map has gone away : stop the thread.
571 * 4) a path reinstate : nothing to do
572 * 5) a switch group : nothing to do
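*
* illustrative summary (an addition): case 3 (map gone) makes
* update_multipath() below fail, and waiteventloop() then returns -1
* to stop this waiter thread; for cases 4 and 5 there is nothing
* extra to do, so the loop just rearms the wait.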
574 pthread_cleanup_push(cleanup_lock, waiter->vecs->lock);
575 lock(waiter->vecs->lock);
576 r = update_multipath(waiter->vecs, waiter->mapname);
577 lock_cleanup_pop(waiter->vecs->lock);
580 return -1; /* stop the thread */
582 event_nr = dm_geteventnr(waiter->mapname);
584 if (waiter->event_nr == event_nr)
585 return 1; /* upon problem reschedule 1s later */
587 waiter->event_nr = event_nr;
589 return -1; /* never reached */
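/*
 * illustrative sketch (an assumption, not verbatim from this file):
 * waitevent() below is expected to consume that return value roughly
 * as follows, treating a negative value as "stop" and anything else
 * as a reschedule delay in seconds:
 *
 *	while (1) {
 *		r = waiteventloop(waiter);
 *		if (r < 0)
 *			break;
 *		sleep(r);
 *	}
 */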
593 waitevent (void * et)
596 struct event_thread *waiter;
598 mlockall(MCL_CURRENT | MCL_FUTURE);
600 waiter = (struct event_thread *)et;
601 pthread_cleanup_push(free_waiter, et);
604 r = waiteventloop(waiter);
612 pthread_cleanup_pop(1);
617 start_waiter_thread (struct multipath * mpp, struct vectors * vecs)
620 struct event_thread * wp;
625 if (pthread_attr_init(&attr))
628 pthread_attr_setstacksize(&attr, 32 * 1024);
629 pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
636 mpp->waiter = (void *)wp;
637 strncpy(wp->mapname, mpp->alias, WWID_SIZE);
640 if (pthread_create(&wp->thread, &attr, waitevent, wp)) {
641 condlog(0, "%s: cannot create event checker", wp->mapname);
644 condlog(2, "%s: event checker started", wp->mapname);
651 condlog(0, "failed to start waiter thread");
656 uev_add_map (char * devname, struct vectors * vecs)
659 char dev_t[BLK_DEV_SIZE];
661 struct multipath * mpp;
663 if (sysfs_get_dev(sysfs_path, devname, dev_t, BLK_DEV_SIZE))
666 if (sscanf(dev_t, "%d:%d", &major, &minor) != 2)
669 alias = dm_mapname(major, minor);
674 if (!dm_type(alias, DEFAULT_TARGET)) {
675 condlog(4, "%s: not a multipath map", alias);
680 mpp = find_mp_by_alias(vecs->mpvec, alias);
684 * this should not happen: it means we missed
685 * a remove map event (possibly never sent?)
687 condlog(2, "%s: already registered", alias);
688 remove_map(mpp, vecs);
692 * now we can allocate
694 mpp = alloc_multipath();
702 if (setup_multipath(vecs, mpp))
703 return 1; /* mpp freed in setup_multipath */
705 if (!vector_alloc_slot(vecs->mpvec))
708 vector_set_slot(vecs->mpvec, mpp);
709 adopt_paths(vecs, mpp);
711 if (start_waiter_thread(mpp, vecs))
716 condlog(2, "%s: add devmap failed", mpp->alias);
717 remove_map(mpp, vecs);
722 uev_remove_map (char * devname, struct vectors * vecs)
725 struct multipath * mpp;
727 if (sscanf(devname, "dm-%d", &minor) != 1)
730 mpp = find_mp_by_minor(vecs->mpvec, minor);
733 condlog(3, "%s: devmap not registered, can't remove",
738 condlog(2, "remove %s devmap", mpp->alias);
739 remove_map(mpp, vecs);
745 uev_add_path (char * devname, struct vectors * vecs)
749 pp = find_path_by_dev(vecs->pathvec, devname);
752 condlog(3, "%s: already in pathvec", devname);
755 pp = store_pathinfo(vecs->pathvec, conf->hwtable,
756 devname, DI_SYSFS | DI_WWID);
759 condlog(0, "%s: failed to store path info", devname);
763 condlog(2, "%s: path checker registered", devname);
764 pp->mpp = find_mp_by_wwid(vecs->mpvec, pp->wwid);
767 condlog(4, "%s: ownership set to %s",
768 pp->dev_t, pp->mpp->alias);
770 condlog(4, "%s: orphaned", pp->dev_t);
778 uev_remove_path (char * devname, struct vectors * vecs)
783 pp = find_path_by_dev(vecs->pathvec, devname);
786 condlog(3, "%s: not in pathvec", devname);
790 if (pp->mpp && pp->state == PATH_UP)
791 update_queue_mode_del_path(pp->mpp);
793 condlog(2, "remove %s path checker", devname);
794 i = find_slot(vecs->pathvec, (void *)pp);
795 vector_del_slot(vecs->pathvec, i);
802 show_paths (char ** r, int * len, struct vectors * vecs)
808 struct path_layout pl;
810 get_path_layout(&pl, vecs->pathvec);
811 reply = MALLOC(MAX_REPLY_LEN);
818 if (VECTOR_SIZE(vecs->pathvec) > 0)
819 c += snprint_path_header(c, reply + MAX_REPLY_LEN - c,
820 PRINT_PATH_CHECKER, &pl);
822 vector_foreach_slot(vecs->pathvec, pp, i)
823 c += snprint_path(c, reply + MAX_REPLY_LEN - c,
824 PRINT_PATH_CHECKER, pp, &pl);
827 *len = (int)(c - reply + 1);
832 show_maps (char ** r, int *len, struct vectors * vecs)
835 struct multipath * mpp;
838 struct map_layout ml;
840 get_map_layout(&ml, vecs->mpvec);
841 reply = MALLOC(MAX_REPLY_LEN);
847 if (VECTOR_SIZE(vecs->mpvec) > 0)
848 c += snprint_map_header(c, reply + MAX_REPLY_LEN - c,
849 PRINT_MAP_FAILBACK, &ml);
851 vector_foreach_slot(vecs->mpvec, mpp, i)
852 c += snprint_map(c, reply + MAX_REPLY_LEN - c,
853 PRINT_MAP_FAILBACK, mpp, &ml);
856 *len = (int)(c - reply + 1);
861 dump_pathvec (char ** r, int * len, struct vectors * vecs)
868 *len = VECTOR_SIZE(vecs->pathvec) * sizeof(struct path);
869 reply = (char *)MALLOC(*len);
877 vector_foreach_slot (vecs->pathvec, pp, i) {
878 memcpy((void *)p, pp, sizeof(struct path));
879 p += sizeof(struct path);
882 /* return negative to hint caller not to add "ok" to the dump */
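/*
 * uxsock_trigger() honours this: its r < 0 branch leaves *reply
 * untouched, so the binary payload built here is not replaced by the
 * default "ok" string.
 */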
887 map_discovery (struct vectors * vecs)
890 struct multipath * mpp;
892 if (dm_get_maps(vecs->mpvec, "multipath"))
895 vector_foreach_slot (vecs->mpvec, mpp, i) {
896 if (setup_multipath(vecs, mpp))
898 mpp->minor = dm_get_minor(mpp->alias);
899 start_waiter_thread(mpp, vecs);
906 reconfigure (struct vectors * vecs)
908 struct config * old = conf;
909 struct multipath * mpp;
915 if (load_config(DEFAULT_CONFIGFILE)) {
917 condlog(2, "reconfigure failed, continue with old config");
920 conf->verbosity = old->verbosity;
923 vector_foreach_slot (vecs->mpvec, mpp, i) {
924 mpp->mpe = find_mpe(mpp->wwid);
925 mpp->hwe = extract_hwe_from_path(mpp);
926 adopt_paths(vecs, mpp);
927 set_no_path_retry(mpp);
929 vector_foreach_slot (vecs->pathvec, pp, i) {
934 condlog(2, "reconfigured");
939 uxsock_trigger (char * str, char ** reply, int * len, void * trigger_data)
941 struct vectors * vecs;
946 vecs = (struct vectors *)trigger_data;
948 pthread_cleanup_push(cleanup_lock, vecs->lock);
951 r = parse_cmd(str, reply, len, vecs);
954 *reply = STRDUP("fail\n");
955 *len = strlen(*reply) + 1;
958 else if (!r && *len == 0) {
959 *reply = STRDUP("ok\n");
960 *len = strlen(*reply) + 1;
963 /* else if (r < 0) leave *reply alone */
965 lock_cleanup_pop(vecs->lock);
970 uev_discard(char * devpath)
975 * keep only block devices, discard partitions
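* (e.g. a devpath like /block/sda is kept, while /block/sda/sda1
* matches the second pattern and is discarded; the device names are
* purely illustrative)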
977 if (sscanf(devpath, "/block/%10s", a) != 1 ||
978 sscanf(devpath, "/block/%10[^/]/%10s", a, b) == 2) {
979 condlog(4, "discard event on %s", devpath);
986 uev_trigger (struct uevent * uev, void * trigger_data)
990 struct vectors * vecs;
992 vecs = (struct vectors *)trigger_data;
994 if (uev_discard(uev->devpath))
997 basename(uev->devpath, devname);
1001 * device map add/remove event
1003 if (!strncmp(devname, "dm-", 3)) {
1004 if (!strncmp(uev->action, "add", 3)) {
1005 r = uev_add_map(devname, vecs);
1009 if (!strncmp(uev->action, "remove", 6)) {
1010 r = uev_remove_map(devname, vecs);
1018 * path add/remove event
1020 if (blacklist(conf->blist, devname))
1023 if (!strncmp(uev->action, "add", 3)) {
1024 r = uev_add_path(devname, vecs);
1027 if (!strncmp(uev->action, "remove", 6)) {
1028 r = uev_remove_path(devname, vecs);
1038 ueventloop (void * ap)
1040 if (uevent_listen(&uev_trigger, ap))
1041 fprintf(stderr, "error starting uevent listener\n");
1047 uxlsnrloop (void * ap)
1052 if (alloc_handlers())
1055 add_handler(LIST+PATHS, cli_list_paths);
1056 add_handler(LIST+MAPS, cli_list_maps);
1057 add_handler(ADD+PATH, cli_add_path);
1058 add_handler(DEL+PATH, cli_del_path);
1059 add_handler(ADD+MAP, cli_add_map);
1060 add_handler(DEL+MAP, cli_del_map);
1061 add_handler(SWITCH+MAP+GROUP, cli_switch_group);
1062 add_handler(DUMP+PATHVEC, cli_dump_pathvec);
1063 add_handler(RECONFIGURE, cli_reconfigure);
1064 add_handler(SUSPEND+MAP, cli_suspend);
1065 add_handler(RESUME+MAP, cli_resume);
1066 add_handler(REINSTATE+PATH, cli_reinstate);
1067 add_handler(FAIL+PATH, cli_fail);
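/*
 * illustrative note (an addition): each handler is keyed on the sum
 * of the keyword codes it serves (e.g. FAIL+PATH), and parse_cmd(),
 * invoked from uxsock_trigger(), resolves the client's command
 * string to one of these keys and dispatches to the matching cli_*
 * handler; the parsing itself lives in the cli code.
 */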
1069 uxsock_listen(&uxsock_trigger, ap);
1075 exit_daemon (int status)
1078 fprintf(stderr, "bad exit status. see daemon.log\n");
1080 condlog(3, "unlink pidfile");
1081 unlink(DEFAULT_PIDFILE);
1084 pthread_cond_signal(&exit_cond);
1085 unlock(&exit_mutex);
1091 fail_path (struct path * pp)
1096 condlog(2, "checker failed path %s in map %s",
1097 pp->dev_t, pp->mpp->alias);
1099 dm_fail_path(pp->mpp->alias, pp->dev_t);
1100 update_queue_mode_del_path(pp->mpp);
1104 * caller must have locked the path list before calling this function
1107 reinstate_path (struct path * pp)
1112 if (dm_reinstate(pp->mpp->alias, pp->dev_t))
1113 condlog(0, "%s: reinstate failed", pp->dev_t);
1115 condlog(2, "%s: reinstated", pp->dev_t);
1116 update_queue_mode_add_path(pp->mpp);
1121 enable_group(struct path * pp)
1123 struct pathgroup * pgp;
1126 * if the path was added through uev_add_path, pgindex can be unset.
1127 * the next update_strings() will set it upon the map reload event.
1129 * we can safely return here, because upon map reload all
1130 * path groups will be enabled.
1135 pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
1137 if (pgp->status == PGSTATE_DISABLED) {
1138 condlog(2, "%s: enable group #%i", pp->mpp->alias, pp->pgindex);
1139 dm_enablegroup(pp->mpp->alias, pp->pgindex);
1144 mpvec_garbage_collector (struct vectors * vecs)
1146 struct multipath * mpp;
1149 vector_foreach_slot (vecs->mpvec, mpp, i) {
1150 if (mpp && mpp->alias && !dm_map_present(mpp->alias)) {
1151 condlog(2, "%s: remove dead map", mpp->alias);
1152 remove_map(mpp, vecs);
1159 defered_failback_tick (vector mpvec)
1161 struct multipath * mpp;
1164 vector_foreach_slot (mpvec, mpp, i) {
1166 * deferred failback getting closer
1168 if (mpp->pgfailback > 0 && mpp->failback_tick > 0) {
1169 mpp->failback_tick--;
1171 if (!mpp->failback_tick && need_switch_pathgroup(mpp, 1))
1172 switch_pathgroup(mpp);
1178 retry_count_tick(vector mpvec)
1180 struct multipath *mpp;
1183 vector_foreach_slot (mpvec, mpp, i) {
1184 if (mpp->retry_tick) {
1185 condlog(4, "%s: Retrying.. No active path", mpp->alias);
1186 if (--mpp->retry_tick == 0) {
1187 dm_queue_if_no_path(mpp->alias, 0);
1188 condlog(2, "%s: Disable queueing", mpp->alias);
1195 checkerloop (void *ap)
1197 struct vectors *vecs;
1201 char checker_msg[MAX_CHECKER_MSG_SIZE];
1203 mlockall(MCL_CURRENT | MCL_FUTURE);
1205 memset(checker_msg, 0, MAX_CHECKER_MSG_SIZE);
1206 vecs = (struct vectors *)ap;
1208 condlog(2, "path checkers start up");
1211 * init the path check interval
1213 vector_foreach_slot (vecs->pathvec, pp, i) {
1214 pp->checkint = conf->checkint;
1218 pthread_cleanup_push(cleanup_lock, vecs->lock);
1222 vector_foreach_slot (vecs->pathvec, pp, i) {
1226 if (pp->tick && --pp->tick)
1227 continue; /* don't check this path yet */
1230 * schedule the next check as soon as possible,
1231 * in case we exit abnormally from here
1233 pp->tick = conf->checkint;
1236 pathinfo(pp, conf->hwtable, DI_SYSFS);
1241 condlog(0, "%s: checkfn is void", pp->dev);
1244 newstate = pp->checkfn(pp->fd, checker_msg,
1245 &pp->checker_context);
1247 if (newstate != pp->state) {
1248 pp->state = newstate;
1249 LOG_MSG(1, checker_msg);
1252 * upon state change, reset the checkint
1253 * to the shortest delay
1255 pp->checkint = conf->checkint;
1257 if (newstate == PATH_DOWN ||
1258 newstate == PATH_SHAKY) {
1260 * proactively fail path in the DM
1265 * cancel scheduled failback
1267 pp->mpp->failback_tick = 0;
1273 * reinstate this path
1278 * need to switch group ?
1280 update_multipath_strings(pp->mpp,
1284 * schedule deferred failback
1286 if (pp->mpp->pgfailback > 0)
1287 pp->mpp->failback_tick =
1288 pp->mpp->pgfailback + 1;
1289 else if (pp->mpp->pgfailback == -FAILBACK_IMMEDIATE &&
1290 need_switch_pathgroup(pp->mpp, 1))
1291 switch_pathgroup(pp->mpp);
1294 * if at least one path is up in a group, and
1295 * the group is disabled, re-enable it
1297 if (newstate == PATH_UP)
1300 else if (newstate == PATH_UP || newstate == PATH_GHOST) {
1301 LOG_MSG(4, checker_msg);
1303 * double the next check delay.
1304 * max at conf->max_checkint
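* (worked example, with assumed values: a checkint of 5s and a
* max_checkint of 20s give a delay sequence of 5s -> 10s -> 20s,
* after which it stays at 20s)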
1306 if (pp->checkint < (conf->max_checkint / 2))
1307 pp->checkint = 2 * pp->checkint;
1309 pp->checkint = conf->max_checkint;
1311 pp->tick = pp->checkint;
1312 condlog(4, "%s: delay next check %is",
1313 pp->dev_t, pp->tick);
1316 pp->state = newstate;
1319 * path prio refreshing
1321 condlog(4, "path prio refresh");
1322 pathinfo(pp, conf->hwtable, DI_PRIO);
1324 if (need_switch_pathgroup(pp->mpp, 0)) {
1325 if (pp->mpp->pgfailback > 0)
1326 pp->mpp->failback_tick =
1327 pp->mpp->pgfailback + 1;
1328 else if (pp->mpp->pgfailback ==
1329 -FAILBACK_IMMEDIATE)
1330 switch_pathgroup(pp->mpp);
1333 defered_failback_tick(vecs->mpvec);
1334 retry_count_tick(vecs->mpvec);
1339 condlog(4, "map garbage collection");
1340 mpvec_garbage_collector(vecs);
1344 lock_cleanup_pop(vecs->lock);
1350 static struct vectors *
1353 struct vectors * vecs;
1355 vecs = (struct vectors *)MALLOC(sizeof(struct vectors));
1361 (pthread_mutex_t *)MALLOC(sizeof(pthread_mutex_t));
1366 vecs->pathvec = vector_alloc();
1371 vecs->mpvec = vector_alloc();
1376 pthread_mutex_init(vecs->lock, NULL);
1381 vector_free(vecs->pathvec);
1386 condlog(0, "failed to init paths");
1391 signal_set(int signo, void (*func) (int))
1394 struct sigaction sig;
1395 struct sigaction osig;
1397 sig.sa_handler = func;
1398 sigemptyset(&sig.sa_mask);
1401 r = sigaction(signo, &sig, &osig);
1406 return (osig.sa_handler);
1412 condlog(3, "SIGHUP received");
1415 dbg_free_final(NULL);
1428 signal_set(SIGHUP, sighup);
1429 signal_set(SIGINT, sigend);
1430 signal_set(SIGTERM, sigend);
1431 /* SIGKILL cannot be caught, so no handler is installed for it */
1438 static struct sched_param sched_param = {
1442 res = sched_setscheduler (0, SCHED_RR, &sched_param);
1445 condlog(LOG_WARNING, "Could not set SCHED_RR at priority 99");
1450 set_oom_adj (int val)
1454 fp = fopen("/proc/self/oom_adj", "w");
1459 fprintf(fp, "%i", val);
1464 child (void * param)
1466 pthread_t check_thr, uevent_thr, uxlsnr_thr;
1467 pthread_attr_t attr;
1468 struct vectors * vecs;
1470 mlockall(MCL_CURRENT | MCL_FUTURE);
1475 condlog(2, "--------start up--------");
1476 condlog(2, "read " DEFAULT_CONFIGFILE);
1478 if (load_config(DEFAULT_CONFIGFILE))
1481 setlogmask(LOG_UPTO(conf->verbosity + 3));
1484 * fill in defaults for settings the config file left unset
1486 if (!conf->checkint) {
1487 conf->checkint = CHECKINT;
1488 conf->max_checkint = MAX_CHECKINT;
1491 if (pidfile_create(DEFAULT_PIDFILE, getpid())) {
1500 vecs = init_paths();
1505 if (sysfs_get_mnt_path(sysfs_path, FILE_NAME_SIZE)) {
1506 condlog(0, "cannot find sysfs mount point");
1511 * fetch the path and multipath lists
1512 * no paths and/or no multipaths are valid scenarios
1513 * vector maintenance will be driven by events
1515 path_discovery(vecs->pathvec, conf, DI_SYSFS | DI_WWID | DI_CHECKER);
1516 map_discovery(vecs);
1521 pthread_attr_init(&attr);
1522 pthread_attr_setstacksize(&attr, 64 * 1024);
1523 pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
1525 pthread_create(&check_thr, &attr, checkerloop, vecs);
1526 pthread_create(&uevent_thr, &attr, ueventloop, vecs);
1527 pthread_create(&uxlsnr_thr, &attr, uxlsnrloop, vecs);
1529 pthread_cond_wait(&exit_cond, &exit_mutex);
1536 free_pathvec(vecs->pathvec, FREE_PATHS);
1538 pthread_cancel(check_thr);
1539 pthread_cancel(uevent_thr);
1540 pthread_cancel(uxlsnr_thr);
1544 free_handlers(handlers);
1549 pthread_mutex_destroy(vecs->lock);
1557 condlog(2, "--------shut down-------");
1563 dbg_free_final(NULL);
1570 main (int argc, char *argv[])
1572 extern char *optarg;
1579 if (getuid() != 0) {
1580 fprintf(stderr, "need to be root\n");
1584 /* make sure we don't lock any path */
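/*
 * descriptive note (an addition): umask(umask(077) | 022) reads back
 * the current umask and restores it with at least the group and other
 * write bits masked, so nothing the daemon creates is group- or
 * other-writable.
 */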
1586 umask(umask(077) | 022);
1588 conf = alloc_config();
1593 while ((arg = getopt(argc, argv, ":dv:k::")) != EOF ) {
1597 //debug=1; /* ### comment me out ### */
1600 if (!optarg || !isdigit(optarg[0]))
1604 conf->verbosity = atoi(optarg);
1627 return (child(NULL));