3 #include <libdevmapper.h>
10 #include <sysfs/libsysfs.h>
11 #include <sysfs/dlist.h>
17 #include <path_state.h>
28 #include <blacklist.h>
33 #include <devmapper.h>
35 #include <discovery.h>
39 #include <switchgroup.h>
40 #include <path_state.h>
48 #include "cli_handlers.h"
50 #define FILE_NAME_SIZE 256
53 #define LOG_MSG(a,b) \
55 condlog(a, "%s: %s", pp->dev_t, b); \
56 memset(b, 0, MAX_CHECKER_MSG_SIZE); \
61 fprintf(stderr, "%s:%s(%i) lock %p\n", __FILE__, __FUNCTION__, __LINE__, a); \
64 fprintf(stderr, "%s:%s(%i) unlock %p\n", __FILE__, __FUNCTION__, __LINE__, a); \
65 pthread_mutex_unlock(a)
66 #define lock_cleanup_pop(a) \
67 fprintf(stderr, "%s:%s(%i) unlock %p\n", __FILE__, __FUNCTION__, __LINE__, a); \
68 pthread_cleanup_pop(1);
70 #define lock(a) pthread_mutex_lock(a)
71 #define unlock(a) pthread_mutex_unlock(a)
72 #define lock_cleanup_pop(a) pthread_cleanup_pop(1);
75 pthread_cond_t exit_cond = PTHREAD_COND_INITIALIZER;
76 pthread_mutex_t exit_mutex = PTHREAD_MUTEX_INITIALIZER;
85 char mapname[WWID_SIZE];
89 static struct event_thread *
93 struct event_thread * wp;
95 wp = (struct event_thread *)MALLOC(sizeof(struct event_thread));
101 cleanup_lock (void * data)
103 pthread_mutex_unlock((pthread_mutex_t *)data);
107 adopt_paths (struct vectors * vecs, struct multipath * mpp)
115 vector_foreach_slot (vecs->pathvec, pp, i) {
116 if (!strncmp(mpp->wwid, pp->wwid, WWID_SIZE)) {
117 condlog(4, "%s ownership set", pp->dev_t);
124 orphan_path (struct path * pp)
128 pp->dmstate = PSTATE_UNDEF;
129 pp->checker_context = NULL;
140 orphan_paths (struct vectors * vecs, struct multipath * mpp)
145 vector_foreach_slot (vecs->pathvec, pp, i) {
146 if (pp->mpp == mpp) {
147 condlog(4, "%s is orphaned", pp->dev_t);
154 update_multipath_table (struct multipath *mpp, vector pathvec)
159 if (dm_get_map(mpp->alias, &mpp->size, mpp->params))
162 if (disassemble_map(pathvec, mpp->params, mpp))
169 update_multipath_status (struct multipath *mpp)
174 if(dm_get_status(mpp->alias, mpp->status))
177 if (disassemble_status(mpp->status, mpp))
184 update_multipath_strings (struct multipath *mpp, vector pathvec)
188 mpp->selector = NULL;
193 mpp->features = NULL;
196 if (mpp->hwhandler) {
197 FREE(mpp->hwhandler);
198 mpp->hwhandler = NULL;
201 free_pgvec(mpp->pg, KEEP_PATHS);
204 if (update_multipath_table(mpp, pathvec))
207 if (update_multipath_status(mpp))
214 set_multipath_wwid (struct multipath * mpp)
219 dm_get_uuid(mpp->alias, mpp->wwid);
223 pathcount (struct multipath *mpp, int state)
225 struct pathgroup *pgp;
230 vector_foreach_slot (mpp->pg, pgp, i)
231 vector_foreach_slot (pgp->paths, pp, j)
232 if (pp->state == state)
238 * mpp->no_path_retry:
239 * -2 (QUEUE) : queue_if_no_path enabled, never turned off
240 * -1 (FAIL) : fail_if_no_path
241 * 0 (UNDEF) : nothing
242 * >0 : queue_if_no_path enabled, turned off after polling n times
245 update_queue_mode_del_path(struct multipath *mpp)
247 if (--mpp->nr_active == 0 && mpp->no_path_retry > 0) {
250 * meaning of +1: retry_tick may be decremented in
251 * checkerloop before starting retry.
253 mpp->retry_tick = mpp->no_path_retry * conf->checkint + 1;
254 condlog(1, "%s: Entering recovery mode: max_retries=%d",
255 mpp->alias, mpp->no_path_retry);
257 condlog(2, "%s: remaining active paths: %d", mpp->alias, mpp->nr_active);
261 update_queue_mode_add_path(struct multipath *mpp)
263 if (mpp->nr_active++ == 0 && mpp->no_path_retry > 0) {
264 /* come back to normal mode from retry mode */
266 dm_queue_if_no_path(mpp->alias, 1);
267 condlog(2, "%s: queue_if_no_path enabled", mpp->alias);
268 condlog(1, "%s: Recovered to normal mode", mpp->alias);
270 condlog(2, "%s: remaining active paths: %d", mpp->alias, mpp->nr_active);
274 set_no_path_retry(struct multipath *mpp)
277 mpp->nr_active = pathcount(mpp, PATH_UP);
278 select_no_path_retry(mpp);
280 switch (mpp->no_path_retry) {
281 case NO_PATH_RETRY_UNDEF:
283 case NO_PATH_RETRY_FAIL:
284 dm_queue_if_no_path(mpp->alias, 0);
286 case NO_PATH_RETRY_QUEUE:
287 dm_queue_if_no_path(mpp->alias, 1);
290 dm_queue_if_no_path(mpp->alias, 1);
291 if (mpp->nr_active == 0) {
292 /* Enter retry mode */
293 mpp->retry_tick = mpp->no_path_retry * conf->checkint;
294 condlog(1, "%s: Entering recovery mode: max_retries=%d",
295 mpp->alias, mpp->no_path_retry);
301 static struct hwentry *
302 extract_hwe_from_path(struct multipath * mpp)
305 struct pathgroup * pgp;
307 pgp = VECTOR_SLOT(mpp->pg, 0);
308 pp = VECTOR_SLOT(pgp->paths, 0);
314 setup_multipath (struct vectors * vecs, struct multipath * mpp)
318 set_multipath_wwid(mpp);
319 mpp->mpe = find_mpe(mpp->wwid);
320 condlog(4, "discovered map %s", mpp->alias);
322 if (update_multipath_strings(mpp, vecs->pathvec))
325 adopt_paths(vecs, mpp);
326 select_pgfailback(mpp);
327 mpp->hwe = extract_hwe_from_path(mpp);
328 set_no_path_retry(mpp);
333 * purge the multipath vector
335 if ((i = find_slot(vecs->mpvec, (void *)mpp)) != -1)
336 vector_del_slot(vecs->mpvec, i);
338 free_multipath(mpp, KEEP_PATHS);
339 condlog(0, "failed to setup multipath");
344 need_switch_pathgroup (struct multipath * mpp, int refresh)
346 struct pathgroup * pgp;
350 if (!mpp || mpp->pgfailback == -FAILBACK_MANUAL)
354 * Refresh path priority values
357 vector_foreach_slot (mpp->pg, pgp, i)
358 vector_foreach_slot (pgp->paths, pp, j)
359 pathinfo(pp, conf->hwtable, DI_PRIO);
361 select_path_group(mpp); /* sets mpp->nextpg */
362 pgp = VECTOR_SLOT(mpp->pg, mpp->nextpg - 1);
364 if (pgp && pgp->status != PGSTATE_ACTIVE)
371 switch_pathgroup (struct multipath * mpp)
373 struct pathgroup * pgp;
375 pgp = VECTOR_SLOT(mpp->pg, mpp->nextpg - 1);
377 if (pgp && pgp->status != PGSTATE_ACTIVE) {
378 dm_switchgroup(mpp->alias, mpp->nextpg);
379 condlog(2, "%s: switch to path group #%i",
380 mpp->alias, mpp->nextpg);
385 update_multipath (struct vectors *vecs, char *mapname)
387 struct multipath *mpp;
388 struct pathgroup *pgp;
393 mpp = find_mp(vecs->mpvec, mapname);
398 free_pgvec(mpp->pg, KEEP_PATHS);
401 if (setup_multipath(vecs, mpp))
402 goto out; /* mpp freed in setup_multipath */
405 * compare checkers states with DM states
407 vector_foreach_slot (mpp->pg, pgp, i) {
408 vector_foreach_slot (pgp->paths, pp, j) {
409 if (pp->dmstate != PSTATE_FAILED)
412 if (pp->state != PATH_DOWN) {
413 condlog(2, "%s: mark as failed", pp->dev_t);
414 pp->state = PATH_DOWN;
415 update_queue_mode_del_path(mpp);
419 * schedule the next check earlier
421 if (pp->tick > conf->checkint)
422 pp->tick = conf->checkint;
429 condlog(0, "failed to update multipath");
435 free_waiter (void * data)
437 struct event_thread * wp = (struct event_thread *)data;
440 dm_task_destroy(wp->dmt);
444 static sigset_t unblock_sighup(void)
449 sigaddset(&set, SIGHUP);
450 pthread_sigmask(SIG_UNBLOCK, &set, &old);
455 * returns the reschedule delay
456 * negative means *stop*
459 waiteventloop (struct event_thread * waiter)
465 if (!waiter->event_nr)
466 waiter->event_nr = dm_geteventnr(waiter->mapname);
468 if (!(waiter->dmt = dm_task_create(DM_DEVICE_WAITEVENT)))
471 if (!dm_task_set_name(waiter->dmt, waiter->mapname))
474 if (waiter->event_nr && !dm_task_set_event_nr(waiter->dmt,
478 dm_task_no_open_count(waiter->dmt);
480 set = unblock_sighup();
481 dm_task_run(waiter->dmt);
482 pthread_sigmask(SIG_SETMASK, &set, NULL);
483 pthread_testcancel();
484 dm_task_destroy(waiter->dmt);
493 condlog(3, "%s: devmap event #%i",
494 waiter->mapname, waiter->event_nr);
499 * 1) a table reload, which means our mpp structure is
500 * obsolete : refresh it through update_multipath()
501 * 2) a path failed by DM : mark as such through
503 * 3) map has gone away : stop the thread.
504 * 4) a path reinstate : nothing to do
505 * 5) a switch group : nothing to do
507 pthread_cleanup_push(cleanup_lock, waiter->vecs->lock);
508 lock(waiter->vecs->lock);
509 r = update_multipath(waiter->vecs, waiter->mapname);
510 lock_cleanup_pop(waiter->vecs->lock);
513 return -1; /* stop the thread */
515 event_nr = dm_geteventnr(waiter->mapname);
517 if (waiter->event_nr == event_nr)
518 return 1; /* upon problem reschedule 1s later */
520 waiter->event_nr = event_nr;
522 return -1; /* never reach there */
526 waitevent (void * et)
529 struct event_thread *waiter;
531 mlockall(MCL_CURRENT | MCL_FUTURE);
533 waiter = (struct event_thread *)et;
534 pthread_cleanup_push(free_waiter, et);
537 r = waiteventloop(waiter);
542 pthread_testcancel();
544 pthread_testcancel();
547 pthread_cleanup_pop(1);
552 stop_waiter_thread (struct multipath * mpp, struct vectors * vecs)
554 struct event_thread * wp = (struct event_thread *)mpp->waiter;
555 pthread_t thread = wp->thread;
561 condlog(2, "%s: stop event checker thread", wp->mapname);
563 if ((r = pthread_cancel(thread)))
566 condlog(2, "%s: couldn't stop event checker thread gracefully, kill",
568 pthread_kill(thread, SIGHUP);
573 start_waiter_thread (struct multipath * mpp, struct vectors * vecs)
576 struct event_thread * wp;
581 if (pthread_attr_init(&attr))
584 pthread_attr_setstacksize(&attr, 32 * 1024);
585 pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
592 mpp->waiter = (void *)wp;
593 strncpy(wp->mapname, mpp->alias, WWID_SIZE);
596 if (pthread_create(&wp->thread, &attr, waitevent, wp)) {
597 condlog(0, "%s: cannot create event checker", wp->mapname);
600 condlog(2, "%s: event checker started", wp->mapname);
607 condlog(0, "failed to start waiter thread");
612 remove_map (struct multipath * mpp, struct vectors * vecs)
617 * stop the DM event waiter thread
619 if (stop_waiter_thread(mpp, vecs)) {
620 condlog(0, "%s: error canceling waiter thread", mpp->alias);
624 free_waiter(mpp->waiter);
628 * clear references to this map
630 orphan_paths(vecs, mpp);
633 * purge the multipath vector
635 i = find_slot(vecs->mpvec, (void *)mpp);
636 vector_del_slot(vecs->mpvec, i);
641 free_multipath(mpp, KEEP_PATHS);
646 remove_maps (struct vectors * vecs)
649 struct multipath * mpp;
651 vector_foreach_slot (vecs->mpvec, mpp, i) {
652 remove_map(mpp, vecs);
656 vector_free(vecs->mpvec);
661 uev_add_map (char * devname, struct vectors * vecs)
664 char dev_t[BLK_DEV_SIZE];
666 struct multipath * mpp;
668 if (sysfs_get_dev(sysfs_path, devname, dev_t, BLK_DEV_SIZE))
671 if (sscanf(dev_t, "%d:%d", &major, &minor) != 2)
674 alias = dm_mapname(major, minor);
679 if (!dm_type(alias, DEFAULT_TARGET)) {
680 condlog(4, "%s: not a multipath map", alias);
685 mpp = find_mp(vecs->mpvec, alias);
689 * this should not happen,
690 * we missed a remove map event (not sent ?)
692 condlog(2, "%s: already registered", alias);
693 remove_map(mpp, vecs);
697 * now we can allocate
699 mpp = alloc_multipath();
707 if (setup_multipath(vecs, mpp))
708 return 1; /* mpp freed in setup_multipath */
710 if (!vector_alloc_slot(vecs->mpvec))
713 vector_set_slot(vecs->mpvec, mpp);
714 adopt_paths(vecs, mpp);
716 if (start_waiter_thread(mpp, vecs))
721 condlog(2, "%s: add devmap failed", mpp->alias);
723 * purge the multipath vector
725 if ((i = find_slot(vecs->mpvec, (void *)mpp)) != -1)
726 vector_del_slot(vecs->mpvec, i);
728 free_multipath(mpp, KEEP_PATHS);
733 uev_remove_map (char * devname, struct vectors * vecs)
736 struct multipath * mpp;
738 if (sscanf(devname, "dm-%d", &minor) != 1)
741 mpp = find_mp_by_minor(vecs->mpvec, minor);
744 condlog(3, "%s: devmap not registered, can't remove",
749 condlog(2, "remove %s devmap", mpp->alias);
750 remove_map(mpp, vecs);
756 uev_add_path (char * devname, struct vectors * vecs)
760 pp = find_path_by_dev(vecs->pathvec, devname);
763 condlog(3, "%s: already in pathvec");
766 pp = store_pathinfo(vecs->pathvec, conf->hwtable,
767 devname, DI_SYSFS | DI_WWID);
770 condlog(0, "%s: failed to store path info", devname);
774 condlog(2, "%s: path checker registered", devname);
775 pp->mpp = find_mp_by_wwid(vecs->mpvec, pp->wwid);
778 condlog(4, "%s: ownership set to %s",
779 pp->dev_t, pp->mpp->alias);
781 condlog(4, "%s: orphaned", pp->dev_t);
789 uev_remove_path (char * devname, struct vectors * vecs)
794 pp = find_path_by_dev(vecs->pathvec, devname);
797 condlog(3, "%s: not in pathvec");
801 if (pp->mpp && pp->state == PATH_UP)
802 update_queue_mode_del_path(pp->mpp);
804 condlog(2, "remove %s path checker", devname);
805 i = find_slot(vecs->pathvec, (void *)pp);
806 vector_del_slot(vecs->pathvec, i);
813 show_paths (char ** r, int * len, struct vectors * vecs)
819 struct path_layout pl;
821 get_path_layout(&pl, vecs->pathvec);
822 reply = MALLOC(MAX_REPLY_LEN);
828 c += snprint_path_header(c, reply + MAX_REPLY_LEN - c,
829 PRINT_PATH_CHECKER, &pl);
831 vector_foreach_slot(vecs->pathvec, pp, i)
832 c += snprint_path(c, reply + MAX_REPLY_LEN - c,
833 PRINT_PATH_CHECKER, pp, &pl);
836 *len = (int)(c - reply + 1);
841 show_maps (char ** r, int *len, struct vectors * vecs)
844 struct multipath * mpp;
847 struct map_layout ml;
849 get_map_layout(&ml, vecs->mpvec);
850 reply = MALLOC(MAX_REPLY_LEN);
856 c += snprint_map_header(c, reply + MAX_REPLY_LEN - c,
857 PRINT_MAP_FAILBACK, &ml);
859 vector_foreach_slot(vecs->mpvec, mpp, i)
860 c += snprint_map(c, reply + MAX_REPLY_LEN - c,
861 PRINT_MAP_FAILBACK, mpp, &ml);
864 *len = (int)(c - reply + 1);
869 dump_pathvec (char ** r, int * len, struct vectors * vecs)
876 *len = VECTOR_SIZE(vecs->pathvec) * sizeof(struct path);
877 reply = (char *)MALLOC(*len);
885 vector_foreach_slot (vecs->pathvec, pp, i) {
886 memcpy((void *)p, pp, sizeof(struct path));
887 p += sizeof(struct path);
890 /* return negative to hint caller not to add "ok" to the dump */
895 map_discovery (struct vectors * vecs)
898 struct multipath * mpp;
900 if (dm_get_maps(vecs->mpvec, "multipath"))
903 vector_foreach_slot (vecs->mpvec, mpp, i) {
904 if (setup_multipath(vecs, mpp))
906 mpp->minor = dm_get_minor(mpp->alias);
907 start_waiter_thread(mpp, vecs);
914 reconfigure (struct vectors * vecs)
916 struct config * old = conf;
917 struct multipath * mpp;
923 if (load_config(DEFAULT_CONFIGFILE)) {
925 condlog(2, "reconfigure failed, continue with old config");
928 conf->verbosity = old->verbosity;
931 vector_foreach_slot (vecs->mpvec, mpp, i) {
932 mpp->mpe = find_mpe(mpp->wwid);
933 mpp->hwe = extract_hwe_from_path(mpp);
934 adopt_paths(vecs, mpp);
935 set_no_path_retry(mpp);
937 vector_foreach_slot (vecs->pathvec, pp, i) {
942 condlog(2, "reconfigured");
947 uxsock_trigger (char * str, char ** reply, int * len, void * trigger_data)
949 struct vectors * vecs;
954 vecs = (struct vectors *)trigger_data;
956 pthread_cleanup_push(cleanup_lock, vecs->lock);
959 r = parse_cmd(str, reply, len, vecs);
962 *reply = STRDUP("fail\n");
963 *len = strlen(*reply) + 1;
966 else if (!r && *len == 0) {
967 *reply = STRDUP("ok\n");
968 *len = strlen(*reply) + 1;
971 /* else if (r < 0) leave *reply alone */
973 lock_cleanup_pop(vecs->lock);
978 uev_discard(char * devpath)
983 * keep only block devices, discard partitions
985 if (sscanf(devpath, "/block/%10s", a) != 1 ||
986 sscanf(devpath, "/block/%10[^/]/%10s", a, b) == 2) {
987 condlog(4, "discard event on %s", devpath);
994 uev_trigger (struct uevent * uev, void * trigger_data)
998 struct vectors * vecs;
1000 vecs = (struct vectors *)trigger_data;
1002 if (uev_discard(uev->devpath))
1005 basename(uev->devpath, devname);
1009 * device map add/remove event
1011 if (!strncmp(devname, "dm-", 3)) {
1012 if (!strncmp(uev->action, "add", 3)) {
1013 r = uev_add_map(devname, vecs);
1017 if (!strncmp(uev->action, "remove", 6)) {
1018 r = uev_remove_map(devname, vecs);
1026 * path add/remove event
1028 if (blacklist(conf->blist, devname))
1031 if (!strncmp(uev->action, "add", 3)) {
1032 r = uev_add_path(devname, vecs);
1035 if (!strncmp(uev->action, "remove", 6)) {
1036 r = uev_remove_path(devname, vecs);
1046 ueventloop (void * ap)
1048 uevent_listen(&uev_trigger, ap);
1054 uxlsnrloop (void * ap)
1059 if (alloc_handlers())
1062 add_handler(LIST+PATHS, cli_list_paths);
1063 add_handler(LIST+MAPS, cli_list_maps);
1064 add_handler(ADD+PATH, cli_add_path);
1065 add_handler(DEL+PATH, cli_del_path);
1066 add_handler(ADD+MAP, cli_add_map);
1067 add_handler(DEL+MAP, cli_del_map);
1068 add_handler(SWITCH+MAP+GROUP, cli_switch_group);
1069 add_handler(DUMP+PATHVEC, cli_dump_pathvec);
1070 add_handler(RECONFIGURE, cli_reconfigure);
1072 uxsock_listen(&uxsock_trigger, ap);
1078 exit_daemon (int status)
1081 fprintf(stderr, "bad exit status. see daemon.log\n");
1083 condlog(3, "unlink pidfile");
1084 unlink(DEFAULT_PIDFILE);
1087 pthread_cond_signal(&exit_cond);
1088 unlock(&exit_mutex);
1094 fail_path (struct path * pp)
1099 condlog(2, "checker failed path %s in map %s",
1100 pp->dev_t, pp->mpp->alias);
1102 dm_fail_path(pp->mpp->alias, pp->dev_t);
1103 update_queue_mode_del_path(pp->mpp);
1107 * caller must have locked the path list before calling that function
1110 reinstate_path (struct path * pp)
1115 if (dm_reinstate(pp->mpp->alias, pp->dev_t))
1116 condlog(0, "%s: reinstate failed", pp->dev_t);
1118 condlog(2, "%s: reinstated", pp->dev_t);
1119 update_queue_mode_add_path(pp->mpp);
1124 enable_group(struct path * pp)
1126 struct pathgroup * pgp;
1129 * if path is added through uev_add_path, pgindex can be unset.
1130 * next update_strings() will set it, upon map reload event.
1132 * we can safely return here, because upon map reload, all
1133 * PG will be enabled.
1138 pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
1140 if (pgp->status == PGSTATE_DISABLED) {
1141 condlog(2, "%s: enable group #%i", pp->mpp->alias, pp->pgindex);
1142 dm_enablegroup(pp->mpp->alias, pp->pgindex);
1147 mpvec_garbage_collector (struct vectors * vecs)
1149 struct multipath * mpp;
1152 vector_foreach_slot (vecs->mpvec, mpp, i) {
1153 if (mpp && mpp->alias && !dm_map_present(mpp->alias)) {
1154 condlog(2, "%s: remove dead map", mpp->alias);
1155 remove_map(mpp, vecs);
1162 defered_failback_tick (vector mpvec)
1164 struct multipath * mpp;
1167 vector_foreach_slot (mpvec, mpp, i) {
1169 * deferred failback getting sooner
1171 if (mpp->pgfailback > 0 && mpp->failback_tick > 0) {
1172 mpp->failback_tick--;
1174 if (!mpp->failback_tick && need_switch_pathgroup(mpp, 1))
1175 switch_pathgroup(mpp);
1181 retry_count_tick(vector mpvec)
1183 struct multipath *mpp;
1186 vector_foreach_slot (mpvec, mpp, i) {
1187 if (mpp->retry_tick) {
1188 condlog(4, "%s: Retrying.. No active path", mpp->alias);
1189 if(--mpp->retry_tick == 0) {
1190 dm_queue_if_no_path(mpp->alias, 0);
1191 condlog(2, "%s: Disable queueing", mpp->alias);
1198 checkerloop (void *ap)
1200 struct vectors *vecs;
1204 char checker_msg[MAX_CHECKER_MSG_SIZE];
1206 mlockall(MCL_CURRENT | MCL_FUTURE);
1208 memset(checker_msg, 0, MAX_CHECKER_MSG_SIZE);
1209 vecs = (struct vectors *)ap;
1211 condlog(2, "path checkers start up");
1214 * init the path check interval
1216 vector_foreach_slot (vecs->pathvec, pp, i) {
1217 pp->checkint = conf->checkint;
1221 pthread_cleanup_push(cleanup_lock, vecs->lock);
1225 vector_foreach_slot (vecs->pathvec, pp, i) {
1229 if (pp->tick && --pp->tick)
1230 continue; /* don't check this path yet */
1233 * provision a next check soonest,
1234 * in case we exit abnormally from here
1236 pp->tick = conf->checkint;
1239 pathinfo(pp, conf->hwtable, DI_SYSFS);
1244 condlog(0, "%s: checkfn is void", pp->dev);
1247 newstate = pp->checkfn(pp->fd, checker_msg,
1248 &pp->checker_context);
1250 if (newstate != pp->state) {
1251 pp->state = newstate;
1252 LOG_MSG(1, checker_msg);
1255 * upon state change, reset the checkint
1256 * to the shortest delay
1258 pp->checkint = conf->checkint;
1260 if (newstate == PATH_DOWN ||
1261 newstate == PATH_SHAKY) {
1263 * proactively fail path in the DM
1268 * cancel scheduled failback
1270 pp->mpp->failback_tick = 0;
1276 * reinstate this path
1281 * need to switch group ?
1283 update_multipath_strings(pp->mpp,
1287 * schedule deferred failback
1289 if (pp->mpp->pgfailback > 0)
1290 pp->mpp->failback_tick =
1291 pp->mpp->pgfailback + 1;
1292 else if (pp->mpp->pgfailback == -FAILBACK_IMMEDIATE &&
1293 need_switch_pathgroup(pp->mpp, 1))
1294 switch_pathgroup(pp->mpp);
1297 * if at least one path is up in a group, and
1298 * the group is disabled, re-enable it
1300 if (newstate == PATH_UP)
1303 else if (newstate == PATH_UP || newstate == PATH_GHOST) {
1304 LOG_MSG(4, checker_msg);
1306 * double the next check delay.
1307 * max at conf->max_checkint
1309 if (pp->checkint < (conf->max_checkint / 2))
1310 pp->checkint = 2 * pp->checkint;
1312 pp->checkint = conf->max_checkint;
1314 pp->tick = pp->checkint;
1315 condlog(4, "%s: delay next check %is",
1316 pp->dev_t, pp->tick);
1319 pp->state = newstate;
1322 * path prio refreshing
1324 condlog(4, "path prio refresh");
1325 pathinfo(pp, conf->hwtable, DI_PRIO);
1327 if (need_switch_pathgroup(pp->mpp, 0)) {
1328 if (pp->mpp->pgfailback > 0)
1329 pp->mpp->failback_tick =
1330 pp->mpp->pgfailback + 1;
1331 else if (pp->mpp->pgfailback ==
1332 -FAILBACK_IMMEDIATE)
1333 switch_pathgroup(pp->mpp);
1336 defered_failback_tick(vecs->mpvec);
1337 retry_count_tick(vecs->mpvec);
1342 condlog(4, "map garbage collection");
1343 mpvec_garbage_collector(vecs);
1347 lock_cleanup_pop(vecs->lock);
1353 static struct vectors *
1356 struct vectors * vecs;
1358 vecs = (struct vectors *)MALLOC(sizeof(struct vectors));
1364 (pthread_mutex_t *)MALLOC(sizeof(pthread_mutex_t));
1369 vecs->pathvec = vector_alloc();
1374 vecs->mpvec = vector_alloc();
1379 pthread_mutex_init(vecs->lock, NULL);
1384 vector_free(vecs->pathvec);
1389 condlog(0, "failed to init paths");
1394 signal_set(int signo, void (*func) (int))
1397 struct sigaction sig;
1398 struct sigaction osig;
1400 sig.sa_handler = func;
1401 sigemptyset(&sig.sa_mask);
1404 r = sigaction(signo, &sig, &osig);
1409 return (osig.sa_handler);
1415 condlog(3, "SIGHUP received");
1418 dbg_free_final(NULL);
1431 signal_set(SIGHUP, sighup);
1432 signal_set(SIGINT, sigend);
1433 signal_set(SIGTERM, sigend);
1434 /* NOTE(review): removed signal_set(SIGKILL, sigend) — SIGKILL cannot be caught; sigaction() on it always fails with EINVAL */
1441 static struct sched_param sched_param = {
1445 res = sched_setscheduler (0, SCHED_RR, &sched_param);
1448 condlog(LOG_WARNING, "Could not set SCHED_RR at priority 99");
1453 set_oom_adj (int val)
1457 fp = fopen("/proc/self/oom_adj", "w");
1462 fprintf(fp, "%i", val);
1467 child (void * param)
1469 pthread_t check_thr, uevent_thr, uxlsnr_thr;
1470 pthread_attr_t attr;
1471 struct vectors * vecs;
1473 mlockall(MCL_CURRENT | MCL_FUTURE);
1478 condlog(2, "--------start up--------");
1479 condlog(2, "read " DEFAULT_CONFIGFILE);
1481 if (load_config(DEFAULT_CONFIGFILE))
1484 setlogmask(LOG_UPTO(conf->verbosity + 3));
1487 * fill the voids left in the config file
1489 if (!conf->checkint) {
1490 conf->checkint = CHECKINT;
1491 conf->max_checkint = MAX_CHECKINT;
1494 if (pidfile_create(DEFAULT_PIDFILE, getpid())) {
1503 vecs = init_paths();
1508 if (sysfs_get_mnt_path(sysfs_path, FILE_NAME_SIZE)) {
1509 condlog(0, "can not find sysfs mount point");
1514 * fetch paths and multipaths lists
1515 * no paths and/or no multipaths are valid scenarios
1516 * vectors maintenance will be driven by events
1518 path_discovery(vecs->pathvec, conf, DI_SYSFS | DI_WWID | DI_CHECKER);
1519 map_discovery(vecs);
1524 pthread_attr_init(&attr);
1525 pthread_attr_setstacksize(&attr, 64 * 1024);
1526 pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
1528 pthread_create(&check_thr, &attr, checkerloop, vecs);
1529 pthread_create(&uevent_thr, &attr, ueventloop, vecs);
1530 pthread_create(&uxlsnr_thr, &attr, uxlsnrloop, vecs);
1532 pthread_cond_wait(&exit_cond, &exit_mutex);
1539 free_pathvec(vecs->pathvec, FREE_PATHS);
1541 pthread_cancel(check_thr);
1542 pthread_cancel(uevent_thr);
1543 pthread_cancel(uxlsnr_thr);
1546 free_handlers(handlers);
1550 pthread_mutex_destroy(vecs->lock);
1555 condlog(2, "--------shut down-------");
1561 dbg_free_final(NULL);
1568 main (int argc, char *argv[])
1570 extern char *optarg;
1577 if (getuid() != 0) {
1578 fprintf(stderr, "need to be root\n");
1582 /* make sure we don't lock any path */
1584 umask(umask(077) | 022);
1586 conf = alloc_config();
1591 while ((arg = getopt(argc, argv, ":dv:k::")) != EOF ) {
1595 //debug=1; /* ### comment me out ### */
1598 /* fix: "sizeof(optarg) > sizeof(char *)" compared pointer size to pointer size — always false, removed */
1599 if (!isdigit(optarg[0]))
1602 conf->verbosity = atoi(optarg);
1625 return (child(NULL));