2 * Copyright (c) 2004, 2005 Christophe Varoqui
3 * Copyright (c) 2005 Kiyoshi Ueda, NEC
4 * Copyright (c) 2005 Benjamin Marzinski, Redhat
5 * Copyright (c) 2005 Edward Goggin, EMC
9 #include <libdevmapper.h>
12 #include <sys/types.h>
16 #include <sys/resource.h>
18 #include <linux/oom.h>
21 #include <systemd/sd-daemon.h>
23 #include <semaphore.h>
24 #include <mpath_cmd.h>
25 #include <mpath_persist.h>
34 static int use_watchdog;
48 #include <blacklist.h>
49 #include <structs_vec.h>
51 #include <devmapper.h>
54 #include <discovery.h>
58 #include <switchgroup.h>
60 #include <configure.h>
63 #include <pgpolicies.h>
66 #include "prioritizers/alua_rtpg.h"
73 #include "cli_handlers.h"
/* Daemon-wide constants, logging helper macro, and shared global state. */
78 #define FILE_NAME_SIZE 256
/*
 * LOG_MSG(a, b): log a path event at verbosity `a`; reports "path offline"
 * or the checker message `b` for pp->dev in map pp->mpp->alias.
 * (The surrounding condition selecting between the two condlog calls is
 * not visible in this extract.)
 */
81 #define LOG_MSG(a, b) \
84 condlog(a, "%s: %s - path offline", pp->mpp->alias, pp->dev); \
86 condlog(a, "%s: %s - %s", pp->mpp->alias, pp->dev, b); \
/* Couples a multipath map with event data; other members not visible here. */
89 struct mpath_event_param
92 struct multipath *mpp;
95 unsigned int mpath_mx_alloc_len;
/* Current daemon lifecycle state (see DAEMON_* switch below). */
98 enum daemon_status running_state;
/* Posted to wake the main thread for daemon exit. */
101 static sem_t exit_sem;
103 * global copy of vecs for use in sig handlers
105 struct vectors * gvecs;
/*
 * Decide whether map `mpp` should switch path groups: refresh every
 * path's priority, recompute the best group via select_path_group(),
 * and compare it against the currently scheduled next group.
 * No-op for NULL maps and maps configured for manual failback.
 */
110 need_switch_pathgroup (struct multipath * mpp, int refresh)
112 struct pathgroup * pgp;
116 if (!mpp || mpp->pgfailback == -FAILBACK_MANUAL)
120 * Refresh path priority values
123 vector_foreach_slot (mpp->pg, pgp, i)
124 vector_foreach_slot (pgp->paths, pp, j)
125 pathinfo(pp, conf->hwtable, DI_PRIO);
127 mpp->bestpg = select_path_group(mpp);
129 if (mpp->bestpg != mpp->nextpg)
/*
 * Activate mpp->bestpg in the kernel via device-mapper and bump the
 * switch-group statistics counter.
 */
136 switch_pathgroup (struct multipath * mpp)
138 mpp->stat_switchgroup++;
139 dm_switchgroup(mpp->alias, mpp->bestpg);
140 condlog(2, "%s: switch to path group #%i",
141 mpp->alias, mpp->bestpg);
/*
 * Reconcile the existing map set (vecs->mpvec) with the freshly built
 * set `nmpv`: flush maps no longer allowed by the configuration, keep
 * (and move into nmpv) maps whose flush failed — e.g. because the
 * device is still open — and, when reassign_maps is set, reassign
 * device-mapper tables for maps that survive.
 */
145 coalesce_maps(struct vectors *vecs, vector nmpv)
147 struct multipath * ompp;
148 vector ompv = vecs->mpvec;
151 vector_foreach_slot (ompv, ompp, i) {
152 condlog(3, "%s: coalesce map", ompp->alias);
153 if (!find_mp_by_wwid(nmpv, ompp->wwid)) {
155 * remove all current maps not allowed by the
156 * current configuration
158 if (dm_flush_map(ompp->alias)) {
159 condlog(0, "%s: unable to flush devmap",
162 * may be just because the device is open
164 if (setup_multipath(vecs, ompp) != 0) {
/* Flush failed but map is usable: carry it over into the new set. */
168 if (!vector_alloc_slot(nmpv))
171 vector_set_slot(nmpv, ompp);
173 vector_del_slot(ompv, i);
178 condlog(2, "%s devmap removed", ompp->alias);
180 } else if (conf->reassign_maps) {
181 condlog(3, "%s: Reassign existing device-mapper"
182 " devices", ompp->alias);
183 dm_reassign(ompp->alias);
/*
 * Push checker state into the kernel for every path of `mpp`:
 * reinstate paths that are UP/GHOST but FAILED/UNDEF in DM, and fail
 * paths that are DOWN/SHAKY but ACTIVE/UNDEF in DM.  Paths in
 * UNCHECKED/WILD/DELAYED state are skipped (continue not visible here).
 */
190 sync_map_state(struct multipath *mpp)
192 struct pathgroup *pgp;
199 vector_foreach_slot (mpp->pg, pgp, i){
200 vector_foreach_slot (pgp->paths, pp, j){
201 if (pp->state == PATH_UNCHECKED ||
202 pp->state == PATH_WILD ||
203 pp->state == PATH_DELAYED)
205 if ((pp->dmstate == PSTATE_FAILED ||
206 pp->dmstate == PSTATE_UNDEF) &&
207 (pp->state == PATH_UP || pp->state == PATH_GHOST))
208 dm_reinstate_path(mpp->alias, pp->dev_t);
209 else if ((pp->dmstate == PSTATE_ACTIVE ||
210 pp->dmstate == PSTATE_UNDEF) &&
211 (pp->state == PATH_DOWN ||
212 pp->state == PATH_SHAKY))
213 dm_fail_path(mpp->alias, pp->dev_t);
/*
 * Apply per-map kernel-state synchronization to every map in mpvec.
 * NOTE(review): the loop body (presumably sync_map_state(mpp)) is not
 * visible in this extract — confirm against the full source.
 */
219 sync_maps_state(vector mpvec)
222 struct multipath *mpp;
224 vector_foreach_slot (mpvec, mpp, i)
/*
 * Remove map `mpp` from device-mapper.  With `nopaths` set, uses the
 * nopaths variant honoring deferred removal; a deferred remove leaves
 * the map marked DEFERRED_REMOVE_IN_PROGRESS.  On success the map's
 * paths are orphaned and the map plus its waiter thread are removed.
 */
229 flush_map(struct multipath * mpp, struct vectors * vecs, int nopaths)
234 r = dm_flush_map_nopaths(mpp->alias, mpp->deferred_remove);
236 r = dm_flush_map(mpp->alias);
238 * clear references to this map before flushing so we can ignore
239 * the spurious uevent we may generate with the dm_flush_map call below
243 * May not really be an error -- if the map was already flushed
244 * from the device mapper by dmsetup(8) for instance.
247 condlog(0, "%s: can't flush", mpp->alias);
249 condlog(2, "%s: devmap deferred remove", mpp->alias);
250 mpp->deferred_remove = DEFERRED_REMOVE_IN_PROGRESS;
256 condlog(2, "%s: map flushed", mpp->alias);
259 orphan_paths(vecs->pathvec, mpp);
260 remove_map_and_stop_waiter(mpp, vecs, 1);
/*
 * Re-adopt paths and reload an existing map: rebuild the DM params
 * string with setup_map(), push it with domap() (retrying on failure),
 * then resynchronize daemon state with setup_multipath().
 */
266 update_map (struct multipath *mpp, struct vectors *vecs)
269 char params[PARAMS_SIZE] = {0};
272 condlog(4, "%s: updating new map", mpp->alias);
273 if (adopt_paths(vecs->pathvec, mpp, 1)) {
274 condlog(0, "%s: failed to adopt paths for new map update",
279 verify_paths(mpp, vecs);
280 mpp->flush_on_last_del = FLUSH_UNDEF;
281 mpp->action = ACT_RELOAD;
283 if (setup_map(mpp, params, PARAMS_SIZE)) {
284 condlog(0, "%s: failed to setup new map in update", mpp->alias);
288 if (domap(mpp, params) <= 0 && retries-- > 0) {
289 condlog(0, "%s: map_udate sleep", mpp->alias);
296 if (setup_multipath(vecs, mpp))
302 condlog(0, "%s: failed reload in new map update", mpp->alias);
/*
 * Handle a dm "add/change" uevent for a map: resolve the map alias
 * from DM_NAME, or fall back to dm_mapname(major, minor) when the
 * uevent carries no DM_NAME, then delegate to ev_add_map().
 */
307 uev_add_map (struct uevent * uev, struct vectors * vecs)
310 int major = -1, minor = -1, rc;
312 condlog(3, "%s: add map (uevent)", uev->kernel);
313 alias = uevent_get_dm_name(uev);
315 condlog(3, "%s: No DM_NAME in uevent", uev->kernel);
316 major = uevent_get_major(uev);
317 minor = uevent_get_minor(uev);
318 alias = dm_mapname(major, minor);
320 condlog(2, "%s: mapname not found for %d:%d",
321 uev->kernel, major, minor);
325 rc = ev_add_map(uev->kernel, alias, vecs);
/*
 * Register map `alias` (device node `dev`) with the daemon.  If the
 * map is already known: complete a pending udev wait (possibly via
 * update_map or a delayed reconfigure) and optionally reassign DM
 * tables.  Otherwise register it via add_map_without_path(), or fall
 * back to rebuilding from the reference wwid with coalesce_paths().
 */
331 ev_add_map (char * dev, char * alias, struct vectors * vecs)
334 struct multipath * mpp;
338 map_present = dm_map_present(alias);
340 if (map_present && !dm_is_mpath(alias)) {
341 condlog(4, "%s: not a multipath map", alias);
345 mpp = find_mp_by_alias(vecs->mpvec, alias);
/* Map already known to the daemon: finish the pending-create dance. */
348 if (mpp->wait_for_udev > 1) {
349 if (update_map(mpp, vecs))
350 /* setup multipathd removed the map */
353 if (mpp->wait_for_udev) {
354 mpp->wait_for_udev = 0;
355 if (conf->delayed_reconfig &&
356 !need_to_delay_reconfig(vecs)) {
357 condlog(2, "reconfigure (delayed)");
363 * Not really an error -- we generate our own uevent
364 * if we create a multipath mapped device as a result
367 if (conf->reassign_maps) {
368 condlog(3, "%s: Reassign existing device-mapper devices",
374 condlog(2, "%s: adding map", alias);
377 * now we can register the map
380 if ((mpp = add_map_without_path(vecs, alias))) {
382 condlog(2, "%s: devmap %s registered", alias, dev);
385 condlog(2, "%s: uev_add_map failed", dev);
/* Fallback: rebuild the map from its reference wwid. */
389 r = get_refwwid(dev, DEV_DEVMAP, vecs->pathvec, &refwwid);
392 r = coalesce_paths(vecs, NULL, refwwid, 0);
397 condlog(2, "%s: devmap %s added", alias, dev);
399 condlog(2, "%s: uev_add_map %s blacklisted", alias, dev);
401 condlog(0, "%s: uev_add_map %s failed", alias, dev);
/*
 * Handle a dm "remove" uevent: look the map up by minor number,
 * sanity-check that the alias matches the registered map, then orphan
 * its paths and remove the map plus its event waiter thread.
 */
408 uev_remove_map (struct uevent * uev, struct vectors * vecs)
412 struct multipath *mpp;
414 condlog(2, "%s: remove map (uevent)", uev->kernel);
415 alias = uevent_get_dm_name(uev);
417 condlog(3, "%s: No DM_NAME in uevent, ignoring", uev->kernel);
420 minor = uevent_get_minor(uev);
421 mpp = find_mp_by_minor(vecs->mpvec, minor);
424 condlog(2, "%s: devmap not registered, can't remove",
/* Alias mismatch means the event refers to a different map instance. */
428 if (strcmp(mpp->alias, alias)) {
429 condlog(2, "%s: minor number mismatch (map %d, event %d)",
430 mpp->alias, mpp->dmi->minor, minor);
434 orphan_paths(vecs->pathvec, mpp);
435 remove_map_and_stop_waiter(mpp, vecs, 1);
/*
 * CLI-driven map removal: find the registered map by minor, verify the
 * alias matches, then flush it (keeping paths; nopaths == 0).
 */
442 ev_remove_map (char * devname, char * alias, int minor, struct vectors * vecs)
444 struct multipath * mpp;
446 mpp = find_mp_by_minor(vecs->mpvec, minor);
449 condlog(2, "%s: devmap not registered, can't remove",
453 if (strcmp(mpp->alias, alias)) {
454 condlog(2, "%s: minor number mismatch (map %d, event %d)",
455 mpp->alias, mpp->dmi->minor, minor);
458 return flush_map(mpp, vecs, 0);
/*
 * Handle a block-device "add" uevent.  Rejects relative device names,
 * reinitializes a known-but-uninitialized path (re-running pathinfo
 * and dropping it if now blacklisted), or allocates a new struct path
 * with full pathinfo, stores it in the pathvec and calls ev_add_path().
 */
462 uev_add_path (struct uevent *uev, struct vectors * vecs)
467 condlog(2, "%s: add path (uevent)", uev->kernel);
468 if (strstr(uev->kernel, "..") != NULL) {
470 * Don't allow relative device names in the pathvec
472 condlog(0, "%s: path name is invalid", uev->kernel);
476 pp = find_path_by_dev(vecs->pathvec, uev->kernel);
480 condlog(0, "%s: spurious uevent, path already in pathvec",
/* Path known but never fully initialized: retry from scratch. */
482 if (!pp->mpp && !strlen(pp->wwid)) {
483 condlog(3, "%s: reinitialize path", uev->kernel);
484 udev_device_unref(pp->udev);
485 pp->udev = udev_device_ref(uev->udev);
486 r = pathinfo(pp, conf->hwtable,
487 DI_ALL | DI_BLACKLIST);
488 if (r == PATHINFO_OK)
489 ret = ev_add_path(pp, vecs);
490 else if (r == PATHINFO_SKIPPED) {
491 condlog(3, "%s: remove blacklisted path",
493 i = find_slot(vecs->pathvec, (void *)pp);
495 vector_del_slot(vecs->pathvec, i);
498 condlog(0, "%s: failed to reinitialize path",
507 * get path vital state
509 ret = alloc_path_with_pathinfo(conf->hwtable, uev->udev,
512 if (ret == PATHINFO_SKIPPED)
514 condlog(3, "%s: failed to get path info", uev->kernel);
517 ret = store_path(vecs->pathvec, pp);
519 pp->checkint = conf->checkint;
520 ret = ev_add_path(pp, vecs);
522 condlog(0, "%s: failed to store path info, "
/*
 * Core add-path event: attach path `pp` to an existing map matched by
 * wwid (rejecting size mismatches) or create a new map for it, run a
 * persistent-reservation check, build the DM table with setup_map(),
 * push it with domap() (with retries for racing uevents), and finally
 * resync daemon state and start the event waiter thread when needed.
 * On failure the path is left in (or removed from) pathvec as the
 * per-site comments indicate.
 */
538 ev_add_path (struct path * pp, struct vectors * vecs)
540 struct multipath * mpp;
541 char params[PARAMS_SIZE] = {0};
543 int start_waiter = 0;
547 * need path UID to go any further
549 if (strlen(pp->wwid) == 0) {
550 condlog(0, "%s: failed to get path uid", pp->dev);
551 goto fail; /* leave path added to pathvec */
553 mpp = find_mp_by_wwid(vecs->mpvec, pp->wwid);
/* Map still waiting for its creation uevent: defer this path. */
554 if (mpp && mpp->wait_for_udev) {
555 mpp->wait_for_udev = 2;
556 orphan_path(pp, "waiting for create to complete");
563 if (mpp->size != pp->size) {
564 condlog(0, "%s: failed to add new path %s, "
565 "device size mismatch",
566 mpp->alias, pp->dev);
567 int i = find_slot(vecs->pathvec, (void *)pp);
569 vector_del_slot(vecs->pathvec, i);
574 condlog(4,"%s: adopting all paths for path %s",
575 mpp->alias, pp->dev);
576 if (adopt_paths(vecs->pathvec, mpp, 1))
577 goto fail; /* leave path added to pathvec */
579 verify_paths(mpp, vecs);
580 mpp->flush_on_last_del = FLUSH_UNDEF;
581 mpp->action = ACT_RELOAD;
/* No existing map: decide whether this path deserves one. */
583 if (!should_multipath(pp, vecs->pathvec)) {
584 orphan_path(pp, "only one path");
587 condlog(4,"%s: creating new map", pp->dev);
588 if ((mpp = add_map_with_path(vecs, pp, 1))) {
589 mpp->action = ACT_CREATE;
591 * We don't depend on ACT_CREATE, as domap will
592 * set it to ACT_NOTHING when complete.
597 goto fail; /* leave path added to pathvec */
600 /* persistent reseravtion check*/
601 mpath_pr_event_handle(pp);
604 * push the map to the device-mapper
606 if (setup_map(mpp, params, PARAMS_SIZE)) {
607 condlog(0, "%s: failed to setup map for addition of new "
608 "path %s", mpp->alias, pp->dev);
612 * reload the map for the multipath mapped device
615 ret = domap(mpp, params);
617 if (ret < 0 && retries-- > 0) {
618 condlog(0, "%s: retry domap for addition of new "
619 "path %s", mpp->alias, pp->dev);
623 condlog(0, "%s: failed in domap for addition of new "
624 "path %s", mpp->alias, pp->dev);
626 * deal with asynchronous uevents :((
628 if (mpp->action == ACT_RELOAD && retries-- > 0) {
629 condlog(0, "%s: uev_add_path sleep", mpp->alias);
631 update_mpp_paths(mpp, vecs->pathvec);
634 else if (mpp->action == ACT_RELOAD)
635 condlog(0, "%s: giving up reload", mpp->alias);
642 * update our state from kernel regardless of create or reload
644 if (setup_multipath(vecs, mpp))
645 goto fail; /* if setup_multipath fails, it removes the map */
649 if ((mpp->action == ACT_CREATE ||
650 (mpp->action == ACT_NOTHING && start_waiter && !mpp->waiter)) &&
651 start_waiter_thread(mpp, vecs))
655 condlog(2, "%s [%s]: path added to devmap %s",
656 pp->dev, pp->dev_t, mpp->alias);
663 remove_map(mpp, vecs, 1);
665 orphan_path(pp, "failed to add path");
/*
 * Handle a block-device "remove" uevent: look the path up by kernel
 * name and delegate to ev_remove_path(); an unknown path is not an
 * error (it may have been purged already).
 */
670 uev_remove_path (struct uevent *uev, struct vectors * vecs)
674 condlog(2, "%s: remove path (uevent)", uev->kernel);
675 pp = find_path_by_dev(vecs->pathvec, uev->kernel);
678 /* Not an error; path might have been purged earlier */
679 condlog(0, "%s: path already removed", uev->kernel);
683 return ev_remove_path(pp, vecs);
/*
 * Core remove-path event: detach `pp` from its map, and either flush
 * the whole map when this was the last path (honoring
 * flush_on_last_del by disabling queueing first), or rebuild and
 * reload the DM table without the path.  Finally the path is deleted
 * from the global pathvec.
 */
687 ev_remove_path (struct path *pp, struct vectors * vecs)
689 struct multipath * mpp;
691 char params[PARAMS_SIZE] = {0};
694 * avoid referring to the map of an orphaned path
696 if ((mpp = pp->mpp)) {
698 * transform the mp->pg vector of vectors of paths
699 * into a mp->params string to feed the device-mapper
701 if (update_mpp_paths(mpp, vecs->pathvec)) {
702 condlog(0, "%s: failed to update paths",
706 if ((i = find_slot(mpp->paths, (void *)pp)) != -1)
707 vector_del_slot(mpp->paths, i);
710 * remove the map IFF removing the last path
712 if (VECTOR_SIZE(mpp->paths) == 0) {
713 char alias[WWID_SIZE];
716 * flush_map will fail if the device is open
718 strncpy(alias, mpp->alias, WWID_SIZE);
719 if (mpp->flush_on_last_del == FLUSH_ENABLED) {
720 condlog(2, "%s Last path deleted, disabling queueing", mpp->alias);
722 mpp->no_path_retry = NO_PATH_RETRY_FAIL;
723 mpp->flush_on_last_del = FLUSH_IN_PROGRESS;
724 dm_queue_if_no_path(mpp->alias, 0);
726 if (!flush_map(mpp, vecs, 1)) {
727 condlog(2, "%s: removed map after"
728 " removing all paths",
734 * Not an error, continue
738 if (setup_map(mpp, params, PARAMS_SIZE)) {
739 condlog(0, "%s: failed to setup map for"
740 " removal of path %s", mpp->alias, pp->dev);
/* Map creation still pending in udev: defer the reload. */
744 if (mpp->wait_for_udev) {
745 mpp->wait_for_udev = 2;
752 mpp->action = ACT_RELOAD;
753 if (domap(mpp, params) <= 0) {
754 condlog(0, "%s: failed in domap for "
755 "removal of path %s",
756 mpp->alias, pp->dev);
760 * update our state from kernel
762 if (setup_multipath(vecs, mpp))
766 condlog(2, "%s [%s]: path removed from map %s",
767 pp->dev, pp->dev_t, mpp->alias);
772 if ((i = find_slot(vecs->pathvec, (void *)pp)) != -1)
773 vector_del_slot(vecs->pathvec, i);
780 remove_map_and_stop_waiter(mpp, vecs, 1);
/*
 * Handle a block-device "change" uevent: re-run the add path flow when
 * the path was waiting for this event (INIT_REQUESTED_UDEV); otherwise
 * react to a read-only flag change by reloading the owning map, unless
 * the map is still waiting for its creation uevent.
 */
785 uev_update_path (struct uevent *uev, struct vectors * vecs)
790 pp = find_path_by_dev(vecs->pathvec, uev->kernel);
792 condlog(0, "%s: spurious uevent, path not found",
797 if (pp->initialized == INIT_REQUESTED_UDEV)
798 return uev_add_path(uev, vecs);
800 ro = uevent_get_disk_ro(uev);
803 condlog(2, "%s: update path write_protect to '%d' (uevent)",
806 if (pp->mpp->wait_for_udev) {
807 pp->mpp->wait_for_udev = 2;
811 retval = reload_map(vecs, pp->mpp, 0);
813 condlog(2, "%s: map %s reloaded (retval %d)",
814 uev->kernel, pp->mpp->alias, retval);
/*
 * Populate vecs->mpvec from the current device-mapper map set and run
 * setup_multipath() on each discovered map.
 */
823 map_discovery (struct vectors * vecs)
825 struct multipath * mpp;
828 if (dm_get_maps(vecs->mpvec))
831 vector_foreach_slot (vecs->mpvec, mpp, i)
832 if (setup_multipath(vecs, mpp))
/*
 * Unix-socket command dispatcher: parse `str` under the vecs lock
 * (cancel-safe via pthread_cleanup_push) and normalize the reply —
 * "fail\n" on error, "ok\n" when the handler produced no output,
 * otherwise the handler's own reply.
 */
839 uxsock_trigger (char * str, char ** reply, int * len, void * trigger_data)
841 struct vectors * vecs;
846 vecs = (struct vectors *)trigger_data;
848 pthread_cleanup_push(cleanup_lock, &vecs->lock);
850 pthread_testcancel();
852 r = parse_cmd(str, reply, len, vecs);
855 *reply = STRDUP("fail\n");
856 *len = strlen(*reply) + 1;
859 else if (!r && *len == 0) {
860 *reply = STRDUP("ok\n");
861 *len = strlen(*reply) + 1;
864 /* else if (r < 0) leave *reply alone */
866 lock_cleanup_pop(vecs->lock);
/*
 * Filter uevents by devpath: keep only whole block devices — the path
 * must contain "/block/" and must not have a partition component after
 * the device name.
 */
871 uev_discard(char * devpath)
877 * keep only block devices, discard partitions
879 tmp = strstr(devpath, "/block/");
881 condlog(4, "no /block/ in '%s'", devpath);
884 if (sscanf(tmp, "/block/%10s", a) != 1 ||
885 sscanf(tmp, "/block/%10[^/]/%10s", a, b) == 2) {
886 condlog(4, "discard event on %s", devpath);
/*
 * Top-level uevent dispatcher (runs under the vecs lock): dm-* devices
 * route to the map handlers (change/remove; add is ignored because the
 * tables are not initialized yet), everything else passes the devnode
 * blacklist filter and routes to the path add/remove/change handlers.
 */
893 uev_trigger (struct uevent * uev, void * trigger_data)
896 struct vectors * vecs;
898 vecs = (struct vectors *)trigger_data;
900 if (uev_discard(uev->devpath))
903 pthread_cleanup_push(cleanup_lock, &vecs->lock);
905 pthread_testcancel();
909 * Add events are ignored here as the tables
910 * are not fully initialised then.
912 if (!strncmp(uev->kernel, "dm-", 3)) {
913 if (!strncmp(uev->action, "change", 6)) {
914 r = uev_add_map(uev, vecs);
917 if (!strncmp(uev->action, "remove", 6)) {
918 r = uev_remove_map(uev, vecs);
925 * path add/remove event
927 if (filter_devnode(conf->blist_devnode, conf->elist_devnode,
931 if (!strncmp(uev->action, "add", 3)) {
932 r = uev_add_path(uev, vecs);
935 if (!strncmp(uev->action, "remove", 6)) {
936 r = uev_remove_path(uev, vecs);
939 if (!strncmp(uev->action, "change", 6)) {
940 r = uev_update_path(uev, vecs);
945 lock_cleanup_pop(vecs->lock);
/*
 * Thread entry: run the udev uevent listener (and, in the adjacent
 * dispatcher thread body visible below, uevent_dispatch with
 * uev_trigger as callback).  Both log on startup failure.
 */
950 ueventloop (void * ap)
952 struct udev *udev = ap;
954 if (uevent_listen(udev))
955 condlog(0, "error starting uevent listener");
963 if (uevent_dispatch(&uev_trigger, ap))
964 condlog(0, "error starting uevent dispatcher");
/*
 * Thread entry for the CLI listener: register every interactive
 * command handler, then block in uxsock_listen() serving requests
 * through uxsock_trigger().
 */
969 uxlsnrloop (void * ap)
974 set_handler_callback(LIST+PATHS, cli_list_paths);
975 set_handler_callback(LIST+PATHS+FMT, cli_list_paths_fmt);
976 set_handler_callback(LIST+PATHS+RAW+FMT, cli_list_paths_raw);
977 set_handler_callback(LIST+PATH, cli_list_path);
978 set_handler_callback(LIST+MAPS, cli_list_maps);
979 set_handler_callback(LIST+STATUS, cli_list_status);
980 set_handler_callback(LIST+DAEMON, cli_list_daemon);
981 set_handler_callback(LIST+MAPS+STATUS, cli_list_maps_status);
982 set_handler_callback(LIST+MAPS+STATS, cli_list_maps_stats);
983 set_handler_callback(LIST+MAPS+FMT, cli_list_maps_fmt);
984 set_handler_callback(LIST+MAPS+RAW+FMT, cli_list_maps_raw);
985 set_handler_callback(LIST+MAPS+TOPOLOGY, cli_list_maps_topology);
986 set_handler_callback(LIST+TOPOLOGY, cli_list_maps_topology);
987 set_handler_callback(LIST+MAP+TOPOLOGY, cli_list_map_topology);
988 set_handler_callback(LIST+CONFIG, cli_list_config);
989 set_handler_callback(LIST+BLACKLIST, cli_list_blacklist);
990 set_handler_callback(LIST+DEVICES, cli_list_devices);
991 set_handler_callback(LIST+WILDCARDS, cli_list_wildcards);
992 set_handler_callback(ADD+PATH, cli_add_path);
993 set_handler_callback(DEL+PATH, cli_del_path);
994 set_handler_callback(ADD+MAP, cli_add_map);
995 set_handler_callback(DEL+MAP, cli_del_map);
996 set_handler_callback(SWITCH+MAP+GROUP, cli_switch_group);
997 set_handler_callback(RECONFIGURE, cli_reconfigure);
998 set_handler_callback(SUSPEND+MAP, cli_suspend);
999 set_handler_callback(RESUME+MAP, cli_resume);
1000 set_handler_callback(RESIZE+MAP, cli_resize);
1001 set_handler_callback(RELOAD+MAP, cli_reload);
1002 set_handler_callback(RESET+MAP, cli_reassign);
1003 set_handler_callback(REINSTATE+PATH, cli_reinstate);
1004 set_handler_callback(FAIL+PATH, cli_fail);
1005 set_handler_callback(DISABLEQ+MAP, cli_disable_queueing);
1006 set_handler_callback(RESTOREQ+MAP, cli_restore_queueing);
1007 set_handler_callback(DISABLEQ+MAPS, cli_disable_all_queueing);
1008 set_handler_callback(RESTOREQ+MAPS, cli_restore_all_queueing);
1009 set_handler_callback(QUIT, cli_quit);
1010 set_handler_callback(SHUTDOWN, cli_shutdown);
1011 set_handler_callback(GETPRSTATUS+MAP, cli_getprstatus);
1012 set_handler_callback(SETPRSTATUS+MAP, cli_setprstatus);
1013 set_handler_callback(UNSETPRSTATUS+MAP, cli_unsetprstatus);
1014 set_handler_callback(FORCEQ+DAEMON, cli_force_no_daemon_q);
1015 set_handler_callback(RESTOREQ+DAEMON, cli_restore_no_daemon_q);
1018 uxsock_listen(&uxsock_trigger, ap);
/*
 * NOTE(review): fragments of two routines — posting exit_sem to
 * request daemon shutdown, and a switch over running_state (presumably
 * mapping daemon states to status strings) — confirm against full source.
 */
1026 sem_post(&exit_sem);
1032 switch (running_state) {
1037 case DAEMON_CONFIGURE:
1039 case DAEMON_RUNNING:
1041 case DAEMON_SHUTDOWN:
/*
 * Mark path `pp` as failed in device-mapper and update the owning
 * map's queueing mode bookkeeping.
 */
1048 fail_path (struct path * pp, int del_active)
1053 condlog(2, "checker failed path %s in map %s",
1054 pp->dev_t, pp->mpp->alias);
1056 dm_fail_path(pp->mpp->alias, pp->dev_t);
1058 update_queue_mode_del_path(pp->mpp);
1062 * caller must have locked the path list before calling that function
/*
 * Reinstate path `pp` in device-mapper; on success update the owning
 * map's queueing mode bookkeeping.
 */
1065 reinstate_path (struct path * pp, int add_active)
1072 if (dm_reinstate_path(pp->mpp->alias, pp->dev_t)) {
1073 condlog(0, "%s: reinstate failed", pp->dev_t);
1076 condlog(2, "%s: reinstated", pp->dev_t);
1078 update_queue_mode_add_path(pp->mpp);
/*
 * Re-enable the path group containing `pp` if device-mapper currently
 * reports it disabled.  Safe no-op when pgindex is unset (path added
 * before the next map reload populates it).
 */
1084 enable_group(struct path * pp)
1086 struct pathgroup * pgp;
1089 * if path is added through uev_add_path, pgindex can be unset.
1090 * next update_strings() will set it, upon map reload event.
1092 * we can safely return here, because upon map reload, all
1093 * PG will be enabled.
1095 if (!pp->mpp->pg || !pp->pgindex)
1098 pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
1100 if (pgp->status == PGSTATE_DISABLED) {
1101 condlog(2, "%s: enable group #%i", pp->mpp->alias, pp->pgindex);
1102 dm_enablegroup(pp->mpp->alias, pp->pgindex);
/*
 * Drop maps from the daemon that no longer exist in device-mapper,
 * stopping their waiter threads.
 */
1107 mpvec_garbage_collector (struct vectors * vecs)
1109 struct multipath * mpp;
1115 vector_foreach_slot (vecs->mpvec, mpp, i) {
1116 if (mpp && mpp->alias && !dm_map_present(mpp->alias)) {
1117 condlog(2, "%s: remove dead map", mpp->alias);
1118 remove_map_and_stop_waiter(mpp, vecs, 1);
1124 /* This is called after a path has started working again. It the multipath
1125 * device for this path uses the followover failback type, and this is the
1126 * best pathgroup, and this is the first path in the pathgroup to come back
1127 * up, then switch to this pathgroup */
1129 followover_should_failback(struct path * pp)
1131 struct pathgroup * pgp;
1135 if (pp->mpp->pgfailback != -FAILBACK_FOLLOWOVER ||
1136 !pp->mpp->pg || !pp->pgindex ||
1137 pp->pgindex != pp->mpp->bestpg)
/* Scan siblings: any other path already usable means not the first up. */
1140 pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
1141 vector_foreach_slot(pgp->paths, pp1, i) {
1144 if (pp1->chkrstate != PATH_DOWN && pp1->chkrstate != PATH_SHAKY)
/*
 * Per-tick timeout handling for maps still waiting on a creation
 * uevent: when a map's uev_wait_tick expires, re-enable reloads
 * (update_map for maps with pending updates) and, if a reconfigure was
 * delayed, trigger it once nothing is waiting anymore.
 */
1151 missing_uev_wait_tick(struct vectors *vecs)
1153 struct multipath * mpp;
1157 vector_foreach_slot (vecs->mpvec, mpp, i) {
1158 if (mpp->wait_for_udev && --mpp->uev_wait_tick <= 0) {
1160 condlog(0, "%s: timeout waiting on creation uevent. enabling reloads", mpp->alias);
1161 if (mpp->wait_for_udev > 1 && update_map(mpp, vecs)) {
1162 /* update_map removed map */
1166 mpp->wait_for_udev = 0;
1170 if (timed_out && conf->delayed_reconfig &&
1171 !need_to_delay_reconfig(vecs)) {
1172 condlog(2, "reconfigure (delayed)");
/*
 * Count down deferred-failback timers; when a map's timer expires and
 * a better path group is available, switch to it.
 */
1178 defered_failback_tick (vector mpvec)
1180 struct multipath * mpp;
1183 vector_foreach_slot (mpvec, mpp, i) {
1185 * defered failback getting sooner
1187 if (mpp->pgfailback > 0 && mpp->failback_tick > 0) {
1188 mpp->failback_tick--;
1190 if (!mpp->failback_tick && need_switch_pathgroup(mpp, 1))
1191 switch_pathgroup(mpp);
/*
 * Count down no_path_retry timers for queueing maps; when a map's
 * retry_tick hits zero, disable queue_if_no_path in the kernel.
 */
1197 retry_count_tick(vector mpvec)
1199 struct multipath *mpp;
1202 vector_foreach_slot (mpvec, mpp, i) {
1203 if (mpp->retry_tick) {
1204 mpp->stat_total_queueing_time++;
1205 condlog(4, "%s: Retrying.. No active path", mpp->alias);
1206 if(--mpp->retry_tick == 0) {
1207 dm_queue_if_no_path(mpp->alias, 0);
1208 condlog(2, "%s: Disable queueing", mpp->alias);
/*
 * Refresh path priorities and report whether any changed.  With
 * refresh_all set, every path of every group in pp's map is refreshed;
 * otherwise only pp itself.
 */
1214 int update_prio(struct path *pp, int refresh_all)
1218 struct pathgroup * pgp;
1219 int i, j, changed = 0;
1222 vector_foreach_slot (pp->mpp->pg, pgp, i) {
1223 vector_foreach_slot (pgp->paths, pp1, j) {
1224 oldpriority = pp1->priority;
1225 pathinfo(pp1, conf->hwtable, DI_PRIO);
1226 if (pp1->priority != oldpriority)
1232 oldpriority = pp->priority;
1233 pathinfo(pp, conf->hwtable, DI_PRIO);
1235 if (pp->priority == oldpriority)
/*
 * Reload the DM table for `mpp` (optionally refreshing path info),
 * resync daemon state, and push path states back into the kernel.
 */
1240 int update_path_groups(struct multipath *mpp, struct vectors *vecs, int refresh)
1242 if (reload_map(vecs, mpp, refresh))
1246 if (setup_multipath(vecs, mpp) != 0)
1248 sync_map_state(mpp);
/*
 * Per-path checker tick: determine the path's current state, drive
 * state transitions (fail/reinstate in DM, delayed reinstatement via
 * wait/watch checks, persistent-reservation re-check, group re-enable),
 * adapt the polling interval, and finally refresh priorities and
 * trigger/schedule path-group failback as needed.
 */
1254 * Returns '1' if the path has been checked, '0' otherwise
1257 check_path (struct vectors * vecs, struct path * pp)
1260 int new_path_up = 0;
1261 int chkr_new_path_up = 0;
1263 int disable_reinstate = 0;
1264 int oldchkrstate = pp->chkrstate;
1266 if ((pp->initialized == INIT_OK ||
1267 pp->initialized == INIT_REQUESTED_UDEV) && !pp->mpp)
1270 if (pp->tick && --pp->tick)
1271 return 0; /* don't check this path yet */
/* Path never got its udev info: poke udev with a change event. */
1273 if (!pp->mpp && pp->initialized == INIT_MISSING_UDEV &&
1274 pp->retriggers < conf->retrigger_tries) {
1275 condlog(2, "%s: triggering change event to reinitialize",
1277 pp->initialized = INIT_REQUESTED_UDEV;
1279 sysfs_attr_set_value(pp->udev, "uevent", "change",
1285 * provision a next check soonest,
1286 * in case we exit abnormaly from here
1288 pp->tick = conf->checkint;
1290 newstate = path_offline(pp);
1292 * Wait for uevent for removed paths;
1293 * some LLDDs like zfcp keep paths unavailable
1294 * without sending uevents.
1296 if (newstate == PATH_REMOVED)
1297 newstate = PATH_DOWN;
1299 if (newstate == PATH_UP)
1300 newstate = get_state(pp, 1);
1302 checker_clear_message(&pp->checker);
1304 if (newstate == PATH_WILD || newstate == PATH_UNCHECKED) {
1305 condlog(2, "%s: unusable path", pp->dev);
1306 pathinfo(pp, conf->hwtable, 0);
/* A path that came up without a wwid can now be added for real. */
1310 if (!strlen(pp->wwid) && pp->initialized != INIT_MISSING_UDEV &&
1311 (newstate == PATH_UP || newstate == PATH_GHOST)) {
1312 condlog(2, "%s: add missing path", pp->dev);
1313 if (pathinfo(pp, conf->hwtable, DI_ALL) == 0) {
1314 ev_add_path(pp, vecs);
1321 * Async IO in flight. Keep the previous path state
1322 * and reschedule as soon as possible
1324 if (newstate == PATH_PENDING) {
1329 * Synchronize with kernel state
1331 if (update_multipath_strings(pp->mpp, vecs->pathvec)) {
1332 condlog(1, "%s: Could not synchronize with kernel state",
1334 pp->dmstate = PSTATE_UNDEF;
1336 /* if update_multipath_strings orphaned the path, quit early */
/* delay_wait_checks: hold a recovered path in DELAYED while others serve. */
1340 if ((newstate == PATH_UP || newstate == PATH_GHOST) &&
1341 pp->wait_checks > 0) {
1342 if (pp->mpp && pp->mpp->nr_active > 0) {
1343 pp->state = PATH_DELAYED;
1347 pp->wait_checks = 0;
1351 * don't reinstate failed path, if its in stand-by
1352 * and if target supports only implicit tpgs mode.
1353 * this will prevent unnecessary i/o by dm on stand-by
1354 * paths if there are no other active paths in map.
1356 disable_reinstate = (newstate == PATH_GHOST &&
1357 pp->mpp->nr_active == 0 &&
1358 pp->tpgs == TPGS_IMPLICIT) ? 1 : 0;
1360 pp->chkrstate = newstate;
/* State transition handling. */
1361 if (newstate != pp->state) {
1362 int oldstate = pp->state;
1363 pp->state = newstate;
1365 if (strlen(checker_message(&pp->checker)))
1366 LOG_MSG(1, checker_message(&pp->checker));
1369 * upon state change, reset the checkint
1370 * to the shortest delay
1372 pp->checkint = conf->checkint;
1374 if (newstate == PATH_DOWN || newstate == PATH_SHAKY) {
1376 * proactively fail path in the DM
1378 if (oldstate == PATH_UP ||
1379 oldstate == PATH_GHOST) {
1381 if (pp->mpp->delay_wait_checks > 0 &&
1382 pp->watch_checks > 0) {
1383 pp->wait_checks = pp->mpp->delay_wait_checks;
1384 pp->watch_checks = 0;
1390 * cancel scheduled failback
1392 pp->mpp->failback_tick = 0;
1394 pp->mpp->stat_path_failures++;
1398 if(newstate == PATH_UP || newstate == PATH_GHOST){
1399 if ( pp->mpp && pp->mpp->prflag ){
1401 * Check Persistent Reservation.
1403 condlog(2, "%s: checking persistent reservation "
1404 "registration", pp->dev);
1405 mpath_pr_event_handle(pp);
1410 * reinstate this path
1412 if (oldstate != PATH_UP &&
1413 oldstate != PATH_GHOST) {
1414 if (pp->mpp->delay_watch_checks > 0)
1415 pp->watch_checks = pp->mpp->delay_watch_checks;
1418 if (pp->watch_checks > 0)
1422 if (!disable_reinstate && reinstate_path(pp, add_active)) {
1423 condlog(3, "%s: reload map", pp->dev);
1424 ev_add_path(pp, vecs);
1430 if (oldchkrstate != PATH_UP && oldchkrstate != PATH_GHOST)
1431 chkr_new_path_up = 1;
1434 * if at least one path is up in a group, and
1435 * the group is disabled, re-enable it
1437 if (newstate == PATH_UP)
/* No state change: maintenance for a steadily up/down path. */
1440 else if (newstate == PATH_UP || newstate == PATH_GHOST) {
1441 if ((pp->dmstate == PSTATE_FAILED ||
1442 pp->dmstate == PSTATE_UNDEF) &&
1443 !disable_reinstate) {
1444 /* Clear IO errors */
1445 if (reinstate_path(pp, 0)) {
1446 condlog(3, "%s: reload map", pp->dev);
1447 ev_add_path(pp, vecs);
1452 LOG_MSG(4, checker_message(&pp->checker));
1453 if (pp->checkint != conf->max_checkint) {
1455 * double the next check delay.
1456 * max at conf->max_checkint
1458 if (pp->checkint < (conf->max_checkint / 2))
1459 pp->checkint = 2 * pp->checkint;
1461 pp->checkint = conf->max_checkint;
1463 condlog(4, "%s: delay next check %is",
1464 pp->dev_t, pp->checkint);
1466 if (pp->watch_checks > 0)
1468 pp->tick = pp->checkint;
1471 else if (newstate == PATH_DOWN &&
1472 strlen(checker_message(&pp->checker))) {
1473 if (conf->log_checker_err == LOG_CHKR_ERR_ONCE)
1474 LOG_MSG(3, checker_message(&pp->checker));
1476 LOG_MSG(2, checker_message(&pp->checker));
1479 pp->state = newstate;
1482 if (pp->mpp->wait_for_udev)
1485 * path prio refreshing
1487 condlog(4, "path prio refresh");
/* Priority changed on a group_by_prio/immediate-failback map: regroup. */
1489 if (update_prio(pp, new_path_up) &&
1490 (pp->mpp->pgpolicyfn == (pgpolicyfn *)group_by_prio) &&
1491 pp->mpp->pgfailback == -FAILBACK_IMMEDIATE)
1492 update_path_groups(pp->mpp, vecs, !new_path_up);
1493 else if (need_switch_pathgroup(pp->mpp, 0)) {
1494 if (pp->mpp->pgfailback > 0 &&
1495 (new_path_up || pp->mpp->failback_tick <= 0))
1496 pp->mpp->failback_tick =
1497 pp->mpp->pgfailback + 1;
1498 else if (pp->mpp->pgfailback == -FAILBACK_IMMEDIATE ||
1499 (chkr_new_path_up && followover_should_failback(pp)))
1500 switch_pathgroup(pp->mpp);
/*
 * Main checker thread: lock memory, initialize per-path check
 * intervals, then loop — under the vecs lock — checking every path,
 * running the deferred-failback, retry-count, missing-uevent and
 * garbage-collection ticks, pinging the systemd watchdog, and logging
 * how long each pass took.
 */
1506 checkerloop (void *ap)
1508 struct vectors *vecs;
1513 mlockall(MCL_CURRENT | MCL_FUTURE);
1514 vecs = (struct vectors *)ap;
1515 condlog(2, "path checkers start up");
1518 * init the path check interval
1520 vector_foreach_slot (vecs->pathvec, pp, i) {
1521 pp->checkint = conf->checkint;
1525 struct timeval diff_time, start_time, end_time;
1528 if (gettimeofday(&start_time, NULL) != 0)
1529 start_time.tv_sec = 0;
1530 pthread_cleanup_push(cleanup_lock, &vecs->lock);
1532 pthread_testcancel();
1536 sd_notify(0, "WATCHDOG=1");
1538 if (vecs->pathvec) {
1539 vector_foreach_slot (vecs->pathvec, pp, i) {
1540 num_paths += check_path(vecs, pp);
1544 defered_failback_tick(vecs->mpvec);
1545 retry_count_tick(vecs->mpvec);
1546 missing_uev_wait_tick(vecs);
1551 condlog(4, "map garbage collection");
1552 mpvec_garbage_collector(vecs);
1556 lock_cleanup_pop(vecs->lock);
1557 if (start_time.tv_sec &&
1558 gettimeofday(&end_time, NULL) == 0 &&
1560 timersub(&end_time, &start_time, &diff_time);
1561 condlog(3, "checked %d path%s in %lu.%06lu secs",
1562 num_paths, num_paths > 1 ? "s" : "",
1563 diff_time.tv_sec, diff_time.tv_usec);
/*
 * Full (re)configuration: allocate path/map vectors, discover paths
 * from sysfs (dropping blacklisted ones) and maps from device-mapper,
 * coalesce paths into a new map set, reconcile with the old set,
 * sync states, remember wwids, swap in the new mpvec, and finally run
 * setup_multipath plus start a DM event waiter thread per map.
 */
1571 configure (struct vectors * vecs, int start_waiters)
1573 struct multipath * mpp;
1578 if (!vecs->pathvec && !(vecs->pathvec = vector_alloc()))
1581 if (!vecs->mpvec && !(vecs->mpvec = vector_alloc()))
1584 if (!(mpvec = vector_alloc()))
1588 * probe for current path (from sysfs) and map (from dm) sets
1590 ret = path_discovery(vecs->pathvec, conf, DI_ALL);
1594 vector_foreach_slot (vecs->pathvec, pp, i){
1595 if (filter_path(conf, pp) > 0){
1596 vector_del_slot(vecs->pathvec, i);
1601 pp->checkint = conf->checkint;
1603 if (map_discovery(vecs))
1607 * create new set of maps & push changed ones into dm
1609 if (coalesce_paths(vecs, mpvec, NULL, 1))
1613 * may need to remove some maps which are no longer relevant
1614 * e.g., due to blacklist changes in conf file
1616 if (coalesce_maps(vecs, mpvec))
1621 sync_maps_state(mpvec);
1622 vector_foreach_slot(mpvec, mpp, i){
1623 remember_wwid(mpp->wwid);
1628 * purge dm of old maps
1633 * save new set of maps formed by considering current path state
1635 vector_free(vecs->mpvec);
1636 vecs->mpvec = mpvec;
1639 * start dm event waiter threads for these new maps
1641 vector_foreach_slot(vecs->mpvec, mpp, i) {
1642 if (setup_multipath(vecs, mpp))
1645 if (start_waiter_thread(mpp, vecs))
/*
 * Reconfiguration must be delayed while any registered map is still
 * waiting for its creation uevent; an empty map vector never delays.
 */
1652 need_to_delay_reconfig(struct vectors * vecs)
1654 struct multipath *mpp;
1657 if (!VECTOR_SIZE(vecs->mpvec))
1660 vector_foreach_slot(vecs->mpvec, mpp, i) {
1661 if (mpp->wait_for_udev)
/*
 * Reload the configuration file: tear down all maps/waiters and the
 * pathvec (they reference the old conf), re-read the config while
 * preserving verbosity, bindings_read_only and ignore_new_devs from
 * the old conf, then return to DAEMON_RUNNING.
 */
1668 reconfigure (struct vectors * vecs)
1670 struct config * old = conf;
1673 running_state = DAEMON_CONFIGURE;
1676 * free old map and path vectors ... they use old conf state
1678 if (VECTOR_SIZE(vecs->mpvec))
1679 remove_maps_and_stop_waiters(vecs);
1681 if (VECTOR_SIZE(vecs->pathvec))
1682 free_pathvec(vecs->pathvec, FREE_PATHS);
1684 vecs->pathvec = NULL;
1687 /* Re-read any timezone changes */
1690 if (!load_config(DEFAULT_CONFIGFILE, udev)) {
1691 dm_drv_version(conf->version, TGT_MPATH);
1692 conf->verbosity = old->verbosity;
1693 conf->bindings_read_only = old->bindings_read_only;
1694 conf->ignore_new_devs = old->ignore_new_devs;
1701 running_state = DAEMON_RUNNING;
/*
 * Allocate and initialize the global vectors structure and its mutex.
 * NOTE(review): the function name line is not visible in this extract
 * (presumably init_vecs) — confirm against the full source.
 */
1706 static struct vectors *
1709 struct vectors * vecs;
1711 vecs = (struct vectors *)MALLOC(sizeof(struct vectors));
1717 (pthread_mutex_t *)MALLOC(sizeof(pthread_mutex_t));
1719 if (!vecs->lock.mutex)
1722 pthread_mutex_init(vecs->lock.mutex, NULL);
1723 vecs->lock.depth = 0;
1729 condlog(0, "failed to init paths");
/*
 * Install `func` as the handler for signal `signo` via sigaction(2)
 * and return the previously installed handler.
 */
1734 signal_set(int signo, void (*func) (int))
1737 struct sigaction sig;
1738 struct sigaction osig;
1740 sig.sa_handler = func;
1741 sigemptyset(&sig.sa_mask);
1744 r = sigaction(signo, &sig, &osig);
1749 return (osig.sa_handler);
/*
 * handle_signals() -- process flags raised by the async signal handlers
 * in a safe (non-signal) context:
 *  - reconfig_sig: run (or delay) a reconfigure under the vecs lock,
 *    but only while the daemon is in DAEMON_RUNNING state;
 *  - log_reset_sig: reset the logging subsystem under logq_lock.
 *
 * NOTE(review): sampled extract -- the lock acquisition, the actual
 * reconfigure() call and the flag-clearing lines fall in the gaps.
 */
1753 handle_signals(void)
1755 if (reconfig_sig && running_state == DAEMON_RUNNING) {
/* ensure the vecs lock is released even if this thread is cancelled */
1756 pthread_cleanup_push(cleanup_lock,
1759 pthread_testcancel();
/* if maps are still waiting on udev, defer instead of reconfiguring now */
1760 if (need_to_delay_reconfig(gvecs)) {
1761 conf->delayed_reconfig = 1;
1762 condlog(2, "delaying reconfigure (signal)");
1765 condlog(2, "reconfigure (signal)");
1768 lock_cleanup_pop(gvecs->lock);
1770 if (log_reset_sig) {
1771 condlog(2, "reset log (signal)");
/* serialize with the logger thread while resetting */
1772 pthread_mutex_lock(&logq_lock);
1773 log_reset("multipathd");
1774 pthread_mutex_unlock(&logq_lock);
/* body of the SIGUSR2 handler -- presumably just logs receipt; the
 * handler's definition line is not visible in this extract */
1801 condlog(3, "SIGUSR2 received");
/*
 * Signal setup for the daemon: HUP/USR1/USR2 are blocked in this thread
 * (they are consumed synchronously elsewhere, see handle_signals), while
 * INT/TERM trigger orderly shutdown via sigend and SIGPIPE is ignored so
 * writes to closed sockets return EPIPE instead of killing the process.
 *
 * NOTE(review): sampled extract -- the enclosing function's signature
 * and sigemptyset() call are not visible here.
 */
1810 sigaddset(&set, SIGHUP);
1811 sigaddset(&set, SIGUSR1);
1812 sigaddset(&set, SIGUSR2);
1813 pthread_sigmask(SIG_BLOCK, &set, NULL);
1815 signal_set(SIGHUP, sighup);
1816 signal_set(SIGUSR1, sigusr1);
1817 signal_set(SIGUSR2, sigusr2);
1818 signal_set(SIGINT, sigend);
1819 signal_set(SIGTERM, sigend);
1820 signal(SIGPIPE, SIG_IGN);
/*
 * Ask for real-time round-robin scheduling at the highest priority so
 * path checking keeps running under load; failure is only warned about,
 * the daemon continues with the default scheduler.
 */
1827 static struct sched_param sched_param = {
1828 .sched_priority = 99
/* pid 0 == calling process */
1831 res = sched_setscheduler (0, SCHED_RR, &sched_param);
1834 condlog(LOG_WARNING, "Could not set SCHED_RR at priority 99");
/*
 * OOM-score adjustment: make the daemon as unattractive as possible to
 * the kernel OOM killer (losing multipathd can wedge all I/O).  Prefers
 * the modern /proc/self/oom_score_adj interface, falling back to the
 * legacy /proc/self/oom_adj on older kernels; skipped entirely when
 * systemd already set OOMScoreAdjust for the unit.
 *
 * NOTE(review): sampled extract -- the retry loop and fclose/error
 * handling between the visible lines are missing from view.
 */
1841 #ifdef OOM_SCORE_ADJ_MIN
1843 char *file = "/proc/self/oom_score_adj";
1844 int score = OOM_SCORE_ADJ_MIN;
1847 char *file = "/proc/self/oom_adj";
1848 int score = OOM_ADJUST_MIN;
/* systemd passes OOMScoreAdjust in the environment when configured */
1854 envp = getenv("OOMScoreAdjust");
1856 condlog(3, "Using systemd provided OOMScoreAdjust");
/* probe for the interface before trying to write it */
1860 if (stat(file, &st) == 0){
1861 fp = fopen(file, "w");
1863 condlog(0, "couldn't fopen %s : %s", file,
1867 fprintf(fp, "%i", score);
/* ENOENT just means this kernel lacks the interface; try the fallback */
1871 if (errno != ENOENT) {
1872 condlog(0, "couldn't stat %s : %s", file,
1876 #ifdef OOM_ADJUST_MIN
1877 file = "/proc/self/oom_adj";
1878 score = OOM_ADJUST_MIN;
1883 condlog(0, "couldn't adjust oom score");
/*
 * child() -- the daemon proper.  Runs after (optional) daemonization:
 * initializes logging/config/checkers, spawns the worker threads
 * (uevent listener, CLI listener, checker loop, uevent dispatcher),
 * performs the initial configure, then blocks on exit_sem until a
 * shutdown signal posts it, and finally tears everything down.
 *
 * NOTE(review): sampled extract -- error-path gotos, cleanup labels and
 * many intervening lines are missing from view; comments describe only
 * the visible lines.
 */
1887 child (void * param)
1889 pthread_t check_thr, uevent_thr, uxlsnr_thr, uevq_thr;
1890 pthread_attr_t log_attr, misc_attr, uevent_attr;
1891 struct vectors * vecs;
1892 struct multipath * mpp;
1895 unsigned long checkint;
/* keep the daemon's pages resident: paging in during I/O failure could deadlock */
1900 mlockall(MCL_CURRENT | MCL_FUTURE);
1901 sem_init(&exit_sem, 0, 0);
/* per-purpose thread attributes: stack size, detached flag */
1906 setup_thread_attr(&misc_attr, 64 * 1024, 1);
1907 setup_thread_attr(&uevent_attr, DEFAULT_UEVENT_STACKSIZE * 1024, 1);
1908 setup_thread_attr(&waiter_attr, 32 * 1024, 1);
1911 setup_thread_attr(&log_attr, 64 * 1024, 0);
1912 log_thread_start(&log_attr);
1913 pthread_attr_destroy(&log_attr);
1915 if (pidfile_create(DEFAULT_PIDFILE, daemon_pid)) {
1916 condlog(1, "failed to create pidfile");
1922 running_state = DAEMON_START;
/* keep systemd informed of startup progress */
1925 sd_notify(0, "STATUS=startup");
1927 condlog(2, "--------start up--------");
1928 condlog(2, "read " DEFAULT_CONFIGFILE);
1930 if (load_config(DEFAULT_CONFIGFILE, udev))
1933 dm_drv_version(conf->version, TGT_MPATH);
1934 if (init_checkers()) {
1935 condlog(0, "failed to initialize checkers");
1939 condlog(0, "failed to initialize prioritizers");
/* only log messages up to the configured verbosity */
1943 setlogmask(LOG_UPTO(conf->verbosity + 3));
/* fd limit: trust systemd's LimitNOFILE if present, else raise it
 * ourselves to conf->max_fds when the current limit is lower */
1945 envp = getenv("LimitNOFILE");
1948 condlog(2,"Using systemd provided open fds limit of %s", envp);
1949 } else if (conf->max_fds) {
1950 struct rlimit fd_limit;
1952 if (getrlimit(RLIMIT_NOFILE, &fd_limit) < 0) {
1953 condlog(0, "can't get open fds limit: %s",
1955 fd_limit.rlim_cur = 0;
1956 fd_limit.rlim_max = 0;
1958 if (fd_limit.rlim_cur < conf->max_fds) {
1959 fd_limit.rlim_cur = conf->max_fds;
1960 if (fd_limit.rlim_max < conf->max_fds)
1961 fd_limit.rlim_max = conf->max_fds;
1962 if (setrlimit(RLIMIT_NOFILE, &fd_limit) < 0) {
1963 condlog(0, "can't set open fds limit to "
1965 fd_limit.rlim_cur, fd_limit.rlim_max,
1968 condlog(3, "set open fds limit to %lu/%lu",
1969 fd_limit.rlim_cur, fd_limit.rlim_max);
/* gvecs is the global alias used by signal handling code */
1975 vecs = gvecs = init_vecs();
1983 dm_udev_set_sync_support(0);
/* systemd watchdog: derive check intervals from WATCHDOG_USEC */
1985 envp = getenv("WATCHDOG_USEC");
1986 if (envp && sscanf(envp, "%lu", &checkint) == 1) {
1987 /* Value is in microseconds */
1988 conf->max_checkint = checkint / 1000000;
1989 /* Rescale checkint */
1990 if (conf->checkint > conf->max_checkint)
1991 conf->checkint = conf->max_checkint;
1993 conf->checkint = conf->max_checkint / 4;
1994 condlog(3, "enabling watchdog, interval %d max %d",
1995 conf->checkint, conf->max_checkint);
1996 use_watchdog = conf->checkint;
2000 * Start uevent listener early to catch events
2002 if ((rc = pthread_create(&uevent_thr, &uevent_attr, ueventloop, udev))) {
2003 condlog(0, "failed to create uevent thread: %d", rc);
2006 pthread_attr_destroy(&uevent_attr);
2007 if ((rc = pthread_create(&uxlsnr_thr, &misc_attr, uxlsnrloop, vecs))) {
2008 condlog(0, "failed to create cli listener: %d", rc);
2012 * fetch and configure both paths and multipaths
2015 sd_notify(0, "STATUS=configure");
2017 running_state = DAEMON_CONFIGURE;
2020 if (configure(vecs, 1)) {
2022 condlog(0, "failure during configuration");
/* remaining worker threads start only after the initial configure */
2030 if ((rc = pthread_create(&check_thr, &misc_attr, checkerloop, vecs))) {
2031 condlog(0,"failed to create checker loop thread: %d", rc);
2034 if ((rc = pthread_create(&uevq_thr, &misc_attr, uevqloop, vecs))) {
2035 condlog(0, "failed to create uevent dispatcher: %d", rc);
2038 pthread_attr_destroy(&misc_attr);
2040 running_state = DAEMON_RUNNING;
2042 sd_notify(0, "READY=1\nSTATUS=running");
/* park here until a termination signal posts exit_sem; loop guards
 * against EINTR-style spurious wakeups */
2048 while(sem_wait(&exit_sem) != 0); /* Do nothing */
2051 sd_notify(0, "STATUS=shutdown");
2053 running_state = DAEMON_SHUTDOWN;
/* honor queue_without_daemon=no: stop queueing before we exit so maps
 * do not hang I/O with no daemon to flush them */
2055 if (conf->queue_without_daemon == QUE_NO_DAEMON_OFF)
2056 vector_foreach_slot(vecs->mpvec, mpp, i)
2057 dm_queue_if_no_path(mpp->alias, 0);
2058 remove_maps_and_stop_waiters(vecs);
2061 pthread_cancel(check_thr);
2062 pthread_cancel(uevent_thr);
2063 pthread_cancel(uxlsnr_thr);
2064 pthread_cancel(uevq_thr);
2067 free_pathvec(vecs->pathvec, FREE_PATHS);
2068 vecs->pathvec = NULL;
2070 /* Now all the waitevent threads will start rushing in. */
/* busy-wait (acknowledged as weak in-source) until every waiter has
 * dropped the lock before destroying it */
2071 while (vecs->lock.depth > 0) {
2072 sleep (1); /* This is weak. */
2073 condlog(3, "Have %d wait event checkers threads to de-alloc,"
2074 " waiting...", vecs->lock.depth);
2076 pthread_mutex_destroy(vecs->lock.mutex);
2077 FREE(vecs->lock.mutex);
2078 vecs->lock.depth = 0;
2079 vecs->lock.mutex = NULL;
2089 /* We're done here */
2090 condlog(3, "unlink pidfile");
2091 unlink(DEFAULT_PIDFILE);
2093 condlog(2, "--------shut down-------");
2099 * Freeing config must be done after condlog() and dm_lib_exit(),
2100 * because logging functions like dlog() and dm_write_log()
2101 * reference the config.
2108 dbg_free_final(NULL);
/* report final status to systemd: ERRNO=0 on the success path,
 * ERRNO=1 on the failure path */
2112 sd_notify(0, "ERRNO=0");
2118 sd_notify(0, "ERRNO=1");
/*
 * Daemonization: classic double-fork so the daemon is re-parented to
 * init and cannot reacquire a controlling terminal, chdir to '/' so no
 * filesystem stays busy, then redirect stdin/stdout/stderr to
 * /dev/null.  Records the final pid in daemon_pid.
 *
 * NOTE(review): sampled extract -- the function signature, setsid()
 * call and parent-exit branches fall in the gaps between visible lines.
 */
2129 if( (pid = fork()) < 0){
2130 fprintf(stderr, "Failed first fork : %s\n", strerror(errno));
2138 if ( (pid = fork()) < 0)
2139 fprintf(stderr, "Failed second fork : %s\n", strerror(errno));
2144 fprintf(stderr, "cannot chdir to '/', continuing\n");
2146 dev_null_fd = open("/dev/null", O_RDWR);
2147 if (dev_null_fd < 0){
2148 fprintf(stderr, "cannot open /dev/null for input & output : %s\n",
/* close-then-dup: dup() reuses the lowest free fd, i.e. 0, 1, 2 in turn */
2153 close(STDIN_FILENO);
2154 if (dup(dev_null_fd) < 0) {
2155 fprintf(stderr, "cannot dup /dev/null to stdin : %s\n",
2159 close(STDOUT_FILENO);
2160 if (dup(dev_null_fd) < 0) {
2161 fprintf(stderr, "cannot dup /dev/null to stdout : %s\n",
2165 close(STDERR_FILENO);
2166 if (dup(dev_null_fd) < 0) {
2167 fprintf(stderr, "cannot dup /dev/null to stderr : %s\n",
2172 daemon_pid = getpid();
/*
 * main() -- entry point.  Requires root, normalizes cwd/umask, allocates
 * the config, parses options (-d foreground, -s, -v verbosity, -k
 * interactive/one-shot CLI client via uxclnt(), -B bindings read-only,
 * -n ignore new devs).  Remaining non-option arguments are joined into a
 * single CLI command string and sent to the running daemon; otherwise
 * the process (optionally daemonized) continues into child().
 *
 * NOTE(review): sampled extract -- the switch-case labels, daemonize()
 * call and several exits fall in the gaps between visible lines.
 */
2177 main (int argc, char *argv[])
2179 extern char *optarg;
2186 running_state = DAEMON_INIT;
/* device-mapper manipulation requires root */
2189 if (getuid() != 0) {
2190 fprintf(stderr, "need to be root\n");
2194 /* make sure we don't lock any path */
2196 fprintf(stderr, "can't chdir to root directory : %s\n",
/* read current umask without changing it, then OR in group/other write bits */
2198 umask(umask(077) | 022);
2200 conf = alloc_config();
2205 while ((arg = getopt(argc, argv, ":dsv:k::Bn")) != EOF ) {
2211 //debug=1; /* ### comment me out ### */
/* reject -v arguments that are not a plain digit string */
2214 if (sizeof(optarg) > sizeof(char *) ||
2215 !isdigit(optarg[0]))
2218 conf->verbosity = atoi(optarg);
/* -k: act as a CLI client against the running daemon */
2224 if (load_config(DEFAULT_CONFIGFILE, udev_new()))
2226 uxclnt(optarg, conf->uxsock_timeout);
2229 conf->bindings_read_only = 1;
2232 conf->ignore_new_devs = 1;
/* bare arguments: join them into one command line for the daemon,
 * quoting any argument that contains a space */
2238 if (optind < argc) {
2243 if (load_config(DEFAULT_CONFIGFILE, udev_new()))
2245 while (optind < argc) {
2246 if (strchr(argv[optind], ' '))
2247 c += snprintf(c, s + CMDSIZE - c, "\"%s\" ", argv[optind]);
2249 c += snprintf(c, s + CMDSIZE - c, "%s ", argv[optind]);
2252 c += snprintf(c, s + CMDSIZE - c, "\n");
2253 uxclnt(s, conf->uxsock_timeout);
/* unbuffered stdout when piped, so logs appear promptly */
2258 if (!isatty(fileno(stdout)))
2259 setbuf(stdout, NULL);
2261 daemon_pid = getpid();
2273 return (child(NULL));
/*
 * mpath_pr_event_handler_fn() -- persistent-reservation thread body for
 * a newly usable path: issues PR IN (READ KEYS) on the path, checks
 * whether the multipath's reservation_key is already registered, and if
 * not registers it via PR OUT (REGISTER AND IGNORE EXISTING KEY).
 *
 * NOTE(review): sampled extract -- the out/cleanup labels, the free of
 * resp/param and several assignments (e.g. building prkey, isFound)
 * fall in the gaps; comments describe only the visible lines.
 */
2276 void * mpath_pr_event_handler_fn (void * pathp )
2278 struct multipath * mpp;
2279 int i,j, ret, isFound;
2280 struct path * pp = (struct path *)pathp;
2281 unsigned char *keyp;
2283 struct prout_param_descriptor *param;
2284 struct prin_resp *resp;
2288 resp = mpath_alloc_prin_response(MPATH_PRIN_RKEY_SA);
2290 condlog(0,"%s Alloc failed for prin response", pp->dev);
/* PR IN / READ KEYS on this specific path */
2294 ret = prin_do_scsi_ioctl(pp->dev, MPATH_PRIN_RKEY_SA, resp, 0);
2295 if (ret != MPATH_PR_SUCCESS )
2297 condlog(0,"%s : pr in read keys service action failed. Error=%d", pp->dev, ret);
2301 condlog(3, " event pr=%d addlen=%d",resp->prin_descriptor.prin_readkeys.prgeneration,
2302 resp->prin_descriptor.prin_readkeys.additional_length );
/* additional_length == 0 means the device returned no registered keys */
2304 if (resp->prin_descriptor.prin_readkeys.additional_length == 0 )
2306 condlog(1, "%s: No key found. Device may not be registered.", pp->dev);
2307 ret = MPATH_PR_SUCCESS;
/* fold the map's configured reservation_key bytes into prkey for logging */
2311 keyp = (unsigned char *)mpp->reservation_key;
2312 for (j = 0; j < 8; ++j) {
2318 condlog(2, "Multipath reservation_key: 0x%" PRIx64 " ", prkey);
/* each registered key is 8 bytes; compare against our reservation_key */
2321 for (i = 0; i < resp->prin_descriptor.prin_readkeys.additional_length/8; i++ )
2323 condlog(2, "PR IN READKEYS[%d] reservation key:",i);
2324 dumpHex((char *)&resp->prin_descriptor.prin_readkeys.key_list[i*8], 8 , -1);
2325 if (!memcmp(mpp->reservation_key, &resp->prin_descriptor.prin_readkeys.key_list[i*8], 8))
2327 condlog(2, "%s: pr key found in prin readkeys response", mpp->alias);
2334 condlog(0, "%s: Either device not registered or ", pp->dev);
2335 condlog(0, "host is not authorised for registration. Skip path");
2336 ret = MPATH_PR_OTHER;
/* build the PR OUT parameter block carrying our service-action key */
2340 param= malloc(sizeof(struct prout_param_descriptor));
2341 memset(param, 0 , sizeof(struct prout_param_descriptor));
/* store prkey big-endian into sa_key, least-significant byte last */
2343 for (j = 7; j >= 0; --j) {
2344 param->sa_key[j] = (prkey & 0xff);
2347 param->num_transportid = 0;
2349 condlog(3, "device %s:%s", pp->dev, pp->mpp->wwid);
/* REGISTER AND IGNORE EXISTING KEY: safe to repeat on already-registered paths */
2351 ret = prout_do_scsi_ioctl(pp->dev, MPATH_PROUT_REG_IGN_SA, 0, 0, param, 0);
2352 if (ret != MPATH_PR_SUCCESS )
2354 condlog(0,"%s: Reservation registration failed. Error: %d", pp->dev, ret);
2364 int mpath_pr_event_handle(struct path *pp)
2368 pthread_attr_t attr;
2369 struct multipath * mpp;
2373 if (!mpp->reservation_key)
2376 pthread_attr_init(&attr);
2377 pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
2379 rc = pthread_create(&thread, NULL , mpath_pr_event_handler_fn, pp);
2381 condlog(0, "%s: ERROR; return code from pthread_create() is %d", pp->dev, rc);
2384 pthread_attr_destroy(&attr);
2385 rc = pthread_join(thread, NULL);