2 * Copyright (c) 2004, 2005 Christophe Varoqui
3 * Copyright (c) 2005 Benjamin Marzinski, Redhat
4 * Copyright (c) 2005 Kiyoshi Ueda, NEC
14 #include "pgpolicies.h"
17 #include "devmapper.h"
19 #include "discovery.h"
23 #include "prioritizers/alua_rtpg.h"
30 pgpolicyfn *pgpolicies[] = {
/*
 * Layered-configuration copy helpers.
 * NOTE(review): this extraction is incomplete -- the macro bodies are
 * truncated mid-definition (backslash continuations with missing lines),
 * so comments are only placed outside continuation runs.
 * do_set(var, src, dest, msg): if src and src->var are set, copy
 * src->var to dest and record msg as the value's origin -- TODO confirm
 * the tail of the macro against the full source.
 */
39 #define do_set(var, src, dest, msg) \
41 	if (src && src->var) { \
48 #define __do_set_from_vec(type, var, src, dest) \
51 		bool _found = false; \
54 		vector_foreach_slot(src, _p, i) { \
64 #define __do_set_from_hwe(var, src, dest) \
65 	__do_set_from_vec(struct hwentry, var, (src)->hwe, dest)
/*
 * do_set_from_hwe(): like do_set(), but scans the hardware-entry vector
 * (src->hwe) for the first slot that provides var.
 */
67 #define do_set_from_hwe(var, src, dest, msg) \
68 	if (src->hwe && __do_set_from_hwe(var, src, dest)) { \
/*
 * Human-readable "origin" tags appended to condlog() output so the user
 * can see which configuration layer (built-in default, devices section,
 * multipaths section, overrides, command line, autodetection, or an
 * implied/overridden setting) supplied each selected value.
 */
73 static const char default_origin[] = "(setting: multipath internal)";
74 static const char hwe_origin[] =
75 	"(setting: storage device configuration)";
76 static const char multipaths_origin[] =
77 	"(setting: multipath.conf multipaths section)";
78 static const char conf_origin[] =
79 	"(setting: multipath.conf defaults/devices section)";
80 static const char overrides_origin[] =
81 	"(setting: multipath.conf overrides section)";
82 static const char cmdline_origin[] =
83 	"(setting: multipath command line [-p] flag)";
84 static const char autodetect_origin[] =
85 	"(setting: storage device autodetected)";
86 static const char fpin_marginal_path_origin[] =
87 	"(setting: overridden by marginal_path_fpin)";
88 static const char marginal_path_origin[] =
89 	"(setting: implied by marginal_path check)";
90 static const char delay_watch_origin[] =
91 	"(setting: implied by delay_watch_checks)";
92 static const char delay_wait_origin[] =
93 	"(setting: implied by delay_wait_checks)";
/*
 * do_default(): assign the built-in default value and tag the origin as
 * "multipath internal".  Body truncated in this extraction.
 */
95 #define do_default(dest, value) \
98 	origin = default_origin; \
101 #define mp_set_mpe(var) \
102 	do_set(var, mp->mpe, mp->var, multipaths_origin)
/* mp_set_hwe(): take var from the map's hardware-entry vector. */
103 #define mp_set_hwe(var) \
104 	do_set_from_hwe(var, mp, mp->var, hwe_origin)
/* mp_set_ovr(): take var from the multipath.conf overrides section. */
105 #define mp_set_ovr(var) \
106 	do_set(var, conf->overrides, mp->var, overrides_origin)
/* mp_set_conf(): take var from defaults/devices-level config. */
107 #define mp_set_conf(var) \
108 	do_set(var, conf, mp->var, conf_origin)
/* mp_set_default(): built-in fallback for a multipath attribute. */
109 #define mp_set_default(var, value) \
110 	do_default(mp->var, value)
/* pp_set_*(): same layered lookup, but for struct path attributes. */
112 #define pp_set_mpe(var) \
113 	do_set(var, mpe, pp->var, multipaths_origin)
114 #define pp_set_hwe(var) \
115 	do_set_from_hwe(var, pp, pp->var, hwe_origin)
116 #define pp_set_conf(var) \
117 	do_set(var, conf, pp->var, conf_origin)
118 #define pp_set_ovr(var) \
119 	do_set(var, conf->overrides, pp->var, overrides_origin)
120 #define pp_set_default(var, value) \
121 	do_default(pp->var, value)
/*
 * do_attr_set(): copy mode/uid/gid-style attributes guarded by a bit in
 * attribute_flags, and set the corresponding flag bit on the map.
 * Body truncated in this extraction.
 */
123 #define do_attr_set(var, src, shift, msg) \
125 	if (src && (src->attribute_flags & (1 << shift))) { \
126 		mp->attribute_flags |= (1 << shift); \
127 		mp->var = src->var; \
133 #define set_attr_mpe(var, shift) \
134 	do_attr_set(var, mp->mpe, shift, "(setting: multipath.conf multipaths section)")
/* set_attr_conf(): same, sourced from the defaults/devices config. */
135 #define set_attr_conf(var, shift) \
136 	do_attr_set(var, conf, shift, "(setting: multipath.conf defaults/devices section)")
/*
 * do_prkey_set(): copy the persistent-reservation key triple
 * (prkey_source, reservation_key, sa_flags) when src defines one.
 */
138 #define do_prkey_set(src, msg) \
140 	if (src && src->prkey_source != PRKEY_SOURCE_NONE) { \
141 		mp->prkey_source = src->prkey_source; \
142 		mp->reservation_key = src->reservation_key; \
143 		mp->sa_flags = src->sa_flags; \
/*
 * select_mode()/select_uid()/select_gid(): choose the dm device node's
 * mode/uid/gid from the multipaths section first, then defaults/devices;
 * the attribute-flag bit is cleared again before logging -- presumably
 * because the flag is re-derived elsewhere; TODO confirm against the
 * missing lines of this extraction.
 */
149 int select_mode(struct config *conf, struct multipath *mp)
153 	set_attr_mpe(mode, ATTR_MODE);
154 	set_attr_conf(mode, ATTR_MODE);
155 	mp->attribute_flags &= ~(1 << ATTR_MODE);
158 	condlog(3, "%s: mode = 0%o %s", mp->alias, mp->mode, origin);
162 int select_uid(struct config *conf, struct multipath *mp)
166 	set_attr_mpe(uid, ATTR_UID);
167 	set_attr_conf(uid, ATTR_UID);
168 	mp->attribute_flags &= ~(1 << ATTR_UID);
171 	condlog(3, "%s: uid = 0%o %s", mp->alias, mp->uid, origin);
175 int select_gid(struct config *conf, struct multipath *mp)
179 	set_attr_mpe(gid, ATTR_GID);
180 	set_attr_conf(gid, ATTR_GID);
181 	mp->attribute_flags &= ~(1 << ATTR_GID);
184 	condlog(3, "%s: gid = 0%o %s", mp->alias, mp->gid, origin);
190  * traverse the configuration layers from most specific to most generic
191  * stop at first explicit setting found
/*
 * select_rr_weight(): layered lookup (multipaths > overrides > devices >
 * defaults > built-in DEFAULT_RR_WEIGHT), then log the chosen value.
 */
193 int select_rr_weight(struct config *conf, struct multipath * mp)
196 	STRBUF_ON_STACK(buff);
198 	mp_set_mpe(rr_weight);
199 	mp_set_ovr(rr_weight);
200 	mp_set_hwe(rr_weight);
201 	mp_set_conf(rr_weight);
202 	mp_set_default(rr_weight, DEFAULT_RR_WEIGHT);
204 	print_rr_weight(&buff, mp->rr_weight);
205 	condlog(3, "%s: rr_weight = %s %s", mp->alias,
206 		get_strbuf_str(&buff), origin);
/*
 * select_pgfailback(): same layered lookup for the failback policy.
 */
210 int select_pgfailback(struct config *conf, struct multipath * mp)
213 	STRBUF_ON_STACK(buff);
215 	mp_set_mpe(pgfailback);
216 	mp_set_ovr(pgfailback);
217 	mp_set_hwe(pgfailback);
218 	mp_set_conf(pgfailback);
219 	mp_set_default(pgfailback, DEFAULT_FAILBACK);
221 	print_pgfailback(&buff, mp->pgfailback);
222 	condlog(3, "%s: failback = %s %s", mp->alias,
223 		get_strbuf_str(&buff), origin);
/*
 * select_pgpolicy(): the command line -p flag (conf->pgpolicy_flag)
 * overrides every config layer; otherwise fall through the usual
 * layered lookup.  The resolved policy index selects the grouping
 * callback from the pgpolicies[] table.
 */
227 int select_pgpolicy(struct config *conf, struct multipath * mp)
230 	char buff[POLICY_NAME_SIZE];
232 	if (conf->pgpolicy_flag > 0) {
233 		mp->pgpolicy = conf->pgpolicy_flag;
234 		origin = cmdline_origin;
237 	mp_set_mpe(pgpolicy);
238 	mp_set_ovr(pgpolicy);
239 	mp_set_hwe(pgpolicy);
240 	mp_set_conf(pgpolicy);
241 	mp_set_default(pgpolicy, DEFAULT_PGPOLICY);
243 	mp->pgpolicyfn = pgpolicies[mp->pgpolicy];
244 	get_pgpolicy_name(buff, POLICY_NAME_SIZE, mp->pgpolicy);
245 	condlog(3, "%s: path_grouping_policy = %s %s", mp->alias, buff, origin);
/*
 * select_selector(): layered lookup of the path selector string; the
 * result is strdup()ed so the map owns its own copy.
 * NOTE(review): no visible NULL check after strdup() -- may be handled
 * in lines missing from this extraction; confirm.
 */
249 int select_selector(struct config *conf, struct multipath * mp)
253 	mp_set_mpe(selector);
254 	mp_set_ovr(selector);
255 	mp_set_hwe(selector);
256 	mp_set_conf(selector);
257 	mp_set_default(selector, DEFAULT_SELECTOR);
259 	mp->selector = strdup(mp->selector);
260 	condlog(3, "%s: path_selector = \"%s\" %s", mp->alias, mp->selector,
/*
 * select_alias_prefix(): layered lookup of the alias prefix (no
 * multipaths-section layer -- per-map aliases are set explicitly there).
 */
266 select_alias_prefix (struct config *conf, struct multipath * mp)
270 	mp_set_ovr(alias_prefix);
271 	mp_set_hwe(alias_prefix);
272 	mp_set_conf(alias_prefix);
273 	mp_set_default(alias_prefix, DEFAULT_ALIAS_PREFIX);
275 	condlog(3, "%s: alias_prefix = %s %s", mp->wwid, mp->alias_prefix,
/*
 * want_user_friendly_names(): layered lookup into a local variable;
 * returns non-zero iff user_friendly_names resolves to ON.
 */
280 want_user_friendly_names(struct config *conf, struct multipath * mp)
284 	int user_friendly_names;
286 	do_set(user_friendly_names, mp->mpe, user_friendly_names,
288 	do_set(user_friendly_names, conf->overrides, user_friendly_names,
290 	do_set_from_hwe(user_friendly_names, mp, user_friendly_names,
292 	do_set(user_friendly_names, conf, user_friendly_names,
294 	do_default(user_friendly_names, DEFAULT_USER_FRIENDLY_NAMES);
296 	condlog(3, "%s: user_friendly_names = %s %s", mp->wwid,
297 		(user_friendly_names == USER_FRIENDLY_NAMES_ON)? "yes" : "no",
299 	return (user_friendly_names == USER_FRIENDLY_NAMES_ON);
/*
 * select_alias(): pick the map alias, in priority order:
 *   1. explicit alias from the multipaths section;
 *   2. (if user_friendly_names) an existing alias from the bindings
 *      file matching alias_old;
 *   3. a freshly allocated user-friendly alias (e.g. "mpathX");
 *   4. fall back to the WWID itself.
 * Returns 0 on success, 1 if no alias could be allocated.
 */
302 int select_alias(struct config *conf, struct multipath * mp)
304 	const char *origin = NULL;
306 	if (mp->mpe && mp->mpe->alias) {
307 		mp->alias = strdup(mp->mpe->alias);
308 		origin = multipaths_origin;
313 	if (!want_user_friendly_names(conf, mp))
316 	select_alias_prefix(conf, mp);
318 	if (strlen(mp->alias_old) > 0) {
319 		mp->alias = use_existing_alias(mp->wwid, conf->bindings_file,
320 					mp->alias_old, mp->alias_prefix,
321 					conf->bindings_read_only);
		/* alias_old is consumed: clear it so it is not reused */
322 		memset (mp->alias_old, 0, WWID_SIZE);
323 		origin = "(setting: using existing alias)";
326 	if (mp->alias == NULL) {
327 		mp->alias = get_user_friendly_alias(mp->wwid,
328 			conf->bindings_file, mp->alias_prefix, conf->bindings_read_only);
329 		origin = "(setting: user_friendly_name)";
332 	if (mp->alias == NULL) {
333 		mp->alias = strdup(mp->wwid);
334 		origin = "(setting: default to WWID)";
337 	condlog(3, "%s: alias = %s %s", mp->wwid, mp->alias, origin);
338 	return mp->alias ? 0 : 1;
/*
 * reconcile_features_with_options(): translate deprecated feature-string
 * tokens into their modern option equivalents and strip them from the
 * features string:
 *   - "queue_if_no_path"        -> no_path_retry = queue (if unset);
 *   - "retain_attached_hw_handler" -> retain_hwhandler = on (if unset).
 * Warns when an explicit conflicting option overrides the feature.
 * @id: alias/wwid used only for log messages.
 * @features: in/out; deprecated tokens are removed in place.
 */
341 void reconcile_features_with_options(const char *id, char **features, int* no_path_retry,
342 			  int *retain_hwhandler)
344 	static const char q_i_n_p[] = "queue_if_no_path";
345 	static const char r_a_h_h[] = "retain_attached_hw_handler";
346 	STRBUF_ON_STACK(buff);
348 	if (*features == NULL)
354 	 * We only use no_path_retry internally. The "queue_if_no_path"
355 	 * device-mapper feature is derived from it when the map is loaded.
356 	 * For consistency, "queue_if_no_path" is removed from the
357 	 * internal libmultipath features string.
358 	 * For backward compatibility we allow 'features "1 queue_if_no_path"';
359 	 * it's translated into "no_path_retry queue" here.
361 	if (strstr(*features, q_i_n_p)) {
362 		condlog(0, "%s: option 'features \"1 %s\"' is deprecated, "
363 			"please use 'no_path_retry queue' instead",
365 		if (*no_path_retry == NO_PATH_RETRY_UNDEF) {
366 			*no_path_retry = NO_PATH_RETRY_QUEUE;
367 			print_no_path_retry(&buff, *no_path_retry);
368 			condlog(3, "%s: no_path_retry = %s (inherited setting from feature '%s')",
369 				id, get_strbuf_str(&buff), q_i_n_p);
371 			/* Warn only if features string is overridden */
372 			if (*no_path_retry != NO_PATH_RETRY_QUEUE) {
373 				print_no_path_retry(&buff, *no_path_retry);
374 				condlog(2, "%s: ignoring feature '%s' because no_path_retry is set to '%s'",
375 					id, q_i_n_p, get_strbuf_str(&buff));
377 		remove_feature(features, q_i_n_p);
379 	if (strstr(*features, r_a_h_h)) {
380 		condlog(0, "%s: option 'features \"1 %s\"' is deprecated",
382 		if (*retain_hwhandler == RETAIN_HWHANDLER_UNDEF) {
383 			condlog(3, "%s: %s = on (inherited setting from feature '%s')",
384 				id, r_a_h_h, r_a_h_h);
385 			*retain_hwhandler = RETAIN_HWHANDLER_ON;
386 		} else if (*retain_hwhandler == RETAIN_HWHANDLER_OFF)
387 			condlog(2, "%s: ignoring feature '%s' because %s is set to 'off'",
388 				id, r_a_h_h, r_a_h_h);
389 		remove_feature(features, r_a_h_h);
/*
 * select_features(): layered lookup of the dm features string; the
 * result is strdup()ed so the map owns it, then deprecated tokens are
 * reconciled with no_path_retry / retain_hwhandler.
 */
393 int select_features(struct config *conf, struct multipath *mp)
397 	mp_set_mpe(features);
398 	mp_set_ovr(features);
399 	mp_set_hwe(features);
400 	mp_set_conf(features);
401 	mp_set_default(features, DEFAULT_FEATURES);
403 	mp->features = strdup(mp->features);
405 	reconcile_features_with_options(mp->alias, &mp->features,
407 					&mp->retain_hwhandler);
408 	condlog(3, "%s: features = \"%s\" %s", mp->alias, mp->features, origin);
/*
 * get_dh_state(): read the SCSI device-handler state ("dh_state" sysfs
 * attribute) of the path's parent scsi_device into value.
 * Returns the sysfs read result; requires a valid pp->udev.
 */
412 static int get_dh_state(struct path *pp, char *value, size_t value_len)
414 	struct udev_device *ud;
416 	if (pp->udev == NULL)
419 	ud = udev_device_get_parent_with_subsystem_devtype(
420 		pp->udev, "scsi", "scsi_device");
424 	return sysfs_attr_get_value(ud, "dh_state", value, value_len);
/*
 * select_hwhandler(): choose the hardware handler string.
 * Priority: a handler already attached by the kernel (when
 * retain_hwhandler is not off), then the layered config lookup, then
 * TPGS autodetection, which may promote the default handler to
 * "1 alua" (or demote a configured "1 alua" if TPGS is unsupported).
 * The final string is strdup()ed so the map owns it.
 */
427 int select_hwhandler(struct config *conf, struct multipath *mp)
431 	/* dh_state is no longer than "detached" */
433 	static char alua_name[] = "1 alua";
434 	static const char tpgs_origin[]= "(setting: autodetected from TPGS)";
437 	bool all_tpgs = true, one_tpgs = false;
	/* dh_state points past the "1 " prefix reserved in handler[] */
439 	dh_state = &handler[2];
442 	 * TPGS_UNDEF means that ALUA support couldn't determined either way
443 	 * yet, probably because the path was always down.
444 	 * If at least one path does have TPGS support, and no path has
445 	 * TPGS_NONE, assume that TPGS would be supported by all paths if
448 	vector_foreach_slot(mp->paths, pp, i) {
449 		int tpgs = path_get_tpgs(pp);
451 		all_tpgs = all_tpgs && tpgs != TPGS_NONE;
452 		one_tpgs = one_tpgs ||
453 			(tpgs != TPGS_NONE && tpgs != TPGS_UNDEF);
455 	all_tpgs = all_tpgs && one_tpgs;
457 	if (mp->retain_hwhandler != RETAIN_HWHANDLER_OFF) {
458 		vector_foreach_slot(mp->paths, pp, i) {
			/* a non-"detached" dh_state means the kernel already
			 * attached a handler: keep it */
459 			if (get_dh_state(pp, dh_state, sizeof(handler) - 2) > 0
460 			    && strcmp(dh_state, "detached")) {
461 				memcpy(handler, "1 ", 2);
462 				mp->hwhandler = handler;
463 				origin = "(setting: retained by kernel driver)";
469 	mp_set_hwe(hwhandler);
470 	mp_set_conf(hwhandler);
471 	mp_set_default(hwhandler, DEFAULT_HWHANDLER);
473 	if (all_tpgs && !strcmp(mp->hwhandler, DEFAULT_HWHANDLER) &&
474 		origin == default_origin) {
475 		mp->hwhandler = alua_name;
476 		origin = tpgs_origin;
477 	} else if (!all_tpgs && !strcmp(mp->hwhandler, alua_name)) {
478 		mp->hwhandler = DEFAULT_HWHANDLER;
479 		origin = tpgs_origin;
481 	mp->hwhandler = strdup(mp->hwhandler);
482 	condlog(3, "%s: hardware_handler = \"%s\" %s", mp->alias, mp->hwhandler,
488  * Current RDAC (NetApp E-Series) firmware relies
489  * on periodic REPORT TARGET PORT GROUPS for
490  * internal load balancing.
491  * Using the sysfs priority checker defeats this purpose.
493  * Moreover, NetApp would also prefer the RDAC checker over ALUA.
494  * (https://www.redhat.com/archives/dm-devel/2017-September/msg00326.html)
/*
 * check_rdac(): heuristically decide whether this SCSI path belongs to
 * an RDAC array: the hwtable may name the RDAC checker explicitly;
 * otherwise probe VPD page 0xC9 and match the "vac1" signature at
 * offset 4.  Returns non-zero for RDAC.
 */
497 check_rdac(struct path * pp)
501 	const char *checker_name = NULL;
503 	if (pp->bus != SYSFS_BUS_SCSI)
505 	/* Avoid checking 0xc9 if this is likely not an RDAC array */
506 	if (!__do_set_from_hwe(checker_name, pp, checker_name) &&
507 	    !is_vpd_page_supported(pp->fd, 0xC9))
509 	if (checker_name && strcmp(checker_name, RDAC))
511 	len = get_vpd_sgio(pp->fd, 0xC9, 0, buff, 44);
514 	return !(memcmp(buff + 4, "vac1", 4));
/*
 * select_checker(): choose the path checker.  With detect_checker on,
 * autodetect RDAC (preferred) or TPGS/ALUA first; otherwise fall through
 * the layered config lookup.  Also selects the checker timeout:
 * explicit checker_timeout > kernel sysfs timeout > DEF_TIMEOUT.
 */
517 int select_checker(struct config *conf, struct path *pp)
521 	struct checker * c = &pp->checker;
523 	if (pp->detect_checker == DETECT_CHECKER_ON) {
524 		origin = autodetect_origin;
525 		if (check_rdac(pp)) {
		/* cache TPGS state before inspecting pp->tpgs */
529 		(void)path_get_tpgs(pp);
530 		if (pp->tpgs != TPGS_NONE && pp->tpgs != TPGS_UNDEF) {
535 	do_set(checker_name, conf->overrides, ckr_name, overrides_origin);
536 	do_set_from_hwe(checker_name, pp, ckr_name, hwe_origin);
537 	do_set(checker_name, conf, ckr_name, conf_origin);
538 	do_default(ckr_name, DEFAULT_CHECKER);
540 	checker_get(conf->multipath_dir, c, ckr_name);
541 	condlog(3, "%s: path_checker = %s %s", pp->dev,
542 		checker_name(c), origin);
543 	if (conf->checker_timeout) {
544 		c->timeout = conf->checker_timeout;
545 		condlog(3, "%s: checker timeout = %u s %s",
546 			pp->dev, c->timeout, conf_origin);
548 	else if (sysfs_get_timeout(pp, &c->timeout) > 0)
549 		condlog(3, "%s: checker timeout = %u s (setting: kernel sysfs)",
550 			pp->dev, c->timeout);
552 		c->timeout = DEF_TIMEOUT;
553 		condlog(3, "%s: checker timeout = %u s %s",
554 			pp->dev, c->timeout, default_origin);
/*
 * select_getuid(): choose how the path WWID is obtained.  A per-device
 * uid_attrs match (defaults section) wins; otherwise layered lookup of
 * uid_attribute; the legacy getuid callout is logged as fallback.
 */
559 int select_getuid(struct config *conf, struct path *pp)
563 	pp->uid_attribute = get_uid_attribute_by_attrs(conf, pp->dev);
564 	if (pp->uid_attribute) {
565 		origin = "(setting: multipath.conf defaults section / uid_attrs)";
570 	pp_set_ovr(uid_attribute);
572 	pp_set_hwe(uid_attribute);
574 	pp_set_conf(uid_attribute);
575 	pp_set_default(uid_attribute, DEFAULT_UID_ATTRIBUTE);
577 	if (pp->uid_attribute)
578 		condlog(3, "%s: uid_attribute = %s %s", pp->dev,
579 			pp->uid_attribute, origin);
581 		condlog(3, "%s: getuid = \"%s\" %s", pp->dev, pp->getuid,
/* must be called after select_getuid */
/*
 * select_recheck_wwid(): layered lookup; forcibly disabled when the
 * device cannot support WWID rechecking (non-SCSI, getuid callout in
 * use, or no uid fallback available).
 */
587 int select_recheck_wwid(struct config *conf, struct path * pp)
591 	pp_set_ovr(recheck_wwid);
592 	pp_set_hwe(recheck_wwid);
593 	pp_set_conf(recheck_wwid);
594 	pp_set_default(recheck_wwid, DEFAULT_RECHECK_WWID);
596 	if (pp->recheck_wwid == RECHECK_WWID_ON &&
597 	    (pp->bus != SYSFS_BUS_SCSI || pp->getuid != NULL ||
598 	     !has_uid_fallback(pp))) {
599 		pp->recheck_wwid = RECHECK_WWID_OFF;
600 		origin = "(setting: unsupported by device type/config)";
602 	condlog(3, "%s: recheck_wwid = %i %s", pp->dev, pp->recheck_wwid,
/*
 * detect_prio(): autodetect the prioritizer: ANA for NVMe controllers
 * that support it; for SCSI/TPGS devices, sysfs priority when the
 * asymmetric access state is readable (and the array is not RDAC with
 * implicit-only TPGS), else ALUA.
 */
608 detect_prio(struct config *conf, struct path * pp)
610 	struct prio *p = &pp->prio;
617 		if (nvme_id_ctrl_ana(pp->fd, NULL) == 0)
619 			default_prio = PRIO_ANA;
622 		tpgs = path_get_tpgs(pp);
623 		if (tpgs == TPGS_NONE)
625 		if ((tpgs == TPGS_EXPLICIT || !check_rdac(pp)) &&
626 		    sysfs_get_asymmetric_access_state(pp, buff, 512) >= 0)
627 			default_prio = PRIO_SYSFS;
629 			default_prio = PRIO_ALUA;
634 	prio_get(conf->multipath_dir, p, default_prio, DEFAULT_PRIO_ARGS);
/*
 * set_prio(): take prio_name/prio_args from one config layer when set.
 * Body truncated in this extraction.
 */
637 #define set_prio(dir, src, msg) \
639 	if (src && src->prio_name) { \
640 		prio_get(dir, p, src->prio_name, src->prio_args); \
/*
 * set_prio_from_vec(): scan a hwentry vector, taking the first
 * prio_name and (independently) the first prio_args found.
 */
646 #define set_prio_from_vec(type, dir, src, msg, p) \
650 		char *prio_name = NULL, *prio_args = NULL; \
652 		vector_foreach_slot(src, _p, i) { \
653 			if (prio_name == NULL && _p->prio_name) \
654 				prio_name = _p->prio_name; \
655 			if (prio_args == NULL && _p->prio_args) \
656 				prio_args = _p->prio_args; \
658 		if (prio_name != NULL) { \
659 			prio_get(dir, p, prio_name, prio_args); \
/*
 * select_prio(): choose the prioritizer.  detect_prio() autodetection
 * wins when enabled and successful; otherwise layered lookup
 * (multipaths by WWID > overrides > hwe vector > defaults), falling
 * back to DEFAULT_PRIO.  If ALUA was chosen but the device turns out
 * to have no TPGS support, revert to the default prioritizer.
 */
665 int select_prio(struct config *conf, struct path *pp)
668 	struct mpentry * mpe;
669 	struct prio * p = &pp->prio;
672 	if (pp->detect_prio == DETECT_PRIO_ON) {
673 		detect_prio(conf, pp);
674 		if (prio_selected(p)) {
675 			origin = autodetect_origin;
679 	mpe = find_mpe(conf->mptable, pp->wwid);
680 	set_prio(conf->multipath_dir, mpe, multipaths_origin);
681 	set_prio(conf->multipath_dir, conf->overrides, overrides_origin);
682 	set_prio_from_vec(struct hwentry, conf->multipath_dir,
683 			  pp->hwe, hwe_origin, p);
684 	set_prio(conf->multipath_dir, conf, conf_origin);
685 	prio_get(conf->multipath_dir, p, DEFAULT_PRIO, DEFAULT_PRIO_ARGS);
686 	origin = default_origin;
689 	 * fetch tpgs mode for alua, if its not already obtained
691 	if (!strncmp(prio_name(p), PRIO_ALUA, PRIO_NAME_LEN)) {
692 		int tpgs = path_get_tpgs(pp);
694 		if (tpgs == TPGS_NONE) {
695 			prio_get(conf->multipath_dir,
696 				 p, DEFAULT_PRIO, DEFAULT_PRIO_ARGS);
697 			origin = "(setting: emergency fallback - alua failed)";
701 	condlog(log_prio, "%s: prio = %s %s", pp->dev, prio_name(p), origin);
702 	condlog(3, "%s: prio args = \"%s\" %s", pp->dev, prio_args(p), origin);
/*
 * select_no_path_retry(): disable_queueing forces NO_PATH_RETRY_FAIL;
 * otherwise layered lookup with NO built-in default -- an unresolved
 * value is logged as "undef".
 */
706 int select_no_path_retry(struct config *conf, struct multipath *mp)
708 	const char *origin = NULL;
709 	STRBUF_ON_STACK(buff);
711 	if (mp->disable_queueing) {
712 		condlog(0, "%s: queueing disabled", mp->alias);
713 		mp->no_path_retry = NO_PATH_RETRY_FAIL;
716 	mp_set_mpe(no_path_retry);
717 	mp_set_ovr(no_path_retry);
718 	mp_set_hwe(no_path_retry);
719 	mp_set_conf(no_path_retry);
721 		print_no_path_retry(&buff, mp->no_path_retry);
723 		condlog(3, "%s: no_path_retry = %s %s", mp->alias,
724 			get_strbuf_str(&buff), origin);
726 		condlog(3, "%s: no_path_retry = undef %s",
727 			mp->alias, default_origin);
/*
 * select_minio_rq(): rq_min_io layered lookup (request-based dm-mpath).
 */
732 select_minio_rq (struct config *conf, struct multipath * mp)
736 	do_set(minio_rq, mp->mpe, mp->minio, multipaths_origin);
737 	do_set(minio_rq, conf->overrides, mp->minio, overrides_origin);
738 	do_set_from_hwe(minio_rq, mp, mp->minio, hwe_origin);
739 	do_set(minio_rq, conf, mp->minio, conf_origin);
740 	do_default(mp->minio, DEFAULT_MINIO_RQ);
742 	condlog(3, "%s: minio = %i %s", mp->alias, mp->minio, origin);
/*
 * select_minio_bio(): min_io layered lookup (BIO-based dm-mpath).
 */
747 select_minio_bio (struct config *conf, struct multipath * mp)
755 	mp_set_default(minio, DEFAULT_MINIO);
757 	condlog(3, "%s: minio = %i %s", mp->alias, mp->minio, origin);
/*
 * select_minio(): dispatch to the rq variant when the dm-mpath target
 * is request-based (version >= 1.1.0), else the bio variant.
 */
761 int select_minio(struct config *conf, struct multipath *mp)
763 	unsigned int minv_dmrq[3] = {1, 1, 0}, version[3];
765 	if (!libmp_get_version(DM_MPATH_TARGET_VERSION, version)
766 	    && VERSION_GE(version, minv_dmrq))
767 		return select_minio_rq(conf, mp);
769 	return select_minio_bio(conf, mp);
/*
 * select_fast_io_fail(): layered lookup of the transport
 * fast_io_fail_tmo value (undef/off/seconds).
 */
772 int select_fast_io_fail(struct config *conf, struct multipath *mp)
775 	STRBUF_ON_STACK(buff);
777 	mp_set_ovr(fast_io_fail);
778 	mp_set_hwe(fast_io_fail);
779 	mp_set_conf(fast_io_fail);
780 	mp_set_default(fast_io_fail, DEFAULT_FAST_IO_FAIL);
782 	print_undef_off_zero(&buff, mp->fast_io_fail);
783 	condlog(3, "%s: fast_io_fail_tmo = %s %s", mp->alias,
784 		get_strbuf_str(&buff), origin);
/*
 * select_dev_loss(): layered lookup of dev_loss_tmo; when no layer sets
 * it, it stays DEV_LOSS_TMO_UNSET (sysfs is left untouched).
 */
788 int select_dev_loss(struct config *conf, struct multipath *mp)
791 	STRBUF_ON_STACK(buff);
793 	mp_set_ovr(dev_loss);
794 	mp_set_hwe(dev_loss);
795 	mp_set_conf(dev_loss);
796 	mp->dev_loss = DEV_LOSS_TMO_UNSET;
799 	print_dev_loss(&buff, mp->dev_loss);
800 	condlog(3, "%s: dev_loss_tmo = %s %s", mp->alias,
801 		get_strbuf_str(&buff), origin);
/*
 * select_eh_deadline(): layered lookup of the SCSI eh_deadline; unset
 * means sysfs is not modified.
 */
805 int select_eh_deadline(struct config *conf, struct multipath *mp)
808 	STRBUF_ON_STACK(buff);
810 	mp_set_ovr(eh_deadline);
811 	mp_set_hwe(eh_deadline);
812 	mp_set_conf(eh_deadline);
813 	mp->eh_deadline = EH_DEADLINE_UNSET;
814 	/* not changing sysfs in default cause, so don't print anything */
817 	print_undef_off_zero(&buff, mp->eh_deadline);
818 	condlog(3, "%s: eh_deadline = %s %s", mp->alias,
819 		get_strbuf_str(&buff), origin);
/*
 * select_flush_on_last_del(): layered lookup; controls whether
 * outstanding I/O is failed when the last path is removed.
 */
823 int select_flush_on_last_del(struct config *conf, struct multipath *mp)
827 	mp_set_mpe(flush_on_last_del);
828 	mp_set_ovr(flush_on_last_del);
829 	mp_set_hwe(flush_on_last_del);
830 	mp_set_conf(flush_on_last_del);
831 	mp_set_default(flush_on_last_del, DEFAULT_FLUSH);
833 	condlog(3, "%s: flush_on_last_del = %s %s", mp->alias,
834 		(mp->flush_on_last_del == FLUSH_ENABLED)? "yes" : "no", origin);
/*
 * select_reservation_key(): choose the persistent-reservation key from
 * the multipaths section, then defaults; PRKEY_SOURCE_FILE keys are
 * re-read from the prkeys file (cleared on read failure).
 * Fix: from_file points at string literals, so it must be declared
 * const char * -- writing through a char * alias of a literal is
 * undefined behavior and this silences -Wwrite-strings.
 */
838 int select_reservation_key(struct config *conf, struct multipath *mp)
841 	STRBUF_ON_STACK(buff);
842 	const char *from_file = "";
845 	do_prkey_set(mp->mpe, multipaths_origin);
846 	do_prkey_set(conf, conf_origin);
	/* no key configured anywhere: clear key and source */
847 	put_be64(mp->reservation_key, 0);
849 	mp->prkey_source = PRKEY_SOURCE_NONE;
852 	if (mp->prkey_source == PRKEY_SOURCE_FILE) {
853 		from_file = " (from prkeys file)";
854 		if (get_prkey(conf, mp, &prkey, &mp->sa_flags) != 0)
855 			put_be64(mp->reservation_key, 0);
857 			put_be64(mp->reservation_key, prkey);
859 	print_reservation_key(&buff, mp->reservation_key,
860 			      mp->sa_flags, mp->prkey_source);
861 	condlog(3, "%s: reservation_key = %s %s%s", mp->alias,
862 		get_strbuf_str(&buff), origin, from_file);
/*
 * select_retain_hwhandler(): off (with a warning) on dm-mpath < 1.5.0;
 * forced on for kernels >= 4.3.0 where it is built-in behavior;
 * otherwise the usual layered lookup.
 */
866 int select_retain_hwhandler(struct config *conf, struct multipath *mp)
869 	unsigned int minv_dm_retain[3] = {1, 5, 0}, version[3];
871 	if (!libmp_get_version(DM_MPATH_TARGET_VERSION, version) &&
872 	    !VERSION_GE(version, minv_dm_retain)) {
873 		mp->retain_hwhandler = RETAIN_HWHANDLER_OFF;
874 		origin = "(setting: WARNING, requires kernel dm-mpath version >= 1.5.0)";
877 	if (get_linux_version_code() >= KERNEL_VERSION(4, 3, 0)) {
878 		mp->retain_hwhandler = RETAIN_HWHANDLER_ON;
879 		origin = "(setting: implied in kernel >= 4.3.0)";
882 	mp_set_ovr(retain_hwhandler);
883 	mp_set_hwe(retain_hwhandler);
884 	mp_set_conf(retain_hwhandler);
885 	mp_set_default(retain_hwhandler, DEFAULT_RETAIN_HWHANDLER);
887 	condlog(3, "%s: retain_attached_hw_handler = %s %s", mp->alias,
888 		(mp->retain_hwhandler == RETAIN_HWHANDLER_ON)? "yes" : "no",
/*
 * select_detect_prio(): layered lookup of the detect_prio toggle.
 */
893 int select_detect_prio(struct config *conf, struct path *pp)
897 	pp_set_ovr(detect_prio);
898 	pp_set_hwe(detect_prio);
899 	pp_set_conf(detect_prio);
900 	pp_set_default(detect_prio, DEFAULT_DETECT_PRIO);
902 	condlog(3, "%s: detect_prio = %s %s", pp->dev,
903 		(pp->detect_prio == DETECT_PRIO_ON)? "yes" : "no", origin);
/*
 * select_detect_checker(): layered lookup of the detect_checker toggle.
 */
907 int select_detect_checker(struct config *conf, struct path *pp)
911 	pp_set_ovr(detect_checker);
912 	pp_set_hwe(detect_checker);
913 	pp_set_conf(detect_checker);
914 	pp_set_default(detect_checker, DEFAULT_DETECT_CHECKER);
916 	condlog(3, "%s: detect_checker = %s %s", pp->dev,
917 		(pp->detect_checker == DETECT_CHECKER_ON)? "yes" : "no",
/*
 * select_deferred_remove(): off when libdevmapper lacks deferred-remove
 * support; left untouched while a deferred remove is in progress;
 * otherwise layered lookup.
 */
922 int select_deferred_remove(struct config *conf, struct multipath *mp)
926 #ifndef LIBDM_API_DEFERRED
927 	mp->deferred_remove = DEFERRED_REMOVE_OFF;
928 	origin = "(setting: WARNING, not compiled with support)";
931 	if (mp->deferred_remove == DEFERRED_REMOVE_IN_PROGRESS) {
932 		condlog(3, "%s: deferred remove in progress", mp->alias);
935 	mp_set_mpe(deferred_remove);
936 	mp_set_ovr(deferred_remove);
937 	mp_set_hwe(deferred_remove);
938 	mp_set_conf(deferred_remove);
939 	mp_set_default(deferred_remove, DEFAULT_DEFERRED_REMOVE);
941 	condlog(3, "%s: deferred_remove = %s %s", mp->alias,
942 		(mp->deferred_remove == DEFERRED_REMOVE_ON)? "yes" : "no",
/* true iff any san_path_err_* option is explicitly enabled */
947 static inline int san_path_check_options_set(const struct multipath *mp)
949 	return mp->san_path_err_threshold > 0 ||
950 	       mp->san_path_err_forget_rate > 0 ||
951 	       mp->san_path_err_recovery_time > 0;
/*
 * use_delay_watch_checks(): resolve the deprecated delay_watch_checks
 * option into a local value (NU_UNDEF when unset) and return it.
 */
955 use_delay_watch_checks(struct config *conf, struct multipath *mp)
957 	int value = NU_UNDEF;
958 	const char *origin = default_origin;
959 	STRBUF_ON_STACK(buff);
961 	do_set(delay_watch_checks, mp->mpe, value, multipaths_origin);
962 	do_set(delay_watch_checks, conf->overrides, value, overrides_origin);
963 	do_set_from_hwe(delay_watch_checks, mp, value, hwe_origin);
964 	do_set(delay_watch_checks, conf, value, conf_origin);
966 	if (print_off_int_undef(&buff, value) > 0)
967 		condlog(3, "%s: delay_watch_checks = %s %s", mp->alias,
968 			get_strbuf_str(&buff), origin);
/*
 * use_delay_wait_checks(): same, for deprecated delay_wait_checks.
 */
973 use_delay_wait_checks(struct config *conf, struct multipath *mp)
975 	int value = NU_UNDEF;
976 	const char *origin = default_origin;
977 	STRBUF_ON_STACK(buff);
979 	do_set(delay_wait_checks, mp->mpe, value, multipaths_origin);
980 	do_set(delay_wait_checks, conf->overrides, value, overrides_origin);
981 	do_set_from_hwe(delay_wait_checks, mp, value, hwe_origin);
982 	do_set(delay_wait_checks, conf, value, conf_origin);
984 	if (print_off_int_undef(&buff, value) > 0)
985 		condlog(3, "%s: delay_wait_checks = %s %s", mp->alias,
986 			get_strbuf_str(&buff), origin);
/*
 * select_delay_checks(): translate the deprecated delay_watch_checks /
 * delay_wait_checks options into the san_path_err_* model
 * (threshold=1, forget_rate=watch, recovery_time derived from wait);
 * ignored entirely when san_path_err_* options are set explicitly.
 */
990 int select_delay_checks(struct config *conf, struct multipath *mp)
992 	int watch_checks, wait_checks;
993 	STRBUF_ON_STACK(buff);
995 	watch_checks = use_delay_watch_checks(conf, mp);
996 	wait_checks = use_delay_wait_checks(conf, mp);
997 	if (watch_checks <= 0 && wait_checks <= 0)
999 	if (san_path_check_options_set(mp)) {
1000 		condlog(3, "%s: both marginal_path and delay_checks error detection options selected", mp->alias);
1001 		condlog(3, "%s: ignoring delay_checks options", mp->alias);
1004 	mp->san_path_err_threshold = 1;
1005 	condlog(3, "%s: san_path_err_threshold = 1 %s", mp->alias,
1006 		(watch_checks > 0)? delay_watch_origin : delay_wait_origin);
1007 	if (watch_checks > 0) {
1008 		mp->san_path_err_forget_rate = watch_checks;
1009 		print_off_int_undef(&buff, mp->san_path_err_forget_rate);
1010 		condlog(3, "%s: san_path_err_forget_rate = %s %s", mp->alias,
1011 			get_strbuf_str(&buff), delay_watch_origin);
1012 		reset_strbuf(&buff);
1014 	if (wait_checks > 0) {
1015 		mp->san_path_err_recovery_time = wait_checks *
1017 		print_off_int_undef(&buff, mp->san_path_err_recovery_time);
1018 		condlog(3, "%s: san_path_err_recovery_time = %s %s", mp->alias,
1019 			get_strbuf_str(&buff), delay_wait_origin);
/* one-shot guard so the deprecation warning is printed only once */
1024 static int san_path_deprecated_warned;
/*
 * warn_san_path_deprecated(): warn (once per process) that a
 * san_path_err_* option is deprecated in favor of marginal_path_*.
 * Body truncated in this extraction.
 */
1025 #define warn_san_path_deprecated(v, x) \
1027 	if (v->x > 0 && !san_path_deprecated_warned) { \
1028 		san_path_deprecated_warned = 1; \
1029 		condlog(1, "WARNING: option %s is deprecated, " \
1030 			"please use marginal_path options instead", \
/*
 * select_san_path_err_threshold(): forced off (NU_NO) when
 * marginal-path checking or FPIN marginal pathgroups are active;
 * otherwise the usual layered lookup.
 */
1035 int select_san_path_err_threshold(struct config *conf, struct multipath *mp)
1038 	STRBUF_ON_STACK(buff);
1040 	if (marginal_path_check_enabled(mp) || (conf->marginal_pathgroups == MARGINAL_PATHGROUP_FPIN)) {
1041 		mp->san_path_err_threshold = NU_NO;
1042 		if (conf->marginal_pathgroups == MARGINAL_PATHGROUP_FPIN)
1043 			origin = fpin_marginal_path_origin;
1045 			origin = marginal_path_origin;
1048 	mp_set_mpe(san_path_err_threshold);
1049 	mp_set_ovr(san_path_err_threshold);
1050 	mp_set_hwe(san_path_err_threshold);
1051 	mp_set_conf(san_path_err_threshold);
1052 	mp_set_default(san_path_err_threshold, DEFAULT_ERR_CHECKS);
1054 	if (print_off_int_undef(&buff, mp->san_path_err_threshold) > 0)
1055 		condlog(3, "%s: san_path_err_threshold = %s %s",
1056 			mp->alias, get_strbuf_str(&buff), origin);
1057 	warn_san_path_deprecated(mp, san_path_err_threshold);
/*
 * select_san_path_err_forget_rate(): same pattern as above for the
 * error forget rate.
 */
1061 int select_san_path_err_forget_rate(struct config *conf, struct multipath *mp)
1064 	STRBUF_ON_STACK(buff);
1066 	if (marginal_path_check_enabled(mp) || (conf->marginal_pathgroups == MARGINAL_PATHGROUP_FPIN)) {
1067 		mp->san_path_err_forget_rate = NU_NO;
1068 		if (conf->marginal_pathgroups == MARGINAL_PATHGROUP_FPIN)
1069 			origin = fpin_marginal_path_origin;
1071 			origin = marginal_path_origin;
1074 	mp_set_mpe(san_path_err_forget_rate);
1075 	mp_set_ovr(san_path_err_forget_rate);
1076 	mp_set_hwe(san_path_err_forget_rate);
1077 	mp_set_conf(san_path_err_forget_rate);
1078 	mp_set_default(san_path_err_forget_rate, DEFAULT_ERR_CHECKS);
1080 	if (print_off_int_undef(&buff, mp->san_path_err_forget_rate) > 0)
1081 		condlog(3, "%s: san_path_err_forget_rate = %s %s",
1082 			mp->alias, get_strbuf_str(&buff), origin);
1083 	warn_san_path_deprecated(mp, san_path_err_forget_rate);
/*
 * select_san_path_err_recovery_time(): forced off (NU_NO) when
 * marginal-path checking or FPIN marginal pathgroups are active;
 * otherwise the usual layered lookup.
 * Fix: log only when print_off_int_undef() returned "> 0" (characters
 * written), matching the sibling selectors (threshold, forget_rate,
 * sample_time); the previous "!= 0" would also log when the print
 * helper failed with a negative return, emitting a garbage string.
 */
1088 int select_san_path_err_recovery_time(struct config *conf, struct multipath *mp)
1091 	STRBUF_ON_STACK(buff);
1093 	if (marginal_path_check_enabled(mp) || (conf->marginal_pathgroups == MARGINAL_PATHGROUP_FPIN)) {
1094 		mp->san_path_err_recovery_time = NU_NO;
1095 		if (conf->marginal_pathgroups == MARGINAL_PATHGROUP_FPIN)
1096 			origin = fpin_marginal_path_origin;
1098 			origin = marginal_path_origin;
1101 	mp_set_mpe(san_path_err_recovery_time);
1102 	mp_set_ovr(san_path_err_recovery_time);
1103 	mp_set_hwe(san_path_err_recovery_time);
1104 	mp_set_conf(san_path_err_recovery_time);
1105 	mp_set_default(san_path_err_recovery_time, DEFAULT_ERR_CHECKS);
1107 	if (print_off_int_undef(&buff, mp->san_path_err_recovery_time) > 0)
1108 		condlog(3, "%s: san_path_err_recovery_time = %s %s", mp->alias,
1109 			get_strbuf_str(&buff), origin);
1110 	warn_san_path_deprecated(mp, san_path_err_recovery_time);
/*
 * select_marginal_path_err_sample_time(): forced off under FPIN
 * marginal pathgroups; otherwise layered lookup, with the sample time
 * clamped up to 2*IOTIMEOUT_SEC (shorter windows cannot produce a
 * meaningful error rate).
 */
1115 int select_marginal_path_err_sample_time(struct config *conf, struct multipath *mp)
1118 	STRBUF_ON_STACK(buff);
1120 	if (conf->marginal_pathgroups == MARGINAL_PATHGROUP_FPIN) {
1121 		mp->marginal_path_err_sample_time = NU_NO;
1122 		origin = fpin_marginal_path_origin;
1126 	mp_set_mpe(marginal_path_err_sample_time);
1127 	mp_set_ovr(marginal_path_err_sample_time);
1128 	mp_set_hwe(marginal_path_err_sample_time);
1129 	mp_set_conf(marginal_path_err_sample_time);
1130 	mp_set_default(marginal_path_err_sample_time, DEFAULT_ERR_CHECKS);
1132 	if (mp->marginal_path_err_sample_time > 0 &&
1133 	    mp->marginal_path_err_sample_time < 2 * IOTIMEOUT_SEC) {
1134 		condlog(2, "%s: configuration error: marginal_path_err_sample_time must be >= %d",
1135 			mp->alias, 2 * IOTIMEOUT_SEC);
1136 		mp->marginal_path_err_sample_time = 2 * IOTIMEOUT_SEC;
1138 	if (print_off_int_undef(&buff, mp->marginal_path_err_sample_time) > 0)
1139 		condlog(3, "%s: marginal_path_err_sample_time = %s %s",
1140 			mp->alias, get_strbuf_str(&buff), origin);
/*
 * select_marginal_path_err_rate_threshold(): same FPIN override +
 * layered lookup pattern.
 */
1144 int select_marginal_path_err_rate_threshold(struct config *conf, struct multipath *mp)
1147 	STRBUF_ON_STACK(buff);
1149 	if (conf->marginal_pathgroups == MARGINAL_PATHGROUP_FPIN) {
1150 		mp->marginal_path_err_rate_threshold = NU_NO;
1151 		origin = fpin_marginal_path_origin;
1155 	mp_set_mpe(marginal_path_err_rate_threshold);
1156 	mp_set_ovr(marginal_path_err_rate_threshold);
1157 	mp_set_hwe(marginal_path_err_rate_threshold);
1158 	mp_set_conf(marginal_path_err_rate_threshold);
1159 	mp_set_default(marginal_path_err_rate_threshold, DEFAULT_ERR_CHECKS);
1161 	if (print_off_int_undef(&buff, mp->marginal_path_err_rate_threshold) > 0)
1162 		condlog(3, "%s: marginal_path_err_rate_threshold = %s %s",
1163 			mp->alias, get_strbuf_str(&buff), origin);
/*
 * select_marginal_path_err_recheck_gap_time(): same pattern.
 */
1167 int select_marginal_path_err_recheck_gap_time(struct config *conf, struct multipath *mp)
1170 	STRBUF_ON_STACK(buff);
1172 	if (conf->marginal_pathgroups == MARGINAL_PATHGROUP_FPIN) {
1173 		mp->marginal_path_err_recheck_gap_time = NU_NO;
1174 		origin = fpin_marginal_path_origin;
1178 	mp_set_mpe(marginal_path_err_recheck_gap_time);
1179 	mp_set_ovr(marginal_path_err_recheck_gap_time);
1180 	mp_set_hwe(marginal_path_err_recheck_gap_time);
1181 	mp_set_conf(marginal_path_err_recheck_gap_time);
1182 	mp_set_default(marginal_path_err_recheck_gap_time, DEFAULT_ERR_CHECKS);
1184 	if (print_off_int_undef(&buff,
1185 				mp->marginal_path_err_recheck_gap_time) > 0)
1186 		condlog(3, "%s: marginal_path_err_recheck_gap_time = %s %s",
1187 			mp->alias, get_strbuf_str(&buff), origin);
/*
 * select_marginal_path_double_failed_time(): same pattern.
 */
1191 int select_marginal_path_double_failed_time(struct config *conf, struct multipath *mp)
1194 	STRBUF_ON_STACK(buff);
1196 	if (conf->marginal_pathgroups == MARGINAL_PATHGROUP_FPIN) {
1197 		mp->marginal_path_double_failed_time = NU_NO;
1198 		origin = fpin_marginal_path_origin;
1202 	mp_set_mpe(marginal_path_double_failed_time);
1203 	mp_set_ovr(marginal_path_double_failed_time);
1204 	mp_set_hwe(marginal_path_double_failed_time);
1205 	mp_set_conf(marginal_path_double_failed_time);
1206 	mp_set_default(marginal_path_double_failed_time, DEFAULT_ERR_CHECKS);
1208 	if (print_off_int_undef(&buff, mp->marginal_path_double_failed_time) > 0)
1209 		condlog(3, "%s: marginal_path_double_failed_time = %s %s",
1210 			mp->alias, get_strbuf_str(&buff), origin);
/*
 * select_skip_kpartx(): layered lookup; when on, kpartx is not run on
 * the map.
 */
1214 int select_skip_kpartx (struct config *conf, struct multipath * mp)
1218 	mp_set_mpe(skip_kpartx);
1219 	mp_set_ovr(skip_kpartx);
1220 	mp_set_hwe(skip_kpartx);
1221 	mp_set_conf(skip_kpartx);
1222 	mp_set_default(skip_kpartx, DEFAULT_SKIP_KPARTX);
1224 	condlog(3, "%s: skip_kpartx = %s %s", mp->alias,
1225 		(mp->skip_kpartx == SKIP_KPARTX_ON)? "yes" : "no",
/*
 * select_max_sectors_kb(): layered lookup of max_sectors_kb; in the
 * default case sysfs is not touched and nothing is logged.
 */
1230 int select_max_sectors_kb(struct config *conf, struct multipath * mp)
1234 	mp_set_mpe(max_sectors_kb);
1235 	mp_set_ovr(max_sectors_kb);
1236 	mp_set_hwe(max_sectors_kb);
1237 	mp_set_conf(max_sectors_kb);
1238 	mp_set_default(max_sectors_kb, DEFAULT_MAX_SECTORS_KB);
1240 	 * In the default case, we will not modify max_sectors_kb in sysfs
1241 	 * (see sysfs_set_max_sectors_kb()).
1242 	 * Don't print a log message here to avoid user confusion.
1246 	condlog(3, "%s: max_sectors_kb = %i %s", mp->alias, mp->max_sectors_kb,
/*
 * select_ghost_delay(): layered lookup of the delay before activating
 * a map whose only usable paths are in ghost (standby) state.
 * Fix: log only when print_off_int_undef() returned "> 0" (characters
 * written), consistent with the other selectors in this file; "!= 0"
 * would also log when the print helper failed with a negative return.
 */
1251 int select_ghost_delay (struct config *conf, struct multipath * mp)
1254 	STRBUF_ON_STACK(buff);
1256 	mp_set_mpe(ghost_delay);
1257 	mp_set_ovr(ghost_delay);
1258 	mp_set_hwe(ghost_delay);
1259 	mp_set_conf(ghost_delay);
1260 	mp_set_default(ghost_delay, DEFAULT_GHOST_DELAY);
1262 	if (print_off_int_undef(&buff, mp->ghost_delay) > 0)
1263 		condlog(3, "%s: ghost_delay = %s %s", mp->alias,
1264 			get_strbuf_str(&buff), origin);
/*
 * select_find_multipaths_timeout(): defaults/devices lookup only.  A
 * negative configured value means "use the short unknown-hardware
 * timeout unless the device has a hwentry"; it is negated (known
 * hardware) or replaced by the small unknown-hardware default.
 */
1268 int select_find_multipaths_timeout(struct config *conf, struct path *pp)
1272 	pp_set_conf(find_multipaths_timeout);
1273 	pp_set_default(find_multipaths_timeout,
1274 		       DEFAULT_FIND_MULTIPATHS_TIMEOUT);
1277 	 * If configured value is negative, and this "unknown" hardware
1278 	 * (no hwentry), use very small timeout to avoid delays.
1280 	if (pp->find_multipaths_timeout < 0) {
1281 		pp->find_multipaths_timeout = -pp->find_multipaths_timeout;
1283 			pp->find_multipaths_timeout =
1284 				DEFAULT_UNKNOWN_FIND_MULTIPATHS_TIMEOUT;
1285 			origin = "(default for unknown hardware)";
1288 	condlog(3, "%s: timeout for find_multipaths \"smart\" = %ds %s",
1289 		pp->dev, pp->find_multipaths_timeout, origin);
/*
 * select_all_tg_pt(): layered lookup; controls whether persistent
 * reservations are registered for all target ports.
 */
1293 int select_all_tg_pt (struct config *conf, struct multipath * mp)
1297 	mp_set_ovr(all_tg_pt);
1298 	mp_set_hwe(all_tg_pt);
1299 	mp_set_conf(all_tg_pt);
1300 	mp_set_default(all_tg_pt, DEFAULT_ALL_TG_PT);
1302 	condlog(3, "%s: all_tg_pt = %s %s", mp->alias,
1303 		(mp->all_tg_pt == ALL_TG_PT_ON)? "yes" : "no",
1308 int select_vpd_vendor_id (struct path *pp)
1312 pp_set_hwe(vpd_vendor_id);
1313 pp_set_default(vpd_vendor_id, 0);
1315 if (pp->vpd_vendor_id < 0 || pp->vpd_vendor_id >= VPD_VP_ARRAY_SIZE) {
1316 condlog(3, "%s: vpd_vendor_id = %d (invalid, setting to 0)",
1317 pp->dev, pp->vpd_vendor_id);
1318 pp->vpd_vendor_id = 0;
1320 condlog(3, "%s: vpd_vendor_id = %d \"%s\" %s", pp->dev,
1321 pp->vpd_vendor_id, vpd_vendor_pages[pp->vpd_vendor_id].name,