endif
ifndef SYSTEMD
- ifeq ($(shell systemctl --version > /dev/null 2>&1 && echo 1), 1)
- SYSTEMD = $(shell systemctl --version 2> /dev/null | sed -n 's/systemd \([0-9]*\)/\1/p')
+ ifeq ($(shell pkg-config --modversion libsystemd >/dev/null 2>&1 && echo 1), 1)
+ SYSTEMD = $(shell pkg-config --modversion libsystemd)
+ else
+ ifeq ($(shell systemctl --version >/dev/null 2>&1 && echo 1), 1)
+ SYSTEMD = $(shell systemctl --version 2> /dev/null | \
+ sed -n 's/systemd \([0-9]*\).*/\1/p')
+ endif
endif
endif
conf->retrigger_tries = DEFAULT_RETRIGGER_TRIES;
conf->retrigger_delay = DEFAULT_RETRIGGER_DELAY;
conf->uev_wait_timeout = DEFAULT_UEV_WAIT_TIMEOUT;
- conf->disable_changed_wwids = DEFAULT_DISABLE_CHANGED_WWIDS;
conf->remove_retries = 0;
conf->ghost_delay = DEFAULT_GHOST_DELAY;
conf->all_tg_pt = DEFAULT_ALL_TG_PT;
int delayed_reconfig;
int uev_wait_timeout;
int skip_kpartx;
- int disable_changed_wwids;
int remove_retries;
int max_sectors_kb;
int ghost_delay;
dm_task_destroy(dmt);
out:
if (r < 0)
- condlog(2, "%s: dm command failed in %s", name, __FUNCTION__);
+ condlog(3, "%s: dm command failed in %s: %s", name, __FUNCTION__, strerror(errno));
return r;
}
}
static int
+print_ignored (char *buff, int len)
+{
+ return snprintf(buff, len, "ignored");
+}
+
+static int
print_yes_no (char *buff, int len, long v)
{
return snprintf(buff, len, "\"%s\"",
declare_hw_snprint(skip_kpartx, print_yes_no_undef)
declare_mp_handler(skip_kpartx, set_yes_no_undef)
declare_mp_snprint(skip_kpartx, print_yes_no_undef)
-
-declare_def_handler(disable_changed_wwids, set_yes_no)
-declare_def_snprint(disable_changed_wwids, print_yes_no)
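+/* disable_changed_wwids is deprecated: accept the keyword but ignore its value */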
+static int def_disable_changed_wwids_handler(struct config *conf, vector strvec)
+{
+ return 0;
+}
+static int snprint_def_disable_changed_wwids(struct config *conf, char *buff,
+ int len, const void *data)
+{
+ return print_ignored(buff, len);
+}
declare_def_handler(remove_retries, set_int)
declare_def_snprint(remove_retries, print_int)
declare_sysfs_get_str(model);
declare_sysfs_get_str(rev);
-ssize_t
-sysfs_get_vpd (struct udev_device * udev, int pg,
- unsigned char * buff, size_t len)
+static ssize_t
+sysfs_get_binary (struct udev_device * udev, const char *attrname,
+ unsigned char *buff, size_t len)
{
ssize_t attr_len;
- char attrname[9];
const char * devname;
if (!udev) {
}
devname = udev_device_get_sysname(udev);
- sprintf(attrname, "vpd_pg%02x", pg);
attr_len = sysfs_bin_attr_get_value(udev, attrname, buff, len);
if (attr_len < 0) {
condlog(3, "%s: attribute %s not found in sysfs",
return attr_len;
}
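+/* Fetch VPD page @pg through the sysfs "vpd_pgXX" binary attribute */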
+ssize_t sysfs_get_vpd(struct udev_device * udev, unsigned char pg,
+ unsigned char *buff, size_t len)
+{
+ char attrname[9];
+
+ snprintf(attrname, sizeof(attrname), "vpd_pg%02x", pg);
+ return sysfs_get_binary(udev, attrname, buff, len);
+}
+
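+/* Fetch standard INQUIRY data through the sysfs "inquiry" attribute */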
+ssize_t sysfs_get_inquiry(struct udev_device * udev,
+ unsigned char *buff, size_t len)
+{
+ return sysfs_get_binary(udev, "inquiry", buff, len);
+}
+
int
-sysfs_get_timeout(struct path *pp, unsigned int *timeout)
+sysfs_get_timeout(const struct path *pp, unsigned int *timeout)
{
const char *attr = NULL;
const char *subsys;
}
static void
-detect_alua(struct path * pp, struct config *conf)
+detect_alua(struct path * pp)
{
int ret;
int tpgs;
- unsigned int timeout = conf->checker_timeout;
+ unsigned int timeout;
- if ((tpgs = get_target_port_group_support(pp->fd, timeout)) <= 0) {
+ if (sysfs_get_timeout(pp, &timeout) <= 0)
+ timeout = DEF_TIMEOUT;
+
+ if ((tpgs = get_target_port_group_support(pp, timeout)) <= 0) {
pp->tpgs = TPGS_NONE;
return;
}
ret = get_target_port_group(pp, timeout);
- if (ret < 0 || get_asymmetric_access_state(pp->fd, ret, timeout) < 0) {
+ if (ret < 0 || get_asymmetric_access_state(pp, ret, timeout) < 0) {
pp->tpgs = TPGS_NONE;
return;
}
pp->tpgs = tpgs;
}
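+/* Run ALUA detection lazily; the cached tpgs value is never left as TPGS_UNDEF */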
+int path_get_tpgs(struct path *pp)
+{
+ if (pp->tpgs == TPGS_UNDEF)
+ detect_alua(pp);
+ return pp->tpgs;
+}
+
#define DEFAULT_SGIO_LEN 254
/* Query VPD page @pg. Returns number of INQUIRY bytes
struct udev_device *parent;
const char *attr_path = NULL;
- if (pp->tpgs == TPGS_UNDEF)
- detect_alua(pp, conf);
-
if (!(mask & DI_SERIAL))
return;
return get_vpd_sysfs(parent, 0x83, pp->wwid, WWID_SIZE);
}
-static ssize_t scsi_uid_fallback(struct path *pp, int path_state,
- const char **origin)
+static ssize_t uid_fallback(struct path *pp, int path_state,
+ const char **origin)
{
- ssize_t len = 0;
- int retrigger;
- struct config *conf;
+ ssize_t len = -1;
- conf = get_multipath_config();
- retrigger = conf->retrigger_tries;
- put_multipath_config(conf);
- if (pp->retriggers >= retrigger &&
+ if (pp->bus == SYSFS_BUS_SCSI &&
!strcmp(pp->uid_attribute, DEFAULT_UID_ATTRIBUTE)) {
len = get_vpd_uid(pp);
*origin = "sysfs";
- pp->uid_attribute = NULL;
if (len < 0 && path_state == PATH_UP) {
condlog(1, "%s: failed to get sysfs uid: %s",
pp->dev, strerror(-len));
WWID_SIZE);
*origin = "sgio";
}
+ } else if (pp->bus == SYSFS_BUS_NVME) {
+ char value[256];
+ len = sysfs_attr_get_value(pp->udev, "wwid", value,
+ sizeof(value));
+ if (len <= 0)
+ return -1;
+ len = strlcpy(pp->wwid, value, WWID_SIZE);
+ if (len >= WWID_SIZE) {
+ len = fix_broken_nvme_wwid(pp, value,
+ WWID_SIZE);
+ if (len > 0)
+ return len;
+ condlog(0, "%s: wwid overflow", pp->dev);
+ len = WWID_SIZE;
+ }
+ *origin = "sysfs";
}
return len;
}
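+/* A uid fallback is only possible for SCSI paths using the default uid attribute and for NVMe paths */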
+static int has_uid_fallback(struct path *pp)
+{
+ return ((pp->bus == SYSFS_BUS_SCSI &&
+ !strcmp(pp->uid_attribute, DEFAULT_UID_ATTRIBUTE)) ||
+ pp->bus == SYSFS_BUS_NVME);
+}
+
int
-get_uid (struct path * pp, int path_state, struct udev_device *udev)
+get_uid (struct path * pp, int path_state, struct udev_device *udev,
+ int allow_fallback)
{
char *c;
const char *origin = "unknown";
ssize_t len = 0;
struct config *conf;
+ int used_fallback = 0;
if (!pp->uid_attribute && !pp->getuid) {
conf = get_multipath_config();
len = get_vpd_uid(pp);
origin = "sysfs";
}
- if (len <= 0 && pp->bus == SYSFS_BUS_SCSI)
- len = scsi_uid_fallback(pp, path_state, &origin);
+ if (len <= 0 && allow_fallback && has_uid_fallback(pp)) {
+ used_fallback = 1;
+ len = uid_fallback(pp, path_state, &origin);
+ }
}
if ( len < 0 ) {
condlog(1, "%s: failed to get %s uid: %s",
c--;
}
}
- condlog(3, "%s: uid = %s (%s)", pp->dev,
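+ /* log more loudly if the wwid had to be obtained through the fallback */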
+ condlog((used_fallback)? 1 : 3, "%s: uid = %s (%s)", pp->dev,
*pp->wwid == '\0' ? "<empty>" : pp->wwid, origin);
return 0;
}
if (path_state == PATH_REMOVED)
goto blank;
else if (mask & DI_NOIO) {
- /*
- * Avoid any IO on the device itself.
- * Behave like DI_CHECKER in the "path unavailable" case.
- */
- pp->chkrstate = pp->state = path_state;
+ if (mask & DI_CHECKER)
+ /*
+ * Avoid any IO on the device itself.
+ * Simply use the path_offline() return value as its state.
+ */
+ pp->chkrstate = pp->state = path_state;
return PATHINFO_OK;
}
if (mask & DI_CHECKER) {
if (path_state == PATH_UP) {
- pp->chkrstate = pp->state = get_state(pp, conf, 0,
- path_state);
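+ /* don't overwrite a previously known path state with PATH_PENDING */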
+ int newstate = get_state(pp, conf, 0, path_state);
+ if (newstate != PATH_PENDING ||
+ pp->state == PATH_UNCHECKED ||
+ pp->state == PATH_WILD)
+ pp->chkrstate = pp->state = newstate;
if (pp->state == PATH_TIMEOUT)
pp->state = PATH_DOWN;
if (pp->state == PATH_UP && !pp->size) {
}
if ((mask & DI_WWID) && !strlen(pp->wwid)) {
- get_uid(pp, path_state, pp->udev);
+ get_uid(pp, path_state, pp->udev,
+ (pp->retriggers >= conf->retrigger_tries));
if (!strlen(pp->wwid)) {
if (pp->bus == SYSFS_BUS_UNDEF)
return PATHINFO_SKIPPED;
struct config;
int path_discovery (vector pathvec, int flag);
-
+int path_get_tpgs(struct path *pp); /* This function never returns TPGS_UNDEF */
int do_tur (char *);
int path_offline (struct path *);
int get_state (struct path * pp, struct config * conf, int daemon, int state);
struct udev_device *udevice, int flag,
struct path **pp_ptr);
int sysfs_set_scsi_tmo (struct multipath *mpp, int checkint);
-int sysfs_get_timeout(struct path *pp, unsigned int *timeout);
+int sysfs_get_timeout(const struct path *pp, unsigned int *timeout);
int sysfs_get_host_pci_name(const struct path *pp, char *pci_name);
int sysfs_get_iscsi_ip_address(const struct path *pp, char *ip_address);
int sysfs_get_host_adapter_name(const struct path *pp,
char *adapter_name);
-ssize_t sysfs_get_vpd (struct udev_device * udev, int pg, unsigned char * buff,
- size_t len);
+ssize_t sysfs_get_vpd (struct udev_device *udev, unsigned char pg,
+ unsigned char *buff, size_t len);
+ssize_t sysfs_get_inquiry(struct udev_device *udev,
+ unsigned char *buff, size_t len);
int sysfs_get_asymmetric_access_state(struct path *pp,
char *buff, int buflen);
-int get_uid(struct path * pp, int path_state, struct udev_device *udev);
+int get_uid(struct path * pp, int path_state, struct udev_device *udev,
+ int allow_fallback);
/*
* discovery bitmask
.pgpolicy = MULTIBUS,
},
{
- /* Storwize family / SAN Volume Controller / Flex System V7000 / FlashSystem V840/V9000 */
+ /* Storwize family / SAN Volume Controller / Flex System V7000 / FlashSystem V840/V9000/9100 */
.vendor = "IBM",
.product = "^2145",
.no_path_retry = NO_PATH_RETRY_QUEUE,
.no_path_retry = (300 / DEFAULT_CHECKINT),
.prio_name = PRIO_ALUA,
},
+ /*
+ * Lenovo
+ */
+ {
+ /*
+ * DE Series
+ *
+ * Maintainer: ng-eseries-upstream-maintainers@netapp.com
+ */
+ .vendor = "LENOVO",
+ .product = "DE_Series",
+ .bl_product = "Universal Xport",
+ .pgpolicy = GROUP_BY_PRIO,
+ .checker_name = RDAC,
+ .features = "2 pg_init_retries 50",
+ .hwhandler = "1 rdac",
+ .prio_name = PRIO_RDAC,
+ .pgfailback = -FAILBACK_IMMEDIATE,
+ .no_path_retry = 30,
+ },
/*
* NetApp
*/
.flush_on_last_del = FLUSH_ENABLED,
.dev_loss = MAX_DEV_LOSS_TMO,
.prio_name = PRIO_ONTAP,
+ .user_friendly_names = USER_FRIENDLY_NAMES_OFF,
},
{
/*
.no_path_retry = 30,
},
/*
- * Xiotech
- */
- {
- /* Intelligent Storage Elements family */
- .vendor = "(XIOTECH|XIOtech)",
- .product = "ISE",
- .pgpolicy = MULTIBUS,
- .no_path_retry = 12,
- },
- {
- /* iglu blaze family */
- .vendor = "(XIOTECH|XIOtech)",
- .product = "IGLU DISK",
- .pgpolicy = MULTIBUS,
- .no_path_retry = 30,
- },
- {
- /* Magnitude family */
- .vendor = "(XIOTECH|XIOtech)",
- .product = "Magnitude",
- .pgpolicy = MULTIBUS,
- .no_path_retry = 30,
- },
- /*
- * Violin Memory
+ * Violin Systems
*/
{
/* 3000 / 6000 Series */
.pgpolicy = MULTIBUS,
.no_path_retry = 30,
},
+ /* Xiotech */
+ {
+ /* Intelligent Storage Elements family */
+ .vendor = "(XIOTECH|XIOtech)",
+ .product = "ISE",
+ .pgpolicy = MULTIBUS,
+ .no_path_retry = 12,
+ },
+ {
+ /* iglu blaze family */
+ .vendor = "(XIOTECH|XIOtech)",
+ .product = "IGLU DISK",
+ .pgpolicy = MULTIBUS,
+ .no_path_retry = 30,
+ },
+ {
+ /* Magnitude family */
+ .vendor = "(XIOTECH|XIOtech)",
+ .product = "Magnitude",
+ .pgpolicy = MULTIBUS,
+ .no_path_retry = 30,
+ },
/*
* Promise Technology
*/
#define CONCUR_NR_EVENT 32
#define PATH_IO_ERR_IN_CHECKING -1
-#define PATH_IO_ERR_IN_POLLING_RECHECK -2
+#define PATH_IO_ERR_WAITING_TO_CHECK -2
#define io_err_stat_log(prio, fmt, args...) \
condlog(prio, "io error statistic: " fmt, ##args)
* return value
* 0: enqueue OK
* 1: fails because of internal error
- * 2: fails because of existing already
*/
static int enqueue_io_err_stat_by_path(struct path *path)
{
p = find_err_path_by_dev(paths->pathvec, path->dev);
if (p) {
pthread_mutex_unlock(&paths->mutex);
- return 2;
+ return 0;
}
pthread_mutex_unlock(&paths->mutex);
vector_set_slot(paths->pathvec, p);
pthread_mutex_unlock(&paths->mutex);
- if (!path->io_err_disable_reinstate) {
- /*
- *fail the path in the kernel for the time of the to make
- *the test more reliable
- */
- io_err_stat_log(3, "%s: fail dm path %s before checking",
- path->mpp->alias, path->dev);
- path->io_err_disable_reinstate = 1;
- dm_fail_path(path->mpp->alias, path->dev_t);
- update_queue_mode_del_path(path->mpp);
-
- /*
- * schedule path check as soon as possible to
- * update path state to delayed state
- */
- path->tick = 1;
-
- }
io_err_stat_log(2, "%s: enqueue path %s to check",
path->mpp->alias, path->dev);
return 0;
int io_err_stat_handle_pathfail(struct path *path)
{
struct timespec curr_time;
- int res;
if (uatomic_read(&io_err_thread_running) == 0)
return 1;
if (!path->mpp)
return 1;
- if (path->mpp->nr_active <= 1)
- return 1;
if (path->mpp->marginal_path_double_failed_time <= 0 ||
path->mpp->marginal_path_err_sample_time <= 0 ||
path->mpp->marginal_path_err_recheck_gap_time <= 0 ||
}
path->io_err_pathfail_cnt++;
if (path->io_err_pathfail_cnt >= FLAKY_PATHFAIL_THRESHOLD) {
- res = enqueue_io_err_stat_by_path(path);
- if (!res)
- path->io_err_pathfail_cnt = PATH_IO_ERR_IN_CHECKING;
- else
- path->io_err_pathfail_cnt = 0;
+ path->io_err_disable_reinstate = 1;
+ path->io_err_pathfail_cnt = PATH_IO_ERR_WAITING_TO_CHECK;
+ /* enqueue path as soon as it comes up */
+ path->io_err_dis_reinstate_time = 0;
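+ /* fail the path now so it is not reinstated before the extra IO check runs */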
+ if (path->state != PATH_DOWN) {
+ struct config *conf;
+ int oldstate = path->state;
+ int checkint;
+
+ conf = get_multipath_config();
+ checkint = conf->checkint;
+ put_multipath_config(conf);
+ io_err_stat_log(2, "%s: mark as failed", path->dev);
+ path->mpp->stat_path_failures++;
+ path->state = PATH_DOWN;
+ path->dmstate = PSTATE_FAILED;
+ if (oldstate == PATH_UP || oldstate == PATH_GHOST)
+ update_queue_mode_del_path(path->mpp);
+ if (path->tick > checkint)
+ path->tick = checkint;
+ }
}
return 0;
}
-int hit_io_err_recheck_time(struct path *pp)
+int need_io_err_check(struct path *pp)
{
struct timespec curr_time;
int r;
io_err_stat_log(2, "%s: recover path early", pp->dev);
goto recover;
}
- if (pp->io_err_pathfail_cnt != PATH_IO_ERR_IN_POLLING_RECHECK)
+ if (pp->io_err_pathfail_cnt != PATH_IO_ERR_WAITING_TO_CHECK)
return 1;
if (clock_gettime(CLOCK_MONOTONIC, &curr_time) != 0 ||
(curr_time.tv_sec - pp->io_err_dis_reinstate_time) >
io_err_stat_log(4, "%s: reschedule checking after %d seconds",
pp->dev,
pp->mpp->marginal_path_err_recheck_gap_time);
- /*
- * to reschedule io error checking again
- * if the path is good enough, we claim it is good
- * and can be reinsated as soon as possible in the
- * check_path routine.
- */
- pp->io_err_dis_reinstate_time = curr_time.tv_sec;
r = enqueue_io_err_stat_by_path(pp);
/*
* Enqueue fails because of internal error.
io_err_stat_log(3, "%s: enqueue fails, to recover",
pp->dev);
goto recover;
- } else if (!r) {
+ } else
pp->io_err_pathfail_cnt = PATH_IO_ERR_IN_CHECKING;
- }
}
return 1;
recover:
pp->io_err_pathfail_cnt = 0;
pp->io_err_disable_reinstate = 0;
- pp->tick = 1;
return 0;
}
*/
path->tick = 1;
- } else if (path->mpp && path->mpp->nr_active > 1) {
+ } else if (path->mpp && path->mpp->nr_active > 0) {
io_err_stat_log(3, "%s: keep failing the dm path %s",
path->mpp->alias, path->dev);
- path->io_err_pathfail_cnt = PATH_IO_ERR_IN_POLLING_RECHECK;
+ path->io_err_pathfail_cnt = PATH_IO_ERR_WAITING_TO_CHECK;
path->io_err_disable_reinstate = 1;
path->io_err_dis_reinstate_time = currtime.tv_sec;
io_err_stat_log(3, "%s: disable reinstating of %s",
int start_io_err_stat_thread(void *data);
void stop_io_err_stat_thread(void);
int io_err_stat_handle_pathfail(struct path *path);
-int hit_io_err_recheck_time(struct path *pp);
+int need_io_err_check(struct path *pp);
#endif /* _IO_ERR_STAT_H */
all: $(LIBS)
-libprioalua.so: alua.o alua_rtpg.o
- $(CC) $(LDFLAGS) $(SHARED_FLAGS) -o $@ $^
-
libpriopath_latency.so: path_latency.o ../checkers/libsg.o
$(CC) $(LDFLAGS) $(SHARED_FLAGS) -o $@ $^ -lm
tpg = get_target_port_group(pp, timeout);
if (tpg < 0) {
- rc = get_target_port_group_support(pp->fd, timeout);
+ rc = get_target_port_group_support(pp, timeout);
if (rc < 0)
return -ALUA_PRIO_TPGS_FAILED;
if (rc == TPGS_NONE)
return -ALUA_PRIO_RTPG_FAILED;
}
condlog(3, "%s: reported target port group is %i", pp->dev, tpg);
- rc = get_asymmetric_access_state(pp->fd, tpg, timeout);
+ rc = get_asymmetric_access_state(pp, tpg, timeout);
if (rc < 0) {
condlog(2, "%s: get_asymmetric_access_state returned %d",
__func__, rc);
/*
* Helper function to setup and run a SCSI inquiry command.
*/
-int
-do_inquiry(int fd, int evpd, unsigned int codepage,
- void *resp, int resplen, unsigned int timeout)
+static int
+do_inquiry_sg(int fd, int evpd, unsigned int codepage,
+ void *resp, int resplen, unsigned int timeout)
{
struct inquiry_command cmd;
struct sg_io_hdr hdr;
return 0;
}
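+/* Run an INQUIRY, preferring cached sysfs data over an SG_IO ioctl */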
+int do_inquiry(const struct path *pp, int evpd, unsigned int codepage,
+ void *resp, int resplen, unsigned int timeout)
+{
+ struct udev_device *ud;
+
+ ud = udev_device_get_parent_with_subsystem_devtype(pp->udev, "scsi",
+ "scsi_device");
+ if (ud != NULL) {
+ int rc;
+
+ if (!evpd)
+ rc = sysfs_get_inquiry(ud, resp, resplen);
+ else
+ rc = sysfs_get_vpd(ud, codepage, resp, resplen);
+
+ if (rc >= 0) {
+ PRINT_HEX((unsigned char *) resp, resplen);
+ return 0;
+ }
+ }
+ return do_inquiry_sg(pp->fd, evpd, codepage, resp, resplen, timeout);
+}
+
/*
* This function returns the support for target port groups by evaluating the
* data returned by the standard inquiry command.
*/
int
-get_target_port_group_support(int fd, unsigned int timeout)
+get_target_port_group_support(const struct path *pp, unsigned int timeout)
{
struct inquiry_data inq;
int rc;
memset((unsigned char *)&inq, 0, sizeof(inq));
- rc = do_inquiry(fd, 0, 0x00, &inq, sizeof(inq), timeout);
+ rc = do_inquiry(pp, 0, 0x00, &inq, sizeof(inq), timeout);
if (!rc) {
rc = inquiry_data_get_tpgs(&inq);
}
}
static int
-get_sysfs_pg83(struct path *pp, unsigned char *buff, int buflen)
+get_sysfs_pg83(const struct path *pp, unsigned char *buff, int buflen)
{
struct udev_device *parent = pp->udev;
}
int
-get_target_port_group(struct path * pp, unsigned int timeout)
+get_target_port_group(const struct path * pp, unsigned int timeout)
{
unsigned char *buf;
struct vpd83_data * vpd83;
rc = get_sysfs_pg83(pp, buf, buflen);
if (rc < 0) {
- rc = do_inquiry(pp->fd, 1, 0x83, buf, buflen, timeout);
+ rc = do_inquiry(pp, 1, 0x83, buf, buflen, timeout);
if (rc < 0)
goto out;
}
buflen = scsi_buflen;
memset(buf, 0, buflen);
- rc = do_inquiry(pp->fd, 1, 0x83, buf, buflen, timeout);
+ rc = do_inquiry(pp, 1, 0x83, buf, buflen, timeout);
if (rc < 0)
goto out;
}
}
int
-get_asymmetric_access_state(int fd, unsigned int tpg, unsigned int timeout)
+get_asymmetric_access_state(const struct path *pp, unsigned int tpg,
+ unsigned int timeout)
{
unsigned char *buf;
struct rtpg_data * tpgd;
int rc;
int buflen;
uint64_t scsi_buflen;
+ int fd = pp->fd;
buflen = 4096;
buf = (unsigned char *)malloc(buflen);
#define RTPG_RTPG_FAILED 3
#define RTPG_TPG_NOT_FOUND 4
-int get_target_port_group_support(int fd, unsigned int timeout);
-int get_target_port_group(struct path * pp, unsigned int timeout);
-int get_asymmetric_access_state(int fd, unsigned int tpg, unsigned int timeout);
+int get_target_port_group_support(const struct path *pp, unsigned int timeout);
+int get_target_port_group(const struct path *pp, unsigned int timeout);
+int get_asymmetric_access_state(const struct path *pp,
+ unsigned int tpg, unsigned int timeout);
#endif /* __RTPG_H__ */
} \
} while(0)
-#define do_set_from_vec(type, var, src, dest, msg) \
-do { \
+#define __do_set_from_vec(type, var, src, dest) \
+({ \
type *_p; \
+ bool _found = false; \
int i; \
\
vector_foreach_slot(src, _p, i) { \
if (_p->var) { \
dest = _p->var; \
- origin = msg; \
- goto out; \
+ _found = true; \
+ break; \
} \
} \
-} while (0)
+ _found; \
+})
-#define do_set_from_hwe(var, src, dest, msg) \
- do_set_from_vec(struct hwentry, var, src->hwe, dest, msg)
+#define __do_set_from_hwe(var, src, dest) \
+ __do_set_from_vec(struct hwentry, var, (src)->hwe, dest)
+
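+/* Like __do_set_from_hwe(), but records @msg as the origin and jumps to the out label on success */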
+#define do_set_from_hwe(var, src, dest, msg) \
+ if (__do_set_from_hwe(var, src, dest)) { \
+ origin = msg; \
+ goto out; \
+ }
static const char default_origin[] = "(setting: multipath internal)";
static const char hwe_origin[] =
dh_state = &handler[2];
vector_foreach_slot(mp->paths, pp, i)
- all_tpgs = all_tpgs && (pp->tpgs > 0);
+ all_tpgs = all_tpgs && (path_get_tpgs(pp) > 0);
if (mp->retain_hwhandler != RETAIN_HWHANDLER_OFF) {
vector_foreach_slot(mp->paths, pp, i) {
if (get_dh_state(pp, dh_state, sizeof(handler) - 2) > 0
{
int len;
char buff[44];
+ const char *checker_name;
if (pp->bus != SYSFS_BUS_SCSI)
return 0;
+ /* Avoid ioctl if this is likely not an RDAC array */
+ if (__do_set_from_hwe(checker_name, pp, checker_name) &&
+ strcmp(checker_name, RDAC))
+ return 0;
len = get_vpd_sgio(pp->fd, 0xC9, buff, 44);
if (len <= 0)
return 0;
if (check_rdac(pp)) {
ckr_name = RDAC;
goto out;
- } else if (pp->tpgs > 0) {
+ } else if (path_get_tpgs(pp) != TPGS_NONE) {
ckr_name = TUR;
goto out;
}
struct prio *p = &pp->prio;
char buff[512];
char *default_prio;
+ int tpgs;
switch(pp->bus) {
case SYSFS_BUS_NVME:
default_prio = PRIO_ANA;
break;
case SYSFS_BUS_SCSI:
- if (pp->tpgs <= 0)
+ tpgs = path_get_tpgs(pp);
+ if (tpgs == TPGS_NONE)
return;
- if ((pp->tpgs == 2 || !check_rdac(pp)) &&
+ if ((tpgs == TPGS_EXPLICIT || !check_rdac(pp)) &&
sysfs_get_asymmetric_access_state(pp, buff, 512) >= 0)
default_prio = PRIO_SYSFS;
else
const char *origin;
struct mpentry * mpe;
struct prio * p = &pp->prio;
+ int log_prio = 3;
if (pp->detect_prio == DETECT_PRIO_ON) {
detect_prio(conf, pp);
* fetch tpgs mode for alua, if its not already obtained
*/
if (!strncmp(prio_name(p), PRIO_ALUA, PRIO_NAME_LEN)) {
- int tpgs = 0;
- unsigned int timeout = conf->checker_timeout;
+ int tpgs = path_get_tpgs(pp);
- if(!pp->tpgs &&
- (tpgs = get_target_port_group_support(pp->fd, timeout)) >= 0)
- pp->tpgs = tpgs;
+ if (tpgs == TPGS_NONE) {
+ prio_get(conf->multipath_dir,
+ p, DEFAULT_PRIO, DEFAULT_PRIO_ARGS);
+ origin = "(setting: emergency fallback - alua failed)";
+ log_prio = 1;
+ }
}
- condlog(3, "%s: prio = %s %s", pp->dev, prio_name(p), origin);
+ condlog(log_prio, "%s: prio = %s %s", pp->dev, prio_name(p), origin);
condlog(3, "%s: prio args = \"%s\" %s", pp->dev, prio_args(p), origin);
return 0;
}
int fd;
int initialized;
int retriggers;
- int wwid_changed;
unsigned int path_failures;
time_t dis_reinstate_time;
int disable_reinstate;
#ifndef _VERSION_H
#define _VERSION_H
-#define VERSION_CODE 0x000800
-#define DATE_CODE 0x020e13
+#define VERSION_CODE 0x000801
+#define DATE_CODE 0x041213
#define PROG "multipath-tools"
pp->udev = get_udev_device(pp->dev_t, DEV_DEVT);
if (pp->udev == NULL)
continue;
- if (pathinfo(pp, conf, DI_SYSFS|DI_NOIO) != PATHINFO_OK)
+ if (pathinfo(pp, conf, DI_SYSFS|DI_NOIO|DI_CHECKER) != PATHINFO_OK)
continue;
if (pp->state == PATH_UP &&
The udev attribute providing a unique path identifier.
.RS
.TP
-The default is: for SCSI devices \fBID_SERIAL\fR
+The default is: \fBID_SERIAL\fR, for SCSI devices
.TP
-The default is: for DASD devices \fBID_UID\fR
+The default is: \fBID_UID\fR, for DASD devices
.TP
-The default is: for NVME devices \fBID_WWN\fR
+The default is: \fBID_WWN\fR, for NVMe devices
.RE
.
.
will disable the timeout.
.RS
.TP
-The default is: in \fB5\fR
+The default is: \fB5\fR
.RE
.
.
\fIreservation_key\fR is set to \fBfile\fR.
.RS
.TP
-The default is \fB/etc/multipath/prkeys\fR
+The default is: \fB/etc/multipath/prkeys\fR
.RE
.
.
.
.TP
.B disable_changed_wwids
-If set to \fIyes\fR, multipathd will check the path wwid on change events, and
-if it has changed from the wwid of the multipath device, multipathd will
-disable access to the path until the wwid changes back.
-.RS
-.TP
-The default is: \fBno\fR
+This option is deprecated and ignored. If the WWID of a path suddenly changes,
+multipathd handles it as if it had been removed and then added again.
.RE
.
.
makes multipath immediately mark a device with only ghost paths as ready.
.RS
.TP
-The default is \fBno\fR
+The default is: \fBno\fR
.RE
.
.
.TP
.B uid_attribute
.TP
+.B getuid_callout
+.TP
.B path_selector
.TP
.B path_checker
.TP
.B flush_on_last_del
.TP
+.B user_friendly_names
+.TP
.B retain_attached_hw_handler
.TP
.B detect_prio
.B max_sectors_kb
.TP
.B ghost_delay
+.TP
+.B all_tg_pt
.RE
.PD
.LP
.TP
.B skip_kpartx
.TP
+.B max_sectors_kb
+.TP
.B ghost_delay
+.TP
+.B all_tg_pt
.RE
.PD
.LP
return 1;
}
- return reload_map(vecs, mpp, 0, 1);
+ return update_path_groups(mpp, vecs, 0);
}
int resize_map(struct multipath *mpp, unsigned long long size,
#else
int poll_dmevents = 1;
#endif
+/* Don't access this variable without holding config_lock */
enum daemon_status running_state = DAEMON_INIT;
pid_t daemon_pid;
pthread_mutex_t config_lock = PTHREAD_MUTEX_INITIALIZER;
pthread_cond_t config_cond;
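+/* Read running_state while holding config_lock */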
+static inline enum daemon_status get_running_state(void)
+{
+ enum daemon_status st;
+
+ pthread_mutex_lock(&config_lock);
+ st = running_state;
+ pthread_mutex_unlock(&config_lock);
+ return st;
+}
+
/*
* global copy of vecs for use in sig handlers
*/
const char *
daemon_status(void)
{
- switch (running_state) {
+ switch (get_running_state()) {
case DAEMON_INIT:
return "init";
case DAEMON_START:
/*
* I love you too, systemd ...
*/
-const char *
-sd_notify_status(void)
+static const char *
+sd_notify_status(enum daemon_status state)
{
- switch (running_state) {
+ switch (state) {
case DAEMON_INIT:
return "STATUS=init";
case DAEMON_START:
}
#ifdef USE_SYSTEMD
-static void do_sd_notify(enum daemon_status old_state)
+static void do_sd_notify(enum daemon_status old_state,
+ enum daemon_status new_state)
{
/*
* Checkerloop switches back and forth between idle and running state.
* No need to tell systemd each time.
* These notifications cause a lot of overhead on dbus.
*/
- if ((running_state == DAEMON_IDLE || running_state == DAEMON_RUNNING) &&
+ if ((new_state == DAEMON_IDLE || new_state == DAEMON_RUNNING) &&
(old_state == DAEMON_IDLE || old_state == DAEMON_RUNNING))
return;
- sd_notify(0, sd_notify_status());
+ sd_notify(0, sd_notify_status(new_state));
}
#endif
pthread_mutex_unlock(&config_lock);
}
+/* must be called with config_lock held */
static void __post_config_state(enum daemon_status state)
{
if (state != running_state && running_state != DAEMON_SHUTDOWN) {
running_state = state;
pthread_cond_broadcast(&config_cond);
#ifdef USE_SYSTEMD
- do_sd_notify(old_state);
+ do_sd_notify(old_state, state);
#endif
}
}
&config_lock, &ts);
}
}
- if (!rc) {
+ if (!rc && (running_state != DAEMON_SHUTDOWN)) {
running_state = state;
pthread_cond_broadcast(&config_cond);
#ifdef USE_SYSTEMD
- do_sd_notify(old_state);
+ do_sd_notify(old_state, state);
#endif
}
}
default:
if (mpp->nr_active > 0) {
mpp->retry_tick = 0;
- dm_queue_if_no_path(mpp->alias, 1);
+ if (!is_queueing)
+ dm_queue_if_no_path(mpp->alias, 1);
} else if (is_queueing && mpp->retry_tick == 0)
enter_recovery_mode(mpp);
break;
}
if (mpp && mpp->wait_for_udev &&
(pathcount(mpp, PATH_UP) > 0 ||
- (pathcount(mpp, PATH_GHOST) > 0 && pp->tpgs != TPGS_IMPLICIT &&
+ (pathcount(mpp, PATH_GHOST) > 0 &&
+ path_get_tpgs(pp) != TPGS_IMPLICIT &&
mpp->ghost_delay_tick <= 0))) {
/* if wait_for_udev is set and valid paths exist */
condlog(3, "%s: delaying path addition until %s is fully initialized",
int ro, retval = 0, rc;
struct path * pp;
struct config *conf;
- int disable_changed_wwids;
int needs_reinit = 0;
switch ((rc = change_foreign(uev->udev))) {
break;
}
- conf = get_multipath_config();
- disable_changed_wwids = conf->disable_changed_wwids;
- put_multipath_config(conf);
-
- ro = uevent_get_disk_ro(uev);
-
pthread_cleanup_push(cleanup_lock, &vecs->lock);
lock(&vecs->lock);
pthread_testcancel();
goto out;
strcpy(wwid, pp->wwid);
- get_uid(pp, pp->state, uev->udev);
+ rc = get_uid(pp, pp->state, uev->udev, 0);
- if (strncmp(wwid, pp->wwid, WWID_SIZE) != 0) {
- condlog(0, "%s: path wwid changed from '%s' to '%s'. %s",
- uev->kernel, wwid, pp->wwid,
- (disable_changed_wwids ? "disallowing" :
- "continuing"));
+ if (rc != 0)
strcpy(pp->wwid, wwid);
- if (disable_changed_wwids) {
- if (!pp->wwid_changed) {
- pp->wwid_changed = 1;
- pp->tick = 1;
- if (pp->mpp)
- dm_fail_path(pp->mpp->alias, pp->dev_t);
- }
- goto out;
- }
+ else if (strncmp(wwid, pp->wwid, WWID_SIZE) != 0) {
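+ /* the wwid really changed: remove the path and let it be re-added with the new wwid */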
+ condlog(0, "%s: path wwid changed from '%s' to '%s'",
+ uev->kernel, wwid, pp->wwid);
+ ev_remove_path(pp, vecs, 1);
+ needs_reinit = 1;
+ goto out;
} else {
- pp->wwid_changed = 0;
udev_device_unref(pp->udev);
pp->udev = udev_device_ref(uev->udev);
conf = get_multipath_config();
pthread_cleanup_pop(1);
}
+ ro = uevent_get_disk_ro(uev);
if (mpp && ro >= 0) {
condlog(2, "%s: update path write_protect to '%d' (uevent)", uev->kernel, ro);
else {
if (ro == 1)
pp->mpp->force_readonly = 1;
- retval = reload_map(vecs, mpp, 0, 1);
- pp->mpp->force_readonly = 0;
- condlog(2, "%s: map %s reloaded (retval %d)",
- uev->kernel, mpp->alias, retval);
+ retval = update_path_groups(mpp, vecs, 0);
+ if (retval == 2)
+ condlog(2, "%s: map removed during reload", pp->dev);
+ else {
+ pp->mpp->force_readonly = 0;
+ condlog(2, "%s: map %s reloaded (retval %d)", uev->kernel, mpp->alias, retval);
+ }
}
}
}
int r = 0;
struct vectors * vecs;
struct uevent *merge_uev, *tmp;
+ enum daemon_status state;
vecs = (struct vectors *)trigger_data;
pthread_cleanup_push(config_cleanup, NULL);
pthread_mutex_lock(&config_lock);
- if (running_state != DAEMON_IDLE &&
- running_state != DAEMON_RUNNING)
+ while (running_state != DAEMON_IDLE &&
+ running_state != DAEMON_RUNNING &&
+ running_state != DAEMON_SHUTDOWN)
pthread_cond_wait(&config_cond, &config_lock);
+ state = running_state;
pthread_cleanup_pop(1);
- if (running_state == DAEMON_SHUTDOWN)
+ if (state == DAEMON_SHUTDOWN)
return 0;
/*
dm_lib_release();
if (setup_multipath(vecs, mpp) != 0)
- return 1;
+ return 2;
sync_map_state(mpp);
return 0;
if (newstate == PATH_REMOVED)
newstate = PATH_DOWN;
- if (pp->wwid_changed) {
- condlog(2, "%s: path wwid has changed. Refusing to use",
- pp->dev);
- newstate = PATH_DOWN;
- }
-
if (newstate == PATH_WILD || newstate == PATH_UNCHECKED) {
condlog(2, "%s: unusable path (%s) - checker failed",
pp->dev, checker_state_name(newstate));
/* if update_multipath_strings orphaned the path, quit early */
if (!pp->mpp)
return 0;
+ set_no_path_retry(pp->mpp);
if ((newstate == PATH_UP || newstate == PATH_GHOST) &&
check_path_reinstate_state(pp)) {
return 1;
}
- if (pp->io_err_disable_reinstate && hit_io_err_recheck_time(pp)) {
+ if ((newstate == PATH_UP || newstate == PATH_GHOST) &&
+ pp->io_err_disable_reinstate && need_io_err_check(pp)) {
pp->state = PATH_SHAKY;
/*
* to reschedule as soon as possible,so that this path can
* paths if there are no other active paths in map.
*/
disable_reinstate = (newstate == PATH_GHOST &&
- pp->mpp->nr_active == 0 &&
- pp->tpgs == TPGS_IMPLICIT) ? 1 : 0;
+ pp->mpp->nr_active == 0 &&
+ path_get_tpgs(pp) == TPGS_IMPLICIT) ? 1 : 0;
pp->chkrstate = newstate;
if (newstate != pp->state) {
struct config *conf;
char *envp;
int queue_without_daemon;
+ enum daemon_status state;
mlockall(MCL_CURRENT | MCL_FUTURE);
signal_init();
/* Wait for uxlsnr startup */
while (running_state == DAEMON_IDLE)
pthread_cond_wait(&config_cond, &config_lock);
+ state = running_state;
}
pthread_cleanup_pop(1);
condlog(0, "failed to create cli listener: %d", rc);
goto failed;
}
- else if (running_state != DAEMON_CONFIGURE) {
+ else if (state != DAEMON_CONFIGURE) {
condlog(0, "cli listener failed to start");
goto failed;
}
}
pthread_attr_destroy(&misc_attr);
- while (running_state != DAEMON_SHUTDOWN) {
+ while (1) {
pthread_cleanup_push(config_cleanup, NULL);
pthread_mutex_lock(&config_lock);
- if (running_state != DAEMON_CONFIGURE &&
- running_state != DAEMON_SHUTDOWN) {
+ while (running_state != DAEMON_CONFIGURE &&
+ running_state != DAEMON_SHUTDOWN)
pthread_cond_wait(&config_cond, &config_lock);
- }
+ state = running_state;
pthread_cleanup_pop(1);
- if (running_state == DAEMON_CONFIGURE) {
+ if (state == DAEMON_SHUTDOWN)
+ break;
+ if (state == DAEMON_CONFIGURE) {
pthread_cleanup_push(cleanup_lock, &vecs->lock);
lock(&vecs->lock);
pthread_testcancel();
ANNOTATE_BENIGN_RACE_SIZED(&multipath_conf, sizeof(multipath_conf),
"Manipulated through RCU");
- ANNOTATE_BENIGN_RACE_SIZED(&running_state, sizeof(running_state),
- "Suppress complaints about unprotected running_state reads");
ANNOTATE_BENIGN_RACE_SIZED(&uxsock_timeout, sizeof(uxsock_timeout),
"Suppress complaints about this scalar variable");
int reset);
#define setup_multipath(vecs, mpp) __setup_multipath(vecs, mpp, 1)
int update_multipath (struct vectors *vecs, char *mapname, int reset);
+int update_path_groups(struct multipath *mpp, struct vectors *vecs,
+ int refresh);
#endif /* MAIN_H */