Fix error
[platform/upstream/multipath-tools.git] / libmultipath / configure.c
1 /*
2  * Copyright (c) 2003, 2004, 2005 Christophe Varoqui
3  * Copyright (c) 2005 Benjamin Marzinski, Redhat
4  * Copyright (c) 2005 Kiyoshi Ueda, NEC
5  * Copyright (c) 2005 Patrick Caulfield, Redhat
6  * Copyright (c) 2005 Edward Goggin, EMC
7  */
8
9 #include <stdio.h>
10 #include <stdlib.h>
11 #include <unistd.h>
12 #include <string.h>
13 #include <sys/file.h>
14 #include <errno.h>
15 #include <ctype.h>
16 #include <libdevmapper.h>
17 #include <libudev.h>
18 #include "mpath_cmd.h"
19
20 #include "checkers.h"
21 #include "vector.h"
22 #include "devmapper.h"
23 #include "defaults.h"
24 #include "structs.h"
25 #include "structs_vec.h"
26 #include "dmparser.h"
27 #include "config.h"
28 #include "blacklist.h"
29 #include "propsel.h"
30 #include "discovery.h"
31 #include "debug.h"
32 #include "switchgroup.h"
33 #include "dm-generic.h"
34 #include "print.h"
35 #include "configure.h"
36 #include "pgpolicies.h"
37 #include "dict.h"
38 #include "alias.h"
39 #include "prio.h"
40 #include "util.h"
41 #include "uxsock.h"
42 #include "wwids.h"
43 #include "sysfs.h"
44 #include "io_err_stat.h"
45
/* group paths in pg by host adapter
 *
 * Moves every path out of pgp->paths into a two-level hierarchy stored
 * in "adapters": one adapter_group per adapter name (as reported by
 * sysfs_get_host_adapter_name()), each containing one host_group per
 * SCSI host number, each holding its paths.
 *
 * Returns 0 on success (pgp->paths is then empty); on any failure all
 * paths already moved into "adapters" are restored into pgp->paths,
 * "adapters" is freed, and 1 is returned.
 */
int group_by_host_adapter(struct pathgroup *pgp, vector adapters)
{
	struct adapter_group *agp;
	struct host_group *hgp;
	struct path *pp, *pp1;
	char adapter_name1[SLOT_NAME_SIZE];
	char adapter_name2[SLOT_NAME_SIZE];
	int i, j;
	int found_hostgroup = 0;

	while (VECTOR_SIZE(pgp->paths) > 0) {

		/* first remaining path seeds a new adapter group */
		pp = VECTOR_SLOT(pgp->paths, 0);

		if (sysfs_get_host_adapter_name(pp, adapter_name1))
			goto out;
		/* create a new host adapter group
		 */
		agp = alloc_adaptergroup();
		if (!agp)
			goto out;
		agp->pgp = pgp;

		strlcpy(agp->adapter_name, adapter_name1, SLOT_NAME_SIZE);
		store_adaptergroup(adapters, agp);

		/* create a new host port group
		 */
		hgp = alloc_hostgroup();
		if (!hgp)
			goto out;
		/* NOTE(review): hgp appears to leak if store_hostgroup()
		 * fails, since the cleanup below only frees groups that
		 * made it into "adapters" -- confirm */
		if (store_hostgroup(agp->host_groups, hgp))
			goto out;

		hgp->host_no = pp->sg_id.host_no;
		agp->num_hosts++;
		if (store_path(hgp->paths, pp))
			goto out;

		hgp->num_paths++;
		/* delete path from path group
		 */
		vector_del_slot(pgp->paths, 0);

		/* add all paths belonging to same host adapter
		 */
		vector_foreach_slot(pgp->paths, pp1, i) {
			if (sysfs_get_host_adapter_name(pp1, adapter_name2))
				goto out;
			if (strcmp(adapter_name1, adapter_name2) == 0) {
				/* same adapter: attach to its host group
				 * if one already exists for this host_no */
				found_hostgroup = 0;
				vector_foreach_slot(agp->host_groups, hgp, j) {
					if (hgp->host_no == pp1->sg_id.host_no) {
						if (store_path(hgp->paths, pp1))
							goto out;
						hgp->num_paths++;
						found_hostgroup = 1;
						break;
					}
				}
				if (!found_hostgroup) {
					/* this path belongs to new host port
					 * within this adapter
					 */
					hgp = alloc_hostgroup();
					if (!hgp)
						goto out;

					if (store_hostgroup(agp->host_groups, hgp))
						goto out;

					agp->num_hosts++;
					if (store_path(hgp->paths, pp1))
						goto out;

					hgp->host_no = pp1->sg_id.host_no;
					hgp->num_paths++;
				}
				/* delete paths from original path_group
				 * as they are added into adapter group now
				 */
				vector_del_slot(pgp->paths, i);
				/* compensate the foreach increment after
				 * deleting slot i, so no path is skipped */
				i--;
			}
		}
	}
	return 0;

out:	/* add back paths into pg as re-ordering failed
	 */
	vector_foreach_slot(adapters, agp, i) {
			vector_foreach_slot(agp->host_groups, hgp, j) {
				while (VECTOR_SIZE(hgp->paths) > 0) {
					pp = VECTOR_SLOT(hgp->paths, 0);
					if (store_path(pgp->paths, pp))
						condlog(3, "failed to restore "
						"path %s into path group",
						 pp->dev);
					vector_del_slot(hgp->paths, 0);
				}
			}
		}
	free_adaptergroup(adapters);
	return 1;
}
153
154 /* re-order paths in pg by alternating adapters and host ports
155  * for optimized selection
156  */
157 int order_paths_in_pg_by_alt_adapters(struct pathgroup *pgp, vector adapters,
158                  int total_paths)
159 {
160         int next_adapter_index = 0;
161         struct adapter_group *agp;
162         struct host_group *hgp;
163         struct path *pp;
164
165         while (total_paths > 0) {
166                 agp = VECTOR_SLOT(adapters, next_adapter_index);
167                 if (!agp) {
168                         condlog(0, "can't get adapter group %d", next_adapter_index);
169                         return 1;
170                 }
171
172                 hgp = VECTOR_SLOT(agp->host_groups, agp->next_host_index);
173                 if (!hgp) {
174                         condlog(0, "can't get host group %d of adapter group %d", next_adapter_index, agp->next_host_index);
175                         return 1;
176                 }
177
178                 if (!hgp->num_paths) {
179                         agp->next_host_index++;
180                         agp->next_host_index %= agp->num_hosts;
181                         next_adapter_index++;
182                         next_adapter_index %= VECTOR_SIZE(adapters);
183                         continue;
184                 }
185
186                 pp  = VECTOR_SLOT(hgp->paths, 0);
187
188                 if (store_path(pgp->paths, pp))
189                         return 1;
190
191                 total_paths--;
192
193                 vector_del_slot(hgp->paths, 0);
194
195                 hgp->num_paths--;
196
197                 agp->next_host_index++;
198                 agp->next_host_index %= agp->num_hosts;
199                 next_adapter_index++;
200                 next_adapter_index %= VECTOR_SIZE(adapters);
201         }
202
203         /* all paths are added into path_group
204          * in crafted child order
205          */
206         return 0;
207 }
208
/* round-robin: order paths in path group to alternate
 * between all host adapters
 *
 * Only applied to SCSI paths using FCP, SAS, iSCSI or SRP transport;
 * for anything else the existing path order is kept and 0 (success)
 * is returned.  Allocation or grouping failures are also treated as
 * success, because the original path order is still intact in pgp.
 * Returns 1 only when re-ordering failed after the paths were already
 * removed from pgp (see order_paths_in_pg_by_alt_adapters()).
 */
int rr_optimize_path_order(struct pathgroup *pgp)
{
	vector adapters;
	struct path *pp;
	int total_paths;
	int i;

	total_paths = VECTOR_SIZE(pgp->paths);
	vector_foreach_slot(pgp->paths, pp, i) {
		if (pp->bus != SYSFS_BUS_SCSI ||
		    (pp->sg_id.proto_id != SCSI_PROTOCOL_FCP &&
		     pp->sg_id.proto_id != SCSI_PROTOCOL_SAS &&
		     pp->sg_id.proto_id != SCSI_PROTOCOL_ISCSI &&
		     pp->sg_id.proto_id != SCSI_PROTOCOL_SRP)) {
			/* return success as default path order
			 * is maintained in path group
			 */
			return 0;
		}
	}
	adapters = vector_alloc();
	if (!adapters)
		/* best-effort: keep default order on allocation failure */
		return 0;

	/* group paths in path group by host adapters
	 */
	if (group_by_host_adapter(pgp, adapters)) {
		/* already freed adapters */
		condlog(3, "Failed to group paths by adapters");
		return 0;
	}

	/* re-order paths in pg to alternate between adapters and host ports
	 */
	if (order_paths_in_pg_by_alt_adapters(pgp, adapters, total_paths)) {
		condlog(3, "Failed to re-order paths in pg by adapters "
			"and host ports");
		free_adaptergroup(adapters);
		/* return failure as original paths are
		 * removed from pgp
		 */
		return 1;
	}

	free_adaptergroup(adapters);
	return 0;
}
259
/*
 * Prepare mpp for (re)loading into device-mapper: select all map-level
 * properties from the configuration, assign paths to path groups,
 * pick the best path group, optionally re-order paths for round-robin
 * selectors, and assemble the dm table string into *params.
 *
 * Returns 0 on success, 1 on failure.  May be called on a freshly
 * created map or on an existing one (e.g. from reload/resize paths --
 * see the save_attr handling below).
 */
int setup_map(struct multipath *mpp, char **params, struct vectors *vecs)
{
	struct pathgroup * pgp;
	struct path *pp;
	struct config *conf;
	int i, marginal_pathgroups;
	char *save_attr;

	/*
	 * don't bother if devmap size is unknown
	 */
	if (mpp->size <= 0) {
		condlog(3, "%s: devmap size is unknown", mpp->alias);
		return 1;
	}

	/* queueing can only stay disabled while the map has no paths */
	if (mpp->disable_queueing && VECTOR_SIZE(mpp->paths) != 0)
		mpp->disable_queueing = 0;

	/* Force QUEUE_MODE_BIO for maps with nvme:tcp paths */
	vector_foreach_slot(mpp->paths, pp, i) {
		if (pp->bus == SYSFS_BUS_NVME &&
		    pp->sg_id.proto_id == NVME_PROTOCOL_TCP) {
			mpp->queue_mode = QUEUE_MODE_BIO;
			break;
		}
	}
	/*
	 * If this map was created with add_map_without_path(),
	 * mpp->hwe might not be set yet.
	 */
	if (!mpp->hwe)
		extract_hwe_from_path(mpp);

	/*
	 * properties selectors
	 *
	 * Ordering matters for some properties:
	 * - features after no_path_retry and retain_hwhandler
	 * - hwhandler after retain_hwhandler
	 * No guarantee that this list is complete, check code in
	 * propsel.c if in doubt.
	 */
	conf = get_multipath_config();
	/* drop the config reference even if the thread is cancelled */
	pthread_cleanup_push(put_multipath_config, conf);

	select_pgfailback(conf, mpp);
	select_detect_pgpolicy(conf, mpp);
	select_detect_pgpolicy_use_tpg(conf, mpp);
	select_pgpolicy(conf, mpp);

	/*
	 * If setup_map() is called from e.g. from reload_map() or resize_map(),
	 * make sure that we don't corrupt attributes.
	 *
	 * Pattern: steal the existing attribute, run the selector, and
	 * restore the old value only if the selector produced nothing.
	 */
	save_attr = steal_ptr(mpp->selector);
	select_selector(conf, mpp);
	if (!mpp->selector)
		mpp->selector = save_attr;
	else
		free(save_attr);

	select_no_path_retry(conf, mpp);
	select_retain_hwhandler(conf, mpp);

	save_attr = steal_ptr(mpp->features);
	select_features(conf, mpp);
	if (!mpp->features)
		mpp->features = save_attr;
	else
		free(save_attr);

	save_attr = steal_ptr(mpp->hwhandler);
	select_hwhandler(conf, mpp);
	if (!mpp->hwhandler)
		mpp->hwhandler = save_attr;
	else
		free(save_attr);

	select_rr_weight(conf, mpp);
	select_minio(conf, mpp);
	select_mode(conf, mpp);
	select_uid(conf, mpp);
	select_gid(conf, mpp);
	select_reservation_key(conf, mpp);
	select_deferred_remove(conf, mpp);
	select_marginal_path_err_sample_time(conf, mpp);
	select_marginal_path_err_rate_threshold(conf, mpp);
	select_marginal_path_err_recheck_gap_time(conf, mpp);
	select_marginal_path_double_failed_time(conf, mpp);
	select_san_path_err_threshold(conf, mpp);
	select_san_path_err_forget_rate(conf, mpp);
	select_san_path_err_recovery_time(conf, mpp);
	select_delay_checks(conf, mpp);
	select_skip_kpartx(conf, mpp);
	select_max_sectors_kb(conf, mpp);
	select_ghost_delay(conf, mpp);
	select_flush_on_last_del(conf, mpp);

	sysfs_set_scsi_tmo(conf, mpp);
	/* copy out before releasing the config lock below */
	marginal_pathgroups = conf->marginal_pathgroups;
	pthread_cleanup_pop(1);

	if (!mpp->features || !mpp->hwhandler || !mpp->selector) {
		condlog(0, "%s: map select failed", mpp->alias);
		return 1;
	}

	if (marginal_path_check_enabled(mpp))
		start_io_err_stat_thread(vecs);

	/*
	 * assign paths to path groups -- start with no groups and all paths
	 * in mpp->paths
	 */
	if (mpp->pg) {
		vector_foreach_slot (mpp->pg, pgp, i)
			free_pathgroup(pgp, KEEP_PATHS);

		vector_free(mpp->pg);
		mpp->pg = NULL;
	}
	if (group_paths(mpp, marginal_pathgroups))
		return 1;

	/*
	 * ponders each path group and determine highest prio pg
	 * to switch over (default to first)
	 */
	mpp->bestpg = select_path_group(mpp);

	/* re-order paths in all path groups in an optimized way
	 * for round-robin path selectors to get maximum throughput.
	 */
	if (!strncmp(mpp->selector, "round-robin", 11)) {
		vector_foreach_slot(mpp->pg, pgp, i) {
			/* nothing to gain with two paths or fewer */
			if (VECTOR_SIZE(pgp->paths) <= 2)
				continue;
			if (rr_optimize_path_order(pgp)) {
				condlog(2, "cannot re-order paths for "
					"optimization: %s",
					mpp->alias);
				return 1;
			}
		}
	}

	/*
	 * transform the mp->pg vector of vectors of paths
	 * into a mp->params strings to feed the device-mapper
	 */
	if (assemble_map(mpp, params)) {
		condlog(0, "%s: problem assembling map", mpp->alias);
		return 1;
	}
	return 0;
}
417
/*
 * Compute an order-independent id for a path group by XOR-ing the
 * pointer identities of its member paths into pgp->id.  Used by
 * pgcmp() below as a cheap first-pass match between path groups of
 * two maps that share the same struct path objects.
 */
static void
compute_pgid(struct pathgroup * pgp)
{
	struct path * pp;
	int i;

	vector_foreach_slot (pgp->paths, pp, i)
		pgp->id ^= (long)pp;
}
427
/*
 * Compare the path group topology of mpp against the existing map
 * cmpp.  Returns 0 if every path group of mpp has a matching group
 * (same id and same member paths, per pathcmp()) in cmpp; non-zero
 * otherwise.  A NULL mpp compares equal (returns 0).
 *
 * Ids of mpp's groups are computed here; cmpp's group ids are
 * presumably set elsewhere -- TODO confirm.
 */
static int
pgcmp (struct multipath * mpp, struct multipath * cmpp)
{
	int i, j;
	struct pathgroup * pgp;
	struct pathgroup * cpgp;
	int r = 0;

	if (!mpp)
		return 0;

	vector_foreach_slot (mpp->pg, pgp, i) {
		compute_pgid(pgp);

		/* r counts non-matching candidate groups; it is reset
		 * to 0 as soon as a match is found, so r != 0 after the
		 * inner loop means "no match for pgp" */
		vector_foreach_slot (cmpp->pg, cpgp, j) {
			if (pgp->id == cpgp->id &&
			    !pathcmp(pgp, cpgp)) {
				r = 0;
				break;
			}
			r++;
		}
		if (r)
			return r;
	}
	return r;
}
455
456 static struct udev_device *
457 get_udev_for_mpp(const struct multipath *mpp)
458 {
459         dev_t devnum;
460         struct udev_device *udd;
461
462         if (!mpp || !has_dm_info(mpp)) {
463                 condlog(1, "%s called with empty mpp", __func__);
464                 return NULL;
465         }
466
467         devnum = makedev(mpp->dmi.major, mpp->dmi.minor);
468         udd = udev_device_new_from_devnum(udev, 'b', devnum);
469         if (!udd) {
470                 condlog(1, "failed to get udev device for %s", mpp->alias);
471                 return NULL;
472         }
473         return udd;
474 }
475
/*
 * Trigger a synthetic uevent of the given action ("add" or "change",
 * len = strlen(action)) on every block-device partition of dev, by
 * writing the action into each partition's sysfs "uevent" attribute.
 * Best-effort: enumeration or write failures are logged and otherwise
 * ignored.
 */
void trigger_partitions_udev_change(struct udev_device *dev,
				    const char *action, int len)
{
	struct udev_enumerate *part_enum;
	struct udev_list_entry *item;
	const char *devtype;

	part_enum = udev_enumerate_new(udev);
	if (!part_enum)
		return;

	/* enumerate block devices that are children of dev */
	if (udev_enumerate_add_match_parent(part_enum, dev) < 0 ||
	    udev_enumerate_add_match_subsystem(part_enum, "block") < 0 ||
	    udev_enumerate_scan_devices(part_enum) < 0)
		goto unref;

	udev_list_entry_foreach(item,
				udev_enumerate_get_list_entry(part_enum)) {
		const char *syspath;
		struct udev_device *part;

		syspath = udev_list_entry_get_name(item);
		part = udev_device_new_from_syspath(udev, syspath);
		if (!part)
			continue;

		/* only partitions; the parent match may yield other
		 * children as well */
		devtype = udev_device_get_devtype(part);
		if (devtype && !strcmp("partition", devtype)) {
			ssize_t ret;

			condlog(4, "%s: triggering %s event for %s", __func__,
				action, syspath);
			ret = sysfs_attr_set_value(part, "uevent", action, len);
			if (ret != len)
				log_sysfs_attr_set_value(2, ret,
					"%s: failed to trigger %s uevent",
					syspath, action);
		}
		udev_device_unref(part);
	}
unref:
	udev_enumerate_unref(part_enum);
}
519
/*
 * Trigger a synthetic uevent on path pp (and on its partitions),
 * unless the udev database already reflects the desired multipath
 * membership state.  is_mpath tells whether pp is (to become) a
 * multipath member.
 */
void
trigger_path_udev_change(struct path *pp, bool is_mpath)
{
	/*
	 * If a path changes from multipath to non-multipath, we must
	 * synthesize an artificial "add" event, otherwise the LVM2 rules
	 * (69-lvm2-lvmetad.rules) won't pick it up. Otherwise, we'd just
	 * irritate ourselves with an "add", so use "change".
	 */
	const char *action = is_mpath ? "change" : "add";
	const char *env;
	ssize_t len, ret;

	if (!pp->udev)
		return;
	/*
	 * Paths that are already classified as multipath
	 * members don't need another uevent.
	 */
	env = udev_device_get_property_value(
		pp->udev, "DM_MULTIPATH_DEVICE_PATH");

	if (is_mpath && env != NULL && !strcmp(env, "1")) {
		/*
		 * If FIND_MULTIPATHS_WAIT_UNTIL is not "0",
		 * path is in "maybe" state and timer is running
		 * Send uevent now (see multipath.rules).
		 */
		env = udev_device_get_property_value(
			pp->udev, "FIND_MULTIPATHS_WAIT_UNTIL");
		if (env == NULL || !strcmp(env, "0"))
			return;
	} else if (!is_mpath &&
		   (env == NULL || !strcmp(env, "0")))
		/* already marked as non-member: nothing to announce */
		return;

	condlog(3, "triggering %s uevent for %s (is %smultipath member)",
		action, pp->dev, is_mpath ? "" : "no ");

	len = strlen(action);
	ret = sysfs_attr_set_value(pp->udev, "uevent", action, len);
	if (ret != len)
		log_sysfs_attr_set_value(2, ret,
					 "%s: failed to trigger %s uevent",
					 pp->dev, action);
	/* propagate the same event to the path's partitions */
	trigger_partitions_udev_change(pp->udev, action,
				       strlen(action));
}
568
569 void
570 trigger_paths_udev_change(struct multipath *mpp, bool is_mpath)
571 {
572         struct pathgroup *pgp;
573         struct path *pp;
574         int i, j;
575
576         if (!mpp || !mpp->pg)
577                 return;
578
579         vector_foreach_slot (mpp->pg, pgp, i) {
580                 if (!pgp->paths)
581                         continue;
582                 vector_foreach_slot(pgp->paths, pp, j)
583                         trigger_path_udev_change(pp, is_mpath);
584         }
585
586         mpp->needs_paths_uevent = 0;
587 }
588
589 static int
590 sysfs_set_max_sectors_kb(struct multipath *mpp, int is_reload)
591 {
592         struct pathgroup * pgp;
593         struct path *pp;
594         char buff[11];
595         ssize_t len;
596         int i, j, ret, err = 0;
597         struct udev_device *udd;
598         int max_sectors_kb = mpp->max_sectors_kb;
599
600         /* by default, do not initialize max_sectors_kb on the device */
601         if (max_sectors_kb == MAX_SECTORS_KB_UNDEF && !is_reload)
602                 return 0;
603         /* on reload, re-apply the user tuning on all the path devices */
604         if (is_reload) {
605                 if (!has_dm_info(mpp) &&
606                     dm_get_info(mpp->alias, &mpp->dmi) != 0) {
607                         condlog(1, "failed to get dm info for %s", mpp->alias);
608                         return 1;
609                 }
610                 udd = get_udev_for_mpp(mpp);
611                 if (!udd) {
612                         condlog(1, "failed to get udev device to set max_sectors_kb for %s", mpp->alias);
613                         return 1;
614                 }
615                 ret = sysfs_attr_get_value(udd, "queue/max_sectors_kb", buff,
616                                            sizeof(buff));
617                 udev_device_unref(udd);
618                 if (!sysfs_attr_value_ok(ret, sizeof(buff))) {
619                         condlog(1, "failed to get current max_sectors_kb from %s", mpp->alias);
620                         return 1;
621                 }
622                 if (sscanf(buff, "%u\n", &max_sectors_kb) != 1) {
623                         condlog(1, "can't parse current max_sectors_kb from %s",
624                                 mpp->alias);
625                         return 1;
626                 }
627         }
628         snprintf(buff, 11, "%d", max_sectors_kb);
629         len = strlen(buff);
630
631         vector_foreach_slot (mpp->pg, pgp, i) {
632                 vector_foreach_slot(pgp->paths, pp, j) {
633                         ret = sysfs_attr_set_value(pp->udev,
634                                                    "queue/max_sectors_kb",
635                                                    buff, len);
636                         if (ret != len) {
637                                 log_sysfs_attr_set_value(1, ret,
638                                         "failed setting max_sectors_kb on %s",
639                                         pp->dev);
640                                 err = 1;
641                         }
642                 }
643         }
644         return err;
645 }
646
647 static bool is_udev_ready(struct multipath *cmpp)
648 {
649         struct udev_device *mpp_ud;
650         const char *env;
651         bool rc;
652
653         /*
654          * MPATH_DEVICE_READY != 1 can mean two things:
655          *  (a) no usable paths
656          *  (b) device was never fully processed (e.g. udev killed)
657          * If we are in this code path (startup or forced reconfigure),
658          * (b) can mean that upper layers like kpartx have never been
659          * run for this map. Thus force udev reload.
660          */
661
662         mpp_ud = get_udev_for_mpp(cmpp);
663         if (!mpp_ud)
664                 return true;
665         env = udev_device_get_property_value(mpp_ud, "MPATH_DEVICE_READY");
666         rc = (env != NULL && !strcmp(env, "1"));
667         udev_device_unref(mpp_ud);
668         condlog(4, "%s: %s: \"%s\" -> %d\n", __func__, cmpp->alias,
669                 env ? env : "", rc);
670         return rc;
671 }
672
673 static void
674 select_reload_action(struct multipath *mpp, const char *reason)
675 {
676         mpp->action = mpp->action == ACT_RENAME ? ACT_RELOAD_RENAME :
677                       ACT_RELOAD;
678         condlog(3, "%s: set ACT_RELOAD (%s)", mpp->alias, reason);
679 }
680
/*
 * Decide which action (stored in mpp->action) is needed to bring the
 * wanted map mpp in sync with the currently loaded maps in curmp:
 * ACT_CREATE, ACT_RENAME, ACT_RESIZE, ACT_RELOAD, ACT_SWITCHPG,
 * ACT_IMPOSSIBLE or ACT_NOTHING (plus the *_RENAME combinations).
 * The existing map is looked up both by WWID and by alias to detect
 * renames and alias collisions.
 */
void select_action (struct multipath *mpp, const struct _vector *curmp,
		    int force_reload)
{
	struct multipath * cmpp;
	struct multipath * cmpp_by_name;
	char * mpp_feat, * cmpp_feat;

	mpp->action = ACT_NOTHING;
	cmpp = find_mp_by_wwid(curmp, mpp->wwid);
	cmpp_by_name = find_mp_by_alias(curmp, mpp->alias);
	if (mpp->need_reload || (cmpp && cmpp->need_reload))
		force_reload = 1;

	/* no existing map with this WWID: create one */
	if (!cmpp) {
		if (cmpp_by_name) {
			/* wanted alias is taken by another WWID */
			condlog(1, "%s: can't use alias \"%s\" used by %s, falling back to WWID",
				mpp->wwid, mpp->alias, cmpp_by_name->wwid);
			/* We can do this because wwid wasn't found */
			free(mpp->alias);
			mpp->alias = strdup(mpp->wwid);
		}
		mpp->action = ACT_CREATE;
		condlog(3, "%s: set ACT_CREATE (map does not exist%s)",
			mpp->alias, cmpp_by_name ? ", name changed" : "");
		return;
	}

	if (!cmpp_by_name) {
		/* existing map has a different alias: rename it */
		condlog(2, "%s: rename %s to %s", mpp->wwid, cmpp->alias,
			mpp->alias);
		strlcpy(mpp->alias_old, cmpp->alias, WWID_SIZE);
		mpp->action = ACT_RENAME;
		/* don't return here. Check for other needed actions */
	} else if (cmpp != cmpp_by_name) {
		/* wanted alias is taken by a different map: keep the
		 * existing alias instead */
		condlog(2, "%s: unable to rename %s to %s (%s is used by %s)",
			mpp->wwid, cmpp->alias, mpp->alias,
			mpp->alias, cmpp_by_name->wwid);
		/* reset alias to existing alias */
		free(mpp->alias);
		mpp->alias = strdup(cmpp->alias);
		mpp->action = ACT_IMPOSSIBLE;
		/* don't return here. Check for other needed actions */
	}

	if (cmpp->size != mpp->size) {
		mpp->force_udev_reload = 1;
		mpp->action = mpp->action == ACT_RENAME ? ACT_RESIZE_RENAME :
			      ACT_RESIZE;
		condlog(3, "%s: set ACT_RESIZE (size change)",
			mpp->alias);
		return;
	}

	if (force_reload) {
		mpp->force_udev_reload = 1;
		select_reload_action(mpp, "forced by user");
		return;
	}

	/* reload if udev never finished processing the map but it has
	 * active paths (see is_udev_ready()) */
	if (!is_udev_ready(cmpp) && count_active_paths(mpp) > 0) {
		mpp->force_udev_reload = 1;
		select_reload_action(mpp, "udev incomplete");
		return;
	}

	/* compare the queue_if_no_path feature flag state */
	if (mpp->no_path_retry != NO_PATH_RETRY_UNDEF &&
	    !!strstr(mpp->features, "queue_if_no_path") !=
	    !!strstr(cmpp->features, "queue_if_no_path")) {
		select_reload_action(mpp, "no_path_retry change");
		return;
	}
	if ((mpp->retain_hwhandler != RETAIN_HWHANDLER_ON ||
	     strcmp(cmpp->hwhandler, "0") == 0) &&
	    (strlen(cmpp->hwhandler) != strlen(mpp->hwhandler) ||
	     strncmp(cmpp->hwhandler, mpp->hwhandler,
		    strlen(mpp->hwhandler)))) {
		select_reload_action(mpp, "hwhandler change");
		return;
	}

	/* retain_attached_hw_handler is only meaningful to the kernel
	 * before 4.3 */
	if (mpp->retain_hwhandler != RETAIN_HWHANDLER_UNDEF &&
	    !!strstr(mpp->features, "retain_attached_hw_handler") !=
	    !!strstr(cmpp->features, "retain_attached_hw_handler") &&
	    get_linux_version_code() < KERNEL_VERSION(4, 3, 0)) {
		select_reload_action(mpp, "retain_hwhandler change");
		return;
	}

	/* compare the remaining features, ignoring the two flags that
	 * were already checked above; if strdup fails the comparison
	 * is silently skipped */
	cmpp_feat = strdup(cmpp->features);
	mpp_feat = strdup(mpp->features);
	if (cmpp_feat && mpp_feat) {
		remove_feature(&mpp_feat, "queue_if_no_path");
		remove_feature(&mpp_feat, "retain_attached_hw_handler");
		remove_feature(&cmpp_feat, "queue_if_no_path");
		remove_feature(&cmpp_feat, "retain_attached_hw_handler");
		if (strcmp(mpp_feat, cmpp_feat)) {
			select_reload_action(mpp, "features change");
			free(cmpp_feat);
			free(mpp_feat);
			return;
		}
	}
	free(cmpp_feat);
	free(mpp_feat);

	if (!cmpp->selector || strncmp(cmpp->selector, mpp->selector,
		    strlen(mpp->selector))) {
		select_reload_action(mpp, "selector change");
		return;
	}
	if (cmpp->minio != mpp->minio) {
		select_reload_action(mpp, "minio change");
		return;
	}
	if (!cmpp->pg || VECTOR_SIZE(cmpp->pg) != VECTOR_SIZE(mpp->pg)) {
		select_reload_action(mpp, "path group number change");
		return;
	}
	if (pgcmp(mpp, cmpp)) {
		select_reload_action(mpp, "path group topology change");
		return;
	}
	/* same topology, but a different path group should be active */
	if (cmpp->nextpg != mpp->bestpg) {
		mpp->action = mpp->action == ACT_RENAME ? ACT_SWITCHPG_RENAME :
			      ACT_SWITCHPG;
		condlog(3, "%s: set ACT_SWITCHPG (next path group change)",
			mpp->alias);
		return;
	}
	if (mpp->action == ACT_NOTHING)
		condlog(3, "%s: set ACT_NOTHING (map unchanged)", mpp->alias);
	return;
}
814
815 int reinstate_paths(struct multipath *mpp)
816 {
817         int i, j;
818         struct pathgroup * pgp;
819         struct path * pp;
820
821         if (!mpp->pg)
822                 return 0;
823
824         vector_foreach_slot (mpp->pg, pgp, i) {
825                 if (!pgp->paths)
826                         continue;
827
828                 vector_foreach_slot (pgp->paths, pp, j) {
829                         if (pp->state != PATH_UP &&
830                             (pgp->status == PGSTATE_DISABLED ||
831                              pgp->status == PGSTATE_ACTIVE))
832                                 continue;
833
834                         if (pp->dmstate == PSTATE_FAILED) {
835                                 if (dm_reinstate_path(mpp->alias, pp->dev_t))
836                                         condlog(0, "%s: error reinstating",
837                                                 pp->dev);
838                         }
839                 }
840         }
841         return 0;
842 }
843
/*
 * Take (lock != 0) or drop (lock == 0) a shared, non-blocking flock()
 * on the fd of every path of the map.  If a path is already locked by
 * someone else (flock() fails with EWOULDBLOCK), release the locks
 * acquired so far and return 1; on success (or for a NULL/empty map)
 * return 0.  flock() failures other than EWOULDBLOCK are deliberately
 * ignored and treated as success.
 */
static int
lock_multipath (struct multipath * mpp, int lock)
{
	struct pathgroup * pgp;
	struct path * pp;
	int i, j;	/* position of the path that could not be locked */
	int x, y;	/* cursors for the unwind loop below */

	if (!mpp || !mpp->pg)
		return 0;

	vector_foreach_slot (mpp->pg, pgp, i) {
		if (!pgp->paths)
			continue;
		vector_foreach_slot(pgp->paths, pp, j) {
			if (lock && flock(pp->fd, LOCK_SH | LOCK_NB) &&
			    errno == EWOULDBLOCK)
				goto fail;
			else if (!lock)
				flock(pp->fd, LOCK_UN);
		}
	}
	return 0;
fail:
	/*
	 * Unlock every path that comes strictly before (i, j) in
	 * iteration order; path (i, j) itself never got its lock.
	 */
	vector_foreach_slot (mpp->pg, pgp, x) {
		if (x > i)
			return 1;
		if (!pgp->paths)
			continue;
		vector_foreach_slot(pgp->paths, pp, y) {
			if (x == i && y >= j)
				return 1;
			flock(pp->fd, LOCK_UN);
		}
	}
	return 1;
}
881
/*
 * Apply mpp->action to the device-mapper device: create, reload,
 * rename, resize, or switch path group, using the DM table parameter
 * string 'params' built by setup_map().  Returns DOMAP_OK, DOMAP_EXIST,
 * DOMAP_DRY, DOMAP_RETRY (create race, caller may retry) or DOMAP_FAIL.
 */
int domap(struct multipath *mpp, char *params, int is_daemon)
{
	int r = DOMAP_FAIL;
	struct config *conf;

	/*
	 * last chance to quit before touching the devmaps
	 */
	if (mpp->action == ACT_DRY_RUN) {
		print_multipath_topology(mpp, libmp_verbosity);
		return DOMAP_DRY;
	}

	/*
	 * A map with our alias already exists: downgrade to a reload if
	 * it carries our WWID, otherwise the alias belongs to another
	 * device and the map must be rejected.
	 */
	if (mpp->action == ACT_CREATE && dm_map_present(mpp->alias)) {
		char wwid[WWID_SIZE];

		if (dm_get_uuid(mpp->alias, wwid, sizeof(wwid)) == 0) {
			if (!strncmp(mpp->wwid, wwid, sizeof(wwid))) {
				condlog(3, "%s: map already present",
					mpp->alias);
				mpp->action = ACT_RELOAD;
			} else {
				condlog(0, "%s: map \"%s\" already present with WWID %s, skipping",
					mpp->wwid, mpp->alias, wwid);
				condlog(0, "please check alias settings in config and bindings file");
				mpp->action = ACT_REJECT;
			}
		}
	}

	/* every *_RENAME action renames first, then proceeds below */
	if (mpp->action == ACT_RENAME || mpp->action == ACT_SWITCHPG_RENAME ||
	    mpp->action == ACT_RELOAD_RENAME ||
	    mpp->action == ACT_RESIZE_RENAME) {
		conf = get_multipath_config();
		pthread_cleanup_push(put_multipath_config, conf);
		r = dm_rename(mpp->alias_old, mpp->alias,
			      conf->partition_delim, mpp->skip_kpartx);
		pthread_cleanup_pop(1);
		if (r == DOMAP_FAIL)
			return r;
	}
	switch (mpp->action) {
	case ACT_REJECT:
	case ACT_NOTHING:
	case ACT_IMPOSSIBLE:
		return DOMAP_EXIST;

	case ACT_SWITCHPG:
	case ACT_SWITCHPG_RENAME:
		dm_switchgroup(mpp->alias, mpp->bestpg);
		/*
		 * we may have avoided reinstating paths because they
		 * were in an active or disabled PG. Now that the
		 * topology has changed, retry.
		 */
		reinstate_paths(mpp);
		return DOMAP_EXIST;

	case ACT_CREATE:
		/* don't create over paths another process holds locked */
		if (lock_multipath(mpp, 1)) {
			condlog(3, "%s: failed to create map (in use)",
				mpp->alias);
			return DOMAP_RETRY;
		}

		sysfs_set_max_sectors_kb(mpp, 0);
		/* ghost-only map: arm the ghost delay countdown */
		if (is_daemon && mpp->ghost_delay > 0 && count_active_paths(mpp) &&
		    pathcount(mpp, PATH_UP) == 0)
			mpp->ghost_delay_tick = mpp->ghost_delay;
		r = dm_addmap_create(mpp, params);

		lock_multipath(mpp, 0);
		break;

	case ACT_RELOAD:
	case ACT_RELOAD_RENAME:
		sysfs_set_max_sectors_kb(mpp, 1);
		/* a fully up path ends any pending ghost delay */
		if (mpp->ghost_delay_tick > 0 && pathcount(mpp, PATH_UP))
			mpp->ghost_delay_tick = 0;
		r = dm_addmap_reload(mpp, params, 0);
		break;

	case ACT_RESIZE:
	case ACT_RESIZE_RENAME:
		sysfs_set_max_sectors_kb(mpp, 1);
		if (mpp->ghost_delay_tick > 0 && pathcount(mpp, PATH_UP))
			mpp->ghost_delay_tick = 0;
		r = dm_addmap_reload(mpp, params, 1);
		break;

	case ACT_RENAME:
		/* rename already done above; r holds dm_rename()'s result */
		break;

	default:
		r = DOMAP_FAIL;
		break;
	}

	if (r == DOMAP_OK) {
		/*
		 * DM_DEVICE_CREATE, DM_DEVICE_RENAME, or DM_DEVICE_RELOAD
		 * succeeded
		 */
		mpp->force_udev_reload = 0;
		if (mpp->action == ACT_CREATE &&
		    (remember_wwid(mpp->wwid) == 1 ||
		     mpp->needs_paths_uevent))
			trigger_paths_udev_change(mpp, true);
		if (!is_daemon) {
			/* multipath client mode */
			dm_switchgroup(mpp->alias, mpp->bestpg);
		} else  {
			/* multipath daemon mode */
			mpp->stat_map_loads++;
			condlog(4, "%s: load table [0 %llu %s %s]", mpp->alias,
				mpp->size, TGT_MPATH, params);
			/*
			 * Required action is over, reset for the stateful daemon.
			 * But don't do it for creation as we use in the caller the
			 * mpp->action to figure out whether to start the waitevent checker.
			 */
			if (mpp->action != ACT_CREATE)
				mpp->action = ACT_NOTHING;
			else {
				conf = get_multipath_config();
				mpp->wait_for_udev = 1;
				mpp->uev_wait_tick = conf->uev_wait_timeout;
				put_multipath_config(conf);
			}
		}
		dm_setgeometry(mpp);
		return DOMAP_OK;
	} else if (r == DOMAP_FAIL && mpp->action == ACT_CREATE &&
		   mpp->needs_paths_uevent)
		trigger_paths_udev_change(mpp, false);

	return DOMAP_FAIL;
}
1020
1021 extern int
1022 check_daemon(void)
1023 {
1024         int fd;
1025         char *reply;
1026         int ret = 0;
1027         unsigned int timeout;
1028         struct config *conf;
1029
1030         fd = mpath_connect();
1031         if (fd == -1)
1032                 return 0;
1033
1034         if (send_packet(fd, "show daemon") != 0)
1035                 goto out;
1036         conf = get_multipath_config();
1037         timeout = conf->uxsock_timeout;
1038         put_multipath_config(conf);
1039         if (recv_packet(fd, &reply, timeout) != 0)
1040                 goto out;
1041
1042         if (reply && strstr(reply, "shutdown"))
1043                 goto out_free;
1044
1045         ret = 1;
1046
1047 out_free:
1048         free(reply);
1049 out:
1050         mpath_disconnect(fd);
1051         return ret;
1052 }
1053
1054 /*
1055  * The force_reload parameter determines how coalesce_paths treats existing maps.
1056  * FORCE_RELOAD_NONE: existing maps aren't touched at all
1057  * FORCE_RELOAD_YES: all maps are rebuilt from scratch and (re)loaded in DM
1058  * FORCE_RELOAD_WEAK: existing maps are compared to the current conf and only
1059  * reloaded in DM if there's a difference. This is normally sufficient.
1060  */
/*
 * Walk vecs->pathvec, group paths by WWID into multipath maps, and hand
 * each map to domap().  Surviving maps are appended to mpvec; when
 * mpvec is NULL a temporary vector is used and torn down again before
 * returning.  Returns CP_OK, CP_FAIL, or CP_RETRY (client mode only,
 * when a create should be retried).
 */
int coalesce_paths (struct vectors *vecs, vector mpvec, char *refwwid,
		    int force_reload, enum mpath_cmds cmd)
{
	int ret = CP_FAIL;
	int k, i, r;
	int is_daemon = (cmd == CMD_NONE) ? 1 : 0;
	/* auto-freed on scope exit; also freed manually after each domap() */
	char *params __attribute__((cleanup(cleanup_charp))) = NULL;
	struct multipath * mpp;
	struct path * pp1 = NULL;
	struct path * pp2;
	vector curmp = vecs->mpvec;
	vector pathvec = vecs->pathvec;
	vector newmp;
	struct config *conf = NULL;
	int allow_queueing;
	struct bitfield *size_mismatch_seen;
	struct multipath * cmpp;

	/* ignore refwwid if it's empty */
	if (refwwid && !strlen(refwwid))
		refwwid = NULL;

	/* forget previous path->map bindings so maps are rebuilt below */
	if (force_reload != FORCE_RELOAD_NONE) {
		vector_foreach_slot (pathvec, pp1, k) {
			pp1->mpp = NULL;
		}
	}

	if (VECTOR_SIZE(pathvec) == 0)
		return CP_OK;
	/* one bit per path: set when discarded for a size mismatch */
	size_mismatch_seen = alloc_bitfield(VECTOR_SIZE(pathvec));
	if (size_mismatch_seen == NULL)
		return CP_FAIL;

	if (mpvec)
		newmp = mpvec;
	else
		newmp = vector_alloc();
	if (!newmp) {
		condlog(0, "can not allocate newmp");
		goto out;
	}

	vector_foreach_slot (pathvec, pp1, k) {
		int invalid;

		if (should_exit()) {
			ret = CP_FAIL;
			goto out;
		}

		/* skip this path for some reason */

		/* 1. if path has no unique id or wwid blacklisted */
		if (strlen(pp1->wwid) == 0) {
			orphan_path(pp1, "no WWID");
			continue;
		}

		conf = get_multipath_config();
		pthread_cleanup_push(put_multipath_config, conf);
		invalid = (filter_path(conf, pp1) > 0);
		pthread_cleanup_pop(1);
		if (invalid) {
			orphan_path(pp1, "blacklisted");
			continue;
		}

		/* 2. if path already coalesced, or seen and discarded */
		if (pp1->mpp || is_bit_set_in_bitfield(k, size_mismatch_seen))
			continue;

		/* 3. if path has disappeared */
		if (pp1->state == PATH_REMOVED) {
			orphan_path(pp1, "path removed");
			continue;
		}

		/* 4. path is out of scope */
		if (refwwid && strncmp(pp1->wwid, refwwid, WWID_SIZE - 1))
			continue;

		/* If find_multipaths was selected check if the path is valid */
		if (!refwwid && !should_multipath(pp1, pathvec, curmp)) {
			orphan_path(pp1, "only one path");
			continue;
		}

		/* cmpp: the already-existing map with this WWID, if any */
		cmpp = find_mp_by_wwid(curmp, pp1->wwid);
		if (cmpp && cmpp->queue_mode == QUEUE_MODE_RQ &&
		    pp1->bus == SYSFS_BUS_NVME && pp1->sg_id.proto_id ==
		    NVME_PROTOCOL_TCP) {
			orphan_path(pp1, "nvme:tcp path not allowed with request queue_mode multipath device");
			continue;
		}
		/*
		 * at this point, we know we really got a new mp
		 */
		mpp = add_map_with_path(vecs, pp1, 0);
		if (!mpp) {
			orphan_path(pp1, "failed to create multipath device");
			continue;
		}

		if (!mpp->paths) {
			condlog(0, "%s: skip coalesce (no paths)", mpp->alias);
			remove_map(mpp, vecs->pathvec, NULL);
			continue;
		}

		/* check the remaining same-WWID paths for size agreement */
		for (i = k + 1; i < VECTOR_SIZE(pathvec); i++) {
			pp2 = VECTOR_SLOT(pathvec, i);

			if (strcmp(pp1->wwid, pp2->wwid))
				continue;

			if (!mpp->size && pp2->size)
				mpp->size = pp2->size;

			if (mpp->size && pp2->size &&
			    pp2->size != mpp->size) {
				/*
				 * ouch, avoid feeding that to the DM
				 */
				condlog(0, "%s: size %llu, expected %llu. "
					"Discard", pp2->dev, pp2->size,
					mpp->size);
				mpp->action = ACT_REJECT;
				set_bit_in_bitfield(i, size_mismatch_seen);
			}
		}
		verify_paths(mpp);

		/* preserve the queue mode of the existing map */
		if (cmpp)
			mpp->queue_mode = cmpp->queue_mode;
		if (cmd == CMD_DRY_RUN && mpp->action == ACT_UNDEF)
			mpp->action = ACT_DRY_RUN;
		if (setup_map(mpp, &params, vecs)) {
			remove_map(mpp, vecs->pathvec, NULL);
			continue;
		}

		if (mpp->action == ACT_UNDEF)
			select_action(mpp, curmp,
				      force_reload == FORCE_RELOAD_YES ? 1 : 0);

		r = domap(mpp, params, is_daemon);
		free(params);
		params = NULL;

		if (r == DOMAP_FAIL || r == DOMAP_RETRY) {
			/*
			 * NOTE(review): r is int; "%u" expects unsigned.
			 * Harmless for the non-negative DOMAP_* codes,
			 * but "%d" would match the type exactly.
			 */
			condlog(3, "%s: domap (%u) failure "
				   "for create/reload map",
				mpp->alias, r);
			if (r == DOMAP_FAIL || is_daemon) {
				condlog(2, "%s: %s map",
					mpp->alias, (mpp->action == ACT_CREATE)?
					"ignoring" : "removing");
				remove_map(mpp, vecs->pathvec, NULL);
				continue;
			} else /* if (r == DOMAP_RETRY && !is_daemon) */ {
				ret = CP_RETRY;
				goto out;
			}
		}
		if (r == DOMAP_DRY) {
			/* keep dry-run maps so the caller can inspect them */
			if (!vector_alloc_slot(newmp)) {
				remove_map(mpp, vecs->pathvec, NULL);
				goto out;
			}
			vector_set_slot(newmp, mpp);
			continue;
		}

		conf = get_multipath_config();
		allow_queueing = conf->allow_queueing;
		put_multipath_config(conf);
		/* without a daemon, queueing would hang I/O on path loss */
		if (!is_daemon && !allow_queueing && !check_daemon()) {
			if (mpp->no_path_retry != NO_PATH_RETRY_UNDEF &&
			    mpp->no_path_retry != NO_PATH_RETRY_FAIL)
				condlog(3, "%s: multipathd not running, unset "
					"queue_if_no_path feature", mpp->alias);
			if (!dm_queue_if_no_path(mpp->alias, 0))
				remove_feature(&mpp->features,
					       "queue_if_no_path");
		}

		if (!is_daemon && mpp->action != ACT_NOTHING)
			print_multipath_topology(mpp, libmp_verbosity);

		if (mpp->action != ACT_REJECT) {
			if (!vector_alloc_slot(newmp)) {
				remove_map(mpp, vecs->pathvec, NULL);
				goto out;
			}
			vector_set_slot(newmp, mpp);
		}
		else
			remove_map(mpp, vecs->pathvec, NULL);
	}
	ret = CP_OK;
out:
	free(size_mismatch_seen);
	/* caller didn't want the maps back: tear down the temp vector */
	if (!mpvec) {
		vector_foreach_slot (newmp, mpp, i)
			remove_map(mpp, vecs->pathvec, NULL);
		vector_free(newmp);
	}
	return ret;
}
1271
1272 struct udev_device *get_udev_device(const char *dev, enum devtypes dev_type)
1273 {
1274         struct udev_device *ud = NULL;
1275         const char *base;
1276
1277         if (dev == NULL || *dev == '\0')
1278                 return NULL;
1279
1280         switch (dev_type) {
1281         case DEV_DEVNODE:
1282         case DEV_DEVMAP:
1283                 /* This should be GNU basename, compiler will warn if not */
1284                 base = basename(dev);
1285                 if (*base == '\0')
1286                         break;
1287                 ud = udev_device_new_from_subsystem_sysname(udev, "block",
1288                                                             base);
1289                 break;
1290         case DEV_DEVT:
1291                 ud = udev_device_new_from_devnum(udev, 'b', parse_devt(dev));
1292                 break;
1293         case DEV_UEVENT:
1294                 ud = udev_device_new_from_environment(udev);
1295                 break;
1296         default:
1297                 condlog(0, "Internal error: get_udev_device called with invalid type %d\n",
1298                         dev_type);
1299                 break;
1300         }
1301         if (ud == NULL)
1302                 condlog(2, "get_udev_device: failed to look up %s with type %d",
1303                         dev, dev_type);
1304         return ud;
1305 }
1306
/*
 * Resolve the device named by 'dev' (interpreted according to dev_type)
 * to a reference WWID.  On success *wwid holds a strdup'd copy that the
 * caller must free.  Returns PATHINFO_OK, PATHINFO_FAILED, or
 * PATHINFO_SKIPPED (blacklisted device).
 */
static int _get_refwwid(enum mpath_cmds cmd, const char *dev,
			enum devtypes dev_type,
			vector pathvec, struct config *conf, char **wwid)
{
	int ret = 1;
	struct path * pp;
	char buff[FILE_NAME_SIZE];
	const char *refwwid = NULL;
	char tmpwwid[WWID_SIZE];
	struct udev_device *udevice;
	int flags = DI_SYSFS | DI_WWID;

	if (!wwid)
		return PATHINFO_FAILED;
	*wwid = NULL;

	if (dev_type == DEV_NONE)
		return PATHINFO_FAILED;

	/* WWID removal must see blacklisted devices too */
	if (cmd != CMD_REMOVE_WWID)
		flags |= DI_BLACKLIST;

	switch (dev_type) {
	case DEV_DEVNODE:
		if (basenamecpy(dev, buff, FILE_NAME_SIZE) == 0) {
			condlog(1, "basename failed for '%s' (%s)",
				dev, buff);
			return PATHINFO_FAILED;
		}

		/* dev is used in common code below */
		dev = buff;
		pp = find_path_by_dev(pathvec, dev);
		goto common;

	case DEV_DEVT:
		pp = find_path_by_devt(pathvec, dev);
		goto common;

	case DEV_UEVENT:
		pp = NULL;
		/* For condlog below, dev is unused in get_udev_device() */
		dev = "environment";
	common:
		/* shared tail for the three path-based cases above:
		 * discover the path if it isn't in pathvec yet */
		if (!pp) {
			udevice = get_udev_device(dev, dev_type);

			if (!udevice) {
				condlog(0, "%s: cannot find block device", dev);
				return PATHINFO_FAILED;
			}

			ret = store_pathinfo(pathvec, conf, udevice,
					     flags, &pp);
			udev_device_unref(udevice);
			if (!pp) {
				if (ret == PATHINFO_FAILED)
					condlog(0, "%s: can't store path info",
						dev);
				return ret;
			}
		}
		if (flags & DI_BLACKLIST &&
		    filter_property(conf, pp->udev, 3, pp->uid_attribute) > 0)
			return PATHINFO_SKIPPED;
		refwwid = pp->wwid;
		break;

	case DEV_DEVMAP:
		/* take the DM uuid of an existing map if there is one */
		if (((dm_get_uuid(dev, tmpwwid, WWID_SIZE)) == 0)
		    && (strlen(tmpwwid)))
			refwwid = tmpwwid;

		/* or may be a binding */
		else if (get_user_friendly_wwid(dev, tmpwwid) == 0)
			refwwid = tmpwwid;

		/* or may be an alias */
		else {
			refwwid = get_mpe_wwid(conf->mptable, dev);

			/* or directly a wwid */
			if (!refwwid)
				refwwid = dev;
		}

		if (flags & DI_BLACKLIST && refwwid && strlen(refwwid) &&
		    filter_wwid(conf->blist_wwid, conf->elist_wwid, refwwid,
				NULL) > 0)
			return PATHINFO_SKIPPED;
		break;
	default:
		break;
	}

	if (refwwid && strlen(refwwid)) {
		*wwid = strdup(refwwid);
		return PATHINFO_OK;
	}

	return PATHINFO_FAILED;
}
1409
1410 /*
1411  * Returns: PATHINFO_OK, PATHINFO_FAILED, or PATHINFO_SKIPPED (see pathinfo())
1412  */
1413 int get_refwwid(enum mpath_cmds cmd, const char *dev, enum devtypes dev_type,
1414                 vector pathvec, char **wwid)
1415
1416 {
1417         int ret;
1418         struct config *conf = get_multipath_config();
1419
1420         pthread_cleanup_push(put_multipath_config, conf);
1421         ret = _get_refwwid(cmd, dev, dev_type, pathvec, conf, wwid);
1422         pthread_cleanup_pop(1);
1423         return ret;
1424 }