2 * Copyright (c) 2003, 2004, 2005 Christophe Varoqui
3 * Copyright (c) 2005 Benjamin Marzinski, Redhat
4 * Copyright (c) 2005 Kiyoshi Ueda, NEC
5 * Copyright (c) 2005 Patrick Caulfield, Redhat
6 * Copyright (c) 2005 Edward Goggin, EMC
15 #include <libdevmapper.h>
20 #include "devmapper.h"
23 #include "structs_vec.h"
26 #include "blacklist.h"
28 #include "discovery.h"
30 #include "switchgroup.h"
32 #include "configure.h"
33 #include "pgpolicies.h"
/*
 * setup_map(): recompute a multipath map's runtime properties and build
 * the device-mapper table parameter string for it.
 * Returns non-zero on failure (size unknown, pgpolicy or assembly error).
 * NOTE(review): this chunk of the file is truncated (interior source
 * lines are missing between the visible ones), so only the visible
 * statements are annotated here.
 */
39 setup_map (struct multipath * mpp)
41 	struct pathgroup * pgp;
45	 * don't bother if devmap size is unknown
48		condlog(3, "%s: devmap size is unknown", mpp->alias);
	/*
	 * Pick per-map properties from the config hierarchy
	 * (failback policy, hardware handler, rr weight, no_path_retry,
	 * pg_timeout) before (re)grouping the paths.
	 */
53	 * properties selectors
55	select_pgfailback(mpp);
59	select_hwhandler(mpp);
60	select_rr_weight(mpp);
62	select_no_path_retry(mpp);
63	select_pg_timeout(mpp);
66	 * assign paths to path groups -- start with no groups and all paths
	/* drop the existing groups but keep the path objects themselves */
70	vector_foreach_slot (mpp->pg, pgp, i)
71		free_pathgroup(pgp, KEEP_PATHS);
	/* regroup paths according to the selected path-group policy */
76	if (mpp->pgpolicyfn && mpp->pgpolicyfn(mpp))
79	mpp->nr_active = pathcount(mpp, PATH_UP);
82	 * ponders each path group and determine highest prio pg
83	 * to switch over (default to first)
85	mpp->bestpg = select_path_group(mpp);
88	 * transform the mp->pg vector of vectors of paths
89	 * into a mp->params strings to feed the device-mapper
91	if (assemble_map(mpp)) {
		/* NOTE(review): typo in log message — "assembing" should be
		 * "assembling"; fix when touching this code. */
92		condlog(0, "%s: problem assembing map", mpp->alias);
/*
 * compute_pgid(): derive a path group's id from its member paths
 * (fragment — the accumulation expression inside the loop is not
 * visible in this truncated view).
 */
99 compute_pgid(struct pathgroup * pgp)
104	vector_foreach_slot (pgp->paths, pp, i)
/*
 * pgcmp(): compare the path-group topology of two multipath maps by
 * matching path-group ids; used by select_action() to detect topology
 * changes that require a table reload.
 * NOTE(review): truncated view — return-value semantics (0 == equal,
 * presumably) cannot be confirmed from the visible lines.
 */
109 pgcmp (struct multipath * mpp, struct multipath * cmpp)
112	struct pathgroup * pgp;
113	struct pathgroup * cpgp;
	/* for every group in mpp, look for a group with the same id in cmpp */
116	vector_foreach_slot (mpp->pg, pgp, i) {
119		vector_foreach_slot (cmpp->pg, cpgp, j) {
120			if (pgp->id == cpgp->id) {
/*
 * select_action(): decide what to do with map mpp by comparing it with
 * the currently-loaded map (cmpp) found in curmp.  Sets mpp->action to
 * one of ACT_CREATE / ACT_RENAME / ACT_RELOAD / ACT_SWITCHPG /
 * ACT_NOTHING.
 * NOTE(review): truncated view — several conditions/braces between the
 * visible lines are missing.
 */
133 select_action (struct multipath * mpp, vector curmp)
135	struct multipath * cmpp;
	/* look up the running map first by alias, then by wwid */
137	cmpp = find_mp_by_alias(curmp, mpp->alias);
140	cmpp = find_mp_by_wwid(curmp, mpp->wwid);
	/* same wwid but different alias: rename the existing map */
142	if (cmpp && !conf->dry_run) {
143		condlog(2, "%s: rename %s to %s", mpp->wwid,
144			cmpp->alias, mpp->alias);
		/* NOTE(review): strncpy with WWID_SIZE does not guarantee NUL
		 * termination if cmpp->alias is exactly WWID_SIZE long. */
145		strncpy(mpp->alias_old, cmpp->alias, WWID_SIZE);
146		mpp->action = ACT_RENAME;
	/* no running map at all: create it */
149	mpp->action = ACT_CREATE;
150	condlog(3, "%s: set ACT_CREATE (map does not exist)",
	/* alias exists but backs a different wwid: flush and recreate */
155	if (!find_mp_by_wwid(curmp, mpp->wwid)) {
156		condlog(2, "%s: remove (wwid changed)", cmpp->alias);
157		dm_flush_map(mpp->alias, DEFAULT_TARGET);
		/* NOTE(review): strncat APPENDS the new wwid to the old one
		 * instead of replacing it — this looks like it should be
		 * strncpy; verify against upstream multipath-tools. */
158		strncat(cmpp->wwid, mpp->wwid, WWID_SIZE);
159		drop_multipath(curmp, cmpp->wwid, KEEP_PATHS);
160		mpp->action = ACT_CREATE;
161		condlog(3, "%s: set ACT_CREATE (map wwid change)",
	/* no usable path: leave the running map alone */
166	if (pathcount(mpp, PATH_UP) == 0) {
167		mpp->action = ACT_NOTHING;
168		condlog(3, "%s: set ACT_NOTHING (no usable path)",
	/* any property difference below forces a table reload */
172	if (cmpp->size != mpp->size) {
173		mpp->action = ACT_RELOAD;
174		condlog(3, "%s: set ACT_RELOAD (size change)",
	/* features compared only when queueing knobs are unset, since
	 * no_path_retry/pg_timeout legitimately alter the features string */
178	if (!mpp->no_path_retry && !mpp->pg_timeout &&
179	    (strlen(cmpp->features) != strlen(mpp->features) ||
180	     strcmp(cmpp->features, mpp->features))) {
181		mpp->action = ACT_RELOAD;
182		condlog(3, "%s: set ACT_RELOAD (features change)",
	/* NOTE(review): strncmp bounded by strlen(mpp->...) treats
	 * "foo" == "foobar" as equal — presumably intentional prefix
	 * match, but worth confirming. */
186	if (strncmp(cmpp->hwhandler, mpp->hwhandler,
187		    strlen(mpp->hwhandler))) {
188		mpp->action = ACT_RELOAD;
189		condlog(3, "%s: set ACT_RELOAD (hwhandler change)",
193	if (strncmp(cmpp->selector, mpp->selector,
194		    strlen(mpp->selector))) {
195		mpp->action = ACT_RELOAD;
196		condlog(3, "%s: set ACT_RELOAD (selector change)",
200	if (cmpp->minio != mpp->minio) {
201		mpp->action = ACT_RELOAD;
202		condlog(3, "%s: set ACT_RELOAD (minio change, %u->%u)",
203			mpp->alias, cmpp->minio, mpp->minio);
206	if (VECTOR_SIZE(cmpp->pg) != VECTOR_SIZE(mpp->pg)) {
207		mpp->action = ACT_RELOAD;
208		condlog(3, "%s: set ACT_RELOAD (path group number change)",
212	if (pgcmp(mpp, cmpp)) {
213		mpp->action = ACT_RELOAD;
214		condlog(3, "%s: set ACT_RELOAD (path group topology change)",
	/* topology identical but a better path group is available */
218	if (cmpp->nextpg != mpp->bestpg) {
219		mpp->action = ACT_SWITCHPG;
220		condlog(3, "%s: set ACT_SWITCHPG (next path group change)",
224	mpp->action = ACT_NOTHING;
225	condlog(3, "%s: set ACT_NOTHING (map unchanged)",
/*
 * reinstate_paths(): walk every path group of mpp and ask the kernel to
 * reinstate paths whose device-mapper state is failed but which are not
 * truly down.  Truncated view — some conditions between the visible
 * lines are missing.
 */
231 reinstate_paths (struct multipath * mpp)
234	struct pathgroup * pgp;
240	vector_foreach_slot (mpp->pg, pgp, i) {
244		vector_foreach_slot (pgp->paths, pp, j) {
			/* skip non-up paths in disabled/active groups */
245			if (pp->state != PATH_UP &&
246			    (pgp->status == PGSTATE_DISABLED ||
247			     pgp->status == PGSTATE_ACTIVE))
			/* kernel marked it failed: try to bring it back */
250			if (pp->dmstate == PSTATE_FAILED) {
251				if (dm_reinstate_path(mpp->alias, pp->dev_t))
252					condlog(0, "%s: error reinstating",
/*
 * lock_multipath(): take (lock != 0) or release (lock == 0) an
 * exclusive, non-blocking flock() on every path fd of the map.  Used to
 * detect paths that are busy elsewhere before creating a map.
 * NOTE(review): truncated view — the error-unwind path (releasing locks
 * already taken when one flock fails) is not visible here; confirm it
 * exists upstream.
 */
261 lock_multipath (struct multipath * mpp, int lock)
263	struct pathgroup * pgp;
267	if (!mpp || !mpp->pg)
270	vector_foreach_slot (mpp->pg, pgp, i) {
273		vector_foreach_slot(pgp->paths, pp, j) {
			/* EWOULDBLOCK => somebody else holds the device */
274			if (lock && flock(pp->fd, LOCK_EX | LOCK_NB) &&
275			    errno == EWOULDBLOCK)
278				flock(pp->fd, LOCK_UN);
/* domap() return codes: RETRY (transient failure), EXIST (map already
 * present); DOMAP_FAIL/DOMAP_OK presumably defined in the lines missing
 * from this truncated view. */
287 #define DOMAP_RETRY -1
290 #define DOMAP_EXIST 2
/*
 * domap(): apply mpp->action to the device-mapper: create, reload,
 * rename or switch path group, then report/log the result.  Resets
 * mpp->action to ACT_NOTHING on completion.
 */
294 domap (struct multipath * mpp)
299	 * last chance to quit before touching the devmaps
	/* dry-run: only print what would be done */
301	if (conf->dry_run && mpp->action != ACT_NOTHING) {
302		print_multipath_topology(mpp, conf->verbosity);
306	switch (mpp->action) {
	/* ACT_SWITCHPG: change active group, then re-enable failed paths */
312		dm_switchgroup(mpp->alias, mpp->bestpg);
314		 * we may have avoided reinstating paths because there where in
315		 * active or disabled PG. Now that the topology has changed,
318		reinstate_paths(mpp);
	/* ACT_CREATE: lock paths, refuse if busy or already mapped */
322		if (lock_multipath(mpp, 1)) {
323			condlog(3, "%s: failed to create map (in use)",
328		if (dm_map_present(mpp->alias)) {
329			condlog(3, "%s: map already present", mpp->alias);
333		r = dm_addmap(DM_DEVICE_CREATE, mpp->alias, DEFAULT_TARGET,
334			      mpp->params, mpp->size, mpp->wwid);
337		 * DM_DEVICE_CREATE is actually DM_DEV_CREATE plus
338		 * DM_TABLE_LOAD. Failing the second part leaves an
339		 * empty map. Clean it up.
341		if (!r && dm_map_present(mpp->alias)) {
342			condlog(3, "%s: failed to load map "
343				"(a path might be in use)",
345			dm_flush_map(mpp->alias, DEFAULT_TARGET);
348		lock_multipath(mpp, 0);
	/* ACT_RELOAD: push the new table, then resume the device */
352		r = (dm_addmap(DM_DEVICE_RELOAD, mpp->alias, DEFAULT_TARGET,
353			       mpp->params, mpp->size, NULL) &&
354		     dm_simplecmd(DM_DEVICE_RESUME, mpp->alias));
	/* ACT_RENAME */
358		r = dm_rename(mpp->alias_old, mpp->alias);
367	 * DM_DEVICE_CREATE, DM_DEVICE_RENAME, or DM_DEVICE_RELOAD
371		/* multipath client mode */
372		dm_switchgroup(mpp->alias, mpp->bestpg);
373		if (mpp->action != ACT_NOTHING)
374			print_multipath_topology(mpp, conf->verbosity);
376		/* multipath daemon mode */
377		mpp->stat_map_loads++;
378		condlog(2, "%s: load table [0 %llu %s %s]", mpp->alias,
379			mpp->size, DEFAULT_TARGET, mpp->params);
381	 * Required action is over, reset for the stateful daemon
383	mpp->action = ACT_NOTHING;
/*
 * deadmap(): return non-zero when every path of the map has vanished
 * from sysfs (the per-path liveness test is in the lines missing from
 * this truncated view); returns 0 if at least one path is still there.
 */
391 deadmap (struct multipath * mpp)
394	struct pathgroup * pgp;
400	vector_foreach_slot (mpp->pg, pgp, i) {
404		vector_foreach_slot (pgp->paths, pp, j)
406	return 0; /* alive */
/*
 * coalesce_paths(): group the discovered paths in vecs->pathvec by wwid
 * into multipath maps, decide the dm action for each, apply it, and
 * store the surviving maps in newmp.  If refwwid is set, only paths
 * with that wwid are considered.  Finally, flush maps whose paths have
 * all disappeared from sysfs.
 * NOTE(review): truncated view — loop braces, `continue`s and several
 * returns between the visible lines are missing.
 */
413 coalesce_paths (struct vectors * vecs, vector newmp, char * refwwid)
417	char empty_buff[WWID_SIZE];
418	struct multipath * mpp;
421	vector curmp = vecs->mpvec;
422	vector pathvec = vecs->pathvec;
	/* all-zero wwid buffer, used to detect paths with no unique id */
424	memset(empty_buff, 0, WWID_SIZE);
426	vector_foreach_slot (pathvec, pp1, k) {
427		/* skip this path for some reason */
429		/* 1. if path has no unique id or wwid blacklisted */
430		if (memcmp(empty_buff, pp1->wwid, WWID_SIZE) == 0 ||
431		    filter_path(conf, pp1) > 0)
434		/* 2. if path already coalesced */
438		/* 3. if path has disappeared */
442		/* 4. path is out of scope */
443		if (refwwid && strncmp(pp1->wwid, refwwid, WWID_SIZE))
447		 * at this point, we know we really got a new mp
449		if ((mpp = add_map_with_path(vecs, pp1, 0)) == NULL)
		/* a path without a priority can't be grouped sanely */
452		if (pp1->priority == PRIO_UNDEF)
453			mpp->action = ACT_REJECT;
456			condlog(0, "%s: skip coalesce (no paths)", mpp->alias);
457			remove_map(mpp, vecs, 0);
		/* pull every later path with the same wwid into this map */
461		for (i = k + 1; i < VECTOR_SIZE(pathvec); i++) {
462			pp2 = VECTOR_SLOT(pathvec, i);
464			if (strcmp(pp1->wwid, pp2->wwid))
			/* mismatched sizes would corrupt the dm table */
470			if (pp2->size != mpp->size) {
472				 * ouch, avoid feeding that to the DM
474				condlog(0, "%s: size %llu, expected %llu. "
475					"Discard", pp2->dev_t, pp2->size,
477				mpp->action = ACT_REJECT;
479			if (pp2->priority == PRIO_UNDEF)
480				mpp->action = ACT_REJECT;
482		verify_paths(mpp, vecs, NULL);
484		if (setup_map(mpp)) {
485			remove_map(mpp, vecs, 0);
		/* pick the dm action unless it was forced (e.g. ACT_REJECT) */
489		if (mpp->action == ACT_UNDEF)
490			select_action(mpp, curmp);
494		if (r == DOMAP_FAIL || r == DOMAP_RETRY) {
495			condlog(3, "%s: domap (%u) failure "
496				"for create/reload map",
498			if (r == DOMAP_FAIL) {
499				remove_map(mpp, vecs, 0);
501			} else /* if (r == DOMAP_RETRY) */
		/* apply queue_if_no_path and pg_timeout to the live map */
507		if (mpp->no_path_retry != NO_PATH_RETRY_UNDEF) {
508			if (mpp->no_path_retry == NO_PATH_RETRY_FAIL)
509				dm_queue_if_no_path(mpp->alias, 0);
511				dm_queue_if_no_path(mpp->alias, 1);
513		if (mpp->pg_timeout != PGTIMEOUT_UNDEF) {
			/* NOTE(review): the unary minus on PGTIMEOUT_NONE looks
			 * odd — confirm the constant's sign convention. */
514			if (mpp->pg_timeout == -PGTIMEOUT_NONE)
515				dm_set_pg_timeout(mpp->alias, 0);
517				dm_set_pg_timeout(mpp->alias, mpp->pg_timeout);
		/* keep the map for the caller unless it was rejected */
521		if (mpp->action != ACT_REJECT) {
522			if (!vector_alloc_slot(newmp))
524			vector_set_slot(newmp, mpp);
527			remove_map(mpp, vecs, 0);
531	 * Flush maps with only dead paths (ie not in sysfs)
532	 * Keep maps with only failed paths
535	vector_foreach_slot (newmp, mpp, i) {
536		char alias[WWID_SIZE];
		/* NOTE(review): strncpy bounded by WWID_SIZE into a
		 * WWID_SIZE buffer may leave alias unterminated. */
542		strncpy(alias, mpp->alias, WWID_SIZE);
544		if ((j = find_slot(newmp, (void *)mpp)) != -1)
545			vector_del_slot(newmp, j);
547		remove_map(mpp, vecs, 0);
		/* NOTE(review): mpp->alias is read after remove_map() above —
		 * possible use-after-free depending on what remove_map frees;
		 * the saved `alias` copy suggests it should be used instead. */
549		if (dm_flush_map(mpp->alias, DEFAULT_TARGET))
550			condlog(2, "%s: remove failed (dead)",
553			condlog(2, "%s: remove (dead)", mpp->alias);
/*
 * get_refwwid(): resolve the wwid to operate on from a user-supplied
 * device reference, which may be a device node (DEV_DEVNODE), a
 * major:minor pair (DEV_DEVT) or a devmap name (DEV_DEVMAP).
 * Returns a freshly STRDUP'ed wwid string (caller frees), or NULL — the
 * NULL-return paths are in lines missing from this truncated view.
 */
560 get_refwwid (char * dev, enum devtypes dev_type, vector pathvec)
563	char buff[FILE_NAME_SIZE];
564	char * refwwid = NULL, tmpwwid[WWID_SIZE];
566	if (dev_type == DEV_NONE)
	/* device node: find (or discover and store) the path, use its wwid */
569	if (dev_type == DEV_DEVNODE) {
571		pp = find_path_by_dev(pathvec, buff);
579		strncpy(pp->dev, buff, FILE_NAME_SIZE);
581		if (pathinfo(pp, conf->hwtable, DI_SYSFS | DI_WWID))
584		if (store_path(pathvec, pp)) {
	/* major:minor: translate to a device name first */
593	if (dev_type == DEV_DEVT) {
594		pp = find_path_by_devt(pathvec, dev);
597		if (devt2devname(buff, dev))
605		strncpy(pp->dev, buff, FILE_NAME_SIZE);
607		if (pathinfo(pp, conf->hwtable, DI_SYSFS | DI_WWID))
610		if (store_path(pathvec, pp)) {
	/* devmap name: try dm uuid, then the bindings file, then mpe config */
618	if (dev_type == DEV_DEVMAP) {
620		if (((dm_get_uuid(dev, tmpwwid)) == 0) && (strlen(tmpwwid))) {
628			refwwid = get_user_friendly_wwid(dev,
629					conf->bindings_file);
637		refwwid = get_mpe_wwid(dev);
	/* hand the caller an owned copy */
646	if (refwwid && strlen(refwwid))
647		return STRDUP(refwwid);