2 * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
5 * This file is part of LVM2.
7 * This copyrighted material is made available to anyone wishing to use,
8 * modify, copy, or redistribute it subject to the terms and conditions
9 * of the GNU Lesser General Public License v.2.1.
11 * You should have received a copy of the GNU Lesser General Public License
12 * along with this program; if not, write to the Free Software Foundation,
13 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 * Translates between disk and in-core formats.
22 #include "lvm-string.h"
24 #include "toolcontext.h"
/*
 * Validate that a VG name fits the fixed on-disk name field.
 * Returns nonzero when strlen(name) < NAME_LEN (room for the NUL).
 * NOTE(review): this listing is non-contiguous; braces between these
 * lines are not visible here.
 */
33 static int _check_vg_name(const char *name)
35 return strlen(name) < NAME_LEN;
39 * Extracts the last part of a path.
41 static char *_create_lv_name(struct dm_pool *mem, const char *full_name)
/* Find the final '/' so only the basename is kept. */
43 const char *ptr = strrchr(full_name, '/');
/*
 * Returns a pool-allocated copy of the component after the last '/'.
 * NOTE(review): the handling between strrchr() and the strdup (e.g. the
 * no-slash case and advancing past the '/') is not visible in this view.
 */
50 return dm_pool_strdup(mem, ptr);
/*
 * Translate an on-disk format1 PV header (struct pv_disk) into the
 * in-core struct physical_volume.  'vg' may be NULL (orphan PV);
 * system_id propagation and size sanity checks only apply when a VG
 * is present.  Returns via the usual int success/failure convention
 * (error paths are partly outside this view).
 */
53 int import_pv(const struct format_type *fmt, struct dm_pool *mem,
54 struct device *dev, struct volume_group *vg,
55 struct physical_volume *pv, struct pv_disk *pvd,
/* Start from a clean slate, then copy the binary UUID straight over. */
60 memset(pv, 0, sizeof(*pv));
61 memcpy(&pv->id, pvd->pv_uuid, ID_LEN);
/* Orphan PVs get the format's reserved orphan VG name; otherwise the
 * on-disk VG name is duplicated into the pool. */
65 pv->vg_name = fmt->orphan_vg_name;
66 else if (!(pv->vg_name = dm_pool_strdup(mem, (char *)pvd->vg_name))) {
67 log_error("Volume Group name allocation failed.");
/* Copy the owning VG's UUID from the on-disk VG header. */
71 memcpy(&pv->vgid, vgd->vg_uuid, sizeof(vg->id));
73 /* Store system_id from first PV if PV belongs to a VG */
74 if (vg && !*vg->system_id)
75 strncpy(vg->system_id, (char *)pvd->system_id, NAME_LEN);
/* A mismatching system_id on a later PV is only reported, not fixed. */
78 strncmp(vg->system_id, (char *)pvd->system_id, sizeof(pvd->system_id)))
79 log_very_verbose("System ID %s on %s differs from %s for "
80 "volume group", pvd->system_id,
81 pv_dev_name(pv), vg->system_id);
84 * If exported, we still need to flag in pv->status too because
85 * we don't always have a struct volume_group when we need this.
87 if (pvd->pv_status & VG_EXPORTED)
88 pv->status |= EXPORTED_VG;
90 if (pvd->pv_allocatable)
91 pv->status |= ALLOCATABLE_PV;
/* Straight field-for-field copy of geometry; nothing allocated yet. */
93 pv->size = pvd->pv_size;
94 pv->pe_size = pvd->pe_size;
95 pv->pe_start = pvd->pe_start;
96 pv->pe_count = pvd->pe_total;
97 pv->pe_alloc_count = 0;
100 /* Fix up pv size if missing or impossibly large */
101 if (!pv->size || pv->size > (1ULL << 62)) {
102 if (!dev_get_size(dev, &pv->size)) {
103 log_error("%s: Couldn't get size.", pv_dev_name(pv));
106 log_verbose("Fixing up missing format1 size (%s) "
107 "for PV %s", display_size(fmt->cmd, pv->size),
/* Cross-check: extents must fit on the underlying device.
 * NOTE(review): the comparison against 'size' is outside this view. */
110 size = pv->pe_count * (uint64_t) vg->extent_size +
113 log_warn("WARNING: Physical Volume %s is too "
114 "large for underlying device",
/* Fresh PV: empty tag and segment lists, then one whole-PV segment. */
119 dm_list_init(&pv->tags);
120 dm_list_init(&pv->segments);
122 if (!alloc_pv_segment_whole_pv(mem, pv))
/*
 * Compose a system_id "<prefix><hostname><timestamp>" into the
 * NAME_LEN-sized buffer 's'.  dm_snprintf() fails (< 0) when the
 * result would not fit, which is reported as an error.
 */
128 static int _system_id(struct cmd_context *cmd, char *s, const char *prefix)
131 if (dm_snprintf(s, NAME_LEN, "%s%s%lu",
132 prefix, cmd->hostname, time(NULL)) < 0) {
133 log_error("Generated system_id too long");
/*
 * Translate an in-core physical_volume back into the on-disk format1
 * struct pv_disk.  Also maintains the system_id export/import tagging:
 * exporting a VG appends EXPORTED_TAG to the VG name and stamps a new
 * system_id; importing stamps an IMPORTED_TAG system_id.
 */
140 int export_pv(struct cmd_context *cmd, struct dm_pool *mem __attribute__((unused)),
141 struct volume_group *vg,
142 struct pv_disk *pvd, struct physical_volume *pv)
144 memset(pvd, 0, sizeof(*pvd));
150 memcpy(pvd->pv_uuid, pv->id.uuid, ID_LEN);
/* Only a PV owned by a real (non-orphan) VG carries a VG name on disk. */
152 if (pv->vg_name && !is_orphan(pv)) {
153 if (!_check_vg_name(pv->vg_name))
/* NOTE(review): strncpy may leave pvd->vg_name unterminated when the
 * name exactly fills the field; _check_vg_name above is what prevents
 * that, so keep the two in sync. */
155 strncpy((char *)pvd->vg_name, pv->vg_name, sizeof(pvd->vg_name));
158 /* Preserve existing system_id if it exists */
159 if (vg && *vg->system_id)
160 strncpy((char *)pvd->system_id, vg->system_id, sizeof(pvd->system_id));
162 /* Is VG already exported or being exported? */
163 if (vg && vg_is_exported(vg)) {
164 /* Does system_id need setting? */
165 if (!*vg->system_id ||
166 strncmp(vg->system_id, EXPORTED_TAG,
167 sizeof(EXPORTED_TAG) - 1)) {
168 if (!_system_id(cmd, (char *)pvd->system_id, EXPORTED_TAG))
/* The EXPORTED_TAG suffix must still fit in the fixed name field. */
171 if (strlen((char *)pvd->vg_name) + sizeof(EXPORTED_TAG) >
172 sizeof(pvd->vg_name)) {
173 log_error("Volume group name %s too long to export",
177 strcat((char *)pvd->vg_name, EXPORTED_TAG);
180 /* Is VG being imported? */
181 if (vg && !vg_is_exported(vg) && *vg->system_id &&
182 !strncmp(vg->system_id, EXPORTED_TAG, sizeof(EXPORTED_TAG) - 1)) {
183 if (!_system_id(cmd, (char *)pvd->system_id, IMPORTED_TAG))
187 /* Generate system_id if PV is in VG */
188 if (!pvd->system_id[0])
189 if (!_system_id(cmd, (char *)pvd->system_id, ""))
192 /* Update internal system_id if we changed it */
195 strncmp(vg->system_id, (char *)pvd->system_id, sizeof(pvd->system_id))))
196 strncpy(vg->system_id, (char *)pvd->system_id, NAME_LEN);
198 //pvd->pv_major = MAJOR(pv->dev);
200 if (pv->status & ALLOCATABLE_PV)
201 pvd->pv_allocatable = PV_ALLOCATABLE;
/* Geometry copied field-for-field; lv_cur is filled in later by the
 * LV export pass. */
203 pvd->pv_size = pv->size;
204 pvd->lv_cur = 0; /* this is set when exporting the lv list */
/* pe_size comes from the VG when available, else from the PV itself
 * (the selecting conditional is outside this view). */
206 pvd->pe_size = vg->extent_size;
208 pvd->pe_size = pv->pe_size;
209 pvd->pe_total = pv->pe_count;
210 pvd->pe_allocated = pv->pe_alloc_count;
211 pvd->pe_start = pv->pe_start;
/*
 * Translate the on-disk VG header (dl->vgd) into the in-core
 * struct volume_group: UUID, name, status/access flag mapping and
 * extent geometry.  free_count starts equal to pe_total; allocation
 * accounting happens elsewhere.
 */
216 int import_vg(struct dm_pool *mem,
217 struct volume_group *vg, struct disk_list *dl)
219 struct vg_disk *vgd = &dl->vgd;
220 memcpy(vg->id.uuid, vgd->vg_uuid, ID_LEN);
/* The VG name lives in the PV header, not the VG header, in format1. */
222 if (!_check_vg_name((char *)dl->pvd.vg_name))
225 if (!(vg->name = dm_pool_strdup(mem, (char *)dl->pvd.vg_name)))
/* system_id buffer is allocated empty; import_pv() fills it from the
 * first PV that carries one. */
228 if (!(vg->system_id = dm_pool_alloc(mem, NAME_LEN)))
231 *vg->system_id = '\0';
/* Map on-disk status/access bits onto in-core status flags. */
233 if (vgd->vg_status & VG_EXPORTED)
234 vg->status |= EXPORTED_VG;
236 if (vgd->vg_status & VG_EXTENDABLE)
237 vg->status |= RESIZEABLE_VG;
239 if (vgd->vg_access & VG_READ)
240 vg->status |= LVM_READ;
242 if (vgd->vg_access & VG_WRITE)
243 vg->status |= LVM_WRITE;
245 if (vgd->vg_access & VG_CLUSTERED)
246 vg->status |= CLUSTERED;
248 if (vgd->vg_access & VG_SHARED)
249 vg->status |= SHARED;
251 vg->extent_size = vgd->pe_size;
252 vg->extent_count = vgd->pe_total;
/* All extents considered free at this point; allocation is accounted
 * for when LV segments are imported. */
253 vg->free_count = vgd->pe_total;
254 vg->max_lv = vgd->lv_max;
255 vg->max_pv = vgd->pv_max;
/* format1 has no per-VG allocation policy on disk; default it. */
256 vg->alloc = ALLOC_NORMAL;
/*
 * Inverse of import_vg(): fill the on-disk VG header from the in-core
 * volume_group, mapping in-core status flags back to the on-disk
 * access/status bit sets and copying the extent geometry.
 */
261 int export_vg(struct vg_disk *vgd, struct volume_group *vg)
263 memset(vgd, 0, sizeof(*vgd));
264 memcpy(vgd->vg_uuid, vg->id.uuid, ID_LEN);
266 if (vg->status & LVM_READ)
267 vgd->vg_access |= VG_READ;
269 if (vg->status & LVM_WRITE)
270 vgd->vg_access |= VG_WRITE;
272 if (vg_is_clustered(vg))
273 vgd->vg_access |= VG_CLUSTERED;
275 if (vg->status & SHARED)
276 vgd->vg_access |= VG_SHARED;
278 if (vg_is_exported(vg))
279 vgd->vg_status |= VG_EXPORTED;
281 if (vg_is_resizeable(vg))
282 vgd->vg_status |= VG_EXTENDABLE;
284 vgd->lv_max = vg->max_lv;
/* On-disk LV count includes snapshots as well as visible LVs. */
285 vgd->lv_cur = vg_visible_lvs(vg) + snapshot_count(vg);
287 vgd->pv_max = vg->max_pv;
288 vgd->pv_cur = vg->pv_count;
290 vgd->pe_size = vg->extent_size;
291 vgd->pe_total = vg->extent_count;
292 vgd->pe_allocated = vg->extent_count - vg->free_count;
/*
 * Translate an on-disk LV record (struct lv_disk) into the in-core
 * struct logical_volume: name (basename of the on-disk device path),
 * status/access flags, minor number policy, allocation policy,
 * read-ahead and size.
 */
297 int import_lv(struct cmd_context *cmd, struct dm_pool *mem,
298 struct logical_volume *lv, struct lv_disk *lvd)
/* lvd->lv_name is a full /dev path; keep only the last component. */
300 if (!(lv->name = _create_lv_name(mem, (char *)lvd->lv_name)))
303 lv->status |= VISIBLE_LV;
305 if (lvd->lv_status & LV_SPINDOWN)
306 lv->status |= SPINDOWN_LV;
/* Persistent minor on disk => fixed major/minor in core. */
308 if (lvd->lv_status & LV_PERSISTENT_MINOR) {
309 lv->status |= FIXED_MINOR;
310 lv->minor = MINOR(lvd->lv_dev);
311 lv->major = MAJOR(lvd->lv_dev);
317 if (lvd->lv_access & LV_READ)
318 lv->status |= LVM_READ;
320 if (lvd->lv_access & LV_WRITE)
321 lv->status |= LVM_WRITE;
323 if (lvd->lv_badblock)
324 lv->status |= BADBLOCK_ON;
326 /* Drop the unused LV_STRICT here */
327 if (lvd->lv_allocation & LV_CONTIGUOUS)
328 lv->alloc = ALLOC_CONTIGUOUS;
330 lv->alloc = ALLOC_NORMAL;
/* 0 on disk means "not set": fall back to the command default. */
332 if (!lvd->lv_read_ahead)
333 lv->read_ahead = cmd->default_settings.read_ahead;
335 lv->read_ahead = lvd->lv_read_ahead;
337 lv->size = lvd->lv_size;
338 lv->le_count = lvd->lv_allocated_le;
/*
 * Inverse of import_lv(): fill an on-disk LV record from the in-core
 * LV.  The on-disk name is the full device path
 * "<dev_dir><vg name>/<lv name>".
 */
343 static void _export_lv(struct lv_disk *lvd, struct volume_group *vg,
344 struct logical_volume *lv, const char *dev_dir)
346 memset(lvd, 0, sizeof(*lvd));
347 snprintf((char *)lvd->lv_name, sizeof(lvd->lv_name), "%s%s/%s",
348 dev_dir, vg->name, lv->name);
/* vg->name length was validated by the caller (export_lvs checks it
 * via _check_vg_name before exporting). */
350 strcpy((char *)lvd->vg_name, vg->name);
352 if (lv->status & LVM_READ)
353 lvd->lv_access |= LV_READ;
355 if (lv->status & LVM_WRITE)
356 lvd->lv_access |= LV_WRITE;
358 if (lv->status & SPINDOWN_LV)
359 lvd->lv_status |= LV_SPINDOWN;
/* Fixed minor LVs persist their device number; otherwise derive the
 * minor from the LV number under the LVM block major. */
361 if (lv->status & FIXED_MINOR) {
362 lvd->lv_status |= LV_PERSISTENT_MINOR;
363 lvd->lv_dev = MKDEV(lv->major, lv->minor);
365 lvd->lv_dev = MKDEV(LVM_BLK_MAJOR, lvnum_from_lvid(&lv->lvid));
/* AUTO/NONE have no format1 representation; store 0 ("unset"). */
368 if (lv->read_ahead == DM_READ_AHEAD_AUTO ||
369 lv->read_ahead == DM_READ_AHEAD_NONE)
370 lvd->lv_read_ahead = 0;
372 lvd->lv_read_ahead = lv->read_ahead;
/* Stripe geometry is taken from the LV's first segment.
 * NOTE(review): the lvd field assignments for these two expressions
 * are outside this view. */
375 dm_list_item(lv->segments.n, struct lv_segment)->area_count;
377 dm_list_item(lv->segments.n, struct lv_segment)->stripe_size;
379 lvd->lv_size = lv->size;
380 lvd->lv_allocated_le = lv->le_count;
382 if (lv->status & BADBLOCK_ON)
383 lvd->lv_badblock = LV_BADBLOCK_ON;
385 if (lv->alloc == ALLOC_CONTIGUOUS)
386 lvd->lv_allocation |= LV_CONTIGUOUS;
/*
 * Fill dl->extents with the LV-number/LE-number mapping for every
 * physical extent of 'lv' that lives on 'pv'.  Rejects segment types
 * format1 cannot represent and any non-PV (e.g. mirrored/virtual)
 * areas; extents on other PVs are simply skipped.
 */
389 int export_extents(struct disk_list *dl, uint32_t lv_num,
390 struct logical_volume *lv, struct physical_volume *pv)
393 struct lv_segment *seg;
396 dm_list_iterate_items(seg, &lv->segments) {
397 for (s = 0; s < seg->area_count; s++) {
398 if (!(seg->segtype->flags & SEG_FORMAT1_SUPPORT)) {
399 log_error("Segment type %s in LV %s: "
400 "unsupported by format1",
401 seg->segtype->name, lv->name);
404 if (seg_type(seg, s) != AREA_PV) {
405 log_error("Non-PV stripe found in LV %s: "
406 "unsupported by format1", lv->name);
409 if (seg_pv(seg, s) != pv)
410 continue; /* not our pv */
/* Walk this stripe's extents: seg->len is split evenly across
 * area_count stripes; le_num interleaves stripes the way the
 * format1 kernel driver expects. */
412 for (pe = 0; pe < (seg->len / seg->area_count); pe++) {
413 ped = &dl->extents[pe + seg_pe(seg, s)];
414 ped->lv_num = lv_num;
415 ped->le_num = (seg->le / seg->area_count) + pe +
416 s * (lv->le_count / seg->area_count);
/*
 * Build the VG's PV list: for each scanned disk_list allocate a
 * pv_list + physical_volume from the pool, import the on-disk PV
 * header, and attach the result to the VG.
 */
424 int import_pvs(const struct format_type *fmt, struct dm_pool *mem,
425 struct volume_group *vg, struct dm_list *pvds)
427 struct disk_list *dl;
431 dm_list_iterate_items(dl, pvds) {
432 if (!(pvl = dm_pool_zalloc(mem, sizeof(*pvl))) ||
433 !(pvl->pv = dm_pool_alloc(mem, sizeof(*pvl->pv))))
436 if (!import_pv(fmt, mem, dl->dev, vg, pvl->pv, &dl->pvd, &dl->vgd))
440 add_pvl_to_vgs(vg, pvl);
/*
 * Allocate a new in-core LV, derive its lvid from the VG id and the
 * on-disk lv_number, import its fields and link it into the VG.
 * On failure the partially-built LV is freed back to the pool.
 */
446 static struct logical_volume *_add_lv(struct dm_pool *mem,
447 struct volume_group *vg,
450 struct logical_volume *lv;
452 if (!(lv = alloc_lv(mem)))
455 lvid_from_lvnum(&lv->lvid, &vg->id, lvd->lv_number);
457 if (!import_lv(vg->cmd, mem, lv, lvd))
460 if (!link_lv_to_vg(vg, lv))
/* Error path: release the LV allocation. */
465 dm_pool_free(mem, lv);
/*
 * Import every LV record found on any PV of the VG.  The same LV
 * appears on several PVs, so find_lv() is used to skip names already
 * added.
 */
469 int import_lvs(struct dm_pool *mem, struct volume_group *vg, struct dm_list *pvds)
471 struct disk_list *dl;
475 dm_list_iterate_items(dl, pvds) {
476 dm_list_iterate_items(ll, &dl->lvds) {
479 if (!find_lv(vg, (char *)lvd->lv_name) &&
480 !_add_lv(mem, vg, lvd))
/*
 * Export every non-snapshot LV of the VG onto one disk_list: build a
 * struct lv_disk per LV (hashed by name for later lookup), write the
 * extent map for extents residing on 'pv', and set the snapshot
 * origin/cow access bits.  The lvd_hash is destroyed on exit.
 */
489 int export_lvs(struct disk_list *dl, struct volume_group *vg,
490 struct physical_volume *pv, const char *dev_dir)
494 struct lvd_list *lvdl;
497 struct dm_hash_table *lvd_hash;
/* _export_lv() strcpy()s vg->name into a fixed field; check it first. */
499 if (!_check_vg_name(vg->name))
502 if (!(lvd_hash = dm_hash_create(32)))
506 * setup the pv's extents array
508 len = sizeof(struct pe_disk) * dl->pvd.pe_total;
509 if (!(dl->extents = dm_pool_zalloc(dl->mem, len)))
512 dm_list_iterate_items(ll, &vg->lvs) {
/* Snapshot LVs are represented via the cow/origin bits on the
 * visible LVs, not exported as records of their own. */
513 if (ll->lv->status & SNAPSHOT)
516 if (!(lvdl = dm_pool_alloc(dl->mem, sizeof(*lvdl))))
519 _export_lv(&lvdl->lvd, vg, ll->lv, dev_dir);
521 lv_num = lvnum_from_lvid(&ll->lv->lvid);
522 lvdl->lvd.lv_number = lv_num;
524 if (!dm_hash_insert(lvd_hash, ll->lv->name, &lvdl->lvd))
/* On-disk extent records use 1-based LV numbers (0 = free). */
527 if (!export_extents(dl, lv_num + 1, ll->lv, pv))
530 if (lv_is_origin(ll->lv))
531 lvdl->lvd.lv_access |= LV_SNAPSHOT_ORG;
533 if (lv_is_cow(ll->lv)) {
534 lvdl->lvd.lv_access |= LV_SNAPSHOT;
535 lvdl->lvd.lv_chunk_size = ll->lv->snapshot->chunk_size;
/* format1 links cow to origin via the origin's LV number. */
536 lvdl->lvd.lv_snapshot_minor =
537 lvnum_from_lvid(&ll->lv->snapshot->origin->lvid);
540 dm_list_add(&dl->lvds, &lvdl->list);
547 dm_hash_destroy(lvd_hash);
552 * FIXME: More inefficient code.
/*
 * Re-establish snapshot relationships after LV import.  Two passes
 * over all on-disk LV records: (1) build an lv_number -> LV index,
 * (2) for each record flagged LV_SNAPSHOT, look up the origin via
 * lv_snapshot_minor and attach the cow to it.
 */
554 int import_snapshots(struct dm_pool *mem __attribute__((unused)), struct volume_group *vg,
555 struct dm_list *pvds)
557 struct logical_volume *lvs[MAX_LV];
558 struct disk_list *dl;
562 struct logical_volume *org, *cow;
564 /* build an index of lv numbers */
565 memset(lvs, 0, sizeof(lvs));
566 dm_list_iterate_items(dl, pvds) {
567 dm_list_iterate_items(ll, &dl->lvds) {
570 lvnum = lvd->lv_number;
/* Guard the fixed-size index before using lvnum below. */
572 if (lvnum >= MAX_LV) {
573 log_error("Logical volume number "
579 !(lvs[lvnum] = find_lv(vg, (char *)lvd->lv_name))) {
580 log_error("Couldn't find logical volume '%s'.",
588 * Now iterate through yet again adding the snapshots.
590 dm_list_iterate_items(dl, pvds) {
591 dm_list_iterate_items(ll, &dl->lvds) {
594 if (!(lvd->lv_access & LV_SNAPSHOT))
597 lvnum = lvd->lv_number;
/* lv_snapshot_minor holds the origin's LV number (see export_lvs). */
599 if (!(org = lvs[lvd->lv_snapshot_minor])) {
600 log_error("Couldn't find origin logical volume "
601 "for snapshot '%s'.", lvd->lv_name);
605 /* we may have already added this snapshot */
609 /* insert the snapshot */
610 if (!vg_add_snapshot(org, cow, NULL,
612 lvd->lv_chunk_size)) {
613 log_error("Couldn't add snapshot.");
/*
 * Record the UUID of every PV in the VG onto the disk_list's uuid
 * list.  Each entry is zero-padded to the full on-disk uuid field
 * before the ID_LEN bytes are copied in.
 */
622 int export_uuids(struct disk_list *dl, struct volume_group *vg)
624 struct uuid_list *ul;
627 dm_list_iterate_items(pvl, &vg->pvs) {
628 if (!(ul = dm_pool_alloc(dl->mem, sizeof(*ul))))
631 memset(ul->uuid, 0, sizeof(ul->uuid));
632 memcpy(ul->uuid, pvl->pv->id.uuid, ID_LEN);
634 dm_list_add(&dl->uuids, &ul->list);
640 * This calculates the nasty pv_number field
/*
 * Assign sequential pv_number values across the disk list.
 * NOTE(review): pv_num's declaration/initialiser is outside this view.
 */
643 void export_numbers(struct dm_list *pvds, struct volume_group *vg __attribute__((unused)))
645 struct disk_list *dl;
648 dm_list_iterate_items(dl, pvds)
649 dl->pvd.pv_number = pv_num++;
653 * Calculate vg_disk->pv_act.
/*
 * Count the PVs flagged PV_ACTIVE, then store that count into the
 * vg_disk header of every PV (each on-disk copy must agree).
 * NOTE(review): the 'act' counter's declaration/increment lines are
 * outside this view.
 */
655 void export_pv_act(struct dm_list *pvds)
657 struct disk_list *dl;
660 dm_list_iterate_items(dl, pvds)
661 if (dl->pvd.pv_status & PV_ACTIVE)
664 dm_list_iterate_items(dl, pvds)
665 dl->vgd.pv_act = act;
668 int export_vg_number(struct format_instance *fid, struct dm_list *pvds,
669 const char *vg_name, struct dev_filter *filter)
671 struct disk_list *dl;
674 if (!get_free_vg_number(fid, filter, vg_name, &vg_num))
677 dm_list_iterate_items(dl, pvds)
678 dl->vgd.vg_number = vg_num;