2 * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
3 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
5 * This file is part of LVM2.
7 * This copyrighted material is made available to anyone wishing to use,
8 * modify, copy, or redistribute it subject to the terms and conditions
9 * of the GNU Lesser General Public License v.2.1.
11 * You should have received a copy of the GNU Lesser General Public License
12 * along with this program; if not, write to the Free Software Foundation,
13 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 #include "locking_types.h"
19 #include "lvm-string.h"
21 #include "toolcontext.h"
/* File-scope locking state; manipulated only by the functions below. */
32 static struct locking_type _locking;	/* Selected locking backend: ops table + flags. */
33 static sigset_t _oldset;	/* Signal mask saved by _block_signals() for later restore. */
35 static int _vg_lock_count = 0; /* Number of locks held */
36 static int _vg_write_lock_held = 0; /* VG write lock held? */
37 static int _signals_blocked = 0;	/* Non-zero while _block_signals() is in effect. */
38 static int _blocking_supported = 0;	/* From global/wait_for_locks config (see init_locking). */
/* SIGINT interception state shared by sigint_allow()/sigint_restore(). */
40 static volatile sig_atomic_t _sigint_caught = 0;	/* Set by _catch_sigint() handler. */
41 static volatile sig_atomic_t _handler_installed;	/* Nesting count of sigint_allow() calls. */
42 static struct sigaction _oldhandler;	/* Handler saved by the outermost sigint_allow(). */
43 static int _oldmasked;	/* Was SIGINT blocked before sigint_allow() unmasked it? */
/*
 * SIGINT handler: records that an interrupt was requested.
 * (Handler body is elided in this excerpt — presumably sets
 * _sigint_caught; confirm against the full source.)
 */
51 static void _catch_sigint(int unused __attribute__((unused)))

/* Predicate: has SIGINT been caught since the flag was last cleared? */
56 int sigint_caught(void) {
57 return _sigint_caught;

/* Reset the caught-interrupt flag (body elided in this excerpt). */
60 void sigint_clear(void)
66 * Temporarily allow keyboard interrupts to be intercepted and noted;
67 * saves interrupt handler state for sigint_restore(). Users should
68 * use the sigint_caught() predicate to check whether interrupt was
69 * requested and act appropriately. Interrupt flags are never
70 * cleared automatically by this code, but the tools clear the flag
71 * before running each command in lvm_run_command(). All other places
72 * where the flag needs to be cleared need to call sigint_clear().
/*
 * Enable SIGINT interception: install _catch_sigint and unmask SIGINT.
 * Calls nest; only the outermost call saves the previous handler and
 * mask state for sigint_restore().  NOTE(review): several lines are
 * elided in this excerpt (e.g. the sigset_t 'sigs' declaration, the
 * nesting-count increment, returns and closing braces).
 */
75 void sigint_allow(void)
77 struct sigaction handler;
81 * Do not overwrite the backed-up handler data -
82 * just increase nesting count.
/* Already installed: only the nesting count changes (increment elided here). */
84 if (_handler_installed) {
89 /* Grab old sigaction for SIGINT: shall not fail. */
90 sigaction(SIGINT, NULL, &handler);
91 handler.sa_flags &= ~SA_RESTART; /* Clear restart flag */
92 handler.sa_handler = _catch_sigint;
/* First (outermost) installation. */
94 _handler_installed = 1;
96 /* Override the signal handler: shall not fail. */
97 sigaction(SIGINT, &handler, &_oldhandler);
99 /* Unmask SIGINT. Remember to mask it again on restore. */
100 sigprocmask(0, NULL, &sigs);
/* Record whether SIGINT was blocked so sigint_restore() can re-block it. */
101 if ((_oldmasked = sigismember(&sigs, SIGINT))) {
102 sigdelset(&sigs, SIGINT);
103 sigprocmask(SIG_SETMASK, &sigs, NULL);
/*
 * Undo one nesting level of sigint_allow(); on the outermost call,
 * re-mask SIGINT and reinstall the saved handler.
 * NOTE(review): some lines are elided in this excerpt — presumably a
 * check of _oldmasked guards the re-masking below; confirm against
 * the full source.
 */
107 void sigint_restore(void)
109 if (!_handler_installed)
/* Still nested: just drop one level and keep the handler installed. */
112 if (_handler_installed > 1) {
113 _handler_installed--;
117 /* Nesting count went down to 0. */
118 _handler_installed = 0;
/* Re-add SIGINT to the process signal mask. */
122 sigprocmask(0, NULL, &sigs);
123 sigaddset(&sigs, SIGINT);
124 sigprocmask(SIG_SETMASK, &sigs, NULL);
/* Reinstall the handler saved by the outermost sigint_allow(). */
127 sigaction(SIGINT, &_oldhandler, NULL);
/*
 * Block all signals while locks are held, saving the previous mask in
 * _oldset for _unblock_signals().  Idempotent via _signals_blocked.
 * The flags argument is currently unused.
 * NOTE(review): the 'set' declaration, returns and closing braces are
 * elided in this excerpt.
 */
130 static void _block_signals(uint32_t flags __attribute__((unused)))
/* Already blocked: nothing to do. */
134 if (_signals_blocked)
137 if (sigfillset(&set)) {
138 log_sys_error("sigfillset", "_block_signals");
142 if (sigprocmask(SIG_SETMASK, &set, &_oldset)) {
143 log_sys_error("sigprocmask", "_block_signals");
147 _signals_blocked = 1;
150 static void _unblock_signals(void)
152 /* Don't unblock signals while any locks are held */
153 if (!_signals_blocked || _vg_lock_count)
156 if (sigprocmask(SIG_SETMASK, &_oldset, NULL)) {
157 log_sys_error("sigprocmask", "_block_signals");
161 _signals_blocked = 0;
/*
 * Pin memory before suspending an LV when the locking backend requests
 * it (LCK_PRE_MEMLOCK), so we cannot page-fault while devices are
 * suspended.  (The memlock call itself is elided in this excerpt.)
 */
164 static void _lock_memory(struct cmd_context *cmd, lv_operation_t lv_op)
166 if (!(_locking.flags & LCK_PRE_MEMLOCK))
/* Only the suspend path needs memory locked ahead of time. */
169 if (lv_op == LV_SUSPEND)
/*
 * Counterpart of _lock_memory(): after a resume, drop the memory lock
 * taken for LCK_PRE_MEMLOCK backends.  (The memunlock call itself is
 * elided in this excerpt.)
 */
173 static void _unlock_memory(struct cmd_context *cmd, lv_operation_t lv_op)
175 if (!(_locking.flags & LCK_PRE_MEMLOCK))
/* Only the resume path releases the memory lock. */
178 if (lv_op == LV_RESUME)
/*
 * Forcibly reset lock-tracking state; delegates to the backend's
 * optional reset_locking hook.  NOTE(review): the reset of
 * _vg_lock_count and the use of 'was_locked' are elided in this
 * excerpt — confirm against the full source.
 */
182 void reset_locking(void)
184 int was_locked = _vg_lock_count;
187 _vg_write_lock_held = 0;
/* Backend hook is optional; call only if the backend provides one. */
189 if (_locking.reset_locking)
190 _locking.reset_locking();
/*
 * Track how many VG metadata locks are held and whether any of them is
 * a write lock.  Cache-only (LCK_CACHE) locks, non-VG-scope locks and
 * the global lock (VG_GLOBAL) are excluded.  NOTE(review): the actual
 * increment/decrement of _vg_lock_count is elided in this excerpt.
 */
196 static void _update_vg_lock_count(const char *resource, uint32_t flags)
198 /* Ignore locks not associated with updating VG metadata */
199 if ((flags & LCK_SCOPE_MASK) != LCK_VG ||
200 (flags & LCK_CACHE) ||
201 !strcmp(resource, VG_GLOBAL))
/* Unlock drops the count (decrement itself elided here). */
204 if ((flags & LCK_TYPE_MASK) == LCK_UNLOCK)
209 /* We don't bother to reset this until all VG locks are dropped */
210 if ((flags & LCK_TYPE_MASK) == LCK_WRITE)
211 _vg_write_lock_held = 1;
212 else if (!_vg_lock_count)
213 _vg_write_lock_held = 0;
217 * Select a locking type
218 * type: locking type; if < 0, then read config tree value
/*
 * Select and initialise the locking backend.
 * type < 0 reads global/locking_type from the config tree.  Visible
 * backends in this excerpt: none (0, dangerous), file-based, external,
 * internal clustered, read-only; with configurable fallback to
 * clustered locking (external failure) or local file-based locking
 * (clustered failure).  NOTE(review): the switch statement, its case
 * labels and the success/failure returns are elided in this excerpt.
 */
220 int init_locking(int type, struct cmd_context *cmd, int suppress_messages)
222 if (ignorelockingfailure() && getenv("LVM_SUPPRESS_LOCKING_FAILURE_MESSAGES"))
223 suppress_messages = 1;
/* Negative type means: consult the configuration (default 1 = file-based). */
226 type = find_config_tree_int(cmd, "global/locking_type", 1);
228 _blocking_supported = find_config_tree_int(cmd,
229 "global/wait_for_locks", DEFAULT_WAIT_FOR_LOCKS);
/* Locking disabled entirely — metadata corruption is possible. */
233 init_no_locking(&_locking, cmd);
234 log_warn("WARNING: Locking disabled. Be careful! "
235 "This could corrupt your metadata.");
/* File-based locking (the default). */
239 log_very_verbose("%sFile-based locking selected.",
240 _blocking_supported ? "" : "Non-blocking ");
242 if (!init_file_locking(&_locking, cmd)) {
243 log_error_suppress(suppress_messages,
244 "File-based locking initialisation failed.");
/* External (shared-library) locking. */
252 log_very_verbose("External locking selected.");
253 if (init_external_locking(&_locking, cmd))
/* On external-locking failure, optionally fall back to clustered locking. */
256 if (!find_config_tree_int(cmd, "locking/fallback_to_clustered_locking",
257 find_config_tree_int(cmd, "global/fallback_to_clustered_locking",
258 DEFAULT_FALLBACK_TO_CLUSTERED_LOCKING))) {
259 log_error("External locking initialisation failed.");
264 #ifdef CLUSTER_LOCKING_INTERNAL
265 log_very_verbose("Falling back to internal clustered locking.");
/* Internal cluster locking. */
269 log_very_verbose("Cluster locking selected.");
270 if (!init_cluster_locking(&_locking, cmd)) {
271 log_error_suppress(suppress_messages,
272 "Internal cluster locking initialisation failed.");
/* Read-only locking: no metadata changes permitted. */
279 log_verbose("Read-only locking selected. "
280 "Only read operations permitted.");
281 if (!init_readonly_locking(&_locking, cmd))
286 log_error("Unknown locking type requested.");
/* Clustered types (2 external, 3 internal) may fall back to local locking. */
290 if ((type == 2 || type == 3) &&
291 find_config_tree_int(cmd, "locking/fallback_to_local_locking",
292 find_config_tree_int(cmd, "global/fallback_to_local_locking",
293 DEFAULT_FALLBACK_TO_LOCAL_LOCKING))) {
294 log_warn_suppress(suppress_messages, "WARNING: Falling back to local file-based locking.");
295 log_warn_suppress(suppress_messages,
296 "Volume Groups with the clustered attribute will "
298 if (init_file_locking(&_locking, cmd))
301 log_error_suppress(suppress_messages,
302 "File-based locking initialisation failed.");
/* Last resort: honour --ignorelockingfailure by dropping to read-only. */
305 if (!ignorelockingfailure())
308 log_verbose("Locking disabled - only read operations permitted.");
309 init_readonly_locking(&_locking, cmd);
/* Shut down the active locking backend via its fin_locking hook. */
314 void fin_locking(void)
316 _locking.fin_locking();
320 * Does the LVM1 driver know of this VG name?
/*
 * Check that the LVM1 (2.4-kernel) driver is not using this VG: if
 * <proc_dir>/lvm/VGs/<vgname> exists, the old driver owns the VG and
 * it must not be touched.  Orphan and global VG names are always
 * allowed.  NOTE(review): the declarations of 'path'/'info' and the
 * return statements are elided in this excerpt.
 */
322 int check_lvm1_vg_inactive(struct cmd_context *cmd, const char *vgname)
327 /* We'll allow operations on orphans */
328 if (is_orphan_vg(vgname) || is_global_vg(vgname))
331 /* LVM1 is only present in 2.4 kernels. */
332 if (strncmp(cmd->kernel_vsn, "2.4.", 4))
/* Build the /proc path for the VG; fail if it does not fit. */
335 if (dm_snprintf(path, sizeof(path), "%s/lvm/VGs/%s", cmd->proc_dir,
337 log_error("LVM1 proc VG pathname too long for %s", vgname);
341 if (stat(path, &info) == 0) {
342 log_error("%s exists: Is the original LVM driver using "
343 "this volume group?", path);
/* ENOENT/ENOTDIR mean the LVM1 driver is not using the VG; other errors are real. */
345 } else if (errno != ENOENT && errno != ENOTDIR) {
346 log_sys_error("stat", path);
354 * VG locking is by VG name.
355 * FIXME This should become VG uuid.
/*
 * Core lock routine: block signals, optionally pre-lock memory,
 * validate the request, then call the backend's lock_resource().
 * On success, VG-scope non-cache locks also keep lvmcache's lock
 * bookkeeping and the VG lock counters in sync.  NOTE(review):
 * returns, closing braces and some error paths are elided in this
 * excerpt.
 */
357 static int _lock_vol(struct cmd_context *cmd, const char *resource,
358 uint32_t flags, lv_operation_t lv_op)
/* Hold all signals for the duration of the lock operation. */
362 _block_signals(flags);
363 _lock_memory(cmd, lv_op);
368 log_error(INTERNAL_ERROR "Use of P_orphans is deprecated.");
/* Orphan/global VGs must not be referenced with cache (P_) locks. */
372 if ((is_orphan_vg(resource) || is_global_vg(resource)) && (flags & LCK_CACHE)) {
373 log_error(INTERNAL_ERROR "P_%s referenced", resource);
/* Refuse write locks in metadata_read_only mode (the global lock is exempt). */
377 if (cmd->metadata_read_only &&
378 ((flags & LCK_TYPE_MASK) == LCK_WRITE) &&
379 strcmp(resource, VG_GLOBAL)) {
380 log_error("Operation prohibited while global/metadata_read_only is set.");
384 if ((ret = _locking.lock_resource(cmd, resource, flags))) {
385 if ((flags & LCK_SCOPE_MASK) == LCK_VG &&
386 !(flags & LCK_CACHE)) {
/* Keep lvmcache's record of held VG name locks in sync. */
387 if ((flags & LCK_TYPE_MASK) == LCK_UNLOCK)
388 lvmcache_unlock_vgname(resource);
390 lvmcache_lock_vgname(resource, (flags & LCK_TYPE_MASK)
392 dev_reset_error_count(cmd);
395 _update_vg_lock_count(resource, flags);
/* Mirror of the entry sequence: memory first, then signals (unblock elided). */
399 _unlock_memory(cmd, lv_op);
/*
 * Public lock entry point: derive the LV suspend/resume operation from
 * the flags, apply scope-specific policy (non-blocking LV locks, VG
 * lock ordering, LVM1 driver check), then take the lock via
 * _lock_vol().  Real (non-LCK_CACHE) locks taken without LCK_HOLD are
 * released again immediately.  NOTE(review): several switch cases,
 * returns and braces are elided in this excerpt.
 */
405 int lock_vol(struct cmd_context *cmd, const char *vol, uint32_t flags)
407 char resource[258] __attribute__((aligned(8)));
408 lv_operation_t lv_op;
/* Map scope+type to the LV operation (the specific cases are elided here). */
410 switch (flags & (LCK_SCOPE_MASK | LCK_TYPE_MASK)) {
417 default: lv_op = LV_NOOP;
421 if (flags == LCK_NONE) {
422 log_debug(INTERNAL_ERROR "%s: LCK_NONE lock requested", vol);
426 switch (flags & LCK_SCOPE_MASK) {
/* VG scope: honour the global wait_for_locks setting. */
428 if (!_blocking_supported)
429 flags |= LCK_NONBLOCK;
431 /* Global VG_ORPHANS lock covers all orphan formats. */
432 if (is_orphan_vg(vol))
434 /* VG locks alphabetical, ORPHAN lock last */
435 if (((flags & LCK_TYPE_MASK) != LCK_UNLOCK) &&
436 !(flags & LCK_CACHE) &&
437 !lvmcache_verify_lock_order(vol))
440 /* Lock VG to change on-disk metadata. */
441 /* If LVM1 driver knows about the VG, it can't be accessed. */
442 if (!check_lvm1_vg_inactive(cmd, vol))
446 /* All LV locks are non-blocking. */
447 flags |= LCK_NONBLOCK;
450 log_error("Unrecognised lock scope: %d",
451 flags & LCK_SCOPE_MASK);
/*
 * NOTE(review): strncpy() does not NUL-terminate 'resource' when
 * strlen(vol) >= sizeof(resource); verify that callers can never pass
 * a name that long, or add an explicit length check / termination.
 */
455 strncpy(resource, vol, sizeof(resource));
457 if (!_lock_vol(cmd, resource, flags, lv_op))
461 * If a real lock was acquired (i.e. not LCK_CACHE),
462 * perform an immediate unlock unless LCK_HOLD was requested.
464 if (!(flags & LCK_CACHE) && !(flags & LCK_HOLD) &&
465 ((flags & LCK_TYPE_MASK) != LCK_UNLOCK)) {
466 if (!_lock_vol(cmd, resource,
467 (flags & ~LCK_TYPE_MASK) | LCK_UNLOCK, lv_op))
474 /* Unlock list of LVs */
/*
 * Resume every LV in the list.  NOTE(review): the handling of a
 * resume_lv() failure and the return statements are elided in this
 * excerpt.
 */
475 int resume_lvs(struct cmd_context *cmd, struct dm_list *lvs)
480 dm_list_iterate_items(lvl, lvs)
481 if (!resume_lv(cmd, lvl->lv)) {
489 /* Lock a list of LVs */
/*
 * Suspend every LV in the list.  If one suspend fails, walk back
 * through the already-suspended entries and resume them (rollback).
 * NOTE(review): return statements and closing braces are elided in
 * this excerpt.
 */
490 int suspend_lvs(struct cmd_context *cmd, struct dm_list *lvs)
495 dm_list_iterate_items(lvl, lvs) {
496 if (!suspend_lv(cmd, lvl->lv)) {
497 log_error("Failed to suspend %s", lvl->lv->name);
/* Roll back: resume everything suspended so far. */
498 dm_list_uniterate(lvh, lvs, &lvl->list) {
499 lvl = dm_list_item(lvh, struct lv_list);
500 if (!resume_lv(cmd, lvl->lv))
511 /* Lock a list of LVs */
/*
 * Activate every LV in the list — exclusively when 'exclusive' is set.
 * On failure of an exclusive activation, walk back over the entries
 * already processed (rollback).  NOTE(review): the branch selecting
 * exclusive vs. shared activation and parts of the rollback body are
 * elided in this excerpt.
 */
512 int activate_lvs(struct cmd_context *cmd, struct dm_list *lvs, unsigned exclusive)
517 dm_list_iterate_items(lvl, lvs) {
519 if (!activate_lv(cmd, lvl->lv)) {
520 log_error("Failed to activate %s", lvl->lv->name);
/* Exclusive activation failed: roll back over earlier entries. */
523 } else if (!activate_lv_excl(cmd, lvl->lv)) {
524 log_error("Failed to activate %s", lvl->lv->name);
525 dm_list_uniterate(lvh, lvs, &lvl->list) {
526 lvl = dm_list_item(lvh, struct lv_list);
527 if (!activate_lv(cmd, lvl->lv))
/* Is a VG write lock currently held? (Tracked by _update_vg_lock_count.) */
537 int vg_write_lock_held(void)
539 return _vg_write_lock_held;
/* Non-zero when the active locking backend advertises LCK_CLUSTERED. */
542 int locking_is_clustered(void)
544 return (_locking.flags & LCK_CLUSTERED) ? 1 : 0;
/*
 * Query whether 'vol' is locked on a remote node.  Only meaningful for
 * clustered backends that implement query_resource.  NOTE(review): the
 * early-exit return values, the 'mode' declaration and the body of the
 * error branch are elided in this excerpt; the closing of the function
 * lies beyond this view.
 */
547 int remote_lock_held(const char *vol)
551 if (!locking_is_clustered())
554 if (!_locking.query_resource)
558 * If an error occurred, expect that volume is active
560 if (!_locking.query_resource(vol, &mode)) {
/* LCK_NULL means no lock is held anywhere; anything else counts as held. */
565 return mode == LCK_NULL ? 0 : 1;