// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2011 Red Hat, Inc.  All rights reserved.
**
*******************************************************************************
******************************************************************************/
#include <linux/module.h>

#include "dlm_internal.h"
#include "lockspace.h"
#include "member.h"
#include "recoverd.h"
#include "dir.h"
#include "midcomms.h"
#include "lowcomms.h"
#include "config.h"
#include "memory.h"
#include "lock.h"
#include "recover.h"
#include "requestqueue.h"
#include "user.h"
#include "ast.h"
static int			ls_count;
static struct mutex		ls_lock;
static struct list_head		lslist;
static spinlock_t		lslist_lock;
static struct task_struct *	scand_task;
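/*
 * Lockspace sysfs attributes. These files under /sys/kernel/dlm/<name>/
 * form the kernel half of the handshake with the userspace cluster
 * manager (dlm_controld), which writes "control" and "event_done" to
 * drive lockspace stop/start and to complete uevent-driven joins.
 */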
static ssize_t dlm_control_store(struct dlm_ls *ls, const char *buf, size_t len)
{
	ssize_t ret = len;
	int n;
	int rc = kstrtoint(buf, 0, &n);

	if (rc)
		return rc;
	ls = dlm_find_lockspace_local(ls->ls_local_handle);
	if (!ls)
		return -EINVAL;

	switch (n) {
	case 0:
		dlm_ls_stop(ls);
		break;
	case 1:
		dlm_ls_start(ls);
		break;
	default:
		ret = -EINVAL;
	}
	dlm_put_lockspace(ls);
	return ret;
}
static ssize_t dlm_event_store(struct dlm_ls *ls, const char *buf, size_t len)
{
	int rc = kstrtoint(buf, 0, &ls->ls_uevent_result);

	if (rc)
		return rc;
	set_bit(LSFL_UEVENT_WAIT, &ls->ls_flags);
	wake_up(&ls->ls_uevent_wait);
	return len;
}
static ssize_t dlm_id_show(struct dlm_ls *ls, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", ls->ls_global_id);
}
static ssize_t dlm_id_store(struct dlm_ls *ls, const char *buf, size_t len)
{
	int rc = kstrtouint(buf, 0, &ls->ls_global_id);

	if (rc)
		return rc;
	return len;
}
static ssize_t dlm_nodir_show(struct dlm_ls *ls, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", dlm_no_directory(ls));
}
static ssize_t dlm_nodir_store(struct dlm_ls *ls, const char *buf, size_t len)
{
	int val;
	int rc = kstrtoint(buf, 0, &val);

	if (rc)
		return rc;
	if (val == 1)
		set_bit(LSFL_NODIR, &ls->ls_flags);
	return len;
}
static ssize_t dlm_recover_status_show(struct dlm_ls *ls, char *buf)
{
	uint32_t status = dlm_recover_status(ls);

	return snprintf(buf, PAGE_SIZE, "%x\n", status);
}
static ssize_t dlm_recover_nodeid_show(struct dlm_ls *ls, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", ls->ls_recover_nodeid);
}
struct dlm_attr {
	struct attribute attr;
	ssize_t (*show)(struct dlm_ls *, char *);
	ssize_t (*store)(struct dlm_ls *, const char *, size_t);
};
static struct dlm_attr dlm_attr_control = {
	.attr  = {.name = "control", .mode = S_IWUSR},
	.store = dlm_control_store
};
static struct dlm_attr dlm_attr_event = {
	.attr  = {.name = "event_done", .mode = S_IWUSR},
	.store = dlm_event_store
};
static struct dlm_attr dlm_attr_id = {
	.attr  = {.name = "id", .mode = S_IRUGO | S_IWUSR},
	.show  = dlm_id_show,
	.store = dlm_id_store
};
static struct dlm_attr dlm_attr_nodir = {
	.attr  = {.name = "nodir", .mode = S_IRUGO | S_IWUSR},
	.show  = dlm_nodir_show,
	.store = dlm_nodir_store
};
static struct dlm_attr dlm_attr_recover_status = {
	.attr = {.name = "recover_status", .mode = S_IRUGO},
	.show = dlm_recover_status_show
};
static struct dlm_attr dlm_attr_recover_nodeid = {
	.attr = {.name = "recover_nodeid", .mode = S_IRUGO},
	.show = dlm_recover_nodeid_show
};
static struct attribute *dlm_attrs[] = {
	&dlm_attr_control.attr,
	&dlm_attr_event.attr,
	&dlm_attr_id.attr,
	&dlm_attr_nodir.attr,
	&dlm_attr_recover_status.attr,
	&dlm_attr_recover_nodeid.attr,
	NULL,
};
ATTRIBUTE_GROUPS(dlm);
static ssize_t dlm_attr_show(struct kobject *kobj, struct attribute *attr,
			     char *buf)
{
	struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj);
	struct dlm_attr *a = container_of(attr, struct dlm_attr, attr);

	return a->show ? a->show(ls, buf) : 0;
}
static ssize_t dlm_attr_store(struct kobject *kobj, struct attribute *attr,
			      const char *buf, size_t len)
{
	struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj);
	struct dlm_attr *a = container_of(attr, struct dlm_attr, attr);

	return a->store ? a->store(ls, buf, len) : len;
}
static void lockspace_kobj_release(struct kobject *k)
{
	struct dlm_ls *ls = container_of(k, struct dlm_ls, ls_kobj);

	kfree(ls);
}
static const struct sysfs_ops dlm_attr_ops = {
	.show  = dlm_attr_show,
	.store = dlm_attr_store,
};
static struct kobj_type dlm_ktype = {
	.default_groups = dlm_groups,
	.sysfs_ops      = &dlm_attr_ops,
	.release        = lockspace_kobj_release,
};
static struct kset *dlm_kset;
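/*
 * Announce a join (KOBJ_ONLINE) or leave (KOBJ_OFFLINE) to userspace and
 * block until dlm_controld acknowledges by writing to the event_done
 * sysfs file, which sets LSFL_UEVENT_WAIT and records the result.
 */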
static int do_uevent(struct dlm_ls *ls, int in)
{
	if (in)
		kobject_uevent(&ls->ls_kobj, KOBJ_ONLINE);
	else
		kobject_uevent(&ls->ls_kobj, KOBJ_OFFLINE);

	log_rinfo(ls, "%s the lockspace group...", in ? "joining" : "leaving");

	/* dlm_controld will see the uevent, do the necessary group management
	   and then write to sysfs to wake us */

	wait_event(ls->ls_uevent_wait,
		   test_and_clear_bit(LSFL_UEVENT_WAIT, &ls->ls_flags));

	log_rinfo(ls, "group event done %d", ls->ls_uevent_result);

	return ls->ls_uevent_result;
}
static int dlm_uevent(struct kobject *kobj, struct kobj_uevent_env *env)
{
	struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj);

	add_uevent_var(env, "LOCKSPACE=%s", ls->ls_name);
	return 0;
}
static const struct kset_uevent_ops dlm_uevent_ops = {
	.uevent = dlm_uevent,
};
int __init dlm_lockspace_init(void)
{
	ls_count = 0;
	mutex_init(&ls_lock);
	INIT_LIST_HEAD(&lslist);
	spin_lock_init(&lslist_lock);

	dlm_kset = kset_create_and_add("dlm", &dlm_uevent_ops, kernel_kobj);
	if (!dlm_kset) {
		printk(KERN_WARNING "%s: cannot create kset\n", __func__);
		return -ENOMEM;
	}
	return 0;
}
void dlm_lockspace_exit(void)
{
	kset_unregister(dlm_kset);
}
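/*
 * The dlm_scand kthread periodically walks every lockspace and, once a
 * lockspace's scan interval (ci_scan_secs) has elapsed, scans its
 * resources (dlm_scan_rsbs) so unused rsbs can be aged out and freed.
 */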
static struct dlm_ls *find_ls_to_scan(void)
{
	struct dlm_ls *ls;

	spin_lock(&lslist_lock);
	list_for_each_entry(ls, &lslist, ls_list) {
		if (time_after_eq(jiffies, ls->ls_scan_time +
					   dlm_config.ci_scan_secs * HZ)) {
			spin_unlock(&lslist_lock);
			return ls;
		}
	}
	spin_unlock(&lslist_lock);
	return NULL;
}
static int dlm_scand(void *data)
{
	struct dlm_ls *ls;

	while (!kthread_should_stop()) {
		ls = find_ls_to_scan();
		if (ls) {
			if (dlm_lock_recovery_try(ls)) {
				ls->ls_scan_time = jiffies;
				dlm_scan_rsbs(ls);
#ifdef CONFIG_DLM_DEPRECATED_API
				dlm_scan_timeout(ls);
#endif
				dlm_unlock_recovery(ls);
			} else {
				/* recovery is in progress; retry this
				   lockspace again soon */
				ls->ls_scan_time += HZ;
			}
			continue;
		}
		schedule_timeout_interruptible(dlm_config.ci_scan_secs * HZ);
	}
	return 0;
}
static int dlm_scand_start(void)
{
	struct task_struct *p;
	int error = 0;

	p = kthread_run(dlm_scand, NULL, "dlm_scand");
	if (IS_ERR(p))
		error = PTR_ERR(p);
	else
		scand_task = p;
	return error;
}
static void dlm_scand_stop(void)
{
	kthread_stop(scand_task);
}
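/*
 * Lockspace lookup helpers. Each takes lslist_lock, finds a match by
 * global id, local handle, or device minor, and bumps ls_count so the
 * lockspace cannot be freed until dlm_put_lockspace() drops the reference.
 */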
struct dlm_ls *dlm_find_lockspace_global(uint32_t id)
{
	struct dlm_ls *ls;

	spin_lock(&lslist_lock);

	list_for_each_entry(ls, &lslist, ls_list) {
		if (ls->ls_global_id == id) {
			atomic_inc(&ls->ls_count);
			goto out;
		}
	}
	ls = NULL;
 out:
	spin_unlock(&lslist_lock);
	return ls;
}
struct dlm_ls *dlm_find_lockspace_local(dlm_lockspace_t *lockspace)
{
	struct dlm_ls *ls;

	spin_lock(&lslist_lock);
	list_for_each_entry(ls, &lslist, ls_list) {
		if (ls->ls_local_handle == lockspace) {
			atomic_inc(&ls->ls_count);
			goto out;
		}
	}
	ls = NULL;
 out:
	spin_unlock(&lslist_lock);
	return ls;
}
struct dlm_ls *dlm_find_lockspace_device(int minor)
{
	struct dlm_ls *ls;

	spin_lock(&lslist_lock);
	list_for_each_entry(ls, &lslist, ls_list) {
		if (ls->ls_device.minor == minor) {
			atomic_inc(&ls->ls_count);
			goto out;
		}
	}
	ls = NULL;
 out:
	spin_unlock(&lslist_lock);
	return ls;
}
void dlm_put_lockspace(struct dlm_ls *ls)
{
	if (atomic_dec_and_test(&ls->ls_count))
		wake_up(&ls->ls_count_wait);
}
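/*
 * Wait for the last reference to drop, then unlink the lockspace from
 * lslist. The count is re-checked under lslist_lock because a lookup
 * could take a new reference between the wait and the lock.
 */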
static void remove_lockspace(struct dlm_ls *ls)
{
retry:
	wait_event(ls->ls_count_wait, atomic_read(&ls->ls_count) == 0);

	spin_lock(&lslist_lock);
	if (atomic_read(&ls->ls_count) != 0) {
		spin_unlock(&lslist_lock);
		goto retry;
	}

	WARN_ON(ls->ls_create_count != 0);
	list_del(&ls->ls_list);
	spin_unlock(&lslist_lock);
}
static int threads_start(void)
{
	int error;

	error = dlm_scand_start();
	if (error) {
		log_print("cannot start dlm_scand thread %d", error);
		goto fail;
	}

	/* Thread for sending/receiving messages for all lockspaces */
	error = dlm_midcomms_start();
	if (error) {
		log_print("cannot start dlm lowcomms %d", error);
		goto scand_fail;
	}

	return 0;

 scand_fail:
	dlm_scand_stop();
 fail:
	return error;
}
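/*
 * Create and start a new lockspace: allocate and initialize the dlm_ls,
 * add it to lslist, start the callback and recovery threads, register the
 * sysfs kobject, then run the uevent handshake with dlm_controld and wait
 * for the first recovery to complete before handing the lockspace back.
 */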
static int new_lockspace(const char *name, const char *cluster,
			 uint32_t flags, int lvblen,
			 const struct dlm_lockspace_ops *ops, void *ops_arg,
			 int *ops_result, dlm_lockspace_t **lockspace)
{
	struct dlm_ls *ls;
	int i, size, error;
	int do_unreg = 0;
	int namelen = strlen(name);

	if (namelen > DLM_LOCKSPACE_LEN || namelen == 0)
		return -EINVAL;

	if (lvblen % 8)
		return -EINVAL;

	if (!try_module_get(THIS_MODULE))
		return -EINVAL;
	if (!dlm_user_daemon_available()) {
		log_print("dlm user daemon not available");
		error = -EUNATCH;
		goto out;
	}
	if (ops && ops_result) {
		if (!dlm_config.ci_recover_callbacks)
			*ops_result = -EOPNOTSUPP;
		else
			*ops_result = 0;
	}
	if (!cluster)
		log_print("dlm cluster name '%s' is being used without an application provided cluster name",
			  dlm_config.ci_cluster_name);

	if (dlm_config.ci_recover_callbacks && cluster &&
	    strncmp(cluster, dlm_config.ci_cluster_name, DLM_LOCKSPACE_LEN)) {
		log_print("dlm cluster name '%s' does not match the application cluster name '%s'",
			  dlm_config.ci_cluster_name, cluster);
		error = -EBADR;
		goto out;
	}
	error = 0;

	spin_lock(&lslist_lock);
	list_for_each_entry(ls, &lslist, ls_list) {
		WARN_ON(ls->ls_create_count <= 0);
		if (ls->ls_namelen != namelen)
			continue;
		if (memcmp(ls->ls_name, name, namelen))
			continue;
		if (flags & DLM_LSFL_NEWEXCL) {
			error = -EEXIST;
			break;
		}
		/* reuse the existing lockspace instead of creating another */
		ls->ls_create_count++;
		*lockspace = ls;
		error = 1;
		break;
	}
	spin_unlock(&lslist_lock);

	if (error)
		goto out;

	error = -ENOMEM;
	ls = kzalloc(sizeof(struct dlm_ls) + namelen, GFP_NOFS);
	if (!ls)
		goto out;
	memcpy(ls->ls_name, name, namelen);
	ls->ls_namelen = namelen;
	ls->ls_lvblen = lvblen;
	atomic_set(&ls->ls_count, 0);
	init_waitqueue_head(&ls->ls_count_wait);
	ls->ls_flags = 0;
	ls->ls_scan_time = jiffies;
	if (ops && dlm_config.ci_recover_callbacks) {
		ls->ls_ops = ops;
		ls->ls_ops_arg = ops_arg;
	}
#ifdef CONFIG_DLM_DEPRECATED_API
	if (flags & DLM_LSFL_TIMEWARN) {
		pr_warn_once("===============================================================\n"
			     "WARNING: the dlm DLM_LSFL_TIMEWARN flag is being deprecated and\n"
			     "         will be removed in v6.2!\n"
			     "         This includes the DLM_LSFL_TIMEWARN define in the UAPI header!\n"
			     "===============================================================\n");

		set_bit(LSFL_TIMEWARN, &ls->ls_flags);
	}
	/* ls_exflags are forced to match among nodes, and we don't
	 * need to require all nodes to have some flags set
	 */
	ls->ls_exflags = (flags & ~(DLM_LSFL_TIMEWARN | DLM_LSFL_FS |
				    DLM_LSFL_NEWEXCL));
#else
	/* ls_exflags are forced to match among nodes, and we don't
	 * need to require all nodes to have some flags set
	 */
	ls->ls_exflags = (flags & ~(DLM_LSFL_FS | DLM_LSFL_NEWEXCL));
#endif
	size = READ_ONCE(dlm_config.ci_rsbtbl_size);
	ls->ls_rsbtbl_size = size;

	ls->ls_rsbtbl = vmalloc(array_size(size, sizeof(struct dlm_rsbtable)));
	if (!ls->ls_rsbtbl)
		goto out_lsfree;
	for (i = 0; i < size; i++) {
		ls->ls_rsbtbl[i].keep.rb_node = NULL;
		ls->ls_rsbtbl[i].toss.rb_node = NULL;
		spin_lock_init(&ls->ls_rsbtbl[i].lock);
	}
	spin_lock_init(&ls->ls_remove_spin);
	init_waitqueue_head(&ls->ls_remove_wait);

	for (i = 0; i < DLM_REMOVE_NAMES_MAX; i++) {
		ls->ls_remove_names[i] = kzalloc(DLM_RESNAME_MAXLEN+1,
						 GFP_KERNEL);
		if (!ls->ls_remove_names[i])
			goto out_rsbtbl;
	}
	idr_init(&ls->ls_lkbidr);
	spin_lock_init(&ls->ls_lkbidr_spin);

	INIT_LIST_HEAD(&ls->ls_waiters);
	mutex_init(&ls->ls_waiters_mutex);
	INIT_LIST_HEAD(&ls->ls_orphans);
	mutex_init(&ls->ls_orphans_mutex);
#ifdef CONFIG_DLM_DEPRECATED_API
	INIT_LIST_HEAD(&ls->ls_timeout);
	mutex_init(&ls->ls_timeout_mutex);
#endif
	INIT_LIST_HEAD(&ls->ls_new_rsb);
	spin_lock_init(&ls->ls_new_rsb_spin);

	INIT_LIST_HEAD(&ls->ls_nodes);
	INIT_LIST_HEAD(&ls->ls_nodes_gone);
	ls->ls_num_nodes = 0;
	ls->ls_low_nodeid = 0;
	ls->ls_total_weight = 0;
	ls->ls_node_array = NULL;

	memset(&ls->ls_stub_rsb, 0, sizeof(struct dlm_rsb));
	ls->ls_stub_rsb.res_ls = ls;

	ls->ls_debug_rsb_dentry = NULL;
	ls->ls_debug_waiters_dentry = NULL;

	init_waitqueue_head(&ls->ls_uevent_wait);
	ls->ls_uevent_result = 0;
	init_completion(&ls->ls_recovery_done);
	ls->ls_recovery_result = -1;

	mutex_init(&ls->ls_cb_mutex);
	INIT_LIST_HEAD(&ls->ls_cb_delay);

	ls->ls_recoverd_task = NULL;
	mutex_init(&ls->ls_recoverd_active);
	spin_lock_init(&ls->ls_recover_lock);
	spin_lock_init(&ls->ls_rcom_spin);
	get_random_bytes(&ls->ls_rcom_seq, sizeof(uint64_t));
	ls->ls_recover_status = 0;
	ls->ls_recover_seq = 0;
	ls->ls_recover_args = NULL;
	init_rwsem(&ls->ls_in_recovery);
	init_rwsem(&ls->ls_recv_active);
	INIT_LIST_HEAD(&ls->ls_requestqueue);
	atomic_set(&ls->ls_requestqueue_cnt, 0);
	init_waitqueue_head(&ls->ls_requestqueue_wait);
	mutex_init(&ls->ls_requestqueue_mutex);
	spin_lock_init(&ls->ls_clear_proc_locks);
	/* Due to backwards compatibility with 3.1, we need to use the maximum
	 * possible dlm message size to be sure the message will fit and to
	 * avoid out-of-bounds issues. However, a 3.2 sending side
	 * might send less.
	 */
	ls->ls_recover_buf = kmalloc(DLM_MAX_SOCKET_BUFSIZE, GFP_NOFS);
	if (!ls->ls_recover_buf)
		goto out_lkbidr;
	ls->ls_slot = 0;
	ls->ls_num_slots = 0;
	ls->ls_slots_size = 0;
	ls->ls_slots = NULL;
	INIT_LIST_HEAD(&ls->ls_recover_list);
	spin_lock_init(&ls->ls_recover_list_lock);
	idr_init(&ls->ls_recover_idr);
	spin_lock_init(&ls->ls_recover_idr_lock);
	ls->ls_recover_list_count = 0;
	ls->ls_local_handle = ls;
	init_waitqueue_head(&ls->ls_wait_general);
	INIT_LIST_HEAD(&ls->ls_root_list);
	init_rwsem(&ls->ls_root_sem);
	spin_lock(&lslist_lock);
	ls->ls_create_count = 1;
	list_add(&ls->ls_list, &lslist);
	spin_unlock(&lslist_lock);
	if (flags & DLM_LSFL_FS) {
		error = dlm_callback_start(ls);
		if (error) {
			log_error(ls, "can't start dlm_callback %d", error);
			goto out_delist;
		}
	}
	init_waitqueue_head(&ls->ls_recover_lock_wait);
	/*
	 * Once started, dlm_recoverd first looks for ls in lslist, then
	 * initializes ls_in_recovery as locked in "down" mode.  We need
	 * to wait for the wakeup from dlm_recoverd because in_recovery
	 * has to start out in down mode.
	 */
	error = dlm_recoverd_start(ls);
	if (error) {
		log_error(ls, "can't start dlm_recoverd %d", error);
		goto out_callback;
	}
	wait_event(ls->ls_recover_lock_wait,
		   test_bit(LSFL_RECOVER_LOCK, &ls->ls_flags));
	/* let kobject handle freeing of ls if there's an error */
	do_unreg = 1;
	ls->ls_kobj.kset = dlm_kset;
	error = kobject_init_and_add(&ls->ls_kobj, &dlm_ktype, NULL,
				     "%s", ls->ls_name);
	if (error)
		goto out_recoverd;
	kobject_uevent(&ls->ls_kobj, KOBJ_ADD);
	/* This uevent triggers dlm_controld in userspace to add us to the
	   group of nodes that are members of this lockspace (managed by the
	   cluster infrastructure.)  Once it's done that, it tells us who the
	   current lockspace members are (via configfs) and then tells the
	   lockspace to start running (via sysfs) in dlm_ls_start(). */
	error = do_uevent(ls, 1);
	if (error)
		goto out_recoverd;
	/* wait until recovery is successful or failed */
	wait_for_completion(&ls->ls_recovery_done);
	error = ls->ls_recovery_result;
	if (error)
		goto out_members;
	dlm_create_debug_file(ls);
	log_rinfo(ls, "join complete");
	*lockspace = ls;
	return 0;
 out_members:
	do_uevent(ls, 0);
	dlm_clear_members(ls);
	kfree(ls->ls_node_array);
 out_recoverd:
	dlm_recoverd_stop(ls);
 out_callback:
	dlm_callback_stop(ls);
 out_delist:
	spin_lock(&lslist_lock);
	list_del(&ls->ls_list);
	spin_unlock(&lslist_lock);
	idr_destroy(&ls->ls_recover_idr);
	kfree(ls->ls_recover_buf);
 out_lkbidr:
	idr_destroy(&ls->ls_lkbidr);
 out_rsbtbl:
	for (i = 0; i < DLM_REMOVE_NAMES_MAX; i++)
		kfree(ls->ls_remove_names[i]);
	vfree(ls->ls_rsbtbl);
 out_lsfree:
	if (do_unreg)
		kobject_put(&ls->ls_kobj);
	else
		kfree(ls);
 out:
	module_put(THIS_MODULE);
	return error;
}
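/*
 * ls_count tracks how many lockspaces exist so that the shared helper
 * threads (dlm_scand and the comms layer) are started with the first
 * lockspace and stopped again when the last one goes away.
 */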
static int __dlm_new_lockspace(const char *name, const char *cluster,
			       uint32_t flags, int lvblen,
			       const struct dlm_lockspace_ops *ops,
			       void *ops_arg, int *ops_result,
			       dlm_lockspace_t **lockspace)
{
	int error = 0;

	mutex_lock(&ls_lock);
	if (!ls_count)
		error = threads_start();
	if (error)
		goto out;

	error = new_lockspace(name, cluster, flags, lvblen, ops, ops_arg,
			      ops_result, lockspace);
	if (!error)
		ls_count++;
	if (error > 0)
		error = 0;
	if (!ls_count) {
		dlm_scand_stop();
		dlm_midcomms_shutdown();
		dlm_lowcomms_stop();
	}
 out:
	mutex_unlock(&ls_lock);
	return error;
}
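/*
 * Kernel callers (e.g. gfs2) go through dlm_new_lockspace(), which forces
 * DLM_LSFL_FS so the in-kernel callback thread is started; the device
 * interface for userspace uses dlm_new_user_lockspace() without it.
 */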
int dlm_new_lockspace(const char *name, const char *cluster, uint32_t flags,
		      int lvblen, const struct dlm_lockspace_ops *ops,
		      void *ops_arg, int *ops_result,
		      dlm_lockspace_t **lockspace)
{
	return __dlm_new_lockspace(name, cluster, flags | DLM_LSFL_FS, lvblen,
				   ops, ops_arg, ops_result, lockspace);
}
int dlm_new_user_lockspace(const char *name, const char *cluster,
			   uint32_t flags, int lvblen,
			   const struct dlm_lockspace_ops *ops,
			   void *ops_arg, int *ops_result,
			   dlm_lockspace_t **lockspace)
{
	return __dlm_new_lockspace(name, cluster, flags, lvblen, ops,
				   ops_arg, ops_result, lockspace);
}
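/*
 * idr_for_each() callbacks used by lockspace_busy() to decide whether any
 * lkbs (or any locally owned lkbs) still exist, and by release_lockspace()
 * to free every remaining lkb.
 */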
static int lkb_idr_is_local(int id, void *p, void *data)
{
	struct dlm_lkb *lkb = p;

	return lkb->lkb_nodeid == 0 && lkb->lkb_grmode != DLM_LOCK_IV;
}
static int lkb_idr_is_any(int id, void *p, void *data)
{
	return 1;
}
static int lkb_idr_free(int id, void *p, void *data)
{
	struct dlm_lkb *lkb = p;

	if (lkb->lkb_lvbptr && lkb->lkb_flags & DLM_IFL_MSTCPY)
		dlm_free_lvb(lkb->lkb_lvbptr);

	dlm_free_lkb(lkb);
	return 0;
}
/* NOTE: We check the lkbidr here rather than the resource table.
   This is because there may be LKBs queued as ASTs that have been unlinked
   from their RSBs and are pending deletion once the AST has been delivered */
static int lockspace_busy(struct dlm_ls *ls, int force)
{
	int rv;

	spin_lock(&ls->ls_lkbidr_spin);
	if (force == 0) {
		rv = idr_for_each(&ls->ls_lkbidr, lkb_idr_is_any, ls);
	} else if (force == 1) {
		rv = idr_for_each(&ls->ls_lkbidr, lkb_idr_is_local, ls);
	} else {
		rv = 0;
	}
	spin_unlock(&ls->ls_lkbidr_spin);
	return rv;
}
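/*
 * Tear down a lockspace: drop it from lslist, stop the helper threads
 * (and the shared ones if this is the last lockspace), then free all
 * remaining lkbs, rsbs, and recovery state. The dlm_ls itself is freed
 * by lockspace_kobj_release() once the final kobject_put() runs.
 */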
static int release_lockspace(struct dlm_ls *ls, int force)
{
	struct dlm_rsb *rsb;
	struct rb_node *n;
	int i, busy, rv;

	busy = lockspace_busy(ls, force);
	spin_lock(&lslist_lock);
	if (ls->ls_create_count == 1) {
		if (busy) {
			rv = -EBUSY;
		} else {
			/* remove_lockspace takes ls off lslist */
			ls->ls_create_count = 0;
			rv = 0;
		}
	} else if (ls->ls_create_count > 1) {
		rv = --ls->ls_create_count;
	} else {
		rv = -EINVAL;
	}
	spin_unlock(&lslist_lock);

	if (rv) {
		log_debug(ls, "release_lockspace no remove %d", rv);
		return rv;
	}
	dlm_device_deregister(ls);

	if (force < 3 && dlm_user_daemon_available())
		do_uevent(ls, 0);

	dlm_recoverd_stop(ls);
	if (ls_count == 1) {
		dlm_scand_stop();
		dlm_clear_members(ls);
		dlm_midcomms_shutdown();
	}
	dlm_callback_stop(ls);

	remove_lockspace(ls);

	dlm_delete_debug_file(ls);
	idr_destroy(&ls->ls_recover_idr);
	kfree(ls->ls_recover_buf);
	/*
	 * Free all lkb's in idr
	 */

	idr_for_each(&ls->ls_lkbidr, lkb_idr_free, ls);
	idr_destroy(&ls->ls_lkbidr);
	/*
	 * Free all rsb's on rsbtbl[] lists
	 */

	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
		while ((n = rb_first(&ls->ls_rsbtbl[i].keep))) {
			rsb = rb_entry(n, struct dlm_rsb, res_hashnode);
			rb_erase(n, &ls->ls_rsbtbl[i].keep);
			dlm_free_rsb(rsb);
		}

		while ((n = rb_first(&ls->ls_rsbtbl[i].toss))) {
			rsb = rb_entry(n, struct dlm_rsb, res_hashnode);
			rb_erase(n, &ls->ls_rsbtbl[i].toss);
			dlm_free_rsb(rsb);
		}
	}
	vfree(ls->ls_rsbtbl);

	for (i = 0; i < DLM_REMOVE_NAMES_MAX; i++)
		kfree(ls->ls_remove_names[i]);
	while (!list_empty(&ls->ls_new_rsb)) {
		rsb = list_first_entry(&ls->ls_new_rsb, struct dlm_rsb,
				       res_hashchain);
		list_del(&rsb->res_hashchain);
		dlm_free_rsb(rsb);
	}
	/*
	 * Free structures on any other lists
	 */

	dlm_purge_requestqueue(ls);
	kfree(ls->ls_recover_args);
	dlm_clear_members(ls);
	dlm_clear_members_gone(ls);
	kfree(ls->ls_node_array);
	log_rinfo(ls, "release_lockspace final free");
	kobject_put(&ls->ls_kobj);
	/* The ls structure will be freed when the kobject is done with it */

	module_put(THIS_MODULE);
	return 0;
}
/*
 * Called when a system has released all its locks and is not going to use the
 * lockspace any longer.  We free everything we're managing for this lockspace.
 * Remaining nodes will go through the recovery process as if we'd died.  The
 * lockspace must continue to function as usual, participating in recoveries,
 * until this returns.
 *
 * Force has 4 possible values:
 * 0 - don't destroy lockspace if it has any LKBs
 * 1 - destroy lockspace if it has remote LKBs but not if it has local LKBs
 * 2 - destroy lockspace regardless of LKBs
 * 3 - destroy lockspace as part of a forced shutdown
 */
int dlm_release_lockspace(void *lockspace, int force)
{
	struct dlm_ls *ls;
	int error;

	ls = dlm_find_lockspace_local(lockspace);
	if (!ls)
		return -EINVAL;
	dlm_put_lockspace(ls);

	mutex_lock(&ls_lock);
	error = release_lockspace(ls, force);
	if (!error)
		ls_count--;
	if (!ls_count)
		dlm_lowcomms_stop();
	mutex_unlock(&ls_lock);

	return error;
}
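/*
 * Called when the userspace control daemon (dlm_controld) has gone away:
 * stop every lockspace that is still running so no further locking
 * activity happens without a control daemon to manage membership.
 */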
void dlm_stop_lockspaces(void)
{
	struct dlm_ls *ls;
	int count;

 restart:
	count = 0;
	spin_lock(&lslist_lock);
	list_for_each_entry(ls, &lslist, ls_list) {
		if (!test_bit(LSFL_RUNNING, &ls->ls_flags)) {
			count++;
			continue;
		}
		spin_unlock(&lslist_lock);
		log_error(ls, "no userland control daemon, stopping lockspace");
		dlm_ls_stop(ls);
		goto restart;
	}
	spin_unlock(&lslist_lock);

	if (count)
		log_print("dlm user daemon left %d lockspaces", count);
}
void dlm_stop_lockspaces_check(void)
{
	struct dlm_ls *ls;

	spin_lock(&lslist_lock);
	list_for_each_entry(ls, &lslist, ls_list) {
		if (WARN_ON(!rwsem_is_locked(&ls->ls_in_recovery) ||
			    !dlm_locking_stopped(ls)))
			break;
	}
	spin_unlock(&lslist_lock);
}