1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2006-2010 Red Hat, Inc. All rights reserved.
6 #include <linux/miscdevice.h>
7 #include <linux/init.h>
8 #include <linux/wait.h>
9 #include <linux/file.h>
11 #include <linux/poll.h>
12 #include <linux/signal.h>
13 #include <linux/spinlock.h>
14 #include <linux/dlm.h>
15 #include <linux/dlm_device.h>
16 #include <linux/slab.h>
17 #include <linux/sched/signal.h>
19 #include <trace/events/dlm.h>
21 #include "dlm_internal.h"
22 #include "lockspace.h"
24 #include "lvb_table.h"
/* Misc-device name prefix ("dlm_<lockspace>") and dlm-monitor open state. */
30 static const char name_prefix[] = "dlm";
31 static const struct file_operations device_fops;
32 static atomic_t dlm_monitor_opened;	/* number of opens of the monitor device */
33 static int dlm_monitor_unused = 1;	/* cleared once the monitor device is first opened */
/*
 * 32-bit compat layout of struct dlm_lock_params (pointers are __u32).
 * NOTE(review): listing is truncated here -- most members are not visible.
 */
37 struct dlm_lock_params32 {
51 char lvb[DLM_USER_LVB_LEN];
/*
 * 32-bit compat layout of struct dlm_write_request; the union member used
 * depends on the request cmd.  NOTE(review): listing truncated here.
 */
55 struct dlm_write_request32 {
62 struct dlm_lock_params32 lock;
63 struct dlm_lspace_params lspace;
64 struct dlm_purge_params purge;
/*
 * 32-bit compat layout of struct dlm_lock_result, filled by compat_output().
 * NOTE(review): listing truncated -- only the lksb member is visible.
 */
75 struct dlm_lock_result32 {
81 struct dlm_lksb32 lksb;
84 /* Offsets may be zero if no data is present */
/*
 * compat_input - convert a 32-bit userspace write request into the native
 * request layout.  Which union member is copied depends on kb->cmd:
 * lspace params for create/remove lockspace, purge params for purge,
 * lock params for everything else.  namelen bounds the variable-length
 * name copy at the end of the lock/lspace params.
 * NOTE(review): this listing appears truncated (the kb->cmd assignment and
 * several braces are not visible) -- verify against the full source file.
 */
88 static void compat_input(struct dlm_write_request *kb,
89 struct dlm_write_request32 *kb32,
92 kb->version[0] = kb32->version[0];
93 kb->version[1] = kb32->version[1];
94 kb->version[2] = kb32->version[2];
97 kb->is64bit = kb32->is64bit;
98 if (kb->cmd == DLM_USER_CREATE_LOCKSPACE ||
99 kb->cmd == DLM_USER_REMOVE_LOCKSPACE) {
100 kb->i.lspace.flags = kb32->i.lspace.flags;
101 kb->i.lspace.minor = kb32->i.lspace.minor;
102 memcpy(kb->i.lspace.name, kb32->i.lspace.name, namelen);
103 } else if (kb->cmd == DLM_USER_PURGE) {
104 kb->i.purge.nodeid = kb32->i.purge.nodeid;
105 kb->i.purge.pid = kb32->i.purge.pid;
107 kb->i.lock.mode = kb32->i.lock.mode;
108 kb->i.lock.namelen = kb32->i.lock.namelen;
109 kb->i.lock.flags = kb32->i.lock.flags;
110 kb->i.lock.lkid = kb32->i.lock.lkid;
111 kb->i.lock.parent = kb32->i.lock.parent;
112 kb->i.lock.xid = kb32->i.lock.xid;
113 kb->i.lock.timeout = kb32->i.lock.timeout;
/* Widen 32-bit user cookies back to user pointers.
 * NOTE(review): sparse convention is "(void __user *)"; "(__user void *)"
 * as written here looks mis-transcribed -- confirm against the real file. */
114 kb->i.lock.castparam = (__user void *)(long)kb32->i.lock.castparam;
115 kb->i.lock.castaddr = (__user void *)(long)kb32->i.lock.castaddr;
116 kb->i.lock.bastparam = (__user void *)(long)kb32->i.lock.bastparam;
117 kb->i.lock.bastaddr = (__user void *)(long)kb32->i.lock.bastaddr;
118 kb->i.lock.lksb = (__user void *)(long)kb32->i.lock.lksb;
119 memcpy(kb->i.lock.lvb, kb32->i.lock.lvb, DLM_USER_LVB_LEN);
120 memcpy(kb->i.lock.name, kb32->i.lock.name, namelen);
/*
 * compat_output - convert a native lock result to the 32-bit layout for a
 * compat process.  Kernel-side user pointers are truncated to __u32 cookies;
 * res32 is zeroed first so unset/padding bytes don't leak to userspace.
 * NOTE(review): listing truncated (length32 copy and braces not visible).
 */
124 static void compat_output(struct dlm_lock_result *res,
125 struct dlm_lock_result32 *res32)
127 memset(res32, 0, sizeof(*res32));
129 res32->version[0] = res->version[0];
130 res32->version[1] = res->version[1];
131 res32->version[2] = res->version[2];
133 res32->user_astaddr = (__u32)(__force long)res->user_astaddr;
134 res32->user_astparam = (__u32)(__force long)res->user_astparam;
135 res32->user_lksb = (__u32)(__force long)res->user_lksb;
136 res32->bast_mode = res->bast_mode;
138 res32->lvb_offset = res->lvb_offset;
139 res32->length = res->length;
141 res32->lksb.sb_status = res->lksb.sb_status;
142 res32->lksb.sb_flags = res->lksb.sb_flags;
143 res32->lksb.sb_lkid = res->lksb.sb_lkid;
144 res32->lksb.sb_lvbptr = (__u32)(long)res->lksb.sb_lvbptr;
148 /* must be called with proc->asts_spin lock held */
/*
 * Drop every queued callback on the lkb and reset the callback bookkeeping
 * (pending bit, last cast/cb pointers, last bast mode).
 * NOTE(review): listing truncated -- the list_del inside the loop and the
 * function braces are not visible.
 */
149 void dlm_purge_lkb_callbacks(struct dlm_lkb *lkb)
151 struct dlm_callback *cb, *safe;
153 list_for_each_entry_safe(cb, safe, &lkb->lkb_callbacks, list) {
155 kref_put(&cb->ref, dlm_release_callback);
158 clear_bit(DLM_IFL_CB_PENDING_BIT, &lkb->lkb_iflags);
161 dlm_callback_set_last_ptr(&lkb->lkb_last_cast, NULL);
162 dlm_callback_set_last_ptr(&lkb->lkb_last_cb, NULL);
163 lkb->lkb_last_bast_mode = -1;
166 /* Figure out if this lock is at the end of its life and no longer
167 available for the application to use. The lkb still exists until
168 the final ast is read. A lock becomes EOL in three situations:
169 1. a noqueue request fails with EAGAIN
170 2. an unlock completes with EUNLOCK
171 3. a cancel of a waiting request completes with ECANCEL/EDEADLK
172 An EOL lock needs to be removed from the process's list of locks.
173 And we can't allow any new operation on an EOL lock. This is
174 not related to the lifetime of the lkb struct which is managed
175 entirely by refcount. */
/* NOTE(review): listing truncated -- only the DLM_LOCK_IV check is
 * visible; the status switch described above is elided. */
177 static int lkb_is_endoflife(int mode, int status)
186 if (mode == DLM_LOCK_IV)
193 /* we could possibly check if the cancel of an orphan has resulted in the lkb
194 being removed and then remove that lkb from the orphans list and free it */
/*
 * dlm_user_add_ast - queue a completion/blocking ast for delivery to the
 * owning userspace process via its proc->asts list, waking any reader.
 * Also detaches end-of-life locks from the proc's ownqueue.  Runs under
 * ls_clear_proc_locks to serialize against dlm_clear_proc_locks().
 * NOTE(review): listing truncated -- declarations of ls/rv, goto labels,
 * the switch header and break statements are not visible.
 */
196 void dlm_user_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode,
197 int status, uint32_t sbflags)
200 struct dlm_user_args *ua;
201 struct dlm_user_proc *proc;
/* fast-path bail-out before taking any lock */
204 if (test_bit(DLM_DFL_ORPHAN_BIT, &lkb->lkb_dflags) ||
205 test_bit(DLM_IFL_DEAD_BIT, &lkb->lkb_iflags))
208 ls = lkb->lkb_resource->res_ls;
209 spin_lock(&ls->ls_clear_proc_locks);
211 /* If ORPHAN/DEAD flag is set, it means the process is dead so an ast
212 can't be delivered. For ORPHAN's, dlm_clear_proc_locks() freed
213 lkb->ua so we can't try to use it. This second check is necessary
214 for cases where a completion ast is received for an operation that
215 began before clear_proc_locks did its cancel/unlock. */
217 if (test_bit(DLM_DFL_ORPHAN_BIT, &lkb->lkb_dflags) ||
218 test_bit(DLM_IFL_DEAD_BIT, &lkb->lkb_iflags))
221 DLM_ASSERT(lkb->lkb_ua, dlm_print_lkb(lkb););
/* a bast with no registered bast address is silently dropped */
225 if ((flags & DLM_CB_BAST) && ua->bastaddr == NULL)
228 if ((flags & DLM_CB_CAST) && lkb_is_endoflife(mode, status))
229 set_bit(DLM_IFL_ENDOFLIFE_BIT, &lkb->lkb_iflags);
231 spin_lock(&proc->asts_spin);
233 rv = dlm_enqueue_lkb_callback(lkb, flags, mode, status, sbflags);
235 case DLM_ENQUEUE_CALLBACK_FAILURE:
236 spin_unlock(&proc->asts_spin);
239 case DLM_ENQUEUE_CALLBACK_NEED_SCHED:
/* first pending callback: hold a ref while lkb sits on proc->asts */
240 kref_get(&lkb->lkb_ref);
241 list_add_tail(&lkb->lkb_cb_list, &proc->asts);
242 wake_up_interruptible(&proc->wait);
244 case DLM_ENQUEUE_CALLBACK_SUCCESS:
250 spin_unlock(&proc->asts_spin);
252 if (test_bit(DLM_IFL_ENDOFLIFE_BIT, &lkb->lkb_iflags)) {
253 /* N.B. spin_lock locks_spin, not asts_spin */
254 spin_lock(&proc->locks_spin);
255 if (!list_empty(&lkb->lkb_ownqueue)) {
256 list_del_init(&lkb->lkb_ownqueue);
259 spin_unlock(&proc->locks_spin);
262 spin_unlock(&ls->ls_clear_proc_locks);
/*
 * device_user_lock - handle a DLM_USER_LOCK write: request, convert, or
 * orphan-adopt a lock in the proc's lockspace.  On a successful new request
 * the returned value is the new lkid (so userspace learns the lock id).
 * ua ownership passes to the lock machinery on success.
 * NOTE(review): listing truncated -- error checks after find_lockspace /
 * kzalloc, goto labels and the final return are not visible.
 */
265 static int device_user_lock(struct dlm_user_proc *proc,
266 struct dlm_lock_params *params)
269 struct dlm_user_args *ua;
273 ls = dlm_find_lockspace_local(proc->lockspace);
/* a lock request must supply a completion ast and an lksb */
277 if (!params->castaddr || !params->lksb) {
282 ua = kzalloc(sizeof(struct dlm_user_args), GFP_NOFS);
286 ua->user_lksb = params->lksb;
287 ua->castparam = params->castparam;
288 ua->castaddr = params->castaddr;
289 ua->bastparam = params->bastparam;
290 ua->bastaddr = params->bastaddr;
291 ua->xid = params->xid;
293 if (params->flags & DLM_LKF_CONVERT) {
294 error = dlm_user_convert(ls, ua,
295 params->mode, params->flags,
296 params->lkid, params->lvb);
297 } else if (params->flags & DLM_LKF_ORPHAN) {
298 error = dlm_user_adopt_orphan(ls, ua,
299 params->mode, params->flags,
300 params->name, params->namelen,
305 error = dlm_user_request(ls, ua,
306 params->mode, params->flags,
307 params->name, params->namelen);
309 error = ua->lksb.sb_lkid;
312 dlm_put_lockspace(ls);
/*
 * device_user_unlock - handle a DLM_USER_UNLOCK write: cancel the lock if
 * DLM_LKF_CANCEL is set, otherwise unlock it.  ua carries the user's cast
 * cookie back for ast delivery.
 * NOTE(review): listing truncated -- error checks, the lvb argument to
 * dlm_user_unlock and the final return are not visible.
 */
316 static int device_user_unlock(struct dlm_user_proc *proc,
317 struct dlm_lock_params *params)
320 struct dlm_user_args *ua;
323 ls = dlm_find_lockspace_local(proc->lockspace);
327 ua = kzalloc(sizeof(struct dlm_user_args), GFP_NOFS);
331 ua->user_lksb = params->lksb;
332 ua->castparam = params->castparam;
333 ua->castaddr = params->castaddr;
335 if (params->flags & DLM_LKF_CANCEL)
336 error = dlm_user_cancel(ls, ua, params->flags, params->lkid);
338 error = dlm_user_unlock(ls, ua, params->flags, params->lkid,
341 dlm_put_lockspace(ls);
/*
 * device_user_deadlock - handle a DLM_USER_DEADLOCK write by forwarding the
 * lkid to dlm_user_deadlock() in the proc's lockspace.
 * NOTE(review): listing truncated -- error check and return not visible.
 */
345 static int device_user_deadlock(struct dlm_user_proc *proc,
346 struct dlm_lock_params *params)
351 ls = dlm_find_lockspace_local(proc->lockspace);
355 error = dlm_user_deadlock(ls, params->flags, params->lkid);
357 dlm_put_lockspace(ls);
/*
 * dlm_device_register - create the per-lockspace misc device named
 * "dlm_<name>".  Idempotent: returns early if the device already exists.
 * On misc_register() failure the name is freed and reset to NULL so a
 * later deregister doesn't double-free.
 */
361 static int dlm_device_register(struct dlm_ls *ls, char *name)
365 /* The device is already registered. This happens when the
366 lockspace is created multiple times from userspace. */
367 if (ls->ls_device.name)
/* prefix + '_' separator + name + NUL */
371 len = strlen(name) + strlen(name_prefix) + 2;
372 ls->ls_device.name = kzalloc(len, GFP_NOFS);
373 if (!ls->ls_device.name)
376 snprintf((char *)ls->ls_device.name, len, "%s_%s", name_prefix,
378 ls->ls_device.fops = &device_fops;
379 ls->ls_device.minor = MISC_DYNAMIC_MINOR;
381 error = misc_register(&ls->ls_device);
383 kfree(ls->ls_device.name);
384 /* this has to be set to NULL
385 * to avoid a double-free in dlm_device_deregister
387 ls->ls_device.name = NULL;
/*
 * dlm_device_deregister - tear down the per-lockspace misc device and free
 * its name.  A NULL name means the device was never registered (or the
 * register failed), so there is nothing to undo.
 */
393 int dlm_device_deregister(struct dlm_ls *ls)
395 /* The device is not registered. This happens when the lockspace
396 was never used from userspace, or when device_create_lockspace()
397 calls dlm_release_lockspace() after the register fails. */
398 if (!ls->ls_device.name)
401 misc_deregister(&ls->ls_device);
402 kfree(ls->ls_device.name);
/*
 * device_user_purge - handle a DLM_USER_PURGE write: drop locks owned by
 * the given nodeid/pid in the proc's lockspace.
 * NOTE(review): listing truncated -- error check and return not visible.
 */
406 static int device_user_purge(struct dlm_user_proc *proc,
407 struct dlm_purge_params *params)
412 ls = dlm_find_lockspace_local(proc->lockspace);
416 error = dlm_user_purge(ls, proc, params->nodeid, params->pid);
418 dlm_put_lockspace(ls);
/*
 * device_create_lockspace - handle a create-lockspace write on the control
 * device (CAP_SYS_ADMIN only).  Creates the user lockspace, registers its
 * misc device, and on success returns the device minor so userspace can
 * open it.  On register failure the lockspace reference is released.
 * NOTE(review): listing truncated -- error branches and returns elided.
 */
422 static int device_create_lockspace(struct dlm_lspace_params *params)
424 dlm_lockspace_t *lockspace;
428 if (!capable(CAP_SYS_ADMIN))
431 error = dlm_new_user_lockspace(params->name, dlm_config.ci_cluster_name,
432 params->flags, DLM_USER_LVB_LEN, NULL,
433 NULL, NULL, &lockspace);
437 ls = dlm_find_lockspace_local(lockspace);
441 error = dlm_device_register(ls, params->name);
442 dlm_put_lockspace(ls);
445 dlm_release_lockspace(lockspace, 0);
447 error = ls->ls_device.minor;
/*
 * device_remove_lockspace - handle a remove-lockspace write on the control
 * device (CAP_SYS_ADMIN only).  Looks the lockspace up by device minor,
 * honors DLM_USER_LSFLG_FORCEFREE, and releases it.
 * NOTE(review): listing truncated -- error checks and returns elided.
 */
452 static int device_remove_lockspace(struct dlm_lspace_params *params)
454 dlm_lockspace_t *lockspace;
456 int error, force = 0;
458 if (!capable(CAP_SYS_ADMIN))
461 ls = dlm_find_lockspace_device(params->minor);
465 if (params->flags & DLM_USER_LSFLG_FORCEFREE)
468 lockspace = ls->ls_local_handle;
469 dlm_put_lockspace(ls);
471 /* The final dlm_release_lockspace waits for references to go to
472 zero, so all processes will need to close their device for the
473 ls before the release will proceed. release also calls the
474 device_deregister above. Converting a positive return value
475 from release to zero means that userspace won't know when its
476 release was the final one, but it shouldn't need to know. */
478 error = dlm_release_lockspace(lockspace, force);
484 /* Check the user's version matches ours */
/*
 * Reject requests whose device ABI version has a different major, or the
 * same major but a newer minor than the kernel supports; log the mismatch.
 * NOTE(review): listing truncated -- the req->version format arguments and
 * the return statements are not visible.
 */
485 static int check_version(struct dlm_write_request *req)
487 if (req->version[0] != DLM_DEVICE_VERSION_MAJOR ||
488 (req->version[0] == DLM_DEVICE_VERSION_MAJOR &&
489 req->version[1] > DLM_DEVICE_VERSION_MINOR)) {
491 printk(KERN_DEBUG "dlm: process %s (%d) version mismatch "
492 "user (%d.%d.%d) kernel (%d.%d.%d)\n",
494 task_pid_nr(current),
498 DLM_DEVICE_VERSION_MAJOR,
499 DLM_DEVICE_VERSION_MINOR,
500 DLM_DEVICE_VERSION_PATCH);
510 * dlm_user_request -> request_lock
511 * dlm_user_convert -> convert_lock
514 * dlm_user_unlock -> unlock_lock
515 * dlm_user_cancel -> cancel_lock
517 * device_create_lockspace
520 * device_remove_lockspace
521 * dlm_release_lockspace
524 /* a write to a lockspace device is a lock or unlock request, a write
525 to the control device is to create/remove a lockspace */
/*
 * device_write - dispatch a userspace request.  A write to a lockspace
 * device is a lock/unlock/deadlock/purge op; a write to the control device
 * creates or removes a lockspace.  32-bit requests (is64bit == 0) are
 * reallocated and converted to the native layout via compat_input().
 * NOTE(review): listing heavily truncated -- COMPAT #ifdefs, several error
 * paths, the switch header, break statements and the out/free labels are
 * not visible.
 */
527 static ssize_t device_write(struct file *file, const char __user *buf,
528 size_t count, loff_t *ppos)
530 struct dlm_user_proc *proc = file->private_data;
531 struct dlm_write_request *kbuf;
/* minimum size check differs for compat vs native requests */
535 if (count < sizeof(struct dlm_write_request32))
537 if (count < sizeof(struct dlm_write_request))
542 * can't compare against COMPAT/dlm_write_request32 because
543 * we don't yet know if is64bit is zero
545 if (count > sizeof(struct dlm_write_request) + DLM_RESNAME_MAXLEN)
548 kbuf = memdup_user_nul(buf, count);
550 return PTR_ERR(kbuf);
552 if (check_version(kbuf)) {
558 if (!kbuf->is64bit) {
559 struct dlm_write_request32 *k32buf;
562 if (count > sizeof(struct dlm_write_request32))
563 namelen = count - sizeof(struct dlm_write_request32);
565 k32buf = (struct dlm_write_request32 *)kbuf;
567 /* add 1 after namelen so that the name string is terminated */
568 kbuf = kzalloc(sizeof(struct dlm_write_request) + namelen + 1,
/* remember this proc speaks the 32-bit ABI for later reads */
576 set_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags);
578 compat_input(kbuf, k32buf, namelen);
583 /* do we really need this? can a write happen after a close? */
584 if ((kbuf->cmd == DLM_USER_LOCK || kbuf->cmd == DLM_USER_UNLOCK) &&
585 (proc && test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags))) {
/* proc == NULL means this is the control device: only lockspace
 * create/remove are valid there */
596 log_print("no locking on control device");
599 error = device_user_lock(proc, &kbuf->i.lock);
602 case DLM_USER_UNLOCK:
604 log_print("no locking on control device");
607 error = device_user_unlock(proc, &kbuf->i.lock);
610 case DLM_USER_DEADLOCK:
612 log_print("no locking on control device");
615 error = device_user_deadlock(proc, &kbuf->i.lock);
618 case DLM_USER_CREATE_LOCKSPACE:
620 log_print("create/remove only on control device");
623 error = device_create_lockspace(&kbuf->i.lspace);
626 case DLM_USER_REMOVE_LOCKSPACE:
628 log_print("create/remove only on control device");
631 error = device_remove_lockspace(&kbuf->i.lspace);
636 log_print("no locking on control device");
639 error = device_user_purge(proc, &kbuf->i.purge);
643 log_print("Unknown command passed to DLM device : %d\n",
652 /* Every process that opens the lockspace device has its own "proc" structure
653 hanging off the open file that's used to keep track of locks owned by the
654 process and asts that need to be delivered to the process. */
/*
 * device_open - allocate and initialize the per-open dlm_user_proc for the
 * lockspace identified by the device minor.  The lockspace reference taken
 * here is dropped in device_close().
 * NOTE(review): listing truncated -- error returns are not visible.
 */
656 static int device_open(struct inode *inode, struct file *file)
658 struct dlm_user_proc *proc;
661 ls = dlm_find_lockspace_device(iminor(inode));
665 proc = kzalloc(sizeof(struct dlm_user_proc), GFP_NOFS);
667 dlm_put_lockspace(ls);
671 proc->lockspace = ls->ls_local_handle;
672 INIT_LIST_HEAD(&proc->asts);
673 INIT_LIST_HEAD(&proc->locks);
674 INIT_LIST_HEAD(&proc->unlocking);
675 spin_lock_init(&proc->asts_spin);
676 spin_lock_init(&proc->locks_spin);
677 init_waitqueue_head(&proc->wait);
678 file->private_data = proc;
/*
 * device_close - release everything this open owned: mark the proc as
 * closing, clear its locks, then drop both lockspace references (the one
 * taken here and the one from device_open()).
 * NOTE(review): listing truncated -- error handling and the proc kfree are
 * not visible.
 */
683 static int device_close(struct inode *inode, struct file *file)
685 struct dlm_user_proc *proc = file->private_data;
688 ls = dlm_find_lockspace_local(proc->lockspace);
692 set_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags);
694 dlm_clear_proc_locks(ls, proc);
696 /* at this point no more lkb's should exist for this lockspace,
697 so there's no chance of dlm_user_add_ast() being called and
698 looking for lkb->ua->proc */
701 file->private_data = NULL;
703 dlm_put_lockspace(ls);
704 dlm_put_lockspace(ls); /* for the find in device_open() */
706 /* FIXME: AUTOFREE: if this ls is no longer used do
707 device_remove_lockspace() */
/*
 * copy_result_to_user - marshal one ast into a struct dlm_lock_result in
 * the user's read buffer, optionally appending the LVB after the fixed
 * struct and recording its offset.  For compat procs the result is first
 * converted to the 32-bit layout.
 * NOTE(review): listing truncated -- len/struct_len setup, the EFAULT
 * returns and the final length bookkeeping are not all visible.
 */
712 static int copy_result_to_user(struct dlm_user_args *ua, int compat,
713 uint32_t flags, int mode, int copy_lvb,
714 char __user *buf, size_t count)
717 struct dlm_lock_result32 result32;
719 struct dlm_lock_result result;
725 memset(&result, 0, sizeof(struct dlm_lock_result));
726 result.version[0] = DLM_DEVICE_VERSION_MAJOR;
727 result.version[1] = DLM_DEVICE_VERSION_MINOR;
728 result.version[2] = DLM_DEVICE_VERSION_PATCH;
/* copy the lksb minus its kernel-side lvb pointer */
729 memcpy(&result.lksb, &ua->lksb, offsetof(struct dlm_lksb, sb_lvbptr));
730 result.user_lksb = ua->user_lksb;
732 /* FIXME: dlm1 provides for the user's bastparam/addr to not be updated
733 in a conversion unless the conversion is successful. See code
734 in dlm_user_convert() for updating ua from ua_tmp. OpenVMS, though,
735 notes that a new blocking AST address and parameter are set even if
736 the conversion fails, so maybe we should just do that. */
738 if (flags & DLM_CB_BAST) {
739 result.user_astaddr = ua->bastaddr;
740 result.user_astparam = ua->bastparam;
741 result.bast_mode = mode;
743 result.user_astaddr = ua->castaddr;
744 result.user_astparam = ua->castparam;
749 len = sizeof(struct dlm_lock_result32);
752 len = sizeof(struct dlm_lock_result);
755 /* copy lvb to userspace if there is one, it's been updated, and
756 the user buffer has space for it */
758 if (copy_lvb && ua->lksb.sb_lvbptr && count >= len + DLM_USER_LVB_LEN) {
759 if (copy_to_user(buf+len, ua->lksb.sb_lvbptr,
765 result.lvb_offset = len;
766 len += DLM_USER_LVB_LEN;
773 compat_output(&result, &result32);
774 resultptr = &result32;
778 if (copy_to_user(buf, resultptr, struct_len))
/*
 * copy_version_to_user - answer a version-probe read by copying the
 * kernel's device ABI version; returns the number of bytes written.
 * NOTE(review): listing truncated -- the -EFAULT return is not visible.
 */
786 static int copy_version_to_user(char __user *buf, size_t count)
788 struct dlm_device_version ver;
790 memset(&ver, 0, sizeof(struct dlm_device_version));
791 ver.version[0] = DLM_DEVICE_VERSION_MAJOR;
792 ver.version[1] = DLM_DEVICE_VERSION_MINOR;
793 ver.version[2] = DLM_DEVICE_VERSION_PATCH;
795 if (copy_to_user(buf, &ver, sizeof(struct dlm_device_version)))
797 return sizeof(struct dlm_device_version);
800 /* a read returns a single ast described in a struct dlm_lock_result */
/*
 * device_read - deliver one queued ast to userspace.  A version-sized read
 * is answered with the ABI version instead.  Blocks (interruptibly) until
 * an ast is queued unless O_NONBLOCK is set, dequeues one callback, updates
 * the user lksb status/flags for casts, and copies the result out.
 * NOTE(review): listing heavily truncated -- the lkb declaration, COMPAT
 * size branch, switch header, several returns/breaks and the final
 * dlm_put_lkb are not visible.
 */
802 static ssize_t device_read(struct file *file, char __user *buf, size_t count,
805 struct dlm_user_proc *proc = file->private_data;
807 DECLARE_WAITQUEUE(wait, current);
808 struct dlm_callback *cb;
809 int rv, copy_lvb = 0;
810 int old_mode, new_mode;
812 if (count == sizeof(struct dlm_device_version)) {
813 rv = copy_version_to_user(buf, count);
818 log_print("non-version read from control device %zu", count);
/* buffer must hold at least one fixed-size result struct */
823 if (count < sizeof(struct dlm_lock_result32))
825 if (count < sizeof(struct dlm_lock_result))
831 /* do we really need this? can a read happen after a close? */
832 if (test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags))
835 spin_lock(&proc->asts_spin);
836 if (list_empty(&proc->asts)) {
837 if (file->f_flags & O_NONBLOCK) {
838 spin_unlock(&proc->asts_spin);
/* wait for an ast, dropping the spinlock across the sleep */
842 add_wait_queue(&proc->wait, &wait);
845 set_current_state(TASK_INTERRUPTIBLE);
846 if (list_empty(&proc->asts) && !signal_pending(current)) {
847 spin_unlock(&proc->asts_spin);
849 spin_lock(&proc->asts_spin);
852 set_current_state(TASK_RUNNING);
853 remove_wait_queue(&proc->wait, &wait);
855 if (signal_pending(current)) {
856 spin_unlock(&proc->asts_spin);
861 /* if we empty lkb_callbacks, we don't want to unlock the spinlock
862 without removing lkb_cb_list; so empty lkb_cb_list is always
863 consistent with empty lkb_callbacks */
865 lkb = list_first_entry(&proc->asts, struct dlm_lkb, lkb_cb_list);
867 /* rem_lkb_callback sets a new lkb_last_cast */
868 old_mode = lkb->lkb_last_cast->mode;
870 rv = dlm_dequeue_lkb_callback(lkb, &cb);
872 case DLM_DEQUEUE_CALLBACK_EMPTY:
873 /* this shouldn't happen; lkb should have been removed from
874 * list when last item was dequeued
876 log_print("dlm_rem_lkb_callback empty %x", lkb->lkb_id);
877 list_del_init(&lkb->lkb_cb_list);
878 spin_unlock(&proc->asts_spin);
879 /* removes ref for proc->asts, may cause lkb to be freed */
883 case DLM_DEQUEUE_CALLBACK_LAST:
884 list_del_init(&lkb->lkb_cb_list);
885 clear_bit(DLM_IFL_CB_PENDING_BIT, &lkb->lkb_iflags);
887 case DLM_DEQUEUE_CALLBACK_SUCCESS:
893 spin_unlock(&proc->asts_spin);
895 if (cb->flags & DLM_CB_BAST) {
896 trace_dlm_bast(lkb->lkb_resource->res_ls, lkb, cb->mode);
897 } else if (cb->flags & DLM_CB_CAST) {
/* only copy the lvb out when the cast succeeded and the mode
 * transition can have changed it */
900 if (!cb->sb_status && lkb->lkb_lksb->sb_lvbptr &&
901 dlm_lvb_operations[old_mode + 1][new_mode + 1])
904 lkb->lkb_lksb->sb_status = cb->sb_status;
905 lkb->lkb_lksb->sb_flags = cb->sb_flags;
906 trace_dlm_ast(lkb->lkb_resource->res_ls, lkb);
909 rv = copy_result_to_user(lkb->lkb_ua,
910 test_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags),
911 cb->flags, cb->mode, copy_lvb, buf, count);
913 kref_put(&cb->ref, dlm_release_callback);
915 /* removes ref for proc->asts, may cause lkb to be freed */
916 if (rv == DLM_DEQUEUE_CALLBACK_LAST)
/*
 * device_poll - report the device readable when the proc has queued asts.
 * NOTE(review): listing truncated -- the final "return 0" is not visible.
 */
922 static __poll_t device_poll(struct file *file, poll_table *wait)
924 struct dlm_user_proc *proc = file->private_data;
926 poll_wait(file, &proc->wait, wait);
928 spin_lock(&proc->asts_spin);
929 if (!list_empty(&proc->asts)) {
930 spin_unlock(&proc->asts_spin);
931 return EPOLLIN | EPOLLRDNORM;
933 spin_unlock(&proc->asts_spin);
/*
 * dlm_user_daemon_available - return nonzero when dlm_controld is running
 * and currently holds the monitor device open (a crashed daemon drops the
 * open, making this 0 again).
 */
937 int dlm_user_daemon_available(void)
939 /* dlm_controld hasn't started (or, has started, but not
940 properly populated configfs) */
942 if (!dlm_our_nodeid())
945 /* This is to deal with versions of dlm_controld that don't
946 know about the monitor device. We assume that if the
947 dlm_controld was started (above), but the monitor device
948 was never opened, that it's an old version. dlm_controld
949 should open the monitor device before populating configfs. */
951 if (dlm_monitor_unused)
954 return atomic_read(&dlm_monitor_opened) ? 1 : 0;
/* Control device open/close: no per-open state; a NULL private_data marks
 * the control device for device_write()/device_read().
 * NOTE(review): listing truncated -- return statements not visible. */
957 static int ctl_device_open(struct inode *inode, struct file *file)
959 file->private_data = NULL;
963 static int ctl_device_close(struct inode *inode, struct file *file)
/* Monitor device open/close: dlm_controld holds this device open while it
 * runs; when the last opener goes away (daemon exit/crash) all lockspaces
 * are stopped.  NOTE(review): returns truncated from this listing. */
968 static int monitor_device_open(struct inode *inode, struct file *file)
970 atomic_inc(&dlm_monitor_opened);
971 dlm_monitor_unused = 0;
975 static int monitor_device_close(struct inode *inode, struct file *file)
977 if (atomic_dec_and_test(&dlm_monitor_opened))
978 dlm_stop_lockspaces();
/* fops for the per-lockspace misc devices (definition of the forward
 * declaration near the top of the file).
 * NOTE(review): .open/.read/.poll entries are truncated from this listing. */
982 static const struct file_operations device_fops = {
984 .release = device_close,
986 .write = device_write,
988 .owner = THIS_MODULE,
989 .llseek = noop_llseek,
/* fops and misc device for /dev/misc/dlm-control (lockspace create/remove).
 * NOTE(review): the .read entry is truncated from this listing. */
992 static const struct file_operations ctl_device_fops = {
993 .open = ctl_device_open,
994 .release = ctl_device_close,
996 .write = device_write,
997 .owner = THIS_MODULE,
998 .llseek = noop_llseek,
1001 static struct miscdevice ctl_device = {
1002 .name = "dlm-control",
1003 .fops = &ctl_device_fops,
1004 .minor = MISC_DYNAMIC_MINOR,
/* fops and misc device for /dev/misc/dlm-monitor (held open by
 * dlm_controld; see monitor_device_open/close above). */
1007 static const struct file_operations monitor_device_fops = {
1008 .open = monitor_device_open,
1009 .release = monitor_device_close,
1010 .owner = THIS_MODULE,
1011 .llseek = noop_llseek,
1014 static struct miscdevice monitor_device = {
1015 .name = "dlm-monitor",
1016 .fops = &monitor_device_fops,
1017 .minor = MISC_DYNAMIC_MINOR,
/*
 * dlm_user_init - module init: register the control and monitor misc
 * devices; the control device is torn down if monitor registration fails.
 * NOTE(review): listing truncated -- return statements not visible.
 */
1020 int __init dlm_user_init(void)
1024 atomic_set(&dlm_monitor_opened, 0);
1026 error = misc_register(&ctl_device);
1028 log_print("misc_register failed for control device");
1032 error = misc_register(&monitor_device);
1034 log_print("misc_register failed for monitor device");
1035 misc_deregister(&ctl_device);
/* dlm_user_exit - module exit: unregister both global misc devices. */
1041 void dlm_user_exit(void)
1043 misc_deregister(&ctl_device);
1044 misc_deregister(&monitor_device);