1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2006-2010 Red Hat, Inc. All rights reserved.
6 #include <linux/miscdevice.h>
7 #include <linux/init.h>
8 #include <linux/wait.h>
9 #include <linux/file.h>
11 #include <linux/poll.h>
12 #include <linux/signal.h>
13 #include <linux/spinlock.h>
14 #include <linux/dlm.h>
15 #include <linux/dlm_device.h>
16 #include <linux/slab.h>
17 #include <linux/sched/signal.h>
19 #include <trace/events/dlm.h>
21 #include "dlm_internal.h"
22 #include "lockspace.h"
24 #include "lvb_table.h"
/* File-scope state: the "dlm" prefix used to build per-lockspace misc
 * device names, forward-declared fops for those devices, and tracking
 * of the monitor device's open count, consumed by
 * dlm_user_daemon_available() below. */
30 static const char name_prefix[] = "dlm";
31 static const struct file_operations device_fops;
32 static atomic_t dlm_monitor_opened;
33 static int dlm_monitor_unused = 1;
/* 32-bit compat layouts mirroring the native dlm_lock_params,
 * dlm_write_request and dlm_lock_result structures; pointer fields in
 * the native versions are carried here as 32-bit values.
 * NOTE(review): this excerpt elides most members of each struct; only
 * a few fields are visible below. */
37 struct dlm_lock_params32 {
51 char lvb[DLM_USER_LVB_LEN];
55 struct dlm_write_request32 {
62 struct dlm_lock_params32 lock;
63 struct dlm_lspace_params lspace;
64 struct dlm_purge_params purge;
75 struct dlm_lock_result32 {
81 struct dlm_lksb32 lksb;
84 /* Offsets may be zero if no data is present */
/* Copy a 32-bit userspace write request (kb32) into the native layout
 * (kb).  Which union member to copy is selected on kb->cmd; userspace
 * pointer values are widened from 32 bits via (long) casts.  namelen
 * bounds the name copies for the lspace and lock variants.
 * NOTE(review): the lines assigning kb->cmd and closing the branches
 * are elided from this excerpt. */
88 static void compat_input(struct dlm_write_request *kb,
89 struct dlm_write_request32 *kb32,
92 kb->version[0] = kb32->version[0];
93 kb->version[1] = kb32->version[1];
94 kb->version[2] = kb32->version[2];
97 kb->is64bit = kb32->is64bit;
98 if (kb->cmd == DLM_USER_CREATE_LOCKSPACE ||
99 kb->cmd == DLM_USER_REMOVE_LOCKSPACE) {
100 kb->i.lspace.flags = kb32->i.lspace.flags;
101 kb->i.lspace.minor = kb32->i.lspace.minor;
102 memcpy(kb->i.lspace.name, kb32->i.lspace.name, namelen);
103 } else if (kb->cmd == DLM_USER_PURGE) {
104 kb->i.purge.nodeid = kb32->i.purge.nodeid;
105 kb->i.purge.pid = kb32->i.purge.pid;
/* default: a lock/unlock request — copy the lock params, widening the
 * five userspace pointers (cast/bast param+addr, lksb). */
107 kb->i.lock.mode = kb32->i.lock.mode;
108 kb->i.lock.namelen = kb32->i.lock.namelen;
109 kb->i.lock.flags = kb32->i.lock.flags;
110 kb->i.lock.lkid = kb32->i.lock.lkid;
111 kb->i.lock.parent = kb32->i.lock.parent;
112 kb->i.lock.xid = kb32->i.lock.xid;
113 kb->i.lock.timeout = kb32->i.lock.timeout;
114 kb->i.lock.castparam = (__user void *)(long)kb32->i.lock.castparam;
115 kb->i.lock.castaddr = (__user void *)(long)kb32->i.lock.castaddr;
116 kb->i.lock.bastparam = (__user void *)(long)kb32->i.lock.bastparam;
117 kb->i.lock.bastaddr = (__user void *)(long)kb32->i.lock.bastaddr;
118 kb->i.lock.lksb = (__user void *)(long)kb32->i.lock.lksb;
119 memcpy(kb->i.lock.lvb, kb32->i.lock.lvb, DLM_USER_LVB_LEN);
120 memcpy(kb->i.lock.name, kb32->i.lock.name, namelen);
/* Convert a native dlm_lock_result (res) into its 32-bit layout
 * (res32) for a compat reader.  res32 is zeroed first so any fields
 * not copied below read as 0; kernel pointers are narrowed to __u32. */
124 static void compat_output(struct dlm_lock_result *res,
125 struct dlm_lock_result32 *res32)
127 memset(res32, 0, sizeof(*res32));
129 res32->version[0] = res->version[0];
130 res32->version[1] = res->version[1];
131 res32->version[2] = res->version[2];
133 res32->user_astaddr = (__u32)(__force long)res->user_astaddr;
134 res32->user_astparam = (__u32)(__force long)res->user_astparam;
135 res32->user_lksb = (__u32)(__force long)res->user_lksb;
136 res32->bast_mode = res->bast_mode;
138 res32->lvb_offset = res->lvb_offset;
139 res32->length = res->length;
/* copy the lksb fields individually; sb_lvbptr is a pointer and must
 * be narrowed explicitly */
141 res32->lksb.sb_status = res->lksb.sb_status;
142 res32->lksb.sb_flags = res->lksb.sb_flags;
143 res32->lksb.sb_lkid = res->lksb.sb_lkid;
144 res32->lksb.sb_lvbptr = (__u32)(long)res->lksb.sb_lvbptr;
148 /* Figure out if this lock is at the end of its life and no longer
149 available for the application to use. The lkb still exists until
150 the final ast is read. A lock becomes EOL in three situations:
151 1. a noqueue request fails with EAGAIN
152 2. an unlock completes with EUNLOCK
153 3. a cancel of a waiting request completes with ECANCEL/EDEADLK
154 An EOL lock needs to be removed from the process's list of locks.
155 And we can't allow any new operation on an EOL lock. This is
156 not related to the lifetime of the lkb struct which is managed
157 entirely by refcount. */
/* Return nonzero if a completion AST with this (mode, status) means
 * the lock is end-of-life, per the three situations described in the
 * comment block above.  NOTE(review): most of the body is elided from
 * this excerpt; only the DLM_LOCK_IV check is visible. */
159 static int lkb_is_endoflife(int mode, int status)
168 if (mode == DLM_LOCK_IV)
175 /* we could possibly check if the cancel of an orphan has resulted in the lkb
176 being removed and then remove that lkb from the orphans list and free it */
/* Queue an AST (completion or blocking callback) for delivery to the
 * owning userspace process.  Runs under ls->ls_clear_proc_locks to
 * serialize against dlm_clear_proc_locks(); bails early if the lkb is
 * already orphaned or dead.  On NEED_SCHED the lkb is put on
 * proc->asts (taking an extra ref) and the reader is woken.  An
 * end-of-life cast additionally removes the lkb from the process's
 * ownqueue.
 * NOTE(review): several lines (early returns, ua/proc assignments,
 * switch statement head, DEAD-bit set) are elided from this excerpt. */
178 void dlm_user_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode,
179 int status, uint32_t sbflags)
182 struct dlm_user_args *ua;
183 struct dlm_user_proc *proc;
/* cheap unlocked check first; repeated under the lock below */
186 if (test_bit(DLM_DFL_ORPHAN_BIT, &lkb->lkb_dflags) ||
187 test_bit(DLM_IFL_DEAD_BIT, &lkb->lkb_iflags))
190 ls = lkb->lkb_resource->res_ls;
191 spin_lock(&ls->ls_clear_proc_locks);
193 /* If ORPHAN/DEAD flag is set, it means the process is dead so an ast
194 can't be delivered. For ORPHAN's, dlm_clear_proc_locks() freed
195 lkb->ua so we can't try to use it. This second check is necessary
196 for cases where a completion ast is received for an operation that
197 began before clear_proc_locks did its cancel/unlock. */
199 if (test_bit(DLM_DFL_ORPHAN_BIT, &lkb->lkb_dflags) ||
200 test_bit(DLM_IFL_DEAD_BIT, &lkb->lkb_iflags))
203 DLM_ASSERT(lkb->lkb_ua, dlm_print_lkb(lkb););
/* a blocking AST with no registered bastaddr has nowhere to go */
207 if ((flags & DLM_CB_BAST) && ua->bastaddr == NULL)
210 if ((flags & DLM_CB_CAST) && lkb_is_endoflife(mode, status))
211 set_bit(DLM_IFL_ENDOFLIFE_BIT, &lkb->lkb_iflags);
213 spin_lock(&proc->asts_spin);
215 rv = dlm_enqueue_lkb_callback(lkb, flags, mode, status, sbflags);
217 case DLM_ENQUEUE_CALLBACK_FAILURE:
218 spin_unlock(&proc->asts_spin);
221 case DLM_ENQUEUE_CALLBACK_NEED_SCHED:
/* first pending callback: hold a ref while the lkb sits on
 * proc->asts, then wake the device reader */
222 kref_get(&lkb->lkb_ref);
223 list_add_tail(&lkb->lkb_cb_list, &proc->asts);
224 wake_up_interruptible(&proc->wait);
226 case DLM_ENQUEUE_CALLBACK_SUCCESS:
232 spin_unlock(&proc->asts_spin);
234 if (test_bit(DLM_IFL_ENDOFLIFE_BIT, &lkb->lkb_iflags)) {
235 /* N.B. spin_lock locks_spin, not asts_spin */
236 spin_lock(&proc->locks_spin);
237 if (!list_empty(&lkb->lkb_ownqueue)) {
238 list_del_init(&lkb->lkb_ownqueue);
241 spin_unlock(&proc->locks_spin);
244 spin_unlock(&ls->ls_clear_proc_locks);
/* Handle a DLM_USER_LOCK write: allocate a dlm_user_args carrying the
 * userspace callback/lksb pointers, then dispatch to convert, orphan
 * adoption, or a new request depending on params->flags.  A new
 * request returns the assigned lkid on success.  Returns a lockspace
 * reference around the operation.
 * NOTE(review): error-path lines (lockspace lookup failure, kzalloc
 * failure, -EINVAL for missing castaddr/lksb) are elided here. */
247 static int device_user_lock(struct dlm_user_proc *proc,
248 struct dlm_lock_params *params)
251 struct dlm_user_args *ua;
255 ls = dlm_find_lockspace_local(proc->lockspace);
/* a lock request is useless without a completion AST and lksb */
259 if (!params->castaddr || !params->lksb) {
264 ua = kzalloc(sizeof(struct dlm_user_args), GFP_NOFS);
268 ua->user_lksb = params->lksb;
269 ua->castparam = params->castparam;
270 ua->castaddr = params->castaddr;
271 ua->bastparam = params->bastparam;
272 ua->bastaddr = params->bastaddr;
273 ua->xid = params->xid;
275 if (params->flags & DLM_LKF_CONVERT) {
276 error = dlm_user_convert(ls, ua,
277 params->mode, params->flags,
278 params->lkid, params->lvb);
279 } else if (params->flags & DLM_LKF_ORPHAN) {
280 error = dlm_user_adopt_orphan(ls, ua,
281 params->mode, params->flags,
282 params->name, params->namelen,
287 error = dlm_user_request(ls, ua,
288 params->mode, params->flags,
289 params->name, params->namelen);
291 error = ua->lksb.sb_lkid;
294 dlm_put_lockspace(ls);
/* Handle a DLM_USER_UNLOCK write: build a dlm_user_args with the
 * caller's lksb/castparam/castaddr, then cancel (DLM_LKF_CANCEL) or
 * unlock the lkid.  Drops the lockspace reference on the way out.
 * NOTE(review): lookup/allocation failure paths are elided here. */
298 static int device_user_unlock(struct dlm_user_proc *proc,
299 struct dlm_lock_params *params)
302 struct dlm_user_args *ua;
305 ls = dlm_find_lockspace_local(proc->lockspace);
309 ua = kzalloc(sizeof(struct dlm_user_args), GFP_NOFS);
313 ua->user_lksb = params->lksb;
314 ua->castparam = params->castparam;
315 ua->castaddr = params->castaddr;
317 if (params->flags & DLM_LKF_CANCEL)
318 error = dlm_user_cancel(ls, ua, params->flags, params->lkid);
320 error = dlm_user_unlock(ls, ua, params->flags, params->lkid,
323 dlm_put_lockspace(ls);
/* Handle a DLM_USER_DEADLOCK write: forward flags and lkid to
 * dlm_user_deadlock() under a lockspace reference. */
327 static int device_user_deadlock(struct dlm_user_proc *proc,
328 struct dlm_lock_params *params)
333 ls = dlm_find_lockspace_local(proc->lockspace);
337 error = dlm_user_deadlock(ls, params->flags, params->lkid);
339 dlm_put_lockspace(ls);
/* Register a misc device named "dlm_<name>" for the lockspace so
 * userspace can open it.  Idempotent: returns early if ls_device.name
 * is already set.  On misc_register() failure the name is freed and
 * reset to NULL so dlm_device_deregister() won't double-free. */
343 static int dlm_device_register(struct dlm_ls *ls, char *name)
347 /* The device is already registered. This happens when the
348 lockspace is created multiple times from userspace. */
349 if (ls->ls_device.name)
/* +2: one for the '_' separator, one for the NUL terminator */
353 len = strlen(name) + strlen(name_prefix) + 2;
354 ls->ls_device.name = kzalloc(len, GFP_NOFS);
355 if (!ls->ls_device.name)
358 snprintf((char *)ls->ls_device.name, len, "%s_%s", name_prefix,
360 ls->ls_device.fops = &device_fops;
361 ls->ls_device.minor = MISC_DYNAMIC_MINOR;
363 error = misc_register(&ls->ls_device);
365 kfree(ls->ls_device.name);
366 /* this has to be set to NULL
367 * to avoid a double-free in dlm_device_deregister
369 ls->ls_device.name = NULL;
/* Undo dlm_device_register(): deregister the misc device and free its
 * name.  A NULL name means the device was never registered and there
 * is nothing to do. */
375 int dlm_device_deregister(struct dlm_ls *ls)
377 /* The device is not registered. This happens when the lockspace
378 was never used from userspace, or when device_create_lockspace()
379 calls dlm_release_lockspace() after the register fails. */
380 if (!ls->ls_device.name)
383 misc_deregister(&ls->ls_device);
384 kfree(ls->ls_device.name);
/* Handle a DLM_USER_PURGE write: purge locks belonging to the given
 * nodeid/pid via dlm_user_purge(), under a lockspace reference. */
388 static int device_user_purge(struct dlm_user_proc *proc,
389 struct dlm_purge_params *params)
394 ls = dlm_find_lockspace_local(proc->lockspace);
398 error = dlm_user_purge(ls, proc, params->nodeid, params->pid);
400 dlm_put_lockspace(ls);
/* Handle DLM_USER_CREATE_LOCKSPACE (control device, CAP_SYS_ADMIN
 * only): create the lockspace, register its misc device, and return
 * the device minor to userspace.  On register failure the lockspace
 * is released again.
 * NOTE(review): intermediate error-check lines are elided here. */
404 static int device_create_lockspace(struct dlm_lspace_params *params)
406 dlm_lockspace_t *lockspace;
410 if (!capable(CAP_SYS_ADMIN))
413 error = dlm_new_user_lockspace(params->name, dlm_config.ci_cluster_name,
414 params->flags, DLM_USER_LVB_LEN, NULL,
415 NULL, NULL, &lockspace);
419 ls = dlm_find_lockspace_local(lockspace);
423 error = dlm_device_register(ls, params->name);
424 dlm_put_lockspace(ls);
427 dlm_release_lockspace(lockspace, 0);
/* success: report the minor so userspace can open the device */
429 error = ls->ls_device.minor;
/* Handle DLM_USER_REMOVE_LOCKSPACE (control device, CAP_SYS_ADMIN
 * only): look up the lockspace by device minor, honor the FORCEFREE
 * flag, drop the lookup reference, then release the lockspace. */
434 static int device_remove_lockspace(struct dlm_lspace_params *params)
436 dlm_lockspace_t *lockspace;
438 int error, force = 0;
440 if (!capable(CAP_SYS_ADMIN))
443 ls = dlm_find_lockspace_device(params->minor);
447 if (params->flags & DLM_USER_LSFLG_FORCEFREE)
450 lockspace = ls->ls_local_handle;
451 dlm_put_lockspace(ls);
453 /* The final dlm_release_lockspace waits for references to go to
454 zero, so all processes will need to close their device for the
455 ls before the release will proceed. release also calls the
456 device_deregister above. Converting a positive return value
457 from release to zero means that userspace won't know when its
458 release was the final one, but it shouldn't need to know. */
460 error = dlm_release_lockspace(lockspace, force);
466 /* Check the user's version matches ours */
/* Reject a request whose major version differs, or whose minor
 * version is newer than the kernel's; logs a debug message naming the
 * offending process.  Patch level is not compared. */
467 static int check_version(struct dlm_write_request *req)
469 if (req->version[0] != DLM_DEVICE_VERSION_MAJOR ||
470 (req->version[0] == DLM_DEVICE_VERSION_MAJOR &&
471 req->version[1] > DLM_DEVICE_VERSION_MINOR)) {
473 printk(KERN_DEBUG "dlm: process %s (%d) version mismatch "
474 "user (%d.%d.%d) kernel (%d.%d.%d)\n",
476 task_pid_nr(current),
480 DLM_DEVICE_VERSION_MAJOR,
481 DLM_DEVICE_VERSION_MINOR,
482 DLM_DEVICE_VERSION_PATCH);
492 * dlm_user_request -> request_lock
493 * dlm_user_convert -> convert_lock
496 * dlm_user_unlock -> unlock_lock
497 * dlm_user_cancel -> cancel_lock
499 * device_create_lockspace
502 * device_remove_lockspace
503 * dlm_release_lockspace
506 /* a write to a lockspace device is a lock or unlock request, a write
507 to the control device is to create/remove a lockspace */
/* write() entry point shared by the control device and per-lockspace
 * devices.  Validates the request size and version, copies it in with
 * memdup_user_nul(), converts a 32-bit request to the native layout
 * via compat_input(), then dispatches on kbuf->cmd: lock/unlock/
 * deadlock/purge on a lockspace device, create/remove lockspace on
 * the control device (proc == NULL distinguishes the two).
 * NOTE(review): this excerpt elides many lines, including the size-
 * check returns, the switch head, DLM_USER_LOCK case label, break
 * statements, and the cleanup/return path. */
509 static ssize_t device_write(struct file *file, const char __user *buf,
510 size_t count, loff_t *ppos)
512 struct dlm_user_proc *proc = file->private_data;
513 struct dlm_write_request *kbuf;
/* minimum sane sizes; the compat struct is the smaller of the two */
517 if (count < sizeof(struct dlm_write_request32))
519 if (count < sizeof(struct dlm_write_request))
524 * can't compare against COMPAT/dlm_write_request32 because
525 * we don't yet know if is64bit is zero
527 if (count > sizeof(struct dlm_write_request) + DLM_RESNAME_MAXLEN)
530 kbuf = memdup_user_nul(buf, count);
532 return PTR_ERR(kbuf);
534 if (check_version(kbuf)) {
/* 32-bit caller: reallocate in the native layout and convert */
540 if (!kbuf->is64bit) {
541 struct dlm_write_request32 *k32buf;
544 if (count > sizeof(struct dlm_write_request32))
545 namelen = count - sizeof(struct dlm_write_request32);
547 k32buf = (struct dlm_write_request32 *)kbuf;
549 /* add 1 after namelen so that the name string is terminated */
550 kbuf = kzalloc(sizeof(struct dlm_write_request) + namelen + 1,
558 set_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags);
560 compat_input(kbuf, k32buf, namelen);
565 /* do we really need this? can a write happen after a close? */
566 if ((kbuf->cmd == DLM_USER_LOCK || kbuf->cmd == DLM_USER_UNLOCK) &&
567 (proc && test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags))) {
/* command dispatch: lock-family commands need a lockspace device
 * (proc != NULL); lockspace create/remove need the control device */
578 log_print("no locking on control device");
581 error = device_user_lock(proc, &kbuf->i.lock);
584 case DLM_USER_UNLOCK:
586 log_print("no locking on control device");
589 error = device_user_unlock(proc, &kbuf->i.lock);
592 case DLM_USER_DEADLOCK:
594 log_print("no locking on control device");
597 error = device_user_deadlock(proc, &kbuf->i.lock);
600 case DLM_USER_CREATE_LOCKSPACE:
602 log_print("create/remove only on control device");
605 error = device_create_lockspace(&kbuf->i.lspace);
608 case DLM_USER_REMOVE_LOCKSPACE:
610 log_print("create/remove only on control device");
613 error = device_remove_lockspace(&kbuf->i.lspace);
618 log_print("no locking on control device");
621 error = device_user_purge(proc, &kbuf->i.purge);
625 log_print("Unknown command passed to DLM device : %d\n",
634 /* Every process that opens the lockspace device has its own "proc" structure
635 hanging off the open file that's used to keep track of locks owned by the
636 process and asts that need to be delivered to the process. */
/* open() for a per-lockspace device: find the lockspace by device
 * minor, allocate a per-open dlm_user_proc, initialize its lists,
 * locks and waitqueue, and stash it in file->private_data.  The
 * lockspace reference taken here is dropped in device_close().
 * NOTE(review): the lookup-failure return and the final return are
 * elided from this excerpt. */
638 static int device_open(struct inode *inode, struct file *file)
640 struct dlm_user_proc *proc;
643 ls = dlm_find_lockspace_device(iminor(inode));
647 proc = kzalloc(sizeof(struct dlm_user_proc), GFP_NOFS);
/* allocation failed: drop the lookup reference before returning */
649 dlm_put_lockspace(ls);
653 proc->lockspace = ls->ls_local_handle;
654 INIT_LIST_HEAD(&proc->asts);
655 INIT_LIST_HEAD(&proc->locks);
656 INIT_LIST_HEAD(&proc->unlocking);
657 spin_lock_init(&proc->asts_spin);
658 spin_lock_init(&proc->locks_spin);
659 init_waitqueue_head(&proc->wait);
660 file->private_data = proc;
/* release() for a per-lockspace device: mark the proc as closing so
 * reads/writes bail, clear all locks owned by this process, then drop
 * both lockspace references (this function's find and the one from
 * device_open()).  The proc struct itself is freed in a line elided
 * from this excerpt. */
665 static int device_close(struct inode *inode, struct file *file)
667 struct dlm_user_proc *proc = file->private_data;
670 ls = dlm_find_lockspace_local(proc->lockspace);
674 set_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags);
676 dlm_clear_proc_locks(ls, proc);
678 /* at this point no more lkb's should exist for this lockspace,
679 so there's no chance of dlm_user_add_ast() being called and
680 looking for lkb->ua->proc */
683 file->private_data = NULL;
685 dlm_put_lockspace(ls);
686 dlm_put_lockspace(ls); /* for the find in device_open() */
688 /* FIXME: AUTOFREE: if this ls is no longer used do
689 device_remove_lockspace() */
/* Build a dlm_lock_result from the user args and the AST being
 * delivered, optionally append the LVB after the fixed struct, and
 * copy it to the user buffer.  For a compat reader the result is
 * converted to the 32-bit layout first.  BAST deliveries report the
 * bast addr/param and blocking mode; CAST deliveries report the cast
 * addr/param.
 * NOTE(review): several lines (struct_len/resultptr setup, the final
 * return, copy_to_user error return for the lvb) are elided here. */
694 static int copy_result_to_user(struct dlm_user_args *ua, int compat,
695 uint32_t flags, int mode, int copy_lvb,
696 char __user *buf, size_t count)
699 struct dlm_lock_result32 result32;
701 struct dlm_lock_result result;
707 memset(&result, 0, sizeof(struct dlm_lock_result));
708 result.version[0] = DLM_DEVICE_VERSION_MAJOR;
709 result.version[1] = DLM_DEVICE_VERSION_MINOR;
710 result.version[2] = DLM_DEVICE_VERSION_PATCH;
/* copy lksb fields up to (not including) the kernel sb_lvbptr */
711 memcpy(&result.lksb, &ua->lksb, offsetof(struct dlm_lksb, sb_lvbptr));
712 result.user_lksb = ua->user_lksb;
714 /* FIXME: dlm1 provides for the user's bastparam/addr to not be updated
715 in a conversion unless the conversion is successful. See code
716 in dlm_user_convert() for updating ua from ua_tmp. OpenVMS, though,
717 notes that a new blocking AST address and parameter are set even if
718 the conversion fails, so maybe we should just do that. */
720 if (flags & DLM_CB_BAST) {
721 result.user_astaddr = ua->bastaddr;
722 result.user_astparam = ua->bastparam;
723 result.bast_mode = mode;
725 result.user_astaddr = ua->castaddr;
726 result.user_astparam = ua->castparam;
/* len: size of the fixed part, compat or native */
731 len = sizeof(struct dlm_lock_result32);
734 len = sizeof(struct dlm_lock_result);
737 /* copy lvb to userspace if there is one, it's been updated, and
738 the user buffer has space for it */
740 if (copy_lvb && ua->lksb.sb_lvbptr && count >= len + DLM_USER_LVB_LEN) {
741 if (copy_to_user(buf+len, ua->lksb.sb_lvbptr,
/* record where the lvb landed relative to buf */
747 result.lvb_offset = len;
748 len += DLM_USER_LVB_LEN;
755 compat_output(&result, &result32);
756 resultptr = &result32;
760 if (copy_to_user(buf, resultptr, struct_len))
/* Answer a version-probe read: copy the kernel's device version
 * triple to the user buffer and return the number of bytes written
 * (the -EFAULT return on copy failure is elided from this excerpt). */
768 static int copy_version_to_user(char __user *buf, size_t count)
770 struct dlm_device_version ver;
772 memset(&ver, 0, sizeof(struct dlm_device_version));
773 ver.version[0] = DLM_DEVICE_VERSION_MAJOR;
774 ver.version[1] = DLM_DEVICE_VERSION_MINOR;
775 ver.version[2] = DLM_DEVICE_VERSION_PATCH;
777 if (copy_to_user(buf, &ver, sizeof(struct dlm_device_version)))
779 return sizeof(struct dlm_device_version);
782 /* a read returns a single ast described in a struct dlm_lock_result */
/* read() entry point: a version-sized read returns the device
 * version; otherwise block (unless O_NONBLOCK) until an AST is queued
 * on proc->asts, dequeue one callback from the first lkb, emit trace
 * events, update the user-visible lksb for a cast, and copy the
 * result out via copy_result_to_user().  When the last callback for
 * an lkb is dequeued, the lkb leaves proc->asts and the ref taken in
 * dlm_user_add_ast() is dropped.
 * NOTE(review): this excerpt elides many lines, including the
 * *ppos parameter, size-check returns, the wait loop's schedule()
 * call, switch head, break statements, the copy_lvb computation and
 * the final dlm_put_lkb()/return. */
784 static ssize_t device_read(struct file *file, char __user *buf, size_t count,
787 struct dlm_user_proc *proc = file->private_data;
789 DECLARE_WAITQUEUE(wait, current);
790 struct dlm_callback *cb;
791 int rv, copy_lvb = 0;
792 int old_mode, new_mode;
/* a version-sized read is a version probe, valid on any device */
794 if (count == sizeof(struct dlm_device_version)) {
795 rv = copy_version_to_user(buf, count);
800 log_print("non-version read from control device %zu", count);
/* result reads must fit at least the fixed result struct */
805 if (count < sizeof(struct dlm_lock_result32))
807 if (count < sizeof(struct dlm_lock_result))
813 /* do we really need this? can a read happen after a close? */
814 if (test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags))
817 spin_lock(&proc->asts_spin);
818 if (list_empty(&proc->asts)) {
819 if (file->f_flags & O_NONBLOCK) {
820 spin_unlock(&proc->asts_spin);
/* blocking read: wait interruptibly for an AST or a signal */
824 add_wait_queue(&proc->wait, &wait);
827 set_current_state(TASK_INTERRUPTIBLE);
828 if (list_empty(&proc->asts) && !signal_pending(current)) {
829 spin_unlock(&proc->asts_spin);
831 spin_lock(&proc->asts_spin);
834 set_current_state(TASK_RUNNING);
835 remove_wait_queue(&proc->wait, &wait);
837 if (signal_pending(current)) {
838 spin_unlock(&proc->asts_spin);
843 /* if we empty lkb_callbacks, we don't want to unlock the spinlock
844 without removing lkb_cb_list; so empty lkb_cb_list is always
845 consistent with empty lkb_callbacks */
847 lkb = list_first_entry(&proc->asts, struct dlm_lkb, lkb_cb_list);
849 /* rem_lkb_callback sets a new lkb_last_cast */
850 old_mode = lkb->lkb_last_cast->mode;
852 rv = dlm_dequeue_lkb_callback(lkb, &cb);
854 case DLM_DEQUEUE_CALLBACK_EMPTY:
855 /* this shouldn't happen; lkb should have been removed from
856 * list when last item was dequeued
858 log_print("dlm_rem_lkb_callback empty %x", lkb->lkb_id);
859 list_del_init(&lkb->lkb_cb_list);
860 spin_unlock(&proc->asts_spin);
861 /* removes ref for proc->asts, may cause lkb to be freed */
865 case DLM_DEQUEUE_CALLBACK_LAST:
866 list_del_init(&lkb->lkb_cb_list);
867 clear_bit(DLM_IFL_CB_PENDING_BIT, &lkb->lkb_iflags);
869 case DLM_DEQUEUE_CALLBACK_SUCCESS:
875 spin_unlock(&proc->asts_spin);
877 if (cb->flags & DLM_CB_BAST) {
878 trace_dlm_bast(lkb->lkb_resource->res_ls, lkb, cb->mode);
879 } else if (cb->flags & DLM_CB_CAST) {
/* a successful cast may have updated the lvb; the lvb table
 * decides whether this mode transition carries one */
882 if (!cb->sb_status && lkb->lkb_lksb->sb_lvbptr &&
883 dlm_lvb_operations[old_mode + 1][new_mode + 1])
886 lkb->lkb_lksb->sb_status = cb->sb_status;
887 lkb->lkb_lksb->sb_flags = cb->sb_flags;
888 trace_dlm_ast(lkb->lkb_resource->res_ls, lkb);
891 rv = copy_result_to_user(lkb->lkb_ua,
892 test_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags),
893 cb->flags, cb->mode, copy_lvb, buf, count);
895 kref_put(&cb->ref, dlm_release_callback);
897 /* removes ref for proc->asts, may cause lkb to be freed */
898 if (rv == DLM_DEQUEUE_CALLBACK_LAST)
/* poll() entry point: register on proc->wait, then report readable
 * (EPOLLIN | EPOLLRDNORM) iff an AST is queued on proc->asts.  The
 * empty-queue return (0) is elided from this excerpt. */
904 static __poll_t device_poll(struct file *file, poll_table *wait)
906 struct dlm_user_proc *proc = file->private_data;
908 poll_wait(file, &proc->wait, wait);
910 spin_lock(&proc->asts_spin);
911 if (!list_empty(&proc->asts)) {
912 spin_unlock(&proc->asts_spin);
913 return EPOLLIN | EPOLLRDNORM;
915 spin_unlock(&proc->asts_spin);
/* Return 1 if dlm_controld appears to be running: the node id is
 * configured and either the monitor device was never used (old
 * dlm_controld that predates it) or it is currently open. */
919 int dlm_user_daemon_available(void)
921 /* dlm_controld hasn't started (or, has started, but not
922 properly populated configfs) */
924 if (!dlm_our_nodeid())
927 /* This is to deal with versions of dlm_controld that don't
928 know about the monitor device. We assume that if the
929 dlm_controld was started (above), but the monitor device
930 was never opened, that it's an old version. dlm_controld
931 should open the monitor device before populating configfs. */
933 if (dlm_monitor_unused)
936 return atomic_read(&dlm_monitor_opened) ? 1 : 0;
/* open() for the control device: no per-open state; a NULL
 * private_data is how device_write() recognizes the control device. */
939 static int ctl_device_open(struct inode *inode, struct file *file)
941 file->private_data = NULL;
/* release() for the control device: nothing to clean up (body elided
 * in this excerpt; the signature shows no state is carried). */
945 static int ctl_device_close(struct inode *inode, struct file *file)
/* open() for the monitor device: count the open and mark the device
 * as used, which enables the daemon-liveness check in
 * dlm_user_daemon_available(). */
950 static int monitor_device_open(struct inode *inode, struct file *file)
952 atomic_inc(&dlm_monitor_opened);
953 dlm_monitor_unused = 0;
/* release() for the monitor device: when the last opener (i.e.
 * dlm_controld) goes away, stop all lockspaces. */
957 static int monitor_device_close(struct inode *inode, struct file *file)
959 if (atomic_dec_and_test(&dlm_monitor_opened))
960 dlm_stop_lockspaces();
/* fops for per-lockspace devices (registered in dlm_device_register);
 * .open/.read/.poll entries are elided from this excerpt. */
964 static const struct file_operations device_fops = {
966 .release = device_close,
968 .write = device_write,
970 .owner = THIS_MODULE,
971 .llseek = noop_llseek,
/* fops for the dlm-control device; shares device_write with the
 * lockspace devices (NULL private_data marks control-device writes). */
974 static const struct file_operations ctl_device_fops = {
975 .open = ctl_device_open,
976 .release = ctl_device_close,
978 .write = device_write,
979 .owner = THIS_MODULE,
980 .llseek = noop_llseek,
/* misc device "dlm-control": lockspace create/remove interface */
983 static struct miscdevice ctl_device = {
984 .name = "dlm-control",
985 .fops = &ctl_device_fops,
986 .minor = MISC_DYNAMIC_MINOR,
/* fops for the dlm-monitor device: open/close only, used to detect
 * dlm_controld's presence */
989 static const struct file_operations monitor_device_fops = {
990 .open = monitor_device_open,
991 .release = monitor_device_close,
992 .owner = THIS_MODULE,
993 .llseek = noop_llseek,
/* misc device "dlm-monitor": held open by dlm_controld */
996 static struct miscdevice monitor_device = {
997 .name = "dlm-monitor",
998 .fops = &monitor_device_fops,
999 .minor = MISC_DYNAMIC_MINOR,
/* Module init for the user interface: register the control and
 * monitor misc devices; on monitor failure the control device is
 * unwound.  Error returns are elided from this excerpt. */
1002 int __init dlm_user_init(void)
1006 atomic_set(&dlm_monitor_opened, 0);
1008 error = misc_register(&ctl_device);
1010 log_print("misc_register failed for control device");
1014 error = misc_register(&monitor_device);
1016 log_print("misc_register failed for monitor device");
1017 misc_deregister(&ctl_device);
1023 void dlm_user_exit(void)
1025 misc_deregister(&ctl_device);
1026 misc_deregister(&monitor_device);