1 // SPDX-License-Identifier: GPL-2.0-only
5 * We implement four types of file locks: BSD locks, posix locks, open
6 * file description locks, and leases. For details about BSD locks,
7 * see the flock(2) man page; for details about the other three, see fcntl(2).
11 * Locking conflicts and dependencies:
12 * If multiple threads attempt to lock the same byte (or flock the same file)
13 * only one can be granted the lock, and the others must wait their turn.
14 * The first lock has been "applied" or "granted", the others are "waiting"
15 * and are "blocked" by the "applied" lock.
17 * Waiting and applied locks are all kept in trees whose properties are:
19 * - the root of a tree may be an applied or waiting lock.
20 * - every other node in the tree is a waiting lock that
21 * conflicts with every ancestor of that node.
23 * Every such tree begins life as a waiting singleton which obviously
24 * satisfies the above properties.
26 * The only ways we modify trees preserve these properties:
28 * 1. We may add a new leaf node, but only after first verifying that it
29 * conflicts with all of its ancestors.
30 * 2. We may remove the root of a tree, creating a new singleton
31 * tree from the root and N new trees rooted in the immediate children of the root.
33 * 3. If the root of a tree is not currently an applied lock, we may
34 * apply it (if possible).
35 * 4. We may upgrade the root of the tree (either extend its range,
36 * or upgrade its entire range from read to write).
38 * When an applied lock is modified in a way that reduces or downgrades any
39 * part of its range, we remove all its children (2 above). This particularly
40 * happens when a lock is unlocked.
42 * For each of those child trees we "wake up" the thread which is
43 * waiting for the lock so it can continue handling as follows: if the
44 * root of the tree applies, we do so (3). If it doesn't, it must
45 * conflict with some applied lock. We remove (wake up) all of its children
46 * (2), and add it as a new leaf to the tree rooted in the applied
47 * lock (1). We then repeat the process recursively with those children.
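/*
 * Worked example of the rules above (an editor's sketch, not normative):
 *
 *	P1 holds a write lock on bytes 0-9 (applied, tree root).
 *	P2 requests 0-4: conflicts with P1, added as a waiting child of P1.
 *	P3 requests 0-2: conflicts with P2 as well, added beneath P2.
 *
 *		P1 [applied, 0-9]
 *		 \_ P2 [waiting, 0-4]
 *		     \_ P3 [waiting, 0-2]
 *
 * When P1 unlocks, rule 2 detaches P2 (with P3 still beneath it) into its
 * own tree and P2 is woken. If P2's request now applies, rule 3 applies it
 * and P3 keeps waiting behind P2; if P2 still conflicts with some other
 * applied lock, rule 2 wakes P3 and rule 1 re-inserts P2 as a leaf of that
 * applied lock's tree.
 */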
52 #include <linux/capability.h>
53 #include <linux/file.h>
54 #include <linux/fdtable.h>
55 #include <linux/filelock.h>
57 #include <linux/init.h>
58 #include <linux/security.h>
59 #include <linux/slab.h>
60 #include <linux/syscalls.h>
61 #include <linux/time.h>
62 #include <linux/rcupdate.h>
63 #include <linux/pid_namespace.h>
64 #include <linux/hashtable.h>
65 #include <linux/percpu.h>
66 #include <linux/sysctl.h>
68 #define CREATE_TRACE_POINTS
69 #include <trace/events/filelock.h>
71 #include <linux/uaccess.h>
73 #define IS_POSIX(fl) (fl->fl_flags & FL_POSIX)
74 #define IS_FLOCK(fl) (fl->fl_flags & FL_FLOCK)
75 #define IS_LEASE(fl) (fl->fl_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT))
76 #define IS_OFDLCK(fl) (fl->fl_flags & FL_OFDLCK)
77 #define IS_REMOTELCK(fl) (fl->fl_pid <= 0)
79 static bool lease_breaking(struct file_lock *fl)
81 return fl->fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING);
84 static int target_leasetype(struct file_lock *fl)
86 if (fl->fl_flags & FL_UNLOCK_PENDING)
88 if (fl->fl_flags & FL_DOWNGRADE_PENDING)
93 static int leases_enable = 1;
94 static int lease_break_time = 45;
97 static struct ctl_table locks_sysctls[] = {
99 .procname = "leases-enable",
100 .data = &leases_enable,
101 .maxlen = sizeof(int),
103 .proc_handler = proc_dointvec,
107 .procname = "lease-break-time",
108 .data = &lease_break_time,
109 .maxlen = sizeof(int),
111 .proc_handler = proc_dointvec,
113 #endif /* CONFIG_MMU */
117 static int __init init_fs_locks_sysctls(void)
119 register_sysctl_init("fs", locks_sysctls);
122 early_initcall(init_fs_locks_sysctls);
123 #endif /* CONFIG_SYSCTL */
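/*
 * The knobs registered above surface under /proc/sys/fs; for example
 * (an illustrative shell sketch, the values are arbitrary):
 *
 *	echo 0  > /proc/sys/fs/leases-enable     disallow new leases
 *	echo 60 > /proc/sys/fs/lease-break-time  give lease holders 60 seconds
 */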
126 * The global file_lock_list is only used for displaying /proc/locks, so we
127 * keep a list on each CPU, with each list protected by its own spinlock.
128 * Global serialization is done using file_rwsem.
130 * Note that alterations to the list also require that the relevant flc_lock is held.
133 struct file_lock_list_struct {
135 struct hlist_head hlist;
137 static DEFINE_PER_CPU(struct file_lock_list_struct, file_lock_list);
138 DEFINE_STATIC_PERCPU_RWSEM(file_rwsem);
142 * The blocked_hash is used to find POSIX lock loops for deadlock detection.
143 * It is protected by blocked_lock_lock.
145 * We hash locks by lockowner in order to optimize searching for the lock a
146 * particular lockowner is waiting on.
148 * FIXME: make this value scale via some heuristic? We generally will want more
149 * buckets when we have more lockowners holding locks, but that's a little
150 * difficult to determine without knowing what the workload will look like.
152 #define BLOCKED_HASH_BITS 7
153 static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS);
156 * This lock protects the blocked_hash. Generally, if you're accessing it, you
157 * want to be holding this lock.
159 * In addition, it also protects the fl->fl_blocked_requests list, and the
160 * fl->fl_blocker pointer for file_lock structures that are acting as lock
161 * requests (in contrast to those that are acting as records of acquired locks).
163 * Note that when we acquire this lock in order to change the above fields,
164 * we often hold the flc_lock as well. In certain cases, when reading the fields
165 * protected by this lock, we can skip acquiring it iff we already hold the flc_lock.
168 static DEFINE_SPINLOCK(blocked_lock_lock);
170 static struct kmem_cache *flctx_cache __read_mostly;
171 static struct kmem_cache *filelock_cache __read_mostly;
173 static struct file_lock_context *
174 locks_get_lock_context(struct inode *inode, int type)
176 struct file_lock_context *ctx;
178 /* paired with cmpxchg() below */
179 ctx = locks_inode_context(inode);
180 if (likely(ctx) || type == F_UNLCK)
183 ctx = kmem_cache_alloc(flctx_cache, GFP_KERNEL);
187 spin_lock_init(&ctx->flc_lock);
188 INIT_LIST_HEAD(&ctx->flc_flock);
189 INIT_LIST_HEAD(&ctx->flc_posix);
190 INIT_LIST_HEAD(&ctx->flc_lease);
193 * Assign the pointer if it's not already assigned. If it is, then
194 * free the context we just allocated.
196 if (cmpxchg(&inode->i_flctx, NULL, ctx)) {
197 kmem_cache_free(flctx_cache, ctx);
198 ctx = locks_inode_context(inode);
201 trace_locks_get_lock_context(inode, type, ctx);
206 locks_dump_ctx_list(struct list_head *list, char *list_type)
208 struct file_lock *fl;
210 list_for_each_entry(fl, list, fl_list) {
211 pr_warn("%s: fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n", list_type, fl->fl_owner, fl->fl_flags, fl->fl_type, fl->fl_pid);
216 locks_check_ctx_lists(struct inode *inode)
218 struct file_lock_context *ctx = inode->i_flctx;
220 if (unlikely(!list_empty(&ctx->flc_flock) ||
221 !list_empty(&ctx->flc_posix) ||
222 !list_empty(&ctx->flc_lease))) {
223 pr_warn("Leaked locks on dev=0x%x:0x%x ino=0x%lx:\n",
224 MAJOR(inode->i_sb->s_dev), MINOR(inode->i_sb->s_dev),
226 locks_dump_ctx_list(&ctx->flc_flock, "FLOCK");
227 locks_dump_ctx_list(&ctx->flc_posix, "POSIX");
228 locks_dump_ctx_list(&ctx->flc_lease, "LEASE");
233 locks_check_ctx_file_list(struct file *filp, struct list_head *list,
236 struct file_lock *fl;
237 struct inode *inode = file_inode(filp);
239 list_for_each_entry(fl, list, fl_list)
240 if (fl->fl_file == filp)
241 pr_warn("Leaked %s lock on dev=0x%x:0x%x ino=0x%lx "
242 " fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n",
243 list_type, MAJOR(inode->i_sb->s_dev),
244 MINOR(inode->i_sb->s_dev), inode->i_ino,
245 fl->fl_owner, fl->fl_flags, fl->fl_type, fl->fl_pid);
249 locks_free_lock_context(struct inode *inode)
251 struct file_lock_context *ctx = locks_inode_context(inode);
254 locks_check_ctx_lists(inode);
255 kmem_cache_free(flctx_cache, ctx);
259 static void locks_init_lock_heads(struct file_lock *fl)
261 INIT_HLIST_NODE(&fl->fl_link);
262 INIT_LIST_HEAD(&fl->fl_list);
263 INIT_LIST_HEAD(&fl->fl_blocked_requests);
264 INIT_LIST_HEAD(&fl->fl_blocked_member);
265 init_waitqueue_head(&fl->fl_wait);
268 /* Allocate an empty lock structure. */
269 struct file_lock *locks_alloc_lock(void)
271 struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);
274 locks_init_lock_heads(fl);
278 EXPORT_SYMBOL_GPL(locks_alloc_lock);
280 void locks_release_private(struct file_lock *fl)
282 BUG_ON(waitqueue_active(&fl->fl_wait));
283 BUG_ON(!list_empty(&fl->fl_list));
284 BUG_ON(!list_empty(&fl->fl_blocked_requests));
285 BUG_ON(!list_empty(&fl->fl_blocked_member));
286 BUG_ON(!hlist_unhashed(&fl->fl_link));
289 if (fl->fl_ops->fl_release_private)
290 fl->fl_ops->fl_release_private(fl);
295 if (fl->fl_lmops->lm_put_owner) {
296 fl->fl_lmops->lm_put_owner(fl->fl_owner);
302 EXPORT_SYMBOL_GPL(locks_release_private);
305 * locks_owner_has_blockers - Check for blocking lock requests
306 * @flctx: file lock context
310 * %true: @owner has at least one blocker
311 * %false: @owner has no blockers
313 bool locks_owner_has_blockers(struct file_lock_context *flctx,
316 struct file_lock *fl;
318 spin_lock(&flctx->flc_lock);
319 list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
320 if (fl->fl_owner != owner)
322 if (!list_empty(&fl->fl_blocked_requests)) {
323 spin_unlock(&flctx->flc_lock);
327 spin_unlock(&flctx->flc_lock);
330 EXPORT_SYMBOL_GPL(locks_owner_has_blockers);
332 /* Free a lock which is not in use. */
333 void locks_free_lock(struct file_lock *fl)
335 locks_release_private(fl);
336 kmem_cache_free(filelock_cache, fl);
338 EXPORT_SYMBOL(locks_free_lock);
341 locks_dispose_list(struct list_head *dispose)
343 struct file_lock *fl;
345 while (!list_empty(dispose)) {
346 fl = list_first_entry(dispose, struct file_lock, fl_list);
347 list_del_init(&fl->fl_list);
352 void locks_init_lock(struct file_lock *fl)
354 memset(fl, 0, sizeof(struct file_lock));
355 locks_init_lock_heads(fl);
357 EXPORT_SYMBOL(locks_init_lock);
360 * Initialize a new lock from an existing file_lock structure.
362 void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
364 new->fl_owner = fl->fl_owner;
365 new->fl_pid = fl->fl_pid;
367 new->fl_flags = fl->fl_flags;
368 new->fl_type = fl->fl_type;
369 new->fl_start = fl->fl_start;
370 new->fl_end = fl->fl_end;
371 new->fl_lmops = fl->fl_lmops;
375 if (fl->fl_lmops->lm_get_owner)
376 fl->fl_lmops->lm_get_owner(fl->fl_owner);
379 EXPORT_SYMBOL(locks_copy_conflock);
381 void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
383 /* "new" must be a freshly-initialized lock */
384 WARN_ON_ONCE(new->fl_ops);
386 locks_copy_conflock(new, fl);
388 new->fl_file = fl->fl_file;
389 new->fl_ops = fl->fl_ops;
392 if (fl->fl_ops->fl_copy_lock)
393 fl->fl_ops->fl_copy_lock(new, fl);
396 EXPORT_SYMBOL(locks_copy_lock);
398 static void locks_move_blocks(struct file_lock *new, struct file_lock *fl)
403 * As ctx->flc_lock is held, new requests cannot be added to
404 * ->fl_blocked_requests, so we don't need a lock to check if it
407 if (list_empty(&fl->fl_blocked_requests))
409 spin_lock(&blocked_lock_lock);
410 list_splice_init(&fl->fl_blocked_requests, &new->fl_blocked_requests);
411 list_for_each_entry(f, &new->fl_blocked_requests, fl_blocked_member)
413 spin_unlock(&blocked_lock_lock);
416 static inline int flock_translate_cmd(int cmd) {
428 /* Fill in a file_lock structure with an appropriate FLOCK lock. */
429 static void flock_make_lock(struct file *filp, struct file_lock *fl, int type)
435 fl->fl_pid = current->tgid;
436 fl->fl_flags = FL_FLOCK;
438 fl->fl_end = OFFSET_MAX;
441 static int assign_type(struct file_lock *fl, long type)
455 static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
458 switch (l->l_whence) {
463 fl->fl_start = filp->f_pos;
466 fl->fl_start = i_size_read(file_inode(filp));
471 if (l->l_start > OFFSET_MAX - fl->fl_start)
473 fl->fl_start += l->l_start;
474 if (fl->fl_start < 0)
477 /* POSIX-1996 leaves the case l->l_len < 0 undefined;
478 POSIX-2001 defines it. */
480 if (l->l_len - 1 > OFFSET_MAX - fl->fl_start)
482 fl->fl_end = fl->fl_start + (l->l_len - 1);
484 } else if (l->l_len < 0) {
485 if (fl->fl_start + l->l_len < 0)
487 fl->fl_end = fl->fl_start - 1;
488 fl->fl_start += l->l_len;
490 fl->fl_end = OFFSET_MAX;
492 fl->fl_owner = current->files;
493 fl->fl_pid = current->tgid;
495 fl->fl_flags = FL_POSIX;
499 return assign_type(fl, l->l_type);
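/*
 * Userspace view of the conversion above (an editor's sketch; the
 * fcntl(2) call is illustrative and fd is assumed to be open):
 *
 *	struct flock64 l = {
 *		.l_type   = F_WRLCK,
 *		.l_whence = SEEK_END,
 *		.l_start  = -10,	last 10 bytes of the file
 *		.l_len    = 0,		0 extends to EOF, so fl_end = OFFSET_MAX
 *	};
 *	fcntl(fd, F_SETLK, &l);
 *
 * A negative l_len locks the |l_len| bytes ending just before l_start,
 * which is the "l->l_len < 0" branch above (POSIX.1-2001 semantics).
 */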
502 /* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX style lock. */
505 static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
508 struct flock64 ll = {
510 .l_whence = l->l_whence,
511 .l_start = l->l_start,
515 return flock64_to_posix_lock(filp, fl, &ll);
518 /* default lease lock manager operations */
520 lease_break_callback(struct file_lock *fl)
522 kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
527 lease_setup(struct file_lock *fl, void **priv)
529 struct file *filp = fl->fl_file;
530 struct fasync_struct *fa = *priv;
533 * fasync_insert_entry() returns the old entry if any. If there was no
534 * old entry, then it used "priv" and inserted it into the fasync list.
535 * Clear the pointer to indicate that it shouldn't be freed.
537 if (!fasync_insert_entry(fa->fa_fd, filp, &fl->fl_fasync, fa))
540 __f_setown(filp, task_pid(current), PIDTYPE_TGID, 0);
543 static const struct lock_manager_operations lease_manager_ops = {
544 .lm_break = lease_break_callback,
545 .lm_change = lease_modify,
546 .lm_setup = lease_setup,
550 * Initialize a lease, using the default lock manager operations.
552 static int lease_init(struct file *filp, long type, struct file_lock *fl)
554 if (assign_type(fl, type) != 0)
558 fl->fl_pid = current->tgid;
561 fl->fl_flags = FL_LEASE;
563 fl->fl_end = OFFSET_MAX;
565 fl->fl_lmops = &lease_manager_ops;
569 /* Allocate a file_lock initialised to this type of lease */
570 static struct file_lock *lease_alloc(struct file *filp, long type)
572 struct file_lock *fl = locks_alloc_lock();
576 return ERR_PTR(error);
578 error = lease_init(filp, type, fl);
581 return ERR_PTR(error);
586 /* Check if two locks overlap each other. */
588 static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
590 return ((fl1->fl_end >= fl2->fl_start) &&
591 (fl2->fl_end >= fl1->fl_start));
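/*
 * E.g. [0,9] and [5,14] overlap (9 >= 5 and 14 >= 0), while the adjacent
 * ranges [0,4] and [5,9] do not (4 < 5).
 */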
595 * Check whether two locks have the same owner.
597 static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
599 return fl1->fl_owner == fl2->fl_owner;
602 /* Must be called with the flc_lock held! */
603 static void locks_insert_global_locks(struct file_lock *fl)
605 struct file_lock_list_struct *fll = this_cpu_ptr(&file_lock_list);
607 percpu_rwsem_assert_held(&file_rwsem);
609 spin_lock(&fll->lock);
610 fl->fl_link_cpu = smp_processor_id();
611 hlist_add_head(&fl->fl_link, &fll->hlist);
612 spin_unlock(&fll->lock);
615 /* Must be called with the flc_lock held! */
616 static void locks_delete_global_locks(struct file_lock *fl)
618 struct file_lock_list_struct *fll;
620 percpu_rwsem_assert_held(&file_rwsem);
623 * Avoid taking lock if already unhashed. This is safe since this check
624 * is done while holding the flc_lock, and new insertions into the list
625 * also require that it be held.
627 if (hlist_unhashed(&fl->fl_link))
630 fll = per_cpu_ptr(&file_lock_list, fl->fl_link_cpu);
631 spin_lock(&fll->lock);
632 hlist_del_init(&fl->fl_link);
633 spin_unlock(&fll->lock);
637 posix_owner_key(struct file_lock *fl)
639 return (unsigned long)fl->fl_owner;
642 static void locks_insert_global_blocked(struct file_lock *waiter)
644 lockdep_assert_held(&blocked_lock_lock);
646 hash_add(blocked_hash, &waiter->fl_link, posix_owner_key(waiter));
649 static void locks_delete_global_blocked(struct file_lock *waiter)
651 lockdep_assert_held(&blocked_lock_lock);
653 hash_del(&waiter->fl_link);
656 /* Remove waiter from blocker's block list.
657 * When blocker ends up pointing to itself then the list is empty.
659 * Must be called with blocked_lock_lock held.
661 static void __locks_delete_block(struct file_lock *waiter)
663 locks_delete_global_blocked(waiter);
664 list_del_init(&waiter->fl_blocked_member);
667 static void __locks_wake_up_blocks(struct file_lock *blocker)
669 while (!list_empty(&blocker->fl_blocked_requests)) {
670 struct file_lock *waiter;
672 waiter = list_first_entry(&blocker->fl_blocked_requests,
673 struct file_lock, fl_blocked_member);
674 __locks_delete_block(waiter);
675 if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
676 waiter->fl_lmops->lm_notify(waiter);
678 wake_up(&waiter->fl_wait);
681 * The setting of fl_blocker to NULL marks the "done"
682 * point in deleting a block. Paired with acquire at the top
683 * of locks_delete_block().
685 smp_store_release(&waiter->fl_blocker, NULL);
690 * locks_delete_block - stop waiting for a file lock
691 * @waiter: the lock which was waiting
693 * lockd/nfsd need to disconnect the lock while working on it.
695 int locks_delete_block(struct file_lock *waiter)
697 int status = -ENOENT;
700 * If fl_blocker is NULL, it won't be set again as this thread "owns"
701 * the lock and is the only one that might try to claim the lock.
703 * We use acquire/release to manage fl_blocker so that we can
704 * optimize away taking the blocked_lock_lock in many cases.
706 * The smp_load_acquire guarantees two things:
708 * 1/ that fl_blocked_requests can be tested locklessly. If something
709 * was recently added to that list it must have been in a locked region
710 * *before* the locked region when fl_blocker was set to NULL.
712 * 2/ that no other thread is accessing 'waiter', so it is safe to free
713 * it. __locks_wake_up_blocks is careful not to touch waiter after
714 * fl_blocker is released.
716 * If a lockless check of fl_blocker shows it to be NULL, we know that
717 * no new locks can be inserted into its fl_blocked_requests list, and
718 * can avoid doing anything further if the list is empty.
720 if (!smp_load_acquire(&waiter->fl_blocker) &&
721 list_empty(&waiter->fl_blocked_requests))
724 spin_lock(&blocked_lock_lock);
725 if (waiter->fl_blocker)
727 __locks_wake_up_blocks(waiter);
728 __locks_delete_block(waiter);
731 * The setting of fl_blocker to NULL marks the "done" point in deleting
732 * a block. Paired with acquire at the top of this function.
734 smp_store_release(&waiter->fl_blocker, NULL);
735 spin_unlock(&blocked_lock_lock);
738 EXPORT_SYMBOL(locks_delete_block);
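/*
 * Canonical caller pattern (an editor's sketch, mirroring the *_wait()
 * helpers later in this file): sleep until the request is no longer
 * blocked, then sever any remaining linkage before returning.
 *
 *	error = wait_event_interruptible(fl->fl_wait,
 *			list_empty(&fl->fl_blocked_member));
 *	...
 *	locks_delete_block(fl);
 */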
740 /* Insert waiter into blocker's block list.
741 * We use a circular list so that processes can be easily woken up in
742 * the order they blocked. The documentation doesn't require this but
743 * it seems like the reasonable thing to do.
745 * Must be called with both the flc_lock and blocked_lock_lock held. The
746 * fl_blocked_requests list itself is protected by the blocked_lock_lock,
747 * but by ensuring that the flc_lock is also held on insertions we can avoid
748 * taking the blocked_lock_lock in some cases when we see that the
749 * fl_blocked_requests list is empty.
751 * Rather than just adding to the list, we check for conflicts with any existing
752 * waiters, and add beneath any waiter that blocks the new waiter.
753 * Thus wakeups don't happen until needed.
755 static void __locks_insert_block(struct file_lock *blocker,
756 struct file_lock *waiter,
757 bool conflict(struct file_lock *,
760 struct file_lock *fl;
761 BUG_ON(!list_empty(&waiter->fl_blocked_member));
764 list_for_each_entry(fl, &blocker->fl_blocked_requests, fl_blocked_member)
765 if (conflict(fl, waiter)) {
769 waiter->fl_blocker = blocker;
770 list_add_tail(&waiter->fl_blocked_member, &blocker->fl_blocked_requests);
771 if (IS_POSIX(blocker) && !IS_OFDLCK(blocker))
772 locks_insert_global_blocked(waiter);
774 /* The requests in waiter->fl_blocked_requests are known to conflict with
775 * waiter, but might not conflict with blocker, or the requests
776 * and lock which block it. So they all need to be woken.
778 __locks_wake_up_blocks(waiter);
781 /* Must be called with flc_lock held. */
782 static void locks_insert_block(struct file_lock *blocker,
783 struct file_lock *waiter,
784 bool conflict(struct file_lock *,
787 spin_lock(&blocked_lock_lock);
788 __locks_insert_block(blocker, waiter, conflict);
789 spin_unlock(&blocked_lock_lock);
793 * Wake up processes blocked waiting for blocker.
795 * Must be called with the inode->flc_lock held!
797 static void locks_wake_up_blocks(struct file_lock *blocker)
800 * Avoid taking global lock if list is empty. This is safe since new
801 * blocked requests are only added to the list under the flc_lock, and
802 * the flc_lock is always held here. Note that removal from the
803 * fl_blocked_requests list does not require the flc_lock, so we must
804 * recheck list_empty() after acquiring the blocked_lock_lock.
806 if (list_empty(&blocker->fl_blocked_requests))
809 spin_lock(&blocked_lock_lock);
810 __locks_wake_up_blocks(blocker);
811 spin_unlock(&blocked_lock_lock);
815 locks_insert_lock_ctx(struct file_lock *fl, struct list_head *before)
817 list_add_tail(&fl->fl_list, before);
818 locks_insert_global_locks(fl);
822 locks_unlink_lock_ctx(struct file_lock *fl)
824 locks_delete_global_locks(fl);
825 list_del_init(&fl->fl_list);
826 locks_wake_up_blocks(fl);
830 locks_delete_lock_ctx(struct file_lock *fl, struct list_head *dispose)
832 locks_unlink_lock_ctx(fl);
834 list_add(&fl->fl_list, dispose);
839 /* Determine if lock sys_fl blocks lock caller_fl. Common functionality
840 * checks for shared/exclusive status of overlapping locks.
842 static bool locks_conflict(struct file_lock *caller_fl,
843 struct file_lock *sys_fl)
845 if (sys_fl->fl_type == F_WRLCK)
847 if (caller_fl->fl_type == F_WRLCK)
852 /* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
853 * checking before calling locks_conflict().
855 static bool posix_locks_conflict(struct file_lock *caller_fl,
856 struct file_lock *sys_fl)
858 /* POSIX locks owned by the same process do not conflict with
861 if (posix_same_owner(caller_fl, sys_fl))
864 /* Check whether they overlap */
865 if (!locks_overlap(caller_fl, sys_fl))
868 return locks_conflict(caller_fl, sys_fl);
871 /* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific
872 * checking before calling locks_conflict().
874 static bool flock_locks_conflict(struct file_lock *caller_fl,
875 struct file_lock *sys_fl)
877 /* FLOCK locks referring to the same filp do not conflict with
880 if (caller_fl->fl_file == sys_fl->fl_file)
883 return locks_conflict(caller_fl, sys_fl);
887 posix_test_lock(struct file *filp, struct file_lock *fl)
889 struct file_lock *cfl;
890 struct file_lock_context *ctx;
891 struct inode *inode = file_inode(filp);
895 ctx = locks_inode_context(inode);
896 if (!ctx || list_empty_careful(&ctx->flc_posix)) {
897 fl->fl_type = F_UNLCK;
902 spin_lock(&ctx->flc_lock);
903 list_for_each_entry(cfl, &ctx->flc_posix, fl_list) {
904 if (!posix_locks_conflict(fl, cfl))
906 if (cfl->fl_lmops && cfl->fl_lmops->lm_lock_expirable
907 && (*cfl->fl_lmops->lm_lock_expirable)(cfl)) {
908 owner = cfl->fl_lmops->lm_mod_owner;
909 func = cfl->fl_lmops->lm_expire_lock;
911 spin_unlock(&ctx->flc_lock);
916 locks_copy_conflock(fl, cfl);
919 fl->fl_type = F_UNLCK;
921 spin_unlock(&ctx->flc_lock);
924 EXPORT_SYMBOL(posix_test_lock);
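/*
 * Userspace sketch (an editor's example): F_GETLK reaches posix_test_lock()
 * via vfs_test_lock(). On return l_type is F_UNLCK, or else describes the
 * first conflicting lock found.
 *
 *	struct flock fl = {
 *		.l_type = F_WRLCK, .l_whence = SEEK_SET,
 *		.l_start = 0, .l_len = 100,
 *	};
 *	if (fcntl(fd, F_GETLK, &fl) == 0 && fl.l_type != F_UNLCK)
 *		printf("byte range held by pid %d\n", fl.l_pid);
 */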
927 * Deadlock detection:
929 * We attempt to detect deadlocks that are due purely to posix file
932 * We assume that a task can be waiting for at most one lock at a time.
933 * So for any acquired lock, the process holding that lock may be
934 * waiting on at most one other lock. That lock in turn may be held by
935 * someone waiting for at most one other lock. Given a requested lock
936 * caller_fl which is about to wait for a conflicting lock block_fl, we
937 * follow this chain of waiters to ensure we are not about to create a
940 * Since we do this before we ever put a process to sleep on a lock, we
941 * are ensured that there is never a cycle; that is what guarantees that
942 * the while() loop in posix_locks_deadlock() eventually completes.
944 * Note: the above assumption may not be true when handling lock
945 * requests from a broken NFS client. It may also fail in the presence
946 * of tasks (such as posix threads) sharing the same open file table.
947 * To handle those cases, we just bail out after a few iterations.
949 * For FL_OFDLCK locks, the owner is the filp, not the files_struct.
950 * Because the owner is not even nominally tied to a thread of
951 * execution, the deadlock detection below can't reasonably work well. Just skip it.
954 * In principle, we could do a more limited deadlock detection on FL_OFDLCK
955 * locks that just checks for the case where two tasks are attempting to
956 * upgrade from read to write locks on the same inode.
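/*
 * Example of the cycle this detects (an editor's sketch):
 *
 *	task A: write-locks byte 0, then blocks requesting byte 1
 *	task B: write-locks byte 1, then requests byte 0
 *
 * Granting B's second request would leave A waiting on B while B waits
 * on A, so posix_lock_inode() fails it with -EDEADLK instead.
 */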
959 #define MAX_DEADLK_ITERATIONS 10
961 /* Find a lock that the owner of the given block_fl is blocking on. */
962 static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
964 struct file_lock *fl;
966 hash_for_each_possible(blocked_hash, fl, fl_link, posix_owner_key(block_fl)) {
967 if (posix_same_owner(fl, block_fl)) {
968 while (fl->fl_blocker)
976 /* Must be called with the blocked_lock_lock held! */
977 static int posix_locks_deadlock(struct file_lock *caller_fl,
978 struct file_lock *block_fl)
982 lockdep_assert_held(&blocked_lock_lock);
985 * This deadlock detector can't reasonably detect deadlocks with
986 * FL_OFDLCK locks, since they aren't owned by a process, per se.
988 if (IS_OFDLCK(caller_fl))
991 while ((block_fl = what_owner_is_waiting_for(block_fl))) {
992 if (i++ > MAX_DEADLK_ITERATIONS)
994 if (posix_same_owner(caller_fl, block_fl))
1000 /* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
1001 * after any leases, but before any posix locks.
1003 * Note that if called with an FL_EXISTS argument, the caller may determine
1004 * whether or not a lock was successfully freed by testing the return
1005 * value for -ENOENT.
1007 static int flock_lock_inode(struct inode *inode, struct file_lock *request)
1009 struct file_lock *new_fl = NULL;
1010 struct file_lock *fl;
1011 struct file_lock_context *ctx;
1016 ctx = locks_get_lock_context(inode, request->fl_type);
1018 if (request->fl_type != F_UNLCK)
1020 return (request->fl_flags & FL_EXISTS) ? -ENOENT : 0;
1023 if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
1024 new_fl = locks_alloc_lock();
1029 percpu_down_read(&file_rwsem);
1030 spin_lock(&ctx->flc_lock);
1031 if (request->fl_flags & FL_ACCESS)
1034 list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
1035 if (request->fl_file != fl->fl_file)
1037 if (request->fl_type == fl->fl_type)
1040 locks_delete_lock_ctx(fl, &dispose);
1044 if (request->fl_type == F_UNLCK) {
1045 if ((request->fl_flags & FL_EXISTS) && !found)
1051 list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
1052 if (!flock_locks_conflict(request, fl))
1055 if (!(request->fl_flags & FL_SLEEP))
1057 error = FILE_LOCK_DEFERRED;
1058 locks_insert_block(fl, request, flock_locks_conflict);
1061 if (request->fl_flags & FL_ACCESS)
1063 locks_copy_lock(new_fl, request);
1064 locks_move_blocks(new_fl, request);
1065 locks_insert_lock_ctx(new_fl, &ctx->flc_flock);
1070 spin_unlock(&ctx->flc_lock);
1071 percpu_up_read(&file_rwsem);
1073 locks_free_lock(new_fl);
1074 locks_dispose_list(&dispose);
1075 trace_flock_lock_inode(inode, request, error);
1079 static int posix_lock_inode(struct inode *inode, struct file_lock *request,
1080 struct file_lock *conflock)
1082 struct file_lock *fl, *tmp;
1083 struct file_lock *new_fl = NULL;
1084 struct file_lock *new_fl2 = NULL;
1085 struct file_lock *left = NULL;
1086 struct file_lock *right = NULL;
1087 struct file_lock_context *ctx;
1094 ctx = locks_get_lock_context(inode, request->fl_type);
1096 return (request->fl_type == F_UNLCK) ? 0 : -ENOMEM;
1099 * We may need two file_lock structures for this operation,
1100 * so we get them in advance to avoid races.
1102 * In some cases we can be sure that no new locks will be needed.
1104 if (!(request->fl_flags & FL_ACCESS) &&
1105 (request->fl_type != F_UNLCK ||
1106 request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
1107 new_fl = locks_alloc_lock();
1108 new_fl2 = locks_alloc_lock();
1112 percpu_down_read(&file_rwsem);
1113 spin_lock(&ctx->flc_lock);
1115 * New lock request. Walk all POSIX locks and look for conflicts. If
1116 * there are any, either return error or put the request on the
1117 * blocker's list of waiters and the global blocked_hash.
1119 if (request->fl_type != F_UNLCK) {
1120 list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
1121 if (!posix_locks_conflict(request, fl))
1123 if (fl->fl_lmops && fl->fl_lmops->lm_lock_expirable
1124 && (*fl->fl_lmops->lm_lock_expirable)(fl)) {
1125 owner = fl->fl_lmops->lm_mod_owner;
1126 func = fl->fl_lmops->lm_expire_lock;
1127 __module_get(owner);
1128 spin_unlock(&ctx->flc_lock);
1129 percpu_up_read(&file_rwsem);
1135 locks_copy_conflock(conflock, fl);
1137 if (!(request->fl_flags & FL_SLEEP))
1140 * Deadlock detection and insertion into the blocked
1141 * locks list must be done while holding the same lock!
1144 spin_lock(&blocked_lock_lock);
1146 * Ensure that we don't find any locks blocked on this
1147 * request during deadlock detection.
1149 __locks_wake_up_blocks(request);
1150 if (likely(!posix_locks_deadlock(request, fl))) {
1151 error = FILE_LOCK_DEFERRED;
1152 __locks_insert_block(fl, request,
1153 posix_locks_conflict);
1155 spin_unlock(&blocked_lock_lock);
1160 /* If we're just looking for a conflict, we're done. */
1162 if (request->fl_flags & FL_ACCESS)
1165 /* Find the first old lock with the same owner as the new lock */
1166 list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
1167 if (posix_same_owner(request, fl))
1171 /* Process locks with this owner. */
1172 list_for_each_entry_safe_from(fl, tmp, &ctx->flc_posix, fl_list) {
1173 if (!posix_same_owner(request, fl))
1176 /* Detect adjacent or overlapping regions (if same lock type) */
1177 if (request->fl_type == fl->fl_type) {
1178 /* In all comparisons of start vs end, use
1179 * "start - 1" rather than "end + 1". If end
1180 * is OFFSET_MAX, end + 1 will become negative.
1182 if (fl->fl_end < request->fl_start - 1)
1184 /* If the next lock in the list has entirely bigger
1185 * addresses than the new one, insert the lock here.
1187 if (fl->fl_start - 1 > request->fl_end)
1190 /* If we come here, the new and old lock are of the
1191 * same type and adjacent or overlapping. Make one
1192 * lock yielding from the lower start address of both
1193 * locks to the higher end address.
1195 if (fl->fl_start > request->fl_start)
1196 fl->fl_start = request->fl_start;
1198 request->fl_start = fl->fl_start;
1199 if (fl->fl_end < request->fl_end)
1200 fl->fl_end = request->fl_end;
1202 request->fl_end = fl->fl_end;
1204 locks_delete_lock_ctx(fl, &dispose);
1210 /* Processing for different lock types is a bit more complex. */
1213 if (fl->fl_end < request->fl_start)
1215 if (fl->fl_start > request->fl_end)
1217 if (request->fl_type == F_UNLCK)
1219 if (fl->fl_start < request->fl_start)
1221 /* If the next lock in the list has a higher end
1222 * address than the new one, insert the new one here.
1224 if (fl->fl_end > request->fl_end) {
1228 if (fl->fl_start >= request->fl_start) {
1229 /* The new lock completely replaces an old
1230 * one (This may happen several times).
1233 locks_delete_lock_ctx(fl, &dispose);
1237 * Replace the old lock with new_fl, and
1238 * remove the old one. It's safe to do the
1239 * insert here since we know that we won't be
1240 * using new_fl later, and that the lock is
1241 * just replacing an existing lock.
1246 locks_copy_lock(new_fl, request);
1247 locks_move_blocks(new_fl, request);
1250 locks_insert_lock_ctx(request, &fl->fl_list);
1251 locks_delete_lock_ctx(fl, &dispose);
1258 * The above code only modifies existing locks in case of merging or
1259 * replacing. If new lock(s) need to be inserted, all modifications are
1260 * done below this point, so it's still safe to bail out.
1262 error = -ENOLCK; /* "no luck" */
1263 if (right && left == right && !new_fl2)
1268 if (request->fl_type == F_UNLCK) {
1269 if (request->fl_flags & FL_EXISTS)
1278 locks_copy_lock(new_fl, request);
1279 locks_move_blocks(new_fl, request);
1280 locks_insert_lock_ctx(new_fl, &fl->fl_list);
1285 if (left == right) {
1286 /* The new lock breaks the old one in two pieces,
1287 * so we have to use the second new lock.
1291 locks_copy_lock(left, right);
1292 locks_insert_lock_ctx(left, &fl->fl_list);
1294 right->fl_start = request->fl_end + 1;
1295 locks_wake_up_blocks(right);
1298 left->fl_end = request->fl_start - 1;
1299 locks_wake_up_blocks(left);
1302 spin_unlock(&ctx->flc_lock);
1303 percpu_up_read(&file_rwsem);
1305 * Free any unused locks.
1308 locks_free_lock(new_fl);
1310 locks_free_lock(new_fl2);
1311 locks_dispose_list(&dispose);
1312 trace_posix_lock_inode(inode, request, error);
1318 * posix_lock_file - Apply a POSIX-style lock to a file
1319 * @filp: The file to apply the lock to
1320 * @fl: The lock to be applied
1321 * @conflock: Place to return a copy of the conflicting lock, if found.
1323 * Add a POSIX style lock to a file.
1324 * We merge adjacent & overlapping locks whenever possible.
1325 * POSIX locks are sorted by owner task, then by starting address
1327 * Note that if called with an FL_EXISTS argument, the caller may determine
1328 * whether or not a lock was successfully freed by testing the return
1329 * value for -ENOENT.
1331 int posix_lock_file(struct file *filp, struct file_lock *fl,
1332 struct file_lock *conflock)
1334 return posix_lock_inode(file_inode(filp), fl, conflock);
1336 EXPORT_SYMBOL(posix_lock_file);
1339 * posix_lock_inode_wait - Apply a POSIX-style lock to a file
1340 * @inode: inode of file to which lock request should be applied
1341 * @fl: The lock to be applied
1343 * Apply a POSIX style lock request to an inode.
1345 static int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl)
1350 error = posix_lock_inode(inode, fl, NULL);
1351 if (error != FILE_LOCK_DEFERRED)
1353 error = wait_event_interruptible(fl->fl_wait,
1354 list_empty(&fl->fl_blocked_member));
1358 locks_delete_block(fl);
1362 static void lease_clear_pending(struct file_lock *fl, int arg)
1366 fl->fl_flags &= ~FL_UNLOCK_PENDING;
1369 fl->fl_flags &= ~FL_DOWNGRADE_PENDING;
1373 /* We already had a lease on this file; just change its type */
1374 int lease_modify(struct file_lock *fl, int arg, struct list_head *dispose)
1376 int error = assign_type(fl, arg);
1380 lease_clear_pending(fl, arg);
1381 locks_wake_up_blocks(fl);
1382 if (arg == F_UNLCK) {
1383 struct file *filp = fl->fl_file;
1386 filp->f_owner.signum = 0;
1387 fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
1388 if (fl->fl_fasync != NULL) {
1389 printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
1390 fl->fl_fasync = NULL;
1392 locks_delete_lock_ctx(fl, dispose);
1396 EXPORT_SYMBOL(lease_modify);
1398 static bool past_time(unsigned long then)
1401 /* 0 is a special value meaning "this never expires": */
1403 return time_after(jiffies, then);
1406 static void time_out_leases(struct inode *inode, struct list_head *dispose)
1408 struct file_lock_context *ctx = inode->i_flctx;
1409 struct file_lock *fl, *tmp;
1411 lockdep_assert_held(&ctx->flc_lock);
1413 list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
1414 trace_time_out_leases(inode, fl);
1415 if (past_time(fl->fl_downgrade_time))
1416 lease_modify(fl, F_RDLCK, dispose);
1417 if (past_time(fl->fl_break_time))
1418 lease_modify(fl, F_UNLCK, dispose);
1422 static bool leases_conflict(struct file_lock *lease, struct file_lock *breaker)
1426 if (lease->fl_lmops->lm_breaker_owns_lease
1427 && lease->fl_lmops->lm_breaker_owns_lease(lease))
1429 if ((breaker->fl_flags & FL_LAYOUT) != (lease->fl_flags & FL_LAYOUT)) {
1433 if ((breaker->fl_flags & FL_DELEG) && (lease->fl_flags & FL_LEASE)) {
1438 rc = locks_conflict(breaker, lease);
1440 trace_leases_conflict(rc, lease, breaker);
1445 any_leases_conflict(struct inode *inode, struct file_lock *breaker)
1447 struct file_lock_context *ctx = inode->i_flctx;
1448 struct file_lock *fl;
1450 lockdep_assert_held(&ctx->flc_lock);
1452 list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1453 if (leases_conflict(fl, breaker))
1460 * __break_lease - revoke all outstanding leases on file
1461 * @inode: the inode of the file to return
1462 * @mode: O_RDONLY: break only write leases; O_WRONLY or O_RDWR: break all leases
1464 * @type: FL_LEASE: break leases and delegations; FL_DELEG: break only delegations
1467 * break_lease (inlined for speed) has checked there already is at least
1468 * some kind of lock (maybe a lease) on this file. Leases are broken on
1469 * a call to open() or truncate(). This function can sleep unless you
1470 * specified %O_NONBLOCK to your open().
1472 int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
1475 struct file_lock_context *ctx;
1476 struct file_lock *new_fl, *fl, *tmp;
1477 unsigned long break_time;
1478 int want_write = (mode & O_ACCMODE) != O_RDONLY;
1481 new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
1483 return PTR_ERR(new_fl);
1484 new_fl->fl_flags = type;
1486 /* typically we will check that ctx is non-NULL before calling */
1487 ctx = locks_inode_context(inode);
1493 percpu_down_read(&file_rwsem);
1494 spin_lock(&ctx->flc_lock);
1496 time_out_leases(inode, &dispose);
1498 if (!any_leases_conflict(inode, new_fl))
1502 if (lease_break_time > 0) {
1503 break_time = jiffies + lease_break_time * HZ;
1504 if (break_time == 0)
1505 break_time++; /* so that 0 means no break time */
1508 list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
1509 if (!leases_conflict(fl, new_fl))
1512 if (fl->fl_flags & FL_UNLOCK_PENDING)
1514 fl->fl_flags |= FL_UNLOCK_PENDING;
1515 fl->fl_break_time = break_time;
1517 if (lease_breaking(fl))
1519 fl->fl_flags |= FL_DOWNGRADE_PENDING;
1520 fl->fl_downgrade_time = break_time;
1522 if (fl->fl_lmops->lm_break(fl))
1523 locks_delete_lock_ctx(fl, &dispose);
1526 if (list_empty(&ctx->flc_lease))
1529 if (mode & O_NONBLOCK) {
1530 trace_break_lease_noblock(inode, new_fl);
1531 error = -EWOULDBLOCK;
1536 fl = list_first_entry(&ctx->flc_lease, struct file_lock, fl_list);
1537 break_time = fl->fl_break_time;
1538 if (break_time != 0)
1539 break_time -= jiffies;
1540 if (break_time == 0)
1542 locks_insert_block(fl, new_fl, leases_conflict);
1543 trace_break_lease_block(inode, new_fl);
1544 spin_unlock(&ctx->flc_lock);
1545 percpu_up_read(&file_rwsem);
1547 locks_dispose_list(&dispose);
1548 error = wait_event_interruptible_timeout(new_fl->fl_wait,
1549 list_empty(&new_fl->fl_blocked_member),
1552 percpu_down_read(&file_rwsem);
1553 spin_lock(&ctx->flc_lock);
1554 trace_break_lease_unblock(inode, new_fl);
1555 locks_delete_block(new_fl);
1558 * Wait for the next conflicting lease that has not been broken yet. */
1562 time_out_leases(inode, &dispose);
1563 if (any_leases_conflict(inode, new_fl))
1568 spin_unlock(&ctx->flc_lock);
1569 percpu_up_read(&file_rwsem);
1570 locks_dispose_list(&dispose);
1572 locks_free_lock(new_fl);
1575 EXPORT_SYMBOL(__break_lease);
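/*
 * Timeline sketch of a lease break (an editor's illustration):
 *
 *	A: fcntl(fd, F_SETLEASE, F_RDLCK)	read lease granted
 *	B: open(path, O_WRONLY)			blocks; A is signalled
 *	A: fcntl(fd, F_SETLEASE, F_UNLCK)	lease released
 *	B: open() returns
 *
 * If A never responds, the lease times out after lease_break_time
 * seconds (45 by default, see the sysctl above) and B proceeds anyway.
 */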
1578 * lease_get_mtime - update modified time of an inode with exclusive lease
1580 * @time: pointer to a timespec which contains the last modified time
1582 * This is to force NFS clients to flush their caches for files with
1583 * exclusive leases. The justification is that if someone has an
1584 * exclusive lease, then they could be modifying it.
1586 void lease_get_mtime(struct inode *inode, struct timespec64 *time)
1588 bool has_lease = false;
1589 struct file_lock_context *ctx;
1590 struct file_lock *fl;
1592 ctx = locks_inode_context(inode);
1593 if (ctx && !list_empty_careful(&ctx->flc_lease)) {
1594 spin_lock(&ctx->flc_lock);
1595 fl = list_first_entry_or_null(&ctx->flc_lease,
1596 struct file_lock, fl_list);
1597 if (fl && (fl->fl_type == F_WRLCK))
1599 spin_unlock(&ctx->flc_lock);
1603 *time = current_time(inode);
1605 EXPORT_SYMBOL(lease_get_mtime);
1608 * fcntl_getlease - Enquire what lease is currently active
1611 * The value returned by this function will be one of
1612 * (if no lease break is pending):
1614 * %F_RDLCK to indicate a shared lease is held.
1616 * %F_WRLCK to indicate an exclusive lease is held.
1618 * %F_UNLCK to indicate no lease is held.
1620 * (if a lease break is pending):
1622 * %F_RDLCK to indicate an exclusive lease needs to be
1623 * changed to a shared lease (or removed).
1625 * %F_UNLCK to indicate the lease needs to be removed.
1627 * XXX: sfr & willy disagree over whether F_INPROGRESS
1628 * should be returned to userspace.
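 *
 * Userspace sketch (an editor's example):
 *
 *	switch (fcntl(fd, F_GETLEASE)) {
 *	case F_RDLCK:	a shared lease is held
 *	case F_WRLCK:	an exclusive lease is held
 *	case F_UNLCK:	no lease is held
 *	}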
1630 int fcntl_getlease(struct file *filp)
1632 struct file_lock *fl;
1633 struct inode *inode = file_inode(filp);
1634 struct file_lock_context *ctx;
1638 ctx = locks_inode_context(inode);
1639 if (ctx && !list_empty_careful(&ctx->flc_lease)) {
1640 percpu_down_read(&file_rwsem);
1641 spin_lock(&ctx->flc_lock);
1642 time_out_leases(inode, &dispose);
1643 list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1644 if (fl->fl_file != filp)
1646 type = target_leasetype(fl);
1649 spin_unlock(&ctx->flc_lock);
1650 percpu_up_read(&file_rwsem);
1652 locks_dispose_list(&dispose);
1658 * check_conflicting_open - see if the given file points to an inode that has
1659 * an existing open that would conflict with the desired lease.
1661 * @filp: file to check
1662 * @arg: type of lease that we're trying to acquire
1663 * @flags: current lock flags
1665 * Check to see if there's an existing open fd on this file that would
1666 * conflict with the lease we're trying to set.
1669 check_conflicting_open(struct file *filp, const long arg, int flags)
1671 struct inode *inode = file_inode(filp);
1672 int self_wcount = 0, self_rcount = 0;
1674 if (flags & FL_LAYOUT)
1676 if (flags & FL_DELEG)
1677 /* We leave these checks to the caller */
1681 return inode_is_open_for_write(inode) ? -EAGAIN : 0;
1682 else if (arg != F_WRLCK)
1686 * Make sure that the only read/write counts are from the lease requestor.
1687 * Note that this will result in denying write leases when i_writecount
1688 * is negative, which is what we want. (We shouldn't grant write leases
1689 * on files open for execution.)
1691 if (filp->f_mode & FMODE_WRITE)
1693 else if (filp->f_mode & FMODE_READ)
1696 if (atomic_read(&inode->i_writecount) != self_wcount ||
1697 atomic_read(&inode->i_readcount) != self_rcount)
1704 generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **priv)
1706 struct file_lock *fl, *my_fl = NULL, *lease;
1707 struct inode *inode = file_inode(filp);
1708 struct file_lock_context *ctx;
1709 bool is_deleg = (*flp)->fl_flags & FL_DELEG;
1714 trace_generic_add_lease(inode, lease);
1716 /* Note that arg is never F_UNLCK here */
1717 ctx = locks_get_lock_context(inode, arg);
1722 * In the delegation case we need mutual exclusion with
1723 * a number of operations that take the i_mutex. We trylock
1724 * because delegations are an optional optimization, and if
1725 * there's some chance of a conflict we'd rather not
1726 * bother; maybe that's a sign this just isn't a good file to
1727 * hand out a delegation on.
1729 if (is_deleg && !inode_trylock(inode))
1732 if (is_deleg && arg == F_WRLCK) {
1733 /* Write delegations are not currently supported: */
1734 inode_unlock(inode);
1739 percpu_down_read(&file_rwsem);
1740 spin_lock(&ctx->flc_lock);
1741 time_out_leases(inode, &dispose);
1742 error = check_conflicting_open(filp, arg, lease->fl_flags);
1747 * At this point, we know that if there is an exclusive
1748 * lease on this file, then we hold it on this filp
1749 * (otherwise our open of this file would have blocked).
1750 * And if we are trying to acquire an exclusive lease,
1751 * then the file is not open by anyone (including us)
1752 * except for this filp.
1755 list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1756 if (fl->fl_file == filp &&
1757 fl->fl_owner == lease->fl_owner) {
1763 * No exclusive leases if someone else has a lease on this file:
1769 * Modifying our existing lease is OK, but not getting a
1770 * new lease if someone else is opening for write:
1772 if (fl->fl_flags & FL_UNLOCK_PENDING)
1776 if (my_fl != NULL) {
1778 error = lease->fl_lmops->lm_change(lease, arg, &dispose);
1788 locks_insert_lock_ctx(lease, &ctx->flc_lease);
1790 * The check in break_lease() is lockless. It's possible for another
1791 * open to race in after we did the earlier check for a conflicting
1792 * open but before the lease was inserted. Check again for a
1793 * conflicting open and cancel the lease if there is one.
1795 * We also add a barrier here to ensure that the insertion of the lock
1796 * precedes these checks.
1799 error = check_conflicting_open(filp, arg, lease->fl_flags);
1801 locks_unlink_lock_ctx(lease);
1806 if (lease->fl_lmops->lm_setup)
1807 lease->fl_lmops->lm_setup(lease, priv);
1809 spin_unlock(&ctx->flc_lock);
1810 percpu_up_read(&file_rwsem);
1811 locks_dispose_list(&dispose);
1813 inode_unlock(inode);
1814 if (!error && !my_fl)
1819 static int generic_delete_lease(struct file *filp, void *owner)
1821 int error = -EAGAIN;
1822 struct file_lock *fl, *victim = NULL;
1823 struct inode *inode = file_inode(filp);
1824 struct file_lock_context *ctx;
1827 ctx = locks_inode_context(inode);
1829 trace_generic_delete_lease(inode, NULL);
1833 percpu_down_read(&file_rwsem);
1834 spin_lock(&ctx->flc_lock);
1835 list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1836 if (fl->fl_file == filp &&
1837 fl->fl_owner == owner) {
1842 trace_generic_delete_lease(inode, victim);
1844 error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
1845 spin_unlock(&ctx->flc_lock);
1846 percpu_up_read(&file_rwsem);
1847 locks_dispose_list(&dispose);
1852 * generic_setlease - sets a lease on an open file
1853 * @filp: file pointer
1854 * @arg: type of lease to obtain
1855 * @flp: input - file_lock to use, output - file_lock inserted
1856 * @priv: private data for lm_setup (may be NULL if lm_setup
1857 * doesn't require it)
1859 * The (input) flp->fl_lmops->lm_break function is required by break_lease().
1862 int generic_setlease(struct file *filp, long arg, struct file_lock **flp,
1865 struct inode *inode = file_inode(filp);
1866 vfsuid_t vfsuid = i_uid_into_vfsuid(file_mnt_idmap(filp), inode);
1869 if ((!vfsuid_eq_kuid(vfsuid, current_fsuid())) && !capable(CAP_LEASE))
1871 if (!S_ISREG(inode->i_mode))
1873 error = security_file_lock(filp, arg);
1879 return generic_delete_lease(filp, *priv);
1882 if (!(*flp)->fl_lmops->lm_break) {
1887 return generic_add_lease(filp, arg, flp, priv);
1892 EXPORT_SYMBOL(generic_setlease);
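/*
 * Userspace sketch (an editor's example, not part of this file): taking
 * a read lease and fielding the break notification. The path and the
 * handler name are illustrative.
 *
 *	signal(SIGIO, on_lease_break);		install handler first
 *	fd = open("/some/file", O_RDONLY);
 *	fcntl(fd, F_SETLEASE, F_RDLCK);
 *	...
 *	in on_lease_break(): fcntl(fd, F_SETLEASE, F_UNLCK);
 */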
1895 * Kernel subsystems can register to be notified on any attempt to set
1896 * a new lease with the lease_notifier_chain. This is used by (e.g.) nfsd
1897 * to close files that it may have cached when there is an attempt to set a
1898 * conflicting lease.
1900 static struct srcu_notifier_head lease_notifier_chain;
1903 lease_notifier_chain_init(void)
1905 srcu_init_notifier_head(&lease_notifier_chain);
1909 setlease_notifier(long arg, struct file_lock *lease)
1912 srcu_notifier_call_chain(&lease_notifier_chain, arg, lease);
1915 int lease_register_notifier(struct notifier_block *nb)
1917 return srcu_notifier_chain_register(&lease_notifier_chain, nb);
1919 EXPORT_SYMBOL_GPL(lease_register_notifier);
1921 void lease_unregister_notifier(struct notifier_block *nb)
1923 srcu_notifier_chain_unregister(&lease_notifier_chain, nb);
1925 EXPORT_SYMBOL_GPL(lease_unregister_notifier);
1928 * vfs_setlease - sets a lease on an open file
1929 * @filp: file pointer
1930 * @arg: type of lease to obtain
1931 * @lease: file_lock to use when adding a lease
1932 * @priv: private info for lm_setup when adding a lease (may be
1933 * NULL if lm_setup doesn't require it)
1935 * Call this to establish a lease on the file. The "lease" argument is not
1936 * used for F_UNLCK requests and may be NULL. For commands that set or alter
1937 * an existing lease, the ``(*lease)->fl_lmops->lm_break`` operation must be
1938 * set; if not, this function will return -ENOLCK (and generate a scary-looking
1941 * The "priv" pointer is passed directly to the lm_setup function as-is. It
1942 * may be NULL if the lm_setup operation doesn't require it.
1945 vfs_setlease(struct file *filp, long arg, struct file_lock **lease, void **priv)
1948 setlease_notifier(arg, *lease);
1949 if (filp->f_op->setlease)
1950 return filp->f_op->setlease(filp, arg, lease, priv);
1952 return generic_setlease(filp, arg, lease, priv);
1954 EXPORT_SYMBOL_GPL(vfs_setlease);
1956 static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
1958 struct file_lock *fl;
1959 struct fasync_struct *new;
1962 fl = lease_alloc(filp, arg);
1966 new = fasync_alloc();
1968 locks_free_lock(fl);
1973 error = vfs_setlease(filp, arg, &fl, (void **)&new);
1975 locks_free_lock(fl);
1982 * fcntl_setlease - sets a lease on an open file
1983 * @fd: open file descriptor
1984 * @filp: file pointer
1985 * @arg: type of lease to obtain
1987 * Call this fcntl to establish a lease on the file.
1988 * Note that you also need to call %F_SETSIG to
1989 * receive a signal when the lease is broken.
1991 int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
1994 return vfs_setlease(filp, F_UNLCK, NULL, (void **)&filp);
1995 return do_fcntl_add_lease(fd, filp, arg);
1999 * flock_lock_inode_wait - Apply a FLOCK-style lock to a file
2000 * @inode: inode of the file to apply to
2001 * @fl: The lock to be applied
2003 * Apply a FLOCK style lock request to an inode.
2005 static int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl)
2010 error = flock_lock_inode(inode, fl);
2011 if (error != FILE_LOCK_DEFERRED)
2013 error = wait_event_interruptible(fl->fl_wait,
2014 list_empty(&fl->fl_blocked_member));
2018 locks_delete_block(fl);
2023 * locks_lock_inode_wait - Apply a lock to an inode
2024 * @inode: inode of the file to apply to
2025 * @fl: The lock to be applied
2027 * Apply a POSIX or FLOCK style lock request to an inode.
2029 int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl)
2032 switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
2034 res = posix_lock_inode_wait(inode, fl);
2037 res = flock_lock_inode_wait(inode, fl);
2044 EXPORT_SYMBOL(locks_lock_inode_wait);
2047 * sys_flock: - flock() system call.
2048 * @fd: the file descriptor to lock.
2049 * @cmd: the type of lock to apply.
2051 * Apply a %FL_FLOCK style lock to an open file descriptor.
2052 * The @cmd can be one of:
2054 * - %LOCK_SH -- a shared lock.
2055 * - %LOCK_EX -- an exclusive lock.
2056 * - %LOCK_UN -- remove an existing lock.
2057 * - %LOCK_MAND -- a 'mandatory' flock. (DEPRECATED)
2059 * %LOCK_MAND support has been removed from the kernel.
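 *
 * A typical advisory-locking sequence looks like this (editor's sketch):
 *
 *	flock(fd, LOCK_EX);		may sleep until granted
 *	...modify the file...
 *	flock(fd, LOCK_UN);
 *
 * ORing %LOCK_NB into @cmd makes a contended request fail with
 * -EWOULDBLOCK instead of sleeping.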
2061 SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
2063 int can_sleep, error, type;
2064 struct file_lock fl;
2068 * LOCK_MAND locks were broken for a long time in that they never
2069 * conflicted with one another and didn't prevent any sort of open,
2070 * read or write activity.
2072 * Just ignore these requests now, to preserve legacy behavior, but
2073 * throw a warning to let people know that they don't actually work.
2075 if (cmd & LOCK_MAND) {
2076 pr_warn_once("%s(%d): Attempt to set a LOCK_MAND lock via flock(2). This support has been removed and the request ignored.\n", current->comm, current->pid);
2080 type = flock_translate_cmd(cmd & ~LOCK_NB);
2089 if (type != F_UNLCK && !(f.file->f_mode & (FMODE_READ | FMODE_WRITE)))
2092 flock_make_lock(f.file, &fl, type);
2094 error = security_file_lock(f.file, fl.fl_type);
2098 can_sleep = !(cmd & LOCK_NB);
2100 fl.fl_flags |= FL_SLEEP;
2102 if (f.file->f_op->flock)
2103 error = f.file->f_op->flock(f.file,
2104 (can_sleep) ? F_SETLKW : F_SETLK,
2107 error = locks_lock_file_wait(f.file, &fl);
2109 locks_release_private(&fl);
2117 * vfs_test_lock - test file byte range lock
2118 * @filp: The file to test lock for
2119 * @fl: The lock to test; also used to hold result
2121 * Returns -ERRNO on failure. Indicates presence of conflicting lock by
2122 * setting fl->fl_type to something other than F_UNLCK.
2124 int vfs_test_lock(struct file *filp, struct file_lock *fl)
2126 WARN_ON_ONCE(filp != fl->fl_file);
2127 if (filp->f_op->lock)
2128 return filp->f_op->lock(filp, F_GETLK, fl);
2129 posix_test_lock(filp, fl);
2132 EXPORT_SYMBOL_GPL(vfs_test_lock);
2135 * locks_translate_pid - translate a file_lock's fl_pid number into a namespace
2136 * @fl: The file_lock whose fl_pid should be translated
2137 * @ns: The namespace into which the pid should be translated
2139 * Used to translate a fl_pid into a namespace virtual pid number
2141 static pid_t locks_translate_pid(struct file_lock *fl, struct pid_namespace *ns)
2148 if (IS_REMOTELCK(fl))
2151 * If the flock owner process is dead and its pid has already been
2152 * freed, the translation below won't work, but we still want to show
2153 * flock owner pid number in init pidns.
2155 if (ns == &init_pid_ns)
2156 return (pid_t)fl->fl_pid;
2159 pid = find_pid_ns(fl->fl_pid, &init_pid_ns);
2160 vnr = pid_nr_ns(pid, ns);
2165 static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
2167 flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current));
2168 #if BITS_PER_LONG == 32
2170 * Make sure we can represent the posix lock via
2171 * legacy 32bit flock.
2173 if (fl->fl_start > OFFT_OFFSET_MAX)
2175 if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
2178 flock->l_start = fl->fl_start;
2179 flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
2180 fl->fl_end - fl->fl_start + 1;
2181 flock->l_whence = 0;
2182 flock->l_type = fl->fl_type;
2186 #if BITS_PER_LONG == 32
2187 static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
2189 flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current));
2190 flock->l_start = fl->fl_start;
2191 flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
2192 fl->fl_end - fl->fl_start + 1;
2193 flock->l_whence = 0;
2194 flock->l_type = fl->fl_type;
2198 /* Report the first existing lock that would conflict with l.
2199 * This implements the F_GETLK command of fcntl().
2201 int fcntl_getlk(struct file *filp, unsigned int cmd, struct flock *flock)
2203 struct file_lock *fl;
2206 fl = locks_alloc_lock();
2210 if (flock->l_type != F_RDLCK && flock->l_type != F_WRLCK)
2213 error = flock_to_posix_lock(filp, fl, flock);
2217 if (cmd == F_OFD_GETLK) {
2219 if (flock->l_pid != 0)
2222 fl->fl_flags |= FL_OFDLCK;
2223 fl->fl_owner = filp;
2226 error = vfs_test_lock(filp, fl);
2230 flock->l_type = fl->fl_type;
2231 if (fl->fl_type != F_UNLCK) {
2232 error = posix_lock_to_flock(flock, fl);
2237 locks_free_lock(fl);
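/*
 * F_OFD_GETLK sketch (an editor's example): OFD lock owners are open
 * file descriptions, so l_pid must be zero on input, and conflicting
 * OFD locks are reported with l_pid == -1.
 *
 *	struct flock fl = {
 *		.l_type = F_WRLCK, .l_whence = SEEK_SET,
 *		.l_start = 0, .l_len = 0, .l_pid = 0,
 *	};
 *	fcntl(fd, F_OFD_GETLK, &fl);
 */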
2242 * vfs_lock_file - file byte range lock
2243 * @filp: The file to apply the lock to
2244 * @cmd: type of locking operation (F_SETLK, F_GETLK, etc.)
2245 * @fl: The lock to be applied
2246 * @conf: Place to return a copy of the conflicting lock, if found.
2248 * A caller that doesn't care about the conflicting lock may pass NULL
2249 * as the final argument.
2251 * If the filesystem defines a private ->lock() method, then @conf will
2252 * be left unchanged; so a caller that cares should initialize it to
2253 * some acceptable default.
2255 * To avoid blocking kernel daemons, such as lockd, that need to acquire POSIX
2256 * locks, the ->lock() interface may return asynchronously, before the lock has
2257 * been granted or denied by the underlying filesystem, if (and only if)
2258 * lm_grant is set. Callers expecting ->lock() to return asynchronously
2259 * will only use F_SETLK, not F_SETLKW; they will set FL_SLEEP if (and only if)
2260 * the request is for a blocking lock. When ->lock() does return asynchronously,
2261 * it must return FILE_LOCK_DEFERRED, and call ->lm_grant() when the lock
2262 * request completes.
2263 * If the request is for a non-blocking lock, the filesystem should return
2264 * FILE_LOCK_DEFERRED, then try to get the lock and call the callback routine
2265 * with the result. If the request timed out, the callback routine will return a
2266 * nonzero return code and the filesystem should release the lock. The
2267 * filesystem is also responsible for keeping a corresponding posix lock when it
2268 * grants a lock, so the VFS can find out which locks are locally held and do
2269 * the correct lock cleanup when required.
2270 * The underlying filesystem must not drop the kernel lock or call
2271 * ->lm_grant() before returning to the caller with a FILE_LOCK_DEFERRED return code.
2274 int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
2276 WARN_ON_ONCE(filp != fl->fl_file);
2277 if (filp->f_op->lock)
2278 return filp->f_op->lock(filp, cmd, fl);
2280 return posix_lock_file(filp, fl, conf);
2282 EXPORT_SYMBOL_GPL(vfs_lock_file);
static int do_lock_file_wait(struct file *filp, unsigned int cmd,
			     struct file_lock *fl)
{
	int error;

	error = security_file_lock(filp, fl->fl_type);
	if (error)
		return error;

	for (;;) {
		error = vfs_lock_file(filp, cmd, fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl->fl_wait,
					list_empty(&fl->fl_blocked_member));
		if (error)
			break;
	}
	locks_delete_block(fl);

	return error;
}

/* Ensure that fl->fl_file has compatible f_mode for F_SETLK calls */
static int
check_fmode_for_setlk(struct file_lock *fl)
{
	switch (fl->fl_type) {
	case F_RDLCK:
		if (!(fl->fl_file->f_mode & FMODE_READ))
			return -EBADF;
		break;
	case F_WRLCK:
		if (!(fl->fl_file->f_mode & FMODE_WRITE))
			return -EBADF;
	}
	return 0;
}

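/*
 * For example (userspace, illustrative only): a descriptor opened
 * O_WRONLY cannot take a read lock, so
 *
 *	int fd = open("somefile", O_WRONLY);
 *	struct flock fl = { .l_type = F_RDLCK, .l_whence = SEEK_SET };
 *	fcntl(fd, F_SETLK, &fl);	(fails with EBADF)
 *
 * because check_fmode_for_setlk() sees F_RDLCK without FMODE_READ.
 */
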
/* Apply the lock described by flock to an open file descriptor.
 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
 */
int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
		struct flock *flock)
{
	struct file_lock *file_lock = locks_alloc_lock();
	struct inode *inode = file_inode(filp);
	struct file *f;
	int error;

	if (file_lock == NULL)
		return -ENOLCK;

	error = flock_to_posix_lock(filp, file_lock, flock);
	if (error)
		goto out;

	error = check_fmode_for_setlk(file_lock);
	if (error)
		goto out;

	/*
	 * If the cmd is requesting file-private locks, then set the
	 * FL_OFDLCK flag and override the owner.
	 */
	switch (cmd) {
	case F_OFD_SETLK:
		error = -EINVAL;
		if (flock->l_pid != 0)
			goto out;

		cmd = F_SETLK;
		file_lock->fl_flags |= FL_OFDLCK;
		file_lock->fl_owner = filp;
		break;
	case F_OFD_SETLKW:
		error = -EINVAL;
		if (flock->l_pid != 0)
			goto out;

		cmd = F_SETLKW;
		file_lock->fl_flags |= FL_OFDLCK;
		file_lock->fl_owner = filp;
		fallthrough;
	case F_SETLKW:
		file_lock->fl_flags |= FL_SLEEP;
	}

	error = do_lock_file_wait(filp, cmd, file_lock);

	/*
	 * Attempt to detect a close/fcntl race and recover by releasing the
	 * lock that was just acquired. There is no need to do that when we're
	 * unlocking though, or for OFD locks.
	 */
	if (!error && file_lock->fl_type != F_UNLCK &&
	    !(file_lock->fl_flags & FL_OFDLCK)) {
		struct files_struct *files = current->files;
		/*
		 * We need that spin_lock here - it prevents reordering between
		 * update of i_flctx->flc_posix and check for it done in
		 * close(). rcu_read_lock() wouldn't do.
		 */
		spin_lock(&files->file_lock);
		f = files_lookup_fd_locked(files, fd);
		spin_unlock(&files->file_lock);
		if (f != filp) {
			file_lock->fl_type = F_UNLCK;
			error = do_lock_file_wait(filp, cmd, file_lock);
			WARN_ON_ONCE(error);
			error = -EBADF;
		}
	}
out:
	trace_fcntl_setlk(inode, file_lock, error);
	locks_free_lock(file_lock);
	return error;
}

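/*
 * Usage sketch (userspace, illustrative only): OFD locks are requested
 * with l_pid set to zero and are owned by the open file description
 * rather than the process, so they are inherited across fork() along
 * with the descriptor and are only released on the last close:
 *
 *	struct flock fl = {
 *		.l_type   = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start  = 0,
 *		.l_len    = 0,
 *		.l_pid    = 0,	(mandatory for the F_OFD_* commands)
 *	};
 *	if (fcntl(fd, F_OFD_SETLK, &fl) == -1)
 *		perror("F_OFD_SETLK");
 */
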
#if BITS_PER_LONG == 32
/* Report the first existing lock that would conflict with the lock
 * described by flock. This implements the F_GETLK command of fcntl().
 */
int fcntl_getlk64(struct file *filp, unsigned int cmd, struct flock64 *flock)
{
	struct file_lock *fl;
	int error;

	fl = locks_alloc_lock();
	if (fl == NULL)
		return -ENOMEM;

	error = -EINVAL;
	if (flock->l_type != F_RDLCK && flock->l_type != F_WRLCK)
		goto out;

	error = flock64_to_posix_lock(filp, fl, flock);
	if (error)
		goto out;

	if (cmd == F_OFD_GETLK) {
		error = -EINVAL;
		if (flock->l_pid != 0)
			goto out;

		fl->fl_flags |= FL_OFDLCK;
		fl->fl_owner = filp;
	}

	error = vfs_test_lock(filp, fl);
	if (error)
		goto out;

	flock->l_type = fl->fl_type;
	if (fl->fl_type != F_UNLCK)
		posix_lock_to_flock64(flock, fl);

out:
	locks_free_lock(fl);
	return error;
}

/* Apply the lock described by flock to an open file descriptor.
 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
 */
int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
		struct flock64 *flock)
{
	struct file_lock *file_lock = locks_alloc_lock();
	struct file *f;
	int error;

	if (file_lock == NULL)
		return -ENOLCK;

	error = flock64_to_posix_lock(filp, file_lock, flock);
	if (error)
		goto out;

	error = check_fmode_for_setlk(file_lock);
	if (error)
		goto out;

	/*
	 * If the cmd is requesting file-private locks, then set the
	 * FL_OFDLCK flag and override the owner.
	 */
	switch (cmd) {
	case F_OFD_SETLK:
		error = -EINVAL;
		if (flock->l_pid != 0)
			goto out;

		cmd = F_SETLK64;
		file_lock->fl_flags |= FL_OFDLCK;
		file_lock->fl_owner = filp;
		break;
	case F_OFD_SETLKW:
		error = -EINVAL;
		if (flock->l_pid != 0)
			goto out;

		cmd = F_SETLKW64;
		file_lock->fl_flags |= FL_OFDLCK;
		file_lock->fl_owner = filp;
		fallthrough;
	case F_SETLKW64:
		file_lock->fl_flags |= FL_SLEEP;
	}

	error = do_lock_file_wait(filp, cmd, file_lock);

	/*
	 * Attempt to detect a close/fcntl race and recover by releasing the
	 * lock that was just acquired. There is no need to do that when we're
	 * unlocking though, or for OFD locks.
	 */
	if (!error && file_lock->fl_type != F_UNLCK &&
	    !(file_lock->fl_flags & FL_OFDLCK)) {
		struct files_struct *files = current->files;
		/*
		 * We need that spin_lock here - it prevents reordering between
		 * update of i_flctx->flc_posix and check for it done in
		 * close(). rcu_read_lock() wouldn't do.
		 */
		spin_lock(&files->file_lock);
		f = files_lookup_fd_locked(files, fd);
		spin_unlock(&files->file_lock);
		if (f != filp) {
			file_lock->fl_type = F_UNLCK;
			error = do_lock_file_wait(filp, cmd, file_lock);
			WARN_ON_ONCE(error);
			error = -EBADF;
		}
	}
out:
	locks_free_lock(file_lock);
	return error;
}
#endif /* BITS_PER_LONG == 32 */

/*
 * This function is called when the file is being removed
 * from the task's fd array. POSIX locks belonging to this task
 * are deleted at this time.
 */
void locks_remove_posix(struct file *filp, fl_owner_t owner)
{
	int error;
	struct inode *inode = file_inode(filp);
	struct file_lock lock;
	struct file_lock_context *ctx;

	/*
	 * If there are no locks held on this file, we don't need to call
	 * posix_lock_file(). Another process could be setting a lock on this
	 * file at the same time, but we wouldn't remove that lock anyway.
	 */
	ctx = locks_inode_context(inode);
	if (!ctx || list_empty(&ctx->flc_posix))
		return;

	locks_init_lock(&lock);
	lock.fl_type = F_UNLCK;
	lock.fl_flags = FL_POSIX | FL_CLOSE;
	lock.fl_start = 0;
	lock.fl_end = OFFSET_MAX;
	lock.fl_owner = owner;
	lock.fl_pid = current->tgid;
	lock.fl_file = filp;
	lock.fl_ops = NULL;
	lock.fl_lmops = NULL;

	error = vfs_lock_file(filp, F_SETLK, &lock, NULL);

	if (lock.fl_ops && lock.fl_ops->fl_release_private)
		lock.fl_ops->fl_release_private(&lock);
	trace_locks_remove_posix(inode, &lock, error);
}
EXPORT_SYMBOL(locks_remove_posix);

/* The i_flctx must be valid when calling into here */
static void
locks_remove_flock(struct file *filp, struct file_lock_context *flctx)
{
	struct file_lock fl;
	struct inode *inode = file_inode(filp);

	if (list_empty(&flctx->flc_flock))
		return;

	flock_make_lock(filp, &fl, F_UNLCK);
	fl.fl_flags |= FL_CLOSE;

	if (filp->f_op->flock)
		filp->f_op->flock(filp, F_SETLKW, &fl);
	else
		flock_lock_inode(inode, &fl);

	if (fl.fl_ops && fl.fl_ops->fl_release_private)
		fl.fl_ops->fl_release_private(&fl);
}

/* The i_flctx must be valid when calling into here */
static void
locks_remove_lease(struct file *filp, struct file_lock_context *ctx)
{
	struct file_lock *fl, *tmp;
	LIST_HEAD(dispose);

	if (list_empty(&ctx->flc_lease))
		return;

	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list)
		if (filp == fl->fl_file)
			lease_modify(fl, F_UNLCK, &dispose);
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);

	locks_dispose_list(&dispose);
}

/*
 * This function is called on the last close of an open file.
 */
void locks_remove_file(struct file *filp)
{
	struct file_lock_context *ctx;

	ctx = locks_inode_context(file_inode(filp));
	if (!ctx)
		return;

	/* remove any OFD locks */
	locks_remove_posix(filp, filp);

	/* remove flock locks */
	locks_remove_flock(filp, ctx);

	/* remove any leases */
	locks_remove_lease(filp, ctx);

	spin_lock(&ctx->flc_lock);
	locks_check_ctx_file_list(filp, &ctx->flc_posix, "POSIX");
	locks_check_ctx_file_list(filp, &ctx->flc_flock, "FLOCK");
	locks_check_ctx_file_list(filp, &ctx->flc_lease, "LEASE");
	spin_unlock(&ctx->flc_lock);
}

/**
 * vfs_cancel_lock - file byte range unblock lock
 * @filp: The file to apply the unblock to
 * @fl: The lock to be unblocked
 *
 * Used by lock managers to cancel blocked requests
 */
int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
{
	WARN_ON_ONCE(filp != fl->fl_file);
	if (filp->f_op->lock)
		return filp->f_op->lock(filp, F_CANCELLK, fl);
	return 0;
}
EXPORT_SYMBOL_GPL(vfs_cancel_lock);

/**
 * vfs_inode_has_locks - are any file locks held on @inode?
 * @inode: inode to check for locks
 *
 * Return true if there are any FL_POSIX or FL_FLOCK locks currently
 * set on @inode.
 */
bool vfs_inode_has_locks(struct inode *inode)
{
	struct file_lock_context *ctx;
	bool ret;

	ctx = locks_inode_context(inode);
	if (!ctx)
		return false;

	spin_lock(&ctx->flc_lock);
	ret = !list_empty(&ctx->flc_posix) || !list_empty(&ctx->flc_flock);
	spin_unlock(&ctx->flc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(vfs_inode_has_locks);

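/*
 * A caller sketch (illustrative only): a filesystem that must not
 * reconfigure an inode while byte-range or flock locks are outstanding
 * might guard the operation like this:
 *
 *	if (vfs_inode_has_locks(inode))
 *		return -EBUSY;
 */
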
#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

struct locks_iterator {
	int	li_cpu;
	loff_t	li_pos;
};

static void lock_get_status(struct seq_file *f, struct file_lock *fl,
			loff_t id, char *pfx, int repeat)
{
	struct inode *inode = NULL;
	unsigned int fl_pid;
	struct pid_namespace *proc_pidns = proc_pid_ns(file_inode(f->file)->i_sb);
	int type;

	fl_pid = locks_translate_pid(fl, proc_pidns);
	/*
	 * If the lock owner is dead (and its pid has been freed) or is not
	 * visible in the current pidns, zero is shown as the pid value.
	 */

	if (fl->fl_file != NULL)
		inode = file_inode(fl->fl_file);

	seq_printf(f, "%lld: ", id);

	if (repeat)
		seq_printf(f, "%*s", repeat - 1 + (int)strlen(pfx), pfx);

	if (IS_POSIX(fl)) {
		if (fl->fl_flags & FL_ACCESS)
			seq_puts(f, "ACCESS");
		else if (IS_OFDLCK(fl))
			seq_puts(f, "OFDLCK");
		else
			seq_puts(f, "POSIX ");

		seq_printf(f, " %s ",
			     (inode == NULL) ? "*NOINODE*" : "ADVISORY ");
	} else if (IS_FLOCK(fl)) {
		seq_puts(f, "FLOCK  ADVISORY  ");
	} else if (IS_LEASE(fl)) {
		if (fl->fl_flags & FL_DELEG)
			seq_puts(f, "DELEG  ");
		else
			seq_puts(f, "LEASE  ");

		if (lease_breaking(fl))
			seq_puts(f, "BREAKING  ");
		else if (fl->fl_file)
			seq_puts(f, "ACTIVE    ");
		else
			seq_puts(f, "BREAKER   ");
	} else {
		seq_puts(f, "UNKNOWN UNKNOWN  ");
	}
	type = IS_LEASE(fl) ? target_leasetype(fl) : fl->fl_type;

	seq_printf(f, "%s ", (type == F_WRLCK) ? "WRITE" :
			     (type == F_RDLCK) ? "READ" : "UNLCK");
	if (inode) {
		/* userspace relies on this representation of dev_t */
		seq_printf(f, "%d %02x:%02x:%lu ", fl_pid,
				MAJOR(inode->i_sb->s_dev),
				MINOR(inode->i_sb->s_dev), inode->i_ino);
	} else {
		seq_printf(f, "%d <none>:0 ", fl_pid);
	}
	if (IS_POSIX(fl)) {
		if (fl->fl_end == OFFSET_MAX)
			seq_printf(f, "%Ld EOF\n", fl->fl_start);
		else
			seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
	} else {
		seq_puts(f, "0 EOF\n");
	}
}

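/*
 * For reference, a resulting /proc/locks line looks like:
 *
 *	1: POSIX  ADVISORY  WRITE 1234 08:01:5678 0 EOF
 *
 * i.e. ordinal, lock class, access class, lock type, pid, then
 * major:minor:inode of the locked file, followed by the byte range
 * ("EOF" for a lock that runs to the end of the file).
 */
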
static struct file_lock *get_next_blocked_member(struct file_lock *node)
{
	struct file_lock *tmp;

	/* NULL node or root node */
	if (node == NULL || node->fl_blocker == NULL)
		return NULL;

	/* Next member in the linked list could be itself */
	tmp = list_next_entry(node, fl_blocked_member);
	if (list_entry_is_head(tmp, &node->fl_blocker->fl_blocked_requests, fl_blocked_member)
		|| tmp == node)
		return NULL;

	return tmp;
}

static int locks_show(struct seq_file *f, void *v)
{
	struct locks_iterator *iter = f->private;
	struct file_lock *cur, *tmp;
	struct pid_namespace *proc_pidns = proc_pid_ns(file_inode(f->file)->i_sb);
	int level = 0;

	cur = hlist_entry(v, struct file_lock, fl_link);

	if (locks_translate_pid(cur, proc_pidns) == 0)
		return 0;

	/* View this cross-linked structure as a binary tree: the first member
	 * of fl_blocked_requests is the left child of the current node, the
	 * next sibling in fl_blocked_member is the right child, and we can
	 * also reach the parent of the current node through fl_blocker, so
	 * the problem becomes an ordinary binary-tree traversal.
	 */
	while (cur != NULL) {
		if (level)
			lock_get_status(f, cur, iter->li_pos, "-> ", level);
		else
			lock_get_status(f, cur, iter->li_pos, "", level);

		if (!list_empty(&cur->fl_blocked_requests)) {
			/* Turn left */
			cur = list_first_entry_or_null(&cur->fl_blocked_requests,
					struct file_lock, fl_blocked_member);
			level++;
		} else {
			/* Turn right */
			tmp = get_next_blocked_member(cur);
			/* Fall back to parent node */
			while (tmp == NULL && cur->fl_blocker != NULL) {
				cur = cur->fl_blocker;
				level--;
				tmp = get_next_blocked_member(cur);
			}
			cur = tmp;
		}
	}

	return 0;
}

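/*
 * With waiters present, the tree is rendered depth-first: each level gets
 * a "-> " prefix, indented one extra column per level via the %*s padding
 * in lock_get_status(). For example:
 *
 *	1: POSIX  ADVISORY  WRITE 100 08:01:999 0 EOF
 *	1: -> POSIX  ADVISORY  WRITE 101 08:01:999 0 EOF
 *	1:  -> POSIX  ADVISORY  WRITE 102 08:01:999 0 EOF
 *
 * where pid 101 waits on pid 100's lock, and pid 102 waits on pid 101's.
 */
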
static void __show_fd_locks(struct seq_file *f,
			struct list_head *head, int *id,
			struct file *filp, struct files_struct *files)
{
	struct file_lock *fl;

	list_for_each_entry(fl, head, fl_list) {

		if (filp != fl->fl_file)
			continue;
		if (fl->fl_owner != files &&
		    fl->fl_owner != filp)
			continue;

		(*id)++;
		seq_puts(f, "lock:\t");
		lock_get_status(f, fl, *id, "", 0);
	}
}

void show_fd_locks(struct seq_file *f,
		  struct file *filp, struct files_struct *files)
{
	struct inode *inode = file_inode(filp);
	struct file_lock_context *ctx;
	int id = 0;

	ctx = locks_inode_context(inode);
	if (!ctx)
		return;

	spin_lock(&ctx->flc_lock);
	__show_fd_locks(f, &ctx->flc_flock, &id, filp, files);
	__show_fd_locks(f, &ctx->flc_posix, &id, filp, files);
	__show_fd_locks(f, &ctx->flc_lease, &id, filp, files);
	spin_unlock(&ctx->flc_lock);
}

static void *locks_start(struct seq_file *f, loff_t *pos)
	__acquires(&blocked_lock_lock)
{
	struct locks_iterator *iter = f->private;

	iter->li_pos = *pos + 1;
	percpu_down_write(&file_rwsem);
	spin_lock(&blocked_lock_lock);
	return seq_hlist_start_percpu(&file_lock_list.hlist, &iter->li_cpu, *pos);
}

static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
{
	struct locks_iterator *iter = f->private;

	++iter->li_pos;
	return seq_hlist_next_percpu(v, &file_lock_list.hlist, &iter->li_cpu, pos);
}

static void locks_stop(struct seq_file *f, void *v)
	__releases(&blocked_lock_lock)
{
	spin_unlock(&blocked_lock_lock);
	percpu_up_write(&file_rwsem);
}

static const struct seq_operations locks_seq_operations = {
	.start	= locks_start,
	.next	= locks_next,
	.stop	= locks_stop,
	.show	= locks_show,
};

static int __init proc_locks_init(void)
{
	proc_create_seq_private("locks", 0, NULL, &locks_seq_operations,
			sizeof(struct locks_iterator), NULL);
	return 0;
}
fs_initcall(proc_locks_init);
#endif /* CONFIG_PROC_FS */

static int __init filelock_init(void)
{
	int i;

	flctx_cache = kmem_cache_create("file_lock_ctx",
			sizeof(struct file_lock_context), 0, SLAB_PANIC, NULL);

	filelock_cache = kmem_cache_create("file_lock_cache",
			sizeof(struct file_lock), 0, SLAB_PANIC, NULL);

	for_each_possible_cpu(i) {
		struct file_lock_list_struct *fll = per_cpu_ptr(&file_lock_list, i);

		spin_lock_init(&fll->lock);
		INIT_HLIST_HEAD(&fll->hlist);
	}

	lease_notifier_chain_init();
	return 0;
}
core_initcall(filelock_init);