// SPDX-License-Identifier: GPL-2.0-only
/*
 * Provide support for fcntl()'s F_GETLK, F_SETLK, and F_SETLKW calls.
 * Doug Evans (dje@spiff.uucp), August 07, 1992
 *
 * Deadlock detection added.
 * FIXME: one thing isn't handled yet:
 *	- mandatory locks (requires lots of changes elsewhere)
 * Kelly Carmichael (kelly@[142.24.8.65]), September 17, 1994.
 *
 * Miscellaneous edits, and a total rewrite of posix_lock_file() code.
 * Kai Petzke (wpp@marie.physik.tu-berlin.de), 1994
 *
 * Converted file_lock_table to a linked list from an array, which eliminates
 * the limits on how many active file locks are open.
 * Chad Page (pageone@netcom.com), November 27, 1994
 *
 * Removed dependency on file descriptors. dup()'ed file descriptors now
 * get the same locks as the original file descriptors, and a close() on
 * any file descriptor removes ALL the locks on the file for the current
 * process. Since locks still depend on the process id, locks are inherited
 * after an exec() but not after a fork(). This agrees with POSIX, and both
 * BSD and SVR4 practice.
 * Andy Walker (andy@lysaker.kvaerner.no), February 14, 1995
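 *
 * For illustration, a minimal userspace sketch of that close() semantic
 * (hypothetical path, error handling omitted):
 *
 *	int fd1 = open("/tmp/demo", O_RDWR);
 *	int fd2 = open("/tmp/demo", O_RDWR);	// same file, same process
 *	struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET };
 *	fcntl(fd1, F_SETLK, &fl);		// whole-file write lock
 *	close(fd2);				// drops the lock taken on fd1, too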
 *
 * Scrapped free list which is redundant now that we allocate locks
 * dynamically with kmalloc()/kfree().
 * Andy Walker (andy@lysaker.kvaerner.no), February 21, 1995
 *
 * Implemented two lock personalities - FL_FLOCK and FL_POSIX.
 *
 * FL_POSIX locks are created with calls to fcntl() and lockf() through the
 * fcntl() system call. They have the semantics described above.
 *
 * FL_FLOCK locks are created with calls to flock(), through the flock()
 * system call, which is new. Old C libraries implement flock() via fcntl()
 * and will continue to use the old, broken implementation.
 *
 * FL_FLOCK locks follow the 4.4 BSD flock() semantics. They are associated
 * with a file pointer (filp). As a result they can be shared by a parent
 * process and its children after a fork(). They are removed when the last
 * file descriptor referring to the file pointer is closed (unless explicitly
 * unlocked).
 *
 * FL_FLOCK locks never deadlock; an existing lock is always removed before
 * upgrading from shared to exclusive (or vice versa). When this happens
 * any processes blocked by the current lock are woken up and allowed to
 * run before the new lock is applied.
 * Andy Walker (andy@lysaker.kvaerner.no), June 09, 1995
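 *
 * A minimal sketch of that upgrade behaviour (fd assumed open):
 *
 *	flock(fd, LOCK_SH);	// shared lock
 *	flock(fd, LOCK_EX);	// upgrade: the shared lock is dropped first,
 *				// so another process may acquire in between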
 *
 * Removed some race conditions in flock_lock_file(), marked other possible
 * races. Just grep for FIXME to see them.
 * Dmitry Gorodchanin (pgmdsg@ibi.com), February 09, 1996.
 *
 * Addressed Dmitry's concerns. Deadlock checking no longer recursive.
 * Lock allocation changed to GFP_ATOMIC as we can't afford to sleep
 * once we've checked for blocking and deadlocking.
 * Andy Walker (andy@lysaker.kvaerner.no), April 03, 1996.
 *
 * Initial implementation of mandatory locks. SunOS turned out to be
 * a rotten model, so I implemented the "obvious" semantics.
 * See 'Documentation/filesystems/mandatory-locking.rst' for details.
 * Andy Walker (andy@lysaker.kvaerner.no), April 06, 1996.
 *
 * Don't allow mandatory locks on mmap()'ed files. Added simple functions to
 * check if a file has mandatory locks, used by mmap(), open() and creat() to
 * see if system call should be rejected. Ref. HP-UX/SunOS/Solaris Reference
 * Manual pages.
 * Andy Walker (andy@lysaker.kvaerner.no), April 09, 1996.
 *
 * Tidied up block list handling. Added '/proc/locks' interface.
 * Andy Walker (andy@lysaker.kvaerner.no), April 24, 1996.
 *
 * Fixed deadlock condition for pathological code that mixes calls to
 * flock() and fcntl().
 * Andy Walker (andy@lysaker.kvaerner.no), April 29, 1996.
 *
 * Allow only one type of locking scheme (FL_POSIX or FL_FLOCK) to be in use
 * for a given file at a time. Changed the CONFIG_LOCK_MANDATORY scheme to
 * guarantee sensible behaviour in the case where file system modules might
 * be compiled with different options than the kernel itself.
 * Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
 *
 * Added a couple of missing wake_up() calls. Thanks to Thomas Meckel
 * (Thomas.Meckel@mni.fh-giessen.de) for spotting this.
 * Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
 *
 * Changed FL_POSIX locks to use the block list in the same way as FL_FLOCK
 * locks. Changed process synchronisation to avoid dereferencing locks that
 * have already been freed.
 * Andy Walker (andy@lysaker.kvaerner.no), Sep 21, 1996.
 *
 * Made the block list a circular list to minimise searching in the list.
 * Andy Walker (andy@lysaker.kvaerner.no), Sep 25, 1996.
 *
 * Made mandatory locking a mount option. Default is not to allow mandatory
 * locking.
 * Andy Walker (andy@lysaker.kvaerner.no), Oct 04, 1996.
 *
 * Some adaptations for NFS support.
 * Olaf Kirch (okir@monad.swb.de), Dec 1996,
 *
 * Fixed /proc/locks interface so that we can't overrun the buffer we are handed.
 * Andy Walker (andy@lysaker.kvaerner.no), May 12, 1997.
 *
 * Use slab allocator instead of kmalloc/kfree.
 * Use generic list implementation from <linux/list.h>.
 * Sped up posix_locks_deadlock by only considering blocked locks.
 * Matthew Wilcox <willy@debian.org>, March, 2000.
 *
 * Leases and LOCK_MAND
 * Matthew Wilcox <willy@debian.org>, June, 2000.
 * Stephen Rothwell <sfr@canb.auug.org.au>, June, 2000.
 *
 * Locking conflicts and dependencies:
 * If multiple threads attempt to lock the same byte (or flock the same file)
 * only one can be granted the lock, and the others must wait their turn.
 * The first lock has been "applied" or "granted", the others are "waiting"
 * and are "blocked" by the "applied" lock.
 *
 * Waiting and applied locks are all kept in trees whose properties are:
 *
 *	- the root of a tree may be an applied or waiting lock.
 *	- every other node in the tree is a waiting lock that
 *	  conflicts with every ancestor of that node.
 *
 * Every such tree begins life as a waiting singleton which obviously
 * satisfies the above properties.
 *
 * The only ways we modify trees preserve these properties:
 *
 *	1. We may add a new leaf node, but only after first verifying that it
 *	   conflicts with all of its ancestors.
 *	2. We may remove the root of a tree, creating a new singleton
 *	   tree from the root and N new trees rooted in the immediate
 *	   children.
 *	3. If the root of a tree is not currently an applied lock, we may
 *	   apply it (if possible).
 *	4. We may upgrade the root of the tree (either extend its range,
 *	   or upgrade its entire range from read to write).
 *
 * When an applied lock is modified in a way that reduces or downgrades any
 * part of its range, we remove all its children (2 above). This particularly
 * happens when a lock is unlocked.
 *
 * For each of those child trees we "wake up" the thread which is
 * waiting for the lock so it can continue handling as follows: if the
 * root of the tree applies, we do so (3). If it doesn't, it must
 * conflict with some applied lock. We remove (wake up) all of its children
 * (2), and add it as a new leaf to the tree rooted in the applied
 * lock (1). We then repeat the process recursively with those
 * children. A worked example follows this comment.
 */
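
/*
 * A worked example of the rules above (illustrative only; W = write lock,
 * R = read lock, byte ranges inclusive):
 *
 *	applied: A = W[0-9]
 *	    waiting on A: B = W[5-14]	(conflicts with A)
 *	        waiting on B: C = R[0-19]	(conflicts with B and with A)
 *
 * When A unlocks, rule 2 detaches the subtree rooted at B, and B's owner
 * is woken. B's request now applies (rule 3); C still conflicts with B
 * and so remains queued beneath it, to be woken when B is released.
 */
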
#include <linux/capability.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/time.h>
#include <linux/rcupdate.h>
#include <linux/pid_namespace.h>
#include <linux/hashtable.h>
#include <linux/percpu.h>

#define CREATE_TRACE_POINTS
#include <trace/events/filelock.h>

#include <linux/uaccess.h>
#define IS_POSIX(fl)	(fl->fl_flags & FL_POSIX)
#define IS_FLOCK(fl)	(fl->fl_flags & FL_FLOCK)
#define IS_LEASE(fl)	(fl->fl_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT))
#define IS_OFDLCK(fl)	(fl->fl_flags & FL_OFDLCK)
#define IS_REMOTELCK(fl)	(fl->fl_pid <= 0)

static bool lease_breaking(struct file_lock *fl)
{
	return fl->fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING);
}

static int target_leasetype(struct file_lock *fl)
{
	if (fl->fl_flags & FL_UNLOCK_PENDING)
		return F_UNLCK;
	if (fl->fl_flags & FL_DOWNGRADE_PENDING)
		return F_RDLCK;
	return fl->fl_type;
}

int leases_enable = 1;
int lease_break_time = 45;

/*
 * The global file_lock_list is only used for displaying /proc/locks, so we
 * keep a list on each CPU, with each list protected by its own spinlock.
 * Global serialization is done using file_rwsem.
 *
 * Note that alterations to the list also require that the relevant flc_lock is
 * held.
 */
struct file_lock_list_struct {
	spinlock_t		lock;
	struct hlist_head	hlist;
};
static DEFINE_PER_CPU(struct file_lock_list_struct, file_lock_list);
DEFINE_STATIC_PERCPU_RWSEM(file_rwsem);

/*
 * The blocked_hash is used to find POSIX lock loops for deadlock detection.
 * It is protected by blocked_lock_lock.
 *
 * We hash locks by lockowner in order to optimize searching for the lock a
 * particular lockowner is waiting on.
 *
 * FIXME: make this value scale via some heuristic? We generally will want more
 * buckets when we have more lockowners holding locks, but that's a little
 * difficult to determine without knowing what the workload will look like.
 */
#define BLOCKED_HASH_BITS	7
static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS);

/*
 * This lock protects the blocked_hash. Generally, if you're accessing it, you
 * want to be holding this lock.
 *
 * In addition, it also protects the fl->fl_blocked_requests list, and the
 * fl->fl_blocker pointer for file_lock structures that are acting as lock
 * requests (in contrast to those that are acting as records of acquired locks).
 *
 * Note that when we acquire this lock in order to change the above fields,
 * we often hold the flc_lock as well. In certain cases, when reading the fields
 * protected by this lock, we can skip acquiring it iff we already hold the
 * flc_lock.
 */
static DEFINE_SPINLOCK(blocked_lock_lock);
static struct kmem_cache *flctx_cache __read_mostly;
static struct kmem_cache *filelock_cache __read_mostly;

static struct file_lock_context *
locks_get_lock_context(struct inode *inode, int type)
{
	struct file_lock_context *ctx;

	/* paired with cmpxchg() below */
	ctx = smp_load_acquire(&inode->i_flctx);
	if (likely(ctx) || type == F_UNLCK)
		goto out;

	ctx = kmem_cache_alloc(flctx_cache, GFP_KERNEL);
	if (!ctx)
		goto out;

	spin_lock_init(&ctx->flc_lock);
	INIT_LIST_HEAD(&ctx->flc_flock);
	INIT_LIST_HEAD(&ctx->flc_posix);
	INIT_LIST_HEAD(&ctx->flc_lease);

	/*
	 * Assign the pointer if it's not already assigned. If it is, then
	 * free the context we just allocated.
	 */
	if (cmpxchg(&inode->i_flctx, NULL, ctx)) {
		kmem_cache_free(flctx_cache, ctx);
		ctx = smp_load_acquire(&inode->i_flctx);
	}
out:
	trace_locks_get_lock_context(inode, type, ctx);
	return ctx;
}
static void
locks_dump_ctx_list(struct list_head *list, char *list_type)
{
	struct file_lock *fl;

	list_for_each_entry(fl, list, fl_list) {
		pr_warn("%s: fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n",
			list_type, fl->fl_owner, fl->fl_flags,
			fl->fl_type, fl->fl_pid);
	}
}

static void
locks_check_ctx_lists(struct inode *inode)
{
	struct file_lock_context *ctx = inode->i_flctx;

	if (unlikely(!list_empty(&ctx->flc_flock) ||
		     !list_empty(&ctx->flc_posix) ||
		     !list_empty(&ctx->flc_lease))) {
		pr_warn("Leaked locks on dev=0x%x:0x%x ino=0x%lx:\n",
			MAJOR(inode->i_sb->s_dev), MINOR(inode->i_sb->s_dev),
			inode->i_ino);
		locks_dump_ctx_list(&ctx->flc_flock, "FLOCK");
		locks_dump_ctx_list(&ctx->flc_posix, "POSIX");
		locks_dump_ctx_list(&ctx->flc_lease, "LEASE");
	}
}

static void
locks_check_ctx_file_list(struct file *filp, struct list_head *list,
			  char *list_type)
{
	struct file_lock *fl;
	struct inode *inode = locks_inode(filp);

	list_for_each_entry(fl, list, fl_list)
		if (fl->fl_file == filp)
			pr_warn("Leaked %s lock on dev=0x%x:0x%x ino=0x%lx "
				" fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n",
				list_type, MAJOR(inode->i_sb->s_dev),
				MINOR(inode->i_sb->s_dev), inode->i_ino,
				fl->fl_owner, fl->fl_flags, fl->fl_type, fl->fl_pid);
}

void
locks_free_lock_context(struct inode *inode)
{
	struct file_lock_context *ctx = inode->i_flctx;

	if (unlikely(ctx)) {
		locks_check_ctx_lists(inode);
		kmem_cache_free(flctx_cache, ctx);
	}
}

static void locks_init_lock_heads(struct file_lock *fl)
{
	INIT_HLIST_NODE(&fl->fl_link);
	INIT_LIST_HEAD(&fl->fl_list);
	INIT_LIST_HEAD(&fl->fl_blocked_requests);
	INIT_LIST_HEAD(&fl->fl_blocked_member);
	init_waitqueue_head(&fl->fl_wait);
}

/* Allocate an empty lock structure. */
struct file_lock *locks_alloc_lock(void)
{
	struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);

	if (fl)
		locks_init_lock_heads(fl);

	return fl;
}
EXPORT_SYMBOL_GPL(locks_alloc_lock);

void locks_release_private(struct file_lock *fl)
{
	BUG_ON(waitqueue_active(&fl->fl_wait));
	BUG_ON(!list_empty(&fl->fl_list));
	BUG_ON(!list_empty(&fl->fl_blocked_requests));
	BUG_ON(!list_empty(&fl->fl_blocked_member));
	BUG_ON(!hlist_unhashed(&fl->fl_link));

	if (fl->fl_ops) {
		if (fl->fl_ops->fl_release_private)
			fl->fl_ops->fl_release_private(fl);
		fl->fl_ops = NULL;
	}

	if (fl->fl_lmops) {
		if (fl->fl_lmops->lm_put_owner) {
			fl->fl_lmops->lm_put_owner(fl->fl_owner);
			fl->fl_owner = NULL;
		}
		fl->fl_lmops = NULL;
	}
}
EXPORT_SYMBOL_GPL(locks_release_private);

/* Free a lock which is not in use. */
void locks_free_lock(struct file_lock *fl)
{
	locks_release_private(fl);
	kmem_cache_free(filelock_cache, fl);
}
EXPORT_SYMBOL(locks_free_lock);

static void
locks_dispose_list(struct list_head *dispose)
{
	struct file_lock *fl;

	while (!list_empty(dispose)) {
		fl = list_first_entry(dispose, struct file_lock, fl_list);
		list_del_init(&fl->fl_list);
		locks_free_lock(fl);
	}
}

void locks_init_lock(struct file_lock *fl)
{
	memset(fl, 0, sizeof(struct file_lock));
	locks_init_lock_heads(fl);
}
EXPORT_SYMBOL(locks_init_lock);
/*
 * Initialize a new lock from an existing file_lock structure.
 */
void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
{
	new->fl_owner = fl->fl_owner;
	new->fl_pid = fl->fl_pid;
	new->fl_file = NULL;
	new->fl_flags = fl->fl_flags;
	new->fl_type = fl->fl_type;
	new->fl_start = fl->fl_start;
	new->fl_end = fl->fl_end;
	new->fl_lmops = fl->fl_lmops;
	new->fl_ops = NULL;

	if (fl->fl_lmops) {
		if (fl->fl_lmops->lm_get_owner)
			fl->fl_lmops->lm_get_owner(fl->fl_owner);
	}
}
EXPORT_SYMBOL(locks_copy_conflock);

void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	/* "new" must be a freshly-initialized lock */
	WARN_ON_ONCE(new->fl_ops);

	locks_copy_conflock(new, fl);

	new->fl_file = fl->fl_file;
	new->fl_ops = fl->fl_ops;

	if (fl->fl_ops) {
		if (fl->fl_ops->fl_copy_lock)
			fl->fl_ops->fl_copy_lock(new, fl);
	}
}
EXPORT_SYMBOL(locks_copy_lock);

static void locks_move_blocks(struct file_lock *new, struct file_lock *fl)
{
	struct file_lock *f;

	/*
	 * As ctx->flc_lock is held, new requests cannot be added to
	 * ->fl_blocked_requests, so we don't need a lock to check if it
	 * is empty.
	 */
	if (list_empty(&fl->fl_blocked_requests))
		return;
	spin_lock(&blocked_lock_lock);
	list_splice_init(&fl->fl_blocked_requests, &new->fl_blocked_requests);
	list_for_each_entry(f, &new->fl_blocked_requests, fl_blocked_member)
		f->fl_blocker = new;
	spin_unlock(&blocked_lock_lock);
}

static inline int flock_translate_cmd(int cmd) {
	if (cmd & LOCK_MAND)
		return cmd & (LOCK_MAND | LOCK_RW);
	switch (cmd) {
	case LOCK_SH:
		return F_RDLCK;
	case LOCK_EX:
		return F_WRLCK;
	case LOCK_UN:
		return F_UNLCK;
	}
	return -EINVAL;
}
/* Fill in a file_lock structure with an appropriate FLOCK lock. */
static struct file_lock *
flock_make_lock(struct file *filp, unsigned int cmd, struct file_lock *fl)
{
	int type = flock_translate_cmd(cmd);

	if (type < 0)
		return ERR_PTR(type);

	if (fl == NULL) {
		fl = locks_alloc_lock();
		if (fl == NULL)
			return ERR_PTR(-ENOMEM);
	} else {
		locks_init_lock(fl);
	}

	fl->fl_file = filp;
	fl->fl_owner = filp;
	fl->fl_pid = current->tgid;
	fl->fl_flags = FL_FLOCK;
	fl->fl_type = type;
	fl->fl_end = OFFSET_MAX;

	return fl;
}

static int assign_type(struct file_lock *fl, long type)
{
	switch (type) {
	case F_RDLCK:
	case F_WRLCK:
	case F_UNLCK:
		fl->fl_type = type;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
				 struct flock64 *l)
{
	switch (l->l_whence) {
	case SEEK_SET:
		fl->fl_start = 0;
		break;
	case SEEK_CUR:
		fl->fl_start = filp->f_pos;
		break;
	case SEEK_END:
		fl->fl_start = i_size_read(file_inode(filp));
		break;
	default:
		return -EINVAL;
	}
	if (l->l_start > OFFSET_MAX - fl->fl_start)
		return -EOVERFLOW;
	fl->fl_start += l->l_start;
	if (fl->fl_start < 0)
		return -EINVAL;

	/* POSIX-1996 leaves the case l->l_len < 0 undefined;
	   POSIX-2001 defines it. */
	if (l->l_len > 0) {
		if (l->l_len - 1 > OFFSET_MAX - fl->fl_start)
			return -EOVERFLOW;
		fl->fl_end = fl->fl_start + l->l_len - 1;
	} else if (l->l_len < 0) {
		if (fl->fl_start + l->l_len < 0)
			return -EINVAL;
		fl->fl_end = fl->fl_start - 1;
		fl->fl_start += l->l_len;
	} else
		fl->fl_end = OFFSET_MAX;

	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;
	fl->fl_file = filp;
	fl->fl_flags = FL_POSIX;
	fl->fl_ops = NULL;
	fl->fl_lmops = NULL;

	return assign_type(fl, l->l_type);
}
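
/*
 * A worked example of the range math above (illustrative): with
 * l_whence = SEEK_SET, l_start = 100 and l_len = -10, fl_end becomes
 * 100 - 1 = 99 and fl_start becomes 100 + (-10) = 90, i.e. the lock
 * covers bytes 90..99. With l_len == 0 the lock runs to OFFSET_MAX.
 */
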
/* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
 * style lock.
 */
static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
			       struct flock *l)
{
	struct flock64 ll = {
		.l_type = l->l_type,
		.l_whence = l->l_whence,
		.l_start = l->l_start,
		.l_len = l->l_len,
	};

	return flock64_to_posix_lock(filp, fl, &ll);
}

/* default lease lock manager operations */
static bool
lease_break_callback(struct file_lock *fl)
{
	kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
	return false;
}

static void
lease_setup(struct file_lock *fl, void **priv)
{
	struct file *filp = fl->fl_file;
	struct fasync_struct *fa = *priv;

	/*
	 * fasync_insert_entry() returns the old entry if any. If there was no
	 * old entry, then it used "priv" and inserted it into the fasync list.
	 * Clear the pointer to indicate that it shouldn't be freed.
	 */
	if (!fasync_insert_entry(fa->fa_fd, filp, &fl->fl_fasync, fa))
		*priv = NULL;

	__f_setown(filp, task_pid(current), PIDTYPE_TGID, 0);
}

static const struct lock_manager_operations lease_manager_ops = {
	.lm_break = lease_break_callback,
	.lm_change = lease_modify,
	.lm_setup = lease_setup,
};

/*
 * Initialize a lease, use the default lock manager operations
 */
static int lease_init(struct file *filp, long type, struct file_lock *fl)
{
	if (assign_type(fl, type) != 0)
		return -EINVAL;

	fl->fl_owner = filp;
	fl->fl_pid = current->tgid;

	fl->fl_file = filp;
	fl->fl_flags = FL_LEASE;
	fl->fl_start = 0;
	fl->fl_end = OFFSET_MAX;
	fl->fl_ops = NULL;
	fl->fl_lmops = &lease_manager_ops;
	return 0;
}

/* Allocate a file_lock initialised to this type of lease */
static struct file_lock *lease_alloc(struct file *filp, long type)
{
	struct file_lock *fl = locks_alloc_lock();
	int error = -ENOMEM;

	if (fl == NULL)
		return ERR_PTR(error);

	error = lease_init(filp, type, fl);
	if (error) {
		locks_free_lock(fl);
		return ERR_PTR(error);
	}
	return fl;
}
/* Check if two locks overlap each other.
 */
static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
{
	return ((fl1->fl_end >= fl2->fl_start) &&
		(fl2->fl_end >= fl1->fl_start));
}

/*
 * Check whether two locks have the same owner.
 */
static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
{
	return fl1->fl_owner == fl2->fl_owner;
}

/* Must be called with the flc_lock held! */
static void locks_insert_global_locks(struct file_lock *fl)
{
	struct file_lock_list_struct *fll = this_cpu_ptr(&file_lock_list);

	percpu_rwsem_assert_held(&file_rwsem);

	spin_lock(&fll->lock);
	fl->fl_link_cpu = smp_processor_id();
	hlist_add_head(&fl->fl_link, &fll->hlist);
	spin_unlock(&fll->lock);
}

/* Must be called with the flc_lock held! */
static void locks_delete_global_locks(struct file_lock *fl)
{
	struct file_lock_list_struct *fll;

	percpu_rwsem_assert_held(&file_rwsem);

	/*
	 * Avoid taking lock if already unhashed. This is safe since this check
	 * is done while holding the flc_lock, and new insertions into the list
	 * also require that it be held.
	 */
	if (hlist_unhashed(&fl->fl_link))
		return;

	fll = per_cpu_ptr(&file_lock_list, fl->fl_link_cpu);
	spin_lock(&fll->lock);
	hlist_del_init(&fl->fl_link);
	spin_unlock(&fll->lock);
}

static unsigned long
posix_owner_key(struct file_lock *fl)
{
	return (unsigned long)fl->fl_owner;
}
static void locks_insert_global_blocked(struct file_lock *waiter)
{
	lockdep_assert_held(&blocked_lock_lock);

	hash_add(blocked_hash, &waiter->fl_link, posix_owner_key(waiter));
}

static void locks_delete_global_blocked(struct file_lock *waiter)
{
	lockdep_assert_held(&blocked_lock_lock);

	hash_del(&waiter->fl_link);
}

/* Remove waiter from blocker's block list.
 * When blocker ends up pointing to itself then the list is empty.
 *
 * Must be called with blocked_lock_lock held.
 */
static void __locks_delete_block(struct file_lock *waiter)
{
	locks_delete_global_blocked(waiter);
	list_del_init(&waiter->fl_blocked_member);
}

static void __locks_wake_up_blocks(struct file_lock *blocker)
{
	while (!list_empty(&blocker->fl_blocked_requests)) {
		struct file_lock *waiter;

		waiter = list_first_entry(&blocker->fl_blocked_requests,
					  struct file_lock, fl_blocked_member);
		__locks_delete_block(waiter);
		if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
			waiter->fl_lmops->lm_notify(waiter);
		else
			wake_up(&waiter->fl_wait);

		/*
		 * The setting of fl_blocker to NULL marks the "done"
		 * point in deleting a block. Paired with acquire at the top
		 * of locks_delete_block().
		 */
		smp_store_release(&waiter->fl_blocker, NULL);
	}
}
/**
 * locks_delete_block - stop waiting for a file lock
 * @waiter: the lock which was waiting
 *
 * lockd/nfsd need to disconnect the lock while working on it.
 */
int locks_delete_block(struct file_lock *waiter)
{
	int status = -ENOENT;

	/*
	 * If fl_blocker is NULL, it won't be set again as this thread "owns"
	 * the lock and is the only one that might try to claim the lock.
	 *
	 * We use acquire/release to manage fl_blocker so that we can
	 * optimize away taking the blocked_lock_lock in many cases.
	 *
	 * The smp_load_acquire guarantees two things:
	 *
	 * 1/ that fl_blocked_requests can be tested locklessly. If something
	 * was recently added to that list it must have been in a locked region
	 * *before* the locked region when fl_blocker was set to NULL.
	 *
	 * 2/ that no other thread is accessing 'waiter', so it is safe to free
	 * it. __locks_wake_up_blocks is careful not to touch waiter after
	 * fl_blocker is released.
	 *
	 * If a lockless check of fl_blocker shows it to be NULL, we know that
	 * no new locks can be inserted into its fl_blocked_requests list, and
	 * can avoid doing anything further if the list is empty.
	 */
	if (!smp_load_acquire(&waiter->fl_blocker) &&
	    list_empty(&waiter->fl_blocked_requests))
		return status;

	spin_lock(&blocked_lock_lock);
	if (waiter->fl_blocker)
		status = 0;
	__locks_wake_up_blocks(waiter);
	__locks_delete_block(waiter);

	/*
	 * The setting of fl_blocker to NULL marks the "done" point in deleting
	 * a block. Paired with acquire at the top of this function.
	 */
	smp_store_release(&waiter->fl_blocker, NULL);
	spin_unlock(&blocked_lock_lock);
	return status;
}
EXPORT_SYMBOL(locks_delete_block);
/* Insert waiter into blocker's block list.
 * We use a circular list so that processes can be easily woken up in
 * the order they blocked. The documentation doesn't require this but
 * it seems like the reasonable thing to do.
 *
 * Must be called with both the flc_lock and blocked_lock_lock held. The
 * fl_blocked_requests list itself is protected by the blocked_lock_lock,
 * but by ensuring that the flc_lock is also held on insertions we can avoid
 * taking the blocked_lock_lock in some cases when we see that the
 * fl_blocked_requests list is empty.
 *
 * Rather than just adding to the list, we check for conflicts with any existing
 * waiters, and add beneath any waiter that blocks the new waiter.
 * Thus wakeups don't happen until needed.
 */
static void __locks_insert_block(struct file_lock *blocker,
				 struct file_lock *waiter,
				 bool conflict(struct file_lock *,
					       struct file_lock *))
{
	struct file_lock *fl;

	BUG_ON(!list_empty(&waiter->fl_blocked_member));

new_blocker:
	list_for_each_entry(fl, &blocker->fl_blocked_requests, fl_blocked_member)
		if (conflict(fl, waiter)) {
			blocker = fl;
			goto new_blocker;
		}
	waiter->fl_blocker = blocker;
	list_add_tail(&waiter->fl_blocked_member, &blocker->fl_blocked_requests);
	if (IS_POSIX(blocker) && !IS_OFDLCK(blocker))
		locks_insert_global_blocked(waiter);

	/* The requests in waiter->fl_blocked are known to conflict with
	 * waiter, but might not conflict with blocker, or the requests
	 * and lock which block it. So they all need to be woken.
	 */
	__locks_wake_up_blocks(waiter);
}

/* Must be called with flc_lock held. */
static void locks_insert_block(struct file_lock *blocker,
			       struct file_lock *waiter,
			       bool conflict(struct file_lock *,
					     struct file_lock *))
{
	spin_lock(&blocked_lock_lock);
	__locks_insert_block(blocker, waiter, conflict);
	spin_unlock(&blocked_lock_lock);
}
/*
 * Wake up processes blocked waiting for blocker.
 *
 * Must be called with the inode->flc_lock held!
 */
static void locks_wake_up_blocks(struct file_lock *blocker)
{
	/*
	 * Avoid taking global lock if list is empty. This is safe since new
	 * blocked requests are only added to the list under the flc_lock, and
	 * the flc_lock is always held here. Note that removal from the
	 * fl_blocked_requests list does not require the flc_lock, so we must
	 * recheck list_empty() after acquiring the blocked_lock_lock.
	 */
	if (list_empty(&blocker->fl_blocked_requests))
		return;

	spin_lock(&blocked_lock_lock);
	__locks_wake_up_blocks(blocker);
	spin_unlock(&blocked_lock_lock);
}

static void
locks_insert_lock_ctx(struct file_lock *fl, struct list_head *before)
{
	list_add_tail(&fl->fl_list, before);
	locks_insert_global_locks(fl);
}

static void
locks_unlink_lock_ctx(struct file_lock *fl)
{
	locks_delete_global_locks(fl);
	list_del_init(&fl->fl_list);
	locks_wake_up_blocks(fl);
}

static void
locks_delete_lock_ctx(struct file_lock *fl, struct list_head *dispose)
{
	locks_unlink_lock_ctx(fl);
	if (dispose)
		list_add(&fl->fl_list, dispose);
	else
		locks_free_lock(fl);
}
/* Determine if lock sys_fl blocks lock caller_fl. Common functionality
 * checks for shared/exclusive status of overlapping locks.
 */
static bool locks_conflict(struct file_lock *caller_fl,
			   struct file_lock *sys_fl)
{
	if (sys_fl->fl_type == F_WRLCK)
		return true;
	if (caller_fl->fl_type == F_WRLCK)
		return true;
	return false;
}

/* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
 * checking before calling the locks_conflict().
 */
static bool posix_locks_conflict(struct file_lock *caller_fl,
				 struct file_lock *sys_fl)
{
	/* POSIX locks owned by the same process do not conflict with
	 * each other.
	 */
	if (posix_same_owner(caller_fl, sys_fl))
		return false;

	/* Check whether they overlap */
	if (!locks_overlap(caller_fl, sys_fl))
		return false;

	return locks_conflict(caller_fl, sys_fl);
}

/* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific
 * checking before calling the locks_conflict().
 */
static bool flock_locks_conflict(struct file_lock *caller_fl,
				 struct file_lock *sys_fl)
{
	/* FLOCK locks referring to the same filp do not conflict with
	 * each other.
	 */
	if (caller_fl->fl_file == sys_fl->fl_file)
		return false;
	if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND))
		return false;

	return locks_conflict(caller_fl, sys_fl);
}
void
posix_test_lock(struct file *filp, struct file_lock *fl)
{
	struct file_lock *cfl;
	struct file_lock_context *ctx;
	struct inode *inode = locks_inode(filp);

	ctx = smp_load_acquire(&inode->i_flctx);
	if (!ctx || list_empty_careful(&ctx->flc_posix)) {
		fl->fl_type = F_UNLCK;
		return;
	}

	spin_lock(&ctx->flc_lock);
	list_for_each_entry(cfl, &ctx->flc_posix, fl_list) {
		if (posix_locks_conflict(fl, cfl)) {
			locks_copy_conflock(fl, cfl);
			goto out;
		}
	}
	fl->fl_type = F_UNLCK;
out:
	spin_unlock(&ctx->flc_lock);
	return;
}
EXPORT_SYMBOL(posix_test_lock);
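
/*
 * The userspace view of the test above (a sketch; fd is assumed open):
 * F_GETLK either overwrites the struct with the first conflicting lock
 * found, or sets l_type to F_UNLCK when the lock could have been placed.
 *
 *	struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET };
 *	fcntl(fd, F_GETLK, &fl);
 *	if (fl.l_type == F_UNLCK)
 *		...	// no conflict
 *	else
 *		...	// fl now describes the blocker (fl.l_pid etc.)
 */
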
/*
 * Deadlock detection:
 *
 * We attempt to detect deadlocks that are due purely to posix file
 * locks.
 *
 * We assume that a task can be waiting for at most one lock at a time.
 * So for any acquired lock, the process holding that lock may be
 * waiting on at most one other lock. That lock in turn may be held by
 * someone waiting for at most one other lock. Given a requested lock
 * caller_fl which is about to wait for a conflicting lock block_fl, we
 * follow this chain of waiters to ensure we are not about to create a
 * cycle.
 *
 * Since we do this before we ever put a process to sleep on a lock, we
 * are ensured that there is never a cycle; that is what guarantees that
 * the while() loop in posix_locks_deadlock() eventually completes.
 *
 * Note: the above assumption may not be true when handling lock
 * requests from a broken NFS client. It may also fail in the presence
 * of tasks (such as posix threads) sharing the same open file table.
 * To handle those cases, we just bail out after a few iterations.
 *
 * For FL_OFDLCK locks, the owner is the filp, not the files_struct.
 * Because the owner is not even nominally tied to a thread of
 * execution, the deadlock detection below can't reasonably work well. Just
 * skip it for those.
 *
 * In principle, we could do a more limited deadlock detection on FL_OFDLCK
 * locks that just checks for the case where two tasks are attempting to
 * upgrade from read to write locks on the same inode.
 */

#define MAX_DEADLK_ITERATIONS 10
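
/*
 * The classic two-process cycle this catches (illustrative):
 *
 *	task A: holds a lock on byte 0, blocks waiting for byte 1
 *	task B: holds a lock on byte 1, then requests byte 0
 *
 * B's request would wait on a lock whose owner is already waiting on B,
 * so posix_locks_deadlock() reports a deadlock and B's fcntl(F_SETLKW)
 * fails with -EDEADLK instead of sleeping forever.
 */
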
/* Find a lock that the owner of the given block_fl is blocking on. */
static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
{
	struct file_lock *fl;

	hash_for_each_possible(blocked_hash, fl, fl_link, posix_owner_key(block_fl)) {
		if (posix_same_owner(fl, block_fl)) {
			while (fl->fl_blocker)
				fl = fl->fl_blocker;
			return fl;
		}
	}
	return NULL;
}

/* Must be called with the blocked_lock_lock held! */
static int posix_locks_deadlock(struct file_lock *caller_fl,
				struct file_lock *block_fl)
{
	int i = 0;

	lockdep_assert_held(&blocked_lock_lock);

	/*
	 * This deadlock detector can't reasonably detect deadlocks with
	 * FL_OFDLCK locks, since they aren't owned by a process, per-se.
	 */
	if (IS_OFDLCK(caller_fl))
		return 0;

	while ((block_fl = what_owner_is_waiting_for(block_fl))) {
		if (i++ > MAX_DEADLK_ITERATIONS)
			return 0;
		if (posix_same_owner(caller_fl, block_fl))
			return 1;
	}
	return 0;
}
/* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
 * after any leases, but before any posix locks.
 *
 * Note that if called with an FL_EXISTS argument, the caller may determine
 * whether or not a lock was successfully freed by testing the return
 * value for -ENOENT.
 */
static int flock_lock_inode(struct inode *inode, struct file_lock *request)
{
	struct file_lock *new_fl = NULL;
	struct file_lock *fl;
	struct file_lock_context *ctx;
	int error = 0;
	bool found = false;
	LIST_HEAD(dispose);

	ctx = locks_get_lock_context(inode, request->fl_type);
	if (!ctx) {
		if (request->fl_type != F_UNLCK)
			return -ENOMEM;
		return (request->fl_flags & FL_EXISTS) ? -ENOENT : 0;
	}

	if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
		new_fl = locks_alloc_lock();
		if (!new_fl)
			return -ENOMEM;
	}

	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	if (request->fl_flags & FL_ACCESS)
		goto find_conflict;

	list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
		if (request->fl_file != fl->fl_file)
			continue;
		if (request->fl_type == fl->fl_type)
			goto out;
		found = true;
		locks_delete_lock_ctx(fl, &dispose);
		break;
	}

	if (request->fl_type == F_UNLCK) {
		if ((request->fl_flags & FL_EXISTS) && !found)
			error = -ENOENT;
		goto out;
	}

find_conflict:
	list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
		if (!flock_locks_conflict(request, fl))
			continue;
		error = -EAGAIN;
		if (!(request->fl_flags & FL_SLEEP))
			goto out;
		error = FILE_LOCK_DEFERRED;
		locks_insert_block(fl, request, flock_locks_conflict);
		goto out;
	}
	if (request->fl_flags & FL_ACCESS)
		goto out;
	locks_copy_lock(new_fl, request);
	locks_move_blocks(new_fl, request);
	locks_insert_lock_ctx(new_fl, &ctx->flc_flock);
	new_fl = NULL;
	error = 0;

out:
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);
	if (new_fl)
		locks_free_lock(new_fl);
	locks_dispose_list(&dispose);
	trace_flock_lock_inode(inode, request, error);
	return error;
}
static int posix_lock_inode(struct inode *inode, struct file_lock *request,
			    struct file_lock *conflock)
{
	struct file_lock *fl, *tmp;
	struct file_lock *new_fl = NULL;
	struct file_lock *new_fl2 = NULL;
	struct file_lock *left = NULL;
	struct file_lock *right = NULL;
	struct file_lock_context *ctx;
	int error;
	bool added = false;
	LIST_HEAD(dispose);

	ctx = locks_get_lock_context(inode, request->fl_type);
	if (!ctx)
		return (request->fl_type == F_UNLCK) ? 0 : -ENOMEM;

	/*
	 * We may need two file_lock structures for this operation,
	 * so we get them in advance to avoid races.
	 *
	 * In some cases we can be sure, that no new locks will be needed
	 */
	if (!(request->fl_flags & FL_ACCESS) &&
	    (request->fl_type != F_UNLCK ||
	     request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
		new_fl = locks_alloc_lock();
		new_fl2 = locks_alloc_lock();
	}

	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	/*
	 * New lock request. Walk all POSIX locks and look for conflicts. If
	 * there are any, either return error or put the request on the
	 * blocker's list of waiters and the global blocked_hash.
	 */
	if (request->fl_type != F_UNLCK) {
		list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
			if (!posix_locks_conflict(request, fl))
				continue;
			if (conflock)
				locks_copy_conflock(conflock, fl);
			error = -EAGAIN;
			if (!(request->fl_flags & FL_SLEEP))
				goto out;
			/*
			 * Deadlock detection and insertion into the blocked
			 * locks list must be done while holding the same lock!
			 */
			error = -EDEADLK;
			spin_lock(&blocked_lock_lock);
			/*
			 * Ensure that we don't find any locks blocked on this
			 * request during deadlock detection.
			 */
			__locks_wake_up_blocks(request);
			if (likely(!posix_locks_deadlock(request, fl))) {
				error = FILE_LOCK_DEFERRED;
				__locks_insert_block(fl, request,
						     posix_locks_conflict);
			}
			spin_unlock(&blocked_lock_lock);
			goto out;
		}
	}

	/* If we're just looking for a conflict, we're done. */
	error = 0;
	if (request->fl_flags & FL_ACCESS)
		goto out;

	/* Find the first old lock with the same owner as the new lock */
	list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
		if (posix_same_owner(request, fl))
			break;
	}

	/* Process locks with this owner. */
	list_for_each_entry_safe_from(fl, tmp, &ctx->flc_posix, fl_list) {
		if (!posix_same_owner(request, fl))
			break;

		/* Detect adjacent or overlapping regions (if same lock type) */
		if (request->fl_type == fl->fl_type) {
			/* In all comparisons of start vs end, use
			 * "start - 1" rather than "end + 1". If end
			 * is OFFSET_MAX, end + 1 will become negative.
			 */
			if (fl->fl_end < request->fl_start - 1)
				continue;
			/* If the next lock in the list has entirely bigger
			 * addresses than the new one, insert the lock here.
			 */
			if (fl->fl_start - 1 > request->fl_end)
				break;

			/* If we come here, the new and old lock are of the
			 * same type and adjacent or overlapping. Make one
			 * lock yielding from the lower start address of both
			 * locks to the higher end address.
			 */
			if (fl->fl_start > request->fl_start)
				fl->fl_start = request->fl_start;
			else
				request->fl_start = fl->fl_start;
			if (fl->fl_end < request->fl_end)
				fl->fl_end = request->fl_end;
			else
				request->fl_end = fl->fl_end;
			if (added) {
				locks_delete_lock_ctx(fl, &dispose);
				continue;
			}
			request = fl;
			added = true;
		} else {
			/* Processing for different lock types is a bit
			 * more complex.
			 */
			if (fl->fl_end < request->fl_start)
				continue;
			if (fl->fl_start > request->fl_end)
				break;
			if (request->fl_type == F_UNLCK)
				added = true;
			if (fl->fl_start < request->fl_start)
				left = fl;
			/* If the next lock in the list has a higher end
			 * address than the new one, insert the new one here.
			 */
			if (fl->fl_end > request->fl_end) {
				right = fl;
				break;
			}
			if (fl->fl_start >= request->fl_start) {
				/* The new lock completely replaces an old
				 * one (This may happen several times).
				 */
				if (added) {
					locks_delete_lock_ctx(fl, &dispose);
					continue;
				}
				/*
				 * Replace the old lock with new_fl, and
				 * remove the old one. It's safe to do the
				 * insert here since we know that we won't be
				 * using new_fl later, and that the lock is
				 * just replacing an existing lock.
				 */
				error = -ENOLCK;
				if (!new_fl)
					goto out;
				locks_copy_lock(new_fl, request);
				request = new_fl;
				new_fl = NULL;
				locks_insert_lock_ctx(request, &fl->fl_list);
				locks_delete_lock_ctx(fl, &dispose);
				added = true;
			}
		}
	}

	/*
	 * The above code only modifies existing locks in case of merging or
	 * replacing. If new lock(s) need to be inserted all modifications are
	 * done below this, so it's safe yet to bail out.
	 */
	error = -ENOLCK; /* "no luck" */
	if (right && left == right && !new_fl2)
		goto out;

	error = 0;
	if (!added) {
		if (request->fl_type == F_UNLCK) {
			if (request->fl_flags & FL_EXISTS)
				error = -ENOENT;
			goto out;
		}

		if (!new_fl) {
			error = -ENOLCK;
			goto out;
		}
		locks_copy_lock(new_fl, request);
		locks_move_blocks(new_fl, request);
		locks_insert_lock_ctx(new_fl, &fl->fl_list);
		fl = new_fl;
		new_fl = NULL;
	}
	if (right) {
		if (left == right) {
			/* The new lock breaks the old one in two pieces,
			 * so we have to use the second new lock.
			 */
			left = new_fl2;
			new_fl2 = NULL;
			locks_copy_lock(left, right);
			locks_insert_lock_ctx(left, &fl->fl_list);
		}
		right->fl_start = request->fl_end + 1;
		locks_wake_up_blocks(right);
	}
	if (left) {
		left->fl_end = request->fl_start - 1;
		locks_wake_up_blocks(left);
	}
out:
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);
	/*
	 * Free any unused locks.
	 */
	if (new_fl)
		locks_free_lock(new_fl);
	if (new_fl2)
		locks_free_lock(new_fl2);
	locks_dispose_list(&dispose);
	trace_posix_lock_inode(inode, request, error);

	return error;
}
/**
 * posix_lock_file - Apply a POSIX-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 * @conflock: Place to return a copy of the conflicting lock, if found.
 *
 * Add a POSIX style lock to a file.
 * We merge adjacent & overlapping locks whenever possible.
 * POSIX locks are sorted by owner task, then by starting address.
 *
 * Note that if called with an FL_EXISTS argument, the caller may determine
 * whether or not a lock was successfully freed by testing the return
 * value for -ENOENT.
 */
int posix_lock_file(struct file *filp, struct file_lock *fl,
		    struct file_lock *conflock)
{
	return posix_lock_inode(locks_inode(filp), fl, conflock);
}
EXPORT_SYMBOL(posix_lock_file);
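
/*
 * In-kernel usage sketch (error handling elided): a caller that wants the
 * conflicting lock reported back can pass a spare conflock:
 *
 *	struct file_lock *conf = locks_alloc_lock();
 *	int err = posix_lock_file(filp, fl, conf);
 *	// on -EAGAIN (FL_SLEEP unset), *conf describes the blocking lock
 */
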
/**
 * posix_lock_inode_wait - Apply a POSIX-style lock to a file
 * @inode: inode of file to which lock request should be applied
 * @fl: The lock to be applied
 *
 * Apply a POSIX style lock request to an inode.
 */
static int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl)
{
	int error;

	might_sleep();
	for (;;) {
		error = posix_lock_inode(inode, fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl->fl_wait,
						 list_empty(&fl->fl_blocked_member));
		if (error)
			break;
	}
	locks_delete_block(fl);
	return error;
}
#ifdef CONFIG_MANDATORY_FILE_LOCKING
/**
 * locks_mandatory_locked - Check for an active lock
 * @file: the file to check
 *
 * Searches the inode's list of locks to find any POSIX locks which conflict.
 * This function is called from locks_verify_locked() only.
 */
int locks_mandatory_locked(struct file *file)
{
	int ret;
	struct inode *inode = locks_inode(file);
	struct file_lock_context *ctx;
	struct file_lock *fl;

	ctx = smp_load_acquire(&inode->i_flctx);
	if (!ctx || list_empty_careful(&ctx->flc_posix))
		return 0;

	/*
	 * Search the lock list for this inode for any POSIX locks.
	 */
	spin_lock(&ctx->flc_lock);
	ret = 0;
	list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
		if (fl->fl_owner != current->files &&
		    fl->fl_owner != file) {
			ret = -EAGAIN;
			break;
		}
	}
	spin_unlock(&ctx->flc_lock);
	return ret;
}

/**
 * locks_mandatory_area - Check for a conflicting lock
 * @inode:	the file to check
 * @filp:	how the file was opened (if it was)
 * @start:	first byte in the file to check
 * @end:	last byte in the file to check
 * @type:	%F_WRLCK for a write lock, else %F_RDLCK
 *
 * Searches the inode's list of locks to find any POSIX locks which conflict.
 */
int locks_mandatory_area(struct inode *inode, struct file *filp, loff_t start,
			 loff_t end, unsigned char type)
{
	struct file_lock fl;
	int error;
	bool sleep = false;

	locks_init_lock(&fl);
	fl.fl_pid = current->tgid;
	fl.fl_file = filp;
	fl.fl_flags = FL_POSIX | FL_ACCESS;
	if (filp && !(filp->f_flags & O_NONBLOCK))
		sleep = true;
	fl.fl_type = type;
	fl.fl_start = start;
	fl.fl_end = end;

	for (;;) {
		if (filp) {
			fl.fl_owner = filp;
			fl.fl_flags &= ~FL_SLEEP;
			error = posix_lock_inode(inode, &fl, NULL);
			if (!error)
				break;
		}

		if (sleep)
			fl.fl_flags |= FL_SLEEP;
		fl.fl_owner = current->files;
		error = posix_lock_inode(inode, &fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl.fl_wait,
						 list_empty(&fl.fl_blocked_member));
		if (!error) {
			/*
			 * If we've been sleeping someone might have
			 * changed the permissions behind our back.
			 */
			if (__mandatory_lock(inode))
				continue;
		}

		break;
	}
	locks_delete_block(&fl);

	return error;
}
EXPORT_SYMBOL(locks_mandatory_area);
#endif /* CONFIG_MANDATORY_FILE_LOCKING */
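
/*
 * For reference, the userspace setup that makes the checks above fire
 * (a sketch, not kernel code): mandatory locking applies only on
 * filesystems mounted with "-o mand", to files with the setgid bit set
 * and the group-execute bit clear:
 *
 *	mount -o mand /dev/sdb1 /mnt
 *	chmod g+s,g-x /mnt/file
 */
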
static void lease_clear_pending(struct file_lock *fl, int arg)
{
	switch (arg) {
	case F_UNLCK:
		fl->fl_flags &= ~FL_UNLOCK_PENDING;
		fallthrough;
	case F_RDLCK:
		fl->fl_flags &= ~FL_DOWNGRADE_PENDING;
	}
}

/* We already had a lease on this file; just change its type */
int lease_modify(struct file_lock *fl, int arg, struct list_head *dispose)
{
	int error = assign_type(fl, arg);

	if (error)
		return error;
	lease_clear_pending(fl, arg);
	locks_wake_up_blocks(fl);
	if (arg == F_UNLCK) {
		struct file *filp = fl->fl_file;

		f_delown(filp);
		filp->f_owner.signum = 0;
		fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
		if (fl->fl_fasync != NULL) {
			printk(KERN_ERR "locks_delete_lock: fasync == %p\n",
			       fl->fl_fasync);
			fl->fl_fasync = NULL;
		}
		locks_delete_lock_ctx(fl, dispose);
	}
	return 0;
}
EXPORT_SYMBOL(lease_modify);
static bool past_time(unsigned long then)
{
	if (!then)
		/* 0 is a special value meaning "this never expires": */
		return false;
	return time_after(jiffies, then);
}

static void time_out_leases(struct inode *inode, struct list_head *dispose)
{
	struct file_lock_context *ctx = inode->i_flctx;
	struct file_lock *fl, *tmp;

	lockdep_assert_held(&ctx->flc_lock);

	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
		trace_time_out_leases(inode, fl);
		if (past_time(fl->fl_downgrade_time))
			lease_modify(fl, F_RDLCK, dispose);
		if (past_time(fl->fl_break_time))
			lease_modify(fl, F_UNLCK, dispose);
	}
}
static bool leases_conflict(struct file_lock *lease, struct file_lock *breaker)
{
	bool rc;

	if (lease->fl_lmops->lm_breaker_owns_lease
			&& lease->fl_lmops->lm_breaker_owns_lease(lease))
		return false;
	if ((breaker->fl_flags & FL_LAYOUT) != (lease->fl_flags & FL_LAYOUT)) {
		rc = false;
		goto trace;
	}
	if ((breaker->fl_flags & FL_DELEG) && (lease->fl_flags & FL_LEASE)) {
		rc = false;
		goto trace;
	}

	rc = locks_conflict(breaker, lease);
trace:
	trace_leases_conflict(rc, lease, breaker);
	return rc;
}

static bool
any_leases_conflict(struct inode *inode, struct file_lock *breaker)
{
	struct file_lock_context *ctx = inode->i_flctx;
	struct file_lock *fl;

	lockdep_assert_held(&ctx->flc_lock);

	list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
		if (leases_conflict(fl, breaker))
			return true;
	}
	return false;
}
/**
 * __break_lease - revoke all outstanding leases on file
 * @inode: the inode of the file to return
 * @mode: O_RDONLY: break only write leases; O_WRONLY or O_RDWR:
 *	    break all leases
 * @type: FL_LEASE: break leases and delegations; FL_DELEG: break
 *	    only delegations
 *
 * break_lease (inlined for speed) has checked there already is at least
 * some kind of lock (maybe a lease) on this file. Leases are broken on
 * a call to open() or truncate(). This function can sleep unless you
 * specified %O_NONBLOCK to your open().
 */
int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
{
	int error = 0;
	struct file_lock_context *ctx;
	struct file_lock *new_fl, *fl, *tmp;
	unsigned long break_time;
	int want_write = (mode & O_ACCMODE) != O_RDONLY;
	LIST_HEAD(dispose);

	new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
	if (IS_ERR(new_fl))
		return PTR_ERR(new_fl);
	new_fl->fl_flags = type;

	/* typically we will check that ctx is non-NULL before calling */
	ctx = smp_load_acquire(&inode->i_flctx);
	if (!ctx) {
		WARN_ON_ONCE(1);
		goto free_lock;
	}

	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);

	time_out_leases(inode, &dispose);

	if (!any_leases_conflict(inode, new_fl))
		goto out;

	break_time = 0;
	if (lease_break_time > 0) {
		break_time = jiffies + lease_break_time * HZ;
		if (break_time == 0)
			break_time++;	/* so that 0 means no break time */
	}

	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
		if (!leases_conflict(fl, new_fl))
			continue;
		if (want_write) {
			if (fl->fl_flags & FL_UNLOCK_PENDING)
				continue;
			fl->fl_flags |= FL_UNLOCK_PENDING;
			fl->fl_break_time = break_time;
		} else {
			if (lease_breaking(fl))
				continue;
			fl->fl_flags |= FL_DOWNGRADE_PENDING;
			fl->fl_downgrade_time = break_time;
		}
		if (fl->fl_lmops->lm_break(fl))
			locks_delete_lock_ctx(fl, &dispose);
	}

	if (list_empty(&ctx->flc_lease))
		goto out;

	if (mode & O_NONBLOCK) {
		trace_break_lease_noblock(inode, new_fl);
		error = -EWOULDBLOCK;
		goto out;
	}

restart:
	fl = list_first_entry(&ctx->flc_lease, struct file_lock, fl_list);
	break_time = fl->fl_break_time;
	if (break_time != 0)
		break_time -= jiffies;
	if (break_time == 0)
		break_time++;
	locks_insert_block(fl, new_fl, leases_conflict);
	trace_break_lease_block(inode, new_fl);
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);

	locks_dispose_list(&dispose);
	error = wait_event_interruptible_timeout(new_fl->fl_wait,
						 list_empty(&new_fl->fl_blocked_member),
						 break_time);

	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	trace_break_lease_unblock(inode, new_fl);
	locks_delete_block(new_fl);
	if (error >= 0) {
		/*
		 * Wait for the next conflicting lease that has not been
		 * broken yet
		 */
		if (error == 0)
			time_out_leases(inode, &dispose);
		if (any_leases_conflict(inode, new_fl))
			goto restart;
		error = 0;
	}
out:
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);
	locks_dispose_list(&dispose);
free_lock:
	locks_free_lock(new_fl);
	return error;
}
EXPORT_SYMBOL(__break_lease);
/**
 * lease_get_mtime - update modified time of an inode with exclusive lease
 * @inode: the inode
 * @time: pointer to a timespec which contains the last modified time
 *
 * This is to force NFS clients to flush their caches for files with
 * exclusive leases. The justification is that if someone has an
 * exclusive lease, then they could be modifying it.
 */
void lease_get_mtime(struct inode *inode, struct timespec64 *time)
{
	bool has_lease = false;
	struct file_lock_context *ctx;
	struct file_lock *fl;

	ctx = smp_load_acquire(&inode->i_flctx);
	if (ctx && !list_empty_careful(&ctx->flc_lease)) {
		spin_lock(&ctx->flc_lock);
		fl = list_first_entry_or_null(&ctx->flc_lease,
					      struct file_lock, fl_list);
		if (fl && (fl->fl_type == F_WRLCK))
			has_lease = true;
		spin_unlock(&ctx->flc_lock);
	}

	if (has_lease)
		*time = current_time(inode);
}
EXPORT_SYMBOL(lease_get_mtime);
/**
 * fcntl_getlease - Enquire what lease is currently active
 * @filp: the file
 *
 * The value returned by this function will be one of
 * (if no lease break is pending):
 *
 * %F_RDLCK to indicate a shared lease is held.
 *
 * %F_WRLCK to indicate an exclusive lease is held.
 *
 * %F_UNLCK to indicate no lease is held.
 *
 * (if a lease break is pending):
 *
 * %F_RDLCK to indicate an exclusive lease needs to be
 *		changed to a shared lease (or removed).
 *
 * %F_UNLCK to indicate the lease needs to be removed.
 *
 * XXX: sfr & willy disagree over whether F_INPROGRESS
 * should be returned to userspace.
 */
int fcntl_getlease(struct file *filp)
{
	struct file_lock *fl;
	struct inode *inode = locks_inode(filp);
	struct file_lock_context *ctx;
	int type = F_UNLCK;
	LIST_HEAD(dispose);

	ctx = smp_load_acquire(&inode->i_flctx);
	if (ctx && !list_empty_careful(&ctx->flc_lease)) {
		percpu_down_read(&file_rwsem);
		spin_lock(&ctx->flc_lock);
		time_out_leases(inode, &dispose);
		list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
			if (fl->fl_file != filp)
				continue;
			type = target_leasetype(fl);
			break;
		}
		spin_unlock(&ctx->flc_lock);
		percpu_up_read(&file_rwsem);

		locks_dispose_list(&dispose);
	}
	return type;
}
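
/*
 * Userspace equivalent (a sketch; fd assumed open): F_GETLEASE reports
 * the target lease type described above.
 *
 *	int type = fcntl(fd, F_GETLEASE);
 *	// type is F_RDLCK, F_WRLCK or F_UNLCK
 */
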
/**
 * check_conflicting_open - see if the given file points to an inode that has
 *			    an existing open that would conflict with the
 *			    desired lease.
 * @filp:	file to check
 * @arg:	type of lease that we're trying to acquire
 * @flags:	current lock flags
 *
 * Check to see if there's an existing open fd on this file that would
 * conflict with the lease we're trying to set.
 */
static int
check_conflicting_open(struct file *filp, const long arg, int flags)
{
	struct inode *inode = locks_inode(filp);
	int self_wcount = 0, self_rcount = 0;

	if (flags & FL_LAYOUT)
		return 0;

	if (arg == F_RDLCK)
		return inode_is_open_for_write(inode) ? -EAGAIN : 0;
	else if (arg != F_WRLCK)
		return 0;

	/*
	 * Make sure that only read/write count is from lease requestor.
	 * Note that this will result in denying write leases when i_writecount
	 * is negative, which is what we want. (We shouldn't grant write leases
	 * on files open for execution.)
	 */
	if (filp->f_mode & FMODE_WRITE)
		self_wcount = 1;
	else if (filp->f_mode & FMODE_READ)
		self_rcount = 1;

	if (atomic_read(&inode->i_writecount) != self_wcount ||
	    atomic_read(&inode->i_readcount) != self_rcount)
		return -EAGAIN;

	return 0;
}
static int
generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **priv)
{
	struct file_lock *fl, *my_fl = NULL, *lease;
	struct inode *inode = locks_inode(filp);
	struct file_lock_context *ctx;
	bool is_deleg = (*flp)->fl_flags & FL_DELEG;
	int error;
	LIST_HEAD(dispose);

	lease = *flp;
	trace_generic_add_lease(inode, lease);

	/* Note that arg is never F_UNLCK here */
	ctx = locks_get_lock_context(inode, arg);
	if (!ctx)
		return -ENOMEM;

	/*
	 * In the delegation case we need mutual exclusion with
	 * a number of operations that take the i_mutex. We trylock
	 * because delegations are an optional optimization, and if
	 * there's some chance of a conflict--we'd rather not
	 * bother, maybe that's a sign this just isn't a good file to
	 * hand out a delegation on.
	 */
	if (is_deleg && !inode_trylock(inode))
		return -EAGAIN;

	if (is_deleg && arg == F_WRLCK) {
		/* Write delegations are not currently supported: */
		inode_unlock(inode);
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	time_out_leases(inode, &dispose);
	error = check_conflicting_open(filp, arg, lease->fl_flags);
	if (error)
		goto out;

	/*
	 * At this point, we know that if there is an exclusive
	 * lease on this file, then we hold it on this filp
	 * (otherwise our open of this file would have blocked).
	 * And if we are trying to acquire an exclusive lease,
	 * then the file is not open by anyone (including us)
	 * except for this filp.
	 */
	error = -EAGAIN;
	list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
		if (fl->fl_file == filp &&
		    fl->fl_owner == lease->fl_owner) {
			my_fl = fl;
			continue;
		}

		/*
		 * No exclusive leases if someone else has a lease on
		 * this file:
		 */
		if (arg == F_WRLCK)
			goto out;
		/*
		 * Modifying our existing lease is OK, but no getting a
		 * new lease if someone else is opening for write:
		 */
		if (fl->fl_flags & FL_UNLOCK_PENDING)
			goto out;
	}

	if (my_fl != NULL) {
		lease = my_fl;
		error = lease->fl_lmops->lm_change(lease, arg, &dispose);
		if (error)
			goto out;
		goto out_setup;
	}

	error = -EINVAL;
	if (!leases_enable)
		goto out;

	locks_insert_lock_ctx(lease, &ctx->flc_lease);
	/*
	 * The check in break_lease() is lockless. It's possible for another
	 * open to race in after we did the earlier check for a conflicting
	 * open but before the lease was inserted. Check again for a
	 * conflicting open and cancel the lease if there is one.
	 *
	 * We also add a barrier here to ensure that the insertion of the lock
	 * precedes these checks.
	 */
	smp_mb();
	error = check_conflicting_open(filp, arg, lease->fl_flags);
	if (error) {
		locks_unlink_lock_ctx(lease);
		goto out;
	}

out_setup:
	if (lease->fl_lmops->lm_setup)
		lease->fl_lmops->lm_setup(lease, priv);
out:
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);
	locks_dispose_list(&dispose);
	if (is_deleg)
		inode_unlock(inode);
	if (!error && !my_fl)
		*flp = NULL;
	return error;
}
static int generic_delete_lease(struct file *filp, void *owner)
{
	int error = -EAGAIN;
	struct file_lock *fl, *victim = NULL;
	struct inode *inode = locks_inode(filp);
	struct file_lock_context *ctx;
	LIST_HEAD(dispose);

	ctx = smp_load_acquire(&inode->i_flctx);
	if (!ctx) {
		trace_generic_delete_lease(inode, NULL);
		return error;
	}

	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
		if (fl->fl_file == filp &&
		    fl->fl_owner == owner) {
			victim = fl;
			break;
		}
	}
	trace_generic_delete_lease(inode, victim);
	if (victim)
		error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);
	locks_dispose_list(&dispose);
	return error;
}
/**
 * generic_setlease - sets a lease on an open file
 * @filp:	file pointer
 * @arg:	type of lease to obtain
 * @flp:	input - file_lock to use, output - file_lock inserted
 * @priv:	private data for lm_setup (may be NULL if lm_setup
 *		doesn't require it)
 *
 * The (input) flp->fl_lmops->lm_break function is required
 * by break_lease().
 */
int generic_setlease(struct file *filp, long arg, struct file_lock **flp,
		     void **priv)
{
	struct inode *inode = locks_inode(filp);
	int error;

	if ((!uid_eq(current_fsuid(), inode->i_uid)) && !capable(CAP_LEASE))
		return -EACCES;
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	error = security_file_lock(filp, arg);
	if (error)
		return error;

	switch (arg) {
	case F_UNLCK:
		return generic_delete_lease(filp, *priv);
	case F_RDLCK:
	case F_WRLCK:
		if (!(*flp)->fl_lmops->lm_break) {
			WARN_ON_ONCE(1);
			return -ENOLCK;
		}

		return generic_add_lease(filp, arg, flp, priv);
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL(generic_setlease);
#if IS_ENABLED(CONFIG_SRCU)
/*
 * Kernel subsystems can register to be notified on any attempt to set
 * a new lease with the lease_notifier_chain. This is used by (e.g.) nfsd
 * to close files that it may have cached when there is an attempt to set a
 * conflicting lease.
 */
static struct srcu_notifier_head lease_notifier_chain;

static inline void
lease_notifier_chain_init(void)
{
	srcu_init_notifier_head(&lease_notifier_chain);
}

static inline void
setlease_notifier(long arg, struct file_lock *lease)
{
	if (arg != F_UNLCK)
		srcu_notifier_call_chain(&lease_notifier_chain, arg, lease);
}

int lease_register_notifier(struct notifier_block *nb)
{
	return srcu_notifier_chain_register(&lease_notifier_chain, nb);
}
EXPORT_SYMBOL_GPL(lease_register_notifier);

void lease_unregister_notifier(struct notifier_block *nb)
{
	srcu_notifier_chain_unregister(&lease_notifier_chain, nb);
}
EXPORT_SYMBOL_GPL(lease_unregister_notifier);

#else /* !IS_ENABLED(CONFIG_SRCU) */
static inline void
lease_notifier_chain_init(void)
{
}

static inline void
setlease_notifier(long arg, struct file_lock *lease)
{
}

int lease_register_notifier(struct notifier_block *nb)
{
	return 0;
}
EXPORT_SYMBOL_GPL(lease_register_notifier);

void lease_unregister_notifier(struct notifier_block *nb)
{
}
EXPORT_SYMBOL_GPL(lease_unregister_notifier);

#endif /* IS_ENABLED(CONFIG_SRCU) */
/**
 * vfs_setlease - sets a lease on an open file
 * @filp:	file pointer
 * @arg:	type of lease to obtain
 * @lease:	file_lock to use when adding a lease
 * @priv:	private info for lm_setup when adding a lease (may be
 *		NULL if lm_setup doesn't require it)
 *
 * Call this to establish a lease on the file. The "lease" argument is not
 * used for F_UNLCK requests and may be NULL. For commands that set or alter
 * an existing lease, the ``(*lease)->fl_lmops->lm_break`` operation must be
 * set; if not, this function will return -ENOLCK (and generate a scary-looking
 * stack trace).
 *
 * The "priv" pointer is passed directly to the lm_setup function as-is. It
 * may be NULL if the lm_setup operation doesn't require it.
 */
int
vfs_setlease(struct file *filp, long arg, struct file_lock **lease, void **priv)
{
	if (lease)
		setlease_notifier(arg, *lease);
	if (filp->f_op->setlease)
		return filp->f_op->setlease(filp, arg, lease, priv);
	else
		return generic_setlease(filp, arg, lease, priv);
}
EXPORT_SYMBOL_GPL(vfs_setlease);
static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
{
	struct file_lock *fl;
	struct fasync_struct *new;
	int error;

	fl = lease_alloc(filp, arg);
	if (IS_ERR(fl))
		return PTR_ERR(fl);

	new = fasync_alloc();
	if (!new) {
		locks_free_lock(fl);
		return -ENOMEM;
	}
	new->fa_fd = fd;

	error = vfs_setlease(filp, arg, &fl, (void **)&new);
	if (fl)
		locks_free_lock(fl);
	if (new)
		fasync_free(new);
	return error;
}
/**
 * fcntl_setlease - sets a lease on an open file
 * @fd: open file descriptor
 * @filp: file pointer
 * @arg: type of lease to obtain
 *
 * Call this fcntl to establish a lease on the file.
 * Note that you also need to call %F_SETSIG to
 * receive a signal when the lease is broken.
 */
int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
{
	if (arg == F_UNLCK)
		return vfs_setlease(filp, F_UNLCK, NULL, (void **)&filp);
	return do_fcntl_add_lease(fd, filp, arg);
}
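
/*
 * A userspace sketch of the full sequence (fd assumed open O_RDONLY,
 * error handling elided):
 *
 *	signal(SIGIO, on_break);		// or pick a signal via F_SETSIG
 *	fcntl(fd, F_SETLEASE, F_RDLCK);		// take a read lease
 *	// on conflict, SIGIO arrives; the holder then has up to
 *	// lease_break_time seconds to finish and call:
 *	fcntl(fd, F_SETLEASE, F_UNLCK);
 */
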
/**
 * flock_lock_inode_wait - Apply a FLOCK-style lock to a file
 * @inode: inode of the file to apply to
 * @fl: The lock to be applied
 *
 * Apply a FLOCK style lock request to an inode.
 */
static int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl)
{
	int error;

	might_sleep();
	for (;;) {
		error = flock_lock_inode(inode, fl);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl->fl_wait,
						 list_empty(&fl->fl_blocked_member));
		if (error)
			break;
	}
	locks_delete_block(fl);
	return error;
}
/**
 * locks_lock_inode_wait - Apply a lock to an inode
 * @inode: inode of the file to apply to
 * @fl: The lock to be applied
 *
 * Apply a POSIX or FLOCK style lock request to an inode.
 */
int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl)
{
	int res = 0;

	switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
	case FL_POSIX:
		res = posix_lock_inode_wait(inode, fl);
		break;
	case FL_FLOCK:
		res = flock_lock_inode_wait(inode, fl);
		break;
	default:
		BUG();
	}
	return res;
}
EXPORT_SYMBOL(locks_lock_inode_wait);
2202 * sys_flock: - flock() system call.
2203 * @fd: the file descriptor to lock.
2204 * @cmd: the type of lock to apply.
2206 * Apply a %FL_FLOCK style lock to an open file descriptor.
2207 * The @cmd can be one of:
2209 * - %LOCK_SH -- a shared lock.
2210 * - %LOCK_EX -- an exclusive lock.
2211 * - %LOCK_UN -- remove an existing lock.
2212 * - %LOCK_MAND -- a 'mandatory' flock.
2213 * This exists to emulate Windows Share Modes.
2215 * %LOCK_MAND can be combined with %LOCK_READ or %LOCK_WRITE to allow other
2216 * processes read and write access respectively.
SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
{
	struct fd f = fdget(fd);
	struct file_lock *lock;
	int can_sleep, unlock;
	int error;

	error = -EBADF;
	if (!f.file)
		goto out;

	can_sleep = !(cmd & LOCK_NB);
	cmd &= ~LOCK_NB;
	unlock = (cmd == LOCK_UN);

	if (!unlock && !(cmd & LOCK_MAND) &&
	    !(f.file->f_mode & (FMODE_READ|FMODE_WRITE)))
		goto out_putf;

	lock = flock_make_lock(f.file, cmd, NULL);
	if (IS_ERR(lock)) {
		error = PTR_ERR(lock);
		goto out_putf;
	}

	if (can_sleep)
		lock->fl_flags |= FL_SLEEP;

	error = security_file_lock(f.file, lock->fl_type);
	if (error)
		goto out_free;

	if (f.file->f_op->flock)
		error = f.file->f_op->flock(f.file,
					    (can_sleep) ? F_SETLKW : F_SETLK,
					    lock);
	else
		error = locks_lock_file_wait(f.file, lock);

 out_free:
	locks_free_lock(lock);

 out_putf:
	fdput(f);
 out:
	return error;
}
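
/*
 * Illustrative userspace sketch (added; not part of the original file):
 * taking an exclusive flock without blocking, then dropping it. The path
 * is hypothetical.
 *
 *	int fd = open("/some/lockfile", O_RDONLY);
 *	if (flock(fd, LOCK_EX | LOCK_NB) == -1)
 *		// EWOULDBLOCK: another open file description holds the lock
 *		return -1;
 *	flock(fd, LOCK_UN);
 */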
/**
 * vfs_test_lock - test file byte range lock
 * @filp: The file to test lock for
 * @fl: The lock to test; also used to hold result
 *
 * Returns -ERRNO on failure. Indicates presence of conflicting lock by
 * setting @fl->fl_type to something other than %F_UNLCK.
 */
int vfs_test_lock(struct file *filp, struct file_lock *fl)
{
	if (filp->f_op->lock)
		return filp->f_op->lock(filp, F_GETLK, fl);
	posix_test_lock(filp, fl);
	return 0;
}
EXPORT_SYMBOL_GPL(vfs_test_lock);
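
/*
 * Illustrative sketch (added; not part of the original file): probing the
 * whole file for a lock that would conflict with a write lock. On return,
 * fl.fl_type is F_UNLCK when there is no conflict; otherwise fl describes
 * the first conflicting lock found.
 */
static int __maybe_unused example_probe_conflict(struct file *filp)
{
	struct file_lock fl;
	int error;

	locks_init_lock(&fl);
	fl.fl_flags = FL_POSIX;
	fl.fl_type = F_WRLCK;
	fl.fl_start = 0;
	fl.fl_end = OFFSET_MAX;
	fl.fl_owner = current->files;	/* process-wide POSIX owner */
	fl.fl_pid = current->tgid;
	fl.fl_file = filp;

	error = vfs_test_lock(filp, &fl);
	if (error)
		return error;
	return fl.fl_type != F_UNLCK;	/* 1 == conflict present */
}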
/**
 * locks_translate_pid - translate a file_lock's fl_pid number into a namespace
 * @fl: The file_lock whose fl_pid should be translated
 * @ns: The namespace into which the pid should be translated
 *
 * Used to translate a fl_pid into a namespace virtual pid number
 */
static pid_t locks_translate_pid(struct file_lock *fl, struct pid_namespace *ns)
{
	pid_t vnr;
	struct pid *pid;

	if (IS_OFDLCK(fl))
		return -1;
	if (IS_REMOTELCK(fl))
		return fl->fl_pid;
	/*
	 * If the flock owner process is dead and its pid has been already
	 * freed, the translation below won't work, but we still want to show
	 * flock owner pid number in init pidns.
	 */
	if (ns == &init_pid_ns)
		return (pid_t)fl->fl_pid;

	rcu_read_lock();
	pid = find_pid_ns(fl->fl_pid, &init_pid_ns);
	vnr = pid_nr_ns(pid, ns);
	rcu_read_unlock();
	return vnr;
}
static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
{
	flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current));
#if BITS_PER_LONG == 32
	/*
	 * Make sure we can represent the posix lock via
	 * legacy 32bit flock.
	 */
	if (fl->fl_start > OFFT_OFFSET_MAX)
		return -EOVERFLOW;
	if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
		return -EOVERFLOW;
#endif
	flock->l_start = fl->fl_start;
	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
		fl->fl_end - fl->fl_start + 1;
	flock->l_whence = 0;
	flock->l_type = fl->fl_type;
	return 0;
}
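
/*
 * Worked example (added for clarity): a file_lock covering bytes
 * [100, 199] maps to l_start = 100, l_len = 199 - 100 + 1 = 100, while a
 * lock extending to end-of-file (fl_end == OFFSET_MAX) maps to the
 * special value l_len = 0.
 */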
#if BITS_PER_LONG == 32
static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
{
	flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current));
	flock->l_start = fl->fl_start;
	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
		fl->fl_end - fl->fl_start + 1;
	flock->l_whence = 0;
	flock->l_type = fl->fl_type;
}
#endif
/* Report the first existing lock that would conflict with l.
 * This implements the F_GETLK command of fcntl().
 */
int fcntl_getlk(struct file *filp, unsigned int cmd, struct flock *flock)
{
	struct file_lock *fl;
	int error;

	fl = locks_alloc_lock();
	if (fl == NULL)
		return -ENOMEM;
	error = -EINVAL;
	if (flock->l_type != F_RDLCK && flock->l_type != F_WRLCK)
		goto out;

	error = flock_to_posix_lock(filp, fl, flock);
	if (error)
		goto out;

	if (cmd == F_OFD_GETLK) {
		error = -EINVAL;
		if (flock->l_pid != 0)
			goto out;

		cmd = F_GETLK;
		fl->fl_flags |= FL_OFDLCK;
		fl->fl_owner = filp;
	}

	error = vfs_test_lock(filp, fl);
	if (error)
		goto out;

	flock->l_type = fl->fl_type;
	if (fl->fl_type != F_UNLCK) {
		error = posix_lock_to_flock(flock, fl);
		if (error)
			goto out;
	}
out:
	locks_free_lock(fl);
	return error;
}
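
/*
 * Illustrative userspace sketch (added; not part of the original file):
 * using F_GETLK to ask who would block a whole-file write lock.
 *
 *	struct flock fl = {
 *		.l_type = F_WRLCK, .l_whence = SEEK_SET,
 *		.l_start = 0, .l_len = 0,	// l_len == 0: to end of file
 *	};
 *	fcntl(fd, F_GETLK, &fl);
 *	if (fl.l_type != F_UNLCK)
 *		// fl now describes the first conflicting lock; fl.l_pid
 *		// identifies its owner (or -1 for an OFD lock)
 */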
/**
 * vfs_lock_file - file byte range lock
 * @filp: The file to apply the lock to
 * @cmd: type of locking operation (F_SETLK, F_GETLK, etc.)
 * @fl: The lock to be applied
 * @conf: Place to return a copy of the conflicting lock, if found.
 *
 * A caller that doesn't care about the conflicting lock may pass NULL
 * as the final argument.
 *
 * If the filesystem defines a private ->lock() method, then @conf will
 * be left unchanged; so a caller that cares should initialize it to
 * some acceptable default.
 *
 * To avoid blocking kernel daemons, such as lockd, that need to acquire POSIX
 * locks, the ->lock() interface may return asynchronously, before the lock has
 * been granted or denied by the underlying filesystem, if (and only if)
 * lm_grant is set. Callers expecting ->lock() to return asynchronously
 * will only use F_SETLK, not F_SETLKW; they will set FL_SLEEP if (and only if)
 * the request is for a blocking lock. When ->lock() does return asynchronously,
 * it must return FILE_LOCK_DEFERRED, and call ->lm_grant() when the lock
 * request completes.
 * If the request is for a non-blocking lock the filesystem should return
 * FILE_LOCK_DEFERRED, then try to get the lock and call the callback routine
 * with the result. If the request timed out the callback routine will return a
 * nonzero return code and the filesystem should release the lock. The
 * filesystem is also responsible for keeping a corresponding posix lock when it
 * grants a lock, so the VFS can find out which locks are locally held and do
 * the correct lock cleanup when required.
 * The underlying filesystem must not drop the kernel lock or call
 * ->lm_grant() before returning to the caller with a FILE_LOCK_DEFERRED
 * return code.
 */
int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
{
	if (filp->f_op->lock)
		return filp->f_op->lock(filp, cmd, fl);
	else
		return posix_lock_file(filp, fl, conf);
}
EXPORT_SYMBOL_GPL(vfs_lock_file);
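
/*
 * Illustrative sketch (added; not part of the original file) of the deferred
 * contract described above, as a filesystem with a hypothetical
 * my_queue_lock_request() helper might implement it: return
 * FILE_LOCK_DEFERRED immediately, then call fl->fl_lmops->lm_grant() once
 * the result is known.
 *
 *	static int example_fs_lock(struct file *filp, int cmd,
 *				   struct file_lock *fl)
 *	{
 *		if (!IS_SETLK(cmd))
 *			return posix_lock_file(filp, fl, NULL);
 *		my_queue_lock_request(filp, fl);  // grants asynchronously
 *		return FILE_LOCK_DEFERRED;
 *	}
 */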
static int do_lock_file_wait(struct file *filp, unsigned int cmd,
			     struct file_lock *fl)
{
	int error;

	error = security_file_lock(filp, fl->fl_type);
	if (error)
		return error;

	for (;;) {
		error = vfs_lock_file(filp, cmd, fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl->fl_wait,
				list_empty(&fl->fl_blocked_member));
		if (error)
			break;
	}
	locks_delete_block(fl);

	return error;
}
/* Ensure that fl->fl_file has compatible f_mode for F_SETLK calls */
static int
check_fmode_for_setlk(struct file_lock *fl)
{
	switch (fl->fl_type) {
	case F_RDLCK:
		if (!(fl->fl_file->f_mode & FMODE_READ))
			return -EBADF;
		break;
	case F_WRLCK:
		if (!(fl->fl_file->f_mode & FMODE_WRITE))
			return -EBADF;
	}
	return 0;
}
/* Apply the lock described by l to an open file descriptor.
 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
 */
int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
		struct flock *flock)
{
	struct file_lock *file_lock = locks_alloc_lock();
	struct inode *inode = locks_inode(filp);
	struct file *f;
	int error;

	if (file_lock == NULL)
		return -ENOLCK;

	/* Don't allow mandatory locks on files that may be memory mapped
	 * and shared.
	 */
	if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
		error = -EAGAIN;
		goto out;
	}

	error = flock_to_posix_lock(filp, file_lock, flock);
	if (error)
		goto out;

	error = check_fmode_for_setlk(file_lock);
	if (error)
		goto out;

	/*
	 * If the cmd is requesting file-private locks, then set the
	 * FL_OFDLCK flag and override the owner.
	 */
	switch (cmd) {
	case F_OFD_SETLK:
		error = -EINVAL;
		if (flock->l_pid != 0)
			goto out;

		cmd = F_SETLK;
		file_lock->fl_flags |= FL_OFDLCK;
		file_lock->fl_owner = filp;
		break;
	case F_OFD_SETLKW:
		error = -EINVAL;
		if (flock->l_pid != 0)
			goto out;

		cmd = F_SETLKW;
		file_lock->fl_flags |= FL_OFDLCK;
		file_lock->fl_owner = filp;
		fallthrough;
	case F_SETLKW:
		file_lock->fl_flags |= FL_SLEEP;
	}

	error = do_lock_file_wait(filp, cmd, file_lock);

	/*
	 * Attempt to detect a close/fcntl race and recover by releasing the
	 * lock that was just acquired. There is no need to do that when we're
	 * unlocking though, or for OFD locks.
	 */
	if (!error && file_lock->fl_type != F_UNLCK &&
	    !(file_lock->fl_flags & FL_OFDLCK)) {
		/*
		 * We need that spin_lock here - it prevents reordering between
		 * update of i_flctx->flc_posix and check for it done in
		 * close(). rcu_read_lock() wouldn't do.
		 */
		spin_lock(&current->files->file_lock);
		f = fcheck(fd);
		spin_unlock(&current->files->file_lock);
		if (f != filp) {
			file_lock->fl_type = F_UNLCK;
			error = do_lock_file_wait(filp, cmd, file_lock);
			WARN_ON_ONCE(error);
			error = -EBADF;
		}
	}
out:
	trace_fcntl_setlk(inode, file_lock, error);
	locks_free_lock(file_lock);
	return error;
}
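
/*
 * Illustrative userspace sketch (added; not part of the original file):
 * taking an OFD (open file description) write lock. As the code above
 * enforces, l_pid must be zero for the F_OFD_* commands.
 *
 *	struct flock fl = {
 *		.l_type = F_WRLCK, .l_whence = SEEK_SET,
 *		.l_start = 0, .l_len = 0, .l_pid = 0,
 *	};
 *	fcntl(fd, F_OFD_SETLK, &fl);	// owned by the open file, not the pid
 */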
#if BITS_PER_LONG == 32
/* Report the first existing lock that would conflict with l.
 * This implements the F_GETLK command of fcntl().
 */
int fcntl_getlk64(struct file *filp, unsigned int cmd, struct flock64 *flock)
{
	struct file_lock *fl;
	int error;

	fl = locks_alloc_lock();
	if (fl == NULL)
		return -ENOMEM;

	error = -EINVAL;
	if (flock->l_type != F_RDLCK && flock->l_type != F_WRLCK)
		goto out;

	error = flock64_to_posix_lock(filp, fl, flock);
	if (error)
		goto out;

	if (cmd == F_OFD_GETLK) {
		error = -EINVAL;
		if (flock->l_pid != 0)
			goto out;

		cmd = F_GETLK64;
		fl->fl_flags |= FL_OFDLCK;
		fl->fl_owner = filp;
	}

	error = vfs_test_lock(filp, fl);
	if (error)
		goto out;

	flock->l_type = fl->fl_type;
	if (fl->fl_type != F_UNLCK)
		posix_lock_to_flock64(flock, fl);

out:
	locks_free_lock(fl);
	return error;
}
/* Apply the lock described by l to an open file descriptor.
 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
 */
int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
		struct flock64 *flock)
{
	struct file_lock *file_lock = locks_alloc_lock();
	struct inode *inode = locks_inode(filp);
	struct file *f;
	int error;

	if (file_lock == NULL)
		return -ENOLCK;

	/* Don't allow mandatory locks on files that may be memory mapped
	 * and shared.
	 */
	if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
		error = -EAGAIN;
		goto out;
	}

	error = flock64_to_posix_lock(filp, file_lock, flock);
	if (error)
		goto out;

	error = check_fmode_for_setlk(file_lock);
	if (error)
		goto out;

	/*
	 * If the cmd is requesting file-private locks, then set the
	 * FL_OFDLCK flag and override the owner.
	 */
	switch (cmd) {
	case F_OFD_SETLK:
		error = -EINVAL;
		if (flock->l_pid != 0)
			goto out;

		cmd = F_SETLK64;
		file_lock->fl_flags |= FL_OFDLCK;
		file_lock->fl_owner = filp;
		break;
	case F_OFD_SETLKW:
		error = -EINVAL;
		if (flock->l_pid != 0)
			goto out;

		cmd = F_SETLKW64;
		file_lock->fl_flags |= FL_OFDLCK;
		file_lock->fl_owner = filp;
		fallthrough;
	case F_SETLKW64:
		file_lock->fl_flags |= FL_SLEEP;
	}

	error = do_lock_file_wait(filp, cmd, file_lock);

	/*
	 * Attempt to detect a close/fcntl race and recover by releasing the
	 * lock that was just acquired. There is no need to do that when we're
	 * unlocking though, or for OFD locks.
	 */
	if (!error && file_lock->fl_type != F_UNLCK &&
	    !(file_lock->fl_flags & FL_OFDLCK)) {
		/*
		 * We need that spin_lock here - it prevents reordering between
		 * update of i_flctx->flc_posix and check for it done in
		 * close(). rcu_read_lock() wouldn't do.
		 */
		spin_lock(&current->files->file_lock);
		f = fcheck(fd);
		spin_unlock(&current->files->file_lock);
		if (f != filp) {
			file_lock->fl_type = F_UNLCK;
			error = do_lock_file_wait(filp, cmd, file_lock);
			WARN_ON_ONCE(error);
			error = -EBADF;
		}
	}
out:
	locks_free_lock(file_lock);
	return error;
}
#endif /* BITS_PER_LONG == 32 */
/*
 * This function is called when the file is being removed
 * from the task's fd array. POSIX locks belonging to this task
 * are deleted at this time.
 */
void locks_remove_posix(struct file *filp, fl_owner_t owner)
{
	int error;
	struct inode *inode = locks_inode(filp);
	struct file_lock lock;
	struct file_lock_context *ctx;

	/*
	 * If there are no locks held on this file, we don't need to call
	 * posix_lock_file(). Another process could be setting a lock on this
	 * file at the same time, but we wouldn't remove that lock anyway.
	 */
	ctx = smp_load_acquire(&inode->i_flctx);
	if (!ctx || list_empty(&ctx->flc_posix))
		return;

	locks_init_lock(&lock);
	lock.fl_type = F_UNLCK;
	lock.fl_flags = FL_POSIX | FL_CLOSE;
	lock.fl_start = 0;
	lock.fl_end = OFFSET_MAX;
	lock.fl_owner = owner;
	lock.fl_pid = current->tgid;
	lock.fl_file = filp;
	lock.fl_ops = NULL;
	lock.fl_lmops = NULL;

	error = vfs_lock_file(filp, F_SETLK, &lock, NULL);

	if (lock.fl_ops && lock.fl_ops->fl_release_private)
		lock.fl_ops->fl_release_private(&lock);
	trace_locks_remove_posix(inode, &lock, error);
}
EXPORT_SYMBOL(locks_remove_posix);
/* The i_flctx must be valid when calling into here */
static void
locks_remove_flock(struct file *filp, struct file_lock_context *flctx)
{
	struct file_lock fl;
	struct inode *inode = locks_inode(filp);

	if (list_empty(&flctx->flc_flock))
		return;

	flock_make_lock(filp, LOCK_UN, &fl);
	fl.fl_flags |= FL_CLOSE;

	if (filp->f_op->flock)
		filp->f_op->flock(filp, F_SETLKW, &fl);
	else
		flock_lock_inode(inode, &fl);

	if (fl.fl_ops && fl.fl_ops->fl_release_private)
		fl.fl_ops->fl_release_private(&fl);
}
/* The i_flctx must be valid when calling into here */
static void
locks_remove_lease(struct file *filp, struct file_lock_context *ctx)
{
	struct file_lock *fl, *tmp;
	LIST_HEAD(dispose);

	if (list_empty(&ctx->flc_lease))
		return;

	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list)
		if (filp == fl->fl_file)
			lease_modify(fl, F_UNLCK, &dispose);
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);

	locks_dispose_list(&dispose);
}
/*
 * This function is called on the last close of an open file.
 */
void locks_remove_file(struct file *filp)
{
	struct file_lock_context *ctx;

	ctx = smp_load_acquire(&locks_inode(filp)->i_flctx);
	if (!ctx)
		return;

	/* remove any OFD locks */
	locks_remove_posix(filp, filp);

	/* remove flock locks */
	locks_remove_flock(filp, ctx);

	/* remove any leases */
	locks_remove_lease(filp, ctx);

	spin_lock(&ctx->flc_lock);
	locks_check_ctx_file_list(filp, &ctx->flc_posix, "POSIX");
	locks_check_ctx_file_list(filp, &ctx->flc_flock, "FLOCK");
	locks_check_ctx_file_list(filp, &ctx->flc_lease, "LEASE");
	spin_unlock(&ctx->flc_lock);
}
/**
 * vfs_cancel_lock - file byte range unblock lock
 * @filp: The file to apply the unblock to
 * @fl: The lock to be unblocked
 *
 * Used by lock managers to cancel blocked requests
 */
int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
{
	if (filp->f_op->lock)
		return filp->f_op->lock(filp, F_CANCELLK, fl);
	return 0;
}
EXPORT_SYMBOL_GPL(vfs_cancel_lock);
#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

struct locks_iterator {
	int	li_cpu;
	loff_t	li_pos;
};
static void lock_get_status(struct seq_file *f, struct file_lock *fl,
			    loff_t id, char *pfx)
{
	struct inode *inode = NULL;
	unsigned int fl_pid;
	struct pid_namespace *proc_pidns = proc_pid_ns(file_inode(f->file)->i_sb);

	fl_pid = locks_translate_pid(fl, proc_pidns);
	/*
	 * If lock owner is dead (and pid is freed) or not visible in current
	 * pidns, zero is shown as a pid value. Check lock info from
	 * init_pid_ns to get saved lock pid value.
	 */

	if (fl->fl_file != NULL)
		inode = locks_inode(fl->fl_file);

	seq_printf(f, "%lld:%s ", id, pfx);
	if (IS_POSIX(fl)) {
		if (fl->fl_flags & FL_ACCESS)
			seq_puts(f, "ACCESS");
		else if (IS_OFDLCK(fl))
			seq_puts(f, "OFDLCK");
		else
			seq_puts(f, "POSIX ");

		seq_printf(f, " %s ",
			     (inode == NULL) ? "*NOINODE*" :
			     mandatory_lock(inode) ? "MANDATORY" : "ADVISORY ");
	} else if (IS_FLOCK(fl)) {
		if (fl->fl_type & LOCK_MAND)
			seq_puts(f, "FLOCK  MSNFS     ");
		else
			seq_puts(f, "FLOCK  ADVISORY  ");
	} else if (IS_LEASE(fl)) {
		if (fl->fl_flags & FL_DELEG)
			seq_puts(f, "DELEG  ");
		else
			seq_puts(f, "LEASE  ");

		if (lease_breaking(fl))
			seq_puts(f, "BREAKING  ");
		else if (fl->fl_file)
			seq_puts(f, "ACTIVE    ");
		else
			seq_puts(f, "BREAKER   ");
	} else {
		seq_puts(f, "UNKNOWN UNKNOWN  ");
	}
	if (fl->fl_type & LOCK_MAND) {
		seq_printf(f, "%s ",
			       (fl->fl_type & LOCK_READ)
			       ? (fl->fl_type & LOCK_WRITE) ? "RW   " : "READ "
			       : (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE ");
	} else {
		int type = IS_LEASE(fl) ? target_leasetype(fl) : fl->fl_type;

		seq_printf(f, "%s ", (type == F_WRLCK) ? "WRITE" :
				     (type == F_RDLCK) ? "READ" : "UNLCK");
	}
	if (inode) {
		/* userspace relies on this representation of dev_t */
		seq_printf(f, "%d %02x:%02x:%lu ", fl_pid,
				MAJOR(inode->i_sb->s_dev),
				MINOR(inode->i_sb->s_dev), inode->i_ino);
	} else {
		seq_printf(f, "%d <none>:0 ", fl_pid);
	}
	if (IS_POSIX(fl)) {
		if (fl->fl_end == OFFSET_MAX)
			seq_printf(f, "%Ld EOF\n", fl->fl_start);
		else
			seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
	} else {
		seq_puts(f, "0 EOF\n");
	}
}
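
/*
 * Example of a resulting /proc/locks line (added for clarity; the values
 * are illustrative):
 *
 *	1: POSIX  ADVISORY  WRITE 1234 08:02:131090 0 EOF
 *
 * i.e. "<id>: <class> <mode> <type> <pid> <maj:min:ino> <start> <end>".
 */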
static int locks_show(struct seq_file *f, void *v)
{
	struct locks_iterator *iter = f->private;
	struct file_lock *fl, *bfl;
	struct pid_namespace *proc_pidns = proc_pid_ns(file_inode(f->file)->i_sb);

	fl = hlist_entry(v, struct file_lock, fl_link);

	if (locks_translate_pid(fl, proc_pidns) == 0)
		return 0;

	lock_get_status(f, fl, iter->li_pos, "");

	list_for_each_entry(bfl, &fl->fl_blocked_requests, fl_blocked_member)
		lock_get_status(f, bfl, iter->li_pos, " ->");

	return 0;
}
static void __show_fd_locks(struct seq_file *f,
			struct list_head *head, int *id,
			struct file *filp, struct files_struct *files)
{
	struct file_lock *fl;

	list_for_each_entry(fl, head, fl_list) {

		if (filp != fl->fl_file)
			continue;
		if (fl->fl_owner != files &&
		    fl->fl_owner != filp)
			continue;

		(*id)++;
		seq_puts(f, "lock:\t");
		lock_get_status(f, fl, *id, "");
	}
}
void show_fd_locks(struct seq_file *f,
		  struct file *filp, struct files_struct *files)
{
	struct inode *inode = locks_inode(filp);
	struct file_lock_context *ctx;
	int id = 0;

	ctx = smp_load_acquire(&inode->i_flctx);
	if (!ctx)
		return;

	spin_lock(&ctx->flc_lock);
	__show_fd_locks(f, &ctx->flc_flock, &id, filp, files);
	__show_fd_locks(f, &ctx->flc_posix, &id, filp, files);
	__show_fd_locks(f, &ctx->flc_lease, &id, filp, files);
	spin_unlock(&ctx->flc_lock);
}
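
/*
 * Example of a resulting /proc/<pid>/fdinfo/<fd> entry (added for clarity;
 * the values are illustrative): the "lock:" prefix from __show_fd_locks()
 * followed by the same fields lock_get_status() emits for /proc/locks.
 *
 *	lock:	1: POSIX  ADVISORY  WRITE 1234 08:02:131090 0 EOF
 */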
static void *locks_start(struct seq_file *f, loff_t *pos)
	__acquires(&blocked_lock_lock)
{
	struct locks_iterator *iter = f->private;

	iter->li_pos = *pos + 1;
	percpu_down_write(&file_rwsem);
	spin_lock(&blocked_lock_lock);
	return seq_hlist_start_percpu(&file_lock_list.hlist, &iter->li_cpu, *pos);
}

static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
{
	struct locks_iterator *iter = f->private;

	++iter->li_pos;
	return seq_hlist_next_percpu(v, &file_lock_list.hlist, &iter->li_cpu, pos);
}

static void locks_stop(struct seq_file *f, void *v)
	__releases(&blocked_lock_lock)
{
	spin_unlock(&blocked_lock_lock);
	percpu_up_write(&file_rwsem);
}
static const struct seq_operations locks_seq_operations = {
	.start	= locks_start,
	.next	= locks_next,
	.stop	= locks_stop,
	.show	= locks_show,
};

static int __init proc_locks_init(void)
{
	proc_create_seq_private("locks", 0, NULL, &locks_seq_operations,
			sizeof(struct locks_iterator), NULL);
	return 0;
}
fs_initcall(proc_locks_init);
#endif /* CONFIG_PROC_FS */
static int __init filelock_init(void)
{
	int i;

	flctx_cache = kmem_cache_create("file_lock_ctx",
			sizeof(struct file_lock_context), 0, SLAB_PANIC, NULL);

	filelock_cache = kmem_cache_create("file_lock_cache",
			sizeof(struct file_lock), 0, SLAB_PANIC, NULL);

	for_each_possible_cpu(i) {
		struct file_lock_list_struct *fll = per_cpu_ptr(&file_lock_list, i);

		spin_lock_init(&fll->lock);
		INIT_HLIST_HEAD(&fll->hlist);
	}

	lease_notifier_chain_init();
	return 0;
}
core_initcall(filelock_init);