The text below describes the locking rules for VFS-related methods.
It is (believed to be) up-to-date. *Please*, if you change anything in
prototypes or locking protocols - update this file. And update the relevant
instances in the tree; don't leave that to maintainers of filesystems/devices/
etc. At the very least, put the list of dubious cases at the end of this file.
Don't turn it into a log - maintainers of out-of-tree code are supposed to
be able to use diff(1).
Thing currently missing here: socket operations. Alexey?

dentry_operations
=================

prototypes::
        int (*d_revalidate)(struct dentry *, unsigned int);
        int (*d_weak_revalidate)(struct dentry *, unsigned int);
        int (*d_hash)(const struct dentry *, struct qstr *);
        int (*d_compare)(const struct dentry *,
                        unsigned int, const char *, const struct qstr *);
        int (*d_delete)(struct dentry *);
        int (*d_init)(struct dentry *);
        void (*d_release)(struct dentry *);
        void (*d_iput)(struct dentry *, struct inode *);
        char *(*d_dname)(struct dentry *dentry, char *buffer, int buflen);
        struct vfsmount *(*d_automount)(struct path *path);
        int (*d_manage)(const struct path *, bool);
        struct dentry *(*d_real)(struct dentry *, const struct inode *);
locking rules:

================== =========== ======== ============== ========
ops                rename_lock ->d_lock may block      rcu-walk
================== =========== ======== ============== ========
d_revalidate:      no          no       yes (ref-walk) maybe
d_weak_revalidate: no          no       yes            no
d_hash:            no          no       no             maybe
d_compare:         yes         no       no             maybe
d_delete:          no          yes      no             no
d_init:            no          no       yes            no
d_release:         no          no       yes            no
d_iput:            no          no       yes            no
d_dname:           no          no       no             no
d_automount:       no          no       yes            no
d_manage:          no          no       yes (ref-walk) maybe
d_real:            no          no       yes            no
================== =========== ======== ============== ========
inode_operations
================

prototypes::

        int (*create) (struct inode *,struct dentry *,umode_t, bool);
        struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
        int (*link) (struct dentry *,struct inode *,struct dentry *);
        int (*unlink) (struct inode *,struct dentry *);
        int (*symlink) (struct inode *,struct dentry *,const char *);
        int (*mkdir) (struct inode *,struct dentry *,umode_t);
        int (*rmdir) (struct inode *,struct dentry *);
        int (*mknod) (struct inode *,struct dentry *,umode_t,dev_t);
        int (*rename) (struct inode *, struct dentry *,
                        struct inode *, struct dentry *, unsigned int);
        int (*readlink) (struct dentry *, char __user *,int);
        const char *(*get_link) (struct dentry *, struct inode *, struct delayed_call *);
        void (*truncate) (struct inode *);
        int (*permission) (struct inode *, int, unsigned int);
        struct posix_acl * (*get_acl)(struct inode *, int, bool);
        int (*setattr) (struct dentry *, struct iattr *);
        int (*getattr) (const struct path *, struct kstat *, u32, unsigned int);
        ssize_t (*listxattr) (struct dentry *, char *, size_t);
        int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len);
        void (*update_time)(struct inode *, struct timespec *, int);
        int (*atomic_open)(struct inode *, struct dentry *,
                        struct file *, unsigned open_flag,
                        umode_t create_mode);
        int (*tmpfile) (struct inode *, struct dentry *, umode_t);
        int (*fileattr_set)(struct user_namespace *mnt_userns,
                        struct dentry *dentry, struct fileattr *fa);
        int (*fileattr_get)(struct dentry *dentry, struct fileattr *fa);
locking rules:
        all may block

============= =============================================
ops           i_rwsem(inode)
============= =============================================
lookup:       shared
create:       exclusive
link:         exclusive (both)
mknod:        exclusive
symlink:      exclusive
mkdir:        exclusive
unlink:       exclusive (both)
rmdir:        exclusive (both)(see below)
rename:       exclusive (all) (see below)
readlink:     no
get_link:     no
setattr:      exclusive
permission:   no (may not block if called in rcu-walk mode)
get_acl:      no
getattr:      no
listxattr:    no
fiemap:       no
update_time:  no
atomic_open:  shared (exclusive if O_CREAT is set in open flags)
tmpfile:      no
fileattr_get: no or exclusive
fileattr_set: exclusive
============= =============================================
Additionally, ->rmdir(), ->unlink() and ->rename() have ->i_rwsem
exclusive on the victim.
cross-directory ->rename() has (per-superblock) ->s_vfs_rename_mutex.

See Documentation/filesystems/directory-locking.rst for a more detailed
discussion of the locking scheme for directory operations.
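As an illustration of the rules above, here is a sketch (not filesystem
code) of how a VFS caller ends up holding these locks around a
cross-directory ->rename(): lock_rename() takes ->s_vfs_rename_mutex and
then i_rwsem on both parents. The dentry names old_parent/new_parent are
hypothetical::

        struct dentry *trap;

        trap = lock_rename(new_parent, old_parent);
        /* both parents' ->i_rwsem are now held exclusive; after checking
         * that neither source nor target is the "trap" ancestor, the
         * victim is locked and ->rename() gets called */
        unlock_rename(new_parent, old_parent);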
xattr_handler operations
========================

prototypes::

        bool (*list)(struct dentry *dentry);
        int (*get)(const struct xattr_handler *handler, struct dentry *dentry,
                   struct inode *inode, const char *name, void *buffer,
                   size_t size);
        int (*set)(const struct xattr_handler *handler,
                   struct user_namespace *mnt_userns,
                   struct dentry *dentry, struct inode *inode, const char *name,
                   const void *buffer, size_t size, int flags);
super_operations
================

prototypes::

        struct inode *(*alloc_inode)(struct super_block *sb);
        void (*free_inode)(struct inode *);
        void (*destroy_inode)(struct inode *);
        void (*dirty_inode) (struct inode *, int flags);
        int (*write_inode) (struct inode *, struct writeback_control *wbc);
        int (*drop_inode) (struct inode *);
        void (*evict_inode) (struct inode *);
        void (*put_super) (struct super_block *);
        int (*sync_fs)(struct super_block *sb, int wait);
        int (*freeze_fs) (struct super_block *);
        int (*unfreeze_fs) (struct super_block *);
        int (*statfs) (struct dentry *, struct kstatfs *);
        int (*remount_fs) (struct super_block *, int *, char *);
        void (*umount_begin) (struct super_block *);
        int (*show_options)(struct seq_file *, struct dentry *);
        ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
        ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
        int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
locking rules:
        All may block [not true, see below]
====================== ============ ========================
ops                    s_umount     note
====================== ============ ========================
alloc_inode:
free_inode:                         called from RCU callback
destroy_inode:
dirty_inode:
write_inode:
drop_inode:                         !!!inode->i_lock!!!
evict_inode:
put_super:             write
sync_fs:               read
freeze_fs:             write
unfreeze_fs:           write
statfs:                maybe(read)  (see below)
remount_fs:            write
umount_begin:          no
show_options:          no           (namespace_sem)
quota_read:            no           (see below)
quota_write:           no           (see below)
bdev_try_to_free_page: no           (see below)
====================== ============ ========================
->statfs() has s_umount (shared) when called by ustat(2) (native or
compat), but that's an accident of bad API; s_umount is there to pin
the superblock down when we only have a dev_t given to us by userland to
identify the superblock. Everything else (statfs(), fstatfs(), etc.)
doesn't hold it when calling ->statfs() - the superblock is pinned down
by resolving the pathname passed to the syscall.

->quota_read() and ->quota_write() functions are both guaranteed to
be the only ones operating on the quota file by the quota code (via
dqio_sem) (unless an admin really wants to screw up something and
writes to quota files with quotas on). For other details about locking
see also the dquot_operations section.

->bdev_try_to_free_page is called from the ->releasepage handler of
the block device inode. See there for more details.
file_system_type
================

prototypes::

        struct dentry *(*mount) (struct file_system_type *, int,
                        const char *, void *);
        void (*kill_sb) (struct super_block *);

locking rules:

======= =========
ops     may block
======= =========
mount   yes
kill_sb yes
======= =========
->mount() returns ERR_PTR or the root dentry; its superblock should be locked
on return.

->kill_sb() takes a write-locked superblock, does all shutdown work on it,
unlocks and drops the reference.
address_space_operations
========================

prototypes::

        int (*writepage)(struct page *page, struct writeback_control *wbc);
        int (*readpage)(struct file *, struct page *);
        int (*writepages)(struct address_space *, struct writeback_control *);
        int (*set_page_dirty)(struct page *page);
        void (*readahead)(struct readahead_control *);
        int (*readpages)(struct file *filp, struct address_space *mapping,
                        struct list_head *pages, unsigned nr_pages);
        int (*write_begin)(struct file *, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned flags,
                        struct page **pagep, void **fsdata);
        int (*write_end)(struct file *, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned copied,
                        struct page *page, void *fsdata);
        sector_t (*bmap)(struct address_space *, sector_t);
        void (*invalidatepage) (struct page *, unsigned int, unsigned int);
        int (*releasepage) (struct page *, int);
        void (*freepage)(struct page *);
        int (*direct_IO)(struct kiocb *, struct iov_iter *iter);
        bool (*isolate_page) (struct page *, isolate_mode_t);
        int (*migratepage)(struct address_space *, struct page *, struct page *);
        void (*putback_page) (struct page *);
        int (*launder_page)(struct page *);
        int (*is_partially_uptodate)(struct page *, unsigned long, unsigned long);
        int (*error_remove_page)(struct address_space *, struct page *);
        int (*swap_activate)(struct file *);
        int (*swap_deactivate)(struct file *);
locking rules:
        All except set_page_dirty and freepage may block
====================== ======================== ========= ===============
ops                    PageLocked(page)         i_rwsem   invalidate_lock
====================== ======================== ========= ===============
writepage:             yes, unlocks (see below)
readpage:              yes, unlocks                       shared
writepages:
set_page_dirty:        no
readahead:             yes, unlocks                       shared
readpages:             no                                 shared
write_begin:           locks the page           exclusive
write_end:             yes, unlocks             exclusive
bmap:
invalidatepage:        yes                                exclusive
releasepage:           yes
freepage:              yes
direct_IO:
isolate_page:          yes
migratepage:           yes (both)
putback_page:          yes
launder_page:          yes
is_partially_uptodate: yes
error_remove_page:     yes
swap_activate:         no
swap_deactivate:       no
====================== ======================== ========= ===============
->write_begin(), ->write_end() and ->readpage() may be called from
the request handler (/dev/loop).

->readpage() unlocks the page, either synchronously or via I/O
completion.

->readahead() unlocks the pages that I/O is attempted on like ->readpage().

->readpages() populates the pagecache with the passed pages and starts
I/O against them. They come unlocked upon I/O completion.
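A minimal synchronous ->readpage() following the unlock rule above could
look like the sketch below; example_fill_page() is a hypothetical helper
that reads data into the page, everything else is the standard page-cache
API::

        static int example_readpage(struct file *file, struct page *page)
        {
                int err = example_fill_page(page);      /* hypothetical I/O */

                if (!err)
                        SetPageUptodate(page);
                unlock_page(page);      /* ->readpage() always unlocks */
                return err;
        }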
->writepage() is used for two purposes: for "memory cleansing" and for
"sync". These are quite different operations and the behaviour may differ
depending upon the mode.

If writepage is called for sync (wbc->sync_mode != WB_SYNC_NONE) then
it *must* start I/O against the page, even if that would involve
blocking on in-progress I/O.

If writepage is called for memory cleansing (sync_mode ==
WB_SYNC_NONE) then its role is to get as much writeout underway as
possible. So writepage should try to avoid blocking against
currently-in-progress I/O.

If the filesystem is not called for "sync" and it determines that it
would need to block against in-progress I/O to be able to start new I/O
against the page, the filesystem should redirty the page with
redirty_page_for_writepage(), then unlock the page and return zero.
This may also be done to avoid internal deadlocks, but rarely.

If the filesystem is called for sync then it must wait on any
in-progress I/O and then start new I/O.

The filesystem should unlock the page synchronously, before returning to the
caller, unless ->writepage() returns the special AOP_WRITEPAGE_ACTIVATE
value. AOP_WRITEPAGE_ACTIVATE means that the page cannot really be written out
currently, and the VM should stop calling ->writepage() on this page for some
time. The VM does this by moving the page to the head of the active list,
hence the name.
Unless the filesystem is going to redirty_page_for_writepage(), unlock the page
and return zero, writepage *must* run set_page_writeback() against the page,
followed by unlocking it. Once set_page_writeback() has been run against the
page, write I/O can be submitted and the write I/O completion handler must run
end_page_writeback() once the I/O is complete. If no I/O is submitted, the
filesystem must run end_page_writeback() against the page before returning from
writepage.

That is: after 2.5.12, pages which are under writeout are *not* locked. Note,
if the filesystem needs the page to be locked during writeout, that is ok, too,
the page is allowed to be unlocked at any point in time between the calls to
set_page_writeback() and end_page_writeback().

Note, failure to run either redirty_page_for_writepage() or the combination of
set_page_writeback()/end_page_writeback() on a page submitted to writepage
will leave the page itself marked clean but it will be tagged as dirty in the
radix tree. This incoherency can lead to all sorts of hard-to-debug problems
in the filesystem like having dirty inodes at umount and losing written data.
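Putting those rules together, a ->writepage() skeleton looks roughly like
the sketch below; example_would_block() and example_submit_io() are
hypothetical filesystem internals (the latter's completion handler is
assumed to call end_page_writeback())::

        static int example_writepage(struct page *page,
                                     struct writeback_control *wbc)
        {
                if (wbc->sync_mode == WB_SYNC_NONE &&
                    example_would_block(page)) {
                        /* memory cleansing: don't block, try again later */
                        redirty_page_for_writepage(wbc, page);
                        unlock_page(page);
                        return 0;
                }

                set_page_writeback(page);
                unlock_page(page);      /* pages under writeout are not locked */

                if (example_submit_io(page) < 0) {
                        /* no I/O was submitted: end writeback ourselves */
                        end_page_writeback(page);
                        return -EIO;
                }
                return 0;
        }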
->writepages() is used for periodic writeback and for syscall-initiated
sync operations. The address_space should start I/O against at least
``*nr_to_write`` pages. ``*nr_to_write`` must be decremented for each page
which is written. The address_space implementation may write more (or less)
pages than ``*nr_to_write`` asks for, but it should try to be reasonably close.
If nr_to_write is NULL, all dirty pages must be written.

writepages should _only_ write pages which are present on
mapping->io_pages.
->set_page_dirty() is called from various places in the kernel
when the target page is marked as needing writeback. It may be called
under spinlock (it cannot block) and is sometimes called with the page
not locked.
->bmap() is currently used by legacy ioctl() (FIBMAP) provided by some
filesystems and by the swapper. The latter will eventually go away. Please,
keep it that way and don't breed new callers.
->invalidatepage() is called when the filesystem must attempt to drop
some or all of the buffers from the page when it is being truncated. It
returns zero on success. If ->invalidatepage is zero, the kernel uses
block_invalidatepage() instead. The filesystem must exclusively acquire
invalidate_lock before invalidating page cache in the truncate / hole punch
path (and thus calling into ->invalidatepage) to block races between page
cache invalidation and page cache filling functions (fault, read, ...).
->releasepage() is called when the kernel is about to try to drop the
buffers from the page in preparation for freeing it. It returns zero to
indicate that the buffers are (or may be) freeable. If ->releasepage is zero,
the kernel assumes that the fs has no private interest in the buffers.

->freepage() is called when the kernel is done dropping the page
from the page cache.
->launder_page() may be called prior to releasing a page if
it is still found to be dirty. It returns zero if the page was successfully
cleaned, or an error value if not. Note that in order to prevent the page
getting mapped back in and redirtied, it needs to be kept locked
across the entire operation.
->swap_activate() will be called with a non-zero argument on
files backing (non block device backed) swapfiles. A return value
of zero indicates success, in which case this file can be used for
backing swapspace. The swapspace operations will be proxied to the
address space operations.

->swap_deactivate() will be called in the sys_swapoff()
path after ->swap_activate() returned success.
file_lock_operations
====================

prototypes::

        void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
        void (*fl_release_private)(struct file_lock *);

locking rules:

=================== ============= =========
ops                 inode->i_lock may block
=================== ============= =========
fl_copy_lock:       yes           no
fl_release_private: maybe         maybe[1]_
=================== ============= =========

.. [1]:
        ->fl_release_private for flock or POSIX locks is currently allowed
        to block. Leases however can still be freed while the i_lock is held
        and so fl_release_private called on a lease should not block.
lock_manager_operations
=======================

prototypes::

        void (*lm_notify)(struct file_lock *);  /* unblock callback */
        int (*lm_grant)(struct file_lock *, struct file_lock *, int);
        void (*lm_break)(struct file_lock *); /* break_lease callback */
        int (*lm_change)(struct file_lock **, int);
        bool (*lm_breaker_owns_lease)(struct file_lock *);

locking rules:

====================== ============= ================= =========
ops                    inode->i_lock blocked_lock_lock may block
====================== ============= ================= =========
lm_notify:             yes           yes               no
lm_grant:              no            no                no
lm_break:              yes           no                no
lm_change:             yes           no                no
lm_breaker_owns_lease: no            no                no
====================== ============= ================= =========
buffer_head
===========

prototypes::

        void (*b_end_io)(struct buffer_head *bh, int uptodate);

locking rules:

called from interrupts. In other words, extreme care is needed here.
bh is locked, but that's the only guarantee we have here. Currently only
RAID1, highmem, fs/buffer.c, and fs/ntfs/aops.c are providing these. Block
devices call this method upon I/O completion.
block_device_operations
=======================

prototypes::

        int (*open) (struct block_device *, fmode_t);
        int (*release) (struct gendisk *, fmode_t);
        int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
        int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
        int (*direct_access) (struct block_device *, sector_t, void **,
                        unsigned long *);
        void (*unlock_native_capacity) (struct gendisk *);
        int (*getgeo)(struct block_device *, struct hd_geometry *);
        void (*swap_slot_free_notify) (struct block_device *, unsigned long);

locking rules:

======================= ===================
ops                     bd_mutex
======================= ===================
open:                   yes
release:                yes
ioctl:                  no  (might be called from within open/release)
compat_ioctl:           no
direct_access:          no
unlock_native_capacity: no
getgeo:                 no
swap_slot_free_notify:  no  (see below)
======================= ===================
swap_slot_free_notify is called with swap_lock and sometimes the page lock
held.
file_operations
===============

prototypes::

        loff_t (*llseek) (struct file *, loff_t, int);
        ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
        ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
        ssize_t (*read_iter) (struct kiocb *, struct iov_iter *);
        ssize_t (*write_iter) (struct kiocb *, struct iov_iter *);
        int (*iopoll) (struct kiocb *kiocb, bool spin);
        int (*iterate) (struct file *, struct dir_context *);
        int (*iterate_shared) (struct file *, struct dir_context *);
        __poll_t (*poll) (struct file *, struct poll_table_struct *);
        long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
        long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
        int (*mmap) (struct file *, struct vm_area_struct *);
        int (*open) (struct inode *, struct file *);
        int (*flush) (struct file *);
        int (*release) (struct inode *, struct file *);
        int (*fsync) (struct file *, loff_t start, loff_t end, int datasync);
        int (*fasync) (int, struct file *, int);
        int (*lock) (struct file *, int, struct file_lock *);
        ssize_t (*sendpage) (struct file *, struct page *, int, size_t,
                        loff_t *, int);
        unsigned long (*get_unmapped_area)(struct file *, unsigned long,
                        unsigned long, unsigned long, unsigned long);
        int (*check_flags)(int);
        int (*flock) (struct file *, int, struct file_lock *);
        ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *,
                        size_t, unsigned int);
        ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *,
                        size_t, unsigned int);
        int (*setlease)(struct file *, long, struct file_lock **, void **);
        long (*fallocate)(struct file *, int, loff_t, loff_t);
        void (*show_fdinfo)(struct seq_file *m, struct file *f);
        unsigned (*mmap_capabilities)(struct file *);
        ssize_t (*copy_file_range)(struct file *, loff_t, struct file *,
                        loff_t, size_t, unsigned int);
        loff_t (*remap_file_range)(struct file *file_in, loff_t pos_in,
                        struct file *file_out, loff_t pos_out,
                        loff_t len, unsigned int remap_flags);
        int (*fadvise)(struct file *, loff_t, loff_t, int);
locking rules:
        All may block.

->llseek() locking has moved from llseek to the individual llseek
implementations. If your fs is not using generic_file_llseek, you
need to acquire and release the appropriate locks in your ->llseek().
For many filesystems, it is probably safe to acquire the inode
lock or just to use i_size_read() instead.
Note: this does not protect the file->f_pos against concurrent modifications
since this is something userspace has to take care of.
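A sketch of a ->llseek() instance following that advice, using
i_size_read() so that no lock is needed for SEEK_END; i_size_read(),
vfs_setpos() and generic_file_llseek() are the standard helpers::

        static loff_t example_llseek(struct file *file, loff_t offset, int whence)
        {
                struct inode *inode = file_inode(file);

                switch (whence) {
                case SEEK_SET:
                        break;
                case SEEK_CUR:
                        offset += file->f_pos;
                        break;
                case SEEK_END:
                        offset += i_size_read(inode);   /* no i_rwsem needed */
                        break;
                default:
                        return -EINVAL;
                }
                return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
        }

Filesystems that don't need anything special here can usually just set
.llseek = generic_file_llseek, which does roughly the equivalent of the
above.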
->iterate() is called with i_rwsem exclusive.

->iterate_shared() is called with i_rwsem at least shared.
->fasync() is responsible for maintaining the FASYNC bit in filp->f_flags.
Most instances call fasync_helper(), which does that maintenance, so it's
not normally something one needs to worry about. Return values > 0 will be
mapped to zero in the VFS layer.
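A typical instance is just a thin wrapper around fasync_helper(); the
struct example_dev and its fasync_queue member below are hypothetical::

        struct example_dev {
                struct fasync_struct *fasync_queue;
        };

        static int example_fasync(int fd, struct file *filp, int on)
        {
                struct example_dev *dev = filp->private_data;

                /* fasync_helper() adds/removes the entry and maintains FASYNC */
                return fasync_helper(fd, filp, on, &dev->fasync_queue);
        }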
->readdir() and ->ioctl() on directories must be changed. Ideally we would
move ->readdir() to inode_operations and use a separate method for directory
->ioctl() or kill the latter completely. One of the problems is that for
anything that resembles union-mount we won't have a struct file for all
components. And there are other reasons why the current interface is a mess...

->read on directories probably must go away - we should just enforce -EISDIR
in sys_read() and friends.
->setlease operations should call generic_setlease() before or after setting
the lease within the individual filesystem to record the result of the
operation.
The ->fallocate implementation must be really careful to maintain page cache
consistency when punching holes or performing other operations that invalidate
page cache contents. Usually the filesystem needs to call
truncate_inode_pages_range() to invalidate the relevant range of the page
cache. However the filesystem usually also needs to update its internal (and
on disk) view of the file offset -> disk block mapping. Until this update is
finished, the filesystem needs to block page faults and reads from reloading
now-stale page cache contents from the disk. Since VFS acquires
mapping->invalidate_lock in shared mode when loading pages from disk
(filemap_fault(), filemap_read(), readahead paths), the fallocate
implementation must take the invalidate_lock to prevent reloading.
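A sketch of that sequence for a hole punch; example_remove_blocks() stands
in for the hypothetical filesystem-specific block mapping update, and
truncate_pagecache_range() unmaps the range and then calls
truncate_inode_pages_range()::

        static long example_punch_hole(struct inode *inode, loff_t offset,
                                       loff_t len)
        {
                struct address_space *mapping = inode->i_mapping;
                long ret;

                inode_lock(inode);
                /* block faults and reads from refilling the page cache */
                filemap_invalidate_lock(mapping);
                truncate_pagecache_range(inode, offset, offset + len - 1);
                ret = example_remove_blocks(inode, offset, len);
                filemap_invalidate_unlock(mapping);
                inode_unlock(inode);
                return ret;
        }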
->copy_file_range and ->remap_file_range implementations need to serialize
against modifications of file data while the operation is running. For
blocking changes through write(2) and similar operations inode->i_rwsem can be
used. To block changes to file contents via a memory mapping during the
operation, the filesystem must take mapping->invalidate_lock to coordinate
with ->page_mkwrite.
dquot_operations
================

prototypes::

        int (*write_dquot) (struct dquot *);
        int (*acquire_dquot) (struct dquot *);
        int (*release_dquot) (struct dquot *);
        int (*mark_dirty) (struct dquot *);
        int (*write_info) (struct super_block *, int);

These operations are intended to be more or less wrapping functions that
ensure proper locking with respect to the filesystem and call the generic
quota operations.
What filesystems should expect from the generic quota functions:

============== ============ =========================
ops            FS recursion Held locks when called
============== ============ =========================
write_dquot:   yes          dqonoff_sem or dqptr_sem
acquire_dquot: yes          dqonoff_sem or dqptr_sem
release_dquot: yes          dqonoff_sem or dqptr_sem
mark_dirty:    no           -
write_info:    yes          dqonoff_sem
============== ============ =========================
FS recursion means calling ->quota_read() and ->quota_write() from superblock
operations.

More details about quota locking can be found in fs/quota/dquot.c.
vm_operations_struct
====================

prototypes::

        void (*open)(struct vm_area_struct*);
        void (*close)(struct vm_area_struct*);
        vm_fault_t (*fault)(struct vm_area_struct*, struct vm_fault *);
        vm_fault_t (*page_mkwrite)(struct vm_area_struct *, struct vm_fault *);
        vm_fault_t (*pfn_mkwrite)(struct vm_area_struct *, struct vm_fault *);
        int (*access)(struct vm_area_struct *, unsigned long, void*, int, int);
locking rules:

============= ========= ===========================
ops           mmap_lock PageLocked(page)
============= ========= ===========================
open:         yes
close:        yes
fault:        yes       can return with page locked
map_pages:    yes
page_mkwrite: yes       can return with page locked
pfn_mkwrite:  yes
access:       yes
============= ========= ===========================
->fault() is called when a previously not present pte is about to be faulted
in. The filesystem must find and return the page associated with the passed in
"pgoff" in the vm_fault structure. If it is possible that the page may be
truncated and/or invalidated, then the filesystem must lock invalidate_lock,
then ensure the page is not already truncated (invalidate_lock will block
subsequent truncate), and then return with VM_FAULT_LOCKED and the page
locked. The VM will unlock the page.
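A simplified sketch of such a ->fault() instance; real code would read the
page in rather than fail when it is absent::

        static vm_fault_t example_fault(struct vm_fault *vmf)
        {
                struct address_space *mapping = vmf->vma->vm_file->f_mapping;
                struct page *page;

                /* invalidate_lock holds off truncation while we look up the
                 * page; the page lock keeps it stable once we drop it */
                filemap_invalidate_lock_shared(mapping);
                page = find_lock_page(mapping, vmf->pgoff);
                filemap_invalidate_unlock_shared(mapping);

                if (!page)
                        return VM_FAULT_SIGBUS; /* real code reads the page in */
                if (page->mapping != mapping) { /* raced with truncate */
                        unlock_page(page);
                        put_page(page);
                        return VM_FAULT_NOPAGE;
                }
                vmf->page = page;
                return VM_FAULT_LOCKED;         /* the VM unlocks the page */
        }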
->map_pages() is called when the VM asks to map easily accessible pages.
The filesystem should find and map pages associated with offsets from
"start_pgoff" till "end_pgoff". ->map_pages() is called with the page table
locked and must not block. If it's not possible to reach a page without
blocking, the filesystem should skip it. The filesystem should use
do_set_pte() to set up the page table entry. A pointer to the entry
associated with the page is passed in the "pte" field in the vm_fault
structure. Pointers to entries for other offsets should be calculated
relative to "pte".
->page_mkwrite() is called when a previously read-only pte is about to become
writeable. The filesystem again must ensure that there are no
truncate/invalidate races or races with operations such as ->remap_file_range
or ->copy_file_range, and then return with the page locked. Usually
mapping->invalidate_lock is suitable for proper serialization. If the page has
been truncated, the filesystem should not look up a new page like the ->fault()
handler, but simply return with VM_FAULT_NOPAGE, which will cause the VM to
retry the fault.
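A sketch of the corresponding ->page_mkwrite() checks; the block allocation
or dirtying a real filesystem would do at the marked point is elided::

        static vm_fault_t example_page_mkwrite(struct vm_fault *vmf)
        {
                struct page *page = vmf->page;
                struct address_space *mapping = vmf->vma->vm_file->f_mapping;

                filemap_invalidate_lock_shared(mapping);
                lock_page(page);
                if (page->mapping != mapping) { /* truncated under us */
                        unlock_page(page);
                        filemap_invalidate_unlock_shared(mapping);
                        return VM_FAULT_NOPAGE; /* the VM retries the fault */
                }
                /* fs-specific work (allocate blocks, mark dirty) goes here */
                filemap_invalidate_unlock_shared(mapping);
                return VM_FAULT_LOCKED;         /* page returned locked */
        }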
->pfn_mkwrite() is the same as page_mkwrite but when the pte is
VM_PFNMAP or VM_MIXEDMAP with a page-less entry. The expected return is
VM_FAULT_NOPAGE, or one of the VM_FAULT_ERROR types. The default behavior
after this call is to make the pte read-write, unless pfn_mkwrite returns
an error.
->access() is called when get_user_pages() fails in
access_process_vm(), typically used to debug a process through
/proc/pid/mem or ptrace. This function is needed only for
VM_IO | VM_PFNMAP VMAs.
--------------------------------------------------------------------------------

                        Dubious stuff

(if you break something or notice that it is broken and do not fix it yourself
- at least put it here)