// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/gfs2_ondisk.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/crc32.h>
#include <linux/writeback.h>
#include <linux/uaccess.h>
#include <linux/dlm.h>
#include <linux/dlm_plock.h>
#include <linux/delay.h>
#include <linux/backing-dev.h>
#include <linux/fileattr.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "aops.h"
#include "dir.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
/**
 * gfs2_llseek - seek to a location in a file
 * @file: the file
 * @offset: the offset to seek to
 * @whence: Where to seek from (SEEK_SET, SEEK_CUR, or SEEK_END)
 *
 * SEEK_END requires the glock for the file because it references the
 * file's size.
 *
 * Returns: The new offset, or errno
 */

static loff_t gfs2_llseek(struct file *file, loff_t offset, int whence)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_holder i_gh;
	loff_t error;

	switch (whence) {
	case SEEK_END:
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (!error) {
			error = generic_file_llseek(file, offset, whence);
			gfs2_glock_dq_uninit(&i_gh);
		}
		break;

	case SEEK_DATA:
		error = gfs2_seek_data(file, offset);
		break;

	case SEEK_HOLE:
		error = gfs2_seek_hole(file, offset);
		break;

	case SEEK_CUR:
	case SEEK_SET:
		/*
		 * These don't reference inode->i_size and don't depend on the
		 * block mapping, so we don't need the glock.
		 */
		error = generic_file_llseek(file, offset, whence);
		break;

	default:
		error = -EINVAL;
	}

	return error;
}
/**
 * gfs2_readdir - Iterator for a directory
 * @file: The directory to read from
 * @ctx: What to feed directory entries to
 *
 * Returns: errno
 */

static int gfs2_readdir(struct file *file, struct dir_context *ctx)
{
	struct inode *dir = file->f_mapping->host;
	struct gfs2_inode *dip = GFS2_I(dir);
	struct gfs2_holder d_gh;
	int error;

	error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
	if (error)
		return error;

	error = gfs2_dir_read(dir, ctx, &file->f_ra);

	gfs2_glock_dq_uninit(&d_gh);

	return error;
}
/*
 * struct fsflag_gfs2flag
 *
 * The FS_JOURNAL_DATA_FL flag maps to GFS2_DIF_INHERIT_JDATA for directories,
 * and to GFS2_DIF_JDATA for non-directories.
 */
static struct {
	u32 fsflag;
	u32 gfsflag;
} fsflag_gfs2flag[] = {
	{FS_SYNC_FL, GFS2_DIF_SYNC},
	{FS_IMMUTABLE_FL, GFS2_DIF_IMMUTABLE},
	{FS_APPEND_FL, GFS2_DIF_APPENDONLY},
	{FS_NOATIME_FL, GFS2_DIF_NOATIME},
	{FS_INDEX_FL, GFS2_DIF_EXHASH},
	{FS_TOPDIR_FL, GFS2_DIF_TOPDIR},
	{FS_JOURNAL_DATA_FL, GFS2_DIF_JDATA | GFS2_DIF_INHERIT_JDATA},
};
static inline u32 gfs2_gfsflags_to_fsflags(struct inode *inode, u32 gfsflags)
{
	int i;
	u32 fsflags = 0;

	if (S_ISDIR(inode->i_mode))
		gfsflags &= ~GFS2_DIF_JDATA;
	else
		gfsflags &= ~GFS2_DIF_INHERIT_JDATA;

	for (i = 0; i < ARRAY_SIZE(fsflag_gfs2flag); i++)
		if (gfsflags & fsflag_gfs2flag[i].gfsflag)
			fsflags |= fsflag_gfs2flag[i].fsflag;
	return fsflags;
}
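
/*
 * Worked example (illustrative): for a regular file with i_diskflags ==
 * (GFS2_DIF_JDATA | GFS2_DIF_SYNC), GFS2_DIF_INHERIT_JDATA is masked off
 * first (the inode is not a directory), and the loop above then yields
 * (FS_JOURNAL_DATA_FL | FS_SYNC_FL).
 */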
int gfs2_fileattr_get(struct dentry *dentry, struct fileattr *fa)
{
	struct inode *inode = d_inode(dentry);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int error;
	u32 fsflags;

	if (d_is_special(dentry))
		return -ENOTTY;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	error = gfs2_glock_nq(&gh);
	if (error)
		goto out_uninit;

	fsflags = gfs2_gfsflags_to_fsflags(inode, ip->i_diskflags);

	fileattr_fill_flags(fa, fsflags);

	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	return error;
}
void gfs2_set_inode_flags(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	unsigned int flags = inode->i_flags;

	flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_NOSEC);
	if ((ip->i_eattr == 0) && !is_sxid(inode->i_mode))
		flags |= S_NOSEC;
	if (ip->i_diskflags & GFS2_DIF_IMMUTABLE)
		flags |= S_IMMUTABLE;
	if (ip->i_diskflags & GFS2_DIF_APPENDONLY)
		flags |= S_APPEND;
	if (ip->i_diskflags & GFS2_DIF_NOATIME)
		flags |= S_NOATIME;
	if (ip->i_diskflags & GFS2_DIF_SYNC)
		flags |= S_SYNC;
	inode->i_flags = flags;
}
/* Flags that can be set by user space */
#define GFS2_FLAGS_USER_SET (GFS2_DIF_JDATA|			\
			     GFS2_DIF_IMMUTABLE|		\
			     GFS2_DIF_APPENDONLY|		\
			     GFS2_DIF_NOATIME|			\
			     GFS2_DIF_SYNC|			\
			     GFS2_DIF_TOPDIR|			\
			     GFS2_DIF_INHERIT_JDATA)
/**
 * do_gfs2_set_flags - set flags on an inode
 * @inode: The inode
 * @reqflags: The flags to set
 * @mask: Indicates which flags are valid
 *
 * Returns: errno
 */
static int do_gfs2_set_flags(struct inode *inode, u32 reqflags, u32 mask)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *bh;
	struct gfs2_holder gh;
	int error;
	u32 new_flags, flags;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (error)
		return error;

	error = 0;
	flags = ip->i_diskflags;
	new_flags = (flags & ~mask) | (reqflags & mask);
	if ((new_flags ^ flags) == 0)
		goto out;

	if (!IS_IMMUTABLE(inode)) {
		error = gfs2_permission(&nop_mnt_idmap, inode, MAY_WRITE);
		if (error)
			goto out;
	}
	if ((flags ^ new_flags) & GFS2_DIF_JDATA) {
		if (new_flags & GFS2_DIF_JDATA)
			gfs2_log_flush(sdp, ip->i_gl,
				       GFS2_LOG_HEAD_FLUSH_NORMAL |
				       GFS2_LFC_SET_FLAGS);
		error = filemap_fdatawrite(inode->i_mapping);
		if (error)
			goto out;
		error = filemap_fdatawait(inode->i_mapping);
		if (error)
			goto out;
		if (new_flags & GFS2_DIF_JDATA)
			gfs2_ordered_del_inode(ip);
	}
	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		goto out;
	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error)
		goto out_trans_end;
	inode->i_ctime = current_time(inode);
	gfs2_trans_add_meta(ip->i_gl, bh);
	ip->i_diskflags = new_flags;
	gfs2_dinode_out(ip, bh->b_data);
	brelse(bh);
	gfs2_set_inode_flags(inode);
	gfs2_set_aops(inode);
out_trans_end:
	gfs2_trans_end(sdp);
out:
	gfs2_glock_dq_uninit(&gh);
	return error;
}
int gfs2_fileattr_set(struct mnt_idmap *idmap,
		      struct dentry *dentry, struct fileattr *fa)
{
	struct inode *inode = d_inode(dentry);
	u32 fsflags = fa->flags, gfsflags = 0;
	u32 mask;
	int i;

	if (d_is_special(dentry))
		return -ENOTTY;

	if (fileattr_has_fsx(fa))
		return -EOPNOTSUPP;

	for (i = 0; i < ARRAY_SIZE(fsflag_gfs2flag); i++) {
		if (fsflags & fsflag_gfs2flag[i].fsflag) {
			fsflags &= ~fsflag_gfs2flag[i].fsflag;
			gfsflags |= fsflag_gfs2flag[i].gfsflag;
		}
	}
	if (fsflags || gfsflags & ~GFS2_FLAGS_USER_SET)
		return -EINVAL;

	mask = GFS2_FLAGS_USER_SET;
	if (S_ISDIR(inode->i_mode)) {
		mask &= ~GFS2_DIF_JDATA;
	} else {
		/* The GFS2_DIF_TOPDIR flag is only valid for directories. */
		if (gfsflags & GFS2_DIF_TOPDIR)
			return -EINVAL;
		mask &= ~(GFS2_DIF_TOPDIR | GFS2_DIF_INHERIT_JDATA);
	}

	return do_gfs2_set_flags(inode, gfsflags, mask);
}
static int gfs2_getlabel(struct file *filp, char __user *label)
{
	struct inode *inode = file_inode(filp);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (copy_to_user(label, sdp->sd_sb.sb_locktable, GFS2_LOCKNAME_LEN))
		return -EFAULT;

	return 0;
}

static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case FITRIM:
		return gfs2_fitrim(filp, (void __user *)arg);
	case FS_IOC_GETFSLABEL:
		return gfs2_getlabel(filp, (char __user *)arg);
	}

	return -ENOTTY;
}
#ifdef CONFIG_COMPAT
static long gfs2_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	/* Keep this list in sync with gfs2_ioctl */
	case FITRIM:
	case FS_IOC_GETFSLABEL:
		break;
	default:
		return -ENOIOCTLCMD;
	}

	return gfs2_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
}
#else
#define gfs2_compat_ioctl NULL
#endif
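
/*
 * For reference, a minimal user-space sketch of the label ioctl above
 * (illustrative only; error handling omitted):
 *
 *	char label[FSLABEL_MAX];
 *	int fd = open("/mnt/gfs2", O_RDONLY);
 *	ioctl(fd, FS_IOC_GETFSLABEL, label);
 *
 * On gfs2 this returns the lock table name stored in the superblock.
 */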
/**
 * gfs2_size_hint - Give a hint to the size of a write request
 * @filep: The struct file
 * @offset: The file offset of the write
 * @size: The length of the write
 *
 * When we are about to do a write, this function records the total
 * write size in order to provide a suitable hint to the lower layers
 * about how many blocks will be required.
 *
 */

static void gfs2_size_hint(struct file *filep, loff_t offset, size_t size)
{
	struct inode *inode = file_inode(filep);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *ip = GFS2_I(inode);
	size_t blks = (size + sdp->sd_sb.sb_bsize - 1) >> sdp->sd_sb.sb_bsize_shift;
	int hint = min_t(size_t, INT_MAX, blks);

	if (hint > atomic_read(&ip->i_sizehint))
		atomic_set(&ip->i_sizehint, hint);
}
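
/*
 * Worked example (illustrative, assuming a 4KiB block size, i.e.
 * sb_bsize == 4096 and sb_bsize_shift == 12): a 1MiB write request
 * rounds up to (1048576 + 4095) >> 12 == 256 blocks, so i_sizehint is
 * raised to at least 256 if it was previously smaller.
 */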
/**
 * gfs2_allocate_page_backing - Allocate blocks for a write fault
 * @page: The (locked) page to allocate backing for
 * @length: Size of the allocation
 *
 * We try to allocate all the blocks required for the page in one go. This
 * might fail for various reasons, so we keep trying until all the blocks to
 * back this page are allocated. If some of the blocks are already allocated,
 * that is ok too.
 */
static int gfs2_allocate_page_backing(struct page *page, unsigned int length)
{
	u64 pos = page_offset(page);

	do {
		struct iomap iomap = { };

		if (gfs2_iomap_alloc(page->mapping->host, pos, length, &iomap))
			return -EIO;

		if (length < iomap.length)
			iomap.length = length;
		length -= iomap.length;
		pos += iomap.length;
	} while (length > 0);

	return 0;
}
/**
 * gfs2_page_mkwrite - Make a shared, mmap()ed, page writable
 * @vmf: The virtual memory fault containing the page to become writable
 *
 * When the page becomes writable, we need to ensure that we have
 * blocks allocated on disk to back that page.
 */

static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_alloc_parms ap = { .aflags = 0, };
	u64 offset = page_offset(page);
	unsigned int data_blocks, ind_blocks, rblocks;
	vm_fault_t ret = VM_FAULT_LOCKED;
	struct gfs2_holder gh;
	unsigned int length;
	loff_t size;
	int err;

	sb_start_pagefault(inode->i_sb);

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	err = gfs2_glock_nq(&gh);
	if (err) {
		ret = block_page_mkwrite_return(err);
		goto out_uninit;
	}

	/* Check page index against inode size */
	size = i_size_read(inode);
	if (offset >= size) {
		ret = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	/* Update file times before taking page lock */
	file_update_time(vmf->vma->vm_file);

	/* page is wholly or partially inside EOF */
	if (size - offset < PAGE_SIZE)
		length = size - offset;
	else
		length = PAGE_SIZE;

	gfs2_size_hint(vmf->vma->vm_file, offset, length);

	set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
	set_bit(GIF_SW_PAGED, &ip->i_flags);

	/*
	 * iomap_writepage / iomap_writepages currently don't support inline
	 * files, so always unstuff here.
	 */

	if (!gfs2_is_stuffed(ip) &&
	    !gfs2_write_alloc_required(ip, offset, length)) {
		lock_page(page);
		if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
			ret = VM_FAULT_NOPAGE;
			unlock_page(page);
		}
		goto out_unlock;
	}

	err = gfs2_rindex_update(sdp);
	if (err) {
		ret = block_page_mkwrite_return(err);
		goto out_unlock;
	}

	gfs2_write_calc_reserv(ip, length, &data_blocks, &ind_blocks);
	ap.target = data_blocks + ind_blocks;
	err = gfs2_quota_lock_check(ip, &ap);
	if (err) {
		ret = block_page_mkwrite_return(err);
		goto out_unlock;
	}
	err = gfs2_inplace_reserve(ip, &ap);
	if (err) {
		ret = block_page_mkwrite_return(err);
		goto out_quota_unlock;
	}

	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks) {
		rblocks += RES_STATFS + RES_QUOTA;
		rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks);
	}
	err = gfs2_trans_begin(sdp, rblocks, 0);
	if (err) {
		ret = block_page_mkwrite_return(err);
		goto out_trans_fail;
	}

	/* Unstuff, if required, and allocate backing blocks for page */
	if (gfs2_is_stuffed(ip)) {
		err = gfs2_unstuff_dinode(ip);
		if (err) {
			ret = block_page_mkwrite_return(err);
			goto out_trans_end;
		}
	}

	lock_page(page);

	/* If truncated, we must retry the operation, we may have raced
	 * with the glock demotion code.
	 */
	if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
		ret = VM_FAULT_NOPAGE;
		goto out_page_locked;
	}

	err = gfs2_allocate_page_backing(page, length);
	if (err)
		ret = block_page_mkwrite_return(err);

out_page_locked:
	if (ret != VM_FAULT_LOCKED)
		unlock_page(page);
out_trans_end:
	gfs2_trans_end(sdp);
out_trans_fail:
	gfs2_inplace_release(ip);
out_quota_unlock:
	gfs2_quota_unlock(ip);
out_unlock:
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	if (ret == VM_FAULT_LOCKED) {
		set_page_dirty(page);
		wait_for_stable_page(page);
	}
	sb_end_pagefault(inode->i_sb);
	return ret;
}
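
/*
 * Note on the reservation in gfs2_page_mkwrite() above: rblocks counts the
 * dinode plus any indirect blocks, plus the data blocks themselves for
 * journaled-data inodes; the statfs, quota and resource-group blocks are
 * only added when new blocks actually have to be allocated.
 */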
static vm_fault_t gfs2_fault(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	vm_fault_t ret;
	int err;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	err = gfs2_glock_nq(&gh);
	if (err) {
		ret = block_page_mkwrite_return(err);
		goto out_uninit;
	}
	ret = filemap_fault(vmf);
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	return ret;
}
static const struct vm_operations_struct gfs2_vm_ops = {
	.fault = gfs2_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = gfs2_page_mkwrite,
};
/**
 * gfs2_mmap
 * @file: The file to map
 * @vma: The VMA which describes the mapping
 *
 * There is no need to get a lock here unless we should be updating
 * atime. We ignore any locking errors since the only consequence is
 * a missed atime update (which will just be deferred until later).
 *
 * Returns: 0
 */

static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);

	if (!(file->f_flags & O_NOATIME) &&
	    !IS_NOATIME(&ip->i_inode)) {
		struct gfs2_holder i_gh;
		int error;

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (error)
			return error;
		/* grab lock to update inode */
		gfs2_glock_dq_uninit(&i_gh);
		file_accessed(file);
	}
	vma->vm_ops = &gfs2_vm_ops;

	return 0;
}
/**
 * gfs2_open_common - This is common to open and atomic_open
 * @inode: The inode being opened
 * @file: The file being opened
 *
 * This may be called under a glock or not depending upon how it has
 * been called. We must always be called under a glock for regular
 * files, however. For other file types, it does not matter whether
 * we hold the glock or not.
 *
 * Returns: Error code or 0 for success
 */

int gfs2_open_common(struct inode *inode, struct file *file)
{
	struct gfs2_file *fp;
	int ret;

	if (S_ISREG(inode->i_mode)) {
		ret = generic_file_open(inode, file);
		if (ret)
			return ret;
	}

	fp = kzalloc(sizeof(struct gfs2_file), GFP_NOFS);
	if (!fp)
		return -ENOMEM;

	mutex_init(&fp->f_fl_mutex);

	gfs2_assert_warn(GFS2_SB(inode), !file->private_data);
	file->private_data = fp;
	if (file->f_mode & FMODE_WRITE) {
		ret = gfs2_qa_get(GFS2_I(inode));
		if (ret)
			goto fail;
	}
	return 0;

fail:
	kfree(file->private_data);
	file->private_data = NULL;
	return ret;
}
/**
 * gfs2_open - open a file
 * @inode: the inode to open
 * @file: the struct file for this opening
 *
 * After atomic_open, this function is only used for opening files
 * which are already cached. We must still get the glock for regular
 * files to ensure that we have the file size uptodate for the large
 * file check which is in the common code. That is only an issue for
 * regular files though.
 *
 * Returns: errno
 */

static int gfs2_open(struct inode *inode, struct file *file)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder i_gh;
	int error;
	bool need_unlock = false;

	if (S_ISREG(ip->i_inode.i_mode)) {
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (error)
			return error;
		need_unlock = true;
	}

	error = gfs2_open_common(inode, file);

	if (need_unlock)
		gfs2_glock_dq_uninit(&i_gh);

	return error;
}
/**
 * gfs2_release - called to close a struct file
 * @inode: the inode the struct file belongs to
 * @file: the struct file being closed
 *
 * Returns: errno
 */

static int gfs2_release(struct inode *inode, struct file *file)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	kfree(file->private_data);
	file->private_data = NULL;

	if (file->f_mode & FMODE_WRITE) {
		if (gfs2_rs_active(&ip->i_res))
			gfs2_rs_delete(ip);
		gfs2_qa_put(ip);
	}
	return 0;
}
/**
 * gfs2_fsync - sync the dirty data for a file (across the cluster)
 * @file: the file that points to the dentry
 * @start: the start position in the file to sync
 * @end: the end position in the file to sync
 * @datasync: set if we can ignore timestamp changes
 *
 * We split the data flushing here so that we don't wait for the data
 * until after we've also sent the metadata to disk. Note that for
 * data=ordered, we will write & wait for the data at the log flush
 * stage anyway, so this is unlikely to make much of a difference
 * except in the data=writeback case.
 *
 * If the fdatawrite fails due to any reason except -EIO, we will
 * continue the remainder of the fsync, although we'll still report
 * the error at the end. This is to match filemap_write_and_wait_range()
 * behaviour.
 *
 * Returns: errno
 */

static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	int sync_state = inode->i_state & I_DIRTY;
	struct gfs2_inode *ip = GFS2_I(inode);
	int ret = 0, ret1 = 0;

	if (mapping->nrpages) {
		ret1 = filemap_fdatawrite_range(mapping, start, end);
		if (ret1 == -EIO)
			return ret1;
	}

	if (!gfs2_is_jdata(ip))
		sync_state &= ~I_DIRTY_PAGES;
	if (datasync)
		sync_state &= ~I_DIRTY_SYNC;

	if (sync_state) {
		ret = sync_inode_metadata(inode, 1);
		if (ret)
			return ret;
		if (gfs2_is_jdata(ip))
			ret = file_write_and_wait(file);
		if (ret)
			return ret;
		gfs2_ail_flush(ip->i_gl, 1);
	}

	if (mapping->nrpages)
		ret = file_fdatawait_range(file, start, end);

	return ret ? ret : ret1;
}
static inline bool should_fault_in_pages(struct iov_iter *i,
					 struct kiocb *iocb,
					 size_t *prev_count,
					 size_t *window_size)
{
	size_t count = iov_iter_count(i);
	size_t size, offs;

	if (!count)
		return false;
	if (!user_backed_iter(i))
		return false;

	/*
	 * Try to fault in multiple pages initially. When that doesn't result
	 * in any progress, fall back to a single page.
	 */
	size = PAGE_SIZE;
	offs = offset_in_page(iocb->ki_pos);
	if (*prev_count != count) {
		size_t nr_dirtied;

		nr_dirtied = max(current->nr_dirtied_pause -
				 current->nr_dirtied, 8);
		size = min_t(size_t, SZ_1M, nr_dirtied << PAGE_SHIFT);
	}

	*prev_count = count;
	*window_size = size - offs;
	return true;
}
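
/*
 * Illustrative numbers for the window sizing above (assuming 4KiB pages):
 * a fresh request with nr_dirtied_pause - nr_dirtied <= 8 gets a fault-in
 * window of 8 << PAGE_SHIFT == 32KiB (capped at SZ_1M); a repeated request
 * that made no progress (*prev_count == count) falls back to a single page
 * minus the offset within that page.
 */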
static ssize_t gfs2_file_direct_read(struct kiocb *iocb, struct iov_iter *to,
				     struct gfs2_holder *gh)
{
	struct file *file = iocb->ki_filp;
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	size_t prev_count = 0, window_size = 0;
	size_t read = 0;
	ssize_t ret;

	/*
	 * In this function, we disable page faults when we're holding the
	 * inode glock while doing I/O. If a page fault occurs, we indicate
	 * that the inode glock may be dropped, fault in the pages manually,
	 * and retry.
	 *
	 * Unlike generic_file_read_iter, for reads, iomap_dio_rw can trigger
	 * physical as well as manual page faults, and we need to disable both
	 * kinds.
	 *
	 * For direct I/O, gfs2 takes the inode glock in deferred mode. This
	 * locking mode is compatible with other deferred holders, so multiple
	 * processes and nodes can do direct I/O to a file at the same time.
	 * There's no guarantee that reads or writes will be atomic. Any
	 * coordination among readers and writers needs to happen externally.
	 */

	if (!iov_iter_count(to))
		return 0; /* skip atime */

	gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, gh);
retry:
	ret = gfs2_glock_nq(gh);
	if (ret)
		goto out_uninit;
	pagefault_disable();
	to->nofault = true;
	ret = iomap_dio_rw(iocb, to, &gfs2_iomap_ops, NULL,
			   IOMAP_DIO_PARTIAL, NULL, read);
	to->nofault = false;
	pagefault_enable();
	if (ret <= 0 && ret != -EFAULT)
		goto out_unlock;
	/* No increment (+=) because iomap_dio_rw returns a cumulative value. */
	if (ret > 0)
		read = ret;

	if (should_fault_in_pages(to, iocb, &prev_count, &window_size)) {
		gfs2_glock_dq(gh);
		window_size -= fault_in_iov_iter_writeable(to, window_size);
		if (window_size)
			goto retry;
	}
out_unlock:
	if (gfs2_holder_queued(gh))
		gfs2_glock_dq(gh);
out_uninit:
	gfs2_holder_uninit(gh);
	/* User space doesn't expect partial success. */
	if (ret < 0)
		return ret;
	return read;
}
static ssize_t gfs2_file_direct_write(struct kiocb *iocb, struct iov_iter *from,
				      struct gfs2_holder *gh)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	size_t prev_count = 0, window_size = 0;
	size_t written = 0;
	bool enough_retries;
	ssize_t ret;

	/*
	 * In this function, we disable page faults when we're holding the
	 * inode glock while doing I/O. If a page fault occurs, we indicate
	 * that the inode glock may be dropped, fault in the pages manually,
	 * and retry.
	 *
	 * For writes, iomap_dio_rw only triggers manual page faults, so we
	 * don't need to disable physical ones.
	 */

	/*
	 * Deferred lock, even if it's a write, since we do no allocation on
	 * this path. All we need to change is the atime, and this lock mode
	 * ensures that other nodes have flushed their buffered read caches
	 * (i.e. their page cache entries for this inode). We do not,
	 * unfortunately, have the option of only flushing a range like the
	 * VFS does.
	 */
	gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, gh);
retry:
	ret = gfs2_glock_nq(gh);
	if (ret)
		goto out_uninit;
	/* Silently fall back to buffered I/O when writing beyond EOF */
	if (iocb->ki_pos + iov_iter_count(from) > i_size_read(&ip->i_inode))
		goto out_unlock;

	from->nofault = true;
	ret = iomap_dio_rw(iocb, from, &gfs2_iomap_ops, NULL,
			   IOMAP_DIO_PARTIAL, NULL, written);
	from->nofault = false;
	if (ret <= 0) {
		if (ret == -ENOTBLK)
			ret = 0;
		if (ret != -EFAULT)
			goto out_unlock;
	}
	/* No increment (+=) because iomap_dio_rw returns a cumulative value. */
	if (ret > 0)
		written = ret;

	enough_retries = prev_count == iov_iter_count(from) &&
			 window_size <= PAGE_SIZE;
	if (should_fault_in_pages(from, iocb, &prev_count, &window_size)) {
		gfs2_glock_dq(gh);
		window_size -= fault_in_iov_iter_readable(from, window_size);
		if (window_size) {
			if (!enough_retries)
				goto retry;
			/* fall back to buffered I/O */
			ret = 0;
		}
	}
out_unlock:
	if (gfs2_holder_queued(gh))
		gfs2_glock_dq(gh);
out_uninit:
	gfs2_holder_uninit(gh);
	/* User space doesn't expect partial success. */
	if (ret < 0)
		return ret;
	return written;
}
static ssize_t gfs2_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct gfs2_inode *ip;
	struct gfs2_holder gh;
	size_t prev_count = 0, window_size = 0;
	size_t read = 0;
	ssize_t ret;

	/*
	 * In this function, we disable page faults when we're holding the
	 * inode glock while doing I/O. If a page fault occurs, we indicate
	 * that the inode glock may be dropped, fault in the pages manually,
	 * and retry.
	 */

	if (iocb->ki_flags & IOCB_DIRECT)
		return gfs2_file_direct_read(iocb, to, &gh);

	pagefault_disable();
	iocb->ki_flags |= IOCB_NOIO;
	ret = generic_file_read_iter(iocb, to);
	iocb->ki_flags &= ~IOCB_NOIO;
	pagefault_enable();
	if (ret >= 0) {
		if (!iov_iter_count(to))
			return ret;
		read = ret;
	} else if (ret != -EFAULT) {
		if (ret != -EAGAIN)
			return ret;
		if (iocb->ki_flags & IOCB_NOWAIT)
			return ret;
	}
	ip = GFS2_I(iocb->ki_filp->f_mapping->host);
	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
retry:
	ret = gfs2_glock_nq(&gh);
	if (ret)
		goto out_uninit;
	pagefault_disable();
	ret = generic_file_read_iter(iocb, to);
	pagefault_enable();
	if (ret <= 0 && ret != -EFAULT)
		goto out_unlock;
	if (ret > 0)
		read += ret;

	if (should_fault_in_pages(to, iocb, &prev_count, &window_size)) {
		gfs2_glock_dq(&gh);
		window_size -= fault_in_iov_iter_writeable(to, window_size);
		if (window_size)
			goto retry;
	}
out_unlock:
	if (gfs2_holder_queued(&gh))
		gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	return read ? read : ret;
}
static ssize_t gfs2_file_buffered_write(struct kiocb *iocb,
					struct iov_iter *from,
					struct gfs2_holder *gh)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_holder *statfs_gh = NULL;
	size_t prev_count = 0, window_size = 0;
	size_t orig_count = iov_iter_count(from);
	size_t written = 0;
	ssize_t ret;

	/*
	 * In this function, we disable page faults when we're holding the
	 * inode glock while doing I/O. If a page fault occurs, we indicate
	 * that the inode glock may be dropped, fault in the pages manually,
	 * and retry.
	 */

	if (inode == sdp->sd_rindex) {
		statfs_gh = kmalloc(sizeof(*statfs_gh), GFP_NOFS);
		if (!statfs_gh)
			return -ENOMEM;
	}

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, gh);
	if (should_fault_in_pages(from, iocb, &prev_count, &window_size)) {
retry:
		window_size -= fault_in_iov_iter_readable(from, window_size);
		if (!window_size) {
			ret = -EFAULT;
			goto out_uninit;
		}
		from->count = min(from->count, window_size);
	}
	ret = gfs2_glock_nq(gh);
	if (ret)
		goto out_uninit;

	if (inode == sdp->sd_rindex) {
		struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);

		ret = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE,
					 GL_NOCACHE, statfs_gh);
		if (ret)
			goto out_unlock;
	}

	pagefault_disable();
	ret = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops);
	pagefault_enable();
	if (ret > 0)
		written += ret;

	if (inode == sdp->sd_rindex)
		gfs2_glock_dq_uninit(statfs_gh);

	if (ret <= 0 && ret != -EFAULT)
		goto out_unlock;

	from->count = orig_count - written;
	if (should_fault_in_pages(from, iocb, &prev_count, &window_size)) {
		gfs2_glock_dq(gh);
		goto retry;
	}
out_unlock:
	if (gfs2_holder_queued(gh))
		gfs2_glock_dq(gh);
out_uninit:
	gfs2_holder_uninit(gh);
	kfree(statfs_gh);
	from->count = orig_count - written;
	return written ? written : ret;
}
/**
 * gfs2_file_write_iter - Perform a write to a file
 * @iocb: The io context
 * @from: The data to write
 *
 * We have to do a lock/unlock here to refresh the inode size for
 * O_APPEND writes, otherwise we can land up writing at the wrong
 * offset. There is still a race, but provided the app is using its
 * own file locking, this will make O_APPEND work as expected.
 *
 */

static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	ssize_t ret;

	gfs2_size_hint(file, iocb->ki_pos, iov_iter_count(from));

	if (iocb->ki_flags & IOCB_APPEND) {
		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
		if (ret)
			return ret;
		gfs2_glock_dq_uninit(&gh);
	}

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out_unlock;

	ret = file_remove_privs(file);
	if (ret)
		goto out_unlock;

	ret = file_update_time(file);
	if (ret)
		goto out_unlock;

	if (iocb->ki_flags & IOCB_DIRECT) {
		struct address_space *mapping = file->f_mapping;
		ssize_t buffered, ret2;

		ret = gfs2_file_direct_write(iocb, from, &gh);
		if (ret < 0 || !iov_iter_count(from))
			goto out_unlock;

		iocb->ki_flags |= IOCB_DSYNC;
		buffered = gfs2_file_buffered_write(iocb, from, &gh);
		if (unlikely(buffered <= 0)) {
			if (!ret)
				ret = buffered;
			goto out_unlock;
		}

		/*
		 * We need to ensure that the page cache pages are written to
		 * disk and invalidated to preserve the expected O_DIRECT
		 * semantics. If the writeback or invalidate fails, only report
		 * the direct I/O range as we don't know if the buffered pages
		 * made it to disk.
		 */
		ret2 = generic_write_sync(iocb, buffered);
		invalidate_mapping_pages(mapping,
				(iocb->ki_pos - buffered) >> PAGE_SHIFT,
				(iocb->ki_pos - 1) >> PAGE_SHIFT);
		if (!ret || ret2 > 0)
			ret += ret2;
	} else {
		ret = gfs2_file_buffered_write(iocb, from, &gh);
		if (likely(ret > 0))
			ret = generic_write_sync(iocb, ret);
	}

out_unlock:
	inode_unlock(inode);
	return ret;
}
static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
			   int mode)
{
	struct super_block *sb = inode->i_sb;
	struct gfs2_inode *ip = GFS2_I(inode);
	loff_t end = offset + len;
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (unlikely(error))
		return error;

	gfs2_trans_add_meta(ip->i_gl, dibh);

	if (gfs2_is_stuffed(ip)) {
		error = gfs2_unstuff_dinode(ip);
		if (unlikely(error))
			goto out;
	}

	while (offset < end) {
		struct iomap iomap = { };

		error = gfs2_iomap_alloc(inode, offset, end - offset, &iomap);
		if (error)
			goto out;
		offset = iomap.offset + iomap.length;
		if (!(iomap.flags & IOMAP_F_NEW))
			continue;
		error = sb_issue_zeroout(sb, iomap.addr >> inode->i_blkbits,
					 iomap.length >> inode->i_blkbits,
					 GFP_NOFS);
		if (error) {
			fs_err(GFS2_SB(inode), "Failed to zero data buffers\n");
			goto out;
		}
	}
out:
	brelse(dibh);
	return error;
}
/**
 * calc_max_reserv() - Reverse of write_calc_reserv. Given a number of
 *                     blocks, determine how many bytes can be written.
 * @ip:          The inode in question.
 * @len:         Max cap of bytes. What we return in *len must be <= this.
 * @data_blocks: Compute and return the number of data blocks needed
 * @ind_blocks:  Compute and return the number of indirect blocks needed
 * @max_blocks:  The total blocks available to work with.
 *
 * Returns: void, but @len, @data_blocks and @ind_blocks are filled in.
 */
static void calc_max_reserv(struct gfs2_inode *ip, loff_t *len,
			    unsigned int *data_blocks, unsigned int *ind_blocks,
			    unsigned int max_blocks)
{
	loff_t max = *len;
	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);

	for (tmp = max_data; tmp > sdp->sd_diptrs;) {
		tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
		max_data -= tmp;
	}

	*data_blocks = max_data;
	*ind_blocks = max_blocks - max_data;
	*len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift;
	if (*len > max) {
		*len = max;
		gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks);
	}
}
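
/*
 * Worked example for calc_max_reserv() (illustrative, assuming 4KiB blocks
 * where sd_diptrs == 483 and sd_inptrs == 509): with max_blocks == 1000 and
 * sd_max_height == 3, max_data starts at 1000 - 3 * 2 == 994; the loop then
 * peels off DIV_ROUND_UP(994, 509) == 2 indirect blocks, leaving 992 data
 * blocks and 8 indirect blocks out of the original budget.
 */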
static long __gfs2_fallocate(struct file *file, int mode, loff_t offset,
			     loff_t len)
{
	struct inode *inode = file_inode(file);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_alloc_parms ap = { .aflags = 0, };
	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
	loff_t bytes, max_bytes, max_blks;
	int error;
	const loff_t pos = offset;
	const loff_t count = len;
	loff_t bsize_mask = ~((loff_t)sdp->sd_sb.sb_bsize - 1);
	loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
	loff_t max_chunk_size = UINT_MAX & bsize_mask;

	next = (next + 1) << sdp->sd_sb.sb_bsize_shift;

	offset &= bsize_mask;

	len = next - offset;
	bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2;
	if (!bytes)
		bytes = UINT_MAX;
	bytes &= bsize_mask;
	if (bytes == 0)
		bytes = sdp->sd_sb.sb_bsize;

	gfs2_size_hint(file, offset, len);

	gfs2_write_calc_reserv(ip, PAGE_SIZE, &data_blocks, &ind_blocks);
	ap.min_target = data_blocks + ind_blocks;

	while (len > 0) {
		if (len < bytes)
			bytes = len;
		if (!gfs2_write_alloc_required(ip, offset, bytes)) {
			len -= bytes;
			offset += bytes;
			continue;
		}

		/* We need to determine how many bytes we can actually
		 * fallocate without exceeding quota or going over the
		 * end of the fs. We start off optimistically by assuming
		 * we can write max_bytes */
		max_bytes = (len > max_chunk_size) ? max_chunk_size : len;

		/* Since max_bytes is most likely a theoretical max, we
		 * calculate a more realistic 'bytes' to serve as a good
		 * starting point for the number of bytes we may be able
		 * to write */
		gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
		ap.target = data_blocks + ind_blocks;

		error = gfs2_quota_lock_check(ip, &ap);
		if (error)
			return error;
		/* ap.allowed tells us how many blocks quota will allow
		 * us to write. Check if this reduces max_blks */
		max_blks = UINT_MAX;
		if (ap.allowed)
			max_blks = ap.allowed;

		error = gfs2_inplace_reserve(ip, &ap);
		if (error)
			goto out_qunlock;

		/* check if the selected rgrp limits our max_blks further */
		if (ip->i_res.rs_reserved < max_blks)
			max_blks = ip->i_res.rs_reserved;

		/* Almost done. Calculate bytes that can be written using
		 * max_blks. We also recompute max_bytes, data_blocks and
		 * ind_blocks */
		calc_max_reserv(ip, &max_bytes, &data_blocks,
				&ind_blocks, max_blks);

		rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
			  RES_RG_HDR + gfs2_rg_blocks(ip, data_blocks + ind_blocks);
		if (gfs2_is_jdata(ip))
			rblocks += data_blocks ? data_blocks : 1;

		error = gfs2_trans_begin(sdp, rblocks,
					 PAGE_SIZE >> inode->i_blkbits);
		if (error)
			goto out_trans_fail;

		error = fallocate_chunk(inode, offset, max_bytes, mode);
		gfs2_trans_end(sdp);

		if (error)
			goto out_trans_fail;

		len -= max_bytes;
		offset += max_bytes;
		gfs2_inplace_release(ip);
		gfs2_quota_unlock(ip);
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && (pos + count) > inode->i_size)
		i_size_write(inode, pos + count);
	file_update_time(file);
	mark_inode_dirty(inode);

	if ((file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host))
		return vfs_fsync_range(file, pos, pos + count - 1,
				       (file->f_flags & __O_SYNC) ? 0 : 1);
	return 0;

out_trans_fail:
	gfs2_inplace_release(ip);
out_qunlock:
	gfs2_quota_unlock(ip);
	return error;
}
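
/*
 * Note: __gfs2_fallocate() above works through the request in chunks of at
 * most half a resource group's worth of data (sd_max_rg_data * sb_bsize / 2),
 * shrinking each chunk further if quota or the chosen resource group cannot
 * cover it, so a large fallocate becomes a sequence of smaller
 * reserve/allocate/release cycles rather than one huge transaction.
 */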
static long gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int ret;

	if (mode & ~(FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE))
		return -EOPNOTSUPP;
	/* fallocate is needed by gfs2_grow to reserve space in the rindex */
	if (gfs2_is_jdata(ip) && inode != sdp->sd_rindex)
		return -EOPNOTSUPP;

	inode_lock(inode);

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (ret)
		goto out_uninit;

	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
	    (offset + len) > inode->i_size) {
		ret = inode_newsize_ok(inode, offset + len);
		if (ret)
			goto out_unlock;
	}

	ret = get_write_access(inode);
	if (ret)
		goto out_unlock;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		ret = __gfs2_punch_hole(file, offset, len);
	} else {
		ret = __gfs2_fallocate(file, mode, offset, len);
		if (ret > 0)
			ret = 0;
	}
	gfs2_rs_deltree(&ip->i_res);

	put_write_access(inode);
out_unlock:
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	inode_unlock(inode);
	return ret;
}
static ssize_t gfs2_file_splice_write(struct pipe_inode_info *pipe,
				      struct file *out, loff_t *ppos,
				      size_t len, unsigned int flags)
{
	ssize_t ret;

	gfs2_size_hint(out, *ppos, len);

	ret = iter_file_splice_write(pipe, out, ppos, len, flags);
	return ret;
}
#ifdef CONFIG_GFS2_FS_LOCKING_DLM

/**
 * gfs2_lock - acquire/release a posix lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;

	if (!(fl->fl_flags & FL_POSIX))
		return -ENOLCK;
	if (cmd == F_CANCELLK) {
		/* Hack: */
		cmd = F_SETLK;
		fl->fl_type = F_UNLCK;
	}
	if (unlikely(gfs2_withdrawn(sdp))) {
		if (fl->fl_type == F_UNLCK)
			locks_lock_file_wait(file, fl);
		return -EIO;
	}
	if (IS_GETLK(cmd))
		return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl);
	else if (fl->fl_type == F_UNLCK)
		return dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl);
	else
		return dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl);
}
static void __flock_holder_uninit(struct file *file, struct gfs2_holder *fl_gh)
{
	struct gfs2_glock *gl = gfs2_glock_hold(fl_gh->gh_gl);

	/*
	 * Make sure gfs2_glock_put() won't sleep under the file->f_lock
	 * spinlock.
	 */

	spin_lock(&file->f_lock);
	gfs2_holder_uninit(fl_gh);
	spin_unlock(&file->f_lock);
	gfs2_glock_put(gl);
}
static int do_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_file *fp = file->private_data;
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;
	struct gfs2_inode *ip = GFS2_I(file_inode(file));
	struct gfs2_glock *gl;
	unsigned int state;
	u16 flags;
	int error = 0;
	int sleeptime;

	state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
	flags = GL_EXACT | GL_NOPID;
	if (!IS_SETLKW(cmd))
		flags |= LM_FLAG_TRY_1CB;

	mutex_lock(&fp->f_fl_mutex);

	if (gfs2_holder_initialized(fl_gh)) {
		struct file_lock request;
		if (fl_gh->gh_state == state)
			goto out;
		locks_init_lock(&request);
		request.fl_type = F_UNLCK;
		request.fl_flags = FL_FLOCK;
		locks_lock_file_wait(file, &request);
		gfs2_glock_dq(fl_gh);
		gfs2_holder_reinit(state, flags, fl_gh);
	} else {
		error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
				       &gfs2_flock_glops, CREATE, &gl);
		if (error)
			goto out;
		spin_lock(&file->f_lock);
		gfs2_holder_init(gl, state, flags, fl_gh);
		spin_unlock(&file->f_lock);
		gfs2_glock_put(gl);
	}
	for (sleeptime = 1; sleeptime <= 4; sleeptime <<= 1) {
		error = gfs2_glock_nq(fl_gh);
		if (error != GLR_TRYFAILED)
			break;
		fl_gh->gh_flags &= ~LM_FLAG_TRY_1CB;
		fl_gh->gh_flags |= LM_FLAG_TRY;
		msleep(sleeptime);
	}
	if (error) {
		__flock_holder_uninit(file, fl_gh);
		if (error == GLR_TRYFAILED)
			error = -EAGAIN;
	} else {
		error = locks_lock_file_wait(file, fl);
		gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
	}

out:
	mutex_unlock(&fp->f_fl_mutex);
	return error;
}
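
/*
 * Note: for non-blocking requests, do_flock() above first tries the glock
 * with LM_FLAG_TRY_1CB (try once, sending a single demote callback to remote
 * holders), then retries with LM_FLAG_TRY under an increasing 1/2/4 ms
 * backoff before giving up with -EAGAIN.
 */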
static void do_unflock(struct file *file, struct file_lock *fl)
{
	struct gfs2_file *fp = file->private_data;
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;

	mutex_lock(&fp->f_fl_mutex);
	locks_lock_file_wait(file, fl);
	if (gfs2_holder_initialized(fl_gh)) {
		gfs2_glock_dq(fl_gh);
		__flock_holder_uninit(file, fl_gh);
	}
	mutex_unlock(&fp->f_fl_mutex);
}
/**
 * gfs2_flock - acquire/release a flock lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
{
	if (!(fl->fl_flags & FL_FLOCK))
		return -ENOLCK;

	if (fl->fl_type == F_UNLCK) {
		do_unflock(file, fl);
		return 0;
	} else {
		return do_flock(file, cmd, fl);
	}
}
const struct file_operations gfs2_file_fops = {
	.llseek		= gfs2_llseek,
	.read_iter	= gfs2_file_read_iter,
	.write_iter	= gfs2_file_write_iter,
	.iopoll		= iocb_bio_iopoll,
	.unlocked_ioctl	= gfs2_ioctl,
	.compat_ioctl	= gfs2_compat_ioctl,
	.mmap		= gfs2_mmap,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.lock		= gfs2_lock,
	.flock		= gfs2_flock,
	.splice_read	= filemap_splice_read,
	.splice_write	= gfs2_file_splice_write,
	.setlease	= simple_nosetlease,
	.fallocate	= gfs2_fallocate,
};
const struct file_operations gfs2_dir_fops = {
	.iterate_shared	= gfs2_readdir,
	.unlocked_ioctl	= gfs2_ioctl,
	.compat_ioctl	= gfs2_compat_ioctl,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.lock		= gfs2_lock,
	.flock		= gfs2_flock,
	.llseek		= default_llseek,
};

#endif /* CONFIG_GFS2_FS_LOCKING_DLM */
const struct file_operations gfs2_file_fops_nolock = {
	.llseek		= gfs2_llseek,
	.read_iter	= gfs2_file_read_iter,
	.write_iter	= gfs2_file_write_iter,
	.iopoll		= iocb_bio_iopoll,
	.unlocked_ioctl	= gfs2_ioctl,
	.compat_ioctl	= gfs2_compat_ioctl,
	.mmap		= gfs2_mmap,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.splice_read	= filemap_splice_read,
	.splice_write	= gfs2_file_splice_write,
	.setlease	= generic_setlease,
	.fallocate	= gfs2_fallocate,
};
const struct file_operations gfs2_dir_fops_nolock = {
	.iterate_shared	= gfs2_readdir,
	.unlocked_ioctl	= gfs2_ioctl,
	.compat_ioctl	= gfs2_compat_ioctl,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.llseek		= default_llseek,
};