// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/fs.h>
#include <linux/gfs2_ondisk.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/crc32.h>
#include <linux/writeback.h>
#include <linux/uaccess.h>
#include <linux/dlm.h>
#include <linux/dlm_plock.h>
#include <linux/delay.h>
#include <linux/backing-dev.h>
#include <linux/fileattr.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "aops.h"
#include "dir.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"

/**
 * gfs2_llseek - seek to a location in a file
 * @file: the file
 * @offset: the offset
 * @whence: Where to seek from (SEEK_SET, SEEK_CUR, or SEEK_END)
 *
 * SEEK_END requires the glock for the file because it references the
 * file's size.
 *
 * Returns: The new offset, or errno
 */

static loff_t gfs2_llseek(struct file *file, loff_t offset, int whence)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_holder i_gh;
	loff_t error;

	switch (whence) {
	case SEEK_END:
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (!error) {
			error = generic_file_llseek(file, offset, whence);
			gfs2_glock_dq_uninit(&i_gh);
		}
		break;

	case SEEK_DATA:
		error = gfs2_seek_data(file, offset);
		break;

	case SEEK_HOLE:
		error = gfs2_seek_hole(file, offset);
		break;

	case SEEK_CUR:
	case SEEK_SET:
		/*
		 * These don't reference inode->i_size and don't depend on the
		 * block mapping, so we don't need the glock.
		 */
		error = generic_file_llseek(file, offset, whence);
		break;
	default:
		error = -EINVAL;
	}

	return error;
}
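
/*
 * Of the cases above, only SEEK_END needs cluster-wide agreement on the
 * file size, which is why it is the only plain whence value that takes the
 * inode glock: an lseek(fd, 0, SEEK_END) may trigger a DLM lock request so
 * that i_size reflects writes from other nodes, while SEEK_SET/SEEK_CUR are
 * pure arithmetic on the local file position.
 */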

/**
 * gfs2_readdir - Iterator for a directory
 * @file: The directory to read from
 * @ctx: What to feed directory entries to
 *
 * Returns: errno
 */

static int gfs2_readdir(struct file *file, struct dir_context *ctx)
{
	struct inode *dir = file->f_mapping->host;
	struct gfs2_inode *dip = GFS2_I(dir);
	struct gfs2_holder d_gh;
	int error;

	error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
	if (error)
		return error;

	error = gfs2_dir_read(dir, ctx, &file->f_ra);

	gfs2_glock_dq_uninit(&d_gh);

	return error;
}

/*
 * struct fsflag_gfs2flag
 *
 * The FS_JOURNAL_DATA_FL flag maps to GFS2_DIF_INHERIT_JDATA for directories,
 * and to GFS2_DIF_JDATA for non-directories.
 */
static struct {
	u32 fsflag;
	u32 gfsflag;
} fsflag_gfs2flag[] = {
	{FS_SYNC_FL, GFS2_DIF_SYNC},
	{FS_IMMUTABLE_FL, GFS2_DIF_IMMUTABLE},
	{FS_APPEND_FL, GFS2_DIF_APPENDONLY},
	{FS_NOATIME_FL, GFS2_DIF_NOATIME},
	{FS_INDEX_FL, GFS2_DIF_EXHASH},
	{FS_TOPDIR_FL, GFS2_DIF_TOPDIR},
	{FS_JOURNAL_DATA_FL, GFS2_DIF_JDATA | GFS2_DIF_INHERIT_JDATA},
};
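
/*
 * Example of the mapping above: `chattr +j` sets FS_JOURNAL_DATA_FL, which
 * this table translates to GFS2_DIF_JDATA | GFS2_DIF_INHERIT_JDATA.
 * gfs2_fileattr_set() then masks the result: on a directory only
 * GFS2_DIF_INHERIT_JDATA survives (newly created children inherit
 * GFS2_DIF_JDATA), and on a regular file only GFS2_DIF_JDATA does.
 */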

static inline u32 gfs2_gfsflags_to_fsflags(struct inode *inode, u32 gfsflags)
{
	int i;
	u32 fsflags = 0;

	if (S_ISDIR(inode->i_mode))
		gfsflags &= ~GFS2_DIF_JDATA;
	else
		gfsflags &= ~GFS2_DIF_INHERIT_JDATA;

	for (i = 0; i < ARRAY_SIZE(fsflag_gfs2flag); i++)
		if (gfsflags & fsflag_gfs2flag[i].gfsflag)
			fsflags |= fsflag_gfs2flag[i].fsflag;
	return fsflags;
}

int gfs2_fileattr_get(struct dentry *dentry, struct fileattr *fa)
{
	struct inode *inode = d_inode(dentry);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int error;
	u32 fsflags;

	if (d_is_special(dentry))
		return -ENOTTY;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	error = gfs2_glock_nq(&gh);
	if (error)
		goto out_uninit;

	fsflags = gfs2_gfsflags_to_fsflags(inode, ip->i_diskflags);

	fileattr_fill_flags(fa, fsflags);

	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	return error;
}

void gfs2_set_inode_flags(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	unsigned int flags = inode->i_flags;

	flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_NOSEC);
	if ((ip->i_eattr == 0) && !is_sxid(inode->i_mode))
		flags |= S_NOSEC;
	if (ip->i_diskflags & GFS2_DIF_IMMUTABLE)
		flags |= S_IMMUTABLE;
	if (ip->i_diskflags & GFS2_DIF_APPENDONLY)
		flags |= S_APPEND;
	if (ip->i_diskflags & GFS2_DIF_NOATIME)
		flags |= S_NOATIME;
	if (ip->i_diskflags & GFS2_DIF_SYNC)
		flags |= S_SYNC;
	inode->i_flags = flags;
}

/* Flags that can be set by user space */
#define GFS2_FLAGS_USER_SET (GFS2_DIF_JDATA|			\
			     GFS2_DIF_IMMUTABLE|		\
			     GFS2_DIF_APPENDONLY|		\
			     GFS2_DIF_NOATIME|			\
			     GFS2_DIF_SYNC|			\
			     GFS2_DIF_TOPDIR|			\
			     GFS2_DIF_INHERIT_JDATA)

/**
 * do_gfs2_set_flags - set flags on an inode
 * @inode: The inode
 * @reqflags: The flags to set
 * @mask: Indicates which flags are valid
 * @fsflags: The FS_* inode flags passed in
 *
 */
static int do_gfs2_set_flags(struct inode *inode, u32 reqflags, u32 mask,
			     const u32 fsflags)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *bh;
	struct gfs2_holder gh;
	int error;
	u32 new_flags, flags;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (error)
		return error;

	error = 0;
	flags = ip->i_diskflags;
	new_flags = (flags & ~mask) | (reqflags & mask);
	if ((new_flags ^ flags) == 0)
		goto out;

	error = -EPERM;
	if (IS_IMMUTABLE(inode) && (new_flags & GFS2_DIF_IMMUTABLE))
		goto out;
	if (IS_APPEND(inode) && (new_flags & GFS2_DIF_APPENDONLY))
		goto out;
	if (!IS_IMMUTABLE(inode)) {
		error = gfs2_permission(&init_user_ns, inode, MAY_WRITE);
		if (error)
			goto out;
	}
	if ((flags ^ new_flags) & GFS2_DIF_JDATA) {
		if (new_flags & GFS2_DIF_JDATA)
			gfs2_log_flush(sdp, ip->i_gl,
				       GFS2_LOG_HEAD_FLUSH_NORMAL |
				       GFS2_LFC_SET_FLAGS);
		error = filemap_fdatawrite(inode->i_mapping);
		if (error)
			goto out;
		error = filemap_fdatawait(inode->i_mapping);
		if (error)
			goto out;
		if (new_flags & GFS2_DIF_JDATA)
			gfs2_ordered_del_inode(ip);
	}
	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		goto out;
	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error)
		goto out_trans_end;
	inode->i_ctime = current_time(inode);
	gfs2_trans_add_meta(ip->i_gl, bh);
	ip->i_diskflags = new_flags;
	gfs2_dinode_out(ip, bh->b_data);
	brelse(bh);
	gfs2_set_inode_flags(inode);
	gfs2_set_aops(inode);
out_trans_end:
	gfs2_trans_end(sdp);
out:
	gfs2_glock_dq_uninit(&gh);
	return error;
}
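
/*
 * Toggling GFS2_DIF_JDATA in do_gfs2_set_flags() switches the inode between
 * journaled and ordered/writeback data. The log flush plus the
 * filemap_fdatawrite()/filemap_fdatawait() pair make sure no dirty pages
 * are left behind under the old journaling mode before the new flags are
 * committed to the dinode.
 */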

int gfs2_fileattr_set(struct user_namespace *mnt_userns,
		      struct dentry *dentry, struct fileattr *fa)
{
	struct inode *inode = d_inode(dentry);
	u32 fsflags = fa->flags, gfsflags = 0;
	u32 mask;
	int i;

	if (d_is_special(dentry))
		return -ENOTTY;

	if (fileattr_has_fsx(fa))
		return -EOPNOTSUPP;

	for (i = 0; i < ARRAY_SIZE(fsflag_gfs2flag); i++) {
		if (fsflags & fsflag_gfs2flag[i].fsflag) {
			fsflags &= ~fsflag_gfs2flag[i].fsflag;
			gfsflags |= fsflag_gfs2flag[i].gfsflag;
		}
	}
	if (fsflags || gfsflags & ~GFS2_FLAGS_USER_SET)
		return -EINVAL;

	mask = GFS2_FLAGS_USER_SET;
	if (S_ISDIR(inode->i_mode)) {
		mask &= ~GFS2_DIF_JDATA;
	} else {
		/* The GFS2_DIF_TOPDIR flag is only valid for directories. */
		if (gfsflags & GFS2_DIF_TOPDIR)
			return -EINVAL;
		mask &= ~(GFS2_DIF_TOPDIR | GFS2_DIF_INHERIT_JDATA);
	}

	return do_gfs2_set_flags(inode, gfsflags, mask, fsflags);
}

static int gfs2_getlabel(struct file *filp, char __user *label)
{
	struct inode *inode = file_inode(filp);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (copy_to_user(label, sdp->sd_sb.sb_locktable, GFS2_LOCKNAME_LEN))
		return -EFAULT;

	return 0;
}

static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch(cmd) {
	case FITRIM:
		return gfs2_fitrim(filp, (void __user *)arg);
	case FS_IOC_GETFSLABEL:
		return gfs2_getlabel(filp, (char __user *)arg);
	}

	return -ENOTTY;
}

#ifdef CONFIG_COMPAT
static long gfs2_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch(cmd) {
	/* Keep this list in sync with gfs2_ioctl */
	case FITRIM:
	case FS_IOC_GETFSLABEL:
		break;
	default:
		return -ENOIOCTLCMD;
	}

	return gfs2_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
}
#else
#define gfs2_compat_ioctl NULL
#endif
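
/*
 * Sketch of a (hypothetical) userspace caller of the label ioctl handled
 * above; the buffer size follows the GFS2_LOCKNAME_LEN bytes that
 * gfs2_getlabel() copies out:
 *
 *	char label[GFS2_LOCKNAME_LEN] = "";
 *
 *	if (ioctl(fd, FS_IOC_GETFSLABEL, label) == 0)
 *		printf("lock table: %s\n", label);
 *
 * For GFS2, the returned "label" is the lock table name from the
 * superblock (sd_sb.sb_locktable).
 */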

/**
 * gfs2_size_hint - Give a hint to the size of a write request
 * @filep: The struct file
 * @offset: The file offset of the write
 * @size: The length of the write
 *
 * When we are about to do a write, this function records the total
 * write size in order to provide a suitable hint to the lower layers
 * about how many blocks will be required.
 *
 */
static void gfs2_size_hint(struct file *filep, loff_t offset, size_t size)
{
	struct inode *inode = file_inode(filep);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *ip = GFS2_I(inode);
	size_t blks = (size + sdp->sd_sb.sb_bsize - 1) >> sdp->sd_sb.sb_bsize_shift;
	int hint = min_t(size_t, INT_MAX, blks);

	if (hint > atomic_read(&ip->i_sizehint))
		atomic_set(&ip->i_sizehint, hint);
}
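
/*
 * Example: with a 4 KiB block size (sb_bsize_shift == 12), a 1 MiB write
 * yields blks = (1048576 + 4095) >> 12 = 256, so i_sizehint is raised to at
 * least 256 blocks. The hint only ever grows here; it is a heuristic for
 * the block allocator, not a value that needs to be exact.
 */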

/**
 * gfs2_allocate_page_backing - Allocate blocks for a write fault
 * @page: The (locked) page to allocate backing for
 * @length: Size of the allocation
 *
 * We try to allocate all the blocks required for the page in one go. This
 * might fail for various reasons, so we keep trying until all the blocks to
 * back this page are allocated. If some of the blocks are already allocated,
 * that is ok too.
 */
static int gfs2_allocate_page_backing(struct page *page, unsigned int length)
{
	u64 pos = page_offset(page);

	do {
		struct iomap iomap = { };

		if (gfs2_iomap_alloc(page->mapping->host, pos, length, &iomap))
			return -EIO;

		if (length < iomap.length)
			iomap.length = length;
		length -= iomap.length;
		pos += iomap.length;
	} while (length > 0);

	return 0;
}
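
/*
 * Each gfs2_iomap_alloc() call above may map fewer blocks than requested,
 * so the loop keeps allocating until the whole range backing the page is
 * covered. Any allocation failure is reported to the page-fault path as
 * -EIO, regardless of the underlying error.
 */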

/**
 * gfs2_page_mkwrite - Make a shared, mmap()ed, page writable
 * @vmf: The virtual memory fault containing the page to become writable
 *
 * When the page becomes writable, we need to ensure that we have
 * blocks allocated on disk to back that page.
 */
static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_alloc_parms ap = { .aflags = 0, };
	u64 offset = page_offset(page);
	unsigned int data_blocks, ind_blocks, rblocks;
	vm_fault_t ret = VM_FAULT_LOCKED;
	struct gfs2_holder gh;
	unsigned int length;
	loff_t size;
	int err;

	sb_start_pagefault(inode->i_sb);

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	err = gfs2_glock_nq(&gh);
	if (err) {
		ret = block_page_mkwrite_return(err);
		goto out_uninit;
	}

	/* Check page index against inode size */
	size = i_size_read(inode);
	if (offset >= size) {
		ret = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	/* Update file times before taking page lock */
	file_update_time(vmf->vma->vm_file);

	/* page is wholly or partially inside EOF */
	if (size - offset < PAGE_SIZE)
		length = size - offset;
	else
		length = PAGE_SIZE;

	gfs2_size_hint(vmf->vma->vm_file, offset, length);

	set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
	set_bit(GIF_SW_PAGED, &ip->i_flags);

	/*
	 * iomap_writepage / iomap_writepages currently don't support inline
	 * files, so always unstuff here.
	 */

	if (!gfs2_is_stuffed(ip) &&
	    !gfs2_write_alloc_required(ip, offset, length)) {
		lock_page(page);
		if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
			ret = VM_FAULT_NOPAGE;
			unlock_page(page);
		}
		goto out_unlock;
	}

	err = gfs2_rindex_update(sdp);
	if (err) {
		ret = block_page_mkwrite_return(err);
		goto out_unlock;
	}

	gfs2_write_calc_reserv(ip, length, &data_blocks, &ind_blocks);
	ap.target = data_blocks + ind_blocks;
	err = gfs2_quota_lock_check(ip, &ap);
	if (err) {
		ret = block_page_mkwrite_return(err);
		goto out_unlock;
	}
	err = gfs2_inplace_reserve(ip, &ap);
	if (err) {
		ret = block_page_mkwrite_return(err);
		goto out_quota_unlock;
	}

	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks) {
		rblocks += RES_STATFS + RES_QUOTA;
		rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks);
	}
	err = gfs2_trans_begin(sdp, rblocks, 0);
	if (err) {
		ret = block_page_mkwrite_return(err);
		goto out_trans_fail;
	}

	/* Unstuff, if required, and allocate backing blocks for page */
	if (gfs2_is_stuffed(ip)) {
		err = gfs2_unstuff_dinode(ip);
		if (err) {
			ret = block_page_mkwrite_return(err);
			goto out_trans_end;
		}
	}

	lock_page(page);

	/* If truncated, we must retry the operation; we may have raced
	 * with the glock demotion code.
	 */
	if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
		ret = VM_FAULT_NOPAGE;
		goto out_page_locked;
	}

	err = gfs2_allocate_page_backing(page, length);
	if (err)
		ret = block_page_mkwrite_return(err);

out_page_locked:
	if (ret != VM_FAULT_LOCKED)
		unlock_page(page);
out_trans_end:
	gfs2_trans_end(sdp);
out_trans_fail:
	gfs2_inplace_release(ip);
out_quota_unlock:
	gfs2_quota_unlock(ip);
out_unlock:
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	if (ret == VM_FAULT_LOCKED) {
		set_page_dirty(page);
		wait_for_stable_page(page);
	}
	sb_end_pagefault(inode->i_sb);
	return ret;
}
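
/*
 * On success, gfs2_page_mkwrite() returns with the page still locked
 * (VM_FAULT_LOCKED) and dirtied, and wait_for_stable_page() has been called
 * so that backing devices which require stable pages during writeback do
 * not see the page change underneath them.
 */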

static vm_fault_t gfs2_fault(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	vm_fault_t ret;
	int err;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	err = gfs2_glock_nq(&gh);
	if (err) {
		ret = block_page_mkwrite_return(err);
		goto out_uninit;
	}
	ret = filemap_fault(vmf);
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	return ret;
}

static const struct vm_operations_struct gfs2_vm_ops = {
	.fault = gfs2_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = gfs2_page_mkwrite,
};

/**
 * gfs2_mmap
 * @file: The file to map
 * @vma: The VMA which describes the mapping
 *
 * There is no need to get a lock here unless we should be updating
 * atime. We ignore any locking errors since the only consequence is
 * a missed atime update (which will just be deferred until later).
 *
 * Returns: 0
 */

static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);

	if (!(file->f_flags & O_NOATIME) &&
	    !IS_NOATIME(&ip->i_inode)) {
		struct gfs2_holder i_gh;
		int error;

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (error)
			return error;
		/* grab lock to update inode */
		gfs2_glock_dq_uninit(&i_gh);
		file_accessed(file);
	}
	vma->vm_ops = &gfs2_vm_ops;

	return 0;
}

/**
 * gfs2_open_common - This is common to open and atomic_open
 * @inode: The inode being opened
 * @file: The file being opened
 *
 * This may be called under a glock or not, depending upon how it has
 * been called. We must always be called under a glock for regular
 * files, however. For other file types, it does not matter whether
 * we hold the glock or not.
 *
 * Returns: Error code or 0 for success
 */

int gfs2_open_common(struct inode *inode, struct file *file)
{
	struct gfs2_file *fp;
	int ret;

	if (S_ISREG(inode->i_mode)) {
		ret = generic_file_open(inode, file);
		if (ret)
			return ret;
	}

	fp = kzalloc(sizeof(struct gfs2_file), GFP_NOFS);
	if (!fp)
		return -ENOMEM;

	mutex_init(&fp->f_fl_mutex);

	gfs2_assert_warn(GFS2_SB(inode), !file->private_data);
	file->private_data = fp;
	if (file->f_mode & FMODE_WRITE) {
		ret = gfs2_qa_get(GFS2_I(inode));
		if (ret)
			goto fail;
	}
	return 0;

fail:
	kfree(file->private_data);
	file->private_data = NULL;
	return ret;
}

/**
 * gfs2_open - open a file
 * @inode: the inode to open
 * @file: the struct file for this opening
 *
 * After atomic_open, this function is only used for opening files
 * which are already cached. We must still get the glock for regular
 * files to ensure that we have the file size up to date for the large
 * file check which is in the common code. That is only an issue for
 * regular files though.
 *
 * Returns: errno
 */

static int gfs2_open(struct inode *inode, struct file *file)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder i_gh;
	int error;
	bool need_unlock = false;

	if (S_ISREG(ip->i_inode.i_mode)) {
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (error)
			return error;
		need_unlock = true;
	}

	error = gfs2_open_common(inode, file);

	if (need_unlock)
		gfs2_glock_dq_uninit(&i_gh);

	return error;
}

/**
 * gfs2_release - called to close a struct file
 * @inode: the inode the struct file belongs to
 * @file: the struct file being closed
 *
 * Returns: errno
 */

static int gfs2_release(struct inode *inode, struct file *file)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	kfree(file->private_data);
	file->private_data = NULL;

	if (file->f_mode & FMODE_WRITE) {
		if (gfs2_rs_active(&ip->i_res))
			gfs2_rs_delete(ip, &inode->i_writecount);
		gfs2_qa_put(ip);
	}
	return 0;
}

/**
 * gfs2_fsync - sync the dirty data for a file (across the cluster)
 * @file: the file that points to the dentry
 * @start: the start position in the file to sync
 * @end: the end position in the file to sync
 * @datasync: set if we can ignore timestamp changes
 *
 * We split the data flushing here so that we don't wait for the data
 * until after we've also sent the metadata to disk. Note that for
 * data=ordered, we will write & wait for the data at the log flush
 * stage anyway, so this is unlikely to make much of a difference
 * except in the data=writeback case.
 *
 * If the fdatawrite fails due to any reason except -EIO, we will
 * continue the remainder of the fsync, although we'll still report
 * the error at the end. This is to match filemap_write_and_wait_range()
 * behaviour.
 *
 * Returns: errno
 */

static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	int sync_state = inode->i_state & I_DIRTY;
	struct gfs2_inode *ip = GFS2_I(inode);
	int ret = 0, ret1 = 0;

	if (mapping->nrpages) {
		ret1 = filemap_fdatawrite_range(mapping, start, end);
		if (ret1 == -EIO)
			return ret1;
	}

	if (!gfs2_is_jdata(ip))
		sync_state &= ~I_DIRTY_PAGES;
	if (datasync)
		sync_state &= ~I_DIRTY_SYNC;

	if (sync_state) {
		ret = sync_inode_metadata(inode, 1);
		if (ret)
			return ret;
		if (gfs2_is_jdata(ip))
			ret = file_write_and_wait(file);
		if (ret)
			return ret;
		gfs2_ail_flush(ip->i_gl, 1);
	}

	if (mapping->nrpages)
		ret = file_fdatawait_range(file, start, end);

	return ret ? ret : ret1;
}
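
/*
 * Note the error precedence in gfs2_fsync(): an -EIO from the initial
 * fdatawrite aborts immediately, while any other early writeback error
 * (ret1) is remembered and only reported if the metadata sync and the
 * final data wait (ret) succeed.
 */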

static inline bool should_fault_in_pages(ssize_t ret, struct iov_iter *i,
					 size_t *prev_count,
					 size_t *window_size)
{
	char __user *p = i->iov[0].iov_base + i->iov_offset;
	size_t count = iov_iter_count(i);
	int pages = 1;

	if (likely(!count))
		return false;
	if (ret <= 0 && ret != -EFAULT)
		return false;
	if (!iter_is_iovec(i))
		return false;

	if (*prev_count != count || !*window_size) {
		int nr_dirtied;

		pages = min_t(int, BIO_MAX_VECS,
			      DIV_ROUND_UP(iov_iter_count(i), PAGE_SIZE));
		nr_dirtied = max(current->nr_dirtied_pause -
				 current->nr_dirtied, 1);
		pages = min(pages, nr_dirtied);
	}

	*prev_count = count;
	*window_size = (size_t)PAGE_SIZE * pages - offset_in_page(p);
	return true;
}
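
/*
 * Example of the fault-in window computed above, with hypothetical numbers:
 * if the task may dirty 4 more pages before being throttled
 * (nr_dirtied_pause - nr_dirtied == 4) and the iterator spans 64 pages, the
 * window is min(BIO_MAX_VECS, 64, 4) = 4 pages, minus the offset into the
 * first page. When the count is unchanged from the previous attempt, the
 * window falls back to a single page.
 */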

static ssize_t gfs2_file_direct_read(struct kiocb *iocb, struct iov_iter *to,
				     struct gfs2_holder *gh)
{
	struct file *file = iocb->ki_filp;
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	size_t count = iov_iter_count(to);
	ssize_t ret;

	if (!count)
		return 0; /* skip atime */

	gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, gh);
	ret = gfs2_glock_nq(gh);
	if (ret)
		goto out_uninit;

	ret = iomap_dio_rw(iocb, to, &gfs2_iomap_ops, NULL, 0);
	gfs2_glock_dq(gh);
out_uninit:
	gfs2_holder_uninit(gh);
	return ret;
}

static ssize_t gfs2_file_direct_write(struct kiocb *iocb, struct iov_iter *from,
				      struct gfs2_holder *gh)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	size_t len = iov_iter_count(from);
	loff_t offset = iocb->ki_pos;
	ssize_t ret;

	/*
	 * Deferred lock, even if it's a write, since we do no allocation on
	 * this path. All we need to change is the atime, and this lock mode
	 * ensures that other nodes have flushed their buffered read caches
	 * (i.e. their page cache entries for this inode). We do not,
	 * unfortunately, have the option of only flushing a range like the
	 * VFS does.
	 */
	gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, gh);
	ret = gfs2_glock_nq(gh);
	if (ret)
		goto out_uninit;

	/* Silently fall back to buffered I/O when writing beyond EOF */
	if (offset + len > i_size_read(&ip->i_inode))
		goto out;

	ret = iomap_dio_rw(iocb, from, &gfs2_iomap_ops, NULL, 0);
	if (ret == -ENOTBLK)
		ret = 0;
out:
	gfs2_glock_dq(gh);
out_uninit:
	gfs2_holder_uninit(gh);
	return ret;
}
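
/*
 * LM_ST_DEFERRED is a shared lock mode that is incompatible with the normal
 * shared and exclusive caching modes, so taking it forces other nodes to
 * write back and drop their cached pages for this inode while still
 * allowing direct I/O from more than one node at a time.
 */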

static ssize_t gfs2_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct gfs2_inode *ip;
	struct gfs2_holder gh;
	size_t prev_count = 0, window_size = 0;
	size_t written = 0;
	ssize_t ret;

	/*
	 * In this function, we disable page faults when we're holding the
	 * inode glock while doing I/O. If a page fault occurs, we indicate
	 * that the inode glock may be dropped, fault in the pages manually,
	 * and retry.
	 */

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = gfs2_file_direct_read(iocb, to, &gh);
		if (likely(ret != -ENOTBLK))
			return ret;
		iocb->ki_flags &= ~IOCB_DIRECT;
	}
	iocb->ki_flags |= IOCB_NOIO;
	ret = generic_file_read_iter(iocb, to);
	iocb->ki_flags &= ~IOCB_NOIO;
	if (ret >= 0) {
		if (!iov_iter_count(to))
			return ret;
		written = ret;
	} else {
		if (ret != -EAGAIN)
			return ret;
		if (iocb->ki_flags & IOCB_NOWAIT)
			return ret;
	}

	ip = GFS2_I(iocb->ki_filp->f_mapping->host);
	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
retry:
	ret = gfs2_glock_nq(&gh);
	if (ret)
		goto out_uninit;
retry_under_glock:
	pagefault_disable();
	ret = generic_file_read_iter(iocb, to);
	pagefault_enable();
	if (ret > 0)
		written += ret;

	if (should_fault_in_pages(ret, to, &prev_count, &window_size)) {
		size_t leftover;

		gfs2_holder_allow_demote(&gh);
		leftover = fault_in_iov_iter_writeable(to, window_size);
		gfs2_holder_disallow_demote(&gh);
		if (leftover != window_size) {
			if (!gfs2_holder_queued(&gh)) {
				if (written)
					goto out_uninit;
				goto retry;
			}
			goto retry_under_glock;
		}
	}
	if (gfs2_holder_queued(&gh))
		gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	return written ? written : ret;
}

static ssize_t gfs2_file_buffered_write(struct kiocb *iocb,
					struct iov_iter *from,
					struct gfs2_holder *gh)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_holder *statfs_gh = NULL;
	size_t prev_count = 0, window_size = 0;
	size_t read = 0;
	ssize_t ret;

	/*
	 * In this function, we disable page faults when we're holding the
	 * inode glock while doing I/O. If a page fault occurs, we indicate
	 * that the inode glock may be dropped, fault in the pages manually,
	 * and retry.
	 */

	if (inode == sdp->sd_rindex) {
		statfs_gh = kmalloc(sizeof(*statfs_gh), GFP_NOFS);
		if (!statfs_gh)
			return -ENOMEM;
	}

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, gh);
retry:
	ret = gfs2_glock_nq(gh);
	if (ret)
		goto out_uninit;
retry_under_glock:
	if (inode == sdp->sd_rindex) {
		struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);

		ret = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE,
					 GL_NOCACHE, statfs_gh);
		if (ret)
			goto out_unlock;
	}

	current->backing_dev_info = inode_to_bdi(inode);
	pagefault_disable();
	ret = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops);
	pagefault_enable();
	current->backing_dev_info = NULL;
	if (ret > 0) {
		iocb->ki_pos += ret;
		read += ret;
	}

	if (inode == sdp->sd_rindex)
		gfs2_glock_dq_uninit(statfs_gh);

	if (should_fault_in_pages(ret, from, &prev_count, &window_size)) {
		size_t leftover;

		gfs2_holder_allow_demote(gh);
		leftover = fault_in_iov_iter_readable(from, window_size);
		gfs2_holder_disallow_demote(gh);
		if (leftover != window_size) {
			if (!gfs2_holder_queued(gh)) {
				if (read)
					goto out_uninit;
				goto retry;
			}
			goto retry_under_glock;
		}
	}
out_unlock:
	if (gfs2_holder_queued(gh))
		gfs2_glock_dq(gh);
out_uninit:
	gfs2_holder_uninit(gh);
	if (statfs_gh)
		kfree(statfs_gh);
	return read ? read : ret;
}
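
/*
 * The resource index (sd_rindex) is only written when the filesystem is
 * being grown. In that case the statfs inode glock is taken alongside the
 * rindex glock above, presumably because the statfs data is updated
 * together with the new resource groups; GL_NOCACHE makes sure the lock
 * state is dropped as soon as it is released.
 */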

/**
 * gfs2_file_write_iter - Perform a write to a file
 * @iocb: The io context
 * @from: The data to write
 *
 * We have to do a lock/unlock here to refresh the inode size for
 * O_APPEND writes, otherwise we can end up writing at the wrong
 * offset. There is still a race, but provided the app is using its
 * own file locking, this will make O_APPEND work as expected.
 *
 */

static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	ssize_t ret;

	gfs2_size_hint(file, iocb->ki_pos, iov_iter_count(from));

	if (iocb->ki_flags & IOCB_APPEND) {
		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
		if (ret)
			return ret;
		gfs2_glock_dq_uninit(&gh);
	}

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out_unlock;

	ret = file_remove_privs(file);
	if (ret)
		goto out_unlock;

	ret = file_update_time(file);
	if (ret)
		goto out_unlock;

	if (iocb->ki_flags & IOCB_DIRECT) {
		struct address_space *mapping = file->f_mapping;
		ssize_t buffered, ret2;

		ret = gfs2_file_direct_write(iocb, from, &gh);
		if (ret < 0 || !iov_iter_count(from))
			goto out_unlock;

		iocb->ki_flags |= IOCB_DSYNC;
		buffered = gfs2_file_buffered_write(iocb, from, &gh);
		if (unlikely(buffered <= 0)) {
			if (!ret)
				ret = buffered;
			goto out_unlock;
		}

		/*
		 * We need to ensure that the page cache pages are written to
		 * disk and invalidated to preserve the expected O_DIRECT
		 * semantics. If the writeback or invalidate fails, only report
		 * the direct I/O range as we don't know if the buffered pages
		 * made it to disk.
		 */
		ret2 = generic_write_sync(iocb, buffered);
		invalidate_mapping_pages(mapping,
				(iocb->ki_pos - buffered) >> PAGE_SHIFT,
				(iocb->ki_pos - 1) >> PAGE_SHIFT);
		if (!ret || ret2 > 0)
			ret += ret2;
	} else {
		ret = gfs2_file_buffered_write(iocb, from, &gh);
		if (likely(ret > 0))
			ret = generic_write_sync(iocb, ret);
	}

out_unlock:
	inode_unlock(inode);
	return ret;
}
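
/*
 * When a direct write only partially completes, the remainder is written
 * through the page cache above. IOCB_DSYNC is set for that buffered tail so
 * that generic_write_sync() makes it durable, and the affected page cache
 * range is then invalidated to preserve O_DIRECT semantics as far as
 * possible.
 */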

static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
			   int mode)
{
	struct super_block *sb = inode->i_sb;
	struct gfs2_inode *ip = GFS2_I(inode);
	loff_t end = offset + len;
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (unlikely(error))
		return error;

	gfs2_trans_add_meta(ip->i_gl, dibh);

	if (gfs2_is_stuffed(ip)) {
		error = gfs2_unstuff_dinode(ip);
		if (unlikely(error))
			goto out;
	}

	while (offset < end) {
		struct iomap iomap = { };

		error = gfs2_iomap_alloc(inode, offset, end - offset, &iomap);
		if (error)
			goto out;
		offset = iomap.offset + iomap.length;
		if (!(iomap.flags & IOMAP_F_NEW))
			continue;
		error = sb_issue_zeroout(sb, iomap.addr >> inode->i_blkbits,
					 iomap.length >> inode->i_blkbits,
					 GFP_NOFS);
		if (error) {
			fs_err(GFS2_SB(inode), "Failed to zero data buffers\n");
			goto out;
		}
	}
out:
	brelse(dibh);
	return error;
}

/**
 * calc_max_reserv() - Reverse of write_calc_reserv. Given a number of
 *                     blocks, determine how many bytes can be written.
 * @ip:          The inode in question.
 * @len:         Max cap of bytes. What we return in *len must be <= this.
 * @data_blocks: Compute and return the number of data blocks needed
 * @ind_blocks:  Compute and return the number of indirect blocks needed
 * @max_blocks:  The total blocks available to work with.
 *
 * Returns: void, but @len, @data_blocks and @ind_blocks are filled in.
 */
static void calc_max_reserv(struct gfs2_inode *ip, loff_t *len,
			    unsigned int *data_blocks, unsigned int *ind_blocks,
			    unsigned int max_blocks)
{
	loff_t max = *len;
	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);

	for (tmp = max_data; tmp > sdp->sd_diptrs;) {
		tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
		max_data -= tmp;
	}

	*data_blocks = max_data;
	*ind_blocks = max_blocks - max_data;
	*len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift;
	if (*len > max) {
		*len = max;
		gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks);
	}
}
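
/*
 * Worked example with hypothetical geometry: sd_diptrs = 483 (pointers in
 * the dinode), sd_inptrs = 509 (pointers per indirect block), and
 * max_data = 10000 after the height adjustment. The first pass reserves
 * DIV_ROUND_UP(10000, 509) = 20 blocks for indirect metadata, leaving
 * 9980; since 20 <= 483 the loop stops, so 9980 of the 10000 blocks remain
 * available as data blocks.
 */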

static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_alloc_parms ap = { .aflags = 0, };
	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
	loff_t bytes, max_bytes, max_blks;
	int error;
	const loff_t pos = offset;
	const loff_t count = len;
	loff_t bsize_mask = ~((loff_t)sdp->sd_sb.sb_bsize - 1);
	loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
	loff_t max_chunk_size = UINT_MAX & bsize_mask;

	next = (next + 1) << sdp->sd_sb.sb_bsize_shift;

	offset &= bsize_mask;

	len = next - offset;
	bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2;
	if (!bytes)
		bytes = UINT_MAX;
	bytes &= bsize_mask;
	if (bytes == 0)
		bytes = sdp->sd_sb.sb_bsize;

	gfs2_size_hint(file, offset, len);

	gfs2_write_calc_reserv(ip, PAGE_SIZE, &data_blocks, &ind_blocks);
	ap.min_target = data_blocks + ind_blocks;

	while (len > 0) {
		if (len < bytes)
			bytes = len;
		if (!gfs2_write_alloc_required(ip, offset, bytes)) {
			len -= bytes;
			offset += bytes;
			continue;
		}

		/* We need to determine how many bytes we can actually
		 * fallocate without exceeding quota or going over the
		 * end of the fs. We start off optimistically by assuming
		 * we can write max_bytes */
		max_bytes = (len > max_chunk_size) ? max_chunk_size : len;

		/* Since max_bytes is most likely a theoretical max, we
		 * calculate a more realistic 'bytes' to serve as a good
		 * starting point for the number of bytes we may be able
		 * to write */
		gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
		ap.target = data_blocks + ind_blocks;

		error = gfs2_quota_lock_check(ip, &ap);
		if (error)
			return error;
		/* ap.allowed tells us how many blocks quota will allow
		 * us to write. Check if this reduces max_blks */
		max_blks = UINT_MAX;
		if (ap.allowed)
			max_blks = ap.allowed;

		error = gfs2_inplace_reserve(ip, &ap);
		if (error)
			goto out_qunlock;

		/* check if the selected rgrp limits our max_blks further */
		if (ip->i_res.rs_reserved < max_blks)
			max_blks = ip->i_res.rs_reserved;

		/* Almost done. Calculate bytes that can be written using
		 * max_blks. We also recompute max_bytes, data_blocks and
		 * ind_blocks */
		calc_max_reserv(ip, &max_bytes, &data_blocks,
				&ind_blocks, max_blks);

		rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
			  RES_RG_HDR + gfs2_rg_blocks(ip, data_blocks + ind_blocks);
		if (gfs2_is_jdata(ip))
			rblocks += data_blocks ? data_blocks : 1;

		error = gfs2_trans_begin(sdp, rblocks,
					 PAGE_SIZE >> inode->i_blkbits);
		if (error)
			goto out_trans_fail;

		error = fallocate_chunk(inode, offset, max_bytes, mode);
		gfs2_trans_end(sdp);

		if (error)
			goto out_trans_fail;

		len -= max_bytes;
		offset += max_bytes;
		gfs2_inplace_release(ip);
		gfs2_quota_unlock(ip);
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && (pos + count) > inode->i_size)
		i_size_write(inode, pos + count);
	file_update_time(file);
	mark_inode_dirty(inode);

	if ((file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host))
		return vfs_fsync_range(file, pos, pos + count - 1,
				       (file->f_flags & __O_SYNC) ? 0 : 1);
	return 0;

out_trans_fail:
	gfs2_inplace_release(ip);
out_qunlock:
	gfs2_quota_unlock(ip);
	return error;
}
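
/*
 * The chunked loop above bounds each transaction: every iteration reserves
 * quota, resource group, and log space for at most max_chunk_size bytes, so
 * a huge fallocate request becomes a series of modest transactions rather
 * than one unboundedly large one.
 */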

static long gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int ret;

	if (mode & ~(FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE))
		return -EOPNOTSUPP;
	/* fallocate is needed by gfs2_grow to reserve space in the rindex */
	if (gfs2_is_jdata(ip) && inode != sdp->sd_rindex)
		return -EOPNOTSUPP;

	inode_lock(inode);

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (ret)
		goto out_uninit;

	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
	    (offset + len) > inode->i_size) {
		ret = inode_newsize_ok(inode, offset + len);
		if (ret)
			goto out_unlock;
	}

	ret = get_write_access(inode);
	if (ret)
		goto out_unlock;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		ret = __gfs2_punch_hole(file, offset, len);
	} else {
		ret = __gfs2_fallocate(file, mode, offset, len);
	}
	gfs2_rs_deltree(&ip->i_res);

	put_write_access(inode);

out_unlock:
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	inode_unlock(inode);
	return ret;
}

static ssize_t gfs2_file_splice_write(struct pipe_inode_info *pipe,
				      struct file *out, loff_t *ppos,
				      size_t len, unsigned int flags)
{
	ssize_t ret;

	gfs2_size_hint(out, *ppos, len);

	ret = iter_file_splice_write(pipe, out, ppos, len, flags);
	return ret;
}

#ifdef CONFIG_GFS2_FS_LOCKING_DLM

/**
 * gfs2_lock - acquire/release a posix lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;

	if (!(fl->fl_flags & FL_POSIX))
		return -ENOLCK;
	if (cmd == F_CANCELLK) {
		/* Hack: */
		cmd = F_SETLK;
		fl->fl_type = F_UNLCK;
	}
	if (unlikely(gfs2_withdrawn(sdp))) {
		if (fl->fl_type == F_UNLCK)
			locks_lock_file_wait(file, fl);
		return -EIO;
	}
	if (IS_GETLK(cmd))
		return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl);
	else if (fl->fl_type == F_UNLCK)
		return dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl);
	else
		return dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl);
}

static int do_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_file *fp = file->private_data;
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;
	struct gfs2_inode *ip = GFS2_I(file_inode(file));
	struct gfs2_glock *gl;
	unsigned int state;
	u16 flags;
	int error = 0;
	int sleeptime;

	state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
	flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY_1CB) | GL_EXACT;

	mutex_lock(&fp->f_fl_mutex);

	if (gfs2_holder_initialized(fl_gh)) {
		struct file_lock request;
		if (fl_gh->gh_state == state)
			goto out;
		locks_init_lock(&request);
		request.fl_type = F_UNLCK;
		request.fl_flags = FL_FLOCK;
		locks_lock_file_wait(file, &request);
		gfs2_glock_dq(fl_gh);
		gfs2_holder_reinit(state, flags, fl_gh);
	} else {
		error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
				       &gfs2_flock_glops, CREATE, &gl);
		if (error)
			goto out;
		gfs2_holder_init(gl, state, flags, fl_gh);
		gfs2_glock_put(gl);
	}
	for (sleeptime = 1; sleeptime <= 4; sleeptime <<= 1) {
		error = gfs2_glock_nq(fl_gh);
		if (error != GLR_TRYFAILED)
			break;
		fl_gh->gh_flags = LM_FLAG_TRY | GL_EXACT;
		fl_gh->gh_error = 0;
		msleep(sleeptime);
	}
	if (error) {
		gfs2_holder_uninit(fl_gh);
		if (error == GLR_TRYFAILED)
			error = -EAGAIN;
	} else {
		error = locks_lock_file_wait(file, fl);
		gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
	}

out:
	mutex_unlock(&fp->f_fl_mutex);
	return error;
}
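
/*
 * The flock glock request above is first issued with LM_FLAG_TRY_1CB (a
 * trylock that still sends one callback to the current holder). On
 * GLR_TRYFAILED the loop backs off with msleep(1), msleep(2) and msleep(4)
 * before giving up with -EAGAIN; if the caller asked to wait (IS_SETLKW),
 * the initial request simply blocks instead.
 */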

static void do_unflock(struct file *file, struct file_lock *fl)
{
	struct gfs2_file *fp = file->private_data;
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;

	mutex_lock(&fp->f_fl_mutex);
	locks_lock_file_wait(file, fl);
	if (gfs2_holder_initialized(fl_gh)) {
		gfs2_glock_dq(fl_gh);
		gfs2_holder_uninit(fl_gh);
	}
	mutex_unlock(&fp->f_fl_mutex);
}

/**
 * gfs2_flock - acquire/release a flock lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
{
	if (!(fl->fl_flags & FL_FLOCK))
		return -ENOLCK;
	if (fl->fl_type & LOCK_MAND)
		return -EOPNOTSUPP;

	if (fl->fl_type == F_UNLCK) {
		do_unflock(file, fl);
		return 0;
	} else {
		return do_flock(file, cmd, fl);
	}
}

const struct file_operations gfs2_file_fops = {
	.llseek		= gfs2_llseek,
	.read_iter	= gfs2_file_read_iter,
	.write_iter	= gfs2_file_write_iter,
	.iopoll		= iomap_dio_iopoll,
	.unlocked_ioctl	= gfs2_ioctl,
	.compat_ioctl	= gfs2_compat_ioctl,
	.mmap		= gfs2_mmap,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.lock		= gfs2_lock,
	.flock		= gfs2_flock,
	.splice_read	= generic_file_splice_read,
	.splice_write	= gfs2_file_splice_write,
	.setlease	= simple_nosetlease,
	.fallocate	= gfs2_fallocate,
};

const struct file_operations gfs2_dir_fops = {
	.iterate_shared	= gfs2_readdir,
	.unlocked_ioctl	= gfs2_ioctl,
	.compat_ioctl	= gfs2_compat_ioctl,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.lock		= gfs2_lock,
	.flock		= gfs2_flock,
	.llseek		= default_llseek,
};

#endif /* CONFIG_GFS2_FS_LOCKING_DLM */

const struct file_operations gfs2_file_fops_nolock = {
	.llseek		= gfs2_llseek,
	.read_iter	= gfs2_file_read_iter,
	.write_iter	= gfs2_file_write_iter,
	.iopoll		= iomap_dio_iopoll,
	.unlocked_ioctl	= gfs2_ioctl,
	.compat_ioctl	= gfs2_compat_ioctl,
	.mmap		= gfs2_mmap,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.splice_read	= generic_file_splice_read,
	.splice_write	= gfs2_file_splice_write,
	.setlease	= generic_setlease,
	.fallocate	= gfs2_fallocate,
};

const struct file_operations gfs2_dir_fops_nolock = {
	.iterate_shared	= gfs2_readdir,
	.unlocked_ioctl	= gfs2_ioctl,
	.compat_ioctl	= gfs2_compat_ioctl,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.llseek		= default_llseek,
};