/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/blkdev.h>
#include <linux/mount.h>
#include <linux/gfs2_ondisk.h>
#include <linux/ext2_fs.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/crc32.h>
#include <linux/writeback.h>
#include <asm/uaccess.h>
#include <linux/dlm.h>
#include <linux/dlm_plock.h>
/**
 * gfs2_llseek - seek to a location in a file
 * @file: the file
 * @offset: the offset to seek to
 * @origin: Where to seek from (SEEK_SET, SEEK_CUR, or SEEK_END)
 *
 * SEEK_END requires the glock for the file because it references the
 * file's size.
 *
 * Returns: The new offset, or errno
 */

static loff_t gfs2_llseek(struct file *file, loff_t offset, int origin)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_holder i_gh;
	loff_t error;

	if (origin == SEEK_END) {
		/* Take a shared glock so i_size is current cluster-wide. */
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (!error) {
			error = generic_file_llseek_unlocked(file, offset, origin);
			gfs2_glock_dq_uninit(&i_gh);
		}
	} else
		error = generic_file_llseek_unlocked(file, offset, origin);

	return error;
}
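/*
 * Illustrative userspace sketch (not part of this file): SEEK_END is the
 * one whence value that depends on an up-to-date i_size, which is why the
 * handler above takes a shared glock for it.  The path is hypothetical:
 */
#if 0
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/gfs2/somefile", O_RDONLY);

	if (fd < 0)
		return 1;
	/* Resolved against the cluster-coherent file size. */
	printf("size: %lld\n", (long long)lseek(fd, 0, SEEK_END));
	close(fd);
	return 0;
}
#endif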
/**
 * gfs2_readdir - Read directory entries from a directory
 * @file: The directory to read from
 * @dirent: Buffer for dirents
 * @filldir: Function used to do the copying
 *
 * Returns: errno
 */

static int gfs2_readdir(struct file *file, void *dirent, filldir_t filldir)
{
	struct inode *dir = file->f_mapping->host;
	struct gfs2_inode *dip = GFS2_I(dir);
	struct gfs2_holder d_gh;
	u64 offset = file->f_pos;
	int error;

	gfs2_holder_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
	error = gfs2_glock_nq(&d_gh);
	if (error) {
		gfs2_holder_uninit(&d_gh);
		return error;
	}

	error = gfs2_dir_read(dir, &offset, dirent, filldir);

	gfs2_glock_dq_uninit(&d_gh);

	file->f_pos = offset;

	return error;
}
/**
 * fsflags_cvt - convert flags via a translation table
 * @table: A table of 32 u32 flags
 * @val: a 32 bit value to convert
 *
 * This function can be used to convert between fsflags values and
 * GFS2's own flags values.
 *
 * Returns: the converted flags
 */

static u32 fsflags_cvt(const u32 *table, u32 val)
{
	u32 res = 0;

	/* For each bit set in val, merge in the corresponding table entry. */
	while (val) {
		if (val & 1)
			res |= *table;
		table++;
		val >>= 1;
	}
	return res;
}
static const u32 fsflags_to_gfs2[32] = {
	[4] = GFS2_DIF_IMMUTABLE,
	[5] = GFS2_DIF_APPENDONLY,
	[7] = GFS2_DIF_NOATIME,
	[12] = GFS2_DIF_EXHASH,
	[14] = GFS2_DIF_INHERIT_JDATA,
};

static const u32 gfs2_to_fsflags[32] = {
	[gfs2fl_Sync] = FS_SYNC_FL,
	[gfs2fl_Immutable] = FS_IMMUTABLE_FL,
	[gfs2fl_AppendOnly] = FS_APPEND_FL,
	[gfs2fl_NoAtime] = FS_NOATIME_FL,
	[gfs2fl_ExHash] = FS_INDEX_FL,
	[gfs2fl_InheritJdata] = FS_JOURNAL_DATA_FL,
};
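/*
 * Illustrative userspace sketch (not part of this file): the table-driven
 * conversion above maps bit i of the input to table[i] in the output.  A
 * standalone demonstration, with a local copy of the helper and made-up
 * example flag values:
 */
#if 0
#include <stdio.h>
#include <stdint.h>

static uint32_t cvt(const uint32_t *table, uint32_t val)
{
	uint32_t res = 0;

	while (val) {
		if (val & 1)
			res |= *table;	/* bit i set -> merge table[i] */
		table++;
		val >>= 1;
	}
	return res;
}

int main(void)
{
	/* Hypothetical mapping: bit 0 -> 0x10, bit 2 -> 0x40. */
	static const uint32_t table[32] = { [0] = 0x10, [2] = 0x40 };

	printf("0x%x\n", cvt(table, 0x5));	/* bits 0 and 2 -> 0x50 */
	return 0;
}
#endif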
static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int error;
	u32 fsflags;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	error = gfs2_glock_nq(&gh);
	if (error)
		goto out_uninit;

	fsflags = fsflags_cvt(gfs2_to_fsflags, ip->i_diskflags);
	if (!S_ISDIR(inode->i_mode) && ip->i_diskflags & GFS2_DIF_JDATA)
		fsflags |= FS_JOURNAL_DATA_FL;
	if (put_user(fsflags, ptr))
		error = -EFAULT;

	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	return error;
}
void gfs2_set_inode_flags(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	unsigned int flags = inode->i_flags;

	flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
	if (ip->i_diskflags & GFS2_DIF_IMMUTABLE)
		flags |= S_IMMUTABLE;
	if (ip->i_diskflags & GFS2_DIF_APPENDONLY)
		flags |= S_APPEND;
	if (ip->i_diskflags & GFS2_DIF_NOATIME)
		flags |= S_NOATIME;
	if (ip->i_diskflags & GFS2_DIF_SYNC)
		flags |= S_SYNC;
	inode->i_flags = flags;
}

/* Flags that can be set by user space */
#define GFS2_FLAGS_USER_SET (GFS2_DIF_JDATA|			\
			     GFS2_DIF_IMMUTABLE|		\
			     GFS2_DIF_APPENDONLY|		\
			     GFS2_DIF_NOATIME|			\
			     GFS2_DIF_SYNC|			\
			     GFS2_DIF_INHERIT_JDATA)
/**
 * do_gfs2_set_flags - set flags on an inode
 * @filp: file pointer
 * @reqflags: The flags to set
 * @mask: Indicates which flags are valid
 *
 */
static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *bh;
	struct gfs2_holder gh;
	int error;
	u32 new_flags, flags;

	error = mnt_want_write(filp->f_path.mnt);
	if (error)
		return error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (error)
		goto out_drop_write;

	error = -EACCES;
	if (!inode_owner_or_capable(inode))
		goto out;

	error = 0;
	flags = ip->i_diskflags;
	new_flags = (flags & ~mask) | (reqflags & mask);
	if ((new_flags ^ flags) == 0)
		goto out;

	error = -EINVAL;
	if ((new_flags ^ flags) & ~GFS2_FLAGS_USER_SET)
		goto out;

	error = -EPERM;
	if (IS_IMMUTABLE(inode) && (new_flags & GFS2_DIF_IMMUTABLE))
		goto out;
	if (IS_APPEND(inode) && (new_flags & GFS2_DIF_APPENDONLY))
		goto out;
	if (((new_flags ^ flags) & GFS2_DIF_IMMUTABLE) &&
	    !capable(CAP_LINUX_IMMUTABLE))
		goto out;
	if (!IS_IMMUTABLE(inode)) {
		error = gfs2_permission(inode, MAY_WRITE);
		if (error)
			goto out;
	}
	if ((flags ^ new_flags) & GFS2_DIF_JDATA) {
		if (flags & GFS2_DIF_JDATA)
			gfs2_log_flush(sdp, ip->i_gl);
		error = filemap_fdatawrite(inode->i_mapping);
		if (error)
			goto out;
		error = filemap_fdatawait(inode->i_mapping);
		if (error)
			goto out;
	}
	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		goto out;
	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error)
		goto out_trans_end;
	gfs2_trans_add_bh(ip->i_gl, bh, 1);
	ip->i_diskflags = new_flags;
	gfs2_dinode_out(ip, bh->b_data);
	brelse(bh);
	gfs2_set_inode_flags(inode);
	gfs2_set_aops(inode);
out_trans_end:
	gfs2_trans_end(sdp);
out:
	gfs2_glock_dq_uninit(&gh);
out_drop_write:
	mnt_drop_write(filp->f_path.mnt);
	return error;
}
static int gfs2_set_flags(struct file *filp, u32 __user *ptr)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	u32 fsflags, gfsflags;

	if (get_user(fsflags, ptr))
		return -EFAULT;

	gfsflags = fsflags_cvt(fsflags_to_gfs2, fsflags);
	if (!S_ISDIR(inode->i_mode)) {
		if (gfsflags & GFS2_DIF_INHERIT_JDATA)
			gfsflags ^= (GFS2_DIF_JDATA | GFS2_DIF_INHERIT_JDATA);
		return do_gfs2_set_flags(filp, gfsflags, ~0);
	}
	return do_gfs2_set_flags(filp, gfsflags, ~GFS2_DIF_JDATA);
}
static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch(cmd) {
	case FS_IOC_GETFLAGS:
		return gfs2_get_flags(filp, (u32 __user *)arg);
	case FS_IOC_SETFLAGS:
		return gfs2_set_flags(filp, (u32 __user *)arg);
	}
	return -ENOTTY;
}
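/*
 * Illustrative userspace sketch (not part of this file): the two commands
 * above are the standard FS_IOC_GETFLAGS/FS_IOC_SETFLAGS interface, the
 * same one chattr(1)/lsattr(1) use.  A minimal round trip on a
 * hypothetical path:
 */
#if 0
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(void)
{
	int fd = open("/mnt/gfs2/somefile", O_RDONLY);
	unsigned int flags;	/* the handler copies a 32-bit value */

	if (fd < 0 || ioctl(fd, FS_IOC_GETFLAGS, &flags) < 0)
		return 1;
	flags |= FS_NOATIME_FL;			/* maps to GFS2_DIF_NOATIME */
	if (ioctl(fd, FS_IOC_SETFLAGS, &flags) < 0)
		perror("FS_IOC_SETFLAGS");
	close(fd);
	return 0;
}
#endif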
/**
 * gfs2_allocate_page_backing - Use bmap to allocate blocks
 * @page: The (locked) page to allocate backing for
 *
 * We try to allocate all the blocks required for the page in
 * one go. This might fail for various reasons, so we keep
 * trying until all the blocks to back this page are allocated.
 * If some of the blocks are already allocated, that's ok too.
 */

static int gfs2_allocate_page_backing(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct buffer_head bh;
	unsigned long size = PAGE_CACHE_SIZE;
	u64 lblock = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);

	do {
		bh.b_state = 0;
		bh.b_size = size;
		gfs2_block_map(inode, lblock, &bh, 1);
		if (!buffer_mapped(&bh))
			return -EIO;
		size -= bh.b_size;
		lblock += (bh.b_size >> inode->i_blkbits);
	} while(size > 0);
	return 0;
}
/**
 * gfs2_page_mkwrite - Make a shared, mmap()ed, page writable
 * @vma: The virtual memory area
 * @vmf: The virtual memory fault containing the page
 *
 * When the page becomes writable, we need to ensure that we have
 * blocks allocated on disk to back that page.
 */

static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned long last_index;
	u64 pos = page->index << PAGE_CACHE_SHIFT;
	unsigned int data_blocks, ind_blocks, rblocks;
	struct gfs2_holder gh;
	struct gfs2_alloc *al;
	int ret;

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (ret)
		goto out;

	set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
	set_bit(GIF_SW_PAGED, &ip->i_flags);

	if (!gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE))
		goto out_unlock;
	ret = -ENOMEM;
	al = gfs2_alloc_get(ip);
	if (al == NULL)
		goto out_unlock;

	ret = gfs2_quota_lock_check(ip);
	if (ret)
		goto out_alloc_put;
	gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks);
	al->al_requested = data_blocks + ind_blocks;
	ret = gfs2_inplace_reserve(ip);
	if (ret)
		goto out_quota_unlock;

	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks) {
		rblocks += RES_STATFS + RES_QUOTA;
		rblocks += gfs2_rg_blocks(al);
	}
	ret = gfs2_trans_begin(sdp, rblocks, 0);
	if (ret)
		goto out_trans_fail;

	lock_page(page);
	ret = -EINVAL;
	last_index = ip->i_inode.i_size >> PAGE_CACHE_SHIFT;
	if (page->index > last_index)
		goto out_unlock_page;
	ret = 0;
	if (!PageUptodate(page) || page->mapping != ip->i_inode.i_mapping)
		goto out_unlock_page;
	if (gfs2_is_stuffed(ip)) {
		ret = gfs2_unstuff_dinode(ip, page);
		if (ret)
			goto out_unlock_page;
	}
	ret = gfs2_allocate_page_backing(page);

out_unlock_page:
	unlock_page(page);
	gfs2_trans_end(sdp);
out_trans_fail:
	gfs2_inplace_release(ip);
out_quota_unlock:
	gfs2_quota_unlock(ip);
out_alloc_put:
	gfs2_alloc_put(ip);
out_unlock:
	gfs2_glock_dq(&gh);
out:
	gfs2_holder_uninit(&gh);
	if (ret)
		ret = VM_FAULT_SIGBUS;
	return ret;
}
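/*
 * Illustrative userspace sketch (not part of this file): the first write
 * through a shared mapping is what drives gfs2_page_mkwrite() above.
 * A minimal trigger, on a hypothetical file of at least one page:
 */
#if 0
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	int fd = open("/mnt/gfs2/somefile", O_RDWR);
	char *p;

	if (fd < 0)
		return 1;
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;
	p[0] = 'x';	/* write fault -> ->page_mkwrite() allocates backing */
	munmap(p, 4096);
	close(fd);
	return 0;
}
#endif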
static const struct vm_operations_struct gfs2_vm_ops = {
	.fault = filemap_fault,
	.page_mkwrite = gfs2_page_mkwrite,
};
/**
 * gfs2_mmap - set up a file's memory mapping
 * @file: The file to map
 * @vma: The VMA which describes the mapping
 *
 * There is no need to get a lock here unless we should be updating
 * atime. We ignore any locking errors since the only consequence is
 * a missed atime update (which will just be deferred until later).
 *
 * Returns: 0
 */

static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);

	if (!(file->f_flags & O_NOATIME) &&
	    !IS_NOATIME(&ip->i_inode)) {
		struct gfs2_holder i_gh;
		int error;

		gfs2_holder_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
		error = gfs2_glock_nq(&i_gh);
		if (error == 0) {
			file_accessed(file);
			gfs2_glock_dq(&i_gh);
		}
		gfs2_holder_uninit(&i_gh);
	}
	vma->vm_ops = &gfs2_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;

	return 0;
}
/**
 * gfs2_open - open a file
 * @inode: the inode to open
 * @file: the struct file for this opening
 *
 * Returns: errno
 */

static int gfs2_open(struct inode *inode, struct file *file)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder i_gh;
	struct gfs2_file *fp;
	int error;

	fp = kzalloc(sizeof(struct gfs2_file), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;

	mutex_init(&fp->f_fl_mutex);

	gfs2_assert_warn(GFS2_SB(inode), !file->private_data);
	file->private_data = fp;

	if (S_ISREG(ip->i_inode.i_mode)) {
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (error)
			goto fail;

		if (!(file->f_flags & O_LARGEFILE) &&
		    i_size_read(inode) > MAX_NON_LFS) {
			error = -EOVERFLOW;
			goto fail_gunlock;
		}

		gfs2_glock_dq_uninit(&i_gh);
	}

	return 0;

fail_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
fail:
	file->private_data = NULL;
	kfree(fp);
	return error;
}
/**
 * gfs2_close - called to close a struct file
 * @inode: the inode the struct file belongs to
 * @file: the struct file being closed
 *
 * Returns: errno
 */

static int gfs2_close(struct inode *inode, struct file *file)
{
	struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
	struct gfs2_file *fp;

	fp = file->private_data;
	file->private_data = NULL;

	if (gfs2_assert_warn(sdp, fp))
		return -EIO;

	kfree(fp);

	return 0;
}
/**
 * gfs2_fsync - sync the dirty data for a file (across the cluster)
 * @file: the file that points to the dentry
 * @start: the start position in the file to sync
 * @end: the end position in the file to sync
 * @datasync: set if we can ignore timestamp changes
 *
 * The VFS will flush data for us. We only need to worry
 * about metadata here.
 *
 * Returns: errno
 */

static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	struct inode *inode = file->f_mapping->host;
	int sync_state = inode->i_state & (I_DIRTY_SYNC|I_DIRTY_DATASYNC);
	struct gfs2_inode *ip = GFS2_I(inode);
	int ret;

	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (ret)
		return ret;
	mutex_lock(&inode->i_mutex);

	if (datasync)
		sync_state &= ~I_DIRTY_SYNC;

	if (sync_state) {
		ret = sync_inode_metadata(inode, 1);
		if (ret) {
			mutex_unlock(&inode->i_mutex);
			return ret;
		}
		gfs2_ail_flush(ip->i_gl);
	}

	mutex_unlock(&inode->i_mutex);
	return 0;
}
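/*
 * Illustrative userspace sketch (not part of this file): fdatasync(2)
 * reaches the handler above with datasync set, so pure timestamp dirt
 * (I_DIRTY_SYNC) is skipped, while fsync(2) syncs all metadata:
 */
#if 0
#include <unistd.h>

static int flush_data_only(int fd)
{
	return fdatasync(fd);	/* data + size-changing metadata only */
}

static int flush_everything(int fd)
{
	return fsync(fd);	/* data + all inode metadata, timestamps too */
}
#endif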
/**
 * gfs2_file_aio_write - Perform a write to a file
 * @iocb: The io context
 * @iov: The data to write
 * @nr_segs: Number of @iov segments
 * @pos: The file position
 *
 * We have to do a lock/unlock here to refresh the inode size for
 * O_APPEND writes, otherwise we can end up writing at the wrong
 * offset. There is still a race, but provided the app is using its
 * own file locking, this will make O_APPEND work as expected.
 *
 */

static ssize_t gfs2_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
				   unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;

	if (file->f_flags & O_APPEND) {
		struct dentry *dentry = file->f_dentry;
		struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
		struct gfs2_holder gh;
		int ret;

		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
		if (ret)
			return ret;
		gfs2_glock_dq_uninit(&gh);
	}

	return generic_file_aio_write(iocb, iov, nr_segs, pos);
}
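/*
 * Illustrative userspace sketch (not part of this file): the lock/unlock
 * above refreshes i_size so an O_APPEND write from this node starts at the
 * cluster-wide end of file.  Hypothetical path:
 */
#if 0
#include <fcntl.h>
#include <unistd.h>
#include <string.h>

int main(void)
{
	int fd = open("/mnt/gfs2/log", O_WRONLY | O_APPEND);
	const char msg[] = "appended from this node\n";

	if (fd < 0)
		return 1;
	/* Offset is resolved against the refreshed, cluster-coherent size. */
	write(fd, msg, strlen(msg));
	close(fd);
	return 0;
}
#endif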
static int empty_write_end(struct page *page, unsigned from,
			   unsigned to, int mode)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct buffer_head *bh;
	unsigned offset, blksize = 1 << inode->i_blkbits;
	pgoff_t end_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;

	zero_user(page, from, to - from);
	mark_page_accessed(page);

	if (page->index < end_index || !(mode & FALLOC_FL_KEEP_SIZE)) {
		if (!gfs2_is_writeback(ip))
			gfs2_page_add_databufs(ip, page, from, to);

		block_commit_write(page, from, to);
		return 0;
	}

	offset = 0;
	bh = page_buffers(page);
	while (offset < to) {
		if (offset >= from) {
			set_buffer_uptodate(bh);
			mark_buffer_dirty(bh);
			clear_buffer_new(bh);
			write_dirty_buffer(bh, WRITE);
		}
		offset += blksize;
		bh = bh->b_this_page;
	}

	offset = 0;
	bh = page_buffers(page);
	while (offset < to) {
		if (offset >= from) {
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				return -EIO;
		}
		offset += blksize;
		bh = bh->b_this_page;
	}
	return 0;
}
static int needs_empty_write(sector_t block, struct inode *inode)
{
	int error;
	struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };

	bh_map.b_size = 1 << inode->i_blkbits;
	error = gfs2_block_map(inode, block, &bh_map, 0);
	if (unlikely(error))
		return error;
	return !buffer_mapped(&bh_map);
}
static int write_empty_blocks(struct page *page, unsigned from, unsigned to,
			      int mode)
{
	struct inode *inode = page->mapping->host;
	unsigned start, end, next, blksize;
	sector_t block = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
	int ret;

	blksize = 1 << inode->i_blkbits;
	next = end = 0;
	while (next < from) {
		next += blksize;
		block++;
	}
	start = next;
	do {
		next += blksize;
		ret = needs_empty_write(block, inode);
		if (unlikely(ret < 0))
			return ret;
		if (ret == 0) {
			if (end) {
				ret = __block_write_begin(page, start, end - start,
							  gfs2_block_map);
				if (unlikely(ret))
					return ret;
				ret = empty_write_end(page, start, end, mode);
				if (unlikely(ret))
					return ret;
				end = 0;
			}
			start = next;
		} else
			end = next;
		block++;
	} while (next < to);

	if (end) {
		ret = __block_write_begin(page, start, end - start, gfs2_block_map);
		if (unlikely(ret))
			return ret;
		ret = empty_write_end(page, start, end, mode);
		if (unlikely(ret))
			return ret;
	}

	return 0;
}
static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
			   int mode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct buffer_head *dibh;
	int error;
	u64 start = offset >> PAGE_CACHE_SHIFT;
	unsigned int start_offset = offset & ~PAGE_CACHE_MASK;
	u64 end = (offset + len - 1) >> PAGE_CACHE_SHIFT;
	pgoff_t curr;
	struct page *page;
	unsigned int end_offset = (offset + len) & ~PAGE_CACHE_MASK;
	unsigned int from, to;

	if (!end_offset)
		end_offset = PAGE_CACHE_SIZE;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (unlikely(error))
		goto out;

	gfs2_trans_add_bh(ip->i_gl, dibh, 1);

	if (gfs2_is_stuffed(ip)) {
		error = gfs2_unstuff_dinode(ip, NULL);
		if (unlikely(error))
			goto out;
	}

	curr = start;
	offset = start << PAGE_CACHE_SHIFT;
	from = start_offset;
	to = PAGE_CACHE_SIZE;
	while (curr <= end) {
		page = grab_cache_page_write_begin(inode->i_mapping, curr,
						   AOP_FLAG_NOFS);
		if (unlikely(!page)) {
			error = -ENOMEM;
			goto out;
		}

		if (curr == end)
			to = end_offset;
		error = write_empty_blocks(page, from, to, mode);
		if (!error && offset + to > inode->i_size &&
		    !(mode & FALLOC_FL_KEEP_SIZE)) {
			i_size_write(inode, offset + to);
		}

		unlock_page(page);
		page_cache_release(page);
		if (error)
			goto out;
		curr++;
		offset += PAGE_CACHE_SIZE;
		from = 0;
	}

	gfs2_dinode_out(ip, dibh->b_data);
	mark_inode_dirty(inode);

	brelse(dibh);

out:
	return error;
}
static void calc_max_reserv(struct gfs2_inode *ip, loff_t max, loff_t *len,
			    unsigned int *data_blocks, unsigned int *ind_blocks)
{
	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	unsigned int max_blocks = ip->i_alloc->al_rgd->rd_free_clone;
	unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);

	for (tmp = max_data; tmp > sdp->sd_diptrs;) {
		tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
		max_data -= tmp;
	}
	/* This calculation isn't the exact reverse of gfs2_write_calc_reserv,
	   so it might end up with fewer data blocks */
	if (max_data <= *data_blocks)
		return;
	*data_blocks = max_data;
	*ind_blocks = max_blocks - max_data;
	*len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift;
	if (*len > max) {
		*len = max;
		gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks);
	}
}
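/*
 * Illustrative standalone sketch (not part of this file): the loop above
 * subtracts, level by level, the indirect blocks needed to address
 * max_data data blocks.  A rerun of the same arithmetic with made-up
 * fan-outs (509 pointers per indirect block, 483 direct pointers):
 */
#if 0
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int inptrs = 509, diptrs = 483;
	unsigned int max_data = 100000, tmp;

	for (tmp = max_data; tmp > diptrs;) {
		tmp = DIV_ROUND_UP(tmp, inptrs);	/* indirect blocks at this level */
		max_data -= tmp;			/* they eat into the block budget */
	}
	printf("usable data blocks: %u\n", max_data);
	return 0;
}
#endif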
static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
			   loff_t len)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *ip = GFS2_I(inode);
	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
	loff_t bytes, max_bytes;
	struct gfs2_alloc *al;
	int error;
	loff_t bsize_mask = ~((loff_t)sdp->sd_sb.sb_bsize - 1);
	loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
	next = (next + 1) << sdp->sd_sb.sb_bsize_shift;

	/* We only support the FALLOC_FL_KEEP_SIZE mode */
	if (mode & ~FALLOC_FL_KEEP_SIZE)
		return -EOPNOTSUPP;

	offset &= bsize_mask;

	len = next - offset;
	bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2;
	if (!bytes)
		bytes = UINT_MAX;
	bytes &= bsize_mask;
	if (bytes == 0)
		bytes = sdp->sd_sb.sb_bsize;

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
	error = gfs2_glock_nq(&ip->i_gh);
	if (unlikely(error))
		goto out_uninit;

	if (!gfs2_write_alloc_required(ip, offset, len))
		goto out_unlock;

	while (len > 0) {
		if (len < bytes)
			bytes = len;
		al = gfs2_alloc_get(ip);
		if (!al) {
			error = -ENOMEM;
			goto out_unlock;
		}

		error = gfs2_quota_lock_check(ip);
		if (error)
			goto out_alloc_put;

retry:
		gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);

		al->al_requested = data_blocks + ind_blocks;
		error = gfs2_inplace_reserve(ip);
		if (error) {
			if (error == -ENOSPC && bytes > sdp->sd_sb.sb_bsize) {
				bytes >>= 1;
				bytes &= bsize_mask;
				if (bytes == 0)
					bytes = sdp->sd_sb.sb_bsize;
				goto retry;
			}
			goto out_qunlock;
		}
		max_bytes = bytes;
		calc_max_reserv(ip, len, &max_bytes, &data_blocks, &ind_blocks);
		al->al_requested = data_blocks + ind_blocks;

		rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
			  RES_RG_HDR + gfs2_rg_blocks(al);
		if (gfs2_is_jdata(ip))
			rblocks += data_blocks ? data_blocks : 1;

		error = gfs2_trans_begin(sdp, rblocks,
					 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
		if (error)
			goto out_trans_fail;

		error = fallocate_chunk(inode, offset, max_bytes, mode);
		gfs2_trans_end(sdp);

		if (error)
			goto out_trans_fail;

		len -= max_bytes;
		offset += max_bytes;
		gfs2_inplace_release(ip);
		gfs2_quota_unlock(ip);
		gfs2_alloc_put(ip);
	}
	goto out_unlock;

out_trans_fail:
	gfs2_inplace_release(ip);
out_qunlock:
	gfs2_quota_unlock(ip);
out_alloc_put:
	gfs2_alloc_put(ip);
out_unlock:
	gfs2_glock_dq(&ip->i_gh);
out_uninit:
	gfs2_holder_uninit(&ip->i_gh);
	return error;
}
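/*
 * Illustrative userspace sketch (not part of this file): only
 * FALLOC_FL_KEEP_SIZE (or no flags) is accepted above; other modes get
 * -EOPNOTSUPP.  Hypothetical path:
 */
#if 0
#define _GNU_SOURCE
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <linux/falloc.h>

int main(void)
{
	int fd = open("/mnt/gfs2/somefile", O_WRONLY);

	if (fd < 0)
		return 1;
	/* Preallocate 16MiB without changing the visible file size. */
	if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 16 << 20) < 0)
		perror("fallocate");
	close(fd);
	return 0;
}
#endif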
#ifdef CONFIG_GFS2_FS_LOCKING_DLM

/**
 * gfs2_setlease - acquire/release a file lease
 * @file: the file pointer
 * @arg: lease type
 * @fl: file lock
 *
 * We don't currently have a way to enforce a lease across the whole
 * cluster; until we do, disable leases (by just returning -EINVAL),
 * unless the administrator has requested purely local locking.
 *
 * Locking: called under lock_flocks
 *
 * Returns: errno
 */

static int gfs2_setlease(struct file *file, long arg, struct file_lock **fl)
{
	return -EINVAL;
}
/**
 * gfs2_lock - acquire/release a posix lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;

	if (!(fl->fl_flags & FL_POSIX))
		return -ENOLCK;
	if (__mandatory_lock(&ip->i_inode) && fl->fl_type != F_UNLCK)
		return -ENOLCK;

	if (cmd == F_CANCELLK) {
		/* Hack: */
		cmd = F_SETLK;
		fl->fl_type = F_UNLCK;
	}
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;
	if (IS_GETLK(cmd))
		return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl);
	else if (fl->fl_type == F_UNLCK)
		return dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl);
	else
		return dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl);
}
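/*
 * Illustrative userspace sketch (not part of this file): POSIX locks taken
 * with fcntl(2) are routed through dlm_posix_lock() above and are therefore
 * cluster-wide.  Assuming fd is open for writing:
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int lock_first_4k(int fd)
{
	struct flock fl;

	memset(&fl, 0, sizeof(fl));
	fl.l_type = F_WRLCK;	/* exclusive across all cluster nodes */
	fl.l_whence = SEEK_SET;
	fl.l_start = 0;
	fl.l_len = 4096;
	return fcntl(fd, F_SETLKW, &fl);	/* wait until granted */
}
#endif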
static int do_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_file *fp = file->private_data;
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;
	struct gfs2_inode *ip = GFS2_I(file->f_path.dentry->d_inode);
	struct gfs2_glock *gl;
	unsigned int state;
	int flags;
	int error = 0;

	state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
	flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE;

	mutex_lock(&fp->f_fl_mutex);

	gl = fl_gh->gh_gl;
	if (gl) {
		if (fl_gh->gh_state == state)
			goto out;
		flock_lock_file_wait(file,
				     &(struct file_lock){.fl_type = F_UNLCK});
		gfs2_glock_dq_wait(fl_gh);
		gfs2_holder_reinit(state, flags, fl_gh);
	} else {
		error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
				       &gfs2_flock_glops, CREATE, &gl);
		if (error)
			goto out;
		gfs2_holder_init(gl, state, flags, fl_gh);
		gfs2_glock_put(gl);
	}
	error = gfs2_glock_nq(fl_gh);
	if (error) {
		gfs2_holder_uninit(fl_gh);
		if (error == GLR_TRYFAILED)
			error = -EAGAIN;
	} else {
		error = flock_lock_file_wait(file, fl);
		gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
	}

out:
	mutex_unlock(&fp->f_fl_mutex);
	return error;
}
static void do_unflock(struct file *file, struct file_lock *fl)
{
	struct gfs2_file *fp = file->private_data;
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;

	mutex_lock(&fp->f_fl_mutex);
	flock_lock_file_wait(file, fl);
	if (fl_gh->gh_gl) {
		gfs2_glock_dq_wait(fl_gh);
		gfs2_holder_uninit(fl_gh);
	}
	mutex_unlock(&fp->f_fl_mutex);
}
/**
 * gfs2_flock - acquire/release a flock lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
{
	if (!(fl->fl_flags & FL_FLOCK))
		return -ENOLCK;
	if (fl->fl_type & LOCK_MAND)
		return -EOPNOTSUPP;

	if (fl->fl_type == F_UNLCK) {
		do_unflock(file, fl);
		return 0;
	} else {
		return do_flock(file, cmd, fl);
	}
}
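/*
 * Illustrative userspace sketch (not part of this file): flock(2) locks go
 * through do_flock()/do_unflock() above and are backed by a dedicated flock
 * glock, so they too are cluster-wide.  Assuming fd is open:
 */
#if 0
#include <sys/file.h>

static int with_flock(int fd)
{
	if (flock(fd, LOCK_EX) < 0)	/* exclusive, waits if contended */
		return -1;
	/* ... critical section ... */
	return flock(fd, LOCK_UN);
}
#endif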
const struct file_operations gfs2_file_fops = {
	.llseek		= gfs2_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.write		= do_sync_write,
	.aio_write	= gfs2_file_aio_write,
	.unlocked_ioctl	= gfs2_ioctl,
	.mmap		= gfs2_mmap,
	.open		= gfs2_open,
	.release	= gfs2_close,
	.fsync		= gfs2_fsync,
	.lock		= gfs2_lock,
	.flock		= gfs2_flock,
	.splice_read	= generic_file_splice_read,
	.splice_write	= generic_file_splice_write,
	.setlease	= gfs2_setlease,
	.fallocate	= gfs2_fallocate,
};
const struct file_operations gfs2_dir_fops = {
	.readdir	= gfs2_readdir,
	.unlocked_ioctl	= gfs2_ioctl,
	.open		= gfs2_open,
	.release	= gfs2_close,
	.fsync		= gfs2_fsync,
	.lock		= gfs2_lock,
	.flock		= gfs2_flock,
	.llseek		= default_llseek,
};

#endif /* CONFIG_GFS2_FS_LOCKING_DLM */
const struct file_operations gfs2_file_fops_nolock = {
	.llseek		= gfs2_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.write		= do_sync_write,
	.aio_write	= gfs2_file_aio_write,
	.unlocked_ioctl	= gfs2_ioctl,
	.mmap		= gfs2_mmap,
	.open		= gfs2_open,
	.release	= gfs2_close,
	.fsync		= gfs2_fsync,
	.splice_read	= generic_file_splice_read,
	.splice_write	= generic_file_splice_write,
	.setlease	= generic_setlease,
	.fallocate	= gfs2_fallocate,
};
const struct file_operations gfs2_dir_fops_nolock = {
	.readdir	= gfs2_readdir,
	.unlocked_ioctl	= gfs2_ioctl,
	.open		= gfs2_open,
	.release	= gfs2_close,
	.fsync		= gfs2_fsync,
	.llseek		= default_llseek,
};