// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/mount.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/uuid.h>
#include <linux/file.h>
#include <linux/nls.h>
#include <linux/sched/signal.h>

#include <trace/events/f2fs.h>
#include <uapi/linux/f2fs.h>

static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
	struct inode *inode = file_inode(vmf->vma->vm_file);

	down_read(&F2FS_I(inode)->i_mmap_sem);
	ret = filemap_fault(vmf);
	up_read(&F2FS_I(inode)->i_mmap_sem);

	f2fs_update_iostat(F2FS_I_SB(inode), APP_MAPPED_READ_IO,

	trace_f2fs_filemap_fault(inode, vmf->pgoff, (unsigned long)ret);

static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	bool need_alloc = true;

	if (unlikely(IS_IMMUTABLE(inode)))
		return VM_FAULT_SIGBUS;

	if (unlikely(f2fs_cp_error(sbi))) {

	if (!f2fs_is_checkpoint_ready(sbi)) {

	err = f2fs_convert_inline_inode(inode);

#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (f2fs_compressed_file(inode)) {
		int ret = f2fs_is_compressed_cluster(inode, page->index);

		if (ret < F2FS_I(inode)->i_cluster_size) {

	/* should be done outside of any locked page */
	f2fs_balance_fs(sbi, true);

	sb_start_pagefault(inode->i_sb);

	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	file_update_time(vmf->vma->vm_file);
	down_read(&F2FS_I(inode)->i_mmap_sem);

	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {

	/* block allocation */
	f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_block(&dn, page->index);
	f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);

#ifdef CONFIG_F2FS_FS_COMPRESSION
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);

	f2fs_wait_on_page_writeback(page, DATA, false, true);

	/* wait for GCed page writeback via META_MAPPING */
	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))

	/* page is wholly or partially inside EOF */
	if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
						i_size_read(inode)) {
		offset = i_size_read(inode) & ~PAGE_MASK;
		zero_user_segment(page, offset, PAGE_SIZE);

	set_page_dirty(page);
	if (!PageUptodate(page))
		SetPageUptodate(page);

	f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);
	f2fs_update_time(sbi, REQ_TIME);

	trace_f2fs_vm_page_mkwrite(page, DATA);

	up_read(&F2FS_I(inode)->i_mmap_sem);

	sb_end_pagefault(inode->i_sb);

	return block_page_mkwrite_return(err);

static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault		= f2fs_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= f2fs_vm_page_mkwrite,
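/*
 * Illustrative sketch (editorial addition, not part of the original source):
 * a store through a shared mapping is what drives ->page_mkwrite above.
 * Hypothetical userspace usage, assuming an existing non-empty file:
 *
 *	int fd = open(path, O_RDWR);
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *	p[0] = 'x';		// first store faults; f2fs_vm_page_mkwrite()
 *				// readies/allocates the block under locks
 *	msync(p, 4096, MS_SYNC);
 */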
static int get_parent_ino(struct inode *inode, nid_t *pino)
	struct dentry *dentry;

	/*
	 * Make sure to get the non-deleted alias.  The alias associated with
	 * the open file descriptor being fsync()'ed may be deleted already.
	 */
	dentry = d_find_alias(inode);

	*pino = parent_ino(dentry);

static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	enum cp_reason_type cp_reason = CP_NO_NEEDED;

	if (!S_ISREG(inode->i_mode))
		cp_reason = CP_NON_REGULAR;
	else if (f2fs_compressed_file(inode))
		cp_reason = CP_COMPRESSED;
	else if (inode->i_nlink != 1)
		cp_reason = CP_HARDLINK;
	else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
		cp_reason = CP_SB_NEED_CP;
	else if (file_wrong_pino(inode))
		cp_reason = CP_WRONG_PINO;
	else if (!f2fs_space_for_roll_forward(sbi))
		cp_reason = CP_NO_SPC_ROLL;
	else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
		cp_reason = CP_NODE_NEED_CP;
	else if (test_opt(sbi, FASTBOOT))
		cp_reason = CP_FASTBOOT_MODE;
	else if (F2FS_OPTION(sbi).active_logs == 2)
		cp_reason = CP_SPEC_LOG_NUM;
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT &&
		f2fs_need_dentry_mark(sbi, inode->i_ino) &&
		f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
		cp_reason = CP_RECOVER_DIR;
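/*
 * Editorial note (an assumption drawn from the chain above, not original
 * text): fsync() stays cheap only while roll-forward recovery can replay
 * the inode from its node chain; each condition above marks a case where
 * that guarantee breaks (non-regular or compressed files, hardlinks, a
 * stale parent ino, no roll-forward space, fastboot, strict fsync on a
 * moved directory entry), so a full checkpoint is issued instead.
 */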
static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);

	/* but we need to make sure there are no pending inode updates */
	if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino))

static void try_to_fix_pino(struct inode *inode)
	struct f2fs_inode_info *fi = F2FS_I(inode);

	down_write(&fi->i_sem);
	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
			get_parent_ino(inode, &pino)) {
		f2fs_i_pino_write(inode, pino);
		file_got_pino(inode);
	up_write(&fi->i_sem);

static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
						int datasync, bool atomic)
	struct inode *inode = file->f_mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t ino = inode->i_ino;
	enum cp_reason_type cp_reason = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
	};
	unsigned int seq_id = 0;

	if (unlikely(f2fs_readonly(inode->i_sb) ||
				is_sbi_flag_set(sbi, SBI_CP_DISABLED)))

	trace_f2fs_sync_file_enter(inode);

	if (S_ISDIR(inode->i_mode))

	/* if fdatasync is triggered, let's do in-place-update */
	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
		set_inode_flag(inode, FI_NEED_IPU);
	ret = file_write_and_wait_range(file, start, end);
	clear_inode_flag(inode, FI_NEED_IPU);

		trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);

	/* if the inode is dirty, let's recover all the time */
	if (!f2fs_skip_inode_update(inode, datasync)) {
		f2fs_write_inode(inode, NULL);

	/*
	 * if there is no written data, don't waste time writing recovery info.
	 */
	if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
			!f2fs_exist_written_data(sbi, ino, APPEND_INO)) {

		/* it may call write_inode just prior to fsync */
		if (need_inode_page_update(sbi, ino))

		if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
				f2fs_exist_written_data(sbi, ino, UPDATE_INO))

	/*
	 * Both fdatasync() and fsync() can be recovered from
	 */
	down_read(&F2FS_I(inode)->i_sem);
	cp_reason = need_do_checkpoint(inode);
	up_read(&F2FS_I(inode)->i_sem);

		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);

		/*
		 * We've secured consistency through sync_fs.  The following
		 * pino will be used only for fsynced inodes after checkpoint.
		 */
		try_to_fix_pino(inode);
		clear_inode_flag(inode, FI_APPEND_WRITE);
		clear_inode_flag(inode, FI_UPDATE_WRITE);

	atomic_inc(&sbi->wb_sync_req[NODE]);
	ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
	atomic_dec(&sbi->wb_sync_req[NODE]);

	/* if cp_error was enabled, we should avoid an infinite loop */
	if (unlikely(f2fs_cp_error(sbi))) {

	if (f2fs_need_inode_block_update(sbi, ino)) {
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_write_inode(inode, NULL);

	/*
	 * If it's atomic_write, it's just fine to keep write ordering. So
	 * here we don't need to wait for node write completion, since we use
	 * the node chain which serializes node blocks. If any node write is
	 * reordered, we can simply see a broken chain, resulting in stopping
	 * roll-forward recovery. It means we'll recover all or no node blocks
	 */
		ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id);

	/* once recovery info is written, we don't need to track this */
	f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
	clear_inode_flag(inode, FI_APPEND_WRITE);

	if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER)
		ret = f2fs_issue_flush(sbi, inode->i_ino);

	f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
	clear_inode_flag(inode, FI_UPDATE_WRITE);
	f2fs_remove_ino_entry(sbi, ino, FLUSH_INO);

	f2fs_update_time(sbi, REQ_TIME);

	trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);

int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))

	return f2fs_do_sync_file(file, start, end, datasync, false);
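/*
 * Illustrative sketch (editorial addition): the datasync parameter above
 * maps onto the userspace pair
 *
 *	fsync(fd);	// data + inode metadata; may force a checkpoint
 *	fdatasync(fd);	// data only; prefers in-place updates (FI_NEED_IPU)
 *
 * so on f2fs an fdatasync() can often complete with roll-forward recovery
 * records alone, without triggering a full checkpoint.
 */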
static bool __found_offset(struct address_space *mapping, block_t blkaddr,
					pgoff_t index, int whence)
	if (__is_valid_data_blkaddr(blkaddr))
	if (blkaddr == NEW_ADDR &&
	    xa_get_mark(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY))
	if (blkaddr == NULL_ADDR)

static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;
	struct dnode_of_data dn;
	pgoff_t pgofs, end_offset;
	loff_t data_ofs = offset;

	isize = i_size_read(inode);

	/* handle inline data case */
	if (f2fs_has_inline_data(inode)) {
		if (whence == SEEK_HOLE) {
		} else if (whence == SEEK_DATA) {

	pgofs = (pgoff_t)(offset >> PAGE_SHIFT);

	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
		if (err && err != -ENOENT) {
		} else if (err == -ENOENT) {
			/* direct node does not exist */
			if (whence == SEEK_DATA) {
				pgofs = f2fs_get_next_page_offset(&dn, pgofs);

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

		/* find data/hole in dnode block */
		for (; dn.ofs_in_node < end_offset;
				dn.ofs_in_node++, pgofs++,
				data_ofs = (loff_t)pgofs << PAGE_SHIFT) {

			blkaddr = f2fs_data_blkaddr(&dn);

			if (__is_valid_data_blkaddr(blkaddr) &&
				!f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
					blkaddr, DATA_GENERIC_ENHANCE)) {

			if (__found_offset(file->f_mapping, blkaddr,

	if (whence == SEEK_DATA)

	if (whence == SEEK_HOLE && data_ofs > isize)

	return vfs_setpos(file, data_ofs, maxbytes);
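/*
 * Illustrative sketch (editorial addition): f2fs_seek_block() backs the
 * SEEK_DATA/SEEK_HOLE whence values of lseek(2), e.g. to enumerate the
 * populated regions of a sparse file:
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);
 *	off_t hole = lseek(fd, data, SEEK_HOLE);
 *	// [data, hole) is one contiguous region containing data
 */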
static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;

	if (f2fs_compressed_file(inode))
		maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS;

		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));

		return f2fs_seek_block(file, offset, whence);

static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
	struct inode *inode = file_inode(file);

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))

	if (!f2fs_is_compress_backend_ready(inode))

	vma->vm_ops = &f2fs_file_vm_ops;
	set_inode_flag(inode, FI_MMAP_FILE);

static int f2fs_file_open(struct inode *inode, struct file *filp)
	int err = fscrypt_file_open(inode, filp);

	if (!f2fs_is_compress_backend_ready(inode))

	err = fsverity_file_open(inode, filp);

	filp->f_mode |= FMODE_NOWAIT;

	return dquot_file_open(inode, filp);

void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_node *raw_node;
	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
	bool compressed_cluster = false;
	int cluster_index = 0, valid_blocks = 0;
	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
	bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks);

	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);

	raw_node = F2FS_NODE(dn->node_page);
	addr = blkaddr_in_node(raw_node) + base + ofs;

	/* Assumption: truncation starts with a cluster */
	for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
		block_t blkaddr = le32_to_cpu(*addr);

		if (f2fs_compressed_file(dn->inode) &&
					!(cluster_index & (cluster_size - 1))) {
			if (compressed_cluster)
				f2fs_i_compr_blocks_update(dn->inode,
							valid_blocks, false);
			compressed_cluster = (blkaddr == COMPRESS_ADDR);

		if (blkaddr == NULL_ADDR)

		dn->data_blkaddr = NULL_ADDR;
		f2fs_set_data_blkaddr(dn);

		if (__is_valid_data_blkaddr(blkaddr)) {
			if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
					DATA_GENERIC_ENHANCE))
			if (compressed_cluster)

		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
			clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);

		f2fs_invalidate_blocks(sbi, blkaddr);

		if (!released || blkaddr != COMPRESS_ADDR)

	if (compressed_cluster)
		f2fs_i_compr_blocks_update(dn->inode, valid_blocks, false);

	/*
	 * once we invalidate valid blkaddr in range [ofs, ofs + count],
	 * we will invalidate all blkaddr in the whole range.
	 */
	fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
	f2fs_update_extent_cache_range(dn, fofs, 0, len);
	dec_valid_block_count(sbi, dn->inode, nr_free);

	dn->ofs_in_node = ofs;

	f2fs_update_time(sbi, REQ_TIME);
	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					      dn->ofs_in_node, nr_free);

void f2fs_truncate_data_blocks(struct dnode_of_data *dn)
	f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));

static int truncate_partial_data_page(struct inode *inode, u64 from,
	loff_t offset = from & (PAGE_SIZE - 1);
	pgoff_t index = from >> PAGE_SHIFT;
	struct address_space *mapping = inode->i_mapping;

	if (!offset && !cache_only)

		page = find_lock_page(mapping, index);
		if (page && PageUptodate(page))
		f2fs_put_page(page, 1);

	page = f2fs_get_lock_data_page(inode, index, true);
		return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);

	f2fs_wait_on_page_writeback(page, DATA, true, true);
	zero_user(page, offset, PAGE_SIZE - offset);

	/* An encrypted inode should have a key and truncate the last page. */
	f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode));
	set_page_dirty(page);
	f2fs_put_page(page, 1);

int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock)
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int count = 0, err = 0;
	bool truncate_page = false;

	trace_f2fs_truncate_blocks_enter(inode, from);

	free_from = (pgoff_t)F2FS_BLK_ALIGN(from);

	if (free_from >= max_file_blocks(inode))

	ipage = f2fs_get_node_page(sbi, inode->i_ino);
		err = PTR_ERR(ipage);

	if (f2fs_has_inline_data(inode)) {
		f2fs_truncate_inline_inode(inode, ipage, from);
		f2fs_put_page(ipage, 1);
		truncate_page = true;

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);

	count = ADDRS_PER_PAGE(dn.node_page, inode);

	count -= dn.ofs_in_node;
	f2fs_bug_on(sbi, count < 0);

	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		f2fs_truncate_data_blocks_range(&dn, count);

	err = f2fs_truncate_inode_blocks(inode, free_from);

	/* lastly zero out the first data page */
		err = truncate_partial_data_page(inode, from, truncate_page);

	trace_f2fs_truncate_blocks_exit(inode, err);

int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
	u64 free_from = from;

#ifdef CONFIG_F2FS_FS_COMPRESSION
	/*
	 * for a compressed file, only cluster-size-aligned
	 * truncation is supported.
	 */
	if (f2fs_compressed_file(inode))
		free_from = round_up(from,
				F2FS_I(inode)->i_cluster_size << PAGE_SHIFT);
#endif

	err = f2fs_do_truncate_blocks(inode, free_from, lock);

#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (from != free_from) {
		err = f2fs_truncate_partial_cluster(inode, from, lock);
#endif

int f2fs_truncate(struct inode *inode)
	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))

	trace_f2fs_truncate(inode);

	if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
		f2fs_show_injection_info(F2FS_I_SB(inode), FAULT_TRUNCATE);

	err = dquot_initialize(inode);

	/* we should check inline_data size */
	if (!f2fs_may_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);

	err = f2fs_truncate_blocks(inode, i_size_read(inode), true);

	inode->i_mtime = inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
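/*
 * Illustrative sketch (editorial addition): f2fs_truncate() is reached
 * both from setattr (size changes) and from direct userspace truncation:
 *
 *	ftruncate(fd, 0);		// drop every block
 *	truncate(path, 4096);		// keep exactly one 4KB block
 *
 * For compressed inodes, f2fs_truncate_blocks() above first rounds the
 * cut point up to a cluster boundary, then trims the partial cluster.
 */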
int f2fs_getattr(struct user_namespace *mnt_userns, const struct path *path,
		 struct kstat *stat, u32 request_mask, unsigned int query_flags)
	struct inode *inode = d_inode(path->dentry);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_inode *ri;

	if (f2fs_has_extra_attr(inode) &&
			f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
		stat->result_mask |= STATX_BTIME;
		stat->btime.tv_sec = fi->i_crtime.tv_sec;
		stat->btime.tv_nsec = fi->i_crtime.tv_nsec;

	if (flags & F2FS_COMPR_FL)
		stat->attributes |= STATX_ATTR_COMPRESSED;
	if (flags & F2FS_APPEND_FL)
		stat->attributes |= STATX_ATTR_APPEND;
	if (IS_ENCRYPTED(inode))
		stat->attributes |= STATX_ATTR_ENCRYPTED;
	if (flags & F2FS_IMMUTABLE_FL)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (flags & F2FS_NODUMP_FL)
		stat->attributes |= STATX_ATTR_NODUMP;
	if (IS_VERITY(inode))
		stat->attributes |= STATX_ATTR_VERITY;

	stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
				  STATX_ATTR_ENCRYPTED |
				  STATX_ATTR_IMMUTABLE |

	generic_fillattr(&init_user_ns, inode, stat);

	/* we need to show initial sectors used for inline_data/dentries */
	if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
					f2fs_has_inline_dentry(inode))
		stat->blocks += (stat->size + 511) >> 9;
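/*
 * Illustrative sketch (editorial addition): the STATX_BTIME result filled
 * in above is consumed from userspace via statx(2):
 *
 *	struct statx stx;
 *	statx(AT_FDCWD, path, 0, STATX_BTIME, &stx);
 *	if (stx.stx_mask & STATX_BTIME)
 *		printf("created: %lld\n", (long long)stx.stx_btime.tv_sec);
 */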
#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct user_namespace *mnt_userns,
			   struct inode *inode, const struct iattr *attr)
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = attr->ia_atime;
	if (ia_valid & ATTR_MTIME)
		inode->i_mtime = attr->ia_mtime;
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = attr->ia_ctime;
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;
		kgid_t kgid = i_gid_into_mnt(mnt_userns, inode);

		if (!in_group_p(kgid) && !capable_wrt_inode_uidgid(mnt_userns, inode, CAP_FSETID))
		set_acl_inode(inode, mode);
#else
#define __setattr_copy setattr_copy
#endif

int f2fs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
	struct inode *inode = d_inode(dentry);

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))

	if (unlikely(IS_IMMUTABLE(inode)))

	if (unlikely(IS_APPEND(inode) &&
			(attr->ia_valid & (ATTR_MODE | ATTR_UID |
					   ATTR_GID | ATTR_TIMES_SET))))

	if ((attr->ia_valid & ATTR_SIZE) &&
		!f2fs_is_compress_backend_ready(inode))

	err = setattr_prepare(&init_user_ns, dentry, attr);

	err = fscrypt_prepare_setattr(dentry, attr);

	err = fsverity_prepare_setattr(dentry, attr);

	if (is_quota_modification(inode, attr)) {
		err = dquot_initialize(inode);

	if ((attr->ia_valid & ATTR_UID &&
		!uid_eq(attr->ia_uid, inode->i_uid)) ||
		(attr->ia_valid & ATTR_GID &&
		!gid_eq(attr->ia_gid, inode->i_gid))) {
		f2fs_lock_op(F2FS_I_SB(inode));
		err = dquot_transfer(inode, attr);
			set_sbi_flag(F2FS_I_SB(inode),
					SBI_QUOTA_NEED_REPAIR);
			f2fs_unlock_op(F2FS_I_SB(inode));

		/*
		 * update uid/gid under lock_op(), so that dquot and inode can
		 * be updated atomically.
		 */
		if (attr->ia_valid & ATTR_UID)
			inode->i_uid = attr->ia_uid;
		if (attr->ia_valid & ATTR_GID)
			inode->i_gid = attr->ia_gid;
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_unlock_op(F2FS_I_SB(inode));

	if (attr->ia_valid & ATTR_SIZE) {
		loff_t old_size = i_size_read(inode);

		if (attr->ia_size > MAX_INLINE_DATA(inode)) {
			/*
			 * should convert the inline inode before i_size_write
			 * to keep the size below the inline_data limit while
			 * the inline flag is still set.
			 */
			err = f2fs_convert_inline_inode(inode);

		down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		down_write(&F2FS_I(inode)->i_mmap_sem);

		truncate_setsize(inode, attr->ia_size);

		if (attr->ia_size <= old_size)
			err = f2fs_truncate(inode);
		/*
		 * do not trim all blocks after i_size if target size is
		 * larger than i_size.
		 */
		up_write(&F2FS_I(inode)->i_mmap_sem);
		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

		spin_lock(&F2FS_I(inode)->i_size_lock);
		inode->i_mtime = inode->i_ctime = current_time(inode);
		F2FS_I(inode)->last_disk_size = i_size_read(inode);
		spin_unlock(&F2FS_I(inode)->i_size_lock);

	__setattr_copy(&init_user_ns, inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(&init_user_ns, inode, f2fs_get_inode_mode(inode));

		if (is_inode_flag_set(inode, FI_ACL_MODE)) {
			inode->i_mode = F2FS_I(inode)->i_acl_mode;
			clear_inode_flag(inode, FI_ACL_MODE);

	/* file size may have changed here */
	f2fs_mark_inode_dirty_sync(inode, true);

	/* inode change will produce dirty node pages flushed by checkpoint */
	f2fs_balance_fs(F2FS_I_SB(inode), true);

const struct inode_operations f2fs_file_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
	.set_acl	= f2fs_set_acl,
	.listxattr	= f2fs_listxattr,
	.fiemap		= f2fs_fiemap,
};

static int fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	f2fs_balance_fs(sbi, true);

	page = f2fs_get_new_data_page(inode, NULL, index, false);
	f2fs_unlock_op(sbi);

		return PTR_ERR(page);

	f2fs_wait_on_page_writeback(page, DATA, true, true);
	zero_user(page, start, len);
	set_page_dirty(page);
	f2fs_put_page(page, 1);

int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
	while (pg_start < pg_end) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
			if (err == -ENOENT) {
				pg_start = f2fs_get_next_page_offset(&dn,

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);

		f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);

		f2fs_truncate_data_blocks_range(&dn, count);
		f2fs_put_dnode(&dn);

static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;

	ret = f2fs_convert_inline_inode(inode);

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);

		ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);

		ret = fill_zero(inode, pg_end, 0, off_end);

		if (pg_start < pg_end) {
			struct address_space *mapping = inode->i_mapping;
			loff_t blk_start, blk_end;
			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

			f2fs_balance_fs(sbi, true);

			blk_start = (loff_t)pg_start << PAGE_SHIFT;
			blk_end = (loff_t)pg_end << PAGE_SHIFT;

			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			down_write(&F2FS_I(inode)->i_mmap_sem);

			truncate_inode_pages_range(mapping, blk_start,

			ret = f2fs_truncate_hole(inode, pg_start, pg_end);
			f2fs_unlock_op(sbi);

			up_write(&F2FS_I(inode)->i_mmap_sem);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
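/*
 * Illustrative sketch (editorial addition): punch_hole() is reached via
 * the hole-punching mode of fallocate(2), which must be combined with
 * KEEP_SIZE:
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  offset, len);
 *
 * Partial pages at either end are zeroed via fill_zero(); only the fully
 * covered pages in between have their blocks deallocated.
 */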
static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, pgoff_t len)
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
	if (ret && ret != -ENOENT) {
	} else if (ret == -ENOENT) {
		if (dn.max_level == 0)
		done = min((pgoff_t)ADDRS_PER_BLOCK(inode) -
						dn.ofs_in_node, len);

	done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
					dn.ofs_in_node, len);
	for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
		*blkaddr = f2fs_data_blkaddr(&dn);

		if (__is_valid_data_blkaddr(*blkaddr) &&
			!f2fs_is_valid_blkaddr(sbi, *blkaddr,
					DATA_GENERIC_ENHANCE)) {
			f2fs_put_dnode(&dn);
			return -EFSCORRUPTED;

		if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {

			if (f2fs_lfs_mode(sbi)) {
				f2fs_put_dnode(&dn);

			/* do not invalidate this block address */
			f2fs_update_data_blkaddr(&dn, NULL_ADDR);

	f2fs_put_dnode(&dn);

static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, int len)
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;

	for (i = 0; i < len; i++, do_replace++, blkaddr++) {
		if (*do_replace == 0)

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
		dec_valid_block_count(sbi, inode, 1);
		f2fs_invalidate_blocks(sbi, *blkaddr);
		f2fs_update_data_blkaddr(&dn, *blkaddr);
		f2fs_put_dnode(&dn);

static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
			block_t *blkaddr, int *do_replace,
			pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
	struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);

		if (blkaddr[i] == NULL_ADDR && !full) {

		if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
			struct dnode_of_data dn;
			struct node_info ni;

			set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
			ret = f2fs_get_dnode_of_data(&dn, dst + i, ALLOC_NODE);

			ret = f2fs_get_node_info(sbi, dn.nid, &ni);
				f2fs_put_dnode(&dn);

			ilen = min((pgoff_t)
				ADDRS_PER_PAGE(dn.node_page, dst_inode) -
						dn.ofs_in_node, len - i);

				dn.data_blkaddr = f2fs_data_blkaddr(&dn);
				f2fs_truncate_data_blocks_range(&dn, 1);

				if (do_replace[i]) {
					f2fs_i_blocks_write(src_inode,
					f2fs_i_blocks_write(dst_inode,
					f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
						blkaddr[i], ni.version, true, false);

				new_size = (loff_t)(dst + i) << PAGE_SHIFT;
				if (dst_inode->i_size < new_size)
					f2fs_i_size_write(dst_inode, new_size);
			} while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));

			f2fs_put_dnode(&dn);
		} else {
			struct page *psrc, *pdst;

			psrc = f2fs_get_lock_data_page(src_inode,
				return PTR_ERR(psrc);
			pdst = f2fs_get_new_data_page(dst_inode, NULL, dst + i,
				f2fs_put_page(psrc, 1);
				return PTR_ERR(pdst);
			f2fs_copy_page(psrc, pdst);
			set_page_dirty(pdst);
			f2fs_put_page(pdst, 1);
			f2fs_put_page(psrc, 1);

			ret = f2fs_truncate_hole(src_inode,
						src + i, src + i + 1);

static int __exchange_data_block(struct inode *src_inode,
			struct inode *dst_inode, pgoff_t src, pgoff_t dst,
			pgoff_t len, bool full)
	block_t *src_blkaddr;

	olen = min((pgoff_t)4 * ADDRS_PER_BLOCK(src_inode), len);

	src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
				array_size(olen, sizeof(block_t)),

	do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
				array_size(olen, sizeof(int)),
		kvfree(src_blkaddr);

	ret = __read_out_blkaddrs(src_inode, src_blkaddr,
				do_replace, src, olen);

	ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
				do_replace, src, dst, olen, full);

	kvfree(src_blkaddr);

	__roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen);
	kvfree(src_blkaddr);

static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nrpages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
	pgoff_t start = offset >> PAGE_SHIFT;
	pgoff_t end = (offset + len) >> PAGE_SHIFT;

	f2fs_balance_fs(sbi, true);

	/* avoid gc operation during block exchange */
	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	down_write(&F2FS_I(inode)->i_mmap_sem);

	f2fs_drop_extent_tree(inode);
	truncate_pagecache(inode, offset);
	ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
	f2fs_unlock_op(sbi);

	up_write(&F2FS_I(inode)->i_mmap_sem);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
	if (offset + len >= i_size_read(inode))

	/* collapse range should be aligned to the f2fs block size */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))

	ret = f2fs_convert_inline_inode(inode);

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);

	ret = f2fs_do_collapse(inode, offset, len);

	/* write out all moved pages, if possible */
	down_write(&F2FS_I(inode)->i_mmap_sem);
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

	new_size = i_size_read(inode) - len;
	ret = f2fs_truncate_blocks(inode, new_size, true);
	up_write(&F2FS_I(inode)->i_mmap_sem);

		f2fs_i_size_write(inode, new_size);
static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	pgoff_t index = start;
	unsigned int ofs_in_node = dn->ofs_in_node;

	for (; index < end; index++, dn->ofs_in_node++) {
		if (f2fs_data_blkaddr(dn) == NULL_ADDR)

	dn->ofs_in_node = ofs_in_node;
	ret = f2fs_reserve_new_blocks(dn, count);

	dn->ofs_in_node = ofs_in_node;
	for (index = start; index < end; index++, dn->ofs_in_node++) {
		dn->data_blkaddr = f2fs_data_blkaddr(dn);
		/*
		 * f2fs_reserve_new_blocks will not guarantee entire block
		 */
		if (dn->data_blkaddr == NULL_ADDR) {

		if (dn->data_blkaddr != NEW_ADDR) {
			f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
			dn->data_blkaddr = NEW_ADDR;
			f2fs_set_data_blkaddr(dn);

	f2fs_update_extent_cache_range(dn, start, 0, index - start);

static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;

	ret = inode_newsize_ok(inode, (len + offset));

	ret = f2fs_convert_inline_inode(inode);

	ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);

		new_size = max_t(loff_t, new_size, offset + len);

		ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);

		new_size = max_t(loff_t, new_size,
				(loff_t)pg_start << PAGE_SHIFT);

		for (index = pg_start; index < pg_end;) {
			struct dnode_of_data dn;
			unsigned int end_offset;

			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			down_write(&F2FS_I(inode)->i_mmap_sem);

			truncate_pagecache_range(inode,
				(loff_t)index << PAGE_SHIFT,
				((loff_t)pg_end << PAGE_SHIFT) - 1);

			set_new_dnode(&dn, inode, NULL, NULL, 0);
			ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
				f2fs_unlock_op(sbi);
				up_write(&F2FS_I(inode)->i_mmap_sem);
				up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

			end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
			end = min(pg_end, end_offset - dn.ofs_in_node + index);

			ret = f2fs_do_zero_range(&dn, index, end);
			f2fs_put_dnode(&dn);

			f2fs_unlock_op(sbi);
			up_write(&F2FS_I(inode)->i_mmap_sem);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

			f2fs_balance_fs(sbi, dn.node_changed);

			new_size = max_t(loff_t, new_size,
					(loff_t)index << PAGE_SHIFT);

		ret = fill_zero(inode, pg_end, 0, off_end);

		new_size = max_t(loff_t, new_size, offset + len);

	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		else
			f2fs_i_size_write(inode, new_size);

static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nr, pg_start, pg_end, delta, idx;

	new_size = i_size_read(inode) + len;
	ret = inode_newsize_ok(inode, new_size);

	if (offset >= i_size_read(inode))

	/* insert range should be aligned to the f2fs block size */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))

	ret = f2fs_convert_inline_inode(inode);

	f2fs_balance_fs(sbi, true);

	down_write(&F2FS_I(inode)->i_mmap_sem);
	ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
	up_write(&F2FS_I(inode)->i_mmap_sem);

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);

	pg_start = offset >> PAGE_SHIFT;
	pg_end = (offset + len) >> PAGE_SHIFT;
	delta = pg_end - pg_start;
	idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	/* avoid gc operation during block exchange */
	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	down_write(&F2FS_I(inode)->i_mmap_sem);
	truncate_pagecache(inode, offset);

	while (!ret && idx > pg_start) {
		nr = idx - pg_start;

		f2fs_drop_extent_tree(inode);

		ret = __exchange_data_block(inode, inode, idx,
					idx + delta, nr, false);
		f2fs_unlock_op(sbi);

	up_write(&F2FS_I(inode)->i_mmap_sem);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	/* write out all moved pages, if possible */
	down_write(&F2FS_I(inode)->i_mmap_sem);
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);
	up_write(&F2FS_I(inode)->i_mmap_sem);

		f2fs_i_size_write(inode, new_size);

static int expand_inode_data(struct inode *inode, loff_t offset,
					loff_t len, int mode)
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
			.m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
			.m_may_create = true };
	pgoff_t pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	block_t expanded = 0;

	err = inode_newsize_ok(inode, (len + offset));

	err = f2fs_convert_inline_inode(inode);

	f2fs_balance_fs(sbi, true);

	pg_start = ((unsigned long long)offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
	off_end = (offset + len) & (PAGE_SIZE - 1);

	map.m_lblk = pg_start;
	map.m_len = pg_end - pg_start;

	if (f2fs_is_pinned_file(inode)) {
		block_t sec_blks = BLKS_PER_SEC(sbi);
		block_t sec_len = roundup(map.m_len, sec_blks);

		map.m_len = sec_blks;

		if (has_not_enough_free_secs(sbi, 0,
			GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
			down_write(&sbi->gc_lock);
			err = f2fs_gc(sbi, true, false, false, NULL_SEGNO);
			if (err && err != -ENODATA && err != -EAGAIN)

		down_write(&sbi->pin_sem);

		f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED);
		f2fs_unlock_op(sbi);

		map.m_seg_type = CURSEG_COLD_DATA_PINNED;
		err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);

		up_write(&sbi->pin_sem);

		expanded += map.m_len;
		sec_len -= map.m_len;
		map.m_lblk += map.m_len;
		if (!err && sec_len)

		map.m_len = expanded;
	} else {
		err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
		expanded = map.m_len;
	}

		last_off = pg_start + expanded - 1;

		/* update new size to the failed position */
		new_size = (last_off == pg_end) ? offset + len :
					(loff_t)(last_off + 1) << PAGE_SHIFT;
		new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;

	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		else
			f2fs_i_size_write(inode, new_size);

static long f2fs_fallocate(struct file *file, int mode,
				loff_t offset, loff_t len)
	struct inode *inode = file_inode(file);

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))

	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))

	if (!f2fs_is_compress_backend_ready(inode))

	/* f2fs only supports ->fallocate for regular files */
	if (!S_ISREG(inode->i_mode))

	if (IS_ENCRYPTED(inode) &&
		(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))

	if (f2fs_compressed_file(inode) &&
		(mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
			FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE)))

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
			FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
			FALLOC_FL_INSERT_RANGE))

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset >= inode->i_size)

		ret = punch_hole(inode, offset, len);
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		ret = f2fs_collapse_range(inode, offset, len);
	} else if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = f2fs_zero_range(inode, offset, len, mode);
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		ret = f2fs_insert_range(inode, offset, len);
	} else {
		ret = expand_inode_data(inode, offset, len, mode);
	}

		inode->i_mtime = inode->i_ctime = current_time(inode);
		f2fs_mark_inode_dirty_sync(inode, false);
		f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	inode_unlock(inode);

	trace_f2fs_fallocate(inode, mode, offset, len, ret);
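/*
 * Illustrative sketch (editorial addition): the dispatch above implements
 * the standard fallocate(2) modes, e.g.:
 *
 *	fallocate(fd, 0, 0, 1 << 20);			   // preallocate
 *	fallocate(fd, FALLOC_FL_ZERO_RANGE, off, len);	   // zero out
 *	fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, off, len); // remove range
 *	fallocate(fd, FALLOC_FL_INSERT_RANGE, off, len);   // make room
 *
 * Per the checks above, collapse/insert require block-aligned offset/len
 * and are rejected on encrypted inodes; compressed inodes additionally
 * reject punch/zero/collapse/insert.
 */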
static int f2fs_release_file(struct inode *inode, struct file *filp)
	/*
	 * f2fs_release_file is called on every close.  So we should
	 * not drop any in-memory pages on a close issued by another process.
	 */
	if (!(filp->f_mode & FMODE_WRITE) ||
			atomic_read(&inode->i_writecount) != 1)

	/* any remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		f2fs_drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		set_inode_flag(inode, FI_DROP_CACHE);
		filemap_fdatawrite(inode->i_mapping);
		clear_inode_flag(inode, FI_DROP_CACHE);
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);

static int f2fs_file_flush(struct file *file, fl_owner_t id)
	struct inode *inode = file_inode(file);

	/*
	 * If the process doing a transaction crashes, we should roll it
	 * back.  Otherwise, other readers/writers can see a corrupted
	 * database until all the writers close their files.  Since this
	 * should be done before dropping the file lock, it needs to happen
	 * in ->flush.
	 */
	if (f2fs_is_atomic_file(inode) &&
			F2FS_I(inode)->inmem_task == current)
		f2fs_drop_inmem_pages(inode);

static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
	struct f2fs_inode_info *fi = F2FS_I(inode);
	u32 masked_flags = fi->i_flags & mask;

	f2fs_bug_on(F2FS_I_SB(inode), (iflags & ~mask));

	/* Is it a quota file? Do not allow the user to mess with it */
	if (IS_NOQUOTA(inode))

	if ((iflags ^ masked_flags) & F2FS_CASEFOLD_FL) {
		if (!f2fs_sb_has_casefold(F2FS_I_SB(inode)))
		if (!f2fs_empty_dir(inode))

	if (iflags & (F2FS_COMPR_FL | F2FS_NOCOMP_FL)) {
		if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
		if ((iflags & F2FS_COMPR_FL) && (iflags & F2FS_NOCOMP_FL))

	if ((iflags ^ masked_flags) & F2FS_COMPR_FL) {
		if (masked_flags & F2FS_COMPR_FL) {
			if (!f2fs_disable_compressed_file(inode))
		if (iflags & F2FS_NOCOMP_FL)
		if (iflags & F2FS_COMPR_FL) {
			if (!f2fs_may_compress(inode))
			if (S_ISREG(inode->i_mode) && inode->i_size)

			set_compress_context(inode);

	if ((iflags ^ masked_flags) & F2FS_NOCOMP_FL) {
		if (masked_flags & F2FS_COMPR_FL)

	fi->i_flags = iflags | (fi->i_flags & ~mask);
	f2fs_bug_on(F2FS_I_SB(inode), (fi->i_flags & F2FS_COMPR_FL) &&
					(fi->i_flags & F2FS_NOCOMP_FL));

	if (fi->i_flags & F2FS_PROJINHERIT_FL)
		set_inode_flag(inode, FI_PROJ_INHERIT);
	else
		clear_inode_flag(inode, FI_PROJ_INHERIT);

	inode->i_ctime = current_time(inode);
	f2fs_set_inode_flags(inode);
	f2fs_mark_inode_dirty_sync(inode, true);

/* FS_IOC_GETFLAGS and FS_IOC_SETFLAGS support */

/*
 * To make a new on-disk f2fs i_flag gettable via FS_IOC_GETFLAGS, add an entry
 * for it to f2fs_fsflags_map[], and add its FS_*_FL equivalent to
 * F2FS_GETTABLE_FS_FL.  To also make it settable via FS_IOC_SETFLAGS, also add
 * its FS_*_FL equivalent to F2FS_SETTABLE_FS_FL.
 */

static const struct {
	u32 iflag;
	u32 fsflag;
} f2fs_fsflags_map[] = {
	{ F2FS_COMPR_FL,	FS_COMPR_FL },
	{ F2FS_SYNC_FL,		FS_SYNC_FL },
	{ F2FS_IMMUTABLE_FL,	FS_IMMUTABLE_FL },
	{ F2FS_APPEND_FL,	FS_APPEND_FL },
	{ F2FS_NODUMP_FL,	FS_NODUMP_FL },
	{ F2FS_NOATIME_FL,	FS_NOATIME_FL },
	{ F2FS_NOCOMP_FL,	FS_NOCOMP_FL },
	{ F2FS_INDEX_FL,	FS_INDEX_FL },
	{ F2FS_DIRSYNC_FL,	FS_DIRSYNC_FL },
	{ F2FS_PROJINHERIT_FL,	FS_PROJINHERIT_FL },
	{ F2FS_CASEFOLD_FL,	FS_CASEFOLD_FL },
};

#define F2FS_GETTABLE_FS_FL (		\
		FS_PROJINHERIT_FL |	\
		FS_INLINE_DATA_FL |	\

#define F2FS_SETTABLE_FS_FL (		\
		FS_PROJINHERIT_FL |	\

/* Convert f2fs on-disk i_flags to FS_IOC_{GET,SET}FLAGS flags */
static inline u32 f2fs_iflags_to_fsflags(u32 iflags)
	for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
		if (iflags & f2fs_fsflags_map[i].iflag)
			fsflags |= f2fs_fsflags_map[i].fsflag;

/* Convert FS_IOC_{GET,SET}FLAGS flags to f2fs on-disk i_flags */
static inline u32 f2fs_fsflags_to_iflags(u32 fsflags)
	for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
		if (fsflags & f2fs_fsflags_map[i].fsflag)
			iflags |= f2fs_fsflags_map[i].iflag;
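/*
 * Illustrative sketch (editorial addition): these converters back the
 * generic attribute ioctls used by chattr(1)/lsattr(1), e.g. disabling
 * compression on one file:
 *
 *	int flags;
 *	ioctl(fd, FS_IOC_GETFLAGS, &flags);
 *	flags |= FS_NOCOMP_FL;
 *	ioctl(fd, FS_IOC_SETFLAGS, &flags);
 */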
static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	u32 fsflags = f2fs_iflags_to_fsflags(fi->i_flags);

	if (IS_ENCRYPTED(inode))
		fsflags |= FS_ENCRYPT_FL;
	if (IS_VERITY(inode))
		fsflags |= FS_VERITY_FL;
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
		fsflags |= FS_INLINE_DATA_FL;
	if (is_inode_flag_set(inode, FI_PIN_FILE))
		fsflags |= FS_NOCOW_FL;

	fsflags &= F2FS_GETTABLE_FS_FL;

	return put_user(fsflags, (int __user *)arg);

static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	u32 fsflags, old_fsflags;

	if (!inode_owner_or_capable(&init_user_ns, inode))

	if (get_user(fsflags, (int __user *)arg))

	if (fsflags & ~F2FS_GETTABLE_FS_FL)

	fsflags &= F2FS_SETTABLE_FS_FL;

	iflags = f2fs_fsflags_to_iflags(fsflags);
	if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)

	ret = mnt_want_write_file(filp);

	old_fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
	ret = vfs_ioc_setflags_prepare(inode, old_fsflags, fsflags);

	ret = f2fs_setflags_common(inode, iflags,
			f2fs_fsflags_to_iflags(F2FS_SETTABLE_FS_FL));

	inode_unlock(inode);
	mnt_drop_write_file(filp);

static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
	struct inode *inode = file_inode(filp);

	return put_user(inode->i_generation, (int __user *)arg);

static int f2fs_ioc_start_atomic_write(struct file *filp)
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (!inode_owner_or_capable(&init_user_ns, inode))

	if (!S_ISREG(inode->i_mode))

	if (filp->f_flags & O_DIRECT)

	ret = mnt_want_write_file(filp);

	f2fs_disable_compressed_file(inode);

	if (f2fs_is_atomic_file(inode)) {
		if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST))

	ret = f2fs_convert_inline_inode(inode);

	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	/*
	 * Should wait for end_io so that F2FS_WB_CP_DATA is counted
	 * correctly by f2fs_is_atomic_file.
	 */
	if (get_dirty_pages(inode))
		f2fs_warn(F2FS_I_SB(inode), "Unexpected flush for atomic writes: ino=%lu, npages=%u",
			  inode->i_ino, get_dirty_pages(inode));
	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
	if (list_empty(&fi->inmem_ilist))
		list_add_tail(&fi->inmem_ilist, &sbi->inode_list[ATOMIC_FILE]);
	sbi->atomic_files++;
	spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);

	/* add inode to inmem_list first and set atomic_file */
	set_inode_flag(inode, FI_ATOMIC_FILE);
	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	F2FS_I(inode)->inmem_task = current;
	stat_update_max_atomic_write(inode);

	inode_unlock(inode);
	mnt_drop_write_file(filp);

static int f2fs_ioc_commit_atomic_write(struct file *filp)
	struct inode *inode = file_inode(filp);

	if (!inode_owner_or_capable(&init_user_ns, inode))

	ret = mnt_want_write_file(filp);

	f2fs_balance_fs(F2FS_I_SB(inode), true);

	if (f2fs_is_volatile_file(inode)) {

	if (f2fs_is_atomic_file(inode)) {
		ret = f2fs_commit_inmem_pages(inode);

		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
			f2fs_drop_inmem_pages(inode);

		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);

	if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
		clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);

	inode_unlock(inode);
	mnt_drop_write_file(filp);

static int f2fs_ioc_start_volatile_write(struct file *filp)
	struct inode *inode = file_inode(filp);

	if (!inode_owner_or_capable(&init_user_ns, inode))

	if (!S_ISREG(inode->i_mode))

	ret = mnt_want_write_file(filp);

	if (f2fs_is_volatile_file(inode))

	ret = f2fs_convert_inline_inode(inode);

	stat_inc_volatile_write(inode);
	stat_update_max_volatile_write(inode);

	set_inode_flag(inode, FI_VOLATILE_FILE);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	inode_unlock(inode);
	mnt_drop_write_file(filp);

static int f2fs_ioc_release_volatile_write(struct file *filp)
	struct inode *inode = file_inode(filp);

	if (!inode_owner_or_capable(&init_user_ns, inode))

	ret = mnt_want_write_file(filp);

	if (!f2fs_is_volatile_file(inode))

	if (!f2fs_is_first_block_written(inode)) {
		ret = truncate_partial_data_page(inode, 0, true);

	ret = punch_hole(inode, 0, F2FS_BLKSIZE);

	inode_unlock(inode);
	mnt_drop_write_file(filp);

static int f2fs_ioc_abort_volatile_write(struct file *filp)
	struct inode *inode = file_inode(filp);

	if (!inode_owner_or_capable(&init_user_ns, inode))

	ret = mnt_want_write_file(filp);

	if (f2fs_is_atomic_file(inode))
		f2fs_drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);

	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);

	inode_unlock(inode);

	mnt_drop_write_file(filp);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;

	if (!capable(CAP_SYS_ADMIN))

	if (get_user(in, (__u32 __user *)arg))

	if (in != F2FS_GOING_DOWN_FULLSYNC) {
		ret = mnt_want_write_file(filp);
		if (ret == -EROFS) {
			f2fs_stop_checkpoint(sbi, false);
			set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
			trace_f2fs_shutdown(sbi, in, ret);

	case F2FS_GOING_DOWN_FULLSYNC:
		ret = freeze_bdev(sb->s_bdev);
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		thaw_bdev(sb->s_bdev);
	case F2FS_GOING_DOWN_METASYNC:
		/* do checkpoint only */
		ret = f2fs_sync_fs(sb, 1);
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
	case F2FS_GOING_DOWN_NOSYNC:
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
	case F2FS_GOING_DOWN_METAFLUSH:
		f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
	case F2FS_GOING_DOWN_NEED_FSCK:
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
		set_sbi_flag(sbi, SBI_IS_DIRTY);
		/* do checkpoint only */
		ret = f2fs_sync_fs(sb, 1);

	f2fs_stop_gc_thread(sbi);
	f2fs_stop_discard_thread(sbi);

	f2fs_drop_discard_cmd(sbi);
	clear_opt(sbi, DISCARD);

	f2fs_update_time(sbi, REQ_TIME);

	if (in != F2FS_GOING_DOWN_FULLSYNC)
		mnt_drop_write_file(filp);

	trace_f2fs_shutdown(sbi, in, ret);
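/*
 * Illustrative sketch (editorial addition): the shutdown ioctl is normally
 * driven by test/admin tooling, e.g.:
 *
 *	__u32 in = F2FS_GOING_DOWN_METASYNC;
 *	ioctl(fd, F2FS_IOC_SHUTDOWN, &in);
 *
 * After this returns, checkpointing is stopped and the filesystem rejects
 * further modifications until it is remounted.
 */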
static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
	struct inode *inode = file_inode(filp);
	struct super_block *sb = inode->i_sb;
	struct request_queue *q = bdev_get_queue(sb->s_bdev);
	struct fstrim_range range;

	if (!capable(CAP_SYS_ADMIN))

	if (!f2fs_hw_support_discard(F2FS_SB(sb)))

	if (copy_from_user(&range, (struct fstrim_range __user *)arg,

	ret = mnt_want_write_file(filp);

	range.minlen = max((unsigned int)range.minlen,
			   q->limits.discard_granularity);
	ret = f2fs_trim_fs(F2FS_SB(sb), &range);
	mnt_drop_write_file(filp);

	if (copy_to_user((struct fstrim_range __user *)arg, &range,

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
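/*
 * Illustrative sketch (editorial addition): this is the generic FITRIM
 * ioctl as issued by fstrim(8):
 *
 *	struct fstrim_range r = { .start = 0, .len = ULLONG_MAX, .minlen = 0 };
 *	ioctl(fd, FITRIM, &r);
 *	// on return, r.len holds the number of bytes actually trimmed
 */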
static bool uuid_is_nonzero(__u8 u[16])
	for (i = 0; i < 16; i++)

static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
	struct inode *inode = file_inode(filp);

	if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode)))

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);

static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))

	return fscrypt_ioctl_get_policy(filp, (void __user *)arg);

static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (!f2fs_sb_has_encrypt(sbi))

	err = mnt_want_write_file(filp);

	down_write(&sbi->sb_lock);

	if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))

	/* update superblock with uuid */
	generate_random_uuid(sbi->raw_super->encrypt_pw_salt);

	err = f2fs_commit_super(sbi, false);
		memset(sbi->raw_super->encrypt_pw_salt, 0, 16);

	if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,

	up_write(&sbi->sb_lock);
	mnt_drop_write_file(filp);

static int f2fs_ioc_get_encryption_policy_ex(struct file *filp,
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))

	return fscrypt_ioctl_get_policy_ex(filp, (void __user *)arg);

static int f2fs_ioc_add_encryption_key(struct file *filp, unsigned long arg)
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))

	return fscrypt_ioctl_add_key(filp, (void __user *)arg);

static int f2fs_ioc_remove_encryption_key(struct file *filp, unsigned long arg)
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))

	return fscrypt_ioctl_remove_key(filp, (void __user *)arg);

static int f2fs_ioc_remove_encryption_key_all_users(struct file *filp,
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))

	return fscrypt_ioctl_remove_key_all_users(filp, (void __user *)arg);

static int f2fs_ioc_get_encryption_key_status(struct file *filp,
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))

	return fscrypt_ioctl_get_key_status(filp, (void __user *)arg);

static int f2fs_ioc_get_encryption_nonce(struct file *filp, unsigned long arg)
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))

	return fscrypt_ioctl_get_nonce(filp, (void __user *)arg);

static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (!capable(CAP_SYS_ADMIN))

	if (get_user(sync, (__u32 __user *)arg))

	if (f2fs_readonly(sbi->sb))

	ret = mnt_want_write_file(filp);

		if (!down_write_trylock(&sbi->gc_lock)) {
		down_write(&sbi->gc_lock);

	ret = f2fs_gc(sbi, sync, true, false, NULL_SEGNO);

	mnt_drop_write_file(filp);
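/*
 * Illustrative sketch (editorial addition): foreground GC can be requested
 * from userspace, where a nonzero argument asks for synchronous GC:
 *
 *	__u32 sync = 1;
 *	ioctl(fd, F2FS_IOC_GARBAGE_COLLECT, &sync);
 */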
static int __f2fs_ioc_gc_range(struct file *filp, struct f2fs_gc_range *range)
	struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));

	if (!capable(CAP_SYS_ADMIN))

	if (f2fs_readonly(sbi->sb))

	end = range->start + range->len;
	if (end < range->start || range->start < MAIN_BLKADDR(sbi) ||
					end >= MAX_BLKADDR(sbi))

	ret = mnt_want_write_file(filp);

		if (!down_write_trylock(&sbi->gc_lock)) {
		down_write(&sbi->gc_lock);

	ret = f2fs_gc(sbi, range->sync, true, false,
				GET_SEGNO(sbi, range->start));

	range->start += BLKS_PER_SEC(sbi);
	if (range->start <= end)

	mnt_drop_write_file(filp);

static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
	struct f2fs_gc_range range;

	if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,

	return __f2fs_ioc_gc_range(filp, &range);

static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (!capable(CAP_SYS_ADMIN))

	if (f2fs_readonly(sbi->sb))

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		f2fs_info(sbi, "Skipping Checkpoint. Checkpoints currently disabled.");

	ret = mnt_want_write_file(filp);

	ret = f2fs_sync_fs(sbi->sb, 1);

	mnt_drop_write_file(filp);

static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
					struct file *filp,
					struct f2fs_defragment *range)
	struct inode *inode = file_inode(filp);
	struct f2fs_map_blocks map = { .m_next_extent = NULL,
					.m_seg_type = NO_CHECK_TYPE,
					.m_may_create = false };
	struct extent_info ei = {0, 0, 0};
	pgoff_t pg_start, pg_end, next_pgofs;
	unsigned int blk_per_seg = sbi->blocks_per_seg;
	unsigned int total = 0, sec_num;
	block_t blk_end = 0;
	bool fragmented = false;

	/* if in-place-update policy is enabled, don't waste time here */
	if (f2fs_should_update_inplace(inode, NULL))

	pg_start = range->start >> PAGE_SHIFT;
	pg_end = (range->start + range->len) >> PAGE_SHIFT;

	f2fs_balance_fs(sbi, true);

	/* write back all dirty pages in the range */
	err = filemap_write_and_wait_range(inode->i_mapping, range->start,
						range->start + range->len - 1);

	/*
	 * look up mapping info in the extent cache; skip defragmenting if
	 * the physical block addresses are contiguous.
	 */
	if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
		if (ei.fofs + ei.len >= pg_end)

	map.m_lblk = pg_start;
	map.m_next_pgofs = &next_pgofs;

	/*
	 * look up mapping info in the dnode page cache; skip defragmenting
	 * if all physical block addresses are contiguous even if there are
	 * hole(s) in the logical blocks.
	 */
	while (map.m_lblk < pg_end) {
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk = next_pgofs;

		if (blk_end && blk_end != map.m_pblk)

		/* record the total count of blocks that we're going to move */

		blk_end = map.m_pblk + map.m_len;

		map.m_lblk += map.m_len;

	sec_num = DIV_ROUND_UP(total, BLKS_PER_SEC(sbi));

	/*
	 * make sure there are enough free sections for LFS allocation; this
	 * avoids running the defragment in SSR mode when free sections are
	 */
	if (has_not_enough_free_secs(sbi, 0, sec_num)) {

	map.m_lblk = pg_start;
	map.m_len = pg_end - pg_start;

	while (map.m_lblk < pg_end) {

		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk = next_pgofs;

		set_inode_flag(inode, FI_DO_DEFRAG);

		while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {

			page = f2fs_get_lock_data_page(inode, idx, true);
				err = PTR_ERR(page);

			set_page_dirty(page);
			f2fs_put_page(page, 1);

		if (map.m_lblk < pg_end && cnt < blk_per_seg)

		clear_inode_flag(inode, FI_DO_DEFRAG);

		err = filemap_fdatawrite(inode->i_mapping);

	clear_inode_flag(inode, FI_DO_DEFRAG);

	inode_unlock(inode);

	range->len = (u64)total << PAGE_SHIFT;
static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_defragment range;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
		return -EINVAL;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
							sizeof(range)))
		return -EFAULT;

	/* verify alignment of offset & size */
	if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	if (unlikely((range.start + range.len) >> PAGE_SHIFT >
					max_file_blocks(inode)))
		return -EINVAL;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	err = f2fs_defragment_range(sbi, filp, &range);
	mnt_drop_write_file(filp);

	f2fs_update_time(sbi, REQ_TIME);
	if (err < 0)
		return err;

	if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
							sizeof(range)))
		return -EFAULT;

	return 0;
}
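/*
 * Exchange the data blocks of two regular files on the same mount. Both
 * offsets and the resulting length must be block aligned, and encrypted
 * inodes are rejected because their ciphertext is tied to the owning inode
 * and block offset, so blocks cannot simply be swapped.
 */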
static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
			struct file *file_out, loff_t pos_out, size_t len)
{
	struct inode *src = file_inode(file_in);
	struct inode *dst = file_inode(file_out);
	struct f2fs_sb_info *sbi = F2FS_I_SB(src);
	size_t olen = len, dst_max_i_size = 0;
	size_t dst_osize;
	int ret;

	if (file_in->f_path.mnt != file_out->f_path.mnt ||
				src->i_sb != dst->i_sb)
		return -EXDEV;

	if (unlikely(f2fs_readonly(src->i_sb)))
		return -EROFS;

	if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
		return -EINVAL;

	if (IS_ENCRYPTED(src) || IS_ENCRYPTED(dst))
		return -EOPNOTSUPP;

	if (pos_out < 0 || pos_in < 0)
		return -EINVAL;

	if (src == dst) {
		if (pos_in == pos_out)
			return 0;
		if (pos_out > pos_in && pos_out < pos_in + len)
			return -EINVAL;
	}

	inode_lock(src);
	if (src != dst) {
		ret = -EBUSY;
		if (!inode_trylock(dst))
			goto out;
	}

	ret = -EINVAL;
	if (pos_in + len > src->i_size || pos_in + len < pos_in)
		goto out_unlock;
	if (len == 0)
		olen = len = src->i_size - pos_in;
	if (pos_in + len == src->i_size)
		len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
	if (len == 0) {
		ret = 0;
		goto out_unlock;
	}

	dst_osize = dst->i_size;
	if (pos_out + olen > dst->i_size)
		dst_max_i_size = pos_out + olen;

	/* verify the end result is block aligned */
	if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_out, F2FS_BLKSIZE))
		goto out_unlock;

	ret = f2fs_convert_inline_inode(src);
	if (ret)
		goto out_unlock;

	ret = f2fs_convert_inline_inode(dst);
	if (ret)
		goto out_unlock;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(src->i_mapping,
					pos_in, pos_in + len);
	if (ret)
		goto out_unlock;

	ret = filemap_write_and_wait_range(dst->i_mapping,
					pos_out, pos_out + len);
	if (ret)
		goto out_unlock;

	f2fs_balance_fs(sbi, true);

	down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
	if (src != dst) {
		ret = -EBUSY;
		if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
			goto out_src;
	}

	f2fs_lock_op(sbi);
	ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
				pos_out >> F2FS_BLKSIZE_BITS,
				len >> F2FS_BLKSIZE_BITS, false);
	if (!ret) {
		if (dst_max_i_size)
			f2fs_i_size_write(dst, dst_max_i_size);
		else if (dst_osize != dst->i_size)
			f2fs_i_size_write(dst, dst_osize);
	}
	f2fs_unlock_op(sbi);

	if (src != dst)
		up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
out_src:
	up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
out_unlock:
	if (src != dst)
		inode_unlock(dst);
out:
	inode_unlock(src);
	return ret;
}
static int __f2fs_ioc_move_range(struct file *filp,
				struct f2fs_move_range *range)
{
	struct fd dst;
	int err;

	if (!(filp->f_mode & FMODE_READ) ||
			!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	dst = fdget(range->dst_fd);
	if (!dst.file)
		return -EBADF;

	if (!(dst.file->f_mode & FMODE_WRITE)) {
		err = -EBADF;
		goto err_out;
	}

	err = mnt_want_write_file(filp);
	if (err)
		goto err_out;

	err = f2fs_move_file_range(filp, range->pos_in, dst.file,
					range->pos_out, range->len);

	mnt_drop_write_file(filp);
err_out:
	fdput(dst);
	return err;
}
static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
{
	struct f2fs_move_range range;

	if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
							sizeof(range)))
		return -EFAULT;
	return __f2fs_ioc_move_range(filp, &range);
}
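/*
 * Migrate the used segments of one device of a multi-device filesystem by
 * forcing garbage collection on them, e.g. to empty a device of live data.
 * Only plain layouts (segs_per_sec == 1) are supported.
 */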
static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct sit_info *sm = SIT_I(sbi);
	unsigned int start_segno = 0, end_segno = 0;
	unsigned int dev_start_segno = 0, dev_end_segno = 0;
	struct f2fs_flush_device range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		return -EINVAL;

	if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
							sizeof(range)))
		return -EFAULT;

	if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num ||
			__is_large_section(sbi)) {
		f2fs_warn(sbi, "Can't flush %u in %d for segs_per_sec %u != 1",
			  range.dev_num, sbi->s_ndevs, sbi->segs_per_sec);
		return -EINVAL;
	}

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (range.dev_num != 0)
		dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
	dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);

	start_segno = sm->last_victim[FLUSH_DEVICE];
	if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
		start_segno = dev_start_segno;
	end_segno = min(start_segno + range.segments, dev_end_segno);

	while (start_segno < end_segno) {
		if (!down_write_trylock(&sbi->gc_lock)) {
			ret = -EBUSY;
			goto out;
		}
		sm->last_victim[GC_CB] = end_segno + 1;
		sm->last_victim[GC_GREEDY] = end_segno + 1;
		sm->last_victim[ALLOC_NEXT] = end_segno + 1;
		ret = f2fs_gc(sbi, true, true, true, start_segno);
		if (ret == -EAGAIN)
			ret = 0;
		else if (ret < 0)
			break;
		start_segno++;
	}
out:
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);

	/* Must validate to set it with SQLite behavior in Android. */
	sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;

	return put_user(sb_feature, (u32 __user *)arg);
}
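/*
 * Project quota support: f2fs_transfer_project_quota() charges an inode to
 * a new project id, and f2fs_ioc_setproject() is the FS_IOC_FSSETXATTR
 * backend that validates and persists fsx_projid. Both collapse to stubs
 * when CONFIG_QUOTA is disabled.
 */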
#ifdef CONFIG_QUOTA

int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
{
	struct dquot *transfer_to[MAXQUOTAS] = {};
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	int err = 0;

	transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
	if (!IS_ERR(transfer_to[PRJQUOTA])) {
		err = __dquot_transfer(inode, transfer_to);
		if (err)
			set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
		dqput(transfer_to[PRJQUOTA]);
	}
	return err;
}
static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *ipage;
	kprojid_t kprojid;
	int err;

	if (!f2fs_sb_has_project_quota(sbi)) {
		if (projid != F2FS_DEF_PROJID)
			return -EOPNOTSUPP;
		else
			return 0;
	}

	if (!f2fs_has_extra_attr(inode))
		return -EOPNOTSUPP;

	kprojid = make_kprojid(&init_user_ns, (projid_t)projid);

	if (projid_eq(kprojid, F2FS_I(inode)->i_projid))
		return 0;

	err = -EPERM;
	/* Is it quota file? Do not allow user to mess with it */
	if (IS_NOQUOTA(inode))
		return err;

	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage), fi->i_extra_isize,
								i_projid)) {
		err = -EOVERFLOW;
		f2fs_put_page(ipage, 1);
		return err;
	}
	f2fs_put_page(ipage, 1);

	err = dquot_initialize(inode);
	if (err)
		return err;

	f2fs_lock_op(sbi);
	err = f2fs_transfer_project_quota(inode, kprojid);
	if (err)
		goto out_unlock;

	F2FS_I(inode)->i_projid = kprojid;
	inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, true);
out_unlock:
	f2fs_unlock_op(sbi);
	return err;
}
#else
int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
{
	return 0;
}

static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
{
	if (projid != F2FS_DEF_PROJID)
		return -EOPNOTSUPP;
	return 0;
}
#endif
/* FS_IOC_FSGETXATTR and FS_IOC_FSSETXATTR support */

/*
 * To make a new on-disk f2fs i_flag gettable via FS_IOC_FSGETXATTR and settable
 * via FS_IOC_FSSETXATTR, add an entry for it to f2fs_xflags_map[], and add its
 * FS_XFLAG_* equivalent to F2FS_SUPPORTED_XFLAGS.
 */

static const struct {
	u32 iflag;
	u32 xflag;
} f2fs_xflags_map[] = {
	{ F2FS_SYNC_FL,		FS_XFLAG_SYNC },
	{ F2FS_IMMUTABLE_FL,	FS_XFLAG_IMMUTABLE },
	{ F2FS_APPEND_FL,	FS_XFLAG_APPEND },
	{ F2FS_NODUMP_FL,	FS_XFLAG_NODUMP },
	{ F2FS_NOATIME_FL,	FS_XFLAG_NOATIME },
	{ F2FS_PROJINHERIT_FL,	FS_XFLAG_PROJINHERIT },
};

#define F2FS_SUPPORTED_XFLAGS (		\
		FS_XFLAG_SYNC |		\
		FS_XFLAG_IMMUTABLE |	\
		FS_XFLAG_APPEND |	\
		FS_XFLAG_NODUMP |	\
		FS_XFLAG_NOATIME |	\
		FS_XFLAG_PROJINHERIT)
/* Convert f2fs on-disk i_flags to FS_IOC_FS{GET,SET}XATTR flags */
static inline u32 f2fs_iflags_to_xflags(u32 iflags)
{
	u32 xflags = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++)
		if (iflags & f2fs_xflags_map[i].iflag)
			xflags |= f2fs_xflags_map[i].xflag;

	return xflags;
}

/* Convert FS_IOC_FS{GET,SET}XATTR flags to f2fs on-disk i_flags */
static inline u32 f2fs_xflags_to_iflags(u32 xflags)
{
	u32 iflags = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++)
		if (xflags & f2fs_xflags_map[i].xflag)
			iflags |= f2fs_xflags_map[i].iflag;

	return iflags;
}
static void f2fs_fill_fsxattr(struct inode *inode, struct fsxattr *fa)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);

	simple_fill_fsxattr(fa, f2fs_iflags_to_xflags(fi->i_flags));

	if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)))
		fa->fsx_projid = from_kprojid(&init_user_ns, fi->i_projid);
}
static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct fsxattr fa;

	f2fs_fill_fsxattr(inode, &fa);

	if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa)))
		return -EFAULT;
	return 0;
}
static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct fsxattr fa, old_fa;
	u32 iflags;
	int err;

	if (copy_from_user(&fa, (struct fsxattr __user *)arg, sizeof(fa)))
		return -EFAULT;

	/* Make sure caller has proper permission */
	if (!inode_owner_or_capable(&init_user_ns, inode))
		return -EACCES;

	if (fa.fsx_xflags & ~F2FS_SUPPORTED_XFLAGS)
		return -EOPNOTSUPP;

	iflags = f2fs_xflags_to_iflags(fa.fsx_xflags);
	if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
		return -EOPNOTSUPP;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	inode_lock(inode);

	f2fs_fill_fsxattr(inode, &old_fa);
	err = vfs_ioc_fssetxattr_check(inode, &old_fa, &fa);
	if (err)
		goto out;

	err = f2fs_setflags_common(inode, iflags,
			f2fs_xflags_to_iflags(F2FS_SUPPORTED_XFLAGS));
	if (err)
		goto out;

	err = f2fs_ioc_setproject(filp, fa.fsx_projid);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return err;
}
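/*
 * A pinned file must keep its on-disk block addresses stable, so GC skips
 * it. Each failed GC attempt on the file bumps i_gc_failures; once the count
 * exceeds gc_pin_file_threshold the pin is dropped so GC can make forward
 * progress again.
 */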
int f2fs_pin_file_control(struct inode *inode, bool inc)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	/* Use i_gc_failures for normal file as a risk signal. */
	if (inc)
		f2fs_i_gc_failures_write(inode,
				fi->i_gc_failures[GC_FAILURE_PIN] + 1);

	if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) {
		f2fs_warn(sbi, "%s: Enable GC = ino %lx after %x GC trials",
			  __func__, inode->i_ino,
			  fi->i_gc_failures[GC_FAILURE_PIN]);
		clear_inode_flag(inode, FI_PIN_FILE);
		return -EAGAIN;
	}
	return 0;
}
static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	__u32 pin;
	int ret = 0;

	if (get_user(pin, (__u32 __user *)arg))
		return -EFAULT;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (f2fs_readonly(F2FS_I_SB(inode)->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_should_update_outplace(inode, NULL)) {
		ret = -EINVAL;
		goto out;
	}

	if (!pin) {
		clear_inode_flag(inode, FI_PIN_FILE);
		f2fs_i_gc_failures_write(inode, 0);
		goto done;
	}

	if (f2fs_pin_file_control(inode, false)) {
		ret = -EAGAIN;
		goto out;
	}

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	if (!f2fs_disable_compressed_file(inode)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	set_inode_flag(inode, FI_PIN_FILE);
	ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
done:
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	__u32 pin = 0;

	if (is_inode_flag_set(inode, FI_PIN_FILE))
		pin = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
	return put_user(pin, (u32 __user *)arg);
}
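/*
 * Walk the whole file and populate the extent cache with its mapping
 * information, so later lookups can be answered without re-reading dnode
 * blocks.
 */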
int f2fs_precache_extents(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_map_blocks map;
	pgoff_t m_next_extent;
	loff_t end;
	int err;

	if (is_inode_flag_set(inode, FI_NO_EXTENT))
		return -EOPNOTSUPP;

	map.m_lblk = 0;
	map.m_next_pgofs = NULL;
	map.m_next_extent = &m_next_extent;
	map.m_seg_type = NO_CHECK_TYPE;
	map.m_may_create = false;
	end = max_file_blocks(inode);

	while (map.m_lblk < end) {
		map.m_len = end - map.m_lblk;

		down_write(&fi->i_gc_rwsem[WRITE]);
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
		up_write(&fi->i_gc_rwsem[WRITE]);
		if (err)
			return err;

		map.m_lblk = m_next_extent;
	}

	return 0;
}
static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg)
{
	return f2fs_precache_extents(file_inode(filp));
}
static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
	__u64 block_count;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (copy_from_user(&block_count, (void __user *)arg,
			   sizeof(block_count)))
		return -EFAULT;

	return f2fs_resize_fs(sbi, block_count);
}
static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	if (!f2fs_sb_has_verity(F2FS_I_SB(inode))) {
		f2fs_warn(F2FS_I_SB(inode),
			  "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem.\n",
			  inode->i_ino);
		return -EOPNOTSUPP;
	}

	return fsverity_ioctl_enable(filp, (const void __user *)arg);
}
static int f2fs_ioc_measure_verity(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fsverity_ioctl_measure(filp, (void __user *)arg);
}
static int f2fs_ioc_read_verity_metadata(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fsverity_ioctl_read_metadata(filp, (const void __user *)arg);
}
static int f2fs_ioc_getfslabel(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	char *vbuf;
	int count;
	int err = 0;

	vbuf = f2fs_kzalloc(sbi, MAX_VOLUME_NAME, GFP_KERNEL);
	if (!vbuf)
		return -ENOMEM;

	down_read(&sbi->sb_lock);
	count = utf16s_to_utf8s(sbi->raw_super->volume_name,
			ARRAY_SIZE(sbi->raw_super->volume_name),
			UTF16_LITTLE_ENDIAN, vbuf, MAX_VOLUME_NAME);
	up_read(&sbi->sb_lock);

	if (copy_to_user((char __user *)arg, vbuf,
				min(FSLABEL_MAX, count)))
		err = -EFAULT;

	kfree(vbuf);
	return err;
}
static int f2fs_ioc_setfslabel(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	char *vbuf;
	int err = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	vbuf = strndup_user((const char __user *)arg, FSLABEL_MAX);
	if (IS_ERR(vbuf))
		return PTR_ERR(vbuf);

	err = mnt_want_write_file(filp);
	if (err)
		goto out;

	down_write(&sbi->sb_lock);

	memset(sbi->raw_super->volume_name, 0,
			sizeof(sbi->raw_super->volume_name));
	utf8s_to_utf16s(vbuf, strlen(vbuf), UTF16_LITTLE_ENDIAN,
			sbi->raw_super->volume_name,
			ARRAY_SIZE(sbi->raw_super->volume_name));

	err = f2fs_commit_super(sbi, false);

	up_write(&sbi->sb_lock);

	mnt_drop_write_file(filp);
out:
	kfree(vbuf);
	return err;
}
static int f2fs_get_compress_blocks(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	__u64 blocks;

	if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
		return -EOPNOTSUPP;

	if (!f2fs_compressed_file(inode))
		return -EINVAL;

	blocks = atomic_read(&F2FS_I(inode)->i_compr_blocks);
	return put_user(blocks, (u64 __user *)arg);
}
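/*
 * In a compressed cluster, slot 0 holds the COMPRESS_ADDR marker and the
 * slots saved by compression stay as NEW_ADDR reservations.
 * release_compress_blocks() turns those reservations back into NULL_ADDR
 * holes and returns how many blocks were given back to the free space pool.
 */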
static int release_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	unsigned int released_blocks = 0;
	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
	block_t blkaddr;
	int i;

	for (i = 0; i < count; i++) {
		blkaddr = data_blkaddr(dn->inode, dn->node_page,
						dn->ofs_in_node + i);

		if (!__is_valid_data_blkaddr(blkaddr))
			continue;
		if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
					DATA_GENERIC_ENHANCE)))
			return -EFSCORRUPTED;
	}

	while (count) {
		int compr_blocks = 0;

		for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
			blkaddr = f2fs_data_blkaddr(dn);

			if (i == 0) {
				if (blkaddr == COMPRESS_ADDR)
					continue;
				dn->ofs_in_node += cluster_size;
				goto next;
			}

			if (__is_valid_data_blkaddr(blkaddr))
				compr_blocks++;

			if (blkaddr != NEW_ADDR)
				continue;

			dn->data_blkaddr = NULL_ADDR;
			f2fs_set_data_blkaddr(dn);
		}

		f2fs_i_compr_blocks_update(dn->inode, compr_blocks, false);
		dec_valid_block_count(sbi, dn->inode,
					cluster_size - compr_blocks);

		released_blocks += cluster_size - compr_blocks;
next:
		count -= cluster_size;
	}

	return released_blocks;
}
static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t page_idx = 0, last_idx;
	unsigned int released_blocks = 0;
	int ret;
	int writecount;

	if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
		return -EOPNOTSUPP;

	if (!f2fs_compressed_file(inode))
		return -EINVAL;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	f2fs_balance_fs(F2FS_I_SB(inode), true);

	inode_lock(inode);

	writecount = atomic_read(&inode->i_writecount);
	if ((filp->f_mode & FMODE_WRITE && writecount != 1) ||
			(!(filp->f_mode & FMODE_WRITE) && writecount)) {
		ret = -EBUSY;
		goto out;
	}

	if (IS_IMMUTABLE(inode)) {
		ret = -EINVAL;
		goto out;
	}

	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
	if (ret)
		goto out;

	F2FS_I(inode)->i_flags |= F2FS_IMMUTABLE_FL;
	f2fs_set_inode_flags(inode);
	inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, true);

	if (!atomic_read(&F2FS_I(inode)->i_compr_blocks))
		goto out;

	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	down_write(&F2FS_I(inode)->i_mmap_sem);

	last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	while (page_idx < last_idx) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
		if (ret) {
			if (ret == -ENOENT) {
				page_idx = f2fs_get_next_page_offset(&dn,
								page_idx);
				ret = 0;
				continue;
			}
			break;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
		count = round_up(count, F2FS_I(inode)->i_cluster_size);

		ret = release_compress_blocks(&dn, count);

		f2fs_put_dnode(&dn);

		if (ret < 0)
			break;

		page_idx += count;
		released_blocks += ret;
	}

	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	up_write(&F2FS_I(inode)->i_mmap_sem);
out:
	inode_unlock(inode);

	mnt_drop_write_file(filp);

	if (ret >= 0) {
		ret = put_user(released_blocks, (u64 __user *)arg);
	} else if (released_blocks &&
			atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
			"iblocks=%llu, released=%u, compr_blocks=%u, "
			"run fsck to fix.",
			__func__, inode->i_ino, inode->i_blocks,
			released_blocks,
			atomic_read(&F2FS_I(inode)->i_compr_blocks));
	}

	return ret;
}
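/*
 * Inverse of release_compress_blocks(): re-reserve one NEW_ADDR block for
 * every slot a compressed cluster saved, so the file can safely be
 * overwritten in place again. Fails with -ENOSPC when the full reservation
 * cannot be granted.
 */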
static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	unsigned int reserved_blocks = 0;
	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
	block_t blkaddr;
	int i;

	for (i = 0; i < count; i++) {
		blkaddr = data_blkaddr(dn->inode, dn->node_page,
						dn->ofs_in_node + i);

		if (!__is_valid_data_blkaddr(blkaddr))
			continue;
		if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
					DATA_GENERIC_ENHANCE)))
			return -EFSCORRUPTED;
	}

	while (count) {
		int compr_blocks = 0;
		blkcnt_t reserved;
		int ret;

		for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
			blkaddr = f2fs_data_blkaddr(dn);

			if (i == 0) {
				if (blkaddr == COMPRESS_ADDR)
					continue;
				dn->ofs_in_node += cluster_size;
				goto next;
			}

			if (__is_valid_data_blkaddr(blkaddr)) {
				compr_blocks++;
				continue;
			}

			dn->data_blkaddr = NEW_ADDR;
			f2fs_set_data_blkaddr(dn);
		}

		reserved = cluster_size - compr_blocks;
		ret = inc_valid_block_count(sbi, dn->inode, &reserved);
		if (ret)
			return ret;

		if (reserved != cluster_size - compr_blocks)
			return -ENOSPC;

		f2fs_i_compr_blocks_update(dn->inode, compr_blocks, true);

		reserved_blocks += reserved;
next:
		count -= cluster_size;
	}

	return reserved_blocks;
}
static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t page_idx = 0, last_idx;
	unsigned int reserved_blocks = 0;
	int ret;

	if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
		return -EOPNOTSUPP;

	if (!f2fs_compressed_file(inode))
		return -EINVAL;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (atomic_read(&F2FS_I(inode)->i_compr_blocks))
		goto out;

	f2fs_balance_fs(F2FS_I_SB(inode), true);

	inode_lock(inode);

	if (!IS_IMMUTABLE(inode)) {
		ret = -EINVAL;
		goto unlock_inode;
	}

	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	down_write(&F2FS_I(inode)->i_mmap_sem);

	last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	while (page_idx < last_idx) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
		if (ret) {
			if (ret == -ENOENT) {
				page_idx = f2fs_get_next_page_offset(&dn,
								page_idx);
				ret = 0;
				continue;
			}
			break;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
		count = round_up(count, F2FS_I(inode)->i_cluster_size);

		ret = reserve_compress_blocks(&dn, count);

		f2fs_put_dnode(&dn);

		if (ret < 0)
			break;

		page_idx += count;
		reserved_blocks += ret;
	}

	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	up_write(&F2FS_I(inode)->i_mmap_sem);

	if (ret >= 0) {
		F2FS_I(inode)->i_flags &= ~F2FS_IMMUTABLE_FL;
		f2fs_set_inode_flags(inode);
		inode->i_ctime = current_time(inode);
		f2fs_mark_inode_dirty_sync(inode, true);
	}
unlock_inode:
	inode_unlock(inode);
out:
	mnt_drop_write_file(filp);

	if (ret >= 0) {
		ret = put_user(reserved_blocks, (u64 __user *)arg);
	} else if (reserved_blocks &&
			atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
			"iblocks=%llu, reserved=%u, compr_blocks=%u, "
			"run fsck to fix.",
			__func__, inode->i_ino, inode->i_blocks,
			reserved_blocks,
			atomic_read(&F2FS_I(inode)->i_compr_blocks));
	}

	return ret;
}
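/*
 * Erase one contiguous device extent, by discard (secure discard when the
 * queue supports it) and/or by zeroing. Encrypted inodes go through
 * fscrypt_zeroout_range() so that a later read decrypts to zeroes.
 */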
static int f2fs_secure_erase(struct block_device *bdev, struct inode *inode,
		pgoff_t off, block_t block, block_t len, u32 flags)
{
	struct request_queue *q = bdev_get_queue(bdev);
	sector_t sector = SECTOR_FROM_BLOCK(block);
	sector_t nr_sects = SECTOR_FROM_BLOCK(len);
	int ret = 0;

	if (!q)
		return -ENXIO;

	if (flags & F2FS_TRIM_FILE_DISCARD)
		ret = blkdev_issue_discard(bdev, sector, nr_sects, GFP_NOFS,
						blk_queue_secure_erase(q) ?
						BLKDEV_DISCARD_SECURE : 0);

	if (!ret && (flags & F2FS_TRIM_FILE_ZEROOUT)) {
		if (IS_ENCRYPTED(inode))
			ret = fscrypt_zeroout_range(inode, off, block, len);
		else
			ret = blkdev_issue_zeroout(bdev, sector, nr_sects,
					GFP_NOFS, 0);
	}

	return ret;
}
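/*
 * F2FS_IOC_SEC_TRIM_FILE: physically erase the blocks backing a file range.
 * The walk below coalesces runs that are contiguous both in file offset and
 * in device address, issuing one f2fs_secure_erase() call per run.
 */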
static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	struct block_device *prev_bdev = NULL;
	struct f2fs_sectrim_range range;
	pgoff_t index, pg_end, prev_index = 0;
	block_t prev_block = 0, len = 0;
	loff_t end_addr;
	bool to_end = false;
	int ret = 0;

	if (!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	if (copy_from_user(&range, (struct f2fs_sectrim_range __user *)arg,
				sizeof(range)))
		return -EFAULT;

	if (range.flags == 0 || (range.flags & ~F2FS_TRIM_FILE_MASK) ||
			!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (((range.flags & F2FS_TRIM_FILE_DISCARD) &&
			!f2fs_hw_support_discard(sbi)) ||
			((range.flags & F2FS_TRIM_FILE_ZEROOUT) &&
			 IS_ENCRYPTED(inode) && f2fs_is_multi_device(sbi)))
		return -EOPNOTSUPP;

	file_start_write(filp);
	inode_lock(inode);

	if (f2fs_is_atomic_file(inode) || f2fs_compressed_file(inode) ||
			range.start >= inode->i_size) {
		ret = -EINVAL;
		goto err;
	}

	if (range.len == 0)
		goto err;

	if (inode->i_size - range.start > range.len) {
		end_addr = range.start + range.len;
	} else {
		end_addr = range.len == (u64)-1 ?
			sbi->sb->s_maxbytes : inode->i_size;
		to_end = true;
	}

	if (!IS_ALIGNED(range.start, F2FS_BLKSIZE) ||
			(!to_end && !IS_ALIGNED(end_addr, F2FS_BLKSIZE))) {
		ret = -EINVAL;
		goto err;
	}

	index = F2FS_BYTES_TO_BLK(range.start);
	pg_end = DIV_ROUND_UP(end_addr, F2FS_BLKSIZE);

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto err;

	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	down_write(&F2FS_I(inode)->i_mmap_sem);

	ret = filemap_write_and_wait_range(mapping, range.start,
			to_end ? LLONG_MAX : end_addr - 1);
	if (ret)
		goto out;

	truncate_inode_pages_range(mapping, range.start,
			to_end ? -1 : end_addr - 1);

	while (index < pg_end) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;
		int i;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
		if (ret) {
			if (ret == -ENOENT) {
				index = f2fs_get_next_page_offset(&dn, index);
				continue;
			}
			goto out;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, pg_end - index);
		for (i = 0; i < count; i++, index++, dn.ofs_in_node++) {
			struct block_device *cur_bdev;
			block_t blkaddr = f2fs_data_blkaddr(&dn);

			if (!__is_valid_data_blkaddr(blkaddr))
				continue;

			if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
						DATA_GENERIC_ENHANCE)) {
				ret = -EFSCORRUPTED;
				f2fs_put_dnode(&dn);
				goto out;
			}

			cur_bdev = f2fs_target_device(sbi, blkaddr, NULL);
			if (f2fs_is_multi_device(sbi)) {
				int di = f2fs_target_device_index(sbi, blkaddr);

				blkaddr -= FDEV(di).start_blk;
			}

			if (len) {
				if (prev_bdev == cur_bdev &&
						index == prev_index + len &&
						blkaddr == prev_block + len) {
					len++;
				} else {
					ret = f2fs_secure_erase(prev_bdev,
						inode, prev_index, prev_block,
						len, range.flags);
					if (ret) {
						f2fs_put_dnode(&dn);
						goto out;
					}

					len = 0;
				}
			}

			if (!len) {
				prev_bdev = cur_bdev;
				prev_index = index;
				prev_block = blkaddr;
				len = 1;
			}
		}

		f2fs_put_dnode(&dn);

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}
		cond_resched();
	}

	if (len)
		ret = f2fs_secure_erase(prev_bdev, inode, prev_index,
				prev_block, len, range.flags);
out:
	up_write(&F2FS_I(inode)->i_mmap_sem);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
err:
	inode_unlock(inode);
	file_end_write(filp);

	return ret;
}
static int f2fs_ioc_get_compress_option(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_comp_option option;

	if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
		return -EOPNOTSUPP;

	inode_lock_shared(inode);

	if (!f2fs_compressed_file(inode)) {
		inode_unlock_shared(inode);
		return -ENODATA;
	}

	option.algorithm = F2FS_I(inode)->i_compress_algorithm;
	option.log_cluster_size = F2FS_I(inode)->i_log_cluster_size;

	inode_unlock_shared(inode);

	if (copy_to_user((struct f2fs_comp_option __user *)arg, &option,
				sizeof(option)))
		return -EFAULT;

	return 0;
}
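/*
 * F2FS_IOC_SET_COMPRESS_OPTION can only change the algorithm and cluster
 * size of an empty, non-mmapped compressed file, since existing clusters
 * would otherwise have been written with the old geometry.
 */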
static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_comp_option option;
	int ret = 0;

	if (!f2fs_sb_has_compression(sbi))
		return -EOPNOTSUPP;

	if (!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	if (copy_from_user(&option, (struct f2fs_comp_option __user *)arg,
				sizeof(option)))
		return -EFAULT;

	if (!f2fs_compressed_file(inode) ||
			option.log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
			option.log_cluster_size > MAX_COMPRESS_LOG_SIZE ||
			option.algorithm >= COMPRESS_MAX)
		return -EINVAL;

	file_start_write(filp);
	inode_lock(inode);

	if (f2fs_is_mmap_file(inode) || get_dirty_pages(inode)) {
		ret = -EBUSY;
		goto out;
	}

	if (inode->i_size != 0) {
		ret = -EFBIG;
		goto out;
	}

	F2FS_I(inode)->i_compress_algorithm = option.algorithm;
	F2FS_I(inode)->i_log_cluster_size = option.log_cluster_size;
	F2FS_I(inode)->i_cluster_size = 1 << option.log_cluster_size;
	f2fs_mark_inode_dirty_sync(inode, true);

	if (!f2fs_is_compress_backend_ready(inode))
		f2fs_warn(sbi, "compression algorithm is successfully set, "
			"but current kernel doesn't support this algorithm.");
out:
	inode_unlock(inode);
	file_end_write(filp);

	return ret;
}
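/*
 * Read the given page range into the pagecache and mark it dirty again, so
 * that writeback rewrites the data; this is how user-mode (de)compression
 * forces clusters to be rewritten in the new format.
 */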
static int redirty_blocks(struct inode *inode, pgoff_t page_idx, int len)
{
	DEFINE_READAHEAD(ractl, NULL, inode->i_mapping, page_idx);
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	pgoff_t redirty_idx = page_idx;
	int i, page_len = 0, ret = 0;

	page_cache_ra_unbounded(&ractl, len, 0);

	for (i = 0; i < len; i++, page_idx++) {
		page = read_cache_page(mapping, page_idx, NULL, NULL);
		if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			break;
		}
		page_len++;
	}

	for (i = 0; i < page_len; i++, redirty_idx++) {
		page = find_lock_page(mapping, redirty_idx);
		if (!page) {
			ret = -ENOMEM;
			break;
		}
		set_page_dirty(page);
		f2fs_put_page(page, 1);
		f2fs_put_page(page, 0);
	}

	return ret;
}
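/*
 * F2FS_IOC_DECOMPRESS_FILE and F2FS_IOC_COMPRESS_FILE are only available in
 * user-controlled compression mode (compress_mode=user): they redirty the
 * file cluster by cluster and let writeback store the data in the requested
 * format, flushing once enough dirty pages accumulate to fill a segment.
 */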
static int f2fs_ioc_decompress_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	pgoff_t page_idx = 0, last_idx;
	unsigned int blk_per_seg = sbi->blocks_per_seg;
	int cluster_size = F2FS_I(inode)->i_cluster_size;
	int count, ret;

	if (!f2fs_sb_has_compression(sbi) ||
			F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
		return -EOPNOTSUPP;

	if (!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	if (!f2fs_compressed_file(inode))
		return -EINVAL;

	f2fs_balance_fs(F2FS_I_SB(inode), true);

	file_start_write(filp);
	inode_lock(inode);

	if (!f2fs_is_compress_backend_ready(inode)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (f2fs_is_mmap_file(inode)) {
		ret = -EBUSY;
		goto out;
	}

	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
	if (ret)
		goto out;

	if (!atomic_read(&fi->i_compr_blocks))
		goto out;

	last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	count = last_idx - page_idx;
	while (count) {
		int len = min(cluster_size, count);

		ret = redirty_blocks(inode, page_idx, len);
		if (ret < 0)
			break;

		if (get_dirty_pages(inode) >= blk_per_seg)
			filemap_fdatawrite(inode->i_mapping);

		count -= len;
		page_idx += len;
	}

	if (!ret)
		ret = filemap_write_and_wait_range(inode->i_mapping, 0,
							LLONG_MAX);

	if (ret)
		f2fs_warn(sbi, "%s: The file might be partially decompressed "
				"(errno=%d). Please delete the file.\n",
				__func__, ret);
out:
	inode_unlock(inode);
	file_end_write(filp);

	return ret;
}
static int f2fs_ioc_compress_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t page_idx = 0, last_idx;
	unsigned int blk_per_seg = sbi->blocks_per_seg;
	int cluster_size = F2FS_I(inode)->i_cluster_size;
	int count, ret;

	if (!f2fs_sb_has_compression(sbi) ||
			F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
		return -EOPNOTSUPP;

	if (!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	if (!f2fs_compressed_file(inode))
		return -EINVAL;

	f2fs_balance_fs(F2FS_I_SB(inode), true);

	file_start_write(filp);
	inode_lock(inode);

	if (!f2fs_is_compress_backend_ready(inode)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (f2fs_is_mmap_file(inode)) {
		ret = -EBUSY;
		goto out;
	}

	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
	if (ret)
		goto out;

	set_inode_flag(inode, FI_ENABLE_COMPRESS);

	last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	count = last_idx - page_idx;
	while (count) {
		int len = min(cluster_size, count);

		ret = redirty_blocks(inode, page_idx, len);
		if (ret < 0)
			break;

		if (get_dirty_pages(inode) >= blk_per_seg)
			filemap_fdatawrite(inode->i_mapping);

		count -= len;
		page_idx += len;
	}

	if (!ret)
		ret = filemap_write_and_wait_range(inode->i_mapping, 0,
							LLONG_MAX);

	clear_inode_flag(inode, FI_ENABLE_COMPRESS);

	if (ret)
		f2fs_warn(sbi, "%s: The file might be partially compressed "
				"(errno=%d). Please delete the file.\n",
				__func__, ret);
out:
	inode_unlock(inode);
	file_end_write(filp);

	return ret;
}
static long __f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case FS_IOC_GETFLAGS:
		return f2fs_ioc_getflags(filp, arg);
	case FS_IOC_SETFLAGS:
		return f2fs_ioc_setflags(filp, arg);
	case FS_IOC_GETVERSION:
		return f2fs_ioc_getversion(filp, arg);
	case F2FS_IOC_START_ATOMIC_WRITE:
		return f2fs_ioc_start_atomic_write(filp);
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
		return f2fs_ioc_commit_atomic_write(filp);
	case F2FS_IOC_START_VOLATILE_WRITE:
		return f2fs_ioc_start_volatile_write(filp);
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
		return f2fs_ioc_release_volatile_write(filp);
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
		return f2fs_ioc_abort_volatile_write(filp);
	case F2FS_IOC_SHUTDOWN:
		return f2fs_ioc_shutdown(filp, arg);
	case FITRIM:
		return f2fs_ioc_fitrim(filp, arg);
	case FS_IOC_SET_ENCRYPTION_POLICY:
		return f2fs_ioc_set_encryption_policy(filp, arg);
	case FS_IOC_GET_ENCRYPTION_POLICY:
		return f2fs_ioc_get_encryption_policy(filp, arg);
	case FS_IOC_GET_ENCRYPTION_PWSALT:
		return f2fs_ioc_get_encryption_pwsalt(filp, arg);
	case FS_IOC_GET_ENCRYPTION_POLICY_EX:
		return f2fs_ioc_get_encryption_policy_ex(filp, arg);
	case FS_IOC_ADD_ENCRYPTION_KEY:
		return f2fs_ioc_add_encryption_key(filp, arg);
	case FS_IOC_REMOVE_ENCRYPTION_KEY:
		return f2fs_ioc_remove_encryption_key(filp, arg);
	case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
		return f2fs_ioc_remove_encryption_key_all_users(filp, arg);
	case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
		return f2fs_ioc_get_encryption_key_status(filp, arg);
	case FS_IOC_GET_ENCRYPTION_NONCE:
		return f2fs_ioc_get_encryption_nonce(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT:
		return f2fs_ioc_gc(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
		return f2fs_ioc_gc_range(filp, arg);
	case F2FS_IOC_WRITE_CHECKPOINT:
		return f2fs_ioc_write_checkpoint(filp, arg);
	case F2FS_IOC_DEFRAGMENT:
		return f2fs_ioc_defragment(filp, arg);
	case F2FS_IOC_MOVE_RANGE:
		return f2fs_ioc_move_range(filp, arg);
	case F2FS_IOC_FLUSH_DEVICE:
		return f2fs_ioc_flush_device(filp, arg);
	case F2FS_IOC_GET_FEATURES:
		return f2fs_ioc_get_features(filp, arg);
	case FS_IOC_FSGETXATTR:
		return f2fs_ioc_fsgetxattr(filp, arg);
	case FS_IOC_FSSETXATTR:
		return f2fs_ioc_fssetxattr(filp, arg);
	case F2FS_IOC_GET_PIN_FILE:
		return f2fs_ioc_get_pin_file(filp, arg);
	case F2FS_IOC_SET_PIN_FILE:
		return f2fs_ioc_set_pin_file(filp, arg);
	case F2FS_IOC_PRECACHE_EXTENTS:
		return f2fs_ioc_precache_extents(filp, arg);
	case F2FS_IOC_RESIZE_FS:
		return f2fs_ioc_resize_fs(filp, arg);
	case FS_IOC_ENABLE_VERITY:
		return f2fs_ioc_enable_verity(filp, arg);
	case FS_IOC_MEASURE_VERITY:
		return f2fs_ioc_measure_verity(filp, arg);
	case FS_IOC_READ_VERITY_METADATA:
		return f2fs_ioc_read_verity_metadata(filp, arg);
	case FS_IOC_GETFSLABEL:
		return f2fs_ioc_getfslabel(filp, arg);
	case FS_IOC_SETFSLABEL:
		return f2fs_ioc_setfslabel(filp, arg);
	case F2FS_IOC_GET_COMPRESS_BLOCKS:
		return f2fs_get_compress_blocks(filp, arg);
	case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
		return f2fs_release_compress_blocks(filp, arg);
	case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
		return f2fs_reserve_compress_blocks(filp, arg);
	case F2FS_IOC_SEC_TRIM_FILE:
		return f2fs_sec_trim_file(filp, arg);
	case F2FS_IOC_GET_COMPRESS_OPTION:
		return f2fs_ioc_get_compress_option(filp, arg);
	case F2FS_IOC_SET_COMPRESS_OPTION:
		return f2fs_ioc_set_compress_option(filp, arg);
	case F2FS_IOC_DECOMPRESS_FILE:
		return f2fs_ioc_decompress_file(filp, arg);
	case F2FS_IOC_COMPRESS_FILE:
		return f2fs_ioc_compress_file(filp, arg);
	default:
		return -ENOTTY;
	}
}
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
		return -EIO;
	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(filp))))
		return -ENOSPC;

	return __f2fs_ioctl(filp, cmd, arg);
}
static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	int ret;

	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	ret = generic_file_read_iter(iocb, iter);

	if (ret > 0)
		f2fs_update_iostat(F2FS_I_SB(inode), APP_READ_IO, ret);

	return ret;
}
static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	ssize_t ret;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
		ret = -EIO;
		goto out;
	}

	if (!f2fs_is_compress_backend_ready(inode)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock(inode)) {
			ret = -EAGAIN;
			goto out;
		}
	} else {
		inode_lock(inode);
	}

	if (unlikely(IS_IMMUTABLE(inode))) {
		ret = -EPERM;
		goto unlock;
	}

	ret = generic_write_checks(iocb, from);
	if (ret > 0) {
		bool preallocated = false;
		size_t target_size = 0;
		int err;

		if (iov_iter_fault_in_readable(from, iov_iter_count(from)))
			set_inode_flag(inode, FI_NO_PREALLOC);

		if ((iocb->ki_flags & IOCB_NOWAIT)) {
			if (!f2fs_overwrite_io(inode, iocb->ki_pos,
						iov_iter_count(from)) ||
				f2fs_has_inline_data(inode) ||
				f2fs_force_buffered_io(inode, iocb, from)) {
				clear_inode_flag(inode, FI_NO_PREALLOC);
				inode_unlock(inode);
				ret = -EAGAIN;
				goto out;
			}
			goto write;
		}

		if (is_inode_flag_set(inode, FI_NO_PREALLOC))
			goto write;

		if (iocb->ki_flags & IOCB_DIRECT) {
			/*
			 * Convert inline data for Direct I/O before entering
			 * f2fs_direct_IO().
			 */
			err = f2fs_convert_inline_inode(inode);
			if (err)
				goto out_err;
			/*
			 * If f2fs_force_buffered_io() is true, we have to
			 * allocate blocks all the time, since f2fs_direct_IO
			 * will fall back to buffered IO.
			 */
			if (!f2fs_force_buffered_io(inode, iocb, from) &&
					allow_outplace_dio(inode, iocb, from))
				goto write;
		}
		preallocated = true;
		target_size = iocb->ki_pos + iov_iter_count(from);

		err = f2fs_preallocate_blocks(iocb, from);
		if (err) {
out_err:
			clear_inode_flag(inode, FI_NO_PREALLOC);
			inode_unlock(inode);
			ret = err;
			goto out;
		}
write:
		ret = __generic_file_write_iter(iocb, from);
		clear_inode_flag(inode, FI_NO_PREALLOC);

		/* if we couldn't write data, we should deallocate blocks. */
		if (preallocated && i_size_read(inode) < target_size) {
			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			down_write(&F2FS_I(inode)->i_mmap_sem);
			f2fs_truncate(inode);
			up_write(&F2FS_I(inode)->i_mmap_sem);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		}

		if (ret > 0)
			f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
	}
unlock:
	inode_unlock(inode);
out:
	trace_f2fs_file_write_iter(inode, iocb->ki_pos,
					iov_iter_count(from), ret);
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
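/*
 * 32-bit ABI compatibility: a 32-bit userspace may lay out u64 fields in
 * the range structures with different alignment and padding than a 64-bit
 * kernel, so the two ioctls below are re-declared with compat_u64 and
 * translated field by field before calling the common helpers.
 */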
#ifdef CONFIG_COMPAT
struct compat_f2fs_gc_range {
	u32 sync;
	compat_u64 start;
	compat_u64 len;
};
#define F2FS_IOC32_GARBAGE_COLLECT_RANGE	_IOW(F2FS_IOCTL_MAGIC, 11,\
						struct compat_f2fs_gc_range)

static int f2fs_compat_ioc_gc_range(struct file *file, unsigned long arg)
{
	struct compat_f2fs_gc_range __user *urange;
	struct f2fs_gc_range range;
	int err;

	urange = compat_ptr(arg);
	err = get_user(range.sync, &urange->sync);
	err |= get_user(range.start, &urange->start);
	err |= get_user(range.len, &urange->len);
	if (err)
		return -EFAULT;

	return __f2fs_ioc_gc_range(file, &range);
}

struct compat_f2fs_move_range {
	u32 dst_fd;
	compat_u64 pos_in;
	compat_u64 pos_out;
	compat_u64 len;
};
#define F2FS_IOC32_MOVE_RANGE		_IOWR(F2FS_IOCTL_MAGIC, 9,	\
					struct compat_f2fs_move_range)

static int f2fs_compat_ioc_move_range(struct file *file, unsigned long arg)
{
	struct compat_f2fs_move_range __user *urange;
	struct f2fs_move_range range;
	int err;

	urange = compat_ptr(arg);
	err = get_user(range.dst_fd, &urange->dst_fd);
	err |= get_user(range.pos_in, &urange->pos_in);
	err |= get_user(range.pos_out, &urange->pos_out);
	err |= get_user(range.len, &urange->len);
	if (err)
		return -EFAULT;

	return __f2fs_ioc_move_range(file, &range);
}
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
		return -EIO;
	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(file))))
		return -ENOSPC;

	switch (cmd) {
	case FS_IOC32_GETFLAGS:
		cmd = FS_IOC_GETFLAGS;
		break;
	case FS_IOC32_SETFLAGS:
		cmd = FS_IOC_SETFLAGS;
		break;
	case FS_IOC32_GETVERSION:
		cmd = FS_IOC_GETVERSION;
		break;
	case F2FS_IOC32_GARBAGE_COLLECT_RANGE:
		return f2fs_compat_ioc_gc_range(file, arg);
	case F2FS_IOC32_MOVE_RANGE:
		return f2fs_compat_ioc_move_range(file, arg);
	case F2FS_IOC_START_ATOMIC_WRITE:
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
	case F2FS_IOC_START_VOLATILE_WRITE:
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
	case F2FS_IOC_SHUTDOWN:
	case FITRIM:
	case FS_IOC_SET_ENCRYPTION_POLICY:
	case FS_IOC_GET_ENCRYPTION_PWSALT:
	case FS_IOC_GET_ENCRYPTION_POLICY:
	case FS_IOC_GET_ENCRYPTION_POLICY_EX:
	case FS_IOC_ADD_ENCRYPTION_KEY:
	case FS_IOC_REMOVE_ENCRYPTION_KEY:
	case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
	case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
	case FS_IOC_GET_ENCRYPTION_NONCE:
	case F2FS_IOC_GARBAGE_COLLECT:
	case F2FS_IOC_WRITE_CHECKPOINT:
	case F2FS_IOC_DEFRAGMENT:
	case F2FS_IOC_FLUSH_DEVICE:
	case F2FS_IOC_GET_FEATURES:
	case FS_IOC_FSGETXATTR:
	case FS_IOC_FSSETXATTR:
	case F2FS_IOC_GET_PIN_FILE:
	case F2FS_IOC_SET_PIN_FILE:
	case F2FS_IOC_PRECACHE_EXTENTS:
	case F2FS_IOC_RESIZE_FS:
	case FS_IOC_ENABLE_VERITY:
	case FS_IOC_MEASURE_VERITY:
	case FS_IOC_READ_VERITY_METADATA:
	case FS_IOC_GETFSLABEL:
	case FS_IOC_SETFSLABEL:
	case F2FS_IOC_GET_COMPRESS_BLOCKS:
	case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
	case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
	case F2FS_IOC_SEC_TRIM_FILE:
	case F2FS_IOC_GET_COMPRESS_OPTION:
	case F2FS_IOC_SET_COMPRESS_OPTION:
	case F2FS_IOC_DECOMPRESS_FILE:
	case F2FS_IOC_COMPRESS_FILE:
		break;
	default:
		return -ENOIOCTLCMD;
	}
	return __f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif
const struct file_operations f2fs_file_operations = {
	.llseek		= f2fs_llseek,
	.read_iter	= f2fs_file_read_iter,
	.write_iter	= f2fs_file_write_iter,
	.open		= f2fs_file_open,
	.release	= f2fs_release_file,
	.mmap		= f2fs_file_mmap,
	.flush		= f2fs_file_flush,
	.fsync		= f2fs_sync_file,
	.fallocate	= f2fs_fallocate,
	.unlocked_ioctl	= f2fs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= f2fs_compat_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
};