1 // SPDX-License-Identifier: GPL-2.0
5 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6 * http://www.samsung.com/
9 #include <linux/f2fs_fs.h>
10 #include <linux/stat.h>
11 #include <linux/buffer_head.h>
12 #include <linux/writeback.h>
13 #include <linux/blkdev.h>
14 #include <linux/falloc.h>
15 #include <linux/types.h>
16 #include <linux/compat.h>
17 #include <linux/uaccess.h>
18 #include <linux/mount.h>
19 #include <linux/pagevec.h>
20 #include <linux/uio.h>
21 #include <linux/uuid.h>
22 #include <linux/file.h>
23 #include <linux/nls.h>
24 #include <linux/sched/signal.h>
33 #include <trace/events/f2fs.h>
34 #include <uapi/linux/f2fs.h>
36 static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
38 struct inode *inode = file_inode(vmf->vma->vm_file);
41 down_read(&F2FS_I(inode)->i_mmap_sem);
42 ret = filemap_fault(vmf);
43 up_read(&F2FS_I(inode)->i_mmap_sem);
46 f2fs_update_iostat(F2FS_I_SB(inode), APP_MAPPED_READ_IO,
49 trace_f2fs_filemap_fault(inode, vmf->pgoff, (unsigned long)ret);
54 static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
56 struct page *page = vmf->page;
57 struct inode *inode = file_inode(vmf->vma->vm_file);
58 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
59 struct dnode_of_data dn;
60 bool need_alloc = true;
63 if (unlikely(f2fs_cp_error(sbi))) {
68 if (!f2fs_is_checkpoint_ready(sbi)) {
73 #ifdef CONFIG_F2FS_FS_COMPRESSION
74 if (f2fs_compressed_file(inode)) {
75 int ret = f2fs_is_compressed_cluster(inode, page->index);
81 if (ret < F2FS_I(inode)->i_cluster_size) {
89 /* should be done outside of any locked page */
91 f2fs_balance_fs(sbi, true);
93 sb_start_pagefault(inode->i_sb);
95 f2fs_bug_on(sbi, f2fs_has_inline_data(inode));
97 file_update_time(vmf->vma->vm_file);
98 down_read(&F2FS_I(inode)->i_mmap_sem);
100 if (unlikely(page->mapping != inode->i_mapping ||
101 page_offset(page) > i_size_read(inode) ||
102 !PageUptodate(page))) {
109 /* block allocation */
110 f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
111 set_new_dnode(&dn, inode, NULL, NULL, 0);
112 err = f2fs_get_block(&dn, page->index);
114 f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
117 #ifdef CONFIG_F2FS_FS_COMPRESSION
119 set_new_dnode(&dn, inode, NULL, NULL, 0);
120 err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
129 f2fs_wait_on_page_writeback(page, DATA, false, true);
131 /* wait for GCed page writeback via META_MAPPING */
132 f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
135 * check to see if the page is mapped already (no holes)
137 if (PageMappedToDisk(page))
140 /* page is wholly or partially inside EOF */
141 if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
142 i_size_read(inode)) {
145 offset = i_size_read(inode) & ~PAGE_MASK;
146 zero_user_segment(page, offset, PAGE_SIZE);
148 set_page_dirty(page);
149 if (!PageUptodate(page))
150 SetPageUptodate(page);
152 f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);
153 f2fs_update_time(sbi, REQ_TIME);
155 trace_f2fs_vm_page_mkwrite(page, DATA);
157 up_read(&F2FS_I(inode)->i_mmap_sem);
159 sb_end_pagefault(inode->i_sb);
161 return block_page_mkwrite_return(err);
164 static const struct vm_operations_struct f2fs_file_vm_ops = {
165 .fault = f2fs_filemap_fault,
166 .map_pages = filemap_map_pages,
167 .page_mkwrite = f2fs_vm_page_mkwrite,
170 static int get_parent_ino(struct inode *inode, nid_t *pino)
172 struct dentry *dentry;
175 * Make sure to get the non-deleted alias. The alias associated with
176 * the open file descriptor being fsync()'ed may be deleted already.
178 dentry = d_find_alias(inode);
182 *pino = parent_ino(dentry);
187 static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
189 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
190 enum cp_reason_type cp_reason = CP_NO_NEEDED;
192 if (!S_ISREG(inode->i_mode))
193 cp_reason = CP_NON_REGULAR;
194 else if (f2fs_compressed_file(inode))
195 cp_reason = CP_COMPRESSED;
196 else if (inode->i_nlink != 1)
197 cp_reason = CP_HARDLINK;
198 else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
199 cp_reason = CP_SB_NEED_CP;
200 else if (file_wrong_pino(inode))
201 cp_reason = CP_WRONG_PINO;
202 else if (!f2fs_space_for_roll_forward(sbi))
203 cp_reason = CP_NO_SPC_ROLL;
204 else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
205 cp_reason = CP_NODE_NEED_CP;
206 else if (test_opt(sbi, FASTBOOT))
207 cp_reason = CP_FASTBOOT_MODE;
208 else if (F2FS_OPTION(sbi).active_logs == 2)
209 cp_reason = CP_SPEC_LOG_NUM;
210 else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT &&
211 f2fs_need_dentry_mark(sbi, inode->i_ino) &&
212 f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
214 cp_reason = CP_RECOVER_DIR;
219 static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
221 struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
223 /* but we need to avoid any pending inode updates */
224 if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino))
230 static void try_to_fix_pino(struct inode *inode)
232 struct f2fs_inode_info *fi = F2FS_I(inode);
235 down_write(&fi->i_sem);
236 if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
237 get_parent_ino(inode, &pino)) {
238 f2fs_i_pino_write(inode, pino);
239 file_got_pino(inode);
241 up_write(&fi->i_sem);
244 static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
245 int datasync, bool atomic)
247 struct inode *inode = file->f_mapping->host;
248 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
249 nid_t ino = inode->i_ino;
251 enum cp_reason_type cp_reason = 0;
252 struct writeback_control wbc = {
253 .sync_mode = WB_SYNC_ALL,
254 .nr_to_write = LONG_MAX,
257 unsigned int seq_id = 0;
259 if (unlikely(f2fs_readonly(inode->i_sb) ||
260 is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
263 trace_f2fs_sync_file_enter(inode);
265 if (S_ISDIR(inode->i_mode))
268 /* if fdatasync is triggered, let's do in-place-update */
269 if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
270 set_inode_flag(inode, FI_NEED_IPU);
271 ret = file_write_and_wait_range(file, start, end);
272 clear_inode_flag(inode, FI_NEED_IPU);
275 trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
279 /* if the inode is dirty, let's recover all the time */
280 if (!f2fs_skip_inode_update(inode, datasync)) {
281 f2fs_write_inode(inode, NULL);
286 * if there is no written data, don't waste time writing recovery info.
288 if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
289 !f2fs_exist_written_data(sbi, ino, APPEND_INO)) {
291 /* it may call write_inode just prior to fsync */
292 if (need_inode_page_update(sbi, ino))
295 if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
296 f2fs_exist_written_data(sbi, ino, UPDATE_INO))
302 * Both fdatasync() and fsync() are able to be recovered from a
303 * sudden power-off.
304 */
305 down_read(&F2FS_I(inode)->i_sem);
306 cp_reason = need_do_checkpoint(inode);
307 up_read(&F2FS_I(inode)->i_sem);
310 /* all the dirty node pages should be flushed for POR */
311 ret = f2fs_sync_fs(inode->i_sb, 1);
314 * We've secured consistency through sync_fs. The following pino
315 * will be used only for fsynced inodes after checkpoint.
317 try_to_fix_pino(inode);
318 clear_inode_flag(inode, FI_APPEND_WRITE);
319 clear_inode_flag(inode, FI_UPDATE_WRITE);
323 atomic_inc(&sbi->wb_sync_req[NODE]);
324 ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
325 atomic_dec(&sbi->wb_sync_req[NODE]);
329 /* if cp_error was enabled, we should avoid an infinite loop */
330 if (unlikely(f2fs_cp_error(sbi))) {
335 if (f2fs_need_inode_block_update(sbi, ino)) {
336 f2fs_mark_inode_dirty_sync(inode, true);
337 f2fs_write_inode(inode, NULL);
342 * If it's atomic_write, it's just fine to keep write ordering. So
343 * here we don't need to wait for node write completion, since we use
344 * the node chain, which serializes node blocks. If one of the node
345 * writes is reordered, we simply see a broken chain, which stops
346 * roll-forward recovery. It means we'll recover all or none of the
347 * node blocks given their checkpoint was done.
348 */
349 if (!atomic)
350 ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id);
355 /* once recovery info is written, we don't need to track this */
356 f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
357 clear_inode_flag(inode, FI_APPEND_WRITE);
359 if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER)
360 ret = f2fs_issue_flush(sbi, inode->i_ino);
362 f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
363 clear_inode_flag(inode, FI_UPDATE_WRITE);
364 f2fs_remove_ino_entry(sbi, ino, FLUSH_INO);
366 f2fs_update_time(sbi, REQ_TIME);
368 trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
369 f2fs_trace_ios(NULL, 1);
373 int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
375 if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
377 return f2fs_do_sync_file(file, start, end, datasync, false);
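/*
 * A minimal userspace sketch of the two entry points above: fdatasync()
 * reaches f2fs_do_sync_file() with datasync == 1 and may take the cheaper
 * in-place-update path, while fsync() passes datasync == 0. The file path
 * is a hypothetical example.
 */
#if 0	/* illustrative userspace usage, not part of this file */
#include <fcntl.h>
#include <unistd.h>

int sync_example(void)
{
	int fd = open("/mnt/f2fs/db.sqlite", O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, "data", 4) != 4 ||
	    fdatasync(fd) ||		/* data + minimal metadata */
	    fsync(fd)) {		/* data + all metadata */
		close(fd);
		return -1;
	}
	return close(fd);
}
#endif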
380 static bool __found_offset(struct address_space *mapping, block_t blkaddr,
381 pgoff_t index, int whence)
385 if (__is_valid_data_blkaddr(blkaddr))
387 if (blkaddr == NEW_ADDR &&
388 xa_get_mark(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY))
392 if (blkaddr == NULL_ADDR)
399 static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
401 struct inode *inode = file->f_mapping->host;
402 loff_t maxbytes = inode->i_sb->s_maxbytes;
403 struct dnode_of_data dn;
404 pgoff_t pgofs, end_offset;
405 loff_t data_ofs = offset;
411 isize = i_size_read(inode);
415 /* handle inline data case */
416 if (f2fs_has_inline_data(inode)) {
417 if (whence == SEEK_HOLE) {
420 } else if (whence == SEEK_DATA) {
426 pgofs = (pgoff_t)(offset >> PAGE_SHIFT);
428 for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
429 set_new_dnode(&dn, inode, NULL, NULL, 0);
430 err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
431 if (err && err != -ENOENT) {
433 } else if (err == -ENOENT) {
434 /* direct node does not exist */
435 if (whence == SEEK_DATA) {
436 pgofs = f2fs_get_next_page_offset(&dn, pgofs);
443 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
445 /* find data/hole in dnode block */
446 for (; dn.ofs_in_node < end_offset;
447 dn.ofs_in_node++, pgofs++,
448 data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
451 blkaddr = f2fs_data_blkaddr(&dn);
453 if (__is_valid_data_blkaddr(blkaddr) &&
454 !f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
455 blkaddr, DATA_GENERIC_ENHANCE)) {
460 if (__found_offset(file->f_mapping, blkaddr,
469 if (whence == SEEK_DATA)
472 if (whence == SEEK_HOLE && data_ofs > isize)
475 return vfs_setpos(file, data_ofs, maxbytes);
481 static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
483 struct inode *inode = file->f_mapping->host;
484 loff_t maxbytes = inode->i_sb->s_maxbytes;
490 return generic_file_llseek_size(file, offset, whence,
491 maxbytes, i_size_read(inode));
496 return f2fs_seek_block(file, offset, whence);
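/*
 * A short userspace sketch of the SEEK_DATA/SEEK_HOLE semantics implemented
 * by f2fs_seek_block() above; the iteration pattern is standard lseek(2)
 * usage and the file path is supplied by the caller.
 */
#if 0	/* illustrative userspace usage, not part of this file */
#define _GNU_SOURCE	/* for SEEK_DATA/SEEK_HOLE */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

void dump_extents(const char *path)
{
	int fd = open(path, O_RDONLY);
	off_t data = 0, hole;

	if (fd < 0)
		return;
	/* lseek(SEEK_DATA) fails with ENXIO once no data remains */
	while ((data = lseek(fd, data, SEEK_DATA)) >= 0) {
		hole = lseek(fd, data, SEEK_HOLE);
		printf("data: [%lld, %lld)\n", (long long)data,
		       (long long)hole);
		data = hole;
	}
	close(fd);
}
#endif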
502 static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
504 struct inode *inode = file_inode(file);
507 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
510 if (!f2fs_is_compress_backend_ready(inode))
513 /* we don't need to use inline_data strictly */
514 err = f2fs_convert_inline_inode(inode);
519 vma->vm_ops = &f2fs_file_vm_ops;
520 set_inode_flag(inode, FI_MMAP_FILE);
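/*
 * A minimal userspace sketch of the mmap path wired up above: a load
 * faults through f2fs_filemap_fault(), and the first store to a clean
 * page goes through f2fs_vm_page_mkwrite(). The path is a hypothetical
 * example.
 */
#if 0	/* illustrative userspace usage, not part of this file */
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int mmap_example(void)
{
	int fd = open("/mnt/f2fs/file", O_RDWR);
	char *p;

	if (fd < 0)
		return -1;
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		close(fd);
		return -1;
	}
	p[0] = 'x';		/* fault, then ->page_mkwrite */
	munmap(p, 4096);
	return close(fd);
}
#endif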
524 static int f2fs_file_open(struct inode *inode, struct file *filp)
526 int err = fscrypt_file_open(inode, filp);
531 if (!f2fs_is_compress_backend_ready(inode))
534 err = fsverity_file_open(inode, filp);
538 filp->f_mode |= FMODE_NOWAIT;
540 return dquot_file_open(inode, filp);
543 void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
545 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
546 struct f2fs_node *raw_node;
547 int nr_free = 0, ofs = dn->ofs_in_node, len = count;
550 bool compressed_cluster = false;
551 int cluster_index = 0, valid_blocks = 0;
552 int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
553 bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks);
555 if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
556 base = get_extra_isize(dn->inode);
558 raw_node = F2FS_NODE(dn->node_page);
559 addr = blkaddr_in_node(raw_node) + base + ofs;
561 /* Assumption: truncation starts at a cluster boundary */
562 for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
563 block_t blkaddr = le32_to_cpu(*addr);
565 if (f2fs_compressed_file(dn->inode) &&
566 !(cluster_index & (cluster_size - 1))) {
567 if (compressed_cluster)
568 f2fs_i_compr_blocks_update(dn->inode,
569 valid_blocks, false);
570 compressed_cluster = (blkaddr == COMPRESS_ADDR);
574 if (blkaddr == NULL_ADDR)
577 dn->data_blkaddr = NULL_ADDR;
578 f2fs_set_data_blkaddr(dn);
580 if (__is_valid_data_blkaddr(blkaddr)) {
581 if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
582 DATA_GENERIC_ENHANCE))
584 if (compressed_cluster)
588 if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
589 clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
591 f2fs_invalidate_blocks(sbi, blkaddr);
593 if (!released || blkaddr != COMPRESS_ADDR)
597 if (compressed_cluster)
598 f2fs_i_compr_blocks_update(dn->inode, valid_blocks, false);
603 * once we invalidate a valid blkaddr in the range [ofs, ofs + count],
604 * we will invalidate all blkaddr in the whole range.
606 fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
608 f2fs_update_extent_cache_range(dn, fofs, 0, len);
609 dec_valid_block_count(sbi, dn->inode, nr_free);
611 dn->ofs_in_node = ofs;
613 f2fs_update_time(sbi, REQ_TIME);
614 trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
615 dn->ofs_in_node, nr_free);
618 void f2fs_truncate_data_blocks(struct dnode_of_data *dn)
620 f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
623 static int truncate_partial_data_page(struct inode *inode, u64 from,
626 loff_t offset = from & (PAGE_SIZE - 1);
627 pgoff_t index = from >> PAGE_SHIFT;
628 struct address_space *mapping = inode->i_mapping;
631 if (!offset && !cache_only)
635 page = find_lock_page(mapping, index);
636 if (page && PageUptodate(page))
638 f2fs_put_page(page, 1);
642 page = f2fs_get_lock_data_page(inode, index, true);
644 return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
646 f2fs_wait_on_page_writeback(page, DATA, true, true);
647 zero_user(page, offset, PAGE_SIZE - offset);
649 /* An encrypted inode should have a key and truncate the last page. */
650 f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode));
652 set_page_dirty(page);
653 f2fs_put_page(page, 1);
657 int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock)
659 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
660 struct dnode_of_data dn;
662 int count = 0, err = 0;
664 bool truncate_page = false;
666 trace_f2fs_truncate_blocks_enter(inode, from);
668 free_from = (pgoff_t)F2FS_BLK_ALIGN(from);
670 if (free_from >= sbi->max_file_blocks)
676 ipage = f2fs_get_node_page(sbi, inode->i_ino);
678 err = PTR_ERR(ipage);
682 if (f2fs_has_inline_data(inode)) {
683 f2fs_truncate_inline_inode(inode, ipage, from);
684 f2fs_put_page(ipage, 1);
685 truncate_page = true;
689 set_new_dnode(&dn, inode, ipage, NULL, 0);
690 err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
697 count = ADDRS_PER_PAGE(dn.node_page, inode);
699 count -= dn.ofs_in_node;
700 f2fs_bug_on(sbi, count < 0);
702 if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
703 f2fs_truncate_data_blocks_range(&dn, count);
709 err = f2fs_truncate_inode_blocks(inode, free_from);
714 /* lastly zero out the first data page */
716 err = truncate_partial_data_page(inode, from, truncate_page);
718 trace_f2fs_truncate_blocks_exit(inode, err);
722 int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
724 u64 free_from = from;
727 #ifdef CONFIG_F2FS_FS_COMPRESSION
729 * for compressed files, only cluster-size-aligned
730 * truncation is supported.
732 if (f2fs_compressed_file(inode))
733 free_from = round_up(from,
734 F2FS_I(inode)->i_cluster_size << PAGE_SHIFT);
737 err = f2fs_do_truncate_blocks(inode, free_from, lock);
741 #ifdef CONFIG_F2FS_FS_COMPRESSION
742 if (from != free_from) {
743 err = f2fs_truncate_partial_cluster(inode, from, lock);
752 int f2fs_truncate(struct inode *inode)
756 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
759 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
760 S_ISLNK(inode->i_mode)))
763 trace_f2fs_truncate(inode);
765 if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
766 f2fs_show_injection_info(F2FS_I_SB(inode), FAULT_TRUNCATE);
770 /* we should check inline_data size */
771 if (!f2fs_may_inline_data(inode)) {
772 err = f2fs_convert_inline_inode(inode);
777 err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
781 inode->i_mtime = inode->i_ctime = current_time(inode);
782 f2fs_mark_inode_dirty_sync(inode, false);
786 int f2fs_getattr(struct user_namespace *mnt_userns, const struct path *path,
787 struct kstat *stat, u32 request_mask, unsigned int query_flags)
789 struct inode *inode = d_inode(path->dentry);
790 struct f2fs_inode_info *fi = F2FS_I(inode);
791 struct f2fs_inode *ri;
794 if (f2fs_has_extra_attr(inode) &&
795 f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
796 F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
797 stat->result_mask |= STATX_BTIME;
798 stat->btime.tv_sec = fi->i_crtime.tv_sec;
799 stat->btime.tv_nsec = fi->i_crtime.tv_nsec;
803 if (flags & F2FS_COMPR_FL)
804 stat->attributes |= STATX_ATTR_COMPRESSED;
805 if (flags & F2FS_APPEND_FL)
806 stat->attributes |= STATX_ATTR_APPEND;
807 if (IS_ENCRYPTED(inode))
808 stat->attributes |= STATX_ATTR_ENCRYPTED;
809 if (flags & F2FS_IMMUTABLE_FL)
810 stat->attributes |= STATX_ATTR_IMMUTABLE;
811 if (flags & F2FS_NODUMP_FL)
812 stat->attributes |= STATX_ATTR_NODUMP;
813 if (IS_VERITY(inode))
814 stat->attributes |= STATX_ATTR_VERITY;
816 stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
818 STATX_ATTR_ENCRYPTED |
819 STATX_ATTR_IMMUTABLE |
823 generic_fillattr(&init_user_ns, inode, stat);
825 /* we need to show initial sectors used for inline_data/dentries */
826 if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
827 f2fs_has_inline_dentry(inode))
828 stat->blocks += (stat->size + 511) >> 9;
833 #ifdef CONFIG_F2FS_FS_POSIX_ACL
834 static void __setattr_copy(struct user_namespace *mnt_userns,
835 struct inode *inode, const struct iattr *attr)
837 unsigned int ia_valid = attr->ia_valid;
839 if (ia_valid & ATTR_UID)
840 inode->i_uid = attr->ia_uid;
841 if (ia_valid & ATTR_GID)
842 inode->i_gid = attr->ia_gid;
843 if (ia_valid & ATTR_ATIME)
844 inode->i_atime = attr->ia_atime;
845 if (ia_valid & ATTR_MTIME)
846 inode->i_mtime = attr->ia_mtime;
847 if (ia_valid & ATTR_CTIME)
848 inode->i_ctime = attr->ia_ctime;
849 if (ia_valid & ATTR_MODE) {
850 umode_t mode = attr->ia_mode;
851 kgid_t kgid = i_gid_into_mnt(mnt_userns, inode);
853 if (!in_group_p(kgid) && !capable(CAP_FSETID))
855 set_acl_inode(inode, mode);
859 #define __setattr_copy setattr_copy
862 int f2fs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
865 struct inode *inode = d_inode(dentry);
868 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
871 if ((attr->ia_valid & ATTR_SIZE) &&
872 !f2fs_is_compress_backend_ready(inode))
875 err = setattr_prepare(&init_user_ns, dentry, attr);
879 err = fscrypt_prepare_setattr(dentry, attr);
883 err = fsverity_prepare_setattr(dentry, attr);
887 if (is_quota_modification(inode, attr)) {
888 err = dquot_initialize(inode);
892 if ((attr->ia_valid & ATTR_UID &&
893 !uid_eq(attr->ia_uid, inode->i_uid)) ||
894 (attr->ia_valid & ATTR_GID &&
895 !gid_eq(attr->ia_gid, inode->i_gid))) {
896 f2fs_lock_op(F2FS_I_SB(inode));
897 err = dquot_transfer(inode, attr);
899 set_sbi_flag(F2FS_I_SB(inode),
900 SBI_QUOTA_NEED_REPAIR);
901 f2fs_unlock_op(F2FS_I_SB(inode));
905 * update uid/gid under lock_op(), so that dquot and inode can
906 * be updated atomically.
908 if (attr->ia_valid & ATTR_UID)
909 inode->i_uid = attr->ia_uid;
910 if (attr->ia_valid & ATTR_GID)
911 inode->i_gid = attr->ia_gid;
912 f2fs_mark_inode_dirty_sync(inode, true);
913 f2fs_unlock_op(F2FS_I_SB(inode));
916 if (attr->ia_valid & ATTR_SIZE) {
917 loff_t old_size = i_size_read(inode);
919 if (attr->ia_size > MAX_INLINE_DATA(inode)) {
921 * convert the inline inode before i_size_write() so the inline
922 * flag never coexists with a size beyond the inline_data capacity.
924 err = f2fs_convert_inline_inode(inode);
929 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
930 down_write(&F2FS_I(inode)->i_mmap_sem);
932 truncate_setsize(inode, attr->ia_size);
934 if (attr->ia_size <= old_size)
935 err = f2fs_truncate(inode);
937 * do not trim all blocks after i_size if target size is
938 * larger than i_size.
940 up_write(&F2FS_I(inode)->i_mmap_sem);
941 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
945 spin_lock(&F2FS_I(inode)->i_size_lock);
946 inode->i_mtime = inode->i_ctime = current_time(inode);
947 F2FS_I(inode)->last_disk_size = i_size_read(inode);
948 spin_unlock(&F2FS_I(inode)->i_size_lock);
951 __setattr_copy(&init_user_ns, inode, attr);
953 if (attr->ia_valid & ATTR_MODE) {
954 err = posix_acl_chmod(&init_user_ns, inode,
955 f2fs_get_inode_mode(inode));
956 if (err || is_inode_flag_set(inode, FI_ACL_MODE)) {
957 inode->i_mode = F2FS_I(inode)->i_acl_mode;
958 clear_inode_flag(inode, FI_ACL_MODE);
962 /* file size may have changed here */
963 f2fs_mark_inode_dirty_sync(inode, true);
965 /* inode change will produce dirty node pages flushed by checkpoint */
966 f2fs_balance_fs(F2FS_I_SB(inode), true);
971 const struct inode_operations f2fs_file_inode_operations = {
972 .getattr = f2fs_getattr,
973 .setattr = f2fs_setattr,
974 .get_acl = f2fs_get_acl,
975 .set_acl = f2fs_set_acl,
976 .listxattr = f2fs_listxattr,
977 .fiemap = f2fs_fiemap,
980 static int fill_zero(struct inode *inode, pgoff_t index,
981 loff_t start, loff_t len)
983 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
989 f2fs_balance_fs(sbi, true);
992 page = f2fs_get_new_data_page(inode, NULL, index, false);
996 return PTR_ERR(page);
998 f2fs_wait_on_page_writeback(page, DATA, true, true);
999 zero_user(page, start, len);
1000 set_page_dirty(page);
1001 f2fs_put_page(page, 1);
1005 int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
1009 while (pg_start < pg_end) {
1010 struct dnode_of_data dn;
1011 pgoff_t end_offset, count;
1013 set_new_dnode(&dn, inode, NULL, NULL, 0);
1014 err = f2fs_get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
1016 if (err == -ENOENT) {
1017 pg_start = f2fs_get_next_page_offset(&dn,
1024 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
1025 count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);
1027 f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);
1029 f2fs_truncate_data_blocks_range(&dn, count);
1030 f2fs_put_dnode(&dn);
1037 static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
1039 pgoff_t pg_start, pg_end;
1040 loff_t off_start, off_end;
1043 ret = f2fs_convert_inline_inode(inode);
1047 pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
1048 pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
1050 off_start = offset & (PAGE_SIZE - 1);
1051 off_end = (offset + len) & (PAGE_SIZE - 1);
1053 if (pg_start == pg_end) {
1054 ret = fill_zero(inode, pg_start, off_start,
1055 off_end - off_start);
1060 ret = fill_zero(inode, pg_start++, off_start,
1061 PAGE_SIZE - off_start);
1066 ret = fill_zero(inode, pg_end, 0, off_end);
1071 if (pg_start < pg_end) {
1072 struct address_space *mapping = inode->i_mapping;
1073 loff_t blk_start, blk_end;
1074 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1076 f2fs_balance_fs(sbi, true);
1078 blk_start = (loff_t)pg_start << PAGE_SHIFT;
1079 blk_end = (loff_t)pg_end << PAGE_SHIFT;
1081 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1082 down_write(&F2FS_I(inode)->i_mmap_sem);
1084 truncate_inode_pages_range(mapping, blk_start,
1088 ret = f2fs_truncate_hole(inode, pg_start, pg_end);
1089 f2fs_unlock_op(sbi);
1091 up_write(&F2FS_I(inode)->i_mmap_sem);
1092 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
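/*
 * A small userspace sketch of the path above: FALLOC_FL_PUNCH_HOLE must be
 * combined with FALLOC_FL_KEEP_SIZE, and offsets need not be block-aligned
 * since partial pages are zeroed by fill_zero(). The offset and length are
 * hypothetical examples.
 */
#if 0	/* illustrative userspace usage, not part of this file */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>

int punch_example(int fd)
{
	/* deallocate 1 MiB at offset 4 KiB without changing i_size */
	return fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			 4096, 1024 * 1024);
}
#endif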
1099 static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
1100 int *do_replace, pgoff_t off, pgoff_t len)
1102 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1103 struct dnode_of_data dn;
1107 set_new_dnode(&dn, inode, NULL, NULL, 0);
1108 ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
1109 if (ret && ret != -ENOENT) {
1111 } else if (ret == -ENOENT) {
1112 if (dn.max_level == 0)
1114 done = min((pgoff_t)ADDRS_PER_BLOCK(inode) -
1115 dn.ofs_in_node, len);
1121 done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
1122 dn.ofs_in_node, len);
1123 for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
1124 *blkaddr = f2fs_data_blkaddr(&dn);
1126 if (__is_valid_data_blkaddr(*blkaddr) &&
1127 !f2fs_is_valid_blkaddr(sbi, *blkaddr,
1128 DATA_GENERIC_ENHANCE)) {
1129 f2fs_put_dnode(&dn);
1130 return -EFSCORRUPTED;
1133 if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {
1135 if (f2fs_lfs_mode(sbi)) {
1136 f2fs_put_dnode(&dn);
1140 /* do not invalidate this block address */
1141 f2fs_update_data_blkaddr(&dn, NULL_ADDR);
1145 f2fs_put_dnode(&dn);
1154 static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
1155 int *do_replace, pgoff_t off, int len)
1157 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1158 struct dnode_of_data dn;
1161 for (i = 0; i < len; i++, do_replace++, blkaddr++) {
1162 if (*do_replace == 0)
1165 set_new_dnode(&dn, inode, NULL, NULL, 0);
1166 ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
1168 dec_valid_block_count(sbi, inode, 1);
1169 f2fs_invalidate_blocks(sbi, *blkaddr);
1171 f2fs_update_data_blkaddr(&dn, *blkaddr);
1173 f2fs_put_dnode(&dn);
1178 static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
1179 block_t *blkaddr, int *do_replace,
1180 pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
1182 struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
1187 if (blkaddr[i] == NULL_ADDR && !full) {
1192 if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
1193 struct dnode_of_data dn;
1194 struct node_info ni;
1198 set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
1199 ret = f2fs_get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
1203 ret = f2fs_get_node_info(sbi, dn.nid, &ni);
1205 f2fs_put_dnode(&dn);
1209 ilen = min((pgoff_t)
1210 ADDRS_PER_PAGE(dn.node_page, dst_inode) -
1211 dn.ofs_in_node, len - i);
1213 dn.data_blkaddr = f2fs_data_blkaddr(&dn);
1214 f2fs_truncate_data_blocks_range(&dn, 1);
1216 if (do_replace[i]) {
1217 f2fs_i_blocks_write(src_inode,
1219 f2fs_i_blocks_write(dst_inode,
1221 f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
1222 blkaddr[i], ni.version, true, false);
1228 new_size = (loff_t)(dst + i) << PAGE_SHIFT;
1229 if (dst_inode->i_size < new_size)
1230 f2fs_i_size_write(dst_inode, new_size);
1231 } while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));
1233 f2fs_put_dnode(&dn);
1235 struct page *psrc, *pdst;
1237 psrc = f2fs_get_lock_data_page(src_inode,
1240 return PTR_ERR(psrc);
1241 pdst = f2fs_get_new_data_page(dst_inode, NULL, dst + i,
1244 f2fs_put_page(psrc, 1);
1245 return PTR_ERR(pdst);
1247 f2fs_copy_page(psrc, pdst);
1248 set_page_dirty(pdst);
1249 f2fs_put_page(pdst, 1);
1250 f2fs_put_page(psrc, 1);
1252 ret = f2fs_truncate_hole(src_inode,
1253 src + i, src + i + 1);
1262 static int __exchange_data_block(struct inode *src_inode,
1263 struct inode *dst_inode, pgoff_t src, pgoff_t dst,
1264 pgoff_t len, bool full)
1266 block_t *src_blkaddr;
1272 olen = min((pgoff_t)4 * ADDRS_PER_BLOCK(src_inode), len);
1274 src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
1275 array_size(olen, sizeof(block_t)),
1280 do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
1281 array_size(olen, sizeof(int)),
1284 kvfree(src_blkaddr);
1288 ret = __read_out_blkaddrs(src_inode, src_blkaddr,
1289 do_replace, src, olen);
1293 ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
1294 do_replace, src, dst, olen, full);
1302 kvfree(src_blkaddr);
1308 __roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen);
1309 kvfree(src_blkaddr);
1314 static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
1316 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1317 pgoff_t nrpages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1318 pgoff_t start = offset >> PAGE_SHIFT;
1319 pgoff_t end = (offset + len) >> PAGE_SHIFT;
1322 f2fs_balance_fs(sbi, true);
1324 /* avoid gc operation during block exchange */
1325 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1326 down_write(&F2FS_I(inode)->i_mmap_sem);
1329 f2fs_drop_extent_tree(inode);
1330 truncate_pagecache(inode, offset);
1331 ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
1332 f2fs_unlock_op(sbi);
1334 up_write(&F2FS_I(inode)->i_mmap_sem);
1335 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1339 static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
1344 if (offset + len >= i_size_read(inode))
1347 /* collapse range should be aligned to the f2fs block size. */
1348 if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
1351 ret = f2fs_convert_inline_inode(inode);
1355 /* write out all dirty pages from offset */
1356 ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1360 ret = f2fs_do_collapse(inode, offset, len);
1364 /* write out all moved pages, if possible */
1365 down_write(&F2FS_I(inode)->i_mmap_sem);
1366 filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1367 truncate_pagecache(inode, offset);
1369 new_size = i_size_read(inode) - len;
1370 ret = f2fs_truncate_blocks(inode, new_size, true);
1371 up_write(&F2FS_I(inode)->i_mmap_sem);
1373 f2fs_i_size_write(inode, new_size);
1377 static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
1380 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1381 pgoff_t index = start;
1382 unsigned int ofs_in_node = dn->ofs_in_node;
1386 for (; index < end; index++, dn->ofs_in_node++) {
1387 if (f2fs_data_blkaddr(dn) == NULL_ADDR)
1391 dn->ofs_in_node = ofs_in_node;
1392 ret = f2fs_reserve_new_blocks(dn, count);
1396 dn->ofs_in_node = ofs_in_node;
1397 for (index = start; index < end; index++, dn->ofs_in_node++) {
1398 dn->data_blkaddr = f2fs_data_blkaddr(dn);
1400 * f2fs_reserve_new_blocks will not guarantee entire block
1401 * allocation.
1402 */
1403 if (dn->data_blkaddr == NULL_ADDR) {
1407 if (dn->data_blkaddr != NEW_ADDR) {
1408 f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
1409 dn->data_blkaddr = NEW_ADDR;
1410 f2fs_set_data_blkaddr(dn);
1414 f2fs_update_extent_cache_range(dn, start, 0, index - start);
1419 static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
1422 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1423 struct address_space *mapping = inode->i_mapping;
1424 pgoff_t index, pg_start, pg_end;
1425 loff_t new_size = i_size_read(inode);
1426 loff_t off_start, off_end;
1429 ret = inode_newsize_ok(inode, (len + offset));
1433 ret = f2fs_convert_inline_inode(inode);
1437 ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
1441 pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
1442 pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
1444 off_start = offset & (PAGE_SIZE - 1);
1445 off_end = (offset + len) & (PAGE_SIZE - 1);
1447 if (pg_start == pg_end) {
1448 ret = fill_zero(inode, pg_start, off_start,
1449 off_end - off_start);
1453 new_size = max_t(loff_t, new_size, offset + len);
1456 ret = fill_zero(inode, pg_start++, off_start,
1457 PAGE_SIZE - off_start);
1461 new_size = max_t(loff_t, new_size,
1462 (loff_t)pg_start << PAGE_SHIFT);
1465 for (index = pg_start; index < pg_end;) {
1466 struct dnode_of_data dn;
1467 unsigned int end_offset;
1470 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1471 down_write(&F2FS_I(inode)->i_mmap_sem);
1473 truncate_pagecache_range(inode,
1474 (loff_t)index << PAGE_SHIFT,
1475 ((loff_t)pg_end << PAGE_SHIFT) - 1);
1479 set_new_dnode(&dn, inode, NULL, NULL, 0);
1480 ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
1482 f2fs_unlock_op(sbi);
1483 up_write(&F2FS_I(inode)->i_mmap_sem);
1484 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1488 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
1489 end = min(pg_end, end_offset - dn.ofs_in_node + index);
1491 ret = f2fs_do_zero_range(&dn, index, end);
1492 f2fs_put_dnode(&dn);
1494 f2fs_unlock_op(sbi);
1495 up_write(&F2FS_I(inode)->i_mmap_sem);
1496 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1498 f2fs_balance_fs(sbi, dn.node_changed);
1504 new_size = max_t(loff_t, new_size,
1505 (loff_t)index << PAGE_SHIFT);
1509 ret = fill_zero(inode, pg_end, 0, off_end);
1513 new_size = max_t(loff_t, new_size, offset + len);
1518 if (new_size > i_size_read(inode)) {
1519 if (mode & FALLOC_FL_KEEP_SIZE)
1520 file_set_keep_isize(inode);
1522 f2fs_i_size_write(inode, new_size);
1527 static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
1529 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1530 pgoff_t nr, pg_start, pg_end, delta, idx;
1534 new_size = i_size_read(inode) + len;
1535 ret = inode_newsize_ok(inode, new_size);
1539 if (offset >= i_size_read(inode))
1542 /* insert range should be aligned to the f2fs block size. */
1543 if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
1546 ret = f2fs_convert_inline_inode(inode);
1550 f2fs_balance_fs(sbi, true);
1552 down_write(&F2FS_I(inode)->i_mmap_sem);
1553 ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
1554 up_write(&F2FS_I(inode)->i_mmap_sem);
1558 /* write out all dirty pages from offset */
1559 ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1563 pg_start = offset >> PAGE_SHIFT;
1564 pg_end = (offset + len) >> PAGE_SHIFT;
1565 delta = pg_end - pg_start;
1566 idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1568 /* avoid gc operation during block exchange */
1569 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1570 down_write(&F2FS_I(inode)->i_mmap_sem);
1571 truncate_pagecache(inode, offset);
1573 while (!ret && idx > pg_start) {
1574 nr = idx - pg_start;
1580 f2fs_drop_extent_tree(inode);
1582 ret = __exchange_data_block(inode, inode, idx,
1583 idx + delta, nr, false);
1584 f2fs_unlock_op(sbi);
1586 up_write(&F2FS_I(inode)->i_mmap_sem);
1587 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1589 /* write out all moved pages, if possible */
1590 down_write(&F2FS_I(inode)->i_mmap_sem);
1591 filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1592 truncate_pagecache(inode, offset);
1593 up_write(&F2FS_I(inode)->i_mmap_sem);
1596 f2fs_i_size_write(inode, new_size);
1600 static int expand_inode_data(struct inode *inode, loff_t offset,
1601 loff_t len, int mode)
1603 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1604 struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
1605 .m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
1606 .m_may_create = true };
1608 loff_t new_size = i_size_read(inode);
1612 err = inode_newsize_ok(inode, (len + offset));
1616 err = f2fs_convert_inline_inode(inode);
1620 f2fs_balance_fs(sbi, true);
1622 pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
1623 off_end = (offset + len) & (PAGE_SIZE - 1);
1625 map.m_lblk = ((unsigned long long)offset) >> PAGE_SHIFT;
1626 map.m_len = pg_end - map.m_lblk;
1633 if (f2fs_is_pinned_file(inode)) {
1634 block_t len = (map.m_len >> sbi->log_blocks_per_seg) <<
1635 sbi->log_blocks_per_seg;
1638 if (map.m_len % sbi->blocks_per_seg)
1639 len += sbi->blocks_per_seg;
1641 map.m_len = sbi->blocks_per_seg;
1643 if (has_not_enough_free_secs(sbi, 0,
1644 GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
1645 down_write(&sbi->gc_lock);
1646 err = f2fs_gc(sbi, true, false, NULL_SEGNO);
1647 if (err && err != -ENODATA && err != -EAGAIN)
1651 down_write(&sbi->pin_sem);
1654 f2fs_allocate_new_segment(sbi, CURSEG_COLD_DATA_PINNED);
1655 f2fs_unlock_op(sbi);
1657 map.m_seg_type = CURSEG_COLD_DATA_PINNED;
1658 err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
1660 up_write(&sbi->pin_sem);
1664 map.m_lblk += map.m_len;
1670 err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
1679 last_off = map.m_lblk + map.m_len - 1;
1681 /* update new size to the failed position */
1682 new_size = (last_off == pg_end) ? offset + len :
1683 (loff_t)(last_off + 1) << PAGE_SHIFT;
1685 new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
1688 if (new_size > i_size_read(inode)) {
1689 if (mode & FALLOC_FL_KEEP_SIZE)
1690 file_set_keep_isize(inode);
1692 f2fs_i_size_write(inode, new_size);
1698 static long f2fs_fallocate(struct file *file, int mode,
1699 loff_t offset, loff_t len)
1701 struct inode *inode = file_inode(file);
1704 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
1706 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
1708 if (!f2fs_is_compress_backend_ready(inode))
1711 /* f2fs only supports ->fallocate for regular files */
1712 if (!S_ISREG(inode->i_mode))
1715 if (IS_ENCRYPTED(inode) &&
1716 (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
1719 if (f2fs_compressed_file(inode) &&
1720 (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
1721 FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE)))
1724 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
1725 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
1726 FALLOC_FL_INSERT_RANGE))
1731 if (mode & FALLOC_FL_PUNCH_HOLE) {
1732 if (offset >= inode->i_size)
1735 ret = punch_hole(inode, offset, len);
1736 } else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
1737 ret = f2fs_collapse_range(inode, offset, len);
1738 } else if (mode & FALLOC_FL_ZERO_RANGE) {
1739 ret = f2fs_zero_range(inode, offset, len, mode);
1740 } else if (mode & FALLOC_FL_INSERT_RANGE) {
1741 ret = f2fs_insert_range(inode, offset, len);
1743 ret = expand_inode_data(inode, offset, len, mode);
1747 inode->i_mtime = inode->i_ctime = current_time(inode);
1748 f2fs_mark_inode_dirty_sync(inode, false);
1749 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
1753 inode_unlock(inode);
1755 trace_f2fs_fallocate(inode, mode, offset, len, ret);
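/*
 * A brief userspace sketch of the mode dispatch above; plain fallocate()
 * with mode 0 or FALLOC_FL_KEEP_SIZE lands in expand_inode_data(). The
 * offsets and sizes are hypothetical examples.
 */
#if 0	/* illustrative userspace usage, not part of this file */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>

int prealloc_example(int fd)
{
	/* preallocate 16 MiB past EOF without exposing it in st_size */
	if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 16 << 20))
		return -1;
	/* convert a 4 KiB range to zeroes, extending i_size if needed */
	return fallocate(fd, FALLOC_FL_ZERO_RANGE, 0, 4096);
}
#endif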
1759 static int f2fs_release_file(struct inode *inode, struct file *filp)
1762 * f2fs_release_file is called on every close. So we should
1763 * not drop any in-memory pages on a close issued by another process.
1765 if (!(filp->f_mode & FMODE_WRITE) ||
1766 atomic_read(&inode->i_writecount) != 1)
1769 /* any remaining atomic pages should be discarded */
1770 if (f2fs_is_atomic_file(inode))
1771 f2fs_drop_inmem_pages(inode);
1772 if (f2fs_is_volatile_file(inode)) {
1773 set_inode_flag(inode, FI_DROP_CACHE);
1774 filemap_fdatawrite(inode->i_mapping);
1775 clear_inode_flag(inode, FI_DROP_CACHE);
1776 clear_inode_flag(inode, FI_VOLATILE_FILE);
1777 stat_dec_volatile_write(inode);
1782 static int f2fs_file_flush(struct file *file, fl_owner_t id)
1784 struct inode *inode = file_inode(file);
1787 * If the process doing a transaction crashes, we should roll
1788 * back. Otherwise, other readers/writers can see a corrupted database
1789 * until all the writers close the file. Since this should be done
1790 * before dropping the file lock, it needs to be done in ->flush.
1792 if (f2fs_is_atomic_file(inode) &&
1793 F2FS_I(inode)->inmem_task == current)
1794 f2fs_drop_inmem_pages(inode);
1798 static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
1800 struct f2fs_inode_info *fi = F2FS_I(inode);
1801 u32 masked_flags = fi->i_flags & mask;
1803 f2fs_bug_on(F2FS_I_SB(inode), (iflags & ~mask));
1805 /* Is it quota file? Do not allow user to mess with it */
1806 if (IS_NOQUOTA(inode))
1809 if ((iflags ^ masked_flags) & F2FS_CASEFOLD_FL) {
1810 if (!f2fs_sb_has_casefold(F2FS_I_SB(inode)))
1812 if (!f2fs_empty_dir(inode))
1816 if (iflags & (F2FS_COMPR_FL | F2FS_NOCOMP_FL)) {
1817 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
1819 if ((iflags & F2FS_COMPR_FL) && (iflags & F2FS_NOCOMP_FL))
1823 if ((iflags ^ masked_flags) & F2FS_COMPR_FL) {
1824 if (masked_flags & F2FS_COMPR_FL) {
1825 if (!f2fs_disable_compressed_file(inode))
1828 if (iflags & F2FS_NOCOMP_FL)
1830 if (iflags & F2FS_COMPR_FL) {
1831 if (!f2fs_may_compress(inode))
1833 if (S_ISREG(inode->i_mode) && inode->i_size)
1836 set_compress_context(inode);
1839 if ((iflags ^ masked_flags) & F2FS_NOCOMP_FL) {
1840 if (masked_flags & F2FS_COMPR_FL)
1844 fi->i_flags = iflags | (fi->i_flags & ~mask);
1845 f2fs_bug_on(F2FS_I_SB(inode), (fi->i_flags & F2FS_COMPR_FL) &&
1846 (fi->i_flags & F2FS_NOCOMP_FL));
1848 if (fi->i_flags & F2FS_PROJINHERIT_FL)
1849 set_inode_flag(inode, FI_PROJ_INHERIT);
1851 clear_inode_flag(inode, FI_PROJ_INHERIT);
1853 inode->i_ctime = current_time(inode);
1854 f2fs_set_inode_flags(inode);
1855 f2fs_mark_inode_dirty_sync(inode, true);
1859 /* FS_IOC_GETFLAGS and FS_IOC_SETFLAGS support */
1862 * To make a new on-disk f2fs i_flag gettable via FS_IOC_GETFLAGS, add an entry
1863 * for it to f2fs_fsflags_map[], and add its FS_*_FL equivalent to
1864 * F2FS_GETTABLE_FS_FL. To also make it settable via FS_IOC_SETFLAGS, also add
1865 * its FS_*_FL equivalent to F2FS_SETTABLE_FS_FL.
1868 static const struct {
1871 } f2fs_fsflags_map[] = {
1872 { F2FS_COMPR_FL, FS_COMPR_FL },
1873 { F2FS_SYNC_FL, FS_SYNC_FL },
1874 { F2FS_IMMUTABLE_FL, FS_IMMUTABLE_FL },
1875 { F2FS_APPEND_FL, FS_APPEND_FL },
1876 { F2FS_NODUMP_FL, FS_NODUMP_FL },
1877 { F2FS_NOATIME_FL, FS_NOATIME_FL },
1878 { F2FS_NOCOMP_FL, FS_NOCOMP_FL },
1879 { F2FS_INDEX_FL, FS_INDEX_FL },
1880 { F2FS_DIRSYNC_FL, FS_DIRSYNC_FL },
1881 { F2FS_PROJINHERIT_FL, FS_PROJINHERIT_FL },
1882 { F2FS_CASEFOLD_FL, FS_CASEFOLD_FL },
1885 #define F2FS_GETTABLE_FS_FL ( \
1895 FS_PROJINHERIT_FL | \
1897 FS_INLINE_DATA_FL | \
1902 #define F2FS_SETTABLE_FS_FL ( \
1911 FS_PROJINHERIT_FL | \
1914 /* Convert f2fs on-disk i_flags to FS_IOC_{GET,SET}FLAGS flags */
1915 static inline u32 f2fs_iflags_to_fsflags(u32 iflags)
1920 for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
1921 if (iflags & f2fs_fsflags_map[i].iflag)
1922 fsflags |= f2fs_fsflags_map[i].fsflag;
1927 /* Convert FS_IOC_{GET,SET}FLAGS flags to f2fs on-disk i_flags */
1928 static inline u32 f2fs_fsflags_to_iflags(u32 fsflags)
1933 for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
1934 if (fsflags & f2fs_fsflags_map[i].fsflag)
1935 iflags |= f2fs_fsflags_map[i].iflag;
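/*
 * A minimal userspace sketch of the FS_IOC_GETFLAGS/SETFLAGS round trip
 * served by the handlers below, which use the conversion helpers above.
 * Setting FS_IMMUTABLE_FL requires CAP_LINUX_IMMUTABLE.
 */
#if 0	/* illustrative userspace usage, not part of this file */
#include <linux/fs.h>
#include <sys/ioctl.h>

int make_immutable(int fd)
{
	int fsflags;

	if (ioctl(fd, FS_IOC_GETFLAGS, &fsflags))
		return -1;
	fsflags |= FS_IMMUTABLE_FL;
	return ioctl(fd, FS_IOC_SETFLAGS, &fsflags);
}
#endif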
1940 static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
1942 struct inode *inode = file_inode(filp);
1943 struct f2fs_inode_info *fi = F2FS_I(inode);
1944 u32 fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
1946 if (IS_ENCRYPTED(inode))
1947 fsflags |= FS_ENCRYPT_FL;
1948 if (IS_VERITY(inode))
1949 fsflags |= FS_VERITY_FL;
1950 if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
1951 fsflags |= FS_INLINE_DATA_FL;
1952 if (is_inode_flag_set(inode, FI_PIN_FILE))
1953 fsflags |= FS_NOCOW_FL;
1955 fsflags &= F2FS_GETTABLE_FS_FL;
1957 return put_user(fsflags, (int __user *)arg);
1960 static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
1962 struct inode *inode = file_inode(filp);
1963 struct f2fs_inode_info *fi = F2FS_I(inode);
1964 u32 fsflags, old_fsflags;
1968 if (!inode_owner_or_capable(&init_user_ns, inode))
1971 if (get_user(fsflags, (int __user *)arg))
1974 if (fsflags & ~F2FS_GETTABLE_FS_FL)
1976 fsflags &= F2FS_SETTABLE_FS_FL;
1978 iflags = f2fs_fsflags_to_iflags(fsflags);
1979 if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
1982 ret = mnt_want_write_file(filp);
1988 old_fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
1989 ret = vfs_ioc_setflags_prepare(inode, old_fsflags, fsflags);
1993 ret = f2fs_setflags_common(inode, iflags,
1994 f2fs_fsflags_to_iflags(F2FS_SETTABLE_FS_FL));
1996 inode_unlock(inode);
1997 mnt_drop_write_file(filp);
2001 static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
2003 struct inode *inode = file_inode(filp);
2005 return put_user(inode->i_generation, (int __user *)arg);
2008 static int f2fs_ioc_start_atomic_write(struct file *filp)
2010 struct inode *inode = file_inode(filp);
2011 struct f2fs_inode_info *fi = F2FS_I(inode);
2012 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2015 if (!inode_owner_or_capable(&init_user_ns, inode))
2018 if (!S_ISREG(inode->i_mode))
2021 if (filp->f_flags & O_DIRECT)
2024 ret = mnt_want_write_file(filp);
2030 f2fs_disable_compressed_file(inode);
2032 if (f2fs_is_atomic_file(inode)) {
2033 if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST))
2038 ret = f2fs_convert_inline_inode(inode);
2042 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2045 * Should wait end_io to count F2FS_WB_CP_DATA correctly by
2046 * f2fs_is_atomic_file.
2048 if (get_dirty_pages(inode))
2049 f2fs_warn(F2FS_I_SB(inode), "Unexpected flush for atomic writes: ino=%lu, npages=%u",
2050 inode->i_ino, get_dirty_pages(inode));
2051 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
2053 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2057 spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
2058 if (list_empty(&fi->inmem_ilist))
2059 list_add_tail(&fi->inmem_ilist, &sbi->inode_list[ATOMIC_FILE]);
2060 sbi->atomic_files++;
2061 spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
2063 /* add inode in inmem_list first and set atomic_file */
2064 set_inode_flag(inode, FI_ATOMIC_FILE);
2065 clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
2066 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2068 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2069 F2FS_I(inode)->inmem_task = current;
2070 stat_update_max_atomic_write(inode);
2072 inode_unlock(inode);
2073 mnt_drop_write_file(filp);
2077 static int f2fs_ioc_commit_atomic_write(struct file *filp)
2079 struct inode *inode = file_inode(filp);
2082 if (!inode_owner_or_capable(&init_user_ns, inode))
2085 ret = mnt_want_write_file(filp);
2089 f2fs_balance_fs(F2FS_I_SB(inode), true);
2093 if (f2fs_is_volatile_file(inode)) {
2098 if (f2fs_is_atomic_file(inode)) {
2099 ret = f2fs_commit_inmem_pages(inode);
2103 ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
2105 f2fs_drop_inmem_pages(inode);
2107 ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
2110 if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
2111 clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
2114 inode_unlock(inode);
2115 mnt_drop_write_file(filp);
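/*
 * A minimal userspace sketch of the atomic-write protocol implemented by
 * the two ioctls above (the pattern SQLite uses on f2fs): all writes
 * between start and commit become visible atomically. The abort on the
 * error path is assumed to go through F2FS_IOC_ABORT_VOLATILE_WRITE, which
 * also revokes atomic writes.
 */
#if 0	/* illustrative userspace usage, not part of this file */
#include <linux/f2fs.h>
#include <sys/ioctl.h>
#include <unistd.h>

int atomic_update(int fd, const void *buf, size_t len)
{
	if (ioctl(fd, F2FS_IOC_START_ATOMIC_WRITE))
		return -1;
	if (pwrite(fd, buf, len, 0) != (ssize_t)len) {
		ioctl(fd, F2FS_IOC_ABORT_VOLATILE_WRITE);	/* revoke */
		return -1;
	}
	return ioctl(fd, F2FS_IOC_COMMIT_ATOMIC_WRITE);
}
#endif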
2119 static int f2fs_ioc_start_volatile_write(struct file *filp)
2121 struct inode *inode = file_inode(filp);
2124 if (!inode_owner_or_capable(&init_user_ns, inode))
2127 if (!S_ISREG(inode->i_mode))
2130 ret = mnt_want_write_file(filp);
2136 if (f2fs_is_volatile_file(inode))
2139 ret = f2fs_convert_inline_inode(inode);
2143 stat_inc_volatile_write(inode);
2144 stat_update_max_volatile_write(inode);
2146 set_inode_flag(inode, FI_VOLATILE_FILE);
2147 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2149 inode_unlock(inode);
2150 mnt_drop_write_file(filp);
2154 static int f2fs_ioc_release_volatile_write(struct file *filp)
2156 struct inode *inode = file_inode(filp);
2159 if (!inode_owner_or_capable(&init_user_ns, inode))
2162 ret = mnt_want_write_file(filp);
2168 if (!f2fs_is_volatile_file(inode))
2171 if (!f2fs_is_first_block_written(inode)) {
2172 ret = truncate_partial_data_page(inode, 0, true);
2176 ret = punch_hole(inode, 0, F2FS_BLKSIZE);
2178 inode_unlock(inode);
2179 mnt_drop_write_file(filp);
2183 static int f2fs_ioc_abort_volatile_write(struct file *filp)
2185 struct inode *inode = file_inode(filp);
2188 if (!inode_owner_or_capable(&init_user_ns, inode))
2191 ret = mnt_want_write_file(filp);
2197 if (f2fs_is_atomic_file(inode))
2198 f2fs_drop_inmem_pages(inode);
2199 if (f2fs_is_volatile_file(inode)) {
2200 clear_inode_flag(inode, FI_VOLATILE_FILE);
2201 stat_dec_volatile_write(inode);
2202 ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
2205 clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
2207 inode_unlock(inode);
2209 mnt_drop_write_file(filp);
2210 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2214 static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
2216 struct inode *inode = file_inode(filp);
2217 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2218 struct super_block *sb = sbi->sb;
2222 if (!capable(CAP_SYS_ADMIN))
2225 if (get_user(in, (__u32 __user *)arg))
2228 if (in != F2FS_GOING_DOWN_FULLSYNC) {
2229 ret = mnt_want_write_file(filp);
2231 if (ret == -EROFS) {
2233 f2fs_stop_checkpoint(sbi, false);
2234 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2235 trace_f2fs_shutdown(sbi, in, ret);
2242 case F2FS_GOING_DOWN_FULLSYNC:
2243 ret = freeze_bdev(sb->s_bdev);
2246 f2fs_stop_checkpoint(sbi, false);
2247 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2248 thaw_bdev(sb->s_bdev);
2250 case F2FS_GOING_DOWN_METASYNC:
2251 /* do checkpoint only */
2252 ret = f2fs_sync_fs(sb, 1);
2255 f2fs_stop_checkpoint(sbi, false);
2256 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2258 case F2FS_GOING_DOWN_NOSYNC:
2259 f2fs_stop_checkpoint(sbi, false);
2260 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2262 case F2FS_GOING_DOWN_METAFLUSH:
2263 f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
2264 f2fs_stop_checkpoint(sbi, false);
2265 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2267 case F2FS_GOING_DOWN_NEED_FSCK:
2268 set_sbi_flag(sbi, SBI_NEED_FSCK);
2269 set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
2270 set_sbi_flag(sbi, SBI_IS_DIRTY);
2271 /* do checkpoint only */
2272 ret = f2fs_sync_fs(sb, 1);
2279 f2fs_stop_gc_thread(sbi);
2280 f2fs_stop_discard_thread(sbi);
2282 f2fs_drop_discard_cmd(sbi);
2283 clear_opt(sbi, DISCARD);
2285 f2fs_update_time(sbi, REQ_TIME);
2287 if (in != F2FS_GOING_DOWN_FULLSYNC)
2288 mnt_drop_write_file(filp);
2290 trace_f2fs_shutdown(sbi, in, ret);
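/*
 * A short userspace sketch of F2FS_IOC_SHUTDOWN as handled above; mainly
 * used by tests to simulate sudden power failure. Requires CAP_SYS_ADMIN;
 * mntfd is any fd on the target filesystem.
 */
#if 0	/* illustrative userspace usage, not part of this file */
#include <linux/f2fs.h>
#include <sys/ioctl.h>

int shutdown_after_checkpoint(int mntfd)
{
	__u32 how = F2FS_GOING_DOWN_METASYNC;	/* checkpoint, then stop */

	return ioctl(mntfd, F2FS_IOC_SHUTDOWN, &how);
}
#endif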
2295 static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
2297 struct inode *inode = file_inode(filp);
2298 struct super_block *sb = inode->i_sb;
2299 struct request_queue *q = bdev_get_queue(sb->s_bdev);
2300 struct fstrim_range range;
2303 if (!capable(CAP_SYS_ADMIN))
2306 if (!f2fs_hw_support_discard(F2FS_SB(sb)))
2309 if (copy_from_user(&range, (struct fstrim_range __user *)arg,
2313 ret = mnt_want_write_file(filp);
2317 range.minlen = max((unsigned int)range.minlen,
2318 q->limits.discard_granularity);
2319 ret = f2fs_trim_fs(F2FS_SB(sb), &range);
2320 mnt_drop_write_file(filp);
2324 if (copy_to_user((struct fstrim_range __user *)arg, &range,
2325 sizeof(range)))
2326 return -EFAULT;
2327 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
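/*
 * A minimal userspace sketch of the FITRIM ioctl handled above (what
 * fstrim(8) issues); requires CAP_SYS_ADMIN and hardware discard support.
 */
#if 0	/* illustrative userspace usage, not part of this file */
#include <linux/fs.h>
#include <sys/ioctl.h>

int trim_all(int mntfd)
{
	struct fstrim_range range = {
		.start = 0,
		.len = (__u64)-1,	/* whole filesystem */
		.minlen = 0,		/* raised to discard granularity */
	};

	return ioctl(mntfd, FITRIM, &range);
}
#endif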
2331 static bool uuid_is_nonzero(__u8 u[16])
2335 for (i = 0; i < 16; i++)
2341 static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
2343 struct inode *inode = file_inode(filp);
2345 if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode)))
2348 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2350 return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
2353 static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
2355 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2357 return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
2360 static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
2362 struct inode *inode = file_inode(filp);
2363 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2366 if (!f2fs_sb_has_encrypt(sbi))
2369 err = mnt_want_write_file(filp);
2373 down_write(&sbi->sb_lock);
2375 if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
2378 /* update superblock with uuid */
2379 generate_random_uuid(sbi->raw_super->encrypt_pw_salt);
2381 err = f2fs_commit_super(sbi, false);
2384 memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
2388 if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
2392 up_write(&sbi->sb_lock);
2393 mnt_drop_write_file(filp);
2397 static int f2fs_ioc_get_encryption_policy_ex(struct file *filp,
2400 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2403 return fscrypt_ioctl_get_policy_ex(filp, (void __user *)arg);
2406 static int f2fs_ioc_add_encryption_key(struct file *filp, unsigned long arg)
2408 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2411 return fscrypt_ioctl_add_key(filp, (void __user *)arg);
2414 static int f2fs_ioc_remove_encryption_key(struct file *filp, unsigned long arg)
2416 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2419 return fscrypt_ioctl_remove_key(filp, (void __user *)arg);
2422 static int f2fs_ioc_remove_encryption_key_all_users(struct file *filp,
2425 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2428 return fscrypt_ioctl_remove_key_all_users(filp, (void __user *)arg);
2431 static int f2fs_ioc_get_encryption_key_status(struct file *filp,
2434 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2437 return fscrypt_ioctl_get_key_status(filp, (void __user *)arg);
2440 static int f2fs_ioc_get_encryption_nonce(struct file *filp, unsigned long arg)
2442 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2445 return fscrypt_ioctl_get_nonce(filp, (void __user *)arg);
2448 static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
2450 struct inode *inode = file_inode(filp);
2451 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2455 if (!capable(CAP_SYS_ADMIN))
2458 if (get_user(sync, (__u32 __user *)arg))
2461 if (f2fs_readonly(sbi->sb))
2464 ret = mnt_want_write_file(filp);
2469 if (!down_write_trylock(&sbi->gc_lock)) {
2474 down_write(&sbi->gc_lock);
2477 ret = f2fs_gc(sbi, sync, true, NULL_SEGNO);
2479 mnt_drop_write_file(filp);
2483 static int __f2fs_ioc_gc_range(struct file *filp, struct f2fs_gc_range *range)
2485 struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
2489 if (!capable(CAP_SYS_ADMIN))
2491 if (f2fs_readonly(sbi->sb))
2494 end = range->start + range->len;
2495 if (end < range->start || range->start < MAIN_BLKADDR(sbi) ||
2496 end >= MAX_BLKADDR(sbi))
2499 ret = mnt_want_write_file(filp);
2505 if (!down_write_trylock(&sbi->gc_lock)) {
2510 down_write(&sbi->gc_lock);
2513 ret = f2fs_gc(sbi, range->sync, true, GET_SEGNO(sbi, range->start));
2519 range->start += BLKS_PER_SEC(sbi);
2520 if (range->start <= end)
2523 mnt_drop_write_file(filp);
2527 static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
2529 struct f2fs_gc_range range;
2531 if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
2534 return __f2fs_ioc_gc_range(filp, &range);
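/*
 * A brief userspace sketch of the GC ioctls dispatched above: the plain
 * F2FS_IOC_GARBAGE_COLLECT form takes a sync flag, while the range form
 * takes a struct f2fs_gc_range. The sync value here is an example.
 */
#if 0	/* illustrative userspace usage, not part of this file */
#include <linux/f2fs.h>
#include <sys/ioctl.h>

int force_gc(int mntfd)
{
	__u32 sync = 1;	/* block until one GC pass completes */

	return ioctl(mntfd, F2FS_IOC_GARBAGE_COLLECT, &sync);
}
#endif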
2537 static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
2539 struct inode *inode = file_inode(filp);
2540 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2543 if (!capable(CAP_SYS_ADMIN))
2546 if (f2fs_readonly(sbi->sb))
2549 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2550 f2fs_info(sbi, "Skipping Checkpoint. Checkpoints currently disabled.");
2554 ret = mnt_want_write_file(filp);
2558 ret = f2fs_sync_fs(sbi->sb, 1);
2560 mnt_drop_write_file(filp);
2564 static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
2566 struct f2fs_defragment *range)
2568 struct inode *inode = file_inode(filp);
2569 struct f2fs_map_blocks map = { .m_next_extent = NULL,
2570 .m_seg_type = NO_CHECK_TYPE,
2571 .m_may_create = false };
2572 struct extent_info ei = {0, 0, 0};
2573 pgoff_t pg_start, pg_end, next_pgofs;
2574 unsigned int blk_per_seg = sbi->blocks_per_seg;
2575 unsigned int total = 0, sec_num;
2576 block_t blk_end = 0;
2577 bool fragmented = false;
2580 /* if in-place-update policy is enabled, don't waste time here */
2581 if (f2fs_should_update_inplace(inode, NULL))
2584 pg_start = range->start >> PAGE_SHIFT;
2585 pg_end = (range->start + range->len) >> PAGE_SHIFT;
2587 f2fs_balance_fs(sbi, true);
2591 /* writeback all dirty pages in the range */
2592 err = filemap_write_and_wait_range(inode->i_mapping, range->start,
2593 range->start + range->len - 1);
2598 * lookup mapping info in extent cache, skip defragmenting if physical
2599 * block addresses are contiguous.
2601 if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
2602 if (ei.fofs + ei.len >= pg_end)
2606 map.m_lblk = pg_start;
2607 map.m_next_pgofs = &next_pgofs;
2610 * lookup mapping info in dnode page cache, skip defragmenting if all
2611 * physical block addresses are contiguous even if there are hole(s)
2612 * in logical blocks.
2614 while (map.m_lblk < pg_end) {
2615 map.m_len = pg_end - map.m_lblk;
2616 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
2620 if (!(map.m_flags & F2FS_MAP_FLAGS)) {
2621 map.m_lblk = next_pgofs;
2625 if (blk_end && blk_end != map.m_pblk)
2628 /* record total count of blocks that we're going to move */
2631 blk_end = map.m_pblk + map.m_len;
2633 map.m_lblk += map.m_len;
2641 sec_num = DIV_ROUND_UP(total, BLKS_PER_SEC(sbi));
2644 * make sure there are enough free sections for LFS allocation; this
2645 * avoids running defragmentation in SSR mode when free sections are
2646 * allocated intensively
2647 */
2648 if (has_not_enough_free_secs(sbi, 0, sec_num)) {
2653 map.m_lblk = pg_start;
2654 map.m_len = pg_end - pg_start;
2657 while (map.m_lblk < pg_end) {
2662 map.m_len = pg_end - map.m_lblk;
2663 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
2667 if (!(map.m_flags & F2FS_MAP_FLAGS)) {
2668 map.m_lblk = next_pgofs;
2672 set_inode_flag(inode, FI_DO_DEFRAG);
2675 while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
2678 page = f2fs_get_lock_data_page(inode, idx, true);
2680 err = PTR_ERR(page);
2684 set_page_dirty(page);
2685 f2fs_put_page(page, 1);
2694 if (map.m_lblk < pg_end && cnt < blk_per_seg)
2697 clear_inode_flag(inode, FI_DO_DEFRAG);
2699 err = filemap_fdatawrite(inode->i_mapping);
2704 clear_inode_flag(inode, FI_DO_DEFRAG);
2706 inode_unlock(inode);
2708 range->len = (u64)total << PAGE_SHIFT;
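/*
 * Illustrative userspace sketch (not kernel code; "fd" is assumed): ask the
 * kernel to defragment the first 64MB of a file. On success the kernel
 * writes the number of bytes it queued for relocation back into range.len.
 *
 *	struct f2fs_defragment range = { .start = 0, .len = 64 << 20 };
 *
 *	if (ioctl(fd, F2FS_IOC_DEFRAGMENT, &range) == 0)
 *		printf("moved %llu bytes\n", (unsigned long long)range.len);
 */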
2712 static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
2714 struct inode *inode = file_inode(filp);
2715 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2716 struct f2fs_defragment range;
2719 if (!capable(CAP_SYS_ADMIN))
2722 if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
2725 if (f2fs_readonly(sbi->sb))
2728 if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
2732 /* verify alignment of offset & size */
2733 if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
2736 if (unlikely((range.start + range.len) >> PAGE_SHIFT >
2737 sbi->max_file_blocks))
2740 err = mnt_want_write_file(filp);
2744 err = f2fs_defragment_range(sbi, filp, &range);
2745 mnt_drop_write_file(filp);
2747 f2fs_update_time(sbi, REQ_TIME);
2751 if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
2758 static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
2759 struct file *file_out, loff_t pos_out, size_t len)
2761 struct inode *src = file_inode(file_in);
2762 struct inode *dst = file_inode(file_out);
2763 struct f2fs_sb_info *sbi = F2FS_I_SB(src);
2764 size_t olen = len, dst_max_i_size = 0;
2768 if (file_in->f_path.mnt != file_out->f_path.mnt ||
2769 src->i_sb != dst->i_sb)
2772 if (unlikely(f2fs_readonly(src->i_sb)))
2775 if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
2778 if (IS_ENCRYPTED(src) || IS_ENCRYPTED(dst))
2781 if (pos_out < 0 || pos_in < 0)
2785 if (pos_in == pos_out)
2787 if (pos_out > pos_in && pos_out < pos_in + len)
2794 if (!inode_trylock(dst))
2799 if (pos_in + len > src->i_size || pos_in + len < pos_in)
2802 olen = len = src->i_size - pos_in;
2803 if (pos_in + len == src->i_size)
2804 len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
2810 dst_osize = dst->i_size;
2811 if (pos_out + olen > dst->i_size)
2812 dst_max_i_size = pos_out + olen;
2814 /* verify the end result is block aligned */
2815 if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
2816 !IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
2817 !IS_ALIGNED(pos_out, F2FS_BLKSIZE))
2820 ret = f2fs_convert_inline_inode(src);
2824 ret = f2fs_convert_inline_inode(dst);
2828 /* write out all dirty pages from offset */
2829 ret = filemap_write_and_wait_range(src->i_mapping,
2830 pos_in, pos_in + len);
2834 ret = filemap_write_and_wait_range(dst->i_mapping,
2835 pos_out, pos_out + len);
2839 f2fs_balance_fs(sbi, true);
2841 down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2844 if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
2849 ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
2850 pos_out >> F2FS_BLKSIZE_BITS,
2851 len >> F2FS_BLKSIZE_BITS, false);
2855 f2fs_i_size_write(dst, dst_max_i_size);
2856 else if (dst_osize != dst->i_size)
2857 f2fs_i_size_write(dst, dst_osize);
2859 f2fs_unlock_op(sbi);
2862 up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
2864 up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2873 static int __f2fs_ioc_move_range(struct file *filp,
2874 struct f2fs_move_range *range)
2879 if (!(filp->f_mode & FMODE_READ) ||
2880 !(filp->f_mode & FMODE_WRITE))
2883 dst = fdget(range->dst_fd);
2887 if (!(dst.file->f_mode & FMODE_WRITE)) {
2892 err = mnt_want_write_file(filp);
2896 err = f2fs_move_file_range(filp, range->pos_in, dst.file,
2897 range->pos_out, range->len);
2899 mnt_drop_write_file(filp);
2905 static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
2907 struct f2fs_move_range range;
2909 if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
2912 return __f2fs_ioc_move_range(filp, &range);
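/*
 * Illustrative userspace sketch (not kernel code; "src_fd" and "dst_fd" are
 * assumed descriptors on the same f2fs mount): move one block-aligned
 * megabyte from the start of the source file into the destination at
 * offset 0. Both offsets and the length must be block aligned, as the
 * checks in f2fs_move_file_range() above enforce.
 *
 *	struct f2fs_move_range mr = {
 *		.dst_fd = dst_fd,
 *		.pos_in = 0,
 *		.pos_out = 0,
 *		.len = 1 << 20,
 *	};
 *	ioctl(src_fd, F2FS_IOC_MOVE_RANGE, &mr);
 */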
2915 static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
2917 struct inode *inode = file_inode(filp);
2918 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2919 struct sit_info *sm = SIT_I(sbi);
2920 unsigned int start_segno = 0, end_segno = 0;
2921 unsigned int dev_start_segno = 0, dev_end_segno = 0;
2922 struct f2fs_flush_device range;
2925 if (!capable(CAP_SYS_ADMIN))
2928 if (f2fs_readonly(sbi->sb))
2931 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2934 if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
2938 if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num ||
2939 __is_large_section(sbi)) {
2940 f2fs_warn(sbi, "Can't flush %u in %d for segs_per_sec %u != 1",
2941 range.dev_num, sbi->s_ndevs, sbi->segs_per_sec);
2945 ret = mnt_want_write_file(filp);
2949 if (range.dev_num != 0)
2950 dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
2951 dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);
2953 start_segno = sm->last_victim[FLUSH_DEVICE];
2954 if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
2955 start_segno = dev_start_segno;
2956 end_segno = min(start_segno + range.segments, dev_end_segno);
2958 while (start_segno < end_segno) {
2959 if (!down_write_trylock(&sbi->gc_lock)) {
2963 sm->last_victim[GC_CB] = end_segno + 1;
2964 sm->last_victim[GC_GREEDY] = end_segno + 1;
2965 sm->last_victim[ALLOC_NEXT] = end_segno + 1;
2966 ret = f2fs_gc(sbi, true, true, start_segno);
2974 mnt_drop_write_file(filp);
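/*
 * Illustrative userspace sketch (not kernel code): migrate up to 512
 * segments' worth of data off the first device of a multi-device f2fs
 * volume. Requires CAP_SYS_ADMIN, as checked above.
 *
 *	struct f2fs_flush_device fd_range = { .dev_num = 0, .segments = 512 };
 *
 *	ioctl(fd, F2FS_IOC_FLUSH_DEVICE, &fd_range);
 */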
2978 static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
2980 struct inode *inode = file_inode(filp);
2981 u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);
2983 /* Always report atomic write support; SQLite on Android relies on it. */
2984 sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;
2986 return put_user(sb_feature, (u32 __user *)arg);
2990 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
2992 struct dquot *transfer_to[MAXQUOTAS] = {};
2993 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2994 struct super_block *sb = sbi->sb;
2997 transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
2998 if (!IS_ERR(transfer_to[PRJQUOTA])) {
2999 err = __dquot_transfer(inode, transfer_to);
3001 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
3002 dqput(transfer_to[PRJQUOTA]);
3007 static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
3009 struct inode *inode = file_inode(filp);
3010 struct f2fs_inode_info *fi = F2FS_I(inode);
3011 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3016 if (!f2fs_sb_has_project_quota(sbi)) {
3017 if (projid != F2FS_DEF_PROJID)
3023 if (!f2fs_has_extra_attr(inode))
3026 kprojid = make_kprojid(&init_user_ns, (projid_t)projid);
3028 if (projid_eq(kprojid, F2FS_I(inode)->i_projid))
3032 /* Is it quota file? Do not allow user to mess with it */
3033 if (IS_NOQUOTA(inode))
3036 ipage = f2fs_get_node_page(sbi, inode->i_ino);
3038 return PTR_ERR(ipage);
3040 if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage), fi->i_extra_isize,
3043 f2fs_put_page(ipage, 1);
3046 f2fs_put_page(ipage, 1);
3048 err = dquot_initialize(inode);
3053 err = f2fs_transfer_project_quota(inode, kprojid);
3057 F2FS_I(inode)->i_projid = kprojid;
3058 inode->i_ctime = current_time(inode);
3059 f2fs_mark_inode_dirty_sync(inode, true);
3061 f2fs_unlock_op(sbi);
3065 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
3070 static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
3072 if (projid != F2FS_DEF_PROJID)
3078 /* FS_IOC_FSGETXATTR and FS_IOC_FSSETXATTR support */
3081 * To make a new on-disk f2fs i_flag gettable via FS_IOC_FSGETXATTR and settable
3082 * via FS_IOC_FSSETXATTR, add an entry for it to f2fs_xflags_map[], and add its
3083 * FS_XFLAG_* equivalent to F2FS_SUPPORTED_XFLAGS.
3086 static const struct {
3089 } f2fs_xflags_map[] = {
3090 { F2FS_SYNC_FL, FS_XFLAG_SYNC },
3091 { F2FS_IMMUTABLE_FL, FS_XFLAG_IMMUTABLE },
3092 { F2FS_APPEND_FL, FS_XFLAG_APPEND },
3093 { F2FS_NODUMP_FL, FS_XFLAG_NODUMP },
3094 { F2FS_NOATIME_FL, FS_XFLAG_NOATIME },
3095 { F2FS_PROJINHERIT_FL, FS_XFLAG_PROJINHERIT },
3098 #define F2FS_SUPPORTED_XFLAGS ( \
3100 FS_XFLAG_IMMUTABLE | \
3103 FS_XFLAG_NOATIME | \
3104 FS_XFLAG_PROJINHERIT)
3106 /* Convert f2fs on-disk i_flags to FS_IOC_FS{GET,SET}XATTR flags */
3107 static inline u32 f2fs_iflags_to_xflags(u32 iflags)
3112 for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++)
3113 if (iflags & f2fs_xflags_map[i].iflag)
3114 xflags |= f2fs_xflags_map[i].xflag;
3119 /* Convert FS_IOC_FS{GET,SET}XATTR flags to f2fs on-disk i_flags */
3120 static inline u32 f2fs_xflags_to_iflags(u32 xflags)
3125 for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++)
3126 if (xflags & f2fs_xflags_map[i].xflag)
3127 iflags |= f2fs_xflags_map[i].iflag;
3132 static void f2fs_fill_fsxattr(struct inode *inode, struct fsxattr *fa)
3134 struct f2fs_inode_info *fi = F2FS_I(inode);
3136 simple_fill_fsxattr(fa, f2fs_iflags_to_xflags(fi->i_flags));
3138 if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)))
3139 fa->fsx_projid = from_kprojid(&init_user_ns, fi->i_projid);
3142 static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg)
3144 struct inode *inode = file_inode(filp);
3147 f2fs_fill_fsxattr(inode, &fa);
3149 if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa)))
3154 static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
3156 struct inode *inode = file_inode(filp);
3157 struct fsxattr fa, old_fa;
3161 if (copy_from_user(&fa, (struct fsxattr __user *)arg, sizeof(fa)))
3164 /* Make sure caller has proper permission */
3165 if (!inode_owner_or_capable(&init_user_ns, inode))
3168 if (fa.fsx_xflags & ~F2FS_SUPPORTED_XFLAGS)
3171 iflags = f2fs_xflags_to_iflags(fa.fsx_xflags);
3172 if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
3175 err = mnt_want_write_file(filp);
3181 f2fs_fill_fsxattr(inode, &old_fa);
3182 err = vfs_ioc_fssetxattr_check(inode, &old_fa, &fa);
3186 err = f2fs_setflags_common(inode, iflags,
3187 f2fs_xflags_to_iflags(F2FS_SUPPORTED_XFLAGS));
3191 err = f2fs_ioc_setproject(filp, fa.fsx_projid);
3193 inode_unlock(inode);
3194 mnt_drop_write_file(filp);
3198 int f2fs_pin_file_control(struct inode *inode, bool inc)
3200 struct f2fs_inode_info *fi = F2FS_I(inode);
3201 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3203 /* Use i_gc_failures as a risk signal for a normal file. */
3205 f2fs_i_gc_failures_write(inode,
3206 fi->i_gc_failures[GC_FAILURE_PIN] + 1);
3208 if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) {
3209 f2fs_warn(sbi, "%s: Enable GC = ino %lx after %x GC trials",
3210 __func__, inode->i_ino,
3211 fi->i_gc_failures[GC_FAILURE_PIN]);
3212 clear_inode_flag(inode, FI_PIN_FILE);
3218 static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
3220 struct inode *inode = file_inode(filp);
3224 if (get_user(pin, (__u32 __user *)arg))
3227 if (!S_ISREG(inode->i_mode))
3230 if (f2fs_readonly(F2FS_I_SB(inode)->sb))
3233 ret = mnt_want_write_file(filp);
3239 if (f2fs_should_update_outplace(inode, NULL)) {
3245 clear_inode_flag(inode, FI_PIN_FILE);
3246 f2fs_i_gc_failures_write(inode, 0);
3250 if (f2fs_pin_file_control(inode, false)) {
3255 ret = f2fs_convert_inline_inode(inode);
3259 if (!f2fs_disable_compressed_file(inode)) {
3264 set_inode_flag(inode, FI_PIN_FILE);
3265 ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
3267 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3269 inode_unlock(inode);
3270 mnt_drop_write_file(filp);
3274 static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg)
3276 struct inode *inode = file_inode(filp);
3279 if (is_inode_flag_set(inode, FI_PIN_FILE))
3280 pin = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
3281 return put_user(pin, (u32 __user *)arg);
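/*
 * Illustrative userspace sketch (not kernel code): pin a file so GC leaves
 * its blocks in place, then read back the pin state (the GC failure count
 * if pinned, 0 otherwise).
 *
 *	__u32 pin = 1, state;
 *
 *	ioctl(fd, F2FS_IOC_SET_PIN_FILE, &pin);
 *	ioctl(fd, F2FS_IOC_GET_PIN_FILE, &state);
 */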
3284 int f2fs_precache_extents(struct inode *inode)
3286 struct f2fs_inode_info *fi = F2FS_I(inode);
3287 struct f2fs_map_blocks map;
3288 pgoff_t m_next_extent;
3292 if (is_inode_flag_set(inode, FI_NO_EXTENT))
3296 map.m_next_pgofs = NULL;
3297 map.m_next_extent = &m_next_extent;
3298 map.m_seg_type = NO_CHECK_TYPE;
3299 map.m_may_create = false;
3300 end = F2FS_I_SB(inode)->max_file_blocks;
3302 while (map.m_lblk < end) {
3303 map.m_len = end - map.m_lblk;
3305 down_write(&fi->i_gc_rwsem[WRITE]);
3306 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
3307 up_write(&fi->i_gc_rwsem[WRITE]);
3311 map.m_lblk = m_next_extent;
3317 static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg)
3319 return f2fs_precache_extents(file_inode(filp));
3322 static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg)
3324 struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
3327 if (!capable(CAP_SYS_ADMIN))
3330 if (f2fs_readonly(sbi->sb))
3333 if (copy_from_user(&block_count, (void __user *)arg,
3334 sizeof(block_count)))
3337 return f2fs_resize_fs(sbi, block_count);
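/*
 * Illustrative userspace sketch (not kernel code): resize the filesystem to
 * a new total block count (CAP_SYS_ADMIN required).
 *
 *	__u64 block_count = NEW_BLOCK_COUNT;	// hypothetical target size
 *
 *	ioctl(fd, F2FS_IOC_RESIZE_FS, &block_count);
 */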
3340 static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg)
3342 struct inode *inode = file_inode(filp);
3344 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3346 if (!f2fs_sb_has_verity(F2FS_I_SB(inode))) {
3347 f2fs_warn(F2FS_I_SB(inode),
3348 "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem.\n",
3353 return fsverity_ioctl_enable(filp, (const void __user *)arg);
3356 static int f2fs_ioc_measure_verity(struct file *filp, unsigned long arg)
3358 if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
3361 return fsverity_ioctl_measure(filp, (void __user *)arg);
3364 static int f2fs_ioc_getfslabel(struct file *filp, unsigned long arg)
3366 struct inode *inode = file_inode(filp);
3367 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3372 vbuf = f2fs_kzalloc(sbi, MAX_VOLUME_NAME, GFP_KERNEL);
3376 down_read(&sbi->sb_lock);
3377 count = utf16s_to_utf8s(sbi->raw_super->volume_name,
3378 ARRAY_SIZE(sbi->raw_super->volume_name),
3379 UTF16_LITTLE_ENDIAN, vbuf, MAX_VOLUME_NAME);
3380 up_read(&sbi->sb_lock);
3382 if (copy_to_user((char __user *)arg, vbuf,
3383 min(FSLABEL_MAX, count)))
3390 static int f2fs_ioc_setfslabel(struct file *filp, unsigned long arg)
3392 struct inode *inode = file_inode(filp);
3393 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3397 if (!capable(CAP_SYS_ADMIN))
3400 vbuf = strndup_user((const char __user *)arg, FSLABEL_MAX);
3402 return PTR_ERR(vbuf);
3404 err = mnt_want_write_file(filp);
3408 down_write(&sbi->sb_lock);
3410 memset(sbi->raw_super->volume_name, 0,
3411 sizeof(sbi->raw_super->volume_name));
3412 utf8s_to_utf16s(vbuf, strlen(vbuf), UTF16_LITTLE_ENDIAN,
3413 sbi->raw_super->volume_name,
3414 ARRAY_SIZE(sbi->raw_super->volume_name));
3416 err = f2fs_commit_super(sbi, false);
3418 up_write(&sbi->sb_lock);
3420 mnt_drop_write_file(filp);
3426 static int f2fs_get_compress_blocks(struct file *filp, unsigned long arg)
3428 struct inode *inode = file_inode(filp);
3431 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3434 if (!f2fs_compressed_file(inode))
3437 blocks = atomic_read(&F2FS_I(inode)->i_compr_blocks);
3438 return put_user(blocks, (u64 __user *)arg);
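/*
 * Illustrative userspace sketch (not kernel code): query how many blocks of
 * a compressed file compression has saved.
 *
 *	__u64 blocks;
 *
 *	if (ioctl(fd, F2FS_IOC_GET_COMPRESS_BLOCKS, &blocks) == 0)
 *		printf("saved blocks: %llu\n", (unsigned long long)blocks);
 */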
3441 static int release_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
3443 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
3444 unsigned int released_blocks = 0;
3445 int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
3449 for (i = 0; i < count; i++) {
3450 blkaddr = data_blkaddr(dn->inode, dn->node_page,
3451 dn->ofs_in_node + i);
3453 if (!__is_valid_data_blkaddr(blkaddr))
3455 if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
3456 DATA_GENERIC_ENHANCE)))
3457 return -EFSCORRUPTED;
3461 int compr_blocks = 0;
3463 for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
3464 blkaddr = f2fs_data_blkaddr(dn);
3467 if (blkaddr == COMPRESS_ADDR)
3469 dn->ofs_in_node += cluster_size;
3473 if (__is_valid_data_blkaddr(blkaddr))
3476 if (blkaddr != NEW_ADDR)
3479 dn->data_blkaddr = NULL_ADDR;
3480 f2fs_set_data_blkaddr(dn);
3483 f2fs_i_compr_blocks_update(dn->inode, compr_blocks, false);
3484 dec_valid_block_count(sbi, dn->inode,
3485 cluster_size - compr_blocks);
3487 released_blocks += cluster_size - compr_blocks;
3489 count -= cluster_size;
3492 return released_blocks;
3495 static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
3497 struct inode *inode = file_inode(filp);
3498 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3499 pgoff_t page_idx = 0, last_idx;
3500 unsigned int released_blocks = 0;
3504 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3507 if (!f2fs_compressed_file(inode))
3510 if (f2fs_readonly(sbi->sb))
3513 ret = mnt_want_write_file(filp);
3517 f2fs_balance_fs(F2FS_I_SB(inode), true);
3521 writecount = atomic_read(&inode->i_writecount);
3522 if ((filp->f_mode & FMODE_WRITE && writecount != 1) ||
3523 (!(filp->f_mode & FMODE_WRITE) && writecount)) {
3528 if (IS_IMMUTABLE(inode)) {
3533 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
3537 F2FS_I(inode)->i_flags |= F2FS_IMMUTABLE_FL;
3538 f2fs_set_inode_flags(inode);
3539 inode->i_ctime = current_time(inode);
3540 f2fs_mark_inode_dirty_sync(inode, true);
3542 if (!atomic_read(&F2FS_I(inode)->i_compr_blocks))
3545 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3546 down_write(&F2FS_I(inode)->i_mmap_sem);
3548 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3550 while (page_idx < last_idx) {
3551 struct dnode_of_data dn;
3552 pgoff_t end_offset, count;
3554 set_new_dnode(&dn, inode, NULL, NULL, 0);
3555 ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
3557 if (ret == -ENOENT) {
3558 page_idx = f2fs_get_next_page_offset(&dn,
3566 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3567 count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
3568 count = round_up(count, F2FS_I(inode)->i_cluster_size);
3570 ret = release_compress_blocks(&dn, count);
3572 f2fs_put_dnode(&dn);
3578 released_blocks += ret;
3581 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3582 up_write(&F2FS_I(inode)->i_mmap_sem);
3584 inode_unlock(inode);
3586 mnt_drop_write_file(filp);
3589 ret = put_user(released_blocks, (u64 __user *)arg);
3590 } else if (released_blocks &&
3591 atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
3592 set_sbi_flag(sbi, SBI_NEED_FSCK);
3593 f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
3594 "iblocks=%llu, released=%u, compr_blocks=%u, "
3596 __func__, inode->i_ino, inode->i_blocks,
3598 atomic_read(&F2FS_I(inode)->i_compr_blocks));
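/*
 * Illustrative userspace sketch (not kernel code): release the blocks saved
 * by compression back to the free space pool. The file is made immutable
 * above until the blocks are re-reserved with
 * F2FS_IOC_RESERVE_COMPRESS_BLOCKS.
 *
 *	__u64 released;
 *
 *	ioctl(fd, F2FS_IOC_RELEASE_COMPRESS_BLOCKS, &released);
 */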
3604 static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
3606 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
3607 unsigned int reserved_blocks = 0;
3608 int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
3612 for (i = 0; i < count; i++) {
3613 blkaddr = data_blkaddr(dn->inode, dn->node_page,
3614 dn->ofs_in_node + i);
3616 if (!__is_valid_data_blkaddr(blkaddr))
3618 if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
3619 DATA_GENERIC_ENHANCE)))
3620 return -EFSCORRUPTED;
3624 int compr_blocks = 0;
3628 for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
3629 blkaddr = f2fs_data_blkaddr(dn);
3632 if (blkaddr == COMPRESS_ADDR)
3634 dn->ofs_in_node += cluster_size;
3638 if (__is_valid_data_blkaddr(blkaddr)) {
3643 dn->data_blkaddr = NEW_ADDR;
3644 f2fs_set_data_blkaddr(dn);
3647 reserved = cluster_size - compr_blocks;
3648 ret = inc_valid_block_count(sbi, dn->inode, &reserved);
3652 if (reserved != cluster_size - compr_blocks)
3655 f2fs_i_compr_blocks_update(dn->inode, compr_blocks, true);
3657 reserved_blocks += reserved;
3659 count -= cluster_size;
3662 return reserved_blocks;
3665 static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
3667 struct inode *inode = file_inode(filp);
3668 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3669 pgoff_t page_idx = 0, last_idx;
3670 unsigned int reserved_blocks = 0;
3673 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3676 if (!f2fs_compressed_file(inode))
3679 if (f2fs_readonly(sbi->sb))
3682 ret = mnt_want_write_file(filp);
3686 if (atomic_read(&F2FS_I(inode)->i_compr_blocks))
3689 f2fs_balance_fs(F2FS_I_SB(inode), true);
3693 if (!IS_IMMUTABLE(inode)) {
3698 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3699 down_write(&F2FS_I(inode)->i_mmap_sem);
3701 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3703 while (page_idx < last_idx) {
3704 struct dnode_of_data dn;
3705 pgoff_t end_offset, count;
3707 set_new_dnode(&dn, inode, NULL, NULL, 0);
3708 ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
3710 if (ret == -ENOENT) {
3711 page_idx = f2fs_get_next_page_offset(&dn,
3719 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3720 count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
3721 count = round_up(count, F2FS_I(inode)->i_cluster_size);
3723 ret = reserve_compress_blocks(&dn, count);
3725 f2fs_put_dnode(&dn);
3731 reserved_blocks += ret;
3734 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3735 up_write(&F2FS_I(inode)->i_mmap_sem);
3738 F2FS_I(inode)->i_flags &= ~F2FS_IMMUTABLE_FL;
3739 f2fs_set_inode_flags(inode);
3740 inode->i_ctime = current_time(inode);
3741 f2fs_mark_inode_dirty_sync(inode, true);
3744 inode_unlock(inode);
3746 mnt_drop_write_file(filp);
3749 ret = put_user(reserved_blocks, (u64 __user *)arg);
3750 } else if (reserved_blocks &&
3751 atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
3752 set_sbi_flag(sbi, SBI_NEED_FSCK);
3753 f2fs_warn(sbi, "%s: partial blocks were reserved i_ino=%lx "
3754 "iblocks=%llu, reserved=%u, compr_blocks=%u, "
3756 __func__, inode->i_ino, inode->i_blocks,
3758 atomic_read(&F2FS_I(inode)->i_compr_blocks));
3764 static int f2fs_secure_erase(struct block_device *bdev, struct inode *inode,
3765 pgoff_t off, block_t block, block_t len, u32 flags)
3767 struct request_queue *q = bdev_get_queue(bdev);
3768 sector_t sector = SECTOR_FROM_BLOCK(block);
3769 sector_t nr_sects = SECTOR_FROM_BLOCK(len);
3775 if (flags & F2FS_TRIM_FILE_DISCARD)
3776 ret = blkdev_issue_discard(bdev, sector, nr_sects, GFP_NOFS,
3777 blk_queue_secure_erase(q) ?
3778 BLKDEV_DISCARD_SECURE : 0);
3780 if (!ret && (flags & F2FS_TRIM_FILE_ZEROOUT)) {
3781 if (IS_ENCRYPTED(inode))
3782 ret = fscrypt_zeroout_range(inode, off, block, len);
3784 ret = blkdev_issue_zeroout(bdev, sector, nr_sects,
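/*
 * Illustrative userspace sketch (not kernel code): securely discard and
 * zero out a file's on-disk blocks from offset 0 to EOF.
 *
 *	struct f2fs_sectrim_range sr = {
 *		.start = 0,
 *		.len = (__u64)-1,	// special value: trim to end of file
 *		.flags = F2FS_TRIM_FILE_DISCARD | F2FS_TRIM_FILE_ZEROOUT,
 *	};
 *	ioctl(fd, F2FS_IOC_SEC_TRIM_FILE, &sr);
 */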
3791 static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
3793 struct inode *inode = file_inode(filp);
3794 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3795 struct address_space *mapping = inode->i_mapping;
3796 struct block_device *prev_bdev = NULL;
3797 struct f2fs_sectrim_range range;
3798 pgoff_t index, pg_end, prev_index = 0;
3799 block_t prev_block = 0, len = 0;
3801 bool to_end = false;
3804 if (!(filp->f_mode & FMODE_WRITE))
3807 if (copy_from_user(&range, (struct f2fs_sectrim_range __user *)arg,
3811 if (range.flags == 0 || (range.flags & ~F2FS_TRIM_FILE_MASK) ||
3812 !S_ISREG(inode->i_mode))
3815 if (((range.flags & F2FS_TRIM_FILE_DISCARD) &&
3816 !f2fs_hw_support_discard(sbi)) ||
3817 ((range.flags & F2FS_TRIM_FILE_ZEROOUT) &&
3818 IS_ENCRYPTED(inode) && f2fs_is_multi_device(sbi)))
3821 file_start_write(filp);
3824 if (f2fs_is_atomic_file(inode) || f2fs_compressed_file(inode) ||
3825 range.start >= inode->i_size) {
3833 if (inode->i_size - range.start > range.len) {
3834 end_addr = range.start + range.len;
3836 end_addr = range.len == (u64)-1 ?
3837 sbi->sb->s_maxbytes : inode->i_size;
3841 if (!IS_ALIGNED(range.start, F2FS_BLKSIZE) ||
3842 (!to_end && !IS_ALIGNED(end_addr, F2FS_BLKSIZE))) {
3847 index = F2FS_BYTES_TO_BLK(range.start);
3848 pg_end = DIV_ROUND_UP(end_addr, F2FS_BLKSIZE);
3850 ret = f2fs_convert_inline_inode(inode);
3854 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3855 down_write(&F2FS_I(inode)->i_mmap_sem);
3857 ret = filemap_write_and_wait_range(mapping, range.start,
3858 to_end ? LLONG_MAX : end_addr - 1);
3862 truncate_inode_pages_range(mapping, range.start,
3863 to_end ? -1 : end_addr - 1);
3865 while (index < pg_end) {
3866 struct dnode_of_data dn;
3867 pgoff_t end_offset, count;
3870 set_new_dnode(&dn, inode, NULL, NULL, 0);
3871 ret = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
3873 if (ret == -ENOENT) {
3874 index = f2fs_get_next_page_offset(&dn, index);
3880 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3881 count = min(end_offset - dn.ofs_in_node, pg_end - index);
3882 for (i = 0; i < count; i++, index++, dn.ofs_in_node++) {
3883 struct block_device *cur_bdev;
3884 block_t blkaddr = f2fs_data_blkaddr(&dn);
3886 if (!__is_valid_data_blkaddr(blkaddr))
3889 if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
3890 DATA_GENERIC_ENHANCE)) {
3891 ret = -EFSCORRUPTED;
3892 f2fs_put_dnode(&dn);
3896 cur_bdev = f2fs_target_device(sbi, blkaddr, NULL);
3897 if (f2fs_is_multi_device(sbi)) {
3898 int di = f2fs_target_device_index(sbi, blkaddr);
3900 blkaddr -= FDEV(di).start_blk;
3904 if (prev_bdev == cur_bdev &&
3905 index == prev_index + len &&
3906 blkaddr == prev_block + len) {
3909 ret = f2fs_secure_erase(prev_bdev,
3910 inode, prev_index, prev_block,
3913 f2fs_put_dnode(&dn);
3922 prev_bdev = cur_bdev;
3924 prev_block = blkaddr;
3929 f2fs_put_dnode(&dn);
3931 if (fatal_signal_pending(current)) {
3939 ret = f2fs_secure_erase(prev_bdev, inode, prev_index,
3940 prev_block, len, range.flags);
3942 up_write(&F2FS_I(inode)->i_mmap_sem);
3943 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3945 inode_unlock(inode);
3946 file_end_write(filp);
3951 static int f2fs_ioc_get_compress_option(struct file *filp, unsigned long arg)
3953 struct inode *inode = file_inode(filp);
3954 struct f2fs_comp_option option;
3956 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3959 inode_lock_shared(inode);
3961 if (!f2fs_compressed_file(inode)) {
3962 inode_unlock_shared(inode);
3966 option.algorithm = F2FS_I(inode)->i_compress_algorithm;
3967 option.log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
3969 inode_unlock_shared(inode);
3971 if (copy_to_user((struct f2fs_comp_option __user *)arg, &option,
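/*
 * Illustrative userspace sketch (not kernel code): switch an empty
 * compressed file to a different algorithm and a 4-block (2^2) cluster
 * size. The numeric algorithm index is an assumption here (1 == LZ4 in the
 * kernel's internal COMPRESS_* enum); check your kernel headers.
 *
 *	struct f2fs_comp_option opt = {
 *		.algorithm = 1,		/* assumed: LZ4 */
 *		.log_cluster_size = 2,	/* 4-block clusters */
 *	};
 *	ioctl(fd, F2FS_IOC_SET_COMPRESS_OPTION, &opt);
 */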
3978 static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
3980 struct inode *inode = file_inode(filp);
3981 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3982 struct f2fs_comp_option option;
3985 if (!f2fs_sb_has_compression(sbi))
3988 if (!(filp->f_mode & FMODE_WRITE))
3991 if (copy_from_user(&option, (struct f2fs_comp_option __user *)arg,
3995 if (!f2fs_compressed_file(inode) ||
3996 option.log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
3997 option.log_cluster_size > MAX_COMPRESS_LOG_SIZE ||
3998 option.algorithm >= COMPRESS_MAX)
4001 file_start_write(filp);
4004 if (f2fs_is_mmap_file(inode) || get_dirty_pages(inode)) {
4009 if (inode->i_size != 0) {
4014 F2FS_I(inode)->i_compress_algorithm = option.algorithm;
4015 F2FS_I(inode)->i_log_cluster_size = option.log_cluster_size;
4016 F2FS_I(inode)->i_cluster_size = 1 << option.log_cluster_size;
4017 f2fs_mark_inode_dirty_sync(inode, true);
4019 if (!f2fs_is_compress_backend_ready(inode))
4020 f2fs_warn(sbi, "compression algorithm is successfully set, "
4021 "but current kernel doesn't support this algorithm.");
4023 inode_unlock(inode);
4024 file_end_write(filp);
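/*
 * redirty_blocks() pulls the pages in [page_idx, page_idx + len) into the
 * page cache via readahead and marks them dirty again, so that the caller's
 * subsequent writeback pass rewrites them through the currently selected
 * (de)compression path.
 */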
4029 static int redirty_blocks(struct inode *inode, pgoff_t page_idx, int len)
4031 DEFINE_READAHEAD(ractl, NULL, inode->i_mapping, page_idx);
4032 struct address_space *mapping = inode->i_mapping;
4034 pgoff_t redirty_idx = page_idx;
4035 int i, page_len = 0, ret = 0;
4037 page_cache_ra_unbounded(&ractl, len, 0);
4039 for (i = 0; i < len; i++, page_idx++) {
4040 page = read_cache_page(mapping, page_idx, NULL, NULL);
4042 ret = PTR_ERR(page);
4048 for (i = 0; i < page_len; i++, redirty_idx++) {
4049 page = find_lock_page(mapping, redirty_idx);
4052 set_page_dirty(page);
4053 f2fs_put_page(page, 1);
4054 f2fs_put_page(page, 0);
4060 static int f2fs_ioc_decompress_file(struct file *filp, unsigned long arg)
4062 struct inode *inode = file_inode(filp);
4063 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4064 struct f2fs_inode_info *fi = F2FS_I(inode);
4065 pgoff_t page_idx = 0, last_idx;
4066 unsigned int blk_per_seg = sbi->blocks_per_seg;
4067 int cluster_size = F2FS_I(inode)->i_cluster_size;
4070 if (!f2fs_sb_has_compression(sbi) ||
4071 F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
4074 if (!(filp->f_mode & FMODE_WRITE))
4077 if (!f2fs_compressed_file(inode))
4080 f2fs_balance_fs(F2FS_I_SB(inode), true);
4082 file_start_write(filp);
4085 if (!f2fs_is_compress_backend_ready(inode)) {
4090 if (f2fs_is_mmap_file(inode)) {
4095 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
4099 if (!atomic_read(&fi->i_compr_blocks))
4102 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
4104 count = last_idx - page_idx;
4106 int len = min(cluster_size, count);
4108 ret = redirty_blocks(inode, page_idx, len);
4112 if (get_dirty_pages(inode) >= blk_per_seg)
4113 filemap_fdatawrite(inode->i_mapping);
4120 ret = filemap_write_and_wait_range(inode->i_mapping, 0,
4124 f2fs_warn(sbi, "%s: The file might be partially decompressed "
4125 "(errno=%d). Please delete the file.\n",
4128 inode_unlock(inode);
4129 file_end_write(filp);
4134 static int f2fs_ioc_compress_file(struct file *filp, unsigned long arg)
4136 struct inode *inode = file_inode(filp);
4137 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4138 pgoff_t page_idx = 0, last_idx;
4139 unsigned int blk_per_seg = sbi->blocks_per_seg;
4140 int cluster_size = F2FS_I(inode)->i_cluster_size;
4143 if (!f2fs_sb_has_compression(sbi) ||
4144 F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
4147 if (!(filp->f_mode & FMODE_WRITE))
4150 if (!f2fs_compressed_file(inode))
4153 f2fs_balance_fs(F2FS_I_SB(inode), true);
4155 file_start_write(filp);
4158 if (!f2fs_is_compress_backend_ready(inode)) {
4163 if (f2fs_is_mmap_file(inode)) {
4168 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
4172 set_inode_flag(inode, FI_ENABLE_COMPRESS);
4174 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
4176 count = last_idx - page_idx;
4178 int len = min(cluster_size, count);
4180 ret = redirty_blocks(inode, page_idx, len);
4184 if (get_dirty_pages(inode) >= blk_per_seg)
4185 filemap_fdatawrite(inode->i_mapping);
4192 ret = filemap_write_and_wait_range(inode->i_mapping, 0,
4195 clear_inode_flag(inode, FI_ENABLE_COMPRESS);
4198 f2fs_warn(sbi, "%s: The file might be partially compressed "
4199 "(errno=%d). Please delete the file.\n",
4202 inode_unlock(inode);
4203 file_end_write(filp);
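/*
 * Illustrative userspace sketch (not kernel code): on a filesystem mounted
 * with compress_mode=user, compress a file's existing data in place, or
 * undo it. Neither ioctl takes an argument.
 *
 *	ioctl(fd, F2FS_IOC_COMPRESS_FILE);
 *	ioctl(fd, F2FS_IOC_DECOMPRESS_FILE);
 */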
4208 static long __f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4211 case FS_IOC_GETFLAGS:
4212 return f2fs_ioc_getflags(filp, arg);
4213 case FS_IOC_SETFLAGS:
4214 return f2fs_ioc_setflags(filp, arg);
4215 case FS_IOC_GETVERSION:
4216 return f2fs_ioc_getversion(filp, arg);
4217 case F2FS_IOC_START_ATOMIC_WRITE:
4218 return f2fs_ioc_start_atomic_write(filp);
4219 case F2FS_IOC_COMMIT_ATOMIC_WRITE:
4220 return f2fs_ioc_commit_atomic_write(filp);
4221 case F2FS_IOC_START_VOLATILE_WRITE:
4222 return f2fs_ioc_start_volatile_write(filp);
4223 case F2FS_IOC_RELEASE_VOLATILE_WRITE:
4224 return f2fs_ioc_release_volatile_write(filp);
4225 case F2FS_IOC_ABORT_VOLATILE_WRITE:
4226 return f2fs_ioc_abort_volatile_write(filp);
4227 case F2FS_IOC_SHUTDOWN:
4228 return f2fs_ioc_shutdown(filp, arg);
4230 return f2fs_ioc_fitrim(filp, arg);
4231 case FS_IOC_SET_ENCRYPTION_POLICY:
4232 return f2fs_ioc_set_encryption_policy(filp, arg);
4233 case FS_IOC_GET_ENCRYPTION_POLICY:
4234 return f2fs_ioc_get_encryption_policy(filp, arg);
4235 case FS_IOC_GET_ENCRYPTION_PWSALT:
4236 return f2fs_ioc_get_encryption_pwsalt(filp, arg);
4237 case FS_IOC_GET_ENCRYPTION_POLICY_EX:
4238 return f2fs_ioc_get_encryption_policy_ex(filp, arg);
4239 case FS_IOC_ADD_ENCRYPTION_KEY:
4240 return f2fs_ioc_add_encryption_key(filp, arg);
4241 case FS_IOC_REMOVE_ENCRYPTION_KEY:
4242 return f2fs_ioc_remove_encryption_key(filp, arg);
4243 case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
4244 return f2fs_ioc_remove_encryption_key_all_users(filp, arg);
4245 case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
4246 return f2fs_ioc_get_encryption_key_status(filp, arg);
4247 case FS_IOC_GET_ENCRYPTION_NONCE:
4248 return f2fs_ioc_get_encryption_nonce(filp, arg);
4249 case F2FS_IOC_GARBAGE_COLLECT:
4250 return f2fs_ioc_gc(filp, arg);
4251 case F2FS_IOC_GARBAGE_COLLECT_RANGE:
4252 return f2fs_ioc_gc_range(filp, arg);
4253 case F2FS_IOC_WRITE_CHECKPOINT:
4254 return f2fs_ioc_write_checkpoint(filp, arg);
4255 case F2FS_IOC_DEFRAGMENT:
4256 return f2fs_ioc_defragment(filp, arg);
4257 case F2FS_IOC_MOVE_RANGE:
4258 return f2fs_ioc_move_range(filp, arg);
4259 case F2FS_IOC_FLUSH_DEVICE:
4260 return f2fs_ioc_flush_device(filp, arg);
4261 case F2FS_IOC_GET_FEATURES:
4262 return f2fs_ioc_get_features(filp, arg);
4263 case FS_IOC_FSGETXATTR:
4264 return f2fs_ioc_fsgetxattr(filp, arg);
4265 case FS_IOC_FSSETXATTR:
4266 return f2fs_ioc_fssetxattr(filp, arg);
4267 case F2FS_IOC_GET_PIN_FILE:
4268 return f2fs_ioc_get_pin_file(filp, arg);
4269 case F2FS_IOC_SET_PIN_FILE:
4270 return f2fs_ioc_set_pin_file(filp, arg);
4271 case F2FS_IOC_PRECACHE_EXTENTS:
4272 return f2fs_ioc_precache_extents(filp, arg);
4273 case F2FS_IOC_RESIZE_FS:
4274 return f2fs_ioc_resize_fs(filp, arg);
4275 case FS_IOC_ENABLE_VERITY:
4276 return f2fs_ioc_enable_verity(filp, arg);
4277 case FS_IOC_MEASURE_VERITY:
4278 return f2fs_ioc_measure_verity(filp, arg);
4279 case FS_IOC_GETFSLABEL:
4280 return f2fs_ioc_getfslabel(filp, arg);
4281 case FS_IOC_SETFSLABEL:
4282 return f2fs_ioc_setfslabel(filp, arg);
4283 case F2FS_IOC_GET_COMPRESS_BLOCKS:
4284 return f2fs_get_compress_blocks(filp, arg);
4285 case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
4286 return f2fs_release_compress_blocks(filp, arg);
4287 case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
4288 return f2fs_reserve_compress_blocks(filp, arg);
4289 case F2FS_IOC_SEC_TRIM_FILE:
4290 return f2fs_sec_trim_file(filp, arg);
4291 case F2FS_IOC_GET_COMPRESS_OPTION:
4292 return f2fs_ioc_get_compress_option(filp, arg);
4293 case F2FS_IOC_SET_COMPRESS_OPTION:
4294 return f2fs_ioc_set_compress_option(filp, arg);
4295 case F2FS_IOC_DECOMPRESS_FILE:
4296 return f2fs_ioc_decompress_file(filp, arg);
4297 case F2FS_IOC_COMPRESS_FILE:
4298 return f2fs_ioc_compress_file(filp, arg);
4304 long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4306 if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
4308 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(filp))))
4311 return __f2fs_ioctl(filp, cmd, arg);
4314 static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
4316 struct file *file = iocb->ki_filp;
4317 struct inode *inode = file_inode(file);
4320 if (!f2fs_is_compress_backend_ready(inode))
4323 ret = generic_file_read_iter(iocb, iter);
4326 f2fs_update_iostat(F2FS_I_SB(inode), APP_READ_IO, ret);
4331 static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
4333 struct file *file = iocb->ki_filp;
4334 struct inode *inode = file_inode(file);
4337 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
4342 if (!f2fs_is_compress_backend_ready(inode)) {
4347 if (iocb->ki_flags & IOCB_NOWAIT) {
4348 if (!inode_trylock(inode)) {
4356 ret = generic_write_checks(iocb, from);
4358 bool preallocated = false;
4359 size_t target_size = 0;
4362 if (iov_iter_fault_in_readable(from, iov_iter_count(from)))
4363 set_inode_flag(inode, FI_NO_PREALLOC);
4365 if ((iocb->ki_flags & IOCB_NOWAIT)) {
4366 if (!f2fs_overwrite_io(inode, iocb->ki_pos,
4367 iov_iter_count(from)) ||
4368 f2fs_has_inline_data(inode) ||
4369 f2fs_force_buffered_io(inode, iocb, from)) {
4370 clear_inode_flag(inode, FI_NO_PREALLOC);
4371 inode_unlock(inode);
4378 if (is_inode_flag_set(inode, FI_NO_PREALLOC))
4381 if (iocb->ki_flags & IOCB_DIRECT) {
4383 * Convert inline data for Direct I/O before entering
4386 err = f2fs_convert_inline_inode(inode);
4390 * If f2fs_force_buffered_io() is true, we have to allocate
4391 * blocks all the time, since f2fs_direct_IO() will fall
4392 * back to buffered IO.
4394 if (!f2fs_force_buffered_io(inode, iocb, from) &&
4395 allow_outplace_dio(inode, iocb, from))
4398 preallocated = true;
4399 target_size = iocb->ki_pos + iov_iter_count(from);
4401 err = f2fs_preallocate_blocks(iocb, from);
4404 clear_inode_flag(inode, FI_NO_PREALLOC);
4405 inode_unlock(inode);
4410 ret = __generic_file_write_iter(iocb, from);
4411 clear_inode_flag(inode, FI_NO_PREALLOC);
4413 /* if we couldn't write data, we should deallocate blocks. */
4414 if (preallocated && i_size_read(inode) < target_size)
4415 f2fs_truncate(inode);
4418 f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
4420 inode_unlock(inode);
4422 trace_f2fs_file_write_iter(inode, iocb->ki_pos,
4423 iov_iter_count(from), ret);
4425 ret = generic_write_sync(iocb, ret);
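/*
 * 32-bit compat note: f2fs_gc_range and f2fs_move_range carry __u64
 * members, so a 32-bit userspace may lay them out differently than the
 * 64-bit kernel expects; the compat wrappers below therefore copy each
 * member with get_user() instead of reusing the native structures.
 */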
4429 #ifdef CONFIG_COMPAT
4430 struct compat_f2fs_gc_range {
4435 #define F2FS_IOC32_GARBAGE_COLLECT_RANGE _IOW(F2FS_IOCTL_MAGIC, 11,\
4436 struct compat_f2fs_gc_range)
4438 static int f2fs_compat_ioc_gc_range(struct file *file, unsigned long arg)
4440 struct compat_f2fs_gc_range __user *urange;
4441 struct f2fs_gc_range range;
4444 urange = compat_ptr(arg);
4445 err = get_user(range.sync, &urange->sync);
4446 err |= get_user(range.start, &urange->start);
4447 err |= get_user(range.len, &urange->len);
4451 return __f2fs_ioc_gc_range(file, &range);
4454 struct compat_f2fs_move_range {
4460 #define F2FS_IOC32_MOVE_RANGE _IOWR(F2FS_IOCTL_MAGIC, 9, \
4461 struct compat_f2fs_move_range)
4463 static int f2fs_compat_ioc_move_range(struct file *file, unsigned long arg)
4465 struct compat_f2fs_move_range __user *urange;
4466 struct f2fs_move_range range;
4469 urange = compat_ptr(arg);
4470 err = get_user(range.dst_fd, &urange->dst_fd);
4471 err |= get_user(range.pos_in, &urange->pos_in);
4472 err |= get_user(range.pos_out, &urange->pos_out);
4473 err |= get_user(range.len, &urange->len);
4477 return __f2fs_ioc_move_range(file, &range);
4480 long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
4482 if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
4484 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(file))))
4488 case FS_IOC32_GETFLAGS:
4489 cmd = FS_IOC_GETFLAGS;
4491 case FS_IOC32_SETFLAGS:
4492 cmd = FS_IOC_SETFLAGS;
4494 case FS_IOC32_GETVERSION:
4495 cmd = FS_IOC_GETVERSION;
4497 case F2FS_IOC32_GARBAGE_COLLECT_RANGE:
4498 return f2fs_compat_ioc_gc_range(file, arg);
4499 case F2FS_IOC32_MOVE_RANGE:
4500 return f2fs_compat_ioc_move_range(file, arg);
4501 case F2FS_IOC_START_ATOMIC_WRITE:
4502 case F2FS_IOC_COMMIT_ATOMIC_WRITE:
4503 case F2FS_IOC_START_VOLATILE_WRITE:
4504 case F2FS_IOC_RELEASE_VOLATILE_WRITE:
4505 case F2FS_IOC_ABORT_VOLATILE_WRITE:
4506 case F2FS_IOC_SHUTDOWN:
4508 case FS_IOC_SET_ENCRYPTION_POLICY:
4509 case FS_IOC_GET_ENCRYPTION_PWSALT:
4510 case FS_IOC_GET_ENCRYPTION_POLICY:
4511 case FS_IOC_GET_ENCRYPTION_POLICY_EX:
4512 case FS_IOC_ADD_ENCRYPTION_KEY:
4513 case FS_IOC_REMOVE_ENCRYPTION_KEY:
4514 case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
4515 case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
4516 case FS_IOC_GET_ENCRYPTION_NONCE:
4517 case F2FS_IOC_GARBAGE_COLLECT:
4518 case F2FS_IOC_WRITE_CHECKPOINT:
4519 case F2FS_IOC_DEFRAGMENT:
4520 case F2FS_IOC_FLUSH_DEVICE:
4521 case F2FS_IOC_GET_FEATURES:
4522 case FS_IOC_FSGETXATTR:
4523 case FS_IOC_FSSETXATTR:
4524 case F2FS_IOC_GET_PIN_FILE:
4525 case F2FS_IOC_SET_PIN_FILE:
4526 case F2FS_IOC_PRECACHE_EXTENTS:
4527 case F2FS_IOC_RESIZE_FS:
4528 case FS_IOC_ENABLE_VERITY:
4529 case FS_IOC_MEASURE_VERITY:
4530 case FS_IOC_GETFSLABEL:
4531 case FS_IOC_SETFSLABEL:
4532 case F2FS_IOC_GET_COMPRESS_BLOCKS:
4533 case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
4534 case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
4535 case F2FS_IOC_SEC_TRIM_FILE:
4536 case F2FS_IOC_GET_COMPRESS_OPTION:
4537 case F2FS_IOC_SET_COMPRESS_OPTION:
4538 case F2FS_IOC_DECOMPRESS_FILE:
4539 case F2FS_IOC_COMPRESS_FILE:
4542 return -ENOIOCTLCMD;
4544 return __f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
4548 const struct file_operations f2fs_file_operations = {
4549 .llseek = f2fs_llseek,
4550 .read_iter = f2fs_file_read_iter,
4551 .write_iter = f2fs_file_write_iter,
4552 .open = f2fs_file_open,
4553 .release = f2fs_release_file,
4554 .mmap = f2fs_file_mmap,
4555 .flush = f2fs_file_flush,
4556 .fsync = f2fs_sync_file,
4557 .fallocate = f2fs_fallocate,
4558 .unlocked_ioctl = f2fs_ioctl,
4559 #ifdef CONFIG_COMPAT
4560 .compat_ioctl = f2fs_compat_ioctl,
4562 .splice_read = generic_file_splice_read,
4563 .splice_write = iter_file_splice_write,