1 // SPDX-License-Identifier: GPL-2.0
5 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6 * http://www.samsung.com/
9 #include <linux/f2fs_fs.h>
10 #include <linux/stat.h>
11 #include <linux/buffer_head.h>
12 #include <linux/writeback.h>
13 #include <linux/blkdev.h>
14 #include <linux/falloc.h>
15 #include <linux/types.h>
16 #include <linux/compat.h>
17 #include <linux/uaccess.h>
18 #include <linux/mount.h>
19 #include <linux/pagevec.h>
20 #include <linux/uio.h>
21 #include <linux/uuid.h>
22 #include <linux/file.h>
23 #include <linux/nls.h>
24 #include <linux/sched/signal.h>
25 #include <linux/fileattr.h>
26 #include <linux/fadvise.h>
27 #include <linux/iomap.h>
36 #include <trace/events/f2fs.h>
37 #include <uapi/linux/f2fs.h>
39 static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
41 struct inode *inode = file_inode(vmf->vma->vm_file);
44 ret = filemap_fault(vmf);
46 f2fs_update_iostat(F2FS_I_SB(inode), inode,
47 APP_MAPPED_READ_IO, F2FS_BLKSIZE);
49 trace_f2fs_filemap_fault(inode, vmf->pgoff, (unsigned long)ret);
54 static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
56 struct page *page = vmf->page;
57 struct inode *inode = file_inode(vmf->vma->vm_file);
58 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
59 struct dnode_of_data dn;
60 bool need_alloc = true;
63 if (unlikely(IS_IMMUTABLE(inode)))
64 return VM_FAULT_SIGBUS;
66 if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
67 return VM_FAULT_SIGBUS;
69 if (unlikely(f2fs_cp_error(sbi))) {
74 if (!f2fs_is_checkpoint_ready(sbi)) {
79 err = f2fs_convert_inline_inode(inode);
83 #ifdef CONFIG_F2FS_FS_COMPRESSION
84 if (f2fs_compressed_file(inode)) {
85 int ret = f2fs_is_compressed_cluster(inode, page->index);
95 /* should be done outside of any locked page */
97 f2fs_balance_fs(sbi, true);
99 sb_start_pagefault(inode->i_sb);
101 f2fs_bug_on(sbi, f2fs_has_inline_data(inode));
103 file_update_time(vmf->vma->vm_file);
104 filemap_invalidate_lock_shared(inode->i_mapping);
106 if (unlikely(page->mapping != inode->i_mapping ||
107 page_offset(page) > i_size_read(inode) ||
108 !PageUptodate(page))) {
115 /* block allocation */
116 set_new_dnode(&dn, inode, NULL, NULL, 0);
117 err = f2fs_get_block_locked(&dn, page->index);
120 #ifdef CONFIG_F2FS_FS_COMPRESSION
122 set_new_dnode(&dn, inode, NULL, NULL, 0);
123 err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
132 f2fs_wait_on_page_writeback(page, DATA, false, true);
134 /* wait for GCed page writeback via META_MAPPING */
135 f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
138 * check to see if the page is mapped already (no holes)
140 if (PageMappedToDisk(page))
143 /* page is wholly or partially inside EOF */
144 if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
145 i_size_read(inode)) {
148 offset = i_size_read(inode) & ~PAGE_MASK;
149 zero_user_segment(page, offset, PAGE_SIZE);
151 set_page_dirty(page);
152 if (!PageUptodate(page))
153 SetPageUptodate(page);
155 f2fs_update_iostat(sbi, inode, APP_MAPPED_IO, F2FS_BLKSIZE);
156 f2fs_update_time(sbi, REQ_TIME);
158 trace_f2fs_vm_page_mkwrite(page, DATA);
160 filemap_invalidate_unlock_shared(inode->i_mapping);
162 sb_end_pagefault(inode->i_sb);
164 return block_page_mkwrite_return(err);
167 static const struct vm_operations_struct f2fs_file_vm_ops = {
168 .fault = f2fs_filemap_fault,
169 .map_pages = filemap_map_pages,
170 .page_mkwrite = f2fs_vm_page_mkwrite,
173 static int get_parent_ino(struct inode *inode, nid_t *pino)
175 struct dentry *dentry;
178 * Make sure to get the non-deleted alias. The alias associated with
179 * the open file descriptor being fsync()'ed may be deleted already.
181 dentry = d_find_alias(inode);
185 *pino = parent_ino(dentry);
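/*
 * Decide whether this fsync can rely on roll-forward recovery alone, or
 * whether a full checkpoint is required. Any cp_reason other than
 * CP_NO_NEEDED makes f2fs_do_sync_file() call f2fs_sync_fs().
 * Illustrative example: fsync() on a file with i_nlink != 1 yields
 * CP_HARDLINK and therefore triggers a full checkpoint.
 */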
190 static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
192 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
193 enum cp_reason_type cp_reason = CP_NO_NEEDED;
195 if (!S_ISREG(inode->i_mode))
196 cp_reason = CP_NON_REGULAR;
197 else if (f2fs_compressed_file(inode))
198 cp_reason = CP_COMPRESSED;
199 else if (inode->i_nlink != 1)
200 cp_reason = CP_HARDLINK;
201 else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
202 cp_reason = CP_SB_NEED_CP;
203 else if (file_wrong_pino(inode))
204 cp_reason = CP_WRONG_PINO;
205 else if (!f2fs_space_for_roll_forward(sbi))
206 cp_reason = CP_NO_SPC_ROLL;
207 else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
208 cp_reason = CP_NODE_NEED_CP;
209 else if (test_opt(sbi, FASTBOOT))
210 cp_reason = CP_FASTBOOT_MODE;
211 else if (F2FS_OPTION(sbi).active_logs == 2)
212 cp_reason = CP_SPEC_LOG_NUM;
213 else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT &&
214 f2fs_need_dentry_mark(sbi, inode->i_ino) &&
215 f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
217 cp_reason = CP_RECOVER_DIR;
222 static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
224 struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
226 /* But we still need to check for pending inode updates */
227 if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino))
233 static void try_to_fix_pino(struct inode *inode)
235 struct f2fs_inode_info *fi = F2FS_I(inode);
238 f2fs_down_write(&fi->i_sem);
239 if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
240 get_parent_ino(inode, &pino)) {
241 f2fs_i_pino_write(inode, pino);
242 file_got_pino(inode);
244 f2fs_up_write(&fi->i_sem);
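/*
 * Outline of the fsync path below: write back dirty data pages first;
 * then either issue a full checkpoint via f2fs_sync_fs() when
 * need_do_checkpoint() reports a cp_reason, or write and wait on the
 * node pages needed for roll-forward recovery and finally issue a
 * flush, depending on the fsync_mode mount option.
 */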
247 static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
248 int datasync, bool atomic)
250 struct inode *inode = file->f_mapping->host;
251 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
252 nid_t ino = inode->i_ino;
254 enum cp_reason_type cp_reason = 0;
255 struct writeback_control wbc = {
256 .sync_mode = WB_SYNC_ALL,
257 .nr_to_write = LONG_MAX,
260 unsigned int seq_id = 0;
262 if (unlikely(f2fs_readonly(inode->i_sb)))
265 trace_f2fs_sync_file_enter(inode);
267 if (S_ISDIR(inode->i_mode))
270 /* if fdatasync is triggered, let's do in-place-update */
271 if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
272 set_inode_flag(inode, FI_NEED_IPU);
273 ret = file_write_and_wait_range(file, start, end);
274 clear_inode_flag(inode, FI_NEED_IPU);
276 if (ret || is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
277 trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
281 /* if the inode is dirty, let's recover all the time */
282 if (!f2fs_skip_inode_update(inode, datasync)) {
283 f2fs_write_inode(inode, NULL);
288 * if there is no written data, don't waste time writing recovery info.
290 if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
291 !f2fs_exist_written_data(sbi, ino, APPEND_INO)) {
293 /* it may call write_inode just prior to fsync */
294 if (need_inode_page_update(sbi, ino))
297 if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
298 f2fs_exist_written_data(sbi, ino, UPDATE_INO))
303 * in the OPU case, during fsync(), the node can be persisted before the
304 * data when the lower device doesn't support write barriers, resulting
305 * in data corruption after a sudden power-off (SPO).
306 * So for strict fsync mode, force atomic write semantics to keep the
307 * write order between data/node and the last node, to avoid potential
308 * data corruption.
310 if (F2FS_OPTION(sbi).fsync_mode ==
311 FSYNC_MODE_STRICT && !atomic)
316 * Both fdatasync() and fsync() are able to be recovered from a sudden power-off.
319 f2fs_down_read(&F2FS_I(inode)->i_sem);
320 cp_reason = need_do_checkpoint(inode);
321 f2fs_up_read(&F2FS_I(inode)->i_sem);
324 /* all the dirty node pages should be flushed for POR */
325 ret = f2fs_sync_fs(inode->i_sb, 1);
328 * We've secured consistency through sync_fs. Following pino
329 * will be used only for fsynced inodes after checkpoint.
331 try_to_fix_pino(inode);
332 clear_inode_flag(inode, FI_APPEND_WRITE);
333 clear_inode_flag(inode, FI_UPDATE_WRITE);
337 atomic_inc(&sbi->wb_sync_req[NODE]);
338 ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
339 atomic_dec(&sbi->wb_sync_req[NODE]);
343 /* if cp_error is set, avoid an infinite loop */
344 if (unlikely(f2fs_cp_error(sbi))) {
349 if (f2fs_need_inode_block_update(sbi, ino)) {
350 f2fs_mark_inode_dirty_sync(inode, true);
351 f2fs_write_inode(inode, NULL);
356 * If it's an atomic write, write ordering is already guaranteed, so we
357 * don't need to wait for node write completion here, since we use a
358 * node chain which serializes the node blocks. If one of the node
359 * writes is reordered, we simply see a broken chain, which stops
360 * roll-forward recovery: we recover either all node blocks or none.
364 ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id);
369 /* once recovery info is written, we don't need to track this */
370 f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
371 clear_inode_flag(inode, FI_APPEND_WRITE);
373 if ((!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER) ||
374 (atomic && !test_opt(sbi, NOBARRIER) && f2fs_sb_has_blkzoned(sbi)))
375 ret = f2fs_issue_flush(sbi, inode->i_ino);
377 f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
378 clear_inode_flag(inode, FI_UPDATE_WRITE);
379 f2fs_remove_ino_entry(sbi, ino, FLUSH_INO);
381 f2fs_update_time(sbi, REQ_TIME);
383 trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
387 int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
389 if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
391 return f2fs_do_sync_file(file, start, end, datasync, false);
394 static bool __found_offset(struct address_space *mapping, block_t blkaddr,
395 pgoff_t index, int whence)
399 if (__is_valid_data_blkaddr(blkaddr))
401 if (blkaddr == NEW_ADDR &&
402 xa_get_mark(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY))
406 if (blkaddr == NULL_ADDR)
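/*
 * Illustration of the __found_offset() rules above: for SEEK_DATA, a
 * valid on-disk address counts as data, and a NEW_ADDR (preallocated)
 * block counts as data only while its page cache page is dirty; for
 * SEEK_HOLE, a NULL_ADDR block counts as a hole.
 */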
413 static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
415 struct inode *inode = file->f_mapping->host;
416 loff_t maxbytes = inode->i_sb->s_maxbytes;
417 struct dnode_of_data dn;
418 pgoff_t pgofs, end_offset;
419 loff_t data_ofs = offset;
425 isize = i_size_read(inode);
429 /* handle inline data case */
430 if (f2fs_has_inline_data(inode)) {
431 if (whence == SEEK_HOLE) {
434 } else if (whence == SEEK_DATA) {
440 pgofs = (pgoff_t)(offset >> PAGE_SHIFT);
442 for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
443 set_new_dnode(&dn, inode, NULL, NULL, 0);
444 err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
445 if (err && err != -ENOENT) {
447 } else if (err == -ENOENT) {
448 /* direct node does not exist */
449 if (whence == SEEK_DATA) {
450 pgofs = f2fs_get_next_page_offset(&dn, pgofs);
457 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
459 /* find data/hole in dnode block */
460 for (; dn.ofs_in_node < end_offset;
461 dn.ofs_in_node++, pgofs++,
462 data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
465 blkaddr = f2fs_data_blkaddr(&dn);
467 if (__is_valid_data_blkaddr(blkaddr) &&
468 !f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
469 blkaddr, DATA_GENERIC_ENHANCE)) {
474 if (__found_offset(file->f_mapping, blkaddr,
483 if (whence == SEEK_DATA)
486 if (whence == SEEK_HOLE && data_ofs > isize)
489 return vfs_setpos(file, data_ofs, maxbytes);
495 static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
497 struct inode *inode = file->f_mapping->host;
498 loff_t maxbytes = inode->i_sb->s_maxbytes;
500 if (f2fs_compressed_file(inode))
501 maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS;
507 return generic_file_llseek_size(file, offset, whence,
508 maxbytes, i_size_read(inode));
513 return f2fs_seek_block(file, offset, whence);
519 static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
521 struct inode *inode = file_inode(file);
523 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
526 if (!f2fs_is_compress_backend_ready(inode))
530 vma->vm_ops = &f2fs_file_vm_ops;
531 set_inode_flag(inode, FI_MMAP_FILE);
535 static int f2fs_file_open(struct inode *inode, struct file *filp)
537 int err = fscrypt_file_open(inode, filp);
542 if (!f2fs_is_compress_backend_ready(inode))
545 err = fsverity_file_open(inode, filp);
549 filp->f_mode |= FMODE_NOWAIT;
551 return dquot_file_open(inode, filp);
554 void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
556 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
557 struct f2fs_node *raw_node;
558 int nr_free = 0, ofs = dn->ofs_in_node, len = count;
561 bool compressed_cluster = false;
562 int cluster_index = 0, valid_blocks = 0;
563 int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
564 bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks);
566 if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
567 base = get_extra_isize(dn->inode);
569 raw_node = F2FS_NODE(dn->node_page);
570 addr = blkaddr_in_node(raw_node) + base + ofs;
572 /* Assumption: truncation starts at a cluster boundary */
573 for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
574 block_t blkaddr = le32_to_cpu(*addr);
576 if (f2fs_compressed_file(dn->inode) &&
577 !(cluster_index & (cluster_size - 1))) {
578 if (compressed_cluster)
579 f2fs_i_compr_blocks_update(dn->inode,
580 valid_blocks, false);
581 compressed_cluster = (blkaddr == COMPRESS_ADDR);
585 if (blkaddr == NULL_ADDR)
588 dn->data_blkaddr = NULL_ADDR;
589 f2fs_set_data_blkaddr(dn);
591 if (__is_valid_data_blkaddr(blkaddr)) {
592 if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
593 DATA_GENERIC_ENHANCE))
595 if (compressed_cluster)
599 if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
600 clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
602 f2fs_invalidate_blocks(sbi, blkaddr);
604 if (!released || blkaddr != COMPRESS_ADDR)
608 if (compressed_cluster)
609 f2fs_i_compr_blocks_update(dn->inode, valid_blocks, false);
614 * once we invalidate a valid blkaddr in the range [ofs, ofs + count],
615 * we will invalidate all blkaddrs in the whole range.
617 fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
619 f2fs_update_read_extent_cache_range(dn, fofs, 0, len);
620 f2fs_update_age_extent_cache_range(dn, fofs, len);
621 dec_valid_block_count(sbi, dn->inode, nr_free);
623 dn->ofs_in_node = ofs;
625 f2fs_update_time(sbi, REQ_TIME);
626 trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
627 dn->ofs_in_node, nr_free);
630 void f2fs_truncate_data_blocks(struct dnode_of_data *dn)
632 f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
635 static int truncate_partial_data_page(struct inode *inode, u64 from,
638 loff_t offset = from & (PAGE_SIZE - 1);
639 pgoff_t index = from >> PAGE_SHIFT;
640 struct address_space *mapping = inode->i_mapping;
643 if (!offset && !cache_only)
647 page = find_lock_page(mapping, index);
648 if (page && PageUptodate(page))
650 f2fs_put_page(page, 1);
654 page = f2fs_get_lock_data_page(inode, index, true);
656 return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
658 f2fs_wait_on_page_writeback(page, DATA, true, true);
659 zero_user(page, offset, PAGE_SIZE - offset);
661 /* An encrypted inode should have a key available to truncate the last page. */
662 f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode));
664 set_page_dirty(page);
665 f2fs_put_page(page, 1);
669 int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock)
671 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
672 struct dnode_of_data dn;
674 int count = 0, err = 0;
676 bool truncate_page = false;
678 trace_f2fs_truncate_blocks_enter(inode, from);
680 free_from = (pgoff_t)F2FS_BLK_ALIGN(from);
682 if (free_from >= max_file_blocks(inode))
688 ipage = f2fs_get_node_page(sbi, inode->i_ino);
690 err = PTR_ERR(ipage);
694 if (f2fs_has_inline_data(inode)) {
695 f2fs_truncate_inline_inode(inode, ipage, from);
696 f2fs_put_page(ipage, 1);
697 truncate_page = true;
701 set_new_dnode(&dn, inode, ipage, NULL, 0);
702 err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
709 count = ADDRS_PER_PAGE(dn.node_page, inode);
711 count -= dn.ofs_in_node;
712 f2fs_bug_on(sbi, count < 0);
714 if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
715 f2fs_truncate_data_blocks_range(&dn, count);
721 err = f2fs_truncate_inode_blocks(inode, free_from);
726 /* lastly zero out the first data page */
728 err = truncate_partial_data_page(inode, from, truncate_page);
730 trace_f2fs_truncate_blocks_exit(inode, err);
734 int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
736 u64 free_from = from;
739 #ifdef CONFIG_F2FS_FS_COMPRESSION
741 * for a compressed file, only cluster-size-aligned
742 * truncation is supported.
744 if (f2fs_compressed_file(inode))
745 free_from = round_up(from,
746 F2FS_I(inode)->i_cluster_size << PAGE_SHIFT);
749 err = f2fs_do_truncate_blocks(inode, free_from, lock);
753 #ifdef CONFIG_F2FS_FS_COMPRESSION
755 * For a compressed file, direct writes are disallowed once its compressed
756 * blocks have been released, but should be allowed again after truncation to zero.
758 if (f2fs_compressed_file(inode) && !free_from
759 && is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
760 clear_inode_flag(inode, FI_COMPRESS_RELEASED);
762 if (from != free_from) {
763 err = f2fs_truncate_partial_cluster(inode, from, lock);
772 int f2fs_truncate(struct inode *inode)
776 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
779 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
780 S_ISLNK(inode->i_mode)))
783 trace_f2fs_truncate(inode);
785 if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE))
788 err = f2fs_dquot_initialize(inode);
792 /* we should check inline_data size */
793 if (!f2fs_may_inline_data(inode)) {
794 err = f2fs_convert_inline_inode(inode);
799 err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
803 inode->i_mtime = inode->i_ctime = current_time(inode);
804 f2fs_mark_inode_dirty_sync(inode, false);
808 static bool f2fs_force_buffered_io(struct inode *inode, int rw)
810 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
812 if (!fscrypt_dio_supported(inode))
814 if (fsverity_active(inode))
816 if (f2fs_compressed_file(inode))
819 /* disallow direct IO if any of the devices has an unaligned blksize */
820 if (f2fs_is_multi_device(sbi) && !sbi->aligned_blksize)
823 * for a zoned block device, fall back from direct IO to buffered IO,
824 * so that all IOs can be serialized by the log-structured write path.
826 if (f2fs_sb_has_blkzoned(sbi) && (rw == WRITE))
828 if (f2fs_lfs_mode(sbi) && rw == WRITE && F2FS_IO_ALIGNED(sbi))
830 if (is_sbi_flag_set(sbi, SBI_CP_DISABLED))
836 int f2fs_getattr(struct mnt_idmap *idmap, const struct path *path,
837 struct kstat *stat, u32 request_mask, unsigned int query_flags)
839 struct inode *inode = d_inode(path->dentry);
840 struct f2fs_inode_info *fi = F2FS_I(inode);
841 struct f2fs_inode *ri = NULL;
844 if (f2fs_has_extra_attr(inode) &&
845 f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
846 F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
847 stat->result_mask |= STATX_BTIME;
848 stat->btime.tv_sec = fi->i_crtime.tv_sec;
849 stat->btime.tv_nsec = fi->i_crtime.tv_nsec;
853 * Return the DIO alignment restrictions if requested. We only return
854 * this information when requested, since on encrypted files it might
855 * take a fair bit of work to get if the file wasn't opened recently.
857 * f2fs sometimes supports DIO reads but not DIO writes. STATX_DIOALIGN
858 * cannot represent that, so in that case we report no DIO support.
860 if ((request_mask & STATX_DIOALIGN) && S_ISREG(inode->i_mode)) {
861 unsigned int bsize = i_blocksize(inode);
863 stat->result_mask |= STATX_DIOALIGN;
864 if (!f2fs_force_buffered_io(inode, WRITE)) {
865 stat->dio_mem_align = bsize;
866 stat->dio_offset_align = bsize;
871 if (flags & F2FS_COMPR_FL)
872 stat->attributes |= STATX_ATTR_COMPRESSED;
873 if (flags & F2FS_APPEND_FL)
874 stat->attributes |= STATX_ATTR_APPEND;
875 if (IS_ENCRYPTED(inode))
876 stat->attributes |= STATX_ATTR_ENCRYPTED;
877 if (flags & F2FS_IMMUTABLE_FL)
878 stat->attributes |= STATX_ATTR_IMMUTABLE;
879 if (flags & F2FS_NODUMP_FL)
880 stat->attributes |= STATX_ATTR_NODUMP;
881 if (IS_VERITY(inode))
882 stat->attributes |= STATX_ATTR_VERITY;
884 stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
886 STATX_ATTR_ENCRYPTED |
887 STATX_ATTR_IMMUTABLE |
891 generic_fillattr(idmap, inode, stat);
893 /* we need to show initial sectors used for inline_data/dentries */
894 if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
895 f2fs_has_inline_dentry(inode))
896 stat->blocks += (stat->size + 511) >> 9;
901 #ifdef CONFIG_F2FS_FS_POSIX_ACL
902 static void __setattr_copy(struct mnt_idmap *idmap,
903 struct inode *inode, const struct iattr *attr)
905 unsigned int ia_valid = attr->ia_valid;
907 i_uid_update(idmap, attr, inode);
908 i_gid_update(idmap, attr, inode);
909 if (ia_valid & ATTR_ATIME)
910 inode->i_atime = attr->ia_atime;
911 if (ia_valid & ATTR_MTIME)
912 inode->i_mtime = attr->ia_mtime;
913 if (ia_valid & ATTR_CTIME)
914 inode->i_ctime = attr->ia_ctime;
915 if (ia_valid & ATTR_MODE) {
916 umode_t mode = attr->ia_mode;
917 vfsgid_t vfsgid = i_gid_into_vfsgid(idmap, inode);
919 if (!vfsgid_in_group_p(vfsgid) &&
920 !capable_wrt_inode_uidgid(idmap, inode, CAP_FSETID))
922 set_acl_inode(inode, mode);
926 #define __setattr_copy setattr_copy
929 int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
932 struct inode *inode = d_inode(dentry);
935 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
938 if (unlikely(IS_IMMUTABLE(inode)))
941 if (unlikely(IS_APPEND(inode) &&
942 (attr->ia_valid & (ATTR_MODE | ATTR_UID |
943 ATTR_GID | ATTR_TIMES_SET))))
946 if ((attr->ia_valid & ATTR_SIZE) &&
947 !f2fs_is_compress_backend_ready(inode))
950 err = setattr_prepare(idmap, dentry, attr);
954 err = fscrypt_prepare_setattr(dentry, attr);
958 err = fsverity_prepare_setattr(dentry, attr);
962 if (is_quota_modification(idmap, inode, attr)) {
963 err = f2fs_dquot_initialize(inode);
967 if (i_uid_needs_update(idmap, attr, inode) ||
968 i_gid_needs_update(idmap, attr, inode)) {
969 f2fs_lock_op(F2FS_I_SB(inode));
970 err = dquot_transfer(idmap, inode, attr);
972 set_sbi_flag(F2FS_I_SB(inode),
973 SBI_QUOTA_NEED_REPAIR);
974 f2fs_unlock_op(F2FS_I_SB(inode));
978 * update uid/gid under lock_op(), so that dquot and inode can
979 * be updated atomically.
981 i_uid_update(idmap, attr, inode);
982 i_gid_update(idmap, attr, inode);
983 f2fs_mark_inode_dirty_sync(inode, true);
984 f2fs_unlock_op(F2FS_I_SB(inode));
987 if (attr->ia_valid & ATTR_SIZE) {
988 loff_t old_size = i_size_read(inode);
990 if (attr->ia_size > MAX_INLINE_DATA(inode)) {
992 * should convert the inline inode before i_size_write, so that an
993 * inode carrying the inline flag never exceeds the inline_data size.
995 err = f2fs_convert_inline_inode(inode);
1000 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1001 filemap_invalidate_lock(inode->i_mapping);
1003 truncate_setsize(inode, attr->ia_size);
1005 if (attr->ia_size <= old_size)
1006 err = f2fs_truncate(inode);
1008 * do not trim all blocks after i_size if target size is
1009 * larger than i_size.
1011 filemap_invalidate_unlock(inode->i_mapping);
1012 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1016 spin_lock(&F2FS_I(inode)->i_size_lock);
1017 inode->i_mtime = inode->i_ctime = current_time(inode);
1018 F2FS_I(inode)->last_disk_size = i_size_read(inode);
1019 spin_unlock(&F2FS_I(inode)->i_size_lock);
1022 __setattr_copy(idmap, inode, attr);
1024 if (attr->ia_valid & ATTR_MODE) {
1025 err = posix_acl_chmod(idmap, dentry, f2fs_get_inode_mode(inode));
1027 if (is_inode_flag_set(inode, FI_ACL_MODE)) {
1029 inode->i_mode = F2FS_I(inode)->i_acl_mode;
1030 clear_inode_flag(inode, FI_ACL_MODE);
1034 /* file size may have changed here */
1035 f2fs_mark_inode_dirty_sync(inode, true);
1037 /* inode change will produce dirty node pages flushed by checkpoint */
1038 f2fs_balance_fs(F2FS_I_SB(inode), true);
1043 const struct inode_operations f2fs_file_inode_operations = {
1044 .getattr = f2fs_getattr,
1045 .setattr = f2fs_setattr,
1046 .get_inode_acl = f2fs_get_acl,
1047 .set_acl = f2fs_set_acl,
1048 .listxattr = f2fs_listxattr,
1049 .fiemap = f2fs_fiemap,
1050 .fileattr_get = f2fs_fileattr_get,
1051 .fileattr_set = f2fs_fileattr_set,
1054 static int fill_zero(struct inode *inode, pgoff_t index,
1055 loff_t start, loff_t len)
1057 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1063 f2fs_balance_fs(sbi, true);
1066 page = f2fs_get_new_data_page(inode, NULL, index, false);
1067 f2fs_unlock_op(sbi);
1070 return PTR_ERR(page);
1072 f2fs_wait_on_page_writeback(page, DATA, true, true);
1073 zero_user(page, start, len);
1074 set_page_dirty(page);
1075 f2fs_put_page(page, 1);
1079 int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
1083 while (pg_start < pg_end) {
1084 struct dnode_of_data dn;
1085 pgoff_t end_offset, count;
1087 set_new_dnode(&dn, inode, NULL, NULL, 0);
1088 err = f2fs_get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
1090 if (err == -ENOENT) {
1091 pg_start = f2fs_get_next_page_offset(&dn,
1098 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
1099 count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);
1101 f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);
1103 f2fs_truncate_data_blocks_range(&dn, count);
1104 f2fs_put_dnode(&dn);
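/*
 * Worked example for the split below (assuming 4KiB blocks): punching
 * offset=1024 len=8192 gives pg_start=0, pg_end=2, off_start=1024 and
 * off_end=1024, so fill_zero() clears bytes 1024..4095 of page 0 and
 * bytes 0..1023 of page 2, while f2fs_truncate_hole() frees the fully
 * covered page 1.
 */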
1111 static int f2fs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
1113 pgoff_t pg_start, pg_end;
1114 loff_t off_start, off_end;
1117 ret = f2fs_convert_inline_inode(inode);
1121 pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
1122 pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
1124 off_start = offset & (PAGE_SIZE - 1);
1125 off_end = (offset + len) & (PAGE_SIZE - 1);
1127 if (pg_start == pg_end) {
1128 ret = fill_zero(inode, pg_start, off_start,
1129 off_end - off_start);
1134 ret = fill_zero(inode, pg_start++, off_start,
1135 PAGE_SIZE - off_start);
1140 ret = fill_zero(inode, pg_end, 0, off_end);
1145 if (pg_start < pg_end) {
1146 loff_t blk_start, blk_end;
1147 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1149 f2fs_balance_fs(sbi, true);
1151 blk_start = (loff_t)pg_start << PAGE_SHIFT;
1152 blk_end = (loff_t)pg_end << PAGE_SHIFT;
1154 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1155 filemap_invalidate_lock(inode->i_mapping);
1157 truncate_pagecache_range(inode, blk_start, blk_end - 1);
1160 ret = f2fs_truncate_hole(inode, pg_start, pg_end);
1161 f2fs_unlock_op(sbi);
1163 filemap_invalidate_unlock(inode->i_mapping);
1164 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
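/*
 * The helpers below implement block exchange for collapse/insert range:
 * __read_out_blkaddrs() records source block addresses (detaching
 * non-checkpointed ones), __clone_blkaddrs() replays them at the
 * destination, and __roll_back_blkaddrs() restores the source mappings
 * if the exchange fails midway.
 */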
1171 static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
1172 int *do_replace, pgoff_t off, pgoff_t len)
1174 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1175 struct dnode_of_data dn;
1179 set_new_dnode(&dn, inode, NULL, NULL, 0);
1180 ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
1181 if (ret && ret != -ENOENT) {
1183 } else if (ret == -ENOENT) {
1184 if (dn.max_level == 0)
1186 done = min((pgoff_t)ADDRS_PER_BLOCK(inode) -
1187 dn.ofs_in_node, len);
1193 done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
1194 dn.ofs_in_node, len);
1195 for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
1196 *blkaddr = f2fs_data_blkaddr(&dn);
1198 if (__is_valid_data_blkaddr(*blkaddr) &&
1199 !f2fs_is_valid_blkaddr(sbi, *blkaddr,
1200 DATA_GENERIC_ENHANCE)) {
1201 f2fs_put_dnode(&dn);
1202 f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
1203 return -EFSCORRUPTED;
1206 if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {
1208 if (f2fs_lfs_mode(sbi)) {
1209 f2fs_put_dnode(&dn);
1213 /* do not invalidate this block address */
1214 f2fs_update_data_blkaddr(&dn, NULL_ADDR);
1218 f2fs_put_dnode(&dn);
1227 static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
1228 int *do_replace, pgoff_t off, int len)
1230 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1231 struct dnode_of_data dn;
1234 for (i = 0; i < len; i++, do_replace++, blkaddr++) {
1235 if (*do_replace == 0)
1238 set_new_dnode(&dn, inode, NULL, NULL, 0);
1239 ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
1241 dec_valid_block_count(sbi, inode, 1);
1242 f2fs_invalidate_blocks(sbi, *blkaddr);
1244 f2fs_update_data_blkaddr(&dn, *blkaddr);
1246 f2fs_put_dnode(&dn);
1251 static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
1252 block_t *blkaddr, int *do_replace,
1253 pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
1255 struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
1260 if (blkaddr[i] == NULL_ADDR && !full) {
1265 if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
1266 struct dnode_of_data dn;
1267 struct node_info ni;
1271 set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
1272 ret = f2fs_get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
1276 ret = f2fs_get_node_info(sbi, dn.nid, &ni, false);
1278 f2fs_put_dnode(&dn);
1282 ilen = min((pgoff_t)
1283 ADDRS_PER_PAGE(dn.node_page, dst_inode) -
1284 dn.ofs_in_node, len - i);
1286 dn.data_blkaddr = f2fs_data_blkaddr(&dn);
1287 f2fs_truncate_data_blocks_range(&dn, 1);
1289 if (do_replace[i]) {
1290 f2fs_i_blocks_write(src_inode,
1292 f2fs_i_blocks_write(dst_inode,
1294 f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
1295 blkaddr[i], ni.version, true, false);
1301 new_size = (loff_t)(dst + i) << PAGE_SHIFT;
1302 if (dst_inode->i_size < new_size)
1303 f2fs_i_size_write(dst_inode, new_size);
1304 } while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));
1306 f2fs_put_dnode(&dn);
1308 struct page *psrc, *pdst;
1310 psrc = f2fs_get_lock_data_page(src_inode,
1313 return PTR_ERR(psrc);
1314 pdst = f2fs_get_new_data_page(dst_inode, NULL, dst + i,
1317 f2fs_put_page(psrc, 1);
1318 return PTR_ERR(pdst);
1320 memcpy_page(pdst, 0, psrc, 0, PAGE_SIZE);
1321 set_page_dirty(pdst);
1322 f2fs_put_page(pdst, 1);
1323 f2fs_put_page(psrc, 1);
1325 ret = f2fs_truncate_hole(src_inode,
1326 src + i, src + i + 1);
1335 static int __exchange_data_block(struct inode *src_inode,
1336 struct inode *dst_inode, pgoff_t src, pgoff_t dst,
1337 pgoff_t len, bool full)
1339 block_t *src_blkaddr;
1345 olen = min((pgoff_t)4 * ADDRS_PER_BLOCK(src_inode), len);
1347 src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
1348 array_size(olen, sizeof(block_t)),
1353 do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
1354 array_size(olen, sizeof(int)),
1357 kvfree(src_blkaddr);
1361 ret = __read_out_blkaddrs(src_inode, src_blkaddr,
1362 do_replace, src, olen);
1366 ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
1367 do_replace, src, dst, olen, full);
1375 kvfree(src_blkaddr);
1381 __roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen);
1382 kvfree(src_blkaddr);
1387 static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
1389 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1390 pgoff_t nrpages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1391 pgoff_t start = offset >> PAGE_SHIFT;
1392 pgoff_t end = (offset + len) >> PAGE_SHIFT;
1395 f2fs_balance_fs(sbi, true);
1397 /* avoid gc operation during block exchange */
1398 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1399 filemap_invalidate_lock(inode->i_mapping);
1402 f2fs_drop_extent_tree(inode);
1403 truncate_pagecache(inode, offset);
1404 ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
1405 f2fs_unlock_op(sbi);
1407 filemap_invalidate_unlock(inode->i_mapping);
1408 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1412 static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
1417 if (offset + len >= i_size_read(inode))
1420 /* the collapse range should be aligned to the f2fs block size. */
1421 if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
1424 ret = f2fs_convert_inline_inode(inode);
1428 /* write out all dirty pages from offset */
1429 ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1433 ret = f2fs_do_collapse(inode, offset, len);
1437 /* write out all moved pages, if possible */
1438 filemap_invalidate_lock(inode->i_mapping);
1439 filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1440 truncate_pagecache(inode, offset);
1442 new_size = i_size_read(inode) - len;
1443 ret = f2fs_truncate_blocks(inode, new_size, true);
1444 filemap_invalidate_unlock(inode->i_mapping);
1446 f2fs_i_size_write(inode, new_size);
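/*
 * For FALLOC_FL_ZERO_RANGE: reserve any missing blocks in [start, end),
 * then invalidate each existing block and reset its address to NEW_ADDR,
 * i.e. an allocated-but-unwritten block that reads back as zeroes.
 */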
1450 static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
1453 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1454 pgoff_t index = start;
1455 unsigned int ofs_in_node = dn->ofs_in_node;
1459 for (; index < end; index++, dn->ofs_in_node++) {
1460 if (f2fs_data_blkaddr(dn) == NULL_ADDR)
1464 dn->ofs_in_node = ofs_in_node;
1465 ret = f2fs_reserve_new_blocks(dn, count);
1469 dn->ofs_in_node = ofs_in_node;
1470 for (index = start; index < end; index++, dn->ofs_in_node++) {
1471 dn->data_blkaddr = f2fs_data_blkaddr(dn);
1473 * f2fs_reserve_new_blocks will not guarantee entire block allocation.
1476 if (dn->data_blkaddr == NULL_ADDR) {
1481 if (dn->data_blkaddr == NEW_ADDR)
1484 if (!f2fs_is_valid_blkaddr(sbi, dn->data_blkaddr,
1485 DATA_GENERIC_ENHANCE)) {
1486 ret = -EFSCORRUPTED;
1487 f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
1491 f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
1492 dn->data_blkaddr = NEW_ADDR;
1493 f2fs_set_data_blkaddr(dn);
1496 f2fs_update_read_extent_cache_range(dn, start, 0, index - start);
1497 f2fs_update_age_extent_cache_range(dn, start, index - start);
1502 static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
1505 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1506 struct address_space *mapping = inode->i_mapping;
1507 pgoff_t index, pg_start, pg_end;
1508 loff_t new_size = i_size_read(inode);
1509 loff_t off_start, off_end;
1512 ret = inode_newsize_ok(inode, (len + offset));
1516 ret = f2fs_convert_inline_inode(inode);
1520 ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
1524 pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
1525 pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
1527 off_start = offset & (PAGE_SIZE - 1);
1528 off_end = (offset + len) & (PAGE_SIZE - 1);
1530 if (pg_start == pg_end) {
1531 ret = fill_zero(inode, pg_start, off_start,
1532 off_end - off_start);
1536 new_size = max_t(loff_t, new_size, offset + len);
1539 ret = fill_zero(inode, pg_start++, off_start,
1540 PAGE_SIZE - off_start);
1544 new_size = max_t(loff_t, new_size,
1545 (loff_t)pg_start << PAGE_SHIFT);
1548 for (index = pg_start; index < pg_end;) {
1549 struct dnode_of_data dn;
1550 unsigned int end_offset;
1553 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1554 filemap_invalidate_lock(mapping);
1556 truncate_pagecache_range(inode,
1557 (loff_t)index << PAGE_SHIFT,
1558 ((loff_t)pg_end << PAGE_SHIFT) - 1);
1562 set_new_dnode(&dn, inode, NULL, NULL, 0);
1563 ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
1565 f2fs_unlock_op(sbi);
1566 filemap_invalidate_unlock(mapping);
1567 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1571 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
1572 end = min(pg_end, end_offset - dn.ofs_in_node + index);
1574 ret = f2fs_do_zero_range(&dn, index, end);
1575 f2fs_put_dnode(&dn);
1577 f2fs_unlock_op(sbi);
1578 filemap_invalidate_unlock(mapping);
1579 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1581 f2fs_balance_fs(sbi, dn.node_changed);
1587 new_size = max_t(loff_t, new_size,
1588 (loff_t)index << PAGE_SHIFT);
1592 ret = fill_zero(inode, pg_end, 0, off_end);
1596 new_size = max_t(loff_t, new_size, offset + len);
1601 if (new_size > i_size_read(inode)) {
1602 if (mode & FALLOC_FL_KEEP_SIZE)
1603 file_set_keep_isize(inode);
1605 f2fs_i_size_write(inode, new_size);
1610 static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
1612 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1613 struct address_space *mapping = inode->i_mapping;
1614 pgoff_t nr, pg_start, pg_end, delta, idx;
1618 new_size = i_size_read(inode) + len;
1619 ret = inode_newsize_ok(inode, new_size);
1623 if (offset >= i_size_read(inode))
1626 /* the insert range should be aligned to the f2fs block size. */
1627 if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
1630 ret = f2fs_convert_inline_inode(inode);
1634 f2fs_balance_fs(sbi, true);
1636 filemap_invalidate_lock(mapping);
1637 ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
1638 filemap_invalidate_unlock(mapping);
1642 /* write out all dirty pages from offset */
1643 ret = filemap_write_and_wait_range(mapping, offset, LLONG_MAX);
1647 pg_start = offset >> PAGE_SHIFT;
1648 pg_end = (offset + len) >> PAGE_SHIFT;
1649 delta = pg_end - pg_start;
1650 idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1652 /* avoid gc operation during block exchange */
1653 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1654 filemap_invalidate_lock(mapping);
1655 truncate_pagecache(inode, offset);
1657 while (!ret && idx > pg_start) {
1658 nr = idx - pg_start;
1664 f2fs_drop_extent_tree(inode);
1666 ret = __exchange_data_block(inode, inode, idx,
1667 idx + delta, nr, false);
1668 f2fs_unlock_op(sbi);
1670 filemap_invalidate_unlock(mapping);
1671 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1673 /* write out all moved pages, if possible */
1674 filemap_invalidate_lock(mapping);
1675 filemap_write_and_wait_range(mapping, offset, LLONG_MAX);
1676 truncate_pagecache(inode, offset);
1677 filemap_invalidate_unlock(mapping);
1680 f2fs_i_size_write(inode, new_size);
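/*
 * Preallocate blocks for fallocate(). For a pinned file, whole sections
 * are allocated from CURSEG_COLD_DATA_PINNED (running foreground GC first
 * if free sections are scarce); otherwise a single f2fs_map_blocks() call
 * with F2FS_GET_BLOCK_PRE_AIO reserves the range.
 */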
1684 static int f2fs_expand_inode_data(struct inode *inode, loff_t offset,
1685 loff_t len, int mode)
1687 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1688 struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
1689 .m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
1690 .m_may_create = true };
1691 struct f2fs_gc_control gc_control = { .victim_segno = NULL_SEGNO,
1692 .init_gc_type = FG_GC,
1693 .should_migrate_blocks = false,
1694 .err_gc_skipped = true,
1695 .nr_free_secs = 0 };
1696 pgoff_t pg_start, pg_end;
1699 block_t expanded = 0;
1702 err = inode_newsize_ok(inode, (len + offset));
1706 err = f2fs_convert_inline_inode(inode);
1710 f2fs_balance_fs(sbi, true);
1712 pg_start = ((unsigned long long)offset) >> PAGE_SHIFT;
1713 pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
1714 off_end = (offset + len) & (PAGE_SIZE - 1);
1716 map.m_lblk = pg_start;
1717 map.m_len = pg_end - pg_start;
1724 if (f2fs_is_pinned_file(inode)) {
1725 block_t sec_blks = CAP_BLKS_PER_SEC(sbi);
1726 block_t sec_len = roundup(map.m_len, sec_blks);
1728 map.m_len = sec_blks;
1730 if (has_not_enough_free_secs(sbi, 0,
1731 GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
1732 f2fs_down_write(&sbi->gc_lock);
1733 err = f2fs_gc(sbi, &gc_control);
1734 if (err && err != -ENODATA)
1738 f2fs_down_write(&sbi->pin_sem);
1741 f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
1742 f2fs_unlock_op(sbi);
1744 map.m_seg_type = CURSEG_COLD_DATA_PINNED;
1745 err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_PRE_DIO);
1746 file_dont_truncate(inode);
1748 f2fs_up_write(&sbi->pin_sem);
1750 expanded += map.m_len;
1751 sec_len -= map.m_len;
1752 map.m_lblk += map.m_len;
1753 if (!err && sec_len)
1756 map.m_len = expanded;
1758 err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_PRE_AIO);
1759 expanded = map.m_len;
1768 last_off = pg_start + expanded - 1;
1770 /* update new size to the failed position */
1771 new_size = (last_off == pg_end) ? offset + len :
1772 (loff_t)(last_off + 1) << PAGE_SHIFT;
1774 new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
1777 if (new_size > i_size_read(inode)) {
1778 if (mode & FALLOC_FL_KEEP_SIZE)
1779 file_set_keep_isize(inode);
1781 f2fs_i_size_write(inode, new_size);
1787 static long f2fs_fallocate(struct file *file, int mode,
1788 loff_t offset, loff_t len)
1790 struct inode *inode = file_inode(file);
1793 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
1795 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
1797 if (!f2fs_is_compress_backend_ready(inode))
1800 /* f2fs only supports ->fallocate for regular files */
1801 if (!S_ISREG(inode->i_mode))
1804 if (IS_ENCRYPTED(inode) &&
1805 (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
1809 * A pinned file should not support partial truncation, since its
1810 * blocks can be in use by applications.
1812 if ((f2fs_compressed_file(inode) || f2fs_is_pinned_file(inode)) &&
1813 (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
1814 FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE)))
1817 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
1818 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
1819 FALLOC_FL_INSERT_RANGE))
1824 ret = file_modified(file);
1828 if (mode & FALLOC_FL_PUNCH_HOLE) {
1829 if (offset >= inode->i_size)
1832 ret = f2fs_punch_hole(inode, offset, len);
1833 } else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
1834 ret = f2fs_collapse_range(inode, offset, len);
1835 } else if (mode & FALLOC_FL_ZERO_RANGE) {
1836 ret = f2fs_zero_range(inode, offset, len, mode);
1837 } else if (mode & FALLOC_FL_INSERT_RANGE) {
1838 ret = f2fs_insert_range(inode, offset, len);
1840 ret = f2fs_expand_inode_data(inode, offset, len, mode);
1844 inode->i_mtime = inode->i_ctime = current_time(inode);
1845 f2fs_mark_inode_dirty_sync(inode, false);
1846 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
1850 inode_unlock(inode);
1852 trace_f2fs_fallocate(inode, mode, offset, len, ret);
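/*
 * Userspace usage sketch (fd is a hypothetical descriptor of a regular
 * f2fs file); note the VFS requires FALLOC_FL_KEEP_SIZE together with
 * FALLOC_FL_PUNCH_HOLE:
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 4096, 8192);
 */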
1856 static int f2fs_release_file(struct inode *inode, struct file *filp)
1859 * f2fs_release_file is called on every close. So we should not drop
1860 * any in-memory pages on a close issued by another process.
1862 if (!(filp->f_mode & FMODE_WRITE) ||
1863 atomic_read(&inode->i_writecount) != 1)
1867 f2fs_abort_atomic_write(inode, true);
1868 inode_unlock(inode);
1873 static int f2fs_file_flush(struct file *file, fl_owner_t id)
1875 struct inode *inode = file_inode(file);
1878 * If the process doing a transaction crashed, we should roll it back.
1879 * Otherwise, other readers/writers could see a corrupted database
1880 * until all the writers close the file. Since this should be done
1881 * before dropping the file lock, it needs to happen in ->flush.
1883 if (F2FS_I(inode)->atomic_write_task == current &&
1884 (current->flags & PF_EXITING)) {
1886 f2fs_abort_atomic_write(inode, true);
1887 inode_unlock(inode);
1893 static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
1895 struct f2fs_inode_info *fi = F2FS_I(inode);
1896 u32 masked_flags = fi->i_flags & mask;
1898 /* mask can be shrunk by flags_valid selector */
1902 /* Is it a quota file? Do not allow the user to mess with it */
1902 if (IS_NOQUOTA(inode))
1905 if ((iflags ^ masked_flags) & F2FS_CASEFOLD_FL) {
1906 if (!f2fs_sb_has_casefold(F2FS_I_SB(inode)))
1908 if (!f2fs_empty_dir(inode))
1912 if (iflags & (F2FS_COMPR_FL | F2FS_NOCOMP_FL)) {
1913 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
1915 if ((iflags & F2FS_COMPR_FL) && (iflags & F2FS_NOCOMP_FL))
1919 if ((iflags ^ masked_flags) & F2FS_COMPR_FL) {
1920 if (masked_flags & F2FS_COMPR_FL) {
1921 if (!f2fs_disable_compressed_file(inode))
1924 /* try to convert inline_data to support compression */
1925 int err = f2fs_convert_inline_inode(inode);
1928 if (!f2fs_may_compress(inode))
1930 if (S_ISREG(inode->i_mode) && F2FS_HAS_BLOCKS(inode))
1932 if (set_compress_context(inode))
1937 fi->i_flags = iflags | (fi->i_flags & ~mask);
1938 f2fs_bug_on(F2FS_I_SB(inode), (fi->i_flags & F2FS_COMPR_FL) &&
1939 (fi->i_flags & F2FS_NOCOMP_FL));
1941 if (fi->i_flags & F2FS_PROJINHERIT_FL)
1942 set_inode_flag(inode, FI_PROJ_INHERIT);
1944 clear_inode_flag(inode, FI_PROJ_INHERIT);
1946 inode->i_ctime = current_time(inode);
1947 f2fs_set_inode_flags(inode);
1948 f2fs_mark_inode_dirty_sync(inode, true);
1952 /* FS_IOC_[GS]ETFLAGS and FS_IOC_FS[GS]ETXATTR support */
1955 * To make a new on-disk f2fs i_flag gettable via FS_IOC_GETFLAGS, add an entry
1956 * for it to f2fs_fsflags_map[], and add its FS_*_FL equivalent to
1957 * F2FS_GETTABLE_FS_FL. To also make it settable via FS_IOC_SETFLAGS, also add
1958 * its FS_*_FL equivalent to F2FS_SETTABLE_FS_FL.
1960 * Translating flags to fsx_flags value used by FS_IOC_FSGETXATTR and
1961 * FS_IOC_FSSETXATTR is done by the VFS.
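/*
 * For instance, a hypothetical new on-disk flag F2FS_FOO_FL (illustration
 * only) would get an { F2FS_FOO_FL, FS_FOO_FL } entry in f2fs_fsflags_map[]
 * below, FS_FOO_FL would be OR-ed into F2FS_GETTABLE_FS_FL, and also into
 * F2FS_SETTABLE_FS_FL if userspace may set it.
 */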
1964 static const struct {
1967 } f2fs_fsflags_map[] = {
1968 { F2FS_COMPR_FL, FS_COMPR_FL },
1969 { F2FS_SYNC_FL, FS_SYNC_FL },
1970 { F2FS_IMMUTABLE_FL, FS_IMMUTABLE_FL },
1971 { F2FS_APPEND_FL, FS_APPEND_FL },
1972 { F2FS_NODUMP_FL, FS_NODUMP_FL },
1973 { F2FS_NOATIME_FL, FS_NOATIME_FL },
1974 { F2FS_NOCOMP_FL, FS_NOCOMP_FL },
1975 { F2FS_INDEX_FL, FS_INDEX_FL },
1976 { F2FS_DIRSYNC_FL, FS_DIRSYNC_FL },
1977 { F2FS_PROJINHERIT_FL, FS_PROJINHERIT_FL },
1978 { F2FS_CASEFOLD_FL, FS_CASEFOLD_FL },
1981 #define F2FS_GETTABLE_FS_FL ( \
1991 FS_PROJINHERIT_FL | \
1993 FS_INLINE_DATA_FL | \
1998 #define F2FS_SETTABLE_FS_FL ( \
2007 FS_PROJINHERIT_FL | \
2010 /* Convert f2fs on-disk i_flags to FS_IOC_{GET,SET}FLAGS flags */
2011 static inline u32 f2fs_iflags_to_fsflags(u32 iflags)
2016 for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
2017 if (iflags & f2fs_fsflags_map[i].iflag)
2018 fsflags |= f2fs_fsflags_map[i].fsflag;
2023 /* Convert FS_IOC_{GET,SET}FLAGS flags to f2fs on-disk i_flags */
2024 static inline u32 f2fs_fsflags_to_iflags(u32 fsflags)
2029 for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
2030 if (fsflags & f2fs_fsflags_map[i].fsflag)
2031 iflags |= f2fs_fsflags_map[i].iflag;
2036 static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
2038 struct inode *inode = file_inode(filp);
2040 return put_user(inode->i_generation, (int __user *)arg);
2043 static int f2fs_ioc_start_atomic_write(struct file *filp, bool truncate)
2045 struct inode *inode = file_inode(filp);
2046 struct mnt_idmap *idmap = file_mnt_idmap(filp);
2047 struct f2fs_inode_info *fi = F2FS_I(inode);
2048 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2049 struct inode *pinode;
2053 if (!inode_owner_or_capable(idmap, inode))
2056 if (!S_ISREG(inode->i_mode))
2059 if (filp->f_flags & O_DIRECT)
2062 ret = mnt_want_write_file(filp);
2068 if (!f2fs_disable_compressed_file(inode)) {
2073 if (f2fs_is_atomic_file(inode))
2076 ret = f2fs_convert_inline_inode(inode);
2080 f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
2083 * Should wait for end_io to count F2FS_WB_CP_DATA correctly via
2084 * f2fs_is_atomic_file.
2086 if (get_dirty_pages(inode))
2087 f2fs_warn(sbi, "Unexpected flush for atomic writes: ino=%lu, npages=%u",
2088 inode->i_ino, get_dirty_pages(inode));
2089 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
2091 f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
2095 /* Check if the inode already has a COW inode */
2096 if (fi->cow_inode == NULL) {
2097 /* Create a COW inode for atomic write */
2098 pinode = f2fs_iget(inode->i_sb, fi->i_pino);
2099 if (IS_ERR(pinode)) {
2100 f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
2101 ret = PTR_ERR(pinode);
2105 ret = f2fs_get_tmpfile(idmap, pinode, &fi->cow_inode);
2108 f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
2112 set_inode_flag(fi->cow_inode, FI_COW_FILE);
2113 clear_inode_flag(fi->cow_inode, FI_INLINE_DATA);
2115 /* Reuse the already created COW inode */
2116 ret = f2fs_do_truncate_blocks(fi->cow_inode, 0, true);
2118 f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
2123 f2fs_write_inode(inode, NULL);
2125 stat_inc_atomic_inode(inode);
2127 set_inode_flag(inode, FI_ATOMIC_FILE);
2129 isize = i_size_read(inode);
2130 fi->original_i_size = isize;
2132 set_inode_flag(inode, FI_ATOMIC_REPLACE);
2133 truncate_inode_pages_final(inode->i_mapping);
2134 f2fs_i_size_write(inode, 0);
2137 f2fs_i_size_write(fi->cow_inode, isize);
2139 f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
2141 f2fs_update_time(sbi, REQ_TIME);
2142 fi->atomic_write_task = current;
2143 stat_update_max_atomic_write(inode);
2144 fi->atomic_write_cnt = 0;
2146 inode_unlock(inode);
2147 mnt_drop_write_file(filp);
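/*
 * Usage sketch of the atomic-write protocol (fd is a hypothetical
 * descriptor of a regular file opened for buffered writes):
 *
 *	ioctl(fd, F2FS_IOC_START_ATOMIC_WRITE);
 *	write(fd, buf, len);		(data is staged in the COW inode)
 *	ioctl(fd, F2FS_IOC_COMMIT_ATOMIC_WRITE);
 *
 * F2FS_IOC_ABORT_ATOMIC_WRITE discards the staged data instead.
 */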
2151 static int f2fs_ioc_commit_atomic_write(struct file *filp)
2153 struct inode *inode = file_inode(filp);
2154 struct mnt_idmap *idmap = file_mnt_idmap(filp);
2157 if (!inode_owner_or_capable(idmap, inode))
2160 ret = mnt_want_write_file(filp);
2164 f2fs_balance_fs(F2FS_I_SB(inode), true);
2168 if (f2fs_is_atomic_file(inode)) {
2169 ret = f2fs_commit_atomic_write(inode);
2171 ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
2173 f2fs_abort_atomic_write(inode, ret);
2175 ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
2178 inode_unlock(inode);
2179 mnt_drop_write_file(filp);
2183 static int f2fs_ioc_abort_atomic_write(struct file *filp)
2185 struct inode *inode = file_inode(filp);
2186 struct mnt_idmap *idmap = file_mnt_idmap(filp);
2189 if (!inode_owner_or_capable(idmap, inode))
2192 ret = mnt_want_write_file(filp);
2198 f2fs_abort_atomic_write(inode, true);
2200 inode_unlock(inode);
2202 mnt_drop_write_file(filp);
2203 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2207 static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
2209 struct inode *inode = file_inode(filp);
2210 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2211 struct super_block *sb = sbi->sb;
2215 if (!capable(CAP_SYS_ADMIN))
2218 if (get_user(in, (__u32 __user *)arg))
2221 if (in != F2FS_GOING_DOWN_FULLSYNC) {
2222 ret = mnt_want_write_file(filp);
2224 if (ret == -EROFS) {
2226 f2fs_stop_checkpoint(sbi, false,
2227 STOP_CP_REASON_SHUTDOWN);
2228 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2229 trace_f2fs_shutdown(sbi, in, ret);
2236 case F2FS_GOING_DOWN_FULLSYNC:
2237 ret = freeze_bdev(sb->s_bdev);
2240 f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
2241 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2242 thaw_bdev(sb->s_bdev);
2244 case F2FS_GOING_DOWN_METASYNC:
2245 /* do checkpoint only */
2246 ret = f2fs_sync_fs(sb, 1);
2249 f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
2250 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2252 case F2FS_GOING_DOWN_NOSYNC:
2253 f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
2254 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2256 case F2FS_GOING_DOWN_METAFLUSH:
2257 f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
2258 f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
2259 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2261 case F2FS_GOING_DOWN_NEED_FSCK:
2262 set_sbi_flag(sbi, SBI_NEED_FSCK);
2263 set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
2264 set_sbi_flag(sbi, SBI_IS_DIRTY);
2265 /* do checkpoint only */
2266 ret = f2fs_sync_fs(sb, 1);
2273 f2fs_stop_gc_thread(sbi);
2274 f2fs_stop_discard_thread(sbi);
2276 f2fs_drop_discard_cmd(sbi);
2277 clear_opt(sbi, DISCARD);
2279 f2fs_update_time(sbi, REQ_TIME);
2281 if (in != F2FS_GOING_DOWN_FULLSYNC)
2282 mnt_drop_write_file(filp);
2284 trace_f2fs_shutdown(sbi, in, ret);
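/*
 * Usage sketch (fd is a hypothetical descriptor on the target filesystem):
 *
 *	__u32 in = F2FS_GOING_DOWN_METASYNC;
 *	ioctl(fd, F2FS_IOC_SHUTDOWN, &in);
 */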
2289 static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
2291 struct inode *inode = file_inode(filp);
2292 struct super_block *sb = inode->i_sb;
2293 struct fstrim_range range;
2296 if (!capable(CAP_SYS_ADMIN))
2299 if (!f2fs_hw_support_discard(F2FS_SB(sb)))
2302 if (copy_from_user(&range, (struct fstrim_range __user *)arg,
2306 ret = mnt_want_write_file(filp);
2310 range.minlen = max((unsigned int)range.minlen,
2311 bdev_discard_granularity(sb->s_bdev));
2312 ret = f2fs_trim_fs(F2FS_SB(sb), &range);
2313 mnt_drop_write_file(filp);
2317 if (copy_to_user((struct fstrim_range __user *)arg, &range,
2320 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
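/*
 * Usage sketch (fd is hypothetical; FITRIM is the generic VFS ioctl from
 * <linux/fs.h>):
 *
 *	struct fstrim_range r = { .start = 0, .len = ULLONG_MAX, .minlen = 0 };
 *	ioctl(fd, FITRIM, &r);	(r.len returns the number of bytes trimmed)
 */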
2324 static bool uuid_is_nonzero(__u8 u[16])
2328 for (i = 0; i < 16; i++)
2334 static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
2336 struct inode *inode = file_inode(filp);
2338 if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode)))
2341 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2343 return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
2346 static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
2348 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2350 return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
2353 static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
2355 struct inode *inode = file_inode(filp);
2356 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2357 u8 encrypt_pw_salt[16];
2360 if (!f2fs_sb_has_encrypt(sbi))
2363 err = mnt_want_write_file(filp);
2367 f2fs_down_write(&sbi->sb_lock);
2369 if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
2372 /* update superblock with uuid */
2373 generate_random_uuid(sbi->raw_super->encrypt_pw_salt);
2375 err = f2fs_commit_super(sbi, false);
2378 memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
2382 memcpy(encrypt_pw_salt, sbi->raw_super->encrypt_pw_salt, 16);
2384 f2fs_up_write(&sbi->sb_lock);
2385 mnt_drop_write_file(filp);
2387 if (!err && copy_to_user((__u8 __user *)arg, encrypt_pw_salt, 16))
2393 static int f2fs_ioc_get_encryption_policy_ex(struct file *filp,
2396 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2399 return fscrypt_ioctl_get_policy_ex(filp, (void __user *)arg);
2402 static int f2fs_ioc_add_encryption_key(struct file *filp, unsigned long arg)
2404 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2407 return fscrypt_ioctl_add_key(filp, (void __user *)arg);
2410 static int f2fs_ioc_remove_encryption_key(struct file *filp, unsigned long arg)
2412 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2415 return fscrypt_ioctl_remove_key(filp, (void __user *)arg);
2418 static int f2fs_ioc_remove_encryption_key_all_users(struct file *filp,
2421 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2424 return fscrypt_ioctl_remove_key_all_users(filp, (void __user *)arg);
2427 static int f2fs_ioc_get_encryption_key_status(struct file *filp,
2430 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2433 return fscrypt_ioctl_get_key_status(filp, (void __user *)arg);
2436 static int f2fs_ioc_get_encryption_nonce(struct file *filp, unsigned long arg)
2438 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2441 return fscrypt_ioctl_get_nonce(filp, (void __user *)arg);
2444 static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
2446 struct inode *inode = file_inode(filp);
2447 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2448 struct f2fs_gc_control gc_control = { .victim_segno = NULL_SEGNO,
2450 .should_migrate_blocks = false,
2451 .nr_free_secs = 0 };
2455 if (!capable(CAP_SYS_ADMIN))
2458 if (get_user(sync, (__u32 __user *)arg))
2461 if (f2fs_readonly(sbi->sb))
2464 ret = mnt_want_write_file(filp);
2469 if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
2474 f2fs_down_write(&sbi->gc_lock);
2477 gc_control.init_gc_type = sync ? FG_GC : BG_GC;
2478 gc_control.err_gc_skipped = sync;
2479 ret = f2fs_gc(sbi, &gc_control);
2481 mnt_drop_write_file(filp);
2485 static int __f2fs_ioc_gc_range(struct file *filp, struct f2fs_gc_range *range)
2487 struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
2488 struct f2fs_gc_control gc_control = {
2489 .init_gc_type = range->sync ? FG_GC : BG_GC,
2491 .should_migrate_blocks = false,
2492 .err_gc_skipped = range->sync,
2493 .nr_free_secs = 0 };
2497 if (!capable(CAP_SYS_ADMIN))
2499 if (f2fs_readonly(sbi->sb))
2502 end = range->start + range->len;
2503 if (end < range->start || range->start < MAIN_BLKADDR(sbi) ||
2504 end >= MAX_BLKADDR(sbi))
2507 ret = mnt_want_write_file(filp);
2513 if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
2518 f2fs_down_write(&sbi->gc_lock);
2521 gc_control.victim_segno = GET_SEGNO(sbi, range->start);
2522 ret = f2fs_gc(sbi, &gc_control);
2528 range->start += CAP_BLKS_PER_SEC(sbi);
2529 if (range->start <= end)
2532 mnt_drop_write_file(filp);
2536 static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
2538 struct f2fs_gc_range range;
2540 if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
2543 return __f2fs_ioc_gc_range(filp, &range);
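/*
 * Usage sketch (fd and the block values are hypothetical; struct
 * f2fs_gc_range comes from <uapi/linux/f2fs.h> and start/len are block
 * addresses within the main area):
 *
 *	struct f2fs_gc_range range = { .sync = 1, .start = blkaddr, .len = nblks };
 *	ioctl(fd, F2FS_IOC_GARBAGE_COLLECT_RANGE, &range);
 */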
2546 static int f2fs_ioc_write_checkpoint(struct file *filp)
2548 struct inode *inode = file_inode(filp);
2549 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2552 if (!capable(CAP_SYS_ADMIN))
2555 if (f2fs_readonly(sbi->sb))
2558 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2559 f2fs_info(sbi, "Skipping Checkpoint. Checkpoints currently disabled.");
2563 ret = mnt_want_write_file(filp);
2567 ret = f2fs_sync_fs(sbi->sb, 1);
2569 mnt_drop_write_file(filp);
2573 static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
2575 struct f2fs_defragment *range)
2577 struct inode *inode = file_inode(filp);
2578 struct f2fs_map_blocks map = { .m_next_extent = NULL,
2579 .m_seg_type = NO_CHECK_TYPE,
2580 .m_may_create = false };
2581 struct extent_info ei = {};
2582 pgoff_t pg_start, pg_end, next_pgofs;
2583 unsigned int blk_per_seg = sbi->blocks_per_seg;
2584 unsigned int total = 0, sec_num;
2585 block_t blk_end = 0;
2586 bool fragmented = false;
2589 pg_start = range->start >> PAGE_SHIFT;
2590 pg_end = (range->start + range->len) >> PAGE_SHIFT;
2592 f2fs_balance_fs(sbi, true);
2596 /* if in-place-update policy is enabled, don't waste time here */
2597 set_inode_flag(inode, FI_OPU_WRITE);
2598 if (f2fs_should_update_inplace(inode, NULL)) {
2603 /* writeback all dirty pages in the range */
2604 err = filemap_write_and_wait_range(inode->i_mapping, range->start,
2605 range->start + range->len - 1);
2610 * look up mapping info in the extent cache; skip defragmenting if the
2611 * physical block addresses are contiguous.
2613 if (f2fs_lookup_read_extent_cache(inode, pg_start, &ei)) {
2614 if (ei.fofs + ei.len >= pg_end)
2618 map.m_lblk = pg_start;
2619 map.m_next_pgofs = &next_pgofs;
2622 * lookup mapping info in dnode page cache, skip defragmenting if all
2623 * physical block addresses are continuous even if there are hole(s)
2624 * in logical blocks.
2626 while (map.m_lblk < pg_end) {
2627 map.m_len = pg_end - map.m_lblk;
2628 err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DEFAULT);
2632 if (!(map.m_flags & F2FS_MAP_FLAGS)) {
2633 map.m_lblk = next_pgofs;
2637 if (blk_end && blk_end != map.m_pblk)
		/* record the total number of blocks that we're going to move */
		total += map.m_len;

		blk_end = map.m_pblk + map.m_len;
2645 map.m_lblk += map.m_len;
2653 sec_num = DIV_ROUND_UP(total, CAP_BLKS_PER_SEC(sbi));
	 * make sure there are enough free sections for LFS allocation; this
	 * avoids running defragmentation in SSR mode when free sections are
	 * being allocated intensively
	 */
2660 if (has_not_enough_free_secs(sbi, 0, sec_num)) {
2665 map.m_lblk = pg_start;
2666 map.m_len = pg_end - pg_start;
2669 while (map.m_lblk < pg_end) {
2674 map.m_len = pg_end - map.m_lblk;
2675 err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DEFAULT);
2679 if (!(map.m_flags & F2FS_MAP_FLAGS)) {
2680 map.m_lblk = next_pgofs;
2684 set_inode_flag(inode, FI_SKIP_WRITES);
2687 while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
2690 page = f2fs_get_lock_data_page(inode, idx, true);
2692 err = PTR_ERR(page);
2696 set_page_dirty(page);
2697 set_page_private_gcing(page);
2698 f2fs_put_page(page, 1);
2707 if (map.m_lblk < pg_end && cnt < blk_per_seg)
2710 clear_inode_flag(inode, FI_SKIP_WRITES);
2712 err = filemap_fdatawrite(inode->i_mapping);
2717 clear_inode_flag(inode, FI_SKIP_WRITES);
2719 clear_inode_flag(inode, FI_OPU_WRITE);
2720 inode_unlock(inode);
2722 range->len = (u64)total << PAGE_SHIFT;
2726 static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
2728 struct inode *inode = file_inode(filp);
2729 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2730 struct f2fs_defragment range;
2733 if (!capable(CAP_SYS_ADMIN))
2736 if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
2739 if (f2fs_readonly(sbi->sb))
2742 if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
2746 /* verify alignment of offset & size */
2747 if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
2750 if (unlikely((range.start + range.len) >> PAGE_SHIFT >
2751 max_file_blocks(inode)))
2754 err = mnt_want_write_file(filp);
2758 err = f2fs_defragment_range(sbi, filp, &range);
2759 mnt_drop_write_file(filp);
	f2fs_update_time(sbi, REQ_TIME);
	if (err < 0)
		return err;

	if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
							sizeof(range)))
		return -EFAULT;

	return 0;
}
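/*
 * Illustrative userspace sketch: F2FS_IOC_DEFRAGMENT takes a struct
 * f2fs_defragment { __u64 start; __u64 len; } (uapi), both block-aligned as
 * verified above; on success the kernel rewrites range.len to the number of
 * bytes queued for relocation. The 1 MiB length is hypothetical.
 *
 *	struct f2fs_defragment df = { .start = 0, .len = 1 << 20 };
 *	ioctl(fd, F2FS_IOC_DEFRAGMENT, &df);
 */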
2772 static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
2773 struct file *file_out, loff_t pos_out, size_t len)
2775 struct inode *src = file_inode(file_in);
2776 struct inode *dst = file_inode(file_out);
2777 struct f2fs_sb_info *sbi = F2FS_I_SB(src);
2778 size_t olen = len, dst_max_i_size = 0;
	if (file_in->f_path.mnt != file_out->f_path.mnt ||
				src->i_sb != dst->i_sb)
		return -EXDEV;

	if (unlikely(f2fs_readonly(src->i_sb)))
		return -EROFS;

	if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
		return -EINVAL;

	if (IS_ENCRYPTED(src) || IS_ENCRYPTED(dst))
		return -EOPNOTSUPP;

	if (pos_out < 0 || pos_in < 0)
		return -EINVAL;

	if (pos_in == pos_out)
		return 0;

	if (pos_out > pos_in && pos_out < pos_in + len)
		return -EINVAL;
2808 if (!inode_trylock(dst))
2813 if (pos_in + len > src->i_size || pos_in + len < pos_in)
2816 olen = len = src->i_size - pos_in;
2817 if (pos_in + len == src->i_size)
2818 len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
2824 dst_osize = dst->i_size;
2825 if (pos_out + olen > dst->i_size)
2826 dst_max_i_size = pos_out + olen;
2828 /* verify the end result is block aligned */
2829 if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
2830 !IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
2831 !IS_ALIGNED(pos_out, F2FS_BLKSIZE))
2834 ret = f2fs_convert_inline_inode(src);
2838 ret = f2fs_convert_inline_inode(dst);
2842 /* write out all dirty pages from offset */
2843 ret = filemap_write_and_wait_range(src->i_mapping,
2844 pos_in, pos_in + len);
2848 ret = filemap_write_and_wait_range(dst->i_mapping,
2849 pos_out, pos_out + len);
2853 f2fs_balance_fs(sbi, true);
2855 f2fs_down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2858 if (!f2fs_down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
2863 ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
2864 pos_out >> F2FS_BLKSIZE_BITS,
2865 len >> F2FS_BLKSIZE_BITS, false);
2869 f2fs_i_size_write(dst, dst_max_i_size);
2870 else if (dst_osize != dst->i_size)
2871 f2fs_i_size_write(dst, dst_osize);
2873 f2fs_unlock_op(sbi);
2876 f2fs_up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
2878 f2fs_up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
static int __f2fs_ioc_move_range(struct file *filp,
				struct f2fs_move_range *range)
{
	struct fd dst;
	int err;

	if (!(filp->f_mode & FMODE_READ) ||
			!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	dst = fdget(range->dst_fd);
	if (!dst.file)
		return -EBADF;

	if (!(dst.file->f_mode & FMODE_WRITE)) {
		err = -EBADF;
		goto err_out;
	}

	err = mnt_want_write_file(filp);
	if (err)
		goto err_out;

	err = f2fs_move_file_range(filp, range->pos_in, dst.file,
					range->pos_out, range->len);

	mnt_drop_write_file(filp);
err_out:
	fdput(dst);
	return err;
}
static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
{
	struct f2fs_move_range range;

	if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
							sizeof(range)))
		return -EFAULT;
	return __f2fs_ioc_move_range(filp, &range);
}
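/*
 * Illustrative userspace sketch: F2FS_IOC_MOVE_RANGE exchanges blocks
 * between two regular files on the same f2fs instance via struct
 * f2fs_move_range { __u32 dst_fd; __u64 pos_in; __u64 pos_out; __u64 len; }
 * (uapi). Offsets and length must end up block-aligned, per the checks in
 * f2fs_move_file_range() above. The fds below are hypothetical.
 *
 *	struct f2fs_move_range mr = {
 *		.dst_fd = dst_fd, .pos_in = 0, .pos_out = 0, .len = 4096,
 *	};
 *	ioctl(src_fd, F2FS_IOC_MOVE_RANGE, &mr);
 */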
2929 static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
2931 struct inode *inode = file_inode(filp);
2932 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2933 struct sit_info *sm = SIT_I(sbi);
2934 unsigned int start_segno = 0, end_segno = 0;
2935 unsigned int dev_start_segno = 0, dev_end_segno = 0;
2936 struct f2fs_flush_device range;
2937 struct f2fs_gc_control gc_control = {
2938 .init_gc_type = FG_GC,
2939 .should_migrate_blocks = true,
2940 .err_gc_skipped = true,
2941 .nr_free_secs = 0 };
2944 if (!capable(CAP_SYS_ADMIN))
2947 if (f2fs_readonly(sbi->sb))
2950 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2953 if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
2957 if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num ||
2958 __is_large_section(sbi)) {
2959 f2fs_warn(sbi, "Can't flush %u in %d for segs_per_sec %u != 1",
2960 range.dev_num, sbi->s_ndevs, sbi->segs_per_sec);
2964 ret = mnt_want_write_file(filp);
2968 if (range.dev_num != 0)
2969 dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
2970 dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);
2972 start_segno = sm->last_victim[FLUSH_DEVICE];
2973 if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
2974 start_segno = dev_start_segno;
2975 end_segno = min(start_segno + range.segments, dev_end_segno);
2977 while (start_segno < end_segno) {
2978 if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
2982 sm->last_victim[GC_CB] = end_segno + 1;
2983 sm->last_victim[GC_GREEDY] = end_segno + 1;
2984 sm->last_victim[ALLOC_NEXT] = end_segno + 1;
2986 gc_control.victim_segno = start_segno;
2987 ret = f2fs_gc(sbi, &gc_control);
2995 mnt_drop_write_file(filp);
static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);

	/* Must validate to set it with SQLite behavior in Android. */
	sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;

	return put_user(sb_feature, (u32 __user *)arg);
}
int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
{
	struct dquot *transfer_to[MAXQUOTAS] = {};
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	int err;

	transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
	if (IS_ERR(transfer_to[PRJQUOTA]))
		return PTR_ERR(transfer_to[PRJQUOTA]);

	err = __dquot_transfer(inode, transfer_to);
	if (err)
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	dqput(transfer_to[PRJQUOTA]);
	return err;
}
3029 static int f2fs_ioc_setproject(struct inode *inode, __u32 projid)
3031 struct f2fs_inode_info *fi = F2FS_I(inode);
3032 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3033 struct f2fs_inode *ri = NULL;
3037 if (!f2fs_sb_has_project_quota(sbi)) {
3038 if (projid != F2FS_DEF_PROJID)
3044 if (!f2fs_has_extra_attr(inode))
3047 kprojid = make_kprojid(&init_user_ns, (projid_t)projid);
3049 if (projid_eq(kprojid, fi->i_projid))
3053 /* Is it quota file? Do not allow user to mess with it */
3054 if (IS_NOQUOTA(inode))
3057 if (!F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid))
3060 err = f2fs_dquot_initialize(inode);
3065 err = f2fs_transfer_project_quota(inode, kprojid);
3069 fi->i_projid = kprojid;
	inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, true);
out_unlock:
	f2fs_unlock_op(sbi);
	return err;
}
#else
int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
{
	return 0;
}

static int f2fs_ioc_setproject(struct inode *inode, __u32 projid)
{
	if (projid != F2FS_DEF_PROJID)
		return -EOPNOTSUPP;
	return 0;
}
#endif
3090 int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
3092 struct inode *inode = d_inode(dentry);
3093 struct f2fs_inode_info *fi = F2FS_I(inode);
3094 u32 fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
3096 if (IS_ENCRYPTED(inode))
3097 fsflags |= FS_ENCRYPT_FL;
3098 if (IS_VERITY(inode))
3099 fsflags |= FS_VERITY_FL;
3100 if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
3101 fsflags |= FS_INLINE_DATA_FL;
3102 if (is_inode_flag_set(inode, FI_PIN_FILE))
3103 fsflags |= FS_NOCOW_FL;
3105 fileattr_fill_flags(fa, fsflags & F2FS_GETTABLE_FS_FL);
3107 if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)))
3108 fa->fsx_projid = from_kprojid(&init_user_ns, fi->i_projid);
3113 int f2fs_fileattr_set(struct mnt_idmap *idmap,
3114 struct dentry *dentry, struct fileattr *fa)
3116 struct inode *inode = d_inode(dentry);
3117 u32 fsflags = fa->flags, mask = F2FS_SETTABLE_FS_FL;
3121 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
3123 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
3125 if (fsflags & ~F2FS_GETTABLE_FS_FL)
3127 fsflags &= F2FS_SETTABLE_FS_FL;
3128 if (!fa->flags_valid)
3129 mask &= FS_COMMON_FL;
3131 iflags = f2fs_fsflags_to_iflags(fsflags);
3132 if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
3135 err = f2fs_setflags_common(inode, iflags, f2fs_fsflags_to_iflags(mask));
3137 err = f2fs_ioc_setproject(inode, fa->fsx_projid);
3142 int f2fs_pin_file_control(struct inode *inode, bool inc)
3144 struct f2fs_inode_info *fi = F2FS_I(inode);
3145 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3147 /* Use i_gc_failures for normal file as a risk signal. */
3149 f2fs_i_gc_failures_write(inode,
3150 fi->i_gc_failures[GC_FAILURE_PIN] + 1);
3152 if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) {
3153 f2fs_warn(sbi, "%s: Enable GC = ino %lx after %x GC trials",
3154 __func__, inode->i_ino,
3155 fi->i_gc_failures[GC_FAILURE_PIN]);
3156 clear_inode_flag(inode, FI_PIN_FILE);
3162 static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
3164 struct inode *inode = file_inode(filp);
3168 if (get_user(pin, (__u32 __user *)arg))
3171 if (!S_ISREG(inode->i_mode))
3174 if (f2fs_readonly(F2FS_I_SB(inode)->sb))
3177 ret = mnt_want_write_file(filp);
3184 clear_inode_flag(inode, FI_PIN_FILE);
3185 f2fs_i_gc_failures_write(inode, 0);
3189 if (f2fs_should_update_outplace(inode, NULL)) {
3194 if (f2fs_pin_file_control(inode, false)) {
3199 ret = f2fs_convert_inline_inode(inode);
3203 if (!f2fs_disable_compressed_file(inode)) {
3208 set_inode_flag(inode, FI_PIN_FILE);
3209 ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
3211 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3213 inode_unlock(inode);
3214 mnt_drop_write_file(filp);
static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	__u32 pin = 0;

	if (is_inode_flag_set(inode, FI_PIN_FILE))
		pin = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
	return put_user(pin, (u32 __user *)arg);
}
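/*
 * Illustrative userspace sketch: both pin-file ioctls carry a __u32.
 * SET takes 1/0 to pin/unpin (pinning discourages GC from migrating the
 * file's blocks); GET reports the pin state as the GC-failure count
 * recorded while pinned.
 *
 *	__u32 pin = 1;
 *	ioctl(fd, F2FS_IOC_SET_PIN_FILE, &pin);
 *	ioctl(fd, F2FS_IOC_GET_PIN_FILE, &pin);
 */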
int f2fs_precache_extents(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_map_blocks map;
	pgoff_t m_next_extent;
	loff_t end;
	int err;

	if (is_inode_flag_set(inode, FI_NO_EXTENT))
		return -EOPNOTSUPP;

	map.m_lblk = 0;
	map.m_next_pgofs = NULL;
3241 map.m_next_extent = &m_next_extent;
3242 map.m_seg_type = NO_CHECK_TYPE;
3243 map.m_may_create = false;
3244 end = max_file_blocks(inode);
	while (map.m_lblk < end) {
		map.m_len = end - map.m_lblk;

		f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
		err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_PRECACHE);
		f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
		if (err)
			return err;

		map.m_lblk = m_next_extent;
	}

	return err;
}

static int f2fs_ioc_precache_extents(struct file *filp)
{
	return f2fs_precache_extents(file_inode(filp));
}
static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
	__u64 block_count;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (copy_from_user(&block_count, (void __user *)arg,
			   sizeof(block_count)))
		return -EFAULT;

	return f2fs_resize_fs(sbi, block_count);
}
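/*
 * Illustrative userspace sketch: F2FS_IOC_RESIZE_FS takes the requested
 * total block count as a __u64; CAP_SYS_ADMIN is required, per the check
 * above. The count below is hypothetical.
 *
 *	__u64 block_count = 1 << 20;
 *	ioctl(fd, F2FS_IOC_RESIZE_FS, &block_count);
 */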
static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	if (!f2fs_sb_has_verity(F2FS_I_SB(inode))) {
		f2fs_warn(F2FS_I_SB(inode),
			  "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem",
			  inode->i_ino);
		return -EOPNOTSUPP;
	}

	return fsverity_ioctl_enable(filp, (const void __user *)arg);
}

static int f2fs_ioc_measure_verity(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fsverity_ioctl_measure(filp, (void __user *)arg);
}

static int f2fs_ioc_read_verity_metadata(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fsverity_ioctl_read_metadata(filp, (const void __user *)arg);
}
3316 static int f2fs_ioc_getfslabel(struct file *filp, unsigned long arg)
3318 struct inode *inode = file_inode(filp);
3319 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3324 vbuf = f2fs_kzalloc(sbi, MAX_VOLUME_NAME, GFP_KERNEL);
3328 f2fs_down_read(&sbi->sb_lock);
3329 count = utf16s_to_utf8s(sbi->raw_super->volume_name,
3330 ARRAY_SIZE(sbi->raw_super->volume_name),
3331 UTF16_LITTLE_ENDIAN, vbuf, MAX_VOLUME_NAME);
3332 f2fs_up_read(&sbi->sb_lock);
3334 if (copy_to_user((char __user *)arg, vbuf,
3335 min(FSLABEL_MAX, count)))
3342 static int f2fs_ioc_setfslabel(struct file *filp, unsigned long arg)
3344 struct inode *inode = file_inode(filp);
3345 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3349 if (!capable(CAP_SYS_ADMIN))
3352 vbuf = strndup_user((const char __user *)arg, FSLABEL_MAX);
3354 return PTR_ERR(vbuf);
3356 err = mnt_want_write_file(filp);
3360 f2fs_down_write(&sbi->sb_lock);
3362 memset(sbi->raw_super->volume_name, 0,
3363 sizeof(sbi->raw_super->volume_name));
3364 utf8s_to_utf16s(vbuf, strlen(vbuf), UTF16_LITTLE_ENDIAN,
3365 sbi->raw_super->volume_name,
3366 ARRAY_SIZE(sbi->raw_super->volume_name));
3368 err = f2fs_commit_super(sbi, false);
3370 f2fs_up_write(&sbi->sb_lock);
3372 mnt_drop_write_file(filp);
static int f2fs_get_compress_blocks(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	__u64 blocks;

	if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
		return -EOPNOTSUPP;

	if (!f2fs_compressed_file(inode))
		return -EINVAL;

	blocks = atomic_read(&F2FS_I(inode)->i_compr_blocks);
	return put_user(blocks, (u64 __user *)arg);
}
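/*
 * Illustrative userspace sketch: F2FS_IOC_GET_COMPRESS_BLOCKS returns the
 * current i_compr_blocks count (blocks saved by compression) as a __u64.
 *
 *	__u64 blocks;
 *	ioctl(fd, F2FS_IOC_GET_COMPRESS_BLOCKS, &blocks);
 */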
3393 static int release_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
3395 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
3396 unsigned int released_blocks = 0;
3397 int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
3401 for (i = 0; i < count; i++) {
3402 blkaddr = data_blkaddr(dn->inode, dn->node_page,
3403 dn->ofs_in_node + i);
3405 if (!__is_valid_data_blkaddr(blkaddr))
3407 if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
3408 DATA_GENERIC_ENHANCE))) {
3409 f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
3410 return -EFSCORRUPTED;
3415 int compr_blocks = 0;
3417 for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
3418 blkaddr = f2fs_data_blkaddr(dn);
3421 if (blkaddr == COMPRESS_ADDR)
3423 dn->ofs_in_node += cluster_size;
3427 if (__is_valid_data_blkaddr(blkaddr))
3430 if (blkaddr != NEW_ADDR)
3433 dn->data_blkaddr = NULL_ADDR;
3434 f2fs_set_data_blkaddr(dn);
3437 f2fs_i_compr_blocks_update(dn->inode, compr_blocks, false);
3438 dec_valid_block_count(sbi, dn->inode,
3439 cluster_size - compr_blocks);
3441 released_blocks += cluster_size - compr_blocks;
3443 count -= cluster_size;
3446 return released_blocks;
3449 static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
3451 struct inode *inode = file_inode(filp);
3452 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3453 pgoff_t page_idx = 0, last_idx;
3454 unsigned int released_blocks = 0;
3458 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3461 if (!f2fs_compressed_file(inode))
3464 if (f2fs_readonly(sbi->sb))
3467 ret = mnt_want_write_file(filp);
3471 f2fs_balance_fs(F2FS_I_SB(inode), true);
3475 writecount = atomic_read(&inode->i_writecount);
3476 if ((filp->f_mode & FMODE_WRITE && writecount != 1) ||
3477 (!(filp->f_mode & FMODE_WRITE) && writecount)) {
3482 if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
3487 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
3491 set_inode_flag(inode, FI_COMPRESS_RELEASED);
3492 inode->i_ctime = current_time(inode);
3493 f2fs_mark_inode_dirty_sync(inode, true);
3495 if (!atomic_read(&F2FS_I(inode)->i_compr_blocks))
3498 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3499 filemap_invalidate_lock(inode->i_mapping);
3501 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3503 while (page_idx < last_idx) {
3504 struct dnode_of_data dn;
3505 pgoff_t end_offset, count;
3507 set_new_dnode(&dn, inode, NULL, NULL, 0);
3508 ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
3510 if (ret == -ENOENT) {
3511 page_idx = f2fs_get_next_page_offset(&dn,
3519 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3520 count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
3521 count = round_up(count, F2FS_I(inode)->i_cluster_size);
3523 ret = release_compress_blocks(&dn, count);
3525 f2fs_put_dnode(&dn);
3531 released_blocks += ret;
3534 filemap_invalidate_unlock(inode->i_mapping);
3535 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3537 inode_unlock(inode);
3539 mnt_drop_write_file(filp);
	if (ret >= 0) {
		ret = put_user(released_blocks, (u64 __user *)arg);
	} else if (released_blocks &&
			atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
			"iblocks=%llu, released=%u, compr_blocks=%u, "
			"run fsck to fix.",
			__func__, inode->i_ino, inode->i_blocks,
			released_blocks,
			atomic_read(&F2FS_I(inode)->i_compr_blocks));
	}

	return ret;
}
3557 static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
3559 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
3560 unsigned int reserved_blocks = 0;
3561 int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
3565 for (i = 0; i < count; i++) {
3566 blkaddr = data_blkaddr(dn->inode, dn->node_page,
3567 dn->ofs_in_node + i);
3569 if (!__is_valid_data_blkaddr(blkaddr))
3571 if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
3572 DATA_GENERIC_ENHANCE))) {
3573 f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
3574 return -EFSCORRUPTED;
3579 int compr_blocks = 0;
3583 for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
3584 blkaddr = f2fs_data_blkaddr(dn);
3587 if (blkaddr == COMPRESS_ADDR)
3589 dn->ofs_in_node += cluster_size;
3593 if (__is_valid_data_blkaddr(blkaddr)) {
3598 dn->data_blkaddr = NEW_ADDR;
3599 f2fs_set_data_blkaddr(dn);
3602 reserved = cluster_size - compr_blocks;
3603 ret = inc_valid_block_count(sbi, dn->inode, &reserved);
3607 if (reserved != cluster_size - compr_blocks)
3610 f2fs_i_compr_blocks_update(dn->inode, compr_blocks, true);
3612 reserved_blocks += reserved;
3614 count -= cluster_size;
3617 return reserved_blocks;
3620 static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
3622 struct inode *inode = file_inode(filp);
3623 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3624 pgoff_t page_idx = 0, last_idx;
3625 unsigned int reserved_blocks = 0;
3628 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3631 if (!f2fs_compressed_file(inode))
3634 if (f2fs_readonly(sbi->sb))
3637 ret = mnt_want_write_file(filp);
3641 if (atomic_read(&F2FS_I(inode)->i_compr_blocks))
3644 f2fs_balance_fs(F2FS_I_SB(inode), true);
3648 if (!is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
3653 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3654 filemap_invalidate_lock(inode->i_mapping);
3656 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3658 while (page_idx < last_idx) {
3659 struct dnode_of_data dn;
3660 pgoff_t end_offset, count;
3662 set_new_dnode(&dn, inode, NULL, NULL, 0);
3663 ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
3665 if (ret == -ENOENT) {
3666 page_idx = f2fs_get_next_page_offset(&dn,
3674 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3675 count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
3676 count = round_up(count, F2FS_I(inode)->i_cluster_size);
3678 ret = reserve_compress_blocks(&dn, count);
3680 f2fs_put_dnode(&dn);
3686 reserved_blocks += ret;
3689 filemap_invalidate_unlock(inode->i_mapping);
3690 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3693 clear_inode_flag(inode, FI_COMPRESS_RELEASED);
3694 inode->i_ctime = current_time(inode);
3695 f2fs_mark_inode_dirty_sync(inode, true);
3698 inode_unlock(inode);
3700 mnt_drop_write_file(filp);
	if (ret >= 0) {
		ret = put_user(reserved_blocks, (u64 __user *)arg);
	} else if (reserved_blocks &&
			atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: partial blocks were reserved i_ino=%lx "
			"iblocks=%llu, reserved=%u, compr_blocks=%u, "
			"run fsck to fix.",
			__func__, inode->i_ino, inode->i_blocks,
			reserved_blocks,
			atomic_read(&F2FS_I(inode)->i_compr_blocks));
	}

	return ret;
}
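/*
 * Illustrative userspace sketch of the pair above: RELEASE gives the blocks
 * saved by compression back to free space and marks the file
 * FI_COMPRESS_RELEASED (no further writes until they are reserved again);
 * RESERVE re-reserves them and clears the flag. Both report a block count
 * through a __u64.
 *
 *	__u64 blocks;
 *	ioctl(fd, F2FS_IOC_RELEASE_COMPRESS_BLOCKS, &blocks);
 *	ioctl(fd, F2FS_IOC_RESERVE_COMPRESS_BLOCKS, &blocks);
 */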
3718 static int f2fs_secure_erase(struct block_device *bdev, struct inode *inode,
3719 pgoff_t off, block_t block, block_t len, u32 flags)
3721 sector_t sector = SECTOR_FROM_BLOCK(block);
3722 sector_t nr_sects = SECTOR_FROM_BLOCK(len);
3725 if (flags & F2FS_TRIM_FILE_DISCARD) {
3726 if (bdev_max_secure_erase_sectors(bdev))
3727 ret = blkdev_issue_secure_erase(bdev, sector, nr_sects,
3730 ret = blkdev_issue_discard(bdev, sector, nr_sects,
3734 if (!ret && (flags & F2FS_TRIM_FILE_ZEROOUT)) {
3735 if (IS_ENCRYPTED(inode))
3736 ret = fscrypt_zeroout_range(inode, off, block, len);
3738 ret = blkdev_issue_zeroout(bdev, sector, nr_sects,
3745 static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
3747 struct inode *inode = file_inode(filp);
3748 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3749 struct address_space *mapping = inode->i_mapping;
3750 struct block_device *prev_bdev = NULL;
3751 struct f2fs_sectrim_range range;
3752 pgoff_t index, pg_end, prev_index = 0;
3753 block_t prev_block = 0, len = 0;
3755 bool to_end = false;
3758 if (!(filp->f_mode & FMODE_WRITE))
3761 if (copy_from_user(&range, (struct f2fs_sectrim_range __user *)arg,
3765 if (range.flags == 0 || (range.flags & ~F2FS_TRIM_FILE_MASK) ||
3766 !S_ISREG(inode->i_mode))
3769 if (((range.flags & F2FS_TRIM_FILE_DISCARD) &&
3770 !f2fs_hw_support_discard(sbi)) ||
3771 ((range.flags & F2FS_TRIM_FILE_ZEROOUT) &&
3772 IS_ENCRYPTED(inode) && f2fs_is_multi_device(sbi)))
3775 file_start_write(filp);
3778 if (f2fs_is_atomic_file(inode) || f2fs_compressed_file(inode) ||
3779 range.start >= inode->i_size) {
3787 if (inode->i_size - range.start > range.len) {
3788 end_addr = range.start + range.len;
3790 end_addr = range.len == (u64)-1 ?
3791 sbi->sb->s_maxbytes : inode->i_size;
3795 if (!IS_ALIGNED(range.start, F2FS_BLKSIZE) ||
3796 (!to_end && !IS_ALIGNED(end_addr, F2FS_BLKSIZE))) {
3801 index = F2FS_BYTES_TO_BLK(range.start);
3802 pg_end = DIV_ROUND_UP(end_addr, F2FS_BLKSIZE);
3804 ret = f2fs_convert_inline_inode(inode);
3808 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3809 filemap_invalidate_lock(mapping);
3811 ret = filemap_write_and_wait_range(mapping, range.start,
3812 to_end ? LLONG_MAX : end_addr - 1);
3816 truncate_inode_pages_range(mapping, range.start,
3817 to_end ? -1 : end_addr - 1);
3819 while (index < pg_end) {
3820 struct dnode_of_data dn;
3821 pgoff_t end_offset, count;
3824 set_new_dnode(&dn, inode, NULL, NULL, 0);
3825 ret = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
3827 if (ret == -ENOENT) {
3828 index = f2fs_get_next_page_offset(&dn, index);
3834 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3835 count = min(end_offset - dn.ofs_in_node, pg_end - index);
3836 for (i = 0; i < count; i++, index++, dn.ofs_in_node++) {
3837 struct block_device *cur_bdev;
3838 block_t blkaddr = f2fs_data_blkaddr(&dn);
3840 if (!__is_valid_data_blkaddr(blkaddr))
3843 if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
3844 DATA_GENERIC_ENHANCE)) {
3845 ret = -EFSCORRUPTED;
3846 f2fs_put_dnode(&dn);
3847 f2fs_handle_error(sbi,
3848 ERROR_INVALID_BLKADDR);
3852 cur_bdev = f2fs_target_device(sbi, blkaddr, NULL);
3853 if (f2fs_is_multi_device(sbi)) {
3854 int di = f2fs_target_device_index(sbi, blkaddr);
3856 blkaddr -= FDEV(di).start_blk;
3860 if (prev_bdev == cur_bdev &&
3861 index == prev_index + len &&
3862 blkaddr == prev_block + len) {
3865 ret = f2fs_secure_erase(prev_bdev,
3866 inode, prev_index, prev_block,
3869 f2fs_put_dnode(&dn);
3878 prev_bdev = cur_bdev;
3880 prev_block = blkaddr;
3885 f2fs_put_dnode(&dn);
3887 if (fatal_signal_pending(current)) {
3895 ret = f2fs_secure_erase(prev_bdev, inode, prev_index,
3896 prev_block, len, range.flags);
3898 filemap_invalidate_unlock(mapping);
3899 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
out:
	inode_unlock(inode);
	file_end_write(filp);

	return ret;
}
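/*
 * Illustrative userspace sketch: F2FS_IOC_SEC_TRIM_FILE takes a struct
 * f2fs_sectrim_range { __u64 start; __u64 len; __u64 flags; } (uapi).
 * flags combines F2FS_TRIM_FILE_DISCARD and/or F2FS_TRIM_FILE_ZEROOUT,
 * and len == (__u64)-1 means "to EOF", as handled above.
 *
 *	struct f2fs_sectrim_range tr = {
 *		.start = 0,
 *		.len = (__u64)-1,
 *		.flags = F2FS_TRIM_FILE_DISCARD | F2FS_TRIM_FILE_ZEROOUT,
 *	};
 *	ioctl(fd, F2FS_IOC_SEC_TRIM_FILE, &tr);
 */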
3907 static int f2fs_ioc_get_compress_option(struct file *filp, unsigned long arg)
3909 struct inode *inode = file_inode(filp);
3910 struct f2fs_comp_option option;
3912 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3915 inode_lock_shared(inode);
3917 if (!f2fs_compressed_file(inode)) {
3918 inode_unlock_shared(inode);
3922 option.algorithm = F2FS_I(inode)->i_compress_algorithm;
3923 option.log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
3925 inode_unlock_shared(inode);
3927 if (copy_to_user((struct f2fs_comp_option __user *)arg, &option,
3934 static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
3936 struct inode *inode = file_inode(filp);
3937 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3938 struct f2fs_comp_option option;
3941 if (!f2fs_sb_has_compression(sbi))
3944 if (!(filp->f_mode & FMODE_WRITE))
3947 if (copy_from_user(&option, (struct f2fs_comp_option __user *)arg,
3951 if (!f2fs_compressed_file(inode) ||
3952 option.log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
3953 option.log_cluster_size > MAX_COMPRESS_LOG_SIZE ||
3954 option.algorithm >= COMPRESS_MAX)
3957 file_start_write(filp);
3960 if (f2fs_is_mmap_file(inode) || get_dirty_pages(inode)) {
3965 if (F2FS_HAS_BLOCKS(inode)) {
3970 F2FS_I(inode)->i_compress_algorithm = option.algorithm;
3971 F2FS_I(inode)->i_log_cluster_size = option.log_cluster_size;
3972 F2FS_I(inode)->i_cluster_size = BIT(option.log_cluster_size);
3973 f2fs_mark_inode_dirty_sync(inode, true);
3975 if (!f2fs_is_compress_backend_ready(inode))
3976 f2fs_warn(sbi, "compression algorithm is successfully set, "
3977 "but current kernel doesn't support this algorithm.");
out:
	inode_unlock(inode);
	file_end_write(filp);

	return ret;
}
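/*
 * Illustrative userspace sketch: per-file compression parameters travel in
 * struct f2fs_comp_option { __u8 algorithm; __u8 log_cluster_size; } (uapi).
 * Setting is only permitted while the file has no blocks allocated, per the
 * F2FS_HAS_BLOCKS() check above; the algorithm value 1 (lz4 in the current
 * enum) is an assumption for illustration.
 *
 *	struct f2fs_comp_option opt = { .algorithm = 1, .log_cluster_size = 2 };
 *	ioctl(fd, F2FS_IOC_SET_COMPRESS_OPTION, &opt);
 *	ioctl(fd, F2FS_IOC_GET_COMPRESS_OPTION, &opt);
 */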
3985 static int redirty_blocks(struct inode *inode, pgoff_t page_idx, int len)
3987 DEFINE_READAHEAD(ractl, NULL, NULL, inode->i_mapping, page_idx);
3988 struct address_space *mapping = inode->i_mapping;
3990 pgoff_t redirty_idx = page_idx;
3991 int i, page_len = 0, ret = 0;
3993 page_cache_ra_unbounded(&ractl, len, 0);
	for (i = 0; i < len; i++, page_idx++) {
		page = read_cache_page(mapping, page_idx, NULL, NULL);
		if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			break;
		}
		page_len++;
	}

	for (i = 0; i < page_len; i++, redirty_idx++) {
		page = find_lock_page(mapping, redirty_idx);

		/* it should never fail, since the page was pinned above */
		f2fs_bug_on(F2FS_I_SB(inode), !page);

		set_page_dirty(page);
		f2fs_put_page(page, 1);
		f2fs_put_page(page, 0);
	}

	return ret;
}
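/*
 * Note on the helper above: redirty_blocks() does not change file contents.
 * It reads the pages of one cluster into the page cache and marks them
 * dirty so that the next writeback rewrites the cluster through whichever
 * (de)compression policy the callers below have set up (FI_ENABLE_COMPRESS,
 * or decompression under compress_mode=user).
 */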
4018 static int f2fs_ioc_decompress_file(struct file *filp)
4020 struct inode *inode = file_inode(filp);
4021 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4022 struct f2fs_inode_info *fi = F2FS_I(inode);
4023 pgoff_t page_idx = 0, last_idx;
4024 unsigned int blk_per_seg = sbi->blocks_per_seg;
4025 int cluster_size = fi->i_cluster_size;
4028 if (!f2fs_sb_has_compression(sbi) ||
4029 F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
4032 if (!(filp->f_mode & FMODE_WRITE))
4035 if (!f2fs_compressed_file(inode))
4038 f2fs_balance_fs(F2FS_I_SB(inode), true);
4040 file_start_write(filp);
4043 if (!f2fs_is_compress_backend_ready(inode)) {
4048 if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
4053 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
4057 if (!atomic_read(&fi->i_compr_blocks))
4060 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
4062 count = last_idx - page_idx;
4064 int len = min(cluster_size, count);
4066 ret = redirty_blocks(inode, page_idx, len);
4070 if (get_dirty_pages(inode) >= blk_per_seg) {
4071 ret = filemap_fdatawrite(inode->i_mapping);
	if (!ret)
		ret = filemap_write_and_wait_range(inode->i_mapping, 0,
							LLONG_MAX);
	if (ret)
		f2fs_warn(sbi, "%s: The file might be partially decompressed (errno=%d). Please delete the file.",
			  __func__, ret);
out:
	inode_unlock(inode);
	file_end_write(filp);

	return ret;
}
4094 static int f2fs_ioc_compress_file(struct file *filp)
4096 struct inode *inode = file_inode(filp);
4097 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4098 pgoff_t page_idx = 0, last_idx;
4099 unsigned int blk_per_seg = sbi->blocks_per_seg;
4100 int cluster_size = F2FS_I(inode)->i_cluster_size;
4103 if (!f2fs_sb_has_compression(sbi) ||
4104 F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
4107 if (!(filp->f_mode & FMODE_WRITE))
4110 if (!f2fs_compressed_file(inode))
4113 f2fs_balance_fs(F2FS_I_SB(inode), true);
4115 file_start_write(filp);
4118 if (!f2fs_is_compress_backend_ready(inode)) {
4123 if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
4128 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
4132 set_inode_flag(inode, FI_ENABLE_COMPRESS);
4134 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
4136 count = last_idx - page_idx;
4138 int len = min(cluster_size, count);
4140 ret = redirty_blocks(inode, page_idx, len);
4144 if (get_dirty_pages(inode) >= blk_per_seg) {
4145 ret = filemap_fdatawrite(inode->i_mapping);
	if (!ret)
		ret = filemap_write_and_wait_range(inode->i_mapping, 0,
							LLONG_MAX);

	clear_inode_flag(inode, FI_ENABLE_COMPRESS);

	if (ret)
		f2fs_warn(sbi, "%s: The file might be partially compressed (errno=%d). Please delete the file.",
			  __func__, ret);
out:
	inode_unlock(inode);
	file_end_write(filp);

	return ret;
}
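/*
 * Illustrative userspace sketch: with the filesystem mounted with
 * compress_mode=user, (de)compression is driven explicitly. Neither ioctl
 * takes an argument; the file must already carry the compression flag.
 *
 *	ioctl(fd, F2FS_IOC_DECOMPRESS_FILE);
 *	ioctl(fd, F2FS_IOC_COMPRESS_FILE);
 */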
static long __f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case FS_IOC_GETVERSION:
4174 return f2fs_ioc_getversion(filp, arg);
4175 case F2FS_IOC_START_ATOMIC_WRITE:
4176 return f2fs_ioc_start_atomic_write(filp, false);
4177 case F2FS_IOC_START_ATOMIC_REPLACE:
4178 return f2fs_ioc_start_atomic_write(filp, true);
4179 case F2FS_IOC_COMMIT_ATOMIC_WRITE:
4180 return f2fs_ioc_commit_atomic_write(filp);
4181 case F2FS_IOC_ABORT_ATOMIC_WRITE:
4182 return f2fs_ioc_abort_atomic_write(filp);
	case F2FS_IOC_START_VOLATILE_WRITE:
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
		return -EOPNOTSUPP;
	case F2FS_IOC_SHUTDOWN:
4187 return f2fs_ioc_shutdown(filp, arg);
	case FITRIM:
		return f2fs_ioc_fitrim(filp, arg);
4190 case FS_IOC_SET_ENCRYPTION_POLICY:
4191 return f2fs_ioc_set_encryption_policy(filp, arg);
4192 case FS_IOC_GET_ENCRYPTION_POLICY:
4193 return f2fs_ioc_get_encryption_policy(filp, arg);
4194 case FS_IOC_GET_ENCRYPTION_PWSALT:
4195 return f2fs_ioc_get_encryption_pwsalt(filp, arg);
4196 case FS_IOC_GET_ENCRYPTION_POLICY_EX:
4197 return f2fs_ioc_get_encryption_policy_ex(filp, arg);
4198 case FS_IOC_ADD_ENCRYPTION_KEY:
4199 return f2fs_ioc_add_encryption_key(filp, arg);
4200 case FS_IOC_REMOVE_ENCRYPTION_KEY:
4201 return f2fs_ioc_remove_encryption_key(filp, arg);
4202 case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
4203 return f2fs_ioc_remove_encryption_key_all_users(filp, arg);
4204 case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
4205 return f2fs_ioc_get_encryption_key_status(filp, arg);
4206 case FS_IOC_GET_ENCRYPTION_NONCE:
4207 return f2fs_ioc_get_encryption_nonce(filp, arg);
4208 case F2FS_IOC_GARBAGE_COLLECT:
4209 return f2fs_ioc_gc(filp, arg);
4210 case F2FS_IOC_GARBAGE_COLLECT_RANGE:
4211 return f2fs_ioc_gc_range(filp, arg);
4212 case F2FS_IOC_WRITE_CHECKPOINT:
4213 return f2fs_ioc_write_checkpoint(filp);
4214 case F2FS_IOC_DEFRAGMENT:
4215 return f2fs_ioc_defragment(filp, arg);
4216 case F2FS_IOC_MOVE_RANGE:
4217 return f2fs_ioc_move_range(filp, arg);
4218 case F2FS_IOC_FLUSH_DEVICE:
4219 return f2fs_ioc_flush_device(filp, arg);
4220 case F2FS_IOC_GET_FEATURES:
4221 return f2fs_ioc_get_features(filp, arg);
4222 case F2FS_IOC_GET_PIN_FILE:
4223 return f2fs_ioc_get_pin_file(filp, arg);
4224 case F2FS_IOC_SET_PIN_FILE:
4225 return f2fs_ioc_set_pin_file(filp, arg);
4226 case F2FS_IOC_PRECACHE_EXTENTS:
4227 return f2fs_ioc_precache_extents(filp);
4228 case F2FS_IOC_RESIZE_FS:
4229 return f2fs_ioc_resize_fs(filp, arg);
4230 case FS_IOC_ENABLE_VERITY:
4231 return f2fs_ioc_enable_verity(filp, arg);
4232 case FS_IOC_MEASURE_VERITY:
4233 return f2fs_ioc_measure_verity(filp, arg);
4234 case FS_IOC_READ_VERITY_METADATA:
4235 return f2fs_ioc_read_verity_metadata(filp, arg);
4236 case FS_IOC_GETFSLABEL:
4237 return f2fs_ioc_getfslabel(filp, arg);
4238 case FS_IOC_SETFSLABEL:
4239 return f2fs_ioc_setfslabel(filp, arg);
4240 case F2FS_IOC_GET_COMPRESS_BLOCKS:
4241 return f2fs_get_compress_blocks(filp, arg);
4242 case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
4243 return f2fs_release_compress_blocks(filp, arg);
4244 case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
4245 return f2fs_reserve_compress_blocks(filp, arg);
4246 case F2FS_IOC_SEC_TRIM_FILE:
4247 return f2fs_sec_trim_file(filp, arg);
4248 case F2FS_IOC_GET_COMPRESS_OPTION:
4249 return f2fs_ioc_get_compress_option(filp, arg);
4250 case F2FS_IOC_SET_COMPRESS_OPTION:
4251 return f2fs_ioc_set_compress_option(filp, arg);
4252 case F2FS_IOC_DECOMPRESS_FILE:
4253 return f2fs_ioc_decompress_file(filp);
4254 case F2FS_IOC_COMPRESS_FILE:
		return f2fs_ioc_compress_file(filp);
	default:
		return -ENOTTY;
	}
}
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
		return -EIO;
	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(filp))))
		return -ENOSPC;

	return __f2fs_ioctl(filp, cmd, arg);
}
4272 * Return %true if the given read or write request should use direct I/O, or
4273 * %false if it should use buffered I/O.
4275 static bool f2fs_should_use_dio(struct inode *inode, struct kiocb *iocb,
4276 struct iov_iter *iter)
	if (!(iocb->ki_flags & IOCB_DIRECT))
		return false;

	if (f2fs_force_buffered_io(inode, iov_iter_rw(iter)))
		return false;
4287 * Direct I/O not aligned to the disk's logical_block_size will be
4288 * attempted, but will fail with -EINVAL.
4290 * f2fs additionally requires that direct I/O be aligned to the
4291 * filesystem block size, which is often a stricter requirement.
4292 * However, f2fs traditionally falls back to buffered I/O on requests
4293 * that are logical_block_size-aligned but not fs-block aligned.
4295 * The below logic implements this behavior.
	align = iocb->ki_pos | iov_iter_alignment(iter);
	if (!IS_ALIGNED(align, i_blocksize(inode)) &&
	    IS_ALIGNED(align, bdev_logical_block_size(inode->i_sb->s_bdev)))
		return false;

	return true;
}
static int f2fs_dio_read_end_io(struct kiocb *iocb, ssize_t size, int error,
				unsigned int flags)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(iocb->ki_filp));

	dec_page_count(sbi, F2FS_DIO_READ);
	if (error)
		return error;

	f2fs_update_iostat(sbi, NULL, APP_DIRECT_READ_IO, size);
	return 0;
}

static const struct iomap_dio_ops f2fs_iomap_dio_read_ops = {
	.end_io = f2fs_dio_read_end_io,
};
4321 static ssize_t f2fs_dio_read_iter(struct kiocb *iocb, struct iov_iter *to)
4323 struct file *file = iocb->ki_filp;
4324 struct inode *inode = file_inode(file);
4325 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4326 struct f2fs_inode_info *fi = F2FS_I(inode);
4327 const loff_t pos = iocb->ki_pos;
4328 const size_t count = iov_iter_count(to);
4329 struct iomap_dio *dio;
	if (count == 0)
		return 0;	/* skip atime update */
4335 trace_f2fs_direct_IO_enter(inode, iocb, count, READ);
	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!f2fs_down_read_trylock(&fi->i_gc_rwsem[READ])) {
			ret = -EAGAIN;
			goto out;
		}
	} else {
		f2fs_down_read(&fi->i_gc_rwsem[READ]);
	}
4347 * We have to use __iomap_dio_rw() and iomap_dio_complete() instead of
4348 * the higher-level function iomap_dio_rw() in order to ensure that the
4349 * F2FS_DIO_READ counter will be decremented correctly in all cases.
4351 inc_page_count(sbi, F2FS_DIO_READ);
4352 dio = __iomap_dio_rw(iocb, to, &f2fs_iomap_ops,
4353 &f2fs_iomap_dio_read_ops, 0, NULL, 0);
4354 if (IS_ERR_OR_NULL(dio)) {
4355 ret = PTR_ERR_OR_ZERO(dio);
4356 if (ret != -EIOCBQUEUED)
4357 dec_page_count(sbi, F2FS_DIO_READ);
4359 ret = iomap_dio_complete(dio);
4362 f2fs_up_read(&fi->i_gc_rwsem[READ]);
4364 file_accessed(file);
out:
	trace_f2fs_direct_IO_exit(inode, pos, count, READ, ret);
	return ret;
}
static void f2fs_trace_rw_file_path(struct file *file, loff_t pos, size_t count,
				    int rw)
{
	struct inode *inode = file_inode(file);
	char *buf, *path;

	buf = f2fs_getname(F2FS_I_SB(inode));
	if (!buf)
		return;
	path = dentry_path_raw(file_dentry(file), buf, PATH_MAX);
	if (IS_ERR(path))
		goto free_buf;
	if (rw == WRITE)
		trace_f2fs_datawrite_start(inode, pos, count,
				current->pid, path, current->comm);
	else
		trace_f2fs_dataread_start(inode, pos, count,
				current->pid, path, current->comm);
free_buf:
	f2fs_putname(buf);
}
4392 static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
4394 struct inode *inode = file_inode(iocb->ki_filp);
4395 const loff_t pos = iocb->ki_pos;
	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;
4401 if (trace_f2fs_dataread_start_enabled())
4402 f2fs_trace_rw_file_path(iocb->ki_filp, iocb->ki_pos,
4403 iov_iter_count(to), READ);
	if (f2fs_should_use_dio(inode, iocb, to)) {
		ret = f2fs_dio_read_iter(iocb, to);
	} else {
		ret = filemap_read(iocb, to, 0);
		if (ret > 0)
			f2fs_update_iostat(F2FS_I_SB(inode), inode,
						APP_BUFFERED_READ_IO, ret);
	}
	if (trace_f2fs_dataread_end_enabled())
		trace_f2fs_dataread_end(inode, pos, ret);
	return ret;
}
4418 static ssize_t f2fs_file_splice_read(struct file *in, loff_t *ppos,
4419 struct pipe_inode_info *pipe,
4420 size_t len, unsigned int flags)
4422 struct inode *inode = file_inode(in);
4423 const loff_t pos = *ppos;
	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;
4429 if (trace_f2fs_dataread_start_enabled())
4430 f2fs_trace_rw_file_path(in, pos, len, READ);
	ret = filemap_splice_read(in, ppos, pipe, len, flags);
	if (ret > 0)
		f2fs_update_iostat(F2FS_I_SB(inode), inode,
				   APP_BUFFERED_READ_IO, ret);

	if (trace_f2fs_dataread_end_enabled())
		trace_f2fs_dataread_end(inode, pos, ret);
	return ret;
}
static ssize_t f2fs_write_checks(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	ssize_t count;
	int err;

	if (IS_IMMUTABLE(inode))
		return -EPERM;

	if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
		return -EPERM;

	count = generic_write_checks(iocb, from);
	if (count <= 0)
		return count;

	err = file_modified(file);
	if (err)
		return err;
	return count;
}
4466 * Preallocate blocks for a write request, if it is possible and helpful to do
4467 * so. Returns a positive number if blocks may have been preallocated, 0 if no
4468 * blocks were preallocated, or a negative errno value if something went
4469 * seriously wrong. Also sets FI_PREALLOCATED_ALL on the inode if *all* the
4470 * requested blocks (not just some of them) have been allocated.
4472 static int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *iter,
4475 struct inode *inode = file_inode(iocb->ki_filp);
4476 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4477 const loff_t pos = iocb->ki_pos;
4478 const size_t count = iov_iter_count(iter);
4479 struct f2fs_map_blocks map = {};
4483 /* If it will be an out-of-place direct write, don't bother. */
4484 if (dio && f2fs_lfs_mode(sbi))
4487 * Don't preallocate holes aligned to DIO_SKIP_HOLES which turns into
4488 * buffered IO, if DIO meets any holes.
4490 if (dio && i_size_read(inode) &&
4491 (F2FS_BYTES_TO_BLK(pos) < F2FS_BLK_ALIGN(i_size_read(inode))))
4494 /* No-wait I/O can't allocate blocks. */
4495 if (iocb->ki_flags & IOCB_NOWAIT)
4498 /* If it will be a short write, don't bother. */
4499 if (fault_in_iov_iter_readable(iter, count))
4502 if (f2fs_has_inline_data(inode)) {
4503 /* If the data will fit inline, don't bother. */
4504 if (pos + count <= MAX_INLINE_DATA(inode))
4506 ret = f2fs_convert_inline_inode(inode);
4511 /* Do not preallocate blocks that will be written partially in 4KB. */
4512 map.m_lblk = F2FS_BLK_ALIGN(pos);
4513 map.m_len = F2FS_BYTES_TO_BLK(pos + count);
4514 if (map.m_len > map.m_lblk)
4515 map.m_len -= map.m_lblk;
4518 map.m_may_create = true;
4520 map.m_seg_type = f2fs_rw_hint_to_seg_type(inode->i_write_hint);
4521 flag = F2FS_GET_BLOCK_PRE_DIO;
4523 map.m_seg_type = NO_CHECK_TYPE;
4524 flag = F2FS_GET_BLOCK_PRE_AIO;
4527 ret = f2fs_map_blocks(inode, &map, flag);
4528 /* -ENOSPC|-EDQUOT are fine to report the number of allocated blocks. */
4529 if (ret < 0 && !((ret == -ENOSPC || ret == -EDQUOT) && map.m_len > 0))
4532 set_inode_flag(inode, FI_PREALLOCATED_ALL);
static ssize_t f2fs_buffered_write_iter(struct kiocb *iocb,
					struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	ssize_t ret;

	if (iocb->ki_flags & IOCB_NOWAIT)
		return -EOPNOTSUPP;

	ret = generic_perform_write(iocb, from);

	if (ret > 0)
		f2fs_update_iostat(F2FS_I_SB(inode), inode,
						APP_BUFFERED_IO, ret);

	return ret;
}
static int f2fs_dio_write_end_io(struct kiocb *iocb, ssize_t size, int error,
				 unsigned int flags)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(iocb->ki_filp));

	dec_page_count(sbi, F2FS_DIO_WRITE);
	if (error)
		return error;

	f2fs_update_iostat(sbi, NULL, APP_DIRECT_IO, size);
	return 0;
}

static const struct iomap_dio_ops f2fs_iomap_dio_write_ops = {
	.end_io = f2fs_dio_write_end_io,
};
static void f2fs_flush_buffered_write(struct address_space *mapping,
				      loff_t start_pos, loff_t end_pos)
{
	int ret;

	ret = filemap_write_and_wait_range(mapping, start_pos, end_pos);
	if (ret < 0)
		return;
	invalidate_mapping_pages(mapping,
				 start_pos >> PAGE_SHIFT,
				 end_pos >> PAGE_SHIFT);
}
4584 static ssize_t f2fs_dio_write_iter(struct kiocb *iocb, struct iov_iter *from,
4585 bool *may_need_sync)
4587 struct file *file = iocb->ki_filp;
4588 struct inode *inode = file_inode(file);
4589 struct f2fs_inode_info *fi = F2FS_I(inode);
4590 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4591 const bool do_opu = f2fs_lfs_mode(sbi);
4592 const loff_t pos = iocb->ki_pos;
4593 const ssize_t count = iov_iter_count(from);
4594 unsigned int dio_flags;
4595 struct iomap_dio *dio;
4598 trace_f2fs_direct_IO_enter(inode, iocb, count, WRITE);
	if (iocb->ki_flags & IOCB_NOWAIT) {
		/* f2fs_convert_inline_inode() and block allocation can block */
		if (f2fs_has_inline_data(inode) ||
		    !f2fs_overwrite_io(inode, pos, count)) {
			ret = -EAGAIN;
			goto out;
		}

		if (!f2fs_down_read_trylock(&fi->i_gc_rwsem[WRITE])) {
			ret = -EAGAIN;
			goto out;
		}
		if (do_opu && !f2fs_down_read_trylock(&fi->i_gc_rwsem[READ])) {
			f2fs_up_read(&fi->i_gc_rwsem[WRITE]);
			ret = -EAGAIN;
			goto out;
		}
	} else {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			goto out;

		f2fs_down_read(&fi->i_gc_rwsem[WRITE]);
		if (do_opu)
			f2fs_down_read(&fi->i_gc_rwsem[READ]);
	}
4628 * We have to use __iomap_dio_rw() and iomap_dio_complete() instead of
4629 * the higher-level function iomap_dio_rw() in order to ensure that the
4630 * F2FS_DIO_WRITE counter will be decremented correctly in all cases.
	inc_page_count(sbi, F2FS_DIO_WRITE);
	dio_flags = 0;
	if (pos + count > inode->i_size)
		dio_flags |= IOMAP_DIO_FORCE_WAIT;
4636 dio = __iomap_dio_rw(iocb, from, &f2fs_iomap_ops,
4637 &f2fs_iomap_dio_write_ops, dio_flags, NULL, 0);
4638 if (IS_ERR_OR_NULL(dio)) {
4639 ret = PTR_ERR_OR_ZERO(dio);
		if (ret == -ENOTBLK)
			ret = 0;
4642 if (ret != -EIOCBQUEUED)
4643 dec_page_count(sbi, F2FS_DIO_WRITE);
4645 ret = iomap_dio_complete(dio);
4649 f2fs_up_read(&fi->i_gc_rwsem[READ]);
4650 f2fs_up_read(&fi->i_gc_rwsem[WRITE]);
4654 if (pos + ret > inode->i_size)
4655 f2fs_i_size_write(inode, pos + ret);
4657 set_inode_flag(inode, FI_UPDATE_WRITE);
4659 if (iov_iter_count(from)) {
4661 loff_t bufio_start_pos = iocb->ki_pos;
4664 * The direct write was partial, so we need to fall back to a
4665 * buffered write for the remainder.
4668 ret2 = f2fs_buffered_write_iter(iocb, from);
4669 if (iov_iter_count(from))
4670 f2fs_write_failed(inode, iocb->ki_pos);
4675 * Ensure that the pagecache pages are written to disk and
4676 * invalidated to preserve the expected O_DIRECT semantics.
4679 loff_t bufio_end_pos = bufio_start_pos + ret2 - 1;
4683 f2fs_flush_buffered_write(file->f_mapping,
4688 /* iomap_dio_rw() already handled the generic_write_sync(). */
4689 *may_need_sync = false;
4692 trace_f2fs_direct_IO_exit(inode, pos, count, WRITE, ret);
4696 static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
4698 struct inode *inode = file_inode(iocb->ki_filp);
4699 const loff_t orig_pos = iocb->ki_pos;
4700 const size_t orig_count = iov_iter_count(from);
4703 bool may_need_sync = true;
4707 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
4712 if (!f2fs_is_compress_backend_ready(inode)) {
4717 if (iocb->ki_flags & IOCB_NOWAIT) {
4718 if (!inode_trylock(inode)) {
4726 ret = f2fs_write_checks(iocb, from);
4730 /* Determine whether we will do a direct write or a buffered write. */
4731 dio = f2fs_should_use_dio(inode, iocb, from);
4733 /* Possibly preallocate the blocks for the write. */
4734 target_size = iocb->ki_pos + iov_iter_count(from);
4735 preallocated = f2fs_preallocate_blocks(iocb, from, dio);
4736 if (preallocated < 0) {
4739 if (trace_f2fs_datawrite_start_enabled())
4740 f2fs_trace_rw_file_path(iocb->ki_filp, iocb->ki_pos,
	/* Do the actual write. */
	ret = dio ?
		f2fs_dio_write_iter(iocb, from, &may_need_sync) :
		f2fs_buffered_write_iter(iocb, from);
4748 if (trace_f2fs_datawrite_end_enabled())
4749 trace_f2fs_datawrite_end(inode, orig_pos, ret);
4752 /* Don't leave any preallocated blocks around past i_size. */
4753 if (preallocated && i_size_read(inode) < target_size) {
4754 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
4755 filemap_invalidate_lock(inode->i_mapping);
4756 if (!f2fs_truncate(inode))
4757 file_dont_truncate(inode);
4758 filemap_invalidate_unlock(inode->i_mapping);
4759 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
4761 file_dont_truncate(inode);
4764 clear_inode_flag(inode, FI_PREALLOCATED_ALL);
4766 inode_unlock(inode);
4768 trace_f2fs_file_write_iter(inode, orig_pos, orig_count, ret);
4770 if (ret > 0 && may_need_sync)
4771 ret = generic_write_sync(iocb, ret);
	/*
	 * If buffered IO was forced, flush and drop the data from
	 * the page cache to preserve O_DIRECT semantics.
	 */
4776 if (ret > 0 && !dio && (iocb->ki_flags & IOCB_DIRECT))
4777 f2fs_flush_buffered_write(iocb->ki_filp->f_mapping,
4779 orig_pos + ret - 1);
4784 static int f2fs_file_fadvise(struct file *filp, loff_t offset, loff_t len,
4787 struct address_space *mapping;
4788 struct backing_dev_info *bdi;
4789 struct inode *inode = file_inode(filp);
	if (advice == POSIX_FADV_SEQUENTIAL) {
		if (S_ISFIFO(inode->i_mode))
			return -ESPIPE;

		mapping = filp->f_mapping;
		if (!mapping || len < 0)
			return -EINVAL;

		bdi = inode_to_bdi(mapping->host);
		filp->f_ra.ra_pages = bdi->ra_pages *
			F2FS_I_SB(inode)->seq_file_ra_mul;
		spin_lock(&filp->f_lock);
		filp->f_mode &= ~FMODE_RANDOM;
		spin_unlock(&filp->f_lock);
		return 0;
	}

	err = generic_fadvise(filp, offset, len, advice);
	if (!err && advice == POSIX_FADV_DONTNEED &&
	    test_opt(F2FS_I_SB(inode), COMPRESS_CACHE) &&
	    f2fs_compressed_file(inode))
		f2fs_invalidate_compress_pages(F2FS_I_SB(inode), inode->i_ino);

	return err;
}
4818 #ifdef CONFIG_COMPAT
struct compat_f2fs_gc_range {
	u32 sync;
	compat_u64 start;
	compat_u64 len;
};
#define F2FS_IOC32_GARBAGE_COLLECT_RANGE	_IOW(F2FS_IOCTL_MAGIC, 11,\
						struct compat_f2fs_gc_range)
static int f2fs_compat_ioc_gc_range(struct file *file, unsigned long arg)
{
	struct compat_f2fs_gc_range __user *urange;
	struct f2fs_gc_range range;
	int err;

	urange = compat_ptr(arg);
	err = get_user(range.sync, &urange->sync);
	err |= get_user(range.start, &urange->start);
	err |= get_user(range.len, &urange->len);
	if (err)
		return -EFAULT;

	return __f2fs_ioc_gc_range(file, &range);
}
struct compat_f2fs_move_range {
	u32 dst_fd;
	compat_u64 pos_in;
	compat_u64 pos_out;
	compat_u64 len;
};
#define F2FS_IOC32_MOVE_RANGE			_IOWR(F2FS_IOCTL_MAGIC, 9,\
						struct compat_f2fs_move_range)
static int f2fs_compat_ioc_move_range(struct file *file, unsigned long arg)
{
	struct compat_f2fs_move_range __user *urange;
	struct f2fs_move_range range;
	int err;

	urange = compat_ptr(arg);
	err = get_user(range.dst_fd, &urange->dst_fd);
	err |= get_user(range.pos_in, &urange->pos_in);
	err |= get_user(range.pos_out, &urange->pos_out);
	err |= get_user(range.len, &urange->len);
	if (err)
		return -EFAULT;

	return __f2fs_ioc_move_range(file, &range);
}
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
		return -EIO;
	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(file))))
		return -ENOSPC;

	switch (cmd) {
	case FS_IOC32_GETVERSION:
		cmd = FS_IOC_GETVERSION;
		break;
4880 case F2FS_IOC32_GARBAGE_COLLECT_RANGE:
4881 return f2fs_compat_ioc_gc_range(file, arg);
4882 case F2FS_IOC32_MOVE_RANGE:
4883 return f2fs_compat_ioc_move_range(file, arg);
4884 case F2FS_IOC_START_ATOMIC_WRITE:
4885 case F2FS_IOC_START_ATOMIC_REPLACE:
4886 case F2FS_IOC_COMMIT_ATOMIC_WRITE:
4887 case F2FS_IOC_START_VOLATILE_WRITE:
4888 case F2FS_IOC_RELEASE_VOLATILE_WRITE:
4889 case F2FS_IOC_ABORT_ATOMIC_WRITE:
	case F2FS_IOC_SHUTDOWN:
	case FITRIM:
4892 case FS_IOC_SET_ENCRYPTION_POLICY:
4893 case FS_IOC_GET_ENCRYPTION_PWSALT:
4894 case FS_IOC_GET_ENCRYPTION_POLICY:
4895 case FS_IOC_GET_ENCRYPTION_POLICY_EX:
4896 case FS_IOC_ADD_ENCRYPTION_KEY:
4897 case FS_IOC_REMOVE_ENCRYPTION_KEY:
4898 case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
4899 case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
4900 case FS_IOC_GET_ENCRYPTION_NONCE:
4901 case F2FS_IOC_GARBAGE_COLLECT:
4902 case F2FS_IOC_WRITE_CHECKPOINT:
4903 case F2FS_IOC_DEFRAGMENT:
4904 case F2FS_IOC_FLUSH_DEVICE:
4905 case F2FS_IOC_GET_FEATURES:
4906 case F2FS_IOC_GET_PIN_FILE:
4907 case F2FS_IOC_SET_PIN_FILE:
4908 case F2FS_IOC_PRECACHE_EXTENTS:
4909 case F2FS_IOC_RESIZE_FS:
4910 case FS_IOC_ENABLE_VERITY:
4911 case FS_IOC_MEASURE_VERITY:
4912 case FS_IOC_READ_VERITY_METADATA:
4913 case FS_IOC_GETFSLABEL:
4914 case FS_IOC_SETFSLABEL:
4915 case F2FS_IOC_GET_COMPRESS_BLOCKS:
4916 case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
4917 case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
4918 case F2FS_IOC_SEC_TRIM_FILE:
4919 case F2FS_IOC_GET_COMPRESS_OPTION:
4920 case F2FS_IOC_SET_COMPRESS_OPTION:
4921 case F2FS_IOC_DECOMPRESS_FILE:
	case F2FS_IOC_COMPRESS_FILE:
		break;
	default:
		return -ENOIOCTLCMD;
	}
	return __f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
4931 const struct file_operations f2fs_file_operations = {
4932 .llseek = f2fs_llseek,
4933 .read_iter = f2fs_file_read_iter,
4934 .write_iter = f2fs_file_write_iter,
4935 .iopoll = iocb_bio_iopoll,
4936 .open = f2fs_file_open,
4937 .release = f2fs_release_file,
4938 .mmap = f2fs_file_mmap,
4939 .flush = f2fs_file_flush,
4940 .fsync = f2fs_sync_file,
4941 .fallocate = f2fs_fallocate,
4942 .unlocked_ioctl = f2fs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= f2fs_compat_ioctl,
#endif
4946 .splice_read = f2fs_file_splice_read,
4947 .splice_write = iter_file_splice_write,
	.fadvise	= f2fs_file_fadvise,
};