bf37983304a33832ba627197c24ecfcf1d25b761
[platform/kernel/linux-starfive.git] / fs / f2fs / file.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * fs/f2fs/file.c
4  *
5  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6  *             http://www.samsung.com/
7  */
8 #include <linux/fs.h>
9 #include <linux/f2fs_fs.h>
10 #include <linux/stat.h>
11 #include <linux/buffer_head.h>
12 #include <linux/writeback.h>
13 #include <linux/blkdev.h>
14 #include <linux/falloc.h>
15 #include <linux/types.h>
16 #include <linux/compat.h>
17 #include <linux/uaccess.h>
18 #include <linux/mount.h>
19 #include <linux/pagevec.h>
20 #include <linux/uio.h>
21 #include <linux/uuid.h>
22 #include <linux/file.h>
23 #include <linux/nls.h>
24 #include <linux/sched/signal.h>
25 #include <linux/fileattr.h>
26 #include <linux/fadvise.h>
27 #include <linux/iomap.h>
28
29 #include "f2fs.h"
30 #include "node.h"
31 #include "segment.h"
32 #include "xattr.h"
33 #include "acl.h"
34 #include "gc.h"
35 #include "iostat.h"
36 #include <trace/events/f2fs.h>
37 #include <uapi/linux/f2fs.h>
38
39 static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
40 {
41         struct inode *inode = file_inode(vmf->vma->vm_file);
42         vm_fault_t ret;
43
44         ret = filemap_fault(vmf);
45         if (!ret)
46                 f2fs_update_iostat(F2FS_I_SB(inode), inode,
47                                         APP_MAPPED_READ_IO, F2FS_BLKSIZE);
48
49         trace_f2fs_filemap_fault(inode, vmf->pgoff, (unsigned long)ret);
50
51         return ret;
52 }
53
54 static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
55 {
56         struct page *page = vmf->page;
57         struct inode *inode = file_inode(vmf->vma->vm_file);
58         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
59         struct dnode_of_data dn;
60         bool need_alloc = true;
61         int err = 0;
62
63         if (unlikely(IS_IMMUTABLE(inode)))
64                 return VM_FAULT_SIGBUS;
65
66         if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
67                 return VM_FAULT_SIGBUS;
68
69         if (unlikely(f2fs_cp_error(sbi))) {
70                 err = -EIO;
71                 goto err;
72         }
73
74         if (!f2fs_is_checkpoint_ready(sbi)) {
75                 err = -ENOSPC;
76                 goto err;
77         }
78
79         err = f2fs_convert_inline_inode(inode);
80         if (err)
81                 goto err;
82
83 #ifdef CONFIG_F2FS_FS_COMPRESSION
84         if (f2fs_compressed_file(inode)) {
85                 int ret = f2fs_is_compressed_cluster(inode, page->index);
86
87                 if (ret < 0) {
88                         err = ret;
89                         goto err;
90                 } else if (ret) {
91                         need_alloc = false;
92                 }
93         }
94 #endif
95         /* should do out of any locked page */
96         if (need_alloc)
97                 f2fs_balance_fs(sbi, true);
98
99         sb_start_pagefault(inode->i_sb);
100
101         f2fs_bug_on(sbi, f2fs_has_inline_data(inode));
102
103         file_update_time(vmf->vma->vm_file);
104         filemap_invalidate_lock_shared(inode->i_mapping);
105         lock_page(page);
106         if (unlikely(page->mapping != inode->i_mapping ||
107                         page_offset(page) > i_size_read(inode) ||
108                         !PageUptodate(page))) {
109                 unlock_page(page);
110                 err = -EFAULT;
111                 goto out_sem;
112         }
113
114         if (need_alloc) {
115                 /* block allocation */
116                 f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
117                 set_new_dnode(&dn, inode, NULL, NULL, 0);
118                 err = f2fs_get_block(&dn, page->index);
119                 f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
120         }
121
122 #ifdef CONFIG_F2FS_FS_COMPRESSION
123         if (!need_alloc) {
124                 set_new_dnode(&dn, inode, NULL, NULL, 0);
125                 err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
126                 f2fs_put_dnode(&dn);
127         }
128 #endif
129         if (err) {
130                 unlock_page(page);
131                 goto out_sem;
132         }
133
134         f2fs_wait_on_page_writeback(page, DATA, false, true);
135
136         /* wait for GCed page writeback via META_MAPPING */
137         f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
138
139         /*
140          * check to see if the page is mapped already (no holes)
141          */
142         if (PageMappedToDisk(page))
143                 goto out_sem;
144
145         /* page is wholly or partially inside EOF */
146         if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
147                                                 i_size_read(inode)) {
148                 loff_t offset;
149
150                 offset = i_size_read(inode) & ~PAGE_MASK;
151                 zero_user_segment(page, offset, PAGE_SIZE);
152         }
153         set_page_dirty(page);
154         if (!PageUptodate(page))
155                 SetPageUptodate(page);
156
157         f2fs_update_iostat(sbi, inode, APP_MAPPED_IO, F2FS_BLKSIZE);
158         f2fs_update_time(sbi, REQ_TIME);
159
160         trace_f2fs_vm_page_mkwrite(page, DATA);
161 out_sem:
162         filemap_invalidate_unlock_shared(inode->i_mapping);
163
164         sb_end_pagefault(inode->i_sb);
165 err:
166         return block_page_mkwrite_return(err);
167 }
168
169 static const struct vm_operations_struct f2fs_file_vm_ops = {
170         .fault          = f2fs_filemap_fault,
171         .map_pages      = filemap_map_pages,
172         .page_mkwrite   = f2fs_vm_page_mkwrite,
173 };
174
175 static int get_parent_ino(struct inode *inode, nid_t *pino)
176 {
177         struct dentry *dentry;
178
179         /*
180          * Make sure to get the non-deleted alias.  The alias associated with
181          * the open file descriptor being fsync()'ed may be deleted already.
182          */
183         dentry = d_find_alias(inode);
184         if (!dentry)
185                 return 0;
186
187         *pino = parent_ino(dentry);
188         dput(dentry);
189         return 1;
190 }
191
192 static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
193 {
194         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
195         enum cp_reason_type cp_reason = CP_NO_NEEDED;
196
197         if (!S_ISREG(inode->i_mode))
198                 cp_reason = CP_NON_REGULAR;
199         else if (f2fs_compressed_file(inode))
200                 cp_reason = CP_COMPRESSED;
201         else if (inode->i_nlink != 1)
202                 cp_reason = CP_HARDLINK;
203         else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
204                 cp_reason = CP_SB_NEED_CP;
205         else if (file_wrong_pino(inode))
206                 cp_reason = CP_WRONG_PINO;
207         else if (!f2fs_space_for_roll_forward(sbi))
208                 cp_reason = CP_NO_SPC_ROLL;
209         else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
210                 cp_reason = CP_NODE_NEED_CP;
211         else if (test_opt(sbi, FASTBOOT))
212                 cp_reason = CP_FASTBOOT_MODE;
213         else if (F2FS_OPTION(sbi).active_logs == 2)
214                 cp_reason = CP_SPEC_LOG_NUM;
215         else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT &&
216                 f2fs_need_dentry_mark(sbi, inode->i_ino) &&
217                 f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
218                                                         TRANS_DIR_INO))
219                 cp_reason = CP_RECOVER_DIR;
220
221         return cp_reason;
222 }
223
224 static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
225 {
226         struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
227         bool ret = false;
228         /* But we need to avoid that there are some inode updates */
229         if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino))
230                 ret = true;
231         f2fs_put_page(i, 0);
232         return ret;
233 }
234
235 static void try_to_fix_pino(struct inode *inode)
236 {
237         struct f2fs_inode_info *fi = F2FS_I(inode);
238         nid_t pino;
239
240         f2fs_down_write(&fi->i_sem);
241         if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
242                         get_parent_ino(inode, &pino)) {
243                 f2fs_i_pino_write(inode, pino);
244                 file_got_pino(inode);
245         }
246         f2fs_up_write(&fi->i_sem);
247 }
248
249 static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
250                                                 int datasync, bool atomic)
251 {
252         struct inode *inode = file->f_mapping->host;
253         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
254         nid_t ino = inode->i_ino;
255         int ret = 0;
256         enum cp_reason_type cp_reason = 0;
257         struct writeback_control wbc = {
258                 .sync_mode = WB_SYNC_ALL,
259                 .nr_to_write = LONG_MAX,
260                 .for_reclaim = 0,
261         };
262         unsigned int seq_id = 0;
263
264         if (unlikely(f2fs_readonly(inode->i_sb)))
265                 return 0;
266
267         trace_f2fs_sync_file_enter(inode);
268
269         if (S_ISDIR(inode->i_mode))
270                 goto go_write;
271
272         /* if fdatasync is triggered, let's do in-place-update */
273         if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
274                 set_inode_flag(inode, FI_NEED_IPU);
275         ret = file_write_and_wait_range(file, start, end);
276         clear_inode_flag(inode, FI_NEED_IPU);
277
278         if (ret || is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
279                 trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
280                 return ret;
281         }
282
283         /* if the inode is dirty, let's recover all the time */
284         if (!f2fs_skip_inode_update(inode, datasync)) {
285                 f2fs_write_inode(inode, NULL);
286                 goto go_write;
287         }
288
289         /*
290          * if there is no written data, don't waste time to write recovery info.
291          */
292         if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
293                         !f2fs_exist_written_data(sbi, ino, APPEND_INO)) {
294
295                 /* it may call write_inode just prior to fsync */
296                 if (need_inode_page_update(sbi, ino))
297                         goto go_write;
298
299                 if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
300                                 f2fs_exist_written_data(sbi, ino, UPDATE_INO))
301                         goto flush_out;
302                 goto out;
303         } else {
304                 /*
305                  * for OPU case, during fsync(), node can be persisted before
306                  * data when lower device doesn't support write barrier, result
307                  * in data corruption after SPO.
308                  * So for strict fsync mode, force to use atomic write sematics
309                  * to keep write order in between data/node and last node to
310                  * avoid potential data corruption.
311                  */
312                 if (F2FS_OPTION(sbi).fsync_mode ==
313                                 FSYNC_MODE_STRICT && !atomic)
314                         atomic = true;
315         }
316 go_write:
317         /*
318          * Both of fdatasync() and fsync() are able to be recovered from
319          * sudden-power-off.
320          */
321         f2fs_down_read(&F2FS_I(inode)->i_sem);
322         cp_reason = need_do_checkpoint(inode);
323         f2fs_up_read(&F2FS_I(inode)->i_sem);
324
325         if (cp_reason) {
326                 /* all the dirty node pages should be flushed for POR */
327                 ret = f2fs_sync_fs(inode->i_sb, 1);
328
329                 /*
330                  * We've secured consistency through sync_fs. Following pino
331                  * will be used only for fsynced inodes after checkpoint.
332                  */
333                 try_to_fix_pino(inode);
334                 clear_inode_flag(inode, FI_APPEND_WRITE);
335                 clear_inode_flag(inode, FI_UPDATE_WRITE);
336                 goto out;
337         }
338 sync_nodes:
339         atomic_inc(&sbi->wb_sync_req[NODE]);
340         ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
341         atomic_dec(&sbi->wb_sync_req[NODE]);
342         if (ret)
343                 goto out;
344
345         /* if cp_error was enabled, we should avoid infinite loop */
346         if (unlikely(f2fs_cp_error(sbi))) {
347                 ret = -EIO;
348                 goto out;
349         }
350
351         if (f2fs_need_inode_block_update(sbi, ino)) {
352                 f2fs_mark_inode_dirty_sync(inode, true);
353                 f2fs_write_inode(inode, NULL);
354                 goto sync_nodes;
355         }
356
357         /*
358          * If it's atomic_write, it's just fine to keep write ordering. So
359          * here we don't need to wait for node write completion, since we use
360          * node chain which serializes node blocks. If one of node writes are
361          * reordered, we can see simply broken chain, resulting in stopping
362          * roll-forward recovery. It means we'll recover all or none node blocks
363          * given fsync mark.
364          */
365         if (!atomic) {
366                 ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id);
367                 if (ret)
368                         goto out;
369         }
370
371         /* once recovery info is written, don't need to tack this */
372         f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
373         clear_inode_flag(inode, FI_APPEND_WRITE);
374 flush_out:
375         if ((!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER) ||
376             (atomic && !test_opt(sbi, NOBARRIER) && f2fs_sb_has_blkzoned(sbi)))
377                 ret = f2fs_issue_flush(sbi, inode->i_ino);
378         if (!ret) {
379                 f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
380                 clear_inode_flag(inode, FI_UPDATE_WRITE);
381                 f2fs_remove_ino_entry(sbi, ino, FLUSH_INO);
382         }
383         f2fs_update_time(sbi, REQ_TIME);
384 out:
385         trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
386         return ret;
387 }
388
389 int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
390 {
391         if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
392                 return -EIO;
393         return f2fs_do_sync_file(file, start, end, datasync, false);
394 }
395
396 static bool __found_offset(struct address_space *mapping, block_t blkaddr,
397                                 pgoff_t index, int whence)
398 {
399         switch (whence) {
400         case SEEK_DATA:
401                 if (__is_valid_data_blkaddr(blkaddr))
402                         return true;
403                 if (blkaddr == NEW_ADDR &&
404                     xa_get_mark(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY))
405                         return true;
406                 break;
407         case SEEK_HOLE:
408                 if (blkaddr == NULL_ADDR)
409                         return true;
410                 break;
411         }
412         return false;
413 }
414
415 static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
416 {
417         struct inode *inode = file->f_mapping->host;
418         loff_t maxbytes = inode->i_sb->s_maxbytes;
419         struct dnode_of_data dn;
420         pgoff_t pgofs, end_offset;
421         loff_t data_ofs = offset;
422         loff_t isize;
423         int err = 0;
424
425         inode_lock(inode);
426
427         isize = i_size_read(inode);
428         if (offset >= isize)
429                 goto fail;
430
431         /* handle inline data case */
432         if (f2fs_has_inline_data(inode)) {
433                 if (whence == SEEK_HOLE) {
434                         data_ofs = isize;
435                         goto found;
436                 } else if (whence == SEEK_DATA) {
437                         data_ofs = offset;
438                         goto found;
439                 }
440         }
441
442         pgofs = (pgoff_t)(offset >> PAGE_SHIFT);
443
444         for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
445                 set_new_dnode(&dn, inode, NULL, NULL, 0);
446                 err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
447                 if (err && err != -ENOENT) {
448                         goto fail;
449                 } else if (err == -ENOENT) {
450                         /* direct node does not exists */
451                         if (whence == SEEK_DATA) {
452                                 pgofs = f2fs_get_next_page_offset(&dn, pgofs);
453                                 continue;
454                         } else {
455                                 goto found;
456                         }
457                 }
458
459                 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
460
461                 /* find data/hole in dnode block */
462                 for (; dn.ofs_in_node < end_offset;
463                                 dn.ofs_in_node++, pgofs++,
464                                 data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
465                         block_t blkaddr;
466
467                         blkaddr = f2fs_data_blkaddr(&dn);
468
469                         if (__is_valid_data_blkaddr(blkaddr) &&
470                                 !f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
471                                         blkaddr, DATA_GENERIC_ENHANCE)) {
472                                 f2fs_put_dnode(&dn);
473                                 goto fail;
474                         }
475
476                         if (__found_offset(file->f_mapping, blkaddr,
477                                                         pgofs, whence)) {
478                                 f2fs_put_dnode(&dn);
479                                 goto found;
480                         }
481                 }
482                 f2fs_put_dnode(&dn);
483         }
484
485         if (whence == SEEK_DATA)
486                 goto fail;
487 found:
488         if (whence == SEEK_HOLE && data_ofs > isize)
489                 data_ofs = isize;
490         inode_unlock(inode);
491         return vfs_setpos(file, data_ofs, maxbytes);
492 fail:
493         inode_unlock(inode);
494         return -ENXIO;
495 }
496
497 static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
498 {
499         struct inode *inode = file->f_mapping->host;
500         loff_t maxbytes = inode->i_sb->s_maxbytes;
501
502         if (f2fs_compressed_file(inode))
503                 maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS;
504
505         switch (whence) {
506         case SEEK_SET:
507         case SEEK_CUR:
508         case SEEK_END:
509                 return generic_file_llseek_size(file, offset, whence,
510                                                 maxbytes, i_size_read(inode));
511         case SEEK_DATA:
512         case SEEK_HOLE:
513                 if (offset < 0)
514                         return -ENXIO;
515                 return f2fs_seek_block(file, offset, whence);
516         }
517
518         return -EINVAL;
519 }
520
521 static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
522 {
523         struct inode *inode = file_inode(file);
524
525         if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
526                 return -EIO;
527
528         if (!f2fs_is_compress_backend_ready(inode))
529                 return -EOPNOTSUPP;
530
531         file_accessed(file);
532         vma->vm_ops = &f2fs_file_vm_ops;
533         set_inode_flag(inode, FI_MMAP_FILE);
534         return 0;
535 }
536
537 static int f2fs_file_open(struct inode *inode, struct file *filp)
538 {
539         int err = fscrypt_file_open(inode, filp);
540
541         if (err)
542                 return err;
543
544         if (!f2fs_is_compress_backend_ready(inode))
545                 return -EOPNOTSUPP;
546
547         err = fsverity_file_open(inode, filp);
548         if (err)
549                 return err;
550
551         filp->f_mode |= FMODE_NOWAIT;
552
553         return dquot_file_open(inode, filp);
554 }
555
556 void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
557 {
558         struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
559         struct f2fs_node *raw_node;
560         int nr_free = 0, ofs = dn->ofs_in_node, len = count;
561         __le32 *addr;
562         int base = 0;
563         bool compressed_cluster = false;
564         int cluster_index = 0, valid_blocks = 0;
565         int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
566         bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks);
567
568         if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
569                 base = get_extra_isize(dn->inode);
570
571         raw_node = F2FS_NODE(dn->node_page);
572         addr = blkaddr_in_node(raw_node) + base + ofs;
573
574         /* Assumption: truncateion starts with cluster */
575         for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
576                 block_t blkaddr = le32_to_cpu(*addr);
577
578                 if (f2fs_compressed_file(dn->inode) &&
579                                         !(cluster_index & (cluster_size - 1))) {
580                         if (compressed_cluster)
581                                 f2fs_i_compr_blocks_update(dn->inode,
582                                                         valid_blocks, false);
583                         compressed_cluster = (blkaddr == COMPRESS_ADDR);
584                         valid_blocks = 0;
585                 }
586
587                 if (blkaddr == NULL_ADDR)
588                         continue;
589
590                 dn->data_blkaddr = NULL_ADDR;
591                 f2fs_set_data_blkaddr(dn);
592
593                 if (__is_valid_data_blkaddr(blkaddr)) {
594                         if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
595                                         DATA_GENERIC_ENHANCE))
596                                 continue;
597                         if (compressed_cluster)
598                                 valid_blocks++;
599                 }
600
601                 if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
602                         clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
603
604                 f2fs_invalidate_blocks(sbi, blkaddr);
605
606                 if (!released || blkaddr != COMPRESS_ADDR)
607                         nr_free++;
608         }
609
610         if (compressed_cluster)
611                 f2fs_i_compr_blocks_update(dn->inode, valid_blocks, false);
612
613         if (nr_free) {
614                 pgoff_t fofs;
615                 /*
616                  * once we invalidate valid blkaddr in range [ofs, ofs + count],
617                  * we will invalidate all blkaddr in the whole range.
618                  */
619                 fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
620                                                         dn->inode) + ofs;
621                 f2fs_update_extent_cache_range(dn, fofs, 0, len);
622                 dec_valid_block_count(sbi, dn->inode, nr_free);
623         }
624         dn->ofs_in_node = ofs;
625
626         f2fs_update_time(sbi, REQ_TIME);
627         trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
628                                          dn->ofs_in_node, nr_free);
629 }
630
631 void f2fs_truncate_data_blocks(struct dnode_of_data *dn)
632 {
633         f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
634 }
635
636 static int truncate_partial_data_page(struct inode *inode, u64 from,
637                                                                 bool cache_only)
638 {
639         loff_t offset = from & (PAGE_SIZE - 1);
640         pgoff_t index = from >> PAGE_SHIFT;
641         struct address_space *mapping = inode->i_mapping;
642         struct page *page;
643
644         if (!offset && !cache_only)
645                 return 0;
646
647         if (cache_only) {
648                 page = find_lock_page(mapping, index);
649                 if (page && PageUptodate(page))
650                         goto truncate_out;
651                 f2fs_put_page(page, 1);
652                 return 0;
653         }
654
655         page = f2fs_get_lock_data_page(inode, index, true);
656         if (IS_ERR(page))
657                 return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
658 truncate_out:
659         f2fs_wait_on_page_writeback(page, DATA, true, true);
660         zero_user(page, offset, PAGE_SIZE - offset);
661
662         /* An encrypted inode should have a key and truncate the last page. */
663         f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode));
664         if (!cache_only)
665                 set_page_dirty(page);
666         f2fs_put_page(page, 1);
667         return 0;
668 }
669
670 int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock)
671 {
672         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
673         struct dnode_of_data dn;
674         pgoff_t free_from;
675         int count = 0, err = 0;
676         struct page *ipage;
677         bool truncate_page = false;
678
679         trace_f2fs_truncate_blocks_enter(inode, from);
680
681         free_from = (pgoff_t)F2FS_BLK_ALIGN(from);
682
683         if (free_from >= max_file_blocks(inode))
684                 goto free_partial;
685
686         if (lock)
687                 f2fs_lock_op(sbi);
688
689         ipage = f2fs_get_node_page(sbi, inode->i_ino);
690         if (IS_ERR(ipage)) {
691                 err = PTR_ERR(ipage);
692                 goto out;
693         }
694
695         if (f2fs_has_inline_data(inode)) {
696                 f2fs_truncate_inline_inode(inode, ipage, from);
697                 f2fs_put_page(ipage, 1);
698                 truncate_page = true;
699                 goto out;
700         }
701
702         set_new_dnode(&dn, inode, ipage, NULL, 0);
703         err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
704         if (err) {
705                 if (err == -ENOENT)
706                         goto free_next;
707                 goto out;
708         }
709
710         count = ADDRS_PER_PAGE(dn.node_page, inode);
711
712         count -= dn.ofs_in_node;
713         f2fs_bug_on(sbi, count < 0);
714
715         if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
716                 f2fs_truncate_data_blocks_range(&dn, count);
717                 free_from += count;
718         }
719
720         f2fs_put_dnode(&dn);
721 free_next:
722         err = f2fs_truncate_inode_blocks(inode, free_from);
723 out:
724         if (lock)
725                 f2fs_unlock_op(sbi);
726 free_partial:
727         /* lastly zero out the first data page */
728         if (!err)
729                 err = truncate_partial_data_page(inode, from, truncate_page);
730
731         trace_f2fs_truncate_blocks_exit(inode, err);
732         return err;
733 }
734
735 int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
736 {
737         u64 free_from = from;
738         int err;
739
740 #ifdef CONFIG_F2FS_FS_COMPRESSION
741         /*
742          * for compressed file, only support cluster size
743          * aligned truncation.
744          */
745         if (f2fs_compressed_file(inode))
746                 free_from = round_up(from,
747                                 F2FS_I(inode)->i_cluster_size << PAGE_SHIFT);
748 #endif
749
750         err = f2fs_do_truncate_blocks(inode, free_from, lock);
751         if (err)
752                 return err;
753
754 #ifdef CONFIG_F2FS_FS_COMPRESSION
755         /*
756          * For compressed file, after release compress blocks, don't allow write
757          * direct, but we should allow write direct after truncate to zero.
758          */
759         if (f2fs_compressed_file(inode) && !free_from
760                         && is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
761                 clear_inode_flag(inode, FI_COMPRESS_RELEASED);
762
763         if (from != free_from) {
764                 err = f2fs_truncate_partial_cluster(inode, from, lock);
765                 if (err)
766                         return err;
767         }
768 #endif
769
770         return 0;
771 }
772
773 int f2fs_truncate(struct inode *inode)
774 {
775         int err;
776
777         if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
778                 return -EIO;
779
780         if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
781                                 S_ISLNK(inode->i_mode)))
782                 return 0;
783
784         trace_f2fs_truncate(inode);
785
786         if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
787                 f2fs_show_injection_info(F2FS_I_SB(inode), FAULT_TRUNCATE);
788                 return -EIO;
789         }
790
791         err = f2fs_dquot_initialize(inode);
792         if (err)
793                 return err;
794
795         /* we should check inline_data size */
796         if (!f2fs_may_inline_data(inode)) {
797                 err = f2fs_convert_inline_inode(inode);
798                 if (err)
799                         return err;
800         }
801
802         err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
803         if (err)
804                 return err;
805
806         inode->i_mtime = inode->i_ctime = current_time(inode);
807         f2fs_mark_inode_dirty_sync(inode, false);
808         return 0;
809 }
810
811 static bool f2fs_force_buffered_io(struct inode *inode, int rw)
812 {
813         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
814
815         if (!fscrypt_dio_supported(inode))
816                 return true;
817         if (fsverity_active(inode))
818                 return true;
819         if (f2fs_compressed_file(inode))
820                 return true;
821
822         /* disallow direct IO if any of devices has unaligned blksize */
823         if (f2fs_is_multi_device(sbi) && !sbi->aligned_blksize)
824                 return true;
825         /*
826          * for blkzoned device, fallback direct IO to buffered IO, so
827          * all IOs can be serialized by log-structured write.
828          */
829         if (f2fs_sb_has_blkzoned(sbi) && (rw == WRITE))
830                 return true;
831         if (f2fs_lfs_mode(sbi) && rw == WRITE && F2FS_IO_ALIGNED(sbi))
832                 return true;
833         if (is_sbi_flag_set(sbi, SBI_CP_DISABLED))
834                 return true;
835
836         return false;
837 }
838
839 int f2fs_getattr(struct user_namespace *mnt_userns, const struct path *path,
840                  struct kstat *stat, u32 request_mask, unsigned int query_flags)
841 {
842         struct inode *inode = d_inode(path->dentry);
843         struct f2fs_inode_info *fi = F2FS_I(inode);
844         struct f2fs_inode *ri = NULL;
845         unsigned int flags;
846
847         if (f2fs_has_extra_attr(inode) &&
848                         f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
849                         F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
850                 stat->result_mask |= STATX_BTIME;
851                 stat->btime.tv_sec = fi->i_crtime.tv_sec;
852                 stat->btime.tv_nsec = fi->i_crtime.tv_nsec;
853         }
854
855         /*
856          * Return the DIO alignment restrictions if requested.  We only return
857          * this information when requested, since on encrypted files it might
858          * take a fair bit of work to get if the file wasn't opened recently.
859          *
860          * f2fs sometimes supports DIO reads but not DIO writes.  STATX_DIOALIGN
861          * cannot represent that, so in that case we report no DIO support.
862          */
863         if ((request_mask & STATX_DIOALIGN) && S_ISREG(inode->i_mode)) {
864                 unsigned int bsize = i_blocksize(inode);
865
866                 stat->result_mask |= STATX_DIOALIGN;
867                 if (!f2fs_force_buffered_io(inode, WRITE)) {
868                         stat->dio_mem_align = bsize;
869                         stat->dio_offset_align = bsize;
870                 }
871         }
872
873         flags = fi->i_flags;
874         if (flags & F2FS_COMPR_FL)
875                 stat->attributes |= STATX_ATTR_COMPRESSED;
876         if (flags & F2FS_APPEND_FL)
877                 stat->attributes |= STATX_ATTR_APPEND;
878         if (IS_ENCRYPTED(inode))
879                 stat->attributes |= STATX_ATTR_ENCRYPTED;
880         if (flags & F2FS_IMMUTABLE_FL)
881                 stat->attributes |= STATX_ATTR_IMMUTABLE;
882         if (flags & F2FS_NODUMP_FL)
883                 stat->attributes |= STATX_ATTR_NODUMP;
884         if (IS_VERITY(inode))
885                 stat->attributes |= STATX_ATTR_VERITY;
886
887         stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
888                                   STATX_ATTR_APPEND |
889                                   STATX_ATTR_ENCRYPTED |
890                                   STATX_ATTR_IMMUTABLE |
891                                   STATX_ATTR_NODUMP |
892                                   STATX_ATTR_VERITY);
893
894         generic_fillattr(mnt_userns, inode, stat);
895
896         /* we need to show initial sectors used for inline_data/dentries */
897         if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
898                                         f2fs_has_inline_dentry(inode))
899                 stat->blocks += (stat->size + 511) >> 9;
900
901         return 0;
902 }
903
904 #ifdef CONFIG_F2FS_FS_POSIX_ACL
905 static void __setattr_copy(struct user_namespace *mnt_userns,
906                            struct inode *inode, const struct iattr *attr)
907 {
908         unsigned int ia_valid = attr->ia_valid;
909
910         i_uid_update(mnt_userns, attr, inode);
911         i_gid_update(mnt_userns, attr, inode);
912         if (ia_valid & ATTR_ATIME)
913                 inode->i_atime = attr->ia_atime;
914         if (ia_valid & ATTR_MTIME)
915                 inode->i_mtime = attr->ia_mtime;
916         if (ia_valid & ATTR_CTIME)
917                 inode->i_ctime = attr->ia_ctime;
918         if (ia_valid & ATTR_MODE) {
919                 umode_t mode = attr->ia_mode;
920                 vfsgid_t vfsgid = i_gid_into_vfsgid(mnt_userns, inode);
921
922                 if (!vfsgid_in_group_p(vfsgid) &&
923                     !capable_wrt_inode_uidgid(mnt_userns, inode, CAP_FSETID))
924                         mode &= ~S_ISGID;
925                 set_acl_inode(inode, mode);
926         }
927 }
928 #else
929 #define __setattr_copy setattr_copy
930 #endif
931
932 int f2fs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
933                  struct iattr *attr)
934 {
935         struct inode *inode = d_inode(dentry);
936         int err;
937
938         if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
939                 return -EIO;
940
941         if (unlikely(IS_IMMUTABLE(inode)))
942                 return -EPERM;
943
944         if (unlikely(IS_APPEND(inode) &&
945                         (attr->ia_valid & (ATTR_MODE | ATTR_UID |
946                                   ATTR_GID | ATTR_TIMES_SET))))
947                 return -EPERM;
948
949         if ((attr->ia_valid & ATTR_SIZE) &&
950                 !f2fs_is_compress_backend_ready(inode))
951                 return -EOPNOTSUPP;
952
953         err = setattr_prepare(mnt_userns, dentry, attr);
954         if (err)
955                 return err;
956
957         err = fscrypt_prepare_setattr(dentry, attr);
958         if (err)
959                 return err;
960
961         err = fsverity_prepare_setattr(dentry, attr);
962         if (err)
963                 return err;
964
965         if (is_quota_modification(mnt_userns, inode, attr)) {
966                 err = f2fs_dquot_initialize(inode);
967                 if (err)
968                         return err;
969         }
970         if (i_uid_needs_update(mnt_userns, attr, inode) ||
971             i_gid_needs_update(mnt_userns, attr, inode)) {
972                 f2fs_lock_op(F2FS_I_SB(inode));
973                 err = dquot_transfer(mnt_userns, inode, attr);
974                 if (err) {
975                         set_sbi_flag(F2FS_I_SB(inode),
976                                         SBI_QUOTA_NEED_REPAIR);
977                         f2fs_unlock_op(F2FS_I_SB(inode));
978                         return err;
979                 }
980                 /*
981                  * update uid/gid under lock_op(), so that dquot and inode can
982                  * be updated atomically.
983                  */
984                 i_uid_update(mnt_userns, attr, inode);
985                 i_gid_update(mnt_userns, attr, inode);
986                 f2fs_mark_inode_dirty_sync(inode, true);
987                 f2fs_unlock_op(F2FS_I_SB(inode));
988         }
989
990         if (attr->ia_valid & ATTR_SIZE) {
991                 loff_t old_size = i_size_read(inode);
992
993                 if (attr->ia_size > MAX_INLINE_DATA(inode)) {
994                         /*
995                          * should convert inline inode before i_size_write to
996                          * keep smaller than inline_data size with inline flag.
997                          */
998                         err = f2fs_convert_inline_inode(inode);
999                         if (err)
1000                                 return err;
1001                 }
1002
1003                 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1004                 filemap_invalidate_lock(inode->i_mapping);
1005
1006                 truncate_setsize(inode, attr->ia_size);
1007
1008                 if (attr->ia_size <= old_size)
1009                         err = f2fs_truncate(inode);
1010                 /*
1011                  * do not trim all blocks after i_size if target size is
1012                  * larger than i_size.
1013                  */
1014                 filemap_invalidate_unlock(inode->i_mapping);
1015                 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1016                 if (err)
1017                         return err;
1018
1019                 spin_lock(&F2FS_I(inode)->i_size_lock);
1020                 inode->i_mtime = inode->i_ctime = current_time(inode);
1021                 F2FS_I(inode)->last_disk_size = i_size_read(inode);
1022                 spin_unlock(&F2FS_I(inode)->i_size_lock);
1023         }
1024
1025         __setattr_copy(mnt_userns, inode, attr);
1026
1027         if (attr->ia_valid & ATTR_MODE) {
1028                 err = posix_acl_chmod(mnt_userns, inode, f2fs_get_inode_mode(inode));
1029
1030                 if (is_inode_flag_set(inode, FI_ACL_MODE)) {
1031                         if (!err)
1032                                 inode->i_mode = F2FS_I(inode)->i_acl_mode;
1033                         clear_inode_flag(inode, FI_ACL_MODE);
1034                 }
1035         }
1036
1037         /* file size may changed here */
1038         f2fs_mark_inode_dirty_sync(inode, true);
1039
1040         /* inode change will produce dirty node pages flushed by checkpoint */
1041         f2fs_balance_fs(F2FS_I_SB(inode), true);
1042
1043         return err;
1044 }
1045
1046 const struct inode_operations f2fs_file_inode_operations = {
1047         .getattr        = f2fs_getattr,
1048         .setattr        = f2fs_setattr,
1049         .get_acl        = f2fs_get_acl,
1050         .set_acl        = f2fs_set_acl,
1051         .listxattr      = f2fs_listxattr,
1052         .fiemap         = f2fs_fiemap,
1053         .fileattr_get   = f2fs_fileattr_get,
1054         .fileattr_set   = f2fs_fileattr_set,
1055 };
1056
1057 static int fill_zero(struct inode *inode, pgoff_t index,
1058                                         loff_t start, loff_t len)
1059 {
1060         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1061         struct page *page;
1062
1063         if (!len)
1064                 return 0;
1065
1066         f2fs_balance_fs(sbi, true);
1067
1068         f2fs_lock_op(sbi);
1069         page = f2fs_get_new_data_page(inode, NULL, index, false);
1070         f2fs_unlock_op(sbi);
1071
1072         if (IS_ERR(page))
1073                 return PTR_ERR(page);
1074
1075         f2fs_wait_on_page_writeback(page, DATA, true, true);
1076         zero_user(page, start, len);
1077         set_page_dirty(page);
1078         f2fs_put_page(page, 1);
1079         return 0;
1080 }
1081
1082 int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
1083 {
1084         int err;
1085
1086         while (pg_start < pg_end) {
1087                 struct dnode_of_data dn;
1088                 pgoff_t end_offset, count;
1089
1090                 set_new_dnode(&dn, inode, NULL, NULL, 0);
1091                 err = f2fs_get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
1092                 if (err) {
1093                         if (err == -ENOENT) {
1094                                 pg_start = f2fs_get_next_page_offset(&dn,
1095                                                                 pg_start);
1096                                 continue;
1097                         }
1098                         return err;
1099                 }
1100
1101                 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
1102                 count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);
1103
1104                 f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);
1105
1106                 f2fs_truncate_data_blocks_range(&dn, count);
1107                 f2fs_put_dnode(&dn);
1108
1109                 pg_start += count;
1110         }
1111         return 0;
1112 }
1113
1114 static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
1115 {
1116         pgoff_t pg_start, pg_end;
1117         loff_t off_start, off_end;
1118         int ret;
1119
1120         ret = f2fs_convert_inline_inode(inode);
1121         if (ret)
1122                 return ret;
1123
1124         pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
1125         pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
1126
1127         off_start = offset & (PAGE_SIZE - 1);
1128         off_end = (offset + len) & (PAGE_SIZE - 1);
1129
1130         if (pg_start == pg_end) {
1131                 ret = fill_zero(inode, pg_start, off_start,
1132                                                 off_end - off_start);
1133                 if (ret)
1134                         return ret;
1135         } else {
1136                 if (off_start) {
1137                         ret = fill_zero(inode, pg_start++, off_start,
1138                                                 PAGE_SIZE - off_start);
1139                         if (ret)
1140                                 return ret;
1141                 }
1142                 if (off_end) {
1143                         ret = fill_zero(inode, pg_end, 0, off_end);
1144                         if (ret)
1145                                 return ret;
1146                 }
1147
1148                 if (pg_start < pg_end) {
1149                         loff_t blk_start, blk_end;
1150                         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1151
1152                         f2fs_balance_fs(sbi, true);
1153
1154                         blk_start = (loff_t)pg_start << PAGE_SHIFT;
1155                         blk_end = (loff_t)pg_end << PAGE_SHIFT;
1156
1157                         f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1158                         filemap_invalidate_lock(inode->i_mapping);
1159
1160                         truncate_pagecache_range(inode, blk_start, blk_end - 1);
1161
1162                         f2fs_lock_op(sbi);
1163                         ret = f2fs_truncate_hole(inode, pg_start, pg_end);
1164                         f2fs_unlock_op(sbi);
1165
1166                         filemap_invalidate_unlock(inode->i_mapping);
1167                         f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1168                 }
1169         }
1170
1171         return ret;
1172 }
1173
1174 static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
1175                                 int *do_replace, pgoff_t off, pgoff_t len)
1176 {
1177         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1178         struct dnode_of_data dn;
1179         int ret, done, i;
1180
1181 next_dnode:
1182         set_new_dnode(&dn, inode, NULL, NULL, 0);
1183         ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
1184         if (ret && ret != -ENOENT) {
1185                 return ret;
1186         } else if (ret == -ENOENT) {
1187                 if (dn.max_level == 0)
1188                         return -ENOENT;
1189                 done = min((pgoff_t)ADDRS_PER_BLOCK(inode) -
1190                                                 dn.ofs_in_node, len);
1191                 blkaddr += done;
1192                 do_replace += done;
1193                 goto next;
1194         }
1195
1196         done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
1197                                                         dn.ofs_in_node, len);
1198         for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
1199                 *blkaddr = f2fs_data_blkaddr(&dn);
1200
1201                 if (__is_valid_data_blkaddr(*blkaddr) &&
1202                         !f2fs_is_valid_blkaddr(sbi, *blkaddr,
1203                                         DATA_GENERIC_ENHANCE)) {
1204                         f2fs_put_dnode(&dn);
1205                         f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
1206                         return -EFSCORRUPTED;
1207                 }
1208
1209                 if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {
1210
1211                         if (f2fs_lfs_mode(sbi)) {
1212                                 f2fs_put_dnode(&dn);
1213                                 return -EOPNOTSUPP;
1214                         }
1215
1216                         /* do not invalidate this block address */
1217                         f2fs_update_data_blkaddr(&dn, NULL_ADDR);
1218                         *do_replace = 1;
1219                 }
1220         }
1221         f2fs_put_dnode(&dn);
1222 next:
1223         len -= done;
1224         off += done;
1225         if (len)
1226                 goto next_dnode;
1227         return 0;
1228 }
1229
1230 static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
1231                                 int *do_replace, pgoff_t off, int len)
1232 {
1233         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1234         struct dnode_of_data dn;
1235         int ret, i;
1236
1237         for (i = 0; i < len; i++, do_replace++, blkaddr++) {
1238                 if (*do_replace == 0)
1239                         continue;
1240
1241                 set_new_dnode(&dn, inode, NULL, NULL, 0);
1242                 ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
1243                 if (ret) {
1244                         dec_valid_block_count(sbi, inode, 1);
1245                         f2fs_invalidate_blocks(sbi, *blkaddr);
1246                 } else {
1247                         f2fs_update_data_blkaddr(&dn, *blkaddr);
1248                 }
1249                 f2fs_put_dnode(&dn);
1250         }
1251         return 0;
1252 }
1253
1254 static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
1255                         block_t *blkaddr, int *do_replace,
1256                         pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
1257 {
1258         struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
1259         pgoff_t i = 0;
1260         int ret;
1261
1262         while (i < len) {
1263                 if (blkaddr[i] == NULL_ADDR && !full) {
1264                         i++;
1265                         continue;
1266                 }
1267
1268                 if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
1269                         struct dnode_of_data dn;
1270                         struct node_info ni;
1271                         size_t new_size;
1272                         pgoff_t ilen;
1273
1274                         set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
1275                         ret = f2fs_get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
1276                         if (ret)
1277                                 return ret;
1278
1279                         ret = f2fs_get_node_info(sbi, dn.nid, &ni, false);
1280                         if (ret) {
1281                                 f2fs_put_dnode(&dn);
1282                                 return ret;
1283                         }
1284
1285                         ilen = min((pgoff_t)
1286                                 ADDRS_PER_PAGE(dn.node_page, dst_inode) -
1287                                                 dn.ofs_in_node, len - i);
1288                         do {
1289                                 dn.data_blkaddr = f2fs_data_blkaddr(&dn);
1290                                 f2fs_truncate_data_blocks_range(&dn, 1);
1291
1292                                 if (do_replace[i]) {
1293                                         f2fs_i_blocks_write(src_inode,
1294                                                         1, false, false);
1295                                         f2fs_i_blocks_write(dst_inode,
1296                                                         1, true, false);
1297                                         f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
1298                                         blkaddr[i], ni.version, true, false);
1299
1300                                         do_replace[i] = 0;
1301                                 }
1302                                 dn.ofs_in_node++;
1303                                 i++;
1304                                 new_size = (loff_t)(dst + i) << PAGE_SHIFT;
1305                                 if (dst_inode->i_size < new_size)
1306                                         f2fs_i_size_write(dst_inode, new_size);
1307                         } while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));
1308
1309                         f2fs_put_dnode(&dn);
1310                 } else {
1311                         struct page *psrc, *pdst;
1312
1313                         psrc = f2fs_get_lock_data_page(src_inode,
1314                                                         src + i, true);
1315                         if (IS_ERR(psrc))
1316                                 return PTR_ERR(psrc);
1317                         pdst = f2fs_get_new_data_page(dst_inode, NULL, dst + i,
1318                                                                 true);
1319                         if (IS_ERR(pdst)) {
1320                                 f2fs_put_page(psrc, 1);
1321                                 return PTR_ERR(pdst);
1322                         }
1323                         memcpy_page(pdst, 0, psrc, 0, PAGE_SIZE);
1324                         set_page_dirty(pdst);
1325                         f2fs_put_page(pdst, 1);
1326                         f2fs_put_page(psrc, 1);
1327
1328                         ret = f2fs_truncate_hole(src_inode,
1329                                                 src + i, src + i + 1);
1330                         if (ret)
1331                                 return ret;
1332                         i++;
1333                 }
1334         }
1335         return 0;
1336 }
1337
1338 static int __exchange_data_block(struct inode *src_inode,
1339                         struct inode *dst_inode, pgoff_t src, pgoff_t dst,
1340                         pgoff_t len, bool full)
1341 {
1342         block_t *src_blkaddr;
1343         int *do_replace;
1344         pgoff_t olen;
1345         int ret;
1346
1347         while (len) {
1348                 olen = min((pgoff_t)4 * ADDRS_PER_BLOCK(src_inode), len);
1349
1350                 src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
1351                                         array_size(olen, sizeof(block_t)),
1352                                         GFP_NOFS);
1353                 if (!src_blkaddr)
1354                         return -ENOMEM;
1355
1356                 do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
1357                                         array_size(olen, sizeof(int)),
1358                                         GFP_NOFS);
1359                 if (!do_replace) {
1360                         kvfree(src_blkaddr);
1361                         return -ENOMEM;
1362                 }
1363
1364                 ret = __read_out_blkaddrs(src_inode, src_blkaddr,
1365                                         do_replace, src, olen);
1366                 if (ret)
1367                         goto roll_back;
1368
1369                 ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
1370                                         do_replace, src, dst, olen, full);
1371                 if (ret)
1372                         goto roll_back;
1373
1374                 src += olen;
1375                 dst += olen;
1376                 len -= olen;
1377
1378                 kvfree(src_blkaddr);
1379                 kvfree(do_replace);
1380         }
1381         return 0;
1382
1383 roll_back:
1384         __roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen);
1385         kvfree(src_blkaddr);
1386         kvfree(do_replace);
1387         return ret;
1388 }
1389
1390 static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
1391 {
1392         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1393         pgoff_t nrpages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1394         pgoff_t start = offset >> PAGE_SHIFT;
1395         pgoff_t end = (offset + len) >> PAGE_SHIFT;
1396         int ret;
1397
1398         f2fs_balance_fs(sbi, true);
1399
1400         /* avoid gc operation during block exchange */
1401         f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1402         filemap_invalidate_lock(inode->i_mapping);
1403
1404         f2fs_lock_op(sbi);
1405         f2fs_drop_extent_tree(inode);
1406         truncate_pagecache(inode, offset);
1407         ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
1408         f2fs_unlock_op(sbi);
1409
1410         filemap_invalidate_unlock(inode->i_mapping);
1411         f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1412         return ret;
1413 }
1414
1415 static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
1416 {
1417         loff_t new_size;
1418         int ret;
1419
1420         if (offset + len >= i_size_read(inode))
1421                 return -EINVAL;
1422
1423         /* collapse range should be aligned to block size of f2fs. */
1424         if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
1425                 return -EINVAL;
1426
1427         ret = f2fs_convert_inline_inode(inode);
1428         if (ret)
1429                 return ret;
1430
1431         /* write out all dirty pages from offset */
1432         ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1433         if (ret)
1434                 return ret;
1435
1436         ret = f2fs_do_collapse(inode, offset, len);
1437         if (ret)
1438                 return ret;
1439
1440         /* write out all moved pages, if possible */
1441         filemap_invalidate_lock(inode->i_mapping);
1442         filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1443         truncate_pagecache(inode, offset);
1444
1445         new_size = i_size_read(inode) - len;
1446         ret = f2fs_truncate_blocks(inode, new_size, true);
1447         filemap_invalidate_unlock(inode->i_mapping);
1448         if (!ret)
1449                 f2fs_i_size_write(inode, new_size);
1450         return ret;
1451 }
1452
1453 static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
1454                                                                 pgoff_t end)
1455 {
1456         struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1457         pgoff_t index = start;
1458         unsigned int ofs_in_node = dn->ofs_in_node;
1459         blkcnt_t count = 0;
1460         int ret;
1461
1462         for (; index < end; index++, dn->ofs_in_node++) {
1463                 if (f2fs_data_blkaddr(dn) == NULL_ADDR)
1464                         count++;
1465         }
1466
1467         dn->ofs_in_node = ofs_in_node;
1468         ret = f2fs_reserve_new_blocks(dn, count);
1469         if (ret)
1470                 return ret;
1471
1472         dn->ofs_in_node = ofs_in_node;
1473         for (index = start; index < end; index++, dn->ofs_in_node++) {
1474                 dn->data_blkaddr = f2fs_data_blkaddr(dn);
1475                 /*
1476                  * f2fs_reserve_new_blocks will not guarantee entire block
1477                  * allocation.
1478                  */
1479                 if (dn->data_blkaddr == NULL_ADDR) {
1480                         ret = -ENOSPC;
1481                         break;
1482                 }
1483
1484                 if (dn->data_blkaddr == NEW_ADDR)
1485                         continue;
1486
1487                 if (!f2fs_is_valid_blkaddr(sbi, dn->data_blkaddr,
1488                                         DATA_GENERIC_ENHANCE)) {
1489                         ret = -EFSCORRUPTED;
1490                         f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
1491                         break;
1492                 }
1493
1494                 f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
1495                 dn->data_blkaddr = NEW_ADDR;
1496                 f2fs_set_data_blkaddr(dn);
1497         }
1498
1499         f2fs_update_extent_cache_range(dn, start, 0, index - start);
1500
1501         return ret;
1502 }
1503
1504 static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
1505                                                                 int mode)
1506 {
1507         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1508         struct address_space *mapping = inode->i_mapping;
1509         pgoff_t index, pg_start, pg_end;
1510         loff_t new_size = i_size_read(inode);
1511         loff_t off_start, off_end;
1512         int ret = 0;
1513
1514         ret = inode_newsize_ok(inode, (len + offset));
1515         if (ret)
1516                 return ret;
1517
1518         ret = f2fs_convert_inline_inode(inode);
1519         if (ret)
1520                 return ret;
1521
1522         ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
1523         if (ret)
1524                 return ret;
1525
1526         pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
1527         pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
1528
1529         off_start = offset & (PAGE_SIZE - 1);
1530         off_end = (offset + len) & (PAGE_SIZE - 1);
1531
1532         if (pg_start == pg_end) {
1533                 ret = fill_zero(inode, pg_start, off_start,
1534                                                 off_end - off_start);
1535                 if (ret)
1536                         return ret;
1537
1538                 new_size = max_t(loff_t, new_size, offset + len);
1539         } else {
1540                 if (off_start) {
1541                         ret = fill_zero(inode, pg_start++, off_start,
1542                                                 PAGE_SIZE - off_start);
1543                         if (ret)
1544                                 return ret;
1545
1546                         new_size = max_t(loff_t, new_size,
1547                                         (loff_t)pg_start << PAGE_SHIFT);
1548                 }
1549
1550                 for (index = pg_start; index < pg_end;) {
1551                         struct dnode_of_data dn;
1552                         unsigned int end_offset;
1553                         pgoff_t end;
1554
1555                         f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1556                         filemap_invalidate_lock(mapping);
1557
1558                         truncate_pagecache_range(inode,
1559                                 (loff_t)index << PAGE_SHIFT,
1560                                 ((loff_t)pg_end << PAGE_SHIFT) - 1);
1561
1562                         f2fs_lock_op(sbi);
1563
1564                         set_new_dnode(&dn, inode, NULL, NULL, 0);
1565                         ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
1566                         if (ret) {
1567                                 f2fs_unlock_op(sbi);
1568                                 filemap_invalidate_unlock(mapping);
1569                                 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1570                                 goto out;
1571                         }
1572
1573                         end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
1574                         end = min(pg_end, end_offset - dn.ofs_in_node + index);
1575
1576                         ret = f2fs_do_zero_range(&dn, index, end);
1577                         f2fs_put_dnode(&dn);
1578
1579                         f2fs_unlock_op(sbi);
1580                         filemap_invalidate_unlock(mapping);
1581                         f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1582
1583                         f2fs_balance_fs(sbi, dn.node_changed);
1584
1585                         if (ret)
1586                                 goto out;
1587
1588                         index = end;
1589                         new_size = max_t(loff_t, new_size,
1590                                         (loff_t)index << PAGE_SHIFT);
1591                 }
1592
1593                 if (off_end) {
1594                         ret = fill_zero(inode, pg_end, 0, off_end);
1595                         if (ret)
1596                                 goto out;
1597
1598                         new_size = max_t(loff_t, new_size, offset + len);
1599                 }
1600         }
1601
1602 out:
1603         if (new_size > i_size_read(inode)) {
1604                 if (mode & FALLOC_FL_KEEP_SIZE)
1605                         file_set_keep_isize(inode);
1606                 else
1607                         f2fs_i_size_write(inode, new_size);
1608         }
1609         return ret;
1610 }
1611
1612 static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
1613 {
1614         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1615         struct address_space *mapping = inode->i_mapping;
1616         pgoff_t nr, pg_start, pg_end, delta, idx;
1617         loff_t new_size;
1618         int ret = 0;
1619
1620         new_size = i_size_read(inode) + len;
1621         ret = inode_newsize_ok(inode, new_size);
1622         if (ret)
1623                 return ret;
1624
1625         if (offset >= i_size_read(inode))
1626                 return -EINVAL;
1627
1628         /* insert range should be aligned to block size of f2fs. */
1629         if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
1630                 return -EINVAL;
1631
1632         ret = f2fs_convert_inline_inode(inode);
1633         if (ret)
1634                 return ret;
1635
1636         f2fs_balance_fs(sbi, true);
1637
1638         filemap_invalidate_lock(mapping);
1639         ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
1640         filemap_invalidate_unlock(mapping);
1641         if (ret)
1642                 return ret;
1643
1644         /* write out all dirty pages from offset */
1645         ret = filemap_write_and_wait_range(mapping, offset, LLONG_MAX);
1646         if (ret)
1647                 return ret;
1648
1649         pg_start = offset >> PAGE_SHIFT;
1650         pg_end = (offset + len) >> PAGE_SHIFT;
1651         delta = pg_end - pg_start;
1652         idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1653
1654         /* avoid gc operation during block exchange */
1655         f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1656         filemap_invalidate_lock(mapping);
1657         truncate_pagecache(inode, offset);
1658
1659         while (!ret && idx > pg_start) {
1660                 nr = idx - pg_start;
1661                 if (nr > delta)
1662                         nr = delta;
1663                 idx -= nr;
1664
1665                 f2fs_lock_op(sbi);
1666                 f2fs_drop_extent_tree(inode);
1667
1668                 ret = __exchange_data_block(inode, inode, idx,
1669                                         idx + delta, nr, false);
1670                 f2fs_unlock_op(sbi);
1671         }
1672         filemap_invalidate_unlock(mapping);
1673         f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1674
1675         /* write out all moved pages, if possible */
1676         filemap_invalidate_lock(mapping);
1677         filemap_write_and_wait_range(mapping, offset, LLONG_MAX);
1678         truncate_pagecache(inode, offset);
1679         filemap_invalidate_unlock(mapping);
1680
1681         if (!ret)
1682                 f2fs_i_size_write(inode, new_size);
1683         return ret;
1684 }
1685
1686 static int expand_inode_data(struct inode *inode, loff_t offset,
1687                                         loff_t len, int mode)
1688 {
1689         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1690         struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
1691                         .m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
1692                         .m_may_create = true };
1693         struct f2fs_gc_control gc_control = { .victim_segno = NULL_SEGNO,
1694                         .init_gc_type = FG_GC,
1695                         .should_migrate_blocks = false,
1696                         .err_gc_skipped = true,
1697                         .nr_free_secs = 0 };
1698         pgoff_t pg_start, pg_end;
1699         loff_t new_size = i_size_read(inode);
1700         loff_t off_end;
1701         block_t expanded = 0;
1702         int err;
1703
1704         err = inode_newsize_ok(inode, (len + offset));
1705         if (err)
1706                 return err;
1707
1708         err = f2fs_convert_inline_inode(inode);
1709         if (err)
1710                 return err;
1711
1712         f2fs_balance_fs(sbi, true);
1713
1714         pg_start = ((unsigned long long)offset) >> PAGE_SHIFT;
1715         pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
1716         off_end = (offset + len) & (PAGE_SIZE - 1);
1717
1718         map.m_lblk = pg_start;
1719         map.m_len = pg_end - pg_start;
1720         if (off_end)
1721                 map.m_len++;
1722
1723         if (!map.m_len)
1724                 return 0;
1725
1726         if (f2fs_is_pinned_file(inode)) {
1727                 block_t sec_blks = CAP_BLKS_PER_SEC(sbi);
1728                 block_t sec_len = roundup(map.m_len, sec_blks);
1729
1730                 map.m_len = sec_blks;
1731 next_alloc:
1732                 if (has_not_enough_free_secs(sbi, 0,
1733                         GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
1734                         f2fs_down_write(&sbi->gc_lock);
1735                         err = f2fs_gc(sbi, &gc_control);
1736                         if (err && err != -ENODATA)
1737                                 goto out_err;
1738                 }
1739
1740                 f2fs_down_write(&sbi->pin_sem);
1741
1742                 f2fs_lock_op(sbi);
1743                 f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
1744                 f2fs_unlock_op(sbi);
1745
1746                 map.m_seg_type = CURSEG_COLD_DATA_PINNED;
1747                 err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
1748                 file_dont_truncate(inode);
1749
1750                 f2fs_up_write(&sbi->pin_sem);
1751
1752                 expanded += map.m_len;
1753                 sec_len -= map.m_len;
1754                 map.m_lblk += map.m_len;
1755                 if (!err && sec_len)
1756                         goto next_alloc;
1757
1758                 map.m_len = expanded;
1759         } else {
1760                 err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
1761                 expanded = map.m_len;
1762         }
1763 out_err:
1764         if (err) {
1765                 pgoff_t last_off;
1766
1767                 if (!expanded)
1768                         return err;
1769
1770                 last_off = pg_start + expanded - 1;
1771
1772                 /* update new size to the failed position */
1773                 new_size = (last_off == pg_end) ? offset + len :
1774                                         (loff_t)(last_off + 1) << PAGE_SHIFT;
1775         } else {
1776                 new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
1777         }
1778
1779         if (new_size > i_size_read(inode)) {
1780                 if (mode & FALLOC_FL_KEEP_SIZE)
1781                         file_set_keep_isize(inode);
1782                 else
1783                         f2fs_i_size_write(inode, new_size);
1784         }
1785
1786         return err;
1787 }
1788
1789 static long f2fs_fallocate(struct file *file, int mode,
1790                                 loff_t offset, loff_t len)
1791 {
1792         struct inode *inode = file_inode(file);
1793         long ret = 0;
1794
1795         if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
1796                 return -EIO;
1797         if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
1798                 return -ENOSPC;
1799         if (!f2fs_is_compress_backend_ready(inode))
1800                 return -EOPNOTSUPP;
1801
1802         /* f2fs only support ->fallocate for regular file */
1803         if (!S_ISREG(inode->i_mode))
1804                 return -EINVAL;
1805
1806         if (IS_ENCRYPTED(inode) &&
1807                 (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
1808                 return -EOPNOTSUPP;
1809
1810         /*
1811          * Pinned file should not support partial trucation since the block
1812          * can be used by applications.
1813          */
1814         if ((f2fs_compressed_file(inode) || f2fs_is_pinned_file(inode)) &&
1815                 (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
1816                         FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE)))
1817                 return -EOPNOTSUPP;
1818
1819         if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
1820                         FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
1821                         FALLOC_FL_INSERT_RANGE))
1822                 return -EOPNOTSUPP;
1823
1824         inode_lock(inode);
1825
1826         ret = file_modified(file);
1827         if (ret)
1828                 goto out;
1829
1830         if (mode & FALLOC_FL_PUNCH_HOLE) {
1831                 if (offset >= inode->i_size)
1832                         goto out;
1833
1834                 ret = punch_hole(inode, offset, len);
1835         } else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
1836                 ret = f2fs_collapse_range(inode, offset, len);
1837         } else if (mode & FALLOC_FL_ZERO_RANGE) {
1838                 ret = f2fs_zero_range(inode, offset, len, mode);
1839         } else if (mode & FALLOC_FL_INSERT_RANGE) {
1840                 ret = f2fs_insert_range(inode, offset, len);
1841         } else {
1842                 ret = expand_inode_data(inode, offset, len, mode);
1843         }
1844
1845         if (!ret) {
1846                 inode->i_mtime = inode->i_ctime = current_time(inode);
1847                 f2fs_mark_inode_dirty_sync(inode, false);
1848                 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
1849         }
1850
1851 out:
1852         inode_unlock(inode);
1853
1854         trace_f2fs_fallocate(inode, mode, offset, len, ret);
1855         return ret;
1856 }
1857
1858 static int f2fs_release_file(struct inode *inode, struct file *filp)
1859 {
1860         /*
1861          * f2fs_relase_file is called at every close calls. So we should
1862          * not drop any inmemory pages by close called by other process.
1863          */
1864         if (!(filp->f_mode & FMODE_WRITE) ||
1865                         atomic_read(&inode->i_writecount) != 1)
1866                 return 0;
1867
1868         inode_lock(inode);
1869         f2fs_abort_atomic_write(inode, true);
1870         inode_unlock(inode);
1871
1872         return 0;
1873 }
1874
1875 static int f2fs_file_flush(struct file *file, fl_owner_t id)
1876 {
1877         struct inode *inode = file_inode(file);
1878
1879         /*
1880          * If the process doing a transaction is crashed, we should do
1881          * roll-back. Otherwise, other reader/write can see corrupted database
1882          * until all the writers close its file. Since this should be done
1883          * before dropping file lock, it needs to do in ->flush.
1884          */
1885         if (F2FS_I(inode)->atomic_write_task == current &&
1886                                 (current->flags & PF_EXITING)) {
1887                 inode_lock(inode);
1888                 f2fs_abort_atomic_write(inode, true);
1889                 inode_unlock(inode);
1890         }
1891
1892         return 0;
1893 }
1894
1895 static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
1896 {
1897         struct f2fs_inode_info *fi = F2FS_I(inode);
1898         u32 masked_flags = fi->i_flags & mask;
1899
1900         /* mask can be shrunk by flags_valid selector */
1901         iflags &= mask;
1902
1903         /* Is it quota file? Do not allow user to mess with it */
1904         if (IS_NOQUOTA(inode))
1905                 return -EPERM;
1906
1907         if ((iflags ^ masked_flags) & F2FS_CASEFOLD_FL) {
1908                 if (!f2fs_sb_has_casefold(F2FS_I_SB(inode)))
1909                         return -EOPNOTSUPP;
1910                 if (!f2fs_empty_dir(inode))
1911                         return -ENOTEMPTY;
1912         }
1913
1914         if (iflags & (F2FS_COMPR_FL | F2FS_NOCOMP_FL)) {
1915                 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
1916                         return -EOPNOTSUPP;
1917                 if ((iflags & F2FS_COMPR_FL) && (iflags & F2FS_NOCOMP_FL))
1918                         return -EINVAL;
1919         }
1920
1921         if ((iflags ^ masked_flags) & F2FS_COMPR_FL) {
1922                 if (masked_flags & F2FS_COMPR_FL) {
1923                         if (!f2fs_disable_compressed_file(inode))
1924                                 return -EINVAL;
1925                 } else {
1926                         /* try to convert inline_data to support compression */
1927                         int err = f2fs_convert_inline_inode(inode);
1928                         if (err)
1929                                 return err;
1930                         if (!f2fs_may_compress(inode))
1931                                 return -EINVAL;
1932                         if (S_ISREG(inode->i_mode) && F2FS_HAS_BLOCKS(inode))
1933                                 return -EINVAL;
1934                         if (set_compress_context(inode))
1935                                 return -EOPNOTSUPP;
1936                 }
1937         }
1938
1939         fi->i_flags = iflags | (fi->i_flags & ~mask);
1940         f2fs_bug_on(F2FS_I_SB(inode), (fi->i_flags & F2FS_COMPR_FL) &&
1941                                         (fi->i_flags & F2FS_NOCOMP_FL));
1942
1943         if (fi->i_flags & F2FS_PROJINHERIT_FL)
1944                 set_inode_flag(inode, FI_PROJ_INHERIT);
1945         else
1946                 clear_inode_flag(inode, FI_PROJ_INHERIT);
1947
1948         inode->i_ctime = current_time(inode);
1949         f2fs_set_inode_flags(inode);
1950         f2fs_mark_inode_dirty_sync(inode, true);
1951         return 0;
1952 }
1953
1954 /* FS_IOC_[GS]ETFLAGS and FS_IOC_FS[GS]ETXATTR support */
1955
1956 /*
1957  * To make a new on-disk f2fs i_flag gettable via FS_IOC_GETFLAGS, add an entry
1958  * for it to f2fs_fsflags_map[], and add its FS_*_FL equivalent to
1959  * F2FS_GETTABLE_FS_FL.  To also make it settable via FS_IOC_SETFLAGS, also add
1960  * its FS_*_FL equivalent to F2FS_SETTABLE_FS_FL.
1961  *
1962  * Translating flags to fsx_flags value used by FS_IOC_FSGETXATTR and
1963  * FS_IOC_FSSETXATTR is done by the VFS.
1964  */
1965
1966 static const struct {
1967         u32 iflag;
1968         u32 fsflag;
1969 } f2fs_fsflags_map[] = {
1970         { F2FS_COMPR_FL,        FS_COMPR_FL },
1971         { F2FS_SYNC_FL,         FS_SYNC_FL },
1972         { F2FS_IMMUTABLE_FL,    FS_IMMUTABLE_FL },
1973         { F2FS_APPEND_FL,       FS_APPEND_FL },
1974         { F2FS_NODUMP_FL,       FS_NODUMP_FL },
1975         { F2FS_NOATIME_FL,      FS_NOATIME_FL },
1976         { F2FS_NOCOMP_FL,       FS_NOCOMP_FL },
1977         { F2FS_INDEX_FL,        FS_INDEX_FL },
1978         { F2FS_DIRSYNC_FL,      FS_DIRSYNC_FL },
1979         { F2FS_PROJINHERIT_FL,  FS_PROJINHERIT_FL },
1980         { F2FS_CASEFOLD_FL,     FS_CASEFOLD_FL },
1981 };
1982
1983 #define F2FS_GETTABLE_FS_FL (           \
1984                 FS_COMPR_FL |           \
1985                 FS_SYNC_FL |            \
1986                 FS_IMMUTABLE_FL |       \
1987                 FS_APPEND_FL |          \
1988                 FS_NODUMP_FL |          \
1989                 FS_NOATIME_FL |         \
1990                 FS_NOCOMP_FL |          \
1991                 FS_INDEX_FL |           \
1992                 FS_DIRSYNC_FL |         \
1993                 FS_PROJINHERIT_FL |     \
1994                 FS_ENCRYPT_FL |         \
1995                 FS_INLINE_DATA_FL |     \
1996                 FS_NOCOW_FL |           \
1997                 FS_VERITY_FL |          \
1998                 FS_CASEFOLD_FL)
1999
2000 #define F2FS_SETTABLE_FS_FL (           \
2001                 FS_COMPR_FL |           \
2002                 FS_SYNC_FL |            \
2003                 FS_IMMUTABLE_FL |       \
2004                 FS_APPEND_FL |          \
2005                 FS_NODUMP_FL |          \
2006                 FS_NOATIME_FL |         \
2007                 FS_NOCOMP_FL |          \
2008                 FS_DIRSYNC_FL |         \
2009                 FS_PROJINHERIT_FL |     \
2010                 FS_CASEFOLD_FL)
2011
2012 /* Convert f2fs on-disk i_flags to FS_IOC_{GET,SET}FLAGS flags */
2013 static inline u32 f2fs_iflags_to_fsflags(u32 iflags)
2014 {
2015         u32 fsflags = 0;
2016         int i;
2017
2018         for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
2019                 if (iflags & f2fs_fsflags_map[i].iflag)
2020                         fsflags |= f2fs_fsflags_map[i].fsflag;
2021
2022         return fsflags;
2023 }
2024
2025 /* Convert FS_IOC_{GET,SET}FLAGS flags to f2fs on-disk i_flags */
2026 static inline u32 f2fs_fsflags_to_iflags(u32 fsflags)
2027 {
2028         u32 iflags = 0;
2029         int i;
2030
2031         for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
2032                 if (fsflags & f2fs_fsflags_map[i].fsflag)
2033                         iflags |= f2fs_fsflags_map[i].iflag;
2034
2035         return iflags;
2036 }
2037
2038 static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
2039 {
2040         struct inode *inode = file_inode(filp);
2041
2042         return put_user(inode->i_generation, (int __user *)arg);
2043 }
2044
2045 static int f2fs_ioc_start_atomic_write(struct file *filp)
2046 {
2047         struct inode *inode = file_inode(filp);
2048         struct user_namespace *mnt_userns = file_mnt_user_ns(filp);
2049         struct f2fs_inode_info *fi = F2FS_I(inode);
2050         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2051         struct inode *pinode;
2052         loff_t isize;
2053         int ret;
2054
2055         if (!inode_owner_or_capable(mnt_userns, inode))
2056                 return -EACCES;
2057
2058         if (!S_ISREG(inode->i_mode))
2059                 return -EINVAL;
2060
2061         if (filp->f_flags & O_DIRECT)
2062                 return -EINVAL;
2063
2064         ret = mnt_want_write_file(filp);
2065         if (ret)
2066                 return ret;
2067
2068         inode_lock(inode);
2069
2070         if (!f2fs_disable_compressed_file(inode)) {
2071                 ret = -EINVAL;
2072                 goto out;
2073         }
2074
2075         if (f2fs_is_atomic_file(inode))
2076                 goto out;
2077
2078         ret = f2fs_convert_inline_inode(inode);
2079         if (ret)
2080                 goto out;
2081
2082         f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
2083
2084         /*
2085          * Should wait end_io to count F2FS_WB_CP_DATA correctly by
2086          * f2fs_is_atomic_file.
2087          */
2088         if (get_dirty_pages(inode))
2089                 f2fs_warn(sbi, "Unexpected flush for atomic writes: ino=%lu, npages=%u",
2090                           inode->i_ino, get_dirty_pages(inode));
2091         ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
2092         if (ret) {
2093                 f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
2094                 goto out;
2095         }
2096
2097         /* Check if the inode already has a COW inode */
2098         if (fi->cow_inode == NULL) {
2099                 /* Create a COW inode for atomic write */
2100                 pinode = f2fs_iget(inode->i_sb, fi->i_pino);
2101                 if (IS_ERR(pinode)) {
2102                         f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
2103                         ret = PTR_ERR(pinode);
2104                         goto out;
2105                 }
2106
2107                 ret = f2fs_get_tmpfile(mnt_userns, pinode, &fi->cow_inode);
2108                 iput(pinode);
2109                 if (ret) {
2110                         f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
2111                         goto out;
2112                 }
2113
2114                 set_inode_flag(fi->cow_inode, FI_COW_FILE);
2115                 clear_inode_flag(fi->cow_inode, FI_INLINE_DATA);
2116         } else {
2117                 /* Reuse the already created COW inode */
2118                 ret = f2fs_do_truncate_blocks(fi->cow_inode, 0, true);
2119                 if (ret) {
2120                         f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
2121                         goto out;
2122                 }
2123         }
2124
2125         f2fs_write_inode(inode, NULL);
2126
2127         isize = i_size_read(inode);
2128         fi->original_i_size = isize;
2129         f2fs_i_size_write(fi->cow_inode, isize);
2130
2131         stat_inc_atomic_inode(inode);
2132
2133         set_inode_flag(inode, FI_ATOMIC_FILE);
2134         f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
2135
2136         f2fs_update_time(sbi, REQ_TIME);
2137         fi->atomic_write_task = current;
2138         stat_update_max_atomic_write(inode);
2139         fi->atomic_write_cnt = 0;
2140 out:
2141         inode_unlock(inode);
2142         mnt_drop_write_file(filp);
2143         return ret;
2144 }
2145
2146 static int f2fs_ioc_commit_atomic_write(struct file *filp)
2147 {
2148         struct inode *inode = file_inode(filp);
2149         struct user_namespace *mnt_userns = file_mnt_user_ns(filp);
2150         int ret;
2151
2152         if (!inode_owner_or_capable(mnt_userns, inode))
2153                 return -EACCES;
2154
2155         ret = mnt_want_write_file(filp);
2156         if (ret)
2157                 return ret;
2158
2159         f2fs_balance_fs(F2FS_I_SB(inode), true);
2160
2161         inode_lock(inode);
2162
2163         if (f2fs_is_atomic_file(inode)) {
2164                 ret = f2fs_commit_atomic_write(inode);
2165                 if (!ret)
2166                         ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
2167
2168                 f2fs_abort_atomic_write(inode, ret);
2169         } else {
2170                 ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
2171         }
2172
2173         inode_unlock(inode);
2174         mnt_drop_write_file(filp);
2175         return ret;
2176 }
2177
2178 static int f2fs_ioc_abort_atomic_write(struct file *filp)
2179 {
2180         struct inode *inode = file_inode(filp);
2181         struct user_namespace *mnt_userns = file_mnt_user_ns(filp);
2182         int ret;
2183
2184         if (!inode_owner_or_capable(mnt_userns, inode))
2185                 return -EACCES;
2186
2187         ret = mnt_want_write_file(filp);
2188         if (ret)
2189                 return ret;
2190
2191         inode_lock(inode);
2192
2193         f2fs_abort_atomic_write(inode, true);
2194
2195         inode_unlock(inode);
2196
2197         mnt_drop_write_file(filp);
2198         f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2199         return ret;
2200 }
2201
2202 static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
2203 {
2204         struct inode *inode = file_inode(filp);
2205         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2206         struct super_block *sb = sbi->sb;
2207         __u32 in;
2208         int ret = 0;
2209
2210         if (!capable(CAP_SYS_ADMIN))
2211                 return -EPERM;
2212
2213         if (get_user(in, (__u32 __user *)arg))
2214                 return -EFAULT;
2215
2216         if (in != F2FS_GOING_DOWN_FULLSYNC) {
2217                 ret = mnt_want_write_file(filp);
2218                 if (ret) {
2219                         if (ret == -EROFS) {
2220                                 ret = 0;
2221                                 f2fs_stop_checkpoint(sbi, false,
2222                                                 STOP_CP_REASON_SHUTDOWN);
2223                                 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2224                                 trace_f2fs_shutdown(sbi, in, ret);
2225                         }
2226                         return ret;
2227                 }
2228         }
2229
2230         switch (in) {
2231         case F2FS_GOING_DOWN_FULLSYNC:
2232                 ret = freeze_bdev(sb->s_bdev);
2233                 if (ret)
2234                         goto out;
2235                 f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
2236                 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2237                 thaw_bdev(sb->s_bdev);
2238                 break;
2239         case F2FS_GOING_DOWN_METASYNC:
2240                 /* do checkpoint only */
2241                 ret = f2fs_sync_fs(sb, 1);
2242                 if (ret)
2243                         goto out;
2244                 f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
2245                 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2246                 break;
2247         case F2FS_GOING_DOWN_NOSYNC:
2248                 f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
2249                 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2250                 break;
2251         case F2FS_GOING_DOWN_METAFLUSH:
2252                 f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
2253                 f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
2254                 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2255                 break;
2256         case F2FS_GOING_DOWN_NEED_FSCK:
2257                 set_sbi_flag(sbi, SBI_NEED_FSCK);
2258                 set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
2259                 set_sbi_flag(sbi, SBI_IS_DIRTY);
2260                 /* do checkpoint only */
2261                 ret = f2fs_sync_fs(sb, 1);
2262                 goto out;
2263         default:
2264                 ret = -EINVAL;
2265                 goto out;
2266         }
2267
2268         f2fs_stop_gc_thread(sbi);
2269         f2fs_stop_discard_thread(sbi);
2270
2271         f2fs_drop_discard_cmd(sbi);
2272         clear_opt(sbi, DISCARD);
2273
2274         f2fs_update_time(sbi, REQ_TIME);
2275 out:
2276         if (in != F2FS_GOING_DOWN_FULLSYNC)
2277                 mnt_drop_write_file(filp);
2278
2279         trace_f2fs_shutdown(sbi, in, ret);
2280
2281         return ret;
2282 }
2283
2284 static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
2285 {
2286         struct inode *inode = file_inode(filp);
2287         struct super_block *sb = inode->i_sb;
2288         struct fstrim_range range;
2289         int ret;
2290
2291         if (!capable(CAP_SYS_ADMIN))
2292                 return -EPERM;
2293
2294         if (!f2fs_hw_support_discard(F2FS_SB(sb)))
2295                 return -EOPNOTSUPP;
2296
2297         if (copy_from_user(&range, (struct fstrim_range __user *)arg,
2298                                 sizeof(range)))
2299                 return -EFAULT;
2300
2301         ret = mnt_want_write_file(filp);
2302         if (ret)
2303                 return ret;
2304
2305         range.minlen = max((unsigned int)range.minlen,
2306                            bdev_discard_granularity(sb->s_bdev));
2307         ret = f2fs_trim_fs(F2FS_SB(sb), &range);
2308         mnt_drop_write_file(filp);
2309         if (ret < 0)
2310                 return ret;
2311
2312         if (copy_to_user((struct fstrim_range __user *)arg, &range,
2313                                 sizeof(range)))
2314                 return -EFAULT;
2315         f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2316         return 0;
2317 }
2318
2319 static bool uuid_is_nonzero(__u8 u[16])
2320 {
2321         int i;
2322
2323         for (i = 0; i < 16; i++)
2324                 if (u[i])
2325                         return true;
2326         return false;
2327 }
2328
2329 static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
2330 {
2331         struct inode *inode = file_inode(filp);
2332
2333         if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode)))
2334                 return -EOPNOTSUPP;
2335
2336         f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2337
2338         return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
2339 }
2340
2341 static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
2342 {
2343         if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2344                 return -EOPNOTSUPP;
2345         return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
2346 }
2347
2348 static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
2349 {
2350         struct inode *inode = file_inode(filp);
2351         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2352         u8 encrypt_pw_salt[16];
2353         int err;
2354
2355         if (!f2fs_sb_has_encrypt(sbi))
2356                 return -EOPNOTSUPP;
2357
2358         err = mnt_want_write_file(filp);
2359         if (err)
2360                 return err;
2361
2362         f2fs_down_write(&sbi->sb_lock);
2363
2364         if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
2365                 goto got_it;
2366
2367         /* update superblock with uuid */
2368         generate_random_uuid(sbi->raw_super->encrypt_pw_salt);
2369
2370         err = f2fs_commit_super(sbi, false);
2371         if (err) {
2372                 /* undo new data */
2373                 memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
2374                 goto out_err;
2375         }
2376 got_it:
2377         memcpy(encrypt_pw_salt, sbi->raw_super->encrypt_pw_salt, 16);
2378 out_err:
2379         f2fs_up_write(&sbi->sb_lock);
2380         mnt_drop_write_file(filp);
2381
2382         if (!err && copy_to_user((__u8 __user *)arg, encrypt_pw_salt, 16))
2383                 err = -EFAULT;
2384
2385         return err;
2386 }
2387
2388 static int f2fs_ioc_get_encryption_policy_ex(struct file *filp,
2389                                              unsigned long arg)
2390 {
2391         if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2392                 return -EOPNOTSUPP;
2393
2394         return fscrypt_ioctl_get_policy_ex(filp, (void __user *)arg);
2395 }
2396
2397 static int f2fs_ioc_add_encryption_key(struct file *filp, unsigned long arg)
2398 {
2399         if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2400                 return -EOPNOTSUPP;
2401
2402         return fscrypt_ioctl_add_key(filp, (void __user *)arg);
2403 }
2404
2405 static int f2fs_ioc_remove_encryption_key(struct file *filp, unsigned long arg)
2406 {
2407         if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2408                 return -EOPNOTSUPP;
2409
2410         return fscrypt_ioctl_remove_key(filp, (void __user *)arg);
2411 }
2412
2413 static int f2fs_ioc_remove_encryption_key_all_users(struct file *filp,
2414                                                     unsigned long arg)
2415 {
2416         if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2417                 return -EOPNOTSUPP;
2418
2419         return fscrypt_ioctl_remove_key_all_users(filp, (void __user *)arg);
2420 }
2421
2422 static int f2fs_ioc_get_encryption_key_status(struct file *filp,
2423                                               unsigned long arg)
2424 {
2425         if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2426                 return -EOPNOTSUPP;
2427
2428         return fscrypt_ioctl_get_key_status(filp, (void __user *)arg);
2429 }
2430
2431 static int f2fs_ioc_get_encryption_nonce(struct file *filp, unsigned long arg)
2432 {
2433         if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2434                 return -EOPNOTSUPP;
2435
2436         return fscrypt_ioctl_get_nonce(filp, (void __user *)arg);
2437 }
2438
2439 static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
2440 {
2441         struct inode *inode = file_inode(filp);
2442         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2443         struct f2fs_gc_control gc_control = { .victim_segno = NULL_SEGNO,
2444                         .no_bg_gc = false,
2445                         .should_migrate_blocks = false,
2446                         .nr_free_secs = 0 };
2447         __u32 sync;
2448         int ret;
2449
2450         if (!capable(CAP_SYS_ADMIN))
2451                 return -EPERM;
2452
2453         if (get_user(sync, (__u32 __user *)arg))
2454                 return -EFAULT;
2455
2456         if (f2fs_readonly(sbi->sb))
2457                 return -EROFS;
2458
2459         ret = mnt_want_write_file(filp);
2460         if (ret)
2461                 return ret;
2462
2463         if (!sync) {
2464                 if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
2465                         ret = -EBUSY;
2466                         goto out;
2467                 }
2468         } else {
2469                 f2fs_down_write(&sbi->gc_lock);
2470         }
2471
2472         gc_control.init_gc_type = sync ? FG_GC : BG_GC;
2473         gc_control.err_gc_skipped = sync;
2474         ret = f2fs_gc(sbi, &gc_control);
2475 out:
2476         mnt_drop_write_file(filp);
2477         return ret;
2478 }
2479
2480 static int __f2fs_ioc_gc_range(struct file *filp, struct f2fs_gc_range *range)
2481 {
2482         struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
2483         struct f2fs_gc_control gc_control = {
2484                         .init_gc_type = range->sync ? FG_GC : BG_GC,
2485                         .no_bg_gc = false,
2486                         .should_migrate_blocks = false,
2487                         .err_gc_skipped = range->sync,
2488                         .nr_free_secs = 0 };
2489         u64 end;
2490         int ret;
2491
2492         if (!capable(CAP_SYS_ADMIN))
2493                 return -EPERM;
2494         if (f2fs_readonly(sbi->sb))
2495                 return -EROFS;
2496
2497         end = range->start + range->len;
2498         if (end < range->start || range->start < MAIN_BLKADDR(sbi) ||
2499                                         end >= MAX_BLKADDR(sbi))
2500                 return -EINVAL;
2501
2502         ret = mnt_want_write_file(filp);
2503         if (ret)
2504                 return ret;
2505
2506 do_more:
2507         if (!range->sync) {
2508                 if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
2509                         ret = -EBUSY;
2510                         goto out;
2511                 }
2512         } else {
2513                 f2fs_down_write(&sbi->gc_lock);
2514         }
2515
2516         gc_control.victim_segno = GET_SEGNO(sbi, range->start);
2517         ret = f2fs_gc(sbi, &gc_control);
2518         if (ret) {
2519                 if (ret == -EBUSY)
2520                         ret = -EAGAIN;
2521                 goto out;
2522         }
2523         range->start += CAP_BLKS_PER_SEC(sbi);
2524         if (range->start <= end)
2525                 goto do_more;
2526 out:
2527         mnt_drop_write_file(filp);
2528         return ret;
2529 }
2530
2531 static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
2532 {
2533         struct f2fs_gc_range range;
2534
2535         if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
2536                                                         sizeof(range)))
2537                 return -EFAULT;
2538         return __f2fs_ioc_gc_range(filp, &range);
2539 }
2540
2541 static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
2542 {
2543         struct inode *inode = file_inode(filp);
2544         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2545         int ret;
2546
2547         if (!capable(CAP_SYS_ADMIN))
2548                 return -EPERM;
2549
2550         if (f2fs_readonly(sbi->sb))
2551                 return -EROFS;
2552
2553         if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2554                 f2fs_info(sbi, "Skipping Checkpoint. Checkpoints currently disabled.");
2555                 return -EINVAL;
2556         }
2557
2558         ret = mnt_want_write_file(filp);
2559         if (ret)
2560                 return ret;
2561
2562         ret = f2fs_sync_fs(sbi->sb, 1);
2563
2564         mnt_drop_write_file(filp);
2565         return ret;
2566 }
2567
2568 static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
2569                                         struct file *filp,
2570                                         struct f2fs_defragment *range)
2571 {
2572         struct inode *inode = file_inode(filp);
2573         struct f2fs_map_blocks map = { .m_next_extent = NULL,
2574                                         .m_seg_type = NO_CHECK_TYPE,
2575                                         .m_may_create = false };
2576         struct extent_info ei = {0, 0, 0};
2577         pgoff_t pg_start, pg_end, next_pgofs;
2578         unsigned int blk_per_seg = sbi->blocks_per_seg;
2579         unsigned int total = 0, sec_num;
2580         block_t blk_end = 0;
2581         bool fragmented = false;
2582         int err;
2583
2584         pg_start = range->start >> PAGE_SHIFT;
2585         pg_end = (range->start + range->len) >> PAGE_SHIFT;
2586
2587         f2fs_balance_fs(sbi, true);
2588
2589         inode_lock(inode);
2590
2591         /* if in-place-update policy is enabled, don't waste time here */
2592         set_inode_flag(inode, FI_OPU_WRITE);
2593         if (f2fs_should_update_inplace(inode, NULL)) {
2594                 err = -EINVAL;
2595                 goto out;
2596         }
2597
2598         /* writeback all dirty pages in the range */
2599         err = filemap_write_and_wait_range(inode->i_mapping, range->start,
2600                                                 range->start + range->len - 1);
2601         if (err)
2602                 goto out;
2603
2604         /*
2605          * lookup mapping info in extent cache, skip defragmenting if physical
2606          * block addresses are continuous.
2607          */
2608         if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
2609                 if (ei.fofs + ei.len >= pg_end)
2610                         goto out;
2611         }
2612
2613         map.m_lblk = pg_start;
2614         map.m_next_pgofs = &next_pgofs;
2615
2616         /*
2617          * lookup mapping info in dnode page cache, skip defragmenting if all
2618          * physical block addresses are continuous even if there are hole(s)
2619          * in logical blocks.
2620          */
2621         while (map.m_lblk < pg_end) {
2622                 map.m_len = pg_end - map.m_lblk;
2623                 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
2624                 if (err)
2625                         goto out;
2626
2627                 if (!(map.m_flags & F2FS_MAP_FLAGS)) {
2628                         map.m_lblk = next_pgofs;
2629                         continue;
2630                 }
2631
2632                 if (blk_end && blk_end != map.m_pblk)
2633                         fragmented = true;
2634
2635                 /* record total count of block that we're going to move */
2636                 total += map.m_len;
2637
2638                 blk_end = map.m_pblk + map.m_len;
2639
2640                 map.m_lblk += map.m_len;
2641         }
2642
2643         if (!fragmented) {
2644                 total = 0;
2645                 goto out;
2646         }
2647
2648         sec_num = DIV_ROUND_UP(total, CAP_BLKS_PER_SEC(sbi));
2649
2650         /*
2651          * make sure there are enough free section for LFS allocation, this can
2652          * avoid defragment running in SSR mode when free section are allocated
2653          * intensively
2654          */
2655         if (has_not_enough_free_secs(sbi, 0, sec_num)) {
2656                 err = -EAGAIN;
2657                 goto out;
2658         }
2659
2660         map.m_lblk = pg_start;
2661         map.m_len = pg_end - pg_start;
2662         total = 0;
2663
2664         while (map.m_lblk < pg_end) {
2665                 pgoff_t idx;
2666                 int cnt = 0;
2667
2668 do_map:
2669                 map.m_len = pg_end - map.m_lblk;
2670                 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
2671                 if (err)
2672                         goto clear_out;
2673
2674                 if (!(map.m_flags & F2FS_MAP_FLAGS)) {
2675                         map.m_lblk = next_pgofs;
2676                         goto check;
2677                 }
2678
2679                 set_inode_flag(inode, FI_SKIP_WRITES);
2680
2681                 idx = map.m_lblk;
2682                 while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
2683                         struct page *page;
2684
2685                         page = f2fs_get_lock_data_page(inode, idx, true);
2686                         if (IS_ERR(page)) {
2687                                 err = PTR_ERR(page);
2688                                 goto clear_out;
2689                         }
2690
2691                         set_page_dirty(page);
2692                         set_page_private_gcing(page);
2693                         f2fs_put_page(page, 1);
2694
2695                         idx++;
2696                         cnt++;
2697                         total++;
2698                 }
2699
2700                 map.m_lblk = idx;
2701 check:
2702                 if (map.m_lblk < pg_end && cnt < blk_per_seg)
2703                         goto do_map;
2704
2705                 clear_inode_flag(inode, FI_SKIP_WRITES);
2706
2707                 err = filemap_fdatawrite(inode->i_mapping);
2708                 if (err)
2709                         goto out;
2710         }
2711 clear_out:
2712         clear_inode_flag(inode, FI_SKIP_WRITES);
2713 out:
2714         clear_inode_flag(inode, FI_OPU_WRITE);
2715         inode_unlock(inode);
2716         if (!err)
2717                 range->len = (u64)total << PAGE_SHIFT;
2718         return err;
2719 }
2720
2721 static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
2722 {
2723         struct inode *inode = file_inode(filp);
2724         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2725         struct f2fs_defragment range;
2726         int err;
2727
2728         if (!capable(CAP_SYS_ADMIN))
2729                 return -EPERM;
2730
2731         if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
2732                 return -EINVAL;
2733
2734         if (f2fs_readonly(sbi->sb))
2735                 return -EROFS;
2736
2737         if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
2738                                                         sizeof(range)))
2739                 return -EFAULT;
2740
2741         /* verify alignment of offset & size */
2742         if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
2743                 return -EINVAL;
2744
2745         if (unlikely((range.start + range.len) >> PAGE_SHIFT >
2746                                         max_file_blocks(inode)))
2747                 return -EINVAL;
2748
2749         err = mnt_want_write_file(filp);
2750         if (err)
2751                 return err;
2752
2753         err = f2fs_defragment_range(sbi, filp, &range);
2754         mnt_drop_write_file(filp);
2755
2756         f2fs_update_time(sbi, REQ_TIME);
2757         if (err < 0)
2758                 return err;
2759
2760         if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
2761                                                         sizeof(range)))
2762                 return -EFAULT;
2763
2764         return 0;
2765 }
2766
2767 static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
2768                         struct file *file_out, loff_t pos_out, size_t len)
2769 {
2770         struct inode *src = file_inode(file_in);
2771         struct inode *dst = file_inode(file_out);
2772         struct f2fs_sb_info *sbi = F2FS_I_SB(src);
2773         size_t olen = len, dst_max_i_size = 0;
2774         size_t dst_osize;
2775         int ret;
2776
2777         if (file_in->f_path.mnt != file_out->f_path.mnt ||
2778                                 src->i_sb != dst->i_sb)
2779                 return -EXDEV;
2780
2781         if (unlikely(f2fs_readonly(src->i_sb)))
2782                 return -EROFS;
2783
2784         if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
2785                 return -EINVAL;
2786
2787         if (IS_ENCRYPTED(src) || IS_ENCRYPTED(dst))
2788                 return -EOPNOTSUPP;
2789
2790         if (pos_out < 0 || pos_in < 0)
2791                 return -EINVAL;
2792
2793         if (src == dst) {
2794                 if (pos_in == pos_out)
2795                         return 0;
2796                 if (pos_out > pos_in && pos_out < pos_in + len)
2797                         return -EINVAL;
2798         }
2799
2800         inode_lock(src);
2801         if (src != dst) {
2802                 ret = -EBUSY;
2803                 if (!inode_trylock(dst))
2804                         goto out;
2805         }
2806
2807         ret = -EINVAL;
2808         if (pos_in + len > src->i_size || pos_in + len < pos_in)
2809                 goto out_unlock;
2810         if (len == 0)
2811                 olen = len = src->i_size - pos_in;
2812         if (pos_in + len == src->i_size)
2813                 len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
2814         if (len == 0) {
2815                 ret = 0;
2816                 goto out_unlock;
2817         }
2818
2819         dst_osize = dst->i_size;
2820         if (pos_out + olen > dst->i_size)
2821                 dst_max_i_size = pos_out + olen;
2822
2823         /* verify the end result is block aligned */
2824         if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
2825                         !IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
2826                         !IS_ALIGNED(pos_out, F2FS_BLKSIZE))
2827                 goto out_unlock;
2828
2829         ret = f2fs_convert_inline_inode(src);
2830         if (ret)
2831                 goto out_unlock;
2832
2833         ret = f2fs_convert_inline_inode(dst);
2834         if (ret)
2835                 goto out_unlock;
2836
2837         /* write out all dirty pages from offset */
2838         ret = filemap_write_and_wait_range(src->i_mapping,
2839                                         pos_in, pos_in + len);
2840         if (ret)
2841                 goto out_unlock;
2842
2843         ret = filemap_write_and_wait_range(dst->i_mapping,
2844                                         pos_out, pos_out + len);
2845         if (ret)
2846                 goto out_unlock;
2847
2848         f2fs_balance_fs(sbi, true);
2849
2850         f2fs_down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2851         if (src != dst) {
2852                 ret = -EBUSY;
2853                 if (!f2fs_down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
2854                         goto out_src;
2855         }
2856
2857         f2fs_lock_op(sbi);
2858         ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
2859                                 pos_out >> F2FS_BLKSIZE_BITS,
2860                                 len >> F2FS_BLKSIZE_BITS, false);
2861
2862         if (!ret) {
2863                 if (dst_max_i_size)
2864                         f2fs_i_size_write(dst, dst_max_i_size);
2865                 else if (dst_osize != dst->i_size)
2866                         f2fs_i_size_write(dst, dst_osize);
2867         }
2868         f2fs_unlock_op(sbi);
2869
2870         if (src != dst)
2871                 f2fs_up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
2872 out_src:
2873         f2fs_up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2874 out_unlock:
2875         if (src != dst)
2876                 inode_unlock(dst);
2877 out:
2878         inode_unlock(src);
2879         return ret;
2880 }
2881
2882 static int __f2fs_ioc_move_range(struct file *filp,
2883                                 struct f2fs_move_range *range)
2884 {
2885         struct fd dst;
2886         int err;
2887
2888         if (!(filp->f_mode & FMODE_READ) ||
2889                         !(filp->f_mode & FMODE_WRITE))
2890                 return -EBADF;
2891
2892         dst = fdget(range->dst_fd);
2893         if (!dst.file)
2894                 return -EBADF;
2895
2896         if (!(dst.file->f_mode & FMODE_WRITE)) {
2897                 err = -EBADF;
2898                 goto err_out;
2899         }
2900
2901         err = mnt_want_write_file(filp);
2902         if (err)
2903                 goto err_out;
2904
2905         err = f2fs_move_file_range(filp, range->pos_in, dst.file,
2906                                         range->pos_out, range->len);
2907
2908         mnt_drop_write_file(filp);
2909 err_out:
2910         fdput(dst);
2911         return err;
2912 }
2913
2914 static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
2915 {
2916         struct f2fs_move_range range;
2917
2918         if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
2919                                                         sizeof(range)))
2920                 return -EFAULT;
2921         return __f2fs_ioc_move_range(filp, &range);
2922 }
2923
2924 static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
2925 {
2926         struct inode *inode = file_inode(filp);
2927         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2928         struct sit_info *sm = SIT_I(sbi);
2929         unsigned int start_segno = 0, end_segno = 0;
2930         unsigned int dev_start_segno = 0, dev_end_segno = 0;
2931         struct f2fs_flush_device range;
2932         struct f2fs_gc_control gc_control = {
2933                         .init_gc_type = FG_GC,
2934                         .should_migrate_blocks = true,
2935                         .err_gc_skipped = true,
2936                         .nr_free_secs = 0 };
2937         int ret;
2938
2939         if (!capable(CAP_SYS_ADMIN))
2940                 return -EPERM;
2941
2942         if (f2fs_readonly(sbi->sb))
2943                 return -EROFS;
2944
2945         if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2946                 return -EINVAL;
2947
2948         if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
2949                                                         sizeof(range)))
2950                 return -EFAULT;
2951
2952         if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num ||
2953                         __is_large_section(sbi)) {
2954                 f2fs_warn(sbi, "Can't flush %u in %d for segs_per_sec %u != 1",
2955                           range.dev_num, sbi->s_ndevs, sbi->segs_per_sec);
2956                 return -EINVAL;
2957         }
2958
2959         ret = mnt_want_write_file(filp);
2960         if (ret)
2961                 return ret;
2962
2963         if (range.dev_num != 0)
2964                 dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
2965         dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);
2966
2967         start_segno = sm->last_victim[FLUSH_DEVICE];
2968         if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
2969                 start_segno = dev_start_segno;
2970         end_segno = min(start_segno + range.segments, dev_end_segno);
2971
2972         while (start_segno < end_segno) {
2973                 if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
2974                         ret = -EBUSY;
2975                         goto out;
2976                 }
2977                 sm->last_victim[GC_CB] = end_segno + 1;
2978                 sm->last_victim[GC_GREEDY] = end_segno + 1;
2979                 sm->last_victim[ALLOC_NEXT] = end_segno + 1;
2980
2981                 gc_control.victim_segno = start_segno;
2982                 ret = f2fs_gc(sbi, &gc_control);
2983                 if (ret == -EAGAIN)
2984                         ret = 0;
2985                 else if (ret < 0)
2986                         break;
2987                 start_segno++;
2988         }
2989 out:
2990         mnt_drop_write_file(filp);
2991         return ret;
2992 }
2993
2994 static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
2995 {
2996         struct inode *inode = file_inode(filp);
2997         u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);
2998
2999         /* Must validate to set it with SQLite behavior in Android. */
3000         sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;
3001
3002         return put_user(sb_feature, (u32 __user *)arg);
3003 }
3004
3005 #ifdef CONFIG_QUOTA
3006 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
3007 {
3008         struct dquot *transfer_to[MAXQUOTAS] = {};
3009         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3010         struct super_block *sb = sbi->sb;
3011         int err;
3012
3013         transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
3014         if (IS_ERR(transfer_to[PRJQUOTA]))
3015                 return PTR_ERR(transfer_to[PRJQUOTA]);
3016
3017         err = __dquot_transfer(inode, transfer_to);
3018         if (err)
3019                 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
3020         dqput(transfer_to[PRJQUOTA]);
3021         return err;
3022 }
3023
3024 static int f2fs_ioc_setproject(struct inode *inode, __u32 projid)
3025 {
3026         struct f2fs_inode_info *fi = F2FS_I(inode);
3027         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3028         struct f2fs_inode *ri = NULL;
3029         kprojid_t kprojid;
3030         int err;
3031
3032         if (!f2fs_sb_has_project_quota(sbi)) {
3033                 if (projid != F2FS_DEF_PROJID)
3034                         return -EOPNOTSUPP;
3035                 else
3036                         return 0;
3037         }
3038
3039         if (!f2fs_has_extra_attr(inode))
3040                 return -EOPNOTSUPP;
3041
3042         kprojid = make_kprojid(&init_user_ns, (projid_t)projid);
3043
3044         if (projid_eq(kprojid, fi->i_projid))
3045                 return 0;
3046
3047         err = -EPERM;
3048         /* Is it quota file? Do not allow user to mess with it */
3049         if (IS_NOQUOTA(inode))
3050                 return err;
3051
3052         if (!F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid))
3053                 return -EOVERFLOW;
3054
3055         err = f2fs_dquot_initialize(inode);
3056         if (err)
3057                 return err;
3058
3059         f2fs_lock_op(sbi);
3060         err = f2fs_transfer_project_quota(inode, kprojid);
3061         if (err)
3062                 goto out_unlock;
3063
3064         fi->i_projid = kprojid;
3065         inode->i_ctime = current_time(inode);
3066         f2fs_mark_inode_dirty_sync(inode, true);
3067 out_unlock:
3068         f2fs_unlock_op(sbi);
3069         return err;
3070 }
3071 #else
3072 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
3073 {
3074         return 0;
3075 }
3076
3077 static int f2fs_ioc_setproject(struct inode *inode, __u32 projid)
3078 {
3079         if (projid != F2FS_DEF_PROJID)
3080                 return -EOPNOTSUPP;
3081         return 0;
3082 }
3083 #endif
3084
3085 int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
3086 {
3087         struct inode *inode = d_inode(dentry);
3088         struct f2fs_inode_info *fi = F2FS_I(inode);
3089         u32 fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
3090
3091         if (IS_ENCRYPTED(inode))
3092                 fsflags |= FS_ENCRYPT_FL;
3093         if (IS_VERITY(inode))
3094                 fsflags |= FS_VERITY_FL;
3095         if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
3096                 fsflags |= FS_INLINE_DATA_FL;
3097         if (is_inode_flag_set(inode, FI_PIN_FILE))
3098                 fsflags |= FS_NOCOW_FL;
3099
3100         fileattr_fill_flags(fa, fsflags & F2FS_GETTABLE_FS_FL);
3101
3102         if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)))
3103                 fa->fsx_projid = from_kprojid(&init_user_ns, fi->i_projid);
3104
3105         return 0;
3106 }
3107
3108 int f2fs_fileattr_set(struct user_namespace *mnt_userns,
3109                       struct dentry *dentry, struct fileattr *fa)
3110 {
3111         struct inode *inode = d_inode(dentry);
3112         u32 fsflags = fa->flags, mask = F2FS_SETTABLE_FS_FL;
3113         u32 iflags;
3114         int err;
3115
3116         if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
3117                 return -EIO;
3118         if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
3119                 return -ENOSPC;
3120         if (fsflags & ~F2FS_GETTABLE_FS_FL)
3121                 return -EOPNOTSUPP;
3122         fsflags &= F2FS_SETTABLE_FS_FL;
3123         if (!fa->flags_valid)
3124                 mask &= FS_COMMON_FL;
3125
3126         iflags = f2fs_fsflags_to_iflags(fsflags);
3127         if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
3128                 return -EOPNOTSUPP;
3129
3130         err = f2fs_setflags_common(inode, iflags, f2fs_fsflags_to_iflags(mask));
3131         if (!err)
3132                 err = f2fs_ioc_setproject(inode, fa->fsx_projid);
3133
3134         return err;
3135 }
3136
3137 int f2fs_pin_file_control(struct inode *inode, bool inc)
3138 {
3139         struct f2fs_inode_info *fi = F2FS_I(inode);
3140         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3141
3142         /* Use i_gc_failures for normal file as a risk signal. */
3143         if (inc)
3144                 f2fs_i_gc_failures_write(inode,
3145                                 fi->i_gc_failures[GC_FAILURE_PIN] + 1);
3146
3147         if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) {
3148                 f2fs_warn(sbi, "%s: Enable GC = ino %lx after %x GC trials",
3149                           __func__, inode->i_ino,
3150                           fi->i_gc_failures[GC_FAILURE_PIN]);
3151                 clear_inode_flag(inode, FI_PIN_FILE);
3152                 return -EAGAIN;
3153         }
3154         return 0;
3155 }
3156
3157 static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
3158 {
3159         struct inode *inode = file_inode(filp);
3160         __u32 pin;
3161         int ret = 0;
3162
3163         if (get_user(pin, (__u32 __user *)arg))
3164                 return -EFAULT;
3165
3166         if (!S_ISREG(inode->i_mode))
3167                 return -EINVAL;
3168
3169         if (f2fs_readonly(F2FS_I_SB(inode)->sb))
3170                 return -EROFS;
3171
3172         ret = mnt_want_write_file(filp);
3173         if (ret)
3174                 return ret;
3175
3176         inode_lock(inode);
3177
3178         if (!pin) {
3179                 clear_inode_flag(inode, FI_PIN_FILE);
3180                 f2fs_i_gc_failures_write(inode, 0);
3181                 goto done;
3182         }
3183
3184         if (f2fs_should_update_outplace(inode, NULL)) {
3185                 ret = -EINVAL;
3186                 goto out;
3187         }
3188
3189         if (f2fs_pin_file_control(inode, false)) {
3190                 ret = -EAGAIN;
3191                 goto out;
3192         }
3193
3194         ret = f2fs_convert_inline_inode(inode);
3195         if (ret)
3196                 goto out;
3197
3198         if (!f2fs_disable_compressed_file(inode)) {
3199                 ret = -EOPNOTSUPP;
3200                 goto out;
3201         }
3202
3203         set_inode_flag(inode, FI_PIN_FILE);
3204         ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
3205 done:
3206         f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3207 out:
3208         inode_unlock(inode);
3209         mnt_drop_write_file(filp);
3210         return ret;
3211 }
3212
3213 static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg)
3214 {
3215         struct inode *inode = file_inode(filp);
3216         __u32 pin = 0;
3217
3218         if (is_inode_flag_set(inode, FI_PIN_FILE))
3219                 pin = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
3220         return put_user(pin, (u32 __user *)arg);
3221 }
3222
3223 int f2fs_precache_extents(struct inode *inode)
3224 {
3225         struct f2fs_inode_info *fi = F2FS_I(inode);
3226         struct f2fs_map_blocks map;
3227         pgoff_t m_next_extent;
3228         loff_t end;
3229         int err;
3230
3231         if (is_inode_flag_set(inode, FI_NO_EXTENT))
3232                 return -EOPNOTSUPP;
3233
3234         map.m_lblk = 0;
3235         map.m_next_pgofs = NULL;
3236         map.m_next_extent = &m_next_extent;
3237         map.m_seg_type = NO_CHECK_TYPE;
3238         map.m_may_create = false;
3239         end = max_file_blocks(inode);
3240
3241         while (map.m_lblk < end) {
3242                 map.m_len = end - map.m_lblk;
3243
3244                 f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
3245                 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
3246                 f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
3247                 if (err)
3248                         return err;
3249
3250                 map.m_lblk = m_next_extent;
3251         }
3252
3253         return 0;
3254 }
3255
3256 static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg)
3257 {
3258         return f2fs_precache_extents(file_inode(filp));
3259 }
3260
3261 static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg)
3262 {
3263         struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
3264         __u64 block_count;
3265
3266         if (!capable(CAP_SYS_ADMIN))
3267                 return -EPERM;
3268
3269         if (f2fs_readonly(sbi->sb))
3270                 return -EROFS;
3271
3272         if (copy_from_user(&block_count, (void __user *)arg,
3273                            sizeof(block_count)))
3274                 return -EFAULT;
3275
3276         return f2fs_resize_fs(sbi, block_count);
3277 }
3278
3279 static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg)
3280 {
3281         struct inode *inode = file_inode(filp);
3282
3283         f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3284
3285         if (!f2fs_sb_has_verity(F2FS_I_SB(inode))) {
3286                 f2fs_warn(F2FS_I_SB(inode),
3287                           "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem",
3288                           inode->i_ino);
3289                 return -EOPNOTSUPP;
3290         }
3291
3292         return fsverity_ioctl_enable(filp, (const void __user *)arg);
3293 }
3294
3295 static int f2fs_ioc_measure_verity(struct file *filp, unsigned long arg)
3296 {
3297         if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
3298                 return -EOPNOTSUPP;
3299
3300         return fsverity_ioctl_measure(filp, (void __user *)arg);
3301 }
3302
3303 static int f2fs_ioc_read_verity_metadata(struct file *filp, unsigned long arg)
3304 {
3305         if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
3306                 return -EOPNOTSUPP;
3307
3308         return fsverity_ioctl_read_metadata(filp, (const void __user *)arg);
3309 }
3310
3311 static int f2fs_ioc_getfslabel(struct file *filp, unsigned long arg)
3312 {
3313         struct inode *inode = file_inode(filp);
3314         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3315         char *vbuf;
3316         int count;
3317         int err = 0;
3318
3319         vbuf = f2fs_kzalloc(sbi, MAX_VOLUME_NAME, GFP_KERNEL);
3320         if (!vbuf)
3321                 return -ENOMEM;
3322
3323         f2fs_down_read(&sbi->sb_lock);
3324         count = utf16s_to_utf8s(sbi->raw_super->volume_name,
3325                         ARRAY_SIZE(sbi->raw_super->volume_name),
3326                         UTF16_LITTLE_ENDIAN, vbuf, MAX_VOLUME_NAME);
3327         f2fs_up_read(&sbi->sb_lock);
3328
3329         if (copy_to_user((char __user *)arg, vbuf,
3330                                 min(FSLABEL_MAX, count)))
3331                 err = -EFAULT;
3332
3333         kfree(vbuf);
3334         return err;
3335 }
3336
3337 static int f2fs_ioc_setfslabel(struct file *filp, unsigned long arg)
3338 {
3339         struct inode *inode = file_inode(filp);
3340         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3341         char *vbuf;
3342         int err = 0;
3343
3344         if (!capable(CAP_SYS_ADMIN))
3345                 return -EPERM;
3346
3347         vbuf = strndup_user((const char __user *)arg, FSLABEL_MAX);
3348         if (IS_ERR(vbuf))
3349                 return PTR_ERR(vbuf);
3350
3351         err = mnt_want_write_file(filp);
3352         if (err)
3353                 goto out;
3354
3355         f2fs_down_write(&sbi->sb_lock);
3356
3357         memset(sbi->raw_super->volume_name, 0,
3358                         sizeof(sbi->raw_super->volume_name));
3359         utf8s_to_utf16s(vbuf, strlen(vbuf), UTF16_LITTLE_ENDIAN,
3360                         sbi->raw_super->volume_name,
3361                         ARRAY_SIZE(sbi->raw_super->volume_name));
3362
3363         err = f2fs_commit_super(sbi, false);
3364
3365         f2fs_up_write(&sbi->sb_lock);
3366
3367         mnt_drop_write_file(filp);
3368 out:
3369         kfree(vbuf);
3370         return err;
3371 }
3372
3373 static int f2fs_get_compress_blocks(struct file *filp, unsigned long arg)
3374 {
3375         struct inode *inode = file_inode(filp);
3376         __u64 blocks;
3377
3378         if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3379                 return -EOPNOTSUPP;
3380
3381         if (!f2fs_compressed_file(inode))
3382                 return -EINVAL;
3383
3384         blocks = atomic_read(&F2FS_I(inode)->i_compr_blocks);
3385         return put_user(blocks, (u64 __user *)arg);
3386 }
3387
3388 static int release_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
3389 {
3390         struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
3391         unsigned int released_blocks = 0;
3392         int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
3393         block_t blkaddr;
3394         int i;
3395
3396         for (i = 0; i < count; i++) {
3397                 blkaddr = data_blkaddr(dn->inode, dn->node_page,
3398                                                 dn->ofs_in_node + i);
3399
3400                 if (!__is_valid_data_blkaddr(blkaddr))
3401                         continue;
3402                 if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
3403                                         DATA_GENERIC_ENHANCE))) {
3404                         f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
3405                         return -EFSCORRUPTED;
3406                 }
3407         }
3408
3409         while (count) {
3410                 int compr_blocks = 0;
3411
3412                 for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
3413                         blkaddr = f2fs_data_blkaddr(dn);
3414
3415                         if (i == 0) {
3416                                 if (blkaddr == COMPRESS_ADDR)
3417                                         continue;
3418                                 dn->ofs_in_node += cluster_size;
3419                                 goto next;
3420                         }
3421
3422                         if (__is_valid_data_blkaddr(blkaddr))
3423                                 compr_blocks++;
3424
3425                         if (blkaddr != NEW_ADDR)
3426                                 continue;
3427
3428                         dn->data_blkaddr = NULL_ADDR;
3429                         f2fs_set_data_blkaddr(dn);
3430                 }
3431
3432                 f2fs_i_compr_blocks_update(dn->inode, compr_blocks, false);
3433                 dec_valid_block_count(sbi, dn->inode,
3434                                         cluster_size - compr_blocks);
3435
3436                 released_blocks += cluster_size - compr_blocks;
3437 next:
3438                 count -= cluster_size;
3439         }
3440
3441         return released_blocks;
3442 }
3443
3444 static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
3445 {
3446         struct inode *inode = file_inode(filp);
3447         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3448         pgoff_t page_idx = 0, last_idx;
3449         unsigned int released_blocks = 0;
3450         int ret;
3451         int writecount;
3452
3453         if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3454                 return -EOPNOTSUPP;
3455
3456         if (!f2fs_compressed_file(inode))
3457                 return -EINVAL;
3458
3459         if (f2fs_readonly(sbi->sb))
3460                 return -EROFS;
3461
3462         ret = mnt_want_write_file(filp);
3463         if (ret)
3464                 return ret;
3465
3466         f2fs_balance_fs(F2FS_I_SB(inode), true);
3467
3468         inode_lock(inode);
3469
3470         writecount = atomic_read(&inode->i_writecount);
3471         if ((filp->f_mode & FMODE_WRITE && writecount != 1) ||
3472                         (!(filp->f_mode & FMODE_WRITE) && writecount)) {
3473                 ret = -EBUSY;
3474                 goto out;
3475         }
3476
3477         if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
3478                 ret = -EINVAL;
3479                 goto out;
3480         }
3481
3482         ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
3483         if (ret)
3484                 goto out;
3485
3486         set_inode_flag(inode, FI_COMPRESS_RELEASED);
3487         inode->i_ctime = current_time(inode);
3488         f2fs_mark_inode_dirty_sync(inode, true);
3489
3490         if (!atomic_read(&F2FS_I(inode)->i_compr_blocks))
3491                 goto out;
3492
3493         f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3494         filemap_invalidate_lock(inode->i_mapping);
3495
3496         last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3497
3498         while (page_idx < last_idx) {
3499                 struct dnode_of_data dn;
3500                 pgoff_t end_offset, count;
3501
3502                 set_new_dnode(&dn, inode, NULL, NULL, 0);
3503                 ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
3504                 if (ret) {
3505                         if (ret == -ENOENT) {
3506                                 page_idx = f2fs_get_next_page_offset(&dn,
3507                                                                 page_idx);
3508                                 ret = 0;
3509                                 continue;
3510                         }
3511                         break;
3512                 }
3513
3514                 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3515                 count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
3516                 count = round_up(count, F2FS_I(inode)->i_cluster_size);
3517
3518                 ret = release_compress_blocks(&dn, count);
3519
3520                 f2fs_put_dnode(&dn);
3521
3522                 if (ret < 0)
3523                         break;
3524
3525                 page_idx += count;
3526                 released_blocks += ret;
3527         }
3528
3529         filemap_invalidate_unlock(inode->i_mapping);
3530         f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3531 out:
3532         inode_unlock(inode);
3533
3534         mnt_drop_write_file(filp);
3535
3536         if (ret >= 0) {
3537                 ret = put_user(released_blocks, (u64 __user *)arg);
3538         } else if (released_blocks &&
3539                         atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
3540                 set_sbi_flag(sbi, SBI_NEED_FSCK);
3541                 f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
3542                         "iblocks=%llu, released=%u, compr_blocks=%u, "
3543                         "run fsck to fix.",
3544                         __func__, inode->i_ino, inode->i_blocks,
3545                         released_blocks,
3546                         atomic_read(&F2FS_I(inode)->i_compr_blocks));
3547         }
3548
3549         return ret;
3550 }
3551
3552 static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
3553 {
3554         struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
3555         unsigned int reserved_blocks = 0;
3556         int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
3557         block_t blkaddr;
3558         int i;
3559
3560         for (i = 0; i < count; i++) {
3561                 blkaddr = data_blkaddr(dn->inode, dn->node_page,
3562                                                 dn->ofs_in_node + i);
3563
3564                 if (!__is_valid_data_blkaddr(blkaddr))
3565                         continue;
3566                 if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
3567                                         DATA_GENERIC_ENHANCE))) {
3568                         f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
3569                         return -EFSCORRUPTED;
3570                 }
3571         }
3572
3573         while (count) {
3574                 int compr_blocks = 0;
3575                 blkcnt_t reserved;
3576                 int ret;
3577
3578                 for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
3579                         blkaddr = f2fs_data_blkaddr(dn);
3580
3581                         if (i == 0) {
3582                                 if (blkaddr == COMPRESS_ADDR)
3583                                         continue;
3584                                 dn->ofs_in_node += cluster_size;
3585                                 goto next;
3586                         }
3587
3588                         if (__is_valid_data_blkaddr(blkaddr)) {
3589                                 compr_blocks++;
3590                                 continue;
3591                         }
3592
3593                         dn->data_blkaddr = NEW_ADDR;
3594                         f2fs_set_data_blkaddr(dn);
3595                 }
3596
3597                 reserved = cluster_size - compr_blocks;
3598                 ret = inc_valid_block_count(sbi, dn->inode, &reserved);
3599                 if (ret)
3600                         return ret;
3601
3602                 if (reserved != cluster_size - compr_blocks)
3603                         return -ENOSPC;
3604
3605                 f2fs_i_compr_blocks_update(dn->inode, compr_blocks, true);
3606
3607                 reserved_blocks += reserved;
3608 next:
3609                 count -= cluster_size;
3610         }
3611
3612         return reserved_blocks;
3613 }
3614
3615 static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
3616 {
3617         struct inode *inode = file_inode(filp);
3618         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3619         pgoff_t page_idx = 0, last_idx;
3620         unsigned int reserved_blocks = 0;
3621         int ret;
3622
3623         if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3624                 return -EOPNOTSUPP;
3625
3626         if (!f2fs_compressed_file(inode))
3627                 return -EINVAL;
3628
3629         if (f2fs_readonly(sbi->sb))
3630                 return -EROFS;
3631
3632         ret = mnt_want_write_file(filp);
3633         if (ret)
3634                 return ret;
3635
3636         if (atomic_read(&F2FS_I(inode)->i_compr_blocks))
3637                 goto out;
3638
3639         f2fs_balance_fs(F2FS_I_SB(inode), true);
3640
3641         inode_lock(inode);
3642
3643         if (!is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
3644                 ret = -EINVAL;
3645                 goto unlock_inode;
3646         }
3647
3648         f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3649         filemap_invalidate_lock(inode->i_mapping);
3650
3651         last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3652
3653         while (page_idx < last_idx) {
3654                 struct dnode_of_data dn;
3655                 pgoff_t end_offset, count;
3656
3657                 set_new_dnode(&dn, inode, NULL, NULL, 0);
3658                 ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
3659                 if (ret) {
3660                         if (ret == -ENOENT) {
3661                                 page_idx = f2fs_get_next_page_offset(&dn,
3662                                                                 page_idx);
3663                                 ret = 0;
3664                                 continue;
3665                         }
3666                         break;
3667                 }
3668
3669                 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3670                 count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
3671                 count = round_up(count, F2FS_I(inode)->i_cluster_size);
3672
3673                 ret = reserve_compress_blocks(&dn, count);
3674
3675                 f2fs_put_dnode(&dn);
3676
3677                 if (ret < 0)
3678                         break;
3679
3680                 page_idx += count;
3681                 reserved_blocks += ret;
3682         }
3683
3684         filemap_invalidate_unlock(inode->i_mapping);
3685         f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3686
3687         if (ret >= 0) {
3688                 clear_inode_flag(inode, FI_COMPRESS_RELEASED);
3689                 inode->i_ctime = current_time(inode);
3690                 f2fs_mark_inode_dirty_sync(inode, true);
3691         }
3692 unlock_inode:
3693         inode_unlock(inode);
3694 out:
3695         mnt_drop_write_file(filp);
3696
3697         if (ret >= 0) {
3698                 ret = put_user(reserved_blocks, (u64 __user *)arg);
3699         } else if (reserved_blocks &&
3700                         atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
3701                 set_sbi_flag(sbi, SBI_NEED_FSCK);
3702                 f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
3703                         "iblocks=%llu, reserved=%u, compr_blocks=%u, "
3704                         "run fsck to fix.",
3705                         __func__, inode->i_ino, inode->i_blocks,
3706                         reserved_blocks,
3707                         atomic_read(&F2FS_I(inode)->i_compr_blocks));
3708         }
3709
3710         return ret;
3711 }
3712
3713 static int f2fs_secure_erase(struct block_device *bdev, struct inode *inode,
3714                 pgoff_t off, block_t block, block_t len, u32 flags)
3715 {
3716         sector_t sector = SECTOR_FROM_BLOCK(block);
3717         sector_t nr_sects = SECTOR_FROM_BLOCK(len);
3718         int ret = 0;
3719
3720         if (flags & F2FS_TRIM_FILE_DISCARD) {
3721                 if (bdev_max_secure_erase_sectors(bdev))
3722                         ret = blkdev_issue_secure_erase(bdev, sector, nr_sects,
3723                                         GFP_NOFS);
3724                 else
3725                         ret = blkdev_issue_discard(bdev, sector, nr_sects,
3726                                         GFP_NOFS);
3727         }
3728
3729         if (!ret && (flags & F2FS_TRIM_FILE_ZEROOUT)) {
3730                 if (IS_ENCRYPTED(inode))
3731                         ret = fscrypt_zeroout_range(inode, off, block, len);
3732                 else
3733                         ret = blkdev_issue_zeroout(bdev, sector, nr_sects,
3734                                         GFP_NOFS, 0);
3735         }
3736
3737         return ret;
3738 }
3739
3740 static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
3741 {
3742         struct inode *inode = file_inode(filp);
3743         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3744         struct address_space *mapping = inode->i_mapping;
3745         struct block_device *prev_bdev = NULL;
3746         struct f2fs_sectrim_range range;
3747         pgoff_t index, pg_end, prev_index = 0;
3748         block_t prev_block = 0, len = 0;
3749         loff_t end_addr;
3750         bool to_end = false;
3751         int ret = 0;
3752
3753         if (!(filp->f_mode & FMODE_WRITE))
3754                 return -EBADF;
3755
3756         if (copy_from_user(&range, (struct f2fs_sectrim_range __user *)arg,
3757                                 sizeof(range)))
3758                 return -EFAULT;
3759
3760         if (range.flags == 0 || (range.flags & ~F2FS_TRIM_FILE_MASK) ||
3761                         !S_ISREG(inode->i_mode))
3762                 return -EINVAL;
3763
3764         if (((range.flags & F2FS_TRIM_FILE_DISCARD) &&
3765                         !f2fs_hw_support_discard(sbi)) ||
3766                         ((range.flags & F2FS_TRIM_FILE_ZEROOUT) &&
3767                          IS_ENCRYPTED(inode) && f2fs_is_multi_device(sbi)))
3768                 return -EOPNOTSUPP;
3769
3770         file_start_write(filp);
3771         inode_lock(inode);
3772
3773         if (f2fs_is_atomic_file(inode) || f2fs_compressed_file(inode) ||
3774                         range.start >= inode->i_size) {
3775                 ret = -EINVAL;
3776                 goto err;
3777         }
3778
3779         if (range.len == 0)
3780                 goto err;
3781
3782         if (inode->i_size - range.start > range.len) {
3783                 end_addr = range.start + range.len;
3784         } else {
3785                 end_addr = range.len == (u64)-1 ?
3786                         sbi->sb->s_maxbytes : inode->i_size;
3787                 to_end = true;
3788         }
3789
3790         if (!IS_ALIGNED(range.start, F2FS_BLKSIZE) ||
3791                         (!to_end && !IS_ALIGNED(end_addr, F2FS_BLKSIZE))) {
3792                 ret = -EINVAL;
3793                 goto err;
3794         }
3795
3796         index = F2FS_BYTES_TO_BLK(range.start);
3797         pg_end = DIV_ROUND_UP(end_addr, F2FS_BLKSIZE);
3798
3799         ret = f2fs_convert_inline_inode(inode);
3800         if (ret)
3801                 goto err;
3802
3803         f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3804         filemap_invalidate_lock(mapping);
3805
3806         ret = filemap_write_and_wait_range(mapping, range.start,
3807                         to_end ? LLONG_MAX : end_addr - 1);
3808         if (ret)
3809                 goto out;
3810
3811         truncate_inode_pages_range(mapping, range.start,
3812                         to_end ? -1 : end_addr - 1);
3813
3814         while (index < pg_end) {
3815                 struct dnode_of_data dn;
3816                 pgoff_t end_offset, count;
3817                 int i;
3818
3819                 set_new_dnode(&dn, inode, NULL, NULL, 0);
3820                 ret = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
3821                 if (ret) {
3822                         if (ret == -ENOENT) {
3823                                 index = f2fs_get_next_page_offset(&dn, index);
3824                                 continue;
3825                         }
3826                         goto out;
3827                 }
3828
3829                 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3830                 count = min(end_offset - dn.ofs_in_node, pg_end - index);
3831                 for (i = 0; i < count; i++, index++, dn.ofs_in_node++) {
3832                         struct block_device *cur_bdev;
3833                         block_t blkaddr = f2fs_data_blkaddr(&dn);
3834
3835                         if (!__is_valid_data_blkaddr(blkaddr))
3836                                 continue;
3837
3838                         if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
3839                                                 DATA_GENERIC_ENHANCE)) {
3840                                 ret = -EFSCORRUPTED;
3841                                 f2fs_put_dnode(&dn);
3842                                 f2fs_handle_error(sbi,
3843                                                 ERROR_INVALID_BLKADDR);
3844                                 goto out;
3845                         }
3846
3847                         cur_bdev = f2fs_target_device(sbi, blkaddr, NULL);
3848                         if (f2fs_is_multi_device(sbi)) {
3849                                 int di = f2fs_target_device_index(sbi, blkaddr);
3850
3851                                 blkaddr -= FDEV(di).start_blk;
3852                         }
3853
3854                         if (len) {
3855                                 if (prev_bdev == cur_bdev &&
3856                                                 index == prev_index + len &&
3857                                                 blkaddr == prev_block + len) {
3858                                         len++;
3859                                 } else {
3860                                         ret = f2fs_secure_erase(prev_bdev,
3861                                                 inode, prev_index, prev_block,
3862                                                 len, range.flags);
3863                                         if (ret) {
3864                                                 f2fs_put_dnode(&dn);
3865                                                 goto out;
3866                                         }
3867
3868                                         len = 0;
3869                                 }
3870                         }
3871
3872                         if (!len) {
3873                                 prev_bdev = cur_bdev;
3874                                 prev_index = index;
3875                                 prev_block = blkaddr;
3876                                 len = 1;
3877                         }
3878                 }
3879
3880                 f2fs_put_dnode(&dn);
3881
3882                 if (fatal_signal_pending(current)) {
3883                         ret = -EINTR;
3884                         goto out;
3885                 }
3886                 cond_resched();
3887         }
3888
3889         if (len)
3890                 ret = f2fs_secure_erase(prev_bdev, inode, prev_index,
3891                                 prev_block, len, range.flags);
3892 out:
3893         filemap_invalidate_unlock(mapping);
3894         f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3895 err:
3896         inode_unlock(inode);
3897         file_end_write(filp);
3898
3899         return ret;
3900 }
3901
3902 static int f2fs_ioc_get_compress_option(struct file *filp, unsigned long arg)
3903 {
3904         struct inode *inode = file_inode(filp);
3905         struct f2fs_comp_option option;
3906
3907         if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3908                 return -EOPNOTSUPP;
3909
3910         inode_lock_shared(inode);
3911
3912         if (!f2fs_compressed_file(inode)) {
3913                 inode_unlock_shared(inode);
3914                 return -ENODATA;
3915         }
3916
3917         option.algorithm = F2FS_I(inode)->i_compress_algorithm;
3918         option.log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
3919
3920         inode_unlock_shared(inode);
3921
3922         if (copy_to_user((struct f2fs_comp_option __user *)arg, &option,
3923                                 sizeof(option)))
3924                 return -EFAULT;
3925
3926         return 0;
3927 }
3928
3929 static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
3930 {
3931         struct inode *inode = file_inode(filp);
3932         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3933         struct f2fs_comp_option option;
3934         int ret = 0;
3935
3936         if (!f2fs_sb_has_compression(sbi))
3937                 return -EOPNOTSUPP;
3938
3939         if (!(filp->f_mode & FMODE_WRITE))
3940                 return -EBADF;
3941
3942         if (copy_from_user(&option, (struct f2fs_comp_option __user *)arg,
3943                                 sizeof(option)))
3944                 return -EFAULT;
3945
3946         if (!f2fs_compressed_file(inode) ||
3947                         option.log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
3948                         option.log_cluster_size > MAX_COMPRESS_LOG_SIZE ||
3949                         option.algorithm >= COMPRESS_MAX)
3950                 return -EINVAL;
3951
3952         file_start_write(filp);
3953         inode_lock(inode);
3954
3955         if (f2fs_is_mmap_file(inode) || get_dirty_pages(inode)) {
3956                 ret = -EBUSY;
3957                 goto out;
3958         }
3959
3960         if (F2FS_HAS_BLOCKS(inode)) {
3961                 ret = -EFBIG;
3962                 goto out;
3963         }
3964
3965         F2FS_I(inode)->i_compress_algorithm = option.algorithm;
3966         F2FS_I(inode)->i_log_cluster_size = option.log_cluster_size;
3967         F2FS_I(inode)->i_cluster_size = 1 << option.log_cluster_size;
3968         f2fs_mark_inode_dirty_sync(inode, true);
3969
3970         if (!f2fs_is_compress_backend_ready(inode))
3971                 f2fs_warn(sbi, "compression algorithm is successfully set, "
3972                         "but current kernel doesn't support this algorithm.");
3973 out:
3974         inode_unlock(inode);
3975         file_end_write(filp);
3976
3977         return ret;
3978 }
3979
3980 static int redirty_blocks(struct inode *inode, pgoff_t page_idx, int len)
3981 {
3982         DEFINE_READAHEAD(ractl, NULL, NULL, inode->i_mapping, page_idx);
3983         struct address_space *mapping = inode->i_mapping;
3984         struct page *page;
3985         pgoff_t redirty_idx = page_idx;
3986         int i, page_len = 0, ret = 0;
3987
3988         page_cache_ra_unbounded(&ractl, len, 0);
3989
3990         for (i = 0; i < len; i++, page_idx++) {
3991                 page = read_cache_page(mapping, page_idx, NULL, NULL);
3992                 if (IS_ERR(page)) {
3993                         ret = PTR_ERR(page);
3994                         break;
3995                 }
3996                 page_len++;
3997         }
3998
3999         for (i = 0; i < page_len; i++, redirty_idx++) {
4000                 page = find_lock_page(mapping, redirty_idx);
4001
4002                 /* It will never fail, when page has pinned above */
4003                 f2fs_bug_on(F2FS_I_SB(inode), !page);
4004
4005                 set_page_dirty(page);
4006                 f2fs_put_page(page, 1);
4007                 f2fs_put_page(page, 0);
4008         }
4009
4010         return ret;
4011 }
4012
4013 static int f2fs_ioc_decompress_file(struct file *filp, unsigned long arg)
4014 {
4015         struct inode *inode = file_inode(filp);
4016         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4017         struct f2fs_inode_info *fi = F2FS_I(inode);
4018         pgoff_t page_idx = 0, last_idx;
4019         unsigned int blk_per_seg = sbi->blocks_per_seg;
4020         int cluster_size = fi->i_cluster_size;
4021         int count, ret;
4022
4023         if (!f2fs_sb_has_compression(sbi) ||
4024                         F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
4025                 return -EOPNOTSUPP;
4026
4027         if (!(filp->f_mode & FMODE_WRITE))
4028                 return -EBADF;
4029
4030         if (!f2fs_compressed_file(inode))
4031                 return -EINVAL;
4032
4033         f2fs_balance_fs(F2FS_I_SB(inode), true);
4034
4035         file_start_write(filp);
4036         inode_lock(inode);
4037
4038         if (!f2fs_is_compress_backend_ready(inode)) {
4039                 ret = -EOPNOTSUPP;
4040                 goto out;
4041         }
4042
4043         if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
4044                 ret = -EINVAL;
4045                 goto out;
4046         }
4047
4048         ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
4049         if (ret)
4050                 goto out;
4051
4052         if (!atomic_read(&fi->i_compr_blocks))
4053                 goto out;
4054
4055         last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
4056
4057         count = last_idx - page_idx;
4058         while (count) {
4059                 int len = min(cluster_size, count);
4060
4061                 ret = redirty_blocks(inode, page_idx, len);
4062                 if (ret < 0)
4063                         break;
4064
4065                 if (get_dirty_pages(inode) >= blk_per_seg)
4066                         filemap_fdatawrite(inode->i_mapping);
4067
4068                 count -= len;
4069                 page_idx += len;
4070         }
4071
4072         if (!ret)
4073                 ret = filemap_write_and_wait_range(inode->i_mapping, 0,
4074                                                         LLONG_MAX);
4075
4076         if (ret)
4077                 f2fs_warn(sbi, "%s: The file might be partially decompressed (errno=%d). Please delete the file.",
4078                           __func__, ret);
4079 out:
4080         inode_unlock(inode);
4081         file_end_write(filp);
4082
4083         return ret;
4084 }
4085
4086 static int f2fs_ioc_compress_file(struct file *filp, unsigned long arg)
4087 {
4088         struct inode *inode = file_inode(filp);
4089         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4090         pgoff_t page_idx = 0, last_idx;
4091         unsigned int blk_per_seg = sbi->blocks_per_seg;
4092         int cluster_size = F2FS_I(inode)->i_cluster_size;
4093         int count, ret;
4094
4095         if (!f2fs_sb_has_compression(sbi) ||
4096                         F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
4097                 return -EOPNOTSUPP;
4098
4099         if (!(filp->f_mode & FMODE_WRITE))
4100                 return -EBADF;
4101
4102         if (!f2fs_compressed_file(inode))
4103                 return -EINVAL;
4104
4105         f2fs_balance_fs(F2FS_I_SB(inode), true);
4106
4107         file_start_write(filp);
4108         inode_lock(inode);
4109
4110         if (!f2fs_is_compress_backend_ready(inode)) {
4111                 ret = -EOPNOTSUPP;
4112                 goto out;
4113         }
4114
4115         if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
4116                 ret = -EINVAL;
4117                 goto out;
4118         }
4119
4120         ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
4121         if (ret)
4122                 goto out;
4123
4124         set_inode_flag(inode, FI_ENABLE_COMPRESS);
4125
4126         last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
4127
4128         count = last_idx - page_idx;
4129         while (count) {
4130                 int len = min(cluster_size, count);
4131
4132                 ret = redirty_blocks(inode, page_idx, len);
4133                 if (ret < 0)
4134                         break;
4135
4136                 if (get_dirty_pages(inode) >= blk_per_seg)
4137                         filemap_fdatawrite(inode->i_mapping);
4138
4139                 count -= len;
4140                 page_idx += len;
4141         }
4142
4143         if (!ret)
4144                 ret = filemap_write_and_wait_range(inode->i_mapping, 0,
4145                                                         LLONG_MAX);
4146
4147         clear_inode_flag(inode, FI_ENABLE_COMPRESS);
4148
4149         if (ret)
4150                 f2fs_warn(sbi, "%s: The file might be partially compressed (errno=%d). Please delete the file.",
4151                           __func__, ret);
4152 out:
4153         inode_unlock(inode);
4154         file_end_write(filp);
4155
4156         return ret;
4157 }
4158
4159 static long __f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4160 {
4161         switch (cmd) {
4162         case FS_IOC_GETVERSION:
4163                 return f2fs_ioc_getversion(filp, arg);
4164         case F2FS_IOC_START_ATOMIC_WRITE:
4165                 return f2fs_ioc_start_atomic_write(filp);
4166         case F2FS_IOC_COMMIT_ATOMIC_WRITE:
4167                 return f2fs_ioc_commit_atomic_write(filp);
4168         case F2FS_IOC_ABORT_ATOMIC_WRITE:
4169                 return f2fs_ioc_abort_atomic_write(filp);
4170         case F2FS_IOC_START_VOLATILE_WRITE:
4171         case F2FS_IOC_RELEASE_VOLATILE_WRITE:
4172                 return -EOPNOTSUPP;
4173         case F2FS_IOC_SHUTDOWN:
4174                 return f2fs_ioc_shutdown(filp, arg);
4175         case FITRIM:
4176                 return f2fs_ioc_fitrim(filp, arg);
4177         case FS_IOC_SET_ENCRYPTION_POLICY:
4178                 return f2fs_ioc_set_encryption_policy(filp, arg);
4179         case FS_IOC_GET_ENCRYPTION_POLICY:
4180                 return f2fs_ioc_get_encryption_policy(filp, arg);
4181         case FS_IOC_GET_ENCRYPTION_PWSALT:
4182                 return f2fs_ioc_get_encryption_pwsalt(filp, arg);
4183         case FS_IOC_GET_ENCRYPTION_POLICY_EX:
4184                 return f2fs_ioc_get_encryption_policy_ex(filp, arg);
4185         case FS_IOC_ADD_ENCRYPTION_KEY:
4186                 return f2fs_ioc_add_encryption_key(filp, arg);
4187         case FS_IOC_REMOVE_ENCRYPTION_KEY:
4188                 return f2fs_ioc_remove_encryption_key(filp, arg);
4189         case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
4190                 return f2fs_ioc_remove_encryption_key_all_users(filp, arg);
4191         case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
4192                 return f2fs_ioc_get_encryption_key_status(filp, arg);
4193         case FS_IOC_GET_ENCRYPTION_NONCE:
4194                 return f2fs_ioc_get_encryption_nonce(filp, arg);
4195         case F2FS_IOC_GARBAGE_COLLECT:
4196                 return f2fs_ioc_gc(filp, arg);
4197         case F2FS_IOC_GARBAGE_COLLECT_RANGE:
4198                 return f2fs_ioc_gc_range(filp, arg);
4199         case F2FS_IOC_WRITE_CHECKPOINT:
4200                 return f2fs_ioc_write_checkpoint(filp, arg);
4201         case F2FS_IOC_DEFRAGMENT:
4202                 return f2fs_ioc_defragment(filp, arg);
4203         case F2FS_IOC_MOVE_RANGE:
4204                 return f2fs_ioc_move_range(filp, arg);
4205         case F2FS_IOC_FLUSH_DEVICE:
4206                 return f2fs_ioc_flush_device(filp, arg);
4207         case F2FS_IOC_GET_FEATURES:
4208                 return f2fs_ioc_get_features(filp, arg);
4209         case F2FS_IOC_GET_PIN_FILE:
4210                 return f2fs_ioc_get_pin_file(filp, arg);
4211         case F2FS_IOC_SET_PIN_FILE:
4212                 return f2fs_ioc_set_pin_file(filp, arg);
4213         case F2FS_IOC_PRECACHE_EXTENTS:
4214                 return f2fs_ioc_precache_extents(filp, arg);
4215         case F2FS_IOC_RESIZE_FS:
4216                 return f2fs_ioc_resize_fs(filp, arg);
4217         case FS_IOC_ENABLE_VERITY:
4218                 return f2fs_ioc_enable_verity(filp, arg);
4219         case FS_IOC_MEASURE_VERITY:
4220                 return f2fs_ioc_measure_verity(filp, arg);
4221         case FS_IOC_READ_VERITY_METADATA:
4222                 return f2fs_ioc_read_verity_metadata(filp, arg);
4223         case FS_IOC_GETFSLABEL:
4224                 return f2fs_ioc_getfslabel(filp, arg);
4225         case FS_IOC_SETFSLABEL:
4226                 return f2fs_ioc_setfslabel(filp, arg);
4227         case F2FS_IOC_GET_COMPRESS_BLOCKS:
4228                 return f2fs_get_compress_blocks(filp, arg);
4229         case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
4230                 return f2fs_release_compress_blocks(filp, arg);
4231         case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
4232                 return f2fs_reserve_compress_blocks(filp, arg);
4233         case F2FS_IOC_SEC_TRIM_FILE:
4234                 return f2fs_sec_trim_file(filp, arg);
4235         case F2FS_IOC_GET_COMPRESS_OPTION:
4236                 return f2fs_ioc_get_compress_option(filp, arg);
4237         case F2FS_IOC_SET_COMPRESS_OPTION:
4238                 return f2fs_ioc_set_compress_option(filp, arg);
4239         case F2FS_IOC_DECOMPRESS_FILE:
4240                 return f2fs_ioc_decompress_file(filp, arg);
4241         case F2FS_IOC_COMPRESS_FILE:
4242                 return f2fs_ioc_compress_file(filp, arg);
4243         default:
4244                 return -ENOTTY;
4245         }
4246 }
4247
4248 long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4249 {
4250         if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
4251                 return -EIO;
4252         if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(filp))))
4253                 return -ENOSPC;
4254
4255         return __f2fs_ioctl(filp, cmd, arg);
4256 }
4257
4258 /*
4259  * Return %true if the given read or write request should use direct I/O, or
4260  * %false if it should use buffered I/O.
4261  */
4262 static bool f2fs_should_use_dio(struct inode *inode, struct kiocb *iocb,
4263                                 struct iov_iter *iter)
4264 {
4265         unsigned int align;
4266
4267         if (!(iocb->ki_flags & IOCB_DIRECT))
4268                 return false;
4269
4270         if (f2fs_force_buffered_io(inode, iov_iter_rw(iter)))
4271                 return false;
4272
4273         /*
4274          * Direct I/O not aligned to the disk's logical_block_size will be
4275          * attempted, but will fail with -EINVAL.
4276          *
4277          * f2fs additionally requires that direct I/O be aligned to the
4278          * filesystem block size, which is often a stricter requirement.
4279          * However, f2fs traditionally falls back to buffered I/O on requests
4280          * that are logical_block_size-aligned but not fs-block aligned.
4281          *
4282          * The below logic implements this behavior.
4283          */
4284         align = iocb->ki_pos | iov_iter_alignment(iter);
4285         if (!IS_ALIGNED(align, i_blocksize(inode)) &&
4286             IS_ALIGNED(align, bdev_logical_block_size(inode->i_sb->s_bdev)))
4287                 return false;
4288
4289         return true;
4290 }
4291
4292 static int f2fs_dio_read_end_io(struct kiocb *iocb, ssize_t size, int error,
4293                                 unsigned int flags)
4294 {
4295         struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(iocb->ki_filp));
4296
4297         dec_page_count(sbi, F2FS_DIO_READ);
4298         if (error)
4299                 return error;
4300         f2fs_update_iostat(sbi, NULL, APP_DIRECT_READ_IO, size);
4301         return 0;
4302 }
4303
4304 static const struct iomap_dio_ops f2fs_iomap_dio_read_ops = {
4305         .end_io = f2fs_dio_read_end_io,
4306 };
4307
4308 static ssize_t f2fs_dio_read_iter(struct kiocb *iocb, struct iov_iter *to)
4309 {
4310         struct file *file = iocb->ki_filp;
4311         struct inode *inode = file_inode(file);
4312         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4313         struct f2fs_inode_info *fi = F2FS_I(inode);
4314         const loff_t pos = iocb->ki_pos;
4315         const size_t count = iov_iter_count(to);
4316         struct iomap_dio *dio;
4317         ssize_t ret;
4318
4319         if (count == 0)
4320                 return 0; /* skip atime update */
4321
4322         trace_f2fs_direct_IO_enter(inode, iocb, count, READ);
4323
4324         if (iocb->ki_flags & IOCB_NOWAIT) {
4325                 if (!f2fs_down_read_trylock(&fi->i_gc_rwsem[READ])) {
4326                         ret = -EAGAIN;
4327                         goto out;
4328                 }
4329         } else {
4330                 f2fs_down_read(&fi->i_gc_rwsem[READ]);
4331         }
4332
4333         /*
4334          * We have to use __iomap_dio_rw() and iomap_dio_complete() instead of
4335          * the higher-level function iomap_dio_rw() in order to ensure that the
4336          * F2FS_DIO_READ counter will be decremented correctly in all cases.
4337          */
4338         inc_page_count(sbi, F2FS_DIO_READ);
4339         dio = __iomap_dio_rw(iocb, to, &f2fs_iomap_ops,
4340                              &f2fs_iomap_dio_read_ops, 0, NULL, 0);
4341         if (IS_ERR_OR_NULL(dio)) {
4342                 ret = PTR_ERR_OR_ZERO(dio);
4343                 if (ret != -EIOCBQUEUED)
4344                         dec_page_count(sbi, F2FS_DIO_READ);
4345         } else {
4346                 ret = iomap_dio_complete(dio);
4347         }
4348
4349         f2fs_up_read(&fi->i_gc_rwsem[READ]);
4350
4351         file_accessed(file);
4352 out:
4353         trace_f2fs_direct_IO_exit(inode, pos, count, READ, ret);
4354         return ret;
4355 }
4356
4357 static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
4358 {
4359         struct inode *inode = file_inode(iocb->ki_filp);
4360         const loff_t pos = iocb->ki_pos;
4361         ssize_t ret;
4362
4363         if (!f2fs_is_compress_backend_ready(inode))
4364                 return -EOPNOTSUPP;
4365
4366         if (trace_f2fs_dataread_start_enabled()) {
4367                 char *p = f2fs_kmalloc(F2FS_I_SB(inode), PATH_MAX, GFP_KERNEL);
4368                 char *path;
4369
4370                 if (!p)
4371                         goto skip_read_trace;
4372
4373                 path = dentry_path_raw(file_dentry(iocb->ki_filp), p, PATH_MAX);
4374                 if (IS_ERR(path)) {
4375                         kfree(p);
4376                         goto skip_read_trace;
4377                 }
4378
4379                 trace_f2fs_dataread_start(inode, pos, iov_iter_count(to),
4380                                         current->pid, path, current->comm);
4381                 kfree(p);
4382         }
4383 skip_read_trace:
4384         if (f2fs_should_use_dio(inode, iocb, to)) {
4385                 ret = f2fs_dio_read_iter(iocb, to);
4386         } else {
4387                 ret = filemap_read(iocb, to, 0);
4388                 if (ret > 0)
4389                         f2fs_update_iostat(F2FS_I_SB(inode), inode,
4390                                                 APP_BUFFERED_READ_IO, ret);
4391         }
4392         if (trace_f2fs_dataread_end_enabled())
4393                 trace_f2fs_dataread_end(inode, pos, ret);
4394         return ret;
4395 }
4396
4397 static ssize_t f2fs_write_checks(struct kiocb *iocb, struct iov_iter *from)
4398 {
4399         struct file *file = iocb->ki_filp;
4400         struct inode *inode = file_inode(file);
4401         ssize_t count;
4402         int err;
4403
4404         if (IS_IMMUTABLE(inode))
4405                 return -EPERM;
4406
4407         if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
4408                 return -EPERM;
4409
4410         count = generic_write_checks(iocb, from);
4411         if (count <= 0)
4412                 return count;
4413
4414         err = file_modified(file);
4415         if (err)
4416                 return err;
4417         return count;
4418 }
4419
4420 /*
4421  * Preallocate blocks for a write request, if it is possible and helpful to do
4422  * so.  Returns a positive number if blocks may have been preallocated, 0 if no
4423  * blocks were preallocated, or a negative errno value if something went
4424  * seriously wrong.  Also sets FI_PREALLOCATED_ALL on the inode if *all* the
4425  * requested blocks (not just some of them) have been allocated.
4426  */
4427 static int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *iter,
4428                                    bool dio)
4429 {
4430         struct inode *inode = file_inode(iocb->ki_filp);
4431         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4432         const loff_t pos = iocb->ki_pos;
4433         const size_t count = iov_iter_count(iter);
4434         struct f2fs_map_blocks map = {};
4435         int flag;
4436         int ret;
4437
4438         /* If it will be an out-of-place direct write, don't bother. */
4439         if (dio && f2fs_lfs_mode(sbi))
4440                 return 0;
4441         /*
4442          * Don't preallocate holes aligned to DIO_SKIP_HOLES which turns into
4443          * buffered IO, if DIO meets any holes.
4444          */
4445         if (dio && i_size_read(inode) &&
4446                 (F2FS_BYTES_TO_BLK(pos) < F2FS_BLK_ALIGN(i_size_read(inode))))
4447                 return 0;
4448
4449         /* No-wait I/O can't allocate blocks. */
4450         if (iocb->ki_flags & IOCB_NOWAIT)
4451                 return 0;
4452
4453         /* If it will be a short write, don't bother. */
4454         if (fault_in_iov_iter_readable(iter, count))
4455                 return 0;
4456
4457         if (f2fs_has_inline_data(inode)) {
4458                 /* If the data will fit inline, don't bother. */
4459                 if (pos + count <= MAX_INLINE_DATA(inode))
4460                         return 0;
4461                 ret = f2fs_convert_inline_inode(inode);
4462                 if (ret)
4463                         return ret;
4464         }
4465
4466         /* Do not preallocate blocks that will be written partially in 4KB. */
4467         map.m_lblk = F2FS_BLK_ALIGN(pos);
4468         map.m_len = F2FS_BYTES_TO_BLK(pos + count);
4469         if (map.m_len > map.m_lblk)
4470                 map.m_len -= map.m_lblk;
4471         else
4472                 map.m_len = 0;
4473         map.m_may_create = true;
4474         if (dio) {
4475                 map.m_seg_type = f2fs_rw_hint_to_seg_type(inode->i_write_hint);
4476                 flag = F2FS_GET_BLOCK_PRE_DIO;
4477         } else {
4478                 map.m_seg_type = NO_CHECK_TYPE;
4479                 flag = F2FS_GET_BLOCK_PRE_AIO;
4480         }
4481
4482         ret = f2fs_map_blocks(inode, &map, 1, flag);
4483         /* -ENOSPC|-EDQUOT are fine to report the number of allocated blocks. */
4484         if (ret < 0 && !((ret == -ENOSPC || ret == -EDQUOT) && map.m_len > 0))
4485                 return ret;
4486         if (ret == 0)
4487                 set_inode_flag(inode, FI_PREALLOCATED_ALL);
4488         return map.m_len;
4489 }
4490
4491 static ssize_t f2fs_buffered_write_iter(struct kiocb *iocb,
4492                                         struct iov_iter *from)
4493 {
4494         struct file *file = iocb->ki_filp;
4495         struct inode *inode = file_inode(file);
4496         ssize_t ret;
4497
4498         if (iocb->ki_flags & IOCB_NOWAIT)
4499                 return -EOPNOTSUPP;
4500
4501         current->backing_dev_info = inode_to_bdi(inode);
4502         ret = generic_perform_write(iocb, from);
4503         current->backing_dev_info = NULL;
4504
4505         if (ret > 0) {
4506                 iocb->ki_pos += ret;
4507                 f2fs_update_iostat(F2FS_I_SB(inode), inode,
4508                                                 APP_BUFFERED_IO, ret);
4509         }
4510         return ret;
4511 }
4512
4513 static int f2fs_dio_write_end_io(struct kiocb *iocb, ssize_t size, int error,
4514                                  unsigned int flags)
4515 {
4516         struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(iocb->ki_filp));
4517
4518         dec_page_count(sbi, F2FS_DIO_WRITE);
4519         if (error)
4520                 return error;
4521         f2fs_update_iostat(sbi, NULL, APP_DIRECT_IO, size);
4522         return 0;
4523 }
4524
4525 static const struct iomap_dio_ops f2fs_iomap_dio_write_ops = {
4526         .end_io = f2fs_dio_write_end_io,
4527 };
4528
4529 static ssize_t f2fs_dio_write_iter(struct kiocb *iocb, struct iov_iter *from,
4530                                    bool *may_need_sync)
4531 {
4532         struct file *file = iocb->ki_filp;
4533         struct inode *inode = file_inode(file);
4534         struct f2fs_inode_info *fi = F2FS_I(inode);
4535         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4536         const bool do_opu = f2fs_lfs_mode(sbi);
4537         const loff_t pos = iocb->ki_pos;
4538         const ssize_t count = iov_iter_count(from);
4539         unsigned int dio_flags;
4540         struct iomap_dio *dio;
4541         ssize_t ret;
4542
4543         trace_f2fs_direct_IO_enter(inode, iocb, count, WRITE);
4544
4545         if (iocb->ki_flags & IOCB_NOWAIT) {
4546                 /* f2fs_convert_inline_inode() and block allocation can block */
4547                 if (f2fs_has_inline_data(inode) ||
4548                     !f2fs_overwrite_io(inode, pos, count)) {
4549                         ret = -EAGAIN;
4550                         goto out;
4551                 }
4552
4553                 if (!f2fs_down_read_trylock(&fi->i_gc_rwsem[WRITE])) {
4554                         ret = -EAGAIN;
4555                         goto out;
4556                 }
4557                 if (do_opu && !f2fs_down_read_trylock(&fi->i_gc_rwsem[READ])) {
4558                         f2fs_up_read(&fi->i_gc_rwsem[WRITE]);
4559                         ret = -EAGAIN;
4560                         goto out;
4561                 }
4562         } else {
4563                 ret = f2fs_convert_inline_inode(inode);
4564                 if (ret)
4565                         goto out;
4566
4567                 f2fs_down_read(&fi->i_gc_rwsem[WRITE]);
4568                 if (do_opu)
4569                         f2fs_down_read(&fi->i_gc_rwsem[READ]);
4570         }
4571
4572         /*
4573          * We have to use __iomap_dio_rw() and iomap_dio_complete() instead of
4574          * the higher-level function iomap_dio_rw() in order to ensure that the
4575          * F2FS_DIO_WRITE counter will be decremented correctly in all cases.
4576          */
4577         inc_page_count(sbi, F2FS_DIO_WRITE);
4578         dio_flags = 0;
4579         if (pos + count > inode->i_size)
4580                 dio_flags |= IOMAP_DIO_FORCE_WAIT;
4581         dio = __iomap_dio_rw(iocb, from, &f2fs_iomap_ops,
4582                              &f2fs_iomap_dio_write_ops, dio_flags, NULL, 0);
4583         if (IS_ERR_OR_NULL(dio)) {
4584                 ret = PTR_ERR_OR_ZERO(dio);
4585                 if (ret == -ENOTBLK)
4586                         ret = 0;
4587                 if (ret != -EIOCBQUEUED)
4588                         dec_page_count(sbi, F2FS_DIO_WRITE);
4589         } else {
4590                 ret = iomap_dio_complete(dio);
4591         }
4592
4593         if (do_opu)
4594                 f2fs_up_read(&fi->i_gc_rwsem[READ]);
4595         f2fs_up_read(&fi->i_gc_rwsem[WRITE]);
4596
4597         if (ret < 0)
4598                 goto out;
4599         if (pos + ret > inode->i_size)
4600                 f2fs_i_size_write(inode, pos + ret);
4601         if (!do_opu)
4602                 set_inode_flag(inode, FI_UPDATE_WRITE);
4603
4604         if (iov_iter_count(from)) {
4605                 ssize_t ret2;
4606                 loff_t bufio_start_pos = iocb->ki_pos;
4607
4608                 /*
4609                  * The direct write was partial, so we need to fall back to a
4610                  * buffered write for the remainder.
4611                  */
4612
4613                 ret2 = f2fs_buffered_write_iter(iocb, from);
4614                 if (iov_iter_count(from))
4615                         f2fs_write_failed(inode, iocb->ki_pos);
4616                 if (ret2 < 0)
4617                         goto out;
4618
4619                 /*
4620                  * Ensure that the pagecache pages are written to disk and
4621                  * invalidated to preserve the expected O_DIRECT semantics.
4622                  */
4623                 if (ret2 > 0) {
4624                         loff_t bufio_end_pos = bufio_start_pos + ret2 - 1;
4625
4626                         ret += ret2;
4627
4628                         ret2 = filemap_write_and_wait_range(file->f_mapping,
4629                                                             bufio_start_pos,
4630                                                             bufio_end_pos);
4631                         if (ret2 < 0)
4632                                 goto out;
4633                         invalidate_mapping_pages(file->f_mapping,
4634                                                  bufio_start_pos >> PAGE_SHIFT,
4635                                                  bufio_end_pos >> PAGE_SHIFT);
4636                 }
4637         } else {
4638                 /* iomap_dio_rw() already handled the generic_write_sync(). */
4639                 *may_need_sync = false;
4640         }
4641 out:
4642         trace_f2fs_direct_IO_exit(inode, pos, count, WRITE, ret);
4643         return ret;
4644 }
4645
4646 static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
4647 {
4648         struct inode *inode = file_inode(iocb->ki_filp);
4649         const loff_t orig_pos = iocb->ki_pos;
4650         const size_t orig_count = iov_iter_count(from);
4651         loff_t target_size;
4652         bool dio;
4653         bool may_need_sync = true;
4654         int preallocated;
4655         ssize_t ret;
4656
4657         if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
4658                 ret = -EIO;
4659                 goto out;
4660         }
4661
4662         if (!f2fs_is_compress_backend_ready(inode)) {
4663                 ret = -EOPNOTSUPP;
4664                 goto out;
4665         }
4666
4667         if (iocb->ki_flags & IOCB_NOWAIT) {
4668                 if (!inode_trylock(inode)) {
4669                         ret = -EAGAIN;
4670                         goto out;
4671                 }
4672         } else {
4673                 inode_lock(inode);
4674         }
4675
4676         ret = f2fs_write_checks(iocb, from);
4677         if (ret <= 0)
4678                 goto out_unlock;
4679
4680         /* Determine whether we will do a direct write or a buffered write. */
4681         dio = f2fs_should_use_dio(inode, iocb, from);
4682
4683         /* Possibly preallocate the blocks for the write. */
4684         target_size = iocb->ki_pos + iov_iter_count(from);
4685         preallocated = f2fs_preallocate_blocks(iocb, from, dio);
4686         if (preallocated < 0) {
4687                 ret = preallocated;
4688         } else {
4689                 if (trace_f2fs_datawrite_start_enabled()) {
4690                         char *p = f2fs_kmalloc(F2FS_I_SB(inode),
4691                                                 PATH_MAX, GFP_KERNEL);
4692                         char *path;
4693
4694                         if (!p)
4695                                 goto skip_write_trace;
4696                         path = dentry_path_raw(file_dentry(iocb->ki_filp),
4697                                                                 p, PATH_MAX);
4698                         if (IS_ERR(path)) {
4699                                 kfree(p);
4700                                 goto skip_write_trace;
4701                         }
4702                         trace_f2fs_datawrite_start(inode, orig_pos, orig_count,
4703                                         current->pid, path, current->comm);
4704                         kfree(p);
4705                 }
4706 skip_write_trace:
4707                 /* Do the actual write. */
4708                 ret = dio ?
4709                         f2fs_dio_write_iter(iocb, from, &may_need_sync) :
4710                         f2fs_buffered_write_iter(iocb, from);
4711
4712                 if (trace_f2fs_datawrite_end_enabled())
4713                         trace_f2fs_datawrite_end(inode, orig_pos, ret);
4714         }
4715
4716         /* Don't leave any preallocated blocks around past i_size. */
4717         if (preallocated && i_size_read(inode) < target_size) {
4718                 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
4719                 filemap_invalidate_lock(inode->i_mapping);
4720                 if (!f2fs_truncate(inode))
4721                         file_dont_truncate(inode);
4722                 filemap_invalidate_unlock(inode->i_mapping);
4723                 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
4724         } else {
4725                 file_dont_truncate(inode);
4726         }
4727
4728         clear_inode_flag(inode, FI_PREALLOCATED_ALL);
4729 out_unlock:
4730         inode_unlock(inode);
4731 out:
4732         trace_f2fs_file_write_iter(inode, orig_pos, orig_count, ret);
4733         if (ret > 0 && may_need_sync)
4734                 ret = generic_write_sync(iocb, ret);
4735         return ret;
4736 }
4737
4738 static int f2fs_file_fadvise(struct file *filp, loff_t offset, loff_t len,
4739                 int advice)
4740 {
4741         struct address_space *mapping;
4742         struct backing_dev_info *bdi;
4743         struct inode *inode = file_inode(filp);
4744         int err;
4745
4746         if (advice == POSIX_FADV_SEQUENTIAL) {
4747                 if (S_ISFIFO(inode->i_mode))
4748                         return -ESPIPE;
4749
4750                 mapping = filp->f_mapping;
4751                 if (!mapping || len < 0)
4752                         return -EINVAL;
4753
4754                 bdi = inode_to_bdi(mapping->host);
4755                 filp->f_ra.ra_pages = bdi->ra_pages *
4756                         F2FS_I_SB(inode)->seq_file_ra_mul;
4757                 spin_lock(&filp->f_lock);
4758                 filp->f_mode &= ~FMODE_RANDOM;
4759                 spin_unlock(&filp->f_lock);
4760                 return 0;
4761         }
4762
4763         err = generic_fadvise(filp, offset, len, advice);
4764         if (!err && advice == POSIX_FADV_DONTNEED &&
4765                 test_opt(F2FS_I_SB(inode), COMPRESS_CACHE) &&
4766                 f2fs_compressed_file(inode))
4767                 f2fs_invalidate_compress_pages(F2FS_I_SB(inode), inode->i_ino);
4768
4769         return err;
4770 }
4771
4772 #ifdef CONFIG_COMPAT
4773 struct compat_f2fs_gc_range {
4774         u32 sync;
4775         compat_u64 start;
4776         compat_u64 len;
4777 };
4778 #define F2FS_IOC32_GARBAGE_COLLECT_RANGE        _IOW(F2FS_IOCTL_MAGIC, 11,\
4779                                                 struct compat_f2fs_gc_range)
4780
4781 static int f2fs_compat_ioc_gc_range(struct file *file, unsigned long arg)
4782 {
4783         struct compat_f2fs_gc_range __user *urange;
4784         struct f2fs_gc_range range;
4785         int err;
4786
4787         urange = compat_ptr(arg);
4788         err = get_user(range.sync, &urange->sync);
4789         err |= get_user(range.start, &urange->start);
4790         err |= get_user(range.len, &urange->len);
4791         if (err)
4792                 return -EFAULT;
4793
4794         return __f2fs_ioc_gc_range(file, &range);
4795 }
4796
4797 struct compat_f2fs_move_range {
4798         u32 dst_fd;
4799         compat_u64 pos_in;
4800         compat_u64 pos_out;
4801         compat_u64 len;
4802 };
4803 #define F2FS_IOC32_MOVE_RANGE           _IOWR(F2FS_IOCTL_MAGIC, 9,      \
4804                                         struct compat_f2fs_move_range)
4805
4806 static int f2fs_compat_ioc_move_range(struct file *file, unsigned long arg)
4807 {
4808         struct compat_f2fs_move_range __user *urange;
4809         struct f2fs_move_range range;
4810         int err;
4811
4812         urange = compat_ptr(arg);
4813         err = get_user(range.dst_fd, &urange->dst_fd);
4814         err |= get_user(range.pos_in, &urange->pos_in);
4815         err |= get_user(range.pos_out, &urange->pos_out);
4816         err |= get_user(range.len, &urange->len);
4817         if (err)
4818                 return -EFAULT;
4819
4820         return __f2fs_ioc_move_range(file, &range);
4821 }
4822
4823 long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
4824 {
4825         if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
4826                 return -EIO;
4827         if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(file))))
4828                 return -ENOSPC;
4829
4830         switch (cmd) {
4831         case FS_IOC32_GETVERSION:
4832                 cmd = FS_IOC_GETVERSION;
4833                 break;
4834         case F2FS_IOC32_GARBAGE_COLLECT_RANGE:
4835                 return f2fs_compat_ioc_gc_range(file, arg);
4836         case F2FS_IOC32_MOVE_RANGE:
4837                 return f2fs_compat_ioc_move_range(file, arg);
4838         case F2FS_IOC_START_ATOMIC_WRITE:
4839         case F2FS_IOC_COMMIT_ATOMIC_WRITE:
4840         case F2FS_IOC_START_VOLATILE_WRITE:
4841         case F2FS_IOC_RELEASE_VOLATILE_WRITE:
4842         case F2FS_IOC_ABORT_ATOMIC_WRITE:
4843         case F2FS_IOC_SHUTDOWN:
4844         case FITRIM:
4845         case FS_IOC_SET_ENCRYPTION_POLICY:
4846         case FS_IOC_GET_ENCRYPTION_PWSALT:
4847         case FS_IOC_GET_ENCRYPTION_POLICY:
4848         case FS_IOC_GET_ENCRYPTION_POLICY_EX:
4849         case FS_IOC_ADD_ENCRYPTION_KEY:
4850         case FS_IOC_REMOVE_ENCRYPTION_KEY:
4851         case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
4852         case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
4853         case FS_IOC_GET_ENCRYPTION_NONCE:
4854         case F2FS_IOC_GARBAGE_COLLECT:
4855         case F2FS_IOC_WRITE_CHECKPOINT:
4856         case F2FS_IOC_DEFRAGMENT:
4857         case F2FS_IOC_FLUSH_DEVICE:
4858         case F2FS_IOC_GET_FEATURES:
4859         case F2FS_IOC_GET_PIN_FILE:
4860         case F2FS_IOC_SET_PIN_FILE:
4861         case F2FS_IOC_PRECACHE_EXTENTS:
4862         case F2FS_IOC_RESIZE_FS:
4863         case FS_IOC_ENABLE_VERITY:
4864         case FS_IOC_MEASURE_VERITY:
4865         case FS_IOC_READ_VERITY_METADATA:
4866         case FS_IOC_GETFSLABEL:
4867         case FS_IOC_SETFSLABEL:
4868         case F2FS_IOC_GET_COMPRESS_BLOCKS:
4869         case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
4870         case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
4871         case F2FS_IOC_SEC_TRIM_FILE:
4872         case F2FS_IOC_GET_COMPRESS_OPTION:
4873         case F2FS_IOC_SET_COMPRESS_OPTION:
4874         case F2FS_IOC_DECOMPRESS_FILE:
4875         case F2FS_IOC_COMPRESS_FILE:
4876                 break;
4877         default:
4878                 return -ENOIOCTLCMD;
4879         }
4880         return __f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
4881 }
4882 #endif
4883
4884 const struct file_operations f2fs_file_operations = {
4885         .llseek         = f2fs_llseek,
4886         .read_iter      = f2fs_file_read_iter,
4887         .write_iter     = f2fs_file_write_iter,
4888         .open           = f2fs_file_open,
4889         .release        = f2fs_release_file,
4890         .mmap           = f2fs_file_mmap,
4891         .flush          = f2fs_file_flush,
4892         .fsync          = f2fs_sync_file,
4893         .fallocate      = f2fs_fallocate,
4894         .unlocked_ioctl = f2fs_ioctl,
4895 #ifdef CONFIG_COMPAT
4896         .compat_ioctl   = f2fs_compat_ioctl,
4897 #endif
4898         .splice_read    = generic_file_splice_read,
4899         .splice_write   = iter_file_splice_write,
4900         .fadvise        = f2fs_file_fadvise,
4901 };