fs/ntfs3: Fix a memory leak on object opts
fs/ntfs3/file.c (platform/kernel/linux-starfive.git)
// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 *  Regular file handling primitives for NTFS-based filesystems.
 *
 */

#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include <linux/compat.h>
#include <linux/falloc.h>
#include <linux/fiemap.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"

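/*
 * ntfs_ioctl_fitrim - Handle the FITRIM ioctl.
 *
 * Copies the fstrim_range from user space, raises minlen to the discard
 * granularity, passes the request to ntfs_trim_fs() and copies the
 * updated range back to user space.
 */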
static int ntfs_ioctl_fitrim(struct ntfs_sb_info *sbi, unsigned long arg)
{
        struct fstrim_range __user *user_range;
        struct fstrim_range range;
        struct request_queue *q = bdev_get_queue(sbi->sb->s_bdev);
        int err;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        if (!blk_queue_discard(q))
                return -EOPNOTSUPP;

        user_range = (struct fstrim_range __user *)arg;
        if (copy_from_user(&range, user_range, sizeof(range)))
                return -EFAULT;

        range.minlen = max_t(u32, range.minlen, q->limits.discard_granularity);

        err = ntfs_trim_fs(sbi, &range);
        if (err < 0)
                return err;

        if (copy_to_user(user_range, &range, sizeof(range)))
                return -EFAULT;

        return 0;
}

static long ntfs_ioctl(struct file *filp, u32 cmd, unsigned long arg)
{
        struct inode *inode = file_inode(filp);
        struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;

        switch (cmd) {
        case FITRIM:
                return ntfs_ioctl_fitrim(sbi, arg);
        }
        return -ENOTTY; /* Inappropriate ioctl for device. */
}

#ifdef CONFIG_COMPAT
static long ntfs_compat_ioctl(struct file *filp, u32 cmd, unsigned long arg)
{
        return ntfs_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
}
#endif

/*
 * ntfs_getattr - inode_operations::getattr
 */
int ntfs_getattr(struct user_namespace *mnt_userns, const struct path *path,
                 struct kstat *stat, u32 request_mask, u32 flags)
{
        struct inode *inode = d_inode(path->dentry);
        struct ntfs_inode *ni = ntfs_i(inode);

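        /* Expose NTFS compression/encryption state via statx attributes. */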
        if (is_compressed(ni))
                stat->attributes |= STATX_ATTR_COMPRESSED;

        if (is_encrypted(ni))
                stat->attributes |= STATX_ATTR_ENCRYPTED;

        stat->attributes_mask |= STATX_ATTR_COMPRESSED | STATX_ATTR_ENCRYPTED;

        generic_fillattr(mnt_userns, inode, stat);

        stat->result_mask |= STATX_BTIME;
        stat->btime = ni->i_crtime;
        stat->blksize = ni->mi.sbi->cluster_size; /* 512, 1K, ..., 2M */

        return 0;
}

static int ntfs_extend_initialized_size(struct file *file,
                                        struct ntfs_inode *ni,
                                        const loff_t valid,
                                        const loff_t new_valid)
{
        struct inode *inode = &ni->vfs_inode;
        struct address_space *mapping = inode->i_mapping;
        struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
        loff_t pos = valid;
        int err;

        if (is_resident(ni)) {
                ni->i_valid = new_valid;
                return 0;
        }

        WARN_ON(is_compressed(ni));
        WARN_ON(valid >= new_valid);

        for (;;) {
                u32 zerofrom, len;
                struct page *page;
                void *fsdata;
                u8 bits;
                CLST vcn, lcn, clen;

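                /*
                 * Sparse file: a hole that lies entirely below new_valid
                 * needs only an i_valid update, not page-cache zeroing.
                 */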
                if (is_sparsed(ni)) {
                        bits = sbi->cluster_bits;
                        vcn = pos >> bits;

                        err = attr_data_get_block(ni, vcn, 0, &lcn, &clen,
                                                  NULL);
                        if (err)
                                goto out;

                        if (lcn == SPARSE_LCN) {
                                loff_t vbo = (loff_t)vcn << bits;
                                loff_t to = vbo + ((loff_t)clen << bits);

                                if (to <= new_valid) {
                                        ni->i_valid = to;
                                        pos = to;
                                        goto next;
                                }

                                if (vbo < pos) {
                                        pos = vbo;
                                } else {
                                        to = (new_valid >> bits) << bits;
                                        if (pos < to) {
                                                ni->i_valid = to;
                                                pos = to;
                                                goto next;
                                        }
                                }
                        }
                }

                zerofrom = pos & (PAGE_SIZE - 1);
                len = PAGE_SIZE - zerofrom;

                if (pos + len > new_valid)
                        len = new_valid - pos;

                err = pagecache_write_begin(file, mapping, pos, len, 0, &page,
                                            &fsdata);
                if (err)
                        goto out;

                zero_user_segment(page, zerofrom, PAGE_SIZE);

                /* pagecache_write_end() unlocks and puts the page even on failure. */
                err = pagecache_write_end(file, mapping, pos, len, len, page,
                                          fsdata);
                if (err < 0)
                        goto out;
                pos += len;

next:
                if (pos >= new_valid)
                        break;

                balance_dirty_pages_ratelimited(mapping);
                cond_resched();
        }

        return 0;

out:
        ni->i_valid = valid;
        ntfs_inode_warn(inode, "failed to extend initialized size to %llx.",
                        new_valid);
        return err;
}

/*
 * ntfs_zero_range - Helper function for punch_hole.
 *
 * It zeroes a range [vbo, vbo_to).
 */
static int ntfs_zero_range(struct inode *inode, u64 vbo, u64 vbo_to)
{
        int err = 0;
        struct address_space *mapping = inode->i_mapping;
        u32 blocksize = 1 << inode->i_blkbits;
        pgoff_t idx = vbo >> PAGE_SHIFT;
        u32 z_start = vbo & (PAGE_SIZE - 1);
        pgoff_t idx_end = (vbo_to + PAGE_SIZE - 1) >> PAGE_SHIFT;
        loff_t page_off;
        struct buffer_head *head, *bh;
        u32 bh_next, bh_off, z_end;
        sector_t iblock;
        struct page *page;

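        /*
         * Walk the pages covering [vbo, vbo_to): map the buffers that
         * overlap the range, read them in if not up to date, then zero
         * the in-page byte range and mark the buffers dirty.
         */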
        for (; idx < idx_end; idx += 1, z_start = 0) {
                page_off = (loff_t)idx << PAGE_SHIFT;
                z_end = (page_off + PAGE_SIZE) > vbo_to ? (vbo_to - page_off)
                                                        : PAGE_SIZE;
                iblock = page_off >> inode->i_blkbits;

                page = find_or_create_page(mapping, idx,
                                           mapping_gfp_constraint(mapping,
                                                                  ~__GFP_FS));
                if (!page)
                        return -ENOMEM;

                if (!page_has_buffers(page))
                        create_empty_buffers(page, blocksize, 0);

                bh = head = page_buffers(page);
                bh_off = 0;
                do {
                        bh_next = bh_off + blocksize;

                        if (bh_next <= z_start || bh_off >= z_end)
                                continue;

                        if (!buffer_mapped(bh)) {
                                ntfs_get_block(inode, iblock, bh, 0);
                                /* Unmapped? It's a hole - nothing to do. */
                                if (!buffer_mapped(bh))
                                        continue;
                        }

                        /* Ok, it's mapped. Make sure it's up-to-date. */
                        if (PageUptodate(page))
                                set_buffer_uptodate(bh);

                        if (!buffer_uptodate(bh)) {
                                lock_buffer(bh);
                                bh->b_end_io = end_buffer_read_sync;
                                get_bh(bh);
                                submit_bh(REQ_OP_READ, 0, bh);

                                wait_on_buffer(bh);
                                if (!buffer_uptodate(bh)) {
                                        unlock_page(page);
                                        put_page(page);
                                        err = -EIO;
                                        goto out;
                                }
                        }

                        mark_buffer_dirty(bh);

                } while (bh_off = bh_next, iblock += 1,
                         head != (bh = bh->b_this_page));

                zero_user_segment(page, z_start, z_end);

                unlock_page(page);
                put_page(page);
                cond_resched();
        }
out:
        mark_inode_dirty(inode);
        return err;
}

/*
 * ntfs_sparse_cluster - Helper function to zero newly allocated clusters.
 *
 * NOTE: 512 <= cluster size <= 2M
 */
void ntfs_sparse_cluster(struct inode *inode, struct page *page0, CLST vcn,
                         CLST len)
{
        struct address_space *mapping = inode->i_mapping;
        struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
        u64 vbo = (u64)vcn << sbi->cluster_bits;
        u64 bytes = (u64)len << sbi->cluster_bits;
        u32 blocksize = 1 << inode->i_blkbits;
        pgoff_t idx0 = page0 ? page0->index : -1;
        loff_t vbo_clst = vbo & sbi->cluster_mask_inv;
        loff_t end = ntfs_up_cluster(sbi, vbo + bytes);
        pgoff_t idx = vbo_clst >> PAGE_SHIFT;
        u32 from = vbo_clst & (PAGE_SIZE - 1);
        pgoff_t idx_end = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
        loff_t page_off;
        u32 to;
        bool partial;
        struct page *page;

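        /*
         * Zero every page that covers the new clusters. A page is marked
         * up-to-date and dirtied as a whole only when all of its buffers
         * are covered; otherwise only the covered buffers are dirtied.
         */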
        for (; idx < idx_end; idx += 1, from = 0) {
                page = idx == idx0 ? page0 : grab_cache_page(mapping, idx);

                if (!page)
                        continue;

                page_off = (loff_t)idx << PAGE_SHIFT;
                to = (page_off + PAGE_SIZE) > end ? (end - page_off)
                                                  : PAGE_SIZE;
                partial = false;

                if ((from || PAGE_SIZE != to) &&
                    likely(!page_has_buffers(page))) {
                        create_empty_buffers(page, blocksize, 0);
                }

                if (page_has_buffers(page)) {
                        struct buffer_head *head, *bh;
                        u32 bh_off = 0;

                        bh = head = page_buffers(page);
                        do {
                                u32 bh_next = bh_off + blocksize;

                                if (from <= bh_off && bh_next <= to) {
                                        set_buffer_uptodate(bh);
                                        mark_buffer_dirty(bh);
                                } else if (!buffer_uptodate(bh)) {
                                        partial = true;
                                }
                                bh_off = bh_next;
                        } while (head != (bh = bh->b_this_page));
                }

                zero_user_segment(page, from, to);

                if (!partial) {
                        if (!PageUptodate(page))
                                SetPageUptodate(page);
                        set_page_dirty(page);
                }

                if (idx != idx0) {
                        unlock_page(page);
                        put_page(page);
                }
                cond_resched();
        }
        mark_inode_dirty(inode);
}

/*
 * ntfs_file_mmap - file_operations::mmap
 */
static int ntfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        struct ntfs_inode *ni = ntfs_i(inode);
        u64 from = ((u64)vma->vm_pgoff << PAGE_SHIFT);
        bool rw = vma->vm_flags & VM_WRITE;
        int err;

        if (is_encrypted(ni)) {
                ntfs_inode_warn(inode, "mmap encrypted not supported");
                return -EOPNOTSUPP;
        }

        if (is_dedup(ni)) {
                ntfs_inode_warn(inode, "mmap deduplicated not supported");
                return -EOPNOTSUPP;
        }

        if (is_compressed(ni) && rw) {
                ntfs_inode_warn(inode, "mmap(write) compressed not supported");
                return -EOPNOTSUPP;
        }

        if (rw) {
                u64 to = min_t(loff_t, i_size_read(inode),
                               from + vma->vm_end - vma->vm_start);

                if (is_sparsed(ni)) {
                        /* Allocate clusters for rw map. */
                        struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
                        CLST lcn, len;
                        CLST vcn = from >> sbi->cluster_bits;
                        CLST end = bytes_to_cluster(sbi, to);
                        bool new;

                        for (; vcn < end; vcn += len) {
                                err = attr_data_get_block(ni, vcn, 1, &lcn,
                                                          &len, &new);
                                if (err)
                                        goto out;

                                if (!new)
                                        continue;
                                ntfs_sparse_cluster(inode, NULL, vcn, 1);
                        }
                }

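                /*
                 * Writable mappings bypass the write path, so the
                 * initialized ('valid') size must already cover the
                 * mapped range; zero out [i_valid, to) up front.
                 */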
                if (ni->i_valid < to) {
                        if (!inode_trylock(inode)) {
                                err = -EAGAIN;
                                goto out;
                        }
                        err = ntfs_extend_initialized_size(file, ni,
                                                           ni->i_valid, to);
                        inode_unlock(inode);
                        if (err)
                                goto out;
                }
        }

        err = generic_file_mmap(file, vma);
out:
        return err;
}

static int ntfs_extend(struct inode *inode, loff_t pos, size_t count,
                       struct file *file)
{
        struct ntfs_inode *ni = ntfs_i(inode);
        struct address_space *mapping = inode->i_mapping;
        loff_t end = pos + count;
        bool extend_init = file && pos > ni->i_valid;
        int err;

        if (end <= inode->i_size && !extend_init)
                return 0;

        /* Mark the rw-mounted NTFS volume dirty. It will be cleared at umount. */
        ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_DIRTY);

        if (end > inode->i_size) {
                err = ntfs_set_size(inode, end);
                if (err)
                        goto out;
                inode->i_size = end;
        }

        if (extend_init && !is_compressed(ni)) {
                err = ntfs_extend_initialized_size(file, ni, ni->i_valid, pos);
                if (err)
                        goto out;
        } else {
                err = 0;
        }

        inode->i_ctime = inode->i_mtime = current_time(inode);
        mark_inode_dirty(inode);

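        /* Synchronous inode: flush both data and the inode itself now. */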
        if (IS_SYNC(inode)) {
                int err2;

                err = filemap_fdatawrite_range(mapping, pos, end - 1);
                err2 = sync_mapping_buffers(mapping);
                if (!err)
                        err = err2;
                err2 = write_inode_now(inode, 1);
                if (!err)
                        err = err2;
                if (!err)
                        err = filemap_fdatawait_range(mapping, pos, end - 1);
        }

out:
        return err;
}

static int ntfs_truncate(struct inode *inode, loff_t new_size)
{
        struct super_block *sb = inode->i_sb;
        struct ntfs_inode *ni = ntfs_i(inode);
        int err, dirty = 0;
        u64 new_valid;

        if (!S_ISREG(inode->i_mode))
                return 0;

        if (is_compressed(ni)) {
                if (ni->i_valid > new_size)
                        ni->i_valid = new_size;
        } else {
                err = block_truncate_page(inode->i_mapping, new_size,
                                          ntfs_get_block);
                if (err)
                        return err;
        }

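        /*
         * The valid (initialized) size cannot exceed the new file size;
         * round it up to a block boundary.
         */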
        new_valid = ntfs_up_block(sb, min_t(u64, ni->i_valid, new_size));

        ni_lock(ni);

        truncate_setsize(inode, new_size);

        down_write(&ni->file.run_lock);
        err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, new_size,
                            &new_valid, true, NULL);
        up_write(&ni->file.run_lock);

        if (new_valid < ni->i_valid)
                ni->i_valid = new_valid;

        ni_unlock(ni);

        ni->std_fa |= FILE_ATTRIBUTE_ARCHIVE;
        inode->i_ctime = inode->i_mtime = current_time(inode);
        if (!IS_DIRSYNC(inode)) {
                dirty = 1;
        } else {
                err = ntfs_sync_inode(inode);
                if (err)
                        return err;
        }

        if (dirty)
                mark_inode_dirty(inode);

        /*ntfs_flush_inodes(inode->i_sb, inode, NULL);*/

        return 0;
}

/*
 * ntfs_fallocate
 *
 * Preallocate space for a file. This implements ntfs3's fallocate file
 * operation, which gets called from the sys_fallocate system call. User
 * space requests 'len' bytes at 'vbo'. If FALLOC_FL_KEEP_SIZE is set,
 * we just allocate clusters without zeroing them out. Otherwise we
 * allocate and zero out clusters via an expanding truncate.
 */
static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
{
        struct inode *inode = file->f_mapping->host;
        struct super_block *sb = inode->i_sb;
        struct ntfs_sb_info *sbi = sb->s_fs_info;
        struct ntfs_inode *ni = ntfs_i(inode);
        loff_t end = vbo + len;
        loff_t vbo_down = round_down(vbo, PAGE_SIZE);
        loff_t i_size;
        int err;

        /* No support for directories. */
        if (!S_ISREG(inode->i_mode))
                return -EOPNOTSUPP;

        /* Return error if mode is not supported. */
        if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
                     FALLOC_FL_COLLAPSE_RANGE)) {
                ntfs_inode_warn(inode, "fallocate(0x%x) is not supported",
                                mode);
                return -EOPNOTSUPP;
        }

        ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);

        inode_lock(inode);
        i_size = inode->i_size;

        if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
                /* Should never be here, see ntfs_file_open(). */
                err = -EOPNOTSUPP;
                goto out;
        }

        if (mode & FALLOC_FL_PUNCH_HOLE) {
                u32 frame_size;
                loff_t mask, vbo_a, end_a, tmp;

                if (!(mode & FALLOC_FL_KEEP_SIZE)) {
                        err = -EINVAL;
                        goto out;
                }

                err = filemap_write_and_wait_range(inode->i_mapping, vbo,
                                                   end - 1);
                if (err)
                        goto out;

                err = filemap_write_and_wait_range(inode->i_mapping, end,
                                                   LLONG_MAX);
                if (err)
                        goto out;

                inode_dio_wait(inode);

                truncate_pagecache(inode, vbo_down);

                if (!is_sparsed(ni) && !is_compressed(ni)) {
                        /* Normal file. */
                        err = ntfs_zero_range(inode, vbo, end);
                        goto out;
                }

                ni_lock(ni);
                err = attr_punch_hole(ni, vbo, len, &frame_size);
                ni_unlock(ni);
                if (err != E_NTFS_NOTALIGNED)
                        goto out;

                /*
                 * Process a punch that is not frame-aligned: zero the
                 * unaligned head and tail by hand, then punch out the
                 * frame-aligned middle.
                 */
                mask = frame_size - 1;
                vbo_a = (vbo + mask) & ~mask;
                end_a = end & ~mask;

                tmp = min(vbo_a, end);
                if (tmp > vbo) {
                        err = ntfs_zero_range(inode, vbo, tmp);
                        if (err)
                                goto out;
                }

                if (vbo < end_a && end_a < end) {
                        err = ntfs_zero_range(inode, end_a, end);
                        if (err)
                                goto out;
                }

                /* Aligned punch_hole */
                if (end_a > vbo_a) {
                        ni_lock(ni);
                        err = attr_punch_hole(ni, vbo_a, end_a - vbo_a, NULL);
                        ni_unlock(ni);
                }
        } else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
                if (mode & ~FALLOC_FL_COLLAPSE_RANGE) {
                        err = -EINVAL;
                        goto out;
                }

                /*
                 * Write the tail of the last page before the removed range,
                 * since it will be removed from the page cache below.
                 */
                err = filemap_write_and_wait_range(inode->i_mapping, vbo_down,
                                                   vbo);
                if (err)
                        goto out;

                /*
                 * Write out the data that will be shifted so that it is
                 * preserved when the page cache is discarded below.
                 */
                err = filemap_write_and_wait_range(inode->i_mapping, end,
                                                   LLONG_MAX);
                if (err)
                        goto out;

                /* Wait for existing dio to complete. */
                inode_dio_wait(inode);

                truncate_pagecache(inode, vbo_down);

                ni_lock(ni);
                err = attr_collapse_range(ni, vbo, len);
                ni_unlock(ni);
        } else {
                /*
                 * Normal file: Allocate clusters, do not change 'valid' size.
                 */
                err = ntfs_set_size(inode, max(end, i_size));
                if (err)
                        goto out;

                if (is_sparsed(ni) || is_compressed(ni)) {
                        CLST vcn_v = ni->i_valid >> sbi->cluster_bits;
                        CLST vcn = vbo >> sbi->cluster_bits;
                        CLST cend = bytes_to_cluster(sbi, end);
                        CLST lcn, clen;
                        bool new;

                        /*
                         * Allocate but do not zero new clusters (see the
                         * comments below). This is a security trade-off: one
                         * can read stale on-disk data, but zeroing all of the
                         * clusters could take too long. Perhaps a privilege
                         * check (e.g. for root) belongs here.
                         */
                        for (; vcn < cend; vcn += clen) {
                                err = attr_data_get_block(ni, vcn, cend - vcn,
                                                          &lcn, &clen, &new);
                                if (err)
                                        goto out;
                                if (!new || vcn >= vcn_v)
                                        continue;

                                /*
                                 * Unwritten area.
                                 * NTFS cannot track several unwritten areas,
                                 * so call 'ntfs_sparse_cluster' to zero the
                                 * newly allocated clusters.
                                 *
                                 * This can be slow in the worst case:
                                 * 1G of sparse clusters + 1 cluster of data
                                 * gives valid_size == 1G + 1 cluster, and
                                 * fallocate(1G) then zeroes the whole 1G.
                                 * xfstests 016/086 fail without
                                 * 'ntfs_sparse_cluster'.
                                 */
                                ntfs_sparse_cluster(inode, NULL, vcn,
                                                    min(vcn_v - vcn, clen));
                        }
                }

                if (mode & FALLOC_FL_KEEP_SIZE) {
                        ni_lock(ni);
                        /* Pass 'true' to keep the preallocated space. */
                        err = attr_set_size(ni, ATTR_DATA, NULL, 0,
                                            &ni->file.run, i_size, &ni->i_valid,
                                            true, NULL);
                        ni_unlock(ni);
                }
        }

out:
        if (err == -EFBIG)
                err = -ENOSPC;

        if (!err) {
                inode->i_ctime = inode->i_mtime = current_time(inode);
                mark_inode_dirty(inode);
        }

        inode_unlock(inode);
        return err;
}

/*
 * ntfs3_setattr - inode_operations::setattr
 */
int ntfs3_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
                  struct iattr *attr)
{
        struct super_block *sb = dentry->d_sb;
        struct ntfs_sb_info *sbi = sb->s_fs_info;
        struct inode *inode = d_inode(dentry);
        struct ntfs_inode *ni = ntfs_i(inode);
        u32 ia_valid = attr->ia_valid;
        umode_t mode = inode->i_mode;
        int err;

        if (sbi->options->noacsrules) {
                /* "No access rules" - force any changes of time, etc. */
                attr->ia_valid |= ATTR_FORCE;
                /* ...and disallow changing of uid/gid/mode. */
                attr->ia_valid &= ~(ATTR_UID | ATTR_GID | ATTR_MODE);
                ia_valid = attr->ia_valid;
        }

        err = setattr_prepare(mnt_userns, dentry, attr);
        if (err)
                goto out;

        if (ia_valid & ATTR_SIZE) {
                loff_t oldsize = inode->i_size;

                if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
                        /* Should never be here, see ntfs_file_open(). */
                        err = -EOPNOTSUPP;
                        goto out;
                }
                inode_dio_wait(inode);

                if (attr->ia_size < oldsize)
                        err = ntfs_truncate(inode, attr->ia_size);
                else if (attr->ia_size > oldsize)
                        err = ntfs_extend(inode, attr->ia_size, 0, NULL);

                if (err)
                        goto out;

                ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
        }

        setattr_copy(mnt_userns, inode, attr);

        if (mode != inode->i_mode) {
                err = ntfs_acl_chmod(mnt_userns, inode);
                if (err)
                        goto out;

                /* Linux 'w' -> Windows 'ro'. */
                if (0222 & inode->i_mode)
                        ni->std_fa &= ~FILE_ATTRIBUTE_READONLY;
                else
                        ni->std_fa |= FILE_ATTRIBUTE_READONLY;
        }

        if (ia_valid & (ATTR_UID | ATTR_GID | ATTR_MODE))
                ntfs_save_wsl_perm(inode);
        mark_inode_dirty(inode);
out:
        return err;
}

static ssize_t ntfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
        struct ntfs_inode *ni = ntfs_i(inode);

        if (is_encrypted(ni)) {
                ntfs_inode_warn(inode, "encrypted i/o not supported");
                return -EOPNOTSUPP;
        }

        if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) {
                ntfs_inode_warn(inode, "direct i/o + compressed not supported");
                return -EOPNOTSUPP;
        }

#ifndef CONFIG_NTFS3_LZX_XPRESS
        if (ni->ni_flags & NI_FLAG_COMPRESSED_MASK) {
                ntfs_inode_warn(
                        inode,
                        "activate CONFIG_NTFS3_LZX_XPRESS to read external compressed files");
                return -EOPNOTSUPP;
        }
#endif

        if (is_dedup(ni)) {
                ntfs_inode_warn(inode, "read deduplicated not supported");
                return -EOPNOTSUPP;
        }

        return generic_file_read_iter(iocb, iter);
}

/*
 * ntfs_get_frame_pages
 *
 * Return: Array of locked pages.
 */
static int ntfs_get_frame_pages(struct address_space *mapping, pgoff_t index,
                                struct page **pages, u32 pages_per_frame,
                                bool *frame_uptodate)
{
        gfp_t gfp_mask = mapping_gfp_mask(mapping);
        u32 npages;

        *frame_uptodate = true;

        for (npages = 0; npages < pages_per_frame; npages++, index++) {
                struct page *page;

                page = find_or_create_page(mapping, index, gfp_mask);
                if (!page) {
                        while (npages--) {
                                page = pages[npages];
                                unlock_page(page);
                                put_page(page);
                        }

                        return -ENOMEM;
                }

                if (!PageUptodate(page))
                        *frame_uptodate = false;

                pages[npages] = page;
        }

        return 0;
}

/*
 * ntfs_compress_write - Helper for ntfs_file_write_iter() (compressed files).
 */
static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
{
        int err;
        struct file *file = iocb->ki_filp;
        size_t count = iov_iter_count(from);
        loff_t pos = iocb->ki_pos;
        struct inode *inode = file_inode(file);
        loff_t i_size = inode->i_size;
        struct address_space *mapping = inode->i_mapping;
        struct ntfs_inode *ni = ntfs_i(inode);
        u64 valid = ni->i_valid;
        struct ntfs_sb_info *sbi = ni->mi.sbi;
        struct page *page, **pages = NULL;
        size_t written = 0;
        u8 frame_bits = NTFS_LZNT_CUNIT + sbi->cluster_bits;
        u32 frame_size = 1u << frame_bits;
        u32 pages_per_frame = frame_size >> PAGE_SHIFT;
        u32 ip, off;
        CLST frame;
        u64 frame_vbo;
        pgoff_t index;
        bool frame_uptodate;

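        /*
         * Compressed files are processed one compression frame at a time.
         * A frame that is only partially overwritten must first be read
         * and decompressed, then recompressed and written back, via
         * ni_read_frame() and ni_write_frame().
         */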
        if (frame_size < PAGE_SIZE) {
                /*
                 * frame_size == 8K if cluster 512
                 * frame_size == 64K if cluster 4096
                 */
                ntfs_inode_warn(inode, "page size is bigger than frame size");
                return -EOPNOTSUPP;
        }

        pages = kmalloc_array(pages_per_frame, sizeof(struct page *), GFP_NOFS);
        if (!pages)
                return -ENOMEM;

        current->backing_dev_info = inode_to_bdi(inode);
        err = file_remove_privs(file);
        if (err)
                goto out;

        err = file_update_time(file);
        if (err)
                goto out;

        /* Zero range [valid : pos). */
        while (valid < pos) {
                CLST lcn, clen;

                frame = valid >> frame_bits;
                frame_vbo = valid & ~(frame_size - 1);
                off = valid & (frame_size - 1);

                err = attr_data_get_block(ni, frame << NTFS_LZNT_CUNIT, 0, &lcn,
                                          &clen, NULL);
                if (err)
                        goto out;

                if (lcn == SPARSE_LCN) {
                        ni->i_valid = valid =
                                frame_vbo + ((u64)clen << sbi->cluster_bits);
                        continue;
                }

                /* Load full frame. */
                err = ntfs_get_frame_pages(mapping, frame_vbo >> PAGE_SHIFT,
                                           pages, pages_per_frame,
                                           &frame_uptodate);
                if (err)
                        goto out;

                if (!frame_uptodate && off) {
                        err = ni_read_frame(ni, frame_vbo, pages,
                                            pages_per_frame);
                        if (err) {
                                for (ip = 0; ip < pages_per_frame; ip++) {
                                        page = pages[ip];
                                        unlock_page(page);
                                        put_page(page);
                                }
                                goto out;
                        }
                }

                ip = off >> PAGE_SHIFT;
                off = offset_in_page(valid);
                for (; ip < pages_per_frame; ip++, off = 0) {
                        page = pages[ip];
                        zero_user_segment(page, off, PAGE_SIZE);
                        flush_dcache_page(page);
                        SetPageUptodate(page);
                }

                ni_lock(ni);
                err = ni_write_frame(ni, pages, pages_per_frame);
                ni_unlock(ni);

                for (ip = 0; ip < pages_per_frame; ip++) {
                        page = pages[ip];
                        SetPageUptodate(page);
                        unlock_page(page);
                        put_page(page);
                }

                if (err)
                        goto out;

                ni->i_valid = valid = frame_vbo + frame_size;
        }

        /* Copy user data [pos : pos + count). */
        while (count) {
                size_t copied, bytes;

                off = pos & (frame_size - 1);
                bytes = frame_size - off;
                if (bytes > count)
                        bytes = count;

                frame = pos >> frame_bits;
                frame_vbo = pos & ~(frame_size - 1);
                index = frame_vbo >> PAGE_SHIFT;

                if (unlikely(iov_iter_fault_in_readable(from, bytes))) {
                        err = -EFAULT;
                        goto out;
                }

                /* Load full frame. */
                err = ntfs_get_frame_pages(mapping, index, pages,
                                           pages_per_frame, &frame_uptodate);
                if (err)
                        goto out;

                if (!frame_uptodate) {
                        loff_t to = pos + bytes;

                        if (off || (to < i_size && (to & (frame_size - 1)))) {
                                err = ni_read_frame(ni, frame_vbo, pages,
                                                    pages_per_frame);
                                if (err) {
                                        for (ip = 0; ip < pages_per_frame;
                                             ip++) {
                                                page = pages[ip];
                                                unlock_page(page);
                                                put_page(page);
                                        }
                                        goto out;
                                }
                        }
                }

                WARN_ON(!bytes);
                copied = 0;
                ip = off >> PAGE_SHIFT;
                off = offset_in_page(pos);

                /* Copy user data to pages. */
                for (;;) {
                        size_t cp, tail = PAGE_SIZE - off;

                        page = pages[ip];
                        cp = copy_page_from_iter_atomic(page, off,
                                                        min(tail, bytes), from);
                        flush_dcache_page(page);

                        copied += cp;
                        bytes -= cp;
                        if (!bytes || !cp)
                                break;

                        if (cp < tail) {
                                off += cp;
                        } else {
                                ip++;
                                off = 0;
                        }
                }

                ni_lock(ni);
                err = ni_write_frame(ni, pages, pages_per_frame);
                ni_unlock(ni);

                for (ip = 0; ip < pages_per_frame; ip++) {
                        page = pages[ip];
                        ClearPageDirty(page);
                        SetPageUptodate(page);
                        unlock_page(page);
                        put_page(page);
                }

                if (err)
                        goto out;

                /*
                 * We can loop for a long time in here. Be nice and allow
                 * us to schedule out to avoid softlocking if preempt
                 * is disabled.
                 */
                cond_resched();

                pos += copied;
                written += copied;

                count = iov_iter_count(from);
        }

out:
        kfree(pages);

        current->backing_dev_info = NULL;

        if (err < 0)
                return err;

        iocb->ki_pos += written;
        if (iocb->ki_pos > ni->i_valid)
                ni->i_valid = iocb->ki_pos;

        return written;
}

/*
 * ntfs_file_write_iter - file_operations::write_iter
 */
static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        ssize_t ret;
        struct ntfs_inode *ni = ntfs_i(inode);

        if (is_encrypted(ni)) {
                ntfs_inode_warn(inode, "encrypted i/o not supported");
                return -EOPNOTSUPP;
        }

        if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) {
                ntfs_inode_warn(inode, "direct i/o + compressed not supported");
                return -EOPNOTSUPP;
        }

        if (is_dedup(ni)) {
                ntfs_inode_warn(inode, "write into deduplicated not supported");
                return -EOPNOTSUPP;
        }

        if (!inode_trylock(inode)) {
                if (iocb->ki_flags & IOCB_NOWAIT)
                        return -EAGAIN;
                inode_lock(inode);
        }

        ret = generic_write_checks(iocb, from);
        if (ret <= 0)
                goto out;

        if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
                /* Should never be here, see ntfs_file_open(). */
                ret = -EOPNOTSUPP;
                goto out;
        }

        ret = ntfs_extend(inode, iocb->ki_pos, ret, file);
        if (ret)
                goto out;

        ret = is_compressed(ni) ? ntfs_compress_write(iocb, from)
                                : __generic_file_write_iter(iocb, from);

out:
        inode_unlock(inode);

        if (ret > 0)
                ret = generic_write_sync(iocb, ret);

        return ret;
}

/*
 * ntfs_file_open - file_operations::open
 */
int ntfs_file_open(struct inode *inode, struct file *file)
{
        struct ntfs_inode *ni = ntfs_i(inode);

        if (unlikely((is_compressed(ni) || is_encrypted(ni)) &&
                     (file->f_flags & O_DIRECT))) {
                return -EOPNOTSUPP;
        }

        /* Decompress "external compressed" file if opened for rw. */
        if ((ni->ni_flags & NI_FLAG_COMPRESSED_MASK) &&
            (file->f_flags & (O_WRONLY | O_RDWR | O_TRUNC))) {
#ifdef CONFIG_NTFS3_LZX_XPRESS
                int err = ni_decompress_file(ni);

                if (err)
                        return err;
#else
                ntfs_inode_warn(
                        inode,
                        "activate CONFIG_NTFS3_LZX_XPRESS to write external compressed files");
                return -EOPNOTSUPP;
#endif
        }

        return generic_file_open(inode, file);
}

/*
 * ntfs_file_release - file_operations::release
 */
static int ntfs_file_release(struct inode *inode, struct file *file)
{
        struct ntfs_inode *ni = ntfs_i(inode);
        struct ntfs_sb_info *sbi = ni->mi.sbi;
        int err = 0;

        /* If we are the last writer on the inode, drop the block reservation. */
        if (sbi->options->prealloc && ((file->f_mode & FMODE_WRITE) &&
                                      atomic_read(&inode->i_writecount) == 1)) {
                ni_lock(ni);
                down_write(&ni->file.run_lock);

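                /*
                 * Shrink the data attribute back to i_size, releasing
                 * any preallocated tail clusters.
                 */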
                err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
                                    inode->i_size, &ni->i_valid, false, NULL);

                up_write(&ni->file.run_lock);
                ni_unlock(ni);
        }
        return err;
}

/*
 * ntfs_fiemap - file_operations::fiemap
 */
int ntfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                __u64 start, __u64 len)
{
        int err;
        struct ntfs_inode *ni = ntfs_i(inode);

        err = fiemap_prep(inode, fieinfo, start, &len, ~FIEMAP_FLAG_XATTR);
        if (err)
                return err;

        ni_lock(ni);

        err = ni_fiemap(ni, fieinfo, start, len);

        ni_unlock(ni);

        return err;
}

// clang-format off
const struct inode_operations ntfs_file_inode_operations = {
        .getattr        = ntfs_getattr,
        .setattr        = ntfs3_setattr,
        .listxattr      = ntfs_listxattr,
        .permission     = ntfs_permission,
        .get_acl        = ntfs_get_acl,
        .set_acl        = ntfs_set_acl,
        .fiemap         = ntfs_fiemap,
};

const struct file_operations ntfs_file_operations = {
        .llseek         = generic_file_llseek,
        .read_iter      = ntfs_file_read_iter,
        .write_iter     = ntfs_file_write_iter,
        .unlocked_ioctl = ntfs_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = ntfs_compat_ioctl,
#endif
        .splice_read    = generic_file_splice_read,
        .mmap           = ntfs_file_mmap,
        .open           = ntfs_file_open,
        .fsync          = generic_file_fsync,
        .splice_write   = iter_file_splice_write,
        .fallocate      = ntfs_fallocate,
        .release        = ntfs_file_release,
};
// clang-format on