1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
6 * TODO: Merge attr_set_size/attr_data_get_block/attr_allocate_frame?
9 #include <linux/blkdev.h>
10 #include <linux/buffer_head.h>
12 #include <linux/hash.h>
13 #include <linux/nls.h>
14 #include <linux/ratelimit.h>
15 #include <linux/slab.h>
22 * You can set external NTFS_MIN_LOG2_OF_CLUMP/NTFS_MAX_LOG2_OF_CLUMP to tune
23 * the preallocation algorithm.
25 #ifndef NTFS_MIN_LOG2_OF_CLUMP
26 #define NTFS_MIN_LOG2_OF_CLUMP 16
29 #ifndef NTFS_MAX_LOG2_OF_CLUMP
30 #define NTFS_MAX_LOG2_OF_CLUMP 26
34 #define NTFS_CLUMP_MIN (1 << (NTFS_MIN_LOG2_OF_CLUMP + 8))
36 #define NTFS_CLUMP_MAX (1ull << (NTFS_MAX_LOG2_OF_CLUMP + 8))
38 static inline u64 get_pre_allocated(u64 size)
44 if (size <= NTFS_CLUMP_MIN) {
45 clump = 1 << NTFS_MIN_LOG2_OF_CLUMP;
46 align_shift = NTFS_MIN_LOG2_OF_CLUMP;
47 } else if (size >= NTFS_CLUMP_MAX) {
48 clump = 1 << NTFS_MAX_LOG2_OF_CLUMP;
49 align_shift = NTFS_MAX_LOG2_OF_CLUMP;
51 align_shift = NTFS_MIN_LOG2_OF_CLUMP - 1 +
52 __ffs(size >> (8 + NTFS_MIN_LOG2_OF_CLUMP));
53 clump = 1u << align_shift;
56 ret = (((size + clump - 1) >> align_shift)) << align_shift;
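/*
 * Illustration only (not compiled into the driver): with the default
 * constants above, NTFS_CLUMP_MIN = 1 << 24 (16 MiB) and
 * NTFS_CLUMP_MAX = 1ull << 34 (16 GiB), so small files are preallocated
 * up to the next 64 KiB boundary and very large files up to the next
 * 64 MiB boundary. The sketch below replays only the final rounding step
 * for an already chosen align_shift; names are local to the example.
 */
#if 0 /* example only */
static u64 round_up_to_clump(u64 size, u8 align_shift)
{
	u64 clump = 1ull << align_shift;

	/* e.g. size = 100001, align_shift = 16 -> 131072 (two 64 KiB clumps) */
	return ((size + clump - 1) >> align_shift) << align_shift;
}
#endif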
62 * attr_must_be_resident
64 * Return: True if attribute must be resident.
66 static inline bool attr_must_be_resident(struct ntfs_sb_info *sbi,
69 const struct ATTR_DEF_ENTRY *de;
81 de = ntfs_query_def(sbi, type);
82 if (de && (de->flags & NTFS_ATTR_MUST_BE_RESIDENT))
89 * attr_load_runs - Load all runs stored in @attr.
91 int attr_load_runs(struct ATTRIB *attr, struct ntfs_inode *ni,
92 struct runs_tree *run, const CLST *vcn)
95 CLST svcn = le64_to_cpu(attr->nres.svcn);
96 CLST evcn = le64_to_cpu(attr->nres.evcn);
100 if (svcn >= evcn + 1 || run_is_mapped_full(run, svcn, evcn))
103 if (vcn && (evcn < *vcn || *vcn < svcn))
106 asize = le32_to_cpu(attr->size);
107 run_off = le16_to_cpu(attr->nres.run_off);
108 err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn,
109 vcn ? *vcn : svcn, Add2Ptr(attr, run_off),
118 * run_deallocate_ex - Deallocate clusters.
120 static int run_deallocate_ex(struct ntfs_sb_info *sbi, struct runs_tree *run,
121 CLST vcn, CLST len, CLST *done, bool trim)
124 CLST vcn_next, vcn0 = vcn, lcn, clen, dn = 0;
130 if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
132 run_truncate(run, vcn0);
146 if (lcn != SPARSE_LCN) {
147 mark_as_free_ex(sbi, lcn, clen, trim);
155 vcn_next = vcn + clen;
156 if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
158 /* Save memory - don't load entire run. */
171 * attr_allocate_clusters - Find free space, mark it as used and store in @run.
173 int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
174 CLST vcn, CLST lcn, CLST len, CLST *pre_alloc,
175 enum ALLOCATE_OPT opt, CLST *alen, const size_t fr,
179 CLST flen, vcn0 = vcn, pre = pre_alloc ? *pre_alloc : 0;
180 struct wnd_bitmap *wnd = &sbi->used.bitmap;
181 size_t cnt = run->count;
184 err = ntfs_look_for_free_space(sbi, lcn, len + pre, &lcn, &flen,
187 if (err == -ENOSPC && pre) {
197 if (new_lcn && vcn == vcn0)
200 /* Add new fragment into run storage. */
201 if (!run_add_entry(run, vcn, lcn, flen, opt == ALLOCATE_MFT)) {
202 /* Undo last 'ntfs_look_for_free_space' */
203 down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
204 wnd_set_free(wnd, lcn, flen);
205 up_write(&wnd->rw_lock);
212 if (flen >= len || opt == ALLOCATE_MFT ||
213 (fr && run->count - cnt >= fr)) {
222 /* Undo 'ntfs_look_for_free_space' */
224 run_deallocate_ex(sbi, run, vcn0, vcn - vcn0, NULL, false);
225 run_truncate(run, vcn0);
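/*
 * Note on the @fr argument of attr_allocate_clusters(): callers in this
 * file bound the number of new fragments by the free space left in the
 * MFT record, assuming roughly 3 bytes per packed mapping pair, e.g.
 * (sbi->record_size - le32_to_cpu(rec->used) + 8) / 3 + 1 (see the
 * "~3 bytes per fragment" estimate below). Passing fr == 0 disables the
 * fragment limit.
 */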
232 * attr_make_nonresident
234 * If @page is not NULL, it already contains resident data and is locked
235 * (called from ni_write_frame()).
237 int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
238 struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
239 u64 new_size, struct runs_tree *run,
240 struct ATTRIB **ins_attr, struct page *page)
242 struct ntfs_sb_info *sbi;
243 struct ATTRIB *attr_s;
245 u32 used, asize, rsize, aoff, align;
259 used = le32_to_cpu(rec->used);
260 asize = le32_to_cpu(attr->size);
261 next = Add2Ptr(attr, asize);
262 aoff = PtrOffset(rec, attr);
263 rsize = le32_to_cpu(attr->res.data_size);
264 is_data = attr->type == ATTR_DATA && !attr->name_len;
266 align = sbi->cluster_size;
267 if (is_attr_compressed(attr))
268 align <<= COMPRESSION_UNIT;
269 len = (rsize + align - 1) >> sbi->cluster_bits;
273 /* Make a copy of original attribute. */
274 attr_s = kmemdup(attr, asize, GFP_NOFS);
281 /* Empty resident -> Empty nonresident. */
284 const char *data = resident_data(attr);
286 err = attr_allocate_clusters(sbi, run, 0, 0, len, NULL,
287 ALLOCATE_DEF, &alen, 0, NULL);
292 /* Empty resident -> Non empty nonresident. */
293 } else if (!is_data) {
294 err = ntfs_sb_write_run(sbi, run, 0, data, rsize);
300 page = grab_cache_page(ni->vfs_inode.i_mapping, 0);
305 kaddr = kmap_atomic(page);
306 memcpy(kaddr, data, rsize);
307 memset(kaddr + rsize, 0, PAGE_SIZE - rsize);
308 kunmap_atomic(kaddr);
309 flush_dcache_page(page);
310 SetPageUptodate(page);
311 set_page_dirty(page);
317 /* Remove original attribute. */
319 memmove(attr, Add2Ptr(attr, asize), used - aoff);
320 rec->used = cpu_to_le32(used);
323 al_remove_le(ni, le);
325 err = ni_insert_nonresident(ni, attr_s->type, attr_name(attr_s),
326 attr_s->name_len, run, 0, alen,
327 attr_s->flags, &attr, NULL);
332 attr->nres.data_size = cpu_to_le64(rsize);
333 attr->nres.valid_size = attr->nres.data_size;
338 ni->ni_flags &= ~NI_FLAG_RESIDENT;
340 /* Resident attribute becomes non-resident. */
344 attr = Add2Ptr(rec, aoff);
345 memmove(next, attr, used - aoff);
346 memcpy(attr, attr_s, asize);
347 rec->used = cpu_to_le32(used + asize);
350 /* Undo: do not trim newly allocated clusters. */
351 run_deallocate(sbi, run, false);
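/*
 * Summary of the conversion above: the resident attribute is copied with
 * kmemdup(), clusters are allocated and the resident payload is written
 * out (directly for non-data attributes, through a locked page for $DATA),
 * the resident attribute is removed from the MFT record and re-inserted
 * as non-resident via ni_insert_nonresident(); on failure the saved copy
 * is moved back into the record and the new clusters are deallocated.
 */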
360 * attr_set_size_res - Helper for attr_set_size().
362 static int attr_set_size_res(struct ntfs_inode *ni, struct ATTRIB *attr,
363 struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
364 u64 new_size, struct runs_tree *run,
365 struct ATTRIB **ins_attr)
367 struct ntfs_sb_info *sbi = mi->sbi;
368 struct MFT_REC *rec = mi->mrec;
369 u32 used = le32_to_cpu(rec->used);
370 u32 asize = le32_to_cpu(attr->size);
371 u32 aoff = PtrOffset(rec, attr);
372 u32 rsize = le32_to_cpu(attr->res.data_size);
373 u32 tail = used - aoff - asize;
374 char *next = Add2Ptr(attr, asize);
375 s64 dsize = ALIGN(new_size, 8) - ALIGN(rsize, 8);
378 memmove(next + dsize, next, tail);
379 } else if (dsize > 0) {
380 if (used + dsize > sbi->max_bytes_per_attr)
381 return attr_make_nonresident(ni, attr, le, mi, new_size,
382 run, ins_attr, NULL);
384 memmove(next + dsize, next, tail);
385 memset(next, 0, dsize);
388 if (new_size > rsize)
389 memset(Add2Ptr(resident_data(attr), rsize), 0,
392 rec->used = cpu_to_le32(used + dsize);
393 attr->size = cpu_to_le32(asize + dsize);
394 attr->res.data_size = cpu_to_le32(new_size);
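/*
 * Worked example for the resident resize above: growing a resident
 * attribute from rsize = 10 to new_size = 25 gives
 * dsize = ALIGN(25, 8) - ALIGN(10, 8) = 32 - 16 = 16, so the tail of the
 * MFT record is shifted 16 bytes up and the new bytes are zeroed; if
 * used + dsize would exceed sbi->max_bytes_per_attr, the attribute is
 * converted with attr_make_nonresident() instead.
 */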
402 * attr_set_size - Change the size of attribute.
405 * - Sparse/compressed: No allocated clusters.
406 * - Normal: Append allocated and preallocated new clusters.
408 * - Do not deallocate if @keep_prealloc is set.
410 int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
411 const __le16 *name, u8 name_len, struct runs_tree *run,
412 u64 new_size, const u64 *new_valid, bool keep_prealloc,
416 struct ntfs_sb_info *sbi = ni->mi.sbi;
417 u8 cluster_bits = sbi->cluster_bits;
419 ni->mi.rno == MFT_REC_MFT && type == ATTR_DATA && !name_len;
420 u64 old_valid, old_size, old_alloc, new_alloc, new_alloc_tmp;
421 struct ATTRIB *attr = NULL, *attr_b;
422 struct ATTR_LIST_ENTRY *le, *le_b;
423 struct mft_inode *mi, *mi_b;
424 CLST alen, vcn, lcn, new_alen, old_alen, svcn, evcn;
425 CLST next_svcn, pre_alloc = -1, done = 0;
432 attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len, NULL,
439 if (!attr_b->non_res) {
440 err = attr_set_size_res(ni, attr_b, le_b, mi_b, new_size, run,
442 if (err || !attr_b->non_res)
445 /* Layout of records may have changed, so do a full search. */
449 is_ext = is_attr_ext(attr_b);
452 align = sbi->cluster_size;
455 align <<= attr_b->nres.c_unit;
456 if (is_attr_sparsed(attr_b))
457 keep_prealloc = false;
460 old_valid = le64_to_cpu(attr_b->nres.valid_size);
461 old_size = le64_to_cpu(attr_b->nres.data_size);
462 old_alloc = le64_to_cpu(attr_b->nres.alloc_size);
463 old_alen = old_alloc >> cluster_bits;
465 new_alloc = (new_size + align - 1) & ~(u64)(align - 1);
466 new_alen = new_alloc >> cluster_bits;
468 if (keep_prealloc && is_ext)
469 keep_prealloc = false;
471 if (keep_prealloc && new_size < old_size) {
472 attr_b->nres.data_size = cpu_to_le64(new_size);
479 svcn = le64_to_cpu(attr_b->nres.svcn);
480 evcn = le64_to_cpu(attr_b->nres.evcn);
482 if (svcn <= vcn && vcn <= evcn) {
491 attr = ni_find_attr(ni, attr_b, &le, type, name, name_len, &vcn,
499 svcn = le64_to_cpu(attr->nres.svcn);
500 evcn = le64_to_cpu(attr->nres.evcn);
506 err = attr_load_runs(attr, ni, run, NULL);
510 if (new_size > old_size) {
514 if (new_alloc <= old_alloc) {
515 attr_b->nres.data_size = cpu_to_le64(new_size);
520 to_allocate = new_alen - old_alen;
521 add_alloc_in_same_attr_seg:
524 /* MFT allocates clusters from MFT zone. */
527 /* No preallocation for sparse/compressed attributes. */
529 } else if (pre_alloc == -1) {
531 if (type == ATTR_DATA && !name_len &&
532 sbi->options.prealloc) {
533 CLST new_alen2 = bytes_to_cluster(
534 sbi, get_pre_allocated(new_size));
535 pre_alloc = new_alen2 - new_alen;
538 /* Get the last LCN to allocate from. */
540 !run_lookup_entry(run, vcn, &lcn, NULL, NULL)) {
544 if (lcn == SPARSE_LCN)
549 free = wnd_zeroes(&sbi->used.bitmap);
550 if (to_allocate > free) {
555 if (pre_alloc && to_allocate + pre_alloc > free)
562 if (!run_add_entry(run, vcn, SPARSE_LCN, to_allocate,
569 /* ~3 bytes per fragment. */
570 err = attr_allocate_clusters(
571 sbi, run, vcn, lcn, to_allocate, &pre_alloc,
572 is_mft ? ALLOCATE_MFT : 0, &alen,
574 : (sbi->record_size -
575 le32_to_cpu(rec->used) + 8) /
585 if (to_allocate > alen)
591 err = mi_pack_runs(mi, attr, run, vcn - svcn);
595 next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
596 new_alloc_tmp = (u64)next_svcn << cluster_bits;
597 attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);
600 if (next_svcn >= vcn && !to_allocate) {
601 /* Normal way. Update attribute and exit. */
602 attr_b->nres.data_size = cpu_to_le64(new_size);
606 /* At least two MFT records to avoid a recursive loop. */
607 if (is_mft && next_svcn == vcn &&
608 ((u64)done << sbi->cluster_bits) >= 2 * sbi->record_size) {
609 new_size = new_alloc_tmp;
610 attr_b->nres.data_size = attr_b->nres.alloc_size;
614 if (le32_to_cpu(rec->used) < sbi->record_size) {
615 old_alen = next_svcn;
617 goto add_alloc_in_same_attr_seg;
620 attr_b->nres.data_size = attr_b->nres.alloc_size;
621 if (new_alloc_tmp < old_valid)
622 attr_b->nres.valid_size = attr_b->nres.data_size;
624 if (type == ATTR_LIST) {
625 err = ni_expand_list(ni);
631 /* Layout of records is changed. */
635 if (!ni->attr_list.size) {
636 err = ni_create_attr_list(ni);
639 /* Layout of records is changed. */
642 if (next_svcn >= vcn) {
643 /* This is MFT data, repeat. */
647 /* Insert new attribute segment. */
648 err = ni_insert_nonresident(ni, type, name, name_len, run,
649 next_svcn, vcn - next_svcn,
650 attr_b->flags, &attr, &mi);
655 run_truncate_head(run, evcn + 1);
657 svcn = le64_to_cpu(attr->nres.svcn);
658 evcn = le64_to_cpu(attr->nres.evcn);
662 * Layout of records may have changed.
663 * Find base attribute to update.
665 attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len,
672 attr_b->nres.alloc_size = cpu_to_le64((u64)vcn << cluster_bits);
673 attr_b->nres.data_size = attr_b->nres.alloc_size;
674 attr_b->nres.valid_size = attr_b->nres.alloc_size;
679 if (new_size != old_size ||
680 (new_alloc != old_alloc && !keep_prealloc)) {
681 vcn = max(svcn, new_alen);
682 new_alloc_tmp = (u64)vcn << cluster_bits;
685 err = run_deallocate_ex(sbi, run, vcn, evcn - vcn + 1, &alen,
690 run_truncate(run, vcn);
693 err = mi_pack_runs(mi, attr, run, vcn - svcn);
696 } else if (le && le->vcn) {
697 u16 le_sz = le16_to_cpu(le->size);
700 * NOTE: List entries for one attribute are always
701 * the same size. We deal with the last entry (vcn==0),
702 * and it is not the first one in the entries array
703 * (the list entry for the std attribute is always first),
704 * so it is safe to step back.
706 mi_remove_attr(NULL, mi, attr);
708 if (!al_remove_le(ni, le)) {
713 le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
715 attr->nres.evcn = cpu_to_le64((u64)vcn - 1);
719 attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);
721 if (vcn == new_alen) {
722 attr_b->nres.data_size = cpu_to_le64(new_size);
723 if (new_size < old_valid)
724 attr_b->nres.valid_size =
725 attr_b->nres.data_size;
728 le64_to_cpu(attr_b->nres.data_size))
729 attr_b->nres.data_size =
730 attr_b->nres.alloc_size;
732 le64_to_cpu(attr_b->nres.valid_size))
733 attr_b->nres.valid_size =
734 attr_b->nres.alloc_size;
738 le64_sub_cpu(&attr_b->nres.total_size,
739 ((u64)alen << cluster_bits));
743 if (new_alloc_tmp <= new_alloc)
746 old_size = new_alloc_tmp;
757 if (le->type != type || le->name_len != name_len ||
758 memcmp(le_name(le), name, name_len * sizeof(short))) {
763 err = ni_load_mi(ni, le, &mi);
767 attr = mi_find_attr(mi, NULL, type, name, name_len, &le->id);
777 __le64 valid = cpu_to_le64(min(*new_valid, new_size));
779 if (attr_b->nres.valid_size != valid) {
780 attr_b->nres.valid_size = valid;
786 if (!err && attr_b && ret)
789 /* Update inode_set_bytes. */
790 if (!err && ((type == ATTR_DATA && !name_len) ||
791 (type == ATTR_ALLOC && name == I30_NAME))) {
794 if (ni->vfs_inode.i_size != new_size) {
795 ni->vfs_inode.i_size = new_size;
799 if (attr_b && attr_b->non_res) {
800 new_alloc = le64_to_cpu(attr_b->nres.alloc_size);
801 if (inode_get_bytes(&ni->vfs_inode) != new_alloc) {
802 inode_set_bytes(&ni->vfs_inode, new_alloc);
808 ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
809 mark_inode_dirty(&ni->vfs_inode);
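/*
 * Typical use of attr_set_size(): shrinking the unnamed $DATA attribute,
 * as done elsewhere in this file when a collapse reaches end of data.
 * A minimal sketch; ni, new_size and err come from the caller's context
 * and error handling is omitted.
 */
#if 0 /* example only */
	u64 new_valid = min(ni->i_valid, new_size);

	err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
			    new_size, &new_valid, true, NULL);
#endif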
816 int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
817 CLST *len, bool *new)
820 struct runs_tree *run = &ni->file.run;
821 struct ntfs_sb_info *sbi;
823 struct ATTRIB *attr = NULL, *attr_b;
824 struct ATTR_LIST_ENTRY *le, *le_b;
825 struct mft_inode *mi, *mi_b;
826 CLST hint, svcn, to_alloc, evcn1, next_svcn, asize, end;
834 down_read(&ni->file.run_lock);
835 ok = run_lookup_entry(run, vcn, lcn, len, NULL);
836 up_read(&ni->file.run_lock);
838 if (ok && (*lcn != SPARSE_LCN || !new)) {
846 if (ok && clen > *len)
850 cluster_bits = sbi->cluster_bits;
853 down_write(&ni->file.run_lock);
856 attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
862 if (!attr_b->non_res) {
868 asize = le64_to_cpu(attr_b->nres.alloc_size) >> sbi->cluster_bits;
874 clst_per_frame = 1u << attr_b->nres.c_unit;
875 to_alloc = (clen + clst_per_frame - 1) & ~(clst_per_frame - 1);
877 if (vcn + to_alloc > asize)
878 to_alloc = asize - vcn;
880 svcn = le64_to_cpu(attr_b->nres.svcn);
881 evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
887 if (le_b && (vcn < svcn || evcn1 <= vcn)) {
888 attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
894 svcn = le64_to_cpu(attr->nres.svcn);
895 evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
898 err = attr_load_runs(attr, ni, run, NULL);
903 ok = run_lookup_entry(run, vcn, lcn, len, NULL);
904 if (ok && (*lcn != SPARSE_LCN || !new)) {
916 if (ok && clen > *len) {
918 to_alloc = (clen + clst_per_frame - 1) &
919 ~(clst_per_frame - 1);
923 if (!is_attr_ext(attr_b)) {
928 /* Get the last LCN to allocate from. */
932 if (!run_add_entry(run, evcn1, SPARSE_LCN, vcn - evcn1,
937 } else if (vcn && !run_lookup_entry(run, vcn - 1, &hint, NULL, NULL)) {
941 err = attr_allocate_clusters(
942 sbi, run, vcn, hint + 1, to_alloc, NULL, 0, len,
943 (sbi->record_size - le32_to_cpu(mi->mrec->used) + 8) / 3 + 1,
951 total_size = le64_to_cpu(attr_b->nres.total_size) +
952 ((u64)*len << cluster_bits);
955 err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
959 attr_b->nres.total_size = cpu_to_le64(total_size);
960 inode_set_bytes(&ni->vfs_inode, total_size);
961 ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
964 mark_inode_dirty(&ni->vfs_inode);
966 /* Stored [vcn : next_svcn) from [vcn : end). */
967 next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
970 if (next_svcn == evcn1) {
971 /* Normal way. Update attribute and exit. */
974 /* Add new segment [next_svcn : evcn1). */
975 if (!ni->attr_list.size) {
976 err = ni_create_attr_list(ni);
979 /* Layout of records is changed. */
981 attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
997 /* Estimate next attribute. */
998 attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);
1001 CLST alloc = bytes_to_cluster(
1002 sbi, le64_to_cpu(attr_b->nres.alloc_size));
1003 CLST evcn = le64_to_cpu(attr->nres.evcn);
1005 if (end < next_svcn)
1007 while (end > evcn) {
1008 /* Remove segment [svcn : evcn). */
1009 mi_remove_attr(NULL, mi, attr);
1011 if (!al_remove_le(ni, le)) {
1016 if (evcn + 1 >= alloc) {
1017 /* Last attribute segment. */
1022 if (ni_load_mi(ni, le, &mi)) {
1027 attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0,
1033 svcn = le64_to_cpu(attr->nres.svcn);
1034 evcn = le64_to_cpu(attr->nres.evcn);
1040 err = attr_load_runs(attr, ni, run, &end);
1045 attr->nres.svcn = cpu_to_le64(next_svcn);
1046 err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
1050 le->vcn = cpu_to_le64(next_svcn);
1051 ni->attr_list.dirty = true;
1054 next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1057 if (evcn1 > next_svcn) {
1058 err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
1059 next_svcn, evcn1 - next_svcn,
1060 attr_b->flags, &attr, &mi);
1065 run_truncate_around(run, vcn);
1067 up_write(&ni->file.run_lock);
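/*
 * Sketch of how a block-mapping caller might use attr_data_get_block().
 * The helper name and the -ENODATA convention are illustrative, not the
 * driver's actual get_block implementation; passing NULL for @new asks
 * only for a lookup, so holes come back as SPARSE_LCN.
 */
#if 0 /* example only */
static int map_one_cluster(struct ntfs_inode *ni, u64 vbo, CLST *lcn_out)
{
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	CLST vcn = vbo >> sbi->cluster_bits;
	CLST lcn, len;
	int err = attr_data_get_block(ni, vcn, 1, &lcn, &len, NULL);

	if (err)
		return err;
	if (lcn == SPARSE_LCN)
		return -ENODATA; /* hole: no physical cluster allocated */
	*lcn_out = lcn;
	return 0;
}
#endif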
1073 int attr_data_read_resident(struct ntfs_inode *ni, struct page *page)
1076 struct ATTRIB *attr;
1079 attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, NULL);
1084 return E_NTFS_NONRESIDENT;
1086 vbo = page->index << PAGE_SHIFT;
1087 data_size = le32_to_cpu(attr->res.data_size);
1088 if (vbo < data_size) {
1089 const char *data = resident_data(attr);
1090 char *kaddr = kmap_atomic(page);
1091 u32 use = data_size - vbo;
1093 if (use > PAGE_SIZE)
1096 memcpy(kaddr, data + vbo, use);
1097 memset(kaddr + use, 0, PAGE_SIZE - use);
1098 kunmap_atomic(kaddr);
1099 flush_dcache_page(page);
1100 SetPageUptodate(page);
1101 } else if (!PageUptodate(page)) {
1102 zero_user_segment(page, 0, PAGE_SIZE);
1103 SetPageUptodate(page);
1109 int attr_data_write_resident(struct ntfs_inode *ni, struct page *page)
1112 struct mft_inode *mi;
1113 struct ATTRIB *attr;
1116 attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, &mi);
1120 if (attr->non_res) {
1121 /* Return special error code to check this case. */
1122 return E_NTFS_NONRESIDENT;
1125 vbo = page->index << PAGE_SHIFT;
1126 data_size = le32_to_cpu(attr->res.data_size);
1127 if (vbo < data_size) {
1128 char *data = resident_data(attr);
1129 char *kaddr = kmap_atomic(page);
1130 u32 use = data_size - vbo;
1132 if (use > PAGE_SIZE)
1134 memcpy(data + vbo, kaddr, use);
1135 kunmap_atomic(kaddr);
1138 ni->i_valid = data_size;
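/*
 * Both resident helpers above return the special code E_NTFS_NONRESIDENT
 * when the attribute turns out to be non-resident, so a caller can fall
 * back to the regular run-based page path. A minimal sketch; the fallback
 * function is hypothetical.
 */
#if 0 /* example only */
	err = attr_data_read_resident(ni, page);
	if (err == E_NTFS_NONRESIDENT)
		err = read_page_via_runs(ni, page); /* illustrative fallback */
#endif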
1144 * attr_load_runs_vcn - Load runs with VCN.
1146 int attr_load_runs_vcn(struct ntfs_inode *ni, enum ATTR_TYPE type,
1147 const __le16 *name, u8 name_len, struct runs_tree *run,
1150 struct ATTRIB *attr;
1155 attr = ni_find_attr(ni, NULL, NULL, type, name, name_len, &vcn, NULL);
1157 /* Is record corrupted? */
1161 svcn = le64_to_cpu(attr->nres.svcn);
1162 evcn = le64_to_cpu(attr->nres.evcn);
1164 if (evcn < vcn || vcn < svcn) {
1165 /* Is record corrupted? */
1169 ro = le16_to_cpu(attr->nres.run_off);
1170 err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn, svcn,
1171 Add2Ptr(attr, ro), le32_to_cpu(attr->size) - ro);
1178 * attr_load_runs_range - Load runs for given range [from, to).
1180 int attr_load_runs_range(struct ntfs_inode *ni, enum ATTR_TYPE type,
1181 const __le16 *name, u8 name_len, struct runs_tree *run,
1184 struct ntfs_sb_info *sbi = ni->mi.sbi;
1185 u8 cluster_bits = sbi->cluster_bits;
1186 CLST vcn = from >> cluster_bits;
1187 CLST vcn_last = (to - 1) >> cluster_bits;
1191 for (vcn = from >> cluster_bits; vcn <= vcn_last; vcn += clen) {
1192 if (!run_lookup_entry(run, vcn, &lcn, &clen, NULL)) {
1193 err = attr_load_runs_vcn(ni, type, name, name_len, run,
1197 clen = 0; /* Next run_lookup_entry(vcn) must succeed. */
1204 #ifdef CONFIG_NTFS3_LZX_XPRESS
1206 * attr_wof_frame_info
1208 * Read header of Xpress/LZX file to get info about frame.
1210 int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
1211 struct runs_tree *run, u64 frame, u64 frames,
1212 u8 frame_bits, u32 *ondisk_size, u64 *vbo_data)
1214 struct ntfs_sb_info *sbi = ni->mi.sbi;
1215 u64 vbo[2], off[2], wof_size;
1224 if (ni->vfs_inode.i_size < 0x100000000ull) {
1225 /* File starts with array of 32 bit offsets. */
1226 bytes_per_off = sizeof(__le32);
1227 vbo[1] = frame << 2;
1228 *vbo_data = frames << 2;
1230 /* File starts with array of 64 bit offsets. */
1231 bytes_per_off = sizeof(__le64);
1232 vbo[1] = frame << 3;
1233 *vbo_data = frames << 3;
1237 * Read 4/8 bytes at [vbo - 4(8)] == offset where compressed frame starts.
1238 * Read 4/8 bytes at [vbo] == offset where compressed frame ends.
1240 if (!attr->non_res) {
1241 if (vbo[1] + bytes_per_off > le32_to_cpu(attr->res.data_size)) {
1242 ntfs_inode_err(&ni->vfs_inode, "is corrupted");
1245 addr = resident_data(attr);
1247 if (bytes_per_off == sizeof(__le32)) {
1248 off32 = Add2Ptr(addr, vbo[1]);
1249 off[0] = vbo[1] ? le32_to_cpu(off32[-1]) : 0;
1250 off[1] = le32_to_cpu(off32[0]);
1252 off64 = Add2Ptr(addr, vbo[1]);
1253 off[0] = vbo[1] ? le64_to_cpu(off64[-1]) : 0;
1254 off[1] = le64_to_cpu(off64[0]);
1257 *vbo_data += off[0];
1258 *ondisk_size = off[1] - off[0];
1262 wof_size = le64_to_cpu(attr->nres.data_size);
1263 down_write(&ni->file.run_lock);
1264 page = ni->file.offs_page;
1266 page = alloc_page(GFP_KERNEL);
1272 ni->file.offs_page = page;
1275 addr = page_address(page);
1278 voff = vbo[1] & (PAGE_SIZE - 1);
1279 vbo[0] = vbo[1] - bytes_per_off;
1289 pgoff_t index = vbo[i] >> PAGE_SHIFT;
1291 if (index != page->index) {
1292 u64 from = vbo[i] & ~(u64)(PAGE_SIZE - 1);
1293 u64 to = min(from + PAGE_SIZE, wof_size);
1295 err = attr_load_runs_range(ni, ATTR_DATA, WOF_NAME,
1296 ARRAY_SIZE(WOF_NAME), run,
1301 err = ntfs_bio_pages(sbi, run, &page, 1, from,
1302 to - from, REQ_OP_READ);
1307 page->index = index;
1311 if (bytes_per_off == sizeof(__le32)) {
1312 off32 = Add2Ptr(addr, voff);
1313 off[1] = le32_to_cpu(*off32);
1315 off64 = Add2Ptr(addr, voff);
1316 off[1] = le64_to_cpu(*off64);
1319 if (bytes_per_off == sizeof(__le32)) {
1320 off32 = Add2Ptr(addr, PAGE_SIZE - sizeof(u32));
1321 off[0] = le32_to_cpu(*off32);
1323 off64 = Add2Ptr(addr, PAGE_SIZE - sizeof(u64));
1324 off[0] = le64_to_cpu(*off64);
1327 /* Two values in one page. */
1328 if (bytes_per_off == sizeof(__le32)) {
1329 off32 = Add2Ptr(addr, voff);
1330 off[0] = le32_to_cpu(off32[-1]);
1331 off[1] = le32_to_cpu(off32[0]);
1333 off64 = Add2Ptr(addr, voff);
1334 off[0] = le64_to_cpu(off64[-1]);
1335 off[1] = le64_to_cpu(off64[0]);
1341 *vbo_data += off[0];
1342 *ondisk_size = off[1] - off[0];
1347 up_write(&ni->file.run_lock);
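/*
 * Layout handled above, by example: for the WOF data stream of a file
 * smaller than 4 GiB with N frames, the stream begins with N 32-bit
 * little-endian offsets; entry i holds the end of compressed frame i
 * relative to the end of that table. For frame i the payload therefore
 * occupies [N * 4 + off[i - 1], N * 4 + off[i]) in the stream, which is
 * exactly *vbo_data = (frames << 2) + off[0] and
 * *ondisk_size = off[1] - off[0] computed above (frame 0 uses 0 in place
 * of off[-1]). Files of 4 GiB and larger use 64-bit entries and a shift
 * of 3 instead.
 */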
1353 * attr_is_frame_compressed - Used to detect compressed frame.
1355 int attr_is_frame_compressed(struct ntfs_inode *ni, struct ATTRIB *attr,
1356 CLST frame, CLST *clst_data)
1360 CLST clen, lcn, vcn, alen, slen, vcn_next;
1362 struct runs_tree *run;
1366 if (!is_attr_compressed(attr))
1372 clst_frame = 1u << attr->nres.c_unit;
1373 vcn = frame * clst_frame;
1374 run = &ni->file.run;
1376 if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
1377 err = attr_load_runs_vcn(ni, attr->type, attr_name(attr),
1378 attr->name_len, run, vcn);
1382 if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
1386 if (lcn == SPARSE_LCN) {
1387 /* Sparse frame. */
1391 if (clen >= clst_frame) {
1393 * The frame is not compressed because
1394 * it does not contain any sparse clusters.
1396 *clst_data = clst_frame;
1400 alen = bytes_to_cluster(ni->mi.sbi, le64_to_cpu(attr->nres.alloc_size));
1405 * The frame is compressed if *clst_data + slen >= clst_frame.
1406 * Check next fragments.
1408 while ((vcn += clen) < alen) {
1411 if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
1413 err = attr_load_runs_vcn(ni, attr->type,
1415 attr->name_len, run, vcn_next);
1420 if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
1424 if (lcn == SPARSE_LCN) {
1429 * Data clusters + sparse clusters
1430 * are not enough to cover the frame.
1437 if (*clst_data + slen >= clst_frame) {
1440 * There are no sparse clusters in this frame,
1441 * so it is not compressed.
1443 *clst_data = clst_frame;
1445 /* Frame is compressed. */
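/*
 * Worked example for the detection above, assuming a compression unit
 * c_unit = 4 (16-cluster frames): a frame stored as 9 data clusters
 * followed by 7 sparse clusters satisfies *clst_data + slen >= clst_frame
 * with slen != 0, so it is reported as compressed with *clst_data = 9;
 * a frame whose first fragment already spans 16 plain data clusters is
 * reported as not compressed with *clst_data = clst_frame = 16.
 */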
1455 * attr_allocate_frame - Allocate/free clusters for @frame.
1457 * Assumed: down_write(&ni->file.run_lock);
1459 int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
1463 struct runs_tree *run = &ni->file.run;
1464 struct ntfs_sb_info *sbi = ni->mi.sbi;
1465 struct ATTRIB *attr = NULL, *attr_b;
1466 struct ATTR_LIST_ENTRY *le, *le_b;
1467 struct mft_inode *mi, *mi_b;
1468 CLST svcn, evcn1, next_svcn, lcn, len;
1469 CLST vcn, end, clst_data;
1470 u64 total_size, valid_size, data_size;
1473 attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
1477 if (!is_attr_ext(attr_b))
1480 vcn = frame << NTFS_LZNT_CUNIT;
1481 total_size = le64_to_cpu(attr_b->nres.total_size);
1483 svcn = le64_to_cpu(attr_b->nres.svcn);
1484 evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
1485 data_size = le64_to_cpu(attr_b->nres.data_size);
1487 if (svcn <= vcn && vcn < evcn1) {
1496 attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
1502 svcn = le64_to_cpu(attr->nres.svcn);
1503 evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
1506 err = attr_load_runs(attr, ni, run, NULL);
1510 err = attr_is_frame_compressed(ni, attr_b, frame, &clst_data);
1514 total_size -= (u64)clst_data << sbi->cluster_bits;
1516 len = bytes_to_cluster(sbi, compr_size);
1518 if (len == clst_data)
1521 if (len < clst_data) {
1522 err = run_deallocate_ex(sbi, run, vcn + len, clst_data - len,
1527 if (!run_add_entry(run, vcn + len, SPARSE_LCN, clst_data - len,
1532 end = vcn + clst_data;
1533 /* Run contains updated range [vcn + len : end). */
1535 CLST alen, hint = 0;
1536 /* Get the last LCN to allocate from. */
1537 if (vcn + clst_data &&
1538 !run_lookup_entry(run, vcn + clst_data - 1, &hint, NULL,
1543 err = attr_allocate_clusters(sbi, run, vcn + clst_data,
1544 hint + 1, len - clst_data, NULL, 0,
1550 /* Run contains updated range [vcn + clst_data : end). */
1553 total_size += (u64)len << sbi->cluster_bits;
1556 err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
1560 attr_b->nres.total_size = cpu_to_le64(total_size);
1561 inode_set_bytes(&ni->vfs_inode, total_size);
1564 mark_inode_dirty(&ni->vfs_inode);
1566 /* Stored [vcn : next_svcn) from [vcn : end). */
1567 next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1570 if (next_svcn == evcn1) {
1571 /* Normal way. Update attribute and exit. */
1574 /* Add new segment [next_svcn : evcn1). */
1575 if (!ni->attr_list.size) {
1576 err = ni_create_attr_list(ni);
1579 /* Layout of records is changed. */
1581 attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
1597 /* Estimate next attribute. */
1598 attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);
1601 CLST alloc = bytes_to_cluster(
1602 sbi, le64_to_cpu(attr_b->nres.alloc_size));
1603 CLST evcn = le64_to_cpu(attr->nres.evcn);
1605 if (end < next_svcn)
1607 while (end > evcn) {
1608 /* Remove segment [svcn : evcn). */
1609 mi_remove_attr(NULL, mi, attr);
1611 if (!al_remove_le(ni, le)) {
1616 if (evcn + 1 >= alloc) {
1617 /* Last attribute segment. */
1622 if (ni_load_mi(ni, le, &mi)) {
1627 attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0,
1633 svcn = le64_to_cpu(attr->nres.svcn);
1634 evcn = le64_to_cpu(attr->nres.evcn);
1640 err = attr_load_runs(attr, ni, run, &end);
1645 attr->nres.svcn = cpu_to_le64(next_svcn);
1646 err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
1650 le->vcn = cpu_to_le64(next_svcn);
1651 ni->attr_list.dirty = true;
1654 next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1657 if (evcn1 > next_svcn) {
1658 err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
1659 next_svcn, evcn1 - next_svcn,
1660 attr_b->flags, &attr, &mi);
1665 run_truncate_around(run, vcn);
1667 if (new_valid > data_size)
1668 new_valid = data_size;
1670 valid_size = le64_to_cpu(attr_b->nres.valid_size);
1671 if (new_valid != valid_size) {
1672 attr_b->nres.valid_size = cpu_to_le64(valid_size);
1680 * attr_collapse_range - Collapse range in file.
1682 int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
1685 struct runs_tree *run = &ni->file.run;
1686 struct ntfs_sb_info *sbi = ni->mi.sbi;
1687 struct ATTRIB *attr = NULL, *attr_b;
1688 struct ATTR_LIST_ENTRY *le, *le_b;
1689 struct mft_inode *mi, *mi_b;
1690 CLST svcn, evcn1, len, dealloc, alen;
1692 u64 valid_size, data_size, alloc_size, total_size;
1700 attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
1704 if (!attr_b->non_res) {
1705 /* Attribute is resident. Nothing to do? */
1709 data_size = le64_to_cpu(attr_b->nres.data_size);
1710 alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
1711 a_flags = attr_b->flags;
1713 if (is_attr_ext(attr_b)) {
1714 total_size = le64_to_cpu(attr_b->nres.total_size);
1715 mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
1717 total_size = alloc_size;
1718 mask = sbi->cluster_mask;
1721 if ((vbo & mask) || (bytes & mask)) {
1722 /* Only cluster-aligned ranges can be collapsed. */
1726 if (vbo > data_size)
1729 down_write(&ni->file.run_lock);
1731 if (vbo + bytes >= data_size) {
1732 u64 new_valid = min(ni->i_valid, vbo);
1734 /* Simply truncate the file at 'vbo'. */
1735 truncate_setsize(&ni->vfs_inode, vbo);
1736 err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, vbo,
1737 &new_valid, true, NULL);
1739 if (!err && new_valid < ni->i_valid)
1740 ni->i_valid = new_valid;
1746 * Enumerate all attribute segments and collapse.
1748 alen = alloc_size >> sbi->cluster_bits;
1749 vcn = vbo >> sbi->cluster_bits;
1750 len = bytes >> sbi->cluster_bits;
1754 svcn = le64_to_cpu(attr_b->nres.svcn);
1755 evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
1757 if (svcn <= vcn && vcn < evcn1) {
1766 attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
1773 svcn = le64_to_cpu(attr->nres.svcn);
1774 evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
1780 attr->nres.svcn = cpu_to_le64(svcn - len);
1781 attr->nres.evcn = cpu_to_le64(evcn1 - 1 - len);
1783 le->vcn = attr->nres.svcn;
1784 ni->attr_list.dirty = true;
1787 } else if (svcn < vcn || end < evcn1) {
1788 CLST vcn1, eat, next_svcn;
1790 /* Collapse a part of this attribute segment. */
1791 err = attr_load_runs(attr, ni, run, &svcn);
1794 vcn1 = max(vcn, svcn);
1795 eat = min(end, evcn1) - vcn1;
1797 err = run_deallocate_ex(sbi, run, vcn1, eat, &dealloc,
1802 if (!run_collapse_range(run, vcn1, eat)) {
1809 attr->nres.svcn = cpu_to_le64(vcn);
1811 le->vcn = attr->nres.svcn;
1812 ni->attr_list.dirty = true;
1816 err = mi_pack_runs(mi, attr, run, evcn1 - svcn - eat);
1820 next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1821 if (next_svcn + eat < evcn1) {
1822 err = ni_insert_nonresident(
1823 ni, ATTR_DATA, NULL, 0, run, next_svcn,
1824 evcn1 - eat - next_svcn, a_flags, &attr,
1829 /* Layout of records may have changed. */
1831 le = al_find_ex(ni, NULL, ATTR_DATA, NULL, 0,
1839 /* Free all allocated memory. */
1840 run_truncate(run, 0);
1843 u16 roff = le16_to_cpu(attr->nres.run_off);
1845 run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn,
1846 evcn1 - 1, svcn, Add2Ptr(attr, roff),
1847 le32_to_cpu(attr->size) - roff);
1849 /* Delete this attribute segment. */
1850 mi_remove_attr(NULL, mi, attr);
1854 le_sz = le16_to_cpu(le->size);
1855 if (!al_remove_le(ni, le)) {
1864 /* Load next record that contains this attribute. */
1865 if (ni_load_mi(ni, le, &mi)) {
1870 /* Look for required attribute. */
1871 attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL,
1879 le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
1885 attr = ni_enum_attr_ex(ni, attr, &le, &mi);
1892 svcn = le64_to_cpu(attr->nres.svcn);
1893 evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
1898 attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
1907 valid_size = ni->i_valid;
1908 if (vbo + bytes <= valid_size)
1909 valid_size -= bytes;
1910 else if (vbo < valid_size)
1913 attr_b->nres.alloc_size = cpu_to_le64(alloc_size - bytes);
1914 attr_b->nres.data_size = cpu_to_le64(data_size);
1915 attr_b->nres.valid_size = cpu_to_le64(min(valid_size, data_size));
1916 total_size -= (u64)dealloc << sbi->cluster_bits;
1917 if (is_attr_ext(attr_b))
1918 attr_b->nres.total_size = cpu_to_le64(total_size);
1921 /* Update inode size. */
1922 ni->i_valid = valid_size;
1923 ni->vfs_inode.i_size = data_size;
1924 inode_set_bytes(&ni->vfs_inode, total_size);
1925 ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
1926 mark_inode_dirty(&ni->vfs_inode);
1929 up_write(&ni->file.run_lock);
1931 make_bad_inode(&ni->vfs_inode);
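/*
 * Alignment rule used by attr_collapse_range() above, by example: on a
 * volume with 4 KiB clusters a normal file may collapse vbo = 8192,
 * bytes = 4096 (one cluster); for a compressed file with c_unit = 4 the
 * mask becomes (4096 << 4) - 1, so both vbo and bytes must be multiples
 * of 64 KiB or the range is rejected as not aligned.
 */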
1939 * Not for normal files.
1941 int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
1944 struct runs_tree *run = &ni->file.run;
1945 struct ntfs_sb_info *sbi = ni->mi.sbi;
1946 struct ATTRIB *attr = NULL, *attr_b;
1947 struct ATTR_LIST_ENTRY *le, *le_b;
1948 struct mft_inode *mi, *mi_b;
1949 CLST svcn, evcn1, vcn, len, end, alen, dealloc;
1950 u64 total_size, alloc_size;
1957 attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
1961 if (!attr_b->non_res) {
1962 u32 data_size = le32_to_cpu(attr->res.data_size);
1965 if (vbo > data_size)
1969 to = (vbo + bytes) < data_size ? (vbo + bytes) : data_size;
1970 memset(Add2Ptr(resident_data(attr_b), from), 0, to - from);
1974 if (!is_attr_ext(attr_b))
1977 alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
1978 total_size = le64_to_cpu(attr_b->nres.total_size);
1980 if (vbo >= alloc_size) {
1981 /* NOTE: Punching a hole beyond the allocated size is allowed. */
1985 mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
1988 if (bytes > alloc_size)
1992 if ((vbo & mask) || (bytes & mask)) {
1993 /* We have to zero one or more ranges. */
1994 if (frame_size == NULL) {
1995 /* Caller insists range is aligned. */
1998 *frame_size = mask + 1;
1999 return E_NTFS_NOTALIGNED;
2002 down_write(&ni->file.run_lock);
2004 * Enumerate all attribute segments and punch hole where necessary.
2006 alen = alloc_size >> sbi->cluster_bits;
2007 vcn = vbo >> sbi->cluster_bits;
2008 len = bytes >> sbi->cluster_bits;
2012 svcn = le64_to_cpu(attr_b->nres.svcn);
2013 evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
2015 if (svcn <= vcn && vcn < evcn1) {
2024 attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
2031 svcn = le64_to_cpu(attr->nres.svcn);
2032 evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2035 while (svcn < end) {
2036 CLST vcn1, zero, dealloc2;
2038 err = attr_load_runs(attr, ni, run, &svcn);
2041 vcn1 = max(vcn, svcn);
2042 zero = min(end, evcn1) - vcn1;
2045 err = run_deallocate_ex(sbi, run, vcn1, zero, &dealloc, true);
2049 if (dealloc2 == dealloc) {
2050 /* Looks like the required range is already sparse. */
2052 if (!run_add_entry(run, vcn1, SPARSE_LCN, zero,
2058 err = mi_pack_runs(mi, attr, run, evcn1 - svcn);
2062 /* Free all allocated memory. */
2063 run_truncate(run, 0);
2068 attr = ni_enum_attr_ex(ni, attr, &le, &mi);
2074 svcn = le64_to_cpu(attr->nres.svcn);
2075 evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2078 total_size -= (u64)dealloc << sbi->cluster_bits;
2079 attr_b->nres.total_size = cpu_to_le64(total_size);
2082 /* Update inode size. */
2083 inode_set_bytes(&ni->vfs_inode, total_size);
2084 ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
2085 mark_inode_dirty(&ni->vfs_inode);
2088 up_write(&ni->file.run_lock);
2090 make_bad_inode(&ni->vfs_inode);
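/*
 * Sketch of the E_NTFS_NOTALIGNED contract above: a caller that is able
 * to zero partial frames passes a non-NULL frame_size and, on
 * E_NTFS_NOTALIGNED, handles the unaligned head/tail itself. The zeroing
 * helper below is hypothetical.
 */
#if 0 /* example only */
	u32 frame_size;

	err = attr_punch_hole(ni, vbo, bytes, &frame_size);
	if (err == E_NTFS_NOTALIGNED) {
		/* Zero the unaligned head/tail, then punch only the part
		 * aligned to frame_size.
		 */
		err = zero_unaligned_edges(ni, vbo, bytes, frame_size);
	}
#endif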