// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 * TODO: Merge attr_set_size/attr_data_get_block/attr_allocate_frame?
 */

#include <linux/slab.h>
#include <linux/kernel.h>
/*
 * You can set external NTFS_MIN_LOG2_OF_CLUMP/NTFS_MAX_LOG2_OF_CLUMP to tune
 * the preallocation algorithm.
 */
#ifndef NTFS_MIN_LOG2_OF_CLUMP
#define NTFS_MIN_LOG2_OF_CLUMP 16
#endif

#ifndef NTFS_MAX_LOG2_OF_CLUMP
#define NTFS_MAX_LOG2_OF_CLUMP 26
#endif

#define NTFS_CLUMP_MIN (1 << (NTFS_MIN_LOG2_OF_CLUMP + 8))
#define NTFS_CLUMP_MAX (1ull << (NTFS_MAX_LOG2_OF_CLUMP + 8))
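/*
 * With the default values above, NTFS_CLUMP_MIN is 1 << 24 = 16 MiB and
 * NTFS_CLUMP_MAX is 1ull << 34 = 16 GiB: the file-size thresholds that
 * bound the preallocation clump chosen by get_pre_allocated() below.
 */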
static inline u64 get_pre_allocated(u64 size)
{
	u32 clump;
	u8 align_shift;
	u64 ret;

	if (size <= NTFS_CLUMP_MIN) {
		clump = 1 << NTFS_MIN_LOG2_OF_CLUMP;
		align_shift = NTFS_MIN_LOG2_OF_CLUMP;
	} else if (size >= NTFS_CLUMP_MAX) {
		clump = 1 << NTFS_MAX_LOG2_OF_CLUMP;
		align_shift = NTFS_MAX_LOG2_OF_CLUMP;
	} else {
		align_shift = NTFS_MIN_LOG2_OF_CLUMP - 1 +
			      __ffs(size >> (8 + NTFS_MIN_LOG2_OF_CLUMP));
		clump = 1u << align_shift;
	}

	ret = ((size + clump - 1) >> align_shift) << align_shift;

	return ret;
}
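/*
 * Worked example (illustrative): for size = 1,000,000 bytes the first
 * branch is taken (size <= 16 MiB), so clump = 1 << 16 = 64 KiB and the
 * result is round_up(1,000,000, 65,536) = 1,048,576 bytes. Larger files
 * get proportionally larger clumps, capped by the NTFS_CLUMP_MAX branch.
 */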
/*
 * attr_must_be_resident
 *
 * Return: True if attribute must be resident.
 */
static inline bool attr_must_be_resident(struct ntfs_sb_info *sbi,
					 enum ATTR_TYPE type)
{
	const struct ATTR_DEF_ENTRY *de;

	de = ntfs_query_def(sbi, type);
	if (de && (de->flags & NTFS_ATTR_MUST_BE_RESIDENT))
		return true;
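	/*
	 * Background note: on NTFS the volume's $AttrDef table flags certain
	 * attribute types as resident-only; e.g. $STANDARD_INFORMATION and
	 * $INDEX_ROOT are always resident. The lookup above defers that
	 * policy to the volume's own $AttrDef entries.
	 */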
/*
 * attr_load_runs - Load all runs stored in @attr.
 */
static int attr_load_runs(struct ATTRIB *attr, struct ntfs_inode *ni,
			  struct runs_tree *run, const CLST *vcn)
{
	CLST svcn = le64_to_cpu(attr->nres.svcn);
	CLST evcn = le64_to_cpu(attr->nres.evcn);
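	/*
	 * Fast path: nothing to decode if the range is empty (svcn > evcn)
	 * or the run tree already fully maps [svcn, evcn].
	 */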
	if (svcn >= evcn + 1 || run_is_mapped_full(run, svcn, evcn))
		return 0;

	if (vcn && (evcn < *vcn || *vcn < svcn))
		return -EINVAL;

	asize = le32_to_cpu(attr->size);
	run_off = le16_to_cpu(attr->nres.run_off);

	err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn,
			    vcn ? *vcn : svcn, Add2Ptr(attr, run_off),
/*
 * run_deallocate_ex - Deallocate clusters.
 */
static int run_deallocate_ex(struct ntfs_sb_info *sbi, struct runs_tree *run,
			     CLST vcn, CLST len, CLST *done, bool trim)
{
	CLST vcn_next, vcn0 = vcn, lcn, clen, dn = 0;

	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
		run_truncate(run, vcn0);

		if (lcn != SPARSE_LCN) {
			/* Mark bitmap range [lcn, lcn + clen) as free and trim clusters. */
			mark_as_free_ex(sbi, lcn, clen, trim);

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
			/* Save memory - don't load entire run. */
/*
 * attr_allocate_clusters - Find free space, mark it as used and store in @run.
 */
int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
			   CLST vcn, CLST lcn, CLST len, CLST *pre_alloc,
			   enum ALLOCATE_OPT opt, CLST *alen, const size_t fr,
			   CLST *new_lcn)
{
	CLST flen, vcn0 = vcn, pre = pre_alloc ? *pre_alloc : 0;
	size_t cnt = run->count;

	err = ntfs_look_for_free_space(sbi, lcn, len + pre, &lcn, &flen,

	if (err == -ENOSPC && pre) {

	if (new_lcn && vcn == vcn0)

	/* Add new fragment into run storage. */
	if (!run_add_entry(run, vcn, lcn, flen, opt == ALLOCATE_MFT)) {
		/* Undo last 'ntfs_look_for_free_space': free the 'flen' clusters it marked used. */
		mark_as_free_ex(sbi, lcn, flen, false);
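	/*
	 * Stop allocating when either the request is fully satisfied, this
	 * is an MFT allocation (taken in one piece from the MFT zone), or
	 * the fragment budget 'fr' for the caller's MFT record is spent
	 * (each fragment costs roughly 3 bytes of packed run space).
	 */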
	if (flen >= len || opt == ALLOCATE_MFT ||
	    (fr && run->count - cnt >= fr)) {

	/* Undo 'ntfs_look_for_free_space'. */
	run_deallocate_ex(sbi, run, vcn0, vcn - vcn0, NULL, false);
	run_truncate(run, vcn0);
/*
 * attr_make_nonresident
 *
 * If @page is not NULL, it already contains resident data
 * and is locked (called from ni_write_frame()).
 */
int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
			  struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
			  u64 new_size, struct runs_tree *run,
			  struct ATTRIB **ins_attr, struct page *page)
{
	struct ntfs_sb_info *sbi;
	struct ATTRIB *attr_s;

	u32 used, asize, rsize, aoff, align;

	used = le32_to_cpu(rec->used);
	asize = le32_to_cpu(attr->size);
	next = Add2Ptr(attr, asize);
	aoff = PtrOffset(rec, attr);
	rsize = le32_to_cpu(attr->res.data_size);
	is_data = attr->type == ATTR_DATA && !attr->name_len;

	align = sbi->cluster_size;
	if (is_attr_compressed(attr))
		align <<= COMPRESSION_UNIT;
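	/*
	 * Compressed attributes are sized in whole compression units:
	 * 'align' is scaled by 2^COMPRESSION_UNIT clusters (the standard
	 * NTFS unit of 4 gives 16 clusters per unit).
	 */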
	len = (rsize + align - 1) >> sbi->cluster_bits;

	/* Make a copy of the original attribute. */
	attr_s = kmemdup(attr, asize, GFP_NOFS);

	/* Empty resident -> Empty nonresident. */
		const char *data = resident_data(attr);

		err = attr_allocate_clusters(sbi, run, 0, 0, len, NULL,
					     ALLOCATE_DEF, &alen, 0, NULL);

		/* Empty resident -> Non-empty nonresident. */
		} else if (!is_data) {
			err = ntfs_sb_write_run(sbi, run, 0, data, rsize, 0);

			page = grab_cache_page(ni->vfs_inode.i_mapping, 0);

			kaddr = kmap_atomic(page);
			memcpy(kaddr, data, rsize);
			memset(kaddr + rsize, 0, PAGE_SIZE - rsize);
			kunmap_atomic(kaddr);
			flush_dcache_page(page);
			SetPageUptodate(page);
			set_page_dirty(page);

	/* Remove the original attribute. */
	memmove(attr, Add2Ptr(attr, asize), used - aoff);
	rec->used = cpu_to_le32(used);
	al_remove_le(ni, le);

	err = ni_insert_nonresident(ni, attr_s->type, attr_name(attr_s),
				    attr_s->name_len, run, 0, alen,
				    attr_s->flags, &attr, NULL, NULL);

	attr->nres.data_size = cpu_to_le64(rsize);
	attr->nres.valid_size = attr->nres.data_size;

	ni->ni_flags &= ~NI_FLAG_RESIDENT;

	/* Resident attribute becomes nonresident. */

	attr = Add2Ptr(rec, aoff);
	memmove(next, attr, used - aoff);
	memcpy(attr, attr_s, asize);
	rec->used = cpu_to_le32(used + asize);

	/* Undo: do not trim newly allocated clusters. */
	run_deallocate(sbi, run, false);
/*
 * attr_set_size_res - Helper for attr_set_size().
 */
static int attr_set_size_res(struct ntfs_inode *ni, struct ATTRIB *attr,
			     struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
			     u64 new_size, struct runs_tree *run,
			     struct ATTRIB **ins_attr)
{
	struct ntfs_sb_info *sbi = mi->sbi;
	struct MFT_REC *rec = mi->mrec;
	u32 used = le32_to_cpu(rec->used);
	u32 asize = le32_to_cpu(attr->size);
	u32 aoff = PtrOffset(rec, attr);
	u32 rsize = le32_to_cpu(attr->res.data_size);
	u32 tail = used - aoff - asize;
	char *next = Add2Ptr(attr, asize);
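	/*
	 * Resident payloads are stored 8-byte aligned inside the MFT
	 * record, so the space delta below is computed on the aligned
	 * sizes, not on the raw byte counts.
	 */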
	s64 dsize = ALIGN(new_size, 8) - ALIGN(rsize, 8);

	if (dsize < 0) {
		memmove(next + dsize, next, tail);
	} else if (dsize > 0) {
		if (used + dsize > sbi->max_bytes_per_attr)
			return attr_make_nonresident(ni, attr, le, mi, new_size,
						     run, ins_attr, NULL);

		memmove(next + dsize, next, tail);
		memset(next, 0, dsize);

	if (new_size > rsize)
		memset(Add2Ptr(resident_data(attr), rsize), 0,

	rec->used = cpu_to_le32(used + dsize);
	attr->size = cpu_to_le32(asize + dsize);
	attr->res.data_size = cpu_to_le32(new_size);
/*
 * attr_set_size - Change the size of attribute.
 *
 * - Sparse/compressed: No allocated clusters.
 * - Normal: Append allocated and preallocated new clusters.
 * - Don't deallocate if @keep_prealloc is set.
 */
int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
		  const __le16 *name, u8 name_len, struct runs_tree *run,
		  u64 new_size, const u64 *new_valid, bool keep_prealloc,
		  struct ATTRIB **ret)
{
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	u8 cluster_bits = sbi->cluster_bits;
	bool is_mft =
		ni->mi.rno == MFT_REC_MFT && type == ATTR_DATA && !name_len;
	u64 old_valid, old_size, old_alloc, new_alloc, new_alloc_tmp;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST alen, vcn, lcn, new_alen, old_alen, svcn, evcn;
	CLST next_svcn, pre_alloc = -1, done = 0;
	bool is_ext, is_bad = false;

	attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len, NULL,

	if (!attr_b->non_res) {
		err = attr_set_size_res(ni, attr_b, le_b, mi_b, new_size, run,

		/* Return if file is still resident. */
		if (!attr_b->non_res)

	/* Layout of records may have changed, so do a full search. */

	is_ext = is_attr_ext(attr_b);
	align = sbi->cluster_size;
		align <<= attr_b->nres.c_unit;

	old_valid = le64_to_cpu(attr_b->nres.valid_size);
	old_size = le64_to_cpu(attr_b->nres.data_size);
	old_alloc = le64_to_cpu(attr_b->nres.alloc_size);

	old_alen = old_alloc >> cluster_bits;
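	/*
	 * Round the requested size up to the allocation granularity:
	 * one cluster normally, one compression unit (cluster_size <<
	 * c_unit) for sparse/compressed attributes.
	 */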
	new_alloc = (new_size + align - 1) & ~(u64)(align - 1);
	new_alen = new_alloc >> cluster_bits;

	if (keep_prealloc && new_size < old_size) {
		attr_b->nres.data_size = cpu_to_le64(new_size);

	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn = le64_to_cpu(attr_b->nres.evcn);

	if (svcn <= vcn && vcn <= evcn) {

	attr = ni_find_attr(ni, attr_b, &le, type, name, name_len, &vcn,

	svcn = le64_to_cpu(attr->nres.svcn);
	evcn = le64_to_cpu(attr->nres.evcn);

	/*
	 * attr,mi,le - last attribute segment (containing 'vcn').
	 * attr_b,mi_b,le_b - base (primary) attribute segment.
	 */

	err = attr_load_runs(attr, ni, run, NULL);

	if (new_size > old_size) {

		if (new_alloc <= old_alloc) {
			attr_b->nres.data_size = cpu_to_le64(new_size);

		/*
		 * Add clusters. In the simple case we have to:
		 * - allocate space (vcn, lcn, len)
		 * - update packed run in 'mi'
		 * - update attr->nres.evcn
		 * - update attr_b->nres.data_size/attr_b->nres.alloc_size
		 */
		to_allocate = new_alen - old_alen;
add_alloc_in_same_attr_seg:

		/* MFT allocates clusters from the MFT zone. */

		/* No preallocation for sparse/compressed. */
		} else if (pre_alloc == -1) {
			if (type == ATTR_DATA && !name_len &&
			    sbi->options->prealloc) {
					get_pre_allocated(new_size)) -

		/* Get the last LCN to allocate from. */
		    !run_lookup_entry(run, vcn, &lcn, NULL, NULL)) {

		if (lcn == SPARSE_LCN)

		free = wnd_zeroes(&sbi->used.bitmap);
		if (to_allocate > free) {

		if (pre_alloc && to_allocate + pre_alloc > free)

		if (!run_add_entry(run, vcn, SPARSE_LCN, to_allocate,

		/* ~3 bytes per fragment. */
		err = attr_allocate_clusters(
			sbi, run, vcn, lcn, to_allocate, &pre_alloc,
			is_mft ? ALLOCATE_MFT : 0, &alen,
			       : (sbi->record_size -
				  le32_to_cpu(rec->used) + 8) /

		if (to_allocate > alen)

		err = mi_pack_runs(mi, attr, run, vcn - svcn);
		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
		new_alloc_tmp = (u64)next_svcn << cluster_bits;
		attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);

		if (next_svcn >= vcn && !to_allocate) {
			/* Normal way. Update attribute and exit. */
			attr_b->nres.data_size = cpu_to_le64(new_size);

		/* At least two MFT records to avoid a recursive loop. */
		if (is_mft && next_svcn == vcn &&
		    ((u64)done << sbi->cluster_bits) >= 2 * sbi->record_size) {
			new_size = new_alloc_tmp;
			attr_b->nres.data_size = attr_b->nres.alloc_size;

		if (le32_to_cpu(rec->used) < sbi->record_size) {
			old_alen = next_svcn;
			goto add_alloc_in_same_attr_seg;

		attr_b->nres.data_size = attr_b->nres.alloc_size;
		if (new_alloc_tmp < old_valid)
			attr_b->nres.valid_size = attr_b->nres.data_size;

		if (type == ATTR_LIST) {
			err = ni_expand_list(ni);

			/* Layout of records has changed. */

		if (!ni->attr_list.size) {
			err = ni_create_attr_list(ni);
			/* In case of error the layout of records is unchanged. */

			/* Layout of records has changed. */

		if (next_svcn >= vcn) {
			/* This is MFT data, repeat. */

		/* Insert a new attribute segment. */
		err = ni_insert_nonresident(ni, type, name, name_len, run,
					    next_svcn, vcn - next_svcn,
					    attr_b->flags, &attr, &mi, NULL);

		/*
		 * Layout of records may have changed.
		 * Find the base attribute to update.
		 */
		attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len,

		/* ni_insert_nonresident failed. */

	run_truncate_head(run, evcn + 1);

	svcn = le64_to_cpu(attr->nres.svcn);
	evcn = le64_to_cpu(attr->nres.evcn);

	/*
	 * The attribute is in a consistent state.
	 * Save this point to restore to if the next steps fail.
	 */
	old_valid = old_size = old_alloc = (u64)vcn << cluster_bits;
	attr_b->nres.valid_size = attr_b->nres.data_size =
		attr_b->nres.alloc_size = cpu_to_le64(old_size);

	if (new_size != old_size ||
	    (new_alloc != old_alloc && !keep_prealloc)) {
		/*
		 * Truncate clusters. In the simple case we have to:
		 * - update packed run in 'mi'
		 * - update attr->nres.evcn
		 * - update attr_b->nres.data_size/attr_b->nres.alloc_size
		 * - mark and trim clusters as free (vcn, lcn, len)
		 */

		vcn = max(svcn, new_alen);
		new_alloc_tmp = (u64)vcn << cluster_bits;

			err = mi_pack_runs(mi, attr, run, vcn - svcn);

		} else if (le && le->vcn) {
			u16 le_sz = le16_to_cpu(le->size);

			/*
			 * NOTE: List entries for one attribute are always
			 * the same size. We deal with the last entry
			 * (vcn==0) and it is not the first in the entries
			 * array (the list entry for the std attribute is
			 * always first), so it is safe to step back.
			 */
			mi_remove_attr(NULL, mi, attr);

			if (!al_remove_le(ni, le)) {

			le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);

			attr->nres.evcn = cpu_to_le64((u64)vcn - 1);

		attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);

		if (vcn == new_alen) {
			attr_b->nres.data_size = cpu_to_le64(new_size);
			if (new_size < old_valid)
				attr_b->nres.valid_size =
					attr_b->nres.data_size;

			    le64_to_cpu(attr_b->nres.data_size))
				attr_b->nres.data_size =
					attr_b->nres.alloc_size;
			    le64_to_cpu(attr_b->nres.valid_size))
				attr_b->nres.valid_size =
					attr_b->nres.alloc_size;

		err = run_deallocate_ex(sbi, run, vcn, evcn - vcn + 1, &dlen,

		/* dlen == number of clusters actually deallocated. */
		le64_sub_cpu(&attr_b->nres.total_size,
			     ((u64)dlen << cluster_bits));

		run_truncate(run, vcn);
		if (new_alloc_tmp <= new_alloc)

		old_size = new_alloc_tmp;

	if (le->type != type || le->name_len != name_len ||
	    memcmp(le_name(le), name, name_len * sizeof(short))) {

	err = ni_load_mi(ni, le, &mi);

	attr = mi_find_attr(mi, NULL, type, name, name_len, &le->id);

		__le64 valid = cpu_to_le64(min(*new_valid, new_size));

		if (attr_b->nres.valid_size != valid) {
			attr_b->nres.valid_size = valid;

	/* Update inode_set_bytes. */
	if (((type == ATTR_DATA && !name_len) ||
	     (type == ATTR_ALLOC && name == I30_NAME))) {

		if (ni->vfs_inode.i_size != new_size) {
			ni->vfs_inode.i_size = new_size;

		if (attr_b->non_res) {
			new_alloc = le64_to_cpu(attr_b->nres.alloc_size);
			if (inode_get_bytes(&ni->vfs_inode) != new_alloc) {
				inode_set_bytes(&ni->vfs_inode, new_alloc);

		ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
		mark_inode_dirty(&ni->vfs_inode);

	attr_b->nres.data_size = cpu_to_le64(old_size);
	attr_b->nres.valid_size = cpu_to_le64(old_valid);
	attr_b->nres.alloc_size = cpu_to_le64(old_alloc);

	/* Restore 'attr' and 'mi'. */
	if (le64_to_cpu(attr_b->nres.svcn) <= svcn &&
	    svcn <= le64_to_cpu(attr_b->nres.evcn)) {

	attr = ni_find_attr(ni, attr_b, &le, type, name, name_len,

	if (mi_pack_runs(mi, attr, run, evcn - svcn + 1))

	run_deallocate_ex(sbi, run, vcn, alen, NULL, false);

	run_truncate(run, vcn);

	_ntfs_bad_inode(&ni->vfs_inode);
int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
			CLST *len, bool *new)
{
	struct runs_tree *run = &ni->file.run;
	struct ntfs_sb_info *sbi;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST hint, svcn, to_alloc, evcn1, next_svcn, asize, end;

	down_read(&ni->file.run_lock);
	ok = run_lookup_entry(run, vcn, lcn, len, NULL);
	up_read(&ni->file.run_lock);

	if (ok && (*lcn != SPARSE_LCN || !new)) {

	if (ok && clen > *len)

	cluster_bits = sbi->cluster_bits;

	down_write(&ni->file.run_lock);

	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);

	if (!attr_b->non_res) {

	asize = le64_to_cpu(attr_b->nres.alloc_size) >> cluster_bits;
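	/*
	 * Allocation proceeds in whole compression frames of 1 << c_unit
	 * clusters, so round the requested length up to a frame boundary
	 * (c_unit is 0 for ordinary attributes, making the frame a single
	 * cluster; compressed attributes typically use 2^4 = 16 clusters).
	 */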
	clst_per_frame = 1u << attr_b->nres.c_unit;
	to_alloc = (clen + clst_per_frame - 1) & ~(clst_per_frame - 1);

	if (vcn + to_alloc > asize)
		to_alloc = asize - vcn;

	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;

	if (le_b && (vcn < svcn || evcn1 <= vcn)) {
		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;

	err = attr_load_runs(attr, ni, run, NULL);

	ok = run_lookup_entry(run, vcn, lcn, len, NULL);
	if (ok && (*lcn != SPARSE_LCN || !new)) {

	if (ok && clen > *len) {
		to_alloc = (clen + clst_per_frame - 1) &
			   ~(clst_per_frame - 1);

	if (!is_attr_ext(attr_b)) {

	/* Get the last LCN to allocate from. */
		if (!run_add_entry(run, evcn1, SPARSE_LCN, vcn - evcn1,
	} else if (vcn && !run_lookup_entry(run, vcn - 1, &hint, NULL, NULL)) {

	err = attr_allocate_clusters(
		sbi, run, vcn, hint + 1, to_alloc, NULL, 0, len,
		(sbi->record_size - le32_to_cpu(mi->mrec->used) + 8) / 3 + 1,

	total_size = le64_to_cpu(attr_b->nres.total_size) +
		     ((u64)*len << cluster_bits);

	err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);

	attr_b->nres.total_size = cpu_to_le64(total_size);
	inode_set_bytes(&ni->vfs_inode, total_size);
	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;

	mark_inode_dirty(&ni->vfs_inode);

	/* Stored [vcn : next_svcn) from [vcn : end). */
	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;

	if (next_svcn == evcn1) {
		/* Normal way. Update attribute and exit. */

	/* Add new segment [next_svcn : evcn1). */
	if (!ni->attr_list.size) {
		err = ni_create_attr_list(ni);

		/* Layout of records has changed. */
		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
	/* Estimate next attribute. */
	attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);

		CLST alloc = bytes_to_cluster(
			sbi, le64_to_cpu(attr_b->nres.alloc_size));
		CLST evcn = le64_to_cpu(attr->nres.evcn);

		if (end < next_svcn)
		while (end > evcn) {
			/* Remove segment [svcn : evcn]. */
			mi_remove_attr(NULL, mi, attr);

			if (!al_remove_le(ni, le)) {

			if (evcn + 1 >= alloc) {
				/* Last attribute segment. */

			if (ni_load_mi(ni, le, &mi)) {

			attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0,

			svcn = le64_to_cpu(attr->nres.svcn);
			evcn = le64_to_cpu(attr->nres.evcn);

	err = attr_load_runs(attr, ni, run, &end);

	attr->nres.svcn = cpu_to_le64(next_svcn);
	err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);

	le->vcn = cpu_to_le64(next_svcn);
	ni->attr_list.dirty = true;

	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;

	if (evcn1 > next_svcn) {
		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
					    next_svcn, evcn1 - next_svcn,
					    attr_b->flags, &attr, &mi, NULL);

	run_truncate_around(run, vcn);

	up_write(&ni->file.run_lock);
int attr_data_read_resident(struct ntfs_inode *ni, struct page *page)
{
	struct ATTRIB *attr;

	attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, NULL);

		return E_NTFS_NONRESIDENT;

	vbo = page->index << PAGE_SHIFT;
	data_size = le32_to_cpu(attr->res.data_size);
	if (vbo < data_size) {
		const char *data = resident_data(attr);
		char *kaddr = kmap_atomic(page);
		u32 use = data_size - vbo;

		if (use > PAGE_SIZE)

		memcpy(kaddr, data + vbo, use);
		memset(kaddr + use, 0, PAGE_SIZE - use);
		kunmap_atomic(kaddr);
		flush_dcache_page(page);
		SetPageUptodate(page);
	} else if (!PageUptodate(page)) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);

int attr_data_write_resident(struct ntfs_inode *ni, struct page *page)
{
	struct mft_inode *mi;
	struct ATTRIB *attr;

	attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, &mi);

	if (attr->non_res) {
		/* Return a special error code to check this case. */
		return E_NTFS_NONRESIDENT;
	}

	vbo = page->index << PAGE_SHIFT;
	data_size = le32_to_cpu(attr->res.data_size);
	if (vbo < data_size) {
		char *data = resident_data(attr);
		char *kaddr = kmap_atomic(page);
		u32 use = data_size - vbo;

		if (use > PAGE_SIZE)
		memcpy(data + vbo, kaddr, use);
		kunmap_atomic(kaddr);

	ni->i_valid = data_size;
/*
 * attr_load_runs_vcn - Load runs with VCN.
 */
int attr_load_runs_vcn(struct ntfs_inode *ni, enum ATTR_TYPE type,
		       const __le16 *name, u8 name_len, struct runs_tree *run,
		       CLST vcn)
{
	struct ATTRIB *attr;

	/* Is the record corrupted? */

	attr = ni_find_attr(ni, NULL, NULL, type, name, name_len, &vcn, NULL);

	/* Is the record corrupted? */

	svcn = le64_to_cpu(attr->nres.svcn);
	evcn = le64_to_cpu(attr->nres.evcn);

	if (evcn < vcn || vcn < svcn) {
		/* Is the record corrupted? */

	ro = le16_to_cpu(attr->nres.run_off);

	if (ro > le32_to_cpu(attr->size))

	err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn, svcn,
			    Add2Ptr(attr, ro), le32_to_cpu(attr->size) - ro);

/*
 * attr_load_runs_range - Load runs for the given range [from, to).
 */
int attr_load_runs_range(struct ntfs_inode *ni, enum ATTR_TYPE type,
			 const __le16 *name, u8 name_len, struct runs_tree *run,
			 u64 from, u64 to)
{
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	u8 cluster_bits = sbi->cluster_bits;
	CLST vcn_last = (to - 1) >> cluster_bits;

	for (vcn = from >> cluster_bits; vcn <= vcn_last; vcn += clen) {
		if (!run_lookup_entry(run, vcn, &lcn, &clen, NULL)) {
			err = attr_load_runs_vcn(ni, type, name, name_len, run,

			clen = 0; /* Next run_lookup_entry(vcn) must succeed. */
#ifdef CONFIG_NTFS3_LZX_XPRESS
/*
 * attr_wof_frame_info
 *
 * Read the header of an Xpress/LZX file to get info about the frame.
 */
int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
			struct runs_tree *run, u64 frame, u64 frames,
			u8 frame_bits, u32 *ondisk_size, u64 *vbo_data)
{
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	u64 vbo[2], off[2], wof_size;

	if (ni->vfs_inode.i_size < 0x100000000ull) {
		/* File starts with an array of 32-bit offsets. */
		bytes_per_off = sizeof(__le32);
		vbo[1] = frame << 2;
		*vbo_data = frames << 2;
	} else {
		/* File starts with an array of 64-bit offsets. */
		bytes_per_off = sizeof(__le64);
		vbo[1] = frame << 3;
		*vbo_data = frames << 3;
	}

	/*
	 * Read 4 (or 8) bytes at [vbo - 4 (or 8)] == offset where the compressed frame starts.
	 * Read 4 (or 8) bytes at [vbo] == offset where the compressed frame ends.
	 */
	if (!attr->non_res) {
		if (vbo[1] + bytes_per_off > le32_to_cpu(attr->res.data_size)) {
			ntfs_inode_err(&ni->vfs_inode, "is corrupted");

		addr = resident_data(attr);

		if (bytes_per_off == sizeof(__le32)) {
			off32 = Add2Ptr(addr, vbo[1]);
			off[0] = vbo[1] ? le32_to_cpu(off32[-1]) : 0;
			off[1] = le32_to_cpu(off32[0]);
		} else {
			off64 = Add2Ptr(addr, vbo[1]);
			off[0] = vbo[1] ? le64_to_cpu(off64[-1]) : 0;
			off[1] = le64_to_cpu(off64[0]);
		}

		*vbo_data += off[0];
		*ondisk_size = off[1] - off[0];
	wof_size = le64_to_cpu(attr->nres.data_size);
	down_write(&ni->file.run_lock);
	page = ni->file.offs_page;
		page = alloc_page(GFP_KERNEL);

		ni->file.offs_page = page;

	addr = page_address(page);

	voff = vbo[1] & (PAGE_SIZE - 1);
	vbo[0] = vbo[1] - bytes_per_off;

		pgoff_t index = vbo[i] >> PAGE_SHIFT;

		if (index != page->index) {
			u64 from = vbo[i] & ~(u64)(PAGE_SIZE - 1);
			u64 to = min(from + PAGE_SIZE, wof_size);

			err = attr_load_runs_range(ni, ATTR_DATA, WOF_NAME,
						   ARRAY_SIZE(WOF_NAME), run,

			err = ntfs_bio_pages(sbi, run, &page, 1, from,
					     to - from, REQ_OP_READ);

			page->index = index;

		if (bytes_per_off == sizeof(__le32)) {
			off32 = Add2Ptr(addr, voff);
			off[1] = le32_to_cpu(*off32);
		} else {
			off64 = Add2Ptr(addr, voff);
			off[1] = le64_to_cpu(*off64);
		}

		if (bytes_per_off == sizeof(__le32)) {
			off32 = Add2Ptr(addr, PAGE_SIZE - sizeof(u32));
			off[0] = le32_to_cpu(*off32);
		} else {
			off64 = Add2Ptr(addr, PAGE_SIZE - sizeof(u64));
			off[0] = le64_to_cpu(*off64);
		}

		/* Two values in one page. */
		if (bytes_per_off == sizeof(__le32)) {
			off32 = Add2Ptr(addr, voff);
			off[0] = le32_to_cpu(off32[-1]);
			off[1] = le32_to_cpu(off32[0]);
		} else {
			off64 = Add2Ptr(addr, voff);
			off[0] = le64_to_cpu(off64[-1]);
			off[1] = le64_to_cpu(off64[0]);
		}

	*vbo_data += off[0];
	*ondisk_size = off[1] - off[0];

	up_write(&ni->file.run_lock);
/*
 * attr_is_frame_compressed - Used to detect a compressed frame.
 */
int attr_is_frame_compressed(struct ntfs_inode *ni, struct ATTRIB *attr,
			     CLST frame, CLST *clst_data)
{
	CLST clen, lcn, vcn, alen, slen, vcn_next;
	struct runs_tree *run;

	if (!is_attr_compressed(attr))

	clst_frame = 1u << attr->nres.c_unit;
	vcn = frame * clst_frame;
	run = &ni->file.run;

	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
		err = attr_load_runs_vcn(ni, attr->type, attr_name(attr),
					 attr->name_len, run, vcn);

		if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))

	if (lcn == SPARSE_LCN) {
		/* Sparse frame. */

	if (clen >= clst_frame) {
		/*
		 * The frame is not compressed because
		 * it does not contain any sparse clusters.
		 */
		*clst_data = clst_frame;

	alen = bytes_to_cluster(ni->mi.sbi, le64_to_cpu(attr->nres.alloc_size));

	/*
	 * The frame is compressed if *clst_data + slen >= clst_frame.
	 * Check the next fragments.
	 */
	while ((vcn += clen) < alen) {

		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
			err = attr_load_runs_vcn(ni, attr->type,
						 attr_name(attr),
						 attr->name_len, run, vcn_next);

			if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))

		if (lcn == SPARSE_LCN) {

			/*
			 * Data clusters + sparse clusters are
			 * not enough for the frame.
			 */

		if (*clst_data + slen >= clst_frame) {
			/*
			 * There are no sparse clusters in this frame,
			 * so it is not compressed.
			 */
			*clst_data = clst_frame;

		/* Frame is compressed. */
/*
 * attr_allocate_frame - Allocate/free clusters for @frame.
 *
 * Assumed: down_write(&ni->file.run_lock);
 */
int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
			u64 new_valid)
{
	struct runs_tree *run = &ni->file.run;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST svcn, evcn1, next_svcn, lcn, len;
	CLST vcn, end, clst_data;
	u64 total_size, valid_size, data_size;

	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);

	if (!is_attr_ext(attr_b))

	vcn = frame << NTFS_LZNT_CUNIT;
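	/*
	 * NTFS LZNT compression works on frames of 1 << NTFS_LZNT_CUNIT
	 * clusters (the standard unit of 4 gives 16 clusters), so the
	 * first VCN of frame N is N << NTFS_LZNT_CUNIT.
	 */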
	total_size = le64_to_cpu(attr_b->nres.total_size);

	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
	data_size = le64_to_cpu(attr_b->nres.data_size);

	if (svcn <= vcn && vcn < evcn1) {

	attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,

	svcn = le64_to_cpu(attr->nres.svcn);
	evcn1 = le64_to_cpu(attr->nres.evcn) + 1;

	err = attr_load_runs(attr, ni, run, NULL);

	err = attr_is_frame_compressed(ni, attr_b, frame, &clst_data);

	total_size -= (u64)clst_data << sbi->cluster_bits;

	len = bytes_to_cluster(sbi, compr_size);

	if (len == clst_data)

	if (len < clst_data) {
		err = run_deallocate_ex(sbi, run, vcn + len, clst_data - len,

		if (!run_add_entry(run, vcn + len, SPARSE_LCN, clst_data - len,

		end = vcn + clst_data;
		/* Run contains the updated range [vcn + len : end). */
	} else {
		CLST alen, hint = 0;
		/* Get the last LCN to allocate from. */
		if (vcn + clst_data &&
		    !run_lookup_entry(run, vcn + clst_data - 1, &hint, NULL,

		err = attr_allocate_clusters(sbi, run, vcn + clst_data,
					     hint + 1, len - clst_data, NULL, 0,

		/* Run contains the updated range [vcn + clst_data : end). */

	total_size += (u64)len << sbi->cluster_bits;

	err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);

	attr_b->nres.total_size = cpu_to_le64(total_size);
	inode_set_bytes(&ni->vfs_inode, total_size);

	mark_inode_dirty(&ni->vfs_inode);

	/* Stored [vcn : next_svcn) from [vcn : end). */
	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;

	if (next_svcn == evcn1) {
		/* Normal way. Update attribute and exit. */

	/* Add new segment [next_svcn : evcn1). */
	if (!ni->attr_list.size) {
		err = ni_create_attr_list(ni);

		/* Layout of records has changed. */
		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,

	/* Estimate next attribute. */
	attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);

		CLST alloc = bytes_to_cluster(
			sbi, le64_to_cpu(attr_b->nres.alloc_size));
		CLST evcn = le64_to_cpu(attr->nres.evcn);

		if (end < next_svcn)
		while (end > evcn) {
			/* Remove segment [svcn : evcn]. */
			mi_remove_attr(NULL, mi, attr);

			if (!al_remove_le(ni, le)) {

			if (evcn + 1 >= alloc) {
				/* Last attribute segment. */

			if (ni_load_mi(ni, le, &mi)) {

			attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0,

			svcn = le64_to_cpu(attr->nres.svcn);
			evcn = le64_to_cpu(attr->nres.evcn);

	err = attr_load_runs(attr, ni, run, &end);

	attr->nres.svcn = cpu_to_le64(next_svcn);
	err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);

	le->vcn = cpu_to_le64(next_svcn);
	ni->attr_list.dirty = true;

	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;

	if (evcn1 > next_svcn) {
		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
					    next_svcn, evcn1 - next_svcn,
					    attr_b->flags, &attr, &mi, NULL);

	run_truncate_around(run, vcn);

	if (new_valid > data_size)
		new_valid = data_size;

	valid_size = le64_to_cpu(attr_b->nres.valid_size);
	if (new_valid != valid_size) {
		attr_b->nres.valid_size = cpu_to_le64(new_valid);
/*
 * attr_collapse_range - Collapse range in file.
 */
int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
{
	struct runs_tree *run = &ni->file.run;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST svcn, evcn1, len, dealloc, alen;
	u64 valid_size, data_size, alloc_size, total_size;

	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);

	if (!attr_b->non_res) {
		/* Attribute is resident. Nothing to do? */

	data_size = le64_to_cpu(attr_b->nres.data_size);
	alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
	a_flags = attr_b->flags;

	if (is_attr_ext(attr_b)) {
		total_size = le64_to_cpu(attr_b->nres.total_size);
		mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
	} else {
		total_size = alloc_size;
		mask = sbi->cluster_mask;
	}
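	/*
	 * 'mask' is the alignment granularity minus one: a whole
	 * compression frame for sparse/compressed attributes, a single
	 * cluster otherwise. Collapse requests must be aligned to it.
	 */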
	if ((vbo & mask) || (bytes & mask)) {
		/* Only cluster-aligned ranges can be collapsed. */

	if (vbo > data_size)

	down_write(&ni->file.run_lock);

	if (vbo + bytes >= data_size) {
		u64 new_valid = min(ni->i_valid, vbo);

		/* Simply truncate the file at 'vbo'. */
		truncate_setsize(&ni->vfs_inode, vbo);
		err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, vbo,
				    &new_valid, true, NULL);

		if (!err && new_valid < ni->i_valid)
			ni->i_valid = new_valid;

	/*
	 * Enumerate all attribute segments and collapse.
	 */
	alen = alloc_size >> sbi->cluster_bits;
	vcn = vbo >> sbi->cluster_bits;
	len = bytes >> sbi->cluster_bits;

	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;

	if (svcn <= vcn && vcn < evcn1) {

	attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,

	svcn = le64_to_cpu(attr->nres.svcn);
	evcn1 = le64_to_cpu(attr->nres.evcn) + 1;

		attr->nres.svcn = cpu_to_le64(svcn - len);
		attr->nres.evcn = cpu_to_le64(evcn1 - 1 - len);

			le->vcn = attr->nres.svcn;
			ni->attr_list.dirty = true;
	} else if (svcn < vcn || end < evcn1) {
		CLST vcn1, eat, next_svcn;

		/* Collapse a part of this attribute segment. */
		err = attr_load_runs(attr, ni, run, &svcn);

		vcn1 = max(vcn, svcn);
		eat = min(end, evcn1) - vcn1;

		err = run_deallocate_ex(sbi, run, vcn1, eat, &dealloc,

		if (!run_collapse_range(run, vcn1, eat)) {

			attr->nres.svcn = cpu_to_le64(vcn);
				le->vcn = attr->nres.svcn;
				ni->attr_list.dirty = true;

		err = mi_pack_runs(mi, attr, run, evcn1 - svcn - eat);

		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
		if (next_svcn + eat < evcn1) {
			err = ni_insert_nonresident(
				ni, ATTR_DATA, NULL, 0, run, next_svcn,
				evcn1 - eat - next_svcn, a_flags, &attr,

			/* Layout of records may have changed. */

		/* Free all allocated memory. */
		run_truncate(run, 0);

		u16 roff = le16_to_cpu(attr->nres.run_off);

		if (roff > le32_to_cpu(attr->size)) {

		run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn,
			      evcn1 - 1, svcn, Add2Ptr(attr, roff),
			      le32_to_cpu(attr->size) - roff);

		/* Delete this attribute segment. */
		mi_remove_attr(NULL, mi, attr);

		le_sz = le16_to_cpu(le->size);
		if (!al_remove_le(ni, le)) {

		/* Load the next record that contains this attribute. */
		if (ni_load_mi(ni, le, &mi)) {

		/* Look for the required attribute. */
		attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL,

		le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);

	attr = ni_enum_attr_ex(ni, attr, &le, &mi);

	svcn = le64_to_cpu(attr->nres.svcn);
	evcn1 = le64_to_cpu(attr->nres.evcn) + 1;

	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
	valid_size = ni->i_valid;
	if (vbo + bytes <= valid_size)
		valid_size -= bytes;
	else if (vbo < valid_size)

	attr_b->nres.alloc_size = cpu_to_le64(alloc_size - bytes);
	attr_b->nres.data_size = cpu_to_le64(data_size);
	attr_b->nres.valid_size = cpu_to_le64(min(valid_size, data_size));
	total_size -= (u64)dealloc << sbi->cluster_bits;
	if (is_attr_ext(attr_b))
		attr_b->nres.total_size = cpu_to_le64(total_size);

	/* Update inode size. */
	ni->i_valid = valid_size;
	ni->vfs_inode.i_size = data_size;
	inode_set_bytes(&ni->vfs_inode, total_size);
	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
	mark_inode_dirty(&ni->vfs_inode);

	up_write(&ni->file.run_lock);

	_ntfs_bad_inode(&ni->vfs_inode);
/*
 * attr_punch_hole
 *
 * Not for normal files.
 */
int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
{
	struct runs_tree *run = &ni->file.run;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST svcn, evcn1, vcn, len, end, alen, hole, next_svcn;
	u64 total_size, alloc_size;

	struct runs_tree run2;

	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);

	if (!attr_b->non_res) {
		u32 data_size = le32_to_cpu(attr_b->res.data_size);

		if (vbo > data_size)

		to = min_t(u64, vbo + bytes, data_size);
		memset(Add2Ptr(resident_data(attr_b), from), 0, to - from);

	if (!is_attr_ext(attr_b))

	alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
	total_size = le64_to_cpu(attr_b->nres.total_size);

	if (vbo >= alloc_size) {
		/* NOTE: It is allowed. */

	mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;

	if (bytes > alloc_size)

	if ((vbo & mask) || (bytes & mask)) {
		/* We have to zero a range(s). */
		if (frame_size == NULL) {
			/* Caller insists the range is aligned. */

		*frame_size = mask + 1;
		return E_NTFS_NOTALIGNED;
	}
	down_write(&ni->file.run_lock);

	run_truncate(run, 0);

	/*
	 * Enumerate all attribute segments and punch holes where necessary.
	 */
	alen = alloc_size >> sbi->cluster_bits;
	vcn = vbo >> sbi->cluster_bits;
	len = bytes >> sbi->cluster_bits;

	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
	a_flags = attr_b->flags;

	if (svcn <= vcn && vcn < evcn1) {

	attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,

	svcn = le64_to_cpu(attr->nres.svcn);
	evcn1 = le64_to_cpu(attr->nres.evcn) + 1;

	while (svcn < end) {
		CLST vcn1, zero, hole2 = hole;

		err = attr_load_runs(attr, ni, run, &svcn);

		vcn1 = max(vcn, svcn);
		zero = min(end, evcn1) - vcn1;

		/*
		 * Check the range [vcn1, vcn1 + zero).
		 * Calculate how many clusters there are.
		 * Don't do any destructive actions.
		 */
		err = run_deallocate_ex(NULL, run, vcn1, zero, &hole2, false);

		/* Check if the required range is already a hole. */

		/* Make a clone of the run to undo. */
		err = run_clone(run, &run2);

		/* Make a hole range (sparse) [vcn1, vcn1 + zero). */
		if (!run_add_entry(run, vcn1, SPARSE_LCN, zero, false)) {

		/* Update the run in the attribute segment. */
		err = mi_pack_runs(mi, attr, run, evcn1 - svcn);

		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
		if (next_svcn < evcn1) {
			/* Insert a new attribute segment. */
			err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
						    evcn1 - next_svcn, a_flags,

			/* Layout of records may have changed. */

		/* The real deallocation. Should not fail. */
		run_deallocate_ex(sbi, &run2, vcn1, zero, &hole, true);

		/* Free all allocated memory. */
		run_truncate(run, 0);

		/* Get the next attribute segment. */
		attr = ni_enum_attr_ex(ni, attr, &le, &mi);

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;

	attr_b = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL,

	total_size -= (u64)hole << sbi->cluster_bits;
	attr_b->nres.total_size = cpu_to_le64(total_size);

	/* Update inode size. */
	inode_set_bytes(&ni->vfs_inode, total_size);
	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
	mark_inode_dirty(&ni->vfs_inode);

	up_write(&ni->file.run_lock);

	_ntfs_bad_inode(&ni->vfs_inode);

	/*
	 * Restore packed runs.
	 * 'mi_pack_runs' should not fail because we restore the original.
	 */
	if (mi_pack_runs(mi, attr, &run2, evcn1 - svcn))
/*
 * attr_insert_range - Insert range (hole) in file.
 * Not for normal files.
 */
int attr_insert_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
{
	struct runs_tree *run = &ni->file.run;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST vcn, svcn, evcn1, len, next_svcn;
	u64 data_size, alloc_size;

	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);

	if (!is_attr_ext(attr_b)) {
		/* It was checked above. See fallocate. */

	if (!attr_b->non_res) {
		data_size = le32_to_cpu(attr_b->res.data_size);
		alloc_size = data_size;
		mask = sbi->cluster_mask; /* cluster_size - 1 */
	} else {
		data_size = le64_to_cpu(attr_b->nres.data_size);
		alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
		mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
	}

	if (vbo > data_size) {
		/* Inserting a range beyond the file size is not allowed. */

	if ((vbo & mask) || (bytes & mask)) {
		/* Only frame-aligned ranges can be inserted. */

	/*
	 * valid_size <= data_size <= alloc_size.
	 * Check alloc_size for the maximum possible.
	 */
	if (bytes > sbi->maxbytes_sparse - alloc_size)
	vcn = vbo >> sbi->cluster_bits;
	len = bytes >> sbi->cluster_bits;

	down_write(&ni->file.run_lock);

	if (!attr_b->non_res) {
		err = attr_set_size(ni, ATTR_DATA, NULL, 0, run,
				    data_size + bytes, NULL, false, NULL);

		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,

		if (!attr_b->non_res) {
			/* Still resident. */
			char *data = Add2Ptr(attr_b,
					     le16_to_cpu(attr_b->res.data_off));

			/* Shift the old tail [vbo, data_size) up by 'bytes' and zero the gap. */
			memmove(data + vbo + bytes, data + vbo, data_size - vbo);
			memset(data + vbo, 0, bytes);
		/* The resident file has become nonresident. */
		data_size = le64_to_cpu(attr_b->nres.data_size);
		alloc_size = le64_to_cpu(attr_b->nres.alloc_size);

	/*
	 * Enumerate all attribute segments and shift the start vcn.
	 */
	a_flags = attr_b->flags;
	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;

	if (svcn <= vcn && vcn < evcn1) {

	attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,

	svcn = le64_to_cpu(attr->nres.svcn);
	evcn1 = le64_to_cpu(attr->nres.evcn) + 1;

	run_truncate(run, 0); /* Clear cached values. */
	err = attr_load_runs(attr, ni, run, NULL);

	if (!run_insert_range(run, vcn, len)) {

	/* Try to pack into the current record as much as possible. */
	err = mi_pack_runs(mi, attr, run, evcn1 + len - svcn);

	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
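	/*
	 * Every following $DATA segment keeps its runs, but its VCN window
	 * moves up by the inserted length: bump svcn/evcn (and the matching
	 * attribute-list entries) by 'len' clusters.
	 */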
	while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi)) &&
	       attr->type == ATTR_DATA && !attr->name_len) {
		le64_add_cpu(&attr->nres.svcn, len);
		le64_add_cpu(&attr->nres.evcn, len);

			le->vcn = attr->nres.svcn;
			ni->attr_list.dirty = true;

	if (next_svcn < evcn1 + len) {
		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
					    next_svcn, evcn1 + len - next_svcn,
					    a_flags, NULL, NULL, NULL);

		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,

			/* ni_insert_nonresident failed. Try to undo. */
			goto undo_insert_range;

	/*
	 * Update the primary attribute segment.
	 */
	if (vbo <= ni->i_valid)
		ni->i_valid += bytes;

	attr_b->nres.data_size = cpu_to_le64(data_size + bytes);
	attr_b->nres.alloc_size = cpu_to_le64(alloc_size + bytes);

	/* ni->i_valid may temporarily differ from valid_size. */
	if (ni->i_valid > data_size + bytes)
		attr_b->nres.valid_size = attr_b->nres.data_size;
	else
		attr_b->nres.valid_size = cpu_to_le64(ni->i_valid);

	ni->vfs_inode.i_size += bytes;
	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
	mark_inode_dirty(&ni->vfs_inode);

	run_truncate(run, 0); /* Clear cached values. */

	up_write(&ni->file.run_lock);

	_ntfs_bad_inode(&ni->vfs_inode);
	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;

	if (svcn <= vcn && vcn < evcn1) {

	attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,

	svcn = le64_to_cpu(attr->nres.svcn);
	evcn1 = le64_to_cpu(attr->nres.evcn) + 1;

	if (attr_load_runs(attr, ni, run, NULL))

	if (!run_collapse_range(run, vcn, len))

	if (mi_pack_runs(mi, attr, run, evcn1 + len - svcn))

	while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi)) &&
	       attr->type == ATTR_DATA && !attr->name_len) {
		le64_sub_cpu(&attr->nres.svcn, len);
		le64_sub_cpu(&attr->nres.evcn, len);

			le->vcn = attr->nres.svcn;
			ni->attr_list.dirty = true;