1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
8 #include <linux/blkdev.h>
9 #include <linux/buffer_head.h>
11 #include <linux/kernel.h>
18 const struct cpu_str NAME_MFT = {
19 4, 0, { '$', 'M', 'F', 'T' },
21 const struct cpu_str NAME_MIRROR = {
22 8, 0, { '$', 'M', 'F', 'T', 'M', 'i', 'r', 'r' },
24 const struct cpu_str NAME_LOGFILE = {
25 8, 0, { '$', 'L', 'o', 'g', 'F', 'i', 'l', 'e' },
27 const struct cpu_str NAME_VOLUME = {
28 7, 0, { '$', 'V', 'o', 'l', 'u', 'm', 'e' },
30 const struct cpu_str NAME_ATTRDEF = {
31 8, 0, { '$', 'A', 't', 't', 'r', 'D', 'e', 'f' },
33 const struct cpu_str NAME_ROOT = {
36 const struct cpu_str NAME_BITMAP = {
37 7, 0, { '$', 'B', 'i', 't', 'm', 'a', 'p' },
39 const struct cpu_str NAME_BOOT = {
40 5, 0, { '$', 'B', 'o', 'o', 't' },
42 const struct cpu_str NAME_BADCLUS = {
43 8, 0, { '$', 'B', 'a', 'd', 'C', 'l', 'u', 's' },
45 const struct cpu_str NAME_QUOTA = {
46 6, 0, { '$', 'Q', 'u', 'o', 't', 'a' },
48 const struct cpu_str NAME_SECURE = {
49 7, 0, { '$', 'S', 'e', 'c', 'u', 'r', 'e' },
51 const struct cpu_str NAME_UPCASE = {
52 7, 0, { '$', 'U', 'p', 'C', 'a', 's', 'e' },
54 const struct cpu_str NAME_EXTEND = {
55 7, 0, { '$', 'E', 'x', 't', 'e', 'n', 'd' },
57 const struct cpu_str NAME_OBJID = {
58 6, 0, { '$', 'O', 'b', 'j', 'I', 'd' },
60 const struct cpu_str NAME_REPARSE = {
61 8, 0, { '$', 'R', 'e', 'p', 'a', 'r', 's', 'e' },
63 const struct cpu_str NAME_USNJRNL = {
64 8, 0, { '$', 'U', 's', 'n', 'J', 'r', 'n', 'l' },
66 const __le16 BAD_NAME[4] = {
67 cpu_to_le16('$'), cpu_to_le16('B'), cpu_to_le16('a'), cpu_to_le16('d'),
69 const __le16 I30_NAME[4] = {
70 cpu_to_le16('$'), cpu_to_le16('I'), cpu_to_le16('3'), cpu_to_le16('0'),
72 const __le16 SII_NAME[4] = {
73 cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('I'), cpu_to_le16('I'),
75 const __le16 SDH_NAME[4] = {
76 cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('H'),
78 const __le16 SDS_NAME[4] = {
79 cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('S'),
81 const __le16 SO_NAME[2] = {
82 cpu_to_le16('$'), cpu_to_le16('O'),
84 const __le16 SQ_NAME[2] = {
85 cpu_to_le16('$'), cpu_to_le16('Q'),
87 const __le16 SR_NAME[2] = {
88 cpu_to_le16('$'), cpu_to_le16('R'),
91 #ifdef CONFIG_NTFS3_LZX_XPRESS
92 const __le16 WOF_NAME[17] = {
93 cpu_to_le16('W'), cpu_to_le16('o'), cpu_to_le16('f'), cpu_to_le16('C'),
94 cpu_to_le16('o'), cpu_to_le16('m'), cpu_to_le16('p'), cpu_to_le16('r'),
95 cpu_to_le16('e'), cpu_to_le16('s'), cpu_to_le16('s'), cpu_to_le16('e'),
96 cpu_to_le16('d'), cpu_to_le16('D'), cpu_to_le16('a'), cpu_to_le16('t'),
104 * ntfs_fix_pre_write - Insert fixups into @rhdr before writing to disk.
106 bool ntfs_fix_pre_write(struct NTFS_RECORD_HEADER *rhdr, size_t bytes)
110 u16 fo = le16_to_cpu(rhdr->fix_off);
111 u16 fn = le16_to_cpu(rhdr->fix_num);
113 if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
114 fn * SECTOR_SIZE > bytes) {
118 /* Get fixup pointer. */
119 fixup = Add2Ptr(rhdr, fo);
121 if (*fixup >= 0x7FFF)
128 ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));
133 ptr += SECTOR_SIZE / sizeof(short);
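/*
 * Illustrative sketch (not compiled, not part of the driver): the
 * update-sequence ("fixup") technique applied above, shown on a plain
 * buffer. The last u16 of every 512-byte sector is saved into the fixup
 * array and overwritten with a shared sequence number; a torn write is
 * detected later when some sector no longer carries that number. All
 * names below are local to this sketch.
 */
#if 0
static void example_apply_fixups(u8 *rec, size_t bytes, __le16 *fixup,
				 __le16 sample)
{
	size_t off;

	*fixup++ = sample; /* The fixup array starts with the sequence number. */
	for (off = SECTOR_SIZE; off <= bytes; off += SECTOR_SIZE) {
		__le16 *last = (__le16 *)(rec + off - sizeof(__le16));

		*fixup++ = *last; /* Save the original word... */
		*last = sample;   /* ...and stamp the sequence number. */
	}
}
#endif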
139 * ntfs_fix_post_read - Remove fixups after reading from disk.
141 * Return: < 0 on error, 0 if ok, 1 if fixups need updating.
143 int ntfs_fix_post_read(struct NTFS_RECORD_HEADER *rhdr, size_t bytes,
150 fo = le16_to_cpu(rhdr->fix_off);
151 fn = simple ? ((bytes >> SECTOR_SHIFT) + 1)
152 : le16_to_cpu(rhdr->fix_num);
155 if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
156 fn * SECTOR_SIZE > bytes) {
157 return -EINVAL; /* Native chkntfs returns ok! */
160 /* Get fixup pointer. */
161 fixup = Add2Ptr(rhdr, fo);
163 ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));
167 /* Test current word. */
168 if (*ptr != sample) {
169 /* Fixup does not match! Is it a serious error? */
175 ptr += SECTOR_SIZE / sizeof(short);
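/*
 * Illustrative counterpart to the above (not compiled): verifying and
 * removing fixups after a read. Every sector's last word must equal the
 * stored sequence number; on success the saved original word is
 * restored, otherwise the record suffered a torn multi-sector write.
 * Names are local to this sketch.
 */
#if 0
static int example_remove_fixups(u8 *rec, size_t bytes, const __le16 *fixup)
{
	__le16 sample = *fixup++; /* Sequence number stored at fixup[0]. */
	size_t off;

	for (off = SECTOR_SIZE; off <= bytes; off += SECTOR_SIZE) {
		__le16 *last = (__le16 *)(rec + off - sizeof(__le16));

		if (*last != sample)
			return -EINVAL; /* Torn write detected. */
		*last = *fixup++; /* Restore the original word. */
	}
	return 0;
}
#endif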
182 * ntfs_extend_init - Load $Extend file.
184 int ntfs_extend_init(struct ntfs_sb_info *sbi)
187 struct super_block *sb = sbi->sb;
188 struct inode *inode, *inode2;
191 if (sbi->volume.major_ver < 3) {
192 ntfs_notice(sb, "Skip $Extend because NTFS version is less than 3");
196 ref.low = cpu_to_le32(MFT_REC_EXTEND);
198 ref.seq = cpu_to_le16(MFT_REC_EXTEND);
199 inode = ntfs_iget5(sb, &ref, &NAME_EXTEND);
201 err = PTR_ERR(inode);
202 ntfs_err(sb, "Failed to load $Extend.");
207 /* If ntfs_iget5() reads from disk, it never returns a bad inode. */
208 if (!S_ISDIR(inode->i_mode)) {
213 /* Try to find $ObjId */
214 inode2 = dir_search_u(inode, &NAME_OBJID, NULL);
215 if (inode2 && !IS_ERR(inode2)) {
216 if (is_bad_inode(inode2)) {
219 sbi->objid.ni = ntfs_i(inode2);
220 sbi->objid_no = inode2->i_ino;
224 /* Try to find $Quota */
225 inode2 = dir_search_u(inode, &NAME_QUOTA, NULL);
226 if (inode2 && !IS_ERR(inode2)) {
227 sbi->quota_no = inode2->i_ino;
231 /* Try to find $Reparse */
232 inode2 = dir_search_u(inode, &NAME_REPARSE, NULL);
233 if (inode2 && !IS_ERR(inode2)) {
234 sbi->reparse.ni = ntfs_i(inode2);
235 sbi->reparse_no = inode2->i_ino;
238 /* Try to find $UsnJrnl */
239 inode2 = dir_search_u(inode, &NAME_USNJRNL, NULL);
240 if (inode2 && !IS_ERR(inode2)) {
241 sbi->usn_jrnl_no = inode2->i_ino;
251 int ntfs_loadlog_and_replay(struct ntfs_inode *ni, struct ntfs_sb_info *sbi)
254 struct super_block *sb = sbi->sb;
255 bool initialized = false;
260 if (ni->vfs_inode.i_size >= 0x100000000ull) {
261 ntfs_err(sb, "\x24LogFile is too big");
266 sbi->flags |= NTFS_FLAGS_LOG_REPLAYING;
268 ref.low = cpu_to_le32(MFT_REC_MFT);
270 ref.seq = cpu_to_le16(1);
272 inode = ntfs_iget5(sb, &ref, NULL);
278 /* Try to use MFT copy. */
279 u64 t64 = sbi->mft.lbo;
281 sbi->mft.lbo = sbi->mft.lbo2;
282 inode = ntfs_iget5(sb, &ref, NULL);
290 ntfs_err(sb, "Failed to load $MFT.");
294 sbi->mft.ni = ntfs_i(inode);
296 /* $LogFile must not contain an attribute list. */
297 err = ni_load_all_mi(sbi->mft.ni);
299 err = log_replay(ni, &initialized);
304 sync_blockdev(sb->s_bdev);
305 invalidate_bdev(sb->s_bdev);
307 if (sbi->flags & NTFS_FLAGS_NEED_REPLAY) {
312 if (sb_rdonly(sb) || !initialized)
315 /* Fill $LogFile with -1 if it is initialized. */
316 err = ntfs_bio_fill_1(sbi, &ni->file.run);
319 sbi->flags &= ~NTFS_FLAGS_LOG_REPLAYING;
327 * Return: Current ATTR_DEF_ENTRY for the given attribute type.
329 const struct ATTR_DEF_ENTRY *ntfs_query_def(struct ntfs_sb_info *sbi,
332 int type_in = le32_to_cpu(type);
334 size_t max_idx = sbi->def_entries - 1;
336 while (min_idx <= max_idx) {
337 size_t i = min_idx + ((max_idx - min_idx) >> 1);
338 const struct ATTR_DEF_ENTRY *entry = sbi->def_table + i;
339 int diff = le32_to_cpu(entry->type) - type_in;
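/*
 * Illustrative sketch (not compiled) of the search performed above: a
 * plain binary search over the sorted $AttrDef table, comparing by
 * attribute type. The half-open [lo, hi) form below is equivalent to
 * the min_idx/max_idx loop; names are local to this sketch.
 */
#if 0
static const struct ATTR_DEF_ENTRY *
example_query_def(const struct ATTR_DEF_ENTRY *table, size_t entries,
		  enum ATTR_TYPE type)
{
	size_t lo = 0, hi = entries;
	int type_in = le32_to_cpu(type);

	while (lo < hi) {
		size_t mid = lo + ((hi - lo) >> 1);
		int diff = le32_to_cpu(table[mid].type) - type_in;

		if (!diff)
			return table + mid;
		if (diff < 0)
			lo = mid + 1; /* Search the upper half. */
		else
			hi = mid;     /* Search the lower half. */
	}
	return NULL;
}
#endif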
354 * ntfs_look_for_free_space - Look for free space in the bitmap.
356 int ntfs_look_for_free_space(struct ntfs_sb_info *sbi, CLST lcn, CLST len,
357 CLST *new_lcn, CLST *new_len,
358 enum ALLOCATE_OPT opt)
362 struct super_block *sb = sbi->sb;
363 size_t alcn, zlen, zeroes, zlcn, zlen2, ztrim, new_zlen;
364 struct wnd_bitmap *wnd = &sbi->used.bitmap;
366 down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
367 if (opt & ALLOCATE_MFT) {
368 zlen = wnd_zone_len(wnd);
371 err = ntfs_refresh_zone(sbi);
375 zlen = wnd_zone_len(wnd);
379 ntfs_err(sbi->sb, "no free space to extend mft");
384 lcn = wnd_zone_bit(wnd);
385 alen = min_t(CLST, len, zlen);
387 wnd_zone_set(wnd, lcn + alen, zlen - alen);
389 err = wnd_set_used(wnd, lcn, alen);
397 * Because cluster 0 is always in use, a zero hint means that we should
398 * use the cached value of 'next_free_lcn' to improve performance.
401 lcn = sbi->used.next_free_lcn;
403 if (lcn >= wnd->nbits)
406 alen = wnd_find(wnd, len, lcn, BITMAP_FIND_MARK_AS_USED, &alcn);
410 /* Try to use clusters from MftZone. */
411 zlen = wnd_zone_len(wnd);
412 zeroes = wnd_zeroes(wnd);
414 /* Check for a request that is too big. */
415 if (len > zeroes + zlen || zlen <= NTFS_MIN_MFT_ZONE) {
420 /* How many clusters to cut from the zone. */
421 zlcn = wnd_zone_bit(wnd);
423 ztrim = clamp_val(len, zlen2, zlen);
424 new_zlen = max_t(size_t, zlen - ztrim, NTFS_MIN_MFT_ZONE);
426 wnd_zone_set(wnd, zlcn, new_zlen);
428 /* Allocate contiguous clusters. */
429 alen = wnd_find(wnd, len, 0,
430 BITMAP_FIND_MARK_AS_USED | BITMAP_FIND_FULL, &alcn);
441 ntfs_unmap_meta(sb, alcn, alen);
443 /* Set the hint for subsequent requests. */
444 if (!(opt & ALLOCATE_MFT))
445 sbi->used.next_free_lcn = alcn + alen;
447 up_write(&wnd->rw_lock);
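/*
 * Illustrative arithmetic (not compiled) for the zone trimming above.
 * Assuming zlen2 is half of the current zone (the elided assignment is
 * taken as such for this sketch), a request of 'len' clusters shrinks
 * the zone by clamp(len, zlen2, zlen) but never below NTFS_MIN_MFT_ZONE.
 */
#if 0
static size_t example_trim_mft_zone(size_t zlen, size_t len)
{
	size_t zlen2 = zlen >> 1; /* Assumption: half of the zone. */
	size_t ztrim = clamp_val(len, zlen2, zlen);

	return max_t(size_t, zlen - ztrim, NTFS_MIN_MFT_ZONE);
}
#endif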
452 * ntfs_extend_mft - Allocate additional MFT records.
454 * sbi->mft.bitmap is locked for write.
457 * ntfs_look_free_mft ->
460 * ni_insert_nonresident ->
463 * ntfs_look_free_mft ->
466 * To avoid recursion, always allocate space for two new MFT records;
467 * see attrib.c: "at least two MFT to avoid recursive loop".
469 static int ntfs_extend_mft(struct ntfs_sb_info *sbi)
472 struct ntfs_inode *ni = sbi->mft.ni;
473 size_t new_mft_total;
474 u64 new_mft_bytes, new_bitmap_bytes;
476 struct wnd_bitmap *wnd = &sbi->mft.bitmap;
478 new_mft_total = (wnd->nbits + MFT_INCREASE_CHUNK + 127) & (CLST)~127;
479 new_mft_bytes = (u64)new_mft_total << sbi->record_bits;
481 /* Step 1: Resize $MFT::DATA. */
482 down_write(&ni->file.run_lock);
483 err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
484 new_mft_bytes, NULL, false, &attr);
487 up_write(&ni->file.run_lock);
491 attr->nres.valid_size = attr->nres.data_size;
492 new_mft_total = le64_to_cpu(attr->nres.alloc_size) >> sbi->record_bits;
495 /* Step 2: Resize $MFT::BITMAP. */
496 new_bitmap_bytes = bitmap_size(new_mft_total);
498 err = attr_set_size(ni, ATTR_BITMAP, NULL, 0, &sbi->mft.bitmap.run,
499 new_bitmap_bytes, &new_bitmap_bytes, true, NULL);
501 /* Refresh MFT Zone if necessary. */
502 down_write_nested(&sbi->used.bitmap.rw_lock, BITMAP_MUTEX_CLUSTERS);
504 ntfs_refresh_zone(sbi);
506 up_write(&sbi->used.bitmap.rw_lock);
507 up_write(&ni->file.run_lock);
512 err = wnd_extend(wnd, new_mft_total);
517 ntfs_clear_mft_tail(sbi, sbi->mft.used, new_mft_total);
519 err = _ni_write_inode(&ni->vfs_inode, 0);
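/*
 * Illustrative arithmetic (not compiled) for the resize above: the new
 * record count is rounded up to a multiple of 128 records and then
 * converted to bytes with record_bits (e.g. 10 for 1K records).
 */
#if 0
static u64 example_new_mft_bytes(size_t nbits, u8 record_bits)
{
	size_t total = (nbits + MFT_INCREASE_CHUNK + 127) & (CLST)~127;

	return (u64)total << record_bits; /* Records -> bytes. */
}
#endif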
525 * ntfs_look_free_mft - Look for a free MFT record.
527 int ntfs_look_free_mft(struct ntfs_sb_info *sbi, CLST *rno, bool mft,
528 struct ntfs_inode *ni, struct mft_inode **mi)
531 size_t zbit, zlen, from, to, fr;
534 struct super_block *sb = sbi->sb;
535 struct wnd_bitmap *wnd = &sbi->mft.bitmap;
538 static_assert(sizeof(sbi->mft.reserved_bitmap) * 8 >=
539 MFT_REC_FREE - MFT_REC_RESERVED);
542 down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
544 zlen = wnd_zone_len(wnd);
546 /* Always reserve space for MFT. */
549 zbit = wnd_zone_bit(wnd);
551 wnd_zone_set(wnd, zbit + 1, zlen - 1);
556 /* No MFT zone. Find the free MFT record nearest to 0. */
557 if (!wnd_find(wnd, 1, MFT_REC_FREE, 0, &zbit)) {
559 mft_total = wnd->nbits;
561 err = ntfs_extend_mft(sbi);
567 if (!mft || MFT_REC_FREE == sbi->mft.next_reserved)
573 * Look for a free record in the reserved area [11-16) ==
574 * [MFT_REC_RESERVED, MFT_REC_FREE); the MFT bitmap always marks it as used.
577 if (!sbi->mft.reserved_bitmap) {
578 /* Create the internal 5-bit bitmap once per session. */
579 sbi->mft.reserved_bitmap = 0xFF;
582 for (ir = MFT_REC_RESERVED; ir < MFT_REC_FREE; ir++) {
584 struct ntfs_inode *ni;
585 struct MFT_REC *mrec;
587 ref.low = cpu_to_le32(ir);
588 ref.seq = cpu_to_le16(ir);
590 i = ntfs_iget5(sb, &ref, NULL);
595 "Invalid reserved record %x",
599 if (is_bad_inode(i)) {
608 if (!is_rec_base(mrec))
611 if (mrec->hard_links)
617 if (ni_find_attr(ni, NULL, NULL, ATTR_NAME,
618 NULL, 0, NULL, NULL))
621 __clear_bit(ir - MFT_REC_RESERVED,
622 &sbi->mft.reserved_bitmap);
626 /* Scan the 5 bits for a zero. Bit 0 == MFT_REC_RESERVED. */
627 zbit = find_next_zero_bit(&sbi->mft.reserved_bitmap,
628 MFT_REC_FREE, MFT_REC_RESERVED);
629 if (zbit >= MFT_REC_FREE) {
630 sbi->mft.next_reserved = MFT_REC_FREE;
635 sbi->mft.next_reserved = zbit;
638 zlen = zbit == MFT_REC_FREE ? (MFT_REC_USER - MFT_REC_FREE) : 4;
639 if (zbit + zlen > wnd->nbits)
640 zlen = wnd->nbits - zbit;
642 while (zlen > 1 && !wnd_is_free(wnd, zbit, zlen))
645 /* [zbit, zbit + zlen) will be used for MFT itself. */
646 from = sbi->mft.used;
651 ntfs_clear_mft_tail(sbi, from, to);
662 wnd_zone_set(wnd, zbit, zlen);
666 /* The request is for a general-purpose record. */
667 if (sbi->mft.next_free < MFT_REC_USER)
668 sbi->mft.next_free = MFT_REC_USER;
671 if (sbi->mft.next_free >= sbi->mft.bitmap.nbits) {
672 } else if (!wnd_find(wnd, 1, MFT_REC_USER, 0, &fr)) {
673 sbi->mft.next_free = sbi->mft.bitmap.nbits;
676 sbi->mft.next_free = *rno + 1;
680 err = ntfs_extend_mft(sbi);
686 if (ni && !ni_add_subrecord(ni, *rno, mi)) {
691 /* We have found a record that is not reserved for the next MFT. */
692 if (*rno >= MFT_REC_FREE)
693 wnd_set_used(wnd, *rno, 1);
694 else if (*rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited)
695 __set_bit(*rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
699 up_write(&wnd->rw_lock);
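/*
 * Illustrative sketch (not compiled) of the reserved-record bitmap used
 * above, with bit 0 standing for MFT_REC_RESERVED as the comment there
 * states. A zero bit marks a reusable reserved record in [11-16);
 * claiming the record sets the bit. Names are local to this sketch.
 */
#if 0
static CLST example_pick_reserved(unsigned long *bitmap)
{
	const size_t nbits = MFT_REC_FREE - MFT_REC_RESERVED; /* 5 bits. */
	size_t bit = find_next_zero_bit(bitmap, nbits, 0);

	if (bit >= nbits)
		return MFT_REC_FREE; /* No reserved record is free. */
	__set_bit(bit, bitmap); /* Claim it. */
	return bit + MFT_REC_RESERVED;
}
#endif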
705 * ntfs_mark_rec_free - Mark record as free.
707 void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno)
709 struct wnd_bitmap *wnd = &sbi->mft.bitmap;
711 down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
712 if (rno >= wnd->nbits)
715 if (rno >= MFT_REC_FREE) {
716 if (!wnd_is_used(wnd, rno, 1))
717 ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
719 wnd_set_free(wnd, rno, 1);
720 } else if (rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited) {
721 __clear_bit(rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
724 if (rno < wnd_zone_bit(wnd))
725 wnd_zone_set(wnd, rno, 1);
726 else if (rno < sbi->mft.next_free && rno >= MFT_REC_USER)
727 sbi->mft.next_free = rno;
730 up_write(&wnd->rw_lock);
734 * ntfs_clear_mft_tail - Format empty records [from, to).
736 * sbi->mft.bitmap is locked for write.
738 int ntfs_clear_mft_tail(struct ntfs_sb_info *sbi, size_t from, size_t to)
743 struct runs_tree *run;
744 struct ntfs_inode *ni;
749 rs = sbi->record_size;
753 down_read(&ni->file.run_lock);
754 vbo = (u64)from * rs;
755 for (; from < to; from++, vbo += rs) {
756 struct ntfs_buffers nb;
758 err = ntfs_get_bh(sbi, run, vbo, rs, &nb);
762 err = ntfs_write_bh(sbi, &sbi->new_rec->rhdr, &nb, 0);
769 sbi->mft.used = from;
770 up_read(&ni->file.run_lock);
775 * ntfs_refresh_zone - Refresh MFT zone.
777 * sbi->used.bitmap is locked for rw.
778 * sbi->mft.bitmap is locked for write.
779 * sbi->mft.ni->file.run_lock is locked for write.
781 int ntfs_refresh_zone(struct ntfs_sb_info *sbi)
783 CLST zone_limit, zone_max, lcn, vcn, len;
785 struct wnd_bitmap *wnd = &sbi->used.bitmap;
786 struct ntfs_inode *ni = sbi->mft.ni;
788 /* Do not change anything unless the MFT zone is empty. */
789 if (wnd_zone_len(wnd))
793 * Compute the MFT zone in two steps.
794 * Ideally we allocate 1/8 of the total clusters for the MFT,
795 * but not more than 512 MB.
797 zone_limit = (512 * 1024 * 1024) >> sbi->cluster_bits;
798 zone_max = wnd->nbits >> 3;
799 if (zone_max > zone_limit)
800 zone_max = zone_limit;
802 vcn = bytes_to_cluster(sbi,
803 (u64)sbi->mft.bitmap.nbits << sbi->record_bits);
805 if (!run_lookup_entry(&ni->file.run, vcn - 1, &lcn, &len, NULL))
808 /* We should always find the last LCN of the MFT. */
809 if (lcn == SPARSE_LCN)
814 /* Try to allocate clusters after last MFT run. */
815 zlen = wnd_find(wnd, zone_max, lcn_s, 0, &lcn_s);
817 ntfs_notice(sbi->sb, "MftZone: unavailable");
821 /* Truncate the zone if it is too large. */
822 wnd_zone_set(wnd, lcn_s, zlen);
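/*
 * Illustrative arithmetic (not compiled) for the sizing above: the MFT
 * zone is the smaller of 1/8 of all clusters and the 512 MB cap. With
 * 4K clusters (cluster_bits == 12) the cap is 0x20000 clusters.
 */
#if 0
static CLST example_zone_max(CLST total_clusters, u8 cluster_bits)
{
	CLST limit = (512 * 1024 * 1024) >> cluster_bits; /* 512 MB cap. */
	CLST zone = total_clusters >> 3; /* 1/8 of the volume. */

	return min(zone, limit);
}
#endif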
828 * ntfs_update_mftmirr - Update $MFTMirr data.
830 int ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
833 struct super_block *sb = sbi->sb;
834 u32 blocksize = sb->s_blocksize;
835 sector_t block1, block2;
838 if (!(sbi->flags & NTFS_FLAGS_MFTMIRR))
842 bytes = sbi->mft.recs_mirr << sbi->record_bits;
843 block1 = sbi->mft.lbo >> sb->s_blocksize_bits;
844 block2 = sbi->mft.lbo2 >> sb->s_blocksize_bits;
846 for (; bytes >= blocksize; bytes -= blocksize) {
847 struct buffer_head *bh1, *bh2;
849 bh1 = sb_bread(sb, block1++);
855 bh2 = sb_getblk(sb, block2++);
862 if (buffer_locked(bh2))
863 __wait_on_buffer(bh2);
866 memcpy(bh2->b_data, bh1->b_data, blocksize);
867 set_buffer_uptodate(bh2);
868 mark_buffer_dirty(bh2);
875 err = sync_dirty_buffer(bh2);
882 sbi->flags &= ~NTFS_FLAGS_MFTMIRR;
891 * Mount: ntfs_set_state(NTFS_DIRTY_DIRTY)
892 * Umount: ntfs_set_state(NTFS_DIRTY_CLEAR)
893 * NTFS error: ntfs_set_state(NTFS_DIRTY_ERROR)
895 int ntfs_set_state(struct ntfs_sb_info *sbi, enum NTFS_DIRTY_FLAGS dirty)
899 struct VOLUME_INFO *info;
900 struct mft_inode *mi;
901 struct ntfs_inode *ni;
904 * Do not change the state if the fs was marked real_dirty.
905 * Do not change the state if the fs is already dirty (or clear).
906 * Do not change anything if mounted read-only.
908 if (sbi->volume.real_dirty || sb_rdonly(sbi->sb))
911 /* Check cached value. */
912 if ((dirty == NTFS_DIRTY_CLEAR ? 0 : VOLUME_FLAG_DIRTY) ==
913 (sbi->volume.flags & VOLUME_FLAG_DIRTY))
920 mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_DIRTY);
922 attr = ni_find_attr(ni, NULL, NULL, ATTR_VOL_INFO, NULL, 0, NULL, &mi);
928 info = resident_data_ex(attr, SIZEOF_ATTRIBUTE_VOLUME_INFO);
935 case NTFS_DIRTY_ERROR:
936 ntfs_notice(sbi->sb, "Mark volume as dirty due to NTFS errors");
937 sbi->volume.real_dirty = true;
939 case NTFS_DIRTY_DIRTY:
940 info->flags |= VOLUME_FLAG_DIRTY;
942 case NTFS_DIRTY_CLEAR:
943 info->flags &= ~VOLUME_FLAG_DIRTY;
946 /* Cache current volume flags. */
947 sbi->volume.flags = info->flags;
956 mark_inode_dirty(&ni->vfs_inode);
957 /* verify(!ntfs_update_mftmirr()); */
960 * If we used wait=1, sync_inode_metadata would wait for the inode's
961 * I/O to finish, and that hangs when the media is removed.
962 * So wait=0 is passed down to sync_inode_metadata,
963 * and filemap_fdatawrite is used for the data blocks.
965 err = sync_inode_metadata(&ni->vfs_inode, 0);
967 err = filemap_fdatawrite(ni->vfs_inode.i_mapping);
973 * security_hash - Calculate a hash of the security descriptor.
975 static inline __le32 security_hash(const void *sd, size_t bytes)
978 const __le32 *ptr = sd;
982 hash = ((hash >> 0x1D) | (hash << 3)) + le32_to_cpu(*ptr++);
983 return cpu_to_le32(hash);
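/*
 * Illustrative sketch (not compiled): the same rotate-and-add hash in
 * CPU byte order. Each 32-bit word is added after rotating the running
 * hash right by 29 bits (0x1D), i.e. left by 3, matching the loop above.
 */
#if 0
static u32 example_security_hash(const void *sd, size_t bytes)
{
	const __le32 *ptr = sd;
	size_t words = bytes >> 2; /* Whole 32-bit words only. */
	u32 hash = 0;

	while (words--)
		hash = ((hash >> 29) | (hash << 3)) + le32_to_cpu(*ptr++);
	return hash;
}
#endif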
986 int ntfs_sb_read(struct super_block *sb, u64 lbo, size_t bytes, void *buffer)
988 struct block_device *bdev = sb->s_bdev;
989 u32 blocksize = sb->s_blocksize;
990 u64 block = lbo >> sb->s_blocksize_bits;
991 u32 off = lbo & (blocksize - 1);
992 u32 op = blocksize - off;
994 for (; bytes; block += 1, off = 0, op = blocksize) {
995 struct buffer_head *bh = __bread(bdev, block, blocksize);
1003 memcpy(buffer, bh->b_data + off, op);
1008 buffer = Add2Ptr(buffer, op);
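/*
 * Illustrative arithmetic (not compiled) for the block math above: with
 * a 4096-byte block size, lbo == 0x1234 falls in block 1 at in-block
 * offset 0x234, and the first chunk copies op == 4096 - 0x234 bytes;
 * every following iteration starts at off == 0 and copies whole blocks.
 */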
1014 int ntfs_sb_write(struct super_block *sb, u64 lbo, size_t bytes,
1015 const void *buf, int wait)
1017 u32 blocksize = sb->s_blocksize;
1018 struct block_device *bdev = sb->s_bdev;
1019 sector_t block = lbo >> sb->s_blocksize_bits;
1020 u32 off = lbo & (blocksize - 1);
1021 u32 op = blocksize - off;
1022 struct buffer_head *bh;
1024 if (!wait && (sb->s_flags & SB_SYNCHRONOUS))
1027 for (; bytes; block += 1, off = 0, op = blocksize) {
1031 if (op < blocksize) {
1032 bh = __bread(bdev, block, blocksize);
1034 ntfs_err(sb, "failed to read block %llx",
1039 bh = __getblk(bdev, block, blocksize);
1044 if (buffer_locked(bh))
1045 __wait_on_buffer(bh);
1049 memcpy(bh->b_data + off, buf, op);
1050 buf = Add2Ptr(buf, op);
1052 memset(bh->b_data + off, -1, op);
1055 set_buffer_uptodate(bh);
1056 mark_buffer_dirty(bh);
1060 int err = sync_dirty_buffer(bh);
1065 "failed to sync buffer at block %llx, error %d",
1079 int ntfs_sb_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
1080 u64 vbo, const void *buf, size_t bytes, int sync)
1082 struct super_block *sb = sbi->sb;
1083 u8 cluster_bits = sbi->cluster_bits;
1084 u32 off = vbo & sbi->cluster_mask;
1085 CLST lcn, clen, vcn = vbo >> cluster_bits, vcn_next;
1089 if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
1092 if (lcn == SPARSE_LCN)
1095 lbo = ((u64)lcn << cluster_bits) + off;
1096 len = ((u64)clen << cluster_bits) - off;
1099 u32 op = min_t(u64, len, bytes);
1100 int err = ntfs_sb_write(sb, lbo, op, buf, sync);
1109 vcn_next = vcn + clen;
1110 if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
1114 if (lcn == SPARSE_LCN)
1118 buf = Add2Ptr(buf, op);
1120 lbo = ((u64)lcn << cluster_bits);
1121 len = ((u64)clen << cluster_bits);
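/*
 * Illustrative sketch (not compiled) of the run-walking pattern shared
 * by the helpers above and below: resolve the first (vcn -> lcn, clen)
 * extent with run_lookup_entry(), then step with run_get_entry() and
 * verify each entry continues exactly at vcn + clen. Names are local to
 * this sketch.
 */
#if 0
static int example_walk_run(const struct runs_tree *run, CLST vcn, CLST end)
{
	CLST lcn, clen, vcn_next;
	size_t idx;

	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
		return -ENOENT;

	while (vcn + clen < end) {
		/* ... consume [vcn, vcn + clen) mapped at lcn ... */
		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next)
			return -ENOENT; /* Hole in the mapping. */
	}
	return 0; /* The final extent is consumed here as well. */
}
#endif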
1127 struct buffer_head *ntfs_bread_run(struct ntfs_sb_info *sbi,
1128 const struct runs_tree *run, u64 vbo)
1130 struct super_block *sb = sbi->sb;
1131 u8 cluster_bits = sbi->cluster_bits;
1135 if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, NULL, NULL))
1136 return ERR_PTR(-ENOENT);
1138 lbo = ((u64)lcn << cluster_bits) + (vbo & sbi->cluster_mask);
1140 return ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
1143 int ntfs_read_run_nb(struct ntfs_sb_info *sbi, const struct runs_tree *run,
1144 u64 vbo, void *buf, u32 bytes, struct ntfs_buffers *nb)
1147 struct super_block *sb = sbi->sb;
1148 u32 blocksize = sb->s_blocksize;
1149 u8 cluster_bits = sbi->cluster_bits;
1150 u32 off = vbo & sbi->cluster_mask;
1152 CLST vcn_next, vcn = vbo >> cluster_bits;
1156 struct buffer_head *bh;
1159 /* First reading of $Volume + $MFTMirr + $LogFile goes here. */
1160 if (vbo > MFT_REC_VOL * sbi->record_size) {
1165 /* Use the boot block's absolute 'MFTCluster' to read the record. */
1166 lbo = vbo + sbi->mft.lbo;
1167 len = sbi->record_size;
1168 } else if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
1172 if (lcn == SPARSE_LCN) {
1177 lbo = ((u64)lcn << cluster_bits) + off;
1178 len = ((u64)clen << cluster_bits) - off;
1181 off = lbo & (blocksize - 1);
1188 u32 len32 = len >= bytes ? bytes : len;
1189 sector_t block = lbo >> sb->s_blocksize_bits;
1192 u32 op = blocksize - off;
1197 bh = ntfs_bread(sb, block);
1204 memcpy(buf, bh->b_data + off, op);
1205 buf = Add2Ptr(buf, op);
1210 } else if (nbh >= ARRAY_SIZE(nb->bh)) {
1227 vcn_next = vcn + clen;
1228 if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
1234 if (lcn == SPARSE_LCN) {
1239 lbo = ((u64)lcn << cluster_bits);
1240 len = ((u64)clen << cluster_bits);
1248 put_bh(nb->bh[--nbh]);
1259 * Return: < 0 on error, 0 if ok, -E_NTFS_FIXUP if fixups need updating.
1261 int ntfs_read_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
1262 struct NTFS_RECORD_HEADER *rhdr, u32 bytes,
1263 struct ntfs_buffers *nb)
1265 int err = ntfs_read_run_nb(sbi, run, vbo, rhdr, bytes, nb);
1269 return ntfs_fix_post_read(rhdr, nb->bytes, true);
1272 int ntfs_get_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
1273 u32 bytes, struct ntfs_buffers *nb)
1276 struct super_block *sb = sbi->sb;
1277 u32 blocksize = sb->s_blocksize;
1278 u8 cluster_bits = sbi->cluster_bits;
1279 CLST vcn_next, vcn = vbo >> cluster_bits;
1288 if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
1293 off = vbo & sbi->cluster_mask;
1294 lbo = ((u64)lcn << cluster_bits) + off;
1295 len = ((u64)clen << cluster_bits) - off;
1297 nb->off = off = lbo & (blocksize - 1);
1300 u32 len32 = min_t(u64, len, bytes);
1301 sector_t block = lbo >> sb->s_blocksize_bits;
1305 struct buffer_head *bh;
1307 if (nbh >= ARRAY_SIZE(nb->bh)) {
1312 op = blocksize - off;
1316 if (op == blocksize) {
1317 bh = sb_getblk(sb, block);
1322 if (buffer_locked(bh))
1323 __wait_on_buffer(bh);
1324 set_buffer_uptodate(bh);
1326 bh = ntfs_bread(sb, block);
1345 vcn_next = vcn + clen;
1346 if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
1352 lbo = ((u64)lcn << cluster_bits);
1353 len = ((u64)clen << cluster_bits);
1358 put_bh(nb->bh[--nbh]);
1367 int ntfs_write_bh(struct ntfs_sb_info *sbi, struct NTFS_RECORD_HEADER *rhdr,
1368 struct ntfs_buffers *nb, int sync)
1371 struct super_block *sb = sbi->sb;
1372 u32 block_size = sb->s_blocksize;
1373 u32 bytes = nb->bytes;
1375 u16 fo = le16_to_cpu(rhdr->fix_off);
1376 u16 fn = le16_to_cpu(rhdr->fix_num);
1381 if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
1382 fn * SECTOR_SIZE > bytes) {
1386 for (idx = 0; bytes && idx < nb->nbufs; idx += 1, off = 0) {
1387 u32 op = block_size - off;
1389 struct buffer_head *bh = nb->bh[idx];
1390 __le16 *ptr, *end_data;
1395 if (buffer_locked(bh))
1396 __wait_on_buffer(bh);
1398 lock_buffer(nb->bh[idx]);
1400 bh_data = bh->b_data + off;
1401 end_data = Add2Ptr(bh_data, op);
1402 memcpy(bh_data, rhdr, op);
1407 fixup = Add2Ptr(bh_data, fo);
1409 t16 = le16_to_cpu(sample);
1410 if (t16 >= 0x7FFF) {
1411 sample = *fixup = cpu_to_le16(1);
1413 sample = cpu_to_le16(t16 + 1);
1417 *(__le16 *)Add2Ptr(rhdr, fo) = sample;
1420 ptr = Add2Ptr(bh_data, SECTOR_SIZE - sizeof(short));
1425 ptr += SECTOR_SIZE / sizeof(short);
1426 } while (ptr < end_data);
1428 set_buffer_uptodate(bh);
1429 mark_buffer_dirty(bh);
1433 int err2 = sync_dirty_buffer(bh);
1440 rhdr = Add2Ptr(rhdr, op);
1447 * ntfs_bio_pages - Read/write pages from/to disk.
1449 int ntfs_bio_pages(struct ntfs_sb_info *sbi, const struct runs_tree *run,
1450 struct page **pages, u32 nr_pages, u64 vbo, u32 bytes,
1454 struct bio *new, *bio = NULL;
1455 struct super_block *sb = sbi->sb;
1456 struct block_device *bdev = sb->s_bdev;
1458 u8 cluster_bits = sbi->cluster_bits;
1459 CLST lcn, clen, vcn, vcn_next;
1460 u32 add, off, page_idx;
1463 struct blk_plug plug;
1468 blk_start_plug(&plug);
1470 /* Align vbo and bytes to 512-byte boundaries. */
1471 lbo = (vbo + bytes + 511) & ~511ull;
1472 vbo = vbo & ~511ull;
1475 vcn = vbo >> cluster_bits;
1476 if (!run_lookup_entry(run, vcn, &lcn, &clen, &run_idx)) {
1480 off = vbo & sbi->cluster_mask;
1485 lbo = ((u64)lcn << cluster_bits) + off;
1486 len = ((u64)clen << cluster_bits) - off;
1488 new = bio_alloc(bdev, nr_pages - page_idx, op, GFP_NOFS);
1490 bio_chain(bio, new);
1494 bio->bi_iter.bi_sector = lbo >> 9;
1497 off = vbo & (PAGE_SIZE - 1);
1498 add = off + len > PAGE_SIZE ? (PAGE_SIZE - off) : len;
1500 if (bio_add_page(bio, page, add, off) < add)
1508 if (add + off == PAGE_SIZE) {
1510 if (WARN_ON(page_idx >= nr_pages)) {
1514 page = pages[page_idx];
1523 vcn_next = vcn + clen;
1524 if (!run_get_entry(run, ++run_idx, &vcn, &lcn, &clen) ||
1534 err = submit_bio_wait(bio);
1537 blk_finish_plug(&plug);
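/*
 * Illustrative arithmetic (not compiled) for the 512-byte alignment
 * above: vbo == 0x3210, bytes == 0x100 first yields an end offset of
 * 0x3310, which rounds up to 0x3400; vbo rounds down to 0x3200, so the
 * I/O actually covers [0x3200, 0x3400), i.e. 0x200 bytes.
 */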
1543 * ntfs_bio_fill_1 - Helper for ntfs_loadlog_and_replay().
1545 * Fill the on-disk logfile range with -1;
1546 * this marks the logfile as empty.
1548 int ntfs_bio_fill_1(struct ntfs_sb_info *sbi, const struct runs_tree *run)
1551 struct super_block *sb = sbi->sb;
1552 struct block_device *bdev = sb->s_bdev;
1553 u8 cluster_bits = sbi->cluster_bits;
1554 struct bio *new, *bio = NULL;
1560 struct blk_plug plug;
1562 fill = alloc_page(GFP_KERNEL);
1566 kaddr = kmap_atomic(fill);
1567 memset(kaddr, -1, PAGE_SIZE);
1568 kunmap_atomic(kaddr);
1569 flush_dcache_page(fill);
1572 if (!run_lookup_entry(run, 0, &lcn, &clen, &run_idx)) {
1578 * TODO: Try blkdev_issue_write_same.
1580 blk_start_plug(&plug);
1582 lbo = (u64)lcn << cluster_bits;
1583 len = (u64)clen << cluster_bits;
1585 new = bio_alloc(bdev, BIO_MAX_VECS, REQ_OP_WRITE, GFP_NOFS);
1587 bio_chain(bio, new);
1591 bio->bi_iter.bi_sector = lbo >> 9;
1594 u32 add = len > PAGE_SIZE ? PAGE_SIZE : len;
1596 if (bio_add_page(bio, fill, add, 0) < add)
1604 } while (run_get_entry(run, ++run_idx, NULL, &lcn, &clen));
1607 err = submit_bio_wait(bio);
1610 blk_finish_plug(&plug);
1618 int ntfs_vbo_to_lbo(struct ntfs_sb_info *sbi, const struct runs_tree *run,
1619 u64 vbo, u64 *lbo, u64 *bytes)
1623 u8 cluster_bits = sbi->cluster_bits;
1625 if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, &len, NULL))
1628 off = vbo & sbi->cluster_mask;
1629 *lbo = lcn == SPARSE_LCN ? -1 : (((u64)lcn << cluster_bits) + off);
1630 *bytes = ((u64)len << cluster_bits) - off;
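/*
 * Illustrative arithmetic (not compiled) for the translation above:
 * with 4K clusters, vbo == 0x5123 lies in VCN 5 at in-cluster offset
 * 0x123; if VCN 5 maps to LCN 0x40, then lbo == (0x40 << 12) + 0x123,
 * and up to (clen << 12) - 0x123 bytes are contiguous on disk.
 */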
1635 struct ntfs_inode *ntfs_new_inode(struct ntfs_sb_info *sbi, CLST rno, bool dir)
1638 struct super_block *sb = sbi->sb;
1639 struct inode *inode = new_inode(sb);
1640 struct ntfs_inode *ni;
1643 return ERR_PTR(-ENOMEM);
1647 err = mi_format_new(&ni->mi, sbi, rno, dir ? RECORD_FLAG_DIR : 0,
1653 if (insert_inode_locked(inode) < 0) {
1667 * O:BAG:BAD:(A;OICI;FA;;;WD)
1668 * Owner S-1-5-32-544 (Administrators)
1669 * Group S-1-5-32-544 (Administrators)
1670 * ACE: allow S-1-1-0 (Everyone) with FILE_ALL_ACCESS
1672 const u8 s_default_security[] __aligned(8) = {
1673 0x01, 0x00, 0x04, 0x80, 0x30, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
1674 0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x02, 0x00, 0x1C, 0x00,
1675 0x01, 0x00, 0x00, 0x00, 0x00, 0x03, 0x14, 0x00, 0xFF, 0x01, 0x1F, 0x00,
1676 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
1677 0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x20, 0x00, 0x00, 0x00,
1678 0x20, 0x02, 0x00, 0x00, 0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
1679 0x20, 0x00, 0x00, 0x00, 0x20, 0x02, 0x00, 0x00,
1682 static_assert(sizeof(s_default_security) == 0x50);
1684 static inline u32 sid_length(const struct SID *sid)
1686 return struct_size(sid, SubAuthority, sid->SubAuthorityCount);
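/*
 * Illustrative equivalent (not compiled): struct_size() above sizes a
 * struct with a flexible array member, roughly the open-coded form
 * below, but with saturating overflow checks.
 */
#if 0
	len = sizeof(struct SID) +
	      sid->SubAuthorityCount * sizeof(sid->SubAuthority[0]);
#endif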
1692 * Thanks to Mark Harmstone for the idea.
1694 static bool is_acl_valid(const struct ACL *acl, u32 len)
1696 const struct ACE_HEADER *ace;
1698 u16 ace_count, ace_size;
1700 if (acl->AclRevision != ACL_REVISION &&
1701 acl->AclRevision != ACL_REVISION_DS) {
1703 * This value should be ACL_REVISION, unless the ACL contains an
1704 * object-specific ACE, in which case this value must be ACL_REVISION_DS.
1705 * All ACEs in an ACL must be at the same revision level.
1713 if (le16_to_cpu(acl->AclSize) > len)
1719 len -= sizeof(struct ACL);
1720 ace = (struct ACE_HEADER *)&acl[1];
1721 ace_count = le16_to_cpu(acl->AceCount);
1723 for (i = 0; i < ace_count; i++) {
1724 if (len < sizeof(struct ACE_HEADER))
1727 ace_size = le16_to_cpu(ace->AceSize);
1732 ace = Add2Ptr(ace, ace_size);
1738 bool is_sd_valid(const struct SECURITY_DESCRIPTOR_RELATIVE *sd, u32 len)
1740 u32 sd_owner, sd_group, sd_sacl, sd_dacl;
1742 if (len < sizeof(struct SECURITY_DESCRIPTOR_RELATIVE))
1745 if (sd->Revision != 1)
1751 if (!(sd->Control & SE_SELF_RELATIVE))
1754 sd_owner = le32_to_cpu(sd->Owner);
1756 const struct SID *owner = Add2Ptr(sd, sd_owner);
1758 if (sd_owner + offsetof(struct SID, SubAuthority) > len)
1761 if (owner->Revision != 1)
1764 if (sd_owner + sid_length(owner) > len)
1768 sd_group = le32_to_cpu(sd->Group);
1770 const struct SID *group = Add2Ptr(sd, sd_group);
1772 if (sd_group + offsetof(struct SID, SubAuthority) > len)
1775 if (group->Revision != 1)
1778 if (sd_group + sid_length(group) > len)
1782 sd_sacl = le32_to_cpu(sd->Sacl);
1784 const struct ACL *sacl = Add2Ptr(sd, sd_sacl);
1786 if (sd_sacl + sizeof(struct ACL) > len)
1789 if (!is_acl_valid(sacl, len - sd_sacl))
1793 sd_dacl = le32_to_cpu(sd->Dacl);
1795 const struct ACL *dacl = Add2Ptr(sd, sd_dacl);
1797 if (sd_dacl + sizeof(struct ACL) > len)
1800 if (!is_acl_valid(dacl, len - sd_dacl))
1808 * ntfs_security_init - Load and parse $Secure.
1810 int ntfs_security_init(struct ntfs_sb_info *sbi)
1813 struct super_block *sb = sbi->sb;
1814 struct inode *inode;
1815 struct ntfs_inode *ni;
1817 struct ATTRIB *attr;
1818 struct ATTR_LIST_ENTRY *le;
1822 struct NTFS_DE_SII *sii_e;
1823 struct ntfs_fnd *fnd_sii = NULL;
1824 const struct INDEX_ROOT *root_sii;
1825 const struct INDEX_ROOT *root_sdh;
1826 struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
1827 struct ntfs_index *indx_sii = &sbi->security.index_sii;
1829 ref.low = cpu_to_le32(MFT_REC_SECURE);
1831 ref.seq = cpu_to_le16(MFT_REC_SECURE);
1833 inode = ntfs_iget5(sb, &ref, &NAME_SECURE);
1834 if (IS_ERR(inode)) {
1835 err = PTR_ERR(inode);
1836 ntfs_err(sb, "Failed to load $Secure.");
1845 attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SDH_NAME,
1846 ARRAY_SIZE(SDH_NAME), NULL, NULL);
1852 root_sdh = resident_data(attr);
1853 if (root_sdh->type != ATTR_ZERO ||
1854 root_sdh->rule != NTFS_COLLATION_TYPE_SECURITY_HASH) {
1859 err = indx_init(indx_sdh, sbi, attr, INDEX_MUTEX_SDH);
1863 attr = ni_find_attr(ni, attr, &le, ATTR_ROOT, SII_NAME,
1864 ARRAY_SIZE(SII_NAME), NULL, NULL);
1870 root_sii = resident_data(attr);
1871 if (root_sii->type != ATTR_ZERO ||
1872 root_sii->rule != NTFS_COLLATION_TYPE_UINT) {
1877 err = indx_init(indx_sii, sbi, attr, INDEX_MUTEX_SII);
1881 fnd_sii = fnd_get();
1887 sds_size = inode->i_size;
1889 /* Find the last valid Id. */
1890 sbi->security.next_id = SECURITY_ID_FIRST;
1891 /* Always write new security descriptors at the end of the bucket. */
1892 sbi->security.next_off =
1893 ALIGN(sds_size - SecurityDescriptorsBlockSize, 16);
1901 err = indx_find_raw(indx_sii, ni, root_sii, &ne, &off, fnd_sii);
1905 sii_e = (struct NTFS_DE_SII *)ne;
1906 if (le16_to_cpu(ne->view.data_size) < SIZEOF_SECURITY_HDR)
1909 next_id = le32_to_cpu(sii_e->sec_id) + 1;
1910 if (next_id >= sbi->security.next_id)
1911 sbi->security.next_id = next_id;
1914 sbi->security.ni = ni;
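/*
 * Illustrative arithmetic (not compiled) for next_off above: the SDS
 * stream is mirrored in 256K blocks, so the last live descriptor sits
 * one SecurityDescriptorsBlockSize before the stream end, and the next
 * write position is that offset aligned up to 16 bytes. E.g. a stream
 * size of 0x40438 gives ALIGN(0x40438 - 0x40000, 16) == 0x440.
 */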
1924 * ntfs_get_security_by_id - Read security descriptor by id.
1926 int ntfs_get_security_by_id(struct ntfs_sb_info *sbi, __le32 security_id,
1927 struct SECURITY_DESCRIPTOR_RELATIVE **sd,
1932 struct ntfs_inode *ni = sbi->security.ni;
1933 struct ntfs_index *indx = &sbi->security.index_sii;
1935 struct NTFS_DE_SII *sii_e;
1936 struct ntfs_fnd *fnd_sii;
1937 struct SECURITY_HDR d_security;
1938 const struct INDEX_ROOT *root_sii;
1943 mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);
1945 fnd_sii = fnd_get();
1951 root_sii = indx_get_root(indx, ni, NULL, NULL);
1957 /* Try to find this SECURITY descriptor in the SII index. */
1958 err = indx_find(indx, ni, root_sii, &security_id, sizeof(security_id),
1959 NULL, &diff, (struct NTFS_DE **)&sii_e, fnd_sii);
1966 t32 = le32_to_cpu(sii_e->sec_hdr.size);
1967 if (t32 < SIZEOF_SECURITY_HDR) {
1972 if (t32 > SIZEOF_SECURITY_HDR + 0x10000) {
1973 /* The security descriptor is too big; 0x10000 is an arbitrary upper bound. */
1978 *size = t32 - SIZEOF_SECURITY_HDR;
1980 p = kmalloc(*size, GFP_NOFS);
1986 err = ntfs_read_run_nb(sbi, &ni->file.run,
1987 le64_to_cpu(sii_e->sec_hdr.off), &d_security,
1988 sizeof(d_security), NULL);
1992 if (memcmp(&d_security, &sii_e->sec_hdr, SIZEOF_SECURITY_HDR)) {
1997 err = ntfs_read_run_nb(sbi, &ni->file.run,
1998 le64_to_cpu(sii_e->sec_hdr.off) +
1999 SIZEOF_SECURITY_HDR,
2016 * ntfs_insert_security - Insert security descriptor into $Secure::SDS.
2018 * SECURITY Descriptor Stream data is organized into chunks of 256K bytes
2019 * and it contains a mirror copy of each security descriptor. When writing
2020 * to a security descriptor at location X, another copy will be written at
2021 * location (X+256K).
2022 * When writing a security descriptor that would cross a 256K boundary,
2023 * the write pointer is advanced by 256K to skip
2024 * over the mirror portion.
2026 int ntfs_insert_security(struct ntfs_sb_info *sbi,
2027 const struct SECURITY_DESCRIPTOR_RELATIVE *sd,
2028 u32 size_sd, __le32 *security_id, bool *inserted)
2031 struct ntfs_inode *ni = sbi->security.ni;
2032 struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
2033 struct ntfs_index *indx_sii = &sbi->security.index_sii;
2034 struct NTFS_DE_SDH *e;
2035 struct NTFS_DE_SDH sdh_e;
2036 struct NTFS_DE_SII sii_e;
2037 struct SECURITY_HDR *d_security;
2038 u32 new_sec_size = size_sd + SIZEOF_SECURITY_HDR;
2039 u32 aligned_sec_size = ALIGN(new_sec_size, 16);
2040 struct SECURITY_KEY hash_key;
2041 struct ntfs_fnd *fnd_sdh = NULL;
2042 const struct INDEX_ROOT *root_sdh;
2043 const struct INDEX_ROOT *root_sii;
2044 u64 mirr_off, new_sds_size;
2047 static_assert((1 << Log2OfSecurityDescriptorsBlockSize) ==
2048 SecurityDescriptorsBlockSize);
2050 hash_key.hash = security_hash(sd, size_sd);
2051 hash_key.sec_id = SECURITY_ID_INVALID;
2055 *security_id = SECURITY_ID_INVALID;
2057 /* Allocate a temporary buffer. */
2058 d_security = kzalloc(aligned_sec_size, GFP_NOFS);
2062 mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);
2064 fnd_sdh = fnd_get();
2070 root_sdh = indx_get_root(indx_sdh, ni, NULL, NULL);
2076 root_sii = indx_get_root(indx_sii, ni, NULL, NULL);
2083 * Check if such security already exists.
2084 * Use the "SDH" index and the hash to get the offset in "SDS".
2086 err = indx_find(indx_sdh, ni, root_sdh, &hash_key, sizeof(hash_key),
2087 &d_security->key.sec_id, &diff, (struct NTFS_DE **)&e,
2093 if (le32_to_cpu(e->sec_hdr.size) == new_sec_size) {
2094 err = ntfs_read_run_nb(sbi, &ni->file.run,
2095 le64_to_cpu(e->sec_hdr.off),
2096 d_security, new_sec_size, NULL);
2100 if (le32_to_cpu(d_security->size) == new_sec_size &&
2101 d_security->key.hash == hash_key.hash &&
2102 !memcmp(d_security + 1, sd, size_sd)) {
2103 *security_id = d_security->key.sec_id;
2104 /* Such security already exists. */
2110 err = indx_find_sort(indx_sdh, ni, root_sdh,
2111 (struct NTFS_DE **)&e, fnd_sdh);
2115 if (!e || e->key.hash != hash_key.hash)
2119 /* Zero unused space. */
2120 next = sbi->security.next_off & (SecurityDescriptorsBlockSize - 1);
2121 left = SecurityDescriptorsBlockSize - next;
2123 /* Zero gap until SecurityDescriptorsBlockSize. */
2124 if (left < new_sec_size) {
2125 /* Zero "left" bytes from sbi->security.next_off. */
2126 sbi->security.next_off += SecurityDescriptorsBlockSize + left;
2129 /* Zero tail of previous security. */
2130 //used = ni->vfs_inode.i_size & (SecurityDescriptorsBlockSize - 1);
2134 * 0x40438 == ni->vfs_inode.i_size
2135 * 0x00440 == sbi->security.next_off
2136 * need to zero [0x438-0x440)
2137 * if (next > used) {
2138 * u32 tozero = next - used;
2139 * zero "tozero" bytes from sbi->security.next_off - tozero
2142 /* Format new security descriptor. */
2143 d_security->key.hash = hash_key.hash;
2144 d_security->key.sec_id = cpu_to_le32(sbi->security.next_id);
2145 d_security->off = cpu_to_le64(sbi->security.next_off);
2146 d_security->size = cpu_to_le32(new_sec_size);
2147 memcpy(d_security + 1, sd, size_sd);
2149 /* Write main SDS bucket. */
2150 err = ntfs_sb_write_run(sbi, &ni->file.run, sbi->security.next_off,
2151 d_security, aligned_sec_size, 0);
2156 mirr_off = sbi->security.next_off + SecurityDescriptorsBlockSize;
2157 new_sds_size = mirr_off + aligned_sec_size;
2159 if (new_sds_size > ni->vfs_inode.i_size) {
2160 err = attr_set_size(ni, ATTR_DATA, SDS_NAME,
2161 ARRAY_SIZE(SDS_NAME), &ni->file.run,
2162 new_sds_size, &new_sds_size, false, NULL);
2167 /* Write the mirror copy of the SDS bucket. */
2168 err = ntfs_sb_write_run(sbi, &ni->file.run, mirr_off, d_security,
2169 aligned_sec_size, 0);
2173 /* Fill SII entry. */
2174 sii_e.de.view.data_off =
2175 cpu_to_le16(offsetof(struct NTFS_DE_SII, sec_hdr));
2176 sii_e.de.view.data_size = cpu_to_le16(SIZEOF_SECURITY_HDR);
2177 sii_e.de.view.res = 0;
2178 sii_e.de.size = cpu_to_le16(SIZEOF_SII_DIRENTRY);
2179 sii_e.de.key_size = cpu_to_le16(sizeof(d_security->key.sec_id));
2182 sii_e.sec_id = d_security->key.sec_id;
2183 memcpy(&sii_e.sec_hdr, d_security, SIZEOF_SECURITY_HDR);
2185 err = indx_insert_entry(indx_sii, ni, &sii_e.de, NULL, NULL, 0);
2189 /* Fill SDH entry. */
2190 sdh_e.de.view.data_off =
2191 cpu_to_le16(offsetof(struct NTFS_DE_SDH, sec_hdr));
2192 sdh_e.de.view.data_size = cpu_to_le16(SIZEOF_SECURITY_HDR);
2193 sdh_e.de.view.res = 0;
2194 sdh_e.de.size = cpu_to_le16(SIZEOF_SDH_DIRENTRY);
2195 sdh_e.de.key_size = cpu_to_le16(sizeof(sdh_e.key));
2198 sdh_e.key.hash = d_security->key.hash;
2199 sdh_e.key.sec_id = d_security->key.sec_id;
2200 memcpy(&sdh_e.sec_hdr, d_security, SIZEOF_SECURITY_HDR);
2201 sdh_e.magic[0] = cpu_to_le16('I');
2202 sdh_e.magic[1] = cpu_to_le16('I');
2205 err = indx_insert_entry(indx_sdh, ni, &sdh_e.de, (void *)(size_t)1,
2210 *security_id = d_security->key.sec_id;
2214 /* Update Id and offset for next descriptor. */
2215 sbi->security.next_id += 1;
2216 sbi->security.next_off += aligned_sec_size;
2220 mark_inode_dirty(&ni->vfs_inode);
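/*
 * Illustrative sketch (not compiled) of the 256K bookkeeping described
 * before ntfs_insert_security(): a descriptor that would cross a 256K
 * boundary is pushed past the rest of the chunk plus the chunk's mirror
 * copy, matching the next/left logic above. Names are local to this
 * sketch.
 */
#if 0
static u64 example_sds_advance(u64 next_off, u32 sec_size)
{
	u32 used = next_off & (SecurityDescriptorsBlockSize - 1);
	u32 left = SecurityDescriptorsBlockSize - used;

	if (left < sec_size) /* Would cross the 256K boundary. */
		next_off += SecurityDescriptorsBlockSize + left;
	return next_off;
}
#endif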
2228 * ntfs_reparse_init - Load and parse $Extend/$Reparse.
2230 int ntfs_reparse_init(struct ntfs_sb_info *sbi)
2233 struct ntfs_inode *ni = sbi->reparse.ni;
2234 struct ntfs_index *indx = &sbi->reparse.index_r;
2235 struct ATTRIB *attr;
2236 struct ATTR_LIST_ENTRY *le;
2237 const struct INDEX_ROOT *root_r;
2243 attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SR_NAME,
2244 ARRAY_SIZE(SR_NAME), NULL, NULL);
2250 root_r = resident_data(attr);
2251 if (root_r->type != ATTR_ZERO ||
2252 root_r->rule != NTFS_COLLATION_TYPE_UINTS) {
2257 err = indx_init(indx, sbi, attr, INDEX_MUTEX_SR);
2266 * ntfs_objid_init - Load and parse $Extend/$ObjId.
2268 int ntfs_objid_init(struct ntfs_sb_info *sbi)
2271 struct ntfs_inode *ni = sbi->objid.ni;
2272 struct ntfs_index *indx = &sbi->objid.index_o;
2273 struct ATTRIB *attr;
2274 struct ATTR_LIST_ENTRY *le;
2275 const struct INDEX_ROOT *root;
2281 attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SO_NAME,
2282 ARRAY_SIZE(SO_NAME), NULL, NULL);
2288 root = resident_data(attr);
2289 if (root->type != ATTR_ZERO ||
2290 root->rule != NTFS_COLLATION_TYPE_UINTS) {
2295 err = indx_init(indx, sbi, attr, INDEX_MUTEX_SO);
2303 int ntfs_objid_remove(struct ntfs_sb_info *sbi, struct GUID *guid)
2306 struct ntfs_inode *ni = sbi->objid.ni;
2307 struct ntfs_index *indx = &sbi->objid.index_o;
2312 mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_OBJID);
2314 err = indx_delete_entry(indx, ni, guid, sizeof(*guid), NULL);
2316 mark_inode_dirty(&ni->vfs_inode);
2322 int ntfs_insert_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
2323 const struct MFT_REF *ref)
2326 struct ntfs_inode *ni = sbi->reparse.ni;
2327 struct ntfs_index *indx = &sbi->reparse.index_r;
2328 struct NTFS_DE_R re;
2333 memset(&re, 0, sizeof(re));
2335 re.de.view.data_off = cpu_to_le16(offsetof(struct NTFS_DE_R, zero));
2336 re.de.size = cpu_to_le16(sizeof(struct NTFS_DE_R));
2337 re.de.key_size = cpu_to_le16(sizeof(re.key));
2339 re.key.ReparseTag = rtag;
2340 memcpy(&re.key.ref, ref, sizeof(*ref));
2342 mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);
2344 err = indx_insert_entry(indx, ni, &re.de, NULL, NULL, 0);
2346 mark_inode_dirty(&ni->vfs_inode);
2352 int ntfs_remove_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
2353 const struct MFT_REF *ref)
2356 struct ntfs_inode *ni = sbi->reparse.ni;
2357 struct ntfs_index *indx = &sbi->reparse.index_r;
2358 struct ntfs_fnd *fnd = NULL;
2359 struct REPARSE_KEY rkey;
2360 struct NTFS_DE_R *re;
2361 struct INDEX_ROOT *root_r;
2366 rkey.ReparseTag = rtag;
2369 mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);
2372 err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
2382 root_r = indx_get_root(indx, ni, NULL, NULL);
2388 /* Passing 1 forces indx_find() to ignore rkey.ReparseTag when comparing keys. */
2389 err = indx_find(indx, ni, root_r, &rkey, sizeof(rkey), (void *)1, &diff,
2390 (struct NTFS_DE **)&re, fnd);
2394 if (memcmp(&re->key.ref, ref, sizeof(*ref))) {
2395 /* Should be impossible; the volume looks corrupt. */
2399 memcpy(&rkey, &re->key, sizeof(rkey));
2404 err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
2412 mark_inode_dirty(&ni->vfs_inode);
2418 static inline void ntfs_unmap_and_discard(struct ntfs_sb_info *sbi, CLST lcn,
2421 ntfs_unmap_meta(sbi->sb, lcn, len);
2422 ntfs_discard(sbi, lcn, len);
2425 void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim)
2428 struct wnd_bitmap *wnd = &sbi->used.bitmap;
2430 down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
2431 if (!wnd_is_used(wnd, lcn, len)) {
2432 ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
2436 for (i = lcn; i < end; i++) {
2437 if (wnd_is_used(wnd, i, 1)) {
2448 ntfs_unmap_and_discard(sbi, lcn, len);
2450 wnd_set_free(wnd, lcn, len);
2459 ntfs_unmap_and_discard(sbi, lcn, len);
2460 wnd_set_free(wnd, lcn, len);
2463 up_write(&wnd->rw_lock);
2467 * run_deallocate - Deallocate clusters.
2469 int run_deallocate(struct ntfs_sb_info *sbi, struct runs_tree *run, bool trim)
2474 while (run_get_entry(run, idx++, NULL, &lcn, &len)) {
2475 if (lcn == SPARSE_LCN)
2478 mark_as_free_ex(sbi, lcn, len, trim);