1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
8 #include <linux/blkdev.h>
9 #include <linux/buffer_head.h>
/*
 * Unicode names of the NTFS system (metadata) files, stored as cpu_str
 * (character count, pad byte, characters). Used to load metafiles by name.
 * NOTE(review): source dump is elided — some initializer bodies (e.g.
 * NAME_ROOT) are not visible here.
 */
17 const struct cpu_str NAME_MFT = {
18 4, 0, { '$', 'M', 'F', 'T' },
20 const struct cpu_str NAME_MIRROR = {
21 8, 0, { '$', 'M', 'F', 'T', 'M', 'i', 'r', 'r' },
23 const struct cpu_str NAME_LOGFILE = {
24 8, 0, { '$', 'L', 'o', 'g', 'F', 'i', 'l', 'e' },
26 const struct cpu_str NAME_VOLUME = {
27 7, 0, { '$', 'V', 'o', 'l', 'u', 'm', 'e' },
29 const struct cpu_str NAME_ATTRDEF = {
30 8, 0, { '$', 'A', 't', 't', 'r', 'D', 'e', 'f' },
32 const struct cpu_str NAME_ROOT = {
35 const struct cpu_str NAME_BITMAP = {
36 7, 0, { '$', 'B', 'i', 't', 'm', 'a', 'p' },
38 const struct cpu_str NAME_BOOT = {
39 5, 0, { '$', 'B', 'o', 'o', 't' },
41 const struct cpu_str NAME_BADCLUS = {
42 8, 0, { '$', 'B', 'a', 'd', 'C', 'l', 'u', 's' },
44 const struct cpu_str NAME_QUOTA = {
45 6, 0, { '$', 'Q', 'u', 'o', 't', 'a' },
47 const struct cpu_str NAME_SECURE = {
48 7, 0, { '$', 'S', 'e', 'c', 'u', 'r', 'e' },
50 const struct cpu_str NAME_UPCASE = {
51 7, 0, { '$', 'U', 'p', 'C', 'a', 's', 'e' },
53 const struct cpu_str NAME_EXTEND = {
54 7, 0, { '$', 'E', 'x', 't', 'e', 'n', 'd' },
56 const struct cpu_str NAME_OBJID = {
57 6, 0, { '$', 'O', 'b', 'j', 'I', 'd' },
59 const struct cpu_str NAME_REPARSE = {
60 8, 0, { '$', 'R', 'e', 'p', 'a', 'r', 's', 'e' },
62 const struct cpu_str NAME_USNJRNL = {
63 8, 0, { '$', 'U', 's', 'n', 'J', 'r', 'n', 'l' },
/*
 * Little-endian (on-disk order) attribute/index names used for raw
 * comparisons against record data.
 */
65 const __le16 BAD_NAME[4] = {
66 cpu_to_le16('$'), cpu_to_le16('B'), cpu_to_le16('a'), cpu_to_le16('d'),
68 const __le16 I30_NAME[4] = {
69 cpu_to_le16('$'), cpu_to_le16('I'), cpu_to_le16('3'), cpu_to_le16('0'),
71 const __le16 SII_NAME[4] = {
72 cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('I'), cpu_to_le16('I'),
74 const __le16 SDH_NAME[4] = {
75 cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('H'),
77 const __le16 SDS_NAME[4] = {
78 cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('S'),
80 const __le16 SO_NAME[2] = {
81 cpu_to_le16('$'), cpu_to_le16('O'),
83 const __le16 SQ_NAME[2] = {
84 cpu_to_le16('$'), cpu_to_le16('Q'),
86 const __le16 SR_NAME[2] = {
87 cpu_to_le16('$'), cpu_to_le16('R'),
/*
 * Name of the Windows Overlay Filter stream ("WofCompressedData"),
 * only needed when LZX/Xpress decompression support is compiled in.
 * NOTE(review): the tail of this initializer (17th element and #endif)
 * is not visible in this elided dump.
 */
90 #ifdef CONFIG_NTFS3_LZX_XPRESS
91 const __le16 WOF_NAME[17] = {
92 cpu_to_le16('W'), cpu_to_le16('o'), cpu_to_le16('f'), cpu_to_le16('C'),
93 cpu_to_le16('o'), cpu_to_le16('m'), cpu_to_le16('p'), cpu_to_le16('r'),
94 cpu_to_le16('e'), cpu_to_le16('s'), cpu_to_le16('s'), cpu_to_le16('e'),
95 cpu_to_le16('d'), cpu_to_le16('D'), cpu_to_le16('a'), cpu_to_le16('t'),
103 * ntfs_fix_pre_write - Insert fixups into @rhdr before writing to disk.
/*
 * Apply NTFS update-sequence fixups to @rhdr (of @bytes total) prior to
 * writing the record to disk. Returns false if the fixup header is invalid.
 * (Body shown here is an elided fragment of the full function.)
 */
105 bool ntfs_fix_pre_write(struct NTFS_RECORD_HEADER *rhdr, size_t bytes)
109 u16 fo = le16_to_cpu(rhdr->fix_off);
110 u16 fn = le16_to_cpu(rhdr->fix_num);
/* Reject odd fixup offsets, arrays past SECTOR_SIZE, fn==0, or a record
 * larger than @bytes. */
112 if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
113 fn * SECTOR_SIZE > bytes) {
117 /* Get fixup pointer. */
118 fixup = Add2Ptr(rhdr, fo);
/* Wrap the update sequence number before it reaches the reserved range. */
120 if (*fixup >= 0x7FFF)
/* Point at the last word of each sector, stepping one sector at a time. */
127 ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));
132 ptr += SECTOR_SIZE / sizeof(short);
138 * ntfs_fix_post_read - Remove fixups after reading from disk.
140 * Return: < 0 if error, 0 if ok, 1 if need to update fixups.
/*
 * Undo update-sequence fixups in @rhdr after reading a record from disk.
 * When @simple, the fixup count is derived from @bytes instead of the
 * header. Returns < 0 on error, 0 if ok, 1 if fixups need updating.
 * (Body shown here is an elided fragment of the full function.)
 */
142 int ntfs_fix_post_read(struct NTFS_RECORD_HEADER *rhdr, size_t bytes,
149 fo = le16_to_cpu(rhdr->fix_off);
150 fn = simple ? ((bytes >> SECTOR_SHIFT) + 1)
151 : le16_to_cpu(rhdr->fix_num);
/* Same sanity checks as ntfs_fix_pre_write(). */
154 if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
155 fn * SECTOR_SIZE > bytes) {
156 return -EINVAL; /* Native chkntfs returns ok! */
159 /* Get fixup pointer. */
160 fixup = Add2Ptr(rhdr, fo);
162 ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));
166 /* Test current word. */
167 if (*ptr != sample) {
168 /* Fixup does not match! Is it serious error? */
174 ptr += SECTOR_SIZE / sizeof(short);
181 * ntfs_extend_init - Load $Extend file.
/*
 * Load the $Extend directory and look up its well-known children
 * ($ObjId, $Quota, $Reparse, $UsnJrnl), caching their inode numbers
 * (and, for some, ntfs_inode pointers) in @sbi.
 * (Body shown here is an elided fragment of the full function.)
 */
183 int ntfs_extend_init(struct ntfs_sb_info *sbi)
186 struct super_block *sb = sbi->sb;
187 struct inode *inode, *inode2;
/* $Extend only exists on NTFS 3.x and later volumes. */
190 if (sbi->volume.major_ver < 3) {
191 ntfs_notice(sb, "Skip $Extend 'cause NTFS version");
195 ref.low = cpu_to_le32(MFT_REC_EXTEND);
197 ref.seq = cpu_to_le16(MFT_REC_EXTEND);
198 inode = ntfs_iget5(sb, &ref, &NAME_EXTEND);
200 err = PTR_ERR(inode);
201 ntfs_err(sb, "Failed to load $Extend.");
206 /* If ntfs_iget5() reads from disk it never returns bad inode. */
207 if (!S_ISDIR(inode->i_mode)) {
212 /* Try to find $ObjId */
213 inode2 = dir_search_u(inode, &NAME_OBJID, NULL);
214 if (inode2 && !IS_ERR(inode2)) {
215 if (is_bad_inode(inode2)) {
218 sbi->objid.ni = ntfs_i(inode2);
219 sbi->objid_no = inode2->i_ino;
223 /* Try to find $Quota */
224 inode2 = dir_search_u(inode, &NAME_QUOTA, NULL);
225 if (inode2 && !IS_ERR(inode2)) {
226 sbi->quota_no = inode2->i_ino;
230 /* Try to find $Reparse */
231 inode2 = dir_search_u(inode, &NAME_REPARSE, NULL);
232 if (inode2 && !IS_ERR(inode2)) {
233 sbi->reparse.ni = ntfs_i(inode2);
234 sbi->reparse_no = inode2->i_ino;
237 /* Try to find $UsnJrnl */
238 inode2 = dir_search_u(inode, &NAME_USNJRNL, NULL);
239 if (inode2 && !IS_ERR(inode2)) {
240 sbi->usn_jrnl_no = inode2->i_ino;
/*
 * Load $LogFile (@ni) and replay the journal; loads $MFT (falling back to
 * the $MFTMirr copy), then optionally wipes the log with -1 bytes.
 * (Body shown here is an elided fragment of the full function.)
 */
250 int ntfs_loadlog_and_replay(struct ntfs_inode *ni, struct ntfs_sb_info *sbi)
253 struct super_block *sb = sbi->sb;
254 bool initialized = false;
/* On-disk $LogFile is limited to < 4 GiB. */
259 if (ni->vfs_inode.i_size >= 0x100000000ull) {
260 ntfs_err(sb, "\x24LogFile is too big");
265 sbi->flags |= NTFS_FLAGS_LOG_REPLAYING;
267 ref.low = cpu_to_le32(MFT_REC_MFT);
269 ref.seq = cpu_to_le16(1);
271 inode = ntfs_iget5(sb, &ref, NULL);
277 /* Try to use MFT copy. */
278 u64 t64 = sbi->mft.lbo;
280 sbi->mft.lbo = sbi->mft.lbo2;
281 inode = ntfs_iget5(sb, &ref, NULL);
289 ntfs_err(sb, "Failed to load $MFT.");
293 sbi->mft.ni = ntfs_i(inode);
295 /* LogFile should not contains attribute list. */
296 err = ni_load_all_mi(sbi->mft.ni);
298 err = log_replay(ni, &initialized);
/* Flush and drop cached metadata that replay may have changed on disk. */
303 sync_blockdev(sb->s_bdev);
304 invalidate_bdev(sb->s_bdev);
306 if (sbi->flags & NTFS_FLAGS_NEED_REPLAY) {
311 if (sb_rdonly(sb) || !initialized)
314 /* Fill LogFile by '-1' if it is initialized. */
315 err = ntfs_bio_fill_1(sbi, &ni->file.run);
318 sbi->flags &= ~NTFS_FLAGS_LOG_REPLAYING;
326 * Return: Current ATTR_DEF_ENTRY for given attribute type.
/*
 * Binary-search sbi->def_table for the ATTR_DEF_ENTRY matching @type.
 * Return: entry for the given attribute type (NULL path not visible in
 * this elided fragment).
 */
328 const struct ATTR_DEF_ENTRY *ntfs_query_def(struct ntfs_sb_info *sbi,
331 int type_in = le32_to_cpu(type);
333 size_t max_idx = sbi->def_entries - 1;
335 while (min_idx <= max_idx) {
/* Overflow-safe midpoint. */
336 size_t i = min_idx + ((max_idx - min_idx) >> 1);
337 const struct ATTR_DEF_ENTRY *entry = sbi->def_table + i;
338 int diff = le32_to_cpu(entry->type) - type_in;
353 * ntfs_look_for_free_space - Look for a free space in bitmap.
/*
 * Allocate @len clusters near @lcn from the volume bitmap.
 * ALLOCATE_MFT requests carve space from the reserved MFT zone; general
 * requests may shrink the zone when the volume is nearly full.
 * Results go to *new_lcn / *new_len. Runs under the cluster bitmap lock.
 * (Body shown here is an elided fragment of the full function.)
 */
355 int ntfs_look_for_free_space(struct ntfs_sb_info *sbi, CLST lcn, CLST len,
356 CLST *new_lcn, CLST *new_len,
357 enum ALLOCATE_OPT opt)
361 struct super_block *sb = sbi->sb;
362 size_t alcn, zlen, zeroes, zlcn, zlen2, ztrim, new_zlen;
363 struct wnd_bitmap *wnd = &sbi->used.bitmap;
365 down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
/* MFT extension: take clusters straight from the MFT zone. */
366 if (opt & ALLOCATE_MFT) {
367 zlen = wnd_zone_len(wnd);
370 err = ntfs_refresh_zone(sbi);
374 zlen = wnd_zone_len(wnd);
378 ntfs_err(sbi->sb, "no free space to extend mft");
383 lcn = wnd_zone_bit(wnd);
384 alen = zlen > len ? len : zlen;
/* Shrink the zone by what we just took. */
386 wnd_zone_set(wnd, lcn + alen, zlen - alen);
388 err = wnd_set_used(wnd, lcn, alen);
396 * 'Cause cluster 0 is always used this value means that we should use
397 * cached value of 'next_free_lcn' to improve performance.
400 lcn = sbi->used.next_free_lcn;
402 if (lcn >= wnd->nbits)
405 alen = wnd_find(wnd, len, lcn, BITMAP_FIND_MARK_AS_USED, &alcn);
409 /* Try to use clusters from MftZone. */
410 zlen = wnd_zone_len(wnd);
411 zeroes = wnd_zeroes(wnd);
413 /* Check too big request */
414 if (len > zeroes + zlen || zlen <= NTFS_MIN_MFT_ZONE) {
419 /* How many clusters to cat from zone. */
420 zlcn = wnd_zone_bit(wnd);
422 ztrim = len > zlen ? zlen : (len > zlen2 ? len : zlen2);
423 new_zlen = zlen - ztrim;
/* Never shrink the zone below its minimum size. */
425 if (new_zlen < NTFS_MIN_MFT_ZONE) {
426 new_zlen = NTFS_MIN_MFT_ZONE;
431 wnd_zone_set(wnd, zlcn, new_zlen);
433 /* Allocate continues clusters. */
434 alen = wnd_find(wnd, len, 0,
435 BITMAP_FIND_MARK_AS_USED | BITMAP_FIND_FULL, &alcn);
/* Drop any cached metadata buffers covering the reallocated range. */
446 ntfs_unmap_meta(sb, alcn, alen);
448 /* Set hint for next requests. */
449 if (!(opt & ALLOCATE_MFT))
450 sbi->used.next_free_lcn = alcn + alen;
452 up_write(&wnd->rw_lock);
457 * ntfs_extend_mft - Allocate additional MFT records.
459 * sbi->mft.bitmap is locked for write.
462 * ntfs_look_free_mft ->
465 * ni_insert_nonresident ->
468 * ntfs_look_free_mft ->
471 * To avoid recursive always allocate space for two new MFT records
472 * see attrib.c: "at least two MFT to avoid recursive loop".
/*
 * Grow $MFT::DATA and $MFT::BITMAP so more MFT records are available,
 * then extend the in-memory MFT bitmap and format the new tail records.
 * Caller holds sbi->mft.bitmap for write (see comment above in file).
 * (Body shown here is an elided fragment of the full function.)
 */
474 static int ntfs_extend_mft(struct ntfs_sb_info *sbi)
477 struct ntfs_inode *ni = sbi->mft.ni;
478 size_t new_mft_total;
479 u64 new_mft_bytes, new_bitmap_bytes;
481 struct wnd_bitmap *wnd = &sbi->mft.bitmap;
/* Round the new record count up to a multiple of 128. */
483 new_mft_total = (wnd->nbits + MFT_INCREASE_CHUNK + 127) & (CLST)~127;
484 new_mft_bytes = (u64)new_mft_total << sbi->record_bits;
486 /* Step 1: Resize $MFT::DATA. */
487 down_write(&ni->file.run_lock);
488 err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
489 new_mft_bytes, NULL, false, &attr);
492 up_write(&ni->file.run_lock);
496 attr->nres.valid_size = attr->nres.data_size;
/* Recompute the record count from what was actually allocated. */
497 new_mft_total = le64_to_cpu(attr->nres.alloc_size) >> sbi->record_bits;
500 /* Step 2: Resize $MFT::BITMAP. */
501 new_bitmap_bytes = bitmap_size(new_mft_total);
503 err = attr_set_size(ni, ATTR_BITMAP, NULL, 0, &sbi->mft.bitmap.run,
504 new_bitmap_bytes, &new_bitmap_bytes, true, NULL);
506 /* Refresh MFT Zone if necessary. */
507 down_write_nested(&sbi->used.bitmap.rw_lock, BITMAP_MUTEX_CLUSTERS);
509 ntfs_refresh_zone(sbi);
511 up_write(&sbi->used.bitmap.rw_lock);
512 up_write(&ni->file.run_lock);
517 err = wnd_extend(wnd, new_mft_total);
/* Zero-format the newly added records on disk. */
522 ntfs_clear_mft_tail(sbi, sbi->mft.used, new_mft_total);
524 err = _ni_write_inode(&ni->vfs_inode, 0);
530 * ntfs_look_free_mft - Look for a free MFT record.
/*
 * Find (and mark used) a free MFT record number, returned in *rno.
 * @mft requests a record for the MFT itself (uses/maintains the reserved
 * records [MFT_REC_RESERVED, MFT_REC_FREE) and the MFT zone); otherwise a
 * general-purpose record >= MFT_REC_USER is preferred. May extend the MFT.
 * (Body shown here is an elided fragment of the full function.)
 */
532 int ntfs_look_free_mft(struct ntfs_sb_info *sbi, CLST *rno, bool mft,
533 struct ntfs_inode *ni, struct mft_inode **mi)
536 size_t zbit, zlen, from, to, fr;
539 struct super_block *sb = sbi->sb;
540 struct wnd_bitmap *wnd = &sbi->mft.bitmap;
/* reserved_bitmap must be wide enough for the reserved record range. */
543 static_assert(sizeof(sbi->mft.reserved_bitmap) * 8 >=
544 MFT_REC_FREE - MFT_REC_RESERVED);
547 down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
549 zlen = wnd_zone_len(wnd);
551 /* Always reserve space for MFT. */
554 zbit = wnd_zone_bit(wnd);
/* Take the first record of the zone and shrink it by one. */
556 wnd_zone_set(wnd, zbit + 1, zlen - 1);
561 /* No MFT zone. Find the nearest to '0' free MFT. */
562 if (!wnd_find(wnd, 1, MFT_REC_FREE, 0, &zbit)) {
564 mft_total = wnd->nbits;
566 err = ntfs_extend_mft(sbi);
572 if (!mft || MFT_REC_FREE == sbi->mft.next_reserved)
578 * Look for free record reserved area [11-16) ==
579 * [MFT_REC_RESERVED, MFT_REC_FREE ) MFT bitmap always
582 if (!sbi->mft.reserved_bitmap) {
583 /* Once per session create internal bitmap for 5 bits. */
584 sbi->mft.reserved_bitmap = 0xFF;
/* Probe each reserved record; clear its bit if it is truly unused. */
587 for (ir = MFT_REC_RESERVED; ir < MFT_REC_FREE; ir++) {
589 struct ntfs_inode *ni;
590 struct MFT_REC *mrec;
592 ref.low = cpu_to_le32(ir);
593 ref.seq = cpu_to_le16(ir);
595 i = ntfs_iget5(sb, &ref, NULL);
600 "Invalid reserved record %x",
604 if (is_bad_inode(i)) {
613 if (!is_rec_base(mrec))
616 if (mrec->hard_links)
/* Records that still carry a $FILE_NAME attribute are in use. */
622 if (ni_find_attr(ni, NULL, NULL, ATTR_NAME,
623 NULL, 0, NULL, NULL))
626 __clear_bit(ir - MFT_REC_RESERVED,
627 &sbi->mft.reserved_bitmap);
631 /* Scan 5 bits for zero. Bit 0 == MFT_REC_RESERVED */
632 zbit = find_next_zero_bit(&sbi->mft.reserved_bitmap,
633 MFT_REC_FREE, MFT_REC_RESERVED);
634 if (zbit >= MFT_REC_FREE) {
635 sbi->mft.next_reserved = MFT_REC_FREE;
640 sbi->mft.next_reserved = zbit;
643 zlen = zbit == MFT_REC_FREE ? (MFT_REC_USER - MFT_REC_FREE) : 4;
644 if (zbit + zlen > wnd->nbits)
645 zlen = wnd->nbits - zbit;
/* Shrink the candidate window until it is entirely free. */
647 while (zlen > 1 && !wnd_is_free(wnd, zbit, zlen))
650 /* [zbit, zbit + zlen) will be used for MFT itself. */
651 from = sbi->mft.used;
656 ntfs_clear_mft_tail(sbi, from, to);
667 wnd_zone_set(wnd, zbit, zlen);
671 /* The request to get record for general purpose. */
672 if (sbi->mft.next_free < MFT_REC_USER)
673 sbi->mft.next_free = MFT_REC_USER;
676 if (sbi->mft.next_free >= sbi->mft.bitmap.nbits) {
677 } else if (!wnd_find(wnd, 1, MFT_REC_USER, 0, &fr)) {
678 sbi->mft.next_free = sbi->mft.bitmap.nbits;
681 sbi->mft.next_free = *rno + 1;
685 err = ntfs_extend_mft(sbi);
691 if (ni && !ni_add_subrecord(ni, *rno, mi)) {
696 /* We have found a record that are not reserved for next MFT. */
697 if (*rno >= MFT_REC_FREE)
698 wnd_set_used(wnd, *rno, 1);
699 else if (*rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited)
700 __set_bit(*rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
704 up_write(&wnd->rw_lock);
710 * ntfs_mark_rec_free - Mark record as free.
/*
 * Mark MFT record @rno as free again in the appropriate bitmap
 * (general MFT bitmap or the in-memory reserved-record bitmap), and
 * update the zone/next_free hints. Takes the MFT bitmap lock.
 * (Body shown here is an elided fragment of the full function.)
 */
712 void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno)
714 struct wnd_bitmap *wnd = &sbi->mft.bitmap;
716 down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
717 if (rno >= wnd->nbits)
720 if (rno >= MFT_REC_FREE) {
/* Freeing a record that was not marked used indicates corruption. */
721 if (!wnd_is_used(wnd, rno, 1))
722 ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
724 wnd_set_free(wnd, rno, 1);
725 } else if (rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited) {
726 __clear_bit(rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
/* Keep allocation hints pointing at the lowest known-free record. */
729 if (rno < wnd_zone_bit(wnd))
730 wnd_zone_set(wnd, rno, 1);
731 else if (rno < sbi->mft.next_free && rno >= MFT_REC_USER)
732 sbi->mft.next_free = rno;
735 up_write(&wnd->rw_lock);
739 * ntfs_clear_mft_tail - Format empty records [from, to).
741 * sbi->mft.bitmap is locked for write.
/*
 * Write empty (template) MFT records over the range [from, to) on disk,
 * one record at a time via buffer heads. Caller holds sbi->mft.bitmap
 * for write (per the comment above in the file).
 * (Body shown here is an elided fragment of the full function.)
 */
743 int ntfs_clear_mft_tail(struct ntfs_sb_info *sbi, size_t from, size_t to)
748 struct runs_tree *run;
749 struct ntfs_inode *ni;
754 rs = sbi->record_size;
758 down_read(&ni->file.run_lock);
759 vbo = (u64)from * rs;
760 for (; from < to; from++, vbo += rs) {
761 struct ntfs_buffers nb;
763 err = ntfs_get_bh(sbi, run, vbo, rs, &nb);
/* Stamp the pre-built empty record (sbi->new_rec) into each slot. */
767 err = ntfs_write_bh(sbi, &sbi->new_rec->rhdr, &nb, 0);
774 sbi->mft.used = from;
775 up_read(&ni->file.run_lock);
780 * ntfs_refresh_zone - Refresh MFT zone.
782 * sbi->used.bitmap is locked for rw.
783 * sbi->mft.bitmap is locked for write.
784 * sbi->mft.ni->file.run_lock for write.
/*
 * Rebuild the MFT zone: reserve free clusters right after the last MFT
 * run, capped at min(total/8, 512 MiB). No-op while a zone still exists.
 * Locking contract is documented in the comment block above in the file.
 * (Body shown here is an elided fragment of the full function.)
 */
786 int ntfs_refresh_zone(struct ntfs_sb_info *sbi)
788 CLST zone_limit, zone_max, lcn, vcn, len;
790 struct wnd_bitmap *wnd = &sbi->used.bitmap;
791 struct ntfs_inode *ni = sbi->mft.ni;
793 /* Do not change anything unless we have non empty MFT zone. */
794 if (wnd_zone_len(wnd))
798 * Compute the MFT zone at two steps.
799 * It would be nice if we are able to allocate 1/8 of
800 * total clusters for MFT but not more then 512 MB.
802 zone_limit = (512 * 1024 * 1024) >> sbi->cluster_bits;
803 zone_max = wnd->nbits >> 3;
804 if (zone_max > zone_limit)
805 zone_max = zone_limit;
/* VCN just past the end of the current MFT data. */
807 vcn = bytes_to_cluster(sbi,
808 (u64)sbi->mft.bitmap.nbits << sbi->record_bits);
810 if (!run_lookup_entry(&ni->file.run, vcn - 1, &lcn, &len, NULL))
813 /* We should always find Last Lcn for MFT. */
814 if (lcn == SPARSE_LCN)
819 /* Try to allocate clusters after last MFT run. */
820 zlen = wnd_find(wnd, zone_max, lcn_s, 0, &lcn_s);
822 ntfs_notice(sbi->sb, "MftZone: unavailable");
826 /* Truncate too large zone. */
827 wnd_zone_set(wnd, lcn_s, zlen);
833 * ntfs_update_mftmirr - Update $MFTMirr data.
/*
 * Copy the mirrored MFT records from $MFT to $MFTMirr, block by block,
 * optionally syncing each buffer (@wait). Clears NTFS_FLAGS_MFTMIRR
 * once the mirror is up to date.
 * (Body shown here is an elided fragment of the full function.)
 */
835 int ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
838 struct super_block *sb = sbi->sb;
839 u32 blocksize = sb->s_blocksize;
840 sector_t block1, block2;
/* Nothing to do unless the mirror was flagged stale. */
843 if (!(sbi->flags & NTFS_FLAGS_MFTMIRR))
847 bytes = sbi->mft.recs_mirr << sbi->record_bits;
848 block1 = sbi->mft.lbo >> sb->s_blocksize_bits;
849 block2 = sbi->mft.lbo2 >> sb->s_blocksize_bits;
851 for (; bytes >= blocksize; bytes -= blocksize) {
852 struct buffer_head *bh1, *bh2;
/* bh1: read source; bh2: get destination without reading it. */
854 bh1 = sb_bread(sb, block1++);
860 bh2 = sb_getblk(sb, block2++);
867 if (buffer_locked(bh2))
868 __wait_on_buffer(bh2);
871 memcpy(bh2->b_data, bh1->b_data, blocksize);
872 set_buffer_uptodate(bh2);
873 mark_buffer_dirty(bh2);
880 err = sync_dirty_buffer(bh2);
887 sbi->flags &= ~NTFS_FLAGS_MFTMIRR;
896 * Mount: ntfs_set_state(NTFS_DIRTY_DIRTY)
897 * Umount: ntfs_set_state(NTFS_DIRTY_CLEAR)
898 * NTFS error: ntfs_set_state(NTFS_DIRTY_ERROR)
/*
 * Set or clear the on-disk VOLUME_FLAG_DIRTY in $Volume's $VOLUME_INFO
 * attribute (see usage notes in the comment block above in the file),
 * then push the change out with sync_inode_metadata/filemap_fdatawrite.
 * (Body shown here is an elided fragment of the full function.)
 */
900 int ntfs_set_state(struct ntfs_sb_info *sbi, enum NTFS_DIRTY_FLAGS dirty)
904 struct VOLUME_INFO *info;
905 struct mft_inode *mi;
906 struct ntfs_inode *ni;
909 * Do not change state if fs was real_dirty.
910 * Do not change state if fs already dirty(clear).
911 * Do not change any thing if mounted read only.
913 if (sbi->volume.real_dirty || sb_rdonly(sbi->sb))
916 /* Check cached value. */
917 if ((dirty == NTFS_DIRTY_CLEAR ? 0 : VOLUME_FLAG_DIRTY) ==
918 (sbi->volume.flags & VOLUME_FLAG_DIRTY))
925 mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_DIRTY);
927 attr = ni_find_attr(ni, NULL, NULL, ATTR_VOL_INFO, NULL, 0, NULL, &mi);
933 info = resident_data_ex(attr, SIZEOF_ATTRIBUTE_VOLUME_INFO);
940 case NTFS_DIRTY_ERROR:
941 ntfs_notice(sbi->sb, "Mark volume as dirty due to NTFS errors");
/* real_dirty latches: later calls will refuse to clear the flag. */
942 sbi->volume.real_dirty = true;
944 case NTFS_DIRTY_DIRTY:
945 info->flags |= VOLUME_FLAG_DIRTY;
947 case NTFS_DIRTY_CLEAR:
948 info->flags &= ~VOLUME_FLAG_DIRTY;
951 /* Cache current volume flags. */
952 sbi->volume.flags = info->flags;
961 mark_inode_dirty(&ni->vfs_inode);
962 /* verify(!ntfs_update_mftmirr()); */
965 * If we used wait=1, sync_inode_metadata waits for the io for the
966 * inode to finish. It hangs when media is removed.
967 * So wait=0 is sent down to sync_inode_metadata
968 * and filemap_fdatawrite is used for the data blocks.
970 err = sync_inode_metadata(&ni->vfs_inode, 0);
972 err = filemap_fdatawrite(ni->vfs_inode.i_mapping);
978 * security_hash - Calculates a hash of security descriptor.
/*
 * security_hash - Hash of a security descriptor for the $SDH index:
 * rotate-right-by-29 (== rotate-left-by-3) accumulate over 32-bit words.
 * (Body shown here is an elided fragment of the full function.)
 */
980 static inline __le32 security_hash(const void *sd, size_t bytes)
983 const __le32 *ptr = sd;
987 hash = ((hash >> 0x1D) | (hash << 3)) + le32_to_cpu(*ptr++);
988 return cpu_to_le32(hash);
/*
 * Read @bytes at device byte offset @lbo into @buffer via __bread,
 * handling an unaligned first block (off/op) and whole blocks after.
 * (Body shown here is an elided fragment of the full function.)
 */
991 int ntfs_sb_read(struct super_block *sb, u64 lbo, size_t bytes, void *buffer)
993 struct block_device *bdev = sb->s_bdev;
994 u32 blocksize = sb->s_blocksize;
995 u64 block = lbo >> sb->s_blocksize_bits;
996 u32 off = lbo & (blocksize - 1);
997 u32 op = blocksize - off;
/* After the first (possibly partial) block, off resets and op = blocksize. */
999 for (; bytes; block += 1, off = 0, op = blocksize) {
1000 struct buffer_head *bh = __bread(bdev, block, blocksize);
1008 memcpy(buffer, bh->b_data + off, op);
1013 buffer = Add2Ptr(buffer, op);
/*
 * Write @bytes at device byte offset @lbo. A NULL @buf fills with 0xFF.
 * Partial blocks are read-modify-write; full blocks are written via
 * __getblk without reading. @wait forces sync_dirty_buffer per block.
 * (Body shown here is an elided fragment of the full function.)
 */
1019 int ntfs_sb_write(struct super_block *sb, u64 lbo, size_t bytes,
1020 const void *buf, int wait)
1022 u32 blocksize = sb->s_blocksize;
1023 struct block_device *bdev = sb->s_bdev;
1024 sector_t block = lbo >> sb->s_blocksize_bits;
1025 u32 off = lbo & (blocksize - 1);
1026 u32 op = blocksize - off;
1027 struct buffer_head *bh;
/* Honor a synchronous-mount superblock even when caller passed wait=0. */
1029 if (!wait && (sb->s_flags & SB_SYNCHRONOUS))
1032 for (; bytes; block += 1, off = 0, op = blocksize) {
/* Partial block: must read existing contents first. */
1036 if (op < blocksize) {
1037 bh = __bread(bdev, block, blocksize);
1039 ntfs_err(sb, "failed to read block %llx",
1044 bh = __getblk(bdev, block, blocksize);
1049 if (buffer_locked(bh))
1050 __wait_on_buffer(bh);
1054 memcpy(bh->b_data + off, buf, op);
1055 buf = Add2Ptr(buf, op);
1057 memset(bh->b_data + off, -1, op);
1060 set_buffer_uptodate(bh);
1061 mark_buffer_dirty(bh);
1065 int err = sync_dirty_buffer(bh);
1070 "failed to sync buffer at block %llx, error %d",
/*
 * Write @bytes from @buf at virtual byte offset @vbo of a run list,
 * translating each run (VCN->LCN) and delegating to ntfs_sb_write().
 * Sparse runs are rejected. Requires runs to be contiguous in VCN space.
 * (Body shown here is an elided fragment of the full function.)
 */
1084 int ntfs_sb_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
1085 u64 vbo, const void *buf, size_t bytes)
1087 struct super_block *sb = sbi->sb;
1088 u8 cluster_bits = sbi->cluster_bits;
1089 u32 off = vbo & sbi->cluster_mask;
1090 CLST lcn, clen, vcn = vbo >> cluster_bits, vcn_next;
1094 if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
1097 if (lcn == SPARSE_LCN)
1100 lbo = ((u64)lcn << cluster_bits) + off;
1101 len = ((u64)clen << cluster_bits) - off;
1104 u32 op = len < bytes ? len : bytes;
1105 int err = ntfs_sb_write(sb, lbo, op, buf, 0);
/* Next run must start exactly where the previous one ended. */
1114 vcn_next = vcn + clen;
1115 if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
1119 if (lcn == SPARSE_LCN)
1123 buf = Add2Ptr(buf, op);
1125 lbo = ((u64)lcn << cluster_bits);
1126 len = ((u64)clen << cluster_bits);
/*
 * Map @vbo through @run to a device offset and read that block.
 * Return: buffer head, or ERR_PTR(-ENOENT) if @vbo is not mapped.
 * (Body shown here is an elided fragment of the full function.)
 */
1132 struct buffer_head *ntfs_bread_run(struct ntfs_sb_info *sbi,
1133 const struct runs_tree *run, u64 vbo)
1135 struct super_block *sb = sbi->sb;
1136 u8 cluster_bits = sbi->cluster_bits;
1140 if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, NULL, NULL))
1141 return ERR_PTR(-ENOENT);
1143 lbo = ((u64)lcn << cluster_bits) + (vbo & sbi->cluster_mask);
1145 return ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
/*
 * Read @bytes at @vbo from a run list. If @buf is set, data is copied
 * out; otherwise the buffer heads are collected in @nb for later use.
 * Contains a bootstrap path that reads early records straight from
 * sbi->mft.lbo before the MFT run list is available.
 * (Body shown here is an elided fragment of the full function.)
 */
1148 int ntfs_read_run_nb(struct ntfs_sb_info *sbi, const struct runs_tree *run,
1149 u64 vbo, void *buf, u32 bytes, struct ntfs_buffers *nb)
1152 struct super_block *sb = sbi->sb;
1153 u32 blocksize = sb->s_blocksize;
1154 u8 cluster_bits = sbi->cluster_bits;
1155 u32 off = vbo & sbi->cluster_mask;
1157 CLST vcn_next, vcn = vbo >> cluster_bits;
1161 struct buffer_head *bh;
1164 /* First reading of $Volume + $MFTMirr + $LogFile goes here. */
1165 if (vbo > MFT_REC_VOL * sbi->record_size) {
1170 /* Use absolute boot's 'MFTCluster' to read record. */
1171 lbo = vbo + sbi->mft.lbo;
1172 len = sbi->record_size;
1173 } else if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
1177 if (lcn == SPARSE_LCN) {
1182 lbo = ((u64)lcn << cluster_bits) + off;
1183 len = ((u64)clen << cluster_bits) - off;
1186 off = lbo & (blocksize - 1);
1193 u32 len32 = len >= bytes ? bytes : len;
1194 sector_t block = lbo >> sb->s_blocksize_bits;
1197 u32 op = blocksize - off;
1202 bh = ntfs_bread(sb, block);
/* Copy-out mode: release bh immediately after the memcpy. */
1209 memcpy(buf, bh->b_data + off, op);
1210 buf = Add2Ptr(buf, op);
/* Collect mode: bounded by the fixed nb->bh array. */
1215 } else if (nbh >= ARRAY_SIZE(nb->bh)) {
1232 vcn_next = vcn + clen;
1233 if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
1239 if (lcn == SPARSE_LCN) {
1244 lbo = ((u64)lcn << cluster_bits);
1245 len = ((u64)clen << cluster_bits);
/* Error path: drop every buffer head collected so far. */
1253 put_bh(nb->bh[--nbh]);
1264 * Return: < 0 if error, 0 if ok, -E_NTFS_FIXUP if need to update fixups.
/*
 * Read an NTFS record at @vbo into @rhdr (collecting bh's in @nb) and
 * strip its update-sequence fixups.
 * Return: < 0 on error, 0 if ok, -E_NTFS_FIXUP if fixups need updating
 * (per the comment above in the file).
 */
1266 int ntfs_read_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
1267 struct NTFS_RECORD_HEADER *rhdr, u32 bytes,
1268 struct ntfs_buffers *nb)
1270 int err = ntfs_read_run_nb(sbi, run, vbo, rhdr, bytes, nb);
1274 return ntfs_fix_post_read(rhdr, nb->bytes, true);
/*
 * Collect buffer heads covering @bytes at @vbo of a run list into @nb,
 * for a subsequent write. Fully covered blocks are obtained with
 * sb_getblk (no read); partial blocks are read in.
 * (Body shown here is an elided fragment of the full function.)
 */
1277 int ntfs_get_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
1278 u32 bytes, struct ntfs_buffers *nb)
1281 struct super_block *sb = sbi->sb;
1282 u32 blocksize = sb->s_blocksize;
1283 u8 cluster_bits = sbi->cluster_bits;
1284 CLST vcn_next, vcn = vbo >> cluster_bits;
1293 if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
1298 off = vbo & sbi->cluster_mask;
1299 lbo = ((u64)lcn << cluster_bits) + off;
1300 len = ((u64)clen << cluster_bits) - off;
1302 nb->off = off = lbo & (blocksize - 1);
1305 u32 len32 = len < bytes ? len : bytes;
1306 sector_t block = lbo >> sb->s_blocksize_bits;
1310 struct buffer_head *bh;
1312 if (nbh >= ARRAY_SIZE(nb->bh)) {
1317 op = blocksize - off;
/* Whole block will be overwritten: skip the disk read. */
1321 if (op == blocksize) {
1322 bh = sb_getblk(sb, block);
1327 if (buffer_locked(bh))
1328 __wait_on_buffer(bh);
1329 set_buffer_uptodate(bh);
1331 bh = ntfs_bread(sb, block);
1350 vcn_next = vcn + clen;
1351 if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
1357 lbo = ((u64)lcn << cluster_bits);
1358 len = ((u64)clen << cluster_bits);
/* Error path: release everything collected so far. */
1363 put_bh(nb->bh[--nbh]);
/*
 * Copy the record @rhdr into the buffer heads previously collected in
 * @nb, applying update-sequence fixups per 512-byte sector as it goes,
 * then mark (and optionally @sync) each buffer dirty.
 * (Body shown here is an elided fragment of the full function.)
 */
1372 int ntfs_write_bh(struct ntfs_sb_info *sbi, struct NTFS_RECORD_HEADER *rhdr,
1373 struct ntfs_buffers *nb, int sync)
1376 struct super_block *sb = sbi->sb;
1377 u32 block_size = sb->s_blocksize;
1378 u32 bytes = nb->bytes;
1380 u16 fo = le16_to_cpu(rhdr->fix_off);
1381 u16 fn = le16_to_cpu(rhdr->fix_num);
/* Same fixup-header validation as ntfs_fix_pre_write(). */
1386 if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
1387 fn * SECTOR_SIZE > bytes) {
1391 for (idx = 0; bytes && idx < nb->nbufs; idx += 1, off = 0) {
1392 u32 op = block_size - off;
1394 struct buffer_head *bh = nb->bh[idx];
1395 __le16 *ptr, *end_data;
1400 if (buffer_locked(bh))
1401 __wait_on_buffer(bh);
1403 lock_buffer(nb->bh[idx]);
1405 bh_data = bh->b_data + off;
1406 end_data = Add2Ptr(bh_data, op);
1407 memcpy(bh_data, rhdr, op);
/* First buffer: advance the update-sequence number, wrapping at 0x7FFF. */
1412 fixup = Add2Ptr(bh_data, fo);
1414 t16 = le16_to_cpu(sample);
1415 if (t16 >= 0x7FFF) {
1416 sample = *fixup = cpu_to_le16(1);
1418 sample = cpu_to_le16(t16 + 1);
1422 *(__le16 *)Add2Ptr(rhdr, fo) = sample;
/* Stamp the sequence number into the last word of every sector. */
1425 ptr = Add2Ptr(bh_data, SECTOR_SIZE - sizeof(short));
1430 ptr += SECTOR_SIZE / sizeof(short);
1431 } while (ptr < end_data);
1433 set_buffer_uptodate(bh);
1434 mark_buffer_dirty(bh);
1438 int err2 = sync_dirty_buffer(bh);
1445 rhdr = Add2Ptr(rhdr, op);
/*
 * Allocate a bio with @nr_vecs vectors. Under memory pressure (when the
 * task is PF_MEMALLOC) retry with progressively halved vector counts.
 */
1451 static inline struct bio *ntfs_alloc_bio(u32 nr_vecs)
1453 struct bio *bio = bio_alloc(GFP_NOFS | __GFP_HIGH, nr_vecs);
1455 if (!bio && (current->flags & PF_MEMALLOC)) {
1456 while (!bio && (nr_vecs /= 2))
1457 bio = bio_alloc(GFP_NOFS | __GFP_HIGH, nr_vecs);
1463 * ntfs_bio_pages - Read/write pages from/to disk.
/*
 * Read/write @nr_pages pages covering [@vbo, @vbo+@bytes) of a run list,
 * chaining bios per run extent and waiting for completion under a plug.
 * (Body shown here is an elided fragment of the full function.)
 */
1465 int ntfs_bio_pages(struct ntfs_sb_info *sbi, const struct runs_tree *run,
1466 struct page **pages, u32 nr_pages, u64 vbo, u32 bytes,
1470 struct bio *new, *bio = NULL;
1471 struct super_block *sb = sbi->sb;
1472 struct block_device *bdev = sb->s_bdev;
1474 u8 cluster_bits = sbi->cluster_bits;
1475 CLST lcn, clen, vcn, vcn_next;
1476 u32 add, off, page_idx;
1479 struct blk_plug plug;
1484 blk_start_plug(&plug);
1486 /* Align vbo and bytes to be 512 bytes aligned. */
1487 lbo = (vbo + bytes + 511) & ~511ull;
1488 vbo = vbo & ~511ull;
1491 vcn = vbo >> cluster_bits;
1492 if (!run_lookup_entry(run, vcn, &lcn, &clen, &run_idx)) {
1496 off = vbo & sbi->cluster_mask;
1501 lbo = ((u64)lcn << cluster_bits) + off;
1502 len = ((u64)clen << cluster_bits) - off;
/* New bio per extent; chain it to the previous one if any. */
1504 new = ntfs_alloc_bio(nr_pages - page_idx);
1510 bio_chain(bio, new);
1514 bio_set_dev(bio, bdev);
1515 bio->bi_iter.bi_sector = lbo >> 9;
1519 off = vbo & (PAGE_SIZE - 1);
1520 add = off + len > PAGE_SIZE ? (PAGE_SIZE - off) : len;
/* bio full: submit and start another for the same extent. */
1522 if (bio_add_page(bio, page, add, off) < add)
1530 if (add + off == PAGE_SIZE) {
1532 if (WARN_ON(page_idx >= nr_pages)) {
1536 page = pages[page_idx];
1545 vcn_next = vcn + clen;
1546 if (!run_get_entry(run, ++run_idx, &vcn, &lcn, &clen) ||
1556 err = submit_bio_wait(bio);
1559 blk_finish_plug(&plug);
1565 * ntfs_bio_fill_1 - Helper for ntfs_loadlog_and_replay().
1567 * Fill on-disk logfile range by (-1)
1568 * this means empty logfile.
/*
 * Fill the on-disk range described by @run with 0xFF bytes (an "empty"
 * $LogFile), by repeatedly submitting one all-ones page via bios.
 * Helper for ntfs_loadlog_and_replay() (per the comment above in file).
 * (Body shown here is an elided fragment of the full function.)
 */
1570 int ntfs_bio_fill_1(struct ntfs_sb_info *sbi, const struct runs_tree *run)
1573 struct super_block *sb = sbi->sb;
1574 struct block_device *bdev = sb->s_bdev;
1575 u8 cluster_bits = sbi->cluster_bits;
1576 struct bio *new, *bio = NULL;
1582 struct blk_plug plug;
/* One page of 0xFF, reused as the payload for every bio vector. */
1584 fill = alloc_page(GFP_KERNEL);
1588 kaddr = kmap_atomic(fill);
1589 memset(kaddr, -1, PAGE_SIZE);
1590 kunmap_atomic(kaddr);
1591 flush_dcache_page(fill);
1594 if (!run_lookup_entry(run, 0, &lcn, &clen, &run_idx)) {
1600 * TODO: Try blkdev_issue_write_same.
1602 blk_start_plug(&plug);
1604 lbo = (u64)lcn << cluster_bits;
1605 len = (u64)clen << cluster_bits;
1607 new = ntfs_alloc_bio(BIO_MAX_VECS);
1613 bio_chain(bio, new);
1617 bio_set_dev(bio, bdev);
1618 bio->bi_opf = REQ_OP_WRITE;
1619 bio->bi_iter.bi_sector = lbo >> 9;
1622 u32 add = len > PAGE_SIZE ? PAGE_SIZE : len;
1624 if (bio_add_page(bio, fill, add, 0) < add)
1632 } while (run_get_entry(run, ++run_idx, NULL, &lcn, &clen));
1636 err = submit_bio_wait(bio);
1639 blk_finish_plug(&plug);
/*
 * Translate virtual byte offset @vbo to a device byte offset (*lbo) and
 * the remaining contiguous length (*bytes); sparse extents yield -1.
 * (Body shown here is an elided fragment of the full function.)
 */
1647 int ntfs_vbo_to_lbo(struct ntfs_sb_info *sbi, const struct runs_tree *run,
1648 u64 vbo, u64 *lbo, u64 *bytes)
1652 u8 cluster_bits = sbi->cluster_bits;
1654 if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, &len, NULL))
1657 off = vbo & sbi->cluster_mask;
1658 *lbo = lcn == SPARSE_LCN ? -1 : (((u64)lcn << cluster_bits) + off);
1659 *bytes = ((u64)len << cluster_bits) - off;
/*
 * Allocate a VFS inode and format a fresh MFT record @rno for it
 * (RECORD_FLAG_DIR when @dir). Returns ERR_PTR on failure.
 * (Body shown here is an elided fragment of the full function.)
 */
1664 struct ntfs_inode *ntfs_new_inode(struct ntfs_sb_info *sbi, CLST rno, bool dir)
1667 struct super_block *sb = sbi->sb;
1668 struct inode *inode = new_inode(sb);
1669 struct ntfs_inode *ni;
1672 return ERR_PTR(-ENOMEM);
1676 err = mi_format_new(&ni->mi, sbi, rno, dir ? RECORD_FLAG_DIR : 0,
1682 if (insert_inode_locked(inode) < 0) {
1696 * O:BAG:BAD:(A;OICI;FA;;;WD)
1697 * Owner S-1-5-32-544 (Administrators)
1698 * Group S-1-5-32-544 (Administrators)
1699 * ACE: allow S-1-1-0 (Everyone) with FILE_ALL_ACCESS
/* Raw self-relative SECURITY_DESCRIPTOR bytes matching the SDDL above. */
1701 const u8 s_default_security[] __aligned(8) = {
1702 0x01, 0x00, 0x04, 0x80, 0x30, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
1703 0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x02, 0x00, 0x1C, 0x00,
1704 0x01, 0x00, 0x00, 0x00, 0x00, 0x03, 0x14, 0x00, 0xFF, 0x01, 0x1F, 0x00,
1705 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
1706 0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x20, 0x00, 0x00, 0x00,
1707 0x20, 0x02, 0x00, 0x00, 0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
1708 0x20, 0x00, 0x00, 0x00, 0x20, 0x02, 0x00, 0x00,
/* Compile-time guard: descriptor must be exactly 0x50 bytes. */
1711 static_assert(sizeof(s_default_security) == 0x50);
/* Total byte length of a SID: header plus its SubAuthority array. */
1713 static inline u32 sid_length(const struct SID *sid)
1715 return struct_size(sid, SubAuthority, sid->SubAuthorityCount);
1721 * Thanks Mark Harmstone for idea.
/*
 * Validate an ACL blob of @len bytes: revision, declared size, and each
 * ACE header staying inside the buffer. (Credits in comment above.)
 * (Body shown here is an elided fragment of the full function.)
 */
1723 static bool is_acl_valid(const struct ACL *acl, u32 len)
1725 const struct ACE_HEADER *ace;
1727 u16 ace_count, ace_size;
1729 if (acl->AclRevision != ACL_REVISION &&
1730 acl->AclRevision != ACL_REVISION_DS) {
1732 * This value should be ACL_REVISION, unless the ACL contains an
1733 * object-specific ACE, in which case this value must be ACL_REVISION_DS.
1734 * All ACEs in an ACL must be at the same revision level.
1742 if (le16_to_cpu(acl->AclSize) > len)
1748 len -= sizeof(struct ACL);
1749 ace = (struct ACE_HEADER *)&acl[1];
1750 ace_count = le16_to_cpu(acl->AceCount);
1752 for (i = 0; i < ace_count; i++) {
1753 if (len < sizeof(struct ACE_HEADER))
1756 ace_size = le16_to_cpu(ace->AceSize);
1761 ace = Add2Ptr(ace, ace_size);
/*
 * Validate a self-relative security descriptor of @len bytes: revision,
 * SE_SELF_RELATIVE control bit, and bounds of the Owner/Group SIDs and
 * the SACL/DACL offsets (each ACL re-validated via is_acl_valid()).
 * (Body shown here is an elided fragment of the full function.)
 */
1767 bool is_sd_valid(const struct SECURITY_DESCRIPTOR_RELATIVE *sd, u32 len)
1769 u32 sd_owner, sd_group, sd_sacl, sd_dacl;
1771 if (len < sizeof(struct SECURITY_DESCRIPTOR_RELATIVE))
1774 if (sd->Revision != 1)
1780 if (!(sd->Control & SE_SELF_RELATIVE))
1783 sd_owner = le32_to_cpu(sd->Owner);
1785 const struct SID *owner = Add2Ptr(sd, sd_owner);
1787 if (sd_owner + offsetof(struct SID, SubAuthority) > len)
1790 if (owner->Revision != 1)
1793 if (sd_owner + sid_length(owner) > len)
1797 sd_group = le32_to_cpu(sd->Group);
1799 const struct SID *group = Add2Ptr(sd, sd_group);
1801 if (sd_group + offsetof(struct SID, SubAuthority) > len)
1804 if (group->Revision != 1)
1807 if (sd_group + sid_length(group) > len)
1811 sd_sacl = le32_to_cpu(sd->Sacl);
1813 const struct ACL *sacl = Add2Ptr(sd, sd_sacl);
1815 if (sd_sacl + sizeof(struct ACL) > len)
1818 if (!is_acl_valid(sacl, len - sd_sacl))
1822 sd_dacl = le32_to_cpu(sd->Dacl);
1824 const struct ACL *dacl = Add2Ptr(sd, sd_dacl);
1826 if (sd_dacl + sizeof(struct ACL) > len)
1829 if (!is_acl_valid(dacl, len - sd_dacl))
1837 * ntfs_security_init - Load and parse $Secure.
/*
 * Load and parse the $Secure metafile: initialize the $SDH and $SII
 * indexes, then walk $SII to find the highest used security id and set
 * sbi->security.next_id / next_off for future descriptor insertion.
 * (Body shown here is an elided fragment of the full function.)
 */
1839 int ntfs_security_init(struct ntfs_sb_info *sbi)
1842 struct super_block *sb = sbi->sb;
1843 struct inode *inode;
1844 struct ntfs_inode *ni;
1846 struct ATTRIB *attr;
1847 struct ATTR_LIST_ENTRY *le;
1851 struct NTFS_DE_SII *sii_e;
1852 struct ntfs_fnd *fnd_sii = NULL;
1853 const struct INDEX_ROOT *root_sii;
1854 const struct INDEX_ROOT *root_sdh;
1855 struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
1856 struct ntfs_index *indx_sii = &sbi->security.index_sii;
1858 ref.low = cpu_to_le32(MFT_REC_SECURE);
1860 ref.seq = cpu_to_le16(MFT_REC_SECURE);
1862 inode = ntfs_iget5(sb, &ref, &NAME_SECURE);
1863 if (IS_ERR(inode)) {
1864 err = PTR_ERR(inode);
1865 ntfs_err(sb, "Failed to load $Secure.");
/* $SDH: security descriptors indexed by hash. */
1874 attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SDH_NAME,
1875 ARRAY_SIZE(SDH_NAME), NULL, NULL);
1881 root_sdh = resident_data(attr);
1882 if (root_sdh->type != ATTR_ZERO ||
1883 root_sdh->rule != NTFS_COLLATION_TYPE_SECURITY_HASH) {
1888 err = indx_init(indx_sdh, sbi, attr, INDEX_MUTEX_SDH);
/* $SII: security descriptors indexed by id. */
1892 attr = ni_find_attr(ni, attr, &le, ATTR_ROOT, SII_NAME,
1893 ARRAY_SIZE(SII_NAME), NULL, NULL);
1899 root_sii = resident_data(attr);
1900 if (root_sii->type != ATTR_ZERO ||
1901 root_sii->rule != NTFS_COLLATION_TYPE_UINT) {
1906 err = indx_init(indx_sii, sbi, attr, INDEX_MUTEX_SII);
1910 fnd_sii = fnd_get();
1916 sds_size = inode->i_size;
1918 /* Find the last valid Id. */
1919 sbi->security.next_id = SECURITY_ID_FIRST;
1920 /* Always write new security at the end of bucket. */
1921 sbi->security.next_off =
1922 ALIGN(sds_size - SecurityDescriptorsBlockSize, 16);
/* Scan $SII entries to learn the next free security id. */
1930 err = indx_find_raw(indx_sii, ni, root_sii, &ne, &off, fnd_sii);
1934 sii_e = (struct NTFS_DE_SII *)ne;
1935 if (le16_to_cpu(ne->view.data_size) < SIZEOF_SECURITY_HDR)
1938 next_id = le32_to_cpu(sii_e->sec_id) + 1;
1939 if (next_id >= sbi->security.next_id)
1940 sbi->security.next_id = next_id;
1943 sbi->security.ni = ni;
/*
 * ntfs_get_security_by_id - Read a security descriptor from $SDS by id.
 *
 * Looks security_id up in the $SII index, validates the on-disk
 * SECURITY_HDR, then reads the descriptor body into a freshly
 * kmalloc'ed buffer (ownership transfers to the caller via *sd —
 * TODO confirm free path against the full source; out labels are not
 * visible in this extraction).
 *
 * NOTE(review): partial extraction — leading numeric tokens are residual
 * line numbers; interior error/cleanup lines are missing.
 */
1953 * ntfs_get_security_by_id - Read security descriptor by id.
1955 int ntfs_get_security_by_id(struct ntfs_sb_info *sbi, __le32 security_id,
1956 struct SECURITY_DESCRIPTOR_RELATIVE **sd,
1961 struct ntfs_inode *ni = sbi->security.ni;
1962 struct ntfs_index *indx = &sbi->security.index_sii;
1964 struct NTFS_DE_SII *sii_e;
1965 struct ntfs_fnd *fnd_sii;
1966 struct SECURITY_HDR d_security;
1967 const struct INDEX_ROOT *root_sii;
/* Serialize against other $Secure users (lockdep subclass SECURITY). */
1972 mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);
1974 fnd_sii = fnd_get();
1980 root_sii = indx_get_root(indx, ni, NULL, NULL);
1986 /* Try to find this SECURITY descriptor in SII indexes. */
1987 err = indx_find(indx, ni, root_sii, &security_id, sizeof(security_id),
1988 NULL, &diff, (struct NTFS_DE **)&sii_e, fnd_sii);
/* Sanity-check the recorded size: must at least cover the header ... */
1995 t32 = le32_to_cpu(sii_e->sec_hdr.size);
1996 if (t32 < SIZEOF_SECURITY_HDR) {
/* ... and must not be absurdly large (0x10000 is an arbitrary cap). */
2001 if (t32 > SIZEOF_SECURITY_HDR + 0x10000) {
2002 /* Looks like too big security. 0x10000 - is arbitrary big number. */
/* Descriptor body size = stored size minus the on-disk header. */
2007 *size = t32 - SIZEOF_SECURITY_HDR;
2009 p = kmalloc(*size, GFP_NOFS);
/* Read the on-disk header from $SDS at the offset the SII entry gives. */
2015 err = ntfs_read_run_nb(sbi, &ni->file.run,
2016 le64_to_cpu(sii_e->sec_hdr.off), &d_security,
2017 sizeof(d_security), NULL);
/* Header in $SDS must match the copy cached in the SII entry. */
2021 if (memcmp(&d_security, &sii_e->sec_hdr, SIZEOF_SECURITY_HDR)) {
/* Read the descriptor body that follows the header in $SDS. */
2026 err = ntfs_read_run_nb(sbi, &ni->file.run,
2027 le64_to_cpu(sii_e->sec_hdr.off) +
2028 SIZEOF_SECURITY_HDR,
/*
 * ntfs_insert_security - Insert a security descriptor into $Secure::$SDS.
 *
 * Deduplicates via the $SDH hash index; on a miss, appends the
 * descriptor (with its SECURITY_HDR) to $SDS, writes the 256K-offset
 * mirror copy, and inserts matching entries into both $SII and $SDH.
 *
 * NOTE(review): partial extraction — leading numeric tokens are residual
 * line numbers and interior lines (braces, gotos, some locals) are
 * missing; only visible statements are documented.
 */
2045 * ntfs_insert_security - Insert security descriptor into $Secure::SDS.
2047 * SECURITY Descriptor Stream data is organized into chunks of 256K bytes
2048 * and it contains a mirror copy of each security descriptor. When writing
2049 * to a security descriptor at location X, another copy will be written at
2050 * location (X+256K).
2051 * When writing a security descriptor that will cross the 256K boundary,
2052 * the pointer will be advanced by 256K to skip
2053 * over the mirror portion.
2055 int ntfs_insert_security(struct ntfs_sb_info *sbi,
2056 const struct SECURITY_DESCRIPTOR_RELATIVE *sd,
2057 u32 size_sd, __le32 *security_id, bool *inserted)
2060 struct ntfs_inode *ni = sbi->security.ni;
2061 struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
2062 struct ntfs_index *indx_sii = &sbi->security.index_sii;
2063 struct NTFS_DE_SDH *e;
2064 struct NTFS_DE_SDH sdh_e;
2065 struct NTFS_DE_SII sii_e;
2066 struct SECURITY_HDR *d_security;
/* On-disk record = header + caller's descriptor, 16-byte aligned. */
2067 u32 new_sec_size = size_sd + SIZEOF_SECURITY_HDR;
2068 u32 aligned_sec_size = ALIGN(new_sec_size, 16);
2069 struct SECURITY_KEY hash_key;
2070 struct ntfs_fnd *fnd_sdh = NULL;
2071 const struct INDEX_ROOT *root_sdh;
2072 const struct INDEX_ROOT *root_sii;
2073 u64 mirr_off, new_sds_size;
/* Compile-time check: block size constants must stay in sync. */
2076 static_assert((1 << Log2OfSecurityDescriptorsBlockSize) ==
2077 SecurityDescriptorsBlockSize);
/* Hash of the descriptor body is the $SDH lookup key. */
2079 hash_key.hash = security_hash(sd, size_sd);
2080 hash_key.sec_id = SECURITY_ID_INVALID;
2084 *security_id = SECURITY_ID_INVALID;
2086 /* Allocate a temporal buffer. */
2087 d_security = kzalloc(aligned_sec_size, GFP_NOFS);
2091 mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);
2093 fnd_sdh = fnd_get();
2099 root_sdh = indx_get_root(indx_sdh, ni, NULL, NULL);
2105 root_sii = indx_get_root(indx_sii, ni, NULL, NULL);
2112 * Check if such security already exists.
2113 * Use "SDH" and hash -> to get the offset in "SDS".
2115 err = indx_find(indx_sdh, ni, root_sdh, &hash_key, sizeof(hash_key),
2116 &d_security->key.sec_id, &diff, (struct NTFS_DE **)&e,
/*
 * Hash hit: read the stored record and compare bytes — hashes can
 * collide, so only an exact match reuses the existing sec_id.
 */
2122 if (le32_to_cpu(e->sec_hdr.size) == new_sec_size) {
2123 err = ntfs_read_run_nb(sbi, &ni->file.run,
2124 le64_to_cpu(e->sec_hdr.off),
2125 d_security, new_sec_size, NULL);
2129 if (le32_to_cpu(d_security->size) == new_sec_size &&
2130 d_security->key.hash == hash_key.hash &&
2131 !memcmp(d_security + 1, sd, size_sd)) {
2132 *security_id = d_security->key.sec_id;
2133 /* Such security already exists. */
/* Collision: advance to the next entry with the same hash. */
2139 err = indx_find_sort(indx_sdh, ni, root_sdh,
2140 (struct NTFS_DE **)&e, fnd_sdh);
2144 if (!e || e->key.hash != hash_key.hash)
2148 /* Zero unused space. */
2149 next = sbi->security.next_off & (SecurityDescriptorsBlockSize - 1);
2150 left = SecurityDescriptorsBlockSize - next;
2152 /* Zero gap until SecurityDescriptorsBlockSize. */
2153 if (left < new_sec_size) {
2154 /* Zero "left" bytes from sbi->security.next_off. */
/* Record would cross the 256K boundary: skip over the mirror region. */
2155 sbi->security.next_off += SecurityDescriptorsBlockSize + left;
2158 /* Zero tail of previous security. */
2159 //used = ni->vfs_inode.i_size & (SecurityDescriptorsBlockSize - 1);
2163 * 0x40438 == ni->vfs_inode.i_size
2164 * 0x00440 == sbi->security.next_off
2165 * need to zero [0x438-0x440)
2166 * if (next > used) {
2167 * u32 tozero = next - used;
2168 * zero "tozero" bytes from sbi->security.next_off - tozero
2171 /* Format new security descriptor. */
2172 d_security->key.hash = hash_key.hash;
2173 d_security->key.sec_id = cpu_to_le32(sbi->security.next_id);
2174 d_security->off = cpu_to_le64(sbi->security.next_off);
2175 d_security->size = cpu_to_le32(new_sec_size);
2176 memcpy(d_security + 1, sd, size_sd);
2178 /* Write main SDS bucket. */
2179 err = ntfs_sb_write_run(sbi, &ni->file.run, sbi->security.next_off,
2180 d_security, aligned_sec_size);
/* Mirror lives exactly one 256K block after the main copy. */
2185 mirr_off = sbi->security.next_off + SecurityDescriptorsBlockSize;
2186 new_sds_size = mirr_off + aligned_sec_size;
/* Grow the $SDS data attribute if the mirror would run past EOF. */
2188 if (new_sds_size > ni->vfs_inode.i_size) {
2189 err = attr_set_size(ni, ATTR_DATA, SDS_NAME,
2190 ARRAY_SIZE(SDS_NAME), &ni->file.run,
2191 new_sds_size, &new_sds_size, false, NULL);
2196 /* Write copy SDS bucket. */
2197 err = ntfs_sb_write_run(sbi, &ni->file.run, mirr_off, d_security,
2202 /* Fill SII entry. */
2203 sii_e.de.view.data_off =
2204 cpu_to_le16(offsetof(struct NTFS_DE_SII, sec_hdr));
2205 sii_e.de.view.data_size = cpu_to_le16(SIZEOF_SECURITY_HDR);
2206 sii_e.de.view.res = 0;
2207 sii_e.de.size = cpu_to_le16(SIZEOF_SII_DIRENTRY);
2208 sii_e.de.key_size = cpu_to_le16(sizeof(d_security->key.sec_id));
2211 sii_e.sec_id = d_security->key.sec_id;
2212 memcpy(&sii_e.sec_hdr, d_security, SIZEOF_SECURITY_HDR);
2214 err = indx_insert_entry(indx_sii, ni, &sii_e.de, NULL, NULL, 0);
2218 /* Fill SDH entry. */
2219 sdh_e.de.view.data_off =
2220 cpu_to_le16(offsetof(struct NTFS_DE_SDH, sec_hdr));
2221 sdh_e.de.view.data_size = cpu_to_le16(SIZEOF_SECURITY_HDR);
2222 sdh_e.de.view.res = 0;
2223 sdh_e.de.size = cpu_to_le16(SIZEOF_SDH_DIRENTRY);
2224 sdh_e.de.key_size = cpu_to_le16(sizeof(sdh_e.key));
2227 sdh_e.key.hash = d_security->key.hash;
2228 sdh_e.key.sec_id = d_security->key.sec_id;
2229 memcpy(&sdh_e.sec_hdr, d_security, SIZEOF_SECURITY_HDR);
/* $SDH entries carry a trailing "II" magic tail. */
2230 sdh_e.magic[0] = cpu_to_le16('I');
2231 sdh_e.magic[1] = cpu_to_le16('I');
2234 err = indx_insert_entry(indx_sdh, ni, &sdh_e.de, (void *)(size_t)1,
/* Report the newly assigned id back to the caller. */
2239 *security_id = d_security->key.sec_id;
2243 /* Update Id and offset for next descriptor. */
2244 sbi->security.next_id += 1;
2245 sbi->security.next_off += aligned_sec_size;
2249 mark_inode_dirty(&ni->vfs_inode);
/*
 * ntfs_reparse_init - Load and parse $Extend/$Reparse.
 *
 * Validates the $R index root (must collate as UINTS) and initializes
 * the in-memory reparse index sbi->reparse.index_r.
 *
 * NOTE(review): partial extraction — leading numeric tokens are residual
 * line numbers; error-path lines are missing from this view.
 */
2257 * ntfs_reparse_init - Load and parse $Extend/$Reparse.
2259 int ntfs_reparse_init(struct ntfs_sb_info *sbi)
2262 struct ntfs_inode *ni = sbi->reparse.ni;
2263 struct ntfs_index *indx = &sbi->reparse.index_r;
2264 struct ATTRIB *attr;
2265 struct ATTR_LIST_ENTRY *le;
2266 const struct INDEX_ROOT *root_r;
/* Locate the $R index root attribute on the $Reparse inode. */
2272 attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SR_NAME,
2273 ARRAY_SIZE(SR_NAME), NULL, NULL);
/* $R must use UINTS collation; anything else means a corrupt volume. */
2279 root_r = resident_data(attr);
2280 if (root_r->type != ATTR_ZERO ||
2281 root_r->rule != NTFS_COLLATION_TYPE_UINTS) {
2286 err = indx_init(indx, sbi, attr, INDEX_MUTEX_SR);
/*
 * ntfs_objid_init - Load and parse $Extend/$ObjId.
 *
 * Validates the $O index root (must collate as UINTS) and initializes
 * the in-memory object-id index sbi->objid.index_o. Mirrors
 * ntfs_reparse_init for the $ObjId system file.
 *
 * NOTE(review): partial extraction — leading numeric tokens are residual
 * line numbers; error-path lines are missing from this view.
 */
2295 * ntfs_objid_init - Load and parse $Extend/$ObjId.
2297 int ntfs_objid_init(struct ntfs_sb_info *sbi)
2300 struct ntfs_inode *ni = sbi->objid.ni;
2301 struct ntfs_index *indx = &sbi->objid.index_o;
2302 struct ATTRIB *attr;
2303 struct ATTR_LIST_ENTRY *le;
2304 const struct INDEX_ROOT *root;
/* Locate the $O index root attribute on the $ObjId inode. */
2310 attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SO_NAME,
2311 ARRAY_SIZE(SO_NAME), NULL, NULL);
/* $O must use UINTS collation; anything else means a corrupt volume. */
2317 root = resident_data(attr);
2318 if (root->type != ATTR_ZERO ||
2319 root->rule != NTFS_COLLATION_TYPE_UINTS) {
2324 err = indx_init(indx, sbi, attr, INDEX_MUTEX_SO);
/*
 * ntfs_objid_remove - Delete a GUID entry from the $ObjId $O index.
 *
 * Takes the $ObjId inode lock, removes the entry keyed by *guid, and
 * marks the inode dirty so the index change is written back.
 *
 * NOTE(review): partial extraction — leading numeric tokens are residual
 * line numbers; the unlock/return lines are missing from this view.
 */
2332 int ntfs_objid_remove(struct ntfs_sb_info *sbi, struct GUID *guid)
2335 struct ntfs_inode *ni = sbi->objid.ni;
2336 struct ntfs_index *indx = &sbi->objid.index_o;
/* Serialize index mutation (lockdep subclass OBJID). */
2341 mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_OBJID);
2343 err = indx_delete_entry(indx, ni, guid, sizeof(*guid), NULL);
2345 mark_inode_dirty(&ni->vfs_inode);
/*
 * ntfs_insert_reparse - Add a (reparse tag, MFT ref) entry to $Reparse::$R.
 *
 * Builds an NTFS_DE_R directory entry keyed by {rtag, *ref} and inserts
 * it into the reparse index under the $Reparse inode lock.
 *
 * NOTE(review): partial extraction — leading numeric tokens are residual
 * line numbers; the unlock/return lines are missing from this view.
 */
2351 int ntfs_insert_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
2352 const struct MFT_REF *ref)
2355 struct ntfs_inode *ni = sbi->reparse.ni;
2356 struct ntfs_index *indx = &sbi->reparse.index_r;
2357 struct NTFS_DE_R re;
/* Start from a zeroed entry so all padding/reserved fields are 0. */
2362 memset(&re, 0, sizeof(re));
2364 re.de.view.data_off = cpu_to_le16(offsetof(struct NTFS_DE_R, zero));
2365 re.de.size = cpu_to_le16(sizeof(struct NTFS_DE_R));
2366 re.de.key_size = cpu_to_le16(sizeof(re.key));
/* Key = reparse tag + owning file's MFT reference. */
2368 re.key.ReparseTag = rtag;
2369 memcpy(&re.key.ref, ref, sizeof(*ref));
/* Serialize index mutation (lockdep subclass REPARSE). */
2371 mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);
2373 err = indx_insert_entry(indx, ni, &re.de, NULL, NULL, 0);
2375 mark_inode_dirty(&ni->vfs_inode);
/*
 * ntfs_remove_reparse - Remove a (reparse tag, MFT ref) entry from
 * $Reparse::$R.
 *
 * First tries a direct delete with the exact {rtag, *ref} key; the
 * visible fallback path searches the index ignoring the tag and deletes
 * the entry whose MFT ref matches (presumably for the rtag==0 case —
 * the branch condition is not visible in this extraction; confirm
 * against the full source).
 *
 * NOTE(review): partial extraction — leading numeric tokens are residual
 * line numbers; interior branches and cleanup lines are missing.
 */
2381 int ntfs_remove_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
2382 const struct MFT_REF *ref)
2385 struct ntfs_inode *ni = sbi->reparse.ni;
2386 struct ntfs_index *indx = &sbi->reparse.index_r;
2387 struct ntfs_fnd *fnd = NULL;
2388 struct REPARSE_KEY rkey;
2389 struct NTFS_DE_R *re;
2390 struct INDEX_ROOT *root_r;
/* Build the exact lookup key from the caller's tag. */
2395 rkey.ReparseTag = rtag;
/* Serialize index mutation (lockdep subclass REPARSE). */
2398 mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);
/* Fast path: delete by the exact {tag, ref} key. */
2401 err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
2411 root_r = indx_get_root(indx, ni, NULL, NULL);
2417 /* 1 - forces to ignore rkey.ReparseTag when comparing keys. */
2418 err = indx_find(indx, ni, root_r, &rkey, sizeof(rkey), (void *)1, &diff,
2419 (struct NTFS_DE **)&re, fnd);
/* The found entry must reference the same file, else the index lies. */
2423 if (memcmp(&re->key.ref, ref, sizeof(*ref))) {
2424 /* Impossible. Looks like volume corrupt? */
/* Copy the full on-disk key before the find context is released. */
2428 memcpy(&rkey, &re->key, sizeof(rkey));
/* Delete using the key actually stored in the index. */
2433 err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
2441 mark_inode_dirty(&ni->vfs_inode);
/*
 * ntfs_unmap_and_discard - Drop cached metadata buffers for a cluster
 * run, then issue a discard (TRIM) for it.
 *
 * NOTE(review): partial extraction — the second parameter line (the
 * length argument, presumably "CLST len") and braces are missing from
 * this view.
 */
2447 static inline void ntfs_unmap_and_discard(struct ntfs_sb_info *sbi, CLST lcn,
2450 ntfs_unmap_meta(sbi->sb, lcn, len);
2451 ntfs_discard(sbi, lcn, len);
/*
 * mark_as_free_ex - Mark a run of clusters [lcn, lcn+len) free in the
 * in-memory bitmap, optionally unmapping/discarding them first.
 *
 * Takes the cluster-bitmap rwsem for write. If part of the range is
 * already free, the volume is flagged dirty and the visible per-cluster
 * loop handles the range piecewise; otherwise the whole run is freed at
 * once. The unmap/discard calls appear on both paths — presumably gated
 * by "trim" (the condition is not visible in this extraction; confirm
 * against the full source).
 *
 * NOTE(review): partial extraction — leading numeric tokens are residual
 * line numbers; loop interior and braces are missing.
 */
2454 void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim)
2457 struct wnd_bitmap *wnd = &sbi->used.bitmap;
/* Exclusive access to the used-clusters bitmap. */
2459 down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
/* Freeing clusters that are not all in use indicates inconsistency. */
2460 if (!wnd_is_used(wnd, lcn, len)) {
2461 ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
/* Walk the range cluster by cluster, freeing only used ones. */
2465 for (i = lcn; i < end; i++) {
2466 if (wnd_is_used(wnd, i, 1)) {
2477 ntfs_unmap_and_discard(sbi, lcn, len);
2479 wnd_set_free(wnd, lcn, len);
/* Common path: whole range was in use — free it in one call. */
2488 ntfs_unmap_and_discard(sbi, lcn, len);
2489 wnd_set_free(wnd, lcn, len);
2492 up_write(&wnd->rw_lock);
2496 * run_deallocate - Deallocate clusters.
2498 int run_deallocate(struct ntfs_sb_info *sbi, struct runs_tree *run, bool trim)
2503 while (run_get_entry(run, idx++, NULL, &lcn, &len)) {
2504 if (lcn == SPARSE_LCN)
2507 mark_as_free_ex(sbi, lcn, len, trim);