1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
8 #include <linux/blkdev.h>
9 #include <linux/buffer_head.h>
17 const struct cpu_str NAME_MFT = {
18 4, 0, { '$', 'M', 'F', 'T' },
20 const struct cpu_str NAME_MIRROR = {
21 8, 0, { '$', 'M', 'F', 'T', 'M', 'i', 'r', 'r' },
23 const struct cpu_str NAME_LOGFILE = {
24 8, 0, { '$', 'L', 'o', 'g', 'F', 'i', 'l', 'e' },
26 const struct cpu_str NAME_VOLUME = {
27 7, 0, { '$', 'V', 'o', 'l', 'u', 'm', 'e' },
29 const struct cpu_str NAME_ATTRDEF = {
30 8, 0, { '$', 'A', 't', 't', 'r', 'D', 'e', 'f' },
32 const struct cpu_str NAME_ROOT = {
35 const struct cpu_str NAME_BITMAP = {
36 7, 0, { '$', 'B', 'i', 't', 'm', 'a', 'p' },
38 const struct cpu_str NAME_BOOT = {
39 5, 0, { '$', 'B', 'o', 'o', 't' },
41 const struct cpu_str NAME_BADCLUS = {
42 8, 0, { '$', 'B', 'a', 'd', 'C', 'l', 'u', 's' },
44 const struct cpu_str NAME_QUOTA = {
45 6, 0, { '$', 'Q', 'u', 'o', 't', 'a' },
47 const struct cpu_str NAME_SECURE = {
48 7, 0, { '$', 'S', 'e', 'c', 'u', 'r', 'e' },
50 const struct cpu_str NAME_UPCASE = {
51 7, 0, { '$', 'U', 'p', 'C', 'a', 's', 'e' },
53 const struct cpu_str NAME_EXTEND = {
54 7, 0, { '$', 'E', 'x', 't', 'e', 'n', 'd' },
56 const struct cpu_str NAME_OBJID = {
57 6, 0, { '$', 'O', 'b', 'j', 'I', 'd' },
59 const struct cpu_str NAME_REPARSE = {
60 8, 0, { '$', 'R', 'e', 'p', 'a', 'r', 's', 'e' },
62 const struct cpu_str NAME_USNJRNL = {
63 8, 0, { '$', 'U', 's', 'n', 'J', 'r', 'n', 'l' },
65 const __le16 BAD_NAME[4] = {
66 cpu_to_le16('$'), cpu_to_le16('B'), cpu_to_le16('a'), cpu_to_le16('d'),
68 const __le16 I30_NAME[4] = {
69 cpu_to_le16('$'), cpu_to_le16('I'), cpu_to_le16('3'), cpu_to_le16('0'),
71 const __le16 SII_NAME[4] = {
72 cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('I'), cpu_to_le16('I'),
74 const __le16 SDH_NAME[4] = {
75 cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('H'),
77 const __le16 SDS_NAME[4] = {
78 cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('S'),
80 const __le16 SO_NAME[2] = {
81 cpu_to_le16('$'), cpu_to_le16('O'),
83 const __le16 SQ_NAME[2] = {
84 cpu_to_le16('$'), cpu_to_le16('Q'),
86 const __le16 SR_NAME[2] = {
87 cpu_to_le16('$'), cpu_to_le16('R'),
#ifdef CONFIG_NTFS3_LZX_XPRESS
/*
 * "WofCompressedData" (17 UTF-16 chars) — alternate data stream used by
 * the Windows Overlay Filter for LZX/XPRESS-compressed files.
 * NOTE(review): final element, "};", and "#endif" were dropped by the
 * listing; restored to match the declared [17] length.
 */
const __le16 WOF_NAME[17] = {
	cpu_to_le16('W'), cpu_to_le16('o'), cpu_to_le16('f'), cpu_to_le16('C'),
	cpu_to_le16('o'), cpu_to_le16('m'), cpu_to_le16('p'), cpu_to_le16('r'),
	cpu_to_le16('e'), cpu_to_le16('s'), cpu_to_le16('s'), cpu_to_le16('e'),
	cpu_to_le16('d'), cpu_to_le16('D'), cpu_to_le16('a'), cpu_to_le16('t'),
	cpu_to_le16('a'),
};
#endif
/* NOTE(review): sampled listing — intermediate source lines are missing;
 * the leading numbers are extraction artifacts, not code. */
103 * ntfs_fix_pre_write - Insert fixups into @rhdr before writing to disk.
105 bool ntfs_fix_pre_write(struct NTFS_RECORD_HEADER *rhdr, size_t bytes)
/* Validate the update-sequence array: even offset, fits inside one sector,
 * and (fn - 1) sectors of payload fit in @bytes. */
109 u16 fo = le16_to_cpu(rhdr->fix_off);
110 u16 fn = le16_to_cpu(rhdr->fix_num);
112 if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
113 fn * SECTOR_SIZE > bytes) {
117 /* Get fixup pointer. */
118 fixup = Add2Ptr(rhdr, fo);
/* 0x7FFF: sequence counter apparently wraps here — confirm against spec. */
120 if (*fixup >= 0x7FFF)
/* Step over the last 16-bit word of every sector in the record. */
127 ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));
132 ptr += SECTOR_SIZE / sizeof(short);
/* NOTE(review): sampled listing — intermediate source lines are missing;
 * the leading numbers are extraction artifacts, not code. */
138 * ntfs_fix_post_read - Remove fixups after reading from disk.
140 * Return: < 0 if error, 0 if ok, 1 if need to update fixups.
142 int ntfs_fix_post_read(struct NTFS_RECORD_HEADER *rhdr, size_t bytes,
/* @simple derives the fixup count from @bytes instead of the header. */
149 fo = le16_to_cpu(rhdr->fix_off);
150 fn = simple ? ((bytes >> SECTOR_SHIFT) + 1)
151 : le16_to_cpu(rhdr->fix_num);
/* Same sanity checks as ntfs_fix_pre_write. */
154 if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
155 fn * SECTOR_SIZE > bytes) {
156 return -EINVAL; /* Native chkntfs returns ok! */
159 /* Get fixup pointer. */
160 fixup = Add2Ptr(rhdr, fo);
162 ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));
166 /* Test current word. */
167 if (*ptr != sample) {
168 /* Fixup does not match! Is it serious error? */
174 ptr += SECTOR_SIZE / sizeof(short);
/* NOTE(review): sampled listing — intermediate source lines are missing;
 * the leading numbers are extraction artifacts, not code. */
181 * ntfs_extend_init - Load $Extend file.
183 int ntfs_extend_init(struct ntfs_sb_info *sbi)
186 struct super_block *sb = sbi->sb;
187 struct inode *inode, *inode2;
/* $Extend exists only on NTFS 3.x volumes. */
190 if (sbi->volume.major_ver < 3) {
191 ntfs_notice(sb, "Skip $Extend 'cause NTFS version");
195 ref.low = cpu_to_le32(MFT_REC_EXTEND);
197 ref.seq = cpu_to_le16(MFT_REC_EXTEND);
198 inode = ntfs_iget5(sb, &ref, &NAME_EXTEND);
200 err = PTR_ERR(inode);
201 ntfs_err(sb, "Failed to load $Extend.");
206 /* If ntfs_iget5() reads from disk it never returns bad inode. */
207 if (!S_ISDIR(inode->i_mode)) {
/* Each lookup below is best-effort: absence of a stream is not an error. */
212 /* Try to find $ObjId */
213 inode2 = dir_search_u(inode, &NAME_OBJID, NULL);
214 if (inode2 && !IS_ERR(inode2)) {
215 if (is_bad_inode(inode2)) {
218 sbi->objid.ni = ntfs_i(inode2);
219 sbi->objid_no = inode2->i_ino;
223 /* Try to find $Quota */
224 inode2 = dir_search_u(inode, &NAME_QUOTA, NULL);
225 if (inode2 && !IS_ERR(inode2)) {
226 sbi->quota_no = inode2->i_ino;
230 /* Try to find $Reparse */
231 inode2 = dir_search_u(inode, &NAME_REPARSE, NULL);
232 if (inode2 && !IS_ERR(inode2)) {
233 sbi->reparse.ni = ntfs_i(inode2);
234 sbi->reparse_no = inode2->i_ino;
237 /* Try to find $UsnJrnl */
238 inode2 = dir_search_u(inode, &NAME_USNJRNL, NULL);
239 if (inode2 && !IS_ERR(inode2)) {
240 sbi->usn_jrnl_no = inode2->i_ino;
/* NOTE(review): sampled listing — intermediate source lines are missing;
 * the leading numbers are extraction artifacts, not code.
 * Loads $LogFile, replays it, then wipes it on a writable mount. */
250 int ntfs_loadlog_and_replay(struct ntfs_inode *ni, struct ntfs_sb_info *sbi)
253 struct super_block *sb = sbi->sb;
254 bool initialized = false;
/* $LogFile larger than 4 GiB is rejected outright. */
259 if (ni->vfs_inode.i_size >= 0x100000000ull) {
260 ntfs_err(sb, "\x24LogFile is too big");
265 sbi->flags |= NTFS_FLAGS_LOG_REPLAYING;
267 ref.low = cpu_to_le32(MFT_REC_MFT);
269 ref.seq = cpu_to_le16(1);
271 inode = ntfs_iget5(sb, &ref, NULL);
277 /* Try to use MFT copy. */
278 u64 t64 = sbi->mft.lbo;
280 sbi->mft.lbo = sbi->mft.lbo2;
281 inode = ntfs_iget5(sb, &ref, NULL);
289 ntfs_err(sb, "Failed to load $MFT.");
293 sbi->mft.ni = ntfs_i(inode);
295 /* LogFile should not contains attribute list. */
296 err = ni_load_all_mi(sbi->mft.ni);
298 err = log_replay(ni, &initialized);
/* Drop any stale cached blocks after replay touched the device. */
303 sync_blockdev(sb->s_bdev);
304 invalidate_bdev(sb->s_bdev);
306 if (sbi->flags & NTFS_FLAGS_NEED_REPLAY) {
311 if (sb_rdonly(sb) || !initialized)
314 /* Fill LogFile by '-1' if it is initialized. */
315 err = ntfs_bio_fill_1(sbi, &ni->file.run);
318 sbi->flags &= ~NTFS_FLAGS_LOG_REPLAYING;
/* NOTE(review): sampled listing — intermediate source lines are missing;
 * the leading numbers are extraction artifacts, not code. */
326 * Return: Current ATTR_DEF_ENTRY for given attribute type.
/* Binary search of sbi->def_table (sorted by attribute type). */
328 const struct ATTR_DEF_ENTRY *ntfs_query_def(struct ntfs_sb_info *sbi,
331 int type_in = le32_to_cpu(type);
333 size_t max_idx = sbi->def_entries - 1;
335 while (min_idx <= max_idx) {
336 size_t i = min_idx + ((max_idx - min_idx) >> 1);
337 const struct ATTR_DEF_ENTRY *entry = sbi->def_table + i;
338 int diff = le32_to_cpu(entry->type) - type_in;
/* NOTE(review): sampled listing — intermediate source lines are missing;
 * the leading numbers are extraction artifacts, not code. */
353 * ntfs_look_for_free_space - Look for a free space in bitmap.
355 int ntfs_look_for_free_space(struct ntfs_sb_info *sbi, CLST lcn, CLST len,
356 CLST *new_lcn, CLST *new_len,
357 enum ALLOCATE_OPT opt)
361 struct super_block *sb = sbi->sb;
362 size_t alcn, zlen, zeroes, zlcn, zlen2, ztrim, new_zlen;
363 struct wnd_bitmap *wnd = &sbi->used.bitmap;
/* All allocation below happens under the cluster bitmap write lock. */
365 down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
/* MFT allocations are carved from the reserved MFT zone. */
366 if (opt & ALLOCATE_MFT) {
367 zlen = wnd_zone_len(wnd);
370 err = ntfs_refresh_zone(sbi);
374 zlen = wnd_zone_len(wnd);
378 ntfs_err(sbi->sb, "no free space to extend mft");
383 lcn = wnd_zone_bit(wnd);
384 alen = zlen > len ? len : zlen;
/* Shrink the zone by what we just took. */
386 wnd_zone_set(wnd, lcn + alen, zlen - alen);
388 err = wnd_set_used(wnd, lcn, alen);
396 * 'Cause cluster 0 is always used this value means that we should use
397 * cached value of 'next_free_lcn' to improve performance.
400 lcn = sbi->used.next_free_lcn;
402 if (lcn >= wnd->nbits)
405 alen = wnd_find(wnd, len, lcn, BITMAP_FIND_MARK_AS_USED, &alcn);
409 /* Try to use clusters from MftZone. */
410 zlen = wnd_zone_len(wnd);
411 zeroes = wnd_zeroes(wnd);
413 /* Check too big request */
414 if (len > zeroes + zlen || zlen <= NTFS_MIN_MFT_ZONE) {
419 /* How many clusters to cat from zone. */
420 zlcn = wnd_zone_bit(wnd);
422 ztrim = len > zlen ? zlen : (len > zlen2 ? len : zlen2);
423 new_zlen = zlen - ztrim;
/* Never shrink the MFT zone below its minimum. */
425 if (new_zlen < NTFS_MIN_MFT_ZONE)
426 new_zlen = NTFS_MIN_MFT_ZONE;
428 wnd_zone_set(wnd, zlcn, new_zlen);
430 /* Allocate continues clusters. */
431 alen = wnd_find(wnd, len, 0,
432 BITMAP_FIND_MARK_AS_USED | BITMAP_FIND_FULL, &alcn);
443 ntfs_unmap_meta(sb, alcn, alen);
445 /* Set hint for next requests. */
446 if (!(opt & ALLOCATE_MFT))
447 sbi->used.next_free_lcn = alcn + alen;
449 up_write(&wnd->rw_lock);
/* NOTE(review): sampled listing — intermediate source lines are missing;
 * the leading numbers are extraction artifacts, not code. */
454 * ntfs_extend_mft - Allocate additional MFT records.
456 * sbi->mft.bitmap is locked for write.
459 * ntfs_look_free_mft ->
462 * ni_insert_nonresident ->
465 * ntfs_look_free_mft ->
468 * To avoid recursive always allocate space for two new MFT records
469 * see attrib.c: "at least two MFT to avoid recursive loop".
471 static int ntfs_extend_mft(struct ntfs_sb_info *sbi)
474 struct ntfs_inode *ni = sbi->mft.ni;
475 size_t new_mft_total;
476 u64 new_mft_bytes, new_bitmap_bytes;
478 struct wnd_bitmap *wnd = &sbi->mft.bitmap;
/* Round the new record count up to a multiple of 128. */
480 new_mft_total = (wnd->nbits + MFT_INCREASE_CHUNK + 127) & (CLST)~127;
481 new_mft_bytes = (u64)new_mft_total << sbi->record_bits;
483 /* Step 1: Resize $MFT::DATA. */
484 down_write(&ni->file.run_lock);
485 err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
486 new_mft_bytes, NULL, false, &attr);
489 up_write(&ni->file.run_lock);
493 attr->nres.valid_size = attr->nres.data_size;
/* Recompute the total from what was actually allocated. */
494 new_mft_total = le64_to_cpu(attr->nres.alloc_size) >> sbi->record_bits;
497 /* Step 2: Resize $MFT::BITMAP. */
498 new_bitmap_bytes = bitmap_size(new_mft_total);
500 err = attr_set_size(ni, ATTR_BITMAP, NULL, 0, &sbi->mft.bitmap.run,
501 new_bitmap_bytes, &new_bitmap_bytes, true, NULL);
503 /* Refresh MFT Zone if necessary. */
504 down_write_nested(&sbi->used.bitmap.rw_lock, BITMAP_MUTEX_CLUSTERS);
506 ntfs_refresh_zone(sbi);
508 up_write(&sbi->used.bitmap.rw_lock);
509 up_write(&ni->file.run_lock);
514 err = wnd_extend(wnd, new_mft_total);
519 ntfs_clear_mft_tail(sbi, sbi->mft.used, new_mft_total);
521 err = _ni_write_inode(&ni->vfs_inode, 0);
/* NOTE(review): sampled listing — intermediate source lines are missing;
 * the leading numbers are extraction artifacts, not code. */
527 * ntfs_look_free_mft - Look for a free MFT record.
529 int ntfs_look_free_mft(struct ntfs_sb_info *sbi, CLST *rno, bool mft,
530 struct ntfs_inode *ni, struct mft_inode **mi)
533 size_t zbit, zlen, from, to, fr;
536 struct super_block *sb = sbi->sb;
537 struct wnd_bitmap *wnd = &sbi->mft.bitmap;
/* The small in-memory reserved bitmap must cover records [11, 16). */
540 static_assert(sizeof(sbi->mft.reserved_bitmap) * 8 >=
541 MFT_REC_FREE - MFT_REC_RESERVED);
544 down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
546 zlen = wnd_zone_len(wnd);
548 /* Always reserve space for MFT. */
551 zbit = wnd_zone_bit(wnd);
553 wnd_zone_set(wnd, zbit + 1, zlen - 1);
558 /* No MFT zone. Find the nearest to '0' free MFT. */
559 if (!wnd_find(wnd, 1, MFT_REC_FREE, 0, &zbit)) {
561 mft_total = wnd->nbits;
563 err = ntfs_extend_mft(sbi);
569 if (!mft || MFT_REC_FREE == sbi->mft.next_reserved)
575 * Look for free record reserved area [11-16) ==
576 * [MFT_REC_RESERVED, MFT_REC_FREE ) MFT bitmap always
579 if (!sbi->mft.reserved_bitmap) {
580 /* Once per session create internal bitmap for 5 bits. */
581 sbi->mft.reserved_bitmap = 0xFF;
/* Probe each reserved record; clear its bit only if it is truly unused. */
584 for (ir = MFT_REC_RESERVED; ir < MFT_REC_FREE; ir++) {
586 struct ntfs_inode *ni;
587 struct MFT_REC *mrec;
589 ref.low = cpu_to_le32(ir);
590 ref.seq = cpu_to_le16(ir);
592 i = ntfs_iget5(sb, &ref, NULL);
597 "Invalid reserved record %x",
601 if (is_bad_inode(i)) {
610 if (!is_rec_base(mrec))
613 if (mrec->hard_links)
619 if (ni_find_attr(ni, NULL, NULL, ATTR_NAME,
620 NULL, 0, NULL, NULL))
623 __clear_bit(ir - MFT_REC_RESERVED,
624 &sbi->mft.reserved_bitmap);
628 /* Scan 5 bits for zero. Bit 0 == MFT_REC_RESERVED */
629 zbit = find_next_zero_bit(&sbi->mft.reserved_bitmap,
630 MFT_REC_FREE, MFT_REC_RESERVED);
631 if (zbit >= MFT_REC_FREE) {
632 sbi->mft.next_reserved = MFT_REC_FREE;
637 sbi->mft.next_reserved = zbit;
/* Pick a zone length, clamped to the bitmap, shrunk until it is free. */
640 zlen = zbit == MFT_REC_FREE ? (MFT_REC_USER - MFT_REC_FREE) : 4;
641 if (zbit + zlen > wnd->nbits)
642 zlen = wnd->nbits - zbit;
644 while (zlen > 1 && !wnd_is_free(wnd, zbit, zlen))
647 /* [zbit, zbit + zlen) will be used for MFT itself. */
648 from = sbi->mft.used;
653 ntfs_clear_mft_tail(sbi, from, to);
664 wnd_zone_set(wnd, zbit, zlen);
668 /* The request to get record for general purpose. */
669 if (sbi->mft.next_free < MFT_REC_USER)
670 sbi->mft.next_free = MFT_REC_USER;
673 if (sbi->mft.next_free >= sbi->mft.bitmap.nbits) {
674 } else if (!wnd_find(wnd, 1, MFT_REC_USER, 0, &fr)) {
675 sbi->mft.next_free = sbi->mft.bitmap.nbits;
678 sbi->mft.next_free = *rno + 1;
682 err = ntfs_extend_mft(sbi);
688 if (ni && !ni_add_subrecord(ni, *rno, mi)) {
693 /* We have found a record that are not reserved for next MFT. */
694 if (*rno >= MFT_REC_FREE)
695 wnd_set_used(wnd, *rno, 1);
696 else if (*rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited)
697 __set_bit(*rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
701 up_write(&wnd->rw_lock);
/* NOTE(review): sampled listing — intermediate source lines are missing;
 * the leading numbers are extraction artifacts, not code. */
707 * ntfs_mark_rec_free - Mark record as free.
709 void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno)
711 struct wnd_bitmap *wnd = &sbi->mft.bitmap;
713 down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
714 if (rno >= wnd->nbits)
/* Ordinary records: flag the volume dirty if the bit was already clear. */
717 if (rno >= MFT_REC_FREE) {
718 if (!wnd_is_used(wnd, rno, 1))
719 ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
721 wnd_set_free(wnd, rno, 1);
722 } else if (rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited) {
723 __clear_bit(rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
/* Update allocation hints so the freed record is reused soon. */
726 if (rno < wnd_zone_bit(wnd))
727 wnd_zone_set(wnd, rno, 1);
728 else if (rno < sbi->mft.next_free && rno >= MFT_REC_USER)
729 sbi->mft.next_free = rno;
732 up_write(&wnd->rw_lock);
/* NOTE(review): sampled listing — intermediate source lines are missing;
 * the leading numbers are extraction artifacts, not code. */
736 * ntfs_clear_mft_tail - Format empty records [from, to).
738 * sbi->mft.bitmap is locked for write.
740 int ntfs_clear_mft_tail(struct ntfs_sb_info *sbi, size_t from, size_t to)
745 struct runs_tree *run;
746 struct ntfs_inode *ni;
751 rs = sbi->record_size;
755 down_read(&ni->file.run_lock);
756 vbo = (u64)from * rs;
/* Stamp each record in the range with the template empty record. */
757 for (; from < to; from++, vbo += rs) {
758 struct ntfs_buffers nb;
760 err = ntfs_get_bh(sbi, run, vbo, rs, &nb);
764 err = ntfs_write_bh(sbi, &sbi->new_rec->rhdr, &nb, 0);
771 sbi->mft.used = from;
772 up_read(&ni->file.run_lock);
/* NOTE(review): sampled listing — intermediate source lines are missing;
 * the leading numbers are extraction artifacts, not code. */
777 * ntfs_refresh_zone - Refresh MFT zone.
779 * sbi->used.bitmap is locked for rw.
780 * sbi->mft.bitmap is locked for write.
781 * sbi->mft.ni->file.run_lock for write.
783 int ntfs_refresh_zone(struct ntfs_sb_info *sbi)
785 CLST zone_limit, zone_max, lcn, vcn, len;
787 struct wnd_bitmap *wnd = &sbi->used.bitmap;
788 struct ntfs_inode *ni = sbi->mft.ni;
790 /* Do not change anything unless we have non empty MFT zone. */
791 if (wnd_zone_len(wnd))
795 * Compute the MFT zone at two steps.
796 * It would be nice if we are able to allocate 1/8 of
797 * total clusters for MFT but not more then 512 MB.
799 zone_limit = (512 * 1024 * 1024) >> sbi->cluster_bits;
800 zone_max = wnd->nbits >> 3;
801 if (zone_max > zone_limit)
802 zone_max = zone_limit;
/* Find the cluster just past the end of the current MFT data run. */
804 vcn = bytes_to_cluster(sbi,
805 (u64)sbi->mft.bitmap.nbits << sbi->record_bits);
807 if (!run_lookup_entry(&ni->file.run, vcn - 1, &lcn, &len, NULL))
810 /* We should always find Last Lcn for MFT. */
811 if (lcn == SPARSE_LCN)
816 /* Try to allocate clusters after last MFT run. */
817 zlen = wnd_find(wnd, zone_max, lcn_s, 0, &lcn_s);
819 ntfs_notice(sbi->sb, "MftZone: unavailable");
823 /* Truncate too large zone. */
824 wnd_zone_set(wnd, lcn_s, zlen);
/* NOTE(review): sampled listing — intermediate source lines are missing;
 * the leading numbers are extraction artifacts, not code. */
830 * ntfs_update_mftmirr - Update $MFTMirr data.
832 int ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
835 struct super_block *sb = sbi->sb;
836 u32 blocksize = sb->s_blocksize;
837 sector_t block1, block2;
/* Nothing to do unless the mirror has been flagged stale. */
840 if (!(sbi->flags & NTFS_FLAGS_MFTMIRR))
844 bytes = sbi->mft.recs_mirr << sbi->record_bits;
845 block1 = sbi->mft.lbo >> sb->s_blocksize_bits;
846 block2 = sbi->mft.lbo2 >> sb->s_blocksize_bits;
/* Copy the mirrored MFT records block by block from $MFT to $MFTMirr. */
848 for (; bytes >= blocksize; bytes -= blocksize) {
849 struct buffer_head *bh1, *bh2;
851 bh1 = sb_bread(sb, block1++);
857 bh2 = sb_getblk(sb, block2++);
864 if (buffer_locked(bh2))
865 __wait_on_buffer(bh2);
868 memcpy(bh2->b_data, bh1->b_data, blocksize);
869 set_buffer_uptodate(bh2);
870 mark_buffer_dirty(bh2);
877 err = sync_dirty_buffer(bh2);
/* Mirror is in sync again. */
884 sbi->flags &= ~NTFS_FLAGS_MFTMIRR;
/* NOTE(review): sampled listing — intermediate source lines are missing;
 * the leading numbers are extraction artifacts, not code. */
893 * Mount: ntfs_set_state(NTFS_DIRTY_DIRTY)
894 * Umount: ntfs_set_state(NTFS_DIRTY_CLEAR)
895 * NTFS error: ntfs_set_state(NTFS_DIRTY_ERROR)
897 int ntfs_set_state(struct ntfs_sb_info *sbi, enum NTFS_DIRTY_FLAGS dirty)
901 struct VOLUME_INFO *info;
902 struct mft_inode *mi;
903 struct ntfs_inode *ni;
906 * Do not change state if fs was real_dirty.
907 * Do not change state if fs already dirty(clear).
908 * Do not change any thing if mounted read only.
910 if (sbi->volume.real_dirty || sb_rdonly(sbi->sb))
913 /* Check cached value. */
914 if ((dirty == NTFS_DIRTY_CLEAR ? 0 : VOLUME_FLAG_DIRTY) ==
915 (sbi->volume.flags & VOLUME_FLAG_DIRTY))
922 mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_DIRTY);
/* The dirty bit lives in the $Volume VOLUME_INFO attribute. */
924 attr = ni_find_attr(ni, NULL, NULL, ATTR_VOL_INFO, NULL, 0, NULL, &mi);
930 info = resident_data_ex(attr, SIZEOF_ATTRIBUTE_VOLUME_INFO);
937 case NTFS_DIRTY_ERROR:
938 ntfs_notice(sbi->sb, "Mark volume as dirty due to NTFS errors");
939 sbi->volume.real_dirty = true;
941 case NTFS_DIRTY_DIRTY:
942 info->flags |= VOLUME_FLAG_DIRTY;
944 case NTFS_DIRTY_CLEAR:
945 info->flags &= ~VOLUME_FLAG_DIRTY;
948 /* Cache current volume flags. */
949 sbi->volume.flags = info->flags;
958 mark_inode_dirty(&ni->vfs_inode);
959 /* verify(!ntfs_update_mftmirr()); */
962 * If we used wait=1, sync_inode_metadata waits for the io for the
963 * inode to finish. It hangs when media is removed.
964 * So wait=0 is sent down to sync_inode_metadata
965 * and filemap_fdatawrite is used for the data blocks.
967 err = sync_inode_metadata(&ni->vfs_inode, 0);
969 err = filemap_fdatawrite(ni->vfs_inode.i_mapping);
975 * security_hash - Calculates a hash of security descriptor.
977 static inline __le32 security_hash(const void *sd, size_t bytes)
980 const __le32 *ptr = sd;
984 hash = ((hash >> 0x1D) | (hash << 3)) + le32_to_cpu(*ptr++);
985 return cpu_to_le32(hash);
/* NOTE(review): sampled listing — intermediate source lines are missing;
 * the leading numbers are extraction artifacts, not code.
 * Reads @bytes at absolute byte offset @lbo via buffer heads. */
988 int ntfs_sb_read(struct super_block *sb, u64 lbo, size_t bytes, void *buffer)
990 struct block_device *bdev = sb->s_bdev;
991 u32 blocksize = sb->s_blocksize;
992 u64 block = lbo >> sb->s_blocksize_bits;
993 u32 off = lbo & (blocksize - 1);
994 u32 op = blocksize - off;
/* First iteration may be a partial block; later ones are full blocks. */
996 for (; bytes; block += 1, off = 0, op = blocksize) {
997 struct buffer_head *bh = __bread(bdev, block, blocksize);
1005 memcpy(buffer, bh->b_data + off, op);
1010 buffer = Add2Ptr(buffer, op);
/* NOTE(review): sampled listing — intermediate source lines are missing;
 * the leading numbers are extraction artifacts, not code.
 * Writes @bytes at byte offset @lbo; a NULL @buf apparently fills with -1
 * (see the memset branch below). */
1016 int ntfs_sb_write(struct super_block *sb, u64 lbo, size_t bytes,
1017 const void *buf, int wait)
1019 u32 blocksize = sb->s_blocksize;
1020 struct block_device *bdev = sb->s_bdev;
1021 sector_t block = lbo >> sb->s_blocksize_bits;
1022 u32 off = lbo & (blocksize - 1);
1023 u32 op = blocksize - off;
1024 struct buffer_head *bh;
/* On a synchronous superblock, force waiting writes. */
1026 if (!wait && (sb->s_flags & SB_SYNCHRONOUS))
1029 for (; bytes; block += 1, off = 0, op = blocksize) {
/* Partial block: read-modify-write; full block: overwrite in place. */
1033 if (op < blocksize) {
1034 bh = __bread(bdev, block, blocksize);
1036 ntfs_err(sb, "failed to read block %llx",
1041 bh = __getblk(bdev, block, blocksize);
1046 if (buffer_locked(bh))
1047 __wait_on_buffer(bh);
1051 memcpy(bh->b_data + off, buf, op);
1052 buf = Add2Ptr(buf, op);
1054 memset(bh->b_data + off, -1, op);
1057 set_buffer_uptodate(bh);
1058 mark_buffer_dirty(bh);
1062 int err = sync_dirty_buffer(bh);
1067 "failed to sync buffer at block %llx, error %d",
/* NOTE(review): sampled listing — intermediate source lines are missing;
 * the leading numbers are extraction artifacts, not code.
 * Writes @bytes at virtual offset @vbo by walking the run list. */
1081 int ntfs_sb_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
1082 u64 vbo, const void *buf, size_t bytes)
1084 struct super_block *sb = sbi->sb;
1085 u8 cluster_bits = sbi->cluster_bits;
1086 u32 off = vbo & sbi->cluster_mask;
1087 CLST lcn, clen, vcn = vbo >> cluster_bits, vcn_next;
1091 if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
/* Sparse runs cannot be written through this path. */
1094 if (lcn == SPARSE_LCN)
1097 lbo = ((u64)lcn << cluster_bits) + off;
1098 len = ((u64)clen << cluster_bits) - off;
1101 u32 op = len < bytes ? len : bytes;
1102 int err = ntfs_sb_write(sb, lbo, op, buf, 0);
/* Advance to the next run; it must be contiguous in VCN space. */
1111 vcn_next = vcn + clen;
1112 if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
1116 if (lcn == SPARSE_LCN)
1120 buf = Add2Ptr(buf, op);
1122 lbo = ((u64)lcn << cluster_bits);
1123 len = ((u64)clen << cluster_bits);
1129 struct buffer_head *ntfs_bread_run(struct ntfs_sb_info *sbi,
1130 const struct runs_tree *run, u64 vbo)
1132 struct super_block *sb = sbi->sb;
1133 u8 cluster_bits = sbi->cluster_bits;
1137 if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, NULL, NULL))
1138 return ERR_PTR(-ENOENT);
1140 lbo = ((u64)lcn << cluster_bits) + (vbo & sbi->cluster_mask);
1142 return ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
/* NOTE(review): sampled listing — intermediate source lines are missing;
 * the leading numbers are extraction artifacts, not code.
 * Reads @bytes at @vbo of @run into @buf and/or collects the buffer heads
 * into @nb for later writing. */
1145 int ntfs_read_run_nb(struct ntfs_sb_info *sbi, const struct runs_tree *run,
1146 u64 vbo, void *buf, u32 bytes, struct ntfs_buffers *nb)
1149 struct super_block *sb = sbi->sb;
1150 u32 blocksize = sb->s_blocksize;
1151 u8 cluster_bits = sbi->cluster_bits;
1152 u32 off = vbo & sbi->cluster_mask;
1154 CLST vcn_next, vcn = vbo >> cluster_bits;
1158 struct buffer_head *bh;
/* Bootstrap path: before $MFT's run list is loaded, early metafile
 * records are read at a fixed offset from the boot-sector MFT LBO. */
1161 /* First reading of $Volume + $MFTMirr + $LogFile goes here. */
1162 if (vbo > MFT_REC_VOL * sbi->record_size) {
1167 /* Use absolute boot's 'MFTCluster' to read record. */
1168 lbo = vbo + sbi->mft.lbo;
1169 len = sbi->record_size;
1170 } else if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
1174 if (lcn == SPARSE_LCN) {
1179 lbo = ((u64)lcn << cluster_bits) + off;
1180 len = ((u64)clen << cluster_bits) - off;
1183 off = lbo & (blocksize - 1);
1190 u32 len32 = len >= bytes ? bytes : len;
1191 sector_t block = lbo >> sb->s_blocksize_bits;
1194 u32 op = blocksize - off;
1199 bh = ntfs_bread(sb, block);
1206 memcpy(buf, bh->b_data + off, op);
1207 buf = Add2Ptr(buf, op);
/* @nb has a fixed-size bh array; overflow is an error. */
1212 } else if (nbh >= ARRAY_SIZE(nb->bh)) {
1229 vcn_next = vcn + clen;
1230 if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
1236 if (lcn == SPARSE_LCN) {
1241 lbo = ((u64)lcn << cluster_bits);
1242 len = ((u64)clen << cluster_bits);
/* Error path: release any buffer heads collected so far. */
1250 put_bh(nb->bh[--nbh]);
1261 * Return: < 0 if error, 0 if ok, -E_NTFS_FIXUP if need to update fixups.
1263 int ntfs_read_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
1264 struct NTFS_RECORD_HEADER *rhdr, u32 bytes,
1265 struct ntfs_buffers *nb)
1267 int err = ntfs_read_run_nb(sbi, run, vbo, rhdr, bytes, nb);
1271 return ntfs_fix_post_read(rhdr, nb->bytes, true);
/* NOTE(review): sampled listing — intermediate source lines are missing;
 * the leading numbers are extraction artifacts, not code.
 * Collects buffer heads covering [@vbo, @vbo + @bytes) into @nb for
 * writing; full blocks are grabbed without reading. */
1274 int ntfs_get_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
1275 u32 bytes, struct ntfs_buffers *nb)
1278 struct super_block *sb = sbi->sb;
1279 u32 blocksize = sb->s_blocksize;
1280 u8 cluster_bits = sbi->cluster_bits;
1281 CLST vcn_next, vcn = vbo >> cluster_bits;
1290 if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
1295 off = vbo & sbi->cluster_mask;
1296 lbo = ((u64)lcn << cluster_bits) + off;
1297 len = ((u64)clen << cluster_bits) - off;
1299 nb->off = off = lbo & (blocksize - 1);
1302 u32 len32 = len < bytes ? len : bytes;
1303 sector_t block = lbo >> sb->s_blocksize_bits;
1307 struct buffer_head *bh;
1309 if (nbh >= ARRAY_SIZE(nb->bh)) {
1314 op = blocksize - off;
/* Whole block will be overwritten: no need to read it from disk. */
1318 if (op == blocksize) {
1319 bh = sb_getblk(sb, block);
1324 if (buffer_locked(bh))
1325 __wait_on_buffer(bh);
1326 set_buffer_uptodate(bh);
1328 bh = ntfs_bread(sb, block);
1347 vcn_next = vcn + clen;
1348 if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
1354 lbo = ((u64)lcn << cluster_bits);
1355 len = ((u64)clen << cluster_bits);
/* Error path: release any buffer heads collected so far. */
1360 put_bh(nb->bh[--nbh]);
/* NOTE(review): sampled listing — intermediate source lines are missing;
 * the leading numbers are extraction artifacts, not code.
 * Applies fixups to @rhdr while copying it into the buffer heads of @nb,
 * then marks (and optionally syncs) them. */
1369 int ntfs_write_bh(struct ntfs_sb_info *sbi, struct NTFS_RECORD_HEADER *rhdr,
1370 struct ntfs_buffers *nb, int sync)
1373 struct super_block *sb = sbi->sb;
1374 u32 block_size = sb->s_blocksize;
1375 u32 bytes = nb->bytes;
1377 u16 fo = le16_to_cpu(rhdr->fix_off);
1378 u16 fn = le16_to_cpu(rhdr->fix_num);
/* Same fixup-array sanity check as ntfs_fix_pre_write. */
1383 if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
1384 fn * SECTOR_SIZE > bytes) {
1388 for (idx = 0; bytes && idx < nb->nbufs; idx += 1, off = 0) {
1389 u32 op = block_size - off;
1391 struct buffer_head *bh = nb->bh[idx];
1392 __le16 *ptr, *end_data;
1397 if (buffer_locked(bh))
1398 __wait_on_buffer(bh);
1400 lock_buffer(nb->bh[idx]);
1402 bh_data = bh->b_data + off;
1403 end_data = Add2Ptr(bh_data, op);
1404 memcpy(bh_data, rhdr, op);
/* Bump the update-sequence number, wrapping at 0x7FFF back to 1. */
1409 fixup = Add2Ptr(bh_data, fo);
1411 t16 = le16_to_cpu(sample);
1412 if (t16 >= 0x7FFF) {
1413 sample = *fixup = cpu_to_le16(1);
1415 sample = cpu_to_le16(t16 + 1);
/* Keep the in-memory header's sequence number in sync. */
1419 *(__le16 *)Add2Ptr(rhdr, fo) = sample;
1422 ptr = Add2Ptr(bh_data, SECTOR_SIZE - sizeof(short));
1427 ptr += SECTOR_SIZE / sizeof(short);
1428 } while (ptr < end_data);
1430 set_buffer_uptodate(bh);
1431 mark_buffer_dirty(bh);
1435 int err2 = sync_dirty_buffer(bh);
1442 rhdr = Add2Ptr(rhdr, op);
1448 static inline struct bio *ntfs_alloc_bio(u32 nr_vecs)
1450 struct bio *bio = bio_alloc(GFP_NOFS | __GFP_HIGH, nr_vecs);
1452 if (!bio && (current->flags & PF_MEMALLOC)) {
1453 while (!bio && (nr_vecs /= 2))
1454 bio = bio_alloc(GFP_NOFS | __GFP_HIGH, nr_vecs);
/* NOTE(review): sampled listing — intermediate source lines are missing;
 * the leading numbers are extraction artifacts, not code. */
1460 * ntfs_bio_pages - Read/write pages from/to disk.
1462 int ntfs_bio_pages(struct ntfs_sb_info *sbi, const struct runs_tree *run,
1463 struct page **pages, u32 nr_pages, u64 vbo, u32 bytes,
1467 struct bio *new, *bio = NULL;
1468 struct super_block *sb = sbi->sb;
1469 struct block_device *bdev = sb->s_bdev;
1471 u8 cluster_bits = sbi->cluster_bits;
1472 CLST lcn, clen, vcn, vcn_next;
1473 u32 add, off, page_idx;
1476 struct blk_plug plug;
1481 blk_start_plug(&plug);
1483 /* Align vbo and bytes to be 512 bytes aligned. */
1484 lbo = (vbo + bytes + 511) & ~511ull;
1485 vbo = vbo & ~511ull;
1488 vcn = vbo >> cluster_bits;
1489 if (!run_lookup_entry(run, vcn, &lcn, &clen, &run_idx)) {
1493 off = vbo & sbi->cluster_mask;
1498 lbo = ((u64)lcn << cluster_bits) + off;
1499 len = ((u64)clen << cluster_bits) - off;
/* One bio per run extent; chain bios across extents. */
1501 new = ntfs_alloc_bio(nr_pages - page_idx);
1507 bio_chain(bio, new);
1511 bio_set_dev(bio, bdev);
1512 bio->bi_iter.bi_sector = lbo >> 9;
1516 off = vbo & (PAGE_SIZE - 1);
1517 add = off + len > PAGE_SIZE ? (PAGE_SIZE - off) : len;
/* bio full: fall through to submit/chain a new one. */
1519 if (bio_add_page(bio, page, add, off) < add)
1527 if (add + off == PAGE_SIZE) {
1529 if (WARN_ON(page_idx >= nr_pages)) {
1533 page = pages[page_idx];
1542 vcn_next = vcn + clen;
1543 if (!run_get_entry(run, ++run_idx, &vcn, &lcn, &clen) ||
1553 err = submit_bio_wait(bio);
1556 blk_finish_plug(&plug);
/* NOTE(review): sampled listing — intermediate source lines are missing;
 * the leading numbers are extraction artifacts, not code. */
1562 * ntfs_bio_fill_1 - Helper for ntfs_loadlog_and_replay().
1564 * Fill on-disk logfile range by (-1)
1565 * this means empty logfile.
1567 int ntfs_bio_fill_1(struct ntfs_sb_info *sbi, const struct runs_tree *run)
1570 struct super_block *sb = sbi->sb;
1571 struct block_device *bdev = sb->s_bdev;
1572 u8 cluster_bits = sbi->cluster_bits;
1573 struct bio *new, *bio = NULL;
1579 struct blk_plug plug;
/* One page of 0xFF bytes is written repeatedly over the whole range. */
1581 fill = alloc_page(GFP_KERNEL);
1585 kaddr = kmap_atomic(fill);
1586 memset(kaddr, -1, PAGE_SIZE);
1587 kunmap_atomic(kaddr);
1588 flush_dcache_page(fill);
1591 if (!run_lookup_entry(run, 0, &lcn, &clen, &run_idx)) {
1597 * TODO: Try blkdev_issue_write_same.
1599 blk_start_plug(&plug);
1601 lbo = (u64)lcn << cluster_bits;
1602 len = (u64)clen << cluster_bits;
1604 new = ntfs_alloc_bio(BIO_MAX_VECS);
1610 bio_chain(bio, new);
1614 bio_set_dev(bio, bdev);
1615 bio->bi_opf = REQ_OP_WRITE;
1616 bio->bi_iter.bi_sector = lbo >> 9;
1619 u32 add = len > PAGE_SIZE ? PAGE_SIZE : len;
1621 if (bio_add_page(bio, fill, add, 0) < add)
1629 } while (run_get_entry(run, ++run_idx, NULL, &lcn, &clen));
1633 err = submit_bio_wait(bio);
1636 blk_finish_plug(&plug);
1644 int ntfs_vbo_to_lbo(struct ntfs_sb_info *sbi, const struct runs_tree *run,
1645 u64 vbo, u64 *lbo, u64 *bytes)
1649 u8 cluster_bits = sbi->cluster_bits;
1651 if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, &len, NULL))
1654 off = vbo & sbi->cluster_mask;
1655 *lbo = lcn == SPARSE_LCN ? -1 : (((u64)lcn << cluster_bits) + off);
1656 *bytes = ((u64)len << cluster_bits) - off;
/* NOTE(review): sampled listing — intermediate source lines are missing;
 * the leading numbers are extraction artifacts, not code.
 * Creates a new in-memory inode bound to MFT record @rno; @dir selects
 * the directory record flag. */
1661 struct ntfs_inode *ntfs_new_inode(struct ntfs_sb_info *sbi, CLST rno, bool dir)
1664 struct super_block *sb = sbi->sb;
1665 struct inode *inode = new_inode(sb);
1666 struct ntfs_inode *ni;
1669 return ERR_PTR(-ENOMEM);
1673 err = mi_format_new(&ni->mi, sbi, rno, dir ? RECORD_FLAG_DIR : 0,
1679 if (insert_inode_locked(inode) < 0) {
1693 * O:BAG:BAD:(A;OICI;FA;;;WD)
1694 * Owner S-1-5-32-544 (Administrators)
1695 * Group S-1-5-32-544 (Administrators)
1696 * ACE: allow S-1-1-0 (Everyone) with FILE_ALL_ACCESS
1698 const u8 s_default_security[] __aligned(8) = {
1699 0x01, 0x00, 0x04, 0x80, 0x30, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
1700 0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x02, 0x00, 0x1C, 0x00,
1701 0x01, 0x00, 0x00, 0x00, 0x00, 0x03, 0x14, 0x00, 0xFF, 0x01, 0x1F, 0x00,
1702 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
1703 0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x20, 0x00, 0x00, 0x00,
1704 0x20, 0x02, 0x00, 0x00, 0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
1705 0x20, 0x00, 0x00, 0x00, 0x20, 0x02, 0x00, 0x00,
1708 static_assert(sizeof(s_default_security) == 0x50);
1710 static inline u32 sid_length(const struct SID *sid)
1712 return struct_size(sid, SubAuthority, sid->SubAuthorityCount);
/* NOTE(review): sampled listing — intermediate source lines are missing;
 * the leading numbers are extraction artifacts, not code.
 * Bounds-checks an on-disk ACL against @len before it is trusted. */
1718 * Thanks Mark Harmstone for idea.
1720 static bool is_acl_valid(const struct ACL *acl, u32 len)
1722 const struct ACE_HEADER *ace;
1724 u16 ace_count, ace_size;
1726 if (acl->AclRevision != ACL_REVISION &&
1727 acl->AclRevision != ACL_REVISION_DS) {
1729 * This value should be ACL_REVISION, unless the ACL contains an
1730 * object-specific ACE, in which case this value must be ACL_REVISION_DS.
1731 * All ACEs in an ACL must be at the same revision level.
1739 if (le16_to_cpu(acl->AclSize) > len)
/* Walk each ACE, checking its header and size stay within the buffer. */
1745 len -= sizeof(struct ACL);
1746 ace = (struct ACE_HEADER *)&acl[1];
1747 ace_count = le16_to_cpu(acl->AceCount);
1749 for (i = 0; i < ace_count; i++) {
1750 if (len < sizeof(struct ACE_HEADER))
1753 ace_size = le16_to_cpu(ace->AceSize);
1758 ace = Add2Ptr(ace, ace_size);
/* NOTE(review): sampled listing — intermediate source lines are missing;
 * the leading numbers are extraction artifacts, not code.
 * Validates a self-relative security descriptor: header, then Owner,
 * Group, SACL and DACL offsets/sizes against @len. */
1764 bool is_sd_valid(const struct SECURITY_DESCRIPTOR_RELATIVE *sd, u32 len)
1766 u32 sd_owner, sd_group, sd_sacl, sd_dacl;
1768 if (len < sizeof(struct SECURITY_DESCRIPTOR_RELATIVE))
1771 if (sd->Revision != 1)
1777 if (!(sd->Control & SE_SELF_RELATIVE))
1780 sd_owner = le32_to_cpu(sd->Owner);
1782 const struct SID *owner = Add2Ptr(sd, sd_owner);
1784 if (sd_owner + offsetof(struct SID, SubAuthority) > len)
1787 if (owner->Revision != 1)
1790 if (sd_owner + sid_length(owner) > len)
1794 sd_group = le32_to_cpu(sd->Group);
1796 const struct SID *group = Add2Ptr(sd, sd_group);
1798 if (sd_group + offsetof(struct SID, SubAuthority) > len)
1801 if (group->Revision != 1)
1804 if (sd_group + sid_length(group) > len)
1808 sd_sacl = le32_to_cpu(sd->Sacl);
1810 const struct ACL *sacl = Add2Ptr(sd, sd_sacl);
1812 if (sd_sacl + sizeof(struct ACL) > len)
1815 if (!is_acl_valid(sacl, len - sd_sacl))
1819 sd_dacl = le32_to_cpu(sd->Dacl);
1821 const struct ACL *dacl = Add2Ptr(sd, sd_dacl);
1823 if (sd_dacl + sizeof(struct ACL) > len)
1826 if (!is_acl_valid(dacl, len - sd_dacl))
/* NOTE(review): sampled listing — intermediate source lines are missing;
 * the leading numbers are extraction artifacts, not code. */
1834 * ntfs_security_init - Load and parse $Secure.
1836 int ntfs_security_init(struct ntfs_sb_info *sbi)
1839 struct super_block *sb = sbi->sb;
1840 struct inode *inode;
1841 struct ntfs_inode *ni;
1843 struct ATTRIB *attr;
1844 struct ATTR_LIST_ENTRY *le;
1848 struct NTFS_DE_SII *sii_e;
1849 struct ntfs_fnd *fnd_sii = NULL;
1850 const struct INDEX_ROOT *root_sii;
1851 const struct INDEX_ROOT *root_sdh;
1852 struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
1853 struct ntfs_index *indx_sii = &sbi->security.index_sii;
1855 ref.low = cpu_to_le32(MFT_REC_SECURE);
1857 ref.seq = cpu_to_le16(MFT_REC_SECURE);
1859 inode = ntfs_iget5(sb, &ref, &NAME_SECURE);
1860 if (IS_ERR(inode)) {
1861 err = PTR_ERR(inode);
1862 ntfs_err(sb, "Failed to load $Secure.");
/* $SDH index: security descriptors keyed by hash. */
1871 attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SDH_NAME,
1872 ARRAY_SIZE(SDH_NAME), NULL, NULL);
1878 root_sdh = resident_data(attr);
1879 if (root_sdh->type != ATTR_ZERO ||
1880 root_sdh->rule != NTFS_COLLATION_TYPE_SECURITY_HASH) {
1885 err = indx_init(indx_sdh, sbi, attr, INDEX_MUTEX_SDH);
/* $SII index: security descriptors keyed by id. */
1889 attr = ni_find_attr(ni, attr, &le, ATTR_ROOT, SII_NAME,
1890 ARRAY_SIZE(SII_NAME), NULL, NULL);
1896 root_sii = resident_data(attr);
1897 if (root_sii->type != ATTR_ZERO ||
1898 root_sii->rule != NTFS_COLLATION_TYPE_UINT) {
1903 err = indx_init(indx_sii, sbi, attr, INDEX_MUTEX_SII);
1907 fnd_sii = fnd_get();
1913 sds_size = inode->i_size;
1915 /* Find the last valid Id. */
1916 sbi->security.next_id = SECURITY_ID_FIRST;
1917 /* Always write new security at the end of bucket. */
1918 sbi->security.next_off =
1919 ALIGN(sds_size - SecurityDescriptorsBlockSize, 16);
/* Scan $SII entries to learn the highest security id in use. */
1927 err = indx_find_raw(indx_sii, ni, root_sii, &ne, &off, fnd_sii);
1931 sii_e = (struct NTFS_DE_SII *)ne;
1932 if (le16_to_cpu(ne->view.data_size) < SIZEOF_SECURITY_HDR)
1935 next_id = le32_to_cpu(sii_e->sec_id) + 1;
1936 if (next_id >= sbi->security.next_id)
1937 sbi->security.next_id = next_id;
1940 sbi->security.ni = ni;
/*
 * NOTE(review): sampled listing - intervening error paths, labels and
 * the function tail are elided; comments describe visible lines only.
 */
1950 * ntfs_get_security_by_id - Read security descriptor by id.
1952 int ntfs_get_security_by_id(struct ntfs_sb_info *sbi, __le32 security_id,
1953 struct SECURITY_DESCRIPTOR_RELATIVE **sd,
1958 struct ntfs_inode *ni = sbi->security.ni;
1959 struct ntfs_index *indx = &sbi->security.index_sii;
1961 struct NTFS_DE_SII *sii_e;
1962 struct ntfs_fnd *fnd_sii;
1963 struct SECURITY_HDR d_security;
1964 const struct INDEX_ROOT *root_sii;
/* Serialize against other $Secure users (nested lock class SECURITY). */
1969 mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);
1971 fnd_sii = fnd_get();
1977 root_sii = indx_get_root(indx, ni, NULL, NULL);
1983 /* Try to find this SECURITY descriptor in SII indexes. */
1984 err = indx_find(indx, ni, root_sii, &security_id, sizeof(security_id),
1985 NULL, &diff, (struct NTFS_DE **)&sii_e, fnd_sii);
/* Sanity-check the size recorded in the index entry's header copy. */
1992 t32 = le32_to_cpu(sii_e->sec_hdr.size);
1993 if (t32 < SIZEOF_SECURITY_HDR) {
1998 if (t32 > SIZEOF_SECURITY_HDR + 0x10000) {
1999 /* Looks like too big security. 0x10000 - is arbitrary big number. */
/* Caller receives the descriptor body size (header excluded). */
2004 *size = t32 - SIZEOF_SECURITY_HDR;
2006 p = kmalloc(*size, GFP_NOFS);
/*
 * Read the on-disk header from the SDS stream and cross-check it
 * against the copy stored in the SII entry to detect corruption.
 */
2012 err = ntfs_read_run_nb(sbi, &ni->file.run,
2013 le64_to_cpu(sii_e->sec_hdr.off), &d_security,
2014 sizeof(d_security), NULL);
2018 if (memcmp(&d_security, &sii_e->sec_hdr, SIZEOF_SECURITY_HDR)) {
/* Headers match: read the descriptor body that follows the header. */
2023 err = ntfs_read_run_nb(sbi, &ni->file.run,
2024 le64_to_cpu(sii_e->sec_hdr.off) +
2025 SIZEOF_SECURITY_HDR,
/*
 * NOTE(review): sampled listing - error-handling lines, labels and the
 * function tail are elided; comments describe visible lines only.
 */
2042 * ntfs_insert_security - Insert security descriptor into $Secure::SDS.
2044 * SECURITY Descriptor Stream data is organized into chunks of 256K bytes
2045 * and it contains a mirror copy of each security descriptor. When writing
2046 * to a security descriptor at location X, another copy will be written at
2047 * location (X+256K).
2048 * When writing a security descriptor that will cross the 256K boundary,
2049 * the pointer will be advanced by 256K to skip
2050 * over the mirror portion.
2052 int ntfs_insert_security(struct ntfs_sb_info *sbi,
2053 const struct SECURITY_DESCRIPTOR_RELATIVE *sd,
2054 u32 size_sd, __le32 *security_id, bool *inserted)
2057 struct ntfs_inode *ni = sbi->security.ni;
2058 struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
2059 struct ntfs_index *indx_sii = &sbi->security.index_sii;
2060 struct NTFS_DE_SDH *e;
2061 struct NTFS_DE_SDH sdh_e;
2062 struct NTFS_DE_SII sii_e;
2063 struct SECURITY_HDR *d_security;
/* On-disk record = header + descriptor, padded to 16 bytes. */
2064 u32 new_sec_size = size_sd + SIZEOF_SECURITY_HDR;
2065 u32 aligned_sec_size = ALIGN(new_sec_size, 16);
2066 struct SECURITY_KEY hash_key;
2067 struct ntfs_fnd *fnd_sdh = NULL;
2068 const struct INDEX_ROOT *root_sdh;
2069 const struct INDEX_ROOT *root_sii;
2070 u64 mirr_off, new_sds_size;
2073 static_assert((1 << Log2OfSecurityDescriptorsBlockSize) ==
2074 SecurityDescriptorsBlockSize);
/* Key for the SDH (hash) lookup; sec_id is not part of the search. */
2076 hash_key.hash = security_hash(sd, size_sd);
2077 hash_key.sec_id = SECURITY_ID_INVALID;
2081 *security_id = SECURITY_ID_INVALID;
2083 /* Allocate a temporal buffer. */
2084 d_security = kzalloc(aligned_sec_size, GFP_NOFS);
2088 mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);
2090 fnd_sdh = fnd_get();
2096 root_sdh = indx_get_root(indx_sdh, ni, NULL, NULL);
2102 root_sii = indx_get_root(indx_sii, ni, NULL, NULL);
2109 * Check if such security already exists.
2110 * Use "SDH" and hash -> to get the offset in "SDS".
2112 err = indx_find(indx_sdh, ni, root_sdh, &hash_key, sizeof(hash_key),
2113 &d_security->key.sec_id, &diff, (struct NTFS_DE **)&e,
/*
 * Dedup pass: for each SDH entry with a matching hash, read the stored
 * record and compare byte-for-byte; reuse its sec_id on a full match.
 */
2119 if (le32_to_cpu(e->sec_hdr.size) == new_sec_size) {
2120 err = ntfs_read_run_nb(sbi, &ni->file.run,
2121 le64_to_cpu(e->sec_hdr.off),
2122 d_security, new_sec_size, NULL);
2126 if (le32_to_cpu(d_security->size) == new_sec_size &&
2127 d_security->key.hash == hash_key.hash &&
2128 !memcmp(d_security + 1, sd, size_sd)) {
2129 *security_id = d_security->key.sec_id;
2130 /* Such security already exists. */
/* Advance to the next SDH entry in sort order with the same hash. */
2136 err = indx_find_sort(indx_sdh, ni, root_sdh,
2137 (struct NTFS_DE **)&e, fnd_sdh);
2141 if (!e || e->key.hash != hash_key.hash)
2145 /* Zero unused space. */
2146 next = sbi->security.next_off & (SecurityDescriptorsBlockSize - 1);
2147 left = SecurityDescriptorsBlockSize - next;
2149 /* Zero gap until SecurityDescriptorsBlockSize. */
2150 if (left < new_sec_size) {
2151 /* Zero "left" bytes from sbi->security.next_off. */
/* Record would cross the 256K boundary: skip over the mirror half. */
2152 sbi->security.next_off += SecurityDescriptorsBlockSize + left;
2155 /* Zero tail of previous security. */
2156 //used = ni->vfs_inode.i_size & (SecurityDescriptorsBlockSize - 1);
2160 * 0x40438 == ni->vfs_inode.i_size
2161 * 0x00440 == sbi->security.next_off
2162 * need to zero [0x438-0x440)
2163 * if (next > used) {
2164 * u32 tozero = next - used;
2165 * zero "tozero" bytes from sbi->security.next_off - tozero
2168 /* Format new security descriptor. */
2169 d_security->key.hash = hash_key.hash;
2170 d_security->key.sec_id = cpu_to_le32(sbi->security.next_id);
2171 d_security->off = cpu_to_le64(sbi->security.next_off);
2172 d_security->size = cpu_to_le32(new_sec_size);
2173 memcpy(d_security + 1, sd, size_sd);
2175 /* Write main SDS bucket. */
2176 err = ntfs_sb_write_run(sbi, &ni->file.run, sbi->security.next_off,
2177 d_security, aligned_sec_size);
/* Mirror copy lives one 256K block past the main copy. */
2182 mirr_off = sbi->security.next_off + SecurityDescriptorsBlockSize;
2183 new_sds_size = mirr_off + aligned_sec_size;
/* Grow the SDS data attribute if the mirror copy lands past EOF. */
2185 if (new_sds_size > ni->vfs_inode.i_size) {
2186 err = attr_set_size(ni, ATTR_DATA, SDS_NAME,
2187 ARRAY_SIZE(SDS_NAME), &ni->file.run,
2188 new_sds_size, &new_sds_size, false, NULL);
2193 /* Write copy SDS bucket. */
2194 err = ntfs_sb_write_run(sbi, &ni->file.run, mirr_off, d_security,
2199 /* Fill SII entry. */
2200 sii_e.de.view.data_off =
2201 cpu_to_le16(offsetof(struct NTFS_DE_SII, sec_hdr));
2202 sii_e.de.view.data_size = cpu_to_le16(SIZEOF_SECURITY_HDR);
2203 sii_e.de.view.res = 0;
2204 sii_e.de.size = cpu_to_le16(SIZEOF_SII_DIRENTRY);
2205 sii_e.de.key_size = cpu_to_le16(sizeof(d_security->key.sec_id));
2208 sii_e.sec_id = d_security->key.sec_id;
2209 memcpy(&sii_e.sec_hdr, d_security, SIZEOF_SECURITY_HDR);
2211 err = indx_insert_entry(indx_sii, ni, &sii_e.de, NULL, NULL, 0);
2215 /* Fill SDH entry. */
2216 sdh_e.de.view.data_off =
2217 cpu_to_le16(offsetof(struct NTFS_DE_SDH, sec_hdr));
2218 sdh_e.de.view.data_size = cpu_to_le16(SIZEOF_SECURITY_HDR);
2219 sdh_e.de.view.res = 0;
2220 sdh_e.de.size = cpu_to_le16(SIZEOF_SDH_DIRENTRY);
2221 sdh_e.de.key_size = cpu_to_le16(sizeof(sdh_e.key));
2224 sdh_e.key.hash = d_security->key.hash;
2225 sdh_e.key.sec_id = d_security->key.sec_id;
2226 memcpy(&sdh_e.sec_hdr, d_security, SIZEOF_SECURITY_HDR);
/* Trailing "II" magic - part of the on-disk SDH entry format. */
2227 sdh_e.magic[0] = cpu_to_le16('I');
2228 sdh_e.magic[1] = cpu_to_le16('I');
2231 err = indx_insert_entry(indx_sdh, ni, &sdh_e.de, (void *)(size_t)1,
2236 *security_id = d_security->key.sec_id;
2240 /* Update Id and offset for next descriptor. */
2241 sbi->security.next_id += 1;
2242 sbi->security.next_off += aligned_sec_size;
2246 mark_inode_dirty(&ni->vfs_inode);
/*
 * NOTE(review): sampled listing - error returns and the function tail
 * are elided; comments describe visible lines only.
 */
2254 * ntfs_reparse_init - Load and parse $Extend/$Reparse.
2256 int ntfs_reparse_init(struct ntfs_sb_info *sbi)
2259 struct ntfs_inode *ni = sbi->reparse.ni;
2260 struct ntfs_index *indx = &sbi->reparse.index_r;
2261 struct ATTRIB *attr;
2262 struct ATTR_LIST_ENTRY *le;
2263 const struct INDEX_ROOT *root_r;
/* Locate the $R index root inside $Reparse. */
2269 attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SR_NAME,
2270 ARRAY_SIZE(SR_NAME), NULL, NULL);
/* Validate index type and collation before initializing it. */
2276 root_r = resident_data(attr);
2277 if (root_r->type != ATTR_ZERO ||
2278 root_r->rule != NTFS_COLLATION_TYPE_UINTS) {
2283 err = indx_init(indx, sbi, attr, INDEX_MUTEX_SR);
/*
 * NOTE(review): sampled listing - error returns and the function tail
 * are elided; comments describe visible lines only.
 */
2292 * ntfs_objid_init - Load and parse $Extend/$ObjId.
2294 int ntfs_objid_init(struct ntfs_sb_info *sbi)
2297 struct ntfs_inode *ni = sbi->objid.ni;
2298 struct ntfs_index *indx = &sbi->objid.index_o;
2299 struct ATTRIB *attr;
2300 struct ATTR_LIST_ENTRY *le;
2301 const struct INDEX_ROOT *root;
/* Locate the $O index root inside $ObjId. */
2307 attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SO_NAME,
2308 ARRAY_SIZE(SO_NAME), NULL, NULL);
/* Validate index type and collation before initializing it. */
2314 root = resident_data(attr);
2315 if (root->type != ATTR_ZERO ||
2316 root->rule != NTFS_COLLATION_TYPE_UINTS) {
2321 err = indx_init(indx, sbi, attr, INDEX_MUTEX_SO);
/*
 * Remove the index entry keyed by @guid from the $ObjId::$O index.
 * NOTE(review): sampled listing - unlock and return lines are elided.
 */
2329 int ntfs_objid_remove(struct ntfs_sb_info *sbi, struct GUID *guid)
2332 struct ntfs_inode *ni = sbi->objid.ni;
2333 struct ntfs_index *indx = &sbi->objid.index_o;
/* Serialize against other $ObjId users (nested lock class OBJID). */
2338 mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_OBJID);
2340 err = indx_delete_entry(indx, ni, guid, sizeof(*guid), NULL);
2342 mark_inode_dirty(&ni->vfs_inode);
/*
 * Insert an entry (key = reparse tag + MFT reference) into the
 * $Reparse::$R index.
 * NOTE(review): sampled listing - unlock and return lines are elided.
 */
2348 int ntfs_insert_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
2349 const struct MFT_REF *ref)
2352 struct ntfs_inode *ni = sbi->reparse.ni;
2353 struct ntfs_index *indx = &sbi->reparse.index_r;
2354 struct NTFS_DE_R re;
/* Build the directory entry on the stack; zero padding included. */
2359 memset(&re, 0, sizeof(re));
2361 re.de.view.data_off = cpu_to_le16(offsetof(struct NTFS_DE_R, zero));
2362 re.de.size = cpu_to_le16(sizeof(struct NTFS_DE_R));
2363 re.de.key_size = cpu_to_le16(sizeof(re.key));
2365 re.key.ReparseTag = rtag;
2366 memcpy(&re.key.ref, ref, sizeof(*ref));
/* Serialize against other $Reparse users (nested lock class REPARSE). */
2368 mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);
2370 err = indx_insert_entry(indx, ni, &re.de, NULL, NULL, 0);
2372 mark_inode_dirty(&ni->vfs_inode);
/*
 * Remove an entry from the $Reparse::$R index.
 * NOTE(review): sampled listing - control flow between the two delete
 * attempts (error checks, branch conditions, labels) is elided;
 * comments describe visible lines only.
 */
2378 int ntfs_remove_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
2379 const struct MFT_REF *ref)
2382 struct ntfs_inode *ni = sbi->reparse.ni;
2383 struct ntfs_index *indx = &sbi->reparse.index_r;
2384 struct ntfs_fnd *fnd = NULL;
2385 struct REPARSE_KEY rkey;
2386 struct NTFS_DE_R *re;
2387 struct INDEX_ROOT *root_r;
/* First attempt: delete by the exact (tag, ref) key. */
2392 rkey.ReparseTag = rtag;
2395 mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);
2398 err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
/* Fallback path: look the entry up, then delete by its stored key. */
2408 root_r = indx_get_root(indx, ni, NULL, NULL);
2414 /* 1 - forces to ignore rkey.ReparseTag when comparing keys. */
2415 err = indx_find(indx, ni, root_r, &rkey, sizeof(rkey), (void *)1, &diff,
2416 (struct NTFS_DE **)&re, fnd);
2420 if (memcmp(&re->key.ref, ref, sizeof(*ref))) {
2421 /* Impossible. Looks like volume corrupt? */
/* Delete using the key actually found in the index. */
2425 memcpy(&rkey, &re->key, sizeof(rkey));
2430 err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
2438 mark_inode_dirty(&ni->vfs_inode);
/*
 * Drop cached metadata buffers for the cluster run [lcn, lcn+len) and
 * issue a discard (TRIM) for it.
 * NOTE(review): sampled listing - the second parameter line and the
 * closing brace are elided.
 */
2444 static inline void ntfs_unmap_and_discard(struct ntfs_sb_info *sbi, CLST lcn,
2447 ntfs_unmap_meta(sbi->sb, lcn, len);
2448 ntfs_discard(sbi, lcn, len);
/*
 * Mark the cluster run [lcn, lcn+len) as free in the volume bitmap,
 * optionally discarding it when @trim is set.
 * NOTE(review): sampled listing - the branch structure between the
 * per-cluster loop and the whole-run path is elided; comments describe
 * visible lines only.
 */
2451 void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim)
2454 struct wnd_bitmap *wnd = &sbi->used.bitmap;
2456 down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
/*
 * If part of the run is already free, the volume is inconsistent:
 * flag it dirty and fall back to per-cluster handling.
 */
2457 if (!wnd_is_used(wnd, lcn, len)) {
2458 ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
2462 for (i = lcn; i < end; i++) {
2463 if (wnd_is_used(wnd, i, 1)) {
2474 ntfs_unmap_and_discard(sbi, lcn, len);
2476 wnd_set_free(wnd, lcn, len);
/* Fast path: whole run was in use - discard and free it in one go. */
2485 ntfs_unmap_and_discard(sbi, lcn, len);
2486 wnd_set_free(wnd, lcn, len);
2489 up_write(&wnd->rw_lock);
2493 * run_deallocate - Deallocate clusters.
2495 int run_deallocate(struct ntfs_sb_info *sbi, struct runs_tree *run, bool trim)
2500 while (run_get_entry(run, idx++, NULL, &lcn, &len)) {
2501 if (lcn == SPARSE_LCN)
2504 mark_as_free_ex(sbi, lcn, len, trim);