1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
8 #include <linux/blkdev.h>
9 #include <linux/buffer_head.h>
11 #include <linux/kernel.h>
/*
 * Well-known NTFS metadata file names as struct cpu_str (leading
 * length, pad byte, then characters), followed by __le16 attribute and
 * index-stream names ($Bad, $I30, $SII, $SDH, $SDS, $O, $Q, $R) and,
 * when LZX/Xpress support is compiled in, the "WofCompressedData"
 * stream name.
 * NOTE(review): sampled listing — closing "};" lines and some entries
 * (e.g. NAME_ROOT's initializer, WOF_NAME's last element) are elided
 * between the embedded original line numbers.
 */
18 const struct cpu_str NAME_MFT = {
19 4, 0, { '$', 'M', 'F', 'T' },
21 const struct cpu_str NAME_MIRROR = {
22 8, 0, { '$', 'M', 'F', 'T', 'M', 'i', 'r', 'r' },
24 const struct cpu_str NAME_LOGFILE = {
25 8, 0, { '$', 'L', 'o', 'g', 'F', 'i', 'l', 'e' },
27 const struct cpu_str NAME_VOLUME = {
28 7, 0, { '$', 'V', 'o', 'l', 'u', 'm', 'e' },
30 const struct cpu_str NAME_ATTRDEF = {
31 8, 0, { '$', 'A', 't', 't', 'r', 'D', 'e', 'f' },
33 const struct cpu_str NAME_ROOT = {
36 const struct cpu_str NAME_BITMAP = {
37 7, 0, { '$', 'B', 'i', 't', 'm', 'a', 'p' },
39 const struct cpu_str NAME_BOOT = {
40 5, 0, { '$', 'B', 'o', 'o', 't' },
42 const struct cpu_str NAME_BADCLUS = {
43 8, 0, { '$', 'B', 'a', 'd', 'C', 'l', 'u', 's' },
45 const struct cpu_str NAME_QUOTA = {
46 6, 0, { '$', 'Q', 'u', 'o', 't', 'a' },
48 const struct cpu_str NAME_SECURE = {
49 7, 0, { '$', 'S', 'e', 'c', 'u', 'r', 'e' },
51 const struct cpu_str NAME_UPCASE = {
52 7, 0, { '$', 'U', 'p', 'C', 'a', 's', 'e' },
54 const struct cpu_str NAME_EXTEND = {
55 7, 0, { '$', 'E', 'x', 't', 'e', 'n', 'd' },
57 const struct cpu_str NAME_OBJID = {
58 6, 0, { '$', 'O', 'b', 'j', 'I', 'd' },
60 const struct cpu_str NAME_REPARSE = {
61 8, 0, { '$', 'R', 'e', 'p', 'a', 'r', 's', 'e' },
63 const struct cpu_str NAME_USNJRNL = {
64 8, 0, { '$', 'U', 's', 'n', 'J', 'r', 'n', 'l' },
/* Names below are __le16 arrays (on-disk UTF-16LE, no length prefix). */
66 const __le16 BAD_NAME[4] = {
67 cpu_to_le16('$'), cpu_to_le16('B'), cpu_to_le16('a'), cpu_to_le16('d'),
69 const __le16 I30_NAME[4] = {
70 cpu_to_le16('$'), cpu_to_le16('I'), cpu_to_le16('3'), cpu_to_le16('0'),
72 const __le16 SII_NAME[4] = {
73 cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('I'), cpu_to_le16('I'),
75 const __le16 SDH_NAME[4] = {
76 cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('H'),
78 const __le16 SDS_NAME[4] = {
79 cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('S'),
81 const __le16 SO_NAME[2] = {
82 cpu_to_le16('$'), cpu_to_le16('O'),
84 const __le16 SQ_NAME[2] = {
85 cpu_to_le16('$'), cpu_to_le16('Q'),
87 const __le16 SR_NAME[2] = {
88 cpu_to_le16('$'), cpu_to_le16('R'),
91 #ifdef CONFIG_NTFS3_LZX_XPRESS
92 const __le16 WOF_NAME[17] = {
93 cpu_to_le16('W'), cpu_to_le16('o'), cpu_to_le16('f'), cpu_to_le16('C'),
94 cpu_to_le16('o'), cpu_to_le16('m'), cpu_to_le16('p'), cpu_to_le16('r'),
95 cpu_to_le16('e'), cpu_to_le16('s'), cpu_to_le16('s'), cpu_to_le16('e'),
96 cpu_to_le16('d'), cpu_to_le16('D'), cpu_to_le16('a'), cpu_to_le16('t'),
/*
 * Write-side update-sequence fixups: the sequence word at fix_off is
 * stamped over the last __le16 of every SECTOR_SIZE chunk of the
 * record, with the true words saved in the fixup array.
 * NOTE(review): sampled listing — the validation's failure return, the
 * save/stamp loop body and the final return are elided here.
 */
104 * ntfs_fix_pre_write - Insert fixups into @rhdr before writing to disk.
106 bool ntfs_fix_pre_write(struct NTFS_RECORD_HEADER *rhdr, size_t bytes)
/* Fixup array offset and element count, from the record header (LE). */
110 u16 fo = le16_to_cpu(rhdr->fix_off);
111 u16 fn = le16_to_cpu(rhdr->fix_num);
/* Sanity: even offset, array within first sector, count covers @bytes. */
113 if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
114 fn * SECTOR_SIZE > bytes) {
118 /* Get fixup pointer. */
119 fixup = Add2Ptr(rhdr, fo);
/* Sequence value is kept below 0x7FFF (same cap as in ntfs_write_bh). */
121 if (*fixup >= 0x7FFF)
128 ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));
/* Advance to the last __le16 of the next sector. */
133 ptr += SECTOR_SIZE / sizeof(short);
/*
 * Read-side counterpart of ntfs_fix_pre_write: verifies each sector's
 * trailing word against the sequence sample and restores the saved
 * words from the fixup array.
 * NOTE(review): sampled listing — parameter list tail, restore logic
 * and final return are elided between the numbered lines.
 */
139 * ntfs_fix_post_read - Remove fixups after reading from disk.
141 * Return: < 0 if error, 0 if ok, 1 if need to update fixups.
143 int ntfs_fix_post_read(struct NTFS_RECORD_HEADER *rhdr, size_t bytes,
150 fo = le16_to_cpu(rhdr->fix_off);
/* 'simple' mode derives the count from @bytes instead of the header. */
151 fn = simple ? ((bytes >> SECTOR_SHIFT) + 1)
152 : le16_to_cpu(rhdr->fix_num);
/* Same validation as the pre-write path. */
155 if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
156 fn * SECTOR_SIZE > bytes) {
157 return -EINVAL; /* Native chkntfs returns ok! */
160 /* Get fixup pointer. */
161 fixup = Add2Ptr(rhdr, fo);
163 ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));
167 /* Test current word. */
168 if (*ptr != sample) {
169 /* Fixup does not match! Is it serious error? */
175 ptr += SECTOR_SIZE / sizeof(short);
/*
 * Opens the $Extend directory (NTFS v3+) and caches the inode numbers
 * (and, for $ObjId/$Reparse, the ntfs_inode pointers) of its optional
 * children: $ObjId, $Quota, $Reparse, $UsnJrnl.  All lookups are
 * best-effort: a missing child is not an error.
 * NOTE(review): sampled listing — error-path iput()/goto lines and the
 * final return are elided between the numbered lines.
 */
182 * ntfs_extend_init - Load $Extend file.
184 int ntfs_extend_init(struct ntfs_sb_info *sbi)
187 struct super_block *sb = sbi->sb;
188 struct inode *inode, *inode2;
/* $Extend only exists on NTFS 3.x volumes. */
191 if (sbi->volume.major_ver < 3) {
192 ntfs_notice(sb, "Skip $Extend 'cause NTFS version");
196 ref.low = cpu_to_le32(MFT_REC_EXTEND);
198 ref.seq = cpu_to_le16(MFT_REC_EXTEND);
199 inode = ntfs_iget5(sb, &ref, &NAME_EXTEND);
201 err = PTR_ERR(inode);
202 ntfs_err(sb, "Failed to load $Extend.");
207 /* If ntfs_iget5() reads from disk it never returns bad inode. */
208 if (!S_ISDIR(inode->i_mode)) {
213 /* Try to find $ObjId */
214 inode2 = dir_search_u(inode, &NAME_OBJID, NULL);
215 if (inode2 && !IS_ERR(inode2)) {
216 if (is_bad_inode(inode2)) {
219 sbi->objid.ni = ntfs_i(inode2);
220 sbi->objid_no = inode2->i_ino;
224 /* Try to find $Quota */
225 inode2 = dir_search_u(inode, &NAME_QUOTA, NULL);
226 if (inode2 && !IS_ERR(inode2)) {
227 sbi->quota_no = inode2->i_ino;
231 /* Try to find $Reparse */
232 inode2 = dir_search_u(inode, &NAME_REPARSE, NULL);
233 if (inode2 && !IS_ERR(inode2)) {
234 sbi->reparse.ni = ntfs_i(inode2);
235 sbi->reparse_no = inode2->i_ino;
238 /* Try to find $UsnJrnl */
239 inode2 = dir_search_u(inode, &NAME_USNJRNL, NULL);
240 if (inode2 && !IS_ERR(inode2)) {
241 sbi->usn_jrnl_no = inode2->i_ino;
/*
 * Loads $MFT (falling back to the $MFTMirr copy at mft.lbo2), replays
 * $LogFile, flushes/invalidates the block device, and finally wipes an
 * initialized logfile with 0xFF bytes on a writable mount.  Sets and
 * clears NTFS_FLAGS_LOG_REPLAYING around the whole sequence.
 * NOTE(review): sampled listing — IS_ERR checks, gotos and the final
 * return are elided between the numbered lines.
 */
251 int ntfs_loadlog_and_replay(struct ntfs_inode *ni, struct ntfs_sb_info *sbi)
254 struct super_block *sb = sbi->sb;
255 bool initialized = false;
/* Refuse logfiles >= 4 GiB ("\x24" is the literal '$' character). */
260 if (ni->vfs_inode.i_size >= 0x100000000ull) {
261 ntfs_err(sb, "\x24LogFile is too big");
266 sbi->flags |= NTFS_FLAGS_LOG_REPLAYING;
268 ref.low = cpu_to_le32(MFT_REC_MFT);
270 ref.seq = cpu_to_le16(1);
272 inode = ntfs_iget5(sb, &ref, NULL);
278 /* Try to use MFT copy. */
279 u64 t64 = sbi->mft.lbo;
281 sbi->mft.lbo = sbi->mft.lbo2;
282 inode = ntfs_iget5(sb, &ref, NULL);
290 ntfs_err(sb, "Failed to load $MFT.");
294 sbi->mft.ni = ntfs_i(inode);
296 /* LogFile should not contains attribute list. */
297 err = ni_load_all_mi(sbi->mft.ni);
299 err = log_replay(ni, &initialized);
304 sync_blockdev(sb->s_bdev);
305 invalidate_bdev(sb->s_bdev);
307 if (sbi->flags & NTFS_FLAGS_NEED_REPLAY) {
/* Nothing more to do on read-only mounts or uninitialized logfiles. */
312 if (sb_rdonly(sb) || !initialized)
315 /* Fill LogFile by '-1' if it is initialized. */
316 err = ntfs_bio_fill_1(sbi, &ni->file.run);
319 sbi->flags &= ~NTFS_FLAGS_LOG_REPLAYING;
/*
 * Binary search over the sorted sbi->def_table ($AttrDef entries) for
 * the entry matching @type.
 * NOTE(review): sampled listing — the match/narrowing branches and the
 * not-found return are elided.
 */
327 * Return: Current ATTR_DEF_ENTRY for given attribute type.
329 const struct ATTR_DEF_ENTRY *ntfs_query_def(struct ntfs_sb_info *sbi,
332 int type_in = le32_to_cpu(type);
334 size_t max_idx = sbi->def_entries - 1;
336 while (min_idx <= max_idx) {
/* Overflow-safe midpoint. */
337 size_t i = min_idx + ((max_idx - min_idx) >> 1);
338 const struct ATTR_DEF_ENTRY *entry = sbi->def_table + i;
339 int diff = le32_to_cpu(entry->type) - type_in;
/*
 * Allocates @len clusters from the volume bitmap under the cluster
 * rw_lock.  ALLOCATE_MFT requests are served from the reserved MFT
 * zone (refreshing it on demand); normal requests start from the
 * cached next_free_lcn hint, and may shrink the MFT zone down to
 * NTFS_MIN_MFT_ZONE when space is tight.
 * NOTE(review): sampled listing — several error/goto paths, the 'out'
 * labels and the final return are elided between the numbered lines.
 */
354 * ntfs_look_for_free_space - Look for a free space in bitmap.
356 int ntfs_look_for_free_space(struct ntfs_sb_info *sbi, CLST lcn, CLST len,
357 CLST *new_lcn, CLST *new_len,
358 enum ALLOCATE_OPT opt)
362 struct super_block *sb = sbi->sb;
363 size_t alcn, zlen, zeroes, zlcn, zlen2, ztrim, new_zlen;
364 struct wnd_bitmap *wnd = &sbi->used.bitmap;
366 down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
367 if (opt & ALLOCATE_MFT) {
368 zlen = wnd_zone_len(wnd);
/* Empty zone: try to rebuild it, then re-read its length. */
371 err = ntfs_refresh_zone(sbi);
375 zlen = wnd_zone_len(wnd);
379 ntfs_err(sbi->sb, "no free space to extend mft");
/* Carve the request from the front of the MFT zone. */
384 lcn = wnd_zone_bit(wnd);
385 alen = min_t(CLST, len, zlen);
387 wnd_zone_set(wnd, lcn + alen, zlen - alen);
389 err = wnd_set_used(wnd, lcn, alen);
397 * 'Cause cluster 0 is always used this value means that we should use
398 * cached value of 'next_free_lcn' to improve performance.
401 lcn = sbi->used.next_free_lcn;
403 if (lcn >= wnd->nbits)
406 alen = wnd_find(wnd, len, lcn, BITMAP_FIND_MARK_AS_USED, &alcn);
410 /* Try to use clusters from MftZone. */
411 zlen = wnd_zone_len(wnd);
412 zeroes = wnd_zeroes(wnd);
414 /* Check too big request */
415 if (len > zeroes + zlen || zlen <= NTFS_MIN_MFT_ZONE) {
420 /* How many clusters to cat from zone. */
421 zlcn = wnd_zone_bit(wnd);
423 ztrim = clamp_val(len, zlen2, zlen);
/* Never shrink the zone below its minimum size. */
424 new_zlen = max_t(size_t, zlen - ztrim, NTFS_MIN_MFT_ZONE);
426 wnd_zone_set(wnd, zlcn, new_zlen);
428 /* Allocate continues clusters. */
429 alen = wnd_find(wnd, len, 0,
430 BITMAP_FIND_MARK_AS_USED | BITMAP_FIND_FULL, &alcn);
/* Drop any cached metadata buffers covering the allocated range. */
441 ntfs_unmap_meta(sb, alcn, alen);
443 /* Set hint for next requests. */
444 if (!(opt & ALLOCATE_MFT))
445 sbi->used.next_free_lcn = alcn + alen;
447 up_write(&wnd->rw_lock);
/*
 * Grows the MFT: resizes $MFT::DATA to a 128-record-aligned new total,
 * resizes $MFT::BITMAP to match, refreshes the MFT zone, extends the
 * in-memory bitmap window, formats the new tail records and writes the
 * $MFT inode back.  Caller holds sbi->mft.bitmap for write (see the
 * recursion note in the original header).
 * NOTE(review): sampled listing — error checks between the steps and
 * the final return are elided.
 */
452 * ntfs_extend_mft - Allocate additional MFT records.
454 * sbi->mft.bitmap is locked for write.
457 * ntfs_look_free_mft ->
460 * ni_insert_nonresident ->
463 * ntfs_look_free_mft ->
466 * To avoid recursive always allocate space for two new MFT records
467 * see attrib.c: "at least two MFT to avoid recursive loop".
469 static int ntfs_extend_mft(struct ntfs_sb_info *sbi)
472 struct ntfs_inode *ni = sbi->mft.ni;
473 size_t new_mft_total;
474 u64 new_mft_bytes, new_bitmap_bytes;
476 struct wnd_bitmap *wnd = &sbi->mft.bitmap;
/* Round the new record count up to a multiple of 128. */
478 new_mft_total = (wnd->nbits + MFT_INCREASE_CHUNK + 127) & (CLST)~127;
479 new_mft_bytes = (u64)new_mft_total << sbi->record_bits;
481 /* Step 1: Resize $MFT::DATA. */
482 down_write(&ni->file.run_lock);
483 err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
484 new_mft_bytes, NULL, false, &attr);
487 up_write(&ni->file.run_lock);
491 attr->nres.valid_size = attr->nres.data_size;
/* Recompute the total from what attr_set_size actually allocated. */
492 new_mft_total = le64_to_cpu(attr->nres.alloc_size) >> sbi->record_bits;
495 /* Step 2: Resize $MFT::BITMAP. */
496 new_bitmap_bytes = bitmap_size(new_mft_total);
498 err = attr_set_size(ni, ATTR_BITMAP, NULL, 0, &sbi->mft.bitmap.run,
499 new_bitmap_bytes, &new_bitmap_bytes, true, NULL);
501 /* Refresh MFT Zone if necessary. */
502 down_write_nested(&sbi->used.bitmap.rw_lock, BITMAP_MUTEX_CLUSTERS);
504 ntfs_refresh_zone(sbi);
506 up_write(&sbi->used.bitmap.rw_lock);
507 up_write(&ni->file.run_lock);
512 err = wnd_extend(wnd, new_mft_total);
/* Zero-format the newly added records. */
517 ntfs_clear_mft_tail(sbi, sbi->mft.used, new_mft_total);
519 err = _ni_write_inode(&ni->vfs_inode, 0);
/*
 * Finds a free MFT record number (*rno) under the MFT bitmap lock.
 * @mft requests take a record from the MFT zone (rebuilding the zone
 * from the reserved range [MFT_REC_RESERVED, MFT_REC_FREE) on first
 * use, tracked by the in-memory sbi->mft.reserved_bitmap); general
 * requests scan from MFT_REC_USER using the next_free hint, extending
 * the MFT when the bitmap is exhausted.  Optionally attaches a
 * subrecord to @ni via ni_add_subrecord().
 * NOTE(review): sampled listing — many error/retry paths, iput() calls
 * and label lines are elided between the numbered lines.
 */
525 * ntfs_look_free_mft - Look for a free MFT record.
527 int ntfs_look_free_mft(struct ntfs_sb_info *sbi, CLST *rno, bool mft,
528 struct ntfs_inode *ni, struct mft_inode **mi)
531 size_t zbit, zlen, from, to, fr;
534 struct super_block *sb = sbi->sb;
535 struct wnd_bitmap *wnd = &sbi->mft.bitmap;
/* reserved_bitmap must be wide enough for the reserved record range. */
538 static_assert(sizeof(sbi->mft.reserved_bitmap) * 8 >=
539 MFT_REC_FREE - MFT_REC_RESERVED);
542 down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
544 zlen = wnd_zone_len(wnd);
546 /* Always reserve space for MFT. */
549 zbit = wnd_zone_bit(wnd);
/* Take one record off the front of the zone. */
551 wnd_zone_set(wnd, zbit + 1, zlen - 1);
556 /* No MFT zone. Find the nearest to '0' free MFT. */
557 if (!wnd_find(wnd, 1, MFT_REC_FREE, 0, &zbit)) {
559 mft_total = wnd->nbits;
561 err = ntfs_extend_mft(sbi);
567 if (!mft || MFT_REC_FREE == sbi->mft.next_reserved)
573 * Look for free record reserved area [11-16) ==
574 * [MFT_REC_RESERVED, MFT_REC_FREE ) MFT bitmap always
577 if (!sbi->mft.reserved_bitmap) {
578 /* Once per session create internal bitmap for 5 bits. */
579 sbi->mft.reserved_bitmap = 0xFF;
/* Probe each reserved record; clear its bit if it is truly unused. */
582 for (ir = MFT_REC_RESERVED; ir < MFT_REC_FREE; ir++) {
584 struct ntfs_inode *ni;
585 struct MFT_REC *mrec;
587 ref.low = cpu_to_le32(ir);
588 ref.seq = cpu_to_le16(ir);
590 i = ntfs_iget5(sb, &ref, NULL);
595 "Invalid reserved record %x",
599 if (is_bad_inode(i)) {
608 if (!is_rec_base(mrec))
611 if (mrec->hard_links)
617 if (ni_find_attr(ni, NULL, NULL, ATTR_NAME,
618 NULL, 0, NULL, NULL))
621 __clear_bit(ir - MFT_REC_RESERVED,
622 &sbi->mft.reserved_bitmap);
626 /* Scan 5 bits for zero. Bit 0 == MFT_REC_RESERVED */
627 zbit = find_next_zero_bit(&sbi->mft.reserved_bitmap,
628 MFT_REC_FREE, MFT_REC_RESERVED);
629 if (zbit >= MFT_REC_FREE) {
630 sbi->mft.next_reserved = MFT_REC_FREE;
635 sbi->mft.next_reserved = zbit;
/* Build a fresh MFT zone [zbit, zbit + zlen), clipped to the bitmap. */
638 zlen = zbit == MFT_REC_FREE ? (MFT_REC_USER - MFT_REC_FREE) : 4;
639 if (zbit + zlen > wnd->nbits)
640 zlen = wnd->nbits - zbit;
642 while (zlen > 1 && !wnd_is_free(wnd, zbit, zlen))
645 /* [zbit, zbit + zlen) will be used for MFT itself. */
646 from = sbi->mft.used;
651 ntfs_clear_mft_tail(sbi, from, to);
662 wnd_zone_set(wnd, zbit, zlen);
666 /* The request to get record for general purpose. */
667 if (sbi->mft.next_free < MFT_REC_USER)
668 sbi->mft.next_free = MFT_REC_USER;
671 if (sbi->mft.next_free >= sbi->mft.bitmap.nbits) {
672 } else if (!wnd_find(wnd, 1, MFT_REC_USER, 0, &fr)) {
673 sbi->mft.next_free = sbi->mft.bitmap.nbits;
676 sbi->mft.next_free = *rno + 1;
680 err = ntfs_extend_mft(sbi);
686 if (ni && !ni_add_subrecord(ni, *rno, mi)) {
691 /* We have found a record that are not reserved for next MFT. */
692 if (*rno >= MFT_REC_FREE)
693 wnd_set_used(wnd, *rno, 1);
694 else if (*rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited)
695 __set_bit(*rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
699 up_write(&wnd->rw_lock);
/*
 * Returns MFT record @rno to the free pool under the MFT bitmap lock:
 * normal records are cleared in the on-disk bitmap (flagging the
 * volume dirty if the record was not marked used), reserved records
 * are cleared in the in-memory reserved_bitmap, and the MFT zone /
 * next_free hint are pulled back so the record can be reused early.
 * NOTE(review): sampled listing — the 'out' label is elided.
 */
705 * ntfs_mark_rec_free - Mark record as free.
707 void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno)
709 struct wnd_bitmap *wnd = &sbi->mft.bitmap;
711 down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
712 if (rno >= wnd->nbits)
715 if (rno >= MFT_REC_FREE) {
/* Freeing a record that was not in use indicates corruption. */
716 if (!wnd_is_used(wnd, rno, 1))
717 ntfs_set_state(sbi, NTFS_DIRTY_ERROR)
719 wnd_set_free(wnd, rno, 1);
720 } else if (rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited) {
721 __clear_bit(rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
724 if (rno < wnd_zone_bit(wnd))
725 wnd_zone_set(wnd, rno, 1);
726 else if (rno < sbi->mft.next_free && rno >= MFT_REC_USER)
727 sbi->mft.next_free = rno;
730 up_write(&wnd->rw_lock);
/*
 * Writes the template empty record (sbi->new_rec) over every MFT
 * record in [from, to), advancing sbi->mft.used.  Holds the $MFT run
 * lock for read while mapping each record's buffers.
 * NOTE(review): sampled listing — early-exit check, run/ni setup and
 * the final return are elided.
 */
734 * ntfs_clear_mft_tail - Format empty records [from, to).
736 * sbi->mft.bitmap is locked for write.
738 int ntfs_clear_mft_tail(struct ntfs_sb_info *sbi, size_t from, size_t to)
743 struct runs_tree *run;
744 struct ntfs_inode *ni;
749 rs = sbi->record_size;
753 down_read(&ni->file.run_lock);
754 vbo = (u64)from * rs;
755 for (; from < to; from++, vbo += rs) {
756 struct ntfs_buffers nb;
758 err = ntfs_get_bh(sbi, run, vbo, rs, &nb);
/* Stamp the pre-built empty record header over this slot. */
762 err = ntfs_write_bh(sbi, &sbi->new_rec->rhdr, &nb, 0);
769 sbi->mft.used = from;
770 up_read(&ni->file.run_lock);
/*
 * Rebuilds the MFT zone when it is empty: targets min(1/8 of the
 * volume, 512 MB) of clusters immediately after the last $MFT run and
 * installs whatever contiguous free range wnd_find() locates there.
 * NOTE(review): sampled listing — zlen/lcn_s declarations and some
 * returns are elided between the numbered lines.
 */
775 * ntfs_refresh_zone - Refresh MFT zone.
777 * sbi->used.bitmap is locked for rw.
778 * sbi->mft.bitmap is locked for write.
779 * sbi->mft.ni->file.run_lock for write.
781 int ntfs_refresh_zone(struct ntfs_sb_info *sbi)
783 CLST zone_limit, zone_max, lcn, vcn, len;
785 struct wnd_bitmap *wnd = &sbi->used.bitmap;
786 struct ntfs_inode *ni = sbi->mft.ni;
788 /* Do not change anything unless we have non empty MFT zone. */
789 if (wnd_zone_len(wnd))
793 * Compute the MFT zone at two steps.
794 * It would be nice if we are able to allocate 1/8 of
795 * total clusters for MFT but not more then 512 MB.
797 zone_limit = (512 * 1024 * 1024) >> sbi->cluster_bits;
798 zone_max = wnd->nbits >> 3;
799 if (zone_max > zone_limit)
800 zone_max = zone_limit;
/* VCN just past the last currently-mapped MFT record. */
802 vcn = bytes_to_cluster(sbi,
803 (u64)sbi->mft.bitmap.nbits << sbi->record_bits);
805 if (!run_lookup_entry(&ni->file.run, vcn - 1, &lcn, &len, NULL))
808 /* We should always find Last Lcn for MFT. */
809 if (lcn == SPARSE_LCN)
814 /* Try to allocate clusters after last MFT run. */
815 zlen = wnd_find(wnd, zone_max, lcn_s, 0, &lcn_s);
817 ntfs_notice(sbi->sb, "MftZone: unavailable");
821 /* Truncate too large zone. */
822 wnd_zone_set(wnd, lcn_s, zlen);
/*
 * Copies the mirrored MFT records from $MFT (mft.lbo) to $MFTMirr
 * (mft.lbo2) block by block through the buffer cache, optionally
 * syncing each buffer when @wait is set.  No-op unless the
 * NTFS_FLAGS_MFTMIRR flag is raised; the flag is cleared on success.
 * NOTE(review): sampled listing — bh error checks, put_bh calls and
 * the final return are elided.
 */
828 * ntfs_update_mftmirr - Update $MFTMirr data.
830 int ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
833 struct super_block *sb = sbi->sb;
834 u32 blocksize = sb->s_blocksize;
835 sector_t block1, block2;
838 if (!(sbi->flags & NTFS_FLAGS_MFTMIRR))
842 bytes = sbi->mft.recs_mirr << sbi->record_bits;
843 block1 = sbi->mft.lbo >> sb->s_blocksize_bits;
844 block2 = sbi->mft.lbo2 >> sb->s_blocksize_bits;
846 for (; bytes >= blocksize; bytes -= blocksize) {
847 struct buffer_head *bh1, *bh2;
/* Source block must be read; destination is only overwritten. */
849 bh1 = sb_bread(sb, block1++);
855 bh2 = sb_getblk(sb, block2++);
862 if (buffer_locked(bh2))
863 __wait_on_buffer(bh2);
866 memcpy(bh2->b_data, bh1->b_data, blocksize);
867 set_buffer_uptodate(bh2);
868 mark_buffer_dirty(bh2);
875 err = sync_dirty_buffer(bh2);
882 sbi->flags &= ~NTFS_FLAGS_MFTMIRR;
/*
 * Sets or clears the on-disk VOLUME_FLAG_DIRTY bit in $Volume's
 * ATTR_VOL_INFO, caching the result in sbi->volume.flags.  Skips work
 * on read-only mounts, when real_dirty is latched, or when the cached
 * flag already matches.  Metadata is synced with wait=0 to avoid
 * hanging on removed media (see inline note).
 * NOTE(review): sampled listing — ni assignment, error paths, 'break'
 * statements in the switch and the final return are elided.
 */
891 * Mount: ntfs_set_state(NTFS_DIRTY_DIRTY)
892 * Umount: ntfs_set_state(NTFS_DIRTY_CLEAR)
893 * NTFS error: ntfs_set_state(NTFS_DIRTY_ERROR)
895 int ntfs_set_state(struct ntfs_sb_info *sbi, enum NTFS_DIRTY_FLAGS dirty)
899 struct VOLUME_INFO *info;
900 struct mft_inode *mi;
901 struct ntfs_inode *ni;
904 * Do not change state if fs was real_dirty.
905 * Do not change state if fs already dirty(clear).
906 * Do not change any thing if mounted read only.
908 if (sbi->volume.real_dirty || sb_rdonly(sbi->sb))
911 /* Check cached value. */
912 if ((dirty == NTFS_DIRTY_CLEAR ? 0 : VOLUME_FLAG_DIRTY) ==
913 (sbi->volume.flags & VOLUME_FLAG_DIRTY))
920 mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_DIRTY);
922 attr = ni_find_attr(ni, NULL, NULL, ATTR_VOL_INFO, NULL, 0, NULL, &mi);
928 info = resident_data_ex(attr, SIZEOF_ATTRIBUTE_VOLUME_INFO);
935 case NTFS_DIRTY_ERROR:
936 ntfs_notice(sbi->sb, "Mark volume as dirty due to NTFS errors");
/* Latch: once error-dirty, later calls will not clear the flag. */
937 sbi->volume.real_dirty = true;
939 case NTFS_DIRTY_DIRTY:
940 info->flags |= VOLUME_FLAG_DIRTY;
942 case NTFS_DIRTY_CLEAR:
943 info->flags &= ~VOLUME_FLAG_DIRTY;
946 /* Cache current volume flags. */
947 sbi->volume.flags = info->flags;
956 mark_inode_dirty(&ni->vfs_inode);
957 /* verify(!ntfs_update_mftmirr()); */
960 * If we used wait=1, sync_inode_metadata waits for the io for the
961 * inode to finish. It hangs when media is removed.
962 * So wait=0 is sent down to sync_inode_metadata
963 * and filemap_fdatawrite is used for the data blocks.
965 err = sync_inode_metadata(&ni->vfs_inode, 0);
967 err = filemap_fdatawrite(ni->vfs_inode.i_mapping);
/*
 * NTFS $Secure hash: for each 32-bit word of @sd, rotate the running
 * hash right by 29 bits (equivalently left by 3) and add the word.
 * NOTE(review): sampled listing — hash init and the loop header are
 * elided between the numbered lines.
 */
973 * security_hash - Calculates a hash of security descriptor.
975 static inline __le32 security_hash(const void *sd, size_t bytes)
978 const __le32 *ptr = sd;
982 hash = ((hash >> 0x1D) | (hash << 3)) + le32_to_cpu(*ptr++);
983 return cpu_to_le32(hash);
/*
 * Reads @bytes from absolute byte offset @lbo on the block device into
 * @buffer via the buffer cache, handling an unaligned first block
 * (off/op are reset to a full block after the first iteration).
 * NOTE(review): sampled listing — bh NULL check, 'op' clamping against
 * remaining @bytes, brelse and the return are elided.
 */
986 int ntfs_sb_read(struct super_block *sb, u64 lbo, size_t bytes, void *buffer)
988 struct block_device *bdev = sb->s_bdev;
989 u32 blocksize = sb->s_blocksize;
990 u64 block = lbo >> sb->s_blocksize_bits;
991 u32 off = lbo & (blocksize - 1);
992 u32 op = blocksize - off;
994 for (; bytes; block += 1, off = 0, op = blocksize) {
995 struct buffer_head *bh = __bread(bdev, block, blocksize);
1003 memcpy(buffer, bh->b_data + off, op);
1008 buffer = Add2Ptr(buffer, op);
/*
 * Writes @bytes at absolute byte offset @lbo through the buffer cache.
 * Partial blocks are read-modify-written (__bread); full blocks are
 * just grabbed (__getblk).  A NULL @buf fills with 0xFF instead of
 * copying.  Synchronous filesystems force @wait on.
 * NOTE(review): sampled listing — bh error handling, unlock/put_bh and
 * the final return are elided.
 */
1014 int ntfs_sb_write(struct super_block *sb, u64 lbo, size_t bytes,
1015 const void *buf, int wait)
1017 u32 blocksize = sb->s_blocksize;
1018 struct block_device *bdev = sb->s_bdev;
1019 sector_t block = lbo >> sb->s_blocksize_bits;
1020 u32 off = lbo & (blocksize - 1);
1021 u32 op = blocksize - off;
1022 struct buffer_head *bh;
1024 if (!wait && (sb->s_flags & SB_SYNCHRONOUS))
1027 for (; bytes; block += 1, off = 0, op = blocksize) {
/* Partial block: must read existing contents before overwriting. */
1031 if (op < blocksize) {
1032 bh = __bread(bdev, block, blocksize);
1034 ntfs_err(sb, "failed to read block %llx",
1039 bh = __getblk(bdev, block, blocksize);
1044 if (buffer_locked(bh))
1045 __wait_on_buffer(bh);
1049 memcpy(bh->b_data + off, buf, op);
1050 buf = Add2Ptr(buf, op);
/* NULL @buf: fill range with 0xFF bytes. */
1052 memset(bh->b_data + off, -1, op);
1055 set_buffer_uptodate(bh);
1056 mark_buffer_dirty(bh);
1060 int err = sync_dirty_buffer(bh);
1065 "failed to sync buffer at block %llx, error %d",
/*
 * Writes @bytes at virtual byte offset @vbo of a run list, walking the
 * runs (run_lookup_entry / run_get_entry) and delegating each mapped
 * extent to ntfs_sb_write().  Sparse runs (SPARSE_LCN) are rejected.
 * NOTE(review): sampled listing — error returns, the vcn continuity
 * check tail and the loop/return structure are elided.
 */
1079 int ntfs_sb_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
1080 u64 vbo, const void *buf, size_t bytes, int sync)
1082 struct super_block *sb = sbi->sb;
1083 u8 cluster_bits = sbi->cluster_bits;
1084 u32 off = vbo & sbi->cluster_mask;
1085 CLST lcn, clen, vcn = vbo >> cluster_bits, vcn_next;
1089 if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
1092 if (lcn == SPARSE_LCN)
/* Byte range of the first (possibly unaligned) run. */
1095 lbo = ((u64)lcn << cluster_bits) + off;
1096 len = ((u64)clen << cluster_bits) - off;
1099 u32 op = min_t(u64, len, bytes);
1100 int err = ntfs_sb_write(sb, lbo, op, buf, sync);
1109 vcn_next = vcn + clen;
/* Next run must be contiguous in VCN space. */
1110 if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
1114 if (lcn == SPARSE_LCN)
1118 buf = Add2Ptr(buf, op);
1120 lbo = ((u64)lcn << cluster_bits);
1121 len = ((u64)clen << cluster_bits);
/*
 * Maps virtual byte offset @vbo through @run to an absolute byte
 * offset and reads the containing block via ntfs_bread().  Returns
 * ERR_PTR(-ENOENT) if the VCN is not mapped.
 * NOTE(review): sampled listing — lcn/lbo declarations are elided.
 */
1127 struct buffer_head *ntfs_bread_run(struct ntfs_sb_info *sbi,
1128 const struct runs_tree *run, u64 vbo)
1130 struct super_block *sb = sbi->sb;
1131 u8 cluster_bits = sbi->cluster_bits;
1135 if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, NULL, NULL))
1136 return ERR_PTR(-ENOENT);
1138 lbo = ((u64)lcn << cluster_bits) + (vbo & sbi->cluster_mask);
1140 return ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
/*
 * Reads @bytes at @vbo from a run list.  With a NULL/empty run and a
 * small @vbo it boot-straps from the absolute $MFT location (used for
 * the very first reads of $Volume/$MFTMirr/$LogFile).  If @buf is
 * NULL, the buffer heads are collected into @nb (up to
 * ARRAY_SIZE(nb->bh)) instead of copied; on error collected heads are
 * released.
 * NOTE(review): sampled listing — error branches, the nb bookkeeping
 * and the success return are elided between the numbered lines.
 */
1143 int ntfs_read_run_nb(struct ntfs_sb_info *sbi, const struct runs_tree *run,
1144 u64 vbo, void *buf, u32 bytes, struct ntfs_buffers *nb)
1147 struct super_block *sb = sbi->sb;
1148 u32 blocksize = sb->s_blocksize;
1149 u8 cluster_bits = sbi->cluster_bits;
1150 u32 off = vbo & sbi->cluster_mask;
1152 CLST vcn_next, vcn = vbo >> cluster_bits;
1156 struct buffer_head *bh;
1159 /* First reading of $Volume + $MFTMirr + $LogFile goes here. */
1160 if (vbo > MFT_REC_VOL * sbi->record_size) {
1165 /* Use absolute boot's 'MFTCluster' to read record. */
1166 lbo = vbo + sbi->mft.lbo;
1167 len = sbi->record_size;
1168 } else if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
1172 if (lcn == SPARSE_LCN) {
1177 lbo = ((u64)lcn << cluster_bits) + off;
1178 len = ((u64)clen << cluster_bits) - off;
/* Re-derive in-block offset from the device byte offset. */
1181 off = lbo & (blocksize - 1);
1188 u32 len32 = len >= bytes ? bytes : len;
1189 sector_t block = lbo >> sb->s_blocksize_bits;
1192 u32 op = blocksize - off;
1197 bh = ntfs_bread(sb, block);
1204 memcpy(buf, bh->b_data + off, op);
1205 buf = Add2Ptr(buf, op);
/* Collecting mode: cap the number of retained buffer heads. */
1210 } else if (nbh >= ARRAY_SIZE(nb->bh)) {
1227 vcn_next = vcn + clen;
1228 if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
1234 if (lcn == SPARSE_LCN) {
1239 lbo = ((u64)lcn << cluster_bits);
1240 len = ((u64)clen << cluster_bits);
/* Error path: release everything collected so far. */
1248 put_bh(nb->bh[--nbh]);
/*
 * Reads a fixup-protected record: ntfs_read_run_nb() to fetch the raw
 * bytes, then ntfs_fix_post_read() in 'simple' mode to validate and
 * strip the update-sequence fixups.
 * NOTE(review): sampled listing — the error-propagation line between
 * the two calls is elided.
 */
1259 * Return: < 0 if error, 0 if ok, -E_NTFS_FIXUP if need to update fixups.
1261 int ntfs_read_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
1262 struct NTFS_RECORD_HEADER *rhdr, u32 bytes,
1263 struct ntfs_buffers *nb)
1265 int err = ntfs_read_run_nb(sbi, run, vbo, rhdr, bytes, nb);
1269 return ntfs_fix_post_read(rhdr, nb->bytes, true);
/*
 * Populates @nb with the buffer heads covering @bytes at @vbo of @run,
 * for a subsequent overwrite (ntfs_write_bh).  Whole blocks are merely
 * grabbed and marked uptodate without reading; partial blocks are
 * read.  On error, already-collected heads are released.
 * NOTE(review): sampled listing — error labels, nb field bookkeeping
 * and the success return are elided between the numbered lines.
 */
1272 int ntfs_get_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
1273 u32 bytes, struct ntfs_buffers *nb)
1276 struct super_block *sb = sbi->sb;
1277 u32 blocksize = sb->s_blocksize;
1278 u8 cluster_bits = sbi->cluster_bits;
1279 CLST vcn_next, vcn = vbo >> cluster_bits;
1288 if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
1293 off = vbo & sbi->cluster_mask;
1294 lbo = ((u64)lcn << cluster_bits) + off;
1295 len = ((u64)clen << cluster_bits) - off;
/* Remember the in-block offset of the record start. */
1297 nb->off = off = lbo & (blocksize - 1);
1300 u32 len32 = min_t(u64, len, bytes);
1301 sector_t block = lbo >> sb->s_blocksize_bits;
1305 struct buffer_head *bh;
1307 if (nbh >= ARRAY_SIZE(nb->bh)) {
1312 op = blocksize - off;
/* Full block will be completely overwritten: skip the disk read. */
1316 if (op == blocksize) {
1317 bh = sb_getblk(sb, block);
1322 if (buffer_locked(bh))
1323 __wait_on_buffer(bh);
1324 set_buffer_uptodate(bh);
1326 bh = ntfs_bread(sb, block);
1345 vcn_next = vcn + clen;
1346 if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
1352 lbo = ((u64)lcn << cluster_bits);
1353 len = ((u64)clen << cluster_bits);
/* Error path: drop collected buffer heads. */
1358 put_bh(nb->bh[--nbh]);
/*
 * Writes a record through the buffer heads previously collected in
 * @nb, applying update-sequence fixups on the fly: the sequence word
 * is bumped (wrapping before 0x7FFF), written back into @rhdr, and
 * stamped over the last __le16 of each sector while the true words go
 * into the fixup array in the copied data.  Buffers are locked during
 * modification and optionally synced.
 * NOTE(review): sampled listing — validation failure return, fixup
 * save loop interior, unlock_buffer/put and the final return are
 * elided between the numbered lines.
 */
1367 int ntfs_write_bh(struct ntfs_sb_info *sbi, struct NTFS_RECORD_HEADER *rhdr,
1368 struct ntfs_buffers *nb, int sync)
1371 struct super_block *sb = sbi->sb;
1372 u32 block_size = sb->s_blocksize;
1373 u32 bytes = nb->bytes;
1375 u16 fo = le16_to_cpu(rhdr->fix_off);
1376 u16 fn = le16_to_cpu(rhdr->fix_num);
/* Same fixup-header validation as ntfs_fix_pre_write. */
1381 if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
1382 fn * SECTOR_SIZE > bytes) {
1386 for (idx = 0; bytes && idx < nb->nbufs; idx += 1, off = 0) {
1387 u32 op = block_size - off;
1389 struct buffer_head *bh = nb->bh[idx];
1390 __le16 *ptr, *end_data;
1395 if (buffer_locked(bh))
1396 __wait_on_buffer(bh);
1398 lock_buffer(nb->bh[idx]);
1400 bh_data = bh->b_data + off;
1401 end_data = Add2Ptr(bh_data, op);
1402 memcpy(bh_data, rhdr, op);
/* First buffer: advance the update sequence number. */
1407 fixup = Add2Ptr(bh_data, fo);
1409 t16 = le16_to_cpu(sample);
1410 if (t16 >= 0x7FFF) {
1411 sample = *fixup = cpu_to_le16(1);
1413 sample = cpu_to_le16(t16 + 1);
/* Keep the in-memory record header in sync with what was written. */
1417 *(__le16 *)Add2Ptr(rhdr, fo) = sample;
1420 ptr = Add2Ptr(bh_data, SECTOR_SIZE - sizeof(short));
1425 ptr += SECTOR_SIZE / sizeof(short);
1426 } while (ptr < end_data);
1428 set_buffer_uptodate(bh);
1429 mark_buffer_dirty(bh);
1433 int err2 = sync_dirty_buffer(bh);
1440 rhdr = Add2Ptr(rhdr, op);
/*
 * Allocates a bio with @nr_vecs vectors; under memory pressure
 * (PF_MEMALLOC) retries with the vector count halved until an
 * allocation succeeds or the count reaches zero.
 * NOTE(review): sampled listing — the final return is elided.
 */
1446 static inline struct bio *ntfs_alloc_bio(u32 nr_vecs)
1448 struct bio *bio = bio_alloc(GFP_NOFS | __GFP_HIGH, nr_vecs);
1450 if (!bio && (current->flags & PF_MEMALLOC)) {
1451 while (!bio && (nr_vecs /= 2))
1452 bio = bio_alloc(GFP_NOFS | __GFP_HIGH, nr_vecs);
/*
 * Transfers @bytes between @pages and disk by walking @run, building
 * chained bios (one per extent / vector exhaustion) under a blk plug
 * and waiting for completion with submit_bio_wait().  The byte range
 * is first widened to 512-byte alignment.
 * NOTE(review): sampled listing — op/read-write setup, page advance
 * logic, error labels and the final return are elided.
 */
1458 * ntfs_bio_pages - Read/write pages from/to disk.
1460 int ntfs_bio_pages(struct ntfs_sb_info *sbi, const struct runs_tree *run,
1461 struct page **pages, u32 nr_pages, u64 vbo, u32 bytes,
1465 struct bio *new, *bio = NULL;
1466 struct super_block *sb = sbi->sb;
1467 struct block_device *bdev = sb->s_bdev;
1469 u8 cluster_bits = sbi->cluster_bits;
1470 CLST lcn, clen, vcn, vcn_next;
1471 u32 add, off, page_idx;
1474 struct blk_plug plug;
1479 blk_start_plug(&plug);
1481 /* Align vbo and bytes to be 512 bytes aligned. */
1482 lbo = (vbo + bytes + 511) & ~511ull;
1483 vbo = vbo & ~511ull;
1486 vcn = vbo >> cluster_bits;
1487 if (!run_lookup_entry(run, vcn, &lcn, &clen, &run_idx)) {
1491 off = vbo & sbi->cluster_mask;
1496 lbo = ((u64)lcn << cluster_bits) + off;
1497 len = ((u64)clen << cluster_bits) - off;
/* New bio per extent; chain onto any bio already in flight. */
1499 new = ntfs_alloc_bio(nr_pages - page_idx);
1505 bio_chain(bio, new);
1509 bio_set_dev(bio, bdev);
1510 bio->bi_iter.bi_sector = lbo >> 9;
1514 off = vbo & (PAGE_SIZE - 1);
1515 add = off + len > PAGE_SIZE ? (PAGE_SIZE - off) : len;
/* bio full: loop back to allocate a chained continuation. */
1517 if (bio_add_page(bio, page, add, off) < add)
1525 if (add + off == PAGE_SIZE) {
1527 if (WARN_ON(page_idx >= nr_pages)) {
1531 page = pages[page_idx];
1540 vcn_next = vcn + clen;
1541 if (!run_get_entry(run, ++run_idx, &vcn, &lcn, &clen) ||
1551 err = submit_bio_wait(bio);
1554 blk_finish_plug(&plug);
/*
 * Fills every cluster of @run with 0xFF by repeatedly bio-writing a
 * single preformatted 0xFF page, chaining bios per extent under a blk
 * plug and waiting at the end.  Used to blank $LogFile after replay.
 * NOTE(review): sampled listing — allocation-failure paths, the put of
 * the fill page and the final return are elided.
 */
1560 * ntfs_bio_fill_1 - Helper for ntfs_loadlog_and_replay().
1562 * Fill on-disk logfile range by (-1)
1563 * this means empty logfile.
1565 int ntfs_bio_fill_1(struct ntfs_sb_info *sbi, const struct runs_tree *run)
1568 struct super_block *sb = sbi->sb;
1569 struct block_device *bdev = sb->s_bdev;
1570 u8 cluster_bits = sbi->cluster_bits;
1571 struct bio *new, *bio = NULL;
1577 struct blk_plug plug;
1579 fill = alloc_page(GFP_KERNEL);
/* One page of 0xFF, reused as the payload for every bio vector. */
1583 kaddr = kmap_atomic(fill);
1584 memset(kaddr, -1, PAGE_SIZE);
1585 kunmap_atomic(kaddr);
1586 flush_dcache_page(fill);
1589 if (!run_lookup_entry(run, 0, &lcn, &clen, &run_idx)) {
1595 * TODO: Try blkdev_issue_write_same.
1597 blk_start_plug(&plug);
1599 lbo = (u64)lcn << cluster_bits;
1600 len = (u64)clen << cluster_bits;
1602 new = ntfs_alloc_bio(BIO_MAX_VECS);
1608 bio_chain(bio, new);
1612 bio_set_dev(bio, bdev);
1613 bio->bi_opf = REQ_OP_WRITE;
1614 bio->bi_iter.bi_sector = lbo >> 9;
1617 u32 add = len > PAGE_SIZE ? PAGE_SIZE : len;
1619 if (bio_add_page(bio, fill, add, 0) < add)
1627 } while (run_get_entry(run, ++run_idx, NULL, &lcn, &clen));
1631 err = submit_bio_wait(bio);
1634 blk_finish_plug(&plug);
/*
 * Translates virtual byte offset @vbo through @run into the absolute
 * byte offset (*lbo, -1 for sparse extents) and the number of bytes
 * (*bytes) remaining in that extent.
 * NOTE(review): sampled listing — the not-found return and the success
 * return are elided.
 */
1642 int ntfs_vbo_to_lbo(struct ntfs_sb_info *sbi, const struct runs_tree *run,
1643 u64 vbo, u64 *lbo, u64 *bytes)
1647 u8 cluster_bits = sbi->cluster_bits;
1649 if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, &len, NULL))
1652 off = vbo & sbi->cluster_mask;
1653 *lbo = lcn == SPARSE_LCN ? -1 : (((u64)lcn << cluster_bits) + off);
1654 *bytes = ((u64)len << cluster_bits) - off;
/*
 * Allocates a VFS inode for MFT record @rno, formats a fresh MFT
 * record in it via mi_format_new() (RECORD_FLAG_DIR when @dir) and
 * inserts the inode into the inode hash locked.
 * NOTE(review): sampled listing — intermediate error handling and the
 * tail of the function (including its return) are elided.
 */
1659 struct ntfs_inode *ntfs_new_inode(struct ntfs_sb_info *sbi, CLST rno, bool dir)
1662 struct super_block *sb = sbi->sb;
1663 struct inode *inode = new_inode(sb);
1664 struct ntfs_inode *ni;
1667 return ERR_PTR(-ENOMEM);
1671 err = mi_format_new(&ni->mi, sbi, rno, dir ? RECORD_FLAG_DIR : 0,
1677 if (insert_inode_locked(inode) < 0) {
/*
 * Default self-relative security descriptor (SDDL shown below):
 * Administrators owner/group, single ACE granting Everyone full
 * access.  The static_assert pins the expected on-disk size (0x50).
 */
1691 * O:BAG:BAD:(A;OICI;FA;;;WD)
1692 * Owner S-1-5-32-544 (Administrators)
1693 * Group S-1-5-32-544 (Administrators)
1694 * ACE: allow S-1-1-0 (Everyone) with FILE_ALL_ACCESS
1696 const u8 s_default_security[] __aligned(8) = {
1697 0x01, 0x00, 0x04, 0x80, 0x30, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
1698 0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x02, 0x00, 0x1C, 0x00,
1699 0x01, 0x00, 0x00, 0x00, 0x00, 0x03, 0x14, 0x00, 0xFF, 0x01, 0x1F, 0x00,
1700 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
1701 0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x20, 0x00, 0x00, 0x00,
1702 0x20, 0x02, 0x00, 0x00, 0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
1703 0x20, 0x00, 0x00, 0x00, 0x20, 0x02, 0x00, 0x00,
1706 static_assert(sizeof(s_default_security) == 0x50);
/* Byte length of a SID: header plus SubAuthorityCount sub-authorities. */
1708 static inline u32 sid_length(const struct SID *sid)
1710 return struct_size(sid, SubAuthority, sid->SubAuthorityCount);
/*
 * Validates an ACL within @len bytes: revision must be ACL_REVISION or
 * ACL_REVISION_DS, the declared AclSize must fit, and each ACE header
 * and its AceSize must stay inside the remaining length.
 * NOTE(review): sampled listing — several size checks and the final
 * return are elided between the numbered lines.
 */
1716 * Thanks Mark Harmstone for idea.
1718 static bool is_acl_valid(const struct ACL *acl, u32 len)
1720 const struct ACE_HEADER *ace;
1722 u16 ace_count, ace_size;
1724 if (acl->AclRevision != ACL_REVISION &&
1725 acl->AclRevision != ACL_REVISION_DS) {
1727 * This value should be ACL_REVISION, unless the ACL contains an
1728 * object-specific ACE, in which case this value must be ACL_REVISION_DS.
1729 * All ACEs in an ACL must be at the same revision level.
1737 if (le16_to_cpu(acl->AclSize) > len)
1743 len -= sizeof(struct ACL);
/* First ACE immediately follows the ACL header. */
1744 ace = (struct ACE_HEADER *)&acl[1];
1745 ace_count = le16_to_cpu(acl->AceCount);
1747 for (i = 0; i < ace_count; i++) {
1748 if (len < sizeof(struct ACE_HEADER))
1751 ace_size = le16_to_cpu(ace->AceSize);
1756 ace = Add2Ptr(ace, ace_size);
/*
 * Validates a self-relative SECURITY_DESCRIPTOR_RELATIVE within @len:
 * revision 1, SE_SELF_RELATIVE set, and each present Owner/Group SID
 * (revision 1, full length in bounds) and Sacl/Dacl ACL (header in
 * bounds, is_acl_valid on the remainder) checked against @len.
 * NOTE(review): sampled listing — the 'return false' lines after each
 * check and the final 'return true' are elided.
 */
1762 bool is_sd_valid(const struct SECURITY_DESCRIPTOR_RELATIVE *sd, u32 len)
1764 u32 sd_owner, sd_group, sd_sacl, sd_dacl;
1766 if (len < sizeof(struct SECURITY_DESCRIPTOR_RELATIVE))
1769 if (sd->Revision != 1)
1775 if (!(sd->Control & SE_SELF_RELATIVE))
1778 sd_owner = le32_to_cpu(sd->Owner);
1780 const struct SID *owner = Add2Ptr(sd, sd_owner);
1782 if (sd_owner + offsetof(struct SID, SubAuthority) > len)
1785 if (owner->Revision != 1)
1788 if (sd_owner + sid_length(owner) > len)
1792 sd_group = le32_to_cpu(sd->Group);
1794 const struct SID *group = Add2Ptr(sd, sd_group);
1796 if (sd_group + offsetof(struct SID, SubAuthority) > len)
1799 if (group->Revision != 1)
1802 if (sd_group + sid_length(group) > len)
1806 sd_sacl = le32_to_cpu(sd->Sacl);
1808 const struct ACL *sacl = Add2Ptr(sd, sd_sacl);
1810 if (sd_sacl + sizeof(struct ACL) > len)
1813 if (!is_acl_valid(sacl, len - sd_sacl))
1817 sd_dacl = le32_to_cpu(sd->Dacl);
1819 const struct ACL *dacl = Add2Ptr(sd, sd_dacl);
1821 if (sd_dacl + sizeof(struct ACL) > len)
1824 if (!is_acl_valid(dacl, len - sd_dacl))
/*
 * Opens $Secure, validates and initializes its $SDH (hash) and $SII
 * (id) index roots, then walks $SII to recover the next free security
 * id and sets the next write offset at the end of the current $SDS
 * bucket.  On success stores the $Secure ntfs_inode in
 * sbi->security.ni.
 * NOTE(review): sampled listing — error labels, fnd_put/iput cleanup
 * and the final return are elided between the numbered lines.
 */
1832 * ntfs_security_init - Load and parse $Secure.
1834 int ntfs_security_init(struct ntfs_sb_info *sbi)
1837 struct super_block *sb = sbi->sb;
1838 struct inode *inode;
1839 struct ntfs_inode *ni;
1841 struct ATTRIB *attr;
1842 struct ATTR_LIST_ENTRY *le;
1846 struct NTFS_DE_SII *sii_e;
1847 struct ntfs_fnd *fnd_sii = NULL;
1848 const struct INDEX_ROOT *root_sii;
1849 const struct INDEX_ROOT *root_sdh;
1850 struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
1851 struct ntfs_index *indx_sii = &sbi->security.index_sii;
1853 ref.low = cpu_to_le32(MFT_REC_SECURE);
1855 ref.seq = cpu_to_le16(MFT_REC_SECURE);
1857 inode = ntfs_iget5(sb, &ref, &NAME_SECURE);
1858 if (IS_ERR(inode)) {
1859 err = PTR_ERR(inode);
1860 ntfs_err(sb, "Failed to load $Secure.");
1869 attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SDH_NAME,
1870 ARRAY_SIZE(SDH_NAME), NULL, NULL);
1876 root_sdh = resident_data(attr);
/* $SDH is keyed by security-descriptor hash. */
1877 if (root_sdh->type != ATTR_ZERO ||
1878 root_sdh->rule != NTFS_COLLATION_TYPE_SECURITY_HASH) {
1883 err = indx_init(indx_sdh, sbi, attr, INDEX_MUTEX_SDH);
1887 attr = ni_find_attr(ni, attr, &le, ATTR_ROOT, SII_NAME,
1888 ARRAY_SIZE(SII_NAME), NULL, NULL);
1894 root_sii = resident_data(attr);
/* $SII is keyed by the 32-bit security id. */
1895 if (root_sii->type != ATTR_ZERO ||
1896 root_sii->rule != NTFS_COLLATION_TYPE_UINT) {
1901 err = indx_init(indx_sii, sbi, attr, INDEX_MUTEX_SII);
1905 fnd_sii = fnd_get();
1911 sds_size = inode->i_size;
1913 /* Find the last valid Id. */
1914 sbi->security.next_id = SECURITY_ID_FIRST;
1915 /* Always write new security at the end of bucket. */
1916 sbi->security.next_off =
1917 ALIGN(sds_size - SecurityDescriptorsBlockSize, 16);
1925 err = indx_find_raw(indx_sii, ni, root_sii, &ne, &off, fnd_sii);
1929 sii_e = (struct NTFS_DE_SII *)ne;
1930 if (le16_to_cpu(ne->view.data_size) < SIZEOF_SECURITY_HDR)
/* Track the highest seen id + 1. */
1933 next_id = le32_to_cpu(sii_e->sec_id) + 1;
1934 if (next_id >= sbi->security.next_id)
1935 sbi->security.next_id = next_id;
1938 sbi->security.ni = ni;
1948 * ntfs_get_security_by_id - Read security descriptor by id.
1950 int ntfs_get_security_by_id(struct ntfs_sb_info *sbi, __le32 security_id,
1951 struct SECURITY_DESCRIPTOR_RELATIVE **sd,
/*
 * Read a security descriptor from $Secure::$SDS given its security id,
 * via a lookup in the $SII index. On success *sd holds a kmalloc'ed
 * descriptor and *size its length (ownership presumably passes to the
 * caller — confirm against callers; free path not visible in this excerpt).
 * NOTE(review): lines are elided from this excerpt; error paths and the
 * tail of the function are not visible.
 */
1956 struct ntfs_inode *ni = sbi->security.ni;
1957 struct ntfs_index *indx = &sbi->security.index_sii;
1959 struct NTFS_DE_SII *sii_e;
1960 struct ntfs_fnd *fnd_sii;
1961 struct SECURITY_HDR d_security;
1962 const struct INDEX_ROOT *root_sii;
/* Serialize against other $Secure users. */
1967 mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);
1969 fnd_sii = fnd_get();
1975 root_sii = indx_get_root(indx, ni, NULL, NULL);
1981 /* Try to find this SECURITY descriptor in SII indexes. */
1982 err = indx_find(indx, ni, root_sii, &security_id, sizeof(security_id),
1983 NULL, &diff, (struct NTFS_DE **)&sii_e, fnd_sii);
/* Validate the recorded size before allocating/reading. */
1990 t32 = le32_to_cpu(sii_e->sec_hdr.size);
1991 if (t32 < SIZEOF_SECURITY_HDR) {
1996 if (t32 > SIZEOF_SECURITY_HDR + 0x10000) {
1997 /* Looks like too big security. 0x10000 - is arbitrary big number. */
2002 *size = t32 - SIZEOF_SECURITY_HDR;
2004 p = kmalloc(*size, GFP_NOFS);
/* Read the on-disk header at the entry's offset in the SDS stream. */
2010 err = ntfs_read_run_nb(sbi, &ni->file.run,
2011 le64_to_cpu(sii_e->sec_hdr.off), &d_security,
2012 sizeof(d_security), NULL);
/* The on-disk header must match the copy stored in the index entry. */
2016 if (memcmp(&d_security, &sii_e->sec_hdr, SIZEOF_SECURITY_HDR)) {
/* Read the descriptor body that follows the header. */
2021 err = ntfs_read_run_nb(sbi, &ni->file.run,
2022 le64_to_cpu(sii_e->sec_hdr.off) +
2023 SIZEOF_SECURITY_HDR,
2040 * ntfs_insert_security - Insert security descriptor into $Secure::SDS.
2042 * SECURITY Descriptor Stream data is organized into chunks of 256K bytes
2043 * and it contains a mirror copy of each security descriptor. When writing
2044 * to a security descriptor at location X, another copy will be written at
2045 * location (X+256K).
2046 * When writing a security descriptor that will cross the 256K boundary,
2047 * the pointer will be advanced by 256K to skip
2048 * over the mirror portion.
2050 int ntfs_insert_security(struct ntfs_sb_info *sbi,
2051 const struct SECURITY_DESCRIPTOR_RELATIVE *sd,
2052 u32 size_sd, __le32 *security_id, bool *inserted)
/*
 * Insert (or deduplicate) a security descriptor in $Secure::$SDS and index
 * it in both $SII (by id) and $SDH (by hash). On success *security_id holds
 * the existing or newly assigned id.
 * NOTE(review): lines are elided from this excerpt (embedded original line
 * numbers jump); error paths, labels and some locals are not visible.
 */
2055 struct ntfs_inode *ni = sbi->security.ni;
2056 struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
2057 struct ntfs_index *indx_sii = &sbi->security.index_sii;
2058 struct NTFS_DE_SDH *e;
2059 struct NTFS_DE_SDH sdh_e;
2060 struct NTFS_DE_SII sii_e;
2061 struct SECURITY_HDR *d_security;
2062 u32 new_sec_size = size_sd + SIZEOF_SECURITY_HDR;
2063 u32 aligned_sec_size = ALIGN(new_sec_size, 16);
2064 struct SECURITY_KEY hash_key;
2065 struct ntfs_fnd *fnd_sdh = NULL;
2066 const struct INDEX_ROOT *root_sdh;
2067 const struct INDEX_ROOT *root_sii;
2068 u64 mirr_off, new_sds_size;
/* Compile-time check that the block size constants are consistent. */
2071 static_assert((1 << Log2OfSecurityDescriptorsBlockSize) ==
2072 SecurityDescriptorsBlockSize);
/* Hash the descriptor; the hash is the $SDH lookup key. */
2074 hash_key.hash = security_hash(sd, size_sd);
2075 hash_key.sec_id = SECURITY_ID_INVALID;
2079 *security_id = SECURITY_ID_INVALID;
2081 /* Allocate a temporal buffer. */
2082 d_security = kzalloc(aligned_sec_size, GFP_NOFS);
2086 mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);
2088 fnd_sdh = fnd_get();
2094 root_sdh = indx_get_root(indx_sdh, ni, NULL, NULL);
2100 root_sii = indx_get_root(indx_sii, ni, NULL, NULL);
2107 * Check if such security already exists.
2108 * Use "SDH" and hash -> to get the offset in "SDS".
2110 err = indx_find(indx_sdh, ni, root_sdh, &hash_key, sizeof(hash_key),
2111 &d_security->key.sec_id, &diff, (struct NTFS_DE **)&e,
/* Same hash and size: read the stored copy and compare byte-for-byte. */
2117 if (le32_to_cpu(e->sec_hdr.size) == new_sec_size) {
2118 err = ntfs_read_run_nb(sbi, &ni->file.run,
2119 le64_to_cpu(e->sec_hdr.off),
2120 d_security, new_sec_size, NULL);
2124 if (le32_to_cpu(d_security->size) == new_sec_size &&
2125 d_security->key.hash == hash_key.hash &&
2126 !memcmp(d_security + 1, sd, size_sd)) {
2127 *security_id = d_security->key.sec_id;
2128 /* Such security already exists. */
/* Hash collision: walk further $SDH entries with the same hash. */
2134 err = indx_find_sort(indx_sdh, ni, root_sdh,
2135 (struct NTFS_DE **)&e, fnd_sdh);
2139 if (!e || e->key.hash != hash_key.hash)
2143 /* Zero unused space. */
2144 next = sbi->security.next_off & (SecurityDescriptorsBlockSize - 1);
2145 left = SecurityDescriptorsBlockSize - next;
2147 /* Zero gap until SecurityDescriptorsBlockSize. */
2148 if (left < new_sec_size) {
2149 /* Zero "left" bytes from sbi->security.next_off. */
/* Descriptor would cross a 256K boundary: skip over the mirror block. */
2150 sbi->security.next_off += SecurityDescriptorsBlockSize + left;
2153 /* Zero tail of previous security. */
2154 //used = ni->vfs_inode.i_size & (SecurityDescriptorsBlockSize - 1);
2158 * 0x40438 == ni->vfs_inode.i_size
2159 * 0x00440 == sbi->security.next_off
2160 * need to zero [0x438-0x440)
2161 * if (next > used) {
2162 * u32 tozero = next - used;
2163 * zero "tozero" bytes from sbi->security.next_off - tozero
2166 /* Format new security descriptor. */
2167 d_security->key.hash = hash_key.hash;
2168 d_security->key.sec_id = cpu_to_le32(sbi->security.next_id);
2169 d_security->off = cpu_to_le64(sbi->security.next_off);
2170 d_security->size = cpu_to_le32(new_sec_size);
2171 memcpy(d_security + 1, sd, size_sd);
2173 /* Write main SDS bucket. */
2174 err = ntfs_sb_write_run(sbi, &ni->file.run, sbi->security.next_off,
2175 d_security, aligned_sec_size, 0);
/* The SDS stream keeps a mirror copy 256K after the primary. */
2180 mirr_off = sbi->security.next_off + SecurityDescriptorsBlockSize;
2181 new_sds_size = mirr_off + aligned_sec_size;
/* Grow the $SDS data attribute if the mirror would not fit. */
2183 if (new_sds_size > ni->vfs_inode.i_size) {
2184 err = attr_set_size(ni, ATTR_DATA, SDS_NAME,
2185 ARRAY_SIZE(SDS_NAME), &ni->file.run,
2186 new_sds_size, &new_sds_size, false, NULL);
2191 /* Write copy SDS bucket. */
2192 err = ntfs_sb_write_run(sbi, &ni->file.run, mirr_off, d_security,
2193 aligned_sec_size, 0);
2197 /* Fill SII entry. */
2198 sii_e.de.view.data_off =
2199 cpu_to_le16(offsetof(struct NTFS_DE_SII, sec_hdr));
2200 sii_e.de.view.data_size = cpu_to_le16(SIZEOF_SECURITY_HDR);
2201 sii_e.de.view.res = 0;
2202 sii_e.de.size = cpu_to_le16(SIZEOF_SII_DIRENTRY);
2203 sii_e.de.key_size = cpu_to_le16(sizeof(d_security->key.sec_id));
2206 sii_e.sec_id = d_security->key.sec_id;
2207 memcpy(&sii_e.sec_hdr, d_security, SIZEOF_SECURITY_HDR);
2209 err = indx_insert_entry(indx_sii, ni, &sii_e.de, NULL, NULL, 0);
2213 /* Fill SDH entry. */
2214 sdh_e.de.view.data_off =
2215 cpu_to_le16(offsetof(struct NTFS_DE_SDH, sec_hdr));
2216 sdh_e.de.view.data_size = cpu_to_le16(SIZEOF_SECURITY_HDR);
2217 sdh_e.de.view.res = 0;
2218 sdh_e.de.size = cpu_to_le16(SIZEOF_SDH_DIRENTRY);
2219 sdh_e.de.key_size = cpu_to_le16(sizeof(sdh_e.key));
2222 sdh_e.key.hash = d_security->key.hash;
2223 sdh_e.key.sec_id = d_security->key.sec_id;
2224 memcpy(&sdh_e.sec_hdr, d_security, SIZEOF_SECURITY_HDR);
/* On-disk $SDH entries carry a trailing "II" magic. */
2225 sdh_e.magic[0] = cpu_to_le16('I');
2226 sdh_e.magic[1] = cpu_to_le16('I');
2229 err = indx_insert_entry(indx_sdh, ni, &sdh_e.de, (void *)(size_t)1,
2234 *security_id = d_security->key.sec_id;
2238 /* Update Id and offset for next descriptor. */
2239 sbi->security.next_id += 1;
2240 sbi->security.next_off += aligned_sec_size;
2244 mark_inode_dirty(&ni->vfs_inode);
2252 * ntfs_reparse_init - Load and parse $Extend/$Reparse.
2254 int ntfs_reparse_init(struct ntfs_sb_info *sbi)
/*
 * Load and parse $Extend/$Reparse: find the $R index root, validate its
 * collation rule, and initialize the in-memory reparse index.
 * NOTE(review): lines are elided from this excerpt; error handling between
 * the visible statements is not shown.
 */
2257 struct ntfs_inode *ni = sbi->reparse.ni;
2258 struct ntfs_index *indx = &sbi->reparse.index_r;
2259 struct ATTRIB *attr;
2260 struct ATTR_LIST_ENTRY *le;
2261 const struct INDEX_ROOT *root_r;
2267 attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SR_NAME,
2268 ARRAY_SIZE(SR_NAME), NULL, NULL);
2274 root_r = resident_data(attr);
/* Sanity-check the on-disk index root before trusting it. */
2275 if (root_r->type != ATTR_ZERO ||
2276 root_r->rule != NTFS_COLLATION_TYPE_UINTS) {
2281 err = indx_init(indx, sbi, attr, INDEX_MUTEX_SR);
2290 * ntfs_objid_init - Load and parse $Extend/$ObjId.
2292 int ntfs_objid_init(struct ntfs_sb_info *sbi)
/*
 * Load and parse $Extend/$ObjId: find the $O index root, validate its
 * collation rule, and initialize the in-memory object-id index.
 * NOTE(review): lines are elided from this excerpt; error handling between
 * the visible statements is not shown.
 */
2295 struct ntfs_inode *ni = sbi->objid.ni;
2296 struct ntfs_index *indx = &sbi->objid.index_o;
2297 struct ATTRIB *attr;
2298 struct ATTR_LIST_ENTRY *le;
2299 const struct INDEX_ROOT *root;
2305 attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SO_NAME,
2306 ARRAY_SIZE(SO_NAME), NULL, NULL);
2312 root = resident_data(attr);
/* Sanity-check the on-disk index root before trusting it. */
2313 if (root->type != ATTR_ZERO ||
2314 root->rule != NTFS_COLLATION_TYPE_UINTS) {
2319 err = indx_init(indx, sbi, attr, INDEX_MUTEX_SO);
2327 int ntfs_objid_remove(struct ntfs_sb_info *sbi, struct GUID *guid)
/*
 * Remove an object id (GUID key) from the $Extend/$ObjId index under the
 * per-inode lock, then mark the index inode dirty.
 * NOTE(review): the unlock/return tail is elided from this excerpt.
 */
2330 struct ntfs_inode *ni = sbi->objid.ni;
2331 struct ntfs_index *indx = &sbi->objid.index_o;
2336 mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_OBJID);
2338 err = indx_delete_entry(indx, ni, guid, sizeof(*guid), NULL);
2340 mark_inode_dirty(&ni->vfs_inode);
2346 int ntfs_insert_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
2347 const struct MFT_REF *ref)
/*
 * Insert a (reparse tag, MFT reference) entry into the $Extend/$Reparse
 * index under the per-inode lock.
 * NOTE(review): the unlock/return tail is elided from this excerpt.
 */
2350 struct ntfs_inode *ni = sbi->reparse.ni;
2351 struct ntfs_index *indx = &sbi->reparse.index_r;
2352 struct NTFS_DE_R re;
/* Build the index entry; zero first so reserved fields stay clear. */
2357 memset(&re, 0, sizeof(re));
2359 re.de.view.data_off = cpu_to_le16(offsetof(struct NTFS_DE_R, zero));
2360 re.de.size = cpu_to_le16(sizeof(struct NTFS_DE_R));
2361 re.de.key_size = cpu_to_le16(sizeof(re.key));
2363 re.key.ReparseTag = rtag;
2364 memcpy(&re.key.ref, ref, sizeof(*ref));
2366 mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);
2368 err = indx_insert_entry(indx, ni, &re.de, NULL, NULL, 0);
2370 mark_inode_dirty(&ni->vfs_inode);
2376 int ntfs_remove_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
2377 const struct MFT_REF *ref)
/*
 * Remove the (reparse tag, MFT reference) entry from $Extend/$Reparse.
 * If the exact-key delete does not succeed, fall back to searching by the
 * MFT reference alone and delete the matching key found on disk.
 * NOTE(review): lines are elided from this excerpt; the branch structure
 * between the delete attempts is not fully visible.
 */
2380 struct ntfs_inode *ni = sbi->reparse.ni;
2381 struct ntfs_index *indx = &sbi->reparse.index_r;
2382 struct ntfs_fnd *fnd = NULL;
2383 struct REPARSE_KEY rkey;
2384 struct NTFS_DE_R *re;
2385 struct INDEX_ROOT *root_r;
2390 rkey.ReparseTag = rtag;
2393 mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);
/* First try: delete by the exact (tag, ref) key. */
2396 err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
2406 root_r = indx_get_root(indx, ni, NULL, NULL);
2412 /* 1 - forces to ignore rkey.ReparseTag when comparing keys. */
2413 err = indx_find(indx, ni, root_r, &rkey, sizeof(rkey), (void *)1, &diff,
2414 (struct NTFS_DE **)&re, fnd);
2418 if (memcmp(&re->key.ref, ref, sizeof(*ref))) {
2419 /* Impossible. Looks like volume corrupt? */
/* Use the key actually stored on disk for the real delete. */
2423 memcpy(&rkey, &re->key, sizeof(rkey));
2428 err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
2436 mark_inode_dirty(&ni->vfs_inode);
/*
 * Drop cached metadata buffers for the cluster range, then issue a discard
 * (TRIM) for it. NOTE(review): the second parameter line and closing brace
 * are elided from this excerpt.
 */
2442 static inline void ntfs_unmap_and_discard(struct ntfs_sb_info *sbi, CLST lcn,
2445 ntfs_unmap_meta(sbi->sb, lcn, len);
2446 ntfs_discard(sbi, lcn, len);
2449 void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim)
/*
 * Mark clusters [lcn, lcn+len) free in the volume bitmap, optionally
 * unmapping/discarding them first. If part of the range is already free,
 * the volume is flagged dirty and only the used pieces are released.
 * NOTE(review): lines are elided from this excerpt (the 'end' bound, else
 * branches and trim checks around the discard calls are not visible).
 */
2452 struct wnd_bitmap *wnd = &sbi->used.bitmap;
/* Bitmap writers take the cluster-bitmap rwsem exclusively. */
2454 down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
2455 if (!wnd_is_used(wnd, lcn, len)) {
/* Freeing clusters that are not marked used implies corruption. */
2456 ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
/* Release each still-used sub-range individually. */
2460 for (i = lcn; i < end; i++) {
2461 if (wnd_is_used(wnd, i, 1)) {
2472 ntfs_unmap_and_discard(sbi, lcn, len);
2474 wnd_set_free(wnd, lcn, len);
/* Fully-used range: discard and free it in one shot. */
2483 ntfs_unmap_and_discard(sbi, lcn, len);
2484 wnd_set_free(wnd, lcn, len);
2487 up_write(&wnd->rw_lock);
2491 * run_deallocate - Deallocate clusters.
2493 int run_deallocate(struct ntfs_sb_info *sbi, struct runs_tree *run, bool trim)
2498 while (run_get_entry(run, idx++, NULL, &lcn, &len)) {
2499 if (lcn == SPARSE_LCN)
2502 mark_as_free_ex(sbi, lcn, len, trim);