1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
8 #include <linux/blkdev.h>
9 #include <linux/buffer_head.h>
11 #include <linux/nls.h>
/*
 * Well-known NTFS metafile and attribute names.
 * struct cpu_str stores an explicit character count as its first field
 * (e.g. 4 for "$MFT"); the __le16 arrays below are raw little-endian
 * UTF-16 names used to match attributes and indexes ($I30 directory
 * index, $SII/$SDH security indexes, $SDS stream, etc.).
 * NOTE(review): this excerpt omits interleaved lines (closing "};"
 * of each initializer) present in the full file.
 */
18 const struct cpu_str NAME_MFT = {
19 4, 0, { '$', 'M', 'F', 'T' },
21 const struct cpu_str NAME_MIRROR = {
22 8, 0, { '$', 'M', 'F', 'T', 'M', 'i', 'r', 'r' },
24 const struct cpu_str NAME_LOGFILE = {
25 8, 0, { '$', 'L', 'o', 'g', 'F', 'i', 'l', 'e' },
27 const struct cpu_str NAME_VOLUME = {
28 7, 0, { '$', 'V', 'o', 'l', 'u', 'm', 'e' },
30 const struct cpu_str NAME_ATTRDEF = {
31 8, 0, { '$', 'A', 't', 't', 'r', 'D', 'e', 'f' },
33 const struct cpu_str NAME_ROOT = {
36 const struct cpu_str NAME_BITMAP = {
37 7, 0, { '$', 'B', 'i', 't', 'm', 'a', 'p' },
39 const struct cpu_str NAME_BOOT = {
40 5, 0, { '$', 'B', 'o', 'o', 't' },
42 const struct cpu_str NAME_BADCLUS = {
43 8, 0, { '$', 'B', 'a', 'd', 'C', 'l', 'u', 's' },
45 const struct cpu_str NAME_QUOTA = {
46 6, 0, { '$', 'Q', 'u', 'o', 't', 'a' },
48 const struct cpu_str NAME_SECURE = {
49 7, 0, { '$', 'S', 'e', 'c', 'u', 'r', 'e' },
51 const struct cpu_str NAME_UPCASE = {
52 7, 0, { '$', 'U', 'p', 'C', 'a', 's', 'e' },
54 const struct cpu_str NAME_EXTEND = {
55 7, 0, { '$', 'E', 'x', 't', 'e', 'n', 'd' },
57 const struct cpu_str NAME_OBJID = {
58 6, 0, { '$', 'O', 'b', 'j', 'I', 'd' },
60 const struct cpu_str NAME_REPARSE = {
61 8, 0, { '$', 'R', 'e', 'p', 'a', 'r', 's', 'e' },
63 const struct cpu_str NAME_USNJRNL = {
64 8, 0, { '$', 'U', 's', 'n', 'J', 'r', 'n', 'l' },
66 const __le16 BAD_NAME[4] = {
67 cpu_to_le16('$'), cpu_to_le16('B'), cpu_to_le16('a'), cpu_to_le16('d'),
69 const __le16 I30_NAME[4] = {
70 cpu_to_le16('$'), cpu_to_le16('I'), cpu_to_le16('3'), cpu_to_le16('0'),
72 const __le16 SII_NAME[4] = {
73 cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('I'), cpu_to_le16('I'),
75 const __le16 SDH_NAME[4] = {
76 cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('H'),
78 const __le16 SDS_NAME[4] = {
79 cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('S'),
81 const __le16 SO_NAME[2] = {
82 cpu_to_le16('$'), cpu_to_le16('O'),
84 const __le16 SQ_NAME[2] = {
85 cpu_to_le16('$'), cpu_to_le16('Q'),
87 const __le16 SR_NAME[2] = {
88 cpu_to_le16('$'), cpu_to_le16('R'),
/* "WofCompressedData" — only used when LZX/XPRESS support is built in. */
91 #ifdef CONFIG_NTFS3_LZX_XPRESS
92 const __le16 WOF_NAME[17] = {
93 cpu_to_le16('W'), cpu_to_le16('o'), cpu_to_le16('f'), cpu_to_le16('C'),
94 cpu_to_le16('o'), cpu_to_le16('m'), cpu_to_le16('p'), cpu_to_le16('r'),
95 cpu_to_le16('e'), cpu_to_le16('s'), cpu_to_le16('s'), cpu_to_le16('e'),
96 cpu_to_le16('d'), cpu_to_le16('D'), cpu_to_le16('a'), cpu_to_le16('t'),
104 * ntfs_fix_pre_write - Insert fixups into @rhdr before writing to disk.
106 bool ntfs_fix_pre_write(struct NTFS_RECORD_HEADER *rhdr, size_t bytes)
110 u16 fo = le16_to_cpu(rhdr->fix_off);
111 u16 fn = le16_to_cpu(rhdr->fix_num);
/*
 * Validate the fixup area: the offset must be 2-byte aligned, the whole
 * fixup array must fit inside the first sector, and fn (sample word plus
 * one entry per sector) must not describe more sectors than @bytes holds.
 */
113 if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
114 fn * SECTOR_SIZE > bytes) {
118 /* Get fixup pointer. */
119 fixup = Add2Ptr(rhdr, fo);
/* Advance the update sequence number; NOTE(review): the wrap handling
 * when it reaches 0x7FFF is in lines omitted from this excerpt. */
121 if (*fixup >= 0x7FFF)
/* Point at the last 16-bit word of the first sector. */
128 ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));
/* Step to the last word of the next sector. */
133 ptr += SECTOR_SIZE / sizeof(short);
139 * ntfs_fix_post_read - Remove fixups after reading from disk.
141 * Return: < 0 if error, 0 if ok, 1 if need to update fixups.
143 int ntfs_fix_post_read(struct NTFS_RECORD_HEADER *rhdr, size_t bytes,
150 fo = le16_to_cpu(rhdr->fix_off);
/* In "simple" mode derive the fixup count from @bytes instead of
 * trusting the on-disk fix_num field. */
151 fn = simple ? ((bytes >> SECTOR_SHIFT) + 1)
152 : le16_to_cpu(rhdr->fix_num);
/* Same sanity check as in ntfs_fix_pre_write(). */
155 if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
156 fn * SECTOR_SIZE > bytes) {
157 return -EINVAL; /* Native chkntfs returns ok! */
160 /* Get fixup pointer. */
161 fixup = Add2Ptr(rhdr, fo);
163 ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));
167 /* Test current word. */
/* The last word of each sector must equal the sample; on mismatch the
 * (omitted) code decides whether it is fatal. */
168 if (*ptr != sample) {
169 /* Fixup does not match! Is it serious error? */
175 ptr += SECTOR_SIZE / sizeof(short);
182 * ntfs_extend_init - Load $Extend file.
/*
 * Loads the $Extend directory (NTFS v3+ only) and looks up its optional
 * children ($ObjId, $Quota, $Reparse, $UsnJrnl), caching their inode
 * numbers (and for $ObjId/$Reparse the ntfs_inode) in @sbi.
 */
184 int ntfs_extend_init(struct ntfs_sb_info *sbi)
187 struct super_block *sb = sbi->sb;
188 struct inode *inode, *inode2;
/* $Extend exists only since NTFS 3.x; older volumes simply skip it. */
191 if (sbi->volume.major_ver < 3) {
192 ntfs_notice(sb, "Skip $Extend 'cause NTFS version");
196 ref.low = cpu_to_le32(MFT_REC_EXTEND);
198 ref.seq = cpu_to_le16(MFT_REC_EXTEND);
199 inode = ntfs_iget5(sb, &ref, &NAME_EXTEND);
201 err = PTR_ERR(inode);
202 ntfs_err(sb, "Failed to load $Extend.");
207 /* If ntfs_iget5() reads from disk it never returns bad inode. */
208 if (!S_ISDIR(inode->i_mode)) {
213 /* Try to find $ObjId */
214 inode2 = dir_search_u(inode, &NAME_OBJID, NULL);
215 if (inode2 && !IS_ERR(inode2)) {
216 if (is_bad_inode(inode2)) {
219 sbi->objid.ni = ntfs_i(inode2);
220 sbi->objid_no = inode2->i_ino;
224 /* Try to find $Quota */
225 inode2 = dir_search_u(inode, &NAME_QUOTA, NULL);
226 if (inode2 && !IS_ERR(inode2)) {
227 sbi->quota_no = inode2->i_ino;
231 /* Try to find $Reparse */
232 inode2 = dir_search_u(inode, &NAME_REPARSE, NULL);
233 if (inode2 && !IS_ERR(inode2)) {
234 sbi->reparse.ni = ntfs_i(inode2);
235 sbi->reparse_no = inode2->i_ino;
238 /* Try to find $UsnJrnl */
239 inode2 = dir_search_u(inode, &NAME_USNJRNL, NULL);
240 if (inode2 && !IS_ERR(inode2)) {
241 sbi->usn_jrnl_no = inode2->i_ino;
/*
 * ntfs_loadlog_and_replay - Replay $LogFile at mount time.
 *
 * Rejects a logfile >= 4 GiB, loads $MFT (falling back to the $MFTMirr
 * copy at sbi->mft.lbo2 if the primary fails), replays the log, then
 * syncs/invalidates the block device. On a rw mount of an initialized
 * logfile, the logfile is wiped by filling it with 0xFF bytes.
 */
251 int ntfs_loadlog_and_replay(struct ntfs_inode *ni, struct ntfs_sb_info *sbi)
254 struct super_block *sb = sbi->sb;
255 bool initialized = false;
/* 4 GiB cap on the logfile size. */
260 if (ni->vfs_inode.i_size >= 0x100000000ull) {
261 ntfs_err(sb, "\x24LogFile is too big");
266 sbi->flags |= NTFS_FLAGS_LOG_REPLAYING;
268 ref.low = cpu_to_le32(MFT_REC_MFT);
270 ref.seq = cpu_to_le16(1);
272 inode = ntfs_iget5(sb, &ref, NULL);
278 /* Try to use MFT copy. */
279 u64 t64 = sbi->mft.lbo;
281 sbi->mft.lbo = sbi->mft.lbo2;
282 inode = ntfs_iget5(sb, &ref, NULL);
290 ntfs_err(sb, "Failed to load $MFT.");
294 sbi->mft.ni = ntfs_i(inode);
296 /* LogFile should not contains attribute list. */
297 err = ni_load_all_mi(sbi->mft.ni);
299 err = log_replay(ni, &initialized);
/* Drop any cached blocks the replay may have made stale. */
304 sync_blockdev(sb->s_bdev);
305 invalidate_bdev(sb->s_bdev);
307 if (sbi->flags & NTFS_FLAGS_NEED_REPLAY) {
312 if (sb_rdonly(sb) || !initialized)
315 /* Fill LogFile by '-1' if it is initialized. */
316 err = ntfs_bio_fill_1(sbi, &ni->file.run);
319 sbi->flags &= ~NTFS_FLAGS_LOG_REPLAYING;
327 * Return: Current ATTR_DEF_ENTRY for given attribute type.
/* Binary search over the sorted sbi->def_table by attribute type. */
329 const struct ATTR_DEF_ENTRY *ntfs_query_def(struct ntfs_sb_info *sbi,
332 int type_in = le32_to_cpu(type);
334 size_t max_idx = sbi->def_entries - 1;
336 while (min_idx <= max_idx) {
337 size_t i = min_idx + ((max_idx - min_idx) >> 1);
338 const struct ATTR_DEF_ENTRY *entry = sbi->def_table + i;
/* diff < 0: search upper half; diff > 0: lower half; 0: found. */
339 int diff = le32_to_cpu(entry->type) - type_in;
354 * ntfs_look_for_free_space - Look for a free space in bitmap.
/*
 * Allocate up to @len clusters starting near @lcn from the cluster
 * bitmap. ALLOCATE_MFT requests are satisfied from the reserved MFT
 * zone; ordinary requests may shrink the MFT zone when space is tight.
 * Results are returned via @new_lcn/@new_len.
 */
356 int ntfs_look_for_free_space(struct ntfs_sb_info *sbi, CLST lcn, CLST len,
357 CLST *new_lcn, CLST *new_len,
358 enum ALLOCATE_OPT opt)
362 struct super_block *sb = sbi->sb;
363 size_t alcn, zlen, zeroes, zlcn, zlen2, ztrim, new_zlen;
364 struct wnd_bitmap *wnd = &sbi->used.bitmap;
366 down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
/* MFT extension: carve clusters out of the dedicated MFT zone. */
367 if (opt & ALLOCATE_MFT) {
368 zlen = wnd_zone_len(wnd);
/* Zone empty — try to rebuild it before giving up. */
371 err = ntfs_refresh_zone(sbi);
374 zlen = wnd_zone_len(wnd);
378 ntfs_err(sbi->sb, "no free space to extend mft");
382 lcn = wnd_zone_bit(wnd);
383 alen = zlen > len ? len : zlen;
/* Shrink the zone by what we just took. */
385 wnd_zone_set(wnd, lcn + alen, zlen - alen);
387 err = wnd_set_used(wnd, lcn, alen);
389 up_write(&wnd->rw_lock);
396 * 'Cause cluster 0 is always used this value means that we should use
397 * cached value of 'next_free_lcn' to improve performance.
400 lcn = sbi->used.next_free_lcn;
402 if (lcn >= wnd->nbits)
405 alen = wnd_find(wnd, len, lcn, BITMAP_FIND_MARK_AS_USED, &alcn);
409 /* Try to use clusters from MftZone. */
410 zlen = wnd_zone_len(wnd);
411 zeroes = wnd_zeroes(wnd);
413 /* Check too big request */
414 if (len > zeroes + zlen || zlen <= NTFS_MIN_MFT_ZONE)
417 /* How many clusters to cat from zone. */
418 zlcn = wnd_zone_bit(wnd);
420 ztrim = len > zlen ? zlen : (len > zlen2 ? len : zlen2);
421 new_zlen = zlen - ztrim;
/* Never shrink the zone below its minimum size. */
423 if (new_zlen < NTFS_MIN_MFT_ZONE) {
424 new_zlen = NTFS_MIN_MFT_ZONE;
429 wnd_zone_set(wnd, zlcn, new_zlen);
431 /* Allocate continues clusters. */
432 alen = wnd_find(wnd, len, 0,
433 BITMAP_FIND_MARK_AS_USED | BITMAP_FIND_FULL, &alcn);
/* Drop cached metadata buffers covering the newly allocated range. */
441 ntfs_unmap_meta(sb, alcn, alen);
443 /* Set hint for next requests. */
444 if (!(opt & ALLOCATE_MFT))
445 sbi->used.next_free_lcn = alcn + alen;
450 up_write(&wnd->rw_lock);
455 * ntfs_extend_mft - Allocate additional MFT records.
457 * sbi->mft.bitmap is locked for write.
460 * ntfs_look_free_mft ->
463 * ni_insert_nonresident ->
466 * ntfs_look_free_mft ->
469 * To avoid recursive always allocate space for two new MFT records
470 * see attrib.c: "at least two MFT to avoid recursive loop".
472 static int ntfs_extend_mft(struct ntfs_sb_info *sbi)
475 struct ntfs_inode *ni = sbi->mft.ni;
476 size_t new_mft_total;
477 u64 new_mft_bytes, new_bitmap_bytes;
479 struct wnd_bitmap *wnd = &sbi->mft.bitmap;
/* Grow by MFT_INCREASE_CHUNK records, rounded up to a multiple of 128. */
481 new_mft_total = (wnd->nbits + MFT_INCREASE_CHUNK + 127) & (CLST)~127;
482 new_mft_bytes = (u64)new_mft_total << sbi->record_bits;
484 /* Step 1: Resize $MFT::DATA. */
485 down_write(&ni->file.run_lock);
486 err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
487 new_mft_bytes, NULL, false, &attr);
490 up_write(&ni->file.run_lock);
494 attr->nres.valid_size = attr->nres.data_size;
/* Recompute the record count from what was actually allocated. */
495 new_mft_total = le64_to_cpu(attr->nres.alloc_size) >> sbi->record_bits;
498 /* Step 2: Resize $MFT::BITMAP. */
499 new_bitmap_bytes = bitmap_size(new_mft_total);
501 err = attr_set_size(ni, ATTR_BITMAP, NULL, 0, &sbi->mft.bitmap.run,
502 new_bitmap_bytes, &new_bitmap_bytes, true, NULL);
504 /* Refresh MFT Zone if necessary. */
505 down_write_nested(&sbi->used.bitmap.rw_lock, BITMAP_MUTEX_CLUSTERS);
507 ntfs_refresh_zone(sbi);
509 up_write(&sbi->used.bitmap.rw_lock);
510 up_write(&ni->file.run_lock);
/* Grow the in-memory MFT bitmap to match. */
515 err = wnd_extend(wnd, new_mft_total);
/* Format the newly added records as empty. */
520 ntfs_clear_mft_tail(sbi, sbi->mft.used, new_mft_total);
522 err = _ni_write_inode(&ni->vfs_inode, 0);
528 * ntfs_look_free_mft - Look for a free MFT record.
/*
 * Returns a free MFT record number in *rno. When @mft is true the
 * request is for the MFT itself and may use the reserved records
 * [MFT_REC_RESERVED, MFT_REC_FREE); otherwise a general-purpose record
 * at or above MFT_REC_USER is returned, extending $MFT if needed.
 */
530 int ntfs_look_free_mft(struct ntfs_sb_info *sbi, CLST *rno, bool mft,
531 struct ntfs_inode *ni, struct mft_inode **mi)
534 size_t zbit, zlen, from, to, fr;
537 struct super_block *sb = sbi->sb;
538 struct wnd_bitmap *wnd = &sbi->mft.bitmap;
/* The in-memory reserved bitmap must be able to hold all reserved recs. */
541 static_assert(sizeof(sbi->mft.reserved_bitmap) * 8 >=
542 MFT_REC_FREE - MFT_REC_RESERVED);
545 down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
547 zlen = wnd_zone_len(wnd);
549 /* Always reserve space for MFT. */
552 zbit = wnd_zone_bit(wnd);
554 wnd_zone_set(wnd, zbit + 1, zlen - 1);
559 /* No MFT zone. Find the nearest to '0' free MFT. */
560 if (!wnd_find(wnd, 1, MFT_REC_FREE, 0, &zbit)) {
562 mft_total = wnd->nbits;
564 err = ntfs_extend_mft(sbi);
570 if (!mft || MFT_REC_FREE == sbi->mft.next_reserved)
576 * Look for free record reserved area [11-16) ==
577 * [MFT_REC_RESERVED, MFT_REC_FREE ) MFT bitmap always
580 if (!sbi->mft.reserved_bitmap) {
581 /* Once per session create internal bitmap for 5 bits. */
582 sbi->mft.reserved_bitmap = 0xFF;
/* Probe each reserved record on disk to see which are really unused. */
585 for (ir = MFT_REC_RESERVED; ir < MFT_REC_FREE; ir++) {
587 struct ntfs_inode *ni;
588 struct MFT_REC *mrec;
590 ref.low = cpu_to_le32(ir);
591 ref.seq = cpu_to_le16(ir);
593 i = ntfs_iget5(sb, &ref, NULL);
598 "Invalid reserved record %x",
602 if (is_bad_inode(i)) {
611 if (!is_rec_base(mrec))
614 if (mrec->hard_links)
/* A record with a $FILE_NAME attribute is in use. */
620 if (ni_find_attr(ni, NULL, NULL, ATTR_NAME,
621 NULL, 0, NULL, NULL))
/* Record looks free — clear its bit in the reserved bitmap. */
624 __clear_bit(ir - MFT_REC_RESERVED,
625 &sbi->mft.reserved_bitmap);
629 /* Scan 5 bits for zero. Bit 0 == MFT_REC_RESERVED */
630 zbit = find_next_zero_bit(&sbi->mft.reserved_bitmap,
631 MFT_REC_FREE, MFT_REC_RESERVED);
632 if (zbit >= MFT_REC_FREE) {
633 sbi->mft.next_reserved = MFT_REC_FREE;
638 sbi->mft.next_reserved = zbit;
/* Size the new MFT zone; clamp to the bitmap and shrink until free. */
641 zlen = zbit == MFT_REC_FREE ? (MFT_REC_USER - MFT_REC_FREE) : 4;
642 if (zbit + zlen > wnd->nbits)
643 zlen = wnd->nbits - zbit;
645 while (zlen > 1 && !wnd_is_free(wnd, zbit, zlen))
648 /* [zbit, zbit + zlen) will be used for MFT itself. */
649 from = sbi->mft.used;
654 ntfs_clear_mft_tail(sbi, from, to);
665 wnd_zone_set(wnd, zbit, zlen);
669 /* The request to get record for general purpose. */
670 if (sbi->mft.next_free < MFT_REC_USER)
671 sbi->mft.next_free = MFT_REC_USER;
674 if (sbi->mft.next_free >= sbi->mft.bitmap.nbits) {
675 } else if (!wnd_find(wnd, 1, MFT_REC_USER, 0, &fr)) {
676 sbi->mft.next_free = sbi->mft.bitmap.nbits;
679 sbi->mft.next_free = *rno + 1;
683 err = ntfs_extend_mft(sbi);
689 if (ni && !ni_add_subrecord(ni, *rno, mi)) {
694 /* We have found a record that are not reserved for next MFT. */
695 if (*rno >= MFT_REC_FREE)
696 wnd_set_used(wnd, *rno, 1);
697 else if (*rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited)
698 __set_bit(*rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
702 up_write(&wnd->rw_lock);
708 * ntfs_mark_rec_free - Mark record as free.
710 void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno)
712 struct wnd_bitmap *wnd = &sbi->mft.bitmap;
714 down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
715 if (rno >= wnd->nbits)
/* Ordinary records live in the MFT bitmap proper. */
718 if (rno >= MFT_REC_FREE) {
/* Freeing an already-free record indicates corruption. */
719 if (!wnd_is_used(wnd, rno, 1))
720 ntfs_set_state(sbi, NTFS_DIRTY_ERROR)
721 — /* (error path continues in omitted lines) */
722 wnd_set_free(wnd, rno, 1);
/* Reserved records [11,16) are tracked in the in-memory bitmap. */
723 } else if (rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited) {
724 __clear_bit(rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
/* Keep the zone start and next_free hint as low as possible. */
727 if (rno < wnd_zone_bit(wnd))
728 wnd_zone_set(wnd, rno, 1);
729 else if (rno < sbi->mft.next_free && rno >= MFT_REC_USER)
730 sbi->mft.next_free = rno;
733 up_write(&wnd->rw_lock);
737 * ntfs_clear_mft_tail - Format empty records [from, to).
739 * sbi->mft.bitmap is locked for write.
741 int ntfs_clear_mft_tail(struct ntfs_sb_info *sbi, size_t from, size_t to)
746 struct runs_tree *run;
747 struct ntfs_inode *ni;
752 rs = sbi->record_size;
756 down_read(&ni->file.run_lock);
757 vbo = (u64)from * rs;
/* Write one pre-built empty record (sbi->new_rec) per slot. */
758 for (; from < to; from++, vbo += rs) {
759 struct ntfs_buffers nb;
761 err = ntfs_get_bh(sbi, run, vbo, rs, &nb);
765 err = ntfs_write_bh(sbi, &sbi->new_rec->rhdr, &nb, 0);
772 sbi->mft.used = from;
773 up_read(&ni->file.run_lock);
778 * ntfs_refresh_zone - Refresh MFT zone.
780 * sbi->used.bitmap is locked for rw.
781 * sbi->mft.bitmap is locked for write.
782 * sbi->mft.ni->file.run_lock for write.
784 int ntfs_refresh_zone(struct ntfs_sb_info *sbi)
786 CLST zone_limit, zone_max, lcn, vcn, len;
788 struct wnd_bitmap *wnd = &sbi->used.bitmap;
789 struct ntfs_inode *ni = sbi->mft.ni;
791 /* Do not change anything unless we have non empty MFT zone. */
792 if (wnd_zone_len(wnd))
796 * Compute the MFT zone at two steps.
797 * It would be nice if we are able to allocate 1/8 of
798 * total clusters for MFT but not more then 512 MB.
800 zone_limit = (512 * 1024 * 1024) >> sbi->cluster_bits;
801 zone_max = wnd->nbits >> 3;
802 if (zone_max > zone_limit)
803 zone_max = zone_limit;
/* vcn of the first cluster past the current end of $MFT::DATA. */
805 vcn = bytes_to_cluster(sbi,
806 (u64)sbi->mft.bitmap.nbits << sbi->record_bits);
808 if (!run_lookup_entry(&ni->file.run, vcn - 1, &lcn, &len, NULL))
811 /* We should always find Last Lcn for MFT. */
812 if (lcn == SPARSE_LCN)
817 /* Try to allocate clusters after last MFT run. */
818 zlen = wnd_find(wnd, zone_max, lcn_s, 0, &lcn_s);
820 ntfs_notice(sbi->sb, "MftZone: unavailable");
824 /* Truncate too large zone. */
825 wnd_zone_set(wnd, lcn_s, zlen);
831 * ntfs_update_mftmirr - Update $MFTMirr data.
/*
 * Copies the first sbi->mft.recs_mirr MFT records from the primary MFT
 * location (mft.lbo) to the mirror (mft.lbo2), block by block via
 * buffer heads; syncs each block when @wait is set.
 */
833 int ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
836 struct super_block *sb = sbi->sb;
837 u32 blocksize = sb->s_blocksize;
838 sector_t block1, block2;
/* Nothing to do unless the mirror was marked out of date. */
841 if (!(sbi->flags & NTFS_FLAGS_MFTMIRR))
845 bytes = sbi->mft.recs_mirr << sbi->record_bits;
846 block1 = sbi->mft.lbo >> sb->s_blocksize_bits;
847 block2 = sbi->mft.lbo2 >> sb->s_blocksize_bits;
849 for (; bytes >= blocksize; bytes -= blocksize) {
850 struct buffer_head *bh1, *bh2;
/* Read source block; get (not read) destination block — it is
 * fully overwritten below. */
852 bh1 = sb_bread(sb, block1++);
858 bh2 = sb_getblk(sb, block2++);
865 if (buffer_locked(bh2))
866 __wait_on_buffer(bh2);
869 memcpy(bh2->b_data, bh1->b_data, blocksize);
870 set_buffer_uptodate(bh2);
871 mark_buffer_dirty(bh2);
878 err = sync_dirty_buffer(bh2);
/* Mirror is now current. */
885 sbi->flags &= ~NTFS_FLAGS_MFTMIRR;
894 * Mount: ntfs_set_state(NTFS_DIRTY_DIRTY)
895 * Umount: ntfs_set_state(NTFS_DIRTY_CLEAR)
896 * NTFS error: ntfs_set_state(NTFS_DIRTY_ERROR)
/*
 * Sets or clears VOLUME_FLAG_DIRTY in the $Volume VOLUME_INFO
 * attribute and pushes the change to disk.
 */
898 int ntfs_set_state(struct ntfs_sb_info *sbi, enum NTFS_DIRTY_FLAGS dirty)
902 struct VOLUME_INFO *info;
903 struct mft_inode *mi;
904 struct ntfs_inode *ni;
907 * Do not change state if fs was real_dirty.
908 * Do not change state if fs already dirty(clear).
909 * Do not change any thing if mounted read only.
911 if (sbi->volume.real_dirty || sb_rdonly(sbi->sb))
914 /* Check cached value. */
915 if ((dirty == NTFS_DIRTY_CLEAR ? 0 : VOLUME_FLAG_DIRTY) ==
916 (sbi->volume.flags & VOLUME_FLAG_DIRTY))
923 mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_DIRTY);
925 attr = ni_find_attr(ni, NULL, NULL, ATTR_VOL_INFO, NULL, 0, NULL, &mi);
931 info = resident_data_ex(attr, SIZEOF_ATTRIBUTE_VOLUME_INFO);
938 case NTFS_DIRTY_ERROR:
939 ntfs_notice(sbi->sb, "Mark volume as dirty due to NTFS errors");
/* real_dirty latches: later CLEAR calls become no-ops (see above). */
940 sbi->volume.real_dirty = true;
942 case NTFS_DIRTY_DIRTY:
943 info->flags |= VOLUME_FLAG_DIRTY;
945 case NTFS_DIRTY_CLEAR:
946 info->flags &= ~VOLUME_FLAG_DIRTY;
949 /* Cache current volume flags. */
950 sbi->volume.flags = info->flags;
959 mark_inode_dirty(&ni->vfs_inode);
960 /* verify(!ntfs_update_mftmirr()); */
963 * If we used wait=1, sync_inode_metadata waits for the io for the
964 * inode to finish. It hangs when media is removed.
965 * So wait=0 is sent down to sync_inode_metadata
966 * and filemap_fdatawrite is used for the data blocks.
968 err = sync_inode_metadata(&ni->vfs_inode, 0);
970 err = filemap_fdatawrite(ni->vfs_inode.i_mapping);
976 * security_hash - Calculates a hash of security descriptor.
/* Rotate-right-by-29 (== rotate-left-by-3) and add each LE 32-bit word. */
978 static inline __le32 security_hash(const void *sd, size_t bytes)
981 const __le32 *ptr = sd;
985 hash = ((hash >> 0x1D) | (hash << 3)) + le32_to_cpu(*ptr++);
986 return cpu_to_le32(hash);
/*
 * ntfs_sb_read - Read @bytes at device byte offset @lbo into @buffer,
 * walking buffer heads block by block (first block may start mid-block).
 */
989 int ntfs_sb_read(struct super_block *sb, u64 lbo, size_t bytes, void *buffer)
991 struct block_device *bdev = sb->s_bdev;
992 u32 blocksize = sb->s_blocksize;
993 u64 block = lbo >> sb->s_blocksize_bits;
/* Offset within the first block and bytes available in it. */
994 u32 off = lbo & (blocksize - 1);
995 u32 op = blocksize - off;
997 for (; bytes; block += 1, off = 0, op = blocksize) {
998 struct buffer_head *bh = __bread(bdev, block, blocksize);
1006 memcpy(buffer, bh->b_data + off, op);
1011 buffer = Add2Ptr(buffer, op);
/*
 * ntfs_sb_write - Write @bytes at device byte offset @lbo.
 * A NULL @buf fills the range with 0xFF bytes. Partial blocks are
 * read-modify-written; full blocks are overwritten without reading.
 */
1017 int ntfs_sb_write(struct super_block *sb, u64 lbo, size_t bytes,
1018 const void *buf, int wait)
1020 u32 blocksize = sb->s_blocksize;
1021 struct block_device *bdev = sb->s_bdev;
1022 sector_t block = lbo >> sb->s_blocksize_bits;
1023 u32 off = lbo & (blocksize - 1);
1024 u32 op = blocksize - off;
1025 struct buffer_head *bh;
/* On a synchronous mount, honour SB_SYNCHRONOUS even if !wait. */
1027 if (!wait && (sb->s_flags & SB_SYNCHRONOUS))
1030 for (; bytes; block += 1, off = 0, op = blocksize) {
/* Partial block: must read existing contents first. */
1034 if (op < blocksize) {
1035 bh = __bread(bdev, block, blocksize);
1037 ntfs_err(sb, "failed to read block %llx",
1042 bh = __getblk(bdev, block, blocksize);
1047 if (buffer_locked(bh))
1048 __wait_on_buffer(bh);
1052 memcpy(bh->b_data + off, buf, op);
1053 buf = Add2Ptr(buf, op);
/* NULL @buf: fill with 0xFF (used to wipe the logfile). */
1055 memset(bh->b_data + off, -1, op);
1058 set_buffer_uptodate(bh);
1059 mark_buffer_dirty(bh);
1063 int err = sync_dirty_buffer(bh);
1068 "failed to sync buffer at block %llx, error %d",
/*
 * ntfs_sb_write_run - Write @bytes at virtual byte offset @vbo of a
 * runs tree, translating each run to a device offset and delegating
 * to ntfs_sb_write(). Sparse runs are rejected.
 */
1082 int ntfs_sb_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
1083 u64 vbo, const void *buf, size_t bytes)
1085 struct super_block *sb = sbi->sb;
1086 u8 cluster_bits = sbi->cluster_bits;
1087 u32 off = vbo & sbi->cluster_mask;
1088 CLST lcn, clen, vcn = vbo >> cluster_bits, vcn_next;
1092 if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
1095 if (lcn == SPARSE_LCN)
1098 lbo = ((u64)lcn << cluster_bits) + off;
1099 len = ((u64)clen << cluster_bits) - off;
/* Write min(run remainder, bytes remaining) per iteration. */
1102 u32 op = len < bytes ? len : bytes;
1103 int err = ntfs_sb_write(sb, lbo, op, buf, 0);
/* Runs must be contiguous in vcn space. */
1112 vcn_next = vcn + clen;
1113 if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
1117 if (lcn == SPARSE_LCN)
1121 buf = Add2Ptr(buf, op);
1123 lbo = ((u64)lcn << cluster_bits);
1124 len = ((u64)clen << cluster_bits);
/*
 * ntfs_bread_run - Return the buffer head covering virtual byte offset
 * @vbo of @run, or ERR_PTR(-ENOENT) if the vcn is not mapped.
 */
1130 struct buffer_head *ntfs_bread_run(struct ntfs_sb_info *sbi,
1131 const struct runs_tree *run, u64 vbo)
1133 struct super_block *sb = sbi->sb;
1134 u8 cluster_bits = sbi->cluster_bits;
1138 if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, NULL, NULL))
1139 return ERR_PTR(-ENOENT);
1141 lbo = ((u64)lcn << cluster_bits) + (vbo & sbi->cluster_mask);
1143 return ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
/*
 * ntfs_read_run_nb - Read @bytes at @vbo of @run into @buf and/or
 * collect the referenced buffer heads into @nb for a later
 * read-modify-write. Buffer heads are released on error.
 */
1146 int ntfs_read_run_nb(struct ntfs_sb_info *sbi, const struct runs_tree *run,
1147 u64 vbo, void *buf, u32 bytes, struct ntfs_buffers *nb)
1150 struct super_block *sb = sbi->sb;
1151 u32 blocksize = sb->s_blocksize;
1152 u8 cluster_bits = sbi->cluster_bits;
1153 u32 off = vbo & sbi->cluster_mask;
1155 CLST vcn_next, vcn = vbo >> cluster_bits;
1159 struct buffer_head *bh;
1162 /* First reading of $Volume + $MFTMirr + $LogFile goes here. */
1163 if (vbo > MFT_REC_VOL * sbi->record_size) {
1168 /* Use absolute boot's 'MFTCluster' to read record. */
1169 lbo = vbo + sbi->mft.lbo;
1170 len = sbi->record_size;
1171 } else if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
/* Sparse runs cannot be read through buffer heads. */
1175 if (lcn == SPARSE_LCN) {
1180 lbo = ((u64)lcn << cluster_bits) + off;
1181 len = ((u64)clen << cluster_bits) - off;
1184 off = lbo & (blocksize - 1);
1191 u32 len32 = len >= bytes ? bytes : len;
1192 sector_t block = lbo >> sb->s_blocksize_bits;
1195 u32 op = blocksize - off;
1200 bh = ntfs_bread(sb, block);
1207 memcpy(buf, bh->b_data + off, op);
1208 buf = Add2Ptr(buf, op);
/* nb->bh has a fixed capacity; overflow is an error. */
1213 } else if (nbh >= ARRAY_SIZE(nb->bh)) {
1230 vcn_next = vcn + clen;
1231 if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
1237 if (lcn == SPARSE_LCN) {
1242 lbo = ((u64)lcn << cluster_bits);
1243 len = ((u64)clen << cluster_bits);
/* Error path: drop every buffer head collected so far. */
1251 put_bh(nb->bh[--nbh]);
1262 * Return: < 0 if error, 0 if ok, -E_NTFS_FIXUP if need to update fixups.
/* Read a fixup-protected record and strip its fixups post-read. */
1264 int ntfs_read_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
1265 struct NTFS_RECORD_HEADER *rhdr, u32 bytes,
1266 struct ntfs_buffers *nb)
1268 int err = ntfs_read_run_nb(sbi, run, vbo, rhdr, bytes, nb);
1272 return ntfs_fix_post_read(rhdr, nb->bytes, true);
/*
 * ntfs_get_bh - Collect buffer heads covering [@vbo, @vbo + @bytes) of
 * @run into @nb for a subsequent full overwrite (ntfs_write_bh).
 * Fully covered blocks are grabbed without reading; partial blocks are
 * read from disk. Buffer heads are released on error.
 */
1275 int ntfs_get_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
1276 u32 bytes, struct ntfs_buffers *nb)
1279 struct super_block *sb = sbi->sb;
1280 u32 blocksize = sb->s_blocksize;
1281 u8 cluster_bits = sbi->cluster_bits;
1282 CLST vcn_next, vcn = vbo >> cluster_bits;
1291 if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
1296 off = vbo & sbi->cluster_mask;
1297 lbo = ((u64)lcn << cluster_bits) + off;
1298 len = ((u64)clen << cluster_bits) - off;
1300 nb->off = off = lbo & (blocksize - 1);
1303 u32 len32 = len < bytes ? len : bytes;
1304 sector_t block = lbo >> sb->s_blocksize_bits;
1308 struct buffer_head *bh;
1310 if (nbh >= ARRAY_SIZE(nb->bh)) {
1315 op = blocksize - off;
/* Whole block will be overwritten: skip the disk read. */
1319 if (op == blocksize) {
1320 bh = sb_getblk(sb, block);
1325 if (buffer_locked(bh))
1326 __wait_on_buffer(bh);
1327 set_buffer_uptodate(bh);
/* Partial block: must read current contents. */
1329 bh = ntfs_bread(sb, block);
1348 vcn_next = vcn + clen;
1349 if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
1355 lbo = ((u64)lcn << cluster_bits);
1356 len = ((u64)clen << cluster_bits);
/* Error path: release collected buffer heads. */
1361 put_bh(nb->bh[--nbh]);
/*
 * ntfs_write_bh - Write record @rhdr through the buffer heads in @nb,
 * applying update-sequence fixups into each copied sector. @sync != 0
 * waits for each buffer to hit the disk.
 */
1370 int ntfs_write_bh(struct ntfs_sb_info *sbi, struct NTFS_RECORD_HEADER *rhdr,
1371 struct ntfs_buffers *nb, int sync)
1374 struct super_block *sb = sbi->sb;
1375 u32 block_size = sb->s_blocksize;
1376 u32 bytes = nb->bytes;
1378 u16 fo = le16_to_cpu(rhdr->fix_off);
1379 u16 fn = le16_to_cpu(rhdr->fix_num);
/* Same fixup-area validation as ntfs_fix_pre_write(). */
1384 if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
1385 fn * SECTOR_SIZE > bytes) {
1389 for (idx = 0; bytes && idx < nb->nbufs; idx += 1, off = 0) {
1390 u32 op = block_size - off;
1392 struct buffer_head *bh = nb->bh[idx];
1393 __le16 *ptr, *end_data;
1398 if (buffer_locked(bh))
1399 __wait_on_buffer(bh);
1401 lock_buffer(nb->bh[idx]);
1403 bh_data = bh->b_data + off;
1404 end_data = Add2Ptr(bh_data, op);
1405 memcpy(bh_data, rhdr, op);
/* First buffer: bump the update sequence number (wraps before
 * 0x7FFF) and mirror it back into the in-memory record. */
1410 fixup = Add2Ptr(bh_data, fo);
1412 t16 = le16_to_cpu(sample);
1413 if (t16 >= 0x7FFF) {
1414 sample = *fixup = cpu_to_le16(1);
1416 sample = cpu_to_le16(t16 + 1);
1420 *(__le16 *)Add2Ptr(rhdr, fo) = sample;
/* Stamp the sample into the last word of every sector copied. */
1423 ptr = Add2Ptr(bh_data, SECTOR_SIZE - sizeof(short));
1428 ptr += SECTOR_SIZE / sizeof(short);
1429 } while (ptr < end_data);
1431 set_buffer_uptodate(bh);
1432 mark_buffer_dirty(bh);
1436 int err2 = sync_dirty_buffer(bh);
1443 rhdr = Add2Ptr(rhdr, op);
/*
 * ntfs_alloc_bio - Allocate a bio for @nr_vecs pages; if allocation
 * fails while in memory-reclaim context (PF_MEMALLOC), retry with
 * progressively halved vector counts.
 */
1449 static inline struct bio *ntfs_alloc_bio(u32 nr_vecs)
1451 struct bio *bio = bio_alloc(GFP_NOFS | __GFP_HIGH, nr_vecs);
1453 if (!bio && (current->flags & PF_MEMALLOC)) {
1454 while (!bio && (nr_vecs /= 2))
1455 bio = bio_alloc(GFP_NOFS | __GFP_HIGH, nr_vecs);
1461 * ntfs_bio_pages - Read/write pages from/to disk.
/*
 * Submits chained bios covering @bytes at @vbo of @run, mapped onto
 * @pages. The range is first widened to 512-byte alignment. Waits for
 * completion via submit_bio_wait() under a blk plug.
 */
1463 int ntfs_bio_pages(struct ntfs_sb_info *sbi, const struct runs_tree *run,
1464 struct page **pages, u32 nr_pages, u64 vbo, u32 bytes,
1468 struct bio *new, *bio = NULL;
1469 struct super_block *sb = sbi->sb;
1470 struct block_device *bdev = sb->s_bdev;
1472 u8 cluster_bits = sbi->cluster_bits;
1473 CLST lcn, clen, vcn, vcn_next;
1474 u32 add, off, page_idx;
1477 struct blk_plug plug;
1482 blk_start_plug(&plug);
1484 /* Align vbo and bytes to be 512 bytes aligned. */
1485 lbo = (vbo + bytes + 511) & ~511ull;
1486 vbo = vbo & ~511ull;
1489 vcn = vbo >> cluster_bits;
1490 if (!run_lookup_entry(run, vcn, &lcn, &clen, &run_idx)) {
1494 off = vbo & sbi->cluster_mask;
1499 lbo = ((u64)lcn << cluster_bits) + off;
1500 len = ((u64)clen << cluster_bits) - off;
/* One bio per run fragment; chain it to the previous one. */
1502 new = ntfs_alloc_bio(nr_pages - page_idx);
1508 bio_chain(bio, new);
1512 bio_set_dev(bio, bdev);
1513 bio->bi_iter.bi_sector = lbo >> 9;
1517 off = vbo & (PAGE_SIZE - 1);
1518 add = off + len > PAGE_SIZE ? (PAGE_SIZE - off) : len;
1520 if (bio_add_page(bio, page, add, off) < add)
/* Current page exhausted: move to the next one. */
1528 if (add + off == PAGE_SIZE) {
1530 if (WARN_ON(page_idx >= nr_pages)) {
1534 page = pages[page_idx];
1543 vcn_next = vcn + clen;
1544 if (!run_get_entry(run, ++run_idx, &vcn, &lcn, &clen) ||
1554 err = submit_bio_wait(bio);
1557 blk_finish_plug(&plug);
1563 * ntfs_bio_fill_1 - Helper for ntfs_loadlog_and_replay().
1565 * Fill on-disk logfile range by (-1)
1566 * this means empty logfile.
1568 int ntfs_bio_fill_1(struct ntfs_sb_info *sbi, const struct runs_tree *run)
1571 struct super_block *sb = sbi->sb;
1572 struct block_device *bdev = sb->s_bdev;
1573 u8 cluster_bits = sbi->cluster_bits;
1574 struct bio *new, *bio = NULL;
1580 struct blk_plug plug;
/* One page of 0xFF bytes, reused for every bio segment. */
1582 fill = alloc_page(GFP_KERNEL);
1586 kaddr = kmap_atomic(fill);
1587 memset(kaddr, -1, PAGE_SIZE);
1588 kunmap_atomic(kaddr);
1589 flush_dcache_page(fill);
1592 if (!run_lookup_entry(run, 0, &lcn, &clen, &run_idx)) {
1598 * TODO: Try blkdev_issue_write_same.
1600 blk_start_plug(&plug);
1602 lbo = (u64)lcn << cluster_bits;
1603 len = (u64)clen << cluster_bits;
/* One write bio per run fragment, chained together. */
1605 new = ntfs_alloc_bio(BIO_MAX_VECS);
1611 bio_chain(bio, new);
1615 bio_set_dev(bio, bdev);
1616 bio->bi_opf = REQ_OP_WRITE;
1617 bio->bi_iter.bi_sector = lbo >> 9;
1620 u32 add = len > PAGE_SIZE ? PAGE_SIZE : len;
1622 if (bio_add_page(bio, fill, add, 0) < add)
1630 } while (run_get_entry(run, ++run_idx, NULL, &lcn, &clen));
1634 err = submit_bio_wait(bio);
1637 blk_finish_plug(&plug);
/*
 * ntfs_vbo_to_lbo - Translate virtual byte offset @vbo of @run into a
 * device byte offset (*lbo; -1 for a sparse run) and the number of
 * contiguous bytes remaining in that run (*bytes).
 */
1645 int ntfs_vbo_to_lbo(struct ntfs_sb_info *sbi, const struct runs_tree *run,
1646 u64 vbo, u64 *lbo, u64 *bytes)
1650 u8 cluster_bits = sbi->cluster_bits;
1652 if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, &len, NULL))
1655 off = vbo & sbi->cluster_mask;
1656 *lbo = lcn == SPARSE_LCN ? -1 : (((u64)lcn << cluster_bits) + off);
1657 *bytes = ((u64)len << cluster_bits) - off;
/*
 * ntfs_new_inode - Allocate a VFS inode and format a new MFT record
 * @rno for it (directory flag per @dir); locks the inode into the
 * inode hash before returning.
 */
1662 struct ntfs_inode *ntfs_new_inode(struct ntfs_sb_info *sbi, CLST rno, bool dir)
1665 struct super_block *sb = sbi->sb;
1666 struct inode *inode = new_inode(sb);
1667 struct ntfs_inode *ni;
1670 return ERR_PTR(-ENOMEM);
1674 err = mi_format_new(&ni->mi, sbi, rno, dir ? RECORD_FLAG_DIR : 0,
1680 if (insert_inode_locked(inode) < 0) {
/* Default self-relative security descriptor, in SDDL: */
1694 * O:BAG:BAD:(A;OICI;FA;;;WD)
1695 * Owner S-1-5-32-544 (Administrators)
1696 * Group S-1-5-32-544 (Administrators)
1697 * ACE: allow S-1-1-0 (Everyone) with FILE_ALL_ACCESS
1699 const u8 s_default_security[] __aligned(8) = {
1700 0x01, 0x00, 0x04, 0x80, 0x30, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
1701 0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x02, 0x00, 0x1C, 0x00,
1702 0x01, 0x00, 0x00, 0x00, 0x00, 0x03, 0x14, 0x00, 0xFF, 0x01, 0x1F, 0x00,
1703 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
1704 0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x20, 0x00, 0x00, 0x00,
1705 0x20, 0x02, 0x00, 0x00, 0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
1706 0x20, 0x00, 0x00, 0x00, 0x20, 0x02, 0x00, 0x00,
/* Compile-time guard: the blob must be exactly 0x50 bytes. */
1709 static_assert(sizeof(s_default_security) == 0x50);
/* Byte length of a SID: header plus SubAuthorityCount sub-authorities. */
1711 static inline u32 sid_length(const struct SID *sid)
1713 return struct_size(sid, SubAuthority, sid->SubAuthorityCount);
1719 * Thanks Mark Harmstone for idea.
/*
 * is_acl_valid - Bounds-check an on-disk ACL: revision, declared size
 * vs. available @len, and each ACE header/size in turn.
 */
1721 static bool is_acl_valid(const struct ACL *acl, u32 len)
1723 const struct ACE_HEADER *ace;
1725 u16 ace_count, ace_size;
1727 if (acl->AclRevision != ACL_REVISION &&
1728 acl->AclRevision != ACL_REVISION_DS) {
1730 * This value should be ACL_REVISION, unless the ACL contains an
1731 * object-specific ACE, in which case this value must be ACL_REVISION_DS.
1732 * All ACEs in an ACL must be at the same revision level.
1740 if (le16_to_cpu(acl->AclSize) > len)
1746 len -= sizeof(struct ACL);
1747 ace = (struct ACE_HEADER *)&acl[1];
1748 ace_count = le16_to_cpu(acl->AceCount);
/* Walk every ACE, verifying each fits in the remaining length. */
1750 for (i = 0; i < ace_count; i++) {
1751 if (len < sizeof(struct ACE_HEADER))
1754 ace_size = le16_to_cpu(ace->AceSize);
1759 ace = Add2Ptr(ace, ace_size);
/*
 * is_sd_valid - Validate a self-relative SECURITY_DESCRIPTOR_RELATIVE
 * of @len bytes: header, then (if present) Owner/Group SIDs and
 * SACL/DACL ACLs, each bounds-checked against @len.
 */
1765 bool is_sd_valid(const struct SECURITY_DESCRIPTOR_RELATIVE *sd, u32 len)
1767 u32 sd_owner, sd_group, sd_sacl, sd_dacl;
1769 if (len < sizeof(struct SECURITY_DESCRIPTOR_RELATIVE))
1772 if (sd->Revision != 1)
/* Only the self-relative (offset-based) layout is accepted. */
1778 if (!(sd->Control & SE_SELF_RELATIVE))
1781 sd_owner = le32_to_cpu(sd->Owner);
1783 const struct SID *owner = Add2Ptr(sd, sd_owner);
1785 if (sd_owner + offsetof(struct SID, SubAuthority) > len)
1788 if (owner->Revision != 1)
1791 if (sd_owner + sid_length(owner) > len)
1795 sd_group = le32_to_cpu(sd->Group);
1797 const struct SID *group = Add2Ptr(sd, sd_group);
1799 if (sd_group + offsetof(struct SID, SubAuthority) > len)
1802 if (group->Revision != 1)
1805 if (sd_group + sid_length(group) > len)
1809 sd_sacl = le32_to_cpu(sd->Sacl);
1811 const struct ACL *sacl = Add2Ptr(sd, sd_sacl);
1813 if (sd_sacl + sizeof(struct ACL) > len)
1816 if (!is_acl_valid(sacl, len - sd_sacl))
1820 sd_dacl = le32_to_cpu(sd->Dacl);
1822 const struct ACL *dacl = Add2Ptr(sd, sd_dacl);
1824 if (sd_dacl + sizeof(struct ACL) > len)
1827 if (!is_acl_valid(dacl, len - sd_dacl))
1835 * ntfs_security_init - Load and parse $Secure.
/*
 * Loads the $Secure metafile, validates and initializes its $SDH
 * (hash) and $SII (id) index roots, then scans $SII to find the next
 * free security id and the append offset within the $SDS stream.
 */
1837 int ntfs_security_init(struct ntfs_sb_info *sbi)
1840 struct super_block *sb = sbi->sb;
1841 struct inode *inode;
1842 struct ntfs_inode *ni;
1844 struct ATTRIB *attr;
1845 struct ATTR_LIST_ENTRY *le;
1849 struct NTFS_DE_SII *sii_e;
1850 struct ntfs_fnd *fnd_sii = NULL;
1851 const struct INDEX_ROOT *root_sii;
1852 const struct INDEX_ROOT *root_sdh;
1853 struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
1854 struct ntfs_index *indx_sii = &sbi->security.index_sii;
1856 ref.low = cpu_to_le32(MFT_REC_SECURE);
1858 ref.seq = cpu_to_le16(MFT_REC_SECURE);
1860 inode = ntfs_iget5(sb, &ref, &NAME_SECURE);
1861 if (IS_ERR(inode)) {
1862 err = PTR_ERR(inode);
1863 ntfs_err(sb, "Failed to load $Secure.");
/* $SDH: index of security descriptors keyed by hash. */
1872 attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SDH_NAME,
1873 ARRAY_SIZE(SDH_NAME), NULL, NULL);
1879 root_sdh = resident_data(attr);
1880 if (root_sdh->type != ATTR_ZERO ||
1881 root_sdh->rule != NTFS_COLLATION_TYPE_SECURITY_HASH) {
1886 err = indx_init(indx_sdh, sbi, attr, INDEX_MUTEX_SDH);
/* $SII: index of security descriptors keyed by security id. */
1890 attr = ni_find_attr(ni, attr, &le, ATTR_ROOT, SII_NAME,
1891 ARRAY_SIZE(SII_NAME), NULL, NULL);
1897 root_sii = resident_data(attr);
1898 if (root_sii->type != ATTR_ZERO ||
1899 root_sii->rule != NTFS_COLLATION_TYPE_UINT) {
1904 err = indx_init(indx_sii, sbi, attr, INDEX_MUTEX_SII);
1908 fnd_sii = fnd_get();
1914 sds_size = inode->i_size;
1916 /* Find the last valid Id. */
1917 sbi->security.next_id = SECURITY_ID_FIRST;
1918 /* Always write new security at the end of bucket. */
1919 sbi->security.next_off =
1920 ALIGN(sds_size - SecurityDescriptorsBlockSize, 16);
/* Walk all $SII entries to find the maximum used security id. */
1928 err = indx_find_raw(indx_sii, ni, root_sii, &ne, &off, fnd_sii);
1932 sii_e = (struct NTFS_DE_SII *)ne;
1933 if (le16_to_cpu(ne->view.data_size) < SIZEOF_SECURITY_HDR)
1936 next_id = le32_to_cpu(sii_e->sec_id) + 1;
1937 if (next_id >= sbi->security.next_id)
1938 sbi->security.next_id = next_id;
1941 sbi->security.ni = ni;
/*
 * NOTE(review): partial extraction — error paths, unlock/cleanup and the
 * function tail are missing; line-number prefixes are from the original
 * file. Comments below are additions only.
 */
1951 * ntfs_get_security_by_id - Read security descriptor by id.
/*
 * Looks up @security_id in the $SII index, validates the on-disk
 * SECURITY_HDR, then reads the descriptor body from the $SDS stream.
 * Serialized against other $Secure users via ni_lock.
 */
1953 int ntfs_get_security_by_id(struct ntfs_sb_info *sbi, __le32 security_id,
1954 struct SECURITY_DESCRIPTOR_RELATIVE **sd,
1959 struct ntfs_inode *ni = sbi->security.ni;
1960 struct ntfs_index *indx = &sbi->security.index_sii;
1962 struct NTFS_DE_SII *sii_e;
1963 struct ntfs_fnd *fnd_sii;
1964 struct SECURITY_HDR d_security;
1965 const struct INDEX_ROOT *root_sii;
1970 mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);
1972 fnd_sii = fnd_get();
1978 root_sii = indx_get_root(indx, ni, NULL, NULL);
1984 /* Try to find this SECURITY descriptor in SII indexes. */
1985 err = indx_find(indx, ni, root_sii, &security_id, sizeof(security_id),
1986 NULL, &diff, (struct NTFS_DE **)&sii_e, fnd_sii);
/* Total size recorded in the header includes the SECURITY_HDR itself. */
1993 t32 = le32_to_cpu(sii_e->sec_hdr.size);
1994 if (t32 < SIZEOF_SECURITY_HDR) {
1999 if (t32 > SIZEOF_SECURITY_HDR + 0x10000) {
2000 /* Looks like too big security. 0x10000 - is arbitrary big number. */
/* Caller receives only the descriptor body, header excluded. */
2005 *size = t32 - SIZEOF_SECURITY_HDR;
2007 p = kmalloc(*size, GFP_NOFS);
/* Re-read the on-disk header and cross-check it against the index copy. */
2013 err = ntfs_read_run_nb(sbi, &ni->file.run,
2014 le64_to_cpu(sii_e->sec_hdr.off), &d_security,
2015 sizeof(d_security), NULL);
2019 if (memcmp(&d_security, &sii_e->sec_hdr, SIZEOF_SECURITY_HDR)) {
/* Header matches: read the descriptor body that follows it in $SDS. */
2024 err = ntfs_read_run_nb(sbi, &ni->file.run,
2025 le64_to_cpu(sii_e->sec_hdr.off) +
2026 SIZEOF_SECURITY_HDR,
/*
 * NOTE(review): partial extraction — error branches, goto labels and the
 * unlock/free tail are missing; line-number prefixes are from the
 * original file. Comments below are additions only.
 */
2043 * ntfs_insert_security - Insert security descriptor into $Secure::SDS.
2045 * SECURITY Descriptor Stream data is organized into chunks of 256K bytes
2046 * and it contains a mirror copy of each security descriptor. When writing
2047 * to a security descriptor at location X, another copy will be written at
2048 * location (X+256K).
2049 * When writing a security descriptor that will cross the 256K boundary,
2050 * the pointer will be advanced by 256K to skip
2051 * over the mirror portion.
/*
 * Deduplicates via the $SDH hash index: if an identical descriptor
 * already exists its id is returned; otherwise the descriptor is
 * appended to $SDS (main + mirror bucket) and new $SII/$SDH entries
 * are inserted.  @inserted, if set, presumably reports whether a new
 * entry was created (assignment elided here — TODO confirm).
 */
2053 int ntfs_insert_security(struct ntfs_sb_info *sbi,
2054 const struct SECURITY_DESCRIPTOR_RELATIVE *sd,
2055 u32 size_sd, __le32 *security_id, bool *inserted)
2058 struct ntfs_inode *ni = sbi->security.ni;
2059 struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
2060 struct ntfs_index *indx_sii = &sbi->security.index_sii;
2061 struct NTFS_DE_SDH *e;
2062 struct NTFS_DE_SDH sdh_e;
2063 struct NTFS_DE_SII sii_e;
2064 struct SECURITY_HDR *d_security;
/* On-disk record = header + descriptor body, padded to 16 bytes. */
2065 u32 new_sec_size = size_sd + SIZEOF_SECURITY_HDR;
2066 u32 aligned_sec_size = ALIGN(new_sec_size, 16);
2067 struct SECURITY_KEY hash_key;
2068 struct ntfs_fnd *fnd_sdh = NULL;
2069 const struct INDEX_ROOT *root_sdh;
2070 const struct INDEX_ROOT *root_sii;
2071 u64 mirr_off, new_sds_size;
2074 static_assert((1 << Log2OfSecurityDescriptorsBlockSize) ==
2075 SecurityDescriptorsBlockSize);
/* SDH is keyed by (hash, sec_id); start the search at an invalid id. */
2077 hash_key.hash = security_hash(sd, size_sd);
2078 hash_key.sec_id = SECURITY_ID_INVALID;
2082 *security_id = SECURITY_ID_INVALID;
2084 /* Allocate a temporal buffer. */
2085 d_security = kzalloc(aligned_sec_size, GFP_NOFS);
2089 mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);
2091 fnd_sdh = fnd_get();
2097 root_sdh = indx_get_root(indx_sdh, ni, NULL, NULL);
2103 root_sii = indx_get_root(indx_sii, ni, NULL, NULL);
2110 * Check if such security already exists.
2111 * Use "SDH" and hash -> to get the offset in "SDS".
2113 err = indx_find(indx_sdh, ni, root_sdh, &hash_key, sizeof(hash_key),
2114 &d_security->key.sec_id, &diff, (struct NTFS_DE **)&e,
/*
 * Hash hit: candidates with the same hash are verified by a full byte
 * compare of the stored descriptor (hash collisions are possible).
 */
2120 if (le32_to_cpu(e->sec_hdr.size) == new_sec_size) {
2121 err = ntfs_read_run_nb(sbi, &ni->file.run,
2122 le64_to_cpu(e->sec_hdr.off),
2123 d_security, new_sec_size, NULL);
2127 if (le32_to_cpu(d_security->size) == new_sec_size &&
2128 d_security->key.hash == hash_key.hash &&
2129 !memcmp(d_security + 1, sd, size_sd)) {
2130 *security_id = d_security->key.sec_id;
2131 /* Such security already exists. */
/* Advance to the next SDH entry with the same hash, if any. */
2137 err = indx_find_sort(indx_sdh, ni, root_sdh,
2138 (struct NTFS_DE **)&e, fnd_sdh);
2142 if (!e || e->key.hash != hash_key.hash)
2146 /* Zero unused space. */
2147 next = sbi->security.next_off & (SecurityDescriptorsBlockSize - 1);
2148 left = SecurityDescriptorsBlockSize - next;
2150 /* Zero gap until SecurityDescriptorsBlockSize. */
2151 if (left < new_sec_size) {
2152 /* Zero "left" bytes from sbi->security.next_off. */
/*
 * Record would cross the 256K boundary: skip the remainder of this
 * bucket plus the mirror bucket that follows it.
 */
2153 sbi->security.next_off += SecurityDescriptorsBlockSize + left;
2156 /* Zero tail of previous security. */
2157 //used = ni->vfs_inode.i_size & (SecurityDescriptorsBlockSize - 1);
2161 * 0x40438 == ni->vfs_inode.i_size
2162 * 0x00440 == sbi->security.next_off
2163 * need to zero [0x438-0x440)
2164 * if (next > used) {
2165 * u32 tozero = next - used;
2166 * zero "tozero" bytes from sbi->security.next_off - tozero
2169 /* Format new security descriptor. */
2170 d_security->key.hash = hash_key.hash;
2171 d_security->key.sec_id = cpu_to_le32(sbi->security.next_id);
2172 d_security->off = cpu_to_le64(sbi->security.next_off);
2173 d_security->size = cpu_to_le32(new_sec_size);
2174 memcpy(d_security + 1, sd, size_sd);
2176 /* Write main SDS bucket. */
2177 err = ntfs_sb_write_run(sbi, &ni->file.run, sbi->security.next_off,
2178 d_security, aligned_sec_size);
/* Mirror copy lives exactly one 256K block past the main copy. */
2183 mirr_off = sbi->security.next_off + SecurityDescriptorsBlockSize;
2184 new_sds_size = mirr_off + aligned_sec_size;
/* Grow the $SDS data attribute if the mirror write extends the stream. */
2186 if (new_sds_size > ni->vfs_inode.i_size) {
2187 err = attr_set_size(ni, ATTR_DATA, SDS_NAME,
2188 ARRAY_SIZE(SDS_NAME), &ni->file.run,
2189 new_sds_size, &new_sds_size, false, NULL);
2194 /* Write copy SDS bucket. */
2195 err = ntfs_sb_write_run(sbi, &ni->file.run, mirr_off, d_security,
2200 /* Fill SII entry. */
2201 sii_e.de.view.data_off =
2202 cpu_to_le16(offsetof(struct NTFS_DE_SII, sec_hdr));
2203 sii_e.de.view.data_size = cpu_to_le16(SIZEOF_SECURITY_HDR);
2204 sii_e.de.view.res = 0;
2205 sii_e.de.size = cpu_to_le16(SIZEOF_SII_DIRENTRY);
2206 sii_e.de.key_size = cpu_to_le16(sizeof(d_security->key.sec_id));
2209 sii_e.sec_id = d_security->key.sec_id;
2210 memcpy(&sii_e.sec_hdr, d_security, SIZEOF_SECURITY_HDR);
2212 err = indx_insert_entry(indx_sii, ni, &sii_e.de, NULL, NULL, 0);
2216 /* Fill SDH entry. */
2217 sdh_e.de.view.data_off =
2218 cpu_to_le16(offsetof(struct NTFS_DE_SDH, sec_hdr));
2219 sdh_e.de.view.data_size = cpu_to_le16(SIZEOF_SECURITY_HDR);
2220 sdh_e.de.view.res = 0;
2221 sdh_e.de.size = cpu_to_le16(SIZEOF_SDH_DIRENTRY);
2222 sdh_e.de.key_size = cpu_to_le16(sizeof(sdh_e.key));
2225 sdh_e.key.hash = d_security->key.hash;
2226 sdh_e.key.sec_id = d_security->key.sec_id;
2227 memcpy(&sdh_e.sec_hdr, d_security, SIZEOF_SECURITY_HDR);
/* SDH entries carry a trailing "II" magic after the header (on-disk format). */
2228 sdh_e.magic[0] = cpu_to_le16('I');
2229 sdh_e.magic[1] = cpu_to_le16('I');
2232 err = indx_insert_entry(indx_sdh, ni, &sdh_e.de, (void *)(size_t)1,
2237 *security_id = d_security->key.sec_id;
2241 /* Update Id and offset for next descriptor. */
2242 sbi->security.next_id += 1;
2243 sbi->security.next_off += aligned_sec_size;
2247 mark_inode_dirty(&ni->vfs_inode);
/*
 * NOTE(review): partial extraction — error paths and function tail are
 * missing; line-number prefixes are from the original file.
 */
2255 * ntfs_reparse_init - Load and parse $Extend/$Reparse.
/*
 * Finds the $R index root on the cached $Reparse inode, validates its
 * collation rule, and initializes sbi->reparse.index_r.
 */
2257 int ntfs_reparse_init(struct ntfs_sb_info *sbi)
2260 struct ntfs_inode *ni = sbi->reparse.ni;
2261 struct ntfs_index *indx = &sbi->reparse.index_r;
2262 struct ATTRIB *attr;
2263 struct ATTR_LIST_ENTRY *le;
2264 const struct INDEX_ROOT *root_r;
2270 attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SR_NAME,
2271 ARRAY_SIZE(SR_NAME), NULL, NULL);
2277 root_r = resident_data(attr);
/* $R keys (tag + MFT ref) collate as a sequence of unsigned ints. */
2278 if (root_r->type != ATTR_ZERO ||
2279 root_r->rule != NTFS_COLLATION_TYPE_UINTS) {
2284 err = indx_init(indx, sbi, attr, INDEX_MUTEX_SR);
/*
 * NOTE(review): partial extraction — error paths and function tail are
 * missing; line-number prefixes are from the original file.
 */
2293 * ntfs_objid_init - Load and parse $Extend/$ObjId.
/*
 * Finds the $O index root on the cached $ObjId inode, validates its
 * collation rule, and initializes sbi->objid.index_o.
 */
2295 int ntfs_objid_init(struct ntfs_sb_info *sbi)
2298 struct ntfs_inode *ni = sbi->objid.ni;
2299 struct ntfs_index *indx = &sbi->objid.index_o;
2300 struct ATTRIB *attr;
2301 struct ATTR_LIST_ENTRY *le;
2302 const struct INDEX_ROOT *root;
2308 attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SO_NAME,
2309 ARRAY_SIZE(SO_NAME), NULL, NULL);
2315 root = resident_data(attr);
/* $O keys (GUIDs) collate as a sequence of unsigned ints. */
2316 if (root->type != ATTR_ZERO ||
2317 root->rule != NTFS_COLLATION_TYPE_UINTS) {
2322 err = indx_init(indx, sbi, attr, INDEX_MUTEX_SO);
/*
 * NOTE(review): partial extraction — braces, unlock and return are
 * missing; line-number prefixes are from the original file.
 */
/*
 * ntfs_objid_remove - Delete the entry keyed by @guid from the $ObjId
 * $O index, under the object-id inode lock.
 */
2330 int ntfs_objid_remove(struct ntfs_sb_info *sbi, struct GUID *guid)
2333 struct ntfs_inode *ni = sbi->objid.ni;
2334 struct ntfs_index *indx = &sbi->objid.index_o;
2339 mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_OBJID);
2341 err = indx_delete_entry(indx, ni, guid, sizeof(*guid), NULL);
/* Index pages changed; flag the $ObjId inode for writeback. */
2343 mark_inode_dirty(&ni->vfs_inode);
/*
 * NOTE(review): partial extraction — braces, unlock and return are
 * missing; line-number prefixes are from the original file.
 */
/*
 * ntfs_insert_reparse - Insert a (reparse tag, MFT reference) entry into
 * the $Extend/$Reparse $R index, under the reparse inode lock.
 */
2349 int ntfs_insert_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
2350 const struct MFT_REF *ref)
2353 struct ntfs_inode *ni = sbi->reparse.ni;
2354 struct ntfs_index *indx = &sbi->reparse.index_r;
2355 struct NTFS_DE_R re;
/* The $R entry carries no data payload; zero it and fill the key only. */
2360 memset(&re, 0, sizeof(re));
2362 re.de.view.data_off = cpu_to_le16(offsetof(struct NTFS_DE_R, zero));
2363 re.de.size = cpu_to_le16(sizeof(struct NTFS_DE_R));
2364 re.de.key_size = cpu_to_le16(sizeof(re.key));
2366 re.key.ReparseTag = rtag;
2367 memcpy(&re.key.ref, ref, sizeof(*ref));
2369 mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);
2371 err = indx_insert_entry(indx, ni, &re.de, NULL, NULL, 0);
/* Index pages changed; flag the $Reparse inode for writeback. */
2373 mark_inode_dirty(&ni->vfs_inode);
/*
 * NOTE(review): partial extraction — error paths, loop/brace structure
 * and the unlock/return tail are missing; line-number prefixes are from
 * the original file.
 */
/*
 * ntfs_remove_reparse - Remove the (rtag, ref) entry from the $R index.
 * When the exact key is not found (e.g. stale/unknown tag), falls back
 * to searching by MFT reference alone and deleting the matching entry.
 */
2379 int ntfs_remove_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
2380 const struct MFT_REF *ref)
2383 struct ntfs_inode *ni = sbi->reparse.ni;
2384 struct ntfs_index *indx = &sbi->reparse.index_r;
2385 struct ntfs_fnd *fnd = NULL;
2386 struct REPARSE_KEY rkey;
2387 struct NTFS_DE_R *re;
2388 struct INDEX_ROOT *root_r;
2393 rkey.ReparseTag = rtag;
2396 mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);
/* First attempt: delete by the exact (tag, ref) key. */
2399 err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
2409 root_r = indx_get_root(indx, ni, NULL, NULL);
2415 /* 1 - forces to ignore rkey.ReparseTag when comparing keys. */
2416 err = indx_find(indx, ni, root_r, &rkey, sizeof(rkey), (void *)1, &diff,
2417 (struct NTFS_DE **)&re, fnd);
2421 if (memcmp(&re->key.ref, ref, sizeof(*ref))) {
2422 /* Impossible. Looks like volume corrupt? */
/* Capture the on-disk key (with its real tag) before deleting it. */
2426 memcpy(&rkey, &re->key, sizeof(rkey));
2431 err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
2439 mark_inode_dirty(&ni->vfs_inode);
/*
 * NOTE(review): partial extraction — the second parameter line and
 * braces are missing; line-number prefixes are from the original file.
 */
/*
 * ntfs_unmap_and_discard - Invalidate cached metadata buffers for a
 * cluster range, then issue a discard (TRIM) for it.
 */
2445 static inline void ntfs_unmap_and_discard(struct ntfs_sb_info *sbi, CLST lcn,
2448 ntfs_unmap_meta(sbi->sb, lcn, len);
2449 ntfs_discard(sbi, lcn, len);
/*
 * NOTE(review): partial extraction — several branches, braces and goto
 * labels are missing; line-number prefixes are from the original file.
 */
/*
 * mark_as_free_ex - Mark clusters [lcn, lcn+len) free in the volume
 * bitmap, optionally discarding them, under the cluster-bitmap rwsem.
 */
2452 void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim)
2455 struct wnd_bitmap *wnd = &sbi->used.bitmap;
2457 down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
2458 if (!wnd_is_used(wnd, lcn, len)) {
/*
 * Freeing clusters that are not fully marked used indicates an
 * inconsistent volume; record the dirty-error state, then (per the
 * elided path) presumably free only the sub-ranges that are used.
 */
2459 ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
2463 for (i = lcn; i < end; i++) {
2464 if (wnd_is_used(wnd, i, 1)) {
2475 ntfs_unmap_and_discard(sbi, lcn, len);
2477 wnd_set_free(wnd, lcn, len);
/* Fast path (range fully used): discard and clear it in one go. */
2486 ntfs_unmap_and_discard(sbi, lcn, len);
2487 wnd_set_free(wnd, lcn, len);
2490 up_write(&wnd->rw_lock);
2494 * run_deallocate - Deallocate clusters.
2496 int run_deallocate(struct ntfs_sb_info *sbi, struct runs_tree *run, bool trim)
2501 while (run_get_entry(run, idx++, NULL, &lcn, &len)) {
2502 if (lcn == SPARSE_LCN)
2505 mark_as_free_ex(sbi, lcn, len, trim);