// SPDX-License-Identifier: GPL-2.0
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/kernel.h>
#include <linux/nls.h>
const struct cpu_str NAME_MFT = {
	4, 0, { '$', 'M', 'F', 'T' },
const struct cpu_str NAME_MIRROR = {
	8, 0, { '$', 'M', 'F', 'T', 'M', 'i', 'r', 'r' },
const struct cpu_str NAME_LOGFILE = {
	8, 0, { '$', 'L', 'o', 'g', 'F', 'i', 'l', 'e' },
const struct cpu_str NAME_VOLUME = {
	7, 0, { '$', 'V', 'o', 'l', 'u', 'm', 'e' },
const struct cpu_str NAME_ATTRDEF = {
	8, 0, { '$', 'A', 't', 't', 'r', 'D', 'e', 'f' },
const struct cpu_str NAME_ROOT = {
const struct cpu_str NAME_BITMAP = {
	7, 0, { '$', 'B', 'i', 't', 'm', 'a', 'p' },
const struct cpu_str NAME_BOOT = {
	5, 0, { '$', 'B', 'o', 'o', 't' },
const struct cpu_str NAME_BADCLUS = {
	8, 0, { '$', 'B', 'a', 'd', 'C', 'l', 'u', 's' },
const struct cpu_str NAME_QUOTA = {
	6, 0, { '$', 'Q', 'u', 'o', 't', 'a' },
const struct cpu_str NAME_SECURE = {
	7, 0, { '$', 'S', 'e', 'c', 'u', 'r', 'e' },
const struct cpu_str NAME_UPCASE = {
	7, 0, { '$', 'U', 'p', 'C', 'a', 's', 'e' },
const struct cpu_str NAME_EXTEND = {
	7, 0, { '$', 'E', 'x', 't', 'e', 'n', 'd' },
const struct cpu_str NAME_OBJID = {
	6, 0, { '$', 'O', 'b', 'j', 'I', 'd' },
const struct cpu_str NAME_REPARSE = {
	8, 0, { '$', 'R', 'e', 'p', 'a', 'r', 's', 'e' },
const struct cpu_str NAME_USNJRNL = {
	8, 0, { '$', 'U', 's', 'n', 'J', 'r', 'n', 'l' },
const __le16 BAD_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('B'), cpu_to_le16('a'), cpu_to_le16('d'),
const __le16 I30_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('I'), cpu_to_le16('3'), cpu_to_le16('0'),
const __le16 SII_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('I'), cpu_to_le16('I'),
const __le16 SDH_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('H'),
const __le16 SDS_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('S'),
const __le16 SO_NAME[2] = {
	cpu_to_le16('$'), cpu_to_le16('O'),
const __le16 SQ_NAME[2] = {
	cpu_to_le16('$'), cpu_to_le16('Q'),
const __le16 SR_NAME[2] = {
	cpu_to_le16('$'), cpu_to_le16('R'),
#ifdef CONFIG_NTFS3_LZX_XPRESS
const __le16 WOF_NAME[17] = {
	cpu_to_le16('W'), cpu_to_le16('o'), cpu_to_le16('f'), cpu_to_le16('C'),
	cpu_to_le16('o'), cpu_to_le16('m'), cpu_to_le16('p'), cpu_to_le16('r'),
	cpu_to_le16('e'), cpu_to_le16('s'), cpu_to_le16('s'), cpu_to_le16('e'),
	cpu_to_le16('d'), cpu_to_le16('D'), cpu_to_le16('a'), cpu_to_le16('t'),
static const __le16 CON_NAME[3] = {
	cpu_to_le16('C'), cpu_to_le16('O'), cpu_to_le16('N'),
static const __le16 NUL_NAME[3] = {
	cpu_to_le16('N'), cpu_to_le16('U'), cpu_to_le16('L'),
static const __le16 AUX_NAME[3] = {
	cpu_to_le16('A'), cpu_to_le16('U'), cpu_to_le16('X'),
static const __le16 PRN_NAME[3] = {
	cpu_to_le16('P'), cpu_to_le16('R'), cpu_to_le16('N'),
static const __le16 COM_NAME[3] = {
	cpu_to_le16('C'), cpu_to_le16('O'), cpu_to_le16('M'),
static const __le16 LPT_NAME[3] = {
	cpu_to_le16('L'), cpu_to_le16('P'), cpu_to_le16('T'),
 * ntfs_fix_pre_write - Insert fixups into @rhdr before writing to disk.
bool ntfs_fix_pre_write(struct NTFS_RECORD_HEADER *rhdr, size_t bytes)
	u16 fo = le16_to_cpu(rhdr->fix_off);
	u16 fn = le16_to_cpu(rhdr->fix_num);
	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
	    fn * SECTOR_SIZE > bytes) {
	/* Get fixup pointer. */
	fixup = Add2Ptr(rhdr, fo);
	if (*fixup >= 0x7FFF)
	ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));
		ptr += SECTOR_SIZE / sizeof(short);
 * ntfs_fix_post_read - Remove fixups after reading from disk.
 * Return: < 0 on error, 0 if ok, 1 if the fixups need to be updated.
int ntfs_fix_post_read(struct NTFS_RECORD_HEADER *rhdr, size_t bytes,
	fo = le16_to_cpu(rhdr->fix_off);
	fn = simple ? ((bytes >> SECTOR_SHIFT) + 1) :
		      le16_to_cpu(rhdr->fix_num);
	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
	    fn * SECTOR_SIZE > bytes) {
		return -E_NTFS_CORRUPT;
	/* Get fixup pointer. */
	fixup = Add2Ptr(rhdr, fo);
	ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));
		/* Test current word. */
		if (*ptr != sample) {
			/* Fixup does not match! Is it a serious error? */
		ptr += SECTOR_SIZE / sizeof(short);
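/*
 * Illustration of the fixup (update sequence) layout, assuming a
 * 1024-byte record, i.e. two 512-byte sectors and fix_num == 3:
 *
 *	fixup[0] - the update sequence number ("sample"),
 *	fixup[1] - the original last word of sector 0,
 *	fixup[2] - the original last word of sector 1.
 *
 * ntfs_fix_pre_write() saves the last word of each sector in this array
 * and overwrites it with the sample; ntfs_fix_post_read() checks that
 * every sector still ends with the sample (so the multi-sector write
 * was not torn) and puts the original words back.
 */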
 * ntfs_extend_init - Load $Extend file.
int ntfs_extend_init(struct ntfs_sb_info *sbi)
	struct super_block *sb = sbi->sb;
	struct inode *inode, *inode2;
	if (sbi->volume.major_ver < 3) {
		ntfs_notice(sb, "Skip $Extend because NTFS version is less than 3.");
	ref.low = cpu_to_le32(MFT_REC_EXTEND);
	ref.seq = cpu_to_le16(MFT_REC_EXTEND);
	inode = ntfs_iget5(sb, &ref, &NAME_EXTEND);
		err = PTR_ERR(inode);
		ntfs_err(sb, "Failed to load $Extend (%d).", err);
	/* If ntfs_iget5() reads from disk, it never returns a bad inode. */
	if (!S_ISDIR(inode->i_mode)) {
	/* Try to find $ObjId */
	inode2 = dir_search_u(inode, &NAME_OBJID, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		if (is_bad_inode(inode2)) {
			sbi->objid.ni = ntfs_i(inode2);
			sbi->objid_no = inode2->i_ino;
	/* Try to find $Quota */
	inode2 = dir_search_u(inode, &NAME_QUOTA, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		sbi->quota_no = inode2->i_ino;
	/* Try to find $Reparse */
	inode2 = dir_search_u(inode, &NAME_REPARSE, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		sbi->reparse.ni = ntfs_i(inode2);
		sbi->reparse_no = inode2->i_ino;
	/* Try to find $UsnJrnl */
	inode2 = dir_search_u(inode, &NAME_USNJRNL, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		sbi->usn_jrnl_no = inode2->i_ino;
int ntfs_loadlog_and_replay(struct ntfs_inode *ni, struct ntfs_sb_info *sbi)
	struct super_block *sb = sbi->sb;
	bool initialized = false;
	if (ni->vfs_inode.i_size >= 0x100000000ull) {
		ntfs_err(sb, "\x24LogFile is larger than 4G.");
	sbi->flags |= NTFS_FLAGS_LOG_REPLAYING;
	ref.low = cpu_to_le32(MFT_REC_MFT);
	ref.seq = cpu_to_le16(1);
	inode = ntfs_iget5(sb, &ref, NULL);
		/* Try to use MFT copy. */
		u64 t64 = sbi->mft.lbo;
		sbi->mft.lbo = sbi->mft.lbo2;
		inode = ntfs_iget5(sb, &ref, NULL);
		ntfs_err(sb, "Failed to load $MFT.");
	sbi->mft.ni = ntfs_i(inode);
	/* LogFile should not contain an attribute list. */
	err = ni_load_all_mi(sbi->mft.ni);
		err = log_replay(ni, &initialized);
	sync_blockdev(sb->s_bdev);
	invalidate_bdev(sb->s_bdev);
	if (sbi->flags & NTFS_FLAGS_NEED_REPLAY) {
	if (sb_rdonly(sb) || !initialized)
	/* Fill LogFile with -1 if it was initialized. */
	err = ntfs_bio_fill_1(sbi, &ni->file.run);
	sbi->flags &= ~NTFS_FLAGS_LOG_REPLAYING;
 * ntfs_look_for_free_space - Look for free space in the bitmap.
int ntfs_look_for_free_space(struct ntfs_sb_info *sbi, CLST lcn, CLST len,
			     CLST *new_lcn, CLST *new_len,
			     enum ALLOCATE_OPT opt)
	struct super_block *sb = sbi->sb;
	size_t alcn, zlen, zeroes, zlcn, zlen2, ztrim, new_zlen;
	struct wnd_bitmap *wnd = &sbi->used.bitmap;
	down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
	if (opt & ALLOCATE_MFT) {
		zlen = wnd_zone_len(wnd);
			err = ntfs_refresh_zone(sbi);
			zlen = wnd_zone_len(wnd);
			ntfs_err(sbi->sb, "no free space to extend mft");
		lcn = wnd_zone_bit(wnd);
		alen = min_t(CLST, len, zlen);
		wnd_zone_set(wnd, lcn + alen, zlen - alen);
		err = wnd_set_used(wnd, lcn, alen);
	 * Because cluster 0 is always used, this value means that we should
	 * use the cached value of 'next_free_lcn' to improve performance.
		lcn = sbi->used.next_free_lcn;
	if (lcn >= wnd->nbits)
	alen = wnd_find(wnd, len, lcn, BITMAP_FIND_MARK_AS_USED, &alcn);
	/* Try to use clusters from MftZone. */
	zlen = wnd_zone_len(wnd);
	zeroes = wnd_zeroes(wnd);
	/* Check for a too-big request. */
	if (len > zeroes + zlen || zlen <= NTFS_MIN_MFT_ZONE) {
	/* How many clusters to cut from the zone. */
	zlcn = wnd_zone_bit(wnd);
	ztrim = clamp_val(len, zlen2, zlen);
	new_zlen = max_t(size_t, zlen - ztrim, NTFS_MIN_MFT_ZONE);
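	/*
	 * Illustration (hypothetical numbers, assuming zlen2 is half of
	 * the zone): with zlen == 1000 and len == 100, ztrim clamps to
	 * 500, so the zone is cut to new_zlen == 500; a huge request can
	 * shrink it further, but never below NTFS_MIN_MFT_ZONE.
	 */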
	wnd_zone_set(wnd, zlcn, new_zlen);
	/* Allocate contiguous clusters. */
	alen = wnd_find(wnd, len, 0,
			BITMAP_FIND_MARK_AS_USED | BITMAP_FIND_FULL, &alcn);
	ntfs_unmap_meta(sb, alcn, alen);
	/* Set hint for next requests. */
	if (!(opt & ALLOCATE_MFT))
		sbi->used.next_free_lcn = alcn + alen;
	up_write(&wnd->rw_lock);
 * ntfs_check_for_free_space
 * Check if it is possible to allocate 'clen' clusters and 'mlen' MFT records.
bool ntfs_check_for_free_space(struct ntfs_sb_info *sbi, CLST clen, CLST mlen)
	size_t free, zlen, avail;
	struct wnd_bitmap *wnd;
	wnd = &sbi->used.bitmap;
	down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
	free = wnd_zeroes(wnd);
	zlen = min_t(size_t, NTFS_MIN_MFT_ZONE, wnd_zone_len(wnd));
	up_read(&wnd->rw_lock);
	if (free < zlen + clen)
	avail = free - (zlen + clen);
	wnd = &sbi->mft.bitmap;
	down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
	free = wnd_zeroes(wnd);
	zlen = wnd_zone_len(wnd);
	up_read(&wnd->rw_lock);
	if (free >= zlen + mlen)
	return avail >= bytes_to_cluster(sbi, mlen << sbi->record_bits);
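/*
 * Worked example (hypothetical numbers): with 4K clusters and 1K MFT
 * records (record_bits == 10), a request for mlen == 8 records that the
 * MFT bitmap cannot satisfy falls through to the final check above and
 * needs avail >= bytes_to_cluster(sbi, 8 << 10) == 2 spare clusters.
 */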
 * ntfs_extend_mft - Allocate additional MFT records.
 * sbi->mft.bitmap is locked for write.
 * ntfs_look_free_mft ->
 * ni_insert_nonresident ->
 * ntfs_look_free_mft ->
 * To avoid this recursion, always allocate space for two new MFT records
 * (see attrib.c: "at least two MFT to avoid recursive loop").
static int ntfs_extend_mft(struct ntfs_sb_info *sbi)
	struct ntfs_inode *ni = sbi->mft.ni;
	size_t new_mft_total;
	u64 new_mft_bytes, new_bitmap_bytes;
	struct wnd_bitmap *wnd = &sbi->mft.bitmap;
	new_mft_total = ALIGN(wnd->nbits + NTFS_MFT_INCREASE_STEP, 128);
	new_mft_bytes = (u64)new_mft_total << sbi->record_bits;
	/* Step 1: Resize $MFT::DATA. */
	down_write(&ni->file.run_lock);
	err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
			    new_mft_bytes, NULL, false, &attr);
		up_write(&ni->file.run_lock);
	attr->nres.valid_size = attr->nres.data_size;
	new_mft_total = le64_to_cpu(attr->nres.alloc_size) >> sbi->record_bits;
	/* Step 2: Resize $MFT::BITMAP. */
	new_bitmap_bytes = bitmap_size(new_mft_total);
	err = attr_set_size(ni, ATTR_BITMAP, NULL, 0, &sbi->mft.bitmap.run,
			    new_bitmap_bytes, &new_bitmap_bytes, true, NULL);
	/* Refresh MFT Zone if necessary. */
	down_write_nested(&sbi->used.bitmap.rw_lock, BITMAP_MUTEX_CLUSTERS);
	ntfs_refresh_zone(sbi);
	up_write(&sbi->used.bitmap.rw_lock);
	up_write(&ni->file.run_lock);
	err = wnd_extend(wnd, new_mft_total);
	ntfs_clear_mft_tail(sbi, sbi->mft.used, new_mft_total);
	err = _ni_write_inode(&ni->vfs_inode, 0);
 * ntfs_look_free_mft - Look for a free MFT record.
int ntfs_look_free_mft(struct ntfs_sb_info *sbi, CLST *rno, bool mft,
		       struct ntfs_inode *ni, struct mft_inode **mi)
	size_t zbit, zlen, from, to, fr;
	struct super_block *sb = sbi->sb;
	struct wnd_bitmap *wnd = &sbi->mft.bitmap;
	static_assert(sizeof(sbi->mft.reserved_bitmap) * 8 >=
		      MFT_REC_FREE - MFT_REC_RESERVED);
	down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
	zlen = wnd_zone_len(wnd);
	/* Always reserve space for MFT. */
		zbit = wnd_zone_bit(wnd);
		wnd_zone_set(wnd, zbit + 1, zlen - 1);
	/* No MFT zone. Find the free MFT record nearest to 0. */
	if (!wnd_find(wnd, 1, MFT_REC_FREE, 0, &zbit)) {
		mft_total = wnd->nbits;
		err = ntfs_extend_mft(sbi);
	if (!mft || MFT_REC_FREE == sbi->mft.next_reserved)
	 * Look for a free record in the reserved area [11-16) ==
	 * [MFT_REC_RESERVED, MFT_REC_FREE). The MFT bitmap always
	 * marks this area as used.
	if (!sbi->mft.reserved_bitmap) {
		/* Once per session, create an internal bitmap for these 5 bits. */
		sbi->mft.reserved_bitmap = 0xFF;
		for (ir = MFT_REC_RESERVED; ir < MFT_REC_FREE; ir++) {
			struct ntfs_inode *ni;
			struct MFT_REC *mrec;
			ref.low = cpu_to_le32(ir);
			ref.seq = cpu_to_le16(ir);
			i = ntfs_iget5(sb, &ref, NULL);
				"Invalid reserved record %x",
			if (is_bad_inode(i)) {
			if (!is_rec_base(mrec))
			if (mrec->hard_links)
			if (ni_find_attr(ni, NULL, NULL, ATTR_NAME,
					 NULL, 0, NULL, NULL))
			__clear_bit(ir - MFT_REC_RESERVED,
				    &sbi->mft.reserved_bitmap);
	/* Scan 5 bits for zero. Bit 0 == MFT_REC_RESERVED */
	zbit = find_next_zero_bit(&sbi->mft.reserved_bitmap,
				  MFT_REC_FREE, MFT_REC_RESERVED);
	if (zbit >= MFT_REC_FREE) {
		sbi->mft.next_reserved = MFT_REC_FREE;
	sbi->mft.next_reserved = zbit;
	zlen = zbit == MFT_REC_FREE ? (MFT_REC_USER - MFT_REC_FREE) : 4;
	if (zbit + zlen > wnd->nbits)
		zlen = wnd->nbits - zbit;
	while (zlen > 1 && !wnd_is_free(wnd, zbit, zlen))
	/* [zbit, zbit + zlen) will be used for the MFT itself. */
	from = sbi->mft.used;
	ntfs_clear_mft_tail(sbi, from, to);
	wnd_zone_set(wnd, zbit, zlen);
	/* The request is to get a record for general purposes. */
	if (sbi->mft.next_free < MFT_REC_USER)
		sbi->mft.next_free = MFT_REC_USER;
	if (sbi->mft.next_free >= sbi->mft.bitmap.nbits) {
	} else if (!wnd_find(wnd, 1, MFT_REC_USER, 0, &fr)) {
		sbi->mft.next_free = sbi->mft.bitmap.nbits;
		sbi->mft.next_free = *rno + 1;
	err = ntfs_extend_mft(sbi);
	if (ni && !ni_add_subrecord(ni, *rno, mi)) {
	/* We have found a record that is not reserved for the next MFT. */
	if (*rno >= MFT_REC_FREE)
		wnd_set_used(wnd, *rno, 1);
	else if (*rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited)
		__set_bit(*rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
	up_write(&wnd->rw_lock);
 * ntfs_mark_rec_free - Mark record as free.
 * is_mft - true if we are changing the MFT itself.
void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno, bool is_mft)
	struct wnd_bitmap *wnd = &sbi->mft.bitmap;
	down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
	if (rno >= wnd->nbits)
	if (rno >= MFT_REC_FREE) {
		if (!wnd_is_used(wnd, rno, 1))
			ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
			wnd_set_free(wnd, rno, 1);
	} else if (rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited) {
		__clear_bit(rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
	if (rno < wnd_zone_bit(wnd))
		wnd_zone_set(wnd, rno, 1);
	else if (rno < sbi->mft.next_free && rno >= MFT_REC_USER)
		sbi->mft.next_free = rno;
	up_write(&wnd->rw_lock);
 * ntfs_clear_mft_tail - Format empty records [from, to).
 * sbi->mft.bitmap is locked for write.
int ntfs_clear_mft_tail(struct ntfs_sb_info *sbi, size_t from, size_t to)
	struct runs_tree *run;
	struct ntfs_inode *ni;
	rs = sbi->record_size;
	down_read(&ni->file.run_lock);
	vbo = (u64)from * rs;
	for (; from < to; from++, vbo += rs) {
		struct ntfs_buffers nb;
		err = ntfs_get_bh(sbi, run, vbo, rs, &nb);
		err = ntfs_write_bh(sbi, &sbi->new_rec->rhdr, &nb, 0);
	sbi->mft.used = from;
	up_read(&ni->file.run_lock);
 * ntfs_refresh_zone - Refresh MFT zone.
 * sbi->used.bitmap is locked for rw.
 * sbi->mft.bitmap is locked for write.
 * sbi->mft.ni->file.run_lock for write.
int ntfs_refresh_zone(struct ntfs_sb_info *sbi)
	struct wnd_bitmap *wnd = &sbi->used.bitmap;
	struct ntfs_inode *ni = sbi->mft.ni;
	/* Do not change anything if the MFT zone is not empty. */
	if (wnd_zone_len(wnd))
	vcn = bytes_to_cluster(sbi,
			       (u64)sbi->mft.bitmap.nbits << sbi->record_bits);
	if (!run_lookup_entry(&ni->file.run, vcn - 1, &lcn, &len, NULL))
	/* We should always find the last LCN for the MFT. */
	if (lcn == SPARSE_LCN)
	/* Try to allocate clusters after the last MFT run. */
	zlen = wnd_find(wnd, sbi->zone_max, lcn_s, 0, &lcn_s);
	wnd_zone_set(wnd, lcn_s, zlen);
 * ntfs_update_mftmirr - Update $MFTMirr data.
void ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
	struct super_block *sb = sbi->sb;
	u32 blocksize, bytes;
	sector_t block1, block2;
	 * sb can be NULL here. In this case sbi->flags should be 0 too.
	if (!sb || !(sbi->flags & NTFS_FLAGS_MFTMIRR))
	blocksize = sb->s_blocksize;
	bytes = sbi->mft.recs_mirr << sbi->record_bits;
	block1 = sbi->mft.lbo >> sb->s_blocksize_bits;
	block2 = sbi->mft.lbo2 >> sb->s_blocksize_bits;
	for (; bytes >= blocksize; bytes -= blocksize) {
		struct buffer_head *bh1, *bh2;
		bh1 = sb_bread(sb, block1++);
		bh2 = sb_getblk(sb, block2++);
		if (buffer_locked(bh2))
			__wait_on_buffer(bh2);
		memcpy(bh2->b_data, bh1->b_data, blocksize);
		set_buffer_uptodate(bh2);
		mark_buffer_dirty(bh2);
		err = wait ? sync_dirty_buffer(bh2) : 0;
	sbi->flags &= ~NTFS_FLAGS_MFTMIRR;
 * Mark the inode as bad and mark the filesystem as 'dirty'.
void ntfs_bad_inode(struct inode *inode, const char *hint)
	struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
	ntfs_inode_err(inode, "%s", hint);
	make_bad_inode(inode);
	ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
 * Mount: ntfs_set_state(NTFS_DIRTY_DIRTY)
 * Umount: ntfs_set_state(NTFS_DIRTY_CLEAR)
 * NTFS error: ntfs_set_state(NTFS_DIRTY_ERROR)
int ntfs_set_state(struct ntfs_sb_info *sbi, enum NTFS_DIRTY_FLAGS dirty)
	struct VOLUME_INFO *info;
	struct mft_inode *mi;
	struct ntfs_inode *ni;
	 * Do not change state if fs was real_dirty.
	 * Do not change state if fs is already dirty (clear).
	 * Do not change anything if mounted read-only.
	if (sbi->volume.real_dirty || sb_rdonly(sbi->sb))
	/* Check cached value. */
	if ((dirty == NTFS_DIRTY_CLEAR ? 0 : VOLUME_FLAG_DIRTY) ==
	    (sbi->volume.flags & VOLUME_FLAG_DIRTY))
	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_DIRTY);
	attr = ni_find_attr(ni, NULL, NULL, ATTR_VOL_INFO, NULL, 0, NULL, &mi);
	info = resident_data_ex(attr, SIZEOF_ATTRIBUTE_VOLUME_INFO);
	info_flags = info->flags;
	case NTFS_DIRTY_ERROR:
		ntfs_notice(sbi->sb, "Mark volume as dirty due to NTFS errors");
		sbi->volume.real_dirty = true;
	case NTFS_DIRTY_DIRTY:
		info->flags |= VOLUME_FLAG_DIRTY;
	case NTFS_DIRTY_CLEAR:
		info->flags &= ~VOLUME_FLAG_DIRTY;
	/* Cache current volume flags. */
	if (info_flags != info->flags) {
		sbi->volume.flags = info->flags;
	mark_inode_dirty_sync(&ni->vfs_inode);
	/* verify(!ntfs_update_mftmirr()); */
	/* Write MFT record to disk. */
	err = _ni_write_inode(&ni->vfs_inode, 1);
 * security_hash - Calculate a hash of the security descriptor.
static inline __le32 security_hash(const void *sd, size_t bytes)
	const __le32 *ptr = sd;
		hash = ((hash >> 0x1D) | (hash << 3)) + le32_to_cpu(*ptr++);
	return cpu_to_le32(hash);
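/*
 * Illustration: security_hash() folds the descriptor as 32-bit LE words
 * with hash = rol32(hash, 3) + word. For two words { 1, 2 } this gives
 * rol32(0, 3) + 1 == 1, then rol32(1, 3) + 2 == 0xA.
 */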
int ntfs_sb_read(struct super_block *sb, u64 lbo, size_t bytes, void *buffer)
	struct block_device *bdev = sb->s_bdev;
	u32 blocksize = sb->s_blocksize;
	u64 block = lbo >> sb->s_blocksize_bits;
	u32 off = lbo & (blocksize - 1);
	u32 op = blocksize - off;
	for (; bytes; block += 1, off = 0, op = blocksize) {
		struct buffer_head *bh = __bread(bdev, block, blocksize);
		memcpy(buffer, bh->b_data + off, op);
		buffer = Add2Ptr(buffer, op);
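/*
 * Example of the unaligned-read math above (hypothetical numbers): with
 * a 4K block size, lbo == 0x1234 starts in block 1 at off == 0x234, so
 * the first iteration copies op == 0xdcc bytes and every following
 * block is copied from offset 0.
 */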
int ntfs_sb_write(struct super_block *sb, u64 lbo, size_t bytes,
		  const void *buf, int wait)
	u32 blocksize = sb->s_blocksize;
	struct block_device *bdev = sb->s_bdev;
	sector_t block = lbo >> sb->s_blocksize_bits;
	u32 off = lbo & (blocksize - 1);
	u32 op = blocksize - off;
	struct buffer_head *bh;
	if (!wait && (sb->s_flags & SB_SYNCHRONOUS))
	for (; bytes; block += 1, off = 0, op = blocksize) {
		if (op < blocksize) {
			bh = __bread(bdev, block, blocksize);
				ntfs_err(sb, "failed to read block %llx",
			bh = __getblk(bdev, block, blocksize);
		if (buffer_locked(bh))
			__wait_on_buffer(bh);
			memcpy(bh->b_data + off, buf, op);
			buf = Add2Ptr(buf, op);
			memset(bh->b_data + off, -1, op);
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
			int err = sync_dirty_buffer(bh);
				"failed to sync buffer at block %llx, error %d",
int ntfs_sb_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		      u64 vbo, const void *buf, size_t bytes, int sync)
	struct super_block *sb = sbi->sb;
	u8 cluster_bits = sbi->cluster_bits;
	u32 off = vbo & sbi->cluster_mask;
	CLST lcn, clen, vcn = vbo >> cluster_bits, vcn_next;
	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
	if (lcn == SPARSE_LCN)
	lbo = ((u64)lcn << cluster_bits) + off;
	len = ((u64)clen << cluster_bits) - off;
		u32 op = min_t(u64, len, bytes);
		int err = ntfs_sb_write(sb, lbo, op, buf, sync);
		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		if (lcn == SPARSE_LCN)
		buf = Add2Ptr(buf, op);
		lbo = ((u64)lcn << cluster_bits);
		len = ((u64)clen << cluster_bits);
struct buffer_head *ntfs_bread_run(struct ntfs_sb_info *sbi,
				   const struct runs_tree *run, u64 vbo)
	struct super_block *sb = sbi->sb;
	u8 cluster_bits = sbi->cluster_bits;
	if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, NULL, NULL))
		return ERR_PTR(-ENOENT);
	lbo = ((u64)lcn << cluster_bits) + (vbo & sbi->cluster_mask);
	return ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
int ntfs_read_run_nb(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		     u64 vbo, void *buf, u32 bytes, struct ntfs_buffers *nb)
	struct super_block *sb = sbi->sb;
	u32 blocksize = sb->s_blocksize;
	u8 cluster_bits = sbi->cluster_bits;
	u32 off = vbo & sbi->cluster_mask;
	CLST vcn_next, vcn = vbo >> cluster_bits;
	struct buffer_head *bh;
		/* First reading of $Volume + $MFTMirr + $LogFile goes here. */
		if (vbo > MFT_REC_VOL * sbi->record_size) {
		/* Use the boot sector's absolute 'MFTCluster' to read the record. */
		lbo = vbo + sbi->mft.lbo;
		len = sbi->record_size;
	} else if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
	if (lcn == SPARSE_LCN) {
	lbo = ((u64)lcn << cluster_bits) + off;
	len = ((u64)clen << cluster_bits) - off;
	off = lbo & (blocksize - 1);
		u32 len32 = len >= bytes ? bytes : len;
		sector_t block = lbo >> sb->s_blocksize_bits;
			u32 op = blocksize - off;
			bh = ntfs_bread(sb, block);
				memcpy(buf, bh->b_data + off, op);
				buf = Add2Ptr(buf, op);
			} else if (nbh >= ARRAY_SIZE(nb->bh)) {
		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		if (lcn == SPARSE_LCN) {
		lbo = ((u64)lcn << cluster_bits);
		len = ((u64)clen << cluster_bits);
		put_bh(nb->bh[--nbh]);
 * Return: < 0 on error, 0 if ok, -E_NTFS_FIXUP if fixups need to be updated.
int ntfs_read_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
		 struct NTFS_RECORD_HEADER *rhdr, u32 bytes,
		 struct ntfs_buffers *nb)
	int err = ntfs_read_run_nb(sbi, run, vbo, rhdr, bytes, nb);
	return ntfs_fix_post_read(rhdr, nb->bytes, true);
int ntfs_get_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
		u32 bytes, struct ntfs_buffers *nb)
	struct super_block *sb = sbi->sb;
	u32 blocksize = sb->s_blocksize;
	u8 cluster_bits = sbi->cluster_bits;
	CLST vcn_next, vcn = vbo >> cluster_bits;
	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
	off = vbo & sbi->cluster_mask;
	lbo = ((u64)lcn << cluster_bits) + off;
	len = ((u64)clen << cluster_bits) - off;
	nb->off = off = lbo & (blocksize - 1);
		u32 len32 = min_t(u64, len, bytes);
		sector_t block = lbo >> sb->s_blocksize_bits;
			struct buffer_head *bh;
			if (nbh >= ARRAY_SIZE(nb->bh)) {
			op = blocksize - off;
			if (op == blocksize) {
				bh = sb_getblk(sb, block);
				if (buffer_locked(bh))
					__wait_on_buffer(bh);
				set_buffer_uptodate(bh);
				bh = ntfs_bread(sb, block);
		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		lbo = ((u64)lcn << cluster_bits);
		len = ((u64)clen << cluster_bits);
		put_bh(nb->bh[--nbh]);
int ntfs_write_bh(struct ntfs_sb_info *sbi, struct NTFS_RECORD_HEADER *rhdr,
		  struct ntfs_buffers *nb, int sync)
	struct super_block *sb = sbi->sb;
	u32 block_size = sb->s_blocksize;
	u32 bytes = nb->bytes;
	u16 fo = le16_to_cpu(rhdr->fix_off);
	u16 fn = le16_to_cpu(rhdr->fix_num);
	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
	    fn * SECTOR_SIZE > bytes) {
	for (idx = 0; bytes && idx < nb->nbufs; idx += 1, off = 0) {
		u32 op = block_size - off;
		struct buffer_head *bh = nb->bh[idx];
		__le16 *ptr, *end_data;
		if (buffer_locked(bh))
			__wait_on_buffer(bh);
		bh_data = bh->b_data + off;
		end_data = Add2Ptr(bh_data, op);
		memcpy(bh_data, rhdr, op);
			fixup = Add2Ptr(bh_data, fo);
			t16 = le16_to_cpu(sample);
			if (t16 >= 0x7FFF) {
				sample = *fixup = cpu_to_le16(1);
				sample = cpu_to_le16(t16 + 1);
			*(__le16 *)Add2Ptr(rhdr, fo) = sample;
			ptr = Add2Ptr(bh_data, SECTOR_SIZE - sizeof(short));
				ptr += SECTOR_SIZE / sizeof(short);
			} while (ptr < end_data);
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
			int err2 = sync_dirty_buffer(bh);
		rhdr = Add2Ptr(rhdr, op);
 * ntfs_bio_pages - Read/write pages from/to disk.
int ntfs_bio_pages(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		   struct page **pages, u32 nr_pages, u64 vbo, u32 bytes,
	struct bio *new, *bio = NULL;
	struct super_block *sb = sbi->sb;
	struct block_device *bdev = sb->s_bdev;
	u8 cluster_bits = sbi->cluster_bits;
	CLST lcn, clen, vcn, vcn_next;
	u32 add, off, page_idx;
	struct blk_plug plug;
	blk_start_plug(&plug);
	/* Align vbo and bytes to a 512-byte boundary. */
	lbo = (vbo + bytes + 511) & ~511ull;
	vbo = vbo & ~511ull;
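	/*
	 * E.g. (hypothetical values): vbo == 0x3ff, bytes == 0x202: the
	 * end offset 0x601 rounds up to lbo == 0x800 and vbo rounds down
	 * to 0x200, so the whole 0x600-byte span is sector-aligned.
	 */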
	vcn = vbo >> cluster_bits;
	if (!run_lookup_entry(run, vcn, &lcn, &clen, &run_idx)) {
	off = vbo & sbi->cluster_mask;
		lbo = ((u64)lcn << cluster_bits) + off;
		len = ((u64)clen << cluster_bits) - off;
		new = bio_alloc(bdev, nr_pages - page_idx, op, GFP_NOFS);
			bio_chain(bio, new);
		bio->bi_iter.bi_sector = lbo >> 9;
			off = vbo & (PAGE_SIZE - 1);
			add = off + len > PAGE_SIZE ? (PAGE_SIZE - off) : len;
			if (bio_add_page(bio, page, add, off) < add)
			if (add + off == PAGE_SIZE) {
				if (WARN_ON(page_idx >= nr_pages)) {
				page = pages[page_idx];
		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++run_idx, &vcn, &lcn, &clen) ||
		err = submit_bio_wait(bio);
	blk_finish_plug(&plug);
 * ntfs_bio_fill_1 - Helper for ntfs_loadlog_and_replay().
 * Fill the on-disk logfile range with -1;
 * this marks the logfile as empty.
int ntfs_bio_fill_1(struct ntfs_sb_info *sbi, const struct runs_tree *run)
	struct super_block *sb = sbi->sb;
	struct block_device *bdev = sb->s_bdev;
	u8 cluster_bits = sbi->cluster_bits;
	struct bio *new, *bio = NULL;
	struct blk_plug plug;
	fill = alloc_page(GFP_KERNEL);
	kaddr = kmap_atomic(fill);
	memset(kaddr, -1, PAGE_SIZE);
	kunmap_atomic(kaddr);
	flush_dcache_page(fill);
	if (!run_lookup_entry(run, 0, &lcn, &clen, &run_idx)) {
	 * TODO: Try blkdev_issue_write_same.
	blk_start_plug(&plug);
		lbo = (u64)lcn << cluster_bits;
		len = (u64)clen << cluster_bits;
		new = bio_alloc(bdev, BIO_MAX_VECS, REQ_OP_WRITE, GFP_NOFS);
			bio_chain(bio, new);
		bio->bi_iter.bi_sector = lbo >> 9;
			u32 add = len > PAGE_SIZE ? PAGE_SIZE : len;
			if (bio_add_page(bio, fill, add, 0) < add)
	} while (run_get_entry(run, ++run_idx, NULL, &lcn, &clen));
		err = submit_bio_wait(bio);
	blk_finish_plug(&plug);
int ntfs_vbo_to_lbo(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		    u64 vbo, u64 *lbo, u64 *bytes)
	u8 cluster_bits = sbi->cluster_bits;
	if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, &len, NULL))
	off = vbo & sbi->cluster_mask;
	*lbo = lcn == SPARSE_LCN ? -1 : (((u64)lcn << cluster_bits) + off);
	*bytes = ((u64)len << cluster_bits) - off;
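/*
 * Illustration (hypothetical numbers): with 4K clusters
 * (cluster_bits == 12), vbo == 0x5123 falls into vcn 5; if that vcn
 * maps to lcn 100, then *lbo == (100 << 12) + 0x123 == 0x64123.
 */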
struct ntfs_inode *ntfs_new_inode(struct ntfs_sb_info *sbi, CLST rno,
				  enum RECORD_FLAG flag)
	struct super_block *sb = sbi->sb;
	struct inode *inode = new_inode(sb);
	struct ntfs_inode *ni;
		return ERR_PTR(-ENOMEM);
	err = mi_format_new(&ni->mi, sbi, rno, flag, false);
	if (insert_inode_locked(inode) < 0) {
		make_bad_inode(inode);
 * O:BAG:BAD:(A;OICI;FA;;;WD)
 * Owner S-1-5-32-544 (Administrators)
 * Group S-1-5-32-544 (Administrators)
 * ACE: allow S-1-1-0 (Everyone) with FILE_ALL_ACCESS
const u8 s_default_security[] __aligned(8) = {
	0x01, 0x00, 0x04, 0x80, 0x30, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x02, 0x00, 0x1C, 0x00,
	0x01, 0x00, 0x00, 0x00, 0x00, 0x03, 0x14, 0x00, 0xFF, 0x01, 0x1F, 0x00,
	0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
	0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x20, 0x00, 0x00, 0x00,
	0x20, 0x02, 0x00, 0x00, 0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
	0x20, 0x00, 0x00, 0x00, 0x20, 0x02, 0x00, 0x00,
static_assert(sizeof(s_default_security) == 0x50);
static inline u32 sid_length(const struct SID *sid)
	return struct_size(sid, SubAuthority, sid->SubAuthorityCount);
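/*
 * E.g. S-1-5-32-544 (Administrators) carries two sub-authorities
 * (32 and 544), so sid_length() is the 8-byte SID header plus
 * 2 * sizeof(__le32) == 16 bytes in total.
 */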
 * Thanks to Mark Harmstone for the idea.
static bool is_acl_valid(const struct ACL *acl, u32 len)
	const struct ACE_HEADER *ace;
	u16 ace_count, ace_size;
	if (acl->AclRevision != ACL_REVISION &&
	    acl->AclRevision != ACL_REVISION_DS) {
		 * This value should be ACL_REVISION, unless the ACL contains an
		 * object-specific ACE, in which case this value must be ACL_REVISION_DS.
		 * All ACEs in an ACL must be at the same revision level.
	if (le16_to_cpu(acl->AclSize) > len)
	len -= sizeof(struct ACL);
	ace = (struct ACE_HEADER *)&acl[1];
	ace_count = le16_to_cpu(acl->AceCount);
	for (i = 0; i < ace_count; i++) {
		if (len < sizeof(struct ACE_HEADER))
		ace_size = le16_to_cpu(ace->AceSize);
		ace = Add2Ptr(ace, ace_size);
bool is_sd_valid(const struct SECURITY_DESCRIPTOR_RELATIVE *sd, u32 len)
	u32 sd_owner, sd_group, sd_sacl, sd_dacl;
	if (len < sizeof(struct SECURITY_DESCRIPTOR_RELATIVE))
	if (sd->Revision != 1)
	if (!(sd->Control & SE_SELF_RELATIVE))
	sd_owner = le32_to_cpu(sd->Owner);
		const struct SID *owner = Add2Ptr(sd, sd_owner);
		if (sd_owner + offsetof(struct SID, SubAuthority) > len)
		if (owner->Revision != 1)
		if (sd_owner + sid_length(owner) > len)
	sd_group = le32_to_cpu(sd->Group);
		const struct SID *group = Add2Ptr(sd, sd_group);
		if (sd_group + offsetof(struct SID, SubAuthority) > len)
		if (group->Revision != 1)
		if (sd_group + sid_length(group) > len)
	sd_sacl = le32_to_cpu(sd->Sacl);
		const struct ACL *sacl = Add2Ptr(sd, sd_sacl);
		if (sd_sacl + sizeof(struct ACL) > len)
		if (!is_acl_valid(sacl, len - sd_sacl))
	sd_dacl = le32_to_cpu(sd->Dacl);
		const struct ACL *dacl = Add2Ptr(sd, sd_dacl);
		if (sd_dacl + sizeof(struct ACL) > len)
		if (!is_acl_valid(dacl, len - sd_dacl))
 * ntfs_security_init - Load and parse $Secure.
int ntfs_security_init(struct ntfs_sb_info *sbi)
	struct super_block *sb = sbi->sb;
	struct inode *inode;
	struct ntfs_inode *ni;
	struct ATTRIB *attr;
	struct ATTR_LIST_ENTRY *le;
	struct NTFS_DE_SII *sii_e;
	struct ntfs_fnd *fnd_sii = NULL;
	const struct INDEX_ROOT *root_sii;
	const struct INDEX_ROOT *root_sdh;
	struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
	struct ntfs_index *indx_sii = &sbi->security.index_sii;
	ref.low = cpu_to_le32(MFT_REC_SECURE);
	ref.seq = cpu_to_le16(MFT_REC_SECURE);
	inode = ntfs_iget5(sb, &ref, &NAME_SECURE);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		ntfs_err(sb, "Failed to load $Secure (%d).", err);
	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SDH_NAME,
			    ARRAY_SIZE(SDH_NAME), NULL, NULL);
	    !(root_sdh = resident_data_ex(attr, sizeof(struct INDEX_ROOT))) ||
	    root_sdh->type != ATTR_ZERO ||
	    root_sdh->rule != NTFS_COLLATION_TYPE_SECURITY_HASH ||
	    offsetof(struct INDEX_ROOT, ihdr) +
			    le32_to_cpu(root_sdh->ihdr.used) >
		    le32_to_cpu(attr->res.data_size)) {
		ntfs_err(sb, "$Secure::$SDH is corrupted.");
	err = indx_init(indx_sdh, sbi, attr, INDEX_MUTEX_SDH);
		ntfs_err(sb, "Failed to initialize $Secure::$SDH (%d).", err);
	attr = ni_find_attr(ni, attr, &le, ATTR_ROOT, SII_NAME,
			    ARRAY_SIZE(SII_NAME), NULL, NULL);
	    !(root_sii = resident_data_ex(attr, sizeof(struct INDEX_ROOT))) ||
	    root_sii->type != ATTR_ZERO ||
	    root_sii->rule != NTFS_COLLATION_TYPE_UINT ||
	    offsetof(struct INDEX_ROOT, ihdr) +
			    le32_to_cpu(root_sii->ihdr.used) >
		    le32_to_cpu(attr->res.data_size)) {
		ntfs_err(sb, "$Secure::$SII is corrupted.");
	err = indx_init(indx_sii, sbi, attr, INDEX_MUTEX_SII);
		ntfs_err(sb, "Failed to initialize $Secure::$SII (%d).", err);
	fnd_sii = fnd_get();
	sds_size = inode->i_size;
	/* Find the last valid Id. */
	sbi->security.next_id = SECURITY_ID_FIRST;
	/* Always write new security at the end of the bucket. */
	sbi->security.next_off =
		ALIGN(sds_size - SecurityDescriptorsBlockSize, 16);
		err = indx_find_raw(indx_sii, ni, root_sii, &ne, &off, fnd_sii);
		sii_e = (struct NTFS_DE_SII *)ne;
		if (le16_to_cpu(ne->view.data_size) < sizeof(sii_e->sec_hdr))
		next_id = le32_to_cpu(sii_e->sec_id) + 1;
		if (next_id >= sbi->security.next_id)
			sbi->security.next_id = next_id;
	sbi->security.ni = ni;
 * ntfs_get_security_by_id - Read security descriptor by id.
int ntfs_get_security_by_id(struct ntfs_sb_info *sbi, __le32 security_id,
			    struct SECURITY_DESCRIPTOR_RELATIVE **sd,
	struct ntfs_inode *ni = sbi->security.ni;
	struct ntfs_index *indx = &sbi->security.index_sii;
	struct NTFS_DE_SII *sii_e;
	struct ntfs_fnd *fnd_sii;
	struct SECURITY_HDR d_security;
	const struct INDEX_ROOT *root_sii;
	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);
	fnd_sii = fnd_get();
	root_sii = indx_get_root(indx, ni, NULL, NULL);
	/* Try to find this SECURITY descriptor in SII indexes. */
	err = indx_find(indx, ni, root_sii, &security_id, sizeof(security_id),
			NULL, &diff, (struct NTFS_DE **)&sii_e, fnd_sii);
	t32 = le32_to_cpu(sii_e->sec_hdr.size);
	if (t32 < sizeof(struct SECURITY_HDR)) {
	if (t32 > sizeof(struct SECURITY_HDR) + 0x10000) {
		/* Security descriptor looks too big; 0x10000 is an arbitrary upper bound. */
	*size = t32 - sizeof(struct SECURITY_HDR);
	p = kmalloc(*size, GFP_NOFS);
	err = ntfs_read_run_nb(sbi, &ni->file.run,
			       le64_to_cpu(sii_e->sec_hdr.off), &d_security,
			       sizeof(d_security), NULL);
	if (memcmp(&d_security, &sii_e->sec_hdr, sizeof(d_security))) {
	err = ntfs_read_run_nb(sbi, &ni->file.run,
			       le64_to_cpu(sii_e->sec_hdr.off) +
				       sizeof(struct SECURITY_HDR),
 * ntfs_insert_security - Insert security descriptor into $Secure::SDS.
 * SECURITY Descriptor Stream data is organized into chunks of 256K bytes
 * and it contains a mirror copy of each security descriptor. When writing
 * to a security descriptor at location X, another copy will be written at
 * location (X+256K).
 * When writing a security descriptor that will cross the 256K boundary,
 * the pointer will be advanced by 256K to skip
 * over the mirror portion.
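 *
 * Example (hypothetical offsets): with next_off == 0x3ff00 and a
 * 0x200-byte descriptor, the 0x100 bytes left before the 0x40000
 * boundary are skipped, the descriptor is written at 0x80000 (the next
 * primary block) and its mirror copy at 0xc0000.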
int ntfs_insert_security(struct ntfs_sb_info *sbi,
			 const struct SECURITY_DESCRIPTOR_RELATIVE *sd,
			 u32 size_sd, __le32 *security_id, bool *inserted)
	struct ntfs_inode *ni = sbi->security.ni;
	struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
	struct ntfs_index *indx_sii = &sbi->security.index_sii;
	struct NTFS_DE_SDH *e;
	struct NTFS_DE_SDH sdh_e;
	struct NTFS_DE_SII sii_e;
	struct SECURITY_HDR *d_security;
	u32 new_sec_size = size_sd + sizeof(struct SECURITY_HDR);
	u32 aligned_sec_size = ALIGN(new_sec_size, 16);
	struct SECURITY_KEY hash_key;
	struct ntfs_fnd *fnd_sdh = NULL;
	const struct INDEX_ROOT *root_sdh;
	const struct INDEX_ROOT *root_sii;
	u64 mirr_off, new_sds_size;
	static_assert((1 << Log2OfSecurityDescriptorsBlockSize) ==
		      SecurityDescriptorsBlockSize);
	hash_key.hash = security_hash(sd, size_sd);
	hash_key.sec_id = SECURITY_ID_INVALID;
		*security_id = SECURITY_ID_INVALID;
	/* Allocate a temporary buffer. */
	d_security = kzalloc(aligned_sec_size, GFP_NOFS);
	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);
	fnd_sdh = fnd_get();
	root_sdh = indx_get_root(indx_sdh, ni, NULL, NULL);
	root_sii = indx_get_root(indx_sii, ni, NULL, NULL);
	 * Check if such security already exists.
	 * Use "SDH" and the hash to get the offset in "SDS".
	err = indx_find(indx_sdh, ni, root_sdh, &hash_key, sizeof(hash_key),
			&d_security->key.sec_id, &diff, (struct NTFS_DE **)&e,
		if (le32_to_cpu(e->sec_hdr.size) == new_sec_size) {
			err = ntfs_read_run_nb(sbi, &ni->file.run,
					       le64_to_cpu(e->sec_hdr.off),
					       d_security, new_sec_size, NULL);
			if (le32_to_cpu(d_security->size) == new_sec_size &&
			    d_security->key.hash == hash_key.hash &&
			    !memcmp(d_security + 1, sd, size_sd)) {
				*security_id = d_security->key.sec_id;
				/* Such security already exists. */
		err = indx_find_sort(indx_sdh, ni, root_sdh,
				     (struct NTFS_DE **)&e, fnd_sdh);
		if (!e || e->key.hash != hash_key.hash)
	/* Zero unused space. */
	next = sbi->security.next_off & (SecurityDescriptorsBlockSize - 1);
	left = SecurityDescriptorsBlockSize - next;
	/* Zero gap until SecurityDescriptorsBlockSize. */
	if (left < new_sec_size) {
		/* Zero "left" bytes from sbi->security.next_off. */
		sbi->security.next_off += SecurityDescriptorsBlockSize + left;
	/* Zero tail of previous security. */
	//used = ni->vfs_inode.i_size & (SecurityDescriptorsBlockSize - 1);
	 * 0x40438 == ni->vfs_inode.i_size
	 * 0x00440 == sbi->security.next_off
	 * need to zero [0x438-0x440)
	 * if (next > used) {
	 *  u32 tozero = next - used;
	 *  zero "tozero" bytes from sbi->security.next_off - tozero
	/* Format new security descriptor. */
	d_security->key.hash = hash_key.hash;
	d_security->key.sec_id = cpu_to_le32(sbi->security.next_id);
	d_security->off = cpu_to_le64(sbi->security.next_off);
	d_security->size = cpu_to_le32(new_sec_size);
	memcpy(d_security + 1, sd, size_sd);
	/* Write main SDS bucket. */
	err = ntfs_sb_write_run(sbi, &ni->file.run, sbi->security.next_off,
				d_security, aligned_sec_size, 0);
	mirr_off = sbi->security.next_off + SecurityDescriptorsBlockSize;
	new_sds_size = mirr_off + aligned_sec_size;
	if (new_sds_size > ni->vfs_inode.i_size) {
		err = attr_set_size(ni, ATTR_DATA, SDS_NAME,
				    ARRAY_SIZE(SDS_NAME), &ni->file.run,
				    new_sds_size, &new_sds_size, false, NULL);
	/* Write copy SDS bucket. */
	err = ntfs_sb_write_run(sbi, &ni->file.run, mirr_off, d_security,
				aligned_sec_size, 0);
	/* Fill SII entry. */
	sii_e.de.view.data_off =
		cpu_to_le16(offsetof(struct NTFS_DE_SII, sec_hdr));
	sii_e.de.view.data_size = cpu_to_le16(sizeof(struct SECURITY_HDR));
	sii_e.de.view.res = 0;
	sii_e.de.size = cpu_to_le16(sizeof(struct NTFS_DE_SII));
	sii_e.de.key_size = cpu_to_le16(sizeof(d_security->key.sec_id));
	sii_e.sec_id = d_security->key.sec_id;
	memcpy(&sii_e.sec_hdr, d_security, sizeof(struct SECURITY_HDR));
	err = indx_insert_entry(indx_sii, ni, &sii_e.de, NULL, NULL, 0);
	/* Fill SDH entry. */
	sdh_e.de.view.data_off =
		cpu_to_le16(offsetof(struct NTFS_DE_SDH, sec_hdr));
	sdh_e.de.view.data_size = cpu_to_le16(sizeof(struct SECURITY_HDR));
	sdh_e.de.view.res = 0;
	sdh_e.de.size = cpu_to_le16(SIZEOF_SDH_DIRENTRY);
	sdh_e.de.key_size = cpu_to_le16(sizeof(sdh_e.key));
	sdh_e.key.hash = d_security->key.hash;
	sdh_e.key.sec_id = d_security->key.sec_id;
	memcpy(&sdh_e.sec_hdr, d_security, sizeof(struct SECURITY_HDR));
	sdh_e.magic[0] = cpu_to_le16('I');
	sdh_e.magic[1] = cpu_to_le16('I');
	err = indx_insert_entry(indx_sdh, ni, &sdh_e.de, (void *)(size_t)1,
	*security_id = d_security->key.sec_id;
	/* Update Id and offset for next descriptor. */
	sbi->security.next_id += 1;
	sbi->security.next_off += aligned_sec_size;
	mark_inode_dirty(&ni->vfs_inode);
 * ntfs_reparse_init - Load and parse $Extend/$Reparse.
int ntfs_reparse_init(struct ntfs_sb_info *sbi)
	struct ntfs_inode *ni = sbi->reparse.ni;
	struct ntfs_index *indx = &sbi->reparse.index_r;
	struct ATTRIB *attr;
	struct ATTR_LIST_ENTRY *le;
	const struct INDEX_ROOT *root_r;
	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SR_NAME,
			    ARRAY_SIZE(SR_NAME), NULL, NULL);
	root_r = resident_data(attr);
	if (root_r->type != ATTR_ZERO ||
	    root_r->rule != NTFS_COLLATION_TYPE_UINTS) {
	err = indx_init(indx, sbi, attr, INDEX_MUTEX_SR);
 * ntfs_objid_init - Load and parse $Extend/$ObjId.
int ntfs_objid_init(struct ntfs_sb_info *sbi)
	struct ntfs_inode *ni = sbi->objid.ni;
	struct ntfs_index *indx = &sbi->objid.index_o;
	struct ATTRIB *attr;
	struct ATTR_LIST_ENTRY *le;
	const struct INDEX_ROOT *root;
	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SO_NAME,
			    ARRAY_SIZE(SO_NAME), NULL, NULL);
	root = resident_data(attr);
	if (root->type != ATTR_ZERO ||
	    root->rule != NTFS_COLLATION_TYPE_UINTS) {
	err = indx_init(indx, sbi, attr, INDEX_MUTEX_SO);
int ntfs_objid_remove(struct ntfs_sb_info *sbi, struct GUID *guid)
	struct ntfs_inode *ni = sbi->objid.ni;
	struct ntfs_index *indx = &sbi->objid.index_o;
	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_OBJID);
	err = indx_delete_entry(indx, ni, guid, sizeof(*guid), NULL);
	mark_inode_dirty(&ni->vfs_inode);
int ntfs_insert_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
			const struct MFT_REF *ref)
	struct ntfs_inode *ni = sbi->reparse.ni;
	struct ntfs_index *indx = &sbi->reparse.index_r;
	struct NTFS_DE_R re;
	memset(&re, 0, sizeof(re));
	re.de.view.data_off = cpu_to_le16(offsetof(struct NTFS_DE_R, zero));
	re.de.size = cpu_to_le16(sizeof(struct NTFS_DE_R));
	re.de.key_size = cpu_to_le16(sizeof(re.key));
	re.key.ReparseTag = rtag;
	memcpy(&re.key.ref, ref, sizeof(*ref));
	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);
	err = indx_insert_entry(indx, ni, &re.de, NULL, NULL, 0);
	mark_inode_dirty(&ni->vfs_inode);
int ntfs_remove_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
			const struct MFT_REF *ref)
	struct ntfs_inode *ni = sbi->reparse.ni;
	struct ntfs_index *indx = &sbi->reparse.index_r;
	struct ntfs_fnd *fnd = NULL;
	struct REPARSE_KEY rkey;
	struct NTFS_DE_R *re;
	struct INDEX_ROOT *root_r;
	rkey.ReparseTag = rtag;
	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);
	err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
	root_r = indx_get_root(indx, ni, NULL, NULL);
	/* The '1' forces indx_find() to ignore rkey.ReparseTag when comparing keys. */
	err = indx_find(indx, ni, root_r, &rkey, sizeof(rkey), (void *)1, &diff,
			(struct NTFS_DE **)&re, fnd);
	if (memcmp(&re->key.ref, ref, sizeof(*ref))) {
		/* Should be impossible; the volume may be corrupt. */
	memcpy(&rkey, &re->key, sizeof(rkey));
	err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
	mark_inode_dirty(&ni->vfs_inode);
static inline void ntfs_unmap_and_discard(struct ntfs_sb_info *sbi, CLST lcn,
	ntfs_unmap_meta(sbi->sb, lcn, len);
	ntfs_discard(sbi, lcn, len);
void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim)
	CLST end, i, zone_len, zlen;
	struct wnd_bitmap *wnd = &sbi->used.bitmap;
	down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
	if (!wnd_is_used(wnd, lcn, len)) {
		/* Mark volume as dirty outside of wnd->rw_lock. */
		for (i = lcn; i < end; i++) {
			if (wnd_is_used(wnd, i, 1)) {
			ntfs_unmap_and_discard(sbi, lcn, len);
		wnd_set_free(wnd, lcn, len);
		ntfs_unmap_and_discard(sbi, lcn, len);
	wnd_set_free(wnd, lcn, len);
	/* Append to the MFT zone, if possible. */
	zone_len = wnd_zone_len(wnd);
	zlen = min(zone_len + len, sbi->zone_max);
	if (zlen == zone_len) {
		/* MFT zone already has maximum size. */
	} else if (!zone_len) {
		/* Create MFT zone only if 'zlen' is large enough. */
		if (zlen == sbi->zone_max)
			wnd_zone_set(wnd, lcn, zlen);
		CLST zone_lcn = wnd_zone_bit(wnd);
		if (lcn + len == zone_lcn) {
			/* Append into head MFT zone. */
			wnd_zone_set(wnd, lcn, zlen);
		} else if (zone_lcn + zone_len == lcn) {
			/* Append into tail MFT zone. */
			wnd_zone_set(wnd, zone_lcn, zlen);
	up_write(&wnd->rw_lock);
		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
 * run_deallocate - Deallocate clusters.
int run_deallocate(struct ntfs_sb_info *sbi, const struct runs_tree *run,
	while (run_get_entry(run, idx++, NULL, &lcn, &len)) {
		if (lcn == SPARSE_LCN)
		mark_as_free_ex(sbi, lcn, len, trim);
static inline bool name_has_forbidden_chars(const struct le_str *fname)
	/* Check for forbidden chars. */
	for (i = 0; i < fname->len; ++i) {
		ch = le16_to_cpu(fname->name[i]);
		/* Disallowed by Windows. */
	/* File names cannot end with a space or a dot. */
	if (fname->len > 0) {
		ch = le16_to_cpu(fname->name[fname->len - 1]);
		if (ch == ' ' || ch == '.')
static inline bool is_reserved_name(const struct ntfs_sb_info *sbi,
				    const struct le_str *fname)
	const __le16 *name = fname->name;
	int len = fname->len;
	const u16 *upcase = sbi->upcase;
	/* Check for 3-char reserved names (device names). */
	/* The name by itself or with any extension is forbidden. */
	if (len == 3 || (len > 3 && le16_to_cpu(name[3]) == '.'))
		if (!ntfs_cmp_names(name, 3, CON_NAME, 3, upcase, false) ||
		    !ntfs_cmp_names(name, 3, NUL_NAME, 3, upcase, false) ||
		    !ntfs_cmp_names(name, 3, AUX_NAME, 3, upcase, false) ||
		    !ntfs_cmp_names(name, 3, PRN_NAME, 3, upcase, false))
	/* Check for 4-char reserved names (a port name followed by 1..9). */
	/* The name by itself or with any extension is forbidden. */
	if (len == 4 || (len > 4 && le16_to_cpu(name[4]) == '.')) {
		port_digit = le16_to_cpu(name[3]);
		if (port_digit >= '1' && port_digit <= '9')
			if (!ntfs_cmp_names(name, 3, COM_NAME, 3, upcase,
			    !ntfs_cmp_names(name, 3, LPT_NAME, 3, upcase,
 * valid_windows_name - Check if a file name is valid in Windows.
bool valid_windows_name(struct ntfs_sb_info *sbi, const struct le_str *fname)
	return !name_has_forbidden_chars(fname) &&
	       !is_reserved_name(sbi, fname);
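/*
 * For instance, "CON", "LPT7.log" and "name." are rejected, while
 * "CONSOLE" or "COM0" pass: only the bare device names (optionally
 * followed by an extension), forbidden characters and trailing
 * dots/spaces are refused.
 */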
 * ntfs_set_label - Update the current NTFS label.
int ntfs_set_label(struct ntfs_sb_info *sbi, u8 *label, int len)
	struct ATTRIB *attr;
	struct ntfs_inode *ni = sbi->volume.ni;
	const u8 max_ulen = 0x80; /* TODO: Use attrdef to get the maximum length. */
	/* Allocate PATH_MAX bytes. */
	struct cpu_str *uni = __getname();
	err = ntfs_nls_to_utf16(sbi, label, len, uni, (PATH_MAX - 2) / 2,
				UTF16_LITTLE_ENDIAN);
	if (uni->len > max_ulen) {
		ntfs_warn(sbi->sb, "new label is too long");
	/* Ignore any errors. */
	ni_remove_attr(ni, ATTR_LABEL, NULL, 0, false, NULL);
	err = ni_insert_resident(ni, uni->len * sizeof(u16), ATTR_LABEL, NULL,
				 0, &attr, NULL, NULL);
	/* Write the new label into the on-disk struct. */
	memcpy(resident_data(attr), uni->name, uni->len * sizeof(u16));
	/* Update the cached value of the current label. */
	if (len >= ARRAY_SIZE(sbi->volume.label))
		len = ARRAY_SIZE(sbi->volume.label) - 1;
	memcpy(sbi->volume.label, label, len);
	sbi->volume.label[len] = 0;
	mark_inode_dirty_sync(&ni->vfs_inode);
	err = _ni_write_inode(&ni->vfs_inode, 0);