// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 */

#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/nls.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"
const struct cpu_str NAME_MFT = {
	4, 0, { '$', 'M', 'F', 'T' },
};

const struct cpu_str NAME_MIRROR = {
	8, 0, { '$', 'M', 'F', 'T', 'M', 'i', 'r', 'r' },
};

const struct cpu_str NAME_LOGFILE = {
	8, 0, { '$', 'L', 'o', 'g', 'F', 'i', 'l', 'e' },
};

const struct cpu_str NAME_VOLUME = {
	7, 0, { '$', 'V', 'o', 'l', 'u', 'm', 'e' },
};

const struct cpu_str NAME_ATTRDEF = {
	8, 0, { '$', 'A', 't', 't', 'r', 'D', 'e', 'f' },
};
const struct cpu_str NAME_ROOT = {
	1, 0, { '.' },
};
const struct cpu_str NAME_BITMAP = {
	7, 0, { '$', 'B', 'i', 't', 'm', 'a', 'p' },
};

const struct cpu_str NAME_BOOT = {
	5, 0, { '$', 'B', 'o', 'o', 't' },
};

const struct cpu_str NAME_BADCLUS = {
	8, 0, { '$', 'B', 'a', 'd', 'C', 'l', 'u', 's' },
};

const struct cpu_str NAME_QUOTA = {
	6, 0, { '$', 'Q', 'u', 'o', 't', 'a' },
};

const struct cpu_str NAME_SECURE = {
	7, 0, { '$', 'S', 'e', 'c', 'u', 'r', 'e' },
};

const struct cpu_str NAME_UPCASE = {
	7, 0, { '$', 'U', 'p', 'C', 'a', 's', 'e' },
};

const struct cpu_str NAME_EXTEND = {
	7, 0, { '$', 'E', 'x', 't', 'e', 'n', 'd' },
};

const struct cpu_str NAME_OBJID = {
	6, 0, { '$', 'O', 'b', 'j', 'I', 'd' },
};

const struct cpu_str NAME_REPARSE = {
	8, 0, { '$', 'R', 'e', 'p', 'a', 'r', 's', 'e' },
};

const struct cpu_str NAME_USNJRNL = {
	8, 0, { '$', 'U', 's', 'n', 'J', 'r', 'n', 'l' },
};
const __le16 BAD_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('B'), cpu_to_le16('a'), cpu_to_le16('d'),
};

const __le16 I30_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('I'), cpu_to_le16('3'), cpu_to_le16('0'),
};

const __le16 SII_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('I'), cpu_to_le16('I'),
};

const __le16 SDH_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('H'),
};

const __le16 SDS_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('S'),
};

const __le16 SO_NAME[2] = {
	cpu_to_le16('$'), cpu_to_le16('O'),
};

const __le16 SQ_NAME[2] = {
	cpu_to_le16('$'), cpu_to_le16('Q'),
};

const __le16 SR_NAME[2] = {
	cpu_to_le16('$'), cpu_to_le16('R'),
};
#ifdef CONFIG_NTFS3_LZX_XPRESS
/* "WofCompressedData" */
const __le16 WOF_NAME[17] = {
	cpu_to_le16('W'), cpu_to_le16('o'), cpu_to_le16('f'), cpu_to_le16('C'),
	cpu_to_le16('o'), cpu_to_le16('m'), cpu_to_le16('p'), cpu_to_le16('r'),
	cpu_to_le16('e'), cpu_to_le16('s'), cpu_to_le16('s'), cpu_to_le16('e'),
	cpu_to_le16('d'), cpu_to_le16('D'), cpu_to_le16('a'), cpu_to_le16('t'),
	cpu_to_le16('a'),
};
#endif
static const __le16 CON_NAME[3] = {
	cpu_to_le16('C'), cpu_to_le16('O'), cpu_to_le16('N'),
};

static const __le16 NUL_NAME[3] = {
	cpu_to_le16('N'), cpu_to_le16('U'), cpu_to_le16('L'),
};

static const __le16 AUX_NAME[3] = {
	cpu_to_le16('A'), cpu_to_le16('U'), cpu_to_le16('X'),
};

static const __le16 PRN_NAME[3] = {
	cpu_to_le16('P'), cpu_to_le16('R'), cpu_to_le16('N'),
};

static const __le16 COM_NAME[3] = {
	cpu_to_le16('C'), cpu_to_le16('O'), cpu_to_le16('M'),
};

static const __le16 LPT_NAME[3] = {
	cpu_to_le16('L'), cpu_to_le16('P'), cpu_to_le16('T'),
};
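
/*
 * Background note: NTFS multi-sector records ($MFT records, index buffers,
 * log pages) are protected by "update sequence" fixups.  The last two bytes
 * of every 512-byte sector are replaced on disk by an update sequence
 * number kept in the record header, and the displaced bytes are saved in
 * the fixup array at rhdr->fix_off.  A torn write is detected when some
 * sector tail no longer matches the sequence number.  The two helpers
 * below install the fixups before a write and verify/remove them after a
 * read.
 */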
/*
 * ntfs_fix_pre_write - Insert fixups into @rhdr before writing to disk.
 */
bool ntfs_fix_pre_write(struct NTFS_RECORD_HEADER *rhdr, size_t bytes)
{
	u16 *fixup, *ptr;
	u16 sample;
	u16 fo = le16_to_cpu(rhdr->fix_off);
	u16 fn = le16_to_cpu(rhdr->fix_num);

	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
	    fn * SECTOR_SIZE > bytes) {
		return false;
	}

	/* Get fixup pointer. */
	fixup = Add2Ptr(rhdr, fo);

	/* Advance the update sequence number, wrapping before 0x7FFF. */
	if (*fixup >= 0x7FFF)
		*fixup = 1;
	else
		*fixup += 1;

	sample = *fixup;

	/* Last word of the first sector. */
	ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));

	/* Save each sector tail in the fixup array and stamp the tail. */
	while (fn--) {
		*++fixup = *ptr;
		*ptr = sample;
		ptr += SECTOR_SIZE / sizeof(short);
	}
	return true;
}
/*
 * ntfs_fix_post_read - Remove fixups after reading from disk.
 *
 * Return: < 0 if error, 0 if ok, -E_NTFS_FIXUP if fixups need updating.
 */
int ntfs_fix_post_read(struct NTFS_RECORD_HEADER *rhdr, size_t bytes,
		       bool simple)
{
	int ret;
	u16 *fixup, *ptr;
	u16 sample, fo, fn;

	fo = le16_to_cpu(rhdr->fix_off);
	fn = simple ? ((bytes >> SECTOR_SHIFT) + 1) :
		      le16_to_cpu(rhdr->fix_num);

	/* Check errors. */
	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
	    fn * SECTOR_SIZE > bytes) {
		return -E_NTFS_CORRUPT;
	}

	/* Get fixup pointer. */
	fixup = Add2Ptr(rhdr, fo);
	sample = *fixup;
	ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));
	ret = 0;

	while (fn--) {
		/* Test current word. */
		if (*ptr != sample) {
			/* Fixup does not match! Is it a serious error? */
			ret = -E_NTFS_FIXUP;
		}

		/* Restore the saved sector tail. */
		*ptr = *++fixup;
		ptr += SECTOR_SIZE / sizeof(short);
	}

	return ret;
}
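
/*
 * Minimal usage sketch (illustrative only, not driver code): a caller that
 * has just read a raw record would validate it like this.  -E_NTFS_FIXUP
 * means "content is usable but the on-disk fixups should be rewritten";
 * ntfs_read_bh() below forwards exactly this value to its callers.
 *
 *	err = ntfs_fix_post_read(rhdr, bytes, false);
 *	if (err < 0 && err != -E_NTFS_FIXUP)
 *		return err;			// torn or corrupt record
 *	if (err == -E_NTFS_FIXUP)
 *		mark_record_dirty();		// hypothetical follow-up
 */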
/*
 * ntfs_extend_init - Load $Extend file.
 */
int ntfs_extend_init(struct ntfs_sb_info *sbi)
{
	int err;
	struct super_block *sb = sbi->sb;
	struct inode *inode, *inode2;
	struct MFT_REF ref;

	if (sbi->volume.major_ver < 3) {
		ntfs_notice(sb, "Skip $Extend: NTFS version is less than 3.");
		return 0;
	}

	ref.low = cpu_to_le32(MFT_REC_EXTEND);
	ref.high = 0;
	ref.seq = cpu_to_le16(MFT_REC_EXTEND);
	inode = ntfs_iget5(sb, &ref, &NAME_EXTEND);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		ntfs_err(sb, "Failed to load $Extend (%d).", err);
		inode = NULL;
		goto out;
	}

	/* If ntfs_iget5() reads from disk it never returns a bad inode. */
	if (!S_ISDIR(inode->i_mode)) {
		err = -EINVAL;
		goto out;
	}

	/* Try to find $ObjId. */
	inode2 = dir_search_u(inode, &NAME_OBJID, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		if (is_bad_inode(inode2)) {
			iput(inode2);
		} else {
			sbi->objid.ni = ntfs_i(inode2);
			sbi->objid_no = inode2->i_ino;
		}
	}

	/* Try to find $Quota. */
	inode2 = dir_search_u(inode, &NAME_QUOTA, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		sbi->quota_no = inode2->i_ino;
		iput(inode2);
	}

	/* Try to find $Reparse. */
	inode2 = dir_search_u(inode, &NAME_REPARSE, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		sbi->reparse.ni = ntfs_i(inode2);
		sbi->reparse_no = inode2->i_ino;
	}

	/* Try to find $UsnJrnl. */
	inode2 = dir_search_u(inode, &NAME_USNJRNL, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		sbi->usn_jrnl_no = inode2->i_ino;
		iput(inode2);
	}

	err = 0;
out:
	iput(inode);
	return err;
}
int ntfs_loadlog_and_replay(struct ntfs_inode *ni, struct ntfs_sb_info *sbi)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	bool initialized = false;
	struct MFT_REF ref;
	struct inode *inode;

	/* Check for 4GB. */
	if (ni->vfs_inode.i_size >= 0x100000000ull) {
		ntfs_err(sb, "\x24LogFile is larger than 4G.");
		err = -EINVAL;
		goto out;
	}

	sbi->flags |= NTFS_FLAGS_LOG_REPLAYING;

	ref.low = cpu_to_le32(MFT_REC_MFT);
	ref.high = 0;
	ref.seq = cpu_to_le16(1);

	inode = ntfs_iget5(sb, &ref, NULL);
	if (IS_ERR(inode))
		inode = NULL;

	if (!inode) {
		/* Try to use the MFT copy. */
		u64 t64 = sbi->mft.lbo;

		sbi->mft.lbo = sbi->mft.lbo2;
		inode = ntfs_iget5(sb, &ref, NULL);
		sbi->mft.lbo = t64;
		if (IS_ERR(inode))
			inode = NULL;
	}

	if (!inode) {
		err = -EINVAL;
		ntfs_err(sb, "Failed to load $MFT.");
		goto out;
	}

	sbi->mft.ni = ntfs_i(inode);

	/* $MFT/$LogFile should not contain an attribute list here. */
	err = ni_load_all_mi(sbi->mft.ni);
	if (!err)
		err = log_replay(ni, &initialized);

	iput(inode);
	sbi->mft.ni = NULL;

	sync_blockdev(sb->s_bdev);
	invalidate_bdev(sb->s_bdev);

	if (sbi->flags & NTFS_FLAGS_NEED_REPLAY) {
		err = 0;
		goto out;
	}

	if (sb_rdonly(sb) || !initialized)
		goto out;

	/* Fill LogFile with -1 if it is initialized. */
	err = ntfs_bio_fill_1(sbi, &ni->file.run);

out:
	sbi->flags &= ~NTFS_FLAGS_LOG_REPLAYING;

	return err;
}
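
/*
 * Note (assumption based on the calls above): log replay writes metadata
 * through buffer heads on the block device, so any pages cached before
 * replay may be stale afterwards.  Syncing and invalidating the whole
 * block device before normal mount processing continues ensures later
 * reads see the replayed state.
 */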
/*
 * ntfs_look_for_free_space - Look for free space in the cluster bitmap.
 */
int ntfs_look_for_free_space(struct ntfs_sb_info *sbi, CLST lcn, CLST len,
			     CLST *new_lcn, CLST *new_len,
			     enum ALLOCATE_OPT opt)
{
	int err;
	CLST alen;
	struct super_block *sb = sbi->sb;
	size_t alcn, zlen, zeroes, zlcn, zlen2, ztrim, new_zlen;
	struct wnd_bitmap *wnd = &sbi->used.bitmap;

	down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
	if (opt & ALLOCATE_MFT) {
		zlen = wnd_zone_len(wnd);

		if (!zlen) {
			err = ntfs_refresh_zone(sbi);
			if (err)
				goto up_write;

			zlen = wnd_zone_len(wnd);
		}

		if (!zlen) {
			ntfs_err(sbi->sb, "no free space to extend mft");
			err = -ENOSPC;
			goto up_write;
		}

		lcn = wnd_zone_bit(wnd);
		alen = min_t(CLST, len, zlen);

		wnd_zone_set(wnd, lcn + alen, zlen - alen);

		err = wnd_set_used(wnd, lcn, alen);
		if (err)
			goto up_write;

		alcn = lcn;
		goto space_found;
	}
	/*
	 * Because cluster 0 is always in use, lcn == 0 means that we should
	 * use the cached value of 'next_free_lcn' to improve performance.
	 */
	if (!lcn)
		lcn = sbi->used.next_free_lcn;

	if (lcn >= wnd->nbits)
		lcn = 0;

	alen = wnd_find(wnd, len, lcn, BITMAP_FIND_MARK_AS_USED, &alcn);
	if (alen)
		goto space_found;

	/* Try to use clusters from the MFT zone. */
	zlen = wnd_zone_len(wnd);
	zeroes = wnd_zeroes(wnd);

	/* Check for a too-big request. */
	if (len > zeroes + zlen || zlen <= NTFS_MIN_MFT_ZONE) {
		err = -ENOSPC;
		goto up_write;
	}

	/* How many clusters to cut from the zone. */
	zlcn = wnd_zone_bit(wnd);
	zlen2 = zlen >> 1;
	ztrim = clamp_val(len, zlen2, zlen);
	new_zlen = max_t(size_t, zlen - ztrim, NTFS_MIN_MFT_ZONE);

	wnd_zone_set(wnd, zlcn, new_zlen);

	/* Allocate contiguous clusters. */
	alen = wnd_find(wnd, len, 0,
			BITMAP_FIND_MARK_AS_USED | BITMAP_FIND_FULL, &alcn);
	if (!alen) {
		err = -ENOSPC;
		goto up_write;
	}

space_found:
	err = 0;
	*new_len = alen;
	*new_lcn = alcn;

	ntfs_unmap_meta(sb, alcn, alen);

	/* Set hint for next requests. */
	if (!(opt & ALLOCATE_MFT))
		sbi->used.next_free_lcn = alcn + alen;
up_write:
	up_write(&wnd->rw_lock);
	return err;
}
/*
 * ntfs_check_for_free_space
 *
 * Check if it is possible to allocate 'clen' clusters and 'mlen' MFT records.
 */
bool ntfs_check_for_free_space(struct ntfs_sb_info *sbi, CLST clen, CLST mlen)
{
	size_t free, zlen, avail;
	struct wnd_bitmap *wnd;

	wnd = &sbi->used.bitmap;
	down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
	free = wnd_zeroes(wnd);
	zlen = min_t(size_t, NTFS_MIN_MFT_ZONE, wnd_zone_len(wnd));
	up_read(&wnd->rw_lock);

	if (free < zlen + clen)
		return false;

	avail = free - (zlen + clen);

	wnd = &sbi->mft.bitmap;
	down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
	free = wnd_zeroes(wnd);
	zlen = wnd_zone_len(wnd);
	up_read(&wnd->rw_lock);

	if (free >= zlen + mlen)
		return true;

	return avail >= bytes_to_cluster(sbi, mlen << sbi->record_bits);
}
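
/*
 * Worked example (hypothetical numbers): with 1000 free clusters and an
 * MFT zone of at least NTFS_MIN_MFT_ZONE (100) clusters, a request for
 * clen = 200 leaves avail = 1000 - (100 + 200) = 700 clusters.  If the
 * MFT bitmap then reports too few free records, the final check asks
 * whether those 700 clusters can still hold 'mlen' new records, i.e.
 * mlen << record_bits bytes rounded up to whole clusters.
 */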
/*
 * ntfs_extend_mft - Allocate additional MFT records.
 *
 * sbi->mft.bitmap is locked for write.
 *
 * NOTE: recursive:
 *	ntfs_look_free_mft ->
 *	ntfs_extend_mft ->
 *	attr_set_size ->
 *	ni_insert_nonresident ->
 *	ni_insert_attr ->
 *	ni_ins_attr_ext ->
 *	ntfs_look_free_mft ->
 *	ntfs_extend_mft
 *
 * To avoid recursion, always allocate space for two new MFT records;
 * see attrib.c: "at least two MFT to avoid recursive loop".
 */
static int ntfs_extend_mft(struct ntfs_sb_info *sbi)
{
	int err;
	struct ntfs_inode *ni = sbi->mft.ni;
	size_t new_mft_total;
	u64 new_mft_bytes, new_bitmap_bytes;
	struct ATTRIB *attr;
	struct wnd_bitmap *wnd = &sbi->mft.bitmap;

	new_mft_total = ALIGN(wnd->nbits + NTFS_MFT_INCREASE_STEP, 128);
	new_mft_bytes = (u64)new_mft_total << sbi->record_bits;

	/* Step 1: Resize $MFT::DATA. */
	down_write(&ni->file.run_lock);
	err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
			    new_mft_bytes, NULL, false, &attr);

	if (err) {
		up_write(&ni->file.run_lock);
		goto out;
	}

	attr->nres.valid_size = attr->nres.data_size;
	new_mft_total = le64_to_cpu(attr->nres.alloc_size) >> sbi->record_bits;
	ni->mi.dirty = true;

	/* Step 2: Resize $MFT::BITMAP. */
	new_bitmap_bytes = bitmap_size(new_mft_total);

	err = attr_set_size(ni, ATTR_BITMAP, NULL, 0, &sbi->mft.bitmap.run,
			    new_bitmap_bytes, &new_bitmap_bytes, true, NULL);

	/* Refresh MFT Zone if necessary. */
	down_write_nested(&sbi->used.bitmap.rw_lock, BITMAP_MUTEX_CLUSTERS);

	ntfs_refresh_zone(sbi);

	up_write(&sbi->used.bitmap.rw_lock);
	up_write(&ni->file.run_lock);

	if (err)
		goto out;

	err = wnd_extend(wnd, new_mft_total);
	if (err)
		goto out;

	ntfs_clear_mft_tail(sbi, sbi->mft.used, new_mft_total);

	err = _ni_write_inode(&ni->vfs_inode, 0);
out:
	return err;
}
/*
 * ntfs_look_free_mft - Look for a free MFT record.
 */
int ntfs_look_free_mft(struct ntfs_sb_info *sbi, CLST *rno, bool mft,
		       struct ntfs_inode *ni, struct mft_inode **mi)
{
	int err = 0;
	size_t zbit, zlen, from, to, fr;
	size_t mft_total;
	struct MFT_REF ref;
	struct super_block *sb = sbi->sb;
	struct wnd_bitmap *wnd = &sbi->mft.bitmap;
	u32 ir;

	static_assert(sizeof(sbi->mft.reserved_bitmap) * 8 >=
		      MFT_REC_FREE - MFT_REC_RESERVED);

	/* When 'mft' is true the caller already holds the MFT bitmap lock. */
	if (!mft)
		down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);

	zlen = wnd_zone_len(wnd);

	/* Always reserve space for MFT. */
	if (zlen) {
		if (mft) {
			zbit = wnd_zone_bit(wnd);
			*rno = zbit;
			wnd_zone_set(wnd, zbit + 1, zlen - 1);
		}
		goto found;
	}

	/* No MFT zone. Find the nearest to '0' free MFT. */
	if (!wnd_find(wnd, 1, MFT_REC_FREE, 0, &zbit)) {
		/* Resize MFT. */
		mft_total = wnd->nbits;

		err = ntfs_extend_mft(sbi);
		if (!err) {
			zbit = mft_total;
			goto reserve_mft;
		}

		if (!mft || MFT_REC_FREE == sbi->mft.next_reserved)
			goto out;

		err = 0;

		/*
		 * Look for a free record in the reserved area [11-16) ==
		 * [MFT_REC_RESERVED, MFT_REC_FREE); the MFT bitmap always
		 * marks it as used.
		 */
		if (!sbi->mft.reserved_bitmap) {
			/* Once per session create internal bitmap for 5 bits. */
			sbi->mft.reserved_bitmap = 0xFF;

			ref.high = 0;
			for (ir = MFT_REC_RESERVED; ir < MFT_REC_FREE; ir++) {
				struct inode *i;
				struct ntfs_inode *ni;
				struct MFT_REC *mrec;

				ref.low = cpu_to_le32(ir);
				ref.seq = cpu_to_le16(ir);

				i = ntfs_iget5(sb, &ref, NULL);
				if (IS_ERR(i)) {
next:
					ntfs_notice(
						sb,
						"Invalid reserved record %x",
						ref.low);
					continue;
				}
				if (is_bad_inode(i)) {
					iput(i);
					goto next;
				}

				ni = ntfs_i(i);
				mrec = ni->mi.mrec;

				if (!is_rec_base(mrec))
					goto next;

				if (mrec->hard_links)
					goto next;

				if (ni_find_attr(ni, NULL, NULL, ATTR_NAME,
						 NULL, 0, NULL, NULL))
					goto next;

				__clear_bit(ir - MFT_REC_RESERVED,
					    &sbi->mft.reserved_bitmap);
			}
		}

		/* Scan 5 bits for zero. Bit 0 == MFT_REC_RESERVED */
		zbit = find_next_zero_bit(&sbi->mft.reserved_bitmap,
					  MFT_REC_FREE, MFT_REC_RESERVED);
		if (zbit >= MFT_REC_FREE) {
			sbi->mft.next_reserved = MFT_REC_FREE;
			goto out;
		}

		zlen = 1;
		sbi->mft.next_reserved = zbit;
	} else {
reserve_mft:
		zlen = zbit == MFT_REC_FREE ? (MFT_REC_USER - MFT_REC_FREE) : 4;
		if (zbit + zlen > wnd->nbits)
			zlen = wnd->nbits - zbit;

		while (zlen > 1 && !wnd_is_free(wnd, zbit, zlen))
			zlen -= 1;

		/* [zbit, zbit + zlen) will be used for MFT itself. */
		from = sbi->mft.used;
		if (from < zbit)
			from = zbit;
		to = zbit + zlen;
		if (from < to) {
			ntfs_clear_mft_tail(sbi, from, to);
			sbi->mft.used = to;
		}
	}

	if (mft) {
		*rno = zbit;
		zbit += 1;
		zlen -= 1;
	}

	wnd_zone_set(wnd, zbit, zlen);

found:
	if (!mft) {
		/* The request to get a record for general purpose. */
		if (sbi->mft.next_free < MFT_REC_USER)
			sbi->mft.next_free = MFT_REC_USER;

		for (;;) {
			if (sbi->mft.next_free >= sbi->mft.bitmap.nbits) {
			} else if (!wnd_find(wnd, 1, MFT_REC_USER, 0, &fr)) {
				sbi->mft.next_free = sbi->mft.bitmap.nbits;
			} else {
				*rno = fr;
				sbi->mft.next_free = *rno + 1;
				break;
			}

			err = ntfs_extend_mft(sbi);
			if (err)
				goto out;
		}
	}

	if (ni && !ni_add_subrecord(ni, *rno, mi)) {
		err = -ENOMEM;
		goto out;
	}

	/* We have found a record that is not reserved for the next MFT. */
	if (*rno >= MFT_REC_FREE)
		wnd_set_used(wnd, *rno, 1);
	else if (*rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited)
		__set_bit(*rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);

out:
	if (!mft)
		up_write(&wnd->rw_lock);

	return err;
}
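
/*
 * Reserved record map: records [MFT_REC_RESERVED, MFT_REC_FREE), i.e.
 * 11..15, are always marked used in the MFT bitmap.  The function above
 * therefore probes each of them once per mount and caches the result in
 * sbi->mft.reserved_bitmap, so later "MFT extension" requests can reuse a
 * genuinely free reserved record without touching the disk again.
 */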
/*
 * ntfs_mark_rec_free - Mark record as free.
 *
 * is_mft - true if we are changing the MFT itself.
 */
void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno, bool is_mft)
{
	struct wnd_bitmap *wnd = &sbi->mft.bitmap;

	if (!is_mft)
		down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
	if (rno >= wnd->nbits)
		goto out;

	if (rno >= MFT_REC_FREE) {
		if (!wnd_is_used(wnd, rno, 1))
			ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
		else
			wnd_set_free(wnd, rno, 1);
	} else if (rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited) {
		__clear_bit(rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
	}

	if (rno < wnd_zone_bit(wnd))
		wnd_zone_set(wnd, rno, 1);
	else if (rno < sbi->mft.next_free && rno >= MFT_REC_USER)
		sbi->mft.next_free = rno;

out:
	if (!is_mft)
		up_write(&wnd->rw_lock);
}
/*
 * ntfs_clear_mft_tail - Format empty records [from, to).
 *
 * sbi->mft.bitmap is locked for write.
 */
int ntfs_clear_mft_tail(struct ntfs_sb_info *sbi, size_t from, size_t to)
{
	int err;
	u32 rs;
	u64 vbo;
	struct runs_tree *run;
	struct ntfs_inode *ni;

	if (from >= to)
		return 0;

	rs = sbi->record_size;
	ni = sbi->mft.ni;
	run = &ni->file.run;

	down_read(&ni->file.run_lock);
	vbo = (u64)from * rs;
	for (; from < to; from++, vbo += rs) {
		struct ntfs_buffers nb;

		err = ntfs_get_bh(sbi, run, vbo, rs, &nb);
		if (err)
			goto out;

		err = ntfs_write_bh(sbi, &sbi->new_rec->rhdr, &nb, 0);
		nb_put(&nb);
		if (err)
			goto out;
	}

	err = 0;
out:
	sbi->mft.used = from;
	up_read(&ni->file.run_lock);
	return err;
}
/*
 * ntfs_refresh_zone - Refresh MFT zone.
 *
 * sbi->used.bitmap is locked for rw.
 * sbi->mft.bitmap is locked for write.
 * sbi->mft.ni->file.run_lock is locked for write.
 */
int ntfs_refresh_zone(struct ntfs_sb_info *sbi)
{
	CLST lcn, vcn, len;
	size_t lcn_s, zlen;
	struct wnd_bitmap *wnd = &sbi->used.bitmap;
	struct ntfs_inode *ni = sbi->mft.ni;

	/* Do not change anything unless we have a non-empty MFT zone. */
	if (wnd_zone_len(wnd))
		return 0;

	vcn = bytes_to_cluster(sbi,
			       (u64)sbi->mft.bitmap.nbits << sbi->record_bits);

	if (!run_lookup_entry(&ni->file.run, vcn - 1, &lcn, &len, NULL))
		lcn = SPARSE_LCN;

	/* We should always find the last LCN for MFT. */
	if (lcn == SPARSE_LCN)
		return -EINVAL;

	lcn_s = lcn + 1;

	/* Try to allocate clusters after the last MFT run. */
	zlen = wnd_find(wnd, sbi->zone_max, lcn_s, 0, &lcn_s);
	wnd_zone_set(wnd, lcn_s, zlen);

	return 0;
}
/*
 * ntfs_update_mftmirr - Update $MFTMirr data.
 */
void ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
{
	int err;
	struct super_block *sb = sbi->sb;
	u32 blocksize, bytes;
	sector_t block1, block2;

	/*
	 * sb can be NULL here. In this case sbi->flags should be 0 too.
	 */
	if (!sb || !(sbi->flags & NTFS_FLAGS_MFTMIRR))
		return;

	blocksize = sb->s_blocksize;
	bytes = sbi->mft.recs_mirr << sbi->record_bits;
	block1 = sbi->mft.lbo >> sb->s_blocksize_bits;
	block2 = sbi->mft.lbo2 >> sb->s_blocksize_bits;

	for (; bytes >= blocksize; bytes -= blocksize) {
		struct buffer_head *bh1, *bh2;

		bh1 = sb_bread(sb, block1++);
		if (!bh1)
			return;

		bh2 = sb_getblk(sb, block2++);
		if (!bh2) {
			put_bh(bh1);
			return;
		}

		if (buffer_locked(bh2))
			__wait_on_buffer(bh2);

		lock_buffer(bh2);
		memcpy(bh2->b_data, bh1->b_data, blocksize);
		set_buffer_uptodate(bh2);
		mark_buffer_dirty(bh2);
		unlock_buffer(bh2);

		put_bh(bh1);

		err = wait ? sync_dirty_buffer(bh2) : 0;
		put_bh(bh2);
		if (err)
			return;
	}

	sbi->flags &= ~NTFS_FLAGS_MFTMIRR;
}
/*
 * ntfs_bad_inode
 *
 * Marks the inode as bad and marks the fs as 'dirty'.
 */
void ntfs_bad_inode(struct inode *inode, const char *hint)
{
	struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;

	ntfs_inode_err(inode, "%s", hint);
	make_bad_inode(inode);
	ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
}
/*
 * ntfs_set_state
 *
 * Mount:      ntfs_set_state(NTFS_DIRTY_DIRTY)
 * Umount:     ntfs_set_state(NTFS_DIRTY_CLEAR)
 * NTFS error: ntfs_set_state(NTFS_DIRTY_ERROR)
 */
int ntfs_set_state(struct ntfs_sb_info *sbi, enum NTFS_DIRTY_FLAGS dirty)
{
	int err;
	struct ATTRIB *attr;
	struct VOLUME_INFO *info;
	struct mft_inode *mi;
	struct ntfs_inode *ni;
	__le16 info_flags;

	/*
	 * Do not change state if fs was really dirty.
	 * Do not change state if fs is already dirty (or clear).
	 * Do not change anything if mounted read only.
	 */
	if (sbi->volume.real_dirty || sb_rdonly(sbi->sb))
		return 0;

	/* Check cached value. */
	if ((dirty == NTFS_DIRTY_CLEAR ? 0 : VOLUME_FLAG_DIRTY) ==
	    (sbi->volume.flags & VOLUME_FLAG_DIRTY))
		return 0;

	ni = sbi->volume.ni;
	if (!ni)
		return -EINVAL;

	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_DIRTY);

	attr = ni_find_attr(ni, NULL, NULL, ATTR_VOL_INFO, NULL, 0, NULL, &mi);
	if (!attr) {
		err = -EINVAL;
		goto out;
	}

	info = resident_data_ex(attr, SIZEOF_ATTRIBUTE_VOLUME_INFO);
	if (!info) {
		err = -EINVAL;
		goto out;
	}

	info_flags = info->flags;

	switch (dirty) {
	case NTFS_DIRTY_ERROR:
		ntfs_notice(sbi->sb, "Mark volume as dirty due to NTFS errors");
		sbi->volume.real_dirty = true;
		fallthrough;
	case NTFS_DIRTY_DIRTY:
		info->flags |= VOLUME_FLAG_DIRTY;
		break;
	case NTFS_DIRTY_CLEAR:
		info->flags &= ~VOLUME_FLAG_DIRTY;
		break;
	}
	/* Cache current volume flags. */
	if (info_flags != info->flags) {
		sbi->volume.flags = info->flags;
		mi->dirty = true;
	}
	err = 0;

out:
	ni_unlock(ni);
	if (err)
		return err;

	mark_inode_dirty(&ni->vfs_inode);
	/* verify(!ntfs_update_mftmirr()); */

	/*
	 * If we used wait=1, sync_inode_metadata waits for the io for the
	 * inode to finish.  It hangs when media is removed, so wait=0 is
	 * sent down to sync_inode_metadata and filemap_fdatawrite is used
	 * for the data blocks.
	 */
	err = sync_inode_metadata(&ni->vfs_inode, 0);
	if (!err)
		err = filemap_fdatawrite(ni->vfs_inode.i_mapping);

	return err;
}
/*
 * security_hash - Calculates a hash of security descriptor.
 */
static inline __le32 security_hash(const void *sd, size_t bytes)
{
	u32 hash = 0;
	const __le32 *ptr = sd;

	bytes >>= 2;
	while (bytes--)
		hash = ((hash >> 0x1D) | (hash << 3)) + le32_to_cpu(*ptr++);
	return cpu_to_le32(hash);
}
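
/*
 * The same hash in plain user-space C, for reference (illustrative only;
 * assumes @sd can be read as whole little-endian dwords, which matches the
 * 16-byte alignment $Secure uses for stored descriptors):
 *
 *	uint32_t hash = 0;
 *	const uint32_t *p = sd;
 *	for (size_t i = 0; i < bytes / 4; i++, p++)
 *		hash = ((hash >> 29) | (hash << 3)) + le32toh(*p);
 *
 * i.e. rotate the accumulator left by 3 bits, then add the next dword.
 */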
int ntfs_sb_read(struct super_block *sb, u64 lbo, size_t bytes, void *buffer)
{
	struct block_device *bdev = sb->s_bdev;
	u32 blocksize = sb->s_blocksize;
	u64 block = lbo >> sb->s_blocksize_bits;
	u32 off = lbo & (blocksize - 1);
	u32 op = blocksize - off;

	for (; bytes; block += 1, off = 0, op = blocksize) {
		struct buffer_head *bh = __bread(bdev, block, blocksize);

		if (!bh)
			return -EIO;

		if (op > bytes)
			op = bytes;

		memcpy(buffer, bh->b_data + off, op);

		put_bh(bh);

		bytes -= op;
		buffer = Add2Ptr(buffer, op);
	}

	return 0;
}
int ntfs_sb_write(struct super_block *sb, u64 lbo, size_t bytes,
		  const void *buf, int wait)
{
	u32 blocksize = sb->s_blocksize;
	struct block_device *bdev = sb->s_bdev;
	sector_t block = lbo >> sb->s_blocksize_bits;
	u32 off = lbo & (blocksize - 1);
	u32 op = blocksize - off;
	struct buffer_head *bh;

	if (!wait && (sb->s_flags & SB_SYNCHRONOUS))
		wait = 1;

	for (; bytes; block += 1, off = 0, op = blocksize) {
		if (op > bytes)
			op = bytes;

		if (op < blocksize) {
			/* Partial block: read-modify-write. */
			bh = __bread(bdev, block, blocksize);
			if (!bh) {
				ntfs_err(sb, "failed to read block %llx",
					 (u64)block);
				return -EIO;
			}
		} else {
			bh = __getblk(bdev, block, blocksize);
			if (!bh)
				return -ENOMEM;
		}

		if (buffer_locked(bh))
			__wait_on_buffer(bh);

		lock_buffer(bh);
		if (buf) {
			memcpy(bh->b_data + off, buf, op);
			buf = Add2Ptr(buf, op);
		} else {
			memset(bh->b_data + off, -1, op);
		}

		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);

		if (wait) {
			int err = sync_dirty_buffer(bh);

			if (err) {
				ntfs_err(
					sb,
					"failed to sync buffer at block %llx, error %d",
					(u64)block, err);
				put_bh(bh);
				return err;
			}
		}

		put_bh(bh);

		bytes -= op;
	}
	return 0;
}
int ntfs_sb_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		      u64 vbo, const void *buf, size_t bytes, int sync)
{
	struct super_block *sb = sbi->sb;
	u8 cluster_bits = sbi->cluster_bits;
	u32 off = vbo & sbi->cluster_mask;
	CLST lcn, clen, vcn = vbo >> cluster_bits, vcn_next;
	u64 lbo, len;
	size_t idx;

	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
		return -ENOENT;

	if (lcn == SPARSE_LCN)
		return -EINVAL;

	lbo = ((u64)lcn << cluster_bits) + off;
	len = ((u64)clen << cluster_bits) - off;

	for (;;) {
		u32 op = min_t(u64, len, bytes);
		int err = ntfs_sb_write(sb, lbo, op, buf, sync);

		if (err)
			return err;

		bytes -= op;
		if (!bytes)
			break;

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next)
			return -ENOENT;

		if (lcn == SPARSE_LCN)
			return -EINVAL;

		if (buf)
			buf = Add2Ptr(buf, op);

		lbo = ((u64)lcn << cluster_bits);
		len = ((u64)clen << cluster_bits);
	}

	return 0;
}
struct buffer_head *ntfs_bread_run(struct ntfs_sb_info *sbi,
				   const struct runs_tree *run, u64 vbo)
{
	struct super_block *sb = sbi->sb;
	u8 cluster_bits = sbi->cluster_bits;
	CLST lcn;
	u64 lbo;

	if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, NULL, NULL))
		return ERR_PTR(-ENOENT);

	lbo = ((u64)lcn << cluster_bits) + (vbo & sbi->cluster_mask);

	return ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
}
int ntfs_read_run_nb(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		     u64 vbo, void *buf, u32 bytes, struct ntfs_buffers *nb)
{
	int err;
	struct super_block *sb = sbi->sb;
	u32 blocksize = sb->s_blocksize;
	u8 cluster_bits = sbi->cluster_bits;
	u32 off = vbo & sbi->cluster_mask;
	u32 nbh = 0;
	CLST vcn_next, vcn = vbo >> cluster_bits;
	CLST lcn, clen;
	u64 lbo, len;
	size_t idx;
	struct buffer_head *bh;

	if (!run) {
		/* First reading of $Volume + $MFTMirr + $LogFile goes here. */
		if (vbo > MFT_REC_VOL * sbi->record_size) {
			err = -ENOENT;
			goto out;
		}

		/* Use absolute boot's 'MFTCluster' to read record. */
		lbo = vbo + sbi->mft.lbo;
		len = sbi->record_size;
	} else if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
		err = -ENOENT;
		goto out;
	} else {
		if (lcn == SPARSE_LCN) {
			err = -EINVAL;
			goto out;
		}

		lbo = ((u64)lcn << cluster_bits) + off;
		len = ((u64)clen << cluster_bits) - off;
	}

	off = lbo & (blocksize - 1);
	if (nb) {
		nb->off = off;
		nb->bytes = bytes;
	}

	for (;;) {
		u32 len32 = len >= bytes ? bytes : len;
		sector_t block = lbo >> sb->s_blocksize_bits;

		do {
			u32 op = blocksize - off;

			if (op > len32)
				op = len32;

			bh = ntfs_bread(sb, block);
			if (!bh) {
				err = -EIO;
				goto out;
			}

			if (buf) {
				memcpy(buf, bh->b_data + off, op);
				buf = Add2Ptr(buf, op);
			}

			if (!nb) {
				put_bh(bh);
			} else if (nbh >= ARRAY_SIZE(nb->bh)) {
				err = -EINVAL;
				goto out;
			} else {
				nb->bh[nbh++] = bh;
				nb->nbufs = nbh;
			}

			bytes -= op;
			if (!bytes)
				return 0;
			len32 -= op;
			block += 1;
			off = 0;
		} while (len32);

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next) {
			err = -ENOENT;
			goto out;
		}

		if (lcn == SPARSE_LCN) {
			err = -EINVAL;
			goto out;
		}

		lbo = ((u64)lcn << cluster_bits);
		len = ((u64)clen << cluster_bits);
	}

out:
	if (!nbh)
		return err;

	while (nbh) {
		put_bh(nb->bh[--nbh]);
		nb->bh[nbh] = NULL;
	}

	nb->nbufs = 0;
	return err;
}
/*
 * ntfs_read_bh
 *
 * Return: < 0 if error, 0 if ok, -E_NTFS_FIXUP if need to update fixups.
 */
int ntfs_read_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
		 struct NTFS_RECORD_HEADER *rhdr, u32 bytes,
		 struct ntfs_buffers *nb)
{
	int err = ntfs_read_run_nb(sbi, run, vbo, rhdr, bytes, nb);

	if (err)
		return err;
	return ntfs_fix_post_read(rhdr, nb->bytes, true);
}
int ntfs_get_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
		u32 bytes, struct ntfs_buffers *nb)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	u32 blocksize = sb->s_blocksize;
	u8 cluster_bits = sbi->cluster_bits;
	CLST vcn_next, vcn = vbo >> cluster_bits;
	u32 off;
	u32 nbh = 0;
	CLST lcn, clen;
	u64 lbo, len;
	size_t idx;

	nb->bytes = bytes;

	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
		err = -ENOENT;
		goto out;
	}

	off = vbo & sbi->cluster_mask;
	lbo = ((u64)lcn << cluster_bits) + off;
	len = ((u64)clen << cluster_bits) - off;

	nb->off = off = lbo & (blocksize - 1);

	for (;;) {
		u32 len32 = min_t(u64, len, bytes);
		sector_t block = lbo >> sb->s_blocksize_bits;

		do {
			u32 op;
			struct buffer_head *bh;

			if (nbh >= ARRAY_SIZE(nb->bh)) {
				err = -EINVAL;
				goto out;
			}

			op = blocksize - off;
			if (op > len32)
				op = len32;

			if (op == blocksize) {
				/* Whole block: no need to read it first. */
				bh = sb_getblk(sb, block);
				if (!bh) {
					err = -ENOMEM;
					goto out;
				}
				if (buffer_locked(bh))
					__wait_on_buffer(bh);
				set_buffer_uptodate(bh);
			} else {
				bh = ntfs_bread(sb, block);
				if (!bh) {
					err = -EIO;
					goto out;
				}
			}

			nb->bh[nbh++] = bh;
			bytes -= op;
			if (!bytes) {
				nb->nbufs = nbh;
				return 0;
			}

			block += 1;
			len32 -= op;
			off = 0;
		} while (len32);

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next) {
			err = -ENOENT;
			goto out;
		}

		lbo = ((u64)lcn << cluster_bits);
		len = ((u64)clen << cluster_bits);
	}

out:
	while (nbh) {
		put_bh(nb->bh[--nbh]);
		nb->bh[nbh] = NULL;
	}

	nb->nbufs = 0;
	return err;
}
int ntfs_write_bh(struct ntfs_sb_info *sbi, struct NTFS_RECORD_HEADER *rhdr,
		  struct ntfs_buffers *nb, int sync)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	u32 block_size = sb->s_blocksize;
	u32 bytes = nb->bytes;
	u32 off = nb->off;
	u16 fo = le16_to_cpu(rhdr->fix_off);
	u16 fn = le16_to_cpu(rhdr->fix_num);
	u32 idx;
	__le16 *fixup;
	__le16 sample;

	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
	    fn * SECTOR_SIZE > bytes) {
		return -EINVAL;
	}

	for (idx = 0; bytes && idx < nb->nbufs; idx += 1, off = 0) {
		u32 op = block_size - off;
		char *bh_data;
		struct buffer_head *bh = nb->bh[idx];
		__le16 *ptr, *end_data;

		if (op > bytes)
			op = bytes;

		if (buffer_locked(bh))
			__wait_on_buffer(bh);

		lock_buffer(bh);

		bh_data = bh->b_data + off;
		end_data = Add2Ptr(bh_data, op);
		memcpy(bh_data, rhdr, op);

		if (!idx) {
			u16 t16;

			/* Advance the update sequence number in place. */
			fixup = Add2Ptr(bh_data, fo);
			sample = *fixup;
			t16 = le16_to_cpu(sample);
			if (t16 >= 0x7FFF) {
				sample = *fixup = cpu_to_le16(1);
			} else {
				sample = cpu_to_le16(t16 + 1);
				*fixup = sample;
			}

			/* Keep the in-memory header in sync. */
			*(__le16 *)Add2Ptr(rhdr, fo) = sample;
		}

		ptr = Add2Ptr(bh_data, SECTOR_SIZE - sizeof(short));

		do {
			*++fixup = *ptr;
			*ptr = sample;
			ptr += SECTOR_SIZE / sizeof(short);
		} while (ptr < end_data);

		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);

		if (sync) {
			int err2 = sync_dirty_buffer(bh);

			if (!err && err2)
				err = err2;
		}

		bytes -= op;
		rhdr = Add2Ptr(rhdr, op);
	}

	return err;
}
/*
 * ntfs_bio_pages - Read/write pages from/to disk.
 */
int ntfs_bio_pages(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		   struct page **pages, u32 nr_pages, u64 vbo, u32 bytes,
		   enum req_op op)
{
	int err = 0;
	struct bio *new, *bio = NULL;
	struct super_block *sb = sbi->sb;
	struct block_device *bdev = sb->s_bdev;
	struct page *page;
	u8 cluster_bits = sbi->cluster_bits;
	CLST lcn, clen, vcn, vcn_next;
	u32 add, off, page_idx;
	u64 lbo, len;
	size_t run_idx;
	struct blk_plug plug;

	if (!bytes)
		return 0;

	blk_start_plug(&plug);

	/* Align vbo and bytes to be 512-byte aligned. */
	lbo = (vbo + bytes + 511) & ~511ull;
	vbo = vbo & ~511ull;
	bytes = lbo - vbo;

	vcn = vbo >> cluster_bits;
	if (!run_lookup_entry(run, vcn, &lcn, &clen, &run_idx)) {
		err = -ENOENT;
		goto out;
	}
	off = vbo & sbi->cluster_mask;
	page_idx = 0;
	page = pages[0];

	for (;;) {
		lbo = ((u64)lcn << cluster_bits) + off;
		len = ((u64)clen << cluster_bits) - off;
new_bio:
		new = bio_alloc(bdev, nr_pages - page_idx, op, GFP_NOFS);
		if (bio) {
			bio_chain(bio, new);
			submit_bio(bio);
		}
		bio = new;
		bio->bi_iter.bi_sector = lbo >> 9;

		while (len) {
			off = vbo & (PAGE_SIZE - 1);
			add = off + len > PAGE_SIZE ? (PAGE_SIZE - off) : len;

			if (bio_add_page(bio, page, add, off) < add)
				goto new_bio;

			if (bytes <= add)
				goto out;
			bytes -= add;
			vbo += add;

			if (add + off == PAGE_SIZE) {
				page_idx += 1;
				if (WARN_ON(page_idx >= nr_pages)) {
					err = -EINVAL;
					goto out;
				}
				page = pages[page_idx];
			}

			if (len <= add)
				break;
			len -= add;
			lbo += add;
		}

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++run_idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next) {
			err = -ENOENT;
			goto out;
		}
		off = 0;
	}
out:
	if (bio) {
		if (!err)
			err = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return err;
}
/*
 * ntfs_bio_fill_1 - Helper for ntfs_loadlog_and_replay().
 *
 * Fill the on-disk logfile range with -1; this means an empty logfile.
 */
int ntfs_bio_fill_1(struct ntfs_sb_info *sbi, const struct runs_tree *run)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	struct block_device *bdev = sb->s_bdev;
	u8 cluster_bits = sbi->cluster_bits;
	struct bio *new, *bio = NULL;
	CLST lcn, clen;
	u64 lbo, len;
	size_t run_idx;
	struct page *fill;
	void *kaddr;
	struct blk_plug plug;

	fill = alloc_page(GFP_KERNEL);
	if (!fill)
		return -ENOMEM;

	kaddr = kmap_atomic(fill);
	memset(kaddr, -1, PAGE_SIZE);
	kunmap_atomic(kaddr);
	flush_dcache_page(fill);

	if (!run_lookup_entry(run, 0, &lcn, &clen, &run_idx)) {
		err = -ENOENT;
		goto out;
	}

	/*
	 * TODO: Try blkdev_issue_write_same.
	 */
	blk_start_plug(&plug);
	do {
		lbo = (u64)lcn << cluster_bits;
		len = (u64)clen << cluster_bits;
new_bio:
		new = bio_alloc(bdev, BIO_MAX_VECS, REQ_OP_WRITE, GFP_NOFS);
		if (bio) {
			bio_chain(bio, new);
			submit_bio(bio);
		}
		bio = new;
		bio->bi_iter.bi_sector = lbo >> 9;

		for (;;) {
			u32 add = len > PAGE_SIZE ? PAGE_SIZE : len;

			if (bio_add_page(bio, fill, add, 0) < add)
				goto new_bio;

			lbo += add;
			if (len <= add)
				break;
			len -= add;
		}
	} while (run_get_entry(run, ++run_idx, NULL, &lcn, &clen));

	if (!err)
		err = submit_bio_wait(bio);
	bio_put(bio);

	blk_finish_plug(&plug);
out:
	put_page(fill);

	return err;
}
int ntfs_vbo_to_lbo(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		    u64 vbo, u64 *lbo, u64 *bytes)
{
	u32 off;
	CLST lcn, len;
	u8 cluster_bits = sbi->cluster_bits;

	if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, &len, NULL))
		return -ENOENT;

	off = vbo & sbi->cluster_mask;
	*lbo = lcn == SPARSE_LCN ? -1 : (((u64)lcn << cluster_bits) + off);
	*bytes = ((u64)len << cluster_bits) - off;

	return 0;
}
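
/*
 * Mapping example (hypothetical numbers): with 4K clusters
 * (cluster_bits == 12), vbo = 0x5123 lies in vcn = 5 at offset 0x123.
 * If the run maps vcn 5 to lcn 0x80 with len 3, the result is
 * *lbo = (0x80 << 12) + 0x123 = 0x80123 and
 * *bytes = (3 << 12) - 0x123 = 0x2EDD, the contiguous byte count left in
 * this run.
 */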
struct ntfs_inode *ntfs_new_inode(struct ntfs_sb_info *sbi, CLST rno,
				  enum RECORD_FLAG flag)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	struct inode *inode = new_inode(sb);
	struct ntfs_inode *ni;

	if (!inode)
		return ERR_PTR(-ENOMEM);

	ni = ntfs_i(inode);

	err = mi_format_new(&ni->mi, sbi, rno, flag, false);
	if (err)
		goto out;

	inode->i_ino = rno;
	if (insert_inode_locked(inode) < 0) {
		err = -EIO;
		goto out;
	}

out:
	if (err) {
		make_bad_inode(inode);
		iput(inode);
		ni = ERR_PTR(err);
	}
	return ni;
}
/*
 * O:BAG:BAD:(A;OICI;FA;;;WD)
 * Owner S-1-5-32-544 (Administrators)
 * Group S-1-5-32-544 (Administrators)
 * ACE: allow S-1-1-0 (Everyone) with FILE_ALL_ACCESS
 */
const u8 s_default_security[] __aligned(8) = {
	0x01, 0x00, 0x04, 0x80, 0x30, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x02, 0x00, 0x1C, 0x00,
	0x01, 0x00, 0x00, 0x00, 0x00, 0x03, 0x14, 0x00, 0xFF, 0x01, 0x1F, 0x00,
	0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
	0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x20, 0x00, 0x00, 0x00,
	0x20, 0x02, 0x00, 0x00, 0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
	0x20, 0x00, 0x00, 0x00, 0x20, 0x02, 0x00, 0x00,
};

static_assert(sizeof(s_default_security) == 0x50);
static inline u32 sid_length(const struct SID *sid)
{
	return struct_size(sid, SubAuthority, sid->SubAuthorityCount);
}
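
/*
 * A SID is a variable-length structure: an 8-byte header (revision,
 * SubAuthorityCount, 6-byte identifier authority) followed by
 * SubAuthorityCount 32-bit sub-authorities.  For example S-1-5-32-544
 * (Administrators) has two sub-authorities (32 and 544), so sid_length()
 * yields 8 + 2 * 4 = 16 bytes.
 */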
/*
 * is_acl_valid
 *
 * Thanks to Mark Harmstone for the idea.
 */
static bool is_acl_valid(const struct ACL *acl, u32 len)
{
	const struct ACE_HEADER *ace;
	u32 i;
	u16 ace_count, ace_size;

	if (acl->AclRevision != ACL_REVISION &&
	    acl->AclRevision != ACL_REVISION_DS) {
		/*
		 * This value should be ACL_REVISION, unless the ACL contains an
		 * object-specific ACE, in which case this value must be ACL_REVISION_DS.
		 * All ACEs in an ACL must be at the same revision level.
		 */
		return false;
	}

	if (acl->Sbz1)
		return false;

	if (le16_to_cpu(acl->AclSize) > len)
		return false;

	if (acl->Sbz2)
		return false;

	len -= sizeof(struct ACL);
	ace = (struct ACE_HEADER *)&acl[1];
	ace_count = le16_to_cpu(acl->AceCount);

	for (i = 0; i < ace_count; i++) {
		if (len < sizeof(struct ACE_HEADER))
			return false;

		ace_size = le16_to_cpu(ace->AceSize);
		if (len < ace_size)
			return false;

		len -= ace_size;
		ace = Add2Ptr(ace, ace_size);
	}

	return true;
}
bool is_sd_valid(const struct SECURITY_DESCRIPTOR_RELATIVE *sd, u32 len)
{
	u32 sd_owner, sd_group, sd_sacl, sd_dacl;

	if (len < sizeof(struct SECURITY_DESCRIPTOR_RELATIVE))
		return false;

	if (sd->Revision != 1)
		return false;

	if (sd->Sbz1)
		return false;

	if (!(sd->Control & SE_SELF_RELATIVE))
		return false;

	sd_owner = le32_to_cpu(sd->Owner);
	if (sd_owner) {
		const struct SID *owner = Add2Ptr(sd, sd_owner);

		if (sd_owner + offsetof(struct SID, SubAuthority) > len)
			return false;

		if (owner->Revision != 1)
			return false;

		if (sd_owner + sid_length(owner) > len)
			return false;
	}

	sd_group = le32_to_cpu(sd->Group);
	if (sd_group) {
		const struct SID *group = Add2Ptr(sd, sd_group);

		if (sd_group + offsetof(struct SID, SubAuthority) > len)
			return false;

		if (group->Revision != 1)
			return false;

		if (sd_group + sid_length(group) > len)
			return false;
	}

	sd_sacl = le32_to_cpu(sd->Sacl);
	if (sd_sacl) {
		const struct ACL *sacl = Add2Ptr(sd, sd_sacl);

		if (sd_sacl + sizeof(struct ACL) > len)
			return false;

		if (!is_acl_valid(sacl, len - sd_sacl))
			return false;
	}

	sd_dacl = le32_to_cpu(sd->Dacl);
	if (sd_dacl) {
		const struct ACL *dacl = Add2Ptr(sd, sd_dacl);

		if (sd_dacl + sizeof(struct ACL) > len)
			return false;

		if (!is_acl_valid(dacl, len - sd_dacl))
			return false;
	}

	return true;
}
/*
 * ntfs_security_init - Load and parse $Secure.
 */
int ntfs_security_init(struct ntfs_sb_info *sbi)
{
	int err;
	struct super_block *sb = sbi->sb;
	struct inode *inode;
	struct ntfs_inode *ni;
	struct MFT_REF ref;
	struct ATTRIB *attr;
	struct ATTR_LIST_ENTRY *le;
	u64 sds_size;
	size_t off;
	struct NTFS_DE *ne;
	struct NTFS_DE_SII *sii_e;
	struct ntfs_fnd *fnd_sii = NULL;
	const struct INDEX_ROOT *root_sii;
	const struct INDEX_ROOT *root_sdh;
	struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
	struct ntfs_index *indx_sii = &sbi->security.index_sii;

	ref.low = cpu_to_le32(MFT_REC_SECURE);
	ref.high = 0;
	ref.seq = cpu_to_le16(MFT_REC_SECURE);

	inode = ntfs_iget5(sb, &ref, &NAME_SECURE);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		ntfs_err(sb, "Failed to load $Secure (%d).", err);
		inode = NULL;
		goto out;
	}

	ni = ntfs_i(inode);

	le = NULL;

	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SDH_NAME,
			    ARRAY_SIZE(SDH_NAME), NULL, NULL);
	if (!attr ||
	    !(root_sdh = resident_data_ex(attr, sizeof(struct INDEX_ROOT))) ||
	    root_sdh->type != ATTR_ZERO ||
	    root_sdh->rule != NTFS_COLLATION_TYPE_SECURITY_HASH ||
	    offsetof(struct INDEX_ROOT, ihdr) +
			    le32_to_cpu(root_sdh->ihdr.used) >
		    le32_to_cpu(attr->res.data_size)) {
		ntfs_err(sb, "$Secure::$SDH is corrupted.");
		err = -EINVAL;
		goto out;
	}

	err = indx_init(indx_sdh, sbi, attr, INDEX_MUTEX_SDH);
	if (err) {
		ntfs_err(sb, "Failed to initialize $Secure::$SDH (%d).", err);
		goto out;
	}

	attr = ni_find_attr(ni, attr, &le, ATTR_ROOT, SII_NAME,
			    ARRAY_SIZE(SII_NAME), NULL, NULL);
	if (!attr ||
	    !(root_sii = resident_data_ex(attr, sizeof(struct INDEX_ROOT))) ||
	    root_sii->type != ATTR_ZERO ||
	    root_sii->rule != NTFS_COLLATION_TYPE_UINT ||
	    offsetof(struct INDEX_ROOT, ihdr) +
			    le32_to_cpu(root_sii->ihdr.used) >
		    le32_to_cpu(attr->res.data_size)) {
		ntfs_err(sb, "$Secure::$SII is corrupted.");
		err = -EINVAL;
		goto out;
	}

	err = indx_init(indx_sii, sbi, attr, INDEX_MUTEX_SII);
	if (err) {
		ntfs_err(sb, "Failed to initialize $Secure::$SII (%d).", err);
		goto out;
	}

	fnd_sii = fnd_get();
	if (!fnd_sii) {
		err = -ENOMEM;
		goto out;
	}

	sds_size = inode->i_size;

	/* Find the last valid Id. */
	sbi->security.next_id = SECURITY_ID_FIRST;
	/* Always write new security at the end of bucket. */
	sbi->security.next_off =
		ALIGN(sds_size - SecurityDescriptorsBlockSize, 16);

	off = 0;
	ne = NULL;

	for (;;) {
		u32 next_id;

		err = indx_find_raw(indx_sii, ni, root_sii, &ne, &off, fnd_sii);
		if (err || !ne)
			break;

		sii_e = (struct NTFS_DE_SII *)ne;
		if (le16_to_cpu(ne->view.data_size) < sizeof(sii_e->sec_hdr))
			continue;

		next_id = le32_to_cpu(sii_e->sec_id) + 1;
		if (next_id >= sbi->security.next_id)
			sbi->security.next_id = next_id;
	}

	sbi->security.ni = ni;
	inode = NULL;
out:
	iput(inode);
	fnd_put(fnd_sii);

	return err;
}
/*
 * ntfs_get_security_by_id - Read security descriptor by id.
 */
int ntfs_get_security_by_id(struct ntfs_sb_info *sbi, __le32 security_id,
			    struct SECURITY_DESCRIPTOR_RELATIVE **sd,
			    size_t *size)
{
	int err;
	int diff;
	struct ntfs_inode *ni = sbi->security.ni;
	struct ntfs_index *indx = &sbi->security.index_sii;
	void *p = NULL;
	struct NTFS_DE_SII *sii_e;
	struct ntfs_fnd *fnd_sii;
	struct SECURITY_HDR d_security;
	const struct INDEX_ROOT *root_sii;
	u32 t32;

	*sd = NULL;

	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);

	fnd_sii = fnd_get();
	if (!fnd_sii) {
		err = -ENOMEM;
		goto out;
	}

	root_sii = indx_get_root(indx, ni, NULL, NULL);
	if (!root_sii) {
		err = -EINVAL;
		goto out;
	}

	/* Try to find this SECURITY descriptor in SII indexes. */
	err = indx_find(indx, ni, root_sii, &security_id, sizeof(security_id),
			NULL, &diff, (struct NTFS_DE **)&sii_e, fnd_sii);
	if (err)
		goto out;

	if (diff)
		goto out;

	t32 = le32_to_cpu(sii_e->sec_hdr.size);
	if (t32 < sizeof(struct SECURITY_HDR)) {
		err = -EINVAL;
		goto out;
	}

	if (t32 > sizeof(struct SECURITY_HDR) + 0x10000) {
		/* Security descriptor is too big; 0x10000 is an arbitrary upper bound. */
		err = -EFBIG;
		goto out;
	}

	*size = t32 - sizeof(struct SECURITY_HDR);

	p = kmalloc(*size, GFP_NOFS);
	if (!p) {
		err = -ENOMEM;
		goto out;
	}

	err = ntfs_read_run_nb(sbi, &ni->file.run,
			       le64_to_cpu(sii_e->sec_hdr.off), &d_security,
			       sizeof(d_security), NULL);
	if (err)
		goto out;

	if (memcmp(&d_security, &sii_e->sec_hdr, sizeof(d_security))) {
		err = -EINVAL;
		goto out;
	}

	err = ntfs_read_run_nb(sbi, &ni->file.run,
			       le64_to_cpu(sii_e->sec_hdr.off) +
				       sizeof(struct SECURITY_HDR),
			       p, *size, NULL);
	if (err)
		goto out;

	*sd = p;
	p = NULL;

out:
	kfree(p);
	fnd_put(fnd_sii);
	ni_unlock(ni);

	return err;
}
/*
 * ntfs_insert_security - Insert security descriptor into $Secure::SDS.
 *
 * SECURITY Descriptor Stream data is organized into chunks of 256K bytes
 * and it contains a mirror copy of each security descriptor.  When writing
 * to a security descriptor at location X, another copy will be written at
 * location (X+256K).
 * When writing a security descriptor that will cross the 256K boundary,
 * the pointer will be advanced by 256K to skip over the mirror portion.
 */
int ntfs_insert_security(struct ntfs_sb_info *sbi,
			 const struct SECURITY_DESCRIPTOR_RELATIVE *sd,
			 u32 size_sd, __le32 *security_id, bool *inserted)
{
	int err, diff;
	struct ntfs_inode *ni = sbi->security.ni;
	struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
	struct ntfs_index *indx_sii = &sbi->security.index_sii;
	struct NTFS_DE_SDH *e;
	struct NTFS_DE_SDH sdh_e;
	struct NTFS_DE_SII sii_e;
	struct SECURITY_HDR *d_security;
	u32 new_sec_size = size_sd + sizeof(struct SECURITY_HDR);
	u32 aligned_sec_size = ALIGN(new_sec_size, 16);
	struct SECURITY_KEY hash_key;
	struct ntfs_fnd *fnd_sdh = NULL;
	const struct INDEX_ROOT *root_sdh;
	const struct INDEX_ROOT *root_sii;
	u64 mirr_off, new_sds_size;
	u32 next, left;

	static_assert((1 << Log2OfSecurityDescriptorsBlockSize) ==
		      SecurityDescriptorsBlockSize);

	hash_key.hash = security_hash(sd, size_sd);
	hash_key.sec_id = SECURITY_ID_INVALID;

	if (inserted)
		*inserted = false;
	*security_id = SECURITY_ID_INVALID;

	/* Allocate a temporary buffer. */
	d_security = kzalloc(aligned_sec_size, GFP_NOFS);
	if (!d_security)
		return -ENOMEM;

	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);

	fnd_sdh = fnd_get();
	if (!fnd_sdh) {
		err = -ENOMEM;
		goto out;
	}

	root_sdh = indx_get_root(indx_sdh, ni, NULL, NULL);
	if (!root_sdh) {
		err = -EINVAL;
		goto out;
	}

	root_sii = indx_get_root(indx_sii, ni, NULL, NULL);
	if (!root_sii) {
		err = -EINVAL;
		goto out;
	}

	/*
	 * Check if such security already exists.
	 * Use "SDH" and hash -> to get the offset in "SDS".
	 */
	err = indx_find(indx_sdh, ni, root_sdh, &hash_key, sizeof(hash_key),
			&d_security->key.sec_id, &diff, (struct NTFS_DE **)&e,
			fnd_sdh);
	if (err)
		goto out;

	while (e) {
		if (le32_to_cpu(e->sec_hdr.size) == new_sec_size) {
			err = ntfs_read_run_nb(sbi, &ni->file.run,
					       le64_to_cpu(e->sec_hdr.off),
					       d_security, new_sec_size, NULL);
			if (err)
				goto out;

			if (le32_to_cpu(d_security->size) == new_sec_size &&
			    d_security->key.hash == hash_key.hash &&
			    !memcmp(d_security + 1, sd, size_sd)) {
				*security_id = d_security->key.sec_id;
				/* Such security already exists. */
				err = 0;
				goto out;
			}
		}

		err = indx_find_sort(indx_sdh, ni, root_sdh,
				     (struct NTFS_DE **)&e, fnd_sdh);
		if (err)
			goto out;

		if (!e || e->key.hash != hash_key.hash)
			break;
	}

	/* Zero unused space. */
	next = sbi->security.next_off & (SecurityDescriptorsBlockSize - 1);
	left = SecurityDescriptorsBlockSize - next;

	/* Zero gap until SecurityDescriptorsBlockSize. */
	if (left < new_sec_size) {
		/* Zero "left" bytes from sbi->security.next_off. */
		sbi->security.next_off += SecurityDescriptorsBlockSize + left;
	}

	/* Zero tail of previous security. */
	//used = ni->vfs_inode.i_size & (SecurityDescriptorsBlockSize - 1);

	/*
	 * Example:
	 * 0x40438 == ni->vfs_inode.i_size
	 * 0x00440 == sbi->security.next_off
	 * need to zero [0x438-0x440)
	 * if (next > used) {
	 *	u32 tozero = next - used;
	 *	zero "tozero" bytes from sbi->security.next_off - tozero
	 * }
	 */

	/* Format new security descriptor. */
	d_security->key.hash = hash_key.hash;
	d_security->key.sec_id = cpu_to_le32(sbi->security.next_id);
	d_security->off = cpu_to_le64(sbi->security.next_off);
	d_security->size = cpu_to_le32(new_sec_size);
	memcpy(d_security + 1, sd, size_sd);

	/* Write main SDS bucket. */
	err = ntfs_sb_write_run(sbi, &ni->file.run, sbi->security.next_off,
				d_security, aligned_sec_size, 0);
	if (err)
		goto out;

	mirr_off = sbi->security.next_off + SecurityDescriptorsBlockSize;
	new_sds_size = mirr_off + aligned_sec_size;

	if (new_sds_size > ni->vfs_inode.i_size) {
		err = attr_set_size(ni, ATTR_DATA, SDS_NAME,
				    ARRAY_SIZE(SDS_NAME), &ni->file.run,
				    new_sds_size, &new_sds_size, false, NULL);
		if (err)
			goto out;
	}

	/* Write copy SDS bucket. */
	err = ntfs_sb_write_run(sbi, &ni->file.run, mirr_off, d_security,
				aligned_sec_size, 0);
	if (err)
		goto out;

	/* Fill SII entry. */
	sii_e.de.view.data_off =
		cpu_to_le16(offsetof(struct NTFS_DE_SII, sec_hdr));
	sii_e.de.view.data_size = cpu_to_le16(sizeof(struct SECURITY_HDR));
	sii_e.de.view.res = 0;
	sii_e.de.size = cpu_to_le16(sizeof(struct NTFS_DE_SII));
	sii_e.de.key_size = cpu_to_le16(sizeof(d_security->key.sec_id));
	sii_e.de.flags = 0;
	sii_e.de.res = 0;
	sii_e.sec_id = d_security->key.sec_id;
	memcpy(&sii_e.sec_hdr, d_security, sizeof(struct SECURITY_HDR));

	err = indx_insert_entry(indx_sii, ni, &sii_e.de, NULL, NULL, 0);
	if (err)
		goto out;

	/* Fill SDH entry. */
	sdh_e.de.view.data_off =
		cpu_to_le16(offsetof(struct NTFS_DE_SDH, sec_hdr));
	sdh_e.de.view.data_size = cpu_to_le16(sizeof(struct SECURITY_HDR));
	sdh_e.de.view.res = 0;
	sdh_e.de.size = cpu_to_le16(SIZEOF_SDH_DIRENTRY);
	sdh_e.de.key_size = cpu_to_le16(sizeof(sdh_e.key));
	sdh_e.de.flags = 0;
	sdh_e.de.res = 0;
	sdh_e.key.hash = d_security->key.hash;
	sdh_e.key.sec_id = d_security->key.sec_id;
	memcpy(&sdh_e.sec_hdr, d_security, sizeof(struct SECURITY_HDR));
	sdh_e.magic[0] = cpu_to_le16('I');
	sdh_e.magic[1] = cpu_to_le16('I');

	fnd_clear(fnd_sdh);
	err = indx_insert_entry(indx_sdh, ni, &sdh_e.de, (void *)(size_t)1,
				fnd_sdh, 0);
	if (err)
		goto out;

	*security_id = d_security->key.sec_id;
	if (inserted)
		*inserted = true;

	/* Update Id and offset for next descriptor. */
	sbi->security.next_id += 1;
	sbi->security.next_off += aligned_sec_size;

out:
	fnd_put(fnd_sdh);
	mark_inode_dirty(&ni->vfs_inode);
	ni_unlock(ni);
	kfree(d_security);

	return err;
}
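
/*
 * Layout example for the mirroring described above (hypothetical offsets):
 * a descriptor written at SDS offset 0x10000 is duplicated at
 * 0x10000 + 0x40000 (256K), and next_off then advances past the 16-byte
 * aligned size of the record.  A descriptor that would straddle a 256K
 * boundary is instead pushed past the mirror region, which is why
 * next_off can jump by SecurityDescriptorsBlockSize + left above.
 */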
/*
 * ntfs_reparse_init - Load and parse $Extend/$Reparse.
 */
int ntfs_reparse_init(struct ntfs_sb_info *sbi)
{
	int err;
	struct ntfs_inode *ni = sbi->reparse.ni;
	struct ntfs_index *indx = &sbi->reparse.index_r;
	struct ATTRIB *attr;
	struct ATTR_LIST_ENTRY *le;
	const struct INDEX_ROOT *root_r;

	if (!ni)
		return 0;

	le = NULL;
	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SR_NAME,
			    ARRAY_SIZE(SR_NAME), NULL, NULL);
	if (!attr) {
		err = -EINVAL;
		goto out;
	}

	root_r = resident_data(attr);
	if (root_r->type != ATTR_ZERO ||
	    root_r->rule != NTFS_COLLATION_TYPE_UINTS) {
		err = -EINVAL;
		goto out;
	}

	err = indx_init(indx, sbi, attr, INDEX_MUTEX_SR);
out:
	return err;
}
/*
 * ntfs_objid_init - Load and parse $Extend/$ObjId.
 */
int ntfs_objid_init(struct ntfs_sb_info *sbi)
{
	int err;
	struct ntfs_inode *ni = sbi->objid.ni;
	struct ntfs_index *indx = &sbi->objid.index_o;
	struct ATTRIB *attr;
	struct ATTR_LIST_ENTRY *le;
	const struct INDEX_ROOT *root;

	if (!ni)
		return 0;

	le = NULL;
	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SO_NAME,
			    ARRAY_SIZE(SO_NAME), NULL, NULL);
	if (!attr) {
		err = -EINVAL;
		goto out;
	}

	root = resident_data(attr);
	if (root->type != ATTR_ZERO ||
	    root->rule != NTFS_COLLATION_TYPE_UINTS) {
		err = -EINVAL;
		goto out;
	}

	err = indx_init(indx, sbi, attr, INDEX_MUTEX_SO);
out:
	return err;
}
int ntfs_objid_remove(struct ntfs_sb_info *sbi, struct GUID *guid)
{
	int err;
	struct ntfs_inode *ni = sbi->objid.ni;
	struct ntfs_index *indx = &sbi->objid.index_o;

	if (!ni)
		return -EINVAL;

	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_OBJID);

	err = indx_delete_entry(indx, ni, guid, sizeof(*guid), NULL);

	mark_inode_dirty(&ni->vfs_inode);
	ni_unlock(ni);

	return err;
}
int ntfs_insert_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
			const struct MFT_REF *ref)
{
	int err;
	struct ntfs_inode *ni = sbi->reparse.ni;
	struct ntfs_index *indx = &sbi->reparse.index_r;
	struct NTFS_DE_R re;

	if (!ni)
		return -EINVAL;

	memset(&re, 0, sizeof(re));

	re.de.view.data_off = cpu_to_le16(offsetof(struct NTFS_DE_R, zero));
	re.de.size = cpu_to_le16(sizeof(struct NTFS_DE_R));
	re.de.key_size = cpu_to_le16(sizeof(re.key));

	re.key.ReparseTag = rtag;
	memcpy(&re.key.ref, ref, sizeof(*ref));

	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);

	err = indx_insert_entry(indx, ni, &re.de, NULL, NULL, 0);

	mark_inode_dirty(&ni->vfs_inode);
	ni_unlock(ni);

	return err;
}
int ntfs_remove_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
			const struct MFT_REF *ref)
{
	int err, diff;
	struct ntfs_inode *ni = sbi->reparse.ni;
	struct ntfs_index *indx = &sbi->reparse.index_r;
	struct ntfs_fnd *fnd = NULL;
	struct REPARSE_KEY rkey;
	struct NTFS_DE_R *re;
	struct INDEX_ROOT *root_r;

	if (!ni)
		return -EINVAL;

	rkey.ReparseTag = rtag;
	rkey.ref = *ref;

	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);

	if (rtag) {
		err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
		goto out1;
	}

	fnd = fnd_get();
	if (!fnd) {
		err = -ENOMEM;
		goto out1;
	}

	root_r = indx_get_root(indx, ni, NULL, NULL);
	if (!root_r) {
		err = -EINVAL;
		goto out;
	}

	/* 1 - forces to ignore rkey.ReparseTag when comparing keys. */
	err = indx_find(indx, ni, root_r, &rkey, sizeof(rkey), (void *)1, &diff,
			(struct NTFS_DE **)&re, fnd);
	if (err)
		goto out;

	if (memcmp(&re->key.ref, ref, sizeof(*ref))) {
		/* Impossible. Looks like volume corruption? */
		goto out;
	}

	memcpy(&rkey, &re->key, sizeof(rkey));

	fnd_put(fnd);
	fnd = NULL;

	err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
	if (err)
		goto out;

out:
	fnd_put(fnd);

out1:
	mark_inode_dirty(&ni->vfs_inode);
	ni_unlock(ni);

	return err;
}
static inline void ntfs_unmap_and_discard(struct ntfs_sb_info *sbi, CLST lcn,
					  CLST len)
{
	ntfs_unmap_meta(sbi->sb, lcn, len);
	ntfs_discard(sbi, lcn, len);
}
void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim)
{
	CLST end, i, zone_len, zlen;
	struct wnd_bitmap *wnd = &sbi->used.bitmap;

	down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
	if (!wnd_is_used(wnd, lcn, len)) {
		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);

		/* Free only the sub-ranges that really are in use. */
		end = lcn + len;
		len = 0;
		for (i = lcn; i < end; i++) {
			if (wnd_is_used(wnd, i, 1)) {
				if (!len)
					lcn = i;
				len += 1;
				continue;
			}

			if (!len)
				continue;

			if (trim)
				ntfs_unmap_and_discard(sbi, lcn, len);

			wnd_set_free(wnd, lcn, len);
			len = 0;
		}

		if (!len)
			goto out;
	}

	if (trim)
		ntfs_unmap_and_discard(sbi, lcn, len);
	wnd_set_free(wnd, lcn, len);

	/* Append to MFT zone, if possible. */
	zone_len = wnd_zone_len(wnd);
	zlen = min(zone_len + len, sbi->zone_max);

	if (zlen == zone_len) {
		/* MFT zone already has maximum size. */
	} else if (!zone_len) {
		/* Create MFT zone only if 'zlen' is large enough. */
		if (zlen == sbi->zone_max)
			wnd_zone_set(wnd, lcn, zlen);
	} else {
		CLST zone_lcn = wnd_zone_bit(wnd);

		if (lcn + len == zone_lcn) {
			/* Append into head of MFT zone. */
			wnd_zone_set(wnd, lcn, zlen);
		} else if (zone_lcn + zone_len == lcn) {
			/* Append into tail of MFT zone. */
			wnd_zone_set(wnd, zone_lcn, zlen);
		}
	}

out:
	up_write(&wnd->rw_lock);
}
/*
 * run_deallocate - Deallocate clusters.
 */
int run_deallocate(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		   bool trim)
{
	CLST lcn, len;
	size_t idx = 0;

	while (run_get_entry(run, idx++, NULL, &lcn, &len)) {
		if (lcn == SPARSE_LCN)
			continue;

		mark_as_free_ex(sbi, lcn, len, trim);
	}

	return 0;
}
static inline bool name_has_forbidden_chars(const struct le_str *fname)
{
	int i, ch;

	/* Check for forbidden chars. */
	for (i = 0; i < fname->len; ++i) {
		ch = le16_to_cpu(fname->name[i]);

		/* Control chars are not allowed. */
		if (ch < 0x20)
			return true;

		switch (ch) {
		/* Disallowed by Windows. */
		case '\\':
		case '/':
		case ':':
		case '*':
		case '?':
		case '<':
		case '>':
		case '|':
		case '\"':
			return true;

		default:
			/* Allowed. */
			break;
		}
	}

	/* File names cannot end with a space or a period. */
	if (fname->len > 0) {
		ch = le16_to_cpu(fname->name[fname->len - 1]);
		if (ch == ' ' || ch == '.')
			return true;
	}

	return false;
}
static inline bool is_reserved_name(const struct ntfs_sb_info *sbi,
				    const struct le_str *fname)
{
	int port_digit;
	const __le16 *name = fname->name;
	int len = fname->len;
	const u16 *upcase = sbi->upcase;

	/*
	 * Check for 3-char reserved names (device names).
	 * The name by itself or with any extension is forbidden.
	 */
	if (len == 3 || (len > 3 && le16_to_cpu(name[3]) == '.'))
		if (!ntfs_cmp_names(name, 3, CON_NAME, 3, upcase, false) ||
		    !ntfs_cmp_names(name, 3, NUL_NAME, 3, upcase, false) ||
		    !ntfs_cmp_names(name, 3, AUX_NAME, 3, upcase, false) ||
		    !ntfs_cmp_names(name, 3, PRN_NAME, 3, upcase, false))
			return true;

	/*
	 * Check for 4-char reserved names (port name followed by 1..9).
	 * The name by itself or with any extension is forbidden.
	 */
	if (len == 4 || (len > 4 && le16_to_cpu(name[4]) == '.')) {
		port_digit = le16_to_cpu(name[3]);
		if (port_digit >= '1' && port_digit <= '9')
			if (!ntfs_cmp_names(name, 3, COM_NAME, 3, upcase,
					    false) ||
			    !ntfs_cmp_names(name, 3, LPT_NAME, 3, upcase,
					    false))
				return true;
	}

	return false;
}
/*
 * valid_windows_name - Check if a file name is valid in Windows.
 */
bool valid_windows_name(struct ntfs_sb_info *sbi, const struct le_str *fname)
{
	return !name_has_forbidden_chars(fname) &&
	       !is_reserved_name(sbi, fname);
}
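
/*
 * Examples of names rejected by valid_windows_name(): "aux", "LPT1" and
 * "com9.txt" (reserved device names, with or without an extension),
 * "a<b" (forbidden character) and "dir." (trailing period).  The reserved
 * name comparison is case-insensitive via the volume's $UpCase table.
 */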
/*
 * ntfs_set_label - Update the current NTFS volume label.
 */
int ntfs_set_label(struct ntfs_sb_info *sbi, u8 *label, int len)
{
	int err;
	struct ATTRIB *attr;
	struct ntfs_inode *ni = sbi->volume.ni;
	const u8 max_ulen = 0x80; /* TODO: use attrdef to get maximum length */
	/* Allocate PATH_MAX bytes. */
	struct cpu_str *uni = __getname();

	if (!uni)
		return -ENOMEM;

	err = ntfs_nls_to_utf16(sbi, label, len, uni, (PATH_MAX - 2) / 2,
				UTF16_LITTLE_ENDIAN);
	if (err < 0)
		goto out;

	if (uni->len > max_ulen) {
		ntfs_warn(sbi->sb, "new label is too long");
		err = -EFBIG;
		goto out;
	}

	ni_lock(ni);

	/* Ignore any errors. */
	ni_remove_attr(ni, ATTR_LABEL, NULL, 0, false, NULL);

	err = ni_insert_resident(ni, uni->len * sizeof(u16), ATTR_LABEL, NULL,
				 0, &attr, NULL, NULL);
	if (err < 0)
		goto unlock_out;

	/* Write the new label into the on-disk struct. */
	memcpy(resident_data(attr), uni->name, uni->len * sizeof(u16));

	/* Update the cached value of the current label. */
	if (len >= ARRAY_SIZE(sbi->volume.label))
		len = ARRAY_SIZE(sbi->volume.label) - 1;
	memcpy(sbi->volume.label, label, len);
	sbi->volume.label[len] = 0;
	mark_inode_dirty_sync(&ni->vfs_inode);

unlock_out:
	ni_unlock(ni);

	if (!err)
		err = _ni_write_inode(&ni->vfs_inode, 0);

out:
	__putname(uni);
	return err;
}