// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 */

#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/kernel.h>

#include "ntfs_fs.h"

const struct cpu_str NAME_MFT = {
	4, 0, { '$', 'M', 'F', 'T' },
};
const struct cpu_str NAME_MIRROR = {
	8, 0, { '$', 'M', 'F', 'T', 'M', 'i', 'r', 'r' },
};
const struct cpu_str NAME_LOGFILE = {
	8, 0, { '$', 'L', 'o', 'g', 'F', 'i', 'l', 'e' },
};
const struct cpu_str NAME_VOLUME = {
	7, 0, { '$', 'V', 'o', 'l', 'u', 'm', 'e' },
};
const struct cpu_str NAME_ATTRDEF = {
	8, 0, { '$', 'A', 't', 't', 'r', 'D', 'e', 'f' },
};
const struct cpu_str NAME_ROOT = {
	1, 0, { '.' },
};
const struct cpu_str NAME_BITMAP = {
	7, 0, { '$', 'B', 'i', 't', 'm', 'a', 'p' },
};
const struct cpu_str NAME_BOOT = {
	5, 0, { '$', 'B', 'o', 'o', 't' },
};
const struct cpu_str NAME_BADCLUS = {
	8, 0, { '$', 'B', 'a', 'd', 'C', 'l', 'u', 's' },
};
const struct cpu_str NAME_QUOTA = {
	6, 0, { '$', 'Q', 'u', 'o', 't', 'a' },
};
const struct cpu_str NAME_SECURE = {
	7, 0, { '$', 'S', 'e', 'c', 'u', 'r', 'e' },
};
const struct cpu_str NAME_UPCASE = {
	7, 0, { '$', 'U', 'p', 'C', 'a', 's', 'e' },
};
const struct cpu_str NAME_EXTEND = {
	7, 0, { '$', 'E', 'x', 't', 'e', 'n', 'd' },
};
const struct cpu_str NAME_OBJID = {
	6, 0, { '$', 'O', 'b', 'j', 'I', 'd' },
};
const struct cpu_str NAME_REPARSE = {
	8, 0, { '$', 'R', 'e', 'p', 'a', 'r', 's', 'e' },
};
const struct cpu_str NAME_USNJRNL = {
	8, 0, { '$', 'U', 's', 'n', 'J', 'r', 'n', 'l' },
};

const __le16 BAD_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('B'), cpu_to_le16('a'), cpu_to_le16('d'),
};
const __le16 I30_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('I'), cpu_to_le16('3'), cpu_to_le16('0'),
};
const __le16 SII_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('I'), cpu_to_le16('I'),
};
const __le16 SDH_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('H'),
};
const __le16 SDS_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('S'),
};
const __le16 SO_NAME[2] = {
	cpu_to_le16('$'), cpu_to_le16('O'),
};
const __le16 SQ_NAME[2] = {
	cpu_to_le16('$'), cpu_to_le16('Q'),
};
const __le16 SR_NAME[2] = {
	cpu_to_le16('$'), cpu_to_le16('R'),
};

#ifdef CONFIG_NTFS3_LZX_XPRESS
const __le16 WOF_NAME[17] = {
	cpu_to_le16('W'), cpu_to_le16('o'), cpu_to_le16('f'), cpu_to_le16('C'),
	cpu_to_le16('o'), cpu_to_le16('m'), cpu_to_le16('p'), cpu_to_le16('r'),
	cpu_to_le16('e'), cpu_to_le16('s'), cpu_to_le16('s'), cpu_to_le16('e'),
	cpu_to_le16('d'), cpu_to_le16('D'), cpu_to_le16('a'), cpu_to_le16('t'),
	cpu_to_le16('a'),
};
#endif

static const __le16 CON_NAME[3] = {
	cpu_to_le16('C'), cpu_to_le16('O'), cpu_to_le16('N'),
};

static const __le16 NUL_NAME[3] = {
	cpu_to_le16('N'), cpu_to_le16('U'), cpu_to_le16('L'),
};

static const __le16 AUX_NAME[3] = {
	cpu_to_le16('A'), cpu_to_le16('U'), cpu_to_le16('X'),
};

static const __le16 PRN_NAME[3] = {
	cpu_to_le16('P'), cpu_to_le16('R'), cpu_to_le16('N'),
};

static const __le16 COM_NAME[3] = {
	cpu_to_le16('C'), cpu_to_le16('O'), cpu_to_le16('M'),
};

static const __le16 LPT_NAME[3] = {
	cpu_to_le16('L'), cpu_to_le16('P'), cpu_to_le16('T'),
};

/*
 * ntfs_fix_pre_write - Insert fixups into @rhdr before writing to disk.
 */
bool ntfs_fix_pre_write(struct NTFS_RECORD_HEADER *rhdr, size_t bytes)
{
	u16 fo = le16_to_cpu(rhdr->fix_off);
	u16 fn = le16_to_cpu(rhdr->fix_num);

	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
	    fn * SECTOR_SIZE > bytes) {
		return false;
	}

	/* Get fixup pointer. */
	fixup = Add2Ptr(rhdr, fo);

	if (*fixup >= 0x7FFF)
		*fixup = 1;

	sample = *fixup;
	ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));

	while (fn--) {
		*++fixup = *ptr;
		*ptr = sample;
		ptr += SECTOR_SIZE / sizeof(short);
	}
	return true;
}

/*
 * ntfs_fix_post_read - Remove fixups after reading from disk.
 *
 * Return: < 0 if error, 0 if ok, 1 if fixups need to be updated.
 */
int ntfs_fix_post_read(struct NTFS_RECORD_HEADER *rhdr, size_t bytes,
		       bool simple)
{
	fo = le16_to_cpu(rhdr->fix_off);
	fn = simple ? ((bytes >> SECTOR_SHIFT) + 1) :
		      le16_to_cpu(rhdr->fix_num);

	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
	    fn * SECTOR_SIZE > bytes) {
		return -EINVAL; /* Native chkntfs returns ok! */
	}

	/* Get fixup pointer. */
	fixup = Add2Ptr(rhdr, fo);
	sample = *fixup;
	ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));

	while (fn--) {
		/* Test current word. */
		if (*ptr != sample) {
			/* Fixup does not match! Is it a serious error? */
			ret = -E_NTFS_FIXUP;
		}

		ptr += SECTOR_SIZE / sizeof(short);
	}

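/*
 * Illustrative sketch (not driver code) of the update sequence scheme
 * implemented by the two helpers above: before a write, the last __le16
 * of every 512-byte sector is saved into the fixup array and replaced
 * with the current sequence number; after a read, the saved words are
 * verified and put back. The local names 'usa' and 'sector_end' below
 * are hypothetical:
 *
 *	__le16 *usa = Add2Ptr(rhdr, le16_to_cpu(rhdr->fix_off));
 *	__le16 sample = usa[0];	// current sequence number
 *	__le16 *sector_end = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));
 *
 *	for (u16 i = 1; i <= fn; i++) {
 *		if (*sector_end != sample)
 *			return -E_NTFS_FIXUP;	// torn sector detected
 *		*sector_end = usa[i];		// restore original word
 *		sector_end += SECTOR_SIZE / sizeof(short);
 *	}
 */
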
/*
 * ntfs_extend_init - Load $Extend file.
 */
int ntfs_extend_init(struct ntfs_sb_info *sbi)
{
	struct super_block *sb = sbi->sb;
	struct inode *inode, *inode2;

	if (sbi->volume.major_ver < 3) {
		ntfs_notice(sb, "Skip $Extend because of NTFS version");
		return 0;
	}

	ref.low = cpu_to_le32(MFT_REC_EXTEND);
	ref.seq = cpu_to_le16(MFT_REC_EXTEND);
	inode = ntfs_iget5(sb, &ref, &NAME_EXTEND);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		ntfs_err(sb, "Failed to load $Extend (%d).", err);
	}

	/* If ntfs_iget5() reads from disk, it never returns a bad inode. */
	if (!S_ISDIR(inode->i_mode)) {

	/* Try to find $ObjId. */
	inode2 = dir_search_u(inode, &NAME_OBJID, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		if (is_bad_inode(inode2)) {
			iput(inode2);
		} else {
			sbi->objid.ni = ntfs_i(inode2);
			sbi->objid_no = inode2->i_ino;
		}
	}

	/* Try to find $Quota. */
	inode2 = dir_search_u(inode, &NAME_QUOTA, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		sbi->quota_no = inode2->i_ino;
	}

	/* Try to find $Reparse. */
	inode2 = dir_search_u(inode, &NAME_REPARSE, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		sbi->reparse.ni = ntfs_i(inode2);
		sbi->reparse_no = inode2->i_ino;
	}

	/* Try to find $UsnJrnl. */
	inode2 = dir_search_u(inode, &NAME_USNJRNL, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		sbi->usn_jrnl_no = inode2->i_ino;
	}

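/*
 * A minimal sketch of how fixed MFT records are addressed, as used
 * above: system files live at well-known record numbers, and for
 * reserved records the sequence number conventionally equals the
 * record number. Loading the $Extend directory looks like:
 *
 *	struct MFT_REF ref = {
 *		.low = cpu_to_le32(MFT_REC_EXTEND),
 *		.seq = cpu_to_le16(MFT_REC_EXTEND),
 *	};
 *	struct inode *dir = ntfs_iget5(sb, &ref, &NAME_EXTEND);
 */
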
int ntfs_loadlog_and_replay(struct ntfs_inode *ni, struct ntfs_sb_info *sbi)
{
	struct super_block *sb = sbi->sb;
	bool initialized = false;

	if (ni->vfs_inode.i_size >= 0x100000000ull) {
		ntfs_err(sb, "\x24LogFile is larger than 4G.");

	sbi->flags |= NTFS_FLAGS_LOG_REPLAYING;

	ref.low = cpu_to_le32(MFT_REC_MFT);
	ref.seq = cpu_to_le16(1);
	inode = ntfs_iget5(sb, &ref, NULL);

	if (IS_ERR(inode)) {
		/* Try to use the MFT copy. */
		u64 t64 = sbi->mft.lbo;

		sbi->mft.lbo = sbi->mft.lbo2;
		inode = ntfs_iget5(sb, &ref, NULL);

		ntfs_err(sb, "Failed to load $MFT.");

	sbi->mft.ni = ntfs_i(inode);

	/* $LogFile should not contain an attribute list. */
	err = ni_load_all_mi(sbi->mft.ni);
	if (!err)
		err = log_replay(ni, &initialized);

	sync_blockdev(sb->s_bdev);
	invalidate_bdev(sb->s_bdev);

	if (sbi->flags & NTFS_FLAGS_NEED_REPLAY) {

	if (sb_rdonly(sb) || !initialized)
		goto out;

	/* Fill $LogFile with -1 if it is initialized. */
	err = ntfs_bio_fill_1(sbi, &ni->file.run);

	sbi->flags &= ~NTFS_FLAGS_LOG_REPLAYING;

/*
 * ntfs_look_for_free_space - Look for free space in the bitmap.
 */
int ntfs_look_for_free_space(struct ntfs_sb_info *sbi, CLST lcn, CLST len,
			     CLST *new_lcn, CLST *new_len,
			     enum ALLOCATE_OPT opt)
{
	struct super_block *sb = sbi->sb;
	size_t alcn, zlen, zeroes, zlcn, zlen2, ztrim, new_zlen;
	struct wnd_bitmap *wnd = &sbi->used.bitmap;

	down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
	if (opt & ALLOCATE_MFT) {
		zlen = wnd_zone_len(wnd);

		if (!zlen) {
			err = ntfs_refresh_zone(sbi);

			zlen = wnd_zone_len(wnd);
		}

		if (!zlen) {
			ntfs_err(sbi->sb, "no free space to extend mft");
			goto out;
		}

		lcn = wnd_zone_bit(wnd);
		alen = min_t(CLST, len, zlen);

		wnd_zone_set(wnd, lcn + alen, zlen - alen);

		err = wnd_set_used(wnd, lcn, alen);
	}

	/*
	 * Because cluster 0 is always in use, a zero 'lcn' means that we
	 * should use the cached value of 'next_free_lcn' to improve
	 * performance.
	 */
	if (!lcn)
		lcn = sbi->used.next_free_lcn;

	if (lcn >= wnd->nbits)
		lcn = 0;

	alen = wnd_find(wnd, len, lcn, BITMAP_FIND_MARK_AS_USED, &alcn);

	/* Try to use clusters from the MFT zone. */
	zlen = wnd_zone_len(wnd);
	zeroes = wnd_zeroes(wnd);

	/* Check for too big a request. */
	if (len > zeroes + zlen || zlen <= NTFS_MIN_MFT_ZONE) {
		goto out;
	}

	/* How many clusters to cut from the zone. */
	zlcn = wnd_zone_bit(wnd);
	zlen2 = zlen >> 1;
	ztrim = clamp_val(len, zlen2, zlen);
	new_zlen = max_t(size_t, zlen - ztrim, NTFS_MIN_MFT_ZONE);

	wnd_zone_set(wnd, zlcn, new_zlen);

	/* Allocate contiguous clusters. */
	alen = wnd_find(wnd, len, 0,
			BITMAP_FIND_MARK_AS_USED | BITMAP_FIND_FULL, &alcn);

	ntfs_unmap_meta(sb, alcn, alen);

	/* Set the hint for the next request. */
	if (!(opt & ALLOCATE_MFT))
		sbi->used.next_free_lcn = alcn + alen;

	up_write(&wnd->rw_lock);

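/*
 * Worked example of the zone-trimming arithmetic above (hypothetical
 * numbers): with zlen == 0x1000 free clusters in the MFT zone and a
 * request of len == 0x300, zlen2 = zlen >> 1 = 0x800 and
 * ztrim = clamp_val(0x300, 0x800, 0x1000) = 0x800, so the zone shrinks
 * to new_zlen = max(0x1000 - 0x800, NTFS_MIN_MFT_ZONE): at least half
 * of the zone is released to satisfy a general-purpose allocation.
 */
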
/*
 * ntfs_check_for_free_space
 *
 * Check if it is possible to allocate 'clen' clusters and 'mlen' MFT records.
 */
bool ntfs_check_for_free_space(struct ntfs_sb_info *sbi, CLST clen, CLST mlen)
{
	size_t free, zlen, avail;
	struct wnd_bitmap *wnd;

	wnd = &sbi->used.bitmap;
	down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
	free = wnd_zeroes(wnd);
	zlen = min_t(size_t, NTFS_MIN_MFT_ZONE, wnd_zone_len(wnd));
	up_read(&wnd->rw_lock);

	if (free < zlen + clen)
		return false;

	avail = free - (zlen + clen);

	wnd = &sbi->mft.bitmap;
	down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
	free = wnd_zeroes(wnd);
	zlen = wnd_zone_len(wnd);
	up_read(&wnd->rw_lock);

	if (free >= zlen + mlen)
		return true;

	return avail >= bytes_to_cluster(sbi, mlen << sbi->record_bits);
}

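/*
 * Worked example for the check above (hypothetical numbers): on a
 * volume with 4K clusters (cluster_bits == 12) and 1K MFT records
 * (record_bits == 10), a request for clen == 100 clusters plus
 * mlen == 16 records needs bytes_to_cluster(sbi, 16 << 10) == 4 extra
 * clusters, which must fit into avail = free - (zlen + clen).
 */
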
/*
 * ntfs_extend_mft - Allocate additional MFT records.
 *
 * sbi->mft.bitmap is locked for write.
 *
 * NOTE: recursive:
 *	ntfs_look_free_mft ->
 *	ntfs_extend_mft ->
 *	attr_set_size ->
 *	ni_insert_nonresident ->
 *	ni_insert_attr ->
 *	ni_ins_attr_ext ->
 *	ntfs_look_free_mft ->
 *	ntfs_extend_mft
 *
 * To avoid recursion, always allocate space for two new MFT records
 * (see attrib.c: "at least two MFT to avoid recursive loop").
 */
static int ntfs_extend_mft(struct ntfs_sb_info *sbi)
{
	struct ntfs_inode *ni = sbi->mft.ni;
	size_t new_mft_total;
	u64 new_mft_bytes, new_bitmap_bytes;
	struct wnd_bitmap *wnd = &sbi->mft.bitmap;

	new_mft_total = ALIGN(wnd->nbits + NTFS_MFT_INCREASE_STEP, 128);
	new_mft_bytes = (u64)new_mft_total << sbi->record_bits;

	/* Step 1: Resize $MFT::DATA. */
	down_write(&ni->file.run_lock);
	err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
			    new_mft_bytes, NULL, false, &attr);

	up_write(&ni->file.run_lock);

	attr->nres.valid_size = attr->nres.data_size;
	new_mft_total = le64_to_cpu(attr->nres.alloc_size) >> sbi->record_bits;

	/* Step 2: Resize $MFT::BITMAP. */
	new_bitmap_bytes = bitmap_size(new_mft_total);

	err = attr_set_size(ni, ATTR_BITMAP, NULL, 0, &sbi->mft.bitmap.run,
			    new_bitmap_bytes, &new_bitmap_bytes, true, NULL);

	/* Refresh the MFT zone if necessary. */
	down_write_nested(&sbi->used.bitmap.rw_lock, BITMAP_MUTEX_CLUSTERS);

	ntfs_refresh_zone(sbi);

	up_write(&sbi->used.bitmap.rw_lock);
	up_write(&ni->file.run_lock);

	err = wnd_extend(wnd, new_mft_total);

	ntfs_clear_mft_tail(sbi, sbi->mft.used, new_mft_total);

	err = _ni_write_inode(&ni->vfs_inode, 0);

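/*
 * Sizing sketch for the two-step resize above (hypothetical numbers):
 * with 1K records (record_bits == 10), new_mft_total is rounded up to
 * a multiple of 128 records, so $MFT::DATA grows to
 * new_mft_total << 10 bytes, while $MFT::BITMAP needs
 * bitmap_size(new_mft_total) bytes: one bit per record, rounded up to
 * an 8-byte boundary.
 */
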
/*
 * ntfs_look_free_mft - Look for a free MFT record.
 */
int ntfs_look_free_mft(struct ntfs_sb_info *sbi, CLST *rno, bool mft,
		       struct ntfs_inode *ni, struct mft_inode **mi)
{
	size_t zbit, zlen, from, to, fr;
	struct super_block *sb = sbi->sb;
	struct wnd_bitmap *wnd = &sbi->mft.bitmap;

	static_assert(sizeof(sbi->mft.reserved_bitmap) * 8 >=
		      MFT_REC_FREE - MFT_REC_RESERVED);

	down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);

	zlen = wnd_zone_len(wnd);

	/* Always reserve space for MFT. */
	if (zlen) {
		zbit = wnd_zone_bit(wnd);
		wnd_zone_set(wnd, zbit + 1, zlen - 1);
	}

	/* No MFT zone. Find the free MFT record nearest to 0. */
	if (!wnd_find(wnd, 1, MFT_REC_FREE, 0, &zbit)) {
		mft_total = wnd->nbits;

		err = ntfs_extend_mft(sbi);
	}

	if (!mft || MFT_REC_FREE == sbi->mft.next_reserved)

	/*
	 * Look for a free record in the reserved area [11-16) ==
	 * [MFT_REC_RESERVED, MFT_REC_FREE). The MFT bitmap always
	 * marks it as used.
	 */
	if (!sbi->mft.reserved_bitmap) {
		/* Once per session create an internal bitmap for 5 bits. */
		sbi->mft.reserved_bitmap = 0xFF;

		for (ir = MFT_REC_RESERVED; ir < MFT_REC_FREE; ir++) {
			struct ntfs_inode *ni;
			struct MFT_REC *mrec;

			ref.low = cpu_to_le32(ir);
			ref.seq = cpu_to_le16(ir);
			i = ntfs_iget5(sb, &ref, NULL);
			if (IS_ERR(i)) {
				ntfs_err(sb,
					 "Invalid reserved record %x",
					 ref.low);

			if (is_bad_inode(i)) {

			if (!is_rec_base(mrec))
				continue;

			if (mrec->hard_links)
				continue;

			if (ni_find_attr(ni, NULL, NULL, ATTR_NAME,
					 NULL, 0, NULL, NULL))
				continue;

			__clear_bit(ir - MFT_REC_RESERVED,
				    &sbi->mft.reserved_bitmap);
		}
	}

	/* Scan 5 bits for zero. Bit 0 == MFT_REC_RESERVED. */
	zbit = find_next_zero_bit(&sbi->mft.reserved_bitmap,
				  MFT_REC_FREE, MFT_REC_RESERVED);
	if (zbit >= MFT_REC_FREE) {
		sbi->mft.next_reserved = MFT_REC_FREE;
	}

	sbi->mft.next_reserved = zbit;

	zlen = zbit == MFT_REC_FREE ? (MFT_REC_USER - MFT_REC_FREE) : 4;
	if (zbit + zlen > wnd->nbits)
		zlen = wnd->nbits - zbit;

	while (zlen > 1 && !wnd_is_free(wnd, zbit, zlen))
		zlen -= 1;

	/* [zbit, zbit + zlen) will be used for the MFT itself. */
	from = sbi->mft.used;

	ntfs_clear_mft_tail(sbi, from, to);

	wnd_zone_set(wnd, zbit, zlen);

	/* The request is to get a record for general purposes. */
	if (sbi->mft.next_free < MFT_REC_USER)
		sbi->mft.next_free = MFT_REC_USER;

	if (sbi->mft.next_free >= sbi->mft.bitmap.nbits) {
	} else if (!wnd_find(wnd, 1, MFT_REC_USER, 0, &fr)) {
		sbi->mft.next_free = sbi->mft.bitmap.nbits;
	} else {
		*rno = fr;
		sbi->mft.next_free = *rno + 1;
	}

	err = ntfs_extend_mft(sbi);

	if (ni && !ni_add_subrecord(ni, *rno, mi)) {

	/* We have found a record that is not reserved for the next MFT. */
	if (*rno >= MFT_REC_FREE)
		wnd_set_used(wnd, *rno, 1);
	else if (*rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited)
		__set_bit(*rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);

	up_write(&wnd->rw_lock);

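/*
 * Sketch of the reserved-record bookkeeping used above: records
 * [MFT_REC_RESERVED, MFT_REC_FREE) are tracked in a small in-memory
 * bitmap where bit 0 corresponds to MFT_REC_RESERVED. A reserved
 * record is claimed and later released with:
 *
 *	__set_bit(rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
 *	...
 *	__clear_bit(rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
 */
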
/*
 * ntfs_mark_rec_free - Mark the record as free.
 * @is_mft: true if we are changing the MFT itself.
 */
void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno, bool is_mft)
{
	struct wnd_bitmap *wnd = &sbi->mft.bitmap;

	down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
	if (rno >= wnd->nbits)
		goto out;

	if (rno >= MFT_REC_FREE) {
		if (!wnd_is_used(wnd, rno, 1))
			ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
		else
			wnd_set_free(wnd, rno, 1);
	} else if (rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited) {
		__clear_bit(rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
	}

	if (rno < wnd_zone_bit(wnd))
		wnd_zone_set(wnd, rno, 1);
	else if (rno < sbi->mft.next_free && rno >= MFT_REC_USER)
		sbi->mft.next_free = rno;

out:
	up_write(&wnd->rw_lock);

/*
 * ntfs_clear_mft_tail - Format empty records [from, to).
 *
 * sbi->mft.bitmap is locked for write.
 */
int ntfs_clear_mft_tail(struct ntfs_sb_info *sbi, size_t from, size_t to)
{
	struct runs_tree *run;
	struct ntfs_inode *ni;

	rs = sbi->record_size;

	down_read(&ni->file.run_lock);
	vbo = (u64)from * rs;
	for (; from < to; from++, vbo += rs) {
		struct ntfs_buffers nb;

		err = ntfs_get_bh(sbi, run, vbo, rs, &nb);

		err = ntfs_write_bh(sbi, &sbi->new_rec->rhdr, &nb, 0);
	}

	sbi->mft.used = from;
	up_read(&ni->file.run_lock);

/*
 * ntfs_refresh_zone - Refresh the MFT zone.
 *
 * sbi->used.bitmap is locked for rw.
 * sbi->mft.bitmap is locked for write.
 * sbi->mft.ni->file.run_lock is locked for write.
 */
int ntfs_refresh_zone(struct ntfs_sb_info *sbi)
{
	struct wnd_bitmap *wnd = &sbi->used.bitmap;
	struct ntfs_inode *ni = sbi->mft.ni;

	/* Do not change anything if the MFT zone is non-empty. */
	if (wnd_zone_len(wnd))
		return 0;

	vcn = bytes_to_cluster(sbi,
			       (u64)sbi->mft.bitmap.nbits << sbi->record_bits);

	if (!run_lookup_entry(&ni->file.run, vcn - 1, &lcn, &len, NULL))
		lcn = SPARSE_LCN;

	/* We should always find the last LCN for the MFT. */
	if (lcn == SPARSE_LCN)
		return -EINVAL;

	lcn_s = lcn + 1;

	/* Try to allocate clusters after the last MFT run. */
	zlen = wnd_find(wnd, sbi->zone_max, lcn_s, 0, &lcn_s);
	wnd_zone_set(wnd, lcn_s, zlen);

/*
 * ntfs_update_mftmirr - Update $MFTMirr data.
 */
void ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
{
	struct super_block *sb = sbi->sb;
	u32 blocksize, bytes;
	sector_t block1, block2;

	/*
	 * sb can be NULL here. In this case sbi->flags should be 0 too.
	 */
	if (!sb || !(sbi->flags & NTFS_FLAGS_MFTMIRR))
		return;

	blocksize = sb->s_blocksize;
	bytes = sbi->mft.recs_mirr << sbi->record_bits;
	block1 = sbi->mft.lbo >> sb->s_blocksize_bits;
	block2 = sbi->mft.lbo2 >> sb->s_blocksize_bits;

	for (; bytes >= blocksize; bytes -= blocksize) {
		struct buffer_head *bh1, *bh2;

		bh1 = sb_bread(sb, block1++);

		bh2 = sb_getblk(sb, block2++);

		if (buffer_locked(bh2))
			__wait_on_buffer(bh2);

		memcpy(bh2->b_data, bh1->b_data, blocksize);
		set_buffer_uptodate(bh2);
		mark_buffer_dirty(bh2);

		err = wait ? sync_dirty_buffer(bh2) : 0;
	}

	sbi->flags &= ~NTFS_FLAGS_MFTMIRR;

/*
 * ntfs_bad_inode
 *
 * Mark the inode as bad and mark the fs as 'dirty'.
 */
void ntfs_bad_inode(struct inode *inode, const char *hint)
{
	struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;

	ntfs_inode_err(inode, "%s", hint);
	make_bad_inode(inode);
	ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
}

/*
 * ntfs_set_state
 *
 * Mount: ntfs_set_state(NTFS_DIRTY_DIRTY)
 * Umount: ntfs_set_state(NTFS_DIRTY_CLEAR)
 * NTFS error: ntfs_set_state(NTFS_DIRTY_ERROR)
 */
int ntfs_set_state(struct ntfs_sb_info *sbi, enum NTFS_DIRTY_FLAGS dirty)
{
	struct VOLUME_INFO *info;
	struct mft_inode *mi;
	struct ntfs_inode *ni;

	/*
	 * Do not change state if fs was real_dirty.
	 * Do not change state if fs is already dirty (clear).
	 * Do not change anything if mounted read-only.
	 */
	if (sbi->volume.real_dirty || sb_rdonly(sbi->sb))
		return 0;

	/* Check the cached value. */
	if ((dirty == NTFS_DIRTY_CLEAR ? 0 : VOLUME_FLAG_DIRTY) ==
	    (sbi->volume.flags & VOLUME_FLAG_DIRTY))
		return 0;

	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_DIRTY);

	attr = ni_find_attr(ni, NULL, NULL, ATTR_VOL_INFO, NULL, 0, NULL, &mi);

	info = resident_data_ex(attr, SIZEOF_ATTRIBUTE_VOLUME_INFO);

	info_flags = info->flags;

	switch (dirty) {
	case NTFS_DIRTY_ERROR:
		ntfs_notice(sbi->sb, "Mark volume as dirty due to NTFS errors");
		sbi->volume.real_dirty = true;
		fallthrough;
	case NTFS_DIRTY_DIRTY:
		info->flags |= VOLUME_FLAG_DIRTY;
		break;
	case NTFS_DIRTY_CLEAR:
		info->flags &= ~VOLUME_FLAG_DIRTY;
		break;
	}

	/* Cache current volume flags. */
	if (info_flags != info->flags) {
		sbi->volume.flags = info->flags;
	}

	mark_inode_dirty(&ni->vfs_inode);
	/* verify(!ntfs_update_mftmirr()); */

	/*
	 * If we used wait=1, sync_inode_metadata waits for the I/O on the
	 * inode to finish. It hangs when the media is removed, so wait=0
	 * is passed down to sync_inode_metadata and filemap_fdatawrite is
	 * used for the data blocks.
	 */
	err = sync_inode_metadata(&ni->vfs_inode, 0);
	if (!err)
		err = filemap_fdatawrite(ni->vfs_inode.i_mapping);

/*
 * security_hash - Calculate a hash of the security descriptor.
 */
static inline __le32 security_hash(const void *sd, size_t bytes)
{
	u32 hash = 0;
	const __le32 *ptr = sd;

	bytes >>= 2;
	while (bytes--)
		hash = ((hash >> 0x1D) | (hash << 3)) + le32_to_cpu(*ptr++);
	return cpu_to_le32(hash);
}

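/*
 * Reference sketch of the hash above: a rotate left by 3 followed by an
 * add, over the descriptor taken as 32-bit little-endian words. For
 * example, the two words { 0x01020304, 0x05060708 } hash to
 * rol32(0, 3) + 0x01020304 = 0x01020304, then
 * rol32(0x01020304, 3) + 0x05060708 = 0x08101820 + 0x05060708
 * = 0x0D171F28.
 */
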
int ntfs_sb_read(struct super_block *sb, u64 lbo, size_t bytes, void *buffer)
{
	struct block_device *bdev = sb->s_bdev;
	u32 blocksize = sb->s_blocksize;
	u64 block = lbo >> sb->s_blocksize_bits;
	u32 off = lbo & (blocksize - 1);
	u32 op = blocksize - off;

	for (; bytes; block += 1, off = 0, op = blocksize) {
		struct buffer_head *bh = __bread(bdev, block, blocksize);

		memcpy(buffer, bh->b_data + off, op);

		buffer = Add2Ptr(buffer, op);
	}

int ntfs_sb_write(struct super_block *sb, u64 lbo, size_t bytes,
		  const void *buf, int wait)
{
	u32 blocksize = sb->s_blocksize;
	struct block_device *bdev = sb->s_bdev;
	sector_t block = lbo >> sb->s_blocksize_bits;
	u32 off = lbo & (blocksize - 1);
	u32 op = blocksize - off;
	struct buffer_head *bh;

	if (!wait && (sb->s_flags & SB_SYNCHRONOUS))
		wait = 1;

	for (; bytes; block += 1, off = 0, op = blocksize) {
		if (op < blocksize) {
			bh = __bread(bdev, block, blocksize);
			if (!bh) {
				ntfs_err(sb, "failed to read block %llx",
					 (u64)block);
			}
		} else {
			bh = __getblk(bdev, block, blocksize);
		}

		if (buffer_locked(bh))
			__wait_on_buffer(bh);

		if (buf) {
			memcpy(bh->b_data + off, buf, op);
			buf = Add2Ptr(buf, op);
		} else {
			memset(bh->b_data + off, -1, op);
		}

		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);

		if (wait) {
			int err = sync_dirty_buffer(bh);

			if (err) {
				ntfs_err(sb,
					 "failed to sync buffer at block %llx, error %d",
					 (u64)block, err);
			}
		}
	}

int ntfs_sb_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		      u64 vbo, const void *buf, size_t bytes, int sync)
{
	struct super_block *sb = sbi->sb;
	u8 cluster_bits = sbi->cluster_bits;
	u32 off = vbo & sbi->cluster_mask;
	CLST lcn, clen, vcn = vbo >> cluster_bits, vcn_next;

	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
		return -ENOENT;

	if (lcn == SPARSE_LCN)
		return -EINVAL;

	lbo = ((u64)lcn << cluster_bits) + off;
	len = ((u64)clen << cluster_bits) - off;

	for (;;) {
		u32 op = min_t(u64, len, bytes);
		int err = ntfs_sb_write(sb, lbo, op, buf, sync);

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next)
			return -ENOENT;

		if (lcn == SPARSE_LCN)
			return -EINVAL;

		if (buf)
			buf = Add2Ptr(buf, op);

		lbo = ((u64)lcn << cluster_bits);
		len = ((u64)clen << cluster_bits);
	}

struct buffer_head *ntfs_bread_run(struct ntfs_sb_info *sbi,
				   const struct runs_tree *run, u64 vbo)
{
	struct super_block *sb = sbi->sb;
	u8 cluster_bits = sbi->cluster_bits;

	if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, NULL, NULL))
		return ERR_PTR(-ENOENT);

	lbo = ((u64)lcn << cluster_bits) + (vbo & sbi->cluster_mask);

	return ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
}

int ntfs_read_run_nb(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		     u64 vbo, void *buf, u32 bytes, struct ntfs_buffers *nb)
{
	struct super_block *sb = sbi->sb;
	u32 blocksize = sb->s_blocksize;
	u8 cluster_bits = sbi->cluster_bits;
	u32 off = vbo & sbi->cluster_mask;
	CLST vcn_next, vcn = vbo >> cluster_bits;
	struct buffer_head *bh;

	if (!run) {
		/* First reading of $Volume + $MFTMirr + $LogFile goes here. */
		if (vbo > MFT_REC_VOL * sbi->record_size) {
			err = -ENOENT;
			goto out;
		}

		/* Use the absolute boot 'MFTCluster' to read the record. */
		lbo = vbo + sbi->mft.lbo;
		len = sbi->record_size;
	} else if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
		err = -ENOENT;
		goto out;
	} else {
		if (lcn == SPARSE_LCN) {
			err = -EINVAL;
			goto out;
		}

		lbo = ((u64)lcn << cluster_bits) + off;
		len = ((u64)clen << cluster_bits) - off;
	}

	off = lbo & (blocksize - 1);

	for (;;) {
		u32 len32 = len >= bytes ? bytes : len;
		sector_t block = lbo >> sb->s_blocksize_bits;

		do {
			u32 op = blocksize - off;

			bh = ntfs_bread(sb, block);

			if (buf) {
				memcpy(buf, bh->b_data + off, op);
				buf = Add2Ptr(buf, op);
			}

			if (!nb) {
				put_bh(bh);
			} else if (nbh >= ARRAY_SIZE(nb->bh)) {
				err = -EINVAL;
				goto out;
			}
		} while (len32);

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next) {
			err = -ENOENT;
			goto out;
		}

		if (lcn == SPARSE_LCN) {
			err = -EINVAL;
			goto out;
		}

		lbo = ((u64)lcn << cluster_bits);
		len = ((u64)clen << cluster_bits);
	}

out:
	while (nbh) {
		put_bh(nb->bh[--nbh]);
	}

/*
 * Return: < 0 if error, 0 if ok, -E_NTFS_FIXUP if fixups need to be updated.
 */
int ntfs_read_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
		 struct NTFS_RECORD_HEADER *rhdr, u32 bytes,
		 struct ntfs_buffers *nb)
{
	int err = ntfs_read_run_nb(sbi, run, vbo, rhdr, bytes, nb);

	if (err)
		return err;
	return ntfs_fix_post_read(rhdr, nb->bytes, true);
}

int ntfs_get_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
		u32 bytes, struct ntfs_buffers *nb)
{
	struct super_block *sb = sbi->sb;
	u32 blocksize = sb->s_blocksize;
	u8 cluster_bits = sbi->cluster_bits;
	CLST vcn_next, vcn = vbo >> cluster_bits;

	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
		err = -ENOENT;
		goto out;
	}

	off = vbo & sbi->cluster_mask;
	lbo = ((u64)lcn << cluster_bits) + off;
	len = ((u64)clen << cluster_bits) - off;

	nb->off = off = lbo & (blocksize - 1);

	for (;;) {
		u32 len32 = min_t(u64, len, bytes);
		sector_t block = lbo >> sb->s_blocksize_bits;

		do {
			struct buffer_head *bh;

			if (nbh >= ARRAY_SIZE(nb->bh)) {
				err = -EINVAL;
				goto out;
			}

			op = blocksize - off;

			if (op == blocksize) {
				bh = sb_getblk(sb, block);

				if (buffer_locked(bh))
					__wait_on_buffer(bh);
				set_buffer_uptodate(bh);
			} else {
				bh = ntfs_bread(sb, block);
			}
		} while (len32);

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next) {
			err = -ENOENT;
			goto out;
		}

		lbo = ((u64)lcn << cluster_bits);
		len = ((u64)clen << cluster_bits);
	}

out:
	while (nbh) {
		put_bh(nb->bh[--nbh]);
	}

int ntfs_write_bh(struct ntfs_sb_info *sbi, struct NTFS_RECORD_HEADER *rhdr,
		  struct ntfs_buffers *nb, int sync)
{
	struct super_block *sb = sbi->sb;
	u32 block_size = sb->s_blocksize;
	u32 bytes = nb->bytes;
	u16 fo = le16_to_cpu(rhdr->fix_off);
	u16 fn = le16_to_cpu(rhdr->fix_num);

	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
	    fn * SECTOR_SIZE > bytes) {
		return -EINVAL;
	}

	for (idx = 0; bytes && idx < nb->nbufs; idx += 1, off = 0) {
		u32 op = block_size - off;
		struct buffer_head *bh = nb->bh[idx];
		__le16 *ptr, *end_data;

		if (buffer_locked(bh))
			__wait_on_buffer(bh);

		bh_data = bh->b_data + off;
		end_data = Add2Ptr(bh_data, op);
		memcpy(bh_data, rhdr, op);

		if (!idx) {
			fixup = Add2Ptr(bh_data, fo);
			sample = *fixup;
			t16 = le16_to_cpu(sample);
			if (t16 >= 0x7FFF) {
				sample = *fixup = cpu_to_le16(1);
			} else {
				sample = cpu_to_le16(t16 + 1);
				*fixup = sample;
			}

			*(__le16 *)Add2Ptr(rhdr, fo) = sample;
		}

		ptr = Add2Ptr(bh_data, SECTOR_SIZE - sizeof(short));

		do {
			*++fixup = *ptr;
			*ptr = sample;
			ptr += SECTOR_SIZE / sizeof(short);
		} while (ptr < end_data);

		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);

		if (sync) {
			int err2 = sync_dirty_buffer(bh);
		}

		rhdr = Add2Ptr(rhdr, op);
	}

/*
 * ntfs_bio_pages - Read/write pages from/to disk.
 */
int ntfs_bio_pages(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		   struct page **pages, u32 nr_pages, u64 vbo, u32 bytes,
		   enum req_op op)
{
	struct bio *new, *bio = NULL;
	struct super_block *sb = sbi->sb;
	struct block_device *bdev = sb->s_bdev;
	u8 cluster_bits = sbi->cluster_bits;
	CLST lcn, clen, vcn, vcn_next;
	u32 add, off, page_idx;
	struct blk_plug plug;

	blk_start_plug(&plug);

	/* Align vbo and bytes to 512-byte boundaries. */
	lbo = (vbo + bytes + 511) & ~511ull;
	vbo = vbo & ~511ull;
	bytes = lbo - vbo;

	vcn = vbo >> cluster_bits;
	if (!run_lookup_entry(run, vcn, &lcn, &clen, &run_idx)) {
		err = -ENOENT;
		goto out;
	}
	off = vbo & sbi->cluster_mask;

	lbo = ((u64)lcn << cluster_bits) + off;
	len = ((u64)clen << cluster_bits) - off;
new_bio:
	new = bio_alloc(bdev, nr_pages - page_idx, op, GFP_NOFS);
	if (bio) {
		bio_chain(bio, new);
		submit_bio(bio);
	}
	bio = new;
	bio->bi_iter.bi_sector = lbo >> 9;

	off = vbo & (PAGE_SIZE - 1);
	add = off + len > PAGE_SIZE ? (PAGE_SIZE - off) : len;

	if (bio_add_page(bio, page, add, off) < add)
		goto new_bio;

	if (add + off == PAGE_SIZE) {
		page_idx += 1;
		if (WARN_ON(page_idx >= nr_pages)) {
			err = -EINVAL;
			goto out;
		}
		page = pages[page_idx];
	}

	vcn_next = vcn + clen;
	if (!run_get_entry(run, ++run_idx, &vcn, &lcn, &clen) ||
	    vcn != vcn_next) {
		err = -ENOENT;
		goto out;
	}

	err = submit_bio_wait(bio);
out:
	blk_finish_plug(&plug);

/*
 * ntfs_bio_fill_1 - Helper for ntfs_loadlog_and_replay().
 *
 * Fill the on-disk $LogFile range with -1; this marks the logfile
 * as empty.
 */
int ntfs_bio_fill_1(struct ntfs_sb_info *sbi, const struct runs_tree *run)
{
	struct super_block *sb = sbi->sb;
	struct block_device *bdev = sb->s_bdev;
	u8 cluster_bits = sbi->cluster_bits;
	struct bio *new, *bio = NULL;
	struct blk_plug plug;

	fill = alloc_page(GFP_KERNEL);
	if (!fill)
		return -ENOMEM;

	kaddr = kmap_atomic(fill);
	memset(kaddr, -1, PAGE_SIZE);
	kunmap_atomic(kaddr);
	flush_dcache_page(fill);

	if (!run_lookup_entry(run, 0, &lcn, &clen, &run_idx)) {
		err = -ENOENT;
		goto out;
	}

	/*
	 * TODO: Try blkdev_issue_write_same.
	 */
	blk_start_plug(&plug);
	do {
		lbo = (u64)lcn << cluster_bits;
		len = (u64)clen << cluster_bits;
new_bio:
		new = bio_alloc(bdev, BIO_MAX_VECS, REQ_OP_WRITE, GFP_NOFS);
		if (bio) {
			bio_chain(bio, new);
			submit_bio(bio);
		}
		bio = new;
		bio->bi_iter.bi_sector = lbo >> 9;

		u32 add = len > PAGE_SIZE ? PAGE_SIZE : len;

		if (bio_add_page(bio, fill, add, 0) < add)
			goto new_bio;
	} while (run_get_entry(run, ++run_idx, NULL, &lcn, &clen));

	err = submit_bio_wait(bio);

	blk_finish_plug(&plug);

int ntfs_vbo_to_lbo(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		    u64 vbo, u64 *lbo, u64 *bytes)
{
	u8 cluster_bits = sbi->cluster_bits;

	if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, &len, NULL))
		return -ENOENT;

	off = vbo & sbi->cluster_mask;
	*lbo = lcn == SPARSE_LCN ? -1 : (((u64)lcn << cluster_bits) + off);
	*bytes = ((u64)len << cluster_bits) - off;

	return 0;
}

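/*
 * Worked example of the vbo -> lbo mapping above (hypothetical run):
 * with 4K clusters (cluster_bits == 12) and a run mapping VCN 2 to
 * LCN 100, vbo == 0x2345 gives vcn == 2 and off == 0x345, so
 * *lbo == (100 << 12) + 0x345 == 0x64345 and *bytes is the remaining
 * length of the run, (len << 12) - 0x345.
 */
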
struct ntfs_inode *ntfs_new_inode(struct ntfs_sb_info *sbi, CLST rno, bool dir)
{
	struct super_block *sb = sbi->sb;
	struct inode *inode = new_inode(sb);
	struct ntfs_inode *ni;

	if (!inode)
		return ERR_PTR(-ENOMEM);

	err = mi_format_new(&ni->mi, sbi, rno, dir ? RECORD_FLAG_DIR : 0,
			    false);

	if (insert_inode_locked(inode) < 0) {
		err = -EINVAL;
		goto out;
	}

out:
	if (err) {
		make_bad_inode(inode);
	}

/*
 * O:BAG:BAD:(A;OICI;FA;;;WD)
 * Owner S-1-5-32-544 (Administrators)
 * Group S-1-5-32-544 (Administrators)
 * ACE: allow S-1-1-0 (Everyone) with FILE_ALL_ACCESS
 */
const u8 s_default_security[] __aligned(8) = {
	0x01, 0x00, 0x04, 0x80, 0x30, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x02, 0x00, 0x1C, 0x00,
	0x01, 0x00, 0x00, 0x00, 0x00, 0x03, 0x14, 0x00, 0xFF, 0x01, 0x1F, 0x00,
	0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
	0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x20, 0x00, 0x00, 0x00,
	0x20, 0x02, 0x00, 0x00, 0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
	0x20, 0x00, 0x00, 0x00, 0x20, 0x02, 0x00, 0x00,
};

static_assert(sizeof(s_default_security) == 0x50);

static inline u32 sid_length(const struct SID *sid)
{
	return struct_size(sid, SubAuthority, sid->SubAuthorityCount);
}

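/*
 * Layout note for sid_length(): a relative SID is an 8-byte header
 * (Revision, SubAuthorityCount, a 6-byte IdentifierAuthority) followed
 * by SubAuthorityCount 32-bit sub-authorities. For example,
 * S-1-5-32-544 has two sub-authorities and occupies 8 + 2 * 4 == 16
 * bytes.
 */
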
/*
 * is_acl_valid
 *
 * Thanks to Mark Harmstone for the idea.
 */
static bool is_acl_valid(const struct ACL *acl, u32 len)
{
	const struct ACE_HEADER *ace;
	u16 ace_count, ace_size;

	if (acl->AclRevision != ACL_REVISION &&
	    acl->AclRevision != ACL_REVISION_DS) {
		/*
		 * This value should be ACL_REVISION, unless the ACL contains an
		 * object-specific ACE, in which case this value must be
		 * ACL_REVISION_DS. All ACEs in an ACL must be at the same
		 * revision level.
		 */
		return false;
	}

	if (le16_to_cpu(acl->AclSize) > len)
		return false;

	len -= sizeof(struct ACL);
	ace = (struct ACE_HEADER *)&acl[1];
	ace_count = le16_to_cpu(acl->AceCount);

	for (i = 0; i < ace_count; i++) {
		if (len < sizeof(struct ACE_HEADER))
			return false;

		ace_size = le16_to_cpu(ace->AceSize);

		ace = Add2Ptr(ace, ace_size);
	}

bool is_sd_valid(const struct SECURITY_DESCRIPTOR_RELATIVE *sd, u32 len)
{
	u32 sd_owner, sd_group, sd_sacl, sd_dacl;

	if (len < sizeof(struct SECURITY_DESCRIPTOR_RELATIVE))
		return false;

	if (sd->Revision != 1)
		return false;

	if (!(sd->Control & SE_SELF_RELATIVE))
		return false;

	sd_owner = le32_to_cpu(sd->Owner);
	if (sd_owner) {
		const struct SID *owner = Add2Ptr(sd, sd_owner);

		if (sd_owner + offsetof(struct SID, SubAuthority) > len)
			return false;

		if (owner->Revision != 1)
			return false;

		if (sd_owner + sid_length(owner) > len)
			return false;
	}

	sd_group = le32_to_cpu(sd->Group);
	if (sd_group) {
		const struct SID *group = Add2Ptr(sd, sd_group);

		if (sd_group + offsetof(struct SID, SubAuthority) > len)
			return false;

		if (group->Revision != 1)
			return false;

		if (sd_group + sid_length(group) > len)
			return false;
	}

	sd_sacl = le32_to_cpu(sd->Sacl);
	if (sd_sacl) {
		const struct ACL *sacl = Add2Ptr(sd, sd_sacl);

		if (sd_sacl + sizeof(struct ACL) > len)
			return false;

		if (!is_acl_valid(sacl, len - sd_sacl))
			return false;
	}

	sd_dacl = le32_to_cpu(sd->Dacl);
	if (sd_dacl) {
		const struct ACL *dacl = Add2Ptr(sd, sd_dacl);

		if (sd_dacl + sizeof(struct ACL) > len)
			return false;

		if (!is_acl_valid(dacl, len - sd_dacl))
			return false;
	}

	return true;
}

/*
 * ntfs_security_init - Load and parse $Secure.
 */
int ntfs_security_init(struct ntfs_sb_info *sbi)
{
	struct super_block *sb = sbi->sb;
	struct inode *inode;
	struct ntfs_inode *ni;
	struct ATTRIB *attr;
	struct ATTR_LIST_ENTRY *le;
	struct NTFS_DE_SII *sii_e;
	struct ntfs_fnd *fnd_sii = NULL;
	const struct INDEX_ROOT *root_sii;
	const struct INDEX_ROOT *root_sdh;
	struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
	struct ntfs_index *indx_sii = &sbi->security.index_sii;

	ref.low = cpu_to_le32(MFT_REC_SECURE);
	ref.seq = cpu_to_le16(MFT_REC_SECURE);

	inode = ntfs_iget5(sb, &ref, &NAME_SECURE);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		ntfs_err(sb, "Failed to load $Secure (%d).", err);
	}

	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SDH_NAME,
			    ARRAY_SIZE(SDH_NAME), NULL, NULL);
	if (!attr ||
	    !(root_sdh = resident_data_ex(attr, sizeof(struct INDEX_ROOT))) ||
	    root_sdh->type != ATTR_ZERO ||
	    root_sdh->rule != NTFS_COLLATION_TYPE_SECURITY_HASH ||
	    offsetof(struct INDEX_ROOT, ihdr) +
			    le32_to_cpu(root_sdh->ihdr.used) >
		    le32_to_cpu(attr->res.data_size)) {
		ntfs_err(sb, "$Secure::$SDH is corrupted.");
		err = -EINVAL;
		goto out;
	}

	err = indx_init(indx_sdh, sbi, attr, INDEX_MUTEX_SDH);
	if (err) {
		ntfs_err(sb, "Failed to initialize $Secure::$SDH (%d).", err);
		goto out;
	}

	attr = ni_find_attr(ni, attr, &le, ATTR_ROOT, SII_NAME,
			    ARRAY_SIZE(SII_NAME), NULL, NULL);
	if (!attr ||
	    !(root_sii = resident_data_ex(attr, sizeof(struct INDEX_ROOT))) ||
	    root_sii->type != ATTR_ZERO ||
	    root_sii->rule != NTFS_COLLATION_TYPE_UINT ||
	    offsetof(struct INDEX_ROOT, ihdr) +
			    le32_to_cpu(root_sii->ihdr.used) >
		    le32_to_cpu(attr->res.data_size)) {
		ntfs_err(sb, "$Secure::$SII is corrupted.");
		err = -EINVAL;
		goto out;
	}

	err = indx_init(indx_sii, sbi, attr, INDEX_MUTEX_SII);
	if (err) {
		ntfs_err(sb, "Failed to initialize $Secure::$SII (%d).", err);
		goto out;
	}

	fnd_sii = fnd_get();

	sds_size = inode->i_size;

	/* Find the last valid Id. */
	sbi->security.next_id = SECURITY_ID_FIRST;
	/* Always write new security descriptors at the end of the bucket. */
	sbi->security.next_off =
		ALIGN(sds_size - SecurityDescriptorsBlockSize, 16);

	for (;;) {
		err = indx_find_raw(indx_sii, ni, root_sii, &ne, &off, fnd_sii);
		if (err || !ne)
			break;

		sii_e = (struct NTFS_DE_SII *)ne;
		if (le16_to_cpu(ne->view.data_size) < SIZEOF_SECURITY_HDR)
			continue;

		next_id = le32_to_cpu(sii_e->sec_id) + 1;
		if (next_id >= sbi->security.next_id)
			sbi->security.next_id = next_id;
	}

	sbi->security.ni = ni;

/*
 * ntfs_get_security_by_id - Read a security descriptor by id.
 */
int ntfs_get_security_by_id(struct ntfs_sb_info *sbi, __le32 security_id,
			    struct SECURITY_DESCRIPTOR_RELATIVE **sd,
			    size_t *size)
{
	struct ntfs_inode *ni = sbi->security.ni;
	struct ntfs_index *indx = &sbi->security.index_sii;
	struct NTFS_DE_SII *sii_e;
	struct ntfs_fnd *fnd_sii;
	struct SECURITY_HDR d_security;
	const struct INDEX_ROOT *root_sii;

	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);

	fnd_sii = fnd_get();

	root_sii = indx_get_root(indx, ni, NULL, NULL);

	/* Try to find this SECURITY descriptor in the SII indexes. */
	err = indx_find(indx, ni, root_sii, &security_id, sizeof(security_id),
			NULL, &diff, (struct NTFS_DE **)&sii_e, fnd_sii);

	t32 = le32_to_cpu(sii_e->sec_hdr.size);
	if (t32 < SIZEOF_SECURITY_HDR) {
		err = -EINVAL;
		goto out;
	}

	if (t32 > SIZEOF_SECURITY_HDR + 0x10000) {
		/* The security descriptor looks too big; 0x10000 is an arbitrary limit. */
		err = -EFBIG;
		goto out;
	}

	*size = t32 - SIZEOF_SECURITY_HDR;

	p = kmalloc(*size, GFP_NOFS);
	if (!p) {
		err = -ENOMEM;
		goto out;
	}

	err = ntfs_read_run_nb(sbi, &ni->file.run,
			       le64_to_cpu(sii_e->sec_hdr.off), &d_security,
			       sizeof(d_security), NULL);

	if (memcmp(&d_security, &sii_e->sec_hdr, SIZEOF_SECURITY_HDR)) {
		err = -EINVAL;
		goto out;
	}

	err = ntfs_read_run_nb(sbi, &ni->file.run,
			       le64_to_cpu(sii_e->sec_hdr.off) +
				       SIZEOF_SECURITY_HDR,
			       p, *size, NULL);

/*
 * ntfs_insert_security - Insert a security descriptor into $Secure::SDS.
 *
 * SECURITY Descriptor Stream data is organized into chunks of 256K bytes
 * and it contains a mirror copy of each security descriptor. When writing
 * to a security descriptor at location X, another copy will be written at
 * location (X+256K).
 * When writing a security descriptor that would cross the 256K boundary,
 * the pointer is instead advanced by 256K to skip over the mirror portion.
 */
int ntfs_insert_security(struct ntfs_sb_info *sbi,
			 const struct SECURITY_DESCRIPTOR_RELATIVE *sd,
			 u32 size_sd, __le32 *security_id, bool *inserted)
{
	struct ntfs_inode *ni = sbi->security.ni;
	struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
	struct ntfs_index *indx_sii = &sbi->security.index_sii;
	struct NTFS_DE_SDH *e;
	struct NTFS_DE_SDH sdh_e;
	struct NTFS_DE_SII sii_e;
	struct SECURITY_HDR *d_security;
	u32 new_sec_size = size_sd + SIZEOF_SECURITY_HDR;
	u32 aligned_sec_size = ALIGN(new_sec_size, 16);
	struct SECURITY_KEY hash_key;
	struct ntfs_fnd *fnd_sdh = NULL;
	const struct INDEX_ROOT *root_sdh;
	const struct INDEX_ROOT *root_sii;
	u64 mirr_off, new_sds_size;

	static_assert((1 << Log2OfSecurityDescriptorsBlockSize) ==
		      SecurityDescriptorsBlockSize);

	hash_key.hash = security_hash(sd, size_sd);
	hash_key.sec_id = SECURITY_ID_INVALID;

	*security_id = SECURITY_ID_INVALID;

	/* Allocate a temporary buffer. */
	d_security = kzalloc(aligned_sec_size, GFP_NOFS);

	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);

	fnd_sdh = fnd_get();

	root_sdh = indx_get_root(indx_sdh, ni, NULL, NULL);

	root_sii = indx_get_root(indx_sii, ni, NULL, NULL);

	/*
	 * Check if such a security descriptor already exists.
	 * Use the "SDH" index and the hash to get its offset in "SDS".
	 */
	err = indx_find(indx_sdh, ni, root_sdh, &hash_key, sizeof(hash_key),
			&d_security->key.sec_id, &diff, (struct NTFS_DE **)&e,
			fnd_sdh);

	while (e) {
		if (le32_to_cpu(e->sec_hdr.size) == new_sec_size) {
			err = ntfs_read_run_nb(sbi, &ni->file.run,
					       le64_to_cpu(e->sec_hdr.off),
					       d_security, new_sec_size, NULL);

			if (le32_to_cpu(d_security->size) == new_sec_size &&
			    d_security->key.hash == hash_key.hash &&
			    !memcmp(d_security + 1, sd, size_sd)) {
				*security_id = d_security->key.sec_id;
				/* Such a security descriptor already exists. */
				err = 0;
				goto out;
			}
		}

		err = indx_find_sort(indx_sdh, ni, root_sdh,
				     (struct NTFS_DE **)&e, fnd_sdh);

		if (!e || e->key.hash != hash_key.hash)
			break;
	}

	/* Zero unused space. */
	next = sbi->security.next_off & (SecurityDescriptorsBlockSize - 1);
	left = SecurityDescriptorsBlockSize - next;

	/* Zero the gap until SecurityDescriptorsBlockSize. */
	if (left < new_sec_size) {
		/* Zero "left" bytes from sbi->security.next_off. */
		sbi->security.next_off += SecurityDescriptorsBlockSize + left;
	}

	/* Zero the tail of the previous security descriptor. */
	//used = ni->vfs_inode.i_size & (SecurityDescriptorsBlockSize - 1);

	/*
	 * Example:
	 * 0x40438 == ni->vfs_inode.i_size
	 * 0x00440 == sbi->security.next_off
	 * need to zero [0x438-0x440)
	 *
	 * if (next > used) {
	 *	u32 tozero = next - used;
	 *	zero "tozero" bytes from sbi->security.next_off - tozero
	 * }
	 */

	/* Format the new security descriptor. */
	d_security->key.hash = hash_key.hash;
	d_security->key.sec_id = cpu_to_le32(sbi->security.next_id);
	d_security->off = cpu_to_le64(sbi->security.next_off);
	d_security->size = cpu_to_le32(new_sec_size);
	memcpy(d_security + 1, sd, size_sd);

	/* Write the main SDS bucket. */
	err = ntfs_sb_write_run(sbi, &ni->file.run, sbi->security.next_off,
				d_security, aligned_sec_size, 0);

	mirr_off = sbi->security.next_off + SecurityDescriptorsBlockSize;
	new_sds_size = mirr_off + aligned_sec_size;

	if (new_sds_size > ni->vfs_inode.i_size) {
		err = attr_set_size(ni, ATTR_DATA, SDS_NAME,
				    ARRAY_SIZE(SDS_NAME), &ni->file.run,
				    new_sds_size, &new_sds_size, false, NULL);
	}

	/* Write the mirror SDS bucket. */
	err = ntfs_sb_write_run(sbi, &ni->file.run, mirr_off, d_security,
				aligned_sec_size, 0);

	/* Fill the SII entry. */
	sii_e.de.view.data_off =
		cpu_to_le16(offsetof(struct NTFS_DE_SII, sec_hdr));
	sii_e.de.view.data_size = cpu_to_le16(SIZEOF_SECURITY_HDR);
	sii_e.de.view.res = 0;
	sii_e.de.size = cpu_to_le16(SIZEOF_SII_DIRENTRY);
	sii_e.de.key_size = cpu_to_le16(sizeof(d_security->key.sec_id));
	sii_e.sec_id = d_security->key.sec_id;
	memcpy(&sii_e.sec_hdr, d_security, SIZEOF_SECURITY_HDR);

	err = indx_insert_entry(indx_sii, ni, &sii_e.de, NULL, NULL, 0);

	/* Fill the SDH entry. */
	sdh_e.de.view.data_off =
		cpu_to_le16(offsetof(struct NTFS_DE_SDH, sec_hdr));
	sdh_e.de.view.data_size = cpu_to_le16(SIZEOF_SECURITY_HDR);
	sdh_e.de.view.res = 0;
	sdh_e.de.size = cpu_to_le16(SIZEOF_SDH_DIRENTRY);
	sdh_e.de.key_size = cpu_to_le16(sizeof(sdh_e.key));
	sdh_e.key.hash = d_security->key.hash;
	sdh_e.key.sec_id = d_security->key.sec_id;
	memcpy(&sdh_e.sec_hdr, d_security, SIZEOF_SECURITY_HDR);
	sdh_e.magic[0] = cpu_to_le16('I');
	sdh_e.magic[1] = cpu_to_le16('I');

	err = indx_insert_entry(indx_sdh, ni, &sdh_e.de, (void *)(size_t)1,
				NULL, 0);

	*security_id = d_security->key.sec_id;
	if (inserted)
		*inserted = true;

	/* Update the Id and offset for the next descriptor. */
	sbi->security.next_id += 1;
	sbi->security.next_off += aligned_sec_size;

	mark_inode_dirty(&ni->vfs_inode);

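/*
 * Offset sketch for the SDS mirroring described above (hypothetical
 * numbers): with SecurityDescriptorsBlockSize == 0x40000 (256K), a
 * descriptor written at next_off == 0x10000 gets its mirror at
 * mirr_off = 0x10000 + 0x40000 = 0x50000, and the stream must be at
 * least mirr_off + aligned_sec_size bytes long before the copy is
 * written.
 */
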
/*
 * ntfs_reparse_init - Load and parse $Extend/$Reparse.
 */
int ntfs_reparse_init(struct ntfs_sb_info *sbi)
{
	struct ntfs_inode *ni = sbi->reparse.ni;
	struct ntfs_index *indx = &sbi->reparse.index_r;
	struct ATTRIB *attr;
	struct ATTR_LIST_ENTRY *le;
	const struct INDEX_ROOT *root_r;

	if (!ni)
		return 0;

	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SR_NAME,
			    ARRAY_SIZE(SR_NAME), NULL, NULL);
	if (!attr) {
		err = -EINVAL;
		goto out;
	}

	root_r = resident_data(attr);
	if (root_r->type != ATTR_ZERO ||
	    root_r->rule != NTFS_COLLATION_TYPE_UINTS) {
		err = -EINVAL;
		goto out;
	}

	err = indx_init(indx, sbi, attr, INDEX_MUTEX_SR);

/*
 * ntfs_objid_init - Load and parse $Extend/$ObjId.
 */
int ntfs_objid_init(struct ntfs_sb_info *sbi)
{
	struct ntfs_inode *ni = sbi->objid.ni;
	struct ntfs_index *indx = &sbi->objid.index_o;
	struct ATTRIB *attr;
	struct ATTR_LIST_ENTRY *le;
	const struct INDEX_ROOT *root;

	if (!ni)
		return 0;

	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SO_NAME,
			    ARRAY_SIZE(SO_NAME), NULL, NULL);
	if (!attr) {
		err = -EINVAL;
		goto out;
	}

	root = resident_data(attr);
	if (root->type != ATTR_ZERO ||
	    root->rule != NTFS_COLLATION_TYPE_UINTS) {
		err = -EINVAL;
		goto out;
	}

	err = indx_init(indx, sbi, attr, INDEX_MUTEX_SO);

int ntfs_objid_remove(struct ntfs_sb_info *sbi, struct GUID *guid)
{
	struct ntfs_inode *ni = sbi->objid.ni;
	struct ntfs_index *indx = &sbi->objid.index_o;

	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_OBJID);

	err = indx_delete_entry(indx, ni, guid, sizeof(*guid), NULL);

	mark_inode_dirty(&ni->vfs_inode);

int ntfs_insert_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
			const struct MFT_REF *ref)
{
	struct ntfs_inode *ni = sbi->reparse.ni;
	struct ntfs_index *indx = &sbi->reparse.index_r;
	struct NTFS_DE_R re;

	memset(&re, 0, sizeof(re));

	re.de.view.data_off = cpu_to_le16(offsetof(struct NTFS_DE_R, zero));
	re.de.size = cpu_to_le16(sizeof(struct NTFS_DE_R));
	re.de.key_size = cpu_to_le16(sizeof(re.key));

	re.key.ReparseTag = rtag;
	memcpy(&re.key.ref, ref, sizeof(*ref));

	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);

	err = indx_insert_entry(indx, ni, &re.de, NULL, NULL, 0);

	mark_inode_dirty(&ni->vfs_inode);

int ntfs_remove_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
			const struct MFT_REF *ref)
{
	struct ntfs_inode *ni = sbi->reparse.ni;
	struct ntfs_index *indx = &sbi->reparse.index_r;
	struct ntfs_fnd *fnd = NULL;
	struct REPARSE_KEY rkey;
	struct NTFS_DE_R *re;
	struct INDEX_ROOT *root_r;

	rkey.ReparseTag = rtag;

	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);

	if (rtag) {
		err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
	}

	root_r = indx_get_root(indx, ni, NULL, NULL);

	/* A diff of 1 forces indx_find() to ignore rkey.ReparseTag when comparing keys. */
	err = indx_find(indx, ni, root_r, &rkey, sizeof(rkey), (void *)1, &diff,
			(struct NTFS_DE **)&re, fnd);

	if (memcmp(&re->key.ref, ref, sizeof(*ref))) {
		/* Should be impossible; the volume looks corrupted. */
		goto out;
	}

	memcpy(&rkey, &re->key, sizeof(rkey));

	err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);

	mark_inode_dirty(&ni->vfs_inode);

static inline void ntfs_unmap_and_discard(struct ntfs_sb_info *sbi, CLST lcn,
					  CLST len)
{
	ntfs_unmap_meta(sbi->sb, lcn, len);
	ntfs_discard(sbi, lcn, len);
}

void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim)
{
	CLST end, i, zone_len, zlen;
	struct wnd_bitmap *wnd = &sbi->used.bitmap;

	down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
	if (!wnd_is_used(wnd, lcn, len)) {
		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);

		end = lcn + len;
		for (i = lcn; i < end; i++) {
			if (wnd_is_used(wnd, i, 1)) {
				if (!len)
					lcn = i;
				len += 1;
				continue;
			}

			if (trim)
				ntfs_unmap_and_discard(sbi, lcn, len);

			wnd_set_free(wnd, lcn, len);
		}
	}

	if (trim)
		ntfs_unmap_and_discard(sbi, lcn, len);
	wnd_set_free(wnd, lcn, len);

	/* Append to the MFT zone, if possible. */
	zone_len = wnd_zone_len(wnd);
	zlen = min(zone_len + len, sbi->zone_max);

	if (zlen == zone_len) {
		/* The MFT zone already has the maximum size. */
	} else if (!zone_len) {
		/* Create the MFT zone only if 'zlen' is large enough. */
		if (zlen == sbi->zone_max)
			wnd_zone_set(wnd, lcn, zlen);
	} else {
		CLST zone_lcn = wnd_zone_bit(wnd);

		if (lcn + len == zone_lcn) {
			/* Append at the head of the MFT zone. */
			wnd_zone_set(wnd, lcn, zlen);
		} else if (zone_lcn + zone_len == lcn) {
			/* Append at the tail of the MFT zone. */
			wnd_zone_set(wnd, zone_lcn, zlen);
		}
	}

	up_write(&wnd->rw_lock);
}

/*
 * run_deallocate - Deallocate clusters.
 */
int run_deallocate(struct ntfs_sb_info *sbi, struct runs_tree *run, bool trim)
{
	while (run_get_entry(run, idx++, NULL, &lcn, &len)) {
		if (lcn == SPARSE_LCN)
			continue;

		mark_as_free_ex(sbi, lcn, len, trim);
	}

static inline bool name_has_forbidden_chars(const struct le_str *fname)
{
	/* Check for forbidden chars. */
	for (i = 0; i < fname->len; ++i) {
		ch = le16_to_cpu(fname->name[i]);

		/* Disallowed by Windows. */

	/* File names cannot end with a space or a dot. */
	if (fname->len > 0) {
		ch = le16_to_cpu(fname->name[fname->len - 1]);
		if (ch == ' ' || ch == '.')
			return true;
	}

	return false;
}

static inline bool is_reserved_name(struct ntfs_sb_info *sbi,
				    const struct le_str *fname)
{
	const __le16 *name = fname->name;
	int len = fname->len;
	u16 *upcase = sbi->upcase;

	/*
	 * Check for 3-character reserved names (device names).
	 * The name by itself or with any extension is forbidden.
	 */
	if (len == 3 || (len > 3 && le16_to_cpu(name[3]) == '.'))
		if (!ntfs_cmp_names(name, 3, CON_NAME, 3, upcase, false) ||
		    !ntfs_cmp_names(name, 3, NUL_NAME, 3, upcase, false) ||
		    !ntfs_cmp_names(name, 3, AUX_NAME, 3, upcase, false) ||
		    !ntfs_cmp_names(name, 3, PRN_NAME, 3, upcase, false))
			return true;

	/*
	 * Check for 4-character reserved names (a port name followed by 1..9).
	 * The name by itself or with any extension is forbidden.
	 */
	if (len == 4 || (len > 4 && le16_to_cpu(name[4]) == '.')) {
		port_digit = le16_to_cpu(name[3]);
		if (port_digit >= '1' && port_digit <= '9')
			if (!ntfs_cmp_names(name, 3, COM_NAME, 3, upcase,
					    false) ||
			    !ntfs_cmp_names(name, 3, LPT_NAME, 3, upcase,
					    false))
				return true;
	}

	return false;
}

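/*
 * Examples of names rejected above (case-insensitive, with or without
 * an extension): "CON", "NUL.txt", "AUX.c", "COM1", "COM9.log",
 * "LPT5.dat". Note that "COM0" and "LPT0" are not reserved.
 */
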
/*
 * valid_windows_name - Check if a file name is valid in Windows.
 */
bool valid_windows_name(struct ntfs_sb_info *sbi, const struct le_str *fname)
{
	return !name_has_forbidden_chars(fname) &&
	       !is_reserved_name(sbi, fname);
}