1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
8 #include <linux/blkdev.h>
9 #include <linux/buffer_head.h>
11 #include <linux/iversion.h>
12 #include <linux/mpage.h>
13 #include <linux/namei.h>
14 #include <linux/nls.h>
15 #include <linux/uio.h>
16 #include <linux/writeback.h>
23 * ntfs_read_mft - Read record and parses MFT.
/*
 * ntfs_read_mft - read an MFT record from disk and initialize the VFS inode
 * from the attributes found in it (STD_INFO, FILE_NAME, DATA, ROOT, ALLOC,
 * BITMAP, REPARSE, EA_INFO), then pick i_op/i_fop/a_ops by resulting mode.
 *
 * NOTE(review): this listing is an extraction with many original lines
 * dropped (see the numbering gaps); comments below describe only what the
 * visible residue shows — verify against the full source before editing.
 */
25 static struct inode *ntfs_read_mft(struct inode *inode,
26 const struct cpu_str *name,
27 const struct MFT_REF *ref)
30 struct ntfs_inode *ni = ntfs_i(inode);
31 struct super_block *sb = inode->i_sb;
32 struct ntfs_sb_info *sbi = sb->s_fs_info;
34 struct ATTR_STD_INFO5 *std5 = NULL;
35 struct ATTR_LIST_ENTRY *le;
37 bool is_match = false;
40 unsigned long ino = inode->i_ino;
41 u32 rp_fa = 0, asize, t32;
42 u16 roff, rsize, names = 0;
43 const struct ATTR_FILE_NAME *fname = NULL;
44 const struct INDEX_ROOT *root;
45 struct REPARSE_DATA_BUFFER rp; // 0x18 bytes
48 struct runs_tree *run;
51 /* Setup 'uid' and 'gid' */
52 inode->i_uid = sbi->options.fs_uid;
53 inode->i_gid = sbi->options.fs_gid;
55 err = mi_init(&ni->mi, sbi, ino);
/* Bootstrapping special case: reading $MFT itself before the sb is fully
 * mounted — seed its run list manually from the boot-sector location. */
59 if (!sbi->mft.ni && ino == MFT_REC_MFT && !sb->s_root) {
60 t64 = sbi->mft.lbo >> sbi->cluster_bits;
61 t32 = bytes_to_cluster(sbi, MFT_REC_VOL * sbi->record_size);
63 init_rwsem(&ni->file.run_lock);
65 if (!run_add_entry(&ni->file.run, 0, t64, t32, true)) {
71 err = mi_read(&ni->mi, ino == MFT_REC_MFT);
/* Sanity-check the record: sequence number must match the reference,
 * record must be in use, total size must equal the volume record size. */
78 if (sbi->flags & NTFS_FLAGS_LOG_REPLAYING) {
80 } else if (ref->seq != rec->seq) {
82 ntfs_err(sb, "MFT: r=%lx, expect seq=%x instead of %x!", ino,
83 le16_to_cpu(ref->seq), le16_to_cpu(rec->seq));
85 } else if (!is_rec_inuse(rec)) {
87 ntfs_err(sb, "Inode r=%x is not in use!", (u32)ino);
91 if (le32_to_cpu(rec->total) != sbi->record_size) {
97 if (!is_rec_base(rec))
100 /* Record should contain $I30 root. */
101 is_dir = rec->flags & RECORD_FLAG_DIR;
103 inode->i_generation = le16_to_cpu(rec->seq);
105 /* Enumerate all struct Attributes MFT. */
110 * To reduce tab pressure use goto instead of
111 * while( (attr = ni_enum_attr_ex(ni, attr, &le, NULL) ))
116 attr = ni_enum_attr_ex(ni, attr, &le, NULL);
121 /* This is non primary attribute segment. Ignore if not MFT. */
122 if (ino != MFT_REC_MFT || attr->type != ATTR_DATA)
126 asize = le32_to_cpu(attr->size);
127 goto attr_unpack_run;
/* For resident attributes compute payload offset/size; zero for
 * non-resident so the size checks below reject them where needed. */
130 roff = attr->non_res ? 0 : le16_to_cpu(attr->res.data_off);
131 rsize = attr->non_res ? 0 : le32_to_cpu(attr->res.data_size);
132 asize = le32_to_cpu(attr->size);
134 switch (attr->type) {
137 asize < sizeof(struct ATTR_STD_INFO) + roff ||
138 rsize < sizeof(struct ATTR_STD_INFO))
144 std5 = Add2Ptr(attr, roff);
/* Copy NT timestamps and file attributes from $STANDARD_INFORMATION. */
147 nt2kernel(std5->cr_time, &ni->i_crtime);
149 nt2kernel(std5->a_time, &inode->i_atime);
150 nt2kernel(std5->c_time, &inode->i_ctime);
151 nt2kernel(std5->m_time, &inode->i_mtime);
153 ni->std_fa = std5->fa;
155 if (asize >= sizeof(struct ATTR_STD_INFO5) + roff &&
156 rsize >= sizeof(struct ATTR_STD_INFO5))
157 ni->std_security_id = std5->security_id;
161 if (attr->name_len || le || ino == MFT_REC_LOG)
164 err = ntfs_load_attr_list(ni, attr);
173 if (attr->non_res || asize < SIZEOF_ATTRIBUTE_FILENAME + roff ||
174 rsize < SIZEOF_ATTRIBUTE_FILENAME)
177 fname = Add2Ptr(attr, roff);
178 if (fname->type == FILE_NAME_DOS)
/* Remember whether the caller-supplied name matches one of the
 * record's file names (hard-link check done after the loop). */
182 if (name && name->len == fname->name_len &&
183 !ntfs_cmp_names_cpu(name, (struct le_str *)&fname->name_len,
191 /* Ignore data attribute in dir record. */
195 if (ino == MFT_REC_BADCLUST && !attr->non_res)
/* Named data streams are skipped, except the well-known $BadClus:$Bad
 * and $Secure:$SDS streams which must be loaded. */
198 if (attr->name_len &&
199 ((ino != MFT_REC_BADCLUST || !attr->non_res ||
200 attr->name_len != ARRAY_SIZE(BAD_NAME) ||
201 memcmp(attr_name(attr), BAD_NAME, sizeof(BAD_NAME))) &&
202 (ino != MFT_REC_SECURE || !attr->non_res ||
203 attr->name_len != ARRAY_SIZE(SDS_NAME) ||
204 memcmp(attr_name(attr), SDS_NAME, sizeof(SDS_NAME))))) {
205 /* File contains stream attribute. Ignore it. */
/* Mirror the on-disk attribute flags into the cached std_fa bits. */
209 if (is_attr_sparsed(attr))
210 ni->std_fa |= FILE_ATTRIBUTE_SPARSE_FILE;
212 ni->std_fa &= ~FILE_ATTRIBUTE_SPARSE_FILE;
214 if (is_attr_compressed(attr))
215 ni->std_fa |= FILE_ATTRIBUTE_COMPRESSED;
217 ni->std_fa &= ~FILE_ATTRIBUTE_COMPRESSED;
219 if (is_attr_encrypted(attr))
220 ni->std_fa |= FILE_ATTRIBUTE_ENCRYPTED;
222 ni->std_fa &= ~FILE_ATTRIBUTE_ENCRYPTED;
224 if (!attr->non_res) {
225 ni->i_valid = inode->i_size = rsize;
226 inode_set_bytes(inode, rsize);
229 t32 = le16_to_cpu(attr->nres.run_off);
232 mode = S_IFREG | (0777 & sbi->options.fs_fmask_inv);
234 if (!attr->non_res) {
235 ni->ni_flags |= NI_FLAG_RESIDENT;
239 inode_set_bytes(inode, attr_ondisk_size(attr));
241 ni->i_valid = le64_to_cpu(attr->nres.valid_size);
242 inode->i_size = le64_to_cpu(attr->nres.data_size);
243 if (!attr->nres.alloc_size)
246 run = ino == MFT_REC_BITMAP ? &sbi->used.bitmap.run
/* $INDEX_ROOT: only the $I30 (filename) index makes this a directory. */
254 root = Add2Ptr(attr, roff);
257 if (attr->name_len != ARRAY_SIZE(I30_NAME) ||
258 memcmp(attr_name(attr), I30_NAME, sizeof(I30_NAME)))
261 if (root->type != ATTR_NAME ||
262 root->rule != NTFS_COLLATION_TYPE_FILENAME)
268 ni->ni_flags |= NI_FLAG_DIR;
270 err = indx_init(&ni->dir, sbi, attr, INDEX_MUTEX_I30);
275 ? (S_IFDIR | (0777 & sbi->options.fs_dmask_inv))
280 if (!is_root || attr->name_len != ARRAY_SIZE(I30_NAME) ||
281 memcmp(attr_name(attr), I30_NAME, sizeof(I30_NAME)))
284 inode->i_size = le64_to_cpu(attr->nres.data_size);
285 ni->i_valid = le64_to_cpu(attr->nres.valid_size);
286 inode_set_bytes(inode, le64_to_cpu(attr->nres.alloc_size));
288 run = &ni->dir.alloc_run;
292 if (ino == MFT_REC_MFT) {
295 #ifndef CONFIG_NTFS3_64BIT_CLUSTER
296 /* 0x20000000 = 2^32 / 8 */
297 if (le64_to_cpu(attr->nres.alloc_size) >= 0x20000000)
300 run = &sbi->mft.bitmap.run;
302 } else if (is_dir && attr->name_len == ARRAY_SIZE(I30_NAME) &&
303 !memcmp(attr_name(attr), I30_NAME,
306 run = &ni->dir.bitmap_run;
/* $REPARSE_POINT: classify the tag; a plain symlink demotes a
 * directory record to an S_IFLNK inode below. */
315 rp_fa = ni_parse_reparse(ni, attr, &rp);
318 if (!attr->non_res) {
319 inode->i_size = rsize;
320 inode_set_bytes(inode, rsize);
324 le64_to_cpu(attr->nres.data_size);
325 t32 = le16_to_cpu(attr->nres.run_off);
328 /* Looks like normal symlink. */
329 ni->i_valid = inode->i_size;
331 /* Clear directory bit. */
332 if (ni->ni_flags & NI_FLAG_DIR) {
333 indx_clear(&ni->dir);
334 memset(&ni->dir, 0, sizeof(ni->dir));
335 ni->ni_flags &= ~NI_FLAG_DIR;
337 run_close(&ni->file.run);
339 mode = S_IFLNK | 0777;
343 goto attr_unpack_run; // Double break.
347 case REPARSE_COMPRESSED:
350 case REPARSE_DEDUPLICATED:
/* Unnamed $EA_INFORMATION: WSL metadata may override uid/gid/mode. */
356 if (!attr->name_len &&
357 resident_data_ex(attr, sizeof(struct EA_INFO))) {
358 ni->ni_flags |= NI_FLAG_EA;
360 * ntfs_get_wsl_perm updates inode->i_uid, inode->i_gid, inode->i_mode
362 inode->i_mode = mode;
363 ntfs_get_wsl_perm(inode);
364 mode = inode->i_mode;
/* Common tail for non-resident attributes: decode the mapping pairs
 * (run list) into the selected runs_tree. */
373 roff = le16_to_cpu(attr->nres.run_off);
375 t64 = le64_to_cpu(attr->nres.svcn);
376 err = run_unpack_ex(run, sbi, ino, t64, le64_to_cpu(attr->nres.evcn),
377 t64, Add2Ptr(attr, roff), asize - roff);
388 if (!is_match && name) {
389 /* Reuse rec as buffer for ascii name. */
394 if (std5->fa & FILE_ATTRIBUTE_READONLY)
402 if (names != le16_to_cpu(rec->hard_links)) {
403 /* Correct minor error on the fly. Do not mark inode as dirty. */
404 rec->hard_links = cpu_to_le16(names);
408 set_nlink(inode, names);
411 ni->std_fa |= FILE_ATTRIBUTE_DIRECTORY;
414 * Dot and dot-dot should be included in count but was not
415 * included in enumeration.
416 * Usually a hard links to directories are disabled.
418 inode->i_op = &ntfs_dir_inode_operations;
419 inode->i_fop = &ntfs_dir_operations;
421 } else if (S_ISLNK(mode)) {
422 ni->std_fa &= ~FILE_ATTRIBUTE_DIRECTORY;
423 inode->i_op = &ntfs_link_inode_operations;
425 inode_nohighmem(inode); // ??
426 } else if (S_ISREG(mode)) {
427 ni->std_fa &= ~FILE_ATTRIBUTE_DIRECTORY;
428 inode->i_op = &ntfs_file_inode_operations;
429 inode->i_fop = &ntfs_file_operations;
430 inode->i_mapping->a_ops =
431 is_compressed(ni) ? &ntfs_aops_cmpr : &ntfs_aops;
432 if (ino != MFT_REC_MFT)
433 init_rwsem(&ni->file.run_lock);
434 } else if (S_ISCHR(mode) || S_ISBLK(mode) || S_ISFIFO(mode) ||
436 inode->i_op = &ntfs_special_inode_operations;
437 init_special_inode(inode, mode, inode->i_rdev);
438 } else if (fname && fname->home.low == cpu_to_le32(MFT_REC_EXTEND) &&
439 fname->home.seq == cpu_to_le16(MFT_REC_EXTEND)) {
440 /* Records in $Extend are not a files or general directories. */
/* Optional 'sys_immutable' mount option: system files become immutable
 * except pipes/sockets/symlinks. */
446 if ((sbi->options.sys_immutable &&
447 (std5->fa & FILE_ATTRIBUTE_SYSTEM)) &&
448 !S_ISFIFO(mode) && !S_ISSOCK(mode) && !S_ISLNK(mode)) {
449 inode->i_flags |= S_IMMUTABLE;
451 inode->i_flags &= ~S_IMMUTABLE;
454 inode->i_mode = mode;
455 if (!(ni->ni_flags & NI_FLAG_EA)) {
456 /* If no xattr then no security (stored in xattr). */
457 inode->i_flags |= S_NOSEC;
461 if (ino == MFT_REC_MFT && !sb->s_root)
464 unlock_new_inode(inode);
469 if (ino == MFT_REC_MFT && !sb->s_root)
479 * Return: 1 if match.
481 static int ntfs_test_inode(struct inode *inode, void *data)
483 struct MFT_REF *ref = data;
485 return ino_get(ref) == inode->i_ino;
488 static int ntfs_set_inode(struct inode *inode, void *data)
490 const struct MFT_REF *ref = data;
492 inode->i_ino = ino_get(ref);
/*
 * ntfs_iget5 - look up or create the inode for an MFT reference.
 *
 * Uses iget5_locked() with the test/set callbacks above; a freshly
 * allocated (I_NEW) inode is filled from disk by ntfs_read_mft(),
 * while a cached inode with a stale sequence number is marked bad.
 *
 * NOTE(review): several lines were dropped by extraction (e.g. the data
 * argument to iget5_locked and the function tail) — consult full source.
 */
496 struct inode *ntfs_iget5(struct super_block *sb, const struct MFT_REF *ref,
497 const struct cpu_str *name)
501 inode = iget5_locked(sb, ino_get(ref), ntfs_test_inode, ntfs_set_inode,
503 if (unlikely(!inode))
504 return ERR_PTR(-ENOMEM);
506 /* If this is a freshly allocated inode, need to read it now. */
507 if (inode->i_state & I_NEW)
508 inode = ntfs_read_mft(inode, name, ref);
509 else if (ref->seq != ntfs_i(inode)->mi.mrec->seq) {
510 /* Inode overlaps? */
511 make_bad_inode(inode);
/*
 * Caller context passed to ntfs_get_block_vbo() so it can adjust
 * behavior (e.g. i_valid updates for direct write, size clamping for
 * direct I/O). NOTE(review): the enum header line and the
 * GET_BLOCK_BMAP member (used by ntfs_get_block_bmap) were dropped by
 * extraction.
 */
518 GET_BLOCK_GENERAL = 0,
519 GET_BLOCK_WRITE_BEGIN = 1,
520 GET_BLOCK_DIRECT_IO_R = 2,
521 GET_BLOCK_DIRECT_IO_W = 3,
/*
 * ntfs_get_block_vbo - core get_block helper: map byte offset @vbo of
 * @inode to a device block in @bh, honoring resident data, sparse
 * clusters, and the valid-data boundary (ni->i_valid).
 *
 * NOTE(review): extraction dropped many lines (error paths, several
 * assignments, closing braces); comments describe only the visible code.
 */
525 static noinline int ntfs_get_block_vbo(struct inode *inode, u64 vbo,
526 struct buffer_head *bh, int create,
527 enum get_block_ctx ctx)
529 struct super_block *sb = inode->i_sb;
530 struct ntfs_sb_info *sbi = sb->s_fs_info;
531 struct ntfs_inode *ni = ntfs_i(inode);
532 struct page *page = bh->b_page;
533 u8 cluster_bits = sbi->cluster_bits;
534 u32 block_size = sb->s_blocksize;
535 u64 bytes, lbo, valid;
541 /* Clear previous state. */
542 clear_buffer_new(bh);
543 clear_buffer_uptodate(bh);
545 /* Direct write uses 'create=0'. */
546 if (!create && vbo >= ni->i_valid) {
551 if (vbo >= inode->i_size) {
/* Resident data lives inside the MFT record: copy it into the page
 * instead of mapping a disk block. */
556 if (is_resident(ni)) {
558 err = attr_data_read_resident(ni, page);
562 set_buffer_uptodate(bh);
563 bh->b_size = block_size;
567 vcn = vbo >> cluster_bits;
568 off = vbo & sbi->cluster_mask;
571 err = attr_data_get_block(ni, vcn, 1, &lcn, &len, create ? &new : NULL);
578 bytes = ((u64)len << cluster_bits) - off;
580 if (lcn == SPARSE_LCN) {
582 if (bh->b_size > bytes)
591 if ((len << cluster_bits) > block_size)
592 ntfs_sparse_cluster(inode, page, vcn, len);
595 lbo = ((u64)lcn << cluster_bits) + off;
597 set_buffer_mapped(bh);
598 bh->b_bdev = sb->s_bdev;
599 bh->b_blocknr = lbo >> sb->s_blocksize_bits;
603 if (ctx == GET_BLOCK_DIRECT_IO_W) {
604 /* ntfs_direct_IO will update ni->i_valid. */
609 if (bytes > bh->b_size)
/* Writing past the valid-data length extends it immediately. */
615 if (vbo + bytes > valid) {
616 ni->i_valid = vbo + bytes;
617 mark_inode_dirty(inode);
619 } else if (vbo >= valid) {
620 /* Read out of valid data. */
621 /* Should never be here 'cause already checked. */
622 clear_buffer_mapped(bh);
623 } else if (vbo + bytes <= valid) {
625 } else if (vbo + block_size <= valid) {
626 /* Normal short read. */
630 * Read across valid size: vbo < valid && valid < vbo + block_size
635 u32 voff = valid - vbo;
637 bh->b_size = block_size;
638 off = vbo & (PAGE_SIZE - 1);
639 set_bh_page(bh, page, off);
640 ll_rw_block(REQ_OP_READ, 0, 1, &bh);
642 if (!buffer_uptodate(bh)) {
/* Zero the part of the block beyond the valid-data boundary. */
646 zero_user_segment(page, off + voff, off + block_size);
650 if (bh->b_size > bytes)
654 if (ctx == GET_BLOCK_DIRECT_IO_W || ctx == GET_BLOCK_DIRECT_IO_R) {
655 static_assert(sizeof(size_t) < sizeof(loff_t));
/* Clamp mapping length for direct I/O to 1 GiB per call. */
656 if (bytes > 0x40000000u)
657 bh->b_size = 0x40000000u;
667 int ntfs_get_block(struct inode *inode, sector_t vbn,
668 struct buffer_head *bh_result, int create)
670 return ntfs_get_block_vbo(inode, (u64)vbn << inode->i_blkbits,
671 bh_result, create, GET_BLOCK_GENERAL);
674 static int ntfs_get_block_bmap(struct inode *inode, sector_t vsn,
675 struct buffer_head *bh_result, int create)
677 return ntfs_get_block_vbo(inode,
678 (u64)vsn << inode->i_sb->s_blocksize_bits,
679 bh_result, create, GET_BLOCK_BMAP);
682 static sector_t ntfs_bmap(struct address_space *mapping, sector_t block)
684 return generic_block_bmap(mapping, block, ntfs_get_block_bmap);
/*
 * ntfs_readpage - address_space_operations::readpage.
 *
 * Resident data is copied straight from the MFT record; compressed
 * files go through the compression reader; everything else (normal and
 * sparse) uses the generic mpage path. NOTE(review): some lines (error
 * returns, braces) were dropped by extraction.
 */
687 static int ntfs_readpage(struct file *file, struct page *page)
690 struct address_space *mapping = page->mapping;
691 struct inode *inode = mapping->host;
692 struct ntfs_inode *ni = ntfs_i(inode);
694 if (is_resident(ni)) {
696 err = attr_data_read_resident(ni, page);
/* E_NTFS_NONRESIDENT means the data migrated out of the record;
 * fall through to the non-resident paths below. */
698 if (err != E_NTFS_NONRESIDENT) {
704 if (is_compressed(ni)) {
706 err = ni_readpage_cmpr(ni, page);
711 /* Normal + sparse files. */
712 return mpage_readpage(page, ntfs_get_block)
/*
 * ntfs_readahead - address_space_operations::readahead.
 *
 * Skips readahead for resident and compressed files, and for ranges
 * that cross the valid-data boundary (those must be read page by page
 * so the tail past i_valid is zeroed correctly).
 */
715 static void ntfs_readahead(struct readahead_control *rac)
717 struct address_space *mapping = rac->mapping;
718 struct inode *inode = mapping->host;
719 struct ntfs_inode *ni = ntfs_i(inode);
723 if (is_resident(ni)) {
724 /* No readahead for resident. */
728 if (is_compressed(ni)) {
729 /* No readahead for compressed. */
734 pos = readahead_pos(rac);
736 if (valid < i_size_read(inode) && pos <= valid &&
737 valid < pos + readahead_length(rac)) {
738 /* Range cross 'valid'. Read it page by page. */
742 mpage_readahead(rac, ntfs_get_block);
745 static int ntfs_get_block_direct_IO_R(struct inode *inode, sector_t iblock,
746 struct buffer_head *bh_result, int create)
748 return ntfs_get_block_vbo(inode, (u64)iblock << inode->i_blkbits,
749 bh_result, create, GET_BLOCK_DIRECT_IO_R);
752 static int ntfs_get_block_direct_IO_W(struct inode *inode, sector_t iblock,
753 struct buffer_head *bh_result, int create)
755 return ntfs_get_block_vbo(inode, (u64)iblock << inode->i_blkbits,
756 bh_result, create, GET_BLOCK_DIRECT_IO_W);
/*
 * ntfs_direct_IO - address_space_operations::direct_IO.
 *
 * Resident files fall back to buffered I/O. Otherwise blockdev_direct_IO
 * is used; after a write extending past i_valid the valid size is pushed
 * forward, and a read crossing i_valid has its tail zero-filled in the
 * iterator. NOTE(review): extraction dropped lines here (declarations,
 * the fallback return, the i_valid assignment) — comments cover only the
 * visible residue.
 */
759 static ssize_t ntfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
761 struct file *file = iocb->ki_filp;
762 struct address_space *mapping = file->f_mapping;
763 struct inode *inode = mapping->host;
764 struct ntfs_inode *ni = ntfs_i(inode);
765 loff_t vbo = iocb->ki_pos;
767 int wr = iov_iter_rw(iter) & WRITE;
771 if (is_resident(ni)) {
772 /* Switch to buffered write. */
777 ret = blockdev_direct_IO(iocb, inode, iter,
778 wr ? ntfs_get_block_direct_IO_W
779 : ntfs_get_block_direct_IO_R);
787 if (end > valid && !S_ISBLK(inode->i_mode)) {
789 mark_inode_dirty(inode);
/* Read that crossed i_valid: rewind the iterator and zero the tail. */
791 } else if (vbo < valid && valid < end) {
793 iov_iter_revert(iter, end - valid);
794 iov_iter_zero(end - valid, iter);
/*
 * ntfs_set_size - resize the unnamed $DATA attribute of @inode.
 *
 * Validates @new_size against the per-volume maximum (sparse/compressed
 * files have a larger limit), then resizes under the run lock and marks
 * the inode dirty. Returns 0 or a negative errno (error-path lines were
 * dropped by extraction).
 */
801 int ntfs_set_size(struct inode *inode, u64 new_size)
803 struct super_block *sb = inode->i_sb;
804 struct ntfs_sb_info *sbi = sb->s_fs_info;
805 struct ntfs_inode *ni = ntfs_i(inode);
808 /* Check for maximum file size. */
809 if (is_sparsed(ni) || is_compressed(ni)) {
810 if (new_size > sbi->maxbytes_sparse) {
814 } else if (new_size > sbi->maxbytes) {
820 down_write(&ni->file.run_lock);
822 err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, new_size,
823 &ni->i_valid, true, NULL);
825 up_write(&ni->file.run_lock);
828 mark_inode_dirty(inode);
/*
 * ntfs_writepage - address_space_operations::writepage.
 *
 * Resident data is written back into the MFT record; otherwise the
 * generic block_write_full_page() path is used.
 */
834 static int ntfs_writepage(struct page *page, struct writeback_control *wbc)
836 struct address_space *mapping = page->mapping;
837 struct inode *inode = mapping->host;
838 struct ntfs_inode *ni = ntfs_i(inode);
841 if (is_resident(ni)) {
843 err = attr_data_write_resident(ni, page);
/* E_NTFS_NONRESIDENT: data no longer fits in the record — fall
 * through to the block-based path below. */
845 if (err != E_NTFS_NONRESIDENT) {
851 return block_write_full_page(page, ntfs_get_block, wbc);
854 static int ntfs_writepages(struct address_space *mapping,
855 struct writeback_control *wbc)
857 struct inode *inode = mapping->host;
858 struct ntfs_inode *ni = ntfs_i(inode);
859 /* Redirect call to 'ntfs_writepage' for resident files. */
860 get_block_t *get_block = is_resident(ni) ? NULL : &ntfs_get_block;
862 return mpage_writepages(mapping, wbc, get_block);
865 static int ntfs_get_block_write_begin(struct inode *inode, sector_t vbn,
866 struct buffer_head *bh_result, int create)
868 return ntfs_get_block_vbo(inode, (u64)vbn << inode->i_blkbits,
869 bh_result, create, GET_BLOCK_WRITE_BEGIN);
/*
 * ntfs_write_begin - address_space_operations::write_begin.
 *
 * For resident files the page is grabbed and filled from the MFT
 * record directly; non-resident files use generic block_write_begin().
 * NOTE(review): extraction dropped lines (last parameter of the
 * signature, error handling, returns).
 */
872 static int ntfs_write_begin(struct file *file, struct address_space *mapping,
873 loff_t pos, u32 len, u32 flags, struct page **pagep,
877 struct inode *inode = mapping->host;
878 struct ntfs_inode *ni = ntfs_i(inode);
881 if (is_resident(ni)) {
882 struct page *page = grab_cache_page_write_begin(
883 mapping, pos >> PAGE_SHIFT, flags);
891 err = attr_data_read_resident(ni, page);
/* Data migrated out of the record: fall through to block path. */
901 if (err != E_NTFS_NONRESIDENT)
905 err = block_write_begin(mapping, pos, len, flags, pagep,
906 ntfs_get_block_write_begin);
913 * ntfs_write_end - Address_space_operations::write_end.
/*
 * ntfs_write_end - address_space_operations::write_end.
 *
 * Resident files copy the page back into the MFT record and scrub any
 * buffer heads left on the page; non-resident files use
 * generic_write_end(). Also sets the ARCHIVE attribute and times on a
 * successful write, and marks the inode dirty when i_valid moved.
 * NOTE(review): extraction dropped lines (trailing params, braces,
 * returns) — comments cover only the visible residue.
 */
915 static int ntfs_write_end(struct file *file, struct address_space *mapping,
916 loff_t pos, u32 len, u32 copied, struct page *page,
920 struct inode *inode = mapping->host;
921 struct ntfs_inode *ni = ntfs_i(inode);
922 u64 valid = ni->i_valid;
926 if (is_resident(ni)) {
928 err = attr_data_write_resident(ni, page);
932 /* Clear any buffers in page. */
933 if (page_has_buffers(page)) {
934 struct buffer_head *head, *bh;
936 bh = head = page_buffers(page);
938 clear_buffer_dirty(bh);
939 clear_buffer_mapped(bh);
940 set_buffer_uptodate(bh);
941 } while (head != (bh = bh->b_this_page));
943 SetPageUptodate(page);
949 err = generic_write_end(file, mapping, pos, len, copied, page,
954 if (!(ni->std_fa & FILE_ATTRIBUTE_ARCHIVE)) {
955 inode->i_ctime = inode->i_mtime = current_time(inode);
956 ni->std_fa |= FILE_ATTRIBUTE_ARCHIVE;
960 if (valid != ni->i_valid) {
961 /* ni->i_valid is changed in ntfs_get_block_vbo. */
966 mark_inode_dirty(inode);
/*
 * reset_log_file - fill $LogFile with 0xff bytes page by page.
 *
 * Walks the file in PAGE_SIZE chunks via block_write_begin/
 * block_write_end, memset()s each mapped page to -1, then syncs the
 * inode. NOTE(review): the loop construct, position advance and error
 * paths were dropped by extraction.
 */
972 int reset_log_file(struct inode *inode)
976 u32 log_size = inode->i_size;
977 struct address_space *mapping = inode->i_mapping;
/* Last chunk may be shorter than a full page. */
984 len = pos + PAGE_SIZE > log_size ? (log_size - pos) : PAGE_SIZE;
986 err = block_write_begin(mapping, pos, len, 0, &page,
987 ntfs_get_block_write_begin);
991 kaddr = kmap_atomic(page);
992 memset(kaddr, -1, len);
993 kunmap_atomic(kaddr);
994 flush_dcache_page(page);
996 err = block_write_end(NULL, mapping, pos, len, len, page, NULL);
1001 if (pos >= log_size)
/* Throttle so dirtying the whole log does not stall writeback. */
1003 balance_dirty_pages_ratelimited(mapping);
1006 mark_inode_dirty_sync(inode);
1011 int ntfs3_write_inode(struct inode *inode, struct writeback_control *wbc)
1013 return _ni_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
/* ntfs_sync_inode - always-synchronous flush of the MFT record. */
int ntfs_sync_inode(struct inode *inode)
{
	return _ni_write_inode(inode, 1);
}
1022 * writeback_inode - Helper function for ntfs_flush_inodes().
1024 * This writes both the inode and the file data blocks, waiting
1025 * for in flight data blocks before the start of the call. It
1026 * does not wait for any io started during the call.
1028 static int writeback_inode(struct inode *inode)
1030 int ret = sync_inode_metadata(inode, 0);
1033 ret = filemap_fdatawrite(inode->i_mapping);
1040 * Write data and metadata corresponding to i1 and i2. The io is
1041 * started but we do not wait for any of it to finish.
1043 * filemap_flush() is used for the block device, so if there is a dirty
1044 * page for a block already in flight, we will not wait and start the
/*
 * NOTE(review): the second parameter line (presumably the i2 inode),
 * the NULL checks and the return were dropped by extraction; the
 * visible residue starts each writeback in sequence and finishes by
 * flushing the backing block device's mapping.
 */
1047 int ntfs_flush_inodes(struct super_block *sb, struct inode *i1,
1053 ret = writeback_inode(i1);
1055 ret = writeback_inode(i2);
1057 ret = filemap_flush(sb->s_bdev->bd_inode->i_mapping);
/*
 * inode_write_data - copy @bytes of @data into @inode's page cache,
 * one page at a time, marking each page dirty-capable (Uptodate) after
 * the copy. NOTE(review): the per-iteration 'bytes -= op' and page
 * dirtying/return lines were dropped by extraction.
 */
1061 int inode_write_data(struct inode *inode, const void *data, size_t bytes)
1065 /* Write non resident data. */
1066 for (idx = 0; bytes; idx++) {
1067 size_t op = bytes > PAGE_SIZE ? PAGE_SIZE : bytes;
1068 struct page *page = ntfs_map_page(inode->i_mapping, idx);
1071 return PTR_ERR(page);
1074 WARN_ON(!PageUptodate(page));
/* Drop Uptodate while the page content is being replaced. */
1075 ClearPageUptodate(page);
1077 memcpy(page_address(page), data, op);
1079 flush_dcache_page(page);
1080 SetPageUptodate(page);
1083 ntfs_unmap_page(page);
1086 data = Add2Ptr(data, PAGE_SIZE);
1092 * ntfs_reparse_bytes
1094 * Number of bytes for REPARSE_DATA_BUFFER(IO_REPARSE_TAG_SYMLINK)
1095 * for unicode string of @uni_len length.
1097 static inline u32 ntfs_reparse_bytes(u32 uni_len)
1099 /* Header + unicode string + decorated unicode string. */
1100 return sizeof(short) * (2 * uni_len + 4) +
1101 offsetof(struct REPARSE_DATA_BUFFER,
1102 SymbolicLinkReparseBuffer.PathBuffer);
/*
 * ntfs_create_reparse_buffer - build an IO_REPARSE_TAG_SYMLINK
 * REPARSE_DATA_BUFFER for @symname (@size bytes of multibyte text).
 *
 * On success *@nsize is the total buffer size and the kzalloc'd buffer
 * is returned (caller frees). On failure returns ERR_PTR(err).
 * NOTE(review): extraction dropped lines (some declarations, the
 * kfree in the error path, the success return).
 */
1105 static struct REPARSE_DATA_BUFFER *
1106 ntfs_create_reparse_buffer(struct ntfs_sb_info *sbi, const char *symname,
1107 u32 size, u16 *nsize)
1110 struct REPARSE_DATA_BUFFER *rp;
1112 typeof(rp->SymbolicLinkReparseBuffer) *rs;
1114 rp = kzalloc(ntfs_reparse_bytes(2 * size + 2), GFP_NOFS);
1116 return ERR_PTR(-ENOMEM);
1118 rs = &rp->SymbolicLinkReparseBuffer;
1119 rp_name = rs->PathBuffer;
1121 /* Convert link name to UTF-16. */
1122 err = ntfs_nls_to_utf16(sbi, symname, size,
1123 (struct cpu_str *)(rp_name - 1), 2 * size,
1124 UTF16_LITTLE_ENDIAN);
1128 /* err = the length of unicode name of symlink. */
1129 *nsize = ntfs_reparse_bytes(err);
1131 if (*nsize > sbi->reparse.max_size) {
1136 /* Translate Linux '/' into Windows '\'. */
1137 for (i = 0; i < err; i++) {
1138 if (rp_name[i] == cpu_to_le16('/'))
1139 rp_name[i] = cpu_to_le16('\\');
1142 rp->ReparseTag = IO_REPARSE_TAG_SYMLINK;
1143 rp->ReparseDataLength =
1144 cpu_to_le16(*nsize - offsetof(struct REPARSE_DATA_BUFFER,
1145 SymbolicLinkReparseBuffer));
1147 /* PrintName + SubstituteName. */
1148 rs->SubstituteNameOffset = cpu_to_le16(sizeof(short) * err);
/* +8: substitute name carries the extra 4 UTF-16 chars of "\??\". */
1149 rs->SubstituteNameLength = cpu_to_le16(sizeof(short) * err + 8);
1150 rs->PrintNameLength = rs->SubstituteNameOffset;
1153 * TODO: Use relative path if possible to allow Windows to
1155 * 0-absolute path 1- relative path (SYMLINK_FLAG_RELATIVE).
/* Duplicate the name after itself to form the substitute copy. */
1159 memmove(rp_name + err + 4, rp_name, sizeof(short) * err);
1161 /* Decorate SubstituteName. */
1163 rp_name[0] = cpu_to_le16('\\');
1164 rp_name[1] = cpu_to_le16('?');
1165 rp_name[2] = cpu_to_le16('?');
1166 rp_name[3] = cpu_to_le16('\\');
1171 return ERR_PTR(err);
/*
 * ntfs_create_inode - create a file/dir/symlink/special node under @dir.
 *
 * Builds the new MFT record attribute by attribute ($STANDARD_INFORMATION,
 * $FILE_NAME, optional $SECURITY_DESCRIPTOR, $INDEX_ROOT or $DATA,
 * optional $REPARSE_POINT), inserts the name into the parent index, and
 * wires up i_op/i_fop. On error the partially created record, index
 * entry, reparse entry and clusters are rolled back.
 *
 * NOTE(review): this listing is an extraction with many original lines
 * dropped (numbering gaps); comments describe only the visible residue.
 */
1174 struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
1175 struct inode *dir, struct dentry *dentry,
1176 const struct cpu_str *uni, umode_t mode,
1177 dev_t dev, const char *symname, u32 size,
1178 struct ntfs_fnd *fnd)
1181 struct super_block *sb = dir->i_sb;
1182 struct ntfs_sb_info *sbi = sb->s_fs_info;
1183 const struct qstr *name = &dentry->d_name;
1185 struct ntfs_inode *dir_ni = ntfs_i(dir);
1186 struct ntfs_inode *ni = NULL;
1187 struct inode *inode = NULL;
1188 struct ATTRIB *attr;
1189 struct ATTR_STD_INFO5 *std5;
1190 struct ATTR_FILE_NAME *fname;
1191 struct MFT_REC *rec;
1192 u32 asize, dsize, sd_size;
1193 enum FILE_ATTRIBUTE fa;
1194 __le32 security_id = SECURITY_ID_INVALID;
1197 u16 t16, nsize = 0, aid = 0;
1198 struct INDEX_ROOT *root, *dir_root;
1199 struct NTFS_DE *e, *new_de = NULL;
1200 struct REPARSE_DATA_BUFFER *rp = NULL;
1201 bool rp_inserted = false;
1203 dir_root = indx_get_root(&dir_ni->dir, dir_ni, NULL, NULL);
1205 return ERR_PTR(-EINVAL);
/* Choose the DOS file attributes (fa) by the requested mode. */
1207 if (S_ISDIR(mode)) {
1208 /* Use parent's directory attributes. */
1209 fa = dir_ni->std_fa | FILE_ATTRIBUTE_DIRECTORY |
1210 FILE_ATTRIBUTE_ARCHIVE;
1212 * By default child directory inherits parent attributes.
1213 * Root directory is hidden + system.
1214 * Make an exception for children in root.
1216 if (dir->i_ino == MFT_REC_ROOT)
1217 fa &= ~(FILE_ATTRIBUTE_HIDDEN | FILE_ATTRIBUTE_SYSTEM);
1218 } else if (S_ISLNK(mode)) {
1219 /* It is good idea that link should be the same type (file/dir) as target */
1220 fa = FILE_ATTRIBUTE_REPARSE_POINT;
1223 * Linux: there are dir/file/symlink and so on.
1224 * NTFS: symlinks are "dir + reparse" or "file + reparse"
1225 * It is good idea to create:
1226 * dir + reparse if 'symname' points to directory
1228 * file + reparse if 'symname' points to file
1229 * Unfortunately kern_path hangs if symname contains 'dir'.
1235 * if (!kern_path(symname, LOOKUP_FOLLOW, &path)){
1236 * struct inode *target = d_inode(path.dentry);
1238 * if (S_ISDIR(target->i_mode))
1239 * fa |= FILE_ATTRIBUTE_DIRECTORY;
1240 * // if ( target->i_sb == sb ){
1241 * // use relative path?
1246 } else if (S_ISREG(mode)) {
1247 if (sbi->options.sparse) {
1248 /* Sparsed regular file, cause option 'sparse'. */
1249 fa = FILE_ATTRIBUTE_SPARSE_FILE |
1250 FILE_ATTRIBUTE_ARCHIVE;
1251 } else if (dir_ni->std_fa & FILE_ATTRIBUTE_COMPRESSED) {
1252 /* Compressed regular file, if parent is compressed. */
1253 fa = FILE_ATTRIBUTE_COMPRESSED | FILE_ATTRIBUTE_ARCHIVE;
1255 /* Regular file, default attributes. */
1256 fa = FILE_ATTRIBUTE_ARCHIVE;
1259 fa = FILE_ATTRIBUTE_ARCHIVE;
1263 fa |= FILE_ATTRIBUTE_READONLY;
1265 /* Allocate PATH_MAX bytes. */
1266 new_de = __getname();
1272 /* Mark rw ntfs as dirty. it will be cleared at umount. */
1273 ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);
1275 /* Step 1: allocate and fill new mft record. */
1276 err = ntfs_look_free_mft(sbi, &ino, false, NULL, NULL);
1280 ni = ntfs_new_inode(sbi, ino, fa & FILE_ATTRIBUTE_DIRECTORY);
1286 inode = &ni->vfs_inode;
1287 inode_init_owner(mnt_userns, inode, dir, mode);
1288 mode = inode->i_mode;
1290 inode->i_atime = inode->i_mtime = inode->i_ctime = ni->i_crtime =
1291 current_time(inode);
1294 rec->hard_links = cpu_to_le16(1);
1295 attr = Add2Ptr(rec, le16_to_cpu(rec->attr_off));
1297 /* Get default security id. */
1298 sd = s_default_security;
1299 sd_size = sizeof(s_default_security);
1301 if (is_ntfs3(sbi)) {
1302 security_id = dir_ni->std_security_id;
1303 if (le32_to_cpu(security_id) < SECURITY_ID_FIRST) {
1304 security_id = sbi->security.def_security_id;
1306 if (security_id == SECURITY_ID_INVALID &&
1307 !ntfs_insert_security(sbi, sd, sd_size,
1308 &security_id, NULL))
1309 sbi->security.def_security_id = security_id;
1313 /* Insert standard info. */
1314 std5 = Add2Ptr(attr, SIZEOF_RESIDENT);
/* Without a shared security id, fall back to the short STD_INFO form
 * and store the descriptor as a separate $SECURITY_DESCRIPTOR below. */
1316 if (security_id == SECURITY_ID_INVALID) {
1317 dsize = sizeof(struct ATTR_STD_INFO);
1319 dsize = sizeof(struct ATTR_STD_INFO5);
1320 std5->security_id = security_id;
1321 ni->std_security_id = security_id;
1323 asize = SIZEOF_RESIDENT + dsize;
1325 attr->type = ATTR_STD;
1326 attr->size = cpu_to_le32(asize);
1327 attr->id = cpu_to_le16(aid++);
1328 attr->res.data_off = SIZEOF_RESIDENT_LE;
1329 attr->res.data_size = cpu_to_le32(dsize);
1331 std5->cr_time = std5->m_time = std5->c_time = std5->a_time =
1332 kernel2nt(&inode->i_atime);
1337 attr = Add2Ptr(attr, asize);
1339 /* Insert file name. */
1340 err = fill_name_de(sbi, new_de, name, uni);
1344 mi_get_ref(&ni->mi, &new_de->ref);
1346 fname = (struct ATTR_FILE_NAME *)(new_de + 1);
1347 mi_get_ref(&dir_ni->mi, &fname->home);
1348 fname->dup.cr_time = fname->dup.m_time = fname->dup.c_time =
1349 fname->dup.a_time = std5->cr_time;
1350 fname->dup.alloc_size = fname->dup.data_size = 0;
1351 fname->dup.fa = std5->fa;
1352 fname->dup.ea_size = fname->dup.reparse = 0;
1354 dsize = le16_to_cpu(new_de->key_size);
1355 asize = ALIGN(SIZEOF_RESIDENT + dsize, 8);
1357 attr->type = ATTR_NAME;
1358 attr->size = cpu_to_le32(asize);
1359 attr->res.data_off = SIZEOF_RESIDENT_LE;
1360 attr->res.flags = RESIDENT_FLAG_INDEXED;
1361 attr->id = cpu_to_le16(aid++);
1362 attr->res.data_size = cpu_to_le32(dsize);
1363 memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), fname, dsize);
1365 attr = Add2Ptr(attr, asize);
1367 if (security_id == SECURITY_ID_INVALID) {
1368 /* Insert security attribute. */
1369 asize = SIZEOF_RESIDENT + ALIGN(sd_size, 8);
1371 attr->type = ATTR_SECURE;
1372 attr->size = cpu_to_le32(asize);
1373 attr->id = cpu_to_le16(aid++);
1374 attr->res.data_off = SIZEOF_RESIDENT_LE;
1375 attr->res.data_size = cpu_to_le32(sd_size);
1376 memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), sd, sd_size);
1378 attr = Add2Ptr(attr, asize);
1381 attr->id = cpu_to_le16(aid++);
1382 if (fa & FILE_ATTRIBUTE_DIRECTORY) {
1384 * Regular directory or symlink to directory.
1385 * Create root attribute.
1387 dsize = sizeof(struct INDEX_ROOT) + sizeof(struct NTFS_DE);
1388 asize = sizeof(I30_NAME) + SIZEOF_RESIDENT + dsize;
1390 attr->type = ATTR_ROOT;
1391 attr->size = cpu_to_le32(asize);
1393 attr->name_len = ARRAY_SIZE(I30_NAME);
1394 attr->name_off = SIZEOF_RESIDENT_LE;
1395 attr->res.data_off =
1396 cpu_to_le16(sizeof(I30_NAME) + SIZEOF_RESIDENT);
1397 attr->res.data_size = cpu_to_le32(dsize);
1398 memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), I30_NAME,
/* Empty $I30 index: header + a single NTFS_IE_LAST terminator. */
1401 root = Add2Ptr(attr, sizeof(I30_NAME) + SIZEOF_RESIDENT);
1402 memcpy(root, dir_root, offsetof(struct INDEX_ROOT, ihdr));
1404 cpu_to_le32(sizeof(struct INDEX_HDR)); // 0x10
1405 root->ihdr.used = cpu_to_le32(sizeof(struct INDEX_HDR) +
1406 sizeof(struct NTFS_DE));
1407 root->ihdr.total = root->ihdr.used;
1409 e = Add2Ptr(root, sizeof(struct INDEX_ROOT));
1410 e->size = cpu_to_le16(sizeof(struct NTFS_DE));
1411 e->flags = NTFS_IE_LAST;
1412 } else if (S_ISLNK(mode)) {
1415 * Create empty resident data attribute.
1417 asize = SIZEOF_RESIDENT;
1419 /* Insert empty ATTR_DATA */
1420 attr->type = ATTR_DATA;
1421 attr->size = cpu_to_le32(SIZEOF_RESIDENT);
1422 attr->name_off = SIZEOF_RESIDENT_LE;
1423 attr->res.data_off = SIZEOF_RESIDENT_LE;
1424 } else if (S_ISREG(mode)) {
1426 * Regular file. Create empty non resident data attribute.
1428 attr->type = ATTR_DATA;
1430 attr->nres.evcn = cpu_to_le64(-1ll);
1431 if (fa & FILE_ATTRIBUTE_SPARSE_FILE) {
1432 attr->size = cpu_to_le32(SIZEOF_NONRESIDENT_EX + 8);
1433 attr->name_off = SIZEOF_NONRESIDENT_EX_LE;
1434 attr->flags = ATTR_FLAG_SPARSED;
1435 asize = SIZEOF_NONRESIDENT_EX + 8;
1436 } else if (fa & FILE_ATTRIBUTE_COMPRESSED) {
1437 attr->size = cpu_to_le32(SIZEOF_NONRESIDENT_EX + 8);
1438 attr->name_off = SIZEOF_NONRESIDENT_EX_LE;
1439 attr->flags = ATTR_FLAG_COMPRESSED;
1440 attr->nres.c_unit = COMPRESSION_UNIT;
1441 asize = SIZEOF_NONRESIDENT_EX + 8;
1443 attr->size = cpu_to_le32(SIZEOF_NONRESIDENT + 8);
1444 attr->name_off = SIZEOF_NONRESIDENT_LE;
1445 asize = SIZEOF_NONRESIDENT + 8;
1447 attr->nres.run_off = attr->name_off;
1450 * Node. Create empty resident data attribute.
1452 attr->type = ATTR_DATA;
1453 attr->size = cpu_to_le32(SIZEOF_RESIDENT);
1454 attr->name_off = SIZEOF_RESIDENT_LE;
1455 if (fa & FILE_ATTRIBUTE_SPARSE_FILE)
1456 attr->flags = ATTR_FLAG_SPARSED;
1457 else if (fa & FILE_ATTRIBUTE_COMPRESSED)
1458 attr->flags = ATTR_FLAG_COMPRESSED;
1459 attr->res.data_off = SIZEOF_RESIDENT_LE;
1460 asize = SIZEOF_RESIDENT;
1461 ni->ni_flags |= NI_FLAG_RESIDENT;
1464 if (S_ISDIR(mode)) {
1465 ni->ni_flags |= NI_FLAG_DIR;
1466 err = indx_init(&ni->dir, sbi, attr, INDEX_MUTEX_I30);
1469 } else if (S_ISLNK(mode)) {
1470 rp = ntfs_create_reparse_buffer(sbi, symname, size, &nsize);
1479 * Insert ATTR_REPARSE.
1481 attr = Add2Ptr(attr, asize);
1482 attr->type = ATTR_REPARSE;
1483 attr->id = cpu_to_le16(aid++);
1485 /* Resident or non resident? */
1486 asize = ALIGN(SIZEOF_RESIDENT + nsize, 8);
1487 t16 = PtrOffset(rec, attr);
1489 /* 0x78 - the size of EA + EAINFO to store WSL */
1490 if (asize + t16 + 0x78 + 8 > sbi->record_size) {
/* Reparse data does not fit in the record: make it non-resident,
 * allocate clusters and pack the run list. */
1492 CLST clst = bytes_to_cluster(sbi, nsize);
1494 /* Bytes per runs. */
1495 t16 = sbi->record_size - t16 - SIZEOF_NONRESIDENT;
1498 attr->nres.evcn = cpu_to_le64(clst - 1);
1499 attr->name_off = SIZEOF_NONRESIDENT_LE;
1500 attr->nres.run_off = attr->name_off;
1501 attr->nres.data_size = cpu_to_le64(nsize);
1502 attr->nres.valid_size = attr->nres.data_size;
1503 attr->nres.alloc_size =
1504 cpu_to_le64(ntfs_up_cluster(sbi, nsize));
1506 err = attr_allocate_clusters(sbi, &ni->file.run, 0, 0,
1507 clst, NULL, 0, &alen, 0,
1512 err = run_pack(&ni->file.run, 0, clst,
1513 Add2Ptr(attr, SIZEOF_NONRESIDENT), t16,
1523 asize = SIZEOF_NONRESIDENT + ALIGN(err, 8);
1524 inode->i_size = nsize;
1526 attr->res.data_off = SIZEOF_RESIDENT_LE;
1527 attr->res.data_size = cpu_to_le32(nsize);
1528 memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), rp, nsize);
1529 inode->i_size = nsize;
1533 attr->size = cpu_to_le32(asize);
1535 err = ntfs_insert_reparse(sbi, IO_REPARSE_TAG_SYMLINK,
/* Terminate the attribute list and finalize the record header. */
1543 attr = Add2Ptr(attr, asize);
1544 attr->type = ATTR_END;
1546 rec->used = cpu_to_le32(PtrOffset(rec, attr) + 8);
1547 rec->next_attr_id = cpu_to_le16(aid);
1549 /* Step 2: Add new name in index. */
1550 err = indx_insert_entry(&dir_ni->dir, dir_ni, new_de, sbi, fnd, 0);
1554 inode->i_generation = le16_to_cpu(rec->seq);
1556 dir->i_mtime = dir->i_ctime = inode->i_atime;
1558 if (S_ISDIR(mode)) {
1559 inode->i_op = &ntfs_dir_inode_operations;
1560 inode->i_fop = &ntfs_dir_operations;
1561 } else if (S_ISLNK(mode)) {
1562 inode->i_op = &ntfs_link_inode_operations;
1563 inode->i_fop = NULL;
1564 inode->i_mapping->a_ops = &ntfs_aops;
1565 } else if (S_ISREG(mode)) {
1566 inode->i_op = &ntfs_file_inode_operations;
1567 inode->i_fop = &ntfs_file_operations;
1568 inode->i_mapping->a_ops =
1569 is_compressed(ni) ? &ntfs_aops_cmpr : &ntfs_aops;
1570 init_rwsem(&ni->file.run_lock);
1572 inode->i_op = &ntfs_special_inode_operations;
1573 init_special_inode(inode, mode, dev);
1576 #ifdef CONFIG_NTFS3_FS_POSIX_ACL
1577 if (!S_ISLNK(mode) && (sb->s_flags & SB_POSIXACL)) {
1578 err = ntfs_init_acl(mnt_userns, inode, dir);
1584 inode->i_flags |= S_NOSEC;
1587 /* Write non resident data. */
1589 err = ntfs_sb_write_run(sbi, &ni->file.run, 0, rp, nsize);
1595 * Call 'd_instantiate' after inode->i_op is set
1596 * but before finish_open.
1598 d_instantiate(dentry, inode);
1600 ntfs_save_wsl_perm(inode);
1601 mark_inode_dirty(dir);
1602 mark_inode_dirty(inode);
/* Error unwinding: undo the index insert, reparse entry, allocated
 * clusters and the MFT record allocation, in reverse order. */
1609 /* Undo 'indx_insert_entry'. */
1610 indx_delete_entry(&dir_ni->dir, dir_ni, new_de + 1,
1611 le16_to_cpu(new_de->key_size), sbi);
1614 ntfs_remove_reparse(sbi, IO_REPARSE_TAG_SYMLINK, &new_de->ref);
1617 if (S_ISDIR(mode) || run_is_empty(&ni->file.run))
1620 run_deallocate(sbi, &ni->file.run, false);
1623 clear_rec_inuse(rec);
1625 ni->mi.dirty = false;
1626 discard_new_inode(inode);
1628 ntfs_mark_rec_free(sbi, ino);
1636 return ERR_PTR(err);
1638 unlock_new_inode(inode);
/*
 * ntfs_link_inode - Add a hard link to @inode under the name in @dentry.
 *
 * Builds an on-disk directory entry (NTFS_DE) from dentry->d_name,
 * copies the duplicated file info (times, sizes, attributes) from the
 * in-memory inode into the entry, then inserts the new name via
 * ni_add_name() into the parent directory.
 * NOTE(review): the 'de' buffer allocation and the error/return paths
 * are elided in this excerpt; err handling below is partial.
 */
1643 int ntfs_link_inode(struct inode *inode, struct dentry *dentry)
1646 struct ntfs_inode *ni = ntfs_i(inode);
1647 struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
1649 struct ATTR_FILE_NAME *de_name;
1651 /* Allocate PATH_MAX bytes. */
/* Volume is being modified: flag it dirty so chkdsk runs if we crash. */
1656 /* Mark rw ntfs as dirty. It will be cleared at umount. */
1657 ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);
1659 /* Construct 'de'. */
1660 err = fill_name_de(sbi, de, &dentry->d_name, NULL);
/* The ATTR_FILE_NAME payload lives immediately after the NTFS_DE header. */
1664 de_name = (struct ATTR_FILE_NAME *)(de + 1);
1665 /* Fill duplicate info. */
/* All four NTFS timestamps are stamped from the inode's ctime. */
1666 de_name->dup.cr_time = de_name->dup.m_time = de_name->dup.c_time =
1667 de_name->dup.a_time = kernel2nt(&inode->i_ctime);
/*
 * NOTE(review): alloc_size is set to i_size rather than the
 * cluster-rounded allocation — presumably tolerated in DUP info
 * since the index entry is advisory; confirm against ni_add_name().
 */
1668 de_name->dup.alloc_size = de_name->dup.data_size =
1669 cpu_to_le64(inode->i_size);
1670 de_name->dup.fa = ni->std_fa;
1671 de_name->dup.ea_size = de_name->dup.reparse = 0;
/* Insert the new name into the parent directory's $I30 index. */
1673 err = ni_add_name(ntfs_i(d_inode(dentry->d_parent)), ni, de);
/*
 * ntfs_unlink_inode - Remove the name @dentry refers to from @dir.
 *
 * Shared implementation for:
 * inode_operations::unlink
 * inode_operations::rmdir
 *
 * Removes the name from the directory index via ni_remove_name();
 * on success the directory and inode timestamps are refreshed and both
 * are marked dirty. If removal fails, ni_remove_name_undo() attempts
 * to restore the name; if even the undo fails the inode is marked bad
 * and the volume is flagged NTFS_DIRTY_ERROR.
 * NOTE(review): 'de' allocation, link-count update and return paths
 * are elided in this excerpt.
 */
1685 int ntfs_unlink_inode(struct inode *dir, const struct dentry *dentry)
1688 struct ntfs_sb_info *sbi = dir->i_sb->s_fs_info;
1689 struct inode *inode = d_inode(dentry);
1690 struct ntfs_inode *ni = ntfs_i(inode);
1691 struct ntfs_inode *dir_ni = ntfs_i(dir);
1692 struct NTFS_DE *de, *de2 = NULL;
/* System/metadata files ($MFT, $Bitmap, ...) must never be unlinked. */
1695 if (ntfs_is_meta_file(sbi, ni->mi.rno))
1698 /* Allocate PATH_MAX bytes. */
/* rmdir semantics: refuse to remove a non-empty directory. */
1705 if (S_ISDIR(inode->i_mode) && !dir_is_empty(inode)) {
/* Build the on-disk name entry to look up/remove in the $I30 index. */
1710 err = fill_name_de(sbi, de, &dentry->d_name, NULL);
/* de2/undo_remove capture state needed to roll back on failure. */
1715 err = ni_remove_name(dir_ni, ni, de, &de2, &undo_remove);
1719 dir->i_mtime = dir->i_ctime = current_time(dir);
1720 mark_inode_dirty(dir);
1721 inode->i_ctime = dir->i_ctime;
1723 mark_inode_dirty(inode);
/* Removal failed: try to restore the name; if that also fails, the
 * on-disk state is inconsistent — poison the inode and flag the volume. */
1724 } else if (!ni_remove_name_undo(dir_ni, ni, de, de2, undo_remove)) {
1725 make_bad_inode(inode);
1726 ntfs_inode_err(inode, "failed to undo unlink");
1727 ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
1729 if (ni_is_dirty(dir))
1730 mark_inode_dirty(dir);
1731 if (ni_is_dirty(inode))
1732 mark_inode_dirty(inode);
/*
 * ntfs_evict_inode - super_operations::evict_inode implementation.
 *
 * Tears down an inode leaving the icache: drops its page cache,
 * writes the in-memory MFT record back (synchronously when
 * inode_needs_sync() says so), invalidates attached buffers and
 * releases the ntfs_inode's private resources via ni_clear().
 * NOTE(review): the surrounding conditions (e.g. an i_nlink check
 * before the write-back) are elided in this excerpt — confirm
 * against the full source.
 */
1741 void ntfs_evict_inode(struct inode *inode)
1743 truncate_inode_pages_final(&inode->i_data);
1746 _ni_write_inode(inode, inode_needs_sync(inode));
1748 invalidate_inode_buffers(inode);
1751 ni_clear(ntfs_i(inode));
/*
 * ntfs_readlink_hlp - Decode the reparse point of @inode into @buffer.
 *
 * Reads the ATTR_REPARSE attribute (in place when resident, via a
 * temporary kmalloc buffer and the data runs when non-resident),
 * extracts the UTF-16 target name for the recognized Microsoft tags
 * (mount point/junction, symlink, OneDrive cloud placeholders) or a
 * generic name-surrogate tag, converts it to the mount's NLS encoding
 * and rewrites Windows '\\' separators as '/'.
 * Presumably returns the produced length in 'err' or a negative errno;
 * the return path is elided in this excerpt.
 */
1754 static noinline int ntfs_readlink_hlp(struct inode *inode, char *buffer,
1758 struct ntfs_inode *ni = ntfs_i(inode);
1759 struct super_block *sb = inode->i_sb;
1760 struct ntfs_sb_info *sbi = sb->s_fs_info;
1761 u64 i_size = inode->i_size;
1763 void *to_free = NULL;
1764 struct REPARSE_DATA_BUFFER *rp;
1766 struct ATTRIB *attr;
1768 /* Reparse data present. Try to parse it. */
/* Compile-time layout guarantees: ReparseTag is the first u32 field. */
1769 static_assert(!offsetof(struct REPARSE_DATA_BUFFER, ReparseTag));
1770 static_assert(sizeof(u32) == sizeof(rp->ReparseTag));
1774 /* Read into temporal buffer. */
/* Sanity: reparse data must at least contain a tag and fit the limit. */
1775 if (i_size > sbi->reparse.max_size || i_size <= sizeof(u32)) {
1780 attr = ni_find_attr(ni, NULL, NULL, ATTR_REPARSE, NULL, 0, NULL, NULL);
/* Resident attribute: parse the record's payload directly. */
1786 if (!attr->non_res) {
1787 rp = resident_data_ex(attr, i_size);
/* Non-resident: read the attribute's runs into a temporary buffer. */
1793 rp = kmalloc(i_size, GFP_NOFS);
1799 err = ntfs_read_run_nb(sbi, &ni->file.run, 0, rp, i_size, NULL);
1806 /* Microsoft Tag. */
1807 switch (rp->ReparseTag) {
1808 case IO_REPARSE_TAG_MOUNT_POINT:
1809 /* Mount points and junctions. */
1810 /* Can we use 'Rp->MountPointReparseBuffer.PrintNameLength'? */
/* Bounds check: PathBuffer must start within the data we read. */
1811 if (i_size <= offsetof(struct REPARSE_DATA_BUFFER,
1812 MountPointReparseBuffer.PathBuffer))
1815 offsetof(struct REPARSE_DATA_BUFFER,
1816 MountPointReparseBuffer.PathBuffer) +
1817 le16_to_cpu(rp->MountPointReparseBuffer
1820 nlen = le16_to_cpu(rp->MountPointReparseBuffer.PrintNameLength);
1823 case IO_REPARSE_TAG_SYMLINK:
1824 /* FolderSymbolicLink */
1825 /* Can we use 'Rp->SymbolicLinkReparseBuffer.PrintNameLength'? */
1826 if (i_size <= offsetof(struct REPARSE_DATA_BUFFER,
1827 SymbolicLinkReparseBuffer.PathBuffer))
1830 offsetof(struct REPARSE_DATA_BUFFER,
1831 SymbolicLinkReparseBuffer.PathBuffer) +
1832 le16_to_cpu(rp->SymbolicLinkReparseBuffer
1836 rp->SymbolicLinkReparseBuffer.PrintNameLength);
/* OneDrive cloud-placeholder tags: no target path, report a label. */
1839 case IO_REPARSE_TAG_CLOUD:
1840 case IO_REPARSE_TAG_CLOUD_1:
1841 case IO_REPARSE_TAG_CLOUD_2:
1842 case IO_REPARSE_TAG_CLOUD_3:
1843 case IO_REPARSE_TAG_CLOUD_4:
1844 case IO_REPARSE_TAG_CLOUD_5:
1845 case IO_REPARSE_TAG_CLOUD_6:
1846 case IO_REPARSE_TAG_CLOUD_7:
1847 case IO_REPARSE_TAG_CLOUD_8:
1848 case IO_REPARSE_TAG_CLOUD_9:
1849 case IO_REPARSE_TAG_CLOUD_A:
1850 case IO_REPARSE_TAG_CLOUD_B:
1851 case IO_REPARSE_TAG_CLOUD_C:
1852 case IO_REPARSE_TAG_CLOUD_D:
1853 case IO_REPARSE_TAG_CLOUD_E:
1854 case IO_REPARSE_TAG_CLOUD_F:
1855 err = sizeof("OneDrive") - 1;
1858 memcpy(buffer, "OneDrive", err);
1862 if (IsReparseTagMicrosoft(rp->ReparseTag)) {
1863 /* Unknown Microsoft Tag. */
/* Non-Microsoft tags: only name-surrogate tags carry a usable path. */
1866 if (!IsReparseTagNameSurrogate(rp->ReparseTag) ||
1867 i_size <= sizeof(struct REPARSE_POINT)) {
/* Generic name-surrogate: UTF-16 name follows the REPARSE_POINT header. */
1872 uni = Add2Ptr(rp, sizeof(struct REPARSE_POINT) - 2);
1873 nlen = le16_to_cpu(rp->ReparseDataLength) -
1874 sizeof(struct REPARSE_POINT);
1877 /* Convert nlen from bytes to UNICODE chars. */
1880 /* Check that name is available. */
/* Reject empty names and names running past the buffer we read. */
1881 if (!nlen || &uni->name[nlen] > (__le16 *)Add2Ptr(rp, i_size))
1884 /* If name is already zero terminated then truncate it now. */
1885 if (!uni->name[nlen - 1])
/* UTF-16LE -> mount NLS; on success err is the output byte count. */
1889 err = ntfs_utf16_to_nls(sbi, uni, buffer, buflen);
1894 /* Translate Windows '\' into Linux '/'. */
1895 for (i = 0; i < err; i++) {
1896 if (buffer[i] == '\\')
1900 /* Always set last zero. */
/*
 * ntfs_get_link - inode_operations::get_link for NTFS symlinks.
 *
 * Allocates a PAGE_SIZE buffer, decodes the symlink target into it via
 * ntfs_readlink_hlp() and hands ownership to the VFS through
 * set_delayed_call(done, kfree_link, ret), which frees it after use.
 * The -ECHILD return presumably rejects RCU-walk (NULL dentry) — the
 * guarding condition is elided in this excerpt.
 */
1907 static const char *ntfs_get_link(struct dentry *de, struct inode *inode,
1908 struct delayed_call *done)
1914 return ERR_PTR(-ECHILD);
1916 ret = kmalloc(PAGE_SIZE, GFP_NOFS);
1918 return ERR_PTR(-ENOMEM);
1920 err = ntfs_readlink_hlp(inode, ret, PAGE_SIZE);
/* Decoding failed: the error path frees/propagates err (elided here). */
1923 return ERR_PTR(err);
1926 set_delayed_call(done, kfree_link, ret);
/*
 * Inode operations for NTFS symlinks: readlink is served by
 * ntfs_get_link(); attribute, permission and ACL handling share the
 * common ntfs3 implementations.
 */
1932 const struct inode_operations ntfs_link_inode_operations = {
1933 .get_link = ntfs_get_link,
1934 .setattr = ntfs3_setattr,
1935 .listxattr = ntfs_listxattr,
1936 .permission = ntfs_permission,
1937 .get_acl = ntfs_get_acl,
1938 .set_acl = ntfs_set_acl,
/*
 * Address-space operations for ordinary (non-compressed) NTFS data
 * streams: buffered read/write via the standard page-cache hooks plus
 * direct I/O support.
 */
1941 const struct address_space_operations ntfs_aops = {
1942 .readpage = ntfs_readpage,
1943 .readahead = ntfs_readahead,
1944 .writepage = ntfs_writepage,
1945 .writepages = ntfs_writepages,
1946 .write_begin = ntfs_write_begin,
1947 .write_end = ntfs_write_end,
1948 .direct_IO = ntfs_direct_IO,
/* Buffer-head based dirtying, matching the buffered write path above. */
1950 .set_page_dirty = __set_page_dirty_buffers,
1953 const struct address_space_operations ntfs_aops_cmpr = {
1954 .readpage = ntfs_readpage,
1955 .readahead = ntfs_readahead,