1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
8 #include <linux/buffer_head.h>
10 #include <linux/mpage.h>
11 #include <linux/namei.h>
12 #include <linux/nls.h>
13 #include <linux/uio.h>
14 #include <linux/writeback.h>
21 * ntfs_read_mft - Read an MFT record and parse it.
23 static struct inode *ntfs_read_mft(struct inode *inode,
24 const struct cpu_str *name,
25 const struct MFT_REF *ref)
28 struct ntfs_inode *ni = ntfs_i(inode);
29 struct super_block *sb = inode->i_sb;
30 struct ntfs_sb_info *sbi = sb->s_fs_info;
32 struct ATTR_STD_INFO5 *std5 = NULL;
33 struct ATTR_LIST_ENTRY *le;
35 bool is_match = false;
38 unsigned long ino = inode->i_ino;
39 u32 rp_fa = 0, asize, t32;
40 u16 roff, rsize, names = 0;
41 const struct ATTR_FILE_NAME *fname = NULL;
42 const struct INDEX_ROOT *root;
43 struct REPARSE_DATA_BUFFER rp; // 0x18 bytes
46 struct runs_tree *run;
49 /* Set up 'uid' and 'gid'. */
50 inode->i_uid = sbi->options->fs_uid;
51 inode->i_gid = sbi->options->fs_gid;
53 err = mi_init(&ni->mi, sbi, ino);
57 if (!sbi->mft.ni && ino == MFT_REC_MFT && !sb->s_root) {
58 t64 = sbi->mft.lbo >> sbi->cluster_bits;
59 t32 = bytes_to_cluster(sbi, MFT_REC_VOL * sbi->record_size);
61 init_rwsem(&ni->file.run_lock);
63 if (!run_add_entry(&ni->file.run, 0, t64, t32, true)) {
69 err = mi_read(&ni->mi, ino == MFT_REC_MFT);
76 if (sbi->flags & NTFS_FLAGS_LOG_REPLAYING) {
78 } else if (ref->seq != rec->seq) {
80 ntfs_err(sb, "MFT: r=%lx, expect seq=%x instead of %x!", ino,
81 le16_to_cpu(ref->seq), le16_to_cpu(rec->seq));
83 } else if (!is_rec_inuse(rec)) {
85 ntfs_err(sb, "Inode r=%x is not in use!", (u32)ino);
89 if (le32_to_cpu(rec->total) != sbi->record_size) {
95 if (!is_rec_base(rec))
98 /* Record should contain $I30 root. */
99 is_dir = rec->flags & RECORD_FLAG_DIR;
101 inode->i_generation = le16_to_cpu(rec->seq);
103 /* Enumerate all attributes of the MFT record. */
108 * To reduce indentation, use goto instead of
109 * while( (attr = ni_enum_attr_ex(ni, attr, &le, NULL) ))
114 attr = ni_enum_attr_ex(ni, attr, &le, NULL);
119 /* This is a non-primary attribute segment. Ignore it unless this is the $MFT data attribute. */
120 if (ino != MFT_REC_MFT || attr->type != ATTR_DATA)
124 asize = le32_to_cpu(attr->size);
125 goto attr_unpack_run;
128 roff = attr->non_res ? 0 : le16_to_cpu(attr->res.data_off);
129 rsize = attr->non_res ? 0 : le32_to_cpu(attr->res.data_size);
130 asize = le32_to_cpu(attr->size);
132 switch (attr->type) {
135 asize < sizeof(struct ATTR_STD_INFO) + roff ||
136 rsize < sizeof(struct ATTR_STD_INFO))
142 std5 = Add2Ptr(attr, roff);
145 nt2kernel(std5->cr_time, &ni->i_crtime);
147 nt2kernel(std5->a_time, &inode->i_atime);
148 nt2kernel(std5->c_time, &inode->i_ctime);
149 nt2kernel(std5->m_time, &inode->i_mtime);
151 ni->std_fa = std5->fa;
153 if (asize >= sizeof(struct ATTR_STD_INFO5) + roff &&
154 rsize >= sizeof(struct ATTR_STD_INFO5))
155 ni->std_security_id = std5->security_id;
159 if (attr->name_len || le || ino == MFT_REC_LOG)
162 err = ntfs_load_attr_list(ni, attr);
171 if (attr->non_res || asize < SIZEOF_ATTRIBUTE_FILENAME + roff ||
172 rsize < SIZEOF_ATTRIBUTE_FILENAME)
175 fname = Add2Ptr(attr, roff);
176 if (fname->type == FILE_NAME_DOS)
180 if (name && name->len == fname->name_len &&
181 !ntfs_cmp_names_cpu(name, (struct le_str *)&fname->name_len,
189 /* Ignore data attribute in dir record. */
193 if (ino == MFT_REC_BADCLUST && !attr->non_res)
196 if (attr->name_len &&
197 ((ino != MFT_REC_BADCLUST || !attr->non_res ||
198 attr->name_len != ARRAY_SIZE(BAD_NAME) ||
199 memcmp(attr_name(attr), BAD_NAME, sizeof(BAD_NAME))) &&
200 (ino != MFT_REC_SECURE || !attr->non_res ||
201 attr->name_len != ARRAY_SIZE(SDS_NAME) ||
202 memcmp(attr_name(attr), SDS_NAME, sizeof(SDS_NAME))))) {
203 /* File contains a named stream attribute. Ignore it. */
207 if (is_attr_sparsed(attr))
208 ni->std_fa |= FILE_ATTRIBUTE_SPARSE_FILE;
210 ni->std_fa &= ~FILE_ATTRIBUTE_SPARSE_FILE;
212 if (is_attr_compressed(attr))
213 ni->std_fa |= FILE_ATTRIBUTE_COMPRESSED;
215 ni->std_fa &= ~FILE_ATTRIBUTE_COMPRESSED;
217 if (is_attr_encrypted(attr))
218 ni->std_fa |= FILE_ATTRIBUTE_ENCRYPTED;
220 ni->std_fa &= ~FILE_ATTRIBUTE_ENCRYPTED;
222 if (!attr->non_res) {
223 ni->i_valid = inode->i_size = rsize;
224 inode_set_bytes(inode, rsize);
227 mode = S_IFREG | (0777 & sbi->options->fs_fmask_inv);
229 if (!attr->non_res) {
230 ni->ni_flags |= NI_FLAG_RESIDENT;
234 inode_set_bytes(inode, attr_ondisk_size(attr));
236 ni->i_valid = le64_to_cpu(attr->nres.valid_size);
237 inode->i_size = le64_to_cpu(attr->nres.data_size);
238 if (!attr->nres.alloc_size)
241 run = ino == MFT_REC_BITMAP ? &sbi->used.bitmap.run
249 root = Add2Ptr(attr, roff);
252 if (attr->name_len != ARRAY_SIZE(I30_NAME) ||
253 memcmp(attr_name(attr), I30_NAME, sizeof(I30_NAME)))
256 if (root->type != ATTR_NAME ||
257 root->rule != NTFS_COLLATION_TYPE_FILENAME)
263 ni->ni_flags |= NI_FLAG_DIR;
265 err = indx_init(&ni->dir, sbi, attr, INDEX_MUTEX_I30);
270 ? (S_IFDIR | (0777 & sbi->options->fs_dmask_inv))
275 if (!is_root || attr->name_len != ARRAY_SIZE(I30_NAME) ||
276 memcmp(attr_name(attr), I30_NAME, sizeof(I30_NAME)))
279 inode->i_size = le64_to_cpu(attr->nres.data_size);
280 ni->i_valid = le64_to_cpu(attr->nres.valid_size);
281 inode_set_bytes(inode, le64_to_cpu(attr->nres.alloc_size));
283 run = &ni->dir.alloc_run;
287 if (ino == MFT_REC_MFT) {
290 #ifndef CONFIG_NTFS3_64BIT_CLUSTER
291 /* 0x20000000 = 2^32 / 8 */
292 if (le64_to_cpu(attr->nres.alloc_size) >= 0x20000000)
295 run = &sbi->mft.bitmap.run;
297 } else if (is_dir && attr->name_len == ARRAY_SIZE(I30_NAME) &&
298 !memcmp(attr_name(attr), I30_NAME,
301 run = &ni->dir.bitmap_run;
310 rp_fa = ni_parse_reparse(ni, attr, &rp);
315 * Assume one Unicode symbol == one UTF-8 byte.
317 inode->i_size = le16_to_cpu(rp.SymbolicLinkReparseBuffer
321 ni->i_valid = inode->i_size;
323 /* Clear directory bit. */
324 if (ni->ni_flags & NI_FLAG_DIR) {
325 indx_clear(&ni->dir);
326 memset(&ni->dir, 0, sizeof(ni->dir));
327 ni->ni_flags &= ~NI_FLAG_DIR;
329 run_close(&ni->file.run);
331 mode = S_IFLNK | 0777;
335 goto attr_unpack_run; // Double break.
339 case REPARSE_COMPRESSED:
342 case REPARSE_DEDUPLICATED:
348 if (!attr->name_len &&
349 resident_data_ex(attr, sizeof(struct EA_INFO))) {
350 ni->ni_flags |= NI_FLAG_EA;
352 * ntfs_get_wsl_perm updates inode->i_uid, inode->i_gid, inode->i_mode
354 inode->i_mode = mode;
355 ntfs_get_wsl_perm(inode);
356 mode = inode->i_mode;
365 roff = le16_to_cpu(attr->nres.run_off);
372 t64 = le64_to_cpu(attr->nres.svcn);
373 err = run_unpack_ex(run, sbi, ino, t64, le64_to_cpu(attr->nres.evcn),
374 t64, Add2Ptr(attr, roff), asize - roff);
385 if (!is_match && name) {
386 /* Reuse rec as buffer for ascii name. */
391 if (std5->fa & FILE_ATTRIBUTE_READONLY)
399 if (names != le16_to_cpu(rec->hard_links)) {
400 /* Correct minor error on the fly. Do not mark inode as dirty. */
401 rec->hard_links = cpu_to_le16(names);
405 set_nlink(inode, names);
408 ni->std_fa |= FILE_ATTRIBUTE_DIRECTORY;
411 * Dot and dot-dot should be included in the count but were not
412 * included in the enumeration.
413 * Usually hard links to directories are disabled.
415 inode->i_op = &ntfs_dir_inode_operations;
416 inode->i_fop = &ntfs_dir_operations;
418 } else if (S_ISLNK(mode)) {
419 ni->std_fa &= ~FILE_ATTRIBUTE_DIRECTORY;
420 inode->i_op = &ntfs_link_inode_operations;
422 inode_nohighmem(inode);
423 } else if (S_ISREG(mode)) {
424 ni->std_fa &= ~FILE_ATTRIBUTE_DIRECTORY;
425 inode->i_op = &ntfs_file_inode_operations;
426 inode->i_fop = &ntfs_file_operations;
427 inode->i_mapping->a_ops =
428 is_compressed(ni) ? &ntfs_aops_cmpr : &ntfs_aops;
429 if (ino != MFT_REC_MFT)
430 init_rwsem(&ni->file.run_lock);
431 } else if (S_ISCHR(mode) || S_ISBLK(mode) || S_ISFIFO(mode) ||
433 inode->i_op = &ntfs_special_inode_operations;
434 init_special_inode(inode, mode, inode->i_rdev);
435 } else if (fname && fname->home.low == cpu_to_le32(MFT_REC_EXTEND) &&
436 fname->home.seq == cpu_to_le16(MFT_REC_EXTEND)) {
437 /* Records in $Extend are not regular files or general directories. */
438 inode->i_op = &ntfs_file_inode_operations;
444 if ((sbi->options->sys_immutable &&
445 (std5->fa & FILE_ATTRIBUTE_SYSTEM)) &&
446 !S_ISFIFO(mode) && !S_ISSOCK(mode) && !S_ISLNK(mode)) {
447 inode->i_flags |= S_IMMUTABLE;
449 inode->i_flags &= ~S_IMMUTABLE;
452 inode->i_mode = mode;
453 if (!(ni->ni_flags & NI_FLAG_EA)) {
454 /* If no xattr then no security (stored in xattr). */
455 inode->i_flags |= S_NOSEC;
459 if (ino == MFT_REC_MFT && !sb->s_root)
462 unlock_new_inode(inode);
467 if (ino == MFT_REC_MFT && !sb->s_root)
477 * Return: 1 if match.
479 static int ntfs_test_inode(struct inode *inode, void *data)
481 struct MFT_REF *ref = data;
483 return ino_get(ref) == inode->i_ino;
486 static int ntfs_set_inode(struct inode *inode, void *data)
488 const struct MFT_REF *ref = data;
490 inode->i_ino = ino_get(ref);
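/*
 * Illustrative sketch (not part of the driver): a typical caller of
 * ntfs_iget5() takes the struct MFT_REF from a directory entry and lets
 * iget5_locked() either return the cached inode or allocate a new one,
 * which ntfs_read_mft() then fills in. Variable names are hypothetical.
 *
 *	struct inode *inode = ntfs_iget5(sb, &e->ref, uni_name);
 *
 *	if (IS_ERR(inode))
 *		err = PTR_ERR(inode);
 */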
494 struct inode *ntfs_iget5(struct super_block *sb, const struct MFT_REF *ref,
495 const struct cpu_str *name)
499 inode = iget5_locked(sb, ino_get(ref), ntfs_test_inode, ntfs_set_inode,
501 if (unlikely(!inode))
502 return ERR_PTR(-ENOMEM);
504 /* If this is a freshly allocated inode, we need to read it now. */
505 if (inode->i_state & I_NEW)
506 inode = ntfs_read_mft(inode, name, ref);
507 else if (ref->seq != ntfs_i(inode)->mi.mrec->seq) {
508 /* Inode overlaps? */
509 make_bad_inode(inode);
516 GET_BLOCK_GENERAL = 0,
517 GET_BLOCK_WRITE_BEGIN = 1,
518 GET_BLOCK_DIRECT_IO_R = 2,
519 GET_BLOCK_DIRECT_IO_W = 3,
523 static noinline int ntfs_get_block_vbo(struct inode *inode, u64 vbo,
524 struct buffer_head *bh, int create,
525 enum get_block_ctx ctx)
527 struct super_block *sb = inode->i_sb;
528 struct ntfs_sb_info *sbi = sb->s_fs_info;
529 struct ntfs_inode *ni = ntfs_i(inode);
530 struct page *page = bh->b_page;
531 u8 cluster_bits = sbi->cluster_bits;
532 u32 block_size = sb->s_blocksize;
533 u64 bytes, lbo, valid;
539 /* Clear previous state. */
540 clear_buffer_new(bh);
541 clear_buffer_uptodate(bh);
543 /* Direct write uses 'create=0'. */
544 if (!create && vbo >= ni->i_valid) {
549 if (vbo >= inode->i_size) {
554 if (is_resident(ni)) {
556 err = attr_data_read_resident(ni, page);
560 set_buffer_uptodate(bh);
561 bh->b_size = block_size;
565 vcn = vbo >> cluster_bits;
566 off = vbo & sbi->cluster_mask;
569 err = attr_data_get_block(ni, vcn, 1, &lcn, &len, create ? &new : NULL);
576 bytes = ((u64)len << cluster_bits) - off;
578 if (lcn == SPARSE_LCN) {
580 if (bh->b_size > bytes)
589 if ((len << cluster_bits) > block_size)
590 ntfs_sparse_cluster(inode, page, vcn, len);
593 lbo = ((u64)lcn << cluster_bits) + off;
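	/*
	 * Worked example of the mapping above (made-up numbers), assuming
	 * 4K clusters (cluster_bits == 12): vbo 0x5123 gives vcn 5 and
	 * off 0x123; if the run list resolves vcn 5 to lcn 0x80, then
	 * lbo = (0x80 << 12) + 0x123 = 0x80123.
	 */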
595 set_buffer_mapped(bh);
596 bh->b_bdev = sb->s_bdev;
597 bh->b_blocknr = lbo >> sb->s_blocksize_bits;
601 if (ctx == GET_BLOCK_DIRECT_IO_W) {
602 /* ntfs_direct_IO will update ni->i_valid. */
607 if (bytes > bh->b_size)
613 if (vbo + bytes > valid) {
614 ni->i_valid = vbo + bytes;
615 mark_inode_dirty(inode);
617 } else if (vbo >= valid) {
618 /* Read beyond the valid size. */
619 /* We should never get here because this was already checked. */
620 clear_buffer_mapped(bh);
621 } else if (vbo + bytes <= valid) {
623 } else if (vbo + block_size <= valid) {
624 /* Normal short read. */
628 * Read across valid size: vbo < valid && valid < vbo + block_size
633 u32 voff = valid - vbo;
635 bh->b_size = block_size;
636 off = vbo & (PAGE_SIZE - 1);
637 set_bh_page(bh, page, off);
638 ll_rw_block(REQ_OP_READ, 0, 1, &bh);
640 if (!buffer_uptodate(bh)) {
644 zero_user_segment(page, off + voff, off + block_size);
648 if (bh->b_size > bytes)
652 if (ctx == GET_BLOCK_DIRECT_IO_W || ctx == GET_BLOCK_DIRECT_IO_R) {
653 static_assert(sizeof(size_t) < sizeof(loff_t));
654 if (bytes > 0x40000000u)
655 bh->b_size = 0x40000000u;
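	/*
	 * Note (an interpretation, not an original comment): for direct I/O
	 * the mapped length is capped at 1 GiB per call; the direct I/O code
	 * simply asks for another mapping for the remainder.
	 */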
665 int ntfs_get_block(struct inode *inode, sector_t vbn,
666 struct buffer_head *bh_result, int create)
668 return ntfs_get_block_vbo(inode, (u64)vbn << inode->i_blkbits,
669 bh_result, create, GET_BLOCK_GENERAL);
672 static int ntfs_get_block_bmap(struct inode *inode, sector_t vsn,
673 struct buffer_head *bh_result, int create)
675 return ntfs_get_block_vbo(inode,
676 (u64)vsn << inode->i_sb->s_blocksize_bits,
677 bh_result, create, GET_BLOCK_BMAP);
680 static sector_t ntfs_bmap(struct address_space *mapping, sector_t block)
682 return generic_block_bmap(mapping, block, ntfs_get_block_bmap);
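/*
 * Note (added description): generic_block_bmap() resolves the block number
 * through ntfs_get_block_bmap() with create == 0, i.e. it never allocates.
 */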
685 static int ntfs_readpage(struct file *file, struct page *page)
688 struct address_space *mapping = page->mapping;
689 struct inode *inode = mapping->host;
690 struct ntfs_inode *ni = ntfs_i(inode);
692 if (is_resident(ni)) {
694 err = attr_data_read_resident(ni, page);
696 if (err != E_NTFS_NONRESIDENT) {
702 if (is_compressed(ni)) {
704 err = ni_readpage_cmpr(ni, page);
709 /* Normal + sparse files. */
710 return mpage_readpage(page, ntfs_get_block);
713 static void ntfs_readahead(struct readahead_control *rac)
715 struct address_space *mapping = rac->mapping;
716 struct inode *inode = mapping->host;
717 struct ntfs_inode *ni = ntfs_i(inode);
721 if (is_resident(ni)) {
722 /* No readahead for resident. */
726 if (is_compressed(ni)) {
727 /* No readahead for compressed. */
732 pos = readahead_pos(rac);
734 if (valid < i_size_read(inode) && pos <= valid &&
735 valid < pos + readahead_length(rac)) {
736 /* Range crosses 'valid'. Read it page by page. */
740 mpage_readahead(rac, ntfs_get_block);
743 static int ntfs_get_block_direct_IO_R(struct inode *inode, sector_t iblock,
744 struct buffer_head *bh_result, int create)
746 return ntfs_get_block_vbo(inode, (u64)iblock << inode->i_blkbits,
747 bh_result, create, GET_BLOCK_DIRECT_IO_R);
750 static int ntfs_get_block_direct_IO_W(struct inode *inode, sector_t iblock,
751 struct buffer_head *bh_result, int create)
753 return ntfs_get_block_vbo(inode, (u64)iblock << inode->i_blkbits,
754 bh_result, create, GET_BLOCK_DIRECT_IO_W);
757 static ssize_t ntfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
759 struct file *file = iocb->ki_filp;
760 struct address_space *mapping = file->f_mapping;
761 struct inode *inode = mapping->host;
762 struct ntfs_inode *ni = ntfs_i(inode);
763 loff_t vbo = iocb->ki_pos;
765 int wr = iov_iter_rw(iter) & WRITE;
766 size_t iter_count = iov_iter_count(iter);
770 if (is_resident(ni)) {
771 /* Switch to buffered write. */
776 ret = blockdev_direct_IO(iocb, inode, iter,
777 wr ? ntfs_get_block_direct_IO_W
778 : ntfs_get_block_direct_IO_R);
782 else if (wr && ret == -EIOCBQUEUED)
783 end = vbo + iter_count;
789 if (end > valid && !S_ISBLK(inode->i_mode)) {
791 mark_inode_dirty(inode);
793 } else if (vbo < valid && valid < end) {
795 iov_iter_revert(iter, end - valid);
796 iov_iter_zero(end - valid, iter);
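	/*
	 * Worked example for the read branch above (made-up numbers): with
	 * ni->i_valid == 0x1800, a 0x1000-byte direct read at vbo == 0x1000
	 * ends at 0x2000, so the last end - valid == 0x800 bytes of the
	 * iterator are rewound and filled with zeroes, because data past the
	 * NTFS valid size must read back as zero.
	 */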
803 int ntfs_set_size(struct inode *inode, u64 new_size)
805 struct super_block *sb = inode->i_sb;
806 struct ntfs_sb_info *sbi = sb->s_fs_info;
807 struct ntfs_inode *ni = ntfs_i(inode);
810 /* Check for maximum file size. */
811 if (is_sparsed(ni) || is_compressed(ni)) {
812 if (new_size > sbi->maxbytes_sparse) {
816 } else if (new_size > sbi->maxbytes) {
822 down_write(&ni->file.run_lock);
824 err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, new_size,
825 &ni->i_valid, true, NULL);
827 up_write(&ni->file.run_lock);
830 mark_inode_dirty(inode);
836 static int ntfs_writepage(struct page *page, struct writeback_control *wbc)
838 struct address_space *mapping = page->mapping;
839 struct inode *inode = mapping->host;
840 struct ntfs_inode *ni = ntfs_i(inode);
843 if (is_resident(ni)) {
845 err = attr_data_write_resident(ni, page);
847 if (err != E_NTFS_NONRESIDENT) {
853 return block_write_full_page(page, ntfs_get_block, wbc);
856 static int ntfs_writepages(struct address_space *mapping,
857 struct writeback_control *wbc)
859 struct inode *inode = mapping->host;
860 struct ntfs_inode *ni = ntfs_i(inode);
861 /* Redirect call to 'ntfs_writepage' for resident files. */
862 get_block_t *get_block = is_resident(ni) ? NULL : &ntfs_get_block;
864 return mpage_writepages(mapping, wbc, get_block);
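/*
 * Note (added description): with a NULL get_block, mpage_writepages() falls
 * back to generic_writepages(), i.e. to ->writepage (ntfs_writepage above),
 * which handles the resident case.
 */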
867 static int ntfs_get_block_write_begin(struct inode *inode, sector_t vbn,
868 struct buffer_head *bh_result, int create)
870 return ntfs_get_block_vbo(inode, (u64)vbn << inode->i_blkbits,
871 bh_result, create, GET_BLOCK_WRITE_BEGIN);
874 static int ntfs_write_begin(struct file *file, struct address_space *mapping,
875 loff_t pos, u32 len, u32 flags, struct page **pagep,
879 struct inode *inode = mapping->host;
880 struct ntfs_inode *ni = ntfs_i(inode);
883 if (is_resident(ni)) {
884 struct page *page = grab_cache_page_write_begin(
885 mapping, pos >> PAGE_SHIFT, flags);
893 err = attr_data_read_resident(ni, page);
903 if (err != E_NTFS_NONRESIDENT)
907 err = block_write_begin(mapping, pos, len, flags, pagep,
908 ntfs_get_block_write_begin);
915 * ntfs_write_end - Address_space_operations::write_end.
917 static int ntfs_write_end(struct file *file, struct address_space *mapping,
918 loff_t pos, u32 len, u32 copied, struct page *page,
922 struct inode *inode = mapping->host;
923 struct ntfs_inode *ni = ntfs_i(inode);
924 u64 valid = ni->i_valid;
928 if (is_resident(ni)) {
930 err = attr_data_write_resident(ni, page);
934 /* Clear any buffers in page. */
935 if (page_has_buffers(page)) {
936 struct buffer_head *head, *bh;
938 bh = head = page_buffers(page);
940 clear_buffer_dirty(bh);
941 clear_buffer_mapped(bh);
942 set_buffer_uptodate(bh);
943 } while (head != (bh = bh->b_this_page));
945 SetPageUptodate(page);
951 err = generic_write_end(file, mapping, pos, len, copied, page,
956 if (!(ni->std_fa & FILE_ATTRIBUTE_ARCHIVE)) {
957 inode->i_ctime = inode->i_mtime = current_time(inode);
958 ni->std_fa |= FILE_ATTRIBUTE_ARCHIVE;
962 if (valid != ni->i_valid) {
963 /* ni->i_valid is changed in ntfs_get_block_vbo. */
968 mark_inode_dirty(inode);
974 int reset_log_file(struct inode *inode)
978 u32 log_size = inode->i_size;
979 struct address_space *mapping = inode->i_mapping;
986 len = pos + PAGE_SIZE > log_size ? (log_size - pos) : PAGE_SIZE;
988 err = block_write_begin(mapping, pos, len, 0, &page,
989 ntfs_get_block_write_begin);
993 kaddr = kmap_atomic(page);
994 memset(kaddr, -1, len);
995 kunmap_atomic(kaddr);
996 flush_dcache_page(page);
998 err = block_write_end(NULL, mapping, pos, len, len, page, NULL);
1003 if (pos >= log_size)
1005 balance_dirty_pages_ratelimited(mapping);
1008 mark_inode_dirty_sync(inode);
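/*
 * Note (added description): reset_log_file() above rewrites the whole
 * $LogFile with 0xff bytes (memset(kaddr, -1, len)), page by page, which
 * effectively leaves the journal in its clean, "empty" state.
 */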
1013 int ntfs3_write_inode(struct inode *inode, struct writeback_control *wbc)
1015 return _ni_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
1018 int ntfs_sync_inode(struct inode *inode)
1020 return _ni_write_inode(inode, 1);
1024 * writeback_inode - Helper function for ntfs_flush_inodes().
1026 * This writes both the inode and the file data blocks, waiting
1027 * for data I/O that was already in flight before the start of the
1028 * call. It does not wait for any I/O started during the call.
1030 static int writeback_inode(struct inode *inode)
1032 int ret = sync_inode_metadata(inode, 0);
1035 ret = filemap_fdatawrite(inode->i_mapping);
1042 * Write data and metadata corresponding to i1 and i2. The I/O is
1043 * started but we do not wait for any of it to finish.
1045 * filemap_flush() is used for the block device, so if there is a dirty
1046 * page for a block already in flight, we will not wait and start the
1049 int ntfs_flush_inodes(struct super_block *sb, struct inode *i1,
1055 ret = writeback_inode(i1);
1057 ret = writeback_inode(i2);
1059 ret = filemap_flush(sb->s_bdev->bd_inode->i_mapping);
1063 int inode_write_data(struct inode *inode, const void *data, size_t bytes)
1067 /* Write non resident data. */
1068 for (idx = 0; bytes; idx++) {
1069 size_t op = bytes > PAGE_SIZE ? PAGE_SIZE : bytes;
1070 struct page *page = ntfs_map_page(inode->i_mapping, idx);
1073 return PTR_ERR(page);
1076 WARN_ON(!PageUptodate(page));
1077 ClearPageUptodate(page);
1079 memcpy(page_address(page), data, op);
1081 flush_dcache_page(page);
1082 SetPageUptodate(page);
1085 ntfs_unmap_page(page);
1088 data = Add2Ptr(data, PAGE_SIZE);
1094 * ntfs_reparse_bytes
1096 * Number of bytes for REPARSE_DATA_BUFFER(IO_REPARSE_TAG_SYMLINK)
1097 * for a Unicode string of @uni_len length.
1099 static inline u32 ntfs_reparse_bytes(u32 uni_len)
1101 /* Header + unicode string + decorated unicode string. */
1102 return sizeof(short) * (2 * uni_len + 4) +
1103 offsetof(struct REPARSE_DATA_BUFFER,
1104 SymbolicLinkReparseBuffer.PathBuffer);
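/*
 * Worked example (the 0x14 PathBuffer offset is an assumption about the
 * layout used here): for uni_len == 10 the function returns
 *	sizeof(short) * (2 * 10 + 4) + 0x14 = 48 + 20 = 68 bytes,
 * i.e. room for the PrintName, the SubstituteName and the extra "\??\"
 * decoration added in ntfs_create_reparse_buffer() below.
 */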
1107 static struct REPARSE_DATA_BUFFER *
1108 ntfs_create_reparse_buffer(struct ntfs_sb_info *sbi, const char *symname,
1109 u32 size, u16 *nsize)
1112 struct REPARSE_DATA_BUFFER *rp;
1114 typeof(rp->SymbolicLinkReparseBuffer) *rs;
1116 rp = kzalloc(ntfs_reparse_bytes(2 * size + 2), GFP_NOFS);
1118 return ERR_PTR(-ENOMEM);
1120 rs = &rp->SymbolicLinkReparseBuffer;
1121 rp_name = rs->PathBuffer;
1123 /* Convert link name to UTF-16. */
1124 err = ntfs_nls_to_utf16(sbi, symname, size,
1125 (struct cpu_str *)(rp_name - 1), 2 * size,
1126 UTF16_LITTLE_ENDIAN);
1130 /* err = the length of the Unicode name of the symlink. */
1131 *nsize = ntfs_reparse_bytes(err);
1133 if (*nsize > sbi->reparse.max_size) {
1138 /* Translate Linux '/' into Windows '\'. */
1139 for (i = 0; i < err; i++) {
1140 if (rp_name[i] == cpu_to_le16('/'))
1141 rp_name[i] = cpu_to_le16('\\');
1144 rp->ReparseTag = IO_REPARSE_TAG_SYMLINK;
1145 rp->ReparseDataLength =
1146 cpu_to_le16(*nsize - offsetof(struct REPARSE_DATA_BUFFER,
1147 SymbolicLinkReparseBuffer));
1149 /* PrintName + SubstituteName. */
1150 rs->SubstituteNameOffset = cpu_to_le16(sizeof(short) * err);
1151 rs->SubstituteNameLength = cpu_to_le16(sizeof(short) * err + 8);
1152 rs->PrintNameLength = rs->SubstituteNameOffset;
1155 * TODO: Use relative path if possible to allow Windows to
1157 * 0 - absolute path, 1 - relative path (SYMLINK_FLAG_RELATIVE).
1161 memmove(rp_name + err + 4, rp_name, sizeof(short) * err);
1163 /* Decorate SubstituteName. */
1165 rp_name[0] = cpu_to_le16('\\');
1166 rp_name[1] = cpu_to_le16('?');
1167 rp_name[2] = cpu_to_le16('?');
1168 rp_name[3] = cpu_to_le16('\\');
1173 return ERR_PTR(err);
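/*
 * Sketch of the payload layout built above (illustrative path): the
 * PrintName occupies the first 'err' UTF-16 units of PathBuffer and the
 * SubstituteName follows as the same string decorated with an NT "\??\"
 * prefix, which is why SubstituteNameLength is PrintNameLength + 8 bytes.
 *
 *	PathBuffer: [ PrintName: "dir\file" ][ SubstituteName: "\??\dir\file" ]
 */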
1176 struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
1177 struct inode *dir, struct dentry *dentry,
1178 const struct cpu_str *uni, umode_t mode,
1179 dev_t dev, const char *symname, u32 size,
1180 struct ntfs_fnd *fnd)
1183 struct super_block *sb = dir->i_sb;
1184 struct ntfs_sb_info *sbi = sb->s_fs_info;
1185 const struct qstr *name = &dentry->d_name;
1187 struct ntfs_inode *dir_ni = ntfs_i(dir);
1188 struct ntfs_inode *ni = NULL;
1189 struct inode *inode = NULL;
1190 struct ATTRIB *attr;
1191 struct ATTR_STD_INFO5 *std5;
1192 struct ATTR_FILE_NAME *fname;
1193 struct MFT_REC *rec;
1194 u32 asize, dsize, sd_size;
1195 enum FILE_ATTRIBUTE fa;
1196 __le32 security_id = SECURITY_ID_INVALID;
1199 u16 t16, nsize = 0, aid = 0;
1200 struct INDEX_ROOT *root, *dir_root;
1201 struct NTFS_DE *e, *new_de = NULL;
1202 struct REPARSE_DATA_BUFFER *rp = NULL;
1203 bool rp_inserted = false;
1205 ni_lock_dir(dir_ni);
1207 dir_root = indx_get_root(&dir_ni->dir, dir_ni, NULL, NULL);
1213 if (S_ISDIR(mode)) {
1214 /* Use parent's directory attributes. */
1215 fa = dir_ni->std_fa | FILE_ATTRIBUTE_DIRECTORY |
1216 FILE_ATTRIBUTE_ARCHIVE;
1218 * By default a child directory inherits its parent's attributes.
1219 * The root directory is hidden + system.
1220 * Make an exception for children of the root.
1222 if (dir->i_ino == MFT_REC_ROOT)
1223 fa &= ~(FILE_ATTRIBUTE_HIDDEN | FILE_ATTRIBUTE_SYSTEM);
1224 } else if (S_ISLNK(mode)) {
1225 /* Ideally, a link should be the same type (file/dir) as its target. */
1226 fa = FILE_ATTRIBUTE_REPARSE_POINT;
1229 * Linux: there are dirs, files, symlinks and so on.
1230 * NTFS: symlinks are "dir + reparse" or "file + reparse".
1231 * It is a good idea to create:
1232 * dir + reparse if 'symname' points to a directory, or
1234 * file + reparse if 'symname' points to a file.
1235 * Unfortunately, kern_path hangs if symname contains 'dir'.
1241 * if (!kern_path(symname, LOOKUP_FOLLOW, &path)){
1242 * struct inode *target = d_inode(path.dentry);
1244 * if (S_ISDIR(target->i_mode))
1245 * fa |= FILE_ATTRIBUTE_DIRECTORY;
1246 * // if ( target->i_sb == sb ){
1247 * // use relative path?
1252 } else if (S_ISREG(mode)) {
1253 if (sbi->options->sparse) {
1254 /* Sparse regular file, because of the 'sparse' mount option. */
1255 fa = FILE_ATTRIBUTE_SPARSE_FILE |
1256 FILE_ATTRIBUTE_ARCHIVE;
1257 } else if (dir_ni->std_fa & FILE_ATTRIBUTE_COMPRESSED) {
1258 /* Compressed regular file, because the parent is compressed. */
1259 fa = FILE_ATTRIBUTE_COMPRESSED | FILE_ATTRIBUTE_ARCHIVE;
1261 /* Regular file, default attributes. */
1262 fa = FILE_ATTRIBUTE_ARCHIVE;
1265 fa = FILE_ATTRIBUTE_ARCHIVE;
1269 fa |= FILE_ATTRIBUTE_READONLY;
1271 /* Allocate PATH_MAX bytes. */
1272 new_de = __getname();
1278 /* Mark rw ntfs as dirty. It will be cleared at umount. */
1279 ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);
1281 /* Step 1: Allocate and fill a new MFT record. */
1282 err = ntfs_look_free_mft(sbi, &ino, false, NULL, NULL);
1286 ni = ntfs_new_inode(sbi, ino, fa & FILE_ATTRIBUTE_DIRECTORY);
1292 inode = &ni->vfs_inode;
1293 inode_init_owner(mnt_userns, inode, dir, mode);
1294 mode = inode->i_mode;
1296 inode->i_atime = inode->i_mtime = inode->i_ctime = ni->i_crtime =
1297 current_time(inode);
1300 rec->hard_links = cpu_to_le16(1);
1301 attr = Add2Ptr(rec, le16_to_cpu(rec->attr_off));
1303 /* Get default security id. */
1304 sd = s_default_security;
1305 sd_size = sizeof(s_default_security);
1307 if (is_ntfs3(sbi)) {
1308 security_id = dir_ni->std_security_id;
1309 if (le32_to_cpu(security_id) < SECURITY_ID_FIRST) {
1310 security_id = sbi->security.def_security_id;
1312 if (security_id == SECURITY_ID_INVALID &&
1313 !ntfs_insert_security(sbi, sd, sd_size,
1314 &security_id, NULL))
1315 sbi->security.def_security_id = security_id;
1319 /* Insert standard info. */
1320 std5 = Add2Ptr(attr, SIZEOF_RESIDENT);
1322 if (security_id == SECURITY_ID_INVALID) {
1323 dsize = sizeof(struct ATTR_STD_INFO);
1325 dsize = sizeof(struct ATTR_STD_INFO5);
1326 std5->security_id = security_id;
1327 ni->std_security_id = security_id;
1329 asize = SIZEOF_RESIDENT + dsize;
1331 attr->type = ATTR_STD;
1332 attr->size = cpu_to_le32(asize);
1333 attr->id = cpu_to_le16(aid++);
1334 attr->res.data_off = SIZEOF_RESIDENT_LE;
1335 attr->res.data_size = cpu_to_le32(dsize);
1337 std5->cr_time = std5->m_time = std5->c_time = std5->a_time =
1338 kernel2nt(&inode->i_atime);
1343 attr = Add2Ptr(attr, asize);
1345 /* Insert file name. */
1346 err = fill_name_de(sbi, new_de, name, uni);
1350 mi_get_ref(&ni->mi, &new_de->ref);
1352 fname = (struct ATTR_FILE_NAME *)(new_de + 1);
1353 mi_get_ref(&dir_ni->mi, &fname->home);
1354 fname->dup.cr_time = fname->dup.m_time = fname->dup.c_time =
1355 fname->dup.a_time = std5->cr_time;
1356 fname->dup.alloc_size = fname->dup.data_size = 0;
1357 fname->dup.fa = std5->fa;
1358 fname->dup.ea_size = fname->dup.reparse = 0;
1360 dsize = le16_to_cpu(new_de->key_size);
1361 asize = ALIGN(SIZEOF_RESIDENT + dsize, 8);
1363 attr->type = ATTR_NAME;
1364 attr->size = cpu_to_le32(asize);
1365 attr->res.data_off = SIZEOF_RESIDENT_LE;
1366 attr->res.flags = RESIDENT_FLAG_INDEXED;
1367 attr->id = cpu_to_le16(aid++);
1368 attr->res.data_size = cpu_to_le32(dsize);
1369 memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), fname, dsize);
1371 attr = Add2Ptr(attr, asize);
1373 if (security_id == SECURITY_ID_INVALID) {
1374 /* Insert security attribute. */
1375 asize = SIZEOF_RESIDENT + ALIGN(sd_size, 8);
1377 attr->type = ATTR_SECURE;
1378 attr->size = cpu_to_le32(asize);
1379 attr->id = cpu_to_le16(aid++);
1380 attr->res.data_off = SIZEOF_RESIDENT_LE;
1381 attr->res.data_size = cpu_to_le32(sd_size);
1382 memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), sd, sd_size);
1384 attr = Add2Ptr(attr, asize);
1387 attr->id = cpu_to_le16(aid++);
1388 if (fa & FILE_ATTRIBUTE_DIRECTORY) {
1390 * Regular directory or symlink to directory.
1391 * Create root attribute.
1393 dsize = sizeof(struct INDEX_ROOT) + sizeof(struct NTFS_DE);
1394 asize = sizeof(I30_NAME) + SIZEOF_RESIDENT + dsize;
1396 attr->type = ATTR_ROOT;
1397 attr->size = cpu_to_le32(asize);
1399 attr->name_len = ARRAY_SIZE(I30_NAME);
1400 attr->name_off = SIZEOF_RESIDENT_LE;
1401 attr->res.data_off =
1402 cpu_to_le16(sizeof(I30_NAME) + SIZEOF_RESIDENT);
1403 attr->res.data_size = cpu_to_le32(dsize);
1404 memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), I30_NAME,
1407 root = Add2Ptr(attr, sizeof(I30_NAME) + SIZEOF_RESIDENT);
1408 memcpy(root, dir_root, offsetof(struct INDEX_ROOT, ihdr));
1410 cpu_to_le32(sizeof(struct INDEX_HDR)); // 0x10
1411 root->ihdr.used = cpu_to_le32(sizeof(struct INDEX_HDR) +
1412 sizeof(struct NTFS_DE));
1413 root->ihdr.total = root->ihdr.used;
1415 e = Add2Ptr(root, sizeof(struct INDEX_ROOT));
1416 e->size = cpu_to_le16(sizeof(struct NTFS_DE));
1417 e->flags = NTFS_IE_LAST;
1418 } else if (S_ISLNK(mode)) {
1421 * Create empty resident data attribute.
1423 asize = SIZEOF_RESIDENT;
1425 /* Insert empty ATTR_DATA */
1426 attr->type = ATTR_DATA;
1427 attr->size = cpu_to_le32(SIZEOF_RESIDENT);
1428 attr->name_off = SIZEOF_RESIDENT_LE;
1429 attr->res.data_off = SIZEOF_RESIDENT_LE;
1430 } else if (S_ISREG(mode)) {
1432 * Regular file. Create an empty non-resident data attribute.
1434 attr->type = ATTR_DATA;
1436 attr->nres.evcn = cpu_to_le64(-1ll);
1437 if (fa & FILE_ATTRIBUTE_SPARSE_FILE) {
1438 attr->size = cpu_to_le32(SIZEOF_NONRESIDENT_EX + 8);
1439 attr->name_off = SIZEOF_NONRESIDENT_EX_LE;
1440 attr->flags = ATTR_FLAG_SPARSED;
1441 asize = SIZEOF_NONRESIDENT_EX + 8;
1442 } else if (fa & FILE_ATTRIBUTE_COMPRESSED) {
1443 attr->size = cpu_to_le32(SIZEOF_NONRESIDENT_EX + 8);
1444 attr->name_off = SIZEOF_NONRESIDENT_EX_LE;
1445 attr->flags = ATTR_FLAG_COMPRESSED;
1446 attr->nres.c_unit = COMPRESSION_UNIT;
1447 asize = SIZEOF_NONRESIDENT_EX + 8;
1449 attr->size = cpu_to_le32(SIZEOF_NONRESIDENT + 8);
1450 attr->name_off = SIZEOF_NONRESIDENT_LE;
1451 asize = SIZEOF_NONRESIDENT + 8;
1453 attr->nres.run_off = attr->name_off;
1456 * Special node (char/block device, fifo, socket). Create an empty resident data attribute.
1458 attr->type = ATTR_DATA;
1459 attr->size = cpu_to_le32(SIZEOF_RESIDENT);
1460 attr->name_off = SIZEOF_RESIDENT_LE;
1461 if (fa & FILE_ATTRIBUTE_SPARSE_FILE)
1462 attr->flags = ATTR_FLAG_SPARSED;
1463 else if (fa & FILE_ATTRIBUTE_COMPRESSED)
1464 attr->flags = ATTR_FLAG_COMPRESSED;
1465 attr->res.data_off = SIZEOF_RESIDENT_LE;
1466 asize = SIZEOF_RESIDENT;
1467 ni->ni_flags |= NI_FLAG_RESIDENT;
1470 if (S_ISDIR(mode)) {
1471 ni->ni_flags |= NI_FLAG_DIR;
1472 err = indx_init(&ni->dir, sbi, attr, INDEX_MUTEX_I30);
1475 } else if (S_ISLNK(mode)) {
1476 rp = ntfs_create_reparse_buffer(sbi, symname, size, &nsize);
1485 * Insert ATTR_REPARSE.
1487 attr = Add2Ptr(attr, asize);
1488 attr->type = ATTR_REPARSE;
1489 attr->id = cpu_to_le16(aid++);
1491 /* Resident or non resident? */
1492 asize = ALIGN(SIZEOF_RESIDENT + nsize, 8);
1493 t16 = PtrOffset(rec, attr);
1496 * The function 'ntfs_save_wsl_perm' below requires 0x78 bytes.
1497 * It is a good idea to keep extended attributes resident.
1499 if (asize + t16 + 0x78 + 8 > sbi->record_size) {
1501 CLST clst = bytes_to_cluster(sbi, nsize);
1503 /* Bytes available for the packed run list. */
1504 t16 = sbi->record_size - t16 - SIZEOF_NONRESIDENT;
1507 attr->nres.evcn = cpu_to_le64(clst - 1);
1508 attr->name_off = SIZEOF_NONRESIDENT_LE;
1509 attr->nres.run_off = attr->name_off;
1510 attr->nres.data_size = cpu_to_le64(nsize);
1511 attr->nres.valid_size = attr->nres.data_size;
1512 attr->nres.alloc_size =
1513 cpu_to_le64(ntfs_up_cluster(sbi, nsize));
1515 err = attr_allocate_clusters(sbi, &ni->file.run, 0, 0,
1516 clst, NULL, 0, &alen, 0,
1521 err = run_pack(&ni->file.run, 0, clst,
1522 Add2Ptr(attr, SIZEOF_NONRESIDENT), t16,
1532 asize = SIZEOF_NONRESIDENT + ALIGN(err, 8);
1534 attr->res.data_off = SIZEOF_RESIDENT_LE;
1535 attr->res.data_size = cpu_to_le32(nsize);
1536 memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), rp, nsize);
1539 /* Size of symlink equals the length of input string. */
1540 inode->i_size = size;
1542 attr->size = cpu_to_le32(asize);
1544 err = ntfs_insert_reparse(sbi, IO_REPARSE_TAG_SYMLINK,
1552 attr = Add2Ptr(attr, asize);
1553 attr->type = ATTR_END;
1555 rec->used = cpu_to_le32(PtrOffset(rec, attr) + 8);
1556 rec->next_attr_id = cpu_to_le16(aid);
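/*
 * Summary of the record built in step 1 (added description): the attributes
 * appear in the order they were inserted above: $STANDARD_INFORMATION,
 * $FILE_NAME, optionally $SECURITY_DESCRIPTOR (only when no shared
 * security_id could be used), then $INDEX_ROOT("$I30") for a directory or
 * $DATA for a file/symlink/special node, plus $REPARSE_POINT for a symlink,
 * terminated by the end-of-attributes marker.
 */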
1558 /* Step 2: Add new name in index. */
1559 err = indx_insert_entry(&dir_ni->dir, dir_ni, new_de, sbi, fnd, 0);
1563 /* Unlock parent directory before ntfs_init_acl. */
1566 inode->i_generation = le16_to_cpu(rec->seq);
1568 dir->i_mtime = dir->i_ctime = inode->i_atime;
1570 if (S_ISDIR(mode)) {
1571 inode->i_op = &ntfs_dir_inode_operations;
1572 inode->i_fop = &ntfs_dir_operations;
1573 } else if (S_ISLNK(mode)) {
1574 inode->i_op = &ntfs_link_inode_operations;
1575 inode->i_fop = NULL;
1576 inode->i_mapping->a_ops = &ntfs_aops;
1577 inode->i_size = size;
1578 inode_nohighmem(inode);
1579 } else if (S_ISREG(mode)) {
1580 inode->i_op = &ntfs_file_inode_operations;
1581 inode->i_fop = &ntfs_file_operations;
1582 inode->i_mapping->a_ops =
1583 is_compressed(ni) ? &ntfs_aops_cmpr : &ntfs_aops;
1584 init_rwsem(&ni->file.run_lock);
1586 inode->i_op = &ntfs_special_inode_operations;
1587 init_special_inode(inode, mode, dev);
1590 #ifdef CONFIG_NTFS3_FS_POSIX_ACL
1591 if (!S_ISLNK(mode) && (sb->s_flags & SB_POSIXACL)) {
1592 err = ntfs_init_acl(mnt_userns, inode, dir);
1598 inode->i_flags |= S_NOSEC;
1601 /* Write non resident data. */
1603 err = ntfs_sb_write_run(sbi, &ni->file.run, 0, rp, nsize, 0);
1609 * Call 'd_instantiate' after inode->i_op is set
1610 * but before finish_open.
1612 d_instantiate(dentry, inode);
1614 ntfs_save_wsl_perm(inode);
1615 mark_inode_dirty(dir);
1616 mark_inode_dirty(inode);
1623 /* Undo 'indx_insert_entry'. */
1624 ni_lock_dir(dir_ni);
1625 indx_delete_entry(&dir_ni->dir, dir_ni, new_de + 1,
1626 le16_to_cpu(new_de->key_size), sbi);
1627 /* ni_unlock(dir_ni); will be called later. */
1630 ntfs_remove_reparse(sbi, IO_REPARSE_TAG_SYMLINK, &new_de->ref);
1633 if (S_ISDIR(mode) || run_is_empty(&ni->file.run))
1636 run_deallocate(sbi, &ni->file.run, false);
1639 clear_rec_inuse(rec);
1641 ni->mi.dirty = false;
1642 discard_new_inode(inode);
1644 ntfs_mark_rec_free(sbi, ino);
1653 return ERR_PTR(err);
1656 unlock_new_inode(inode);
1661 int ntfs_link_inode(struct inode *inode, struct dentry *dentry)
1664 struct ntfs_inode *ni = ntfs_i(inode);
1665 struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
1667 struct ATTR_FILE_NAME *de_name;
1669 /* Allocate PATH_MAX bytes. */
1674 /* Mark rw ntfs as dirty. It will be cleared at umount. */
1675 ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);
1677 /* Construct 'de'. */
1678 err = fill_name_de(sbi, de, &dentry->d_name, NULL);
1682 de_name = (struct ATTR_FILE_NAME *)(de + 1);
1683 /* Fill duplicate info. */
1684 de_name->dup.cr_time = de_name->dup.m_time = de_name->dup.c_time =
1685 de_name->dup.a_time = kernel2nt(&inode->i_ctime);
1686 de_name->dup.alloc_size = de_name->dup.data_size =
1687 cpu_to_le64(inode->i_size);
1688 de_name->dup.fa = ni->std_fa;
1689 de_name->dup.ea_size = de_name->dup.reparse = 0;
1691 err = ni_add_name(ntfs_i(d_inode(dentry->d_parent)), ni, de);
1700 * inode_operations::unlink
1701 * inode_operations::rmdir
1703 int ntfs_unlink_inode(struct inode *dir, const struct dentry *dentry)
1706 struct ntfs_sb_info *sbi = dir->i_sb->s_fs_info;
1707 struct inode *inode = d_inode(dentry);
1708 struct ntfs_inode *ni = ntfs_i(inode);
1709 struct ntfs_inode *dir_ni = ntfs_i(dir);
1710 struct NTFS_DE *de, *de2 = NULL;
1713 if (ntfs_is_meta_file(sbi, ni->mi.rno))
1716 /* Allocate PATH_MAX bytes. */
1723 if (S_ISDIR(inode->i_mode) && !dir_is_empty(inode)) {
1728 err = fill_name_de(sbi, de, &dentry->d_name, NULL);
1733 err = ni_remove_name(dir_ni, ni, de, &de2, &undo_remove);
1737 dir->i_mtime = dir->i_ctime = current_time(dir);
1738 mark_inode_dirty(dir);
1739 inode->i_ctime = dir->i_ctime;
1741 mark_inode_dirty(inode);
1742 } else if (!ni_remove_name_undo(dir_ni, ni, de, de2, undo_remove)) {
1743 make_bad_inode(inode);
1744 ntfs_inode_err(inode, "failed to undo unlink");
1745 ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
1747 if (ni_is_dirty(dir))
1748 mark_inode_dirty(dir);
1749 if (ni_is_dirty(inode))
1750 mark_inode_dirty(inode);
1759 void ntfs_evict_inode(struct inode *inode)
1761 truncate_inode_pages_final(&inode->i_data);
1764 _ni_write_inode(inode, inode_needs_sync(inode));
1766 invalidate_inode_buffers(inode);
1769 ni_clear(ntfs_i(inode));
1772 static noinline int ntfs_readlink_hlp(struct inode *inode, char *buffer,
1775 int i, err = -EINVAL;
1776 struct ntfs_inode *ni = ntfs_i(inode);
1777 struct super_block *sb = inode->i_sb;
1778 struct ntfs_sb_info *sbi = sb->s_fs_info;
1781 void *to_free = NULL;
1782 struct REPARSE_DATA_BUFFER *rp;
1783 const __le16 *uname;
1784 struct ATTRIB *attr;
1786 /* Reparse data present. Try to parse it. */
1787 static_assert(!offsetof(struct REPARSE_DATA_BUFFER, ReparseTag));
1788 static_assert(sizeof(u32) == sizeof(rp->ReparseTag));
1792 attr = ni_find_attr(ni, NULL, NULL, ATTR_REPARSE, NULL, 0, NULL, NULL);
1796 if (!attr->non_res) {
1797 rp = resident_data_ex(attr, sizeof(struct REPARSE_DATA_BUFFER));
1800 size = le32_to_cpu(attr->res.data_size);
1802 size = le64_to_cpu(attr->nres.data_size);
1806 if (size > sbi->reparse.max_size || size <= sizeof(u32))
1810 rp = kmalloc(size, GFP_NOFS);
1816 /* Read into a temporary buffer. */
1817 err = ntfs_read_run_nb(sbi, &ni->file.run, 0, rp, size, NULL);
1822 /* Microsoft Tag. */
1823 switch (rp->ReparseTag) {
1824 case IO_REPARSE_TAG_MOUNT_POINT:
1825 /* Mount points and junctions. */
1826 /* Can we use 'Rp->MountPointReparseBuffer.PrintNameLength'? */
1827 if (size <= offsetof(struct REPARSE_DATA_BUFFER,
1828 MountPointReparseBuffer.PathBuffer))
1831 offsetof(struct REPARSE_DATA_BUFFER,
1832 MountPointReparseBuffer.PathBuffer) +
1833 le16_to_cpu(rp->MountPointReparseBuffer
1835 ulen = le16_to_cpu(rp->MountPointReparseBuffer.PrintNameLength);
1838 case IO_REPARSE_TAG_SYMLINK:
1839 /* FolderSymbolicLink */
1840 /* Can we use 'Rp->SymbolicLinkReparseBuffer.PrintNameLength'? */
1841 if (size <= offsetof(struct REPARSE_DATA_BUFFER,
1842 SymbolicLinkReparseBuffer.PathBuffer))
1845 rp, offsetof(struct REPARSE_DATA_BUFFER,
1846 SymbolicLinkReparseBuffer.PathBuffer) +
1847 le16_to_cpu(rp->SymbolicLinkReparseBuffer
1850 rp->SymbolicLinkReparseBuffer.PrintNameLength);
1853 case IO_REPARSE_TAG_CLOUD:
1854 case IO_REPARSE_TAG_CLOUD_1:
1855 case IO_REPARSE_TAG_CLOUD_2:
1856 case IO_REPARSE_TAG_CLOUD_3:
1857 case IO_REPARSE_TAG_CLOUD_4:
1858 case IO_REPARSE_TAG_CLOUD_5:
1859 case IO_REPARSE_TAG_CLOUD_6:
1860 case IO_REPARSE_TAG_CLOUD_7:
1861 case IO_REPARSE_TAG_CLOUD_8:
1862 case IO_REPARSE_TAG_CLOUD_9:
1863 case IO_REPARSE_TAG_CLOUD_A:
1864 case IO_REPARSE_TAG_CLOUD_B:
1865 case IO_REPARSE_TAG_CLOUD_C:
1866 case IO_REPARSE_TAG_CLOUD_D:
1867 case IO_REPARSE_TAG_CLOUD_E:
1868 case IO_REPARSE_TAG_CLOUD_F:
1869 err = sizeof("OneDrive") - 1;
1872 memcpy(buffer, "OneDrive", err);
1876 if (IsReparseTagMicrosoft(rp->ReparseTag)) {
1877 /* Unknown Microsoft Tag. */
1880 if (!IsReparseTagNameSurrogate(rp->ReparseTag) ||
1881 size <= sizeof(struct REPARSE_POINT)) {
1886 uname = Add2Ptr(rp, sizeof(struct REPARSE_POINT));
1887 ulen = le16_to_cpu(rp->ReparseDataLength) -
1888 sizeof(struct REPARSE_POINT);
1891 /* Convert ulen from bytes to UNICODE chars. */
1894 /* Check that name is available. */
1895 if (!ulen || uname + ulen > (__le16 *)Add2Ptr(rp, size))
1898 /* If name is already zero terminated then truncate it now. */
1899 if (!uname[ulen - 1])
1902 err = ntfs_utf16_to_nls(sbi, uname, ulen, buffer, buflen);
1907 /* Translate Windows '\' into Linux '/'. */
1908 for (i = 0; i < err; i++) {
1909 if (buffer[i] == '\\')
1913 /* Always set last zero. */
1920 static const char *ntfs_get_link(struct dentry *de, struct inode *inode,
1921 struct delayed_call *done)
1927 return ERR_PTR(-ECHILD);
1929 ret = kmalloc(PAGE_SIZE, GFP_NOFS);
1931 return ERR_PTR(-ENOMEM);
1933 err = ntfs_readlink_hlp(inode, ret, PAGE_SIZE);
1936 return ERR_PTR(err);
1939 set_delayed_call(done, kfree_link, ret);
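	/*
	 * Note (added description): the buffer is released through the
	 * delayed call (kfree_link) once the VFS is done with the link
	 * body, so no explicit kfree is needed here.
	 */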
1945 const struct inode_operations ntfs_link_inode_operations = {
1946 .get_link = ntfs_get_link,
1947 .setattr = ntfs3_setattr,
1948 .listxattr = ntfs_listxattr,
1949 .permission = ntfs_permission,
1952 const struct address_space_operations ntfs_aops = {
1953 .readpage = ntfs_readpage,
1954 .readahead = ntfs_readahead,
1955 .writepage = ntfs_writepage,
1956 .writepages = ntfs_writepages,
1957 .write_begin = ntfs_write_begin,
1958 .write_end = ntfs_write_end,
1959 .direct_IO = ntfs_direct_IO,
1961 .set_page_dirty = __set_page_dirty_buffers,
1964 const struct address_space_operations ntfs_aops_cmpr = {
1965 .readpage = ntfs_readpage,
1966 .readahead = ntfs_readahead,