// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corporation, 2021
 *
 * Author: Mike Rapoport <rppt@linux.ibm.com>
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/swap.h>
#include <linux/mount.h>
#include <linux/memfd.h>
#include <linux/bitops.h>
#include <linux/printk.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/pseudo_fs.h>
#include <linux/secretmem.h>
#include <linux/set_memory.h>
#include <linux/sched/signal.h>

#include <uapi/linux/magic.h>

#include <asm/tlbflush.h>

#include "internal.h"
#undef pr_fmt
#define pr_fmt(fmt) "secretmem: " fmt
/*
 * Define mode and flag masks to allow validation of the system call
 * parameters.
 */
#define SECRETMEM_MODE_MASK	(0x0)
#define SECRETMEM_FLAGS_MASK	SECRETMEM_MODE_MASK
static bool secretmem_enable __ro_after_init;
module_param_named(enable, secretmem_enable, bool, 0400);
MODULE_PARM_DESC(secretmem_enable,
		 "Enable secretmem and memfd_secret(2) system call");

static atomic_t secretmem_users;
bool secretmem_active(void)
{
	return !!atomic_read(&secretmem_users);
}
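/*
 * Fault in a page of a secretmem mapping: take (or allocate and zero) the
 * page for this offset, remove it from the kernel direct map, insert it
 * into the page cache and flush the now stale direct map TLB entries.
 */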
static vm_fault_t secretmem_fault(struct vm_fault *vmf)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	pgoff_t offset = vmf->pgoff;
	gfp_t gfp = vmf->gfp_mask;
	unsigned long addr;
	struct page *page;
	vm_fault_t ret;
	int err;

	if (((loff_t)vmf->pgoff << PAGE_SHIFT) >= i_size_read(inode))
		return vmf_error(-EINVAL);

	filemap_invalidate_lock_shared(mapping);
retry:
	page = find_lock_page(mapping, offset);
	if (!page) {
		page = alloc_page(gfp | __GFP_ZERO);
		if (!page) {
			ret = VM_FAULT_OOM;
			goto out;
		}

		err = set_direct_map_invalid_noflush(page);
		if (err) {
			put_page(page);
			ret = vmf_error(err);
			goto out;
		}

		__SetPageUptodate(page);
		err = add_to_page_cache_lru(page, mapping, offset, gfp);
		if (unlikely(err)) {
			put_page(page);
			/*
			 * If a split of large page was required, it
			 * already happened when we marked the page invalid
			 * which guarantees that this call won't fail
			 */
			set_direct_map_default_noflush(page);
			if (err == -EEXIST)
				goto retry;

			ret = vmf_error(err);
			goto out;
		}

		addr = (unsigned long)page_address(page);
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	}

	vmf->page = page;
	ret = VM_FAULT_LOCKED;

out:
	filemap_invalidate_unlock_shared(mapping);
	return ret;
}
static const struct vm_operations_struct secretmem_vm_ops = {
	.fault = secretmem_fault,
};
static int secretmem_release(struct inode *inode, struct file *file)
{
	atomic_dec(&secretmem_users);
	return 0;
}
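/*
 * Secretmem mappings must be MAP_SHARED and are implicitly locked: the
 * requested length is checked against RLIMIT_MEMLOCK and the VMA is marked
 * VM_LOCKED and excluded from core dumps with VM_DONTDUMP.
 */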
static int secretmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long len = vma->vm_end - vma->vm_start;

	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
		return -EINVAL;

	if (mlock_future_check(vma->vm_mm, vma->vm_flags | VM_LOCKED, len))
		return -EAGAIN;

	vma->vm_flags |= VM_LOCKED | VM_DONTDUMP;
	vma->vm_ops = &secretmem_vm_ops;

	return 0;
}
bool vma_is_secretmem(struct vm_area_struct *vma)
{
	return vma->vm_ops == &secretmem_vm_ops;
}
static const struct file_operations secretmem_fops = {
	.release = secretmem_release,
	.mmap = secretmem_mmap,
};
static bool secretmem_isolate_page(struct page *page, isolate_mode_t mode)
{
	return false;
}

static int secretmem_migratepage(struct address_space *mapping,
				 struct page *newpage, struct page *page,
				 enum migrate_mode mode)
{
	return -EBUSY;
}
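/*
 * When a secretmem folio is freed, map it back into the kernel direct map
 * and zero it so its contents are never handed back to the page allocator.
 */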
static void secretmem_free_folio(struct folio *folio)
{
	set_direct_map_default_noflush(&folio->page);
	folio_zero_segment(folio, 0, folio_size(folio));
}
const struct address_space_operations secretmem_aops = {
	.dirty_folio	= noop_dirty_folio,
	.free_folio	= secretmem_free_folio,
	.migratepage	= secretmem_migratepage,
	.isolate_page	= secretmem_isolate_page,
};
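/*
 * The size of a secretmem file can only be set while it is still empty
 * (e.g. by an initial ftruncate()); any later size change is rejected.
 */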
static int secretmem_setattr(struct user_namespace *mnt_userns,
			     struct dentry *dentry, struct iattr *iattr)
{
	struct inode *inode = d_inode(dentry);
	struct address_space *mapping = inode->i_mapping;
	unsigned int ia_valid = iattr->ia_valid;
	int ret;

	filemap_invalidate_lock(mapping);

	if ((ia_valid & ATTR_SIZE) && inode->i_size)
		ret = -EINVAL;
	else
		ret = simple_setattr(mnt_userns, dentry, iattr);

	filemap_invalidate_unlock(mapping);

	return ret;
}
static const struct inode_operations secretmem_iops = {
	.setattr = secretmem_setattr,
};
static struct vfsmount *secretmem_mnt;
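/*
 * Back a new secretmem file with an anonymous inode on the secretmem mount
 * and wire up the secretmem file, inode and address_space operations.
 */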
static struct file *secretmem_file_create(unsigned long flags)
{
	struct file *file = ERR_PTR(-ENOMEM);
	struct inode *inode;

	inode = alloc_anon_inode(secretmem_mnt->mnt_sb);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	file = alloc_file_pseudo(inode, secretmem_mnt, "secretmem",
				 O_RDWR, &secretmem_fops);
	if (IS_ERR(file))
		goto err_free_inode;

	mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER);
	mapping_set_unevictable(inode->i_mapping);

	inode->i_op = &secretmem_iops;
	inode->i_mapping->a_ops = &secretmem_aops;

	/* pretend we are a normal file with zero size */
	inode->i_mode |= S_IFREG;
	inode->i_size = 0;

	return file;

err_free_inode:
	iput(inode);
	return file;
}
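/*
 * memfd_secret() returns a file descriptor whose pages are removed from the
 * kernel direct map when they are faulted in.  Illustrative userspace usage
 * (sketch only, error handling omitted; the raw syscall is used since libc
 * may not provide a wrapper):
 *
 *	int fd = syscall(SYS_memfd_secret, 0);
 *	ftruncate(fd, len);
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 */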
SYSCALL_DEFINE1(memfd_secret, unsigned int, flags)
{
	struct file *file;
	int fd, err;

	/* make sure local flags do not conflict with global fcntl.h */
	BUILD_BUG_ON(SECRETMEM_FLAGS_MASK & O_CLOEXEC);

	if (!secretmem_enable)
		return -ENOSYS;

	if (flags & ~(SECRETMEM_FLAGS_MASK | O_CLOEXEC))
		return -EINVAL;
	if (atomic_read(&secretmem_users) < 0)
		return -ENFILE;

	fd = get_unused_fd_flags(flags & O_CLOEXEC);
	if (fd < 0)
		return fd;

	file = secretmem_file_create(flags);
	if (IS_ERR(file)) {
		err = PTR_ERR(file);
		goto err_put_fd;
	}

	file->f_flags |= O_LARGEFILE;

	atomic_inc(&secretmem_users);
	fd_install(fd, file);
	return fd;

err_put_fd:
	put_unused_fd(fd);
	return err;
}
static int secretmem_init_fs_context(struct fs_context *fc)
{
	return init_pseudo(fc, SECRETMEM_MAGIC) ? 0 : -ENOMEM;
}
static struct file_system_type secretmem_fs = {
	.name = "secretmem",
	.init_fs_context = secretmem_init_fs_context,
	.kill_sb = kill_anon_super,
};
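/*
 * Mount the secretmem pseudo filesystem at boot.  Nothing is set up unless
 * secretmem is enabled with the "secretmem.enable=1" kernel parameter.
 */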
static int secretmem_init(void)
{
	if (!secretmem_enable)
		return 0;

	secretmem_mnt = kern_mount(&secretmem_fs);
	if (IS_ERR(secretmem_mnt))
		return PTR_ERR(secretmem_mnt);

	/* prevent secretmem mappings from ever getting PROT_EXEC */
	secretmem_mnt->mnt_flags |= MNT_NOEXEC;

	return 0;
}
fs_initcall(secretmem_init);