// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2016-20 Intel Corporation. */

#include <linux/lockdep.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/suspend.h>
#include <linux/sched/mm.h>
#include <asm/sgx.h>
#include "encl.h"
#include "encls.h"
#include "sgx.h"
static int sgx_encl_lookup_backing(struct sgx_encl *encl, unsigned long page_index,
				   struct sgx_backing *backing);
#define PCMDS_PER_PAGE (PAGE_SIZE / sizeof(struct sgx_pcmd))
/*
 * 32 PCMD entries share a PCMD page. PCMD_FIRST_MASK is used to
 * determine the page index associated with the first PCMD entry
 * within a PCMD page.
 */
#define PCMD_FIRST_MASK GENMASK(4, 0)
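
/*
 * Worked example (illustrative only): with 4K pages there are 32 PCMD
 * entries per PCMD page, so the low five bits of an enclave page index
 * select the PCMD entry and the remaining bits select the PCMD page:
 *
 *	page_index     = 41;
 *	first_in_group = page_index & ~PCMD_FIRST_MASK;	// 32
 *	entry_in_page  = page_index & PCMD_FIRST_MASK;	// 9
 */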
/**
 * reclaimer_writing_to_pcmd() - Query if any enclave page associated with
 *				 a PCMD page is in the process of being reclaimed.
 * @encl:        Enclave to which PCMD page belongs
 * @start_addr:  Address of enclave page using first entry within the PCMD page
 *
 * When an enclave page is reclaimed some Paging Crypto MetaData (PCMD) is
 * stored. The PCMD data of a reclaimed enclave page contains enough
 * information for the processor to verify the page at the time
 * it is loaded back into the Enclave Page Cache (EPC).
 *
 * The backing storage to which enclave pages are reclaimed is laid out as
 * follows:
 * Encrypted enclave pages:SECS page:PCMD pages
 *
 * Each PCMD page contains the PCMD metadata of
 * PAGE_SIZE/sizeof(struct sgx_pcmd) enclave pages.
 *
 * A PCMD page can only be truncated if it is (a) empty, and (b) not in the
 * process of getting data (and thus soon being non-empty). (b) is tested with
 * a check if an enclave page sharing the PCMD page is in the process of being
 * reclaimed.
 *
 * The reclaimer sets the SGX_ENCL_PAGE_BEING_RECLAIMED flag when it
 * intends to reclaim that enclave page - it means that the PCMD page
 * associated with that enclave page is about to get some data and thus
 * even if the PCMD page is empty, it should not be truncated.
 *
 * Context: Enclave mutex (&sgx_encl->lock) must be held.
 * Return: 1 if the reclaimer is about to write to the PCMD page
 *         0 if the reclaimer has no intention to write to the PCMD page
 */
static int reclaimer_writing_to_pcmd(struct sgx_encl *encl,
				     unsigned long start_addr)
{
	int reclaimed = 0;
	int i;

	/*
	 * PCMD_FIRST_MASK is based on number of PCMD entries within
	 * PCMD page being 32.
	 */
	BUILD_BUG_ON(PCMDS_PER_PAGE != 32);

	for (i = 0; i < PCMDS_PER_PAGE; i++) {
		struct sgx_encl_page *entry;
		unsigned long addr;

		addr = start_addr + i * PAGE_SIZE;

		/*
		 * Stop when reaching the SECS page - it does not
		 * have a page_array entry and its reclaim is
		 * started and completed with enclave mutex held so
		 * it does not use the SGX_ENCL_PAGE_BEING_RECLAIMED
		 * flag.
		 */
		if (addr == encl->base + encl->size)
			break;

		entry = xa_load(&encl->page_array, PFN_DOWN(addr));
		if (!entry)
			continue;

		/*
		 * VA page slot ID uses same bit as the flag so it is important
		 * to ensure that the page is not already in backing store.
		 */
		if (entry->epc_page &&
		    (entry->desc & SGX_ENCL_PAGE_BEING_RECLAIMED)) {
			reclaimed = 1;
			break;
		}
	}

	return reclaimed;
}
/*
 * Calculate byte offset of a PCMD struct associated with an enclave page. PCMDs
 * follow right after the EPC data in the backing storage. In addition to the
 * visible enclave pages, there's one extra page slot for SECS, before PCMD
 * structs.
 */
static inline pgoff_t sgx_encl_get_backing_page_pcmd_offset(struct sgx_encl *encl,
							    unsigned long page_index)
{
	pgoff_t epc_end_off = encl->size + sizeof(struct sgx_secs);

	return epc_end_off + page_index * sizeof(struct sgx_pcmd);
}
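
/*
 * Worked example (illustrative only): for an enclave of 64 pages, the
 * backing file holds 64 encrypted page slots, then one SECS slot, then
 * the PCMD area. The PCMD of enclave page 10 therefore starts at byte:
 *
 *	64 * PAGE_SIZE + sizeof(struct sgx_secs) + 10 * sizeof(struct sgx_pcmd)
 */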
/*
 * Free a page from the backing storage in the given page index.
 */
static inline void sgx_encl_truncate_backing_page(struct sgx_encl *encl, unsigned long page_index)
{
	struct inode *inode = file_inode(encl->backing);

	shmem_truncate_range(inode, PFN_PHYS(page_index), PFN_PHYS(page_index) + PAGE_SIZE - 1);
}
/*
 * ELDU: Load an EPC page as unblocked. For more info, see "OS Management of EPC
 * Pages" in the SDM.
 */
static int __sgx_encl_eldu(struct sgx_encl_page *encl_page,
			   struct sgx_epc_page *epc_page,
			   struct sgx_epc_page *secs_page)
{
	unsigned long va_offset = encl_page->desc & SGX_ENCL_PAGE_VA_OFFSET_MASK;
	struct sgx_encl *encl = encl_page->encl;
	pgoff_t page_index, page_pcmd_off;
	unsigned long pcmd_first_page;
	struct sgx_pageinfo pginfo;
	struct sgx_backing b;
	bool pcmd_page_empty;
	u8 *pcmd_page;
	int ret;

	if (secs_page)
		page_index = PFN_DOWN(encl_page->desc - encl_page->encl->base);
	else
		page_index = PFN_DOWN(encl->size);

	/*
	 * Address of enclave page using the first entry within the PCMD page.
	 */
	pcmd_first_page = PFN_PHYS(page_index & ~PCMD_FIRST_MASK) + encl->base;

	page_pcmd_off = sgx_encl_get_backing_page_pcmd_offset(encl, page_index);

	ret = sgx_encl_lookup_backing(encl, page_index, &b);
	if (ret)
		return ret;

	pginfo.addr = encl_page->desc & PAGE_MASK;
	pginfo.contents = (unsigned long)kmap_local_page(b.contents);
	pcmd_page = kmap_local_page(b.pcmd);
	pginfo.metadata = (unsigned long)pcmd_page + b.pcmd_offset;

	if (secs_page)
		pginfo.secs = (u64)sgx_get_epc_virt_addr(secs_page);
	else
		pginfo.secs = 0;

	ret = __eldu(&pginfo, sgx_get_epc_virt_addr(epc_page),
		     sgx_get_epc_virt_addr(encl_page->va_page->epc_page) + va_offset);
	if (ret) {
		if (encls_failed(ret))
			ENCLS_WARN(ret, "ELDU");

		ret = -EFAULT;
	}

	memset(pcmd_page + b.pcmd_offset, 0, sizeof(struct sgx_pcmd));
	set_page_dirty(b.pcmd);

	/*
	 * The area for the PCMD in the page was zeroed above. Check if the
	 * whole page is now empty meaning that all PCMDs have been zeroed:
	 */
	pcmd_page_empty = !memchr_inv(pcmd_page, 0, PAGE_SIZE);

	kunmap_local(pcmd_page);
	kunmap_local((void *)(unsigned long)pginfo.contents);

	get_page(b.pcmd);
	sgx_encl_put_backing(&b);

	sgx_encl_truncate_backing_page(encl, page_index);

	if (pcmd_page_empty && !reclaimer_writing_to_pcmd(encl, pcmd_first_page)) {
		sgx_encl_truncate_backing_page(encl, PFN_DOWN(page_pcmd_off));
		pcmd_page = kmap_local_page(b.pcmd);
		if (memchr_inv(pcmd_page, 0, PAGE_SIZE))
			pr_warn("PCMD page not empty after truncate.\n");
		kunmap_local(pcmd_page);
	}

	put_page(b.pcmd);

	return ret;
}
static struct sgx_epc_page *sgx_encl_eldu(struct sgx_encl_page *encl_page,
					  struct sgx_epc_page *secs_page)
{
	unsigned long va_offset = encl_page->desc & SGX_ENCL_PAGE_VA_OFFSET_MASK;
	struct sgx_encl *encl = encl_page->encl;
	struct sgx_epc_page *epc_page;
	int ret;

	epc_page = sgx_alloc_epc_page(encl_page, false);
	if (IS_ERR(epc_page))
		return epc_page;

	ret = __sgx_encl_eldu(encl_page, epc_page, secs_page);
	if (ret) {
		sgx_encl_free_epc_page(epc_page);
		return ERR_PTR(ret);
	}

	sgx_free_va_slot(encl_page->va_page, va_offset);
	list_move(&encl_page->va_page->list, &encl->va_pages);
	encl_page->desc &= ~SGX_ENCL_PAGE_VA_OFFSET_MASK;
	encl_page->epc_page = epc_page;

	return epc_page;
}
static struct sgx_encl_page *__sgx_encl_load_page(struct sgx_encl *encl,
						  struct sgx_encl_page *entry)
{
	struct sgx_epc_page *epc_page;

	/* Entry successfully located. */
	if (entry->epc_page) {
		if (entry->desc & SGX_ENCL_PAGE_BEING_RECLAIMED)
			return ERR_PTR(-EBUSY);

		return entry;
	}

	if (!(encl->secs.epc_page)) {
		epc_page = sgx_encl_eldu(&encl->secs, NULL);
		if (IS_ERR(epc_page))
			return ERR_CAST(epc_page);
	}

	epc_page = sgx_encl_eldu(entry, encl->secs.epc_page);
	if (IS_ERR(epc_page))
		return ERR_CAST(epc_page);

	encl->secs_child_cnt++;
	sgx_mark_page_reclaimable(entry->epc_page);

	return entry;
}
static struct sgx_encl_page *sgx_encl_load_page_in_vma(struct sgx_encl *encl,
						       unsigned long addr,
						       unsigned long vm_flags)
{
	unsigned long vm_prot_bits = vm_flags & VM_ACCESS_FLAGS;
	struct sgx_encl_page *entry;

	entry = xa_load(&encl->page_array, PFN_DOWN(addr));
	if (!entry)
		return ERR_PTR(-EFAULT);

	/*
	 * Verify that the page has equal or higher build time
	 * permissions than the VMA permissions (i.e. the subset of {VM_READ,
	 * VM_WRITE, VM_EXEC} in vma->vm_flags).
	 */
	if ((entry->vm_max_prot_bits & vm_prot_bits) != vm_prot_bits)
		return ERR_PTR(-EFAULT);

	return __sgx_encl_load_page(encl, entry);
}

struct sgx_encl_page *sgx_encl_load_page(struct sgx_encl *encl,
					 unsigned long addr)
{
	struct sgx_encl_page *entry;

	entry = xa_load(&encl->page_array, PFN_DOWN(addr));
	if (!entry)
		return ERR_PTR(-EFAULT);

	return __sgx_encl_load_page(encl, entry);
}
/**
 * sgx_encl_eaug_page() - Dynamically add page to initialized enclave
 * @vma:	VMA obtained from fault info from where page is accessed
 * @encl:	enclave accessing the page
 * @addr:	address that triggered the page fault
 *
 * When an initialized enclave accesses a page with no backing EPC page
 * on an SGX2 system then the EPC can be added dynamically via the SGX2
 * ENCLS[EAUG] instruction.
 *
 * Returns: Appropriate vm_fault_t: VM_FAULT_NOPAGE when PTE was installed
 * successfully, VM_FAULT_SIGBUS or VM_FAULT_OOM as error otherwise.
 */
static vm_fault_t sgx_encl_eaug_page(struct vm_area_struct *vma,
				     struct sgx_encl *encl, unsigned long addr)
{
	vm_fault_t vmret = VM_FAULT_SIGBUS;
	struct sgx_pageinfo pginfo = {0};
	struct sgx_encl_page *encl_page;
	struct sgx_epc_page *epc_page;
	struct sgx_va_page *va_page;
	unsigned long phys_addr;
	u64 secinfo_flags;
	int ret;

	if (!test_bit(SGX_ENCL_INITIALIZED, &encl->flags))
		return VM_FAULT_SIGBUS;

	/*
	 * Ignore internal permission checking for dynamically added pages.
	 * They matter only for data added during the pre-initialization
	 * phase. The enclave decides the permissions by the means of
	 * EACCEPT, EACCEPTCOPY and EMODPE.
	 */
	secinfo_flags = SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_X;
	encl_page = sgx_encl_page_alloc(encl, addr - encl->base, secinfo_flags);
	if (IS_ERR(encl_page))
		return VM_FAULT_OOM;

	mutex_lock(&encl->lock);

	epc_page = sgx_alloc_epc_page(encl_page, false);
	if (IS_ERR(epc_page)) {
		if (PTR_ERR(epc_page) == -EBUSY)
			vmret = VM_FAULT_NOPAGE;
		goto err_out_unlock;
	}

	va_page = sgx_encl_grow(encl, false);
	if (IS_ERR(va_page)) {
		if (PTR_ERR(va_page) == -EBUSY)
			vmret = VM_FAULT_NOPAGE;
		goto err_out_epc;
	}

	if (va_page)
		list_add(&va_page->list, &encl->va_pages);

	ret = xa_insert(&encl->page_array, PFN_DOWN(encl_page->desc),
			encl_page, GFP_KERNEL);
	/*
	 * If ret == -EBUSY then page was created in another flow while
	 * running without encl->lock
	 */
	if (ret)
		goto err_out_shrink;

	pginfo.secs = (unsigned long)sgx_get_epc_virt_addr(encl->secs.epc_page);
	pginfo.addr = encl_page->desc & PAGE_MASK;
	pginfo.metadata = 0;

	ret = __eaug(&pginfo, sgx_get_epc_virt_addr(epc_page));
	if (ret)
		goto err_out;

	encl_page->encl = encl;
	encl_page->epc_page = epc_page;
	encl_page->type = SGX_PAGE_TYPE_REG;
	encl->secs_child_cnt++;

	sgx_mark_page_reclaimable(encl_page->epc_page);

	phys_addr = sgx_get_epc_phys_addr(epc_page);
	/*
	 * Do not undo everything when creating PTE entry fails - next #PF
	 * would find page ready for a PTE.
	 */
	vmret = vmf_insert_pfn(vma, addr, PFN_DOWN(phys_addr));
	if (vmret != VM_FAULT_NOPAGE) {
		mutex_unlock(&encl->lock);
		return VM_FAULT_SIGBUS;
	}
	mutex_unlock(&encl->lock);
	return VM_FAULT_NOPAGE;

err_out:
	xa_erase(&encl->page_array, PFN_DOWN(encl_page->desc));

err_out_shrink:
	sgx_encl_shrink(encl, va_page);
err_out_epc:
	sgx_encl_free_epc_page(epc_page);
err_out_unlock:
	mutex_unlock(&encl->lock);
	kfree(encl_page);

	return vmret;
}
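
/*
 * Usage sketch (illustrative, not part of this file): dynamic EPC
 * addition is only half of the SGX2 contract. After this handler EAUGs
 * a page in the PENDING state, the enclave itself must accept it before
 * first real use, e.g. the enclave runtime executes:
 *
 *	ENCLU[EACCEPT] on the faulting page, with a SECINFO of
 *	R | W | PT_REG | PENDING, which clears the PENDING bit.
 */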
static vm_fault_t sgx_vma_fault(struct vm_fault *vmf)
{
	unsigned long addr = (unsigned long)vmf->address;
	struct vm_area_struct *vma = vmf->vma;
	struct sgx_encl_page *entry;
	unsigned long phys_addr;
	struct sgx_encl *encl;
	vm_fault_t ret;

	encl = vma->vm_private_data;

	/*
	 * It's very unlikely but possible that allocating memory for the
	 * mm_list entry of a forked process failed in sgx_vma_open(). When
	 * this happens, vm_private_data is set to NULL.
	 */
	if (unlikely(!encl))
		return VM_FAULT_SIGBUS;

	/*
	 * The page_array keeps track of all enclave pages, whether they
	 * are swapped out or not. If there is no entry for this page and
	 * the system supports SGX2 then it is possible to dynamically add
	 * a new enclave page. This is only possible for an initialized
	 * enclave that will be checked for right away.
	 */
	if (cpu_feature_enabled(X86_FEATURE_SGX2) &&
	    (!xa_load(&encl->page_array, PFN_DOWN(addr))))
		return sgx_encl_eaug_page(vma, encl, addr);

	mutex_lock(&encl->lock);

	entry = sgx_encl_load_page_in_vma(encl, addr, vma->vm_flags);
	if (IS_ERR(entry)) {
		mutex_unlock(&encl->lock);

		if (PTR_ERR(entry) == -EBUSY)
			return VM_FAULT_NOPAGE;

		return VM_FAULT_SIGBUS;
	}

	phys_addr = sgx_get_epc_phys_addr(entry->epc_page);

	ret = vmf_insert_pfn(vma, addr, PFN_DOWN(phys_addr));
	if (ret != VM_FAULT_NOPAGE) {
		mutex_unlock(&encl->lock);

		return VM_FAULT_SIGBUS;
	}

	sgx_encl_test_and_clear_young(vma->vm_mm, entry);
	mutex_unlock(&encl->lock);

	return VM_FAULT_NOPAGE;
}
static void sgx_vma_open(struct vm_area_struct *vma)
{
	struct sgx_encl *encl = vma->vm_private_data;

	/*
	 * It's possible but unlikely that vm_private_data is NULL. This can
	 * happen in a grandchild of a process, when sgx_encl_mm_add() had
	 * failed to allocate memory in this callback.
	 */
	if (unlikely(!encl))
		return;

	if (sgx_encl_mm_add(encl, vma->vm_mm))
		vma->vm_private_data = NULL;
}
/**
 * sgx_encl_may_map() - Check if a requested VMA mapping is allowed
 * @encl:		an enclave pointer
 * @start:		lower bound of the address range, inclusive
 * @end:		upper bound of the address range, exclusive
 * @vm_flags:		VMA flags
 *
 * Iterate through the enclave pages contained within [@start, @end) to verify
 * that the permissions requested by a subset of {VM_READ, VM_WRITE, VM_EXEC}
 * do not contain any permissions that are not contained in the build time
 * permissions of any of the enclave pages within the given address range.
 *
 * An enclave creator must declare the strongest permissions that will be
 * needed for each enclave page. This ensures that mappings have identical
 * or weaker permissions than the earlier declared permissions.
 *
 * Return: 0 on success, -EACCES otherwise
 */
int sgx_encl_may_map(struct sgx_encl *encl, unsigned long start,
		     unsigned long end, unsigned long vm_flags)
{
	unsigned long vm_prot_bits = vm_flags & VM_ACCESS_FLAGS;
	struct sgx_encl_page *page;
	unsigned long count = 0;
	int ret = 0;

	XA_STATE(xas, &encl->page_array, PFN_DOWN(start));

	/* Disallow mapping outside enclave's address range. */
	if (test_bit(SGX_ENCL_INITIALIZED, &encl->flags) &&
	    (start < encl->base || end > encl->base + encl->size))
		return -EACCES;

	/*
	 * Disallow READ_IMPLIES_EXEC tasks as their VMA permissions might
	 * conflict with the enclave page permissions.
	 */
	if (current->personality & READ_IMPLIES_EXEC)
		return -EACCES;

	mutex_lock(&encl->lock);
	xas_lock(&xas);
	xas_for_each(&xas, page, PFN_DOWN(end - 1)) {
		if (~page->vm_max_prot_bits & vm_prot_bits) {
			ret = -EACCES;
			break;
		}

		/* Reschedule on every XA_CHECK_SCHED iteration. */
		if (!(++count % XA_CHECK_SCHED)) {
			xas_pause(&xas);
			xas_unlock(&xas);
			mutex_unlock(&encl->lock);

			cond_resched();

			mutex_lock(&encl->lock);
			xas_lock(&xas);
		}
	}
	xas_unlock(&xas);
	mutex_unlock(&encl->lock);

	return ret;
}
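
/*
 * Worked example (illustrative only): for a page EADDed with SECINFO
 * permissions RW, vm_max_prot_bits contains VM_READ | VM_WRITE, so
 * mmap() with PROT_READ or PROT_READ | PROT_WRITE passes this check
 * while PROT_READ | PROT_EXEC fails it with -EACCES.
 */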
static int sgx_vma_mprotect(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end, unsigned long newflags)
{
	return sgx_encl_may_map(vma->vm_private_data, start, end, newflags);
}
static int sgx_encl_debug_read(struct sgx_encl *encl, struct sgx_encl_page *page,
			       unsigned long addr, void *data)
{
	unsigned long offset = addr & ~PAGE_MASK;
	int ret;

	ret = __edbgrd(sgx_get_epc_virt_addr(page->epc_page) + offset, data);
	if (ret)
		return -EIO;

	return 0;
}

static int sgx_encl_debug_write(struct sgx_encl *encl, struct sgx_encl_page *page,
				unsigned long addr, void *data)
{
	unsigned long offset = addr & ~PAGE_MASK;
	int ret;

	ret = __edbgwr(sgx_get_epc_virt_addr(page->epc_page) + offset, data);
	if (ret)
		return -EIO;

	return 0;
}
/*
 * Load an enclave page to EPC if required, and take encl->lock.
 */
static struct sgx_encl_page *sgx_encl_reserve_page(struct sgx_encl *encl,
						   unsigned long addr,
						   unsigned long vm_flags)
{
	struct sgx_encl_page *entry;

	for ( ; ; ) {
		mutex_lock(&encl->lock);

		entry = sgx_encl_load_page_in_vma(encl, addr, vm_flags);
		if (PTR_ERR(entry) != -EBUSY)
			break;

		mutex_unlock(&encl->lock);
	}

	if (IS_ERR(entry))
		mutex_unlock(&encl->lock);

	return entry;
}
static int sgx_vma_access(struct vm_area_struct *vma, unsigned long addr,
			  void *buf, int len, int write)
{
	struct sgx_encl *encl = vma->vm_private_data;
	struct sgx_encl_page *entry = NULL;
	char data[sizeof(unsigned long)];
	unsigned long align;
	int offset;
	int cnt;
	int ret = 0;
	int i;

	/*
	 * If process was forked, VMA is still there but vm_private_data is set
	 * to NULL.
	 */
	if (!encl)
		return -EFAULT;

	if (!test_bit(SGX_ENCL_DEBUG, &encl->flags))
		return -EFAULT;

	for (i = 0; i < len; i += cnt) {
		entry = sgx_encl_reserve_page(encl, (addr + i) & PAGE_MASK,
					      vma->vm_flags);
		if (IS_ERR(entry)) {
			ret = PTR_ERR(entry);
			break;
		}

		align = ALIGN_DOWN(addr + i, sizeof(unsigned long));
		offset = (addr + i) & (sizeof(unsigned long) - 1);
		cnt = sizeof(unsigned long) - offset;
		cnt = min(cnt, len - i);

		ret = sgx_encl_debug_read(encl, entry, align, data);
		if (ret)
			goto out;

		if (write) {
			memcpy(data + offset, buf + i, cnt);
			ret = sgx_encl_debug_write(encl, entry, align, data);
			if (ret)
				goto out;
		} else {
			memcpy(buf + i, data + offset, cnt);
		}

out:
		mutex_unlock(&encl->lock);

		if (ret)
			break;
	}

	return ret < 0 ? ret : i;
}
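
/*
 * Usage sketch (illustrative, not part of this file): this handler is
 * what backs access_process_vm() for SGX_ENCL_DEBUG enclaves, so a
 * debugger can peek enclave memory one word at a time:
 *
 *	errno = 0;
 *	word = ptrace(PTRACE_PEEKDATA, pid, enclave_addr, NULL);
 */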
const struct vm_operations_struct sgx_vm_ops = {
	.fault = sgx_vma_fault,
	.mprotect = sgx_vma_mprotect,
	.open = sgx_vma_open,
	.access = sgx_vma_access,
};
/**
 * sgx_encl_release - Destroy an enclave instance
 * @ref:	address of a kref inside &sgx_encl
 *
 * Used together with kref_put(). Frees all the resources associated with the
 * enclave and the instance itself.
 */
void sgx_encl_release(struct kref *ref)
{
	struct sgx_encl *encl = container_of(ref, struct sgx_encl, refcount);
	unsigned long max_page_index = PFN_DOWN(encl->base + encl->size - 1);
	struct sgx_va_page *va_page;
	struct sgx_encl_page *entry;
	unsigned long count = 0;

	XA_STATE(xas, &encl->page_array, PFN_DOWN(encl->base));

	xas_lock(&xas);
	xas_for_each(&xas, entry, max_page_index) {
		if (entry->epc_page) {
			/*
			 * The page and its radix tree entry cannot be freed
			 * if the page is being held by the reclaimer.
			 */
			if (sgx_unmark_page_reclaimable(entry->epc_page))
				continue;

			sgx_encl_free_epc_page(entry->epc_page);
			encl->secs_child_cnt--;
			entry->epc_page = NULL;
		}

		kfree(entry);
		/*
		 * Invoke scheduler on every XA_CHECK_SCHED iteration
		 * to prevent soft lockups.
		 */
		if (!(++count % XA_CHECK_SCHED)) {
			xas_pause(&xas);
			xas_unlock(&xas);

			cond_resched();

			xas_lock(&xas);
		}
	}
	xas_unlock(&xas);

	xa_destroy(&encl->page_array);

	if (!encl->secs_child_cnt && encl->secs.epc_page) {
		sgx_encl_free_epc_page(encl->secs.epc_page);
		encl->secs.epc_page = NULL;
	}

	while (!list_empty(&encl->va_pages)) {
		va_page = list_first_entry(&encl->va_pages, struct sgx_va_page,
					   list);
		list_del(&va_page->list);
		sgx_encl_free_epc_page(va_page->epc_page);
		kfree(va_page);
	}

	if (encl->backing)
		fput(encl->backing);

	cleanup_srcu_struct(&encl->srcu);

	WARN_ON_ONCE(!list_empty(&encl->mm_list));

	/* Detect EPC page leaks. */
	WARN_ON_ONCE(encl->secs_child_cnt);
	WARN_ON_ONCE(encl->secs.epc_page);

	kfree(encl);
}
/*
 * 'mm' is exiting and no longer needs mmu notifications.
 */
static void sgx_mmu_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct sgx_encl_mm *encl_mm = container_of(mn, struct sgx_encl_mm, mmu_notifier);
	struct sgx_encl_mm *tmp = NULL;
	bool found = false;

	/*
	 * The enclave itself can remove encl_mm. Note, objects can't be moved
	 * off an RCU protected list, but deletion is ok.
	 */
	spin_lock(&encl_mm->encl->mm_lock);
	list_for_each_entry(tmp, &encl_mm->encl->mm_list, list) {
		if (tmp == encl_mm) {
			list_del_rcu(&encl_mm->list);
			found = true;
			break;
		}
	}
	spin_unlock(&encl_mm->encl->mm_lock);

	if (found) {
		synchronize_srcu(&encl_mm->encl->srcu);
		mmu_notifier_put(mn);
	}
}
static void sgx_mmu_notifier_free(struct mmu_notifier *mn)
{
	struct sgx_encl_mm *encl_mm = container_of(mn, struct sgx_encl_mm, mmu_notifier);

	/* 'encl_mm' is going away, put encl_mm->encl reference: */
	kref_put(&encl_mm->encl->refcount, sgx_encl_release);

	kfree(encl_mm);
}

static const struct mmu_notifier_ops sgx_mmu_notifier_ops = {
	.release = sgx_mmu_notifier_release,
	.free_notifier = sgx_mmu_notifier_free,
};
static struct sgx_encl_mm *sgx_encl_find_mm(struct sgx_encl *encl,
					    struct mm_struct *mm)
{
	struct sgx_encl_mm *encl_mm = NULL;
	struct sgx_encl_mm *tmp;
	int idx;

	idx = srcu_read_lock(&encl->srcu);

	list_for_each_entry_rcu(tmp, &encl->mm_list, list) {
		if (tmp->mm == mm) {
			encl_mm = tmp;
			break;
		}
	}

	srcu_read_unlock(&encl->srcu, idx);

	return encl_mm;
}
int sgx_encl_mm_add(struct sgx_encl *encl, struct mm_struct *mm)
{
	struct sgx_encl_mm *encl_mm;
	int ret;

	/*
	 * Even though a single enclave may be mapped into an mm more than once,
	 * each 'mm' only appears once on encl->mm_list. This is guaranteed by
	 * holding the mm's mmap lock for write before an mm can be added to or
	 * removed from an encl->mm_list.
	 */
	mmap_assert_write_locked(mm);

	/*
	 * It's possible that an entry already exists in the mm_list, because it
	 * is removed only on VFS release or process exit.
	 */
	if (sgx_encl_find_mm(encl, mm))
		return 0;

	encl_mm = kzalloc(sizeof(*encl_mm), GFP_KERNEL);
	if (!encl_mm)
		return -ENOMEM;

	/* Grab a refcount for the encl_mm->encl reference: */
	kref_get(&encl->refcount);
	encl_mm->encl = encl;
	encl_mm->mm = mm;
	encl_mm->mmu_notifier.ops = &sgx_mmu_notifier_ops;

	ret = __mmu_notifier_register(&encl_mm->mmu_notifier, mm);
	if (ret) {
		kfree(encl_mm);
		return ret;
	}

	spin_lock(&encl->mm_lock);
	list_add_rcu(&encl_mm->list, &encl->mm_list);
	/* Pairs with smp_rmb() in sgx_zap_enclave_ptes(). */
	smp_wmb();
	encl->mm_list_version++;
	spin_unlock(&encl->mm_lock);

	return 0;
}
/**
 * sgx_encl_cpumask() - Query which CPUs might be accessing the enclave
 * @encl: the enclave
 *
 * Some SGX functions require that no cached linear-to-physical address
 * mappings are present before they can succeed. For example, ENCLS[EWB]
 * copies a page from the enclave page cache to regular main memory but
 * it fails if it cannot ensure that there are no cached
 * linear-to-physical address mappings referring to the page.
 *
 * SGX hardware flushes all cached linear-to-physical mappings on a CPU
 * when an enclave is exited via ENCLU[EEXIT] or an Asynchronous Enclave
 * Exit (AEX). Exiting an enclave will thus ensure cached linear-to-physical
 * address mappings are cleared but coordination with the tracking done within
 * the SGX hardware is needed to support the SGX functions that depend on this
 * cache clearing.
 *
 * When the ENCLS[ETRACK] function is issued on an enclave the hardware
 * tracks threads operating inside the enclave at that time. The SGX
 * hardware tracking requires that all the identified threads have
 * exited the enclave in order to flush the mappings before a function such
 * as ENCLS[EWB] will be permitted.
 *
 * The following flow is used to support SGX functions that require that
 * no cached linear-to-physical address mappings are present:
 * 1) Execute ENCLS[ETRACK] to initiate hardware tracking.
 * 2) Use this function (sgx_encl_cpumask()) to query which CPUs might be
 *    accessing the enclave.
 * 3) Send IPI to identified CPUs, kicking them out of the enclave and
 *    thus flushing all locally cached linear-to-physical address mappings;
 *    see the sketch after this function.
 * 4) Execute SGX function.
 *
 * Context: It is required to call this function after ENCLS[ETRACK].
 *          This will ensure that if any new mm appears (racing with
 *          sgx_encl_mm_add()) then the new mm will enter into the
 *          enclave with fresh linear-to-physical address mappings.
 *
 *          It is required that all IPIs are completed before a new
 *          ENCLS[ETRACK] is issued so be sure to protect steps 1 to 3
 *          of the above flow with the enclave's mutex.
 *
 * Return: cpumask of CPUs that might be accessing @encl
 */
const cpumask_t *sgx_encl_cpumask(struct sgx_encl *encl)
{
	cpumask_t *cpumask = &encl->cpumask;
	struct sgx_encl_mm *encl_mm;
	int idx;

	cpumask_clear(cpumask);

	idx = srcu_read_lock(&encl->srcu);

	list_for_each_entry_rcu(encl_mm, &encl->mm_list, list) {
		if (!mmget_not_zero(encl_mm->mm))
			continue;

		cpumask_or(cpumask, cpumask, mm_cpumask(encl_mm->mm));

		mmput_async(encl_mm->mm);
	}

	srcu_read_unlock(&encl->srcu, idx);

	return cpumask;
}
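
/*
 * Caller sketch (illustrative, simplified from the reclaimer in
 * main.c): steps 1-3 of the flow documented above, done under
 * encl->lock:
 *
 *	ret = __etrack(sgx_get_epc_virt_addr(encl->secs.epc_page));
 *	...
 *	on_each_cpu_mask(sgx_encl_cpumask(encl), sgx_ipi_cb, NULL, 1);
 *
 * Once the IPIs have kicked every tracked thread out of the enclave,
 * ENCLS[EWB] may succeed for the tracked pages.
 */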
static struct page *sgx_encl_get_backing_page(struct sgx_encl *encl,
					      pgoff_t index)
{
	struct address_space *mapping = encl->backing->f_mapping;
	gfp_t gfpmask = mapping_gfp_mask(mapping);

	return shmem_read_mapping_page_gfp(mapping, index, gfpmask);
}
/**
 * __sgx_encl_get_backing() - Pin the backing storage
 * @encl:	an enclave pointer
 * @page_index:	enclave page index
 * @backing:	data for accessing backing storage for the page
 *
 * Pin the backing storage pages for storing the encrypted contents and Paging
 * Crypto MetaData (PCMD) of an enclave page.
 *
 * Return:
 *   0 on success,
 *   -errno otherwise.
 */
static int __sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index,
				  struct sgx_backing *backing)
{
	pgoff_t page_pcmd_off = sgx_encl_get_backing_page_pcmd_offset(encl, page_index);
	struct page *contents;
	struct page *pcmd;

	contents = sgx_encl_get_backing_page(encl, page_index);
	if (IS_ERR(contents))
		return PTR_ERR(contents);

	pcmd = sgx_encl_get_backing_page(encl, PFN_DOWN(page_pcmd_off));
	if (IS_ERR(pcmd)) {
		put_page(contents);
		return PTR_ERR(pcmd);
	}

	backing->contents = contents;
	backing->pcmd = pcmd;
	backing->pcmd_offset = page_pcmd_off & (PAGE_SIZE - 1);

	return 0;
}
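
/*
 * Usage sketch (illustrative only): a caller pins both pages, works on
 * the kmapped contents, then unpins with sgx_encl_put_backing():
 *
 *	struct sgx_backing b;
 *
 *	if (!__sgx_encl_get_backing(encl, page_index, &b)) {
 *		void *va = kmap_local_page(b.contents);
 *		...
 *		kunmap_local(va);
 *		sgx_encl_put_backing(&b);
 *	}
 */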
/*
 * When called from ksgxd, returns the mem_cgroup of a struct mm stored
 * in the enclave's mm_list. When not called from ksgxd, just returns
 * the mem_cgroup of the current task.
 */
static struct mem_cgroup *sgx_encl_get_mem_cgroup(struct sgx_encl *encl)
{
	struct mem_cgroup *memcg = NULL;
	struct sgx_encl_mm *encl_mm;
	int idx;

	/*
	 * If called from normal task context, return the mem_cgroup
	 * of the current task's mm. The remainder of the handling is for
	 * ksgxd.
	 */
	if (!current_is_ksgxd())
		return get_mem_cgroup_from_mm(current->mm);

	/*
	 * Search the enclave's mm_list to find an mm associated with
	 * this enclave to charge the allocation to.
	 */
	idx = srcu_read_lock(&encl->srcu);

	list_for_each_entry_rcu(encl_mm, &encl->mm_list, list) {
		if (!mmget_not_zero(encl_mm->mm))
			continue;

		memcg = get_mem_cgroup_from_mm(encl_mm->mm);

		mmput_async(encl_mm->mm);

		break;
	}

	srcu_read_unlock(&encl->srcu, idx);

	/*
	 * In the rare case that there isn't an mm associated with
	 * the enclave, set memcg to the current active mem_cgroup.
	 * This will be the root mem_cgroup if there is no active
	 * mem_cgroup.
	 */
	if (!memcg)
		return get_mem_cgroup_from_mm(NULL);

	return memcg;
}
/**
 * sgx_encl_alloc_backing() - create a new backing storage page
 * @encl:	an enclave pointer
 * @page_index:	enclave page index
 * @backing:	data for accessing backing storage for the page
 *
 * When called from ksgxd, sets the active memcg from one of the
 * mms in the enclave's mm_list prior to any backing page allocation,
 * in order to ensure that shmem page allocations are charged to the
 * enclave. Create a backing page for loading data back into an EPC page with
 * ELDU. This function takes a reference on a new backing page which
 * must be dropped with a corresponding call to sgx_encl_put_backing().
 *
 * Return:
 *   0 on success,
 *   -errno otherwise.
 */
int sgx_encl_alloc_backing(struct sgx_encl *encl, unsigned long page_index,
			   struct sgx_backing *backing)
{
	struct mem_cgroup *encl_memcg = sgx_encl_get_mem_cgroup(encl);
	struct mem_cgroup *memcg = set_active_memcg(encl_memcg);
	int ret;

	ret = __sgx_encl_get_backing(encl, page_index, backing);

	set_active_memcg(memcg);
	mem_cgroup_put(encl_memcg);

	return ret;
}
/**
 * sgx_encl_lookup_backing() - retrieve an existing backing storage page
 * @encl:	an enclave pointer
 * @page_index:	enclave page index
 * @backing:	data for accessing backing storage for the page
 *
 * Retrieve a backing page for loading data back into an EPC page with ELDU.
 * It is the caller's responsibility to ensure that it is appropriate to use
 * sgx_encl_lookup_backing() rather than sgx_encl_alloc_backing(). If lookup is
 * not used correctly, this will cause an allocation which is not accounted for.
 * This function takes a reference on an existing backing page which must be
 * dropped with a corresponding call to sgx_encl_put_backing().
 *
 * Return:
 *   0 on success,
 *   -errno otherwise.
 */
static int sgx_encl_lookup_backing(struct sgx_encl *encl, unsigned long page_index,
				   struct sgx_backing *backing)
{
	return __sgx_encl_get_backing(encl, page_index, backing);
}
/**
 * sgx_encl_put_backing() - Unpin the backing storage
 * @backing:	data for accessing backing storage for the page
 */
void sgx_encl_put_backing(struct sgx_backing *backing)
{
	put_page(backing->pcmd);
	put_page(backing->contents);
}
static int sgx_encl_test_and_clear_young_cb(pte_t *ptep, unsigned long addr,
					    void *data)
{
	pte_t pte;
	int ret;

	ret = pte_young(*ptep);
	if (ret) {
		pte = pte_mkold(*ptep);
		set_pte_at((struct mm_struct *)data, addr, ptep, pte);
	}

	return ret;
}
/**
 * sgx_encl_test_and_clear_young() - Test and reset the accessed bit
 * @mm:		mm_struct that is checked
 * @page:	enclave page to be tested for recent access
 *
 * Checks the Access (A) bit from the PTE corresponding to the enclave page and
 * clears it.
 *
 * Return: 1 if the page has been recently accessed and 0 if not.
 */
int sgx_encl_test_and_clear_young(struct mm_struct *mm,
				  struct sgx_encl_page *page)
{
	unsigned long addr = page->desc & PAGE_MASK;
	struct sgx_encl *encl = page->encl;
	struct vm_area_struct *vma;
	int ret;

	ret = sgx_encl_find(mm, addr, &vma);
	if (ret)
		return 0;

	if (encl != vma->vm_private_data)
		return 0;

	ret = apply_to_page_range(vma->vm_mm, addr, PAGE_SIZE,
				  sgx_encl_test_and_clear_young_cb, vma->vm_mm);
	if (ret < 0)
		return 0;

	return ret;
}
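
/*
 * Usage sketch (illustrative, simplified from the reclaimer): the
 * accessed bit serves as a cheap page-aging signal when picking
 * reclaim candidates:
 *
 *	if (sgx_encl_test_and_clear_young(mm, page))
 *		skip this page for the current reclaim pass;
 */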
struct sgx_encl_page *sgx_encl_page_alloc(struct sgx_encl *encl,
					  unsigned long offset,
					  u64 secinfo_flags)
{
	struct sgx_encl_page *encl_page;
	unsigned long prot;

	encl_page = kzalloc(sizeof(*encl_page), GFP_KERNEL);
	if (!encl_page)
		return ERR_PTR(-ENOMEM);

	encl_page->desc = encl->base + offset;
	encl_page->encl = encl;

	prot = _calc_vm_trans(secinfo_flags, SGX_SECINFO_R, PROT_READ)  |
	       _calc_vm_trans(secinfo_flags, SGX_SECINFO_W, PROT_WRITE) |
	       _calc_vm_trans(secinfo_flags, SGX_SECINFO_X, PROT_EXEC);

	/*
	 * TCS pages must always have RW set for CPU access while the SECINFO
	 * permissions are *always* zero - the CPU ignores the user provided
	 * values and silently overwrites them with zero permissions.
	 */
	if ((secinfo_flags & SGX_SECINFO_PAGE_TYPE_MASK) == SGX_SECINFO_TCS)
		prot |= PROT_READ | PROT_WRITE;

	/* Calculate maximum of the VM flags for the page. */
	encl_page->vm_max_prot_bits = calc_vm_prot_bits(prot, 0);

	return encl_page;
}
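
/*
 * Worked example (illustrative only): SECINFO flags R | X translate to
 * PROT_READ | PROT_EXEC, so the resulting vm_max_prot_bits permit r-x
 * and r-- mappings but make any VM_WRITE mapping fail
 * sgx_encl_may_map().
 */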
/**
 * sgx_zap_enclave_ptes() - remove PTEs mapping the address from enclave
 * @encl: the enclave
 * @addr: page aligned pointer to single page for which PTEs will be removed
 *
 * Multiple VMAs may have an enclave page mapped. Remove the PTE mapping
 * @addr from each VMA. Ensure that page fault handler is ready to handle
 * new mappings of @addr before calling this function.
 */
void sgx_zap_enclave_ptes(struct sgx_encl *encl, unsigned long addr)
{
	unsigned long mm_list_version;
	struct sgx_encl_mm *encl_mm;
	struct vm_area_struct *vma;
	int idx, ret;

	do {
		mm_list_version = encl->mm_list_version;

		/* Pairs with smp_wmb() in sgx_encl_mm_add(). */
		smp_rmb();

		idx = srcu_read_lock(&encl->srcu);

		list_for_each_entry_rcu(encl_mm, &encl->mm_list, list) {
			if (!mmget_not_zero(encl_mm->mm))
				continue;

			mmap_read_lock(encl_mm->mm);

			ret = sgx_encl_find(encl_mm->mm, addr, &vma);
			if (!ret && encl == vma->vm_private_data)
				zap_vma_ptes(vma, addr, PAGE_SIZE);

			mmap_read_unlock(encl_mm->mm);

			mmput_async(encl_mm->mm);
		}

		srcu_read_unlock(&encl->srcu, idx);
	} while (unlikely(encl->mm_list_version != mm_list_version));
}
/**
 * sgx_alloc_va_page() - Allocate a Version Array (VA) page
 * @reclaim: Reclaim EPC pages directly if none available. Enclave
 *           mutex should not be held if this is set.
 *
 * Allocate a free EPC page and convert it to a Version Array (VA) page.
 *
 * Return:
 *   a VA page,
 *   -errno otherwise
 */
struct sgx_epc_page *sgx_alloc_va_page(bool reclaim)
{
	struct sgx_epc_page *epc_page;
	int ret;

	epc_page = sgx_alloc_epc_page(NULL, reclaim);
	if (IS_ERR(epc_page))
		return ERR_CAST(epc_page);

	ret = __epa(sgx_get_epc_virt_addr(epc_page));
	if (ret) {
		WARN_ONCE(1, "EPA returned %d (0x%x)", ret, ret);
		sgx_encl_free_epc_page(epc_page);
		return ERR_PTR(-EFAULT);
	}

	return epc_page;
}
/**
 * sgx_alloc_va_slot - allocate a VA slot
 * @va_page:	a &struct sgx_va_page instance
 *
 * Allocates a slot from a &struct sgx_va_page instance.
 *
 * Return: offset of the slot inside the VA page
 */
unsigned int sgx_alloc_va_slot(struct sgx_va_page *va_page)
{
	int slot = find_first_zero_bit(va_page->slots, SGX_VA_SLOT_COUNT);

	if (slot < SGX_VA_SLOT_COUNT)
		set_bit(slot, va_page->slots);

	return slot << 3;
}
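
/*
 * Worked example (illustrative only): each version slot is 8 bytes, so
 * slot 5 maps to byte offset 40 and back again:
 *
 *	offset = sgx_alloc_va_slot(va_page);	// 5 << 3 == 40
 *	sgx_free_va_slot(va_page, offset);	// clears bit 40 >> 3 == 5
 */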
/**
 * sgx_free_va_slot - free a VA slot
 * @va_page:	a &struct sgx_va_page instance
 * @offset:	offset of the slot inside the VA page
 *
 * Frees a slot from a &struct sgx_va_page instance.
 */
void sgx_free_va_slot(struct sgx_va_page *va_page, unsigned int offset)
{
	clear_bit(offset >> 3, va_page->slots);
}
/**
 * sgx_va_page_full - is the VA page full?
 * @va_page:	a &struct sgx_va_page instance
 *
 * Return: true if all slots have been taken
 */
bool sgx_va_page_full(struct sgx_va_page *va_page)
{
	int slot = find_first_zero_bit(va_page->slots, SGX_VA_SLOT_COUNT);

	return slot == SGX_VA_SLOT_COUNT;
}
/**
 * sgx_encl_free_epc_page - free an EPC page assigned to an enclave
 * @page:	EPC page to be freed
 *
 * Free an EPC page assigned to an enclave. It does EREMOVE for the page, and
 * only upon success, it puts the page back to free page list. Otherwise, it
 * gives a WARNING to indicate page is leaked.
 */
void sgx_encl_free_epc_page(struct sgx_epc_page *page)
{
	int ret;

	WARN_ON_ONCE(page->flags & SGX_EPC_PAGE_RECLAIMER_TRACKED);

	ret = __eremove(sgx_get_epc_virt_addr(page));
	if (WARN_ONCE(ret, EREMOVE_ERROR_MESSAGE, ret, ret))
		return;

	sgx_free_epc_page(page);
}