s390/mm: add (non)secure page access exceptions handlers
authorVasily Gorbik <gor@linux.ibm.com>
Tue, 21 Jan 2020 08:43:10 +0000 (09:43 +0100)
committerChristian Borntraeger <borntraeger@de.ibm.com>
Thu, 27 Feb 2020 18:44:40 +0000 (19:44 +0100)
Add exceptions handlers performing transparent transition of non-secure
pages to secure (import) upon guest access and secure pages to
non-secure (export) upon hypervisor access.

Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
[frankja@linux.ibm.com: adding checks for failures]
Signed-off-by: Janosch Frank <frankja@linux.ibm.com>
[imbrenda@linux.ibm.com: adding a check for gmap fault]
Signed-off-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
Acked-by: David Hildenbrand <david@redhat.com>
Acked-by: Cornelia Huck <cohuck@redhat.com>
Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com>
[borntraeger@de.ibm.com: patch merging, splitting, fixing]
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
arch/s390/kernel/entry.h
arch/s390/kernel/pgm_check.S
arch/s390/mm/fault.c

index 1d3927e..faca269 100644 (file)
@@ -24,6 +24,8 @@ asmlinkage void do_syscall_trace_exit(struct pt_regs *regs);
 
 void do_protection_exception(struct pt_regs *regs);
 void do_dat_exception(struct pt_regs *regs);
+void do_secure_storage_access(struct pt_regs *regs);
+void do_non_secure_storage_access(struct pt_regs *regs);
 
 void addressing_exception(struct pt_regs *regs);
 void data_exception(struct pt_regs *regs);
index eee3a48..2c27907 100644 (file)
@@ -78,8 +78,8 @@ PGM_CHECK(do_dat_exception)           /* 39 */
 PGM_CHECK(do_dat_exception)            /* 3a */
 PGM_CHECK(do_dat_exception)            /* 3b */
 PGM_CHECK_DEFAULT                      /* 3c */
-PGM_CHECK_DEFAULT                      /* 3d */
-PGM_CHECK_DEFAULT                      /* 3e */
+PGM_CHECK(do_secure_storage_access)    /* 3d */
+PGM_CHECK(do_non_secure_storage_access)        /* 3e */
 PGM_CHECK_DEFAULT                      /* 3f */
 PGM_CHECK(monitor_event_exception)     /* 40 */
 PGM_CHECK_DEFAULT                      /* 41 */
index 7b0bb47..7bd86eb 100644 (file)
@@ -38,6 +38,7 @@
 #include <asm/irq.h>
 #include <asm/mmu_context.h>
 #include <asm/facility.h>
+#include <asm/uv.h>
 #include "../kernel/entry.h"
 
 #define __FAIL_ADDR_MASK -4096L
@@ -816,3 +817,80 @@ out_extint:
 early_initcall(pfault_irq_init);
 
 #endif /* CONFIG_PFAULT */
+
+#if IS_ENABLED(CONFIG_PGSTE)
+void do_secure_storage_access(struct pt_regs *regs)
+{
+       unsigned long addr = regs->int_parm_long & __FAIL_ADDR_MASK;
+       struct vm_area_struct *vma;
+       struct mm_struct *mm;
+       struct page *page;
+       int rc;
+
+       switch (get_fault_type(regs)) {
+       case USER_FAULT:
+               mm = current->mm;
+               down_read(&mm->mmap_sem);
+               vma = find_vma(mm, addr);
+               if (!vma) {
+                       up_read(&mm->mmap_sem);
+                       do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
+                       break;
+               }
+               page = follow_page(vma, addr, FOLL_WRITE | FOLL_GET);
+               if (IS_ERR_OR_NULL(page)) {
+                       up_read(&mm->mmap_sem);
+                       break;
+               }
+               if (arch_make_page_accessible(page))
+                       send_sig(SIGSEGV, current, 0);
+               put_page(page);
+               up_read(&mm->mmap_sem);
+               break;
+       case KERNEL_FAULT:
+               page = phys_to_page(addr);
+               if (unlikely(!try_get_page(page)))
+                       break;
+               rc = arch_make_page_accessible(page);
+               put_page(page);
+               if (rc)
+                       BUG();
+               break;
+       case VDSO_FAULT:
+               /* fallthrough */
+       case GMAP_FAULT:
+               /* fallthrough */
+       default:
+               do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
+               WARN_ON_ONCE(1);
+       }
+}
+NOKPROBE_SYMBOL(do_secure_storage_access);
+
+void do_non_secure_storage_access(struct pt_regs *regs)
+{
+       unsigned long gaddr = regs->int_parm_long & __FAIL_ADDR_MASK;
+       struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;
+
+       if (get_fault_type(regs) != GMAP_FAULT) {
+               do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
+               WARN_ON_ONCE(1);
+               return;
+       }
+
+       if (gmap_convert_to_secure(gmap, gaddr) == -EINVAL)
+               send_sig(SIGSEGV, current, 0);
+}
+NOKPROBE_SYMBOL(do_non_secure_storage_access);
+
+#else
+void do_secure_storage_access(struct pt_regs *regs)
+{
+       default_trap_handler(regs);
+}
+
+void do_non_secure_storage_access(struct pt_regs *regs)
+{
+       default_trap_handler(regs);
+}
+#endif