s390/mm: cleanup init_new_context() callback
authorAlexander Gordeev <agordeev@linux.ibm.com>
Thu, 19 Mar 2020 12:44:50 +0000 (13:44 +0100)
committerVasily Gorbik <gor@linux.ibm.com>
Sat, 28 Mar 2020 11:46:12 +0000 (12:46 +0100)
The set of values asce_limit may be assigned is TASK_SIZE_MAX,
_REGION1_SIZE, _REGION2_SIZE, or 0 as a special case if the callback
was called from execve().
Do VM_BUG_ON() if asce_limit is anything else.

Save a few CPU cycles by removing an unnecessary asce_limit re-assignment
in the case of a 3-level task and a redundant PGD entry type reconstruction.

Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
Reviewed-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
arch/s390/include/asm/mmu_context.h
arch/s390/include/asm/pgalloc.h

index 248d51cdcad9f5ef683c2a92fc11804d63277dda..844396b3735e3822b9c20a63e4a3f231d95c6300 100644 (file)
@@ -18,6 +18,8 @@
 static inline int init_new_context(struct task_struct *tsk,
                                   struct mm_struct *mm)
 {
+       unsigned long asce_type, init_entry;
+
        spin_lock_init(&mm->context.lock);
        INIT_LIST_HEAD(&mm->context.pgtable_list);
        INIT_LIST_HEAD(&mm->context.gmap_list);
@@ -35,29 +37,34 @@ static inline int init_new_context(struct task_struct *tsk,
        mm->context.allow_gmap_hpage_1m = 0;
 #endif
        switch (mm->context.asce_limit) {
-       case _REGION2_SIZE:
+       default:
                /*
-                * forked 3-level task, fall through to set new asce with new
-                * mm->pgd
+                * context created by exec, the value of asce_limit can
+                * only be zero in this case
                 */
-       case 0:
-               /* context created by exec, set asce limit to 4TB */
+               VM_BUG_ON(mm->context.asce_limit);
+               /* continue as 3-level task */
                mm->context.asce_limit = _REGION2_SIZE;
-               mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
-                                  _ASCE_USER_BITS | _ASCE_TYPE_REGION3;
+               fallthrough;
+       case _REGION2_SIZE:
+               /* forked 3-level task */
+               init_entry = _REGION3_ENTRY_EMPTY;
+               asce_type = _ASCE_TYPE_REGION3;
                break;
        case TASK_SIZE_MAX:
-               /* forked 5-level task, set new asce with new_mm->pgd */
-               mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
-                       _ASCE_USER_BITS | _ASCE_TYPE_REGION1;
+               /* forked 5-level task */
+               init_entry = _REGION1_ENTRY_EMPTY;
+               asce_type = _ASCE_TYPE_REGION1;
                break;
        case _REGION1_SIZE:
-               /* forked 4-level task, set new asce with new mm->pgd */
-               mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
-                                  _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
+               /* forked 4-level task */
+               init_entry = _REGION2_ENTRY_EMPTY;
+               asce_type = _ASCE_TYPE_REGION2;
                break;
        }
-       crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
+       mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
+                          _ASCE_USER_BITS | asce_type;
+       crst_table_init((unsigned long *) mm->pgd, init_entry);
        return 0;
 }
 
index 5e3ff9f7a58680175cc3a9752e73ad99655a2e20..74a352f8c0d1d3d288553e27de9b6feb450d995a 100644 (file)
@@ -34,17 +34,6 @@ static inline void crst_table_init(unsigned long *crst, unsigned long entry)
        memset64((u64 *)crst, entry, _CRST_ENTRIES);
 }
 
-static inline unsigned long pgd_entry_type(struct mm_struct *mm)
-{
-       if (mm_pmd_folded(mm))
-               return _SEGMENT_ENTRY_EMPTY;
-       if (mm_pud_folded(mm))
-               return _REGION3_ENTRY_EMPTY;
-       if (mm_p4d_folded(mm))
-               return _REGION2_ENTRY_EMPTY;
-       return _REGION1_ENTRY_EMPTY;
-}
-
 int crst_table_upgrade(struct mm_struct *mm, unsigned long limit);
 
 static inline unsigned long check_asce_limit(struct mm_struct *mm, unsigned long addr,