Merge branches 'cleanup', 'fixes', 'misc', 'omap-barrier' and 'uaccess' into for...
author Russell King <rmk+kernel@arm.linux.org.uk>
Thu, 3 Sep 2015 14:28:37 +0000 (15:28 +0100)
committer Russell King <rmk+kernel@arm.linux.org.uk>
Thu, 3 Sep 2015 14:28:37 +0000 (15:28 +0100)
14 files changed:
1  2  3  4  5 
arch/arm/Kconfig
arch/arm/include/asm/assembler.h
arch/arm/include/asm/thread_info.h
arch/arm/include/asm/uaccess.h
arch/arm/kernel/armksyms.c
arch/arm/kernel/entry-armv.S
arch/arm/kernel/entry-common.S
arch/arm/kernel/head.S
arch/arm/kernel/perf_event.c
arch/arm/kernel/process.c
arch/arm/kernel/smp.c
arch/arm/lib/uaccess_with_memcpy.c
arch/arm/mm/dma-mapping.c
arch/arm/mm/mmu.c

@@@@@@ -1693,7 -1693,13 -1696,7 -1693,7 -1693,22 +1696,28 @@@@@@ config HIGHME
     config HIGHPTE
        bool "Allocate 2nd-level pagetables from highmem"
        depends on HIGHMEM
+ +++   help
+ +++     The VM uses one page of physical memory for each page table.
+ +++     For systems with a lot of processes, this can use a lot of
+ +++     precious low memory, eventually leading to low memory being
+ +++     consumed by page tables.  Setting this option will allow
+ +++     user-space 2nd level page tables to reside in high memory.
+ ++ 
++++ config CPU_SW_DOMAIN_PAN
++++    bool "Enable use of CPU domains to implement privileged no-access"
++++    depends on MMU && !ARM_LPAE
++++    default y
++++    help
++++      Increase kernel security by ensuring that normal kernel accesses
++++      are unable to access userspace addresses.  This can help prevent
++++      use-after-free bugs becoming an exploitable privilege escalation
++++      by ensuring that magic values (such as LIST_POISON) will always
++++      fault when dereferenced.
++++ 
++++      CPUs with low-vector mappings use a best-efforts implementation.
++++      Their lower 1MB needs to remain accessible for the vectors, but
++++      the remainder of userspace will become appropriately inaccessible.
 +   
     config HW_PERF_EVENTS
        bool "Enable hardware performance counter support for perf events"
        depends on PERF_EVENTS
Simple merge
Simple merge
@@@@@@ -379,11 -391,11 -391,11 -391,11 -429,12 +417,12 @@@@@@ do {                                                                      
        case 8: __put_user_asm_dword(__pu_val, __pu_addr, err); break;  \
        default: __put_user_bad();                                      \
        }                                                               \
++++    uaccess_restore(__ua_flags);                                    \
     } while (0)
     
 ----#define __put_user_asm_byte(x, __pu_addr, err)                     \
 ++++#define __put_user_asm(x, __pu_addr, err, instr)           \
        __asm__ __volatile__(                                   \
 ----   "1:     " TUSER(strb) " %1,[%2],#0\n"                   \
 ++++   "1:     " TUSER(instr) " %1, [%2], #0\n"                \
        "2:\n"                                                  \
        "       .pushsection .text.fixup,\"ax\"\n"              \
        "       .align  2\n"                                    \
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
@@@@@@ -1494,7 -1500,8 -1567,8 -1494,7 -1494,7 +1573,9 @@@@@@ void __init paging_init(const struct ma
        build_mem_type_table();
        prepare_page_table();
        map_lowmem();
+ +++   memblock_set_current_limit(arm_lowmem_limit);
        dma_contiguous_remap();
++ ++   early_fixmap_shutdown();
        devicemaps_init(mdesc);
        kmap_init();
        tcm_init();