Merge commit 'a7d2475af7aedcb9b5c6343989a8bfadbf84429b' into uaccess.powerpc
author Al Viro <viro@zeniv.linux.org.uk>
Thu, 6 Apr 2017 19:08:10 +0000 (15:08 -0400)
committer Al Viro <viro@zeniv.linux.org.uk>
Thu, 6 Apr 2017 19:08:10 +0000 (15:08 -0400)
backmerge of the arch/powerpc/Kconfig sorting

20 files changed:
arch/powerpc/Kconfig
arch/powerpc/Makefile
arch/powerpc/include/asm/checksum.h
arch/powerpc/include/asm/cpuidle.h
arch/powerpc/include/asm/elf.h
arch/powerpc/include/asm/nohash/pgtable.h
arch/powerpc/include/asm/ppc-opcode.h
arch/powerpc/include/asm/prom.h
arch/powerpc/kernel/idle_book3s.S
arch/powerpc/kernel/prom_init.c
arch/powerpc/kernel/setup_64.c
arch/powerpc/lib/Makefile
arch/powerpc/lib/sstep.c
arch/powerpc/lib/test_emulate_step.c [new file with mode: 0644]
arch/powerpc/mm/init_64.c
arch/powerpc/mm/pgtable-radix.c
arch/powerpc/platforms/powernv/opal-wrappers.S
arch/powerpc/sysdev/xics/icp-opal.c
arch/powerpc/sysdev/xics/xics-common.c
tools/testing/selftests/powerpc/harness.c

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 4940917..97a8bc8 100644
@@ -80,93 +80,99 @@ config ARCH_HAS_DMA_SET_COHERENT_MASK
 config PPC
        bool
        default y
-       select BUILDTIME_EXTABLE_SORT
+       #
+       # Please keep this list sorted alphabetically.
+       #
+       select ARCH_HAS_DEVMEM_IS_ALLOWED
+       select ARCH_HAS_DMA_SET_COHERENT_MASK
+       select ARCH_HAS_ELF_RANDOMIZE
+       select ARCH_HAS_GCOV_PROFILE_ALL
+       select ARCH_HAS_SCALED_CPUTIME          if VIRT_CPU_ACCOUNTING_NATIVE
+       select ARCH_HAS_SG_CHAIN
+       select ARCH_HAS_TICK_BROADCAST          if GENERIC_CLOCKEVENTS_BROADCAST
+       select ARCH_HAS_UBSAN_SANITIZE_ALL
+       select ARCH_HAVE_NMI_SAFE_CMPXCHG
        select ARCH_MIGHT_HAVE_PC_PARPORT
        select ARCH_MIGHT_HAVE_PC_SERIO
+       select ARCH_SUPPORTS_ATOMIC_RMW
+       select ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
+       select ARCH_USE_BUILTIN_BSWAP
+       select ARCH_USE_CMPXCHG_LOCKREF         if PPC64
+       select ARCH_WANT_IPC_PARSE_VERSION
        select BINFMT_ELF
-       select ARCH_HAS_ELF_RANDOMIZE
-       select OF
-       select OF_EARLY_FLATTREE
-       select OF_RESERVED_MEM
-       select HAVE_FTRACE_MCOUNT_RECORD
+       select BUILDTIME_EXTABLE_SORT
+       select CLONE_BACKWARDS
+       select DCACHE_WORD_ACCESS               if PPC64 && CPU_LITTLE_ENDIAN
+       select EDAC_ATOMIC_SCRUB
+       select EDAC_SUPPORT
+       select GENERIC_ATOMIC64                 if PPC32
+       select GENERIC_CLOCKEVENTS
+       select GENERIC_CLOCKEVENTS_BROADCAST    if SMP
+       select GENERIC_CMOS_UPDATE
+       select GENERIC_CPU_AUTOPROBE
+       select GENERIC_IRQ_SHOW
+       select GENERIC_IRQ_SHOW_LEVEL
+       select GENERIC_SMP_IDLE_THREAD
+       select GENERIC_STRNCPY_FROM_USER
+       select GENERIC_STRNLEN_USER
+       select GENERIC_TIME_VSYSCALL_OLD
+       select HAVE_ARCH_AUDITSYSCALL
+       select HAVE_ARCH_HARDENED_USERCOPY
+       select HAVE_ARCH_JUMP_LABEL
+       select HAVE_ARCH_KGDB
+       select HAVE_ARCH_SECCOMP_FILTER
+       select HAVE_ARCH_TRACEHOOK
+       select HAVE_CBPF_JIT                    if !PPC64
+       select HAVE_CONTEXT_TRACKING            if PPC64
+       select HAVE_DEBUG_KMEMLEAK
+       select HAVE_DEBUG_STACKOVERFLOW
+       select HAVE_DMA_API_DEBUG
        select HAVE_DYNAMIC_FTRACE
-       select HAVE_DYNAMIC_FTRACE_WITH_REGS if MPROFILE_KERNEL
-       select HAVE_FUNCTION_TRACER
+       select HAVE_DYNAMIC_FTRACE_WITH_REGS    if MPROFILE_KERNEL
+       select HAVE_EBPF_JIT                    if PPC64
+       select HAVE_EFFICIENT_UNALIGNED_ACCESS  if !(CPU_LITTLE_ENDIAN && POWER7_CPU)
+       select HAVE_FTRACE_MCOUNT_RECORD
        select HAVE_FUNCTION_GRAPH_TRACER
+       select HAVE_FUNCTION_TRACER
        select HAVE_GCC_PLUGINS
-       select SYSCTL_EXCEPTION_TRACE
-       select VIRT_TO_BUS if !PPC64
+       select HAVE_GENERIC_RCU_GUP
+       select HAVE_HW_BREAKPOINT               if PERF_EVENTS && (PPC_BOOK3S || PPC_8xx)
        select HAVE_IDE
        select HAVE_IOREMAP_PROT
-       select HAVE_EFFICIENT_UNALIGNED_ACCESS if !(CPU_LITTLE_ENDIAN && POWER7_CPU)
+       select HAVE_IRQ_EXIT_ON_IRQ_STACK
+       select HAVE_KERNEL_GZIP
        select HAVE_KPROBES
-       select HAVE_OPTPROBES if PPC64
-       select HAVE_ARCH_KGDB
        select HAVE_KRETPROBES
-       select HAVE_ARCH_TRACEHOOK
+       select HAVE_LIVEPATCH                   if HAVE_DYNAMIC_FTRACE_WITH_REGS
        select HAVE_MEMBLOCK
        select HAVE_MEMBLOCK_NODE_MAP
-       select HAVE_DMA_API_DEBUG
+       select HAVE_MOD_ARCH_SPECIFIC
+       select HAVE_NMI                         if PERF_EVENTS
        select HAVE_OPROFILE
-       select HAVE_DEBUG_KMEMLEAK
-       select ARCH_HAS_SG_CHAIN
-       select GENERIC_ATOMIC64 if PPC32
+       select HAVE_OPTPROBES                   if PPC64
        select HAVE_PERF_EVENTS
+       select HAVE_PERF_EVENTS_NMI             if PPC64
        select HAVE_PERF_REGS
        select HAVE_PERF_USER_STACK_DUMP
+       select HAVE_RCU_TABLE_FREE              if SMP
        select HAVE_REGS_AND_STACK_ACCESS_API
-       select HAVE_HW_BREAKPOINT if PERF_EVENTS && (PPC_BOOK3S || PPC_8xx)
-       select ARCH_WANT_IPC_PARSE_VERSION
-       select SPARSE_IRQ
+       select HAVE_SYSCALL_TRACEPOINTS
+       select HAVE_VIRT_CPU_ACCOUNTING
        select IRQ_DOMAIN
-       select GENERIC_IRQ_SHOW
-       select GENERIC_IRQ_SHOW_LEVEL
        select IRQ_FORCED_THREADING
-       select HAVE_RCU_TABLE_FREE if SMP
-       select HAVE_SYSCALL_TRACEPOINTS
-       select HAVE_CBPF_JIT if !PPC64
-       select HAVE_EBPF_JIT if PPC64
-       select HAVE_ARCH_JUMP_LABEL
-       select ARCH_HAVE_NMI_SAFE_CMPXCHG
-       select ARCH_HAS_GCOV_PROFILE_ALL
-       select GENERIC_SMP_IDLE_THREAD
-       select GENERIC_CMOS_UPDATE
-       select GENERIC_TIME_VSYSCALL_OLD
-       select GENERIC_CLOCKEVENTS
-       select GENERIC_CLOCKEVENTS_BROADCAST if SMP
-       select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
-       select GENERIC_STRNCPY_FROM_USER
-       select GENERIC_STRNLEN_USER
-       select HAVE_MOD_ARCH_SPECIFIC
        select MODULES_USE_ELF_RELA
-       select CLONE_BACKWARDS
-       select ARCH_USE_BUILTIN_BSWAP
-       select OLD_SIGSUSPEND
-       select OLD_SIGACTION if PPC32
-       select HAVE_DEBUG_STACKOVERFLOW
-       select HAVE_IRQ_EXIT_ON_IRQ_STACK
-       select ARCH_USE_CMPXCHG_LOCKREF if PPC64
-       select HAVE_ARCH_AUDITSYSCALL
-       select ARCH_SUPPORTS_ATOMIC_RMW
-       select DCACHE_WORD_ACCESS if PPC64 && CPU_LITTLE_ENDIAN
        select NO_BOOTMEM
-       select HAVE_GENERIC_RCU_GUP
-       select HAVE_PERF_EVENTS_NMI if PPC64
-       select HAVE_NMI if PERF_EVENTS
-       select EDAC_SUPPORT
-       select EDAC_ATOMIC_SCRUB
-       select ARCH_HAS_DMA_SET_COHERENT_MASK
-       select ARCH_HAS_DEVMEM_IS_ALLOWED
-       select HAVE_ARCH_SECCOMP_FILTER
-       select ARCH_HAS_UBSAN_SANITIZE_ALL
-       select ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
-       select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS
-       select GENERIC_CPU_AUTOPROBE
-       select HAVE_VIRT_CPU_ACCOUNTING
-       select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE
-       select HAVE_ARCH_HARDENED_USERCOPY
-       select HAVE_KERNEL_GZIP
-       select HAVE_CONTEXT_TRACKING if PPC64
+       select OF
+       select OF_EARLY_FLATTREE
+       select OF_RESERVED_MEM
+       select OLD_SIGACTION                    if PPC32
+       select OLD_SIGSUSPEND
+       select SPARSE_IRQ
+       select SYSCTL_EXCEPTION_TRACE
+       select VIRT_TO_BUS                      if !PPC64
+       #
+       # Please keep this list sorted alphabetically.
+       #
 
 config GENERIC_CSUM
        def_bool n
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 31286fa..19b0d1a 100644
@@ -72,8 +72,15 @@ GNUTARGET    := powerpc
 MULTIPLEWORD   := -mmultiple
 endif
 
-cflags-$(CONFIG_CPU_BIG_ENDIAN)                += $(call cc-option,-mbig-endian)
+ifdef CONFIG_PPC64
+cflags-$(CONFIG_CPU_BIG_ENDIAN)                += $(call cc-option,-mabi=elfv1)
+cflags-$(CONFIG_CPU_BIG_ENDIAN)                += $(call cc-option,-mcall-aixdesc)
+aflags-$(CONFIG_CPU_BIG_ENDIAN)                += $(call cc-option,-mabi=elfv1)
+aflags-$(CONFIG_CPU_LITTLE_ENDIAN)     += -mabi=elfv2
+endif
+
 cflags-$(CONFIG_CPU_LITTLE_ENDIAN)     += -mlittle-endian
+cflags-$(CONFIG_CPU_BIG_ENDIAN)                += $(call cc-option,-mbig-endian)
 ifneq ($(cc-name),clang)
   cflags-$(CONFIG_CPU_LITTLE_ENDIAN)   += -mno-strict-align
 endif
@@ -113,7 +120,9 @@ ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
 CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2,$(call cc-option,-mcall-aixdesc))
 AFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2)
 else
+CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv1)
 CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcall-aixdesc)
+AFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv1)
 endif
 CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcmodel=medium,$(call cc-option,-mminimal-toc))
 CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mno-pointers-to-nested-functions)
diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h
index 4e63787..842124b 100644
@@ -112,7 +112,7 @@ static inline __wsum csum_add(__wsum csum, __wsum addend)
 
 #ifdef __powerpc64__
        res += (__force u64)addend;
-       return (__force __wsum)((u32)res + (res >> 32));
+       return (__force __wsum) from64to32(res);
 #else
        asm("addc %0,%0,%1;"
            "addze %0,%0;"
diff --git a/arch/powerpc/include/asm/cpuidle.h b/arch/powerpc/include/asm/cpuidle.h
index fd321eb..1557315 100644
@@ -70,8 +70,8 @@ static inline void report_invalid_psscr_val(u64 psscr_val, int err)
        std     r0,0(r1);                                       \
        ptesync;                                                \
        ld      r0,0(r1);                                       \
-1:     cmpd    cr0,r0,r0;                                      \
-       bne     1b;                                             \
+236:   cmpd    cr0,r0,r0;                                      \
+       bne     236b;                                           \
        IDLE_INST;                                              \
 
 #define        IDLE_STATE_ENTER_SEQ_NORET(IDLE_INST)                   \
diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
index 93b9b84..09bde6e 100644
@@ -144,8 +144,8 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
 #define ARCH_DLINFO_CACHE_GEOMETRY                                     \
        NEW_AUX_ENT(AT_L1I_CACHESIZE, ppc64_caches.l1i.size);           \
        NEW_AUX_ENT(AT_L1I_CACHEGEOMETRY, get_cache_geometry(l1i));     \
-       NEW_AUX_ENT(AT_L1D_CACHESIZE, ppc64_caches.l1i.size);           \
-       NEW_AUX_ENT(AT_L1D_CACHEGEOMETRY, get_cache_geometry(l1i));     \
+       NEW_AUX_ENT(AT_L1D_CACHESIZE, ppc64_caches.l1d.size);           \
+       NEW_AUX_ENT(AT_L1D_CACHEGEOMETRY, get_cache_geometry(l1d));     \
        NEW_AUX_ENT(AT_L2_CACHESIZE, ppc64_caches.l2.size);             \
        NEW_AUX_ENT(AT_L2_CACHEGEOMETRY, get_cache_geometry(l2));       \
        NEW_AUX_ENT(AT_L3_CACHESIZE, ppc64_caches.l3.size);             \
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
index 0cd8a38..e5805ad 100644
@@ -230,7 +230,7 @@ static inline int hugepd_ok(hugepd_t hpd)
        return ((hpd_val(hpd) & 0x4) != 0);
 #else
        /* We clear the top bit to indicate hugepd */
-       return ((hpd_val(hpd) & PD_HUGE) ==  0);
+       return (hpd_val(hpd) && (hpd_val(hpd) & PD_HUGE) == 0);
 #endif
 }
 
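
Why the added hpd_val() test matters: in this (nohash 64-bit) layout a hugepd
is flagged by a cleared PD_HUGE top bit, so a completely empty entry also
satisfied the old check. A worked illustration (values hypothetical):

	/*
	 * old: (val & PD_HUGE) == 0         -> true for val == 0, so an empty
	 *                                      entry was misread as a hugepd
	 * new: val && (val & PD_HUGE) == 0  -> false for val == 0, empty
	 *                                      entries are now rejected
	 */
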
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index d99bd44..e7d6d86 100644
 #define PPC_INST_BRANCH_COND           0x40800000
 #define PPC_INST_LBZCIX                        0x7c0006aa
 #define PPC_INST_STBCIX                        0x7c0007aa
+#define PPC_INST_LWZX                  0x7c00002e
+#define PPC_INST_LFSX                  0x7c00042e
+#define PPC_INST_STFSX                 0x7c00052e
+#define PPC_INST_LFDX                  0x7c0004ae
+#define PPC_INST_STFDX                 0x7c0005ae
+#define PPC_INST_LVX                   0x7c0000ce
+#define PPC_INST_STVX                  0x7c0001ce
 
 /* macros to insert fields into opcodes */
 #define ___PPC_RA(a)   (((a) & 0x1f) << 16)
diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h
index 4a90634..35c00d7 100644
@@ -160,12 +160,18 @@ struct of_drconf_cell {
 #define OV5_PFO_HW_ENCR                0x1120  /* PFO Encryption Accelerator */
 #define OV5_SUB_PROCESSORS     0x1501  /* 1,2,or 4 Sub-Processors supported */
 #define OV5_XIVE_EXPLOIT       0x1701  /* XIVE exploitation supported */
-#define OV5_MMU_RADIX_300      0x1880  /* ISA v3.00 radix MMU supported */
-#define OV5_MMU_HASH_300       0x1840  /* ISA v3.00 hash MMU supported */
-#define OV5_MMU_SEGM_RADIX     0x1820  /* radix mode (no segmentation) */
-#define OV5_MMU_PROC_TBL       0x1810  /* hcall selects SLB or proc table */
-#define OV5_MMU_SLB            0x1800  /* always use SLB */
-#define OV5_MMU_GTSE           0x1808  /* Guest translation shootdown */
+/* MMU Base Architecture */
+#define OV5_MMU_SUPPORT                0x18C0  /* MMU Mode Support Mask */
+#define OV5_MMU_HASH           0x1800  /* Hash MMU Only */
+#define OV5_MMU_RADIX          0x1840  /* Radix MMU Only */
+#define OV5_MMU_EITHER         0x1880  /* Hash or Radix Supported */
+#define OV5_MMU_DYNAMIC                0x18C0  /* Hash or Radix Can Switch Later */
+#define OV5_NMMU               0x1820  /* Nest MMU Available */
+/* Hash Table Extensions */
+#define OV5_HASH_SEG_TBL       0x1980  /* In Memory Segment Tables Available */
+#define OV5_HASH_GTSE          0x1940  /* Guest Translation Shoot Down Avail */
+/* Radix Table Extensions */
+#define OV5_RADIX_GTSE         0x1A40  /* Guest Translation Shoot Down Avail */
 
 /* Option Vector 6: IBM PAPR hints */
 #define OV6_LINUX              0x02    /* Linux is our OS */
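
The new two-byte constants pack the option-vector byte index in the high byte
and the feature mask in the low byte; the header's existing OV5_INDX()/
OV5_FEAT() accessors (used by the prom_init.c and setup_64.c hunks below)
split them apart again. For context, they amount to:

	#define OV5_INDX(x)	((x) >> 8)	/* which byte of option vector 5 */
	#define OV5_FEAT(x)	((x) & 0xff)	/* feature bits within that byte */

So OV5_MMU_SUPPORT (0x18C0) names byte 0x18 with mask 0xC0, and the four MMU
modes are the four possible values of those two bits.
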
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
index 5f61cc0..9957287 100644
@@ -276,19 +276,21 @@ power_enter_stop:
  */
        andis.   r4,r3,PSSCR_EC_ESL_MASK_SHIFTED
        clrldi   r3,r3,60 /* r3 = Bits[60:63] = Requested Level (RL) */
-       bne      1f
+       bne      .Lhandle_esl_ec_set
        IDLE_STATE_ENTER_SEQ(PPC_STOP)
        li      r3,0  /* Since we didn't lose state, return 0 */
        b       pnv_wakeup_noloss
+
+.Lhandle_esl_ec_set:
 /*
  * Check if the requested state is a deep idle state.
  */
-1:     LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
+       LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
        ld      r4,ADDROFF(pnv_first_deep_stop_state)(r5)
        cmpd    r3,r4
-       bge     2f
+       bge     .Lhandle_deep_stop
        IDLE_STATE_ENTER_SEQ_NORET(PPC_STOP)
-2:
+.Lhandle_deep_stop:
 /*
  * Entering deep idle state.
  * Clear thread bit in PACA_CORE_IDLE_STATE, save SPRs to
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index a394454..1c1b44e 100644
@@ -168,6 +168,14 @@ static unsigned long __initdata prom_tce_alloc_start;
 static unsigned long __initdata prom_tce_alloc_end;
 #endif
 
+static bool __initdata prom_radix_disable;
+
+struct platform_support {
+       bool hash_mmu;
+       bool radix_mmu;
+       bool radix_gtse;
+};
+
 /* Platforms codes are now obsolete in the kernel. Now only used within this
  * file and ultimately gone too. Feel free to change them if you need, they
  * are not shared with anything outside of this file anymore
@@ -626,6 +634,12 @@ static void __init early_cmdline_parse(void)
                prom_memory_limit = ALIGN(prom_memory_limit, 0x1000000);
 #endif
        }
+
+       opt = strstr(prom_cmd_line, "disable_radix");
+       if (opt) {
+               prom_debug("Radix disabled from cmdline\n");
+               prom_radix_disable = true;
+       }
 }
 
 #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
@@ -695,6 +709,8 @@ struct option_vector5 {
        u8 byte22;
        u8 intarch;
        u8 mmu;
+       u8 hash_ext;
+       u8 radix_ext;
 } __packed;
 
 struct option_vector6 {
@@ -850,8 +866,9 @@ struct ibm_arch_vec __cacheline_aligned ibm_architecture_vec = {
                .reserved3 = 0,
                .subprocessors = 1,
                .intarch = 0,
-               .mmu = OV5_FEAT(OV5_MMU_RADIX_300) | OV5_FEAT(OV5_MMU_HASH_300) |
-                       OV5_FEAT(OV5_MMU_PROC_TBL) | OV5_FEAT(OV5_MMU_GTSE),
+               .mmu = 0,
+               .hash_ext = 0,
+               .radix_ext = 0,
        },
 
        /* option vector 6: IBM PAPR hints */
@@ -990,6 +1007,92 @@ static int __init prom_count_smt_threads(void)
 
 }
 
+static void __init prom_parse_mmu_model(u8 val,
+                                       struct platform_support *support)
+{
+       switch (val) {
+       case OV5_FEAT(OV5_MMU_DYNAMIC):
+       case OV5_FEAT(OV5_MMU_EITHER): /* Either Available */
+               prom_debug("MMU - either supported\n");
+               support->radix_mmu = !prom_radix_disable;
+               support->hash_mmu = true;
+               break;
+       case OV5_FEAT(OV5_MMU_RADIX): /* Only Radix */
+               prom_debug("MMU - radix only\n");
+               if (prom_radix_disable) {
+                       /*
+                        * If we __have__ to do radix, we're better off ignoring
+                        * the command line rather than not booting.
+                        */
+                       prom_printf("WARNING: Ignoring cmdline option disable_radix\n");
+               }
+               support->radix_mmu = true;
+               break;
+       case OV5_FEAT(OV5_MMU_HASH):
+               prom_debug("MMU - hash only\n");
+               support->hash_mmu = true;
+               break;
+       default:
+               prom_debug("Unknown mmu support option: 0x%x\n", val);
+               break;
+       }
+}
+
+static void __init prom_parse_platform_support(u8 index, u8 val,
+                                              struct platform_support *support)
+{
+       switch (index) {
+       case OV5_INDX(OV5_MMU_SUPPORT): /* MMU Model */
+               prom_parse_mmu_model(val & OV5_FEAT(OV5_MMU_SUPPORT), support);
+               break;
+       case OV5_INDX(OV5_RADIX_GTSE): /* Radix Extensions */
+               if (val & OV5_FEAT(OV5_RADIX_GTSE)) {
+                       prom_debug("Radix - GTSE supported\n");
+                       support->radix_gtse = true;
+               }
+               break;
+       }
+}
+
+static void __init prom_check_platform_support(void)
+{
+       struct platform_support supported = {
+               .hash_mmu = false,
+               .radix_mmu = false,
+               .radix_gtse = false
+       };
+       int prop_len = prom_getproplen(prom.chosen,
+                                      "ibm,arch-vec-5-platform-support");
+       if (prop_len > 1) {
+               int i;
+               u8 vec[prop_len];
+               prom_debug("Found ibm,arch-vec-5-platform-support, len: %d\n",
+                          prop_len);
+               prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support",
+                            &vec, sizeof(vec));
+               for (i = 0; i < prop_len; i += 2) {
+                       prom_debug("%d: index = 0x%x val = 0x%x\n",
+                                  i / 2, vec[i], vec[i + 1]);
+                       prom_parse_platform_support(vec[i], vec[i + 1],
+                                                   &supported);
+               }
+       }
+
+       if (supported.radix_mmu && supported.radix_gtse) {
+               /* Radix preferred - but we require GTSE for now */
+               prom_debug("Asking for radix with GTSE\n");
+               ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_RADIX);
+               ibm_architecture_vec.vec5.radix_ext = OV5_FEAT(OV5_RADIX_GTSE);
+       } else if (supported.hash_mmu) {
+               /* Default to hash mmu (if we can) */
+               prom_debug("Asking for hash\n");
+               ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_HASH);
+       } else {
+               /* We're probably on a legacy hypervisor */
+               prom_debug("Assuming legacy hash support\n");
+       }
+}
 
 static void __init prom_send_capabilities(void)
 {
@@ -997,6 +1100,9 @@ static void __init prom_send_capabilities(void)
        prom_arg_t ret;
        u32 cores;
 
+       /* Check ibm,arch-vec-5-platform-support and fixup vec5 if required */
+       prom_check_platform_support();
+
        root = call_prom("open", 1, 1, ADDR("/"));
        if (root != 0) {
                /* We need to tell the FW about the number of cores we support.
@@ -2993,6 +3099,11 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
         */
        prom_check_initrd(r3, r4);
 
+       /*
+        * Do early parsing of command line
+        */
+       early_cmdline_parse();
+
 #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
        /*
         * On pSeries, inform the firmware about our capabilities
@@ -3009,11 +3120,6 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
                copy_and_flush(0, kbase, 0x100, 0);
 
        /*
-        * Do early parsing of command line
-        */
-       early_cmdline_parse();
-
-       /*
         * Initialize memory management within prom_init
         */
        prom_init_mem();
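
To make the negotiation concrete, here is a hypothetical
ibm,arch-vec-5-platform-support property and how prom_check_platform_support()
above would walk it (byte values invented for illustration):

	/* (index, value) pairs as returned by prom_getprop() */
	u8 vec[] = { 0x18, 0xC0,	/* OV5_MMU_SUPPORT byte: OV5_MMU_DYNAMIC */
		     0x1A, 0x40 };	/* OV5_RADIX_GTSE byte: GTSE available */

With that input hash_mmu, radix_mmu and radix_gtse all end up true (absent
disable_radix on the command line), so the kernel asks for radix with GTSE.
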
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index adf2084..9cfaa8b 100644
@@ -408,7 +408,10 @@ static void init_cache_info(struct ppc_cache_info *info, u32 size, u32 lsize,
        info->line_size = lsize;
        info->block_size = bsize;
        info->log_block_size = __ilog2(bsize);
-       info->blocks_per_page = PAGE_SIZE / bsize;
+       if (bsize)
+               info->blocks_per_page = PAGE_SIZE / bsize;
+       else
+               info->blocks_per_page = 0;
 
        if (sets == 0)
                info->assoc = 0xffff;
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index 0e649d7..2b5e090 100644
@@ -20,6 +20,7 @@ obj64-y       += copypage_64.o copyuser_64.o usercopy_64.o mem_64.o hweight_64.o \
 
 obj64-$(CONFIG_SMP)    += locks.o
 obj64-$(CONFIG_ALTIVEC)        += vmx-helper.o
+obj64-$(CONFIG_KPROBES_SANITY_TEST) += test_emulate_step.o
 
 obj-y                  += checksum_$(BITS).o checksum_wrappers.o
 
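
The test object reuses the existing CONFIG_KPROBES_SANITY_TEST gate rather
than adding a new Kconfig symbol, and sits in obj64- so it is built for
64-bit only. A config fragment to enable it might look like this (a sketch;
KPROBES_SANITY_TEST also depends on DEBUG_KERNEL):

	CONFIG_DEBUG_KERNEL=y
	CONFIG_KPROBES=y
	CONFIG_KPROBES_SANITY_TEST=y

Results land in the kernel log; see the sample output after the new test
file below.
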
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index 846dba2..9c542ec 100644
@@ -1799,8 +1799,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
                goto instr_done;
 
        case LARX:
-               if (regs->msr & MSR_LE)
-                       return 0;
                if (op.ea & (size - 1))
                        break;          /* can't handle misaligned */
                if (!address_ok(regs, op.ea, size))
@@ -1823,8 +1821,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
                goto ldst_done;
 
        case STCX:
-               if (regs->msr & MSR_LE)
-                       return 0;
                if (op.ea & (size - 1))
                        break;          /* can't handle misaligned */
                if (!address_ok(regs, op.ea, size))
@@ -1849,8 +1845,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
                goto ldst_done;
 
        case LOAD:
-               if (regs->msr & MSR_LE)
-                       return 0;
                err = read_mem(&regs->gpr[op.reg], op.ea, size, regs);
                if (!err) {
                        if (op.type & SIGNEXT)
@@ -1862,8 +1856,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
 
 #ifdef CONFIG_PPC_FPU
        case LOAD_FP:
-               if (regs->msr & MSR_LE)
-                       return 0;
                if (size == 4)
                        err = do_fp_load(op.reg, do_lfs, op.ea, size, regs);
                else
@@ -1872,15 +1864,11 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
 #endif
 #ifdef CONFIG_ALTIVEC
        case LOAD_VMX:
-               if (regs->msr & MSR_LE)
-                       return 0;
                err = do_vec_load(op.reg, do_lvx, op.ea & ~0xfUL, regs);
                goto ldst_done;
 #endif
 #ifdef CONFIG_VSX
        case LOAD_VSX:
-               if (regs->msr & MSR_LE)
-                       return 0;
                err = do_vsx_load(op.reg, do_lxvd2x, op.ea, regs);
                goto ldst_done;
 #endif
@@ -1903,8 +1891,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
                goto instr_done;
 
        case STORE:
-               if (regs->msr & MSR_LE)
-                       return 0;
                if ((op.type & UPDATE) && size == sizeof(long) &&
                    op.reg == 1 && op.update_reg == 1 &&
                    !(regs->msr & MSR_PR) &&
@@ -1917,8 +1903,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
 
 #ifdef CONFIG_PPC_FPU
        case STORE_FP:
-               if (regs->msr & MSR_LE)
-                       return 0;
                if (size == 4)
                        err = do_fp_store(op.reg, do_stfs, op.ea, size, regs);
                else
@@ -1927,15 +1911,11 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
 #endif
 #ifdef CONFIG_ALTIVEC
        case STORE_VMX:
-               if (regs->msr & MSR_LE)
-                       return 0;
                err = do_vec_store(op.reg, do_stvx, op.ea & ~0xfUL, regs);
                goto ldst_done;
 #endif
 #ifdef CONFIG_VSX
        case STORE_VSX:
-               if (regs->msr & MSR_LE)
-                       return 0;
                err = do_vsx_store(op.reg, do_stxvd2x, op.ea, regs);
                goto ldst_done;
 #endif
diff --git a/arch/powerpc/lib/test_emulate_step.c b/arch/powerpc/lib/test_emulate_step.c
new file mode 100644
index 0000000..2534c14
--- /dev/null
@@ -0,0 +1,434 @@
+/*
+ * Simple sanity test for emulate_step load/store instructions.
+ *
+ * Copyright IBM Corp. 2016
+ *
+ * This program is free software;  you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) "emulate_step_test: " fmt
+
+#include <linux/ptrace.h>
+#include <asm/sstep.h>
+#include <asm/ppc-opcode.h>
+
+#define IMM_L(i)               ((uintptr_t)(i) & 0xffff)
+
+/*
+ * Defined with a TEST_ prefix so they do not conflict with other
+ * definitions.
+ */
+#define TEST_LD(r, base, i)    (PPC_INST_LD | ___PPC_RT(r) |           \
+                                       ___PPC_RA(base) | IMM_L(i))
+#define TEST_LWZ(r, base, i)   (PPC_INST_LWZ | ___PPC_RT(r) |          \
+                                       ___PPC_RA(base) | IMM_L(i))
+#define TEST_LWZX(t, a, b)     (PPC_INST_LWZX | ___PPC_RT(t) |         \
+                                       ___PPC_RA(a) | ___PPC_RB(b))
+#define TEST_STD(r, base, i)   (PPC_INST_STD | ___PPC_RS(r) |          \
+                                       ___PPC_RA(base) | ((i) & 0xfffc))
+#define TEST_LDARX(t, a, b, eh)        (PPC_INST_LDARX | ___PPC_RT(t) |        \
+                                       ___PPC_RA(a) | ___PPC_RB(b) |   \
+                                       __PPC_EH(eh))
+#define TEST_STDCX(s, a, b)    (PPC_INST_STDCX | ___PPC_RS(s) |        \
+                                       ___PPC_RA(a) | ___PPC_RB(b))
+#define TEST_LFSX(t, a, b)     (PPC_INST_LFSX | ___PPC_RT(t) |         \
+                                       ___PPC_RA(a) | ___PPC_RB(b))
+#define TEST_STFSX(s, a, b)    (PPC_INST_STFSX | ___PPC_RS(s) |        \
+                                       ___PPC_RA(a) | ___PPC_RB(b))
+#define TEST_LFDX(t, a, b)     (PPC_INST_LFDX | ___PPC_RT(t) |         \
+                                       ___PPC_RA(a) | ___PPC_RB(b))
+#define TEST_STFDX(s, a, b)    (PPC_INST_STFDX | ___PPC_RS(s) |        \
+                                       ___PPC_RA(a) | ___PPC_RB(b))
+#define TEST_LVX(t, a, b)      (PPC_INST_LVX | ___PPC_RT(t) |          \
+                                       ___PPC_RA(a) | ___PPC_RB(b))
+#define TEST_STVX(s, a, b)     (PPC_INST_STVX | ___PPC_RS(s) |         \
+                                       ___PPC_RA(a) | ___PPC_RB(b))
+#define TEST_LXVD2X(s, a, b)   (PPC_INST_LXVD2X | VSX_XX1((s), R##a, R##b))
+#define TEST_STXVD2X(s, a, b)  (PPC_INST_STXVD2X | VSX_XX1((s), R##a, R##b))
+
+
+static void __init init_pt_regs(struct pt_regs *regs)
+{
+       static unsigned long msr;
+       static bool msr_cached;
+
+       memset(regs, 0, sizeof(struct pt_regs));
+
+       if (likely(msr_cached)) {
+               regs->msr = msr;
+               return;
+       }
+
+       asm volatile("mfmsr %0" : "=r"(regs->msr));
+
+       regs->msr |= MSR_FP;
+       regs->msr |= MSR_VEC;
+       regs->msr |= MSR_VSX;
+
+       msr = regs->msr;
+       msr_cached = true;
+}
+
+static void __init show_result(char *ins, char *result)
+{
+       pr_info("%-14s : %s\n", ins, result);
+}
+
+static void __init test_ld(void)
+{
+       struct pt_regs regs;
+       unsigned long a = 0x23;
+       int stepped = -1;
+
+       init_pt_regs(&regs);
+       regs.gpr[3] = (unsigned long) &a;
+
+       /* ld r5, 0(r3) */
+       stepped = emulate_step(&regs, TEST_LD(5, 3, 0));
+
+       if (stepped == 1 && regs.gpr[5] == a)
+               show_result("ld", "PASS");
+       else
+               show_result("ld", "FAIL");
+}
+
+static void __init test_lwz(void)
+{
+       struct pt_regs regs;
+       unsigned int a = 0x4545;
+       int stepped = -1;
+
+       init_pt_regs(&regs);
+       regs.gpr[3] = (unsigned long) &a;
+
+       /* lwz r5, 0(r3) */
+       stepped = emulate_step(&regs, TEST_LWZ(5, 3, 0));
+
+       if (stepped == 1 && regs.gpr[5] == a)
+               show_result("lwz", "PASS");
+       else
+               show_result("lwz", "FAIL");
+}
+
+static void __init test_lwzx(void)
+{
+       struct pt_regs regs;
+       unsigned int a[3] = {0x0, 0x0, 0x1234};
+       int stepped = -1;
+
+       init_pt_regs(&regs);
+       regs.gpr[3] = (unsigned long) a;
+       regs.gpr[4] = 8;
+       regs.gpr[5] = 0x8765;
+
+       /* lwzx r5, r3, r4 */
+       stepped = emulate_step(&regs, TEST_LWZX(5, 3, 4));
+       if (stepped == 1 && regs.gpr[5] == a[2])
+               show_result("lwzx", "PASS");
+       else
+               show_result("lwzx", "FAIL");
+}
+
+static void __init test_std(void)
+{
+       struct pt_regs regs;
+       unsigned long a = 0x1234;
+       int stepped = -1;
+
+       init_pt_regs(&regs);
+       regs.gpr[3] = (unsigned long) &a;
+       regs.gpr[5] = 0x5678;
+
+       /* std r5, 0(r3) */
+       stepped = emulate_step(&regs, TEST_STD(5, 3, 0));
+       if (stepped == 1 && regs.gpr[5] == a)
+               show_result("std", "PASS");
+       else
+               show_result("std", "FAIL");
+}
+
+static void __init test_ldarx_stdcx(void)
+{
+       struct pt_regs regs;
+       unsigned long a = 0x1234;
+       int stepped = -1;
+       unsigned long cr0_eq = 0x1 << 29; /* eq bit of CR0 */
+
+       init_pt_regs(&regs);
+       asm volatile("mfcr %0" : "=r"(regs.ccr));
+
+
+       /*** ldarx ***/
+
+       regs.gpr[3] = (unsigned long) &a;
+       regs.gpr[4] = 0;
+       regs.gpr[5] = 0x5678;
+
+       /* ldarx r5, r3, r4, 0 */
+       stepped = emulate_step(&regs, TEST_LDARX(5, 3, 4, 0));
+
+       /*
+        * Don't touch 'a' here. Accessing 'a' can generate a load or
+        * store of 'a', which could clear the reservation and make the
+        * subsequent stdcx. fail. Instead, compare against the
+        * hardcoded value.
+        */
+       if (stepped <= 0 || regs.gpr[5] != 0x1234) {
+               show_result("ldarx / stdcx.", "FAIL (ldarx)");
+               return;
+       }
+
+
+       /*** stdcx. ***/
+
+       regs.gpr[5] = 0x9ABC;
+
+       /* stdcx. r5, r3, r4 */
+       stepped = emulate_step(&regs, TEST_STDCX(5, 3, 4));
+
+       /*
+        * Two possible scenarios indicate successful emulation
+        * of stdcx.:
+        *  1. The reservation is active and the store is performed.
+        *     In this case the cr0.eq bit is set to 1.
+        *  2. The reservation is not active and the store is not
+        *     performed. In this case the cr0.eq bit is set to 0.
+        */
+       if (stepped == 1 && ((regs.gpr[5] == a && (regs.ccr & cr0_eq))
+                       || (regs.gpr[5] != a && !(regs.ccr & cr0_eq))))
+               show_result("ldarx / stdcx.", "PASS");
+       else
+               show_result("ldarx / stdcx.", "FAIL (stdcx.)");
+}
+
+#ifdef CONFIG_PPC_FPU
+static void __init test_lfsx_stfsx(void)
+{
+       struct pt_regs regs;
+       union {
+               float a;
+               int b;
+       } c;
+       int cached_b;
+       int stepped = -1;
+
+       init_pt_regs(&regs);
+
+
+       /*** lfsx ***/
+
+       c.a = 123.45;
+       cached_b = c.b;
+
+       regs.gpr[3] = (unsigned long) &c.a;
+       regs.gpr[4] = 0;
+
+       /* lfsx frt10, r3, r4 */
+       stepped = emulate_step(&regs, TEST_LFSX(10, 3, 4));
+
+       if (stepped == 1)
+               show_result("lfsx", "PASS");
+       else
+               show_result("lfsx", "FAIL");
+
+
+       /*** stfsx ***/
+
+       c.a = 678.91;
+
+       /* stfsx frs10, r3, r4 */
+       stepped = emulate_step(&regs, TEST_STFSX(10, 3, 4));
+
+       if (stepped == 1 && c.b == cached_b)
+               show_result("stfsx", "PASS");
+       else
+               show_result("stfsx", "FAIL");
+}
+
+static void __init test_lfdx_stfdx(void)
+{
+       struct pt_regs regs;
+       union {
+               double a;
+               long b;
+       } c;
+       long cached_b;
+       int stepped = -1;
+
+       init_pt_regs(&regs);
+
+
+       /*** lfdx ***/
+
+       c.a = 123456.78;
+       cached_b = c.b;
+
+       regs.gpr[3] = (unsigned long) &c.a;
+       regs.gpr[4] = 0;
+
+       /* lfdx frt10, r3, r4 */
+       stepped = emulate_step(&regs, TEST_LFDX(10, 3, 4));
+
+       if (stepped == 1)
+               show_result("lfdx", "PASS");
+       else
+               show_result("lfdx", "FAIL");
+
+
+       /*** stfdx ***/
+
+       c.a = 987654.32;
+
+       /* stfdx frs10, r3, r4 */
+       stepped = emulate_step(&regs, TEST_STFDX(10, 3, 4));
+
+       if (stepped == 1 && c.b == cached_b)
+               show_result("stfdx", "PASS");
+       else
+               show_result("stfdx", "FAIL");
+}
+#else
+static void __init test_lfsx_stfsx(void)
+{
+       show_result("lfsx", "SKIP (CONFIG_PPC_FPU is not set)");
+       show_result("stfsx", "SKIP (CONFIG_PPC_FPU is not set)");
+}
+
+static void __init test_lfdx_stfdx(void)
+{
+       show_result("lfdx", "SKIP (CONFIG_PPC_FPU is not set)");
+       show_result("stfdx", "SKIP (CONFIG_PPC_FPU is not set)");
+}
+#endif /* CONFIG_PPC_FPU */
+
+#ifdef CONFIG_ALTIVEC
+static void __init test_lvx_stvx(void)
+{
+       struct pt_regs regs;
+       union {
+               vector128 a;
+               u32 b[4];
+       } c;
+       u32 cached_b[4];
+       int stepped = -1;
+
+       init_pt_regs(&regs);
+
+
+       /*** lvx ***/
+
+       cached_b[0] = c.b[0] = 923745;
+       cached_b[1] = c.b[1] = 2139478;
+       cached_b[2] = c.b[2] = 9012;
+       cached_b[3] = c.b[3] = 982134;
+
+       regs.gpr[3] = (unsigned long) &c.a;
+       regs.gpr[4] = 0;
+
+       /* lvx vrt10, r3, r4 */
+       stepped = emulate_step(&regs, TEST_LVX(10, 3, 4));
+
+       if (stepped == 1)
+               show_result("lvx", "PASS");
+       else
+               show_result("lvx", "FAIL");
+
+
+       /*** stvx ***/
+
+       c.b[0] = 4987513;
+       c.b[1] = 84313948;
+       c.b[2] = 71;
+       c.b[3] = 498532;
+
+       /* stvx vrs10, r3, r4 */
+       stepped = emulate_step(&regs, TEST_STVX(10, 3, 4));
+
+       if (stepped == 1 && cached_b[0] == c.b[0] && cached_b[1] == c.b[1] &&
+           cached_b[2] == c.b[2] && cached_b[3] == c.b[3])
+               show_result("stvx", "PASS");
+       else
+               show_result("stvx", "FAIL");
+}
+#else
+static void __init test_lvx_stvx(void)
+{
+       show_result("lvx", "SKIP (CONFIG_ALTIVEC is not set)");
+       show_result("stvx", "SKIP (CONFIG_ALTIVEC is not set)");
+}
+#endif /* CONFIG_ALTIVEC */
+
+#ifdef CONFIG_VSX
+static void __init test_lxvd2x_stxvd2x(void)
+{
+       struct pt_regs regs;
+       union {
+               vector128 a;
+               u32 b[4];
+       } c;
+       u32 cached_b[4];
+       int stepped = -1;
+
+       init_pt_regs(&regs);
+
+
+       /*** lxvd2x ***/
+
+       cached_b[0] = c.b[0] = 18233;
+       cached_b[1] = c.b[1] = 34863571;
+       cached_b[2] = c.b[2] = 834;
+       cached_b[3] = c.b[3] = 6138911;
+
+       regs.gpr[3] = (unsigned long) &c.a;
+       regs.gpr[4] = 0;
+
+       /* lxvd2x vsr39, r3, r4 */
+       stepped = emulate_step(&regs, TEST_LXVD2X(39, 3, 4));
+
+       if (stepped == 1)
+               show_result("lxvd2x", "PASS");
+       else
+               show_result("lxvd2x", "FAIL");
+
+
+       /*** stxvd2x ***/
+
+       c.b[0] = 21379463;
+       c.b[1] = 87;
+       c.b[2] = 374234;
+       c.b[3] = 4;
+
+       /* stxvd2x vsr39, r3, r4 */
+       stepped = emulate_step(&regs, TEST_STXVD2X(39, 3, 4));
+
+       if (stepped == 1 && cached_b[0] == c.b[0] && cached_b[1] == c.b[1] &&
+           cached_b[2] == c.b[2] && cached_b[3] == c.b[3])
+               show_result("stxvd2x", "PASS");
+       else
+               show_result("stxvd2x", "FAIL");
+}
+#else
+static void __init test_lxvd2x_stxvd2x(void)
+{
+       show_result("lxvd2x", "SKIP (CONFIG_VSX is not set)");
+       show_result("stxvd2x", "SKIP (CONFIG_VSX is not set)");
+}
+#endif /* CONFIG_VSX */
+
+static int __init test_emulate_step(void)
+{
+       test_ld();
+       test_lwz();
+       test_lwzx();
+       test_std();
+       test_ldarx_stdcx();
+       test_lfsx_stfsx();
+       test_lfdx_stfdx();
+       test_lvx_stvx();
+       test_lxvd2x_stxvd2x();
+
+       return 0;
+}
+late_initcall(test_emulate_step);
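
The test runs at late_initcall time and logs one line per instruction through
show_result(); illustrative dmesg output, assuming a configuration where FPU,
Altivec and VSX are all enabled and every test passes (padding per the %-14s
format):

	emulate_step_test: ld             : PASS
	emulate_step_test: lwz            : PASS
	emulate_step_test: ldarx / stdcx. : PASS
	emulate_step_test: lxvd2x         : PASS
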
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 6aa3b76..9be9920 100644
@@ -356,18 +356,42 @@ static void early_check_vec5(void)
        unsigned long root, chosen;
        int size;
        const u8 *vec5;
+       u8 mmu_supported;
 
        root = of_get_flat_dt_root();
        chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
-       if (chosen == -FDT_ERR_NOTFOUND)
+       if (chosen == -FDT_ERR_NOTFOUND) {
+               cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
                return;
+       }
        vec5 = of_get_flat_dt_prop(chosen, "ibm,architecture-vec-5", &size);
-       if (!vec5)
+       if (!vec5) {
+               cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
                return;
-       if (size <= OV5_INDX(OV5_MMU_RADIX_300) ||
-           !(vec5[OV5_INDX(OV5_MMU_RADIX_300)] & OV5_FEAT(OV5_MMU_RADIX_300)))
-               /* Hypervisor doesn't support radix */
+       }
+       if (size <= OV5_INDX(OV5_MMU_SUPPORT)) {
                cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
+               return;
+       }
+
+       /* Check for supported configuration */
+       mmu_supported = vec5[OV5_INDX(OV5_MMU_SUPPORT)] &
+                       OV5_FEAT(OV5_MMU_SUPPORT);
+       if (mmu_supported == OV5_FEAT(OV5_MMU_RADIX)) {
+               /* Hypervisor only supports radix - check enabled && GTSE */
+               if (!early_radix_enabled()) {
+                       pr_warn("WARNING: Ignoring cmdline option disable_radix\n");
+               }
+               if (!(vec5[OV5_INDX(OV5_RADIX_GTSE)] &
+                                               OV5_FEAT(OV5_RADIX_GTSE))) {
+                       pr_warn("WARNING: Hypervisor doesn't support RADIX with GTSE\n");
+               }
+               /* Do radix anyway - the hypervisor said we had to */
+               cur_cpu_spec->mmu_features |= MMU_FTR_TYPE_RADIX;
+       } else if (mmu_supported == OV5_FEAT(OV5_MMU_HASH)) {
+               /* Hypervisor only supports hash - disable radix */
+               cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
+       }
 }
 
 void __init mmu_early_init_devtree(void)
@@ -383,7 +407,7 @@ void __init mmu_early_init_devtree(void)
         * even though the ibm,architecture-vec-5 property created by
         * skiboot doesn't have the necessary bits set.
         */
-       if (early_radix_enabled() && !(mfmsr() & MSR_HV))
+       if (!(mfmsr() & MSR_HV))
                early_check_vec5();
 
        if (early_radix_enabled())
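
A compact reading of the new check, with the OV5 constants from prom.h above
spelled out (values derived from that header, shown for illustration):

	/* mmu_supported = vec5[0x18] & 0xC0:
	 *   0x00 = OV5_MMU_HASH    -> hash only: clear MMU_FTR_TYPE_RADIX
	 *   0x40 = OV5_MMU_RADIX   -> radix mandated: force the feature on
	 *   0x80 = OV5_MMU_EITHER  -> either: keep whatever was negotiated
	 *   0xC0 = OV5_MMU_DYNAMIC -> either, switchable: keep as negotiated
	 */
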
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index 2a590a9..c28165d 100644
@@ -186,6 +186,10 @@ static void __init radix_init_pgtable(void)
         */
        register_process_table(__pa(process_tb), 0, PRTB_SIZE_SHIFT - 12);
        pr_info("Process table %p and radix root for kernel: %p\n", process_tb, init_mm.pgd);
+       asm volatile("ptesync" : : : "memory");
+       asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
+                    "r" (TLBIEL_INVAL_SET_LPID), "r" (0));
+       asm volatile("eieio; tlbsync; ptesync" : : : "memory");
 }
 
 static void __init radix_init_partition_table(void)
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index 6693f75..da8a0f7 100644
@@ -39,8 +39,8 @@ opal_tracepoint_refcount:
 BEGIN_FTR_SECTION;                                             \
        b       1f;                                             \
 END_FTR_SECTION(0, 1);                                         \
-       ld      r12,opal_tracepoint_refcount@toc(r2);           \
-       cmpdi   r12,0;                                          \
+       ld      r11,opal_tracepoint_refcount@toc(r2);           \
+       cmpdi   r11,0;                                          \
        bne-    LABEL;                                          \
 1:
 
diff --git a/arch/powerpc/sysdev/xics/icp-opal.c b/arch/powerpc/sysdev/xics/icp-opal.c
index f9670ea..b53f80f 100644
@@ -91,6 +91,16 @@ static unsigned int icp_opal_get_irq(void)
 
 static void icp_opal_set_cpu_priority(unsigned char cppr)
 {
+       /*
+        * Here be dragons. The caller has asked to allow only IPIs and not
+        * external interrupts. But OPAL XIVE doesn't support that, so instead
+        * of allowing no interrupts we allow all. That's still not right, but
+        * currently the only caller that does this is xics_migrate_irqs_away()
+        * and it works in that case.
+        */
+       if (cppr >= DEFAULT_PRIORITY)
+               cppr = LOWEST_PRIORITY;
+
        xics_set_base_cppr(cppr);
        opal_int_set_cppr(cppr);
        iosync();
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
index 69d858e..23efe4e 100644
@@ -20,6 +20,7 @@
 #include <linux/of.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
+#include <linux/delay.h>
 
 #include <asm/prom.h>
 #include <asm/io.h>
@@ -198,9 +199,6 @@ void xics_migrate_irqs_away(void)
        /* Remove ourselves from the global interrupt queue */
        xics_set_cpu_giq(xics_default_distrib_server, 0);
 
-       /* Allow IPIs again... */
-       icp_ops->set_priority(DEFAULT_PRIORITY);
-
        for_each_irq_desc(virq, desc) {
                struct irq_chip *chip;
                long server;
@@ -255,6 +253,19 @@ void xics_migrate_irqs_away(void)
 unlock:
                raw_spin_unlock_irqrestore(&desc->lock, flags);
        }
+
+       /* Allow "sufficient" time to drain any in-flight IRQs */
+       mdelay(5);
+
+       /*
+        * Allow IPIs again. This is done at the very end, after migrating all
+        * interrupts; the expectation is that we'll only get woken up by an
+        * IPI beyond this point, but we leave externals masked just to be
+        * safe. If we're using icp-opal this may actually allow all
+        * interrupts anyway, but that should be OK.
+        */
+       icp_ops->set_priority(DEFAULT_PRIORITY);
+
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
diff --git a/tools/testing/selftests/powerpc/harness.c b/tools/testing/selftests/powerpc/harness.c
index 248a820..66d31de 100644
@@ -114,9 +114,11 @@ int test_harness(int (test_function)(void), char *name)
 
        rc = run_test(test_function, name);
 
-       if (rc == MAGIC_SKIP_RETURN_VALUE)
+       if (rc == MAGIC_SKIP_RETURN_VALUE) {
                test_skip(name);
-       else
+               /* so that a skipped test is not marked as failed */
+               rc = 0;
+       } else
                test_finish(name, rc);
 
        return rc;
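
For context, a selftest signals a skip by returning the magic value, typically
via the SKIP_IF() helper from the powerpc selftest utils; a minimal sketch
(the precondition is hypothetical):

	static int my_test(void)
	{
		SKIP_IF(!feature_available);	/* hypothetical precondition */
		/* ... real assertions ... */
		return 0;
	}

Previously test_harness() passed the magic value straight through as its
return code, so callers treating nonzero as failure counted skips as
failures; zeroing rc fixes the exit status.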