Merge branch 'fixes' into next
author    Michael Ellerman <mpe@ellerman.id.au>    Sun, 21 Jan 2018 12:21:14 +0000 (23:21 +1100)
committer Michael Ellerman <mpe@ellerman.id.au>    Sun, 21 Jan 2018 12:21:14 +0000 (23:21 +1100)
Merge our fixes branch from the 4.15 cycle.

Unusually, the fixes branch saw some significant features merged,
notably the RFI flush patches, so we want the code in next to be
tested against that, to avoid any surprises when the two are merged.

There's also some other work on the panic handling that was reverted
in fixes and that we now want to do properly in next, which would
otherwise conflict.

We also fix up a few other minor merge conflicts.

19 files changed:
arch/powerpc/Kconfig
arch/powerpc/include/asm/exception-64s.h
arch/powerpc/include/asm/machdep.h
arch/powerpc/include/asm/paca.h
arch/powerpc/kernel/asm-offsets.c
arch/powerpc/kernel/cpu_setup_power.S
arch/powerpc/kernel/entry_64.S
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kernel/process.c
arch/powerpc/kernel/setup-common.c
arch/powerpc/kernel/setup_64.c
arch/powerpc/kernel/vmlinux.lds.S
arch/powerpc/kvm/book3s_hv_rmhandlers.S
arch/powerpc/lib/feature-fixups.c
arch/powerpc/mm/fault.c
arch/powerpc/perf/core-book3s.c
arch/powerpc/perf/imc-pmu.c
arch/powerpc/platforms/pseries/setup.c
arch/powerpc/xmon/xmon.c

Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
diff --combined arch/powerpc/kernel/setup-common.c
index 24da91768133fe798b2539dd519c263accf79e78,8fd3a70047f1dae91f5e3d11479a178aa64b2ed3..af128ee6724862cde74eed560efd4d4b8dbdff3e
@@@ -354,10 -346,10 +346,8 @@@ static int show_cpuinfo(struct seq_fil
                   loops_per_jiffy / (500000/HZ),
                   (loops_per_jiffy / (5000/HZ)) % 100);
  #endif
 -
 -#ifdef CONFIG_SMP
        seq_printf(m, "\n");
 -#endif
 +
-       preempt_enable();
        /* If this is the last cpu, print the summary */
        if (cpumask_next(cpu_id, cpu_online_mask) >= nr_cpu_ids)
                show_cpuinfo_summary(m);
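
[Editor's note] The hunk above drops a leftover preempt_enable() and an old
CONFIG_SMP guard, leaving the "last online CPU" test alone to decide when to
append the summary. A rough userspace sketch of that test, with a simulated
online mask standing in for cpu_online_mask (nothing here is kernel API):

#include <stdio.h>

#define NR_CPUS 8

/* Stand-in for cpumask_next(cpu, cpu_online_mask): returns the next
 * online CPU above 'cpu', or NR_CPUS when there is none left. */
static int next_online(const int online[], int cpu)
{
	for (int i = cpu + 1; i < NR_CPUS; i++)
		if (online[i])
			return i;
	return NR_CPUS;
}

int main(void)
{
	const int online[NR_CPUS] = { 1, 1, 0, 1, 0, 0, 0, 0 };

	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		if (!online[cpu])
			continue;
		printf("cpu %d\n", cpu);
		/* Mirrors: cpumask_next(cpu_id, cpu_online_mask) >= nr_cpu_ids */
		if (next_online(online, cpu) >= NR_CPUS)
			printf("  (last online cpu: print summary here)\n");
	}
	return 0;
}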
diff --combined arch/powerpc/kernel/setup_64.c
index 896dacef2f2dcfef871c805504d5b19cb38877a5,e67413f4a8f0c1fc852c1231f09176c152ed9059..d1fa0e91f526ec3e1a82094160f4ea94d2e0c361
@@@ -808,3 -802,141 +809,141 @@@ static int __init disable_hardlockup_de
        return 0;
  }
  early_initcall(disable_hardlockup_detector);
+
+ #ifdef CONFIG_PPC_BOOK3S_64
+ static enum l1d_flush_type enabled_flush_types;
+ static void *l1d_flush_fallback_area;
+ static bool no_rfi_flush;
+ bool rfi_flush;
+
+ static int __init handle_no_rfi_flush(char *p)
+ {
+       pr_info("rfi-flush: disabled on command line.");
+       no_rfi_flush = true;
+       return 0;
+ }
+ early_param("no_rfi_flush", handle_no_rfi_flush);
+
+ /*
+  * The RFI flush is not KPTI, but because users will see doco that says to use
+  * nopti we hijack that option here to also disable the RFI flush.
+  */
+ static int __init handle_no_pti(char *p)
+ {
+       pr_info("rfi-flush: disabling due to 'nopti' on command line.\n");
+       handle_no_rfi_flush(NULL);
+       return 0;
+ }
+ early_param("nopti", handle_no_pti);
+
+ static void do_nothing(void *unused)
+ {
+       /*
+        * We don't need to do the flush explicitly, just enter+exit kernel is
+        * sufficient, the RFI exit handlers will do the right thing.
+        */
+ }
+
+ void rfi_flush_enable(bool enable)
+ {
+       if (rfi_flush == enable)
+               return;
+
+       if (enable) {
+               do_rfi_flush_fixups(enabled_flush_types);
+               on_each_cpu(do_nothing, NULL, 1);
+       } else
+               do_rfi_flush_fixups(L1D_FLUSH_NONE);
+
+       rfi_flush = enable;
+ }
+
+ static void init_fallback_flush(void)
+ {
+       u64 l1d_size, limit;
+       int cpu;
+
+       l1d_size = ppc64_caches.l1d.size;
 -      limit = min(safe_stack_limit(), ppc64_rma_size);
++      limit = min(ppc64_bolted_size(), ppc64_rma_size);
+
+       /*
+        * Align to L1d size, and size it at 2x L1d size, to catch possible
+        * hardware prefetch runoff. We don't have a recipe for load patterns to
+        * reliably avoid the prefetcher.
+        */
+       l1d_flush_fallback_area = __va(memblock_alloc_base(l1d_size * 2, l1d_size, limit));
+       memset(l1d_flush_fallback_area, 0, l1d_size * 2);
+
+       for_each_possible_cpu(cpu) {
+               /*
+                * The fallback flush is currently coded for 8-way
+                * associativity. Different associativity is possible, but it
+                * will be treated as 8-way and may not evict the lines as
+                * effectively.
+                *
+                * 128 byte lines are mandatory.
+                */
+               u64 c = l1d_size / 8;
+
+               paca[cpu].rfi_flush_fallback_area = l1d_flush_fallback_area;
+               paca[cpu].l1d_flush_congruence = c;
+               paca[cpu].l1d_flush_sets = c / 128;
+       }
+ }
+
+ void __init setup_rfi_flush(enum l1d_flush_type types, bool enable)
+ {
+       if (types & L1D_FLUSH_FALLBACK) {
+               pr_info("rfi-flush: Using fallback displacement flush\n");
+               init_fallback_flush();
+       }
+
+       if (types & L1D_FLUSH_ORI)
+               pr_info("rfi-flush: Using ori type flush\n");
+
+       if (types & L1D_FLUSH_MTTRIG)
+               pr_info("rfi-flush: Using mttrig type flush\n");
+
+       enabled_flush_types = types;
+
+       if (!no_rfi_flush)
+               rfi_flush_enable(enable);
+ }
+
+ #ifdef CONFIG_DEBUG_FS
+ static int rfi_flush_set(void *data, u64 val)
+ {
+       if (val == 1)
+               rfi_flush_enable(true);
+       else if (val == 0)
+               rfi_flush_enable(false);
+       else
+               return -EINVAL;
+
+       return 0;
+ }
+
+ static int rfi_flush_get(void *data, u64 *val)
+ {
+       *val = rfi_flush ? 1 : 0;
+       return 0;
+ }
+
+ DEFINE_SIMPLE_ATTRIBUTE(fops_rfi_flush, rfi_flush_get, rfi_flush_set, "%llu\n");
+
+ static __init int rfi_flush_debugfs_init(void)
+ {
+       debugfs_create_file("rfi_flush", 0600, powerpc_debugfs_root, NULL, &fops_rfi_flush);
+       return 0;
+ }
+ device_initcall(rfi_flush_debugfs_init);
+ #endif
+
+ ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
+ {
+       if (rfi_flush)
+               return sprintf(buf, "Mitigation: RFI Flush\n");
+
+       return sprintf(buf, "Vulnerable\n");
+ }
+ #endif /* CONFIG_PPC_BOOK3S_64 */
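
[Editor's note] setup_rfi_flush() above is the hook that platform setup code
calls at boot with whatever flush types firmware advertises (this merge also
touches arch/powerpc/platforms/pseries/setup.c for that reason). A minimal
caller sketch; firmware_advertises_mttrig() is an invented stand-in for the
real capability probe, not a kernel function:

/* Hypothetical boot-time caller of the interface added above. */
static void __init example_setup_rfi_flush(void)
{
	/* The fallback displacement flush always works; prefer a
	 * hardware-assisted flush type when firmware advertises one. */
	enum l1d_flush_type types = L1D_FLUSH_FALLBACK;

	if (firmware_advertises_mttrig())	/* invented probe */
		types |= L1D_FLUSH_MTTRIG;

	setup_rfi_flush(types, true);
}

Once enabled, the debugfs file registered above lets the flush be toggled at
runtime (powerpc_debugfs_root normally sits at /sys/kernel/debug/powerpc),
and cpu_show_meltdown() is the hook that backs
/sys/devices/system/cpu/vulnerabilities/meltdown.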
Simple merge
Simple merge
diff --combined arch/powerpc/mm/fault.c
index d3a6e0395eaa33674e703b75a381edc8eda6b47c,6e1e3903538065becbab2ad47febad597f5d6d49..866446cf2d9abd5ae1b0a5ebc1076e16feca3f4c
@@@ -144,15 -142,14 +144,20 @@@ static int __bad_area(struct pt_regs *r
  
  static noinline int bad_area(struct pt_regs *regs, unsigned long address)
  {
 -      return __bad_area(regs, address, SEGV_MAPERR);
 +      return __bad_area(regs, address, SEGV_MAPERR, 0);
 +}
 +
 +static int bad_key_fault_exception(struct pt_regs *regs, unsigned long address,
 +                                  int pkey)
 +{
 +      return __bad_area_nosemaphore(regs, address, SEGV_PKUERR, pkey);
  }
  
+ static noinline int bad_access(struct pt_regs *regs, unsigned long address)
+ {
 -      return __bad_area(regs, address, SEGV_ACCERR);
++      return __bad_area(regs, address, SEGV_ACCERR, 0);
+ }
+
  static int do_sigbus(struct pt_regs *regs, unsigned long address,
                     unsigned int fault)
  {
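
[Editor's note] The three bail-out paths above differ in the si_code that is
delivered with SIGSEGV: bad_area() sends SEGV_MAPERR, bad_access() sends
SEGV_ACCERR, and bad_key_fault_exception() sends SEGV_PKUERR plus the key. A
small userspace program (not part of the patch) showing how each would be
observed:

#include <signal.h>
#include <string.h>
#include <unistd.h>

#ifndef SEGV_PKUERR
#define SEGV_PKUERR 4	/* Linux value; missing from some older libcs */
#endif

static void segv_handler(int sig, siginfo_t *info, void *ctx)
{
	/* SEGV_MAPERR: no mapping at the address     (bad_area)
	 * SEGV_ACCERR: mapping exists, access denied (bad_access)
	 * SEGV_PKUERR: denied by a protection key    (bad_key_fault_exception) */
	const char *msg = info->si_code == SEGV_MAPERR ? "SEGV_MAPERR\n" :
			  info->si_code == SEGV_ACCERR ? "SEGV_ACCERR\n" :
			  info->si_code == SEGV_PKUERR ? "SEGV_PKUERR\n" :
							 "other si_code\n";
	write(STDOUT_FILENO, msg, strlen(msg));	/* async-signal-safe */
	_exit(0);
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = segv_handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGSEGV, &sa, NULL);

	*(volatile int *)8 = 1;	/* no mapping there: expect SEGV_MAPERR */
	return 0;
}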
Simple merge
diff --combined arch/powerpc/perf/imc-pmu.c
index 671aa20d8dd8854072a0076966bfd41ba784fb80,be4e7f84f70a59db60e92a9bfe845678f71cc608..d7532e7b9ab5ccd4c37607b46bb43f7a9e75e5e5
@@@ -1172,16 -1171,6 +1185,15 @@@ static void cleanup_all_thread_imc_memo
        }
  }
  
-       kfree(per_nest_pmu_arr);
 +/* Function to free the attr_groups which are dynamically allocated */
 +static void imc_common_mem_free(struct imc_pmu *pmu_ptr)
 +{
 +      if (pmu_ptr->attr_groups[IMC_EVENT_ATTR])
 +              kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]->attrs);
 +      kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]);
 +      kfree(pmu_ptr);
 +}
 +
  /*
   * Common function to unregister cpu hotplug callback and
   * free the memory.
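
[Editor's note] One detail in imc_common_mem_free() above: the NULL check
guards the ->attrs dereference, not the frees themselves, since kfree(NULL)
is a no-op. A userspace analogue with simplified types (these are not the
kernel structures):

#include <stdlib.h>

struct event_group { void *attrs; };
struct pmu_like    { struct event_group *event_group; };

/* Mirrors the shape of imc_common_mem_free(): test the pointer only
 * where it is dereferenced; free(NULL), like kfree(NULL), is safe. */
static void pmu_mem_free(struct pmu_like *p)
{
	if (p->event_group)
		free(p->event_group->attrs);
	free(p->event_group);	/* safe even when NULL */
	free(p);
}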
Simple merge