cpu/hotplug: rename cpu_smt_allowed() to cpu_bootable(), keep one housekeeping CPU online for offlining work, and migrate hrtimers via the new CPUHP_AP_HRTIMERS_DYING state
[platform/kernel/linux-rpi.git] / kernel / cpu.c
index 6de7c6b..72e0f53 100644 (file)
@@ -659,11 +659,19 @@ static inline bool cpu_smt_thread_allowed(unsigned int cpu)
 #endif
 }
 
-static inline bool cpu_smt_allowed(unsigned int cpu)
+static inline bool cpu_bootable(unsigned int cpu)
 {
        if (cpu_smt_control == CPU_SMT_ENABLED && cpu_smt_thread_allowed(cpu))
                return true;
 
+       /* All CPUs are bootable if controls are not configured */
+       if (cpu_smt_control == CPU_SMT_NOT_IMPLEMENTED)
+               return true;
+
+       /* All CPUs are bootable if CPU is not SMT capable */
+       if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
+               return true;
+
        if (topology_is_primary_thread(cpu))
                return true;
 
@@ -685,7 +693,7 @@ bool cpu_smt_possible(void)
 EXPORT_SYMBOL_GPL(cpu_smt_possible);
 
 #else
-static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
+static inline bool cpu_bootable(unsigned int cpu) { return true; }
 #endif
 
 static inline enum cpuhp_state
@@ -788,10 +796,10 @@ static int bringup_wait_for_ap_online(unsigned int cpu)
         * SMT soft disabling on X86 requires to bring the CPU out of the
         * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit.  The
         * CPU marked itself as booted_once in notify_cpu_starting() so the
-        * cpu_smt_allowed() check will now return false if this is not the
+        * cpu_bootable() check will now return false if this is not the
         * primary sibling.
         */
-       if (!cpu_smt_allowed(cpu))
+       if (!cpu_bootable(cpu))
                return -ECANCELED;
        return 0;
 }
@@ -1515,11 +1523,14 @@ static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
        /*
         * Ensure that the control task does not run on the to be offlined
         * CPU to prevent a deadlock against cfs_b->period_timer.
+        * Also keep at least one housekeeping cpu onlined to avoid generating
+        * an empty sched_domain span.
         */
-       cpu = cpumask_any_but(cpu_online_mask, cpu);
-       if (cpu >= nr_cpu_ids)
-               return -EBUSY;
-       return work_on_cpu(cpu, __cpu_down_maps_locked, &work);
+       for_each_cpu_and(cpu, cpu_online_mask, housekeeping_cpumask(HK_TYPE_DOMAIN)) {
+               if (cpu != work.cpu)
+                       return work_on_cpu(cpu, __cpu_down_maps_locked, &work);
+       }
+       return -EBUSY;
 }
 
 static int cpu_down(unsigned int cpu, enum cpuhp_state target)
@@ -1741,7 +1752,7 @@ static int cpu_up(unsigned int cpu, enum cpuhp_state target)
                err = -EBUSY;
                goto out;
        }
-       if (!cpu_smt_allowed(cpu)) {
+       if (!cpu_bootable(cpu)) {
                err = -EPERM;
                goto out;
        }
@@ -2098,7 +2109,7 @@ static struct cpuhp_step cpuhp_hp_states[] = {
        [CPUHP_HRTIMERS_PREPARE] = {
                .name                   = "hrtimers:prepare",
                .startup.single         = hrtimers_prepare_cpu,
-               .teardown.single        = hrtimers_dead_cpu,
+               .teardown.single        = NULL,
        },
        [CPUHP_SMPCFD_PREPARE] = {
                .name                   = "smpcfd:prepare",
@@ -2190,6 +2201,12 @@ static struct cpuhp_step cpuhp_hp_states[] = {
                .startup.single         = NULL,
                .teardown.single        = smpcfd_dying_cpu,
        },
+       [CPUHP_AP_HRTIMERS_DYING] = {
+               .name                   = "hrtimers:dying",
+               .startup.single         = NULL,
+               .teardown.single        = hrtimers_cpu_dying,
+       },
+
        /* Entry state on starting. Interrupts enabled from here on. Transient
         * state for synchronsization */
        [CPUHP_AP_ONLINE] = {