void machine_shutdown(void)
{
- #ifdef CONFIG_HOTPLUG_CPU
- int cpu;
+ smp_shutdown_nonboot_cpus(reboot_cpu);
- for_each_online_cpu(cpu) {
- if (cpu != smp_processor_id())
- cpu_down(cpu);
- }
- #endif
#ifdef CONFIG_KEXEC
kexec_disable_iosapic();
#endif
machine_halt();
}
+EXPORT_SYMBOL(ia64_delay_loop);
if (from_nid == NUMA_NO_NODE)
return -ENODEV;
- ret = cpu_up(cpuid);
+ ret = cpu_device_up(dev);
/*
* When hot adding memory to memoryless node and enabling a cpu
* on the node, node number of the cpu may internally change.
static int cpu_subsys_offline(struct device *dev)
{
- return cpu_down(dev->id);
+ return cpu_device_down(dev);
}
void unregister_cpu(struct cpu *cpu)
static ssize_t print_cpus_kernel_max(struct device *dev,
struct device_attribute *attr, char *buf)
{
- int n = snprintf(buf, PAGE_SIZE-2, "%d\n", NR_CPUS - 1);
- return n;
+ return sprintf(buf, "%d\n", NR_CPUS - 1);
}
static DEVICE_ATTR(kernel_max, 0444, print_cpus_kernel_max, NULL);
buf[n++] = ',';
if (nr_cpu_ids == total_cpus-1)
- n += snprintf(&buf[n], len - n, "%u", nr_cpu_ids);
+ n += scnprintf(&buf[n], len - n, "%u", nr_cpu_ids);
else
- n += snprintf(&buf[n], len - n, "%u-%d",
+ n += scnprintf(&buf[n], len - n, "%u-%d",
nr_cpu_ids, total_cpus-1);
}
- n += snprintf(&buf[n], len - n, "\n");
+ n += scnprintf(&buf[n], len - n, "\n");
return n;
}
static DEVICE_ATTR(offline, 0444, print_cpus_offline, NULL);
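The snprintf()-to-scnprintf() conversions above matter because the return value is used as a running offset: snprintf() reports the length the output would have had, which can exceed the space that was left, while scnprintf() reports only the bytes actually stored. A minimal sketch of the pattern being protected (the helper name, buffer contents and size are illustrative, not part of the patch):

        /* Illustrative: accumulate formatted output safely into a fixed buffer. */
        static int fill_cpu_list(char *buf, size_t len)
        {
                int n = 0;

                /*
                 * Had this been snprintf() and the output truncated, n could end
                 * up larger than len, len - n would wrap, and the next call could
                 * write past the buffer.
                 */
                n += scnprintf(buf + n, len - n, "%s", "0-31,64-127");

                /* scnprintf() never returns more than len - 1, so n stays valid. */
                n += scnprintf(buf + n, len - n, "\n");

                return n;
        }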
static ssize_t print_cpus_isolated(struct device *dev,
struct device_attribute *attr, char *buf)
{
- int n = 0, len = PAGE_SIZE-2;
+ int n;
cpumask_var_t isolated;
if (!alloc_cpumask_var(&isolated, GFP_KERNEL))
cpumask_andnot(isolated, cpu_possible_mask,
housekeeping_cpumask(HK_FLAG_DOMAIN));
- n = scnprintf(buf, len, "%*pbl\n", cpumask_pr_args(isolated));
+ n = sprintf(buf, "%*pbl\n", cpumask_pr_args(isolated));
free_cpumask_var(isolated);
static ssize_t print_cpus_nohz_full(struct device *dev,
struct device_attribute *attr, char *buf)
{
- int n = 0, len = PAGE_SIZE-2;
-
- n = scnprintf(buf, len, "%*pbl\n", cpumask_pr_args(tick_nohz_full_mask));
-
- return n;
+ return sprintf(buf, "%*pbl\n", cpumask_pr_args(tick_nohz_full_mask));
}
static DEVICE_ATTR(nohz_full, 0444, print_cpus_nohz_full, NULL);
#endif
static void lockdep_acquire_cpus_lock(void)
{
- rwsem_acquire(&cpu_hotplug_lock.rw_sem.dep_map, 0, 0, _THIS_IP_);
+ rwsem_acquire(&cpu_hotplug_lock.dep_map, 0, 0, _THIS_IP_);
}
static void lockdep_release_cpus_lock(void)
{
- rwsem_release(&cpu_hotplug_lock.rw_sem.dep_map, _THIS_IP_);
+ rwsem_release(&cpu_hotplug_lock.dep_map, _THIS_IP_);
}
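The lockdep annotations change shape because cpu_hotplug_lock, a percpu_rw_semaphore, no longer embeds a full rwsem; the lockdep map now sits directly in the percpu_rw_semaphore itself. Roughly, the reworked type this relies on looks as follows (a from-memory sketch to show where dep_map lives, not the authoritative definition):

        struct percpu_rw_semaphore {
                struct rcu_sync         rss;
                unsigned int __percpu   *read_count;
                struct rcuwait          writer;
                wait_queue_head_t       waiters;
                atomic_t                block;
        #ifdef CONFIG_DEBUG_LOCK_ALLOC
                struct lockdep_map      dep_map;        /* what rwsem_acquire() now targets */
        #endif
        };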
/*
return _cpu_down(cpu, 0, target);
}
- static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
+ static int cpu_down(unsigned int cpu, enum cpuhp_state target)
{
int err;
return err;
}
- int cpu_down(unsigned int cpu)
+ /**
+ * cpu_device_down - Bring down a cpu device
+ * @dev: Pointer to the cpu device to offline
+ *
+ * This function is meant to be used by the device core CPU subsystem only.
+ *
+ * Other subsystems should use remove_cpu() instead.
+ */
+ int cpu_device_down(struct device *dev)
{
- return do_cpu_down(cpu, CPUHP_OFFLINE);
+ return cpu_down(dev->id, CPUHP_OFFLINE);
+ }
+
+ int remove_cpu(unsigned int cpu)
+ {
+ int ret;
+
+ lock_device_hotplug();
+ ret = device_offline(get_cpu_device(cpu));
+ unlock_device_hotplug();
+
+ return ret;
+ }
+ EXPORT_SYMBOL_GPL(remove_cpu);
+
+ void smp_shutdown_nonboot_cpus(unsigned int primary_cpu)
+ {
+ unsigned int cpu;
+ int error;
+
+ cpu_maps_update_begin();
+
+ /*
+ * Make certain the cpu I'm about to reboot on is online.
+ *
+ * This matches what migrate_to_reboot_cpu() already does.
+ */
+ if (!cpu_online(primary_cpu))
+ primary_cpu = cpumask_first(cpu_online_mask);
+
+ for_each_online_cpu(cpu) {
+ if (cpu == primary_cpu)
+ continue;
+
+ error = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
+ if (error) {
+ pr_err("Failed to offline CPU%d - error=%d",
+ cpu, error);
+ break;
+ }
+ }
+
+ /*
+ * Ensure all but the reboot CPU are offline.
+ */
+ BUG_ON(num_online_cpus() > 1);
+
+ /*
+ * Make sure the CPUs won't be enabled by someone else after this
+ * point. Kexec will reboot to a new kernel shortly resetting
+ * everything along the way.
+ */
+ cpu_hotplug_disabled++;
+
+ cpu_maps_update_done();
}
- EXPORT_SYMBOL(cpu_down);
#else
#define takedown_cpu NULL
}
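With cpu_down() unexported and reduced to an internal helper that takes an explicit target state, outside users are expected to call remove_cpu() instead, which funnels the request through device_offline() so the device core and the sysfs "online" attribute stay in sync with programmatic hotplug. A minimal sketch of a converted caller (the function name is hypothetical):

        /* Hypothetical driver hook that parks a CPU it manages. */
        static int my_driver_park_cpu(unsigned int cpu)
        {
                /* Takes the device hotplug lock and offlines via device_offline(). */
                return remove_cpu(cpu);
        }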
/*
- * The caller of do_cpu_up might have raced with another
- * caller. Ignore it for now.
+ * The caller of cpu_up() might have raced with another
+ * caller. Nothing to do.
*/
if (st->state >= target)
goto out;
return ret;
}
- static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
+ static int cpu_up(unsigned int cpu, enum cpuhp_state target)
{
int err = 0;
return err;
}
- int cpu_up(unsigned int cpu)
+ /**
+ * cpu_device_up - Bring up a cpu device
+ * @dev: Pointer to the cpu device to online
+ *
+ * This function is meant to be used by the device core CPU subsystem only.
+ *
+ * Other subsystems should use add_cpu() instead.
+ */
+ int cpu_device_up(struct device *dev)
+ {
+ return cpu_up(dev->id, CPUHP_ONLINE);
+ }
+
+ int add_cpu(unsigned int cpu)
+ {
+ int ret;
+
+ lock_device_hotplug();
+ ret = device_online(get_cpu_device(cpu));
+ unlock_device_hotplug();
+
+ return ret;
+ }
+ EXPORT_SYMBOL_GPL(add_cpu);
+
+ /**
+ * bringup_hibernate_cpu - Bring up the CPU that we hibernated on
+ * @sleep_cpu: The cpu we hibernated on and should be brought up.
+ *
+ * On some architectures like arm64, we can hibernate on any CPU, but on
+ * wake-up the CPU we hibernated on might be offline as a side effect of
+ * using maxcpus=, for example.
+ */
+ int bringup_hibernate_cpu(unsigned int sleep_cpu)
{
- return do_cpu_up(cpu, CPUHP_ONLINE);
+ int ret;
+
+ if (!cpu_online(sleep_cpu)) {
+ pr_info("Hibernated on a CPU that is offline! Bringing CPU up.\n");
+ ret = cpu_up(sleep_cpu, CPUHP_ONLINE);
+ if (ret) {
+ pr_err("Failed to bring hibernate-CPU up!\n");
+ return ret;
+ }
+ }
+ return 0;
+ }
+
+ void bringup_nonboot_cpus(unsigned int setup_max_cpus)
+ {
+ unsigned int cpu;
+
+ for_each_present_cpu(cpu) {
+ if (num_online_cpus() >= setup_max_cpus)
+ break;
+ if (!cpu_online(cpu))
+ cpu_up(cpu, CPUHP_ONLINE);
+ }
}
- EXPORT_SYMBOL_GPL(cpu_up);
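Symmetrically, cpu_up() becomes internal, add_cpu() is the exported front end built on device_online(), and bringup_nonboot_cpus() packages the boot-time loop that onlines the remaining present CPUs. Assuming the early SMP init path is the caller (a sketch of the expected call site, not a hunk from this patch):

        void __init smp_init(void)
        {
                /* ... idle threads, CPU count announcement, etc. ... */

                /* Online every other present CPU, honouring maxcpus=. */
                bringup_nonboot_cpus(setup_max_cpus);
        }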
#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;
- int freeze_secondary_cpus(int primary)
+ int __freeze_secondary_cpus(int primary, bool suspend)
{
int cpu, error = 0;
if (cpu == primary)
continue;
- if (pm_wakeup_pending()) {
+ if (suspend && pm_wakeup_pending()) {
pr_info("Wakeup pending. Abort CPU freeze\n");
error = -EBUSY;
break;
goto out;
if (st->state < target)
- ret = do_cpu_up(dev->id, target);
+ ret = cpu_up(dev->id, target);
else
- ret = do_cpu_down(dev->id, target);
+ ret = cpu_down(dev->id, target);
out:
unlock_device_hotplug();
return ret ? ret : count;
rq_lock(rq, &rf);
__hrtick_restart(rq);
- rq->hrtick_csd_pending = 0;
rq_unlock(rq, &rf);
}
hrtimer_set_expires(timer, time);
- if (rq == this_rq()) {
+ if (rq == this_rq())
__hrtick_restart(rq);
- } else if (!rq->hrtick_csd_pending) {
+ else
smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
- rq->hrtick_csd_pending = 1;
- }
}
#else
static void hrtick_rq_init(struct rq *rq)
{
#ifdef CONFIG_SMP
- rq->hrtick_csd_pending = 0;
-
rq->hrtick_csd.flags = 0;
rq->hrtick_csd.func = __hrtick_start;
rq->hrtick_csd.info = rq;
if (task_has_idle_policy(p)) {
load->weight = scale_load(WEIGHT_IDLEPRIO);
load->inv_weight = WMULT_IDLEPRIO;
- p->se.runnable_weight = load->weight;
return;
}
} else {
load->weight = scale_load(sched_prio_to_weight[prio]);
load->inv_weight = sched_prio_to_wmult[prio];
- p->se.runnable_weight = load->weight;
}
}
if (cpumask_equal(p->cpus_ptr, new_mask))
goto out;
- dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
+ /*
+ * Picking a ~random cpu helps in cases where we are changing affinity
+ * for groups of tasks (i.e. cpuset), so that load balancing is not
+ * immediately required to distribute the tasks within their new mask.
+ */
+ dest_cpu = cpumask_any_and_distribute(cpu_valid_mask, new_mask);
if (dest_cpu >= nr_cpu_ids) {
ret = -EINVAL;
goto out;
return ns;
}
+DEFINE_PER_CPU(unsigned long, thermal_pressure);
+
+void arch_set_thermal_pressure(struct cpumask *cpus,
+ unsigned long th_pressure)
+{
+ int cpu;
+
+ for_each_cpu(cpu, cpus)
+ WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure);
+}
+
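arch_set_thermal_pressure() is the hook a cooling driver (cpufreq cooling, for instance) uses to tell the scheduler how much CPU capacity the current thermal cap is eating. A rough sketch of the expected arithmetic, with the frequency variables standing in for whatever the driver actually tracks:

        unsigned long max_capacity, capped_capacity;

        max_capacity = arch_scale_cpu_capacity(cpu);
        /* Capacity still usable under the thermal frequency cap. */
        capped_capacity = max_capacity * capped_freq / max_freq;
        /* Everything clipped away is reported as thermal pressure. */
        arch_set_thermal_pressure(policy->related_cpus,
                                  max_capacity - capped_capacity);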
/*
* This function gets called by the timer code, with HZ frequency.
* We call it with interrupts disabled.
struct rq *rq = cpu_rq(cpu);
struct task_struct *curr = rq->curr;
struct rq_flags rf;
+ unsigned long thermal_pressure;
+
+ arch_scale_freq_tick();
sched_clock_tick();
rq_lock(rq, &rf);
update_rq_clock(rq);
+ thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq));
+ update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure);
curr->sched_class->task_tick(rq, curr, 0);
calc_global_load_tick(rq);
psi_task_tick(rq);
if (cpu_is_offline(cpu))
goto out_unlock;
- curr = rq->curr;
update_rq_clock(rq);
if (!is_idle_task(curr)) {
*/
++*switch_count;
+ psi_sched_switch(prev, next, !task_on_rq_queued(prev));
+
trace_sched_switch(preempt, prev, next);
/* Also unlocks the rq: */
#ifdef CONFIG_64BIT
# define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
# define scale_load(w) ((w) << SCHED_FIXEDPOINT_SHIFT)
-# define scale_load_down(w) ((w) >> SCHED_FIXEDPOINT_SHIFT)
+# define scale_load_down(w) \
+({ \
+ unsigned long __w = (w); \
+ if (__w) \
+ __w = max(2UL, __w >> SCHED_FIXEDPOINT_SHIFT); \
+ __w; \
+})
#else
# define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT)
# define scale_load(w) (w)
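The clamp in the 64-bit scale_load_down() above only matters for very small weights: with SCHED_FIXEDPOINT_SHIFT being 10, any non-zero scaled-up weight below 2048 used to truncate to 0 or 1 and could make the entity all but invisible to load tracking. Worked through the new macro:

        /*
         * scale_load_down(0)    -> 0   (zero is preserved)
         * scale_load_down(15)   -> 2   (was 0 before the clamp)
         * scale_load_down(3072) -> 3   (unchanged)
         */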
dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
}
-extern void dl_change_utilization(struct task_struct *p, u64 new_bw);
extern void init_dl_bw(struct dl_bw *dl_b);
extern int sched_dl_global_validate(void);
extern void sched_dl_do_global(void);
/* CFS-related fields in a runqueue */
struct cfs_rq {
struct load_weight load;
- unsigned long runnable_weight;
unsigned int nr_running;
unsigned int h_nr_running; /* SCHED_{NORMAL,BATCH,IDLE} */
unsigned int idle_h_nr_running; /* SCHED_IDLE */
int nr;
unsigned long load_avg;
unsigned long util_avg;
- unsigned long runnable_sum;
+ unsigned long runnable_avg;
} removed;
#ifdef CONFIG_FAIR_GROUP_SCHED
#ifdef CONFIG_FAIR_GROUP_SCHED
/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se) (!se->my_q)
+
+static inline void se_update_runnable(struct sched_entity *se)
+{
+ if (!entity_is_task(se))
+ se->runnable_weight = se->my_q->h_nr_running;
+}
+
+static inline long se_runnable(struct sched_entity *se)
+{
+ if (entity_is_task(se))
+ return !!se->on_rq;
+ else
+ return se->runnable_weight;
+}
+
#else
#define entity_is_task(se) 1
+
+static inline void se_update_runnable(struct sched_entity *se) {}
+
+static inline long se_runnable(struct sched_entity *se)
+{
+ return !!se->on_rq;
+}
#endif
#ifdef CONFIG_SMP
return scale_load_down(se->load.weight);
}
-static inline long se_runnable(struct sched_entity *se)
-{
- return scale_load_down(se->runnable_weight);
-}
static inline bool sched_asym_prefer(int a, int b)
{
#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
struct sched_avg avg_irq;
#endif
+#ifdef CONFIG_SCHED_THERMAL_PRESSURE
+ struct sched_avg avg_thermal;
+#endif
u64 idle_stamp;
u64 avg_idle;
#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
- int hrtick_csd_pending;
call_single_data_t hrtick_csd;
#endif
struct hrtimer hrtick_timer;
return rq->clock_task;
}
+/*
+ * By default the decay period is the standard PELT decay period (32ms).
+ * Each increment of the decay shift doubles that period:
+ *
+ *   Decay shift   Decay period (ms)
+ *       0                32
+ *       1                64
+ *       2               128
+ *       3               256
+ *       4               512
+ */
+extern int sched_thermal_decay_shift;
+
+static inline u64 rq_clock_thermal(struct rq *rq)
+{
+ return rq_clock_task(rq) >> sched_thermal_decay_shift;
+}
+
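Because the thermal clock advances 2^shift times slower than the task clock, the PELT half-life of the thermal signal stretches by the same factor, which is exactly what the table above enumerates:

        /*
         * effective decay period = 32ms << sched_thermal_decay_shift
         *
         * e.g. with a shift of 2, ~128ms of task-clock time passes per ~32ms
         * of "thermal" PELT time, so avg_thermal decays four times slower.
         */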
static inline void rq_clock_skip_update(struct rq *rq)
{
lockdep_assert_held(&rq->lock);
for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
__sd; __sd = __sd->parent)
-#define for_each_lower_domain(sd) for (; sd; sd = sd->child)
-
/**
* highest_flag_domain - Return highest sched_domain containing flag.
* @cpu: The CPU whose highest level of sched domain is to
extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
extern void init_dl_task_timer(struct sched_dl_entity *dl_se);
extern void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se);
-extern void init_dl_rq_bw_ratio(struct dl_rq *dl_rq);
#define BW_SHIFT 20
#define BW_UNIT (1 << BW_SHIFT)
#endif /* CONFIG_SCHED_HRTICK */
+#ifndef arch_scale_freq_tick
+static __always_inline
+void arch_scale_freq_tick(void)
+{
+}
+#endif
+
#ifndef arch_scale_freq_capacity
static __always_inline
unsigned long arch_scale_freq_capacity(int cpu)
return true;
}
#endif
+
+void swake_up_all_locked(struct swait_queue_head *q);
+void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");
+static bool disable_onoff_at_boot;
+module_param(disable_onoff_at_boot, bool, 0444);
+
static char *torture_type;
static int verbose;
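Because module_param() prefixes the knob with the module name, the new switch introduced above can also be set from the kernel command line when the torture code is built in; for example (assuming the standard "torture" module naming):

        torture.disable_onoff_at_boot=1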
{
unsigned long delta;
int ret;
+ char *s;
unsigned long starttime;
if (!cpu_online(cpu) || !cpu_is_hotpluggable(cpu))
torture_type, cpu);
starttime = jiffies;
(*n_offl_attempts)++;
- ret = cpu_down(cpu);
+ ret = remove_cpu(cpu);
if (ret) {
+ s = "";
+ if (!rcu_inkernel_boot_has_ended() && ret == -EBUSY) {
+ // PCI probe frequently disables hotplug during boot.
+ (*n_offl_attempts)--;
+ s = " (-EBUSY forgiven during boot)";
+ }
if (verbose)
pr_alert("%s" TORTURE_FLAG
- "torture_onoff task: offline %d failed: errno %d\n",
- torture_type, cpu, ret);
+ "torture_onoff task: offline %d failed%s: errno %d\n",
+ torture_type, cpu, s, ret);
} else {
if (verbose > 1)
pr_alert("%s" TORTURE_FLAG
{
unsigned long delta;
int ret;
+ char *s;
unsigned long starttime;
if (cpu_online(cpu) || !cpu_is_hotpluggable(cpu))
torture_type, cpu);
starttime = jiffies;
(*n_onl_attempts)++;
- ret = cpu_up(cpu);
+ ret = add_cpu(cpu);
if (ret) {
+ s = "";
+ if (!rcu_inkernel_boot_has_ended() && ret == -EBUSY) {
+ // PCI probe frequently disables hotplug during boot.
+ (*n_onl_attempts)--;
+ s = " (-EBUSY forgiven during boot)";
+ }
if (verbose)
pr_alert("%s" TORTURE_FLAG
- "torture_onoff task: online %d failed: errno %d\n",
- torture_type, cpu, ret);
+ "torture_onoff task: online %d failed%s: errno %d\n",
+ torture_type, cpu, s, ret);
} else {
if (verbose > 1)
pr_alert("%s" TORTURE_FLAG
for_each_online_cpu(cpu)
maxcpu = cpu;
WARN_ON(maxcpu < 0);
- if (!IS_MODULE(CONFIG_TORTURE_TEST))
+ if (!IS_MODULE(CONFIG_TORTURE_TEST)) {
for_each_possible_cpu(cpu) {
if (cpu_online(cpu))
continue;
- ret = cpu_up(cpu);
+ ret = add_cpu(cpu);
if (ret && verbose) {
pr_alert("%s" TORTURE_FLAG
"%s: Initial online %d: errno %d\n",
__func__, torture_type, cpu, ret);
}
}
+ }
if (maxcpu == 0) {
VERBOSE_TOROUT_STRING("Only one CPU, so CPU-hotplug testing is disabled");
VERBOSE_TOROUT_STRING("torture_onoff end holdoff");
}
while (!torture_must_stop()) {
+ if (disable_onoff_at_boot && !rcu_inkernel_boot_has_ended()) {
+ schedule_timeout_interruptible(HZ / 10);
+ continue;
+ }
cpu = (torture_random(&rand) >> 4) % (maxcpu + 1);
if (!torture_offline(cpu,
&n_offline_attempts, &n_offline_successes,