* This mechanism only works when every CPU has an RT task running:
* if one CPU has no RT task, the RT tasks on the other CPUs will
* borrow CPU time from it and end up using more than 95% of the
- * CPU time. To make 'avoid staration' work, takes a nap here.
+ * CPU time. To make starvation avoidance work, take a nap here.
*/
if (do_sleep)
schedule_timeout_killable(HZ * idle_pct / 100);
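For context, the nap above is taken at the end of each pass through the power-saving kthread's main loop. A minimal sketch of that shape, not the driver's exact loop (the idle-injection details are elided and idle_pct stands in for the module parameter):

static int power_saving_thread_sketch(void *data)
{
        unsigned int idle_pct = 5;      /* assumed tunable: share of each period left to other tasks */

        while (!kthread_should_stop()) {
                /* ... inject RT-priority idle time for the rest of the period ... */

                /* give the CPU back so non-RT tasks are not starved */
                schedule_timeout_killable(HZ * idle_pct / 100);
        }
        return 0;
}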
static unsigned int ps_tsk_num;
static int create_power_saving_task(void)
{
+ int rc = -ENOMEM;
+
ps_tsks[ps_tsk_num] = kthread_run(power_saving_thread,
(void *)(unsigned long)ps_tsk_num,
"power_saving/%d", ps_tsk_num);
- if (ps_tsks[ps_tsk_num]) {
+ rc = IS_ERR(ps_tsks[ps_tsk_num]) ? PTR_ERR(ps_tsks[ps_tsk_num]) : 0;
+ if (!rc)
ps_tsk_num++;
- return 0;
- }
- return -EINVAL;
+ else
+ ps_tsks[ps_tsk_num] = NULL;
+
+ return rc;
}
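The rework above relies on the kthread_run() contract: on failure it returns an ERR_PTR()-encoded error rather than NULL, so the old non-NULL check could never catch a failure. A generic sketch of the idiom (worker and start_worker are illustrative names, not part of the driver):

static struct task_struct *worker;

static int start_worker(int (*fn)(void *), void *arg)
{
        worker = kthread_run(fn, arg, "worker/%d", 0);
        if (IS_ERR(worker)) {
                int rc = PTR_ERR(worker);

                worker = NULL;  /* don't keep a stale ERR_PTR around */
                return rc;
        }
        return 0;
}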
static void destroy_power_saving_task(void)
if (ps_tsk_num > 0) {
ps_tsk_num--;
kthread_stop(ps_tsks[ps_tsk_num]);
+ ps_tsks[ps_tsk_num] = NULL;
}
}
}
}
-static int acpi_pad_idle_cpus(unsigned int num_cpus)
+static void acpi_pad_idle_cpus(unsigned int num_cpus)
{
get_online_cpus();
set_power_saving_task_num(num_cpus);
put_online_cpus();
- return 0;
}
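For reference, get_online_cpus()/put_online_cpus() take the CPU-hotplug read lock, so the set of online CPUs cannot change while the task count is being adjusted. A sketch of the pattern (the clamp shown here is illustrative, not the driver's code):

static void adjust_tasks_under_hotplug_lock(unsigned int num_cpus)
{
        get_online_cpus();
        /* num_online_cpus() and cpu_online_mask are stable in this section */
        if (num_cpus > num_online_cpus())
                num_cpus = num_online_cpus();
        /* ... create or destroy per-CPU power-saving threads here ... */
        put_online_cpus();
}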
static uint32_t acpi_pad_idle_cpus_num(void)
static int acpi_pad_pur(acpi_handle handle, int *num_cpus)
{
struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
- acpi_status status;
union acpi_object *package;
int rev, num, ret = -EINVAL;
- status = acpi_evaluate_object(handle, "_PUR", NULL, &buffer);
- if (ACPI_FAILURE(status))
+ if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PUR", NULL, &buffer)))
+ return -EINVAL;
+
+ if (!buffer.length || !buffer.pointer)
return -EINVAL;
+
package = buffer.pointer;
if (package->type != ACPI_TYPE_PACKAGE || package->package.count != 2)
goto out;
rev = package->package.elements[0].integer.value;
num = package->package.elements[1].integer.value;
- if (rev != 1)
+ if (rev != 1 || num < 0)
goto out;
*num_cpus = num;
ret = 0;
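For reference, the checks above match the object layout that _PUR (Processor Utilization Request) is expected to return, per the ACPI Processor Aggregator Device definition; a comment-only sketch:

/*
 * Expected _PUR return, as validated above:
 *
 *      Package () {
 *              1,              // RevisionID, must be 1
 *              NumProcessors   // number of CPUs the platform asks to be idled
 *      }
 */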
static void acpi_pad_handle_notify(acpi_handle handle)
{
- int num_cpus, ret;
+ int num_cpus;
uint32_t idle_cpus;
mutex_lock(&isolated_cpus_lock);
mutex_unlock(&isolated_cpus_lock);
return;
}
- ret = acpi_pad_idle_cpus(num_cpus);
+ acpi_pad_idle_cpus(num_cpus);
idle_cpus = acpi_pad_idle_cpus_num();
- if (!ret)
- acpi_pad_ost(handle, 0, idle_cpus);
- else
- acpi_pad_ost(handle, 1, 0);
+ acpi_pad_ost(handle, 0, idle_cpus);
mutex_unlock(&isolated_cpus_lock);
}