	u32 flags;
};
-/* Actual code that puts the SoC in different idle states */
-static int davinci_enter_idle(struct cpuidle_device *dev,
-			      struct cpuidle_driver *drv,
-			      int index)
-{
-	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
-	struct davinci_ops *ops = cpuidle_get_statedata(state_usage);
-
-	if (ops && ops->enter)
-		ops->enter();
-
-	index = cpuidle_wrap_enter(dev, drv, index,
-				   arm_cpuidle_simple_enter);
-
-	if (ops && ops->exit)
-		ops->exit();
-
-	return index;
-}
-
-static struct cpuidle_driver davinci_idle_driver = {
-	.name = "cpuidle-davinci",
-	.owner = THIS_MODULE,
-	.en_core_tk_irqen = 1,
-	.states[0] = ARM_CPUIDLE_WFI_STATE,
-	.states[1] = {
-		.enter = davinci_enter_idle,
-		.exit_latency = 10,
-		.target_residency = 100000,
-		.flags = CPUIDLE_FLAG_TIME_VALID,
-		.name = "DDR SR",
-		.desc = "WFI and DDR Self Refresh",
-	},
-	.state_count = DAVINCI_CPUIDLE_MAX_STATES,
-};
-
static DEFINE_PER_CPU(struct cpuidle_device, davinci_cpuidle_device);
static void __iomem *ddr2_reg_base;
static bool ddr2_pdown;
	},
};
+/* Actual code that puts the SoC in different idle states */
+static int davinci_enter_idle(struct cpuidle_device *dev,
+			      struct cpuidle_driver *drv,
+			      int index)
+{
+	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
+	struct davinci_ops *ops = cpuidle_get_statedata(state_usage);
+
+	if (ops && ops->enter)
+		ops->enter();
+
+	index = cpuidle_wrap_enter(dev, drv, index,
+				   arm_cpuidle_simple_enter);
+
+	if (ops && ops->exit)
+		ops->exit();
+
+	return index;
+}
+
+static struct cpuidle_driver davinci_idle_driver = {
+	.name = "cpuidle-davinci",
+	.owner = THIS_MODULE,
+	.en_core_tk_irqen = 1,
+	.states[0] = ARM_CPUIDLE_WFI_STATE,
+	.states[1] = {
+		.enter = davinci_enter_idle,
+		.exit_latency = 10,
+		.target_residency = 100000,
+		.flags = CPUIDLE_FLAG_TIME_VALID,
+		.name = "DDR SR",
+		.desc = "WFI and DDR Self Refresh",
+	},
+	.state_count = DAVINCI_CPUIDLE_MAX_STATES,
+};
+
static int __init davinci_cpuidle_probe(struct platform_device *pdev)
{
	int ret;