config: Allow a Medfield selection
author Alan Cox <alan@linux.intel.com>
Fri, 15 Jul 2011 13:39:35 +0000 (14:39 +0100)
committer mgross <mark.gross@intel.com>
Wed, 9 Nov 2011 20:36:43 +0000 (12:36 -0800)
We have nothing much Medfield-specific yet, but this is the boilerplate for it

Allowing Medfield only needs one fix-up for the idle driver - we want the
same idle behaviour as Moorestown, but we don't have the PMU hook

Change-Id: I9e27ffb0c78e59a1beaf292971bc55517099b9ff
Signed-off-by: Alan Cox <alan@linux.intel.com>
arch/x86/Kconfig
drivers/idle/intel_idle.c
drivers/idle/mrst_s0i3.c
drivers/pci/mid_pci.c

index ece4aac..5d96500 100644 (file)
@@ -413,6 +413,23 @@ config X86_MRST
          nor standard legacy replacement devices/features. e.g. Moorestown does
          not contain i8259, i8254, HPET, legacy BIOS, most of the io ports.
 
+config X86_MDFLD
+       bool "Medfield MID platform"
+       depends on PCI
+       depends on PCI_GOANY
+       depends on X86_IO_APIC
+       select APB_TIMER
+       select I2C
+       select SPI
+       select INTEL_SCU_IPC
+       select X86_PLATFORM_DEVICES
+       ---help---
+         Medfield is Intel's Low Power Intel Architecture (LPIA) based Moblin
+         Internet Device (MID) platform.
+         Unlike standard x86 PCs, Medfield does not have many legacy devices
+         nor standard legacy replacement devices/features. e.g. Medfield does
+         not contain i8259, i8254, HPET, legacy BIOS, most of the io ports.
+
 endif
 
 config X86_RDC321X
index bcedf7a..3afe534 100644 (file)
@@ -83,6 +83,7 @@ static unsigned int lapic_timer_reliable_states = (1 << 1);    /* Default to only
 
 static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;
 static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state);
+static int intel_mid_idle(struct cpuidle_device *dev, struct cpuidle_state *state);
 
 static struct cpuidle_state *cpuidle_state_table;
 static int (*cpuidle_device_prepare)(struct cpuidle_device *dev);
@@ -249,7 +250,7 @@ static struct cpuidle_state mrst_cstates[MWAIT_MAX_NUM_CSTATES] = {
                .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
                .exit_latency = 140,
                .target_residency = 560,
-               .enter = &mrst_idle, },
+               .enter = &intel_mid_idle, },
        { /* MRST S0i3 */
                .name = "MRST-S0i3",
                .desc = "MRST S0i3",
@@ -258,7 +259,7 @@ static struct cpuidle_state mrst_cstates[MWAIT_MAX_NUM_CSTATES] = {
                        CPUIDLE_FLAG_INTEL_FAKE,
                .exit_latency = 300, /* XXX */
                .target_residency = 1200, /* XXX */
-               .enter = &mrst_idle },
+               .enter = &intel_mid_idle },
 };
 #warning pri#3 #24 tune mrst_cstates parameters
 #else
@@ -318,6 +319,67 @@ int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state)
        return usec_delta;
 }
 
+/**
+ * intel_mid_idle      -       Idle a MID device
+ * @dev: cpuidle_device
+ * @state: cpuidle state
+ *
+ * This enters S0i3, C6 or C4 depending on what is currently permitted.
+ * C1-C4 are handled via the normal intel_idle entry.
+ */
+int intel_mid_idle(struct cpuidle_device *dev, struct cpuidle_state *state)
+{
+       unsigned long ecx = 1; /* break on interrupt flag */
+       unsigned long eax = (unsigned long)cpuidle_get_statedata(state);
+       ktime_t kt_before, kt_after;
+       s64 usec_delta;
+       int cpu = smp_processor_id();
+
+       local_irq_disable();
+
+       /*
+        * leave_mm() to avoid costly and often unnecessary wakeups
+        * for flushing the user TLB's associated with the active mm.
+        */
+#ifdef CPUIDLE_FLAG_TLB_FLUSHED
+       if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED)
+               leave_mm(cpu);
+#endif /* FIXME */
+       clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
+
+       kt_before = ktime_get_real();
+
+       stop_critical_timings();
+
+       if (!need_resched()) {
+#ifdef CONFIG_X86_MRST
+               if (eax == -1UL) {
+                       do_s0i3();
+               } else
+#endif
+               {
+                       /* Conventional MWAIT */
+
+                       __monitor((void *)&current_thread_info()->flags, 0, 0);
+                       smp_mb();
+                       if (!need_resched())
+                               __mwait(eax, ecx);
+               }
+       }
+
+       start_critical_timings();
+
+       kt_after = ktime_get_real();
+       usec_delta = ktime_to_us(ktime_sub(kt_after, kt_before));
+
+       local_irq_enable();
+
+       clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
+
+       return usec_delta;
+}
+
+
 static void __setup_broadcast_timer(void *arg)
 {
        unsigned long reason = (unsigned long)arg;
@@ -408,10 +470,11 @@ static int intel_idle_probe(void)
                break;
 
        case 0x26:      /* 38 - Lincroft Atom Processor */
-               cpuidle_state_table = mrst_cstates;
-#ifdef CONFIG_X86_INTEL_MID
+#ifdef CONFIG_X86_MRST
                cpuidle_device_prepare = mrst_pmu_validate_cstates;
 #endif
+       case 0x27:      /* 39 - Penwell Atom Processor */
+               cpuidle_state_table = mrst_cstates;
                auto_demotion_disable_flags = ATM_LNC_C6_AUTO_DEMOTE;
                break;
 
index 7e56c32..9f6b291 100644 (file)
@@ -42,63 +42,6 @@ static u64 *wakeup_ptr;
 static phys_addr_t s0i3_trampoline_phys;
 static void *s0i3_trampoline_base;
 
-/**
- * mrst_idle
- * @dev: cpuidle_device
- * @state: cpuidle state
- *
- * This enters S0i3, C6 or C4 depending on what is currently permitted.
- * C1-C4 are handled via the normal intel_idle entry.
- */
-int mrst_idle(struct cpuidle_device *dev, struct cpuidle_state *state)
-{
-       unsigned long ecx = 1; /* break on interrupt flag */
-       unsigned long eax = (unsigned long)cpuidle_get_statedata(state);
-       ktime_t kt_before, kt_after;
-       s64 usec_delta;
-       int cpu = smp_processor_id();
-
-       local_irq_disable();
-
-       /*
-        * leave_mm() to avoid costly and often unnecessary wakeups
-        * for flushing the user TLB's associated with the active mm.
-        */
-#ifdef CPUIDLE_FLAG_TLB_FLUSHED         
-       if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED)
-               leave_mm(cpu);
-#endif /* FIXME */
-       clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
-
-       kt_before = ktime_get_real();
-
-       stop_critical_timings();
-
-       if (!need_resched()) {
-               if (eax == -1UL) {
-                       do_s0i3();
-               } else {
-                       /* Conventional MWAIT */
-
-                       __monitor((void *)&current_thread_info()->flags, 0, 0);
-                       smp_mb();
-                       if (!need_resched())
-                               __mwait(eax, ecx);
-               }
-       }
-
-       start_critical_timings();
-
-       kt_after = ktime_get_real();
-       usec_delta = ktime_to_us(ktime_sub(kt_after, kt_before));
-
-       local_irq_enable();
-
-       clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
-
-       return usec_delta;
-}
-
 /*
  * List of MSRs to be saved/restored, *other* than what is handled by
  * * save_processor_state/restore_processor_state.  * This is
index 35223ad..b2f9426 100644 (file)
@@ -44,7 +44,9 @@ static int mid_pci_sleep_wake(struct pci_dev *dev, bool enable)
 
 static struct pci_platform_pm_ops mid_pci_platform_pm = {
        .is_manageable = mid_pci_power_manageable,
+#ifdef CONFIG_X86_MRST
        .set_state = pmu_pci_set_power_state,
+#endif
        .choose_state = mid_pci_choose_state,
        .can_wakeup = mid_pci_can_wakeup,
        .sleep_wake = mid_pci_sleep_wake,