Merge ../linus
author Dave Jones <davej@redhat.com>
Tue, 5 Sep 2006 21:16:33 +0000 (17:16 -0400)
committer Dave Jones <davej@redhat.com>
Tue, 5 Sep 2006 21:16:33 +0000 (17:16 -0400)
arch/i386/kernel/cpu/cpufreq/longhaul.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/cpufreq_ondemand.c

index 4f2c3ae..43bbf94 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/moduleparam.h>
 #include <linux/init.h>
 #include <linux/cpufreq.h>
+#include <linux/pci.h>
 #include <linux/slab.h>
 #include <linux/string.h>
 
@@ -60,10 +61,11 @@ static int can_scale_voltage;
 static int vrmrev;
 static struct acpi_processor *pr = NULL;
 static struct acpi_processor_cx *cx = NULL;
+static int port22_en = 0;
 
 /* Module parameters */
 static int dont_scale_voltage;
-
+static int ignore_latency = 0;
 
 #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "longhaul", msg)
 
@@ -124,10 +126,9 @@ static int longhaul_get_cpu_mult(void)
 
 /* For processor with BCR2 MSR */
 
-static void do_longhaul1(int cx_address, unsigned int clock_ratio_index)
+static void do_longhaul1(unsigned int clock_ratio_index)
 {
        union msr_bcr2 bcr2;
-       u32 t;
 
        rdmsrl(MSR_VIA_BCR2, bcr2.val);
        /* Enable software clock multiplier */
@@ -136,13 +137,11 @@ static void do_longhaul1(int cx_address, unsigned int clock_ratio_index)
 
        /* Sync to timer tick */
        safe_halt();
-       ACPI_FLUSH_CPU_CACHE();
        /* Change frequency on next halt or sleep */
        wrmsrl(MSR_VIA_BCR2, bcr2.val);
-       /* Invoke C3 */
-       inb(cx_address);
-       /* Dummy op - must do something useless after P_LVL3 read */
-       t = inl(acpi_fadt.xpm_tmr_blk.address);
+       /* Invoke transition */
+       ACPI_FLUSH_CPU_CACHE();
+       halt();
 
        /* Disable software clock multiplier */
        local_irq_disable();
@@ -166,9 +165,9 @@ static void do_powersaver(int cx_address, unsigned int clock_ratio_index)
 
        /* Sync to timer tick */
        safe_halt();
-       ACPI_FLUSH_CPU_CACHE();
        /* Change frequency on next halt or sleep */
        wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+       ACPI_FLUSH_CPU_CACHE();
        /* Invoke C3 */
        inb(cx_address);
        /* Dummy op - must do something useless after P_LVL3 read */
@@ -227,10 +226,13 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
        outb(0xFF,0xA1);        /* Overkill */
        outb(0xFE,0x21);        /* TMR0 only */
 
-       /* Disable bus master arbitration */
-       if (pr->flags.bm_check) {
+       if (pr->flags.bm_control) {
+               /* Disable bus master arbitration */
                acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1,
                                  ACPI_MTX_DO_NOT_LOCK);
+       } else if (port22_en) {
+               /* Disable AGP and PCI arbiters */
+               outb(3, 0x22);
        }
 
        switch (longhaul_version) {
@@ -244,7 +246,7 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
         */
        case TYPE_LONGHAUL_V1:
        case TYPE_LONGHAUL_V2:
-               do_longhaul1(cx->address, clock_ratio_index);
+               do_longhaul1(clock_ratio_index);
                break;
 
        /*
@@ -259,14 +261,20 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
         * to work in practice.
         */
        case TYPE_POWERSAVER:
+               /* Don't allow wakeup */
+               acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0,
+                                 ACPI_MTX_DO_NOT_LOCK);
                do_powersaver(cx->address, clock_ratio_index);
                break;
        }
 
-       /* Enable bus master arbitration */
-       if (pr->flags.bm_check) {
+       if (pr->flags.bm_control) {
+               /* Enable bus master arbitration */
                acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0,
                                  ACPI_MTX_DO_NOT_LOCK);
+       } else if (port22_en) {
+               /* Enable arbiters */
+               outb(0, 0x22);
        }
 
        outb(pic2_mask,0xA1);   /* restore mask */
@@ -540,21 +548,33 @@ static acpi_status longhaul_walk_callback(acpi_handle obj_handle,
        return 1;
 }
 
+/* VIA don't support PM2 reg, but have something similar */
+static int enable_arbiter_disable(void)
+{
+       struct pci_dev *dev;
+       u8 pci_cmd;
+
+       /* Find PLE133 host bridge */
+       dev = pci_find_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8601_0, NULL);
+       if (dev != NULL) {
+               /* Enable access to port 0x22 */
+               pci_read_config_byte(dev, 0x78, &pci_cmd);
+               if ( !(pci_cmd & 1<<7) ) {
+                       pci_cmd |= 1<<7;
+                       pci_write_config_byte(dev, 0x78, pci_cmd);
+               }
+               return 1;
+       }
+       return 0;
+}
+
 static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
 {
        struct cpuinfo_x86 *c = cpu_data;
        char *cpuname=NULL;
        int ret;
 
-       /* Check ACPI support for C3 state */
-       acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX,
-                        &longhaul_walk_callback, NULL, (void *)&pr);
-       if (pr == NULL) goto err_acpi;
-
-       cx = &pr->power.states[ACPI_STATE_C3];
-       if (cx->address == 0 || cx->latency > 1000) goto err_acpi;
-
-       /* Now check what we have on this motherboard */
+       /* Check what we have on this motherboard */
        switch (c->x86_model) {
        case 6:
                cpu_model = CPU_SAMUEL;
@@ -636,6 +656,30 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
                break;
        };
 
+       /* Find ACPI data for processor */
+       acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX,
+                           &longhaul_walk_callback, NULL, (void *)&pr);
+       if (pr == NULL)
+               goto err_acpi;
+
+       if (longhaul_version == TYPE_POWERSAVER) {
+               /* Check ACPI support for C3 state */
+               cx = &pr->power.states[ACPI_STATE_C3];
+               if (cx->address == 0 ||
+                  (cx->latency > 1000 && ignore_latency == 0) )
+                       goto err_acpi;
+
+       } else {
+               /* Check ACPI support for bus master arbiter disable */
+               if (!pr->flags.bm_control) {
+                       if (!enable_arbiter_disable()) {
+                               printk(KERN_ERR PFX "No ACPI support. No VT8601 host bridge. Aborting.\n");
+                               return -ENODEV;
+                       } else
+                               port22_en = 1;
+               }
+       }
+
        ret = longhaul_get_ranges();
        if (ret != 0)
                return ret;
@@ -731,6 +775,8 @@ static void __exit longhaul_exit(void)
 
 module_param (dont_scale_voltage, int, 0644);
 MODULE_PARM_DESC(dont_scale_voltage, "Don't scale voltage of processor");
+module_param(ignore_latency, int, 0644);
+MODULE_PARM_DESC(ignore_latency, "Skip ACPI C3 latency test");
 
 MODULE_AUTHOR ("Dave Jones <davej@codemonkey.org.uk>");
 MODULE_DESCRIPTION ("Longhaul driver for VIA Cyrix processors.");
@@ -738,4 +784,3 @@ MODULE_LICENSE ("GPL");
 
 late_initcall(longhaul_init);
 module_exit(longhaul_exit);
-
index b3df613..d35a9f0 100644 (file)
@@ -32,7 +32,7 @@
 #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, "cpufreq-core", msg)
 
 /**
- * The "cpufreq driver" - the arch- or hardware-dependend low
+ * The "cpufreq driver" - the arch- or hardware-dependent low
  * level driver of CPUFreq support, and its spinlock. This lock
  * also protects the cpufreq_cpu_data array.
  */
index 52cf1f0..bf8aa45 100644 (file)
@@ -55,6 +55,10 @@ struct cpu_dbs_info_s {
        struct cpufreq_policy *cur_policy;
        struct work_struct work;
        unsigned int enable;
+       struct cpufreq_frequency_table *freq_table;
+       unsigned int freq_lo;
+       unsigned int freq_lo_jiffies;
+       unsigned int freq_hi_jiffies;
 };
 static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
 
@@ -72,15 +76,15 @@ static DEFINE_MUTEX(dbs_mutex);
 
 static struct workqueue_struct *kondemand_wq;
 
-struct dbs_tuners {
+static struct dbs_tuners {
        unsigned int sampling_rate;
        unsigned int up_threshold;
        unsigned int ignore_nice;
-};
-
-static struct dbs_tuners dbs_tuners_ins = {
+       unsigned int powersave_bias;
+} dbs_tuners_ins = {
        .up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
        .ignore_nice = 0,
+       .powersave_bias = 0,
 };
 
 static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
@@ -96,6 +100,70 @@ static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
        return retval;
 }
 
+/*
+ * Find right freq to be set now with powersave_bias on.
+ * Returns the freq_hi to be used right now and will set freq_hi_jiffies,
+ * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs.
+ */
+static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
+                                         unsigned int freq_next,
+                                         unsigned int relation)
+{
+       unsigned int freq_req, freq_reduc, freq_avg;
+       unsigned int freq_hi, freq_lo;
+       unsigned int index = 0;
+       unsigned int jiffies_total, jiffies_hi, jiffies_lo;
+       struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, policy->cpu);
+
+       if (!dbs_info->freq_table) {
+               dbs_info->freq_lo = 0;
+               dbs_info->freq_lo_jiffies = 0;
+               return freq_next;
+       }
+
+       cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
+                       relation, &index);
+       freq_req = dbs_info->freq_table[index].frequency;
+       freq_reduc = freq_req * dbs_tuners_ins.powersave_bias / 1000;
+       freq_avg = freq_req - freq_reduc;
+
+       /* Find freq bounds for freq_avg in freq_table */
+       index = 0;
+       cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
+                       CPUFREQ_RELATION_H, &index);
+       freq_lo = dbs_info->freq_table[index].frequency;
+       index = 0;
+       cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
+                       CPUFREQ_RELATION_L, &index);
+       freq_hi = dbs_info->freq_table[index].frequency;
+
+       /* Find out how long we have to be in hi and lo freqs */
+       if (freq_hi == freq_lo) {
+               dbs_info->freq_lo = 0;
+               dbs_info->freq_lo_jiffies = 0;
+               return freq_lo;
+       }
+       jiffies_total = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
+       jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
+       jiffies_hi += ((freq_hi - freq_lo) / 2);
+       jiffies_hi /= (freq_hi - freq_lo);
+       jiffies_lo = jiffies_total - jiffies_hi;
+       dbs_info->freq_lo = freq_lo;
+       dbs_info->freq_lo_jiffies = jiffies_lo;
+       dbs_info->freq_hi_jiffies = jiffies_hi;
+       return freq_hi;
+}
+
+static void ondemand_powersave_bias_init(void)
+{
+       int i;
+       for_each_online_cpu(i) {
+               struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, i);
+               dbs_info->freq_table = cpufreq_frequency_get_table(i);
+               dbs_info->freq_lo = 0;
+       }
+}
+
 /************************** sysfs interface ************************/
 static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
 {
@@ -124,6 +192,7 @@ static ssize_t show_##file_name                                             \
 show_one(sampling_rate, sampling_rate);
 show_one(up_threshold, up_threshold);
 show_one(ignore_nice_load, ignore_nice);
+show_one(powersave_bias, powersave_bias);
 
 static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
                const char *buf, size_t count)
@@ -198,6 +267,27 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
        return count;
 }
 
+static ssize_t store_powersave_bias(struct cpufreq_policy *unused,
+               const char *buf, size_t count)
+{
+       unsigned int input;
+       int ret;
+       ret = sscanf(buf, "%u", &input);
+
+       if (ret != 1)
+               return -EINVAL;
+
+       if (input > 1000)
+               input = 1000;
+
+       mutex_lock(&dbs_mutex);
+       dbs_tuners_ins.powersave_bias = input;
+       ondemand_powersave_bias_init();
+       mutex_unlock(&dbs_mutex);
+
+       return count;
+}
+
 #define define_one_rw(_name) \
 static struct freq_attr _name = \
 __ATTR(_name, 0644, show_##_name, store_##_name)
@@ -205,6 +295,7 @@ __ATTR(_name, 0644, show_##_name, store_##_name)
 define_one_rw(sampling_rate);
 define_one_rw(up_threshold);
 define_one_rw(ignore_nice_load);
+define_one_rw(powersave_bias);
 
 static struct attribute * dbs_attributes[] = {
        &sampling_rate_max.attr,
@@ -212,6 +303,7 @@ static struct attribute * dbs_attributes[] = {
        &sampling_rate.attr,
        &up_threshold.attr,
        &ignore_nice_load.attr,
+       &powersave_bias.attr,
        NULL
 };
 
@@ -234,6 +326,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
        if (!this_dbs_info->enable)
                return;
 
+       this_dbs_info->freq_lo = 0;
        policy = this_dbs_info->cur_policy;
        cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
        total_ticks = (unsigned int) cputime64_sub(cur_jiffies,
@@ -274,11 +367,18 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
        /* Check for frequency increase */
        if (load > dbs_tuners_ins.up_threshold) {
                /* if we are already at full speed then break out early */
-               if (policy->cur == policy->max)
-                       return;
-
-               __cpufreq_driver_target(policy, policy->max,
-                       CPUFREQ_RELATION_H);
+               if (!dbs_tuners_ins.powersave_bias) {
+                       if (policy->cur == policy->max)
+                               return;
+
+                       __cpufreq_driver_target(policy, policy->max,
+                               CPUFREQ_RELATION_H);
+               } else {
+                       int freq = powersave_bias_target(policy, policy->max,
+                                       CPUFREQ_RELATION_H);
+                       __cpufreq_driver_target(policy, freq,
+                               CPUFREQ_RELATION_L);
+               }
                return;
        }
 
@@ -293,37 +393,64 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
         * policy. To be safe, we focus 10 points under the threshold.
         */
        if (load < (dbs_tuners_ins.up_threshold - 10)) {
-               unsigned int freq_next;
-               freq_next = (policy->cur * load) /
+               unsigned int freq_next = (policy->cur * load) /
                        (dbs_tuners_ins.up_threshold - 10);
-
-               __cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L);
+               if (!dbs_tuners_ins.powersave_bias) {
+                       __cpufreq_driver_target(policy, freq_next,
+                                       CPUFREQ_RELATION_L);
+               } else {
+                       int freq = powersave_bias_target(policy, freq_next,
+                                       CPUFREQ_RELATION_L);
+                       __cpufreq_driver_target(policy, freq,
+                               CPUFREQ_RELATION_L);
+               }
        }
 }
 
+/* Sampling types */
+enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
+
 static void do_dbs_timer(void *data)
 {
        unsigned int cpu = smp_processor_id();
        struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
+       /* We want all CPUs to do sampling nearly on same jiffy */
+       int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
+       delay -= jiffies % delay;
 
        if (!dbs_info->enable)
                return;
-
-       lock_cpu_hotplug();
-       dbs_check_cpu(dbs_info);
-       unlock_cpu_hotplug();
-       queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work,
-                       usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
+       /* Common NORMAL_SAMPLE setup */
+       INIT_WORK(&dbs_info->work, do_dbs_timer, (void *)DBS_NORMAL_SAMPLE);
+       if (!dbs_tuners_ins.powersave_bias ||
+           (unsigned long) data == DBS_NORMAL_SAMPLE) {
+               lock_cpu_hotplug();
+               dbs_check_cpu(dbs_info);
+               unlock_cpu_hotplug();
+               if (dbs_info->freq_lo) {
+                       /* Setup timer for SUB_SAMPLE */
+                       INIT_WORK(&dbs_info->work, do_dbs_timer,
+                                       (void *)DBS_SUB_SAMPLE);
+                       delay = dbs_info->freq_hi_jiffies;
+               }
+       } else {
+               __cpufreq_driver_target(dbs_info->cur_policy,
+                                       dbs_info->freq_lo,
+                                       CPUFREQ_RELATION_H);
+       }
+       queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
 }
 
 static inline void dbs_timer_init(unsigned int cpu)
 {
        struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
+       /* We want all CPUs to do sampling nearly on same jiffy */
+       int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
+       delay -= jiffies % delay;
 
-       INIT_WORK(&dbs_info->work, do_dbs_timer, 0);
-       queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work,
-                       usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
-       return;
+       ondemand_powersave_bias_init();
+       INIT_WORK(&dbs_info->work, do_dbs_timer, NULL);
+       queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
 }
 
 static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)