author | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-07-12 13:42:43 -0700
---|---|---
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-07-12 13:42:43 -0700
commit | 702ed6ef375c19d65f2eeeefd3851476f2c4cee4 (patch) |
tree | fe46588dcc716f64a04310797d9446573614d3fc |
parent | 2f41fc806434f8466bb361570589a3f6099ca65d (diff) |
parent | 58a7295bc8073b9e668c329cb9ceb5b668c2b15d (diff) |
Merge master.kernel.org:/pub/scm/linux/kernel/git/davej/cpufreq
* master.kernel.org:/pub/scm/linux/kernel/git/davej/cpufreq:
[CPUFREQ] Fix sysfs_create_file return value handling
[CPUFREQ] ondemand: fix tickless accounting and software coordination bug
[CPUFREQ] ondemand: add a check to avoid negative load calculation
[CPUFREQ] Keep userspace governor quiet when it is not being used
[CPUFREQ] Longhaul - Proper register access
[CPUFREQ] Kconfig powernow-k8 driver should depend on ACPI P-States driver
[CPUFREQ] Longhaul - Replace ACPI functions with direct I/O
[CPUFREQ] Longhaul - Remove duplicate multipliers
[CPUFREQ] Longhaul - Embedded "conservative"
[CPUFREQ] acpi-cpufreq: Proper ReadModifyWrite of PERF_CTL MSR
[CPUFREQ] check return value of sysfs_create_file
[CPUFREQ] Longhaul - Check ACPI "BM DMA in progress" bit
[CPUFREQ] Longhaul - Move old_ratio to correct place
[CPUFREQ] Longhaul - VT8237 support
[CPUFREQ] Longhaul - Use all kinds of support
[CPUFREQ] powernow-k8: clarify number of cores.
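
The acpi-cpufreq entry above ("Proper ReadModifyWrite of PERF_CTL MSR") moves the masking next to the write itself in do_drv_write(): the low word of the MSR is read, only the bits inside INTEL_MSR_RANGE are swapped for the new P-state control value, and the result is written back, so bits outside the P-state field are no longer clobbered. The sketch below is a plain user-space rendering of that bit arithmetic, not the driver itself; INTEL_MSR_RANGE is assumed to cover the low 16 bits and the MSR is modelled as an ordinary variable.

```c
/*
 * Sketch of the read-modify-write now done in do_drv_write():
 * keep the high bits of PERF_CTL, replace only the range that
 * selects the P-state.  Pure bit arithmetic; rdmsr/wrmsr are
 * represented by a plain variable here.
 */
#include <stdio.h>
#include <stdint.h>

#define INTEL_MSR_RANGE 0xffffu	/* assumption: low 16 bits hold the P-state */

static uint32_t perf_ctl_rmw(uint32_t current_lo, uint32_t new_val)
{
	/* clear the P-state field, then merge in the new value */
	return (current_lo & ~INTEL_MSR_RANGE) | (new_val & INTEL_MSR_RANGE);
}

int main(void)
{
	uint32_t msr_lo = 0xdead0a1b;	/* pretend current PERF_CTL low word */
	uint32_t target = 0x00000613;	/* pretend P-state control value */

	printf("before: %#010x\n", msr_lo);
	msr_lo = perf_ctl_rmw(msr_lo, target);
	printf("after:  %#010x\n", msr_lo);	/* high 16 bits preserved */
	return 0;
}
```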
-rw-r--r-- | arch/i386/kernel/cpu/cpufreq/Kconfig | 13
-rw-r--r-- | arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c | 13
-rw-r--r-- | arch/i386/kernel/cpu/cpufreq/longhaul.c | 209
-rw-r--r-- | arch/i386/kernel/cpu/cpufreq/longhaul.h | 12
-rw-r--r-- | arch/i386/kernel/cpu/cpufreq/powernow-k8.c | 5
-rw-r--r-- | drivers/cpufreq/cpufreq.c | 18
-rw-r--r-- | drivers/cpufreq/cpufreq_ondemand.c | 30
-rw-r--r-- | drivers/cpufreq/cpufreq_userspace.c | 23
8 files changed, 216 insertions, 107 deletions
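
The largest behavioural change in the diff below is in the ondemand governor: idle time is now derived as wall time minus the busy cpustat buckets (so time spent with the tick stopped counts as idle), and the load calculation is clamped so it can never go negative. The following is a compilable stand-in for that arithmetic only; the cpustat struct here is illustrative, not the kernel's kernel_stat layout.

```c
/*
 * Sketch of the new ondemand accounting: idle time is wall time minus
 * everything accounted as busy, so tickless idle periods are counted,
 * and the load calculation is clamped so it can never underflow.
 * The cpustat struct is a stand-in for kstat_cpu(cpu).cpustat.
 */
#include <stdio.h>
#include <stdint.h>

struct cpustat {
	uint64_t user, nice, system, irq, softirq, steal;
};

static uint64_t cpu_idle_time(uint64_t wall_jiffies,
			      const struct cpustat *st, int ignore_nice)
{
	uint64_t busy = st->user + st->system + st->irq +
			st->softirq + st->steal;

	/* when nice load is not ignored, it counts as busy time */
	if (!ignore_nice)
		busy += st->nice;

	return wall_jiffies - busy;
}

static unsigned int load_percent(uint64_t total_ticks, uint64_t idle_ticks)
{
	/* mirrors the "avoid negative load" check: never underflow */
	if (!total_ticks || idle_ticks >= total_ticks)
		return 0;
	return (unsigned int)(100 * (total_ticks - idle_ticks) / total_ticks);
}

int main(void)
{
	struct cpustat st = { .user = 120, .nice = 10, .system = 40,
			      .irq = 5, .softirq = 5, .steal = 0 };
	uint64_t wall = 1000;
	uint64_t idle = cpu_idle_time(wall, &st, 0);

	printf("idle ticks: %llu, load: %u%%\n",
	       (unsigned long long)idle, load_percent(wall, idle));
	return 0;
}
```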
diff --git a/arch/i386/kernel/cpu/cpufreq/Kconfig b/arch/i386/kernel/cpu/cpufreq/Kconfig index e912aae9473c..e77754ca94b4 100644 --- a/arch/i386/kernel/cpu/cpufreq/Kconfig +++ b/arch/i386/kernel/cpu/cpufreq/Kconfig @@ -90,10 +90,17 @@ config X86_POWERNOW_K8 If in doubt, say N. config X86_POWERNOW_K8_ACPI - bool - depends on X86_POWERNOW_K8 && ACPI_PROCESSOR - depends on !(X86_POWERNOW_K8 = y && ACPI_PROCESSOR = m) + bool "ACPI Support" + select ACPI_PROCESSOR + depends on X86_POWERNOW_K8 default y + help + This provides access to the K8s Processor Performance States via ACPI. + This driver is probably required for CPUFreq to work with multi-socket and + SMP systems. It is not required on at least some single-socket yet + multi-core systems, even if SMP is enabled. + + It is safe to say Y here. config X86_GX_SUSPMOD tristate "Cyrix MediaGX/NatSemi Geode Suspend Modulation" diff --git a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c index 10baa3501ed3..18c8b67ea3a7 100644 --- a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c +++ b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c @@ -167,11 +167,13 @@ static void do_drv_read(struct drv_cmd *cmd) static void do_drv_write(struct drv_cmd *cmd) { - u32 h = 0; + u32 lo, hi; switch (cmd->type) { case SYSTEM_INTEL_MSR_CAPABLE: - wrmsr(cmd->addr.msr.reg, cmd->val, h); + rdmsr(cmd->addr.msr.reg, lo, hi); + lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE); + wrmsr(cmd->addr.msr.reg, lo, hi); break; case SYSTEM_IO_CAPABLE: acpi_os_write_port((acpi_io_address)cmd->addr.io.port, @@ -372,7 +374,6 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, struct cpufreq_freqs freqs; cpumask_t online_policy_cpus; struct drv_cmd cmd; - unsigned int msr; unsigned int next_state = 0; /* Index into freq_table */ unsigned int next_perf_state = 0; /* Index into perf table */ unsigned int i; @@ -417,11 +418,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, case SYSTEM_INTEL_MSR_CAPABLE: cmd.type = SYSTEM_INTEL_MSR_CAPABLE; cmd.addr.msr.reg = MSR_IA32_PERF_CTL; - msr = - (u32) perf->states[next_perf_state]. 
- control & INTEL_MSR_RANGE; - cmd.val = get_cur_val(online_policy_cpus); - cmd.val = (cmd.val & ~INTEL_MSR_RANGE) | msr; + cmd.val = (u32) perf->states[next_perf_state].control; break; case SYSTEM_IO_CAPABLE: cmd.type = SYSTEM_IO_CAPABLE; diff --git a/arch/i386/kernel/cpu/cpufreq/longhaul.c b/arch/i386/kernel/cpu/cpufreq/longhaul.c index a3df9c039bd4..8eca59d4c8f4 100644 --- a/arch/i386/kernel/cpu/cpufreq/longhaul.c +++ b/arch/i386/kernel/cpu/cpufreq/longhaul.c @@ -29,6 +29,7 @@ #include <linux/pci.h> #include <linux/slab.h> #include <linux/string.h> +#include <linux/delay.h> #include <asm/msr.h> #include <asm/timex.h> @@ -55,7 +56,6 @@ /* Flags */ #define USE_ACPI_C3 (1 << 1) #define USE_NORTHBRIDGE (1 << 2) -#define USE_VT8235 (1 << 3) static int cpu_model; static unsigned int numscales=16; @@ -63,19 +63,15 @@ static unsigned int fsb; static const struct mV_pos *vrm_mV_table; static const unsigned char *mV_vrm_table; -struct f_msr { - u8 vrm; - u8 pos; -}; -static struct f_msr f_msr_table[32]; static unsigned int highest_speed, lowest_speed; /* kHz */ static unsigned int minmult, maxmult; static int can_scale_voltage; static struct acpi_processor *pr = NULL; static struct acpi_processor_cx *cx = NULL; +static u32 acpi_regs_addr; static u8 longhaul_flags; -static u8 longhaul_pos; +static unsigned int longhaul_index; /* Module parameters */ static int scale_voltage; @@ -144,7 +140,7 @@ static void do_longhaul1(unsigned int clock_ratio_index) rdmsrl(MSR_VIA_BCR2, bcr2.val); /* Enable software clock multiplier */ bcr2.bits.ESOFTBF = 1; - bcr2.bits.CLOCKMUL = clock_ratio_index; + bcr2.bits.CLOCKMUL = clock_ratio_index & 0xff; /* Sync to timer tick */ safe_halt(); @@ -163,14 +159,12 @@ static void do_longhaul1(unsigned int clock_ratio_index) /* For processor with Longhaul MSR */ -static void do_powersaver(int cx_address, unsigned int clock_ratio_index) +static void do_powersaver(int cx_address, unsigned int clock_ratio_index, + unsigned int dir) { union msr_longhaul longhaul; - u8 dest_pos; u32 t; - dest_pos = f_msr_table[clock_ratio_index].pos; - rdmsrl(MSR_VIA_LONGHAUL, longhaul.val); /* Setup new frequency */ longhaul.bits.RevisionKey = longhaul.bits.RevisionID; @@ -178,11 +172,11 @@ static void do_powersaver(int cx_address, unsigned int clock_ratio_index) longhaul.bits.SoftBusRatio4 = (clock_ratio_index & 0x10) >> 4; /* Setup new voltage */ if (can_scale_voltage) - longhaul.bits.SoftVID = f_msr_table[clock_ratio_index].vrm; + longhaul.bits.SoftVID = (clock_ratio_index >> 8) & 0x1f; /* Sync to timer tick */ safe_halt(); /* Raise voltage if necessary */ - if (can_scale_voltage && longhaul_pos < dest_pos) { + if (can_scale_voltage && dir) { longhaul.bits.EnableSoftVID = 1; wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); /* Change voltage */ @@ -199,7 +193,6 @@ static void do_powersaver(int cx_address, unsigned int clock_ratio_index) } longhaul.bits.EnableSoftVID = 0; wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); - longhaul_pos = dest_pos; } /* Change frequency on next halt or sleep */ @@ -220,7 +213,7 @@ static void do_powersaver(int cx_address, unsigned int clock_ratio_index) wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); /* Reduce voltage if necessary */ - if (can_scale_voltage && longhaul_pos > dest_pos) { + if (can_scale_voltage && !dir) { longhaul.bits.EnableSoftVID = 1; wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); /* Change voltage */ @@ -237,7 +230,6 @@ static void do_powersaver(int cx_address, unsigned int clock_ratio_index) } longhaul.bits.EnableSoftVID = 0; wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); - 
longhaul_pos = dest_pos; } } @@ -248,25 +240,28 @@ static void do_powersaver(int cx_address, unsigned int clock_ratio_index) * Sets a new clock ratio. */ -static void longhaul_setstate(unsigned int clock_ratio_index) +static void longhaul_setstate(unsigned int table_index) { + unsigned int clock_ratio_index; int speed, mult; struct cpufreq_freqs freqs; - static unsigned int old_ratio=-1; unsigned long flags; unsigned int pic1_mask, pic2_mask; + u16 bm_status = 0; + u32 bm_timeout = 1000; + unsigned int dir = 0; - if (old_ratio == clock_ratio_index) - return; - old_ratio = clock_ratio_index; - - mult = clock_ratio[clock_ratio_index]; + clock_ratio_index = longhaul_table[table_index].index; + /* Safety precautions */ + mult = clock_ratio[clock_ratio_index & 0x1f]; if (mult == -1) return; - speed = calc_speed(mult); if ((speed > highest_speed) || (speed < lowest_speed)) return; + /* Voltage transition before frequency transition? */ + if (can_scale_voltage && longhaul_index < table_index) + dir = 1; freqs.old = calc_speed(longhaul_get_cpu_mult()); freqs.new = speed; @@ -285,11 +280,24 @@ static void longhaul_setstate(unsigned int clock_ratio_index) outb(0xFF,0xA1); /* Overkill */ outb(0xFE,0x21); /* TMR0 only */ + /* Wait while PCI bus is busy. */ + if (acpi_regs_addr && (longhaul_flags & USE_NORTHBRIDGE + || ((pr != NULL) && pr->flags.bm_control))) { + bm_status = inw(acpi_regs_addr); + bm_status &= 1 << 4; + while (bm_status && bm_timeout) { + outw(1 << 4, acpi_regs_addr); + bm_timeout--; + bm_status = inw(acpi_regs_addr); + bm_status &= 1 << 4; + } + } + if (longhaul_flags & USE_NORTHBRIDGE) { /* Disable AGP and PCI arbiters */ outb(3, 0x22); } else if ((pr != NULL) && pr->flags.bm_control) { - /* Disable bus master arbitration */ + /* Disable bus master arbitration */ acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1); } switch (longhaul_version) { @@ -314,9 +322,9 @@ static void longhaul_setstate(unsigned int clock_ratio_index) if (longhaul_flags & USE_ACPI_C3) { /* Don't allow wakeup */ acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0); - do_powersaver(cx->address, clock_ratio_index); + do_powersaver(cx->address, clock_ratio_index, dir); } else { - do_powersaver(0, clock_ratio_index); + do_powersaver(0, clock_ratio_index, dir); } break; } @@ -336,6 +344,9 @@ static void longhaul_setstate(unsigned int clock_ratio_index) freqs.new = calc_speed(longhaul_get_cpu_mult()); cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + + if (!bm_timeout) + printk(KERN_INFO PFX "Warning: Timeout while waiting for idle PCI bus.\n"); } /* @@ -369,7 +380,8 @@ static int guess_fsb(int mult) static int __init longhaul_get_ranges(void) { - unsigned int j, k = 0; + unsigned int i, j, k = 0; + unsigned int ratio; int mult; /* Get current frequency */ @@ -423,8 +435,7 @@ static int __init longhaul_get_ranges(void) if(!longhaul_table) return -ENOMEM; - for (j=0; j < numscales; j++) { - unsigned int ratio; + for (j = 0; j < numscales; j++) { ratio = clock_ratio[j]; if (ratio == -1) continue; @@ -434,13 +445,41 @@ static int __init longhaul_get_ranges(void) longhaul_table[k].index = j; k++; } + if (k <= 1) { + kfree(longhaul_table); + return -ENODEV; + } + /* Sort */ + for (j = 0; j < k - 1; j++) { + unsigned int min_f, min_i; + min_f = longhaul_table[j].frequency; + min_i = j; + for (i = j + 1; i < k; i++) { + if (longhaul_table[i].frequency < min_f) { + min_f = longhaul_table[i].frequency; + min_i = i; + } + } + if (min_i != j) { + unsigned int temp; + temp = longhaul_table[j].frequency; + 
longhaul_table[j].frequency = longhaul_table[min_i].frequency; + longhaul_table[min_i].frequency = temp; + temp = longhaul_table[j].index; + longhaul_table[j].index = longhaul_table[min_i].index; + longhaul_table[min_i].index = temp; + } + } longhaul_table[k].frequency = CPUFREQ_TABLE_END; - if (!k) { - kfree (longhaul_table); - return -EINVAL; - } + /* Find index we are running on */ + for (j = 0; j < k; j++) { + if (clock_ratio[longhaul_table[j].index & 0x1f] == mult) { + longhaul_index = j; + break; + } + } return 0; } @@ -448,7 +487,7 @@ static int __init longhaul_get_ranges(void) static void __init longhaul_setup_voltagescaling(void) { union msr_longhaul longhaul; - struct mV_pos minvid, maxvid; + struct mV_pos minvid, maxvid, vid; unsigned int j, speed, pos, kHz_step, numvscales; int min_vid_speed; @@ -459,11 +498,11 @@ static void __init longhaul_setup_voltagescaling(void) } if (!longhaul.bits.VRMRev) { - printk (KERN_INFO PFX "VRM 8.5\n"); + printk(KERN_INFO PFX "VRM 8.5\n"); vrm_mV_table = &vrm85_mV[0]; mV_vrm_table = &mV_vrm85[0]; } else { - printk (KERN_INFO PFX "Mobile VRM\n"); + printk(KERN_INFO PFX "Mobile VRM\n"); if (cpu_model < CPU_NEHEMIAH) return; vrm_mV_table = &mobilevrm_mV[0]; @@ -523,7 +562,6 @@ static void __init longhaul_setup_voltagescaling(void) /* Calculate kHz for one voltage step */ kHz_step = (highest_speed - min_vid_speed) / numvscales; - j = 0; while (longhaul_table[j].frequency != CPUFREQ_TABLE_END) { speed = longhaul_table[j].frequency; @@ -531,15 +569,14 @@ static void __init longhaul_setup_voltagescaling(void) pos = (speed - min_vid_speed) / kHz_step + minvid.pos; else pos = minvid.pos; - f_msr_table[longhaul_table[j].index].vrm = mV_vrm_table[pos]; - f_msr_table[longhaul_table[j].index].pos = pos; + longhaul_table[j].index |= mV_vrm_table[pos] << 8; + vid = vrm_mV_table[mV_vrm_table[pos]]; + printk(KERN_INFO PFX "f: %d kHz, index: %d, vid: %d mV\n", speed, j, vid.mV); j++; } - longhaul_pos = maxvid.pos; can_scale_voltage = 1; - printk(KERN_INFO PFX "Voltage scaling enabled. " - "Use of \"conservative\" governor is highly recommended.\n"); + printk(KERN_INFO PFX "Voltage scaling enabled.\n"); } @@ -553,15 +590,44 @@ static int longhaul_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) { unsigned int table_index = 0; - unsigned int new_clock_ratio = 0; + unsigned int i; + unsigned int dir = 0; + u8 vid, current_vid; if (cpufreq_frequency_table_target(policy, longhaul_table, target_freq, relation, &table_index)) return -EINVAL; - new_clock_ratio = longhaul_table[table_index].index & 0xFF; - - longhaul_setstate(new_clock_ratio); + /* Don't set same frequency again */ + if (longhaul_index == table_index) + return 0; + if (!can_scale_voltage) + longhaul_setstate(table_index); + else { + /* On test system voltage transitions exceeding single + * step up or down were turning motherboard off. Both + * "ondemand" and "userspace" are unsafe. C7 is doing + * this in hardware, C3 is old and we need to do this + * in software. 
*/ + i = longhaul_index; + current_vid = (longhaul_table[longhaul_index].index >> 8) & 0x1f; + if (table_index > longhaul_index) + dir = 1; + while (i != table_index) { + vid = (longhaul_table[i].index >> 8) & 0x1f; + if (vid != current_vid) { + longhaul_setstate(i); + current_vid = vid; + msleep(200); + } + if (dir) + i++; + else + i--; + } + longhaul_setstate(table_index); + } + longhaul_index = table_index; return 0; } @@ -590,11 +656,10 @@ static acpi_status longhaul_walk_callback(acpi_handle obj_handle, static int enable_arbiter_disable(void) { struct pci_dev *dev; - int status; + int status = 1; int reg; u8 pci_cmd; - status = 1; /* Find PLE133 host bridge */ reg = 0x78; dev = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8601_0, @@ -627,13 +692,17 @@ static int enable_arbiter_disable(void) return 0; } -static int longhaul_setup_vt8235(void) +static int longhaul_setup_southbridge(void) { struct pci_dev *dev; u8 pci_cmd; /* Find VT8235 southbridge */ dev = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, NULL); + if (dev == NULL) + /* Find VT8237 southbridge */ + dev = pci_get_device(PCI_VENDOR_ID_VIA, + PCI_DEVICE_ID_VIA_8237, NULL); if (dev != NULL) { /* Set transition time to max */ pci_read_config_byte(dev, 0xec, &pci_cmd); @@ -645,6 +714,14 @@ static int longhaul_setup_vt8235(void) pci_read_config_byte(dev, 0xe5, &pci_cmd); pci_cmd |= 1 << 7; pci_write_config_byte(dev, 0xe5, pci_cmd); + /* Get address of ACPI registers block*/ + pci_read_config_byte(dev, 0x81, &pci_cmd); + if (pci_cmd & 1 << 7) { + pci_read_config_dword(dev, 0x88, &acpi_regs_addr); + acpi_regs_addr &= 0xff00; + printk(KERN_INFO PFX "ACPI I/O at 0x%x\n", acpi_regs_addr); + } + pci_dev_put(dev); return 1; } @@ -657,7 +734,6 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy) char *cpuname=NULL; int ret; u32 lo, hi; - int vt8235_present; /* Check what we have on this motherboard */ switch (c->x86_model) { @@ -755,7 +831,7 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy) }; /* Doesn't hurt */ - vt8235_present = longhaul_setup_vt8235(); + longhaul_setup_southbridge(); /* Find ACPI data for processor */ acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, @@ -765,35 +841,26 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy) /* Check ACPI support for C3 state */ if (pr != NULL && longhaul_version == TYPE_POWERSAVER) { cx = &pr->power.states[ACPI_STATE_C3]; - if (cx->address > 0 && cx->latency <= 1000) { + if (cx->address > 0 && cx->latency <= 1000) longhaul_flags |= USE_ACPI_C3; - goto print_support_type; - } } /* Check if northbridge is friendly */ - if (enable_arbiter_disable()) { + if (enable_arbiter_disable()) longhaul_flags |= USE_NORTHBRIDGE; - goto print_support_type; - } - /* Use VT8235 southbridge if present */ - if (longhaul_version == TYPE_POWERSAVER && vt8235_present) { - longhaul_flags |= USE_VT8235; - goto print_support_type; - } + /* Check ACPI support for bus master arbiter disable */ - if ((pr == NULL) || !(pr->flags.bm_control)) { + if (!(longhaul_flags & USE_ACPI_C3 + || longhaul_flags & USE_NORTHBRIDGE) + && ((pr == NULL) || !(pr->flags.bm_control))) { printk(KERN_ERR PFX "No ACPI support. 
Unsupported northbridge.\n"); return -ENODEV; } -print_support_type: if (longhaul_flags & USE_NORTHBRIDGE) - printk (KERN_INFO PFX "Using northbridge support.\n"); - else if (longhaul_flags & USE_VT8235) - printk (KERN_INFO PFX "Using VT8235 support.\n"); - else - printk (KERN_INFO PFX "Using ACPI support.\n"); + printk(KERN_INFO PFX "Using northbridge support.\n"); + if (longhaul_flags & USE_ACPI_C3) + printk(KERN_INFO PFX "Using ACPI support.\n"); ret = longhaul_get_ranges(); if (ret != 0) diff --git a/arch/i386/kernel/cpu/cpufreq/longhaul.h b/arch/i386/kernel/cpu/cpufreq/longhaul.h index 102548f12842..4fcc320997df 100644 --- a/arch/i386/kernel/cpu/cpufreq/longhaul.h +++ b/arch/i386/kernel/cpu/cpufreq/longhaul.h @@ -180,7 +180,7 @@ static const int __initdata ezrat_clock_ratio[32] = { -1, /* 0000 -> RESERVED (10.0x) */ 110, /* 0001 -> 11.0x */ - 120, /* 0010 -> 12.0x */ + -1, /* 0010 -> 12.0x */ -1, /* 0011 -> RESERVED (9.0x)*/ 105, /* 0100 -> 10.5x */ 115, /* 0101 -> 11.5x */ @@ -237,7 +237,7 @@ static const int __initdata ezrat_eblcr[32] = { static const int __initdata nehemiah_clock_ratio[32] = { 100, /* 0000 -> 10.0x */ - 160, /* 0001 -> 16.0x */ + -1, /* 0001 -> 16.0x */ 40, /* 0010 -> 4.0x */ 90, /* 0011 -> 9.0x */ 95, /* 0100 -> 9.5x */ @@ -252,10 +252,10 @@ static const int __initdata nehemiah_clock_ratio[32] = { 75, /* 1101 -> 7.5x */ 85, /* 1110 -> 8.5x */ 120, /* 1111 -> 12.0x */ - 100, /* 0000 -> 10.0x */ + -1, /* 0000 -> 10.0x */ 110, /* 0001 -> 11.0x */ - 120, /* 0010 -> 12.0x */ - 90, /* 0011 -> 9.0x */ + -1, /* 0010 -> 12.0x */ + -1, /* 0011 -> 9.0x */ 105, /* 0100 -> 10.5x */ 115, /* 0101 -> 11.5x */ 125, /* 0110 -> 12.5x */ @@ -267,7 +267,7 @@ static const int __initdata nehemiah_clock_ratio[32] = { 145, /* 1100 -> 14.5x */ 155, /* 1101 -> 15.5x */ -1, /* 1110 -> RESERVED (13.0x) */ - 120, /* 1111 -> 12.0x */ + -1, /* 1111 -> 12.0x */ }; static const int __initdata nehemiah_eblcr[32] = { diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c index 4ade55c5f333..977336834127 100644 --- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c +++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c @@ -1330,8 +1330,9 @@ static int __cpuinit powernowk8_init(void) if (supported_cpus == num_online_cpus()) { printk(KERN_INFO PFX "Found %d %s " - "processors (" VERSION ")\n", supported_cpus, - boot_cpu_data.x86_model_id); + "processors (%d cpu cores) (" VERSION ")\n", + supported_cpus/cpu_data[0].booted_cores, + boot_cpu_data.x86_model_id, supported_cpus); return cpufreq_register_driver(&cpufreq_amd64_driver); } diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index eb37fba9b7ef..0db9e1bda322 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -826,13 +826,21 @@ static int cpufreq_add_dev (struct sys_device * sys_dev) /* set up files for this cpu device */ drv_attr = cpufreq_driver->attr; while ((drv_attr) && (*drv_attr)) { - sysfs_create_file(&policy->kobj, &((*drv_attr)->attr)); + ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr)); + if (ret) + goto err_out_driver_exit; drv_attr++; } - if (cpufreq_driver->get) - sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr); - if (cpufreq_driver->target) - sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr); + if (cpufreq_driver->get){ + ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr); + if (ret) + goto err_out_driver_exit; + } + if (cpufreq_driver->target){ + ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr); + if (ret) + 
goto err_out_driver_exit; + } spin_lock_irqsave(&cpufreq_driver_lock, flags); for_each_cpu_mask(j, policy->cpus) { diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index 8532bb79e5fc..e794527e4925 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -96,15 +96,25 @@ static struct dbs_tuners { static inline cputime64_t get_cpu_idle_time(unsigned int cpu) { - cputime64_t retval; + cputime64_t idle_time; + cputime64_t cur_jiffies; + cputime64_t busy_time; - retval = cputime64_add(kstat_cpu(cpu).cpustat.idle, - kstat_cpu(cpu).cpustat.iowait); + cur_jiffies = jiffies64_to_cputime64(get_jiffies_64()); + busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user, + kstat_cpu(cpu).cpustat.system); - if (dbs_tuners_ins.ignore_nice) - retval = cputime64_add(retval, kstat_cpu(cpu).cpustat.nice); + busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq); + busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq); + busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal); - return retval; + if (!dbs_tuners_ins.ignore_nice) { + busy_time = cputime64_add(busy_time, + kstat_cpu(cpu).cpustat.nice); + } + + idle_time = cputime64_sub(cur_jiffies, busy_time); + return idle_time; } /* @@ -325,7 +335,7 @@ static struct attribute_group dbs_attr_group = { static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) { unsigned int idle_ticks, total_ticks; - unsigned int load; + unsigned int load = 0; cputime64_t cur_jiffies; struct cpufreq_policy *policy; @@ -339,7 +349,8 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) cur_jiffies = jiffies64_to_cputime64(get_jiffies_64()); total_ticks = (unsigned int) cputime64_sub(cur_jiffies, this_dbs_info->prev_cpu_wall); - this_dbs_info->prev_cpu_wall = cur_jiffies; + this_dbs_info->prev_cpu_wall = get_jiffies_64(); + if (!total_ticks) return; /* @@ -370,7 +381,8 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) if (tmp_idle_ticks < idle_ticks) idle_ticks = tmp_idle_ticks; } - load = (100 * (total_ticks - idle_ticks)) / total_ticks; + if (likely(total_ticks > idle_ticks)) + load = (100 * (total_ticks - idle_ticks)) / total_ticks; /* Check for frequency increase */ if (load > dbs_tuners_ins.up_threshold) { diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c index a648970338b0..51bedab6c808 100644 --- a/drivers/cpufreq/cpufreq_userspace.c +++ b/drivers/cpufreq/cpufreq_userspace.c @@ -37,6 +37,7 @@ static unsigned int cpu_set_freq[NR_CPUS]; /* CPU freq desired by userspace */ static unsigned int cpu_is_managed[NR_CPUS]; static DEFINE_MUTEX (userspace_mutex); +static int cpus_using_userspace_governor; #define dprintk(msg...) 
cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "userspace", msg) @@ -47,7 +48,11 @@ userspace_cpufreq_notifier(struct notifier_block *nb, unsigned long val, { struct cpufreq_freqs *freq = data; - dprintk("saving cpu_cur_freq of cpu %u to be %u kHz\n", freq->cpu, freq->new); + if (!cpu_is_managed[freq->cpu]) + return 0; + + dprintk("saving cpu_cur_freq of cpu %u to be %u kHz\n", + freq->cpu, freq->new); cpu_cur_freq[freq->cpu] = freq->new; return 0; @@ -142,6 +147,13 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy, if (rc) goto start_out; + if (cpus_using_userspace_governor == 0) { + cpufreq_register_notifier( + &userspace_cpufreq_notifier_block, + CPUFREQ_TRANSITION_NOTIFIER); + } + cpus_using_userspace_governor++; + cpu_is_managed[cpu] = 1; cpu_min_freq[cpu] = policy->min; cpu_max_freq[cpu] = policy->max; @@ -153,6 +165,13 @@ start_out: break; case CPUFREQ_GOV_STOP: mutex_lock(&userspace_mutex); + cpus_using_userspace_governor--; + if (cpus_using_userspace_governor == 0) { + cpufreq_unregister_notifier( + &userspace_cpufreq_notifier_block, + CPUFREQ_TRANSITION_NOTIFIER); + } + cpu_is_managed[cpu] = 0; cpu_min_freq[cpu] = 0; cpu_max_freq[cpu] = 0; @@ -198,7 +217,6 @@ EXPORT_SYMBOL(cpufreq_gov_userspace); static int __init cpufreq_gov_userspace_init(void) { - cpufreq_register_notifier(&userspace_cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER); return cpufreq_register_governor(&cpufreq_gov_userspace); } @@ -206,7 +224,6 @@ static int __init cpufreq_gov_userspace_init(void) static void __exit cpufreq_gov_userspace_exit(void) { cpufreq_unregister_governor(&cpufreq_gov_userspace); - cpufreq_unregister_notifier(&userspace_cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER); } |
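
The cpufreq_userspace hunks above implement the "keep userspace governor quiet" item: instead of registering the transition notifier unconditionally at module init, a use count registers it when the first policy switches to the governor and unregisters it when the last one leaves, and the callback bails out early for CPUs the governor does not manage. Below is a minimal stand-alone sketch of that register-on-first-use pattern; the notifier calls are stubs and the kernel's userspace_mutex locking is omitted.

```c
/*
 * Sketch of the "keep userspace governor quiet" pattern: a use count
 * decides when the (stubbed) transition notifier is hooked up, so the
 * callback only ever runs while at least one CPU uses the governor.
 */
#include <stdio.h>

static int cpus_using_governor;

static void register_transition_notifier(void)   { puts("notifier registered"); }
static void unregister_transition_notifier(void) { puts("notifier unregistered"); }

static void governor_start(int cpu)
{
	if (cpus_using_governor++ == 0)
		register_transition_notifier();
	printf("cpu%d: governor started\n", cpu);
}

static void governor_stop(int cpu)
{
	printf("cpu%d: governor stopped\n", cpu);
	if (--cpus_using_governor == 0)
		unregister_transition_notifier();
}

int main(void)
{
	governor_start(0);	/* first user registers the notifier */
	governor_start(1);	/* second user does not */
	governor_stop(0);
	governor_stop(1);	/* last user unregisters the notifier */
	return 0;
}
```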