Diffstat (limited to 'drivers/acpi/processor_idle.c')
-rw-r--r--  drivers/acpi/processor_idle.c | 203
1 file changed, 143 insertions(+), 60 deletions(-)
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 70d8a6ec0920..be2dae52f6fa 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -95,22 +95,57 @@ static int set_max_cstate(struct dmi_system_id *id)
 }
 
 static struct dmi_system_id __initdata processor_power_dmi_table[] = {
-	{set_max_cstate, "IBM ThinkPad R40e", {
-	 DMI_MATCH(DMI_BIOS_VENDOR,
-		   "IBM"),
-	 DMI_MATCH(DMI_BIOS_VERSION,
-		   "1SET60WW")},
-	 (void *)1},
-	{set_max_cstate, "Medion 41700", {
-	 DMI_MATCH(DMI_BIOS_VENDOR,
-		   "Phoenix Technologies LTD"),
-	 DMI_MATCH(DMI_BIOS_VERSION,
-		   "R01-A1J")}, (void *)1},
-	{set_max_cstate, "Clevo 5600D", {
-	 DMI_MATCH(DMI_BIOS_VENDOR,
-		   "Phoenix Technologies LTD"),
-	 DMI_MATCH(DMI_BIOS_VERSION,
-		   "SHE845M0.86C.0013.D.0302131307")},
+	{ set_max_cstate, "IBM ThinkPad R40e", {
+	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
+	  DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW")}, (void *)1},
+	{ set_max_cstate, "IBM ThinkPad R40e", {
+	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
+	  DMI_MATCH(DMI_BIOS_VERSION,"1SET43WW") }, (void*)1},
+	{ set_max_cstate, "IBM ThinkPad R40e", {
+	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
+	  DMI_MATCH(DMI_BIOS_VERSION,"1SET45WW") }, (void*)1},
+	{ set_max_cstate, "IBM ThinkPad R40e", {
+	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
+	  DMI_MATCH(DMI_BIOS_VERSION,"1SET47WW") }, (void*)1},
+	{ set_max_cstate, "IBM ThinkPad R40e", {
+	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
+	  DMI_MATCH(DMI_BIOS_VERSION,"1SET50WW") }, (void*)1},
+	{ set_max_cstate, "IBM ThinkPad R40e", {
+	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
+	  DMI_MATCH(DMI_BIOS_VERSION,"1SET52WW") }, (void*)1},
+	{ set_max_cstate, "IBM ThinkPad R40e", {
+	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
+	  DMI_MATCH(DMI_BIOS_VERSION,"1SET55WW") }, (void*)1},
+	{ set_max_cstate, "IBM ThinkPad R40e", {
+	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
+	  DMI_MATCH(DMI_BIOS_VERSION,"1SET56WW") }, (void*)1},
+	{ set_max_cstate, "IBM ThinkPad R40e", {
+	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
+	  DMI_MATCH(DMI_BIOS_VERSION,"1SET59WW") }, (void*)1},
+	{ set_max_cstate, "IBM ThinkPad R40e", {
+	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
+	  DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW") }, (void*)1},
+	{ set_max_cstate, "IBM ThinkPad R40e", {
+	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
+	  DMI_MATCH(DMI_BIOS_VERSION,"1SET61WW") }, (void*)1},
+	{ set_max_cstate, "IBM ThinkPad R40e", {
+	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
+	  DMI_MATCH(DMI_BIOS_VERSION,"1SET62WW") }, (void*)1},
+	{ set_max_cstate, "IBM ThinkPad R40e", {
+	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
+	  DMI_MATCH(DMI_BIOS_VERSION,"1SET64WW") }, (void*)1},
+	{ set_max_cstate, "IBM ThinkPad R40e", {
+	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
+	  DMI_MATCH(DMI_BIOS_VERSION,"1SET65WW") }, (void*)1},
+	{ set_max_cstate, "IBM ThinkPad R40e", {
+	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
+	  DMI_MATCH(DMI_BIOS_VERSION,"1SET68WW") }, (void*)1},
+	{ set_max_cstate, "Medion 41700", {
+	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
+	  DMI_MATCH(DMI_BIOS_VERSION,"R01-A1J")}, (void *)1},
+	{ set_max_cstate, "Clevo 5600D", {
+	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
+	  DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
 	 (void *)2},
 	{},
 };
@@ -169,15 +204,11 @@ acpi_processor_power_activate(struct acpi_processor *pr,
 
 static void acpi_safe_halt(void)
 {
-	int polling = test_thread_flag(TIF_POLLING_NRFLAG);
-	if (polling) {
-		clear_thread_flag(TIF_POLLING_NRFLAG);
-		smp_mb__after_clear_bit();
-	}
+	clear_thread_flag(TIF_POLLING_NRFLAG);
+	smp_mb__after_clear_bit();
 	if (!need_resched())
 		safe_halt();
-	if (polling)
-		set_thread_flag(TIF_POLLING_NRFLAG);
+	set_thread_flag(TIF_POLLING_NRFLAG);
 }
 
 static atomic_t c3_cpu_count;
@@ -278,6 +309,17 @@ static void acpi_processor_idle(void)
 		}
 	}
 
+#ifdef CONFIG_HOTPLUG_CPU
+	/*
+	 * Check for P_LVL2_UP flag before entering C2 and above on
+	 * an SMP system. We do it here instead of doing it at _CST/P_LVL
+	 * detection phase, to work cleanly with logical CPU hotplug.
+	 */
+	if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
+	    !pr->flags.has_cst && !acpi_fadt.plvl2_up)
+		cx = &pr->power.states[ACPI_STATE_C1];
+#endif
+
 	cx->usage++;
 
 	/*
@@ -285,6 +327,16 @@ static void acpi_processor_idle(void)
 	 * ------
 	 * Invoke the current Cx state to put the processor to sleep.
 	 */
+	if (cx->type == ACPI_STATE_C2 || cx->type == ACPI_STATE_C3) {
+		clear_thread_flag(TIF_POLLING_NRFLAG);
+		smp_mb__after_clear_bit();
+		if (need_resched()) {
+			set_thread_flag(TIF_POLLING_NRFLAG);
+			local_irq_enable();
+			return;
+		}
+	}
+
 	switch (cx->type) {
 
 	case ACPI_STATE_C1:
@@ -317,6 +369,7 @@ static void acpi_processor_idle(void)
 		t2 = inl(acpi_fadt.xpm_tmr_blk.address);
 		/* Re-enable interrupts */
 		local_irq_enable();
+		set_thread_flag(TIF_POLLING_NRFLAG);
 		/* Compute time (ticks) that we were actually asleep */
 		sleep_ticks =
 		    ticks_elapsed(t1, t2) - cx->latency_ticks - C2_OVERHEAD;
@@ -356,6 +409,7 @@ static void acpi_processor_idle(void)
 
 		/* Re-enable interrupts */
 		local_irq_enable();
+		set_thread_flag(TIF_POLLING_NRFLAG);
 		/* Compute time (ticks) that we were actually asleep */
 		sleep_ticks =
 		    ticks_elapsed(t1, t2) - cx->latency_ticks - C3_OVERHEAD;
@@ -368,6 +422,15 @@ static void acpi_processor_idle(void)
 
 	next_state = pr->power.state;
 
+#ifdef CONFIG_HOTPLUG_CPU
+	/* Don't do promotion/demotion */
+	if ((cx->type == ACPI_STATE_C1) && (num_online_cpus() > 1) &&
+	    !pr->flags.has_cst && !acpi_fadt.plvl2_up) {
+		next_state = cx;
+		goto end;
+	}
+#endif
+
 	/*
 	 * Promotion?
 	 * ----------
@@ -522,17 +585,18 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
 	if (!pr->pblk)
 		return_VALUE(-ENODEV);
 
-	memset(pr->power.states, 0, sizeof(pr->power.states));
-
 	/* if info is obtained from pblk/fadt, type equals state */
-	pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
 	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
 	pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;
 
-	/* the C0 state only exists as a filler in our array,
-	 * and all processors need to support C1 */
-	pr->power.states[ACPI_STATE_C0].valid = 1;
-	pr->power.states[ACPI_STATE_C1].valid = 1;
+#ifndef CONFIG_HOTPLUG_CPU
+	/*
+	 * Check for P_LVL2_UP flag before entering C2 and above on
+	 * an SMP system.
+	 */
+	if ((num_online_cpus() > 1) && !acpi_fadt.plvl2_up)
+		return_VALUE(-ENODEV);
+#endif
 
 	/* determine C2 and C3 address from pblk */
 	pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
@@ -554,12 +618,11 @@ static int acpi_processor_get_power_info_default_c1(struct acpi_processor *pr)
 {
 	ACPI_FUNCTION_TRACE("acpi_processor_get_power_info_default_c1");
 
+	/* Zero initialize all the C-states info. */
 	memset(pr->power.states, 0, sizeof(pr->power.states));
-	/* if info is obtained from pblk/fadt, type equals state */
+	/* set the first C-State to C1 */
 	pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
-	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
-	pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;
 	/* the C0 state only exists as a filler in our array,
 	 * and all processors need to support C1 */
 	pr->power.states[ACPI_STATE_C0].valid = 1;
 	pr->power.states[ACPI_STATE_C1].valid = 1;
@@ -573,6 +636,7 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
 {
 	acpi_status status = 0;
 	acpi_integer count;
+	int current_count;
 	int i;
 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 	union acpi_object *cst;
@@ -582,10 +646,12 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
 	if (nocst)
 		return_VALUE(-ENODEV);
 
-	pr->power.count = 0;
-	for (i = 0; i < ACPI_PROCESSOR_MAX_POWER; i++)
-		memset(&(pr->power.states[i]), 0,
-		       sizeof(struct acpi_processor_cx));
+	current_count = 1;
+
+	/* Zero initialize C2 onwards and prepare for fresh CST lookup */
+	for (i = 2; i < ACPI_PROCESSOR_MAX_POWER; i++)
+		memset(&(pr->power.states[i]), 0,
+		       sizeof(struct acpi_processor_cx));
 
 	status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
 	if (ACPI_FAILURE(status)) {
@@ -613,16 +679,6 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
 		goto end;
 	}
 
-	/* We support up to ACPI_PROCESSOR_MAX_POWER. */
-	if (count > ACPI_PROCESSOR_MAX_POWER) {
-		printk(KERN_WARNING
-		       "Limiting number of power states to max (%d)\n",
-		       ACPI_PROCESSOR_MAX_POWER);
-		printk(KERN_WARNING
-		       "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
-		count = ACPI_PROCESSOR_MAX_POWER;
-	}
-
 	/* Tell driver that at least _CST is supported. */
 	pr->flags.has_cst = 1;
 
@@ -666,7 +722,7 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
 		    (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO))
 			continue;
 
-		if ((cx.type < ACPI_STATE_C1) || (cx.type > ACPI_STATE_C3))
+		if ((cx.type < ACPI_STATE_C2) || (cx.type > ACPI_STATE_C3))
 			continue;
 
 		obj = (union acpi_object *)&(element->package.elements[2]);
@@ -681,16 +737,29 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
 
 		cx.power = obj->integer.value;
 
-		(pr->power.count)++;
-		memcpy(&(pr->power.states[pr->power.count]), &cx, sizeof(cx));
+		current_count++;
+		memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));
+
+		/*
+		 * We support total ACPI_PROCESSOR_MAX_POWER - 1
+		 * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1)
+		 */
+		if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
+			printk(KERN_WARNING
+			       "Limiting number of power states to max (%d)\n",
+			       ACPI_PROCESSOR_MAX_POWER);
+			printk(KERN_WARNING
+			       "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
+			break;
+		}
 	}
 
 	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
-			  pr->power.count));
+			  current_count));
 
 	/* Validate number of power states discovered */
-	if (pr->power.count < 2)
-		status = -ENODEV;
+	if (current_count < 2)
+		status = -EFAULT;
 
       end:
 	acpi_os_free(buffer.pointer);
@@ -806,6 +875,15 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
 	unsigned int i;
 	unsigned int working = 0;
 
+#ifdef ARCH_APICTIMER_STOPS_ON_C3
+	struct cpuinfo_x86 *c = cpu_data + pr->id;
+	cpumask_t mask = cpumask_of_cpu(pr->id);
+
+	if (c->x86_vendor == X86_VENDOR_INTEL) {
+		on_each_cpu(switch_ipi_to_APIC_timer, &mask, 1, 1);
+	}
+#endif
+
 	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
 		struct acpi_processor_cx *cx = &pr->power.states[i];
 
@@ -820,6 +898,12 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
 
 		case ACPI_STATE_C3:
 			acpi_processor_power_verify_c3(pr, cx);
+#ifdef ARCH_APICTIMER_STOPS_ON_C3
+			if (c->x86_vendor == X86_VENDOR_INTEL) {
+				on_each_cpu(switch_APIC_timer_to_ipi,
+						&mask, 1, 1);
+			}
+#endif
 			break;
 		}
 
@@ -840,12 +924,13 @@ static int acpi_processor_get_power_info(struct acpi_processor *pr)
 	/* NOTE: the idle thread may not be running while calling
 	 * this function */
 
+	/* Adding C1 state */
+	acpi_processor_get_power_info_default_c1(pr);
 	result = acpi_processor_get_power_info_cst(pr);
-	if ((result) || (acpi_processor_power_verify(pr) < 2)) {
-		result = acpi_processor_get_power_info_fadt(pr);
-		if ((result) || (acpi_processor_power_verify(pr) < 2))
-			result = acpi_processor_get_power_info_default_c1(pr);
-	}
+	if (result == -ENODEV)
+		acpi_processor_get_power_info_fadt(pr);
+
+	pr->power.count = acpi_processor_power_verify(pr);
 
 	/*
 	 * Set Default Policy
@@ -1014,8 +1099,6 @@ int acpi_processor_power_init(struct acpi_processor *pr,
 		}
 	}
 
-	acpi_processor_power_init_pdc(&(pr->power), pr->id);
-	acpi_processor_set_pdc(pr, pr->power.pdc);
 	acpi_processor_get_power_info(pr);
 
 	/*
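
Note on the idle-path hunks above: TIF_POLLING_NRFLAG tells the scheduler that an idle CPU is polling need_resched() and so does not need a wakeup IPI. Once the CPU actually halts (C1) or enters C2/C3 that promise no longer holds, which is why the flag is cleared and need_resched() re-checked after a memory barrier before sleeping; otherwise a remote wakeup could be lost. For illustration only, this is roughly how acpi_safe_halt() reads with the patch applied (the body comes from the hunk at -169 above; the comments are added here and are not part of the patch):

static void acpi_safe_halt(void)
{
	/* Stop advertising "idle is polling"; wakeups now need an IPI. */
	clear_thread_flag(TIF_POLLING_NRFLAG);
	/* Order the flag clear against the need_resched() test below. */
	smp_mb__after_clear_bit();
	if (!need_resched())
		safe_halt();	/* on x86: sti; hlt - resumes on the next interrupt */
	set_thread_flag(TIF_POLLING_NRFLAG);
}

The C2/C3 path in acpi_processor_idle() uses the same clear / barrier / re-check sequence before entering the state, and the hunks at -317 and -356 restore the flag once the state has been exited.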