author    | Rafael J. Wysocki <rafael.j.wysocki@intel.com> | 2016-05-11 19:11:26 +0200
committer | Rafael J. Wysocki <rafael.j.wysocki@intel.com> | 2016-05-11 22:58:38 +0200
commit    | 1aa7a6e2b8105f22a5f7d6def281f776459c95ba (patch)
tree      | 5b386334f71df41f50d601973d9a6f69262d2479
parent    | 8edb0a6e48d147bb2aa466c58e03c52d2b0d6ee7 (diff)
intel_pstate: Clean up get_target_pstate_use_performance()
The comments and the core_busy variable name in
get_target_pstate_use_performance() are totally confusing,
so modify them to reflect what's going on.
The results of the computations should be the same as before.
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
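
For orientation, the quantity behind the new name can be written out in plain arithmetic. The sketch below mirrors the scaling formula from the patch using doubles instead of the kernel's fixed-point helpers (mul_ext_fp(), div_fp()); the sample values are made up for illustration and are not taken from the commit.

```c
/* Illustrative only -- not part of the patch. */
#include <stdio.h>

int main(void)
{
	double core_avg_perf = 0.62;	/* assumed average perf ratio for the last sample */
	int max_pstate = 24;		/* maximum non-turbo P-state (hypothetical) */
	int current_pstate = 20;	/* P-state requested last time (hypothetical) */

	/* Same formula as the patched code, minus the fixed-point plumbing. */
	double perf_scaled = core_avg_perf * (100.0 * max_pstate / current_pstate);

	printf("perf_scaled = %.1f\n", perf_scaled);	/* prints 74.4 */
	return 0;
}
```

Scaling by max_pstate/current_pstate is what turns the raw average performance into a percentage relative to the P-state that was actually requested, which is what the old comment tried to describe as "normalizing core_busy".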
-rw-r--r-- | drivers/cpufreq/intel_pstate.c | 27
1 file changed, 11 insertions(+), 16 deletions(-)
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index ff5c591578ee..b76a98dd9988 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -1259,43 +1259,38 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
 
 static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
 {
-	int32_t core_busy, max_pstate, current_pstate, sample_ratio;
+	int32_t perf_scaled, max_pstate, current_pstate, sample_ratio;
 	u64 duration_ns;
 
 	/*
-	 * core_busy is the ratio of actual performance to max
-	 * max_pstate is the max non turbo pstate available
-	 * current_pstate was the pstate that was requested during
-	 * 	the last sample period.
-	 *
-	 * We normalize core_busy, which was our actual percent
-	 * performance to what we requested during the last sample
-	 * period. The result will be a percentage of busy at a
-	 * specified pstate.
+	 * perf_scaled is the average performance during the last sampling
+	 * period scaled by the ratio of the maximum P-state to the P-state
+	 * requested last time (in percent). That measures the system's
+	 * response to the previous P-state selection.
	 */
 	max_pstate = cpu->pstate.max_pstate_physical;
 	current_pstate = cpu->pstate.current_pstate;
-	core_busy = mul_ext_fp(cpu->sample.core_avg_perf,
+	perf_scaled = mul_ext_fp(cpu->sample.core_avg_perf,
 			       div_fp(100 * max_pstate, current_pstate));
 
 	/*
 	 * Since our utilization update callback will not run unless we are
 	 * in C0, check if the actual elapsed time is significantly greater (3x)
 	 * than our sample interval. If it is, then we were idle for a long
-	 * enough period of time to adjust our busyness.
+	 * enough period of time to adjust our performance metric.
 	 */
 	duration_ns = cpu->sample.time - cpu->last_sample_time;
 	if ((s64)duration_ns > pid_params.sample_rate_ns * 3) {
 		sample_ratio = div_fp(pid_params.sample_rate_ns, duration_ns);
-		core_busy = mul_fp(core_busy, sample_ratio);
+		perf_scaled = mul_fp(perf_scaled, sample_ratio);
 	} else {
 		sample_ratio = div_fp(100 * cpu->sample.mperf, cpu->sample.tsc);
 		if (sample_ratio < int_tofp(1))
-			core_busy = 0;
+			perf_scaled = 0;
 	}
 
-	cpu->sample.busy_scaled = core_busy;
-	return cpu->pstate.current_pstate - pid_calc(&cpu->pid, core_busy);
+	cpu->sample.busy_scaled = perf_scaled;
+	return cpu->pstate.current_pstate - pid_calc(&cpu->pid, perf_scaled);
 }
 
 static inline void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
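
The scaled metric can still be attenuated before it reaches the PID: if the utilization callback ran much later than the nominal sample interval, the code scales the metric down proportionally, and if the CPU was busy for less than 1% of the interval, it treats the period as idle. A minimal user-space sketch of that branch, again with doubles standing in for the fixed-point helpers and purely hypothetical numbers:

```c
/* Illustrative only -- not the driver's code. */
#include <stdint.h>
#include <stdio.h>

static double adjust_for_idle(double perf_scaled, uint64_t duration_ns,
			      uint64_t sample_rate_ns,
			      uint64_t mperf, uint64_t tsc)
{
	if (duration_ns > sample_rate_ns * 3) {
		/* Callback ran late, so the CPU idled: scale the metric down. */
		perf_scaled *= (double)sample_rate_ns / duration_ns;
	} else if (100.0 * mperf / tsc < 1.0) {
		/* Busy for under 1% of the interval: treat it as fully idle. */
		perf_scaled = 0.0;
	}
	return perf_scaled;
}

int main(void)
{
	/* 10 ms nominal sample rate, but 45 ms actually elapsed. */
	double p = adjust_for_idle(74.4, 45000000, 10000000,
				   2000000, 120000000);
	printf("perf_scaled after idle adjustment: %.1f\n", p);	/* ~16.5 */
	return 0;
}
```

The adjusted value is stored in sample.busy_scaled and handed to pid_calc(); the PID output is then subtracted from the current P-state to produce the next request.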