author		David S. Miller <davem@davemloft.net>	2016-10-30 12:42:58 -0400
committer	David S. Miller <davem@davemloft.net>	2016-10-30 12:42:58 -0400
commit		27058af401e49d88a905df000dd26f443fcfa8ce (patch)
tree		819f32113d3b8374b9fbf72e2202d4c4d4511a60 /drivers/cpufreq
parent		357f4aae859b5d74554b0ccbb18556f1df4166c3 (diff)
parent		2a26d99b251b8625d27aed14e97fc10707a3a81f (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Mostly simple overlapping changes.
For example, David Ahern's adjacency list revamp in 'net-next'
conflicted with an adjacency list traversal bug fix in 'net'.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--	drivers/cpufreq/cppc_cpufreq.c		8
-rw-r--r--	drivers/cpufreq/cpufreq_conservative.c	19
-rw-r--r--	drivers/cpufreq/intel_pstate.c		81
3 files changed, 87 insertions, 21 deletions
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 1b2f28f69a81..4852d9efe74e 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -80,11 +80,17 @@ static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
 {
 	struct cppc_cpudata *cpu;
 	struct cpufreq_freqs freqs;
+	u32 desired_perf;
 	int ret = 0;
 
 	cpu = all_cpu_data[policy->cpu];
 
-	cpu->perf_ctrls.desired_perf = (u64)target_freq * policy->max / cppc_dmi_max_khz;
+	desired_perf = (u64)target_freq * cpu->perf_caps.highest_perf / cppc_dmi_max_khz;
+	/* Return if it is exactly the same perf */
+	if (desired_perf == cpu->perf_ctrls.desired_perf)
+		return ret;
+
+	cpu->perf_ctrls.desired_perf = desired_perf;
 	freqs.old = policy->cur;
 	freqs.new = target_freq;
 
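The cppc_cpufreq.c change above does two things: it scales the requested frequency by the CPU's own highest_perf capability rather than by policy->max, and it skips the platform write entirely when the computed desired_perf value has not changed. Below is a minimal userspace sketch of that logic; struct perf_state, set_target() and every number in it are invented for illustration and are not the driver's actual types or values:

#include <stdint.h>
#include <stdio.h>

struct perf_state {
	uint32_t highest_perf;	/* highest perf from the CPU's _CPC data */
	uint32_t desired_perf;	/* last value written to the platform */
};

static void set_target(struct perf_state *s, uint32_t target_khz,
		       uint32_t dmi_max_khz)
{
	/* Scale by this CPU's own highest perf, not the policy maximum. */
	uint32_t desired = (uint64_t)target_khz * s->highest_perf / dmi_max_khz;

	/* Return if it is exactly the same perf: skip the platform write. */
	if (desired == s->desired_perf)
		return;

	s->desired_perf = desired;
	printf("writing desired_perf=%u\n", (unsigned)desired);
}

int main(void)
{
	struct perf_state s = { .highest_perf = 300, .desired_perf = 0 };

	set_target(&s, 2000000, 3000000);	/* writes 200 */
	set_target(&s, 2000000, 3000000);	/* identical request, skipped */
	return 0;
}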
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 18da4f8051d3..13475890d792 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -17,6 +17,7 @@
 struct cs_policy_dbs_info {
 	struct policy_dbs_info policy_dbs;
 	unsigned int down_skip;
+	unsigned int requested_freq;
 };
 
 static inline struct cs_policy_dbs_info *to_dbs_info(struct policy_dbs_info *policy_dbs)
@@ -61,6 +62,7 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
 {
 	struct policy_dbs_info *policy_dbs = policy->governor_data;
 	struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
+	unsigned int requested_freq = dbs_info->requested_freq;
 	struct dbs_data *dbs_data = policy_dbs->dbs_data;
 	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
 	unsigned int load = dbs_update(policy);
@@ -72,10 +74,16 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
 	if (cs_tuners->freq_step == 0)
 		goto out;
 
+	/*
+	 * If requested_freq is out of range, it is likely that the limits
+	 * changed in the meantime, so fall back to current frequency in that
+	 * case.
+	 */
+	if (requested_freq > policy->max || requested_freq < policy->min)
+		requested_freq = policy->cur;
+
 	/* Check for frequency increase */
 	if (load > dbs_data->up_threshold) {
-		unsigned int requested_freq = policy->cur;
-
 		dbs_info->down_skip = 0;
 
 		/* if we are already at full speed then break out early */
@@ -83,8 +91,11 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
 			goto out;
 
 		requested_freq += get_freq_target(cs_tuners, policy);
+		if (requested_freq > policy->max)
+			requested_freq = policy->max;
 
 		__cpufreq_driver_target(policy, requested_freq, CPUFREQ_RELATION_H);
+		dbs_info->requested_freq = requested_freq;
 		goto out;
 	}
 
@@ -95,7 +106,7 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
 	/* Check for frequency decrease */
 	if (load < cs_tuners->down_threshold) {
-		unsigned int freq_target, requested_freq = policy->cur;
+		unsigned int freq_target;
 
 		/*
 		 * if we cannot reduce the frequency anymore, break out early
 		 */
@@ -109,6 +120,7 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
 			requested_freq = policy->min;
 
 		__cpufreq_driver_target(policy, requested_freq, CPUFREQ_RELATION_L);
+		dbs_info->requested_freq = requested_freq;
 	}
 
  out:
@@ -287,6 +299,7 @@ static void cs_start(struct cpufreq_policy *policy)
 	struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data);
 
 	dbs_info->down_skip = 0;
+	dbs_info->requested_freq = policy->cur;
 }
 
 static struct dbs_governor cs_governor = {
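The conservative governor change replaces the old behaviour of restarting from policy->cur on every sample with a requested_freq value that persists across governor invocations, falling back to policy->cur only when the stored value has drifted outside the current [policy->min, policy->max] limits. A rough standalone model of that bookkeeping follows; cs_step(), the thresholds and the frequencies are all invented for illustration:

#include <stdio.h>

struct policy { unsigned int min, max, cur; };

static unsigned int requested_freq;	/* survives across invocations */

static unsigned int cs_step(struct policy *p, unsigned int load,
			    unsigned int up, unsigned int down,
			    unsigned int step)
{
	unsigned int freq = requested_freq;

	/* Limits may have changed since the last sample. */
	if (freq > p->max || freq < p->min)
		freq = p->cur;

	if (load > up && freq < p->max) {
		freq += step;
		if (freq > p->max)
			freq = p->max;	/* never overshoot the cap */
	} else if (load < down && freq > p->min) {
		freq = (freq > p->min + step) ? freq - step : p->min;
	}

	requested_freq = freq;		/* remember for the next sample */
	return freq;
}

int main(void)
{
	struct policy p = { .min = 800000, .max = 2400000, .cur = 800000 };

	requested_freq = p.cur;
	printf("%u\n", cs_step(&p, 95, 80, 20, 200000));	/* 1000000 */
	p.max = 900000;			/* limits change under us */
	printf("%u\n", cs_step(&p, 95, 80, 20, 200000));	/* clamped: 900000 */
	return 0;
}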
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 806f2039571e..4737520ec823 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -179,6 +179,7 @@ struct _pid {
 /**
  * struct cpudata -	Per CPU instance data storage
  * @cpu:		CPU number for this instance data
+ * @policy:		CPUFreq policy value
  * @update_util:	CPUFreq utility callback information
  * @update_util_set:	CPUFreq utility callback is set
  * @iowait_boost:	iowait-related boost fraction
@@ -201,6 +202,7 @@ struct _pid {
 struct cpudata {
 	int cpu;
 
+	unsigned int policy;
 	struct update_util_data update_util;
 	bool update_util_set;
 
@@ -225,7 +227,7 @@ struct cpudata {
 static struct cpudata **all_cpu_data;
 
 /**
- * struct pid_adjust_policy - Stores static PID configuration data
+ * struct pstate_adjust_policy - Stores static PID configuration data
  * @sample_rate_ms:	PID calculation sample rate in ms
  * @sample_rate_ns:	Sample rate calculation in ns
  * @deadband:		PID deadband
@@ -562,12 +564,12 @@ static void intel_pstate_hwp_set(const struct cpumask *cpumask)
 	int min, hw_min, max, hw_max, cpu, range, adj_range;
 	u64 value, cap;
 
-	rdmsrl(MSR_HWP_CAPABILITIES, cap);
-	hw_min = HWP_LOWEST_PERF(cap);
-	hw_max = HWP_HIGHEST_PERF(cap);
-	range = hw_max - hw_min;
-
 	for_each_cpu(cpu, cpumask) {
+		rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
+		hw_min = HWP_LOWEST_PERF(cap);
+		hw_max = HWP_HIGHEST_PERF(cap);
+		range = hw_max - hw_min;
+
 		rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
 		adj_range = limits->min_perf_pct * range / 100;
 		min = hw_min + adj_range;
@@ -1142,10 +1144,8 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
 	*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
 }
 
-static void intel_pstate_set_min_pstate(struct cpudata *cpu)
+static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
 {
-	int pstate = cpu->pstate.min_pstate;
-
 	trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
 	cpu->pstate.current_pstate = pstate;
 	/*
@@ -1157,6 +1157,20 @@ static void intel_pstate_set_min_pstate(struct cpudata *cpu)
 			      pstate_funcs.get_val(cpu, pstate));
 }
 
+static void intel_pstate_set_min_pstate(struct cpudata *cpu)
+{
+	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
+}
+
+static void intel_pstate_max_within_limits(struct cpudata *cpu)
+{
+	int min_pstate, max_pstate;
+
+	update_turbo_state();
+	intel_pstate_get_min_max(cpu, &min_pstate, &max_pstate);
+	intel_pstate_set_pstate(cpu, max_pstate);
+}
+
 static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
 {
 	cpu->pstate.min_pstate = pstate_funcs.get_min();
@@ -1232,6 +1246,7 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
 {
 	struct sample *sample = &cpu->sample;
 	int32_t busy_frac, boost;
+	int target, avg_pstate;
 
 	busy_frac = div_fp(sample->mperf, sample->tsc);
 
@@ -1242,7 +1257,26 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
 		busy_frac = boost;
 
 	sample->busy_scaled = busy_frac * 100;
-	return get_avg_pstate(cpu) - pid_calc(&cpu->pid, sample->busy_scaled);
+
+	target = limits->no_turbo || limits->turbo_disabled ?
+			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
+	target += target >> 2;
+	target = mul_fp(target, busy_frac);
+	if (target < cpu->pstate.min_pstate)
+		target = cpu->pstate.min_pstate;
+
+	/*
+	 * If the average P-state during the previous cycle was higher than the
+	 * current target, add 50% of the difference to the target to reduce
+	 * possible performance oscillations and offset possible performance
+	 * loss related to moving the workload from one CPU to another within
+	 * a package/module.
+	 */
+	avg_pstate = get_avg_pstate(cpu);
+	if (avg_pstate > target)
+		target += (avg_pstate - target) >> 1;
+
+	return target;
 }
 
 static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
@@ -1251,10 +1285,11 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
 	u64 duration_ns;
 
 	/*
-	 * perf_scaled is the average performance during the last sampling
-	 * period scaled by the ratio of the maximum P-state to the P-state
-	 * requested last time (in percent). That measures the system's
-	 * response to the previous P-state selection.
+	 * perf_scaled is the ratio of the average P-state during the last
+	 * sampling period to the P-state requested last time (in percent).
+	 *
+	 * That measures the system's response to the previous P-state
+	 * selection.
 	 */
 	max_pstate = cpu->pstate.max_pstate_physical;
 	current_pstate = cpu->pstate.current_pstate;
@@ -1304,7 +1339,8 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
 
 	from = cpu->pstate.current_pstate;
 
-	target_pstate = pstate_funcs.get_target_pstate(cpu);
+	target_pstate = cpu->policy == CPUFREQ_POLICY_PERFORMANCE ?
+		cpu->pstate.turbo_pstate : pstate_funcs.get_target_pstate(cpu);
 
 	intel_pstate_update_pstate(cpu, target_pstate);
 
@@ -1470,7 +1506,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 	pr_debug("set_policy cpuinfo.max %u policy->max %u\n",
 		 policy->cpuinfo.max_freq, policy->max);
 
-	cpu = all_cpu_data[0];
+	cpu = all_cpu_data[policy->cpu];
+	cpu->policy = policy->policy;
+
 	if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
 	    policy->max < policy->cpuinfo.max_freq &&
 	    policy->max > cpu->pstate.max_pstate * cpu->pstate.scaling) {
@@ -1478,7 +1516,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 		policy->max = policy->cpuinfo.max_freq;
 	}
 
-	if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
+	if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
 		limits = &performance_limits;
 		if (policy->max >= policy->cpuinfo.max_freq) {
 			pr_debug("set performance\n");
@@ -1514,6 +1552,15 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 	limits->max_perf = round_up(limits->max_perf, FRAC_BITS);
 
  out:
+	if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
+		/*
+		 * NOHZ_FULL CPUs need this as the governor callback may not
+		 * be invoked on them.
+		 */
+		intel_pstate_clear_update_util_hook(policy->cpu);
+		intel_pstate_max_within_limits(cpu);
+	}
+
 	intel_pstate_set_update_util_hook(policy->cpu);
 
 	intel_pstate_hwp_set_policy(policy);
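The most substantial change here is the reworked get_target_pstate_use_cpu_load(): instead of feeding the busy percentage through the PID controller, it scales the maximum available P-state (turbo, unless turbo is disabled) by 1.25 and by the measured busy fraction, clamps the result to the minimum P-state, then pulls it halfway up toward the previous cycle's average P-state to damp oscillations. A simplified userspace model follows; the driver works in 8.24 fixed point via mul_fp()/div_fp(), but a plain double and invented P-state numbers are used here to keep the sketch short:

#include <stdio.h>

struct pstate_caps { int min, max, turbo; };

static int target_pstate(const struct pstate_caps *c, double busy_frac,
			 int avg_pstate, int turbo_disabled)
{
	int target = turbo_disabled ? c->max : c->turbo;

	target += target >> 2;			/* 25% headroom above full busy */
	target = (int)(target * busy_frac);
	if (target < c->min)
		target = c->min;

	/*
	 * Pull halfway toward the recent average to damp oscillations,
	 * e.g. after a task migrates between CPUs within a package.
	 */
	if (avg_pstate > target)
		target += (avg_pstate - target) >> 1;

	return target;
}

int main(void)
{
	struct pstate_caps caps = { .min = 8, .max = 24, .turbo = 32 };

	/* 60% busy with turbo available: 32 * 1.25 * 0.6 = 24 */
	printf("%d\n", target_pstate(&caps, 0.6, 20, 0));

	/* Same load, but the last cycle averaged P-state 30: 24 + (30 - 24) / 2 = 27 */
	printf("%d\n", target_pstate(&caps, 0.6, 30, 0));
	return 0;
}

Worth noting about the design: the performance-policy shortcut in intel_pstate_adjust_busy_pstate() and the new intel_pstate_max_within_limits() call in set_policy() together ensure a CPU under CPUFREQ_POLICY_PERFORMANCE is driven to its maximum allowed P-state even on NOHZ_FULL CPUs, where the scheduler's governor callback may never fire.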