author     Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>   2009-01-23 09:25:02 -0500
committer  Dave Jones <davej@redhat.com>                         2009-02-05 12:25:26 -0500
commit     1ca3abdb6a4b87246b00292f048acd344325fd12 (patch)
tree       787f3d255676da7110b4e09b6fe91ce50286e888
parent     eda58a85ec3fc05855a26654d97a2b53f0e715b9 (diff)
[CPUFREQ] Make ignore_nice_load setting of ondemand work as expected.
The micro-accounting changes to ondemand idle-time measurement broke the
ignore_nice_load sysfs setting due to a thinko in the code.

The bug entry: http://bugzilla.kernel.org/show_bug.cgi?id=12310

Reported-by: Jim Bray <jimsantelmo@gmail.com>
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Dave Jones <davej@redhat.com>
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c  47
1 file changed, 25 insertions(+), 22 deletions(-)
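In short, get_cpu_idle_time_jiffy() now always counts nice time as busy, and the
ignore_nice_load adjustment is done in one place, dbs_check_cpu(), by folding the
per-sample nice delta back into the measured idle time. Below is a minimal
userspace sketch of that accounting; all identifiers in it are illustrative, not
the driver's own structures or helpers.

/*
 * Simplified userspace model of the accounting this patch consolidates in
 * dbs_check_cpu(): nice time is always part of busy time, and when
 * ignore_nice_load is set the per-sample nice delta is added back to idle
 * time before the load is computed.  All names below are made up for the
 * example.
 */
#include <stdio.h>

typedef unsigned long long u64;

struct sample {
        u64 wall_us;    /* wall-clock time elapsed in this sample (us)  */
        u64 idle_us;    /* measured idle time in this sample (us)       */
        u64 nice_us;    /* time spent in nice tasks in this sample (us) */
};

/* Mirror the dbs_check_cpu() adjustment, then derive the load percentage. */
static unsigned int sample_load(const struct sample *s, int ignore_nice)
{
        u64 idle = s->idle_us;

        if (ignore_nice)
                idle += s->nice_us;     /* nice time does not count as load */

        if (!s->wall_us || s->wall_us < idle)
                return 0;               /* bogus sample; the driver skips these */

        return (unsigned int)(100 * (s->wall_us - idle) / s->wall_us);
}

int main(void)
{
        struct sample s = { .wall_us = 100000, .idle_us = 20000, .nice_us = 50000 };

        printf("load, nice counted as busy: %u%%\n", sample_load(&s, 0)); /* 80% */
        printf("load, nice counted as idle: %u%%\n", sample_load(&s, 1)); /* 30% */
        return 0;
}

With ignore_nice_load enabled, time consumed by positive-nice tasks no longer
inflates the computed load and so no longer drives the frequency up; the
prev_cpu_nice bookkeeping added in store_ignore_nice_load() and
cpufreq_governor_dbs() keeps the per-CPU nice baseline in sync whenever the knob
is toggled or the governor starts.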
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 6a2b036c9389..6f45b1658a67 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -117,11 +117,7 @@ static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
         busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
         busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
         busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
-
-        if (!dbs_tuners_ins.ignore_nice) {
-                busy_time = cputime64_add(busy_time,
-                                kstat_cpu(cpu).cpustat.nice);
-        }
+        busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);
         idle_time = cputime64_sub(cur_wall_time, busy_time);
         if (wall)
@@ -137,23 +133,6 @@ static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
         if (idle_time == -1ULL)
                 return get_cpu_idle_time_jiffy(cpu, wall);
-        if (dbs_tuners_ins.ignore_nice) {
-                cputime64_t cur_nice;
-                unsigned long cur_nice_jiffies;
-                struct cpu_dbs_info_s *dbs_info;
-
-                dbs_info = &per_cpu(cpu_dbs_info, cpu);
-                cur_nice = cputime64_sub(kstat_cpu(cpu).cpustat.nice,
-                                dbs_info->prev_cpu_nice);
-                /*
-                 * Assumption: nice time between sampling periods will be
-                 * less than 2^32 jiffies for 32 bit sys
-                 */
-                cur_nice_jiffies = (unsigned long)
-                                cputime64_to_jiffies64(cur_nice);
-                dbs_info->prev_cpu_nice = kstat_cpu(cpu).cpustat.nice;
-                return idle_time + jiffies_to_usecs(cur_nice_jiffies);
-        }
         return idle_time;
 }
@@ -319,6 +298,9 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
                 dbs_info = &per_cpu(cpu_dbs_info, j);
                 dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
                                                 &dbs_info->prev_cpu_wall);
+                if (dbs_tuners_ins.ignore_nice)
+                        dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+
         }
         mutex_unlock(&dbs_mutex);
@@ -419,6 +401,23 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
                                 j_dbs_info->prev_cpu_idle);
                 j_dbs_info->prev_cpu_idle = cur_idle_time;
+                if (dbs_tuners_ins.ignore_nice) {
+                        cputime64_t cur_nice;
+                        unsigned long cur_nice_jiffies;
+
+                        cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice,
+                                        j_dbs_info->prev_cpu_nice);
+                        /*
+                         * Assumption: nice time between sampling periods will
+                         * be less than 2^32 jiffies for 32 bit sys
+                         */
+                        cur_nice_jiffies = (unsigned long)
+                                        cputime64_to_jiffies64(cur_nice);
+
+                        j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+                        idle_time += jiffies_to_usecs(cur_nice_jiffies);
+                }
+
                 if (unlikely(!wall_time || wall_time < idle_time))
                         continue;
@@ -575,6 +574,10 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                         j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
                                                 &j_dbs_info->prev_cpu_wall);
+                        if (dbs_tuners_ins.ignore_nice) {
+                                j_dbs_info->prev_cpu_nice =
+                                                kstat_cpu(j).cpustat.nice;
+                        }
                 }
                 this_dbs_info->cpu = cpu;
                 /*