author    Ingo Molnar <mingo@elte.hu>    2007-07-09 18:51:59 +0200
committer Ingo Molnar <mingo@elte.hu>    2007-07-09 18:51:59 +0200
commit    bb29ab26863c022743143f27956cc0ca362f258c (patch)
tree      f8106b8a82d4abe9c3b217c7ca96307539a945ad
parent    dd41f596cda0d7d6e4a8b139ffdfabcefdd46528 (diff)
sched: x86, track TSC-unstable events
Track TSC-unstable events and propagate them to the scheduler code. Also allow
sched_clock() to be used when the TSC is unstable; the rq_clock() wrapper
creates a reliable clock out of it.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--  arch/i386/kernel/tsc.c    9
-rw-r--r--  include/linux/sched.h     2
-rw-r--r--  kernel/sched.c            7
3 files changed, 17 insertions(+), 1 deletion(-)
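
The rq_clock() wrapper mentioned in the commit message is not part of this patch.
As a rough, hypothetical illustration of the idea it describes (building a
reliable clock out of a possibly-unstable raw source), a standalone userspace
sketch could look like the following; all names and the clamping policy here
are illustrative, not taken from the kernel:

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical model: never let the derived clock go backwards, and bound
 * implausibly large forward jumps of the raw source.
 */
static uint64_t prev_raw;   /* last raw reading                */
static uint64_t clock_ns;   /* monotonic, filtered clock value */

static uint64_t filtered_clock(uint64_t raw_ns, uint64_t max_jump_ns)
{
	uint64_t delta;

	if (raw_ns < prev_raw)
		delta = 0;              /* clock went backwards: ignore */
	else if (raw_ns - prev_raw > max_jump_ns)
		delta = max_jump_ns;    /* implausible jump: clamp      */
	else
		delta = raw_ns - prev_raw;

	prev_raw = raw_ns;
	clock_ns += delta;
	return clock_ns;
}

int main(void)
{
	/* Simulated raw readings: normal ticks, a backward step, a huge jump. */
	uint64_t raw[] = { 1000, 2000, 1500, 4000, 4000000000ULL };

	for (unsigned int i = 0; i < sizeof(raw) / sizeof(raw[0]); i++)
		printf("raw=%llu filtered=%llu\n",
		       (unsigned long long)raw[i],
		       (unsigned long long)filtered_clock(raw[i], 10000000ULL));
	return 0;
}
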
diff --git a/arch/i386/kernel/tsc.c b/arch/i386/kernel/tsc.c
index f64b81f3033b..ea63a30ca3e8 100644
--- a/arch/i386/kernel/tsc.c
+++ b/arch/i386/kernel/tsc.c
@@ -4,6 +4,7 @@
* See comments there for proper credits.
*/
+#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/workqueue.h>
#include <linux/cpufreq.h>
@@ -106,8 +107,13 @@ unsigned long long sched_clock(void)
/*
* Fall back to jiffies if there's no TSC available:
+ * ( But note that we still use it if the TSC is marked
+ *   unstable. We do this because unlike Time Of Day,
+ *   the scheduler clock tolerates small errors and it's
+ *   very important for it to be as fast as the platform
+ *   can achieve it. )
*/
- if (unlikely(!tsc_enabled))
+ if (unlikely(!tsc_enabled && !tsc_unstable))
/* No locking but a rare wrong value is not a big deal: */
return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
@@ -277,6 +283,7 @@ static struct clocksource clocksource_tsc = {
void mark_tsc_unstable(char *reason)
{
+ sched_clock_unstable_event();
if (!tsc_unstable) {
tsc_unstable = 1;
tsc_enabled = 0;
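
The comment added to sched_clock() above explains why an unstable TSC is still
preferred over the jiffies path: the fallback only scales elapsed ticks into
nanoseconds, so its resolution is a whole tick. A standalone sketch of that
arithmetic follows; the HZ, INITIAL_JIFFIES and tick-count values are assumed
for illustration, not taken from the patch:

#include <stdio.h>

#define HZ              250     /* assumed config value           */
#define INITIAL_JIFFIES 0ULL    /* simplified; the kernel's differs */

int main(void)
{
	unsigned long long jiffies_64 = INITIAL_JIFFIES + 2500;   /* pretend ticks */

	/* Same scaling as the fallback above: elapsed jiffies -> nanoseconds. */
	unsigned long long ns = (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);

	/* 2500 ticks at HZ=250 -> 10 seconds, in 4000000 ns (4 ms) steps. */
	printf("%llu ns, resolution %d ns per tick\n", ns, 1000000000 / HZ);
	return 0;
}
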
diff --git a/include/linux/sched.h b/include/linux/sched.h
index be2460e6f55b..fa895b309da0 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1321,6 +1321,8 @@ extern void sched_exec(void);
#define sched_exec() {}
#endif
+extern void sched_clock_unstable_event(void);
+
#ifdef CONFIG_HOTPLUG_CPU
extern void idle_task_exit(void);
#else
diff --git a/kernel/sched.c b/kernel/sched.c
index 01ba4b1848a0..6150cd70f448 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -68,6 +68,13 @@ unsigned long long __attribute__((weak)) sched_clock(void)
}
/*
+ * CPU frequency is/was unstable - start new by setting prev_clock_raw:
+ */
+void sched_clock_unstable_event(void)
+{
+}
+
+/*
* Convert user-nice values [ -20 ... 0 ... 19 ]
* to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
* and back.
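
The new sched_clock_unstable_event() hook is still an empty stub at this point
in the series; only its comment hints at the intent ("start new by setting
prev_clock_raw"). A hypothetical standalone model of that intent is sketched
below; the struct, its field names and the event counter are illustrative
assumptions, not code from this or any later patch:

#include <stdint.h>

/*
 * Hypothetical model: on an unstable-clock event, resample the raw clock so
 * the next delta starts from a fresh reading instead of carrying the glitch.
 */
struct model_rq {
	uint64_t prev_clock_raw;        /* last raw clock sample         */
	uint64_t clock_unstable_events; /* how often a resync was needed */
};

static uint64_t model_raw_clock(void)
{
	return 0;   /* placeholder raw reading for the sketch */
}

static void model_clock_unstable_event(struct model_rq *rq)
{
	/* Start a new measurement interval from "now". */
	rq->prev_clock_raw = model_raw_clock();
	rq->clock_unstable_events++;
}

int main(void)
{
	struct model_rq rq = { 0, 0 };

	model_clock_unstable_event(&rq);
	return (int)rq.clock_unstable_events;   /* 1 */
}
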