author	Dave Kleikamp <shaggy@linux.vnet.ibm.com>	2008-10-09 13:21:30 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-10-10 11:17:04 +0200
commit	5b7dba4ff834259a5623e03a565748704a8fe449 (patch)
tree	8fa9be3a308d1b5f2eb22d0a02dbe887a479ee3e
parent	a5d8c3483a6e19aca95ef6a2c5890e33bfa5b293 (diff)
sched_clock: prevent scd->clock from moving backwards
When sched_clock_cpu() couples the clocks between two cpus, it may
increment scd->clock beyond the GTOD tick window that
__update_sched_clock() uses to clamp the clock.  A later call to
__update_sched_clock() may move the clock back to
scd->tick_gtod + TICK_NSEC, violating the clock's monotonic property.

This patch ensures that scd->clock will not be set backward.

Signed-off-by: Dave Kleikamp <shaggy@linux.vnet.ibm.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
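For illustration, here is a minimal userspace sketch of the clamp before and
after the patch.  It assumes wrap_max()/wrap_min() behave as wraparound-safe
max/min (their kernel definitions are not part of this hunk) and uses a
made-up TICK_NSEC value; it is not the kernel code itself, only a sketch of
why the old upper bound could move a coupled clock backwards while the new
bound keeps it monotonic.

#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;
typedef int64_t s64;

#define TICK_NSEC 1000000ULL	/* illustrative 1 ms tick; the real value depends on HZ */

/* assumed wraparound-safe min/max, mirroring what the kernel helpers are expected to do */
static u64 wrap_max(u64 x, u64 y) { return (s64)(x - y) > 0 ? x : y; }
static u64 wrap_min(u64 x, u64 y) { return (s64)(x - y) < 0 ? x : y; }

struct scd { u64 tick_gtod, clock; };

/* Old clamp: the upper bound ignores scd->clock, so a clock already pushed
 * past tick_gtod + TICK_NSEC (e.g. by cross-CPU coupling) gets pulled back. */
static u64 clamp_old(struct scd *scd, u64 delta)
{
	u64 clock = scd->tick_gtod + delta;
	u64 min_clock = wrap_max(scd->tick_gtod, scd->clock);
	u64 max_clock = scd->tick_gtod + TICK_NSEC;

	clock = wrap_max(clock, min_clock);
	return wrap_min(clock, max_clock);
}

/* Patched clamp: the upper bound can never fall below the current
 * scd->clock, so the result never moves backwards. */
static u64 clamp_new(struct scd *scd, u64 delta)
{
	u64 clock = scd->tick_gtod + delta;
	u64 min_clock = wrap_max(scd->tick_gtod, scd->clock);
	u64 max_clock = wrap_max(scd->clock, scd->tick_gtod + TICK_NSEC);

	clock = wrap_max(clock, min_clock);
	return wrap_min(clock, max_clock);
}

int main(void)
{
	/* scd->clock was bumped three ticks ahead of tick_gtod by remote coupling */
	struct scd scd = { .tick_gtod = 0, .clock = 3 * TICK_NSEC };

	printf("old clamp: %llu (clock moved backwards)\n",
	       (unsigned long long)clamp_old(&scd, TICK_NSEC / 2));
	printf("new clamp: %llu (clock stays >= scd->clock)\n",
	       (unsigned long long)clamp_new(&scd, TICK_NSEC / 2));
	return 0;
}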
-rw-r--r--	kernel/sched_clock.c	6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
index e8ab096ddfe3..81787248b60f 100644
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
@@ -118,13 +118,13 @@ static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now)
 
 	/*
 	 * scd->clock = clamp(scd->tick_gtod + delta,
-	 *		      max(scd->tick_gtod, scd->clock),
-	 *		      scd->tick_gtod + TICK_NSEC);
+	 *		      max(scd->tick_gtod, scd->clock),
+	 *		      max(scd->clock, scd->tick_gtod + TICK_NSEC));
 	 */
 
 	clock = scd->tick_gtod + delta;
 	min_clock = wrap_max(scd->tick_gtod, scd->clock);
-	max_clock = scd->tick_gtod + TICK_NSEC;
+	max_clock = wrap_max(scd->clock, scd->tick_gtod + TICK_NSEC);
 
 	clock = wrap_max(clock, min_clock);
 	clock = wrap_min(clock, max_clock);