author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2011-11-15 17:14:39 +0100
committer	Ingo Molnar <mingo@elte.hu>	2011-11-17 12:20:22 +0100
commit		391e43da797a96aeb65410281891f6d0b0e9611c (patch)
tree		0ce6784525a5a8f75b377170cf1a7d60abccea29 /kernel/sched_clock.c
parent		029632fbb7b7c9d85063cc9eb470de6c54873df3 (diff)
sched: Move all scheduler bits into kernel/sched/
There are too many sched*.[ch] files in kernel/; give them their own
directory.
(No code changed, other than Makefile glue added.)
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_clock.c')
-rw-r--r--	kernel/sched_clock.c	350
1 file changed, 0 insertions, 350 deletions
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
deleted file mode 100644
index c685e31492d..00000000000
--- a/kernel/sched_clock.c
+++ /dev/null
@@ -1,350 +0,0 @@
-/*
- * sched_clock for unstable cpu clocks
- *
- * Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
- *
- * Updates and enhancements:
- *  Copyright (C) 2008 Red Hat, Inc. Steven Rostedt <srostedt@redhat.com>
- *
- * Based on code by:
- *  Ingo Molnar <mingo@redhat.com>
- *  Guillaume Chazarain <guichaz@gmail.com>
- *
- *
- * What:
- *
- * cpu_clock(i) provides a fast (execution time) high resolution
- * clock with bounded drift between CPUs. The value of cpu_clock(i)
- * is monotonic for constant i. The timestamp returned is in nanoseconds.
- *
- * ######################### BIG FAT WARNING ##########################
- * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
- * # go backwards !!                                                  #
- * ####################################################################
- *
- * There is no strict promise about the base, although it tends to start
- * at 0 on boot (but people really shouldn't rely on that).
- *
- * cpu_clock(i)       -- can be used from any context, including NMI.
- * sched_clock_cpu(i) -- must be used with local IRQs disabled (implied by NMI)
- * local_clock()      -- is cpu_clock() on the current cpu.
- *
- * How:
- *
- * The implementation either uses sched_clock() when
- * !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK, which means in that case the
- * sched_clock() is assumed to provide these properties (mostly it means
- * the architecture provides a globally synchronized highres time source).
- *
- * Otherwise it tries to create a semi-stable clock from a mixture of other
- * clocks, including:
- *
- *  - GTOD (clock monotonic)
- *  - sched_clock()
- *  - explicit idle events
- *
- * We use GTOD as base and use sched_clock() deltas to improve resolution. The
- * deltas are filtered to provide monotonicity and to keep it within an
- * expected window.
- *
- * Furthermore, explicit sleep and wakeup hooks allow us to account for time
- * that is otherwise invisible (TSC gets stopped).
- *
- *
- * Notes:
- *
- * The !IRQ-safety of sched_clock() and sched_clock_cpu() comes from things
- * like cpufreq interrupts that can change the base clock (TSC) multiplier
- * and cause funny jumps in time -- although the filtering provided by
- * sched_clock_cpu() should mitigate serious artifacts we cannot rely on it
- * in general since for !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK we fully rely on
- * sched_clock().
- */
-#include <linux/spinlock.h>
-#include <linux/hardirq.h>
-#include <linux/export.h>
-#include <linux/percpu.h>
-#include <linux/ktime.h>
-#include <linux/sched.h>
-
-/*
- * Scheduler clock - returns current time in nanosec units.
- * This is the default implementation.
- * Architectures and sub-architectures can override this.
- */
-unsigned long long __attribute__((weak)) sched_clock(void)
-{
-	return (unsigned long long)(jiffies - INITIAL_JIFFIES)
-					* (NSEC_PER_SEC / HZ);
-}
-EXPORT_SYMBOL_GPL(sched_clock);
-
-__read_mostly int sched_clock_running;
-
-#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
-__read_mostly int sched_clock_stable;
-
-struct sched_clock_data {
-	u64			tick_raw;
-	u64			tick_gtod;
-	u64			clock;
-};
-
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);
-
-static inline struct sched_clock_data *this_scd(void)
-{
-	return &__get_cpu_var(sched_clock_data);
-}
-
-static inline struct sched_clock_data *cpu_sdc(int cpu)
-{
-	return &per_cpu(sched_clock_data, cpu);
-}
-
-void sched_clock_init(void)
-{
-	u64 ktime_now = ktime_to_ns(ktime_get());
-	int cpu;
-
-	for_each_possible_cpu(cpu) {
-		struct sched_clock_data *scd = cpu_sdc(cpu);
-
-		scd->tick_raw = 0;
-		scd->tick_gtod = ktime_now;
-		scd->clock = ktime_now;
-	}
-
-	sched_clock_running = 1;
-}
-
-/*
- * min, max except they take wrapping into account
- */
-
-static inline u64 wrap_min(u64 x, u64 y)
-{
-	return (s64)(x - y) < 0 ? x : y;
-}
-
-static inline u64 wrap_max(u64 x, u64 y)
-{
-	return (s64)(x - y) > 0 ? x : y;
-}
-
-/*
- * update the percpu scd from the raw @now value
- *
- *  - filter out backward motion
- *  - use the GTOD tick value to create a window to filter crazy TSC values
- */
-static u64 sched_clock_local(struct sched_clock_data *scd)
-{
-	u64 now, clock, old_clock, min_clock, max_clock;
-	s64 delta;
-
-again:
-	now = sched_clock();
-	delta = now - scd->tick_raw;
-	if (unlikely(delta < 0))
-		delta = 0;
-
-	old_clock = scd->clock;
-
-	/*
-	 * scd->clock = clamp(scd->tick_gtod + delta,
-	 *		      max(scd->tick_gtod, scd->clock),
-	 *		      scd->tick_gtod + TICK_NSEC);
-	 */
-
-	clock = scd->tick_gtod + delta;
-	min_clock = wrap_max(scd->tick_gtod, old_clock);
-	max_clock = wrap_max(old_clock, scd->tick_gtod + TICK_NSEC);
-
-	clock = wrap_max(clock, min_clock);
-	clock = wrap_min(clock, max_clock);
-
-	if (cmpxchg64(&scd->clock, old_clock, clock) != old_clock)
-		goto again;
-
-	return clock;
-}
-
-static u64 sched_clock_remote(struct sched_clock_data *scd)
-{
-	struct sched_clock_data *my_scd = this_scd();
-	u64 this_clock, remote_clock;
-	u64 *ptr, old_val, val;
-
-	sched_clock_local(my_scd);
-again:
-	this_clock = my_scd->clock;
-	remote_clock = scd->clock;
-
-	/*
-	 * Use the opportunity that we have both locks
-	 * taken to couple the two clocks: we take the
-	 * larger time as the latest time for both
-	 * runqueues. (this creates monotonic movement)
-	 */
-	if (likely((s64)(remote_clock - this_clock) < 0)) {
-		ptr = &scd->clock;
-		old_val = remote_clock;
-		val = this_clock;
-	} else {
-		/*
-		 * Should be rare, but possible:
-		 */
-		ptr = &my_scd->clock;
-		old_val = this_clock;
-		val = remote_clock;
-	}
-
-	if (cmpxchg64(ptr, old_val, val) != old_val)
-		goto again;
-
-	return val;
-}
-
-/*
- * Similar to cpu_clock(), but requires local IRQs to be disabled.
- *
- * See cpu_clock().
- */
-u64 sched_clock_cpu(int cpu)
-{
-	struct sched_clock_data *scd;
-	u64 clock;
-
-	WARN_ON_ONCE(!irqs_disabled());
-
-	if (sched_clock_stable)
-		return sched_clock();
-
-	if (unlikely(!sched_clock_running))
-		return 0ull;
-
-	scd = cpu_sdc(cpu);
-
-	if (cpu != smp_processor_id())
-		clock = sched_clock_remote(scd);
-	else
-		clock = sched_clock_local(scd);
-
-	return clock;
-}
-
-void sched_clock_tick(void)
-{
-	struct sched_clock_data *scd;
-	u64 now, now_gtod;
-
-	if (sched_clock_stable)
-		return;
-
-	if (unlikely(!sched_clock_running))
-		return;
-
-	WARN_ON_ONCE(!irqs_disabled());
-
-	scd = this_scd();
-	now_gtod = ktime_to_ns(ktime_get());
-	now = sched_clock();
-
-	scd->tick_raw = now;
-	scd->tick_gtod = now_gtod;
-	sched_clock_local(scd);
-}
-
-/*
- * We are going deep-idle (irqs are disabled):
- */
-void sched_clock_idle_sleep_event(void)
-{
-	sched_clock_cpu(smp_processor_id());
-}
-EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);
-
-/*
- * We just idled delta nanoseconds (called with irqs disabled):
- */
-void sched_clock_idle_wakeup_event(u64 delta_ns)
-{
-	if (timekeeping_suspended)
-		return;
-
-	sched_clock_tick();
-	touch_softlockup_watchdog();
-}
-EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
-
-/*
- * As outlined at the top, provides a fast, high resolution, nanosecond
- * time source that is monotonic per cpu argument and has bounded drift
- * between cpus.
- *
- * ######################### BIG FAT WARNING ##########################
- * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
- * # go backwards !!                                                  #
- * ####################################################################
- */
-u64 cpu_clock(int cpu)
-{
-	u64 clock;
-	unsigned long flags;
-
-	local_irq_save(flags);
-	clock = sched_clock_cpu(cpu);
-	local_irq_restore(flags);
-
-	return clock;
-}
-
-/*
- * Similar to cpu_clock() for the current cpu. Time will only be observed
- * to be monotonic if care is taken to only compare timestamps taken on the
- * same CPU.
- *
- * See cpu_clock().
- */
-u64 local_clock(void)
-{
-	u64 clock;
-	unsigned long flags;
-
-	local_irq_save(flags);
-	clock = sched_clock_cpu(smp_processor_id());
-	local_irq_restore(flags);
-
-	return clock;
-}
-
-#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
-
-void sched_clock_init(void)
-{
-	sched_clock_running = 1;
-}
-
-u64 sched_clock_cpu(int cpu)
-{
-	if (unlikely(!sched_clock_running))
-		return 0;
-
-	return sched_clock();
-}
-
-u64 cpu_clock(int cpu)
-{
-	return sched_clock_cpu(cpu);
-}
-
-u64 local_clock(void)
-{
-	return sched_clock_cpu(0);
-}
-
-#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
-
-EXPORT_SYMBOL_GPL(cpu_clock);
-EXPORT_SYMBOL_GPL(local_clock);
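A side note on the wrap_min()/wrap_max() helpers in the deleted file: ordering two u64 timestamps by the sign of their difference stays correct even when one of them has wrapped past the top of the counter, where a plain comparison gives the wrong answer. Below is a minimal, standalone userspace sketch of that trick (not part of this commit; the stdint typedefs merely stand in for the kernel's u64/s64):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;
typedef int64_t s64;

/* Same idiom as the kernel helpers: compare via the signed difference. */
static inline u64 wrap_min(u64 x, u64 y)
{
	return (s64)(x - y) < 0 ? x : y;
}

static inline u64 wrap_max(u64 x, u64 y)
{
	return (s64)(x - y) > 0 ? x : y;
}

int main(void)
{
	u64 before = UINT64_MAX - 5;	/* just below the wrap point */
	u64 after = 10;			/* 16 ns later, after wrapping */

	/* A plain comparison is fooled by the wrap and picks 'before': */
	printf("naive max: %llu\n",
	       (unsigned long long)(before < after ? after : before));

	/* The signed-difference trick still picks the later 'after': */
	printf("wrap_max : %llu\n",
	       (unsigned long long)wrap_max(before, after));
	return 0;
}

The trick only holds while the two values are within 2^63 of each other, which is a safe assumption for nanosecond timestamps taken close together.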
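The clamp that sched_clock_local() applies is also worth a worked example: the raw sched_clock() delta is added to the last GTOD tick value, then forced into the window [max(tick_gtod, old_clock), tick_gtod + TICK_NSEC], which filters out backward motion and caps wild TSC jumps at one tick. The following is a userspace-only sketch under stated assumptions: the TICK_NSEC value is illustrative (HZ=1000), and the wraparound-safe comparisons are simplified to plain ones:

#include <stdint.h>
#include <stdio.h>

#define TICK_NSEC 1000000ULL	/* illustrative: one tick = 1 ms */

/* Mirror of the clamp in sched_clock_local(), minus wraparound handling. */
static uint64_t clamp_clock(uint64_t tick_gtod, uint64_t old_clock,
			    uint64_t delta)
{
	uint64_t clock = tick_gtod + delta;
	uint64_t min_clock = old_clock > tick_gtod ? old_clock : tick_gtod;
	uint64_t max_clock = old_clock > tick_gtod + TICK_NSEC ?
			     old_clock : tick_gtod + TICK_NSEC;

	if (clock < min_clock)	/* filter backward motion */
		clock = min_clock;
	if (clock > max_clock)	/* filter crazy TSC jumps */
		clock = max_clock;
	return clock;
}

int main(void)
{
	/* A sane 100 us delta passes through unchanged: prints 5100000 */
	printf("%llu\n", (unsigned long long)clamp_clock(5000000, 5000000, 100000));
	/* A wild 10 s jump is capped one tick past gtod: prints 6000000 */
	printf("%llu\n", (unsigned long long)clamp_clock(5000000, 5000000, 10000000000ULL));
	return 0;
}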
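Finally, both sched_clock_local() and sched_clock_remote() publish their result through a cmpxchg64() retry loop rather than a lock, so concurrent updaters on other CPUs never block each other. A minimal C11 analogue of that pattern is sketched below; the function name publish_monotonic is hypothetical, and atomic_compare_exchange_weak() stands in for the kernel's cmpxchg64():

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t clock_val;

/* Advance clock_val to at least 'candidate', never moving it backwards. */
static uint64_t publish_monotonic(uint64_t candidate)
{
	uint64_t old = atomic_load(&clock_val);

	do {
		if (candidate <= old)
			return old;	/* another updater already won */
		/* on CAS failure, 'old' is reloaded and rechecked above */
	} while (!atomic_compare_exchange_weak(&clock_val, &old, candidate));

	return candidate;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)publish_monotonic(100));	/* 100 */
	printf("%llu\n", (unsigned long long)publish_monotonic(50));	/* still 100 */
	return 0;
}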