author    | Andy Lutomirski <luto@mit.edu>        | 2011-07-14 06:47:22 -0400
committer | H. Peter Anvin <hpa@linux.intel.com>  | 2011-07-14 17:57:05 -0700
commit    | 98d0ac38ca7b1b7a552c9a2359174ff84decb600 (patch)
tree      | 0c244e828f86c779c348a4888ed9e303c3e59811 /arch/x86/vdso
parent    | 433bd805e5fd2c731b3a9025b034f066272d336e (diff)
x86-64: Move vread_tsc and vread_hpet into the vDSO
The vsyscall page now consists entirely of trap instructions.
Cc: John Stultz <johnstul@us.ibm.com>
Signed-off-by: Andy Lutomirski <luto@mit.edu>
Link: http://lkml.kernel.org/r/637648f303f2ef93af93bae25186e9a1bea093f5.1310639973.git.luto@mit.edu
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
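For context, the ordering idiom that the new vread_tsc() in the diff below relies on can be sketched in plain userspace C. This is only an illustrative analogue, not the kernel code: rdtsc_fenced() and last are hypothetical stand-ins for the kernel's rdtsc_barrier()/vget_cycles() and gtod->clock.cycle_last, and the lfence-before-rdtsc choice is hard-coded here, whereas the kernel selects lfence or mfence per CPU via alternatives.

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for rdtsc_barrier() + vget_cycles(): the fence
 * keeps rdtsc from being reordered ahead of earlier loads. */
static inline uint64_t rdtsc_fenced(void)
{
	uint32_t lo, hi;

	__asm__ __volatile__("lfence; rdtsc" : "=a" (lo), "=d" (hi));
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	/* Stand-in for gtod->clock.cycle_last, published by the timekeeper. */
	static uint64_t last;
	uint64_t now = rdtsc_fenced();

	/* Clamp the way vread_tsc() does: never report a value that is
	 * behind the last cycle count the timekeeping code recorded. */
	if (now < last)
		now = last;
	last = now;

	printf("tsc: %llu\n", (unsigned long long)now);
	return 0;
}
```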
Diffstat (limited to 'arch/x86/vdso')
-rw-r--r-- | arch/x86/vdso/vclock_gettime.c | 53
1 file changed, 47 insertions(+), 6 deletions(-)
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
index cf54813ac52..8792d6e0a2c 100644
--- a/arch/x86/vdso/vclock_gettime.c
+++ b/arch/x86/vdso/vclock_gettime.c
@@ -17,6 +17,7 @@
 #include <linux/time.h>
 #include <linux/string.h>
 #include <asm/vsyscall.h>
+#include <asm/fixmap.h>
 #include <asm/vgtod.h>
 #include <asm/timex.h>
 #include <asm/hpet.h>
@@ -25,6 +26,43 @@
 
 #define gtod (&VVAR(vsyscall_gtod_data))
 
+notrace static cycle_t vread_tsc(void)
+{
+	cycle_t ret;
+	u64 last;
+
+	/*
+	 * Empirically, a fence (of type that depends on the CPU)
+	 * before rdtsc is enough to ensure that rdtsc is ordered
+	 * with respect to loads.  The various CPU manuals are unclear
+	 * as to whether rdtsc can be reordered with later loads,
+	 * but no one has ever seen it happen.
+	 */
+	rdtsc_barrier();
+	ret = (cycle_t)vget_cycles();
+
+	last = VVAR(vsyscall_gtod_data).clock.cycle_last;
+
+	if (likely(ret >= last))
+		return ret;
+
+	/*
+	 * GCC likes to generate cmov here, but this branch is extremely
+	 * predictable (it's just a function of time and the likely is
+	 * very likely) and there's a data dependence, so force GCC
+	 * to generate a branch instead.  I don't barrier() because
+	 * we don't actually need a barrier, and if this function
+	 * ever gets inlined it will generate worse code.
+	 */
+	asm volatile ("");
+	return last;
+}
+
+static notrace cycle_t vread_hpet(void)
+{
+	return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
+}
+
 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
 {
 	long ret;
@@ -36,9 +74,12 @@ notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
 
 notrace static inline long vgetns(void)
 {
 	long v;
-	cycles_t (*vread)(void);
-	vread = gtod->clock.vread;
-	v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
+	cycles_t cycles;
+	if (gtod->clock.vclock_mode == VCLOCK_TSC)
+		cycles = vread_tsc();
+	else
+		cycles = vread_hpet();
+	v = (cycles - gtod->clock.cycle_last) & gtod->clock.mask;
 	return (v * gtod->clock.mult) >> gtod->clock.shift;
 }
@@ -118,11 +159,11 @@ notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
 {
 	switch (clock) {
 	case CLOCK_REALTIME:
-		if (likely(gtod->clock.vread))
+		if (likely(gtod->clock.vclock_mode != VCLOCK_NONE))
 			return do_realtime(ts);
 		break;
 	case CLOCK_MONOTONIC:
-		if (likely(gtod->clock.vread))
+		if (likely(gtod->clock.vclock_mode != VCLOCK_NONE))
 			return do_monotonic(ts);
 		break;
 	case CLOCK_REALTIME_COARSE:
@@ -139,7 +180,7 @@ int clock_gettime(clockid_t, struct timespec *)
 notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
 {
 	long ret;
-	if (likely(gtod->clock.vread)) {
+	if (likely(gtod->clock.vclock_mode != VCLOCK_NONE)) {
 		if (likely(tv != NULL)) {
 			BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
 				     offsetof(struct timespec, tv_nsec) ||
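With this change a plain clock_gettime() call can be satisfied entirely in userspace by __vdso_clock_gettime() above, instead of trapping through the vsyscall page. A minimal caller is sketched below; it assumes an x86-64 glibc that dispatches clock_gettime() to the vDSO (the program itself is ordinary POSIX C and nothing in it is specific to this patch).

```c
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	/* On x86-64, glibc builds that know about the vDSO resolve this to
	 * __vdso_clock_gettime(), so the TSC/HPET read happens in userspace. */
	if (clock_gettime(CLOCK_MONOTONIC, &ts) != 0) {
		perror("clock_gettime");
		return 1;
	}

	printf("monotonic: %lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
	return 0;
}
```

On glibc of this era the program may need to be linked with -lrt for clock_gettime().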