author	Andy Lutomirski <luto@kernel.org>	2015-06-25 18:44:08 +0200
committer	Ingo Molnar <mingo@kernel.org>	2015-07-06 15:23:29 +0200
commit	03b9730b769fc4d87e40f6104f4c5b2e43889f19 (patch)
tree	a3f46bac8fc6ee76933f5f17c2d9f30059957529 /arch/x86/entry/vdso
parent	4ea1636b04dbd66536fa387bae2eea463efc705b (diff)
x86/asm/tsc: Add rdtsc_ordered() and use it in trivial call sites
rdtsc_barrier(); rdtsc() is an unnecessary mouthful and requires more
thought than should be necessary. Add an rdtsc_ordered() helper and
replace the trivial call sites with it.

This should not change generated code. The duplication of the fence
asm is temporary.

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Huang Rui <ray.huang@amd.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: Len Brown <lenb@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: kvm ML <kvm@vger.kernel.org>
Link: http://lkml.kernel.org/r/dddbf98a2af53312e9aa73a5a2b1622fe5d6f52b.1434501121.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
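For context, a minimal sketch of what such a helper boils down to. This
is an assumption-laden illustration, not the in-tree definition: the real
helper lives in the x86 MSR/TSC headers and selects the fence type per
CPU at boot via the alternatives mechanism, whereas an unconditional
mfence stands in for that here.

/*
 * Sketch only: the kernel patches in mfence vs. lfence via
 * alternatives; a plain mfence is used here for brevity.
 */
static __always_inline u64 rdtsc(void)
{
	u32 lo, hi;

	/* RDTSC returns the 64-bit counter in EDX:EAX. */
	asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
	return ((u64)hi << 32) | lo;
}

static __always_inline u64 rdtsc_ordered(void)
{
	/*
	 * Empirically, a fence before RDTSC is enough to keep it from
	 * being reordered with earlier loads (see the comment removed
	 * from vread_tsc() in the diff below).
	 */
	asm volatile("mfence" ::: "memory");
	return rdtsc();
}

With a helper of this shape, a call site that previously spelled out
rdtsc_barrier(); ret = rdtsc(); collapses to a single
rdtsc_ordered() call, which is exactly the transformation the diff
below applies to vread_tsc().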
Diffstat (limited to 'arch/x86/entry/vdso')
-rw-r--r--	arch/x86/entry/vdso/vclock_gettime.c	16
1 file changed, 2 insertions(+), 14 deletions(-)
diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c
index 0340d93c18ca..ca94fa649251 100644
--- a/arch/x86/entry/vdso/vclock_gettime.c
+++ b/arch/x86/entry/vdso/vclock_gettime.c
@@ -175,20 +175,8 @@ static notrace cycle_t vread_pvclock(int *mode)
 notrace static cycle_t vread_tsc(void)
 {
-	cycle_t ret;
-	u64 last;
-
-	/*
-	 * Empirically, a fence (of type that depends on the CPU)
-	 * before rdtsc is enough to ensure that rdtsc is ordered
-	 * with respect to loads. The various CPU manuals are unclear
-	 * as to whether rdtsc can be reordered with later loads,
-	 * but no one has ever seen it happen.
-	 */
-	rdtsc_barrier();
-	ret = (cycle_t)rdtsc();
-
-	last = gtod->cycle_last;
+	cycle_t ret = (cycle_t)rdtsc_ordered();
+	u64 last = gtod->cycle_last;
 	if (likely(ret >= last))
 		return ret;