author    Blue Swirl <blauwirbel@gmail.com>  2010-03-29 19:24:00 +0000
committer Blue Swirl <blauwirbel@gmail.com>  2010-03-29 19:24:00 +0000
commit    29e922b61fb3d93836825ca9731bb2cadbb6ed72 (patch)
tree      b461e05df4e043c3015857ca95fbfdf704bab059 /qemu-timer.h
parent    5c4532ee7894277d8d54db108e891c4204d15f1d (diff)
Compile qemu-timer only once
Arrange various declarations so that non-CPU code can also access them; adjust users. Move CPU-specific code to cpus.c.

Signed-off-by: Blue Swirl <blauwirbel@gmail.com>
Diffstat (limited to 'qemu-timer.h')
-rw-r--r--  qemu-timer.h  167
1 file changed, 167 insertions(+), 0 deletions(-)
diff --git a/qemu-timer.h b/qemu-timer.h
index fca11ebd55..a7eac9877b 100644
--- a/qemu-timer.h
+++ b/qemu-timer.h
@@ -1,6 +1,8 @@
#ifndef QEMU_TIMER_H
#define QEMU_TIMER_H
+#include "qemu-common.h"
+
/* timers */
typedef struct QEMUClock QEMUClock;
@@ -69,4 +71,169 @@ void ptimer_stop(ptimer_state *s);
void qemu_put_ptimer(QEMUFile *f, ptimer_state *s);
void qemu_get_ptimer(QEMUFile *f, ptimer_state *s);
+/* icount */
+int64_t qemu_icount_round(int64_t count);
+extern int64_t qemu_icount;
+extern int use_icount;
+extern int icount_time_shift;
+extern int64_t qemu_icount_bias;
+int64_t cpu_get_icount(void);
+
+/*******************************************/
+/* host CPU ticks (if available) */
+
+#if defined(_ARCH_PPC)
+
+static inline int64_t cpu_get_real_ticks(void)
+{
+    int64_t retval;
+#ifdef _ARCH_PPC64
+    /* This reads timebase in one 64bit go and includes Cell workaround from:
+       http://ozlabs.org/pipermail/linuxppc-dev/2006-October/027052.html
+    */
+    __asm__ __volatile__ ("mftb    %0\n\t"
+                          "cmpwi   %0,0\n\t"
+                          "beq-    $-8"
+                          : "=r" (retval));
+#else
+    /* http://ozlabs.org/pipermail/linuxppc-dev/1999-October/003889.html */
+    unsigned long junk;
+    __asm__ __volatile__ ("mftbu   %1\n\t"
+                          "mftb    %L0\n\t"
+                          "mftbu   %0\n\t"
+                          "cmpw    %0,%1\n\t"
+                          "bne     $-16"
+                          : "=r" (retval), "=r" (junk));
+#endif
+    return retval;
+}
+
+#elif defined(__i386__)
+
+static inline int64_t cpu_get_real_ticks(void)
+{
+    int64_t val;
+    asm volatile ("rdtsc" : "=A" (val));
+    return val;
+}
+
+#elif defined(__x86_64__)
+
+static inline int64_t cpu_get_real_ticks(void)
+{
+    uint32_t low, high;
+    int64_t val;
+    asm volatile("rdtsc" : "=a" (low), "=d" (high));
+    val = high;
+    val <<= 32;
+    val |= low;
+    return val;
+}
+
+#elif defined(__hppa__)
+
+static inline int64_t cpu_get_real_ticks(void)
+{
+    int val;
+    asm volatile ("mfctl %%cr16, %0" : "=r"(val));
+    return val;
+}
+
+#elif defined(__ia64)
+
+static inline int64_t cpu_get_real_ticks(void)
+{
+    int64_t val;
+    asm volatile ("mov %0 = ar.itc" : "=r"(val) :: "memory");
+    return val;
+}
+
+#elif defined(__s390__)
+
+static inline int64_t cpu_get_real_ticks(void)
+{
+    int64_t val;
+    asm volatile("stck 0(%1)" : "=m" (val) : "a" (&val) : "cc");
+    return val;
+}
+
+#elif defined(__sparc_v8plus__) || defined(__sparc_v8plusa__) || defined(__sparc_v9__)
+
+static inline int64_t cpu_get_real_ticks (void)
+{
+#if defined(_LP64)
+    uint64_t rval;
+    asm volatile("rd %%tick,%0" : "=r"(rval));
+    return rval;
+#else
+    union {
+        uint64_t i64;
+        struct {
+            uint32_t high;
+            uint32_t low;
+        } i32;
+    } rval;
+    asm volatile("rd %%tick,%1; srlx %1,32,%0"
+                 : "=r"(rval.i32.high), "=r"(rval.i32.low));
+    return rval.i64;
+#endif
+}
+
+#elif defined(__mips__) && \
+ ((defined(__mips_isa_rev) && __mips_isa_rev >= 2) || defined(__linux__))
+/*
+ * binutils only accepts rdhwr when targeting mips32r2, but since the
+ * Linux kernel emulates the instruction, it is fine to use it anyway.
+ */
+#define MIPS_RDHWR(rd, value) {                         \
+        __asm__ __volatile__ (".set push\n\t"           \
+                              ".set mips32r2\n\t"       \
+                              "rdhwr %0, "rd"\n\t"      \
+                              ".set pop"                \
+                              : "=r" (value));          \
+    }
+
+static inline int64_t cpu_get_real_ticks(void)
+{
+    /* On kernels >= 2.6.25 rdhwr <reg>, $2 and $3 are emulated.  */
+    uint32_t count;
+    static uint32_t cyc_per_count = 0;
+
+    if (!cyc_per_count) {
+        MIPS_RDHWR("$3", cyc_per_count);
+    }
+
+    MIPS_RDHWR("$2", count);
+    /* Widen before multiplying so the product does not wrap at 32 bits.  */
+    return (int64_t)count * cyc_per_count;
+}
+
+#else
+/* The host CPU doesn't have an easily accessible cycle counter.
+   Just return a monotonically increasing value.  This will be
+   totally wrong, but hopefully better than nothing.  */
+static inline int64_t cpu_get_real_ticks (void)
+{
+    static int64_t ticks = 0;
+    return ticks++;
+}
+#endif
+
+#ifdef NEED_CPU_H
+/* Deterministic execution requires that IO only be performed on the last
+   instruction of a TB so that interrupts take effect immediately.  */
+static inline int can_do_io(CPUState *env)
+{
+    if (!use_icount)
+        return 1;
+
+    /* If not executing code then assume we are ok.  */
+    if (!env->current_tb)
+        return 1;
+
+    return env->can_do_io != 0;
+}
+#endif
+
#endif
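
The 32-bit PPC reader above has to combine two 32-bit halves that cannot be fetched in one atomic access; the mftbu/mftb/mftbu sequence retries whenever a carry ripples into the high half between the two reads. A minimal portable-C sketch of that retry pattern, with a stand-in tick source in place of the real instructions:

#include <stdint.h>

/* Stand-in tick source.  On real 32-bit PPC the two halves come from the
   mftbu/mftb instructions and cannot be read in a single atomic access. */
static volatile uint32_t ticks_hi, ticks_lo;

/* Same idea as the 32-bit PPC asm above: read high, read low, reread high;
   if the high half changed, a carry rippled between the reads, so retry. */
static uint64_t read_ticks64(void)
{
    uint32_t hi, lo, hi2;

    do {
        hi  = ticks_hi;
        lo  = ticks_lo;
        hi2 = ticks_hi;
    } while (hi != hi2);

    return ((uint64_t)hi << 32) | lo;
}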
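
For context, the icount state exported above (qemu_icount_bias, icount_time_shift) is conventionally combined as virtual_time = bias + (instructions << shift). A minimal sketch of that arithmetic with stand-in variables; the real cpu_get_icount() in cpus.c also has to account for instructions still pending in the current TB:

#include <stdint.h>

/* Stand-ins for the externs declared in the header; values are arbitrary. */
static int icount_time_shift = 3;      /* 2^3 ns of virtual time per insn */
static int64_t qemu_icount_bias;       /* re-anchors virtual time on warps */

/* Illustrative: virtual time grows by one scaled unit per executed
   instruction, offset by the bias. */
static int64_t icount_to_ns(int64_t instructions)
{
    return qemu_icount_bias + (instructions << icount_time_shift);
}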
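
A caller's-eye sketch of cpu_get_real_ticks(); the standalone main() and the include path are illustrative assumptions, and the result is in host-specific tick units rather than nanoseconds:

#include <stdio.h>
#include <stdint.h>
#include "qemu-timer.h"          /* provides cpu_get_real_ticks() */

int main(void)
{
    int64_t start = cpu_get_real_ticks();
    /* ... code being measured ... */
    int64_t end = cpu_get_real_ticks();

    /* Units are host specific: TSC cycles on x86, timebase ticks on PPC,
       and merely a call counter on the fallback path. */
    printf("elapsed: %lld host ticks\n", (long long)(end - start));
    return 0;
}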