author     Peter Crosthwaite <crosthwaite.peter@gmail.com>   2015-05-30 23:11:46 -0700
committer  Paolo Bonzini <pbonzini@redhat.com>               2015-06-26 16:00:51 +0200
commit     41da4bd6420afd1209c408974920f63ff9c658e1 (patch)
tree       78e56f7e51952a8c8319f4c7a8ac8b3c80053cf8 /include
parent     e1b89321bafea9fb33d87852fc91fee579d17dfe (diff)
cpu-defs: Move out TB_JMP defines
These are not architecture specific in any way, so move them out of
cpu-defs.h. tb-hash.h is an appropriate home: it is a leading user, and
the defines relate closely to TB hashing and caching.

Reviewed-by: Richard Henderson <rth@redhat.com>
Signed-off-by: Peter Crosthwaite <crosthwaite.peter@gmail.com>
Message-Id: <43ceca65a3fa240efac49aa0bf604ad0442e1710.1433052532.git.crosthwaite.peter@gmail.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'include')
-rw-r--r--  include/exec/cpu-defs.h | 8 --------
-rw-r--r--  include/exec/tb-hash.h  | 8 ++++++++
2 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
index 247829c968..98b9cff310 100644
--- a/include/exec/cpu-defs.h
+++ b/include/exec/cpu-defs.h
@@ -56,14 +56,6 @@ typedef uint64_t target_ulong;
 #error TARGET_LONG_SIZE undefined
 #endif
 
-/* Only the bottom TB_JMP_PAGE_BITS of the jump cache hash bits vary for
- addresses on the same page. The top bits are the same. This allows
- TLB invalidation to quickly clear a subset of the hash table. */
-#define TB_JMP_PAGE_BITS (TB_JMP_CACHE_BITS / 2)
-#define TB_JMP_PAGE_SIZE (1 << TB_JMP_PAGE_BITS)
-#define TB_JMP_ADDR_MASK (TB_JMP_PAGE_SIZE - 1)
-#define TB_JMP_PAGE_MASK (TB_JMP_CACHE_SIZE - TB_JMP_PAGE_SIZE)
-
 #if !defined(CONFIG_USER_ONLY)
 /* use a fully associative victim tlb of 8 entries */
 #define CPU_VTLB_SIZE 8
diff --git a/include/exec/tb-hash.h b/include/exec/tb-hash.h
index e0bd786f9f..0f4e8a08af 100644
--- a/include/exec/tb-hash.h
+++ b/include/exec/tb-hash.h
@@ -20,6 +20,14 @@
 #ifndef EXEC_TB_HASH
 #define EXEC_TB_HASH
 
+/* Only the bottom TB_JMP_PAGE_BITS of the jump cache hash bits vary for
+ addresses on the same page. The top bits are the same. This allows
+ TLB invalidation to quickly clear a subset of the hash table. */
+#define TB_JMP_PAGE_BITS (TB_JMP_CACHE_BITS / 2)
+#define TB_JMP_PAGE_SIZE (1 << TB_JMP_PAGE_BITS)
+#define TB_JMP_ADDR_MASK (TB_JMP_PAGE_SIZE - 1)
+#define TB_JMP_PAGE_MASK (TB_JMP_CACHE_SIZE - TB_JMP_PAGE_SIZE)
+
 static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc)
 {
     target_ulong tmp;
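
Note: the comment moved into tb-hash.h above describes how the jump cache
index is split in two: the top TB_JMP_PAGE_BITS are determined by the page,
the bottom TB_JMP_PAGE_BITS by the offset within the page, so one page's
entries occupy a single contiguous block that TLB invalidation can clear at
once. The following standalone sketch is only an illustration of that scheme,
not QEMU's code; TB_JMP_CACHE_BITS = 12, PAGE_BITS = 12 (4 KiB pages) and the
helper names hash_page/hash_func/flush_page are assumptions made for the
example. The split trades some hash dispersion for cheap per-page
invalidation, which is exactly what the TLB code needs.

/* Illustrative sketch only -- not QEMU's implementation. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TB_JMP_CACHE_BITS 12                     /* assumed for the example */
#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)

/* Same split as the defines moved by this patch. */
#define TB_JMP_PAGE_BITS (TB_JMP_CACHE_BITS / 2)
#define TB_JMP_PAGE_SIZE (1 << TB_JMP_PAGE_BITS)
#define TB_JMP_ADDR_MASK (TB_JMP_PAGE_SIZE - 1)
#define TB_JMP_PAGE_MASK (TB_JMP_CACHE_SIZE - TB_JMP_PAGE_SIZE)

#define PAGE_BITS 12                             /* stand-in for TARGET_PAGE_BITS */

static void *jmp_cache[TB_JMP_CACHE_SIZE];

/* The top TB_JMP_PAGE_BITS of the index depend only on the page number,
 * so they are identical for every pc on the same page. */
static unsigned int hash_page(uint64_t pc)
{
    return (pc >> (PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK;
}

/* The bottom TB_JMP_PAGE_BITS come from the offset within the page, so a
 * page's entries land in one contiguous TB_JMP_PAGE_SIZE-slot block. */
static unsigned int hash_func(uint64_t pc)
{
    return hash_page(pc) | (pc & TB_JMP_ADDR_MASK);
}

/* Invalidating a page therefore reduces to a single contiguous memset.
 * hash_page() ignores the in-page offset bits, so any address on the
 * page selects the same block. */
static void flush_page(uint64_t addr)
{
    memset(&jmp_cache[hash_page(addr)], 0,
           TB_JMP_PAGE_SIZE * sizeof(jmp_cache[0]));
}

int main(void)
{
    uint64_t a = 0x40001000, b = 0x40001ffc;     /* two pcs on one 4 KiB page */

    /* Both indices fall inside the same TB_JMP_PAGE_SIZE-slot block. */
    printf("hash(a)=%#x hash(b)=%#x page block starts at %#x\n",
           hash_func(a), hash_func(b), hash_page(a));
    flush_page(a);
    return 0;
}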