author    Paul Brook <paul@codesourcery.com>  2010-04-05 00:28:53 +0100
committer Paul Brook <paul@codesourcery.com>  2010-04-05 00:28:53 +0100
commit    355b194369d02df7a97d554eef2a9cffe98d736f
tree      15fb62cc03f22548ea08f7cb1609d7abe7970052 /softmmu_template.h
parent    5bd2c0d7a6778542827ac7f897eed3fb5cf7ff5a
Split TLB addend and target_phys_addr_t
Historically the qemu TLB "addend" field was used for both RAM and IO accesses, so it needed to be able to hold both host addresses (unsigned long) and guest physical addresses (target_phys_addr_t). However, since the introduction of the iotlb field it has only been used for RAM accesses. This means we can change the type of addend to unsigned long and remove the associated hacks in the big-endian TCG backends. We can also remove the host dependence from target_phys_addr_t.

Signed-off-by: Paul Brook <paul@codesourcery.com>
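To make the type split concrete, here is a minimal C sketch of the before/after shape of a TLB entry. It is an illustration under stated assumptions, not QEMU's actual declarations: the typedefs and struct/field names (tlb_entry_before, tlb_entry_after, addr_read) are stand-ins, since QEMU configures target_ulong and target_phys_addr_t per target build; only the type change of addend comes from the patch itself.

    #include <stdint.h>

    /* Stand-in typedefs; real QEMU defines these per target build. */
    typedef uint32_t target_ulong;        /* guest virtual address */
    typedef uint64_t target_phys_addr_t;  /* guest physical address, possibly
                                             wider than a host pointer */

    /* Before this patch: one field served both RAM and IO accesses, so it
     * had to be wide enough for a guest physical address as well as a host
     * virtual address. */
    struct tlb_entry_before {
        target_ulong addr_read;           /* guest VA tag for loads */
        target_phys_addr_t addend;        /* host VA offset *or* guest PA */
    };

    /* After: IO accesses fetch their guest physical address from the
     * separate iotlb[] array, so addend only ever holds the host virtual
     * address offset of a RAM page and fits in an unsigned long. */
    struct tlb_entry_after {
        target_ulong addr_read;
        unsigned long addend;             /* host VA - guest VA, RAM only */
    };

On a 32-bit host emulating a target with 64-bit physical addresses, the old layout forced 64-bit handling of addend on the hot RAM path, which is what the big-endian TCG backend hacks mentioned above had to work around; the new layout keeps that path pointer-sized.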
Diffstat (limited to 'softmmu_template.h')
-rw-r--r--  softmmu_template.h | 28 ++++++++++++++++------------
1 file changed, 16 insertions(+), 12 deletions(-)
diff --git a/softmmu_template.h b/softmmu_template.h
index 2f37c34aff..c2df9ec2d4 100644
--- a/softmmu_template.h
+++ b/softmmu_template.h
@@ -87,7 +87,8 @@ DATA_TYPE REGPARM glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
     DATA_TYPE res;
     int index;
     target_ulong tlb_addr;
-    target_phys_addr_t addend;
+    target_phys_addr_t ioaddr;
+    unsigned long addend;
     void *retaddr;
 
     /* test if there is match for unaligned or IO access */
@@ -101,8 +102,8 @@ DATA_TYPE REGPARM glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
             if ((addr & (DATA_SIZE - 1)) != 0)
                 goto do_unaligned_access;
             retaddr = GETPC();
-            addend = env->iotlb[mmu_idx][index];
-            res = glue(io_read, SUFFIX)(addend, addr, retaddr);
+            ioaddr = env->iotlb[mmu_idx][index];
+            res = glue(io_read, SUFFIX)(ioaddr, addr, retaddr);
         } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
             /* slow unaligned access (it spans two pages or IO) */
         do_unaligned_access:
@@ -143,7 +144,8 @@ static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
 {
     DATA_TYPE res, res1, res2;
     int index, shift;
-    target_phys_addr_t addend;
+    target_phys_addr_t ioaddr;
+    unsigned long addend;
     target_ulong tlb_addr, addr1, addr2;
 
     index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
@@ -154,8 +156,8 @@ static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
             /* IO access */
             if ((addr & (DATA_SIZE - 1)) != 0)
                 goto do_unaligned_access;
-            addend = env->iotlb[mmu_idx][index];
-            res = glue(io_read, SUFFIX)(addend, addr, retaddr);
+            ioaddr = env->iotlb[mmu_idx][index];
+            res = glue(io_read, SUFFIX)(ioaddr, addr, retaddr);
         } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
         do_unaligned_access:
             /* slow unaligned access (it spans two pages) */
@@ -224,7 +226,8 @@ void REGPARM glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                  DATA_TYPE val,
                                                  int mmu_idx)
 {
-    target_phys_addr_t addend;
+    target_phys_addr_t ioaddr;
+    unsigned long addend;
     target_ulong tlb_addr;
     void *retaddr;
     int index;
@@ -238,8 +241,8 @@ void REGPARM glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr,
             if ((addr & (DATA_SIZE - 1)) != 0)
                 goto do_unaligned_access;
             retaddr = GETPC();
-            addend = env->iotlb[mmu_idx][index];
-            glue(io_write, SUFFIX)(addend, val, addr, retaddr);
+            ioaddr = env->iotlb[mmu_idx][index];
+            glue(io_write, SUFFIX)(ioaddr, val, addr, retaddr);
         } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
         do_unaligned_access:
             retaddr = GETPC();
@@ -277,7 +280,8 @@ static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                    int mmu_idx,
                                                    void *retaddr)
 {
-    target_phys_addr_t addend;
+    target_phys_addr_t ioaddr;
+    unsigned long addend;
     target_ulong tlb_addr;
     int index, i;
 
@@ -289,8 +293,8 @@ static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
             /* IO access */
             if ((addr & (DATA_SIZE - 1)) != 0)
                 goto do_unaligned_access;
-            addend = env->iotlb[mmu_idx][index];
-            glue(io_write, SUFFIX)(addend, val, addr, retaddr);
+            ioaddr = env->iotlb[mmu_idx][index];
+            glue(io_write, SUFFIX)(ioaddr, val, addr, retaddr);
         } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
         do_unaligned_access:
             /* XXX: not efficient, but simple */
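For context on why addend must be host-pointer-sized, the RAM fast path elsewhere in softmmu_template.h (not part of this diff) computes the host address by adding the per-page addend to the guest virtual address. A self-contained sketch of that idea, with illustrative names (guest_ram, guest_va) rather than QEMU's:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t target_ulong;

    static uint8_t guest_ram[4096];       /* pretend host backing for one guest page */

    int main(void)
    {
        target_ulong guest_va = 0x40;     /* guest address within the page */

        /* At TLB fill time: addend = host page base - guest page base
         * (guest page base is 0 here), a host-pointer-sized quantity. */
        unsigned long addend = (unsigned long)(uintptr_t)guest_ram;

        guest_ram[guest_va] = 0xab;       /* guest "writes" a byte */

        /* Fast-path load: one addition maps guest VA to host VA. */
        uint8_t val = *(uint8_t *)(uintptr_t)(guest_va + addend);
        printf("loaded 0x%02x\n", val);   /* prints: loaded 0xab */
        return 0;
    }

Because this addition produces a host virtual address, a host-sized unsigned long is exactly wide enough; only the IO path, which now goes through iotlb[], ever needs a target_phys_addr_t.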