Diffstat (limited to 'exec.c')
-rw-r--r--  exec.c  102
1 file changed, 102 insertions(+), 0 deletions(-)
diff --git a/exec.c b/exec.c
index faa63336c5..6dd88fce45 100644
--- a/exec.c
+++ b/exec.c
@@ -3045,6 +3045,108 @@ void cpu_physical_memory_write_rom(target_phys_addr_t addr,
}
}
+typedef struct {
+ void *buffer;
+ target_phys_addr_t addr;
+ target_phys_addr_t len;
+} BounceBuffer;
+
+static BounceBuffer bounce;
+
+/* Map a physical memory region into a host virtual address.
+ * May map a subset of the requested range, given by and returned in *plen.
+ * May return NULL if resources needed to perform the mapping are exhausted.
+ * Use only for reads OR writes - not for read-modify-write operations.
+ */
+void *cpu_physical_memory_map(target_phys_addr_t addr,
+ target_phys_addr_t *plen,
+ int is_write)
+{
+ target_phys_addr_t len = *plen;
+ target_phys_addr_t done = 0;
+ int l;
+ uint8_t *ret = NULL;
+ uint8_t *ptr;
+ target_phys_addr_t page;
+ unsigned long pd;
+ PhysPageDesc *p;
+ unsigned long addr1;
+
+ while (len > 0) {
+ page = addr & TARGET_PAGE_MASK;
+ l = (page + TARGET_PAGE_SIZE) - addr;
+ if (l > len)
+ l = len;
+ p = phys_page_find(page >> TARGET_PAGE_BITS);
+ if (!p) {
+ pd = IO_MEM_UNASSIGNED;
+ } else {
+ pd = p->phys_offset;
+ }
+
+ if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
+ if (done || bounce.buffer) {
+ break;
+ }
+ bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
+ bounce.addr = addr;
+ bounce.len = l;
+ if (!is_write) {
+ cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
+ }
+ ptr = bounce.buffer;
+ } else {
+ addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
+ ptr = phys_ram_base + addr1;
+ }
+ if (!done) {
+ ret = ptr;
+ } else if (ret + done != ptr) {
+ break;
+ }
+
+ len -= l;
+ addr += l;
+ done += l;
+ }
+ *plen = done;
+ return ret;
+}
+
+/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
+ * Will also mark the memory as dirty if is_write == 1. access_len gives
+ * the amount of memory that was actually read or written by the caller.
+ */
+void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
+ int is_write, target_phys_addr_t access_len)
+{
+ if (buffer != bounce.buffer) {
+ if (is_write) {
+ unsigned long addr1 = (uint8_t *)buffer - phys_ram_base;
+ while (access_len) {
+ unsigned l;
+ l = TARGET_PAGE_SIZE;
+ if (l > access_len)
+ l = access_len;
+ if (!cpu_physical_memory_is_dirty(addr1)) {
+ /* invalidate code */
+ tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
+ /* set dirty bit */
+ phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
+ (0xff & ~CODE_DIRTY_FLAG);
+ }
+ addr1 += l;
+ access_len -= l;
+ }
+ }
+ return;
+ }
+ if (is_write) {
+ cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
+ }
+ qemu_free(bounce.buffer);
+ bounce.buffer = NULL;
+}
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
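
A minimal usage sketch of the API added by this patch (not part of the commit; the helper do_dma_to_guest() and its error handling are illustrative assumptions). It shows how a device model might copy data into guest physical memory with cpu_physical_memory_map()/cpu_physical_memory_unmap(), handling the case where only part of the requested range could be mapped:

/* Hypothetical caller - not in this commit.  Copies 'size' bytes from
 * 'src' into guest physical memory at 'guest_addr' using the new
 * map/unmap API, one mapped chunk at a time. */
static void do_dma_to_guest(target_phys_addr_t guest_addr,
                            const uint8_t *src, target_phys_addr_t size)
{
    while (size > 0) {
        target_phys_addr_t plen = size;
        /* is_write == 1: we intend to write into guest memory */
        void *host = cpu_physical_memory_map(guest_addr, &plen, 1);

        if (!host || plen == 0) {
            /* Mapping failed (e.g. the single bounce buffer is busy);
             * a real device would defer the transfer and retry later. */
            break;
        }
        memcpy(host, src, plen);
        /* access_len == plen: everything mapped was actually written,
         * so unmap marks that range dirty (and, for non-RAM regions,
         * flushes the bounce buffer back with cpu_physical_memory_write). */
        cpu_physical_memory_unmap(host, plen, 1, plen);

        guest_addr += plen;
        src += plen;
        size -= plen;
    }
}

Because the fallback path relies on the single static BounceBuffer, at most one mapping of a non-RAM region can be outstanding at a time, and a mapping stops at the first non-contiguous or non-RAM page. Callers therefore have to cope with cpu_physical_memory_map() returning NULL or a shorter range than requested, as the loop above does.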