author | Edgar E. Iglesias <edgar.iglesias@gmail.com> | 2009-05-20 19:37:39 +0200
---|---|---
committer | Edgar E. Iglesias <edgar.iglesias@gmail.com> | 2009-05-26 21:10:11 +0200
commit | 4acb54baa6557244fd9ea60d8119abfbefae6777 (patch) |
tree | c61957165747f6af51114b0a8ebc37d666f97be6 | /target-microblaze
parent | 370ab9863dd058c36781715f7a910685900acd5f (diff) |
microblaze: Add translation routines.
Signed-off-by: Edgar E. Iglesias <edgar.iglesias@gmail.com>
Diffstat (limited to 'target-microblaze')
-rw-r--r-- | target-microblaze/cpu.h | 311
-rw-r--r-- | target-microblaze/exec.h | 57
-rw-r--r-- | target-microblaze/helper.c | 255
-rw-r--r-- | target-microblaze/helper.h | 19
-rw-r--r-- | target-microblaze/microblaze-decode.h | 52
-rw-r--r-- | target-microblaze/op_helper.c | 216
-rw-r--r-- | target-microblaze/translate.c | 1395
7 files changed, 2305 insertions, 0 deletions
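
A note on the exception entry written by do_interrupt() in helper.c below: because MSR_UMS and MSR_VMS sit exactly one bit above MSR_UM and MSR_VM, saving the current mode before dropping to privileged real mode is just a left shift by one. A minimal standalone sketch of that update, using the bit definitions from cpu.h (enter_exception is an invented name for illustration, not part of the patch):

    #include <stdio.h>
    #include <stdint.h>

    /* MSR bit positions as defined in target-microblaze/cpu.h. */
    #define MSR_EIP (1 << 9)    /* Exception In Progress */
    #define MSR_UM  (1 << 11)   /* User Mode             */
    #define MSR_UMS (1 << 12)   /* User Mode Save        */
    #define MSR_VM  (1 << 13)   /* Virtual Mode          */
    #define MSR_VMS (1 << 14)   /* Virtual Mode Save     */

    /* Mimic the MSR update on the EXCP_MMU path: copy the live VM/UM bits
     * into their *S save slots (a shift left by one), clear the live bits
     * so the handler runs in privileged real mode, and flag the exception. */
    static uint32_t enter_exception(uint32_t msr)
    {
        uint32_t saved = (msr & (MSR_VM | MSR_UM)) << 1;

        msr &= ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM);
        msr |= saved;
        msr |= MSR_EIP;
        return msr;
    }

    int main(void)
    {
        uint32_t msr = MSR_VM | MSR_UM;   /* user mode with the MMU enabled */

        /* 0x00002800 -> 0x00005200: VMS, UMS and EIP set, VM and UM cleared. */
        printf("before=%08x after=%08x\n", (unsigned)msr,
               (unsigned)enter_exception(msr));
        return 0;
    }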
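
compute_carry() in op_helper.c below derives the carry-out of a 32-bit add plus carry-in without using a wider type; the b == ~0 && cin case is special-cased because b + cin would itself wrap around. A quick cross-check of that formula against 64-bit arithmetic (carry_out is simply a renamed copy for the test, not part of the patch):

    #include <stdio.h>
    #include <stdint.h>
    #include <assert.h>

    /* Same test as compute_carry() in op_helper.c: a + b + cin overflows
     * 32 bits iff b is all-ones with a carry-in, or (~0 - a) < (b + cin). */
    static uint32_t carry_out(uint32_t a, uint32_t b, uint32_t cin)
    {
        if (b == ~0u && cin)
            return 1;
        return (~0u - a) < (b + cin);
    }

    int main(void)
    {
        static const uint32_t cases[][3] = {
            { 0xffffffffu, 1, 0 }, { 0xffffffffu, 0xffffffffu, 1 },
            { 0x7fffffffu, 1, 0 }, { 0, 0, 1 }, { 0x80000000u, 0x80000000u, 0 },
        };
        unsigned int i;

        for (i = 0; i < sizeof(cases) / sizeof(cases[0]); i++) {
            uint32_t a = cases[i][0], b = cases[i][1], c = cases[i][2];
            uint64_t wide = (uint64_t)a + b + c;

            /* The 32-bit formula must agree with the 64-bit reference. */
            assert(carry_out(a, b, c) == (uint32_t)(wide >> 32));
        }
        printf("carry model matches the 64-bit reference\n");
        return 0;
    }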
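
Instruction selection in translate.c below is table driven: the DEC_* pairs from microblaze-decode.h give a {bits, mask} pattern per instruction group, and decode() picks the first decinfo[] entry whose mask selects opcode bits equal to the pattern. A minimal standalone sketch of that matching over the 6-bit major opcode (the two table entries reuse the DEC_ADD and DEC_IMM encodings; demo_decode and the other names are invented for illustration, not part of the patch):

    #include <stdio.h>
    #include <stdint.h>

    struct demo_dec {
        uint32_t bits;      /* value the masked opcode must equal */
        uint32_t mask;      /* which opcode bits are significant  */
        const char *name;
    };

    static const struct demo_dec demo_table[] = {
        { 0x00, 0x31, "add-group" },  /* DEC_ADD: opcode 000000, mask 110001 */
        { 0x2c, 0x3f, "imm"       },  /* DEC_IMM: opcode 101100, mask 111111 */
        { 0x00, 0x00, "unknown"   },  /* catch-all, like the dec_null entry  */
    };

    static const char *demo_decode(uint32_t ir)
    {
        uint32_t opcode = (ir >> 26) & 0x3f;   /* bits 26..31, as in decode() */
        unsigned int i;

        for (i = 0; i < sizeof(demo_table) / sizeof(demo_table[0]); i++) {
            if ((opcode & demo_table[i].mask) == demo_table[i].bits)
                return demo_table[i].name;
        }
        return "unreachable";
    }

    int main(void)
    {
        printf("%s\n", demo_decode(0x00000000)); /* add r0, r0, r0 -> add-group */
        printf("%s\n", demo_decode(0xb0001234)); /* imm 0x1234     -> imm       */
        return 0;
    }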
diff --git a/target-microblaze/cpu.h b/target-microblaze/cpu.h new file mode 100644 index 0000000000..97f708c5a4 --- /dev/null +++ b/target-microblaze/cpu.h @@ -0,0 +1,311 @@ +/* + * MicroBlaze virtual CPU header + * + * Copyright (c) 2009 Edgar E. Iglesias + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA + */ +#ifndef CPU_MICROBLAZE_H +#define CPU_MICROBLAZE_H + +#define TARGET_LONG_BITS 32 + +#define CPUState struct CPUMBState + +#include "cpu-defs.h" +struct CPUMBState; +#if !defined(CONFIG_USER_ONLY) +#include "mmu.h" +#endif + +#define TARGET_HAS_ICE 1 + +#define ELF_MACHINE EM_XILINX_MICROBLAZE + +#define EXCP_NMI 1 +#define EXCP_MMU 2 +#define EXCP_IRQ 3 +#define EXCP_BREAK 4 +#define EXCP_HW_BREAK 5 + +/* Register aliases. R0 - R15 */ +#define R_SP 1 +#define SR_PC 0 +#define SR_MSR 1 +#define SR_EAR 3 +#define SR_ESR 5 +#define SR_FSR 7 +#define SR_BTR 0xb +#define SR_EDR 0xd + +/* MSR flags. */ +#define MSR_BE (1<<0) /* 0x001 */ +#define MSR_IE (1<<1) /* 0x002 */ +#define MSR_C (1<<2) /* 0x004 */ +#define MSR_BIP (1<<3) /* 0x008 */ +#define MSR_FSL (1<<4) /* 0x010 */ +#define MSR_ICE (1<<5) /* 0x020 */ +#define MSR_DZ (1<<6) /* 0x040 */ +#define MSR_DCE (1<<7) /* 0x080 */ +#define MSR_EE (1<<8) /* 0x100 */ +#define MSR_EIP (1<<9) /* 0x200 */ +#define MSR_CC (1<<31) + +/* Machine State Register (MSR) Fields */ +#define MSR_UM (1<<11) /* User Mode */ +#define MSR_UMS (1<<12) /* User Mode Save */ +#define MSR_VM (1<<13) /* Virtual Mode */ +#define MSR_VMS (1<<14) /* Virtual Mode Save */ + +#define MSR_KERNEL MSR_EE|MSR_VM +//#define MSR_USER MSR_KERNEL|MSR_UM|MSR_IE +#define MSR_KERNEL_VMS MSR_EE|MSR_VMS +//#define MSR_USER_VMS MSR_KERNEL_VMS|MSR_UMS|MSR_IE + +/* Exception State Register (ESR) Fields */ +#define ESR_DIZ (1<<11) /* Zone Protection */ +#define ESR_S (1<<10) /* Store instruction */ + + + +/* Version reg. 
*/ +/* Basic PVR mask */ +#define PVR0_PVR_FULL_MASK 0x80000000 +#define PVR0_USE_BARREL_MASK 0x40000000 +#define PVR0_USE_DIV_MASK 0x20000000 +#define PVR0_USE_HW_MUL_MASK 0x10000000 +#define PVR0_USE_FPU_MASK 0x08000000 +#define PVR0_USE_EXC_MASK 0x04000000 +#define PVR0_USE_ICACHE_MASK 0x02000000 +#define PVR0_USE_DCACHE_MASK 0x01000000 +#define PVR0_USE_MMU 0x00800000 /* new */ +#define PVR0_VERSION_MASK 0x0000FF00 +#define PVR0_USER1_MASK 0x000000FF + +/* User 2 PVR mask */ +#define PVR1_USER2_MASK 0xFFFFFFFF + +/* Configuration PVR masks */ +#define PVR2_D_OPB_MASK 0x80000000 +#define PVR2_D_LMB_MASK 0x40000000 +#define PVR2_I_OPB_MASK 0x20000000 +#define PVR2_I_LMB_MASK 0x10000000 +#define PVR2_INTERRUPT_IS_EDGE_MASK 0x08000000 +#define PVR2_EDGE_IS_POSITIVE_MASK 0x04000000 +#define PVR2_D_PLB_MASK 0x02000000 /* new */ +#define PVR2_I_PLB_MASK 0x01000000 /* new */ +#define PVR2_INTERCONNECT 0x00800000 /* new */ +#define PVR2_USE_EXTEND_FSL 0x00080000 /* new */ +#define PVR2_USE_FSL_EXC 0x00040000 /* new */ +#define PVR2_USE_MSR_INSTR 0x00020000 +#define PVR2_USE_PCMP_INSTR 0x00010000 +#define PVR2_AREA_OPTIMISED 0x00008000 +#define PVR2_USE_BARREL_MASK 0x00004000 +#define PVR2_USE_DIV_MASK 0x00002000 +#define PVR2_USE_HW_MUL_MASK 0x00001000 +#define PVR2_USE_FPU_MASK 0x00000800 +#define PVR2_USE_MUL64_MASK 0x00000400 +#define PVR2_USE_FPU2_MASK 0x00000200 /* new */ +#define PVR2_USE_IPLBEXC 0x00000100 +#define PVR2_USE_DPLBEXC 0x00000080 +#define PVR2_OPCODE_0x0_ILL_MASK 0x00000040 +#define PVR2_UNALIGNED_EXC_MASK 0x00000020 +#define PVR2_ILL_OPCODE_EXC_MASK 0x00000010 +#define PVR2_IOPB_BUS_EXC_MASK 0x00000008 +#define PVR2_DOPB_BUS_EXC_MASK 0x00000004 +#define PVR2_DIV_ZERO_EXC_MASK 0x00000002 +#define PVR2_FPU_EXC_MASK 0x00000001 + +/* Debug and exception PVR masks */ +#define PVR3_DEBUG_ENABLED_MASK 0x80000000 +#define PVR3_NUMBER_OF_PC_BRK_MASK 0x1E000000 +#define PVR3_NUMBER_OF_RD_ADDR_BRK_MASK 0x00380000 +#define PVR3_NUMBER_OF_WR_ADDR_BRK_MASK 0x0000E000 +#define PVR3_FSL_LINKS_MASK 0x00000380 + +/* ICache config PVR masks */ +#define PVR4_USE_ICACHE_MASK 0x80000000 +#define PVR4_ICACHE_ADDR_TAG_BITS_MASK 0x7C000000 +#define PVR4_ICACHE_USE_FSL_MASK 0x02000000 +#define PVR4_ICACHE_ALLOW_WR_MASK 0x01000000 +#define PVR4_ICACHE_LINE_LEN_MASK 0x00E00000 +#define PVR4_ICACHE_BYTE_SIZE_MASK 0x001F0000 + +/* DCache config PVR masks */ +#define PVR5_USE_DCACHE_MASK 0x80000000 +#define PVR5_DCACHE_ADDR_TAG_BITS_MASK 0x7C000000 +#define PVR5_DCACHE_USE_FSL_MASK 0x02000000 +#define PVR5_DCACHE_ALLOW_WR_MASK 0x01000000 +#define PVR5_DCACHE_LINE_LEN_MASK 0x00E00000 +#define PVR5_DCACHE_BYTE_SIZE_MASK 0x001F0000 + +/* ICache base address PVR mask */ +#define PVR6_ICACHE_BASEADDR_MASK 0xFFFFFFFF + +/* ICache high address PVR mask */ +#define PVR7_ICACHE_HIGHADDR_MASK 0xFFFFFFFF + +/* DCache base address PVR mask */ +#define PVR8_DCACHE_BASEADDR_MASK 0xFFFFFFFF + +/* DCache high address PVR mask */ +#define PVR9_DCACHE_HIGHADDR_MASK 0xFFFFFFFF + +/* Target family PVR mask */ +#define PVR10_TARGET_FAMILY_MASK 0xFF000000 + +/* MMU descrtiption */ +#define PVR11_USE_MMU 0xC0000000 +#define PVR11_MMU_ITLB_SIZE 0x38000000 +#define PVR11_MMU_DTLB_SIZE 0x07000000 +#define PVR11_MMU_TLB_ACCESS 0x00C00000 +#define PVR11_MMU_ZONES 0x003C0000 +/* MSR Reset value PVR mask */ +#define PVR11_MSR_RESET_VALUE_MASK 0x000007FF + + + +/* CPU flags. */ + +/* Condition codes. 
*/ +#define CC_GE 5 +#define CC_GT 4 +#define CC_LE 3 +#define CC_LT 2 +#define CC_NE 1 +#define CC_EQ 0 + +#define NB_MMU_MODES 3 +typedef struct CPUMBState { + uint32_t debug; + uint32_t btaken; + uint32_t btarget; + uint32_t bimm; + + uint32_t imm; + uint32_t regs[33]; + uint32_t sregs[24]; + + /* Internal flags. */ +#define IMM_FLAG 4 +#define DRTI_FLAG (1 << 16) +#define DRTE_FLAG (1 << 17) +#define DRTB_FLAG (1 << 18) +#define D_FLAG (1 << 19) /* Bit in ESR. */ +/* TB dependant CPUState. */ +#define IFLAGS_TB_MASK (D_FLAG | IMM_FLAG | DRTI_FLAG | DRTE_FLAG | DRTB_FLAG) + uint32_t iflags; + + struct { + uint32_t regs[16]; + } pvr; + +#if !defined(CONFIG_USER_ONLY) + /* Unified MMU. */ + struct microblaze_mmu mmu; +#endif + + CPU_COMMON +} CPUMBState; + +CPUState *cpu_mb_init(const char *cpu_model); +int cpu_mb_exec(CPUState *s); +void cpu_mb_close(CPUState *s); +void do_interrupt(CPUState *env); +/* you can call this signal handler from your SIGBUS and SIGSEGV + signal handlers to inform the virtual CPU of exceptions. non zero + is returned if the signal was handled by the virtual CPU. */ +int cpu_mb_signal_handler(int host_signum, void *pinfo, + void *puc); + +enum { + CC_OP_DYNAMIC, /* Use env->cc_op */ + CC_OP_FLAGS, + CC_OP_CMP, +}; + +/* FIXME: MB uses variable pages down to 1K but linux only uses 4k. */ +#define TARGET_PAGE_BITS 12 +#define MMAP_SHIFT TARGET_PAGE_BITS + +#define cpu_init cpu_mb_init +#define cpu_exec cpu_mb_exec +#define cpu_gen_code cpu_mb_gen_code +#define cpu_signal_handler cpu_mb_signal_handler + +#define CPU_SAVE_VERSION 1 + +/* MMU modes definitions */ +#define MMU_MODE0_SUFFIX _nommu +#define MMU_MODE1_SUFFIX _kernel +#define MMU_MODE2_SUFFIX _user +#define MMU_NOMMU_IDX 0 +#define MMU_KERNEL_IDX 1 +#define MMU_USER_IDX 2 +/* See NB_MMU_MODES further up the file. */ + +static inline int cpu_mmu_index (CPUState *env) +{ + /* Are we in nommu mode?. */ + if (!(env->sregs[SR_MSR] & MSR_VM)) + return MMU_NOMMU_IDX; + + if (env->sregs[SR_MSR] & MSR_UM) + return MMU_USER_IDX; + return MMU_KERNEL_IDX; +} + +int cpu_mb_handle_mmu_fault(CPUState *env, target_ulong address, int rw, + int mmu_idx, int is_softmmu); + +#if defined(CONFIG_USER_ONLY) +static inline void cpu_clone_regs(CPUState *env, target_ulong newsp) +{ + if (newsp) + env->regs[R_SP] = newsp; + env->regs[3] = 0; +} +#endif + +static inline void cpu_set_tls(CPUState *env, target_ulong newtls) +{ +} + +static inline int cpu_interrupts_enabled(CPUState *env) +{ + return env->sregs[SR_MSR] & MSR_IE; +} + +#include "cpu-all.h" +#include "exec-all.h" + +static inline void cpu_pc_from_tb(CPUState *env, TranslationBlock *tb) +{ + env->sregs[SR_PC] = tb->pc; +} + +static inline target_ulong cpu_get_pc(CPUState *env) +{ + return env->sregs[SR_PC]; +} + +static inline void cpu_get_tb_cpu_state(CPUState *env, target_ulong *pc, + target_ulong *cs_base, int *flags) +{ + *pc = env->sregs[SR_PC]; + *cs_base = 0; + *flags = env->iflags & IFLAGS_TB_MASK; +} +#endif diff --git a/target-microblaze/exec.h b/target-microblaze/exec.h new file mode 100644 index 0000000000..55045bb921 --- /dev/null +++ b/target-microblaze/exec.h @@ -0,0 +1,57 @@ +/* + * Microblaze execution defines + * + * Copyright (c) 2009 Edgar E. Iglesias + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. 
+ * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA + */ +#include "dyngen-exec.h" + +register struct CPUMBState *env asm(AREG0); + +#include "cpu.h" +#include "exec-all.h" + +static inline void env_to_regs(void) +{ +} + +static inline void regs_to_env(void) +{ +} + +#if !defined(CONFIG_USER_ONLY) +#include "softmmu_exec.h" +#endif + +void cpu_mb_flush_flags(CPUMBState *env, int cc_op); + +static inline int cpu_has_work(CPUState *env) +{ + return (env->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI)); +} + +static inline int cpu_halted(CPUState *env) { + if (!env->halted) + return 0; + + /* IRQ, NMI and GURU execeptions wakes us up. */ + if (env->interrupt_request + & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI)) { + env->halted = 0; + return 0; + } + return EXCP_HALTED; +} diff --git a/target-microblaze/helper.c b/target-microblaze/helper.c new file mode 100644 index 0000000000..00936594da --- /dev/null +++ b/target-microblaze/helper.c @@ -0,0 +1,255 @@ +/* + * MicroBlaze helper routines. + * + * Copyright (c) 2009 Edgar E. Iglesias <edgar.iglesias@gmail.com> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA + */ + +#include <stdio.h> +#include <string.h> +#include <assert.h> + +#include "config.h" +#include "cpu.h" +#include "exec-all.h" +#include "host-utils.h" + +#define D(x) +#define DMMU(x) + +#if defined(CONFIG_USER_ONLY) + +void do_interrupt (CPUState *env) +{ + env->exception_index = -1; + env->regs[14] = env->sregs[SR_PC]; +} + +int cpu_mb_handle_mmu_fault(CPUState * env, target_ulong address, int rw, + int mmu_idx, int is_softmmu) +{ + env->exception_index = 0xaa; + cpu_dump_state(env, stderr, fprintf, 0); + return 1; +} + +target_phys_addr_t cpu_get_phys_page_debug(CPUState * env, target_ulong addr) +{ + return addr; +} + +#else /* !CONFIG_USER_ONLY */ + +int cpu_mb_handle_mmu_fault (CPUState *env, target_ulong address, int rw, + int mmu_idx, int is_softmmu) +{ + unsigned int hit; + unsigned int mmu_available; + int r = 1; + int prot; + + mmu_available = 0; + if (env->pvr.regs[0] & PVR0_USE_MMU) { + mmu_available = 1; + if ((env->pvr.regs[0] & PVR0_PVR_FULL_MASK) + && (env->pvr.regs[11] & PVR11_USE_MMU) != PVR11_USE_MMU) { + mmu_available = 0; + } + } + + /* Translate if the MMU is available and enabled. 
*/ + if (mmu_available && (env->sregs[SR_MSR] & MSR_VM)) { + target_ulong vaddr, paddr; + struct microblaze_mmu_lookup lu; + + hit = mmu_translate(&env->mmu, &lu, address, rw, mmu_idx); + if (hit) { + vaddr = address & TARGET_PAGE_MASK; + paddr = lu.paddr + vaddr - lu.vaddr; + + DMMU(qemu_log("MMU map mmu=%d v=%x p=%x prot=%x\n", + mmu_idx, vaddr, paddr, lu.prot)); + r = tlb_set_page(env, vaddr, + paddr, lu.prot, mmu_idx, is_softmmu); + } else { + env->sregs[SR_EAR] = address; + DMMU(qemu_log("mmu=%d miss addr=%x\n", mmu_idx, vaddr)); + + switch (lu.err) { + case ERR_PROT: + env->sregs[SR_ESR] = rw == 2 ? 17 : 16; + env->sregs[SR_ESR] |= (rw == 1) << 10; + break; + case ERR_MISS: + env->sregs[SR_ESR] = rw == 2 ? 19 : 18; + env->sregs[SR_ESR] |= (rw == 1) << 10; + break; + default: + abort(); + break; + } + + if (env->exception_index == EXCP_MMU) { + cpu_abort(env, "recursive faults\n"); + } + + /* TLB miss. */ + env->exception_index = EXCP_MMU; + } + } else { + /* MMU disabled or not available. */ + address &= TARGET_PAGE_MASK; + prot = PAGE_BITS; + r = tlb_set_page(env, address, address, prot, mmu_idx, is_softmmu); + } + return r; +} + +void do_interrupt(CPUState *env) +{ + uint32_t t; + + /* IMM flag cannot propagate accross a branch and into the dslot. */ + assert(!((env->iflags & D_FLAG) && (env->iflags & IMM_FLAG))); + assert(!(env->iflags & (DRTI_FLAG | DRTE_FLAG | DRTB_FLAG))); +/* assert(env->sregs[SR_MSR] & (MSR_EE)); Only for HW exceptions. */ + switch (env->exception_index) { + case EXCP_MMU: + env->regs[17] = env->sregs[SR_PC]; + + /* Exception breaks branch + dslot sequence? */ + if (env->iflags & D_FLAG) { + D(qemu_log("D_FLAG set at exception bimm=%d\n", env->bimm)); + env->sregs[SR_ESR] |= 1 << 12 ; + env->sregs[SR_BTR] = env->btarget; + + /* Reexecute the branch. */ + env->regs[17] -= 4; + /* was the branch immprefixed?. */ + if (env->bimm) { + qemu_log_mask(CPU_LOG_INT, + "bimm exception at pc=%x iflags=%x\n", + env->sregs[SR_PC], env->iflags); + env->regs[17] -= 4; + log_cpu_state_mask(CPU_LOG_INT, env, 0); + } + } else if (env->iflags & IMM_FLAG) { + D(qemu_log("IMM_FLAG set at exception\n")); + env->regs[17] -= 4; + } + + /* Disable the MMU. */ + t = (env->sregs[SR_MSR] & (MSR_VM | MSR_UM)) << 1; + env->sregs[SR_MSR] &= ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM); + env->sregs[SR_MSR] |= t; + /* Exception in progress. */ + env->sregs[SR_MSR] |= MSR_EIP; + + qemu_log_mask(CPU_LOG_INT, + "exception at pc=%x ear=%x iflags=%x\n", + env->sregs[SR_PC], env->sregs[SR_EAR], env->iflags); + log_cpu_state_mask(CPU_LOG_INT, env, 0); + env->iflags &= ~(IMM_FLAG | D_FLAG); + env->sregs[SR_PC] = 0x20; + break; + + case EXCP_IRQ: + assert(!(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))); + assert(env->sregs[SR_MSR] & MSR_IE); + assert(!(env->iflags & D_FLAG)); + + t = (env->sregs[SR_MSR] & (MSR_VM | MSR_UM)) << 1; + +#if 0 +#include "disas.h" + +/* Useful instrumentation when debugging interrupt issues in either + the models or in sw. 
*/ + { + const char *sym; + + sym = lookup_symbol(env->sregs[SR_PC]); + if (sym + && (!strcmp("netif_rx", sym) + || !strcmp("process_backlog", sym))) { + + qemu_log( + "interrupt at pc=%x msr=%x %x iflags=%x sym=%s\n", + env->sregs[SR_PC], env->sregs[SR_MSR], t, env->iflags, + sym); + + log_cpu_state(env, 0); + } + } +#endif + qemu_log_mask(CPU_LOG_INT, + "interrupt at pc=%x msr=%x %x iflags=%x\n", + env->sregs[SR_PC], env->sregs[SR_MSR], t, env->iflags); + + env->sregs[SR_MSR] &= ~(MSR_VMS | MSR_UMS | MSR_VM \ + | MSR_UM | MSR_IE); + env->sregs[SR_MSR] |= t; + + env->regs[14] = env->sregs[SR_PC]; + env->sregs[SR_PC] = 0x10; + //log_cpu_state_mask(CPU_LOG_INT, env, 0); + break; + + case EXCP_BREAK: + case EXCP_HW_BREAK: + assert(!(env->iflags & IMM_FLAG)); + assert(!(env->iflags & D_FLAG)); + t = (env->sregs[SR_MSR] & (MSR_VM | MSR_UM)) << 1; + qemu_log_mask(CPU_LOG_INT, + "break at pc=%x msr=%x %x iflags=%x\n", + env->sregs[SR_PC], env->sregs[SR_MSR], t, env->iflags); + log_cpu_state_mask(CPU_LOG_INT, env, 0); + env->sregs[SR_MSR] &= ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM); + env->sregs[SR_MSR] |= t; + env->sregs[SR_MSR] |= MSR_BIP; + if (env->exception_index == EXCP_HW_BREAK) { + env->regs[16] = env->sregs[SR_PC]; + env->sregs[SR_MSR] |= MSR_BIP; + env->sregs[SR_PC] = 0x18; + } else + env->sregs[SR_PC] = env->btarget; + break; + default: + cpu_abort(env, "unhandled exception type=%d\n", + env->exception_index); + break; + } +} + +target_phys_addr_t cpu_get_phys_page_debug(CPUState * env, target_ulong addr) +{ + target_ulong vaddr, paddr = 0; + struct microblaze_mmu_lookup lu; + unsigned int hit; + + if (env->sregs[SR_MSR] & MSR_VM) { + hit = mmu_translate(&env->mmu, &lu, addr, 0, 0); + if (hit) { + vaddr = addr & TARGET_PAGE_MASK; + paddr = lu.paddr + vaddr - lu.vaddr; + } else + paddr = 0; /* ???. */ + } else + paddr = addr & TARGET_PAGE_MASK; + + return paddr; +} +#endif diff --git a/target-microblaze/helper.h b/target-microblaze/helper.h new file mode 100644 index 0000000000..8c5361ecde --- /dev/null +++ b/target-microblaze/helper.h @@ -0,0 +1,19 @@ +#include "def-helper.h" + +DEF_HELPER_1(raise_exception, void, i32) +DEF_HELPER_0(debug, void) +DEF_HELPER_4(addkc, i32, i32, i32, i32, i32) +DEF_HELPER_4(subkc, i32, i32, i32, i32, i32) +DEF_HELPER_2(cmp, i32, i32, i32) +DEF_HELPER_2(cmpu, i32, i32, i32) + +DEF_HELPER_2(divs, i32, i32, i32) +DEF_HELPER_2(divu, i32, i32, i32) + +DEF_HELPER_FLAGS_2(pcmpbf, TCG_CALL_PURE | TCG_CALL_CONST, i32, i32, i32) +#if !defined(CONFIG_USER_ONLY) +DEF_HELPER_1(mmu_read, i32, i32) +DEF_HELPER_2(mmu_write, void, i32, i32) +#endif + +#include "def-helper.h" diff --git a/target-microblaze/microblaze-decode.h b/target-microblaze/microblaze-decode.h new file mode 100644 index 0000000000..27b01288c9 --- /dev/null +++ b/target-microblaze/microblaze-decode.h @@ -0,0 +1,52 @@ +/* + * MicroBlaze insn decoding macros. + * + * Copyright (c) 2009 Edgar E. Iglesias <edgar.iglesias@gmail.com> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA + */ + +/* Convenient binary macros. */ +#define HEX__(n) 0x##n##LU +#define B8__(x) ((x&0x0000000FLU)?1:0) \ + + ((x&0x000000F0LU)?2:0) \ + + ((x&0x00000F00LU)?4:0) \ + + ((x&0x0000F000LU)?8:0) \ + + ((x&0x000F0000LU)?16:0) \ + + ((x&0x00F00000LU)?32:0) \ + + ((x&0x0F000000LU)?64:0) \ + + ((x&0xF0000000LU)?128:0) +#define B8(d) ((unsigned char)B8__(HEX__(d))) + +/* Decode logic, mask and value. */ +#define DEC_ADD {B8(00000000), B8(00110001)} +#define DEC_SUB {B8(00000001), B8(00110001)} +#define DEC_AND {B8(00100001), B8(00110101)} +#define DEC_XOR {B8(00100010), B8(00110111)} +#define DEC_OR {B8(00100000), B8(00110111)} +#define DEC_BIT {B8(00100100), B8(00111111)} +#define DEC_MSR {B8(00100101), B8(00111111)} + +#define DEC_BARREL {B8(00010001), B8(00110111)} +#define DEC_MUL {B8(00010000), B8(00110111)} +#define DEC_DIV {B8(00010010), B8(00110111)} + +#define DEC_LD {B8(00110000), B8(00110100)} +#define DEC_ST {B8(00110100), B8(00110100)} +#define DEC_IMM {B8(00101100), B8(00111111)} + +#define DEC_BR {B8(00100110), B8(00110111)} +#define DEC_BCC {B8(00100111), B8(00110111)} +#define DEC_RTS {B8(00101101), B8(00111111)} diff --git a/target-microblaze/op_helper.c b/target-microblaze/op_helper.c new file mode 100644 index 0000000000..1a0776d2cf --- /dev/null +++ b/target-microblaze/op_helper.c @@ -0,0 +1,216 @@ +/* + * Microblaze helper routines. + * + * Copyright (c) 2009 Edgar E. Iglesias <edgar.iglesias@gmail.com>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA + */ + +#include <assert.h> +#include "exec.h" +#include "helper.h" +#include "host-utils.h" + +#define D(x) + +#if !defined(CONFIG_USER_ONLY) +#define MMUSUFFIX _mmu +#define SHIFT 0 +#include "softmmu_template.h" +#define SHIFT 1 +#include "softmmu_template.h" +#define SHIFT 2 +#include "softmmu_template.h" +#define SHIFT 3 +#include "softmmu_template.h" + +/* Try to fill the TLB and return an exception if error. If retaddr is + NULL, it means that the function was called in C code (i.e. not + from generated code or from helper.c) */ +/* XXX: fix it to restore all registers */ +void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr) +{ + TranslationBlock *tb; + CPUState *saved_env; + unsigned long pc; + int ret; + + /* XXX: hack to restore env in all cases, even if not called from + generated code */ + saved_env = env; + env = cpu_single_env; + + ret = cpu_mb_handle_mmu_fault(env, addr, is_write, mmu_idx, 1); + if (unlikely(ret)) { + if (retaddr) { + /* now we have a real cpu fault */ + pc = (unsigned long)retaddr; + tb = tb_find_pc(pc); + if (tb) { + /* the PC is inside the translated code. 
It means that we have + a virtual CPU fault */ + cpu_restore_state(tb, env, pc, NULL); + } + } + cpu_loop_exit(); + } + env = saved_env; +} +#endif + +void helper_raise_exception(uint32_t index) +{ + env->exception_index = index; + cpu_loop_exit(); +} + +void helper_debug(void) +{ + int i; + + qemu_log("PC=%8.8x\n", env->sregs[SR_PC]); + for (i = 0; i < 32; i++) { + qemu_log("r%2.2d=%8.8x ", i, env->regs[i]); + if ((i + 1) % 4 == 0) + qemu_log("\n"); + } + qemu_log("\n\n"); +} + +static inline uint32_t compute_carry(uint32_t a, uint32_t b, uint32_t cin) +{ + uint32_t cout = 0; + + if ((b == ~0) && cin) + cout = 1; + else if ((~0 - a) < (b + cin)) + cout = 1; + return cout; +} + +uint32_t helper_cmp(uint32_t a, uint32_t b) +{ + uint32_t t; + + t = b + ~a + 1; + if ((b & 0x80000000) ^ (a & 0x80000000)) + t = (t & 0x7fffffff) | (b & 0x80000000); + return t; +} + +uint32_t helper_cmpu(uint32_t a, uint32_t b) +{ + uint32_t t; + + t = b + ~a + 1; + if ((b & 0x80000000) ^ (a & 0x80000000)) + t = (t & 0x7fffffff) | (a & 0x80000000); + return t; +} + +uint32_t helper_addkc(uint32_t a, uint32_t b, uint32_t k, uint32_t c) +{ + uint32_t d, cf = 0, ncf; + + if (c) + cf = env->sregs[SR_MSR] >> 31; + assert(cf == 0 || cf == 1); + d = a + b + cf; + + if (!k) { + ncf = compute_carry(a, b, cf); + assert(ncf == 0 || ncf == 1); + if (ncf) + env->sregs[SR_MSR] |= MSR_C | MSR_CC; + else + env->sregs[SR_MSR] &= ~(MSR_C | MSR_CC); + } + D(qemu_log("%x = %x + %x cf=%d ncf=%d k=%d c=%d\n", + d, a, b, cf, ncf, k, c)); + return d; +} + +uint32_t helper_subkc(uint32_t a, uint32_t b, uint32_t k, uint32_t c) +{ + uint32_t d, cf = 1, ncf; + + if (c) + cf = env->sregs[SR_MSR] >> 31; + assert(cf == 0 || cf == 1); + d = b + ~a + cf; + + if (!k) { + ncf = compute_carry(b, ~a, cf); + assert(ncf == 0 || ncf == 1); + if (ncf) + env->sregs[SR_MSR] |= MSR_C | MSR_CC; + else + env->sregs[SR_MSR] &= ~(MSR_C | MSR_CC); + } + D(qemu_log("%x = %x + %x cf=%d ncf=%d k=%d c=%d\n", + d, a, b, cf, ncf, k, c)); + return d; +} + +static inline int div_prepare(uint32_t a, uint32_t b) +{ + if (b == 0) { + env->sregs[SR_MSR] |= MSR_DZ; + /* FIXME: Raise the div by zero exception. */ + return 0; + } + env->sregs[SR_MSR] &= ~MSR_DZ; + return 1; +} + +uint32_t helper_divs(uint32_t a, uint32_t b) +{ + if (!div_prepare(a, b)) + return 0; + return (int32_t)a / (int32_t)b; +} + +uint32_t helper_divu(uint32_t a, uint32_t b) +{ + if (!div_prepare(a, b)) + return 0; + return a / b; +} + +uint32_t helper_pcmpbf(uint32_t a, uint32_t b) +{ + unsigned int i; + uint32_t mask = 0xff000000; + + for (i = 0; i < 4; i++) { + if ((a & mask) == (b & mask)) + return i + 1; + mask >>= 8; + } + return 0; +} + +#if !defined(CONFIG_USER_ONLY) +/* Writes/reads to the MMU's special regs end up here. */ +uint32_t helper_mmu_read(uint32_t rn) +{ + return mmu_read(env, rn); +} + +void helper_mmu_write(uint32_t rn, uint32_t v) +{ + mmu_write(env, rn, v); +} +#endif diff --git a/target-microblaze/translate.c b/target-microblaze/translate.c new file mode 100644 index 0000000000..33fff9c2c8 --- /dev/null +++ b/target-microblaze/translate.c @@ -0,0 +1,1395 @@ +/* + * Xilinx MicroBlaze emulation for qemu: main translation routines. + * + * Copyright (c) 2009 Edgar E. Iglesias. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. 
+ * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA + */ + +#include <stdarg.h> +#include <stdlib.h> +#include <stdio.h> +#include <string.h> +#include <inttypes.h> +#include <assert.h> + +#include "cpu.h" +#include "exec-all.h" +#include "disas.h" +#include "tcg-op.h" +#include "helper.h" +#include "microblaze-decode.h" +#include "qemu-common.h" + +#define GEN_HELPER 1 +#include "helper.h" + +#define SIM_COMPAT 0 +#define DISAS_GNU 1 +#define DISAS_MB 1 +#if DISAS_MB && !SIM_COMPAT +# define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__) +#else +# define LOG_DIS(...) do { } while (0) +#endif + +#define D(x) + +#define EXTRACT_FIELD(src, start, end) \ + (((src) >> start) & ((1 << (end - start + 1)) - 1)) + +static TCGv env_debug; +static TCGv_ptr cpu_env; +static TCGv cpu_R[32]; +static TCGv cpu_SR[18]; +static TCGv env_imm; +static TCGv env_btaken; +static TCGv env_btarget; +static TCGv env_iflags; + +#include "gen-icount.h" + +/* This is the state at translation time. */ +typedef struct DisasContext { + CPUState *env; + target_ulong pc, ppc; + target_ulong cache_pc; + + /* Decoder. */ + int type_b; + uint32_t ir; + uint8_t opcode; + uint8_t rd, ra, rb; + uint16_t imm; + + unsigned int cpustate_changed; + unsigned int delayed_branch; + unsigned int tb_flags, synced_flags; /* tb dependent flags. */ + unsigned int clear_imm; + int is_jmp; + +#define JMP_NOJMP 0 +#define JMP_DIRECT 1 +#define JMP_INDIRECT 2 + unsigned int jmp; + uint32_t jmp_pc; + + int abort_at_next_insn; + int nr_nops; + struct TranslationBlock *tb; + int singlestep_enabled; +} DisasContext; + +const static char *regnames[] = +{ + "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", + "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", + "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", + "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31", +}; + +const static char *special_regnames[] = +{ + "rpc", "rmsr", "sr2", "sr3", "sr4", "sr5", "sr6", "sr7", + "sr8", "sr9", "sr10", "sr11", "sr12", "sr13", "sr14", "sr15", + "sr16", "sr17", "sr18" +}; + +/* Sign extend at translation time. */ +static inline int sign_extend(unsigned int val, unsigned int width) +{ + int sval; + + /* LSL. */ + val <<= 31 - width; + sval = val; + /* ASR. */ + sval >>= 31 - width; + return sval; +} + +static inline void t_sync_flags(DisasContext *dc) +{ + /* Synch the tb dependant flags between translator and runtime. 
*/ + if (dc->tb_flags != dc->synced_flags) { + tcg_gen_movi_tl(env_iflags, dc->tb_flags); + dc->synced_flags = dc->tb_flags; + } +} + +static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index) +{ + TCGv_i32 tmp = tcg_const_i32(index); + + t_sync_flags(dc); + tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc); + gen_helper_raise_exception(tmp); + tcg_temp_free_i32(tmp); + dc->is_jmp = DISAS_UPDATE; +} + +static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest) +{ + TranslationBlock *tb; + tb = dc->tb; + if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) { + tcg_gen_goto_tb(n); + tcg_gen_movi_tl(cpu_SR[SR_PC], dest); + tcg_gen_exit_tb((long)tb + n); + } else { + tcg_gen_movi_tl(cpu_SR[SR_PC], dest); + tcg_gen_exit_tb(0); + } +} + +static inline TCGv *dec_alu_op_b(DisasContext *dc) +{ + if (dc->type_b) { + if (dc->tb_flags & IMM_FLAG) + tcg_gen_ori_tl(env_imm, env_imm, dc->imm); + else + tcg_gen_movi_tl(env_imm, (int32_t)((int16_t)dc->imm)); + return &env_imm; + } else + return &cpu_R[dc->rb]; +} + +static void dec_add(DisasContext *dc) +{ + unsigned int k, c; + + k = dc->opcode & 4; + c = dc->opcode & 2; + + LOG_DIS("add%s%s%s r%d r%d r%d\n", + dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "", + dc->rd, dc->ra, dc->rb); + + if (k && !c && dc->rd) + tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc))); + else if (dc->rd) + gen_helper_addkc(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)), + tcg_const_tl(k), tcg_const_tl(c)); + else { + TCGv d = tcg_temp_new(); + gen_helper_addkc(d, cpu_R[dc->ra], *(dec_alu_op_b(dc)), + tcg_const_tl(k), tcg_const_tl(c)); + tcg_temp_free(d); + } +} + +static void dec_sub(DisasContext *dc) +{ + unsigned int u, cmp, k, c; + + u = dc->imm & 2; + k = dc->opcode & 4; + c = dc->opcode & 2; + cmp = (dc->imm & 1) && (!dc->type_b) && k; + + if (cmp) { + LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir); + if (dc->rd) { + if (u) + gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]); + else + gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]); + } + } else { + LOG_DIS("sub%s%s r%d, r%d r%d\n", + k ? "k" : "", c ? "c" : "", dc->rd, dc->ra, dc->rb); + + if (!k || c) { + TCGv t; + t = tcg_temp_new(); + if (dc->rd) + gen_helper_subkc(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)), + tcg_const_tl(k), tcg_const_tl(c)); + else + gen_helper_subkc(t, cpu_R[dc->ra], *(dec_alu_op_b(dc)), + tcg_const_tl(k), tcg_const_tl(c)); + tcg_temp_free(t); + } + else if (dc->rd) + tcg_gen_sub_tl(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]); + } +} + +static void dec_pattern(DisasContext *dc) +{ + unsigned int mode; + int l1; + + mode = dc->opcode & 3; + switch (mode) { + case 0: + /* pcmpbf. 
*/ + LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb); + if (dc->rd) + gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]); + break; + case 2: + LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb); + if (dc->rd) { + TCGv t0 = tcg_temp_local_new(); + l1 = gen_new_label(); + tcg_gen_movi_tl(t0, 1); + tcg_gen_brcond_tl(TCG_COND_EQ, + cpu_R[dc->ra], cpu_R[dc->rb], l1); + tcg_gen_movi_tl(t0, 0); + gen_set_label(l1); + tcg_gen_mov_tl(cpu_R[dc->rd], t0); + tcg_temp_free(t0); + } + break; + case 3: + LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb); + l1 = gen_new_label(); + if (dc->rd) { + TCGv t0 = tcg_temp_local_new(); + tcg_gen_movi_tl(t0, 1); + tcg_gen_brcond_tl(TCG_COND_NE, + cpu_R[dc->ra], cpu_R[dc->rb], l1); + tcg_gen_movi_tl(t0, 0); + gen_set_label(l1); + tcg_gen_mov_tl(cpu_R[dc->rd], t0); + tcg_temp_free(t0); + } + break; + default: + cpu_abort(dc->env, + "unsupported pattern insn opcode=%x\n", dc->opcode); + break; + } +} + +static void dec_and(DisasContext *dc) +{ + unsigned int not; + + if (!dc->type_b && (dc->imm & (1 << 10))) { + dec_pattern(dc); + return; + } + + not = dc->opcode & (1 << 1); + LOG_DIS("and%s\n", not ? "n" : ""); + + if (!dc->rd) + return; + + if (not) { + TCGv t = tcg_temp_new(); + tcg_gen_not_tl(t, *(dec_alu_op_b(dc))); + tcg_gen_and_tl(cpu_R[dc->rd], cpu_R[dc->ra], t); + tcg_temp_free(t); + } else + tcg_gen_and_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc))); +} + +static void dec_or(DisasContext *dc) +{ + if (!dc->type_b && (dc->imm & (1 << 10))) { + dec_pattern(dc); + return; + } + + LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm); + if (dc->rd) + tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc))); +} + +static void dec_xor(DisasContext *dc) +{ + if (!dc->type_b && (dc->imm & (1 << 10))) { + dec_pattern(dc); + return; + } + + LOG_DIS("xor r%d\n", dc->rd); + if (dc->rd) + tcg_gen_xor_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc))); +} + +static void read_carry(DisasContext *dc, TCGv d) +{ + tcg_gen_shri_tl(d, cpu_SR[SR_MSR], 31); +} + +static void write_carry(DisasContext *dc, TCGv v) +{ + TCGv t0 = tcg_temp_new(); + tcg_gen_shli_tl(t0, v, 31); + tcg_gen_sari_tl(t0, t0, 31); + tcg_gen_mov_tl(env_debug, t0); + tcg_gen_andi_tl(t0, t0, (MSR_C | MSR_CC)); + tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], + ~(MSR_C | MSR_CC)); + tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0); + tcg_temp_free(t0); +} + + +static inline void msr_read(DisasContext *dc, TCGv d) +{ + tcg_gen_mov_tl(d, cpu_SR[SR_MSR]); +} + +static inline void msr_write(DisasContext *dc, TCGv v) +{ + dc->cpustate_changed = 1; + tcg_gen_mov_tl(cpu_SR[SR_MSR], v); + /* PVR, we have a processor version register. */ + tcg_gen_ori_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], (1 << 10)); +} + +static void dec_msr(DisasContext *dc) +{ + TCGv t0, t1; + unsigned int sr, to, rn; + + sr = dc->imm & ((1 << 14) - 1); + to = dc->imm & (1 << 14); + dc->type_b = 1; + if (to) + dc->cpustate_changed = 1; + + /* msrclr and msrset. */ + if (!(dc->imm & (1 << 15))) { + unsigned int clr = dc->ir & (1 << 16); + + LOG_DIS("msr%s r%d imm=%x\n", clr ? 
"clr" : "set", + dc->rd, dc->imm); + if (dc->rd) + msr_read(dc, cpu_R[dc->rd]); + + t0 = tcg_temp_new(); + t1 = tcg_temp_new(); + msr_read(dc, t0); + tcg_gen_mov_tl(t1, *(dec_alu_op_b(dc))); + + if (clr) { + tcg_gen_not_tl(t1, t1); + tcg_gen_and_tl(t0, t0, t1); + } else + tcg_gen_or_tl(t0, t0, t1); + msr_write(dc, t0); + tcg_temp_free(t0); + tcg_temp_free(t1); + tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4); + dc->is_jmp = DISAS_UPDATE; + return; + } + +#if !defined(CONFIG_USER_ONLY) + /* Catch read/writes to the mmu block. */ + if ((sr & ~0xff) == 0x1000) { + sr &= 7; + LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm); + if (to) + gen_helper_mmu_write(tcg_const_tl(sr), cpu_R[dc->ra]); + else + gen_helper_mmu_read(cpu_R[dc->rd], tcg_const_tl(sr)); + return; + } +#endif + + if (to) { + LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm); + switch (sr) { + case 0: + break; + case 1: + msr_write(dc, cpu_R[dc->ra]); + break; + case 0x3: + tcg_gen_mov_tl(cpu_SR[SR_EAR], cpu_R[dc->ra]); + break; + case 0x5: + tcg_gen_mov_tl(cpu_SR[SR_ESR], cpu_R[dc->ra]); + break; + case 0x7: + /* Ignored at the moment. */ + break; + default: + cpu_abort(dc->env, "unknown mts reg %x\n", sr); + break; + } + } else { + LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm); + + switch (sr) { + case 0: + tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc); + break; + case 1: + msr_read(dc, cpu_R[dc->rd]); + break; + case 0x3: + tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_EAR]); + break; + case 0x5: + tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_ESR]); + break; + case 0x7: + tcg_gen_movi_tl(cpu_R[dc->rd], 0); + break; + case 0xb: + tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_BTR]); + break; + case 0x2000: + case 0x2001: + case 0x2002: + case 0x2003: + case 0x2004: + case 0x2005: + case 0x2006: + case 0x2007: + case 0x2008: + case 0x2009: + case 0x200a: + case 0x200b: + case 0x200c: + rn = sr & 0xf; + tcg_gen_ld_tl(cpu_R[dc->rd], + cpu_env, offsetof(CPUState, pvr.regs[rn])); + break; + default: + cpu_abort(dc->env, "unknown mfs reg %x\n", sr); + break; + } + } +} + +/* 64-bit signed mul, lower result in d and upper in d2. */ +static void t_gen_muls(TCGv d, TCGv d2, TCGv a, TCGv b) +{ + TCGv_i64 t0, t1; + + t0 = tcg_temp_new_i64(); + t1 = tcg_temp_new_i64(); + + tcg_gen_ext_i32_i64(t0, a); + tcg_gen_ext_i32_i64(t1, b); + tcg_gen_mul_i64(t0, t0, t1); + + tcg_gen_trunc_i64_i32(d, t0); + tcg_gen_shri_i64(t0, t0, 32); + tcg_gen_trunc_i64_i32(d2, t0); + + tcg_temp_free_i64(t0); + tcg_temp_free_i64(t1); +} + +/* 64-bit unsigned muls, lower result in d and upper in d2. */ +static void t_gen_mulu(TCGv d, TCGv d2, TCGv a, TCGv b) +{ + TCGv_i64 t0, t1; + + t0 = tcg_temp_new_i64(); + t1 = tcg_temp_new_i64(); + + tcg_gen_extu_i32_i64(t0, a); + tcg_gen_extu_i32_i64(t1, b); + tcg_gen_mul_i64(t0, t0, t1); + + tcg_gen_trunc_i64_i32(d, t0); + tcg_gen_shri_i64(t0, t0, 32); + tcg_gen_trunc_i64_i32(d2, t0); + + tcg_temp_free_i64(t0); + tcg_temp_free_i64(t1); +} + +/* Multiplier unit. 
*/ +static void dec_mul(DisasContext *dc) +{ + TCGv d[2]; + unsigned int subcode; + + subcode = dc->imm & 3; + d[0] = tcg_temp_new(); + d[1] = tcg_temp_new(); + + if (dc->type_b) { + LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm); + t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], *(dec_alu_op_b(dc))); + goto done; + } + + switch (subcode) { + case 0: + LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb); + t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], cpu_R[dc->rb]); + break; + case 1: + LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb); + t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]); + break; + case 2: + LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb); + t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]); + break; + case 3: + LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb); + t_gen_mulu(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]); + break; + default: + cpu_abort(dc->env, "unknown MUL insn %x\n", subcode); + break; + } +done: + tcg_temp_free(d[0]); + tcg_temp_free(d[1]); +} + +/* Div unit. */ +static void dec_div(DisasContext *dc) +{ + unsigned int u; + + u = dc->imm & 2; + LOG_DIS("div\n"); + + /* FIXME: support div by zero exceptions. */ + if (u) + gen_helper_divu(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]); + else + gen_helper_divs(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]); + if (!dc->rd) + tcg_gen_movi_tl(cpu_R[dc->rd], 0); +} + +static void dec_barrel(DisasContext *dc) +{ + TCGv t0; + unsigned int s, t; + + s = dc->imm & (1 << 10); + t = dc->imm & (1 << 9); + + LOG_DIS("bs%s%s r%d r%d r%d\n", + s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb); + + t0 = tcg_temp_new(); + + tcg_gen_mov_tl(t0, *(dec_alu_op_b(dc))); + tcg_gen_andi_tl(t0, t0, 31); + + if (s) + tcg_gen_shl_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0); + else { + if (t) + tcg_gen_sar_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0); + else + tcg_gen_shr_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0); + } +} + +static void dec_bit(DisasContext *dc) +{ + TCGv t0, t1; + unsigned int op; + + op = dc->ir & ((1 << 8) - 1); + switch (op) { + case 0x21: + /* src. */ + t0 = tcg_temp_new(); + + LOG_DIS("src r%d r%d\n", dc->rd, dc->ra); + tcg_gen_andi_tl(t0, cpu_R[dc->ra], 1); + if (dc->rd) { + t1 = tcg_temp_new(); + read_carry(dc, t1); + tcg_gen_shli_tl(t1, t1, 31); + + tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1); + tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->rd], t1); + tcg_temp_free(t1); + } + + /* Update carry. */ + write_carry(dc, t0); + tcg_temp_free(t0); + break; + + case 0x1: + case 0x41: + /* srl. */ + t0 = tcg_temp_new(); + LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra); + + /* Update carry. */ + tcg_gen_andi_tl(t0, cpu_R[dc->ra], 1); + write_carry(dc, t0); + tcg_temp_free(t0); + if (dc->rd) { + if (op == 0x41) + tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1); + else + tcg_gen_sari_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1); + } + break; + case 0x60: + LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra); + tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]); + break; + case 0x61: + LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra); + tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]); + break; + case 0x64: + /* wdc. */ + LOG_DIS("wdc r%d\n", dc->ra); + break; + case 0x68: + /* wic. 
*/ + LOG_DIS("wic r%d\n", dc->ra); + break; + default: + cpu_abort(dc->env, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n", + dc->pc, op, dc->rd, dc->ra, dc->rb); + break; + } +} + +static inline void sync_jmpstate(DisasContext *dc) +{ + if (dc->jmp == JMP_DIRECT) { + dc->jmp = JMP_INDIRECT; + tcg_gen_movi_tl(env_btaken, 1); + tcg_gen_movi_tl(env_btarget, dc->jmp_pc); + } +} + +static void dec_imm(DisasContext *dc) +{ + LOG_DIS("imm %x\n", dc->imm << 16); + tcg_gen_movi_tl(env_imm, (dc->imm << 16)); + dc->tb_flags |= IMM_FLAG; + dc->clear_imm = 0; +} + +static inline void gen_load(DisasContext *dc, TCGv dst, TCGv addr, + unsigned int size) +{ + int mem_index = cpu_mmu_index(dc->env); + + if (size == 1) { + tcg_gen_qemu_ld8u(dst, addr, mem_index); + } else if (size == 2) { + tcg_gen_qemu_ld16u(dst, addr, mem_index); + } else if (size == 4) { + tcg_gen_qemu_ld32u(dst, addr, mem_index); + } else + cpu_abort(dc->env, "Incorrect load size %d\n", size); +} + +static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t) +{ + unsigned int extimm = dc->tb_flags & IMM_FLAG; + + /* Treat the fast cases first. */ + if (!dc->type_b) { + *t = tcg_temp_new(); + tcg_gen_add_tl(*t, cpu_R[dc->ra], cpu_R[dc->rb]); + return t; + } + /* Immediate. */ + if (!extimm) { + if (dc->imm == 0) { + return &cpu_R[dc->ra]; + } + *t = tcg_temp_new(); + tcg_gen_movi_tl(*t, (int32_t)((int16_t)dc->imm)); + tcg_gen_add_tl(*t, cpu_R[dc->ra], *t); + } else { + *t = tcg_temp_new(); + tcg_gen_add_tl(*t, cpu_R[dc->ra], *(dec_alu_op_b(dc))); + } + + return t; +} + +static void dec_load(DisasContext *dc) +{ + TCGv t, *addr; + unsigned int size; + + size = 1 << (dc->opcode & 3); + + LOG_DIS("l %x %d\n", dc->opcode, size); + t_sync_flags(dc); + addr = compute_ldst_addr(dc, &t); + + /* If we get a fault on a dslot, the jmpstate better be in sync. */ + sync_jmpstate(dc); + if (dc->rd) + gen_load(dc, cpu_R[dc->rd], *addr, size); + else { + gen_load(dc, env_imm, *addr, size); + } + + if (addr == &t) + tcg_temp_free(t); +} + +static void gen_store(DisasContext *dc, TCGv addr, TCGv val, + unsigned int size) +{ + int mem_index = cpu_mmu_index(dc->env); + + if (size == 1) + tcg_gen_qemu_st8(val, addr, mem_index); + else if (size == 2) { + tcg_gen_qemu_st16(val, addr, mem_index); + } else if (size == 4) { + tcg_gen_qemu_st32(val, addr, mem_index); + } else + cpu_abort(dc->env, "Incorrect store size %d\n", size); +} + +static void dec_store(DisasContext *dc) +{ + TCGv t, *addr; + unsigned int size; + + size = 1 << (dc->opcode & 3); + + LOG_DIS("s%d%s\n", size, dc->type_b ? "i" : ""); + t_sync_flags(dc); + /* If we get a fault on a dslot, the jmpstate better be in sync. 
*/ + sync_jmpstate(dc); + addr = compute_ldst_addr(dc, &t); + gen_store(dc, *addr, cpu_R[dc->rd], size); + if (addr == &t) + tcg_temp_free(t); +} + +static inline void eval_cc(DisasContext *dc, unsigned int cc, + TCGv d, TCGv a, TCGv b) +{ + int l1; + + switch (cc) { + case CC_EQ: + l1 = gen_new_label(); + tcg_gen_movi_tl(env_btaken, 1); + tcg_gen_brcond_tl(TCG_COND_EQ, a, b, l1); + tcg_gen_movi_tl(env_btaken, 0); + gen_set_label(l1); + break; + case CC_NE: + l1 = gen_new_label(); + tcg_gen_movi_tl(env_btaken, 1); + tcg_gen_brcond_tl(TCG_COND_NE, a, b, l1); + tcg_gen_movi_tl(env_btaken, 0); + gen_set_label(l1); + break; + case CC_LT: + l1 = gen_new_label(); + tcg_gen_movi_tl(env_btaken, 1); + tcg_gen_brcond_tl(TCG_COND_LT, a, b, l1); + tcg_gen_movi_tl(env_btaken, 0); + gen_set_label(l1); + break; + case CC_LE: + l1 = gen_new_label(); + tcg_gen_movi_tl(env_btaken, 1); + tcg_gen_brcond_tl(TCG_COND_LE, a, b, l1); + tcg_gen_movi_tl(env_btaken, 0); + gen_set_label(l1); + break; + case CC_GE: + l1 = gen_new_label(); + tcg_gen_movi_tl(env_btaken, 1); + tcg_gen_brcond_tl(TCG_COND_GE, a, b, l1); + tcg_gen_movi_tl(env_btaken, 0); + gen_set_label(l1); + break; + case CC_GT: + l1 = gen_new_label(); + tcg_gen_movi_tl(env_btaken, 1); + tcg_gen_brcond_tl(TCG_COND_GT, a, b, l1); + tcg_gen_movi_tl(env_btaken, 0); + gen_set_label(l1); + break; + default: + cpu_abort(dc->env, "Unknown condition code %x.\n", cc); + break; + } +} + +static void eval_cond_jmp(DisasContext *dc, TCGv pc_true, TCGv pc_false) +{ + int l1; + + l1 = gen_new_label(); + /* Conditional jmp. */ + tcg_gen_mov_tl(cpu_SR[SR_PC], pc_false); + tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1); + tcg_gen_mov_tl(cpu_SR[SR_PC], pc_true); + gen_set_label(l1); +} + +static void dec_bcc(DisasContext *dc) +{ + unsigned int cc; + unsigned int dslot; + + cc = EXTRACT_FIELD(dc->ir, 21, 23); + dslot = dc->ir & (1 << 25); + LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm); + + dc->delayed_branch = 1; + if (dslot) { + dc->delayed_branch = 2; + dc->tb_flags |= D_FLAG; + tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)), + cpu_env, offsetof(CPUState, bimm)); + } + + tcg_gen_movi_tl(env_btarget, dc->pc); + tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc))); + eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_tl(0)); + dc->jmp = JMP_INDIRECT; +} + +static void dec_br(DisasContext *dc) +{ + unsigned int dslot, link, abs; + + dslot = dc->ir & (1 << 20); + abs = dc->ir & (1 << 19); + link = dc->ir & (1 << 18); + LOG_DIS("br%s%s%s%s imm=%x\n", + abs ? "a" : "", link ? "l" : "", + dc->type_b ? "i" : "", dslot ? 
"d" : "", + dc->imm); + + dc->delayed_branch = 1; + if (dslot) { + dc->delayed_branch = 2; + dc->tb_flags |= D_FLAG; + tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)), + cpu_env, offsetof(CPUState, bimm)); + } + if (link && dc->rd) + tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc); + + dc->jmp = JMP_INDIRECT; + if (abs) { + tcg_gen_movi_tl(env_btaken, 1); + tcg_gen_mov_tl(env_btarget, *(dec_alu_op_b(dc))); + if (link && !(dc->tb_flags & IMM_FLAG) + && (dc->imm == 8 || dc->imm == 0x18)) + t_gen_raise_exception(dc, EXCP_BREAK); + if (dc->imm == 0) + t_gen_raise_exception(dc, EXCP_DEBUG); + } else { + if (dc->tb_flags & IMM_FLAG) { + tcg_gen_movi_tl(env_btaken, 1); + tcg_gen_movi_tl(env_btarget, dc->pc); + tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc))); + } else { + dc->jmp = JMP_DIRECT; + dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm); + } + } +} + +static inline void do_rti(DisasContext *dc) +{ + TCGv t0, t1; + t0 = tcg_temp_new(); + t1 = tcg_temp_new(); + tcg_gen_shri_tl(t0, cpu_SR[SR_MSR], 1); + tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_IE); + tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM)); + + tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM)); + tcg_gen_or_tl(t1, t1, t0); + msr_write(dc, t1); + tcg_temp_free(t1); + tcg_temp_free(t0); + dc->tb_flags &= ~DRTI_FLAG; +} + +static inline void do_rtb(DisasContext *dc) +{ + TCGv t0, t1; + t0 = tcg_temp_new(); + t1 = tcg_temp_new(); + tcg_gen_andi_tl(t1, cpu_SR[SR_MSR], ~MSR_BIP); + tcg_gen_shri_tl(t0, t1, 1); + tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM)); + + tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM)); + tcg_gen_or_tl(t1, t1, t0); + msr_write(dc, t1); + tcg_temp_free(t1); + tcg_temp_free(t0); + dc->tb_flags &= ~DRTB_FLAG; +} + +static inline void do_rte(DisasContext *dc) +{ + TCGv t0, t1; + t0 = tcg_temp_new(); + t1 = tcg_temp_new(); + + tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_EE); + tcg_gen_andi_tl(t1, t1, ~MSR_EIP); + tcg_gen_shri_tl(t0, t1, 1); + tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM)); + + tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM)); + tcg_gen_or_tl(t1, t1, t0); + msr_write(dc, t1); + tcg_temp_free(t1); + tcg_temp_free(t0); + dc->tb_flags &= ~DRTE_FLAG; +} + +static void dec_rts(DisasContext *dc) +{ + unsigned int b_bit, i_bit, e_bit; + + i_bit = dc->ir & (1 << 21); + b_bit = dc->ir & (1 << 22); + e_bit = dc->ir & (1 << 23); + + dc->delayed_branch = 2; + dc->tb_flags |= D_FLAG; + tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)), + cpu_env, offsetof(CPUState, bimm)); + + if (i_bit) { + LOG_DIS("rtid ir=%x\n", dc->ir); + dc->tb_flags |= DRTI_FLAG; + } else if (b_bit) { + LOG_DIS("rtbd ir=%x\n", dc->ir); + dc->tb_flags |= DRTB_FLAG; + } else if (e_bit) { + LOG_DIS("rted ir=%x\n", dc->ir); + dc->tb_flags |= DRTE_FLAG; + } else + LOG_DIS("rts ir=%x\n", dc->ir); + + tcg_gen_movi_tl(env_btaken, 1); + tcg_gen_add_tl(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc))); +} + +static void dec_null(DisasContext *dc) +{ + qemu_log ("unknown insn pc=%x opc=%x\n", dc->pc, dc->opcode); + dc->abort_at_next_insn = 1; +} + +static struct decoder_info { + struct { + uint32_t bits; + uint32_t mask; + }; + void (*dec)(DisasContext *dc); +} decinfo[] = { + {DEC_ADD, dec_add}, + {DEC_SUB, dec_sub}, + {DEC_AND, dec_and}, + {DEC_XOR, dec_xor}, + {DEC_OR, dec_or}, + {DEC_BIT, dec_bit}, + {DEC_BARREL, dec_barrel}, + {DEC_LD, dec_load}, + {DEC_ST, dec_store}, + {DEC_IMM, dec_imm}, + {DEC_BR, dec_br}, + {DEC_BCC, dec_bcc}, + {DEC_RTS, dec_rts}, + {DEC_MUL, dec_mul}, + {DEC_DIV, dec_div}, + {DEC_MSR, dec_msr}, + {{0, 0}, dec_null} +}; + 
+static inline void decode(DisasContext *dc) +{ + uint32_t ir; + int i; + + if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) + tcg_gen_debug_insn_start(dc->pc); + + dc->ir = ir = ldl_code(dc->pc); + LOG_DIS("%8.8x\t", dc->ir); + + if (dc->ir) + dc->nr_nops = 0; + else { + LOG_DIS("nr_nops=%d\t", dc->nr_nops); + dc->nr_nops++; + if (dc->nr_nops > 4) + cpu_abort(dc->env, "fetching nop sequence\n"); + } + /* bit 2 seems to indicate insn type. */ + dc->type_b = ir & (1 << 29); + + dc->opcode = EXTRACT_FIELD(ir, 26, 31); + dc->rd = EXTRACT_FIELD(ir, 21, 25); + dc->ra = EXTRACT_FIELD(ir, 16, 20); + dc->rb = EXTRACT_FIELD(ir, 11, 15); + dc->imm = EXTRACT_FIELD(ir, 0, 15); + + /* Large switch for all insns. */ + for (i = 0; i < ARRAY_SIZE(decinfo); i++) { + if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) { + decinfo[i].dec(dc); + break; + } + } +} + + +static void check_breakpoint(CPUState *env, DisasContext *dc) +{ + CPUBreakpoint *bp; + + if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) { + TAILQ_FOREACH(bp, &env->breakpoints, entry) { + if (bp->pc == dc->pc) { + t_gen_raise_exception(dc, EXCP_DEBUG); + dc->is_jmp = DISAS_UPDATE; + } + } + } +} + +/* generate intermediate code for basic block 'tb'. */ +static void +gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb, + int search_pc) +{ + uint16_t *gen_opc_end; + uint32_t pc_start; + int j, lj; + struct DisasContext ctx; + struct DisasContext *dc = &ctx; + uint32_t next_page_start, org_flags; + target_ulong npc; + int num_insns; + int max_insns; + + qemu_log_try_set_file(stderr); + + pc_start = tb->pc; + dc->env = env; + dc->tb = tb; + org_flags = dc->synced_flags = dc->tb_flags = tb->flags; + + gen_opc_end = gen_opc_buf + OPC_MAX_SIZE; + + dc->is_jmp = DISAS_NEXT; + dc->jmp = 0; + dc->delayed_branch = !!(dc->tb_flags & D_FLAG); + dc->ppc = pc_start; + dc->pc = pc_start; + dc->cache_pc = -1; + dc->singlestep_enabled = env->singlestep_enabled; + dc->cpustate_changed = 0; + dc->abort_at_next_insn = 0; + dc->nr_nops = 0; + + if (pc_start & 3) + cpu_abort(env, "Microblaze: unaligned PC=%x\n", pc_start); + + if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) { +#if !SIM_COMPAT + qemu_log("--------------\n"); + log_cpu_state(env, 0); +#endif + } + + next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; + lj = -1; + num_insns = 0; + max_insns = tb->cflags & CF_COUNT_MASK; + if (max_insns == 0) + max_insns = CF_COUNT_MASK; + + gen_icount_start(); + do + { +#if SIM_COMPAT + if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) { + tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc); + gen_helper_debug(); + } +#endif + check_breakpoint(env, dc); + + if (search_pc) { + j = gen_opc_ptr - gen_opc_buf; + if (lj < j) { + lj++; + while (lj < j) + gen_opc_instr_start[lj++] = 0; + } + gen_opc_pc[lj] = dc->pc; + gen_opc_instr_start[lj] = 1; + gen_opc_icount[lj] = num_insns; + } + + /* Pretty disas. */ + LOG_DIS("%8.8x:\t", dc->pc); + + if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) + gen_io_start(); + + dc->clear_imm = 1; + decode(dc); + if (dc->clear_imm) + dc->tb_flags &= ~IMM_FLAG; + dc->ppc = dc->pc; + dc->pc += 4; + num_insns++; + + if (dc->delayed_branch) { + dc->delayed_branch--; + if (!dc->delayed_branch) { + if (dc->tb_flags & DRTI_FLAG) + do_rti(dc); + if (dc->tb_flags & DRTB_FLAG) + do_rtb(dc); + if (dc->tb_flags & DRTE_FLAG) + do_rte(dc); + /* Clear the delay slot flag. */ + dc->tb_flags &= ~D_FLAG; + /* If it is a direct jump, try direct chaining. 
*/ + if (dc->jmp != JMP_DIRECT) { + eval_cond_jmp(dc, env_btarget, tcg_const_tl(dc->pc)); + dc->is_jmp = DISAS_JUMP; + } + break; + } + } + if (env->singlestep_enabled) + break; + } while (!dc->is_jmp && !dc->cpustate_changed + && gen_opc_ptr < gen_opc_end + && !singlestep + && (dc->pc < next_page_start) + && num_insns < max_insns); + + npc = dc->pc; + if (dc->jmp == JMP_DIRECT) { + if (dc->tb_flags & D_FLAG) { + dc->is_jmp = DISAS_UPDATE; + tcg_gen_movi_tl(cpu_SR[SR_PC], npc); + sync_jmpstate(dc); + } else + npc = dc->jmp_pc; + } + + if (tb->cflags & CF_LAST_IO) + gen_io_end(); + /* Force an update if the per-tb cpu state has changed. */ + if (dc->is_jmp == DISAS_NEXT + && (dc->cpustate_changed || org_flags != dc->tb_flags)) { + dc->is_jmp = DISAS_UPDATE; + tcg_gen_movi_tl(cpu_SR[SR_PC], npc); + } + t_sync_flags(dc); + + if (unlikely(env->singlestep_enabled)) { + t_gen_raise_exception(dc, EXCP_DEBUG); + if (dc->is_jmp == DISAS_NEXT) + tcg_gen_movi_tl(cpu_SR[SR_PC], npc); + } else { + switch(dc->is_jmp) { + case DISAS_NEXT: + gen_goto_tb(dc, 1, npc); + break; + default: + case DISAS_JUMP: + case DISAS_UPDATE: + /* indicate that the hash table must be used + to find the next TB */ + tcg_gen_exit_tb(0); + break; + case DISAS_TB_JUMP: + /* nothing more to generate */ + break; + } + } + gen_icount_end(tb, num_insns); + *gen_opc_ptr = INDEX_op_end; + if (search_pc) { + j = gen_opc_ptr - gen_opc_buf; + lj++; + while (lj <= j) + gen_opc_instr_start[lj++] = 0; + } else { + tb->size = dc->pc - pc_start; + tb->icount = num_insns; + } + +#ifdef DEBUG_DISAS +#if !SIM_COMPAT + if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) { + qemu_log("\n"); +#if DISAS_GNU + log_target_disas(pc_start, dc->pc - pc_start, 0); +#endif + qemu_log("\nisize=%d osize=%zd\n", + dc->pc - pc_start, gen_opc_ptr - gen_opc_buf); + } +#endif +#endif + assert(!dc->abort_at_next_insn); +} + +void gen_intermediate_code (CPUState *env, struct TranslationBlock *tb) +{ + gen_intermediate_code_internal(env, tb, 0); +} + +void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb) +{ + gen_intermediate_code_internal(env, tb, 1); +} + +void cpu_dump_state (CPUState *env, FILE *f, + int (*cpu_fprintf)(FILE *f, const char *fmt, ...), + int flags) +{ + int i; + + if (!env || !f) + return; + + cpu_fprintf(f, "IN: PC=%x %s\n", + env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC])); + cpu_fprintf(f, "rmsr=%x resr=%x debug[%x] imm=%x iflags=%x\n", + env->sregs[SR_MSR], env->sregs[SR_ESR], + env->debug, env->imm, env->iflags); + cpu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s)\n", + env->btaken, env->btarget, + (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel", + (env->sregs[SR_MSR] & MSR_UMS) ? 
"user" : "kernel"); + for (i = 0; i < 32; i++) { + cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]); + if ((i + 1) % 4 == 0) + cpu_fprintf(f, "\n"); + } + cpu_fprintf(f, "\n\n"); +} + +CPUState *cpu_mb_init (const char *cpu_model) +{ + CPUState *env; + static int tcg_initialized = 0; + int i; + + env = qemu_mallocz(sizeof(CPUState)); + + cpu_exec_init(env); + cpu_reset(env); + + env->pvr.regs[0] = PVR0_PVR_FULL_MASK \ + | PVR0_USE_BARREL_MASK \ + | PVR0_USE_DIV_MASK \ + | PVR0_USE_HW_MUL_MASK \ + | PVR0_USE_EXC_MASK \ + | PVR0_USE_ICACHE_MASK \ + | PVR0_USE_DCACHE_MASK \ + | PVR0_USE_MMU \ + | (0xb << 8); + env->pvr.regs[2] = PVR2_D_OPB_MASK \ + | PVR2_D_LMB_MASK \ + | PVR2_I_OPB_MASK \ + | PVR2_I_LMB_MASK \ + | PVR2_USE_MSR_INSTR \ + | PVR2_USE_PCMP_INSTR \ + | PVR2_USE_BARREL_MASK \ + | PVR2_USE_DIV_MASK \ + | PVR2_USE_HW_MUL_MASK \ + | PVR2_USE_MUL64_MASK \ + | 0; + env->pvr.regs[10] = 0x0c000000; /* Default to spartan 3a dsp family. */ + env->pvr.regs[11] = PVR11_USE_MMU; + + if (tcg_initialized) + return env; + + tcg_initialized = 1; + + cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env"); + + env_debug = tcg_global_mem_new(TCG_AREG0, + offsetof(CPUState, debug), + "debug0"); + env_iflags = tcg_global_mem_new(TCG_AREG0, + offsetof(CPUState, iflags), + "iflags"); + env_imm = tcg_global_mem_new(TCG_AREG0, + offsetof(CPUState, imm), + "imm"); + env_btarget = tcg_global_mem_new(TCG_AREG0, + offsetof(CPUState, btarget), + "btarget"); + env_btaken = tcg_global_mem_new(TCG_AREG0, + offsetof(CPUState, btaken), + "btaken"); + for (i = 0; i < ARRAY_SIZE(cpu_R); i++) { + cpu_R[i] = tcg_global_mem_new(TCG_AREG0, + offsetof(CPUState, regs[i]), + regnames[i]); + } + for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) { + cpu_SR[i] = tcg_global_mem_new(TCG_AREG0, + offsetof(CPUState, sregs[i]), + special_regnames[i]); + } +#define GEN_HELPER 2 +#include "helper.h" + + return env; +} + +void cpu_reset (CPUState *env) +{ + if (qemu_loglevel_mask(CPU_LOG_RESET)) { + qemu_log("CPU Reset (CPU %d)\n", env->cpu_index); + log_cpu_state(env, 0); + } + + memset(env, 0, offsetof(CPUMBState, breakpoints)); + tlb_flush(env, 1); + + env->sregs[SR_MSR] = 0; +#if defined(CONFIG_USER_ONLY) + /* start in user mode with interrupts enabled. */ + env->pvr.regs[10] = 0x0c000000; /* Spartan 3a dsp. */ +#else + mmu_init(&env->mmu); +#endif +} + +void gen_pc_load(CPUState *env, struct TranslationBlock *tb, + unsigned long searched_pc, int pc_pos, void *puc) +{ + env->sregs[SR_PC] = gen_opc_pc[pc_pos]; +} |