author    Andreas Färber <afaerber@suse.de>  2012-03-14 01:38:32 +0100
committer Andreas Färber <afaerber@suse.de>  2012-03-14 22:20:27 +0100
commit    9349b4f9fda360f3d9adc4cf4443a1a9b429c17e (patch)
tree      eec784672b45df3b321a2724669ff80639555ce0 /tcg
parent    5bfcb36ec49192cb22f45f4b7ae805c530a1fd9e (diff)
Rename CPUState -> CPUArchState
Scripted conversion:

    for file in *.[hc] hw/*.[hc] hw/kvm/*.[hc] linux-user/*.[hc] linux-user/m68k/*.[hc] bsd-user/*.[hc] darwin-user/*.[hc] tcg/*/*.[hc] target-*/cpu.h; do
        sed -i "s/CPUState/CPUArchState/g" $file
    done

All occurrences of CPUArchState are expected to be replaced by QOM CPUState, once all targets are QOM'ified and common fields have been extracted.

Signed-off-by: Andreas Färber <afaerber@suse.de>
Reviewed-by: Anthony Liguori <aliguori@us.ibm.com>
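Note that the CPUState renamed here is not one shared struct: each target's cpu.h aliases the name to its own state type, which is why a purely textual sed conversion suffices and why the name becomes free for a common QOM CPUState later. A rough sketch of the per-target pattern (the target name and fields are illustrative, not any real target's definitions):

    /* target-foo/cpu.h -- illustrative only; real targets alias
     * CPUX86State, CPUARMState, and so on. */
    #define CPUArchState struct CPUFooState   /* was: #define CPUState ... */
    #include "cpu-defs.h"

    typedef struct CPUFooState {
        uint32_t regs[16];   /* target registers */
        CPU_COMMON           /* shared fields: softmmu TLB, temp_buf, ... */
    } CPUFooState;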
Diffstat (limited to 'tcg')
-rw-r--r--  tcg/arm/tcg-target.c    22
-rw-r--r--  tcg/hppa/tcg-target.c    8
-rw-r--r--  tcg/i386/tcg-target.c    2
-rw-r--r--  tcg/ia64/tcg-target.c   10
-rw-r--r--  tcg/mips/tcg-target.c   14
-rw-r--r--  tcg/ppc/tcg-target.c     4
-rw-r--r--  tcg/ppc64/tcg-target.c   4
-rw-r--r--  tcg/s390/tcg-target.c    8
-rw-r--r--  tcg/sparc/tcg-target.c   4
-rw-r--r--  tcg/tci/tcg-target.c     2
-rw-r--r--  tcg/tci/tcg-target.h     2
11 files changed, 40 insertions, 40 deletions
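Every hunk below substitutes the new name into the same pattern: an offsetof() over the softmmu TLB table, whose value the backend then bakes into the generated load/store sequence as an immediate. A minimal, self-contained sketch of that computation (the layout is illustrative, not QEMU's exact definition; QEMU's call sites also use a runtime mem_index inside offsetof(), relying on a GCC extension):

    #include <stddef.h>
    #include <stdio.h>

    #define NB_MMU_MODES 2            /* illustrative values */
    #define CPU_TLB_SIZE 256

    typedef struct CPUTLBEntry {
        unsigned long addr_read;
        unsigned long addr_write;
        unsigned long addr_code;
        unsigned long addend;
    } CPUTLBEntry;

    typedef struct CPUArchState {
        CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE];
        /* ... target registers, temp_buf, etc. ... */
    } CPUArchState;

    int main(void)
    {
        /* Constants like these end up encoded in the emitted host code. */
        printf("addr_read,  mode 0: %zu\n",
               offsetof(CPUArchState, tlb_table[0][0].addr_read));
        printf("addr_write, mode 1: %zu\n",
               offsetof(CPUArchState, tlb_table[1][0].addr_write));
        return 0;
    }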
diff --git a/tcg/arm/tcg-target.c b/tcg/arm/tcg-target.c
index 5b233f564c..5af21b3f5d 100644
--- a/tcg/arm/tcg-target.c
+++ b/tcg/arm/tcg-target.c
@@ -990,10 +990,10 @@ static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R0, TCG_AREG0,
TCG_REG_R0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
/* In the
- * ldr r1 [r0, #(offsetof(CPUState, tlb_table[mem_index][0].addr_read))]
+ * ldr r1 [r0, #(offsetof(CPUArchState, tlb_table[mem_index][0].addr_read))]
* below, the offset is likely to exceed 12 bits if mem_index != 0 and
* not exceed otherwise, so use an
- * add r0, r0, #(mem_index * sizeof *CPUState.tlb_table)
+ * add r0, r0, #(mem_index * sizeof *CPUArchState.tlb_table)
* before.
*/
if (mem_index)
@@ -1001,7 +1001,7 @@ static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
(mem_index << (TLB_SHIFT & 1)) |
((16 - (TLB_SHIFT >> 1)) << 8));
tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R0,
- offsetof(CPUState, tlb_table[0][0].addr_read));
+ offsetof(CPUArchState, tlb_table[0][0].addr_read));
tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R1,
TCG_REG_R8, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
/* Check alignment. */
@@ -1012,12 +1012,12 @@ static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
/* XXX: possibly we could use a block data load or writeback in
* the first access. */
tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
- offsetof(CPUState, tlb_table[0][0].addr_read) + 4);
+ offsetof(CPUArchState, tlb_table[0][0].addr_read) + 4);
tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
TCG_REG_R1, addr_reg2, SHIFT_IMM_LSL(0));
# endif
tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
- offsetof(CPUState, tlb_table[0][0].addend));
+ offsetof(CPUArchState, tlb_table[0][0].addend));
switch (opc) {
case 0:
@@ -1210,10 +1210,10 @@ static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R0,
TCG_AREG0, TCG_REG_R0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
/* In the
- * ldr r1 [r0, #(offsetof(CPUState, tlb_table[mem_index][0].addr_write))]
+ * ldr r1 [r0, #(offsetof(CPUArchState, tlb_table[mem_index][0].addr_write))]
* below, the offset is likely to exceed 12 bits if mem_index != 0 and
* not exceed otherwise, so use an
- * add r0, r0, #(mem_index * sizeof *CPUState.tlb_table)
+ * add r0, r0, #(mem_index * sizeof *CPUArchState.tlb_table)
* before.
*/
if (mem_index)
@@ -1221,7 +1221,7 @@ static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
(mem_index << (TLB_SHIFT & 1)) |
((16 - (TLB_SHIFT >> 1)) << 8));
tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R0,
- offsetof(CPUState, tlb_table[0][0].addr_write));
+ offsetof(CPUArchState, tlb_table[0][0].addr_write));
tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R1,
TCG_REG_R8, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
/* Check alignment. */
@@ -1232,12 +1232,12 @@ static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
/* XXX: possibly we could use a block data load or writeback in
* the first access. */
tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
- offsetof(CPUState, tlb_table[0][0].addr_write) + 4);
+ offsetof(CPUArchState, tlb_table[0][0].addr_write) + 4);
tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
TCG_REG_R1, addr_reg2, SHIFT_IMM_LSL(0));
# endif
tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
- offsetof(CPUState, tlb_table[0][0].addend));
+ offsetof(CPUArchState, tlb_table[0][0].addend));
switch (opc) {
case 0:
@@ -1797,7 +1797,7 @@ static void tcg_target_init(TCGContext *s)
tcg_regset_set_reg(s->reserved_regs, TCG_REG_PC);
tcg_add_target_add_op_defs(arm_op_defs);
- tcg_set_frame(s, TCG_AREG0, offsetof(CPUState, temp_buf),
+ tcg_set_frame(s, TCG_AREG0, offsetof(CPUArchState, temp_buf),
CPU_TEMP_BUF_NLONGS * sizeof(long));
}
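The comment updated in the two arm hunks above rests on a hardware limit: the immediate offset field of ARM's ldr is 12 bits (0-4095), so the TLB-entry offset only fits directly for mem_index == 0, and the backend emits a separate add for the other MMU modes. A quick sanity check of that arithmetic, reusing an illustrative layout like the one sketched after the diffstat:

    #include <assert.h>
    #include <stddef.h>

    #define NB_MMU_MODES 2            /* illustrative values, as before */
    #define CPU_TLB_SIZE 256

    typedef struct CPUTLBEntry {
        unsigned long addr_read, addr_write, addr_code, addend;
    } CPUTLBEntry;

    typedef struct CPUArchState {
        CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE];
    } CPUArchState;

    int main(void)
    {
        /* mem_index == 0 fits ldr's 12-bit immediate... */
        assert(offsetof(CPUArchState, tlb_table[0][0].addr_read) < 4096);
        /* ...but a full mode's table precedes mem_index == 1: already
         * 4096 bytes with 16-byte entries, hence the extra ADD emitted
         * before the ldr in the code above. */
        assert(offsetof(CPUArchState, tlb_table[1][0].addr_read) >= 4096);
        return 0;
    }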
diff --git a/tcg/hppa/tcg-target.c b/tcg/hppa/tcg-target.c
index 71f4a8a6b2..c5a3730a2b 100644
--- a/tcg/hppa/tcg-target.c
+++ b/tcg/hppa/tcg-target.c
@@ -1040,13 +1040,13 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
lab1 = gen_new_label();
lab2 = gen_new_label();
- offset = offsetof(CPUState, tlb_table[mem_index][0].addr_read);
+ offset = offsetof(CPUArchState, tlb_table[mem_index][0].addr_read);
offset = tcg_out_tlb_read(s, TCG_REG_R26, TCG_REG_R25, addrlo_reg, addrhi_reg,
opc & 3, lab1, offset);
/* TLB Hit. */
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, (offset ? TCG_REG_R1 : TCG_REG_R25),
- offsetof(CPUState, tlb_table[mem_index][0].addend) - offset);
+ offsetof(CPUArchState, tlb_table[mem_index][0].addend) - offset);
tcg_out_qemu_ld_direct(s, datalo_reg, datahi_reg, addrlo_reg, TCG_REG_R20, opc);
tcg_out_branch(s, lab2, 1);
@@ -1155,13 +1155,13 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
lab1 = gen_new_label();
lab2 = gen_new_label();
- offset = offsetof(CPUState, tlb_table[mem_index][0].addr_write);
+ offset = offsetof(CPUArchState, tlb_table[mem_index][0].addr_write);
offset = tcg_out_tlb_read(s, TCG_REG_R26, TCG_REG_R25, addrlo_reg, addrhi_reg,
opc, lab1, offset);
/* TLB Hit. */
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, (offset ? TCG_REG_R1 : TCG_REG_R25),
- offsetof(CPUState, tlb_table[mem_index][0].addend) - offset);
+ offsetof(CPUArchState, tlb_table[mem_index][0].addend) - offset);
/* There are no indexed stores, so we must do this addition explicitly.
Careful to avoid R20, which is used for the bswaps to follow. */
diff --git a/tcg/i386/tcg-target.c b/tcg/i386/tcg-target.c
index 1dbe2408ad..fafd900c5a 100644
--- a/tcg/i386/tcg-target.c
+++ b/tcg/i386/tcg-target.c
@@ -1031,7 +1031,7 @@ static inline void tcg_out_tlb_load(TCGContext *s, int addrlo_idx,
(CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS, 0);
tcg_out_modrm_sib_offset(s, OPC_LEA + P_REXW, r1, TCG_AREG0, r1, 0,
- offsetof(CPUState, tlb_table[mem_index][0])
+ offsetof(CPUArchState, tlb_table[mem_index][0])
+ which);
/* cmp 0(r1), r0 */
diff --git a/tcg/ia64/tcg-target.c b/tcg/ia64/tcg-target.c
index e3de79fdb6..f90252a443 100644
--- a/tcg/ia64/tcg-target.c
+++ b/tcg/ia64/tcg-target.c
@@ -1479,8 +1479,8 @@ static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
/* Read the TLB entry */
tcg_out_qemu_tlb(s, addr_reg, s_bits,
- offsetof(CPUState, tlb_table[mem_index][0].addr_read),
- offsetof(CPUState, tlb_table[mem_index][0].addend));
+ offsetof(CPUArchState, tlb_table[mem_index][0].addr_read),
+ offsetof(CPUArchState, tlb_table[mem_index][0].addend));
/* P6 is the fast path, and P7 the slow path */
tcg_out_bundle(s, mLX,
@@ -1570,8 +1570,8 @@ static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
#endif
tcg_out_qemu_tlb(s, addr_reg, opc,
- offsetof(CPUState, tlb_table[mem_index][0].addr_write),
- offsetof(CPUState, tlb_table[mem_index][0].addend));
+ offsetof(CPUArchState, tlb_table[mem_index][0].addr_write),
+ offsetof(CPUArchState, tlb_table[mem_index][0].addend));
/* P6 is the fast path, and P7 the slow path */
tcg_out_bundle(s, mLX,
@@ -2368,6 +2368,6 @@ static void tcg_target_init(TCGContext *s)
tcg_regset_set_reg(s->reserved_regs, TCG_REG_R6);
tcg_add_target_add_op_defs(ia64_op_defs);
- tcg_set_frame(s, TCG_AREG0, offsetof(CPUState, temp_buf),
+ tcg_set_frame(s, TCG_AREG0, offsetof(CPUArchState, temp_buf),
CPU_TEMP_BUF_NLONGS * sizeof(long));
}
diff --git a/tcg/mips/tcg-target.c b/tcg/mips/tcg-target.c
index c5c32825f0..c6aa5bced5 100644
--- a/tcg/mips/tcg-target.c
+++ b/tcg/mips/tcg-target.c
@@ -827,7 +827,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_A0, TCG_REG_A0, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
tcg_out_opc_reg(s, OPC_ADDU, TCG_REG_A0, TCG_REG_A0, TCG_AREG0);
tcg_out_opc_imm(s, OPC_LW, TCG_REG_AT, TCG_REG_A0,
- offsetof(CPUState, tlb_table[mem_index][0].addr_read) + addr_meml);
+ offsetof(CPUArchState, tlb_table[mem_index][0].addr_read) + addr_meml);
tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_T0, TARGET_PAGE_MASK | ((1 << s_bits) - 1));
tcg_out_opc_reg(s, OPC_AND, TCG_REG_T0, TCG_REG_T0, addr_regl);
@@ -837,7 +837,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
tcg_out_nop(s);
tcg_out_opc_imm(s, OPC_LW, TCG_REG_AT, TCG_REG_A0,
- offsetof(CPUState, tlb_table[mem_index][0].addr_read) + addr_memh);
+ offsetof(CPUArchState, tlb_table[mem_index][0].addr_read) + addr_memh);
label1_ptr = s->code_ptr;
tcg_out_opc_br(s, OPC_BEQ, addr_regh, TCG_REG_AT);
@@ -893,7 +893,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
reloc_pc16(label1_ptr, (tcg_target_long) s->code_ptr);
tcg_out_opc_imm(s, OPC_LW, TCG_REG_A0, TCG_REG_A0,
- offsetof(CPUState, tlb_table[mem_index][0].addend));
+ offsetof(CPUArchState, tlb_table[mem_index][0].addend));
tcg_out_opc_reg(s, OPC_ADDU, TCG_REG_V0, TCG_REG_A0, addr_regl);
#else
if (GUEST_BASE == (int16_t)GUEST_BASE) {
@@ -1013,7 +1013,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_A0, TCG_REG_A0, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
tcg_out_opc_reg(s, OPC_ADDU, TCG_REG_A0, TCG_REG_A0, TCG_AREG0);
tcg_out_opc_imm(s, OPC_LW, TCG_REG_AT, TCG_REG_A0,
- offsetof(CPUState, tlb_table[mem_index][0].addr_write) + addr_meml);
+ offsetof(CPUArchState, tlb_table[mem_index][0].addr_write) + addr_meml);
tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_T0, TARGET_PAGE_MASK | ((1 << s_bits) - 1));
tcg_out_opc_reg(s, OPC_AND, TCG_REG_T0, TCG_REG_T0, addr_regl);
@@ -1023,7 +1023,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
tcg_out_nop(s);
tcg_out_opc_imm(s, OPC_LW, TCG_REG_AT, TCG_REG_A0,
- offsetof(CPUState, tlb_table[mem_index][0].addr_write) + addr_memh);
+ offsetof(CPUArchState, tlb_table[mem_index][0].addr_write) + addr_memh);
label1_ptr = s->code_ptr;
tcg_out_opc_br(s, OPC_BEQ, addr_regh, TCG_REG_AT);
@@ -1080,7 +1080,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
reloc_pc16(label1_ptr, (tcg_target_long) s->code_ptr);
tcg_out_opc_imm(s, OPC_LW, TCG_REG_A0, TCG_REG_A0,
- offsetof(CPUState, tlb_table[mem_index][0].addend));
+ offsetof(CPUArchState, tlb_table[mem_index][0].addend));
tcg_out_opc_reg(s, OPC_ADDU, TCG_REG_A0, TCG_REG_A0, addr_regl);
#else
if (GUEST_BASE == (int16_t)GUEST_BASE) {
@@ -1529,6 +1529,6 @@ static void tcg_target_init(TCGContext *s)
tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP); /* stack pointer */
tcg_add_target_add_op_defs(mips_op_defs);
- tcg_set_frame(s, TCG_AREG0, offsetof(CPUState, temp_buf),
+ tcg_set_frame(s, TCG_AREG0, offsetof(CPUArchState, temp_buf),
CPU_TEMP_BUF_NLONGS * sizeof(long));
}
diff --git a/tcg/ppc/tcg-target.c b/tcg/ppc/tcg-target.c
index f5d9bf3b00..6a34cab5f9 100644
--- a/tcg/ppc/tcg-target.c
+++ b/tcg/ppc/tcg-target.c
@@ -564,7 +564,7 @@ static void tcg_out_qemu_ld (TCGContext *s, const TCGArg *args, int opc)
tcg_out32 (s, (LWZU
| RT (r1)
| RA (r0)
- | offsetof (CPUState, tlb_table[mem_index][0].addr_read)
+ | offsetof (CPUArchState, tlb_table[mem_index][0].addr_read)
)
);
tcg_out32 (s, (RLWINM
@@ -760,7 +760,7 @@ static void tcg_out_qemu_st (TCGContext *s, const TCGArg *args, int opc)
tcg_out32 (s, (LWZU
| RT (r1)
| RA (r0)
- | offsetof (CPUState, tlb_table[mem_index][0].addr_write)
+ | offsetof (CPUArchState, tlb_table[mem_index][0].addr_write)
)
);
tcg_out32 (s, (RLWINM
diff --git a/tcg/ppc64/tcg-target.c b/tcg/ppc64/tcg-target.c
index 44193784f2..7f723b5c2c 100644
--- a/tcg/ppc64/tcg-target.c
+++ b/tcg/ppc64/tcg-target.c
@@ -635,7 +635,7 @@ static void tcg_out_qemu_ld (TCGContext *s, const TCGArg *args, int opc)
rbase = 0;
tcg_out_tlb_read (s, r0, r1, r2, addr_reg, s_bits,
- offsetof (CPUState, tlb_table[mem_index][0].addr_read));
+ offsetof (CPUArchState, tlb_table[mem_index][0].addr_read));
tcg_out32 (s, CMP | BF (7) | RA (r2) | RB (r1) | CMP_L);
@@ -782,7 +782,7 @@ static void tcg_out_qemu_st (TCGContext *s, const TCGArg *args, int opc)
rbase = 0;
tcg_out_tlb_read (s, r0, r1, r2, addr_reg, opc,
- offsetof (CPUState, tlb_table[mem_index][0].addr_write));
+ offsetof (CPUArchState, tlb_table[mem_index][0].addr_write));
tcg_out32 (s, CMP | BF (7) | RA (r2) | RB (r1) | CMP_L);
diff --git a/tcg/s390/tcg-target.c b/tcg/s390/tcg-target.c
index 9317fe88ef..47ffcc1f51 100644
--- a/tcg/s390/tcg-target.c
+++ b/tcg/s390/tcg-target.c
@@ -1439,9 +1439,9 @@ static void tcg_prepare_qemu_ldst(TCGContext* s, TCGReg data_reg,
tgen64_andi_tmp(s, arg1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
if (is_store) {
- ofs = offsetof(CPUState, tlb_table[mem_index][0].addr_write);
+ ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addr_write);
} else {
- ofs = offsetof(CPUState, tlb_table[mem_index][0].addr_read);
+ ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addr_read);
}
assert(ofs < 0x80000);
@@ -1515,7 +1515,7 @@ static void tcg_prepare_qemu_ldst(TCGContext* s, TCGReg data_reg,
*(label1_ptr + 1) = ((unsigned long)s->code_ptr -
(unsigned long)label1_ptr) >> 1;
- ofs = offsetof(CPUState, tlb_table[mem_index][0].addend);
+ ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addend);
assert(ofs < 0x80000);
tcg_out_mem(s, 0, RXY_AG, arg0, arg1, TCG_AREG0, ofs);
@@ -2293,7 +2293,7 @@ static void tcg_target_init(TCGContext *s)
tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
tcg_add_target_add_op_defs(s390_op_defs);
- tcg_set_frame(s, TCG_AREG0, offsetof(CPUState, temp_buf),
+ tcg_set_frame(s, TCG_AREG0, offsetof(CPUArchState, temp_buf),
CPU_TEMP_BUF_NLONGS * sizeof(long));
}
diff --git a/tcg/sparc/tcg-target.c b/tcg/sparc/tcg-target.c
index 4461fb4d13..b287122df5 100644
--- a/tcg/sparc/tcg-target.c
+++ b/tcg/sparc/tcg-target.c
@@ -776,7 +776,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
tcg_out_andi(s, arg1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
/* add arg1, x, arg1 */
- tcg_out_addi(s, arg1, offsetof(CPUState,
+ tcg_out_addi(s, arg1, offsetof(CPUArchState,
tlb_table[mem_index][0].addr_read));
/* add env, arg1, arg1 */
@@ -988,7 +988,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
tcg_out_andi(s, arg1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
/* add arg1, x, arg1 */
- tcg_out_addi(s, arg1, offsetof(CPUState,
+ tcg_out_addi(s, arg1, offsetof(CPUArchState,
tlb_table[mem_index][0].addr_write));
/* add env, arg1, arg1 */
diff --git a/tcg/tci/tcg-target.c b/tcg/tci/tcg-target.c
index fc0880cec5..bd85073662 100644
--- a/tcg/tci/tcg-target.c
+++ b/tcg/tci/tcg-target.c
@@ -891,7 +891,7 @@ static void tcg_target_init(TCGContext *s)
tcg_regset_clear(s->reserved_regs);
tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
tcg_add_target_add_op_defs(tcg_target_op_defs);
- tcg_set_frame(s, TCG_AREG0, offsetof(CPUState, temp_buf),
+ tcg_set_frame(s, TCG_AREG0, offsetof(CPUArchState, temp_buf),
CPU_TEMP_BUF_NLONGS * sizeof(long));
}
diff --git a/tcg/tci/tcg-target.h b/tcg/tci/tcg-target.h
index 81fcc0fd49..b61e99aff1 100644
--- a/tcg/tci/tcg-target.h
+++ b/tcg/tci/tcg-target.h
@@ -154,7 +154,7 @@ typedef enum {
void tci_disas(uint8_t opc);
-unsigned long tcg_qemu_tb_exec(CPUState *env, uint8_t *tb_ptr);
+unsigned long tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr);
#define tcg_qemu_tb_exec tcg_qemu_tb_exec
static inline void flush_icache_range(tcg_target_ulong start,