author     Peter Maydell <peter.maydell@linaro.org>  2016-05-12 15:55:45 +0100
committer  Peter Maydell <peter.maydell@linaro.org>  2016-05-12 15:55:45 +0100
commit     e4f70d635863cfc3e3fa7d9a6e37b569ae94d82f (patch)
tree       e78b4881f2ed4be84792356761ddf2dcdf8e71d8 /target-arm
parent     6ddeeffffecf1f78acf6c93cbf267a8abe755836 (diff)
parent     0bc91ab3bb70f836d5a7a3ef6f800ef8c22e936f (diff)
Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20160512' into staging
target-arm queue:
 * blizzard, omap_lcdc: code cleanup to remove DEPTH != 32 dead code
 * QOMify various ARM devices
 * bcm2835_property: use cached values when querying framebuffer
 * hw/arm/nseries: don't allocate large sized array on the stack
 * fix LPAE descriptor address masking (only visible for EL2)
 * fix stage 2 exec permission handling for AArch32
 * first part of supporting syndrome info for data aborts to EL2
 * virt: NUMA support
 * work towards i.MX6 support
 * avoid unnecessary TLB flush on TCR_EL2, TCR_EL3 writes

# gpg: Signature made Thu 12 May 2016 14:29:14 BST using RSA key ID 14360CDE
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>"
# gpg:                 aka "Peter Maydell <pmaydell@gmail.com>"
# gpg:                 aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>"

* remotes/pmaydell/tags/pull-target-arm-20160512: (43 commits)
  hw/arm: QOM'ify versatilepb.c
  hw/arm: QOM'ify strongarm.c
  hw/arm: QOM'ify stellaris.c
  hw/arm: QOM'ify spitz.c
  hw/arm: QOM'ify pxa2xx_pic.c
  hw/arm: QOM'ify pxa2xx.c
  hw/arm: QOM'ify integratorcp.c
  hw/arm: QOM'ify highbank.c
  hw/arm: QOM'ify armv7m.c
  target-arm: Avoid unnecessary TLB flush on TCR_EL2, TCR_EL3 writes
  hw/display/blizzard: Remove blizzard_template.h
  hw/display/blizzard: Expand out macros
  i.MX: Add sabrelite i.MX6 emulation.
  i.MX: Add i.MX6 SOC implementation.
  i.MX: Add the Freescale SPI Controller
  FIFO: Add a FIFO32 implementation
  i.MX: Add i.MX6 System Reset Controller device.
  ARM: Factor out ARM on/off PSCI control functions
  ACPI: Virt: Generate SRAT table
  ACPI: move acpi_build_srat_memory to common place
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'target-arm')
-rw-r--r--  target-arm/Makefile.objs    |   1
-rw-r--r--  target-arm/arm-powerctl.c   | 224
-rw-r--r--  target-arm/arm-powerctl.h   |  75
-rw-r--r--  target-arm/helper.c         |  45
-rw-r--r--  target-arm/internals.h      |  24
-rw-r--r--  target-arm/op_helper.c      |   6
-rw-r--r--  target-arm/psci.c           |  70
-rw-r--r--  target-arm/translate-a64.c  |  45
8 files changed, 379 insertions(+), 111 deletions(-)
diff --git a/target-arm/Makefile.objs b/target-arm/Makefile.objs
index 82cbe6bbad..f20641163c 100644
--- a/target-arm/Makefile.objs
+++ b/target-arm/Makefile.objs
@@ -9,3 +9,4 @@ obj-y += neon_helper.o iwmmxt_helper.o
obj-y += gdbstub.o
obj-$(TARGET_AARCH64) += cpu64.o translate-a64.o helper-a64.o gdbstub64.o
obj-y += crypto_helper.o
+obj-y += arm-powerctl.o
diff --git a/target-arm/arm-powerctl.c b/target-arm/arm-powerctl.c
new file mode 100644
index 0000000000..cb9919b465
--- /dev/null
+++ b/target-arm/arm-powerctl.c
@@ -0,0 +1,224 @@
+/*
+ * QEMU support -- ARM Power Control specific functions.
+ *
+ * Copyright (c) 2016 Jean-Christophe Dubois
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include <cpu.h>
+#include <cpu-qom.h>
+#include "internals.h"
+#include "arm-powerctl.h"
+
+#ifndef DEBUG_ARM_POWERCTL
+#define DEBUG_ARM_POWERCTL 0
+#endif
+
+#define DPRINTF(fmt, args...) \
+ do { \
+ if (DEBUG_ARM_POWERCTL) { \
+ fprintf(stderr, "[ARM]%s: " fmt , __func__, ##args); \
+ } \
+ } while (0)
+
+CPUState *arm_get_cpu_by_id(uint64_t id)
+{
+ CPUState *cpu;
+
+ DPRINTF("cpu %" PRId64 "\n", id);
+
+ CPU_FOREACH(cpu) {
+ ARMCPU *armcpu = ARM_CPU(cpu);
+
+ if (armcpu->mp_affinity == id) {
+ return cpu;
+ }
+ }
+
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "[ARM]%s: Requesting unknown CPU %" PRId64 "\n",
+ __func__, id);
+
+ return NULL;
+}
+
+int arm_set_cpu_on(uint64_t cpuid, uint64_t entry, uint64_t context_id,
+ uint32_t target_el, bool target_aa64)
+{
+ CPUState *target_cpu_state;
+ ARMCPU *target_cpu;
+
+ DPRINTF("cpu %" PRId64 " (EL %d, %s) @ 0x%" PRIx64 " with R0 = 0x%" PRIx64
+ "\n", cpuid, target_el, target_aa64 ? "aarch64" : "aarch32", entry,
+ context_id);
+
+ /* The requested EL needs to be in the range 1 to 3 */
+ assert((target_el > 0) && (target_el < 4));
+
+ if (target_aa64 && (entry & 3)) {
+ /*
+ * if we are booting in AArch64 mode then "entry" needs to be 4-byte
+ * aligned.
+ */
+ return QEMU_ARM_POWERCTL_INVALID_PARAM;
+ }
+
+ /* Retrieve the cpu we are powering up */
+ target_cpu_state = arm_get_cpu_by_id(cpuid);
+ if (!target_cpu_state) {
+ /* The cpu was not found */
+ return QEMU_ARM_POWERCTL_INVALID_PARAM;
+ }
+
+ target_cpu = ARM_CPU(target_cpu_state);
+ if (!target_cpu->powered_off) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "[ARM]%s: CPU %" PRId64 " is already on\n",
+ __func__, cpuid);
+ return QEMU_ARM_POWERCTL_ALREADY_ON;
+ }
+
+ /*
+ * The newly brought CPU is requested to enter the exception level
+ * "target_el" and be in the requested mode (AArch64 or AArch32).
+ */
+
+ if (((target_el == 3) && !arm_feature(&target_cpu->env, ARM_FEATURE_EL3)) ||
+ ((target_el == 2) && !arm_feature(&target_cpu->env, ARM_FEATURE_EL2))) {
+ /*
+ * The CPU does not support requested level
+ */
+ return QEMU_ARM_POWERCTL_INVALID_PARAM;
+ }
+
+ if (!target_aa64 && arm_feature(&target_cpu->env, ARM_FEATURE_AARCH64)) {
+ /*
+ * For now we don't support booting an AArch64 CPU in AArch32 mode
+ * TODO: We should add this support later
+ */
+ qemu_log_mask(LOG_UNIMP,
+ "[ARM]%s: Starting AArch64 CPU %" PRId64
+ " in AArch32 mode is not supported yet\n",
+ __func__, cpuid);
+ return QEMU_ARM_POWERCTL_INVALID_PARAM;
+ }
+
+ /* Initialize the cpu we are turning on */
+ cpu_reset(target_cpu_state);
+ target_cpu->powered_off = false;
+ target_cpu_state->halted = 0;
+
+ if (target_aa64) {
+ if ((target_el < 3) && arm_feature(&target_cpu->env, ARM_FEATURE_EL3)) {
+ /*
+ * As target mode is AArch64, we need to set lower
+ * exception level (the requested level 2) to AArch64
+ */
+ target_cpu->env.cp15.scr_el3 |= SCR_RW;
+ }
+
+ if ((target_el < 2) && arm_feature(&target_cpu->env, ARM_FEATURE_EL2)) {
+ /*
+ * As target mode is AArch64, we need to set lower
+ * exception level (the requested level 1) to AArch64
+ */
+ target_cpu->env.cp15.hcr_el2 |= HCR_RW;
+ }
+
+ target_cpu->env.pstate = aarch64_pstate_mode(target_el, true);
+ } else {
+ /* We are requested to boot in AArch32 mode */
+ static uint32_t mode_for_el[] = { 0,
+ ARM_CPU_MODE_SVC,
+ ARM_CPU_MODE_HYP,
+ ARM_CPU_MODE_SVC };
+
+ cpsr_write(&target_cpu->env, mode_for_el[target_el], CPSR_M,
+ CPSRWriteRaw);
+ }
+
+ if (target_el == 3) {
+ /* Processor is in secure mode */
+ target_cpu->env.cp15.scr_el3 &= ~SCR_NS;
+ } else {
+ /* Processor is not in secure mode */
+ target_cpu->env.cp15.scr_el3 |= SCR_NS;
+ }
+
+ /* We check if the started CPU is now at the correct level */
+ assert(target_el == arm_current_el(&target_cpu->env));
+
+ if (target_aa64) {
+ target_cpu->env.xregs[0] = context_id;
+ target_cpu->env.thumb = false;
+ } else {
+ target_cpu->env.regs[0] = context_id;
+ target_cpu->env.thumb = entry & 1;
+ entry &= 0xfffffffe;
+ }
+
+ /* Start the new CPU at the requested address */
+ cpu_set_pc(target_cpu_state, entry);
+
+ /* We are good to go */
+ return QEMU_ARM_POWERCTL_RET_SUCCESS;
+}
+
+int arm_set_cpu_off(uint64_t cpuid)
+{
+ CPUState *target_cpu_state;
+ ARMCPU *target_cpu;
+
+ DPRINTF("cpu %" PRId64 "\n", cpuid);
+
+ /* Retrieve the cpu we are powering down */
+ target_cpu_state = arm_get_cpu_by_id(cpuid);
+ if (!target_cpu_state) {
+ return QEMU_ARM_POWERCTL_INVALID_PARAM;
+ }
+ target_cpu = ARM_CPU(target_cpu_state);
+ if (target_cpu->powered_off) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "[ARM]%s: CPU %" PRId64 " is already off\n",
+ __func__, cpuid);
+ return QEMU_ARM_POWERCTL_IS_OFF;
+ }
+
+ target_cpu->powered_off = true;
+ target_cpu_state->halted = 1;
+ target_cpu_state->exception_index = EXCP_HLT;
+ cpu_loop_exit(target_cpu_state);
+ /* notreached */
+
+ return QEMU_ARM_POWERCTL_RET_SUCCESS;
+}
+
+int arm_reset_cpu(uint64_t cpuid)
+{
+ CPUState *target_cpu_state;
+ ARMCPU *target_cpu;
+
+ DPRINTF("cpu %" PRId64 "\n", cpuid);
+
+ /* Retrieve the cpu we are resetting */
+ target_cpu_state = arm_get_cpu_by_id(cpuid);
+ if (!target_cpu_state) {
+ return QEMU_ARM_POWERCTL_INVALID_PARAM;
+ }
+ target_cpu = ARM_CPU(target_cpu_state);
+ if (target_cpu->powered_off) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "[ARM]%s: CPU %" PRId64 " is off\n",
+ __func__, cpuid);
+ return QEMU_ARM_POWERCTL_IS_OFF;
+ }
+
+ /* Reset the cpu */
+ cpu_reset(target_cpu_state);
+
+ return QEMU_ARM_POWERCTL_RET_SUCCESS;
+}
diff --git a/target-arm/arm-powerctl.h b/target-arm/arm-powerctl.h
new file mode 100644
index 0000000000..98ee04989b
--- /dev/null
+++ b/target-arm/arm-powerctl.h
@@ -0,0 +1,75 @@
+/*
+ * QEMU support -- ARM Power Control specific functions.
+ *
+ * Copyright (c) 2016 Jean-Christophe Dubois
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef QEMU_ARM_POWERCTL_H
+#define QEMU_ARM_POWERCTL_H
+
+#include "kvm-consts.h"
+
+#define QEMU_ARM_POWERCTL_RET_SUCCESS QEMU_PSCI_RET_SUCCESS
+#define QEMU_ARM_POWERCTL_INVALID_PARAM QEMU_PSCI_RET_INVALID_PARAMS
+#define QEMU_ARM_POWERCTL_ALREADY_ON QEMU_PSCI_RET_ALREADY_ON
+#define QEMU_ARM_POWERCTL_IS_OFF QEMU_PSCI_RET_DENIED
+
+/*
+ * arm_get_cpu_by_id:
+ * @cpuid: the id of the CPU whose state we want to retrieve
+ *
+ * Retrieve a CPUState object from its CPU ID provided in @cpuid.
+ *
+ * Returns: a pointer to the CPUState structure of the requested CPU.
+ */
+CPUState *arm_get_cpu_by_id(uint64_t cpuid);
+
+/*
+ * arm_set_cpu_on:
+ * @cpuid: the id of the CPU we want to start/wake up.
+ * @entry: the address the CPU shall start from.
+ * @context_id: the value to put in r0/x0.
+ * @target_el: The desired exception level.
+ * @target_aa64: 1 if the requested mode is AArch64. 0 otherwise.
+ *
+ * Start the cpu designated by @cpuid in @target_el exception level. The mode
+ * shall be AArch64 if @target_aa64 is set to 1. Otherwise the mode is
+ * AArch32. The CPU shall start at @entry with @context_id in r0/x0.
+ *
+ * Returns: QEMU_ARM_POWERCTL_RET_SUCCESS on success.
+ * QEMU_ARM_POWERCTL_INVALID_PARAM if bad parameters are provided.
+ * QEMU_ARM_POWERCTL_ALREADY_ON if the CPU was already started.
+ */
+int arm_set_cpu_on(uint64_t cpuid, uint64_t entry, uint64_t context_id,
+ uint32_t target_el, bool target_aa64);
+
+/*
+ * arm_set_cpu_off:
+ * @cpuid: the id of the CPU we want to stop/shut down.
+ *
+ * Stop the cpu designated by @cpuid.
+ *
+ * Returns: QEMU_ARM_POWERCTL_RET_SUCCESS on success.
+ * QEMU_ARM_POWERCTL_INVALID_PARAM if bad parameters are provided.
+ * QEMU_ARM_POWERCTL_IS_OFF if CPU is already off
+ */
+
+int arm_set_cpu_off(uint64_t cpuid);
+
+/*
+ * arm_reset_cpu:
+ * @cpuid: the id of the CPU we want to reset.
+ *
+ * Reset the cpu designated by @cpuid.
+ *
+ * Returns: QEMU_ARM_POWERCTL_RET_SUCCESS on success.
+ * QEMU_ARM_POWERCTL_INVALID_PARAM if bad parameters are provided.
+ * QEMU_ARM_POWERCTL_IS_OFF if CPU is off
+ */
+int arm_reset_cpu(uint64_t cpuid);
+
+#endif
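
The header above is the whole of the new power-control interface. A minimal, hypothetical caller (not part of this series; the helper name is invented for illustration) would look roughly like the sketch below. It mirrors what the PSCI CPU_ON path in psci.c is converted to later in this diff, assuming the MPIDR, entry point and context id have already been parsed from the guest's arguments:

    #include "qemu/osdep.h"
    #include "arm-powerctl.h"

    /* Hypothetical helper: bring a secondary CPU up at EL1 in AArch64 state. */
    static int start_secondary(uint64_t mpidr, uint64_t entry, uint64_t context_id)
    {
        int ret = arm_set_cpu_on(mpidr, entry, context_id,
                                 1 /* target_el */, true /* target_aa64 */);

        if (ret != QEMU_ARM_POWERCTL_RET_SUCCESS) {
            /* Unknown MPIDR, misaligned entry point, or the CPU is already on. */
            return ret;
        }
        return 0;
    }
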
diff --git a/target-arm/helper.c b/target-arm/helper.c
index 09638b2e7d..a2ab701ca5 100644
--- a/target-arm/helper.c
+++ b/target-arm/helper.c
@@ -3559,8 +3559,10 @@ static const ARMCPRegInfo el2_cp_reginfo[] = {
.resetvalue = 0 },
{ .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
- .access = PL2_RW, .writefn = vmsa_tcr_el1_write,
- .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
+ .access = PL2_RW,
+ /* no .writefn needed as this can't cause an ASID change;
+ * no .raw_writefn or .resetfn needed as we never use mask/base_mask
+ */
.fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
{ .name = "VTCR", .state = ARM_CP_STATE_AA32,
.cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
@@ -3753,8 +3755,10 @@ static const ARMCPRegInfo el3_cp_reginfo[] = {
.fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
{ .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
- .access = PL3_RW, .writefn = vmsa_tcr_el1_write,
- .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
+ .access = PL3_RW,
+ /* no .writefn needed as this can't cause an ASID change;
+ * no .raw_writefn or .resetfn needed as we never use mask/base_mask
+ */
.fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
{ .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
.type = ARM_CP_ALIAS,
@@ -6708,7 +6712,9 @@ static int get_S2prot(CPUARMState *env, int s2ap, int xn)
prot |= PAGE_WRITE;
}
if (!xn) {
- prot |= PAGE_EXEC;
+ if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
+ prot |= PAGE_EXEC;
+ }
}
return prot;
}
@@ -7248,7 +7254,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
uint32_t tg;
uint64_t ttbr;
int ttbr_select;
- hwaddr descaddr, descmask;
+ hwaddr descaddr, indexmask, indexmask_grainsize;
uint32_t tableattrs;
target_ulong page_size;
uint32_t attrs;
@@ -7437,28 +7443,20 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
level = startlevel;
}
- /* Clear the vaddr bits which aren't part of the within-region address,
- * so that we don't have to special case things when calculating the
- * first descriptor address.
- */
- if (va_size != inputsize) {
- address &= (1ULL << inputsize) - 1;
- }
-
- descmask = (1ULL << (stride + 3)) - 1;
+ indexmask_grainsize = (1ULL << (stride + 3)) - 1;
+ indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1;
/* Now we can extract the actual base address from the TTBR */
descaddr = extract64(ttbr, 0, 48);
- descaddr &= ~((1ULL << (inputsize - (stride * (4 - level)))) - 1);
+ descaddr &= ~indexmask;
/* The address field in the descriptor goes up to bit 39 for ARMv7
- * but up to bit 47 for ARMv8.
+ * but up to bit 47 for ARMv8; for AArch32 we only use the descaddrmask
+ * up to bit 39, because the higher bits are not needed to construct the
+ * next descriptor address (and they should all be zero anyway).
*/
- if (arm_feature(env, ARM_FEATURE_V8)) {
- descaddrmask = 0xfffffffff000ULL;
- } else {
- descaddrmask = 0xfffffff000ULL;
- }
+ descaddrmask = ((1ull << (va_size == 64 ? 48 : 40)) - 1) &
+ ~indexmask_grainsize;
/* Secure accesses start with the page table in secure memory and
* can be downgraded to non-secure at any step. Non-secure accesses
@@ -7470,7 +7468,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
uint64_t descriptor;
bool nstable;
- descaddr |= (address >> (stride * (4 - level))) & descmask;
+ descaddr |= (address >> (stride * (4 - level))) & indexmask;
descaddr &= ~7ULL;
nstable = extract32(tableattrs, 4, 1);
descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fsr, fi);
@@ -7493,6 +7491,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
*/
tableattrs |= extract64(descriptor, 59, 5);
level++;
+ indexmask = indexmask_grainsize;
continue;
}
/* Block entry at level 1 or 2, or page entry at level 3.
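
As a sanity check on the rewritten mask arithmetic (illustrative only, not part of the patch): with a 4KB granule the stride is 9, so the grain-size index mask covers bits [11:0], and the new descaddrmask expression reduces to the old hard-coded ARMv8 and ARMv7 constants:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* 4KB granule: 9 address bits resolved per level, 12-bit grain size. */
        int stride = 9, inputsize = 48, level = 0;
        uint64_t indexmask_grainsize = (1ULL << (stride + 3)) - 1;
        uint64_t indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1;

        /* The descriptor address masks reduce to the old constants. */
        uint64_t descaddrmask_aa64 = ((1ULL << 48) - 1) & ~indexmask_grainsize;
        uint64_t descaddrmask_aa32 = ((1ULL << 40) - 1) & ~indexmask_grainsize;

        assert(indexmask_grainsize == 0xfffULL);
        assert(indexmask == 0xfffULL);                  /* level-0 index mask */
        assert(descaddrmask_aa64 == 0xfffffffff000ULL); /* old ARMv8 constant */
        assert(descaddrmask_aa32 == 0xfffffff000ULL);   /* old ARMv7 constant */
        return 0;
    }
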
diff --git a/target-arm/internals.h b/target-arm/internals.h
index 2e70272be2..54a0fb1db7 100644
--- a/target-arm/internals.h
+++ b/target-arm/internals.h
@@ -263,7 +263,9 @@ enum arm_exception_class {
#define ARM_EL_EC_SHIFT 26
#define ARM_EL_IL_SHIFT 25
+#define ARM_EL_ISV_SHIFT 24
#define ARM_EL_IL (1 << ARM_EL_IL_SHIFT)
+#define ARM_EL_ISV (1 << ARM_EL_ISV_SHIFT)
/* Utility functions for constructing various kinds of syndrome value.
* Note that in general we follow the AArch64 syndrome values; in a
@@ -383,11 +385,27 @@ static inline uint32_t syn_insn_abort(int same_el, int ea, int s1ptw, int fsc)
| (ea << 9) | (s1ptw << 7) | fsc;
}
-static inline uint32_t syn_data_abort(int same_el, int ea, int cm, int s1ptw,
- int wnr, int fsc)
+static inline uint32_t syn_data_abort_no_iss(int same_el,
+ int ea, int cm, int s1ptw,
+ int wnr, int fsc)
{
return (EC_DATAABORT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
- | (ea << 9) | (cm << 8) | (s1ptw << 7) | (wnr << 6) | fsc;
+ | ARM_EL_IL
+ | (ea << 9) | (cm << 8) | (s1ptw << 7) | (wnr << 6) | fsc;
+}
+
+static inline uint32_t syn_data_abort_with_iss(int same_el,
+ int sas, int sse, int srt,
+ int sf, int ar,
+ int ea, int cm, int s1ptw,
+ int wnr, int fsc,
+ bool is_16bit)
+{
+ return (EC_DATAABORT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
+ | (is_16bit ? 0 : ARM_EL_IL)
+ | ARM_EL_ISV | (sas << 22) | (sse << 21) | (srt << 16)
+ | (sf << 15) | (ar << 14)
+ | (ea << 9) | (cm << 8) | (s1ptw << 7) | (wnr << 6) | fsc;
}
static inline uint32_t syn_swstep(int same_el, int isv, int ex)
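
For illustration, a hypothetical call to the new ISS-carrying helper might look like the fragment below (within target-arm code that already includes internals.h). The field names follow the ESR_ELx data-abort ISS encoding in the ARM ARM; the concrete values are invented for the example:

    /* A same-EL abort for a 32-bit (SAS=2) zero-extending load into W5
     * (SRT=5, SSE=0, SF=0, AR=0), with no external abort, cache maintenance
     * or stage-1 page-table walk, a read access (WnR=0), a translation fault
     * at level 3 (FSC=0x07), reported for a non-Thumb-16 instruction.
     */
    static uint32_t example_data_abort_syndrome(void)
    {
        return syn_data_abort_with_iss(1,        /* same_el */
                                       2, 0, 5,  /* sas, sse, srt */
                                       0, 0,     /* sf, ar */
                                       0, 0, 0,  /* ea, cm, s1ptw */
                                       0, 0x07,  /* wnr, fsc */
                                       false);   /* is_16bit */
    }
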
diff --git a/target-arm/op_helper.c b/target-arm/op_helper.c
index d626ff1a20..c7fba8526c 100644
--- a/target-arm/op_helper.c
+++ b/target-arm/op_helper.c
@@ -115,7 +115,8 @@ void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
syn = syn_insn_abort(same_el, 0, fi.s1ptw, syn);
exc = EXCP_PREFETCH_ABORT;
} else {
- syn = syn_data_abort(same_el, 0, 0, fi.s1ptw, is_write == 1, syn);
+ syn = syn_data_abort_no_iss(same_el,
+ 0, 0, fi.s1ptw, is_write == 1, syn);
if (is_write == 1 && arm_feature(env, ARM_FEATURE_V6)) {
fsr |= (1 << 11);
}
@@ -161,7 +162,8 @@ void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr, int is_write,
}
raise_exception(env, EXCP_DATA_ABORT,
- syn_data_abort(same_el, 0, 0, 0, is_write == 1, 0x21),
+ syn_data_abort_no_iss(same_el,
+ 0, 0, 0, is_write == 1, 0x21),
target_el);
}
diff --git a/target-arm/psci.c b/target-arm/psci.c
index c55487f872..ce2e0dca39 100644
--- a/target-arm/psci.c
+++ b/target-arm/psci.c
@@ -22,6 +22,7 @@
#include <kvm-consts.h>
#include <sysemu/sysemu.h>
#include "internals.h"
+#include "arm-powerctl.h"
bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
{
@@ -73,21 +74,6 @@ bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
}
}
-static CPUState *get_cpu_by_id(uint64_t id)
-{
- CPUState *cpu;
-
- CPU_FOREACH(cpu) {
- ARMCPU *armcpu = ARM_CPU(cpu);
-
- if (armcpu->mp_affinity == id) {
- return cpu;
- }
- }
-
- return NULL;
-}
-
void arm_handle_psci_call(ARMCPU *cpu)
{
/*
@@ -98,7 +84,6 @@ void arm_handle_psci_call(ARMCPU *cpu)
* Additional information about the calling convention used is available in
* the document 'SMC Calling Convention' (ARM DEN 0028)
*/
- CPUState *cs = CPU(cpu);
CPUARMState *env = &cpu->env;
uint64_t param[4];
uint64_t context_id, mpidr;
@@ -123,7 +108,6 @@ void arm_handle_psci_call(ARMCPU *cpu)
switch (param[0]) {
CPUState *target_cpu_state;
ARMCPU *target_cpu;
- CPUClass *target_cpu_class;
case QEMU_PSCI_0_2_FN_PSCI_VERSION:
ret = QEMU_PSCI_0_2_RET_VERSION_0_2;
@@ -137,7 +121,7 @@ void arm_handle_psci_call(ARMCPU *cpu)
switch (param[2]) {
case 0:
- target_cpu_state = get_cpu_by_id(mpidr);
+ target_cpu_state = arm_get_cpu_by_id(mpidr);
if (!target_cpu_state) {
ret = QEMU_PSCI_RET_INVALID_PARAMS;
break;
@@ -167,52 +151,13 @@ void arm_handle_psci_call(ARMCPU *cpu)
mpidr = param[1];
entry = param[2];
context_id = param[3];
-
- /* change to the cpu we are powering up */
- target_cpu_state = get_cpu_by_id(mpidr);
- if (!target_cpu_state) {
- ret = QEMU_PSCI_RET_INVALID_PARAMS;
- break;
- }
- target_cpu = ARM_CPU(target_cpu_state);
- if (!target_cpu->powered_off) {
- ret = QEMU_PSCI_RET_ALREADY_ON;
- break;
- }
- target_cpu_class = CPU_GET_CLASS(target_cpu);
-
- /* Initialize the cpu we are turning on */
- cpu_reset(target_cpu_state);
- target_cpu->powered_off = false;
- target_cpu_state->halted = 0;
-
/*
* The PSCI spec mandates that newly brought up CPUs enter the
* exception level of the caller in the same execution mode as
* the caller, with context_id in x0/r0, respectively.
- *
- * For now, it is sufficient to assert() that CPUs come out of
- * reset in the same mode as the calling CPU, since we only
- * implement EL1, which means that
- * (a) there is no EL2 for the calling CPU to trap into to change
- * its state
- * (b) the newly brought up CPU enters EL1 immediately after coming
- * out of reset in the default state
*/
- assert(is_a64(env) == is_a64(&target_cpu->env));
- if (is_a64(env)) {
- if (entry & 1) {
- ret = QEMU_PSCI_RET_INVALID_PARAMS;
- break;
- }
- target_cpu->env.xregs[0] = context_id;
- } else {
- target_cpu->env.regs[0] = context_id;
- target_cpu->env.thumb = entry & 1;
- }
- target_cpu_class->set_pc(target_cpu_state, entry);
-
- ret = 0;
+ ret = arm_set_cpu_on(mpidr, entry, context_id, arm_current_el(env),
+ is_a64(env));
break;
case QEMU_PSCI_0_1_FN_CPU_OFF:
case QEMU_PSCI_0_2_FN_CPU_OFF:
@@ -250,9 +195,8 @@ err:
return;
cpu_off:
- cpu->powered_off = true;
- cs->halted = 1;
- cs->exception_index = EXCP_HLT;
- cpu_loop_exit(cs);
+ ret = arm_set_cpu_off(cpu->mp_affinity);
/* notreached */
+ /* sanity check in case something failed */
+ assert(ret == QEMU_ARM_POWERCTL_RET_SUCCESS);
}
diff --git a/target-arm/translate-a64.c b/target-arm/translate-a64.c
index b13cff756a..24f5e177dd 100644
--- a/target-arm/translate-a64.c
+++ b/target-arm/translate-a64.c
@@ -2086,19 +2086,19 @@ static void disas_ldst_pair(DisasContext *s, uint32_t insn)
* size: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64bit
* opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
*/
-static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn)
+static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
+ int opc,
+ int size,
+ int rt,
+ bool is_vector)
{
- int rt = extract32(insn, 0, 5);
int rn = extract32(insn, 5, 5);
int imm9 = sextract32(insn, 12, 9);
- int opc = extract32(insn, 22, 2);
- int size = extract32(insn, 30, 2);
int idx = extract32(insn, 10, 2);
bool is_signed = false;
bool is_store = false;
bool is_extended = false;
bool is_unpriv = (idx == 2);
- bool is_vector = extract32(insn, 26, 1);
bool post_index;
bool writeback;
@@ -2128,8 +2128,8 @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn)
return;
}
is_store = (opc == 0);
- is_signed = opc & (1<<1);
- is_extended = (size < 3) && (opc & 1);
+ is_signed = extract32(opc, 1, 1);
+ is_extended = (size < 3) && extract32(opc, 0, 1);
}
switch (idx) {
@@ -2205,19 +2205,19 @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn)
* Rn: address register or SP for base
* Rm: offset register or ZR for offset
*/
-static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn)
+static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn,
+ int opc,
+ int size,
+ int rt,
+ bool is_vector)
{
- int rt = extract32(insn, 0, 5);
int rn = extract32(insn, 5, 5);
int shift = extract32(insn, 12, 1);
int rm = extract32(insn, 16, 5);
- int opc = extract32(insn, 22, 2);
int opt = extract32(insn, 13, 3);
- int size = extract32(insn, 30, 2);
bool is_signed = false;
bool is_store = false;
bool is_extended = false;
- bool is_vector = extract32(insn, 26, 1);
TCGv_i64 tcg_rm;
TCGv_i64 tcg_addr;
@@ -2294,14 +2294,14 @@ static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn)
* Rn: base address register (inc SP)
* Rt: target register
*/
-static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn)
+static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn,
+ int opc,
+ int size,
+ int rt,
+ bool is_vector)
{
- int rt = extract32(insn, 0, 5);
int rn = extract32(insn, 5, 5);
unsigned int imm12 = extract32(insn, 10, 12);
- bool is_vector = extract32(insn, 26, 1);
- int size = extract32(insn, 30, 2);
- int opc = extract32(insn, 22, 2);
unsigned int offset;
TCGv_i64 tcg_addr;
@@ -2360,20 +2360,25 @@ static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn)
/* Load/store register (all forms) */
static void disas_ldst_reg(DisasContext *s, uint32_t insn)
{
+ int rt = extract32(insn, 0, 5);
+ int opc = extract32(insn, 22, 2);
+ bool is_vector = extract32(insn, 26, 1);
+ int size = extract32(insn, 30, 2);
+
switch (extract32(insn, 24, 2)) {
case 0:
if (extract32(insn, 21, 1) == 1 && extract32(insn, 10, 2) == 2) {
- disas_ldst_reg_roffset(s, insn);
+ disas_ldst_reg_roffset(s, insn, opc, size, rt, is_vector);
} else {
/* Load/store register (unscaled immediate)
* Load/store immediate pre/post-indexed
* Load/store register unprivileged
*/
- disas_ldst_reg_imm9(s, insn);
+ disas_ldst_reg_imm9(s, insn, opc, size, rt, is_vector);
}
break;
case 1:
- disas_ldst_reg_unsigned_imm(s, insn);
+ disas_ldst_reg_unsigned_imm(s, insn, opc, size, rt, is_vector);
break;
default:
unallocated_encoding(s);