Diffstat (limited to 'target/arm')
-rw-r--r--  target/arm/cpu.c            |   4
-rw-r--r--  target/arm/cpu.h            | 141
-rw-r--r--  target/arm/cpu64.c          |  11
-rw-r--r--  target/arm/helper.c         | 222
-rw-r--r--  target/arm/internals.h      |   3
-rw-r--r--  target/arm/kvm64.c          |   4
-rw-r--r--  target/arm/op_helper.c      |  14
-rw-r--r--  target/arm/translate-a64.c  |  12
8 files changed, 309 insertions, 102 deletions
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index 60411f6bfe..0b185f8d30 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -1932,6 +1932,10 @@ static void arm_max_initfn(Object *obj)
t = cpu->isar.id_isar6;
t = FIELD_DP32(t, ID_ISAR6, DP, 1);
cpu->isar.id_isar6 = t;
+
+ t = cpu->id_mmfr4;
+ t = FIELD_DP32(t, ID_MMFR4, HPDS, 1); /* AA32HPD */
+ cpu->id_mmfr4 = t;
}
#endif
}
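
The FIELD()/FIELD_DP32() idiom used throughout this series comes from QEMU's hw/registerfields.h: FIELD() declares shift/length constants for a named register field, and FIELD_DP32() deposits a value into that field of a 32-bit word. A minimal sketch of the semantics (simplified; the real macros are token-pasting wrappers around deposit32()):

    #include <stdint.h>

    /* Roughly what FIELD(ID_MMFR4, HPDS, 16, 4) provides: */
    #define R_ID_MMFR4_HPDS_SHIFT  16
    #define R_ID_MMFR4_HPDS_LENGTH 4

    /* deposit32: replace 'len' bits at 'start' in 'value' with 'field'. */
    static inline uint32_t deposit32(uint32_t value, int start, int len,
                                     uint32_t field)
    {
        uint32_t mask = (~0U >> (32 - len)) << start;
        return (value & ~mask) | ((field << start) & mask);
    }

    /* So FIELD_DP32(t, ID_MMFR4, HPDS, 1) behaves like: */
    t = deposit32(t, R_ID_MMFR4_HPDS_SHIFT, R_ID_MMFR4_HPDS_LENGTH, 1);
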
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index 2a73fed9a0..c943f35dd9 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -818,6 +818,8 @@ struct ARMCPU {
uint64_t id_aa64isar1;
uint64_t id_aa64pfr0;
uint64_t id_aa64pfr1;
+ uint64_t id_aa64mmfr0;
+ uint64_t id_aa64mmfr1;
} isar;
uint32_t midr;
uint32_t revidr;
@@ -839,8 +841,6 @@ struct ARMCPU {
uint64_t id_aa64dfr1;
uint64_t id_aa64afr0;
uint64_t id_aa64afr1;
- uint64_t id_aa64mmfr0;
- uint64_t id_aa64mmfr1;
uint32_t dbgdidr;
uint32_t clidr;
uint64_t mp_affinity; /* MP ID without feature bits */
@@ -1249,7 +1249,7 @@ static inline void xpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
#define HCR_TIDCP (1ULL << 20)
#define HCR_TACR (1ULL << 21)
#define HCR_TSW (1ULL << 22)
-#define HCR_TPC (1ULL << 23)
+#define HCR_TPCP (1ULL << 23)
#define HCR_TPU (1ULL << 24)
#define HCR_TTLB (1ULL << 25)
#define HCR_TVM (1ULL << 26)
@@ -1261,6 +1261,26 @@ static inline void xpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
#define HCR_CD (1ULL << 32)
#define HCR_ID (1ULL << 33)
#define HCR_E2H (1ULL << 34)
+#define HCR_TLOR (1ULL << 35)
+#define HCR_TERR (1ULL << 36)
+#define HCR_TEA (1ULL << 37)
+#define HCR_MIOCNCE (1ULL << 38)
+#define HCR_APK (1ULL << 40)
+#define HCR_API (1ULL << 41)
+#define HCR_NV (1ULL << 42)
+#define HCR_NV1 (1ULL << 43)
+#define HCR_AT (1ULL << 44)
+#define HCR_NV2 (1ULL << 45)
+#define HCR_FWB (1ULL << 46)
+#define HCR_FIEN (1ULL << 47)
+#define HCR_TID4 (1ULL << 49)
+#define HCR_TICAB (1ULL << 50)
+#define HCR_TOCU (1ULL << 52)
+#define HCR_TTLBIS (1ULL << 54)
+#define HCR_TTLBOS (1ULL << 55)
+#define HCR_ATA (1ULL << 56)
+#define HCR_DCT (1ULL << 57)
+
/*
* When we actually implement ARMv8.1-VHE we should add HCR_E2H to
* HCR_MASK and then clear it again if the feature bit is not set in
@@ -1282,8 +1302,16 @@ static inline void xpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
#define SCR_ST (1U << 11)
#define SCR_TWI (1U << 12)
#define SCR_TWE (1U << 13)
-#define SCR_AARCH32_MASK (0x3fff & ~(SCR_RW | SCR_ST))
-#define SCR_AARCH64_MASK (0x3fff & ~SCR_NET)
+#define SCR_TLOR (1U << 14)
+#define SCR_TERR (1U << 15)
+#define SCR_APK (1U << 16)
+#define SCR_API (1U << 17)
+#define SCR_EEL2 (1U << 18)
+#define SCR_EASE (1U << 19)
+#define SCR_NMEA (1U << 20)
+#define SCR_FIEN (1U << 21)
+#define SCR_ENSCXT (1U << 25)
+#define SCR_ATA (1U << 26)
/* Return the current FPSCR value. */
uint32_t vfp_get_fpscr(CPUARMState *env);
@@ -1520,6 +1548,15 @@ FIELD(ID_ISAR6, FHM, 8, 4)
FIELD(ID_ISAR6, SB, 12, 4)
FIELD(ID_ISAR6, SPECRES, 16, 4)
+FIELD(ID_MMFR4, SPECSEI, 0, 4)
+FIELD(ID_MMFR4, AC2, 4, 4)
+FIELD(ID_MMFR4, XNX, 8, 4)
+FIELD(ID_MMFR4, CNP, 12, 4)
+FIELD(ID_MMFR4, HPDS, 16, 4)
+FIELD(ID_MMFR4, LSM, 20, 4)
+FIELD(ID_MMFR4, CCIDX, 24, 4)
+FIELD(ID_MMFR4, EVT, 28, 4)
+
FIELD(ID_AA64ISAR0, AES, 4, 4)
FIELD(ID_AA64ISAR0, SHA1, 8, 4)
FIELD(ID_AA64ISAR0, SHA2, 12, 4)
@@ -1557,6 +1594,28 @@ FIELD(ID_AA64PFR0, GIC, 24, 4)
FIELD(ID_AA64PFR0, RAS, 28, 4)
FIELD(ID_AA64PFR0, SVE, 32, 4)
+FIELD(ID_AA64MMFR0, PARANGE, 0, 4)
+FIELD(ID_AA64MMFR0, ASIDBITS, 4, 4)
+FIELD(ID_AA64MMFR0, BIGEND, 8, 4)
+FIELD(ID_AA64MMFR0, SNSMEM, 12, 4)
+FIELD(ID_AA64MMFR0, BIGENDEL0, 16, 4)
+FIELD(ID_AA64MMFR0, TGRAN16, 20, 4)
+FIELD(ID_AA64MMFR0, TGRAN64, 24, 4)
+FIELD(ID_AA64MMFR0, TGRAN4, 28, 4)
+FIELD(ID_AA64MMFR0, TGRAN16_2, 32, 4)
+FIELD(ID_AA64MMFR0, TGRAN64_2, 36, 4)
+FIELD(ID_AA64MMFR0, TGRAN4_2, 40, 4)
+FIELD(ID_AA64MMFR0, EXS, 44, 4)
+
+FIELD(ID_AA64MMFR1, HAFDBS, 0, 4)
+FIELD(ID_AA64MMFR1, VMIDBITS, 4, 4)
+FIELD(ID_AA64MMFR1, VH, 8, 4)
+FIELD(ID_AA64MMFR1, HPDS, 12, 4)
+FIELD(ID_AA64MMFR1, LO, 16, 4)
+FIELD(ID_AA64MMFR1, PAN, 20, 4)
+FIELD(ID_AA64MMFR1, SPECSEI, 24, 4)
+FIELD(ID_AA64MMFR1, XNX, 28, 4)
+
QEMU_BUILD_BUG_ON(ARRAY_SIZE(((ARMCPU *)0)->ccsidr) <= R_V7M_CSSELR_INDEX_MASK);
/* If adding a feature bit which corresponds to a Linux ELF
@@ -1670,6 +1729,14 @@ static inline bool arm_is_secure(CPUARMState *env)
}
#endif
+/**
+ * arm_hcr_el2_eff(): Return the effective value of HCR_EL2.
+ * E.g. when in secure state, fields in HCR_EL2 are suppressed,
+ * "for all purposes other than a direct read or write access of HCR_EL2."
+ * Not included here is HCR_RW.
+ */
+uint64_t arm_hcr_el2_eff(CPUARMState *env);
+
/* Return true if the specified exception level is running in AArch64 state. */
static inline bool arm_el_is_aa64(CPUARMState *env, int el)
{
@@ -2355,54 +2422,6 @@ bool write_cpustate_to_list(ARMCPU *cpu);
# define TARGET_VIRT_ADDR_SPACE_BITS 32
#endif
-/**
- * arm_hcr_el2_imo(): Return the effective value of HCR_EL2.IMO.
- * Depending on the values of HCR_EL2.E2H and TGE, this may be
- * "behaves as 1 for all purposes other than direct read/write" or
- * "behaves as 0 for all purposes other than direct read/write"
- */
-static inline bool arm_hcr_el2_imo(CPUARMState *env)
-{
- switch (env->cp15.hcr_el2 & (HCR_TGE | HCR_E2H)) {
- case HCR_TGE:
- return true;
- case HCR_TGE | HCR_E2H:
- return false;
- default:
- return env->cp15.hcr_el2 & HCR_IMO;
- }
-}
-
-/**
- * arm_hcr_el2_fmo(): Return the effective value of HCR_EL2.FMO.
- */
-static inline bool arm_hcr_el2_fmo(CPUARMState *env)
-{
- switch (env->cp15.hcr_el2 & (HCR_TGE | HCR_E2H)) {
- case HCR_TGE:
- return true;
- case HCR_TGE | HCR_E2H:
- return false;
- default:
- return env->cp15.hcr_el2 & HCR_FMO;
- }
-}
-
-/**
- * arm_hcr_el2_amo(): Return the effective value of HCR_EL2.AMO.
- */
-static inline bool arm_hcr_el2_amo(CPUARMState *env)
-{
- switch (env->cp15.hcr_el2 & (HCR_TGE | HCR_E2H)) {
- case HCR_TGE:
- return true;
- case HCR_TGE | HCR_E2H:
- return false;
- default:
- return env->cp15.hcr_el2 & HCR_AMO;
- }
-}
-
static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
unsigned int target_el)
{
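
The three removed accessors collapse into masks on one effective value; a call-site translation (a sketch of the equivalence, not a new API):

    uint64_t hcr = arm_hcr_el2_eff(env);
    bool imo = hcr & HCR_IMO;   /* replaces arm_hcr_el2_imo(env) */
    bool fmo = hcr & HCR_FMO;   /* replaces arm_hcr_el2_fmo(env) */
    bool amo = hcr & HCR_AMO;   /* replaces arm_hcr_el2_amo(env) */

This also centralizes the TGE/E2H special-casing that each helper previously duplicated, and extends it to every HCR_EL2 bit rather than just the three interrupt-mask overrides.
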
@@ -2411,6 +2430,7 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
bool secure = arm_is_secure(env);
bool pstate_unmasked;
int8_t unmasked = 0;
+ uint64_t hcr_el2;
/* Don't take exceptions if they target a lower EL.
* This check should catch any exceptions that would not be taken but left
@@ -2420,6 +2440,8 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
return false;
}
+ hcr_el2 = arm_hcr_el2_eff(env);
+
switch (excp_idx) {
case EXCP_FIQ:
pstate_unmasked = !(env->daif & PSTATE_F);
@@ -2430,13 +2452,13 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
break;
case EXCP_VFIQ:
- if (secure || !arm_hcr_el2_fmo(env) || (env->cp15.hcr_el2 & HCR_TGE)) {
+ if (secure || !(hcr_el2 & HCR_FMO) || (hcr_el2 & HCR_TGE)) {
/* VFIQs are only taken when hypervised and non-secure. */
return false;
}
return !(env->daif & PSTATE_F);
case EXCP_VIRQ:
- if (secure || !arm_hcr_el2_imo(env) || (env->cp15.hcr_el2 & HCR_TGE)) {
+ if (secure || !(hcr_el2 & HCR_IMO) || (hcr_el2 & HCR_TGE)) {
/* VIRQs are only taken when hypervised and non-secure. */
return false;
}
@@ -2475,7 +2497,7 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
* to the CPSR.F setting otherwise we further assess the state
* below.
*/
- hcr = arm_hcr_el2_fmo(env);
+ hcr = hcr_el2 & HCR_FMO;
scr = (env->cp15.scr_el3 & SCR_FIQ);
/* When EL3 is 32-bit, the SCR.FW bit controls whether the
@@ -2492,7 +2514,7 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
* when setting the target EL, so it does not have a further
* effect here.
*/
- hcr = arm_hcr_el2_imo(env);
+ hcr = hcr_el2 & HCR_IMO;
scr = false;
break;
default:
@@ -3318,6 +3340,11 @@ static inline bool isar_feature_aa64_sve(const ARMISARegisters *id)
return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, SVE) != 0;
}
+static inline bool isar_feature_aa64_lor(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, LO) != 0;
+}
+
/*
* Forward to the above feature tests given an ARMCPU pointer.
*/
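
The new isar_feature_aa64_lor() follows the file's existing pattern: a predicate on the ID-register struct, reached through forwarding macros for the common holders. A sketch of the macro shape being relied on (the real definition follows this comment in cpu.h):

    /* Forwarder, so guards read naturally at call sites: */
    #define cpu_isar_feature(name, cpu) \
        ({ ARMCPU *cpu_ = (cpu); isar_feature_##name(&cpu_->isar); })

    if (cpu_isar_feature(aa64_lor, cpu)) {
        /* register the LOR* sysregs, honor SCR_EL3.TLOR, ... */
    }
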
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index 873f059bf2..1d57be0c91 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -141,7 +141,7 @@ static void aarch64_a57_initfn(Object *obj)
cpu->pmceid0 = 0x00000000;
cpu->pmceid1 = 0x00000000;
cpu->isar.id_aa64isar0 = 0x00011120;
- cpu->id_aa64mmfr0 = 0x00001124;
+ cpu->isar.id_aa64mmfr0 = 0x00001124;
cpu->dbgdidr = 0x3516d000;
cpu->clidr = 0x0a200023;
cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */
@@ -195,7 +195,7 @@ static void aarch64_a53_initfn(Object *obj)
cpu->isar.id_aa64pfr0 = 0x00002222;
cpu->id_aa64dfr0 = 0x10305106;
cpu->isar.id_aa64isar0 = 0x00011120;
- cpu->id_aa64mmfr0 = 0x00001122; /* 40 bit physical addr */
+ cpu->isar.id_aa64mmfr0 = 0x00001122; /* 40 bit physical addr */
cpu->dbgdidr = 0x3516d000;
cpu->clidr = 0x0a200023;
cpu->ccsidr[0] = 0x700fe01a; /* 32KB L1 dcache */
@@ -249,7 +249,7 @@ static void aarch64_a72_initfn(Object *obj)
cpu->pmceid0 = 0x00000000;
cpu->pmceid1 = 0x00000000;
cpu->isar.id_aa64isar0 = 0x00011120;
- cpu->id_aa64mmfr0 = 0x00001124;
+ cpu->isar.id_aa64mmfr0 = 0x00001124;
cpu->dbgdidr = 0x3516d000;
cpu->clidr = 0x0a200023;
cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */
@@ -324,6 +324,11 @@ static void aarch64_max_initfn(Object *obj)
t = FIELD_DP64(t, ID_AA64PFR0, ADVSIMD, 1);
cpu->isar.id_aa64pfr0 = t;
+ t = cpu->isar.id_aa64mmfr1;
+ t = FIELD_DP64(t, ID_AA64MMFR1, HPDS, 1); /* HPD */
+ t = FIELD_DP64(t, ID_AA64MMFR1, LO, 1);
+ cpu->isar.id_aa64mmfr1 = t;
+
/* Replicate the same data to the 32-bit id registers. */
u = cpu->isar.id_isar5;
u = FIELD_DP32(u, ID_ISAR5, AES, 2); /* AES + PMULL */
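
Given the field layout declared in cpu.h (HPDS at bits [15:12], LO at bits [19:16]), the two FIELD_DP64() calls above work out as follows, assuming id_aa64mmfr1 starts at 0 for -cpu max:

    uint64_t t = 0;                            /* id_aa64mmfr1 before the hunk */
    t = FIELD_DP64(t, ID_AA64MMFR1, HPDS, 1);  /* t == 0x0000000000001000 */
    t = FIELD_DP64(t, ID_AA64MMFR1, LO,   1);  /* t == 0x0000000000011000 */

advertising ARMv8.1-HPD and ARMv8.1-LOR to a guest that reads ID_AA64MMFR1_EL1.
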
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 0da1424f72..644599b29d 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -448,7 +448,7 @@ static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
int el = arm_current_el(env);
bool mdcr_el2_tdosa = (env->cp15.mdcr_el2 & MDCR_TDOSA) ||
(env->cp15.mdcr_el2 & MDCR_TDE) ||
- (env->cp15.hcr_el2 & HCR_TGE);
+ (arm_hcr_el2_eff(env) & HCR_TGE);
if (el < 2 && mdcr_el2_tdosa && !arm_is_secure_below_el3(env)) {
return CP_ACCESS_TRAP_EL2;
@@ -468,7 +468,7 @@ static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
int el = arm_current_el(env);
bool mdcr_el2_tdra = (env->cp15.mdcr_el2 & MDCR_TDRA) ||
(env->cp15.mdcr_el2 & MDCR_TDE) ||
- (env->cp15.hcr_el2 & HCR_TGE);
+ (arm_hcr_el2_eff(env) & HCR_TGE);
if (el < 2 && mdcr_el2_tdra && !arm_is_secure_below_el3(env)) {
return CP_ACCESS_TRAP_EL2;
@@ -488,7 +488,7 @@ static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
int el = arm_current_el(env);
bool mdcr_el2_tda = (env->cp15.mdcr_el2 & MDCR_TDA) ||
(env->cp15.mdcr_el2 & MDCR_TDE) ||
- (env->cp15.hcr_el2 & HCR_TGE);
+ (arm_hcr_el2_eff(env) & HCR_TGE);
if (el < 2 && mdcr_el2_tda && !arm_is_secure_below_el3(env)) {
return CP_ACCESS_TRAP_EL2;
@@ -1279,11 +1279,16 @@ static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
- /* We only mask off bits that are RES0 both for AArch64 and AArch32.
- * For bits that vary between AArch32/64, code needs to check the
- * current execution mode before directly using the feature bit.
- */
- uint32_t valid_mask = SCR_AARCH64_MASK | SCR_AARCH32_MASK;
+ /* Begin with base v8.0 state. */
+ uint32_t valid_mask = 0x3fff;
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
+ if (arm_el_is_aa64(env, 3)) {
+ value |= SCR_FW | SCR_AW; /* these two bits are RES1. */
+ valid_mask &= ~SCR_NET;
+ } else {
+ valid_mask &= ~(SCR_RW | SCR_ST);
+ }
if (!arm_feature(env, ARM_FEATURE_EL2)) {
valid_mask &= ~SCR_HCE;
@@ -1299,6 +1304,9 @@ static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
valid_mask &= ~SCR_SMD;
}
}
+ if (cpu_isar_feature(aa64_lor, cpu)) {
+ valid_mask |= SCR_TLOR;
+ }
/* Clear all-context RES0 bits. */
value &= valid_mask;
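
Worked through the SCR_* constants from cpu.h, the mask construction for an AArch64 EL3 comes out as (assuming EL2 is present and SMC is not disabled, so neither HCE nor SMD is stripped):

    uint32_t valid_mask = 0x3fff;   /* base ARMv8.0 bits [13:0]       */
    valid_mask &= ~SCR_NET;         /* bit 6 is RES0 when EL3 is AA64 */
    valid_mask |= SCR_TLOR;         /* bit 14, ARMv8.1-LOR present    */
    /* valid_mask == 0x7fbf; FW (bit 4) and AW (bit 5) were already
     * forced to 1 in the written value, since they are RES1 here.    */
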
@@ -1327,9 +1335,10 @@ static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
CPUState *cs = ENV_GET_CPU(env);
+ uint64_t hcr_el2 = arm_hcr_el2_eff(env);
uint64_t ret = 0;
- if (arm_hcr_el2_imo(env)) {
+ if (hcr_el2 & HCR_IMO) {
if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
ret |= CPSR_I;
}
@@ -1339,7 +1348,7 @@ static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
}
}
- if (arm_hcr_el2_fmo(env)) {
+ if (hcr_el2 & HCR_FMO) {
if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
ret |= CPSR_F;
}
@@ -2724,6 +2733,7 @@ static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
ARMCPU *cpu = arm_env_get_cpu(env);
+ TCR *tcr = raw_ptr(env, ri);
if (arm_feature(env, ARM_FEATURE_LPAE)) {
/* With LPAE the TTBCR could result in a change of ASID
@@ -2731,6 +2741,8 @@ static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
*/
tlb_flush(CPU(cpu));
}
+ /* Preserve the high half of TCR_EL1, set via TTBCR2. */
+ value = deposit64(tcr->raw_tcr, 0, 32, value);
vmsa_ttbcr_raw_write(env, ri, value);
}
@@ -2833,6 +2845,16 @@ static const ARMCPRegInfo vmsa_cp_reginfo[] = {
REGINFO_SENTINEL
};
+/* Note that unlike TTBCR, writing to TTBCR2 does not require flushing
+ * qemu tlbs nor adjusting cached masks.
+ */
+static const ARMCPRegInfo ttbcr2_reginfo = {
+ .name = "TTBCR2", .cp = 15, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 3,
+ .access = PL1_RW, .type = ARM_CP_ALIAS,
+ .bank_fieldoffsets = { offsetofhigh32(CPUARMState, cp15.tcr_el[3]),
+ offsetofhigh32(CPUARMState, cp15.tcr_el[1]) },
+};
+
static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
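
The TTBCR2 reginfo above banks onto the high 32 bits of the two tcr_el[] slots via offsetofhigh32(). A little-endian sketch of what that helper denotes (QEMU's real macros also handle big-endian hosts, where the two halves swap in memory):

    /* "low32"/"high32" always name bits [31:0] and [63:32] of the
     * 64-bit field, whatever the host byte order. */
    #define offsetoflow32(S, M)  offsetof(S, M)
    #define offsetofhigh32(S, M) (offsetof(S, M) + sizeof(uint32_t))

With ARM_CP_ALIAS set there is no separate storage: TTBCR2 reads and writes flow straight into the upper half of the same raw_tcr that vmsa_ttbcr_write() preserves.
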
@@ -3945,6 +3967,9 @@ static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
*/
valid_mask &= ~HCR_TSC;
}
+ if (cpu_isar_feature(aa64_lor, cpu)) {
+ valid_mask |= HCR_TLOR;
+ }
/* Clear RES0 bits. */
value &= valid_mask;
@@ -3991,6 +4016,51 @@ static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri,
hcr_write(env, NULL, value);
}
+/*
+ * Return the effective value of HCR_EL2.
+ * Bits that are not included here:
+ * RW (read from SCR_EL3.RW as needed)
+ */
+uint64_t arm_hcr_el2_eff(CPUARMState *env)
+{
+ uint64_t ret = env->cp15.hcr_el2;
+
+ if (arm_is_secure_below_el3(env)) {
+ /*
+ * "This register has no effect if EL2 is not enabled in the
+ * current Security state". This is ARMv8.4-SecEL2 speak for
+ * !(SCR_EL3.NS==1 || SCR_EL3.EEL2==1).
+ *
+ * Prior to that, the language was "In an implementation that
+ * includes EL3, when the value of SCR_EL3.NS is 0 the PE behaves
+ * as if this field is 0 for all purposes other than a direct
+ * read or write access of HCR_EL2". With lots of enumeration
+ * on a per-field basis. In current QEMU, this condition
+ * is arm_is_secure_below_el3.
+ *
+ * Since the v8.4 language applies to the entire register, and
+ * appears to be backward compatible, use that.
+ */
+ ret = 0;
+ } else if (ret & HCR_TGE) {
+ /* These bits are up-to-date as of ARMv8.4. */
+ if (ret & HCR_E2H) {
+ ret &= ~(HCR_VM | HCR_FMO | HCR_IMO | HCR_AMO |
+ HCR_BSU_MASK | HCR_DC | HCR_TWI | HCR_TWE |
+ HCR_TID0 | HCR_TID2 | HCR_TPCP | HCR_TPU |
+ HCR_TDZ | HCR_CD | HCR_ID | HCR_MIOCNCE);
+ } else {
+ ret |= HCR_FMO | HCR_IMO | HCR_AMO;
+ }
+ ret &= ~(HCR_SWIO | HCR_PTW | HCR_VF | HCR_VI | HCR_VSE |
+ HCR_FB | HCR_TID1 | HCR_TID3 | HCR_TSC | HCR_TACR |
+ HCR_TSW | HCR_TTLB | HCR_TVM | HCR_HCD | HCR_TRVM |
+ HCR_TLOR);
+ }
+
+ return ret;
+}
+
static const ARMCPRegInfo el2_cp_reginfo[] = {
{ .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
.type = ARM_CP_IO,
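
A concrete consequence of the function above: for a non-secure CPU with HCR_EL2 = HCR_TGE (E2H clear), the effective value forces the interrupt-mask overrides to 1 and the listed trap/virtual-interrupt bits to 0 (a sketch; the values follow directly from the code):

    env->cp15.hcr_el2 = HCR_TGE;                   /* TGE=1, E2H=0 */
    uint64_t eff = arm_hcr_el2_eff(env);
    assert((eff & (HCR_FMO | HCR_IMO | HCR_AMO))
           == (HCR_FMO | HCR_IMO | HCR_AMO));      /* behave as 1 */
    assert(!(eff & (HCR_VI | HCR_VF | HCR_TSC)));  /* behave as 0 */

And in secure state the whole register reads back as 0, which is what lets later hunks drop their explicit !arm_is_secure() checks.
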
@@ -4503,8 +4573,7 @@ int sve_exception_el(CPUARMState *env, int el)
if (disabled) {
/* route_to_el2 */
return (arm_feature(env, ARM_FEATURE_EL2)
- && !arm_is_secure(env)
- && (env->cp15.hcr_el2 & HCR_TGE) ? 2 : 1);
+ && (arm_hcr_el2_eff(env) & HCR_TGE) ? 2 : 1);
}
/* Check CPACR.FPEN. */
@@ -4956,6 +5025,42 @@ static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
return pfr0;
}
+/* Shared logic between LORID and the rest of the LOR* registers.
+ * Secure state has already been dealt with.
+ */
+static CPAccessResult access_lor_ns(CPUARMState *env)
+{
+ int el = arm_current_el(env);
+
+ if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TLOR)) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ if (el < 3 && (env->cp15.scr_el3 & SCR_TLOR)) {
+ return CP_ACCESS_TRAP_EL3;
+ }
+ return CP_ACCESS_OK;
+}
+
+static CPAccessResult access_lorid(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ if (arm_is_secure_below_el3(env)) {
+ /* Access ok in secure mode. */
+ return CP_ACCESS_OK;
+ }
+ return access_lor_ns(env);
+}
+
+static CPAccessResult access_lor_other(CPUARMState *env,
+ const ARMCPRegInfo *ri, bool isread)
+{
+ if (arm_is_secure_below_el3(env)) {
+ /* Access denied in secure mode. */
+ return CP_ACCESS_TRAP;
+ }
+ return access_lor_ns(env);
+}
+
void register_cp_regs_for_features(ARMCPU *cpu)
{
/* Register all the coprocessor registers based on feature bits */
@@ -5207,11 +5312,11 @@ void register_cp_regs_for_features(ARMCPU *cpu)
{ .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
.access = PL1_R, .type = ARM_CP_CONST,
- .resetvalue = cpu->id_aa64mmfr0 },
+ .resetvalue = cpu->isar.id_aa64mmfr0 },
{ .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
.access = PL1_R, .type = ARM_CP_CONST,
- .resetvalue = cpu->id_aa64mmfr1 },
+ .resetvalue = cpu->isar.id_aa64mmfr1 },
{ .name = "ID_AA64MMFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
.access = PL1_R, .type = ARM_CP_CONST,
@@ -5433,6 +5538,10 @@ void register_cp_regs_for_features(ARMCPU *cpu)
} else {
define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
define_arm_cp_regs(cpu, vmsa_cp_reginfo);
+ /* TTBCR2 is introduced with ARMv8.2-AA32HPD. */
+ if (FIELD_EX32(cpu->id_mmfr4, ID_MMFR4, HPDS) != 0) {
+ define_one_arm_cp_reg(cpu, &ttbcr2_reginfo);
+ }
}
if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
define_arm_cp_regs(cpu, t2ee_cp_reginfo);
@@ -5693,6 +5802,38 @@ void register_cp_regs_for_features(ARMCPU *cpu)
define_one_arm_cp_reg(cpu, &sctlr);
}
+ if (cpu_isar_feature(aa64_lor, cpu)) {
+ /*
+ * A trivial implementation of ARMv8.1-LOR leaves all of these
+ * registers fixed at 0, which indicates that there are zero
+ * supported Limited Ordering regions.
+ */
+ static const ARMCPRegInfo lor_reginfo[] = {
+ { .name = "LORSA_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 0,
+ .access = PL1_RW, .accessfn = access_lor_other,
+ .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "LOREA_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 1,
+ .access = PL1_RW, .accessfn = access_lor_other,
+ .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "LORN_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 2,
+ .access = PL1_RW, .accessfn = access_lor_other,
+ .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "LORC_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 3,
+ .access = PL1_RW, .accessfn = access_lor_other,
+ .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "LORID_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7,
+ .access = PL1_R, .accessfn = access_lorid,
+ .type = ARM_CP_CONST, .resetvalue = 0 },
+ REGINFO_SENTINEL
+ };
+ define_arm_cp_regs(cpu, lor_reginfo);
+ }
+
if (cpu_isar_feature(aa64_sve, cpu)) {
define_one_arm_cp_reg(cpu, &zcr_el1_reginfo);
if (arm_feature(env, ARM_FEATURE_EL2)) {
@@ -6149,9 +6290,8 @@ static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
* and CPS are treated as illegal mode changes.
*/
if (write_type == CPSRWriteByInstr &&
- (env->cp15.hcr_el2 & HCR_TGE) &&
(env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON &&
- !arm_is_secure_below_el3(env)) {
+ (arm_hcr_el2_eff(env) & HCR_TGE)) {
return 1;
}
return 0;
@@ -6505,12 +6645,13 @@ uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
uint32_t cur_el, bool secure)
{
CPUARMState *env = cs->env_ptr;
- int rw;
- int scr;
- int hcr;
+ bool rw;
+ bool scr;
+ bool hcr;
int target_el;
/* Is the highest EL AArch64? */
- int is64 = arm_feature(env, ARM_FEATURE_AARCH64);
+ bool is64 = arm_feature(env, ARM_FEATURE_AARCH64);
+ uint64_t hcr_el2;
if (arm_feature(env, ARM_FEATURE_EL3)) {
rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW);
@@ -6522,24 +6663,22 @@ uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
rw = is64;
}
+ hcr_el2 = arm_hcr_el2_eff(env);
switch (excp_idx) {
case EXCP_IRQ:
scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
- hcr = arm_hcr_el2_imo(env);
+ hcr = hcr_el2 & HCR_IMO;
break;
case EXCP_FIQ:
scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
- hcr = arm_hcr_el2_fmo(env);
+ hcr = hcr_el2 & HCR_FMO;
break;
default:
scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
- hcr = arm_hcr_el2_amo(env);
+ hcr = hcr_el2 & HCR_AMO;
break;
};
- /* If HCR.TGE is set then HCR is treated as being 1 */
- hcr |= ((env->cp15.hcr_el2 & HCR_TGE) == HCR_TGE);
-
/* Perform a table-lookup for the target EL given the current state */
target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el];
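
The switch from int to bool here is load-bearing, not cosmetic: hcr_el2 & HCR_FMO evaluates to 8 (bit 3), and these variables index target_el_table directly. C guarantees that conversion to _Bool collapses any nonzero value to exactly 1:

    bool hcr = hcr_el2 & HCR_FMO;  /* HCR_FMO == (1ULL << 3) == 8 */
    /* hcr is now 0 or 1, a safe index; as an int it would have
     * held 8 and indexed past the end of target_el_table.        */

The old code sidestepped this because the helpers returned bool and the TGE override was ORed in as a separate 0/1 comparison; folding TGE into arm_hcr_el2_eff() removes that last step.
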
@@ -9635,6 +9774,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
bool ttbr1_valid = true;
uint64_t descaddrmask;
bool aarch64 = arm_el_is_aa64(env, el);
+ bool hpd = false;
/* TODO:
* This code does not handle the different format TCR for VTCR_EL2.
@@ -9749,6 +9889,15 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
if (tg == 2) { /* 16KB pages */
stride = 11;
}
+ if (aarch64 && el > 1) {
+ hpd = extract64(tcr->raw_tcr, 24, 1);
+ } else {
+ hpd = extract64(tcr->raw_tcr, 41, 1);
+ }
+ if (!aarch64) {
+ /* For aarch32, hpd0 is not enabled without t2e as well. */
+ hpd &= extract64(tcr->raw_tcr, 6, 1);
+ }
} else {
/* We should only be here if TTBR1 is valid */
assert(ttbr1_valid);
@@ -9764,6 +9913,11 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
if (tg == 1) { /* 16KB pages */
stride = 11;
}
+ hpd = extract64(tcr->raw_tcr, 42, 1);
+ if (!aarch64) {
+ /* For aarch32, hpd1 is not enabled without t2e as well. */
+ hpd &= extract64(tcr->raw_tcr, 6, 1);
+ }
}
/* Here we should have set up all the parameters for the translation:
@@ -9857,7 +10011,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
descaddr = descriptor & descaddrmask;
if ((descriptor & 2) && (level < 3)) {
- /* Table entry. The top five bits are attributes which  may
+ /* Table entry.  The top five bits are attributes which may
+ /* Table entry. The top five bits are attributes which may
* propagate down through lower levels of the table (and
* which are all arranged so that 0 means "no effect", so
* we can gather them up by ORing in the bits at each level).
@@ -9882,15 +10036,17 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
break;
}
/* Merge in attributes from table descriptors */
- attrs |= extract32(tableattrs, 0, 2) << 11; /* XN, PXN */
- attrs |= extract32(tableattrs, 3, 1) << 5; /* APTable[1] => AP[2] */
+ attrs |= nstable << 3; /* NS */
+ if (hpd) {
+ /* HPD disables all the table attributes except NSTable. */
+ break;
+ }
+ attrs |= extract32(tableattrs, 0, 2) << 11; /* XN, PXN */
/* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
* means "force PL1 access only", which means forcing AP[1] to 0.
*/
- if (extract32(tableattrs, 2, 1)) {
- attrs &= ~(1 << 4);
- }
- attrs |= nstable << 3; /* NS */
+ attrs &= ~(extract32(tableattrs, 2, 1) << 4); /* !APT[0] => AP[1] */
+ attrs |= extract32(tableattrs, 3, 1) << 5; /* APT[1] => AP[2] */
break;
}
/* Here descaddr is the final physical address, and attributes
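
Pulling the two preceding hunks together, the per-walk HPD decision can be condensed as follows (a restatement for clarity, not a helper that exists in the tree; bit 24 is TCR_ELx.HPD for the single-range regimes, bits 41/42 are TCR_EL1.HPD0/HPD1, and AArch32 additionally requires the T2E enable at bit 6 of the merged TTBCR/TTBCR2 value):

    static bool walk_uses_hpd(bool aarch64, int el, bool ttbr1_walk,
                              uint64_t raw_tcr)
    {
        bool hpd;
        if (aarch64 && el > 1) {
            hpd = extract64(raw_tcr, 24, 1);   /* TCR_EL2/3.HPD */
        } else if (!ttbr1_walk) {
            hpd = extract64(raw_tcr, 41, 1);   /* TCR_EL1.HPD0  */
        } else {
            hpd = extract64(raw_tcr, 42, 1);   /* TCR_EL1.HPD1  */
        }
        if (!aarch64) {
            hpd &= extract64(raw_tcr, 6, 1);   /* AArch32: needs T2E */
        }
        return hpd;
    }

When the walk then meets a table descriptor with hpd set, only NSTable is merged (it is hoisted above the early break for exactly that reason); APTable and the XN/PXN table bits no longer restrict the final attributes.
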
diff --git a/target/arm/internals.h b/target/arm/internals.h
index d208b70a64..78e026d6e9 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -229,7 +229,8 @@ static inline unsigned int arm_pamax(ARMCPU *cpu)
[4] = 44,
[5] = 48,
};
- unsigned int parange = extract32(cpu->id_aa64mmfr0, 0, 4);
+ unsigned int parange =
+ FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);
/* id_aa64mmfr0 is a read-only register so values outside of the
* supported mappings can be considered an implementation error. */
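
Worked example with the Cortex-A53 value set in cpu64.c above:

    uint64_t id_aa64mmfr0 = 0x00001122;
    unsigned parange = FIELD_EX64(id_aa64mmfr0, ID_AA64MMFR0, PARANGE);
    /* parange == 2, and pamax_map[2] == 40: a 40-bit physical
     * address space, matching the "40 bit physical addr" comment. */
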
diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
index 0a502091e7..089af9c5f0 100644
--- a/target/arm/kvm64.c
+++ b/target/arm/kvm64.c
@@ -538,6 +538,10 @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
ARM64_SYS_REG(3, 0, 0, 6, 0));
err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar1,
ARM64_SYS_REG(3, 0, 0, 6, 1));
+ err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr0,
+ ARM64_SYS_REG(3, 0, 0, 7, 0));
+ err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr1,
+ ARM64_SYS_REG(3, 0, 0, 7, 1));
/*
* Note that if AArch32 support is not present in the host,
diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c
index 0d6e89e474..ef72361a36 100644
--- a/target/arm/op_helper.c
+++ b/target/arm/op_helper.c
@@ -33,8 +33,7 @@ void raise_exception(CPUARMState *env, uint32_t excp,
{
CPUState *cs = CPU(arm_env_get_cpu(env));
- if ((env->cp15.hcr_el2 & HCR_TGE) &&
- target_el == 1 && !arm_is_secure(env)) {
+ if (target_el == 1 && (arm_hcr_el2_eff(env) & HCR_TGE)) {
/*
* Redirect NS EL1 exceptions to NS EL2. These are reported with
* their original syndrome register value, with the exception of
@@ -428,9 +427,9 @@ static inline int check_wfx_trap(CPUARMState *env, bool is_wfe)
* No need for ARM_FEATURE check as if HCR_EL2 doesn't exist the
* bits will be zero indicating no trap.
*/
- if (cur_el < 2 && !arm_is_secure(env)) {
- mask = (is_wfe) ? HCR_TWE : HCR_TWI;
- if (env->cp15.hcr_el2 & mask) {
+ if (cur_el < 2) {
+ mask = is_wfe ? HCR_TWE : HCR_TWI;
+ if (arm_hcr_el2_eff(env) & mask) {
return 2;
}
}
@@ -995,7 +994,7 @@ void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
exception_target_el(env));
}
- if (!secure && cur_el == 1 && (env->cp15.hcr_el2 & HCR_TSC)) {
+ if (cur_el == 1 && (arm_hcr_el2_eff(env) & HCR_TSC)) {
/* In NS EL1, HCR controlled routing to EL2 has priority over SMD.
* We also want an EL2 guest to be able to forbid its EL1 from
* making PSCI calls into QEMU's "firmware" via HCR.TSC.
@@ -1098,8 +1097,7 @@ void HELPER(exception_return)(CPUARMState *env)
goto illegal_return;
}
- if (new_el == 1 && (env->cp15.hcr_el2 & HCR_TGE)
- && !arm_is_secure_below_el3(env)) {
+ if (new_el == 1 && (arm_hcr_el2_eff(env) & HCR_TGE)) {
goto illegal_return;
}
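
All four op_helper.c changes lean on the same invariant: arm_hcr_el2_eff() returns 0 in secure state, so the explicit !arm_is_secure()/!arm_is_secure_below_el3() guards become redundant rather than being dropped. A sketch of the property:

    if (arm_is_secure_below_el3(env)) {
        assert(arm_hcr_el2_eff(env) == 0);  /* HCR_EL2 suppressed */
    }
    /* hence "(hcr & HCR_TGE) && !secure" simplifies to
     * "arm_hcr_el2_eff(env) & HCR_TGE"                   */
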
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index fd36425f1a..e1da1e4d6f 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -2290,6 +2290,12 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
}
return;
+ case 0x8: /* STLLR */
+ if (!dc_isar_feature(aa64_lor, s)) {
+ break;
+ }
+ /* StoreLORelease is the same as Store-Release for QEMU. */
+ /* fall through */
case 0x9: /* STLR */
/* Generate ISS for non-exclusive accesses including LASR. */
if (rn == 31) {
@@ -2301,6 +2307,12 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
return;
+ case 0xc: /* LDLAR */
+ if (!dc_isar_feature(aa64_lor, s)) {
+ break;
+ }
+ /* LoadLOAcquire is the same as Load-Acquire for QEMU. */
+ /* fall through */
case 0xd: /* LDAR */
/* Generate ISS for non-exclusive accesses including LASR. */
if (rn == 31) {
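
Treating STLLR as STLR and LDLAR as LDAR is sound because acquire/release ordering strictly subsumes the limited-ordering-region semantics; an implementation may always provide more ordering than LoadLOAcquire/StoreLORelease demand. In C11 terms (an analogy, not QEMU code):

    #include <stdatomic.h>

    /* Implementing the weaker LO-acquire with a full acquire load
     * only adds ordering, never removes any. */
    static int load_lo_acquire(const _Atomic int *p)
    {
        return atomic_load_explicit(p, memory_order_acquire);
    }

The dc_isar_feature(aa64_lor, s) guards still matter, though: without ARMv8.1-LOR these encodings must UNDEF, which is what the break to the unallocated-encoding path provides.
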